content
stringlengths
5
1.05M
# -*- UTF-8 -*- import sqlite3,os,sys import re import pprint from time import time from hashlib import md5 from metadata import * """ This module is intended for working with SQLite database. runsql decorator is used to perform queries in a simple and consistent manner. Instead of opening database each and every time we run query, this is already done with the decorator. """ db_file = os.path.join( os.environ['HOME'], '.config','metadb.sqlite') def runsql(func): ''' Decorator for easy sqlite queries. Decorated function must return a dictionary consisting of 3 items: query - string of SQL statements to be executed, args - tuple of arguments to parametrized queries, func - user-defined function used within query or related triggers ''' def wrapper(*w_args,**w_kwargs): global db_file query_dict = func(*w_args,**w_kwargs) try: conn = sqlite3.connect(db_file) except sqlite.Error as sqlerr: # TODO: write a custom function for this print(sys.argv[0], "Could not open the database file:{0}".format(sqlerr)) exit(2) c = conn.cursor() if query_dict['func']: sqlite3.enable_callback_tracebacks(True) conn.create_function(*query_dict['func']) c.execute(query_dict['query'],query_dict['args']) if query_dict['query'].lstrip().upper().startswith("SELECT"): return c.fetchall() return conn.commit() return wrapper @runsql def vacuum(): return { 'query': """ DELETE FROM file WHERE file_exists(file.f_path) = 0 """, 'args': None, 'func': tuple([ "file_exists", 1 , lambda x: 1 if os.path.exists(x) else 0 ]) } def init_db(): global db_file # The database file doesn't exist. 
# Create database file with all basic tables with open(os.path.join(sys.path[0],'setup.sql')) as setup: try: conn = sqlite3.connect(db_file) except sqlite3.Error as sqliterr: print("SQLite error: {0}".format(sqliterr)) sys.exit(2) curs = conn.cursor() script = setup.read() curs.executescript(script) conn.commit() conn.close() def updatedb(): pass def load_db(): @runsql def insert_files( value ): # value arg should already be tuple return { 'query': """ INSERT INTO file VALUES (?,?,?,?,?,?,?)""", 'args': value, 'func': tuple([ "get_metadata", 3 , get_metadata]) } if not os.path.exists(db_file): init_db() for i in walk_home_tree(): insert_files(i) def regexp(y,x): """ Used to implement REGEXP function for SQLite, see https://www.sqlite.org/lang_expr.html """ return True if re.search(y,x) else False def find_file(pattern): ''' Returns all information in parent table plus information specific to the file in pretty print format''' @runsql def get_file_and_subtype(pattern): return { 'query': """ SELECT f_path,ftype_major FROM file WHERE f_path REGEXP ? """, 'args': (pattern, ), 'func': tuple(["REGEXP",2,regexp]) } @runsql def query_full_info(fpath,table): return { 'query': """ SELECT * FROM file JOIN {0} ON file.f_path = {0}.f_path AND file.f_path = ?""".format(table), 'args': (fpath, ), 'func': None } matched_files = get_file_and_subtype(pattern) pp = pprint.PrettyPrinter(indent=4) if matched_files: for i in matched_files: result = query_full_info(*i) if result: newresult = list(result[0]) newresult.pop(7) pp.pprint(newresult) # vim: syntax=python:
from collections import deque from typing import List class Solution: def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]: # Build email to name mapping email_map = dict() for account in accounts: for email in account[1:]: if email not in email_map: email_map[email] = set(account[1:]) else: for e in account[1:]: email_map[email].add(e) result = [] visited = set() # visited email for account in accounts: cur_account = [account[0]] if account[1] in visited: continue queue = deque([account[1]]) visited.add(account[1]) while queue: cur = queue.pop() cur_account.append(cur) for neighbor in email_map[cur]: if neighbor in visited: continue queue.append(neighbor) visited.add(neighbor) # Emails should be in sorted order. cur_account = [cur_account[0]] + sorted(cur_account[1:]) result.append(cur_account) return result
import decimal
import logging


class InvalidConfigurationException(Exception):
    """Raised when the collectd Module configuration is missing or invalid."""
    pass


class Configuration(object):
    """Parse and validate the collectd ``<Module oci_write_plugin>`` block.

    Expects a collectd config node: ``config.key`` must be 'Module',
    ``config.values[0]`` must be 'oci_write_plugin', and ``config.children``
    carries the individual options.  Raises InvalidConfigurationException on
    any missing or out-of-range option.
    """

    def __init__(self, config):
        if config.key != 'Module':
            raise InvalidConfigurationException("config.key %s != Module\n" % config.key)
        if config.values[0] != 'oci_write_plugin':
            raise InvalidConfigurationException("config.values %s != oci\n" % config.values)
        # BUG FIX: pre-initialize every option so that an absent option fails
        # the validation below with InvalidConfigurationException instead of
        # crashing with AttributeError/TypeError.
        self.host = None
        self.token = None
        self.report_interval = None
        self.failed_report_queue_size = None
        self.aggregation_type = None
        self.plugins = None
        logging_level_raw = ''
        for node in config.children:
            key = node.key.lower()
            value = node.values[0]
            if key == 'host':
                self.host = value
            elif key == 'token':
                self.token = value
                self.headers = {"Content-type": "application/json",
                                "X-OCI-Integration": self.token}
            elif key == 'report_interval':
                self.report_interval = decimal.Decimal(value)
            elif key == 'failed_report_queue_size':
                self.failed_report_queue_size = decimal.Decimal(value)
            elif key == "aggregation_type":
                self.aggregation_type = value
            elif key == "plugins":
                # Comma-separated plugin names, whitespace-tolerant.
                self.plugins = set([s.strip() for s in value.split(',')])
            elif key == "logging_level":
                # Raw (lowercase) value is validated below; the numeric level
                # comes from the stdlib logging name table.
                logging_level_raw = value
                self.logging_level = logging.getLevelName(value.upper())
            else:
                raise InvalidConfigurationException("Invalid configuration: %s = %s\n" % (key, value))
        if not self.host:
            raise InvalidConfigurationException("Host is not configured in the module configuration")
        if not self.token:
            raise InvalidConfigurationException("Token is not configured in the module configuration")
        if self.report_interval is None or \
                self.report_interval < 60 or self.report_interval > 3600:
            raise InvalidConfigurationException("Interval for data report should be between 60 and 3600 inclusive")
        if self.failed_report_queue_size is None or \
                self.failed_report_queue_size < 0 or self.failed_report_queue_size > 10000:
            raise InvalidConfigurationException(
                "Queue size for failed data report should be between 0 and 10000 inclusive")
        if self.aggregation_type not in ['average', 'minimum', 'maximum', 'last']:
            raise InvalidConfigurationException(
                "Aggregation type for guaged value can be only one of ['average', 'minimum', 'maximum', 'last']")
        if not self.plugins:
            raise InvalidConfigurationException("Plugins is not configured in the module configuration")
        if logging_level_raw not in ['debug', 'info', 'warning', 'error']:
            raise InvalidConfigurationException(
                "Logging level can be only one of ['debug', 'info', 'warning', 'error']")
from django.conf.urls import url, patterns from ginger.conf.urls import scan from . import views urlpatterns = scan(views)
import logging

from pygerrit.rest import GerritRestAPI

from rh_nexttask.client import Client
from rh_nexttask.constants import Constants
from rh_nexttask.date import Date
from rh_nexttask.review import Review

logger = logging.getLogger('rh-nexttask')


class Bz(object):
    """Hold a reference to one bugzilla.

    Wraps a python-bugzilla bug object, copies the fields we care about onto
    this instance, resolves its external trackers (gerrit reviews, launchpad
    bugs, customer cases) and derives client/escalation status.
    """

    def __init__(self, bugzilla, dfg=None):
        self.bugzilla = bugzilla
        self.logger = logging.getLogger('rh-nexttask')
        # Seems it doesn't always work the first time for fixed_in.
        self._check_attributes()
        # Fields mirrored from the bug.  A truthy default below is used as-is;
        # a falsy one means "read the attribute of the same name off the bug".
        # BUG FIX: the original dict literal listed 'id': None twice; the
        # duplicate entry was removed (dict literals silently keep the last).
        default_properties = {
            'id': None,
            'summary': bugzilla.summary.encode('utf-8'),
            'url': bugzilla.weburl,
            'status': None,
            'fixed_in': None,
            'reviews': {},
            'external_bugs': None,
            'target_release': bugzilla.target_release[0],
            'keywords': None,
            'assigned_to': None,
            'internal_whiteboard': None,
            'devel_whiteboard': None,
            'target_milestone': None,
            'flags': None,
            'last_change_time': None,
            'version': None,
        }
        self.dfg = dfg
        self._upstream_gerrit = None
        self._internal_gerrit = None
        self._gerrit = {}
        self.upstream_reviews = []
        self.internal_reviews = []
        self.launchpad_ids = []
        self._planned_for = None
        self.is_client = False
        self.is_escalated = False
        self.advices = []
        self.clients = []
        self._pm_flags = None
        for (prop, default) in default_properties.items():
            if default:
                # When an true value we use it
                setattr(self, prop, default)
            else:
                # empty or false try to get it or set default
                setattr(self, prop, getattr(bugzilla, prop, default))
        self.no_change_during = Date(self.last_change_time).diff_date.days
        for ext in self.external_bugs:
            if 'type' in ext and 'description' in ext['type']:
                eid = ext['ext_bz_bug_id']
                eurl = ext['type']['url']
                etype = ext['type']['description']
                edescription = ext['type']['description']
                # NOTE(review): `x in 'literal'` is a substring test in the
                # *reverse* of the usual direction; it matches exact tracker
                # descriptions but also any substring (and '' matches all) —
                # confirm this is intentional.
                if edescription in 'OpenStack gerrit':
                    self.upstream_reviews.append(
                        Review(self._get_review(eurl, eid), 'upstream', eurl))
                elif edescription in 'Red Hat Engineering Gerrit':
                    self.internal_reviews.append(
                        Review(self._get_review(eurl, eid), 'internal', eurl))
                elif edescription in 'Launchpad':
                    self.launchpad_ids.append(eid)
                elif etype in 'Red Hat Customer Portal':
                    self.is_client = True
                    self.clients.append(Client(ext))
        if not self.is_client:
            # Check the whiteboard for any clue
            if 'client' in self.devel_whiteboard:
                self.is_client = True
                self.clients.append(Client({}))
        if 'escalated' in self.devel_whiteboard:
            self.is_escalated = True
        if not self.is_client:
            # NOTE(review): this marks *every* remaining bug as a client bug
            # with an empty Client — looks deliberate (fallback client), but
            # it makes the two earlier detections redundant; confirm intent.
            self.is_client = True
            self.clients.append(Client({}))

    def __str__(self):
        return '{} {} - {} - {}\t- {} - ({} days without activity)'.format(
            self.id, self.status, self.assigned_to, self.summary,
            self.planned_for, self.no_change_during)

    def _get_review(self, url, gerrit_id):
        """Fetch the gerrit change dict for gerrit_id from the given server.

        Raises a generic Exception when gerrit returns no match.
        """
        request = "/changes/?q=change:{0}&o=LABELS&o=DETAILED_ACCOUNTS".format(gerrit_id)
        # python2/3 wonders
        # request = bytes(request.encode('utf-8'))
        request = self.gerrit(url).get(request)
        try:
            review = request[0]
        except IndexError:
            raise Exception("Cannot get review for {} on {} for BZ {}".format(
                gerrit_id, url, self.url))
        return review

    @property
    def planned_for(self):
        """Human-readable release target, computed once then cached."""
        if self._planned_for is None:
            self._planned_for = 'Unknown'
            if self.target_milestone not in ['---']:
                self._planned_for = '{} {}'.format(self.version, self.target_milestone)
            elif self.target_release not in ['---']:
                self._planned_for = '{}'.format(self.target_release)
        return self._planned_for

    @property
    def pm_flags(self):
        """Flags set by the PM bot account, computed lazily."""
        if not self._pm_flags:
            self._pm_flags = [flag for flag in self.flags
                              if flag['setter'] == 'pm-rhel@redhat.com']
        return self._pm_flags

    def gerrit(self, url):
        """Return a cached GerritRestAPI client for url.

        SSL verification is only enabled for review.openstack.org.
        """
        if url not in self._gerrit:
            ssl_verify = True
            if 'review.openstack.org' not in url:
                ssl_verify = False
            self._gerrit[url] = GerritRestAPI(url=url, auth=None, verify=ssl_verify)
        return self._gerrit[url]

    def _check_attributes(self):
        """Refresh the bug if any required field is missing on first access."""
        try:
            for attribute in Constants.INCLUDE_FIELDS:
                getattr(self.bugzilla, attribute)
        except AttributeError:
            self.logger.debug("Refreshing bug {}".format(self.bugzilla.id))
            self.bugzilla.refresh(Constants.INCLUDE_FIELDS)
import cProfile as profiler import pstats def start_profiler(): profile = profiler.Profile(timeunit=100) profile.enable() return profile def stop_profiler(profiler, filename=None): filename = filename or "./profile.pf" profiler.disable() sortby = 'cumulative' ps = pstats.Stats(profiler).sort_stats(sortby) ps.print_stats(0.25) ps.dump_stats(filename)
import platform
import os
import sys
import vlc
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPalette, QColor


class Ui_MainWindow(object):
    """PyQt5 + python-vlc media player window.

    Builds the UI (video frame, seek slider, play/stop/playlist buttons,
    volume slider, playlist list widget, menu bar) and drives a single VLC
    media player instance.
    """

    def __init__(self):
        super().__init__()
        #self.setWindowTitle("PyMediaPlayer")
        # One libVLC instance and one player reused for every media item.
        self.instance = vlc.Instance()
        self.media = None
        self.mediaplayer = self.instance.media_player_new()
        self.is_paused = False
        self.dpath=''       # last directory chosen via "Open Folder"
        self.dirpths=[]     # playlist: absolute paths of media files

    def setupUi(self, MainWindow):
        """Create all widgets, lay them out and wire up the signals."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1024, 768)
        MainWindow.setWindowOpacity(1.0)
        MainWindow.setAutoFillBackground(True)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        # Video surface: macOS needs a Cocoa view container, others a QFrame.
        if platform.system() == "Darwin":
            self.frame = QtWidgets.QMacCocoaViewContainer(0)
        else:
            self.frame = QtWidgets.QFrame(self.centralwidget)
        # NOTE(review): `self.palette` shadows QWidget.palette() if this class
        # is ever mixed into a widget — confirm harmless here.
        self.palette = self.frame.palette()
        self.palette.setColor(QPalette.Window, QtGui.QColor(0, 0, 0))
        self.frame.setPalette(self.palette)
        self.frame.setAutoFillBackground(True)
        #self.frame = QtWidgets.QFrame(self.centralwidget)
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.verticalLayout.addWidget(self.frame)
        # Seek slider: 0..1000 maps onto media position 0.0..1.0.
        self.hsl1 = QtWidgets.QSlider(self.centralwidget)
        self.hsl1.setOrientation(QtCore.Qt.Horizontal)
        self.hsl1.setObjectName("hsl1")
        self.hsl1.setMaximum(1000)
        self.verticalLayout.addWidget(self.hsl1)
        self.hsl1.sliderPressed.connect(self.set_position)
        self.hsl1.sliderMoved.connect(self.set_position)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Transport buttons row.
        self.play = QtWidgets.QPushButton(self.centralwidget)
        self.play.setMaximumSize(QtCore.QSize(50, 20))
        self.play.setObjectName("play")
        self.horizontalLayout.addWidget(self.play)
        self.play.clicked.connect(self.play_pause)
        self.stop = QtWidgets.QPushButton(self.centralwidget)
        self.stop.setMaximumSize(QtCore.QSize(50, 20))
        # NOTE(review): object name "pause" on the stop button looks like a
        # leftover — confirm nothing looks widgets up by this name.
        self.stop.setObjectName("pause")
        self.horizontalLayout.addWidget(self.stop)
        self.stop.clicked.connect(self.stop1)
        self.playlist = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.playlist.sizePolicy().hasHeightForWidth())
        self.playlist.setSizePolicy(sizePolicy)
        self.playlist.setMinimumSize(QtCore.QSize(0, 0))
        self.playlist.setMaximumSize(QtCore.QSize(50, 20))
        self.playlist.setObjectName("playlist")
        self.horizontalLayout.addWidget(self.playlist)
        self.playlist.clicked.connect(self.shplaylist)
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setText("")
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        # Volume slider, initialized from the player's current volume.
        self.hsl2 = QtWidgets.QSlider(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.hsl2.sizePolicy().hasHeightForWidth())
        self.hsl2.setSizePolicy(sizePolicy)
        self.hsl2.setMaximumSize(QtCore.QSize(16777215, 20))
        self.hsl2.setOrientation(QtCore.Qt.Horizontal)
        self.hsl2.setObjectName("hsl2")
        self.hsl2.setMaximum(200)
        self.hsl2.setValue(self.mediaplayer.audio_get_volume())
        self.horizontalLayout.addWidget(self.hsl2)
        self.hsl2.valueChanged.connect(self.set_volume)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.horizontalLayout_2.addLayout(self.verticalLayout)
        # Playlist widget (shown/hidden by shplaylist()).
        self.listWidget = QtWidgets.QListWidget(self.centralwidget)
        self.listWidget.setObjectName("listWidget")
        self.horizontalLayout_2.addWidget(self.listWidget)
        self.gridLayout.addLayout(self.horizontalLayout_2, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar with File actions.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 774, 21))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionOpen_Folder = QtWidgets.QAction(MainWindow)
        self.actionOpen_Folder.setObjectName("actionOpen_Folder")
        self.actionOpen_File = QtWidgets.QAction(MainWindow)
        self.actionOpen_File.setObjectName("actionOpen_File")
        self.actionadd_File = QtWidgets.QAction(MainWindow)
        self.actionadd_File.setObjectName("actionadd_File")
        self.actionClose = QtWidgets.QAction(MainWindow)
        self.actionClose.setObjectName("actionClose")
        self.menuFile.addAction(self.actionOpen_File)
        self.menuFile.addAction(self.actionOpen_Folder)
        self.menuFile.addAction(self.actionadd_File)
        self.menuFile.addAction(self.actionClose)
        self.menubar.addAction(self.menuFile.menuAction())
        self.actionOpen_File.triggered.connect(self.open_file)
        self.actionOpen_Folder.triggered.connect(self.open_folder)
        self.actionClose.triggered.connect(sys.exit)
        self.actionadd_File.triggered.connect(self.addtoplaylist)
        self.listWidget.clicked.connect(self.selectfromplaylist)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # 100 ms UI refresh timer, started while playing (see play_pause).
        self.timer = QtCore.QTimer(MainWindow)
        self.timer.setInterval(100)
        self.timer.timeout.connect(self.update_ui)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings; also hides the playlist initially."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "PyMediaPlayer"))
        self.stop.setText(_translate("MainWindow", "Stop"))
        self.play.setText(_translate("MainWindow", "Play"))
        self.playlist.setText(_translate("MainWindow", "Playlist"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.actionOpen_Folder.setText(_translate("MainWindow", "Open Folder"))
        self.actionOpen_File.setText(_translate("MainWindow", "Open File"))
        self.actionClose.setText(_translate("MainWindow", "Close"))
        self.actionadd_File.setText(_translate("MainWindow", "Add File to Playlist"))
        # Detach the playlist so the window starts without it visible.
        self.listWidget.setParent(None)

    def play_pause(self):
        """Toggle playback; keeps the Play button label and timer in sync."""
        if self.mediaplayer.is_playing():
            self.mediaplayer.pause()
            self.play.setText("Play")
            self.is_paused = True
            self.timer.stop()
        else:
            # play() returns -1 when there is no media to play.
            if self.mediaplayer.play() == -1:
                #self.open_file()
                return
            self.mediaplayer.play()
            self.play.setText("Pause")
            self.timer.start()
            self.is_paused = False

    def stop1(self):
        """Stop playback and reset the seek slider."""
        self.mediaplayer.stop()
        self.play.setText("Play")
        self.hsl1.setValue(int(self.mediaplayer.get_position() * 1000))

    def update_ui(self):
        """Timer slot: mirror the player position onto the seek slider."""
        media_pos = int(self.mediaplayer.get_position() * 1000)
        self.hsl1.setValue(media_pos)
        if not self.mediaplayer.is_playing():
            self.timer.stop()
            # Not paused by the user -> playback finished; reset.
            if not self.is_paused:
                self.stop1()

    def open_file(self):
        """Pick a single media file and start playing it."""
        dialog_txt = "Choose Media File"
        # NOTE(review): `MainWindow` here is the module-level global created
        # in the __main__ block, not the setupUi argument — this breaks if
        # the class is imported without running __main__; confirm.
        filename = QtWidgets.QFileDialog.getOpenFileName(MainWindow, dialog_txt, os.path.expanduser('~'))
        if not filename:
            return
        self.media = self.instance.media_new(filename[0])
        self.statusbar.showMessage(filename[0].split("/")[-1])
        self.mediaplayer.set_media(self.media)
        self.media.parse()
        # Hand the native window handle to VLC, per platform.
        if platform.system() == "Linux":
            self.mediaplayer.set_xwindow(int(self.frame.winId()))
        elif platform.system() == "Windows":
            self.mediaplayer.set_hwnd(int(self.frame.winId()))
        elif platform.system() == "Darwin":
            self.mediaplayer.set_nsobject(int(self.frame.winId()))
        self.play_pause()

    def open_folder(self):
        """Pick a directory and load its media files into the playlist."""
        self.dpath = str(QtWidgets.QFileDialog.getExistingDirectory(None, "Select Directory"))
        self.getmedia()

    def getmedia(self):
        """Walk self.dpath and collect supported media files into dirpths."""
        filess=[]
        dirpthss=[]
        for dirpath,dirs,files in os.walk(self.dpath):
            filess.append(files)
            dirpthss.append(dirpath)
        for files in filess:
            for file in files:
                if os.path.splitext(file)[-1] in ['.mp3','.mp4','.mkv','.avi']:
                    # NOTE(review): filess.index(files) finds the *first* equal
                    # file list — two directories with identical listings would
                    # resolve to the wrong path; confirm acceptable.
                    self.dirpths.append(str(os.path.join(dirpthss[filess.index(files)], file)))
        self.fillists(self.listWidget)

    def fillists(self,a):
        """Populate list widget `a` with every path in self.dirpths."""
        i=0
        _translate = QtCore.QCoreApplication.translate
        for dirs in self.dirpths:
            item = QtWidgets.QListWidgetItem()
            a.addItem(item)
            item = a.item(i)
            item.setText(_translate("MainWindow", str(dirs)))
            i+=1
        self.statusbar.showMessage(f"{len(self.dirpths)} Media files added to Playlist.")
        self.hsl2.setValue(self.mediaplayer.audio_get_volume())

    def set_volume(self, volume):
        """Volume slider slot: forward the raw slider value to VLC."""
        self.mediaplayer.audio_set_volume(volume)

    def set_position(self):
        """Seek slider slot: map slider value 0..1000 to position 0.0..1.0."""
        # Pause updates while seeking so the timer doesn't fight the user.
        self.timer.stop()
        pos = self.hsl1.value()
        self.mediaplayer.set_position(pos / 1000.0)
        self.timer.start()

    def shplaylist(self):
        """Toggle playlist visibility by (de)parenting the list widget."""
        if self.horizontalLayout_2.count() == 1 :
            self.horizontalLayout_2.addWidget(self.listWidget)
        else:
            self.listWidget.setParent(None)

    def selectfromplaylist(self):
        """Play the playlist entry the user clicked."""
        item1=self.listWidget.currentItem().text()
        self.media = self.instance.media_new(item1)
        self.statusbar.showMessage(item1.split("/")[-1])
        self.mediaplayer.set_media(self.media)
        self.media.parse()
        if platform.system() == "Linux":
            self.mediaplayer.set_xwindow(int(self.frame.winId()))
        elif platform.system() == "Windows":
            self.mediaplayer.set_hwnd(int(self.frame.winId()))
        elif platform.system() == "Darwin":
            self.mediaplayer.set_nsobject(int(self.frame.winId()))
        self.play_pause()

    def addtoplaylist(self):
        """Append a single chosen file to the playlist (no duplicates)."""
        dialog_txt = "Choose Media File"
        filename = QtWidgets.QFileDialog.getOpenFileName(MainWindow, dialog_txt, os.path.expanduser('~'))
        if not filename:
            return
        if str(filename[0]) not in self.dirpths:
            self.dirpths.append(str(filename[0]))


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    # Fusion style + dark palette for a uniform dark theme.
    app.setStyle("Fusion")
    dark_palette = QPalette()
    dark_palette.setColor(QPalette.Window, QColor(53, 53, 53))
    dark_palette.setColor(QPalette.WindowText, QColor(255,255,255))
    dark_palette.setColor(QPalette.Base, QColor(25, 25, 25))
    dark_palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
    dark_palette.setColor(QPalette.ToolTipBase, QColor(255,255,255))
    dark_palette.setColor(QPalette.ToolTipText, QColor(255,255,255))
    dark_palette.setColor(QPalette.Text, QColor(255,255,255))
    dark_palette.setColor(QPalette.Button, QColor(53, 53, 53))
    dark_palette.setColor(QPalette.ButtonText, QColor(255,255,255))
    dark_palette.setColor(QPalette.BrightText, QColor(255, 0, 0))
    dark_palette.setColor(QPalette.Link, QColor(42, 130, 218))
    dark_palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
    dark_palette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))
    app.setPalette(dark_palette)
    app.setStyleSheet("QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }")
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
#!/usr/bin/env python
# -*- coding: utf8 -*-

# python setup.py build_ext --inplace

"""
setup.py file
"""
import multiprocessing
import os
import pathlib
import platform
import re
import shutil
import subprocess
import sys
import sysconfig
import time
import unittest
import warnings
from abc import ABC

import distutils
from distutils import sysconfig
from distutils.version import LooseVersion
from distutils.command.build import build
from distutils.command.clean import clean

from setuptools import find_packages, setup, Command
from setuptools.command.install import install
from setuptools.extension import Extension

# Custom CLI flag: force linking against a BLAS implementation.
force_blas = False
if "--force-blas" in sys.argv:
    force_blas = True
    sys.argv.remove("--force-blas")

# Available debug flags
#
# DEBUG_C_ARRAY       : count #allocations of C-arrays
# DEBUG_ARRAY         : Track creation/destruction of Array objects
# DEBUG_SHAREDARRAY   : Track creation/destruction of SharedArray objects
# DEBUG_VARRAY        : Track VArray
# DEBUG_COSTLY_THROW  : Enables some costly tests to throw error
#                       (such as Array[i] if i not in range)
# DEBUG_VERBOSE       : Error messages from CPP extensions will include
#                       backtrace and error loc
# debug_flags = ['DEBUG_C_ARRAY', 'DEBUG_ARRAY', 'DEBUG_COSTLY_THROW',
#                'DEBUG_SHAREDARRAY', 'DEBUG_VARRAY', 'DEBUG_VERBOSE']
debug_flags = ['DEBUG_COSTLY_THROW']

# If true, add compilation flags to use fast (but maybe inaccurate) math
# See https://gcc.gnu.org/wiki/FloatingPointMath
use_fast_math = True

# Soft minimum-Python check: warn (don't abort) on older interpreters.
version_info = sys.version_info
python_min_ver = (3, 6, 0)
python_ver = (version_info.major, version_info.minor, version_info.micro)
if python_ver < python_min_ver:
    txt = 'Python version {0}.{1}.{2} ' \
          'lower than the required version >= {3}.{4}.{5}.'
    warnings.warn(txt.format(*(python_ver + python_min_ver)))

# The next block ensures that we build a link-time linkable dynamic library
# for OSX builds instead of a bundle.
#
# Snippet from http://stackoverflow.com/a/32765319/2299947
if sys.platform == 'darwin':
    vars = sysconfig.get_config_vars()
    vars['LDSHARED'] = vars['LDSHARED'].replace('-bundle', '-dynamiclib')

# If we're installing via a wheel or not
is_building_tick = any(arg in ("build", "build_ext", "bdist",
                               "bdist_wheel", "develop",) for arg in sys.argv)

# Obtain the numpy include directory.
# This logic works across numpy versions.
numpy_available = False
numpy_include = ""
blas_info = {}
try:
    import numpy as np
    from numpy.distutils.system_info import get_info
    try:
        numpy_include = np.get_include()
    except AttributeError:
        # very old numpy fallback
        numpy_include = np.get_numpy_include()
    # Determine if we have an available BLAS implementation
    if force_blas:
        # activated with build --force-blas
        blas_info = get_info("blas_opt", 0)
    elif platform.system() == 'Windows':
        # Probe with a helper script; success means cblas is linkable.
        try:
            with open(os.devnull, 'w') as devnull:
                exitCode = subprocess.check_output(
                    "python tools/python/blas/check_cblas.py build_ext",
                    stderr=devnull, shell=True)
            blas_info = get_info("blas_opt", 0)
        except subprocess.CalledProcessError as subError:
            print("Error executing check_cblas.py - cblas not found")
    else:
        # Non-Windows: probe for MKL instead.
        try:
            with open(os.devnull, 'w') as devnull:
                exitCode = subprocess.check_output(
                    "python tools/python/blas/check_mkl.py build_ext",
                    stderr=devnull, shell=True)
            blas_info = get_info("blas_opt", 0)
        except subprocess.CalledProcessError as subError:
            print("Error executing check_mkl.py - mkl not found")
    numpy_available = True
except ImportError as e:
    if is_building_tick:
        print(e)
        warnings.warn("numpy is not installed:\n"
                      " - Include directory for numpy integration may not be "
                      "correct\n "
                      " - BLAS will not be used for this build\n")

# sometimes disabling blas is desired
if os.environ.get('TICK_NO_OPTS') is not None:
    if os.environ['TICK_NO_OPTS'] == '1':
        blas_info = {}

# By default, we assume that scipy uses 32 bit integers for indices in sparse
# arrays
sparse_indices_flag = "-DTICK_SPARSE_INDICES_INT32"
try:
    import numpy as np
    from scipy.sparse import sputils
    sparsearray_type = sputils.get_index_dtype()
    if sparsearray_type == np.int64:
        sparse_indices_flag = "-DTICK_SPARSE_INDICES_INT64"
except ImportError as e:
    if is_building_tick and numpy_available:
        print(e)
        warnings.warn("scipy is not installed, unable to determine "
                      "sparse array integer type (assuming 32 bits)\n")

if os.name == 'posix':
    if platform.system() == 'Darwin':
        os_version = platform.mac_ver()[0]
        # keep only major + minor
        os_version = '.'.join(os_version.split('.')[:2])
        if LooseVersion(os_version) < LooseVersion('10.9'):
            raise ValueError(
                'You need to have at least mac os 10.9 to build this package')
        # We set this variable manually because anaconda set it to a
        # deprecated one
        os.environ['MACOSX_DEPLOYMENT_TARGET'] = os_version

# check for debug pyenv - PYVER must be exported as env var. Debug pyenv setup:
#  PYENV=3.7.0
#  CFLAGS="-O0 -ggdb" CONFIGURE_OPTS="--enable-shared" pyenv install -kg $PYVER
#  PYENV=${PYENV}-debug
#  eval "$(pyenv init -)"
#  pyenv global ${PYVER}
#  pyenv local ${PYVER}
PYVER = ""
PYVER_DBG = ""
if os.environ.get('PYVER') is not None:
    PYVER = os.environ['PYVER']
if PYVER.endswith("-debug"):
    PYVER_DBG = "-pydebug"

# Directory containing built .so files before they are moved either
# in source (with build flag --inplace) or to site-packages (by install)
# E.g. build/lib.macosx-10.11-x86_64-3.5
build_dir = "build/lib.{}-{}" + PYVER_DBG
build_dir = build_dir.format(distutils.util.get_platform(), sys.version[0:3])


class SwigExtension(Extension):
    """This only adds information about extension construction, useful for
    library sharing """

    def __init__(self, *args, module_ref=None, ext_name=None, **kwargs):
        super().__init__(*args, **kwargs)
        # SwigPath of this module, and the public extension name.
        self.module_ref = module_ref
        self.ext_name = ext_name


class SwigPath:
    """Small class to handle module creation and check project structure """

    def __init__(self, module_path, extension_name):
        module_path = os.path.normpath(module_path)
        # Module C++ source directory (e.g. lib/cpp/tick/base)
        self.src = os.path.join(module_path, 'src')
        # Module SWIG interface files directory (e.g. tick/array/swig)
        self.swig = "lib/swig/" + module_path[5:]
        # Module build directory. Will contain generated .py files, and .so
        # files if built with flag --inplace.
        #
        # E.g. tick/array/build
        self.build = os.path.join(module_path, 'build')
        self.extension_name = extension_name
        self.private_extension_name = '_' + extension_name
        # Transform folder path to module path
        self.extension_path = self.build \
            .replace('.', '') \
            .replace('/', '.') \
            + '.' + self.private_extension_name
        # Filename of the produced .so file (e.g. _array.so)
        self.lib_filename = '{}{}'.format(self.private_extension_name,
                                          sysconfig.get_config_var('EXT_SUFFIX'))


def create_extension(extension_name, module_dir, cpp_files, h_files,
                     swig_files, folders=[], include_modules=None,
                     extra_compile_args=None, swig_opts=None):
    # Builds one SwigExtension: resolves SWIG/C++ sources, compile/link flags,
    # BLAS/MKL options and inter-module link paths.
    # NOTE(review): mutable default `folders=[]` is never mutated here
    # (only iterated), so it is harmless — but confirm before refactoring.
    swig_path = SwigPath(module_dir, extension_name)
    extension_path = swig_path.extension_path

    # Add directory to filenames
    def add_dir_name(dir_name, filenames):
        return list(os.path.join(dir_name, filename) for filename in filenames)

    swig_files = add_dir_name("lib/swig/tick/" + module_dir[7:], swig_files)

    # Pull every .cpp out of the listed folders; .txt/.inl are known non-source.
    for folder in folders:
        for file in os.listdir(folder):
            file_path = os.path.join(folder, file)
            if os.path.isfile(file_path):
                _, ext = os.path.splitext(file)
                if ext == '.cpp':
                    cpp_files += [os.path.join(folder, file)]
                elif ext == ".txt":
                    pass
                elif ext == ".inl":
                    pass
                else:
                    warnings.warn('Included file %s in folder %s has an '
                                  'unknown extension "%s"' % (file, folder, ext))

    min_swig_opts = ['-py3', '-c++', '-Ilib/swig', '-Ilib/include',
                     '-outdir', swig_path.build, ]
    if swig_opts is None:
        swig_opts = min_swig_opts
    else:
        swig_opts.extend(min_swig_opts)

    # Here we set the minimum compile flags.
    min_extra_compile_args = ["-D_FILE_OFFSET_BITS=64", "-DPYTHON_LINK",
                              "-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION",
                              '-Ilib/include', sparse_indices_flag,
                              '-std=c++11',
                              '-O2',  # -O3 is sometimes dangerous and has caused segfaults on Travis
                              '-DNDEBUG',  # some assertions fail without this (TODO tbh)
                              ]
    if use_fast_math:
        min_extra_compile_args.append('-ffast-math')
    if extra_compile_args is None:
        extra_compile_args = min_extra_compile_args
    else:
        extra_compile_args.extend(min_extra_compile_args)
    extra_compile_args.append("-Wall")
    if platform.system() == 'Windows':
        extra_compile_args.append("-DBUILDING_DLL")
    else:
        ## Added -Wall to get all warnings and -Werror to treat them as errors
        extra_compile_args.append("-Werror")

    # Include directory of module
    mod = SwigPath(module_dir, extension_name)
    libraries = []
    library_dirs = []
    runtime_library_dirs = []
    extra_link_args = []
    define_macros = []
    extra_include_dirs = ["include", "swig"]

    # Deal with (optional) BLAS
    extra_compile_args.extend(blas_info.get("extra_compile_args", []))
    extra_link_args.extend(blas_info.get("extra_link_args", []))
    libraries.extend(blas_info.get("libraries", []))
    library_dirs.extend(blas_info.get("library_dirs", []))
    define_macros.extend(blas_info.get("define_macros", []))

    if 'define_macros' in blas_info and \
            any(key == 'HAVE_CBLAS' for key, _ in blas_info['define_macros']):
        define_macros.append(('TICK_USE_CBLAS', None))

    # NOTE(review): nesting of the block below was reconstructed from a
    # whitespace-mangled source — confirm the MKL include/rpath handling is
    # meant to apply only when linking against mkl_rt.
    if "libraries" in blas_info and "mkl_rt" in blas_info["libraries"]:
        define_macros.append(('TICK_USE_MKL', None))
        extra_include_dirs.extend(blas_info["include_dirs"])
        if platform.system() != 'Windows':
            for lib_dir in blas_info["library_dirs"]:
                extra_link_args.append("-Wl,-rpath," + lib_dir)
            # if not Linux assume MacOS
            if platform.system() != 'Linux':
                rel_path = os.path.relpath(lib_dir, swig_path.build)
                if os.path.exists(rel_path):
                    extra_link_args.append("-Wl,-rpath,@loader_path/" + rel_path)

    if include_modules is None:
        include_modules = []

    # Include all what need for module link
    for mod in include_modules:
        if mod.__class__ != SwigPath:
            raise ValueError("Unhandled class for included module")

        for opts in [swig_opts, extra_compile_args]:
            opts.extend(["-I" + mod.swig])

        # Because setuptools produces shared object files with non-standard
        # On windows we need to use ".lib" rather than ".pyd"
        # when linking libs to other libs
        if platform.system() == 'Windows':
            lib = os.path.join(build_dir, mod.build, "_" + mod.extension_name)
            lib += os.path.splitext(sysconfig.get_config_var("EXT_SUFFIX"))[0]
            libraries.append(lib)
        # names (i.e. not lib<name>.so) we specify the full library path
        else:
            extra_link_args.append(os.path.abspath(
                os.path.join(build_dir, mod.build, mod.lib_filename)))

            # Make sure that the runtime linker can find shared object
            # dependencies by using the relative path to the dependency
            # library.
            rel_path = os.path.relpath(mod.build, swig_path.build)
            if platform.system() == 'Linux':
                # $ORIGIN refers to the location of the current shared object
                # file at runtime
                runtime_library_dirs.append("\$ORIGIN/%s" % rel_path)
            elif platform.system() == 'Windows':
                pass
            else:
                # Assuming non-Windows builds for now
                # For OSX builds we use @loader_path instead
                extra_link_args.append(
                    "-Wl,-rpath,%s" % '@loader_path/%s' % rel_path)

    # Setting the SONAME/install_name for the built libraries. It ensures that
    # the runtime linker will have a chance to find the libraries even after
    # they're moved (during install, for example)
    filename = swig_path.lib_filename
    if platform.system() == 'Linux':
        extra_link_args.append('-Wl,-soname,%s' % filename)
    elif platform.system() == 'Windows':
        pass
    else:
        # For OSX the install_name needs to be prefixed with @rpath
        extra_link_args.append('-Wl,-install_name,@rpath/%s' % filename)

    for df in debug_flags:
        full_flag = "-D" + df
        extra_compile_args.append(full_flag)
        if df == 'DEBUG_COSTLY_THROW':
            swig_opts.append(full_flag)

    # Adding Cereal serialization library
    extra_include_dirs.append("lib/third_party/cereal/include")

    # Adding numpy include directory
    if numpy_include:
        extra_include_dirs.append(numpy_include)

    # This is to override the use of IMPLIB in distutils
    # which puts the lib/exp files in the wrong directory
    # see: https://github.com/python/cpython/blob/08bb8a41cc976343795bd0e241cd7388e9f44ad5/Lib/distutils/_msvccompiler.py#L467
    if platform.system() == 'Windows':
        implib = "/IMPLIB:" + os.path.abspath(
            os.path.join(build_dir, swig_path.build, "_" + extension_name))
        implib += os.path.splitext(sysconfig.get_config_var("EXT_SUFFIX"))[0]
        extra_link_args.append(implib + ".lib")

    core_module = SwigExtension(extension_path, module_ref=swig_path,
                                sources=swig_files + cpp_files,
                                extra_compile_args=extra_compile_args,
                                extra_link_args=extra_link_args,
                                define_macros=define_macros,
                                swig_opts=swig_opts,
                                libraries=libraries,
                                include_dirs=extra_include_dirs,
                                library_dirs=library_dirs,
                                runtime_library_dirs=runtime_library_dirs,
                                depends=h_files,
                                language="c++",
                                ext_name=extension_name)

    return core_module


##############################
# Create extensions
##############################

array_extension_info = {
    "cpp_files": [],
    "h_files": [],
    "folders": ["lib/cpp/array"],
    "swig_files": ["array_module.i"],
    "module_dir": "./tick/array/",
    "extension_name": "array"
}

array_extension = create_extension(**array_extension_info)

# NOTE(review): the assignment below is truncated by chunking — its
# right-hand side continues beyond this view and is left untouched.
base_extension_info = 
{   # tick.base: core C++ utilities; links against the array extension
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/base", "lib/cpp/base/math"],
    "swig_files": ["base_module.i"],
    "module_dir": "./tick/base",
    "extension_name": "base",
    "include_modules": [array_extension.module_ref]
}
base_extension = create_extension(**base_extension_info)

# Almost every other extension links against both the array and base modules.
base_array_modules = [array_extension.module_ref, base_extension.module_ref]

array_test_extension_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/array_test"],
    "swig_files": ["array_test_module.i"],
    "module_dir": "./tick/array_test/",
    "extension_name": "array_test",
    "include_modules": base_array_modules,
}
test_extension = create_extension(**array_test_extension_info)

random_extension_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/random"],
    "swig_files": ["crandom_module.i"],
    "module_dir": "./tick/random/",
    "extension_name": "crandom",
    "include_modules": base_array_modules
}
random_extension = create_extension(**random_extension_info)

base_model_core_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/base_model"],
    "swig_files": ["base_model_module.i"],
    "module_dir": "./tick/base_model/",
    "extension_name": "base_model",
    "include_modules": base_array_modules
}
base_model_core = create_extension(**base_model_core_info)

linear_model_core_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/linear_model"],
    "swig_files": ["linear_model_module.i"],
    "module_dir": "./tick/linear_model/",
    "extension_name": "linear_model",
    # linear models additionally need the generic model base classes
    "include_modules": base_array_modules + [base_model_core.module_ref, ]
}
linear_model_core = create_extension(**linear_model_core_info)

hawkes_simulation_extension_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/hawkes/simulation",
                "lib/cpp/hawkes/simulation/hawkes_baselines",
                "lib/cpp/hawkes/simulation/hawkes_kernels"],
    "swig_files": ["hawkes_simulation_module.i"],
    "module_dir": "./tick/hawkes/simulation/",
    "extension_name": "hawkes_simulation",
    # simulation draws random numbers, hence the crandom dependency
    "include_modules": base_array_modules +
                       [random_extension.module_ref]
}
hawkes_simulation_extension = \
    create_extension(**hawkes_simulation_extension_info)

hawkes_model_extension_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/hawkes/model",
                "lib/cpp/hawkes/model/base",
                "lib/cpp/hawkes/model/list_of_realizations", ],
    "swig_files": ["hawkes_model_module.i"],
    "module_dir": "./tick/hawkes/model/",
    "extension_name": "hawkes_model",
    "include_modules": base_array_modules + [base_model_core.module_ref]
}
hawkes_model_extension = create_extension(**hawkes_model_extension_info)

hawkes_inference_extension_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/hawkes/inference", ],
    "swig_files": ["hawkes_inference_module.i"],
    "module_dir": "./tick/hawkes/inference/",
    "extension_name": "hawkes_inference",
    "include_modules": base_array_modules + [base_model_core.module_ref,
                                             hawkes_model_extension.module_ref, ]
}
hawkes_inference_extension = create_extension(**hawkes_inference_extension_info)

prox_core_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/prox"],
    "swig_files": ["prox_module.i"],
    "module_dir": "./tick/prox/",
    "extension_name": "prox",
    "include_modules": base_array_modules
}
prox_core = create_extension(**prox_core_info)

robust_extension_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/robust"],
    "swig_files": ["robust_module.i"],
    "module_dir": "./tick/robust/",
    "extension_name": "robust",
    "include_modules": base_array_modules + [base_model_core.module_ref,
                                             linear_model_core.module_ref]
}
robust_extension = create_extension(**robust_extension_info)

# The solver extension links against nearly everything above.
solver_core_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/solver"],
    "swig_files": ["solver_module.i"],
    "module_dir": "./tick/solver/",
    "extension_name": "solver",
    "include_modules": base_array_modules + [random_extension.module_ref,
                                             base_model_core.module_ref,
                                             linear_model_core.module_ref,
                                             prox_core.module_ref,
                                             robust_extension.module_ref]
}
solver_core =
create_extension(**solver_core_info)

preprocessing_core_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/preprocessing"],
    "swig_files": ["preprocessing_module.i"],
    "module_dir": "./tick/preprocessing/",
    "extension_name": "preprocessing",
    "include_modules": base_array_modules
}
preprocessing_core = create_extension(**preprocessing_core_info)

survival_extension_info = {
    "cpp_files": [], "h_files": [],
    "folders": ["lib/cpp/survival"],
    "swig_files": ["survival_module.i"],
    "module_dir": "./tick/survival/",
    "extension_name": "survival",
    "include_modules": base_array_modules + [base_model_core.module_ref]
}
survival_extension = create_extension(**survival_extension_info)

# Every extension object, in dependency order; consumed by setup(ext_modules=...)
# and by BuildCPPTests to point CMake at the already-built libraries.
tick_modules = [
    array_extension, base_extension, test_extension, random_extension,
    base_model_core, linear_model_core, hawkes_simulation_extension,
    hawkes_model_extension, hawkes_inference_extension, prox_core,
    preprocessing_core, robust_extension, survival_extension, solver_core
]


# Abstract class for tick-specific commands that need access to common build
# directories
class TickCommand(Command, ABC):
    # source tree root and the dedicated C++-test build directory
    tick_dir = os.path.abspath(os.path.join(os.curdir, 'tick'))
    cpp_build_dir = os.path.abspath(os.path.join(build_dir, 'cpptest'))

    user_options = []

    def initialize_options(self):
        """Set default values for options."""
        pass

    def finalize_options(self):
        """Post-process options."""
        pass


class TickBuild(build):
    # minimum SWIG version known to generate working wrappers
    swig_min_ver = (4, 0, 0)

    @staticmethod
    def extract_swig_version(swig_ver_str):
        # Parse "SWIG Version X.Y.Z" out of `swig -version` output.
        # Falls back to (0, 0, 0) -- i.e. "always too old" -- when the
        # output cannot be parsed, after emitting a warning.
        m = re.search('SWIG Version (\d+).(\d+).(\d+)', swig_ver_str)
        if not m:
            txt = 'Could not extract SWIG version from string: {0}'
            warnings.warn(txt.format(swig_ver_str))
            return 0, 0, 0
        return tuple(int(x) for x in m.groups()[0:3])

    def run(self):
        # Warn (but do not abort) when the installed SWIG is too old;
        # tuple comparison gives the right lexicographic ordering.
        swig_ver = self.extract_swig_version(
            str(subprocess.check_output(['swig', '-version'])))
        if swig_ver < self.swig_min_ver:
            txt = 'SWIG version {0}.{1}.{2} ' \
                  'lower than the required version >= {3}.{4}.{5}. ' \
                  'This will likely cause build errors!'
            warnings.warn(txt.format(*(swig_ver + self.swig_min_ver)))
        # Build the SWIG extensions first, then run the normal build.
        self.run_command('build_ext')
        build.run(self)


class TickInstall(install):
    def run(self):
        # Ensure extensions are built before installing.
        self.run_command('build_ext')
        install.run(self)


class BuildRunCPPTests(TickCommand):
    description = 'build and run tick C++ tests'

    def run(self):
        self.run_command('makecpptest')
        self.run_command('runcpptest')


class RunCPPTests(TickCommand):
    description = 'run tick C++ tests'

    def run(self):
        # Delegates to the CMake-generated Makefile's "check" target.
        make_cmd = ['make', 'check']
        subprocess.check_call(make_cmd, cwd=self.cpp_build_dir)


class BuildCPPTests(TickCommand):
    build_jobs = 1
    description = 'build tick C++ tests'
    user_options = [
        ('build-jobs=', 'j',
         'number of parallel build jobs (default is number of available CPU '
         'cores reported by Python)'),
    ]

    def initialize_options(self):
        """Set default values for options."""
        self.build_jobs = multiprocessing.cpu_count()

    def run(self):
        # Configure with CMake, pointing it at the lib/ tree and at the
        # Python headers, then build with make.
        relpath = os.path.relpath(self.tick_dir, self.cpp_build_dir)
        cmake_exe = os.environ.get('TICK_CMAKE', 'cmake')
        inc_dir = sysconfig.get_python_inc()
        cmake_cmd = [cmake_exe, '-DPYTHON_INCLUDE_DIR={}'.format(inc_dir),
                     '-DTICK_REBUILD_LIBS=OFF', '-DBENCHMARK=OFF',
                     relpath + '/../lib']

        # Feed the path to the built C++ extensions so CMake does not have to
        # build them again
        for mod in tick_modules:
            full_path = os.path.abspath(
                os.path.join(mod.module_ref.build, mod.module_ref.lib_filename))
            cmake_cmd.append(
                '-DTICK_LIB_{}={}'.format(mod.ext_name.upper(), full_path))

        # NOTE(review): this local `define_macros` is never used afterwards --
        # looks like dead code copied from create_extension.
        define_macros = []
        if 'define_macros' in blas_info and \
                any(key == 'HAVE_CBLAS' for key, _ in blas_info['define_macros']):
            cmake_cmd.append('-DUSE_BLAS=ON')

        os.makedirs(os.path.join(self.cpp_build_dir, 'cpptest'), exist_ok=True)
        subprocess.check_call(cmake_cmd, cwd=self.cpp_build_dir)

        make_cmd = ['make', 'VERBOSE=1', 'all', '-j{}'.format(self.build_jobs)]
        subprocess.check_call(make_cmd, cwd=self.cpp_build_dir)


class RunCPPLint(TickCommand):
    description = 'run cpplint on tick C++ source files'

    # directories scanned recursively for .h/.cpp files
    CPPLINT_DIRS = [
        'lib/include',
        'lib/cpp',
    ]

    def
    run(self):
        # cpplint is optional: if it is not importable we warn and skip
        # instead of failing the build.
        try:
            import cpplint as cl
            cl_state = cl._cpplint_state
            error_count = 0
            for dir in self.CPPLINT_DIRS:
                print("Processing {}".format(dir))
                cl_state.ResetErrorCounts()
                filenames = list(pathlib.Path(dir).glob('**/*.h')) + \
                            list(pathlib.Path(dir).glob('**/*.cpp'))
                for filename in filenames:
                    cl.ProcessFile(str(filename), cl_state.verbose_level)
                cl_state.PrintErrorCounts()
                error_count += cl_state.error_count
                print('')
            if error_count > 0:
                raise RuntimeError("Codestyle check by cpplint failed")
        except ImportError:
            warnings.warn("Stylecheck by cpplint failed because cpplint "
                          "is not installed as a Python module")


class RunPyLint(TickCommand):
    # We need to define if and how we run pylint
    description = 'run tick PyLint codestyle check'
    start_dir = '.'

    @staticmethod
    def run():
        # NOTE(review): the adjacent literals concatenate without a space,
        # producing "setup.pynot supported yet".
        raise NotImplementedError('Running pylint from setup.py'
                                  'not supported yet')


class RunPyTests(TickCommand):
    description = 'run tick Python tests'
    start_dir = '.'

    user_options = [
        ('start-dir=', 's',
         'directory to start looking for Python tests (e.g. tick/simulation)'),
    ]

    def initialize_options(self):
        """Set default values for options."""
        self.start_dir = '.'

    def run(self):
        # unittest discovery of *_test.py; exit code reflects success.
        if platform.system() == 'Windows':
            print("The pytest command has issues with threads on Windows")
            print('Instead please run:')
            print('python3 -m unittest discover -v . "*_test.py"')
            exit(1)
        loader = unittest.TestLoader()
        alltests = loader.discover(self.start_dir, pattern="*_test.py")
        result = unittest.TextTestRunner(verbosity=2).run(alltests)
        sys.exit(not result.wasSuccessful())


class RunTestSuites(TickCommand):
    description = 'run tick Python and C++ tests'

    def run(self):
        self.run_command('cpptest')
        self.run_command('pytest')


class CleanTick(clean):
    description = 'cleans all generated and built files'

    def run(self):
        # Grace period so an accidental `clean` can still be aborted.
        seconds_until_clean = 5
        print("Cleaning source directories in %d seconds..."
              % seconds_until_clean)
        time.sleep(seconds_until_clean)
        clean.run(self)
        # Remove the whole build tree plus generated SWIG wrappers,
        # shared objects and bytecode caches under the source tree.
        shutil.rmtree(build_dir, ignore_errors=True)
        patterns = [
            '**/*.so',
            '**/*_wrap.cpp',
            '**/__pycache__/*.pyc',
            '**/__pycache__',
        ]
        for paths in (pathlib.Path(os.curdir).glob(p) for p in patterns):
            for path in paths:
                print("Deleting {}".format(path))
                if path.is_dir():
                    path.rmdir()
                else:
                    path.unlink()


setup(name="tick",
      version='0.6.0.1',
      author="Emmanuel Bacry, "
             "Stephane Gaiffas, "
             "Martin Bompaire, "
             "Søren V. Poulsen, "
             "Maryan Morel, "
             "Simon Bussy, "
             "Philip Deegan",
      author_email='martin.bompaire@polytechnique.edu, '
                   'philip.deegan@polytechnique.edu',
      url="https://x-datainitiative.github.io/tick/",
      description="Module for statistical learning, with a particular emphasis "
                  "on time-dependent modelling",
      ext_modules=tick_modules,
      install_requires=['numpy', 'scipy', 'numpydoc', 'matplotlib', 'sphinx',
                        'pandas', 'dill', 'scikit-learn'],
      packages=find_packages(),
      # custom commands defined above
      cmdclass={'build': TickBuild,
                'install': TickInstall,
                'makecpptest': BuildCPPTests,
                'runcpptest': RunCPPTests,
                'cpptest': BuildRunCPPTests,
                'cpplint': RunCPPLint,
                'pytest': RunPyTests,
                'pylint': RunPyLint,
                'test': RunTestSuites,
                'clean': CleanTick},
      classifiers=['Development Status :: 3 - Alpha',
                   'Intended Audience :: Science/Research',
                   'Intended Audience :: Developers',
                   'Programming Language :: C++',
                   'Programming Language :: Python',
                   'Topic :: Software Development',
                   'Topic :: Scientific/Engineering',
                   'Operating System :: POSIX',
                   'Operating System :: Unix',
                   'Operating System :: MacOS',
                   'Programming Language :: Python :: 3.5',
                   'Programming Language :: Python :: 3.6',
                   'Programming Language :: Python :: 3.7',
                   'License :: OSI Approved :: BSD License'],
      )
# Mine domain-specific paraphrases from SNIPs datasets
import os
import logging
import csv
import codecs
import argparse

import spacy

from generate_word_alignment import make_alignment_matrix
from preprocessing import create_valid_groups, normalize_sent, divide_sent_by_prep
from fsa import create_fsa, process_sents, find_verb_paraphrases, find_phrase_paraphrases, get_repr_nd_text

nlp = spacy.load('en_core_web_lg')


def create_fsa_from_file(fpath):
    """
    Build the word-lattice FSA over all sentences of one data file.

    Returns a 4-tuple (fsa, sent_num, sents, tk_num):
        fsa      -- the word lattice
        sent_num -- number of distinct (normalized) sentences
        sents    -- the normalized sentences
        tk_num   -- total token count over all sentences
    """
    # Load sentences, dropping exact duplicates after normalization.
    sents = []
    tk_sents = {}  # sentence index -> list of spaCy token texts
    with codecs.open(fpath, encoding='utf-8') as ifile:
        idx = 0
        for line in ifile:
            line = line.strip()
            norm_line = normalize_sent(line)
            if norm_line in sents:
                print("Found a duplicated sentence.")
                continue
            doc = nlp(norm_line)
            tk_sents[idx] = [tk.text for tk in doc]
            sents.append(norm_line)
            idx += 1

    # Pairwise word alignment between all sentences.
    print("\t Generate pairwise alignment between sentences...")
    align_matrix = make_alignment_matrix(sents)

    # Validity checking: cluster sentences with mutually consistent alignments.
    sents_cluster = create_valid_groups(align_matrix, tk_sents)

    # Create the word lattice, folding in one cluster at a time.
    fsa = create_fsa(tk_sents)
    for i, cluster in enumerate(sents_cluster):
        fsa = process_sents(fsa, tk_sents, align_matrix, cluster)

    sent_num = len(sents)
    tk_num = sum(len(tk_list) for tk_list in tk_sents.values())
    return fsa, sent_num, sents, tk_num


def store_paraphrase_csv(input_path, fsa, word_to_para, output_dir):
    """Write one (dataset, node1_text, paraphrase_set, node2_text) row per node pair."""
    logging.info("Store paraphrases in csv files.")
    dataset_name = os.path.basename(input_path)
    output_file = os.path.join(
        output_dir,
        os.path.splitext(os.path.basename(input_path))[0] + "_essentia.csv")
    with open(output_file, "w") as ofile:
        csv_writer = csv.writer(ofile)
        for ndpair, para_set in word_to_para.items():
            nd1, nd2 = ndpair
            nd1_text = get_repr_nd_text(fsa, nd1)
            nd2_text = get_repr_nd_text(fsa, nd2)
            csv_writer.writerow([dataset_name, nd1_text, para_set, nd2_text])


def store_vp_paraphrase_csv(input_path, fsa, word_to_para, output_dir):
    """
    Merge overlapping paraphrase sets coming from different clusters
    (naive union-find over sets that share at least one element), then
    write one row per merged set.
    """
    logging.info("Store vp paraphrases in csv files.")
    dataset_name = os.path.basename(input_path)
    output_file = os.path.join(
        output_dir,
        os.path.splitext(os.path.basename(input_path))[0] + "_essentia.csv")

    # Keep only non-empty paraphrase sets.
    para_sets = [paras for _, paras in word_to_para.items() if len(paras) > 0]

    # set_to_root[i] is the representative of para_sets[i].
    set_to_root = {i: i for i in range(len(para_sets))}
    for i, set1 in enumerate(para_sets):
        if i == len(para_sets) - 1:
            break
        for j in range(i + 1, len(para_sets)):
            # Sets sharing any element are merged under i's representative.
            if any(ele in para_sets[j] for ele in set1):
                set_to_root[j] = set_to_root[i]

    # Invert: representative -> indices of all member sets.
    root_to_mems = {}
    for key, value in set_to_root.items():
        root_to_mems.setdefault(value, []).append(key)

    # Union the member sets, deduplicated.
    new_para_sets = []
    for equi_sets_ids in root_to_mems.values():
        equi_set = []
        for set_id in equi_sets_ids:  # renamed from `id` (shadowed a builtin)
            equi_set += para_sets[set_id]
        new_para_sets.append(list(set(equi_set)))

    with open(output_file, "w") as ofile:
        csv_writer = csv.writer(ofile)
        for para_set in new_para_sets:
            if len(para_set) > 0:
                csv_writer.writerow([dataset_name, para_set])


def gene_phrase_paras_single_file(input_file, output_dir):
    """Mine phrase-level paraphrases from one file and store them as CSV."""
    logging.info("Generate phrase paraphrases for: {}".format(input_file))
    # BUG FIX: create_fsa_from_file returns 4 values; the original unpacked
    # only 2, which raised ValueError at runtime.
    fsa, sent_num, _, _ = create_fsa_from_file(input_file)
    word_to_para = find_phrase_paraphrases(fsa)
    store_paraphrase_csv(input_file, fsa, word_to_para, output_dir)


def gene_phrase_paras_first_chunk_single_file(input_file, output_dir,
                                              origin_sents=None):
    """
    Baseline: keep the first prepositional chunk of each sentence as its
    paraphrase and write the deduplicated chunks to <name>_essentia.txt.

    `origin_sents` is unused; it now defaults to None so that callers which
    never supplied it (e.g. gene_para_dir_first_chunk) no longer raise
    TypeError.  Passing it explicitly remains valid.
    """
    logging.info("Select the first chunk of each sentence as paraphrases: {}".format(input_file))
    paras = []
    with open(input_file) as ifile:
        for line in ifile:
            chunks = divide_sent_by_prep(line)
            paras.append(chunks[0])
    paras = list(set(paras))
    output_file = os.path.join(
        output_dir,
        os.path.splitext(os.path.basename(input_file))[0] + "_essentia.txt")
    with open(output_file, "w") as ofile:
        for p in paras:
            ofile.write(p)
            ofile.write("\n")
    return paras


def gene_phrase_paras_vps_single_file(input_file, output_dir):
    """Mine verb-phrase paraphrases from one file and store them as CSV."""
    logging.info("Generate verb phrase paraphrases for: {}".format(input_file))
    logging.info("Creating the FSA representing the input sentences...")
    fsa, sent_num, origin_sents, _ = create_fsa_from_file(input_file)
    logging.info("FSA recreated!")
    # Find verb phrases out of the paraphrases
    logging.info("Searching for paraphrases...")
    word_to_para = find_verb_paraphrases(fsa, origin_sents)
    logging.info("Storing the mined paraphrases at {}...".format(output_dir))
    store_vp_paraphrase_csv(input_file, fsa, word_to_para, output_dir)


def gene_para_dir_first_chunk(output_dir):
    """Run the first-chunk baseline over every SNIPS file."""
    snips_dir = "datasets/processed/snips"
    for input_file in os.listdir(snips_dir):
        fpath = os.path.join(snips_dir, input_file)
        print(fpath)
        # BUG FIX: previously failed with TypeError because the callee
        # required a third positional argument (now defaulted).
        gene_phrase_paras_first_chunk_single_file(fpath, output_dir)


def gene_para_dir(output_dir):
    """Mine phrase paraphrases over every SNIPS file."""
    snips_dir = "datasets/processed/snips"
    for input_file in os.listdir(snips_dir):
        fpath = os.path.join(snips_dir, input_file)
        print(fpath)
        gene_phrase_paras_single_file(fpath, output_dir)


def gene_vp_para_dir(output_dir, input_dir):
    """Mine verb-phrase paraphrases for every file in input_dir."""
    for input_file in os.listdir(input_dir):
        fpath = os.path.join(input_dir, input_file)
        print("Processing {} ...".format(fpath))
        gene_phrase_paras_vps_single_file(fpath, output_dir)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", default="./datasets/", type=str,
                        help="The input directory of sentence files.")
    parser.add_argument("--output", default="./snips_paras/", type=str,
                        help="The output directory containing domain-specific paraphrases.")
    args = parser.parse_args()
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    gene_vp_para_dir(args.output, args.dir)
import datetime

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta


def print_schedule(data):
    """Pretty-print an amortization schedule (rows of 5 or 6 columns)."""
    if len(data[0]) == 5:
        fmt = "{:5,d} {:11,.2f} {:11,.2f} {:11,.2f} {:11,.2f}"
    elif len(data[0]) == 6:
        # 6-column rows carry a leading date string (see with_dates()).
        fmt = "{:12} {:5,d} {:11,.2f} {:11,.2f} {:11,.2f} {:11,.2f}"
    else:
        # BUG FIX: `fmt` was left unbound for other widths, which raised
        # UnboundLocalError below; fail with a clear error instead.
        raise ValueError("rows must have 5 or 6 columns, got %d" % len(data[0]))
    for row in data:
        print(fmt.format(*row))


def pd_schedule(data):
    """Return the schedule as a DataFrame; prepends a 'date' column for 6-wide rows."""
    header = ["period", "contribution", "gain", "increase", "balance"]
    if len(data[0]) == 6:
        header.insert(0, "date")
    return pd.DataFrame(data, columns=header)


def with_dates(data, start_date_str):
    """Prefix each row with an ISO date, stepping one month per row from start_date_str (YYYY-MM-DD)."""
    start_date_obj = datetime.datetime.strptime(start_date_str, "%Y-%m-%d").date()
    res = []
    for i, d in enumerate(data):
        res.append([str(start_date_obj + relativedelta(months=+i))] + d)
    return res


class Loan:
    """Fixed-rate amortizing loan with monthly periods."""

    n_periods_per_year = 12  # Periods is in months

    def __init__(self, rate_yearly=0.02875, n_term=30, loan_amount=338700):
        self.rate_yearly = rate_yearly
        self.n_term = n_term            # term in years
        self.loan_amount = loan_amount
        self.n_periods = self.n_term * self.n_periods_per_year
        self.rate_per_period = self.rate_yearly / self.n_periods_per_year
        # BUG FIX: np.pmt was deprecated in NumPy 1.18 and removed in 1.20.
        # Closed-form annuity payment with np.pmt's sign convention
        # (positive pv -> negative payment), for fv=0, when='end':
        #   pmt = -pv * r * (1+r)**n / ((1+r)**n - 1)
        r, n = self.rate_per_period, self.n_periods
        if r == 0:
            self.payment = -self.loan_amount / n
        else:
            growth = (1.0 + r) ** n
            self.payment = -self.loan_amount * r * growth / (growth - 1.0)
        self.balance = self.loan_amount

    def schedule(self, xtra=None):
        """
        Build the amortization schedule.

        xtra: optional per-period extra principal amounts, indexed by period
              (BUG FIX: was a shared mutable default `[]`).

        Returns rows [period, payment, interest, principal, balance].
        """
        xtra = [] if xtra is None else xtra
        res = []
        _balance = self.balance
        for i in range(self.n_periods):
            _interest = -_balance * self.rate_per_period
            _principal = self.payment - _interest
            try:
                _principal += xtra[i]
            except IndexError:
                pass  # no extra payment scheduled for this period
            _balance += _principal
            res.append([i, self.payment, _interest, _principal, _balance])
            # one payment remaining if we assume consistent payment:
            # pay the exact remaining balance next period and stop early.
            _next_balance = _balance * (1.0 + self.rate_per_period)
            if abs(_next_balance) <= abs(_principal) and i < self.n_periods - 1:
                _pay = -_next_balance
                _interest = -_balance * self.rate_per_period
                _principal = _pay - _interest
                _balance += _principal
                res.append([i + 1, _pay, _interest, _principal, _balance])
                break
        return res

    @staticmethod
    def total_interest(data):
        """Sum of the interest column of a schedule."""
        return np.sum(data, axis=0)[2]

    @staticmethod
    def total_payments(data):
        """Total paid: principal plus interest."""
        return np.sum(data, axis=0)[3] + np.sum(data, axis=0)[2]
""" asynchronous bot fetches reservation events from if-algerie login to portal go to /exams fetch events crawl events event_crawler_callback return fetch payment days crawl payments payment_Crawler_callback return on error login and retry """ import requests import ujson as json import time import logging import re import threading from typing import Dict, Callable, Any from urllib3.exceptions import ConnectTimeoutError from tcfbot.account import Account from tcfbot.event import EventList from tcfbot.payment_day import PaymentDay, PaymentDayList # logger logger = logging.getLogger(__file__) # exceptions class LoginError(Exception): pass class FetchEventError(Exception): pass class NotLoggedInError(Exception): pass class FetchPaymentDayError(Exception): pass class CrawlerBot: # init some stuff headers = dict() headers['Host'] = 'portail.if-algerie.com' headers['Accept'] = '*/*' headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \ AppleWebKit/537.36 (KHTML, like Gecko) Chrom\ e/91.0.4472.164 Safari/537.36' headers['Accept-Language'] = 'fr-FR,fr;q=0.9,en-US;q=0.8,en;q=0.7' headers['Connection'] = 'keep-alive' session = requests.Session() csrf: str = '' # constructor def __init__(self, **kwargs): """ crawler bot @params account: Account class object callback: Function callback when bot job was done @params: sender: self: Bot, error: Dict, data: Dict event_list: open event list payment_days: PaymentDayList associated to event_list events rate_limit: sleep time in (ms) to avoid api bans """ self.account: Account = kwargs.get('account', Account()) self.event_crawler_callback: Callable = kwargs.get('event_crawler_callback', None) self.payment_day_crawler_callback: Callable = kwargs.get('payment_day_crawler_callback', None) self.rate_limit = kwargs.get('rate_limit', 200) % 10000 def call_event_crawler_callback(self, error: Dict[str, Any], data: Dict[str, Any]) -> None: if self.event_crawler_callback is not None: try: self.event_crawler_callback(self, error, 
                                            data)
            except Exception as e:
                logger.error(f'call_callback error: {e}')

    def call_payment_day_crawler_callback(self, error: Dict[str, Any],
                                          data: Dict[str, Any]) -> None:
        # Same guard as call_event_crawler_callback, for the payment-day callback.
        if self.payment_day_crawler_callback is not None:
            try:
                self.payment_day_crawler_callback(self, error, data)
            except Exception as e:
                logger.error(f'call_callback error: {e}')

    def login(self) -> None:
        """
        login method:
            login() -> bool: login to account,
            success: fetches, else: callback(err, data)
        """
        head = self.headers.copy()
        self.session.cookies.clear()  # clear session cookies
        while True:
            try:
                # get csrf token from the login page
                req = self.session.get('https://portail.if-algerie.com/login',
                                       headers=head, verify=False, timeout=10)
                csrf_token = re.findall('(?<=<meta name="csrf-token" content=")(.*?)(?=")', req.text)[0]
                # rate limit
                time.sleep(self.rate_limit / 1000)
                # post login
                head['X-CSRF-TOKEN'] = csrf_token
                head['X-Requested-With'] = 'XMLHttpRequest'
                data = {
                    'rt': 'https://portail.if-algerie.com/exams',
                    'email': self.account.email,
                    'password': self.account.password
                }
                req = self.session.post('https://portail.if-algerie.com/login',
                                        headers=head, data=data,
                                        verify=False, timeout=10)
                # check login
                res = req.json()
                if res.get('notification', {}).get('importance', '') != 'success':
                    # NOTE(review): message embeds a stray "f'" — looks like a
                    # malformed f-string left in the literal.
                    raise LoginError(f"login failed: f'{json.dumps(res)}")
                # rate limit; poll /exams until a csrf token is visible
                csrf = ''
                while not csrf:
                    logger.warning('fetching csrf token')
                    time.sleep(self.rate_limit / 1000)
                    req = self.session.get('https://portail.if-algerie.com/exams',
                                           headers=head, verify=False, timeout=10)
                    csrf = ''.join(re.findall('(?<=<meta name="csrf-token" content=")(.*?)(?=")', req.text))
                if csrf:
                    self.csrf = csrf
                #if not self.csrf:
                #    raise LoginError("Couldn't fetch csrf token")
                return
            except ConnectTimeoutError:
                # log exception and increment
                logger.error(f'login error: ConnectTimeoutError')
                logger.warning(f'login Retrying...')
            except Exception as e:
                # Any other failure is surfaced to the caller as LoginError.
                raise LoginError(json.dumps({
                    'error': 'login error',
                    'message': f'exception occurred: {e}'
                }))

    def is_logged_in(self) -> bool:
        # Logged in iff both portal session cookies are present.
        return len(self.session.cookies.get('ifa_session', '')) > 0 and \
               len(self.session.cookies.get('XSRF-TOKEN', '')) > 0

    def crawl_events(self) -> EventList:
        """
        place a reservation
        """
        head = self.headers.copy()
        while True:
            try:
                time.sleep(self.rate_limit / 1000)
                req = self.session.get('https://portail.if-algerie.com/exams',
                                       headers=head, verify=False, timeout=10)
                # a redirect back to /login means the session has expired
                if req.url.find('/login') >= 0:
                    raise NotLoggedInError('account not logged in')
                csrf = ''.join(re.findall('(?<=<meta name="csrf-token" content=")(.*?)(?=")', req.text))
                if csrf:
                    self.csrf = csrf
                event_list = EventList()
                if not event_list.parse_events(req.text):
                    raise FetchEventError('Could not fetch events data')
                return event_list
            except ConnectTimeoutError:
                logger.error(f'crawl_events error: ConnectTimeoutError')
                logger.warning(f'crawl_events Retrying...')
            except NotLoggedInError as e:
                raise NotLoggedInError(f'{e}')
            except Exception as e:
                raise FetchEventError(json.dumps({
                    'error': 'crawl_events error',
                    'message': f'exception occurred: {e}'
                }))

    def clear_session(self):
        # Drop all cookies; the next request will require a fresh login.
        self.session.cookies.clear()

    def crawl_payment_days(self, event_list: EventList) -> PaymentDayList:
        """
        crawl payment days of specified event list
        """
        head = self.headers.copy()
        head['X-Requested-With'] = 'XMLHttpRequest'
        head['X-CSRF-TOKEN'] = self.csrf
        head['Referer'] = 'https://portail.if-algerie.com/exams'
        payment_day_list = PaymentDayList()
        for event in event_list:
            logger.warning(f"fetching payment days for event: {event.uid}")
            retires = 0  # NOTE(review): unused; likely a typo for "retries"
            while True:
                time.sleep(self.rate_limit / 1000)
                data = {'uid': event.uid, 'service_type': 'EX'}
                try:
                    req = self.session.post('https://portail.if-algerie.com/exams/getdays',
                                            headers=head, data=data, timeout=10)
                    res = req.json()
                    if res.get('success', False):
                        for i in range(len(res.get('dates', []))):
                            payment_day = PaymentDay(event_uid=event.uid,
                                                     date_from=res.get('dates')[i].get('info', {}).get('From', ''),
                                                     date_to=res.get('dates')[i].get('info', {}).get('To', ''),
time_shift_uid=res.get('dates')[i].get('timeShift', {}) .get('uid', ''), time_shift_morning=res.get('dates')[i].get('timeShift', {}) .get('is_Morning', False)) payment_day_list.add_payment_day(payment_day) else: logger.warning(f'Could not fetch payment days list') logger.warning(f'Response: {json.dumps(res)}') break except ConnectTimeoutError: logger.error(f'crawl_payment_days error: ConnectTimeoutError') logger.warning(f'crawl_payment_days Retrying...') except Exception as e: logger.error(f"Exception: {e}") return payment_day_list def bot_worker_event(self) -> None: """ crawl events worker procedure """ try: if not self.is_logged_in(): self.login() event_list = self.crawl_events() self.call_event_crawler_callback(None, { 'success': True, 'event_list': event_list, 'message': 'successfully crawled events' }) return except LoginError as le: self.call_event_crawler_callback({ 'error': 'LoginError', 'message': f'{le}' }, None) except NotLoggedInError as r_e: self.call_event_crawler_callback({ 'error': 'NotLoggedInError', 'message': f'{r_e}' }, None) except FetchEventError as fe: self.call_event_crawler_callback({ 'error': 'FetchEventError', 'message': f'{fe}' }, None) # extra run-time exceptions maybe thrown except Exception as e: self.call_event_crawler_callback({ 'error': 'Exception', 'message': f'{e}' }, None) def bot_worker_payment(self, event_list: EventList) -> None: """ crawl payments procedure """ try: payment_day_list = self.crawl_payment_days(event_list=event_list) self.call_payment_day_crawler_callback(None, { 'success': True, 'payment_day_list': payment_day_list, 'event_list': event_list, 'message': 'successfully crawled payment days' }) return except FetchPaymentDayError as fe: self.call_event_crawler_callback({ 'error': 'FetchPaymentDayError', 'message': f'{fe}' }, None) # extra run-time exceptions maybe thrown except Exception as e: self.call_event_crawler_callback({ 'error': 'Exception', 'message': f'{e}' }, None) def run_event_crawler(self): """ 
asynchronous event crawler """ t = threading.Thread(target=self.bot_worker_event) t.start() def run_payment_crawler(self, event_list: EventList): """ asynchronous payment days crawler """ t = threading.Thread(target=self.bot_worker_payment, args=(event_list,)) t.start()
# Core Pkgs
import streamlit as st
import streamlit.components.v1 as stc
import requests

# API endpoint; {description} and {location} are filled per search.
base_url = "https://jobs.indeed.com/positions.json?description={}&location={}"


# Fxn to Retrieve Data
def get_data(url):
    """Fetch *url* and return the decoded JSON payload."""
    resp = requests.get(url)
    return resp.json()


# Card template for a single job posting (dark background).
JOB_HTML_TEMPLATE = """
<div style="width:100%;height:100%;margin:1px;padding:5px;position:relative;border-radius:5px;border-bottom-right-radius: 10px;
box-shadow:0 0 1px 1px #eee; background-color: #31333F;
border-left: 5px solid #6c6c6c;color:white;">
<h4>{}</h4>
<h4>{}</h4>
<h5>{}</h5>
<h6>{}</h6>
</div>
"""

JOB_DES_HTML_TEMPLATE = """
<div style='color:#fff'>
{}
</div>
"""


def main():
    """Render the job-search UI: search form, result cards, email subscribe box."""
    menu = ["Home", "About"]
    choice = st.sidebar.selectbox("Menu", menu)
    st.title("DevDeeds -Search Jobs")

    if choice == "Home":
        st.subheader("Home")

        # Nav Search Form
        with st.form(key='searchform'):
            nav1, nav2, nav3 = st.columns([3, 2, 1])
            with nav1:
                search_term = st.text_input("Search Job")
            with nav2:
                location = st.text_input("Location")
            with nav3:
                st.text("Search ")
                submit_search = st.form_submit_button(label='Search')

        st.success("You searched for {} in {}".format(search_term, location))

        # Results
        col1, col2 = st.columns([2, 1])

        with col1:
            if submit_search:
                # Create Search Query
                search_url = base_url.format(search_term, location)
                data = get_data(search_url)

                # Number of Results
                num_of_results = len(data)
                st.subheader("Showing {} jobs".format(num_of_results))

                for i in data:
                    job_title = i['title']
                    job_location = i['location']
                    company = i['company']
                    company_url = i['company_url']
                    job_post_date = i['created_at']
                    job_desc = i['description']
                    job_howtoapply = i['how_to_apply']
                    st.markdown(JOB_HTML_TEMPLATE.format(job_title, company,
                                                         job_location,
                                                         job_post_date),
                                unsafe_allow_html=True)

                    # Description
                    # BUG FIX: st.beta_expander was removed in Streamlit 1.0;
                    # st.expander exists since 0.86, the release that also
                    # introduced st.columns / st.form used above.
                    with st.expander("Description"):
                        stc.html(JOB_DES_HTML_TEMPLATE.format(job_desc),
                                 scrolling=True)

                    # How to Apply
                    with st.expander("How To Apply"):
                        # stc.html(job_howtoapply)  # For White Theme
                        # For Dark Theme
                        stc.html(JOB_DES_HTML_TEMPLATE.format(job_howtoapply),
                                 scrolling=True)

        with col2:
            with st.form(key='email_form'):
                st.write("Be the first to get new jobs info")
                email = st.text_input("Email")
                submit_email = st.form_submit_button(label='Subscribe')
                if submit_email:
                    st.success("A message was sent to {}".format(email))
    else:
        st.subheader("About")


if __name__ == '__main__':
    main()
from django.contrib import admin
from django.urls import path, include
from Admin import views

# URL routes for the Admin app.  Each route's ``name`` mirrors its view so it
# can be resolved with reverse()/{% url %}.  NOTE(review): ``path()`` matches
# literal strings, so the ``r''`` prefixes are harmless but unnecessary, and
# ``admin``/``include`` are imported but unused here — presumably kept for
# consistency with other urls.py files; confirm before removing.
urlpatterns = [
    path(r'approveEvent/', views.approveEvents, name='approveEvents'),
    path(r'operate/', views.operate, name='operate'),
    path(r'createEvent/', views.createEvent, name='createEvent'),
    path(r'createCommittee/', views.createCommittee, name='createCommittee'),
    path(r'deskA/', views.Desk, name='deskA')
]
# -*- coding: utf-8 -*-
"""
    apis.wordpress_xml_rpc
    ~~~~~~~~~~~~~~~~~~~~~~

    Wordpress-compatible XML-RPC API

    http://flask.pocoo.org/snippets/96/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))

from flask import request, Response
from app import app
from datetime import datetime
from flask import url_for
# FIX: the ``flask.ext.*`` import shim was removed in Flask 1.0; extensions
# must be imported under their real package name.
from flask_xmlrpc import XMLRPCHandler, Fault
# NOTE(review): this re-imports ``app`` and shadows the one imported from
# ``app`` above — confirm which application object is actually intended.
from labs import app, db
from labs.models import User, Post, Tag, Category

POST_ROOT = 'http://127.0.0.1/post/'

# MetaWeblogAPI XML-RPC
handler = XMLRPCHandler('api')
handler.connect(app, '/api')

metaweblog = handler.namespace('metaWeblog')
blogger = handler.namespace('blogger')
wordpress = handler.namespace('wp')
moveabletype = handler.namespace('mt')


def _authenticate(username, password):
    """Return the matching User or raise the Fault the protocol expects.

    Shared by every authenticated endpoint below.
    """
    user = db.session.query(User).filter(User.username == username).first()
    if user is None or not user.check_password(password):
        raise Fault("invalid_user",
                    "Invalid username/password, please try again.")
    return user


def _collect_tags(mt_tags):
    """Split a comma-separated tag string and return Tag rows, creating
    (and committing) any tags that do not exist yet.

    FIX: the original used ``string.split(s, ',')``, a Python-2-only idiom
    removed in Python 3; ``str.split`` works on both.
    """
    tags = []
    for tag_name in mt_tags.split(','):
        tag = Tag.query.filter(Tag.name == tag_name).first()
        if tag is None:
            tag = Tag(tag_name)
            db.session.add(tag)
            db.session.commit()
        tags.append(tag)
    return tags


def _apply_optional_fields(post, content):
    """Copy the optional MetaWeblog struct members onto *post*."""
    if 'wp_slug' in content:
        post.slug = content['wp_slug']
    if 'dateCreated' in content:
        post.create_date = datetime.strptime(str(content['dateCreated']),
                                             "%Y%m%dT%H:%M:%SZ")
    if 'custom_fields' in content:
        for custom_field in content['custom_fields']:
            if custom_field['key'] == 'subtitle':
                post.subtitle = custom_field['value']
            elif custom_field['key'] == 'lead_img':
                post.lead_img = custom_field['value']


@metaweblog.register
def newPost(blog_id, username, password, content, publish):
    """metaWeblog.newPost: create a post from a MetaWeblog content struct
    and return its id."""
    user = _authenticate(username, password)
    post = Post(content['title'], content['description'])
    post.author = user
    post.teaser = content['mt_excerpt']
    _apply_optional_fields(post, content)
    for tag in _collect_tags(content['mt_tags']):
        post.tags.append(tag)
    db.session.add(post)
    db.session.commit()
    return post.id


@metaweblog.register
def editPost(post_id, username, password, content, publish):
    """metaWeblog.editPost: overwrite an existing post in place."""
    _authenticate(username, password)
    post = Post.query.get(post_id)
    post.title = content['title']
    post.markdown = content['description']
    post.set_html()
    post.teaser = content['mt_excerpt']
    _apply_optional_fields(post, content)
    # Unlike newPost, editing *replaces* the tag set rather than appending.
    post.tags = _collect_tags(content['mt_tags'])
    db.session.add(post)
    db.session.commit()
    return True


def _post_to_item(post):
    """Serialize a Post row into the struct MetaWeblog clients expect."""
    item = {}
    item['title'] = post.title
    item['link'] = POST_ROOT + post.slug
    item['description'] = post.markdown
    item['postid'] = post.id
    item['mt_excerpt'] = post.teaser
    item['custom_fields'] = [
        {'key': 'subtitle', 'value': post.subtitle},
        {'key': 'lead_img', 'value': post.lead_img}
    ]
    item['wp_slug'] = post.slug
    if post.tags:
        item['mt_tags'] = ','.join(tag.name for tag in post.tags)
    item['dateCreated'] = post.create_date
    return item


@metaweblog.register
def getPost(post_id, username, password):
    """metaWeblog.getPost: fetch one post by id."""
    _authenticate(username, password)
    post = Post.query.filter(Post.id == post_id).first()
    if not post:
        raise Fault("not_found", "Post not found.")
    return _post_to_item(post)


@metaweblog.register
def getRecentPosts(blogid, username, password, numberOfPosts):
    """metaWeblog.getRecentPosts: list posts ordered by creation date.

    TODO(review): ``numberOfPosts`` is ignored and every post is returned —
    the original behaved the same way; confirm before limiting.
    """
    _authenticate(username, password)
    posts = Post.query.order_by('create_date').all()
    return [_post_to_item(post) for post in posts]


@wordpress.register
def getPages(blogid, username, password, numberOfPages):
    """wp.getPages: static pages are not supported, so always empty."""
    return []


@wordpress.register
def newCategory(blogid, username, password, new_category):
    """wp.newCategory: create the category if needed, return its id."""
    _authenticate(username, password)
    category = Category.query.filter(
        Category.name == new_category['name']).first()
    if category is None:
        category = Category(new_category['name'])
        db.session.add(category)
        db.session.commit()
    return category.id


@wordpress.register
def getTags(blogid, username, password):
    """wp.getTags: list all tags.

    FIX: wrapped in list() — on Python 3 ``map`` returns an iterator, which
    the XML-RPC marshaller cannot serialize.
    """
    return [{'tag_id': tag.id, 'name': tag.name} for tag in Tag.query.all()]


@wordpress.register
def getCategories(blogid, username, password):
    """wp.getCategories: list all categories (list for XML-RPC marshalling)."""
    return [{
        'categoryId': category.id,
        'categoryName': category.name,
        'categoryDescription': category.description
    } for category in Category.query.all()]


@moveabletype.register
def setPostCategories(post_id, username, password, post_categories):
    """mt.setPostCategories: attach the (single) category to a post.

    NOTE(review): no credential check here, matching the original — confirm
    whether that is intentional.
    """
    post = Post.query.get(post_id)
    for post_category in post_categories:
        category = Category.query.filter(
            Category.name == post_category['categoryName']
        ).first()
        # only single category per post supported
        post.category = category
    db.session.add(post)
    db.session.commit()
    return True


@moveabletype.register
def getPostCategories(post_id, username, password):
    """mt.getPostCategories: return the post's category, if any."""
    # only single per post supported
    category = Post.query.get(post_id).category
    if category is not None:
        post_category = {
            'categoryId': category.id,
            'categoryName': category.name,
            'categoryDescription': category.description
        }
        return [post_category]
    return []


@moveabletype.register
def supportedTextFilters():
    """mt.supportedTextFilters: none supported."""
    return []


@blogger.register
def deletePost(appkey, post_id, username, password, publish):
    """blogger.deletePost: permanently remove a post."""
    _authenticate(username, password)
    post = Post.query.get(int(post_id))
    db.session.delete(post)
    db.session.commit()


@app.route('/')
def index():
    return 'index'


if __name__ == "__main__":
    app.run()
from unittest import TestCase

import matplotlib.pyplot as plt
import numpy as np

#from smithers.dataset import Dataset


class TestDataset(TestCase):
    """Placeholder test suite for the (currently disabled) Dataset import."""

    def test_init(self):
        """Smoke test stub — emits a marker and asserts nothing."""
        print('boh')
# coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Class level test-set accuracy for all ImageNet classes.""" from absl import flags import tensorflow as tf FLAGS = flags.FLAGS def create_eval_metrics(labels, logits, human_labels, params): """Creates the evaluation metrics for the model.""" eval_metrics = {} predictions = tf.cast(tf.argmax(logits, axis=1), tf.int32) in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32) eval_metrics['top_5_eval_accuracy'] = tf.metrics.mean(in_top_5) eval_metrics['eval_accuracy'] = tf.metrics.accuracy( labels=labels, predictions=predictions) num_label_classes = params['num_label_classes'] log_class_level_summaries = params['log_class_level_summaries'] if log_class_level_summaries: labels = tf.cast(labels, tf.int64) with tf.name_scope('class_level_summaries') as scope: for i in range(num_label_classes): name = scope + '/{}_{}'.format(human_labels[i], i) eval_metrics['precision/{}_{}'.format(human_labels[i], i)] = tf.metrics.precision_at_k( labels=labels, predictions=logits, class_id=i, k=1, name=name) eval_metrics['recall/{}_{}'.format(human_labels[i], i)] = tf.metrics.recall_at_k( labels=labels, predictions=logits, class_id=i, k=1, name=name) return eval_metrics
import numpy
import six

from chainer import cuda
from chainer import utils


def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
    """Asserts if some corresponding element of x and y differs too much.

    This function can handle both CPU and GPU arrays simultaneously.

    Args:
        x: Left-hand-side array.
        y: Right-hand-side array.
        atol (float): Absolute tolerance.
        rtol (float): Relative tolerance.
        verbose (bool): If ``True``, it outputs verbose messages on error.

    Raises:
        AssertionError: If any pair of elements differs beyond the
            tolerances; the message is augmented with shape/dtype info and
            the worst-offending element.
    """
    # Normalize both operands to host-side numpy arrays so the comparison
    # works regardless of whether they started life on the GPU.
    x = cuda.to_cpu(utils.force_array(x))
    y = cuda.to_cpu(utils.force_array(y))
    try:
        numpy.testing.assert_allclose(
            x, y, atol=atol, rtol=rtol, verbose=verbose)
    except AssertionError as e:
        # Rebuild the failure message with extra diagnostics appended to
        # numpy's own report.
        f = six.StringIO()
        f.write(str(e) + '\n\n')
        f.write(
            'assert_allclose failed: \n' +
            '  shape: {} {}\n'.format(x.shape, y.shape) +
            '  dtype: {} {}\n'.format(x.dtype, y.dtype))
        if x.shape == y.shape:
            # Reshape 0-d arrays to 1-d so elementwise indexing below works.
            xx = x if x.ndim != 0 else x.reshape((1,))
            yy = y if y.ndim != 0 else y.reshape((1,))
            err = numpy.abs(xx - yy)
            # Locate the element with the largest absolute error.
            i = numpy.unravel_index(numpy.argmax(err), err.shape)
            f.write(
                '  i: {}\n'.format(i) +
                '  x[i]: {}\n'.format(xx[i]) +
                '  y[i]: {}\n'.format(yy[i]) +
                '  err[i]: {}\n'.format(err[i]))
        opts = numpy.get_printoptions()
        try:
            # Temporarily raise the print threshold so the full arrays are
            # dumped rather than elided with '...'.
            numpy.set_printoptions(threshold=10000)
            f.write('x: ' + numpy.array2string(x, prefix='x: ') + '\n')
            f.write('y: ' + numpy.array2string(y, prefix='y: ') + '\n')
        finally:
            numpy.set_printoptions(**opts)
        raise AssertionError(f.getvalue())
# This is a percentage calculator: given any two of (marks, total, percentage)
# it computes the third.  Purely interactive; all I/O goes through the console.
import time
import colorama
from colorama import Fore, Back, Style

colorama.init()
print(Fore.LIGHTGREEN_EX)
print('''
 ██▓███   ▄████▄   ▄▄▄       ██▓     ██▓███ ▓██   ██▓
▓██░  ██▒▒██▀ ▀█  ▒████▄    ▓██▒    ▓██░  ██▒▒██  ██▒
▓██░ ██▓▒▒▓█    ▄ ▒██  ▀█▄  ▒██░    ▓██░ ██▓▒ ▒██ ██░
▒██▄█▓▒ ▒▒▓▓▄ ▄██▒░██▄▄▄▄██ ▒██░    ▒██▄█▓▒ ▒ ░ ▐██▓░
▒██▒ ░  ░▒ ▓███▀ ░ ▓█   ▓██▒░██████▒▒██▒ ░  ░ ░ ██▒▓░
▒▓▒░ ░  ░░ ░▒ ▒  ░ ▒▒   ▓▒█░░ ▒░▓  ░▒▓▒░ ░  ░  ██▒▒▒
░▒ ░     ░  ▒      ▒   ▒▒ ░░ ░ ▒  ░░▒ ░     ▓██ ░▒░
░░       ░          ░   ▒     ░ ░   ░░       ▒ ▒ ░░
''')
print("Tool Name: Pcalpy")
print("Version: 1.0.0")
print("License : LS MARTIN")
print(Fore.LIGHTRED_EX)

# Options Section
print("[-]Press 1 To Find Percentage")
print("[-]Press 2 To Find Total Number")
print("[-]Press 3 To Find Number")
print("[-]Enter 4 To Exit")
time.sleep(1)

# What to find:
# a = Your marks, b = total marks, p = percentage
print(" ")
q = int(input("[ - ] Enter Choice: "))

if q == 1:
    a = int(input("Enter Your Marks: "))
    b = int(input("Enter Total Marks: "))
    # FIX: guard against division by zero when total marks is 0.
    if b == 0:
        print("Total Marks cannot be zero")
    else:
        p = (a * 100 / b)
        # FIX: corrected "Percentagae" typo in the output message.
        print("Your Percentage is:", p)
elif q == 2:
    # FIX: corrected "Percetage" typo in the prompt.
    p = int(input("Enter Your Percentage: "))
    a = int(input("Enter Your Marks: "))
    # FIX: guard against division by zero when percentage is 0.
    if p == 0:
        print("Percentage cannot be zero")
    else:
        b = (a * 100 / p)
        print("Total Marks is: ", b)
elif q == 3:
    p = int(input("Enter Your Percentage: "))
    b = int(input("Enter Total Marks: "))
    a = (p * b / 100)
    print("Your Marks is: ", a)
elif q == 4:
    time.sleep(1)
    print(Fore.LIGHTGREEN_EX)
    print("Thanks For using this Tool")
    print("This tool will be developed Further")
    time.sleep(4)
    exit()
else:
    print("Invalid Input")
    time.sleep(2)

# Outro Section
print(Fore.MAGENTA)
print("Thanks For using this Tool")
print("This tool will be developed Further")
time.sleep(5)
from h2o.estimators.xgboost import *
from tests import pyunit_utils, os
import sys
sys.path.insert(1, "../../../")
from h2o.two_dim_table import H2OTwoDimTable


def xgboost_feature_interactions():
    """Pyunit test: XGBoost feature_interaction() returns H2OTwoDimTables
    and can optionally export them to an .xlsx file.

    NOTE(review): the ``h2o`` name used below is assumed to arrive via the
    star import above — confirm it is re-exported there.
    """
    prostate_frame = h2o.import_file(pyunit_utils.locate('smalldata/prostate/prostate.csv'))

    y = "RACE"
    ignored_columns = ['ID']

    model = H2OXGBoostEstimator(training_frame=prostate_frame, seed=0xDECAF)
    model.train(y=y, ignored_columns=ignored_columns, training_frame=prostate_frame)

    # feature_interaction(max_depth, max_tree_depth?, max_deepening) —
    # expected to yield 11 tables for this model/seed.
    feature_interactions = model.feature_interaction(2, 100, -1)
    assert isinstance(feature_interactions[0], H2OTwoDimTable)
    assert len(feature_interactions) == 11

    # Same call, but also writing the tables out to an Excel workbook;
    # verify the file is created, then clean it up.
    path = pyunit_utils.locate("results")
    feature_interactions = model.feature_interaction(2, 100, -1, os.path.join(path, 'feature_interactions.xlsx'))
    assert isinstance(feature_interactions[0], H2OTwoDimTable)
    assert len(feature_interactions) == 11
    assert os.path.exists(os.path.join(path, 'feature_interactions.xlsx'))
    os.remove(os.path.join(path, 'feature_interactions.xlsx'))


if __name__ == "__main__":
    pyunit_utils.standalone_test(xgboost_feature_interactions)
else:
    xgboost_feature_interactions()
# weird mypy bug with imports from typing import Any, Dict, Generator # pylint: disable=unused-import import attr from ...models import TestResultSet from ...utils import get_requests_auth from .. import events from .core import BaseRunner, get_session, network_test, wsgi_test @attr.s(slots=True) # pragma: no mutate class SingleThreadRunner(BaseRunner): """Fast runner that runs tests sequentially in the main thread.""" def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]: auth = get_requests_auth(self.auth, self.auth_type) with get_session(auth) as session: yield from self._run_tests( self.schema.get_all_tests, network_test, self.hypothesis_settings, self.seed, checks=self.checks, targets=self.targets, results=results, session=session, headers=self.headers, request_timeout=self.request_timeout, store_interactions=self.store_interactions, ) @attr.s(slots=True) # pragma: no mutate class SingleThreadWSGIRunner(SingleThreadRunner): def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]: yield from self._run_tests( self.schema.get_all_tests, wsgi_test, self.hypothesis_settings, self.seed, checks=self.checks, targets=self.targets, results=results, auth=self.auth, auth_type=self.auth_type, headers=self.headers, store_interactions=self.store_interactions, )
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

## @package json_time_domain
#  Store the time domain json data
#

import sys
import os
import shutil
import json
from json_base import json_base


class json_time_domain_mesh_segment(json_base):
    """One segment of the time-domain mesh: a stretch of simulated time with
    linearly interpolated voltage/sun/laser drive between start and stop."""

    def __init__(self):
        json_base.__init__(self, "json_time_domain_mesh_segment")
        # Each var_list entry is [key, default]; json_base turns them into
        # attributes via var_list_build().
        self.var_list = []
        self.var_list.append(["len", 10e-6])            # segment duration (s)
        self.var_list.append(["dt", 0.01e-6])           # time step (s)
        self.var_list.append(["voltage_start", 0.0])
        self.var_list.append(["voltage_stop", 0.0])
        self.var_list.append(["mul", 1.0])              # dt multiplier per step
        self.var_list.append(["sun_start", 0.0])
        self.var_list.append(["sun_stop", 0.0])
        self.var_list.append(["laser_start", 0.0])
        self.var_list.append(["laser_stop", 0.0])
        self.var_list_build()


class json_time_domain_mesh(json_base):
    """Ordered collection of json_time_domain_mesh_segment objects."""

    def __init__(self):
        json_base.__init__(self, "mesh", segment_class=True)

    def load_from_json(self, json):
        # NOTE: the parameter name shadows the imported ``json`` module;
        # it is a plain dict here, not the module.
        self.segments = []
        segs = json['segments']
        for i in range(0, segs):
            a = json_time_domain_mesh_segment()
            segment_name = "segment" + str(i)
            a.load_from_json(json[segment_name])
            # Legacy files stored a single 'laser' value instead of the
            # start/stop pair; best-effort migration, ignore if absent.
            try:
                laser = json[segment_name]['laser']
                a.laser_start = laser
                a.laser_stop = laser
            except:
                pass
            self.segments.append(a)


class json_time_domain_config(json_base):
    """Scalar configuration for a time-domain simulation (load, laser,
    timing and dump options)."""

    def __init__(self):
        json_base.__init__(self, "config")
        self.var_list = []
        self.var_list.append(["pulse_shift", 5e-6])
        self.var_list.append(["load_type", "load"])
        self.var_list.append(["pulse_L", 0.0])
        self.var_list.append(["Rload", 50])             # external load (ohm)
        self.var_list.append(["pump_laser", "green"])
        self.var_list.append(["pulse_bias", 0.0])
        self.var_list.append(["pulse_light_efficiency", 1.0])
        self.var_list.append(["pulse_subtract_dc", "false"])
        self.var_list.append(["start_time", -4e-12])
        self.var_list.append(["fs_laser_time", -1.0])
        self.var_list.append(["dump_verbosity", 1])
        self.var_list_build()


class json_time_domain_simulation(json_base):
    """One named time-domain experiment: config + mesh + identity."""

    def __init__(self):
        json_base.__init__(self, "time_domain_segment")
        self.var_list = []
        self.var_list.append(["english_name", "celiv"])
        self.var_list.append(["icon", "celiv"])
        self.var_list.append(["config", json_time_domain_config()])
        self.var_list.append(["mesh", json_time_domain_mesh()])
        self.var_list.append(["id", self.random_id()])
        self.var_list_build()


class json_time_domain(json_base):
    """Top-level container holding all time-domain simulation segments."""

    def __init__(self):
        json_base.__init__(self, "time_domain", segment_class=True, segment_example=json_time_domain_simulation())
#!/usr/bin/python
#
# Copyright 2021 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Sync OONI measurement files to a local directory as gzipped JSONL,
trimming oversized string fields and enforcing a download-cost budget."""

import argparse
import datetime as dt
from functools import singledispatch
import gzip
import logging
from multiprocessing.pool import ThreadPool
import os
import pathlib
import sys
from typing import List

import ujson

from . import ooni_client


@singledispatch
def trim_measurement(json_obj, max_string_size: int):
    """Recursively drop dict values that are strings longer than
    *max_string_size*.  Base case: scalars pass through untouched."""
    return json_obj


@trim_measurement.register(dict)
def _(json_dict: dict, max_string_size: int):
    # Collect keys first: deleting while iterating a dict is an error.
    keys_to_delete: List[str] = []
    for key, value in json_dict.items():
        if type(value) == str and len(value) > max_string_size:
            keys_to_delete.append(key)
        else:
            trim_measurement(value, max_string_size)
    for key in keys_to_delete:
        del json_dict[key]
    return json_dict


@trim_measurement.register(list)
def _(json_list: list, max_string_size: int):
    # Lists are trimmed element-wise, in place.
    for item in json_list:
        trim_measurement(item, max_string_size)
    return json_list


class CostLimitError(Exception):
    """Raised when the estimated S3 download cost exceeds --cost_limit_usd."""

    def __init__(self, message: str) -> None:
        super().__init__(message)


def _make_local_path(output_dir: pathlib.Path,
                     entry: ooni_client.FileEntry) -> pathlib.Path:
    """Map a remote file entry to its local destination path:
    output_dir/country/test_type/YYYY-MM-DD/name.jsonl.gz"""
    basename = pathlib.PurePosixPath(entry.url.path).name
    # Convert .json.lz4 and .tar.lz4 filenames.
    if not basename.endswith('.jsonl.gz'):
        basename = basename.rsplit('.', 2)[0] + '.jsonl.gz'
    return output_dir / entry.country / entry.test_type / f'{entry.date:%Y-%m-%d}' / basename


def main(args):
    logging.basicConfig(level=logging.INFO)
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)

    ooni = ooni_client.OoniClient()
    # Shared mutable counter; updated from worker threads via ``nonlocal``.
    # NOTE(review): increments are not lock-protected — presumably tolerable
    # for an informational count; confirm if exactness matters.
    num_measurements = 0

    file_entries = ooni.list_files(
        args.first_date, args.last_date, args.test_type, args.country)

    def sync_file(entry: ooni_client.FileEntry):
        """Download one entry unless it already exists locally."""
        target_file_path = _make_local_path(args.output_dir, entry)
        if target_file_path.is_file():
            return f'Skipped existing {entry.url.geturl()}]'
        return fetch_file(entry, target_file_path)

    def fetch_file(entry: ooni_client.FileEntry,
                   target_file_path: pathlib.Path):
        nonlocal num_measurements
        os.makedirs(target_file_path.parent, exist_ok=True)
        # Abort the whole sync once the budget is blown.
        if ooni.cost_usd > args.cost_limit_usd:
            raise CostLimitError(
                f'Downloaded {ooni.bytes_downloaded / 2**20} MiB')
        # We use a temporary file to atomatically write the destination and make sure we don't have partially written files.
        # We put the temporary file in the same location as the destination because you can't atomically
        # rename if they are in different devices, as is the case for Kaggle.
        temp_path = target_file_path.with_name(f'{target_file_path.name}.tmp')
        try:
            with gzip.open(temp_path, mode='wt', encoding='utf-8',
                           newline='\n') as target_file:
                for measurement in entry.get_measurements():
                    num_measurements += 1
                    m = trim_measurement(measurement, args.max_string_size)
                    ujson.dump(m, target_file)
                    target_file.write('\n')
            # Atomic publish: only complete files ever appear at the
            # destination path.
            temp_path.replace(target_file_path)
        except:
            # Bare except is deliberate here: clean up the partial temp file
            # on ANY failure, then re-raise the original exception.
            temp_path.unlink()
            raise
        return f'Downloaded {entry.url.geturl()} [{entry.size:,} bytes]'

    # Heavily I/O-bound, so oversubscribe threads relative to CPUs.
    with ThreadPool(processes=5 * os.cpu_count()) as sync_pool:
        for msg in sync_pool.imap_unordered(sync_file, file_entries):
            logging.info(msg)
    logging.info(f'Measurements: {num_measurements}, Downloaded {ooni.bytes_downloaded/2**20:0.3f} MiB, Estimated Cost: ${ooni.cost_usd:02f}')


def _parse_date_flag(date_str: str) -> dt.date:
    """Parse a YYYY-MM-DD command-line flag into a date."""
    return dt.datetime.strptime(date_str, "%Y-%m-%d").date()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("Sync OONI measurements")
    parser.add_argument("--country", type=str, required=True)
    parser.add_argument("--first_date", type=_parse_date_flag,
                        default=dt.date.today() - dt.timedelta(days=14))
    parser.add_argument("--last_date", type=_parse_date_flag,
                        default=dt.date.today())
    parser.add_argument("--test_type", type=str, default='webconnectivity')
    parser.add_argument("--max_string_size", type=int, default=1000)
    parser.add_argument("--cost_limit_usd", type=float, default=1.00)
    parser.add_argument("--output_dir", type=pathlib.Path, required=True)
    parser.add_argument("--debug", action="store_true")
    sys.exit(main(parser.parse_args()))
class PackObject:
    """Mutable record for one object read from a pack file.

    Only ``hash`` is known at construction time; the remaining fields are
    filled in as the pack is parsed.
    """

    # Object type code; 0 means "not yet parsed".
    type: int
    # Identifier of the object this record describes.
    hash: str
    # Base-object reference for deltified entries; None when not a delta.
    # (Annotated as a string so it evaluates on any Python 3 version.)
    ref: "bytes | None"
    # Raw payload bytes; empty until parsed.
    data: bytes
    # Inflated size of the object; 0 until parsed.
    object_size: int

    def __init__(self, hash: str):
        self.type = 0
        self.ref = None
        self.hash = hash
        # FIX: initialize every annotated attribute so access before parsing
        # yields a sensible default instead of AttributeError.
        self.data = b""
        self.object_size = 0
# Generated by Django 3.2.6 on 2021-08-16 13:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('emailmanager', '0002_rename_emails_email'), ] operations = [ migrations.AlterField( model_name='email', name='sender', field=models.CharField(max_length=100), ), migrations.AlterField( model_name='email', name='title', field=models.CharField(max_length=100), ), ]
#!/usr/bin/python
# A simple data loader that imports the train and test mat files
# from the `filename` and converts them to torch.tensors()
# to be loaded for training and testing DLoc network
# `features_wo_offset`: targets for the consistency decoder
# `features_w_offset` : inputs for the network/encoder
# `labels_gaussian_2d`: targets for the location decoder
import torch
import h5py
import scipy.io
import numpy as np


def load_data(filename):
    """Load a DLoc HDF5/.mat (v7.3) file and return its three tensors.

    Args:
        filename: Path to an HDF5 file containing the datasets
            'features_wo_offset', 'features_w_offset' and
            'labels_gaussian_2d'.

    Returns:
        Tuple of float32 torch tensors:
        (features_wo_offset, features_w_offset, labels_gaussian_2d).
        MATLAB stores arrays column-major, hence the transpose —
        TODO confirm the expected axis order against the training code.
    """
    print('Loading ' + filename)

    def _to_tensor(f, name):
        # Read a dataset, flip MATLAB's column-major layout, and convert.
        arr = np.transpose(np.array(f.get(name), dtype=np.float32))
        return torch.tensor(arr, dtype=torch.float32)

    # FIX: the original left the HDF5 file handle open (resource leak);
    # the context manager guarantees it is closed even on error.
    with h5py.File(filename, 'r') as f:
        features_wo_offset = _to_tensor(f, 'features_wo_offset')
        features_w_offset = _to_tensor(f, 'features_w_offset')
        labels_gaussian_2d = _to_tensor(f, 'labels_gaussian_2d')
    return features_wo_offset, features_w_offset, labels_gaussian_2d
#!/usr/bin/env python3 from experiment import Experiment if __name__ == '__main__': Experiment.run()
""" Given a characters array tasks, representing the tasks a CPU needs to do, where each letter represents a different task. Tasks could be done in any order. Each task is done in one unit of time. For each unit of time, the CPU could complete either one task or just be idle. However, there is a non-negative integer n that represents the cooldown period between two same tasks (the same letter in the array), that is that there must be at least n units of time between any two same tasks. Return the least number of units of times that the CPU will take to finish all the given tasks IDEA: The total number of CPU intervals we need consists of busy and idle slots. Number of busy slots is defined by the number of tasks to execute: len(tasks). The problem is to compute a number of idle slots. Maximum possible number of idle slots is defined by the frequency of the most frequent task: idle_time <= (f_max - 1) * n. Example: ABCA, cooling=2 |AAAA|B |C | cooling just calculate #empty + tot_len of letters """ class Solution621: pass
# # This file is part of the PyMeasure package. # # Copyright (c) 2013-2020 PyMeasure Developers # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
#
from pymeasure.instruments import Instrument, discreteTruncate
from pymeasure.instruments.validators import strict_discrete_set, \
    truncated_discrete_set, truncated_range

import numpy as np
import time
import re


class ApexAP1000(Instrument):
    """Represents Apex's AP1000 Multi-test platform.

    The chassis hosts plug-in modules addressed by slot number; set
    :attr:`slot` first, then call the module-specific methods
    (``PM_*`` powermeter, ``TF_*`` tunable filter, ``TL_*`` tunable laser).
    """

    SLOTS = range(1, 9)                                # valid chassis slots
    DEV_KINDS = {'PM': 'POW', 'TF': 'FLT', 'TL': 'TLS'}  # SCPI prefixes per module
    PM_CHANNELS = [1, 2]                               # powermeter channels
    PM_UNIT = ['W', 'mW', 'dBm']                       # accepted power units
    STATUS = {'Off': 0, 'On': 1}                       # laser output states

    def __init__(self, resourceName, **kwargs):
        super(ApexAP1000, self).__init__(
            resourceName,
            "Apex AP1000",
            **kwargs
        )
        self.__slot = None

    def headStr(self, kind):
        """Return the command prefix, e.g. ``POW[03]:``, for module *kind*
        in the currently selected slot."""
        return ApexAP1000.DEV_KINDS[kind] + "[%02d]:" % self.slot

    @property
    def slot(self):
        """ Get current slot number """
        if self.__slot in ApexAP1000.SLOTS:
            return self.__slot
        else:
            raise ValueError('Bad slot number !')

    @slot.setter
    def slot(self, num):
        """ Set current slot number
        You have to set the good slot before asking the functions
        related to any module """
        if num in ApexAP1000.SLOTS:
            self.__slot = num
        else:
            raise ValueError('Bad slot number !')

    # Powermeter module related functions
    @property
    def PM_averages(self):
        """ PowerMeter module.
        Get wanted number of averages for current slot. """
        return int(self.ask(self.headStr('PM') + 'AVG?'))

    @PM_averages.setter
    def PM_averages(self, num):
        """ PowerMeter module.
        Sets wanted number of averages for current slot. """
        self.write(self.headStr('PM') + 'AVG %d' % num)

    def PM_getPower(self, channel, unit='W'):
        """ PowerMeter module.
        Get actual power on given channel with given unit.
        - channel can be 1 or 2.
        - unit can be 'W', 'mW' or 'dBm'. """
        if unit not in ApexAP1000.PM_UNIT:
            raise ValueError('Unknow physical unit during power measurement')
        if channel not in ApexAP1000.PM_CHANNELS:
            raise ValueError('Unknow channel during power measurement')
        # FIX: renamed the mapping (it shadowed the builtin ``str``).
        unit_cmd = {'W': 'MW', 'mW': 'MW', 'dBm': 'DBM'}
        value = float(self.ask(self.headStr('PM')
                               + '%s[%d]?' % (unit_cmd[unit], channel)))
        # FIX: compare with '==' — ``is`` on a string literal is an identity
        # test and is not guaranteed to work.  Instrument replies in mW,
        # so convert to W when requested.
        if unit == 'W':
            value = value * 1e-3
        return value

    def PM_setWavelength(self, channel, wavelength):
        """ PowerMeter module.
        Sets wavelength for given channel for calibration
        - channel can be 1 or 2. """
        if channel not in ApexAP1000.PM_CHANNELS:
            raise ValueError('Unknow channel during power measurement')
        sentStr = self.headStr('PM') + 'SETWAVELENGTH[%d] %g' % (channel, wavelength)
        return self.write(sentStr)

    # tunablefilter module related functions
    @property
    def TF_wavelength(self):
        """ Tunable Filter module.
        Gets tunable filter wavelength. """
        # NOTE(review): int() will fail if the instrument returns a decimal
        # wavelength — confirm the reply format before changing.
        return int(self.ask(self.headStr('TF') + 'TWL?'))

    @TF_wavelength.setter
    def TF_wavelength(self, wavelength):
        """ Tunable Filter module.
        Sets tunable filter wavelength. """
        # FIX: the value must be %-formatted into the command string;
        # write() does not take printf-style extra arguments.
        self.write(self.headStr('TF') + 'TWL %g' % wavelength)

    def TF_stopSweep(self):
        """ Tunable Filter module.
        Stops current wavelength sweep """
        self.write(self.headStr('TF') + 'TSTO')

    def TF_startSweep(self):
        """ Tunable Filter module.
        Starts wavelength sweeping """
        self.write(self.headStr('TF') + 'TSTGL')

    # tunable laser module related functions
    @property
    def TL_wavelength(self):
        """ Tunable Laser module.
        Gets tunable laser wavelength. """
        return int(self.ask(self.headStr('TL') + 'TWL?'))

    @TL_wavelength.setter
    def TL_wavelength(self, wavelength):
        """ Tunable Laser module.
        Sets tunable laser wavelength. """
        # FIX: %-format the value into the command (see TF_wavelength).
        self.write(self.headStr('TL') + 'TWL %d' % wavelength)

    @property
    def TL_power(self):
        """ Tunable Laser module.
        Gets tunable laser power. """
        return int(self.ask(self.headStr('TL') + 'TPDB?'))

    @TL_power.setter
    def TL_power(self, power):
        """ Tunable Laser module.
        Sets tunable laser power. """
        # FIX: %-format the value into the command (see TF_wavelength).
        self.write(self.headStr('TL') + 'TPDB %d' % power)

    def TL_status(self, status):
        """ Tunable Laser module.
        Sets tunable laser On or Off :
        - status is 'On' or 'Off' """
        # FIX: look up the *variable* ``status`` — the original indexed the
        # literal key 'status', which always raised KeyError.
        self.write(self.headStr('TL') + 'L%d' % ApexAP1000.STATUS[status])
def miniMaxSum(arr):
    """Print (and return) the minimum and maximum sums obtainable by adding
    exactly len(arr) - 1 of the values in *arr*.

    Args:
        arr: Non-empty list of numbers.

    Returns:
        Tuple ``(min_sum, max_sum)`` — also printed space-separated, which
        is the required HackerRank output format.
    """
    # FIX: removed the leftover debug ``print(item)`` that corrupted the
    # output, and replaced the manual min/max/accumulate loop with builtins:
    # dropping the largest element gives the minimum sum and vice versa.
    total = sum(arr)
    min_sum = total - max(arr)
    max_sum = total - min(arr)
    print(min_sum, max_sum)
    return min_sum, max_sum


miniMaxSum([7, 69, 2, 221, 8974])
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login, logout
from celery_tasks.tasks import celery_send_mail
from apps.user.models import User
import re
from django.shortcuts import render
from django.views import View
from utils.security import get_user_token, get_activation_link, get_user_id
from django.conf import settings
from django.http import HttpResponse
from django.urls import reverse

# Create your views here.


# /user/register
class RegisterView(View):
    """User registration: GET renders the form, POST creates an inactive
    account and mails an activation link (via a celery task)."""

    def get(self, request):
        return render(request, 'user_register.html')

    def post(self, request):
        username = request.POST.get('username')
        password = request.POST.get('password')
        rpassword = request.POST.get('rpassword')
        email = request.POST.get('email')
        allow = request.POST.get('allow')

        # Every field is mandatory, including the terms-of-use checkbox.
        if not all([username, password, rpassword, email, allow]):
            context = {'errmsg': '数据不完整'}
            return render(request, 'user_register.html', context=context)

        if password != rpassword:
            context = {'errmsg': '密码不一致'}
            return render(request, 'user_register.html', context=context)

        if not re.match(r'^[a-z0-9][\w.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$',
                        email):
            context = {'errmsg': '邮箱格式不正确'}
            return render(request, 'user_register.html', context=context)

        if allow != 'on':
            context = {'errmsg': '请同意天天生鲜用户协议'}
            # BUG FIX: this branch previously fell through without returning,
            # so users were registered even when they refused the agreement.
            return render(request, 'user_register.html', context=context)

        # Reject duplicate usernames (EAFP: a miss means the name is free).
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            user = None

        if user is not None:
            context = {'errmsg': '已经创建该用户名'}
            return render(request, 'user_register.html', context=context)

        # Create the account inactive until the e-mail link is clicked.
        user = User.objects.create_user(username, email, password)
        user.is_active = 0
        user.save()

        # Build a signed activation link for this user id.
        user_token = get_user_token(user.id)
        activation_link = get_activation_link(settings.ACTIVATION_URL_PATH,
                                              user_token)

        # send email
        subject = '天天生鲜欢迎信息'
        message = ''
        html_message = (
            '<h1>%s,欢迎您成为天天生鲜的注册会员</h1><p>请点击以下链接激活你的账户</p><br><a href="%s">%s</a>'
            % (username, activation_link, activation_link))
        from_email = 'dailyfresh<awsbreathpanda@163.com>'
        recipient_list = [
            'awsbreathpanda@163.com',
        ]
        # Deliver asynchronously so the HTTP response is not blocked on SMTP.
        celery_send_mail.delay(subject, message, from_email, recipient_list,
                               html_message=html_message)

        context = {'errmsg': '添加用户成功'}
        return render(request, 'user_register.html', context=context)


# /user/activate/(token)
class ActivateView(View):
    """Activates the account encoded in the signed token."""

    def get(self, request, token):
        token_bytes = token.encode('utf-8')
        user_id = get_user_id(token_bytes)
        user = User.objects.get(id=user_id)
        user.is_active = 1
        user.save()

        # TODO
        return HttpResponse('<h1>Activate User Successfully</h1>')


# /user/login
class LoginView(View):
    """Login form; optionally remembers the username in a cookie."""

    def get(self, request):
        # Pre-fill the form when a remembered username cookie exists.
        username = request.COOKIES.get('username')
        checked = 'checked'
        if username is None:
            username = ''
            checked = ''
        context = {'username': username, 'checked': checked}
        return render(request, 'user_login.html', context=context)

    def post(self, request):
        username = request.POST.get('username')
        password = request.POST.get('password')
        remember = request.POST.get('remember')

        if not all([username, password]):
            context = {'errmsg': '参数不完整'}
            return render(request, 'user_login.html', context=context)

        user = authenticate(request, username=username, password=password)
        if user is None:
            context = {'errmsg': '用户不存在'}
            return render(request, 'user_login.html', context=context)

        if not user.is_active:
            context = {'errmsg': '用户未激活'}
            return render(request, 'user_login.html', context=context)

        login(request, user)

        # Honour ?next= redirects, defaulting to the site index.
        next_url = request.GET.get('next', reverse('goods:index'))
        response = redirect(next_url)

        if remember == 'on':
            response.set_cookie('username', username, max_age=7 * 24 * 3600)
        else:
            response.delete_cookie('username')

        return response


# /user/
class UserInfoView(View):
    """User-centre info page; bounces anonymous users to login."""

    def get(self, request):
        if not request.user.is_authenticated:
            next_url = reverse(
                'user:login') + '?next=' + request.get_full_path()
            return redirect(next_url)
        else:
            return render(request, 'user_center_info.html')


# /user/order/(page)
class UserOrderView(View):
    """User-centre order list; bounces anonymous users to login."""

    def get(self, request, page):
        if not request.user.is_authenticated:
            next_url = reverse(
                'user:login') + '?next=' + request.get_full_path()
            return redirect(next_url)
        else:
            return render(request, 'user_center_order.html')


# /user/address
class UserAddressView(View):
    """User-centre address page; bounces anonymous users to login."""

    def get(self, request):
        if not request.user.is_authenticated:
            next_url = reverse(
                'user:login') + '?next=' + request.get_full_path()
            return redirect(next_url)
        else:
            return render(request, 'user_center_site.html')


# /user/logout
class LogoutView(View):
    """Clears the session and returns to the index page."""

    def get(self, request):
        logout(request)
        return redirect(reverse('goods:index'))
import sys
import logging
import inspect

import numpy as np
import sympy as sp

import ANNarchy_future.api as api
import ANNarchy_future.parser as parser


class SynapseParser(object):
    """Synapse parser.

    Attributes:
        synapse (api.Synapse): Synapse class.
        pre (api.Neuron): pre-synaptic Neuron class.
        post (api.Neuron): post-synaptic Neuron class.
        name (str): name of the Synapse class.
        attributes (list): list of attributes (parameters and variables).
        parameters (list): list of parameters.
        variables (list): list of variables.

    TODO:
        update_equations (list): update equations.
        spike_condition (Condition): spike condition.
        reset_equations (list): reset equations.
    """

    def __init__(self, synapse: 'api.Synapse', pre: 'api.Neuron', post: 'api.Neuron'):
        """Initializes the parser.

        Sets:

        * `self.synapse`
        * `self.name`
        """
        self.synapse = synapse
        self.pre = pre
        self.post = post
        self._spiking = False
        self.name = self.synapse.__class__.__name__

        # Logging
        self._logger = logging.getLogger(__name__)
        self._logger.debug("Synapse parser created.")

        # Attributes
        self.attributes = []
        self.parameters = []
        self.variables = []
        self.shared = []

        # Equations to retrieve
        self.update_equations = []
        self.update_dependencies = []

    def is_spiking(self) -> bool:
        # Docstring fixed: this parser handles a Synapse, not a Neuron.
        "Returns True if the Synapse class is spiking."
        return self._spiking

    def extract_variables(self):
        """Iterates over `synapse.__dict__` and extracts all `Parameter()`
        and `Variable()` instances.

        Sets:

        * `self._spiking`
        * `self.attributes`
        * `self.parameters`
        * `self.variables`
        * `self.shared`
        """
        # List attributes
        current_attributes = list(self.synapse.__dict__.keys())

        for attr in current_attributes:
            # Parameter
            if isinstance(getattr(self.synapse, attr), (api.Parameter, )):
                self.parameters.append(attr)
                self.attributes.append(attr)
            # Variable
            if isinstance(getattr(self.synapse, attr), (api.Variable, )):
                self.variables.append(attr)
                self.attributes.append(attr)

        # Shared variables (single value shared by all synapses).
        for attr in self.attributes:
            if getattr(self.synapse, attr)._shared:
                self.shared.append(attr)

        # Get lists of parameters and variables
        self._logger.info("Attributes: " + str(self.attributes))
        self._logger.info("Parameters: " + str(self.parameters))
        self._logger.info("Variables: " + str(self.variables))

        # Set the attributes to the synapse
        self.synapse.attributes = self.attributes
        self.synapse.pre_attributes = self.pre.attributes
        self.synapse.post_attributes = self.post.attributes
        self.synapse._parser = self

    def analyse_equations(self):
        """Analyses the synapse equations.

        Calls update(), spike() and reset() to retrieve the `Equations`
        objects.

        Sets:

        * `self.update_equations`
        * `self.spike_condition`
        * `self.reset_equations`
        """
        # List of methods
        callables = [f for f in dir(self.synapse) if callable(getattr(self.synapse, f))]

        if 'update' in callables:
            self._logger.info("Calling Synapse.update().")

            # A `method=` keyword default on update() selects the numerical
            # method; fall back to explicit Euler otherwise.
            signature = inspect.signature(self.synapse.update)
            if 'method' in signature.parameters.keys():
                method = signature.parameters['method'].default
                if method not in parser.Config.numerical_methods:
                    self._logger.error(self.name + ".update(): " + method + " is not available.")
                    sys.exit(1)
            else:
                method = 'euler'

            try:
                with self.synapse.Equations(method=method) as s:
                    self.synapse.update(s)
            except Exception:
                self._logger.exception("Error when parsing " + self.name + ".update().")
                sys.exit(1)
            else:
                self.update_equations, self.update_dependencies = \
                    self.process_equations(self.synapse._current_eq)
                self.synapse._current_eq = []

    def process_equations(self, equations) -> tuple:
        """Checks all declared equations and applies a numerical method if
        necessary.

        Args:
            equations: list of Equations objects.

        Returns:
            (blocks, dependencies): `blocks` is a list of equation blocks
            (assignments, ODEs and conditions); `dependencies` is the
            deduplicated list of names the blocks depend on.
            (Annotation fixed: this has always returned a 2-tuple,
            not a list.)
        """
        dependencies = []

        blocks = parser.get_blocks(self, equations)

        for block in blocks:
            block.dependencies()
            for dep in block._dependencies:
                dependencies.append(dep)
            block.parse()

        # Deduplicate (order is not significant downstream).
        dependencies = list(set(dependencies))

        return blocks, dependencies

    def __str__(self):
        code = "Synapse " + self.name + "\n"
        code += "*" * 60 + "\n"

        code += "Parameters: " + str(self.parameters) + "\n"
        code += "Variables: " + str(self.variables) + "\n\n"

        code += "Synaptic equations:\n"
        for block in self.update_equations:
            code += str(block)

        return code
from rasa.core.channels.channel import InputChannel, UserMessage, RestInput, CollectingOutputChannel
from sanic import Sanic, Blueprint, response
import asyncio
import inspect
import json
import logging
import uuid
from asyncio import Queue, CancelledError
from sanic import Sanic, Blueprint, response
from sanic.request import Request
from typing import Text, List, Dict, Any, Optional, Callable, Iterable, Awaitable

import rasa.utils.endpoints
from rasa.cli import utils as cli_utils
from rasa.constants import DOCS_BASE_URL
from rasa.core import utils
from sanic.response import HTTPResponse
from typing import NoReturn

from apis.ibapi import query_by_id
from log.BCLog import log


class CustomApi(RestInput):
    """REST input channel that short-circuits knowledge-base lookups
    (answer/problem ids) before handing the message to rasa."""

    @classmethod
    def name(cls) -> Text:
        return "CustomApi"

    def blueprint(self, on_new_message: Callable[[UserMessage], Awaitable[None]]) -> Blueprint:
        """Build the sanic blueprint exposing health, cloud-status and
        webhook endpoints for this channel."""
        custom_webhook = Blueprint(
            "custom_webhook_{}".format(type(self).__name__),
            inspect.getmodule(self).__name__,
        )

        # noinspection PyUnusedLocal
        @custom_webhook.route("/", methods=["GET"])
        async def health(request: Request) -> HTTPResponse:
            return response.json({"status": "ok"})

        @custom_webhook.route("/cloudstatus", methods=["GET"])
        async def cloudstatus(request: Request) -> HTTPResponse:
            return response.json({"status": "UP"})

        @custom_webhook.route("/webhook", methods=["POST"])
        async def receive(request: Request) -> HTTPResponse:
            # todo
            # 1 参数转换 done
            # 2 请求转发 done
            # 注: 前端加上senderId
            log.debug(json.dumps(request.json))
            sender_id = await self._extract_sender(request)
            text = self._extract_message(request)
            should_use_stream = rasa.utils.endpoints.bool_arg(
                request, "stream", default=False
            )
            input_channel = self._extract_input_channel(request)
            metadata = self.get_metadata(request)

            answer_id = self._extract_answer_id(request)
            # BUG FIX: problem_id was extracted with _extract_answer_id,
            # so _extract_problem_id was never used and a request carrying
            # only a problemId fell through to rasa.
            problem_id = self._extract_problem_id(request)
            if answer_id is not None or problem_id is not None:
                # Direct knowledge-base lookup — bypass the NLU pipeline.
                log.debug('query by id')
                json_result = query_by_id(request.json)
                log.debug(json.dumps(json_result))
                return response.json(json_result)

            # rasa 处理
            log.debug('query by rasa')
            if should_use_stream:
                return response.stream(
                    self.stream_response(
                        on_new_message, text, sender_id, input_channel, metadata
                    ),
                    content_type="text/event-stream",
                )
            else:
                collector = CollectingOutputChannel()
                # noinspection PyBroadException
                try:
                    await on_new_message(
                        UserMessage(
                            text,
                            collector,
                            sender_id,
                            input_channel=input_channel,
                            metadata=metadata,
                        )
                    )
                except CancelledError:
                    log.error(
                        "Message handling timed out for "
                        "user message '{}'.".format(text)
                    )
                except Exception:
                    log.exception(
                        "An exception occured while handling "
                        "user message '{}'.".format(text)
                    )
                return response.json(collector.messages)

        return custom_webhook

    def _extract_message(self, req: Request):
        """Pull the user's text from the custom `inputText` payload key."""
        return req.json.get('inputText')

    def _extract_answer_id(self, req: Request):
        """Return `askParam.answerId` if present, else None."""
        ask_param = req.json.get('askParam')
        if ask_param is not None:
            return ask_param['answerId']
        return None

    def _extract_problem_id(self, req: Request):
        """Return `askParam.problemId` if present, else None."""
        ask_param = req.json.get('askParam')
        if ask_param is not None:
            return ask_param['problemId']
        return None
from enum import Enum


class VibrationType(Enum):
    """Discrete vibration feedback strengths, ordered by duration."""

    NONE = 0    # no vibration at all
    SHORT = 1   # brief pulse
    MEDIUM = 2  # medium-length pulse
    LONG = 3    # long pulse
""" Created on Mon Oct 15 17:11:01 2018 @author: zhengyi """
from rest_framework import viewsets

from api.componentgroup import serializers
from core.models import ComponentGroup


class ComponentViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for ComponentGroup records."""

    def get_queryset(self):
        """Expose every ComponentGroup; no per-request filtering."""
        return ComponentGroup.objects.all()

    def get_serializer_class(self):
        """Pick the serializer for the current DRF action.

        `retrieve` gets the detail serializer; everything else (including
        `list`) falls back to the list serializer.
        """
        per_action = {
            "list": serializers.ComponentGroupSerializerList,
            "retrieve": serializers.ComponentGroupSerializerDetail,
        }
        return per_action.get(self.action, serializers.ComponentGroupSerializerList)
# -*- coding: utf-8 -*-
"""
    test_build_gettext
    ~~~~~~~~~~~~~~~~~~

    Test the build process with gettext builder with the test root.

    :copyright: Copyright 2010 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
# NOTE(review): Python 2 source (`print stdout` statements) — do not run
# under Python 3 without porting.

import gettext
import os
from subprocess import Popen, PIPE

from util import *
from util import SkipTest


def teardown_module():
    # Remove the build output after this module's tests have run.
    (test_root / '_build').rmtree(True)


@with_app(buildername='gettext')
def test_all(app):
    # Generic build; should fail only when the builder is horribly broken.
    app.builder.build_all()


@with_app(buildername='gettext')
def test_build(app):
    # Do messages end up in the correct location?
    app.builder.build(['extapi', 'subdir/includes'])
    # top-level documents end up in a message catalog
    assert (app.outdir / 'extapi.pot').isfile()
    # directory items are grouped into sections
    assert (app.outdir / 'subdir.pot').isfile()


@with_app(buildername='gettext')
def test_gettext(app):
    # End-to-end: extract a .pot, initialise an en_US .po with msginit,
    # compile it with msgfmt, then load the catalog through gettext.
    app.builder.build(['markup'])
    (app.outdir / 'en' / 'LC_MESSAGES').makedirs()
    # The gettext tools are run from inside the output directory.
    cwd = os.getcwd()
    os.chdir(app.outdir)
    try:
        try:
            p = Popen(['msginit', '--no-translator', '-i', 'markup.pot',
                       '--locale', 'en_US'],
                      stdout=PIPE, stderr=PIPE)
        except OSError:
            raise SkipTest  # most likely msginit was not found
        else:
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                # Surface the tool's output before failing.
                print stdout
                print stderr
                assert False, 'msginit exited with return code %s' % \
                        p.returncode
        assert (app.outdir / 'en_US.po').isfile(), 'msginit failed'
        try:
            p = Popen(['msgfmt', 'en_US.po', '-o',
                       os.path.join('en', 'LC_MESSAGES', 'test_root.mo')],
                      stdout=PIPE, stderr=PIPE)
        except OSError:
            raise SkipTest  # most likely msgfmt was not found
        else:
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                # Surface the tool's output before failing.
                print stdout
                print stderr
                assert False, 'msgfmt exited with return code %s' % \
                        p.returncode
        assert (app.outdir / 'en' / 'LC_MESSAGES' / 'test_root.mo').isfile(), \
            'msgfmt failed'
    finally:
        # Always restore the working directory for subsequent tests.
        os.chdir(cwd)

    # The compiled catalog must round-trip the (identity) translation.
    _ = gettext.translation('test_root', app.outdir, languages=['en']).gettext
    assert _("Testing various markup") == u"Testing various markup"
from .baseactions import BaseActions
from models.state import State
import re


class StateActions(BaseActions):
    """Translates attribute-level diffs of a State object into a list of
    action dicts (changeKey, setRoles, setTransitions, ...).

    Diff keys look like 'root.attr' or 'root.attr[i]'; only the part after
    the first dot is inspected.
    """

    @classmethod
    def _regular_attribute_actions(cls, diff: dict, obj, old_obj=None):
        """Actions for scalar attribute changes; values are read from *obj*."""
        actions = []
        for root_attr in diff:
            attr = root_attr.split('.')[1]
            if attr == 'key':
                actions.append({'action': 'changeKey', 'key': obj.key})
            elif attr == '_type':
                actions.append({'action': 'changeType', 'type': obj.type})
            elif attr == 'initial':
                actions.append(
                    {'action': 'changeInitial', 'initial': obj.initial})
            elif attr == 'roles':
                actions.append({'action': 'setRoles', 'roles': obj.roles})
            elif attr == 'transitions':
                actions.append({'action': 'setTransitions', 'transitions': [
                    transition.__dict__ for transition in obj.transitions]})
            elif attr.startswith('name'):
                actions.append({'action': 'setName', 'name': obj.name})
            elif attr.startswith('description'):
                actions.append({'action': 'setDescription',
                                'description': obj.description})
        return actions

    @classmethod
    def _iterable_attribute_add_actions(cls, diff: dict, obj, old_obj=None):
        """Actions for elements added to list attributes (roles/transitions)."""
        actions = []
        for root_attr in diff:
            attr = root_attr.split('.')[1]
            # idiom: `'x' in attr` instead of attr.__contains__('x')
            if 'roles' in attr:
                actions.append(
                    {'action': 'addRoles', 'roles': [diff[root_attr]]})
            elif 'transitions' in attr:
                actions.append({'action': 'setTransitions', 'transitions': [
                    transition.__dict__ for transition in obj.transitions]})
        return actions

    @classmethod
    def _iterable_attribute_update_actions(cls, diff: dict, obj, old_obj):
        """Actions for in-place updates of list elements: a role update is
        expressed as remove(old) + add(new)."""
        actions = []
        for root_attr in diff:
            attr = root_attr.split('.')[1]
            if 'roles' in attr:
                # BUG FIX: the pattern was r'[\d+]' — a character class
                # matching a SINGLE digit (or '+'), so 'roles[12]' yielded
                # index 1 instead of 12.  r'\d+' captures the whole index.
                index = int(re.findall(r'\d+', attr)[0])
                actions.append({'action': 'removeRoles',
                                'roles': [old_obj.roles[index]]})
                actions.append({'action': 'addRoles',
                                'roles': [obj.roles[index]]})
            elif 'transitions' in attr:
                actions.append({'action': 'setTransitions', 'transitions': [
                    transition.__dict__ for transition in obj.transitions]})
        return actions

    @classmethod
    def _iterable_attribute_remove_actions(cls, diff: dict,
                                           obj, old_obj=None):
        """Actions for elements removed from list attributes."""
        actions = []
        for root_attr in diff:
            attr = root_attr.split('.')[1]
            if 'roles' in attr:
                actions.append(
                    {'action': 'removeRoles', 'roles': [diff[root_attr]]})
            elif 'transitions' in attr:
                actions.append({'action': 'setTransitions', 'transitions': [
                    transition.__dict__ for transition in obj.transitions]})
        return actions

    @classmethod
    def _diccionary_attribute_add_actions(cls, diff: dict, obj, old_obj=None):
        """Actions for keys added to dict attributes (localized name/description)."""
        actions = []
        for root_attr in diff:
            attr = root_attr.split('.')[1]
            if attr.startswith('name'):
                actions.append({'action': 'setName', 'name': obj.name})
            elif attr.startswith('description'):
                actions.append({'action': 'setDescription',
                                'description': obj.description})
        return actions

    @classmethod
    def _diccionary_attribute_remove_actions(cls, diff: dict, obj,
                                             old_obj=None):
        """Actions for keys removed from dict attributes (mirrors the add case)."""
        actions = []
        for root_attr in diff:
            attr = root_attr.split('.')[1]
            if attr.startswith('name'):
                actions.append({'action': 'setName', 'name': obj.name})
            elif attr.startswith('description'):
                actions.append({'action': 'setDescription',
                                'description': obj.description})
        return actions
# Generated by Django 2.1.7 on 2019-07-07 21:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("app", "0023_auto_20190706_2208")] operations = [ migrations.AlterField( model_name="move", name="action_type", field=models.CharField( choices=[ ("post_selfie", "Post a selfie"), ("post_group_selfie", "Post group selfie"), ("post_story", "Post a story"), ("go_live", "Go live"), ("leave_comment", "Leave a comment"), ("dont_post", "Don't post"), ("no_move", "No move"), ], default="dont_post", max_length=200, ), ) ]
import operator
from functools import partial
from typing import Callable, Tuple, Optional, Dict

from pypika import functions
from pypika.enums import SqlTypes

from tortoise.fields import BackwardFKField, Field, ManyToManyField, RelationField

# NOTE(review): each filter function below returns a pypika criterion tree;
# its exact shape maps directly onto the generated SQL, so the expression
# structure must not be rearranged casually.

#
# Encoders
#


def identity_encoder(value, *args):
    """Pass the value through unchanged."""
    return value


def bool_encoder(value, *args):
    """Coerce the value to a plain bool."""
    return bool(value)


def string_encoder(value, *args):
    """Coerce the value to a plain str."""
    return str(value)


def list_encoder(values, instance, field: Field):
    """Encodes an iterable of a given field into a database-compatible format."""
    return [field.db_value(element, instance) for element in values]


#
# to_db_value functions
#


def related_to_db_value_func(field: RelationField):
    # A related object is encoded via its model's primary-key encoder.
    return field.remote_model._meta.pk.db_value


def list_pk_encoder(values, instance, field: Field):
    # Accepts model instances (uses .pk) as well as raw pk values.
    return [field.db_value(getattr(v, "pk", v), instance) for v in values]


def related_list_to_db_values_func(field: RelationField):
    # Pre-bind the related pk field so the encoder matches list_encoder's shape.
    return partial(list_pk_encoder, field=field.remote_model._meta.pk)


#
# Filters
#


def is_in(field, value):
    """SQL IN."""
    return field.isin(value)


def not_in(field, value):
    # NULL never matches NOT IN in SQL, so rows with NULL are added explicitly.
    return field.notin(value) | field.isnull()


def not_equal(field, value):
    # Same NULL caveat as not_in: include NULL rows in "not equal".
    return field.ne(value) | field.isnull()


def is_null(field, value):
    """IS NULL when value is truthy, IS NOT NULL otherwise."""
    if value:
        return field.isnull()
    return field.notnull()


def not_null(field, value):
    """Inverse of is_null."""
    if value:
        return field.notnull()
    return field.isnull()


def contains(field, value):
    # Cast to VARCHAR so LIKE also works on non-text columns.
    return functions.Cast(field, SqlTypes.VARCHAR).like(f"%{value}%")


def starts_with(field, value):
    return functions.Cast(field, SqlTypes.VARCHAR).like(f"{value}%")


def ends_with(field, value):
    return functions.Cast(field, SqlTypes.VARCHAR).like(f"%{value}")


def insensitive_exact(field, value):
    # Case-insensitivity via UPPER() on both sides.
    return functions\
        .Upper(functions.Cast(field, SqlTypes.VARCHAR))\
        .eq(functions.Upper(f"{value}"))


def insensitive_contains(field, value):
    return functions\
        .Upper(functions.Cast(field, SqlTypes.VARCHAR))\
        .like(functions.Upper(f"%{value}%"))


def insensitive_starts_with(field, value):
    return functions\
        .Upper(functions.Cast(field, SqlTypes.VARCHAR))\
        .like(functions.Upper(f"{value}%"))


def insensitive_ends_with(field, value):
    return functions\
        .Upper(functions.Cast(field, SqlTypes.VARCHAR))\
        .like(functions.Upper(f"%{value}"))


class BaseFilter:
    """Lookup tables mapping ORM comparison suffixes (``gte``, ``icontains``,
    ...) to a (filter function, optional value encoder) pair."""

    # Comparisons available on ordinary (non-relational) fields.
    FILTER_FUNC_MAP: Dict[str, Tuple[Callable, Optional[Callable]]] = {
        "": (operator.eq, None),
        "exact": (operator.eq, None),
        "not": (not_equal, None),
        "in": (is_in, list_encoder),
        "not_in": (not_in, list_encoder),
        "isnull": (is_null, bool_encoder),
        "not_isnull": (not_null, bool_encoder),
        "gte": (operator.ge, None),
        "lte": (operator.le, None),
        "gt": (operator.gt, None),
        "lt": (operator.lt, None),
        "contains": (contains, string_encoder),
        "startswith": (starts_with, string_encoder),
        "endswith": (ends_with, string_encoder),
        "iexact": (insensitive_exact, string_encoder),
        "icontains": (insensitive_contains, string_encoder),
        "istartswith": (insensitive_starts_with, string_encoder),
        "iendswith": (insensitive_ends_with, string_encoder),
    }

    # Comparisons available on relational fields; the second item here is a
    # FACTORY that builds the encoder from the field (see get_filter_func_for).
    RELATED_FILTER_FUNC_MAP: Dict[str, Tuple[Callable, Callable]] = {
        "": (operator.eq, related_to_db_value_func),
        "exact": (operator.eq, related_to_db_value_func),
        "not": (not_equal, related_to_db_value_func),
        "in": (is_in, related_list_to_db_values_func),
        "not_in": (not_in, related_list_to_db_values_func)
    }

    @classmethod
    def get_filter_func_for(cls, field: Field, comparison: str) -> Optional[Tuple[Callable, Optional[Callable]]]:
        """Resolve the (filter function, value encoder) pair for a
        field/comparison combination, or None when unsupported.

        For relational fields the encoder factory is called with the field,
        yielding an encoder bound to the related model's primary key.
        """
        if isinstance(field, (BackwardFKField, ManyToManyField)):
            if comparison not in cls.RELATED_FILTER_FUNC_MAP:
                return None
            (filter_operator, filter_encoder) = cls.RELATED_FILTER_FUNC_MAP[comparison]
            return filter_operator, filter_encoder(field)
        else:
            if comparison not in cls.FILTER_FUNC_MAP:
                return None
            return cls.FILTER_FUNC_MAP[comparison]
from cloud_vision_samples.args import parse_request_vision_args
from cloud_vision_samples.vision_api import create_single_post, request_post


def main():
    """Parse CLI arguments, send one Cloud Vision request and print the
    raw response body."""
    # parse arguments (stray trailing comma and dead debug prints removed)
    input_loc, api_key, detection_types = parse_request_vision_args()

    response = request_post(api_key,
                            create_single_post(input_loc, detection_types))
    print(response.text)


if __name__ == '__main__':
    main()
# test the PredictZero method class
import tigerforecast
import jax.numpy as np
import matplotlib.pyplot as plt


def test_predict_zero(steps=1000, show_plot=True):
    """Run the PredictZero method against an ARMA(3,3) problem for *steps*
    steps, collecting the squared prediction error at every step.

    Args:
        steps: number of time steps to simulate.
        show_plot: when True, briefly display the loss curve.
    """
    T = steps
    p, q = 3, 3
    problem = tigerforecast.problem("ARMA-v0")
    cur_x = problem.initialize(p, q)

    method = tigerforecast.method("PredictZero")
    method.initialize()

    # Named function instead of a lambda assignment (PEP 8 / E731).
    def loss(y_true, y_pred):
        # Per-step squared error.
        return (y_true - y_pred) ** 2

    results = []
    for _ in range(T):
        cur_y_pred = method.predict(cur_x)
        cur_y_true = problem.step()
        results.append(loss(cur_y_true, cur_y_pred))
        method.update(cur_y_true)
        # The next input is the value just observed.
        cur_x = cur_y_true

    if show_plot:
        plt.plot(results)
        plt.title("Zero method on ARMA problem")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
    print("test_predict_zero passed")
    return


if __name__ == "__main__":
    test_predict_zero()
from django.apps import AppConfig


class DismissalConfig(AppConfig):
    """Application configuration for the ``dismissal`` Django app."""

    # Dotted path of the application this config belongs to.
    name = 'dismissal'
""" Выполнить собственную программную реализацию любой хеш функции. """ import hashlib from mymd5 import MD5 def main(): input_string = input("Введите строку для хеширования -> ") md5_hash = MD5.hash(input_string) print("Хеш MD5 собственной реализации\n{}".format(md5_hash)) result = hashlib.md5(input_string.encode()) print("Хеш MD5 из hashlib\n{}".format(result.hexdigest())) if __name__ == "__main__": main()
import requests
import json

# Challenge API token — was duplicated inline in both requests.
TOKEN = '7628d19dc804169849b04c989c364a4e'


def find_needle():
    """Fetch the challenge haystack and return the index of the needle,
    or None if it is absent."""
    r = requests.post('http://challenge.code2040.org/api/haystack',
                      data={'token': TOKEN})
    d = json.loads(r.text)
    # list.index() would raise on a missing needle; keep the original
    # "None when absent" behaviour.
    for idx, val in enumerate(d['haystack']):
        if val == d['needle']:
            return idx
    return None


if __name__ == '__main__':
    target = find_needle()
    r = requests.post('http://challenge.code2040.org/api/haystack/validate',
                      data={'token': TOKEN, 'needle': target})
    # BUG FIX: the validation verdict was silently discarded before.
    print(r.status_code, r.text)
# Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Relative change between the current and a prior element."""
import haiku as hk

from wax.modules import Ffill, Lag


class PctChange(hk.Module):
    """Relative change between the current and a prior element.

    Mirrors pandas' ``pct_change``: returns ``x / x[t-periods] - 1``.

    Args:
        periods: lag distance (only 1 is implemented).
        fill_method: how to fill missing values before lagging;
            "ffill"/"pad" forward-fills, anything else lags the raw input.
        limit: unused for now; kept for pandas API parity.
        name: haiku module name.
    """

    def __init__(
        self,
        periods: int = 1,
        fill_method: str = "pad",
        limit: int = None,
        name: str = None,
    ):
        super().__init__(name=name)
        self.periods = periods
        self.fill_method = fill_method
        self.limit = limit
        assert periods == 1, "periods > 1 not implemented."

    def __call__(self, x):
        if self.fill_method in ["ffill", "pad"]:
            # Forward-fill missing values, then lag by `periods`.
            previous_x = Lag(self.periods)(Ffill()(x))
        else:
            # BUG FIX: the lag module was called with no argument
            # (`Lag(self.periods)()`), which raised at trace time; the raw
            # input must be passed through.
            previous_x = Lag(self.periods)(x)
        pct_change = x / previous_x - 1.0
        return pct_change
from django import forms

from ..models import Athlete


class AthleteForm(forms.ModelForm):
    """ModelForm exposing every field of the Athlete model."""

    class Meta:
        # Derive the form straight from the model, excluding nothing.
        model = Athlete
        fields = '__all__'
# coding: utf-8 """ Traitement des VUES """ import re import collections from django.contrib.auth.decorators import login_required from django.shortcuts import render, redirect, get_object_or_404 from django.core.exceptions import PermissionDenied from django.urls import resolve from django.http import JsonResponse from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.db.models import OuterRef, Subquery from crudy.crudy import Crudy from crudy.views import CrudyListView from . import forms from .models import TarotPartie, TarotJoueur, TarotParticipant, TarotJeu APP_NAME = "tarot" def folder_required(function): def wrap(request, *args, **kwargs): crudy = Crudy(request, APP_NAME) if crudy.folder_id: return function(request, *args, **kwargs) else: return redirect("v_tarot_partie_select") wrap.__doc__ = function.__doc__ wrap.__name__ = function.__name__ return wrap def p_tarot_home(request): """ vue Home """ return redirect("p_tarot_help") def p_tarot_help(request): """ Guide """ crudy = Crudy(request, APP_NAME) title = crudy.application.get("title") crudy.url_actions = [] crudy.layout = "help" form = None return render(request, 'p_crudy_help.html', locals()) """ Gestion des parties """ class TarotPartieSelectView(CrudyListView): """ Sélection d'une partie """ class Meta(CrudyListView.Options): application = APP_NAME model = TarotPartie title = "Sélection / ajout d'une partie" url_add = "f_tarot_partie_create" url_folder = "f_tarot_partie_folder" cols_ordered = ['name'] cols = { "name": {"title":"Partie", "type":"button", "url":"f_tarot_partie_update"}, } order_by = ('name',) url_view = "v_tarot_partie_select" url_next_page = "v_tarot_participant_select" def get_queryset(self): """ queryset générique """ objs = self.meta.model.objects.all().filter(owner=self.request.user.username)\ .filter(**self.meta.filters)\ .order_by(*self.meta.order_by)\ .values(*self.meta.cols_ordered) # tri des colonnes si version python 3.5 self.objs = 
self.sort_cols(objs) crudy = Crudy(self.request, APP_NAME) if not crudy.folder_id: self.meta.url_next_page = None return self.objs def f_tarot_partie_folder(request, record_id): """ Enregistrement d'une partie dans folder""" crudy = Crudy(request, APP_NAME) crudy.layout = "form" obj = get_object_or_404(TarotPartie, id=record_id) crudy.folder_id = obj.id crudy.folder_name = obj.name return redirect("v_tarot_participant_select") def f_tarot_partie_create(request): """ Nouvelle partie """ crudy = Crudy(request, APP_NAME) crudy.layout = "form" title = "Nouvelle Partie" model = TarotPartie crudy.message = "" if request.POST: form = forms.TarotPartieForm(request.POST, request=request) if form.is_valid(): form.save() post = form.save(commit=False) post.owner = request.user.username post.name = post.name.upper() post.save() return redirect("v_tarot_partie_select") else: form = forms.TarotPartieForm(request=request) return render(request, 'f_crudy_form.html', locals()) def f_tarot_partie_update(request, record_id): """ Modification d'une partie """ crudy = Crudy(request, APP_NAME) crudy.layout = "form" title = "Modification d'une Partie" crudy.message = "" crudy.url_delete = "f_tarot_partie_delete" obj = get_object_or_404(TarotPartie, id=record_id) model = TarotPartie form = forms.TarotPartieForm(request.POST or None, request=request, instance=obj) if form.is_valid(): form.save() return redirect("v_tarot_partie_select") return render(request, "f_crudy_form.html", locals()) def f_tarot_partie_delete(request, record_id): """ suppression de l'enregistrement """ crudy = Crudy(request, APP_NAME) obj = get_object_or_404(TarotPartie, id=record_id) obj.delete() return redirect("v_tarot_partie_select") """ Gestion des participants """ class TarotParticipantSelectView(CrudyListView): """ Liste des participants """ class Meta(CrudyListView.Options): application = APP_NAME model = TarotJoueur title = "Sélection des Participants" cols_ordered = ['pseudo'] cols = { "pseudo": 
{"title":"Nom du joueur", "type":"button", "url":"f_tarot_joueur_update"}, } order_by = ('pseudo',) url_add = "f_tarot_joueur_create" url_join = "v_tarot_participant_join" url_view = "v_tarot_participant_select" help_page = "i_tarot_participant_select.md" url_next_page = "v_tarot_participant_list" def get_queryset(self): """ queryset générique """ crudy = Crudy(self.request, APP_NAME) objs = TarotJoueur.objects.all().filter(owner=self.request.user.username)\ .filter(**self.meta.filters)\ .order_by(*self.meta.order_by)\ .values(*self.meta.cols_ordered) # tri des colonnes si version python 3.5 self.objs = self.sort_cols(objs) # Cochage des participants dans la liste des joueurs participants = TarotParticipant.objects.all().filter(partie__id=crudy.folder_id) crudy.joined = [] for obj in self.objs: for participant in participants: if participant.joueur_id == obj["id"]: crudy.joined.append(obj["id"]) return self.objs def v_tarot_participant_join(request, record_id): """ Création d'un participant à partir de la sélection dans la vue des joueurs """ crudy = Crudy(request, APP_NAME) iid = int(record_id) if iid in crudy.joined: participant = TarotParticipant.objects.all().filter(partie_id=crudy.folder_id, joueur_id=iid) participant.delete() # compute_ordre() dans post_delete_tarot crudy.joined.remove(iid) else: participant = TarotParticipant(partie_id=crudy.folder_id, joueur_id=iid) participant.save() participant.compute_order() crudy.joined.append(iid) return redirect("v_tarot_participant_select") def f_tarot_joueur_create(request): """ création d'un joueur """ crudy = Crudy(request, APP_NAME) crudy.layout = "form" title = "Nouveau Joueur" crudy.message = "" model = TarotJoueur if request.POST: form = forms.TarotJoueurForm(request.POST, request=request) if form.is_valid(): post = form.save(commit=False) post.owner = request.user.username post.pseudo = post.pseudo.upper() post.save() return redirect("v_tarot_participant_select") else: form = 
forms.TarotJoueurForm(request=request)
    return render(request, 'f_crudy_form.html', locals())


def f_tarot_joueur_update(request, record_id):
    """Update a player (joueur) via the generic crudy form."""
    crudy = Crudy(request, APP_NAME)
    crudy.layout = "form"
    crudy.url_delete = "f_tarot_joueur_delete"
    title = "Modification d'un Joueur"
    crudy.message = ""
    obj = get_object_or_404(TarotJoueur, id=record_id)
    form = forms.TarotJoueurForm(request.POST or None, instance=obj, request=request)
    if form.is_valid():
        form.save()
        return redirect("v_tarot_participant_select")
    return render(request, "f_crudy_form.html", locals())


def f_tarot_joueur_delete(request, record_id):
    """Delete the given player record, then go back to participant selection."""
    crudy = Crudy(request, APP_NAME)
    obj = get_object_or_404(TarotJoueur, id=record_id)
    obj.delete()
    return redirect("v_tarot_participant_select")


class TarotParticipantListView(CrudyListView):
    """Ordering of the participants around the table."""

    class Meta(CrudyListView.Options):
        application = APP_NAME
        model = TarotParticipant
        title = "Ordre des Participants autour de la table"
        order_by = ('order', 'joueur__pseudo')
        url_order = "v_tarot_participant_order"
        cols_ordered = ['order', 'joueur__pseudo', 'donneur']
        cols = {
            "order": {"title": "Nom du joueur", "hide": True},
            "joueur__pseudo": {"title": "Nom du joueur", "type": "text"},
            "donneur": {"title": "Donneur Initial", "type": "check", "url": "f_tarot_participant_update"},
        }
        url_actions = [
            ("f_tarot_jeu_create", "Initialiser les jeux")
        ]
        url_view = "v_tarot_participant_list"
        help_page = "i_tarot_participant_list.md"
        url_next_page = "v_tarot_jeu_list"

    def get_queryset(self):
        """Generic queryset: participants of the current folder (partie)."""
        crudy = Crudy(self.request, APP_NAME)
        objs = self.meta.model.objects.all()\
            .filter(partie_id=crudy.folder_id)\
            .order_by(*self.meta.order_by)\
            .values(*self.meta.cols_ordered)
        # column sorting (python 3.5+ ordered handling)
        self.objs = self.sort_cols(objs)
        crudy.url_participant_update = 'f_tarot_participant_update'
        crudy.action_param = 0
        if len(self.objs) == 0:
            # nothing to initialize when there is no participant yet
            self.meta.url_actions = []
        return self.objs


def f_tarot_participant_update(request, record_id, checked):
    """Toggle the initial dealer (donneur); only one participant may hold it."""
    crudy = Crudy(request, APP_NAME)
    participants = TarotParticipant.objects.all().filter(partie__id=crudy.folder_id)
    for participant in participants:
        if participant.id == int(record_id):
            # toggle: checked arrives as the string "True"/"False"
            participant.donneur = False if checked == "True" else True
        else:
            participant.donneur = False
        participant.save()
    return redirect("v_tarot_participant_list")


def v_tarot_participant_order(request, record_id, orientation):
    """Move the player up or down in the ordered list."""
    crudy = Crudy(request, APP_NAME)
    iid = int(record_id)
    participant = get_object_or_404(TarotParticipant, id=iid)
    # orientation is +/-1; step of 3 jumps over the neighbour before renumbering
    participant.order += int(orientation) * 3
    participant.save()
    participant.compute_order()
    return redirect("v_tarot_participant_list")


""" Gestion des jeux """


class TarotJeuListView(CrudyListView):
    """List of the deals (jeux) of the current game."""

    class Meta(CrudyListView.Options):
        application = APP_NAME
        model = TarotJeu
        title = "Faites vos Jeux"
        cols_ordered = ['donneur', 'participant__joueur__pseudo', 'participant__id', 'medal', 'score', 'pari', 'partenaire', 'real', 'primes'\
            , 'ptbout', 'poignee1', 'poignee2', 'poignee3', 'misere1', 'misere2', 'grchelem', 'gchelem', 'ptchelem', 'pchelem', 'ppchelem', 'points']
        # NOTE(review): cols below also declares "gpchelem", which is absent from
        # cols_ordered above — confirm whether that column should be queried too.
        cols = {
            "donneur": {"title": "", "type": "position", "tooltip": "Le donneur pour ce tour"},
            "participant__joueur__pseudo": {"title": "Participant", "type": "medal", "url": "v_tarot_jeu_participant"},
            "participant__id": {"hide": True},
            "medal": {"hide": True},
            "score": {"title": "Score", "type": "number", "sort": "v_tarot_jeu_sort"},
            "pari": {"title": "Enchères", "type": "radio", "url": "f_tarot_jeu_pari",
                     "list": [("...", "..."), ("PT", "Petite"), ("PC", "Pouce"), ("GA", "Garde"), ("GS", "Garde Sans"), ("GC", "Garde Contre")]},
            "partenaire": {"title": "Avec", "type": "check", "url": "f_tarot_jeu_partenaire"},
            "real": {"title": "Réal", "type": "point", "url": "f_tarot_jeu_real",
                     "list": [(-30, "- 30"), (-20, "- 20"), (-10, "- 10"), (-1, "- 0"), (0, "0")\
                , (+1, "+ 0"), (+10, "+ 10"), (+20, "+ 20"), (+30, "+ 30"), (+40, "+ 40"), (+50, "+ 50"), (+60, "+ 60")]},
            "primes": {"title": "Primes", "type": "category", "url": "f_tarot_jeu_prime", "category": "prime"},
            # primes
            "ptbout": {"hide": True, "title": "Petit au bout", "type": "check", "url": "f_tarot_jeu_prime", "category": "prime"},
            "poignee1": {"hide": True, "title": "Poignée", "type": "check", "category": "prime"},
            "poignee2": {"hide": True, "title": "Double Poignée", "type": "check", "category": "prime"},
            "poignee3": {"hide": True, "title": "Triple Poignée", "type": "check", "category": "prime"},
            "misere1": {"hide": True, "title": "Misère d'Atout", "type": "check", "category": "prime"},
            "misere2": {"hide": True, "title": "Misère de Tête", "type": "check", "category": "prime"},
            "grchelem": {"hide": True, "title": "Grand Chelem", "type": "check", "category": "prime"},
            "gchelem": {"hide": True, "title": "Grand Chelem non annoncé", "type": "check", "category": "prime"},
            "gpchelem": {"hide": True, "title": "Grand Chelem perdu", "type": "check", "category": "prime"},
            "ptchelem": {"hide": True, "title": "Petit Chelem", "type": "check", "category": "prime"},
            "pchelem": {"hide": True, "title": "Petit Chelem non annoncé", "type": "check", "category": "prime"},
            "ppchelem": {"hide": True, "title": "Petit Chelem perdu", "type": "check", "category": "prime"},
            "points": {"title": "Points", "type": "number"},
            "param": {"medal": "participant__id"},
        }
        url_view = "v_tarot_jeu_list"

    def dispatch(self, request, *args, **kwargs):
        """dispatch is called when the class instance loads; captures URL kwargs."""
        self.sort = kwargs.get('sort', None)
        self.page = kwargs.get('page')
        # NOTE(review): args/kwargs are forwarded positionally, not unpacked
        # (super().dispatch(request, *args, **kwargs)) — confirm intended.
        return super().dispatch(request, args, kwargs)

    def get_queryset(self):
        """Provide the data displayed by the view (one page = one deal)."""
        crudy = Crudy(self.request, APP_NAME)
        crudy.add_title = "Ajouter un jeux"
        # take the requested sort column into account
        if self.sort == "score":
            crudy.sort = "score"
        if self.sort == "participant":
            crudy.sort = "participant"
        if crudy.sort == "score":
            order_by = ('jeu', '-score',)
        else:
            order_by = ('jeu', 'participant__order',)
        partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
        crudy.modified = partie.modified
        objs = self.meta.model.objects.all()\
            .filter(participant__partie__id__exact=crudy.folder_id)\
            .order_by(*order_by)\
            .values(*self.meta.cols_ordered)
        # Init cols
        b_calcul_realised = False
        for row in objs:
            # fill the "primes" column from every column of category "prime"
            primes = []
            for key, col in self.meta.cols_list:
                if col.get("category") == "prime":
                    if row[key]:
                        primes.append((col.get("title")))
            if len(primes) == 0:
                primes.append(("0"))
            row["primes"] = primes
            # reset "real" when there is no bid
            # NOTE(review): the empty bid is "..." elsewhere in this file —
            # confirm the "." comparison here is intentional.
            if row["pari"] == ".":
                row["real"] = 99
        objs_all = self.sort_cols(objs)
        self.meta.url_actions = []
        qparticipant = TarotParticipant.objects.all().filter(partie__id=crudy.folder_id).count()
        if qparticipant > 0:
            # one page per deal: page size == number of participants
            self.paginator = Paginator(objs_all, qparticipant)
            self.objs = self.paginator.page(self.page)
            for row in self.objs:
                if row.get("points") != 0:
                    b_calcul_realised = True
            crudy.action_param = self.page
            crudy.jeu = int(self.page)
            crudy.url_sort = 'v_tarot_jeu_sort'
            partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
            crudy.jeu_current = partie.jeu
            if crudy.modified:
                self.meta.url_actions.append(("f_tarot_jeu_compute", "Calculer les points"))
            else:
                # allow adding a new deal only on the last page, once computed
                if int(self.page) == self.paginator.num_pages and b_calcul_realised:
                    self.meta.url_add = "f_tarot_jeu_add"
        # hide the partner column for 3- or 4-player games
        if qparticipant == 3 or qparticipant == 4:
            self.meta.cols["partenaire"]["hide"] = True
        else:
            self.meta.cols["partenaire"]["hide"] = False
        crudy.url_sort = 'v_tarot_jeu_sort'
        return self.objs


def f_tarot_jeu_create(request, id):
    """Create the deals (rounds) of the game."""
    crudy = Crudy(request, APP_NAME)
    jeu = TarotJeu()
    jeu.create_jeux(crudy)
    return redirect("v_tarot_jeu_list", 1)


def f_tarot_jeu_add(request):
    """Append one deal to the game and jump to it."""
    crudy = Crudy(request, APP_NAME)
    jeu = TarotJeu()
    jeu.add_jeux(crudy)
    partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
    return redirect("v_tarot_jeu_list", partie.jeu)


# base contract value per bid
encheres = {
    "PT": 10,
    "PC": 20,
    "GA": 40,
    "GS": 60,
    "GC": 80,
}

# bonus (prime) values; negative = lost slam penalties
primes = {
    "ptbout": 10,
    "misere1": 10,
    "misere2": 10,
    "poignee1": 20,
    "poignee2": 30,
    "poignee3": 40,
    "grchelem": 400,
    "gchelem": 200,
    "gpchelem": -200,
    "ptchelem": 200,
    "pchelem": 100,
    "ppchelem": -100,
}


def f_tarot_jeu_compute(request, ijeu):
    """Compute each deal's points, the running score and the player rankings."""
    crudy = Crudy(request, APP_NAME)
    jeux = TarotJeu.objects.all().filter(participant__partie__id=crudy.folder_id).order_by("jeu", "-pari")
    participants = TarotParticipant.objects.all().filter(partie__id=crudy.folder_id)
    score = {}
    miseres = {}
    points = 0
    ijeu = 0
    # POINTS COMPUTATION (break on deal number)
    for jeu in jeux:
        if ijeu != jeu.jeu:
            # deal change: store points and score of the previous deal's players
            if ijeu != 0:
                jjs = TarotJeu.objects.all().filter(participant__partie__id=crudy.folder_id, jeu=ijeu)
                for jj in jjs:
                    jj.points = 0
                    if jj.prenneur:
                        if participants.count() == 3:
                            jj.points = points * 1  # taker is also his own partner
                        elif participants.count() == 4:
                            jj.points = points * 2  # taker is also his own partner
                        elif participants.count() == 5:
                            jj.points = points * 2
                        elif participants.count() == 6:
                            jj.points = points * 2
                    if jj.partenaire:
                        if jj.prenneur and participants.count() > 4:
                            jj.points += points * 2
                        else:
                            jj.points += points
                    if not jj.prenneur and not jj.partenaire:
                        jj.points = points * -1
                    # miseres: holder collects from every other player
                    for j_id in miseres:
                        if j_id == jj.participant.id:
                            jj.points += miseres[j_id] * (participants.count() - 1)
                        else:
                            jj.points += miseres[j_id] * -1
                    # with 6 players the dealer sits out and scores nothing
                    if participants.count() == 6 and jj.donneur:
                        jj.points = 0
                    score[jj.participant.id] = score.get(jj.participant.id, 0) + jj.points
                    jj.score = score[jj.participant.id]
                    jj.save()
            # prepare the next deal
            miseres = {}
            points = 0
            ijeu = jeu.jeu
        # value of the deal to spread over the players
        if jeu.prenneur:
            points = encheres[jeu.pari]
            if jeu.real > 0:
                if jeu.real == 1:
                    points = points  # +1 encodes "made exactly" (+ 0)
                else:
                    points = points + jeu.real
            else:
                if jeu.real == -1:
                    points = points * -1  # -1 encodes "lost exactly" (- 0)
                else:
                    points = points * -1 + jeu.real
        if jeu.prenneur or jeu.partenaire:
            if jeu.ptbout:
                points = points + 10
            if jeu.real > 0:
                # contract made: add announced bonuses
                if jeu.poignee1:
                    points = points + 20
                if jeu.poignee2:
                    points = points + 30
                if jeu.poignee3:
                    points = points + 40
                if jeu.ptchelem:
                    points = points + 200
                if jeu.pchelem:
                    points = points + 100
                if jeu.grchelem:
                    points = points + 400
                if jeu.gchelem:
                    points = points + 200
            else:
                # contract lost: slam penalties
                if jeu.gpchelem:
                    points = points - 200
                if jeu.ppchelem:
                    points = points - 100
        else:
            if jeu.ptbout:
                points = points - 10
        # miseres accumulate per holder, settled at deal change
        if jeu.misere1:
            miseres[jeu.participant.id] = miseres.get(jeu.participant.id, 0) + 10
        if jeu.misere2:
            miseres[jeu.participant.id] = miseres.get(jeu.participant.id, 0) + 10
    # LAST DEAL: same settlement loop once more (no following deal change)
    if ijeu != 0:
        jjs = TarotJeu.objects.all().filter(participant__partie__id=crudy.folder_id, jeu=ijeu)
        for jj in jjs:
            jj.points = 0
            if jj.prenneur:
                if participants.count() == 3:
                    jj.points = points * 1
                elif participants.count() == 4:
                    jj.points = points * 2
                elif participants.count() == 5:
                    jj.points = points * 2
                elif participants.count() == 6:
                    jj.points = points * 2
            if jj.partenaire:
                if jj.prenneur and participants.count() > 4:
                    jj.points += points * 2
                else:
                    jj.points += points
            if not jj.prenneur and not jj.partenaire:
                jj.points = points * -1
            # miseres
            for j_id in miseres:
                if j_id == jj.participant.id:
                    jj.points += miseres[j_id] * (participants.count() - 1)
                else:
                    jj.points += miseres[j_id] * -1
            if participants.count() == 6 and jj.donneur:
                jj.points = 0
            score[jj.participant.id] = score.get(jj.participant.id, 0) + jj.points
            jj.score = score[jj.participant.id]
            jj.save()
    # Medal attribution (1000 is a sentinel for "not yet assigned")
    jeux = TarotJeu.objects.all()\
        .filter(participant__partie__id=crudy.folder_id)\
        .order_by("jeu", "-score")
    gold = 1000
    silver = 1000
    bronze = 1000
    rupt_jeu = 0
    last_pk = 0
    for jeu in jeux:
        if rupt_jeu != jeu.jeu:
            # deal change
            rupt_jeu = jeu.jeu
            gold = 1000
            silver = 1000
            bronze = 1000
            # chocolate medal for the last of the previous deal
            if last_pk != 0:
                last_jeu = get_object_or_404(TarotJeu, pk=last_pk)
                last_jeu.medal = 9
                last_jeu.save()
        last_pk = jeu.pk
        jeu.medal = 0
        if gold == 1000:
            gold = jeu.score
            jeu.medal = 1
        elif gold == jeu.score:
            jeu.medal = 1
        elif silver == 1000:
            silver = jeu.score
            jeu.medal = 2
        elif silver == jeu.score:
            jeu.medal = 2
        elif bronze == 1000:
            bronze = jeu.score
            jeu.medal = 3
        elif bronze == jeu.score:
            jeu.medal = 3
        jeu.save()
    # chocolate medal of the very last deal
    if last_pk != 0:
        last_jeu = get_object_or_404(TarotJeu, pk=last_pk)
        last_jeu.medal = 9
        last_jeu.save()
    # update participants' aggregate score
    for j_id in score:
        participant = get_object_or_404(TarotParticipant, id=j_id)
        participant.score = score.get(j_id)
        participant.save()
    partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
    partie.modified = False
    partie.save()
    return redirect("v_tarot_jeu_list", ijeu)


def f_tarot_jeu_pari(request, record_id):
    """Enter a player's bid; a confirmed bid makes him the taker."""
    crudy = Crudy(request, APP_NAME)
    crudy.layout = "form"
    obj = get_object_or_404(TarotJeu, id=record_id)
    title = "Enchère de %s" % (obj.participant.joueur.pseudo.upper())
    form = forms.TarotJeuPariForm(request.POST or None, request=request, instance=obj)
    if form.is_valid():
        form.save()
        partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
        jeu_courant = get_object_or_404(TarotJeu, id=record_id)
        jeu_courant.prenneur = True
        if partie.qparticipant < 5:
            # under 5 players the taker plays alone, i.e. is his own partner
            jeu_courant.partenaire = True
        jeu_courant.real = 0
        jeu_courant.save()
        if jeu_courant.pari != "...":
            # clear the bids of all the other players of this deal
            jeux = TarotJeu.objects.all().filter(participant__partie__id=obj.participant.partie_id, jeu=obj.jeu)
            for jeu in jeux:
                if jeu.id != jeu_courant.id:
                    jeu.pari = "..."
                    jeu.prenneur = False
                    if partie.qparticipant < 5:
                        jeu.partenaire = False
                    jeu.real = 99
                    jeu.save()
        return redirect("v_tarot_jeu_list", obj.jeu)
    return render(request, "f_crudy_form.html", locals())


def f_tarot_jeu_real(request, record_id):
    """Enter the realised points; marks the game as needing recomputation."""
    crudy = Crudy(request, APP_NAME)
    crudy.layout = "form"
    obj = get_object_or_404(TarotJeu, id=record_id)
    title = "Nombre de points réalisé par %s" % (obj.participant.joueur.pseudo.upper())
    form = forms.TarotJeuRealForm(request.POST or None, request=request, instance=obj)
    if form.is_valid():
        form.save()
        partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
        partie.modified = True
        partie.save()
        return redirect("v_tarot_jeu_list", obj.jeu)
    return render(request, "f_crudy_form.html", locals())


def f_tarot_jeu_partenaire(request, record_id, checked):
    """Toggle the taker's partner; at most one partner per deal."""
    crudy = Crudy(request, APP_NAME)
    crudy.layout = "form"
    tarotJeu = get_object_or_404(TarotJeu, id=record_id)
    jeux = TarotJeu.objects.all().filter(participant__partie_id=tarotJeu.participant.partie_id, jeu=tarotJeu.jeu)
    for jeu in jeux:
        if jeu.id == int(record_id):
            # toggle: checked arrives as the string "True"/"False"
            jeu.partenaire = False if checked == "True" else True
        else:
            jeu.partenaire = False
        jeu.save()
    partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
    partie.modified = True
    partie.save()
    return redirect("v_tarot_jeu_list", tarotJeu.jeu)


def f_tarot_jeu_prime(request, record_id):
    """Enter the bonuses (primes) of a deal."""
    crudy = Crudy(request, APP_NAME)
    crudy.layout = "form"
    obj = get_object_or_404(TarotJeu, id=record_id)
    title = "Saisie des primes pour %s" % (obj.participant.joueur.pseudo.upper())
    form = forms.TarotJeuPrimeForm(request.POST or None, request=request, instance=obj)
    if form.is_valid():
        form.save()
        # only one "petit au bout" per deal
        if obj.ptbout:
            jeux = TarotJeu.objects.all().filter(participant__partie__id=obj.participant.partie_id, jeu=obj.jeu)
            for jeu in jeux:
                if jeu.id != obj.id:
                    jeu.ptbout = False
                    jeu.save()
        partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
        partie.modified = True
        partie.save()
        return redirect("v_tarot_jeu_list", obj.jeu)
    return render(request, "f_crudy_form.html", locals())


class TarotJeuParticipantView(CrudyListView):
    """List of the deals of a single player."""

    class Meta(CrudyListView.Options):
        application = APP_NAME
        model = TarotJeu
        title = "Jeux de"
        cols_ordered = ['jeu', 'participant__joueur__pseudo', "participant__id", 'medal', 'score', 'pari', 'partenaire', 'real', 'primes'\
            , 'ptbout', 'poignee1', 'poignee2', 'poignee3', 'misere1', 'misere2', 'grchelem', 'gchelem', 'ptchelem', 'pchelem', 'ppchelem', 'points', 'prise']
        cols = {
            "jeu": {"title": "Jeu", "type": "numeric"},
            "participant__joueur__pseudo": {"title": "Participant", "type": "medal", "url": "v_tarot_jeu_list"},
            "participant__id": {"hide": True},
            "medal": {"hide": True},
            "score": {"title": "Score", "type": "number"},
            "pari": {"title": "Enchères", "type": "radio", "url": "f_tarot_jeu_pari", "disabled": True,
                     "list": [("...", "..."), ("PT", "Petite"), ("PC", "Pouce"), ("GA", "Garde"), ("GS", "Garde Sans"), ("GC", "Garde Contre")]},
            "partenaire": {"title": "Avec", "type": "check", "url": "f_tarot_jeu_partenaire", "disabled": True},
            "real": {"title": "Réal", "type": "point", "url": "f_tarot_jeu_real", "disabled": True,
                     "list": [(-30, "- 30"), (-20, "- 20"), (-10, "- 10"), (-1, "- 0"), (0, "0")\
                , (+1, "+ 0"), (+10, "+ 10"), (+20, "+ 20"), (+30, "+ 30"), (+40, "+ 40"), (+50, "+ 50"), (+60, "+ 60")]},
            "primes": {"title": "Primes", "type": "category", "url": "f_tarot_jeu_prime", "category": "prime", "disabled": True},
            # primes
            "ptbout": {"hide": True, "title": "Petit au bout", "type": "check", "url": "f_tarot_jeu_prime", "category": "prime"},
            "poignee1": {"hide": True, "title": "Poignée", "type": "check", "category": "prime"},
            "poignee2": {"hide": True, "title": "Double Poignée", "type": "check", "category": "prime"},
            "poignee3": {"hide": True, "title": "Triple Poignée", "type": "check", "category": "prime"},
            "misere1": {"hide": True, "title": "Misère d'Atout", "type": "check", "category": "prime"},
            "misere2": {"hide": True, "title": "Misère de Tête", "type": "check", "category": "prime"},
            "grchelem": {"hide": True, "title": "Grand Chelem", "type": "check", "category": "prime"},
            "gchelem": {"hide": True, "title": "Grand Chelem non annoncé", "type": "check", "category": "prime"},
            "gpchelem": {"hide": True, "title": "Grand Chelem perdu", "type": "check", "category": "prime"},
            "ptchelem": {"hide": True, "title": "Petit Chelem", "type": "check", "category": "prime"},
            "pchelem": {"hide": True, "title": "Petit Chelem non annoncé", "type": "check", "category": "prime"},
            "ppchelem": {"hide": True, "title": "Petit Chelem perdu", "type": "check", "category": "prime"},
            "points": {"title": "Points", "type": "number"},
            "prise": {"title": "Contre", "type": "radio", "url": "f_tarot_jeu_pari", "disabled": True\
                , "list": [("...", "..."), ("PT", "Petite"), ("PC", "Pouce"), ("GA", "Garde"), ("GS", "Garde Sans"), ("GC", "Garde Contre")]},
            "param": {"medal": "jeu"}
        }
        url_view = "v_tarot_jeu_participant"

    def dispatch(self, request, *args, **kwargs):
        """dispatch is called when the class instance loads; captures URL kwargs."""
        self.participant_id = kwargs.get('participant_id')
        # NOTE(review): args/kwargs are forwarded positionally, not unpacked —
        # same pattern as TarotJeuListView.dispatch; confirm intended.
        return super().dispatch(request, args, kwargs)

    def get_queryset(self):
        """Provide the data displayed by the view: all deals of one participant."""
        crudy = Crudy(self.request, APP_NAME)
        self.meta.url_back = "/tarot/jeu/list/%s" % self.participant_id
        # subquery: the taker's bid on the same deal (excluding this row)
        prise = TarotJeu.objects.filter(participant__partie__id=crudy.folder_id, jeu=OuterRef('jeu'), prenneur=True)\
            .exclude(pk=OuterRef('pk'))
        partie = get_object_or_404(TarotPartie, id=crudy.folder_id)
        objs = TarotJeu.objects.filter(participant__partie__id=crudy.folder_id, participant__id=self.participant_id)\
            .order_by('jeu')\
            .values(*self.meta.cols_ordered, prise=Subquery(prise.values('pari')))
        # column sorting (python 3.5+ ordered handling)
        self.objs = self.sort_cols(objs)
        # sort columns in cols order + fill the computed columns
        participant_name = ""
        for row in self.objs:
            participant_name = row["participant__joueur__pseudo"]
            primes = []
            for key, col in self.meta.cols_list:
                # fill the "primes" column from every column of category "prime"
                if col.get("category") == "prime":
                    if row[key]:
                        primes.append((col.get("title")))
            if len(primes) == 0:
                primes.append(("0"))
            row["primes"] = primes
        # hide the partner column for 3- or 4-player games
        qparticipant = TarotParticipant.objects.all().filter(partie__id__exact=crudy.folder_id).count()
        self.meta.title = 'Jeux de "%s"' % participant_name
        if qparticipant == 3 or qparticipant == 4:
            self.meta.cols["partenaire"]["hide"] = True
        else:
            self.meta.cols["partenaire"]["hide"] = False
        return self.objs
# Package version string.
__version__ = '0.1'

# Names exported by `from <package> import *`.
# NOTE(review): the last four entries keep a ".py" suffix ("workbench.py",
# "workbench_gui.py", "setup.py", "update.py") — `__all__` normally lists
# importable module names without the extension; confirm these are intentional.
__all__ = ["hamilton", "objects", "hamutilities", "structure", "utilities",
           "gui", "masterjobs", "workbench.py", "workbench_gui.py",
           "setup.py", "update.py"]
import cv2
import os
import gco
import argparse
import numpy as np
import pickle as pkl

from tqdm import tqdm, trange
from glob import glob
from scipy import signal
from opendr.camera import ProjectPoints
from sklearn.mixture import GaussianMixture

from util.visibility import VisibilityChecker
from util.labels import LABELS_REDUCED, LABEL_COMP, LABELS_MIXTURES, read_segmentation
from tex.texture import TextureData, Texture


def main(data_file, frame_dir, segm_dir, out_file, num_iter):
    """Build a stitched 1000x1000 body texture from per-frame unwraps.

    Pipeline: (1) unwrap every frame/segmentation into texture space,
    (2) vote a per-texel garment label with graph-cut regularization,
    (3) iteratively stitch the per-frame unwraps into one texture.
    """
    # Step 1: Make unwraps
    data = pkl.load(open(data_file, 'rb'))

    segm_files = np.array(sorted(glob(os.path.join(segm_dir, '*.png')) + glob(os.path.join(segm_dir, '*.jpg'))))
    frame_files = np.array(sorted(glob(os.path.join(frame_dir, '*.png')) + glob(os.path.join(frame_dir, '*.jpg'))))

    vt = np.load('assets/basicModel_vt.npy')
    ft = np.load('assets/basicModel_ft.npy')
    f = np.load('assets/basicModel_f.npy')

    verts = data['vertices']
    camera_c = data['camera_c']
    camera_f = data['camera_f']
    width = data['width']
    height = data['height']

    camera = ProjectPoints(t=np.zeros(3), rt=np.array([-np.pi, 0., 0.]), c=camera_c, f=camera_f, k=np.zeros(5))
    visibility = VisibilityChecker(width, height, f)

    texture = TextureData(1000, f, vt, ft, visibility)

    isos, vises, iso_segms = [], [], []

    print('Unwrapping inputs...')
    for i, (v, frame_file, segm_file) in enumerate(zip(tqdm(verts), frame_files, segm_files)):
        frame = cv2.imread(frame_file) / 255.
        segm = read_segmentation(segm_file) / 255.
        # foreground mask: any non-zero segmentation channel
        mask = np.float32(np.any(segm > 0, axis=-1))

        camera.set(v=v)

        vis, iso, iso_segm = texture.get_data(frame, camera, mask, segm)

        vises.append(vis)
        isos.append(iso)
        iso_segms.append(np.uint8(iso_segm * 255))

    # Step 2: Segm vote gmm
    iso_mask = cv2.imread('assets/tex_mask_1000.png', flags=cv2.IMREAD_GRAYSCALE) / 255.
    iso_mask = cv2.resize(iso_mask, (1000, 1000), interpolation=cv2.INTER_NEAREST)

    # per-texel, per-label accumulated vote weight
    voting = np.zeros((1000, 1000, len(LABELS_REDUCED)))

    gmms = {}
    gmm_pixels = {}

    for color_id in LABELS_REDUCED:
        gmms[color_id] = GaussianMixture(LABELS_MIXTURES[color_id])
        gmm_pixels[color_id] = []

    for frame, segm, vis in zip(isos, iso_segms, vises):
        # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) / 255.
        tex_segm = read_segmentation(segm)
        # lower "vis" means better visibility here: weight = sqrt(1 - vis)
        tex_weights = 1 - vis
        tex_weights = np.sqrt(tex_weights)

        for i, color_id in enumerate(LABELS_REDUCED):
            if color_id != 'Unseen' and color_id != 'BG':
                where = np.all(tex_segm == LABELS_REDUCED[color_id], axis=2)

                voting[where, i] += tex_weights[where]
                gmm_pixels[color_id].extend(frame[where].tolist())

    print('Fitting GMMs...')
    for color_id in tqdm(LABELS_REDUCED):
        if gmm_pixels[color_id]:
            gmms[color_id].fit(np.array(gmm_pixels[color_id]))

    for i, color_id in enumerate(LABELS_REDUCED):
        if color_id == 'Unseen' or color_id == 'BG':
            # NOTE(review): `voting` is (H, W, n_labels); `voting[:, i]` addresses
            # spatial column i across all labels, not label channel i
            # (`voting[..., i]`) — confirm which is intended.
            voting[:, i] = -10

    # outside the texture mask everything is background (label 0)
    voting[iso_mask == 0] = 0
    voting[iso_mask == 0, 0] = 1

    # unary data costs for graph cut: low cost where votes are high
    unaries = np.ascontiguousarray((1 - voting / len(isos)) * 10)
    pairwise = np.ascontiguousarray(LABEL_COMP)

    seams = np.load('assets/basicModel_seams.npy')
    edge_idx = pkl.load(open('assets/basicModel_edge_idx_1000_.pkl', 'rb'))

    # detect mask-interior texels via finite differences of the mask
    dr_v = signal.convolve2d(iso_mask, [[-1, 1]])[:, 1:]
    dr_h = signal.convolve2d(iso_mask, [[-1], [1]])[1:, :]

    where_v = iso_mask - dr_v
    where_h = iso_mask - dr_h

    idxs = np.arange(1000 ** 2).reshape(1000, 1000)

    # 4-neighborhood edges between adjacent in-mask texels
    v_edges_from = idxs[:-1, :][where_v[:-1, :] == 1].flatten()
    v_edges_to = idxs[1:, :][where_v[:-1, :] == 1].flatten()

    h_edges_from = idxs[:, :-1][where_h[:, :-1] == 1].flatten()
    h_edges_to = idxs[:, 1:][where_h[:, :-1] == 1].flatten()

    # extra edges joining texels across UV seams
    s_edges_from, s_edges_to = edges_seams(seams, 1000, edge_idx)

    edges_from = np.r_[v_edges_from, h_edges_from, s_edges_from]
    edges_to = np.r_[v_edges_to, h_edges_to, s_edges_to]
    edges_w = np.r_[np.ones_like(v_edges_from), np.ones_like(h_edges_from), np.ones_like(s_edges_from)]

    # alpha-beta swap graph cut over all texels
    gc = gco.GCO()
    gc.create_general_graph(1000 ** 2, pairwise.shape[0], True)
    gc.set_data_cost(unaries.reshape(1000 ** 2, pairwise.shape[0]))
    gc.set_all_neighbors(edges_from, edges_to, edges_w)
    gc.set_smooth_cost(pairwise)
    gc.swap(-1)

    labels = gc.get_labels().reshape(1000, 1000)
    gc.destroy_graph()

    segm_colors = np.zeros((1000, 1000, 3), dtype=np.uint8)

    for i, color_id in enumerate(LABELS_REDUCED):
        segm_colors[labels == i] = LABELS_REDUCED[color_id]

    # Step 3: Stitch texture
    seams = np.load('assets/basicModel_seams.npy')
    mask = cv2.imread('assets/tex_mask_1000.png', flags=cv2.IMREAD_GRAYSCALE) / 255.
    segm_template = read_segmentation(segm_colors)

    num_labels = len(isos)

    texture = Texture(1000, seams, mask, segm_template, gmms)

    texture_agg = isos[0]
    visibility_agg = np.array(vises[0])

    tex, _ = texture.add_iso(texture_agg, visibility_agg, np.zeros_like(visibility_agg), inpaint=False)

    print('Aggregating texture...')
    for i in trange(num_iter):
        # NOTE(review): the third add_iso argument is an array above but the
        # random frame index `rl` here — confirm the expected parameter type.
        rl = np.random.choice(num_labels)
        texture_agg, labels = texture.add_iso(isos[rl], vises[rl], rl, inpaint=i == (num_iter-1))

    print('saving {}...'.format(os.path.basename(out_file)))
    cv2.imwrite(out_file, np.uint8(255 * texture_agg))


def edges_seams(seams, tex_res, edge_idx):
    """Return (from, to) texel-index pairs joining the two sides of each UV seam.

    When the two seam sides have different sample counts, the shorter side is
    nearest-neighbor resampled so the pairs line up one-to-one.
    """
    edges = np.zeros((0, 2), dtype=np.int32)

    for _, e0, _, e1 in seams:
        # (row, col) -> flat texel index
        idx0 = np.array(edge_idx[e0][0]) * tex_res + np.array(edge_idx[e0][1])
        idx1 = np.array(edge_idx[e1][0]) * tex_res + np.array(edge_idx[e1][1])

        if len(idx0) and len(idx1):
            if idx0.shape[0] < idx1.shape[0]:
                idx0 = cv2.resize(idx0.reshape(-1, 1), (1, idx1.shape[0]), interpolation=cv2.INTER_NEAREST)
            elif idx0.shape[0] > idx1.shape[0]:
                idx1 = cv2.resize(idx1.reshape(-1, 1), (1, idx0.shape[0]), interpolation=cv2.INTER_NEAREST)

            edges_new = np.hstack((idx0.reshape(-1, 1), idx1.reshape(-1, 1)))
            edges = np.vstack((edges, edges_new))

    # canonical order within each pair
    edges = np.sort(edges, axis=1)

    return edges[:, 0], edges[:, 1]


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'data_file',
        type=str,
        help="pkl data file")
    parser.add_argument(
        'frame_dir',
        type=str,
        help="Directory that contains frame files")
    parser.add_argument(
        'segm_dir',
        type=str,
        help="Directory that contains clothes segmentation files")
    parser.add_argument(
        'out_file',
        type=str,
        help="Texture output file (JPG or PNG)")
    parser.add_argument(
        '--iter', '-t',
        default=15,
        type=int,
        help="Texture optimization steps")

    args = parser.parse_args()

    main(args.data_file, args.frame_dir, args.segm_dir, args.out_file, args.iter)
from .client import HandlerClient
from PyQt5.QtWidgets import QGridLayout


def add_grid_to_layout(grid: list, layout: QGridLayout, start_index: int = 0):
    """Place a 2D grid of widgets into a QGridLayout.

    ``None`` cells are skipped; a widget preceding a run of ``None`` cells
    spans those columns. A tuple cell stacks all its widgets into the same
    grid position.

    :param grid: The Grid to add to the Layout
    :param layout: The Layout to use
    :param start_index: What index to start at
    """
    for row_offset, row in enumerate(grid):
        for col, cell in enumerate(row):
            if cell is None:
                continue
            # span this column plus every trailing None before the next widget
            span = 1
            while col + span < len(row) and row[col + span] is None:
                span += 1
            widgets = cell if isinstance(cell, tuple) else (cell,)
            for widget in widgets:
                layout.addWidget(widget, row_offset + start_index, col, 1, span)
import numpy as np


class LinearRegression:
    """Ordinary least-squares linear regression fitted by batch gradient descent.

    :param lr: learning rate used for each gradient step
    :param n_iters: number of full-batch gradient-descent iterations
    """

    def __init__(self, lr=0.01, n_iters=1000):
        self.lr = lr
        self.n_iters = n_iters
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        """Learn weights and bias from data X (n_samples, n_features) and targets y."""
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0

        # full-batch gradient descent on mean squared error
        for _ in range(self.n_iters):
            residual = (X.dot(self.weights) + self.bias) - y
            grad_w = X.T.dot(residual) / n_samples
            grad_b = residual.sum() / n_samples
            self.weights = self.weights - self.lr * grad_w
            self.bias = self.bias - self.lr * grad_b

    def predict(self, X):
        """Return the linear model's predictions for X."""
        return X.dot(self.weights) + self.bias
#!/usr/bin/env python
# pylint: disable=invalid-name,bare-except

# Script to output table with the count of triage related tickets, grouped by week for the past 3 weeks

import json

import tabulate

import jira_cmd


class TriageStats:
    """Collects per-week counts of triage tickets through the jira_cmd helper."""

    def __init__(self):
        parser = jira_cmd.build_parser()

        # Neutralize argparse's error handler so parsing an empty argv
        # never calls sys.exit().
        def my_exit(msg):
            pass

        parser.error = my_exit
        self.args = parser.parse_args([])
        self.args.print_json = True
        self.args.linked_issues = True
        # issue key -> {key, summary, status, "week -N": count}
        self.stats = {}

    def add_past_week(self, past_week_num):
        """Query tickets created during the N-th past week and merge the counts.

        :param past_week_num: 1 for the most recent week, 2 for the week before, ...
        """
        # BUGFIX: the closing double quote after 'Assisted-installer Triage'
        # was missing, producing invalid JQL (the week-1 variant below had it).
        self.args.search_query = (
            f'project = MGMT AND component = "Assisted-installer Triage" AND '
            f'labels in (AI_CLOUD_TRIAGE) AND created < -{past_week_num - 1}w AND created >= -{past_week_num}w')
        if past_week_num == 1:
            # most recent week has no lower creation bound
            self.args.search_query = (
                'project = MGMT AND component = "Assisted-installer Triage" AND '
                'labels in (AI_CLOUD_TRIAGE) AND created >= -1w')
        j = jira_cmd.main(self.args)
        data = json.loads(j)
        for i in data:
            if i['key'] not in self.stats:
                self.stats[i['key']] = dict(key=i['key'], summary=i['summary'], status=i['status'])
            self.stats[i['key']]['week -{}'.format(past_week_num)] = i['count']


if __name__ == "__main__":
    a = TriageStats()
    a.add_past_week(1)
    a.add_past_week(2)
    a.add_past_week(3)
    values = list(a.stats.values())
    print(tabulate.tabulate(values, headers="keys", tablefmt="github"))
#!/usr/bin/python # -*- encoding: utf-8 -*- import time import logging import os import sys import torch import torch.nn as nn from torch.utils.data import DataLoader import numpy as np from backbone import Embeddor from loss import BottleneckLoss from market1501 import Market1501 from balanced_sampler import BalancedSampler ## logging if not os.path.exists('./res/'): os.makedirs('./res/') logfile = 'sft_reid-{}.log'.format(time.strftime('%Y-%m-%d-%H-%M-%S')) logfile = os.path.join('res', logfile) FORMAT = '%(levelname)s %(filename)s(%(lineno)d): %(message)s' logging.basicConfig(level=logging.INFO, format=FORMAT, filename=logfile) logging.root.addHandler(logging.StreamHandler()) logger = logging.getLogger(__name__) def lr_scheduler(epoch, optimizer): ## TODO: warmup epoch or iter ? warmup_epoch = 20 warmup_lr = 1e-3 lr_steps = [80, 100] start_lr = 1e-1 lr_factor = 0.1 if epoch <= warmup_epoch: # lr warmup warmup_scale = (start_lr / warmup_lr) ** (1.0 / warmup_epoch) lr = warmup_lr * (warmup_scale ** epoch) for param_group in optimizer.param_groups: param_group['lr'] = lr optimizer.defaults['lr'] = lr else: # lr jump for i, el in enumerate(lr_steps): if epoch == el: lr = start_lr * (lr_factor ** (i + 1)) logger.info('====> LR is set to: {}'.format(lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr optimizer.defaults['lr'] = lr lrs = [round(el['lr'], 6) for el in optimizer.param_groups] return optimizer, lrs def train(): ## data P, K = 16, 8 batchsize = P * K logger.info('creating dataloader') dataset = Market1501('./dataset/Market-1501-v15.09.15/bounding_box_train', is_train = True) num_classes = dataset.get_num_classes() sampler = BalancedSampler(dataset, P, K) dl = DataLoader(dataset, batch_sampler = sampler, num_workers = 8) ## network and loss logger.info('setup model and loss') bottleneck_loss = BottleneckLoss(2048, num_classes) bottleneck_loss.cuda() bottleneck_loss.train() net = Embeddor() net.cuda() net.train() # net = 
nn.DataParallel(net) ## optimizer logger.info('creating optimizer') lr = 0.1 momentum = 0.9 params = list(net.parameters()) params += list(bottleneck_loss.parameters()) optim = torch.optim.SGD(params, lr=lr, momentum=momentum) ## training logger.info('start training') n_epochs = 140 t_start = time.time() loss_it = [] for ep in range(n_epochs): optim, lrs = lr_scheduler(ep, optim) for it, (imgs, lbs, _) in enumerate(dl): imgs = imgs.cuda() lbs = lbs.cuda() optim.zero_grad() embs_org, embs_sft = net(imgs) loss_org = bottleneck_loss(embs_org, lbs) loss_sft = bottleneck_loss(embs_sft, lbs) loss = loss_org + loss_sft loss.backward() optim.step() loss = loss.cpu().item() loss_it.append(loss) # print logging message t_end = time.time() t_interval = t_end - t_start log_loss = sum(loss_it) / len(loss_it) msg = ', '.join([ 'epoch: {}', 'loss: {:.4f}', 'lr: {}', 'time: {:.4f}' ]).format(ep, log_loss, lrs, t_interval) logger.info(msg) loss_it = [] t_start = t_end ## save model if hasattr(net, 'module'): state_dict = net.module.state_dict() else: state_dict = net.state_dict() torch.save(state_dict, './res/model_final.pth') logger.info('\nTraining done, model saved to {}\n\n'.format('./res/model_final.pth')) if __name__ == '__main__': train()
"""This module provides the Pins contrib class"""
import json
import time
import datetime

from ..handlers import CommandHandler, ReactionHandler
from ..dataclasses import Message, Reaction, MessageReaction
from .._i18n import _


class Pins(object):
    """
    This class provides a system for pinning messages.

    The messages can be pinned with the "pin" command by passing either
    text as an argument, or by replying with it to a message. By setting
    the `confirms` kwarg, this class can requre "confirmation" of a pin
    by a set amount of reactions.

    Pinned messages can be listed by the "list" command.

    Both commands can be renamed with the `pin_cmd` and `list_cmd` kwargs.

    Pinned messages are stored in json format at the location specified
    by `db_file`.

    This class provides two commands, so it has to be registered as:
    `bot.register(*pins.handlers())`
    """

    # in-memory list of pins; each pin is [timestamp, author, text]
    _db = []
    # path of the JSON file backing _db
    _db_file = None
    # command names (configurable via kwargs)
    _pin_cmd = None
    _list_cmd = None
    # number of YES reactions required before a pin is accepted (0 = none)
    _confirms = 0

    def __init__(self, db_file, pin_cmd='pin', list_cmd='list', confirms=0):
        self._db_file = db_file
        self._pin_cmd = pin_cmd
        self._list_cmd = list_cmd
        self._confirms = confirms
        # NOTE(review): raises FileNotFoundError when db_file does not exist
        # yet — confirm first-run behaviour is handled by the caller.
        self._load()

    def _load(self):
        # Read the whole pin database from disk.
        with open(self._db_file) as fd:
            self._db = json.load(fd)

    def _save(self):
        # Persist the whole pin database to disk.
        with open(self._db_file, 'w') as fd:
            json.dump(self._db, fd)

    def add_pin(self, author, text, timestamp=None):
        """Append a pin ([timestamp, author, text]) and persist it."""
        if timestamp is None:
            timestamp = time.time()
        self._db.append([timestamp, author, text])
        self._save()

    def _format_pin(self, pin):
        # Render one pin as "author, YYYY-MM-DD\n===\ntext".
        timestamp = datetime.datetime\
            .fromtimestamp(pin[0]).strftime('%Y-%m-%d')
        out = '\n'.join([
            f'{pin[1]}, {timestamp}',
            f'===',
            f'{pin[2]}',
        ])
        return out

    def get_page(self, n):
        """Return page n (1-based, 5 pins per page, newest first) as text."""
        pins = []
        for pin in self._db[::-1][5*(n-1):5*n]:
            pins.append(self._format_pin(pin))
        return '\n\n'.join(pins)

    def _list_fn(self, message: Message, bot):
        # "list" command: show the requested page (default: first page).
        if message.args.isnumeric():
            n = int(message.args)
        else:
            n = 1
        message.reply(self.get_page(n))

    def _pin_fn(self, message: Message, bot):
        # "pin" command: pin either the command argument text or the
        # message being replied to.
        if message.args:
            author = message.get_author_name()
            text = message.args
            timestamp = message.timestamp
        elif message.replied_to is not None:
            author = message.replied_to.get_author_name()
            text = message.replied_to.text
            timestamp = message.replied_to.timestamp
        else:
            message.reply(_('Please provide text or reply to a message to be pinned'))
            return

        if self._confirms == 0:
            # no confirmation required: pin immediately
            self.add_pin(author, text, timestamp)
            message.reply(_('Message was pinned!'))
        else:
            # ask for confirmation and watch the reply for YES reactions
            mid = message.reply(
                _('Your pin is waiting for confirmation. ') +
                _('Ask {n} people to confirm it by reacting with ').format(n=self._confirms) +
                MessageReaction.YES.value
            )

            def _callback(reaction: Reaction, bot):
                reactions = reaction.message.reactions
                if len(
                    # count YES reactions
                    [k for k, v in reactions.items() if v == MessageReaction.YES]
                ) == self._confirms:
                    self.add_pin(author, text, timestamp)
                    message.reply(_('Message was pinned!'))

            # stop watching after 120 seconds
            bot.register(ReactionHandler(_callback, mid, timeout=120))

    def handlers(self):
        """Returns a list of handlers that need to be registered"""
        handlers = []
        handlers.append(
            CommandHandler(self._list_fn, self._list_cmd)
        )
        handlers.append(
            CommandHandler(self._pin_fn, self._pin_cmd)
        )
        return handlers
# coding:utf-8
import json
from urllib import request
from urllib.parse import quote
import math

import xlwt

# Author: wangzhipan, Email: 1044625113@qq.com
# NOTE: POIs downloaded from AMap (Gaode) use the GCJ-02 ("Mars") coordinate
# system; they must be converted to WGS-84 before further use.


def getpois(cityname, keywords):
    """Fetch all POIs for `keywords` in `cityname`, paging until exhausted.

    :param cityname: city or district name passed to the AMap API
    :param keywords: POI category keyword
    :return: list of raw POI dicts as returned by the API
    """
    page = 1
    poilist = []
    while True:  # keep requesting successive pages until the API runs dry
        result = getpoi_page(cityname, keywords, page)
        print(result)
        result = json.loads(result)  # response body is a JSON string
        # Stop on the last page.  Also stop if the response carries no
        # 'count' field (e.g. an API error payload), which previously
        # raised KeyError and could not terminate the loop cleanly.
        if result.get('count', '0') == '0':
            break
        hand(poilist, result)
        page += 1
    return poilist


def transformlat(lng, lat):
    """Latitude perturbation term of the standard GCJ-02 obfuscation formula."""
    pi = 3.1415926535897932384626
    ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
        0.1 * lng * lat + 0.002 * math.sqrt(abs(lng))
    ret = ret + (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
                 math.sin(2.0 * lng * pi)) * 2.0 / 3.0
    ret = ret + (20.0 * math.sin(lat * pi) + 40.0 *
                 math.sin(lat / 3.0 * pi)) * 2.0 / 3.0
    ret = ret + (160.0 * math.sin(lat / 12.0 * pi) + 320 *
                 math.sin(lat * pi / 30.0)) * 2.0 / 3.0
    return ret


def transformlng(lng, lat):
    """Longitude perturbation term of the standard GCJ-02 obfuscation formula."""
    pi = 3.1415926535897932384626
    ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
        0.1 * lng * lat + 0.1 * math.sqrt(abs(lng))
    ret = ret + (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
                 math.sin(2.0 * lng * pi)) * 2.0 / 3.0
    ret = ret + (20.0 * math.sin(lng * pi) + 40.0 *
                 math.sin(lng / 3.0 * pi)) * 2.0 / 3.0
    ret = ret + (150.0 * math.sin(lng / 12.0 * pi) + 300.0 *
                 math.sin(lng / 30.0 * pi)) * 2.0 / 3.0
    return ret


def GD_to_WGS84(lng, lat):
    """Convert AMap/GCJ-02 ("Mars") coordinates to WGS-84.

    Outside the mainland-China bounding box no offset is applied and the
    input is returned unchanged.

    :return: (longitude, latitude) tuple in WGS-84
    """
    pi = 3.1415926535897932384626
    ee = 0.00669342162296594323  # eccentricity squared
    a = 6378245.0  # semi-major axis
    if lng > 73.66 and lng < 135.05 and lat > 3.86 and lat < 53.55:
        # Inside China: invert the GCJ-02 offset.
        dlat = transformlat(lng - 105.0, lat - 35.0)
        dlng = transformlng(lng - 105.0, lat - 35.0)
        radlat = lat / 180.0 * pi
        magic = math.sin(radlat)
        magic = 1 - ee * magic * magic
        sqrtmagic = math.sqrt(magic)
        dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
        dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
        mglat = lat + dlat
        mglng = lng + dlng
        # (lng, lat) is GCJ-02; (mglng, mglat) re-applies the offset, so
        # 2 * gcj - mg approximates the original WGS-84 position.
        longitude = lng * 2 - mglng
        latitude = lat * 2 - mglat
    else:
        longitude = lng
        latitude = lat
    return longitude, latitude


def write_to_excel(poilist, cityname, classfield):
    """Write POIs (converted to WGS-84) to `<cityname>_<classfield>.xls`."""
    # One Workbook object corresponds to one Excel file.
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet = book.add_sheet(classfield, cell_overwrite_ok=True)

    # Header row (column titles).
    sheet.write(0, 0, 'longitude')
    sheet.write(0, 1, 'latitude')
    sheet.write(0, 2, 'count')
    sheet.write(0, 3, 'name')
    sheet.write(0, 4, 'address')
    sheet.write(0, 5, 'tel')

    for i in range(len(poilist)):
        location = poilist[i]['location']
        name = poilist[i]['name']
        address = poilist[i]['address']
        tel = poilist[i]['tel']
        lng = str(location).split(",")[0]
        lat = str(location).split(",")[1]
        # AMap ("Mars"/GCJ-02) coordinates -> WGS-84.
        lng, lat = GD_to_WGS84(float(lng), float(lat))
        lng = str(lng)
        lat = str(lat)

        # Data row for this POI.
        sheet.write(i + 1, 0, lng)
        sheet.write(i + 1, 1, lat)
        sheet.write(i + 1, 2, 1)
        sheet.write(i + 1, 3, name)
        sheet.write(i + 1, 4, address)
        sheet.write(i + 1, 5, tel)

    # Finally, save everything to the target Excel file.
    book.save(r'' + cityname + "_" + classfield + '.xls')


def hand(poilist, result):
    """Append the POIs of one API result page to `poilist`."""
    poilist.extend(result['pois'])


def getpoi_page(cityname, keywords, page):
    """Request one page (25 POIs) from the AMap place-text search API.

    :return: the raw UTF-8 decoded JSON response body
    """
    req_url = (poi_search_url + "?key=" + amap_web_key
               + '&extensions=all&keywords=' + quote(keywords)
               + '&city=' + quote(cityname)
               + '&citylimit=true' + '&offset=25'
               + '&page=' + str(page) + '&output=json')
    with request.urlopen(req_url) as f:
        data = f.read()
    return data.decode('utf-8')


# TODO: replace with your own AMap web-service API key.
amap_web_key = 'your api key'

poi_search_url = "http://restapi.amap.com/v3/place/text"
poi_boundary_url = "https://ditu.amap.com/detail/get/detail"

# TODO: `cityname` is the city whose POIs are crawled, `nanning_areas` its
# districts, and `classes` the POI category names (Chinese names or codes
# from the AMap POI category code table both work).
cityname = '北京市'
nanning_areas = ['东城区', '西城区', '朝阳区', '海淀区']
classes = ['学校', '医院', '地铁站', '公交站', '公园']

for clas in classes:
    classes_all_pois = []
    for area in nanning_areas:
        pois_area = getpois(area, clas)
        print('当前城区:' + str(area) + ', 分类:' + str(clas) + ", 总的有" + str(len(pois_area)) + "条数据")
        classes_all_pois.extend(pois_area)
    print("所有城区的数据汇总,总数为:" + str(len(classes_all_pois)))
    write_to_excel(classes_all_pois, cityname, clas)
    print('================分类:' + str(clas) + "写入成功")
from mxnet.gluon.nn import HybridSequential # from d2l import mxnet as d2l class RFN(HybridSequential): def __init__(self, output_mode: {'node', 'edge', 'between_edge'}=None, **kwargs): super().__init__(**kwargs) self.output_mode = output_mode def _output(self, Z_N, Z_E, Z_B): if self.output_mode is None: return Z_N, Z_E, Z_B if self.output_mode == 'node': return Z_N elif self.output_mode == 'edge': return Z_E elif self.output_mode == 'between_edge': return Z_B else: raise ValueError( f'RFN output mode {self.output_mode} is not supported.') def get_gcn_args(self, *args): return args def hybrid_forward(self, F, X_V, X_E, X_B, N_node_primal, N_edge_primal, node_mask_primal, N_node_dual, N_edge_dual, N_shared_node_dual, node_mask_dual): # X_V=d2l.MultiHeadAttention(X_V, 2, 0.5) for rfn_layer in self._children.values(): X_V, X_E, X_B = rfn_layer( X_V, X_E, X_B, N_node_primal, N_edge_primal, node_mask_primal, N_node_dual, N_edge_dual, N_shared_node_dual, node_mask_dual) return self._output(X_V, X_E, X_B) def hybrid_forward_first_conv(self, F, X_V, X_E, X_B, N_node_primal, N_edge_primal, node_mask_primal, N_node_dual, N_edge_dual, N_shared_node_dual, node_mask_dual): for rfn_layer in self._children.values(): X_V, X_E, X_B = rfn_layer( X_V, X_E, X_B, N_node_primal, N_edge_primal, node_mask_primal, N_node_dual, N_edge_dual, N_shared_node_dual, node_mask_dual) break return X_V, X_E, X_B def hybrid_forward_output_all(self, F, X_V, X_E, X_B, N_node_primal, N_edge_primal, node_mask_primal, N_node_dual, N_edge_dual, N_shared_node_dual, node_mask_dual): leng = len(self._children.values()) for index,rfn_layer in enumerate(self._children.values()): if index == leng - 1: break X_V, X_E, X_B = rfn_layer( X_V, X_E, X_B, N_node_primal, N_edge_primal, node_mask_primal, N_node_dual, N_edge_dual, N_shared_node_dual, node_mask_dual) return X_V, X_E, X_B
#!/usr/bin/env python3
# Copyright 2021, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0
"""Handle the process of recording data from the "log" firmware."""

import asyncio
import logging
import dataclasses
import datetime
from typing import Optional

import aioconsole
import aioserial
from serial.tools import list_ports

logging.basicConfig(level=logging.DEBUG)

# Base filename; actual recordings use the timestamped _make_filename().
FILENAME = "measurements.csv"


def _make_filename():
    """Return a timestamped CSV filename like meas_YYYYMMDD_HHMM.csv."""
    now = datetime.datetime.now()
    return "meas_{}{:02d}{:02d}_{:02d}{:02d}.csv".format(
        now.year, now.month, now.day, now.hour, now.minute
    )


def _get_known_devices():
    """Map (vid, pid) tuples to human-readable names of supported boards."""
    return {_vid_pid("2341", "805A"): "Arduino Nano 33 BLE"}


def _vid_pid(vid_string, pid_string):
    """Convert hex VID/PID strings to an (int, int) tuple."""
    return (int(vid_string, 16), int(pid_string, 16))


def _get_known_ports():
    """Return the device path of the first recognized serial port.

    Raises:
        RuntimeError: if no known device is connected.
    """
    known_devices = _get_known_devices()
    for info in list_ports.comports():
        our_vid_pid = (info.vid, info.pid)
        if our_vid_pid in known_devices:
            print(
                info.device,
                info.description,
                hex(info.vid),
                hex(info.pid),
                known_devices[our_vid_pid],
            )
            return info.device
    raise RuntimeError("Could not find any connected devices")


@dataclasses.dataclass
class Measurement:
    """One sample from the firmware: timestamp, gyro rates, brightness."""

    us: int
    drx: float
    dry: float
    drz: float
    brightness: int

    @classmethod
    def from_csv_line(cls, line: bytes):
        """Parse one CSV line from the device, returning None if malformed."""
        try:
            us_str, drx_str, dry_str, drz_str, brightness_str = line.split(b",")
            return Measurement(
                us=int(us_str),
                drx=float(drx_str),
                dry=float(dry_str),
                drz=float(drz_str),
                brightness=int(brightness_str),
            )
        except ValueError:
            # Wrong field count (unpack error) or a non-numeric field.
            # The previous bare `except:` also swallowed KeyboardInterrupt
            # and friends; ValueError covers exactly the parse failures.
            return None

    def get_csv_line(self):
        """Serialize back to the on-disk CSV line format (with newline)."""
        return f"{self.us},{self.drx},{self.dry},{self.drz},{self.brightness}\n"


async def get_measurement(serial_port: aioserial.AioSerial, retries=1):
    """Read lines until one parses as a Measurement, or give up.

    Returns None on an empty read, a partial line, or when all retries
    produced unparseable lines.
    """
    for _ in range(retries + 1):
        line = await serial_port.readline_async()
        if not line:
            # all done
            return
        if b"\n" not in line:
            # partial line: all done
            return
        meas = Measurement.from_csv_line(line)
        if meas:
            return meas


@dataclasses.dataclass
class RunningExtrema:
    """Tracks the running minimum and maximum of a stream of values."""

    min_val: Optional[float] = None
    max_val: Optional[float] = None

    def get_range(self) -> float:
        """Return max - min.

        Raises:
            ValueError: if no values have been processed yet.
        """
        if self.max_val is None or self.min_val is None:
            raise ValueError("Cannot get range when we have not processed any values")
        return self.max_val - self.min_val

    def process(self, val) -> bool:
        """Fold `val` into the extrema; return True if either bound moved."""
        changed = False
        if self.min_val is None or val < self.min_val:
            self.min_val = val
            changed = True
        if self.max_val is None or val > self.max_val:
            self.max_val = val
            changed = True
        return changed


async def get_measurement_or_enter(
    input_task: asyncio.Task, serial_port: aioserial.AioSerial, retries=1
):
    """Return a measurement, or None if there was a problem or the user hit enter."""
    meas_task = asyncio.create_task(get_measurement(serial_port, retries=retries))
    done, _ = await asyncio.wait(
        (input_task, meas_task), return_when=asyncio.FIRST_COMPLETED
    )
    if input_task in done:
        # They hit enter
        meas_task.cancel()
        return None

    meas = meas_task.result()
    if not meas:
        return None
    return meas


async def main(device: str):
    """Calibrate brightness limits interactively, then record to a CSV file."""
    serial_port = aioserial.AioSerial(port=device, baudrate=115200)
    print("Talking with your device")
    meas = await get_measurement(serial_port)
    print(meas)
    if not meas:
        return

    print(
        "OK - Please rotate thru the range of brightnesses to determine the limits. Press enter when done"
    )
    brightness_extrema = RunningExtrema()
    # Task watching for the enter press during calibration.
    input_task = asyncio.create_task(aioconsole.ainput())
    while True:
        meas = await get_measurement_or_enter(input_task, serial_port)
        if not meas:
            # they hit enter
            break
        if brightness_extrema.process(meas.brightness):
            print(brightness_extrema.min_val, brightness_extrema.max_val)

    # Sanity-check the calibration before recording anything.
    if brightness_extrema.min_val == 0:
        print("Got a brightness of zero, not OK. Is your sensor connected right?")
        return

    print("OK, limits found.")
    if brightness_extrema.get_range() < 200:
        print(
            "Range not large enough: improve sensitivity of sensor or connection to display"
        )
        return

    filename = _make_filename()
    print("OK, recording to disk, enter to stop.")
    with open(filename, "w") as fp:
        # header row
        fp.write("us,drx,dry,drz,brightness\n")

        base_meas = await get_measurement(serial_port)
        if not base_meas:
            raise RuntimeError("Could not get our baseline timestamp")
        zero_time = base_meas.us

        # the task watching for the enter press
        input_task = asyncio.create_task(aioconsole.ainput())
        while True:
            meas = await get_measurement_or_enter(input_task, serial_port)
            if not meas:
                # they hit enter
                break
            # Offset the timestamp for ease of use.
            meas.us -= zero_time
            fp.write(meas.get_csv_line())

    # BUG FIX: the message previously did not interpolate the recording's
    # filename into the f-string.
    print(f"All done! Go move/rename {filename} and analyze it!")


if __name__ == "__main__":
    device = _get_known_ports()
    print(f"Opening {device}")
    asyncio.run(main(device))
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import json

import yaml
import influxdb
import openstack

# Global accumulator of findings: {service_name: {level: [messages]}}.
results = {}


def read_sc_config(conn, config_path='service_config.yaml'):
    """Load the service-config YAML file.

    :param conn: unused; kept for backward signature compatibility.
    :param config_path: path of the YAML file to read.
    :returns: parsed config dict.
    """
    with open(config_path) as fp:
        return yaml.load(fp, Loader=yaml.FullLoader)


def _normalize_catalog(catalog):
    """Return the catalog as a {service_type: entry} mapping."""
    return {rec.get('type'): rec for rec in catalog}


def _log(level, section, service, msg):
    """Record `msg` under results[service][level].

    The `section` argument is currently unused (kept for call-site
    compatibility with an earlier section-keyed layout).
    """
    results.setdefault(service, {}).setdefault(level, []).append(msg)


def _error(section, service, msg):
    return _log('error', section, service, msg)


def _warn(section, service, msg):
    return _log('warn', section, service, msg)


def _success(section, service, msg=None):
    return _log('success', section, service, msg)


def validate_catalog_valid(catalog):
    """Flag service types that appear more than once in the catalog."""
    cat = {}
    for rec in catalog:
        entry_type = rec.get('type')
        if entry_type in cat:
            _error(
                'catalog', 'general',
                'Service %s present multiple times' % entry_type)
        cat[entry_type] = rec


def _validate_service_known_in_regions(service_name, service, endpoints,
                                       regions, check_type='catalog'):
    """Check that the service has an endpoint in every expected region.

    Consumes `endpoints` exactly once: the previous nested-loop version
    exhausted a generator (e.g. conn.identity.endpoints()) on the first
    region and then reported false errors for every later region.

    (`check_type` was formerly named `type`, shadowing the builtin; both
    call sites pass it positionally so the rename is safe.)
    """
    found_regions = {ep.get('region_id') for ep in endpoints}
    for reg in regions:
        if reg not in found_regions:
            _error(
                check_type + '_catalog',
                service_name,
                'Service %s is not registered in region %s'
                % (service.get('type'), reg))


def validate_service_known_in_region(conn, catalog, config, regions):
    """Verify every configured service is present in all regions.

    Checks both the token catalog entry and the standalone endpoint
    records registered in Keystone.
    """
    for srv in config['services']:
        service_name = list(srv.keys())[0]
        srv_data = srv[service_name]
        service_type = srv_data.get('service_type')
        service_in_catalog = catalog.get(service_type)
        if not service_in_catalog:
            _error(
                'token_catalog', service_name,
                'Service %s is not known in catalog' % service_type)
            continue
        _validate_service_known_in_regions(
            service_name, service_in_catalog,
            service_in_catalog.get('endpoints', []),
            regions, 'token')
        # Validate standalone endpoints for the service are actually same.
        _validate_service_known_in_regions(
            service_name, service_in_catalog,
            conn.identity.endpoints(service_id=service_in_catalog.get('id')),
            regions, 'catalog'
        )


def validate_service_supports_version_discovery(conn, catalog, config,
                                                regions):
    """Verify endpoints support version discovery and expose the right suffix."""
    for srv in config['services']:
        service_name = list(srv.keys())[0]
        srv_data = srv[service_name]
        service_type = srv_data.get('service_type')
        service_in_catalog = catalog.get(service_type)
        if not service_in_catalog:
            # bad service with no entries; already reported elsewhere
            continue
        for ep in service_in_catalog.get('endpoints'):
            supports_vd = False
            client = conn.config.get_session_client(service_type)
            url = ep['url']
            try:
                data = client.get_endpoint_data()
                if data.service_url:
                    # If we got service_url - most likely we are good
                    supports_vd = True
                url = data.service_url or data.catalog_url
            except Exception:
                supports_vd = False
            if not supports_vd:
                _warn('service_version_discovery', service_name,
                      'Service %s does not support version discovery properly'
                      % service_type)

            # Now verify that the URL we have points to a usable endpoint.
            # `or ''` guards against a missing 'expected_suffix' key, which
            # previously raised AttributeError on None.
            expected_suffix = (srv_data.get('expected_suffix') or '').format(
                project_id=conn.current_project_id
            ).rstrip('/')
            if not url.rstrip('/').endswith(expected_suffix):
                _warn('service_version_discovery', service_name,
                      'Service %s exposes wrong suffix' % service_type)


def write_result(influx_client, config):
    """Push per-service error/warning counts as points to InfluxDB."""
    data_points = []
    services = [list(srv.keys())[0] for srv in config.get('services')]
    services.append('general')
    for service_name in services:
        result = results.get(service_name, {})
        point = dict(
            measurement='scmon1',
            tags=dict(
                service_type=service_name,
            )
        )
        point['fields'] = dict(
            service=service_name,
            errors=len(result.get('error', [])) or 0,
            warn=len(result.get('warn', [])) or 0
        )
        data_points.append(point)
    if data_points and influx_client:
        influx_client.write_points(data_points)


def get_influx_client():
    """Build an InfluxDB client from INFLUXDB_* env vars, or return None."""
    host = os.environ.get('INFLUXDB_HOST', 'localhost')
    port = int(os.environ.get('INFLUXDB_PORT', 8086))
    user = os.environ.get('INFLUXDB_USER')
    password = os.environ.get('INFLUXDB_PASSWORD')
    database = os.environ.get('INFLUXDB_DATABASE', 'default')
    # BUG FIX: env values are strings, so any non-empty value (even
    # "false") used to be truthy and silently enabled SSL.
    ssl = os.environ.get('INFLUXDB_SSL', '').lower() in ('1', 'true', 'yes')
    influx = None
    if host and port and user and password:
        influx = influxdb.InfluxDBClient(
            host, port, user, password, database, ssl, timeout=5)
    return influx


def main():
    conn = openstack.connect()
    # BUG FIX: the path used to be passed positionally as the (unused)
    # `conn` parameter, so the default 'service_config.yaml' was read
    # instead of the intended file.
    sc_config = read_sc_config(conn, '../../service_config.yaml')
    project_catalog = conn.config.get_service_catalog().catalog
    normalized_project_catalog = _normalize_catalog(project_catalog)
    regions = [x.id for x in conn.identity.regions()]

    validate_catalog_valid(project_catalog)
    validate_service_known_in_region(
        conn, normalized_project_catalog, sc_config, regions
    )
    validate_service_supports_version_discovery(
        conn, normalized_project_catalog, sc_config, regions
    )

    print(json.dumps(results, sort_keys=True, indent=True))
    influx_client = get_influx_client()
    if influx_client:
        write_result(influx_client, sc_config)


if __name__ == '__main__':
    main()
import pytest

from investmentstk.data_feeds import AvanzaFeed
from investmentstk.data_feeds.data_feed import TimeResolution
from investmentstk.models.asset import Asset
from investmentstk.models.source import Source


@pytest.fixture
def subject() -> AvanzaFeed:
    # System under test: a fresh feed client for each test.
    return AvanzaFeed()


@pytest.fixture
def volvo() -> Asset:
    # A well-known, liquid instrument (Volvo B on Avanza) used as test data.
    return Asset(Source.Avanza, "5269", "VOLV B")


@pytest.mark.parametrize(
    "resolution, expected_possible_days_difference",
    [[TimeResolution.day, [1, 3]], [TimeResolution.week, [7]], [TimeResolution.month, [28, 29, 30, 31]]],
    ids=["day", "week", "month"],
)
@pytest.mark.external_http
def test_retrieve_ohlc(subject, volvo, resolution, expected_possible_days_difference):
    """Bars are non-empty, internally consistent (high >= low) and spaced
    per the requested resolution (daily bars may skip weekends, hence 1 or
    3 days; month lengths vary)."""
    dataframe = subject.retrieve_ohlc(volvo.source_id, resolution=resolution)

    first_bar = dataframe.iloc[0]
    second_bar = dataframe.iloc[1]

    assert len(dataframe) > 0
    assert first_bar.high >= first_bar.low
    assert (second_bar.name - first_bar.name).days in expected_possible_days_difference


@pytest.mark.external_http
def test_retrieve_asset_name(subject, volvo):
    # The name reported by the feed must match the fixture's expectation.
    name = subject.retrieve_asset_name(volvo.source_id)
    assert name == volvo.name


@pytest.mark.external_http
def test_retrieve_price(subject, volvo):
    # Loose sanity ranges only: exact values depend on the live market.
    price = subject.retrieve_price(volvo.source_id)
    assert 100 <= price.last <= 300
    assert -10 <= price.change <= 10
    assert -10 <= price.change_pct <= 10
from .merge_transformers import ConcatMerger, AverageMerger __all__ = ["ConcatMerger", "AverageMerger"]
# Copyright 2010-2019 Dan Elliott, Russell Valentine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from tests import TestIsadore
import unittest
from restkit import Resource, BasicAuth, request, Unauthorized
import json
import ConfigParser
import util
import random
from passgen import generatePassword


class TestAccounts(TestIsadore):
	"""Integration tests for the /resources/accounts REST endpoints.

	Each test exercises the endpoint as several pre-provisioned users
	(regular, fill, config, power, super) and verifies both the success
	responses and that unauthorized roles are rejected.  Python 2 /
	restkit based.
	"""

	def testAccountsGet(self):
		"""GET the account collection as power/super users; others get 401."""
		path = '/resources/accounts'
		for user in [self.testpoweruser, self.testsuperuser]:
			accounts = self.getJSONResource(path, user)
			#TODO: Check that power user got no super users.
		self.unauthorizedChecks([None, self.testuser, self.testfilluser, self.testconfiguser], path)

	def testAccountsGetId(self):
		"""GET a single account by id; super-user accounts are restricted."""
		path = "/resources/accounts/" + str(self.dbuser.id)
		for user in [self.testuser, self.testpoweruser, self.testsuperuser]:
			account = self.getJSONResource(path, user)
			self.assertEqual(self.dbuser.email, account['email'], "Wrong account retrieved as " + user.credentials[0])
		# Only a super user may fetch another super user's account.
		path = "/resources/accounts/" + str(self.dbsuperuser.id)
		self.unauthorizedChecks([None, self.testuser, self.testfilluser, self.testconfiguser, self.testpoweruser], path)

	def testAccountsUpdate(self):
		"""PUT updates an account (random phone) for each allowed role."""
		path = "/resources/accounts/" + str(self.dbfilluser.id)
		for user in [self.testfilluser, self.testpoweruser, self.testsuperuser]:
			name = user.credentials[0]
			res = Resource(self.baseurl, filters=[user])
			# Random 11-digit phone number so each PUT is an observable change.
			phone = str(random.randrange(10000000000, 99999999999))
			params = {"name": self.dbfilluser.name, "email":self.dbfilluser.email, "phone":phone, "privilege_id":self.dbfilluser.privilege_id}
			output = res.request("PUT", path=path, params_dict=params)
			self.assertEqual(204, output.status_int, "Wrong response code: " + name)
			# Re-fetch from the DB to confirm the update was persisted.
			self.dbfilluser = util.getAccountByEmail(self.dbfilluser.email)
			self.assertEqual(self.dbfilluser.phone, phone, "Account not updated.")
		self.unauthorizedChecks([None, self.testuser, self.testconfiguser], path, method='PUT', params_dict=params)

	def testAccountsNewDelete(self):
		"""POST a new account, verify the returned xlink, then DELETE it."""
		path = "/resources/accounts"
		user = self.testpoweruser
		res = Resource(self.baseurl, filters=[user])
		email = "newusertest@exotericanalytics.com"
		self.assertEqual(None, util.getAccountByEmail(email), "Account to add already exists.")
		params = {"name": "New User Test", "email": "newusertest@exotericanalytics.com",
				"phone": "15555555555", "privilege_id": 100}
		output = res.request("POST", path=path, params_dict=params)
		self.assertEqual(200, output.status_int, "Wrong response code from post.")
		xlink = json.loads(output.body_string())
		#check account got created.
		new_account = util.getAccountByEmail(email)
		self.assertNotEqual(None, new_account, "New account was not created")
		#Check link returned is correct.
		path = xlink["xlink"][0]
		self.assertEquals("/resources/accounts/" + str(new_account.id), path, "Link returned does not match new account")
		#DELETE new account
		output = res.request("DELETE", path=path)
		self.assertEqual(204, output.status_int, "Wrong response code from delete.")
		new_account = util.getAccountByEmail(email)
		self.assertEqual(None, util.getAccountByEmail(email), "Failed to delete account.")
		#authorized checks
		#new
		self.unauthorizedChecks([None, self.testuser, self.testfilluser, self.testconfiguser],
							"/resources/accounts", "POST", params_dict=params)
		#delete
		self.unauthorizedChecks([None, self.testuser, self.testfilluser, self.testconfiguser],
							"/resources/accounts/1", "DELETE")

# Local Variables:
# indent-tabs-mode: t
# python-indent: 4
# tab-width: 4
# End:
# Program that writes a Toffoli-gate cQASM circuit.
# The expected output file name is 'in.qc'.

# cQASM source for a 3-qubit Toffoli (CCNOT) circuit.
program = """version 1.0
qubits 3
Toffoli q[0], q[1], q[2]
"""

# Use a context manager and write() instead of the Python-2-only
# `print >>f` chevron syntax.  The explicit trailing newline matches the
# old print behavior, and the script now runs under Python 2 and 3.
with open('in.qc', 'w') as f:
    f.write(program + '\n')
#! /usr/bin/env python # -*- coding: utf-8 -*- from flask.views import MethodView from flask import request, g, current_app # dracarys import from flask_restapi.views import APIMethodView from .models import Author from .forms import AuthorForm class AuthorView(APIMethodView): model = Author paginate_by = 10 context_object_name = 'items' pk_url_kwarg = 'author_id' form_class = AuthorForm
# Copyright 2021 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Helper functions that operate on NoteSequence protos."""

import dataclasses
import itertools

from typing import MutableMapping, MutableSet, Optional, Sequence, Tuple

from mt3 import event_codec
from mt3 import run_length_encoding
from mt3 import vocabularies

import note_seq

# Fallback velocity/duration used when the input provides none.
DEFAULT_VELOCITY = 100
DEFAULT_NOTE_DURATION = 0.01

# Quantization can result in zero-length notes; enforce a minimum duration.
MIN_NOTE_DURATION = 0.01


@dataclasses.dataclass
class TrackSpec:
  # Identifies one (program, is_drum) track by a human-readable name.
  name: str
  program: int = 0
  is_drum: bool = False


def extract_track(ns, program, is_drum):
  """Return a new NoteSequence containing only notes matching (program, is_drum)."""
  track = note_seq.NoteSequence(ticks_per_quarter=220)
  track_notes = [note for note in ns.notes
                 if note.program == program and note.is_drum == is_drum]
  track.notes.extend(track_notes)
  track.total_time = (max(note.end_time for note in track.notes)
                      if track.notes else 0.0)
  return track


def trim_overlapping_notes(ns: note_seq.NoteSequence) -> note_seq.NoteSequence:
  """Trim overlapping notes from a NoteSequence, dropping zero-length notes."""
  ns_trimmed = note_seq.NoteSequence()
  ns_trimmed.CopyFrom(ns)
  # Overlaps are only trimmed within the same (pitch, program, is_drum) channel.
  channels = set((note.pitch, note.program, note.is_drum)
                 for note in ns_trimmed.notes)
  for pitch, program, is_drum in channels:
    notes = [note for note in ns_trimmed.notes if note.pitch == pitch
             and note.program == program and note.is_drum == is_drum]
    sorted_notes = sorted(notes, key=lambda note: note.start_time)
    for i in range(1, len(sorted_notes)):
      if sorted_notes[i - 1].end_time > sorted_notes[i].start_time:
        sorted_notes[i - 1].end_time = sorted_notes[i].start_time
  # Trimming may have produced zero-length notes; drop them.
  valid_notes = [note for note in ns_trimmed.notes
                 if note.start_time < note.end_time]
  del ns_trimmed.notes[:]
  ns_trimmed.notes.extend(valid_notes)
  return ns_trimmed


def assign_instruments(ns: note_seq.NoteSequence) -> None:
  """Assign instrument numbers to notes; modifies NoteSequence in place."""
  program_instruments = {}
  for note in ns.notes:
    if note.program not in program_instruments and not note.is_drum:
      num_instruments = len(program_instruments)
      # Skip instrument 9, which is reserved for drums (General MIDI channel 10).
      note.instrument = (num_instruments if num_instruments < 9 else
                         num_instruments + 1)
      program_instruments[note.program] = note.instrument
    elif note.is_drum:
      note.instrument = 9
    else:
      note.instrument = program_instruments[note.program]


def validate_note_sequence(ns: note_seq.NoteSequence) -> None:
  """Raise ValueError if NoteSequence contains invalid notes."""
  for note in ns.notes:
    if note.start_time >= note.end_time:
      raise ValueError('note has start time >= end time: %f >= %f' %
                       (note.start_time, note.end_time))
    if note.velocity == 0:
      raise ValueError('note has zero velocity')


def note_arrays_to_note_sequence(
    onset_times: Sequence[float],
    pitches: Sequence[int],
    offset_times: Optional[Sequence[float]] = None,
    velocities: Optional[Sequence[int]] = None,
    programs: Optional[Sequence[int]] = None,
    is_drums: Optional[Sequence[bool]] = None
) -> note_seq.NoteSequence:
  """Convert note onset / offset / pitch / velocity arrays to NoteSequence."""
  ns = note_seq.NoteSequence(ticks_per_quarter=220)
  for onset_time, offset_time, pitch, velocity, program, is_drum in itertools.zip_longest(
      onset_times, [] if offset_times is None else offset_times,
      pitches, [] if velocities is None else velocities,
      [] if programs is None else programs,
      [] if is_drums is None else is_drums):
    # zip_longest fills missing optional fields with None; apply defaults.
    if offset_time is None:
      offset_time = onset_time + DEFAULT_NOTE_DURATION
    if velocity is None:
      velocity = DEFAULT_VELOCITY
    if program is None:
      program = 0
    if is_drum is None:
      is_drum = False
    ns.notes.add(
        start_time=onset_time,
        end_time=offset_time,
        pitch=pitch,
        velocity=velocity,
        program=program,
        is_drum=is_drum)
    ns.total_time = max(ns.total_time, offset_time)
  assign_instruments(ns)
  return ns


@dataclasses.dataclass
class NoteEventData:
  # Minimal payload describing one note event; optional fields are only
  # populated by the richer extraction functions below.
  pitch: int
  velocity: Optional[int] = None
  program: Optional[int] = None
  is_drum: Optional[bool] = None
  instrument: Optional[int] = None


def note_sequence_to_onsets(
    ns: note_seq.NoteSequence
) -> Tuple[Sequence[float], Sequence[NoteEventData]]:
  """Extract note onsets and pitches from NoteSequence proto."""
  # Sort by pitch to use as a tiebreaker for subsequent stable sort.
  notes = sorted(ns.notes, key=lambda note: note.pitch)
  return ([note.start_time for note in notes],
          [NoteEventData(pitch=note.pitch) for note in notes])


def note_sequence_to_onsets_and_offsets(
    ns: note_seq.NoteSequence,
) -> Tuple[Sequence[float], Sequence[NoteEventData]]:
  """Extract onset & offset times and pitches from a NoteSequence proto.

  The onset & offset times will not necessarily be in sorted order.

  Args:
    ns: NoteSequence from which to extract onsets and offsets.

  Returns:
    times: A list of note onset and offset times.
    values: A list of NoteEventData objects where velocity is zero for note
        offsets.
  """
  # Sort by pitch and put offsets before onsets as a tiebreaker for subsequent
  # stable sort.
  notes = sorted(ns.notes, key=lambda note: note.pitch)
  times = ([note.end_time for note in notes] +
           [note.start_time for note in notes])
  values = ([NoteEventData(pitch=note.pitch, velocity=0)
             for note in notes] +
            [NoteEventData(pitch=note.pitch, velocity=note.velocity)
             for note in notes])
  return times, values


def note_sequence_to_onsets_and_offsets_and_programs(
    ns: note_seq.NoteSequence,
) -> Tuple[Sequence[float], Sequence[NoteEventData]]:
  """Extract onset & offset times and pitches & programs from a NoteSequence.

  The onset & offset times will not necessarily be in sorted order.

  Args:
    ns: NoteSequence from which to extract onsets and offsets.

  Returns:
    times: A list of note onset and offset times.
    values: A list of NoteEventData objects where velocity is zero for note
        offsets.
  """
  # Sort by program and pitch and put offsets before onsets as a tiebreaker for
  # subsequent stable sort.  Drums have no offsets, hence the filter below.
  notes = sorted(ns.notes,
                 key=lambda note: (note.is_drum, note.program, note.pitch))
  times = ([note.end_time for note in notes if not note.is_drum] +
           [note.start_time for note in notes])
  values = ([NoteEventData(pitch=note.pitch, velocity=0,
                           program=note.program, is_drum=False)
             for note in notes if not note.is_drum] +
            [NoteEventData(pitch=note.pitch, velocity=note.velocity,
                           program=note.program, is_drum=note.is_drum)
             for note in notes])
  return times, values


@dataclasses.dataclass
class NoteEncodingState:
  """Encoding state for note transcription, keeping track of active pitches."""
  # velocity bin for active pitches and programs
  active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(
      default_factory=dict)


def note_event_data_to_events(
    state: Optional[NoteEncodingState],
    value: NoteEventData,
    codec: event_codec.Codec,
) -> Sequence[event_codec.Event]:
  """Convert note event data to a sequence of events."""
  if value.velocity is None:
    # onsets only, no program or velocity
    return [event_codec.Event('pitch', value.pitch)]
  else:
    num_velocity_bins = vocabularies.num_velocity_bins_from_codec(codec)
    velocity_bin = vocabularies.velocity_to_bin(
        value.velocity, num_velocity_bins)
    if value.program is None:
      # onsets + offsets + velocities only, no programs
      if state is not None:
        state.active_pitches[(value.pitch, 0)] = velocity_bin
      return [event_codec.Event('velocity', velocity_bin),
              event_codec.Event('pitch', value.pitch)]
    else:
      if value.is_drum:
        # drum events use a separate vocabulary
        return [event_codec.Event('velocity', velocity_bin),
                event_codec.Event('drum', value.pitch)]
      else:
        # program + velocity + pitch
        if state is not None:
          state.active_pitches[(value.pitch, value.program)] = velocity_bin
        return [event_codec.Event('program', value.program),
                event_codec.Event('velocity', velocity_bin),
                event_codec.Event('pitch', value.pitch)]


def note_encoding_state_to_events(
    state: NoteEncodingState
) -> Sequence[event_codec.Event]:
  """Output program and pitch events for active notes plus a final tie event."""
  events = []
  for pitch, program in sorted(
      state.active_pitches.keys(), key=lambda k: k[::-1]):
    # Only emit notes with a nonzero velocity bin (i.e. actually sounding).
    if state.active_pitches[(pitch, program)]:
      events += [event_codec.Event('program', program),
                 event_codec.Event('pitch', pitch)]
  events.append(event_codec.Event('tie', 0))
  return events


@dataclasses.dataclass
class NoteDecodingState:
  """Decoding state for note transcription."""
  current_time: float = 0.0
  # velocity to apply to subsequent pitch events (zero for note-off)
  current_velocity: int = DEFAULT_VELOCITY
  # program to apply to subsequent pitch events
  current_program: int = 0
  # onset time and velocity for active pitches and programs
  active_pitches: MutableMapping[Tuple[int, int],
                                 Tuple[float, int]] = dataclasses.field(
                                     default_factory=dict)
  # pitches (with programs) to continue from previous segment
  tied_pitches: MutableSet[Tuple[int, int]] = dataclasses.field(
      default_factory=set)
  # whether or not we are in the tie section at the beginning of a segment
  is_tie_section: bool = False
  # partially-decoded NoteSequence
  note_sequence: note_seq.NoteSequence = dataclasses.field(
      default_factory=lambda: note_seq.NoteSequence(ticks_per_quarter=220))


def decode_note_onset_event(
    state: NoteDecodingState,
    time: float,
    event: event_codec.Event,
    codec: event_codec.Codec,
) -> None:
  """Process note onset event and update decoding state."""
  if event.type == 'pitch':
    state.note_sequence.notes.add(
        start_time=time, end_time=time + DEFAULT_NOTE_DURATION,
        pitch=event.value, velocity=DEFAULT_VELOCITY)
    state.note_sequence.total_time = max(state.note_sequence.total_time,
                                         time + DEFAULT_NOTE_DURATION)
  else:
    raise ValueError('unexpected event type: %s' % event.type)


def _add_note_to_sequence(
    ns: note_seq.NoteSequence,
    start_time: float, end_time: float, pitch: int, velocity: int,
    program: int = 0, is_drum: bool = False
) -> None:
  # Enforce the minimum duration so quantization never yields an
  # invalid zero-length note.
  end_time = max(end_time, start_time + MIN_NOTE_DURATION)
  ns.notes.add(
      start_time=start_time, end_time=end_time,
      pitch=pitch, velocity=velocity, program=program, is_drum=is_drum)
  ns.total_time = max(ns.total_time, end_time)


def decode_note_event(
    state: NoteDecodingState,
    time: float,
    event: event_codec.Event,
    codec: event_codec.Codec
) -> None:
  """Process note event and update decoding state."""
  if time < state.current_time:
    raise ValueError('event time < current time, %f < %f' % (
        time, state.current_time))
  state.current_time = time
  if event.type == 'pitch':
    pitch = event.value
    if state.is_tie_section:
      # "tied" pitch
      if (pitch, state.current_program) not in state.active_pitches:
        raise ValueError('inactive pitch/program in tie section: %d/%d' %
                         (pitch, state.current_program))
      if (pitch, state.current_program) in state.tied_pitches:
        raise ValueError('pitch/program is already tied: %d/%d' %
                         (pitch, state.current_program))
      state.tied_pitches.add((pitch, state.current_program))
    elif state.current_velocity == 0:
      # note offset
      if (pitch, state.current_program) not in state.active_pitches:
        raise ValueError('note-off for inactive pitch/program: %d/%d' %
                         (pitch, state.current_program))
      onset_time, onset_velocity = state.active_pitches.pop(
          (pitch, state.current_program))
      _add_note_to_sequence(
          state.note_sequence, start_time=onset_time, end_time=time,
          pitch=pitch, velocity=onset_velocity,
          program=state.current_program)
    else:
      # note onset
      if (pitch, state.current_program) in state.active_pitches:
        # The pitch is already active; this shouldn't really happen but we'll
        # try to handle it gracefully by ending the previous note and starting a
        # new one.
        onset_time, onset_velocity = state.active_pitches.pop(
            (pitch, state.current_program))
        _add_note_to_sequence(
            state.note_sequence, start_time=onset_time, end_time=time,
            pitch=pitch, velocity=onset_velocity,
            program=state.current_program)
      state.active_pitches[(pitch, state.current_program)] = (
          time, state.current_velocity)
  elif event.type == 'drum':
    # drum onset (drums have no offset)
    if state.current_velocity == 0:
      raise ValueError('velocity cannot be zero for drum event')
    offset_time = time + DEFAULT_NOTE_DURATION
    _add_note_to_sequence(
        state.note_sequence, start_time=time, end_time=offset_time,
        pitch=event.value, velocity=state.current_velocity, is_drum=True)
  elif event.type == 'velocity':
    # velocity change
    num_velocity_bins = vocabularies.num_velocity_bins_from_codec(codec)
    velocity = vocabularies.bin_to_velocity(event.value, num_velocity_bins)
    state.current_velocity = velocity
  elif event.type == 'program':
    # program change
    state.current_program = event.value
  elif event.type == 'tie':
    # end of tie section; end active notes that weren't declared tied
    if not state.is_tie_section:
      raise ValueError('tie section end event when not in tie section')
    for (pitch, program) in list(state.active_pitches.keys()):
      if (pitch, program) not in state.tied_pitches:
        onset_time, onset_velocity = state.active_pitches.pop(
            (pitch, program))
        _add_note_to_sequence(
            state.note_sequence,
            start_time=onset_time, end_time=state.current_time,
            pitch=pitch, velocity=onset_velocity, program=program)
    state.is_tie_section = False
  else:
    raise ValueError('unexpected event type: %s' % event.type)


def begin_tied_pitches_section(state: NoteDecodingState) -> None:
  """Begin the tied pitches section at the start of a segment."""
  state.tied_pitches = set()
  state.is_tie_section = True


def flush_note_decoding_state(
    state: NoteDecodingState
) -> note_seq.NoteSequence:
  """End all active notes and return resulting NoteSequence."""
  for onset_time, _ in state.active_pitches.values():
    state.current_time =
max(state.current_time, onset_time + MIN_NOTE_DURATION) for (pitch, program) in list(state.active_pitches.keys()): onset_time, onset_velocity = state.active_pitches.pop((pitch, program)) _add_note_to_sequence( state.note_sequence, start_time=onset_time, end_time=state.current_time, pitch=pitch, velocity=onset_velocity, program=program) assign_instruments(state.note_sequence) return state.note_sequence class NoteEncodingSpecType(run_length_encoding.EventEncodingSpec): pass # encoding spec for modeling note onsets only NoteOnsetEncodingSpec = NoteEncodingSpecType( init_encoding_state_fn=lambda: None, encode_event_fn=note_event_data_to_events, encoding_state_to_events_fn=None, init_decoding_state_fn=NoteDecodingState, begin_decoding_segment_fn=lambda state: None, decode_event_fn=decode_note_onset_event, flush_decoding_state_fn=lambda state: state.note_sequence) # encoding spec for modeling onsets and offsets NoteEncodingSpec = NoteEncodingSpecType( init_encoding_state_fn=lambda: None, encode_event_fn=note_event_data_to_events, encoding_state_to_events_fn=None, init_decoding_state_fn=NoteDecodingState, begin_decoding_segment_fn=lambda state: None, decode_event_fn=decode_note_event, flush_decoding_state_fn=flush_note_decoding_state) # encoding spec for modeling onsets and offsets, with a "tie" section at the # beginning of each segment listing already-active notes NoteEncodingWithTiesSpec = NoteEncodingSpecType( init_encoding_state_fn=NoteEncodingState, encode_event_fn=note_event_data_to_events, encoding_state_to_events_fn=note_encoding_state_to_events, init_decoding_state_fn=NoteDecodingState, begin_decoding_segment_fn=begin_tied_pitches_section, decode_event_fn=decode_note_event, flush_decoding_state_fn=flush_note_decoding_state)
# i=1 # while i<=100: # if i % 5 == 0 and i % 7 == 0: #ja i dalās ar 5 bez atlikuma # print("FizzBuzz", end=",") # elif i % 5 == 0: # print("Fizz", end=",") # elif i % 7 == 0: # print("Buzz", end=",") # else: # print(i, end=",") # i = i+1 # result = "" # for x in range (1, 100): # if x % 5 == 0: # if x % 7 == 0: # string = "Fizzbuzz," # result += string # continue # string = "Fizz," # elif x % 7 == 0: # string = "Buzz," # else: # string = str(x)+"," # # result += string # print(result)
"""Thin JSON API client: wraps `requests` with project auth headers and
uniform error logging. Returns the response payload's "data" field on
success, or None on any API/HTTP error."""

import requests
from tenacity import retry, stop_after_attempt, wait_fixed

from utils import config, json, log

# Auth headers are built once at import time from the app config.
headers = {"token": config.app["apiToken"], "Content-Type": "application/json"}


def retry_failed(retry_state):
    # Callback for tenacity when all retry attempts are exhausted.
    log.error(0, f"请求重试失败: {retry_state.args[0]}, {retry_state.args[1]}, {retry_state.outcome.result()}")
    return None


# Retry support is currently disabled; re-enable by uncommenting:
# @retry(wait=wait_fixed(3), stop=stop_after_attempt(3), retry_error_callback=retry_failed)
def request(method, path, data=None):
    """Perform an HTTP request against the configured API origin.

    `method` is a lowercase requests method name ("get", "post", ...);
    `data` (if given) is JSON-serialized into the request body.
    """
    url = f"{config.app['apiOrigin']}{path}"
    payload = json.dumps(data) if data is not None else None
    response = getattr(requests, method)(url, headers=headers, data=payload)

    # Guard clause: any non-200 HTTP status is logged and yields None.
    if response.status_code != 200:
        log.error(0, f"请求出错: {method} {url}, status code: {response.status_code}, response: {response.text}")
        return None

    response_data = response.json()
    # Application-level error code in the JSON envelope.
    if response_data["code"] != 0:
        log.error(0, f"请求出错: {method} {url}, code: {response_data['code']}, response: {response.text}")
        return None

    # "data" may be absent on success; treat that as None.
    return response_data.get("data")


def get(path):
    return request("get", path)


def post(path, data):
    return request("post", path, data)


def put(path, data):
    return request("put", path, data)


def patch(path, data):
    return request("patch", path, data)
class Memento:
    """Memento interface: knows how to restore its originator's state."""

    def restore(self):
        pass


class Originator:
    """Originator interface: can snapshot its state into a Memento."""

    def save(self):
        pass


class ConcreteOriginator:
    """Holds a mutable state and can snapshot it into a ConcreteMemento."""

    def __init__(self, state):
        self.__state = state

    def set_state(self, state):
        self.__state = state

    def get_state(self):
        return self.__state

    def save(self):
        # Snapshot the current state, bound to this originator for restore().
        return ConcreteMemento(self, self.__state)


class ConcreteMemento(Memento):
    """Stores one snapshot of a ConcreteOriginator's state."""

    def __init__(self, originator: ConcreteOriginator, state):
        self.__originator = originator
        self.__state = state

    def set_state(self, state):
        self.__state = state

    def get_state(self):
        return self.__state

    def restore(self):
        # Push the stored snapshot back into the originator.
        self.__originator.set_state(self.__state)


class CareTaker:
    """Keeps a history of mementos and can undo to the most recent one."""

    def __init__(self, memento: Memento):
        # BUG FIX: history used to be a class attribute, silently shared by
        # every CareTaker instance; it must be per-instance state.
        self.__history = [memento]

    def undo(self):
        # BUG FIX: the old guard (`is not None`) was always true, so undoing
        # with an empty history raised IndexError. Pop only when non-empty;
        # undo on an empty history is a no-op.
        if self.__history:
            self.__history.pop().restore()


if __name__ == "__main__":
    ori = ConcreteOriginator("ONE")
    mem = ConcreteMemento(ori, ori.get_state())
    caretaker = CareTaker(mem)
    print(ori.get_state())
    ori.set_state("TWO")
    print(ori.get_state())
    caretaker.undo()
    print(ori.get_state())
import torch
from torch._C import INSERT_FOLD_PREPACK_OPS, Node
import torch.nn as nn
import torch.nn.functional as F
import random
import numpy as np


class HGCN(nn.Module):
    """Hypergraph convolution layer with a learnable diagonal edge weight.

    Computes X' = D^(-1/2) H |W| B^(-1) H^T D^(-1/2) X, where H is the
    (batched) incidence matrix, |W| the absolute learnable edge weights,
    D the weighted vertex degrees and B the edge degrees.
    NOTE(review): parameters are created on CUDA at construction time.
    """

    def __init__(self, n_edges, in_feature, out_feature, n_agents):
        super(HGCN, self).__init__()
        print(n_edges)  # debug trace of the hyperedge count
        # One learnable weight per hyperedge (used as a diagonal matrix).
        self.W_line = nn.Parameter(torch.ones(n_edges).cuda())
        self.W = None

    def forward(self, node_features, hyper_graph):
        """Args:
            node_features: (batch, n_agents, feat) node feature tensor.
            hyper_graph: (batch, n_agents, n_edges) incidence matrix.
        Returns:
            Convolved node features, same leading shape as node_features.
        """
        self.W = torch.diag_embed(self.W_line)
        # Edge degrees B (per-edge vertex counts); gradients are stopped.
        B_inv = torch.sum(hyper_graph.detach(), dim=-2)
        B_inv = torch.diag_embed(B_inv)
        # Weighted vertex degrees D using |W|.
        softmax_w = torch.abs(self.W).detach()
        D_inv = torch.matmul(hyper_graph.detach(), softmax_w).sum(dim=-1)
        D_inv = torch.diag_embed(D_inv)
        D_inv = D_inv ** (-0.5)
        B_inv = B_inv ** (-1)
        # Zero out non-finite entries (off-diagonal 0 ** -0.5 gives inf).
        # BUG FIX: the old NaN check compared `== float('nan')`, which is
        # always False (NaN != NaN); use torch.isnan/isinf instead.
        D_inv[torch.isinf(D_inv)] = 0
        D_inv[torch.isnan(D_inv)] = 0
        B_inv[torch.isinf(B_inv)] = 0
        B_inv[torch.isnan(B_inv)] = 0
        # Assemble the normalized hypergraph adjacency and convolve.
        A = torch.bmm(D_inv, hyper_graph)
        A = torch.matmul(A, torch.abs(self.W))
        A = torch.bmm(A, B_inv)
        A = torch.bmm(A, hyper_graph.transpose(-2, -1))
        A = torch.bmm(A, D_inv)
        X = torch.bmm(A, node_features)
        return X


class Encoder(nn.Module):
    """Thin wrapper delegating to an aggregator (HGCN) module."""

    def __init__(self, aggregator, feature_dim):
        super(Encoder, self).__init__()
        self.aggregator = aggregator
        self.feature_dim = feature_dim

    def forward(self, node_features, hyper_graph):
        output = self.aggregator.forward(node_features, hyper_graph)
        return output


class HGCNMixer(nn.Module):
    """QMIX-style mixing network built on hypergraph convolutions.

    Per-agent Q values are mixed through two HGCN encoders over a learned
    hypergraph (built from individual observations), then combined with
    state-conditioned hypernetwork weights into a single Q_tot.
    """

    def __init__(self, args):
        super(HGCNMixer, self).__init__()
        self.args = args
        self.add_self = args.add_self
        self.hyper_hidden_dim = args.hyper_hidden_dim
        self.head_num = 1
        self.hyper_edge_num = args.hyper_edge_num
        self.n_agents = args.n_agents
        self.state_dim = int(np.prod(args.state_shape))
        self.indiv_u_dim = int(np.prod(args.observation_shape))
        self.use_one_hot = False
        self.n_hyper_edge = self.hyper_edge_num
        if self.use_one_hot:
            # Optionally append one identity edge per agent.
            self.n_hyper_edge += self.n_agents
        self.use_elu = True
        # Maps each agent's observation to its hyperedge memberships.
        self.hyper_edge_net = nn.Sequential(
            nn.Linear(in_features=self.indiv_u_dim, out_features=self.hyper_edge_num),
            nn.ReLU(),
        )
        self.hidden_dim = 64
        self.encoder_1 = nn.ModuleList([Encoder(HGCN(self.n_hyper_edge, 1, self.hidden_dim, self.n_agents),
                                                self.indiv_u_dim) for _ in range(self.head_num)])
        self.encoder_2 = nn.ModuleList([Encoder(HGCN(self.n_hyper_edge, 1, self.hidden_dim, self.n_agents),
                                                self.indiv_u_dim) for _ in range(self.head_num)])
        # State-conditioned hypernetworks (first mixing stage).
        self.hyper_weight_layer_1 = nn.Sequential(
            nn.Linear(self.state_dim, self.hyper_hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hyper_hidden_dim, self.n_agents)
        )
        self.hyper_const_layer_1 = nn.Sequential(
            nn.Linear(self.state_dim, self.hyper_hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hyper_hidden_dim, self.n_agents)
        )
        # State-conditioned hypernetworks (final scalar stage).
        self.hyper_weight_layer = nn.Sequential(
            nn.Linear(self.state_dim, self.hyper_hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hyper_hidden_dim, self.n_agents)
        )
        self.hyper_const_layer = nn.Sequential(
            nn.Linear(self.state_dim, self.hyper_hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hyper_hidden_dim, 1)
        )

    def build_hyper_net(self, indiv_us):
        """Build the (batch, n_agents, n_hyper_edge) incidence matrix from
        individual observations."""
        out = self.hyper_edge_net(indiv_us)
        out = out.reshape([out.shape[0], self.n_agents, -1])
        if self.use_one_hot:
            one_hot = torch.eye(self.n_agents)
            one_hot = one_hot.flatten().cuda()
            # Scale the identity edges to the same magnitude as learned ones.
            mean = out.clone().detach().mean()
            one_hot = one_hot * mean
            one_hot = one_hot.repeat(indiv_us.shape[0], 1).reshape(
                [indiv_us.shape[0], self.n_agents, -1]).cuda()
            out = torch.cat([out, one_hot], dim=-1)
        return out.reshape([out.shape[0], out.shape[1], -1])

    def forward(self, agent_qs, states, indiv_us):
        """Mix per-agent Qs into Q_tot of shape (batch, seq, 1)."""
        bs = agent_qs.size(0)
        sl = agent_qs.size(1)
        # Flatten (batch, seq) into one leading dimension.
        agent_qs = agent_qs.view(-1, agent_qs.size(-1))
        indiv_us = indiv_us.reshape(-1, indiv_us.size(-2), indiv_us.size(-1))
        hyper_graph = self.build_hyper_net(indiv_us)
        states = states.reshape(-1, states.size(-1))
        hyper_graph = hyper_graph.reshape(-1, hyper_graph.size(-2), hyper_graph.size(-1))
        # Treat each agent's Q value as a 1-dim node feature.
        node_features = agent_qs.unsqueeze(dim=-1)
        qs_tot = self.encoder_2[0](self.encoder_1[0].forward(node_features, hyper_graph),
                                   hyper_graph).squeeze(dim=-1)
        # Stage 1: per-agent monotonic mixing (|weights| keep dQtot/dQi >= 0).
        hyper_weight_1 = torch.abs(self.hyper_weight_layer_1(states))
        hyper_const_1 = self.hyper_const_layer_1(states)
        q_tot = (qs_tot * hyper_weight_1) + hyper_const_1
        if self.use_elu:
            q_tot = F.elu(q_tot)
        # Stage 2: collapse to a scalar per timestep.
        hyper_weight = torch.abs(self.hyper_weight_layer(states))
        hyper_const = self.hyper_const_layer(states).squeeze(dim=-1)
        q_tot = (q_tot * hyper_weight).sum(dim=-1) + hyper_const.squeeze(dim=-1).squeeze(dim=-1)
        return q_tot.view(bs, sl, 1)
#!/usr/bin/env python3.5 # driver.py # # William N. Sexton and Simson L. Garfinkel # # Major Modification log: # 2018-06-12 bam - refactored DAS to modularize code found in the run function # 2017-12-10 slg - refactored the creation of objects for the DAS() object. # 2017-11-19 slg - rewrite for abstract modular design, created experiment runner # 2017-08-10 wns - initial framework working # 2017-07-20 slg - created file """ This is the main driver for the Disclosure Avoidance Subsystem (DAS). It executes the disclosure avoidance programs: it runs a setup module and data reader, runs the selected DAS engine, calls the output writer, and evaluates the output against the input. For systems that use Apache Spark, the driver run command is: spark-submit driver.py path/to/config.ini For systems that do not use Spark, the driver run command is: python3 driver.py path/to/config.ini or: python3 path/to/driver.py config.ini Note that the driver.py can be included and run in another program. """ import sys import os import datetime import json import logging import logging.handlers import re import time import zipfile import numpy import __main__ from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter from configparser import ConfigParser, NoOptionError, NoSectionError from fractions import Fraction UNKNOWN_VARIABLES_IGNORE = ['APPLICATIONID'] # DAS-specific libraries try: import ctools import ctools.clogging as clogging import experiment import dfxml_writer from certificate.bom import get_bom from ctools.hierarchical_configparser import HierarchicalConfigParser except ModuleNotFoundError: # Try relative to parent. 
# This is required when running out of the ZIP file on the spark worker # import das_framework.ctools as ctools import das_framework.ctools.clogging as clogging import das_framework.experiment as experiment import das_framework.dfxml_writer as dfxml_writer from das_framework.certificate.bom import get_bom from das_framework.ctools.hierarchical_configparser import HierarchicalConfigParser DEFAULT = 'DEFAULT' ENVIRONMENT = "ENVIRONMENT" SETUP = "setup" READER = "reader" ENGINE = "engine" ERROR_METRICS = "error_metrics" WRITER = "writer" VALIDATOR = "validator" TAKEDOWN = "takedown" RUN_SECTION = "run" # LOGGING LOGGING_SECTION = 'logging' LOGFILENAME_OPTION = 'logfilename' LOGLEVEL_OPTION = 'loglevel' LOGFOLDER_OPTION = 'logfolder' ROOT = 'root' # where the experiment is running LOGFILENAME = 'logfilename' # DEFAULT_LOGFILENAME = 'das' OUTPUT_FNAME = 'output_fname' OUTPUT_DIR = "output_dir" # EXPERIMENT values EXPERIMENT = 'experiment' RUN_EXPERIMENT_FLAG = "run_experiment_flag" EXPERIMENT_SCAFFOLD = 'scaffold' EXPERIMENT_DIR = 'dir' # the directory in which the experiment is taking place EXPERIMENT_CONFIG = 'config' # the name of the configuration file EXPERIMENT_XLABEL = 'xlabel' # what to label the X axis EXPERIMENT_YLABEL = 'ylabel' # what to label the Y axis EXPERIMENT_GRID = 'grid' # Draw the grid? True/False EXPERIMENT_GRAPH_FNAME = 'graph_fname' # filename for figure we are saving EXPERIMENT_GRAPH_DATA_FNAME = 'graph_data_fname' # Filename for the graph data EXPERIMENT_AVERAGEX = 'averagex' # should all Y values for a certain X be averaged? 
EXPERIMENT_TITLE = 'title' EXPERIMENT_DRAW_LEGEND = 'draw_legend' EXPERIMENT_GRAPHX = 'graphx' EXPERIMENT_GRAPHY = 'graphy' RUN_TYPE = 'run_type' DEV_RUN_TYPE = 'dev' PROD_RUN_TYPE = 'prod' DEV_RUN_TYPE_PATH = f'$DAS_S3ROOT/runs/{DEV_RUN_TYPE}/$JBID' PROD_RUN_TYPE_PATH = f'$DAS_S3ROOT/runs/{PROD_RUN_TYPE}/$JBID' OUTPUT_PATH = 'output_path' WRITER = "writer" CUI_LABEL = '(CUI' + r'//' + 'SP-CENS) ' def config_apply_environment(config): """Look for the ENVIRONMENT section and apply the variables to the environment Note: By default, section names are case sensitive, but variable names are not. Because the convention is that environment variables are all upper-case, we uppercase them. Then put all of the environment variables into CONFIG. That's so they will be available on the driver """ if ENVIRONMENT in config: for var in config[ENVIRONMENT]: name = var.upper() value = config[ENVIRONMENT][var] logging.info("EXPORT {}={}".format(name, value)) os.environ[name] = value else: config.add_section(ENVIRONMENT) # copy over the environment variables specified in [setup] environment for var in config.get(SETUP, ENVIRONMENT, fallback='').split(","): if var in os.environ: config.set(ENVIRONMENT, var, os.environ[var]) VARIABLE_RE = re.compile(r"([$][A-Za-z0-9_]+)") def config_validate(config, extra_sections=None): """Make sure mandatory sections exist and that all $variables are defined in the environment""" if extra_sections is None: extra_sections = [] for section in [SETUP, READER, ENGINE, WRITER, VALIDATOR, TAKEDOWN] + extra_sections: if section not in config: logging.error("config file missing section [{}]".format(section)) raise RuntimeError("config file missing section [{}]".format(section)) errors = [] for section in config.sections(): logging.info(f"Validating config section [{section}]") for option in config.options(section): val = config.get(section, option) for var in VARIABLE_RE.findall(val): if var[1:] not in os.environ and var[1:] not in UNKNOWN_VARIABLES_IGNORE: 
logging.error(f"[{section}] option {option} variable {var} not in environment") errors.append((section, option, val, var)) if errors: print("Current Environment:", file=sys.stderr) for (key, val) in sorted(os.environ.items()): print(f" {key}={val}", file=sys.stderr) print("\nUnknown variables:", file=sys.stderr) message = "\nUnknown variables in config file:\n" for (section, option, val, var) in errors: message += f" [{section}] {option}: {val} ({var} is undefined)\n" raise ValueError(message) ### numpy integers can't be serialized; we need our own serializer ### https://stackoverflow.com/questions/27050108/convert-numpy-type-to-python/27050186#27050186 class DriverEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, numpy.integer): return int(obj) elif isinstance(obj, numpy.floating): return float(obj) elif isinstance(obj, numpy.ndarray): return obj.tolist() else: return super(DriverEncoder, self).default(obj) def strtobool(val, default=None): if val in ["", None] and default is not None: return default v = val.lower() if v in ['y', 'yes', 't', 'true', 'on', '1']: return True if v in ['n', 'no', 'f', 'false', 'off', '0']: return False raise ValueError(f"The value {v} cannot be converted to boolean") class AbstractDASModule: def __init__(self, *, name, config, das, setup=None, output_path=None, **kwargs): assert isinstance(config, ConfigParser) self.name = name self.config = config self.setup = setup self.das = das self.output_path = output_path def log_and_print(self, *args, **kwargs): self.das.log_and_print(*args, **kwargs) def log_warning_and_print(self, *args, **kwargs): self.das.log_warning_and_print(*args, **kwargs) def running_time(self): return self.das.running_time() def add_output_path(self, path): self.das.output_paths.append(path) def annotate(self, *args, **kwargs): self.das.annotate(*args, **kwargs) @staticmethod def do_expandvars(val, expandvars): if expandvars: val = val.replace("$$", str(os.getpid())) val = os.path.expandvars(val) 
return val def getconfig(self, key, default=None, section=None, expandvars=True): """if expandvars is None and key ends with _fname, expandvars is set to True. Otherwise it is set to false. """ if section is None: section = self.name try: val = self.do_expandvars(self.config.get(section, key), expandvars) logging.debug("config[{}][{}]={}".format(section, key, val)) return val except (NoOptionError, NoSectionError) as err: if default is not None: val = self.do_expandvars(str(default), expandvars) logging.info("config[{}][{}] not present; returning default {}".format(section, key, val)) return val msg = "config[{}][{}] does not exist and no default provided".format(section, key) logging.error(msg) err.message = msg + " :: " + err.message raise err def getint(self, key, section=None, **kwargs): val = self.getconfig(key, section=section, **kwargs) if section is None: section = self.name try: intval = int(val) return intval except ValueError: err_msg = f"Config option \"[{section}]/{key}\" value ( \"{val}\" ) cannot be converted to int" logging.error(err_msg) raise ValueError(err_msg) def getfloat(self, key, section=None, **kwargs): val = self.getconfig(key, section=section, **kwargs) if section is None: section = self.name try: floatval = float(val) return floatval except ValueError: err_msg = f"Config option \"[{section}]/{key}\" value ( \"{val}\" ) cannot be converted to float" logging.error(err_msg) raise ValueError(err_msg) def getfraction(self, key, section=None, **kwargs): val = self.getconfig(key, section=section, **kwargs) if section is None: section = self.name try: fracval = Fraction(val) return fracval except ValueError: err_msg = f"Config option \"[{section}]/{key}\" value ( \"{val}\" ) cannot be converted to Fraction" logging.error(err_msg) raise ValueError(err_msg) def getboolean(self, key, default=None, section=None): # https://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python # Language flaw! 
val = self.getconfig(key, section=section, default=default) if section is None: section = self.name try: boolval = strtobool(val, default=default) return boolval except ValueError: err_msg = f"Config option \"[{section}]/{key}\" value ( \"{val}\" ) cannot be converted to boolean" logging.error(err_msg) raise ValueError(err_msg) def getiter(self, key, sep=',', **kwargs): return map(lambda s: s.strip(), re.split(sep, self.getconfig(key, **kwargs))) def gettuple(self, key, default=None, **kwargs): try: tuple_val = tuple(self.getiter(key, **kwargs)) return tuple_val except (NoOptionError, NoSectionError) as err: if default is not None: return default raise err def getiter_of_ints(self, key, **kwargs): return map(int, self.getiter(key, **kwargs)) def gettuple_of_ints(self, key, **kwargs): try: return tuple(self.getiter_of_ints(key, **kwargs)) except ValueError as err: err_msg = f"Some of elements of \"{self.getconfig(key, **kwargs)}\" cannot be converted to int; " + str(err.args[0]) logging.error(err_msg) raise ValueError(err_msg) def getiter_of_floats(self, key, **kwargs): return map(float, self.getiter(key, **kwargs)) def gettuple_of_floats(self, key, **kwargs): try: return tuple(self.getiter_of_floats(key, **kwargs)) except ValueError as err: err_msg = f"Some of elements of \"{self.getconfig(key)}\" cannot be converted to float; " + str(err.args[0]) logging.error(err_msg) raise ValueError(err_msg) def getiter_of_fractions(self, key, **kwargs): return map(Fraction, self.getiter(key, **kwargs)) def gettuple_of_fractions(self, key, **kwargs): try: return tuple(self.getiter_of_fractions(key, **kwargs)) except ValueError as err: err_msg = f"Some of elements of \"{self.getconfig(key)}\" cannot be converted to Fraction" + str(err.args[0]) logging.error(err_msg) raise ValueError(err_msg) def getiter_of_fraction2floats(self, key, **kwargs): return map(float, self.gettuple_of_fractions(key, **kwargs)) def gettuple_of_fraction2floats(self, key, **kwargs): try: return 
tuple(self.getiter_of_fraction2floats(key, **kwargs)) except ValueError as err: err_msg = f"Some of elements of \"{self.gettuple_of_fractions(key, **kwargs)}\" cannot be converted to Fraction then to float; " + str(err.args[0]) logging.error(err_msg) raise ValueError(err_msg) def getconfitems(self, section): """ !! ONLY WORKS with regular ConfigParser! Doesn't work with HierarchicalConfigParser, since it explicitly adds the stuff from the DEFAULT section to other sections Filters out DEFAULTs from config items of the section, :param section: section of config files :return: iterator of config items in the section """ if isinstance(self.config, HierarchicalConfigParser): self.log_and_print("Trying to filter out [DEFAULT] section items from config that is not regular ConfigParser, but HierarchicalConfigParser. If this causes " "problems, try --nohierconfig command option when starting the DAS.") if self.config.has_section(section): return list(filter(lambda item: item not in self.config.items(self.config.default_section), self.config.items(section))) else: return {} class AbstractExperiment(AbstractDASModule): def __init__(self, das=None, **kwargs): super().__init__(das=das, **kwargs) self.das = das def runExperiment(self): return None class AbstractDASExperiment(AbstractExperiment): """This is the experiment driver. This is where the loops will be done. It brings in the experiment module. Do not import this at top level to avoid It being imported if we are shipped off to spark. """ def __init__(self, **kwargs): super().__init__(**kwargs) self.loops = experiment.build_loops(self.config) self.state = experiment.initial_state(self.loops) def increment_state(self): """ Given a set of loops and a state, increment the state to the next position, handling roll-over. Return the next state. If we are finished, return None. 
""" self.state = experiment.increment_state(self.loops, self.state) return self def substitute_config(self): """Generate a new config given a current config and a state of the loops.""" for rank in range(len(self.loops)): section = self.loops[rank][0] var = self.loops[rank][1] self.das.config[section][var] = str(self.state[rank]) return self def runExperiment(self): scaffold = Scaffolding(config=self.config) scaffold.experimentSetup() while self.state is not None: self.substitute_config() DAS(config=self.config).run() self.increment_state() scaffold.experimentTakedown() return None def experimentSetup(self): pass def experimentTakedown(self): pass class AbstractDASSetup(AbstractDASModule): def __init__(self, **kwargs): super().__init__(**kwargs) def setup_func(self): """Setup Function. Note special name.""" return None class AbstractDASReader(AbstractDASModule): def __init__(self, **kwargs): super().__init__(**kwargs) def willRead(self): return True def read(self): """Read the data; return a reference. 
Location to read specified in config file.""" return None # no read data in prototype def didRead(self): return class AbstractDASEngine(AbstractDASModule): def __init__(self, **kwargs): super().__init__(**kwargs) def willRun(self): return True def run(self, original_data): """Nothing to do in the prototype""" return def didRun(self): return class AbstractDASErrorMetrics(AbstractDASModule): def __init__(self, **kwargs): super().__init__(**kwargs) def willRun(self): return True def run(self, data): """Nothing to do in the prototype""" return None def didRun(self): return class AbstractDASWriter(AbstractDASModule): def __init__(self, **kwargs): super().__init__(**kwargs) def willWrite(self): return True def write(self, privatized_data): """Return the written data""" return privatized_data # by default, just return the privatized_data, nothing is written def didWrite(self): return class AbstractDASValidator(AbstractDASModule): def __init__(self, **kwargs): super().__init__(**kwargs) def willValidate(self): return True def validate(self, original_data, written_data_reference, **kwargs): """No validation in prototype""" return True def didValidate(self): return def storeResults(self, data): """data is a dictionary with results. The default implementation stores them in a file called 'results' specified in the config file""" with open(self.getconfig('results_fname', default='results.json'), "a") as f: json.dump(data, f, cls=DriverEncoder) f.write("\n") class AbstractDASTakedown(AbstractDASModule): def __init__(self, **kwargs): super().__init__(**kwargs) def willTakedown(self): return True def takedown(self): """No takedown in prototype""" return True def removeWrittenData(self, reference): """Delete what's referred to by reference. 
Do not call superclass""" raise RuntimeError("No method defined to removeWrittenData({})".format(reference)) def didTakedown(self): return True class Scaffolding(object): """ Scaffolding for an experiment""" def __init__(self, config): assert isinstance(config, ConfigParser) self.config = config scaffoldstr = config[EXPERIMENT].get(EXPERIMENT_SCAFFOLD, None) if not scaffoldstr: logging.info("No scaffolding") self.scaffold = None return (scaffold_file, scaffold_class_name) = scaffoldstr.split(".") try: scaffold_module = __import__(scaffold_file) if scaffold_file else None except ModuleNotFoundError as e: logging.exception("Scaffolding import failed. current directory: {}".format(os.getcwd())) raise e self.scaffold = getattr(scaffold_module, scaffold_class_name)(config=config) def experimentSetup(self): if self.scaffold: self.scaffold.experimentSetup(self.config) def experimentTakedown(self): if self.scaffold: self.scaffold.experimentTakedown(self.config) class DAS: """ The Disclosure Avoidance System Class. The DAS() class is a class that returns a singleton of the DAS._DAS class, which is where the action takes place. 
""" instance = None def __init__(self, *args, config, **kwargs): if not DAS.instance: DAS.instance = DAS._DAS(*args, config=config, **kwargs) else: reader_class_name, reader_module = DAS.instance.load_module(config, READER, READER, 'driver', 'AbstractDASReader') engine_class_name, engine_module = DAS.instance.load_module(config, ENGINE, ENGINE, 'driver', 'AbstractDASEngine') error_metrics_class_name, error_metrics_module = DAS.instance.load_module(config, ERROR_METRICS, ERROR_METRICS, 'driver', 'AbstractDASErrorMetrics') writer_class_name, writer_module = DAS.instance.load_module(config, WRITER, WRITER, 'driver', 'AbstractDASWriter') validator_class_name, validator_module = DAS.instance.load_module(config, VALIDATOR, VALIDATOR, 'driver', 'AbstractDASValidator') logging.debug( "classes: {} {} {} {} {}".format(engine_class_name, error_metrics_class_name, reader_class_name, writer_class_name, validator_class_name)) # Create the instances logging.debug( "modules: {} {} {} {} {}".format(engine_module, error_metrics_module, reader_module, writer_module, validator_module)) DAS.instance.writer = getattr(writer_module, writer_class_name)(config=config, setup=DAS.instance.setup_data, name=WRITER, das=DAS.instance) DAS.instance.reader = getattr(reader_module, reader_class_name)(config=config, setup=DAS.instance.setup_data, name=READER, das=DAS.instance) DAS.instance.engine = getattr(engine_module, engine_class_name)(config=config, setup=DAS.instance.setup_data, name=ENGINE, das=DAS.instance) DAS.instance.error_metrics = getattr(error_metrics_module, error_metrics_class_name)(config=config, setup=DAS.instance.setup_data, name=ERROR_METRICS, das=DAS.instance) DAS.instance.validator = getattr(validator_module, validator_class_name)(config=config, setup=DAS.instance.setup_data, name=VALIDATOR, das=DAS.instance) logging.debug("DAS modules recreated") def __getattr__(self, name): return getattr(self.instance, name) def __setattr__(self, name, value): setattr(self.instance, name, 
value) class _DAS: def __init__(self, *, config, dfxml_writer=None, logfilename=None, printing_disabled=False, args=None, creating_bom=False, delegate=None): """ Initialize a DAS given a config file. This creates all of the objects that will be used""" assert isinstance(config, ConfigParser) self.args = args self.config = config self.dfxml_writer = dfxml_writer self.logfilename = logfilename self.output_paths = [] # all output paths self.t0 = time.time() self.annotation_hooks = [] # called for annotate self.printing_disabled = printing_disabled self.delegate = delegate args_create_bom = args.print_bom or args.make_release if args is not None else False self.creating_bom = creating_bom or args_create_bom # Create output_path # output_path_raw = config[CC.WRITER][CC.OUTPUT_PATH] # run_type = config[CC.WRITER][CC.RUN_TYPE] # Get the input file and the class for each logging.debug("Reading filenames and class names from config file") # This section can possibly combined with the following section importing the modules and creating the objects, # so that the default objects can be created by just using AbstractDASxxxxxx() constructor setup_class_name, setup_module = self.load_module(config, SETUP, SETUP, 'driver', 'AbstractDASSetup') reader_class_name, reader_module = self.load_module(config, READER, READER, 'driver', 'AbstractDASReader') engine_class_name, engine_module = self.load_module(config, ENGINE, ENGINE, 'driver', 'AbstractDASEngine') error_metrics_class_name, error_metrics_module = self.load_module(config, ERROR_METRICS, ERROR_METRICS, 'driver', 'AbstractDASErrorMetrics') writer_class_name, writer_module = self.load_module(config, WRITER, WRITER, 'driver', 'AbstractDASWriter') validator_class_name, validator_module = self.load_module(config, VALIDATOR, VALIDATOR, 'driver', 'AbstractDASValidator') takedown_class_name, takedown_module = self.load_module(config, TAKEDOWN, TAKEDOWN, 'driver', 'AbstractDASTakedown') logging.debug( "classes: {} {} {} {} {} {} 
{}".format(setup_class_name, engine_class_name, error_metrics_class_name, reader_class_name, writer_class_name, validator_class_name, takedown_class_name)) # Create the instances logging.debug( "modules: {} {} {} {} {} {} {}".format(setup_module, engine_module, error_metrics_module, reader_module, writer_module, validator_module, takedown_module)) logging.info("Creating and running DAS setup object") setup_obj = getattr(setup_module, setup_class_name)(config=config, name=SETUP, das=self) self.setup_data = setup_obj.setup_func() logging.debug("DAS setup returned {}".format(self.setup_data)) # Now create the other objects self.writer = getattr(writer_module, writer_class_name)(config=config, setup=self.setup_data, name=WRITER, das=self) self.reader = getattr(reader_module, reader_class_name)(config=config, setup=self.setup_data, name=READER, das=self) self.engine = getattr(engine_module, engine_class_name)(config=config, setup=self.setup_data, name=ENGINE, das=self) self.error_metrics = getattr(error_metrics_module, error_metrics_class_name)(config=config, setup=self.setup_data, name=ERROR_METRICS, das=self) self.validator = getattr(validator_module, validator_class_name)(config=config, setup=self.setup_data, name=VALIDATOR, das=self) self.takedown = getattr(takedown_module, takedown_class_name)(config=config, setup=self.setup_data, name=TAKEDOWN, das=self) logging.debug("DAS object complete") @staticmethod def load_module(config, section, option, default_file, default_class): try: (module_file, module_class_name) = config.get(section=section, option=option).rsplit(".", 1) except (NoSectionError, NoOptionError) as e: msg = (f"Option {option} in section [{section}] not found when specifying module to load, substituting default {default_file}.{default_class}\n{e}") print(msg) logging.info(msg) (module_file, module_class_name) = (default_file, default_class) try: module = __import__(module_file, fromlist=[module_class_name]) except ImportError as e: err_msg = f"Module 
{module_file} import failed.\nCurrent directory: {os.getcwd()}\nFile:{__file__}\nsys.path:{sys.path}\n{e.args[0]}" logging.error(err_msg) raise ImportError(err_msg) logging.debug("__import__ file: {}".format(module_file)) return module_class_name, module def timestamp(self, message): try: self.dfxml_writer.timestamp(message) except AttributeError: pass logging.info(message) def make_bom_only(self): """Is this specific DAS making a bom? If so, do not launch Java or other expensive operations, just load the modules and exit.""" return self.creating_bom def log_and_print_cui(self, log_func, print_func, message, cui=False): assert os.getenv("ISMASTER")!='false' if not cui: log_func(message) else: message = CUI_LABEL + " " + message if not self.printing_disabled: print_func(message) def log_and_print(self, message, cui=False): assert os.getenv("ISMASTER")!='false' self.log_and_print_cui(lambda m: logging.info(m), lambda m: print(f"INFO: {m}"), message, cui=cui) def log_warning_and_print(self, message, cui=False): # logging.warning may generate a console output assert os.getenv("ISMASTER")!='false' self.log_and_print_cui(lambda m: logging.warning(m), lambda m: print(f"WARNING: {m}"), message, cui=cui) def add_annotation_hook(self, hook): self.annotation_hooks.append(hook) def annotate(self, message, verbose=True): """ Annotate the DFXML file. If verbose=True, also print. Must be run on the master node. 
""" assert os.getenv("ISMASTER")!='false' when = time.asctime()[11:16] whent = round(self.running_time(),2) print(f"ANNOTATE: {when} t={whent} {message}") def runReader(self): self.timestamp("runReader: Creating and running DAS reader") if not self.reader.willRead(): logging.info("self.reader.willRead() returned false") raise RuntimeError("reader willRead() returned False") if hasattr(self.delegate,'willRunReader'): self.delegate.willRunReader(self) original_data = self.reader.read() self.reader.didRead() if hasattr(self.delegate,'didRunReader'): self.delegate.didRunReader(self) logging.debug("original_data={}".format(original_data)) return original_data def runEngine(self, original_data): self.timestamp("runEngine: Creating and running DAS engine") if not self.engine.willRun(): logging.info("self.engine.willRun() returned false") raise RuntimeError("engine willRun() returned False") if hasattr(self.delegate, 'willRunEngine'): self.delegate.willRunEngine(self) privatized_data = self.engine.run(original_data) if hasattr(self.delegate, 'didRunEngine'): self.delegate.didRunEngine(self) self.engine.didRun() logging.debug("privatized_data={}".format(privatized_data)) return privatized_data def runErrorMetrics(self, privatized_data): self.timestamp("runErrorMetrics: Creating and running DAS error_metrics") if not self.error_metrics.willRun(): logging.info("self.error_metrics.willRun() returned false") raise RuntimeError("error_metrics willRun() returned False") if hasattr(self.delegate, 'willRunErrorMetrics'): self.delegate.willRunErrorMetrics(self) error_metrics_data = self.error_metrics.run(privatized_data) if hasattr(self.delegate, 'didRunErrorMetrics'): self.delegate.didRunErrorMetrics(self) logging.debug("Error Metrics data = {}".format(error_metrics_data)) self.error_metrics.didRun() return error_metrics_data def runWriter(self, privatized_data): self.timestamp("runWriter: Creating and running DAS writer") if not self.writer.willWrite(): 
logging.info("self.writer.willWrite() returned false") raise RuntimeError("engine willWrite() returned False") if hasattr(self.delegate, 'willRunWriter'): self.delegate.willRunWriter(self) written_data = self.writer.write(privatized_data) if hasattr(self.delegate, 'didRunWriter'): self.delegate.didRunWriter(self) logging.debug("written_data={}".format(written_data)) self.writer.didWrite() return written_data def runValidator(self, original_data, written_data): self.timestamp("runValidator: Creating and running DAS validator") if not self.validator.willValidate(): logging.info("self.validator.willValidate() returned false") raise RuntimeError("validator willValidate() returned False") if hasattr(self.delegate, 'willRunValidator'): self.delegate.willRunValidator(self) valid = self.validator.validate(original_data, written_data) if hasattr(self.delegate, 'didRunValidator'): self.delegate.didRunValidator(self) logging.debug("valid={}".format(valid)) if not valid: logging.info("self.validator.validate() returned false") raise RuntimeError("Did not validate.") self.validator.didValidate() # If we were asked to get graphx and graphy, get it. 
data = {} if EXPERIMENT in self.config: for var in ['graphx', 'graphy']: if var in self.config[EXPERIMENT]: (a, b) = self.config[EXPERIMENT][var].split('.') assert a == 'validator' func = getattr(self.validator, b) data[var] = func() # Finally take down return valid def runTakedown(self, written_data): self.timestamp("runTakedown: Creating and running DAS takedown") if not self.takedown.willTakedown(): logging.info("self.takedown.willTakedown() returned false") raise RuntimeError("validator willTakedown() returned False") if hasattr(self.delegate, 'willRunTakedown'): self.delegate.willRunTakedown(self) self.takedown.takedown() if hasattr(self.delegate, 'didRunTakedown'): self.delegate.didRunTakedown(self) if self.takedown.getboolean("delete_output", False): logging.info("deleting output {}".format(written_data)) self.takedown.removeWrittenData(written_data) self.takedown.didTakedown() def run(self): """ Run the DAS. Returns data collected as a dictionary if an EXPERIMENT section is specified in the config file.""" # First run the engine and write the results # Create the instances is now done when running self.timestamp("run") if hasattr(self.delegate, 'willRun'): self.delegate.willRun(self) original_data = self.runReader() privatized_data = self.runEngine(original_data) error_metrics_data = self.runErrorMetrics(privatized_data) written_data = self.runWriter(privatized_data) valid = self.runValidator(original_data, written_data) self.runTakedown(written_data) if hasattr(self.delegate, 'didRun'): self.delegate.didRun(self) data = {} return data def running_time(self): return time.time() - self.t0 # Only include with these suffixes BOM_INCLUDE_SUFFIXES = ['Makefile', '.md', '.doc', '.pdf', '.docx'] BOM_ALLOW_SUFFIXES = ['.py', '.ini'] BOM_ALLOWED_SUFFIXES = set(BOM_INCLUDE_SUFFIXES + BOM_ALLOW_SUFFIXES) BOM_OMIT_DIRS = ['hdmm', 'legacy_code', 'etl_2020', '.cache', '__pycache__', '.git', '.github'] def get_das_dir(): return os.path.dirname(os.path.abspath( 
__main__.__file__ )) def bom_files(*, config, das=None, args=None): """ Returns the bill of materials, relative to the current directory. Includes any files with BOM_SUFFIXES and no files in BOM_OMIT_DIRS BOM is for the given DAS object. If none is provided, make one. The only purpose of making the das object is to assure that the config files are loaded. """ if das is None: if args is None: raise ValueError("args must be provided if das is None") logging.disable(sys.maxsize) das = DAS(config=config, args=args, printing_disabled=True, creating_bom=True) system_prefix = "/".join(sys.executable.split("/")[0:-2]) suppress_prefixes = [system_prefix, '/usr/lib'] full_paths = set() # start with the bom_files from the certificate printer for (name, path, ver, bytecount) in get_bom(content=False): if not isinstance(path, str): continue if any([path.startswith(prefix) for prefix in suppress_prefixes]): continue if len(path) > 0 and any([path.endswith(suffix) for suffix in BOM_ALLOWED_SUFFIXES]): full_paths.add(path) # add in the config files for path in config.seen_files: full_paths.add(path) # walk the file system from the DAS_DIR and find any filenames with the requested suffixes DAS_DIR = get_das_dir() for root, dirs, files in os.walk(DAS_DIR): if any([(dirpart in BOM_OMIT_DIRS) for dirpart in root.split('/')]): continue for fname in files: if any([fname.endswith(suffix) for suffix in BOM_INCLUDE_SUFFIXES]): full_paths.add(os.path.join(root, fname)) pruned_full_paths = [ (path[len(DAS_DIR)+1:] if path.startswith(DAS_DIR) else path) for path in full_paths] return list(sorted(pruned_full_paths)) def print_bom(*, config, das=None, args=None, file=sys.stdout): """Print a bom :param config: the config file :param das: the das object :param args: any special arguments :param file: where the output goes """ for path in bom_files(config=config, das=das, args=args): print(path, file=file) def make_release(*, config, zipfilename, args, verbose=False): """Given a config and a set 
of arguments, create a named zipfile @param config - INPUT - the loaded config to use (loaded) @param zipfilename - OUTPUT - where to write the zip file @param args - INPUT - arguments provided to bom_files. Typically the args from ArgumentParser. """ if os.path.exists(zipfilename): os.unlink(zipfilename) with zipfile.ZipFile(zipfilename, 'w', zipfile.ZIP_DEFLATED) as zf: for filename in bom_files(config=config, args=args): try: zf.write(filename, filename) except FileNotFoundError: logging.warning("Could not add file %s to zipfile",filename) else: if verbose: print("{} -> {}".format(filename, zipfilename)) return zipfilename def main_setup(additional_args = []): """ Setup the DAS system logging, parses arguments and loads the configuration file, returning the args and config objects. """ parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("config", help="Main Config File") parser.add_argument("--print_bom", help="Output a bill of materials", action='store_true') parser.add_argument("--make_release", help="Create a zip file with all of the files necessary to run the DAS. Similar to print_bom") parser.add_argument("--experiment", help="Run an experiment according to the [experiment] section, with the results in this directory") parser.add_argument("--isolation", help="Specifies isolation mode for experiments", choices=['sameprocess', 'subprocess'], default='sameprocess') parser.add_argument("--graphdata", help="Just draw the graph from the data that was already collected.", action='store_true') parser.add_argument("--logfilename", help="Specify logfilename, otherwise auto-generate") parser.add_argument("--nohierconfig", help='Use regular Python configparser. 
ConfigParser instead of ctools.HierarchicalConfigParser', action="store_true") parser.add_argument("--dump_config", help="dump the config file, then exit", action='store_true') parser.add_argument("--get", help="output the section:option:default from the config file, then exit") parser.add_argument("--dry-run", help="Dry run; do not run the algorithm", action='store_true') for (args,kwargs) in additional_args: parser.add_argument(*args, **kwargs) clogging.add_argument(parser) args = parser.parse_args() if not os.path.exists(args.config): raise RuntimeError("{} does not exist".format(args.config)) if args.graphdata and args.experiment is None: parser.error("--graphdata requires --experiment") ### ### Read the configuration file and handle config-related options ### config = ConfigParser() if args.nohierconfig else HierarchicalConfigParser() config.read(args.config) if args.dump_config: config.write(sys.stdout) exit(0) if args.get: if args.get.count(":")!=2: raise ValueError("Specify section:option:default as the --get argument") (section, option, default) = args.get.split(":") if (section in config) and (option in config[section]): print(config[section][option]) else: print(default) exit(0) ### ### Logging must be set up before any logging is done ### By default it is in the current directory, but if we run an experiment, put the logfile in that directory ### Added option to put logs in a subfolder specified in the config if not args.logfilename: isodate = datetime.datetime.now().isoformat()[0:19] if (config.has_section(LOGGING_SECTION) and config.has_option(LOGGING_SECTION, LOGFOLDER_OPTION) and config.has_option(LOGGING_SECTION, LOGFILENAME_OPTION)): args.logfilename = (f"{config[LOGGING_SECTION][LOGFOLDER_OPTION]}/" f"{config[LOGGING_SECTION][LOGFILENAME_OPTION]}-{isodate}-{os.getpid()}.log") else: args.logfilename = f"{isodate}-{os.getpid()}.log" # CB: Code needs to be removed. 
# Left here for backward compatibility, to be removed in future versions if args.experiment: if not os.path.exists(args.experiment): os.makedirs(args.experiment) if not os.path.isdir(args.experiment): raise RuntimeError("{} is not a directory".format(args.experiment)) config[config.default_section][ROOT] = args.experiment args.logfilename = os.path.join(args.experiment, args.logfilename) if EXPERIMENT not in config: config.add_section(EXPERIMENT) config[EXPERIMENT][RUN_EXPERIMENT_FLAG] = "1" # If we are making the BOM, make a DAS object so the config file gets processed, then make the bom and exit if args.print_bom: print_bom(config=config, args=args) exit(0) if args.make_release: make_release(config=config, zipfilename=args.make_release, args=args) print("Release: {}".format(args.make_release)) exit(0) # # # Make sure the directory for the logfile exists. If not, make it. logdirname = os.path.dirname(args.logfilename) if logdirname and not os.path.exists(logdirname): os.mkdir(logdirname) clogging.setup(args.loglevel, syslog=False, filename=args.logfilename) logging.info("Config path: {}".format(os.path.abspath(args.config))) return args, config def main_make_das(args, config, **kwargs): """ Creates the das object after determining whether to run in experiment mode based on config file. 
""" ############################# # Set up the logging ############################# the_dfxml_writer = dfxml_writer.DFXMLWriter(filename=args.logfilename.replace(".log", ".dfxml"), prettyprint=True) logging.getLogger().addHandler(the_dfxml_writer.logHandler()) ######################### # Set up the experiment # ######################### # if there is no experiment section in the config file, add one if EXPERIMENT not in config: config.add_section(EXPERIMENT) # If there is no run experiment flag in the config section, add it run_experiment = config[EXPERIMENT].getint(RUN_EXPERIMENT_FLAG, 0) if args.experiment: run_experiment = 1 ### Now validate and apply the config file config_apply_environment(config) config_validate(config) ######################### # Create the DAS object # ######################### das = DAS(config=config, args=args, logfilename=args.logfilename, dfxml_writer=the_dfxml_writer, **kwargs) das.experiment = run_experiment return das def main_run_das(das, shutdown_logging_on_exit:bool=True): """ Run the DAS! :param shutdown_logging_on_exit: If True, execute logging.shutdown() after running the DAS. This can result in unexpected errors in logging if logging is used after main_run_das completes. Set to False if logging will be used after this method is run. Defaults to True. """ logging.info("START {}".format(os.path.abspath(__file__))) ############################# # DAS Running Section. 
# Option 1 - run_experiment # Option 2 - just run the das ############################# logging.info("Config file:") for section in das.config.sections(): logging.info(f"[{section}]") for option in das.config.options(section): logging.info(f"{option}: {das.config.get(section,option)}") logging.info("") if das.experiment: # set up the Experiment module logging.debug("== experiment ==") try: (experiment_file, experiment_class_name) = das.config[EXPERIMENT][EXPERIMENT].rsplit(".", 1) except KeyError: (experiment_file, experiment_class_name) = ('driver', 'AbstractDASExperiment') try: experiment_module = __import__(experiment_file, fromlist=[experiment_class_name]) except ImportError as e: print("Module import failed.") print("current directory: {}".format(os.getcwd())) print("__file__: {}".format(__file__)) raise e # Name "experiment" conflicts with imported module experiment_instance = getattr(experiment_module, experiment_class_name)(das=das, config=das.config, name=EXPERIMENT) logging.debug("Running DAS Experiment. Logfile: {}".format(das.logfilename)) experiment_data = experiment_instance.runExperiment() else: #### Run the DAS without an experiment logging.debug("== no experiment ==") try: data = das.run() except Exception as e: raise e ### ### Shutdown ### t = das.running_time() logging.info("Elapsed time: {:6.2f} seconds".format(t)) logging.info("END {}".format(os.path.abspath(__file__))) if shutdown_logging_on_exit: logging.shutdown() print("*****************************************************") print("driver.py: Run completed in {:,.2f} seconds. Logfile: {}".format(t, das.logfilename)) if __name__ == '__main__': (main_args, main_config) = main_setup() main_das = main_make_das(main_args, main_config) main_run_das(main_das)
""" MIT License Copyright (c) 2020 Christoph Kreisl Copyright (c) 2021 Lukas Ruppert Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from core.color import Color4f from core.point import Point3f import typing from stream.stream import Stream from model.intersection_data import IntersectionData from model.user_data import UserData class PathData(UserData): """ PathData Represents one traced path with added user data """ def __init__(self, stream : Stream): super().__init__(stream) self._sample_idx = stream.read_uint() self._path_depth = stream.read_uint() self._path_origin = stream.read_point3f() self._final_estimate = None if stream.read_bool(): self._final_estimate = stream.read_color4f() self._dict_intersections = {} self._intersection_count = stream.read_uint() for i in range(0, self._intersection_count): intersection = IntersectionData(stream) self._dict_intersections[intersection.depth_idx] = intersection @property def final_estimate(self) -> Color4f: """ Returns the Final Estimate value of this path """ return self._final_estimate @property def sample_idx(self) -> int: """ Returns the samples index which indicates the path index """ return self._sample_idx @property def path_origin(self) -> Point3f: """ Returns the path origin """ return self._path_origin @property def path_depth(self) -> int: """ Returns the path depth (amount of bounces and containing vertices) """ return self._path_depth @property def intersections(self) -> typing.Dict[int, IntersectionData]: """ Returns the a dict containing all path vertices """ return self._dict_intersections @property def intersection_count(self) -> int: """ Returns the amount of vertices (intersections) """ return self._intersection_count def valid_depth(self) -> bool: """ Checks if the path depth is valid """ return self._path_depth is not None def to_string(self) -> str: return "SampleIdx = {}\n" \ "PathDepth = {}\n" \ "PathOrigin = {}\n" \ "FinalEstimate = {}\n" \ "ShowPath = {}\n" \ "Intersections = {}\n" \ "IntersectionCount = {}".format(self._sample_idx, self._path_depth, self._path_origin, self._final_estimate, self._dict_intersections, 
self._intersection_count)
import unittest

import numpy as np

from pyfpt.numerics import histogram_normalisation


class TestHistogramNormalisation(unittest.TestCase):
    """Checks histogram_normalisation against numpy's own bin normalisation."""

    def test_histogram_normalisation(self):
        num_data_points = 100000
        # Testing uneven bins
        bins = np.array([-4., -3.3, -3.2, -3., -2.9, -2.4, -1.9, -1.5, -1.45,
                         -1.3, -0.9, -0.6, -0.4, -0.05, 0.1, 0.6, 0.8, 1.,
                         1.3, 1.8, 2., 2.4, 3., 3.1, 3.5, 4.])
        # Need to draw the same random numbers each time, for consistent tests
        np.random.seed(1)
        # Using a normal distribution as mock data
        data = np.random.normal(size=num_data_points)
        # The normalisation per bin used in histogram_normalisation.py should
        # be approximately the same as numpy's. So testing against numpy.
        # This of course requires the total area to be 1 when normalised,
        # which is the case for unweighted data.
        heights_raw, _ = np.histogram(data, bins=bins)
        heights_normed, _ = np.histogram(data, bins=bins, density=True)
        expected = heights_raw / heights_normed
        result = histogram_normalisation(bins, num_data_points)
        # (A redundant no-op "result = result" self-assignment was removed.)
        # Want the relative difference to be small
        diff = np.abs((result - expected) / expected)
        self.assertTrue(all(diff <= 0.001))


# We need the following to execute the tests when we run the file in python
if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.test import TestCase

# Fix: Business and Neighborhood were used below but never imported.
from .models import Profile, Business, Neighborhood

# Create your tests here.


class ProfileClass(TestCase):

    def setUp(self):
        '''
        Set up method to run before each test case.
        '''
        self.new_profile = Profile(name='bakari')

    def test_instance(self):
        '''
        Test case to test if the object is initialized properly
        '''
        self.assertTrue(isinstance(self.new_profile, Profile))

    def test_save_method(self):
        '''
        Test case to test the save functionality
        '''
        self.new_profile.save_profile()
        profiles = Profile.objects.all()
        self.assertTrue(len(profiles) > 0)

    def test_update_method(self):
        '''
        Test case to test the update functionality.

        Fixes: the original used `filter(id==1)` (a comparison, not a
        keyword lookup), called `.update()` with nothing to change, and
        misused `assertTrue(a, b)` where an equality check was intended.
        '''
        self.new_profile.save_profile()
        Profile.objects.filter(id=self.new_profile.id).update(name='james')
        updated_profile = Profile.objects.get(id=self.new_profile.id)
        self.assertEqual(updated_profile.name, 'james')

    def test_profile_delete(self):
        '''
        Test case to test the delete functionality.
        (Fixed `self.profile` -> `self.new_profile` and `get(id==1)`.)
        '''
        self.new_profile.save_profile()
        searched_profile = Profile.objects.get(id=self.new_profile.id)
        searched_profile.delete_profile()
        self.assertEqual(Profile.objects.count(), 0)


class BusinesClass(TestCase):

    def setUp(self):
        '''
        Set up method to run before each test case.
        (Fixed: the original called `self.new_business(...)` before any
        such attribute existed; a Business must be constructed here.)
        '''
        self.new_business = Business(name='Bakes')

    def test_instance(self):
        '''
        Test case to test if the object is initialized properly
        '''
        self.assertTrue(isinstance(self.new_business, Business))

    def test_save_method(self):
        '''
        Test case to test the save functionality
        '''
        self.new_business.save_business()
        businesses = Business.objects.all()
        self.assertTrue(len(businesses) > 0)

    def test_update_method(self):
        '''
        Test case to test the update functionality (same fixes as above).
        '''
        self.new_business.save_business()
        Business.objects.filter(id=self.new_business.id).update(name='Cakes')
        updated_business = Business.objects.get(id=self.new_business.id)
        self.assertEqual(updated_business.name, 'Cakes')

    def test_business_delete(self):
        '''
        Test case to test the delete functionality.
        '''
        self.new_business.save_business()
        searched_business = Business.objects.get(id=self.new_business.id)
        searched_business.delete_business()
        self.assertEqual(Business.objects.count(), 0)


class NeighborHoodClass(TestCase):

    def setUp(self):
        '''
        Set up method to run before each test case.
        '''
        self.new_neighborhood = Neighborhood(name='Kidfarmaco')

    def test_instance(self):
        '''
        Test case to test if the object is initialized properly
        '''
        self.assertTrue(isinstance(self.new_neighborhood, Neighborhood))

    def test_save_method(self):
        '''
        Test case to test the save functionality.
        (Fixed the undefined `neighborhoodS` name.)
        '''
        self.new_neighborhood.save_neighborhood()
        neighborhoods = Neighborhood.objects.all()
        self.assertTrue(len(neighborhoods) > 0)

    def test_update_method(self):
        '''
        Test case to test the update functionality (same fixes as above).
        '''
        self.new_neighborhood.save_neighborhood()
        Neighborhood.objects.filter(
            id=self.new_neighborhood.id).update(name='Karen')
        updated_neighborhood = Neighborhood.objects.get(
            id=self.new_neighborhood.id)
        self.assertEqual(updated_neighborhood.name, 'Karen')

    def test_neighborhood_delete(self):
        '''
        Test case to test the delete functionality.
        '''
        self.new_neighborhood.save_neighborhood()
        searched_neighborhood = Neighborhood.objects.get(
            id=self.new_neighborhood.id)
        searched_neighborhood.delete_neighborhood()
        self.assertEqual(Neighborhood.objects.count(), 0)
# -*- coding: utf8 -*-

"""
Decodes FLAC files into the WAV format.

Author: Eduardo Ferreira
License: MIT (see LICENSE for details)
"""

from anarky.audio.decode import decode_flac_wav
from anarky.enum.description import Description
from anarky.enum.script import Script
from anarky.interface import get_options


def run():
    """Entry point: decode every selected FLAC file into a WAV file."""
    files, destination = get_options(
        Script.FLAC2WAV.value, Description.FLAC2WAV.value, True)
    for audio_file in files:
        decode_flac_wav(audio_file, destination)
for path in ("drift", "accel", "boost", "brake"): speedTables = [] lastSpeed = -999999 lastAngle = 0 with open("turn." + path + ".txt", "r+") as f: for line in f: inSpeed, angle, totalTicks, driftTicks, outSpeed, outX, outY = map(float, line.split("\t")) if inSpeed != lastSpeed: speedTables.append((int(driftTicks), [])) lastSpeed = inSpeed lastAngle = 0 if angle > lastAngle: lastAngle = angle timeStr = f"{int(totalTicks)},\t" if "drift" in path else "" speedTables[-1][1].append(f"{{{angle}f,\t{timeStr}{outSpeed}f,\t{outX}f,\t{outY}f}}") with open("turnTable" + path.capitalize() + ".cpp", "w+") as f: f.write( """#include <drive/turn.h> ManouvreTable<""" + ("DriftManouvre" if "drift" in path else "Manouvre") + """, 2300/50+1> """ + path.upper() + """TABLE = { 0, 50, { """ + ",\n\t\t".join(map(lambda t: "AngleTable<" + ("DriftManouvre" if "drift" in path else "Manouvre") + ">{" + str(t[0]) + ", 5, {\n\t\t\t" + ",\n\t\t\t".join(t[1]) + "\n\t\t}}", speedTables)) + """ } }; """)
from __future__ import print_function, division, absolute_import


class EmptyList(object):
    """Utility that can be numerically indexed, but always returns None.

    If no length or a negative length is passed at construction, the
    list will ALWAYS return None.  If a non-negative length is passed
    at construction, an index 0 <= index < length returns None; any
    other index raises an IndexError.
    """

    def __init__(self, length=-1):
        self.length = length

    def __getitem__(self, idx):
        # A negative length means "unbounded": every index is valid.
        if self.length < 0:
            return None
        if idx >= self.length or idx < 0:
            raise IndexError(
                "index {} out of range for length {}".format(idx, self.length))
        return None


class Color(list):
    """An RGBA color, usable directly as the 4-item list [r, g, b, a].

    Each channel must lie in the inclusive range 0..255; otherwise a
    ValueError is raised.  Validation happens BEFORE any state is set
    (the original assigned all attributes first, so a failed
    construction left a partially-initialized object behind).
    """

    def __init__(self, r, g, b, a):
        for name, channel in (("r", r), ("g", g), ("b", b), ("a", a)):
            if channel > 255 or channel < 0:
                raise ValueError(
                    "channel {} out of range 0..255: {}".format(name, channel))
        list.__init__(self, [r, g, b, a])
        self.r = r
        self.g = g
        self.b = b
        self.a = a


def zoom_fit(screen, bounds, balanced=True):
    """What affine transform will zoom-fit the given items?

    screen: (w, h) of the viewing region
    bounds: (x, y, w, h) of the items to fit; w and h must be non-zero
    balanced: Should the x and y scales match?
    returns: [translate x, translate y, scale x, scale y]
    """
    (sw, sh) = screen
    (gx, gy, gw, gh) = bounds
    x_scale = sw / gw
    y_scale = sh / gh
    if balanced:
        # Use the smaller scale on both axes so everything stays visible.
        x_scale = min(x_scale, y_scale)
        y_scale = x_scale
    return [-gx * x_scale, -gy * y_scale, x_scale, y_scale]
""" A simple set of functions to compute LJ energies and gradients. """ import numpy as np def calc_energy_and_gradient(positions, sigma, epsilon, do_gradient=True): """ Computes the energy and gradient of a expression in the form V_{ij} = 4 \epsilon [ (sigma / r) ^ 12 - (sigma / r)^6] """ # Holds the energy and the energy gradient E = 0.0 if do_gradient: gradient = np.zeros((positions.shape[0], 3)) sigma6 = sigma**6 sigma12 = sigma6**2 # Double loop over all particles for i in range(0, positions.shape[0] - 1): jvals = positions[(i + 1):] dr = jvals - positions[i] r = np.einsum("ij,ij->i", dr, dr) r6 = np.power(r, 6) r12 = np.power(r6, 2) E += np.sum((sigma12 / r12) - (sigma6 / r6)) if do_gradient: g = 12 * r12 - 6 * r6 g *= dr gradient[i] += -np.sum(g) gradient[i:] += g E *= 4.0 * epsilon if do_gradient: gradient *= 4.0 * epsilon return E, gradient else: return E
import sys
import os

# Make the parent directory importable (a typical shim so that modules one
# level above this script/test directory can be imported directly).
sys.path.append(os.path.abspath("../"))
import FWCore.ParameterSet.Config as cms

# CMSSW job: load the CTPPS (very-forward proton spectrometer) geometry
# from the DD XML description, convert it via TGeoMgrFromDdd, and dump it
# to a ROOT file with DumpSimGeometry.
process = cms.Process("CTPPS")

from FWCore.MessageLogger.MessageLogger_cfi import *

process.load("Configuration.StandardSequences.GeometryExtended_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")

from Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi import XMLIdealGeometryESSource_CTPPS
process.XMLIdealGeometryESSource = XMLIdealGeometryESSource_CTPPS.clone()

# One empty event is enough to drive the geometry dump.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)

process.source = cms.Source("EmptySource")

# Geometry producer: field and sensitive detectors are not needed for a dump.
process.prod = cms.EDProducer("GeometryProducer",
    MagneticField = cms.PSet(
        delta = cms.double(1.0)
    ),
    UseMagneticField = cms.bool(False),
    UseSensitiveDetectors = cms.bool(False)
)

# DD -> TGeo conversion; level controls the depth of the converted tree.
process.add_(
    cms.ESProducer("TGeoMgrFromDdd",
        verbose = cms.untracked.bool(False),
        level = cms.untracked.int32(14)
    )
)

process.dump = cms.EDAnalyzer("DumpSimGeometry",
    outputFileName = cms.untracked.string('ctppsGeometry.root')
)

process.p = cms.Path(process.prod + process.dump)
import pathlib
import os
import logging
from datetime import datetime
from typing import Tuple

import numpy as np
import dicom2nifti
from tqdm import tqdm
from glob import glob
from natsort import natsorted

from ballir_dicom_manager.directory_manager import DirManager
from ballir_dicom_manager.file_readers.read_dicom import ReadDicom
from ballir_dicom_manager.file_readers.read_nifti import ReadNifti
from ballir_dicom_manager.file_readers.read_image_label_pair import ReadImageLabelPair
from ballir_dicom_manager.preprocess.dicom_finder import DicomFinder

log = logging.getLogger(__name__)


class PreProcess:
    """Pipeline that cleans raw DICOM series and converts them to NIFTI.

    Given a ".../raw" directory, mirrors it into a sibling
    ".../preprocessed" tree with dicom/ and nifti/ subtrees, each split
    into images/ and labels/ (optionally under a named subgroup).
    """

    # Locates DICOM series directories under the raw root.
    # NOTE(review): class-level attribute, shared by every instance.
    dicom_finder = DicomFinder()

    # NOTE(review): mutable default `allow=[]` is shared across all calls
    # that omit it -- safe only while it is never mutated; confirm.
    def __init__(self, DIR_RAW, add_subgroup=False, value_clip=False, allow=[]):
        self.RAW_DICOM_DIRS = self.dicom_finder.get_dicom_dirs(DIR_RAW)
        # Split on the LAST "raw" so a second "raw" higher up the path
        # does not break the mirror location.
        DIR_PREPROCESSED = (
            "raw".join(DIR_RAW.split("raw")[:-1]) + "preprocessed"
        )  # join in case of 2nd "raw" dir somewhere in directory structure
        # option for images and labels?
        DIR_PRE_DICOM_IMAGES, DIR_PRE_DICOM_LABELS = self.get_preprocessed_dir(
            DIR_PREPROCESSED, image_type="dicom", add_subgroup=add_subgroup
        )
        DIR_PRE_NIFTI_IMAGES, DIR_PRE_NIFTI_LABELS = self.get_preprocessed_dir(
            DIR_PREPROCESSED, image_type="nifti", add_subgroup=add_subgroup
        )
        self.DIRS = DirManager(
            DIR_RAW=DIR_RAW,
            DIR_PREPROCESSED=DIR_PREPROCESSED,
            DIR_PRE_DICOM_IMAGES=DIR_PRE_DICOM_IMAGES,
            DIR_PRE_DICOM_LABELS=DIR_PRE_DICOM_LABELS,
            DIR_PRE_NIFTI_IMAGES=DIR_PRE_NIFTI_IMAGES,
            DIR_PRE_NIFTI_LABELS=DIR_PRE_NIFTI_LABELS,
        )
        self.configure_logger(DIR_PREPROCESSED, add_subgroup)
        self.value_clip = value_clip
        self.allow = allow

    def configure_logger(self, log_directory: pathlib.Path, add_subgroup) -> None:
        """Route log output to a timestamped file in the output tree."""
        log_date = datetime.now()
        log_date = "_".join(
            [
                str(log_date.year),
                str(log_date.month),
                str(log_date.day),
                str(log_date.hour),
                str(log_date.minute),
            ]
        )
        if add_subgroup:
            log_date = "_".join([add_subgroup, log_date])
        # set up log config
        # NOTE(review): `datefmt` is passed without a `format` string, so
        # the default record format (which omits the date) is used --
        # confirm whether a `format=` argument was intended.
        logging.basicConfig(
            filename=os.path.join(
                log_directory,
                f"preprocess_{log_date}.log",
            ),
            level=logging.INFO,
            datefmt="%Y-%m-%d %H:%M:%S",
        )

    def get_preprocessed_dir(
        self, DIR_PREPROCESSED: pathlib.Path, image_type: str, add_subgroup
    ) -> Tuple[pathlib.Path]:
        """Return the (images, labels) output dirs for one image type."""
        if add_subgroup:
            return os.path.join(
                DIR_PREPROCESSED, image_type, "images", add_subgroup
            ), os.path.join(DIR_PREPROCESSED, image_type, "labels", add_subgroup)
        else:
            return os.path.join(DIR_PREPROCESSED, image_type, "images"), os.path.join(
                DIR_PREPROCESSED, image_type, "labels"
            )

    def clean_dicom(
        self, raw_dicom_dir: pathlib.Path, clean_dicom_dir: pathlib.Path
    ) -> None:
        """Read a raw series, prep it for NIFTI export, and save it."""
        raw = ReadDicom(raw_dicom_dir, value_clip=self.value_clip, allow=self.allow)
        raw.prep_for_nifti(raw.files)
        raw.writer.save_all(raw.files, clean_dicom_dir)
        log.info(f"{raw_dicom_dir} preprocessed as DICOM to {clean_dicom_dir}")

    # NOTE(review): `is_label=bool` sets the DEFAULT VALUE to the builtin
    # `bool` type (which is truthy!) -- almost certainly a typo for the
    # annotation `is_label: bool = False`; confirm all callers pass it.
    def get_clean_dicom_dir(self, case_name: str, is_label=bool) -> pathlib.Path:
        if is_label:
            return os.path.join(self.DIRS.DIR_PRE_DICOM_LABELS, case_name)
        else:
            return os.path.join(self.DIRS.DIR_PRE_DICOM_IMAGES, case_name)

    def get_case_name(self, raw_dicom_dir, case_name_fn) -> str:
        """Derive the case name, optionally via a caller-supplied function."""
        if not case_name_fn:
            return os.path.basename(raw_dicom_dir)
        else:
            return case_name_fn(raw_dicom_dir)

    def get_nifti_write_path(self, case_name: str, is_label: bool) -> pathlib.Path:
        # "_0000" suffix presumably matches a downstream naming scheme
        # (e.g. nnU-Net channel conventions) -- TODO confirm.
        if is_label:
            return os.path.join(
                self.DIRS.DIR_PRE_NIFTI_LABELS, f"{case_name}_0000.nii.gz"
            )
        else:
            return os.path.join(
                self.DIRS.DIR_PRE_NIFTI_IMAGES, f"{case_name}_0000.nii.gz"
            )

    # NOTE(review): same `is_label=bool` default-value typo as above.
    def write_nifti(
        self, clean_dicom_dir: pathlib.Path, case_name: str, is_label=bool
    ) -> None:
        """Convert a cleaned DICOM series to a single NIFTI file."""
        nifti_write_path = self.get_nifti_write_path(case_name, is_label)
        dicom2nifti.dicom_series_to_nifti(clean_dicom_dir, nifti_write_path)
        log.info(f"{clean_dicom_dir} preprocessed as NIFTI to {nifti_write_path}")

    def build_legend(self, dicom_dir: pathlib.Path, pixel_array: np.array, **kwargs):
        """Fill in default legend kwargs for the orthoview previews.

        A legend entry is [text, hex color]; the default text is
        "<dir name>: <min>:<max>" of the pixel data.
        """
        if "legend" in kwargs and not kwargs["legend"]:
            # Caller explicitly disabled the legend.
            return kwargs
        if not "legend_size" in kwargs:
            kwargs["legend_size"] = 12
        if not "legend" in kwargs:
            kwargs["legend"] = [
                [
                    f"{os.path.basename(dicom_dir.rstrip('/'))}: {np.amin(pixel_array)}:{np.amax(pixel_array)}",
                    "#808080",
                ]
            ]
            return kwargs
        # NOTE(review): this return duplicates the one above; harmless
        # but redundant.
        return kwargs

    def preview_raw_dicom(self, value_clip=False, **kwargs) -> None:
        """Generate orthoview previews of original/raw DICOM files."""
        for raw_dicom_dir in tqdm(
            natsorted(glob(os.path.join(self.DIRS.DIR_RAW, "*/"))),
            desc="generating previews of raw data...",
        ):
            raw_dicom = ReadDicom(
                raw_dicom_dir, allow=self.allow, value_clip=value_clip
            )
            raw_dicom.viewer.orthoview(
                **self.build_legend(raw_dicom_dir, raw_dicom.arr, **kwargs)
            )

    def preview_preprocessed_dicom(self, value_clip=False, **kwargs) -> None:
        """Generate orthoview previews of preprocessed DICOM files."""
        for clean_dicom_dir in tqdm(
            natsorted(glob(os.path.join(self.DIRS.DIR_PRE_DICOM_IMAGES, "*/")))
            + natsorted(glob(os.path.join(self.DIRS.DIR_PRE_DICOM_LABELS, "*/"))),
            desc="generating previews of preprocessed DICOM data...",
        ):
            clean_dicom = ReadDicom(
                clean_dicom_dir, allow=self.allow, value_clip=value_clip
            )
            clean_dicom.viewer.orthoview(
                **self.build_legend(clean_dicom_dir, clean_dicom.arr, **kwargs)
            )

    # NOTE(review): this method name is defined TWICE in this class; this
    # first definition (DICOM pairs) is shadowed by the later one (NIFTI
    # pairs) and is therefore unreachable dead code -- confirm which
    # behavior is wanted and rename one of them.
    def preview_preprocessed_dicom_pair(self, value_clip=False, **kwargs) -> None:
        """Generate orthoview previews of preprocessed DICOM files."""
        for clean_dicom_dir in tqdm(
            natsorted(glob(os.path.join(self.DIRS.DIR_PRE_DICOM_IMAGES, "*/"))),
            desc="generating previews of preprocessed DICOM data...",
        ):
            clean_dicom_image = ReadDicom(
                clean_dicom_dir, allow=self.allow, value_clip=value_clip
            )
            clean_dicom_label = ReadDicom(
                "labels".join(clean_dicom_dir.split("images")),
                allow=self.allow,
                value_clip=value_clip,
            )
            clean_dicom_pair = ReadImageLabelPair(clean_dicom_image, clean_dicom_label)
            clean_dicom_pair.viewer.orthoview(
                **self.build_legend(clean_dicom_dir, clean_dicom_image.arr, **kwargs)
            )

    def preview_preprocessed_nifti(self, value_clip=False, **kwargs) -> None:
        """Generate orthoview previews of preprocessed NIFTI files."""
        for nifti_path in tqdm(
            natsorted(glob(os.path.join(self.DIRS.DIR_PRE_NIFTI_IMAGES, "*.nii.gz")))
            + natsorted(glob(os.path.join(self.DIRS.DIR_PRE_NIFTI_LABELS, "*.nii.gz"))),
            desc="generating previews of preprocessed NIFTI data...",
        ):
            nifti_file = ReadNifti(nifti_path, value_clip=value_clip)
            nifti_file.viewer.orthoview(
                **self.build_legend(nifti_path, nifti_file.arr, **kwargs)
            )

    # NOTE(review): duplicate definition -- this one wins at class-creation
    # time and actually previews NIFTI image/label pairs despite the name
    # and docstring saying DICOM.
    def preview_preprocessed_dicom_pair(self, value_clip=False, **kwargs) -> None:
        """Generate orthoview previews of preprocessed DICOM files."""
        for nifti_path in tqdm(
            natsorted(glob(os.path.join(self.DIRS.DIR_PRE_NIFTI_IMAGES, "*.nii.gz"))),
            desc="generating previews of preprocessed DICOM data...",
        ):
            nifti_file_image = ReadNifti(nifti_path, value_clip=value_clip)
            nifti_file_label = ReadNifti(
                "labels".join(nifti_path.split("images")), value_clip=value_clip
            )
            nifti_pair = ReadImageLabelPair(nifti_file_image, nifti_file_label)
            nifti_pair.viewer.orthoview(
                **self.build_legend(nifti_path, nifti_file_image.arr, **kwargs)
            )

    def check_path_is_label(
        self, raw_dicom_dir: pathlib.Path, label_identifier
    ) -> bool:
        """Classify a series dir as label data via the caller's predicate."""
        if not label_identifier:
            return False
        return label_identifier(raw_dicom_dir)

    def preprocess(self, case_name_fn=False, label_identifier=False) -> None:
        """Run the full clean-DICOM + NIFTI-export pipeline over all series."""
        for raw_dicom_dir in tqdm(
            natsorted(self.RAW_DICOM_DIRS), desc="preprocessing..."
        ):
            # try:
            is_label = self.check_path_is_label(raw_dicom_dir, label_identifier)
            case_name = self.get_case_name(raw_dicom_dir, case_name_fn)
            clean_dicom_dir = self.get_clean_dicom_dir(
                case_name,
                is_label,
            )
            self.clean_dicom(raw_dicom_dir, clean_dicom_dir)
            self.write_nifti(clean_dicom_dir, case_name, is_label)
            # except Exception as e:
            # log.exception(e)
            # print(f'ERROR converting {raw_dicom_dir}: {e}')


if __name__ == "__main__":
    # NOTE(review): `main` is not defined anywhere in this module -- running
    # the file directly raises NameError; confirm intended entry point.
    main()
from django.shortcuts import render, redirect
from .models import Category, Photo
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import CustomUserCreationForm

# Create your views here.


def loginUser(request):
    """Authenticate a user from POSTed credentials and redirect to the
    gallery on success; re-render the login page otherwise."""
    page = 'login'

    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']

        user = authenticate(request, username=username, password=password)

        if user is not None:
            login(request, user)
            return redirect('gallery')
        # NOTE(review): failed logins fall through silently -- no error
        # message is surfaced to the template; confirm that's intended.

    return render(request, 'photos/login_register.html', {'page': page})


def logoutUser(request):
    """Log the current user out and return to the login page."""
    logout(request)
    return redirect('login')


def registerUser(request):
    """Create a new account and immediately log it in."""
    page = 'register'
    form = CustomUserCreationForm()

    if request.method == 'POST':
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            # commit=False allows tweaking the instance before persisting
            # (nothing is modified here before save()).
            user = form.save(commit=False)
            user.save()

            if user is not None:
                login(request, user)
                return redirect('gallery')

    context = {'form': form, 'page': page}
    return render(request, 'photos/login_register.html', context)


@login_required(login_url='login')
def gallery(request):
    """Show the user's photos, optionally filtered by ?category=<name>."""
    user = request.user
    category = request.GET.get('category')
    if category == None:
        photos = Photo.objects.filter(category__user=user)
    else:
        photos = Photo.objects.filter(
            category__name=category, category__user=user)

    categories = Category.objects.filter(user=user)
    context = {'categories': categories, 'photos': photos}
    return render(request, 'photos/gallery.html', context)


@login_required(login_url='login')
def viewPhoto(request, pk):
    """Render a single photo by primary key."""
    photo = Photo.objects.get(id=pk)
    return render(request, 'photos/photo.html', {'photo': photo})


@login_required(login_url='login')
def addPhoto(request):
    """Upload one or more photos into an existing or newly created category."""
    user = request.user
    categories = user.category_set.all()

    if request.method == 'POST':
        data = request.POST
        images = request.FILES.getlist('images')

        if data['category'] != 'none':
            category = Category.objects.get(id=data['category'])
        elif data['category_new'] != '':
            category, created = Category.objects.get_or_create(
                user=user, name=data['category_new'])
        else:
            category = None

        # NOTE(review): every uploaded image shares the single
        # 'description' field from the form.
        for image in images:
            photo = Photo.objects.create(
                category=category,
                description=data['description'],
                image=image,
            )

        return redirect('gallery')

    context = {'categories': categories}
    return render(request, 'photos/add.html', context)
import os

import numpy as np
import imageio

from torch import multiprocessing
from torch.utils.data import DataLoader

import voc12.dataloader
from misc import torchutils, imutils


def _work(process_id, infer_dataset, args):
    """Worker: turn saved CAMs into CRF-refined IR (ignore-region) labels.

    Reads <cam_out_dir>/<name>.npy for each image in this worker's data
    bin and writes a per-pixel label PNG to <ir_label_out_dir> where
    255 marks the uncertain (ignore) region.
    """
    databin = infer_dataset[process_id]
    infer_data_loader = DataLoader(databin, shuffle=False, num_workers=0, pin_memory=False)

    # NOTE(review): loop variable `iter` shadows the builtin of the same name.
    for iter, pack in enumerate(infer_data_loader):
        img_name = voc12.dataloader.decode_int_filename(pack['name'][0])
        img = pack['img'][0].numpy()
        cam_dict = np.load(os.path.join(args.cam_out_dir, img_name + '.npy'), allow_pickle=True).item()

        cams = cam_dict['high_res']
        # Shift class ids by one and prepend 0 for the background class.
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')

        # 1. find confident fg & bg
        # Prepending a constant "background score" plane, then argmax:
        # pixels whose best CAM score is below the threshold become class 0.
        fg_conf_cam = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.conf_fg_thres)
        fg_conf_cam = np.argmax(fg_conf_cam, axis=0)
        pred = imutils.crf_inference_label(img, fg_conf_cam, n_labels=keys.shape[0])
        fg_conf = keys[pred]

        bg_conf_cam = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.conf_bg_thres)
        bg_conf_cam = np.argmax(bg_conf_cam, axis=0)
        pred = imutils.crf_inference_label(img, bg_conf_cam, n_labels=keys.shape[0])
        bg_conf = keys[pred]

        # 2. combine confident fg & bg
        # fg==0 but bg!=0 -> uncertain (255); fg==0 and bg==0 -> background.
        conf = fg_conf.copy()
        conf[fg_conf == 0] = 255
        conf[bg_conf + fg_conf == 0] = 0

        imageio.imwrite(os.path.join(args.ir_label_out_dir, img_name + '.png'),
                        conf.astype(np.uint8))

        # Crude 5%-step progress printout from the last worker only.
        # NOTE(review): len(databin) // 20 is 0 for bins smaller than 20
        # items, which would raise ZeroDivisionError -- confirm bin sizes.
        if process_id == args.num_workers - 1 and iter % (len(databin) // 20) == 0:
            print("%d " % ((5 * iter + 1) // (len(databin) // 20)), end='')


def run(args):
    """Split the dataset across workers and spawn them."""
    dataset = voc12.dataloader.VOC12ImageDataset(args.train_list, voc12_root=args.voc12_root,
                                                 img_normal=None, to_torch=False)
    dataset = torchutils.split_dataset(dataset, args.num_workers)

    print('[ ', end='')
    multiprocessing.spawn(_work, nprocs=args.num_workers, args=(dataset, args), join=True)
    print(']')
# coding=utf-8 from __future__ import absolute_import from wecubek8s.apps.openapi import route
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 29 14:02:59 2019

@author: darfyma

Fuzzy wall-following controller setup for a V-REP robot: two distance
antecedents (left/right) are mapped to one wheel-speed consequent.
"""

import vrep
import sys
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

#-------------------------Fuzzy Setup------------------------------------------
lf = np.arange(0,1, 0.001)
left = ctrl.Antecedent(lf, 'left')
right = ctrl.Antecedent(lf, 'right')
out = ctrl.Consequent(np.arange(-3, 3, 0.001), 'output')

# Distance memberships (jauh=far, sedang=medium, dekat=near).
left['jauh'] = fuzz.gaussmf(left.universe,0.645,0.07)
left['sedang']= fuzz.gaussmf(left.universe,0.340,0.07)
left['dekat'] = fuzz.gaussmf(left.universe,0.100,0.07)
right['jauh'] = fuzz.gaussmf(right.universe,0.645,0.07)
right['sedang'] = fuzz.gaussmf(right.universe,0.340,0.07)
right['dekat'] = fuzz.gaussmf(right.universe,0.100,0.07)

#s8['jauh'] = fuzz.gaussmf(s8.universe,0.25,0.07)
#s8['sedang'] = fuzz.gaussmf(s8.universe,0.5,0.07)
#s8['dekat'] = fuzz.gaussmf(s8.universe,0.75,0.07)
#s9['jauh'] = fuzz.gaussmf(s9.universe,0.25,0.07)
#s9['sedang'] = fuzz.gaussmf(s9.universe,0.5,0.07)
#s9['dekat'] = fuzz.gaussmf(s9.universe,0.75,0.07)

# Output speed memberships (lambat=slow, sedang=medium, cepat=fast).
out['lambat'] = fuzz.gaussmf(out.universe,0,0.5)
out['sedang'] = fuzz.gaussmf(out.universe,1.5,0.5)
out['cepat'] = fuzz.gaussmf(out.universe,3,0.5)
# FIX: rules 6 and 9 reference out['sed_cep'], which was never defined and
# made building the ControlSystem fail with a KeyError at import time.
# Defined here as "medium-fast", centered halfway between 'sedang' (1.5)
# and 'cepat' (3) with the same spread as the other output terms.
out['sed_cep'] = fuzz.gaussmf(out.universe,2.25,0.5)

##out roda kiri (susur kiri) -- left-wheel output, left-wall following.
# NOTE(review): '|' (OR) is used although the table enumerates all nine
# left/right combinations, which usually calls for '&' (AND); left as-is
# to preserve behavior -- confirm against the intended controller.
rule1 = ctrl.Rule(left['jauh'] | right['dekat'], out['cepat'])
rule2 = ctrl.Rule(left['jauh'] | right['sedang'], out['cepat'])
rule3 = ctrl.Rule(left['jauh'] | right['jauh'], out['lambat'])
rule4 = ctrl.Rule(left['sedang'] | right['dekat'], out['sedang'])
rule5 = ctrl.Rule(left['sedang'] | right['sedang'], out['lambat']) #center
rule6 = ctrl.Rule(left['sedang'] | right['jauh'], out['sed_cep'])
rule7 = ctrl.Rule(left['dekat'] | right['dekat'], out['lambat'])
rule8 = ctrl.Rule(left['dekat'] | right['sedang'], out['lambat'])
rule9 = ctrl.Rule(left['dekat'] | right['jauh'], out['sed_cep'])

outing_ctrl = ctrl.ControlSystem([rule1,rule2,rule3,rule4,rule5,rule6,rule7,rule8,rule9])
outing = ctrl.ControlSystemSimulation(outing_ctrl)
#-----------------------------Remote Api--------------------------------------- vrep.simxFinish(-1) clientID=vrep.simxStart ('127.0.0.1',19997,True,True,5000,5) if clientID!=-1: print ("Connected to remote API server") vrep.simxAddStatusbarMessage(clientID,"Program Loaded!",vrep.simx_opmode_oneshot) else: print ("Connection not successful") sys.exit("Could not connect") vrep.simxStartSimulation(clientID,vrep.simx_opmode_oneshot_wait) val_s=[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1] for x in range(0,18): err,signal=vrep.simxGetFloatSignal(clientID,'s'+str(x+1),vrep.simx_opmode_streaming) #-----------------------------Looping------------------------------------------ while vrep.simxGetConnectionId(clientID) != -1: for i in range(0,18): err,signal=vrep.simxGetFloatSignal(clientID,'s'+str(i+1),vrep.simx_opmode_buffer) if (err==vrep.simx_return_ok): val_s[i]=round(signal,2) #------------------------------------------------------------------------------ #note sensor kiri, s1 = val_s[2],s16 = val_s[17] #note sensor kanan, s8 = val_s[9],s9 = val_s[10] sensleft = (val_s[2]+val_s[17])/2 sensright= (val_s[8]+val_s[9])/2 erorr = sensleft - sensright outing.input['left'] = sensleft outing.input['right'] = sensright outing.compute() c = outing.output['output'] err=vrep.simxSetFloatSignal(clientID,'vLeft',2 ,vrep.simx_opmode_oneshot) err=vrep.simxSetFloatSignal(clientID,'vRight',2+c,vrep.simx_opmode_oneshot) print(round(sensleft,4),round(sensright,4),round(erorr,4), round(c,1) )
#
# PySNMP MIB module STN-ATM-VPN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/STN-ATM-VPN-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:03:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated by pysmi from the ASN.1 MIB source --
# do not hand-edit; regenerate from the MIB instead.  The module defines
# the Spring Tide Networks ATM-VPN trunk/link/path tables (all columns
# read-only) and exports them for the pysnmp MIB builder.
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, ModuleIdentity, MibIdentifier, NotificationType, Unsigned32, Counter32, Gauge32, Counter64, iso, ObjectIdentity, TimeTicks, Bits, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "ModuleIdentity", "MibIdentifier", "NotificationType", "Unsigned32", "Counter32", "Gauge32", "Counter64", "iso", "ObjectIdentity", "TimeTicks", "Bits", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
stnNotification, = mibBuilder.importSymbols("SPRING-TIDE-NETWORKS-SMI", "stnNotification")
stnRouterAtmVpn, = mibBuilder.importSymbols("STN-ROUTER-MIB", "stnRouterAtmVpn")

# Module identity under the Spring Tide Networks enterprise arc.
stnAtmVpn = ModuleIdentity((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1))
if mibBuilder.loadTexts: stnAtmVpn.setLastUpdated('0008080000Z')
if mibBuilder.loadTexts: stnAtmVpn.setOrganization('Spring Tide Networks')
stnAtmVpnTrunkObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1))
stnAtmVpnLinkObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2))

# ---- Trunk table: one row per ATM-VPN trunk interface ----
stnAtmVpnTrunkTable = MibTable((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1), )
if mibBuilder.loadTexts: stnAtmVpnTrunkTable.setStatus('current')
stnAtmVpnTrunkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1), ).setIndexNames((0, "STN-ATM-VPN-MIB", "stnAtmVpnTrunkIfIndex"))
if mibBuilder.loadTexts: stnAtmVpnTrunkEntry.setStatus('current')
stnAtmVpnTrunkIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkIfIndex.setStatus('current')
stnAtmVpnTrunkViId = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkViId.setStatus('current')
stnAtmVpnTrunkName = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkName.setStatus('current')
stnAtmVpnTrunkState = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkState.setStatus('current')
stnAtmVpnTrunkLowerIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 5), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkLowerIfIndex.setStatus('current')
stnAtmVpnTrunkVpnPaths = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkVpnPaths.setStatus('current')
stnAtmVpnTrunkInUnknownVpnId = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkInUnknownVpnId.setStatus('current')
stnAtmVpnTrunkInVpnIdIfaceInvalid = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkInVpnIdIfaceInvalid.setStatus('current')
stnAtmVpnTrunkOutUnknownVpnId = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkOutUnknownVpnId.setStatus('current')
stnAtmVpnTrunkOutVpnIdIfaceInvalid = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 1, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkOutVpnIdIfaceInvalid.setStatus('current')

# ---- Trunk path table: per-(trunk, VPN id) forwarding paths ----
stnAtmVpnTrunkPathTable = MibTable((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2), )
if mibBuilder.loadTexts: stnAtmVpnTrunkPathTable.setStatus('current')
stnAtmVpnTrunkPathEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1), ).setIndexNames((0, "STN-ATM-VPN-MIB", "stnAtmVpnTrunkPathTrunkIfIndex"), (0, "STN-ATM-VPN-MIB", "stnAtmVpnTrunkPathVpnOUI"), (0, "STN-ATM-VPN-MIB", "stnAtmVpnTrunkPathVpnIndex"), (0, "STN-ATM-VPN-MIB", "stnAtmVpnTrunkPathVpnSubIndex"))
if mibBuilder.loadTexts: stnAtmVpnTrunkPathEntry.setStatus('current')
stnAtmVpnTrunkPathTrunkIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathTrunkIfIndex.setStatus('current')
stnAtmVpnTrunkPathVpnOUI = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathVpnOUI.setStatus('current')
stnAtmVpnTrunkPathVpnIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathVpnIndex.setStatus('current')
stnAtmVpnTrunkPathVpnSubIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathVpnSubIndex.setStatus('current')
stnAtmVpnTrunkPathType = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("vpnLearnedPath", 1), ("vpnStaticPath", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathType.setStatus('current')
stnAtmVpnTrunkPathNextIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("atmVpnLink", 1), ("atmVpnTrunk", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathNextIfType.setStatus('current')
stnAtmVpnTrunkPathNextIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 7), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathNextIfIndex.setStatus('current')
stnAtmVpnTrunkPathInPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathInPackets.setStatus('current')
stnAtmVpnTrunkPathInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathInOctets.setStatus('current')
stnAtmVpnTrunkPathOutPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathOutPackets.setStatus('current')
stnAtmVpnTrunkPathOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 1, 2, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnTrunkPathOutOctets.setStatus('current')

# ---- Link table: one row per ATM-VPN link interface ----
stnAtmVpnLinkTable = MibTable((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1), )
if mibBuilder.loadTexts: stnAtmVpnLinkTable.setStatus('current')
stnAtmVpnLinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1), ).setIndexNames((0, "STN-ATM-VPN-MIB", "stnAtmVpnLinkIfIndex"))
if mibBuilder.loadTexts: stnAtmVpnLinkEntry.setStatus('current')
stnAtmVpnLinkIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkIfIndex.setStatus('current')
stnAtmVpnLinkViId = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkViId.setStatus('current')
stnAtmVpnLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkName.setStatus('current')
stnAtmVpnLinkVpnOUI = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkVpnOUI.setStatus('current')
stnAtmVpnLinkVpnIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkVpnIndex.setStatus('current')
stnAtmVpnLinkVpnSubIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkVpnSubIndex.setStatus('current')
stnAtmVpnLinkState = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkState.setStatus('current')
stnAtmVpnLinkTrunkViId = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkTrunkViId.setStatus('current')
stnAtmVpnLinkLowerIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 9), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkLowerIfIndex.setStatus('current')
stnAtmVpnLinkOutUnknownVpnId = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkOutUnknownVpnId.setStatus('current')
stnAtmVpnLinkOutVpnIdIfaceInvalid = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkOutVpnIdIfaceInvalid.setStatus('current')
stnAtmVpnLinkInVpnIdIfaceInvalid = MibTableColumn((1, 3, 6, 1, 4, 1, 3551, 2, 7, 1, 7, 1, 2, 1, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stnAtmVpnLinkInVpnIdIfaceInvalid.setStatus('current')

# Export every object so the pysnmp MIB builder can resolve this module.
mibBuilder.exportSymbols("STN-ATM-VPN-MIB", stnAtmVpnLinkIfIndex=stnAtmVpnLinkIfIndex, stnAtmVpnTrunkState=stnAtmVpnTrunkState, stnAtmVpnTrunkPathInOctets=stnAtmVpnTrunkPathInOctets, stnAtmVpnLinkLowerIfIndex=stnAtmVpnLinkLowerIfIndex, stnAtmVpnLinkInVpnIdIfaceInvalid=stnAtmVpnLinkInVpnIdIfaceInvalid, stnAtmVpnLinkObjects=stnAtmVpnLinkObjects, stnAtmVpnTrunkPathVpnSubIndex=stnAtmVpnTrunkPathVpnSubIndex, stnAtmVpnTrunkPathTable=stnAtmVpnTrunkPathTable, stnAtmVpn=stnAtmVpn, PYSNMP_MODULE_ID=stnAtmVpn, stnAtmVpnLinkState=stnAtmVpnLinkState, stnAtmVpnTrunkOutVpnIdIfaceInvalid=stnAtmVpnTrunkOutVpnIdIfaceInvalid, stnAtmVpnTrunkInVpnIdIfaceInvalid=stnAtmVpnTrunkInVpnIdIfaceInvalid, stnAtmVpnTrunkName=stnAtmVpnTrunkName, stnAtmVpnTrunkEntry=stnAtmVpnTrunkEntry, stnAtmVpnTrunkVpnPaths=stnAtmVpnTrunkVpnPaths, stnAtmVpnTrunkPathVpnOUI=stnAtmVpnTrunkPathVpnOUI, stnAtmVpnTrunkViId=stnAtmVpnTrunkViId, stnAtmVpnTrunkTable=stnAtmVpnTrunkTable, stnAtmVpnLinkVpnOUI=stnAtmVpnLinkVpnOUI, stnAtmVpnTrunkPathVpnIndex=stnAtmVpnTrunkPathVpnIndex, stnAtmVpnTrunkPathInPackets=stnAtmVpnTrunkPathInPackets, stnAtmVpnTrunkPathNextIfType=stnAtmVpnTrunkPathNextIfType, stnAtmVpnTrunkOutUnknownVpnId=stnAtmVpnTrunkOutUnknownVpnId, stnAtmVpnTrunkInUnknownVpnId=stnAtmVpnTrunkInUnknownVpnId, stnAtmVpnLinkOutUnknownVpnId=stnAtmVpnLinkOutUnknownVpnId, stnAtmVpnTrunkPathOutPackets=stnAtmVpnTrunkPathOutPackets, stnAtmVpnLinkTrunkViId=stnAtmVpnLinkTrunkViId, stnAtmVpnTrunkPathTrunkIfIndex=stnAtmVpnTrunkPathTrunkIfIndex, stnAtmVpnTrunkPathNextIfIndex=stnAtmVpnTrunkPathNextIfIndex, stnAtmVpnTrunkPathEntry=stnAtmVpnTrunkPathEntry, stnAtmVpnTrunkIfIndex=stnAtmVpnTrunkIfIndex, stnAtmVpnLinkTable=stnAtmVpnLinkTable, stnAtmVpnLinkOutVpnIdIfaceInvalid=stnAtmVpnLinkOutVpnIdIfaceInvalid, stnAtmVpnTrunkPathType=stnAtmVpnTrunkPathType, stnAtmVpnLinkVpnSubIndex=stnAtmVpnLinkVpnSubIndex, stnAtmVpnLinkName=stnAtmVpnLinkName, stnAtmVpnTrunkObjects=stnAtmVpnTrunkObjects, stnAtmVpnLinkVpnIndex=stnAtmVpnLinkVpnIndex, stnAtmVpnLinkEntry=stnAtmVpnLinkEntry, stnAtmVpnLinkViId=stnAtmVpnLinkViId, stnAtmVpnTrunkPathOutOctets=stnAtmVpnTrunkPathOutOctets, stnAtmVpnTrunkLowerIfIndex=stnAtmVpnTrunkLowerIfIndex)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from decimal import Decimal
from collections import defaultdict
import sys

# Redirect stdin so input()/sys.stdin read the task data from a file.
sys.stdin = open('input.txt')


class KnapsackItem:
    """A single item that may be placed in the knapsack."""

    def __init__(self, name, size, price):
        # Item name.
        self.name = name
        # Decimal is used deliberately for size and price so fractional
        # values are handled exactly -- in particular the modulo
        # operations used below to find the sub-knapsack step.
        self.size = Decimal(size)
        # Item value.
        self.price = Decimal(price)


def input_data():
    """Read the task data: the capacity, then one item per line."""
    # Total capacity of the whole knapsack.
    capacity = Decimal(input())
    # Inventory: all available items.
    inventory = []
    for line in sys.stdin:
        # Each line holds: name size price.
        name, size, price = line.split()
        inventory.append(KnapsackItem(name, size, price))
    return capacity, inventory


def get_knapsack_volumes(capacity, inventory):
    """
    Split the knapsack into sub-volumes (sub-knapsacks).

    capacity  - capacity of the whole knapsack
    inventory - list of all available items

    Essentially this finds the greatest common divisor (GCD) of all
    item sizes and the knapsack capacity itself, then returns the grid
    of volumes step, 2*step, ..., capacity.
    """
    # Several items may share a size; a set avoids redundant checks.
    sizes = set(ks_item.size for ks_item in inventory)
    # The capacity itself participates, since the DP table below runs
    # from the smallest sub-knapsack up to the whole one.
    sizes.add(capacity)
    # The smallest size is the initial approximation of the step.
    step = min(sizes)
    # Shrink the step (Euclid-style, via remainders) until it divides
    # every size exactly.
    while True:
        # Scan all sizes in the set.
        for size in sizes:
            remainder = size % step
            # A non-zero remainder means the step is still too coarse.
            if remainder != Decimal('0'):
                # Take the remainder as the new step...
                step = remainder
                # ...and restart the scan from the while loop.
                break
        else:
            # The for loop finished without a break: every size divides
            # evenly, so the step is final.
            break
    # Here `step` is the definitive sub-knapsack granularity.
    # int(...) converts Decimal to int, as required by range().
    ks_volumes = [i*step for i in range(1, int(capacity / step)+1)]
    return ks_volumes


def find_optimal_set(capacity, inventory):
    """Find the item subset of maximum total value (0/1 knapsack DP)."""
    # Sub-knapsack volumes: a list of Decimals.
    ks_volumes = get_knapsack_volumes(capacity, inventory)
    # DP table addressed as tab[i, volume]: i is the item index,
    # volume the sub-knapsack size (a (int, Decimal) tuple key).
    # Each cell stores both the best total 'price' and the 'items'
    # achieving it (as indexes into the inventory).  Missing cells
    # default to price 0 with an empty item list, which keeps the code
    # below free of explicit base cases.
    tab = defaultdict(lambda: {'price': Decimal('0'), 'items': []})
    # Fill the table row by row (one row per item).
    for i, item in enumerate(inventory):
        # One column per sub-knapsack volume.
        for volume in ks_volumes:
            # Best value without the current item.
            before_max = tab[i-1, volume]['price']
            # Candidate value WITH the current item, if it fits:
            if item.size <= volume:
                # its own price plus the best use of the leftover space.
                new_max = item.price + tab[i-1, volume-item.size]['price']
            else:
                # Item does not fit: any negative value works here, since
                # the minimum possible `before_max` is 0 and the compare
                # below must then pick the previous maximum.
                new_max = -1
            # Keep the larger of the two candidates.
            if before_max > new_max:
                # Carry the previous maximum (value and item set) over.
                tab[i, volume]['price'] = before_max
                tab[i, volume]['items'] = tab[i-1, volume]['items']
            else:
                # New maximum: current item plus the leftover-space set.
                tab[i, volume]['price'] = new_max
                tab[i, volume]['items'].append(i)
                empty_items = tab[i-1, volume-item.size]['items']
                tab[i, volume]['items'].extend(empty_items)
    # The bottom-right cell holds the optimal set for the full capacity.
    index_ks_set = tab[len(inventory)-1, ks_volumes[-1]]['items']
    return index_ks_set


def output_results(capacity, inventory, index_ks_set):
    """Print the chosen items as a table with totals."""
    # Running totals for value and occupied volume.
    score = Decimal('0')
    volume = Decimal('0')
    print(f'Набор предметов в рюкзаке ёмкостью "{capacity}":')
    # Table header.
    print(f'\t{"Предмет":^10}| размер | ценность |')
    print('-'*40)
    # One row per selected item.
    for i in index_ks_set:
        item = inventory[i]
        print(f'\t{item.name:<10}|{item.size:>8}|{item.price:>10}|')
        volume += item.size
        score += item.price
    # Footer with the totals.
    print('-'*40)
    print(f'\t{"СУММА":<10}|{volume:>8}|{score:>10}|')


def main():
    """Program entry point: read the data, solve, print the results."""
    capacity, inventory = input_data()
    # Solve for the subset of items with maximum total value.
    index_ks_set = find_optimal_set(capacity, inventory)
    output_results(capacity, inventory, index_ks_set)


if __name__ == "__main__":
    main()
from .routers import NoMatchFound, NoRouteFound, Prefix, Router, Routes from .routes import BaseRoute, HttpRoute, SocketRoute __all__ = [ "Router", "Routes", "BaseRoute", "HttpRoute", "SocketRoute", "Prefix", "NoMatchFound", "NoRouteFound", ]
from typing import Optional from dataclasses import dataclass from .Crypto import Crypto from .ManageInvoiceOperation import ManageInvoiceOperation @dataclass class InvoiceOperation: """Invoice operation of the request :param index: Sequence number of the invoice within the request :param invoice_operation: Type of the desired invoice operation :param invoice_data: Invoice data in BASE64 encoded form :param electronic_invoice_hash: Electronic invoice or modification document file hash value """ index: int invoice_operation: ManageInvoiceOperation invoice_data: str electronic_invoice_hash: Optional[Crypto]
from time import sleep import random print(f'{30*"-"}\n{"JOGO NA MEGA SENA":^30}\n{30*"-"}') nj = int(input('Quantos jogos você quer que eu sorteie? ')) print(f'{" SORTEANDO ":=>15}{nj}{" JOGOS ":=<15}') lista = [] for n in range(0, nj): num = list(range(1, 61)) # noinspection PyRedeclaration n = list() for c in range(0, 6): x = random.choice(num) n.append(x) num.remove(x) n.sort() lista.append(n) for i, c in enumerate(lista): print(f'Jogo {i + 1}: {c}') sleep(0.5) print(f'{" < BOA SORTE! > ":=^30}') # pode ser feito de outra forma lista1 = list() jogos = list() print(f'{30*"-"}\n{"JOGO NA MEGA SENA":^30}\n{30*"-"}') quant = int(input('Quantos jogos você quer que eu sorteie? ')) tot = 1 while tot <= quant: cont = 0 while True: num = random.randint(1, 60) if num not in lista1: lista1.append(num) cont += 1 if cont >= 6: break lista1.sort() jogos.append(lista1[:]) lista1.clear() tot += 1 print(f'{" SORTEANDO ":=>15}{quant}{" JOGOS ":=<15}') for i, l in enumerate(jogos): print(f'Jogo {i + 1}: {l}') sleep(0.5) print(f'{" < BOA SORTE! > ":=^30}')
# -*- coding: utf-8 -*- class EmotivOutputTask(object): def __init__(self, received=False, decrypted=False, data=None): self.packet_received = received self.packet_decrypted = decrypted self.packet_data = data class EmotivReaderTask(object): def __init__(self, data, timestamp): self.data = data self.timestamp = timestamp class EmotivWriterTask(object): def __init__(self, data=None, encrypted=False, values=True, timestamp=None): self.is_encrypted = encrypted self.is_values = values self.data = data self.timestamp = timestamp
"""根据 PRS 协议组合 block data, 并且使用 privateKey 进行签名""" import prs_utility with open(__file__) as fp: content = fp.read() data = { 'file_hash': prs_utility.keccak256(text=content), } key_pair = prs_utility.create_key_pair() private_key = key_pair['privateKey'] sig = prs_utility.sign_block_data(data, private_key) print('signature:', sig)
from datetime import timedelta

from django.db.models import Max
from django.utils.timezone import now
from preferences import preferences
from rest_framework import serializers

from bikesharing.models import Bike, Station


class GbfsFreeBikeStatusSerializer(serializers.HyperlinkedModelSerializer):
    """GBFS ``free_bike_status`` entry for a single bike."""

    # Expose the rotating non-static UUID instead of the internal key.
    bike_id = serializers.CharField(source="non_static_bike_uuid", read_only=True)

    class Meta:
        model = Bike
        fields = ("bike_id",)

    def to_representation(self, instance):
        representation = super().to_representation(instance)
        # Default to False TODO: maybe configuration later
        representation["is_reserved"] = False
        # Default to False TODO: maybe configuration later
        representation["is_disabled"] = False
        public_geolocation = instance.public_geolocation()
        if public_geolocation is not None:
            pos = public_geolocation.geo
            if pos and pos.x and pos.y:
                representation["lat"] = pos.y
                representation["lon"] = pos.x
                return representation
        # only return bikes with public geolocation
        # (falls through to an implicit None otherwise)


class GbfsStationInformationSerializer(serializers.HyperlinkedModelSerializer):
    """GBFS ``station_information`` entry for a single station."""

    name = serializers.CharField(source="station_name", read_only=True)
    capacity = serializers.IntegerField(source="max_bikes", read_only=True)
    station_id = serializers.CharField(source="id", read_only=True)

    class Meta:
        model = Station
        fields = (
            "name",
            "capacity",
            "station_id",
        )

    def to_representation(self, instance):
        representation = super().to_representation(instance)
        # Only emit coordinates when the station has a usable location.
        if (
            instance.location is not None
            and instance.location.x
            and instance.location.y
        ):
            representation["lat"] = instance.location.y
            representation["lon"] = instance.location.x
        return representation


class GbfsStationStatusSerializer(serializers.HyperlinkedModelSerializer):
    """GBFS ``station_status`` entry for a single station."""

    station_id = serializers.CharField(source="id", read_only=True)

    class Meta:
        model = Station
        fields = ("station_id",)

    def to_representation(self, instance):
        representation = super().to_representation(instance)
        # If configured, filter out vehicles whose last location report is
        # older than the allowed silent time period.
        bsp = preferences.BikeSharePreferences
        if bsp.gbfs_hide_bikes_after_location_report_silence:
            available_bikes = instance.bike_set.filter(
                availability_status="AV",
                last_reported__gte=now()
                - timedelta(hours=bsp.gbfs_hide_bikes_after_location_report_hours),
            )
        else:
            available_bikes = instance.bike_set.filter(availability_status="AV")
        representation["num_bikes_available"] = available_bikes.count()
        representation["num_docks_available"] = (
            instance.max_bikes - representation["num_bikes_available"]
        )
        last_reported_bike = available_bikes.aggregate(Max("last_reported"))
        if last_reported_bike["last_reported__max"] is not None:
            representation["last_reported"] = int(
                last_reported_bike["last_reported__max"].timestamp()
            )
        else:
            # if no bike is on station, last_report is now
            # not shure if this is the intended behavior of the field
            # or it should be the timestamp of the last bike removed
            # but it is not so easy to implement
            representation["last_reported"] = int(now().timestamp())
        # A station rents/returns/counts as installed iff it is active.
        status = (instance.status == "AC") or False
        representation["is_installed"] = status
        representation["is_renting"] = status
        representation["is_returning"] = status
        return representation
import frappe def execute(): frappe.reload_doc('core', 'doctype', 'system_settings') frappe.db.sql("update `tabSystem Settings` set allow_error_traceback=1")
""" Test the Frog lemmatizer functions and task. """ import logging import socket from unittest import SkipTest from nose.tools import assert_equal, assert_not_equal from xtas.tasks._frog import (FROG_HOST, FROG_PORT, call_frog, frog_to_saf, parse_frog) def _check_frog(): s = socket.socket() try: s.connect((FROG_HOST, FROG_PORT)) except: logging.exception("Unable to connect to {}:{}, skipping tests" .format(FROG_HOST, FROG_PORT)) raise SkipTest("Cannot connect to frog, skipping tests") logging.info("Frog is alive!") def test_call_frog(): _check_frog() lines = list(call_frog("dit is in Amsterdam. Tweede zin!")) assert_equal(len(lines), 10) test = lines[3].split("\t")[:5] assert_equal(test, ['4', 'Amsterdam', 'Amsterdam', '[Amsterdam]', 'SPEC(deeleigen)']) assert_equal(lines[5], '') LINES = ['1\tdit\tdit\t[dit]\tVNW(aanw,pron,stan,vol,3o,ev)\t0.9\tO\tB-NP\t\t', '2\tis\tzijn\t[zijn]\tWW(pv,tgw,ev)\t0.9\tO\tB-VP\t\t', '3\tin\tin\t[in]\tVZ(init)\t0.998321\tO\tB-PP\t\t', '4\tAmsterdam\tAmsterdam\t[Amsterdam]\tSPEC(deeleigen)' '\t1.000000\tB-LOC\tB-NP\t\t', '5\t.\t.\t[.]\tLET()\t0.999956\tO\tO\t\t', '', '1\tTweede\ttwee\t[twee][de]\tTW(rang,prenom,stan)\t0.9\tO\tB-NP\t\t', '2\tzin\tzin\t[zin]\tN(soort,ev,basis,zijd,stan)\t0.99\tO\tI-NP\t\t', '3\t!\t!\t[!]\tLET()\t0.995005\tO\tO\t\t', ''] def test_parse_frog(): tokens = list(parse_frog(LINES)) assert_equal(len(tokens), 8) expected = dict(id=0, sentence=0, lemma='dit', word='dit', pos='VNW(aanw,pron,stan,vol,3o,ev)', pos_confidence=0.9) assert_equal(tokens[0], expected) assert_equal(tokens[7]['sentence'], 1) def test_frog_to_saf(): tokens = list(parse_frog(LINES)) saf = frog_to_saf(tokens) assert_equal(len(saf['tokens']), 8) token = [t for t in saf['tokens'] if t['lemma'] == 'Amsterdam'] assert_equal(len(token), 1) assert_equal(token[0]['pos1'], 'M') def test_frog_task(): "Test whether the xtas.tasks.single.frog call works" _check_frog() from xtas.tasks.single import frog raw = frog("dit is een test", output='raw') 
assert_equal(len(raw), 5) assert_equal(raw[4], '') tokens = frog("dit is een test", output='tokens') assert_equal(len(tokens), 4) assert_equal(tokens[0]['lemma'], 'dit') saf = frog("dit is een test", output='saf') assert_equal(len(saf['tokens']), 4) assert_equal(saf['header']['processed'][0]['module'], 'frog')
""" core mixin for temporal Models """ import sqlalchemy.ext.declarative as declarative import sqlalchemy.orm as orm from temporal_sqlalchemy import bases, clock class TemporalModel(bases.Clocked): """ Mixin Class the enable temporal history for a sqlalchemy model """ @declarative.declared_attr def __mapper_cls__(cls): # pylint: disable=no-self-argument assert hasattr(cls, 'Temporal') def mapper(cls_, *args, **kwargs): options = cls_.Temporal mp = orm.mapper(cls_, *args, **kwargs) clock.temporal_map( *options.track, mapper=mp, activity_class=getattr(options, 'activity_class'), schema=getattr(options, 'schema')) return mp return mapper
def GenCPPProperty(type, name): assert(name.startswith('m_')) # if name.startswith('m_'): # name = name[2:] pretty_name = name[2:] # print '+++++++++++++' # print('') if type in ('int', 'float', 'bool', 'uint32_t', 'bool') or type.endswith('*'): # print '==== v1' print('{0} Get{1}() const {{ return m_{1}; }}'.format(type, pretty_name)) print('void Set{1}({0} value) {{ m_{1} = value; }}'.format(type, pretty_name)) else: # print '=== v2' print('const {0}& Get{1}() const {{ return m_{1}; }}'.format(type, pretty_name)) print('void Set{1}(const {0}& value) {{ m_{1} = value; }}'.format(type, pretty_name)) print('') def GenPythonDef(klass, name): pretty_name = name[2:] # print('.def("Get{1}", &{0}::Get{1})'.format(klass, pretty_name)) # print('.def("Set{1}", &{0}::Set{1})'.format(klass, pretty_name)) name2 = pretty_name[0].lower() + pretty_name[1:] print('.def_property("{mass}", &{Rigidbody}::Get{Mass}, &{Rigidbody}::Set{Mass})'.format(mass=name2, Mass=pretty_name, Rigidbody=klass)) template3 = ''' @property def {center}(self)->{Vector3}: return self.cpp.Get{Center}() @{center}.setter def {center}(self, value:{Vector3}): self.cpp.Set{Center}(value)''' def GenPythonProperty(type, name): pretty_name = name[2:] name2 = pretty_name[0].lower() + pretty_name[1:] print(template3.format(center=name2, Vector3=type, Center=pretty_name)) klass = 'Prefab' s = ''' PrefabModification m_Modification; Prefab* m_ParentPrefab = nullptr; GameObject* m_RootGameObject = nullptr; bool m_IsPrefabParent = true; ''' s = s.strip().split('\n') s = [x.strip() for x in s] pairs = [] for line in s: line = line.strip() if line.endswith(';'): line = line[:-1] if line.startswith('//'): continue t = line.split() if len(t) >= 2: pairs.append((t[0], t[1])) print(pairs) for type, name in pairs: GenCPPProperty(type, name) for _, name in pairs: GenPythonDef(klass, name) # for type, name in pairs: # GenPythonProperty(type, name)
#!/usr/bin/env python from distutils.core import setup setup(name='pystatplottools', version='0.1', description='Python modules for performing simple statistics and plotting routines.', author='Lukas Kades', author_email='lukaskades@googlemail.com', url='https://github.com/statphysandml/pystatplottools', packages=['pystatplottools', 'pystatplottools.distributions', 'pystatplottools.expectation_values', 'pystatplottools.pdf_env', 'pystatplottools.plotting', 'pystatplottools.pytorch_data_generation', 'pystatplottools.utils', 'pystatplottools.visualization'], )
import math
import random
import sys

# All printable ASCII characters; its length (95) is the cipher modulus.
SYMBOLS = (
    r""" !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`"""
    r"""abcdefghijklmnopqrstuvwxyz{|}~"""
)


def main():
    """Interactive entry point: read a message, key and mode, then run it.

    >>> key = get_random_key()
    >>> msg = "This is a test!"
    >>> decrypt_message(key, encrypt_message(key, msg)) == msg
    True
    """
    message = input("Enter message: ").strip()
    key = int(input("Enter key [2000 - 9000]: ").strip())
    mode = input("Encrypt/Decrypt [E/D]: ").strip().lower()
    if mode.startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed text: \n{translated}")


def check_keys(keyA, keyB, mode):
    """Validate the (keyA, keyB) pair, exiting with a message when weak."""
    if mode == "encrypt":
        if keyA == 1:
            sys.exit(
                "The affine cipher becomes weak when key "
                "A is set to 1. Choose different key"
            )
        if keyB == 0:
            sys.exit(
                "The affine cipher becomes weak when key "
                "B is set to 0. Choose different key"
            )
    if keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1:
        sys.exit(
            "Key A must be greater than 0 and key B must "
            f"be between 0 and {len(SYMBOLS) - 1}."
        )
    # keyA must be coprime with the modulus or decryption is impossible.
    if math.gcd(keyA, len(SYMBOLS)) != 1:
        sys.exit(
            f"Key A {keyA} and the symbol set size {len(SYMBOLS)} "
            "are not relatively prime. Choose a different key."
        )


def _mod_inverse(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm; ``a`` and ``m`` must be coprime
    (guaranteed here by check_keys).
    """
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (v1, v2, v3,
                                  u1 - q * v1, u2 - q * v2, u3 - q * v3)
    return u1 % m


def encrypt_message(key: int, message: str) -> str:
    """
    >>> encrypt_message(4545, 'The affine cipher is a type of monoalphabetic '
    ...                 'substitution cipher.')
    'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi'
    """
    # The combined key encodes keyA and keyB in base len(SYMBOLS).
    keyA, keyB = divmod(key, len(SYMBOLS))
    check_keys(keyA, keyB, "encrypt")
    cipherText = ""
    for symbol in message:
        if symbol in SYMBOLS:
            symIndex = SYMBOLS.find(symbol)
            cipherText += SYMBOLS[(symIndex * keyA + keyB) % len(SYMBOLS)]
        else:
            # Characters outside the symbol set pass through unchanged.
            cipherText += symbol
    return cipherText


def decrypt_message(key: int, message: str) -> str:
    """
    >>> decrypt_message(4545, 'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF'
    ...                 '{xIp~{HL}Gi')
    'The affine cipher is a type of monoalphabetic substitution cipher.'
    """
    keyA, keyB = divmod(key, len(SYMBOLS))
    check_keys(keyA, keyB, "decrypt")
    plainText = ""
    modInverseOfkeyA = _mod_inverse(keyA, len(SYMBOLS))
    for symbol in message:
        if symbol in SYMBOLS:
            symIndex = SYMBOLS.find(symbol)
            plainText += SYMBOLS[(symIndex - keyB) * modInverseOfkeyA % len(SYMBOLS)]
        else:
            plainText += symbol
    return plainText


def get_random_key():
    """Return a random valid combined affine key.

    Bug fix: the previous version drew both parts with
    ``randint(2, len(SYMBOLS))``; a draw of ``keyB == len(SYMBOLS)`` made
    ``divmod(key, len(SYMBOLS))`` wrap to ``(keyA + 1, 0)``, which
    check_keys rejects as a weak key and the program exits.  keyB is now
    drawn from [1, len(SYMBOLS) - 1] so the key always decodes to a valid
    pair.
    """
    while True:
        keyA = random.randint(2, len(SYMBOLS) - 1)
        keyB = random.randint(1, len(SYMBOLS) - 1)
        if math.gcd(keyA, len(SYMBOLS)) == 1:
            return keyA * len(SYMBOLS) + keyB


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
#!/usr/bin/env python # coding: utf-8 from PyQt5.QtWidgets import * import sys from window import MainWindow, TurtleWindow import rospy if __name__ == '__main__': rospy.init_node('turtle_ctrl_node') # Qt ui 部分 app = QApplication(sys.argv) # 窗体展示 window = TurtleWindow() window.show() sys.exit(app.exec_())
#!/usr/bin/env python3 import abc import argparse import collections import numbers import os.path import re import sys import token import tokenize import fett from typing import Any, Dict, List, Set, Union, Optional def stderr(*args) -> None: sys.stderr.write('{}\n'.format(' '.join(str(element) for element in args))) def get_token_text(tok) -> str: _, a = tok.start _, b = tok.end return tok.line[a:b] def error_unknown_token(tok) -> None: name = token.tok_name[tok.exact_type] stderr('Unknown token: {}'.format(name)) stderr(' in: {}'.format(repr(tok.line))) sys.exit(1) def error_unknown_name(tok) -> None: name = token.tok_name[tok.exact_type] stderr('Unknown type: {}'.format(name)) stderr(' in: {}'.format(repr(tok.line))) stderr(' Expected: one of "struct", "enum", "const", "union", "fn", or "signal"') sys.exit(1) def error_expected(tok, name: str) -> None: tok_type = token.tok_name[tok.exact_type] stderr('Expected', repr(name)) stderr(' in', repr(tok.line)) stderr(' got: {} ({})'.format(repr(get_token_text(tok)), tok_type)) sys.exit(1) def error_duplicate(tok, name: str) -> None: stderr('Duplicate definition', name) if tok: token_name = token.tok_name[tok.exact_type] stderr(' in', repr(tok.line)) sys.exit(1) def parse_type(text: str): if text.startswith('[') and text.endswith(']'): return ('list', parse_type(text[1:-1])) if text.startswith('(') and text.endswith(')'): text = text[1:-1] return ('tuple', [parse_type(x.strip()) for x in text.split(',')]) if text in ('i8', 'i16', 'i32', 'i64', 'u8', 'u16', 'u32', 'u64', 'f32', 'f64'): return ('number', text) if text == 'nil': return (text,) if text == 'fireandforget': return (text,) raise ValueError(text) class State(metaclass=abc.ABCMeta): def __init__(self, defs: Dict[str, Any], stack: List['State']) -> None: self.defs = defs self.stack = stack self.last_return = None # type: Any @abc.abstractmethod def handle(self, tok): ... 
def initialize(self) -> None: pass def push_state(self, state_type: type) -> None: state = state_type(self.defs, self.stack) state.initialize() self.stack.append(state) def pop_state(self, arg=None) -> None: self.stack.pop() if arg: self.stack[-1].last_return = arg def register_def(self, name: str, kv: Any) -> None: if name in self.defs: error_duplicate(None, name) self.defs[name] = kv class StateRoot(State): def handle(self, tok): value = get_token_text(tok) if tok.exact_type == tokenize.COMMENT: pass elif tok.exact_type == tokenize.ENCODING: pass elif tok.type == token.ENDMARKER: return elif tok.exact_type == token.NAME: if value == 'enum': self.push_state(StateEnum) elif value == 'struct': self.push_state(StateStruct) elif value == 'fn': self.push_state(StateFunction) elif value == 'signal': self.push_state(StateSignal) elif value == 'const': self.push_state(StateConst) elif value == 'union': self.push_state(StateUnion) else: error_unknown_name(tok) else: error_unknown_token(tok) class StateBlock(State): @classmethod @abc.abstractmethod def container(cls) -> type: ... @staticmethod @abc.abstractmethod def delimiter() -> str: ... 
@staticmethod def check_value(tok, value): pass def initialize(self) -> None: self.name = '<unknown>' self.state = 0 self.value = None # type: Any self.curname = '' self.kv = self.container()() def handle(self, tok): value = get_token_text(tok) if self.state == 0: if tok.exact_type == token.NAME: self.name = value self.state = 1 else: error_unknown_token(tok) elif self.state == 1: if tok.exact_type == token.COLON: self.state = 2 else: error_expected(tok, '":"') elif self.state == 2: if tok.exact_type == token.NEWLINE: self.state = 3 else: error_expected(tok, 'newline') elif self.state == 3: if tok.exact_type == token.INDENT: self.state = 4 else: error_expected(tok, 'indent') elif self.state == 4: if tok.exact_type == token.NAME: self.state = 5 self.curname = value elif tok.type == token.DEDENT: self.pop_state() self.register_def(self.name, self.kv) return else: error_expected(tok, 'name') elif self.state == 5: if tok.type == token.OP and value == self.delimiter(): self.state = 6 else: error_expected(tok, self.delimiter()) elif self.state == 6: if tok.exact_type == token.NAME: pass elif tok.exact_type == token.LSQB: self.push_state(StateList) self.state = 7 return elif tok.exact_type == token.LPAR: self.push_state(StateTuple) self.state = 7 return elif tok.type == token.NUMBER: value = int(value) elif self.last_return: value = self.last_return else: error_expected(tok, 'name, number, or type') self.value = value self.state = 7 elif self.state == 7: if self.curname in self.kv: error_duplicate(tok, self.curname) if self.last_return: self.value = self.last_return self.last_return = None self.check_value(tok, self.value) self.kv[self.curname] = self.value self.value = None self.state = 8 if tok.exact_type == token.NEWLINE: self.state = 4 elif self.state == 8: if tok.exact_type != token.NEWLINE: error_expected(tok, 'newline') self.state = 4 else: assert False class StateEnum(StateBlock): class Container(collections.OrderedDict): pass @classmethod def container(cls) -> 
type: return cls.Container @staticmethod def delimiter() -> str: return '=' def register_def(self, name: str, kv) -> None: kv_set = set() # type: Set[Union[str, int, float]] for fieldname, fieldvalue in kv.items(): if fieldvalue in kv_set: error_duplicate(None, '{}.{}'.format(name, fieldname)) kv_set.add(fieldvalue) StateBlock.register_def(self, name, kv) class StateStruct(StateBlock): class Container(collections.OrderedDict): pass @classmethod def container(cls) -> type: return cls.Container @staticmethod def delimiter() -> str: return ':' @staticmethod def check_value(tok, value: Any) -> None: try: return parse_type(value) except ValueError: error_expected(tok, 'valid type') class StateFunction(State): class Container(list): pass def initialize(self) -> None: self.state = 0 self.name = '' self.args = [] # type: List[str] self.return_type = '' self.current_parameter = None # type: Optional[str] self.prototype = self.Container(([], None)) def handle(self, tok): value = get_token_text(tok) if self.state == 0: if tok.exact_type != token.NAME or not value: error_expected(tok, 'function name') self.name = value self.state = 1 elif self.state == 1: if tok.exact_type != token.LPAR: error_expected(tok, '"("') self.state = 2 elif self.state == 2: if tok.exact_type == token.NAME: self.state = 3 self.current_parameter = value elif tok.exact_type == token.RPAR: self.state = 10 else: error_expected(tok, 'name') elif self.state == 3: if tok.exact_type == token.COLON: self.state = 4 else: error_expected(tok, '":"') elif self.state == 4: if tok.exact_type != token.NAME: error_expected(tok, 'name') self.state = 5 self.prototype[0].append((self.current_parameter, value)) elif self.state == 5: if tok.exact_type == token.RPAR: self.state = 10 elif tok.exact_type == token.COMMA: self.state = 2 else: error_expected(tok, '"," or ")"') elif self.state == 7: # Return list assert False elif self.state == 10: if tok.type != token.OP or value != '->': error_expected(tok, '"->"') self.state = 
11 elif self.state == 11: if tok.exact_type == token.NAME: self.state = 12 self.prototype[1] = value elif tok.exact_type == token.LSQB: self.push_state(StateList) self.state = 7 else: error_expected(tok, "type") elif self.state == 12: self.pop_state() self.register_def(self.name, self.prototype) else: assert False class StateSignal(StateFunction): class Container(list): pass class StateConst(State): def initialize(self) -> None: self.name = '' self.state = 0 def handle(self, tok): value = get_token_text(tok) if self.state == 0: if tok.exact_type == token.NAME: self.name = value self.state = 1 else: error_unknown_token(tok) elif self.state == 1: if tok.exact_type == token.EQUAL: self.state = 2 else: error_expected(tok, '=') elif self.state == 2: if tok.type == token.NAME: pass elif tok.type == token.NUMBER: value = int(value) else: error_expected(tok, 'name') self.register_def(self.name, value) self.state = 3 elif self.state == 3: if tok.exact_type == token.NEWLINE: self.pop_state() else: error_expected(tok, 'newline') class StateUnion(State): def initialize(self) -> None: self.name = '' self.state = 0 self.types = set() # type: Set[str] def handle(self, tok): value = get_token_text(tok) if self.state == 0: if tok.exact_type == token.NAME: self.name = value self.state = 1 else: error_unknown_token(tok) elif self.state == 1: if tok.exact_type == token.EQUAL: self.state = 2 else: error_expected(tok, '=') elif self.state == 2: if tok.type != token.NAME: error_expected(tok, 'name') self.types.add(value) self.state = 3 elif self.state == 3: if tok.exact_type == token.VBAR: self.state = 2 elif tok.exact_type == token.NEWLINE: self.pop_state() self.register_def(self.name, list(self.types)) else: error_expected(tok, '| or newline') class StateTuple(State): def initialize(self) -> None: self.types = [] # type: List[str] def handle(self, tok): value = get_token_text(tok) if tok.exact_type == token.COMMA: pass elif tok.exact_type == token.RPAR: self.pop_state('(' + ', 
'.join(self.types) + ')') elif tok.type == token.NAME: self.types.append(value) class StateList(State): def initialize(self) -> None: self.type = '' def handle(self, tok): value = get_token_text(tok) if tok.exact_type == token.COMMA: pass elif tok.exact_type == token.RSQB and self.type: self.pop_state(self.type) elif tok.type == token.NAME and not self.type: self.type = '[' + value + ']' def resolve_c_type(typename) -> Optional[str]: return { 'u8': 'uint8_t', 'i8': 'int8_t', 'u16': 'uint16_t', 'i16': 'int16_t', 'u32': 'uint32_t', 'i32': 'int32_t', 'u64': 'uint64_t', 'i64': 'int64_t', 'f32': 'float', 'f64': 'double', 'bin': 'char*', 'str': 'char*', 'bool': 'bool' }.get(typename, None) def serialize_type(type: str, defs): if type in ('i8', 'i16', 'i32', 'i64'): return 'cmp_write_integer();' elif type in ('u8', 'u16', 'u32', 'u64'): return 'cmp_write_uinteger();' elif type in ('f32', 'f64'): return 'cmp_write_decimal();' elif type == 'bool': return 'cmp_write_bool();' elif type == 'str': return 'cmp_write_str();' elif type == 'bin': return 'cmp_write_bin();' assert 'Unknown type ' + type args = defs[type][0] return_type = defs[type][1] print(args, return_type) return ''' ''' def render_c_header(name: str, defs) -> None: functions = {} # type: Dict[str, List[Any]] print(f'#ifndef __{name.upper()}_H__\n#define __{name.upper()}_H__') print('#include <stdint.h>') print('''#ifdef __cplusplus extern "C" { #endif''') for orig_key, value in defs.items(): key = '{}_{}'.format(name, orig_key) if isinstance(value, str): print('#define {} "{}"'.format(key, value)) elif isinstance(value, numbers.Number): print('#define {} {}'.format(key, value)) elif isinstance(value, StateStruct.Container): print('struct {} {{'.format(key)) for fieldname, fieldvalue in value.items(): typename = resolve_c_type(fieldvalue) print(' {} {};'.format(typename, fieldname)) print('};\n') elif isinstance(value, StateEnum.Container): print('enum {} {{'.format(key)) for fieldname, fieldvalue in 
value.items(): print(' {}_{} = {},'.format(key, fieldname, fieldvalue)) print('};\n') elif isinstance(value, StateFunction.Container): functions[orig_key] = value elif isinstance(value, StateSignal.Container): pass # print('signal', key, value) print('typedef enum {') for function_name in functions.keys(): print(f' {name.upper()}_METHOD_{function_name.upper()},') print(f'}} {name}_methodid_t;\n') for function_name, function_prototype in functions.items(): print('typedef struct {') for arg in function_prototype[0]: print(f' {resolve_c_type(arg[1])} {arg[0]};') print(f'}} {name}_{function_name}_args_t;\n') print('typedef struct {') print(f' uint64_t messageid;') print(f' {name}_methodid_t methodid;') print(f' union {{') for function_name in functions.keys(): print(f' {name}_{function_name}_args_t args_{function_name};') print(' };') print(f'}} {name}_method_t;') fett.Template.FILTERS['serializeType'] = lambda x: serialize_type(x, defs) template = fett.Template(''' int {{ name }}_read_message(int, {{ name }}_method_t*); int {{ name }}_write_message({{ name }}_method_t*, int); #ifdef {{ name upperCase }}_IMPLEMENTATION #include <cmp/cmp.h> int {{ name }}_read_message(int fd, {{ name }}_method_t* args) { cmp_ctx_t cmp; cmp_init(&cmp, fd, file_reader, file_writer); } int {{ name }}_write_message({{ name }}_method_t* args, int fd) { cmp_ctx_t cmp; cmp_init(&cmp, fd, file_reader, file_writer); if (!cmp_write_array(&cmp, 2)) { return 1; } if (!cmp_write_uinteger(&cmp, args->messageid)) { return 1; } if (!cmp_write_uinteger(&cmp, args->methodid)) { return 1; } switch (args->methodid) { {{ for method in methods }} case {{ name upperCase }}_METHOD_{{ method upperCase }}: {{ method serializeType }} break; {{ end }} } } #endif /* {{ name upperCase }}_IMPLEMENTATION */ #endif /* __{{ name upperCase }}_H__ */ ''') print(template.render({ 'name': name, 'methods': functions.keys() })) print('''#ifdef __cplusplus } /* extern "C" */ #endif''') def main(args): parser = 
argparse.ArgumentParser(description='Generate code implementing an RPC interface') parser.add_argument('input', metavar='INPUT', help='The input RPC definition file') parser.add_argument('--name', metavar='NAME') parser.add_argument('--verbose', '-v', action='count') args = parser.parse_args() if not args.name: try: args.name = os.path.basename(os.path.splitext(args.input)[0]) except IndexError: args.name = args.input if not re.match(r'^[a-z_]+$', args.name, re.I): raise ValueError('Invalid name: {}'.format(repr(args.name))) with open(args.input, 'rb') as f: lines = f.readlines() defs = collections.OrderedDict() stack = [] stack.append(StateRoot(defs, stack)) lines = iter(lines) for tok in tokenize.tokenize(lambda: next(lines)): if tok.type == tokenize.COMMENT: continue elif tok.type == tokenize.NL: continue if args.verbose: stderr(stack[-1].__class__.__name__, tok) stack[-1].handle(tok) render_c_header(args.name, defs) if __name__ == '__main__': main(sys.argv)