content stringlengths 5 1.05M |
|---|
from __future__ import division, absolute_import
__copyright__ = """
Copyright (C) 2017 Andreas Kloeckner
Copyright (C) 2018 Alexandru Fikl
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pytools import memoize_method
import logging
logger = logging.getLogger(__name__)
__doc__ = """
.. exception:: GmshError
.. autoclass:: ScriptSource
.. autoclass:: FileSource
.. autoclass:: ScriptWithFilesSource
.. autoclass:: GmshRunner
"""
class GmshError(RuntimeError):
    """Raised when a gmsh run reports errors in its output."""
# {{{ tools
def _erase_dir(dir):
    """Recursively delete directory *dir* and everything inside it.

    The previous implementation unlinked only plain files, so it raised
    if the directory ever contained a subdirectory (e.g. one created by
    gmsh itself); shutil.rmtree handles nested content.
    """
    from shutil import rmtree
    rmtree(dir)
class _TempDirManager(object):
    """Owns a freshly created temporary directory on disk."""

    def __init__(self):
        import tempfile
        self.path = tempfile.mkdtemp()

    def sub(self, n):
        """Return the full path of entry *n* inside the directory."""
        import os.path
        return os.path.join(self.path, n)

    def clean_up(self):
        """Delete the temporary directory and its contents."""
        _erase_dir(self.path)

    def error_clean_up(self):
        """Alias for :meth:`clean_up`, called on error paths."""
        _erase_dir(self.path)
class ScriptSource(object):
    """A gmsh input script given as a string, together with its extension.

    .. versionadded:: 2016.1
    """

    def __init__(self, source, extension):
        self.extension = extension
        self.source = source
class LiteralSource(ScriptSource):
    """Deprecated alias of :class:`ScriptSource`.

    .. versionadded:: 2014.1
    """

    def __init__(self, source, extension):
        super(LiteralSource, self).__init__(source, extension)

        from warnings import warn
        warn("LiteralSource is deprecated, use ScriptSource instead",
                DeprecationWarning, stacklevel=2)
class FileSource(object):
    """A gmsh input given by the name of an on-disk file.

    .. versionadded:: 2014.1
    """

    def __init__(self, filename):
        self.filename = filename
class ScriptWithFilesSource(object):
    """A gmsh script that relies on additional data files.

    .. versionadded:: 2016.1

    .. attribute:: source

        The script code to be fed to gmsh.

    .. attribute:: filenames

        The names of files to be copied to the temporary directory where
        gmsh is run.
    """

    def __init__(self, source, filenames, source_name="temp.geo"):
        self.source_name = source_name
        self.filenames = filenames
        self.source = source
class GmshRunner(object):
    """Context manager that runs gmsh on a given source.

    On entry, the source is materialized in a temporary directory, gmsh is
    invoked there, and the generated mesh is exposed as the open file object
    :attr:`output_file`.  On exit, the temporary directory is removed.
    """

    def __init__(self, source, dimensions=None, order=None,
            incomplete_elements=None, other_options=None,
            extension="geo", gmsh_executable="gmsh",
            output_file_name=None,
            target_unit=None,
            save_tmp_files_in=None):
        # BUGFIX: 'other_options' previously defaulted to a mutable [],
        # which is shared across all calls; use a None sentinel instead.
        # Passing an explicit list behaves exactly as before.
        if other_options is None:
            other_options = []

        if isinstance(source, str):
            from warnings import warn
            warn("passing a string as 'source' is deprecated--use "
                    "one of the *Source classes",
                    DeprecationWarning)

            source = ScriptSource(source, extension)

        if target_unit is None:
            target_unit = "MM"
            from warnings import warn
            warn("Not specifying target_unit is deprecated. Set target_unit='MM' "
                    "to retain prior behavior.", DeprecationWarning, stacklevel=2)

        if output_file_name is None:
            output_file_name = "output.msh"

        self.source = source
        self.dimensions = dimensions
        self.order = order
        self.incomplete_elements = incomplete_elements
        self.other_options = other_options
        self.gmsh_executable = gmsh_executable
        self.output_file_name = output_file_name
        self.save_tmp_files_in = save_tmp_files_in
        self.target_unit = target_unit.upper()

        if self.dimensions not in [1, 2, 3, None]:
            raise RuntimeError("dimensions must be one of 1,2,3 or None")

        if self.target_unit not in ['M', 'MM']:
            raise RuntimeError("units must be 'M' (meters) or 'MM' (millimeters)")

    @property
    @memoize_method
    def version(self):
        """The version of the gmsh executable, as a LooseVersion (cached)."""
        # NOTE(review): distutils is deprecated on recent Pythons; switching
        # to packaging.version would change comparison semantics, so it is
        # kept as-is here.
        from distutils.version import LooseVersion

        cmdline = [
                self.gmsh_executable,
                '-version'
                ]

        from pytools.prefork import call_capture_output
        retcode, stdout, stderr = call_capture_output(cmdline)

        # stderr can contain irregular info; extract the x.y.z triple.
        # BUGFIX: the dots in the pattern are now escaped -- an unescaped
        # '.' matches any character and could pick up a wrong span.
        import re
        version = re.search(
                r'[0-9]+\.[0-9]+\.[0-9]+', stderr.decode().strip()).group()

        return LooseVersion(version)

    def __enter__(self):
        self.temp_dir_mgr = None
        temp_dir_mgr = _TempDirManager()
        try:
            working_dir = temp_dir_mgr.path
            from os.path import join, abspath, exists

            # Materialize the source inside (or point gmsh at) working_dir.
            if isinstance(self.source, ScriptSource):
                source_file_name = join(
                        working_dir, "temp."+self.source.extension)
                with open(source_file_name, "w") as source_file:
                    source_file.write(self.source.source)

            elif isinstance(self.source, FileSource):
                source_file_name = abspath(self.source.filename)
                if not exists(source_file_name):
                    raise IOError("'%s' does not exist" % source_file_name)

            elif isinstance(self.source, ScriptWithFilesSource):
                source_file_name = join(
                        working_dir, self.source.source_name)
                with open(source_file_name, "w") as source_file:
                    source_file.write(self.source.source)

                from os.path import basename
                from shutil import copyfile
                for f in self.source.filenames:
                    copyfile(f, join(working_dir, basename(f)))

            else:
                raise RuntimeError("'source' type unrecognized")

            output_file_name = join(working_dir, self.output_file_name)
            cmdline = [
                    self.gmsh_executable,
                    "-o", self.output_file_name,
                    "-nopopup",
                    "-format", "msh2"]

            # NOTE: handle unit incompatibility introduced in GMSH4
            # https://gitlab.onelab.info/gmsh/gmsh/issues/397
            if self.version < '4.0.0':
                if self.target_unit == 'M':
                    cmdline.extend(["-string", "Geometry.OCCScaling=1000;"])
            else:
                cmdline.extend(["-string",
                    "Geometry.OCCTargetUnit='{}';".format(self.target_unit)])

            if self.dimensions is not None:
                cmdline.append("-%d" % self.dimensions)

            if self.order is not None:
                cmdline.extend(["-order", str(self.order)])

            if self.incomplete_elements is not None:
                cmdline.extend(["-string",
                    "Mesh.SecondOrderIncomplete = %d;"
                    % int(self.incomplete_elements)])

            cmdline.extend(self.other_options)
            cmdline.append(source_file_name)

            if self.dimensions is None:
                cmdline.append("-")

            logger.info("invoking gmsh: '%s'" % " ".join(cmdline))
            from pytools.prefork import call_capture_output
            retcode, stdout, stderr = call_capture_output(
                    cmdline, working_dir)
            logger.info("return from gmsh")

            stdout = stdout.decode("utf-8")
            stderr = stderr.decode("utf-8")

            # gmsh prints "N error(s)" / "N warning(s)" summary lines on
            # stdout.  BUGFIX: parse each independently -- the previous
            # code crashed with an AttributeError whenever only one of the
            # two lines was present.
            import re
            error_match = re.match(r"([0-9]+)\s+error", stdout)
            warning_match = re.match(r"([0-9]+)\s+warning", stdout)
            num_errors = int(error_match.group(1)) if error_match else 0
            num_warnings = int(warning_match.group(1)) if warning_match else 0

            if num_errors:
                msg = "gmsh execution failed with message:\n\n"
                if stdout:
                    msg += stdout+"\n"
                msg += stderr+"\n"
                raise GmshError(msg)

            if num_warnings:
                from warnings import warn
                msg = "gmsh issued the following warning messages:\n\n"
                if stdout:
                    msg += stdout+"\n"
                msg += stderr+"\n"
                warn(msg)

            self.output_file = open(output_file_name, "r")

            if self.save_tmp_files_in:
                import shutil
                import errno
                try:
                    shutil.copytree(working_dir, self.save_tmp_files_in)
                except FileExistsError:
                    import select
                    import sys
                    print("%s exists! Overwrite? (Y/N, will default to Y in 10sec)."
                            % self.save_tmp_files_in)
                    decision = None
                    while decision is None:
                        # Wait up to 10s for a line on stdin.
                        i, o, e = select.select([sys.stdin], [], [], 10)
                        if i:
                            resp = sys.stdin.readline().strip()
                            if resp in ("N", "n"):
                                logger.info("Not overwriting.")
                                decision = 0
                            elif resp in ("Y", "y"):
                                decision = 1
                                logger.info("Overwriting.")
                            else:
                                # BUGFIX: used to interpolate the select()
                                # result list instead of the user's response.
                                print("Illegal input %s, please retry." % resp)
                        else:
                            decision = 1  # timed out: default to overwrite

                    if decision == 1:
                        shutil.rmtree(self.save_tmp_files_in)
                        shutil.copytree(working_dir, self.save_tmp_files_in)
                except OSError as exc:
                    if exc.errno == errno.ENOTDIR:
                        # Destination is a plain file path: save just the mesh.
                        shutil.copy(output_file_name,
                                '/'.join([self.save_tmp_files_in,
                                    self.output_file_name]))
                    else:
                        raise

            self.temp_dir_mgr = temp_dir_mgr
            return self
        except Exception:
            temp_dir_mgr.clean_up()
            raise

    def __exit__(self, type, value, traceback):
        self.output_file.close()
        if self.temp_dir_mgr is not None:
            self.temp_dir_mgr.clean_up()
|
# from here: https://github.com/hamx0r/stocktwits
import logging as log
from requestors import ST_BASE_PARAMS, ST_BASE_URL
# Select which library to use for handling HTTP request. If running on Google App Engine, use `GAE`.
# Otherwise, use `Requests` which is based on the `requests` module.
from requestors import Requests as r
# Single shared Requestor instance used by every API call below.
R = r()
__author__ = 'Jason Haury'
# Example list of exchanges to limit a watchlist to
EXCHANGES = ['NYSE', 'NASDAQ', 'NYSEMkt', 'NYSEArca']
# ---------------------------------------------------------------------
# Basic StockTwits interface
# ---------------------------------------------------------------------
def get_watched_stocks(wl_id):
    """ Get list of symbols being watched by specified StockTwits watchlist
    """
    url = ST_BASE_URL + 'watchlists/show/{}.json'.format(wl_id)
    watchlist, req_left, reset_time = R.get_json(url, params=ST_BASE_PARAMS)
    entries = watchlist['watchlist']['symbols']
    return [entry['symbol'] for entry in entries]
def get_stock_stream(symbol, params=None):
    """ gets stream of messages for given symbol

    :param symbol: ticker symbol whose stream to fetch.
    :param params: optional dict of extra query parameters, merged over
        the StockTwits base parameters.
    """
    # BUGFIX: 'params' previously defaulted to a mutable {}, shared between
    # calls; a None sentinel avoids that while keeping the interface intact.
    all_params = ST_BASE_PARAMS.copy()
    if params:
        all_params.update(params)
    return R.get_json(ST_BASE_URL + 'streams/symbol/{}.json'.format(symbol), params=all_params)
def get_message_stream(wl_id, params=None):
    """ Gets up to 30 messages from Watchlist (wl_id) according to additional params

    :param wl_id: id of the StockTwits watchlist to read.
    :param params: optional dict of extra query parameters, merged over
        the StockTwits base parameters.
    """
    # BUGFIX: 'params' previously defaulted to a mutable {}, shared between
    # calls; a None sentinel avoids that while keeping the interface intact.
    all_params = ST_BASE_PARAMS.copy()
    if params:
        all_params.update(params)
    return R.get_json(ST_BASE_URL + 'streams/watchlist/{}.json'.format(wl_id), params=all_params)
def add_to_watchlist(symbols, wl_id):
    """ Adds list of symbols to our StockTwits watchlist. Returns list of new symbols added
    """
    deadline = 30 * len(symbols)
    params = ST_BASE_PARAMS.copy()
    params['symbols'] = ','.join(symbols)  # API expects a csv list
    url = ST_BASE_URL + 'watchlists/{}/symbols/create.json'.format(wl_id)
    resp, req_left, reset_time = R.post_json(url, params=params, deadline=deadline)
    if resp['response']['status'] != 200:
        return []
    return [s['symbol'] for s in resp['symbols']]
def delete_from_watchlist(symbol, wl_id):
    """ removes a single "symbols" (str) from watchlist. Returns True on success, False on failure
    """
    params = ST_BASE_PARAMS.copy()
    params['symbols'] = symbol
    url = ST_BASE_URL + 'watchlists/{}/symbols/destroy.json'.format(wl_id)
    resp, req_left, reset_time = R.post_json(url, params=params)
    return resp['response']['status'] == 200
def get_trending_stocks():
    """ returns list of trending stock symbols,
    used to ensure each symbol is part of a NYSE or NASDAQ but doesn't seem to be available anymore
    returns a lot more than visible on the site
    seems to return in order of trending most

    Retries (sleeping 30s) while the API returns None; returns None if the
    API signals failure by returning False.
    """
    # BUGFIX: 'time' was used below but never imported anywhere in this
    # module, so the retry path raised NameError.
    import time

    trending = None
    while trending is None:
        trending, req_left, reset_time = R.get_json(
            ST_BASE_URL + 'trending/symbols.json', params=ST_BASE_PARAMS)
        if trending is None:
            print('trending stocks returned None; sleeping 30s...')
            time.sleep(30)
    if trending is False:
        return None
    trending = trending['symbols']
    # exchange does not seem to be in there anymore
    # symbols = [s['symbol'] for s in trending if s['exchange'] in EXCHANGES]
    symbols = [s['symbol'] for s in trending]
    return symbols
def clean_watchlist(wl_id):
    """ Deletes stocks to follow if they aren't part of NASDAQ or NYSE

    Returns the number of symbols removed from the watchlist.
    """
    wl, req_left, reset_time = R.get_json(ST_BASE_URL + 'watchlists/show/{}.json'.format(wl_id),
                                          params=ST_BASE_PARAMS)
    # BUGFIX: this used to read "wl = ['watchlist']['symbols']", which
    # subscripts a fresh list literal (TypeError) instead of indexing the
    # response dict.
    wl = wl['watchlist']['symbols']
    qty_deleted = 0
    for sym in wl:
        if sym['exchange'] not in EXCHANGES:
            log.info("Removing {}".format(sym))
            if delete_from_watchlist(sym['symbol'], wl_id=wl_id):
                qty_deleted += 1
            else:
                log.error("Error deleting symbol from watchlist: {}".format(sym))
    return qty_deleted
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from gstools import SRF, Gaussian
import os
import sys
import sklearn
from sklearn.preprocessing import MinMaxScaler as MinMaxScaler
from datetime import datetime
def scale(x, max_CRU, exp):
    """Linearly rescale *x* into the CRU expression range.

    The values are first centred and normalised over their own min/max,
    then mapped onto the interval
    (0.95 * (2 - max_CRU) * exp, 0.95 * max_CRU * exp).
    """
    lo = 0.95 * (1 - (max_CRU - 1)) * exp
    hi = (0.95 * max_CRU) * exp
    dmin, dmax = np.min(x), np.max(x)
    centred = (x - (dmax + dmin) / 2) / (dmax - dmin)
    return centred * (hi - lo) + (hi + lo) / 2
def generate_preview(mainUI):
    """Generate a single 50x50 preview SRF using the UI's length scales."""
    variance = 1.0
    dim_x = 50
    dim_y = 50
    dim_z = 1
    len_long = float(mainUI.dSB_Longitudinal.value())
    len_trans = float(mainUI.dSB_Transverse.value())
    CRU_var = 1.0
    mean_exp = 1.0
    max_CRU = 1.5
    min_CRU = 0.5

    # Draw a 2D Gaussian random field on the structured 50x50 grid.
    model = Gaussian(dim = 2, var = variance, len_scale = (len_long, len_trans))
    srf = SRF(model)
    field = srf((range(dim_x), range(dim_y)), mesh_type = 'structured')

    # Rescale so the field total matches the expected overall expression,
    # then map into the CRU range and re-centre on the mean expression.
    map_total = np.sum(field)
    if map_total != dim_x * dim_y * mean_exp:
        field = field * ((dim_x * dim_y * mean_exp) / map_total)
    field = scale(field, max_CRU, mean_exp)
    field = field + (mean_exp - np.mean(field))
    return field
def save_preview_to_file(field):
    """Write the 50x50 preview *field* to SRFs/PreviewSRF.vtk and .dat.

    The .vtk file gets a legacy-VTK structured-points header; both files
    then receive the field values row by row (i, the x index, varying
    fastest within each line).
    """
    X = 50
    Y = 50
    header = ("# vtk DataFile Version 3.0\n"
              + "vtk output\n"
              + "ASCII\n"
              + "DATASET STRUCTURED_POINTS\n"
              + "DIMENSIONS 50 50 1\n"
              + "SPACING 1 1 1\n"
              + "ORIGIN 0 0 0\n"
              + "POINT_DATA 2500\n"
              + "SCALARS Image float 1\n"
              + "LOOKUP_TABLE default\n")
    # The 'with' statement closes both files; the old explicit close()
    # calls inside the context were redundant.
    with open("SRFs/PreviewSRF.vtk", "w") as vtkfile, \
            open("SRFs/PreviewSRF.dat", "w") as datfile:
        vtkfile.write(header)
        for j in range(Y):
            for i in range(X):
                vtkfile.write(str(field[i][j]) + " ")
                datfile.write(str(field[i][j]) + " ")
            vtkfile.write("\n")
            datfile.write("\n")
def produce_preview_graphic(field):
    """Render *field* as a filled contour plot in SRFs/PreviewSRFContour.png.

    The field is min-max normalised to [0, 1] before contouring.
    """
    contour_data = (field - np.amin(field)) / (np.amax(field) - np.amin(field))
    fig1 = plt.Figure(facecolor = 'white', figsize = (10, 10), dpi = 100)

    lY = len(field)
    lX = len(field[0])
    x = np.linspace(0, lX, lX)
    y = np.linspace(0, lY, lY)
    X, Y = np.meshgrid(x, y)

    ax1 = fig1.add_subplot(1, 1, 1)
    levels = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    ax1.contourf(X, Y, contour_data, levels, alpha = 1, cmap = "coolwarm", vmin = 0, vmax = 1.0)
    ax1.contour(X, Y, contour_data, levels, colors = 'black', linewidths = .5, vmin = 0, vmax = 1.0)
    ax1.set_xticklabels('', visible = False)
    ax1.set_yticklabels('', visible = False)

    # Call the bound methods on the figure instance instead of the old
    # unbound plt.Figure.<method>(fig1, ...) form.
    fig1.subplots_adjust(left = 0.0, bottom = 0.0, right = 1.0, top = 1.0, wspace = 0.0, hspace = 0.0)
    fig1.savefig('SRFs/PreviewSRFContour.png')
def produce_preview_graphic_vario(field):
    """Render *field* as a filled contour plot in SRFs/PreviewVarioContour.png.

    Identical to produce_preview_graphic except for the output file name.
    The field is min-max normalised to [0, 1] before contouring.
    """
    contour_data = (field - np.amin(field)) / (np.amax(field) - np.amin(field))
    fig1 = plt.Figure(facecolor = 'white', figsize = (10, 10), dpi = 100)

    lY = len(field)
    lX = len(field[0])
    x = np.linspace(0, lX, lX)
    y = np.linspace(0, lY, lY)
    X, Y = np.meshgrid(x, y)

    ax1 = fig1.add_subplot(1, 1, 1)
    levels = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    ax1.contourf(X, Y, contour_data, levels, alpha = 1, cmap = "coolwarm", vmin = 0, vmax = 1.0)
    ax1.contour(X, Y, contour_data, levels, colors = 'black', linewidths = .5, vmin = 0, vmax = 1.0)
    ax1.set_xticklabels('', visible = False)
    ax1.set_yticklabels('', visible = False)

    # Call the bound methods on the figure instance instead of the old
    # unbound plt.Figure.<method>(fig1, ...) form.
    fig1.subplots_adjust(left = 0.0, bottom = 0.0, right = 1.0, top = 1.0, wspace = 0.0, hspace = 0.0)
    fig1.savefig('SRFs/PreviewVarioContour.png')
def create_SRFs(mainUI):
    """Generate and save 'num' accepted 3D SRFs from the UI settings.

    Each candidate field is drawn from a Gaussian random-field model,
    rescaled, and accepted only if its mean and extreme values satisfy
    the CRU expression bounds; rejected candidates are re-drawn.
    Progress is reported through mainUI.progressBarSRF.
    """
    mainUI.progressBarSRF.setValue(0)
    # Read all generation parameters from the UI widgets.
    name = str(mainUI.EntrySRFName.text())
    var = 1.0
    X = int(mainUI.SB_SRFDimX.value())
    Y = int(mainUI.SB_SRFDimY.value())
    Z = int(mainUI.SB_SRFDimZ.value())
    num = int(mainUI.SB_SRFNum.value())
    CRU_var = float(mainUI.SB_NeighbourVar.value())
    MeanExp = float(mainUI.SB_MeanExp.value())
    len_long = float(mainUI.dSB_Longitudinal.value())
    len_trans = float(mainUI.dSB_Transverse.value())
    progress_full = (num * 3) + 2
    # Acceptance bounds around the mean expression.
    max_CRU = MeanExp + CRU_var
    min_CRU = MeanExp - CRU_var
    x = range(X)
    y = range(Y)
    z = range(Z)
    print("Name: " + str(name) + "\n"
          + "Variation: " + str(var) + "\n"
          + "Lengthscale: " + str(len_long) + ", " + str(len_trans) + "\n"
          + "Total Expression: " + str(MeanExp) + "\n"
          + "CRU Expression Max: " + str(max_CRU) + "\n"
          + "CRU Expression Min: " + str(min_CRU) + "\n"
          + "Dimensions : X " + str(X) + " Y " + str(Y) + " Z " + str(Z))
    count = 0
    progress = 1
    mainUI.progressBarSRF.setValue(np.rint((progress/progress_full) * 100))
    while count < num:
        print("Generating SRF {}".format(count + 1))
        SRF_check = 0
        # Rejection-sampling loop: redraw until the field satisfies all
        # mean/max/min acceptance criteria below.
        while SRF_check == 0:
            model = Gaussian(dim = 3, var = var, len_scale = (len_long, len_trans))
            srf = SRF(model)
            field = srf((x, y, z), mesh_type = 'structured')
            # Rescale so the field total matches the expected overall
            # expression, then map into the CRU range and re-centre.
            map_total = np.sum(field)
            if map_total != X*Y*Z*MeanExp:
                field = field * ((X*Y*Z*MeanExp) / map_total)
            field = scale(field, max_CRU, MeanExp)
            field = field + (MeanExp - np.mean(field))
            # NOTE(review): exact float equality -- relies on the re-centring
            # above making the mean match bit-for-bit; confirm intended.
            if np.mean(field) == MeanExp:
                if np.max(field) < (max_CRU * MeanExp):
                    if np.min(field) > (min_CRU * MeanExp):
                        SRF_check = 1
                        progress += 1
                        mainUI.progressBarSRF.setValue(np.rint((progress/progress_full) * 100))
        count += 1
        output_name = "SRFs/" + str(name) + "_" + str(count) + "_"
        output_SRF(field, output_name, X, Y, Z)
        progress += 1
        mainUI.progressBarSRF.setValue(np.rint((progress/progress_full) * 100))
    # Point the UI at the first generated mesh.
    mainUI.outputSRF = "SRFs/" + str(name) + "_1_VTK.vtk"
    print("SRF Generation Completed")
    mainUI.progressBarSRF.setValue(100)
def output_SRF(field, name, X, Y, Z):
    """Write *field* (indexed [x][y][z]) to <name>VTK.vtk and <name>DAT.dat.

    :param field: 3D array-like of values.
    :param name: path prefix for the two output files.
    :param X: grid extent along x.
    :param Y: grid extent along y.
    :param Z: grid extent along z.

    The .vtk file gets a legacy-VTK structured-points header; both files
    then receive one line per y-row (i varying fastest), with a blank line
    between z-slices.
    """
    header = ("# vtk DataFile Version 3.0\n"
              + "vtk output\n"
              + "ASCII\n"
              + "DATASET STRUCTURED_POINTS\n"
              + "DIMENSIONS {} {} {}\n".format(X, Y, Z)
              + "SPACING 1 1 1\n"
              + "ORIGIN 0 0 0\n"
              + "POINT_DATA {}\n".format(X*Y*Z)
              + "SCALARS Image float 1\n"
              + "LOOKUP_TABLE default\n")
    # The 'with' statement closes both files; the old explicit close()
    # calls inside the context were redundant.
    with open(name + "VTK.vtk", "w") as vtkfile, \
            open(name + "DAT.dat", "w") as datfile:
        vtkfile.write(header)
        for k in range(Z):
            for j in range(Y):
                for i in range(X):
                    vtkfile.write(str(field[i][j][k]) + " ")
                    datfile.write(str(field[i][j][k]) + " ")
                vtkfile.write("\n")
                datfile.write("\n")
            vtkfile.write("\n")
            datfile.write("\n")
|
import random
from django.test import RequestFactory, TestCase
from model_bakery import baker
from social_core.backends.google import GoogleOAuth2
from social_core.storage import BaseStorage
from social_core.tests.strategy import TestStrategy
from .. import social_pipeline
class PipelineTests(TestCase):
    """Tests for the two_factor_auth social-auth pipeline step."""

    def setUp(self):
        random.seed(1)
        self.user = baker.make("User")

        backend_storage = BaseStorage()
        backend_storage.partial = baker.make(
            "Partial",
            token="foo",
            kwargs={"user": self.user.id},
        )
        self.strategy = TestStrategy(storage=backend_storage)

        self.request = RequestFactory().get("/")
        self.request.user = self.user
        self.request.session = self.client.session

        self.backend = GoogleOAuth2()

    def _run_pipeline(self):
        # Shared invocation of the pipeline step under test.
        return social_pipeline.two_factor_auth(
            self.strategy,
            backend=self.backend,
            details="details",
            pipeline_index=11,
            user=self.user,
            request=self.request,
        )

    def test_pipeline(self):
        """Complete pipeline if no device is set"""
        self.assertEqual(self._run_pipeline(), "details")

    def test_pipeline_with_device_tfa_completed(self):
        """If two factor authentication is completed, don't redirect even if device is set"""
        self.user.staticdevice_set.create(name="default")
        self.request.session["tfa_completed"] = True
        self.assertEqual(self._run_pipeline(), "details")

    def test_pipeline_with_device(self):
        """Redirect to 2 factor authentication"""
        self.user.staticdevice_set.create(name="default")
        self.assertIn("/two-factor-social-auth/?partial_token=", self._run_pipeline().url)
|
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: email-sig@python.org
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.builtins import int
__all__ = ["mktime_tz", "parsedate", "parsedate_tz", "quote"]
import time, calendar
# String constants shared by the parsing code below.
SPACE = " "
EMPTYSTRING = ""
COMMASPACE = ", "
# Parse a date field
# Both abbreviated and full month names are accepted; an index above 12
# is mapped back into 1..12 by _parsedate_tz.
_monthnames = [
    "jan",
    "feb",
    "mar",
    "apr",
    "may",
    "jun",
    "jul",
    "aug",
    "sep",
    "oct",
    "nov",
    "dec",
    "january",
    "february",
    "march",
    "april",
    "may",
    "june",
    "july",
    "august",
    "september",
    "october",
    "november",
    "december",
]
_daynames = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
# Offsets are written in RFC 822 "hhmm" form (e.g. -400 means -04:00);
# _parsedate_tz converts them to seconds.
_timezones = {
    "UT": 0,
    "UTC": 0,
    "GMT": 0,
    "Z": 0,
    "AST": -400,
    "ADT": -300,  # Atlantic (used in Canada)
    "EST": -500,
    "EDT": -400,  # Eastern
    "CST": -600,
    "CDT": -500,  # Central
    "MST": -700,
    "MDT": -600,  # Mountain
    "PST": -800,
    "PDT": -700,  # Pacific
}
def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.
    """
    parsed = _parsedate_tz(data)
    if not parsed:
        return
    # A None offset means "-0000" (unknown source zone); report it as 0 here.
    if parsed[9] is None:
        parsed[9] = 0
    return tuple(parsed)
def _parsedate_tz(data):
    """Convert date to extended time tuple.

    The last (additional) element is the time zone offset in seconds, except if
    the timezone was specified as -0000. In that case the last element is
    None. This indicates a UTC timestamp that explicitly declaims knowledge of
    the source timezone, as opposed to a +0000 timestamp that indicates the
    source timezone really was UTC.
    """
    if not data:
        return
    data = data.split()
    # The FWS after the comma after the day-of-week is optional, so search and
    # adjust for this.
    if data[0].endswith(",") or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        i = data[0].rfind(",")
        if i >= 0:
            data[0] = data[0][i + 1 :]
    if len(data) == 3:  # RFC 850 date, deprecated
        stuff = data[0].split("-")
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        # Time and zone may be fused ("19:12:08-0500"); split them apart.
        s = data[3]
        i = s.find("+")
        if i == -1:
            i = s.find("-")
        if i > 0:
            data[3:] = [s[:i], s[i:]]
        else:
            data.append("")  # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    # Fields may appear out of order; the swaps below put day, month, year,
    # time, and zone back into their expected slots heuristically.
    mm = mm.lower()
    if mm not in _monthnames:
        dd, mm = mm, dd.lower()
        if mm not in _monthnames:
            return None
    mm = _monthnames.index(mm) + 1
    # Full month names occupy indices 13..24; fold them back to 1..12.
    if mm > 12:
        mm -= 12
    if dd[-1] == ",":
        dd = dd[:-1]
    i = yy.find(":")
    if i > 0:
        # Year and time were swapped.
        yy, tm = tm, yy
    if yy[-1] == ",":
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and zone were swapped.
        yy, tz = tz, yy
    if tm[-1] == ",":
        tm = tm[:-1]
    tm = tm.split(":")
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = "0"
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    elif len(tm) == 1 and "." in tm[0]:
        # Some non-compliant MUAs use '.' to separate time elements.
        tm = tm[0].split(".")
        if len(tm) == 2:
            [thh, tmm] = tm
            tss = 0
        elif len(tm) == 3:
            [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    # Check for a yy specified in two-digit format, then convert it to the
    # appropriate four-digit format, according to the POSIX standard. RFC 822
    # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
    # mandates a 4-digit yy. For more information, see the documentation for
    # the time module.
    if yy < 100:
        # The year is between 1969 and 1999 (inclusive).
        if yy > 68:
            yy += 1900
        # The year is between 2000 and 2068 (inclusive).
        else:
            yy += 2000
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
        # "-0000" means unknown source zone: leave the offset as None.
        if tzoffset == 0 and tz.startswith("-"):
            tzoffset = None
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ((tzoffset // 100) * 3600 + (tzoffset % 100) * 60)
    # Daylight Saving Time flag is set to -1, since DST is unknown.
    return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
def parsedate(data):
    """Convert a time string to a time tuple."""
    parsed = parsedate_tz(data)
    # Drop the timezone element on success; pass failures (None) through.
    return parsed[:9] if isinstance(parsed, tuple) else parsed
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
    if data[9] is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    return calendar.timegm(data) - data[9]
def quote(str):
    """Prepare string to be used in a quoted string.

    Turns backslash and double quote characters into quoted pairs. These
    are the only characters that need to be quoted inside a quoted string.
    Does not add the surrounding double quotes.
    """
    # Backslashes must be doubled first so the quote-escapes stay intact.
    escaped = str.replace("\\", "\\\\")
    return escaped.replace('"', '\\"')
class AddrlistClass(object):
    """Address parser class by Ben Escoto.

    To understand what this class does, it helps to have a copy of RFC 2822 in
    front of you.

    Note: this class interface is deprecated and may be removed in the future.
    Use email.utils.AddressList instead.

    The parser walks self.field once, left to right, tracking its position
    in self.pos; every get* method consumes characters starting at self.pos.
    """

    def __init__(self, field):
        """Initialize a new instance.

        `field' is an unparsed address header field, containing
        one or more addresses.
        """
        self.specials = '()<>@,:;."[]'
        self.pos = 0
        self.LWS = " \t"
        self.CR = "\r\n"
        self.FWS = self.LWS + self.CR
        # Characters that terminate an atom.
        self.atomends = self.specials + self.LWS + self.CR
        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
        # is obsolete syntax. RFC 2822 requires that we recognize obsolete
        # syntax, so allow dots in phrases.
        self.phraseends = self.atomends.replace(".", "")
        self.field = field
        self.commentlist = []

    def gotonext(self):
        """Skip white space and extract comments.

        Returns the whitespace that was skipped (newlines excluded); any
        comments encountered are appended to self.commentlist.
        """
        wslist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + "\n\r":
                if self.field[self.pos] not in "\n\r":
                    wslist.append(self.field[self.pos])
                self.pos += 1
            elif self.field[self.pos] == "(":
                self.commentlist.append(self.getcomment())
            else:
                break
        return EMPTYSTRING.join(wslist)

    def getaddrlist(self):
        """Parse all addresses.

        Returns a list containing all of the addresses.
        """
        result = []
        while self.pos < len(self.field):
            ad = self.getaddress()
            if ad:
                result += ad
            else:
                # Unparseable portion: record an empty (name, address) pair.
                result.append(("", ""))
        return result

    def getaddress(self):
        """Parse the next address.

        Returns a list of (display-name, addr-spec) pairs; group syntax can
        yield more than one pair.
        """
        self.commentlist = []
        self.gotonext()

        # Remember where we were so we can re-parse as a bare addr-spec.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()

        self.gotonext()
        returnlist = []

        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]

        elif self.field[self.pos] in ".@":
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(SPACE.join(self.commentlist), addrspec)]

        elif self.field[self.pos] == ":":
            # address is a group
            returnlist = []

            fieldlen = len(self.field)
            self.pos += 1
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ";":
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()

        elif self.field[self.pos] == "<":
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()

            if self.commentlist:
                returnlist = [
                    (
                        SPACE.join(plist) + " (" + " ".join(self.commentlist) + ")",
                        routeaddr,
                    )
                ]
            else:
                returnlist = [(SPACE.join(plist), routeaddr)]

        else:
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                # Skip the stray special character and move on.
                self.pos += 1

        self.gotonext()
        if self.pos < len(self.field) and self.field[self.pos] == ",":
            self.pos += 1
        return returnlist

    def getrouteaddr(self):
        """Parse a route address (Return-path value).

        This method just skips all the route stuff and returns the addrspec.
        """
        if self.field[self.pos] != "<":
            return

        expectroute = False
        self.pos += 1
        self.gotonext()
        adlist = ""
        while self.pos < len(self.field):
            if expectroute:
                # Consume (and discard) a route domain after '@'.
                self.getdomain()
                expectroute = False
            elif self.field[self.pos] == ">":
                self.pos += 1
                break
            elif self.field[self.pos] == "@":
                self.pos += 1
                expectroute = True
            elif self.field[self.pos] == ":":
                self.pos += 1
            else:
                adlist = self.getaddrspec()
                self.pos += 1
                break

        self.gotonext()
        return adlist

    def getaddrspec(self):
        """Parse an RFC 2822 addr-spec."""
        aslist = []

        self.gotonext()
        while self.pos < len(self.field):
            preserve_ws = True
            if self.field[self.pos] == ".":
                # Whitespace around dots is not significant; drop it.
                if aslist and not aslist[-1].strip():
                    aslist.pop()
                aslist.append(".")
                self.pos += 1
                preserve_ws = False
            elif self.field[self.pos] == '"':
                aslist.append('"%s"' % quote(self.getquote()))
            elif self.field[self.pos] in self.atomends:
                if aslist and not aslist[-1].strip():
                    aslist.pop()
                break
            else:
                aslist.append(self.getatom())
            ws = self.gotonext()
            if preserve_ws and ws:
                aslist.append(ws)

        if self.pos >= len(self.field) or self.field[self.pos] != "@":
            # No domain part: return just the local part.
            return EMPTYSTRING.join(aslist)

        aslist.append("@")
        self.pos += 1
        self.gotonext()
        return EMPTYSTRING.join(aslist) + self.getdomain()

    def getdomain(self):
        """Get the complete domain name from an address."""
        sdlist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == "(":
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] == "[":
                sdlist.append(self.getdomainliteral())
            elif self.field[self.pos] == ".":
                self.pos += 1
                sdlist.append(".")
            elif self.field[self.pos] in self.atomends:
                break
            else:
                sdlist.append(self.getatom())
        return EMPTYSTRING.join(sdlist)

    def getdelimited(self, beginchar, endchars, allowcomments=True):
        """Parse a header fragment delimited by special characters.

        `beginchar' is the start character for the fragment.
        If self is not looking at an instance of `beginchar' then
        getdelimited returns the empty string.

        `endchars' is a sequence of allowable end-delimiting characters.
        Parsing stops when one of these is encountered.

        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
        within the parsed fragment.
        """
        if self.field[self.pos] != beginchar:
            return ""

        slist = [""]
        # NOTE: this local deliberately shadows the module-level quote();
        # here it is a flag meaning "previous char was a backslash escape".
        quote = False
        self.pos += 1
        while self.pos < len(self.field):
            if quote:
                slist.append(self.field[self.pos])
                quote = False
            elif self.field[self.pos] in endchars:
                self.pos += 1
                break
            elif allowcomments and self.field[self.pos] == "(":
                slist.append(self.getcomment())
                continue  # have already advanced pos from getcomment
            elif self.field[self.pos] == "\\":
                quote = True
            else:
                slist.append(self.field[self.pos])
            self.pos += 1

        return EMPTYSTRING.join(slist)

    def getquote(self):
        """Get a quote-delimited fragment from self's field."""
        return self.getdelimited('"', '"\r', False)

    def getcomment(self):
        """Get a parenthesis-delimited fragment from self's field."""
        return self.getdelimited("(", ")\r", True)

    def getdomainliteral(self):
        """Parse an RFC 2822 domain-literal."""
        return "[%s]" % self.getdelimited("[", "]\r", False)

    def getatom(self, atomends=None):
        """Parse an RFC 2822 atom.

        Optional atomends specifies a different set of end token delimiters
        (the default is to use self.atomends). This is used e.g. in
        getphraselist() since phrase endings must not include the `.' (which
        is legal in phrases)."""
        atomlist = [""]
        if atomends is None:
            atomends = self.atomends

        while self.pos < len(self.field):
            if self.field[self.pos] in atomends:
                break
            else:
                atomlist.append(self.field[self.pos])
            self.pos += 1

        return EMPTYSTRING.join(atomlist)

    def getphraselist(self):
        """Parse a sequence of RFC 2822 phrases.

        A phrase is a sequence of words, which are in turn either RFC 2822
        atoms or quoted-strings. Phrases are canonicalized by squeezing all
        runs of continuous whitespace into one space.
        """
        plist = []

        while self.pos < len(self.field):
            if self.field[self.pos] in self.FWS:
                self.pos += 1
            elif self.field[self.pos] == '"':
                plist.append(self.getquote())
            elif self.field[self.pos] == "(":
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] in self.phraseends:
                break
            else:
                plist.append(self.getatom(self.phraseends))

        return plist
class AddressList(AddrlistClass):
    """An AddressList encapsulates a list of parsed RFC 2822 addresses."""

    def __init__(self, field):
        AddrlistClass.__init__(self, field)
        self.addresslist = self.getaddrlist() if field else []

    def __len__(self):
        return len(self.addresslist)

    def __add__(self, other):
        # Set union
        combined = AddressList(None)
        combined.addresslist = self.addresslist[:]
        combined.addresslist.extend(
            addr for addr in other.addresslist
            if addr not in self.addresslist)
        return combined

    def __iadd__(self, other):
        # Set union, in-place
        for addr in other.addresslist:
            if addr not in self.addresslist:
                self.addresslist.append(addr)
        return self

    def __sub__(self, other):
        # Set difference
        difference = AddressList(None)
        difference.addresslist = [
            addr for addr in self.addresslist
            if addr not in other.addresslist]
        return difference

    def __isub__(self, other):
        # Set difference, in-place
        for addr in other.addresslist:
            if addr in self.addresslist:
                self.addresslist.remove(addr)
        return self

    def __getitem__(self, index):
        # Make indexing, slices, and 'in' work
        return self.addresslist[index]
|
import datetime
import functools
import itertools
import logging
import pandas as pd
from . import config
logger = logging.getLogger('lacework_sdk.jupyter.utils')
def dataframe_decorator(function):
    """
    A decorator used to convert Lacework JSON API output into a dataframe.

    Dict results are converted to a DataFrame built from their 'data' key
    (SEVERITY values are mapped through config.SEVERITY_DICT when present);
    any non-dict result is passed through unchanged.
    """
    @functools.wraps(function)
    def get_output(*args, **kwargs):
        result = function(*args, **kwargs)
        if not isinstance(result, dict):
            return result
        df = pd.DataFrame(result.get('data', []))
        if 'SEVERITY' in df:
            df['SEVERITY'] = df.SEVERITY.apply(
                lambda x: config.SEVERITY_DICT.get(x, x))
        return df
    return get_output
def flatten_json_output(  # noqa: C901
        json_data, pre_key='', lists_to_rows=False):
    """
    Flatten and yield dict objects from a Lacework JSON structure.

    Nested dicts are flattened into dotted keys; lists become either
    numbered columns (key_1, key_2, ...) or, with lists_to_rows=True,
    a cartesian-product expansion into multiple yielded rows.

    :param dict json_data: A JSON dict object.
    :param str pre_key: Optional string with the key path into the JSON object.
    :param lists_to_rows bool: Determines whether values in lists get expanded
        into column_1, column_2, ..., column_N or if it gets expanded
        into multiple rows (defaults to column_N, set to True to expand
        to rows).

    :yields: A dict with all elements of the JSON structure flattened.
    """
    flattened_data = {}  # scalar/flattened key -> value pairs for this level
    data_to_list = {}    # list key -> flattened sub-dicts (lists_to_rows mode)
    for key, value in json_data.items():
        # Build the dotted key path for this element.
        if pre_key:
            use_key = f'{pre_key}.{key}'
        else:
            use_key = key
        if isinstance(value, dict):
            # Nested dict: recurse and merge its flattened form in-place.
            new_dicts = flatten_json_output(
                value, pre_key=use_key, lists_to_rows=lists_to_rows)
            for new_dict in new_dicts:
                flattened_data.update(new_dict)
        elif isinstance(value, list):
            count = 1  # 1-based suffix for the column_N naming scheme
            for list_value in value:
                if lists_to_rows:
                    list_key = use_key
                else:
                    list_key = f'{use_key}_{count}'
                if isinstance(list_value, dict):
                    new_dicts = flatten_json_output(
                        list_value, pre_key=list_key,
                        lists_to_rows=lists_to_rows)
                    for new_dict in new_dicts:
                        if lists_to_rows:
                            # Collect candidates per key; combined into full
                            # rows via itertools.product below.
                            data_to_list.setdefault(list_key, [])
                            data_to_list[list_key].append(new_dict)
                        else:
                            flattened_data.update(new_dict)
                else:
                    # Scalar list element: stored under the (numbered) key.
                    flattened_data[list_key] = list_value
                count += 1
        else:
            # Plain scalar value.
            flattened_data[use_key] = value
    if lists_to_rows:
        if data_to_list:
            # One yielded row per combination of collected list entries.
            keys, values = zip(*data_to_list.items())
            expanded_dicts = [
                dict(zip(keys, v)) for v in itertools.product(*values)]
            for expanded_dict in expanded_dicts:
                new_dict = flattened_data.copy()
                for dict_value in expanded_dict.values():
                    new_dict.update(dict_value)
                yield new_dict
        else:
            yield flattened_data
    else:
        yield flattened_data
def flatten_data_frame(data_frame, lists_to_rows=False):
    """
    Flatten a DataFrame that contains nested dicts in columns.

    Be careful using this function on larger data frames since this is a
    slow, row-by-row flattening process.

    :param DataFrame data_frame: The data frame to flatten.
    :param lists_to_rows bool: Determines whether values in lists get expanded
        into column_1, column_2, ..., column_N or if it gets expanded
        into multiple rows (defaults to column_N, set to True to expand
        to rows).
    :return: A DataFrame that is flattened.
    """
    flattened_rows = []
    for _, row in data_frame.iterrows():
        flattened_rows.extend(
            flatten_json_output(row.to_dict(), lists_to_rows=lists_to_rows))
    return pd.DataFrame(flattened_rows)
def parse_date_offset(offset_string):  # noqa: C901
    """
    Parse date offset string and return a start and end time.

    The string must look like "LAST <integer> <unit>" where <unit> starts
    with second/minute/hour/day/week; end time is "now" (UTC).

    :param str offset_string: The offset string describing the time period.
    :raises ValueError: If not able to convert the string to dates.
    :return: A tuple with start and end time as ISO 8601 formatted strings.
    """
    text = offset_string.lower()
    if not text.startswith('last'):
        raise ValueError('Offset string needs to start with LAST to be valid')
    now = datetime.datetime.utcnow()
    end_time = now.isoformat()
    parts = text.split()
    if len(parts) != 3:
        raise ValueError(
            'Offset string needs to be three words, '
            '"LAST X SECONDS/MINUTES/HOURS/DAYS"')
    try:
        amount = int(parts[1], 10)
    except ValueError:
        raise ValueError(
            'Offset string needs to have a valid integer as '
            'the second word.')
    # Map a unit prefix to the matching timedelta keyword argument.
    unit_prefixes = (
        ('minute', 'minutes'),
        ('hour', 'hours'),
        ('day', 'days'),
        ('week', 'weeks'),
        ('second', 'seconds'),
    )
    delta = None
    for prefix, kwarg in unit_prefixes:
        if parts[2].startswith(prefix):
            delta = datetime.timedelta(**{kwarg: amount})
            break
    if not delta:
        raise ValueError('Unable to determine the time delta')
    start_time = (now - delta).isoformat()
    return f'{start_time}Z', f'{end_time}Z'
|
from picamera import PiCamera
from time import sleep
# Open the Raspberry Pi camera, show a live preview, capture one still
# image, then close the preview.
# NOTE(review): the PiCamera object itself is never close()d; consider
# `with PiCamera() as camera:` for clean shutdown.
camera=PiCamera()
camera.start_preview()
# Give the sensor time to auto-adjust exposure before capturing.
sleep(2)
camera.capture('/home/pi/Desktop/test.jpg')  # hardcoded output path (default 'pi' user desktop)
camera.stop_preview()
import os
import playsound
import speech_recognition as sr
from gtts import gTTS
from datetime import datetime
def speak(text, play_aloud=True):
    """Convert `text` to speech with gTTS, save it to voice.mp3, and
    optionally play it aloud.

    :param str text: The text to synthesize.
    :param bool play_aloud: When True, play the generated mp3 with playsound.
    """
    tts = gTTS(text=text, lang='en')
    filename = 'voice.mp3'
    # Remove a leftover file from a previous execution, if any.
    # BUG FIX: the original called os.remove() unconditionally, which raises
    # FileNotFoundError on the very first run when voice.mp3 does not exist.
    if os.path.exists(filename):
        os.remove(filename)
    tts.save(filename)
    if play_aloud:
        playsound.playsound(filename)
def get_audio(print_words=False):
    """Record one utterance from the default microphone and return the
    recognized text (empty string when recognition fails).

    :param bool print_words: When True, echo the recognized text to stdout.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        audio = recognizer.listen(source=source)
        said = ""
        try:
            # Google's free speech recognition backend.
            said = recognizer.recognize_google(audio_data=audio)
            if print_words:
                print(said)
        except Exception:
            # Deliberate best-effort: any recognition/network error
            # simply yields "" so the listener loop keeps running.
            pass
        return said
class VoiceCommands:
    """Wake-word driven listener built on top of get_audio() and speak()."""

    def __init__(self):
        # Wake phrase that switches the bot into active listening.
        self.keywords = "okay robot"
        self.passive_listen = ""
        self.active_listen = ""

    def start_passive_listen(self):
        """Record one utterance; if it contains the wake phrase, capture a
        follow-up command into self.active_listen.

        Returns True when the wake phrase was heard, None otherwise.
        """
        self.passive_listen = get_audio()
        print("Listening for wake commands...")
        return self._heard_keyword()

    def _heard_keyword(self):
        # True when the last passive recording contained the wake phrase.
        if self.keywords not in self.passive_listen:
            return None
        speak("Yes?")
        self.passive_listen = ""
        print("Actively listening...")
        self.active_listen = get_audio(print_words=True)
        return True

    def stop_stream(self):
        """Placeholder for stopping the audio stream."""
        pass

    def predict_command(self):
        """Take the actively-heard speech and predict the intended command.

        Currently a stub: the text is captured but no model is wired up yet.
        """
        listen = self.active_listen
        # print the command robot thinks is being suggested. Maybe add a voice command confirm clause?
        # if listen == "can you tell me the time":
        #     c_time = datetime.now().strftime("%H %M %S").split()
        #     speak(f"The current time is {c_time[0]} hours, ...")
        # pass active_listen tokenizer/whatever and then into model
if __name__ == '__main__':
    vc = VoiceCommands()
    # NOTE(review): the result of this first listen is discarded -- the loop
    # below immediately listens again, so one utterance is effectively
    # swallowed on startup; confirm whether this warm-up call is intended.
    vc.start_passive_listen()
    while True:
        # Loop forever: wake-word check, then command prediction on success.
        if vc.start_passive_listen():
            vc.predict_command()
|
from __future__ import unicode_literals, division, absolute_import
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils.requests import Session, TimedLimiter
from flexget.utils.soup import get_soup
log = logging.getLogger('newpct')
requests = Session()
requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})
requests.add_domain_limiter(TimedLimiter('imdb.com', '2 seconds'))
class UrlRewriteNewPCT(object):
    """NewPCT urlrewriter.

    Rewrites newpct/newpct1 entry URLs into direct torrent download URLs by
    scraping the torrent ID out of the download page.
    """

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return truthy when the entry URL is a newpct page we can rewrite."""
        url = entry['url']
        # Raw string: the original non-raw literal used escape sequences such
        # as '\/' that are invalid in plain strings (DeprecationWarning on
        # modern Python).  Pattern itself is unchanged.
        rewritable_regex = r'^http:\/\/(www.)?newpct1?.com\/.*'
        return re.match(rewritable_regex, url) and not url.startswith('http://www.newpct.com/descargar/')

    # urlrewriter API
    def url_rewrite(self, task, entry):
        """Replace the entry's URL in place with the direct torrent URL."""
        entry['url'] = self.parse_download_page(entry['url'])

    @plugin.internet(log)
    def parse_download_page(self, url):
        """Fetch `url` and extract the torrent download URL.

        :raises UrlRewritingError: when the page cannot be parsed or no
            torrent ID is found on it.
        """
        page = requests.get(url)
        try:
            soup = get_soup(page.text)
        except Exception as e:
            raise UrlRewritingError(e)
        # Raw string for the same invalid-escape reason as above ('\s', '\d').
        torrent_id_prog = re.compile(r"'(?:torrentID|id)'\s*:\s*'(\d+)'")
        torrent_ids = soup.findAll(text=torrent_id_prog)
        if len(torrent_ids) == 0:
            raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)
        torrent_id = torrent_id_prog.search(torrent_ids[0]).group(1)
        return 'http://www.newpct.com/descargar/torrent/%s/dummy.html' % torrent_id
@event('plugin.register')
def register_plugin():
    # Register this rewriter with flexget under the name 'newpct' in the
    # urlrewriter group (plugin API version 2).
    plugin.register(UrlRewriteNewPCT, 'newpct', groups=['urlrewriter'], api_ver=2)
|
import os
# Reset the local Django SQLite database: truncate db.sqlite3 (creating it
# if missing), then re-run migrations to rebuild the schema.
# Context manager replaces the manual open()/close() pair so the handle is
# released even if truncation fails.
with open("db.sqlite3", 'w'):
    pass  # opening with mode 'w' truncates the file; nothing to write
print(os.popen('python manage.py migrate').read())
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.utils.vis_utils import plot_model
from keras.models import Sequential
from keras.layers import LSTM, Dense, Embedding, RepeatVector, TimeDistributed
from keras.callbacks import ModelCheckpoint
from prepare_data import load_clean_sentences
def create_tokenizer(lines):
    """Fit a fresh Keras Tokenizer on `lines` and return it."""
    fitted = Tokenizer()
    fitted.fit_on_texts(lines)
    return fitted
def max_length(lines):
    """Return the word count of the longest whitespace-delimited line."""
    word_counts = (len(line.split()) for line in lines)
    return max(word_counts)
# Load the full sentence-pair dataset plus the pre-split train/test subsets
# (pickled beforehand by prepare_data.py).
dataset = load_clean_sentences('ger-mhd-both.pkl')
train = load_clean_sentences('ger-mhd-train.pkl')
test = load_clean_sentences('ger-mhd-test.pkl')
"""
Trading-Technical-Indicators (tti) python library
File name: run_all_calculation_times.py
Run example code for all the trading technical indicators and return
calculation times. To be used as source for the optimization of indicators
calculation.
Use as:
python run_all_calculation_times.py
"""
import time
import inspect
import pandas as pd
import tti.indicators as ti
def calculate_ti(indicator_object, output_file=None, add_info=None, **kwargs):
    """
    Creates the indicator ``indicator_object`` and measures the calculation
    time.

    Parameters:
        indicator_object (tti.indicators object): The indicator object for
            which the tti API should be executed.

        output_file (file, default=None): File object where the calculation
            time is redirected. If None, output goes to console.

        add_info (str, default is None): Additional information for the running
            calculation, is used for information purposes in the printing
            functions.

        **kwargs: Arguments to be passed to the indicator constructor.
    """
    started_at = time.time()
    indicator = indicator_object(**kwargs)
    elapsed = round(time.time() - started_at, 2)
    # CSV-style line: "<class name>[ (extra)],<seconds>"
    suffix = '' if add_info is None else ' (' + add_info + ')'
    ti_name = str(type(indicator).__name__) + suffix
    print(ti_name, ',', elapsed, sep='', file=output_file)
if __name__ == '__main__':
    # Read data from csv file. Set the index to the correct column
    # (dates column)
    df = pd.read_csv('./data/sample_data.csv', parse_dates=True, index_col=0)

    # File object to redirect the output of the execution
    # NOTE(review): out_file is never closed; consider wrapping the run in a
    # `with open(...)` block so the CSV is flushed deterministically.
    out_file = open('Calculation_times.csv', 'w')
    print('indicator,calculation_time_in_seconds', file=out_file)

    # Run all the indicators implemented in the tti.indicators package;
    # getmembers yields (name, object) pairs, classes are the indicators.
    for x in inspect.getmembers(ti):
        if inspect.isclass(x[1]):
            # Moving Average includes five indicators (one per ma_type)
            if x[1] == ti.MovingAverage:
                calculate_ti(indicator_object=x[1],
                             output_file=out_file, add_info='simple',
                             input_data=df,
                             ma_type='simple')
                calculate_ti(indicator_object=x[1],
                             output_file=out_file, add_info='exponential',
                             input_data=df,
                             ma_type='exponential')
                calculate_ti(indicator_object=x[1],
                             output_file=out_file, add_info='time_series',
                             input_data=df,
                             ma_type='time_series')
                calculate_ti(indicator_object=x[1],
                             output_file=out_file, add_info='triangular',
                             input_data=df,
                             ma_type='triangular')
                calculate_ti(indicator_object=x[1],
                             output_file=out_file, add_info='variable',
                             input_data=df,
                             ma_type='variable')
            # Stochastic Oscillator includes two indicators (fast/slow)
            elif x[1] == ti.StochasticOscillator:
                calculate_ti(indicator_object=x[1],
                             output_file=out_file,
                             add_info='fast',
                             input_data=df,
                             k_slowing_periods=1)
                calculate_ti(indicator_object=x[1],
                             output_file=out_file, add_info='slow',
                             input_data=df,
                             k_slowing_periods=3)
            else:
                # All remaining indicators take only the input data.
                calculate_ti(indicator_object=x[1],
                             output_file=out_file, add_info=None,
                             input_data=df)
|
###############################################################################
# EnsembleRegressor
from nimbusml import Pipeline, FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.feature_extraction.categorical import OneHotVectorizer
from nimbusml.ensemble import EnsembleRegressor
from nimbusml.ensemble.feature_selector import RandomFeatureSelector
from nimbusml.ensemble.output_combiner import RegressorMedian
from nimbusml.ensemble.subset_selector import RandomPartitionSelector
from nimbusml.ensemble.sub_model_selector import RegressorBestDiverseSelector
# data input (as a FileDataStream)
path = get_dataset('infert').as_filepath()
data = FileDataStream.read_csv(path)
print(data.head())
# age case education induced parity ... row_num spontaneous ...
# 0 26 1 0-5yrs 1 6 ... 1 2 ...
# 1 42 1 0-5yrs 1 1 ... 2 0 ...
# 2 39 1 0-5yrs 2 6 ... 3 0 ...
# 3 34 1 0-5yrs 2 4 ... 4 0 ...
# 4 35 1 6-11yrs 1 3 ... 5 1 ...
# define the training pipeline using default sampling and ensembling parameters
pipeline_with_defaults = Pipeline([
OneHotVectorizer(columns={'edu': 'education'}),
EnsembleRegressor(feature=['induced', 'edu'], label='age', num_models=3)
])
# train, predict, and evaluate
metrics, predictions = pipeline_with_defaults.fit(data).test(data, output_scores=True)
# print predictions
print(predictions.head())
# Score
# 0 26.046741
# 1 26.046741
# 2 29.225840
# 3 29.225840
# 4 33.849384
# print evaluation metrics
print(metrics)
# L1(avg) L2(avg) RMS(avg) Loss-fn(avg) R Squared
# 0 4.69884 33.346123 5.77461 33.346124 -0.214011
# define the training pipeline with specific sampling and ensembling options
pipeline_with_options = Pipeline([
OneHotVectorizer(columns={'edu': 'education'}),
EnsembleRegressor(feature=['induced', 'edu'],
label='age',
num_models=3,
sampling_type = RandomPartitionSelector(
feature_selector=RandomFeatureSelector(
features_selction_proportion=0.7)),
sub_model_selector_type=RegressorBestDiverseSelector(),
output_combiner=RegressorMedian())
])
# train, predict, and evaluate
metrics, predictions = pipeline_with_options.fit(data).test(data, output_scores=True)
# print predictions
print(predictions.head())
# Score
# 0 37.122200
# 1 37.122200
# 2 41.296204
# 3 41.296204
# 4 33.591423
# print evaluation metrics
# note that the converged loss function values are worse than with defaults as
# this is a small dataset that we partition into 3 chunks for each regressor,
# which decreases model quality
print(metrics)
# L1(avg) L2(avg) RMS(avg) Loss-fn(avg) R Squared
# 0 5.481676 44.924838 6.702599 44.924838 -0.63555
|
"""
【2】使用selenium+浏览器 登录网易163邮箱 : https://mail.163.com/
"""
from selenium import webdriver
class WangyiSpider:
    """Log in to NetEase 163 mail (https://mail.163.com/) using selenium."""

    def __init__(self):
        # Headless mode kept available but disabled:
        # self.options = webdriver.ChromeOptions()
        # self.options.add_argument('--headless')
        self.driver = webdriver.Chrome(executable_path="/Users/aiden_zcf/PycharmProjects/Tmooc/chromedriver")
        self.driver.get('https://mail.163.com/')

    def crawl(self):
        """Switch into the login iframe, fill in the credentials, and quit."""
        # The login form lives inside an iframe under #loginDiv.
        # DEAD-CODE FIX: the original first located the frame by tag name and
        # immediately overwrote that result with this XPath lookup; the
        # redundant first lookup is removed.
        frame = self.driver.find_element_by_xpath('//div[@id="loginDiv"]/iframe')
        self.driver.switch_to.frame(frame)
        # Locate the username/password inputs by their placeholder text
        # ("email account or phone number" / "enter password").
        input_name = self.driver.find_element_by_xpath('.//input[@data-placeholder="邮箱帐号或手机号码"]')
        input_password = self.driver.find_element_by_xpath('.//input[@data-placeholder="输入密码"]')
        input_name.send_keys('123@163.com')  # demo credentials
        input_password.send_keys('123456')
        self.driver.quit()
if __name__ == '__main__':
    # Launch the spider: opens Chrome, fills the 163 login form, then quits.
    spider = WangyiSpider()
    spider.crawl()
|
from abc import ABC, abstractmethod
from typing import Dict, Tuple, Union, List
from pathlib import Path
from importlib import import_module
import ast
import logging
import discord
from movienightbot.db.controllers import ServerController
__ALL__ = ["KNOWN_ACTIONS", "unknown_default_action"]
logger = logging.getLogger("movienightbot")
async def unknown_default_action(msg: discord.message, command: str) -> None:
    """Reply in-channel when an unrecognized command word is received."""
    reply = (
        f"Unknown command {command} given, try reading the tutorial at `m!help` "
        f"to see what commands are available!"
    )
    await msg.channel.send(reply)
class BaseAction(ABC):
    """Abstract base for all bot commands.

    Subclasses set `action_name` (the command word on discord), may flip
    `admin`/`guild_only`, and implement `action()` plus `help_text`.
    Instances are awaited with the incoming message.
    """

    # action name is what the action will be called on discord
    action_name = None
    # when True, only administrators / the configured admin role may run it
    admin = False
    # when True, the command is rejected when sent from a DM
    guild_only = True

    async def _check_proceed(self, msg: discord.message) -> bool:
        """Return True when `msg` passes the DM, channel, and admin checks."""
        if self.guild_only and msg.guild is None:
            # CONSISTENCY FIX: use the module logger (was logging.debug,
            # which logs through the root logger unlike the rest of the file)
            logger.debug(f"User {msg.author.name} trying non-DM action in a DM")
            await msg.author.send("You can't do this command from a DM!")
            return False
        server_settings = ServerController().get_by_id(msg.guild.id)
        if msg.channel.id != server_settings.channel:
            logger.debug(
                f"User {msg.author.name} using non-permitted channel {msg.channel.name} "
                f"instead of {server_settings.channel}"
            )
            return False
        if not msg.author.guild_permissions.administrator and (
            self.admin
            and server_settings.admin_role not in {r.name for r in msg.author.roles}
        ):
            logger.debug(f"User {msg.author.name} does not have admin")
            await msg.channel.send("Hey now, you're not an admin on this server!")
            return False
        return True

    async def __call__(self, msg: discord.message) -> None:
        """Run the action for `msg`, reporting unexpected errors in-channel."""
        error_message = (
            "OOPSIE WOOPSIE!! UwU We made a fucky wucky!! A wittle fucko boingo! The code "
            "monkeys at our headquarters are working VEWY HAWD to fix this!"
        )
        try:
            if not await self._check_proceed(msg):
                return
        except Exception as e:
            logger.error(e, exc_info=e)
            await msg.channel.send(error_message)
            # BUG FIX: previously execution fell through and ran the action
            # even though the permission check itself had crashed.
            return
        guild = msg.guild.name if msg.guild is not None else "DM"
        logger.info(f"Running action {self.action_name} on server {guild}")
        try:
            await self.action(msg)
        except discord.Forbidden as e:
            if e.code == 50007:
                # 50007: cannot send messages to this user (DMs disabled)
                await msg.channel.send(f"I can't DM you {msg.author.name}!")
                return
            else:
                logger.error(e, exc_info=e)
                await msg.channel.send(error_message)
        except Exception as e:
            logger.error(e, exc_info=e)
            await msg.channel.send(error_message)

    @staticmethod
    def get_message_data(
        msg: discord.message, data_parts: int = 1
    ) -> Union[str, Tuple[str, ...]]:
        """Gets and sanitizes the message data associated with the command

        Parameters
        ----------
        msg
            The discord message object
        data_parts
            The number of pieces of information expected, space separated. Default 1.
            For example, if the message text is "m!suggest Some Movie Name" and we set to 1,
            this will return "Some Movie Name". Set to 2, it will return ("Some", "Movie Name")

        Notes
        -----
        Will return an empty string if the data_parts is set to 1 but no data is given. Will return an empty tuple
        if data_parts is > 1 and no data given.
        """
        data = msg.content.strip().split(" ", data_parts)[1:]
        # sanitize the input to only have one space in case multiple put in
        data = tuple(" ".join(s.split()) for s in data)
        if data_parts == 1:
            return "" if not data else data[0]
        return data

    @property
    @abstractmethod
    def help_text(self) -> str:
        """One-line description shown by the help command."""
        return

    @property
    def help_options(self) -> List[str]:
        """Optional list of sub-options displayed alongside the help text."""
        return []

    @abstractmethod
    async def action(self, msg: discord.message) -> None:
        """Perform the command for `msg`; implemented by subclasses."""
        return
def _get_actions() -> Dict[str, BaseAction]:
    """Loads all actions in the submodule to a dict

    Returns
    -------
    dict of str
        The actions, with `action_name` as key and an instantiated class as value

    Notes
    -----
    Any Action class must be a child of the BaseAction ABC to be added to this dict.
    Done this way so you can create a new file in the actions submodule and it will auto-import and register.
    """
    package_dir = Path(__file__).parent
    registry = {}
    for entry in package_dir.iterdir():
        # Only plain .py modules; skip packages, dunder files, and non-python.
        if entry.is_dir() or entry.name.startswith("__") or not entry.name.endswith(".py"):
            continue
        # Parse the source to find the classes defined in this module.
        parsed = ast.parse(entry.read_text())
        class_names = [
            node.name for node in ast.walk(parsed) if isinstance(node, ast.ClassDef)
        ]
        module = import_module(f"movienightbot.actions.{entry.stem}")
        for class_name in class_names:
            candidate = module.__dict__[class_name]
            if issubclass(candidate, BaseAction):
                registry[candidate.action_name] = candidate()
    return registry
KNOWN_ACTIONS = _get_actions()
|
import pkgutil, inspect, sys,os, importlib,json,enum,warnings,nbformat,re
from IPython.core.display import display, Markdown
from nbconvert.preprocessors import ExecutePreprocessor
import nbformat.sign
from pathlib import Path
from .core import *
from .nbdoc import *
__all__ = ['create_module_page', 'generate_all', 'update_module_page', 'update_all']
def get_empty_notebook():
    "A default (nbformat 4) notebook skeleton with minimal metadata."
    #TODO: check python version and nbformat
    kernel = {'display_name': 'Python 3',
              'language': 'python',
              'name': 'python3'}
    lang_info = {'codemirror_mode': {'name': 'ipython', 'version': 3},
                 'file_extension': '.py',
                 'mimetype': 'text/x-python',
                 'name': 'python',
                 'nbconvert_exporter': 'python',
                 'pygments_lexer': 'ipython3',
                 'version': '3.6.6'}
    return {'metadata': {'kernelspec': kernel, 'language_info': lang_info},
            'nbformat': 4,
            'nbformat_minor': 2}
def get_md_cell(source, metadata=None):
    "A markdown cell wrapping `source`; `metadata` defaults to an empty dict."
    cell = {'cell_type': 'markdown', 'metadata': {}, 'source': source}
    if metadata is not None:
        cell['metadata'] = metadata
    return cell
def get_empty_cell(ctype='markdown'):
    "An empty cell of type `ctype` (markdown by default)."
    return dict(cell_type=ctype, metadata={}, source=[])
def get_code_cell(code, hidden=False):
    "A code cell containing `code`; `hidden` collapses the input in the UI."
    return dict(cell_type='code',
                execution_count=0,
                metadata={'hide_input': hidden, 'trusted': True},
                source=code,
                outputs=[])
def get_doc_cell(ft_name):
    "A hidden code cell that calls show_doc on `ft_name`."
    return get_code_cell(f"show_doc({ft_name})", True)
def is_enum(cls):
    "True if `cls` is the Enum class itself or its metaclass."
    return cls in (enum.Enum, enum.EnumMeta)
def get_inner_fts(elt):
    "List the qualified names of the inner functions/methods of a class."
    names = []
    prefix = elt.__name__
    for attr_name in elt.__dict__.keys():
        if attr_name.startswith('__'):
            continue  # skip dunders
        member = getattr(elt, attr_name)
        if inspect.isfunction(member) or inspect.ismethod(member):
            names.append(f'{prefix}.{attr_name}')
        if inspect.isclass(member):
            # Recurse into nested classes, re-prefixing with the outer name.
            names.extend(f'{prefix}.{n}' for n in get_inner_fts(member))
    return names
def get_global_vars(mod):
    """Returns globally assigned variables of module `mod` as a dict mapping
    variable name to a markdown string with its source line and link."""
    # https://stackoverflow.com/questions/8820276/docstring-for-variable/31764368#31764368
    import ast,re
    with open(mod.__file__, 'r') as f: fstr = f.read()
    flines = fstr.splitlines()
    d = {}
    for node in ast.walk(ast.parse(fstr)):
        if isinstance(node,ast.Assign) and hasattr(node.targets[0], 'id'):
            key,lineno = node.targets[0].id,node.targets[0].lineno-1
            codestr = flines[lineno]
            # rf-string FIX: the plain f-string relied on the invalid escape
            # sequence '\s', which raises DeprecationWarning on modern Python.
            if re.match(rf"^{key}\s*=\s*.*", codestr): # only top level assignment
                d[key] = f'`{codestr}` {get_source_link(mod, lineno)}'
    return d
def get_source_link(mod, lineno) -> str:
    "Right-aligned HTML link to line `lineno` of `mod`'s source file."
    source_file = os.path.realpath(inspect.getfile(mod))
    rel = os.path.relpath(source_file, os.getcwd())
    anchor = f"{rel}#L{lineno}"
    return '<div style="text-align: right"><a href="' + anchor + '">[source]</a></div>'
def execute_nb(fname):
    "Execute notebook `fname` in-place and re-sign it as trusted."
    # Any module used in the notebook that isn't inside must be in the same directory as this script
    with open(fname) as f: nb = nbformat.read(f, as_version=4)
    # 10-minute timeout per cell, executed against the python3 kernel.
    ep = ExecutePreprocessor(timeout=600, kernel_name='python3')
    ep.preprocess(nb, {})
    with open(fname, 'wt') as f: nbformat.write(nb, f)
    # Sign so Jupyter trusts the freshly executed output cells.
    nbformat.sign.NotebookNotary().sign(nb)
def _symbol_skeleton(name): return [get_doc_cell(name), get_md_cell(f"`{name}`")]
def create_module_page(mod, dest_path, force=False):
    """Creates the documentation notebook for module `mod` in path `dest_path`.

    With force=False the notebook file is opened with mode 'x', so an
    existing file raises FileExistsError instead of being overwritten.
    """
    nb = get_empty_notebook()
    mod_name = mod.__name__
    strip_name = strip_fastai(mod_name)
    init_cell = [get_md_cell(f'# {strip_name}'), get_md_cell('Type an introduction of the package here.')]
    # First code cell imports the doc helpers and star-imports the module.
    cells = [get_code_cell(f'from fastai.gen_doc.nbdoc import *\nfrom {mod_name} import * ', True)]

    gvar_map = get_global_vars(mod)
    if gvar_map: cells.append(get_md_cell('### Global Variable Definitions:'))
    for name in get_exports(mod):
        if name in gvar_map: cells.append(get_md_cell(gvar_map[name]))

    for ft_name in get_ft_names(mod):
        if not hasattr(mod, ft_name):
            warnings.warn(f"Module {strip_name} doesn't have a function named {ft_name}.")
            continue
        cells += _symbol_skeleton(ft_name)
        elt = getattr(mod, ft_name)
        # For (non-enum) classes, also document their inner methods/classes.
        if inspect.isclass(elt) and not is_enum(elt.__class__):
            in_ft_names = get_inner_fts(elt)
            in_ft_names.sort(key = str.lower)
            for name in in_ft_names:
                cells += _symbol_skeleton(name)
    nb['cells'] = init_cell + cells

    doc_path = get_doc_path(mod, dest_path)
    # NOTE(review): the handle from open() is never explicitly closed here.
    json.dump(nb, open(doc_path, 'w' if force else 'x'))
    execute_nb(doc_path)
# Names/extensions skipped by default when scanning for modules.
_default_exclude = ['.ipynb_checkpoints', '__pycache__', '__init__.py', 'imports']

def get_module_names(path_dir, exclude=None):
    """Searches a given directory and returns all the modules contained inside.

    Module names are dotted with the directory name (`dir.module`); nested
    directories are scanned recursively.
    """
    # BUG FIX: the docstring used to sit *after* the first statement, making
    # it a no-op string expression rather than an actual docstring.
    if exclude is None: exclude = _default_exclude
    files = path_dir.glob('*')
    res = []
    for f in files:
        if f.is_dir() and f.name in exclude: continue # exclude directories
        if any([f.name.endswith(ex) for ex in exclude]): continue # exclude extensions
        if f.name[-3:] == '.py': res.append(f'{path_dir.name}.{f.name[:-3]}')
        elif f.is_dir(): res += [f'{path_dir.name}.{name}' for name in get_module_names(f)]
    return res
def generate_all(pkg_name, dest_path, exclude=None):
    "Generate the documentation for all the modules in `pkg_name`"
    exclude = _default_exclude if exclude is None else exclude
    for module_name in get_module_names(Path(pkg_name), exclude):
        module = import_mod(module_name)
        if module is not None:
            create_module_page(module, dest_path)
def read_nb(fname):
    "Read notebook `fname` and return its parsed (json) representation."
    with open(fname, 'r') as f:
        raw = f.read()
    return nbformat.reads(raw, as_version=4)
def read_nb_content(cells, mod_name):
    "Map each symbol documented via show_doc(...) to its code-cell index."
    positions = {}
    for idx, cell in enumerate(cells):
        if cell['cell_type'] != 'code':
            continue
        match = re.match(r"(.*)show_doc\(([\w\.]*)", cell['source'])
        if match is not None:
            positions[match.groups()[1]] = idx
    return positions
def read_nb_types(cells):
    "Map each variable documented as `name = ...` in markdown to its cell index."
    positions = {}
    for idx, cell in enumerate(cells):
        if cell['cell_type'] != 'markdown':
            continue
        match = re.match(r"^(?:<code>|`)?(\w*)\s*=\s*", cell['source'])
        if match is not None:
            positions[match.group(1)] = idx
    return positions
def link_markdown_cells(cells, mod):
    "Rewrite every markdown cell's source with docstring links for `mod`."
    for cell in cells:
        if cell['cell_type'] == 'markdown':
            cell['source'] = link_docstring(mod, cell['source'])
def get_insert_idx(pos_dict, name):
    "Return the position to insert a given function doc in a notebook"
    # First entry whose key sorts (case-insensitively) at or after `name`;
    # -1 means append at the end.
    lowered = str.lower(name)
    for key in pos_dict:
        if str.lower(key) >= lowered:
            return pos_dict[key]
    return -1
def update_pos(pos_dict, start_key, nbr=2):
    "Shift by `nbr` every position whose key sorts at or after `start_key`."
    threshold = str.lower(start_key)
    for key in pos_dict:
        if str.lower(key) >= threshold:
            pos_dict[key] += nbr
    return pos_dict
def insert_cells(cells, pos_dict, ft_name):
    "Insert the doc cells for `ft_name` at their sorted position; shift pos_dict."
    idx = get_insert_idx(pos_dict, ft_name)
    new_cells = [get_doc_cell(ft_name), get_empty_cell()]
    if idx == -1:
        # No later symbol exists: append at the end, positions unchanged.
        cells += new_cells
    else:
        cells[idx:idx] = new_cells
        pos_dict = update_pos(pos_dict, ft_name, 2)
    return cells, pos_dict
def get_doc_path(mod, dest_path):
    "Path of the documentation notebook for `mod` inside `dest_path`."
    filename = f'{strip_fastai(mod.__name__)}.ipynb'
    return os.path.join(dest_path, filename)
def update_module_metadata(mod, dest_path='.', title=None, summary=None, keywords=None, overwrite=True):
    """Update the jekyll metadata of `mod`'s doc notebook.

    No-op when title, summary, and keywords are all falsy; otherwise the
    notebook is read, its metadata merged/overwritten, and written back.
    """
    if not (title or summary or keywords): return
    doc_path = get_doc_path(mod, dest_path)
    nb = read_nb(doc_path)
    jm = {'title':title,'summary':summary,'keywords':keywords}
    update_nb_metadata(nb, jm, overwrite)
    # RESOURCE-LEAK FIX: json.dump(nb, open(doc_path, 'w')) left the file
    # handle unclosed; use a context manager instead.
    with open(doc_path, 'w') as f:
        json.dump(nb, f)
def update_nb_metadata(nb, data, overwrite=True):
    """Merge jekyll metadata `data` into notebook dict `nb` in place.

    With overwrite=True the existing jekyll metadata is replaced wholesale;
    otherwise `data` is merged into it.  None values in `data` are dropped,
    and an entirely-None `data` is a no-op.
    """
    data = {k:v for (k,v) in data.items() if v is not None} # remove none values
    if not data: return
    if 'metadata' not in nb: nb['metadata'] = {}
    if overwrite:
        nb['metadata']['jekyll'] = data
    else:
        # BUG FIX: dict.update() returns None, so the original stored None
        # under 'jekyll' instead of the merged mapping.
        merged = nb['metadata'].get('jekyll', {})
        merged.update(data)
        nb['metadata']['jekyll'] = merged
def update_module_page(mod, dest_path='.'):
    """Updates the documentation notebook of a given module.

    Existing show_doc cells are refreshed in place; missing symbols get new
    doc cells inserted at their case-insensitive alphabetical position.
    """
    doc_path = get_doc_path(mod, dest_path)
    strip_name = strip_fastai(mod.__name__)
    nb = read_nb(doc_path)
    update_nb_metadata(nb, {'title':strip_name, 'summary':inspect.getdoc(mod)})

    cells = nb['cells']
    link_markdown_cells(cells, mod)

    type_dict = read_nb_types(cells)
    gvar_map = get_global_vars(mod)
    for name in get_exports(mod):
        if name not in gvar_map: continue
        code = gvar_map[name]
        # Refresh globals already documented; append newly discovered ones.
        if name in type_dict: cells[type_dict[name]] = get_md_cell(code)
        else: cells.append(get_md_cell(code))

    pos_dict = read_nb_content(cells, strip_name)
    for ft_name in get_ft_names(mod):
        if not hasattr(mod, ft_name):
            warnings.warn(f"Module {strip_name} doesn't have a function named {ft_name}.")
            continue

        if ft_name not in pos_dict.keys():
            cells, pos_dict = insert_cells(cells, pos_dict, ft_name)
        elt = getattr(mod, ft_name)
        # For (non-enum) classes, also keep their inner symbols documented.
        if inspect.isclass(elt) and not is_enum(elt.__class__):
            in_ft_names = get_inner_fts(elt)
            in_ft_names.sort(key = str.lower)
            for name in in_ft_names:
                if name not in pos_dict.keys():
                    cells, pos_dict = insert_cells(cells, pos_dict, name)
    nb['cells'] = cells

    # RESOURCE-LEAK FIX: json.dump(nb, open(doc_path, 'w')) left the file
    # handle unclosed; use a context manager instead.
    with open(doc_path, 'w') as f:
        json.dump(nb, f)
    #execute_nb(doc_path)
def update_all(pkg_name, dest_path='.', exclude=None, create_missing=True):
    "Updates all the notebooks in `pkg_name`"
    exclude = _default_exclude if exclude is None else exclude
    for module_name in get_module_names(Path(pkg_name), exclude):
        mod = import_mod(module_name)
        if mod is None:
            continue
        # Update an existing notebook; otherwise create it when allowed.
        if os.path.exists(get_doc_path(mod, dest_path)):
            update_module_page(mod, dest_path)
        elif create_missing:
            print(f'Creating module page of {module_name}')
            create_module_page(mod, dest_path)
|
#!/usr/bin/env python3
# coding: utf-8
# In[1]:
from pyspark.sql.functions import array, col, explode, lit, struct, regexp_replace, to_date, desc
from pyspark.sql import DataFrame, SparkSession
from pyspark.context import SparkContext
# In[2]:
from config import datalake_raw_path, datalake_staged_path, staged_countries
# In[3]:
# One shared SparkSession for the whole staging job.
spark = SparkSession.builder.appName('covid_staged').getOrCreate()
# In[4]:
def melt(
        df,
        id_vars, value_vars,
        var_name, value_name):
    """Convert :class:`DataFrame` from wide to long format.

    Each column in `value_vars` becomes a (var_name, value_name) row,
    keeping the `id_vars` columns alongside.
    """
    # One struct<variable, value> per melted column, gathered into an array.
    pairs = [
        struct(lit(name).alias(var_name), col(name).alias(value_name))
        for name in value_vars
    ]
    # Explode the array so each struct becomes its own row.
    exploded = df.withColumn("_vars_and_vals", explode(array(*pairs)))
    selected = id_vars + [
        col("_vars_and_vals")[field].alias(field)
        for field in [var_name, value_name]
    ]
    return exploded.select(*selected)
# In[5]:
def read_and_clean_df(table_name, values_column_name):
    """Load one JHU CSSE global time-series CSV and return it in long format.

    Wide per-date columns are melted into (date, value) rows, dates parsed,
    and missing province_state values filled with 'n/a'.

    NOTE(review): `table_name` is unused -- the file path is derived solely
    from `values_column_name`; confirm whether the parameter can be removed.
    """
    df = (spark.read
          .format("csv")
          .option("header", "true")
          .option("inferSchema", "true")
          .load(datalake_raw_path+"/202004140200/jhu_csse_covid_19_time_series_"+values_column_name+"_global__202004140200__202004140200.csv")
          .withColumnRenamed("Country/Region", "country_region")
          .withColumnRenamed("Lat", "latitude")
          .withColumnRenamed("Long", "longitude")
          .withColumnRenamed("Province/State", "province_state")
          )
    cols = df.schema.names
    # First four columns identify the location; the rest are one per date.
    base_cols = cols[:4]
    date_cols = cols[4:]
    df = df.filter(col("country_region").isin(staged_countries))
    df = melt(df, base_cols, date_cols,
              "date_not_formatted", values_column_name)
    # Normalize separators in the raw date column headers, then parse.
    return (df.withColumn("date",
                          to_date(regexp_replace("date_not_formatted", "_", "/"), "M/dd/yy"))
            .drop("date_not_formatted")
            .fillna({'province_state': 'n/a'})
            )
# In[6]:
# Load each raw JHU measure and reshape it to long format.
confirmed_df = read_and_clean_df("raw_confirmed", "confirmed")
deaths_df = read_and_clean_df("raw_deaths", "deaths")
recovered_df = read_and_clean_df("raw_recovered", "recovered")
# In[7]:
# Outer-join the three measures into one row per location/date.
join_cols = ["province_state", "country_region",
             "latitude", "longitude", "date"]
full_df = (
    confirmed_df
    .join(deaths_df, join_cols, "outer")
    .join(recovered_df, join_cols, "outer")
)
# In[8]:
# Write the staged dataset partitioned by country, overwriting prior runs.
(full_df.repartition(4, col("country_region"))
 .write
 .partitionBy("country_region")
 .mode("Overwrite")
 .parquet(datalake_staged_path+"/full/"))
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import route_table_ids_from_querystring, filters_from_querystring, optional_from_querystring
class RouteTables(BaseResponse):
    """EC2 query-API handlers for the route-table family of actions.

    Each method parses its parameters out of ``self.querystring``, calls the
    mocked EC2 backend, and renders the matching XML template.
    """

    # Optional CreateRoute/ReplaceRoute parameters, as (backend kwarg,
    # query-string parameter) pairs -- shared by both handlers.
    _OPTIONAL_ROUTE_PARAMS = (
        ('gateway_id', 'GatewayId'),
        ('instance_id', 'InstanceId'),
        ('interface_id', 'NetworkInterfaceId'),
        ('vpc_peering_connection_id', 'VpcPeeringConnectionId'),
    )

    def _route_kwargs(self):
        """Collect the optional route parameters present in the request."""
        return {
            kwarg: optional_from_querystring(param, self.querystring)
            for kwarg, param in self._OPTIONAL_ROUTE_PARAMS
        }

    def associate_route_table(self):
        """AssociateRouteTable: bind a route table to a subnet."""
        table_id = self.querystring.get('RouteTableId')[0]
        subnet = self.querystring.get('SubnetId')[0]
        assoc_id = self.ec2_backend.associate_route_table(table_id, subnet)
        return self.response_template(ASSOCIATE_ROUTE_TABLE_RESPONSE).render(
            association_id=assoc_id)

    def create_route(self):
        """CreateRoute: add a route to an existing route table."""
        table_id = self.querystring.get('RouteTableId')[0]
        cidr = self.querystring.get('DestinationCidrBlock')[0]
        self.ec2_backend.create_route(table_id, cidr, **self._route_kwargs())
        return self.response_template(CREATE_ROUTE_RESPONSE).render()

    def create_route_table(self):
        """CreateRouteTable: create an empty route table inside a VPC."""
        vpc = self.querystring.get('VpcId')[0]
        table = self.ec2_backend.create_route_table(vpc)
        return self.response_template(CREATE_ROUTE_TABLE_RESPONSE).render(
            route_table=table)

    def delete_route(self):
        """DeleteRoute: remove one route by its destination CIDR."""
        table_id = self.querystring.get('RouteTableId')[0]
        cidr = self.querystring.get('DestinationCidrBlock')[0]
        self.ec2_backend.delete_route(table_id, cidr)
        return self.response_template(DELETE_ROUTE_RESPONSE).render()

    def delete_route_table(self):
        """DeleteRouteTable: remove an entire route table."""
        self.ec2_backend.delete_route_table(
            self.querystring.get('RouteTableId')[0])
        return self.response_template(DELETE_ROUTE_TABLE_RESPONSE).render()

    def describe_route_tables(self):
        """DescribeRouteTables: list route tables matching ids/filters."""
        ids = route_table_ids_from_querystring(self.querystring)
        filters = filters_from_querystring(self.querystring)
        tables = self.ec2_backend.get_all_route_tables(ids, filters)
        return self.response_template(DESCRIBE_ROUTE_TABLES_RESPONSE).render(
            route_tables=tables)

    def disassociate_route_table(self):
        """DisassociateRouteTable: drop a table/subnet association."""
        self.ec2_backend.disassociate_route_table(
            self.querystring.get('AssociationId')[0])
        return self.response_template(DISASSOCIATE_ROUTE_TABLE_RESPONSE).render()

    def replace_route(self):
        """ReplaceRoute: overwrite the target of an existing route."""
        table_id = self.querystring.get('RouteTableId')[0]
        cidr = self.querystring.get('DestinationCidrBlock')[0]
        self.ec2_backend.replace_route(table_id, cidr, **self._route_kwargs())
        return self.response_template(REPLACE_ROUTE_RESPONSE).render()

    def replace_route_table_association(self):
        """ReplaceRouteTableAssociation: point an association at a new table."""
        table_id = self.querystring.get('RouteTableId')[0]
        old_assoc = self.querystring.get('AssociationId')[0]
        new_assoc = self.ec2_backend.replace_route_table_association(
            old_assoc, table_id)
        return self.response_template(
            REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE).render(
                association_id=new_assoc)
CREATE_ROUTE_RESPONSE = """
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
REPLACE_ROUTE_RESPONSE = """
<ReplaceRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ReplaceRouteResponse>
"""
CREATE_ROUTE_TABLE_RESPONSE = """
<CreateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<routeTable>
<routeTableId>{{ route_table.id }}</routeTableId>
<vpcId>{{ route_table.vpc_id }}</vpcId>
<routeSet>
{% for route in route_table.routes.values() %}
{% if route.local %}
<item>
<destinationCidrBlock>{{ route.destination_cidr_block }}</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
</item>
{% endif %}
{% endfor %}
</routeSet>
<associationSet/>
<tagSet>
{% for tag in route_table.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</routeTable>
</CreateRouteTableResponse>
"""
# Rendered by describe_route_tables().  FIX: the original emitted a stray
# empty <tagSet/> element immediately before the populated <tagSet>, i.e.
# two tagSet elements per routeTable item; the EC2 response schema allows
# exactly one, so the empty duplicate has been removed.
DESCRIBE_ROUTE_TABLES_RESPONSE = """
<DescribeRouteTablesResponse xmlns="http://ec2.amazonaws.com/doc/2013-08-15/">
   <requestId>6f570b0b-9c18-4b07-bdec-73740dcf861a</requestId>
   <routeTableSet>
     {% for route_table in route_tables %}
       <item>
          <routeTableId>{{ route_table.id }}</routeTableId>
          <vpcId>{{ route_table.vpc_id }}</vpcId>
          <routeSet>
            {% for route in route_table.routes.values() %}
              <item>
                <destinationCidrBlock>{{ route.destination_cidr_block }}</destinationCidrBlock>
                {% if route.local %}
                  <gatewayId>local</gatewayId>
                  <origin>CreateRouteTable</origin>
                  <state>active</state>
                {% endif %}
                {% if route.gateway %}
                  <gatewayId>{{ route.gateway.id }}</gatewayId>
                  <origin>CreateRoute</origin>
                  <state>active</state>
                {% endif %}
                {% if route.instance %}
                  <instanceId>{{ route.instance.id }}</instanceId>
                  <origin>CreateRoute</origin>
                  <state>active</state>
                {% endif %}
                {% if route.vpc_pcx %}
                  <vpcPeeringConnectionId>{{ route.vpc_pcx.id }}</vpcPeeringConnectionId>
                  <origin>CreateRoute</origin>
                  <state>blackhole</state>
                {% endif %}
              </item>
            {% endfor %}
          </routeSet>
          <associationSet>
            {% for association_id,subnet_id in route_table.associations.items() %}
              <item>
                <routeTableAssociationId>{{ association_id }}</routeTableAssociationId>
                <routeTableId>{{ route_table.id }}</routeTableId>
                <main>false</main>
                <subnetId>{{ subnet_id }}</subnetId>
              </item>
            {% endfor %}
          </associationSet>
          <tagSet>
            {% for tag in route_table.get_tags() %}
              <item>
                <resourceId>{{ tag.resource_id }}</resourceId>
                <resourceType>{{ tag.resource_type }}</resourceType>
                <key>{{ tag.key }}</key>
                <value>{{ tag.value }}</value>
              </item>
            {% endfor %}
          </tagSet>
       </item>
     {% endfor %}
   </routeTableSet>
</DescribeRouteTablesResponse>
"""
DELETE_ROUTE_RESPONSE = """
<DeleteRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteResponse>
"""
DELETE_ROUTE_TABLE_RESPONSE = """
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
ASSOCIATE_ROUTE_TABLE_RESPONSE = """
<AssociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<associationId>{{ association_id }}</associationId>
</AssociateRouteTableResponse>
"""
DISASSOCIATE_ROUTE_TABLE_RESPONSE = """
<DisassociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateRouteTableResponse>
"""
REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE = """
<ReplaceRouteTableAssociationResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<newAssociationId>{{ association_id }}</newAssociationId>
</ReplaceRouteTableAssociationResponse>
"""
|
import unittest
from flask import Flask
from flask_sieve.exceptions import ValidationException, register_error_handler
class TestErrorHandler(unittest.TestCase):
    def test_error_handler(self):
        """register_error_handler wires ValidationException to a 400 JSON reply."""
        app = Flask(__name__)
        register_error_handler(app)
        # Handlers registered app-wide live under the (None, None) keys.
        handlers = app.error_handler_spec[None][None]
        self.assertIn(ValidationException, handlers)
        payload = {'field': 'Test error'}
        with app.app_context():
            handler = handlers[ValidationException]
            response, status = handler(ValidationException(payload))
            self.assertEqual(400, status)
            self.assertIn('Test error', str(response.get_json()))
|
# MIT License
#
# Copyright (c) 2020 Arkadiusz Netczuk <dev.arnet@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
import datetime
from PyQt5.QtCore import Qt
from stockmonitor.gui.appwindow import ChartAppWindow
from stockmonitor.gui.utils import set_label_url
from stockmonitor.gui import threadlist
from stockmonitor.gui.widget.mpl.baseintradaychart import set_ref_format_coord
from .. import uiloader
from .mpl.mpltoolbar import NavigationToolbar
_LOGGER = logging.getLogger(__name__)
# Load the Qt Designer-generated UI class pair matching this module's name.
UiTargetClass, QtBaseClass = uiloader.load_ui_from_class_name( __file__ )
class IndexChartWidget(QtBaseClass):                    # type: ignore
    """Chart widget showing intraday data for a single stock index (ISIN).

    Wraps a Qt Designer form (``self.ui``) containing a matplotlib price
    chart, a range combo box, a refresh button and several status labels.
    """

    def __init__(self, parentWidget=None):
        super().__init__(parentWidget)
        self.ui = UiTargetClass()
        self.ui.setupUi(self)
        # Set by connectData(); None until a data source is attached.
        self.dataObject = None
        self.isin = None
        if parentWidget is not None:
            # Match the chart background to the parent widget's palette.
            bgcolor = parentWidget.palette().color(parentWidget.backgroundRole())
            self.ui.dataChart.setBackgroundByQColor( bgcolor )
        self.toolbar = NavigationToolbar(self.ui.dataChart, self)
        self.ui.toolbarLayout.addWidget( self.toolbar )
        self.ui.sourceLabel.setOpenExternalLinks(True)
        self.ui.nameLabel.setStyleSheet("font-weight: bold")
        # Refresh re-downloads; changing the range only repaints from cache.
        self.ui.refreshPB.clicked.connect( self.refreshData )
        self.ui.rangeCB.currentIndexChanged.connect( self.repaintData )

    def connectData(self, dataObject, isin):
        """Attach the application data object and start tracking *isin*."""
        self.dataObject = dataObject
        self.isin = isin
        self.dataObject.stockDataChanged.connect( self.updateData )
        self.updateData( True )

    def clearData(self):
        """Remove all plotted series from the chart."""
        self.ui.dataChart.clearPlot()

    def refreshData(self):
        """Force a re-download of the data before repainting."""
        self.updateData( True )

    def repaintData(self):
        """Repaint from cached data (e.g. after the range selection changed)."""
        self.updateData( False )

    def updateData(self, forceRefresh=False):
        """Fetch chart data in worker threads, then repaint via _updateView.

        Disables the refresh button for the duration of the fetch; it is
        re-enabled in _updateView once all threads have finished.
        """
        self.ui.refreshPB.setEnabled( False )
        threads = threadlist.QThreadMeasuredList( self )
        threads.finished.connect( threads.deleteLater )
        threads.finished.connect( self._updateView, Qt.QueuedConnection )
        # intraSource = self.getIntradayDataSource()
        # threads.appendFunction( intraSource.getWorksheet, [forceRefresh] )
        # Prefetch every available range so switching ranges later is instant.
        for i in range(0, self.ui.rangeCB.count()):
            rangeText = self.ui.rangeCB.itemText( i )
            indexData = self.dataObject.dataContainer.gpwIndexIntradayData
            intraSource = indexData.getSource( self.isin, rangeText )
            threads.appendFunction( intraSource.getWorksheet, [forceRefresh] )
        currentData = self.getCurrentDataSource()
        threads.appendFunction( currentData.loadWorksheet, [forceRefresh] )
        threads.start()

    def _updateView(self):
        """Redraw the chart and status labels from the already-fetched data."""
        self.ui.refreshPB.setEnabled( True )
        rangeText = self.ui.rangeCB.currentText()
        _LOGGER.debug( "updating chart data, range[%s]", rangeText )
        intraSource = self.getIntradayDataSource()
        dataFrame = intraSource.getWorksheet()
        self.clearData()
        if dataFrame is None:
            # No data available -- leave the chart empty.
            return
        currentSource = self.getCurrentDataSource()
        currentSource.loadWorksheet()
        # print( "got intraday data:", dataFrame )
        # NOTE(review): assumes worksheet columns "t" (timestamps) and
        # "c" (price values) -- confirm against the data source schema.
        timeColumn = dataFrame["t"]
        priceColumn = dataFrame["c"]
        value = currentSource.getRecentValue( self.isin )
        change = currentSource.getRecentChange( self.isin )
        timestamp = timeColumn.iloc[-1]
        timeData = list(timeColumn)
        self.ui.dataChart.addPriceLine( timeData, priceColumn )
        # refPrice = priceColumn[ 0 ]
        refPrice = self.getReferenceValue()
        self.ui.dataChart.addPriceSecondaryY( refPrice )
        # Horizontal dashed line marking the reference price.
        refX = [ timeData[0], timeData[-1] ]
        refY = [ refPrice, refPrice ]
        self.ui.dataChart.addPriceLine( refX, refY, style="--" )
        # Vertical marker 15 minutes in the past -- presumably the feed's
        # publication delay; TODO confirm.
        currTime = datetime.datetime.now() - datetime.timedelta(minutes=15)
        if currTime < timeData[-1] and currTime > timeData[0]:
            self.ui.dataChart.pricePlot.axvline( x=currTime, color="black", linestyle="--" )
        set_ref_format_coord( self.ui.dataChart.pricePlot, refPrice )
        self.ui.valueLabel.setText( str(value) )
        self.ui.changeLabel.setText( str(change) + "%" )
        self.ui.timeLabel.setText( str(timestamp) )
        set_label_url( self.ui.sourceLabel, intraSource.sourceLink() )

    def getReferenceValue(self):
        """Return the reference price from the 14-day series, or None.

        When the most recent sample is from today, the second-to-last value
        is used; otherwise the last value.
        """
        indexData = self.dataObject.dataContainer.gpwIndexIntradayData
        intraSource = indexData.getSource( self.isin, "14D" )
        dataFrame = intraSource.getWorksheet()
        if dataFrame is None:
            return None
        priceColumn = dataFrame["c"]
        timeColumn = dataFrame["t"]
        recentTime = timeColumn.iloc[-1]
        recentDate = recentTime.date()
        currDate = datetime.datetime.now().date()
        if recentDate == currDate:
            ## after end of session, but the same day
            return priceColumn.iloc[-2]
        ## during the session or before the session
        return priceColumn.iloc[-1]

    def getIntradayDataSource(self):
        """Return the intraday source for the currently selected range."""
        rangeText = self.ui.rangeCB.currentText()
        indexData = self.dataObject.dataContainer.gpwIndexIntradayData
        intraSource = indexData.getSource( self.isin, rangeText )
        return intraSource

    def getCurrentDataSource(self):
        """Return the source of current (most recent) index values."""
        return self.dataObject.gpwIndexesData
def create_window( dataObject, isin, parent=None ):
    """Open a standalone chart window for the index identified by *isin*.

    Returns the shown ChartAppWindow so the caller can keep a reference.
    """
    window = ChartAppWindow( parent )
    widget = IndexChartWidget( window )
    window.addWidget( widget )
    window.refreshAction.triggered.connect( widget.refreshData )
    widget.connectData(dataObject, isin)
    indexName = widget.getCurrentDataSource().getNameFromIsin( isin )
    window.setWindowTitleSuffix( "- " + indexName + " [" + isin + "]" )
    widget.ui.nameLabel.setText( indexName )
    window.show()
    return window
|
import numpy
from utils import shared_zeros,shared_glorot_uniform
from theano import theano
import theano.tensor as T
def model(x, embedding_size, n_hidden):
    """Build a single-layer LSTM language model (with peephole connections).

    Args:
        x: symbolic vector of token indices; each index selects a row of the
           W_x* matrices, acting as a one-hot embedding lookup.
           NOTE(review): assumed to be a 1-D integer tensor -- confirm caller.
        embedding_size: vocabulary size (input index range and output dim).
        n_hidden: number of LSTM hidden/cell units.

    Returns:
        (output, params): `output` is the per-timestep softmax over the
        vocabulary; `params` is the list of trainable shared variables.
    """
    # input gate
    W_xi = shared_glorot_uniform(( embedding_size,n_hidden))
    W_hi = shared_glorot_uniform(( n_hidden,n_hidden))
    W_ci = shared_glorot_uniform(( n_hidden,n_hidden))  # peephole: cell -> input gate
    b_i = shared_zeros((n_hidden,))
    # forget gate
    W_xf = shared_glorot_uniform(( embedding_size, n_hidden))
    W_hf = shared_glorot_uniform(( n_hidden,n_hidden))
    W_cf = shared_glorot_uniform(( n_hidden,n_hidden))  # peephole: cell -> forget gate
    b_f = shared_zeros((n_hidden,))
    # output gate
    W_xo = shared_glorot_uniform(( embedding_size, n_hidden))
    W_ho = shared_glorot_uniform(( n_hidden,n_hidden))
    W_co = shared_glorot_uniform(( n_hidden,n_hidden))  # peephole: cell -> output gate
    b_o = shared_zeros((n_hidden,))
    # cell weights
    W_xc = shared_glorot_uniform(( embedding_size, n_hidden))
    W_hc = shared_glorot_uniform(( n_hidden,n_hidden))
    b_c = shared_zeros((n_hidden,))
    # output weights
    W_y = shared_glorot_uniform(( n_hidden, embedding_size), name="V")
    b_y = shared_zeros((embedding_size,), name="by")
    params = [W_xi,W_hi,W_ci,b_i,W_xf,W_hf,W_cf,b_f,W_xo,W_ho,W_co,b_o,W_xc,W_hc,b_c,W_y,b_y]
    def step(x_t, h_tm1, c_tm1):
        # One LSTM timestep; W_x*[x_t] is a row lookup (embedding of token x_t).
        i_t = T.nnet.sigmoid(W_xi[x_t] + T.dot(W_hi, h_tm1) + T.dot(W_ci, c_tm1) + b_i)
        f_t = T.nnet.sigmoid(W_xf[x_t] + T.dot(W_hf, h_tm1) + T.dot(W_cf, c_tm1) + b_f)
        c_t = f_t * c_tm1 + i_t * T.tanh(W_xc[x_t] + T.dot(W_hc, h_tm1) + b_c)
        # The output gate peeks at the *new* cell state c_t.
        o_t = T.nnet.sigmoid(W_xo[x_t] + T.dot(W_ho, h_tm1) + T.dot(W_co, c_t) + b_o)
        h_t = o_t * T.tanh(c_t)
        y_t = T.dot(h_t, W_y) + b_y  # unnormalized vocabulary scores
        return h_t, c_t, y_t
    h0 = shared_zeros((n_hidden,), name='h0')
    c0 = shared_zeros((n_hidden,), name='c0')
    # Scan over the sequence; backprop-through-time is truncated to 10 steps.
    [h, c, y_pred], _ = theano.scan(step, sequences=x, outputs_info=[h0, c0, None], truncate_gradient=10)
    # NOTE: local name shadows the enclosing function's name (kept as-is).
    model = T.nnet.softmax(y_pred)
    return model, params
|
import json


class messaging(object):
    """Message-event client that serializes events as JSON over a websocket.

    Each public method targets one destination scope (controller, agent or
    plugin at global/regional/local level).  When ``is_rpc`` is true the call
    blocks on the websocket until a reply arrives and returns the decoded
    JSON; otherwise it returns ``None`` after sending.

    The original implementation repeated the build/serialize/send/receive
    logic verbatim in all eight methods; it is now factored into the private
    ``_send_msgevent`` helper, preserving the exact wire format.
    """

    def __init__(self, ws_interface):
        # ws_interface must expose a connected websocket at `.ws` with
        # `send(str)` and `recv() -> str` methods.
        self.ws_interface = ws_interface

    # WS functions
    def _send_msgevent(self, is_rpc, message_info, message_payload):
        """Send one message event; return the decoded RPC reply or None.

        ``message_info`` must already contain ``message_type`` plus any
        destination fields; ``is_rpc`` is appended last here so the JSON key
        order matches the original hand-rolled implementations.
        """
        message_info['is_rpc'] = is_rpc
        message = {
            'message_info': message_info,
            'message_payload': message_payload,
        }
        self.ws_interface.ws.send(json.dumps(message))
        if is_rpc:
            return json.loads(self.ws_interface.ws.recv())
        return None

    def global_controller_msgevent(self, is_rpc, message_event_type, message_payload):
        """Event addressed to the global controller."""
        return self._send_msgevent(is_rpc, {
            'message_type': 'global_controller_msgevent',
            'message_event_type': message_event_type,
        }, message_payload)

    def regional_controller_msgevent(self, is_rpc, message_event_type, message_payload):
        """Event addressed to the regional controller."""
        return self._send_msgevent(is_rpc, {
            'message_type': 'regional_controller_msgevent',
            'message_event_type': message_event_type,
        }, message_payload)

    def global_agent_msgevent(self, is_rpc, message_event_type, message_payload, dst_region, dst_agent):
        """Event addressed to a specific agent in a specific region."""
        return self._send_msgevent(is_rpc, {
            'message_type': 'global_agent_msgevent',
            'message_event_type': message_event_type,
            'dst_region': dst_region,
            'dst_agent': dst_agent,
        }, message_payload)

    def regional_agent_msgevent(self, is_rpc, message_event_type, message_payload, dst_agent):
        """Event addressed to an agent within the current region."""
        return self._send_msgevent(is_rpc, {
            'message_type': 'regional_agent_msgevent',
            'message_event_type': message_event_type,
            'dst_agent': dst_agent,
        }, message_payload)

    def agent_msgevent(self, is_rpc, message_event_type, message_payload):
        """Event addressed to the local agent."""
        return self._send_msgevent(is_rpc, {
            'message_type': 'agent_msgevent',
            'message_event_type': message_event_type,
        }, message_payload)

    def global_plugin_msgevent(self, is_rpc, message_event_type, message_payload, dst_region, dst_agent, dst_plugin):
        """Event addressed to a plugin on an agent in a specific region."""
        return self._send_msgevent(is_rpc, {
            'message_type': 'global_plugin_msgevent',
            'message_event_type': message_event_type,
            'dst_region': dst_region,
            'dst_agent': dst_agent,
            'dst_plugin': dst_plugin,
        }, message_payload)

    def regional_plugin_msgevent(self, is_rpc, message_event_type, message_payload, dst_agent, dst_plugin):
        """Event addressed to a plugin on an agent within the current region."""
        return self._send_msgevent(is_rpc, {
            'message_type': 'regional_plugin_msgevent',
            'message_event_type': message_event_type,
            'dst_agent': dst_agent,
            'dst_plugin': dst_plugin,
        }, message_payload)

    def plugin_msgevent(self, is_rpc, message_event_type, message_payload, dst_plugin):
        """Event addressed to a plugin on the local agent."""
        return self._send_msgevent(is_rpc, {
            'message_type': 'plugin_msgevent',
            'message_event_type': message_event_type,
            'dst_plugin': dst_plugin,
        }, message_payload)
|
import subprocess
import time
import sys

"""
Kicks off a pipeline that schedules Turk jobs for tool 1D,
collects results in batches and collates data.
1. Read in newline separated commands and construct CSV input.
2. Create HITs for each input command using tool 1D template.
3. Continuously check for completed assignments, fetching results in batches.
4. Collate turk output data with the input job specs.
5. Postprocess datasets to obtain well formed action dictionaries.
"""


def run_step(command, error_message):
    """Run one shell pipeline step; print *error_message* and abort on failure.

    FIX: the original called ``sys.exit()`` with no argument, which exits
    with status 0 and made failed pipeline runs look successful to callers;
    the step's non-zero return code is now propagated.
    """
    rc = subprocess.call([command], shell=True)
    if rc != 0:
        print(error_message)
        sys.exit(rc)


# CSV input
run_step(
    "python3 ../text_to_tree_tool/construct_input_for_turk.py --input_file 1D_input.txt --tool_num 4 > turk_input.csv",
    "Error preprocessing. Exiting.",
)
# Load input commands and create a separate HIT for each row
run_step(
    "python3 create_jobs.py --xml_file fetch_question_D.xml --tool_num 4",
    "Error creating HIT jobs. Exiting.",
)
# Wait for results to be ready
print("Turk jobs created at : %s \n Waiting for results..." % time.ctime())
time.sleep(100)
# Check if results are ready
run_step("python3 get_results.py --tool_num 4", "Error fetching HIT results. Exiting.")
# Collate datasets
print("*** Collating turk outputs and input job specs ***")
run_step("python3 collate_answers.py", "Error collating answers. Exiting.")
# Postprocess
# FIX: this step previously reused the "Error collating answers" message
# (copy-paste error), masking which stage actually failed.
print("*** Postprocessing results ***")
run_step("python3 parse_tool_D_outputs.py", "Error postprocessing results. Exiting.")
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from yacs.config import CfgNode as CN
# Root config node; `cfg` is the public alias used by the rest of the
# project (__C and cfg refer to the same CfgNode object).
__C = CN()
cfg = __C
__C.META_ARC = "TCTrack_alexnet"
__C.CUDA = True
# ------------------------------------------------------------------------ #
# Training options
# ------------------------------------------------------------------------ #
__C.TRAIN = CN()
# Anchor Target
# Positive anchor threshold
__C.TRAIN.THR_HIGH = 0.6
__C.TRAIN.apnchannel = 256
__C.TRAIN.clsandlocchannel = 256
__C.TRAIN.groupchannel = 32
# Negative anchor threshold
__C.TRAIN.THR_LOW = 0.3
# Anchor sampling counts per training pair
__C.TRAIN.NEG_NUM = 16
__C.TRAIN.POS_NUM = 16
__C.TRAIN.TOTAL_NUM = 64
__C.TRAIN.PR = 1
# Loss term weights
__C.TRAIN.CLS_WEIGHT = 1.0
__C.TRAIN.LOC_WEIGHT = 3.0
__C.TRAIN.SHAPE_WEIGHT =2.0
__C.TRAIN.EXEMPLAR_SIZE = 127
__C.TRAIN.SEARCH_SIZE = 287 #255
__C.TRAIN.BASE_SIZE = 8
__C.TRAIN.OUTPUT_SIZE = 21 #25
# Checkpoint path to resume from ('' disables resuming)
__C.TRAIN.RESUME = ''
__C.TRAIN.PRETRAINED = 1
__C.TRAIN.LARGER=2.0
__C.TRAIN.LOG_DIR = './logs'
__C.TRAIN.SNAPSHOT_DIR = './snapshot'
__C.TRAIN.EPOCH = 30
__C.TRAIN.START_EPOCH = 0
__C.TRAIN.BATCH_SIZE = 100
__C.TRAIN.videorange = 5
__C.TRAIN.NUM_GPU = 2
__C.TRAIN.NUM_WORKERS = 1
# SGD optimizer hyper-parameters
__C.TRAIN.MOMENTUM = 0.9
__C.TRAIN.WEIGHT_DECAY = 0.0001
__C.TRAIN.w1=1.0
__C.TRAIN.w2=1.0
__C.TRAIN.w3=1.0
__C.TRAIN.w4=1.0
__C.TRAIN.w5=1.0
__C.TRAIN.range=2.0
__C.TRAIN.MASK_WEIGHT = 1
__C.TRAIN.PRINT_FREQ = 20
__C.TRAIN.LOG_GRADS = False
__C.TRAIN.GRAD_CLIP = 10.0
# Learning-rate schedule ('log' decay) with optional step warm-up
__C.TRAIN.BASE_LR = 0.005
__C.TRAIN.LR = CN()
__C.TRAIN.LR.TYPE = 'log'
__C.TRAIN.LR.KWARGS = CN(new_allowed=True)
__C.TRAIN.LR_WARMUP = CN()
__C.TRAIN.LR_WARMUP.WARMUP = True
__C.TRAIN.LR_WARMUP.TYPE = 'step'
__C.TRAIN.LR_WARMUP.EPOCH = 5
__C.TRAIN.LR_WARMUP.KWARGS = CN(new_allowed=True)
# ------------------------------------------------------------------------ #
# Dataset options
# ------------------------------------------------------------------------ #
__C.DATASET = CN(new_allowed=True)
# Augmentation
# for template
__C.DATASET.TEMPLATE = CN()
# for detail discussion
__C.DATASET.TEMPLATE.SHIFT = 4
__C.DATASET.TEMPLATE.SCALE = 0.05
__C.DATASET.TEMPLATE.BLUR = 0.0
__C.DATASET.TEMPLATE.FLIP = 0.0
__C.DATASET.TEMPLATE.COLOR = 1.0
# Augmentation for the search region
__C.DATASET.SEARCH = CN()
__C.DATASET.SEARCH.SHIFT = 64
__C.DATASET.SEARCH.SCALE = 0.18
__C.DATASET.SEARCH.BLUR = 0.0
__C.DATASET.SEARCH.FLIP = 0.0
__C.DATASET.SEARCH.COLOR = 1.0
# for detail discussion
__C.DATASET.NEG = 0.2
__C.DATASET.GRAY = 0.0
__C.DATASET.NAMES = ('VID', 'COCO', 'GOT', 'LaSOT','YOUTUBEBB')
__C.DATASET.VID = CN()
__C.DATASET.VID.ROOT = './train_dataset/vid/crop511'
__C.DATASET.VID.ANNO = './train_dataset/vid/train.json'
__C.DATASET.VID.FRAME_RANGE = 50 #100
__C.DATASET.VID.NUM_USE = 100000 # repeat until reach NUM_USE
__C.DATASET.YOUTUBEBB = CN()
__C.DATASET.YOUTUBEBB.ROOT = './train_dataset/yt_bb/crop511'
__C.DATASET.YOUTUBEBB.ANNO = './train_dataset/yt_bb/train.json'
__C.DATASET.YOUTUBEBB.FRAME_RANGE = 3
__C.DATASET.YOUTUBEBB.NUM_USE = -1 # use all not repeat
__C.DATASET.COCO = CN()
__C.DATASET.COCO.ROOT = './train_dataset/coco/crop511'
__C.DATASET.COCO.ANNO = './train_dataset/coco/train2017.json'
__C.DATASET.COCO.FRAME_RANGE = 1
__C.DATASET.COCO.NUM_USE = -1
__C.DATASET.LaSOT = CN()
__C.DATASET.LaSOT.ROOT = './train_dataset/lasot/crop511' # LaSOT dataset path
__C.DATASET.LaSOT.ANNO = './train_dataset/lasot/train.json'
__C.DATASET.LaSOT.FRAME_RANGE = 50 #100
__C.DATASET.LaSOT.NUM_USE = 100000
__C.DATASET.GOT = CN()
__C.DATASET.GOT.ROOT = './train_dataset/got10k/crop511' # GOT dataset path
__C.DATASET.GOT.ANNO = './train_dataset/got10k/train.json'
__C.DATASET.GOT.FRAME_RANGE = 50
__C.DATASET.GOT.NUM_USE = 100000
__C.DATASET.VIDEOS_PER_EPOCH = 600000
# ------------------------------------------------------------------------ #
# Backbone options
# ------------------------------------------------------------------------ #
__C.BACKBONE = CN()
# Backbone type, current only support resnet18,34,50;alexnet;mobilenet
__C.BACKBONE.TYPE = 'alexnet'
__C.BACKBONE.KWARGS = CN(new_allowed=True)
# Pretrained backbone weights
__C.BACKBONE.PRETRAINED = 'back.pth'
# Train layers
__C.BACKBONE.TRAIN_LAYERS = ['layer3', 'layer4', 'layer5']
__C.BACKBONE.Tempor_TRAIN_LAYERS = ['layer3', 'layer4', 'layer5']
# Layer LR
__C.BACKBONE.LAYERS_LR = 0.1
# Switch to train layer
__C.BACKBONE.TRAIN_EPOCH = 10
# # ------------------------------------------------------------------------ #
# # Anchor options
# # ------------------------------------------------------------------------ #
__C.ANCHOR = CN()
# # Anchor stride
__C.ANCHOR.STRIDE = 16
# ------------------------------------------------------------------------ #
# Tracker options
# ------------------------------------------------------------------------ #
__C.TRACK = CN()
__C.TRACK.TYPE = 'TCTracktracker'
# Scale penalty
__C.TRACK.PENALTY_K = 0.04
# Window influence
__C.TRACK.WINDOW_INFLUENCE = 0.44
# Interpolation learning rate
__C.TRACK.LR = 0.4
__C.TRACK.w1=1.2
__C.TRACK.w2=1.0
__C.TRACK.w3=1.6
__C.TRACK.LARGER=1.4
# Exemplar size
__C.TRACK.EXEMPLAR_SIZE = 127
# Instance size
__C.TRACK.INSTANCE_SIZE = 255
# Base size
__C.TRACK.BASE_SIZE = 8
__C.TRACK.STRIDE = 8
__C.TRACK.strict = 0.5
# Context amount
__C.TRACK.CONTEXT_AMOUNT = 0.5
# Long term lost search size
__C.TRACK.LOST_INSTANCE_SIZE = 831
# Long term confidence low
__C.TRACK.CONFIDENCE_LOW = 0.85
# Long term confidence high
__C.TRACK.CONFIDENCE_HIGH = 0.998
# Mask threshold ("THERSHOLD" typo kept: renaming the key would break readers)
__C.TRACK.MASK_THERSHOLD = 0.30
# Mask output size
__C.TRACK.MASK_OUTPUT_SIZE = 127
|
import numpy as np

from aiida.backends.testbase import AiidaTestCase, check_if_tests_can_run
from aiida.common.exceptions import TestsNotAllowedError
from aiida.common.links import LinkType
from aiida.orm import Node
from aiida.orm.calculation import Calculation
from aiida.orm.calculation.work import WorkCalculation
from aiida.orm.data import Data
from aiida.orm.querybuilder import QueryBuilder

from age.entities import get_basket
from age.rules import (UpdateRule, RuleSequence, MODES, RuleSaveWalkers, RuleSetWalkers)
class TestNodes(AiidaTestCase):
# Hardcoding here how deep I go
DEPTH = 4
# Hardcoding the branching at every level, i.e. the number
# of children per parent Node.
NR_OF_CHILDREN = 2
    def runTest(self):
        """
        Run the individual test scenarios in sequence
        (unittest entry point when the case is run as a single test).
        """
        self.test_data_provenance()
        # ~ self.test_returns_calls()  # disabled -- not yet ported
        self.test_cycle()
        self.test_stash()
    def test_data_provenance(self):
        """
        Creating a parent (Data) node.
        Attaching a sequence of Calculation/Data to create a "provenance".

        Then checks that UpdateRule traversal with max_iterations=depth finds
        exactly the nodes at that depth (REPLACE mode) or all nodes up to and
        including that depth (APPEND mode).
        """
        from age.utils import create_tree
        created_dict = create_tree(self.DEPTH, self.NR_OF_CHILDREN)
        parent = created_dict['parent']
        # depth_dict maps depth -> set of node pks created at that depth
        desc_dict = created_dict['depth_dict']
        # Created all the nodes, tree.
        # Now testing whether I find all the descendants
        # Using the utility function to create the starting entity set:
        es = get_basket(node_ids=(parent.id,))
        # NOTE(review): QueryBuilder must be importable at module scope
        # (from aiida.orm.querybuilder) or this line raises NameError.
        qb = QueryBuilder().append(Node).append(Node)
        for depth in range(0, self.DEPTH):
            #print('At depth {}'.format(depth))
            # REPLACE mode: result set holds only nodes at exactly `depth`
            rule = UpdateRule(qb, mode=MODES.REPLACE, max_iterations=depth)
            res = rule.run(es.copy())['nodes']._set
            #print('  Replace-mode results: {}'.format(', '.join(map(str, sorted(res)))))
            should_set = desc_dict[depth]
            # set-equality check: no extra and no missing elements
            self.assertTrue(not(res.difference(should_set) or should_set.difference(res)))
            # APPEND mode: accumulates all nodes up to and including `depth`
            rule = UpdateRule(qb, mode=MODES.APPEND, max_iterations=depth)
            res = rule.run(es.copy())['nodes']._set
            #print('  Append-mode results: {}'.format(', '.join(map(str, sorted(res)))))
            should_set = set()
            [[should_set.add(s) for s in desc_dict[d]] for d in range(depth+1)]
            self.assertTrue(not(res.difference(should_set) or should_set.difference(res)))
    def test_cycle(self):
        """
        Creating a cycle: a data-instance is both input to and returned by
        a WorkCalculation.

        The traversal with unlimited iterations must still terminate and
        return exactly the two nodes forming the cycle.
        """
        d = Data().store()
        c = WorkCalculation().store()
        # New provenance design branch
        # ~ c.add_incoming(d, link_type=LinkType.INPUT_WORK, link_label='lala')
        # ~ d.add_incoming(c, link_type=LinkType.RETURN, link_label='lala')
        c.add_link_from(d, link_type=LinkType.INPUT, label='lala')
        d.add_link_from(c, link_type=LinkType.RETURN, label='lala')
        qb = QueryBuilder().append(Node).append(Node)
        # max_iterations=inf: iterate until the walker set stops growing
        rule = UpdateRule(qb, max_iterations=np.inf)
        es = get_basket(node_ids=(d.id,))
        res = rule.run(es)
        self.assertEqual( res['nodes']._set, set([d.id, c.id]))
def test_stash(self):
    """
    Test stashing and restoring walkers (RuleSaveWalkers / RuleSetWalkers)
    inside a RuleSequence.
    """
    # Creating a first calculation with 3 input data nodes:
    c = Calculation().store()
    dins = set() # To compare later, dins is a set of the input data pks.
    for i in range(3):
        data_in = Data().store()
        dins.add(data_in.id)
        c.add_link_from(data_in,
            link_type=LinkType.INPUT,
            label='lala-{}'.format(i))
    # Creating output data to that calculation:
    douts = set() # Similar to dins, this is the set of data output pks
    for i in range(4):
        data_out = Data().store()
        douts.add(data_out.id)
        data_out.add_link_from(c,
            link_type=LinkType.CREATE,
            label='lala-{}'.format(i))
    # Adding another calculation, with one input from c's outputs and one
    # input from c's inputs.  The loop variables data_in/data_out still
    # reference the last node created in each loop above:
    c2 = Calculation().store()
    c2.add_link_from(data_in, link_type=LinkType.INPUT, label='b')
    c2.add_link_from(data_out, link_type=LinkType.INPUT, label='c')
    # Also here starting with a set that only contains the starting calculation:
    es = get_basket(node_ids=(c.id,))
    # Creating the rule for getting input nodes:
    rule_in = UpdateRule(QueryBuilder().append(
        Node, tag='n').append(
        Node, input_of='n'))
    # Creating the rule for getting output nodes:
    rule_out = UpdateRule(QueryBuilder().append(
        Node, tag='n').append(
        Node, output_of='n'))
    # Testing the input rule.  Since I'm updating, I should
    # have the input and the calculation itself:
    is_set = rule_in.run(es.copy())['nodes']._set
    self.assertEqual(is_set, dins.union({c.id}))
    # Testing the output rule; also here, outputs + calculation c are expected:
    is_set = rule_out.run(es.copy())['nodes']._set
    self.assertEqual(is_set, douts.union({c.id}))
    # Now I'm testing the rule sequence.
    # I first apply the rule to get outputs, then the rule to get inputs:
    rs1 = RuleSequence((rule_out, rule_in))
    is_set = rs1.run(es.copy())['nodes']._set
    # I expect the union of inputs, outputs, and the calculation:
    self.assertEqual(is_set, douts.union(dins).union({c.id}))
    # If the order of the rules is exchanged, I end up also attaching c2
    # to the results.  This is because c and c2 share one data-input:
    rs2 = RuleSequence((rule_in, rule_out))
    is_set = rs2.run(es.copy())['nodes']._set
    self.assertEqual(is_set, douts.union(dins).union({c.id, c2.id}))
    # Testing a similar sequence, but with the possibility to stash results:
    stash = es.copy(with_data=False)
    rsave = RuleSaveWalkers(stash)
    # Checking whether the rule does the right thing, i.e. if I stash the
    # result, the active walkers should be an empty set:
    self.assertEqual(rsave.run(es.copy()), es.copy(with_data=False))
    # Whereas the stash contains the same data as the starting point:
    self.assertEqual(stash,es)
    # Save the walkers before the input rule, restore them before the
    # output rule, so rule_out never sees the inputs that lead to c2:
    rs2 = RuleSequence((
        RuleSaveWalkers(stash), rule_in,
        RuleSetWalkers(stash) ,rule_out))
    is_set = rs2.run(es.copy())['nodes']._set
    # Now I test whether the stash does the right thing,
    # namely not including c2 in the results:
    self.assertEqual(is_set, douts.union(dins).union({c.id}))
def test_returns_calls(self ):
    """
    Build the set of export-style traversal rules (inputs, create/return,
    call links, plus optional reversed directions).

    NOTE(review): this method references ``create_reversed`` and
    ``return_reversed``, which are not defined in this scope, so running
    it raises a NameError -- presumably why its invocation in runTest is
    commented out.  The final ``seq`` is built but never run.
    """
    rules = []
    # INPUT(Data -> ProcessNode), forward direction:
    qb = QueryBuilder()
    qb.append(Data, tag='predecessor')
    qb.append(ProcessNode, with_incoming='predecessor',
        edge_filters={'type': {'in': [
            LinkType.INPUT_CALC.value,
            LinkType.INPUT_WORK.value]}})
    rules.append(UpdateRule(qb))
    # CREATE/RETURN(ProcessNode, Data) - Forward
    qb = QueryBuilder()
    qb.append(ProcessNode, tag='predecessor')
    qb.append(Data, with_incoming='predecessor', edge_filters={
        'type': {'in': [LinkType.CREATE.value, LinkType.RETURN.value]}})
    rules.append(UpdateRule(qb))
    # CALL(ProcessNode, ProcessNode) - Forward
    qb = QueryBuilder()
    qb.append(ProcessNode, tag='predecessor')
    qb.append(ProcessNode, with_incoming='predecessor',
        edge_filters={'type': {'in': [
            LinkType.CALL_CALC.value,
            LinkType.CALL_WORK.value]}})
    rules.append(UpdateRule(qb))
    # CREATE(ProcessNode, Data) - Reversed
    if create_reversed:
        qb = QueryBuilder()
        qb.append(ProcessNode, tag='predecessor', project=['id'])
        qb.append(Data,
            with_incoming='predecessor',
            edge_filters={'type': {'in': [LinkType.CREATE.value]}})
        rules.append(UpdateRule(qb))
    # Case 3:
    # RETURN(ProcessNode, Data) - Reversed
    if return_reversed:
        qb = QueryBuilder()
        qb.append(ProcessNode, tag='predecessor',)
        qb.append(Data,
            output_of='predecessor',
            edge_filters={'type': {'in': [LinkType.RETURN.value]}})
        rules.append(UpdateRule(qb))
    seq = RuleSequence(rules, max_iterations=np.inf )
class TestGroups(AiidaTestCase):
    """Traversal tests that hop between Node and Group entities."""
    # Number of groups to create; one node less than this is created.
    N_GROUPS = 10
    def runTest(self):
        """
        Testing whether groups and nodes can be traversed with the Graph explorer:
        """
        # I create a certain number of groups and save them in this list:
        groups = []
        for igroup in range(self.N_GROUPS):
            name='g-{}'.format(igroup) # Name has to be unique
            groups.append(Group(name=name).store())
        # Same with nodes: create 1 node less than I have groups.
        nodes = []
        for inode in range(1, self.N_GROUPS):
            d = Data().store()
            # Each node is added both to the group of the same index and to
            # the group of index - 1, chaining all groups together:
            groups[inode].add_nodes(d)
            groups[inode-1].add_nodes(d)
            nodes.append(d)
        # Creating sets for the test:
        nodes_set = set([n.id for n in nodes])
        groups_set = set([g.id for g in groups])
        # Now I want a rule that gives me all the data starting from the
        # last node created (d), hopping via shared group membership:
        qb = QueryBuilder()
        qb.append(Data, tag='d')
        qb.append(Group, group_of='d', tag='g', filters={'type':''} ) # The filter here is
        # there for avoiding problems with autogrouping. Depending how the test
        # exactly is run, nodes can be put into autogroups.
        qb.append(Data, member_of='g')
        es = get_basket(node_ids=(d.id,))
        rule = UpdateRule(qb, max_iterations=np.inf)
        res = rule.run(es.copy())['nodes']._set
        # Checking whether this UpdateRule really visits all the nodes I created:
        self.assertEqual(res, nodes_set)
        # The visits recorded by the rule must match the final result set:
        self.assertEqual(rule.get_visits()['nodes']._set,res)
        # I can do the same with 2 rules chained into a RuleSequence:
        # node -> its groups, then group -> its member nodes.
        qb1=QueryBuilder().append(Node, tag='n').append(
            Group, group_of='n', filters={'type':''})
        qb2=QueryBuilder().append(Group, tag='n').append(
            Node, member_of='n')
        rule1 = UpdateRule(qb1)
        rule2 = UpdateRule(qb2)
        seq = RuleSequence((rule1, rule2), max_iterations=np.inf)
        res = seq.run(es.copy())
        for should_set, is_set in (
                (nodes_set.copy(), res['nodes']._set),
                (groups_set,res['groups']._set)):
            self.assertEqual(is_set, should_set)
class TestEdges(AiidaTestCase):
    """Traversal tests that also track the edges (links) that were walked."""
    # Shape of the generated test tree:
    DEPTH = 4
    NR_OF_CHILDREN = 2
    def runTest(self):
        """
        Testing whether nodes can be traversed with the Graph explorer,
        with the traversed links being stored as well.
        """
        from age.utils import create_tree
        # draw=True also returns the adjacency matrix of the tree:
        created_dict = create_tree(self.DEPTH, self.NR_OF_CHILDREN, draw=True)
        instances = created_dict['instances']
        adjacency = created_dict['adjacency']
        es = get_basket(node_ids=(created_dict['parent'].id,))
        qb = QueryBuilder().append(Node).append(Node)
        # APPEND mode with edge tracking: all levels accumulate and every
        # traversed link is recorded in res['nodes_nodes'].
        rule = UpdateRule(qb, mode=MODES.APPEND, max_iterations=self.DEPTH-1, track_edges=True)
        res = rule.run(es.copy())
        should_set = set()
        [[should_set.add(s) for s in created_dict['depth_dict'][d]] for d in range(self.DEPTH)]
        self.assertEqual(res['nodes']._set, should_set)
        # Expected edges: every (parent, child) pair in the adjacency matrix.
        # NOTE(review): ``zip(...)[:2]`` subscripts the result of zip, which
        # only works on Python 2 where zip returns a list.
        touples_should = set((instances[i],instances[j]) for i, j in zip(*np.where(adjacency)))
        touples_are = set(zip(*zip(*res['nodes_nodes']._set)[:2]))
        self.assertEqual(touples_are, touples_should)
        # REPLACE mode: only the last iteration's links should remain,
        # i.e. the edges between the two deepest levels of the tree.
        rule = UpdateRule(qb, mode=MODES.REPLACE, max_iterations=self.DEPTH-1, track_edges=True)
        res = rule.run(es.copy())
        instances = created_dict['instances']
        adjacency = created_dict['adjacency']
        touples_should = set()
        [touples_should.add((pk1, pk2))
            for idx1,pk1 in enumerate(instances)
            for idx2,pk2 in enumerate(instances)
            if adjacency[idx1, idx2]
                and pk1 in created_dict['depth_dict'][self.DEPTH-2]
                and pk2 in created_dict['depth_dict'][self.DEPTH-1]
        ]
        touples_are = set(zip(*zip(*res['nodes_nodes']._set)[:2]))
        self.assertEqual(touples_are, touples_should)
if __name__ == '__main__':
    from unittest import TestSuite, TextTestRunner
    # Abort early if the test environment (e.g. a test profile) is not set up.
    try:
        check_if_tests_can_run()
    except TestsNotAllowedError as e:
        # NOTE(review): Python 2 print syntax and e.message -- this entry
        # point cannot run under Python 3.
        print >> sys.stderr, e.message
        sys.exit(1)
    # Run the three test cases in a single suite:
    test_suite = TestSuite()
    test_suite.addTest(TestNodes())
    test_suite.addTest(TestGroups())
    test_suite.addTest(TestEdges())
    results = TextTestRunner(failfast=False, verbosity=2).run(test_suite)
|
from itertools import combinations
from tqdm import tqdm
import numpy as np
import scipy as sp
from sklearn.preprocessing import MinMaxScaler
def get_complements(x_union_y):
    '''Yield pairs ``(subset, complement)`` that partition ``x_union_y``.

    Every subset of size ``len(x_union_y) // 2`` is yielded as a tuple,
    paired with its complement as a frozenset.  ``x_union_y`` should be
    a set type.'''
    half_size = len(x_union_y) // 2
    for subset in combinations(x_union_y, half_size):
        rest = frozenset(x_union_y.difference(subset))
        yield (subset, rest)
def get_expSG_1storder_relation_no_cache_NEW(word_from, words_to, we_model):
    """Sigmoid dot-products between word_from's input vector and the output
    (context) vectors of each word in words_to.

    Returns a 1-D numpy array aligned with words_to."""
    indices = [we_model.wv.vocab[word].index for word in words_to]
    context_vectors = np.array([we_model.trainables.syn1neg[idx] for idx in indices])
    in_vector = we_model.wv[word_from]
    return sp.special.expit(np.dot(context_vectors, in_vector))
def get_expSG_1storder_relation_no_cache_NEW_ALLWORDS(words_to, we_model):
    """Sigmoid dot-products of every vocabulary input vector against the
    output (context) vectors of words_to.

    Returns a 2-D numpy array of shape (vocab_size, len(words_to))."""
    indices = [we_model.wv.vocab[word].index for word in words_to]
    context_vectors = np.array([we_model.trainables.syn1neg[idx] for idx in indices])
    all_in_vectors = we_model.wv.vectors
    return sp.special.expit(np.dot(all_in_vectors, context_vectors.T))
def get_1storder_association_metric_fast(word, A_terms, B_terms, we_model):
    """Mean first-order relation of `word` to A_terms minus that to B_terms."""
    rel_a = get_expSG_1storder_relation_no_cache_NEW(word, A_terms, we_model)
    rel_b = get_expSG_1storder_relation_no_cache_NEW(word, B_terms, we_model)
    return rel_a.mean() - rel_b.mean()
def get_all_relations_1storder(A_terms, B_terms, we_model):
    """First-order association metric (mean A-relation minus mean B-relation)
    computed for every word in the vocabulary at once (1-D array)."""
    rel_a = get_expSG_1storder_relation_no_cache_NEW_ALLWORDS(A_terms, we_model)
    rel_b = get_expSG_1storder_relation_no_cache_NEW_ALLWORDS(B_terms, we_model)
    return rel_a.mean(axis=1) - rel_b.mean(axis=1)
def get_1storder_association_metric_list_for_target_list(target_list, A_terms, B_terms, we_model):
    """Compute the first-order association metric for each target word and
    rescale it into [-1, 1].

    The MinMaxScaler is fitted on the metric over the *whole* vocabulary,
    so the scaled values are comparable across target lists.

    Returns a 1-D numpy array aligned with target_list.
    """
    global ORDER
    ORDER = 'first'
    # Build a list and convert once: calling np.append in a loop re-allocates
    # the whole array every iteration (quadratic time).
    associations = np.array([
        get_1storder_association_metric_fast(word, A_terms, B_terms, we_model)
        for word in tqdm(target_list)
    ])
    scaler = MinMaxScaler(feature_range=(-1,1))
    all_associations = get_all_relations_1storder(A_terms, B_terms, we_model)
    scaler.fit(all_associations.reshape(-1,1)) # Reshape is for a single feature, NOT for a single sample
    transformed = scaler.transform(associations.reshape(-1,1))
    return transformed.reshape(len(transformed))
"""Filter to convert booleans to icons."""
import json
from django.template import Library
register = Library()
@register.filter(is_safe=True)
def fontawesomize(val):
    """Render a truthy value as a green check icon and a falsy one as a red cross."""
    if not val:
        return '<i class="fa fa-times" style="color: red"></i>'
    return '<i class="fa fa-check" style="color: green"></i>'
|
import sys
import os
import time
from src.model import *
from src.util import *
import numpy as np
import pandas as pd
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
# --- Input locations ---------------------------------------------------
label_path = "/public/malware_dataset/kaggle_microsoft_9_10000/"
train_data_path = label_path + "bytes/" # Training data
train_label_path = label_path + "kaggle_microsoft_trainlabels.csv" # Training label
#valid_label_path = label_path + "example-valid-label.csv" # Validation Label
# Experiment name (used to build the log/checkpoint/prediction file names):
exp_name = "malconv-classification"
# --- Hyper-parameters --------------------------------------------------
# os.environ["CUDA_VISIBLE_DEVICES"] = "2" # single-GPU
use_gpu = True # Run the model on GPU
use_cpu = 32 # Number of cores to use for data loader
display_step = 5 # Stdout update rate during training, and interval for saving training results
test_step = 50 # Test per n step
learning_rate = 0.0001
max_step = 1000 # Number of steps to train
batch_size = 768
first_n_byte = (
    100000 # First N bytes of a PE file as the input of MalConv (default: 2 million)
)
window_size = 512 # Kernel size & stride for Malconv (default: 500)
# --- Output paths ------------------------------------------------------
log_dir = "/log/"
pred_dir = "/pred/"
checkpoint_dir = "/checkpoint/"
log_file_path = log_dir + exp_name + ".log"
chkpt_acc_path = checkpoint_dir + exp_name + "1000.pt"
pred_path = pred_dir + exp_name + ".pred"
# Stratified 80/20 train/validation split on the label csv:
df = pd.read_csv(train_label_path)
train, valid, train_label, valid_label = train_test_split(
    df["Id"],
    df["Class"],
    test_size=0.2,
    stratify=df["Class"],
    random_state=100,
)
"""
# Dataset preparation
class ExeDataset(Dataset):
def __init__(self, fp_list, data_path, label_list, first_n_byte=2000000):
self.fp_list = fp_list
self.data_path = data_path
self.label_list = label_list
self.first_n_byte = first_n_byte
def __len__(self):
return len(self.fp_list)
def __getitem__(self, idx):
try:
with open(self.data_path + self.fp_list[idx],'rb') as f:
tmp = [i+1 for i in f.read()[:self.first_n_byte]] # index 0 will be special padding index 每个值加一
tmp = tmp+[0]*(self.first_n_byte-len(tmp))
except:
with open(self.data_path + self.fp_list[idx].lower(),'rb') as f:
tmp = [i+1 for i in f.read()[:self.first_n_byte]]
tmp = tmp+[0]*(self.first_n_byte-len(tmp))
return np.array(tmp), np.array([self.label_list[idx]])
"""
trainset = pd.DataFrame({"id": train, "labels": train_label})
validset = pd.DataFrame({"id": valid, "labels": valid_label})
trainloader = DataLoader(
ExeDataset(
list(trainset["id"]), train_data_path, list(trainset["labels"]), first_n_byte
),
batch_size=batch_size,
shuffle=False,
num_workers=use_cpu,
pin_memory=True,
)
validloader = DataLoader(
ExeDataset(
list(validset["id"]), train_data_path, list(validset["labels"]), first_n_byte
),
batch_size=batch_size,
shuffle=False,
num_workers=use_cpu,
pin_memory=True,
)
# --- Model, losses, optimizer ------------------------------------------
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda:1" if USE_CUDA else "cpu")
malconv = MalConv(input_length=first_n_byte, window_size=window_size)
malconv = nn.DataParallel(malconv, device_ids=[1,2,3]) # multi-GPU
#malconv = MalConvBase(8, 4096, 128, 32)
# NOTE(review): bce_loss and sigmoid are set up but only ce_loss is used
# in the training loop below.
bce_loss = nn.BCEWithLogitsLoss()
ce_loss = nn.CrossEntropyLoss()
adam_optim = optim.Adam([{"params": malconv.parameters()}], lr=learning_rate)
sigmoid = nn.Sigmoid()
if use_gpu:
    malconv = malconv.to(device)
    bce_loss = bce_loss.to(device)
    sigmoid = sigmoid.to(device)
# Message / log format templates:
step_msg = "step-{}-loss-{:.6f}-acc-{:.4f}-time-{:.2f}s"
valid_msg = "step-{}-tr_loss-{:.6f}-tr_acc-{:.4f}-val_loss-{:.6f}-val_acc-{:.4f}"
log_msg = "{}, {:.6f}, {:.4f}, {:.6f}, {:.4f}, {:.2f}"
history = {}
history["tr_loss"] = []
history["tr_acc"] = []
train_acc = [] # store training results
valid_best_acc = 0.0
total_step = 0
step_cost_time = 0
valid_idx = list(validset["id"])
while total_step < max_step:
# Training
for step, batch_data in enumerate(trainloader):
start = time.time()
adam_optim.zero_grad()
cur_batch_size = batch_data[0].size(0)
exe_input = batch_data[0].to(device) if use_gpu else batch_data[0]
exe_input = Variable(exe_input.long(), requires_grad=False)
label = batch_data[1].to(device) if use_gpu else batch_data[1]
label = Variable(label, requires_grad=False)
label = label.squeeze() - 1
pred = malconv(exe_input)
loss = ce_loss(pred, label)
loss.backward()
adam_optim.step()
_, predicted = torch.max(pred.data, 1)
train_Macc = (label.cpu().data.numpy().astype(int) == (predicted.cpu().data.numpy()).astype(int)).sum().item()
train_Macc = train_Macc / cur_batch_size
if (step + 1) % display_step == 0:
print("train:{}".format(train_Macc))
total_step += 1
# Interupt for validation
if total_step % test_step == 0:
break
for step, val_batch_data in enumerate(validloader):
cur_batch_size = val_batch_data[0].size(0)
exe_input = val_batch_data[0].to(device) if use_gpu else val_batch_data[0]
exe_input = Variable(exe_input.long(), requires_grad=False)
label = val_batch_data[1].to(device) if use_gpu else val_batch_data[1]
label = Variable(label, requires_grad=False)
label = label.squeeze() - 1
pred = malconv(exe_input)
loss = ce_loss(pred, label)
# loss.backward()
# adam_optim.step()
_, predicted = torch.max(pred.data, 1)
val_Macc = (label.cpu().data.numpy().astype(int) == (predicted.cpu().data.numpy()).astype(int)).sum().item()
val_Macc = val_Macc / cur_batch_size
if (step + 1) % display_step == 0:
print("test:{}".format(val_Macc))
|
import re
try:
from mock import mock_open
except ImportError:
from unittest.mock import mock_open
from hq.hq import main
from test.common_test_util import simulate_args_dict, wrap_html_body, capture_console_output
def test_preserve_space_flag_turns_off_space_normalization(capsys, mocker):
    """With preserve='s', surrounding whitespace survives; without it, the
    output text is whitespace-normalized."""
    hquery = '`${//p}`'
    content_with_spaces = ' PyCharm rocks! '
    mocker.patch('sys.stdin.read').return_value = wrap_html_body('<p>{0}</p>'.format(content_with_spaces))
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(expression=hquery, preserve='s')
    main()
    actual, _ = capture_console_output(capsys, strip=False)
    assert actual == content_with_spaces
    # Same query without the preserve flag normalizes the whitespace away:
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(expression=hquery, preserve='')
    main()
    actual, _ = capture_console_output(capsys, strip=False)
    assert actual == 'PyCharm rocks!'
def test_preserve_space_flag_causes_non_breaking_spaces_to_be_how_shall_we_say_preserved(capsys, mocker):
    """With preserve='s', U+00A0 stays intact (and the plain space is also
    emitted non-breaking); otherwise it is normalized to a regular space."""
    mocker.patch('sys.stdin.read').return_value = wrap_html_body(u'<p>non\u00a0breaking spaces?</p>')
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(expression='//p/text()', preserve='s')
    main()
    actual, _ = capture_console_output(capsys)
    assert actual == u'non\u00a0breaking\u00a0spaces?'
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(expression='//p/text()', preserve='')
    main()
    actual, _ = capture_console_output(capsys)
    assert actual == u'non breaking spaces?'
def test_ugly_flag_preserves_markup_formatting(capsys, mocker):
    """With --ugly, matched markup is emitted verbatim (no pretty-printing)."""
    expected = '<p>I, too, enjoy PyCharm.</p>'
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(expression='//p', ugly=True)
    mocker.patch('sys.stdin.read').return_value = wrap_html_body(expected)
    main()
    actual, _ = capture_console_output(capsys, strip=False)
    assert actual == expected
def test_syntax_error_prints_proper_error_message(capsys, mocker):
    """A malformed expression produces a syntax-error message on stderr
    instead of raising."""
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(expression='child:://')
    mocker.patch('sys.stdin.read').return_value = wrap_html_body('')
    main()
    _, actual = capture_console_output(capsys)
    assert re.match(r'^syntax error.+expected.+name.+got.+slash', actual.lower())
def test_query_error_prints_proper_error_message(capsys, mocker):
    """Calling an unknown function produces a query-error message on stderr."""
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(expression='no-such-function()')
    mocker.patch('sys.stdin.read').return_value = wrap_html_body('')
    main()
    _, actual = capture_console_output(capsys)
    assert re.match(r'^query error.+unknown function.+no-such-function', actual.lower())
def test_reading_input_from_a_file_instead_of_stdin(capsys, mocker):
    """The file option makes hq read the HTML from the named file, not stdin."""
    expected_filename = 'filename.html'
    mocked_open = mock_open(read_data=wrap_html_body('<p>foo</p>'))
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(
        expression='//p/text()', file=expected_filename)
    # create=True because hq.hq does not define `open` itself (it's a builtin):
    mocker.patch('hq.hq.open', mocked_open, create=True)
    main()
    actual, _ = capture_console_output(capsys)
    mocked_open.assert_called_with(expected_filename)
    assert actual == 'foo'
def test_program_flag_reads_hquery_program_from_file(capsys, mocker):
    """The program option loads the hquery program text from a file while
    the HTML still comes from stdin."""
    expected_filename = 'filename.hq'
    mocked_open = mock_open(read_data='''
//p
->
$_/text()''')
    mocker.patch('hq.hq.docopt').return_value = simulate_args_dict(
        program=expected_filename)
    mocker.patch('sys.stdin.read').return_value = wrap_html_body('<p>foo</p>')
    # create=True because hq.hq does not define `open` itself (it's a builtin):
    mocker.patch('hq.hq.open', mocked_open, create=True)
    main()
    actual, _ = capture_console_output(capsys)
    mocked_open.assert_called_with(expected_filename)
    assert actual == 'foo'
|
# <pep8-80 compliant>
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Add-on metadata:
__author__ = "imdjs, Nutti <nutti.metro@gmail.com>"
__status__ = "production"
__version__ = "5.2"
__date__ = "17 Nov 2018"
import bpy
from bpy.props import BoolProperty, FloatProperty
from ..utils.bl_class_registry import BlClassRegistry
from ..utils.property_class_registry import PropertyClassRegistry
from ..impl import smooth_uv_impl as impl
@PropertyClassRegistry()
class _Properties:
    """Registers/unregisters the Scene properties used by the Smooth UV tool."""
    idname = "smooth_uv"

    @classmethod
    def init_props(cls, scene):
        """Create the tool's properties on the Scene type."""
        # Master on/off toggle for the tool's UI:
        scene.muv_smooth_uv_enabled = BoolProperty(
            name="Smooth UV Enabled",
            description="Smooth UV is enabled",
            default=False
        )
        scene.muv_smooth_uv_transmission = BoolProperty(
            name="Transmission",
            description="Smooth linked UVs",
            default=False
        )
        # 0.0 = ignore mesh vertex positions, 1.0 = fully follow them:
        scene.muv_smooth_uv_mesh_infl = FloatProperty(
            name="Mesh Influence",
            description="Influence rate of mesh vertex",
            min=0.0,
            max=1.0,
            default=0.0
        )
        scene.muv_smooth_uv_select = BoolProperty(
            name="Select",
            description="Select UVs which are smoothed",
            default=False
        )

    @classmethod
    def del_props(cls, scene):
        """Remove the properties registered by init_props."""
        del scene.muv_smooth_uv_enabled
        del scene.muv_smooth_uv_transmission
        del scene.muv_smooth_uv_mesh_infl
        del scene.muv_smooth_uv_select
@BlClassRegistry()
class MUV_OT_SmoothUV(bpy.types.Operator):
    """Operator that smooths UV coordinates; the actual work is delegated
    to impl.SmoothUVImpl."""
    bl_idname = "uv.muv_smooth_uv_operator"
    bl_label = "Smooth"
    bl_description = "Smooth UV coordinates"
    bl_options = {'REGISTER', 'UNDO'}

    # Operator properties mirroring the Scene properties registered above:
    transmission: BoolProperty(
        name="Transmission",
        description="Smooth linked UVs",
        default=False
    )
    mesh_infl: FloatProperty(
        name="Mesh Influence",
        description="Influence rate of mesh vertex",
        min=0.0,
        max=1.0,
        default=0.0
    )
    select: BoolProperty(
        name="Select",
        description="Select UVs which are smoothed",
        default=False
    )

    def __init__(self):
        # One implementation object per operator instance:
        self.__impl = impl.SmoothUVImpl()

    @classmethod
    def poll(cls, context):
        # Only available when the implementation accepts the context:
        return impl.SmoothUVImpl.poll(context)

    def execute(self, context):
        return self.__impl.execute(self, context)
|
# import time
# import socket
import re
from JumpScale import j
class HostFileFactory:
    """JumpScale factory exposed under the ``j.sal.hostsfile`` location."""
    def __init__(self):
        # Location of this factory in the global `j` namespace tree:
        self.__jslocation__ = "j.sal.hostsfile"
    def get(self):
        """Return a new HostFile wrapper around /etc/hosts."""
        return HostFile()
class HostFile:
    """Query and edit an /etc/hosts style file."""

    # Matches a whole hosts-file line for a given (already escaped) ip:
    # "<ip><whitespace><rest of line><optional newline>".  The optional
    # newline lets a final line without a trailing '\n' still match
    # (the original pattern required '\n', which made getNames crash on
    # such a line after existsIP had reported True).
    _LINE_PATTERN = r'^%s\s.*\n?'

    def __init__(self):
        self.hostfilePath = "/etc/hosts"
        self.logger = j.logger.get("j.sal.hostsfile")

    def _findLine(self, ip, filecontents):
        """Return the re.Match for ip's line in filecontents, or None.

        The ip is escaped with re.escape so dots match literally instead of
        acting as regex wildcards (the original code could false-positive,
        e.g. '1.2.3.4' matching '102.3.4').
        """
        return re.search(self._LINE_PATTERN % re.escape(ip), filecontents,
                         re.MULTILINE)

    def remove(self, ip):
        """Delete ip's entry from the hosts file; warn when it is absent.

        @param ip: Ip of the machine to remove
        """
        filecontents = j.sal.fs.fileGetContents(self.hostfilePath)
        searchObj = self._findLine(ip, filecontents)
        if searchObj:
            filecontents = filecontents.replace(searchObj.group(0), '')
            j.sal.fs.writeFile(self.hostfilePath, filecontents)
        else:
            self.logger.warning('Ip address %s not found in hosts file' % ip)

    def existsIP(self, ip):
        """Return True when ip has an entry in the hosts file.

        @param ip: Ip of the machine to check
        """
        filecontents = j.sal.fs.fileGetContents(self.hostfilePath)
        return self._findLine(ip, filecontents) is not None

    def getNames(self, ip):
        """Return the list of hostnames registered for ip ([] when absent).

        @param ip: Ip of the machine to get hostnames from
        @return: List of machine hostnames
        """
        filecontents = j.sal.fs.fileGetContents(self.hostfilePath)
        searchObj = self._findLine(ip, filecontents)
        if searchObj is None:
            return []
        hostnames = searchObj.group(0).strip().split()
        hostnames.pop(0)  # drop the leading ip column
        return hostnames

    def set(self, ip, hostname):
        """Add or replace the hosts-file entry for ip.

        @param ip: Ip of the machine to add/modify
        @param hostname: hostname string (whitespace separated) or list of
            machine hostnames to add/modify
        """
        if isinstance(hostname, str):
            hostname = hostname.split()
        filecontents = j.sal.fs.fileGetContents(self.hostfilePath)
        searchObj = self._findLine(ip, filecontents)
        hostnames = ' '.join(hostname)
        if searchObj:
            filecontents = filecontents.replace(
                searchObj.group(0), '%s %s\n' % (ip, hostnames))
        else:
            filecontents += '%s %s\n' % (ip, hostnames)
        j.sal.fs.writeFile(self.hostfilePath, filecontents)
|
#!/usr/bin/env python
from flask import Flask, send_from_directory
import sys
app = Flask(__name__)
# Default language; can be overridden by the first CLI argument below.
lang = 'en'
@app.route('/')
def show_index():
    """Serve the generated index page for the selected language."""
    # NOTE(review): `basedir` is only assigned inside the __main__ guard, so
    # these views raise NameError if this module is imported (e.g. by a WSGI
    # server) instead of run directly.
    return send_from_directory(basedir,'index.html')
@app.route("/resources/<path:filename>")
def custom_static(filename):
    """Serve static resources from the language's output directory."""
    return send_from_directory(basedir + 'resources/', filename)
@app.route("/<path:page>")
def show_page(page):
    """Serve any other generated page by name."""
    return send_from_directory(basedir,'{}.html'.format(page))
if __name__ == "__main__":
    if len(sys.argv) > 1:
        lang = sys.argv[1]
    basedir = 'output/{lang}/'.format(lang=lang)
    app.run(host='0.0.0.0', debug=True)
|
# Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tests the filter cache file parsing and report generation"""
import unittest
import types
from pysper import parser, dates
from pysper.search.filtercache import generate_recommendations, calculate_eviction_stats
def _build_node(name, avg_evict_freq=10.0, avg_evict_duration=500.0):
node = types.SimpleNamespace()
node.name = name
node.last_evict_item_limit = 64000
node.byte_limit = 10
node.item_limit = 5
node.perc_item_limit = 0.95
node.avg_evict_duration = avg_evict_duration
node.avg_evict_freq = avg_evict_freq
return node
class TestFilterCache(unittest.TestCase):
    """test filter cache"""
    def test_generate_recommendations_with_2_recs(self):
        """With two distinct problems present, the report should note which
        recommendation is most important."""
        node_info = [
            _build_node("node1", avg_evict_freq=10.0),
            _build_node("node2", avg_evict_freq=5.1),
            _build_node("node21", avg_evict_freq=5.1),
            _build_node("node22", avg_evict_freq=5.1),
            _build_node("node23", avg_evict_freq=5.1),
            _build_node("node3", avg_evict_freq=500.0, avg_evict_duration=2120.1),
        ]
        report = []
        generate_recommendations(report, node_info)
        output = "\n".join(report)
        self.assertEqual(
            """recommendations
---------------
NOTE: Do top recommendation first.
* affects nodes: node1, node2, node21
node22, node23
reason: Filter cache evictions are happening too frequently.
fix: Raise filter cache item limit from 64000 to 256000 via -Dsolr.solrfiltercache.maxSize.
* affects nodes: node3
reason: Filter cache eviction duration is too long.
fix: Lower filter cache item limit from 64000 to 32000 via -Dsolr.solrfiltercache.maxSize.
""",
            output,
        )
    def test_generate_recommmendations_with_no_recs(self):
        """Healthy nodes should produce 'No recommendations' and no ordering note."""
        node_info = [
            _build_node("node1", avg_evict_freq=110.0),
            _build_node("node2", avg_evict_freq=55.1),
            _build_node("node3", avg_evict_freq=50, avg_evict_duration=20),
        ]
        report = []
        generate_recommendations(report, node_info)
        self.assertEqual(report[0], "recommendations")
        self.assertEqual(report[1], "---------------")
        self.assertIn("No recommendations\n", report)
        self.assertNotIn("NOTE: Do top recommendation first.", report)
    def test_generate_report_with_1_rec(self):
        """A single recommendation is shown without the ordering note."""
        node_info = [
            _build_node("node1", avg_evict_freq=1.0),
            _build_node("node2", avg_evict_freq=50.1),
            _build_node("node3", avg_evict_freq=50, avg_evict_duration=10),
        ]
        report = []
        generate_recommendations(report, node_info)
        self.assertEqual(report[0], "recommendations")
        self.assertEqual(report[1], "---------------")
        self.assertIn(
            "reason: Filter cache evictions are happening too frequently.", report[2]
        )
        self.assertIn(
            "fix: Raise filter cache item limit from 64000 to 256000 via -Dsolr.solrfiltercache.maxSize.",
            report[2],
        )
        self.assertNotIn("NOTE: Do top recommendation first.", report)
    def test_calculate_eviction_stats(self):
        """Parse both item-limit and byte-limit eviction log pairs, including
        the post-DSP-18693 log format and a start line with no matching
        completion line (which should count as duration 0)."""
        lines = [
            "INFO  [RemoteMessageServer query worker - 81] 2020-01-21 11:34:33,033  SolrFilterCache.java:340 - Filter cache org.apache.solr.search.SolrFilterCache$1@7c723229 has reached 8000000 entries of a maximum of 8000000. Evicting oldest entries...",
            "ERROR [RemoteMessageServer query worker - 18] 2020-01-21 11:34:34,475  MessageServer.java:277 - Failed to process request:",
            "INFO  [RemoteMessageServer query worker - 81] 2020-01-21 11:34:35,448  SolrFilterCache.java:356 - ...eviction completed in 1304 milliseconds. Filter cache org.apache.solr.search.SolrFilterCache$1@7c723229 usage is now 32441266 bytes across 4000000 entries.",
            "INFO  [LocalMessageServer query worker - 77] 2020-01-21 12:24:23,912  SolrFilterCache.java:340 - Filter cache org.apache.solr.search.SolrFilterCache$1@324b2c16 has reached 3999974 entries of a maximum of 8000000. Evicting oldest entries...",
            "INFO  [LocalMessageServer query worker - 77] 2020-01-21 12:24:23,912  SolrFilterCache.java:356 - ...eviction completed in 1 milliseconds. Filter cache org.apache.solr.search.SolrFilterCache$1@324b2c16 usage is now 32005744 bytes across 3999962 entries.",
            "INFO  [RemoteMessageServer query worker - 41] 2020-01-21 12:47:26,942  SolrFilterCache.java:311 - Filter cache org.apache.solr.search.SolrFilterCache$6@5af917a4 has reached 16 GB bytes of off-heap memory usage, the maximum is 16 GB. Evicting oldest entries...",
            "INFO  [RemoteMessageServer query worker - 41] 2020-01-21 12:47:26,950  SolrFilterCache.java:328 - ...eviction completed in 9 milliseconds. Filter cache org.apache.solr.search.SolrFilterCache$6@5af917a4 usage is now 114781220 across 159 entries.",
            # new version of logs, after DSP-18693
            "INFO  [RemoteMessageServer query worker - 41] 2020-01-21 12:47:26,942  SolrFilterCache.java:311 - Filter cache org.apache.solr.search.SolrFilterCache$6@5af917b6 has reached 16 GB bytes of off-heap memory usage, the maximum is 16 GB. Evicting oldest entries...",
            "INFO  [RemoteMessageServer query worker - 41] 2020-01-21 12:47:26,950  SolrFilterCache.java:328 - ...eviction completed in 8 milliseconds. Filter cache org.apache.solr.search.SolrFilterCache$6@5af917b6 usage is now 114781220 bytes across 159 entries.",
            # eviction event without duration log line
            "INFO  [RemoteMessageServer query worker - 41] 2020-01-21 12:47:26,970  SolrFilterCache.java:311 - Filter cache org.apache.solr.search.SolrFilterCache$6@5af917c7 has reached 16 GB bytes of off-heap memory usage, the maximum is 16 GB. Evicting oldest entries...",
        ]
        raw_events = parser.read_system_log(lines)
        after_time = dates.date_parse("2020-01-21 00:00:00,000")
        before_time = dates.date_parse("2020-02-21 00:00:00,000")
        item_ev_stats, bytes_ev_stats = calculate_eviction_stats(
            raw_events, after_time, before_time
        )
        # Two item-limit eviction pairs, three byte-limit events (one of
        # them with no completion line, contributing duration 0):
        assert len(item_ev_stats.values()) == 2
        assert sum([s.duration for s in item_ev_stats.values()]) == 1304 + 1
        assert len(bytes_ev_stats.values()) == 3
        assert sum([s.duration for s in bytes_ev_stats.values()]) == 9 + 8 + 0
|
#!/usr/bin/env python3
import sys
def main():
    """Advent of Code 2021 day 2 ("Dive!").

    Reads a course file (path in sys.argv[1]) of lines "<move> <delta>".
    Part 1: 'forward' moves horizontally; 'down'/'up' change depth.
    Part 2: 'down'/'up' change aim; 'forward' adds delta to position and
    aim * delta to depth.
    Prints horizontal*depth for both interpretations.
    """
    horiz = 0
    depth1 = 0   # part 1 depth
    depth2 = 0   # part 2 depth
    aim = 0      # part 2 aim
    with open(sys.argv[1], 'r') as fhan:
        # Iterate the file lazily instead of readlines(); split() instead of
        # split(' ') so a blank/trailing line no longer crashes the unpack.
        for line in fhan:
            parts = line.split()
            if not parts:
                continue  # tolerate blank lines (e.g. trailing newline)
            if len(parts) != 2:
                print('Unexpected input:', line)
                continue
            move, delta = parts
            if move == 'forward':
                horiz += int(delta)
                depth2 += aim * int(delta)
            elif move == 'down':
                depth1 += int(delta)
                aim += int(delta)
            elif move == 'up':
                depth1 -= int(delta)
                aim -= int(delta)
            else:
                print('Unexpected input:', line)
    print('Part 1:', horiz * depth1)
    print('Part 2:', horiz * depth2)
if __name__ == '__main__':
    main()
|
"""This problem was asked by Microsoft.
Implement the singleton pattern with a twist. First, instead of storing one instance, store two instances.
And in every even call of getInstance(), return the first instance and in every odd call of getInstance(),
return the second instance.
"""
class Signleton():
    """Twisted singleton: holds TWO shared instances and alternates which one
    get_instance() returns (odd-numbered calls -> instance_two, even-numbered
    calls -> instance_one, counting calls from 1).

    Note: the class name keeps the original (misspelled) public name so
    existing callers are unaffected.
    """
    def __init__(self):
        # The two stored instances, created lazily on the first
        # get_instance() call; counter tracks how many calls were made.
        self.instance_one = None
        self.instance_two = None
        self.counter = 0

    def get_instance(self):
        # BUG FIX: the original signature was `def get_instance():` with no
        # `self`, so every call raised TypeError. Logic is otherwise kept.
        if self.counter == 0:
            # Lazily create both instances exactly once.
            self.instance_one = Signleton()
            self.instance_two = Signleton()
        self.counter += 1
        if self.counter % 2 == 0:
            return self.instance_one
        else:
            return self.instance_two
"""
Server layout:
~/services/
This contains two subfolders
/apache/
/supervisor/
which hold the configurations for these applications
for each environment (staging, demo, etc) running on the server.
    These folders are included in the global /etc/apache2 and
/etc/supervisor configurations.
~/www/
This folder contains the code, python environment, and logs
for each environment (staging, demo, etc) running on the server.
    Each environment has its own subfolder named for its environment
(i.e. ~/www/staging/logs and ~/www/demo/logs).
"""
import pdb
import uuid
from fabric.context_managers import settings, cd
from fabric.decorators import hosts
from fabric.operations import require, local, prompt
import os, sys
from fabric.api import run, roles, execute, task, sudo, env, serial, parallel
from fabric.contrib import files, console
from fabric import utils
import posixpath
from collections import defaultdict
# --- Global deployment configuration ---------------------------------------
PROJECT_ROOT = os.path.dirname(__file__)
# File patterns excluded when rsyncing code to the servers.
RSYNC_EXCLUDE = (
    '.DS_Store',
    '.git',
    '*.pyc',
    '*.example',
    '*.db',
)
env.project = 'commcare-hq'
env.code_repo = 'git://github.com/dimagi/commcare-hq.git'
env.home = "/home/cchq"
# Jenkins job used to kick off post-deploy selenium tests.
env.selenium_url = 'http://jenkins.dimagi.com/job/commcare-hq-post-deploy/buildWithParameters?token=%(token)s&TARGET=%(environment)s'
# Default (empty) role map; each environment task below fills in real hosts.
env.roledefs = {
    'django_celery': [],
    'django_app': [],
    'django_public': [],
    'django_pillowtop': [], #for now combined with celery
    'django_monolith': [], # all of the above config - use this ONLY for single server config, lest deploy() will run multiple times in parallel causing bad contentions
    'formsplayer': [],
    'staticfiles': [],
    'remote_es': [], #remote elasticsearch ssh tunnel config
    #package level configs that are not quite config'ed yet in this fabfile
    'couch': [],
    'pg': [],
    'rabbitmq': [],
    'lb': [],
    'deploy': [], #a placeholder to ensure deploy only runs once on a bogus, non functioning task, to split out the real tasks in the execute() block
}
@task
def _setup_path():
    """Derive every remote filesystem path from env.home / env.environment.

    posixpath (not os.path) guarantees unix-style slashes regardless of the
    local OS. See bug-ticket:
    http://code.fabfile.org/attachments/61/posixpath.patch
    """
    join = posixpath.join
    env.root = join(env.home, 'www', env.environment)
    env.log_dir = join(env.home, 'www', env.environment, 'log')
    env.code_root = join(env.root, 'code_root')
    env.code_root_preindex = join(env.root, 'code_root_preindex')
    env.project_root = join(env.code_root, env.project)
    env.project_media = join(env.code_root, 'media')
    env.virtualenv_root = join(env.root, 'python_env')
    env.virtualenv_root_preindex = join(env.root, 'python_env_preindex')
    env.services = join(env.home, 'services')
@task
def _set_apache_user():
    """Record the OS-appropriate apache user name on env.apache_user."""
    # what_os() caches its answer per host, so hoisting the call is safe
    # and avoids a second remote probe.
    os_name = what_os()
    if os_name == 'ubuntu':
        env.apache_user = 'www-data'
    elif os_name == 'redhat':
        env.apache_user = 'apache'
@roles('lb')
def setup_apache_dirs():
    # Create the per-environment apache config dir under ~/services.
    sudo('mkdir -p %(services)s/apache' % env, user=env.sudo_user)
@roles('django_celery', 'django_app', 'staticfiles') #'django_public','formsplayer','staticfiles'
def setup_dirs():
    """ create (if necessary) and make writable uploaded media, log, etc. directories """
    sudo('mkdir -p %(log_dir)s' % env, user=env.sudo_user)
    # world-writable so the app user and apache can both write logs
    sudo('chmod a+w %(log_dir)s' % env, user=env.sudo_user)
    sudo('mkdir -p %(services)s/supervisor' % env, user=env.sudo_user)
    #execute(setup_apache_dirs)
@task
def staging():
    """ use staging environment on remote host"""
    env.code_branch = 'develop'
    env.sudo_user = 'commcare-hq'
    env.environment = 'staging'
    env.server_port = '9002'
    env.server_name = 'noneset'
    env.hosts = ['192.168.56.1']
    env.settings = '%(project)s.localsettings' % env
    env.host_os_map = None
    env.db = '%s_%s' % (env.project, env.environment)
    # Derive all remote paths from the values set above.
    _setup_path()
    env.user = prompt("Username: ", default='dimagivm')
    env.make_bootstrap_command = 'python manage.py make_bootstrap direct-lessc'
@task
def india():
    """Our production server in India."""
    env.home = '/home/commcarehq/'
    env.root = root = '/home/commcarehq'
    env.environment = 'india'
    env.log_root = posixpath.join(root, 'log')
    env.code_branch = 'master'
    env.sudo_user = 'commcarehq'
    env.hosts = ['220.226.209.82']
    env.user = prompt("Username: ", default=env.user)
    env.service_manager = "supervisor"
    env.make_bootstrap_command = 'python manage.py make_bootstrap'
    env.server_port = '8001'
    _setup_path()
    # Overrides the virtualenv paths computed by _setup_path() above:
    # this host keeps its envs under ~/.virtualenvs instead.
    env.virtualenv_root = posixpath.join(root, '.virtualenvs/commcarehq')
    env.virtualenv_root_preindex = posixpath.join(root, '.virtualenvs/commcarehq_preindex')
    # Single-server deployment: everything runs on the monolith host.
    env.roledefs = {
        'couch': [],
        'pg': [],
        'rabbitmq': [],
        'sofabed': [],
        'django_celery': [],
        'django_app': [],
        'django_public': [],
        'django_pillowtop': [],
        'formsplayer': [],
        'remote_es': [],
        'staticfiles': [],
        'lb': [],
        'deploy': [],
        'django_monolith': ['220.226.209.82'],
    }
    env.jython_home = '/usr/local/lib/jython'
    env.roles = ['django_monolith']
@task
def production():
    """ use production environment on remote host"""
    env.code_branch = 'master'
    env.sudo_user = 'cchq'
    env.environment = 'production'
    env.server_port = '9010'
    #env.hosts = None
    # Multi-server deployment: map each role to its internal host(s).
    env.roledefs = {
        'couch': ['hqdb.internal.commcarehq.org'],
        'pg': ['hqdb.internal.commcarehq.org'],
        'rabbitmq': ['hqdb.internal.commcarehq.org'],
        'sofabed': ['hqdb.internal.commcarehq.org'], #todo, right now group it with celery
        'django_celery': ['hqdb.internal.commcarehq.org'],
        'django_app': ['hqdjango0.internal.commcarehq.org', 'hqdjango2.internal.commcarehq.org'],
        'django_public': ['hqdjango1.internal.commcarehq.org',],
        'django_pillowtop': ['hqdb.internal.commcarehq.org'],
        'remote_es': ['hqdb.internal.commcarehq.org', 'hqdjango0.internal.commcarehq.org',
                      'hqdjango1.internal.commcarehq.org', 'hqdjango2.internal.commcarehq.org'],
        'formsplayer': ['hqdjango0.internal.commcarehq.org'],
        'lb': [], #todo on apache level config
        'staticfiles': ['hqproxy0.internal.commcarehq.org'],
        'deploy': ['hqdb.internal.commcarehq.org'], #this is a stub becuaue we don't want to be prompted for a host or run deploy too many times
        'django_monolith': [] # fab complains if this doesn't exist
    }
    env.server_name = 'commcare-hq-production'
    env.settings = '%(project)s.localsettings' % env
    env.host_os_map = None # e.g. 'ubuntu' or 'redhat'. Gets autopopulated by what_os() if you don't know what it is or don't want to specify.
    env.db = '%s_%s' % (env.project, env.environment)
    env.roles = ['deploy', ]
    env.jython_home = '/usr/local/lib/jython'
    _setup_path()
@task
def install_packages():
    """Install the OS packages listed in the per-OS requirements file.

    Reads requirements/apt-packages.txt (Ubuntu) or yum-packages.txt
    (RedHat) and installs everything in one package-manager invocation.
    """
    require('environment', provided_by=('staging', 'production'))
    os_name = what_os()
    if os_name == 'ubuntu':
        packages_list = 'apt-packages.txt'
        installer_command = 'apt-get install -y'
    elif os_name == 'redhat':
        packages_list = 'yum-packages.txt'
        installer_command = 'yum install -y'
    else:
        # BUG FIX: the original had a bare `return` after the elif chain,
        # which (at function level) skipped the install entirely. Bail out
        # only when the OS is unrecognized, so we never run an empty command.
        return
    packages_file = posixpath.join(PROJECT_ROOT, 'requirements', packages_list)
    with open(packages_file) as f:
        packages = f.readlines()
    sudo("%s %s" % (installer_command,
                    " ".join(p.strip('\n\r') for p in packages)))
@task
@roles('django_app','django_celery','staticfiles')
@parallel
def upgrade_packages():
    """
    Bring all the installed packages up to date.
    This is a bad idea in RedHat as it can lead to an
    OS Upgrade (e.g RHEL 5.1 to RHEL 6).
    Should be avoided. Run install packages instead.
    """
    require('environment', provided_by=('staging', 'production'))
    if what_os() == 'ubuntu':
        # shell=False: run the command directly under sudo, not via a shell
        sudo("apt-get update", shell=False)
        sudo("apt-get upgrade -y", shell=False)
    else:
        return #disabled for RedHat (see docstring)
@task
def what_os():
    """Return 'ubuntu' or 'redhat' for the current host, probing remote
    release files once and caching the answer per host in env.host_os_map."""
    with settings(warn_only=True):
        require('environment', provided_by=('staging','production'))
        if env.host_os_map is None:
            #prior use case of setting a env.remote_os did not work when doing multiple hosts with different os! Need to keep state per host!
            env.host_os_map = defaultdict(lambda: '')
        if env.host_os_map[env.host_string] == '':
            print 'Testing operating system type...'
            if(files.exists('/etc/lsb-release',verbose=True) and files.contains(text='DISTRIB_ID=Ubuntu', filename='/etc/lsb-release')):
                remote_os = 'ubuntu'
                print 'Found lsb-release and contains "DISTRIB_ID=Ubuntu", this is an Ubuntu System.'
            elif(files.exists('/etc/redhat-release',verbose=True)):
                remote_os = 'redhat'
                print 'Found /etc/redhat-release, this is a RedHat system.'
            else:
                print 'System OS not recognized! Aborting.'
                exit()
            env.host_os_map[env.host_string] = remote_os
        return env.host_os_map[env.host_string]
#@parallel
@roles('pg','django_celery','django_app','staticfiles', 'django_monolith')
@task
def setup_server():
    """Set up a server for the first time in preparation for deployments."""
    require('environment', provided_by=('staging', 'production', 'india'))
    # Install required system packages for deployment, plus some extras
    # Install pip, and use it to install virtualenv
    install_packages()
    sudo("easy_install -U pip", user=env.sudo_user)
    sudo("pip install -U virtualenv", user=env.sudo_user)
    upgrade_packages()
    # DB setup runs only on the 'pg' role hosts via execute().
    execute(create_db_user)
    execute(create_db)
@roles('pg')
def create_db_user():
    """Create the Postgres user."""
    # -D/-R: no createdb/createrole rights; -A: no superuser (legacy flag)
    sudo('createuser -D -A -R %(sudo_user)s' % env, user='postgres')
    require('environment', provided_by=('staging', 'production'))
@roles('pg')
def create_db():
    """Create the Postgres database."""
    require('environment', provided_by=('staging', 'production'))
    # -O: make the app user the owner of the new database
    sudo('createdb -O %(sudo_user)s %(db)s' % env, user='postgres')
@task
def bootstrap():
    """Initialize remote host environment (virtualenv, deploy, update) """
    require('root', provided_by=('staging', 'production'))
    sudo('mkdir -p %(root)s' % env, shell=False, user=env.sudo_user)
    # Order matters: clone before virtualenv/env update, dirs before
    # supervisor conf generation.
    execute(clone_repo)
    update_code()
    execute(create_virtualenv)
    execute(update_env)
    execute(setup_dirs)
    execute(generate_supervisorconf_file)
    execute(fix_locale_perms)
#@parallel
@roles('django_celery', 'django_app', 'staticfiles', 'django_monolith') #'django_public', 'formsplayer'
def create_virtualenv():
    """ setup virtualenv on remote host """
    require('virtualenv_root', provided_by=('staging', 'production', 'india'))
    # Destroy any previous env first; warn_only so a missing dir isn't fatal.
    with settings(warn_only=True):
        sudo('rm -rf %(virtualenv_root)s' % env, user=env.sudo_user)
    args = '--clear --distribute --no-site-packages'
    sudo('virtualenv %s %s' % (args, env.virtualenv_root), user=env.sudo_user)
#@parallel
@roles('django_celery', 'django_app', 'staticfiles', 'django_monolith') #'django_public', 'formsplayer'
def clone_repo():
    """ clone a new copy of the git repository """
    with settings(warn_only=True):
        with cd(env.root):
            # Idempotent: only clone when code_root doesn't exist yet.
            if not files.exists(env.code_root):
                sudo('git clone %(code_repo)s %(code_root)s' % env, user=env.sudo_user)
            with cd(env.code_root):
                sudo('git submodule init', user=env.sudo_user)
@task
@roles('pg', 'django_monolith')
def preindex_views():
    """Update the preindex checkout and kick off couchdb view indexing
    in the background (via `at`) so deploy() finds warm views later."""
    with cd(env.code_root_preindex):
        #update the codebase of the preindex dir...
        update_code(preindex=True)
        update_env(preindex=True) #no update to env - the actual deploy will do - this may break if a new dependency is introduced in preindex
        # Schedule the (long-running) sync via `at` so this task returns
        # immediately; %% escapes are literal % for the date format string.
        sudo('echo "%(virtualenv_root_preindex)s/bin/python %(code_root_preindex)s/manage.py \
sync_prepare_couchdb_multi 8 %(user)s" | at -t `date -d "5 seconds" \
+%%m%%d%%H%%M.%%S`' % env, user=env.sudo_user)
@roles('django_app','django_celery', 'staticfiles', 'django_public', 'django_monolith')#,'formsplayer')
@parallel
def update_code(preindex=False):
    """Check out / pull the configured branch (and submodules) in either
    the main code_root or, when preindex=True, the preindex checkout."""
    if preindex:
        root_to_use = env.code_root_preindex
    else:
        root_to_use = env.code_root
    with cd(root_to_use):
        sudo('git checkout %(code_branch)s' % env, user=env.sudo_user)
        sudo('git pull', user=env.sudo_user)
        sudo('git submodule sync', user=env.sudo_user)
        sudo('git submodule update --init --recursive', user=env.sudo_user)
@task
def deploy():
    """ deploy code to remote host by checking out the latest via git """
    # Double confirmation: deploys are disruptive and preindex_views should
    # have been run beforehand so couch views are already built.
    if not console.confirm('Are you sure you want to deploy {env.environment}?'.format(env=env), default=False) or \
       not console.confirm('Did you run "fab {env.environment} preindex_views"? '.format(env=env), default=False):
        utils.abort('Deployment aborted.')
    require('root', provided_by=('staging', 'production', 'india'))
    run('echo ping!') #hack/workaround for delayed console response
    try:
        execute(update_code)
        execute(update_env)
        execute(clear_services_dir)
        upload_and_set_supervisor_config()
        execute(migrate)
        execute(_do_collectstatic)
        execute(version_static)
    finally:
        # hopefully bring the server back to life if anything goes wrong
        execute(services_restart)
@task
@roles('django_app','django_celery','staticfiles', 'django_public', 'django_monolith')#,'formsplayer')
@parallel
def update_env(preindex=False):
    """ update external dependencies on remote host assumes you've done a code update"""
    require('code_root', provided_by=('staging', 'production', 'india'))
    if preindex:
        root_to_use = env.code_root_preindex
        env_to_use = env.virtualenv_root_preindex
    else:
        root_to_use = env.code_root
        env_to_use = env.virtualenv_root
    # BUG FIX: requirements were always read from env.code_root, even when
    # updating the preindex checkout — use the checkout we're operating on,
    # otherwise the preindex virtualenv is built from the wrong requirements.
    requirements = posixpath.join(root_to_use, 'requirements')
    with cd(root_to_use):
        cmd = ['%s/bin/pip install' % env_to_use]
        cmd += ['--requirement %s' % posixpath.join(requirements, 'prod-requirements.txt')]
        cmd += ['--requirement %s' % posixpath.join(requirements, 'requirements.txt')]
        sudo(' '.join(cmd), user=env.sudo_user)
@roles('lb')
def touch_apache():
    """Touch the per-environment apache.conf so apache picks up changes."""
    require('code_root', provided_by=('staging', 'production'))
    # posixpath.join is variadic; no need to nest two join() calls.
    conf_path = posixpath.join(env.services, 'apache', 'apache.conf')
    sudo('touch %s' % conf_path, user=env.sudo_user)
@roles('django_celery', 'django_app', 'django_monolith')
def touch_supervisor():
    """Touch the per-environment supervisor.conf and run `supervisorctl
    update` so supervisor reloads the latest configuration."""
    require('code_root', provided_by=('staging', 'production'))
    # posixpath.join is variadic; no need to nest two join() calls.
    conf_path = posixpath.join(env.services, 'supervisor', 'supervisor.conf')
    sudo('touch %s' % conf_path, user=env.sudo_user)
    _supervisor_command('update')
@roles('django_app', 'django_celery', 'django_public', 'django_monolith')# 'formsplayer')
@parallel
def clear_services_dir():
    #remove old confs from directory first
    # Glob matches only the generated supervisor_*.conf files, leaving the
    # base supervisor.conf alone.
    services_dir = posixpath.join(env.services, u'supervisor', 'supervisor_*.conf')
    sudo('rm -f %s' % services_dir, user=env.sudo_user)
@roles('lb')
def configtest():
    """ test Apache configuration """
    require('root', provided_by=('staging', 'production'))
    # NOTE(review): assumes Debian/Ubuntu apache2ctl; no redhat branch here.
    sudo('apache2ctl configtest')
@roles('lb')
def apache_reload():
    """Reload Apache on the load-balancer host, using the init script
    appropriate for the detected OS."""
    require('root', provided_by=('staging', 'production'))
    # what_os() caches per host, so a single hoisted call is equivalent.
    os_name = what_os()
    if os_name == 'redhat':
        sudo('/etc/init.d/httpd reload')
    elif os_name == 'ubuntu':
        sudo('/etc/init.d/apache2 reload')
@roles('lb')
def apache_restart():
    """ restart Apache on remote host """
    require('root', provided_by=('staging', 'production'))
    # NOTE(review): unlike apache_reload() this hard-codes the Ubuntu init
    # script and would fail on redhat — confirm lb hosts are always Ubuntu.
    sudo('/etc/init.d/apache2 restart')
@task
def netstat_plnt():
    """ run netstat -plnt on a remote host """
    require('hosts', provided_by=('production', 'staging'))
    # -p: owning pid, -l: listening, -n: numeric, -t: tcp only
    sudo('netstat -plnt')
############################################################3
#Start service functions
@roles('django_app', 'django_celery','django_public','django_monolith')# 'formsplayer'
def services_start():
    ''' Start the gunicorn servers '''
    require('environment', provided_by=('staging', 'demo', 'production'))
    # update+reload first so supervisor has the latest conf before starting
    _supervisor_command('update')
    _supervisor_command('reload')
    _supervisor_command('start all')
######################################################
########################################################
#Stop service Functions
@roles('django_app', 'django_celery','django_public', 'django_monolith')#, 'formsplayer')
def services_stop():
    ''' Stop the gunicorn servers '''
    require('environment', provided_by=('staging', 'demo', 'production'))
    _supervisor_command('stop all')
###########################################################
@roles('django_app', 'django_celery','django_public', 'django_monolith')#, 'formsplayer')
def services_restart():
    ''' Stop and restart all supervisord services'''
    require('environment', provided_by=('staging', 'demo', 'production', 'india'))
    _supervisor_command('stop all')
    # reload the (possibly regenerated) conf before starting back up
    _supervisor_command('update')
    _supervisor_command('reload')
    _supervisor_command('start all')
#
@roles('django_celery','django_monolith')
def migrate():
    """ run south migration on remote environment """
    require('code_root', provided_by=('production', 'demo', 'staging', "india"))
    with cd(env.code_root):
        # finish couch view sync started by preindex_views, then SQL syncs
        sudo('%(virtualenv_root)s/bin/python manage.py sync_finish_couchdb' % env, user=env.sudo_user)
        sudo('%(virtualenv_root)s/bin/python manage.py syncdb --noinput' % env, user=env.sudo_user)
        sudo('%(virtualenv_root)s/bin/python manage.py migrate --noinput' % env, user=env.sudo_user)
@roles('staticfiles', 'django_monolith')
def _do_collectstatic():
    """
    Collect static after a code update
    """
    with cd(env.code_root):
        # rebuild bootstrap assets before collecting static files
        sudo('%(virtualenv_root)s/bin/python manage.py make_bootstrap' % env, user=env.sudo_user)
        sudo('%(virtualenv_root)s/bin/python manage.py collectstatic --noinput' % env, user=env.sudo_user)
@roles('django_app', 'django_monolith')
@parallel
def version_static():
    """
    Put refs on all static references to prevent stale browser cache hits when things change.
    This needs to be run on the WEB WORKER since the web worker governs the actual static
    reference.
    """
    with cd(env.code_root):
        # printstatic emits a shell script that computes resource versions;
        # running it regenerates resource_versions.py for cache-busting.
        sudo('rm -f tmp.sh resource_versions.py; %(virtualenv_root)s/bin/python manage.py \
printstatic > tmp.sh; bash tmp.sh > resource_versions.py' % env, user=env.sudo_user)
@task
@roles('staticfiles',)
def collectstatic():
    """ run collectstatic on remote environment """
    require('code_root', provided_by=('production', 'demo', 'staging'))
    # refresh the checkout first so we collect the latest assets
    update_code()
    _do_collectstatic()
@task
def reset_local_db():
    """ Reset local database from remote host """
    require('code_root', provided_by=('production', 'staging'))
    if env.environment == 'production':
        utils.abort('Local DB reset is for staging environment only')
    question = 'Are you sure you want to reset your local '\
               'database with the %(environment)s database?' % env
    sys.path.append('.')
    if not console.confirm(question, default=False):
        utils.abort('Local database reset aborted.')
    # NOTE(review): `loc` and `remote` are not defined anywhere in this
    # fabfile — presumably local/remote Django DATABASES settings are meant
    # to be imported after the sys.path tweak above; as written this raises
    # NameError. Verify before relying on this task.
    local_db = loc['default']['NAME']
    remote_db = remote['default']['NAME']
    with settings(warn_only=True):
        local('dropdb %s' % local_db)
    local('createdb %s' % local_db)
    host = '%s@%s' % (env.user, env.hosts[0])
    # stream a dump over ssh straight into the freshly created local db
    local('ssh -C %s sudo -u commcare-hq pg_dump -Ox %s | psql %s' % (host, remote_db, local_db))
@task
def fix_locale_perms():
    """ Fix the permissions on the locale directory """
    require('root', provided_by=('staging', 'production'))
    _set_apache_user()
    locale_dir = '%s/commcare-hq/locale/' % env.code_root
    # owned by the app user, group-writable by apache so translations
    # can be compiled/updated by either
    sudo('chown -R %s %s' % (env.sudo_user, locale_dir), user=env.sudo_user)
    sudo('chgrp -R %s %s' % (env.apache_user, locale_dir), user=env.sudo_user)
    sudo('chmod -R g+w %s' % (locale_dir), user=env.sudo_user)
@task
def commit_locale_changes():
    """ Commit locale changes on the remote server and pull them in locally """
    fix_locale_perms()
    with cd(env.code_root):
        # NOTE(review): the command string starts with sudo flags ('-H -u'),
        # i.e. these run as `sudo -H -u <user> git ...` — confirm fabric's
        # sudo() composes this as intended rather than treating '-H' as the
        # program to run.
        sudo('-H -u %s git add commcare-hq/locale' % env.sudo_user, user=env.sudo_user)
        sudo('-H -u %s git commit -m "updating translation"' % env.sudo_user, user=env.sudo_user)
    local('git pull ssh://%s%s' % (env.host, env.code_root))
def _upload_supervisor_conf_file(filename):
    """Render a supervisor conf template with the env context, upload it to
    /tmp, fix ownership/permissions, then move it into ~/services/supervisor
    where the main supervisord.conf includes it."""
    upload_dict = {}
    upload_dict["template"] = posixpath.join(os.path.dirname(__file__), 'services', 'templates', filename)
    upload_dict["destination"] = '/tmp/%s.blah' % filename
    upload_dict["enabled"] = posixpath.join(env.services, u'supervisor/%s' % filename)
    upload_dict["main_supervisor_conf_dir"] = '/etc'
    # templates are interpolated against the fabric env dict
    files.upload_template(upload_dict["template"], upload_dict["destination"], context=env, use_sudo=False)
    sudo('chown -R %s %s' % (env.sudo_user, upload_dict["destination"]), shell=False)
    #sudo('chgrp -R %s %s' % (env.apache_user, upload_dict["destination"]))
    sudo('chmod -R g+w %(destination)s' % upload_dict, shell=False)
    sudo('mv -f %(destination)s %(enabled)s' % upload_dict, shell=False)
@roles('django_celery', 'django_monolith')
def upload_celery_supervisorconf():
    # celery worker, beat scheduler, and monitor configs
    _upload_supervisor_conf_file('supervisor_celery.conf')
    _upload_supervisor_conf_file('supervisor_celerybeat.conf')
    _upload_supervisor_conf_file('supervisor_celerymon.conf')
    _upload_supervisor_conf_file('supervisor_couchdb_lucene.conf') #to be deprecated
    #in reality this also should be another machine if the number of listeners gets too high
    _upload_supervisor_conf_file('supervisor_pillowtop.conf')
@roles('django_celery', 'django_monolith')
def upload_sofabed_supervisorconf():
    # sofabed runs alongside celery (see production roledefs)
    _upload_supervisor_conf_file('supervisor_sofabed.conf')
@roles('django_app', 'django_monolith')
def upload_djangoapp_supervisorconf():
    # gunicorn/django web worker config
    _upload_supervisor_conf_file('supervisor_django.conf')
@roles('remote_es')
def upload_elasticsearch_supervisorconf():
    # ssh-tunnel config for remote elasticsearch hosts
    _upload_supervisor_conf_file('supervisor_elasticsearch.conf')
@roles('django_public')
def upload_django_public_supervisorconf():
    # public-facing django worker plus the domain-sync job
    _upload_supervisor_conf_file('supervisor_django_public.conf')
    _upload_supervisor_conf_file('supervisor_sync_domains.conf')
@roles('formsplayer', 'django_monolith')
def upload_formsplayer_supervisorconf():
    _upload_supervisor_conf_file('supervisor_formsplayer.conf')
def upload_and_set_supervisor_config():
    """Upload and link Supervisor configuration from the template."""
    require('environment', provided_by=('staging', 'demo', 'production', 'india'))
    # Each upload_* task is role-restricted, so execute() fans each one out
    # only to the hosts that actually run that service.
    execute(upload_celery_supervisorconf)
    execute(upload_sofabed_supervisorconf)
    execute(upload_djangoapp_supervisorconf)
    execute(upload_elasticsearch_supervisorconf)
    execute(upload_django_public_supervisorconf)
    execute(upload_formsplayer_supervisorconf)
    #generate_supervisorconf_file()
def generate_supervisorconf_file():
    #regenerate a brand new supervisor conf file from scratch.
    #Set the line in the supervisord config file that points to our project's supervisor directory with conf files
    replace_dict = {}
    replace_dict["main_supervisor_conf_dir"] = '/etc'
    # unique temp filename so concurrent runs can't clobber each other
    replace_dict["tmp"] = posixpath.join('/','tmp', "supervisord_%s.tmp" % uuid.uuid4().hex)
    #create brand new one
    sudo("echo_supervisord_conf > %(tmp)s" % replace_dict)
    # enable the [include] section, then point it at our per-env conf dir
    files.uncomment(replace_dict['tmp'], "^;\[include\]", use_sudo=True, char=';')
    files.sed(replace_dict["tmp"], ";files = relative/directory/\*\.ini", "files = %s/supervisor/*.conf" % env.services, use_sudo=True)
    #sudo('mv -f %(tmp)s %(main_supervisor_conf_dir)s/supervisord.conf' % replace_dict)
    sudo('cp -f %(tmp)s %(main_supervisor_conf_dir)s/supervisord.conf' % replace_dict)
def _supervisor_command(command):
    """Run `supervisorctl <command>` on the remote host as root."""
    require('hosts', provided_by=('staging', 'production'))
    #if what_os() == 'redhat':
        #cmd_exec = "/usr/bin/supervisorctl"
    #elif what_os() == 'ubuntu':
        #cmd_exec = "/usr/local/bin/supervisorctl"
    # relies on supervisorctl being on PATH for both OS flavors
    sudo('supervisorctl %s' % (command), shell=False)
# tests
@task
def selenium_test():
    """Trigger the post-deploy selenium Jenkins job for this environment."""
    require('environment', provided_by=('staging', 'demo', 'production', 'india'))
    prompt("Jenkins username:", key="jenkins_user", default="selenium")
    prompt("Jenkins password:", key="jenkins_password")
    # NOTE(review): token is hard-coded to "foobar" — confirm the Jenkins
    # job's build token before relying on this.
    url = env.selenium_url % {"token": "foobar", "environment": env.environment}
    local("curl --user %(user)s:%(pass)s '%(url)s'" % \
          {'user': env.jenkins_user, 'pass': env.jenkins_password, 'url': url})
|
from setuptools import setup
# Packaging metadata for the datetime-interval distribution.
setup(
    name='datetime-interval',
    version='0.2',
    description='A representation of a duration of time',
    long_description=open('README.rst').read(),
    author='Kyle Marek-Spartz',
    author_email='kyle.marek.spartz@gmail.com',
    url='https://www.github.com/zeckalpha/datetime-interval',
    include_package_data=True,
    packages=['datetime_interval'],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities"
    ],
    license='MIT'
)
|
"""
Returns a NSGP model object.
"""
import GPy
import tqdm
from _core import *
import itertools
import time
from pyDOE import *
import scipy
from scipy.stats import multivariate_normal
from scipy.stats import norm
from scipy.optimize import check_grad
__all__ = ['NSGPModel']
class NSGPModel(object):
"""
NSGP model object.
"""
    def __init__(self, X, Y, X_m, X_val, Y_val,
                 lat_model_kern=GPy.kern.Matern32,
                 num_lat_points=5,
                 l_params_bounds=None,
                 nugget=1e-3,
                 jitter=1e-4,
                 num_designs_l_hyp=50,
                 num_opt_restarts=20,
                 opt_bounds=None):
        """Non-stationary GP model.

        :param X: training inputs, shape (n, dim)
        :param Y: training targets, shape (n, num_obj)
        :param X_m: locations of the latent (lengthscale/noise) GP's
            inducing/support points, shape (m, dim)
        :param X_val, Y_val: validation set; when X_val is None the
            training set is reused for validation
        :param lat_model_kern: GPy kernel class for the latent GPs
        :param l_params_bounds: bounds for the latent GP hyper-parameters
            (noise variance, signal strength, lengthscale), in that order
        :param nugget, jitter: diagonal regularizers for ill-conditioned
            covariance matrices
        :param num_opt_restarts: random restarts for the outer optimization
        """
        assert X.ndim == 2
        self.X = X
        assert Y.ndim == 2
        self.Y = Y
        assert self.X.shape[0] == self.Y.shape[0]
        if X_val is not None:
            self.X_val = X_val
            self.Y_val = Y_val
        else:
            # fall back to validating on the training data
            self.X_val = X
            self.Y_val = Y
        self.dim = self.X.shape[1]
        self.num_obj = self.Y.shape[1]
        self.X_m = X_m
        # num_lat_points is taken from X_m, not the keyword argument
        self.num_lat_points = self.X_m.shape[0]
        if l_params_bounds is not None:
            self.l_params_bounds = l_params_bounds
        else:
            self.l_params_bounds = [(1e-4, 1e-6), # noise variance of latent GP
                                    (0.0, 1.), # signal strength of latent GP
                                    (0.0, 1.)]
        self.nugget = nugget
        self.jitter = jitter
        self.lat_model_kern = lat_model_kern
        self.num_opt_restarts = num_opt_restarts
        self.num_designs_l_hyp = num_designs_l_hyp
        self.opt_bounds = opt_bounds
        # default latent-GP hyper-parameters (noise, signal strength, ell)
        self._sigma_l = 1e-3
        self._ss_l = 1.
        self._ell_l = 0.3
        self._lengthscale_factor = 0.5
        self._nugget_factor = 0.5
        self._signalstrength_factor = 1.
        # cached inverse of the marginal covariance; filled in later
        self.A_inv = None
    def kern_mat(self, xi, xj):
        """
        Computes an ```nxn``` matrix whose elements are the kernel values
        for the two input arrays (prior covariance).
        NOTE(review): delegates to ``self.model[0].kern`` — ``self.model``
        is not set in the visible code; presumably assigned elsewhere.
        :param xi: array of input(s)
        :param xj: array of input(s)
        """
        k = self.model[0].kern.K(xi, xj)
        return k
def get_dist_mat(self, X_i, X_j=None):
"""
:param X_i: array of inputs
:param X_j: array of an input
"""
if X_j is None:
return scipy.spatial.distance.cdist(X_i, X_i) ** 2
else:
return scipy.spatial.distance.cdist(X_i, X_j) ** 2
    def make_model_l(self, l_lat=None, l_gp_params=None):
        """
        A stationary GP model for the (log-)lengthscale process.
        :param l_lat: latent-lengthscale values at the support points X_m
        :param l_gp_params: hyper-parameters of the lengthscale GP, in the
            order (noise std, signal-strength, lengthscale(s)); all are
            fixed (not optimized) on the returned model
        """
        lat_m = GPy.models.GPRegression(self.X_m, np.atleast_2d(l_lat), self.lat_model_kern(input_dim=self.dim, ARD=True))
        # hyper-parameters are supplied, so fix them rather than fitting
        lat_m.kern.lengthscale.fix(l_gp_params[2:], warning=False)
        lat_m.kern.variance.fix(l_gp_params[1] ** 2., warning=False)
        lat_m.likelihood.variance.fix(l_gp_params[0] ** 2., warning=False)
        return lat_m
    def make_model_n(self, n_lat=None, n_gp_params=None):
        """
        A stationary GP model for the (log-)noise process.
        (Docstring corrected: the original was copy-pasted from
        make_model_l and described the lengthscale process.)
        :param n_lat: latent-noise values at the support points X_m
        :param n_gp_params: hyper-parameters of the noise GP, in the order
            (noise std, signal-strength, lengthscale(s)); all are fixed
        """
        lat_m = GPy.models.GPRegression(self.X_m, np.atleast_2d(n_lat), self.lat_model_kern(input_dim=self.dim, ARD=True))
        lat_m.kern.lengthscale.fix(n_gp_params[2:], warning=False)
        lat_m.kern.variance.fix(n_gp_params[1] ** 2., warning=False)
        lat_m.likelihood.variance.fix(n_gp_params[0] ** 2., warning=False)
        return lat_m
    def grad_log_obj(self, params, l_gp_params, n_gp_params):
        """
        Gradients of the objective function wrt. the parameters ```l_bar```,
        and the hyper-parameters of the non-stationary GP (excluding the
        hyper-parameters of the latent-lengthscale process).
        :param params: parameters to be calibrated in the inner loop —
            concatenation of [latent lengthscales (m), latent noises (m),
            signal strength ss_f], hence length 2*m + 1
        :param l_gp_params: hyper-params of the lengthscale GP
            (noise std, signal strength, lengthscale)
        :param n_gp_params: hyper-params of the noise GP (same order)
        :returns: 0.5 * stacked gradients [d/dl_lat, d/dn_lat, d/dss_f]
        """
        grads_l = []
        grads_sigma_f = []
        assert len(params) == 2 * self.num_lat_points + 1
        # unpack the flat parameter vector
        l_lat = params[:self.num_lat_points]
        n_lat = params[self.num_lat_points:2 * self.num_lat_points]
        ss_f = params[-1]
        self.model_l = self.make_model_l(l_lat=l_lat[:, None], l_gp_params=l_gp_params)
        self.model_n = self.make_model_n(n_lat=n_lat[:, None], n_gp_params=n_gp_params)
        # latent GPs model log-lengthscale / log-noise; exponentiate back
        l = np.exp(self.model_l.predict(self.X)[0])
        n = np.exp(self.model_n.predict(self.X)[0])
        assert len(l) == len(n)
        # Gibbs-style non-stationary kernel pieces: row/column lengthscale
        # products and their dim-powers
        p = l ** 2.
        p_r = np.matmul(p, np.ones((1, len(p))))
        p_c = np.matmul(np.ones((1, len(p))).T, p.T)
        P_r = np.matmul(p, np.ones((1, len(p)))) ** (1. * self.dim)
        P_c = np.matmul(np.ones((1, len(p))).T, p.T) ** (1. * self.dim)
        P_s = (p_r + p_c) ** (1. * self.dim)
        P_d = p_r + p_c
        s_X = self.get_dist_mat(self.X)
        E = np.exp(((-1. * s_X) / P_d)) # TODO: Make sure the denominator has ```P_d```. Comment: DONE
        K_x_x = (ss_f ** 2.) * (np.sqrt(2.) ** self.dim) * np.multiply(np.multiply(np.multiply((P_r ** (1 / 4.)), (P_c ** (1 / 4.))), (P_s ** (-1 / 2.))), E)
        K_x_x_l = self.model_l.kern.K(self.X_m, self.X_m)
        K_x_x_n = self.model_n.kern.K(self.X_m, self.X_m)
        # A: marginal covariance of Y; B, C: latent GPs' covariances
        A = K_x_x + np.multiply(n ** 2, np.eye(n.shape[0]))
        B = K_x_x_l + (l_gp_params[0] ** 2.) * np.eye(K_x_x_l.shape[0])
        C = K_x_x_n + (n_gp_params[0] ** 2.) * np.eye(K_x_x_n.shape[0])
        try:
            A_inv = np.linalg.inv(A)
            B_inv = np.linalg.inv(B)
            C_inv = np.linalg.inv(C)
        except:
            # retry once with added jitter before giving up to the debugger
            print ">... trying with a larger jitter"
            try:
                A_inv = np.linalg.inv(A + self.jitter * np.eye(A.shape[0]))
                B_inv = np.linalg.inv(B + self.jitter * np.eye(B.shape[0]))
                C_inv = np.linalg.inv(C + self.jitter * np.eye(C.shape[0]))
            except:
                import pdb
                pdb.set_trace()
        # A_inv = np.linalg.inv(A)
        # B_inv = np.linalg.inv(B) # same as ```self.model_l.posterior.woodbury_inv```
        # grad_obj_sigma_f = (2. * sigma_f) * (-1. * np.matmul(np.matmul(self.Y.T, A_inv), np.matmul(A_inv, self.Y)) + np.trace(A_inv)) # gradient wrt the noise variacne of the GP
        # The following gradient is different than the one in the paper
        grad_obj_ss_f = (2. / ss_f) * (-1. * np.matmul(np.matmul(np.matmul(self.Y.T, A_inv), K_x_x), np.matmul(A_inv, self.Y)) + np.trace(np.matmul(A_inv, K_x_x))) # gradient wrt the signal strength of the GP
        # Now we code the gradients of the negative of the objective function wrt. the latent lengthscales
        #K_x_X = np.vstack([self.model_l.kern.K(np.atleast_2d(x), self.X_m) for x in self.X]) # nxm to be multiplied with B_inv
        K_x_X = self.model_l.kern.K(self.X, self.X_m)
        W = np.matmul(K_x_X, B_inv)
        M = np.matmul(K_x_X, C_inv)
        # per-support-point gradients via the chain rule through K_x_x
        for j in xrange(len(l_lat)):
            # sigma_f_grad_fac = np.ones(len(l_lat))
            grad_pr = 2. * np.multiply((W[:, j])[:, None], p_r)
            grad_pr_l_j = (self.dim * 2. / 4) * np.multiply((W[:, j])[:, None], (P_r ** (1 / 4.)))
            grad_pc_l_j = grad_pr_l_j.T
            grad_ps_l_j = -(self.dim * 0.5) * np.multiply((P_d ** (-1 - self.dim / 2.)), (grad_pr + grad_pr.T))
            grad_E_l_j = 1. * np.multiply(np.multiply(np.exp(-1. * s_X / (P_d)), s_X / (P_d ** (2))), (grad_pr + grad_pr.T))
            grad_k_x_x_l_j = (ss_f ** 2.) * (np.sqrt(2.) ** self.dim) * (np.multiply(np.multiply(grad_pr_l_j, P_c ** (1 / 4.)), np.multiply(P_s ** (-1 / 2.), E)) + np.multiply(np.multiply(P_r ** (1 / 4.), grad_pc_l_j), np.multiply(P_s ** (-1 / 2.), E)) + np.multiply(np.multiply(P_r ** (1 / 4.), P_c ** (1 / 4.)), np.multiply(grad_ps_l_j, E)) + np.multiply(np.multiply(P_r ** (1 / 4.), P_c ** (1 / 4.)), np.multiply(P_s ** (-1 / 2.), grad_E_l_j)))
            grad_l_j = -1. * np.matmul(np.matmul(np.matmul(self.Y.T, A_inv), grad_k_x_x_l_j), np.matmul(A_inv, self.Y)) + np.trace(np.matmul(A_inv, grad_k_x_x_l_j))
            grads_l.append(grad_l_j[0, 0])
            # sigma_f_grad_fac[j] = 2
            grad_a_sigma_f_j = np.multiply(2. * np.multiply((M[:, j])[:, None], n ** 2), np.eye(n.shape[0]))
            # grad_a_sigma_f_j = np.matmul(sigma_f_grad_fac[:, None], np.multiply(n ** 2, np.eye(n.shape[0])))
            grad_obj_sigma_f_j = -1 * np.matmul(np.matmul(np.matmul(self.Y.T, A_inv), grad_a_sigma_f_j), np.matmul(A_inv, self.Y)) + np.trace(np.matmul(A_inv, grad_a_sigma_f_j))
            grads_sigma_f.append(grad_obj_sigma_f_j[0, 0])
        # return 0.5 * np.hstack([grads_l, grad_obj_sigma_f[0, 0], grad_obj_ss_f[0, 0]])
        return 0.5 * np.hstack([grads_l, grads_sigma_f, grad_obj_ss_f[0, 0]])
def log_obj_opt(self, params, l_gp_params, n_gp_params):
"""
The objective function to be optimized wrt. the values in the ```theta``` vector.
:param params: array of latent GP values for lengthscale, noise variance
and scalar value of signal-strength, in that order
:param l_lat: array of supporting lengthscales at the observations
:param sigma_f: noise in the original process
:param ss_f: signal-strength of the original process
:param sigma_l: noise in the latent-lengthscale process
:param ss_l: signal-strength of the latent-lengthscale process
:param ell_l: lengthscale of the latent-lengthscale process
"""
assert len(params) == 2 * self.num_lat_points + 1
l_lat = params[:self.num_lat_points]
n_lat = params[self.num_lat_points:2 * self.num_lat_points]
# sigma_f = params[-2]
ss_f = params[-1]
self.model_l = self.make_model_l(l_lat=l_lat[:, None], l_gp_params=l_gp_params)
self.model_n = self.make_model_n(n_lat=n_lat[:, None], n_gp_params=n_gp_params)
l = np.exp(self.model_l.predict(self.X)[0])
n = np.exp(self.model_n.predict(self.X)[0])
K_x_x = self.cov_func_mat(self.X, self.X, params, l_gp_params)
K_x_x_l = self.model_l.kern.K(self.X_m, self.X_m)
K_x_x_n = self.model_n.kern.K(self.X_m, self.X_m)
A = K_x_x + np.multiply(n ** 2, np.eye(n.shape[0]))
B = K_x_x_l + (l_gp_params[0] ** 2.) * np.eye(K_x_x_l.shape[0])
C = K_x_x_n + (n_gp_params[0] ** 2.) * np.eye(K_x_x_n.shape[0])
try:
A_inv = np.linalg.inv(A)
except:
print "trying with a larger jitter"
try:
A = A + self.jitter * np.eye(A.shape[0])
A_inv = np.linalg.inv(A)
except:
import pdb
pdb.set_trace()
log_obj = 1.5 * (self.X.shape[0] * np.log(2. * np.pi)) + 0.5 * ((np.matmul(np.matmul(self.Y.T, A_inv), self.Y)) + np.log(np.linalg.det(A)) + np.log(np.linalg.det(B)) + np.log(np.linalg.det(C)))
return log_obj
    def cov_func_mat(self, Xi, Xj, params, l_gp_params):
        """
        Non-stationary (Gibbs-style) covariance matrix between the input sets
        ``Xi`` and ``Xj``, with input-dependent lengthscales predicted by the
        latent-lengthscale GP.

        Note: Currently only for the isotropic lengthscale non-stationary GPs.

        :param Xi: array of input point(s), one point per row.
        :param Xj: array of input point(s), one point per row.
        :param params: flat hyper-parameter vector (latent lengthscales, latent
            noises, then the signal strength ``ss_f`` as the last entry).
        :param l_gp_params: hyper-parameters of the latent-lengthscale GP.
        :returns: the ``len(Xi) x len(Xj)`` covariance matrix.
        """
        assert len(params) == 2 * self.num_lat_points + 1
        l_lat = params[:self.num_lat_points]
        ss_f = params[-1]
        self.model_l = self.make_model_l(l_lat=l_lat[:, None], l_gp_params=l_gp_params )
        # The latent GP predicts log-lengthscales; exponentiate to lengthscales.
        li = np.exp(self.model_l.predict(Xi)[0])
        lj = np.exp(self.model_l.predict(Xj)[0])
        # Pairwise grids of squared lengthscales: p_r[a, b] = li[a]^2, p_c[a, b] = lj[b]^2.
        p_r = np.repeat(li ** 2., Xj.shape[0], axis=1)
        p_c = np.repeat((lj ** 2.).T, Xi.shape[0], axis=0)
        P_r = p_r ** self.dim
        P_c = p_c ** self.dim
        # Sum of the squared lengthscales — the denominator of the Gibbs kernel.
        P_d = p_r + p_c
        P_s = P_d ** self.dim
        # Exponential decay term over squared distances scaled by P_d.
        E = np.exp(-1. * self.get_dist_mat(Xi, Xj) / P_d)
        K_Xi_Xj = (ss_f ** 2) * (np.sqrt(2.) ** self.dim) * np.multiply(np.multiply(np.multiply((P_r ** (1 / 4.)) , (P_c ** (1 / 4.))), (P_s ** (-1 / 2.))), E)
        return K_Xi_Xj
def pred_cov(self, Xi, Xj=None, params=None, include_likelihood=True, l_gp_params=None, n_gp_params=None, A_inv=None):
"""
Computes the predictive covariance b/w ```Xi``` and ```Xj```.
"""
check_noise = False
if Xj is None:
Xj = Xi
check_noise = True
if params is None:
params = self.get_params()['nsgp_params']
if n_gp_params is None:
n_gp_params = self.get_params()['n_gp_params']
if l_gp_params is None:
l_gp_params = self.get_params()['l_gp_params']
l_lat = params[:self.num_lat_points]
n_lat = params[self.num_lat_points:2 * self.num_lat_points]
ss_f = params[-1]
self.model_l = self.make_model_l(l_lat=l_lat[:, None],l_gp_params= l_gp_params)
self.model_n = self.make_model_n(n_lat=n_lat[:, None], n_gp_params= n_gp_params)
K_x_s_x_s = self.cov_func_mat(Xi, Xj, params, l_gp_params)
K_Xi_X = self.cov_func_mat(Xi, self.X, params, l_gp_params)
K_Xj_X = self.cov_func_mat(Xj, self.X, params, l_gp_params)
if A_inv is None:
K_x_x = self.cov_func_mat(self.X, self.X, params, l_gp_params)
n = np.exp(self.model_n.predict(self.X)[0])
A = K_x_x + np.multiply(n ** 2, np.eye(n.shape[0]))
A_inv = np.linalg.inv(A)
# var = K_x_s_x_s - np.matmul(K_x_X, np.matmul(A_inv, K_x_X.T))
var = K_x_s_x_s - np.matmul(K_Xi_X, np.matmul(A_inv, K_Xj_X.T))
if include_likelihood:
if check_noise:
n_pred = np.exp(self.model_n.predict(Xi)[0])
return var + np.multiply(n_pred ** 2, np.eye(n_pred.shape[0]))
else:
return var
def pred_mean_ns(self, X, params, full_cov=False, include_likelihood=True, l_gp_params=None, n_gp_params=None, A_inv=None):
"""
Computes the predictive mean at given input(s) ```X``` given the values of the
hyper-parameters.
"""
l_lat = params[:self.num_lat_points]
n_lat = params[self.num_lat_points:2 * self.num_lat_points]
ss_f = params[-1]
self.model_l = self.make_model_l(l_lat=l_lat[:, None],l_gp_params= l_gp_params)
self.model_n = self.make_model_n(n_lat=n_lat[:, None], n_gp_params= n_gp_params)
K_x_x = self.cov_func_mat(self.X, self.X, params, l_gp_params)
if A_inv is None:
n = np.exp(self.model_n.predict(self.X)[0])
A = K_x_x + np.multiply(n ** 2, np.eye(n.shape[0]))
try:
A_inv = np.linalg.inv(A)
except:
A_inv = np.linalg.inv(A + self.jitter * np.eye(A.shape[0]))
K_x_X = self.cov_func_mat(X, self.X, params, l_gp_params)
mu = np.matmul(K_x_X, np.matmul(A_inv, self.Y))
var = self.pred_cov(X, params=params, include_likelihood=include_likelihood, l_gp_params= l_gp_params, n_gp_params= n_gp_params, A_inv=A_inv)
if full_cov:
return (mu, var)
else:
return (mu, np.diagonal(var)[:, None])
def predict(self, X, full_cov=False, include_likelihood=True):
"""
Predict the output at X. A faster method to ```pred_mean_ns``` as it uses the
A_inv which has been saved off.
"""
model_params = self.get_params()['nsgp_params']
l_gp_params = self.get_params()['l_gp_params']
n_gp_params = self.get_params()['n_gp_params']
preds = self.pred_mean_ns(X=X, params=model_params, full_cov=full_cov, include_likelihood=include_likelihood, l_gp_params=l_gp_params, n_gp_params=n_gp_params, A_inv=self.A_inv)
return preds
    def posterior_samples(self, X, n_samp=1, full_cov=False, include_likelihood=True, A_inv=None):
        """
        Sample the non-stationary GP at X.

        :params X: an array of input(s).
        :params n_samp: number of samples of the function.
        :params full_cov: this computes the full covariance of the Xs.

        NOTE(review): ``np.random.multivariate_normal`` expects a full
        covariance matrix, but with ``full_cov=False`` ``preds[1]`` is a
        column vector of per-point variances — confirm callers always pass
        ``full_cov=True``.
        NOTE(review): the ``A_inv`` argument is accepted but never forwarded
        to ``self.predict``.
        """
        preds = self.predict(X=X, full_cov=full_cov, include_likelihood=include_likelihood)
        return np.random.multivariate_normal(preds[0][:, 0], preds[1], n_samp)
def _get_val_error(self, params, l_gp_params, n_gp_params):
"""
Computes the mean squared error on the separate validation data.
"""
Y_val_pred = self.pred_mean_ns(X=self.X_val, params=params, l_gp_params=l_gp_params, n_gp_params=n_gp_params)[0]
return np.sum((Y_val_pred - self.Y_val) ** 2.)
def get_params(self):
"""
Get the calibrated hyper-parameters of the non-stationary GP.
"""
return {'nsgp_params':self.nsgp_params, 'l_gp_params':self.l_gp_params, 'n_gp_params':self.n_gp_params}
def set_params(self, params):
"""
Sets the parameters of the surrogate GP
"""
self.nsgp_params = params['nsgp_params']
self.l_gp_params = params['l_gp_params']
self.n_gp_params = params['n_gp_params']
def set_A_inv(self):
"""
saving off the A_inv after the model has been trained.
"""
params = self.get_params()
K_x_x = self.cov_func_mat(self.X, self.X, params['nsgp_params'], params['l_gp_params'])
n_lat = params['nsgp_params'][self.num_lat_points:2 * self.num_lat_points]
self.model_n = self.make_model_n(n_lat=n_lat[:, None], n_gp_params= params['n_gp_params'])
n = np.exp(self.model_n.predict(self.X)[0])
A = K_x_x + np.multiply(n ** 2, np.eye(n.shape[0]))
self.A_inv = np.linalg.inv(A)
def get_A_inv(self):
"""
getter for the saved ```A_inv```.
"""
return self.A_inv
def make_model(self):
"""
Calibrates the parameters of the surrogate non-stationary GP(s).
"""
m = self.Y.shape[1]
surrogates = []
for i in xrange(m):
if isinstance(self.num_designs_l_hyp, int):
params_l = lhs(2 + self.dim, self.num_designs_l_hyp)
if self.l_params_bounds:
b = np.array(self.l_params_bounds)
params_l = b[:, 0] + (b[:, 1] - b[:, 0]) * params_l
params_n = params_l.copy()
err = np.zeros(params_l.shape[0])
m_params = np.ndarray((params_l.shape[0], 2 * self.num_lat_points + 1))
else:
params_l = np.concatenate([[self._sigma_l], [self._ss_l], [self._ell_l] * self.dim])[None, :]
m_params = np.ndarray((params_l.shape[0], self.num_lat_points + 2))
for k in xrange(params_l.shape[0]):
# print params_l[k, :]
opt_res = scipy.optimize.minimize(fun=self.log_obj_opt,
x0= np.hstack([self._lengthscale_factor * np.random.rand(self.num_lat_points), self._nugget_factor * np.random.rand(self.num_lat_points), self._signalstrength_factor * np.random.rand(1)]),
method='L-BFGS-B',
jac=self.grad_log_obj,
bounds=np.concatenate([[self.opt_bounds['ell_f']] * (self.num_lat_points), [self.opt_bounds['sigma_f']] * (self.num_lat_points), [self.opt_bounds['ss_f']]]),
# args= (params_l[k, 0], params_l[k, 1], params_l[k, 2:]),
args=(params_l[k, :], params_n[k, :]),
options={'maxiter':500})
# print 'using L-BFGS-B', opt_res
# print 'scipy_grad', scipy.optimize.approx_fprime(opt_res.x, self.log_obj_opt, 1e-6, *(params_l[k, :], params_n[k, :]))
# print 'grad_analytic', self.grad_log_obj(params=opt_res.x, l_gp_params=params_l[k, :], n_gp_params=params_n[k, :])
if isinstance(self.num_designs_l_hyp, int):
m_params[k, ] = opt_res.x
err[k] = self._get_val_error(m_params[k, ], l_gp_params=params_l[k, :], n_gp_params=params_n[k, :])
else:
m_params[k, ] = opt_res.x
if isinstance(self.num_designs_l_hyp, int):
m_best = {'nsgp_params':m_params[np.argmin(err), :], 'l_gp_params':params_l[np.argmin(err), :], 'n_gp_params':params_n[np.argmin(err), :]}
# print 'validation error', err
else:
m_best = {'nsgp_params':m_params[0, :], 'l_gp_params':params_l[0, :], 'n_gp_params':params_n[np.argmin(err), :]}
print 'parameters selected', m_best
surrogates.append(m_best)
self.set_params(params=m_best)
self.set_A_inv()
def get_model_l(self):
l_lat = np.atleast_2d(self.get_params()['nsgp_params'][:self.num_lat_points]).T
model_l = self.make_model_l(l_lat=l_lat, l_gp_params=self.get_params()['l_gp_params'])
return model_l
def get_model_n(self):
n_lat = np.atleast_2d(self.get_params()['nsgp_params'][self.num_lat_points:2 * self.num_lat_points]).T
model_n = self.make_model_n(n_lat=n_lat, n_gp_params=self.get_params()['n_gp_params'])
return model_n |
import asyncio
from threading import Thread
from apscheduler.schedulers.blocking import BlockingScheduler
from config.configManager import *
from module.ccsun import *
from module.cqEncoder import *
from module.functions import *
scheduler = BlockingScheduler()
configManager = Config()
# NOTE(review): this rebinds the CQEncoder/CCSUN class names to instances,
# shadowing the classes for the rest of the module.
CQEncoder = CQEncoder()
CCSUN = CCSUN(False)
# Bot account configuration read from the config file.
qq = configManager.config["user"]["qq"]
authKey = configManager.config["user"]["authKey"]
mirai_api_http_locate = configManager.config["user"]["httpapi"]
app = Mirai(f"mirai://{mirai_api_http_locate}?authKey={authKey}&qq={qq}")
# The single QQ group this bot serves.
ccsun_group = configManager.config["ccsunGroup"]
write_log("[Notice] CCSUN-Bot已启动", 3)
# Submit the current day's bandwidth data.
async def auto_update():
    """Push today's bandwidth report; log instead of raising on failure."""
    try:
        write_log(await updateBandwidth(), 0)
    except Exception as exc:
        print(exc)
        write_log(exc, 3)
# Scheduled job that fires daily at 00:00:00.
@scheduler.scheduled_job('cron', hour='0', minute='0', second='0')
def run_timer():
    # Bridge the synchronous scheduler into the async bandwidth update.
    asyncio.run(auto_update())
# Run the blocking scheduler on its own thread so the bot keeps serving.
Thread(target=scheduler.start).start()
# Update and report bandwidth usage.
async def updateBandwidth():
    """
    Post today's bandwidth summary to the group; on the monthly reset day also
    post last month's usage chart and delete the temporary chart file.
    Returns the bandwidth summary string for logging.
    """
    info = CCSUN.getBandwidthStr(True)
    await app.sendGroupMessage(ccsun_group, info)
    if CCSUN.resetTotal():
        notice = '[Notice]\n今天是月结日,已重置流量。\n上月流量使用情况:\n'
        image_path = CCSUN.getChart("reset", str(days_before_month()), False)
        await app.sendGroupMessage(ccsun_group, [Plain(notice), Image.fromFileSystem(image_path)])
        os.remove(image_path)
        write_log(notice + "[图表]", 3)
    return info
# Dispatch one incoming message to the matching bot command.
async def run_command(message_type: str, data: dict):
    """
    Handle a single message event. Only group messages from the configured
    ccsun group are processed; supported commands are 登录 (login),
    流量 (bandwidth), 订阅 (subscribe), 图表/离线图表 (chart) and /ccsun.
    """
    group = data[Group]
    if group.id != ccsun_group or message_type != "group":
        return
    mirai_app = data[Mirai]
    member = data[Member]
    source = data[Source]
    message = data[MessageChain]
    info = ""
    cq_message = CQEncoder.messageChainToCQ(message)
    write_log(f"[{member.id}]{cq_message}", 1)
    command = commandDecode(cq_message)
    # Subscription-reply detection: a bare number sent while a subscription
    # prompt is pending is rewritten to the full "订阅<number>" command.
    if is_number(cq_message) and querySubTask(member.id, True):
        cq_message = f'订阅{cq_message}'
    else:
        querySubTask(member.id, True)
    # Login command
    if cq_message == "登录":
        CCSUN.Login()
        CCSUN.refreshToken()
        info = '[Notice]\n已登录并刷新Token'
        await mirai_app.sendGroupMessage(ccsun_group, info)
    # Bandwidth command
    if cq_message == "流量":
        info = CCSUN.getBandwidthStr()
        if info.find('Error') != -1:
            # On failure, re-login once and retry.
            CCSUN.Login()
            CCSUN.refreshToken()
            info = CCSUN.getBandwidthStr()
        await mirai_app.sendGroupMessage(ccsun_group, info)
    # Subscribe command
    if cq_message[:2] == "订阅":
        keyword_len = 2
        num = None
        # If anything follows the keyword and it is a number, fetch that
        # subscription's link.
        if len(cq_message) > keyword_len:
            num = cq_message[keyword_len:]
            # NOTE(review): the second check is redundant — after the early
            # return, num is always a number here.
            if not is_number(num): return
            if is_number(num): info = CCSUN.getSubscribeForMenu(num)
        else:
            # Fetch the subscription menu.
            info = CCSUN.getSubscribeForMenu()
        # Retry once after re-login on failure.
        if info == '':
            CCSUN.Login()
            info = CCSUN.getSubscribeForMenu(num)
        info = info if info != '' else '[getSubscribeForMenu Error]\n获取数据失败'
        await mirai_app.sendGroupMessage(ccsun_group, info, quoteSource=source if num is not None else None)
        if 'Error' not in info: addSubTask(member.id)
    # Chart command
    if cq_message[:2] == "图表" or cq_message[:4] == "离线图表":
        # NOTE(review): is_offline is True for the plain "图表" command and
        # False for "离线图表" — verify against getChart's third argument.
        is_offline = True if cq_message[:2] == "图表" else False
        keyword_len = 2 if cq_message[:2] == "图表" else 4
        day = ""
        # Take the number following the keyword; default to 7 days if absent.
        if len(cq_message) >= keyword_len:
            day = cq_message[keyword_len:]
            if not is_number(day):
                day = "7"
        # Requests longer than 180 days are capped.
        if int(day) > 180:
            day = "180"
            info = "[Notice]\n最大查询过去180天的数据"
            await mirai_app.sendGroupMessage(ccsun_group, info)
        # Render the chart.
        image_path = CCSUN.getChart(str(source.id), day, is_offline)
        # Retry once on failure.
        if image_path == '': image_path = CCSUN.getChart(str(source.id), day, is_offline)
        # On success send the chart image (then clean it up); otherwise report.
        if image_path != '':
            await mirai_app.sendGroupMessage(ccsun_group, [Image.fromFileSystem(image_path)])
            os.remove(image_path)
            info = "[图表]"
        else:
            await mirai_app.sendGroupMessage(ccsun_group, "[getChart Error] 获取图表失败")
    # /ccsun command set
    if command[0].lower() == "/ccsun":
        # Force-submit today's bandwidth data.
        # NOTE(review): command[1] is read behind a `len(command) >= 1` guard,
        # which raises IndexError for a bare "/ccsun" — should likely be >= 2.
        if len(command) >= 1:
            if command[1].lower() == "update": info = await updateBandwidth()
    write_log(info, 0)
if __name__ == "__main__":
    # Start the Mirai bot's event loop (blocks until shutdown).
    app.run()
|
from django.db import models
from moondance.meta_models import MetaModel
from operations.models import Product, UNIT_OF_MEASURES
from simple_history.models import HistoricalRecords
from django.utils import timezone
# Fixed set of inventory locations, used as `choices` for the location fields
# on the inventory models below.
LOCATION_LIST = [
    ("Bondo - Garage", "Bondo - Garage"),
    ("Bondo - 2nd Floor", "Bondo - 2nd Floor"),
    ("MoonDance - Workshop", "MoonDance - Workshop"),
    ("MoonDance - Fulfillment Center", "MoonDance - Fulfillment Center"),
]
class Supplier(MetaModel):
    """
    A vendor that products are purchased from — either a distributor or a
    manufacturer — together with its contact and address details.
    """
    type_choices = (
        ("Distributor", "Distributor"),
        ("Manufacturer", "Manufacturer"),
    )
    # django-simple-history audit trail of all changes to this model.
    history = HistoricalRecords()
    type = models.CharField(
        max_length=200, choices=type_choices, default="Manufacturer"
    )
    # Supplier names must be unique across the system.
    name = models.CharField(max_length=200, unique=True)
    contact_name = models.CharField(max_length=200, null=True, blank=True)
    contact_email = models.CharField(max_length=200, null=True, blank=True)
    street_address = models.CharField(max_length=200, null=True, blank=True)
    city = models.CharField(max_length=200, null=True, blank=True)
    state = models.CharField(max_length=200, null=True, blank=True)
    postal_code = models.CharField(max_length=200, null=True, blank=True)
    country = models.CharField(
        max_length=200, null=True, blank=True, default="United States"
    )
    supplier_website = models.URLField(max_length=200, null=True, blank=True)
    notes = models.TextField(null=True, blank=True)
    phone_number = models.CharField(max_length=50, null=True, blank=True)
    def __str__(self):
        return "{}".format(self.name)
    class Meta:
        verbose_name = "Supplier"
        verbose_name_plural = "Suppliers"
        ordering = ("name",)
class Supplier_Product(MetaModel):
    """
    Cross-reference between an internal Product and a supplier's own catalog
    entry (the supplier's SKU, description, and product page link).
    """
    # django-simple-history audit trail of all changes to this model.
    history = HistoricalRecords()
    sku = models.ForeignKey(
        Product,
        on_delete=models.PROTECT,
        related_name="Supplier_Product_product_fk",
    )
    supplier = models.ForeignKey(
        Supplier, on_delete=models.PROTECT, related_name="supplier_product_supplier_fk"
    )
    supplier_sku = models.CharField(max_length=200)
    supplier_sku_description = models.CharField(max_length=200)
    supplier_sku_link = models.URLField(max_length=200, null=True, blank=True)
    def __str__(self):
        return "{}: {} ({})".format(self.sku, self.supplier, self.supplier_sku)
    class Meta:
        verbose_name = "Supplier Product"
        verbose_name_plural = "Supplier Products"
        # A supplier lists each of its SKUs at most once.
        unique_together = ("supplier", "supplier_sku")
        ordering = ("sku", "supplier_sku")
class Invoice(MetaModel):
    """
    A supplier invoice header, surfaced in the admin as an "Inventory
    Receipt"; its line items live on Invoice_Line.
    """
    # django-simple-history audit trail of all changes to this model.
    history = HistoricalRecords()
    supplier = models.ForeignKey(
        Supplier,
        on_delete=models.PROTECT,
        related_name="Invoice_supplier_fk",
        verbose_name="Invoicing Supplier",
    )
    invoice = models.CharField(max_length=200, blank=True, null=True)
    order = models.CharField(max_length=200, blank=True, null=True)
    date_invoiced = models.DateField(default=timezone.now)
    freight_charges = models.DecimalField(
        max_digits=12, decimal_places=2, null=True, blank=True
    )
    def __str__(self):
        return "{} ({})".format(self.invoice, self.supplier)
    class Meta:
        verbose_name = "Inventory Receipt"
        verbose_name_plural = "Inventory Receipts"
        ordering = ("-date_invoiced", "invoice")
class Invoice_Line(MetaModel):
    """
    A single line on an inventory receipt (Invoice): a quantity of one product
    at a total cost, optionally attributed to a manufacturer that differs from
    the invoicing supplier.
    """
    # django-simple-history audit trail of all changes to this model.
    history = HistoricalRecords()
    invoice = models.ForeignKey(
        Invoice, on_delete=models.CASCADE, related_name="Invoice_Line_invoice_fk"
    )
    sku = models.ForeignKey(
        Product,
        on_delete=models.PROTECT,
        related_name="Invoice_Line_sku_fk",
        verbose_name="MoonDance SKU",
    )
    manufacturer = models.ForeignKey(
        Supplier,
        on_delete=models.PROTECT,
        related_name="Invoice_Manufacturer_fk",
        verbose_name="Manufacturer",
        blank=True,
        null=True,
        help_text="Only needs to be populated if the manufacturer is different than the invoicing supplier.",
    )
    unit_of_measure = models.CharField(max_length=200, choices=UNIT_OF_MEASURES)
    quantity = models.DecimalField(max_digits=12, decimal_places=2)
    total_cost = models.DecimalField(max_digits=12, decimal_places=2)
    def __str__(self):
        return "{} ({})".format(self.invoice, self.sku)
    class Meta:
        # BUGFIX: corrected the "Recipt" spelling shown in the admin UI.
        verbose_name = "Receipt Line"
        verbose_name_plural = "Receipt Lines"
        unique_together = (
            (
                "sku",
                "invoice",
            ),
        )
        ordering = ("sku",)
class Inventory_Onhand(MetaModel):
    """
    Quantity of a product currently on hand at a location. The to_location /
    transfer_quantity fields capture a pending transfer request between
    locations.
    """
    # django-simple-history audit trail of all changes to this model.
    history = HistoricalRecords()
    sku = models.ForeignKey(
        Product,
        on_delete=models.PROTECT,
        related_name="Inventory_Onhand_sku_fk",
    )
    location = models.CharField(
        max_length=200, choices=LOCATION_LIST, verbose_name="Current Location"
    )
    quantity_onhand = models.DecimalField(max_digits=12, decimal_places=2)
    to_location = models.CharField(
        max_length=200,
        null=True,
        blank=True,
        choices=LOCATION_LIST,
        verbose_name="Transfer To Location",
    )
    transfer_quantity = models.DecimalField(
        max_digits=12, decimal_places=2, null=True, blank=True
    )
    def __str__(self):
        return "{} ({})".format(self.sku.sku, self.location)
    class Meta:
        verbose_name = "Inventory Onhand"
        verbose_name_plural = "Inventory Onhand"
        ordering = ("sku", "location")
        # One on-hand row per (product, location) pair.
        unique_together = (
            (
                "sku",
                "location",
            ),
        )
|
class OrderingError(Exception):
    """
    Raised when there's any kind of problem during the ordering process.
    In general, used when the configuration file has problems.
    """
    def __init__(self, message):
        # BUGFIX: pass the message to Exception so that str(), repr(),
        # tracebacks and pickling all show it; keep the .message attribute
        # for existing callers.
        super(OrderingError, self).__init__(message)
        self.message = message
|
import json
import db
import os
import random
import time
from flask import Flask, redirect, url_for, request, g
app = Flask(__name__)
# Directory where uploaded photos are written before face identification.
upload_folder = "upload_folder"
def upload_file():
    """
    Handle an uploaded photo: save it under a random name and identify the
    face in it via the db module, returning the match as JSON.

    NOTE(review): no @app.route decorator is visible here and
    app.config['UPLOAD_FOLDER'] is never set in this file — confirm both are
    provided elsewhere before relying on this handler.
    """
    start = time.time()
    if request.method == 'POST':
        print(request.get_json())
        files = request.files
        print("start")
        user_name = "enes_1411"
        for f in files:
            file = request.files[f]
            # Random suffix to reduce clobbering of earlier uploads.
            counter = random.randint(1, 101)
            filename = "photo_"+str(counter)+".jpg"
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # NOTE(review): `if(True)` is a debug leftover — always taken.
            if(True):
                filename = upload_folder + "/" + filename
                print(filename)
                identified_user, score = db.identify_face_api_with_binary(
                    filename)
                # NOTE(review): this second call repeats the identification
                # request just to print it.
                print(db.identify_face_api_with_binary(filename))
            # NOTE(review): `and False` permanently disables this enrollment
            # and training block.
            if(len(os.listdir(upload_folder)) > 5 and False):
                file_list = os.listdir(upload_folder)
                for fi in file_list:
                    filename = upload_folder + "/" + fi
                    db.add_face_face_api_personGroup_person_binary(
                        user_name, filename)
                db.train_face_api_personGroup_person()
                #messages[model_created] = True
        # NOTE(review): identified_user/score are unbound if the request
        # contained no files — this would raise NameError.
        response = {}
        response["username"] = identified_user
        response["score"] = score
        finish = time.time()
        print(finish-start)
        return json.dumps(response)
# NOTE(review): these calls run at import time and enroll/train a face on
# every import of this module — consider moving them under an
# `if __name__ == "__main__":` guard.
user_name = "enes_1411"
filename = "enes_photo.jpg"
db.add_face_face_api_personGroup_person_binary(user_name, filename)
db.train_face_api_personGroup_person()
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import threading
import string
import datetime
import signal
import logger
import option_parser
import utils
import test_utils
default_timeout = 10.0 # 10s
class AppRunner(object):
    """
    Class for running command line applications. It handles timeouts, logging and reading stdout/stderr.
    Stdout and stderr of an application is also stored in log output dir in files process_<NB>_stdout/stderr.txt
    If application finished with non 0 error code you need to mark it with .validate() function. Otherwise testing
    framework will fail a subtest. AppRunner is also "validated" when .terminate() is called.
    You can access all lines read so far (or when the application terminates all lines printed) from attributes
    .stdout_lines
    .stderr_lines
    You can see how long the application ran for +/- some minimal overhead (must run() for time to be accurate):
    .runTime
    # Sample usage
    app = AppRunner("nvidia-smi", ["-l", "1"])
    app.run(timeout=2.5)
    print string.join(app.stdout_lines, "\n")
    Notes: AppRunner works very closely with test_utils SubTest environment. SubTest at the end of the test
    checks that all applications finished successfully and kills applications that didn't finish by
    the end of the test.
    """
    # Sentinel return "codes" used instead of an integer exit status.
    RETVALUE_TERMINATED = "Terminated"
    RETVALUE_TIMEOUT = "Terminated - Timeout"
    _processes = [] # Contains list of all processes running in the background
    _processes_not_validated = [] # Contains list of processes that finished with non 0 error code
    # and were not marked as validated
    _process_nb = 0
    def __init__(self, executable, args=None, cwd=None, env=None):
        self.executable = executable
        # Normalize mutable defaults (None -> fresh objects) per call.
        if args is None:
            args = []
        self.args = args
        self.cwd = cwd
        if env is None:
            env = dict()
        self.env = env
        self._timer = None # to implement timeout
        self._subprocess = None
        self._retvalue = None # stored return code or string when the app was terminated
        self._lock = threading.Lock() # to implement thread safe timeout/terminate
        self.stdout_lines = [] # buff that stores all app's output
        self.stderr_lines = []
        self._logfile_stdout = None
        self._logfile_stderr = None
        self._is_validated = False
        self._info_message = False
        # Monotonically increasing id used in the per-process log file names.
        self.process_nb = AppRunner._process_nb
        AppRunner._process_nb += 1
    def run(self, timeout=default_timeout):
        """
        Run the application and wait for it to finish.
        Returns the app's error code/string
        """
        self.start(timeout)
        return self.wait()
    def start(self, timeout=default_timeout):
        """
        Begin executing the application.
        The application may block if stdout/stderr buffers become full.
        This should be followed by self.terminate() or self.wait() to finish execution.
        Execution will be forcefully terminated if the timeout expires.
        If timeout is None, then this app will never timeout.
        """
        assert self._subprocess is None
        logger.debug("Starting " + str(self))
        env = self._create_subprocess_env()
        if utils.is_linux():
            if os.path.exists(self.executable):
                # On linux, for binaries inside the package (not just commands in the path) test that they have +x
                # e.g. if package is extracted on windows and copied to Linux, the +x privileges will be lost
                assert os.access(self.executable, os.X_OK), "Application binary %s is not executable! Make sure that the testing archive has been correctly extracted." % (self.executable)
        self.startTime = datetime.datetime.now()
        self._subprocess = subprocess.Popen(
            [self.executable] + self.args,
            stdin=None,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self.cwd,
            env=env)
        AppRunner._processes.append(self) # keep track of running processes
        # Start timeout if we want one
        self._timer = None
        if timeout is not None:
            self._timer = threading.Timer(timeout, self._trigger_timeout)
            self._timer.start()
        if not test_utils.noLogging:
            def args_to_fname(args):
                # crop each argument to 16 characters and make sure the output string is no longer than 50 chars
                # Long file names are hard to read (hard to find the extension of the file)
                # Also python sometimes complains about file names being too long.
                # IOError: [Errno 36] File name too long
                return string.join(map(lambda x: utils.string_to_valid_file_name(x)[:16], self.args), "_")[:50]
            shortname = os.path.basename(self.executable) + "_" + args_to_fname(self.args)
            stdout_fname = os.path.relpath(os.path.join(
                logger.log_dir, "app_%03d_%s_stdout.txt" % (self.process_nb, shortname)))
            stderr_fname = os.path.relpath(os.path.join(
                logger.log_dir, "app_%03d_%s_stderr.txt" % (self.process_nb, shortname)))
            # If the app fails, this message will get printed. If it succeeds it'll get popped in _process_finish
            self._info_message = logger.info("Starting %s...\nstdout in %s\nstderr in %s" % (
                str(self)[:64], # cut the string to make it more readable
                stdout_fname, stderr_fname), defer=True)
            self._logfile_stdout = open(stdout_fname, "w")
            self._logfile_stderr = open(stderr_fname, "w")
    def _process_finish(self, stdout_buf, stderr_buf):
        """
        Logs return code/string and reads the remaining stdout/stderr.
        Also books the process out of the class-level tracking lists and, on a
        non-zero / non-terminated status, queues it for validation.
        """
        logger.debug("Application %s returned with status: %s" % (self.executable, self._retvalue))
        self.runTime = datetime.datetime.now() - self.startTime
        self._split_and_log_lines(stdout_buf, self.stdout_lines, self._logfile_stdout)
        self._split_and_log_lines(stderr_buf, self.stderr_lines, self._logfile_stderr)
        if self._logfile_stdout:
            self._logfile_stdout.close()
        if self._logfile_stderr:
            self._logfile_stderr.close()
        AppRunner._processes.remove(self)
        if self._retvalue != 0 and self._retvalue != AppRunner.RETVALUE_TERMINATED:
            # Failure: keep the process around until .validate() is called.
            AppRunner._processes_not_validated.append(self)
        else:
            self._is_validated = True
            logger.pop_defered(self._info_message)
    def wait(self):
        """
        Wait for application to finish and return the app's error code/string
        """
        if self._retvalue is not None:
            return self._retvalue
        logger.debug("Waiting for application %s, pid %d to finish" % (str(self), self._subprocess.pid))
        stdout_buf, stderr_buf = self._subprocess.communicate()
        if self._timer is not None:
            self._timer.cancel()
        with self._lock: # set ._retvalue in thread safe way. Make sure it wasn't set by timeout already
            if self._retvalue is None:
                self._retvalue = self._subprocess.returncode
                self._process_finish(stdout_buf, stderr_buf)
        return self._retvalue
    def poll(self):
        # Non-blocking check; finalizes bookkeeping if the app has exited.
        if self._retvalue is None:
            self._retvalue = self._subprocess.poll()
            if self._retvalue is not None:
                stdout_buf = self._read_all_remaining(self._subprocess.stdout)
                stderr_buf = self._read_all_remaining(self._subprocess.stderr)
                self._process_finish(stdout_buf, stderr_buf)
        return self._retvalue
    def _trigger_timeout(self):
        """
        Function called by timeout routine. Kills the app in a thread safe way.
        """
        logger.warning("App %s with pid %d has timed out. Killing it." % (self.executable, self.getpid()))
        with self._lock: # set ._retvalue in thread safe way. Make sure that app wasn't terminated already
            if self._retvalue is not None:
                return self._retvalue
            self._subprocess.kill()
            stdout_buf = self._read_all_remaining(self._subprocess.stdout)
            stderr_buf = self._read_all_remaining(self._subprocess.stderr)
            self._retvalue = AppRunner.RETVALUE_TIMEOUT
            self._process_finish(stdout_buf, stderr_buf)
            return self._retvalue
    def _create_subprocess_env(self):
        ''' Merge additional env with current env '''
        env = os.environ.copy()
        for key in self.env:
            env[key] = self.env[key]
        return env
    def validate(self):
        """
        Marks the process that finished with error code as validated - the error was either expected or handled by the caller
        If process finished with error but wasn't validated one of the subtest will fail.
        """
        assert self.retvalue() != None, "This function shouldn't be called when process is still running"
        if self._is_validated:
            return
        self._is_validated = True
        self._processes_not_validated.remove(self)
        logger.pop_defered(self._info_message)
    def terminate(self):
        """
        Forcefully terminates the application and return the app's error code/string.
        """
        with self._lock: # set ._retvalue in thread safe way. Make sure that app didn't timeout
            if self._retvalue is not None:
                return self._retvalue
            if self._timer is not None:
                self._timer.cancel()
            self._subprocess.kill()
            stdout_buf = self._read_all_remaining(self._subprocess.stdout)
            stderr_buf = self._read_all_remaining(self._subprocess.stderr)
            self._retvalue = AppRunner.RETVALUE_TERMINATED
            self._process_finish(stdout_buf, stderr_buf)
            return self._retvalue
    def signal(self, signal):
        """
        Send a signal to the process
        """
        self._subprocess.send_signal(signal)
    def _read_all_remaining(self, stream):
        """
        Return a string representing the entire remaining contents of the specified stream
        This will block if the stream does not end
        Should only be called on a terminated process
        """
        out_buf = ""
        while True:
            rawline = stream.readline()
            if rawline == "":
                break
            else:
                out_buf += rawline
        return out_buf
    def _split_and_log_lines(self, input_string, buff, log_file):
        """
        Splits string into lines, removes '\\n's, and appends to buffer & log file
        """
        lines = input_string.splitlines()
        for i in xrange(len(lines)):
            lines[i] = string.rstrip(lines[i], "\n\r")
            if log_file:
                log_file.write(lines[i])
                log_file.write("\n")
            buff.append(lines[i])
    def stdout_readtillmatch(self, match_fn):
        """
        Blocking function that reads input until function match_fn(line : str) returns True.
        If match_fn didn't match anything function raises EOFError exception
        """
        logger.debug("stdout_readtillmatch called", caller_depth=1)
        while True:
            rawline = self._subprocess.stdout.readline()
            if rawline == "":
                break
            else:
                rawline = string.rstrip(rawline, "\n\r")
                if self._logfile_stdout:
                    self._logfile_stdout.write(rawline)
                    self._logfile_stdout.write("\n")
                self.stdout_lines.append(rawline)
            if match_fn(rawline) is True:
                return
        raise EOFError("Process finished and requested match wasn't found")
    def retvalue(self):
        """
        Returns code/string if application finished or None otherwise.
        """
        if self._subprocess.poll() is not None:
            self.wait()
        return self._retvalue
    def getpid(self):
        """
        Returns the pid of the process
        """
        return self._subprocess.pid
    def __str__(self):
        return ("AppRunner #%d: %s %s (cwd: %s; env: %s)" %
                (self.process_nb, self.executable, string.join(self.args, " "), self.cwd, self.env))
    def __repr__(self):
        return str(self)
    @classmethod
    def clean_all(cls):
        """
        Terminate all processes that were created using this class and makes sure that all processes that were spawned were validated.
        """
        import test_utils
        def log_output(message, process):
            """
            Prints last 10 lines of stdout and stderr for faster lookup
            """
            logger.info("%s: %s" % (message, process))
            numLinesToPrint = 100
            #Print more lines for ERIS since this is all we'll have to go by
            if option_parser.options.dvssc_testing or option_parser.options.eris:
                numLinesToPrint = 500
            logger.info("Last %d lines of stdout" % numLinesToPrint)
            with logger.IndentBlock():
                for line in process.stdout_lines[-numLinesToPrint:]:
                    logger.info(line)
            logger.info("Last %d lines of stderr" % numLinesToPrint)
            with logger.IndentBlock():
                for line in process.stderr_lines[-numLinesToPrint:]:
                    logger.info(line)
        with test_utils.SubTest("not terminated processes", quiet=True):
            assert AppRunner._processes == [], "Some processes were not terminated by previous test: " + str(AppRunner._processes)
            for process in AppRunner._processes[:]:
                log_output("Unterminated process", process)
                process.terminate()
        with test_utils.SubTest("not validated processes", quiet=True):
            for process in AppRunner._processes_not_validated:
                log_output("Process returned %s ret code" % process.retvalue(), process)
            assert AppRunner._processes_not_validated == [], "Some processes failed and were not validated by previous test: " + str(AppRunner._processes_not_validated)
        AppRunner._processes_not_validated = []
|
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import numpy as np
from config import *
import pickle
class TextTokenizer:
    """Tokenize and pad text/summary pairs for seq2seq summarization training.

    Expects a DataFrame with ``text`` and ``summary`` columns.  Maximum
    sequence lengths (``max_text_len`` / ``max_summary_len``) come from
    ``config`` (imported at module level with ``from config import *``).
    """

    def __init__(self, df):
        """Store the DataFrame.

        BUG FIX: the original did ``return "Data Frame is Empty, ..."`` inside
        __init__, which raises ``TypeError: __init__() should return None`` at
        instantiation; raise an explicit, informative error instead.
        """
        if df.empty:
            raise ValueError("Data Frame is Empty, check preprocessing")
        self.df = df

    def split_data(self):
        """Return a deterministic 90/10 train/validation split of (text, summary)."""
        return train_test_split(np.array(self.df['text']),
                                np.array(self.df['summary']),
                                test_size=0.1, random_state=0, shuffle=True)

    def rarewords(self, tokenizer):
        """Count words that occur fewer than 4 times in a fitted tokenizer.

        Prints the rare-word proportion and coverage, then returns
        ``(rare_count, total_count, rare_freq, total_freq)``.
        """
        thresh = 4
        cnt = 0
        tot_cnt = 0
        freq = 0
        tot_freq = 0
        for _word, count in tokenizer.word_counts.items():
            tot_cnt += 1
            tot_freq += count
            if count < thresh:
                cnt += 1
                freq += count
        print("% of rare words in vocabulary:", (cnt / tot_cnt) * 100)
        print("Total Coverage of rare words:", (freq / tot_freq) * 100)
        return cnt, tot_cnt, freq, tot_freq

    def remove_tags(self, x_val, y_val):
        """Drop pairs whose padded summary has exactly two non-zero tokens.

        Two non-zero tokens means the summary consists only of the start/end
        tags (presumably sostok/eostok — verify against preprocessing), i.e.
        every real word was dropped as rare.
        """
        ind = []
        for i in range(len(y_val)):
            nonzero = 0
            for tok in y_val[i]:
                if tok != 0:
                    nonzero += 1
            if nonzero == 2:
                ind.append(i)
        y_val = np.delete(y_val, ind, axis=0)
        x_val = np.delete(x_val, ind, axis=0)
        return x_val, y_val

    def tokenizer(self):
        """Fit tokenizers, produce padded integer sequences, pickle the tokenizers.

        Returns ``(x_tr, x_val, x_voc, y_tr, y_val, y_voc)`` where the *_voc
        values are vocabulary sizes (+1 for the padding token).
        """
        x_tr, x_val, y_tr, y_val = self.split_data()

        # First pass: fit on all training text just to measure rare words.
        x_tokenizer = Tokenizer()
        x_tokenizer.fit_on_texts(list(x_tr))
        print("\nProportion of Rarewords and its coverage in entire Text:")
        x_cnt, x_tot_cnt, _, _ = self.rarewords(x_tokenizer)

        # Second pass: refit, keeping only words that are not rare.
        x_tokenizer = Tokenizer(num_words=x_tot_cnt - x_cnt)
        x_tokenizer.fit_on_texts(list(x_tr))
        # convert text sequences into integer sequences
        x_tr_seq = x_tokenizer.texts_to_sequences(x_tr)
        x_val_seq = x_tokenizer.texts_to_sequences(x_val)
        # pad with zeros up to the maximum text length
        x_tr = pad_sequences(x_tr_seq, maxlen=max_text_len, padding='post')
        x_val = pad_sequences(x_val_seq, maxlen=max_text_len, padding='post')
        x_voc = x_tokenizer.num_words + 1  # +1 for padding token

        # Same two-pass procedure for the summaries.
        y_tokenizer = Tokenizer()
        y_tokenizer.fit_on_texts(list(y_tr))
        print("\nProportion of Rarewords and its coverage in entire Summary:")
        # BUG FIX: the original computed the summary rare-word counts but
        # discarded them and rebuilt y_tokenizer with the *text* counts
        # (tot_cnt - cnt from x).  Use the summary's own counts.
        y_cnt, y_tot_cnt, _, _ = self.rarewords(y_tokenizer)
        y_tokenizer = Tokenizer(num_words=y_tot_cnt - y_cnt)
        y_tokenizer.fit_on_texts(list(y_tr))
        y_tr_seq = y_tokenizer.texts_to_sequences(y_tr)
        y_val_seq = y_tokenizer.texts_to_sequences(y_val)
        y_tr = pad_sequences(y_tr_seq, maxlen=max_summary_len, padding='post')
        y_val = pad_sequences(y_val_seq, maxlen=max_summary_len, padding='post')
        y_voc = y_tokenizer.num_words + 1

        # Drop pairs whose summary became empty (only start/end tags survived).
        x_tr, y_tr = self.remove_tags(x_tr, y_tr)
        x_val, y_val = self.remove_tags(x_val, y_val)
        print(f"\ny_tokenizer: {y_tokenizer} \nx_tokenizer: {x_tokenizer}")

        # Persist the fitted tokenizers for inference-time reuse.
        # (pickle is imported at module level; the redundant local import is gone.)
        with open('saved_model_data/y_tokenizer.pickle', 'wb') as handle:
            pickle.dump(y_tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open('saved_model_data/x_tokenizer.pickle', 'wb') as handle:
            pickle.dump(x_tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
        return x_tr, x_val, x_voc, y_tr, y_val, y_voc
|
# Generated by Django 2.2.13 on 2020-10-29 19:14
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.13): registers the
    ``objects_version`` manager on Dataset and enforces uniqueness of
    (name, version, project_slug)."""

    dependencies = [
        ('datasets', '0001_initial'),
    ]

    operations = [
        # Declare `objects_version` as the explicit manager for Dataset.
        migrations.AlterModelManagers(
            name='dataset',
            managers=[
                ('objects_version', django.db.models.manager.Manager()),
            ],
        ),
        # A dataset name+version must be unique within a project.
        migrations.AlterUniqueTogether(
            name='dataset',
            unique_together={('name', 'version', 'project_slug')},
        ),
    ]
|
from django.shortcuts import render_to_response, HttpResponseRedirect, render, redirect, HttpResponse
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import user_passes_test
import json
from main.apps.core.smpp import TelnetConnection, SMPPCCM
@user_passes_test(lambda u: u.is_superuser)
def smppccm_view(request):
    """Render the SMPP client-connector management page (superusers only)."""
    context = {}
    return render(request, 'administration/smppccm.html', context)
@user_passes_test(lambda u: u.is_superuser)
def smppccm_view_manage(request):
    """AJAX endpoint managing jasmin SMPP client connectors (superusers only).

    The POST field ``s`` selects the action (list/add/edit/delete/start/stop/
    restart); connector fields are read from the POST body.  Returns the
    action result (or ``{}``) as JSON.
    """
    # Every connector attribute forwarded verbatim on "edit".
    EDIT_FIELDS = (
        "cid", "logfile", "logrotate", "loglevel", "host", "port", "ssl",
        "username", "password", "bind", "bind_to", "trx_to", "res_to",
        "pdu_red_to", "con_loss_retry", "con_loss_delay", "con_fail_retry",
        "con_fail_delay", "src_addr", "src_ton", "src_npi", "dst_ton",
        "dst_npi", "bind_ton", "bind_npi", "validity", "priority",
        "requeue_delay", "addr_range", "systype", "dlr_expiry",
        "submit_throughput", "proto_id", "coding", "elink_interval",
        "def_msg_id", "ripf", "dlr_msgid",
    )
    args = {}
    if request.POST:
        service = request.POST.get("s", None)
        # A telnet session is only opened for recognized actions, matching the
        # original behavior (unknown/missing `s` returns {} with no side effects).
        if service in ("list", "add", "edit", "delete", "start", "stop", "restart"):
            # Previously each branch duplicated this connection setup.
            tc = TelnetConnection()
            smppccm = SMPPCCM(telnet=tc.telnet)
            cid = request.POST.get("cid")
            if service == "list":
                args = smppccm.list()
            elif service == "add":
                smppccm.create(data={
                    "cid": cid,
                    "host": request.POST.get("host"),
                    "port": request.POST.get("port"),
                    "username": request.POST.get("username"),
                    "password": request.POST.get("password"),
                })
            elif service == "edit":
                data = {field: request.POST.get(field) for field in EDIT_FIELDS}
                smppccm.partial_update(data=data, cid=cid)
            elif service == "delete":
                args = smppccm.destroy(cid=cid)
            elif service == "start":
                args = smppccm.start(cid=cid)
            elif service == "stop":
                args = smppccm.stop(cid=cid)
            elif service == "restart":
                # Stop result is intentionally discarded; the client gets the
                # start result (same as the original).
                smppccm.stop(cid=cid)
                args = smppccm.start(cid=cid)
    return HttpResponse(json.dumps(args), content_type='application/json')
# -*- coding: utf-8 -*-
from importlib import import_module
def get_class(dot_path):
    """Resolve a dotted path such as ``"pkg.mod.Class"`` to the named object.

    Everything before the final dot is imported as a module; the last
    component is looked up on it with ``getattr``.
    """
    parts = dot_path.split(".")
    module_path = ".".join(parts[:-1])
    attr_name = parts[-1]
    module = import_module(module_path)
    return getattr(module, attr_name)
|
# Programming in Python #21 - Global and Local Variables
# (Portuguese-language tutorial demonstrating variable scope.)

acesso = 'Global'


def mudarAcesso():
    # This assignment creates a *local* variable that shadows the global
    # `acesso` — the global is untouched.
    acesso = 'Local'
    print('Acesso no interior da função:', acesso)


mudarAcesso()
print('Acesso no exterior da função:', acesso)
import numpy as np
from tqdm import tqdm
import shutil
import torch
from torch.backends import cudnn
from torch.autograd import Variable
from graphs.models.erfnet import ERF
from graphs.models.erfnet_imagenet import ERFNet
from datasets.voc2012 import VOCDataLoader
from graphs.losses.cross_entropy import CrossEntropyLoss
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
from utils.metrics import AverageMeter, IOUMetric
from utils.misc import print_cuda_statistics
from agents.base import BaseAgent
cudnn.benchmark = True
class ERFNetAgent(BaseAgent):
    """
    Agent handling the whole ERFNet pipeline: model construction, data
    loading, optimization, per-epoch training/validation, checkpointing and
    TensorBoard logging.
    """

    def __init__(self, config):
        super().__init__(config)

        # Optionally warm-start the encoder from an ImageNet-pretrained ERFNet.
        self.logger.info("Loading encoder pretrained in imagenet...")
        if self.config.pretrained_encoder:
            pretrained_enc = torch.nn.DataParallel(ERFNet(self.config.imagenet_nclasses)).cuda()
            pretrained_enc.load_state_dict(torch.load(self.config.pretrained_model_path)['state_dict'])
            # Unwrap the DataParallel container and keep only the encoder.
            pretrained_enc = next(pretrained_enc.children()).features.encoder
        else:
            pretrained_enc = None

        # Segmentation model, data loader and loss.
        self.model = ERF(self.config, pretrained_enc)
        self.data_loader = VOCDataLoader(self.config)
        self.loss = CrossEntropyLoss(self.config)

        # Optimizer.
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.config.learning_rate,
                                          betas=(self.config.betas[0], self.config.betas[1]),
                                          eps=self.config.eps,
                                          weight_decay=self.config.weight_decay)
        # Polynomial LR decay over max_epoch epochs.
        lambda1 = lambda epoch: pow((1 - ((epoch - 1) / self.config.max_epoch)), 0.9)
        self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda1)

        # Progress counters.
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_valid_mean_iou = 0

        # CUDA is used only when both available and requested by the config.
        self.is_cuda = torch.cuda.is_available()
        self.cuda = self.is_cuda & self.config.cuda
        if self.cuda:
            torch.cuda.manual_seed_all(self.config.seed)
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.config.seed)
            self.logger.info("Operation will be on *****CPU***** ")

        self.model = self.model.to(self.device)
        self.loss = self.loss.to(self.device)

        # Resume from the latest checkpoint if one exists.
        self.load_checkpoint(self.config.checkpoint_file)

        # Tensorboard Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir, comment='FCN8s')

    def save_checkpoint(self, filename='checkpoint.pth.tar', is_best=0):
        """
        Save the latest training checkpoint.

        :param filename: filename which will contain the state
        :param is_best: flag, true if this is the best model so far
        :return:
        """
        state = {
            'epoch': self.current_epoch + 1,
            'iteration': self.current_iteration,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
        }
        torch.save(state, self.config.checkpoint_dir + filename)
        # Keep a separate copy of the best model.
        if is_best:
            shutil.copyfile(self.config.checkpoint_dir + filename,
                            self.config.checkpoint_dir + 'model_best.pth.tar')

    def load_checkpoint(self, filename):
        """Restore model/optimizer state from *filename*; start fresh if absent.

        :param filename: checkpoint file name (relative to checkpoint_dir)
        """
        filename = self.config.checkpoint_dir + filename
        try:
            self.logger.info("Loading checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)
            self.current_epoch = checkpoint['epoch']
            self.current_iteration = checkpoint['iteration']
            self.model.load_state_dict(checkpoint['state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.logger.info("Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {})\n"
                             .format(self.config.checkpoint_dir, checkpoint['epoch'], checkpoint['iteration']))
        except OSError:
            self.logger.info("No checkpoint exists from '{}'. Skipping...".format(self.config.checkpoint_dir))
            self.logger.info("**First time to train**")

    def run(self):
        """
        Entry point: dispatch to train() or test() according to config.mode.

        :return:
        """
        assert self.config.mode in ['train', 'test', 'random']
        try:
            if self.config.mode == 'test':
                self.test()
            else:
                self.train()
        except KeyboardInterrupt:
            self.logger.info("You have entered CTRL+C.. Wait to finalize")

    def train(self):
        """
        Main training loop with per-epoch validation and best-model saving.

        BUG FIX: the scheduler was stepped twice per epoch — once with the
        epoch index and once with the *validation loss*.  LambdaLR interprets
        its argument as an epoch index, so the loss-based call corrupted the
        learning-rate schedule; it has been removed.
        """
        for epoch in range(self.current_epoch, self.config.max_epoch):
            self.current_epoch = epoch
            self.scheduler.step(epoch)
            self.train_one_epoch()
            valid_mean_iou, valid_loss = self.validate()
            is_best = valid_mean_iou > self.best_valid_mean_iou
            if is_best:
                self.best_valid_mean_iou = valid_mean_iou
            self.save_checkpoint(is_best=is_best)

    def train_one_epoch(self):
        """
        One-epoch training pass: forward, loss, backward, metric accumulation.

        BUG FIX: ``.cuda(async=...)`` is a SyntaxError on Python >= 3.7
        (``async`` became a keyword); the parameter was renamed to
        ``non_blocking`` in PyTorch 0.4.
        """
        tqdm_batch = tqdm(self.data_loader.train_loader, total=self.data_loader.train_iterations,
                          desc="Epoch-{}-".format(self.current_epoch))
        # Put the model in training mode (affects batchnorm/dropout).
        self.model.train()
        # Running averages for loss and IoU metrics.
        epoch_loss = AverageMeter()
        metrics = IOUMetric(self.config.num_classes)
        for x, y in tqdm_batch:
            if self.cuda:
                x = x.pin_memory().cuda(non_blocking=self.config.async_loading)
                y = y.cuda(non_blocking=self.config.async_loading)
            x, y = Variable(x), Variable(y)
            # forward
            pred = self.model(x)
            # loss
            cur_loss = self.loss(pred, y)
            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during training...')
            # backward + optimize
            self.optimizer.zero_grad()
            cur_loss.backward()
            self.optimizer.step()

            epoch_loss.update(cur_loss.item())
            _, pred_max = torch.max(pred, 1)
            metrics.add_batch(pred_max.data.cpu().numpy(), y.data.cpu().numpy())
            self.current_iteration += 1

        epoch_acc, _, epoch_iou_class, epoch_mean_iou, _ = metrics.evaluate()
        self.summary_writer.add_scalar("epoch-training/loss", epoch_loss.val, self.current_iteration)
        self.summary_writer.add_scalar("epoch_training/mean_iou", epoch_mean_iou, self.current_iteration)
        tqdm_batch.close()
        print("Training Results at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.val) + " - acc-: " + str(
            epoch_acc) + "- mean_iou: " + str(epoch_mean_iou) + "\n iou per class: \n" + str(
            epoch_iou_class))

    def validate(self):
        """
        One-epoch validation pass.

        :return: (mean IoU, mean loss) for the validation set
        """
        tqdm_batch = tqdm(self.data_loader.valid_loader, total=self.data_loader.valid_iterations,
                          desc="Valiation at -{}-".format(self.current_epoch))
        # Put the model in evaluation mode (freezes batchnorm statistics).
        self.model.eval()
        epoch_loss = AverageMeter()
        metrics = IOUMetric(self.config.num_classes)
        # No gradients are needed during validation (saves memory/compute).
        with torch.no_grad():
            for x, y in tqdm_batch:
                if self.cuda:
                    # Same async -> non_blocking fix as in train_one_epoch.
                    x = x.pin_memory().cuda(non_blocking=self.config.async_loading)
                    y = y.cuda(non_blocking=self.config.async_loading)
                x, y = Variable(x), Variable(y)
                pred = self.model(x)
                cur_loss = self.loss(pred, y)
                if np.isnan(float(cur_loss.item())):
                    raise ValueError('Loss is nan during Validation.')
                _, pred_max = torch.max(pred, 1)
                metrics.add_batch(pred_max.data.cpu().numpy(), y.data.cpu().numpy())
                epoch_loss.update(cur_loss.item())

        epoch_acc, _, epoch_iou_class, epoch_mean_iou, _ = metrics.evaluate()
        self.summary_writer.add_scalar("epoch_validation/loss", epoch_loss.val, self.current_iteration)
        self.summary_writer.add_scalar("epoch_validation/mean_iou", epoch_mean_iou, self.current_iteration)
        print("Validation Results at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.val) + " - acc-: " + str(
            epoch_acc) + "- mean_iou: " + str(epoch_mean_iou) + "\n iou per class: \n" + str(
            epoch_iou_class))
        tqdm_batch.close()
        return epoch_mean_iou, epoch_loss.val

    def test(self):
        # TODO: inference-only mode is not implemented yet.
        pass

    def finalize(self):
        """
        Finalize the agent: save a last checkpoint, flush TensorBoard scalars
        and close the data loader.

        :return:
        """
        print("Please wait while finalizing the operation.. Thank you")
        self.save_checkpoint()
        self.summary_writer.export_scalars_to_json("{}all_scalars.json".format(self.config.summary_dir))
        self.summary_writer.close()
        self.data_loader.finalize()
|
#!/usr/bin/env python3
import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math
import math
from glob import glob
from UrlUtils import UrlUtils
from subprocess import check_call, CalledProcessError
from datetime import datetime
import xml.etree.cElementTree as ET
from xml.etree.ElementTree import Element, SubElement
from zipfile import ZipFile
import extract_alos2_md
from create_input_xml import create_input_xml
log_format = "[%(asctime)s: %(levelname)s/%(funcName)s] %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger('create_alos2_ifg.log')
BASE_PATH = os.path.dirname(__file__)
IMG_RE=r'IMG-(\w{2})-ALOS(\d{6})(\d{4})-*'
def create_product(id):
    # TODO: product creation is not implemented yet; only alos2_packaging()
    # does real work downstream.
    pass
def alos2_packaging(id):
    """Package the ALOS2 product ``<id>.nc``.

    Rewrites ``alos2_groups.json`` (from the template in BASE_PATH) with the
    product filename, then invokes ``alos2_packaging.py`` and chdirs back to
    the work directory.
    """
    alos2_prod_file = "{}.nc".format(id)
    with open(os.path.join(BASE_PATH, "alos2_groups.json")) as f:
        alos2_cfg = json.load(f)
    alos2_cfg['filename'] = alos2_prod_file
    with open('alos2_groups.json', 'w') as f:
        json.dump(alos2_cfg, f, indent=2, sort_keys=True)
    alos2_cmd = [
        "{}/alos2_packaging.py".format(BASE_PATH)
    ]
    alos2_cmd_line = " ".join(alos2_cmd)
    logger.info("Calling alos2_packaging.py: {}".format(alos2_cmd_line))
    # BUG FIX: this was `check_call(also2_cmd_line, ...)` — a typo'd name that
    # raised NameError at runtime.
    check_call(alos2_cmd_line, shell=True)
    # chdir back up to work directory
    # NOTE(review): `cwd` is a module global set only in the __main__ block —
    # confirm this function is never called from another entry point.
    os.chdir(cwd)  # create standard product packaging
def get_pol_value(pp):
    """Map a raw polarization code to its canonical value ("VV" or "HH").

    Raises RuntimeError for any code outside the two known families.
    """
    if pp in ("SV", "DV", "VV"):
        return "VV"
    if pp in ("DH", "SH", "HH", "HV"):
        return "HH"
    raise RuntimeError("Unrecognized polarization: %s" % pp)
def get_pol_frame_info(slc_dir):
    """Scan the IMG-* files in *slc_dir* and return (polarization, frames).

    The polarization is the single canonical value shared by all IMG files;
    an Exception is raised if more than one is present.  Frames is the list
    of distinct frame numbers.
    """
    pols = []
    frames = []
    img_regex = re.compile(IMG_RE)
    for img_path in glob(os.path.join(slc_dir, "IMG-*")):
        match = img_regex.search(img_path)
        pols.append(get_pol_value(match.group(1).upper()))
        frames.append(int(match.group(3)))
        print("{} : {} : {}".format(img_path, match.group(1), match.group(3)))
    pols = list(set(pols))
    if len(pols) > 1:
        print("Error : More than one polarization value in {} : {}".format(slc_dir, pols))
        raise Exception("More than one polarization value in {} : {}".format(slc_dir, pols))
    return pols[0], list(set(frames))
def fileContainsMsg(file_name, msg):
    """Search *file_name* for the first line containing *msg*.

    Returns ``(True, line)`` for the first match (line keeps its newline),
    or ``(False, None)`` when the message never appears.
    """
    with open(file_name, 'r') as fh:
        for line in fh:
            if msg in line:
                return True, line
    return False, None
def checkBurstError():
    """Scan alos2app.log for known fatal burst errors.

    Raises RuntimeError with the offending log line if either known error
    message is found; returns silently otherwise.
    """
    found, line = fileContainsMsg(
        "alos2app.log", "cannot continue for interferometry applications")
    if found:
        logger.info("checkBurstError : %s" % line.strip())
        raise RuntimeError(line.strip())
    # Only reached when the first message was absent.
    found, line = fileContainsMsg(
        "alos2app.log", "Exception: Could not determine a suitable burst offset")
    if found:
        logger.info("Found Error : %s" % line)
        raise RuntimeError(line.strip())
def updateXml(xml_file):
    """Rewrite the 'file_name' property of an ISCE image XML in place.

    Each ``<value>`` under a ``<property name="file_name">`` element is
    prefixed with the XML file's own directory, so the referenced data file
    resolves regardless of the current working directory.
    """
    logging.info(xml_file)
    base_dir = os.path.split(xml_file)[0]
    tree = ET.parse(xml_file)
    root = tree.getroot()
    for prop in root:
        if prop.tag != 'property':
            continue
        if prop.attrib.get('name') == 'file_name':
            for child in prop:
                if child.tag == 'value':
                    child.text = os.path.join(base_dir, child.text)
                    logging.info(child.text)
    logging.info(tree)
    ET.ElementTree(root).write(xml_file)
def get_SNWE_bbox(bbox):
    """Compute the SNWE extent of a polygon given as [[lon, lat], ...] pairs.

    Delegates to get_SNWE() with the min/max longitude and latitude.
    """
    lons = [point[0] for point in bbox]
    lats = [point[1] for point in bbox]
    return get_SNWE(min(lons), max(lons), min(lats), max(lats))
def get_SNWE(min_lon, max_lon, min_lat, max_lat):
    """Round a lon/lat bounding box outward to whole degrees.

    South/west are floored and north/east are ceiled so the integer box
    always contains the input box.  Returns ``("S N W E", [S, N, W, E])``.
    """
    south = int(math.floor(min_lat))
    north = int(math.ceil(max_lat))
    west = int(math.floor(min_lon))
    east = int(math.ceil(max_lon))
    snwe_arr = [south, north, west, east]
    return "{} {} {} {}".format(south, north, west, east), snwe_arr
def run_command(cmd):
    """Join *cmd* tokens into one shell line, log it, and execute it.

    Raises CalledProcessError when the command exits non-zero.
    """
    command = " ".join(cmd)
    logging.info("calling : {}".format(command))
    check_call(command, shell=True)
def move_dem_separate_dir(dir_name):
    # Fallback for unknown DEM types: sweep both SRTM-style (demLat*) and
    # NED-style (stitched.*, *DEM.vrt) outputs into dir_name.
    move_dem_separate_dir_SRTM(dir_name)
    move_dem_separate_dir_NED(dir_name)
def move_dem_separate_dir_SRTM(dir_name):
    """Move SRTM stitcher outputs (demLat*) into *dir_name* (best effort)."""
    logger.info("move_dem_separate_dir_SRTM : %s" % dir_name)
    create_dir(dir_name)
    cmd_line = " ".join(["mv", "demLat*", dir_name])
    logger.info("Calling {}".format(cmd_line))
    call_noerr(cmd_line)
def move_dem_separate_dir_NED(dir_name):
    """Move NED stitcher outputs (stitched.*, *DEM.vrt) into *dir_name*."""
    logger.info("move_dem_separate_dir_NED : %s" % dir_name)
    create_dir(dir_name)
    # Two glob patterns are swept in the same order as before.
    for pattern in ("stitched.*", "*DEM.vrt"):
        cmd_line = " ".join(["mv", pattern, dir_name])
        logger.info("Calling {}".format(cmd_line))
        call_noerr(cmd_line)
def create_dir(dir_name):
    """Create *dir_name* if it does not already exist; never raise.

    IMPROVEMENT: the original shelled out to ``mkdir`` via call_noerr, which
    is fragile (unquoted paths, spawns a shell).  ``os.makedirs`` does the
    same job in-process; failures are logged and swallowed to preserve the
    old best-effort behavior.  (A dead commented-out "wipe existing dir"
    variant was removed.)
    """
    if not os.path.isdir(dir_name):
        logger.info("create_dir : creating {}".format(dir_name))
        try:
            os.makedirs(dir_name)
        except OSError as e:
            # Match call_noerr semantics: warn and continue.
            logger.warning("create_dir failed for {}: {}".format(dir_name, e))
def call_noerr(cmd):
    """Run *cmd* through the shell; log (but swallow) any failure.

    IMPROVEMENT: ``Logger.warn`` is a deprecated alias — ``Logger.warning``
    is the documented method.
    """
    try:
        check_call(cmd, shell=True)
    except Exception as e:
        logger.warning("Got exception running {}: {}".format(cmd, str(e)))
        logger.warning("Traceback: {}".format(traceback.format_exc()))
def download_dem(SNWE):
    """Download and prepare the DEMs needed for ALOS2 processing.

    Stitches a full-resolution SRTM3 DEM over the SNWE bounding box via
    ISCE's dem.py, moves the outputs into a type-specific directory, fixes
    the file paths inside the image XMLs, then downsamples to a 3-arcsec
    geocoding DEM.

    :param SNWE: "S N W E" string of integer degrees (see get_SNWE)
    :return: (preprocess_dem_file, geocode_dem_file,
              preprocess_dem_xml, geocode_dem_xml)
    """
    # Credentials and endpoints for the DEM services.
    uu = UrlUtils()
    dem_user = uu.dem_u
    dem_pass = uu.dem_p
    srtm3_dem_url = uu.dem_url
    ned1_dem_url = uu.ned1_dem_url
    ned13_dem_url = uu.ned13_dem_url
    # NOTE(review): dem_type is hard-coded to SRTM3 here, so the NED branches
    # below are currently dead — confirm whether NED support is still wanted.
    dem_type_simple = "SRTM3"
    preprocess_dem_dir="preprocess_dem"
    geocode_dem_dir="geocode_dem"
    dem_type = "SRTM3"
    wd = os.getcwd()
    logging.info("Working Directory : {}".format(wd))
    dem_url = srtm3_dem_url
    # Stitch the full-resolution (1 arcsec sampling flag "-s 1") DEM.
    dem_cmd = [
        "{}/applications/dem.py".format(os.environ['ISCE_HOME']), "-a",
        "stitch", "-b", "{}".format(SNWE),
        "-k", "-s", "1", "-f", "-c", "-n", dem_user, "-w", dem_pass,
        "-u", dem_url
    ]
    dem_cmd_line = " ".join(dem_cmd)
    logging.info("Calling dem.py: {}".format(dem_cmd_line))
    check_call(dem_cmd_line, shell=True)
    preprocess_dem_file = glob("*.dem.wgs84")[0]
    #cmd= ["rm", "*.zip", *.dem *.dem.vrt *.dem.xml"
    #check_call(cmd, shell=True)
    # Move the stitcher outputs into e.g. "SRTM3_preprocess_dem".
    preprocess_dem_dir = "{}_{}".format(dem_type_simple, preprocess_dem_dir)
    logger.info("dem_type : %s preprocess_dem_dir : %s" %(dem_type, preprocess_dem_dir))
    if dem_type.startswith("NED"):
        move_dem_separate_dir_NED(preprocess_dem_dir)
    elif dem_type.startswith("SRTM"):
        move_dem_separate_dir_SRTM(preprocess_dem_dir)
    else:
        move_dem_separate_dir(preprocess_dem_dir)
    preprocess_dem_file = os.path.join(preprocess_dem_dir, preprocess_dem_file)
    logger.info("Using Preprocess DEM file: {}".format(preprocess_dem_file))
    #preprocess_dem_file = os.path.join(wd, glob("*.dem.wgs84")[0])
    #logging.info("preprocess_dem_file : {}".format(preprocess_dem_file))
    # fix file path in Preprocess DEM xml
    fix_cmd = [
        "{}/applications/fixImageXml.py".format(os.environ['ISCE_HOME']),
        "-i", preprocess_dem_file, "--full"
    ]
    fix_cmd_line = " ".join(fix_cmd)
    logger.info("Calling fixImageXml.py: {}".format(fix_cmd_line))
    check_call(fix_cmd_line, shell=True)
    preprocess_vrt_file=""
    if dem_type.startswith("SRTM"):
        preprocess_vrt_file = glob(os.path.join(preprocess_dem_dir, "*.dem.wgs84.vrt"))[0]
    elif dem_type.startswith("NED1"):
        preprocess_vrt_file = os.path.join(preprocess_dem_dir, "stitched.dem.vrt")
        logger.info("preprocess_vrt_file : %s"%preprocess_vrt_file)
    else: raise RuntimeError("Unknown dem type %s." % dem_type)
    if not os.path.isfile(preprocess_vrt_file):
        # NOTE(review): this log line never substitutes the filename (missing
        # %-argument) and does NOT actually exit — confirm intended behavior.
        logger.info("%s does not exists. Exiting")
    preprocess_dem_xml = glob(os.path.join(preprocess_dem_dir, "*.dem.wgs84.xml"))[0]
    logging.info("preprocess_dem_xml : {}".format(preprocess_dem_xml))
    updateXml(preprocess_dem_xml)
    # Coarse (3 arcsec) DEM for geocoding, downsampled from the preprocess VRT.
    geocode_dem_dir = os.path.join(preprocess_dem_dir, "Coarse_{}_preprocess_dem".format(dem_type_simple))
    create_dir(geocode_dem_dir)
    '''
    os.chdir(geocode_dem_dir)
    dem_cmd = [
        "/usr/local/isce/isce/applications/dem.py", "-a",
        "stitch", "-b", "{}".format(SNWE),
        "-k", "-s", "3", "-f", "-c", "-n", dem_user, "-w", dem_pass,
        "-u", dem_url
    ]
    dem_cmd_line = " ".join(dem_cmd)
    logging.info("Calling dem.py: {}".format(dem_cmd_line))
    check_call(dem_cmd_line, shell=True)
    '''
    dem_cmd = [
        "{}/applications/downsampleDEM.py".format(os.environ['ISCE_HOME']), "-i",
        "{}".format(preprocess_vrt_file), "-rsec", "3"
    ]
    dem_cmd_line = " ".join(dem_cmd)
    logger.info("Calling downsampleDEM.py: {}".format(dem_cmd_line))
    check_call(dem_cmd_line, shell=True)
    geocode_dem_file = ""
    logger.info("geocode_dem_dir : {}".format(geocode_dem_dir))
    if dem_type.startswith("SRTM"):
        geocode_dem_file = glob(os.path.join(geocode_dem_dir, "*.dem.wgs84"))[0]
    elif dem_type.startswith("NED1"):
        geocode_dem_file = os.path.join(geocode_dem_dir, "stitched.dem")
    logger.info("Using Geocode DEM file: {}".format(geocode_dem_file))
    # NOTE(review): checkBurstError() reads alos2app.log, which does not exist
    # yet at DEM-download time — confirm this call belongs here.
    checkBurstError()
    # fix file path in Geocoding DEM xml
    fix_cmd = [
        "{}/applications/fixImageXml.py".format(os.environ['ISCE_HOME']),
        "-i", geocode_dem_file, "--full"
    ]
    fix_cmd_line = " ".join(fix_cmd)
    logger.info("Calling fixImageXml.py: {}".format(fix_cmd_line))
    check_call(fix_cmd_line, shell=True)
    geocode_dem_xml = glob(os.path.join(geocode_dem_dir, "*.dem.wgs84.xml"))[0]
    os.chdir(wd)
    cmd= ["pwd"]
    cmd_line = " ".join(cmd)
    check_call(cmd_line, shell=True)
    return preprocess_dem_file, geocode_dem_file, preprocess_dem_xml, geocode_dem_xml
def unzip_slcs(slcs):
    """Extract each SLC archive into its destination directory.

    *slcs* maps destination directory -> zip file path.
    """
    for dest_dir, zip_path in slcs.items():
        logging.info("Unzipping {} in {}".format(zip_path, dest_dir))
        with ZipFile(zip_path, 'r') as archive:
            archive.extractall(dest_dir)
        logging.info("Removing {}.".format(zip_path))
        # Deletion of the source zip is intentionally disabled.
        #try: os.unlink(v)
        #except: pass
def main():
    ''' Run the install '''
    # End-to-end ALOS2 interferogram driver: install step, context load,
    # metadata extraction, DEM download, alos2app/ion processing, packaging.
    wd = os.getcwd()
    # Run the bundled install script from BASE_PATH/src, then return.
    new_dir= "{}/src".format(BASE_PATH)
    logging.info(new_dir)
    os.chdir(new_dir)
    cmd = "./install.sh"
    os.system(cmd)
    os.chdir(wd)
    cmd= ["pwd"]
    run_command(cmd)
    ''' Get the informations from _context file '''
    ctx_file = os.path.abspath('_context.json')
    if not os.path.exists(ctx_file):
        raise RuntimeError("Failed to find _context.json.")
    with open(ctx_file) as f:
        ctx = json.load(f)
    # save cwd (working directory)
    complete_start_time=datetime.now()
    logger.info("Alos2 start Time : {}".format(complete_start_time))
    # Job parameters from the HySDS context.
    dem_type = ctx['dem_type']
    reference_slc = ctx['reference_product']    # NOTE: currently unused below
    secondary_slc = ctx['secondary_product']    # NOTE: currently unused below
    SNWE = ctx['SNWE']
    # The SLC data is expected to be pre-extracted into these directories.
    ref_data_dir = os.path.join(wd, "reference")
    sec_data_dir = os.path.join(wd, "secondary")
    os.chdir(wd)
    ''' Extrach Reference SLC Metadata'''
    ref_insar_obj = extract_alos2_md.get_alos2_obj(ref_data_dir)
    extract_alos2_md.create_alos2_md_isce(ref_insar_obj, "ref_alos2_md.json")
    #extract_alos2_md.create_alos2_md_bos(ref_data_dir, "ref_alos2_md2.json")
    ''' Extrach Reference SLC Metadata'''
    # (Secondary SLC metadata, despite the copy-pasted header string above.)
    sec_insar_obj = extract_alos2_md.get_alos2_obj(sec_data_dir)
    extract_alos2_md.create_alos2_md_isce(sec_insar_obj, "sec_alos2_md.json")
    #extract_alos2_md.create_alos2_md_bos(sec_data_dir, "sec_alos2_md2.json")
    with open("ref_alos2_md.json") as f:
        ref_md = json.load(f)
    with open("sec_alos2_md.json") as f:
        sec_md = json.load(f)
    # Recompute SNWE from the reference footprint (overrides the ctx value).
    ref_bbox = ref_md['geometry']['coordinates'][0]
    SNWE, snwe_arr = get_SNWE_bbox(ref_bbox)
    #SNWE = "14 25 -109 -91"
    logging.info("snwe_arr : {}".format(snwe_arr))
    logging.info("SNWE : {}".format(SNWE))
    preprocess_dem_file, geocode_dem_file, preprocess_dem_xml, geocode_dem_xml = download_dem(SNWE)
    ''' This is already done, so commenting it for now
    slcs = {"reference" : "0000230036_001001_ALOS2227337160-180808.zip", "secondary" : "0000230039_001001_ALOS2235617160-181003.zip"}
    unzip_slcs(slcs)
    '''
    # ScanSAR processing parameters for the alos2app template.
    ifg_type = "scansar"
    xml_file = "alos2app_scansar.xml"
    tmpl_file = "alos2app_scansar.xml.tmpl"
    start_subswath = 1
    end_subswath = 5
    burst_overlap = 85.0
    ref_pol, ref_frame_arr = get_pol_frame_info(ref_data_dir)
    sec_pol, sec_frame_arr = get_pol_frame_info(sec_data_dir)
    if ref_pol != sec_pol:
        raise Exception("REF Pol : {} is different than SEC Pol : {}".format(ref_pol, sec_pol))
    '''
    Logic for Fram datas
    '''
    # Instantiate the alos2app input XML from the template.
    tmpl_file = os.path.join(BASE_PATH, tmpl_file)
    print(tmpl_file)
    create_input_xml(tmpl_file, xml_file,
                     str(ref_data_dir), str(sec_data_dir),
                     str(preprocess_dem_file), str(geocode_dem_file), start_subswath, end_subswath, burst_overlap,
                     str(ref_pol), str(ref_frame_arr), str(sec_pol), str(sec_frame_arr), snwe_arr)
    alos2_start_time=datetime.now()
    logger.info("ALOS2 Start Time : {}".format(alos2_start_time))
    # Three-stage ISCE processing: up to coherence, ionosphere, then filter.
    cmd = ["python3", "{}/scripts/alos2app.py".format(BASE_PATH), "-i", "{}".format(xml_file), "-e", "coherence"]
    run_command(cmd)
    cmd = ["python3", "{}/scripts/ion.py".format(BASE_PATH), "-i", "{}".format(xml_file)]
    run_command(cmd)
    cmd = ["python3", "{}/scripts/alos2app.py".format(BASE_PATH), "-i", "{}".format(xml_file), "-s", "filter"]
    run_command(cmd)
    # Build the product id from the DEM type and a timestamp, then package.
    dt_string = datetime.now().strftime("%d%m%YT%H%M%S")
    id = "ALOS2_{}_{}".format(dem_type, dt_string)
    create_product(id)
    alos2_packaging(id)
if __name__ == '__main__':
    complete_start_time=datetime.now()
    # NOTE(review): message says "End Time" but this is logged at startup,
    # and "TopsApp" looks copy-pasted from a Sentinel-1 job — confirm.
    logger.info("TopsApp End Time : {}".format(complete_start_time))
    # `cwd` is the module global that alos2_packaging() chdirs back to.
    cwd = os.getcwd()
    # The context is loaded here only to fail fast; main() re-loads it.
    ctx_file = os.path.abspath('_context.json')
    if not os.path.exists(ctx_file):
        raise RuntimeError("Failed to find _context.json.")
    with open(ctx_file) as f:
        ctx = json.load(f)
    main()
|
""" Research module.
.. note::
    This module requires the `multiprocess package <http://multiprocess.rtfd.io/>`_.
"""
from .domain import Alias, Domain, Option, ConfigAlias
from .named_expr import E, EC, O, EP, R
from .research import Research
from .results import ResearchResults
from .experiment import Experiment, Executor
from .utils import get_metrics, convert_research_results
|
import sys
from tracing.graph import Graph
# Require exactly one argument: the path to the graph definition file.
if len(sys.argv) != 2:
    print("Please provide the path to the graph definition text file (and nothing else)")
    # BUG FIX: `exit` is an interactive-session helper injected by the `site`
    # module; scripts should use sys.exit.
    sys.exit(1)
path = sys.argv[1]
# BUG FIX: `path` was assigned but open() re-read sys.argv[1]; use the variable.
with open(path, 'r') as file:
    graph_definition = file.read()
graph = Graph(graph_definition)
# Answers to the ten tracing exercises.
print(f'1. {graph.total_avg_latency("A-B-C")}')
print(f'2. {graph.total_avg_latency("A-D")}')
print(f'3. {graph.total_avg_latency("A-D-C")}')
print(f'4. {graph.total_avg_latency("A-E-B-C-D")}')
print(f'5. {graph.total_avg_latency("A-E-D")}')
print(f'6. {graph.number_of_traces("C", "C", 3)}')
print(f'7. {graph.number_of_traces("A", "C", 4, 4)}')
print(f'8. {graph.shortest_trace("A", "C")}')
print(f'9. {graph.shortest_trace("B", "B")}')
print(f'10. {graph.number_of_traces_shorter("C", "C", 30)}')
|
# %%
import torch
from UnarySim.stream.gen import RNG, SourceGen, BSGen
from UnarySim.stream.shuffle import Uni2Bi
from UnarySim.metric.metric import ProgError
import matplotlib.pyplot as plt
import time
# %%
# Demo notebook-style script: validate the Uni2Bi unipolar-to-bipolar
# bitstream converter against a float reference.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# %%
# Stream configuration: Sobol RNG, 1024 values quantized to 8 bits.
rng = "Sobol"
in_dim = 1024
bitwidth = 8
in_mode = "unipolar"
out_mode = "bipolar"
stype = torch.float
btype = torch.float
rtype = torch.float
uUni2Bi = Uni2Bi(stype=stype).to(device)
# Random inputs quantized to the 2**bitwidth grid in [0, 1).
iVec = ((torch.rand(in_dim)*(2**bitwidth)).round()/(2**bitwidth)).to(device)
start_time = time.time()
# The reference "computation" is the identity (unipolar value unchanged,
# only reinterpreted in bipolar encoding).
oVec = iVec.type(torch.float)
# NOTE(review): elapsed time is multiplied by 2**bitwidth to make the
# float baseline comparable to the 2**bitwidth-cycle unary run — confirm.
print("--- %s seconds ---" % (((time.time() - start_time))*2**bitwidth))
print("input", iVec)
print("real output", oVec)
# Unary machinery: source values, RNG, bitstream generator, error monitors.
iVecSource = SourceGen(iVec, bitwidth=bitwidth, mode=in_mode, rtype=rtype)().to(device)
iVecRNG = RNG(bitwidth, 1, rng, rtype)().to(device)
iVecBS = BSGen(iVecSource, iVecRNG, stype).to(device)
iVecPE = ProgError(iVec, mode=in_mode).to(device)
oVecPE = ProgError(oVec, mode=out_mode).to(device)
with torch.no_grad():
    idx = torch.zeros(iVecSource.size()).type(torch.long).to(device)
    start_time = time.time()
    # Stream 2**bitwidth bits through the converter, tracking progressive error.
    for i in range((2**bitwidth)):
        iBS = iVecBS(idx + i)
        iVecPE.Monitor(iBS)
        oVecU = uUni2Bi(iBS)
        oVecPE.Monitor(oVecU)
print("--- %s seconds ---" % (time.time() - start_time))
# ProgError() returns (progressive value, progressive error).
print("final input error: ", min(iVecPE()[1]), max(iVecPE()[1]))
print("final output error:", min(oVecPE()[1]), max(oVecPE()[1]))
print("final output pp:", oVecPE()[0].data)
print("final output pe:", oVecPE()[1].data)
print("final output mean error:", oVecPE()[1].mean())
result_pe = oVecPE()[1].cpu().numpy()
# %%
# Error distribution histogram.
fig = plt.hist(result_pe, bins='auto')  # arguments are passed to np.histogram
plt.title("Histogram for final output error")
plt.show()
# %%
# Inspect the best/worst converted elements.
print(result_pe)
print(result_pe.argmin(), result_pe.argmax())
print(result_pe[result_pe.argmin()], result_pe[result_pe.argmax()])
print(iVec[result_pe.argmin()], iVec[result_pe.argmax()])
# %%
|
import json
import os
from pyecharts import options as opts
from pyecharts.charts import BMap, Page
from pyecharts.faker import Collector, Faker
from pyecharts.globals import BMapType
C = Collector()  # chart registry; the @C.funcs decorator below collects the demo factories
BAIDU_MAP_AK = os.environ.get("BAIDU_MAP_AK", "FAKE_AK")  # Baidu Maps API key, placeholder when unset
@C.funcs
def bmap_base() -> BMap:
    """Basic Baidu-map demo: one scatter series of Faker provinces/values."""
    chart = BMap()
    chart.add_schema(baidu_ak=BAIDU_MAP_AK, center=[120.13066322374, 30.240018034923])
    chart.add(
        "bmap",
        [list(pair) for pair in zip(Faker.provinces, Faker.values())],
        label_opts=opts.LabelOpts(formatter="{b}"),
    )
    chart.set_global_opts(title_opts=opts.TitleOpts(title="BMap-基本示例"))
    return chart
@C.funcs
def bmap_heatmap() -> BMap:
    """Baidu-map heatmap demo of Faker provinces/values, with a visual map control."""
    chart = BMap()
    chart.add_schema(baidu_ak=BAIDU_MAP_AK, center=[120.13066322374, 30.240018034923])
    chart.add(
        "bmap",
        [list(pair) for pair in zip(Faker.provinces, Faker.values())],
        type_="heatmap",
        label_opts=opts.LabelOpts(formatter="{b}"),
    )
    chart.set_global_opts(
        title_opts=opts.TitleOpts(title="BMap-热力图"),
        visualmap_opts=opts.VisualMapOpts(),
    )
    return chart
@C.funcs
def bmap_lines() -> BMap:
    """Polyline demo: draw Hangzhou walking tracks (from a fixtures JSON file)
    on a custom-styled Baidu map with map-type/scale/overview controls."""
    # Track data: list of line data pairs loaded from fixtures/hangzhou-tracks.json.
    with open(
        os.path.join("fixtures", "hangzhou-tracks.json"), "r", encoding="utf-8"
    ) as f:
        j = json.load(f)
    c = (
        BMap()
        .add_schema(
            baidu_ak=BAIDU_MAP_AK,
            center=[120.13066322374, 30.240018034923],
            zoom=14,
            is_roam=True,
            # Custom Baidu map style: grey water/buildings, hidden POIs/labels/transit.
            map_style={
                "styleJson": [
                    {
                        "featureType": "water",
                        "elementType": "all",
                        "stylers": {"color": "#d1d1d1"},
                    },
                    {
                        "featureType": "land",
                        "elementType": "all",
                        "stylers": {"color": "#f3f3f3"},
                    },
                    {
                        "featureType": "railway",
                        "elementType": "all",
                        "stylers": {"visibility": "off"},
                    },
                    {
                        "featureType": "highway",
                        "elementType": "all",
                        "stylers": {"color": "#fdfdfd"},
                    },
                    {
                        "featureType": "highway",
                        "elementType": "labels",
                        "stylers": {"visibility": "off"},
                    },
                    {
                        "featureType": "arterial",
                        "elementType": "geometry",
                        "stylers": {"color": "#fefefe"},
                    },
                    {
                        "featureType": "arterial",
                        "elementType": "geometry.fill",
                        "stylers": {"color": "#fefefe"},
                    },
                    {
                        "featureType": "poi",
                        "elementType": "all",
                        "stylers": {"visibility": "off"},
                    },
                    {
                        "featureType": "green",
                        "elementType": "all",
                        "stylers": {"visibility": "off"},
                    },
                    {
                        "featureType": "subway",
                        "elementType": "all",
                        "stylers": {"visibility": "off"},
                    },
                    {
                        "featureType": "manmade",
                        "elementType": "all",
                        "stylers": {"color": "#d1d1d1"},
                    },
                    {
                        "featureType": "local",
                        "elementType": "all",
                        "stylers": {"color": "#d1d1d1"},
                    },
                    {
                        "featureType": "arterial",
                        "elementType": "labels",
                        "stylers": {"visibility": "off"},
                    },
                    {
                        "featureType": "boundary",
                        "elementType": "all",
                        "stylers": {"color": "#fefefe"},
                    },
                    {
                        "featureType": "building",
                        "elementType": "all",
                        "stylers": {"color": "#d1d1d1"},
                    },
                    {
                        "featureType": "label",
                        "elementType": "labels.text.fill",
                        "stylers": {"color": "#999999"},
                    },
                ]
            },
        )
        # Render all tracks as one large polyline series.
        .add(
            "",
            type_="lines",
            data_pair=j,
            is_polyline=True,
            is_large=True,
            linestyle_opts=opts.LineStyleOpts(color="purple", opacity=0.6, width=1),
        )
        .add_control_panel(
            maptype_control_opts=opts.BMapTypeControl(
                type_=BMapType.MAPTYPE_CONTROL_DROPDOWN
            ),
            scale_control_opts=opts.BMapScaleControlOpts(),
            overview_map_opts=opts.BMapOverviewMapControlOpts(is_open=True),
        )
        .set_global_opts(title_opts=opts.TitleOpts(title="BMap-杭州热门步行路线"))
    )
    return c
# Build every registered chart and render them all into one HTML page.
Page().add(*[fn() for fn, _ in C.charts]).render()
|
#!/usr/bin/env python
from graphslam.robot import Robot
import random
import math
import matplotlib.pyplot as plt
import seaborn as sns
class World(object):
    """Square 2D world for GraphSLAM data generation: scatters random landmarks
    and records a robot's noisy measurements and movements."""
    def __init__(self, world_size, num_landmarks):
        # world_size: side length of the square world
        # num_landmarks: how many landmarks to scatter
        self.world_size = world_size
        self.num_landmarks = num_landmarks
        # Fixed step length for each move; heading is re-randomized per attempt.
        self.distance = 20.0
        self.landmarks = None  # list of [x, y], filled by _make_landmarks()
        self.data = None       # list of [measurement, [dx, dy]], filled by make_data()
    def _make_landmarks(self):
        """ Makes landmarks at random locations in world based on defined number of landmarks """
        self.landmarks = []
        for i in range(self.num_landmarks):
            self.landmarks.append([round(random.random() * self.world_size),
                                   round(random.random() * self.world_size)])
    def _compute_dx_dy(self):
        """ Compute distances in x and y coordinates based on random rotation """
        orientation = random.random() * 2.0 * math.pi
        dx = math.cos(orientation) * self.distance
        dy = math.sin(orientation) * self.distance
        return dx, dy
    def make_data(self, steps, measurement_range, motion_noise, measurement_noise):
        """ Makes the data based on Robot repeated sensing and random movement inside world,
        stops when all landmarks are measured. Data is stored in shape [measurement, [dx, dy]],
        measurement is [landmark_id, x_distance, y_distance], dx and dy represents Robot movement
        at every step """
        print('Generating world...')
        robot = Robot(self.world_size, measurement_range, motion_noise, measurement_noise)
        self._make_landmarks()
        complete = False
        # Retry whole runs until every landmark was sensed at least once.
        while not complete:
            self.data = []
            seen = [False for _ in range(self.num_landmarks)]
            dx, dy = self._compute_dx_dy()
            for step in range(steps-1):
                measurement = robot.sense(self.landmarks)
                for i in range(len(measurement)):
                    seen[measurement[i][0]] = True
                # Re-roll the heading until the move stays inside the world.
                while not robot.move(dx, dy):
                    dx, dy = self._compute_dx_dy()
                self.data.append([measurement, [dx, dy]])
            complete = (sum(seen) == self.num_landmarks)
        print('\nTrue positions:\n')
        print(f'Landmarks: {self.landmarks}')
        print(robot)
    def display_world(self, robot_positions, landmark_positions):
        """ Plots the estimated Robot movement path and estimated landmark positions in world """
        sns.set_style('dark')
        ax = plt.gca()
        cols, rows = self.world_size + 1, self.world_size + 1
        ax.set_xticks([x for x in range(1, cols)], minor=True)
        ax.set_yticks([y for y in range(1, rows)], minor=True)
        # Plot grid on minor and major axes, major in larger width
        # plt.grid(which='minor', ls='-', lw=1, color='white')
        plt.grid(which='major', ls='-', lw=1.5, color='white')
        # Iterate over robot positions and plot the path and last location
        if len(robot_positions) > 1:
            for i in range(len(robot_positions) - 1):
                dx = robot_positions[i + 1][0] - robot_positions[i][0]
                dy = robot_positions[i + 1][1] - robot_positions[i][1]
                ax.arrow(robot_positions[i][0], robot_positions[i][1], dx, dy, head_width=1.5,
                         length_includes_head=True, color='gray')
            ax.text(robot_positions[-1][0], robot_positions[-1][1], 'o', ha='center', va='center',
                    color='r', fontsize=30)
        else:
            ax.text(robot_positions[-1][0], robot_positions[-1][1], 'o', ha='center', va='center',
                    color='r', fontsize=30)
        # Iterate over landmark positions and plot them on map
        for pos in landmark_positions:
            ax.text(pos[0], pos[1], 'x', ha='center', va='center', color='purple', fontsize=20)
        plt.rcParams["figure.figsize"] = (10, 10)
        plt.title('Robot and Landmark positions')
        plt.show()
|
import os
from conans import ConanFile, CMake
class Recipe(ConanFile):
    """Conan recipe for example01: consumes protobuf as both a regular and a
    build requirement (cross-build scenario) and builds the sources with CMake."""
    name = "example01"
    settings = "os", "arch", "compiler", "build_type"
    generators = "cmake", "cmake_find_package"
    exports = "*"
    requires = "protobuf/0.1@xbuild/scenario"
    build_requires = "protobuf/0.1@xbuild/scenario"
    def build(self):
        # Log environment
        self.output.info(">>>> os.environ[PATH]={}".format(os.environ.get("PATH")))
        self.output.info(">>>> os.environ[DYLD_LIBRARY_PATH]={}".format(os.environ.get("DYLD_LIBRARY_PATH")))
        # PATH will be available inside CMake, but we cannot propagate DYLD_LIBRARY_PATH because of SIP, alternatives:
        # + propagate another var and transform
        # + write values to a script (virtualrunenv) and wrap calls with it
        # Compile!
        cmake = CMake(self)
        # Hand the dynamic-loader path to CMake through a cache variable instead.
        cmake.definitions["CONAN_BUILD_DYLD"] = os.environ.get("DYLD_LIBRARY_PATH")
        cmake.configure()
        cmake.build()
|
from . import startup
flaskapp = startup()  # NOTE(review): `startup` is imported from the package root — presumably a factory returning the Flask app; confirm it is a callable, not a submodule
# Ref: https://graphics.stanford.edu/~mdfisher/cloth.html
import taichi as ti
ti.init(arch=ti.cuda)
# Air resistance
A = 0.01
# Rest lengths: massLengthA for axis-aligned springs, massLengthB for diagonals.
massLengthA = 0.1
massLengthB = ti.sqrt(2 * massLengthA * massLengthA)
# Spring stiffness and mass per cloth point.
massK = 50000.0
pointMass = 0.2
# Cloth grid: widthSize x heightSize cells, two triangles per cell.
widthSize,heightSize = 127, 127
faceSize = widthSize * heightSize * 2
pointSize = (widthSize + 1) * (heightSize + 1)
# Per-point simulation state, stored as taichi fields on the device.
pointLocation = ti.Vector.field(3, dtype=ti.f32, shape=pointSize)
pointVelocity = ti.Vector.field(3, dtype=ti.f32, shape=pointSize)
pointForce = ti.Vector.field(3, dtype=ti.f32, shape=pointSize)
Idx = ti.Vector.field(3, dtype=ti.i32, shape=faceSize)  # triangle vertex indices
vUV = ti.Vector.field(2, dtype=ti.f32, shape=pointSize)  # per-vertex texture coords
# Y Forward
G = ti.Vector([0.0, 0.0, -9.8], dt=ti.f32)      # gravity (z is up)
Wind = ti.Vector([0.3, 0.0, 0.0], dt=ti.f32)    # constant wind along +x
@ti.func
def pointID(x,y):
    # Flatten grid coordinates (x, y) into a linear point index; -1 when off-grid.
    R = -1
    if 0 <= x and x <= widthSize and 0 <= y and y <= heightSize:
        R = y * (widthSize + 1) + x
    return R
def pointIDPy(x, y):
    """Python-scope twin of `pointID`: flatten (x, y) grid coordinates into a
    linear point index, or return -1 when the coordinates fall off the grid."""
    if 0 <= x <= widthSize and 0 <= y <= heightSize:
        return y * (widthSize + 1) + x
    return -1
@ti.func
def pointCoord(ID):
    # Inverse of pointID: linear index -> (x, y) grid coordinates.
    return (ID % (widthSize + 1), ID // (widthSize + 1))
@ti.func
def massID(ID):
    # IDs of the 8 spring neighbors of a point: entries 0-3 are axis-aligned
    # neighbors (rest length massLengthA), 4-7 are diagonal neighbors
    # (rest length massLengthB); -1 marks an off-grid neighbor.
    R = ti.Vector([-1, -1, -1, -1, -1, -1, -1, -1], dt=ti.i32)
    x,y = pointCoord(ID)
    R[0],R[1] = pointID(x-1, y),pointID(x+1, y)
    R[2],R[3] = pointID(x, y-1),pointID(x, y+1)
    R[4],R[5] = pointID(x-1,y-1),pointID(x+1,y+1)
    R[6],R[7] = pointID(x-1,y+1),pointID(x+1,y-1)
    return R
@ti.kernel
def InitTi():
    # Lay the cloth out in the x=0 plane, hanging down from z=10, at rest,
    # with UVs spanning [0, 1] across the grid.
    for i in range(pointSize):
        x,y = pointCoord(i)
        pointLocation[i] = (0, y * massLengthA, 10 - x * massLengthA)
        pointVelocity[i] = (0, 0, 0)
        vUV[i][1] = 1.0 - ti.cast(x, ti.f32) / widthSize
        vUV[i][0] = ti.cast(y, ti.f32) / heightSize
@ti.kernel
def ComputeForce():
    # Accumulate spring + gravity + wind + drag forces for every cloth point.
    for i in pointForce:
        pointForce[i] = (0, 0, 0)
        Dirs = massID(i)
        # Neighbors 0-3: axis-aligned structural springs (rest length massLengthA).
        for j in ti.static(range(0,4)):
            if not Dirs[j] == -1:
                Dir = pointLocation[Dirs[j]] - pointLocation[i]
                # Hooke's law along the spring direction.
                pointForce[i] += (Dir.norm() - massLengthA) * massK * Dir / Dir.norm()
        # Neighbors 4-7: diagonal shear springs (rest length massLengthB).
        for j in ti.static(range(4,8)):
            if not Dirs[j] == -1:
                Dir = pointLocation[Dirs[j]] - pointLocation[i]
                pointForce[i] += (Dir.norm() - massLengthB) * massK * Dir / Dir.norm()
        pointForce[i] += G * pointMass + Wind
        # NOTE(review): v*v squares each component and so drops the sign — as
        # written this "air resistance" does not oppose the motion direction;
        # confirm whether -A * v * |v| was intended.
        pointForce[i] += A * pointVelocity[i] * pointVelocity[i]
        x,y = pointCoord(i)
        # Pin the x == 0 column: it is driven kinematically in Forward().
        if not x:
            pointForce[i] = (0, 0, 0)
@ti.kernel
def Forward(T: ti.f32, SumT: ti.f32):
    # Explicit Euler step of size T; SumT is the total elapsed simulation time.
    for i in range(pointSize):
        pointVelocity[i] += T * pointForce[i] / pointMass
        pointLocation[i] += T * pointVelocity[i]
        x,y = pointCoord(i)
        # The pinned x == 0 column rotates rigidly about the cloth's mid-height
        # point; the sweep angle saturates at one full turn (2*pi).
        if not x:
            Angle = min(SumT, 3.1415926 * 2)
            cP = ti.Vector([0.0, 0.5 * heightSize * massLengthA])
            dX = 0.0 - cP[0]
            dY = y * massLengthA - cP[1]
            pointLocation[i][0] = dX * ti.cos(Angle) - dY * ti.sin(Angle) + cP[0]
            pointLocation[i][1] = dY * ti.cos(Angle) + dX * ti.sin(Angle) + cP[1]
def Init():
    """Initialize device-side point state and build the triangle index buffer.

    Each grid cell (gx, gy) is split into two triangles sharing its diagonal.
    """
    InitTi()
    face = 0
    for gx in range(widthSize):
        for gy in range(heightSize):
            # Four corners of the current cell.
            a = pointIDPy(gx, gy)
            b = pointIDPy(gx + 1, gy)
            c = pointIDPy(gx, gy + 1)
            d = pointIDPy(gx + 1, gy + 1)
            # Two triangles: (a, b, c) and (d, c, b).
            Idx[face][0], Idx[face][1], Idx[face][2] = a, b, c
            Idx[face + 1][0], Idx[face + 1][1], Idx[face + 1][2] = d, c, b
            face += 2
def Step(SumT):
    """Advance the simulation by 50 substeps of dt = 1e-5 s.

    Returns the accumulated simulation time so the caller can thread it through.
    """
    dt = 1e-5
    for _ in range(50):
        ComputeForce()
        Forward(dt, SumT)
        SumT += dt
    return SumT
def Export(frameIndex: int):
    """Dump the current cloth state to 'S_<frame>.obj' (positions, UVs, faces)."""
    positions = pointLocation.to_numpy()
    faces = Idx.to_numpy()
    uvs = vUV.to_numpy()
    with open('S_%03d.obj'%(frameIndex), 'w') as obj_file:
        # Vertex positions.
        for p in range(pointSize):
            obj_file.write('v %.4f %.4f %.4f\n'%(positions[p,0],positions[p,1],positions[p,2]))
        # Texture coordinates.
        for p in range(pointSize):
            obj_file.write('vt %.4f %.4f\n'%(uvs[p,0],uvs[p,1]))
        # Faces: OBJ indices are 1-based, each vertex reuses its index for the UV.
        for f in range(faceSize):
            a,b,c = faces[f,0]+1,faces[f,1]+1,faces[f,2]+1
            obj_file.write('f %d/%d %d/%d %d/%d\n'%(a,a,b,b,c,c))
    print('Frame >> %03d'%(frameIndex))
def main():
    """Run the cloth simulation until interrupted, exporting one OBJ per 60 frames."""
    Init()
    frame = 0
    elapsed = 0.0
    try:
        while True:
            elapsed = Step(elapsed)
            frame += 1
            if frame % 60 == 0:
                Export(frame // 60)
    except Exception as err:
        # Best-effort shutdown: report the error instead of crashing.
        print(err)
# Script entry point.
if __name__=='__main__':
    main()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 by
# Sergiy Gogolenko <gogolenko@hlrs.de> HLRS
# Fabio Saracco <fabio@imt.it> IMT
# All rights reserved.
#
# Authors: Sergiy Gogolenko <gogolenko@hlrs.de>
# Fabio Saracco <fabio@imt.it>
"""
.. module:: sn4sp
:platform: Unix, Windows
:synopsis: Similarity Network 4 Synthetic Population : Network reconstruction with Lin similarity and geo-damping
"""
from __future__ import division, absolute_import, print_function
__author__ = '\n'.join( ['Sergiy Gogolenko <gogolenko@hlrs.de>',
'Fabio Saracco <fabio@imt.it>'] )
import os
import sys
import logging
import datetime
import argparse
from mpi4py import MPI
# TODO: remove in alpha release
sys.path.insert( 0, os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))) )
import sn4sp
from sn4sp import readwrite
def get_arguments():
    """ Get the argument from the command line.
    By default, we use exponential damping and half-length scale set to 5 km.

    :returns: :class:`argparse.Namespace` with fields ``input``, ``output``,
        ``hss``, ``damping``, ``sample_fraction`` and ``num_agents``.
    """
    parser = argparse.ArgumentParser(description="Similarity Network 4 Synthetic Population calculator")
    parser.add_argument( "input", metavar="HDF5_FILE",
                         nargs='?', type=str,
                         help="input HDF5 file with synthetic population",
                         default=os.path.join(os.getcwd(), 'synthetic_population_ppd.h5') )
    parser.add_argument( "-o", "--output",
                         dest="output", type=str,
                         help="output HDF5 file with synthesized network",
                         default=os.getcwd() )
    parser.add_argument( "-hss", "--half-sim-scale",
                         dest="hss", type=float,
                         help="half-similarity scale",
                         default=5000 )
    parser.add_argument( "-d", "--damping",
                         dest="damping", type=float,
                         help="damping function",
                         default=0.0 )
    parser.add_argument( "-p", "--sampling-percentage",
                         dest="sample_fraction", type=float,
                         help="fraction of the sample (stripe size) for the parallel similarity calculation",
                         default=0.1 )
    # Fixed typo in the user-visible help text ("maxim" -> "maximum").
    parser.add_argument( "-n", "--num-agents",
                         dest="num_agents", type=int,
                         help="maximum size of the population (if input file has more records, it will be truncated)",
                         default=0 )
    return parser.parse_args()
def main():
    """Driver: configure per-rank logging, parse CLI args, build the similarity
    network from the synthetic population, and write edge probabilities to HDF5.

    :returns: 0 on success.
    :raises ValueError: if the output path's directory does not exist.
    """
    # Set up logger: tag every record with this process's MPI rank and world size.
    logger_fmt='%(asctime)s [process_id={0:03}:{1}] %(message)s'.format(MPI.COMM_WORLD.Get_rank(), MPI.COMM_WORLD.Get_size())
    if True: # use stdout logger
        log_root=logging.getLogger()
        log_root.setLevel(logging.DEBUG)
        log_channel=logging.StreamHandler(sys.stdout)
        log_channel.setLevel(logging.DEBUG)
        log_channel.setFormatter(logging.Formatter(fmt=logger_fmt, datefmt=':%Y-%m-%d %H:%M:%S'))
        log_root.addHandler(log_channel)
    else: # use stderr logger
        logging.basicConfig(format=logger_fmt, datefmt=':%Y-%m-%d %H:%M:%S', level=logging.INFO)
    # Handle command line arguments
    args=get_arguments()
    output_filename=os.path.abspath(args.output)
    if os.path.isdir(output_filename):
        # Output is a directory: synthesize a file name from the run parameters.
        output_filename=os.path.join(args.output, 'synthetic_network_hss_{0}_d_{1}.h5'.format(args.hss,args.damping))
    elif not os.path.isdir(os.path.dirname(output_filename)):
        # BUG FIX: was '.fortmat(...)', which raised AttributeError instead of
        # the intended ValueError with a formatted message.
        raise ValueError( "Invalid output path '{0}'".format(output_filename) )
    # Read input synthetic population and produce similarity network object out of it
    sim_net=readwrite.read_attr_table_h5( args.input, truncate=args.num_agents,
                                          hss=args.hss, damping=args.damping, sample_fraction=args.sample_fraction )
    # Compute similarity network edge probabilities and store in HDF5 edgelist file
    start_time=MPI.Wtime()
    readwrite.write_edges_probabilities_h5( sim_net, output_filename, chunk_len=int(1e4) )
    elapsed_time=MPI.Wtime() - start_time
    logging.info( 'total elapsed time={0}'.format(datetime.timedelta(seconds=elapsed_time)) )
    return 0
if __name__ == "__main__":
main()
|
# Generated by Django 2.2 on 2020-01-25 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Contact.Message timestamps managed by Django:
    created_at is set once on insert, updated_at refreshed on every save."""
    dependencies = [
        ('Contact', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='message',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='message',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
from django.shortcuts import render
import service
from constants import beanstalkd_description, beanstalkd_enabled
# Create your views here.
def index(request):
    """Render the beanstalkd server statistics page.

    Only stats whitelisted in `beanstalkd_enabled` are shown, each paired with
    its human-readable label from `beanstalkd_description`.
    """
    stats = service.server_stat()
    display_stats = []
    for k, v in stats.items():
        if k in beanstalkd_enabled:
            display_stats.append(
                {
                    'key': k,
                    'value': v,
                    'display': beanstalkd_description[k]
                })
    return render(request, "bskwatcher/index.html", {'stats': display_stats})
def tube_display(request, t):
    """Render the detail page for a single beanstalkd tube.

    :param t: tube name captured from the URL.
    """
    # BUG FIX: was a Python 2 print statement ('print t'), a syntax error on
    # Python 3; the function-call form works on both interpreters.
    print(t)
    return render(request, "bskwatcher/tube.html", {})
|
from math import floor
class Crosshairs(object):
    """A single vertical cursor line on a matplotlib axes."""

    def __init__(self, ax, visible=True):
        """Create the cursor at x = 0 on *ax*, optionally hidden."""
        self.ax = ax
        self.vl = ax.axvline(0, color='0.7', ls=':')
        self.vl.set_visible(visible)

    def get_xpos(self):
        """Return the cursor's x position, or None while it is hidden."""
        if not self.vl.get_visible():
            return None
        return self.vl.get_xdata()[0]

    def set_visible(self, visible=True):
        """Show or hide the cursor line."""
        self.vl.set_visible(visible)

    def set_xdata(self, x):
        """Move the cursor to x (the line needs both endpoints set)."""
        self.vl.set_xdata([x, x])
class CrosshairsManual(object):
    """Vertical cursor plus a marker point whose y position can be constrained
    to a sampled curve (``y_constraint``), for manual landmark placement."""
    iswarpingenabled = False
    threshold = 25 #distance threshold for selection (pixels)
    markeredgecolor_inactive = 'r'
    markerfacecolor_inactive = 'w'
    markeredgecolor_active = 'g'
    markerfacecolor_active = (0.5, 1, 0.5)
    def __init__(self, ax, visible=True, y_constraint=None):
        # Q: number of samples in the y-constraint curve (None until one is set).
        self.Q = None
        self.ax = ax
        self.vl = ax.axvline(0, color='0.7', ls=':', zorder=0)
        self.h = ax.plot(0, 0, 'o', ms=8, zorder=4)[0]
        self.xpix = None
        self.ypix = None
        # BUG FIX: y_constraint was accepted but silently discarded, so a later
        # set_xdata() raised AttributeError unless set_y_constraint() had been
        # called first.  Store/apply it here.
        self.y_constraint = None
        if y_constraint is not None:
            self.set_y_constraint(y_constraint)
        self.set_warp_active(False)
        self.set_visible(visible)
    def enable_warping(self, enable=True):
        """Toggle warping mode; the marker is hidden either way."""
        self.iswarpingenabled = enable
        self.h.set_visible(False)
    def get_position_pixels(self):
        """Return the (x, y) pixel coordinates recorded via set_pixel_coordinates."""
        return self.xpix, self.ypix
    def get_x_position(self):
        """Return the marker's x data coordinate."""
        return self.h.get_xdata()[0]
    def get_y_position(self):
        """Return the marker's y data coordinate."""
        return self.h.get_ydata()[0]
    def get_y_position_constrained(self):
        """Look up the constraint curve at the marker's current x index."""
        ind = self.h.get_xdata()[0]
        return self.y_constraint[ind]
    def set_marker_color(self, color, facecolor):
        self.h.set_color( color )
        self.h.set_markerfacecolor( facecolor )
    def set_pixel_coordinates(self, x, y):
        self.xpix = x
        self.ypix = y
    def set_marker_visible(self, visible=True):
        # NOTE(review): the marker is always hidden here regardless of
        # `visible`, matching set_visible/enable_warping — confirm intent.
        self.h.set_visible( False )
    def set_warp_active(self, active=True):
        """Switch marker colors between the active (green) and inactive (red) scheme."""
        c0 = self.markeredgecolor_active if active else self.markeredgecolor_inactive
        c1 = self.markerfacecolor_active if active else self.markerfacecolor_inactive
        self.set_marker_color(c0, c1)
    def set_visible(self, visible=True):
        self.vl.set_visible(visible)
        self.h.set_visible(False)
    def set_xdata(self, x):
        """Move the cursor to x; snap the marker onto the constraint curve if set."""
        self.vl.set_xdata([x,x])
        if self.y_constraint is not None:
            # Clamp the floored x into the valid sample range [0, Q-1].
            ind = min( max(0, floor(x)), self.Q-1)
            y = self.y_constraint[ind]
            self.h.set_data( [ind], [y] )
    def set_y_constraint(self, y):
        """Install a sampled curve (array of y values indexed by x) and re-snap."""
        self.Q = y.size
        self.y_constraint = y
        self.set_xdata( self.h.get_xdata()[0] )
|
import json
from django.shortcuts import render
from django.contrib import messages
import geojson
from tethys_sdk.routing import controller
from tethys_sdk.gizmos import MapView, MVView, MVDraw, MVLayer, MVLegendClass
from .common import docs_endpoint
def get_geoserver_wms():
    """
    Try to get the built in geoserver wms for this installation if possible.
    Otherwise point at the chpc geoserver.
    """
    # Currently hard-wired to the public demo GeoServer WMS endpoint.
    wms_endpoint = 'https://demo.geo-solutions.it/geoserver/wms'
    return wms_endpoint
@controller
def map_view(request):
    """
    Controller for the Map View page.

    Builds a MapView gizmo with GeoJSON, WMS, KML and ArcGIS REST layers,
    and echoes any geometry the user drew back onto the drawing layer.
    """
    # Define view options
    view_options = MVView(
        projection='EPSG:4326',
        center=[-100, 40],
        zoom=3.5,
        maxZoom=18,
        minZoom=2
    )
    # Define drawing options
    drawing_options = MVDraw()
    # Define GeoJSON layer (sample point/line/polygon in web-mercator coords)
    geojson_object = {
        'type': 'FeatureCollection',
        'crs': {
            'type': 'name',
            'properties': {
                'name': 'EPSG:3857'
            }
        },
        'features': [
            {
                'type': 'Feature',
                'geometry': {
                    'type': 'Point',
                    'coordinates': [0, 0]
                }
            },
            {
                'type': 'Feature',
                'geometry': {
                    'type': 'LineString',
                    'coordinates': [[4e6, -2e6], [8e6, 2e6]]
                }
            },
            {
                'type': 'Feature',
                'geometry': {
                    'type': 'Polygon',
                    'coordinates': [[[-5e6, -1e6], [-4e6, 1e6], [-3e6, -1e6]]]
                }
            }
        ]
    }
    # Define layers
    map_layers = []
    # Per-geometry-type OpenLayers styles for the GeoJSON layer.
    style_map = {
        'Point': {'ol.style.Style': {
            'image': {'ol.style.Circle': {
                'radius': 5,
                'fill': {'ol.style.Fill': {
                    'color': 'red',
                }},
                'stroke': {'ol.style.Stroke': {
                    'color': 'red',
                    'width': 2
                }}
            }}
        }},
        'LineString': {'ol.style.Style': {
            'stroke': {'ol.style.Stroke': {
                'color': 'green',
                'width': 3
            }}
        }},
        'Polygon': {'ol.style.Style': {
            'stroke': {'ol.style.Stroke': {
                'color': 'blue',
                'width': 1
            }},
            'fill': {'ol.style.Fill': {
                'color': 'rgba(0, 0, 255, 0.1)'
            }}
        }},
    }
    geojson_layer = MVLayer(
        source='GeoJSON',
        options=geojson_object,
        layer_options={'style_map': style_map},
        legend_title='Test GeoJSON',
        legend_extent=[-46.7, -48.5, 74, 59],
        legend_classes=[
            MVLegendClass('polygon', 'Polygons', fill='rgba(0, 0, 255, 0.1)', stroke='blue'),
            MVLegendClass('line', 'Lines', stroke='green'),
            MVLegendClass('point', 'Points', fill='red')
        ]
    )
    map_layers.append(geojson_layer)
    if get_geoserver_wms():
        # Define GeoServer Layer (built but not added — see commented append below)
        geoserver_layer = MVLayer(
            source='ImageWMS',
            options={'url': get_geoserver_wms(),
                     'params': {'LAYERS': 'topp:states'},
                     'serverType': 'geoserver'},
            legend_title='USA Population',
            legend_extent=[-126, 24.5, -66.2, 49],
            legend_classes=[
                MVLegendClass('polygon', 'Low Density', fill='#00ff00', stroke='#000000'),
                MVLegendClass('polygon', 'Medium Density', fill='#ff0000', stroke='#000000'),
                MVLegendClass('polygon', 'High Density', fill='#0000ff', stroke='#000000')
            ]
        )
        # map_layers.append(geoserver_layer)
    # Define KML Layer
    kml_layer = MVLayer(
        source='KML',
        options={'url': '/static/gizmo_showcase/data/model.kml'},
        legend_title='Park City Watershed',
        legend_extent=[-111.60, 40.57, -111.43, 40.70],
        legend_classes=[
            MVLegendClass('polygon', 'Watershed Boundary', fill='#ff8000'),
            MVLegendClass('line', 'Stream Network', stroke='#0000ff'),
        ]
    )
    map_layers.append(kml_layer)
    # Tiled ArcGIS REST Layer
    arc_gis_layer = MVLayer(
        source='TileArcGISRest',
        options={'url': 'http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/' +
                        'Specialty/ESRI_StateCityHighway_USA/MapServer'},
        legend_title='ESRI USA Highway',
        legend_extent=[-173, 17, -65, 72]
    )
    map_layers.append(arc_gis_layer)
    # Define map view options
    map_view_options = MapView(
        height='600px',
        width='100%',
        controls=['ZoomSlider', 'Rotate', 'FullScreen',
                  {'MousePosition': {'projection': 'EPSG:4326'}},
                  {'ZoomToExtent': {'projection': 'EPSG:4326', 'extent': [-130, 22, -65, 54]}}],
        layers=map_layers,
        view=view_options,
        basemap='OpenStreetMap',
        draw=drawing_options,
        legend=True
    )
    # Get the geometry drawn by the user
    submitted_geometry = request.POST.get('geometry', None)
    # Convert GeometryCollection into a FeatureCollection if given
    if submitted_geometry:
        geojson_objs = geojson.loads(submitted_geometry)
        # Create a Feature for each geometry
        features = []
        for geometry in geojson_objs.geometries:
            # NOTE(review): assumes each geometry is dict-like and may carry a
            # non-standard 'properties' member — confirm against MVDraw output.
            properties = geometry.pop('properties', [])
            features.append({
                'type': 'Feature',
                'geometry': geometry,
                'properties': properties
            })
        # Create FeatureCollection wrapper with list of features
        feature_collection = {
            'type': 'FeatureCollection',
            'features': features
        }
        # Set initial features on drawing layer (as geojson string)
        drawing_options.initial_features = json.dumps(feature_collection)
        # Log in alert message
        messages.success(request, "Geometry added to the map successfully.")
    context = {
        'docs_endpoint': docs_endpoint,
        'map_view': map_view_options
    }
    return render(request, 'gizmo_showcase/map_view.html', context)
|
import utils
import numpy as np
import csv
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import LambdaCallback,EarlyStopping,ModelCheckpoint
#%% single node sensing
# hyperparameters
lr = 0.0003
drop_ratio = 0.2
sample_length = 128
max_epoch = 100
batch_size = 200
patience = 8
# load data: IQ samples per node, labels, and per-sample SNRs
dataset,labelset,SNR = utils.radioml_IQ_CO_data('pkl_data/'+str(sample_length)+'_co.pkl')
total_group = dataset.shape[0]
nodes = dataset.shape[1]
total_num = total_group*nodes
snrs = np.linspace(-20,19,40)
snrs = np.array(snrs,dtype='int16')
snr_type = len(snrs)
# load single model (pre-trained per-node detector)
model_single = load_model('result/models/DetectNet/'+str(sample_length)+'/final.h5')
flatten_dataset = np.reshape(dataset,(total_num,2,sample_length))
predictions = model_single.predict(flatten_dataset,verbose=1)
decisions = np.argmax(predictions,axis=1)
# Second half of the data is noise-only: false-alarm probability from it.
noise_decisions = decisions[total_num//2:]
pf = 1 - np.mean(noise_decisions)
signal_decisions = np.reshape(decisions[:total_num//2],(snr_type,total_num//2//snr_type)) # grouped by average SNR
pd_list = np.zeros((snr_type,1))
i = 0
# Detection probability per SNR bin for the single-node detector.
while i < snr_type:
    pd_list[i] = 1 - np.mean(signal_decisions[i])
    i = i + 1
pd_list = np.append(pd_list,pf)
with open('result/xls/SoftCombinationNet/Pds.xls','w') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(pd_list)
#%% cooperative sensing
noise_decisions_groups = np.reshape(noise_decisions,(noise_decisions.shape[0]//nodes,nodes))
# 1000 is the number of samples per specific modulation scheme and snr
signal_decisions_groups = np.reshape(signal_decisions,(snr_type,1000,nodes))
# Logical OR fusion rule
error = 0
for group in noise_decisions_groups:
    error = error + int(np.sum(group) < nodes)
pf_hard = error / (total_group//2)
pd_hard_list = np.zeros((snr_type,1))
i = 0
# Detection probability per SNR bin under hard (OR) decision fusion.
while i < snr_type:
    snr_decisions_groups = signal_decisions_groups[i]
    correct = 0
    for group in snr_decisions_groups:
        correct = correct + int(np.sum(group) < nodes)
    pd_hard_list[i] = correct / len(snr_decisions_groups)
    i = i + 1
pd_hard_list = np.append(pd_hard_list,pf_hard)
with open('result/xls/SoftCombinationNet/Pds_hard.xls','w') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(pd_hard_list)
# SoftCombinationNet: fuse the per-node softmax outputs with a trained network.
softmax_dataset = np.reshape(predictions,(total_group, nodes, 2))
shuffle_idx = np.random.choice(range(0,total_group), size=total_group,replace=False)
softmax_dataset = softmax_dataset[shuffle_idx]
SNR = SNR[shuffle_idx]
softmax_labelset = labelset[shuffle_idx]
# 60/20/20 train/validation/test split.
co_x_train = softmax_dataset[:int(total_group*0.6)]
co_y_train = softmax_labelset[:int(total_group*0.6)]
co_x_val = softmax_dataset[int(total_group*0.6):int(total_group*0.8)]
co_y_val = softmax_labelset[int(total_group*0.6):int(total_group*0.8)]
co_x_test = softmax_dataset[int(total_group*0.8):]
co_y_test = softmax_labelset[int(total_group*0.8):]
val_SNRs = SNR[int(total_group*0.6):int(total_group*0.8)]
test_SNRs = SNR[int(total_group*0.8):]
input_shape = (nodes,2)
model_co = utils.SoftCombinationNet(lr,input_shape,drop_ratio)
early_stopping = EarlyStopping(monitor='val_loss',patience=patience)
best_model_path = 'result/models/SoftCombinationNet/best.h5'
checkpointer = ModelCheckpoint(best_model_path,verbose=1,save_best_only=True)
model_co.fit(co_x_train,co_y_train,epochs=max_epoch,batch_size=batch_size,verbose=1,shuffle=True,
             validation_data=(co_x_val, co_y_val),
             callbacks=[early_stopping,checkpointer])
model_co = load_model(best_model_path)
# Target false-alarm window (percent) monitored after each epoch during fine-tuning.
pf_min = 1.5
pf_max = 2.5
pf_test = LambdaCallback(
    on_epoch_end=lambda epoch,
    logs: utils.get_pf(co_x_val,co_y_val,val_SNRs,model_co,epoch,pf_min,pf_max))
model_co.fit(co_x_train,co_y_train,epochs=max_epoch,batch_size=batch_size,verbose=1,shuffle=True,
             callbacks=[pf_test])
utils.performance_evaluation('result/xls/SoftCombinationNet/Pds_soft.xls',co_x_test,co_y_test,test_SNRs,model_co)
##
import matplotlib.pyplot as plt
import numpy as np
# Plot sin(x) on [0, 5] with a grid and save the figure to test.pdf.
x=np.linspace(0,5)
y=np.sin(x)
plt.plot(x,y)
plt.grid()
plt.savefig('test.pdf')
|
from numpy import hstack
from numpy.random import normal
import numpy as np
# Generate two lists of values with different scale and loc using normal distribution
X1 = np.round(normal(loc=10, scale=2.2, size=6),2)
X2 = np.round(normal(loc=70, scale=2.5, size=6),2)
X = hstack((X1, X2))
X = X.reshape((len(X), 1))  # column vector, shape (12, 1)
#Compute the likelihood that each point belong to a group
#Compute the likelihood that each point belong to a group
def likelihoodMeasureByGaussian(sd, m, X):
    """Gaussian likelihood p(x | group) for every point in X.

    :param sd: standard deviation of the group's Gaussian
    :param m: mean of the group's Gaussian
    :param X: array-like of points, shape (n,) or (n, 1)
    :returns: 1-D numpy array of length n with the normal pdf at each point
    """
    x = np.asarray(X, dtype=float).ravel()
    # Vectorized normal pdf: replaces the original per-element Python loop
    # with one numpy expression (identical values, O(n) C-level ops).
    return (1 / np.sqrt(2 * np.pi * sd * sd)) * np.exp(-((x - m) ** 2 / (2 * sd * sd)))
#computer Posterior Probability
#computer Posterior Probability
def posteriorProbability(X, likelihoodMForA, likelihoodMForB, priorProbForA, priorProbForB):
    """Bayes rule: posterior membership probabilities for every point.

    :param X: the data points (kept for interface compatibility; only the
        likelihood arrays are actually needed)
    :param likelihoodMForA: per-point likelihoods P(x|A)
    :param likelihoodMForB: per-point likelihoods P(x|B)
    :param priorProbForA: prior P(A)
    :param priorProbForB: prior P(B)
    :returns: tuple (posteriorProbA, posteriorProbB) of 1-D arrays with
        P(A|x) and P(B|x); the two sum to 1 elementwise.
    """
    likeA = np.asarray(likelihoodMForA, dtype=float)
    likeB = np.asarray(likelihoodMForB, dtype=float)
    # Vectorized Bayes rule replaces the two original index loops.
    evidence = likeB * priorProbForB + likeA * priorProbForA
    posteriorProbB = likeB * priorProbForB / evidence
    posteriorProbA = 1 - posteriorProbB
    return (posteriorProbA, posteriorProbB)
#Computer standard deviation
#Computer standard deviation
def standardDev(X, meanA, posteriorProbA):
    """Responsibility-weighted standard deviation (EM M-step) for one group.

    :param X: points, shape (n,) or (n, 1)
    :param meanA: current mean of the group
    :param posteriorProbA: per-point responsibilities P(group|x)
    :returns: scalar weighted standard deviation
    """
    x = np.asarray(X, dtype=float).ravel()
    w = np.asarray(posteriorProbA, dtype=float)
    # Vectorized weighted variance replaces the original list-comprehension sum.
    variance = np.dot((x - meanA) ** 2, w) / np.sum(w)
    return np.sqrt(variance)
#compute mean
#compute mean
def mean(X, posteriorProbA):
    """Responsibility-weighted mean (EM M-step) for one group.

    Note: the name shadows no builtin but mirrors statistics.mean; kept for
    compatibility with the calling script.

    :param X: points, shape (n,) or (n, 1)
    :param posteriorProbA: per-point responsibilities P(group|x)
    :returns: scalar weighted mean
    """
    x = np.asarray(X, dtype=float).ravel()
    w = np.asarray(posteriorProbA, dtype=float)
    # Vectorized dot product replaces the original list-comprehension sum.
    return np.dot(x, w) / np.sum(w)
# --- EM demo: two manual iterations of expectation-maximization for a 2-component
# --- Gaussian mixture (E-step: posteriors; M-step: means, std devs, priors).
#Assume there are two groups and their mean and standard deviation
meanA=2.0
stdevA=1.5
meanB=7.0
stdevB=2.0
#prior probability for Group A
priorProbForA=0.5
#prior probability for Group B
priorProbForB=0.5
# 1st iteration
#Compute the likelihood that each point belong to A i.e P(X/A)
likelihoodMForA=likelihoodMeasureByGaussian(stdevA,meanA,X)
#Compute the likelihood that each point belong to B i.e P(X/B)
likelihoodMForB=likelihoodMeasureByGaussian(stdevB,meanB,X)
#Compute the posterior prob. that each i.e P(A/X) and P(B/X)
(posteriorProbA,posteriorProbB)=posteriorProbability(X,likelihoodMForA,likelihoodMForB,priorProbForA,priorProbForB)
#Recompute the meanA for group A and meanB for group B
meanA=mean(X,posteriorProbA)
meanB=mean(X,posteriorProbB)
#Recompute the standardDev for group A and standardDev for group B
stdevA=standardDev(X,meanA,posteriorProbA)
stdevB=standardDev(X,meanB,posteriorProbB)
#update prior for A and B
priorProbForB=sum(posteriorProbB)/len(posteriorProbB)
priorProbForA=1-priorProbForB
##2nd iteration
#Compute the likelihood that each point belong to A i.e P(X/A)
likelihoodMForA=likelihoodMeasureByGaussian(stdevA,meanA,X)
#Compute the likelihood that each point belong to B i.e P(X/B)
likelihoodMForB=likelihoodMeasureByGaussian(stdevB,meanB,X)
#Compute the posterior prob. that each i.e P(A/X) and P(B/X)
(posteriorProbA,posteriorProbB)=posteriorProbability(X,likelihoodMForA,likelihoodMForB,priorProbForA,priorProbForB)
#Recompute the meanA for group A and meanB for group B
meanA=mean(X,posteriorProbA)
meanB=mean(X,posteriorProbB)
#Recompute the standardDev for group A and standardDev for group B
stdevA=standardDev(X,meanA,posteriorProbA)
stdevB=standardDev(X,meanB,posteriorProbB)
#update prior for A and B
priorProbForB=sum(posteriorProbB)/len(posteriorProbB)
priorProbForA=1-priorProbForB
#! /usr/bin/python
# Filename: using_mymodule.py
# Description: This script is used to test
# the module I wrote in mymodule.py
import mymodule
mymodule.sayHello() # using . to set which module to use
# BUG FIX: 'print mymodule.version' was a Python 2 print statement (a syntax
# error on Python 3); the function-call form behaves the same on both.
print(mymodule.version)
### Note: if using from mymodule import *, then we could
### use every function and varible without '.'
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
# import plotly.express as px
import pandas as pd
from data_link import *
from ecg_app import EcgApp
# Initial [x, y] display ranges: x in sample counts, y in raw signal units.
DISPLAY_RANGE_INIT = [
    [0, 100000],  # 100s
    [-4000, 4000]  # -4V to 4V
]
# Component IDs shared by layout and callbacks.
id_fig = 'figure'
id_d_range = '_display_range'
# Plotly figure configuration: strip rarely-used mode-bar buttons.
d_config = {
    'responsive': True,
    'scrollZoom': True,
    'modeBarButtonsToRemove': ['zoom2d', 'lasso2d', 'select2d', 'autoScale2d', 'toggleSpikelines',
                               'hoverClosestCartesian', 'hoverCompareCartesian'],
    'displaylogo': False
}
# Load the ECG record and prepare the figure for one lead.
ecg_app = EcgApp(__name__)
ecg_app.update_lead_options_disable_layout_figures(DATA_PATH.joinpath(record_nm))
idx_lead = 3
plot = ecg_app.add_plot(idx_lead)
fig = ecg_app.get_lead_fig(idx_lead)
app = dash.Dash(
    __name__
)
server = app.server
app.title = "development test run"
# Layout: a Store holding the current display range plus the lead graph.
app.layout = html.Div(children=[
    dcc.Store(id=id_d_range, data=DISPLAY_RANGE_INIT),
    dcc.Graph(
        id=id_fig,
        figure=fig,
        config=d_config
    )
])
@app.callback(
    Output(id_d_range, 'data'),
    [Input(id_fig, 'relayoutData')],
    [State(id_d_range, 'data')],
    prevent_initial_call=True)
def update_limits(relayout_data, d_range):
    """Persist the user's pan/zoom ranges (converted to sample counts) in the Store.

    :param relayout_data: plotly relayout event payload, or None
    :param d_range: current [[x0, x1], [y0, y1]] stored range
    :returns: the updated range; raises PreventUpdate for irrelevant events
    """
    # Simplified: the original 'if None ... elif is not None' pair was redundant,
    # and a dead 'd_range = DISPLAY_RANGE_INIT' assignment before PreventUpdate
    # had no observable effect (the local was discarded by the raise).
    if relayout_data is None:
        raise dash.exceptions.PreventUpdate
    if 'xaxis.range[0]' in relayout_data:
        d_range[0] = [
            ecg_app._time_str_to_sample_count(relayout_data['xaxis.range[0]']),
            ecg_app._time_str_to_sample_count(relayout_data['xaxis.range[1]'])
        ]
    elif 'yaxis.range[0]' in relayout_data:
        # NOTE(review): y values go through the same time-string converter as x,
        # which looks suspicious for a voltage axis — confirm against EcgApp.
        d_range[1] = [
            ecg_app._time_str_to_sample_count(relayout_data['yaxis.range[0]']),
            ecg_app._time_str_to_sample_count(relayout_data['yaxis.range[1]'])
        ]
    else:
        # Relayout events without range info (e.g. autosize) trigger no update.
        raise dash.exceptions.PreventUpdate
    return d_range
@app.callback(
    Output(id_fig, 'figure'),
    [Input(id_d_range, 'data')],
    prevent_initial_call=True)
def update_figure(d_range):
    """Redraw the lead figure whenever the stored display range changes."""
    # print("in create fig")
    # NOTE(review): t is computed but never used beyond the commented-out debug
    # print below — candidate for removal.
    t = pd.Timestamp(d_range[0][0])
    # print(t.microsecond / 500) # sample count
    ecg_app._display_range = d_range
    # print(ecg_app._display_range)
    return ecg_app.get_lead_fig(idx_lead)
if __name__ == "__main__":
app.run_server(debug=True)
|
import os
from glob import glob
from distutils.core import setup
from site import getusersitepackages
# Data files are installed under the current user's site-packages directory.
resource_dir = getusersitepackages()
def find_packages():
    """Discover importable packages below the current directory.

    Every ``__init__.py`` found recursively marks a package; its path is
    converted to a dotted package name (e.g. ``a/b/__init__.py`` -> ``a.b``).
    """
    def _to_package_name(init_path):
        # Strip the marker file, then turn path separators into dots.
        return init_path.replace('/__init__.py', '').replace('/', '.')

    return [_to_package_name(p) for p in glob('**/__init__.py', recursive=True)]
def find_data_files(ext):
    """Collect all files with extension *ext*, grouped by directory.

    :param ext: file extension without the leading dot, e.g. ``'yml'``
    :return: list of ``(install_dir, [files])`` tuples in the format
        expected by distutils' ``data_files`` argument
    """
    file_list = glob("**/*.%s" % ext, recursive=True)
    # Group files by their containing directory.  Idiom fix: setdefault
    # replaces the manual "if key in dict.keys()" membership dance.
    dir_file_dict = {}
    for path in file_list:
        directory = os.path.split(path)[0]
        dir_file_dict.setdefault(directory, []).append(path)
    # Each group is installed under the user's site-packages directory.
    return [
        (os.path.join(resource_dir, dir_), files)
        for dir_, files in dir_file_dict.items()
    ]
# If it needs to add more files, add lines below
files = find_data_files('yml')
# here

# NOTE(review): distutils is deprecated (removed from the stdlib in
# Python 3.12) — consider migrating to setuptools.setup.
setup(
    name='ml_keeker',
    version='1.0',
    description='Gauss ML Log event monitor',
    author='Minseo Gong',
    author_email='gutssoul1@gmail.com',
    packages=find_packages(),
    data_files=files
)
|
import sys
from rotor import Rotor
def main():
    """Command-line entry point: encrypt (-e), decrypt (-d), or self-test.

    Prints usage hints for -h/--help or any unrecognized invocation.
    """
    arguments = len(sys.argv) - 1
    # Bug fix: running with no arguments used to raise IndexError on
    # sys.argv[1] before any help could be printed.
    if arguments == 0:
        print('Try passing --help')
        return
    leftRotor = Rotor(0, 'abcdefghijklmnopqrstuvwxyz')
    if (sys.argv[1] == '-e' and arguments >= 2):
        rightRotor = Rotor(0, 'tlmvpcbsuofnaqdhweiyrjzxgk')
        ciphertext = encode(sys.argv[2], leftRotor, rightRotor)
        # Emit the ciphertext plus the final rotor order (the decode key).
        print(ciphertext + ' ' + ''.join(rightRotor.order))
    elif (sys.argv[1] == '-d' and arguments >= 3):
        rightRotor = Rotor(0, sys.argv[3])
        plaintext = decode(sys.argv[2], leftRotor, rightRotor)
        # decode() produces the text reversed; undo that for display.
        print(plaintext[::-1])
    elif (sys.argv[1] == '--test' and arguments == 1):
        testRun()
    elif (sys.argv[1] == '-h' or sys.argv[1] == '--help'):
        print('There are only three ways to run this program')
        print('\tEncrypt:\t -e <plaintext>')
        print('\tDecrypt:\t -d <ciphertext> <key>')
        # Bug fix: help text said "-test" but the parser expects "--test".
        print('\tTestConfig:\t --test')
    else:
        print('Try passing --help')
def encode(plaintext, leftRotor, rightRotor):
    """Encrypt *plaintext* one character at a time.

    Spaces are removed and the text lowercased first.  After each
    character the right rotor is flipped and advanced, so rotor state
    evolves as encoding proceeds.
    """
    prepared = plaintext.replace(' ', '').lower()
    ciphertext = ''
    for symbol in prepared:
        ciphertext += encodeChar(symbol, leftRotor, rightRotor)
        rightRotor.flip()
        rightRotor.kick(True)
    return ciphertext
def encodeChar(char, leftRotor, rightRotor):
    """Encode one character.

    The left rotor is aligned to *char*; the same rotation is applied to
    the right rotor, whose displayed letter becomes the output.
    """
    offset = leftRotor.findRotation(char)
    leftRotor.rotate(offset)
    rightRotor.rotate(offset)
    return rightRotor.getDisplay()
def decode(ciphertext, leftRotor, rightRotor):
    """Decrypt *ciphertext*, processing characters in reverse order.

    The result comes out reversed; callers display it with ``[::-1]``.
    """
    prepared = ciphertext.replace(' ', '').lower()
    plaintext = ''
    for symbol in reversed(prepared):
        plaintext += decodeChar(symbol, leftRotor, rightRotor)
    return plaintext
def decodeChar(char, leftRotor, rightRotor):
    """Decode a single character.

    The sequence of rotor mutations below mirrors (and presumably undoes)
    one encode step plus the per-character flip/kick performed in encode();
    the statement order is load-bearing and must not be changed.
    """
    # Reverse the advance applied after the character was encoded.
    rightRotor.kick(False)
    # First alignment pass on the right rotor, mirrored onto the left.
    rotation = rightRotor.findRotation(char)
    rightRotor.rotate(rotation)
    leftRotor.rotate(rotation)
    # Restore the wiring orientation that was active during encoding.
    rightRotor.flip()
    # Second alignment pass; the plaintext letter ends up on the left rotor.
    rotation = rightRotor.findRotation(char)
    rightRotor.rotate(rotation)
    leftRotor.rotate(rotation)
    return leftRotor.getDisplay()
def testRun():
    """Round-trip self-test: encode a message, then decode and print it."""
    # Bug fix: leftRotor was referenced without ever being defined in this
    # scope (NameError); construct it here like main() does.
    leftRotor = Rotor(0, 'abcdefghijklmnopqrstuvwxyz')
    rightRotor = Rotor(0, 'tlmvpcbsuofnaqdhweiyrjzxgk')
    ciphertext = encode("my message", leftRotor, rightRotor)
    print(leftRotor.order)
    plaintext = decode(ciphertext, leftRotor, rightRotor)
    print(plaintext[::-1])
    print(ciphertext + ' ' + ''.join(rightRotor.order))
# Bug fix: run only when executed as a script — the bare main() call made
# this module unimportable (and crashed on sys.argv when no args given).
if __name__ == "__main__":
    main()
|
# This GYP file stores the dependencies necessary to build Skia on the Chrome OS
# platform. The OS doesn't provide many stable libraries as part of the
# distribution so we have to build a few of them ourselves.
{
'includes': [
'../platform_tools/chromeos/gyp/dependencies.gypi',
],
}
|
# Copyright (c) 2020. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
from .im import *
__all__ = im.__all__
|
# -*- coding: utf-8 -*-
"""
Helper function for training and testing
"""
import os
import glob
import re
from datetime import datetime
import torch
def load_saved_model(saved_path, model):
    """Load the latest saved model checkpoint if one exists.

    :param saved_path: directory containing ``*epoch<N>.pth`` files, str
    :param model: model object (must support ``load_state_dict``)
    :raises ValueError: if *saved_path* does not exist
    :return: ``(initial_epoch, model)`` — epoch 0 and the model untouched
        when no checkpoint is found
    """
    if not os.path.exists(saved_path):
        raise ValueError('{} not found'.format(saved_path))

    def find_last_checkpoint(save_dir):
        # Robustness fix: the original indexed re.findall(...)[0] and
        # int()-converted it blindly; skip files whose epoch is not numeric.
        epochs = []
        for file_ in glob.glob(os.path.join(save_dir, '*epoch*.pth')):
            match = re.search(r'epoch(\d+)\.pth', file_)
            if match:
                epochs.append(int(match.group(1)))
        return max(epochs) if epochs else 0

    initial_epoch = find_last_checkpoint(saved_path)
    if initial_epoch > 0:
        print('resuming by loading epoch %d' % initial_epoch)
        model.load_state_dict(torch.load(os.path.join(saved_path, 'net_epoch%d.pth' % initial_epoch)))
    return initial_epoch, model
def setup_train(args, model_name='LMF'):
    """Create a folder for saved models based on the current timestamp.

    The folder is created under ``../logs`` relative to this file, named
    ``<model_name>_YYYY_MM_DD_HH_MM_SS``.

    :param args: unused, kept for interface compatibility
    :param model_name: model name prefix (generalized from the previous
        hard-coded ``'LMF'``; default preserves old behavior)
    :return: absolute-ish path of the created folder
    """
    folder_name = model_name + datetime.now().strftime("_%Y_%m_%d_%H_%M_%S")
    current_path = os.path.join(os.path.dirname(__file__), '../logs')
    full_path = os.path.join(current_path, folder_name)
    if not os.path.exists(full_path):
        os.makedirs(full_path)
    return full_path
|
# as you can see, I am able to import directly from the `app` module, this corresponds to the `app/__init__.py` file
from app import run_app

# Bug fix: guard the entry point so importing this module does not
# immediately start the application.
if __name__ == "__main__":
    run_app()
|
class Authentication:
    """Holds a user's credentials and the session token issued for them."""

    def __init__(self, userid, password):
        # Credentials are kept on private attributes; no token until set.
        self._userid, self._password = userid, password
        self._token = None

    def get_token(self):
        """Return the stored token, or None if none has been set yet."""
        return self._token

    def set_token(self, token):
        """Remember *token* for later retrieval via get_token()."""
        self._token = token
|
from z_src.utils.voc_utils import create_loss_and_optimizer
import os
import pathlib
from z_src.utils import config as cfg
import torch
import time
from torch.autograd import Variable
import numpy as np
from z_src.utils.voc_utils import colorize_mask
import torchvision.transforms as transforms
def train_net(net, n_epochs, learning_rate, loader, n_device):
# Print all of the hyperparameters of the training iteration:
print("TRAINING PHASE")
save_path = os.path.join(cfg.MODEL_CKPT_PATH, 'train_val')
saved_epoch = 0
epoch = 0
# Create our loss and optimizer functions
loss, optimizer = create_loss_and_optimizer(net, learning_rate)
# Load checkpoint
model_save_path = os.path.join(cfg.MODEL_CKPT_PATH, 'model')
if not os.path.exists(model_save_path):
pathlib.Path(model_save_path).mkdir(parents=True, exist_ok=True)
model_save_path = os.path.join(model_save_path, cfg.MODEL_CKPT_FILE_NAME)
if os.path.isfile(model_save_path) and os.path.getsize(model_save_path) > 0:
print("file path ", model_save_path)
checkpoint = torch.load(model_save_path)
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
saved_epoch = checkpoint['epoch']
# train_loss = checkpoint['loss']
net.train()
# Time for printing
training_start_time = time.time()
if saved_epoch >= n_epochs:
print("model loaded from checkpoint")
else:
n_epochs -= saved_epoch
print_set_length = int(len(loader) / 10)
for epoch in range(n_epochs):
print("epoch:", (epoch + 1 + saved_epoch))
current_loss = 0.0
start_time = time.time()
total_train_loss = 0.0
for i, (inputs, target) in enumerate(loader):
# Get inputs
inputs = inputs.to(n_device)
target = target.to(n_device)
# Wrap them in a Variable object
inputs, target = Variable(inputs), Variable(target)
# Set the parameter gradients to zero
optimizer.zero_grad()
# Forward pass, backward pass, optimize
outputs = net(inputs)
train_loss = loss(outputs, target)
train_loss.backward()
optimizer.step()
# Print statistics
current_loss += train_loss.item()
total_train_loss += train_loss.item()
# Print every 10th batch of an epoch
if (i % print_set_length) == (print_set_length - 1):
print("Epoch {}, {:d}% \t train_loss: {:.8f} took: {:.2f}s".format(
epoch + 1, int((i / print_set_length) * 10) + 1, current_loss / print_set_length,
time.time() - start_time))
# Reset running loss and time
current_loss = 0.0
start_time = time.time()
print("Training finished, took {:.2f}s".format(time.time() - training_start_time))
torch.save({
'epoch': (epoch + 1 + saved_epoch),
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': train_loss,
}, model_save_path)
return net
|
from flask import Flask, render_template, redirect, url_for, session, flash, request, make_response
from flask_session import Session
from flask_wtf import FlaskForm, CSRFProtect
from wtforms import StringField, PasswordField, validators
import re
import os
import subprocess
from datetime import datetime
from . import app_forms
from . import create_app
app = create_app()
# NOTE(review): SECRET_KEY is generated here but never assigned to
# app.config — confirm create_app() sets its own secret key, otherwise
# CSRF/session protection may not use this value at all.
SECRET_KEY = os.urandom(32)
csrf = CSRFProtect(app)
Session(app)
# In-memory "database": registered users and the currently logged-in user.
registration_info = []
logged_in_user = []

# Result codes returned by validate_user().
validate_success = 1
validate_login = 0
validate_2fa = -1

# Bug fix: the original dict literal repeated the "Content-Security-Policy"
# key a dozen times, so Python silently kept only the last directive
# (prefetch-src).  All CSP directives now form one policy string, and the
# misspelled "objec-src" directive is corrected to "object-src".
_CSP_POLICY = "; ".join([
    "default-src 'self'",
    "frame-ancestors 'none'",
    "worker-src 'self'",
    "script-src 'self'",
    "style-src 'self'",
    "img-src 'none'",
    "connect-src 'self'",
    "font-src 'self'",
    "media-src 'self'",
    "manifest-src 'self'",
    "object-src 'self'",
    "prefetch-src 'self'",
])

# Security headers attached to redirect responses.
headers = {
    "Content-Security-Policy": _CSP_POLICY,
    "X-Content-Type-Options": "nosniff",
    "X-Frame-Options": "DENY",
    "X-XSS-Protection": "1; mode=block",
}
@app.route('/')
def home():
    """Route visitors: registration first, then login, then the checker."""
    if not registration_info:
        return redirect('/register'), 302, headers
    if not logged_in_user:
        return redirect(url_for('login')), 302, headers
    return redirect(url_for('spell_check')), 302, headers
# NOTE(review): these two handlers shadow the built-ins `set` and `get`.
# Renaming them would change the Flask endpoint names used by url_for(),
# so they are documented rather than renamed.
@app.route('/set/')
def set():
    # Demo endpoint: store a value in the server-side session.
    session['key'] = 'value'
    return 'ok'


@app.route('/get/')
def get():
    # Demo endpoint: read back the value stored by /set/.
    return session.get('key', 'not set')


@app.route('/api/data')
def get_data():
    # Serve the bundled static/data.json file.
    return app.send_static_file('data.json')
@app.route('/about')
def about():
    """Serve the static "about" page with the security headers applied."""
    return CreateResponse(render_template('about.html'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Register a new user (username, password, 2FA phone) in memory."""
    try:
        form = app_forms.RegistrationForm(request.form)
        if request.method == 'POST':
            if form.validate_on_submit():
                # NOTE(review): the password is stored in plaintext; it
                # should be hashed before storage.
                user = {}
                user['username'] = form.username.data
                user['password'] = form.password.data
                user['twofactor'] = form.phone2fa.data
                registration_info.append(user)
                flash('Registration was a success', 'success')
                return redirect(url_for('login')), 302, headers
            else:
                # Bug fix: the message read "faulure"; typo corrected.
                # NOTE(review): the flash category is still 'success' —
                # confirm whether the template expects a failure category.
                flash('Registration was a failure', 'success')
        r = CreateResponse(render_template('register.html', form=form))
        return r
    except Exception as e:
        # NOTE(review): returning str(e) can leak internals to the client.
        r = CreateResponse(str(e), 500)
        return r
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user against the in-memory registration list."""
    #if len(registration_info) == 0:
    #    return redirect('/register')
    try:
        form = app_forms.LoginForm(request.form)
        if request.method == 'POST' and form.validate_on_submit():
            # Only one user may be logged in at a time.
            logged_in_user.clear()
            user = {}
            user['username'] = form.username.data
            user['password'] = form.password.data
            user['twofactor'] = form.phone2fa.data
            validation = validate_user(user)
            if validation == validate_success:
                logged_in_user.append(user)
                flash('Login was a success', 'result')
            elif validation == validate_login:
                flash('Incorrect username or password', 'result')
            else:
                flash('Two-factor authentication failure', 'result')
            # NOTE(review): this redirects to /spell_check even on failure;
            # /spell_check bounces unauthenticated users back here, so the
            # flow works, but a direct re-render of the login page with the
            # flash message may be intended.
            return redirect(url_for('spell_check')), 302, headers
    except Exception as e:
        r = CreateResponse(str(e), 500)
        return r
    # GET request (or non-validating POST): show the login form.
    r = CreateResponse(render_template('login.html', form=form))
    return r
def validate_user(user):
    """Check *user* against the registered users.

    :return: validate_success on a full match, validate_2fa when only the
        two-factor code is wrong, validate_login otherwise.

    Bug fix: the original kept looping after finding a match, so any later
    registered user with a different username reset the result back to
    validate_login; we now return as soon as the username is found.
    """
    # NOTE(review): passwords are compared (and stored) in plaintext; they
    # should be hashed, and compared with a constant-time function.
    for registered_user in registration_info:
        if user['username'] == registered_user['username']:
            if user['password'] != registered_user['password']:
                return validate_login
            if user['twofactor'] != registered_user['twofactor']:
                return validate_2fa
            return validate_success
    return validate_login
@app.route('/spell_check', methods=['GET', 'POST'])
def spell_check():
    """Render the spell-check form; on POST, run the text through ./a.out."""
    if len(registration_info) == 0:
        return redirect('/register'), 302, headers
    elif len(logged_in_user) == 0:
        return redirect(url_for('login')), 302, headers
    try:
        form = app_forms.SpellCheckForm(request.form)
        if request.method == 'POST' and form.validate_on_submit():
            lines = form.inputtext.data.split('\n')
            # Bug fix: the file handle was never closed if an exception was
            # raised; a context manager guarantees release.
            # NOTE(review): writelines() does not append newlines, so the
            # words end up on one line in check_words.txt — confirm ./a.out
            # tokenizes on whitespace, not lines.
            with open('check_words.txt', 'w') as f:
                f.writelines(lines)
            p = subprocess.run(['./a.out', './check_words.txt', './wordlist.txt'],
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            sc_form = app_forms.SpellCheckResultsForm()
            sc_form.inputtext.data = '\n'.join(lines)
            # Present the checker's one-per-line output as a comma list.
            msg = p.stdout.decode('utf-8')
            msg = msg.replace('\n', ', ')
            msg = msg.rstrip(', ')
            sc_form.misspelled.data = msg
            return CreateResponse(render_template('sc_results.html', form=sc_form))
    except Exception as e:
        return CreateResponse(str(e), 500)
    return CreateResponse(render_template('spell_check.html', form=form))
@app.route('/sc_results', methods=['GET'])
def sc_results():
    """Render the last spell-check results page."""
    if len(registration_info) == 0:
        return redirect('/register'), 302, headers
    elif len(logged_in_user) == 0:
        return redirect(url_for('login')), 302, headers
    try:
        # Bug fix: the old POST-handling branch was unreachable because
        # this route only accepts GET; it has been removed.
        form = app_forms.SpellCheckResultsForm(request.form)
    except Exception as e:
        return CreateResponse(str(e), 500)
    return CreateResponse(render_template('sc_results.html', form=form))
def CreateResponse(resp, status_code=None):
    """Build a response with the app's security headers attached.

    :param resp: body or (body, ...) accepted by flask.make_response
    :param status_code: optional HTTP status code
    :return: the response with CSP and hardening headers set
    """
    if status_code:
        r = make_response(resp, status_code)
    else:
        r = make_response(resp)
    # Bug fix: the original assigned r.headers["Content-Security-Policy"]
    # twelve times in a row, so only the final directive (prefetch-src)
    # survived.  All directives are now combined into one policy, and the
    # misspelled "objec-src" is corrected to "object-src".
    r.headers["Content-Security-Policy"] = "; ".join([
        "default-src 'self'",
        "frame-ancestors 'none'",
        "worker-src 'self'",
        "script-src 'self'",
        "style-src 'self'",
        "img-src 'none'",
        "connect-src 'self'",
        "font-src 'self'",
        "media-src 'self'",
        "manifest-src 'self'",
        "object-src 'self'",
        "prefetch-src 'self'",
    ])
    r.headers["X-Content-Type-Options"] = "nosniff"
    r.headers["X-Frame-Options"] = "DENY"
    r.headers["X-XSS-Protection"] = "1; mode=block"
    return r
from django.contrib.auth import authenticate, login, logout, get_user_model
import graphene
from graphql_relay import from_global_id
from graphene import relay
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
import graphene_django_optimizer as gql_optimizer
from django_filters import FilterSet, OrderingFilter
from tourney.models import Competitor, Team, TeamTourney, Game, Tourney, Match, Set
# FilterSet for Match connections: filterable and orderable by round/seed.
class MatchFilter(FilterSet):
    class Meta:
        model = Match
        fields = ("round", "seed")

    order_by = OrderingFilter(fields=("round", "seed",))


# Relay node wrapper for Competitor.
class CompetitorType(DjangoObjectType):
    class Meta:
        model = Competitor
        interfaces = (relay.Node,)


# Relay node wrapper for Match.
class MatchType(DjangoObjectType):
    class Meta:
        model = Match
        interfaces = (relay.Node,)
# Relay node wrapper for Tourney with a filterable match connection.
class TourneyType(DjangoObjectType):
    matchSet = DjangoFilterConnectionField(MatchType, filterset_class=MatchFilter)

    class Meta:
        model = Tourney
        interfaces = (relay.Node,)

    def resolve_matches(self, info, **kwargs):
        # Bug fix: the module itself was being called — gql_optimizer(...)
        # raises TypeError; the correct call is gql_optimizer.query(qs, info).
        return gql_optimizer.query(MatchFilter(kwargs).qs, info)

    def resolve_matchSet(self, info, **kwargs):
        return MatchFilter(kwargs).qs
# Relay node wrapper for Team.
class TeamType(DjangoObjectType):
    class Meta:
        model = Team
        interfaces = (relay.Node,)


# Relay node wrapper for the Team<->Tourney membership model.
class TeamTourneyType(DjangoObjectType):
    class Meta:
        model = TeamTourney
        interfaces = (relay.Node,)


# Relay node wrapper for Game.
class GameType(DjangoObjectType):
    class Meta:
        model = Game
        interfaces = (relay.Node,)


# Relay node wrapper for Set.
class SetType(DjangoObjectType):
    class Meta:
        model = Set
        interfaces = (relay.Node,)


# Relay node wrapper for the Django user model; only id and username are
# exposed — presumably to keep email/password fields out of the schema.
class UserType(DjangoObjectType):
    class Meta:
        model = get_user_model()
        interfaces = (relay.Node,)
        fields = ("id", "username")
# Plain relay connections backing the root Query's list fields.
class CompetitorsTypeConnection(relay.Connection):
    class Meta:
        node = CompetitorType


class TourneysTypeConnection(relay.Connection):
    class Meta:
        node = TourneyType


class MatchesTypeConnection(relay.Connection):
    class Meta:
        node = MatchType


class SetsTypeConnection(relay.Connection):
    class Meta:
        node = SetType


class TeamsTypeConnection(relay.Connection):
    class Meta:
        node = TeamType


class GamesTypeConnection(relay.Connection):
    class Meta:
        node = GameType


class UsersTypeConnection(relay.Connection):
    class Meta:
        node = UserType
# Root query: list connections, single-object lookups and the current user.
class Query(object):
    competitors = relay.ConnectionField(CompetitorsTypeConnection)
    tourneys = relay.ConnectionField(TourneysTypeConnection)
    matches = relay.ConnectionField(MatchesTypeConnection)
    sets = relay.ConnectionField(SetsTypeConnection)
    teams = relay.ConnectionField(TeamsTypeConnection)
    games = relay.ConnectionField(GamesTypeConnection)

    competitor = relay.Node.Field(CompetitorType, name=graphene.String())
    tourney = relay.Node.Field(TourneyType, name=graphene.String())
    match = relay.Node.Field(MatchType)
    team = relay.Node.Field(TeamType, name=graphene.String())
    team_tourney = relay.Node.Field(TeamTourneyType, name=graphene.String())
    game = relay.Node.Field(GameType, name=graphene.String())
    set = relay.Node.Field(SetType)

    me = graphene.Field(UserType)
    users = relay.ConnectionField(UsersTypeConnection)

    def resolve_users(self, info):
        # Bug fix: "gql_optimizer.quer" was a typo that raised
        # AttributeError at runtime; corrected to .query.
        return gql_optimizer.query(get_user_model().objects.all(), info)

    def resolve_me(self, info):
        # Return the authenticated user, or None for anonymous requests.
        user = info.context.user
        if user.is_anonymous:
            return None
        return user

    def resolve_competitors(self, info, **kwargs):
        return gql_optimizer.query(Competitor.objects.all(), info)

    def resolve_tourneys(self, info, **kwargs):
        # Newest tournaments first.
        return gql_optimizer.query(Tourney.objects.all().order_by("-created_at"), info)

    def resolve_matches(self, info, **kwargs):
        return gql_optimizer.query(Match.objects.all(), info)

    def resolve_teams(self, info, **kwargs):
        return gql_optimizer.query(Team.objects.all(), info)

    def resolve_games(self, info, **kwargs):
        return gql_optimizer.query(Game.objects.all(), info)

    def resolve_sets(self, info, **kwargs):
        return gql_optimizer.query(Set.objects.all(), info)

    def resolve_competitor(self, info, **kwargs):
        # Single-object lookup by pk or by name.
        id = kwargs.get("id")
        name = kwargs.get("name")
        if id is not None:
            return Competitor.objects.get(pk=id)
        if name is not None:
            return Competitor.objects.get(name=name)
        return None

    def resolve_tourney(self, info, **kwargs):
        # NOTE(review): gql_optimizer.query is normally given a queryset
        # plus info; here it receives a single instance — confirm this
        # behaves as intended.
        id = kwargs.get("id")
        name = kwargs.get("name")
        if id is not None:
            return gql_optimizer.query(Tourney.objects.get(pk=id))
        if name is not None:
            return gql_optimizer.query(Tourney.objects.get(name=name))
        return None

    def resolve_match(self, info, **kwargs):
        id = kwargs.get("id")
        if id is not None:
            return Match.objects.get(pk=id)
        return None

    def resolve_team(self, info, **kwargs):
        id = kwargs.get("id")
        name = kwargs.get("name")
        if id is not None:
            return Team.objects.get(pk=id)
        if name is not None:
            return Team.objects.get(name=name)
        return None

    def resolve_team_tourney(self, info, **kwargs):
        id = kwargs.get("id")
        name = kwargs.get("name")
        if id is not None:
            return TeamTourney.objects.get(pk=id)
        if name is not None:
            return TeamTourney.objects.get(name=name)
        return None

    def resolve_game(self, info, **kwargs):
        id = kwargs.get("id")
        name = kwargs.get("name")
        if id is not None:
            return Game.objects.get(pk=id)
        if name is not None:
            return Game.objects.get(name=name)
        return None

    def resolve_set(self, info, **kwargs):
        id = kwargs.get("id")
        if id is not None:
            return Set.objects.get(pk=id)
        return None
class CreateGame(graphene.Mutation):
    # Mutation: create a Game with the given name.
    class Arguments:
        name = graphene.String()

    game = graphene.Field(GameType)
    ok = graphene.Boolean()

    def mutate(self, info, name):
        game = Game.objects.create(name=name)
        ok = True
        return CreateGame(game=game, ok=ok)
class CreateCompetitor(graphene.Mutation):
    # Mutation: create a Competitor with the given name.
    class Arguments:
        name = graphene.String()

    competitor = graphene.Field(CompetitorType)
    ok = graphene.Boolean()

    def mutate(self, info, name):
        # Bug fix: the payload was constructed with a nonexistent "game"
        # field (CreateCompetitor(game=...)); the output field is
        # "competitor".
        competitor = Competitor.objects.create(name=name)
        return CreateCompetitor(competitor=competitor, ok=True)
class CreateTeam(graphene.Mutation):
    # Mutation: create a Team with the given name.
    class Arguments:
        name = graphene.String()

    team = graphene.Field(TeamType)
    ok = graphene.Boolean()

    def mutate(self, info, name):
        team = Team.objects.create(name=name)
        ok = True
        return CreateTeam(team=team, ok=ok)


class CreateTourney(graphene.Mutation):
    # Mutation: create a Tourney with the given name.
    class Arguments:
        name = graphene.String()

    tourney = graphene.Field(TourneyType)
    ok = graphene.Boolean()

    def mutate(self, info, name):
        tourney = Tourney.objects.create(name=name)
        ok = True
        return CreateTourney(tourney=tourney, ok=ok)
class StartTourney(graphene.Mutation):
    # Mutation: start an existing tournament identified by its relay ID.
    class Arguments:
        tourneyId = graphene.ID()

    ok = graphene.Boolean()

    def mutate(self, info, tourneyId):
        # from_global_id returns (type_name, db_id); we need the db id.
        tourney = Tourney.objects.get(id=from_global_id(tourneyId)[1])
        tourney.start_tourney()
        # Bug fix: the original returned a CreateTourney payload from
        # StartTourney.mutate, which does not match this mutation's output
        # type (StartTourney only declares "ok").
        return StartTourney(ok=True)
class Login(graphene.Mutation):
    # Mutation: authenticate with username/password and open a session.
    class Arguments:
        username = graphene.String()
        password = graphene.String()

    user = graphene.Field(UserType)
    ok = graphene.Boolean()

    def mutate(self, info, username, password):
        user = authenticate(info.context, username=username, password=password)
        if user is not None:
            login(info.context, user)
            return Login(ok=True, user=user)
        else:
            # Invalid credentials: report failure rather than raising.
            return Login(ok=False, user=None)


class Logout(graphene.Mutation):
    # Mutation: end the current session; always reports success.
    ok = graphene.Boolean()

    def mutate(self, info):
        logout(info.context)
        return Logout(ok=True)
class SignUp(graphene.Mutation):
    # Mutation: create a user account and immediately log the user in.
    class Arguments:
        username = graphene.String()
        email = graphene.String()
        password = graphene.String()
        passwordVerify = graphene.String()

    ok = graphene.Boolean()
    user = graphene.Field(UserType)

    def mutate(self, info, username, email, password, passwordVerify):
        # TODO: VERIFY WITH EMAIL
        if password != passwordVerify:
            raise Exception("Password mismatch")
        UserModel = get_user_model()
        if UserModel.objects.filter(username=username).exists():
            raise Exception("User already exists")
        UserModel.objects.create_user(username=username, email=email, password=password)
        # NOTE(review): authenticate() can return None (e.g. for an
        # inactive user), which would make login() fail — confirm this
        # cannot happen right after create_user().
        user = authenticate(info.context, username=username, password=password)
        login(info.context, user)
        return SignUp(ok=True, user=user)
# Root mutation type: account/session management plus object creation.
class Mutations(graphene.ObjectType):
    login = Login.Field()
    logout = Logout.Field()
    signUp = SignUp.Field()
    startTourney = StartTourney.Field()
    createGame = CreateGame.Field()
    createCompetitor = CreateCompetitor.Field()
    createTeam = CreateTeam.Field()
    createTourney = CreateTourney.Field()
|
import sys
import nipype
import nipype.pipeline as pe
# import the defined workflow from the func_preproc folder
import PUMI.utils.Concat as conc
import PUMI.anat_preproc.Better as bet
import PUMI.func_preproc.MotionCorrecter as mc
import PUMI.func_preproc.Compcor as cmpcor
import PUMI.func_preproc.NuissanceCorr as nuisscorr
import PUMI.func_preproc.TemporalFiltering as tmpfilt
import PUMI.func_preproc.DataCensorer as cens
import PUMI.func_preproc.MedianAngleCorr as medangcor
import PUMI.func_preproc.DataCensorer as scrub
import PUMI.utils.QC as qc
import nipype.interfaces.utility as utility
import nipype.interfaces.afni as afni
import nipype.interfaces.fsl as fsl
import PUMI.utils.globals as globals
from nipype.interfaces.fsl import Smooth
import os
def FuncProc(stdrefvol="mid", SinkTag="func_preproc", wf_name="funcproc"):
    """
    Performs processing of functional (resting-state) images:
    brain extraction -> motion correction -> CompCor + Friston-24 nuisance
    regression -> band-pass temporal filtering -> percent-based censoring
    -> median-angle correction.

    Images should be already reoriented, e.g. with fsl fslreorient2std (see scripts/ex_pipeline.py)

    Workflow inputs:
        :param func: The functional image file.
        :param SinkDir: where to write important outputs
        :param SinkTag: The output directory in which the returned images (see workflow outputs) could be found.

    Workflow outputs:
        :return: the assembled nipype Workflow

    Tamas Spisak
    tamas.spisak@uk-essen.de
    2018
    """
    SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
    if not os.path.exists(SinkDir):
        os.makedirs(SinkDir)

    # Basic interface class generates identity mappings
    inputspec = pe.Node(utility.IdentityInterface(fields=['func', 'cc_noise_roi']),
                        name='inputspec')

    # build the actual pipeline
    # myonevol = onevol.onevol_workflow(SinkDir=SinkDir)
    mybet = bet.bet_workflow(SinkTag="func_preproc", fmri=True, wf_name="brain_extraction_func")
    mymc = mc.mc_workflow_fsl(reference_vol=stdrefvol)
    mycmpcor = cmpcor.compcor_workflow()
    myconc = conc.concat_workflow(numconcat=2)  # joins CompCor + Friston-24 regressors
    mynuisscor = nuisscorr.nuissremov_workflow()
    mytmpfilt = tmpfilt.tmpfilt_workflow(highpass_Hz=0.008, lowpass_Hz=0.08)
    mycens = cens.datacens_workflow_percent()
    mymedangcor = medangcor.mac_workflow()

    # Basic interface class generates identity mappings
    outputspec = pe.Node(utility.IdentityInterface(fields=['func_mc',
                                                           'func_mc_nuis',
                                                           'func_mc_nuis_bpf',
                                                           'func_mc_nuis_bpf_cens',
                                                           'func_mc_nuis_bpf_cens_medang',
                                                           # non-image data
                                                           'FD'
                                                           ]),
                         name='outputspec')

    wf_mc = nipype.Workflow(wf_name)
    wf_mc.connect([
        (inputspec, mybet,
         [('func', 'inputspec.in_file')]),
        (mybet, mymc,
         [('outputspec.brain', 'inputspec.func')]),
        # CompCor runs on motion-corrected data within the noise ROI.
        (mymc, mycmpcor, [('outputspec.func_out_file', 'inputspec.func_aligned')]),
        (inputspec, mycmpcor, [('cc_noise_roi', 'inputspec.mask_file')]),
        (mycmpcor, myconc, [('outputspec.components_file', 'inputspec.par1')]),
        (mymc, myconc, [('outputspec.first24_file', 'inputspec.par2')]),
        (myconc, mynuisscor, [('outputspec.concat_file', 'inputspec.design_file')]),
        (mymc, mynuisscor, [('outputspec.func_out_file', 'inputspec.in_file')]),
        (mynuisscor, mytmpfilt, [('outputspec.out_file', 'inputspec.func')]),
        # Censoring is driven by the framewise displacement from mc.
        (mytmpfilt, mycens, [('outputspec.func_tmplfilt', 'inputspec.func')]),
        (mymc, mycens, [('outputspec.FD_file', 'inputspec.FD')]),
        (mybet, mymedangcor, [('outputspec.brain_mask', 'inputspec.mask')]),
        (mycens, mymedangcor, [('outputspec.scrubbed_image', 'inputspec.realigned_file')]),
        # outputspec: expose every intermediate stage for downstream use
        (mymc, outputspec, [('outputspec.func_out_file', 'func_mc')]),
        (mynuisscor, outputspec, [('outputspec.out_file', 'func_mc_nuis')]),
        (mytmpfilt, outputspec, [('outputspec.func_tmplfilt', 'func_mc_nuis_bpf')]),
        (mycens, outputspec, [('outputspec.scrubbed_image', 'func_mc_nuis_bpf_cens')]),
        (mymedangcor, outputspec, [('outputspec.final_func', 'func_mc_nuis_bpf_cens_medang')]),
        # non-image data:
        (mycens, outputspec, [('outputspec.FD', 'FD')])
    ])
    return wf_mc
def FuncProc_cpac(stdrefvol="mid", SinkTag="func_preproc", wf_name="funcproc"):
    """
    Performs processing of functional (resting-state) images, closely replicating the results of C-PAC,
    with the conf file: etc/cpac_conf.yml

    Images should be already reoriented, e.g. with fsl fslreorient2std (see scripts/ex_pipeline.py)

    Workflow inputs:
        :param func: The functional image file.
        :param SinkDir: where to write important outputs
        :param SinkTag: The output directory in which the returned images (see workflow outputs) could be found.

    Workflow outputs:
        :return: the assembled nipype Workflow

    Tamas Spisak
    tamas.spisak@uk-essen.de
    2018
    """
    SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
    if not os.path.exists(SinkDir):
        os.makedirs(SinkDir)

    # Basic interface class generates identity mappings
    inputspec = pe.Node(utility.IdentityInterface(fields=['func', 'cc_noise_roi']),
                        name='inputspec')

    # build the actual pipeline
    # myonevol = onevol.onevol_workflow(SinkDir=SinkDir)
    # NOTE: unlike FuncProc, motion correction (AFNI, Power FD) runs BEFORE
    # brain extraction here, and despike regressors are added to the design.
    mymc = mc.mc_workflow_afni(reference_vol=stdrefvol, FD_mode="Power")
    mybet = bet.bet_workflow(SinkTag="func_preproc", fmri=True,
                             wf_name="brain_extraction_func")  # do it with Automask of AFNI?
    mycmpcor = cmpcor.compcor_workflow()
    mydespike = cens.despike_workflow()
    myconc = conc.concat_workflow(numconcat=3)  # CompCor + Friston-24 + despike
    mynuisscor = nuisscorr.nuissremov_workflow()
    mymedangcor = medangcor.mac_workflow()
    mytmpfilt = tmpfilt.tmpfilt_workflow(highpass_Hz=0.01, lowpass_Hz=0.08)

    # Basic interface class generates identity mappings
    outputspec = pe.Node(utility.IdentityInterface(fields=['func_mc',
                                                           'func_mc_nuis',
                                                           'func_mc_nuis_medang',
                                                           'func_mc_nuis_medang_bpf',
                                                           # non-image data
                                                           'FD'
                                                           ]),
                         name='outputspec')

    wf_mc = nipype.Workflow(wf_name)
    wf_mc.connect([
        (inputspec, mymc,
         [('func', 'inputspec.func')]),
        (mymc, mybet,
         [('outputspec.func_out_file', 'inputspec.in_file')]),
        (mybet, mycmpcor, [('outputspec.brain', 'inputspec.func_aligned')]),
        (inputspec, mycmpcor, [('cc_noise_roi', 'inputspec.mask_file')]),
        # Despike regressors are derived from framewise displacement.
        (mymc, mydespike, [("outputspec.FD_file", "inputspec.FD")]),
        (mycmpcor, myconc, [('outputspec.components_file', 'inputspec.par1')]),
        (mymc, myconc, [('outputspec.first24_file', 'inputspec.par2')]),
        (mydespike, myconc, [('outputspec.despike_mat', 'inputspec.par3')]),
        (myconc, mynuisscor, [('outputspec.concat_file', 'inputspec.design_file')]),
        (mybet, mynuisscor, [('outputspec.brain', 'inputspec.in_file')]),
        (mybet, mymedangcor, [('outputspec.brain_mask', 'inputspec.mask')]),
        (mynuisscor, mymedangcor, [('outputspec.out_file', 'inputspec.realigned_file')]),
        # C-PAC order: median-angle correction precedes temporal filtering.
        (mymedangcor, mytmpfilt, [('outputspec.final_func', 'inputspec.func')]),
        # outputspec
        (mymc, outputspec, [('outputspec.func_out_file', 'func_mc')]),
        (mynuisscor, outputspec, [('outputspec.out_file', 'func_mc_nuis')]),
        (mymedangcor, outputspec, [('outputspec.final_func', 'func_mc_nuis_medang')]),
        (mytmpfilt, outputspec, [('outputspec.func_tmplfilt', 'func_mc_nuis_medang_bpf')]),
        # non-image data:
        (mymc, outputspec, [('outputspec.FD_file', 'FD')]),
    ])
    return wf_mc
def FuncProc_despike_afni(stdrefvol="mid",SinkTag="func_preproc", wf_name="func_preproc_dspk_afni", fwhm=0, carpet_plot=""):
"""
Performs processing of functional (resting-state) images:
Images should be already reoriented, e.g. with fsl fslreorient2std (see scripts/ex_pipeline.py)
Workflow inputs:
:param func: The functional image file.
:param SinkDir: where to write important ouputs
:param SinkTag: The output directory in which the returned images (see workflow outputs) could be found.
Workflow outputs:
:param
:return: anatproc_workflow
Tamas Spisak
tamas.spisak@uk-essen.de
2018
"""
SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
if not os.path.exists(SinkDir):
os.makedirs(SinkDir)
wf_mc = nipype.Workflow(wf_name)
# Basic interface class generates identity mappings
inputspec = pe.Node(utility.IdentityInterface(fields=['func', 'cc_noise_roi']),
name='inputspec')
# build the actual pipeline
#myonevol = onevol.onevol_workflow(SinkDir=SinkDir)
mybet = bet.bet_workflow(SinkTag="func_preproc", fmri=True, wf_name="brain_extraction_func")
mymc = mc.mc_workflow_fsl(reference_vol=stdrefvol)
if carpet_plot:
# create "atlas"
add_masks = pe.MapNode(fsl.ImageMaths(op_string=' -add'),
iterfield=['in_file', 'in_file2'],
name="addimgs")
wf_mc.connect(inputspec, 'cc_noise_roi', add_masks, 'in_file')
wf_mc.connect(mybet, 'outputspec.brain_mask', add_masks, 'in_file2')
fmri_qc_mc = qc.fMRI2QC(carpet_plot, tag="mc", indiv_atlas=True)
wf_mc.connect(add_masks, 'out_file', fmri_qc_mc, 'inputspec.atlas')
wf_mc.connect(mymc, 'outputspec.FD_file', fmri_qc_mc, 'inputspec.confounds')
wf_mc.connect(mymc, 'outputspec.func_out_file', fmri_qc_mc, 'inputspec.func')
mydespike = pe.MapNode(afni.Despike(outputtype="NIFTI_GZ"), # I do it after motion correction...
iterfield=['in_file'],
name="DeSpike")
if carpet_plot:
fmri_qc_mc_dspk = qc.fMRI2QC(carpet_plot, tag="mc_dspk", indiv_atlas=True)
wf_mc.connect(add_masks, 'out_file', fmri_qc_mc_dspk, 'inputspec.atlas')
wf_mc.connect(mymc, 'outputspec.FD_file', fmri_qc_mc_dspk, 'inputspec.confounds')
wf_mc.connect(mydespike, 'out_file', fmri_qc_mc_dspk, 'inputspec.func')
mycmpcor = cmpcor.compcor_workflow() # to WM+CSF signal
myconc = conc.concat_workflow(numconcat=2)
mynuisscor = nuisscorr.nuissremov_workflow() # regress out 5 compcor variables and the Friston24
if carpet_plot:
fmri_qc_mc_dspk_nuis = qc.fMRI2QC(carpet_plot, tag="mc_dspk_nuis", indiv_atlas=True)
wf_mc.connect(add_masks, 'out_file', fmri_qc_mc_dspk_nuis, 'inputspec.atlas')
wf_mc.connect(mymc, 'outputspec.FD_file', fmri_qc_mc_dspk_nuis, 'inputspec.confounds')
wf_mc.connect(mynuisscor, 'outputspec.out_file', fmri_qc_mc_dspk_nuis, 'inputspec.func')
# optional smoother:
if fwhm > 0:
smoother = pe.MapNode(interface=Smooth(fwhm=fwhm),
iterfield=['in_file'],
name="smoother")
if carpet_plot:
fmri_qc_mc_dspk_smooth_nuis_bpf = qc.fMRI2QC(carpet_plot, tag="mc_dspk_nuis_smooth", indiv_atlas=True)
wf_mc.connect(add_masks, 'out_file', fmri_qc_mc_dspk_smooth_nuis_bpf, 'inputspec.atlas')
wf_mc.connect(mymc, 'outputspec.FD_file', fmri_qc_mc_dspk_smooth_nuis_bpf, 'inputspec.confounds')
wf_mc.connect(smoother, 'smoothed_file', fmri_qc_mc_dspk_smooth_nuis_bpf, 'inputspec.func')
#mymedangcor = medangcor.mac_workflow() #skip it this time
mytmpfilt = tmpfilt.tmpfilt_workflow(highpass_Hz=0.008, lowpass_Hz=0.08) #will be done by the masker?
if carpet_plot:
fmri_qc_mc_dspk_nuis_bpf = qc.fMRI2QC(carpet_plot, tag="mc_dspk_nuis_bpf", indiv_atlas=True)
wf_mc.connect(add_masks, 'out_file', fmri_qc_mc_dspk_nuis_bpf, 'inputspec.atlas')
wf_mc.connect(mymc, 'outputspec.FD_file', fmri_qc_mc_dspk_nuis_bpf, 'inputspec.confounds')
wf_mc.connect(mytmpfilt, 'outputspec.func_tmplfilt', fmri_qc_mc_dspk_nuis_bpf, 'inputspec.func')
myscrub = scrub.datacens_workflow_threshold(ex_before=0, ex_after=0)
# "liberal scrubbing" since despiking was already performed
if carpet_plot:
fmri_qc_mc_dspk_nuis_bpf_scrub = qc.fMRI2QC(carpet_plot, tag="mc_dspk_nuis_bpf_scrub", indiv_atlas=True)
wf_mc.connect(add_masks, 'out_file', fmri_qc_mc_dspk_nuis_bpf_scrub, 'inputspec.atlas')
wf_mc.connect(myscrub, 'outputspec.FD_scrubbed', fmri_qc_mc_dspk_nuis_bpf_scrub, 'inputspec.confounds')
wf_mc.connect(myscrub, 'outputspec.scrubbed_image', fmri_qc_mc_dspk_nuis_bpf_scrub, 'inputspec.func')
# Basic interface class generates identity mappings
outputspec = pe.Node(utility.IdentityInterface(fields=[
'func_preprocessed',
'func_preprocessed_scrubbed',
# non-image data
'FD'
]),
name='outputspec')
wf_mc.connect([
(inputspec, mybet,
[('func', 'inputspec.in_file')]),
(mybet, mymc,
[('outputspec.brain', 'inputspec.func')]),
(mymc, mydespike, [('outputspec.func_out_file', 'in_file')]),
(mydespike, mycmpcor, [('out_file', 'inputspec.func_aligned')]),
(inputspec, mycmpcor, [('cc_noise_roi', 'inputspec.mask_file')]),
(mycmpcor,myconc, [('outputspec.components_file','inputspec.par1')]),
(mymc, myconc, [('outputspec.first24_file', 'inputspec.par2')]),
(myconc,mynuisscor, [('outputspec.concat_file', 'inputspec.design_file')]),
(mydespike, mynuisscor, [('out_file', 'inputspec.in_file')])
])
if fwhm > 0:
wf_mc.connect([
(mynuisscor, smoother, [('outputspec.out_file', 'in_file')]),
(smoother, mytmpfilt, [('smoothed_file', 'inputspec.func')]),
(mytmpfilt, myscrub, [('outputspec.func_tmplfilt', 'inputspec.func')]),
(mymc, myscrub, [('outputspec.FD_file', 'inputspec.FD')]),
(mytmpfilt, outputspec, [('outputspec.func_tmplfilt', 'func_preprocessed')])
])
else:
wf_mc.connect([
(mynuisscor, mytmpfilt, [('outputspec.out_file', 'inputspec.func')]),
(mytmpfilt, myscrub, [('outputspec.func_tmplfilt', 'inputspec.func')]),
(mymc, myscrub, [('outputspec.FD_file', 'inputspec.FD')]),
(mytmpfilt, outputspec, [('outputspec.func_tmplfilt', 'func_preprocessed')])
])
wf_mc.connect([
# non-image data:
(mymc, outputspec, [('outputspec.FD_file', 'FD')]),
(myscrub, outputspec, [('outputspec.scrubbed_image', 'func_preprocessed_scrubbed')]),
])
return wf_mc
|
import torch
import argparse
import numpy as np
import utils
from pathlib import Path
from datetime import datetime
from network import ContactsNet
from dataset import ProteinDataLoader
def run_eval(target_path, model_path, replica, out_dir, device):
    """Run inference over every protein in `target_path` and dump outputs.

    For each protein, overlapping 2-D crops are fed through the network and
    blended back into full-length accumulators (distance/contact maps plus
    optional secondary-structure, torsion and ASA heads). One output file
    per head is written to `out_dir`, named after the protein's domain.
    """
    config = utils.build_config(model_path, replica)
    dataloader = ProteinDataLoader(target_path, config)
    model = ContactsNet(config.network_config).to(device)
    print(f'Model parameters: {model.get_parameter_number()["Total"]}')
    # Prefer a native PyTorch checkpoint; otherwise convert the TF one.
    model_file = model_path / replica / 'model.pt'
    if model_file.exists():
        model.load_state_dict(torch.load(model_file, map_location=device))
    else:
        cost_time = utils.load_tf_ckpt(model, model_file)
        model.to(device)
        print(f'Load tf model cost time: {cost_time}')
    num_examples = 0
    num_crops = 0
    num_bins = config.network_config.num_bins
    torsion_bins = config.network_config.torsion_bins
    crop_size_x = config.crop_size_x
    crop_size_y = config.crop_size_y
    # Optional "pyramid" blending: weights rise linearly from each crop
    # border toward the centre so overlapping crops merge smoothly.
    prob_weights = 1
    if config.eval_config.pyramid_weights > 0:
        sx = np.expand_dims(np.linspace(1.0 / crop_size_x, 1, crop_size_x), 1)
        sy = np.expand_dims(np.linspace(1.0 / crop_size_y, 1, crop_size_y), 0)
        prob_weights = np.minimum(np.minimum(sx, np.flipud(sx)),
                                  np.minimum(sy, np.fliplr(sy)))
        prob_weights /= np.max(prob_weights)
        prob_weights = np.minimum(prob_weights, config.eval_config.pyramid_weights)  # crop_size_x x crop_size_y
    # NOTE(review): if pyramid_weights == 0, prob_weights stays the scalar 1
    # and the slicing below would fail — presumably configs always set it > 0.
    start_t = datetime.now()
    for protein, crops in dataloader:
        L = protein.len
        print('Data: ', protein.targets.domain_name, L)
        # Crops
        # Full-sequence accumulators; channel 1 of contact_prob_accum holds
        # the summed blend weights used for normalization later.
        contact_prob_accum = np.zeros((L, L, 2), dtype=np.float32)
        distance_prob_accum = np.zeros((L, L, num_bins), dtype=np.float32)
        sec_accum = np.zeros((L, 8), dtype=np.float32)
        tor_accum = np.zeros((L, torsion_bins**2), dtype=np.float32)
        asa_accum = np.zeros((L,), dtype=np.float32)
        weights_1d_accum = np.zeros((L,), dtype=np.float32)
        num_crops_local = 0
        for x_2d, crop_x, crop_y in crops:
            # Clip the crop window to the sequence; remember how much
            # padding the model saw on each side of each axis.
            ic = max(0, crop_x[0])
            jc = max(0, crop_y[0])
            ic_to = min(L, crop_x[1])
            jc_to = min(L, crop_y[1])
            prepad_x = max(0, -crop_x[0])
            prepad_y = max(0, -crop_y[0])
            postpad_x = crop_x[1] - ic_to
            postpad_y = crop_y[1] - jc_to
            with torch.no_grad():
                x_2d = np.transpose(x_2d, (2, 0, 1))  # to NCHW shape
                x_2d = torch.tensor([x_2d]).float().to(device)
                crop_x = torch.tensor([crop_x]).to(device)
                crop_y = torch.tensor([crop_y]).to(device)
                out = model(x_2d, crop_x, crop_y)
                out = {k: t.cpu() for k, t in out.items()}
            # Strip the padded border from each 2-D head's output.
            contact_probs = out['contact_probs'][0,
                                                 prepad_y:crop_size_y - postpad_y,
                                                 prepad_x:crop_size_x - postpad_x].numpy()
            distance_probs = out['distance_probs'][0,
                                                   prepad_y:crop_size_y - postpad_y,
                                                   prepad_x:crop_size_x - postpad_x].numpy()
            weight = prob_weights[prepad_y:crop_size_y - postpad_y,
                                  prepad_x:crop_size_x - postpad_x]
            # Blend this crop into the global accumulators.
            contact_prob_accum[jc:jc_to, ic:ic_to, 0] += contact_probs * weight
            contact_prob_accum[jc:jc_to, ic:ic_to, 1] += weight
            distance_prob_accum[jc:jc_to, ic:ic_to, :] += distance_probs * np.expand_dims(weight, 2)
            weights_1d_accum[jc:jc_to] += 1
            weights_1d_accum[ic:ic_to] += 1
            # 1-D heads return the x-range followed by the y-range,
            # concatenated along the first (length) dimension.
            if 'secstruct_probs' in out:
                sec_x = out['secstruct_probs'][0, prepad_x:crop_size_x - postpad_x].numpy()
                sec_y = out['secstruct_probs'][0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y].numpy()
                sec_accum[ic:ic + sec_x.shape[0]] += sec_x
                sec_accum[jc:jc + sec_y.shape[0]] += sec_y
            if 'torsion_probs' in out:
                tor_x = out['torsion_probs'][0, prepad_x:crop_size_x - postpad_x].numpy()
                tor_y = out['torsion_probs'][0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y].numpy()
                tor_accum[ic:ic + tor_x.shape[0]] += tor_x
                tor_accum[jc:jc + tor_y.shape[0]] += tor_y
            if 'asa_output' in out:
                asa_x = out['asa_output'][0, prepad_x:crop_size_x - postpad_x].numpy()
                asa_y = out['asa_output'][0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y].numpy()
                asa_accum[ic:ic + asa_x.shape[0]] += np.squeeze(asa_x, 1)
                asa_accum[jc:jc + asa_y.shape[0]] += np.squeeze(asa_y, 1)
            num_crops_local += 1
        # Every residue pair must have been covered by at least one crop.
        assert (contact_prob_accum[:, :, 1] > 0.0).all()
        # Normalize by the accumulated blend weights.
        contact_accum = contact_prob_accum[:, :, 0] / contact_prob_accum[:, :, 1]
        distance_accum = distance_prob_accum[:, :, :] / contact_prob_accum[:, :, 1:2]
        asa_accum /= weights_1d_accum
        sec_accum /= np.expand_dims(weights_1d_accum, 1)
        tor_accum /= np.expand_dims(weights_1d_accum, 1)
        # The probs are symmetrical
        contact_accum = (contact_accum + contact_accum.transpose()) / 2
        distance_accum = (distance_accum + np.transpose(distance_accum, [1, 0, 2])) / 2
        # Save the output files
        distance_accum.dump(out_dir / f'{protein.targets.domain_name}.distance')
        if config.network_config.torsion_multiplier > 0:
            tor_accum.dump(out_dir / f'{protein.targets.domain_name}.torsion')
        if config.network_config.secstruct_multiplier > 0:
            utils.save_seq_prob(sec_accum, protein.seq, out_dir / f'{protein.targets.domain_name}.sec')
        if config.network_config.asa_multiplier > 0:
            utils.save_seq_prob(asa_accum, protein.seq, out_dir / f'{protein.targets.domain_name}.asa')
        num_examples += 1
        num_crops += num_crops_local
        if num_examples >= config.eval_config.max_num_examples: break
    time_spent = datetime.now() - start_t
    print(f'Evaluate {num_examples} examples, {num_crops} crops, {num_crops/num_examples:.1f} crops/ex')
    print(f'Cost time {time_spent}, {time_spent/num_examples} s/example, {time_spent/num_crops} s/crops\n')
def ensemble(target_path, out_dir):
    """Average replica outputs per model, then paste domains into targets.

    Stage 1: for every model directory under `out_dir`, average the
    `<domain>.distance` pickles across its numeric replica directories into
    `<model>/ensemble/`. Stage 2: for each full-length target, blend its
    domain-level distograms back into the full map (weighted by MSA depth)
    and write the pasted map, an RR contact file and a plot to
    `out_dir/pasted/`.
    """
    # --- Stage 1: average the replicas of every model ---
    for model_dir in filter(lambda d: d.is_dir() and d.name != 'pasted', out_dir.iterdir()):
        r = {}  # domain name -> list of per-replica distance maps
        for replica_dir in filter(lambda d: d.is_dir() and d.name.isdigit(), model_dir.iterdir()):
            for pkl in replica_dir.glob('*.distance'):
                target = pkl.name.split('.')[0]
                dis = np.load(pkl, allow_pickle=True)
                if target in r:
                    r[target].append(dis)
                else:
                    r[target] = [dis]
        ensemble_dir = model_dir / 'ensemble'
        ensemble_dir.mkdir(exist_ok=True)
        for k, v in r.items():
            ensemble_file = ensemble_dir / f'{k}.distance'
            ensemble_dis = sum(v) / len(v)
            ensemble_dis.dump(ensemble_file)
    # --- Stage 2: paste domain crops into each full-target distogram ---
    # Pasting weight per domain = number of alignments (deeper MSAs dominate).
    targets_weight = {data['domain_name']: {'weight': data['num_alignments'][0, 0], 'seq': data['sequence']} for data in np.load(target_path, allow_pickle=True)}
    ensemble_dir = out_dir / 'Distogram' / 'ensemble'
    paste_dir = out_dir / 'pasted'
    paste_dir.mkdir(exist_ok=True)
    # Full targets are the part of the domain name before the first '-'.
    targets = set([t.split("-")[0] for t in targets_weight.keys()])
    for target in targets:
        combined_cmap = np.load(ensemble_dir / f'{target}.distance', allow_pickle=True)
        counter_map = np.ones_like(combined_cmap[:, :, 0:1])
        seq = targets_weight[target]['seq']
        target_domains = utils.generate_domains(target, seq)
        for domain in sorted(target_domains, key=lambda x: x["name"]):
            if domain["name"] == target: continue
            # Domain crop coordinates are 1-based and inclusive.
            crop_start, crop_end = domain["description"]
            domain_dis = np.load(ensemble_dir / f'{domain["name"]}.distance', allow_pickle=True)
            weight = targets_weight[domain["name"]]['weight']
            weight_matrix_size = crop_end - crop_start + 1
            weight_matrix = np.ones((weight_matrix_size, weight_matrix_size), dtype=np.float32) * weight
            combined_cmap[crop_start - 1:crop_end, crop_start - 1:crop_end, :] += (domain_dis * np.expand_dims(weight_matrix, 2))
            counter_map[crop_start - 1:crop_end, crop_start - 1:crop_end, 0] += weight_matrix
        combined_cmap /= counter_map
        combined_cmap.dump(paste_dir / f'{target}.distance')
        # Contact probability = mass in the first 19 distance bins
        # (presumably the sub-8Å bins — confirm against the bin edges).
        contact_probs = combined_cmap[:, :, :19].sum(-1)
        utils.save_rr_file(contact_probs, seq, target, paste_dir / f'{target}.rr')
        utils.plot_contact_map(target, [contact_probs, combined_cmap], paste_dir / f'{target}.png')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Alphafold - PyTorch version')
    parser.add_argument('-i', '--input', type=str, required=True, help='target protein, support both .pkl or .tfrec format')
    parser.add_argument('-o', '--out', type=str, default='', help='output dir')
    parser.add_argument('-m', '--model', type=str, default='model', help='model dir')
    parser.add_argument('-r', '--replica', type=str, default='0', help='model replica')
    parser.add_argument('-t', '--type', type=str, choices=['D', 'B', 'T'], default='D', help='model type: D - Distogram, B - Background, T - Torsion')
    parser.add_argument('-e', '--ensemble', default=False, action='store_true', help='ensembling all replica outputs')
    parser.add_argument('-d', '--debug', default=False, action='store_true', help='debug mode')
    args = parser.parse_args()

    DEBUG = args.debug
    TARGET_PATH = args.input
    # Bug fix: TARGET must be derived *before* it is used to build the
    # default output directory name below. It was previously assigned only
    # inside the non-ensemble branch, so an empty --out raised NameError.
    TARGET = TARGET_PATH.split('/')[-1].split('.')[0]
    timestr = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    OUT_DIR = Path(args.out) if args.out else Path(f'contacts_{TARGET}_{timestr}')
    if args.ensemble:
        ensemble(TARGET_PATH, OUT_DIR)
    else:
        # CUDA is deliberately forced here (see the #TR marker); restore the
        # commented fallback to run on CPU-only machines.
        DEVICE = torch.device("cuda")# if torch.cuda.is_available() else "cpu") #TR
        REPLICA = args.replica
        # Model-type directories are named after their training-run ids.
        if args.type == 'D':
            MODEL_TYPE = 'Distogram'
            MODEL_PATH = Path(args.model) / '873731'
        elif args.type == 'B':
            MODEL_TYPE = 'Background'
            MODEL_PATH = Path(args.model) / '916425'
        elif args.type == 'T':
            MODEL_TYPE = 'Torsion'
            MODEL_PATH = Path(args.model) / '941521'
        OUT_DIR = OUT_DIR / MODEL_TYPE / REPLICA
        OUT_DIR.mkdir(parents=True, exist_ok=True)
        print(f'Input file: {TARGET_PATH}')
        print(f'Output dir: {OUT_DIR}')
        print(f'{MODEL_TYPE} model: {MODEL_PATH}')
        print(f'Replica: {REPLICA}')
        print(f'Device: {DEVICE}')
        run_eval(TARGET_PATH, MODEL_PATH, REPLICA, OUT_DIR, DEVICE)
|
#!/usr/bin/env python3
'''
Shapeless bot for MsgRoom @ Windows96.net
Shapeless bot © Diicorp95. MIT License
Windows 96 © Mikesoft. All rights reserved, except for some parts
Open-Source Part: Configuration API
'''
import base64 # not implemented
import os.path # not implemented
# (The original `global con,config_storage_demo` statement was removed:
# `global` at module scope is a no-op in Python.)

# Live configuration store: key -> decoded value.
con = {}

# Demo serialized config: slot 0 is a length checksum filled in by the demo
# driver, slot 1 the magic header, then "key:base64(value)" entries.
config_storage_demo = [
    None,
    'SHAPELESS:CONFIG',
    'silent:MA==',# 0: Can send messages; 1: Don't return error code ('Learned', 'Error'..., etc.); 2: Completely silent. 'BOTGUESS', 'say(<<text>>)'' and similar commands will be useless.
    'default_name:PT58PD0=',# Default name.
    'name_lasts:AA==',# If true, then bot always sets its nickname as in parameter 'name_default'. Default name can be set if user executes command 'BOTNAME-default'
    'prefix:XSNbIA==',# Prefix when copying name of a user (by their ID) or nickname
] # example; do not use in real code
def usend(msg): # In real code it's used only as a command from terminal
    """Dispatch a terminal command string to the config subsystem."""
    # < ... >
    # progs.received(msg) # not implemented; will be useless here
    if msg.startswith('BOTCON'):
        eq = msg.find('=')
        # Accept "BOTCON key=value": a space after the keyword, an '=' is
        # present, the key is non-empty, and the value does not end in '='.
        if msg[6:7] == " " and eq > -1 and msg[7:8] != "=" and msg[-1:] != "=":
            cfg.edit(msg[7:eq], msg[eq + 1:])
    if msg == 'BOTSAVECON':
        cfg.save(False)
    if msg == 'BOTRESTORECON':
        cfg.restore(False)
    # < ... >
class cfg:
    """Config manager for the bot.

    Keeps the live key/value store in the module-level `con` dict and
    (de)serializes it as [checksum, magic, "key:base64(value)", ...].
    Methods are written without `self` and are always invoked on the class
    itself (e.g. cfg.edit(...)), never on an instance.
    """
    MAGIC_STRING = 'SHAPELESS:CONFIG'
    term = {'homedisk': '/dev/dsk1/'} # not implemented
    # NOTE(review): os.path.join discards earlier components when a later
    # one is absolute, so this evaluates to "/shapeless.config" — confirm
    # that is intended.
    default_config_location = os.path.join(term['homedisk'],"/shapeless.config")

    def edit(variable, value, system = False):
        """Set `variable` to `value` in `con`.

        `system=True` suppresses the chat feedback (used while bulk-loading
        during restore). Returns 1 on success, 2 when `con` is missing.
        """
        if not system:
            try:
                # NOTE(review): this sends 'Learned' when silent > 1, which
                # the config comments describe as "completely silent" — the
                # comparison looks inverted; confirm before relying on it.
                if int(con['silent'])>1:
                    sendmsg('Learned') # demo
            except ValueError:
                # 'silent' held a non-numeric value.
                raise ValueError('Unstable config')
            except NameError:
                # `con` (or sendmsg) does not exist yet.
                return 2
            except KeyError:
                # First edit ever: initialise 'silent' to the default.
                con['silent'] = 0
        con[variable]=value
        if not system: sendterm(BM+"Changed value"+NC+" of "+BC+variable+NC+" to "+BC+value+NC,'config')
        else: sendterm("Value of "+BC+variable+NC+" is loaded",'config')
        return 1

    def encode(a):
        """Base64-encode a UTF-8 string, returning a str."""
        return str(base64.b64encode(a.encode("utf-8")),"utf-8")

    def decode(a):
        """Base64-decode to a UTF-8 string; returns the int 1 on failure."""
        try:
            return str(base64.b64decode(a),"utf-8")
        except Exception:
            return 1

    #def restore(absolute_path = None):
        #if absolute_path is None:
            #absolute_path = cfg.default_config_location
    def restore(system = False):
        """Load every key:base64value entry from the stored config into `con`.

        Entry 0 is a numeric length checksum, entry 1 the magic string;
        entries without a ':' separator are skipped.
        """
        sendmsg('...')
        #x = fs.readlines(cfg.default_config_location,_) # example; do not comment this line in real code
        #if isinstance(x,int) and x < 1: # example; do not comment this line in real code
            #return 1 # example; do not comment this line in real code
        _ = config_storage_demo # example; do not use in real code
        # NOTE(review): _[0] is None until the demo driver fills it in, so
        # calling restore() before that raises TypeError on this comparison.
        if _[0] > 15:
            if _[1] == cfg.MAGIC_STRING:
                for i,s in enumerate(_):
                    if i > 0:
                        x = s.find(':')
                        if (x == -1):
                            continue
                        cfg.edit(s[0:x],cfg.decode(s[x+1:]),system)
                sendmsg('Restored 💾')

    #def save(absolute_path = None):
        #if absolute_path is None:
            #absolute_path = cfg.default_config_location
    def save(system = False):
        """Serialize `con` into the storage list format and announce it.

        NOTE(review): the loop encodes the *key* (`str(s)`), not the stored
        value `con[s]`, and skips index 0 — both look like bugs; confirm.
        """
        sendmsg('...')
        _ = [False,cfg.MAGIC_STRING]
        for i,s in enumerate(con):
            if i > 0:
                try:
                    _.append(str(list(con.keys())[i])+':'+cfg.encode(str(s)))
                except KeyError:
                    try:
                        sendterm("Could not access value of "+BC+list(con.keys())[i]+NC,'config')
                    except:
                        raise KeyError('Could not access value of a key at config')
                    continue
        #if fs.writelines(cfg.default_config_location,_) < 1: #example; do not comment this line in real code
        sendmsg('Saved 💾')
if __name__=="__main__": # demo
    # Stand-ins for the real chat/terminal senders (ANSI-colored prints).
    sendmsg=lambda s:print(GC+'Chat + term. : '+NC+s) # not implemented
    sendterm=lambda s,mgrp='':print(SC+'[Config]'+NC,s) if mgrp=='config' else print(GC+'Terminal only: '+NC+s) # not implemented
    # ANSI escape sequences used by the demo senders above.
    BM='\033[7m' # demo
    BC='\033[3m' # demo
    SC='\033[0;91m' # demo
    GC='\033[0;94m' # demo
    NC='\033[0m' # demo
    # Fill slot 0 with a simple length checksum so cfg.restore() accepts
    # the demo storage (it requires a value > 15).
    config_storage_demo[0] = 0 # example; do not use in real code
    for i,s in enumerate(config_storage_demo): # example; do not use in real code
        config_storage_demo[0] += len(str(s)) # example; do not use in real code
    config_storage_demo[0] += len(config_storage_demo) # example; do not use in real code
    cfg.restore(True) # demo
    usend('BOTCON name_lasts=true') # demo
    usend('BOTSAVECON') # demo
|
import random
import socket
import urllib.parse
# Listening socket for the toy guest-book HTTP server.
s = socket.socket(
    family=socket.AF_INET,
    type=socket.SOCK_STREAM,
    proto=socket.IPPROTO_TCP,
)
# Allow quick restarts: without SO_REUSEADDR a recently-closed server
# leaves the port in TIME_WAIT and re-binding port 8000 fails.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 8000))
s.listen()
def handle_connection(conx):
    """Read one HTTP request from `conx`, dispatch it, write the response.

    Parses the request line and headers, reads the body when a
    Content-Length header is present, delegates to handle_request, and
    always answers 200 OK (this is a teaching server).
    """
    req = conx.makefile("rb")
    reqline = req.readline().decode('utf8')
    method, url, version = reqline.split(" ", 2)
    assert method in ["GET", "POST"]
    headers = {}
    for line in req:
        line = line.decode('utf8')
        if line == '\r\n': break  # blank line ends the header section
        header, value = line.split(":", 1)
        headers[header.lower()] = value.strip()
    if 'content-length' in headers:
        length = int(headers['content-length'])
        body = req.read(length).decode('utf8')
    else:
        body = None
    body, headers = handle_request(method, url, headers, body)
    response = "HTTP/1.0 200 OK\r\n"
    for header, value in headers.items():
        response += "{}: {}\r\n".format(header, value)
    response += "Content-Length: {}\r\n".format(len(body.encode("utf8")))
    response += "\r\n" + body
    # Bug fix: send() may transmit only part of the buffer; sendall()
    # loops until every byte has been written.
    conx.sendall(response.encode('utf8'))
    conx.close()
TOKENS = {}  # session token -> username
NONCES = {}  # username -> most recently issued form nonce
# Guest-book entries as (text, author) pairs.
ENTRIES = [
    ("No names. We are nameless!", "cerealkiller"),
    ("HACK THE PLANET!!!", "crashoverride"),
]
# Demo credentials: username -> plaintext password (demo only).
LOGINS = { "crashoverride": "0cool", "cerealkiller": "emmanuel" }
def check_login(username, pw):
    """Return True iff `username` exists and `pw` matches its password."""
    try:
        return LOGINS[username] == pw
    except KeyError:
        return False
def parse_cookies(s):
    """Parse a Cookie header value into a {name: value} dict."""
    if len(s) == 0:
        return {}
    pairs = (chunk.strip().split("=", 1) for chunk in s.split(";"))
    return {name: value for name, value in pairs}
def handle_request(method, url, headers, body):
    """Route one parsed HTTP request; return (body_html, resp_headers).

    POST / attempts a login and issues a session cookie; other requests
    resolve the session from the cookie. POSTs go to the guest book;
    GETs serve static assets, the login form, the counter demo, or the
    guest-book page.
    """
    print('handle_request')
    resp_headers = {}
    # Bug fix: username must be defined even for anonymous requests; it
    # was previously left unbound on a cookie-less GET, so rendering the
    # guest book crashed with UnboundLocalError.
    username = None
    if method == 'POST' and url == "/":
        params = form_decode(body)
        if check_login(params.get("username"), params.get("password")):
            username = params["username"]
            # Issue a fresh session token and hand it back as a cookie.
            token = str(random.random())[2:]
            TOKENS[token] = username
            resp_headers["Set-Cookie"] = "token=" + token
    elif "cookie" in headers:
        username = TOKENS.get(parse_cookies(headers["cookie"]).get("token"))
    if method == 'POST':
        params = form_decode(body)
        if url == '/add':
            return add_entry(params, username), resp_headers
        else:
            return show_comments(username), resp_headers
    else:
        if url == "/comment9.js":
            with open("comment9.js") as f:
                return f.read(), resp_headers
        elif url == "/comment9.css":
            with open("comment9.css") as f:
                return f.read(), resp_headers
        elif url == "/eventloop13.js":
            with open("eventloop13.js") as f:
                return f.read(), resp_headers
        elif url == "/login":
            return login_form(), resp_headers
        elif url == "/count":
            return show_count(), resp_headers
        else:
            return show_comments(username), resp_headers
def login_form():
    """Return the HTML for the username/password login form."""
    fragments = [
        "<!doctype html>",
        "<form action=/ method=post>",
        "<p>Username: <input name=username></p>",
        "<p>Password: <input name=password type=password></p>",
        "<p><button>Log in</button></p>",
        "</form>",
    ]
    return "".join(fragments)
def html_escape(text):
    """Escape characters that are special in HTML text content.

    Bug fix: the replacements previously substituted characters with
    themselves (the entity names had been lost), so no escaping happened
    at all. '&' must be replaced first so freshly inserted entities are
    not themselves re-escaped; '>' is escaped too for symmetry.
    """
    return (text.replace("&", "&amp;")
                .replace("<", "&lt;")
                .replace(">", "&gt;"))
def show_comments(username):
    """Render the guest-book page.

    Logged-in users (truthy `username`) get the signing form with a fresh
    per-user nonce; everyone gets the list of entries.
    """
    out = "<!doctype html>"
    if username:
        # Issue a one-time nonce tied to this user for the /add form.
        nonce = str(random.random())[2:]
        NONCES[username] = nonce
        out += "<form action=add method=post>"
        out += "<p><input name=nonce type=hidden value=" + nonce + "></p>"
        out += "<p><input name=guest></p>"
        out += "<p><button>Sign the book!</button></p>"
        out += "</form>"
    else:
        out += "<p><a href=/login>Log in to add to the guest list</a></p>"
    # NOTE(review): `who` comes from LOGINS usernames and is rendered
    # unescaped — fine for the fixed demo accounts, but escape it if
    # usernames ever become user-supplied.
    for entry, who in ENTRIES:
        out += "<p>" + html_escape(entry)
        out += " <i>from " + who + "</i></p>"
    # Bug fix: the server serves this script as /comment9.js (see
    # handle_request); /comment.js fell through to the HTML fallback.
    out += "<script src=/comment9.js></script>"
    return out
def show_count():
    """Return the static page hosting the event-loop counter demo."""
    fragments = (
        "<!doctype html>",
        "<div>",
        " Let's count up to 50!",
        "</div>",
        "<div id=output>hi</div>",
        "<div><script src=/eventloop13.js></script></div>",
    )
    return "".join(fragments)
def check_nonce(params, username):
    """True iff the submitted nonce matches the one issued to `username`."""
    return ('nonce' in params
            and username in NONCES
            and params['nonce'] == NONCES[username])
def add_entry(params, username):
    """Handle POST /add: append a guest-book entry for `username`.

    Fixes vs. the original: entries are stored as (text, author) tuples so
    show_comments can unpack them; show_comments is called with its
    required `username` argument; and the form's nonce is now verified
    with check_nonce (it was issued by show_comments but never checked,
    leaving the form open to CSRF).
    """
    if (check_nonce(params, username)
            and 'guest' in params and len(params["guest"]) <= 100 and username):
        ENTRIES.append((params['guest'], username))
    return show_comments(username)
def form_decode(body):
    """Decode an application/x-www-form-urlencoded body into a dict.

    Generalized from the original %20-only handling: '+' and every
    percent-escape are now decoded, in both names and values, via
    urllib.parse.unquote_plus.
    """
    params = {}
    for field in body.split("&"):
        name, value = field.split("=", 1)
        params[urllib.parse.unquote_plus(name)] = urllib.parse.unquote_plus(value)
    return params
# Serve forever: accept one connection at a time and handle it to
# completion (single-threaded by design — this is a teaching server).
while True:
    client_sock, client_addr = s.accept()
    print("Received connection from", client_addr)
    handle_connection(client_sock)
|
from matplotlib import pyplot as plt
from collections import OrderedDict
import json
# Plot per-method communication-time curves from a benchmark JSON file.
colors = ['b', 'r', 'k', 'g', 'y', 'c', 'm']
line_style = ['--', '-.', ':', '-']

model_size = 31400
server_num = 36
underlay = 'geantdistance'

file_name = "%d-%s-%d.json.1" % (model_size, underlay, server_num)
with open("./%s" % file_name) as f:
    time_reference = json.load(f)
# Fixed network-capacity key selected for this figure.
time = time_reference["100000000.0"]

i = 0
for j, method in enumerate(time):
    print(time[method])
    # Bug fix: wrap both indices so more methods than styles/colors no
    # longer raises IndexError (was line_style[i] / colors[j] unwrapped);
    # also hoists the duplicated color_n_line computation out of the if.
    color_n_line = colors[j % len(colors)] + line_style[i % len(line_style)]
    if len(time[method]) == 1:
        # Single data point: draw it as a flat line across the x range.
        x = range(1, 31)
        plt.plot(x, [time[method][0] for _ in x], color_n_line, label=method, linewidth=2)
    else:
        plt.plot(range(1, len(time[method])+1), time[method], color_n_line, label=method, linewidth=2)
    i += 1
plt.grid()
plt.legend()
plt.show()
# model = 'LR'
# senario_list = ['geantdistance-40', 'geantdistance-9']
#
# model = 'CNN'
# senario_list = ['geantdistance-9']
#
# nrows = 1; ncols = 1
# fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(20,10))
# # fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 8))
# legends = time[model]['legend']
#
# for k, senario in enumerate(senario_list):
# xs = [float(x) / 1e6 for x in time[model][senario].keys()]
# yy = time[model][senario]
# ys_list = [[] for _ in range(len(legends))]
# for x in yy:
# for i, y in enumerate(yy[x]):
# # ys_list[i].append(y / yy[x][0])
# ys_list[i].append(y)
#
# ax = plt.subplot(nrows, ncols, k + 1)
# plt.grid()
# for i, ys in enumerate(ys_list):
# color_n_line = colors[i] + line_style[i]
# plt.plot(xs, ys, color_n_line, label=legends[i], linewidth=2)
# plt.xlabel("Network Capacity (Mbps)", fontsize=20)
# plt.ylabel("Communication Time (s)", fontsize=20)
# plt.tick_params(labelsize=20)
# plt.xscale('log')
# plt.legend(fontsize=20)
# plt.title(model+ '-' + senario, fontsize=20)
# plt.yscale('log')
# plt.show()
|
from cohortextractor import (
codelist,
codelist_from_csv,
)
# Codelist definitions: each maps a clinical-coding CSV from codelists/ to
# a cohortextractor codelist object.
# NOTE(review): the `column` argument varies by file ("Code", "code",
# "CTV3ID") — presumably matching each CSV's header row; verify against
# the actual files if a codelist loads empty.

# Ethnicity, grouped into 6 categories via the Grouping_6 column.
ethnicity_codes = codelist_from_csv(
    "codelists/opensafely-ethnicity.csv",
    system="ctv3",
    column="Code",
    category_column="Grouping_6",
)

# Comorbidity codelists (CTV3).
dementia_codes = codelist_from_csv(
    "codelists/opensafely-dementia-complete.csv",
    system="ctv3",
    column="code",
)
chronic_respiratory_disease_codes = codelist_from_csv(
    "codelists/opensafely-chronic-respiratory-disease.csv",
    system="ctv3",
    column="CTV3ID",
)
chronic_cardiac_disease_codes = codelist_from_csv(
    "codelists/opensafely-chronic-cardiac-disease.csv",
    system="ctv3",
    column="CTV3ID",
)
diabetes_codes = codelist_from_csv(
    "codelists/opensafely-diabetes.csv",
    system="ctv3",
    column="CTV3ID",
)

# Cancer, split into lung / haematological / all other sites.
lung_cancer_codes = codelist_from_csv(
    "codelists/opensafely-lung-cancer.csv",
    system="ctv3",
    column="CTV3ID",
)
haem_cancer_codes = codelist_from_csv(
    "codelists/opensafely-haematological-cancer.csv",
    system="ctv3",
    column="CTV3ID",
)
other_cancer_codes = codelist_from_csv(
    "codelists/opensafely-cancer-excluding-lung-and-haematological.csv",
    system="ctv3",
    column="CTV3ID",
)
chronic_liver_disease_codes = codelist_from_csv(
    "codelists/opensafely-chronic-liver-disease.csv",
    system="ctv3",
    column="CTV3ID",
)

# Care-home residency indicators (CTV3 and SNOMED variants).
nhse_care_home_des_codes = codelist_from_csv(
    "codelists/opensafely-nhs-england-care-homes-residential-status-ctv3.csv",
    system="ctv3",
    column="code",
)
primis_codes = codelist_from_csv(
    "codelists/primis-covid19-vacc-uptake-longres.csv",
    system="snomed",
    column="code",
)

stroke_codes = codelist_from_csv(
    "codelists/opensafely-stroke-updated.csv",
    system="ctv3",
    column="CTV3ID",
)
from functools import wraps
from sys import argv
from time import time

import boto3
# AWS handles; credentials and region come from the environment/config.
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Music')
# Queryable attribute -> secondary index name (None = the table's own key).
indices = {
    'Artist': None,
    'Song': 'Song-Artist-index',
    'Year': 'Year-Song-index',
}
def performance(method):
    """Decorator: run `method`, print its Count/ScannedCount/duration
    stats, and return the response's LastEvaluatedKey (None when there
    are no more pages).

    Uses functools.wraps so the wrapped function keeps its name and
    docstring (the original decorator dropped them).
    """
    @wraps(method)
    def wrapped(*args, **kw):
        start_time = time()
        response = method(*args, **kw)
        duration = time() - start_time
        print(' - Returned {} items'.format(response.get('Count', 0)))
        print(' - Scanned {} items'.format(response.get('ScannedCount', 0)))
        print(' - Duration {:0.3f} seconds'.format(duration))
        print('')
        return response.get('LastEvaluatedKey')
    return wrapped
@performance
def scan(attribute, value, last_key=None):
    """Run one Scan page filtered on attribute == value, resuming from
    `last_key` when paginating."""
    scan_filter = {
        attribute: {
            'AttributeValueList': [value],
            'ComparisonOperator': 'EQ',
        }
    }
    params = {'ScanFilter': scan_filter}
    if last_key:
        params['ExclusiveStartKey'] = last_key
    return table.scan(**params)
@performance
def query(attribute, value, index_name, last_key=None):
    """Run one Query page where attribute == value, optionally against a
    secondary index, resuming from `last_key` when paginating."""
    condition = {
        attribute: {
            'AttributeValueList': [value],
            'ComparisonOperator': 'EQ',
        }
    }
    params = {'KeyConditions': condition}
    if index_name:
        params['IndexName'] = index_name
    if last_key:
        params['ExclusiveStartKey'] = last_key
    return table.query(**params)
def scan_iterator(attribute, value):
    """Run Scan pages until DynamoDB stops returning a LastEvaluatedKey."""
    print(' == Performing a SCAN operation ==')
    print('')
    last_key = scan(attribute, value, None)
    while last_key is not None:
        last_key = scan(attribute, value, last_key)
def query_iterator(attribute, value, index_name):
    """Run Query pages until DynamoDB stops returning a LastEvaluatedKey."""
    print(' == Performing a QUERY operation ==')
    print('')
    last_key = query(attribute, value, index_name, None)
    while last_key is not None:
        last_key = query(attribute, value, index_name, last_key)
if __name__ == '__main__':
    # argv[1] = attribute name, argv[2] = value to match.
    attribute = argv[1] if len(argv) >= 2 else 'Year'
    # Bug fix: reading argv[2] requires at least 3 entries; the original
    # `>= 2` guard crashed with IndexError when only the attribute was
    # supplied on the command line.
    value = argv[2] if len(argv) >= 3 else '2010'
    if attribute in indices:
        index_name = indices[attribute]
    else:
        raise Exception('Invalid key. Needs to be Artist, Year or Song')
    scan_iterator(attribute, value)
    query_iterator(attribute, value, index_name)
|
import tensorflow as tf
import tensorlayer as tl
import numpy as np
import os
from data import data
print("=======TEST.PY IMPORTED WHAT THE FUCK=======")  # import-time debug marker

# Load the question/answer corpus and vocabulary produced by the data step.
metadata, idx_q, idx_a = data.load_data(PATH='data/')
w2idx = metadata['w2idx']   # dict word 2 index
idx2w = metadata['idx2w']   # list index 2 word
print("Loading vocab done:", "shapes", idx_q.shape, idx_a.shape)

emb_dim = 512
batch_size = 256
xvocab_size = yvocab_size = len(idx2w)
unk_id = w2idx['unk']  # 1
pad_id = w2idx['_']    # 0

# Reserve two extra vocabulary slots for the decoder control tokens.
start_id = xvocab_size
end_id = xvocab_size + 1
w2idx['start_id'] = start_id
w2idx['end_id'] = end_id
idx2w = idx2w + ['start_id', 'end_id']
xvocab_size = yvocab_size = xvocab_size + 2
# (Bug fix: removed a bare `w2idx['end_id']` expression statement here —
# it looked up the key just assigned above and discarded the result.)
print("Vocab preprocessing done")

# Hide all GPUs *before* importing the model module, which initialises
# TensorFlow state on import.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import model

print("Start testing")
seq2seq = model.Model(w2idx, idx2w, True)
sess = seq2seq.restore()
#seq2seq.train(trainX, trainY)
questions = [
    'что думаешь об nlp',
    'кем ты работаешь',
    'какой сегодня день'
]
answers = seq2seq.predict(sess, questions)
new_answers = [seq2seq.predict_one(sess, q) for q in questions]
for q, a, new_a in zip(questions, answers, new_answers):
    print(q)
    print(">", " ".join(a))
    print(">", " ".join(new_a))
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the extraLongFactorials function below.
def extraLongFactorials(n):
    """Print n! (Python ints are arbitrary precision, so no overflow).

    Bug fix: the original computed n * fact(n - 1), which printed 0 for
    n == 0 even though 0! is 1. math.factorial is exact and correct for
    all n >= 0 (and raises ValueError for negative n).
    """
    print(math.factorial(n))
def fact(n):
    """Return n! recursively; anything <= 1 yields 1 (the base case)."""
    return n * fact(n - 1) if n > 1 else 1
if __name__ == '__main__':
    # Read n from stdin and print its factorial.
    n = int(input())
    extraLongFactorials(n)
|
from .dukemtmcreid_interpretation import DukeMTMC_Interpretation
from .market1501_interpretation import Market1501_Interpretation
from .duketomarket_interpretation import DukeToMarket_Interpretation
from .markettoduke_interpretation import MarketToDuke_Interpretation
# AND
from .market1501_and_interpretation import Market1501_And_Interpretation
from .dukemtmcreid_and_interpretation import DukeMTMC_And_Interpretation
|
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import json
import logging
from configuration import getConfiguration
from github import getPool, parsePayload
from irc import sendMessages
from validation import validatePayload
from __init__ import __version__
def handler(event, context=None):
    """AWS Lambda / local-server entry point for ghi.

    Validates the incoming GitHub webhook request, matches it to a
    configured pool, optionally verifies its HMAC signature, turns the
    payload into IRC messages, and sends them. Returns an API-Gateway
    style dict with "statusCode" and a JSON "body".
    """
    # ensure it's a valid request
    if event and "body" in event and "headers" in event:
        # AWS Lambda configures the logger before executing this script
        # We want to remove their configurations and set our own
        log = logging.getLogger()
        if log.handlers:
            # NOTE(review): the loop variable shadows the enclosing
            # function name `handler` (harmless here, but confusing).
            for handler in log.handlers:
                log.removeHandler(handler)
        if "X-Ghi-Server" in event["headers"]:
            # was invoked by local server
            logging.basicConfig(
                level=logging.INFO,
                format="%(asctime)s [ghi] %(message)s",
                datefmt="%Y-%m-%d %H:%M:%S"
            )
        else:
            logging.basicConfig(
                level=logging.INFO,
                format="%(message)s"
            )
        # By default ghi will respond to the request immediately,
        # then invoke itself to actually process the event.
        # This can be disabled by setting GHI_LONG_RESPONSE="true"
        if "requestContext" in event:
            from aws import InvokeSelf
            # Was invoked by AWS
            # NOTE(review): any non-empty GHI_LONG_RESPONSE value (even
            # "false") takes this branch — confirm that is intended.
            if "GHI_LONG_RESPONSE" in os.environ and os.getenv("GHI_LONG_RESPONSE"):
                pass
            elif "X-Ghi-Invoked" not in event["headers"]:
                return InvokeSelf(event)
        # validate and load configuration file
        configuration = getConfiguration()
        if configuration["statusCode"] != 200:
            return configuration
        # Enable debug if set in config
        if configuration["debug"]:
            logging.getLogger().setLevel(logging.DEBUG)
        # verify the request is from GitHub
        githubPayload = event["body"]
        # Enhanced logging if debug is set
        logging.debug("Ghi Version:")
        logging.debug(__version__)
        logging.debug("Payload:")
        logging.debug(githubPayload)
        logging.debug("Headers:")
        logging.debug(event["headers"])
        # figure out which pool this should belong to so we can use its secret
        pool = getPool(githubPayload, configuration["pools"])
        if pool["statusCode"] != 200:
            return pool
        try:
            if pool["verify"]:
                githubSignature = event["headers"]["X-Hub-Signature"]
            # GitHub has used both header capitalizations; accept either.
            try:
                githubEvent = event["headers"]["X-GitHub-Event"]
            except KeyError as e:
                githubEvent = event["headers"]["X-Github-Event"]
        except KeyError as e:
            errorMessage = "missing header in request: %s" % e
            logging.error(errorMessage)
            return {
                "statusCode": 400,
                "body": json.dumps({
                    "success": False,
                    "message": errorMessage
                })
            }
        # check signatures of request
        if pool["verify"]:
            validPayload = validatePayload(
                payload=githubPayload,
                signature=githubSignature,
                secret=pool["secret"]
            )
            if not validPayload:
                logging.error("GitHub payload validation failed")
                return {
                    "statusCode": 401,
                    "body": json.dumps({
                        "success": False,
                        "message": "payload validation failed"
                    })
                }
        else:
            logging.debug("Skipping payload verification because 'verify' set to False.")
        # Translate the webhook event into IRC-ready message strings.
        getMessages = parsePayload(githubEvent, githubPayload, pool["pool"].repos, pool["pool"].shorten)
        if getMessages["statusCode"] != 200:
            return getMessages
        logging.debug("Messages:")
        logging.debug(getMessages["messages"])
        # Send messages to the designated IRC channel(s)
        sendToIrc = sendMessages(pool["pool"], getMessages["messages"])
        if sendToIrc["statusCode"] != 200:
            return sendToIrc
        result = "Successfully notified IRC."
        logging.info(result)
        return {
            "statusCode": 200,
            "body": json.dumps({
                "success": True,
                "message": result
            })
        }
    else:
        return {
            "statusCode": 400,
            "body": json.dumps({
                "success": False,
                "message": "bad event data"
            })
        }
"""The various clients that help you send stats."""
import itertools
import socket
import time
from . import packets
from .helpers import dot_join
class StatsClient:
    """Basic stats client.

    Holds some functionality, but is not recommended for direct use.
    """

    def __init__(self, prefix, host=None, port=None, disabled=None):
        """Create a client; defaults to localhost:8125, enabled."""
        self.prefix = prefix
        self.host = host if host else 'localhost'
        self.port = port if port else 8125
        self.disabled = disabled if disabled else False
        if not self.disabled:
            self.socket = self.connect(self.host, self.port)

    def counter(self, suffix):
        """Return a Counter scoped under this client."""
        return Counter(self, suffix)

    def timer(self, suffix):
        """Return a Timer scoped under this client."""
        return Timer(self, suffix)

    def gauge(self, suffix):
        """Return a Gauge scoped under this client."""
        return Gauge(self, suffix)

    def set(self, suffix):
        """Return a Set scoped under this client."""
        return Set(self, suffix)

    def send(self, *partials):
        """Prefix each partial packet and push them out in one datagram."""
        if self.disabled:
            return
        payload = b'\n'.join(
            dot_join(self.prefix, partial).encode() for partial in partials
        )
        self.socket.send(payload)

    @staticmethod
    def connect(host, port):
        """Open a UDP socket 'connected' to host:port."""
        conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        conn.connect((host, port))
        return conn
class Timer:
    """Timer stat: emits packets of the form <name>:<value>|ms."""

    def __init__(self, client, suffix):
        """Bind the timer to `client` under the given name `suffix`."""
        self.client = client
        self.suffix = suffix
        self._start = None
        self._intermediate = None

    def send(self, name, value):
        """Ship one measured duration to the client."""
        packet = packets.timer_packet(dot_join(self.suffix, name), value)
        self.client.send(*packet)

    def start(self):
        """Begin timing; returns self so calls can be chained."""
        self._start = time.time()
        return self

    def intermediate(self, name):
        """Send the time elapsed since the previous intermediate (or start)."""
        reference = self._intermediate or self._start
        self.send(name, time.time() - reference)
        self._intermediate = time.time()

    def stop(self, name='total'):
        """Send the total elapsed time and reset the timer."""
        self.send(name, time.time() - self._start)
        self._start = self._intermediate = None
class Counter:
    """Counter class.

    <name>:<value>|c
    """

    def __init__(self, client, suffix):
        """Return a new counter bound to *client* under *suffix*."""
        self.client = client
        self.suffix = suffix

    def increment(self, name, count):
        """Increment the counter."""
        packet = packets.counter_packet(dot_join(self.suffix, name), count)
        self.client.send(*packet)

    def decrement(self, name, count):
        """Decrement the counter."""
        self.increment(name, -count)

    def from_mapping(self, mapping):
        """Send many values at once from a mapping."""
        parts = [
            packets.counter_packet(dot_join(self.suffix, name), count)
            for name, count in mapping.items()
        ]
        self.client.send(*itertools.chain.from_iterable(parts))
class Gauge:
    """Gauge class.

    <name>:<value>|g
    """

    def __init__(self, client, suffix):
        """Return a new gauge bound to *client* under *suffix*."""
        self.client = client
        self.suffix = suffix

    def set(self, name, value):
        """Set the current value of the gauge."""
        self.client.send(
            *packets.gauge_set_packet(dot_join(self.suffix, name), value)
        )

    def update(self, name, value):
        """Update the current value with a relative change."""
        self.client.send(
            *packets.gauge_update_packet(dot_join(self.suffix, name), value)
        )
class Set:
    """Set class.

    <name>:<value>|s
    """

    def __init__(self, client, suffix):
        """Return a new set bound to *client* under *suffix*."""
        self.client = client
        self.suffix = suffix

    def add(self, name, value):
        """Add a value to the set."""
        lines = packets.set_packet(dot_join(self.suffix, name), value)
        self.client.send(*lines)
|
from test.BaseTest import BaseTest
from src.gui.Server import Server
class VideoStreamerTest(BaseTest):
    """Smoke tests for the GUI video-stream Server."""

    def setUp(self):
        # A fresh Server per test; the threaded run() call stays disabled
        # so the suite does not bind a real network port.
        self.server = Server()
        # self.server.run(host="0.0.0.0", threaded=True)

    def test_init(self):
        """The fixture should construct a real Server instance."""
        self.assertIsInstance(self.server, Server, "Server should be a videostream server")
# Re-export the decoding strategy base class at package level.
from .decoding_strategy import DecodingStrategy

# Names exported by `from <package> import *`.
__all__ = [
    'DecodingStrategy'
]
# Model architecture settings.
MODEL_HYPERPARAMETERS = {
    "num_class": 91,  # number of class, denoted C in paper, must include the background class (id: 0)
    "input_size": 512,
    "grid_sizes": [24],  # Grid number, denoted S in the paper
    "backbone": "resnet50",  # resnet50, mobilenet, mobilenetv2, xception
    "head_style": "vanilla",  # decoupled, vanilla
    "head_depth": 8,
    "fpn_channel": 256
}

# Optimizer / schedule settings, used only in training mode.
TRAINING_PARAMETERS = {
    "batch_size": 8,
    "num_epoch": 36,
    "steps_per_epoch": 5000,
    "learning_rates": [0.01, 0.001, 0.0001],
    "epochs": [27, 33],  # epochs at which the learning rate drops
    "weight_decay": 0.0001,
    "momentum": 0.9,
}


def display_config(mode):
    """Print the model hyperparameters and, when ``mode == 'train'``,
    the training parameters as well.

    :param mode: run mode string; only the value 'train' adds the
        training section.
    """
    print()
    print("Model hyperparameters")
    print("=" * 80)
    print("Number of output class:", MODEL_HYPERPARAMETERS['num_class'])
    # Bug fix: this line previously printed 'num_class' instead of the
    # actual input size.
    print("Input shape:", MODEL_HYPERPARAMETERS['input_size'], "(Current only support squared images)")
    print("Grid number(s) (S):", MODEL_HYPERPARAMETERS['grid_sizes'])
    print("Backbone network:", MODEL_HYPERPARAMETERS['backbone'])
    print("Head style:", MODEL_HYPERPARAMETERS['head_style'])
    print("Depth of head network:", MODEL_HYPERPARAMETERS['head_depth'])
    print("Number of channels of FPN network:", MODEL_HYPERPARAMETERS['fpn_channel'])
    print()
    if mode == 'train':
        print("Training parameters")
        print("=" * 80)
        print("Batch size:", TRAINING_PARAMETERS['batch_size'])
        print("Number of epochs:", TRAINING_PARAMETERS['num_epoch'])
        print("Learning rate:", TRAINING_PARAMETERS['learning_rates'])
        print("Epoch that changes the learning rate:", TRAINING_PARAMETERS['epochs'])
        print("Weigth decay:", TRAINING_PARAMETERS['weight_decay'])
        print("Momentum:", TRAINING_PARAMETERS['momentum'])
        print()
|
import numpy as np
from .RelVelocity import RelVelocity
from .RelEnergy import RelEnergy
from .PSDtoFlux import PSDtoFluxE
from scipy.optimize import minimize
# Boltzmann constant in J/K.
kB = np.float64(1.38064852e-23)
# Elementary charge in C; together with the factor 1000 this converts keV to J.
e = np.float64(1.6022e-19)
def _MB_psd(E, n, T, m, CountConst=1.0):
    '''
    M-B dist outputting PSD
    '''
    # Energy arrives in keV; convert to Joules.
    E_joules = E * 1000 * e
    # Normalization of the Maxwell-Boltzmann distribution.
    norm = n * (m / (2 * np.pi * kB * T))**1.5
    return norm * np.exp(-E_joules / (kB * T))
def _MB_psdv2(E, n, T, m, CountConst=1.0):
    '''
    M-B dist outputting the 1-D (speed-weighted) PSD
    '''
    # Energy arrives in keV; convert to Joules.
    E_joules = E * 1000 * e
    norm = n * (m / (2 * np.pi * kB * T))**1.5
    psd = norm * np.exp(-E_joules / (kB * T))
    # Weight by the relativistic speed squared for the 1-D distribution.
    speed = RelVelocity(E, m)
    return 4 * np.pi * psd * speed**2
def _MB_psdv4(E, n, T, m, CountConst=1.0):
    '''
    M-B dist outputting the v^4-weighted PSD
    '''
    # Energy arrives in keV; convert to Joules.
    E_joules = E * 1000 * e
    norm = n * (m / (2 * np.pi * kB * T))**1.5
    psd = norm * np.exp(-E_joules / (kB * T))
    # Weight by mass and speed to the fourth power.
    speed = RelVelocity(E, m)
    return m * 4 * np.pi * psd * speed**4
def _MB_flux(E, n, T, m, CountConst=1.0):
    '''
    M-B dist outputting flux
    '''
    # Energy arrives in keV; convert to Joules.
    E_joules = E * 1000 * e
    norm = n * (m / (2 * np.pi * kB * T))**1.5
    psd = norm * np.exp(-E_joules / (kB * T))
    # Express the phase-space density as a differential energy flux.
    return PSDtoFluxE(E, psd, m)
def _MB_cts(E, n, T, m, CountConst=1.0):
    '''
    M-B dist outputting counts
    '''
    # Energy arrives in keV; convert to Joules.
    E_joules = E * 1000 * e
    norm = n * (m / (2 * np.pi * kB * T))**1.5
    psd = norm * np.exp(-E_joules / (kB * T))
    flux = PSDtoFluxE(E, psd, m)
    # Counts relate to flux via the instrument constant:
    # Counts = Flux * E * CountConst.
    return flux * E * CountConst
def GetMaxwellianFunction(yparam):
    '''
    Return a function which will produce a M-B distribution.

    Inputs
    ======
    yparam : str
        'Counts'|'Flux'|'PSD'|'PSD1D'|'PSD1Dv2' - this determines the
        output type
    '''
    funcs = {
        'counts': _MB_cts,
        'flux': _MB_flux,
        'psd1d': _MB_psdv2,
        'psd1dv2': _MB_psdv4,
        'psd': _MB_psd,
    }
    # Unknown selectors fall back to the plain PSD output.
    return funcs.get(yparam.lower(), funcs['psd'])
def Maxwellian(x, n, T, m, CountConst=1.0, xparam='V', yparam='PSD'):
    '''
    Given either velocity (m/s) or energy (keV) and a density/temperature
    calculate the Maxwell-Boltzmann distribution.

    Inputs
    ======
    x : float
        Velocity in m/s or energy in keV
    n : float
        Density in m^-3 (not cm^-3)
    T : float
        Temperature in K
    m : float
        Particle mass (kg)
    CountConst : float
        Constant which can be used to convert between flux and counts
        using:
        Flux = Counts/(E*CountConst)
        i.e.
        CountConst = Counts/(E*Flux)
    xparam : str
        'V'|'E' - denotes whether the input parameter is energy ('E')
        or velocity ('V')
    yparam : str
        'Counts'|'Flux'|'PSD'|'PSD1D'|'PSD1Dv2' - this determines the
        output type

    Returns
    =======
    f : float
        Distribution function in whichever type defined by "yparam"
    '''
    # Work in energy internally; convert velocity input if required.
    E = x if xparam == 'E' else RelEnergy(x, m)
    dist_func = GetMaxwellianFunction(yparam)
    return dist_func(E, n, T, m, CountConst)
def _GetMisfitFunc(E,f,m,CountConst,MinFunc,MBFunc,LogDiff=True):
'''
Return a function which can be minimized.
Inputs
======
E : float
Energy in keV
f : float
the spectral data (whatever units defined by yparam)
m : float
Mass of particles in kg
CountConst : float
This is used if converting from flux to counts
MinFunc : str
'mean-squared'|'mean-abs'
MBFunc : callable
This will provide us with the MB dist
LogDiff : bool
If True, then the logarithm of the points will be taken prior to
calculating the difference.
'''
lf = np.log10(f)
def FuncMS(X):
n,T = X
fm = MBFunc(E,n,T,m,CountConst)
if LogDiff:
lm = np.log10(fm)
diff = np.sum(((lf-lm)**2))/f.size
else:
diff = np.sum(((f-fm)**2))/f.size
return diff
def FuncMA(X):
n,T = X
fm = MBFunc(E,n,T,m,CountConst)
if LogDiff:
lm = np.log10(fm)
diff = np.sum(np.abs(lf-lm))/f.size
else:
diff = np.sum(np.abs(f-fm)**2)/f.size
return diff
if MinFunc == 'mean-squared':
return FuncMS
else:
return FuncMA
def FitMaxwellian(x, f, n0, T0, m, CountConst=1.0, xparam='V', yparam='PSD',
                  Verbose=False, MaxIter=None, MinFunc='mean-squared',
                  LogDiff=True, MinFit=3):
    '''
    Fit a Maxwell-Boltzmann distribution to a measured spectrum.

    Returns (n, T, success); (-1, -1, False) when fewer than MinFit
    usable data points are available.
    '''
    # Work in energy internally; convert velocity input if required.
    if xparam == 'E':
        E = x
    else:
        E = RelEnergy(x, m)
    # Keep only usable spectral points (zero is allowed for counts).
    if yparam == 'Counts':
        usable = np.isfinite(f) & (f >= 0)
    else:
        usable = np.isfinite(f) & (f > 0)
    use = np.where(usable)[0]
    if use.size < MinFit:
        return -1, -1, False
    MBFunc = GetMaxwellianFunction(yparam)
    # CountConst may be supplied per channel; subset it alongside the data.
    CC = CountConst[use] if np.size(CountConst) > 1 else CountConst
    Func = _GetMisfitFunc(E[use], f[use], m, CC, MinFunc, MBFunc, LogDiff)
    opt = {} if MaxIter is None else {'maxiter': MaxIter}
    # Nelder-Mead is gradient-free, matching the non-smooth misfits.
    res = minimize(Func, [n0, T0], method='nelder-mead', options=opt)
    n, t = res.x
    if not res.success and Verbose:
        print('Warning - potentially bad M-B fit')
        print(res.message)
    return n, t, res.success
|
import numpy as np
import pandas as pd
def find_best_linear_combination(df1, df2, actual, metric_fn, start, end, stepsize):
    """
    df1 and df2 have to be pd.Series.
    actual can be a df/series. it will be used only in metric_fn:
    metric_fn(actual,prediction_df)
    """
    assert start >= 0
    assert end <= 1
    candidates = list(np.arange(start, end, stepsize))
    # np.arange excludes the stop value; make sure the endpoint is tried.
    if end not in candidates:
        candidates.append(end)
    best_score, best_alpha = None, None
    for weight in candidates:
        blended = weight * df1 + (1 - weight) * df2
        current = metric_fn(actual, blended)
        if best_score is None or current < best_score:
            best_score, best_alpha = current, weight
    return (best_score, best_alpha)
class LinearStacking:
    """Greedy linear ensembling of model predictions.

    Predictions are folded in one at a time: at step i the running blend
    is combined with prediction i using the alpha (found by grid search)
    that minimizes metric_fn (lower is better).
    """

    def __init__(self, metric_fn, starting_alpha=0, ending_alpha=1, stepsize=0.01):
        self._metric_fn = metric_fn
        self._start = starting_alpha
        self._end = ending_alpha
        self._stepsize = stepsize
        # _weights[i-1] is the alpha kept by the running blend when
        # prediction i was folded in.
        self._weights = []

    def check_weights_sanity(self):
        """Verify the implied per-model weights form a convex combination."""
        if len(self._weights) == 0:
            return
        if len(self._weights) == 1:
            assert self._weights[0] <= 1
            return
        assert all([w <= 1 and w >= 0 for w in self._weights])
        weights = self._weights.copy()
        weights = [0] + weights
        total_weights = []
        for i in range(len(weights)):
            # Model i keeps (1 - alpha_i) of the blend at its step and is
            # then scaled by every later alpha.
            # Bug fix: np.product was deprecated and removed in NumPy 2.0;
            # np.prod is the supported spelling.
            ith_df_weight = (1 - weights[i]) * np.prod(weights[i + 1:])
            total_weights.append(ith_df_weight)
        print('Weights: ', ' '.join(['{:3f}'.format(wi) for wi in total_weights]))
        assert abs(np.sum(total_weights) - 1) < 1e-10

    def fit(self, df_array, target_y):
        """Learn blending weights for df_array against target_y."""
        if len(df_array) == 1:
            print('Just one model. Allocating everyting to it.')
            return
        cumulative_best_df = df_array[0]
        self._weights = []
        for i in range(1, len(df_array)):
            best_score, best_alpha = find_best_linear_combination(
                cumulative_best_df,
                df_array[i],
                target_y,
                self._metric_fn,
                self._start,
                self._end,
                self._stepsize,
            )
            cumulative_best_df = cumulative_best_df * best_alpha + df_array[i] * (1 - best_alpha)
            self._weights.append(best_alpha)
            print('{}th inclusion BestScore:{:.3f}'.format(i, best_score))
        print('Individual best performance:',
              ' '.join(['{:3f}'.format(self._metric_fn(target_y, df)) for df in df_array]))
        print('Median best performance: {:3f}'.format(
            self._metric_fn(target_y, pd.concat(df_array, axis=1).median(axis=1))))
        self.check_weights_sanity()

    def transform(self, df_array):
        """Blend df_array using the fitted weights."""
        assert len(df_array) >= 1
        assert len(df_array) == len(self._weights) + 1
        if len(df_array) == 1:
            return df_array[0]
        cumulative_best_df = df_array[0]
        for i in range(1, len(df_array)):
            alpha = self._weights[i - 1]
            cumulative_best_df = alpha * cumulative_best_df + (1 - alpha) * df_array[i]
        return cumulative_best_df
|
import json
from tola_management.models import ProgramAuditLog
class GlobalConstantsMiddleware:
    """Django middleware that stashes shared JS constants in the session."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Merge our constants into whatever globals are already stored.
        merged = request.session.get('js_globals', {})
        merged.update(self.get_js_globals())
        request.session['js_globals'] = merged
        return self.get_response(request)

    def get_js_globals(self):
        """Return the JSON-encoded constants exposed to front-end code."""
        options = [
            {'value': value, 'label': label, 'rationale_required': required}
            for (value, label, required) in ProgramAuditLog.reason_for_change_options()]
        return {
            'reason_for_change_options': json.dumps(options)
        }
|
import random

word_list = ["aardvark", "baboon", "camel"]
chosen_word = random.choice(word_list)
word_length = len(chosen_word)

# Create blanks
display = ["_" for _ in range(word_length)]

# TODO-1: - Use a while loop to let the user guess again. The loop should only stop once the user has guessed all the letters in the chosen_word and 'display' has no more blanks ("_"). Then you can tell the user they've won.
while '_' in display:
    guess = input("Guess a letter: ").lower()

    # Reveal every position where the guessed letter matches.
    for position, letter in enumerate(chosen_word):
        if letter == guess:
            display[position] = letter
    print(display)

print('Won!')
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import (
Any, Iterator, Optional, Union,
)
from databuilder.models.dashboard.dashboard_metadata import DashboardMetadata
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.usage.usage_constants import (
READ_RELATION_COUNT_PROPERTY, READ_RELATION_TYPE, READ_REVERSE_RELATION_TYPE,
)
from databuilder.models.user import User
LOGGER = logging.getLogger(__name__)
class DashboardUsage(GraphSerializable):
    """
    A model that encapsulate Dashboard usage between Dashboard and User
    """

    def __init__(self,
                 dashboard_group_id: Optional[str],
                 dashboard_id: Optional[str],
                 email: str,
                 view_count: int,
                 should_create_user_node: Optional[bool] = False,
                 product: Optional[str] = '',
                 cluster: Optional[str] = 'gold',
                 **kwargs: Any
                 ) -> None:
        """
        :param dashboard_group_id:
        :param dashboard_id:
        :param email:
        :param view_count:
        :param should_create_user_node: Enable this if it is fine to create/update User node with only with email
        address. Please be advised that other fields will be emptied. Current use case is to create anonymous user.
        For example, Mode dashboard does not provide which user viewed the dashboard and anonymous user can be used
        to show the usage.
        :param product:
        :param cluster:
        :param kwargs:
        """
        self._dashboard_group_id = dashboard_group_id
        self._dashboard_id = dashboard_id
        self._email = email
        self._view_count = int(view_count)
        self._product = product
        self._cluster = cluster
        self._user_model = User(email=email)
        self._should_create_user_node = bool(should_create_user_node)
        self._relation_iterator = self._create_relation_iterator()

    def create_next_node(self) -> Union[GraphNode, None]:
        # Only a minimal (email-only) User node is emitted, and only on request.
        if not self._should_create_user_node:
            return None
        return self._user_model.create_next_node()

    def create_next_relation(self) -> Union[GraphRelationship, None]:
        # The iterator yields exactly one relation; afterwards return None.
        return next(self._relation_iterator, None)

    def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
        # Single READ relation between the dashboard and the user carrying
        # the view count.
        yield GraphRelationship(
            start_label=DashboardMetadata.DASHBOARD_NODE_LABEL,
            end_label=User.USER_NODE_LABEL,
            start_key=DashboardMetadata.DASHBOARD_KEY_FORMAT.format(
                product=self._product,
                cluster=self._cluster,
                dashboard_group=self._dashboard_group_id,
                dashboard_name=self._dashboard_id
            ),
            end_key=User.get_user_model_key(email=self._email),
            type=READ_REVERSE_RELATION_TYPE,
            reverse_type=READ_RELATION_TYPE,
            attributes={
                READ_RELATION_COUNT_PROPERTY: self._view_count
            }
        )

    def __repr__(self) -> str:
        fields = (self._dashboard_group_id, self._dashboard_id, self._email,
                  self._view_count, self._should_create_user_node,
                  self._product, self._cluster)
        return 'DashboardUsage({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})'.format(*fields)
|
"""PSF module
This module provides a class implementing a spatially varying PSF.
Intended usage is:
>>> unknown ***
"""
import pdb
import numpy
def shift(im, offset, **kw):
    """Wrapper for scipy.ndimage.shift with sensible defaults.

    Args:
        im (ndarray): image to shift
        offset: shift to apply, in pixels, per axis
        **kw: forwarded to scipy.ndimage.shift

    Returns:
        ndarray: the shifted image
    """
    # Bug fix: the deprecated scipy.ndimage.interpolation namespace was
    # removed in SciPy 1.10; import from scipy.ndimage instead.
    from scipy.ndimage import shift
    # 1" Gaussian: 60 umag; 0.75": 0.4 mmag; 0.5": 4 mmag
    # order=3 roughly 5x worse.
    kw.setdefault('order', 4)
    kw.setdefault('mode', 'nearest')
    kw.setdefault('output', im.dtype)
    return shift(im, offset, **kw)
def central_stamp(stamp, censize=19):
    """Return the central censize x censize region of stamp.

    Both sizes must be odd so the center pixel is well defined.  If the
    stamp is smaller than censize, a zero-padded copy is returned.
    """
    if censize is None:
        censize = 19
    stampsz = stamp.shape[-1]
    # Even sizes have no center pixel; drop into the debugger on
    # violation (original behavior preserved).
    if ((stampsz % 2) == 0) | ((censize % 2) == 0):
        pdb.set_trace()
    if stampsz == censize:
        return stamp
    if stampsz > censize:
        margin = (stampsz - censize) // 2
        return stamp[..., margin:stampsz - margin, margin:stampsz - margin]
    # Stamp smaller than requested: embed it centrally in zeros.
    padded = numpy.zeros(stamp.shape[:-2] + (censize, censize), dtype='f4')
    central_stamp(padded, censize=stampsz)[..., :, :] = stamp
    return padded
def neff_fwhm(stamp):
    """FWHM-like quantity derived from N_eff = numpy.sum(PSF**2.)**-1"""
    total = numpy.sum(stamp, axis=(-1, -2), keepdims=True)
    neff_inv = numpy.sum((stamp / total)**2., axis=(-1, -2))
    return 1.18 * (numpy.pi * neff_inv)**(-0.5)
def fwhm_neff(fwhm):
    """Inverse of neff_fwhm: the N_eff corresponding to a given FWHM."""
    return numpy.pi * (fwhm / 1.18)**2
def gaussian_psf(fwhm, stampsz=19, deriv=True, shift=[0, 0]):
    """Create Gaussian psf & derivatives for a given fwhm and stamp size.

    Args:
        fwhm (float): the full width at half maximum
        stampsz (int): the return psf stamps are [stampsz, stampsz] in size
        deriv (bool): return derivatives?
        shift (float, float): shift centroid by this amount in x, y

    Returns:
        (psf, dpsfdx, dpsfdy)
        psf (ndarray[stampsz, stampsz]): the psf stamp
        dpsfdx (ndarray[stampsz, stampsz]): the x-derivative of the PSF
        dpsfdy (ndarray[stampsz, stampsz]): the y-derivative of the PSF
    """
    # Bug fix: work on a copy of shift.  The original code assigned
    # reshaped arrays into the (mutable, shared) default argument in the
    # broadcast branch, corrupting every later call that relied on the
    # default.  moffat_psf below already copies; do the same here.
    shift = list(shift)
    sigma = fwhm / numpy.sqrt(8*numpy.log(2))
    stampszo2 = stampsz // 2
    parshape = numpy.broadcast(fwhm, shift[0], shift[1]).shape
    if len(parshape) > 0:
        # Array-valued parameters: broadcast to a stack of stamps.
        sigma, shift[0], shift[1] = (numpy.atleast_1d(q).reshape(-1, 1, 1)
                                     for q in (sigma, shift[0], shift[1]))
    xc = numpy.arange(stampsz, dtype='f4')-stampszo2
    yc = xc.copy()
    xc = xc.reshape(-1, 1)-shift[0]
    yc = yc.reshape(1, -1)-shift[1]
    psf = numpy.exp(-(xc**2. + yc**2.) /
                    2./sigma**2.).astype('f4')
    psf /= numpy.sum(psf[..., :, :])
    # Derivatives with respect to a positive centroid shift.
    dpsfdx = xc/sigma**2.*psf
    dpsfdy = yc/sigma**2.*psf
    ret = psf
    if deriv:
        ret = (ret,) + (dpsfdx, dpsfdy)
    return ret
def moffat_psf(fwhm, beta=3., xy=0., yy=1., stampsz=19, deriv=True,
               shift=[0, 0]):
    """Create Moffat psf & derivatives for a given fwhm and stamp size.

    Args:
        fwhm (float): the full width at half maximum
        beta (float): beta parameter for Moffat distribution
        xy (float): xy coefficient (0 for uncorrelated)
        yy (float): yy coefficient (1 for FWHM_x == FWHM_y)
        stampsz (int): the returned psf stamps are [stampsz, stampsz] in size
        deriv (bool): return derivatives?
        shift (float, float): shift centroid by this amount in x, y

    Returns:
        (psf, dpsfdx, dpsfdy)
        psf (ndarray[stampsz, stampsz]): the psf stamp
        dpsfdx (ndarray[stampsz, stampsz]): the x-derivative of the PSF
        dpsfdy (ndarray[stampsz, stampsz]): the y-derivative of the PSF
    """
    # Guard against non-physical beta; beta <= 1e-3 would blow up alpha.
    if numpy.any(beta <= 1e-3):
        print('Warning: crazy values for beta in moffat_psf')
        beta = numpy.clip(beta, 1e-3, numpy.inf)
    # alpha is the Moffat core width implied by the requested FWHM.
    alpha = fwhm/(2*numpy.sqrt(2**(1./beta)-1))
    stampszo2 = stampsz // 2
    xc = numpy.arange(stampsz, dtype='f4')-stampszo2
    # Array-valued parameters broadcast to a stack of stamps.
    parshape = numpy.broadcast(fwhm, beta, xy, yy, shift[0], shift[1]).shape
    xc = xc.reshape(-1, 1)
    yc = xc.copy().reshape(1, -1)
    if len(parshape) > 0:
        alpha, beta, xy, yy = (numpy.atleast_1d(q).reshape(-1, 1, 1)
                               for q in (alpha, beta, xy, yy))
        # Copy before assigning elements so the caller's (possibly
        # default, shared) shift list is not mutated.
        shift = list(shift)
        shift[0], shift[1] = (numpy.atleast_1d(q).reshape(-1, 1, 1)
                              for q in (shift[0], shift[1]))
    xc = xc - shift[0]
    yc = yc - shift[1]
    yy = numpy.abs(yy)
    # Elliptical radius with axis ratio yy and cross-term xy.
    rc2 = yy**(-0.5)*xc**2. + xy*xc*yc + yy**(0.5)*yc**2.
    # for bad xy, this can screw up and generate negative values.
    if numpy.any(rc2 < 0.):
        print('Warning: crazy xy and yy values to moffat_psf')
        rc2 = numpy.clip(rc2, 0., numpy.inf)
    rc = numpy.sqrt(rc2)
    # Analytically normalized Moffat profile.
    psf = (beta - 1)/(numpy.pi * alpha**2.)*(1.+(rc**2./alpha**2.))**(-beta)
    ret = psf
    if deriv:
        dpsffac = (beta-1)/(numpy.pi*alpha**2.)*(beta)*(
            (1+(rc**2./alpha**2.))**(-beta-1))
        dpsfdx = dpsffac*2*xc/alpha
        dpsfdy = dpsffac*2*yc/alpha
        ret = (psf, dpsfdx, dpsfdy)
    return ret
def simple_centroid(psf, norm=True):
    """First-moment centroid of a stamp, optionally flux-normalized."""
    stampsz = psf.shape[-1]
    half = stampsz // 2
    xgrid = (numpy.arange(stampsz, dtype='f4') - half).reshape(-1, 1)
    ygrid = xgrid.copy().reshape(1, -1)
    denom = numpy.sum(psf, axis=(-1, -2)) if norm else 1.
    xmom = numpy.sum(xgrid * psf, axis=(-1, -2)) / denom
    ymom = numpy.sum(ygrid * psf, axis=(-1, -2)) / denom
    return (xmom, ymom)
def center_psf(psf, censize=None):
    """Center and normalize a psf; centroid is placed at center."""
    psf = psf.copy()
    core = central_stamp(psf, censize=censize)
    # A few fixed-point iterations: measure the centroid of the core
    # region and shift the full stamp to cancel it.
    for _ in range(3):
        xcen, ycen = simple_centroid(core)
        psf[:, :] = shift(psf, [-xcen, -ycen],
                          output=numpy.dtype('f4'))
    psf /= numpy.sum(psf)
    psf = psf.astype('f4')
    return psf
class SimplePSF:
    """Spatially constant PSF represented by a single pixelized stamp."""

    def __init__(self, stamp, normalize=19):
        # stamp: 2-D pixelized PSF image.  When normalize > 0 the stamp is
        # rescaled (in place) so its central normalize x normalize region
        # sums to one.
        self.stamp = stamp
        if normalize > 0:
            norm = numpy.sum(central_stamp(stamp, censize=normalize))
            self.stamp /= norm
        # Negative gradient of the stamp: the model's derivative with
        # respect to a positive centroid shift.
        self.deriv = numpy.gradient(-stamp)

    def render_model(self, x, y, stampsz=None):
        # Position-independent: the same (optionally trimmed) stamp is
        # returned for every (x, y).
        if stampsz is None:
            return self.stamp
        else:
            return central_stamp(self.stamp, censize=stampsz)

    def __call__(self, x, y, stampsz=None, deriv=False):
        # Evaluate the PSF at (x, y); only the sub-pixel part of the
        # position matters because the model is spatially constant.
        parshape = numpy.broadcast(x, y).shape
        tparshape = parshape if len(parshape) > 0 else (1,)
        if stampsz is None:
            stampsz = self.stamp.shape[0]
        # Sub-pixel offsets of each requested position.
        shiftx, shifty = (numpy.atleast_1d(q) - numpy.round(q) for q in (x, y))
        stamp = central_stamp(self.stamp, censize=stampsz)
        ret = numpy.zeros(tparshape+(stampsz, stampsz), dtype='f4')
        for i in range(ret.shape[0]):
            ret[i, :, :] = shift(stamp, (shiftx[i], shifty[i]))
        if deriv:
            dpsfdx = numpy.zeros_like(ret)
            dpsfdy = numpy.zeros_like(ret)
            dxstamp = central_stamp(self.deriv[0], censize=stampsz)
            dystamp = central_stamp(self.deriv[1], censize=stampsz)
            for i in range(ret.shape[0]):
                dpsfdx[i, :, :] = shift(dxstamp, (shiftx[i], shifty[i]))
                dpsfdy[i, :, :] = shift(dystamp, (shiftx[i], shifty[i]))
        # Scalar (x, y) input gets a 2-D stamp rather than a stack of one.
        if parshape != tparshape:
            ret = ret.reshape(stampsz, stampsz)
            if deriv:
                dpsfdx = dpsfdx.reshape(stampsz, stampsz)
                dpsfdy = dpsfdy.reshape(stampsz, stampsz)
        if deriv:
            ret = (ret, dpsfdx, dpsfdy)
        return ret

    def serialize(self, stampsz=None):
        # Pack the PSF into a single-row structured array for writing to
        # disk, including the optional 'offset' and any extra parameters.
        stamp = self.stamp
        if stampsz is not None:
            stamp = central_stamp(self.stamp, stampsz)
        dtype = [('offset', '2f4'),
                 ('stamp', stamp.dtype, stamp.shape)]
        extrapar = getattr(self, 'extraparam', None)
        if extrapar is not None:
            dtype += extrapar.dtype.descr
        res = numpy.zeros(1, dtype=dtype)
        res['offset'][0, :] = getattr(self, 'offset', (0, 0))
        res['stamp'][0, ...] = stamp
        if getattr(self, 'extraparam', None) is not None:
            for name in extrapar.dtype.names:
                res[name][0, ...] = extrapar[name]
        return res
class MoffatPSF:
    """Spatially constant analytic Moffat PSF."""

    def __init__(self, fwhm, beta, xy=0., yy=1., normalize=19):
        self.fwhm = fwhm
        self.beta = beta
        self.xy = xy
        self.yy = yy
        if normalize > 0:
            # NOTE(review): render_model calls moffat_psf with its default
            # deriv=True, so this sums the whole (psf, dpsfdx, dpsfdy)
            # tuple rather than just the psf stamp; the derivative sums are
            # near zero by symmetry, but confirm this is intended.
            self.norm = numpy.sum(self.render_model(0, 0, stampsz=19))
        else:
            self.norm = 1

    def render_model(self, x, y, stampsz=59):
        # Position-independent model; (x, y) are accepted for interface
        # compatibility with the spatially varying PSF classes but ignored.
        res = moffat_psf(self.fwhm, beta=self.beta, xy=self.xy,
                         yy=self.yy, stampsz=stampsz)
        return res

    def __call__(self, x, y, stampsz=None, deriv=False):
        # Only the sub-pixel part of the position shifts the profile.
        shiftx, shifty = (q - numpy.round(q) for q in (x, y))
        res = moffat_psf(self.fwhm, beta=self.beta, xy=self.xy,
                         yy=self.yy, stampsz=stampsz, deriv=deriv,
                         shift=(shiftx, shifty))
        if deriv:
            res = [r / self.norm for r in res]
        else:
            res = res / self.norm
        return res
class VariableMoffatPSF:
    """Moffat PSF whose parameters vary polynomially across the image.

    Each parameter is a 2-D polynomial coefficient matrix evaluated at
    (x/1000, y/1000).
    """

    def __init__(self, fwhm, beta, xy=0., yy=1., normalize=19):
        self.fwhm = numpy.atleast_2d(fwhm)
        self.beta = numpy.atleast_2d(beta)
        self.xy = numpy.atleast_2d(xy)
        self.yy = numpy.atleast_2d(yy)
        self.normalize = normalize

    def render_model(self, x, y, stampsz=59, deriv=False):
        from numpy.polynomial.polynomial import polyval2d
        # Coordinates are scaled by 1000 so the polynomial coefficients
        # stay O(1) over a typical detector.
        x = x / 1000.
        y = y / 1000.
        fwhm = polyval2d(x, y, self.fwhm)
        beta = polyval2d(x, y, self.beta)
        xy = polyval2d(x, y, self.xy)
        yy = polyval2d(x, y, self.yy)
        return moffat_psf(fwhm, beta=beta, xy=xy,
                          yy=yy, stampsz=stampsz, deriv=deriv)

    def __call__(self, x, y, stampsz=59, deriv=False):
        from numpy.polynomial.polynomial import polyval2d
        # Sub-pixel offsets of the requested positions.
        shiftx, shifty = (q - numpy.round(q) for q in (x, y))
        x = x / 1000.
        y = y / 1000.
        fwhm = polyval2d(x, y, self.fwhm)
        beta = polyval2d(x, y, self.beta)
        xy = polyval2d(x, y, self.xy)
        yy = polyval2d(x, y, self.yy)
        # Render at least the normalization size so the flux in the
        # central region can be measured before trimming.
        tstampsz = max(stampsz, self.normalize)
        psf = moffat_psf(fwhm, beta=beta, xy=xy,
                         yy=yy, stampsz=tstampsz, deriv=deriv,
                         shift=(shiftx, shifty))
        if not deriv:
            psf = [psf]
        if self.normalize > 0:
            # Normalize psf and derivatives by the flux in the central
            # normalize x normalize region.
            norms = numpy.sum(central_stamp(psf[0], censize=self.normalize),
                              axis=(-1, -2)).reshape(-1, 1, 1)
        else:
            norms = 1
        psf = [central_stamp(p, censize=stampsz) / norms
               for p in psf]
        if not deriv:
            psf = psf[0]
        return psf
class VariablePixelizedPSF:
    """Pixelized PSF whose stamp varies polynomially with position.

    stamp holds 2-D polynomial coefficients in its leading two axes; the
    PSF at a position is the polynomial evaluated at (x/1000, y/1000).
    """

    def __init__(self, stamp, normalize=19):
        stampsz = stamp.shape[-1]
        # Odd stamp sizes are required so the center pixel is defined.
        if (stampsz % 2) == 0:
            raise ValueError('problematic shape')
        self.stamp = stamp
        self.normalize = normalize
        # Negative gradient: model derivative w.r.t. a positive shift.
        self.deriv = numpy.gradient(-self.stamp, axis=(2, 3))
        if normalize > 0:
            cstamp = central_stamp(stamp, normalize)
        else:
            cstamp = stamp
        # Polynomial coefficients of the central-region flux and first
        # moments, consumed by norm() and centroid().
        self.normstamp = numpy.sum(cstamp, axis=(2, 3))
        stampsz = cstamp.shape[-1]
        stampszo2 = stampsz // 2
        xc = numpy.arange(stampsz, dtype='f4')-stampszo2
        xc = xc.reshape(1, 1, -1, 1)
        yc = xc.copy().reshape(1, 1, 1, -1)
        self.xstamp = numpy.sum(xc*cstamp, axis=(2, 3))
        self.ystamp = numpy.sum(yc*cstamp, axis=(2, 3))

    def norm(self, x, y):
        # Flux normalization of the model at (x, y).
        from numpy.polynomial.polynomial import polyval2d
        x, y = (x/1000., y/1000.)
        return polyval2d(x, y, self.normstamp)

    def centroid(self, x, y):
        # First-moment centroid of the model at (x, y).
        from numpy.polynomial.polynomial import polyval2d
        x, y = (x/1000., y/1000.)
        if self.normalize < 0:
            norm = 1
        else:
            # NOTE(review): x and y were already divided by 1000 above and
            # self.norm() divides again, so the normalization is evaluated
            # at (x/1e6, y/1e6) while the moments use (x/1e3, y/1e3) —
            # confirm this double scaling is intended.
            norm = self.norm(x, y)
        xc = polyval2d(x, y, self.xstamp)
        yc = polyval2d(x, y, self.ystamp)
        return xc/norm, yc/norm

    def render_model(self, x, y, stampsz=59, deriv=False):
        from numpy.polynomial.polynomial import polyval2d
        x = x / 1000.
        y = y / 1000.
        tstamps = polyval2d(x, y, central_stamp(self.stamp, stampsz))
        # polyval2d leaves the broadcast axis last; move it in front.
        if len(tstamps.shape) == 3:
            tstamps = tstamps.transpose(2, 0, 1)
        if deriv:
            dpsfdx = polyval2d(x, y, central_stamp(self.deriv[0], stampsz))
            dpsfdy = polyval2d(x, y, central_stamp(self.deriv[1], stampsz))
            if len(tstamps.shape) == 3:
                dpsfdx = dpsfdx.transpose(2, 0, 1)
                dpsfdy = dpsfdy.transpose(2, 0, 1)
            tstamps = (tstamps, dpsfdx, dpsfdy)
        return tstamps

    def serialize(self, stampsz=None):
        # Pack the coefficient stamps into a single-row structured array.
        stamp = self.stamp
        if stampsz is not None:
            stamp = central_stamp(self.stamp, stampsz)
        dtype = [('offset', '2f4'),
                 ('stamp', stamp.dtype, stamp.shape)]
        extrapar = getattr(self, 'extraparam', None)
        if extrapar is not None:
            dtype += extrapar.dtype.descr
        res = numpy.zeros(1, dtype=dtype)
        res['offset'][0, :] = getattr(self, 'offset', (0, 0))
        res['stamp'][0, ...] = stamp
        if getattr(self, 'extraparam', None) is not None:
            for name in extrapar.dtype.names:
                res[name][0, ...] = extrapar[name]
        return res

    def __call__(self, x, y, stampsz=None, deriv=False):
        # Evaluate at (x, y): render the local stamp, apply the sub-pixel
        # shift, and divide out the local normalization.
        if stampsz is None:
            stampsz = self.stamp.shape[-1]
        parshape = numpy.broadcast(x, y).shape
        tparshape = parshape if len(parshape) > 0 else (1,)
        shiftx, shifty = (q - numpy.round(q) for q in (x, y))
        stamps = self.render_model(x, y, stampsz=stampsz, deriv=deriv)
        if deriv:
            stamps, dpsfdx, dpsfdy = stamps
            dpsfdx = dpsfdx.reshape(tparshape+(stampsz, stampsz))
            dpsfdy = dpsfdy.reshape(tparshape+(stampsz, stampsz))
        stamps = stamps.reshape(tparshape+(stampsz, stampsz))
        norm = numpy.atleast_1d(self.norm(x, y))
        shiftx = numpy.atleast_1d(shiftx)
        shifty = numpy.atleast_1d(shifty)
        for i in range(stamps.shape[0]):
            stamps[i, :, :] = shift(stamps[i, :, :], (shiftx[i], shifty[i]))
        stamps /= norm.reshape(-1, 1, 1)
        # Scalar input gets a 2-D stamp rather than a stack of one.
        if tparshape != parshape:
            stamps = stamps.reshape(stamps.shape[1:])
        if deriv:
            for i in range(stamps.shape[0]):
                dpsfdx[i, :, :] = shift(dpsfdx[i, :, :],
                                        (shiftx[i], shifty[i]))
                dpsfdy[i, :, :] = shift(dpsfdy[i, :, :],
                                        (shiftx[i], shifty[i]))
            dpsfdx /= norm.reshape(-1, 1, 1)
            dpsfdy /= norm.reshape(-1, 1, 1)
            if tparshape != parshape:
                dpsfdx = dpsfdx.reshape(stamps.shape[1:])
                dpsfdy = dpsfdy.reshape(stamps.shape[1:])
            stamps = (stamps, dpsfdx, dpsfdy)
        return stamps
class VariableMoffatPixelizedPSF:
    """Sum of a spatially varying Moffat core and a spatially varying
    pixelized residual."""

    def __init__(self, stamp, fwhm, beta, xy=0., yy=1., normalize=-1):
        # Both components are built unnormalized; normalization is applied
        # at evaluation time in __call__.
        self.moffat = VariableMoffatPSF(fwhm, beta, xy=xy, yy=yy, normalize=-1)
        self.resid = VariablePixelizedPSF(stamp, normalize=-1)
        self.normalize = normalize

    def render_model(self, x, y, stampsz=59, deriv=False):
        # Moffat + residual, summed stamp-by-stamp (and derivative-by-
        # derivative when deriv is set).
        mof = self.moffat.render_model(x, y, stampsz=stampsz, deriv=deriv)
        res = self.resid.render_model(x, y, stampsz=stampsz, deriv=deriv)
        if not deriv:
            return mof + res
        else:
            return [a+b for (a, b) in zip(mof, res)]

    def __call__(self, x, y, stampsz=None, deriv=False):
        stampsz = (stampsz if stampsz is not None else
                   self.resid.stamp.shape[-1])
        # Render at least the normalization size.
        # NOTE(review): when normalize > stampsz the returned stamps keep
        # the larger tstampsz size; there is no trim back to stampsz here —
        # confirm callers expect that.
        tstampsz = max(stampsz, self.normalize)
        modstamp = self.render_model(x, y, stampsz=tstampsz, deriv=deriv)
        if not deriv:
            modstamp = [modstamp]
        # Apply the sub-pixel shift to every returned stamp.
        shiftx, shifty = (q - numpy.round(q) for q in (x, y))
        if len(modstamp[0].shape) == 2:
            for modstamp0 in modstamp:
                modstamp0[:, :] = shift(modstamp0[:, :], (shiftx, shifty))
        else:
            for modstamp0 in modstamp:
                for i in range(modstamp0.shape[0]):
                    modstamp0[i, :, :] = shift(modstamp0[i, :, :],
                                               (shiftx[i], shifty[i]))
        if self.normalize > 0:
            # Normalize by the flux in the central region.
            norms = numpy.sum(central_stamp(modstamp[0],
                                            censize=self.normalize),
                              axis=(-1, -2))
            norms = numpy.array(norms)[..., None, None]
        else:
            # The Moffat core is analytically normalized, so the total
            # flux is one plus whatever flux the residual carries.
            norms = 1 + self.resid.norm(x, y)
        for modstamp0 in modstamp:
            if len(modstamp0.shape) == 2:
                modstamp0 /= norms
            else:
                modstamp0 /= norms.reshape(-1, 1, 1)
        if not deriv:
            modstamp = modstamp[0]
        return modstamp
class GridInterpPSF:
def __init__(self, stamp, x, y, normalize=19):
stampsz = stamp.shape[-1]
if (stampsz % 2) == 0:
raise ValueError('problematic shape')
if (stamp.shape[0] != len(x)) or (stamp.shape[1] != len(y)):
raise ValueError('mismatch between grid coordinates and stamp.')
self.stamp = stamp
self.normalize = normalize
self.x = x
self.y = y
self.deriv = numpy.gradient(-self.stamp, axis=(2, 3))
if normalize > 0:
cstamp = central_stamp(stamp, normalize)
else:
cstamp = stamp
self.normstamp = numpy.sum(cstamp, axis=(2, 3))
stampsz = cstamp.shape[-1]
stampszo2 = stampsz // 2
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
xc = xc.reshape(1, 1, -1, 1)
yc = xc.copy().reshape(1, 1, 1, -1)
self.xstamp = numpy.sum(xc*cstamp, axis=(2, 3))
self.ystamp = numpy.sum(yc*cstamp, axis=(2, 3))
def interpolator(self, stamp, x, y):
x0 = numpy.atleast_1d(x)
y0 = numpy.atleast_1d(y)
ind = [numpy.interp(z, zgrid, numpy.arange(len(zgrid), dtype='f4'),
left=0, right=len(zgrid)-1)
for (z, zgrid) in ((x0, self.x), (y0, self.y))]
w1 = [numpy.ceil(z) - z for z in ind]
w2 = [1 - z for z in w1]
left = [numpy.floor(z).astype('i4') for z in ind]
right = [numpy.ceil(z).astype('i4') for z in ind]
ret = numpy.zeros((len(x0),)+stamp.shape[2:], dtype=stamp.dtype)
for i in range(len(x0)):
ret[i, ...] = (
w1[0][i]*w1[1][i]*stamp[left[0][i], left[1][i], ...] +
w1[0][i]*w2[1][i]*stamp[left[0][i], right[1][i], ...] +
w2[0][i]*w1[1][i]*stamp[right[0][i], left[1][i], ...] +
w2[0][i]*w2[1][i]*stamp[right[0][i], right[1][i], ...])
if x0 is not x:
ret = ret[0]
return ret
def norm(self, x, y):
return self.interpolator(self.normstamp, x, y)
def centroid(self, x, y):
if self.normalize < 0:
norm = 1
else:
norm = self.norm(x, y)
xc = self.interpolator(self.xstamp, x, y)
yc = self.interpolator(self.ystamp, x, y)
return xc/norm, yc/norm
def render_model(self, x, y, stampsz=59, deriv=False):
tstamps = self.interpolator(central_stamp(self.stamp, stampsz), x, y)
if deriv:
dpsfdx = self.interpolator(central_stamp(self.deriv[0], stampsz),
x, y)
dpsfdy = self.interpolator(central_stamp(self.deriv[1], stampsz),
x, y)
tstamps = (tstamps, dpsfdx, dpsfdy)
return tstamps
def serialize(self, stampsz=None):
stamp = self.stamp
if stampsz is not None:
stamp = central_stamp(self.stamp, stampsz)
dtype = [('stamp', stamp.dtype, stamp.shape),
('x', len(self.x), 'f4'), ('y', len(self.y), 'f4')]
extrapar = getattr(self, 'extraparam', None)
if extrapar is not None:
dtype += extrapar.dtype.descr
res = numpy.zeros(1, dtype=dtype)
res['stamp'][0, ...] = stamp
res['x'][0, ...] = self.x
res['y'][0, ...] = self.y
if getattr(self, 'extraparam', None) is not None:
for name in extrapar.dtype.names:
res[name][0, ...] = extrapar[name]
return res
def __call__(self, x, y, stampsz=None, deriv=False):
    """Render normalized, sub-pixel-shifted PSF stamps at positions (x, y).

    Returns an array of shape broadcast(x, y).shape + (stampsz, stampsz);
    with deriv=True, a tuple (stamps, dpsfdx, dpsfdy).
    """
    if stampsz is None:
        stampsz = self.stamp.shape[-1]
    # Remember the broadcast shape so scalar input can be returned
    # without the temporary length-1 leading axis added below.
    parshape = numpy.broadcast(x, y).shape
    tparshape = parshape if len(parshape) > 0 else (1,)
    x = numpy.atleast_1d(x)
    y = numpy.atleast_1d(y)
    # Sub-pixel offset of each position from the nearest pixel center.
    shiftx, shifty = (q - numpy.round(q) for q in (x, y))
    stamps = self.render_model(x, y, stampsz=stampsz, deriv=deriv)
    if deriv:
        stamps, dpsfdx, dpsfdy = stamps
        dpsfdx = dpsfdx.reshape(tparshape+(stampsz, stampsz))
        dpsfdy = dpsfdy.reshape(tparshape+(stampsz, stampsz))
    stamps = stamps.reshape(tparshape+(stampsz, stampsz))
    norm = numpy.atleast_1d(self.norm(x, y))
    shiftx = numpy.atleast_1d(shiftx)
    shifty = numpy.atleast_1d(shifty)
    # Apply the sub-pixel shift to each stamp, then normalize.
    for i in range(stamps.shape[0]):
        stamps[i, :, :] = shift(stamps[i, :, :], (shiftx[i], shifty[i]))
    stamps /= norm.reshape(-1, 1, 1)
    if tparshape != parshape:
        stamps = stamps.reshape(stamps.shape[1:])
    if deriv:
        # Same shift + normalization applied to both derivative stamps.
        for i in range(stamps.shape[0]):
            dpsfdx[i, :, :] = shift(dpsfdx[i, :, :],
                                    (shiftx[i], shifty[i]))
            dpsfdy[i, :, :] = shift(dpsfdy[i, :, :],
                                    (shiftx[i], shifty[i]))
        dpsfdx /= norm.reshape(-1, 1, 1)
        dpsfdy /= norm.reshape(-1, 1, 1)
        if tparshape != parshape:
            dpsfdx = dpsfdx.reshape(stamps.shape[1:])
            dpsfdy = dpsfdy.reshape(stamps.shape[1:])
        stamps = (stamps, dpsfdx, dpsfdy)
    return stamps
def select_stamps(psfstack, imstack, weightstack, shiftx, shifty):
    """Flag postage stamps clean enough to use for PSF fitting.

    Returns a boolean array over the stamp axis; a stamp passes when it
    dominates the image flux, its center is well measured, and its shift
    is within one pixel of the median shift of the good stamps.
    """
    nstamp = psfstack.shape[0]
    if nstamp == 0:
        return numpy.ones(0, dtype='bool')
    tflux = numpy.sum(psfstack, axis=(1, 2))
    timflux = numpy.sum(imstack, axis=(1, 2))
    tmedflux = numpy.median(psfstack, axis=(1, 2))
    npix = psfstack.shape[1]*psfstack.shape[2]
    # Floor the image flux at 100 to avoid dividing by tiny totals.
    floorflux = numpy.clip(timflux, 100, numpy.inf)
    tfracflux = tflux / floorflux
    tfracflux2 = (tflux - tmedflux*npix) / floorflux
    cx, cy = imstack.shape[-2] // 2, imstack.shape[-1] // 2
    cenflux = imstack[:, cx, cy]
    # Fraction of the stamp flux falling on pixels with valid weights;
    # the (tflux == 0) term guards against division by zero.
    psfqf = (numpy.sum(psfstack*(weightstack > 0), axis=(1, 2)) /
             (tflux + (tflux == 0)))
    good = numpy.abs(psfqf - 1) < 0.03
    good = good & (tfracflux > 0.5) & (tfracflux2 > 0.2)
    good = good & (weightstack[:, cx, cy] > 0)
    good = good & (cenflux*weightstack[:, cx, cy] > 3)
    if numpy.any(good):
        # Reject outliers more than one pixel from the median shift.
        dx0 = shiftx - numpy.median(shiftx[good])
        dy0 = shifty - numpy.median(shifty[good])
        good = good & (numpy.abs(dx0) < 1.) & (numpy.abs(dy0) < 1.)
    return good
def shift_and_normalize_stamps(psfstack, modstack, weightstack,
                               shiftx, shifty):
    """Recenter stamps onto the pixel grid and normalize to unit flux.

    Returns copies of (psfstack, weightstack); the inputs are untouched.
    """
    xint = numpy.round(shiftx)
    yint = numpy.round(shifty)
    psfstack = psfstack.copy()
    weightstack = weightstack.copy()
    # Subtract each stamp's median (data - model) residual as a sky level.
    sky = numpy.median(psfstack - modstack, axis=(1, 2))
    psfstack -= sky.reshape(-1, 1, 1)
    norms = numpy.sum(psfstack, axis=(1, 2))
    psfstack /= norms.reshape(-1, 1, 1)
    # Inverse scaling keeps the implied uncertainties consistent.
    weightstack *= norms.reshape(-1, 1, 1)
    for i in range(psfstack.shape[0]):
        psfstack[i, :, :] = shift(psfstack[i, :, :],
                                  [-shiftx[i], -shifty[i]])
        # Weights are only shifted by whole pixels, and only when the
        # rounded shift is nonzero; off-image regions get zero weight.
        if (numpy.abs(xint[i]) > 0) or (numpy.abs(yint[i]) > 0):
            weightstack[i, :, :] = shift(weightstack[i, :, :],
                                         [-xint[i], -yint[i]],
                                         mode='constant', cval=0.)
    return psfstack, weightstack
def fill_param_matrix(param, order):
    """Arrange a flat polynomial parameter vector into a coefficient matrix.

    The (order+1)*(order+2)/2 entries of ``param`` fill the lower triangle
    of an (order+1, order+1) matrix, which is then flipped along the
    leading axis to match polyval2d coefficient ordering.
    """
    side = order + 1
    mat = numpy.zeros((side, side) + param.shape[1:], dtype='f4')
    mat[numpy.tril_indices(side)] = param
    return mat[::-1, ...]
def extract_params(param, order, pixsz):
    """Split a flat fit-parameter vector into coefficient matrices.

    Returns [fwhm, xy, yy, resid] via fill_param_matrix; ``resid`` holds
    the pixelized central residual coefficients.

    Raises ValueError when len(param) is inconsistent with order/pixsz.
    """
    # nperpar is used for slicing below, so it must be an int: the
    # original '/' yields a float under Python 3 and breaks the indexing.
    nperpar = (order+1)*(order+2)//2
    if (pixsz**2+3)*nperpar != len(param):
        raise ValueError('Bad parameter vector size?')
    return [fill_param_matrix(x, order) for x in
            (param[0:nperpar], param[nperpar:nperpar*2],
             param[nperpar*2:nperpar*3],
             param[nperpar*3:nperpar*(3+pixsz**2)].reshape(nperpar, pixsz,
                                                           pixsz))]
def extract_params_moffat(param, order):
    """Split a flat Moffat fit-parameter vector into [fwhm, xy, yy].

    Raises ValueError when len(param) is inconsistent with ``order``.
    """
    # Integer division: nperpar is used to slice param below, so it
    # must not be a float (Python 3 '/' would make it one).
    nperpar = (order+1)*(order+2)//2
    if 3*nperpar != len(param):
        raise ValueError('Bad parameter vector size?')
    return [fill_param_matrix(x, order) for x in
            (param[0:nperpar], param[nperpar:nperpar*2],
             param[nperpar*2:nperpar*3])]
def plot_psf_fits(stamp, x, y, model, isig, name=None, save=False):
    """Plot a 10x10 mosaic of PSF stamps, models, and residuals.

    For each cell of a 10x10 grid over the (x, y) range, the stamp with
    the highest median inverse-sigma is shown minus the median model, so
    spatial PSF variations stand out.  With save=True the figure is
    written to 'psf_<name[1]>_<name[0]>.png' (name must then be a
    2-sequence).
    """
    from matplotlib import pyplot as p
    datim = numpy.zeros((stamp.shape[1]*10, stamp.shape[1]*10), dtype='f4')
    modim = numpy.zeros((stamp.shape[1]*10, stamp.shape[1]*10), dtype='f4')
    # Cell boundaries padded slightly so edge stars fall inside a cell.
    xbd = numpy.linspace(numpy.min(x)-0.01, numpy.max(x)+0.01, 11)
    ybd = numpy.linspace(numpy.min(y)-0.01, numpy.max(y)+0.01, 11)
    medmodel = numpy.median(model, axis=0)
    sz = stamp.shape[-1]
    for i in range(10):
        for j in range(10):
            # Stars falling in this grid cell.
            m = numpy.flatnonzero((x > xbd[i]) & (x <= xbd[i+1]) &
                                  (y > ybd[j]) & (y <= ybd[j+1]))
            if len(m) == 0:
                continue
            # Pick the star with the highest median inverse uncertainty.
            ind = m[numpy.argmax(numpy.median(isig[m, :, :], axis=(1, 2)))]
            datim0 = stamp[ind, :, :]
            modim0 = model[ind, :, :]
            datim[i*sz:(i+1)*sz, j*sz:(j+1)*sz] = datim0-medmodel
            modim[i*sz:(i+1)*sz, j*sz:(j+1)*sz] = modim0-medmodel
    p.figure(figsize=(24, 8), dpi=150)
    p.subplot(1, 3, 1)
    p.imshow(datim, aspect='equal', vmin=-0.005, vmax=0.005, cmap='binary')
    p.title('Stamps')
    p.subplot(1, 3, 2)
    p.imshow(modim, aspect='equal', vmin=-0.005, vmax=0.005, cmap='binary')
    p.title('Model')
    p.subplot(1, 3, 3)
    p.imshow(datim-modim, aspect='equal', vmin=-0.001, vmax=0.001,
             cmap='binary')
    p.title('Residuals')
    if save:
        import matplotlib
        # NOTE(review): switching backends here has no effect because
        # pyplot was already imported above; move the use('Agg') call
        # before the pyplot import if a headless backend is required.
        matplotlib.use('Agg')
        p.style.use('dark_background')
        p.savefig('psf_'+name[1]+'_'+str(name[0])+'.png', dpi=150,
                  bbox_inches='tight', pad_inches=0.1)
def plot_psf_fits_brightness(stamp, x, y, model, isig):
    """Plot the 100 highest-S/N stamps, models, and residuals as a mosaic.

    Stamps are ordered by decreasing median inverse-sigma (a brightness
    proxy) and tiled into a 10x10 grid, each minus the median model.
    """
    from matplotlib import pyplot as p
    import util_efs  # project-local plotting helpers
    nx, ny = 10, 10
    datim = numpy.zeros((stamp.shape[1]*nx, stamp.shape[1]*ny), dtype='f4')
    modim = numpy.zeros((stamp.shape[1]*nx, stamp.shape[1]*ny), dtype='f4')
    medmodel = numpy.median(model, axis=0)
    # Sort by decreasing median inverse uncertainty.
    s = numpy.argsort(-numpy.median(isig, axis=(1, 2)))
    sz = stamp.shape[-1]
    for i in range(nx):
        for j in range(ny):
            if i*ny+j >= len(s):
                continue
            ind = s[i*ny+j]
            datim0 = stamp[ind, :, :]
            modim0 = model[ind, :, :]
            datim[i*sz:(i+1)*sz, j*sz:(j+1)*sz] = datim0-medmodel
            modim[i*sz:(i+1)*sz, j*sz:(j+1)*sz] = modim0-medmodel
    p.figure('psfs')
    p.subplot(1, 3, 1)
    util_efs.imshow(datim, aspect='equal', vmin=-0.005, vmax=0.005)
    p.title('Stamps')
    p.subplot(1, 3, 2)
    util_efs.imshow(modim, aspect='equal', vmin=-0.005, vmax=0.005)
    p.title('Model')
    p.subplot(1, 3, 3)
    util_efs.imshow(datim-modim, aspect='equal', vmin=-0.001, vmax=0.001)
    p.title('Residuals')
    p.draw()
def damper(chi, damp):
    """Soften chi values: linear near zero, ~sqrt growth for |chi| >> damp."""
    scaled = numpy.abs(chi) / damp
    return numpy.sign(chi) * 2 * damp * (numpy.sqrt(1 + scaled) - 1)
def fit_variable_moffat_psf(x, y, xcen, ycen, stamp, imstamp, modstamp,
                            isig, order=1, pixsz=9, nkeep=200, plot=False,
                            name=None):
    """Fit a spatially varying Moffat PSF plus pixelized central residuals.

    Selects clean stamps, fits a Moffat model whose parameters vary
    polynomially with (x, y), then fits per-pixel polynomial residuals
    inside the central pixsz x pixsz region.

    Returns the combined VariableMoffatPixelizedPSF model.
    """
    # Clean and shift the PSF stamps first.
    shiftx = xcen + x - numpy.round(x)
    shifty = ycen + y - numpy.round(y)
    okpsf = select_stamps(stamp, imstamp, isig, shiftx, shifty)
    x, y, xcen, ycen = (q[okpsf] for q in (x, y, xcen, ycen))
    stamp, modstamp, isig, imstamp, shiftx, shifty = (
        q[okpsf] for q in (stamp, modstamp, isig, imstamp, shiftx, shifty))
    # Keep only the nkeep brightest stamps.
    if len(x) > nkeep:
        fluxes = numpy.sum(stamp, axis=(1, 2))
        s = numpy.argsort(-fluxes)
        okpsf = (fluxes >= fluxes[s][nkeep-1])
        x, y, xcen, ycen = (q[okpsf] for q in (x, y, xcen, ycen))
        stamp, modstamp, isig, imstamp, shiftx, shifty = (
            q[okpsf] for q in (stamp, modstamp, isig, imstamp, shiftx,
                               shifty))
    stamp, isig = shift_and_normalize_stamps(stamp, modstamp, isig,
                                             shiftx, shifty)
    isig = numpy.clip(isig, 0., 1./(0.1*0.001))
    isig_nocen = isig.copy()
    if stamp.shape[0] > 50:
        # With enough stars, exclude the stamp centers from the Moffat
        # fit; the central pixels are modeled by the residuals instead.
        central_stamp(isig_nocen, censize=pixsz)[:, :, :] = 0.

    def make_full_psf_model(param, order, pixsz):
        # Moffat core plus pixelized residual model.
        fwhm, xy, yy, resid = extract_params(param, order, pixsz)
        return VariableMoffatPixelizedPSF(resid, fwhm, 3., xy=xy, yy=yy)

    def make_moffat_psf_model(param, order):
        # Pure Moffat model, fixed beta = 3.
        fwhm, xy, yy = extract_params_moffat(param, order)
        return VariableMoffatPSF(fwhm, 3., xy=xy, yy=yy)

    def chimoff(param, isig):
        # Damped chi residuals of the Moffat model against all stamps.
        norm = param[-1]
        psf = make_moffat_psf_model(param[:-1], order)
        tresid = (stamp -
                  norm*psf.render_model(x, y, stampsz=stamp.shape[-1]))
        tchi = damper(tresid*isig, 3.).reshape(-1).astype('f4')
        return tchi

    def chipix(param, resid, isig):
        # Damped chi residuals of a 2-d polynomial fit to one pixel.
        from numpy.polynomial.polynomial import polyval2d
        mat = fill_param_matrix(param, order)
        tchi = (resid - polyval2d(x/1000., y/1000., mat))*isig
        return damper(tchi, 3.).reshape(-1)

    # Integer division: nperpar is used as an array index and size below,
    # so it must not be a float (the original '/' made it one under
    # Python 3, breaking guess[nperpar*2 + ...]).
    nperpar = (order+1)*(order+2)//2
    guess = numpy.zeros(3*nperpar+1, dtype='f4')
    constanttermindex = nperpar - order - 1
    guess[0+constanttermindex] = 4.  # 1" PSF
    guess[nperpar*2+constanttermindex] = 1.  # yy
    guess[-1] = 1.  # overall normalization
    # All others can be zero.
    from scipy import optimize
    resmoff = optimize.leastsq(chimoff, guess, args=(isig_nocen,),
                               full_output=True)
    residfitdict = {}
    residguess = numpy.zeros(nperpar, dtype='f4')
    moffpsf = make_moffat_psf_model(resmoff[0][:-1], order)
    resid = (stamp - resmoff[0][-1] *
             moffpsf.render_model(x, y, stampsz=stamp.shape[-1])).astype('f4')
    resid_cen = central_stamp(resid, censize=pixsz)
    isig_cen = central_stamp(isig, censize=pixsz)
    # Fit each central pixel's residual as a polynomial in (x, y).
    for i in range(pixsz):
        for j in range(pixsz):
            args = (resid_cen[:, i, j], isig_cen[:, i, j])
            residfitdict[i, j] = optimize.leastsq(chipix, residguess,
                                                  args=args,
                                                  full_output=True)
    # Assemble the combined parameter vector: Moffat terms, then the
    # per-pixel residual polynomials (normalized by the overall flux).
    fullparam = numpy.zeros((3+pixsz**2)*nperpar+1, dtype='f4')
    fullparam[0:3*nperpar] = resmoff[0][0:3*nperpar]
    fullparam[-1] = resmoff[0][-1]
    resparam = numpy.array([[residfitdict[i, j][0]/fullparam[-1]
                             for j in range(pixsz)]
                            for i in range(pixsz)])
    resparam = resparam.transpose(2, 0, 1)
    fullparam[3*nperpar:(3+pixsz**2)*nperpar] = resparam.reshape(-1)
    psf = make_full_psf_model(fullparam[:-1], order, pixsz)
    if plot != 0:
        norm = fullparam[-1]
        modstamps = norm*psf.render_model(x, y, stampsz=stamp.shape[-1])
        if plot == 1:
            plot_psf_fits(stamp, x, y, modstamps, isig, name=name)
        else:
            plot_psf_fits(stamp, x, y, modstamps, isig, name=name,
                          save=True)
    return psf
def fit_moffat(stamp, isig=None):
    """Least-squares fit of a single Moffat profile to one stamp.

    Returns the full scipy.optimize.leastsq output tuple; the fitted
    parameters are [norm, fwhm, beta, xy, yy].
    """
    if isig is None:
        isig = numpy.ones_like(stamp, dtype='f4')

    def chimoff(par):
        model = par[0]*moffat_psf(par[1], beta=par[2], xy=par[3],
                                  yy=par[4], stampsz=stamp.shape[0],
                                  deriv=False)
        residual = (stamp - model)*isig
        return damper(residual, 5).reshape(-1).astype('f4')

    from scipy import optimize
    start = numpy.array([1., 4., 3., 0., 1.]).astype('f4')
    return optimize.leastsq(chimoff, start, full_output=True, epsfcn=1e-2)
def sum_prof(param, stampsz=59, prof='moffat'):
    """Render a sum of Moffat or Gaussian components on a stamp.

    ``param`` holds [norm, fwhm, beta] per component for prof='moffat'
    or [norm, fwhm] per component for prof='gaussian'.
    """
    res = numpy.zeros((stampsz, stampsz), dtype='f4')
    npar = 3 if prof == 'moffat' else 2
    # Integer division: len(param)/npar would be a float under Python 3
    # and range() would raise TypeError.
    ncomp = len(param) // npar
    for i in range(ncomp):
        if prof == 'moffat':
            tres = moffat_psf(param[i*npar+1], beta=param[i*npar+2], xy=0.,
                              yy=1, stampsz=stampsz,
                              deriv=False)
        elif prof == 'gaussian':
            tres = gaussian_psf(param[i*npar+1], stampsz=stampsz,
                                deriv=False)
        res += tres*param[i*npar]
    # NOTE(review): the total is additionally scaled by param[0] (the
    # first component's normalization); preserved as-is — confirm this
    # double scaling is intended.
    return res*param[0]
def fit_sum_prof(stamp, ncomp=3, isig=None, prof='moffat'):
    """Fit a sum of ``ncomp`` Moffat or Gaussian profiles to a stamp.

    Returns the full scipy.optimize.leastsq output tuple.
    """
    if isig is None:
        isig = numpy.ones_like(stamp, dtype='f4')

    def chiprof(param):
        model = sum_prof(param, stampsz=stamp.shape[-1], prof=prof)
        return damper((stamp - model)*isig, 5).reshape(-1).astype('f4')

    # Initial guesses: equal normalizations, FWHMs growing geometrically
    # from 4 pixels toward a tenth of the stamp size, betas easing from
    # 3.5 down to 2.5.
    norms = numpy.ones(ncomp)/1.0/ncomp
    fwhms = 4*numpy.exp(numpy.linspace(0, numpy.log(stamp.shape[-1]/10),
                                       ncomp))
    betas = 3.5 - 1*numpy.linspace(0, 1, ncomp)
    start = []
    if prof == 'moffat':
        for weight, width, slope in zip(norms, fwhms, betas):
            start += [weight, width, slope]
    else:
        for weight, width in zip(norms, fwhms):
            start += [weight, width]
    from scipy import optimize
    start = numpy.array(start).astype('f4')
    return optimize.leastsq(chiprof, start, full_output=True)
def gaussian(major, minor, rotation, stampsz):
    """Render elliptical Gaussian images normalized to unit total flux.

    ``major``/``minor`` are FWHMs in pixels (arrays), ``rotation`` is the
    position angle in radians (array); returns an (n, stampsz, stampsz)
    array of profiles centered on the stamp.
    """
    # Convert FWHM to sigma.
    fwhm_to_sigma = 1 / numpy.sqrt(8*numpy.log(2))
    sig_major = numpy.abs(major * fwhm_to_sigma).reshape(-1, 1, 1)
    sig_minor = numpy.abs(minor * fwhm_to_sigma).reshape(-1, 1, 1)
    half = stampsz // 2
    dx = numpy.arange(stampsz).reshape(1, -1, 1) - half
    dy = dx.copy().reshape(1, 1, -1)
    theta = rotation.reshape(-1, 1, 1)
    cos_t = numpy.cos(theta)
    sin_t = numpy.sin(theta)
    # Squared elliptical radius in the rotated frame.
    r2 = ((dx*cos_t - dy*sin_t)**2/sig_major**2 +
          (dx*sin_t + dy*cos_t)**2/sig_minor**2)
    return 1./(2.*numpy.pi*sig_major*sig_minor)*numpy.exp(-0.5*r2)
def fit_gaussian(stamp, isig=None):
    """Least-squares fit of an elliptical Gaussian to one stamp.

    Returns the full scipy.optimize.leastsq output tuple; parameters are
    [norm, major, minor, rotation].
    """
    if isig is None:
        isig = numpy.ones_like(stamp, dtype='f4')

    def chigauss(par):
        model = par[0]*gaussian(numpy.atleast_1d(par[1]),
                                numpy.atleast_1d(par[2]),
                                numpy.atleast_1d(par[3]),
                                stampsz=stamp.shape[-1])
        # NOTE(review): isig is not applied to the residual here, same
        # as the original — confirm whether unweighted is intended.
        return (stamp - model).reshape(-1).astype('f4')

    from scipy import optimize
    start = numpy.array([1., 4., 4., 0.]).astype('f4')
    return optimize.leastsq(chigauss, start, full_output=True)
def chipix(param, resid, isig, x, y, order):
    """Damped chi residuals for a 2-d polynomial fit to one pixel's
    residuals over star positions (x, y)."""
    from numpy.polynomial.polynomial import polyval2d
    surface = polyval2d(x/1000., y/1000., fill_param_matrix(param, order))
    chi = (resid - surface)*isig
    return damper(chi, 3.).reshape(-1)
def chipixlin(param, resid, isig, x, y, order):
    """Damped chi residuals for a constant (order 0) or linear (order 1)
    polynomial fit to one pixel's residuals."""
    if order == 0:
        model = param[0]
    else:
        # Parameter layout: [x-slope, constant, y-slope].
        model = param[1] + param[0]*x/1000. + param[2]*y/1000.
    return damper((resid - model)*isig, 3.).reshape(-1)
def modelstampcorn(param, staticstamp, stampsz=None):
    """Evaluate the Moffat-convolved static PSF at the three fit corners
    (0, 0), (1000, 0), (0, 1000), scaled by the overall normalization.

    A long ``param`` vector means spatially varying Moffat parameters
    (constant + x + y term each); a short one, constant parameters.
    """
    from scipy.signal import fftconvolve
    if stampsz is None:
        stampsz = staticstamp.shape[-1]
    corner_x = numpy.array([0, 1000, 0])
    corner_y = numpy.array([0, 0, 1000])
    if len(param) > 4:
        fwhm = param[0] + corner_x/1000.*param[1] + corner_y/1000.*param[2]
        yy = param[3] + corner_x/1000.*param[4] + corner_y/1000.*param[5]
        xy = param[6] + corner_x/1000.*param[7] + corner_y/1000.*param[8]
        norm = param[9]
    else:
        fwhm = param[0]*numpy.ones(3, dtype='f4')
        yy = param[1]
        xy = param[2]
        norm = param[3]
    # Render on a slightly larger stamp so the convolution edges can be
    # trimmed off afterwards.
    moffats = moffat_psf(fwhm, beta=3., xy=xy, yy=yy,
                         stampsz=stampsz+6, deriv=None)
    wings = central_stamp(staticstamp, stampsz+6).copy()
    convolved = fftconvolve(moffats, wings[None, :, :], mode='same')
    # The full convolution is nice here, but we could probably replace it
    # with minimal loss of generality by the sum of the Moffat and an
    # image convolved with only the center part of the PSF.
    return central_stamp(convolved, censize=stampsz).copy() * norm
def modelstampcorn2(param, staticstamp, stampsz=None):
    """Like modelstampcorn, but with a fit Moffat beta parameter as well."""
    from scipy.signal import fftconvolve
    if stampsz is None:
        stampsz = staticstamp.shape[-1]
    corner_x = numpy.array([0, 1000, 0])
    corner_y = numpy.array([0, 0, 1000])
    if len(param) > 5:
        # Spatially varying parameters: constant + x + y term each.
        fwhm = param[0] + corner_x/1000.*param[1] + corner_y/1000.*param[2]
        yy = param[3] + corner_x/1000.*param[4] + corner_y/1000.*param[5]
        xy = param[6] + corner_x/1000.*param[7] + corner_y/1000.*param[8]
        beta = param[9] + corner_x/1000.*param[10] + corner_y/1000.*param[11]
        norm = param[12]
    else:
        fwhm = param[0]*numpy.ones(3, dtype='f4')
        yy = param[1]
        xy = param[2]
        beta = param[3]
        norm = param[4]
    # Render padded, convolve, then trim the convolution edges.
    moffats = moffat_psf(fwhm, beta=beta, xy=xy, yy=yy,
                         stampsz=stampsz+6, deriv=None)
    wings = central_stamp(staticstamp, stampsz+6).copy()
    convolved = fftconvolve(moffats, wings[None, :, :], mode='same')
    # The full convolution is nice here, but we could probably replace it
    # with minimal loss of generality by the sum of the Moffat and an
    # image convolved with only the center part of the PSF.
    return central_stamp(convolved, censize=stampsz).copy() * norm
def stamp2model(corn, normalize=-1):
    """Build a linearly varying PSF model from stamps rendered at the
    corners (0, 0), (1000, 0) and (0, 1000)."""
    sz = corn.shape[-1]
    coeff = numpy.zeros((2, 2, sz, sz), dtype='f4')
    coeff[0, 0, :, :] = corn[0]              # constant term
    coeff[1, 0, :, :] = corn[1] - corn[0]    # x gradient
    coeff[0, 1, :, :] = corn[2] - corn[0]    # y gradient
    return VariablePixelizedPSF(coeff, normalize=normalize)
def fit_linear_static_wing(x, y, xcen, ycen, stamp, imstamp, modstamp,
                           isig, pixsz=9, nkeep=200, plot=False,
                           filter='g', name=None):
    """Fit a PSF model of static wings convolved with a (possibly linearly
    varying) Moffat core, plus linearly varying pixelized central residuals.

    Reads the static wing stamp from $DECAM_DIR/data/psfs for the given
    filter.  Returns the combined VariablePixelizedPSF model with the fit
    parameters attached as ``extraparam``, or None when no usable stamps.
    """
    # clean and shift the PSFs first.
    shiftx = xcen + x - numpy.round(x)
    shifty = ycen + y - numpy.round(y)
    okpsf = select_stamps(stamp, imstamp, isig, shiftx, shifty)
    if numpy.sum(okpsf) == 0:
        return None
    x, y, xcen, ycen = (q[okpsf] for q in (x, y, xcen, ycen))
    stamp, modstamp, isig, imstamp, shiftx, shifty = (
        q[okpsf] for q in (stamp, modstamp, isig, imstamp, shiftx, shifty))
    # Keep only the nkeep brightest stamps.
    if len(x) > nkeep:
        fluxes = numpy.sum(stamp, axis=(1, 2))
        s = numpy.argsort(-fluxes)
        okpsf = (fluxes >= fluxes[s][nkeep-1])
        x, y, xcen, ycen = (q[okpsf] for q in (x, y, xcen, ycen))
        stamp, modstamp, isig, imstamp, shiftx, shifty = (
            q[okpsf] for q in (stamp, modstamp, isig, imstamp, shiftx,
                               shifty))
    stamp, isig = shift_and_normalize_stamps(stamp, modstamp, isig,
                                             shiftx, shifty)
    maxisig = 1./(0.1*0.001)
    isig = numpy.clip(isig, 0., maxisig)
    import os
    from astropy.io import fits
    # Static deconvolved wing model for this filter.
    fname = os.path.join(os.environ['DECAM_DIR'], 'data', 'psfs',
                         'psf_%s_deconv_mod.fits.gz' % filter)
    staticstamp = fits.getdata(fname).T.copy()
    outstampsz = staticstamp.shape[-1]
    normalizesz = 59
    staticstamp /= numpy.sum(central_stamp(staticstamp, normalizesz))

    def modelconv(param, stampsz=None):
        # Render the wing model, convolved per the Moffat parameters,
        # at the star positions.
        if stampsz is None:
            stampsz = stamp.shape[-1]
        model = stamp2model(modelstampcorn(param, staticstamp,
                                           stampsz=stamp.shape[-1]))
        return model.render_model(x, y, stampsz=stampsz, deriv=False)

    def chiconv(param):
        # Damped chi residuals of the convolved-wing model.
        tresid = stamp - modelconv(param)
        chi = damper(tresid*isig, 3).reshape(-1).astype('f4')
        return chi
    stampszo2 = isig.shape[-1] // 2
    # Use a spatially varying (order 1) model only when there are enough
    # bright stars to constrain it.
    nbright = numpy.sum(isig[:, stampszo2, stampszo2] >= min(1000, maxisig))
    if nbright < 10:
        order = 0
    else:
        order = 1
    from scipy import optimize
    if order == 1:
        guess = numpy.array([2., 0., 0.,
                             1., 0., 0.,
                             0., 0., 0.,
                             # 3., 0., 0.,
                             1.]).astype('f4')
    else:
        guess = numpy.array([2., 1., 0., 1.]).astype('f4')
    res = optimize.leastsq(chiconv, guess, full_output=True)
    resid = (stamp - modelconv(res[0])).astype('f4')
    resid_cen = central_stamp(resid, censize=pixsz)
    isig_cen = central_stamp(isig, censize=pixsz)
    residfitdict = {}
    # Number of polynomial terms per pixel (int() applied below since
    # '/' yields a float under Python 3).
    nperpar = (order+1)*(order+2)/2
    residguess = numpy.zeros(int(nperpar), dtype='f4')
    # Fit each central pixel's residual as a polynomial in (x, y).
    for i in range(pixsz):
        for j in range(pixsz):
            args = (resid_cen[:, i, j].copy(), isig_cen[:, i, j].copy(), x, y,
                    order)
            residfitdict[i, j] = optimize.leastsq(chipixlin, residguess,
                                                  args=args,
                                                  full_output=True)
    resparam = numpy.array([[residfitdict[i, j][0]
                             for j in range(pixsz)]
                            for i in range(pixsz)])
    resparam = resparam.transpose(2, 0, 1)
    modresid = VariablePixelizedPSF(fill_param_matrix(resparam, order),
                                    normalize=-1)
    modwing = stamp2model(modelstampcorn(res[0], staticstamp,
                                         stampsz=outstampsz))
    # Combine wing and residual models by rendering both at the corner
    # positions and refitting a single linear model.
    xx = numpy.array([0, 1000, 0])
    yy = numpy.array([0, 0, 1000])
    cornwing = modwing.render_model(xx, yy, deriv=False,
                                    stampsz=outstampsz)
    cornresid = modresid.render_model(xx, yy, deriv=False,
                                      stampsz=outstampsz)
    modtotal = stamp2model(cornwing+cornresid, normalize=normalizesz)
    # Stash the raw fit parameters so the model can be serialized and
    # later rebuilt by linear_static_wing_from_record.
    nlinperpar = 3
    extraparam = numpy.zeros(
        1, dtype=[('convparam', 'f4', 4*nlinperpar+1),
                  ('resparam', 'f4', (nlinperpar, pixsz, pixsz))])
    extraparam['convparam'][0, 0:len(res[0])] = res[0]
    extraparam['resparam'][0, 0:resparam.shape[0], :, :] = resparam
    modtotal.extraparam = extraparam
    if plot != 0:
        modstamps = modtotal.render_model(x, y, deriv=False,
                                          stampsz=stamp.shape[-1])
        if plot == 1:
            plot_psf_fits(stamp, x, y, modstamps, isig, name=name)
        else:
            plot_psf_fits(stamp, x, y, modstamps, isig, name=name,
                          save=True)
    return modtotal
def linear_static_wing_from_record(record, filter='g'):
    """Rebuild a fit_linear_static_wing PSF model from a saved record.

    ``record`` must carry 'convparam', 'resparam' and 'offset' fields;
    the static wing stamp is reread from $DECAM_DIR/data/psfs for the
    given filter.
    """
    import os
    from astropy.io import fits
    fname = os.path.join(os.environ['DECAM_DIR'], 'data', 'psfs',
                         'psf_%s_deconv_mod.fits.gz' % filter)
    staticstamp = fits.getdata(fname).T.copy()
    outstampsz = staticstamp.shape[-1]
    normalizesz = 59
    staticstamp /= numpy.sum(central_stamp(staticstamp, normalizesz))
    # Nonzero higher-order residual terms mean the fit was order 1.
    order = 1 if numpy.any(record['resparam'][1:, ...]) else 0
    nperpar = int((order+1)*(order+2)/2)
    modresid = VariablePixelizedPSF(
        fill_param_matrix(record['resparam'][:nperpar], order), normalize=-1)
    modwing = stamp2model(modelstampcorn(record['convparam'], staticstamp,
                                         stampsz=outstampsz))
    # Combine wing and residual models via their corner renderings, as
    # in fit_linear_static_wing.
    xx = numpy.array([0, 1000, 0])
    yy = numpy.array([0, 0, 1000])
    cornwing = modwing.render_model(xx, yy, deriv=False,
                                    stampsz=outstampsz)
    cornresid = modresid.render_model(xx, yy, deriv=False,
                                      stampsz=outstampsz)
    modtotal = stamp2model(cornwing+cornresid, normalize=normalizesz)
    modtotal.offset = record['offset']
    return modtotal
def wise_psf_fit(x, y, xcen, ycen, stamp, imstamp, modstamp,
                 isig, pixsz=9, nkeep=200, plot=False,
                 psfstamp=None, grid=False, name=None):
    """Update a static WISE PSF model with the median stellar residual.

    Normalizes the supplied PSF stamp(s), computes the median residual of
    the observed stamps against the (mean) model, and adds that residual
    to the central pixsz x pixsz region of the model.

    ``psfstamp`` is required: a stamp array when grid=False, or a
    (stamps, xgrid, ygrid) tuple for GridInterpPSF when grid=True.
    Returns a SimplePSF or GridInterpPSF, or None when no usable stars.
    """
    if psfstamp is None:
        raise ValueError('psfstamp must be set')
    # clean and shift the PSFs first.
    shiftx = xcen + x - numpy.round(x)
    shifty = ycen + y - numpy.round(y)
    okpsf = select_stamps(stamp, imstamp, isig, shiftx, shifty)
    if numpy.sum(okpsf) == 0:
        print('Giving up PSF fit...')
        return None
    x, y, xcen, ycen = (q[okpsf] for q in (x, y, xcen, ycen))
    stamp, modstamp, isig, imstamp, shiftx, shifty = (
        q[okpsf] for q in (stamp, modstamp, isig, imstamp, shiftx, shifty))
    # NOTE(review): this 200-star floor is hard-coded, separate from the
    # nkeep parameter — confirm whether they should be linked.
    if len(x) < 200:
        # really should never happen for WISE images, unless we're, say,
        # right in the Galactic center and things are horrible.
        print('Only %d PSF stars (of %d total), giving up PSF fit...' %
              (len(x), len(okpsf)))
        if not grid:
            return SimplePSF(psfstamp)
        else:
            return GridInterpPSF(*psfstamp)
    # Keep only the nkeep brightest stamps.
    if len(x) > nkeep:
        fluxes = numpy.sum(stamp, axis=(1, 2))
        s = numpy.argsort(-fluxes)
        okpsf = (fluxes >= fluxes[s][nkeep-1])
        x, y, xcen, ycen = (q[okpsf] for q in (x, y, xcen, ycen))
        stamp, modstamp, isig, imstamp, shiftx, shifty = (
            q[okpsf] for q in (stamp, modstamp, isig, imstamp, shiftx,
                               shifty))
    stamp, isig = shift_and_normalize_stamps(stamp, modstamp, isig,
                                             shiftx, shifty)
    maxisig = 1./(0.1*0.001)
    isig0 = isig.copy()
    isig = numpy.clip(isig, 0., maxisig)
    stampsz = isig.shape[-1]
    # Normalize the model stamp(s) to unit flux over the data stamp size.
    # Note: the grid=True branch modifies psfstamp[0] in place.
    if not grid:
        normstamp = numpy.sum(central_stamp(psfstamp, censize=stampsz))
        psfstamp /= normstamp
        npsfstamp = psfstamp
    else:
        normstamp = numpy.sum(central_stamp(psfstamp[0], censize=stampsz),
                              axis=(2, 3))[:, :, None, None]
        psfstamp[0][...] = psfstamp[0] / normstamp
        npsfstamp = psfstamp[0]
        npsfstamp = numpy.mean(npsfstamp, axis=(0, 1))
        npsfstamp /= numpy.sum(central_stamp(npsfstamp, censize=stampsz))
    # Median residual of the observed stamps against the model.
    resid = (stamp - central_stamp(npsfstamp, censize=stampsz))
    resid = resid.astype('f4')
    resid_cen = central_stamp(resid, censize=pixsz)
    residmed = numpy.median(resid_cen, axis=0)
    # Add the median residual to the center of the model stamp(s).
    if not grid:
        newstamp = psfstamp.copy()
        central_stamp(newstamp, censize=pixsz)[:, :] += residmed
    else:
        newstamp = psfstamp[0].copy()
        central_stamp(newstamp, censize=pixsz)[:, :, :, :] += (
            residmed[None, None, :, :])
    if plot:
        if not grid:
            modstamp = central_stamp(newstamp, censize=stampsz)
        else:
            # HACK; not clear which model to use... should use the right
            # one for each, but I don't really want to go there...
            modstamp = central_stamp(newstamp[0, 0, :, :], censize=stampsz)
        modstamp = modstamp[None, ...]*numpy.ones((stamp.shape[0], 1, 1))
        plot_psf_fits_brightness(stamp, x, y, modstamp, isig0)
    if not grid:
        return SimplePSF(newstamp)
    else:
        return GridInterpPSF(newstamp, psfstamp[1], psfstamp[2])
|
#encoding: utf-8
from __future__ import print_function
from keras.callbacks import Callback
from keras import backend as K
import warnings
from sklearn.metrics import roc_auc_score, roc_curve, auc
import numpy as np
import os
from matplotlib import pyplot as plt
class MultiGPUCheckpoint(Callback):
    """Save weights of the underlying (CPU) model when training a Keras
    multi-GPU model in data-parallel mode.

    # Args:
        filepath: formatted string, weights path
        base_model: model instance on CPU whose weights are saved
        monitor: quantity from ``logs`` to monitor
        verbose: verbosity mode, 0 or 1
        save_best_only: only save when the monitored quantity improves
        save_weights_only: save only weights instead of the full model
        mode: 'auto', 'min' or 'max' — direction of improvement
        period: interval (number of epochs) between checkpoints
    """

    def __init__(self,
                 filepath,
                 base_model,
                 monitor='val_loss',
                 verbose=0,
                 save_best_only=False,
                 save_weights_only=False,
                 mode='auto',
                 period=1):
        super(MultiGPUCheckpoint, self).__init__()
        self.base_model = base_model
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        self.save_weights_only = save_weights_only
        self.period = period
        self.epochs_since_last_save = 0
        if mode not in ['auto', 'min', 'max']:
            warnings.warn('ModelCheckpoint mode %s is unknown, '
                          'fallback to auto mode.' % (mode),
                          RuntimeWarning)
            mode = 'auto'
        # Use np.inf, not the np.Inf alias (removed in NumPy 2.0).
        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.inf
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.inf
        else:
            # 'auto': infer the improvement direction from the metric name.
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.inf
            else:
                self.monitor_op = np.less
                self.best = np.inf

    def on_batch_begin(self, batch, logs=None):
        pass

    def on_batch_end(self, batch, logs=None):
        pass

    def on_epoch_end(self, epoch, logs=None):
        """Checkpoint base_model per the period / best-only settings,
        mirroring keras.callbacks.ModelCheckpoint."""
        logs = logs or {}
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            filepath = self.filepath.format(epoch=epoch + 1, **logs)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn('Can save best model only with %s available, '
                                  'skipping.' % (self.monitor), RuntimeWarning)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            print('Epoch %05d: %s improved from %0.5f to %0.5f,'
                                  ' saving model to %s'
                                  % (epoch + 1, self.monitor, self.best,
                                     current, filepath))
                        self.best = current
                        if self.save_weights_only:
                            self.base_model.save_weights(filepath,
                                                         overwrite=True)
                        else:
                            self.base_model.save(filepath, overwrite=True)
                    else:
                        if self.verbose > 0:
                            print('Epoch %05d: %s did not improve' %
                                  (epoch + 1, self.monitor))
            else:
                if self.verbose > 0:
                    print('Epoch %05d: saving model to %s' % (epoch + 1,
                                                              filepath))
                if self.save_weights_only:
                    self.base_model.save_weights(filepath, overwrite=True)
                else:
                    self.base_model.save(filepath, overwrite=True)
class MultipleClassAUROC(Callback):
    """Monitor per-class and mean AUROC on a validation generator and save
    model weights whenever the mean AUROC reaches a new maximum.

    # Args:
        val_gen: keras Sequence yielding (batch_x, batch_y)
        class_names: class name strings, one per output column
        log_dir: directory for the CSV log, ROC plots and weights
        weights_name: base filename for saved weights
        steps: number of validation batches to use (0 or a value larger
            than len(val_gen) means the whole generator)

    # Examples:
    ```python
    cb = MultipleClassAUROC(val_gen, xray_labels, '/home/project/logs',
                            'best_auc_score.h5', steps=len(val_gen))
    ```
    """

    def __init__(self, val_gen, class_names, log_dir, weights_name, steps=0):
        super(MultipleClassAUROC, self).__init__()
        self.val_gen = val_gen
        self.class_names = class_names
        self.weights_name = weights_name
        self.log_dir = log_dir
        self.log_file = os.path.join(self.log_dir, 'auc_log.csv')
        self.epochs_cls_auc = []    # per-epoch lists of per-class AUCs
        self.epochs_aver_auc = []   # per-epoch mean AUCs
        if not steps or steps > len(val_gen):
            self.steps = len(val_gen)
        else:
            self.steps = steps

    def predict(self):
        """Run the model over the validation generator.

        Returns (y_true, y_pred), each stacked over all evaluated batches.
        """
        y_pred = []
        y_true = []
        for batch_id in range(self.steps):
            batch_x, batch_y = self.val_gen[batch_id]
            pred = self.model.predict(batch_x,
                                      batch_size=self.val_gen.batch_size)
            score = np.mean(np.round(pred[:]) == batch_y[:])
            print('predicting batch ', batch_id + 1, ', total', self.steps,
                  '---- accuracy score: ', score)
            y_pred.append(pred)
            y_true.append(batch_y)
        y_pred = np.concatenate(y_pred, axis=0)
        y_true = np.concatenate(y_true, axis=0)
        return y_true, y_pred

    def plot_class_roc(self, y_true, y_pred, epoch, path):
        """Plot one ROC curve per class; return the list of AUC scores."""
        class_auroc = []
        fig, axes = plt.subplots(1, 1, figsize=(9, 9))
        for i in range(len(self.class_names)):
            try:
                fpr, tpr, thresholds = roc_curve(y_true[:, i].astype(int),
                                                 y_pred[:, i])
                score = auc(fpr, tpr)
                axes.plot(fpr, tpr, label='%s (AUC:%0.2f)'
                          % (self.class_names[i], score))
            except ValueError:
                # roc_curve raises when a class has only one label value.
                score = 0
            class_auroc.append(score)
            print("epoch ", epoch, "---class ", self.class_names[i],
                  " score --- ", score)
        axes.legend()
        axes.set_xlabel('False Positive Rate')
        axes.set_ylabel('True Positive Rate')
        name = os.path.join(path, 'epoch'+str(epoch)+'_roc.png')
        fig.savefig(name)
        plt.close(fig)
        return class_auroc

    def plot_aver_roc(self, y_true, y_pred, epoch, path):
        """Plot the ROC over all flattened predictions; return its AUC."""
        y_true = y_true.flatten().astype(int)
        y_pred = y_pred.flatten()
        fig, axes = plt.subplots(1, 1, figsize=(9, 9))
        try:
            fpr, tpr, thresholds = roc_curve(y_true, y_pred)
            auc_score = auc(fpr, tpr)
            axes.plot(fpr, tpr, label='%s (AUC:%0.2f)' % ('average',
                                                          auc_score))
        except ValueError:
            auc_score = 0
        axes.legend()
        axes.set_xlabel('False Positive Rate')
        axes.set_ylabel('True Positive Rate')
        name = os.path.join(path, 'epoch'+str(epoch)+'_aver_roc.png')
        fig.savefig(name)
        print('epoch ', epoch, ' average auc is ', auc_score)
        plt.close(fig)
        return auc_score

    def on_epoch_end(self, epoch, logs=None):
        """
        Calculate the average AUROC and save the best model weights
        according to this metric.
        """
        print("\n", "*"*30)
        lr = float(K.eval(self.model.optimizer.lr))
        print("current learning rate: ", lr)
        y_true, y_pred = self.predict()
        class_auc = self.plot_class_roc(y_true, y_pred, epoch, self.log_dir)
        aver_auc = self.plot_aver_roc(y_true, y_pred, epoch, self.log_dir)
        if self.epochs_aver_auc:
            auc_max = np.amax(self.epochs_aver_auc)
        else:
            auc_max = 0
        if aver_auc >= auc_max:
            name = 'epoch' + str(epoch) + '_' + self.weights_name
            weights_path = os.path.join(self.log_dir, name)
            self.model.save_weights(weights_path)
        with open(self.log_file, mode='a+') as f:
            str_list = ['%.3f' % item for item in class_auc]
            str_list.append('%.3f' % aver_auc)
            print(','.join(str_list), file=f)
        self.epochs_cls_auc.append(class_auc)
        self.epochs_aver_auc.append(aver_auc)

    def on_train_begin(self, logs=None):
        """Write the CSV header line at the start of training."""
        with open(self.log_file, mode='a+') as f:
            # Join ALL column names into one CSV row.  The original passed
            # ',aver auc' as a second print() argument, producing a stray
            # space and double comma in the header.
            print(','.join(list(self.class_names) + ['aver auc']), file=f)
class HistoryLogger(Callback):
    """Log training history to a text file and plot per-epoch loss curves.

    # Args:
        name_prefix: name prefix of the history file and per-epoch plots
        file_path: str, log dir
        mode: open mode for the history file
        plot_item: str, keras logs key to plot, such as 'loss' or a
            metric name like 'binary_accuracy'

    # Examples:
    ```python
    log_dir = '/home/project/abus'
    his_cb = HistoryLogger('his', log_dir, plot_item='binary_accuracy')
    ```
    """

    def __init__(self, name_prefix='abus_train',
                 file_path='./logs', mode='a+',
                 plot_item='loss'):
        self.name_prefix = name_prefix
        file_name = name_prefix + '_history.txt'
        file_name = os.path.join(file_path, file_name)
        self.file = open(file_name, mode)
        self.path = file_path
        self.plot_item = plot_item
        super(HistoryLogger, self).__init__()

    def on_train_begin(self, logs=None):
        """Training starts (overrides the parent hook)."""
        print("开始训练", file=self.file)
        self.file.flush()
        self.epoch_loss = []

    def on_batch_begin(self, batch, logs=None):
        pass

    def on_batch_end(self, batch, logs=None):
        """Append one formatted line per batch to the history file.

        Args:
            batch: int
            logs: dict of metric name -> value
        """
        logs = logs or {}
        log_line = ['%-10s - %-10.5f' % item for item in sorted(logs.items())]
        log_line.insert(0, 'epoch--- %-3d' % self.cur_epoch)
        log_line = '\t'.join(log_line)
        print(log_line, file=self.file)
        self.losses.append(logs)
        self.file.flush()

    def on_epoch_begin(self, epoch, logs=None):
        # Reset the per-batch log buffer for the new epoch.
        self.cur_epoch = epoch
        self.losses = []

    def on_epoch_end(self, epoch, logs=None):
        """Write the epoch summary and plot the within-epoch loss curve."""
        logs = logs or {}
        print('*' * 80, file=self.file)
        log_line = ['%-10s - %-10.5f' % item for item in logs.items()]
        log_line = '\t'.join(log_line) + '\n'
        print(log_line, file=self.file)
        print('*' * 80, file=self.file)
        self.file.flush()
        # Plot the batch-by-batch curve of plot_item for this epoch.
        loss = np.zeros((len(self.losses)))
        for i, item in enumerate(self.losses):
            output_loss = item.get(self.plot_item)
            loss[i] = output_loss
        fig = plt.figure("epoch-" + str(epoch))
        plt.plot(loss)
        plt.gca().set_xlabel('batch')
        plt.gca().set_ylabel('loss')
        plt.gca().set_title('epoch-{}'.format(epoch))
        save_path = os.path.join(
            self.path, self.name_prefix + '_epoch_{}.jpg'.format(epoch))
        plt.savefig(save_path)
        # Close the figure: the original created a new figure every epoch
        # without closing it, accumulating memory over long trainings.
        plt.close(fig)
        # Record this epoch's loss for the final summary plot.
        self.epoch_loss.append(logs.get('loss'))

    def on_train_end(self, logs=None):
        """Plot the per-epoch loss curve and release resources."""
        fig = plt.figure("epoch loss")
        plt.plot(np.array(self.epoch_loss))
        plt.gca().set_xlabel('epoch')
        plt.gca().set_ylabel('loss')
        plt.gca().set_title('epoch loss')
        save_path = os.path.join(self.path, './epoch_loss.jpg')
        plt.savefig(save_path)
        plt.close(fig)
        self.file.close()
def lr_schedule(epoch):
    """Step-decay learning-rate schedule.

    The base rate 1e-3 is reduced after epochs 30, 60, 90 and 120.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): the current epoch number
    # Returns
        lr (float): learning rate
    """
    base = 1e-3
    # (threshold, multiplier) pairs, checked from the largest down; the
    # first matching threshold wins.
    steps = ((120, 0.5e-3), (90, 1e-3), (60, 1e-2), (30, 1e-1))
    factor = 1.0
    for threshold, mult in steps:
        if epoch > threshold:
            factor = mult
            break
    lr = base * factor
    print('Learning rate: ', lr)
    return lr
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.