import json
from pathlib import Path
from prompt_toolkit import PromptSession
from prompt_toolkit.shortcuts import print_formatted_text
from prompt_toolkit.history import FileHistory
from xdg import XDG_CONFIG_HOME
from everhour import Everhour
from .completer import everhour_completer
from .history import get_history
from .subcommands import start_timer, stop_timer, list_tasks
timer = None
def get_xdg_json_data():
path = Path(XDG_CONFIG_HOME, 'everhour', 'settings.json')
if path.exists():
with path.open('r') as a:
return json.load(a)
return {}
configs = get_xdg_json_data()
api_map = {k: Everhour(v) for k,v in configs.items()}
def main():
history = FileHistory(get_history())
session = PromptSession('> ', completer=everhour_completer, history=history)
global timer
while True:
def get_prompt():
" Tokens to be shown before the prompt. "
if timer:
return [
('bg:#008800 #ffffff', '%s' % (timer)),
('', ' > ')
]
return [
('', '> ')
]
try:
text = session.prompt(get_prompt, refresh_interval=1)
text = text.strip()
if text.startswith('start'):
task_id = text.split(' ')[1]
timer = start_timer(api_map, timer, task_id)
elif text.startswith('stop'):
timer = stop_timer(timer)
elif text.startswith('list'):
range_ = text.split(' ')
if len(range_) > 1:
range_ = range_[1]
else:
range_ = 'today'
list_tasks(api_map, range_)
except KeyboardInterrupt:
continue # Control-C pressed. Try again.
except EOFError:
if timer:
timer.stop()
break # Control-D pressed.
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import glob
# look for files named counts_<something>.txt, which are the output files from
# the previous step
# load each of them, read the number out of it, and add it to our total counts
total_counts = 0
for counts_file in glob.glob("counts_*.txt"):
with open(counts_file, mode="r") as file:
counts = int(file.readline())
total_counts += counts
# write the results to an output file, which HTCondor will detect and transfer
# back to the submit machine
with open("total_counts.txt", mode="w") as file:
file.write(str(total_counts))
|
from math import trunc
num = float(input("Digite um número: "))
arred = trunc(num)
print("O número {} arredondado é:{} ".format(num, arred))
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def lncosh(x):
return -x + F.softplus(2.*x) - math.log(2.)
def tanh_prime(x):
return 1.-torch.tanh(x)**2
def tanh_prime2(x):
t = torch.tanh(x)
return 2.*t*(t*t-1.)
def sigmoid_prime(x):
s = torch.sigmoid(x)
return s*(1.-s)
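# Sanity-check sketch (not part of the original module): verify the hand-coded derivative
# helpers above against torch.autograd on random inputs; d/dx log(cosh x) = tanh x and
# d^2/dx^2 log(cosh x) = 1 - tanh(x)^2, which is exactly what lncosh/tanh_prime encode.
def _check_derivative_helpers():
    x = torch.randn(8, requires_grad=True)
    (g,) = torch.autograd.grad(lncosh(x).sum(), x, create_graph=True)
    assert torch.allclose(g, torch.tanh(x), atol=1e-6)
    (g2,) = torch.autograd.grad(g.sum(), x)
    assert torch.allclose(g2, tanh_prime(x), atol=1e-6)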
class CNN(nn.Module):
def __init__(self, L, channel, hidden_size, device='cpu', name=None):
super(CNN, self).__init__()
self.device = device
if name is None:
self.name = 'CNN'
else:
self.name = name
self.L = L
self.dim = L**2
self.channel = channel
self.conv1 = nn.Conv2d(self.channel, hidden_size, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(hidden_size, 2*hidden_size, kernel_size=3, padding=1)
self.fc1 = nn.Linear(2*hidden_size*(self.L//4)**2, 64)
self.fc2 = nn.Linear(64, 1, bias=False)
def forward(self, x):
x = x.view(x.shape[0], self.channel, self.L, self.L)
x = F.softplus(F.max_pool2d(self.conv1(x), 2))
x = F.softplus(F.max_pool2d(self.conv2(x), 2))
x = x.view(x.shape[0], -1)
x = F.softplus(self.fc1(x))
return self.fc2(x).sum(dim=1)
def grad(self, x):
batch_size = x.shape[0]
return torch.autograd.grad(self.forward(x), x, grad_outputs=torch.ones(batch_size, device=x.device), create_graph=True)[0]
class MLP(nn.Module):
def __init__(self, dim, hidden_size, use_z2=True, device='cpu', name=None):
super(MLP, self).__init__()
self.device = device
if name is None:
self.name = 'MLP'
else:
self.name = name
self.dim = dim
self.fc1 = nn.Linear(dim, hidden_size, bias=False)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, 1, bias=False)
if use_z2:
self.activation = lncosh
else:
self.activation = F.softplus
def forward(self, x):
out = self.activation(self.fc1(x))
out = F.softplus(self.fc2(out))
out = self.fc3(out)
return out.sum(dim=1)
def grad(self, x):
batch_size = x.shape[0]
return torch.autograd.grad(self.forward(x), x, grad_outputs=torch.ones(batch_size, device=x.device), create_graph=True)[0]
class Simple_MLP(nn.Module):
'''
Single hidden layer MLP
with handcoded grad and laplacian function
'''
def __init__(self, dim, hidden_size, use_z2=True, device='cpu', name=None):
super(Simple_MLP, self).__init__()
self.device = device
if name is None:
self.name = 'Simple_MLP'
else:
self.name = name
self.dim = dim
self.fc1 = nn.Linear(dim, hidden_size, bias=not use_z2)
self.fc2 = nn.Linear(hidden_size, 1, bias=False)
if use_z2:
self.activation = lncosh
self.activation_prime = torch.tanh
self.activation_prime2 = tanh_prime
else:
self.activation = F.softplus
self.activation_prime = torch.sigmoid
self.activation_prime2 = sigmoid_prime
def forward(self, x):
out = self.activation(self.fc1(x))
out = self.fc2(out)
return out.sum(dim=1)
def grad(self, x):
'''
grad u(x)
'''
out = self.activation_prime(self.fc1(x))
out = torch.mm(out, torch.diag(self.fc2.weight[0]))
out = torch.mm(out, self.fc1.weight)
return out
def laplacian(self, x):
'''
        Laplacian of u(x): div(grad u(x))
'''
out = self.activation_prime2(self.fc1(x))
out = torch.mm(out, torch.diag(self.fc2.weight[0]))
out = torch.mm(out, self.fc1.weight**2)
return out.sum(dim=1)
def acceleration(self, x):
'''
        d^2 x/dt^2 = grad[(grad phi)^2] = 2 (v . grad) v = 2 H . v
'''
grad = self.grad(x)
return torch.autograd.grad((grad**2).sum(dim=1), x, grad_outputs=torch.ones(x.shape[0], device=x.device), create_graph=True)[0]
if __name__=='__main__':
from hessian import compute_grad_and_hessian
batchsize = 1
L = 4
dim = L**2
x = torch.randn(batchsize, dim, requires_grad = True)
net = Simple_MLP(dim=dim, hidden_size = 10)
print (net.acceleration(x))
grad, hessian = compute_grad_and_hessian(net(x), x)
print (grad.shape)
print (hessian.shape)
print (2.*torch.bmm(grad.unsqueeze(1), hessian).squeeze(0))
|
'''
Created on 14/11/2014
@author: javgar119
'''
from QuantLib import *
# dates
calendar = TARGET()
todaysDate = Date(14, 11, 2014)
Settings.instance().evaluationDate = todaysDate
settlementDate = Date(14, 11, 2014)
maturity = Date(17, 2, 2015)
dayCounter = Actual365Fixed()
# option parameters
option_type = Option.Call
underlying = 39.0
strike = 41.5
dividendYield = 0.0
riskFreeRate = 0.01
volatility = 0.17
# basic option
payoff = PlainVanillaPayoff(option_type, strike)
exercise = EuropeanExercise(maturity)
europeanOption = VanillaOption(payoff, exercise)
# handle setups
underlyingH = QuoteHandle(SimpleQuote(underlying))
flatTermStructure = YieldTermStructureHandle(FlatForward(settlementDate, riskFreeRate, dayCounter))
dividendTS = YieldTermStructureHandle(FlatForward(settlementDate, dividendYield, Actual365Fixed()))
flatVolTS = BlackVolTermStructureHandle(BlackConstantVol(settlementDate, calendar, volatility, dayCounter))
# done
bsmProcess = BlackScholesMertonProcess(underlyingH,
                                       dividendTS,
                                       flatTermStructure,
                                       flatVolTS)
# method: analytic
europeanOption.setPricingEngine(AnalyticEuropeanEngine(bsmProcess))
value = europeanOption.NPV()
print("European option value ", value)
|
import discord
import asyncio
from discord.ext import tasks, commands
from config import *
import logging
from mcstatus import MinecraftServer
import base64
from io import BytesIO
import concurrent.futures
class mcstatus_cog(commands.Cog):
def __init__(self, bot: commands.Bot, get_status):
self.bot = bot
self.get_status = get_status
self.logger = logging.getLogger("tonymc.mcstatus_cog")
self.mc_server = MinecraftServer(config_ip, config_port)
self.server_status = None
self.favicon = None
self.decoded_favicon = None
# possible: offline, whitelist (prob not), online
self.server_power_status = "offline"
self.periodically_get_status.add_exception_type(ConnectionError)
self.periodically_get_status.add_exception_type(IOError)
self.periodically_get_status.add_exception_type(ValueError)
self.periodically_get_status.start()
self.did_status_crash.start()
@tasks.loop()
async def did_status_crash(self):
await asyncio.sleep(15)
if not self.periodically_get_status.is_running():
self.logger.error("Status updater not running!")
if self.periodically_get_status.failed():
self.logger.error("Status updater failed!")
@did_status_crash.before_loop
async def before_crash(self):
self.logger.debug("Waiting for bot to be ready... (Status Watcher)")
@tasks.loop(seconds=config_ping_time)
async def periodically_get_status(self):
self.logger.debug("Starting to get server status (MCStatus)")
try:
loop = asyncio.get_running_loop()
self.server_status = await loop.run_in_executor(
None, self.mc_server.status)
except (ConnectionError, IOError, ValueError):
self.logger.debug(
"Server was not on - Or at least some kind of connection issue...")
self.server_power_status = "offline"
else:
self.logger.debug("Server was on! Populating variables.")
if self.server_status.favicon is not None:
base_favicon = self.server_status.favicon
# Add correct padding to favicon, otherwise the base64 library refuses to decode it.
# https://stackoverflow.com/a/2942039
base_favicon += "=" * ((4 - len(base_favicon) % 4) % 4)
# Additionally, it doesn't seem to remove the type header, causing a corrupted image to be created.
base_favicon = base_favicon.replace("data:image/png;base64,", "")
self.decoded_favicon = base64.b64decode(base_favicon)
else:
self.decoded_favicon = None
self.server_power_status = "online"
self.logger.debug("Updating presence")
await self.change_discord_status()
@periodically_get_status.before_loop
async def before_status(self):
self.logger.debug("Waiting for bot to be ready... (Server Status)")
await self.bot.wait_until_ready()
async def change_discord_status(self, given_status=None):
game = None
status = None
if given_status is None:
server_status = await self.get_status()
else:
server_status = given_status
if server_status == "offline":
game = discord.Game("Server Offline")
status = discord.Status.dnd
elif server_status == "online":
            current, max_players = await self.get_players_and_max()
            game = discord.Game("{0}/{1} Players".format(current, max_players))
if current == 0:
status = discord.Status.idle
else:
status = discord.Status.online
elif server_status == "starting":
game = discord.Game("Server Starting")
status = discord.Status.idle
elif server_status == "stopping":
game = discord.Game("Server Stopping")
status = discord.Status.dnd
else:
game = discord.Game("Unknown Error")
status = discord.Status.idle
try:
await self.bot.change_presence(status=status, activity=game)
self.logger.debug(
"Changed presence to: {0}, {1}".format(game, status))
except TypeError:
self.logger.debug(
"TypeError when changing presence")
async def get_players_and_max(self):
if self.server_power_status == "online":
return self.server_status.players.online, self.server_status.players.max
else:
return 0, 0
|
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# https://github.com/Roffild/RoffildLibrary
# ==============================================================================
import os
import shutil
import subprocess as proc
import sys
import unittest
from configparser import ConfigParser
from pathlib import Path
CURDIR = Path(__file__).parent.joinpath("_buildall")
def search(glob_pattern):
for path in CURDIR.glob(glob_pattern):
return path
return None
def searchInPF(glob_pattern):
for path in Path(os.environ["ProgramW6432"]).glob(glob_pattern):
return path
for path in Path(os.environ["ProgramFiles(x86)"]).glob(glob_pattern):
return path
return None
def environmentWrite(fpath):
with open(fpath, "w", encoding="utf-16le") as cfg:
cfg.write("\uFEFF") # 0xFFFE - in file
cfg.write("Windows Registry Editor Version 5.00\n\n")
cfg.write("#[HKEY_CURRENT_USER\Environment]\n")
cfg.write("[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]\n")
cfg.write('"PYTHONHOME"="' + str(sys.prefix).replace("\\", "\\\\") + '"\n')
env = os.environ.get("MT5_HOME")
if env is None:
for env in Path(os.environ["ProgramFiles"]).rglob("metaeditor64.exe"):
env = env.parent
break
cfg.write('"MT5_HOME"="' + str(env).replace("\\", "\\\\") + '"\n')
env = os.environ.get("JAVA_HOME")
if env is None:
for env in Path(os.environ["ProgramFiles"]).rglob("java.exe"):
env = env.parent
break
cfg.write('"JAVA_HOME"="' + str(env).replace("\\", "\\\\") + '"\n')
env = os.environ.get("JAVA_TOOL_OPTIONS")
if env is None:
env = "-Xmx9g"
cfg.write('"JAVA_TOOL_OPTIONS"="' + str(env).replace("\\", "\\\\") + '"\n')
env = os.environ.get("GRADLE_USER_HOME")
if env is None:
cfg.write('# "GRADLE_USER_HOME"=""\n')
env = os.environ.get("HADOOP_HOME")
if env is None:
env = ""
        cfg.write('# %HADOOP_HOME%\\bin\\winutils.exe\n# https://github.com/steveloughran/winutils\n')
cfg.write('"HADOOP_HOME"="' + str(env).replace("\\", "\\\\") + '"\n')
env = os.environ.get("SPARK_HOME")
if env is None:
env = ""
cfg.write('"SPARK_HOME"="' + str(env).replace("\\", "\\\\") + '"\n')
env = os.environ.get("SPARK_LOCAL_DIRS")
if env is None:
env = ""
cfg.write('"SPARK_LOCAL_DIRS"="' + str(env).replace("\\", "\\\\") + '"\n')
def environmentRead(fpath):
with open(fpath, "r", encoding="utf-16le") as cfg:
cfg.readline()
source = "[env]\n"
for line in cfg:
kv = line.split("=", 1)
if len(kv) > 1:
source += kv[0].strip("\" \n") + "=" + kv[1].strip("\" \n") + "\n"
print(source)
parser = ConfigParser()
parser.read_string(source)
for k in parser.options("env"):
os.environ[k.upper()] = str(parser.get("env", k, raw=True)) \
.replace("\\\\", "\\").replace("\\\\", "\\")
def mql5():
MT5_HOME = Path(os.environ["MT5_HOME"])
MT5_EDITOR = MT5_HOME.joinpath("metaeditor64.exe")
with open(CURDIR.joinpath("mql5_all.log"), "w") as log_all, \
open(CURDIR.joinpath("mql5_errors.log"), "w") as log_errors:
for root, folders, names in os.walk(CURDIR.parent):
for n in names:
ext = n[-4:]
if ext == ".mq5" or ext == ".mqh":
pth = Path(root, n)
print(pth, end="", flush=True)
ret = proc.run('"' + str(MT5_EDITOR) + '" /log /compile:"' + str(pth) + '"',
cwd=MT5_HOME).returncode
print(" - OK" if ret == 1 else " - FAIL")
with open(pth.with_suffix(".log"), 'r', encoding='utf-16le') as flog:
flog.read(1)
lines = flog.readlines()
log_all.write(n + ":\n")
log_all.writelines(lines)
log_all.write("\n")
log_all.flush()
if ret == 0:
log_errors.write(n + ":\n")
log_errors.writelines(lines)
log_errors.write("\n")
log_errors.flush()
def mql5tests():
# The function does not work
import time
MT5_HOME = Path(os.environ["MT5_HOME"])
MT5_TERMINAL = MT5_HOME.joinpath("terminal64.exe")
config = CURDIR.joinpath("temp_terminal.ini")
for pth in CURDIR.parent.joinpath(r"Scripts\Roffild\UnitTests").glob("*.mq5"):
with open(config, "w", encoding="utf-16le") as cfg:
cfg.write("\uFEFF") # 0xFFFE - in file
cfg.write("[StartUp]\nSymbol=EURUSD\nPeriod=M1\n")
cfg.write("Script=Roffild\\UnitTests\\" + pth.stem + "\n")
try:
proc.run('start "" "' + str(MT5_TERMINAL) + '" /config:"' + str(config) + '"', cwd=MT5_HOME,
shell=True)
time.sleep(5)
# proc.Popen('"'+str(MT5_TERMINAL) + '" /config:"' + str(config) + '"', cwd=MT5_HOME).communicate(None, timeout=1)
except proc.TimeoutExpired:
pass
def mql5doc():
with open(CURDIR.joinpath("doxygen.log"), "w") as log:
doxygen = search("**/doxygen.exe")
if doxygen is None:
doxygen = searchInPF("**/doxygen.exe")
if doxygen is None:
log.write("doxygen.exe not found!\n")
print("doxygen.exe not found!\n")
return
mql5doc_src = CURDIR.parent.joinpath("mql5doc")
mql5doc_dst = CURDIR.joinpath("mql5doc")
if mql5doc_src.exists():
shutil.rmtree(mql5doc_src)
cmd = '"' + str(doxygen) + '"'
cwd = str(CURDIR.parent)
log.write(cwd + ">" + cmd + "\n")
log.flush()
print(cwd + ">" + cmd + "\n")
proc.run(cmd, cwd=cwd, stdout=log, stderr=log)
if mql5doc_dst.exists():
shutil.rmtree(mql5doc_dst)
shutil.move(mql5doc_src, mql5doc_dst)
def python():
if sys.gettrace() is None:
import pip._internal
pip._internal.main(
["install", "-I", "-e", str(CURDIR.parent.joinpath(r"Include\Roffild\RoffildPython"))])
with open(CURDIR.joinpath("python_unittests.log"), "w") as log:
log.writeln = lambda text="": log.write(text + "\n")
testresult = unittest.TextTestResult(log, "", 100)
unittest.defaultTestLoader.discover(
CURDIR.parent.joinpath(r"Include\Roffild\RoffildPython\roffild\test")).run(testresult)
testresult.printErrors()
def java():
root = CURDIR.parent
with open(CURDIR.joinpath("java.log"), "w") as log:
gradle = search("**/gradle.bat")
if gradle is None:
log.write("gradle.bat not found!\n")
print("gradle.bat not found!\n")
return
if "GRADLE_USER_HOME" not in os.environ \
and not Path(os.environ["USERPROFILE"], ".gradle").exists():
os.environ["GRADLE_USER_HOME"] = str(CURDIR.joinpath(".gradle"))
def gradle_run(cmd, path):
cmd = '"' + str(gradle) + '" ' + cmd
text = str(path) + ">" + cmd + "\n"
log.write(text)
log.flush()
print(text)
proc.run(cmd, cwd=path, stdout=log, stderr=log)
gradle_run("clean check :alljavadoc", root.joinpath(r"Include\Roffild\RoffildJava"))
gradle_run("clean check shadowJar", root.joinpath(r"Experts\Roffild\AmazonUtils"))
gradle_run("clean check shadowJar", root.joinpath(r"Scripts\Roffild\MLPDataFileSparkTest"))
gradle_run("clean check jar", root.joinpath(r"Include\Roffild\LogMX"))
javadoc_src = root.joinpath(r"Include\Roffild\RoffildJava\build\javadoc")
javadoc_dst = CURDIR.joinpath("javadoc")
if javadoc_dst.exists():
shutil.rmtree(javadoc_dst)
shutil.move(javadoc_src, javadoc_dst)
with open(CURDIR.joinpath("spark.log"), "w") as log:
path = root.joinpath(r"Scripts\Roffild\MLPDataFileSparkTest")
cmd = str(Path(os.environ["APPDATA"], r"MetaQuotes\Terminal\Common\Files",
r"MLPData\mlp_2147483645.bin"))
cmd = '"' + str(path.joinpath("spark.bat")) + '" "' + cmd + '"'
text = str(path) + ">" + cmd + "\n"
log.write(text)
log.flush()
print(text)
proc.run(cmd, cwd=path, stdout=log, stderr=log)
if __name__ == '__main__':
fenv = CURDIR.joinpath("environment.reg")
if not fenv.is_file():
CURDIR.mkdir(exist_ok=True)
environmentWrite(fenv)
environmentRead(fenv)
mql5()
python()
java()
# mql5tests()
mql5doc()
|
#!/usr/bin/env python
""" This module is the top of the current measurement documentation
all other modules contained in the script folder are utilized here.
The main method starts the threads that communicate with
each MCU on the embedded board and save the received data.
The scmcu thread controls the MCU, which causes the FPGA to switch
its state. The scmon activates the measurement function
on the monitor MCU and receives the measurement data. The third thread
saves this. Further, the data can be formatted and plots can be made if desired.
"""
import convert.storage.threaded_storage as ts
from convert.storage.adapter.timer.timer import Timer
from convert.meas_processor import MeasProcessor
# Define the path where the data is stored and the communication parameters.
FOLDERPATH = 'meas/TEST/' # Folder to store the measurement
FILEPATH = '400Hz10mV1s' # File to store the measurement
PORT_MON = 'COM26' # COM port monitor MCU 'ttyS26' #
PORT_MCU = 'COM22' # COM port controller MCU
BAUDRATE = 115200
# The signals controlling the monitor MCU
START_MONITOR_ALL = 'A'
START_MON_USB = 'U'
START_MON_FPGA = 'F'
START_MON_WIREL = 'W'
PAUSE_MON_CMD = 'p'
STOP_MON_CMD = 'q'
# The signals to the MCU that controls the FPGA.
SUSPEND_FPGA_CMD = 'S'
UNSUSPEND_FPGA_CMD = 's'
RESET_FPGA_CMD = 'R'
SHUT_ON_FPGA_CMD = 'C'
SHUT_OFF_FPGA_CMD = 'c'
TESTMODE_FPGA_CMD = 'T'
LEDFLASH_FPGA_CMD = 'L'
# Specifies the layout of the plot
PLOT_SUBPLOTS = 2
PLOT_ALL_IN_ONE = 1
PLOT_BOTH = 3
def formatData():
""" The stored data is processed and plotted by this function.
The function parameters affect the layout of the results.
The 'path' and 'meas_time' attributes are mandatory parameters
as they tell the module where the data is to be
stored and which time period underlies the plot.
'plotoverlay' specifies which layout is chosen for the plot and
'showplot' specifies whether the plot is to be shown after creation.
'meas_id' will be written to the plot title.
"""
    # Instantiate the processor; 'plotoverlay' selects the layout:
    # PLOT_ALL_IN_ONE (1), PLOT_SUBPLOTS (2) or PLOT_BOTH (3)
mp = MeasProcessor(folderpath=FOLDERPATH, filepath=FILEPATH, meas_id='1', meas_time=time_limit, plotoverlay=PLOT_SUBPLOTS, showplot=True)
# Execute data processing
mp.processFileByLine()
if __name__ == "__main__":
# Set time limit for monitoring
time_limit = 0.5
# Set desired control signal
# mcu_data = [TESTMODE_FPGA_CMD, LEDFLASH_FPGA_CMD]
# mcu_data = UNSUSPEND_FPGA_CMD
# mcu_data = SUSPEND_FPGA_CMD
# mcu_data = RESET_FPGA_CMD
# mcu_data = SHUT_ON_FPGA_CMD
mcu_data = SHUT_OFF_FPGA_CMD
    # Execute the process one or more times
for i in range(1):
        # Instantiate the threads and provide control signals
scmcu = ts.SerialControllerMcu(port=PORT_MCU, baudrate=BAUDRATE, data=mcu_data)
sc = ts.StorageController(folderpath=FOLDERPATH, filepath=FILEPATH)
scmon = ts.SerialControllerMon(port=PORT_MON, baudrate=BAUDRATE, data=START_MON_FPGA)
# List for thread start method
# threads = [scmon, sc, scmcu]
threads = [scmon, sc]
# Start all threads
ts.startThreads(threads)
# Start timer
timer = Timer()
timer.timerSleep(0.2)
scmcu.start()
# Check if time limit is reached and stop threads
while timer.get_elapsed_time() < time_limit:
pass
ts.stopThreads(threads)
scmcu.flag = False
scmcu.join()
# Comment out the method call in order to just store the received data without formatting
formatData()
# Set sleep period between loops
# timer.timerSleep(2)
|
import argparse
import os
from fuckery.cli import main
import tests.common as t_common
def test_main():
fn = 'hello_world.bf'
fp = t_common.get_file(fn)
ns = argparse.Namespace()
ns.verbose = False
ns.input = fp
ns.loop_detection = False
ns.memory_size = 100
main(options=ns)
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import paddle
from paddle.optimizer import Optimizer
import warnings
__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS']
class Adadelta(Optimizer):
def __init__(self, lr=0.001, eps=1.0e-6, rho=0.95, weight_decay=0.0, grad_clip=None):
if lr is None:
raise ValueError('learn_rate is not set.')
if eps is None:
raise ValueError('eps is not set.')
if rho is None:
raise ValueError('rho is not set')
self.lr = lr
self.eps = eps
self.rho = rho
self.grad_succeed = True
self.init_optim = False
self.weight_decay = weight_decay
self.grad_clip = grad_clip
def gradient(self, loss, weights):
if loss is None:
raise ValueError('loss is not set.')
if weights is None:
raise ValueError('weights is not set.')
if not self.init_optim:
self.adadelta = paddle.optimizer.Adadelta(
learning_rate=self.lr, epsilon=self.eps, rho=self.rho, parameters=weights,
grad_clip=self.grad_clip, weight_decay=self.weight_decay
)
self.init_optim = True
loss.backward()
grads_and_vars = self.adadelta.backward(loss=loss, parameters=weights)
params, grads, self.grad_succeed = filter_grads(grads_and_vars, weights)
self.grads_and_vars = grads_and_vars
return grads
def apply_gradients(self, grads_and_vars):
grads_and_vars = zip_grads_and_params(grads_and_vars, self.grad_succeed, self.grads_and_vars)
if grads_and_vars is None:
raise ValueError('grads_and_vars is not set.')
self.adadelta._apply_optimize(loss=None, startup_program=None, params_grads=grads_and_vars)
self.adadelta.clear_grad()
class Adagrad(Optimizer):
def __init__(self, lr=0.001, initial_accumulator_value=0.0, eps=1.0e-6, weight_decay=0.0, grad_clip=None):
if lr is None:
raise ValueError('lr is not set.')
if initial_accumulator_value is None:
raise ValueError('initial_accumulator_value is not set.')
if eps is None:
raise ValueError('eps is not set.')
self.lr = lr
self.initial_accumulator_value = initial_accumulator_value
self.eps = eps
self.grad_succeed = True
self.init_optim = False
self.weight_decay = weight_decay
self.grad_clip = grad_clip
def gradient(self, loss, weights):
if loss is None:
raise ValueError('loss is not set.')
if weights is None:
raise ValueError('weights is not set.')
if not self.init_optim:
self.adagrad = paddle.optimizer.Adagrad(
learning_rate=self.lr, epsilon=self.eps,
initial_accumulator_value=self.initial_accumulator_value, parameters=weights, grad_clip=self.grad_clip,
weight_decay=self.weight_decay
)
self.init_optim = True
loss.backward()
grads_and_vars = self.adagrad.backward(loss=loss, parameters=weights)
params, grads, self.grad_succeed = filter_grads(grads_and_vars, weights)
self.grads_and_vars = grads_and_vars
return grads
def apply_gradients(self, grads_and_vars):
grads_and_vars = zip_grads_and_params(grads_and_vars, self.grad_succeed, self.grads_and_vars)
if grads_and_vars is None:
raise ValueError('grads_and_vars is not set.')
self.adagrad._apply_optimize(loss=None, startup_program=None, params_grads=grads_and_vars)
self.adagrad.clear_grad()
class Adam(Optimizer):
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1.0e-8, weight_decay=0.0, grad_clip=None):
if lr is None:
raise ValueError('lr is not set.')
if beta_1 is None:
raise ValueError('beta_1 is not set.')
if beta_2 is None:
raise ValueError('beta_2 is not set.')
if eps is None:
raise ValueError('eps is not set.')
if not 0 <= beta_1 < 1:
raise ValueError("Invaild value of beta1, expect beta1 in [0,1).")
if not 0 <= beta_2 < 1:
raise ValueError("Invaild value of beta2, expect beta2 in [0,1).")
self.lr = lr
self.beta_1 = beta_1
self.beta_2 = beta_2
self.eps = eps
self.init_optim = False
self.grad_succeed = True
self.weight_decay = weight_decay
self.grad_clip = grad_clip
def gradient(self, loss, weights):
if loss is None:
raise ValueError('loss is not set.')
if weights is None:
raise ValueError('weights is not set.')
if not self.init_optim:
self.adam = paddle.optimizer.Adam(
learning_rate=self.lr, beta1=self.beta_1, beta2=self.beta_2, epsilon=self.eps,
parameters=weights, grad_clip=self.grad_clip, weight_decay=self.weight_decay
)
self.init_optim = True
loss.backward()
grads_and_vars = self.adam.backward(loss, parameters=weights)
params, grads, self.grad_succeed = filter_grads(grads_and_vars, weights)
self.grads_and_vars = grads_and_vars
return grads
def apply_gradients(self, grads_and_vars):
grads_and_vars = zip_grads_and_params(grads_and_vars, self.grad_succeed, self.grads_and_vars)
if grads_and_vars is None:
raise ValueError('grads_and_vars is not set.')
self.adam._apply_optimize(loss=None, startup_program=None, params_grads=grads_and_vars)
self.adam.clear_grad()
class Adamax(Optimizer):
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1.0e-8, weight_decay=0.0, grad_clip=None):
if lr is None:
raise ValueError('lr is not set.')
if beta_1 is None:
raise ValueError('beta_1 is not set.')
if beta_2 is None:
raise ValueError('beta_2 is not set.')
if eps is None:
raise ValueError('eps is not set.')
if not 0 <= beta_1 < 1:
raise ValueError("Invaild value of beta1, expect beta1 in [0,1).")
if not 0 <= beta_2 < 1:
raise ValueError("Invaild value of beta2, expect beta2 in [0,1).")
self.lr = lr
self.beta_1 = beta_1
self.beta_2 = beta_2
self.eps = eps
self.grad_succeed = True
self.init_optim = False
self.weight_decay = weight_decay
self.grad_clip = grad_clip
def gradient(self, loss, weights):
if loss is None:
raise ValueError('loss is not set.')
if weights is None:
raise ValueError('weights is not set.')
if not self.init_optim:
self.adamax = paddle.optimizer.Adamax(
learning_rate=self.lr, beta1=self.beta_1, beta2=self.beta_2, epsilon=self.eps,
parameters=weights, grad_clip=self.grad_clip, weight_decay=self.weight_decay
)
self.init_optim = True
loss.backward()
grads_and_vars = self.adamax.backward(loss=loss, parameters=weights)
params, grads, self.grad_succeed = filter_grads(grads_and_vars, weights)
self.grads_and_vars = grads_and_vars
return grads
def apply_gradients(self, grads_and_vars):
grads_and_vars = zip_grads_and_params(grads_and_vars, self.grad_succeed, self.grads_and_vars)
if grads_and_vars is None:
raise ValueError('grads_and_vars is not set.')
self.adamax._apply_optimize(loss=None, startup_program=None, params_grads=grads_and_vars)
self.adamax.clear_grad()
class Ftrl(Optimizer):
def __init__(self):
raise Exception('Ftrl optimizer function not implemented')
class Nadam(Optimizer):
def __init__(self):
raise Exception('Nadam optimizer function not implemented')
class RMSprop(Optimizer):
def __init__(
self, lr=0.001, rho=0.95, eps=1.0e-6, momentum=0.0, centered=False, weight_decay=0.0,
grad_clip=None
):
if lr is None:
raise ValueError("lr is not set.")
if rho is None:
raise ValueError("rho is not set.")
if eps is None:
raise ValueError("eps is not set.")
if momentum is None:
raise ValueError("momentum is not set.")
if not 0.0 <= eps:
raise ValueError("Invalid value of eps, expect eps >= 0.")
if not 0.0 <= momentum:
raise ValueError("Invalid value of momentum, expect momentum >= 0.")
if not 0.0 <= rho:
raise ValueError("Invalid value of rho, expect rho >= 0.")
self.lr = lr
self.eps = eps
self.rho = rho
self.momentum = momentum
self.centered = centered
self.grad_succeed = True
self.init_optim = False
self.weight_decay = weight_decay
self.grad_clip = grad_clip
def gradient(self, loss, weights):
if loss is None:
raise ValueError('loss is not set.')
if weights is None:
raise ValueError('weights is not set.')
if not self.init_optim:
self.rmsprop = paddle.optimizer.RMSProp(
learning_rate=self.lr, epsilon=self.eps, rho=self.rho, momentum=self.momentum,
parameters=weights, grad_clip=self.grad_clip, weight_decay=self.weight_decay
)
self.init_optim = True
loss.backward()
grads_and_vars = self.rmsprop.backward(loss=loss, parameters=weights)
params, grads, self.grad_succeed = filter_grads(grads_and_vars, weights)
self.grads_and_vars = grads_and_vars
return grads
def apply_gradients(self, grads_and_vars):
grads_and_vars = zip_grads_and_params(grads_and_vars, self.grad_succeed, self.grads_and_vars)
if grads_and_vars is None:
raise ValueError('grads_and_vars is not set.')
self.rmsprop._apply_optimize(loss=None, startup_program=None, params_grads=grads_and_vars)
self.rmsprop.clear_grad()
class SGD(Optimizer):
def __init__(self, lr=0.1, momentum=0.0, weight_decay=0.0, grad_clip=None):
if lr is None:
raise ValueError("lr is not set.")
self.lr = lr
self.grad_succeed = True
self.init_optim = False
self.weight_decay = weight_decay
self.grad_clip = grad_clip
def gradient(self, loss, weights):
if loss is None:
raise ValueError('loss is not set.')
if weights is None:
raise ValueError('weights is not set.')
if not self.init_optim:
self.sgd = paddle.optimizer.SGD(
learning_rate=self.lr, parameters=weights, grad_clip=self.grad_clip,
weight_decay=self.weight_decay
)
self.init_optim = True
loss.backward()
grads_and_vars = self.sgd.backward(loss=loss, parameters=weights)
params, grads, self.grad_succeed = filter_grads(grads_and_vars, weights)
self.grads_and_vars = grads_and_vars
return grads
def apply_gradients(self, grads_and_vars):
grads_and_vars = zip_grads_and_params(grads_and_vars, self.grad_succeed, self.grads_and_vars)
if grads_and_vars is None:
raise ValueError('grads_and_vars is not set.')
self.sgd._apply_optimize(loss=None, startup_program=None, params_grads=grads_and_vars)
self.sgd.clear_grad()
class Momentum(Optimizer):
def __init__(self, lr=0.001, momentum=0.9, weight_decay=0.0, nesterov=False, grad_clip=None):
if lr is None:
raise ValueError("lr is not set")
if momentum is None:
raise ValueError("momentum is not set")
self.lr = lr
self.momentum = momentum
self.nesterov = nesterov
self.grad_succeed = True
self.init_optim = False
self.weight_decay = weight_decay
self.grad_clip = grad_clip
def gradient(self, loss, weights):
if loss is None:
raise ValueError('loss is not set.')
if weights is None:
raise ValueError('weights is not set.')
if not self.init_optim:
self.moment = paddle.optimizer.Momentum(
learning_rate=self.lr, momentum=self.momentum, parameters=weights,
use_nesterov=self.nesterov, grad_clip=self.grad_clip, weight_decay=self.weight_decay
)
self.init_optim = True
loss.backward()
grads_and_vars = self.moment.backward(loss=loss, parameters=weights)
params, grads, self.grad_succeed = filter_grads(grads_and_vars, weights)
self.grads_and_vars = grads_and_vars
return grads
def apply_gradients(self, grads_and_vars):
grads_and_vars = zip_grads_and_params(grads_and_vars, self.grad_succeed, self.grads_and_vars)
if grads_and_vars is None:
raise ValueError('grads_and_vars is not set.')
self.moment._apply_optimize(loss=None, startup_program=None, params_grads=grads_and_vars)
self.moment.clear_grad()
class Lamb(Optimizer):
def __init__(
self, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1.0e-6, weight_decay=0.01, grad_clip=None
):
if lr is None:
raise ValueError('lr is not set.')
if beta_1 is None:
raise ValueError('beta_1 is not set.')
if beta_2 is None:
raise ValueError('beta_2 is not set.')
if eps is None:
raise ValueError('eps is not set.')
if not 0 <= beta_1 < 1:
raise ValueError("Invaild value of beta1, expect beta1 in [0,1).")
if not 0 <= beta_2 < 1:
raise ValueError("Invaild value of beta2, expect beta2 in [0,1).")
self.lr = lr
self.lamb_weight_decay = weight_decay
self.beta_1 = beta_1
self.beta_2 = beta_2
self.eps = eps
self.grad_succeed = True
self.init_optim = False
self.grad_clip = grad_clip
def gradient(self, loss, weights):
if loss is None:
raise ValueError('loss is not set.')
if weights is None:
raise ValueError('weights is not set.')
if not self.init_optim:
self.lamb = paddle.optimizer.Lamb(
learning_rate=self.lr, lamb_weight_decay=self.lamb_weight_decay, beta1=self.beta_1,
beta2=self.beta_2, epsilon=self.eps, parameters=weights, grad_clip=self.grad_clip
)
self.init_optim = True
loss.backward()
grads_and_vars = self.lamb.backward(loss=loss, parameters=weights)
params, grads, self.grad_succeed = filter_grads(grads_and_vars, weights)
self.grads_and_vars = grads_and_vars
return grads
def apply_gradients(self, grads_and_vars):
grads_and_vars = zip_grads_and_params(grads_and_vars, self.grad_succeed, self.grads_and_vars)
if grads_and_vars is None:
raise ValueError('grads_and_vars is not set.')
self.lamb._apply_optimize(loss=None, startup_program=None, params_grads=grads_and_vars)
self.lamb.clear_grad()
class LARS(Optimizer):
def __init__(self):
pass
def gradient(self):
pass
def apply_gradients(self, grads_and_vars):
raise Exception('LARS optimizer function not implemented')
# TODO: There may be gradient incompleteness when calculating gradients with paddle.optimizer.backward().
def filter_grads(grads_and_vars, weights):
try:
params, grads = list(zip(*grads_and_vars))
    except Exception:
params, grads = [], []
if len(grads) - len(weights) == 0:
grad_succeed = True
else:
grad_succeed = False
return params, grads, grad_succeed
def zip_grads_and_params(grads_and_vars, grad_succeed, call_grads_and_vars):
    if not grad_succeed:
grads_and_vars = call_grads_and_vars
warnings.warn("The number of gradients and training parameters are not equal", RuntimeWarning)
else:
grads, params = list(zip(*grads_and_vars))
grads_and_vars = list(zip(params, grads))
return grads_and_vars
|
# -*- coding: utf-8 -*-
"""Git helper for the scaffolder project.
This file provides a class to assist with git operations.
"""
import os
import re
from typing import Tuple
from l2tscaffolder.helpers import cli
from l2tscaffolder.lib import errors
class GitHelper(cli.CLIHelper):
"""Helper class for git operations.
Attributes:
project_path: path to the git project folder.
"""
def __init__(self, project_path: str):
"""Initializes the git helper.
Arguments:
project_path (str): the path to the git project folder.
"""
super(GitHelper, self).__init__()
self.project_path = project_path
self._cwd = os.getcwd()
def AddFileToTrack(self, file_path: str):
"""Add a file to those that are tracked by the git repo.
Args:
file_path (str): path to the file to be added to tracked
files by this git repo.
Raises:
errors.UnableToConfigure: when the tool is not able to add
newly added files to the git repo.
"""
command = 'git add {0:s}'.format(file_path)
exit_code, output, error = self.RunCommand(command)
if exit_code != 0:
raise errors.UnableToConfigure((
'Unable to add files to git branch, output of "git add" '
'command is [{0:s}] with the error: {1:s}'.format(output, error)))
def HasBranch(self, branch_name: str) -> bool:
"""Tests for the existence of a specific branch.
Args:
branch_name (str): the name of the branch to test for.
Returns:
bool: True if the branch exists.
"""
command = 'git show-ref --verify --quiet refs/heads/"{0:s}"'.format(
branch_name)
exit_code, _, _ = self.RunCommand(command)
if exit_code == 0:
return True
return False
def GenerateBranchName(self, module_name: str) -> str:
"""Generates a git branch name.
Args:
module_name (str): module name to generate a git branch name from.
Returns:
str: git branch name.
"""
branch_name = re.sub('(?<!^)(?=[A-Z])', '_', module_name)
branch_name = branch_name.lower()
return branch_name
def GetActiveBranch(self) -> str:
"""Determines the active branch of the git project.
Returns:
str: the active branch of the git project.
Raises:
errors.UnableToConfigure: when the tool is not able to get
the active branch of the git project.
"""
command = 'git branch --list --no-color'
exit_code, output, error = self.RunCommand(command)
if exit_code != 0:
raise errors.UnableToConfigure((
'Unable to get the active git branch, with error message '
'{0:s}').format(error))
for line in output.split('\n'):
if line.startswith('*'):
_, _, line_string = line.partition('*')
return line_string.strip()
raise errors.UnableToConfigure('Unable to determine the active git branch')
def RunCommand(self, command: str) -> Tuple[int, str, str]:
"""Runs a command.
Args:
command (str): command to run.
Returns:
tuple[int, str, str]: exit code, output that was written to stdout
and stderr.
"""
os.chdir(self.project_path)
exit_code, output, error = super(GitHelper, self).RunCommand(command)
os.chdir(self._cwd)
return exit_code, output, error
def SwitchToBranch(self, branch: str) -> int:
"""Switches the git branch and returns the exit code of the command.
Arguments:
branch (str): the name of the git branch.
Returns:
int: the exit code from the git command.
"""
command = 'git checkout {0:s}'.format(branch)
exit_code, _, _ = self.RunCommand(command)
return exit_code
def CreateBranch(self, branch: str) -> int:
"""Creates a git branch and returns the exit code of the command.
Arguments:
branch (str): the name of the git branch.
Returns:
int: the exit code from the git command.
"""
command = 'git branch {0:s}'.format(branch)
exit_code, _, _ = self.RunCommand(command)
return exit_code
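# Illustrative usage (a sketch; '/path/to/project' and the module name are placeholders,
# not taken from the original code):
#   helper = GitHelper('/path/to/project')
#   branch = helper.GenerateBranchName('AppleSysdiagnose')  # -> 'apple_sysdiagnose'
#   if not helper.HasBranch(branch):
#       helper.CreateBranch(branch)
#   helper.SwitchToBranch(branch)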
|
from django.shortcuts import redirect
from django.urls import reverse
from guardian.decorators import permission_required
from rnapuzzles.models import Challenge
def publish_results(request, pk):
challenge = Challenge.objects.get(pk=pk)
challenge.result_published = True
challenge.save()
return redirect(reverse('show-results', kwargs={'pk': pk}))
|
# -*- coding: utf-8 -*-
"""
Segmenting text to Enhanced Thai Character Cluster (ETCC)
Python implementation by Wannaphong Phatthiyaphaibun
This implementation relies on a dictionary of ETCC created from etcc.txt
in pythainlp/corpus.
Notebook:
https://colab.research.google.com/drive/1UTQgxxMRxOr9Jp1B1jcq1frBNvorhtBQ
:See Also:
Inrut, Jeeragone, Patiroop Yuanghirun, Sarayut Paludkong, Supot Nitsuwat, and
Para Limmaneepraserth. "Thai word segmentation using combination of forward
and backward longest matching techniques." In International Symposium on
Communications and Information Technology (ISCIT), pp. 37-40. 2001.
"""
import re
from typing import List
from pythainlp import thai_follow_vowels
from pythainlp.corpus import get_corpus
from pythainlp.tokenize import Tokenizer
_cut_etcc = Tokenizer(get_corpus("etcc.txt"), engine="longest")
_PAT_ENDING_CHAR = f"[{thai_follow_vowels}ๆฯ]"
_RE_ENDING_CHAR = re.compile(_PAT_ENDING_CHAR)
def _cut_subword(tokens: List[str]) -> List[str]:
len_tokens = len(tokens)
i = 0
while True:
if i == len_tokens:
break
if _RE_ENDING_CHAR.search(tokens[i]) and i > 0 and len(tokens[i]) == 1:
tokens[i - 1] += tokens[i]
del tokens[i]
len_tokens -= 1
i += 1
return tokens
def segment(text: str) -> List[str]:
"""
Segmenting text into ETCCs.
Enhanced Thai Character Cluster (ETCC) is a kind of subword unit.
The concept was presented in Inrut, Jeeragone, Patiroop Yuanghirun,
Sarayut Paludkong, Supot Nitsuwat, and Para Limmaneepraserth.
"Thai word segmentation using combination of forward and backward
longest matching techniques." In International Symposium on Communications
and Information Technology (ISCIT), pp. 37-40. 2001.
:param str text: text to be tokenized to character clusters
:return: list of clusters, tokenized from the text
    :rtype: list[str]
"""
if not text or not isinstance(text, str):
return []
return _cut_subword(_cut_etcc.word_tokenize(text))
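# Usage sketch (not part of the original module): requires a pythainlp installation that
# bundles the etcc.txt corpus; the exact clusters depend on that dictionary.
if __name__ == "__main__":
    print(segment("ประเทศไทย"))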
|
from .data_service import TopicDataEntityHelper, TopicDataService, TopicStructureService, TopicTrigger
from .raw_data_service import RawTopicDataEntityHelper, RawTopicDataService
from .regular_data_service import RegularTopicDataEntityHelper, RegularTopicDataService
from .topic_storage import build_topic_data_storage
|
"""Helpers for Google Translate API"""
__author__ = "Amanjeev Sethi"
import logging
from apiclient.discovery import build
from .base_service import BaseService
class Google(BaseService):
def __init__(self, api_key):
self._api_key = api_key
self._service = build('translate', 'v2', developerKey=self._api_key)
self._directions = self._directions()
def _translate(self, initial_language, target, text):
return self._service.translations().list(
source=initial_language,
target=target,
q=[text]
).execute()
def translate_cascade(self, initial_language, cascade_steps, text):
""" 1. Check for the text if the service thinks it is the same language as the user has provided
2. Check if the services thinks steps are legit and there is no step that cannot be done
3. Translate cascadingly
:param initial_language: two letter string of the language user needs to start with
:param cascade_steps: user provided steps (usually excluding the initial language)
:param text: the text user wants to translate cascadingly
:return: a tuple of all translations and the final translation in the original language
"""
logging.debug(initial_language + " - " + text)
cascade_steps = self.steps_to_execute(initial_language, cascade_steps)
results = {}
orig_lang = initial_language
for lang in cascade_steps[1:]:
try:
response = self._translate(orig_lang, lang, text)
results[lang] = response['translations'][0]['translatedText']
orig_lang = lang
            except Exception:
return {}
text = results[lang]
logging.debug(lang + " - " + text)
result = results[initial_language]
return (results, result)
def get_language(self, text=""):
"""get the language detected by the service
:param text: the text user wants to translate cascadingly
:return: language detected
"""
result = self._service.detections().list(
q=[text]
).execute()
return result['detections'][0][0]['language']
def _directions(self):
"""Service's available translation directions
:return: list of the available translation directions (from-to)
"""
pass
def check_language(self, initial_language, text):
"""check whether the user provided text is in the same langauge as the
initial langauge provided by the user
:param initial_language: two letter string of the language user needs to start with
:param text: the text user wants to translate cascadingly
:return: boolean whether a language is correct
"""
lang = self.get_language(text)
if lang == initial_language:
is_correct_language = True
else:
is_correct_language = False
return is_correct_language
def is_translation_step_valid(self, from_lang, to_lang):
"""
If one translation step valid
:param from_lang: two letter string for lang
:param to_lang: two letter string for lang
:return: boolean if translation valid from_lang to to_lang
"""
response = self._service.languages().list(target=from_lang).execute()
valid = False
for lang in response['languages']:
if lang['language'] == to_lang:
valid = True
break
return valid
def check_cascade_steps(self, initial_language, cascade_steps):
"""check if steps provided by the user are allowed by the service
:param initial_language: two letter string of the language user needs to start with
:param cascade_steps: user provided steps (usually excluding the initial language)
:return: boolean of whether all the translation steps are doable
"""
cascade_steps = self.steps_to_execute(initial_language,
cascade_steps)
is_cascade_achievable = False
for lang in cascade_steps[1:]:
if self.is_translation_step_valid(initial_language, lang):
is_cascade_achievable = True
initial_language = lang
else:
is_cascade_achievable = False
break
return is_cascade_achievable
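# Illustrative usage only (a sketch, not from the original module): "YOUR_API_KEY" is a
# placeholder, the cascade steps are arbitrary, and every call below hits the live API.
if __name__ == "__main__":
    google = Google(api_key="YOUR_API_KEY")
    steps = ["fr", "de", "en"]
    if google.check_cascade_steps("en", steps):
        print(google.translate_cascade("en", steps, "hello world"))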
|
# PACKAGE: DO NOT EDIT THIS LINE
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.spatial
import sklearn
from ipywidgets import interact
from load_data import load_mnist
MNIST = load_mnist()
images = MNIST['data'].astype(np.double)
labels = MNIST['target'].astype(int)
# GRADED FUNCTION: DO NOT EDIT THIS LINE
def distance(x0, x1):
"""Compute distance between two vectors x0, x1 using the dot product"""
return np.sqrt(np.dot(x0-x1, x0-x1))
def angle(x0, x1):
"""Compute the angle between two vectors x0, x1 using the dot product"""
mx0 = np.dot(x0, x0)
mx1 = np.dot(x1, x1)
return np.arccos(np.dot(x0,x1)/np.sqrt(mx0*mx1))
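# Quick numeric illustration (a sketch, not one of the graded cells): for the unit vectors
# e0 = (1, 0) and e1 = (0, 1), the distance is sqrt(2) and the angle is pi/2.
assert np.isclose(distance(np.array([1.0, 0.0]), np.array([0.0, 1.0])), np.sqrt(2))
assert np.isclose(angle(np.array([1.0, 0.0]), np.array([0.0, 1.0])), np.pi / 2)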
# GRADED FUNCTION: DO NOT EDIT
def most_similar_image():
"""Find the index of the digit, among all MNIST digits
that is the second-closest to the first image in the dataset (the first image is closest to itself trivially).
Your answer should be a single integer.
"""
ref = images[0] # reference image
    result = np.linalg.norm(images[1:].astype(float) - ref.astype(float), axis=1)
index = np.argmin(result)+1
return index # 60
# GRADED FUNCTION: DO NOT EDIT
def pairwise_distance_matrix(X, Y):
"""Compute the pairwise distance between rows of X and rows of Y
Arguments
----------
X: ndarray of size (N, D)
Y: ndarray of size (M, D)
Returns
--------
distance_matrix: matrix of shape (N, M), each entry distance_matrix[i,j] is the distance between
ith row of X and the jth row of Y (we use the dot product to compute the distance).
"""
assert X.ndim == 2
assert Y.ndim == 2
return scipy.spatial.distance_matrix(X, Y)
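# Reference sketch (not the graded implementation): the same matrix can be computed with
# dot products alone, using ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 and broadcasting.
def _pairwise_distance_matrix_dot(X, Y):
    sq = np.sum(X**2, axis=1)[:, None] - 2.0 * (X @ Y.T) + np.sum(Y**2, axis=1)[None, :]
    return np.sqrt(np.maximum(sq, 0.0))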
# GRADED FUNCTION: DO NOT EDIT THIS LINE
def KNN(k, X, y, x):
"""K nearest neighbors
k: number of nearest neighbors
X: training input locations
y: training labels
x: test input
"""
N, D = X.shape
num_classes = len(np.unique(y))
dist = pairwise_distance_matrix(X, x.reshape(1, -1)).reshape(-1)
# Next we make the predictions
ypred = np.zeros(num_classes)
classes = y[np.argsort(dist)][:k] # find the labels of the k nearest neighbors
for c in np.unique(classes):
ypred[c] = len(classes[classes == c])
return np.argmax(ypred)
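# Tiny usage sketch (not part of the graded cells): two well-separated 2-D clusters;
# a query point near the second cluster should be assigned label 1.
_X_toy = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
_y_toy = np.array([0, 0, 1, 1])
assert KNN(3, _X_toy, _y_toy, np.array([4.9, 5.2])) == 1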
|
class pyidmlException(Exception):
"""Base-class for all exceptions raised by this module"""
class NotParaException(pyidmlException):
"""Expected a paragraph tag, received something else"""
|
def make_lookup():
"""
Returns dictionary relating ID numbers to h/v/b values
"""
d1, d2 = {}, {}
with open(r"res/ACNH_color_palette", "r") as f1:
d1['column_headers'] = f1.readline().split(",")
while s1 := f1.readline():
vals = s1.split(",")
if vals[3] not in d1:
d1[vals[3]] = " ".join(vals[0:3])
with open(r"res/farben.txt", "r") as f2:
while s2 := f2.readline():
vals = s2[1:].split(",")
d2[vals[1][:-1]] = vals[0]
d3 = {}
for k,v in d2.items():
d3[k] = d1[v]
return d3
|
# Suppose an array of length n sorted in ascending order is rotated between 1 and n times. For example, the array nums =
# [0,1,2,4,5,6,7] might become:
#
# [4,5,6,7,0,1,2] if it was rotated 4 times.
# [0,1,2,4,5,6,7] if it was rotated 7 times.
# Notice that rotating an array
# [a[0], a[1], a[2], ..., a[n-1]] 1 time results in the array [a[n-1], a[0], a[1], a[2], ..., a[n-2]].
#
# Given the sorted rotated array nums of unique elements, return the minimum element of this array.
# You must write an algorithm that runs in O(log n) time.
#
# Example 1:
# Input: nums = [3,4,5,1,2]
# Output: 1
# Explanation: The original array was [1,2,3,4,5] rotated 3 times.
#
# Example 2:
# Input: nums = [4,5,6,7,0,1,2]
# Output: 0
# Explanation: The original array was [0,1,2,4,5,6,7] and it was rotated 4 times.
#
# Example 3:
# Input: nums = [11,13,15,17]
# Output: 11
# Explanation: The original array was [11,13,15,17] and it was rotated 4 times.
#
# Constraints:
# n == nums.length
# 1 <= n <= 5000
# -5000 <= nums[i] <= 5000
# All the integers of nums are unique.
# nums is sorted and rotated between 1 and n times.
#
from typing import List
class Solution:
def findMin(self, nums: List[int]) -> int:
l, r, m = 0, len(nums) - 1, len(nums) // 2
curMin = nums[m]
while l <= r:
curMin = min(nums[m], curMin)
if nums[l] <= nums[r]:
curMin = min(nums[l], curMin)
break
if nums[m] >= nums[l]:
l = m + 1
else:
r = m - 1
m = (l + r) // 2
return curMin
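# Quick check against the examples in the problem statement above (a sketch, not part of
# the submitted class):
if __name__ == "__main__":
    s = Solution()
    assert s.findMin([3, 4, 5, 1, 2]) == 1
    assert s.findMin([4, 5, 6, 7, 0, 1, 2]) == 0
    assert s.findMin([11, 13, 15, 17]) == 11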
|
import cv2
import numpy as np
from math import sqrt
def convertLine(line):
    # cv2.fitLine returns (vx, vy, x0, y0): a unit direction vector and a point on the line.
    # Convert to the coefficients (a, b, c) of a*x + b*y + c = 0, taking (a, b) = (-vy, vx) as the unit normal.
    return np.array([-line[1], line[0], line[1]*line[2] - line[0]*line[3]])
def main():
ransac_trial = 50
ransac_n_sample = 2
ransac_thresh = 3.0
data_num = 1000
data_inlier_ratio = 0.5
data_inlier_noise = 1.0
truth = np.array([1.0/sqrt(2.0), 1.0/sqrt(2.0), -240.0])
data = []
for i in range(data_num):
if np.random.uniform(0.0, 1.0) < data_inlier_ratio:
x = np.random.uniform(0.0, 480.0)
y = (truth[0]*x + truth[2])/-truth[1]
            x += np.random.normal(0.0, data_inlier_noise)
            y += np.random.normal(0.0, data_inlier_noise)
data.append((x, y))
else:
data.append((np.random.uniform(0.0, 640.0), np.random.uniform(0.0, 480.0)))
best_score = -1
for i in range(ransac_trial):
sample = []
for j in range(ransac_n_sample):
index = int(np.random.uniform(0, len(data)))
sample.append(data[index])
nnxy = cv2.fitLine(np.asarray(sample), cv2.DIST_L2, 0, 0.01, 0.01)
line = convertLine(nnxy)
score = 0
for j in range(len(data)):
error = abs(line[0]*data[j][0] + line[1]*data[j][1] + line[2])
if error < ransac_thresh:
score += 1
if score > best_score:
best_score = score
best_line = line
nnxy = cv2.fitLine(np.asarray(data), cv2.DIST_L2, 0, 0.01, 0.01)
lsm_line = convertLine(nnxy)
print("* The Truth: %.3f, %.3f, %.3f\n" % (truth[0], truth[1], truth[2]))
print("* Estimate (RANSAC): %.3f, %.3f, %.3f (Score: %d)\n" % (
best_line[0], best_line[1], best_line[2], best_score))
print("* Estimate (LSM): %.3f, %.3f, %.3f\n" % (lsm_line[0], lsm_line[1], lsm_line[2]))
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 9 13:55:16 2015
@author: adelpret
"""
from control_manager_conf import *
TAU_MAX = 1e6; # security check of ControlManager
CURRENT_MAX = 1e6; # security check of ControlManager
CTRL_MAX = 1e6; # max desired current (security check of ControlManager)
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class CbtfLanl(CMakePackage):
"""CBTF LANL project contains a memory tool and data center type system
command monitoring tool."""
homepage = "http://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-lanl.git"
version('develop', branch='master')
version('1.9.3', branch='1.9.3')
version('1.9.2', branch='1.9.2')
version('1.9.1.2', branch='1.9.1.2')
version('1.9.1.1', branch='1.9.1.1')
version('1.9.1.0', branch='1.9.1.0')
variant('build_type', default='None', values=('None'),
description='CMake build type')
variant('runtime', default=False,
description="build only the runtime libraries and collectors.")
variant('cti', default=False,
description="Build MRNet with the CTI startup option")
depends_on("cmake@3.0.2:", type='build')
# For MRNet
depends_on("mrnet@5.0.1-3:+cti", when='@develop+cti')
depends_on("mrnet@5.0.1-3:+lwthreads", when='@develop')
depends_on("mrnet@5.0.1-3+cti", when='@1.9.1.0:9999+cti')
depends_on("mrnet@5.0.1-3+lwthreads", when='@1.9.1.0:9999')
# For Xerces-C
depends_on("xerces-c")
# For CBTF
depends_on("cbtf@develop", when='@develop')
depends_on("cbtf@1.9.1.0:9999", when='@1.9.1.0:9999')
# For CBTF with cti
depends_on("cbtf@develop+cti", when='@develop+cti')
depends_on("cbtf@1.9.1.0:9999+cti", when='@1.9.1.0:9999+cti')
# For CBTF with runtime
depends_on("cbtf@develop+runtime", when='@develop+runtime')
depends_on("cbtf@1.9.1.0:9999+runtime", when='@1.9.1.0:9999+runtime')
# For CBTF-KRELL
depends_on("cbtf-krell@develop", when='@develop')
depends_on("cbtf-krell@1.9.1.0:9999", when='@1.9.1.0:9999')
depends_on('cbtf-krell@develop+cti', when='@develop+cti')
depends_on('cbtf-krell@1.9.1.0:9999+cti', when='@1.9.1.0:9999+cti')
depends_on('cbtf-krell@develop+runtime', when='@develop+runtime')
depends_on('cbtf-krell@1.9.1.0:9999+runtime', when='@1.9.1.0:9999+runtime')
parallel = False
build_directory = 'build_cbtf_lanl'
def cmake_args(self):
spec = self.spec
compile_flags = "-O2 -g"
cmake_args = [
'-DCMAKE_CXX_FLAGS=%s' % compile_flags,
'-DCMAKE_C_FLAGS=%s' % compile_flags,
'-DCBTF_DIR=%s' % spec['cbtf'].prefix,
'-DCBTF_KRELL_DIR=%s' % spec['cbtf-krell'].prefix,
'-DMRNET_DIR=%s' % spec['mrnet'].prefix,
'-DXERCESC_DIR=%s' % spec['xerces-c'].prefix,
'-DCMAKE_MODULE_PATH=%s' % join_path(
prefix.share, 'KrellInstitute', 'cmake')]
return cmake_args
|
# Used for performance tests to replace `has_government` lines with scripted triggers
ideology_bundles = {
"has_socialist_government": ["has_government = totalist", "has_government = syndicalist", "has_government = radical_socialist"],
"has_elected_government": ["has_government = social_democrat", "has_government = social_liberal", "has_government = market_liberal", "has_government = social_conservative"],
"has_democratic_government": ["has_government = social_democrat", "has_government = social_liberal", "has_government = market_liberal", "has_government = social_conservative", "has_government = authoritarian_democrat"],
"has_authoritarian_government": ["has_government = authoritarian_democrat", "has_government = paternal_autocrat", "has_government = national_populist"],
"has_dictatorship_government": ["has_government = paternal_autocrat", "has_government = national_populist"],
"has_right_democratic_government": ["has_government = social_conservative", "has_government = market_liberal"],
"has_left_democratic_government": ["has_government = social_liberal", "has_government = social_democrat"]
}
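# Possible helper (an assumption, not part of the original tooling): map a set of
# has_government lines back to the scripted-trigger name that replaces them.
_bundle_by_lines = {frozenset(lines): name for name, lines in ideology_bundles.items()}
def find_bundle(lines):
    return _bundle_by_lines.get(frozenset(lines))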
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
import random
class EpisodicDetDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def _sample_query_from_categories(self, sampled_categories):
# this loop is to sample a single image for every category
# (to be sure each cat gets at least an image)
query_img_paths = []
anns_per_query = []
category_dict = {}
for idx,cat in enumerate(sampled_categories):
image_ids = self.coco.getImgIds(catIds=cat)
img_id = random.choice(image_ids)
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = os.path.join(self.img_dir, file_name)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
all_anns = self.coco.loadAnns(ids=ann_ids)
val_anns = [a for a in all_anns if a['category_id'] in sampled_categories]
anns_per_query.append( val_anns )
query_img_paths.append( img_path )
category_dict[cat] = idx
return query_img_paths, anns_per_query, category_dict
def _sample_support_set(self, cat_id):
img_ids = self.coco_support.getImgIds(catIds=[cat_id])
#img_items = self.coco_support.loadImgs(ids=img_ids)
ann_ids = self.coco_support.getAnnIds(imgIds=img_ids)
anns = self.coco_support.loadAnns(ids=ann_ids)
is_proper_size = lambda a: (a['bbox'][2]>=self.opt.min_bbox_len) & (a['bbox'][3]>=self.opt.min_bbox_len)
is_proper_cat = lambda a:a['category_id']==cat_id
good_anns = [a for a in anns if (is_proper_size(a) & is_proper_cat(a))]
sampled_good_anns = np.random.choice(good_anns, self.k_shots).tolist()
img_paths = []
for s in sampled_good_anns:
img_file_name = self.coco_support.loadImgs([s['image_id']])[0]['file_name']
img_paths.append(os.path.join(self.supp_img_dir, img_file_name))
return img_paths, sampled_good_anns
def _process_query(self, img, cat, augment=False):
height, width = img.shape[0], img.shape[1]
center = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
if self.opt.keep_res:
input_h = (height | self.opt.pad) + 1
input_w = (width | self.opt.pad) + 1
scale = np.array([input_w, input_h], dtype=np.float32)
else:
scale = max(img.shape[0], img.shape[1]) * 1.0
input_h, input_w = self.opt.input_h, self.opt.input_w
flipped = False
if augment:
if not self.opt.not_rand_crop:
scale = scale * np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self._get_border(128, img.shape[1])
h_border = self._get_border(128, img.shape[0])
center[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
center[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
center[0] += scale * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
center[1] += scale * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
scale = scale * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
if np.random.random() < self.opt.flip:
flipped = True
img = img[:, ::-1, :]
center[0] = width - center[0] - 1
trans_input = get_affine_transform(
center, scale, 0, [input_w, input_h])
inp = cv2.warpAffine(img, trans_input,
(input_w, input_h),
flags=cv2.INTER_LINEAR)
#cv2.imshow('inp-{}'.format(cat),inp)
inp = (inp.astype(np.float32) / 255.)
if augment and not self.opt.no_color_aug:
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
inp_dim = (input_h, input_w)
return inp, inp_dim, flipped, center, scale
def _process_all_query_outs(self, query_imgs, anns_per_query, query_info, category_dict):
hm_per_query = []
reg_mask_per_query = []
reg_per_query = []
ind_per_query = []
wh_per_query = []
cs_wh_per_query = []
cs_mask_per_query = []
gt_det_per_query = []
for query_idx, img in enumerate(query_imgs):
            width = img.shape[2]  # img is (C, H, W) after the transpose in _process_query
input_h, input_w = query_info['inp_dim'][query_idx]
output_h = input_h // self.opt.down_ratio
output_w = input_w // self.opt.down_ratio
num_classes = len(query_info['sampled_categories'])
center = query_info['center'][query_idx]
scale = query_info['scale'][query_idx]
trans_output = get_affine_transform(center, scale, 0, [output_w, output_h])
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)
cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
draw_umich_gaussian
gt_det = []
num_objs = min(len(anns_per_query[query_idx]), self.max_objs)
for k in range(num_objs):
ann = anns_per_query[query_idx][k]
bbox = self._coco_box_to_bbox(ann['bbox'])
cls_id = category_dict[ann['category_id']]
if query_info['flipped'][query_idx]:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(hm[cls_id], ct_int, radius)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]
cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1
gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
#cv2.imshow( 'hm-query-{}-cat-{}'.format(query_idx,0), cv2.resize(hm[0], tuple(img.shape[1:3])) )
#cv2.imshow( 'hm-query-{}-cat-{}'.format(query_idx,1), cv2.resize(hm[1], tuple(img.shape[1:3])) )
#cv2.imshow( 'hm-query-{}-cat-{}'.format(query_idx,2), cv2.resize(hm[2], tuple(img.shape[1:3])) )
hm_per_query.append(hm)
reg_mask_per_query.append(reg_mask)
reg_per_query.append(reg)
ind_per_query.append(ind)
wh_per_query.append(wh)
gt_det_per_query.append(gt_det)
cs_wh_per_query.append(cat_spec_wh)
cs_mask_per_query.append(cat_spec_mask)
hm = np.stack(hm_per_query)
reg_mask = np.stack(reg_mask_per_query)
reg = np.stack(reg_per_query)
ind = np.stack(ind_per_query)
wh = np.stack(wh_per_query)
cs_wh_per_query = np.stack(cs_wh_per_query)
cs_mask_per_query = np.stack(cs_mask_per_query)
return hm, reg_mask, reg, ind, wh, gt_det_per_query, cs_wh_per_query, cs_mask_per_query
def _process_support_set(self, support_imgs, support_anns, cat, augment=False):
out_supp = []
for i, (img, ann) in enumerate(zip(support_imgs, support_anns)):
bbox = self._coco_box_to_bbox(ann['bbox'])
x1,y1,x2,y2 = math.floor(bbox[0]), math.floor(bbox[1]), math.ceil(bbox[2]), math.ceil(bbox[3])
#give a little more of context for support
y1 = max(0, y1-self.opt.supp_ctxt)
x1 = max(0, x1-self.opt.supp_ctxt)
y2 = min(y2+self.opt.supp_ctxt, img.shape[0])
x2 = min(x2+self.opt.supp_ctxt, img.shape[1])
inp = img[y1:y2,x1:x2,:]
if augment:
if np.random.random() < self.opt.flip:
inp = inp[:, ::-1, :]
#cv2.imshow('sample-{}-cat-{}'.format(i,cat), inp)
inp = cv2.resize(inp, (int(self.opt.supp_w), int(self.opt.supp_h)))
inp = (inp.astype(np.float32) / 255.)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
out_supp.append(inp)
out_supp = np.stack(out_supp,axis=0)
return out_supp
def _sample_categories(self,num_categories):
cat_ids = random.sample(self._valid_ids, num_categories)
return cat_ids
def __getitem__(self, index):
# 1. sample n categories
sampled_categories = self._sample_categories(self.n_sample_classes)
# 2. sample one image per category and load annotations for each image
query_img_paths, anns_per_query, category_dict = self._sample_query_from_categories(sampled_categories)
# 3. load all the query images and process them
query_imgs = []
query_info = {'flipped': [], 'center': [], 'scale': [], 'inp_dim': [], 'sampled_categories': sampled_categories}
for qi,path in enumerate(query_img_paths):
query_img = cv2.imread(path)
inp, inp_dim, flipped, center, scale = self._process_query(query_img, qi, augment=(self.split=='train'))
query_imgs.append(inp)
query_info['flipped'].append(flipped)
query_info['center'].append(center)
query_info['scale'].append(scale)
query_info['inp_dim'].append(inp_dim)
# 4. sample and process the support set
support_set = []
for ic,cat in enumerate(sampled_categories):
support_paths, support_anns = self._sample_support_set(cat)
supp_imgs = [cv2.imread(img_path) for img_path in support_paths]
supp_imgs = self._process_support_set(supp_imgs, support_anns, ic, augment=(self.split=='train'))
support_set.append(supp_imgs)
support_set = np.stack(support_set,axis=0)
# 5. Process query gt output
hm, reg_mask, reg, ind, wh, gt_det, cs_wh_per_query, cs_mask_per_query = self._process_all_query_outs(query_imgs, anns_per_query, query_info, category_dict)
# 6. stack all together to be size [N,...]
query_imgs = np.stack(query_imgs, axis=0)
#cv2.waitKey(0)
#print(query_imgs.shape, hm.shape, wh.shape, support_set.shape,'**************')
ret = {'input': query_imgs, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,
'supp': support_set, 'cat_spec_wh': cs_wh_per_query, 'cat_spec_mask': cs_mask_per_query}
if self.opt.reg_offset:
ret.update({'reg': reg})
if self.opt.debug > 0 or not self.split == 'train':
gt_det = np.array(gt_det[0], dtype=np.float32) if len(gt_det[0]) > 0 else \
np.zeros((1, 6), dtype=np.float32)
#meta = {'c': center, 's': scale, 'gt_det': gt_det, 'img_id': query_id}
meta = {'c': center, 's': scale, 'gt_det': gt_det}
ret['meta'] = meta
return ret
|
# -*- coding: utf-8 -*-
"""OpenCTI ReportImporter connector main module."""
from reportimporter import ReportImporter
if __name__ == "__main__":
connector = ReportImporter()
connector.start()
|
#!/usr/bin/python3
"""
Copyright 2018-2019 Firmin.Sun (fmsunyh@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -----------------------------------------------------
# @Time : 10/12/2019 3:43 PM
# @Author : Firmin.Sun (fmsunyh@gmail.com)
# @Software: ZJ_AI
# -----------------------------------------------------
# -*- coding: utf-8 -*-
from yacs.config import CfgNode as CN
import os.path as osp
_C = CN(new_allowed=True)
_C.OBDT_01 = CN()
_C.OBDT_01.IP = '192.168.1.179'
_C.OBDT_01.PORT = 26888
_C.OBDT_CLS = CN()
_C.OBDT_CLS.IP = '192.168.1.179'
_C.OBDT_CLS.PORT = 16888
_C.PATH = osp.join(osp.dirname(__file__),"..", 'data','config.yaml')
cfg = _C
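# Illustrative sketch (not in the original file): merge overrides from the YAML
# file referenced by cfg.PATH before use, if that file exists.
# if osp.exists(cfg.PATH):
#     cfg.merge_from_file(cfg.PATH)
# print(cfg.OBDT_01.IP, cfg.OBDT_01.PORT)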
|
import bitwise as bw
class TestHalfAdder:
def test_HalfAdder(self):
input_1 = bw.wire.Wire()
input_2 = bw.wire.Wire()
carry_out = bw.wire.Wire()
sum_ = bw.wire.Wire()
a = bw.arithmetic.HalfAdder(input_1, input_2, carry_out, sum_)
input_1.value = 0
input_2.value = 0
assert (carry_out.value, sum_.value) == (0, 0)
input_1.value = 0
input_2.value = 1
assert (carry_out.value, sum_.value) == (0, 1)
input_1.value = 1
input_2.value = 0
assert (carry_out.value, sum_.value) == (0, 1)
input_1.value = 1
input_2.value = 1
assert (carry_out.value, sum_.value) == (1, 0)
print(a.__doc__)
print(a)
a(
a=0,
b=0,
carry_out=None,
sum=None
)
assert (carry_out.value, sum_.value) == (0, 0)
|
import os
import numpy as np
import saber
np.seterr(all="ignore")
# Fill in these paths for your own project before running (placeholders below)
workdir = ''
drain_shape = os.path.join(workdir, 'gis_inputs', '')
gauge_shape = ''
obs_data_dir = ''
hist_sim_nc = ''
# COLOMBIA
# workdir = '/Users/rchales/data/saber/colombia-magdalena'
# drain_shape = os.path.join(workdir, 'gis_inputs', 'magdalena_dl_attrname_xy.json')
# gauge_shape = os.path.join(workdir, 'gis_inputs', 'ideam_stations.json')
# obs_data_dir = os.path.join(workdir, 'data_inputs', 'obs_csvs')
# hist_sim_nc = os.path.join(workdir, 'data_inputs', 'south_america_era5_qout.nc')
# Prepare the working directory - only need to do this step 1x ever
# saber.prep.scaffold_working_directory(workdir)
# Scripts not provided. Consult README.md for instructions
# Create the gauge_table.csv and drain_table.csv
# Put the historical simulation netCDF in the right folder
# Put the observed data csv files in the data_inputs/obs_csvs folder
# Prepare the observation and simulation data - Only need to do this step 1x ever
print('Preparing data')
saber.prep.historical_simulation(workdir)
# Generate the assignments table
print('Generate Assignment Table')
assign_table = saber.table.gen(workdir)
saber.table.cache(workdir, assign_table)
# Generate the clusters using the historical simulation data
print('Generate Clusters')
saber.cluster.generate(workdir)
assign_table = saber.cluster.summarize(workdir, assign_table)
saber.table.cache(workdir, assign_table)
# Assign basins which are gauged and propagate those gauges
print('Making Assignments')
assign_table = saber.assign.gauged(assign_table)
assign_table = saber.assign.propagation(assign_table)
assign_table = saber.assign.clusters_by_dist(assign_table)
# Cache the assignments table with the updates
saber.table.cache(workdir, assign_table)
# Generate GIS files so you can go explore your progress graphically
print('Generate GIS files')
saber.gis.clip_by_assignment(workdir, assign_table, drain_shape)
saber.gis.clip_by_cluster(workdir, assign_table, drain_shape)
saber.gis.clip_by_unassigned(workdir, assign_table, drain_shape)
# Compute the corrected simulation data
print('Starting Calibration')
saber.calibrate_region(workdir, assign_table)
# run the validation study
print('Performing Validation')
saber.validate.sample_gauges(workdir, overwrite=True)
saber.validate.run_series(workdir, drain_shape, obs_data_dir)
vtab = saber.validate.gen_val_table(workdir)
saber.gis.validation_maps(workdir, gauge_shape, vtab)
|
#Created by Josh Tseng, 2 June 2020
#This file illustrates that you can use other data types in dictionaries too
#Imagine this dictionary contains user details for a bank
#The format for data is as follows:
#User ID: [Name, Phone number, Amount of money in bank in dollars]
user_database = {
"001": ["John Tan", "92234456", 0.0],
"002": ["Raj Sara", "82284545", 100.5],
"003": ["Katrina Brown", "99493345", 102.23]
}
#Print entire dictionary
print(user_database)
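#Example additions (illustrative, not part of the original lesson):
#look up a user by ID and update the balance stored in the list value
print(user_database["002"][0])      #prints the name: Raj Sara
user_database["002"][2] += 25.0     #deposit $25 into this account
print(user_database["002"])         #the balance is now 125.5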
|
from abc import ABCMeta, abstractmethod
class Observer(object):
__metaclass__ = ABCMeta
@abstractmethod
def on_next(self, value):
return NotImplemented
@abstractmethod
def on_error(self, error):
return NotImplemented
@abstractmethod
def on_completed(self):
return NotImplemented
|
import numpy as np
def generate_basic_anchors(sizes, base_size=16):
"""
:param sizes: [(h1, w1), (h2, w2)...]
:param base_size
:return:
"""
anchor_num=10
assert(anchor_num==len(sizes))
base_anchor=np.array([0, 0, base_size-1, base_size-1], np.int32)
anchors=np.zeros((len(sizes), 4), np.int32)
index=0
for h, w in sizes:
anchors[index]=scale_anchor(base_anchor, h, w)
index+=1
return anchors
def scale_anchor(anchor, h, w):
x_ctr=(anchor[0]+anchor[2])*0.5
y_ctr=(anchor[1]+anchor[3])*0.5
scaled_anchor=anchor.copy()
scaled_anchor[0]=x_ctr-w/2
scaled_anchor[2]=x_ctr+w/2
scaled_anchor[1]=y_ctr-h/2
scaled_anchor[3]=y_ctr+h/2
return scaled_anchor
def apply_deltas_to_anchors(boxes_delta, anchors):
"""
:return [l t r b]
"""
anchor_y_ctr=(anchors[:, 1]+anchors[:, 3])/2.
anchor_h=anchors[:, 3]-anchors[:, 1]+1.
global_coords=np.zeros_like(boxes_delta, np.float32)
global_coords[:, 1]=np.exp(boxes_delta[:, 1])*anchor_h
global_coords[:, 0]=boxes_delta[:, 0]*anchor_h+anchor_y_ctr-global_coords[:, 1]/2.
return np.hstack((anchors[:, [0]], global_coords[:, [0]], anchors[:, [2]],
global_coords[:, [0]]+global_coords[:, [1]])).astype(np.float32)
def text_anchors():
"""
anchor [l t r b]
"""
heights=[11, 16, 23, 33, 48, 68, 97, 139, 198, 283]
widths=[16]
sizes=[]
for h in heights:
for w in widths:
sizes.append((h, w))
return generate_basic_anchors(sizes)
def locate_anchors(feat_map_size, feat_stride):
"""
return all anchors on the feature map
"""
    basic_anchors_=text_anchors()
anchors=np.zeros((basic_anchors_.shape[0]*feat_map_size[0]*feat_map_size[1], 4), np.int32)
index=0
for y_ in range(feat_map_size[0]):
for x_ in range(feat_map_size[1]):
shift=np.array([x_, y_, x_, y_])*feat_stride
anchors[index:index+basic_anchors_.shape[0], :]=basic_anchors_+shift
index+=basic_anchors_.shape[0]
return anchors
if __name__ == '__main__':
    anchors = text_anchors()
print(anchors)
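    # Illustrative addition (an assumption, not in the original file): tile the
    # base anchors over a hypothetical 32x32 feature map with a 16-pixel stride.
    feat_anchors = locate_anchors((32, 32), 16)
    print(feat_anchors.shape)  # expected: (10 * 32 * 32, 4)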
|
import sys
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
S = ns()
d = deque(S)
Q = ni()
cur = 0
for _ in range(Q):
q = list(map(str, input().split()))
t = int(q[0])
if t == 1:
cur = 1 - cur
if t == 2:
f, c = int(q[1]), q[2]
if f == 1:
if cur == 0:
d.appendleft(c)
else:
d.append(c)
else:
if cur == 0:
d.append(c)
else:
d.appendleft(c)
if not cur:
d.reverse()
ans = []
while d:
ans.append(d.pop())
print("".join(ans))
|
async def async_func():
return 42
|
import os
import pytest
from liberapay.testing import Harness
@pytest.mark.skipif(
os.environ.get('LIBERAPAY_PROFILING') != 'yes',
reason="these tests are only for profiling",
)
class TestPerformance(Harness):
def test_performance_of_homepage(self):
for i in range(1000):
self.client.GET('/')
def test_performance_when_serving_static_file(self):
for i in range(10000):
self.client.GET('/assets/avatar-default.png')
|
"""
defines:
- split_line_elements(bdf_model, eids, neids=2,
eid_start=1, nid_start=1)
"""
import numpy as np
from pyNastran.bdf.bdf import read_bdf
def split_line_elements(bdf_model, eids, neids=2,
eid_start=1, nid_start=1):
"""
Splits a set of element ids
Parameters
----------
eids : List[int]
element ids to split
    neids : int; default=2
        how many elements should a single bar be split into
        min=2
eid_start : int; default=1
the starting element id
nid_start : int; default=1
the starting node id
Returns
-------
eids_out : List[int]
the list of elements that have been added
    eid_end : int
        the next unused element id after the split
    nid_end : int
        the next unused node id after the split
A-----*-----B; neids=2
A--*--*--*--B; neids=4
"""
eids_out = []
assert neids >= 2, neids
dx = np.linspace(0., 1., num=neids+1)
for eid in eids:
elem = bdf_model.elements[eid]
n1, n2 = elem.nodes
node1 = bdf_model.nodes[n1]
node2 = bdf_model.nodes[n2]
cp = node1.cp
assert node1.cp == node2.cp
assert node1.cd == node2.cd
xyz1 = node1.xyz
xyz2 = node2.xyz
dxyz = xyz2 - xyz1
etype = elem.type
if etype in ['CBAR', 'CBEAM']:
pa = elem.pa
pb = 0
elem.comment = ''
comment = str(elem) + '\n'
for ieid in range(neids):
dxi = dx[ieid + 1]
new_xyz = xyz1 + dxyz * dxi
if dxi < 1.:
new_node = nid_start
nid_start += 1
bdf_model.add_grid(new_node, new_xyz, cp=cp)
else:
new_node = n2
if etype in ['CBAR', 'CBEAM']:
pb = elem.pb
if etype == 'CONROD':
nids = [n1, new_node]
bdf_model.add_conrod(eid_start, elem.mid, nids, elem.A, j=elem.j,
c=elem.c, nsm=elem.nsm, comment=comment)
elif etype == 'CROD':
nids = [n1, new_node]
bdf_model.add_crod(eid_start, elem.pid, nids, comment=comment)
elif etype == 'CBAR':
ga = n1
gb = new_node
bdf_model.add_cbar(eid_start, elem.pid, [ga, gb], elem.x, elem.g0, offt=elem.offt,
pa=pa, pb=pb, wa=elem.wa, wb=elem.wb, comment=comment)
pa = 0
elif etype == 'CBEAM':
ga = n1
gb = new_node
bdf_model.add_cbeam(eid_start, elem.pid, [ga, gb], elem.x, elem.g0,
offt=elem.offt, bit=elem.bit,
pa=pa, pb=pb,
wa=elem.wa, wb=elem.wb, sa=elem.sa, sb=elem.sb,
comment=comment)
pa = 0
else:
raise NotImplementedError(elem)
n1 = new_node
eids_out.append(eid_start)
eid_start += 1
comment = str(eid)
del bdf_model.elements[eid]
return eids_out, eid_start, nid_start
def split_elements(bdf_filename):
"""unimplemented method for splitting elements"""
model = read_bdf(bdf_filename, xref=True)
for eid, elem in model.elements.items():
if elem.type == 'CTRIA3':
#
# 3
# /|\
# / | \
# / | \
# / 4 \
# / / \ \
# / / \ \
# 1-------------2
#
p1, p2, p3 = elem.get_node_positions()
#centroid = (p1 + p2 + p3) / 3.
#
# 3
# /|\
# / | \
# / | \
# / | \
# 1----4----2
#
elif elem.type == 'CQUAD4':
#
#
# 4---------3
# | \ / |
# | \ / |
# | 5 |
# | / \ |
# |/ \|
# 1---------2
#
# the same thing shown in a rotated view
# 4
# /| \
# / | \
# / | \
# / | \
# 1---------5---------3
# \ | /
# \ | /
# \ | /
# \ | /
# 2
#
# max_area, taper_ratio, area_ratio
# 4----7----3
# | | |
# | | |
# 8----9----6
# | | |
# | | |
# 1----4----2
#
# max_interior_angle
# 4---------3
# / \ /
# / \ /
# / \ /
# / \ /
# 1---------2
#
# taper_ratio
# 4--6--3
# / | \
# / | \
# / | \
# 1------5------2
#
# taper_ratio
# 4------3
# / \ / \
# / \ / \
# / \/ \
# 1-------5------2
#
# taper_ratio
# 4------3
# / \ \
# / \ \
# / \ \
# 1-------5------2
pass
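if __name__ == '__main__':  # pragma: no cover
    # Minimal usage sketch (an assumption, not part of the original module):
    # split three hypothetical line elements of 'model.bdf' into 4 pieces each;
    # 'model.bdf' and the element/starting ids below are placeholders.
    bdf_model = read_bdf('model.bdf', xref=True)
    new_eids, next_eid, next_nid = split_line_elements(
        bdf_model, eids=[1, 2, 3], neids=4, eid_start=1001, nid_start=5001)
    print(new_eids, next_eid, next_nid)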
|
import json
import logging
from django.test import TestCase, Client
from django.urls import reverse
from ..models import User
logger = logging.getLogger(__name__)
class TestsTheme(TestCase):
fixtures = ['f1']
def setUp(self):
# Every test needs a client.
self.client = Client()
self.test_user = User.objects.get(pk=1)
self.client.force_login(self.test_user)
# def test_get_theme(self):
# # Get the page, if not super or board, page is forbidden
# response = self.client.get(reverse('registration:theme'), secure=True)
# self.assertEqual(response.status_code, 404)
def test_post_good(self):
response = self.client.post(reverse('registration:theme'), {'theme': False, }, secure=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
logging.debug(content)
self.assertEqual(content['status'], 'SUCCESS')
u = User.objects.get(pk=1)
self.assertFalse(u.dark_theme)
def test_post_error(self):
response = self.client.post(reverse('registration:theme'), {'theme': 'dark', }, secure=True)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
logging.debug(content)
self.assertEqual(content['theme'], ['Must be a valid boolean.'])
u = User.objects.get(pk=1)
self.assertTrue(u.dark_theme)
|
"""
unrelated.py: examples with ``super()`` in a sibling class.
``U`` is unrelated (does not subclass ``Root``)
Calling ``ping`` on an instance of ``U`` fails::
# tag::UNRELATED_DEMO_1[]
>>> u = U()
>>> u.ping()
Traceback (most recent call last):
...
AttributeError: 'super' object has no attribute 'ping'
# end::UNRELATED_DEMO_1[]
But if ``U`` is part of a cooperative arrangement of base classes,
its ``ping`` method works::
# tag::UNRELATED_DEMO_2[]
>>> leaf2 = LeafUA()
>>> leaf2.ping()
<instance of LeafUA>.ping() in LeafUA
<instance of LeafUA>.ping() in U
<instance of LeafUA>.ping() in A
<instance of LeafUA>.ping() in Root
>>> LeafUA.__mro__ # doctest:+NORMALIZE_WHITESPACE
(<class 'diamond2.LeafUA'>, <class 'diamond2.U'>,
<class 'diamond.A'>, <class 'diamond.Root'>, <class 'object'>)
# end::UNRELATED_DEMO_2[]
Here ``U.ping`` is never called because ``Root.ping`` does not call ``super``.
>>> o6 = LeafAU()
>>> o6.ping()
<instance of LeafAU>.ping() in LeafAU
<instance of LeafAU>.ping() in A
<instance of LeafAU>.ping() in Root
>>> LeafAU.__mro__ # doctest:+NORMALIZE_WHITESPACE
(<class 'diamond2.LeafAU'>, <class 'diamond.A'>, <class 'diamond.Root'>,
<class 'diamond2.U'>, <class 'object'>)
"""
# tag::DIAMOND_CLASSES[]
from diamond import A # <1>
class U(): # <2>
def ping(self):
print(f'{self}.ping() in U')
super().ping() # <3>
class LeafUA(U, A): # <4>
def ping(self):
print(f'{self}.ping() in LeafUA')
super().ping()
# end::DIAMOND_CLASSES[]
class LeafAU(A, U):
def ping(self):
print(f'{self}.ping() in LeafAU')
super().ping()
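# For reference, a rough sketch of the ``diamond`` module the doctests above
# assume (an approximation, not the original file): ``Root.ping`` does not call
# ``super()``, while ``A.ping`` does, which explains the two MRO traces.
#
# class Root:
#     def ping(self):
#         print(f'{self}.ping() in Root')
#     def __repr__(self):
#         return f'<instance of {type(self).__name__}>'
#
# class A(Root):
#     def ping(self):
#         print(f'{self}.ping() in A')
#         super().ping()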
|
from celery import Celery
from event_manager.get_app import get_app
from kombu import Exchange, Queue
broker_url = 'amqp://rabbitmq:5672'
app = get_app('producer', broker_url, tuple())
|
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_fast_connect_provider_service_key_facts
short_description: Fetches details about a FastConnectProviderServiceKey resource in Oracle Cloud Infrastructure
description:
- Fetches details about a FastConnectProviderServiceKey resource in Oracle Cloud Infrastructure
- Gets the specified provider service key's information. Use this operation to validate a
provider service key. An invalid key returns a 404 error.
version_added: "2.9"
author: Oracle (@oracle)
options:
provider_service_id:
description:
- The OCID of the provider service.
type: str
required: true
provider_service_key_name:
description:
- The provider service key that the provider gives you when you set up a virtual circuit connection
from the provider to Oracle Cloud Infrastructure. You can set up that connection and get your
provider service key at the provider's website or portal. For the portal location, see the `description`
attribute of the L(FastConnectProviderService,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/FastConnectProviderService/).
type: str
required: true
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_name_option ]
"""
EXAMPLES = """
- name: Get a specific fast_connect_provider_service_key
oci_network_fast_connect_provider_service_key_facts:
provider_service_id: ocid1.providerservice.oc1..xxxxxxEXAMPLExxxxxx
provider_service_key_name: provider_service_key_name_example
"""
RETURN = """
fast_connect_provider_service_key:
description:
- FastConnectProviderServiceKey resource
returned: on success
type: complex
contains:
name:
description:
- The service key that the provider gives you when you set up a virtual circuit connection
from the provider to Oracle Cloud Infrastructure. Use this value as the `providerServiceKeyName`
query parameter for
L(GetFastConnectProviderServiceKey,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/20160918/FastConnectProviderServiceKey/GetFastConnectProviderServiceKey).
returned: on success
type: string
sample: name_example
bandwidth_shape_name:
description:
- The provisioned data rate of the connection. To get a list of the
available bandwidth levels (that is, shapes), see
L(ListFastConnectProviderServiceVirtualCircuitBandwidthShapes,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/iaas/20160918/FastConnectProviderService/ListFastConnectProviderVirtualCircuitBandwidthShapes).
- "Example: `10 Gbps`"
returned: on success
type: string
sample: 10 Gbps
peering_location:
description:
- The provider's peering location.
returned: on success
type: string
sample: peering_location_example
sample: {
"name": "name_example",
"bandwidth_shape_name": "10 Gbps",
"peering_location": "peering_location_example"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.core import VirtualNetworkClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class FastConnectProviderServiceKeyFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"provider_service_id",
"provider_service_key_name",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_fast_connect_provider_service_key,
provider_service_id=self.module.params.get("provider_service_id"),
provider_service_key_name=self.module.params.get(
"provider_service_key_name"
),
)
FastConnectProviderServiceKeyFactsHelperCustom = get_custom_class(
"FastConnectProviderServiceKeyFactsHelperCustom"
)
class ResourceFactsHelper(
FastConnectProviderServiceKeyFactsHelperCustom,
FastConnectProviderServiceKeyFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
provider_service_id=dict(type="str", required=True),
provider_service_key_name=dict(type="str", required=True),
name=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="fast_connect_provider_service_key",
service_client_class=VirtualNetworkClient,
namespace="core",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(fast_connect_provider_service_key=result)
if __name__ == "__main__":
main()
|
"""Validator interface."""
import abc
from typing import Any # noqa # pylint: disable=unused-import
import six
@six.add_metaclass(abc.ABCMeta) # pylint: disable=R0903,W0232
class Validator(object):
# disabled checks for pylint
# R0903 - :too-few-public-methods
# W0232 - :no-init
"""Validator Interface."""
@abc.abstractmethod
def validate(self, data):
# type: (object) -> bool
"""Validate data and return True or False."""
|
# RestDF Default settings
PORT: int = 8000
HOST: str = 'localhost'
DEBUG: bool = False
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class UnpackagedExtensionData(Model):
"""UnpackagedExtensionData.
:param categories:
:type categories: list of str
:param description:
:type description: str
:param display_name:
:type display_name: str
:param draft_id:
:type draft_id: str
:param extension_name:
:type extension_name: str
:param installation_targets:
:type installation_targets: list of :class:`InstallationTarget <gallery.v4_1.models.InstallationTarget>`
:param is_converted_to_markdown:
:type is_converted_to_markdown: bool
:param pricing_category:
:type pricing_category: str
:param product:
:type product: str
:param publisher_name:
:type publisher_name: str
:param qn_aEnabled:
:type qn_aEnabled: bool
:param referral_url:
:type referral_url: str
:param repository_url:
:type repository_url: str
:param tags:
:type tags: list of str
:param version:
:type version: str
:param vsix_id:
:type vsix_id: str
"""
_attribute_map = {
'categories': {'key': 'categories', 'type': '[str]'},
'description': {'key': 'description', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'draft_id': {'key': 'draftId', 'type': 'str'},
'extension_name': {'key': 'extensionName', 'type': 'str'},
'installation_targets': {'key': 'installationTargets', 'type': '[InstallationTarget]'},
'is_converted_to_markdown': {'key': 'isConvertedToMarkdown', 'type': 'bool'},
'pricing_category': {'key': 'pricingCategory', 'type': 'str'},
'product': {'key': 'product', 'type': 'str'},
'publisher_name': {'key': 'publisherName', 'type': 'str'},
'qn_aEnabled': {'key': 'qnAEnabled', 'type': 'bool'},
'referral_url': {'key': 'referralUrl', 'type': 'str'},
'repository_url': {'key': 'repositoryUrl', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[str]'},
'version': {'key': 'version', 'type': 'str'},
'vsix_id': {'key': 'vsixId', 'type': 'str'}
}
def __init__(self, categories=None, description=None, display_name=None, draft_id=None, extension_name=None, installation_targets=None, is_converted_to_markdown=None, pricing_category=None, product=None, publisher_name=None, qn_aEnabled=None, referral_url=None, repository_url=None, tags=None, version=None, vsix_id=None):
super(UnpackagedExtensionData, self).__init__()
self.categories = categories
self.description = description
self.display_name = display_name
self.draft_id = draft_id
self.extension_name = extension_name
self.installation_targets = installation_targets
self.is_converted_to_markdown = is_converted_to_markdown
self.pricing_category = pricing_category
self.product = product
self.publisher_name = publisher_name
self.qn_aEnabled = qn_aEnabled
self.referral_url = referral_url
self.repository_url = repository_url
self.tags = tags
self.version = version
self.vsix_id = vsix_id
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import paddlers as pdrs
from paddlers import transforms as T
# Download and extract the optic disc segmentation dataset
optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz'
pdrs.utils.download_and_decompress(optic_dataset, path='./')
# Define the transforms used for training and validation
# API reference: https://github.com/PaddlePaddle/paddlers/blob/develop/docs/apis/transforms/transforms.md
train_transforms = T.Compose([
T.Resize(target_size=512),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
eval_transforms = T.Compose([
T.Resize(target_size=512),
T.Normalize(
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
# Define the datasets used for training and validation
# API reference: https://github.com/PaddlePaddle/paddlers/blob/develop/docs/apis/datasets.md
train_dataset = pdrs.datasets.SegDataset(
data_dir='optic_disc_seg',
file_list='optic_disc_seg/train_list.txt',
label_list='optic_disc_seg/labels.txt',
transforms=train_transforms,
num_workers=0,
shuffle=True)
eval_dataset = pdrs.datasets.SegDataset(
data_dir='optic_disc_seg',
file_list='optic_disc_seg/val_list.txt',
label_list='optic_disc_seg/labels.txt',
transforms=eval_transforms,
num_workers=0,
shuffle=False)
# Initialize the model and start training
# Training metrics can be viewed with VisualDL, see https://github.com/PaddlePaddle/paddlers/blob/develop/docs/visualdl.md
num_classes = len(train_dataset.labels)
model = pdrs.tasks.FarSeg(num_classes=num_classes)
# API reference: https://github.com/PaddlePaddle/paddlers/blob/develop/docs/apis/models/semantic_segmentation.md
# Parameter descriptions and tuning guide: https://github.com/PaddlePaddle/paddlers/blob/develop/docs/parameters.md
model.train(
num_epochs=10,
train_dataset=train_dataset,
train_batch_size=4,
eval_dataset=eval_dataset,
learning_rate=0.01,
pretrain_weights=None,
save_dir='output/farseg')
|
import sqlite3
from model.output_config_model import OutputConfigModel
class OutputConfigDao:
def __init__(self):
self.conn = sqlite3.connect('mybatis_generator.db')
def close(self):
self.conn.cursor().close()
self.conn.close()
def add_config(self, output_config):
cur = self.conn.cursor()
cur.execute(
"insert into tb_output_config(src_id, model_pkg, sql_map_pkg, mapper_pkg, out_dir) values (?, ?, ?, ?, ?)",
(output_config.src_id, output_config.model_pkg, output_config.sql_map_pkg,
output_config.mapper_pkg
, output_config.out_dir))
self.conn.commit()
def update(self, output_config):
cur = self.conn.cursor()
cur.execute(
"update tb_output_config set model_pkg=?, sql_map_pkg=?, mapper_pkg=?, out_dir=? where src_id=?",
(output_config.model_pkg, output_config.sql_map_pkg, output_config.mapper_pkg,
output_config.out_dir, output_config.src_id))
self.conn.commit()
def get_by_id(self, src_id):
cur = self.conn.cursor()
result = cur.execute("select * from tb_output_config where src_id=?", str(src_id)).fetchone()
if result is not None:
output_config = OutputConfigModel()
output_config.src_id = result[0]
output_config.model_pkg = result[1]
output_config.sql_map_pkg = result[2]
output_config.mapper_pkg = result[3]
output_config.out_dir = result[4]
return output_config
else:
return result
def del_by_id(self, src_id):
cur = self.conn.cursor()
cur.execute("delete from tb_output_config where src_id=?", str(src_id))
self.conn.commit()
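if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; assumes mybatis_generator.db and
    # its tb_output_config table already exist, and that OutputConfigModel
    # exposes the attributes used below).
    dao = OutputConfigDao()
    cfg = OutputConfigModel()
    cfg.src_id = 1
    cfg.model_pkg = 'com.example.model'
    cfg.sql_map_pkg = 'com.example.mapper.xml'
    cfg.mapper_pkg = 'com.example.mapper'
    cfg.out_dir = '/tmp/generated'
    dao.add_config(cfg)
    print(dao.get_by_id(1))
    dao.close()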
|
import os
from flask import Flask
from etc.conf.env import ENV_CONFIG
__basedir__ = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
config = None
env = os.environ.get('ENV', 'local')
logged_in_user = {}
# Loading configuration
if env in ENV_CONFIG:
app.config.from_object(ENV_CONFIG[env])
config = app.config
config['APPLICATION_ROOT'] = __basedir__
else:
print("Invalid env name: {}. Available environments are: {}".format(env, ', '.join(ENV_CONFIG.keys())))
__all__ = ["config", "env", "app"]
|
x = y = 1
x = y = z = 1
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 9 20:58:07 2018
@author: JinJheng
"""
a=int(input())
if a%400==0:
print(a,"is a leap year.")
elif a%100==0 or a%4!=0:
print(a,"is not a leap year.")
elif a%4==0:
print(a,"is a leap year.")
else:
print()
|
#!/usr/bin/env python
#
# Copyright 2018 Twitch Interactive, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import print_function
import sys
import clientcompat_pb2
import clientcompat_pb2_twirk
def main():
req = read_request()
client = clientcompat_pb2_twirk.CompatServiceClient(req.service_address)
try:
resp = do_request(client, req)
sys.stdout.write(resp.SerializeToString())
except clientcompat_pb2_twirk.twirkException as e:
sys.stderr.write(e.code)
def read_request():
input_str = sys.stdin.read()
return clientcompat_pb2.ClientCompatMessage.FromString(input_str)
def do_request(client, req):
if req.method == clientcompat_pb2.ClientCompatMessage.NOOP:
input_type = clientcompat_pb2.Empty
method = client.noop_method
elif req.method == clientcompat_pb2.ClientCompatMessage.METHOD:
input_type = clientcompat_pb2.Req
method = client.method
req = input_type.FromString(req.request)
return method(req)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('atlas_teleop')
import rospy
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64, Int8
import math
class DrcVehicleTeleop:
def __init__(self):
rospy.init_node('drc_vehicle_teleop')
self.joy_sub = rospy.Subscriber('joy', Joy, self.joy_cb)
self.robot_enter_car = rospy.Publisher('drc_world/robot_enter_car', Pose)
self.robot_exit_car = rospy.Publisher('drc_world/robot_exit_car', Pose)
self.brake_pedal = rospy.Publisher('drc_vehicle/brake_pedal/cmd', Float64)
self.gas_pedal = rospy.Publisher('drc_vehicle/gas_pedal/cmd', Float64)
self.hand_brake = rospy.Publisher('drc_vehicle/hand_brake/cmd', Float64)
self.hand_wheel = rospy.Publisher('drc_vehicle/hand_wheel/cmd', Float64)
self.direction = rospy.Publisher('drc_vehicle/direction/cmd', Int8)
self.AXIS_HAND_BRAKE = 0
self.AXIS_BRAKE_PEDAL = 1
self.AXIS_DIRECTION = 2
self.AXIS_GAS_PEDAL = 3
self.AXIS_HAND_WHEEL = 4
self.BUTTON_ENTER_CAR = 0
self.BUTTON_EXIT_CAR = 1
def joy_cb(self, data):
if data.buttons[self.BUTTON_ENTER_CAR] == 1:
self.robot_enter_car.publish(Pose())
elif data.buttons[self.BUTTON_EXIT_CAR] == 1:
self.robot_exit_car.publish(Pose())
else:
self.hand_brake.publish(Float64(data.axes[self.AXIS_HAND_BRAKE]))
self.brake_pedal.publish(Float64(data.axes[self.AXIS_BRAKE_PEDAL]))
self.gas_pedal.publish(Float64(data.axes[self.AXIS_GAS_PEDAL]))
direction = -1 if data.axes[self.AXIS_DIRECTION] < 0.5 else 1
self.direction.publish(Int8(direction))
hand_wheel = (data.axes[self.AXIS_HAND_WHEEL] - 0.5) * math.pi
self.hand_wheel.publish(Float64(hand_wheel))
def run(self):
rospy.spin()
if __name__ == '__main__':
d = DrcVehicleTeleop()
d.run()
|
import copy
import io
import json
import logging
import math
import os
import random
import threading
import tempfile
import time
from pathlib import Path
import boto3
import cv2
import ffmpeg
from PIL import Image
from tqdm import tqdm
from ..api import API
from ..common import (
annotation_status_str_to_int, project_type_int_to_str,
project_type_str_to_int, user_role_int_to_str, user_role_str_to_int
)
from ..exceptions import (
SABaseException, SAExistingProjectNameException,
SANonExistingProjectNameException
)
from .annotation_classes import (
create_annotation_classes_from_classes_json, fill_class_and_attribute_ids,
get_annotation_classes_name_to_id, search_annotation_classes
)
from .project import get_project_metadata
from .users import get_team_contributor_metadata
from .images import search_images
logger = logging.getLogger("superannotate-python-sdk")
_api = API.get_instance()
_NUM_THREADS = 10
def create_project(project_name, project_description, project_type):
"""Create a new project in the team.
:param project_name: the new project's name
:type project_name: str
:param project_description: the new project's description
:type project_description: str
:param project_type: the new project type, Vector or Pixel.
:type project_type: str
:return: dict object metadata the new project
:rtype: dict
"""
try:
get_project_metadata(project_name)
except SANonExistingProjectNameException:
pass
else:
raise SAExistingProjectNameException(
0, "Project with name " + project_name +
" already exists. Please use unique names for projects to use with SDK."
)
project_type = project_type_str_to_int(project_type)
data = {
"team_id": str(_api.team_id),
"name": project_name,
"description": project_description,
"status": 0,
"type": project_type
}
response = _api.send_request(
req_type='POST', path='/project', json_req=data
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't create project " + response.text
)
res = response.json()
logger.info(
"Created project %s (ID %s) with type %s", res["name"], res["id"],
project_type_int_to_str(res["type"])
)
return res
def create_project_like_project(
project_name,
from_project,
project_description=None,
copy_annotation_classes=True,
copy_settings=True,
copy_workflow=True,
copy_project_contributors=False
):
"""Deprecated. Function name changed to clone_project.
"""
logger.warning("Deprecated. Function name changed to clone_project.")
clone_project(
project_name, from_project, project_description,
copy_annotation_classes, copy_settings, copy_workflow,
copy_project_contributors
)
def clone_project(
project_name,
from_project,
project_description=None,
copy_annotation_classes=True,
copy_settings=True,
copy_workflow=True,
copy_project_contributors=False
):
"""Create a new project in the team using annotation classes and settings from from_project.
:param project_name: new project's name
:type project_name: str
:param from_project: the name or metadata of the project being used for duplication
:type from_project: str or dict
:param project_description: the new project's description. If None, from_project's
description will be used
:type project_description: str
:param copy_annotation_classes: enables copying annotation classes
:type copy_annotation_classes: bool
:param copy_settings: enables copying project settings
:type copy_settings: bool
:param copy_workflow: enables copying project workflow
:type copy_workflow: bool
:param copy_project_contributors: enables copying project contributors
:type copy_project_contributors: bool
:return: dict object metadata of the new project
:rtype: dict
"""
try:
get_project_metadata(project_name)
except SANonExistingProjectNameException:
pass
else:
raise SAExistingProjectNameException(
0, "Project with name " + project_name +
" already exists. Please use unique names for projects to use with SDK."
)
if not isinstance(from_project, dict):
from_project = get_project_metadata(from_project)
if project_description is None:
project_description = from_project["description"]
data = {
"team_id": str(_api.team_id),
"name": project_name,
"description": project_description,
"status": 0,
"type": from_project["type"]
}
response = _api.send_request(
req_type='POST', path='/project', json_req=data
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't create project " + response.text
)
res = response.json()
logger.info(
"Created project %s (ID %s) with type %s", res["name"], res["id"],
project_type_int_to_str(res["type"])
)
if copy_settings:
set_project_settings(res, get_project_settings(from_project))
if copy_annotation_classes:
create_annotation_classes_from_classes_json(
res, search_annotation_classes(from_project, return_metadata=True)
)
if copy_workflow:
set_project_workflow(res, get_project_workflow(from_project))
if copy_project_contributors:
for user in from_project["users"]:
share_project(
res, user["user_id"], user_role_int_to_str(user["user_role"])
)
return res
def delete_project(project):
"""Deletes the project
:param project: project name or metadata of the project to be deleted
:type project: str or dict
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
team_id, project_id = project["team_id"], project["id"]
params = {"team_id": team_id}
response = _api.send_request(
req_type='DELETE', path=f'/project/{project_id}', params=params
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't delete project " + response.text
)
logger.info("Successfully deleted project %s.", project["name"])
def rename_project(project, new_name):
"""Renames the project
:param project: project name or metadata of the project to be deleted
:type project: str or dict
:param new_name: project's new name
:type new_name: str
"""
try:
get_project_metadata(new_name)
except SANonExistingProjectNameException:
pass
else:
raise SAExistingProjectNameException(
0, "Project with name " + new_name +
" already exists. Please use unique names for projects to use with SDK."
)
if not isinstance(project, dict):
project = get_project_metadata(project)
team_id, project_id = project["team_id"], project["id"]
params = {"team_id": team_id}
json_req = {"name": new_name}
response = _api.send_request(
req_type='PUT',
path=f'/project/{project_id}',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't rename project " + response.text
)
logger.info(
"Successfully renamed project %s to %s.", project["name"], new_name
)
def get_project_image_count(project):
"""Returns number of images in the project.
:param project: project name or metadata of the project
:type project: str or dict
:return: number of images in the project
:rtype: int
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
team_id, project_id = project["team_id"], project["id"]
params = {'team_id': team_id}
response = _api.send_request(
req_type='GET',
path=f'/reporting/project/{project_id}/overview',
params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get project image count " + response.text
)
return response.json()["total_images"]
def upload_video_to_project(
project,
video_path,
target_fps=None,
start_time=0.0,
end_time=None,
annotation_status="NotStarted",
image_quality_in_editor=None
):
"""Uploads image frames from video to platform. Uploaded images will have
names "<video_name>_<frame_no>.jpg".
:param project: project name or metadata of the project to upload video frames to
:type project: str or dict
:param video_path: video to upload
:type video_path: Pathlike (str or Path)
:param target_fps: how many frames per second need to extract from the video (approximate).
If None, all frames will be uploaded
:type target_fps: float
:param start_time: Time (in seconds) from which to start extracting frames
:type start_time: float
:param end_time: Time (in seconds) up to which to extract frames. If None up to end
:type end_time: float
:param annotation_status: value to set the annotation statuses of the uploaded
video frames NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: filenames of uploaded images
:rtype: list of strs
"""
logger.info("Uploading from video %s.", str(video_path))
rotate_code = None
try:
meta_dict = ffmpeg.probe(str(video_path))
rot = int(meta_dict['streams'][0]['tags']['rotate'])
if rot == 90:
rotate_code = cv2.ROTATE_90_CLOCKWISE
elif rot == 180:
rotate_code = cv2.ROTATE_180
elif rot == 270:
rotate_code = cv2.ROTATE_90_COUNTERCLOCKWISE
if rot != 0:
logger.info(
"Frame rotation of %s found. Output images will be rotated accordingly.",
rot
)
    except Exception:
logger.warning("Couldn't read video metadata.")
video = cv2.VideoCapture(str(video_path), cv2.CAP_FFMPEG)
if not video.isOpened():
raise SABaseException(0, "Couldn't open video file " + str(video_path))
total_num_of_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
if total_num_of_frames < 0:
if target_fps is not None:
logger.warning(
"Number of frames indicated in the video is negative number. Disabling FPS change."
)
target_fps = None
else:
logger.info("Video frame count is %s.", total_num_of_frames)
if target_fps is not None:
video_fps = video.get(cv2.CAP_PROP_FPS)
logger.info(
"Video frame rate is %s. Target frame rate is %s.", video_fps,
target_fps
)
if target_fps > video_fps:
target_fps = None
else:
r = video_fps / target_fps
frames_count_to_drop = total_num_of_frames - (
total_num_of_frames / r
)
percent_to_drop = frames_count_to_drop / total_num_of_frames
my_random = random.Random(122222)
zero_fill_count = len(str(total_num_of_frames))
tempdir = tempfile.TemporaryDirectory()
video_name = Path(video_path).name
frame_no = 1
while True:
success, frame = video.read()
if not success:
break
if target_fps is not None and my_random.random() < percent_to_drop:
continue
frame_time = video.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
if frame_time < start_time:
continue
if end_time is not None and frame_time > end_time:
continue
if rotate_code is not None:
frame = cv2.rotate(frame, rotate_code)
cv2.imwrite(
str(
Path(tempdir.name) / (
video_name + "_" + str(frame_no).zfill(zero_fill_count) +
".jpg"
)
), frame
)
frame_no += 1
logger.info(
"Extracted %s frames from video. Now uploading to platform.",
frame_no - 1
)
filenames = upload_images_from_folder_to_project(
project,
tempdir.name,
extensions=["jpg"],
annotation_status=annotation_status,
image_quality_in_editor=image_quality_in_editor
)
assert len(filenames[1]) == 0
filenames_base = []
for file in filenames[0]:
filenames_base.append(Path(file).name)
return filenames_base
def upload_videos_from_folder_to_project(
project,
folder_path,
extensions=None,
exclude_file_patterns=None,
recursive_subfolders=False,
target_fps=None,
start_time=0.0,
end_time=None,
annotation_status="NotStarted",
image_quality_in_editor=None
):
"""Uploads image frames from all videos with given extensions from folder_path to the project.
Sets status of all the uploaded images to set_status if it is not None.
:param project: project name or metadata of the project to upload videos to
:type project: str or dict
:param folder_path: from which folder to upload the videos
:type folder_path: Pathlike (str or Path)
:param extensions: list of filename extensions to include from folder, if None, then
extensions = ["mp4", "avi", "mov", "webm", "flv", "mpg", "ogg"]
:type extensions: list of str
:param exclude_file_patterns: filename patterns to exclude from uploading
:type exclude_file_patterns: list of strs
:param recursive_subfolders: enable recursive subfolder parsing
:type recursive_subfolders: bool
:param target_fps: how many frames per second need to extract from the video (approximate).
If None, all frames will be uploaded
:type target_fps: float
:param start_time: Time (in seconds) from which to start extracting frames
:type start_time: float
:param end_time: Time (in seconds) up to which to extract frames. If None up to end
:type end_time: float
:param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded and not-uploaded video frame images' filenames
:rtype: tuple of list of strs
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
if recursive_subfolders:
logger.warning(
"When using recursive subfolder parsing same name videos in different subfolders will overwrite each other."
)
if exclude_file_patterns is None:
exclude_file_patterns = []
if extensions is None:
extensions = ["mp4", "avi", "mov", "webm", "flv", "mpg", "ogg"]
elif not isinstance(extensions, list):
raise SABaseException(
0,
"extensions should be a list in upload_images_from_folder_to_project"
)
logger.info(
"Uploading all videos with extensions %s from %s to project %s. Excluded file patterns are: %s.",
extensions, folder_path, project["name"], exclude_file_patterns
)
paths = []
for extension in extensions:
if not recursive_subfolders:
paths += list(Path(folder_path).glob(f'*.{extension.lower()}'))
if os.name != "nt":
paths += list(Path(folder_path).glob(f'*.{extension.upper()}'))
else:
paths += list(Path(folder_path).rglob(f'*.{extension.lower()}'))
if os.name != "nt":
paths += list(Path(folder_path).rglob(f'*.{extension.upper()}'))
filtered_paths = []
for path in paths:
not_in_exclude_list = [
x not in Path(path).name for x in exclude_file_patterns
]
if all(not_in_exclude_list):
filtered_paths.append(path)
filenames = []
for path in filtered_paths:
filenames += upload_video_to_project(
project,
path,
target_fps=target_fps,
start_time=start_time,
end_time=end_time,
annotation_status=annotation_status,
image_quality_in_editor=image_quality_in_editor
)
return filenames
def upload_images_from_folder_to_project(
project,
folder_path,
extensions=None,
annotation_status="NotStarted",
from_s3_bucket=None,
exclude_file_patterns=None,
recursive_subfolders=False,
image_quality_in_editor=None
):
"""Uploads all images with given extensions from folder_path to the project.
Sets status of all the uploaded images to set_status if it is not None.
:param project: project name or metadata of the project to upload images_to
:type project: str or dict
:param folder_path: from which folder to upload the images
:type folder_path: Pathlike (str or Path)
:param extensions: list of filename extensions to include from folder, if None, then "jpg" and "png" are included
:type extensions: list of str
:param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem
:type from_s3_bucket: str
:param exclude_file_patterns: filename patterns to exclude from uploading,
default value is to exclude SuperAnnotate pixel project
annotation mask output file pattern. If None,
SuperAnnotate related ["___save.png", "___fuse.png"]
will bet set as default exclude_file_patterns.
:type exclude_file_patterns: list of strs
:param recursive_subfolders: enable recursive subfolder parsing
:type recursive_subfolders: bool
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded and not-uploaded images' filepaths
:rtype: tuple of list of strs
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
if recursive_subfolders:
logger.info(
"When using recursive subfolder parsing same name images in different subfolders will overwrite each other."
)
if exclude_file_patterns is None:
exclude_file_patterns = ["___save.png", "___fuse.png"]
if extensions is None:
extensions = ["jpg", "png"]
elif not isinstance(extensions, list):
raise SABaseException(
0,
"extensions should be a list in upload_images_from_folder_to_project"
)
logger.info(
"Uploading all images with extensions %s from %s to project %s. Excluded file patterns are: %s.",
extensions, folder_path, project["name"], exclude_file_patterns
)
if from_s3_bucket is None:
paths = []
for extension in extensions:
if not recursive_subfolders:
paths += list(Path(folder_path).glob(f'*.{extension.lower()}'))
if os.name != "nt":
paths += list(
Path(folder_path).glob(f'*.{extension.upper()}')
)
else:
paths += list(Path(folder_path).rglob(f'*.{extension.lower()}'))
if os.name != "nt":
paths += list(
Path(folder_path).rglob(f'*.{extension.upper()}')
)
else:
s3_client = boto3.client('s3')
paginator = s3_client.get_paginator('list_objects_v2')
response_iterator = paginator.paginate(
Bucket=from_s3_bucket, Prefix=folder_path
)
paths = []
for response in response_iterator:
for object_data in response['Contents']:
key = object_data['Key']
if not recursive_subfolders and '/' in key[len(folder_path) +
1:]:
continue
for extension in extensions:
if key.endswith(f'.{extension.lower()}'
) or key.endswith(f'.{extension.upper()}'):
paths.append(key)
break
filtered_paths = []
for path in paths:
not_in_exclude_list = [
x not in Path(path).name for x in exclude_file_patterns
]
if all(not_in_exclude_list):
filtered_paths.append(path)
return upload_images_to_project(
project, filtered_paths, annotation_status, from_s3_bucket,
image_quality_in_editor
)
def get_image_array_to_upload(byte_io_orig, image_quality_in_editor):
    """Prepare the original image plus lores, huge and thumbnail renditions for upload."""
    Image.MAX_IMAGE_PIXELS = None
    im = Image.open(byte_io_orig)
    im_format = im.format
    width, height = im.size
    if image_quality_in_editor == 100 and im_format in ['JPEG', 'JPG']:
        # Already a full-quality JPEG: reuse the original bytes for the lores copy.
        byte_io_lores = io.BytesIO(byte_io_orig.getbuffer())
    else:
        # Re-encode as JPEG on a white background, flattening any alpha channel.
        byte_io_lores = io.BytesIO()
        bg = Image.new('RGBA', im.size, (255, 255, 255))
        im = im.convert("RGBA")
        bg.paste(im, mask=im)
        bg = bg.convert('RGB')
        if image_quality_in_editor == 100:
            bg.save(
                byte_io_lores,
                'JPEG',
                quality=image_quality_in_editor,
                subsampling=0
            )
        else:
            bg.save(byte_io_lores, 'JPEG', quality=image_quality_in_editor)
        im = bg
    # 600-pixel-wide preview that keeps the original aspect ratio.
    byte_io_huge = io.BytesIO()
    hsize = int(height * 600.0 / width)
    im.resize((600, hsize), Image.ANTIALIAS).save(byte_io_huge, 'JPEG')
    # 128x96 thumbnail, centered on a black background.
    byte_io_thumbs = io.BytesIO()
    thumbnail_size = (128, 96)
    background = Image.new('RGB', thumbnail_size, "black")
    im.thumbnail(thumbnail_size, Image.ANTIALIAS)
    (w, h) = im.size
    background.paste(
        im, ((thumbnail_size[0] - w) // 2, (thumbnail_size[1] - h) // 2)
    )
    im = background
    im.save(byte_io_thumbs, 'JPEG')
    # Rewind all buffers so callers can read them from the start.
    byte_io_thumbs.seek(0)
    byte_io_lores.seek(0)
    byte_io_huge.seek(0)
    byte_io_orig.seek(0)
    return byte_io_orig, byte_io_lores, byte_io_huge, byte_io_thumbs
def __upload_images_to_aws_thread(
res,
img_paths,
project,
annotation_status,
prefix,
thread_id,
chunksize,
couldnt_upload,
uploaded,
image_quality_in_editor,
from_s3_bucket=None,
):
len_img_paths = len(img_paths)
start_index = thread_id * chunksize
end_index = start_index + chunksize
if from_s3_bucket is not None:
from_session = boto3.Session()
from_s3 = from_session.resource('s3')
if start_index >= len_img_paths:
return
s3_session = boto3.Session(
aws_access_key_id=res['accessKeyId'],
aws_secret_access_key=res['secretAccessKey'],
aws_session_token=res['sessionToken']
)
s3_resource = s3_session.resource('s3')
bucket = s3_resource.Bucket(res["bucket"])
prefix = res['filePath']
uploaded_imgs = []
for i in range(start_index, end_index):
if i >= len_img_paths:
break
path = img_paths[i]
key = prefix + f'{Path(path).name}'
try:
if from_s3_bucket is not None:
file = io.BytesIO()
from_s3_object = from_s3.Object(from_s3_bucket, path)
from_s3_object.download_fileobj(file)
else:
with open(path, "rb") as f:
file = io.BytesIO(f.read())
except Exception as e:
logger.warning("Unable to open image %s.", e)
couldnt_upload[thread_id].append(path)
continue
try:
orig_image, lores_image, huge_image, thumbnail_image = get_image_array_to_upload(
file, image_quality_in_editor
)
bucket.put_object(Body=orig_image, Key=key)
bucket.put_object(Body=lores_image, Key=key + '___lores.jpg')
bucket.put_object(Body=huge_image, Key=key + '___huge.jpg')
bucket.put_object(Body=thumbnail_image, Key=key + '___thumb.jpg')
except Exception as e:
logger.warning("Unable to upload to data server %s.", e)
couldnt_upload[thread_id].append(path)
else:
uploaded[thread_id].append(path)
uploaded_imgs.append(path)
if len(uploaded_imgs) >= 100:
__create_image(
uploaded_imgs, project, annotation_status, prefix
)
uploaded_imgs = []
__create_image(uploaded_imgs, project, annotation_status, prefix)
def __create_image(img_paths, project, annotation_status, remote_dir):
# print("Creating images ", len(img_paths))
if len(img_paths) == 0:
return
team_id, project_id = project["team_id"], project["id"]
data = {
"project_id": str(project_id),
"team_id": str(team_id),
"images": [],
"annotation_status": annotation_status
}
for img_path in img_paths:
img_name = Path(img_path).name
remote_path = remote_dir + f"{img_name}"
data["images"].append({"name": img_name, "path": remote_path})
response = _api.send_request(
req_type='POST', path='/image/ext-create', json_req=data
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't ext-create image " + response.text
)
def upload_images_to_project(
project,
img_paths,
annotation_status="NotStarted",
from_s3_bucket=None,
image_quality_in_editor=None
):
"""Uploads all images given in list of path objects in img_paths to the project.
Sets status of all the uploaded images to set_status if it is not None.
If an image with existing name already exists in the project it won't be uploaded,
and its path will be appended to the third member of return value of this
function.
:param project: project name or metadata of the project to upload images to
:type project: str or dict
:param img_paths: list of Pathlike (str or Path) objects to upload
:type img_paths: list
    :param annotation_status: value to set the annotation statuses of the uploaded images: NotStarted, InProgress, QualityCheck, Returned, Completed, Skipped
:type annotation_status: str
    :param from_s3_bucket: AWS S3 bucket to use. If None then img_paths are in local filesystem
:type from_s3_bucket: str
    :param image_quality_in_editor: image quality to be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded, could-not-upload, existing-images filepaths
:rtype: tuple (3 members) of list of strs
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
if not isinstance(img_paths, list):
raise SABaseException(
0, "img_paths argument to upload_images_to_project should be a list"
)
annotation_status = annotation_status_str_to_int(annotation_status)
image_quality_in_editor = _get_project_image_quality_in_editor(
project, image_quality_in_editor
)
team_id, project_id = project["team_id"], project["id"]
existing_images = search_images(project)
duplicate_images = []
for existing_image in existing_images:
i = -1
for j, img_path in enumerate(img_paths):
if str(img_path).endswith(existing_image):
i = j
break
if i != -1:
duplicate_images.append(img_paths[i])
del img_paths[i]
if len(duplicate_images) != 0:
logger.warning(
"%s already existing images found that won't be uploaded.",
len(duplicate_images)
)
len_img_paths = len(img_paths)
logger.info(
"Uploading %s images to project %s.", len_img_paths, project["name"]
)
if len_img_paths == 0:
return ([], [], duplicate_images)
params = {
'team_id': team_id,
}
uploaded = []
for _ in range(_NUM_THREADS):
uploaded.append([])
couldnt_upload = []
for _ in range(_NUM_THREADS):
couldnt_upload.append([])
finish_event = threading.Event()
chunksize = int(math.ceil(len(img_paths) / _NUM_THREADS))
tqdm_thread = threading.Thread(
target=__tqdm_thread_image_upload,
args=(len_img_paths, uploaded, couldnt_upload, finish_event)
)
tqdm_thread.start()
response = _api.send_request(
req_type='GET',
path=f'/project/{project_id}/sdkImageUploadToken',
params=params
)
if response.ok:
res = response.json()
prefix = res['filePath']
else:
raise SABaseException(
response.status_code, "Couldn't get upload token " + response.text
)
threads = []
for thread_id in range(_NUM_THREADS):
t = threading.Thread(
target=__upload_images_to_aws_thread,
args=(
res, img_paths, project, annotation_status, prefix, thread_id,
chunksize, couldnt_upload, uploaded, image_quality_in_editor,
from_s3_bucket
)
)
threads.append(t)
t.start()
for t in threads:
t.join()
finish_event.set()
tqdm_thread.join()
list_of_not_uploaded = []
for couldnt_upload_thread in couldnt_upload:
for file in couldnt_upload_thread:
logger.warning("Couldn't upload image %s", file)
list_of_not_uploaded.append(str(file))
list_of_uploaded = []
for upload_thread in uploaded:
for file in upload_thread:
list_of_uploaded.append(str(file))
return (list_of_uploaded, list_of_not_uploaded, duplicate_images)
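# Illustrative usage sketch (comment only, not part of the SDK; the image paths are
# placeholder assumptions):
#
#   uploaded, failed, duplicates = upload_images_to_project(
#       "Example Project", ["./images/cat.jpg", "./images/dog.jpg"],
#       annotation_status="NotStarted")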
def __upload_annotations_thread(
team_id, project_id, project_type, anns_filenames, folder_path,
annotation_classes_dict, thread_id, chunksize, num_uploaded, from_s3_bucket,
actually_uploaded
):
NUM_TO_SEND = 500
len_anns = len(anns_filenames)
start_index = thread_id * chunksize
if start_index >= len_anns:
return
end_index = min(start_index + chunksize, len_anns)
postfix_json = '___objects.json' if project_type == 1 else '___pixel.json'
len_postfix_json = len(postfix_json)
postfix_mask = '___save.png'
if from_s3_bucket is not None:
from_session = boto3.Session()
from_s3 = from_session.resource('s3')
for i in range(start_index, end_index, NUM_TO_SEND):
data = {"project_id": project_id, "team_id": team_id, "names": []}
for j in range(i, i + NUM_TO_SEND):
if j >= end_index:
break
image_name = anns_filenames[j][:-len_postfix_json]
data["names"].append(image_name)
response = _api.send_request(
req_type='POST',
path='/images/getAnnotationsPathsAndTokens',
json_req=data
)
res = response.json()
if len(res["images"]) != len(data["names"]):
logger.warning("Couldn't find all the images for annotation JSONs.")
aws_creds = res["creds"]
s3_session = boto3.Session(
aws_access_key_id=aws_creds['accessKeyId'],
aws_secret_access_key=aws_creds['secretAccessKey'],
aws_session_token=aws_creds['sessionToken']
)
s3_resource = s3_session.resource('s3')
bucket = s3_resource.Bucket(aws_creds["bucket"])
for image_name, image_path in res['images'].items():
json_filename = image_name + postfix_json
if from_s3_bucket is None:
annotation_json = json.load(
open(Path(folder_path) / json_filename)
)
else:
file = io.BytesIO()
from_s3_object = from_s3.Object(
from_s3_bucket, folder_path + json_filename
)
from_s3_object.download_fileobj(file)
file.seek(0)
annotation_json = json.load(file)
fill_class_and_attribute_ids(
annotation_json, annotation_classes_dict
)
bucket.put_object(
Key=image_path + postfix_json, Body=json.dumps(annotation_json)
)
if project_type != 1:
mask_filename = image_name + postfix_mask
if from_s3_bucket is None:
with open(Path(folder_path) / mask_filename, 'rb') as fin:
file = io.BytesIO(fin.read())
else:
file = io.BytesIO()
from_s3_object = from_s3.Object(
from_s3_bucket, folder_path + mask_filename
)
from_s3_object.download_fileobj(file)
file.seek(0)
bucket.put_object(Key=image_path + postfix_mask, Body=file)
num_uploaded[thread_id] += 1
actually_uploaded[thread_id].append(
Path(folder_path) / json_filename
)
def upload_annotations_from_folder_to_project(
project, folder_path, from_s3_bucket=None, recursive_subfolders=False
):
"""Finds and uploads all JSON files in the folder_path as annotations to the project.
    The JSON files should follow a specific naming convention. For Vector
    projects they should be named "<image_filename>___objects.json" (e.g., if
    the image is cats.jpg the annotation filename should be cats.jpg___objects.json), for Pixel projects
    the JSON file should be named "<image_filename>___pixel.json" and a second mask
    image file should be present with the name "<image_filename>___save.png". In both cases
    an image with <image_filename> should already be present on the platform.
Existing annotations will be overwritten.
:param project: project name or metadata of the project to upload annotations to
:type project: str or dict
:param folder_path: from which folder to upload the annotations
:type folder_path: Pathlike (str or Path)
:param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem
:type from_s3_bucket: str
:param recursive_subfolders: enable recursive subfolder parsing
:type recursive_subfolders: bool
:return: paths to annotations uploaded
:rtype: list of strs
"""
if recursive_subfolders:
logger.info(
"When using recursive subfolder parsing same name annotations in different subfolders will overwrite each other."
)
logger.info(
"The JSON files should follow specific naming convention. For Vector projects they should be named '<image_name>___objects.json', for Pixel projects JSON file should be names '<image_name>___pixel.json' and also second mask image file should be present with the name '<image_name>___save.png'. In both cases image with <image_name> should be already present on the platform."
)
logger.info("Existing annotations will be overwritten.")
if not isinstance(project, dict):
project = get_project_metadata(project)
return _upload_annotations_from_folder_to_project(
project, folder_path, from_s3_bucket, recursive_subfolders
)
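# Illustrative usage sketch (comment only, not part of the SDK; assumes the folder
# contains files such as "cats.jpg___objects.json" for a Vector project):
#
#   uploaded_annotation_paths = upload_annotations_from_folder_to_project(
#       "Example Project", "./annotations")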
def _upload_annotations_from_folder_to_project(
project, folder_path, from_s3_bucket=None, recursive_subfolders=False
):
return_result = []
if from_s3_bucket is not None:
if not folder_path.endswith('/'):
folder_path = folder_path + '/'
if recursive_subfolders:
if from_s3_bucket is None:
for path in Path(folder_path).glob('*'):
if path.is_dir():
return_result += _upload_annotations_from_folder_to_project(
project, path, from_s3_bucket, recursive_subfolders
)
else:
s3_client = boto3.client('s3')
result = s3_client.list_objects(
Bucket=from_s3_bucket, Prefix=folder_path, Delimiter='/'
)
results = result.get('CommonPrefixes')
if results is not None:
for o in results:
return_result += _upload_annotations_from_folder_to_project(
project, o.get('Prefix'), from_s3_bucket,
recursive_subfolders
)
team_id, project_id, project_type = project["team_id"], project[
"id"], project["type"]
logger.info(
"Uploading all annotations from %s to project %s.", folder_path,
project["name"]
)
annotations_paths = []
annotations_filenames = []
if from_s3_bucket is None:
for path in Path(folder_path).glob('*.json'):
if path.name.endswith('___objects.json'
) or path.name.endswith('___pixel.json'):
annotations_paths.append(path)
annotations_filenames.append(path.name)
else:
s3_client = boto3.client('s3')
paginator = s3_client.get_paginator('list_objects_v2')
response_iterator = paginator.paginate(
Bucket=from_s3_bucket, Prefix=folder_path
)
for response in response_iterator:
for object_data in response['Contents']:
key = object_data['Key']
if '/' in key[len(folder_path) + 1:]:
continue
if key.endswith('___objects.json'
) or key.endswith('___pixel.json'):
annotations_paths.append(key)
annotations_filenames.append(Path(key).name)
len_annotations_paths = len(annotations_paths)
logger.info(
"Uploading %s annotations to project %s.", len_annotations_paths,
project["name"]
)
if len_annotations_paths == 0:
return return_result
num_uploaded = [0] * _NUM_THREADS
actually_uploaded = []
for _ in range(_NUM_THREADS):
actually_uploaded.append([])
finish_event = threading.Event()
tqdm_thread = threading.Thread(
target=__tqdm_thread,
args=(len_annotations_paths, num_uploaded, finish_event)
)
tqdm_thread.start()
annotation_classes = search_annotation_classes(
project, return_metadata=True
)
annotation_classes_dict = get_annotation_classes_name_to_id(
annotation_classes
)
chunksize = int(math.ceil(len_annotations_paths / _NUM_THREADS))
threads = []
for thread_id in range(_NUM_THREADS):
t = threading.Thread(
target=__upload_annotations_thread,
args=(
team_id, project_id, project_type, annotations_filenames,
folder_path, annotation_classes_dict, thread_id, chunksize,
num_uploaded, from_s3_bucket, actually_uploaded
)
)
threads.append(t)
t.start()
for t in threads:
t.join()
finish_event.set()
tqdm_thread.join()
logger.info("Number of annotations uploaded %s.", sum(num_uploaded))
for ac_upl in actually_uploaded:
return_result += [str(p) for p in ac_upl]
return return_result
def __upload_preannotations_thread(
aws_creds, project_type, preannotations_filenames, folder_path,
annotation_classes_dict, thread_id, chunksize, num_uploaded,
already_uploaded, from_s3_bucket
):
len_preanns = len(preannotations_filenames)
start_index = thread_id * chunksize
if start_index >= len_preanns:
return
end_index = min(start_index + chunksize, len_preanns)
s3_session = boto3.Session(
aws_access_key_id=aws_creds['accessKeyId'],
aws_secret_access_key=aws_creds['secretAccessKey'],
aws_session_token=aws_creds['sessionToken']
)
s3_resource = s3_session.resource('s3')
bucket = s3_resource.Bucket(aws_creds["bucket"])
postfix_json = '___objects.json' if project_type == 1 else '___pixel.json'
len_postfix_json = len(postfix_json)
postfix_mask = '___save.png'
if from_s3_bucket is not None:
from_session = boto3.Session()
from_s3 = from_session.resource('s3')
for i in range(start_index, end_index):
if already_uploaded[i]:
continue
json_filename = preannotations_filenames[i]
if from_s3_bucket is None:
annotation_json = json.load(open(Path(folder_path) / json_filename))
else:
file = io.BytesIO()
from_s3_object = from_s3.Object(
from_s3_bucket, folder_path + json_filename
)
from_s3_object.download_fileobj(file)
file.seek(0)
annotation_json = json.load(file)
fill_class_and_attribute_ids(annotation_json, annotation_classes_dict)
bucket.put_object(
Key=aws_creds["filePath"] + f"/{json_filename}",
Body=json.dumps(annotation_json)
)
if project_type != 1:
mask_filename = json_filename[:-len_postfix_json] + postfix_mask
if from_s3_bucket is None:
with open(Path(folder_path) / mask_filename, 'rb') as fin:
file = io.BytesIO(fin.read())
else:
file = io.BytesIO()
from_s3_object = from_s3.Object(
from_s3_bucket, folder_path + mask_filename
)
from_s3_object.download_fileobj(file)
file.seek(0)
bucket.put_object(
Key=aws_creds['filePath'] + f'/{mask_filename}', Body=file
)
num_uploaded[thread_id] += 1
already_uploaded[i] = True
def __tqdm_thread(total_num, current_nums, finish_event):
with tqdm(total=total_num) as pbar:
while True:
finished = finish_event.wait(5)
if not finished:
pbar.update(sum(current_nums) - pbar.n)
else:
pbar.update(total_num - pbar.n)
break
def __tqdm_thread_image_upload(
total_num, uploaded, couldnt_upload, finish_event
):
with tqdm(total=total_num) as pbar:
while True:
finished = finish_event.wait(5)
if not finished:
sum_all = 0
for i in couldnt_upload:
sum_all += len(i)
for i in uploaded:
sum_all += len(i)
pbar.update(sum_all - pbar.n)
else:
pbar.update(total_num - pbar.n)
break
def upload_preannotations_from_folder_to_project(
project, folder_path, from_s3_bucket=None, recursive_subfolders=False
):
"""Finds and uploads all JSON files in the folder_path as pre-annotations to the project.
    The JSON files should follow a specific naming convention. For Vector
    projects they should be named "<image_filename>___objects.json" (e.g., if
    the image is cats.jpg the annotation filename should be cats.jpg___objects.json), for Pixel projects
    the JSON file should be named "<image_filename>___pixel.json" and a second mask
    image file should be present with the name "<image_filename>___save.png". In both cases
    an image with <image_filename> should already be present on the platform.
Existing pre-annotations will be overwritten.
:param project: project name or metadata of the project to upload pre-annotations to
:type project: str or dict
:param folder_path: from which folder to upload the pre-annotations
:type folder_path: Pathlike (str or Path)
:param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem
:type from_s3_bucket: str
:param recursive_subfolders: enable recursive subfolder parsing
:type recursive_subfolders: bool
:return: paths to pre-annotations uploaded
:rtype: list of strs
"""
if recursive_subfolders:
logger.info(
"When using recursive subfolder parsing same name pre-annotations in different subfolders will overwrite each other."
)
logger.info(
"The JSON files should follow specific naming convention. For Vector projects they should be named '<image_name>___objects.json', for Pixel projects JSON file should be names '<image_name>___pixel.json' and also second mask image file should be present with the name '<image_name>___save.png'. In both cases image with <image_name> should be already present on the platform."
)
logger.info(
"Identically named existing pre-annotations will be overwritten."
)
if not isinstance(project, dict):
project = get_project_metadata(project)
return _upload_preannotations_from_folder_to_project(
project, folder_path, from_s3_bucket, recursive_subfolders
)
def _upload_preannotations_from_folder_to_project(
project, folder_path, from_s3_bucket=None, recursive_subfolders=False
):
return_result = []
if from_s3_bucket is not None:
if not folder_path.endswith('/'):
folder_path = folder_path + '/'
if recursive_subfolders:
if from_s3_bucket is None:
for path in Path(folder_path).glob('*'):
if path.is_dir():
return_result += _upload_preannotations_from_folder_to_project(
project, path, from_s3_bucket, recursive_subfolders
)
else:
s3_client = boto3.client('s3')
result = s3_client.list_objects(
Bucket=from_s3_bucket, Prefix=folder_path, Delimiter='/'
)
results = result.get('CommonPrefixes')
if results is not None:
for o in results:
return_result += _upload_preannotations_from_folder_to_project(
project, o.get('Prefix'), from_s3_bucket,
recursive_subfolders
)
team_id, project_id, project_type = project["team_id"], project[
"id"], project["type"]
logger.info(
"Uploading all preannotations from %s to project %s.", folder_path,
project["name"]
)
preannotations_paths = []
preannotations_filenames = []
if from_s3_bucket is None:
for path in Path(folder_path).glob('*.json'):
if path.name.endswith('___objects.json'
) or path.name.endswith('___pixel.json'):
preannotations_paths.append(path)
preannotations_filenames.append(path.name)
else:
s3_client = boto3.client('s3')
paginator = s3_client.get_paginator('list_objects_v2')
response_iterator = paginator.paginate(
Bucket=from_s3_bucket, Prefix=folder_path
)
for response in response_iterator:
for object_data in response['Contents']:
key = object_data['Key']
if '/' in key[len(folder_path) + 1:]:
continue
if key.endswith('___objects.json'
) or key.endswith('___pixel.json'):
preannotations_paths.append(key)
preannotations_filenames.append(Path(key).name)
len_preannotations_paths = len(preannotations_paths)
logger.info(
"Uploading %s preannotations to project %s.", len_preannotations_paths,
project["name"]
)
if len_preannotations_paths == 0:
return return_result
params = {'team_id': team_id, 'creds_only': True, 'type': project_type}
num_uploaded = [0] * _NUM_THREADS
already_uploaded = [False] * len_preannotations_paths
chunksize = int(math.ceil(len_preannotations_paths / _NUM_THREADS))
finish_event = threading.Event()
tqdm_thread = threading.Thread(
target=__tqdm_thread,
args=(len_preannotations_paths, num_uploaded, finish_event)
)
tqdm_thread.start()
annotation_classes = search_annotation_classes(
project, return_metadata=True
)
annotation_classes_dict = get_annotation_classes_name_to_id(
annotation_classes
)
while True:
if sum(num_uploaded) == len_preannotations_paths:
break
response = _api.send_request(
req_type='GET',
path=f'/project/{project_id}/preannotation',
params=params
)
if not response.ok:
raise SABaseException(response.status_code, response.text)
aws_creds = response.json()
threads = []
for thread_id in range(_NUM_THREADS):
t = threading.Thread(
target=__upload_preannotations_thread,
args=(
aws_creds, project_type, preannotations_filenames,
folder_path, annotation_classes_dict, thread_id, chunksize,
num_uploaded, already_uploaded, from_s3_bucket
)
)
threads.append(t)
t.start()
for t in threads:
t.join()
finish_event.set()
tqdm_thread.join()
logger.info("Number of preannotations uploaded %s.", sum(num_uploaded))
return return_result + [str(p) for p in preannotations_paths]
def share_project(project, user, user_role):
"""Share project with user.
:param project: project name or metadata of the project
:type project: str or dict
:param user: user email or metadata of the user to share project with
:type user: str or dict
    :param user_role: user role to apply, one of Admin, Annotator, QA, Customer, Viewer
:type user_role: str
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
if not isinstance(user, dict):
user = get_team_contributor_metadata(user)
user_role = user_role_str_to_int(user_role)
team_id, project_id = project["team_id"], project["id"]
user_id = user["id"]
json_req = {"user_id": user_id, "user_role": user_role}
params = {'team_id': team_id}
response = _api.send_request(
req_type='POST',
path=f'/project/{project_id}/share',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(response.status_code, response.text)
logger.info(
"Shared project %s with user %s and role %s", project["name"],
user["email"], user_role_int_to_str(user_role)
)
def unshare_project(project, user):
"""Unshare (remove) user from project.
:param project: project name or metadata of the project
:type project: str or dict
:param user: user email or metadata of the user to unshare project
:type user: str or dict
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
if not isinstance(user, dict):
user = get_team_contributor_metadata(user)
team_id, project_id = project["team_id"], project["id"]
user_id = user["id"]
json_req = {"user_id": user_id}
params = {'team_id': team_id}
response = _api.send_request(
req_type='DELETE',
path=f'/project/{project_id}/share',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(response.status_code, response.text)
logger.info("Unshared project %s from user ID %s", project["name"], user_id)
def upload_images_from_s3_bucket_to_project(
project,
accessKeyId,
secretAccessKey,
bucket_name,
folder_path,
image_quality_in_editor=None
):
"""Uploads all images from AWS S3 bucket to the project.
:param project: project name or metadata of the project to upload images to
:type project: str or dict
:param accessKeyId: AWS S3 access key ID
:type accessKeyId: str
:param secretAccessKey: AWS S3 secret access key
:type secretAccessKey: str
:param bucket_name: AWS S3 bucket
:type bucket_name: str
:param folder_path: from which folder to upload the images
:type folder_path: str
    :param image_quality_in_editor: image quality to be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
if image_quality_in_editor is not None:
old_quality = _get_project_image_quality_in_editor(project, None)
_set_project_default_image_quality_in_editor(
project,
_get_project_image_quality_in_editor(
project, image_quality_in_editor
)
)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
data = {
"accessKeyID": accessKeyId,
"secretAccessKey": secretAccessKey,
"bucketName": bucket_name,
"folderName": folder_path
}
response = _api.send_request(
req_type='POST',
path=f'/project/{project_id}/get-image-s3-access-point',
params=params,
json_req=data
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't upload to project from S3 " + response.text
)
logger.info("Waiting for S3 upload to finish.")
while True:
time.sleep(5)
res = _get_upload_from_s3_bucket_to_project_status(project)
if res["progress"] == '2':
break
if res["progress"] != "1":
raise SABaseException(
response.status_code,
"Couldn't upload to project from S3 " + response.text
)
if image_quality_in_editor is not None:
_set_project_default_image_quality_in_editor(project, old_quality)
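# Illustrative usage sketch (comment only, not part of the SDK; the credentials, bucket
# and folder are placeholder assumptions):
#
#   upload_images_from_s3_bucket_to_project(
#       "Example Project", "<access key id>", "<secret access key>",
#       "my-bucket", "images/", image_quality_in_editor="compressed")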
def _get_upload_from_s3_bucket_to_project_status(project):
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
response = _api.send_request(
req_type='GET',
path=f'/project/{project_id}/getS3UploadStatus',
params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get upload to project from S3 status " + response.text
)
return response.json()
def get_project_workflow(project):
"""Gets project's workflow.
Return value example: [{ "step" : <step_num>, "className" : <annotation_class>, "tool" : <tool_num>, ...},...]
:param project: project name or metadata
:type project: str or dict
:return: project workflow
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
response = _api.send_request(
req_type='GET', path=f'/project/{project_id}/workflow', params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get project workflow " + response.text
)
res = response.json()
annotation_classes = search_annotation_classes(
project, return_metadata=True
)
for r in res:
if "class_id" not in r:
continue
found_classid = False
for a_class in annotation_classes:
if a_class["id"] == r["class_id"]:
found_classid = True
r["className"] = a_class["name"]
del r["class_id"]
break
if not found_classid:
raise SABaseException(0, "Couldn't find class_id in workflow")
return res
def set_project_workflow(project, new_workflow):
"""Sets project's workflow.
new_workflow example: [{ "step" : <step_num>, "className" : <annotation_class>, "tool" : <tool_num>, ...},...]
:param project: project name or metadata
:type project: str or dict
    :param new_workflow: new workflow list of dicts
    :type new_workflow: list of dicts
:return: updated part of project's workflow
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
if not isinstance(new_workflow, list):
raise SABaseException(
0, "Set project setting new_workflow should be a list"
)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
annotation_classes = search_annotation_classes(
project, return_metadata=True
)
new_list = copy.deepcopy(new_workflow)
for step in new_list:
if "id" in step:
del step["id"]
if "className" in step:
found = False
for an_class in annotation_classes:
if an_class["name"] == step["className"]:
step["class_id"] = an_class["id"]
del step["className"]
found = True
break
if not found:
raise SABaseException(
0, "Annotation class not found in set_project_workflow."
)
json_req = {"steps": new_list}
response = _api.send_request(
req_type='POST',
path=f'/project/{project_id}/workflow',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't set project workflow " + response.text
)
res = response.json()
return res
def get_project_settings(project):
"""Gets project's settings.
Return value example: [{ "attribute" : "Brightness", "value" : 10, ...},...]
:param project: project name or metadata
:type project: str or dict
:return: project settings
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
response = _api.send_request(
req_type='GET', path=f'/project/{project_id}/settings', params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get project settings " + response.text
)
return response.json()
def set_project_settings(project, new_settings):
"""Sets project's settings.
New settings format example: [{ "attribute" : "Brightness", "value" : 10, ...},...]
:param project: project name or metadata
:type project: str or dict
:param new_settings: new settings list of dicts
:type new_settings: list of dicts
:return: updated part of project's settings
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata(project)
if not isinstance(new_settings, list):
raise SABaseException(
0, "Set project setting new_settings should be a list"
)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
current_settings = get_project_settings(project)
id_conv = {}
for setting in current_settings:
if "attribute" in setting:
id_conv[setting["attribute"]] = setting["id"]
new_list = []
for new_setting in new_settings:
if "attribute" in new_setting and new_setting["attribute"] in id_conv:
new_list.append(
{
"attribute": new_setting["attribute"],
"id": id_conv[new_setting["attribute"]],
"value": new_setting["value"]
}
)
json_req = {"settings": new_list}
response = _api.send_request(
req_type='PUT',
path=f'/project/{project_id}/settings',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't set project settings " + response.text
)
return response.json()
def _get_project_image_quality_in_editor(project, image_quality_in_editor):
if image_quality_in_editor is None:
for setting in get_project_settings(project):
if "attribute" in setting and setting["attribute"] == "ImageQuality":
return setting["value"]
return 60
elif image_quality_in_editor == "compressed":
return 60
elif image_quality_in_editor == "original":
return 100
else:
raise SABaseException(
0,
"Image quality in editor should be 'compressed', 'original' or None for project settings value"
)
def _set_project_default_image_quality_in_editor(project, quality):
set_project_settings(
project, [{
"attribute": "ImageQuality",
"value": quality
}]
)
def set_project_default_image_quality_in_editor(
project, image_quality_in_editor
):
"""Sets project's default image quality in editor setting.
:param project: project name or metadata
:type project: str or dict
:param image_quality_in_editor: new setting value, should be "original" or "compressed"
:type image_quality_in_editor: str
"""
if image_quality_in_editor == "compressed":
image_quality_in_editor = 60
elif image_quality_in_editor == "original":
image_quality_in_editor = 100
else:
raise SABaseException(
0, "Image quality in editor should be 'compressed', 'original'"
)
_set_project_default_image_quality_in_editor(
project, image_quality_in_editor
)
def get_project_default_image_quality_in_editor(project):
"""Gets project's default image quality in editor setting.
:param project: project name or metadata
:type project: str or dict
:return: "original" or "compressed" setting value
:rtype: str
"""
image_quality_in_editor = _get_project_image_quality_in_editor(
project, None
)
if image_quality_in_editor == 60:
image_quality_in_editor = "compressed"
elif image_quality_in_editor == 100:
image_quality_in_editor = "original"
else:
raise SABaseException(
0, "Image quality in editor should be '60', '100'"
)
return image_quality_in_editor
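# Illustrative usage sketch (comment only, not part of the SDK; the project name is a
# placeholder assumption):
#
#   set_project_default_image_quality_in_editor("Example Project", "original")
#   assert get_project_default_image_quality_in_editor("Example Project") == "original"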
|
"""Remove EquipmentDataField default_val."""
# pylint: disable=invalid-name
from django.db import migrations
class Migration(migrations.Migration):
"""Remove EquipmentDataField default_val."""
dependencies = [
('IoT_DataMgmt',
'0087_delete_EquipmentInstanceDataFieldDailyAgg')
]
operations = [
migrations.RemoveField(
model_name='equipmentdatafield',
name='default_val')
]
|
# Josh Aaron Miller 2021
# API calls for Enemies
import venntdb
import uuid
from constants import *
from api_campaigns import *
# VenntHandler methods
def create_enemy(self, args, username):
name = args[KEY_NAME]
campaign_id = None
if KEY_CAMPAIGN_ID in args:
        # automatically add the enemy to the campaign if it's a valid campaign
campaign_id = args[KEY_CAMPAIGN_ID]
if not has_gm_permissions(self, username, campaign_id):
return self.respond({"success": False, "info": MSG_BAD_CAMP})
id = IDType.ENEMY + str(uuid.uuid4())
enemy = {"name": name, "id": id}
for key in args:
if key in ATTRIBUTES:
try:
enemy[key] = int(args[key])
except ValueError:
return self.respond({"success": False, "info": MSG_INVALID_ATTRIBUTE})
self.server.db.create_character(username, enemy, is_enemy=True)
ret = {"success": True, "id": id}
if campaign_id:
self.server.db.add_to_campaign(campaign_id, username, id, gm_only=True)
return self.respond(ret)
|
from numpy import average
from multiCameraServer import filterHubTargets,configX,configY
import plotly.graph_objects as go
lines = []
# input CSV is a WPILib data export
with open('C:/Users/racve/OneDrive/Documents/FIRST/FRC_20220322_011433.csv') as f:
lines = f.readlines()
count = 0
baseTime = 0
loopTime = 0.05
times = []
outX = []
outY = []
baseX = []
baseY = []
targetXarray = []
targetYarray = []
for line in lines[1:]:
cols = line.split(',')
if(cols[1] == '"NT:/vision/targetX"'):
targetXarray = []
temp = cols[2].split(';')
for item in temp:
if(item != '\n'):
targetXarray.append(float(item))
elif (cols[1] == '"NT:/vision/targetY"'):
targetYarray = []
temp = cols[2].split(';')
for item in temp:
if(item != '\n'):
targetYarray.append(float(item))
else:
#unknown name of data, continue
continue
curTime = float(cols[0])
while((baseTime + loopTime) < curTime):
if(len(targetXarray) == 1):
baseX.append(targetXarray[0])
elif(len(targetXarray) > 1):
baseX.append(average(targetXarray))
elif(len(baseX) == 0):
baseX.append(0)
else:
            # append the last value again
            baseX.append(baseX[-1])
if(len(targetYarray) == 1):
baseY.append(targetYarray[0])
elif(len(targetYarray) > 1):
baseY.append(average(targetYarray))
elif(len(baseY) == 0):
baseY.append(0)
else:
            # append the last value again
            baseY.append(baseY[-1])
newX, configX = filterHubTargets(targetXarray, configX)
newY, configY = filterHubTargets(targetYarray, configY)
times.append(baseTime)
outX.append(newX)
outY.append(newY)
baseTime = baseTime + loopTime
#actual plotting
fig = go.Figure()
fig.add_trace(go.Scatter(x=times,y=baseX,name = 'BaseLine'))
fig.add_trace(go.Scatter(x=times,y=outX,name = 'TargetX'))
fig.show()
fig = go.Figure()
fig.add_trace(go.Scatter(x=times,y=baseY,name = 'BaseLine'))
fig.add_trace(go.Scatter(x=times,y=outY,name = 'TargetY'))
fig.show()
|
from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.block_header import sign_block_header
from eth2spec.test.helpers.keys import privkeys
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect
from eth2spec.test.helpers.state import next_epoch
def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True):
"""
Run ``process_proposer_slashing``, yielding:
- pre-state ('pre')
- proposer_slashing ('proposer_slashing')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
pre_state = state.copy()
yield 'pre', state
yield 'proposer_slashing', proposer_slashing
if not valid:
expect_assertion_error(lambda: spec.process_proposer_slashing(state, proposer_slashing))
yield 'post', None
return
spec.process_proposer_slashing(state, proposer_slashing)
yield 'post', state
slashed_proposer_index = proposer_slashing.signed_header_1.message.proposer_index
check_proposer_slashing_effect(spec, pre_state, state, slashed_proposer_index)
@with_all_phases
@spec_state_test
def test_success(spec, state):
# Get proposer for next slot
block = build_empty_block_for_next_slot(spec, state)
proposer_index = block.proposer_index
# Create slashing for same proposer
proposer_slashing = get_valid_proposer_slashing(spec, state,
slashed_index=proposer_index,
signed_1=True, signed_2=True)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing)
@with_all_phases
@spec_state_test
def test_success_slashed_and_proposer_index_the_same(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=True)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_2(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2_swap(spec, state):
# Get valid signatures for the slashings
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# But swap them
signature_1 = proposer_slashing.signed_header_1.signature
proposer_slashing.signed_header_1.signature = proposer_slashing.signed_header_2.signature
proposer_slashing.signed_header_2.signature = signature_1
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
def test_invalid_proposer_index(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# Index just too high (by 1)
proposer_slashing.signed_header_1.message.proposer_index = len(state.validators)
proposer_slashing.signed_header_2.message.proposer_index = len(state.validators)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
def test_invalid_different_proposer_indices(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# set different index and sign
header_1 = proposer_slashing.signed_header_1.message
header_2 = proposer_slashing.signed_header_2.message
active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))
active_indices = [i for i in active_indices if i != header_1.proposer_index]
header_2.proposer_index = active_indices[0]
proposer_slashing.signed_header_2 = sign_block_header(spec, state, header_2, privkeys[header_2.proposer_index])
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
def test_epochs_are_different(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
# set slots to be in different epochs
header_2 = proposer_slashing.signed_header_2.message
proposer_index = header_2.proposer_index
header_2.slot += spec.SLOTS_PER_EPOCH
proposer_slashing.signed_header_2 = sign_block_header(spec, state, header_2, privkeys[proposer_index])
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
def test_headers_are_same_sigs_are_same(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
# set headers to be the same
proposer_slashing.signed_header_2 = proposer_slashing.signed_header_1.copy()
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
def test_headers_are_same_sigs_are_different(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
# set headers to be the same
proposer_slashing.signed_header_2 = proposer_slashing.signed_header_1.copy()
# but signatures to be different
proposer_slashing.signed_header_2.signature = proposer_slashing.signed_header_2.signature[:-1] + b'\x00'
assert proposer_slashing.signed_header_1.signature != proposer_slashing.signed_header_2.signature
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
def test_proposer_is_not_activated(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# set proposer to be not active yet
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].activation_epoch = spec.get_current_epoch(state) + 1
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
def test_proposer_is_slashed(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# set proposer to slashed
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].slashed = True
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
@with_all_phases
@spec_state_test
def test_proposer_is_withdrawn(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# move 1 epoch into future, to allow for past withdrawable epoch
next_epoch(spec, state)
# set proposer withdrawable_epoch in past
current_epoch = spec.get_current_epoch(state)
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].withdrawable_epoch = current_epoch - 1
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
|
"""Shim to enable editable installs."""
from setuptools import setup
setup()
|
import os
import unittest
from programy.extensions.weather.weather import WeatherExtension
from test.aiml_tests.client import TestClient
class WeatherExtensionTests(unittest.TestCase):
def setUp(self):
self.test_client = TestClient()
latlong = os.path.dirname(__file__) + "/google_latlong.json"
observation = os.path.dirname(__file__) + "/observation.json"
threehourly = os.path.dirname(__file__) + "/forecast_3hourly.json"
daily = os.path.dirname(__file__) + "/forecast_daily.json"
self.test_client.bot.license_keys.load_license_key_data("""
GOOGLE_LATLONG=%s
METOFFICE_API_KEY=TESTKEY
CURRENT_OBSERVATION_RESPONSE_FILE=%s
THREE_HOURLY_FORECAST_RESPONSE_FILE=%s
DAILY_FORECAST_RESPONSE_FILE=%s
"""%(latlong, observation, threehourly, daily))
self.clientid = "testid"
def test_observation(self):
weather = WeatherExtension()
self.assertIsNotNone(weather)
result = weather.execute(self.test_client.bot, self.clientid, "LOCATION KY39UR WHEN NOW")
self.assertIsNotNone(result)
self.assertEquals("WEATHER Partly cloudy (day) TEMP 12 3 VISIBILITY V 35000 VF Very Good WIND D SW DF South West S 10 PRESSURE P 1017 PT F PTF Falling HUMIDITY 57 3", result)
|
wandb_project = 'mmsegmentation'
wandb_experiment_name = 'ResNet50, PointRend finetuning, 2 classes'
######################################################################
# optimizer
optimizer = dict(type='Adam', lr=0.0001, weight_decay=0.00005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='exp', gamma=0.999994, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=300000)
checkpoint_config = dict(by_epoch=False, interval=20000)
evaluation = dict(interval=10000, metric='mIoU', pre_eval=False, tb_log_dir="./work_dirs/tf_logs")
######################################################################
# runtime settings
log_config = dict(
interval=1000,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project=wandb_project,
name=wandb_experiment_name))
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None  # 'work_dirs/standard_unet/latest.pth'
workflow = [('train', 1), ('val', 1)]
cudnn_benchmark = True
######################################################################
# dataset settings
dataset_type = 'CustomDataset'
data_root = 'data/'
img_norm_cfg = dict(
mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
crop_size = (448, 448)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Normalize', **img_norm_cfg),
#dict(type='RGB2Gray')
dict(type='RandomRotate', prob=0.2, degree=180),
dict(type='Corrupt', prob = 0.5, corruption=['gaussian_blur', 'gaussian_noise', 'shot_noise', 'brightness', 'contrast', 'jpeg_compression', 'pixelate']),
dict(type='RandomFlip', prob=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale = (448, 448),
flip=False,
transforms=[
dict(type='Pad', size_divisor=1),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'], meta_keys = ['filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'flip', 'flip_direction', 'img_norm_cfg']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/train',
ann_dir='annotations/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/val',
ann_dir='annotations/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/test',
ann_dir='annotations/test',
pipeline=test_pipeline))
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='CascadeEncoderDecoder',
num_stages=2,
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=4),
decode_head=[
dict(
type='FPNHead',
in_channels=[256, 256, 256, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='PointHead',
in_channels=[256],
in_index=[0],
channels=256,
num_fcs=3,
coarse_pred_each_layer=True,
dropout_ratio=-1,
num_classes=2,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
],
# model training and testing settings
train_cfg=dict(
num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75),
test_cfg=dict(
mode='slide',
crop_size=(960, 960),
stride=(900, 900),
subdivision_steps=2,
subdivision_num_points=8196,
scale_factor=2))
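# Illustrative launch sketch (comment only; assumes this file is saved as an
# mmsegmentation config, e.g. configs/pointrend/pointrend_r50_finetune.py, inside an
# mmsegmentation checkout):
#
#   python tools/train.py configs/pointrend/pointrend_r50_finetune.py \
#       --work-dir work_dirs/pointrend_r50_finetune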
|
import random
from .enums.difficulty import Difficulty
from .enums.goal import Goal
from .enums.logic import Logic
from .enums.enemizer import Enemizer
from .enums.start_location import StartLocation
class RandomizerData:
def __init__(self, seed: int = random.randint(0, 999999999),
difficulty: Difficulty = Difficulty.NORMAL, goal: Goal = Goal.DARK_GAIA,
logic: Logic = Logic.COMPLETABLE, statues: str = "4", enemizer: Enemizer = Enemizer.NONE,
start_location: StartLocation = StartLocation.SOUTH_CAPE, firebird: bool = False, ohko: bool = False,
red_jewel_madness: bool = False, allow_glitches: bool = False, boss_shuffle: bool = False,
open_mode: bool = False, overworld_shuffle: bool = False, dungeon_shuffle: bool = False):
self.seed = seed
self.difficulty = difficulty
self.start_location = start_location
self.goal = goal
self.logic = logic
self.statues = statues
self.enemizer = enemizer
self.firebird = firebird
self.ohko = ohko
self.red_jewel_madness = red_jewel_madness
self.allow_glitches = allow_glitches
self.boss_shuffle = boss_shuffle
self.overworld_shuffle = overworld_shuffle
self.dungeon_shuffle = dungeon_shuffle
self.open_mode = open_mode
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import traceback
from flask import request,jsonify, abort
from flask import current_app as app
from ..models.supply import Supply, PostCompany
from base_handler import BaseHandler, HandlerException
from data_packer import RequiredField, OptionalField, SelectorField, converter
from data_packer.checker import (
ReChecker
)
from ..constant import RESP_CODE, RESP_ERR_MSG
from app import db
SUPPLY_Name = RequiredField('supply_name', checker=ReChecker(ur'([\u4e00-\u9fa5]{2,30})'))
SUPPLY_Mobile = RequiredField('supply_mobile', checker=ReChecker(r'1[0-9]{10}'))
SUPPLY_Address = RequiredField('supply_address', checker=ReChecker(ur'([a-z0-9\u4e00-\u9fa5]{2,50})'))
OPTION_SUPPLY_id = OptionalField(src_name='supply_id',
dst_name='id',
converter=converter.TypeConverter(str),
checker=ReChecker(r'[0-9]{1,}'))
OPTION_SUPPLY_Name = OptionalField(src_name='supply_name',
dst_name='name',
checker=ReChecker(ur'([\u4e00-\u9fa5]{2,30})'))
OPTION_SUPPLY_Mobile = OptionalField(src_name='supply_mobile',
dst_name='mobile',
checker=ReChecker(r'1[0-9]{10}'))
OPTION_SUPPLY_Address = OptionalField(src_name='supply_address',
dst_name='address',
checker=ReChecker(ur'([a-z0-9\u4e00-\u9fa5]{2,30})'))
RELATION_SUPPLY_id = RequiredField(src_name='supply_id',
converter=converter.TypeConverter(str),
checker=ReChecker(r'[0-9]{1,}'))
RELATION_POST_id = RequiredField(src_name='post_id',
converter=converter.TypeConverter(str),
checker=ReChecker(r'[0-9]{1,}'))
class SupplyHandler(BaseHandler):
POST_FIELDS = [
SUPPLY_Name, SUPPLY_Mobile, SUPPLY_Address
]
GET_FIELDS = [SelectorField(
fields=[
OPTION_SUPPLY_id,
OPTION_SUPPLY_Name,
OPTION_SUPPLY_Mobile,
OPTION_SUPPLY_Address,
],
at_least=1,
)]
def get(self):
get_ret = self.handle(())
if get_ret:
return jsonify(get_ret)
def post(self):
post_ret = self.handle(())
if post_ret:
return jsonify(post_ret)
def _handle(self, *args, **kwargs):
params = self.parse_request_params()
if params is None:
            return app.logger.info('func=parse_request_params | failed to parse request parameters')
app.logger.info('func=parse_request_params | parse_type={} | parse_params = {}'.format(type(params), params))
try:
if request.method == 'GET':
supply = Supply.query.filter_by(**params).first()
if supply:
return supply.model_to_dict(query_relation=True)
else:
raise HandlerException(respcd=RESP_CODE.DB_ERROR, respmsg=RESP_ERR_MSG.get(RESP_CODE.DB_ERROR))
elif request.method == 'POST':
                # insert a new Supply record
supply = Supply(name=params['supply_name'], mobile=params['supply_mobile'], address=params['supply_address'])
supply.save()
if supply.id:
return {'supply_id': supply.id}
else:
raise HandlerException(respcd=RESP_CODE.DB_ERROR, respmsg=RESP_ERR_MSG.get(RESP_CODE.DB_ERROR))
else:
abort(404)
except BaseException as e:
db.session.rollback()
raise e
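# Illustrative request sketch (comment only; URL routing is not defined in this file,
# so the /supply path is an assumption):
#
#   GET  /supply?supply_id=1   -> returns the matching supply record as JSON
#   POST /supply               -> creates a supply; expects supply_name (2-30 Chinese
#        characters), supply_mobile (11 digits starting with 1) and supply_address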
class SupplySelectHandler(BaseHandler):
POST_FIELDS = [
RELATION_SUPPLY_id, RELATION_POST_id
]
def post(self):
post_ret = self.handle(())
if post_ret:
return jsonify(post_ret)
def _handle(self, *args, **kwargs):
params = self.parse_request_params()
if params is None:
            return app.logger.info('func=parse_request_params | failed to parse request parameters')
app.logger.info('func=parse_request_params | parse_type={} | parse_params = {}'.format(type(params), params))
try:
supply = Supply.query.filter_by(id=params['supply_id']).first()
if supply is None:
raise HandlerException(respcd=RESP_CODE.DB_QUERY_NOT_FOUND,
respmsg=RESP_ERR_MSG.get(RESP_CODE.DB_QUERY_NOT_FOUND) + ' supply_id {}'.format(params['supply_id']))
app.logger.info('<Supply>DB query result: {}'.format(supply.model_to_dict(query_relation=False)))
post = PostCompany.query.filter_by(id=params['post_id']).first()
if post is None:
                raise HandlerException(respcd=RESP_CODE.DB_QUERY_NOT_FOUND,
                                       respmsg=RESP_ERR_MSG.get(RESP_CODE.DB_QUERY_NOT_FOUND) + ' post_id {}'.format(params['post_id']))
app.logger.info('<PostCompany>DB query result: {}'.format(post.model_to_dict(query_relation=False)))
post.supply_set.append(supply)
supply.save()
return self.request_finish(RESP_CODE.SUCCESS, RESP_ERR_MSG.get(RESP_CODE.SUCCESS, ''))
except BaseException as e:
db.session.rollback()
raise e
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import textworld
from textworld.challenges import cooking
def test_making_cooking_games():
options = textworld.GameOptions()
options.seeds = 1234
nb_ingredients = 2
settings = {
"recipe": nb_ingredients,
"take": 1,
"open": True,
"open": True,
"cook": True,
"cut": False,
"drop": False,
"go": 6,
"recipe_seed": 123,
"split": "valid"
}
game = cooking.make(settings, options)
assert len(game.metadata["ingredients"]) == nb_ingredients
# Change only the recipe.
options = textworld.GameOptions()
options.seeds = 1234
settings["recipe_seed"] = 321
game2 = cooking.make(settings, options)
# Recipe's ingredients should be different.
assert game.metadata["ingredients"] != game2.metadata["ingredients"]
assert game.metadata["entities"] == game2.metadata["entities"]
# The rest of the world should stay the same.
POSITIONNING_FACTS = ("in", "on", "at", "west_of", "east_of", "south_of", "north_of")
differing_facts = set(game.world.facts) - set(game2.world.facts)
assert [pred for pred in differing_facts if pred.name in POSITIONNING_FACTS] == []
# TODO: Check the game can be completed by following the walkthrough.
# agent = WalkthroughAgent(commands=game.metadata["walkthrough"])
|
from http import HTTPStatus
from lite_content.lite_exporter_frontend.core import RegisterAnOrganisation
def validate_register_organisation_triage(_, json):
errors = {}
if not json.get("type"):
errors["type"] = [RegisterAnOrganisation.CommercialOrIndividual.ERROR]
if not json.get("location"):
errors["location"] = [RegisterAnOrganisation.WhereIsYourOrganisationBased.ERROR]
if errors:
return {"errors": errors}, HTTPStatus.BAD_REQUEST
return json, HTTPStatus.OK
|
# -*- coding:utf-8 -*-
from __future__ import print_function, division
import sys
sys._running_pytest = True
from distutils.version import LooseVersion as V
import pytest
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetNotebookWorkspaceResult',
'AwaitableGetNotebookWorkspaceResult',
'get_notebook_workspace',
]
@pulumi.output_type
class GetNotebookWorkspaceResult:
"""
A notebook workspace resource
"""
def __init__(__self__, id=None, name=None, notebook_server_endpoint=None, status=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notebook_server_endpoint and not isinstance(notebook_server_endpoint, str):
raise TypeError("Expected argument 'notebook_server_endpoint' to be a str")
pulumi.set(__self__, "notebook_server_endpoint", notebook_server_endpoint)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the database account.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notebookServerEndpoint")
def notebook_server_endpoint(self) -> str:
"""
Specifies the endpoint of Notebook server.
"""
return pulumi.get(self, "notebook_server_endpoint")
@property
@pulumi.getter
def status(self) -> str:
"""
Status of the notebook workspace. Possible values are: Creating, Online, Deleting, Failed, Updating.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetNotebookWorkspaceResult(GetNotebookWorkspaceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNotebookWorkspaceResult(
id=self.id,
name=self.name,
notebook_server_endpoint=self.notebook_server_endpoint,
status=self.status,
type=self.type)
def get_notebook_workspace(account_name: Optional[str] = None,
notebook_workspace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNotebookWorkspaceResult:
"""
A notebook workspace resource
:param str account_name: Cosmos DB database account name.
:param str notebook_workspace_name: The name of the notebook workspace resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['notebookWorkspaceName'] = notebook_workspace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20190801:getNotebookWorkspace', __args__, opts=opts, typ=GetNotebookWorkspaceResult).value
return AwaitableGetNotebookWorkspaceResult(
id=__ret__.id,
name=__ret__.name,
notebook_server_endpoint=__ret__.notebook_server_endpoint,
status=__ret__.status,
type=__ret__.type)
|
import requests
URL = "https://ims_api.supppee.workers.dev/api/coord"
r = requests.post(url = URL)
data = r.json()
print(data)
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
def dualcamnet_v2(inputs,
keep_prob=0.5,
is_training=True,
num_classes=None,
num_frames=12,
num_channels=12,
spatial_squeeze=False, scope='DualCamNet'):
"""
Builds a DualCamNet network for classification using a 3D temporal convolutional layer with 7x1x1 filters.
"""
with tf.variable_scope(scope, 'DualCamNet', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for convolution2d and max_pool2d
with slim.arg_scope([slim.layers.conv3d, slim.layers.conv2d, slim.layers.max_pool2d],
outputs_collections=[end_points_collection]):
# ----------- 1st layer group ---------------
net = tf.reshape(inputs, shape=(-1, num_frames, 36, 48, num_channels))
net = slim.conv3d(net, num_channels, [7, 1, 1], scope='conv1', padding='SAME')
net = tf.reshape(net, shape=(-1, 36, 48, num_channels))
# ----------- 2nd layer group ---------------
net = slim.conv2d(net, 32, [5, 5], scope='conv2', padding='SAME')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
# ----------- 3rd layer group ---------------
net = slim.conv2d(net, 64, [5, 5], scope='conv3', padding='SAME')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
if num_classes is None:
# ----------- 4th layer group ---------------
net = slim.conv2d(net, 1024, [9, 12], scope='full1', padding='VALID')
else:
# ----------- 4th layer group ---------------
net = slim.conv2d(net, 1024, [9, 12], scope='full1', padding='VALID')
net = slim.dropout(net, keep_prob=keep_prob, is_training=is_training, scope='drop1')
# ----------- 5th layer group ---------------
net = slim.conv2d(net, 1000, 1, scope='full2')
# ----------- 6th layer group ---------------
net = slim.conv2d(net, num_classes, 1, scope='full3')
# Convert end_points_collection into a end_point dictionary
end_points = slim.layers.utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='full3/squeezed')
end_points[sc.name + '/full3'] = net
return net, end_points
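# Hedged usage sketch (not part of the original file): build the network for a
# 10-class problem. The placeholder shape is an assumption consistent with the
# reshape above (num_frames * 36 * 48 * num_channels values per example).
#
#   inputs = tf.placeholder(tf.float32, [None, 12 * 36 * 48 * 12])
#   logits, end_points = dualcamnet_v2(inputs, num_classes=10, spatial_squeeze=True)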
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 10:10:55 2018
@author: David
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
#%% Print program header
def print_header():
print("==============================================================================================")
print("description :NN time series sandbox scripts.")
print("author :David Beam, github db4ai")
print("date :20110930")
print("python version :3.6.3 (v3.6.3:2c5fed8, Oct 3 2017, 18:11:49) [MSC v.1900 64 bit (AMD64)]")
print("tensorflow version :1.4.0")
print("notes :")
print("==============================================================================================")
#%% Create NARX datasets
"""
Creates a Nonlinear Auto-Regressive w/ Exogenous Input style dataset
Defined by the number of delayed inputs and the number of delayed outputs
"""
def create_NARX_dataset(input, output, numDelayedInputs, numDelayedOutputs):
# Calculate the sizes of the data
numInputs = input.shape[1]
numOutputs = output.shape[1]
length = input.shape[0] - max(numDelayedInputs,numDelayedOutputs)
width = ((numDelayedInputs + 1)*numInputs) + (numDelayedOutputs*numOutputs)
# Placeholder to hold the dataset
x_input_NARX = np.zeros((length, width) , dtype=np.float32)
# Loop through all the inputs
for i in range(max(0,max(numDelayedInputs+1,numDelayedOutputs)-1), input.shape[0]):
# Append delayed inputs to the row
temp_row = input[i,:]
for j in range(1,numDelayedInputs+1):
temp_row = np.concatenate([temp_row, input[i-j]])
# Append delayed outputs to the row
for j in range(0,numDelayedOutputs):
temp_row = np.concatenate([temp_row, output[i-j,:]], axis=0)
x_input_NARX[i-max(numDelayedInputs+1,numDelayedOutputs),:] = temp_row
return x_input_NARX
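# Hedged usage sketch (not part of the original script): build a small NARX
# matrix from toy arrays; the sizes and random data are illustrative assumptions.
def _narx_dataset_example():
    rng = np.random.RandomState(0)
    u = rng.rand(20, 2).astype(np.float32)   # 20 samples of 2 exogenous inputs
    y = rng.rand(20, 1).astype(np.float32)   # 20 samples of 1 output
    narx = create_NARX_dataset(u, y, numDelayedInputs=2, numDelayedOutputs=1)
    # width = (2 + 1) * 2 input columns + 1 * 1 output column = 7
    assert narx.shape[1] == 7
    return narx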
#%% Split the data into training and testing datasets
"""
Takes the full dataset and splits it into two sets defined by the testing data size and the training data size
"""
def split_data(series,training,testing):
testing = series[-testing:] #split off the testing data
training = series[0:training] #split off the training data
return training,testing
#%% Split data into testing and training sets
"""
Uses the split_data function to split the required datasets into training and testing sets
"""
def split_data_into_sets(x1,y_Realtime,y_Interval,y_Interval_Interpolated,x2,x2_Interval,trainingSamples,testingSamples,batchSize):
molW_training,molW_test = split_data(x1,trainingSamples,testingSamples)
y_Realtime_training,y_Realtime_test = split_data(y_Realtime,trainingSamples,testingSamples)
y_Interval_training,y_Interval_test = split_data(y_Interval,trainingSamples,testingSamples)
y_Interval_Interpolated_training,y_Interval_Interpolated_test = split_data(y_Interval_Interpolated,trainingSamples,testingSamples)
x2_training,x2_test = split_data(x2,trainingSamples,testingSamples)
x2_Interval_training,x2_Interval_test = split_data(x2_Interval,trainingSamples,testingSamples)
return molW_training,molW_test,y_Realtime_training,y_Realtime_test,y_Interval_training,y_Interval_test,y_Interval_Interpolated_training, \
y_Interval_Interpolated_test,x2_training,x2_test,x2_Interval_training,x2_Interval_test
#%% Create datasets from the batches
"""
Takes a 2D array and reshapes it into a 3D array with the first dimension being the batch number
[batch_size, time-step, sample]
"""
def make_batches(series,samples):
data = series[:(len(series)-(len(series) % samples))] #trim off extra to ensure equal size batches
batches = data.reshape(-1, samples, series.shape[1]) #form batches
return batches
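# Hedged example (not from the original script): with `series` of shape (10, 3)
# and samples=4, make_batches trims the last two rows and returns an array of
# shape (2, 4, 3).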
#%% Import the data and separate into batches
def split_data_into_batches(x1,y_Realtime,y_Interval,y_Interval_Interpolated,x2,x2_Interval,trainingSamples,testingSamples,batchSize,numInputDelays,numOutputDelays):
# Split the datasets into testing and training
x1_training,x1_test, \
y_Realtime_training,y_Realtime_test, \
y_Interval_training,y_Interval_test, \
y_Interval_Interpolated_training,y_Interval_Interpolated_test, \
x2_training,x2_test, \
x2_Interval_training,x2_Interval_test = \
split_data_into_sets(x1,y_Realtime,y_Interval,y_Interval_Interpolated,x2,x2_Interval,trainingSamples,testingSamples,batchSize)
# Create the input dataset for the NN model
# This code uses only x2 as the input
x_input = x2_training
x_test = x2_test
# This code combines x1 and x2 into a single x array
#x_input = np.concatenate((x1_training, x2_training), axis=1)
#x_test = np.concatenate((x1_test, x2_test), axis=1)
# Create the input dataset for the NARX model
x_input_NARX = create_NARX_dataset(x_input, y_Realtime_training,numInputDelays,numOutputDelays)
# Create batches for the NN model
x_input_batches = make_batches(x_input, batchSize)
x_test_batches = make_batches(x_test, batchSize)
    x_input_NARX_batches = make_batches(x_input_NARX, batchSize)
    x_test_NARX_batches = make_batches(x_test, batchSize)
y_Realtime_training_batches = make_batches(y_Realtime_training, batchSize)
y_Realtime_testing_batches = make_batches(y_Realtime_test, batchSize)
y_Interval_training_batches = make_batches(y_Interval_training, batchSize)
y_Interval_testing_batches = make_batches(y_Interval_test, batchSize)
y_Interpolate_training_batches = make_batches(y_Interval_Interpolated_training, batchSize)
y_Interpolate_testing_batches = make_batches(y_Interval_Interpolated_test, batchSize)
return x_input_batches,x_test_batches,x_input_NARX_batches,y_Realtime_training_batches,y_Realtime_testing_batches,y_Interval_training_batches, y_Interval_testing_batches,y_Interpolate_training_batches, y_Interpolate_testing_batches
#%% Plot results
def plot_test_data(gas_sample, actual, predict):
plt.title("Forecast vs Actual, gas " + str(gas_sample), fontsize=14)
plt.plot(pd.Series(np.ravel(actual[:,gas_sample])), "bo", markersize=1, label="Actual")
plt.plot(pd.Series(np.ravel(predict[:,gas_sample])), "r.", markersize=1, label="Forecast")
plt.legend(loc="upper left")
plt.xlabel("Time Periods")
plt.show()
#%% TensorBoard summaries for a given variable
def variable_summaries(var):
#tf.summary.scalar('value',var)
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
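# Hedged usage sketch (not part of the original script): attach the summaries
# above to a weight tensor; the scope and variable names are assumptions.
#
#   with tf.name_scope('dense1'):
#       w = tf.Variable(tf.truncated_normal([64, 32], stddev=0.1), name='w')
#       variable_summaries(w)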
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from typing import Dict
from typing import Text
import paddle
from paddle.optimizer import Optimizer
from paddle.regularizer import L2Decay
from deepspeech.training.gradclip import ClipGradByGlobalNormWithLog
from deepspeech.utils.dynamic_import import dynamic_import
from deepspeech.utils.dynamic_import import instance_class
from deepspeech.utils.log import Log
__all__ = ["OptimizerFactory"]
logger = Log(__name__).getlog()
OPTIMIZER_DICT = {
"sgd": "paddle.optimizer:SGD",
"momentum": "paddle.optimizer:Momentum",
"adadelta": "paddle.optimizer:Adadelta",
"adam": "paddle.optimizer:Adam",
"adamw": "paddle.optimizer:AdamW",
}
def register_optimizer(cls):
"""Register optimizer."""
alias = cls.__name__.lower()
OPTIMIZER_DICT[cls.__name__.lower()] = cls.__module__ + ":" + cls.__name__
return cls
@register_optimizer
class Noam(paddle.optimizer.Adam):
"""Seem to: espnet/nets/pytorch_backend/transformer/optimizer.py """
def __init__(self,
learning_rate=0,
beta1=0.9,
beta2=0.98,
epsilon=1e-9,
parameters=None,
weight_decay=None,
grad_clip=None,
lazy_mode=False,
multi_precision=False,
name=None):
super().__init__(
learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
parameters=parameters,
weight_decay=weight_decay,
grad_clip=grad_clip,
lazy_mode=lazy_mode,
multi_precision=multi_precision,
name=name)
def __repr__(self):
echo = f"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}> "
echo += f"learning_rate: {self._learning_rate}, "
echo += f"(beta1: {self._beta1} beta2: {self._beta2}), "
echo += f"epsilon: {self._epsilon}"
def dynamic_import_optimizer(module):
"""Import Optimizer class dynamically.
Args:
module (str): module_name:class_name or alias in `OPTIMIZER_DICT`
Returns:
type: Optimizer class
"""
module_class = dynamic_import(module, OPTIMIZER_DICT)
assert issubclass(module_class,
Optimizer), f"{module} does not implement Optimizer"
return module_class
class OptimizerFactory():
@classmethod
def from_args(cls, name: str, args: Dict[Text, Any]):
assert "parameters" in args, "parameters not in args."
assert "learning_rate" in args, "learning_rate not in args."
grad_clip = ClipGradByGlobalNormWithLog(
args['grad_clip']) if "grad_clip" in args else None
weight_decay = L2Decay(
args['weight_decay']) if "weight_decay" in args else None
if weight_decay:
logger.info(f'<WeightDecay - {weight_decay}>')
if grad_clip:
logger.info(f'<GradClip - {grad_clip}>')
module_class = dynamic_import_optimizer(name.lower())
args.update({"grad_clip": grad_clip, "weight_decay": weight_decay})
opt = instance_class(module_class, args)
if "__repr__" in vars(opt):
logger.info(f"{opt}")
else:
logger.info(
f"<Optimizer {module_class.__module__}.{module_class.__name__}> LR: {args['learning_rate']}"
)
return opt
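# Hedged usage sketch (not part of the original module): construct an Adam
# optimizer through the factory. The model and hyper-parameter values below are
# illustrative assumptions.
#
#   model = paddle.nn.Linear(10, 10)
#   optimizer = OptimizerFactory.from_args("adam", {
#       "learning_rate": 1e-3,
#       "parameters": model.parameters(),
#       "weight_decay": 1e-6,
#       "grad_clip": 5.0,
#   })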
|
"""Script to build, sign and notarize the macOS RDMnet binary package."""
import datetime
import os
import re
import subprocess
import sys
import time
# This script requires an unlocked keychain on the keychain search path with the ETC macOS identities pre-installed.
# It is mostly run in the environment of Azure Pipelines CI, where those prerequisites are met.
MACOS_APPLICATION_SIGNING_IDENTITY = os.getenv(
"MACOS_APPLICATION_SIGNING_IDENTITY",
"Developer ID Application: Electronic Theatre Controls, Inc. (8AVSFD7ZED)",
)
MACOS_INSTALLER_SIGNING_IDENTITY = os.getenv(
"MACOS_INSTALLER_SIGNING_IDENTITY",
"Developer ID Installer: Electronic Theatre Controls, Inc. (8AVSFD7ZED)",
)
PACKAGE_BUNDLE_ID = "com.etcconnect.pkg.RDMnet"
DEVEL_ID_USERNAME = os.getenv("RDMNET_APPLE_DEVELOPER_ID_USER")
DEVEL_ID_PASSWORD = os.getenv("RDMNET_APPLE_DEVELOPER_ID_PW")
if not DEVEL_ID_USERNAME or not DEVEL_ID_PASSWORD:
print("Couldn't get credentials to notarize application!")
sys.exit(1)
###############################################################################
# Codesign
###############################################################################
print("Codesigning binaries...")
subprocess.run(
[
"codesign",
"--force",
"--sign",
f"{MACOS_APPLICATION_SIGNING_IDENTITY}",
"--deep",
"--timestamp",
"-o",
"runtime",
"build/install/RDMnet Controller Example.app",
],
check=True,
)
subprocess.run(
[
"codesign",
"--force",
"--sign",
f"{MACOS_APPLICATION_SIGNING_IDENTITY}",
"--timestamp",
"-o",
"runtime",
"build/install/bin/rdmnet_broker_example",
],
check=True,
)
subprocess.run(
[
"codesign",
"--force",
"--sign",
f"{MACOS_APPLICATION_SIGNING_IDENTITY}",
"--timestamp",
"-o",
"runtime",
"build/install/bin/rdmnet_device_example",
],
check=True,
)
subprocess.run(
[
"codesign",
"--force",
"--sign",
f"{MACOS_APPLICATION_SIGNING_IDENTITY}",
"--timestamp",
"-o",
"runtime",
"build/install/bin/llrp_manager_example",
],
check=True,
)
###############################################################################
# Build and sign pkg
###############################################################################
print("Building installer package...")
subprocess.run(["packagesbuild", "tools/install/macos/RDMnet.pkgproj"], check=True)
print("Signing installer package...")
subprocess.run(
[
"productsign",
"--sign",
f"{MACOS_INSTALLER_SIGNING_IDENTITY}",
"tools/install/macos/build/RDMnet.pkg",
"RDMnet.pkg",
],
check=True,
)
subprocess.run(["pkgutil", "--check-signature", "RDMnet.pkg"], check=True)
###############################################################################
# Notarize pkg
###############################################################################
print("Notarizing...")
notarize_result = subprocess.run(
[
"xcrun",
"altool",
"--notarize-app",
"--primary-bundle-id",
f"{PACKAGE_BUNDLE_ID}",
"--username",
f"{DEVEL_ID_USERNAME}",
"--password",
f"{DEVEL_ID_PASSWORD}",
"--file",
"RDMnet.pkg",
],
capture_output=True,
encoding="utf-8",
)
print(notarize_result.stdout)
if notarize_result.returncode != 0:
sys.exit(1)
notarize_uuid = re.search(
r"[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}",
notarize_result.stdout,
)
if not notarize_uuid:
print("UUID not found in notarization output: {}".format(notarize_result.stdout))
sys.exit(1)
notarize_uuid = notarize_uuid.group(0)
# check the status of the notarization process
# Slightly delay getting the notarization status to give apple servers time to update their
# status of our pkg upload. If we don't do this, we'll get a UUID not found error since we're
# requesting the status of our pkg a tad bit too fast right after uploading.
time.sleep(5)
# Check notarization status every 30 seconds for up to 20 minutes
for half_minutes in range(0, 40):
time_str = datetime.time(
minute=int(half_minutes / 2), second=int((half_minutes % 2) * 30)
).strftime("%M:%S")
print(
f"Checking notarization request UUID: {notarize_uuid} at {time_str} since notarization upload..."
)
notarize_status = subprocess.run(
[
"xcrun",
"altool",
"--notarization-info",
f"{notarize_uuid}",
"-u",
f"{DEVEL_ID_USERNAME}",
"-p",
f"{DEVEL_ID_PASSWORD}",
],
capture_output=True,
encoding="utf-8",
)
print(notarize_status.stdout)
if notarize_status.returncode != 0:
sys.exit(1)
notarize_status_str = None
for line in notarize_status.stdout.splitlines():
notarize_status_str = re.search("Status: (.+)", line)
if notarize_status_str:
notarize_status_str = notarize_status_str.group(1).strip()
break
if notarize_status_str:
print("Got notarization status: '{}'".format(notarize_status_str))
else:
print(
"Notarization status not found in status request output: {}".format(
notarize_status.stdout
)
)
sys.exit(1)
if notarize_status_str == "success":
# staple the ticket onto the notarized pkg
print("Stapling ticket to pkg file...")
staple_result = subprocess.run(
["xcrun", "stapler", "staple", "RDMnet.pkg"],
capture_output=True,
encoding="utf-8",
)
staple_result.check_returncode()
print(staple_result.stdout)
if re.search("The staple and validate action worked!", staple_result.stdout):
print("Done.")
sys.exit(0)
else:
print("Unknown ticket staple status. Notarization failed.")
sys.exit(1)
time.sleep(30)
# If we got here, the notarization was not approved; error.
print("Unable to obtain confirmation of notarization approval.")
sys.exit(1)
|
from . import api
from .version import (
version,
version_info,
__version__
)
__all__ = (
"api",
"__version__",
"version",
"version_info",
)
|
import csv
import os
import pickle
from math import sqrt
from typing import Dict
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from arm_prosthesis.services.myoelectronics.preprocessing.feture_extactor import FeatureExtractor
gestures_signals_names = {
0: 'clenching',
1: 'sharp_flexion'
}
class ClassificationTraining:
def __init__(self):
pass
def train(self, tagged_signals: Dict[int, str], output_model_path: str):
features_inputs = []
output = []
for signal_tag in tagged_signals:
signal_dir_path = tagged_signals[signal_tag]
for file_name in os.listdir(signal_dir_path):
# get the original signal
signal_data = self.extract_signal_from_csv(signal_dir_path + file_name)
# get mav feature from signal
mav_features = FeatureExtractor.extract_mav(signal_data)
features_inputs.append(mav_features)
output.append(signal_tag)
# split the sample into test and training
features_train, features_test, output_train, output_test = train_test_split(
features_inputs, output, test_size=0.2, random_state=12345
)
# create and train model
knn_model = KNeighborsRegressor(n_neighbors=2)
knn_model.fit(features_train, output_train)
# test model
# Get of Root Mean Square Error (RMSE)
print("Train RMSE:" + str(ClassificationTraining.calculate_rmse(knn_model, features_train, output_train)))
print("Test RMSE:" + str(ClassificationTraining.calculate_rmse(knn_model, features_test, output_test)))
# Save model
with open(output_model_path, 'wb') as knn_file:
pickle.dump(knn_model, knn_file)
@staticmethod
def calculate_rmse(knn_model, input_model, output_model):
train_predicts = knn_model.predict(input_model)
mse = mean_squared_error(output_model, train_predicts)
rmse = sqrt(mse)
return rmse
@staticmethod
def extract_signal_from_csv(file_signal_path: str):
signal = []
with open(file_signal_path, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
next(reader, None) # skip the headers
for row in reader:
signal.append(int(row[1]))
return signal
if __name__ == '__main__':
path_to_gestures = '//home/pi/arm-prosthesis/data/gestures/training/'
model_path = '//home/pi/arm-prosthesis-bin/knn_model'
tagged_gestures = {}
for gesture_id in gestures_signals_names:
gesture_name = gestures_signals_names[gesture_id]
gesture_path = path_to_gestures + gesture_name + '/'
tagged_gestures[gesture_id] = gesture_path
classification_trainer = ClassificationTraining()
classification_trainer.train(tagged_gestures, model_path)
|
from graphql import GraphQLArgument as Argument
from graphql import GraphQLBoolean as Boolean
from graphql import GraphQLField as Field
from graphql import GraphQLNonNull as NonNull
from graphql import GraphQLResolveInfo as ResolveInfo
from graphql import GraphQLString as String
from scrapqd.gql import constants as const
from scrapqd.gql.helper import get_key, with_error_traceback
from scrapqd.gql.parser import Parser
from scrapqd.gql.scalar.generic import GenericScalar
@with_error_traceback
def resolve_link(parser: Parser, info: ResolveInfo,
xpath, base_url=None, multi=const.MULTI_DEFAULT_VALUE):
"""Extracts anchor node's href attribute, forms absolute url from the response and returns to client.
If the base url is given, it will use that to form the absolute url. Xpath expected to be anchor tag.
:param parser: Parser instance passed down from parent query.
:param info: GraphQLResolveInfo instance which gives resolver information.
:param xpath: Expected to be an anchor <a> node.
:param base_url: Custom base url to form absolute url.
    :param multi: Defaults to False.
                  - True - Process multiple elements when xpath locates multiple nodes.
                  - False - Process only the first element when xpath locates multiple nodes.
    :return: Text - when multi is set to False. This option can be overridden to return a list with a single value
                    using `NON_MULTI_RESULT_LIST`.
             List - when multi is set to True.
"""
if base_url is None:
base_url = parser.headers.get("response_url")
key = get_key(info)
result = parser.solve_link(key, base_url=base_url, multi=multi, xpath=xpath)
result = parser.get_multi_results(multi, result)
parser.caching(key, result)
return result
link = Field(GenericScalar,
args={
"xpath": Argument(NonNull(String), description=const.xpath_desc),
"multi": Argument(Boolean, description=const.multi_desc),
"base_url": Argument(String, description=const.link_base_url_desc),
},
resolve=resolve_link,
description="Extracts href attribute from anchor <a> tag. If the base_url argument is give, "
"it will form the absolute url. Xpath expected to be anchor tag.")
|
"""
Estimate optical flow on standard test datasets.
Use this script to generate the predictions to be submitted to the benchmark website.
Note that this script will only generate the flow files in the format specified by the respective benchmark. However,
additional steps may be necessary before submitting. For example, for the MPI-Sintel benchmark you need to download and run the
official bundler on the results generated by this script.
"""
# =============================================================================
# Copyright 2021 Henrique Morimitsu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import logging
import sys
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import Any, Dict, Optional
import cv2 as cv
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from ptlflow import get_model, get_model_reference
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.utils import flow_utils
from ptlflow.utils.io_adapter import IOAdapter
from ptlflow.utils.utils import (
add_datasets_to_parser, config_logging, get_list_of_available_models_list, tensor_dict_to_numpy)
config_logging()
def _init_parser() -> ArgumentParser:
parser = ArgumentParser()
parser.add_argument(
'model', type=str, choices=get_list_of_available_models_list(),
help='Name of the model to use.')
parser.add_argument(
'--output_path', type=str, default=str(Path('outputs/test')),
help='Path to the directory where the validation results will be saved.')
parser.add_argument(
'--show', action='store_true',
help='If set, the results are shown on the screen.')
parser.add_argument(
'--max_forward_side', type=int, default=None,
help=('If max(height, width) of the input image is larger than this value, then the image is downscaled '
'before the forward and the outputs are bilinearly upscaled to the original resolution.'))
parser.add_argument(
'--max_show_side', type=int, default=1000,
help=('If max(height, width) of the output image is larger than this value, then the image is downscaled '
'before showing it on the screen.'))
return parser
def generate_outputs(
args: Namespace,
inputs: Dict[str, torch.Tensor],
preds: Dict[str, torch.Tensor],
dataloader_name: str,
batch_idx: int,
metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Display on screen and/or save outputs to disk, if required.
Parameters
----------
args : Namespace
The arguments with the required values to manage the outputs.
inputs : Dict[str, torch.Tensor]
The inputs loaded from the dataset (images, groundtruth).
preds : Dict[str, torch.Tensor]
The model predictions (optical flow and others).
dataloader_name : str
A string to identify from which dataloader these inputs came from.
batch_idx : int
Indicates in which position of the loader this input is.
metadata : Dict[str, Any], optional
Metadata about this input, if available.
"""
inputs = tensor_dict_to_numpy(inputs)
preds = tensor_dict_to_numpy(preds)
preds['flows_viz'] = flow_utils.flow_to_rgb(preds['flows'])[:, :, ::-1]
if preds.get('flows_b') is not None:
preds['flows_b_viz'] = flow_utils.flow_to_rgb(preds['flows_b'])[:, :, ::-1]
_write_to_file(args, preds, dataloader_name, batch_idx, metadata)
if args.show:
_show(inputs, preds, args.max_show_side)
def test(
args: Namespace,
model: BaseModel
) -> None:
"""Run predictions on the test dataset.
Parameters
----------
args : Namespace
Arguments to configure the model and the test.
model : BaseModel
The model to be used for testing.
See Also
--------
ptlflow.models.base_model.base_model.BaseModel : The parent class of the available models.
"""
model.eval()
if torch.cuda.is_available():
model = model.cuda()
dataloaders = model.test_dataloader()
dataloaders = {model.test_dataloader_names[i]: dataloaders[i] for i in range(len(dataloaders))}
for dataset_name, dl in dataloaders.items():
test_one_dataloader(args, model, dl, dataset_name)
@torch.no_grad()
def test_one_dataloader(
args: Namespace,
model: BaseModel,
dataloader: DataLoader,
dataloader_name: str,
) -> None:
"""Perform predictions for all examples of one test dataloader.
Parameters
----------
args : Namespace
Arguments to configure the model and the validation.
model : BaseModel
The model to be used for validation.
dataloader : DataLoader
The dataloader for the validation.
dataloader_name : str
A string to identify this dataloader.
"""
for i, inputs in enumerate(tqdm(dataloader)):
scale_factor = (
None if args.max_forward_side is None else float(args.max_forward_side) / min(inputs['images'].shape[-2:]))
io_adapter = IOAdapter(
model, inputs['images'].shape[-2:], target_scale_factor=scale_factor, cuda=torch.cuda.is_available())
inputs = io_adapter.prepare_inputs(inputs=inputs)
preds = model(inputs)
inputs = io_adapter.unpad_and_unscale(inputs)
preds = io_adapter.unpad_and_unscale(preds)
generate_outputs(args, inputs, preds, dataloader_name, i, inputs.get('meta'))
def _show(
inputs: Dict[str, np.ndarray],
preds: Dict[str, np.ndarray],
max_show_side: int
) -> None:
for k, v in inputs.items():
if isinstance(v, np.ndarray) and (len(v.shape) == 2 or v.shape[2] == 1 or v.shape[2] == 3):
if max(v.shape[:2]) > max_show_side:
scale_factor = float(max_show_side) / max(v.shape[:2])
v = cv.resize(v, (int(scale_factor*v.shape[1]), int(scale_factor*v.shape[0])))
cv.imshow(k, v)
for k, v in preds.items():
if isinstance(v, np.ndarray) and (len(v.shape) == 2 or v.shape[2] == 1 or v.shape[2] == 3):
if max(v.shape[:2]) > max_show_side:
scale_factor = float(max_show_side) / max(v.shape[:2])
v = cv.resize(v, (int(scale_factor*v.shape[1]), int(scale_factor*v.shape[0])))
cv.imshow('pred_'+k, v)
cv.waitKey(1)
def _write_to_file(
args: Namespace,
preds: Dict[str, np.ndarray],
dataloader_name: str,
batch_idx: int,
metadata: Optional[Dict[str, Any]] = None
) -> None:
out_root_dir = Path(args.output_path)
dataloader_tokens = dataloader_name.split('-')
if dataloader_tokens[0] == 'kitti':
out_root_dir /= f'{dataloader_tokens[0]}{dataloader_tokens[1]}'
flow_ext = 'png'
elif dataloader_tokens[0] == 'sintel':
out_root_dir = out_root_dir / dataloader_tokens[0] / dataloader_tokens[1]
flow_ext = 'flo'
extra_dirs = ''
if metadata is not None:
img_path = Path(metadata['image_paths'][0][0])
image_name = img_path.stem
if 'sintel' in dataloader_name:
seq_name = img_path.parts[-2]
extra_dirs = seq_name
else:
image_name = f'{batch_idx:08d}'
out_dir = out_root_dir / extra_dirs
out_dir.mkdir(parents=True, exist_ok=True)
flow_utils.flow_write(out_dir / f'{image_name}.{flow_ext}', preds['flows'])
if __name__ == '__main__':
parser = _init_parser()
# TODO: It is ugly that the model has to be gotten from the argv rather than the argparser.
# However, I do not see another way, since the argparser requires the model to load some of the args.
FlowModel = None
if len(sys.argv) > 1 and sys.argv[1] not in ['-h', '--help']:
FlowModel = get_model_reference(sys.argv[1])
parser = FlowModel.add_model_specific_args(parser)
add_datasets_to_parser(parser, 'datasets.yml')
args = parser.parse_args()
logging.info('The outputs will be saved to %s.', args.output_path)
model_id = args.model
if args.pretrained_ckpt is not None:
model_id += f'_{args.pretrained_ckpt}'
args.output_path = Path(args.output_path) / model_id
model = get_model(sys.argv[1], args.pretrained_ckpt, args)
args.output_path.mkdir(parents=True, exist_ok=True)
    test(args, model)
|
from application import db
from sqlalchemy import text
# Creating the Tulos join table (liitostaulu)
class Tulos(db.Model):
__tablename__='liitostaulu'
id = db.Column(db.Integer, primary_key=True)
sijoitus = db.Column(db.Integer, nullable=False)
pisteet = db.Column(db.Integer, nullable=False)
kilpailu_id = db.Column(db.Integer, db.ForeignKey('kilpailu.id'), nullable=False)
kilpailija_id = db.Column(db.Integer, db.ForeignKey('kilpailija.id'), nullable=False)
def __init__(self, sijoitus, pisteet, kilpailu_id, kilpailija_id):
self.sijoitus = sijoitus
self.pisteet = pisteet
self.kilpailu_id = kilpailu_id
self.kilpailija_id = kilpailija_id
def get_id(self):
return self.id
def get_kilpailuId(self):
return self.kilpailu_id
def get_kilpailijaId(self):
return self.kilpailija_id
    # Method for fetching per-competition competitor results by Kilpailu.id
@staticmethod
def kilpailunTulokset(tarkasteltavaKilpailuId):
stmt = text("SELECT liitostaulu.sijoitus, Kilpailija.sailnumber, Kilpailija.name, Kilpailija.sailclub,"
" liitostaulu.pisteet, liitostaulu.id FROM liitostaulu"
" JOIN Kilpailija ON Kilpailija.id = liitostaulu.kilpailija_id"
" JOIN Kilpailu ON Kilpailu.id = liitostaulu.kilpailu_id"
" WHERE liitostaulu.kilpailu_id = :iidee"
" ORDER BY liitostaulu.sijoitus")
res = db.engine.execute(stmt, iidee=tarkasteltavaKilpailuId)
response = []
for row in res:
response.append({"sijoitus":row[0],"purjenumero":row[1],"nimi":row[2],"pursiseura":row[3],"pisteet":row[4],"id":row[5]})
return response
    # Method for fetching the details of a specific competition
@staticmethod
def kilpailunTiedot(tarkasteltavaKilpailuId):
stmt = text ("SELECT Luokka.name, Kilpailu.name, Kilpailu.venue FROM Kilpailu"
" JOIN Luokka ON Luokka.id = Kilpailu.luokka_id"
" WHERE Kilpailu.id = :iidee")
res = db.engine.execute(stmt, iidee=tarkasteltavaKilpailuId)
response = []
for row in res:
response.append({"luokka":row[0],"kilpnimi":row[1],"kilpaikka":row[2]})
return response
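# Hedged usage sketch (not part of the original module): fetch the results and
# details of one competition inside a Flask view; the id value is an assumption.
#
#   tulokset = Tulos.kilpailunTulokset(tarkasteltavaKilpailuId=1)
#   tiedot = Tulos.kilpailunTiedot(tarkasteltavaKilpailuId=1)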
|
"""
Created on May 21, 2020
@author: Parker Fagrelius
start server with the following command:
bokeh serve --show OS_Report
view at: http://localhost:5006/OS_Report
"""
import os, sys
import time
import pandas as pd
import subprocess
from bokeh.io import curdoc
from bokeh.plotting import save
from bokeh.models import TextInput, ColumnDataSource, Button, TextAreaInput, Select
from bokeh.models.widgets.markups import Div
from bokeh.models.widgets.tables import DataTable, TableColumn, NumberEditor, StringEditor, PercentEditor
from bokeh.layouts import layout, column, row
from bokeh.models.widgets import Panel, Tabs
from bokeh.themes import built_in_themes
from bokeh.models import CustomJS
sys.path.append(os.getcwd())
sys.path.append('./ECLAPI-8.0.12/lib')
#os.environ["NL_DIR"] = "/n/home/desiobserver/parkerf/desilo/nightlogs" #"/Users/pfagrelius/Research/DESI/Operations/NightLog/nightlogs"
import nightlog as nl
from report import Report
# sys.stdout = open(os.environ['NL_DIR']+'/out.txt', 'a')
# print('test')
class OS_Report(Report):
def __init__(self):
Report.__init__(self, 'OS')
self.title = Div(text="DESI Nightly Intake - Operating Scientist", css_classes=['h1-title-style'], width=1000)# width=800, style={'font-size':'24pt','font-style':'bold'})
desc = """
The Operating Scientist (OS) is responsible for initializing the Night Log. Connect to an existing Night Log using the date or initialize tonight's log.
Throughout the night, enter information about the exposures, weather, and problems. Complete the OS Checklist at least once every hour.
"""
self.instructions = Div(text=desc+self.time_note.text, css_classes=['inst-style'], width=500)
self.line = Div(text='-----------------------------------------------------------------------------------------------------------------------------', width=1000)
self.line2 = Div(text='-----------------------------------------------------------------------------------------------------------------------------', width=1000)
self.init_bt = Button(label="Initialize Tonight's Log", css_classes=['init_button'])
self.LO = Select(title='Lead Observer', value='Choose One', options=self.lo_names)
self.OA = Select(title='Observing Assistant', value='Choose One', options=self.oa_names)
self.page_logo = Div(text="<img src='OS_Report/static/logo.png'>", width=350, height=300)
        self.contributer_list = TextAreaInput(placeholder='Contributor names (include all)', rows=2, cols=3, title='Names of all Contributors')
        self.contributer_btn = Button(label='Update Contributor List', css_classes=['add_button'], width=300)
self.connect_hdr = Div(text="Connect to Existing Night Log", css_classes=['subt-style'], width=800)
self.init_hdr = Div(text="Initialize Tonight's Night Log", css_classes=['subt-style'], width=800)
self.check_subtitle = Div(text="OS Checklist", css_classes=['subt-style'])
self.checklist_inst = Div(text="Every hour, the OS is expected to monitor several things. After completing these tasks, record at what time they were completed. Be honest please!", css_classes=['inst-style'], width=1000)
self.checklist.labels = ["Did you check the weather?", "Did you check the guiding?", "Did you check the positioner temperatures?","Did you check the FXC?", "Did you check the Cryostat?", "Did you do a connectivity aliveness check?","Did you check the Spectrograph Chiller?"]
self.nl_submit_btn = Button(label='Submit NightLog & Publish Nightsum', width=300, css_classes=['add_button'])
self.header_options = ['Startup','Calibration (Arcs/Twilight)','Focus','Observation','Other Acquisition','Comment']
def plan_tab(self):
self.plan_subtitle = Div(text="Night Plan", css_classes=['subt-style'])
self.plan_inst = Div(text="Input the major elements of the Night Plan found at the link below in the order expected for their completion.", css_classes=['inst-style'], width=1000)
self.plan_txt = Div(text='<a href="https://desi.lbl.gov/trac/wiki/DESIOperations/ObservingPlans/">Tonights Plan Here</a>', css_classes=['inst-style'], width=500)
self.plan_order = TextInput(title ='Expected Order:', placeholder='1', value=None)
self.plan_input = TextAreaInput(placeholder="description", rows=8, cols=3, title="Describe item of the night plan:",max_length=5000)
self.plan_btn = Button(label='Add', css_classes=['add_button'])
self.plan_alert = Div(text=' ', css_classes=['alert-style'])
def milestone_tab(self):
self.milestone_subtitle = Div(text="Milestones & Major Accomplishments", css_classes=['subt-style'])
self.milestone_inst = Div(text="Record any major milestones or accomplishments that occur throughout a night and the exposure numbers that correspond to it. If applicable, indicate the ID of exposures to ignore in a series.", css_classes=['inst-style'],width=1000)
self.milestone_input = TextAreaInput(placeholder="Description", rows=10, cols=3, max_length=5000)
self.milestone_exp_start = TextInput(title ='Exposure Start', placeholder='12345', value=None)
self.milestone_exp_end = TextInput(title='Exposure End', placeholder='12345', value=None)
self.milestone_exp_excl = TextInput(title='Excluded Exposures', placeholder='12346', value=None)
self.milestone_btn = Button(label='Add Milestone', css_classes=['add_button'])
self.milestone_alert = Div(text=' ', css_classes=['alert-style'])
self.summary = TextAreaInput(rows=6, title='End of Night Summary',max_length=5000)
self.summary_btn = Button(label='Add Summary', css_classes=['add_button'], width=300)
def weather_tab(self):
data = pd.DataFrame(columns = ['time','desc','temp','wind','humidity'])
self.weather_source = ColumnDataSource(data)
self.weather_subtitle = Div(text="Weather", css_classes=['subt-style'])
columns = [TableColumn(field='time', title='Time (local)', width=75),
TableColumn(field='desc', title='Description', width=200, editor=StringEditor()),
TableColumn(field='temp', title='Temperature (C)', width=100, editor=NumberEditor()),
TableColumn(field='wind', title='Wind Speed (mph)', width=120, editor=NumberEditor()),
TableColumn(field='humidity', title='Humidity (%)', width=100, editor=PercentEditor())]
self.weather_inst = Div(text="Every hour include a description of the weather and any other relevant information, as well as fill in all the fields below. Click the Update Night Log button after every hour's entry. To update a cell: double click in it, record the information, click out of the cell.", width=1000, css_classes=['inst-style'])
self.weather_time = TextInput(placeholder='17:00', value=None, width=100) #title='Time in Kitt Peak local time',
self.weather_desc = TextInput(title='Description', placeholder='description', value=None)
self.weather_temp = TextInput(title='Temperature (C)', placeholder='50', value=None)
self.weather_wind = TextInput(title='Wind Speed (mph)', placeholder='10', value=None)
self.weather_humidity = TextInput(title='Humidity (%)', placeholder='5', value=None)
self.weather_table = DataTable(source=self.weather_source, columns=columns)
self.weather_btn = Button(label='Add Weather', css_classes=['add_button'])
def exp_tab(self):
self.exp_subtitle = Div(text="Nightly Progress", css_classes=['subt-style'])
self.exp_inst = Div(text="Throughout the night record the progress, including comments on Calibrations and Exposures. All exposures are recorded in the eLog, so only enter information that can provide additional information.", width=800, css_classes=['inst-style'])
self.hdr_type = Select(title="Observation Type", value='Observation', options=self.header_options)
self.hdr_btn = Button(label='Select', css_classes=['add_button'])
self.add_image = TextInput(title="Add Image", placeholder='Pictures/image.png', value=None)
self.exp_script = TextInput(title='Script Name', placeholder='dithering.json', value=None)
self.exp_time_end = TextInput(title='Time End', placeholder='20:07', value=None)
self.exp_focus_trim = TextInput(title='Trim from Focus', placeholder='54', value=None)
self.exp_tile = TextInput(title='Tile Number', placeholder='68001', value=None)
self.exp_tile_type = Select(title="Tile Type", value=None, options=['None','QSO','LRG','ELG','BGS','MW'])
self.exp_input_layout = layout([])
def choose_exposure(self):
if self.hdr_type.value == 'Focus':
self.exp_input_layout = layout([
[self.time_title, self.exp_time, self.now_btn],
[self.exp_exposure_start, self.exp_exposure_finish],
[self.exp_comment],
[self.exp_script],
[self.exp_focus_trim],
[self.exp_btn]])
elif self.hdr_type.value in ['Startup']:
self.exp_input_layout = layout([
[self.time_title, self.exp_time, self.now_btn],
[self.exp_comment],
[self.exp_btn]])
elif self.hdr_type.value in ['Comment']:
self.exp_input_layout = layout([
[self.time_title, self.exp_time, self.now_btn, self.img_upinst2, self.img_upload_comments_os],
[self.exp_comment],
[self.exp_btn]])
elif self.hdr_type.value == 'Calibration (Arcs/Twilight)':
self.exp_input_layout = layout([
[self.time_title, self.exp_time, self.now_btn],
[self.exp_exposure_start, self.exp_exposure_finish],
[self.exp_comment],
[self.exp_type],
[self.exp_script],
[self.exp_btn]])
elif self.hdr_type.value in ['Observation', 'Other Acquisition']:
self.exp_input_layout = layout([
[self.time_title, self.exp_time, self.now_btn],
[self.exp_exposure_start, self.exp_exposure_finish],
[self.exp_comment],
[self.exp_tile_type],
[self.exp_tile],
[self.exp_script],
[self.exp_btn]])
self.exp_layout.children[6] = self.exp_input_layout
def get_layout(self):
intro_layout = layout([self.title,
[self.page_logo, self.instructions],
self.connect_hdr,
[self.date_init, self.connect_bt],
self.connect_txt,
self.line,
self.init_hdr,
[[self.os_name_1, self.os_name_2], self.LO, self.OA],
[self.init_bt],
self.line2,
self.contributer_list,
self.contributer_btn,
self.nl_info,
self.intro_txt], width=1000)
intro_tab = Panel(child=intro_layout, title="Initialization")
plan_layout = layout([self.title,
self.plan_subtitle,
self.plan_inst,
self.plan_txt,
[self.plan_order, self.plan_input],
[self.plan_btn],
self.plan_alert], width=1000)
plan_tab = Panel(child=plan_layout, title="Night Plan")
milestone_layout = layout([self.title,
self.milestone_subtitle,
self.milestone_inst,
self.milestone_input,
[self.milestone_exp_start,self.milestone_exp_end, self.milestone_exp_excl],
[self.milestone_btn],
self.summary,
self.summary_btn,
self.milestone_alert], width=1000)
milestone_tab = Panel(child=milestone_layout, title='Milestones')
self.exp_layout = layout(children=[self.title,
self.exp_subtitle,
self.exp_inst,
self.time_note,
self.exp_info,
[self.hdr_type, self.hdr_btn],
self.exp_input_layout,
self.exp_alert], width=1000)
exp_tab = Panel(child=self.exp_layout, title="Nightly Progress")
weather_layout = layout([self.title,
self.weather_subtitle,
self.weather_inst,
self.time_note,
[self.time_title, self.weather_time, self.now_btn],
[self.weather_desc, self.weather_temp],
[self.weather_wind, self.weather_humidity, self.weather_btn],
self.weather_table], width=1000)
weather_tab = Panel(child=weather_layout, title="Weather")
self.get_nl_layout()
nl_layout = layout([self.title,
self.nl_subtitle,
self.nl_alert,
self.nl_text,
self.exptable_alert,
self.exp_table,
self.nl_submit_btn], width=1000)
nl_tab = Panel(child=nl_layout, title="Current DESI Night Log")
self.get_prob_layout()
self.get_checklist_layout()
self.get_img_layout()
self.get_plots_layout()
self.check_tab.title = 'OS Checklist'
self.layout = Tabs(tabs=[intro_tab, plan_tab, milestone_tab, exp_tab, weather_tab, self.prob_tab, self.check_tab, self.img_tab, self.plot_tab, nl_tab], css_classes=['tabs-header'], sizing_mode="scale_both")
def run(self):
self.plan_tab()
self.milestone_tab()
self.exp_tab()
self.weather_tab()
self.time_tabs = [None, None, None, self.exp_time, self.weather_time, self.prob_time, None, None]
self.now_btn.on_click(self.time_is_now)
self.init_bt.on_click(self.initialize_log)
self.connect_bt.on_click(self.connect_log)
self.exp_btn.on_click(self.progress_add)
self.hdr_btn.on_click(self.choose_exposure)
self.weather_btn.on_click(self.weather_add)
self.prob_btn.on_click(self.prob_add)
#self.nl_btn.on_click(self.current_nl)
self.nl_submit_btn.on_click(self.nl_submit)
self.check_btn.on_click(self.check_add)
self.milestone_btn.on_click(self.milestone_add)
self.plan_btn.on_click(self.plan_add)
self.img_btn.on_click(self.image_add)
self.contributer_btn.on_click(self.add_contributer_list)
self.summary_btn.on_click(self.add_summary)
self.get_layout()
OS = OS_Report()
OS.run()
curdoc().theme = 'dark_minimal'
curdoc().title = 'DESI Night Log - Observing Scientist'
curdoc().add_root(OS.layout)
curdoc().add_periodic_callback(OS.current_nl, 30000)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This software is under a BSD license. See LICENSE.txt for details.
from datatank_py.DTDataFile import DTDataFile
from datatank_py.DTPath2D import DTPath2D
from datatank_py.DTPoint2D import DTPoint2D
from datatank_py.DTPointCollection2D import DTPointCollection2D
from datatank_py.DTPointValueCollection2D import DTPointValueCollection2D
import numpy as np
from bisect import bisect_right
from time import time
import sys
def _find_le(a, x):
'Find rightmost value less than or equal to x'
i = bisect_right(a, x)
if i:
return i-1, a[i-1]
raise ValueError
def _pp_distance(p1, p2):
"""Euclidean distance between two points"""
dx = p2.x - p1.x
dy = p2.y - p1.y
r = np.sqrt(dx * dx + dy * dy)
return r
def _divide_path_with_segment_spacing(path, required_distance):
"""Divide a path into points at equal intervals.
Arguments:
path -- DTPath2D with no subpaths
required_distance -- Distance between successive points
Returns:
DTPointValueCollection2D containing the points and their distances
from the start of the path.
This is designed to work with straight-line paths, as it only considers
the Euclidean distance between points. If the input path has sufficient
point resolution, results for curved paths may be adequate.
"""
points = path.point_list()
distances = []
p1 = points[0]
for p2 in points[1:]:
distances.append(_pp_distance(p1, p2))
p1 = p2
path_length = np.sum(distances)
num_elements = int(path_length / required_distance)
cum_dist = np.cumsum(distances)
point_values = DTPointValueCollection2D(DTPointCollection2D([], []), [])
point_values.add_point_value(points[0], 0)
for el in xrange(1, num_elements + 1):
distance_to_find = el * required_distance
idx, dist = _find_le(cum_dist, distance_to_find)
p1 = points[idx - 1]
p2 = points[idx]
remainder = distance_to_find - dist
assert remainder >= 0, "negative distance"
dx = p2.x - p1.x
dy = p2.y - p1.y
r = np.sqrt(dx * dx + dy * dy)
delta_y = remainder * dy / r
delta_x = remainder * dx / r
p3 = DTPoint2D(p1.x + delta_x, p1.y + delta_y)
point_values.add_point_value(p3, distance_to_find)
return point_values
def _test():
with DTDataFile("split_2dpath.dtbin", truncate=True) as df:
xvalues = np.linspace(0, 10, num=100)
yvalues = np.sin(xvalues)
xvalues = np.append(xvalues, np.flipud(xvalues))
xvalues = np.append(xvalues, xvalues[0])
yvalues = np.append(yvalues, -yvalues)
yvalues = np.append(yvalues, yvalues[0])
path = DTPath2D(xvalues, yvalues)
df["Path"] = path
df["PointValues"] = _divide_path_with_segment_spacing(path, 0.5)
if __name__ == '__main__':
import traceback
#
# This is a DataTank plugin that is intended to find points on a path
# that are some user-specified distance apart. It works well with a
# straight-line transect, say for creating a distance scale or choosing
# discrete stations for further analysis. You are only guaranteed to get
# a point at the starting endpoint.
#
# It will only work properly with curved paths if you have a sufficiently
# refined path. Even then, you still have to watch the error tolerances.
#
start_time = time()
errors = []
try:
input_file = DTDataFile("Input.dtbin")
required_distance = input_file["Distance"]
assert required_distance > 0, "distance must be greater than zero"
path = DTPath2D.from_data_file(input_file, "Path")
input_file.close()
point_values = _divide_path_with_segment_spacing(path, float(required_distance))
except Exception, e:
errors.append(str(e))
traceback.print_exc(file=sys.stderr)
# create or truncate the output file
with DTDataFile("Output.dtbin", truncate=True) as output_file:
# record computation time
output_file.write_anonymous(time() - start_time, "ExecutionTime")
# DataTank seems to display stderr instead of the error list, so
# make sure to write to both.
if len(errors):
output_file.write_anonymous(errors, "ExecutionErrors")
sys.stderr.write("%s\n" % errors)
else:
# save the output variable; this will be visible to DataTank
output_file["Var"] = point_values
|
import tensorflow as tf
def shape_list(x):
ps = x.get_shape().as_list()
ts = tf.shape(x)
return [ts[i] if ps[i] is None else ps[i] for i in range(len(ps))]
def _rnn_dropout(x, kp):
if kp < 1.0:
x = tf.nn.dropout(x, kp)
return x
def rnn_cell(x, h, units, scope='rnn_cell',
w_init=tf.random_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(0),
i_kp=1.0, o_kp=1.0):
with tf.variable_scope(scope):
w_dim = shape_list(x)[1] + shape_list(h)[1]
w = tf.get_variable("w", [w_dim, units], initializer=w_init)
b = tf.get_variable("b", [units], initializer=b_init)
x = _rnn_dropout(x, i_kp)
h = tf.tanh(tf.matmul(tf.concat([x, h], 1), w)+b)
h = _rnn_dropout(h, o_kp)
return h, h
def lstm_cell(x, c, h, units, scope='lstm_cell',
w_init=tf.random_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(0),
f_b=1.0, i_kp=1.0, o_kp=1.0):
with tf.variable_scope(scope):
w_dim = shape_list(x)[1] + shape_list(h)[1]
w = tf.get_variable("w", [w_dim, units * 4], initializer=w_init)
b = tf.get_variable("b", [units * 4], initializer=b_init)
x = _rnn_dropout(x, i_kp)
z = tf.matmul(tf.concat([x, h], 1), w) + b
i, j, f, o = tf.split(z, 4, 1)
c = tf.nn.sigmoid(f + f_b) * c + tf.nn.sigmoid(i) * tf.tanh(j)
h = tf.nn.sigmoid(o) * tf.tanh(c)
h = _rnn_dropout(h, o_kp)
return h, c
def peep_lstm_cell(x, c, h, units, scope='peep_lstm_cell',
w_init=tf.random_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(0),
f_b=1.0, i_kp=1.0, o_kp=1.0):
with tf.variable_scope(scope):
w_dim = shape_list(x)[1] + shape_list(h)[1]
w = tf.get_variable("w", [w_dim, units * 4], initializer=w_init)
b = tf.get_variable("b", [units * 4], initializer=b_init)
w_f_diag = tf.get_variable("w_f_diag", [units], initializer=w_init)
w_i_diag = tf.get_variable("w_i_diag", [units], initializer=w_init)
w_o_diag = tf.get_variable("w_o_diag", [units], initializer=w_init)
x = _rnn_dropout(x, i_kp)
z = tf.matmul(tf.concat([x, h], 1), w)+b
i, j, f, o = tf.split(z, num_or_size_splits=4, axis=1)
c = tf.nn.sigmoid(f + f_b + w_f_diag * c) * c + tf.nn.sigmoid(i + w_i_diag * c) * tf.tanh(j)
h = tf.nn.sigmoid(o + w_o_diag * c) * tf.tanh(c)
h = _rnn_dropout(h, o_kp)
return h, c
def mlstm_cell(x, c, h, units, scope='mlstm_cell',
w_init=tf.random_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(0),
f_b=1.0, i_kp=1.0, o_kp=1.0):
with tf.variable_scope(scope):
x_dim = shape_list(x)[1]
wx = tf.get_variable("wx", [x_dim, units * 4], initializer=w_init)
wh = tf.get_variable("wh", [units, units * 4], initializer=w_init)
wmx = tf.get_variable("wmx", [x_dim, units], initializer=w_init)
wmh = tf.get_variable("wmh", [units, units], initializer=w_init)
b = tf.get_variable("b", [units * 4], initializer=b_init)
x = _rnn_dropout(x, i_kp)
m = tf.matmul(x, wmx)*tf.matmul(h, wmh)
z = tf.matmul(x, wx) + tf.matmul(m, wh) + b
i, j, f, o = tf.split(z, 4, 1)
c = tf.nn.sigmoid(f + f_b) * c + tf.nn.sigmoid(i) * tf.tanh(j)
h = tf.nn.sigmoid(o) * tf.tanh(c)
h = _rnn_dropout(h, o_kp)
return h, c
def peep_mlstm_cell(x, c, h, units, scope='peep_mlstm_cell',
w_init=tf.random_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(0),
f_b=1.0, i_kp=1.0, o_kp=1.0):
with tf.variable_scope(scope):
x_dim = shape_list(x)[1]
wx = tf.get_variable("wx", [x_dim, units * 4], initializer=w_init)
wh = tf.get_variable("wh", [units, units * 4], initializer=w_init)
wmx = tf.get_variable("wmx", [x_dim, units], initializer=w_init)
wmh = tf.get_variable("wmh", [units, units], initializer=w_init)
w_f_diag = tf.get_variable("w_f_diag", [units], initializer=w_init)
w_i_diag = tf.get_variable("w_i_diag", [units], initializer=w_init)
w_o_diag = tf.get_variable("w_o_diag", [units], initializer=w_init)
b = tf.get_variable("b", [units * 4], initializer=b_init)
x = _rnn_dropout(x, i_kp)
m = tf.matmul(x, wmx)*tf.matmul(h, wmh)
z = tf.matmul(x, wx) + tf.matmul(m, wh) + b
i, j, f, o = tf.split(z, 4, 1)
c = tf.nn.sigmoid(f + f_b + w_f_diag * c) * c + tf.nn.sigmoid(i + w_i_diag * c) * tf.tanh(j)
h = tf.nn.sigmoid(o + w_o_diag * c) * tf.tanh(c)
h = _rnn_dropout(h, o_kp)
return h, c
def l2_mlstm_cell(x, c, h, units, scope='l2_mlstm_cell',
w_init=tf.random_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(0),
f_b=1.0, i_kp=1.0, o_kp=1.0):
with tf.variable_scope(scope):
x_dim = shape_list(x)[1]
wx = tf.get_variable("wx", [x_dim, units * 4], initializer=w_init)
wh = tf.get_variable("wh", [units, units * 4], initializer=w_init)
wmx = tf.get_variable("wmx", [x_dim, units], initializer=w_init)
wmh = tf.get_variable("wmh", [units, units], initializer=w_init)
b = tf.get_variable("b", [units * 4], initializer=b_init)
gx = tf.get_variable("gx", [units * 4], initializer=w_init)
gh = tf.get_variable("gh", [units * 4], initializer=w_init)
gmx = tf.get_variable("gmx", [units], initializer=w_init)
gmh = tf.get_variable("gmh", [units], initializer=w_init)
wx = tf.nn.l2_normalize(wx, axis=0) * gx
wh = tf.nn.l2_normalize(wh, axis=0) * gh
wmx = tf.nn.l2_normalize(wmx, axis=0) * gmx
wmh = tf.nn.l2_normalize(wmh, axis=0) * gmh
x = _rnn_dropout(x, i_kp)
m = tf.matmul(x, wmx)*tf.matmul(h, wmh)
z = tf.matmul(x, wx) + tf.matmul(m, wh) + b
i, f, o, u = tf.split(z, 4, 1)
c = tf.nn.sigmoid(f + f_b) * c + tf.nn.sigmoid(i) * tf.tanh(u)
h = tf.nn.sigmoid(o) * tf.tanh(c)
h = _rnn_dropout(h, o_kp)
return h, c
def gru_cell(x, h, units, scope='gru_cell',
w_init=tf.random_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(0),
i_kp=1.0, o_kp=1.0):
with tf.variable_scope(scope):
w_dim = shape_list(x)[1] + shape_list(h)[1]
w_g = tf.get_variable("w_g", [w_dim, units * 2], initializer=w_init)
b_g = tf.get_variable("b_g", [units * 2], initializer=b_init)
w_c = tf.get_variable("w_c", [w_dim, units], initializer=w_init)
b_c = tf.get_variable("b_c", [units], initializer=b_init)
x = _rnn_dropout(x, i_kp)
g = tf.nn.sigmoid(tf.matmul(tf.concat([x, h], 1), w_g)+b_g)
r, z = tf.split(g, num_or_size_splits=2, axis=1)
c = tf.tanh(tf.matmul(tf.concat([x, r * h], 1), w_c)+b_c)
h = z * h + (1 - z) * c
h = _rnn_dropout(h, o_kp)
return h
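# A minimal usage sketch (not part of the original file): unrolling gru_cell
# over a [batch, steps, dim] input tensor in TF 1.x graph mode. The scope name
# and the zero initial state are illustrative assumptions, and the sequence
# length is assumed to be static; the mLSTM cells above follow the same
# pattern but carry an (h, c) pair between steps.
def unroll_gru(inputs, units, scope='gru_unroll'):
    steps = shape_list(inputs)[1]
    h = tf.zeros([tf.shape(inputs)[0], units])
    # make the static width of the state explicit so shape_list sees it
    h.set_shape([None, units])
    outputs = []
    with tf.variable_scope(scope):
        for t in range(steps):
            if t > 0:
                # reuse the cell's variables from the second step on
                tf.get_variable_scope().reuse_variables()
            h = gru_cell(inputs[:, t, :], h, units)
            outputs.append(h)
    return tf.stack(outputs, axis=1)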
|
"helper funs"
#imports
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from operator import itemgetter
# def create_fullpath_lst(dirpath):
#
# fullpath_lst = []
# print(os.listdir(dirpath))
# # for file_name in os.listdir(dirpath):
# # fullpath_lst.append(os.path.join(dirpath, file_name))
# return fullpath_lst
def process_video(video_in_path, video_out_path, pipeline, show_video=True):
clip_in = VideoFileClip(video_in_path)
clip_frame = clip_in.fl_image(pipeline)
clip_frame.write_videofile(video_out_path, audio=False)
    if show_video:
return(
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(video_out_path)))
def dir_content_lst(dir_path):
return [image for image in os.listdir(dir_path)]
def dir_content_fullpath_lst(dir_path):
return [os.path.join(dir_path, filename) for filename in os.listdir(dir_path)]
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def select_rgb_yellow(image):
    # single RGB-space mask covering both bright white and yellow pixels
    lower = np.uint8([190, 190, 0])
    upper = np.uint8([255, 255, 255])
    color_mask = cv2.inRange(image, lower, upper)
    # keep only the pixels selected by the mask
    masked = cv2.bitwise_and(image, image, mask=color_mask)
return masked
def convert_to_lab(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
def select_white_yellow_L(image):
converted = convert_to_lab(image)
# white mask
d = 25
lower = np.uint8([210, -d + 128, -d + 128])
upper = np.uint8([255, d + 128, d + 128])
white_mask = cv2.inRange(converted, lower, upper)
# yellow mask
lower = np.uint8([128, -28 + 128, 30 + 128])
upper = np.uint8([255, 28 + 128, 255])
yellow_mask = cv2.inRange(converted, lower, upper)
mask = cv2.bitwise_or(yellow_mask, white_mask)
return cv2.bitwise_and(image, image, mask=mask)
def region_of_interest(img):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# defining a blank mask to start with
x = img.shape[1]
y = img.shape[0]
vertices = np.array([[(x * 0., y), (x * .47, y * .58), (x * .53, y * .58), (x, y)]], dtype=np.int32)
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
#plot fun
def plot_imfiles(imnames_lst,images_im,cmap=None,title=''):
plt.figure(figsize=(7, 13),)
plt.subplots_adjust(left=0.05 , bottom=0.05, right=0.99, top=0.92,
wspace=0.25, hspace=0.35)
plt.suptitle(title)
for idx, (im_name, image_im) in enumerate(zip(imnames_lst, images_im)):
plt.subplot(len(imnames_lst),2,idx+1)
plt.title(im_name)
plt.imshow(image_im,cmap=cmap)
#plt.tight_layout()
def process_filters(image_in, image_name_in='name', filters_dict=None):
    # apply the filters in filters_dict (an ordered mapping of
    # filter function -> parameters) to image_in, keeping each intermediate stage
image_temp = image_in
image_process_pipe_lst = [(image_in, image_name_in)]
    # apply each filter in order when any filtering funs are provided
if filters_dict:
for idx, (f, param) in enumerate(filters_dict.items()):
if param == 'none':
image_temp = f(image_temp)
image_process_pipe_lst.append((image_temp, f.__name__))
elif param == 'image':
#print(image_temp)
image_temp = f(image_temp, image_in)
image_process_pipe_lst.append((image_temp, f.__name__))
# elif f.__name__=='hough_lines':
# args = param
# image_temp = f(image_temp, *args)
# image_process_pipe_lst.append((image_temp[0], f.__name__))
else:
args = param
image_temp = f(image_temp, *args)
if isinstance(image_temp, tuple):
image_temp = image_temp[0]
image_process_pipe_lst.append((image_temp, f.__name__))
return image_process_pipe_lst
def plot_filters(image_pipe_lst, fgs=(20, 10)):
# plot filter for one image
colormap = None
cols = len(image_pipe_lst)
plt.figure(figsize=fgs)
for idx, img_tuple in enumerate(image_pipe_lst):
if len(img_tuple[0].shape) == 2:
colormap = 'gray'
plt.subplot(1, cols, idx + 1)
plt.title(img_tuple[1])
plt.imshow(img_tuple[0], cmap=colormap)
plt.show()
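# Illustrative usage of process_filters/plot_filters (the kernel size and
# Canny thresholds below are assumptions, not tuned parameters): an ordered
# mapping of filter function -> parameters, where 'none' means call the
# filter with the image only and a tuple is unpacked as extra positional
# arguments.
# example_filters = {
#     grayscale: 'none',
#     gaussian_blur: (5,),
#     canny: (50, 150),
# }
# stages = process_filters(image, 'frame', example_filters)
# plot_filters(stages)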
def plot_pipes(processed_images_lst, fgs=(20, 15)):
# plots filters on all images in processed_images_lst
rows = len(processed_images_lst)
cols = len(processed_images_lst[0])
plt.figure(figsize=fgs)
colormap = None
for jx,image_pipe_lst in enumerate(processed_images_lst):
for idx, img_tuple in enumerate(image_pipe_lst):
if len(img_tuple[0].shape) == 2:
colormap = 'gray'
plt.subplot(rows, cols, (idx + 1)+(jx*cols))
plt.title(img_tuple[1])
plt.imshow(img_tuple[0], cmap=colormap)
plt.tight_layout()
plt.show()
def avg_lines_by_points(lines, weights=[], weighted=True):
if len(lines):
start_x = lines[:, 0, 0]
start_y = lines[:, 0, 1]
end_x = lines[:, 0, 2]
end_y = lines[:, 0, 3]
        if weighted and len(weights) > 1:
x1= np.average(start_x,weights=weights)
y1 = np.average(start_y, weights=weights)
x2 = np.average(end_x, weights=weights)
y2 = np.average(end_y, weights=weights)
else:
x1 = start_x.mean()
y1 = start_y.mean()
x2 = end_x.mean()
y2 = end_y.mean()
else:
x1 = []
y1 = []
x2 = []
y2 = []
return [x1,y1,x2,y2]
def line_kq_from_pts(line_pts):
k =(line_pts[3]-line_pts[1])/(line_pts[2]-line_pts[0])
q = line_pts[1] - k * line_pts[0]
return(k,q)
def draw_lines(img, hlines, lines_previous, color=[255, 0, 0], thickness=13, counter=0):
# hlines is result of hough function 3d array
# img to draw on
# lines_previous - previously infered lines
x_size = img.shape[1]
y_size = img.shape[0]
horizon_height = y_size * 0.6
left_hlines = []
left_hlines_len = []
right_hlines = []
right_hlines_len = []
# sort left, right hlines
for index, line in enumerate(hlines):
for x1, y1, x2, y2 in line:
slope = (y2 - y1) / (x2 - x1)
angle = np.arctan2((y2 - y1), (x2 - x1))
intercept = y1 - x1 * slope
line_len = np.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
if (abs(slope) > 0.2) & (abs(slope) < 0.8):
if slope < 0:
# print('left slope: {}'.format(slope))
left_hlines.append([[x1, y1, x2, y2]])
left_hlines_len.append(line_len)
else:
# print('right slope: {}'.format(slope))
right_hlines.append([[x1, y1, x2, y2]])
right_hlines_len.append(line_len)
# case of no lines check use previous
if not len(left_hlines):
left_hlines = np.array([[lines_previous[0]]])
if not len(right_hlines):
right_hlines = np.array([[lines_previous[1]]])
# numpy arrays
left_hlines = np.array(left_hlines)
right_hlines = np.array(right_hlines)
left_hlines_len = np.array(left_hlines_len)
right_hlines_len = np.array(right_hlines_len)
# averaging lines by points, weighted by length
left_line_pts = avg_lines_by_points(left_hlines, weights=left_hlines_len, weighted=True)
right_line_pts = avg_lines_by_points(right_hlines, weights=right_hlines_len, weighted=True)
# Bottom and top stretching of line
# Result lines 0=left, 1=right new lines
new_lines = np.zeros(shape=(2, 4), dtype=np.int32)
# left
k, q = line_kq_from_pts(left_line_pts)
left_bottom_x = (y_size - q) / k
left_top_x = (horizon_height - q) / k
if left_bottom_x >= 0:
new_lines[0] = [left_bottom_x, y_size, left_top_x, horizon_height]
# right
k, q = line_kq_from_pts(right_line_pts)
right_bottom_x = (y_size - q) / k
right_top_x = (horizon_height - q) / k
if right_bottom_x <= x_size:
new_lines[1] = [right_bottom_x, y_size, right_top_x, horizon_height]
# Low pass filtering
if not lines_previous.size == 0:
if counter < 5:
            # At the beginning almost no filtering  # TODO: functional dependence
alfa = 0.9
else:
# low pass filter on x values
alfa = 0.12
new_lines[0][0] = new_lines[0][0] * alfa + lines_previous[0][0] * (1 - alfa)
new_lines[0][2] = new_lines[0][2] * alfa + lines_previous[0][2] * (1 - alfa)
new_lines[1][0] = new_lines[1][0] * alfa + lines_previous[1][0] * (1 - alfa)
new_lines[1][2] = new_lines[1][2] * alfa + lines_previous[1][2] * (1 - alfa)
# Draw lines
for line in new_lines:
cv2.line(img, (line[0], line[1]), (line[2], line[3]), color, thickness)
return new_lines
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, lines_previous,counter):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines_raw = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,
maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
new_lines = draw_lines(line_img, lines_raw, lines_previous,counter=counter)
return line_img, lines_raw, new_lines
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
#print(type(img),type(initial_img))
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, λ)
def show_videos_html(video_path):
r=HTML("""<video width="960" height="540" controls> <source src="{0}"> </video>""".format(video_path))
return(r)
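# A minimal end-to-end sketch (not part of the original file; the blur kernel,
# Canny thresholds and Hough parameters are illustrative assumptions, not
# tuned values) showing how the helpers above can be chained into a frame
# pipeline accepted by process_video. The video paths in the commented call
# are placeholders.
lines_prev = np.zeros(shape=(2, 4), dtype=np.int32)
def lane_pipeline(frame):
    global lines_prev
    masked = select_white_yellow_L(frame)
    edges = canny(gaussian_blur(grayscale(masked), 5), 50, 150)
    roi = region_of_interest(edges)
    line_img, _, lines_prev = hough_lines(roi, 2, np.pi / 180, 20, 20, 300,
                                          lines_prev, counter=0)
    return weighted_img(line_img, frame)
# process_video('input_video.mp4', 'output_video.mp4', lane_pipeline)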
|
"""
From https://www.kaggle.com/pierremegret/gensim-word2vec-tutorial#Gensim-Word2Vec%C2%A0Tutorial
Author Pierre Megret
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style("darkgrid")
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
def tsnescatterplot(model, word, list_names, n_components):
""" Plot in seaborn the results from the t-SNE dimensionality reduction algorithm of the vectors of a query word,
its list of most similar words, and a list of words.
"""
arrays = np.empty((0, 300), dtype='f')
word_labels = [word]
color_list = ['red']
# adds the vector of the query word
arrays = np.append(arrays, model.wv.__getitem__([word]), axis=0)
# gets list of most similar words
close_words = model.wv.most_similar([word])
# adds the vector for each of the closest words to the array
for wrd_score in close_words:
wrd_vector = model.wv.__getitem__([wrd_score[0]])
word_labels.append(wrd_score[0])
color_list.append('blue')
arrays = np.append(arrays, wrd_vector, axis=0)
# adds the vector for each of the words from list_names to the array
for wrd in list_names:
wrd_vector = model.wv.__getitem__([wrd])
word_labels.append(wrd)
color_list.append('green')
arrays = np.append(arrays, wrd_vector, axis=0)
    # Reduces the dimensionality from 300 to n_components dimensions with PCA
reduc = PCA(n_components=n_components).fit_transform(arrays)
# Finds t-SNE coordinates for 2 dimensions
np.set_printoptions(suppress=True)
Y = TSNE(n_components=2, random_state=0, perplexity=15).fit_transform(reduc)
# Sets everything up to plot
df = pd.DataFrame({'x': [x for x in Y[:, 0]],
'y': [y for y in Y[:, 1]],
'words': word_labels,
'color': color_list})
fig, _ = plt.subplots()
fig.set_size_inches(9, 9)
# Basic plot
p1 = sns.regplot(data=df,
x="x",
y="y",
fit_reg=False,
marker="o",
scatter_kws={'s': 40,
'facecolors': df['color']
}
)
# Adds annotations one by one with a loop
for line in range(0, df.shape[0]):
p1.text(df["x"][line],
df['y'][line],
' ' + df["words"][line].title(),
horizontalalignment='left',
verticalalignment='bottom', size='medium',
color=df['color'][line],
weight='normal'
).set_size(15)
plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)
plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)
plt.title('t-SNE visualization for {}'.format(word.title()))
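# Minimal usage sketch (assumes a trained gensim Word2Vec model with
# 300-dimensional vectors, matching the hard-coded width above; the model
# path and query words are placeholders):
# from gensim.models import Word2Vec
# w2v_model = Word2Vec.load('word2vec.model')
# tsnescatterplot(w2v_model, 'homer', ['marge', 'bart', 'lisa'], n_components=15)
# plt.show()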
|
"""
Clark Cape Cod
==============
"""
from sklearn.base import BaseEstimator
class ClarkCapeCod(BaseEstimator):
pass
|
from functools import reduce
from unittest import TestCase
from itertools import product
from nose.tools import (
assert_is_not,
eq_,
ok_,
raises,
)
from pyscalambda.formula_nodes import Formula
from pyscalambda import not_, Q, SC, SD, SF, SI, _, _1, _2, _3
def not_formula_and_eq(a, b, msg=None):
if issubclass(a.__class__, Formula):
raise AssertionError(msg)
if issubclass(b.__class__, Formula):
raise AssertionError(msg)
eq_(a, b, msg)
def not_formula_and_ok(expr, msg=None):
if issubclass(expr.__class__, Formula):
raise AssertionError(msg)
ok_(expr, msg)
class UnderscoreTest(TestCase):
def test_identity(self):
not_formula_and_eq(_(1), 1)
not_formula_and_eq(_(2), 2)
not_formula_and_eq(_([1, 2]), [1, 2])
def test_calc_number_operator2(self):
not_formula_and_eq((_ + _)(1, 1), 2)
not_formula_and_eq((_ - _)(1, 1), 0)
not_formula_and_eq((_ * _)(1, 1), 1)
not_formula_and_eq((_ / _)(1, 1), 1)
not_formula_and_eq((_ ** _)(1, 1), 1)
def test_calc_number_left_operator(self):
not_formula_and_eq((_ + 2)(2), 4)
not_formula_and_eq((_ - 2)(2), 0)
not_formula_and_eq((_ * 2)(2), 4)
not_formula_and_eq((_ / 2)(2), 1)
not_formula_and_eq((_ ** 2)(2), 4)
def test_calc_number_right_operator(self):
not_formula_and_eq((3 + _)(1), 4)
not_formula_and_eq((3 - _)(1), 2)
not_formula_and_eq((3 * _)(1), 3)
not_formula_and_eq((3 / _)(1), 3)
not_formula_and_eq((3 ** _)(1), 3)
def test_calc_complex_formula(self):
not_formula_and_eq((3 + _ * 4 + _)(1, 2), 9)
not_formula_and_eq((3 + _ * 4 + (_ + 1) * 100)(1, 2), 307)
not_formula_and_eq((10 + -_ * 2)(1), 8)
not_formula_and_eq(((10 + -_) * 2)(1), 18)
def test_call_method(self):
not_formula_and_eq((_.split(","))("test,nadeko"), ["test", "nadeko"])
not_formula_and_eq((_.split(",") + ["rikka"])("test,nadeko"), ["test", "nadeko", "rikka"])
def test_str_args(self):
not_formula_and_eq((_ + " is " + _)("nadeko", "cute"), "nadeko is cute")
not_formula_and_eq((_ * _)("nadeko", 4), "nadeko" * 4)
def test_already_bindings(self):
x = 1
not_formula_and_eq((_ + x)(1), 2)
def test_bug_case1(self):
not_formula_and_eq(("_a" + _)("test"), "_atest")
def test_scalambdable_func(self):
def test(x):
return 100 + x
def test2(x, y):
return x + y + 1
not_formula_and_eq((SF(test)(10) + _)(1000), 1110)
not_formula_and_eq(SF(len)(_)([1, 2, 3]), 3)
not_formula_and_eq(SF(len)(_)(list(range(100))), 100)
not_formula_and_eq((SF(len)(_) + 1)([list(range(1)), list(range(2)), list(range(3))]), 4)
not_formula_and_eq(list(map((SF(len)(_) + 1), [list(range(1)), list(range(2)), list(range(3))])), [2, 3, 4])
not_formula_and_eq((SF(test2)(_, 2) + 1)(100), 104)
not_formula_and_eq(
list(map(SF(test), list(map((SF(len)(_) + 1), [list(range(1)), list(range(2)), list(range(3))])))),
[102, 103, 104]
)
not_formula_and_eq(SF(test)(10), 110)
def test_scalambdable_func_multi_args(self):
not_formula_and_eq(SF(_ + 1, len)(_)([1, 2, 3]), 4)
not_formula_and_eq(SF(lambda x: x + 1, len)(_)([1, 2, 3]), 4)
def test(x):
return x + 1
not_formula_and_eq(SF(test, len)(_)([1, 2, 3]), 4)
@SF
def test2(x):
return x + 1
not_formula_and_eq(test2(SF(len)(_))([1, 2, 3]), 4)
def test_readme(self):
not_formula_and_eq(list(map(_ + 1, [1, 2, 3, 4])), [2, 3, 4, 5])
not_formula_and_eq("".join(filter(_.isdigit(), "ab123aad")), "123")
not_formula_and_eq(reduce(_ + _, [1, 2, 3, 4]), 10)
not_formula_and_eq(list(map(SF(len)(_) + 1, [[1], [1, 2], [1, 2, 3]])), [2, 3, 4])
def test_high_stress(self):
for i in range(6):
not_formula_and_eq(list(map(_ + 10, range(10 ** i))), list(range(10, 10 ** i + 10)))
def test_getitem(self):
not_formula_and_eq(_[0]([1, 2, 3]), 1)
not_formula_and_eq(_[1]([1, 2, 3]), 2)
not_formula_and_eq(_[2]([1, 2, 3]), 3)
def test_member(self):
class A(object):
def __init__(self):
self.a = 100
self.bc = 10
a = A()
assert (_.a(a) == 100)
def test_1to9_placeholder(self):
not_formula_and_eq((_1 + _2 * _2)(10, 100), 10010)
@raises(SyntaxError)
def test_1to9_placeholder_and_unnamed_placeholder(self):
not_formula_and_eq((_1 + _2 * _2 + _)(10, 100, 10), 10020)
def test_not_use_const_dict(self):
not_formula_and_eq(len((_ + 1 + 1 + 1).debug()[1]), 0)
class A(object):
pass
not_formula_and_eq(len((_ + A()).debug()[1]), 1)
def test_call_with_kwargs(self):
class A(object):
def test(self, test=10):
return test
a = A()
not_formula_and_eq((_.test(test=19))(a), 19)
not_formula_and_eq((_.test())(a), 10)
@SF
def func(x, y=10):
return x + y
not_formula_and_eq(func(10, y=_)(100), 110)
not_formula_and_eq(func(_, y=_)(10, 100), 110)
def test_scalambdable_const(self):
not_formula_and_eq(SC(10)(), 10)
not_formula_and_eq((SC(10) * 10)(), 100)
def test_quote(self):
not_formula_and_eq(SF(sum, map)(Q(_ + 1), _)([1, 2, 3]), 9)
def test_virtual_if(self):
not_formula_and_eq((_2 + 1).if_(_1 < 5).else_(_2 + 2)(0, 10), 11)
not_formula_and_eq((_2 + 1).if_(_1 < 5).else_(_2 + 2)(10, 10), 12)
not_formula_and_eq(_.if_(_ < 5).else_(_)(11, 10, 12), 12)
not_formula_and_eq(_.if_(_ < 5).else_(_)(11, 0, 12), 11)
def test_deep_const(self):
l = [1, 2, 3]
assert_is_not(SD(l)(), l)
(SD(l).append(_))(4)
not_formula_and_eq(l, [1, 2, 3])
(SC(l).append(_))(4)
not_formula_and_eq(l, [1, 2, 3, 4])
def test_scalambdable_iterator(self):
not_formula_and_eq(SI([1, 2, 3])(), [1, 2, 3])
not_formula_and_eq(SI([1, _, 3])(10), [1, 10, 3])
not_formula_and_eq(SI([_, _, 3])(10, 20), [10, 20, 3])
not_formula_and_eq(SI((1, 2, 3))(), (1, 2, 3))
not_formula_and_eq(SI((1, _, 3))(10), (1, 10, 3))
not_formula_and_eq(SI((_, _, 3))(10, 20), (10, 20, 3))
not_formula_and_eq(SI({1, 2, _1})(10), {1, 2, 10})
not_formula_and_eq(SI({"a": 1, "b": 2})(), {"a": 1, "b": 2})
not_formula_and_eq(SI({"a": _1, "b": 2})(10), {"a": 10, "b": 2})
not_formula_and_eq(SI({_1: _2, _3: 2})("a", 20, "c"), {"a": 20, "c": 2})
@raises(SyntaxError)
def test_scalambda_iterator_dict_syntax_error1(self):
not_formula_and_eq(SI({_: _, "b": _})("k", 10, 20), {"k": 10, "b": 20}) # because can't decide argument order
@raises(SyntaxError)
def test_scalambda_iterator_dict_syntax_error2(self):
not_formula_and_eq(SI({_: 2, _: 1})("a", "c"), {"a": 2, "c": 1}) # because can't decide argument order
@raises(SyntaxError)
def test_scalambda_iterator_set_syntax_error(self):
not_formula_and_eq(SI({1, 2, _})(10), {1, 2, 10}) # because can't decide argument order
def test_virtual_in(self):
for iter_class in [set, list, tuple]:
not_formula_and_ok(SC(1).in_(_)(iter_class([1, 2, 3])))
not_formula_and_ok(not SC(4).in_(_)(iter_class([1, 2, 3])))
not_formula_and_ok(_.in_(_)(2, iter_class([1, 2, 3])))
@raises(TypeError)
def test_virtual_in_type_error(self):
_.in_(12)(100)
def test_virtual_not_in(self):
for iter_class in [set, list, tuple]:
not_formula_and_ok(not SC(1).not_in_(_)(iter_class([1, 2, 3])))
not_formula_and_ok(SC(4).not_in_(_)(iter_class([1, 2, 3])))
not_formula_and_ok(not _.not_in_(_)(2, iter_class([1, 2, 3])))
@raises(TypeError)
def test_virtual_not_in_type_error(self):
_.not_in_(12)(100)
def test_virtual_logic_and(self):
for x, y in product([True, False], [True, False]):
not_formula_and_eq(_.and_(SC(x))(y), x and y)
not_formula_and_eq(SC(x).and_(_)(y), x and y)
@raises(TypeError)
def test_virtual_logic_and_not_callable(self):
not_formula_and_eq(_.and_(True)(False), None)
def test_virtual_logic_or(self):
for x, y in product([True, False], [True, False]):
not_formula_and_eq(_.or_(SC(x))(y), x or y)
not_formula_and_eq(SC(x).or_(_)(y), x or y)
@raises(TypeError)
def test_virtual_logic_or_not_callable(self):
not_formula_and_eq(_.or_(True)(False), None)
def test_virtual_not(self):
not_formula_and_ok(not not_(_)(True))
not_formula_and_ok(not_(_)(False))
not_formula_and_ok(not_(_.and_(SC(False)))(False))
not_formula_and_ok(not_(_.and_(SC(False)))(True))
not_formula_and_ok(not not_(_.and_(SC(True)))(True))
not_formula_and_ok(not_(_.or_(SC(False)))(False))
not_formula_and_ok(not not_(_.or_(SC(False)))(True))
not_formula_and_ok(not not_(_.or_(SC(True)))(True))
def test_get_attribute(self):
class A(object):
def __init__(self, x):
self.x = x
a = A(10)
not_formula_and_eq(_.x.M(a), 10)
not_formula_and_eq((_.x + 1)(a), 11)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================
Classification Accuracy as a Substantive
Quantity of Interest: Measuring Polarization
in Westminster Systems
Andrew Peterson & Arthur Spirling 2017
=================================================
Generate classifier accuracy scores and predictions,
using pre-generated matrices from gen_mats.py.
"""
# Authors:
# Andrew Peterson <ajp502 at nyu dot edu>
# Arthur Spirling < at nyu dot edu>
# License: BSD 3 clause
# run with:
# python sess_estimates.py
# or uncomment sys.argv and use python sess_estimates.py 1 '/foo/datain/' '/foo/input_mats/' '/foo/output/' 'runid'
# arguments: (1) normalize (2) input directory (3) output mat directory (4) run id
import os
import pickle as pickle
import sys
import logging
import pandas as pd
import numpy as np
import re
import string
import itertools
import os.path
import time
import scipy
from scipy.io import mmread
from scipy.sparse import csr_matrix
from sklearn import preprocessing
from sklearn.preprocessing import maxabs_scale
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from utils import has_regex, prep_year_data, fit_pred_offline_classifiers # Functions for preparing data, etc.
#---------------------------------------
#---------------------------------------
def randomize_by_member(df):
""" Randomize labels by member,
keeping the same proportion, a la
Gentzkow, et al 2015,
Measuring Polarization in High-dimensional Data"""
#mp_ids = list(set(df.memref))
uniq = df.drop_duplicates('memref')
mp_ids = uniq.memref
proportion_cons = np.mean(uniq.y_binary) # (cons = 1)
cons = [x>(1-proportion_cons) for x in np.random.rand(1,len(mp_ids))]
cons = [int(x) for x in cons[0] ]
random_cons = dict(zip(mp_ids, cons))
df['rand_cons']= df['memref'].map(random_cons)
return(np.array(df.rand_cons))
#---------------------------------------
#---------------------------------------
def load_X_y(data_in, mats_dir, indx, yrmth, randomize, normalized=1):
errors = []
try:
df = prep_year_data(data_in, yrmth, minlen=40)
logging.info("length df: %d" % len(df))
    except Exception:
        logging.error("failed getting year-month %s" % yrmth)
        errors.append(yrmth)
        return(np.NaN, np.NaN)
df.index = range(len(df))
testsize = len(df)/10
if randomize:
y = randomize_by_member(df)
else:
y = df.y_binary
if (np.mean(y)==0 or np.mean(y)==1 or len(df)==0):
logging.warning("no variation in year: %s" % yrmth)
errors.append(yrmth)
return(np.NaN, np.NaN)
if normalized:
X = mmread(mats_dir + 'topic_aug_mat_normalized_j5_' + str(indx) + '.mtx')
X = csr_matrix(X)
else:
X = mmread(mats_dir + 'topic_aug_mat_' + str(indx) + '.mtx')
logging.info("Num errors: %d" % len(errors))
return(X, y)
#---------------------------------------
#
#---------------------------------------
def run_estimates(data_in, mats_dir, results_dir, run_id, sess_indx, randomize=0, normalized=1):
""" Run classifiers for each Parliamentary session."""
yearly_stats = {}
yearly_preds = {}
setup_time = []
train_time = []
starttime = time.time()
errors = []
for indx, yrmth in sess_indx.items():
tick = time.time()
logging.info("currently running: %s" % yrmth)
X, y = load_X_y(data_in, mats_dir, indx, yrmth, randomize, normalized)
st_time = time.time() - tick
setup_time.append(st_time)
logging.info("setup time: %d s" % st_time)
tick = time.time()
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1234)
cls_stats = {}
preds= {}
foldid = 0
for train_index, test_index in skf.split(X, y):
#logging.info("fold: %d" % foldid)
#logging.info("TRAIN: %s" train_index)#, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
cls_stats[foldid], preds[foldid] = fit_pred_offline_classifiers(X_train, y_train, X_test, y_test, X)
foldid += 1
fit_time = time.time() - tick
train_time.append(fit_time)
yearly_stats[indx] = cls_stats
yearly_preds[indx] = preds
all_yearstime = time.time() - starttime
logging.info("%ds required to vectorize and to fit 4 classifiers \t" % all_yearstime)
pickle.dump(yearly_stats, open(results_dir + "/yearly_stats_rand"+ str(randomize) + "_run" + str(run_id) +".pkl", "wb" ) )
pickle.dump(yearly_preds, open(results_dir + "/yearly_predictions_rand"+ str(randomize)+ "_run" + str(run_id) +".pkl", "wb" ) )
#---------------------------------------
# Stats from estimates
#---------------------------------------
def average_per_classifier(cls_stats, classifier_names):
accuracies = {}
median = {}
vs = {}
for classif in classifier_names:
accs = []
for fold, stats in cls_stats.items():
relevant = stats[classif]
accs.append(relevant['accuracy'])
accuracies[classif] = np.mean(accs)
vs[classif] = np.var(accs)
median[classif] = np.median(accs)
return(accuracies, median, vs)
#---------------------------------------
#
#---------------------------------------
def stats_from_estimates(yearly_stats, sess_indx, randomize, run_id, results_dir):
""" """
classifier_names = ['SAG', 'SGD', 'Perceptron','Passive-Aggressive'] #classifiers.keys()
rows = []
for indx, yr in sess_indx.items():
#logging.info(str(yr))
try:
curr = yearly_stats[indx]
            mns, meds, vs = average_per_classifier(curr, classifier_names)
rows.append([indx, yr, mns['SAG'], mns['SGD'], mns['Perceptron'], mns['Passive-Aggressive'],
meds['SAG'], meds['SGD'], meds['Perceptron'], meds['Passive-Aggressive'],
vs['SAG'], vs['SGD'], vs['Perceptron'], vs['Passive-Aggressive'] ])
except:
logging.error("Error getting stats for: ", str(yr))
res = pd.DataFrame(data=rows, columns = ['index', 'yrmth',
'mn_sag','mn_sgd','mn_pcpt','mn_passAgr',
'md_sag','md_sgd','md_pcpt','md_passAgr',
'var_sag','var_sgd','var_pcpt','var_passAgr' ])
res.to_csv(results_dir + '/acc_allmembers_rand' + str(randomize)+ "_run_" + str(run_id) +".csv", index=False)
#---------------------------------------
#
#---------------------------------------
def main():
curr_dir = os.getcwd()
curr_dir = re.sub('/UK_data', '', curr_dir)
logging.basicConfig(filename= curr_dir + '/log_files/sess_estimates.log',level=logging.INFO,format='%(asctime)s %(lineno)s: %(message)s')
logging.info('Start.')
#sess_indx_file = (provide path to full index for full data)
#sess_indx = pickle.load(open(sess_indx_file, 'rb'))
sess_indx = {9: '1944-11', 74: '2008-12'}
#normalize = sys.argv[1] # normalize_X = True
randomize = 0 # sys.argv[1]
data_in = curr_dir + "/data/" #sys.argv[2] # data_in = '/nas/tz/uk/wtopics/'
mats_dir = curr_dir + "/" #sys.argv[3]
run_id = "replication" #sys.argv[4]
results_dir = curr_dir
run_estimates(data_in, mats_dir, results_dir, run_id, sess_indx, randomize=randomize, normalized=1)
logging.info("Calculating accuracy stats from estimates")
yearly_stats = pickle.load(open(results_dir + "/yearly_stats_rand"+ str(randomize) + "_run" + str(run_id) +".pkl", 'rb'))
stats_from_estimates(yearly_stats, sess_indx, randomize, run_id, results_dir)
if __name__ == "__main__":
main()
|
name = "test-packaging-rss"
print("loaded")
|
from cdiserrors import *
{% if cookiecutter.package_type == 'Service' %}
from authutils.errors import JWTError
{% endif %}
class CustomException(APIError):
def __init__(self, message):
self.message = str(message)
self.code = 500
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for gcloud ml video-intelligence commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import iso_duration
from googlecloudsdk.core.util import times
VIDEO_API = 'videointelligence'
VIDEO_API_VERSION = 'v1'
INPUT_ERROR_MESSAGE = ('[{}] is not a valid format for video input. Must be a '
'local path or a Google Cloud Storage URI '
'(format: gs://bucket/file).')
OUTPUT_ERROR_MESSAGE = ('[{}] is not a valid format for result output. Must be '
'a Google Cloud Storage URI '
'(format: gs://bucket/file).')
SEGMENT_ERROR_MESSAGE = ('Could not get video segments from [{0}]. '
'Please make sure you give the desired '
'segments in the form: START1:END1,START2:'
'END2, etc.: [{1}]')
class Error(exceptions.Error):
"""Base error class for this module."""
class SegmentError(Error):
"""Error for poorly formatted video segment messages."""
class VideoUriFormatError(Error):
"""Error if the video input URI is invalid."""
def ValidateAndParseSegments(given_segments):
"""Get VideoSegment messages from string of form START1:END1,START2:END2....
Args:
given_segments: [str], the list of strings representing the segments.
Raises:
SegmentError: if the string is malformed.
Returns:
[GoogleCloudVideointelligenceXXXVideoSegment], the messages
representing the segments or None if no segments are specified.
"""
if not given_segments:
return None
messages = apis.GetMessagesModule(VIDEO_API, VIDEO_API_VERSION)
segment_msg = messages.GoogleCloudVideointelligenceV1VideoSegment
segment_messages = []
segments = [s.split(':') for s in given_segments]
for segment in segments:
if len(segment) != 2:
raise SegmentError(SEGMENT_ERROR_MESSAGE.format(
','.join(given_segments), 'Missing start/end segment'))
start, end = segment[0], segment[1]
# v1beta2 requires segments as a duration string representing the
# count of seconds and fractions of seconds to nanosecond resolution
# e.g. offset "42.596413s". To perserve backward compatibility with v1beta1
# we will parse any segment timestamp with out a duration unit as an
# int representing microseconds.
try:
start_duration = _ParseSegmentTimestamp(start)
end_duration = _ParseSegmentTimestamp(end)
except ValueError as ve:
raise SegmentError(SEGMENT_ERROR_MESSAGE.format(
','.join(given_segments), ve))
sec_fmt = '{}s'
segment_messages.append(segment_msg(
endTimeOffset=sec_fmt.format(end_duration.total_seconds),
startTimeOffset=sec_fmt.format(start_duration.total_seconds)))
return segment_messages
def _ParseSegmentTimestamp(timestamp_string):
"""Parse duration formatted segment timestamp into a Duration object.
Assumes string with no duration unit specified (e.g. 's' or 'm' etc.) is
an int representing microseconds.
Args:
timestamp_string: str, string to convert
Raises:
ValueError: timestamp_string is not a properly formatted duration, not a
int or int value is <0
Returns:
Duration object represented by timestamp_string
"""
# Assume timestamp_string passed as int number of microseconds if no unit
# e.g. 4566, 100, etc.
try:
microseconds = int(timestamp_string)
except ValueError:
try:
duration = times.ParseDuration(timestamp_string)
if duration.total_seconds < 0:
raise times.DurationValueError()
return duration
except (times.DurationSyntaxError, times.DurationValueError):
raise ValueError('Could not parse timestamp string [{}]. Timestamp must '
'be a properly formatted duration string with time '
'amount and units (e.g. 1m3.456s, 2m, 14.4353s)'.format(
timestamp_string))
else:
log.warning("Time unit missing ('s', 'm','h') for segment timestamp [{}], "
"parsed as microseconds.".format(timestamp_string))
if microseconds < 0:
    raise ValueError('Could not parse duration string [{}]. Timestamp must '
                     'be greater than or equal to 0.'.format(timestamp_string))
return iso_duration.Duration(microseconds=microseconds)
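# Illustrative behaviour of _ParseSegmentTimestamp (examples inferred from the
# docstring and code above, not taken from the library's documentation):
#   _ParseSegmentTimestamp('1m3.456s') -> Duration of 63.456 seconds
#   _ParseSegmentTimestamp('4566')     -> Duration of 4566 microseconds (logs a warning)
#   _ParseSegmentTimestamp('-10')      -> raises ValueError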
def ValidateOutputUri(output_uri):
"""Validates given output URI against validator function.
Args:
output_uri: str, the output URI for the analysis.
Raises:
VideoUriFormatError: if the URI is not valid.
Returns:
str, The same output_uri.
"""
if output_uri and not storage_util.ObjectReference.IsStorageUrl(output_uri):
raise VideoUriFormatError(OUTPUT_ERROR_MESSAGE.format(output_uri))
return output_uri
def UpdateRequestWithInput(unused_ref, args, request):
"""The Python hook for yaml commands to inject content into the request."""
path = args.input_path
if os.path.isfile(path):
request.inputContent = files.ReadBinaryFileContents(path)
elif storage_util.ObjectReference.IsStorageUrl(path):
request.inputUri = path
else:
raise VideoUriFormatError(INPUT_ERROR_MESSAGE.format(path))
return request
|
#-----------------------------------------------------------------------------
# Copyright (c) 2014, HFTools Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import warnings
import numpy as np
import hftools.dataset.dim as ddim
from hftools.dataset.dim import DimSweep, ComplexDiagAxis, ComplexIndepAxis,\
ComplexDerivAxis, dims_has_complex, info_has_complex
from hftools.testing import TestCase, make_load_tests
from hftools.dataset import hfarray
from hftools.utils import reset_hftools_warnings, HFToolsDeprecationWarning
basepath = os.path.split(__file__)[0]
load_tests = make_load_tests(ddim)
class Test_Dim_init(TestCase):
def setUp(self):
self.dim = DimSweep("a", 10, unit="Hz")
def test_1(self):
a = DimSweep(self.dim)
self.assertEqual(a, self.dim)
self.assertEqual(a.name, "a")
self.assertEqual(a.unit, "Hz")
self.assertAllclose(a.data, range(10))
def test_2(self):
a = DimSweep(self.dim, data=range(5))
self.assertEqual(a.name, "a")
self.assertEqual(a.unit, "Hz")
self.assertAllclose(a.data, range(5))
def test_3(self):
a = DimSweep(self.dim, unit="m")
self.assertEqual(a.name, "a")
self.assertEqual(a.unit, "m")
self.assertAllclose(a.data, range(10))
def test_4(self):
a = DimSweep(self.dim, name="P")
self.assertEqual(a.name, "P")
self.assertEqual(a.unit, "Hz")
self.assertAllclose(a.data, range(10))
def test_5(self):
a = DimSweep(self.dim, name="P", unit="W", data=range(3))
self.assertEqual(a.name, "P")
self.assertEqual(a.unit, "W")
self.assertAllclose(a.data, range(3))
def test_6(self):
a = DimSweep("P", data=set([0, 1, 2]), unit="W")
self.assertEqual(a.name, "P")
self.assertEqual(a.unit, "W")
self.assertAllclose(sorted(a.data), range(3))
def test_7(self):
a = DimSweep("P", data=np.array([0, 1, 2]), unit="W")
self.assertEqual(a.name, "P")
self.assertEqual(a.unit, "W")
self.assertAllclose(a.data, range(3))
def test_hfarray(self):
a = hfarray(self.dim)
self.assertAllclose(a, range(10))
self.assertEqual(a.dims, (DimSweep("a", 10, unit="Hz"),))
class Test_Dim(TestCase):
cls = ddim.DimBase
def _helper(self, name, indata, finaldata):
d = self.cls("a", indata)
self.assertEqual(d.name, "a")
self.assertTrue(np.allclose(d.data, np.array(finaldata)))
self.assertEqual(d.fullsize(), len(finaldata))
def test_1(self):
self._helper("a", 1, [0])
def test_2(self):
self._helper("a", 3, [0, 1, 2])
def test_3(self):
self._helper("a", [1, 2, 3], [1, 2, 3])
def test_4(self):
self._helper("a", [[1, 2, 3]], [1, 2, 3])
def test_cmp_1(self):
a = self.cls("a", 0)
b = self.cls("b", 4)
self.assertTrue(a < b)
self.assertFalse(a > b)
def test_cmp_2(self):
a = self.cls("a", 4)
b = self.cls("a", 4)
self.assertEqual(a, b)
def test_cmp_3(self):
a = self.cls("a", 4)
b = "a"
self.assertEqual(a, b)
def test_cmp_4(self):
a = self.cls("a", 4)
b = "b"
res = a < b
self.assertTrue(res)
def test_slice_1(self):
a = self.cls("a", 10)
b = a[::2]
self.assertEqual(b.name, a.name)
self.assertTrue(np.allclose(a.data, np.arange(10)))
self.assertTrue(np.allclose(b.data, np.arange(10)[::2]))
self.assertEqual(a.fullsize(), 10)
self.assertEqual(b.fullsize(), 5)
def test_data(self):
a = self.cls("a", 2)
self.assertRaises(AttributeError, setattr, a, "data", "b")
def test_name(self):
a = self.cls("a", 2)
self.assertRaises(AttributeError, setattr, a, "name", "b")
def test_unit(self):
a = self.cls("a", 2)
self.assertRaises(AttributeError, setattr, a, "unit", "b")
def test_hash(self):
a = self.cls("a", 2)
hashres = hash((self.cls.sortprio, "a", self.cls, (0, 1), None, None))
self.assertEqual(hash(a), hashres)
def test_get_1(self):
a = self.cls("a", 10)
self.assertEqual(a[:], a)
def test_get_2(self):
a = self.cls("a", 10, unit="Hz")
b = a[::2]
self.assertEqual(b.data.tolist(), a.data.tolist()[::2])
self.assertEqual(b.unit, a.unit)
self.assertEqual(b.name, a.name)
def test_get_3(self):
a = self.cls("a", 10, unit="Hz")
b = a[np.arange(10) < 5]
self.assertEqual(b.data.tolist(), list(range(5)))
self.assertEqual(b.unit, a.unit)
self.assertEqual(b.name, a.name)
def test_get_4(self):
a = self.cls("a", 10, unit="Hz")
self.assertRaises(IndexError, lambda x: x[0], a)
class Test_DimSweep(Test_Dim):
cls = ddim.DimSweep
class Test_DimRep(Test_Dim):
cls = ddim.DimRep
class Test_DimMatrix(Test_Dim):
cls = ddim._DimMatrix
class Test_DimMatrix_i(Test_Dim):
cls = ddim.DimMatrix_i
class Test_DimMatrix_j(Test_Dim):
cls = ddim.DimMatrix_j
class TestDiag(Test_Dim):
cls = ddim.DiagAxis
indep = ddim.IndepAxis
deriv = ddim.DerivAxis
def test_indep_1(self):
a = self.cls("a", 10)
self.assertIsInstance(a.indep_axis, self.indep)
self.assertIsInstance(a.indep_axis.diag_axis, self.cls)
self.assertIsInstance(a.indep_axis.deriv_axis, self.deriv)
self.assertEqual(a.indep_axis.name, a.name)
self.assertEqual(a.indep_axis.unit, a.unit)
self.assertAllclose(a.indep_axis.data, a.data)
def test_indep_2(self):
a = self.cls("a", 10)
self.assertIsInstance(a.deriv_axis, self.deriv)
self.assertIsInstance(a.deriv_axis.diag_axis, self.cls)
self.assertIsInstance(a.deriv_axis.indep_axis, self.indep)
self.assertEqual(a.deriv_axis.name, a.name)
self.assertEqual(a.deriv_axis.unit, a.unit)
self.assertAllclose(a.deriv_axis.data, a.data)
class TestDiag_Complex(Test_Dim):
cls = ddim.ComplexDiagAxis
indep = ddim.ComplexIndepAxis
deriv = ddim.ComplexDerivAxis
class TestDiag_Matrix_i(Test_Dim):
cls = ddim.DimMatrix_i
indep = ddim.DimMatrix_Indep_i
deriv = ddim.DimMatrix_Deriv_i
class TestDiag_Matrix_j(Test_Dim):
cls = ddim.DimMatrix_j
indep = ddim.DimMatrix_Indep_j
deriv = ddim.DimMatrix_Deriv_j
class Test_dims_has_complex(TestCase):
def _helper(self, dims):
self.assertTrue(dims_has_complex(dims))
def _helper_false(self, dims):
self.assertFalse(dims_has_complex(dims))
def test_1(self):
self._helper_false((DimSweep("d", 3),))
def test_2(self):
self._helper((DimSweep("d", 3), ComplexDiagAxis("cplx", 2)))
def test_3(self):
self._helper((DimSweep("d", 3), ComplexIndepAxis("cplx", 2)))
def test_4(self):
self._helper((DimSweep("d", 3), ComplexDerivAxis("cplx", 2)))
def test_5(self):
self._helper((DimSweep("d", 3), ComplexIndepAxis("cplx", 2),
ComplexDerivAxis("cplx", 2)))
class Test_info_has_complex(Test_dims_has_complex):
def _helper(self, dims):
reset_hftools_warnings()
self.assertHFToolsDeprecationWarning(info_has_complex, dims)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("ignore", HFToolsDeprecationWarning)
self.assertTrue(info_has_complex(dims))
def _helper_false(self, dims):
reset_hftools_warnings()
self.assertHFToolsDeprecationWarning(info_has_complex, dims)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("ignore", HFToolsDeprecationWarning)
self.assertFalse(info_has_complex(dims))
class TestDimConv(TestCase):
a = ddim.DimBase
b = ddim.DimBase
def setUp(self):
self.A = self.a("A", [0], unit="Hz")
self.B = self.b("B", [0], unit="N")
def test_self_conv_1(self):
OBJ = self.a(self.A)
self.assertEqual(OBJ.name, self.A.name)
self.assertEqual(OBJ.data, self.A.data)
self.assertEqual(OBJ.unit, self.A.unit)
self.assertIsInstance(OBJ, self.a)
def test_self_conv_2(self):
OBJ = self.b(self.B)
self.assertEqual(OBJ.name, self.B.name)
self.assertEqual(OBJ.data, self.B.data)
self.assertEqual(OBJ.unit, self.B.unit)
self.assertIsInstance(OBJ, self.b)
def test_1(self):
OBJ = self.a(self.B)
self.assertEqual(OBJ.name, self.B.name)
self.assertEqual(OBJ.data, self.B.data)
self.assertEqual(OBJ.unit, self.B.unit)
self.assertIsInstance(OBJ, self.a)
def test_2(self):
OBJ = self.b(self.A)
self.assertEqual(OBJ.name, self.A.name)
self.assertEqual(OBJ.data, self.A.data)
self.assertEqual(OBJ.unit, self.A.unit)
self.assertIsInstance(OBJ, self.b)
def test_3(self):
OBJ = self.a(self.B, unit="Pa")
self.assertEqual(OBJ.name, self.B.name)
self.assertEqual(OBJ.data, self.B.data)
self.assertEqual(OBJ.unit, "Pa")
self.assertIsInstance(OBJ, self.a)
def test_4(self):
OBJ = self.b(self.A, unit="s")
self.assertEqual(OBJ.name, self.A.name)
self.assertEqual(OBJ.data, self.A.data)
self.assertEqual(OBJ.unit, "s")
self.assertIsInstance(OBJ, self.b)
class TestDimConv1(TestDimConv):
a = ddim.DimSweep
b = ddim.DimRep
class Object:
pass
class TestDate(TestCase):
def test_date_1(self):
res = np.array(["2012-05-30 12:12:31", "2012-05-31 12:12:31"],
np.dtype("datetime64[us]"))
d = DimSweep("a", res)
self.assertEqual(d.data.dtype, np.dtype("datetime64[us]"))
def test_obj(self):
d = DimSweep("a", np.array([Object(), Object()]))
self.assertEqual(d.data.dtype, np.object)
def test_empty(self):
d = DimSweep("a", [])
self.assertEqual(d.data.dtype, np.float64)
class Testoutputformat(TestCase):
def test_int(self):
d = DimSweep("a", [1, 2, 3])
self.assertEqual(d.outputformat, "%d")
def test_num(self):
d = DimSweep("a", [1., 2., 3])
self.assertEqual(d.outputformat, "%.16e")
def test_str(self):
d = DimSweep("a", ["a"])
self.assertEqual(d.outputformat, "%s")
|
import optimus_manager.envs as envs
import optimus_manager.var as var
import optimus_manager.checks as checks
import optimus_manager.pci as pci
from optimus_manager.acpi_data import ACPI_STRINGS
from optimus_manager.bash import exec_bash, BashError
class KernelSetupError(Exception):
pass
def setup_kernel_state(config, requested_gpu_mode):
assert requested_gpu_mode in ["intel", "nvidia", "hybrid"]
if requested_gpu_mode == "intel":
_setup_intel_mode(config)
elif requested_gpu_mode == "nvidia":
_setup_nvidia_mode(config)
elif requested_gpu_mode == "hybrid":
_setup_hybrid_mode(config)
def _setup_intel_mode(config):
# Resetting the system to its base state
_set_base_state(config)
# Power switching according to the switching backend
if config["optimus"]["switching"] == "nouveau":
try:
_load_nouveau(config)
except KernelSetupError as e:
print("ERROR : cannot load nouveau. Moving on. Error is : %s" % str(e))
elif config["optimus"]["switching"] == "bbswitch":
_set_bbswitch_state("OFF")
elif config["optimus"]["switching"] == "acpi_call":
_try_set_acpi_call_state("OFF")
elif config["optimus"]["switching"] == "none":
_try_custom_set_power_state("OFF")
# PCI remove
if config["optimus"]["pci_remove"] == "yes":
switching_mode = config["optimus"]["switching"]
if switching_mode == "nouveau" or switching_mode == "bbswitch":
print("%s is selected, pci_remove option ignored." % switching_mode)
else:
print("Removing Nvidia from PCI bus")
_try_remove_pci()
# PCI power control
if config["optimus"]["pci_power_control"] == "yes":
switching_mode = config["optimus"]["switching"]
if switching_mode == "bbswitch" or switching_mode == "acpi_call":
print("%s is enabled, pci_power_control option ignored." % switching_mode)
elif config["optimus"]["pci_remove"] == "yes":
print("pci_remove is enabled, pci_power_control option ignored." % switching_mode)
else:
_try_set_pci_power_state("auto")
def _setup_nvidia_mode(config):
_set_base_state(config)
_load_nvidia_modules(config)
def _setup_hybrid_mode(config):
_set_base_state(config)
_load_nvidia_modules(config)
def _set_base_state(config):
_unload_nvidia_modules()
_unload_nouveau()
switching_mode = config["optimus"]["switching"]
try:
if switching_mode == "bbswitch":
_load_bbswitch()
elif switching_mode == "acpi_call":
_load_acpi_call()
except KernelSetupError as e:
print("ERROR : error loading modules for %s. Continuing anyways. Error is : %s" % (switching_mode, str(e)))
if not checks.is_module_available(switching_mode):
print("%s is not available for the current kernel. Is the corresponding package installed ?")
if checks.is_module_loaded("bbswitch"):
_try_set_bbswitch_state("ON")
if checks.is_module_loaded("acpi_call"):
try:
last_acpi_call_state = var.read_last_acpi_call_state()
should_send_acpi_call = (last_acpi_call_state == "OFF")
except var.VarError:
should_send_acpi_call = False
if should_send_acpi_call:
_try_set_acpi_call_state("ON")
if not pci.is_nvidia_visible():
print("Nvidia card not visible in PCI bus, rescanning")
_try_rescan_pci()
_try_pci_reset(config)
if switching_mode == "bbswitch":
_load_bbswitch()
else:
_unload_bbswitch()
if switching_mode == "none":
_try_custom_set_power_state("ON")
_try_set_pci_power_state("on")
def _load_nvidia_modules(config):
print("Loading Nvidia modules")
pat_value = _get_PAT_parameter_value(config)
modeset_value = 1 if config["nvidia"]["modeset"] == "yes" else 0
try:
exec_bash("modprobe nvidia NVreg_UsePageAttributeTable=%d" % pat_value)
exec_bash("modprobe nvidia_drm modeset=%d" % modeset_value)
except BashError as e:
raise KernelSetupError("Cannot load Nvidia modules : %s" % str(e))
def _unload_nvidia_modules():
print("Unloading Nvidia modules (if any)")
try:
exec_bash("modprobe -r nvidia_drm nvidia_modeset nvidia_uvm nvidia")
except BashError as e:
print(e)
def _load_nouveau(config):
print("Loading nouveau module")
modeset_value = 1 if config["intel"]["modeset"] == "yes" else 0
try:
exec_bash("modprobe nouveau modeset=%d" % modeset_value)
except BashError as e:
raise KernelSetupError("Cannot load nouveau : %s" % str(e))
def _unload_nouveau():
print("Unloading nouveau module (if any)")
try:
exec_bash("modprobe -r nouveau")
except BashError as e:
print(e)
def _load_bbswitch():
if not checks.is_module_available("bbswitch"):
raise KernelSetupError("Module bbswitch not available for current kernel.")
print("Loading bbswitch module")
try:
exec_bash("modprobe bbswitch")
except BashError as e:
raise KernelSetupError("Cannot load bbswitch : %s" % str(e))
def _unload_bbswitch():
print("Unloading bbswitch module (if any)")
try:
exec_bash("modprobe -r bbswitch")
except BashError as e:
print(e)
def _load_acpi_call():
if not checks.is_module_available("acpi_call"):
raise KernelSetupError("Module acpi_call not available for current kernel.")
print("Loading acpi_call module")
try:
exec_bash("modprobe acpi_call")
except BashError as e:
raise KernelSetupError("Cannot load acpi_call : %s" % str(e))
def _get_PAT_parameter_value(config):
pat_value = {"yes": 1, "no": 0}[config["nvidia"]["PAT"]]
if not checks.is_pat_available():
print("Warning : Page Attribute Tables are not available on your system.\n"
"Disabling the PAT option for Nvidia.")
pat_value = 0
return pat_value
def _set_bbswitch_state(state):
assert state in ["OFF", "ON"]
print("Setting GPU power to %s via bbswitch" % state)
try:
with open("/proc/acpi/bbswitch", "w") as f:
f.write(state)
except FileNotFoundError:
raise KernelSetupError("Cannot open /proc/acpi/bbswitch")
except IOError:
raise KernelSetupError("Error writing to /proc/acpi/bbswitch")
def _set_acpi_call_state(state):
assert state in ["OFF", "ON"]
print("Setting GPU power to %s via acpi_call" % state)
try:
acpi_strings_list = var.read_acpi_call_strings()
print("Found saved ACPI strings")
except var.VarError:
acpi_strings_list = ACPI_STRINGS
print("No ACPI string saved, trying them all (expect kernel messages spam)")
working_strings = []
for off_str, on_str in acpi_strings_list:
string = off_str if state == "OFF" else on_str
try:
print("Sending ACPI string %s" % string)
with open("/proc/acpi/call", "w") as f:
f.write(string)
with open("/proc/acpi/call", "r") as f:
output = f.read()
except FileNotFoundError:
raise KernelSetupError("Cannot open /proc/acpi/call")
except IOError:
continue
if not "Error" in output:
print("ACPI string %s works, saving" % string)
working_strings.append((off_str, on_str))
var.write_last_acpi_call_state(state)
var.write_acpi_call_strings(working_strings)
def _try_remove_pci():
try:
pci.remove_nvidia()
except pci.PCIError as e:
print("ERROR : cannot remove Nvidia from PCI bus. Continuing. Error is : %s" % str(e))
def _try_rescan_pci():
try:
pci.rescan()
if not pci.is_nvidia_visible():
print("ERROR : Nvidia card not showing up in PCI bus after rescan. Continuing anyways.")
except pci.PCIError as e:
print("ERROR : cannot rescan PCI bus. Continuing. Error is : %s" % str(e))
def _try_set_pci_power_state(state):
try:
pci.set_power_state(state)
except pci.PCIError as e:
print("ERROR : cannot set PCI power management state. Continuing. Error is : %s" % str(e))
def _try_pci_reset(config):
try:
_unload_bbswitch()
_pci_reset(config)
except KernelSetupError as e:
print("ERROR : Nvidia PCI reset failed. Continuing. Error is : %s" % str(e))
def _try_set_acpi_call_state(state):
try:
_set_acpi_call_state(state)
except KernelSetupError as e:
print("ERROR : setting acpi_call to %s. Continuing anyways. Error is : %s" % (state, str(e)))
def _try_set_bbswitch_state(state):
try:
_set_bbswitch_state(state)
except KernelSetupError as e:
print("ERROR : setting bbswitch to %s. Continuing anyways. Error is : %s" % (state, str(e)))
def _pci_reset(config):
if config["optimus"]["pci_reset"] == "no":
return
try:
if config["optimus"]["pci_reset"] == "function_level":
print("Performing function-level reset of Nvidia")
pci.function_level_reset_nvidia()
elif config["optimus"]["pci_reset"] == "hot_reset":
print("Starting hot reset sequence")
pci.hot_reset_nvidia()
except pci.PCIError as e:
raise KernelSetupError("Failed to perform PCI reset : %s" % str(e))
def _try_custom_set_power_state(state):
if state == "ON":
script_path = envs.NVIDIA_MANUAL_ENABLE_SCRIPT_PATH
elif state == "OFF":
script_path = envs.NVIDIA_MANUAL_DISABLE_SCRIPT_PATH
print("Running %s" % script_path)
try:
exec_bash(script_path)
except BashError as e:
print("ERROR : cannot run %s. Continuing anyways. Error is : %s"
% (script_path, str(e)))
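# Illustrative shape of the config mapping consumed above (the keys mirror the
# lookups in this module; the values shown are assumptions for a
# bbswitch-based laptop, not project defaults):
# example_config = {
#     "optimus": {"switching": "bbswitch", "pci_remove": "no",
#                 "pci_power_control": "no", "pci_reset": "no"},
#     "nvidia": {"modeset": "yes", "PAT": "yes"},
#     "intel": {"modeset": "yes"},
# }
# setup_kernel_state(example_config, "intel")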
|
"""
A daemon that cleans up tasks from the service queues when a service is disabled/deleted.
When a service is turned off by the orchestrator or deleted by the user, the service task queue needs to be
emptied. The status of all the services will be periodically checked and any service that is found to be
disabled or deleted for which a service queue exists, the dispatcher will be informed that the task(s)
had an error.
"""
import time
from assemblyline.odm.models.error import Error
from assemblyline.common.isotime import now_as_iso
from assemblyline.common.constants import service_queue_name
from assemblyline_core.dispatching.client import DispatchClient
from assemblyline_core.server_base import CoreBase, ServiceStage
class Plumber(CoreBase):
def __init__(self, logger=None, shutdown_timeout: float = None, config=None,
redis=None, redis_persist=None, datastore=None, delay=60):
super().__init__('plumber', logger, shutdown_timeout, config=config, redis=redis,
redis_persist=redis_persist, datastore=datastore)
self.delay = float(delay)
self.dispatch_client = DispatchClient(datastore=self.datastore, redis=self.redis,
redis_persist=self.redis_persist, logger=self.log)
def try_run(self):
# Get an initial list of all the service queues
        # strip the 'service-queue-' prefix to recover the service name
        service_queues = {queue.decode('utf-8')[len('service-queue-'):]: None
                          for queue in self.redis.keys(service_queue_name('*'))}
while self.running:
self.heartbeat()
# Reset the status of the service queues
service_queues = {service_name: None for service_name in service_queues}
# Update the service queue status based on current list of services
for service in self.datastore.list_all_services(full=True):
service_queues[service.name] = service
for service_name, service in service_queues.items():
if not service or not service.enabled or self.get_service_stage(service_name) != ServiceStage.Running:
while True:
task = self.dispatch_client.request_work(None, service_name=service_name,
service_version='0', blocking=False)
if task is None:
break
error = Error(dict(
archive_ts=now_as_iso(self.config.datastore.ilm.days_until_archive * 24 * 60 * 60),
created='NOW',
expiry_ts=now_as_iso(task.ttl * 24 * 60 * 60) if task.ttl else None,
response=dict(
message='The service was disabled while processing this task.',
service_name=task.service_name,
service_version='0',
status='FAIL_NONRECOVERABLE',
),
sha256=task.fileinfo.sha256,
type="TASK PRE-EMPTED",
))
error_key = error.build_key(task=task)
self.dispatch_client.service_failed(task.sid, error_key, error)
# Wait a while before checking status of all services again
time.sleep(self.delay)
if __name__ == '__main__':
with Plumber() as server:
server.serve_forever()
|
from ctypes import byref, CDLL, c_int, c_size_t, POINTER
from ctypes.util import find_library
from errno import ENOENT, ESRCH, EALREADY
from socket import socket
libSystem = CDLL(find_library('System'))
class LaunchdSocketActivateError(Exception):
# from launch_activate_socket(3)
errors = {
ENOENT: "The socket name specified does not exist in the caller's launchd.plist(5).",
ESRCH: 'The calling process is not managed by launchd(8).',
EALREADY: 'The specified socket has already been activated.'
}
def __init__(self, errcode):
if errcode not in self.errors:
raise ValueError('unexpected error code')
        super().__init__(errcode)
self.errcode = errcode
def __str__(self):
return self.errors[self.errcode]
def launch_activate_socket(name: str):
fds = POINTER(c_int)()
count = c_size_t()
res = libSystem.launch_activate_socket(name.encode('utf-8'), byref(fds), byref(count))
if res:
raise LaunchdSocketActivateError(res)
sockets = [socket(fileno=fds[s]) for s in range(count.value)]
libSystem.free(fds)
return sockets
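# Minimal usage sketch (the socket name "Listeners" is a placeholder that must
# match a key under the Sockets dictionary of the caller's launchd.plist):
# if __name__ == '__main__':
#     for sock in launch_activate_socket('Listeners'):
#         conn, addr = sock.accept()
#         conn.sendall(b'hello from launchd\n')
#         conn.close()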
|
#Matthew Trahms
#EE 526
#5/19/21
#This function generates the low enable latches to store the write data address
#this file is called multiple times in the case of multiple regfiles
#syntax for the post latch address bits is defined in make_decoder.py
#clock signal expected is wr_addr_en_(RF#)
#takes:
#the python file interface to write the verilog output
#the index of the register file
#the number of entries
#the desired latch output width
#produces the line by line modules for all the latches
import math
from cell_map import low_latch
def make_wr_addr_latches(verilog_out, rf_idx, num_entries, latch_w):
latch_cell = (low_latch % latch_w)
addr_bits = int(math.ceil(math.log(num_entries, 2)))
#latch name template: latch_waddr_(RF#)_(ADDR BIT#)
name_templ = 'latch_waddr_'+str(rf_idx)+'_'
for i in range(addr_bits):
line = latch_cell + ' ' + name_templ + str(i) + ' (.D(wr_addr['
line += str(i) + ']), .EN(clk), .Q('
line += 'wr_addr_l_' + str(rf_idx) + '_' + str(i) + '));\n'
verilog_out.write(line)
return
if __name__ == '__main__':
f = open('wr_addr_latch_test.txt','w')
rf = 0
entries = 12
w = 1
make_wr_addr_latches(f, rf, entries, w)
f.close()
|
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
"""
https://www.w3schools.com/tags/tryit.asp?filename=tryhtml_td_bgcolor
"""
app = dash.Dash()
titleStyle = {'margin-bottom':'0.5in',
'font-size':'300%'}
title = html.H1(children="Making Bionic Better", style = titleStyle)
#################################################################################
#################################################################################
# Interaction
#################################################################################
#################################################################################
nameOfClass = html.Div([
dcc.Input(id='my-id', value='', type='text'),
html.Div(id='my-div')
])
weekdropDown = dcc.Dropdown(
options=[
{'label': 'Monday', 'value': 'Mo'},
{'label': 'Tuesday', 'value': 'Tu'},
{'label': 'Wednesday', 'value': 'We'},
{'label': 'Thursday', 'value': 'Th'},
{'label': 'Friday', 'value': 'Fr'}
],
value=[],
multi=True,
id='dropdown'
)
weekMeetLabelStyle = {'text-align':'justify', 'margin-top':'0.5in'}
weekMeetLabel = html.Label('Days of the week the class meets',
style=weekMeetLabelStyle)
weekMeetDays = html.Div([
weekMeetLabel,
weekdropDown
])
buttonStyle = {'margin-bottom': '0.6in'}
submitButton = html.Button('Update Class', id='button', style=buttonStyle)
interactions = [nameOfClass, weekMeetDays, submitButton]
interactStyle = {}
interaction = html.Div(interactions, style = interactStyle)
#################################################################################
#################################################################################
# End Interaction
#################################################################################
#################################################################################
responseStyle ={}
interactResponse = html.Div(id='interact-response',
style=responseStyle)
hruleStyle = {'margin-bottom':'0.5in'}
hrule = html.Hr(style=hruleStyle)
htmlBreak = html.Br()
page = html.Div(children=[
title,
htmlBreak,
hrule,
interaction,
htmlBreak,
hrule,
htmlBreak,
interactResponse
])
centerstyle = {'text-align':'center'}
myOverallStyle = {'columnCount':1,
'text-align':'center',
'margin':'auto',
'width':'50%',
'top':'0px',
'position':'relative',
'padding':'auto',
'font-family':'Times New Roman'
}
app.layout = html.Div([page],
style=myOverallStyle)
####################################################################################
####################################################################################
####################################################################################
# Callbacks
####################################################################################
####################################################################################
####################################################################################
@app.callback(
Output(component_id='my-div', component_property='children'),
[Input(component_id='my-id', component_property='value')]
)
def update_output_div(input_value):
return 'You\'ve entered "{}"'.format(input_value)
@app.callback(
Output(component_id='interact-response', component_property='children'),
[Input('button', 'n_clicks')],
[State('my-id', 'value'), State('dropdown', 'value')])
def buttonresponse(numclicks, textInput, dropdown):
titleColStyle = {
'text-align':'center',
'border-bottom':'2px solid #ddd',
'margin':'auto',
'position':'relative',
'padding':'auto',
}
titlecol = html.Tr([html.Td('Name'), html.Td('Class')],style = titleColStyle)
row1 = html.Tr([html.Td('Divesh'), html.Td('Algebra')])
    numclicks = 0 if numclicks is None else numclicks
row2 = html.Tr([html.Td('Matt'), html.Td('CS30' + str(numclicks))])
row3 = html.Tr([html.Td('Textfield'), html.Td(textInput)])
row4 = html.Tr([html.Td('DropDown'), html.Td(dropdown)])
tableStyle = {'border' : '3px solid black'}
simpleTable = html.Table([titlecol, row1, row2, row3, row4], style=tableStyle)
return simpleTable
if __name__ == '__main__':
app.run_server(debug=True)
|
"""Operators for creating Spiral curve shapes."""
import bpy
from bpy.types import Operator
from bpy.props import FloatProperty, IntProperty, StringProperty
from bpy_extras.object_utils import AddObjectHelper
from ..utils.functions import create_polar_coordinates, make_spline
from ..utils.ui import get_icon
SPIRAL_TYPE = "spiral"
class AddSpiralObject(Operator, AddObjectHelper): # pylint: disable=too-few-public-methods
"""Create a new Spiral object"""
bl_idname = "curve.add_spiral"
bl_label = "Spiral"
bl_options = {"REGISTER", "UNDO"}
def _update(self, context): # pylint: disable=no-self-use
"""Update the spline when a property changes."""
obj = context.object
if obj and obj.type in ["CURVE"]:
coords = create_polar_coordinates(obj.radius, obj.height, obj.resolution, obj.scalar, obj.loops)
make_spline(obj.data, coords, "POLY", True)
# bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
bpy.types.Object.radius = FloatProperty(
name="Radius",
description="Radius of the spiral",
default=1.0,
update=_update
)
bpy.types.Object.height = FloatProperty(
name="Height",
description="Height of the spiral",
default=1.0,
update=_update
)
bpy.types.Object.resolution = IntProperty(
name="Resolution",
description="Number of vertices in the spiral",
default=32,
min=3,
update=_update
)
bpy.types.Object.scalar = FloatProperty(
name="Scalar",
description="Scalar value along the spiral",
default=0.0,
update=_update
)
bpy.types.Object.loops = IntProperty(
name="Loops",
description="Amount of loops in the spiral",
default=2,
min=1,
update=_update
)
def execute(self, context): # pylint: disable=no-self-use
"""Create the new Spiral object."""
# Set up Curve Object
curve_obj = bpy.data.curves.new('myCurve', "CURVE")
curve_obj.dimensions = "3D"
obj = bpy.data.objects.new("Spiral", curve_obj)
bpy.context.collection.objects.link(obj)
# Set init properties
bpy.types.Object.my_type = StringProperty(options={"HIDDEN"})
obj.my_type = SPIRAL_TYPE
obj.resolution = 32
obj.height = 1.0
obj.radius = 1.0
obj.scalar = 0.0
obj.loops = 2
start_loc = bpy.context.scene.cursor.location
# Set up Curve Spline
pos_list = create_polar_coordinates(1.0, 1.0, 50, 0.0, 2, start_loc)
make_spline(curve_obj, pos_list, "POLY", False)
# Select Curve
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
return {'FINISHED'}
class SpiralPropertiesPanel(bpy.types.Panel):
"""Properties panel for Spiral objects."""
bl_idname = "OBJECT_PT_Spiral_Properties"
bl_label = "Spiral Properties"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
@classmethod
def poll(cls, context):
"""Check if selected object is a Spiral type."""
        return context.object and context.object.get("my_type") == SPIRAL_TYPE
def draw(self, context):
"""Draw the properties for the selected Spiral."""
obj = context.object
self.layout.prop(obj, "radius", text="Radius")
self.layout.prop(obj, "height", text="Height")
self.layout.prop(obj, "resolution", text="Resolution")
self.layout.prop(obj, "scalar", text="Scalar")
self.layout.prop(obj, "loops", text="Loops")
def menu_draw(self, context):
"""Draw the menu item for adding a new Spiral object."""
self.layout.operator(AddSpiralObject.bl_idname, icon_value=get_icon('spring_icon'))
REGISTER_CLASSES = (
AddSpiralObject,
SpiralPropertiesPanel
)
|
from teree.teree import Teree
|
import time
import copy
import math
import random
from dataclasses import dataclass
import pyxel
@dataclass
class Point:
x: float
y: float
def __eq__(self, other):
if not isinstance(other, Point):
return False
return self.x == other.x and self.y == other.y
def __repr__(self):
return f"Point={self.x, self.y}"
def distance(self, other: 'Point') -> float:
return math.sqrt((self.x - other.x) ** 2 + (self.y - other.y) ** 2)
@dataclass
class Segment:
start: Point
end: Point
def get_intersection(first_segment: Segment, second_segment: Segment) -> Point:
denom = (second_segment.end.x - second_segment.start.x) * (first_segment.start.y - first_segment.end.y) - (
first_segment.start.x - first_segment.end.x) * (second_segment.end.y - second_segment.start.y)
if denom == 0:
return Point(0, 0)
ta_num = (second_segment.start.y - second_segment.end.y) * (first_segment.start.x - second_segment.start.x) + (
second_segment.end.x - second_segment.start.x) * (first_segment.start.y - second_segment.start.y)
ta = ta_num / denom
tb_num = (first_segment.start.y - first_segment.end.y) * (first_segment.start.x - second_segment.start.x) + (
first_segment.end.x - first_segment.start.x) * (first_segment.start.y - second_segment.start.y)
tb = tb_num / denom
if 0 <= ta <= 1 and 0 <= tb <= 1:
x = first_segment.start.x + ta * (first_segment.end.x - first_segment.start.x)
y = first_segment.start.y + ta * (first_segment.end.y - first_segment.start.y)
return Point(x, y)
else:
return Point(0, 0)
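# Worked example (illustrative only): the diagonals of the unit square cross at
# their midpoint, so the routine returns Point(0.5, 0.5).
#   a = Segment(Point(0, 0), Point(1, 1))
#   b = Segment(Point(0, 1), Point(1, 0))
#   get_intersection(a, b)  # -> Point=(0.5, 0.5)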
class Brick:
def __init__(self, top_left=Point(0, 0), width=20, height=10, active=False):
self.top_left = copy.copy(top_left)
self.width = width
self.height = height
self.center = Point(top_left.x + width / 2, top_left.y + height / 2)
self.active = active
def __repr__(self):
return f"Brick(x={self.top_left.x}, y={self.top_left.y}, center={self.center.x, self.center.y})"
def draw(self):
if self.active:
pyxel.rect(int(self.top_left.x), int(self.top_left.y), self.width, self.height, pyxel.COLOR_LIGHTGRAY)
else:
pyxel.rectb(int(self.top_left.x), int(self.top_left.y), self.width, self.height, pyxel.COLOR_LIGHTGRAY)
def get_bottom_segment(self) -> Segment:
start_point = Point(self.top_left.x,
self.top_left.y + self.height)
end_point = Point(self.top_left.x + self.width,
self.top_left.y + self.height)
return Segment(start_point, end_point)
def get_left_segment(self) -> Segment:
start_point = Point(self.top_left.x,
self.top_left.y)
end_point = Point(self.top_left.x,
self.top_left.y + self.height)
return Segment(start_point, end_point)
def get_right_segment(self) -> Segment:
start_point = Point(self.top_left.x + self.width,
self.top_left.y)
end_point = Point(self.top_left.x + self.width,
self.top_left.y + self.height)
return Segment(start_point, end_point)
def get_top_segment(self) -> Segment:
start_point = Point(self.top_left.x,
self.top_left.y)
end_point = Point(self.top_left.x + self.width,
self.top_left.y)
return Segment(start_point, end_point)
class Ball:
def __init__(self, center=Point(0, 0), radius=1, x_velocity=0, y_velocity=0):
self.center = copy.copy(center)
self.previous = Point(0, 0)
self.radius = radius
self.x_velocity = x_velocity
self.y_velocity = y_velocity
def __repr__(self):
return f"Ball(center={self.center}, prev={self.previous}, radius={self.radius})"
def draw(self):
if self.radius > 1:
pyxel.circ(int(self.center.x), int(self.center.y), self.radius, pyxel.COLOR_LIME)
else:
pyxel.pix(int(self.center.x), int(self.center.y), pyxel.COLOR_LIME)
def update(self):
self.previous = copy.copy(self.center)
self.center.x += self.x_velocity
self.center.y += self.y_velocity
if self.x_velocity < -3:
self.x_velocity = -3
if self.x_velocity > 3:
self.x_velocity = 3
if self.y_velocity < -3:
self.y_velocity = -3
if self.y_velocity > 3:
self.y_velocity = 3
def out_of_bounds(self):
if (self.center.y + self.radius) >= pyxel.height:
return True
return False
def intersect_wall(self):
# wall bounces
if self.center.x <= 1:
self.x_velocity *= -1
self.center.x = 1
if self.center.x >= (pyxel.width - 1):
self.x_velocity *= -1
self.center.x = (pyxel.width - 1)
if self.center.y <= 1:
self.y_velocity *= -1
self.center.y = 1
def intersect_brick(self, brick: Brick):
movement_segment = Segment(start=self.previous, end=self.center)
if self.previous.x < brick.center.x:
# can only hit left side or top or bottom
left_intersection_point = get_intersection(movement_segment, brick.get_left_segment())
point_array = []
if left_intersection_point != Point(0, 0):
point_array.append((left_intersection_point,
self.center.distance(left_intersection_point),
-1 * self.x_velocity,
self.y_velocity))
if self.y_velocity > 0:
# can only hit left or top
top_intersection_point = get_intersection(movement_segment, brick.get_top_segment())
if top_intersection_point != Point(0, 0):
point_array.append((top_intersection_point,
self.center.distance(top_intersection_point),
self.x_velocity,
-1 * self.y_velocity))
elif self.y_velocity <= 0:
# can only hit left or bottom
bottom_intersection_point = get_intersection(movement_segment, brick.get_bottom_segment())
if bottom_intersection_point != Point(0, 0):
point_array.append((bottom_intersection_point,
self.center.distance(bottom_intersection_point),
self.x_velocity,
-1 * self.y_velocity))
try:
intersection_info = min(point_array, key=lambda k: k[1])
except ValueError:
print(f"{movement_segment=} {brick.get_left_segment()} {brick.get_top_segment()} {brick.get_bottom_segment()}")
pyxel.quit()
self.center = copy.copy(intersection_info[0])
self.x_velocity = intersection_info[2]
self.y_velocity = intersection_info[3]
else:
# can only hit right side or top or bottom
right_intersection_point = get_intersection(movement_segment, brick.get_right_segment())
point_array = []
if right_intersection_point != Point(0, 0):
point_array.append((right_intersection_point,
self.center.distance(right_intersection_point),
-1 * self.x_velocity,
self.y_velocity))
if self.y_velocity > 0:
# can only hit right or top
top_intersection_point = get_intersection(movement_segment, brick.get_top_segment())
if top_intersection_point != Point(0, 0):
point_array.append((top_intersection_point,
self.center.distance(top_intersection_point),
self.x_velocity,
-1 * self.y_velocity))
elif self.y_velocity <= 0:
# can only hit left or bottom
bottom_intersection_point = get_intersection(movement_segment, brick.get_bottom_segment())
if bottom_intersection_point != Point(0, 0):
point_array.append((bottom_intersection_point,
self.center.distance(bottom_intersection_point),
self.x_velocity,
-1 * self.y_velocity))
try:
intersection_info = min(point_array, key=lambda k: k[1])
except ValueError:
print(f"{movement_segment=} {brick.get_right_segment()} {brick.get_top_segment()} {brick.get_bottom_segment()}")
pyxel.quit()
self.center = copy.copy(intersection_info[0])
self.x_velocity = intersection_info[2]
self.y_velocity = intersection_info[3]
class Bar:
def __init__(self, location=Point(0, 0), width=20, height=3):
self.location = copy.copy(location)
self.width = width
self.height = height
self.center = Point(self.location.x + width / 2, self.location.y + height / 2)
self.velocity = 0
def draw(self):
pyxel.rect(int(self.location.x), int(self.location.y), self.width, self.height, pyxel.COLOR_GREEN)
def update(self):
if pyxel.btn(pyxel.KEY_LEFT) or pyxel.btn(pyxel.GAMEPAD_1_LEFT):
self.location.x = max(self.location.x - 2, 1)
self.center.x = self.location.x + self.width / 2
self.velocity = -1
if pyxel.btn(pyxel.KEY_RIGHT) or pyxel.btn(pyxel.GAMEPAD_1_RIGHT):
self.location.x = min(self.location.x + 2, pyxel.width - (1 + self.width))
self.center.x = self.location.x + self.width / 2
self.velocity = 1
def intersect_bar(ball: Ball, bar: Bar) -> bool:
if abs(ball.center.x - bar.center.x) < (bar.width + ball.radius) and \
abs(ball.center.y - bar.center.y) < (bar.height + ball.radius):
return True
return False
def intersect_brick(ball: Ball, brick: Brick) -> bool:
# top edge of ball less than top of brick or bottom of ball above bottom of brick
radius = ball.radius - 1
if abs(ball.center.x - brick.center.x) < (brick.width / 2 + radius) and \
abs(ball.center.y - brick.center.y) < (brick.height / 2 + radius):
print(f"{brick=} {ball=}")
return True
return False
class App:
def __init__(self):
pyxel.init(160, 120, caption="Fix it, Break it")
# pyxel.image(0).load(0, 0, "assets/pyxel_logo_38x16.png")
self.bar = Bar(location=Point(1, y=pyxel.height - (3 + 1)))
self.ball = Ball(center=Point(5, y=pyxel.height - (3 + 1) - 1 - self.bar.height),
x_velocity=random.randint(1, 2),
y_velocity=-1)
self.bricks = []
self.level = 1
self.mesg_time = 2
self.playing = False
self.paused = False
self.lose = False
self.win = False
self.debug = False
self.reset()
pyxel.run(self.update, self.draw)
def reset(self):
self.change_level(self.level)
self.mesg_time = 2
self.playing = False
self.paused = False
self.lose = False
self.win = False
self.debug = False
def update(self):
if pyxel.btnp(pyxel.KEY_Q):
pyxel.quit()
if pyxel.btnp(pyxel.KEY_P) and self.playing:
self.paused = not self.paused
if pyxel.btnp(pyxel.KEY_D):
self.debug = not self.debug
if pyxel.btnp(pyxel.KEY_1):
self.level = 1
        if pyxel.btnp(pyxel.KEY_2):
            self.level = 2
if pyxel.btnp(pyxel.KEY_S):
self.change_level(self.level)
self.playing = True
if self.paused or not self.playing:
return
if pyxel.btnp(pyxel.KEY_R):
self.lose = True
self.bar.update()
self.ball.update()
self.calculate_intersections()
if self.lose or self.win:
time.sleep(self.mesg_time)
self.reset()
if self.ball.out_of_bounds():
self.lose = True
all_bricks_active = True
for brick in self.bricks:
all_bricks_active &= brick.active
if all_bricks_active:
self.win = True
def calculate_intersections(self):
# bounce off bar
self.ball.intersect_wall()
if intersect_bar(self.ball, self.bar):
self.ball.y_velocity *= -1
self.ball.x_velocity += self.bar.velocity
for brick in self.bricks:
if intersect_brick(self.ball, brick):
brick.active = not brick.active
# adjust ball position if needed
print(f"ball_velocity = {self.ball.x_velocity}, {self.ball.y_velocity}")
self.ball.intersect_brick(brick)
print(f"ball_velocity = {self.ball.x_velocity}, {self.ball.y_velocity}")
def draw_debug(self):
pyxel.text(5, 5, f"ball x: {self.ball.center.x}, ball y: {self.ball.center.y}", pyxel.COLOR_NAVY)
pyxel.text(5, 10, f"ball vx: {self.ball.x_velocity}, ball vy: {self.ball.y_velocity}", pyxel.COLOR_NAVY)
pyxel.text(5, 15, f"bar x: {self.bar.location.x}, bar y: {self.bar.location.y}", pyxel.COLOR_NAVY)
def draw(self):
pyxel.cls(0)
if self.debug:
self.draw_debug()
if self.paused:
pyxel.text(60, 60, f"PAUSED", pyxel.COLOR_NAVY)
if not self.playing:
pyxel.text(55, 5, f"Fixit Breakit", pyxel.COLOR_STEELBLUE)
pyxel.text(5, 50, f"Use left/right arrow keys to move ", pyxel.COLOR_PEACH)
pyxel.text(5, 60, f"Press P to pause", pyxel.COLOR_STEELBLUE)
pyxel.text(5, 70, f"Press S to start a game", pyxel.COLOR_STEELBLUE)
pyxel.text(5, 80, f"Press Q to quit", pyxel.COLOR_STEELBLUE)
return
pyxel.rectb(0, 0, pyxel.width, pyxel.height, pyxel.COLOR_DARKGRAY)
if self.debug:
self.draw_debug()
self.bar.draw()
self.ball.draw()
for brick in self.bricks:
brick.draw()
if self.lose:
pyxel.text(40, 40, 'Game Over', pyxel.COLOR_RED)
if self.win:
pyxel.text(40, 40, 'You won!', pyxel.COLOR_RED)
def change_level(self, level=1):
self.bar.location = Point(1, y=pyxel.height - (3 + 1))
self.ball.center = Point(5, y=pyxel.height - (3 + 1) - 1 - self.bar.height)
self.ball.x_velocity=random.randint(1, 2)
self.ball.y_velocity=-1
if level == 1:
self.bricks = [Brick(Point(25, 30)),
Brick(Point(45, 30)),
Brick(Point(65, 30)),
Brick(Point(90, 30)),
Brick(Point(20, 50)),
Brick(Point(40, 50)),
Brick(Point(60, 50)),
Brick(Point(100, 50)),
Brick(Point(120, 50))]
elif level == 2:
self.bricks = [Brick(Point(60, 50)),
Brick(Point(100, 50)),
Brick(Point(120, 50))]
App()
|
from collections import Counter
import random
class MarkovChain:
def __init__(self):
self.probabilities = {}
self.start_probabilities = {}
self.symbols = set()
self.n = None
def fit(self, sequences, n):
self.n = n
# Collect n-grams and their next value
patterns = []
start_patterns = []
for sequence in sequences:
patterns += [(tuple(sequence[x: x+n]), sequence[x+n]) for x in range(len(sequence) - n)]
start_patterns.append(tuple(sequence[0: n]))
self.symbols = self.symbols.union(set(sequence))
# For each n-gram, calculate the probability distribution over the next output
tmp_probability_counter = {}
for (ngram, next_value) in patterns:
if ngram not in tmp_probability_counter:
tmp_probability_counter[ngram] = {}
if next_value not in tmp_probability_counter[ngram]:
tmp_probability_counter[ngram][next_value] = 1
else:
tmp_probability_counter[ngram][next_value] += 1
# Fill in the transition probabilities
for (ngram, next_counts) in tmp_probability_counter.items():
total_count = sum(next_counts.values())
self.probabilities[ngram] = {k:(v / total_count) for k, v in next_counts.items()}
# Also calculate the start probabilities
start_counter = Counter(start_patterns)
total_count_start = sum(start_counter.values())
self.start_probabilities = {k:(v / total_count_start) for k, v in start_counter.items()}
def next(self, ngram):
if ngram not in self.probabilities.keys():
return random.choice(list(self.symbols))
(next_symbols, probabilities) = zip(*self.probabilities[ngram].items())
next_symbol = random.choices(next_symbols, probabilities)[0]
return next_symbol
def generate(self, length):
# Determine starting sequence
start_sequences = list(self.start_probabilities.keys())
start_probabilities = list(self.start_probabilities.values())
start_ngram = random.choices(
population=start_sequences,
weights=start_probabilities,
k=1
)[0]
sequence = start_ngram
while len(sequence) < length:
cur_ngram = sequence[-self.n:]
sequence += (self.next(cur_ngram),)
return sequence
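# A minimal self-contained usage sketch (toy sequences, purely illustrative):
#   m = MarkovChain()
#   m.fit([['a', 'b', 'a', 'b', 'c'], ['a', 'b', 'c', 'a', 'b']], n=2)
#   print(m.generate(10))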
if __name__ == "__main__":
import pickle
sequences = pickle.load(open('/home/tom/projects/lstar/experiments/mutatingproblem12/counterexamples_Problem12_large.p', 'rb'))
m = MarkovChain()
m.fit(sequences, 3)
for i in range(100):
print(m.generate(50))
|
import os, sys, time
import numpy as np
import scipy.io as spio
import torch
from iflow.dataset.generic_dataset import Dataset
directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..','data')) + '/LASA_dataset/'
class LASA():
def __init__(self, filename, device=torch.device('cpu')):
## Define Variables and Load trajectories ##
self.filename = filename
self.dim = 2
self.device = device
mat = spio.loadmat(directory + filename + '.mat', squeeze_me=True)
self.trajs_real=[]
for demo_i in mat['demos']:
x = demo_i[0]
y = demo_i[1]
tr_i = np.stack((x,y))
self.trajs_real.append(tr_i.T)
trajs_np = np.asarray(self.trajs_real)
self.n_trajs = trajs_np.shape[0]
self.trj_length = trajs_np.shape[1]
self.n_dims = trajs_np.shape[2]
## Normalize trajectories ##
trajs_np = np.reshape(trajs_np, (self.n_trajs * self.trj_length, self.n_dims))
self.mean = np.mean(trajs_np,axis=0)
self.std = np.std(trajs_np, axis=0)
self.trajs_normalized = self.normalize(self.trajs_real)
## Build Train Dataset
self.train_data = []
for i in range(self.trajs_normalized.shape[0]):
self.train_data.append(self.trajs_normalized[i, ...])
self.dataset = Dataset(trajs=self.train_data, device=device)
def normalize(self, X):
Xn = (X - self.mean)/self.std
return Xn
def unormalize(self, Xn):
X = Xn*self.std + self.mean
return X
if __name__ == "__main__":
filename = 'Spoon'
device = torch.device('cpu')
lasa = LASA(filename, device)
print(lasa)
|
naam = input("What is your name? >>>")
adres = input("What is your address? >>>")
postcode = input("What is your postal code? >>>")
woonplaats = input("What is your city? >>>")
print("----------------------------------------------------")
print("| Name       : " + naam)
print("| Address    : " + adres)
print("| Postal code: " + postcode)
print("| City       : " + woonplaats)
print("----------------------------------------------------")
|
# Value Exists
has_repo = session.query(models.Repo.repo_id).filter_by(ext_repo_id=ext_repo_id).scalar() is not None
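# A self-contained sketch of the same pattern (assumptions: an in-memory SQLite
# database and a throwaway Repo model stand in for the real `models.Repo` and
# `session` above):
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Repo(Base):
    __tablename__ = 'repo'
    repo_id = Column(Integer, primary_key=True)
    ext_repo_id = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Repo(ext_repo_id='abc123'))
    session.commit()
    # .scalar() returns the first column of the first row, or None when no row matches
    has_repo = session.query(Repo.repo_id).filter_by(ext_repo_id='abc123').scalar() is not None
    print(has_repo)  # True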
|
import os
from collections import defaultdict
from . import clientutils, cmds
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class SearchEngine(object):
"""
Search class used for locating files on perforce.
    This class also records search history for faster lookups.
History is broken down by client first then search value.
"""
__slots__ = ('_history',)
def __init__(self):
"""
Private method called after a new instance has been created.
"""
# Call parent method
#
super(SearchEngine, self).__init__()
# Declare class variables
#
self._history = defaultdict(dict)
def history(self, client):
"""
Returns the search history for the given client.
:type client: str
:rtype: dict
"""
return self._history[client]
def searchClient(self, search, client=None):
"""
Locates the specified file against the supplied client.
If no client is provided then the current client is used instead.
:type search: str
:type client: str
:rtype: dict
"""
# Check if a client was supplied
#
if client is None:
client = os.environ['P4CLIENT']
# Check if client has history
#
history = self.history(client)
if search in history.keys():
return history[search]
# Collect files from client view
#
fileSpecs = cmds.files(search, client=client, ignoreDeleted=True)
if fileSpecs is not None:
history[search] = fileSpecs
return fileSpecs
else:
return []
def searchClients(self, search):
"""
Searches all of the available clients for the given file.
:type search: str
:rtype: dict
"""
# Iterate through clients
#
results = {}
for (client, clientSpec) in clientutils.iterClients():
# Check if client is associated with host
#
if clientSpec.host != os.environ['P4HOST']:
continue
# Find files
#
fileSpecs = self.searchClient(search, client=client)
if fileSpecs is not None:
results[client] = fileSpecs
return results
def clearHistory(self):
"""
Clears all of the accumulated search history.
        This is useful in case the user has been doing a lot of renaming through p4v.
:rtype: None
"""
log.info('Clearing search history...')
self._history.clear()
def findFile(search):
"""
Locates the supplied file using the search engine.
:type search: str
:rtype: list[dict]
"""
return __searchengine__.searchClient(search)
def clearHistory():
"""
Clears all of the accumulated search history.
    This is useful in case the user has been doing a lot of renaming through p4v.
:rtype: None
"""
__searchengine__.clearHistory()
__searchengine__ = SearchEngine()
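# A minimal usage sketch (assumptions: P4CLIENT and P4HOST are set in the
# environment and the perforce wrappers in `cmds`/`clientutils` are reachable):
#
#   fileSpecs = findFile('//depot/tools/example.py')
#   clearHistory()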
|
#
# Copyright 2019 Peifeng Yu <peifeng@umich.edu>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from collections import defaultdict
with open('exec.output') as f:
lines = f.readlines()
requests = defaultdict(set)
rrequests = defaultdict(set)
pat_evenlop = re.compile(r"Received request evenlop: EvenlopDef\(type='(?P<type>[a-zA-Z.]+)', seq=(?P<seq>\d+)")
pat_resp = re.compile(r"Response proto object have size \d+ with evenlop EvenlopDef\(type='(?P<type>[a-zA-Z.]+)', seq=(?P<seq>\d+)")
pat_dispatch = re.compile(r"Dispatching custom task executor.TFRendezRecvUpdate of seq (?P<seq>\d+)")
pat_recvupd = re.compile(r"executor.TFRendezRecvUpdate for seq (?P<seq>\d+)")
for line in lines:
if pat_evenlop.search(line):
res = pat_evenlop.search(line)
reqtype = res.group('type')
s = requests[res.group('type')]
if res.group('seq') in s:
print('Request got twice: ', line)
import ipdb; ipdb.set_trace()
continue
s.add(res.group('seq'))
elif pat_resp.search(line):
res = pat_resp.search(line)
reqtype = res.group('type')
if reqtype.endswith('Response'):
reqtype = reqtype.replace('Response', 'Request')
if reqtype == 'executor.TFRendezRecvRequests':
s = rrequests[reqtype]
if res.group('seq') in s:
print('Sending out twice requests: ', line)
continue
s.add(res.group('seq'))
continue
if reqtype not in requests:
print('Response for non-exist request: ', line)
import ipdb; ipdb.set_trace()
continue
s = requests[reqtype]
if res.group('seq') not in s:
print('Response for non-exist request seq: ', line)
import ipdb; ipdb.set_trace()
continue
s.remove(res.group('seq'))
elif pat_dispatch.search(line):
res = pat_dispatch.search(line)
s = requests['executor.CustomRequest']
if res.group('seq') not in s:
print('CustomRequest not found for TFRendezRecvUpdate')
import ipdb; ipdb.set_trace()
continue
s.remove(res.group('seq'))
elif pat_recvupd.search(line):
res = pat_recvupd.search(line)
s = rrequests['executor.TFRendezRecvRequests']
if res.group('seq') not in s:
print('Response for non-exist request seq: ', line)
import ipdb; ipdb.set_trace()
continue
s.remove(res.group('seq'))
continue
print('===========================================')
print('Remaining')
for k, v in requests.items():
print(k)
for seq in v:
print(' ', seq)
for k, v in rrequests.items():
print(k)
for seq in v:
print(' ', seq)
|
from typing import Optional, Union, Tuple, List
from warnings import warn
import os
import time
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import mechanicalsoup
import requests
import json
from chemicalc.utils import decode_base64_dict, find_nearest_idx
from chemicalc.file_mgmt import etc_file_dir, download_bluemuse_files
wmko_options = {
"instrument": ["lris", "deimos", "hires", "esi"],
"mag type": ["Vega", "AB"],
"filter": [
"sdss_r.dat",
"sdss_g.dat",
"sdss_i.dat",
"sdss_u.dat",
"sdss_z.dat",
"Buser_B.dat",
"Buser_V.dat",
"Cousins_R.dat",
"Cousins_I.dat",
],
"template": [
"O5V_pickles_1.fits",
"B5V_pickles_6.fits",
"A0V_pickles_9.fits",
"A5V_pickles_12.fits",
"F5V_pickles_16.fits",
"G5V_pickles_27.fits",
"K0V_pickles_32.fits",
"K5V_pickles_36.fits",
"M5V_pickles_44.fits",
],
"grating (DEIMOS)": ["600Z", "900Z", "1200G", "1200B"],
"grating (LRIS)": ["600/7500", "600/10000", "1200/9000", "400/8500", "831/8200"],
"grism (LRIS)": ["B300", "B600"],
"binning (DEIMOS)": ["1x1"],
"binning (LRIS)": ["1x1", "2x1", "2x2", "3x1"],
"binning (ESI)": ["1x1", "2x2", "2x1", "3x1"],
"binning (HIRES)": ["1x1", "2x1", "2x2", "3x1"],
"slitwidth (DEIMOS)": ["0.75", "1.0", "1.5"],
"slitwidth (LRIS)": ["0.7", "1.0", "1.5"],
"slitwidth (ESI)": ["0.75", "0.3", "0.5", "1.0"],
"slitwidth (HIRES)": ["C5", "E4", "B2", "B5", "E5", "D3"],
"slitwidth arcsec (HIRES)": [1.15, 0.40, 0.57, 0.86, 0.80, 1.72],
"dichroic (LRIS)": ["D560"],
"central wavelength (DEIMOS)": ["5000", "6000", "7000", "8000"],
}
mmt_options = {
"inst_mode": [
"BINOSPEC_1000",
"BINOSPEC_270",
"BINOSPEC_600",
"HECTOSPEC_270",
"HECTOSPEC_600",
],
"template": [
"O5V",
"A0V",
"A5V",
"B0V",
"F0V",
"F5V",
"G0V",
"G2V",
"K0V",
"K5V",
"M5V",
"Moon",
],
"filter": ["r_filt", "g_filt", "i_filt"],
"aptype": ["Round", "Square", "Rectangular"],
}
mse_options = {
"spec_mode": ["LR", "MR", "HR"],
"airmass": ["1.0", "1.2", "1.5"],
"filter": ["u", "g", "r", "i", "z", "Y", "J"],
"src_type": ["extended", "point"],
"template": [
"o5v",
"o9v",
"b1v",
"b2ic",
"b3v",
"b8v",
"b9iii",
"b9v",
"a0iii",
"a0v",
"a2v",
"f0v",
"g0i",
"g2v",
"g5iii",
"k2v",
"k7v",
"m2v",
"flat",
"WD",
"LBG_EW_le_0",
"LBG_EW_0_20",
"LBG_EW_ge_20",
"qso1",
"qso2",
"elliptical",
"spiral_Sc",
"HII",
"PN",
],
}
vlt_options = {
"instruments": ["UVES", "FLAMES-UVES", "FLAMES-GIRAFFE", "X-SHOOTER", "MUSE"],
"src_target_mag_band (MUSE)": [
"B",
"V",
"R",
"I",
"sloan_g_prime",
"sloan_r_prime",
"sloan_i_prime",
"sloan_z_prime",
],
"src_target_mag_band (GIRAFFE)": ["U", "B", "V", "R", "I",],
"src_target_mag_band (UVES)": ["U", "B", "V", "R", "I",],
"src_target_mag_band (X-SHOOTER)": ["U", "B", "V", "R", "I", "J", "H", "K",],
"src_target_mag_system": ["Vega", "AB"],
"src_target_type": ["template_spectrum"],
"src_target_spec_type": [
"Pickles_O5V",
"Pickles_O9V",
"Kurucz_B1V",
"Pickles_B2IV",
"Kurucz_B3V",
"Kurucz_B8V",
"Pickles_B9III",
"Pickles_B9V",
"Pickles_A0III",
"Pickles_A0V",
"Kurucz_A1V",
"Kurucz_F0V",
"Pickles_G0V",
"Kurucz_G2V",
"Pickles_K2V",
"Pickles_K7V",
"Pickles_M2V",
"Planetary Nebula",
"HII Region (ORION)",
"Kinney_ell",
"Kinney_s0",
"Kinney_sa",
"Kinney_sb",
"Kinney_starb1",
"Kinney_starb2",
"Kinney_starb3",
"Kinney_starb4",
"Kinney_starb5",
"Kinney_starb6",
"Galev_E",
"qso-interp",
],
"sky_seeing": ["0.5", "0.6", "0.7", "0.8", "1.0", "1.3", "3.0"],
"uves_det_cd_name": [
"Blue_346",
"Blue_437",
"Red__520",
"Red__580",
"Red__600",
"Red__860",
"Dicroic1_Blue_346",
"Dicroic2_Blue_346",
"Dicroic1_Red__580",
"Dicroic1_Blue_390",
"Dicroic2_Blue_390",
"Dicroic1_Red__564",
"Dicroic2_Blue_437",
"Dicroic2_red__760",
"Dicroic2_Red__860",
],
"uves_slit_width": [
"0.3",
"0.4",
"0.5",
"0.6",
"0.7",
"0.8",
"0.9",
"1.0",
"1.1",
"1.2",
"1.5",
"1.8",
"2.1",
"2.4",
"2.7",
"3.0",
"5.0",
"10.0",
],
"uves_ccd_binning": ["1x1", "1x1v", "2x2", "2x1", "3x2"],
"giraffe_sky_sampling_mode": ["MEDUSA", "IFU052", "ARGUS052", "ARGUS030",],
"giraffe_slicer": [
"LR01",
"LR02",
"LR03",
"LR04",
"LR05",
"LR06",
"LR07",
"LR08",
"HR01",
"HR02",
"HR03",
"HR04",
"HR05A",
"HR05B",
"HR06",
"HR07A",
"HR07B",
"HR08",
"HR09A",
"HR09B",
"HR10",
"HR11",
"HR12",
"HR13",
"HR14A",
"HR14B",
"HR15",
"HR15n",
"HR16",
"HR17A",
"HR17B",
"HR17B",
"HR18",
"HR19A",
"HR19B",
"HR20A",
"HR20B",
"HR21",
"HR22A",
"HR22B",
],
"giraffe_ccd_mode": ["standard", "fast", "slow"],
"xshooter_uvb_slitwidth": ["0.5", "0.8", "1.0", "1.3", "1.6", "5.0"],
"xshooter_vis_slitwidth": ["0.4", "0.7", "0.9", "1.2", "1.5", "5.0"],
"xshooter_nir_slitwidth": ["0.4", "0.6", "0.9", "1.2", "1.5", "5.0"],
"xshooter_uvb_ccd_binning": [
"high1x1slow",
"high1x2slow",
"high2x2slow",
"low1x1fast",
"low1x2fast",
"low2x2fast",
],
"xshooter_vis_ccd_binning": [
"high1x1slow",
"high1x2slow",
"high2x2slow",
"low1x1fast",
"low1x2fast",
"low2x2fast",
],
"muse_mode": [
"WFM_NONAO_N", # Wide Field Mode without AO, nominal wavelength range
"WFM_NONAO_E", # Wide Field Mode without AO, extended wavelength range
"WFM_AO_N", # Wide Field Mode with AO, nominal wavelength range
"WFM_AO_E", # Wide Field Mode with AO, extended wavelength range
"NFM_AO_N",
], # Narrow Field Mode with AO, nominal wavelength range
"muse_spatial_binning": ["1", "2", "3", "4", "5", "10", "30", "60", "100"],
"muse_spectra_binning": [
"1",
"2",
"3",
"4",
"5",
"10",
"20",
"30",
"40",
"50",
"100",
"200",
"400",
"800",
"1600",
"3200",
],
}
lco_options = {
"template": ["flat", "O5V", "B0V", "A0V", "F0V", "G0V", "K0V", "M0V"],
"tempfilter": ["u", "g", "r", "i", "z"],
"telescope": ["MAGELLAN1", "MAGELLAN2"],
"MAGELLAN1_instrument": ["IMACS", "MAGE"],
"MAGELLAN2_instrument": ["LDSS3", "MIKE"],
"IMACS_mode": [
"F2_150_11",
"F2_200_15",
"F2_300_17",
"F2_300_26",
"F4_150-3_3.4",
"F4_300-4_6.0",
"F4_600-8_9.3",
"F4_600-13_14.0",
"F4_1200-17_19.0",
"F4_1200-27_27.0",
"F4_1200-27_33.5",
],
"MAGE_mode": ["ECHELLETTE"],
"MIKE_mode": ["BLUE", "RED"],
"LDSS3_mode": ["VPHALL", "VPHBLUE", "VPHRED"],
"binspat": ["1", "2", "3", "4", "5", "6", "7", "8"],
"binspec": ["1", "2", "3", "4", "5", "6", "7", "8"],
"nmoon": [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
],
}
class Sig2NoiseQuery:
"""
Base class for ETC queries
"""
def __init__(self):
pass
def query_s2n(self) -> None:
pass
class Sig2NoiseWMKO(Sig2NoiseQuery):
"""
Superclass for WMKO ETC Queries
:param str instrument: Keck instrument. Must be "DEIMOS", "LRIS", "HIRES", or "ESI"
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
instrument: str,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseQuery.__init__(self)
if instrument not in wmko_options["instrument"]:
raise KeyError(f"{instrument} not one of {wmko_options['instrument']}")
if magtype not in wmko_options["mag type"]:
raise KeyError(f"{magtype} not one of {wmko_options['mag type']}")
if band not in wmko_options["filter"]:
raise KeyError(f"{band} not one of {wmko_options['filter']}")
if template not in wmko_options["template"]:
raise KeyError(f"{template} not one of {wmko_options['template']}")
self.instrument = instrument
self.mag = mag
self.magtype = magtype
self.filter = band
self.template = template
self.exptime = exptime
self.airmass = airmass
self.seeing = seeing
self.redshift = redshift
def query_s2n(self) -> None:
"""
No generic S/N query, see specific instrument subclasses
:return:
"""
raise NotImplementedError(
"No generic S/N query, see specific instrument children classes"
)
class Sig2NoiseDEIMOS(Sig2NoiseWMKO):
"""
Keck/DEIMOS S/N Query (http://etc.ucolick.org/web_s2n/deimos)
:param str grating: DEIMOS grating. Must be one of "600Z", "900Z", "1200G", or "1200B".
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param str cwave: Central wavelength of grating. Must be one of "5000", "6000", "7000", or "8000"
:param str slitwidth: Width of slit in arcseconds. Must be "0.75", "1.0", or "1.5"
:param str binning: spatial x spectral binning. "1x1" is the only option.
    :param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
grating: str,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
cwave: str = "7000",
slitwidth: str = "0.75",
binning: str = "1x1",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseWMKO.__init__(
self,
"deimos",
exptime,
mag,
template,
magtype,
band,
airmass,
seeing,
redshift,
)
if grating not in wmko_options["grating (DEIMOS)"]:
raise KeyError(f"{grating} not one of {wmko_options['grating (DEIMOS)']}")
if binning not in wmko_options["binning (DEIMOS)"]:
raise KeyError(f"{binning} not one of {wmko_options['binning (DEIMOS)']}")
if slitwidth not in wmko_options["slitwidth (DEIMOS)"]:
raise KeyError(
f"{slitwidth} not one of {wmko_options['slitwidth (DEIMOS)']}"
)
if cwave not in wmko_options["central wavelength (DEIMOS)"]:
raise KeyError(
f"{cwave} not one of {wmko_options['central wavelength (DEIMOS)']}"
)
self.grating = grating
self.binning = binning
self.slitwidth = slitwidth
self.cwave = cwave
def query_s2n(self):
"""
Query the DEIMOS ETC (http://etc.ucolick.org/web_s2n/deimos)
:return:
"""
url = "http://etc.ucolick.org/web_s2n/deimos"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form["grating"] = self.grating
form["cwave"] = self.cwave
form["slitwidth"] = self.slitwidth
form["binning"] = self.binning
form["exptime"] = str(self.exptime)
form["mag"] = str(self.mag)
form["ffilter"] = self.filter
if self.magtype.lower() == "vega":
form["mtype"] = "1"
elif self.magtype.lower() == "ab":
form["mtype"] = "2"
form["seeing"] = str(self.seeing)
form["template"] = self.template
form["airmass"] = str(self.airmass)
form["redshift"] = str(self.redshift)
data = browser.submit_selected().json()
snr = np.array(data["s2n"]).T
return snr
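# A minimal usage sketch (parameter values are illustrative assumptions only;
# query_s2n() contacts the live UCO/Lick ETC and returns the S/N array parsed
# from the web form response):
#   q = Sig2NoiseDEIMOS("1200G", exptime=3600, mag=19.5,
#                       template="K0V_pickles_32.fits", band="Cousins_I.dat")
#   snr = q.query_s2n()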
class Sig2NoiseLRIS(Sig2NoiseWMKO):
"""
Keck/LRIS S/N Query (http://etc.ucolick.org/web_s2n/lris)
:param str grating: LRIS red arm grating.
Must be one of "600/7500", "600/10000", "1200/9000", "400/8500", or "831/8200".
:param str grism: LRIS blue arm grism. Must be one of "B300" or "B600".
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param str dichroic: LRIS dichroic separating the red and blue arms. "D560" is the only option currently.
:param str slitwidth: Width of slit in arcseconds. Must be one of "0.7", "1.0", or "1.5"
:param str binning: spatial x spectral binning. Must be one of "1x1", "2x1", "2x2", or "3x1"
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
grating: str,
grism: str,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
dichroic: str = "D560",
slitwidth: str = "0.7",
binning: str = "1x1",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseWMKO.__init__(
self,
"lris",
exptime,
mag,
template,
magtype,
band,
airmass,
seeing,
redshift,
)
if grating not in wmko_options["grating (LRIS)"]:
raise KeyError(f"{grating} not one of {wmko_options['grating (LRIS)']}")
if grism not in wmko_options["grism (LRIS)"]:
raise KeyError(f"{grism} not one of {wmko_options['grism (LRIS)']}")
if binning not in wmko_options["binning (LRIS)"]:
raise KeyError(f"{binning} not one of {wmko_options['binning (LRIS)']}")
if slitwidth not in wmko_options["slitwidth (LRIS)"]:
raise KeyError(f"{slitwidth} not one of {wmko_options['slitwidth (LRIS)']}")
if dichroic not in wmko_options["dichroic (LRIS)"]:
raise KeyError(f"{dichroic} not one of {wmko_options['dichroic (LRIS)']}")
self.grating = grating
self.grism = grism
self.binning = binning
self.slitwidth = slitwidth
self.dichroic = dichroic
def query_s2n(self):
"""
Query the LRIS ETC (http://etc.ucolick.org/web_s2n/lris)
:return:
"""
url = "http://etc.ucolick.org/web_s2n/lris"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form["grating"] = self.grating
form["grism"] = self.grism
form["dichroic"] = self.dichroic
form["slitwidth"] = self.slitwidth
form["binning"] = self.binning
form["exptime"] = str(self.exptime)
form["mag"] = str(self.mag)
form["ffilter"] = self.filter
if self.magtype.lower() == "vega":
form["mtype"] = "1"
elif self.magtype.lower() == "ab":
form["mtype"] = "2"
form["seeing"] = str(self.seeing)
form["template"] = self.template
form["airmass"] = str(self.airmass)
form["redshift"] = str(self.redshift)
data = browser.submit_selected().json()
snr = np.array(data["s2n"]).T
return snr
class Sig2NoiseESI(Sig2NoiseWMKO):
"""
Keck/ESI S/N Query (http://etc.ucolick.org/web_s2n/esi)
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param str slitwidth: Width of slit in arcseconds. Must be one of "0.75", "0.3", "0.5", or "1.0"
:param str binning: spatial x spectral binning. Must be one of "1x1", "2x1", "2x2", or "3x1"
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
slitwidth: str = "0.75",
binning: str = "1x1",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseWMKO.__init__(
self,
"lris",
exptime,
mag,
template,
magtype,
band,
airmass,
seeing,
redshift,
)
if binning not in wmko_options["binning (ESI)"]:
raise KeyError(f"{binning} not one of {wmko_options['binning (ESI)']}")
if slitwidth not in wmko_options["slitwidth (ESI)"]:
raise KeyError(f"{slitwidth} not one of {wmko_options['slitwidth (ESI)']}")
self.binning = binning
self.slitwidth = slitwidth
def query_s2n(self):
"""
Query the ESI ETC (http://etc.ucolick.org/web_s2n/esi)
:return:
"""
url = "http://etc.ucolick.org/web_s2n/esi"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form["slitwidth"] = self.slitwidth
form["binning"] = self.binning
form["exptime"] = str(self.exptime)
form["mag"] = str(self.mag)
form["ffilter"] = self.filter
if self.magtype.lower() == "vega":
form["mtype"] = "1"
elif self.magtype.lower() == "ab":
form["mtype"] = "2"
form["seeing"] = str(self.seeing)
form["template"] = self.template
form["airmass"] = str(self.airmass)
form["redshift"] = str(self.redshift)
data = browser.submit_selected().json()
snr = np.array(data["s2n"]).T
return snr
class Sig2NoiseHIRES(Sig2NoiseWMKO):
"""
Keck/HIRES S/N Query (http://etc.ucolick.org/web_s2n/hires)
:param str slitwidth: HIRES Decker. Must be "C5" (1.15"), "E4" (0.40"), "B2" (0.57"),
"B5" (0.86"), "E5" (0.80"), or "D3" (1.72")
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param str binning: spatial x spectral binning. Must be one of "1x1", "2x1", "2x2", or "3x1".
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
slitwidth: str,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
binning: str = "1x1",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseWMKO.__init__(
self,
"hires",
exptime,
mag,
template,
magtype,
band,
airmass,
seeing,
redshift,
)
if binning not in wmko_options["binning (HIRES)"]:
raise KeyError(f"{binning} not one of {wmko_options['binning (HIRES)']}")
if slitwidth not in wmko_options["slitwidth (HIRES)"]:
raise KeyError(
f"{slitwidth} not one of {wmko_options['slitwidth (HIRES)']}"
)
self.binning = binning
self.slitwidth = slitwidth
def query_s2n(self):
"""
Query the HIRES ETC (http://etc.ucolick.org/web_s2n/hires)
:return:
"""
url = "http://etc.ucolick.org/web_s2n/hires"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form["slitwidth"] = self.slitwidth
form["binning"] = self.binning
form["exptime"] = str(self.exptime)
form["mag"] = str(self.mag)
form["ffilter"] = self.filter
if self.magtype.lower() == "vega":
form["mtype"] = "1"
elif self.magtype.lower() == "ab":
form["mtype"] = "2"
form["seeing"] = str(self.seeing)
form["template"] = self.template
form["airmass"] = str(self.airmass)
form["redshift"] = str(self.redshift)
data = browser.submit_selected().json()
snr = np.array(data["s2n"]).T
return snr
class Sig2NoiseHectoBinoSpec(Sig2NoiseQuery):
"""
MMT/Hectospec and MMT/Binospec S/N Query (http://hopper.si.edu/etc-cgi/TEST/sao-etc)
:param str inst_mode: Instrument and mode.
One of: "BINOSPEC_1000", "BINOSPEC_270", "BINOSPEC_600", "HECTOSPEC_270", or "HECTOSPEC_600"
:param float exptime: Exposure time in seconds
:param float mag: AB Magnitude of source
:param str band: Magnitude band. One of "r_filt", "g_filt", or "i_filt"
:param str template: Spectral template. For valid options see s2n.mmt_options['template'].
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float airmass: Airmass of observation
:param float moonage: Moon Phase (days since new moon)
:param str aptype: Aperture shape. Must be one of "Round", "Square", or "Rectangular".
:param float apwidth: Width of aperture in arcseconds
"""
def __init__(
self,
inst_mode: str,
exptime: float,
mag: float,
band: str = "g_filt",
template: str = "K0V",
seeing: float = 0.75,
airmass: float = 1.1,
moonage: float = 0.0,
aptype: str = "Round",
apwidth: float = 1.0,
):
Sig2NoiseQuery.__init__(self)
if inst_mode not in mmt_options["inst_mode"]:
raise KeyError(f"{inst_mode} not one of {mmt_options['inst_mode']}")
self.inst_mode = inst_mode
self.exptime = exptime
self.mag = mag
if band not in mmt_options["filter"]:
raise KeyError(f"{band} not one of {mmt_options['filter']}")
self.band = band
if template not in mmt_options["template"]:
raise KeyError(f"{template} not one of {mmt_options['template']}")
self.template = template
self.seeing = seeing
self.airmass = airmass
self.moonage = moonage
if aptype not in mmt_options["aptype"]:
raise KeyError(f"{aptype} not one of {mmt_options['aptype']}")
self.aptype = aptype
self.apwidth = apwidth
def query_s2n(self):
"""
Query the Hectospec/Binospec ETC (http://hopper.si.edu/etc-cgi/TEST/sao-etc)
:return:
"""
url = "http://hopper.si.edu/etc-cgi/TEST/sao-etc"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="instmode", value="")
form.new_control(type="select", name="objspec_", value="")
form.new_control(type="select", name="objspec__", value="")
form["instmode"] = self.inst_mode
form["exptime"] = self.exptime
form["ABmag"] = self.mag
form["bandfilter"] = self.band
form["objspec_"] = "Stars"
form["objspec__"] = self.template
form["objspec"] = f"Stars/{self.template}.tab"
form["srcext"] = 0.0
form["seeing"] = self.seeing
form["airmass"] = self.airmass
form["moonage"] = self.moonage
form["aptype"] = self.aptype
form["apwidth"] = self.apwidth
data = browser.submit_selected()
snr_text = data.text.split("---")[-1]
snr = pd.DataFrame([row.split("\t") for row in snr_text.split("\n")[1:-1]])
snr.index = snr.pop(0)
snr.drop([1, 2, 3, 4], axis=1, inplace=True)
snr = np.vstack([snr.index.values, snr[5].values]).astype(float)
snr[0] *= 1e4
return snr
class Sig2NoiseVLT(Sig2NoiseQuery):
"""
Superclass for VLT ETC Queries
:param str instrument: VLT instrument. Must be "UVES", "FLAMES-UVES", "FLAMES-GIRAFFE", "X-SHOOTER", or "MUSE"
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (<instrument>)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
"""
# TODO: Implement MARCS stellar template selection
def __init__(
self,
instrument: str,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
**kwargs,
):
Sig2NoiseQuery.__init__(self)
if instrument not in vlt_options["instruments"]:
raise KeyError(f"{instrument} not one of {vlt_options['instruments']}")
if not exptime > 0:
raise ValueError("Exposure Time must be positive")
if magtype not in vlt_options["src_target_mag_system"]:
raise KeyError(
f"{magtype} not one of {vlt_options['src_target_mag_system']}"
)
if template_type not in vlt_options["src_target_type"]:
raise KeyError(
f"{template_type} not one of {vlt_options['src_target_type']}"
)
if template not in vlt_options["src_target_spec_type"]:
raise KeyError(
f"{template} not one of {vlt_options['src_target_spec_type']}"
)
if not redshift >= 0:
raise ValueError("Redshift must be positive")
if not airmass >= 1.0:
raise ValueError("Airmass must be > 1.0")
if moon_phase < 0.0 or moon_phase > 1.0:
raise ValueError("moon_phase must be between 0.0 (new) and 1.0 (full)")
if seeing not in vlt_options["sky_seeing"]:
raise KeyError(f"{seeing} not one of {vlt_options['sky_seeing']}")
self.instrument = instrument
self.exptime = exptime
self.mag = mag
self.band = band
self.magtype = magtype
self.template_type = template_type
self.template = template
self.redshift = redshift
self.airmass = airmass
self.moon_phase = moon_phase
self.seeing = seeing
self.kwargs = kwargs
def query_s2n(self) -> None:
"""
No generic S/N query, see specific instrument subclasses
:return:
"""
raise NotImplementedError(
"No generic S/N query, see specific instrument children classes"
)
class Sig2NoiseUVES(Sig2NoiseVLT):
"""
VLT/UVES S/N Query (http://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES++INS.MODE=spectro)
:param str detector: UVES detector setup. For valid options see s2n.vlt_options['uves_det_cd_name'].
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (UVES)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param str slitwidth: Width of slit in arcseconds. For valid options see s2n.vlt_options['uves_slit_width'].
:param str binning: spatial x spectral binning. For valid options see s2n.vlt_options['uves_ccd_binning'].
:param bool mid_order_only: If True, returns only peak S/N in each order.
        Otherwise, the S/N at both ends of each order is also included.
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
"""
def __init__(
self,
detector: str,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
slitwidth: str = "1.0",
binning: str = "1x1",
mid_order_only: bool = False,
**kwargs,
):
Sig2NoiseVLT.__init__(
self,
"UVES",
exptime,
mag,
band,
magtype,
template_type,
template,
redshift,
airmass,
moon_phase,
seeing,
**kwargs,
)
self.url = "http://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES++INS.MODE=spectro"
if self.band not in vlt_options["src_target_mag_band (UVES)"]:
raise KeyError(
f"{src_target_mag_band} not one of {vlt_options['src_target_mag_band (UVES)']}"
)
if detector not in vlt_options["uves_det_cd_name"]:
raise KeyError(f"{detector} not one of {vlt_options['uves_det_cd_name']}")
if slitwidth not in vlt_options["uves_slit_width"]:
raise KeyError(f"{slitwidth} not one of {vlt_options['uves_slit_width']}")
if binning not in vlt_options["uves_ccd_binning"]:
raise KeyError(f"{binning} not one of {vlt_options['uves_ccd_binning']}")
self.detector = detector
self.slitwidth = slitwidth
self.binning = binning
self.mid_order_only = mid_order_only
self.data = None
def query_s2n(self):
"""
Query the UVES ETC (http://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES++INS.MODE=spectro)
:return:
"""
url = self.url
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="SRC.TARGET.MAG.BAND", value="")
form.new_control(type="select", name="SKY.SEEING.ZENITH.V", value="")
form["POSTFILE.FLAG"] = 0
# Source Parameters
form["SRC.TARGET.MAG"] = self.mag
form["SRC.TARGET.MAG.BAND"] = self.band
form["SRC.TARGET.MAG.SYSTEM"] = self.magtype
form["SRC.TARGET.TYPE"] = self.template_type
form["SRC.TARGET.SPEC.TYPE"] = self.template
form["SRC.TARGET.REDSHIFT"] = self.redshift
form["SRC.TARGET.GEOM"] = "seeing_ltd"
# Sky Parameters
form["SKY.AIRMASS"] = self.airmass
form["SKY.MOON.FLI"] = self.moon_phase
form["USR.SEEING.OR.IQ"] = "seeing_given"
form["SKY.SEEING.ZENITH.V"] = self.seeing
# Default Sky Background
form["almanac_time_option"] = "almanac_time_option_ut_time"
form["SKYMODEL.TARGET.ALT"] = 65.38
form["SKYMODEL.MOON.SUN.SEP"] = 0
# Instrument Specifics
form["INS.NAME"] = "UVES"
form["INS.MODE"] = "spectro"
form["INS.PRE_SLIT.FILTER.NAME"] = "ADC"
form["INS.IMAGE_SLICERS.NAME"] = "None"
form["INS.BELOW_SLIT.FILTER.NAME"] = "NONE"
form["INS.DET.SPECTRAL_FORMAT.NAME"] = "STANDARD"
form["INS.DET.CD.NAME"] = self.detector
form["INS.SLIT.FROM_USER.WIDTH.VAL"] = self.slitwidth
form["INS.DET.CCD.BINNING.VAL"] = self.binning
form["INS.DET.EXP.TIME.VAL"] = self.exptime
form["INS.GEN.TABLE.SF.SWITCH.VAL"] = "yes"
form["INS.GEN.TABLE.RES.SWITCH.VAL"] = "yes"
form["INS.GEN.GRAPH.S2N.SWITCH.VAL"] = "yes"
for key in self.kwargs:
form[key] = self.kwargs[key]
self.data = browser.submit_selected()
if self.mid_order_only:
snr = self.parse_etc_mid()
else:
snr = self.parse_etc()
return snr
def parse_etc(self):
mit_tab1 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[1].split("</table>")[0]
)[0]
mit_tab1.columns = mit_tab1.loc[0]
mit_tab1.drop(0, axis=0, inplace=True)
mit_tab2 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[2].split("</table>")[0]
)[0]
mit_tab2.columns = mit_tab2.loc[1]
mit_tab2.drop([0, 1], axis=0, inplace=True)
eev_tab1 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[3].split("</table>")[0]
)[0]
eev_tab1.columns = eev_tab1.loc[0]
eev_tab1.drop(0, axis=0, inplace=True)
eev_tab2 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[4].split("</table>")[0]
)[0]
eev_tab2.columns = eev_tab2.loc[1]
eev_tab2.drop([0, 1], axis=0, inplace=True)
mit_wave_mid = mit_tab1["wav of central column (nm)"]
mit_wave_min = mit_tab1["FSR l Min (nm)"]
mit_wave_max = mit_tab1["FSR l Max (nm)"]
mit_snr_min = mit_tab2["S/N*"].iloc[:, 0]
mit_snr_mid = mit_tab2["S/N*"].iloc[:, 1]
mit_snr_max = mit_tab2["S/N*"].iloc[:, 2]
eev_wave_mid = eev_tab1["wav of central column (nm)"]
eev_wave_min = eev_tab1["FSR l Min (nm)"]
eev_wave_max = eev_tab1["FSR l Max (nm)"]
eev_snr_min = eev_tab2["S/N*"].iloc[:, 0]
eev_snr_mid = eev_tab2["S/N*"].iloc[:, 1]
eev_snr_max = eev_tab2["S/N*"].iloc[:, 2]
mit_wave = pd.concat([mit_wave_min, mit_wave_mid, mit_wave_max])
mit_snr = pd.concat([mit_snr_min, mit_snr_mid, mit_snr_max])
mit_snr.index = mit_wave
mit_snr.sort_index(inplace=True)
mit_snr = mit_snr.groupby(mit_snr.index).max()
eev_wave = pd.concat([eev_wave_min, eev_wave_mid, eev_wave_max])
eev_snr = pd.concat([eev_snr_min, eev_snr_mid, eev_snr_max])
eev_snr.index = eev_wave
eev_snr.sort_index(inplace=True)
eev_snr = eev_snr.groupby(eev_snr.index).max()
uves_snr = pd.concat([eev_snr, mit_snr])
uves_snr = np.vstack([uves_snr.index.values, uves_snr.iloc[:].values]).astype(
float
)
uves_snr[0] *= 10  # convert wavelengths from nm to Angstrom
return uves_snr
def parse_etc_mid(self):
snr_url1 = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[1].split('" TITLE')[0]
)
snr_url2 = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[2].split('" TITLE')[0]
)
snr_txt1 = requests.post(snr_url1).text
snr_txt2 = requests.post(snr_url2).text
snr1 = pd.DataFrame([row.split("\t") for row in snr_txt1.split("\n")[:-1]])
snr2 = pd.DataFrame([row.split("\t") for row in snr_txt2.split("\n")[:-1]])
uves_snr = pd.concat([snr1, snr2])
uves_snr.index = uves_snr.pop(0)
uves_snr.sort_index(inplace=True)
uves_snr = np.vstack([uves_snr.index.values, uves_snr[1].values]).astype(float)
uves_snr[0] *= 10
return uves_snr
class Sig2NoiseFLAMESUVES(Sig2NoiseVLT):
"""
VLT/FLAMES-UVES S/N Query (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES+INS.MODE=FLAMES)
:param str detector: UVES detector setup. For valid options see s2n.vlt_options['uves_det_cd_name'].
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (UVES)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param bool mid_order_only: If True, returns only peak S/N in each order.
Otherwise the S/N at both ends of each order are also included.
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
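A minimal usage sketch (illustrative values; the detector setup is drawn from
s2n.vlt_options['uves_det_cd_name'] rather than hard-coded)::

    detector = sorted(vlt_options["uves_det_cd_name"])[0]  # any valid detector setup
    flames = Sig2NoiseFLAMESUVES(detector, exptime=2400, mag=17.5)
    snr = flames.query_s2n()  # 2 x N array: row 0 wavelength (Angstrom), row 1 S/N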
"""
def __init__(
self,
detector: str,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
mid_order_only: bool = False,
**kwargs,
):
Sig2NoiseVLT.__init__(
self,
"FLAMES-UVES",
exptime,
mag,
band,
magtype,
template_type,
template,
redshift,
airmass,
moon_phase,
seeing,
**kwargs,
)
self.url = "https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES+INS.MODE=FLAMES"
if self.band not in vlt_options["src_target_mag_band (UVES)"]:
raise KeyError(
f"{src_target_mag_band} not one of {vlt_options['src_target_mag_band (UVES)']}"
)
if detector not in vlt_options["uves_det_cd_name"]:
raise KeyError(f"{detector} not one of {vlt_options['uves_det_cd_name']}")
self.detector = detector
self.mid_order_only = mid_order_only
self.data = None
def query_s2n(self):
"""
Query the FLAMES-UVES ETC (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES+INS.MODE=FLAMES)
:return np.ndarray: Wavelength (Angstrom) and S/N as a 2 x N array
"""
url = self.url
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="SRC.TARGET.MAG.BAND", value="")
form.new_control(type="select", name="SKY.SEEING.ZENITH.V", value="")
form["POSTFILE.FLAG"] = 0
# Source Parameters
form["SRC.TARGET.MAG"] = self.mag
form["SRC.TARGET.MAG.BAND"] = self.band
form["SRC.TARGET.MAG.SYSTEM"] = self.magtype
form["SRC.TARGET.TYPE"] = self.template_type
form["SRC.TARGET.SPEC.TYPE"] = self.template
form["SRC.TARGET.REDSHIFT"] = self.redshift
form["SRC.TARGET.GEOM"] = "seeing_ltd"
# Sky Parameters
form["SKY.AIRMASS"] = self.airmass
form["SKY.MOON.FLI"] = self.moon_phase
form["USR.SEEING.OR.IQ"] = "seeing_given"
form["SKY.SEEING.ZENITH.V"] = self.seeing
# Default Sky Background
form["almanac_time_option"] = "almanac_time_option_ut_time"
form["SKYMODEL.TARGET.ALT"] = 65.38
form["SKYMODEL.MOON.SUN.SEP"] = 0
# Instrument Specifics
form["INS.NAME"] = "UVES"
form["INS.MODE"] = "FLAMES"
form["INS.DET.CD.NAME"] = self.detector
form["INS.DET.EXP.TIME.VAL"] = self.exptime
form["INS.GEN.TABLE.SF.SWITCH.VAL"] = "yes"
form["INS.GEN.TABLE.RES.SWITCH.VAL"] = "yes"
form["INS.GEN.GRAPH.S2N.SWITCH.VAL"] = "yes"
for key in self.kwargs:
form[key] = self.kwargs[key]
self.data = browser.submit_selected()
if self.mid_order_only:
snr = self.parse_etc_mid()
else:
snr = self.parse_etc()
return snr
def parse_etc(self):
mit_tab1 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[1].split("</table>")[0]
)[0]
mit_tab1.columns = mit_tab1.loc[0]
mit_tab1.drop(0, axis=0, inplace=True)
mit_tab2 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[2].split("</table>")[0]
)[0]
mit_tab2.columns = mit_tab2.loc[1]
mit_tab2.drop([0, 1], axis=0, inplace=True)
eev_tab1 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[3].split("</table>")[0]
)[0]
eev_tab1.columns = eev_tab1.loc[0]
eev_tab1.drop(0, axis=0, inplace=True)
eev_tab2 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[4].split("</table>")[0]
)[0]
eev_tab2.columns = eev_tab2.loc[1]
eev_tab2.drop([0, 1], axis=0, inplace=True)
mit_wave_mid = mit_tab1["wav of central column (nm)"]
mit_wave_min = mit_tab1["FSR l Min (nm)"]
mit_wave_max = mit_tab1["FSR l Max (nm)"]
mit_snr_min = mit_tab2["S/N*"].iloc[:, 0]
mit_snr_mid = mit_tab2["S/N*"].iloc[:, 1]
mit_snr_max = mit_tab2["S/N*"].iloc[:, 2]
eev_wave_mid = eev_tab1["wav of central column (nm)"]
eev_wave_min = eev_tab1["FSR l Min (nm)"]
eev_wave_max = eev_tab1["FSR l Max (nm)"]
eev_snr_min = eev_tab2["S/N*"].iloc[:, 0]
eev_snr_mid = eev_tab2["S/N*"].iloc[:, 1]
eev_snr_max = eev_tab2["S/N*"].iloc[:, 2]
mit_wave = pd.concat([mit_wave_min, mit_wave_mid, mit_wave_max])
mit_snr = pd.concat([mit_snr_min, mit_snr_mid, mit_snr_max])
mit_snr.index = mit_wave
mit_snr.sort_index(inplace=True)
mit_snr = mit_snr.groupby(mit_snr.index).max()
eev_wave = pd.concat([eev_wave_min, eev_wave_mid, eev_wave_max])
eev_snr = pd.concat([eev_snr_min, eev_snr_mid, eev_snr_max])
eev_snr.index = eev_wave
eev_snr.sort_index(inplace=True)
eev_snr = eev_snr.groupby(eev_snr.index).max()
uves_snr = pd.concat([eev_snr, mit_snr])
uves_snr = np.vstack([uves_snr.index.values, uves_snr.iloc[:].values]).astype(
float
)
uves_snr[0] *= 10  # convert wavelengths from nm to Angstrom
return uves_snr
def parse_etc_mid(self):
snr_url1 = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[1].split('" TITLE')[0]
)
snr_url2 = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[2].split('" TITLE')[0]
)
snr_txt1 = requests.post(snr_url1).text
snr_txt2 = requests.post(snr_url2).text
snr1 = pd.DataFrame([row.split("\t") for row in snr_txt1.split("\n")[:-1]])
snr2 = pd.DataFrame([row.split("\t") for row in snr_txt2.split("\n")[:-1]])
uves_snr = pd.concat([snr1, snr2])
uves_snr.index = uves_snr.pop(0)
uves_snr.sort_index(inplace=True)
uves_snr = np.vstack([uves_snr.index.values, uves_snr[1].values]).astype(float)
uves_snr[0] *= 10
return uves_snr
class Sig2NoiseFLAMESGIRAFFE(Sig2NoiseVLT):
"""
VLT/FLAMES-GIRAFFE S/N Query (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=GIRAFFE+INS.MODE=spectro)
:param str slicer: GIRAFFE slicer. For valid options see s2n.vlt_options['giraffe_slicer'].
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (GIRAFFE)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param str sky_sampling_mode: Fiber Mode. Must be one of "MEDUSA", "IFU052", "ARGUS052", or "ARGUS030".
:param str ccd_mode: CCD readout mode. Must be one of "standard", "fast", or "slow"
:param float fiber_obj_decenter: Displacement of source from fiber center (from 0.0 to 0.6).
Only applicable if sky_sampling_mode="MEDUSA".
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
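A minimal usage sketch (illustrative values; the slicer is drawn from
s2n.vlt_options['giraffe_slicer'] rather than hard-coded)::

    slicer = sorted(vlt_options["giraffe_slicer"])[0]  # any valid slicer
    giraffe = Sig2NoiseFLAMESGIRAFFE(slicer, exptime=1200, mag=18.0)
    snr = giraffe.query_s2n()  # 2 x N array: row 0 wavelength, row 1 S/N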
"""
def __init__(
self,
slicer: str,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
sky_sampling_mode="MEDUSA",
ccd_mode="standard",
fiber_obj_decenter=0.0,
**kwargs,
):
Sig2NoiseVLT.__init__(
self,
"FLAMES-GIRAFFE",
exptime,
mag,
band,
magtype,
template_type,
template,
redshift,
airmass,
moon_phase,
seeing,
**kwargs,
)
self.url = "https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=GIRAFFE+INS.MODE=spectro"
if self.band not in vlt_options["src_target_mag_band (GIRAFFE)"]:
raise KeyError(
f"{src_target_mag_band} not one of {vlt_options['src_target_mag_band (GIRAFFE)']}"
)
if slicer not in vlt_options["giraffe_slicer"]:
raise KeyError(f"{slicer} not one of {vlt_options['giraffe_slicer']}")
if sky_sampling_mode not in vlt_options["giraffe_sky_sampling_mode"]:
raise KeyError(
f"{sky_sampling_mode} not one of {vlt_options['giraffe_sky_sampling_mode']}"
)
if ccd_mode not in vlt_options["giraffe_ccd_mode"]:
raise KeyError(f"{ccd_mode} not one of {vlt_options['giraffe_ccd_mode']}")
if not fiber_obj_decenter >= 0:
raise ValueError("giraffe_fiber_obj_decenter must be positive")
self.slicer = slicer
self.sky_sampling_mode = sky_sampling_mode
self.ccd_mode = ccd_mode
self.fiber_obj_decenter = fiber_obj_decenter
self.data = None
def query_s2n(self):
"""
Query the FLAMES-GIRAFFE ETC (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=GIRAFFE+INS.MODE=spectro)
:return np.ndarray: Wavelength and S/N as a 2 x N array
"""
url = self.url
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="SRC.TARGET.MAG.BAND", value="")
form.new_control(type="select", name="SKY.SEEING.ZENITH.V", value="")
form["POSTFILE.FLAG"] = 0
# Source Parameters
form["SRC.TARGET.MAG"] = self.mag
form["SRC.TARGET.MAG.BAND"] = self.band
form["SRC.TARGET.MAG.SYSTEM"] = self.magtype
form["SRC.TARGET.TYPE"] = self.template_type
form["SRC.TARGET.SPEC.TYPE"] = self.template
form["SRC.TARGET.REDSHIFT"] = self.redshift
form["SRC.TARGET.GEOM"] = "seeing_ltd"
# Sky Parameters
form["SKY.AIRMASS"] = self.airmass
form["SKY.MOON.FLI"] = self.moon_phase
form["USR.SEEING.OR.IQ"] = "seeing_given"
form["SKY.SEEING.ZENITH.V"] = self.seeing
# Default Sky Background
form["almanac_time_option"] = "almanac_time_option_ut_time"
form["SKYMODEL.TARGET.ALT"] = 65.38
form["SKYMODEL.MOON.SUN.SEP"] = 0
# Instrument Specifics
form["INS.NAME"] = "GIRAFFE"
form["INS.MODE"] = "spectro"
form["INS.SKY.SAMPLING.MODE"] = self.sky_sampling_mode
form["INS.GIRAFFE.FIBER.OBJ.DECENTER"] = self.fiber_obj_decenter
if self.slicer[:2] == "LR":
form["INS.GIRAFFE.RESOLUTION"] = "LR"
form["INS.IMAGE.SLICERS.NAME.LR"] = self.slicer
elif self.slicer[:2] == "HR":
form["INS.GIRAFFE.RESOLUTION"] = "HR"
form["INS.IMAGE.SLICERS.NAME.HR"] = self.slicer
else:
raise RuntimeError(f"{self.slicer} should start with either 'LR' or 'HR'")
form["DET.CCD.MODE"] = self.ccd_mode
form["USR.OUT.MODE"] = "USR.OUT.MODE.EXPOSURE.TIME"
form["USR.OUT.MODE.EXPOSURE.TIME"] = self.exptime
form["USR.OUT.DISPLAY.SN.V.WAVELENGTH"] = "1"
for key in self.kwargs:
form[key] = self.kwargs[key]
self.data = browser.submit_selected()
snr = self.parse_etc()
return snr
def parse_etc(self):
snr_url = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[1].split('" TITLE')[0]
)
snr_txt = requests.post(snr_url).text
snr = pd.DataFrame([row.split(" ") for row in snr_txt.split("\n")[:-1]])
snr.index = snr.pop(0)
snr.sort_index(inplace=True)
snr = np.vstack([snr.index.values, snr[1].values]).astype(float)
snr[0] *= 10
return snr
class Sig2NoiseXSHOOTER(Sig2NoiseVLT):
"""
VLT/X-SHOOTER S/N Query (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=X-SHOOTER+INS.MODE=spectro)
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (X-SHOOTER)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param str uvb_slitwidth: Width of UVB spectrograph slit in arcseconds.
For valid options see s2n.vlt_options['xshooter_uvb_slitwidth'].
:param str vis_slitwidth: Width of VIS spectrograph slit in arcseconds.
For valid options see s2n.vlt_options['xshooter_vis_slitwidth'].
:param str nir_slitwidth: Width of NIR spectrograph slit in arcseconds.
For valid options see s2n.vlt_options['xshooter_nir_slitwidth'].
:param str uvb_ccd_binning: UVB CCD gain/binning/readout mode.
For valid options see s2n.vlt_options['xshooter_uvb_ccd_binning'].
:param str vis_ccd_binning: VIS CCD gain/binning/readout mode.
For valid options see s2n.vlt_options['xshooter_vis_ccd_binning'].
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
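A minimal usage sketch using the default slit widths and CCD binning
(illustrative values)::

    xsh = Sig2NoiseXSHOOTER(exptime=3600, mag=19.0, band="V")
    snr = xsh.query_s2n()  # 2 x N array: row 0 wavelength, row 1 S/N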
"""
def __init__(
self,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
uvb_slitwidth: str = "0.8",
vis_slitwidth: str = "0.7",
nir_slitwidth: str = "0.9",
uvb_ccd_binning: str = "high1x1slow",
vis_ccd_binning: str = "high1x1slow",
**kwargs,
):
Sig2NoiseVLT.__init__(
self,
"X-SHOOTER",
exptime,
mag,
band,
magtype,
template_type,
template,
redshift,
airmass,
moon_phase,
seeing,
**kwargs,
)
self.url = "https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=X-SHOOTER+INS.MODE=spectro"
if self.band not in vlt_options["src_target_mag_band (X-SHOOTER)"]:
raise KeyError(
f"{src_target_mag_band} not one of {vlt_options['src_target_mag_band (X-SHOOTER)']}"
)
if uvb_slitwidth not in vlt_options["xshooter_uvb_slitwidth"]:
raise KeyError(
f"{uvb_slitwidth} not one of {vlt_options['xshooter_uvb_slitwidth']}"
)
if vis_slitwidth not in vlt_options["xshooter_vis_slitwidth"]:
raise KeyError(
f"{vis_slitwidth} not one of {vlt_options['xshooter_vis_slitwidth']}"
)
if nir_slitwidth not in vlt_options["xshooter_nir_slitwidth"]:
raise KeyError(
f"{nir_slitwidth} not one of {vlt_options['xshooter_nir_slitwidth']}"
)
if uvb_ccd_binning not in vlt_options["xshooter_uvb_ccd_binning"]:
raise KeyError(
f"{uvb_ccd_binning} not one of {vlt_options['xshooter_uvb_ccd_binning']}"
)
if vis_ccd_binning not in vlt_options["xshooter_vis_ccd_binning"]:
raise KeyError(
f"{vis_ccd_binning} not one of {vlt_options['xshooter_vis_ccd_binning']}"
)
self.uvb_slitwidth = uvb_slitwidth
self.vis_slitwidth = vis_slitwidth
self.nir_slitwidth = nir_slitwidth
self.uvb_ccd_binning = uvb_ccd_binning
self.vis_ccd_binning = vis_ccd_binning
self.data = None
def query_s2n(self):
"""
Query the X-SHOOTER ETC (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=X-SHOOTER+INS.MODE=spectro)
:return np.ndarray: Wavelength and S/N as a 2 x N array
"""
url = self.url
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="SRC.TARGET.MAG.BAND", value="")
form.new_control(type="select", name="SKY.SEEING.ZENITH.V", value="")
form["POSTFILE.FLAG"] = 0
# Source Parameters
form["SRC.TARGET.MAG"] = self.mag
form["SRC.TARGET.MAG.BAND"] = self.band
form["SRC.TARGET.MAG.SYSTEM"] = self.magtype
form["SRC.TARGET.TYPE"] = self.template_type
form["SRC.TARGET.SPEC.TYPE"] = self.template
form["SRC.TARGET.REDSHIFT"] = self.redshift
form["SRC.TARGET.GEOM"] = "seeing_ltd"
# Sky Parameters
form["SKY.AIRMASS"] = self.airmass
form["SKY.MOON.FLI"] = self.moon_phase
form["USR.SEEING.OR.IQ"] = "seeing_given"
form["SKY.SEEING.ZENITH.V"] = self.seeing
# Default Sky Background
form["almanac_time_option"] = "almanac_time_option_ut_time"
form["SKYMODEL.TARGET.ALT"] = 65.38
form["SKYMODEL.MOON.SUN.SEP"] = 0
# Instrument Specifics
form["INS.NAME"] = "X-SHOOTER"
form["INS.MODE"] = "spectro"
form["INS.ARM.UVB.FLAG"] = "1"
form["INS.ARM.VIS.FLAG"] = "1"
form["INS.ARM.NIR.FLAG"] = "1"
form["INS.SLIT.FROM_USER.WIDTH.VAL.UVB"] = self.uvb_slitwidth
form["INS.SLIT.FROM_USER.WIDTH.VAL.VIS"] = self.vis_slitwidth
form["INS.SLIT.FROM_USER.WIDTH.VAL.NIR"] = self.nir_slitwidth
form["INS.DET.DIT.UVB"] = self.exptime
form["INS.DET.DIT.VIS"] = self.exptime
form["INS.DET.DIT.NIR"] = self.exptime
form["INS.DET.CCD.BINNING.VAL.UVB"] = self.uvb_ccd_binning
form["INS.DET.CCD.BINNING.VAL.VIS"] = self.vis_ccd_binning
form["INS.GEN.GRAPH.S2N.SWITCH.VAL"] = "yes"
for key in self.kwargs:
form[key] = self.kwargs[key]
self.data = browser.submit_selected()
snr = self.parse_etc()
return snr
def parse_etc(self):
def combine_xshooter_snr(snr_min_df, snr_mid_df, snr_max_df, offset):
snr_mid_df.index = snr_mid_df.pop(0)
snr_max_df.index = snr_max_df.pop(0)
snr_min_df.index = snr_min_df.pop(0)
snr_mid_df.sort_index(inplace=True)
snr_max_df.sort_index(inplace=True)
snr_min_df.sort_index(inplace=True)
snr = pd.concat([snr_min_df, snr_mid_df, snr_max_df])
snr.sort_index(inplace=True)
for i, idx_min in enumerate(snr_min_df.index[offset:]):
idx_max = snr_max_df.index[i]
idx_mid_before = snr_mid_df.index[i]
idx_mid_after = snr_mid_df.index[i + 1]
snr_min = snr_min_df.loc[idx_min, 1]
if not isinstance(snr_min, float):
snr_min = snr_min.iloc[0]
snr_mid_before = snr_mid_df.loc[idx_mid_before, 1]
snr_mid_after = snr_mid_df.loc[idx_mid_after, 1]
snr_max = snr_max_df.loc[idx_max, 1]
if not isinstance(snr_max, float):
snr_max = snr_max.iloc[0]
if idx_min < idx_max:
if snr_min > snr_max:
dy = snr_max - snr_mid_before
dx = idx_max - idx_mid_before
new_wave = idx_min - 0.1
new_snr = snr_mid_before + dy / dx * (new_wave - idx_mid_before)
snr.drop(idx_max, inplace=True)
snr.loc[new_wave] = new_snr
else:
dy = snr_mid_after - snr_min
dx = idx_mid_after - idx_min
new_wave = idx_max + 0.1
new_snr = snr_min + dy / dx * (new_wave - idx_min)
snr.drop(idx_min, inplace=True)
snr.loc[new_wave] = new_snr
elif idx_min == idx_max:
snr.drop(idx_min, inplace=True)
snr.loc[idx_min] = np.max([snr_min, snr_max])
snr.sort_index(inplace=True)
return snr
snr_url1_mid = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[1].split('" TITLE')[0]
)
snr_url2_mid = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[2].split('" TITLE')[0]
)
snr_url3_mid = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[3].split('" TITLE')[0]
)
snr_url1_max = snr_url1_mid[:-4] + "_FSRmax.dat"
snr_url2_max = snr_url2_mid[:-4] + "_FSRmax.dat"
snr_url3_max = snr_url3_mid[:-4] + "_FSRmax.dat"
snr_url1_min = snr_url1_mid[:-4] + "_FSRmin.dat"
snr_url2_min = snr_url2_mid[:-4] + "_FSRmin.dat"
snr_url3_min = snr_url3_mid[:-4] + "_FSRmin.dat"
snr_txt1_mid = requests.post(snr_url1_mid).text
snr_txt2_mid = requests.post(snr_url2_mid).text
snr_txt3_mid = requests.post(snr_url3_mid).text
snr_txt1_max = requests.post(snr_url1_max).text
snr_txt2_max = requests.post(snr_url2_max).text
snr_txt3_max = requests.post(snr_url3_max).text
snr_txt1_min = requests.post(snr_url1_min).text
snr_txt2_min = requests.post(snr_url2_min).text
snr_txt3_min = requests.post(snr_url3_min).text
snr1_mid_df = pd.DataFrame(
[row.split("\t") for row in snr_txt1_mid.split("\n")[:-1]], dtype="float64"
)
snr2_mid_df = pd.DataFrame(
[row.split("\t") for row in snr_txt2_mid.split("\n")[:-1]], dtype="float64"
)
snr3_mid_df = pd.DataFrame(
[row.split("\t") for row in snr_txt3_mid.split("\n")[:-1]], dtype="float64"
)
snr1_max_df = pd.DataFrame(
[row.split("\t") for row in snr_txt1_max.split("\n")[:-1]], dtype="float64"
)
snr2_max_df = pd.DataFrame(
[row.split("\t") for row in snr_txt2_max.split("\n")[:-1]], dtype="float64"
)
snr3_max_df = pd.DataFrame(
[row.split("\t") for row in snr_txt3_max.split("\n")[:-1]], dtype="float64"
)
snr1_min_df = pd.DataFrame(
[row.split("\t") for row in snr_txt1_min.split("\n")[:-1]], dtype="float64"
)
snr2_min_df = pd.DataFrame(
[row.split("\t") for row in snr_txt2_min.split("\n")[:-1]], dtype="float64"
)
snr3_min_df = pd.DataFrame(
[row.split("\t") for row in snr_txt3_min.split("\n")[:-1]], dtype="float64"
)
snr1 = combine_xshooter_snr(snr1_min_df, snr1_mid_df, snr1_max_df, offset=1)
snr2 = combine_xshooter_snr(snr2_min_df, snr2_mid_df, snr2_max_df, offset=0)
snr3 = combine_xshooter_snr(snr3_min_df, snr3_mid_df, snr3_max_df, offset=1)
snr = pd.concat([snr1, snr2, snr3])
snr = np.vstack([snr.index.values, snr[1].values])
snr[0] *= 10
return snr
class Sig2NoiseMUSE(Sig2NoiseVLT):
"""
VLT/MUSE S/N Query (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=MUSE+INS.MODE=swspectr)
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (MUSE)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param str mode: MUSE instrument mode. For valid options see s2n.vlt_options['muse_mode'].
:param str spatial_binning: Spatial binning. For valid options see s2n.vlt_options['muse_spatial_binning'].
:param str spectra_binning: Spectral binning. For valid options see s2n.vlt_options['muse_spectra_binning'].
:param float target_offset: Displacement of source from fiber center.
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
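A minimal usage sketch using the default WFM_NONAO_N mode (illustrative
values)::

    muse = Sig2NoiseMUSE(exptime=3600, mag=21.0, band="V")
    snr = muse.query_s2n()  # 2 x N array: row 0 wavelength, row 1 S/N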
"""
def __init__(
self,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
mode: str = "WFM_NONAO_N",
spatial_binning: str = "3",
spectra_binning: str = "1",
target_offset: float = 0,
**kwargs,
):
Sig2NoiseVLT.__init__(
self,
"MUSE",
exptime,
mag,
band,
magtype,
template_type,
template,
redshift,
airmass,
moon_phase,
seeing,
**kwargs,
)
self.url = "https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=MUSE+INS.MODE=swspectr"
if self.band not in vlt_options["src_target_mag_band (MUSE)"]:
raise KeyError(
f"{src_target_mag_band} not one of {vlt_options['src_target_mag_band (MUSE)']}"
)
if mode not in vlt_options["muse_mode"]:
raise KeyError(f"{mode} not one of {vlt_options['muse_mode']}")
if spatial_binning not in vlt_options["muse_spatial_binning"]:
raise KeyError(
f"{spatial_binning} not one of {vlt_options['muse_spatial_binning']}"
)
if spectra_binning not in vlt_options["muse_spectra_binning"]:
raise KeyError(
f"{spectra_binning} not one of {vlt_options['muse_spectra_binning']}"
)
if not target_offset >= 0:
raise ValueError("muse_target_offset must be positive")
self.mode = mode
self.spatial_binning = spatial_binning
self.spectra_binning = spectra_binning
self.target_offset = target_offset
self.data = None
def query_s2n(self):
"""
Query the MUSE ETC (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=MUSE+INS.MODE=swspectr)
:return np.ndarray: Wavelength and S/N as a 2 x N array
"""
url = self.url
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="SRC.TARGET.MAG.BAND", value="")
form.new_control(type="select", name="SKY.SEEING.ZENITH.V", value="")
form["POSTFILE.FLAG"] = 0
# Source Parameters
form["SRC.TARGET.MAG"] = self.mag
form["SRC.TARGET.MAG.BAND"] = self.band
form["SRC.TARGET.MAG.SYSTEM"] = self.magtype
form["SRC.TARGET.TYPE"] = self.template_type
form["SRC.TARGET.SPEC.TYPE"] = self.template
form["SRC.TARGET.REDSHIFT"] = self.redshift
form["SRC.TARGET.GEOM"] = "seeing_ltd"
# Sky Parameters
form["SKY.AIRMASS"] = self.airmass
form["SKY.MOON.FLI"] = self.moon_phase
form["USR.SEEING.OR.IQ"] = "seeing_given"
form["SKY.SEEING.ZENITH.V"] = self.seeing
# Default Sky Background
form["almanac_time_option"] = "almanac_time_option_ut_time"
form["SKYMODEL.TARGET.ALT"] = 65.38
form["SKYMODEL.MOON.SUN.SEP"] = 0
# Instrument Specifics
form["INS.NAME"] = "MUSE"
form["INS.MODE"] = "swspectr"
form["INS.MUSE.SETTING.KEY"] = self.mode
form["INS.MUSE.SPATIAL.NPIX.LINEAR"] = self.spatial_binning
form["INS.MUSE.SPECTRAL.NPIX.LINEAR"] = self.spectra_binning
form["SRC.TARGET.GEOM.DISTANCE"] = self.target_offset
form["USR.OBS.SETUP.TYPE"] = "givenexptime"
form["DET.IR.NDIT"] = 1
form["DET.IR.DIT"] = self.exptime
form["USR.OUT.DISPLAY.SN.V.WAVELENGTH"] = 1
for key in self.kwargs:
form[key] = self.kwargs[key]
self.data = browser.submit_selected()
snr = self.parse_etc()
return snr
def parse_etc(self):
snr_url = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[1].split('" TITLE')[0]
)
snr_txt = requests.post(snr_url).text
snr = pd.DataFrame([row.split(" ") for row in snr_txt.split("\n")[:-1]])
snr.index = snr.pop(0)
snr.sort_index(inplace=True)
snr = np.vstack([snr.index.values, snr[1].values]).astype(float)
snr[0] *= 10
return snr
class Sig2NoiseMSE(Sig2NoiseQuery):
"""
MSE S/N Query (http://etc-dev.cfht.hawaii.edu/mse/)
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.mse_options['template'].
:param str spec_mode: MSE mode. Must be "LR" (low resolution), "MR" (medium resolution), or "HR" (high resolution)
:param str band: Magnitude band. For valid options see s2n.mse_options['filter'].
:param str airmass: Airmass of observation. Must be one of mse_options['airmass'].
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float skymag: Background sky magnitude of observation
:param str src_type: Spatial profile of source. Must be one of mse_options['src_type'].
:param float redshift: Redshift of the target.
:param bool smoothed: If True, returns the smoothed S/N curve; otherwise returns the unsmoothed S/N.
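A minimal usage sketch (illustrative values; the template is drawn from
s2n.mse_options['template'] rather than hard-coded)::

    template = sorted(mse_options["template"])[0]  # any valid template
    mse = Sig2NoiseMSE(exptime=3600, mag=20.0, template=template, spec_mode="LR")
    snr = mse.query_s2n()  # 2 x N array: row 0 wavelength, row 1 S/N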
"""
def __init__(
self,
exptime: float,
mag: float,
template: str,
spec_mode: str = "LR",
band: str = "g",
airmass: str = "1.2",
seeing: float = 0.5,
skymag: float = 20.7,
src_type: str = "point",
redshift: float = 0,
smoothed: bool = False,
):
Sig2NoiseQuery.__init__(self)
self.url_base = "http://etc-dev.cfht.hawaii.edu/cgi-bin/mse/mse_wrapper.py"
# Hard Coded Values
self.sessionID = 1234
self.coating = "ZeCoat"
self.fibdiam = 1
self.spatbin = 2
self.specbin = 1
self.meth = "getSNR"
self.snr_value = 10
# Check Values
if template not in mse_options["template"]:
raise KeyError(f"{template} not one of {mse_options['template']}")
if spec_mode not in mse_options["spec_mode"]:
raise KeyError(f"{spec_mode} not one of {mse_options['spec_mode']}")
if band not in mse_options["filter"]:
raise KeyError(f"{band} not one of {mse_options['filter']}")
if airmass not in mse_options["airmass"]:
raise KeyError(f"{airmass} not one of {mse_options['airmass']}")
if src_type not in mse_options["src_type"]:
raise KeyError(f"{src_type} not one of {mse_options['src_type']}")
self.exptime = exptime
self.mag = mag
self.template = template
self.spec_mode = spec_mode
self.band = band
self.airmass = airmass
self.seeing = seeing
self.skymag = skymag
self.src_type = src_type
self.redshift = redshift
self.smoothed = smoothed
def query_s2n(self):
url = (
f"{self.url_base}?"
+ f"sessionID={self.sessionID}&"
+ f"coating={self.coating}&"
+ f"seeing={self.seeing}&"
+ f"airmass={self.airmass}&"
+ f"skymag={self.skymag}&"
+ f"spectro={self.spec_mode}&"
+ f"fibdiam={self.fibdiam}&"
+ f"spatbin={self.spatbin}&"
+ f"specbin={self.specbin}&"
+ f"meth={self.meth}&"
+ f"etime={self.exptime}&"
+ f"snr={self.snr_value}&"
+ f"src_type={self.src_type}&"
+ f"tgtmag={self.mag}&"
+ f"redshift={self.redshift}&"
+ f"band={self.band}&"
+ f"template={self.template}"
)
response = requests.post(url)
# Parse HTML response
r = response.text.split("docs_json = '")[1].split("';")[0]
model = json.loads(r)
key = list(model.keys())[0]
model_dict = model[key]
model_pass1 = [
_
for _ in model_dict["roots"]["references"]
if "data" in _["attributes"].keys()
]
model_pass2 = [
_ for _ in model_pass1 if "__ndarray__" in _["attributes"]["data"]["x"]
]
x = {}
y = {}
for i, tmp in enumerate(model_pass2):
x_str = tmp["attributes"]["data"]["x"]
x[i] = decode_base64_dict(x_str)
y_str = tmp["attributes"]["data"]["y"]
y[i] = decode_base64_dict(y_str)
# Sort Arrays
order = np.argsort([array[0] for i, array in x.items()])
x = {i: x[j] for i, j in enumerate(order)}
y = {i: y[j] for i, j in enumerate(order)}
x = {i: x[2 * i] for i in range(int(len(x) / 2))}
if self.smoothed:
y = {
i: (
y[2 * i]
if (np.mean(y[2 * i]) > np.mean(y[2 * i + 1]))
else y[2 * i + 1]
)
for i in range(int(len(y) / 2))
}
else:
y = {
i: (
y[2 * i]
if (np.mean(y[2 * i]) < np.mean(y[2 * i + 1]))
else y[2 * i + 1]
)
for i in range(int(len(y) / 2))
}
if self.spec_mode == "LR":
y[0] = y[0][x[0] < x[1].min()]
x[0] = x[0][x[0] < x[1].min()]
y[1] = y[1][x[1] < x[2].min()]
x[1] = x[1][x[1] < x[2].min()]
y[2] = y[2][x[2] < x[3].min()]
x[2] = x[2][x[2] < x[3].min()]
filler_x = np.linspace(x[3].max(), x[4].min(), 100)
filler_y = np.zeros(100)
x = np.concatenate([x[0], x[1], x[2], x[3], filler_x, x[4]])
y = np.concatenate([y[0], y[1], y[2], y[3], filler_y, y[4]])
elif self.spec_mode in ["MR", "HR"]:
filler_x1 = np.linspace(x[0].max(), x[1].min(), 100)
filler_x2 = np.linspace(x[1].max(), x[2].min(), 100)
filler_y = np.zeros(100)
x = np.concatenate([x[0], filler_x1, x[1], filler_x2, x[2]])
y = np.concatenate([y[0], filler_y, y[1], filler_y, y[2]])
else:
raise RuntimeError(
f"{self.spec_mode} not one of {mse_options['spec_mode']}"
)
snr = np.vstack([x, y])
return snr
class Sig2NoiseLCO(Sig2NoiseQuery):
"""
Superclass for LCO ETC Queries (http://www.lco.cl/scripts/lcoetc/lcoetc_sspec.html)
:param instrument: LCO instrument. Valid options are "MIKE", "LDSS3", "IMACS", and "MAGE".
:param telescope: LCO telescope. "MAGELLAN1" for IMACS and MAGE. "MAGELLAN2" for LDSS3 and MIKE.
:param exptime: Exposure time in seconds
:param mag: Magnitude of source
:param template: Spectral template. For valid options see s2n.lco_options['template'].
:param band: Magnitude band. For valid options see s2n.lco_options['filter'].
:param airmass: Airmass of observation
:param seeing: Seeing (FWHM) of observation in arcseconds
:param nmoon: Days since new moon. For valid options see s2n.lco_options['nmoon'].
:param nexp: Number of exposures
:param slitwidth: Width of slit in arcseconds
:param binspat: Binning in the spatial direction. For valid options see s2n.lco_options['binspat'].
:param binspec: Binning in the spectral direction. For valid options see s2n.lco_options['binspec'].
:param extract_ap: Size of extraction aperture in arcseconds.
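This superclass does not define a ``mode`` and is not meant to be queried
directly; use one of the instrument-specific subclasses (Sig2NoiseIMACS,
Sig2NoiseMAGE, Sig2NoiseMIKE, or Sig2NoiseLDSS3). A minimal sketch with the
LDSS-3 subclass (illustrative values; "VPHALL" is listed as a valid mode in
its class description)::

    ldss3 = Sig2NoiseLDSS3("VPHALL", exptime=1800, mag=19.0)
    snr = ldss3.query_s2n()  # 2 x N array: row 0 wavelength (Angstrom), row 1 S/N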
"""
def __init__(
self,
instrument: str,
telescope: str,
exptime: float,
mag: float,
template: str = "flat",
band: str = "g",
airmass: float = 1.1,
seeing: float = 0.5,
nmoon: str = "0",
nexp: int = 1,
slitwidth: float = 1.0,
binspat: str = "3",
binspec: str = "1",
extract_ap: float = 1.5,
):
Sig2NoiseQuery.__init__(self)
self.url_base = "http://www.lco.cl/scripts/lcoetc/lcoetc_sspec.html"
if template not in lco_options["template"]:
raise KeyError(f"{template} not one of {lco_options['template']}")
if band not in lco_options["tempfilter"]:
raise KeyError(f"{band} not one of {lco_options['tempfilter']}")
if telescope not in lco_options["telescope"]:
raise KeyError(f"{telescope} not one of {lco_options['telescope']}")
if instrument not in lco_options[telescope + "_instrument"]:
raise KeyError(
f"{instrument} not one of {lco_options[telescope+'_telescope']}"
)
if binspat not in lco_options["binspat"]:
raise KeyError(f"{binspat} not one of {lco_options['binspat']}")
if binspec not in lco_options["binspec"]:
raise KeyError(f"{binspec} not one of {lco_options['binspec']}")
if nmoon not in lco_options["nmoon"]:
raise KeyError(f"{nmoon} not one of {lco_options['nmoon']}")
if template == "flat":
self.template = template
else:
self.template = f"{template}_Pickles.dat"
self.abmag = mag
self.tempfilter = f"sdss_{band}.dat"
self.telescope = telescope
self.instrument = instrument
self.dslit = slitwidth
self.binspat = binspat
self.binspec = binspec
self.nmoon = nmoon
self.amass = airmass
self.dpsf = seeing
self.texp = exptime
self.nexp = nexp
self.aper = extract_ap
def query_s2n(self):
if not hasattr(self, "mode"):
raise AttributeError(
"Query has no attribute 'mode'."
+ "Try using the instrument specific query (e.g., Sig2NoiseMIKE) instead of this general one."
)
url = (
f"{self.url_base}?"
+ f"template={self.template}&"
+ f"abmag={self.abmag}&"
+ f"tempfilter={self.tempfilter}&"
+ f"addline=0&"
+ f"linelam=5000&"
+ f"lineflux=1e-16&"
+ f"linefwhm=5.0&"
+ f"telescope={self.telescope}&"
+ f"instrument={self.instrument}&"
+ f"mode={self.mode}&"
+ f"dslit={self.dslit}&"
+ f"binspat={self.binspat}&"
+ f"binspec={self.binspec}&"
+ f"nmoon={self.nmoon}&"
+ f"amass={self.amass}&"
+ f"dpsf={self.dpsf}&"
+ f"texp={self.texp}&"
+ f"nexp={self.nexp}&"
+ f"aper={self.aper}&"
+ f"submitted=CALCULATE"
)
response = requests.post(url)
data_url = response.text.split('href="')[1].split('" download>')[0]
data_text = requests.post(data_url).text
header = data_text.split("\n")[0]
data = pd.DataFrame(
[row.split(" ") for row in data_text.split("\n")[1:-1]],
columns=header.split(" ")[1:],
)
snr = np.vstack(
[data["Wavelength_[A]"].values, data["S/N_Aperture_Coadd"]]
).astype(float)
return snr
class Sig2NoiseIMACS(Sig2NoiseLCO):
"""
Magellan/IMACS S/N Query (http://www.lco.cl/scripts/lcoetc/lcoetc_sspec.html)
:param mode: IMACS mode. For valid options see s2n.lco_options['IMACS_mode'].
:param exptime: Exposure time in seconds
:param mag: Magnitude of source
:param template: Spectral template. For valid options see s2n.lco_options['template'].
:param band: Magnitude band. For valid options see s2n.lco_options['filter'].
:param airmass: Airmass of observation
:param seeing: Seeing (FWHM) of observation in arcseconds
:param nmoon: Days since new moon. For valid options see s2n.lco_options['nmoon'].
:param nexp: Number of exposures
:param slitwidth: Width of slit in arcseconds
:param binspat: Binning in the spatial direction. For valid options see s2n.lco_options['binspat'].
:param binspec: Binning in the spectral direction. For valid options see s2n.lco_options['binspec'].
:param extract_ap: Size of extraction aperture in arcseconds.
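A minimal usage sketch (illustrative values; the mode is drawn from
s2n.lco_options['IMACS_mode'] rather than hard-coded)::

    mode = sorted(lco_options["IMACS_mode"])[0]  # any valid IMACS mode
    imacs = Sig2NoiseIMACS(mode, exptime=1800, mag=19.0)
    snr = imacs.query_s2n()  # 2 x N array: row 0 wavelength (Angstrom), row 1 S/N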
"""
def __init__(
self,
mode: str,
exptime: float,
mag: float,
template: str = "flat",
band: str = "g",
airmass: float = 1.1,
seeing: float = 0.5,
nmoon: str = "0",
nexp: int = 1,
slitwidth: float = 1.0,
binspat: str = "3",
binspec: str = "1",
extract_ap: float = 1.5,
):
Sig2NoiseLCO.__init__(
self,
"IMACS",
"MAGELLAN1",
exptime,
mag,
template,
band,
airmass,
seeing,
nmoon,
nexp,
slitwidth,
binspat,
binspec,
extract_ap,
)
if mode not in lco_options["IMACS_mode"]:
raise KeyError(f"{mode} not one of {lco_options['IMACS_mode']}")
self.mode = mode
class Sig2NoiseMAGE(Sig2NoiseLCO):
"""
Magellan/MAGE S/N Query (http://www.lco.cl/scripts/lcoetc/lcoetc_sspec.html)
:param mode: MAGE mode. "ECHELLETTE" is currently the only option.
:param exptime: Exposure time in seconds
:param mag: Magnitude of source
:param template: Spectral template. For valid options see s2n.lco_options['template'].
:param band: Magnitude band. For valid options see s2n.lco_options['filter'].
:param airmass: Airmass of observation
:param seeing: Seeing (FWHM) of observation in arcseconds
:param nmoon: Days since new moon. For valid options see s2n.lco_options['nmoon'].
:param nexp: Number of exposures
:param slitwidth: Width of slit in arcseconds
:param binspat: Binning in the spatial direction. For valid options see s2n.lco_options['binspat'].
:param binspec: Binning in the spectral direction. For valid options see s2n.lco_options['binspec'].
:param extract_ap: Size of extraction aperture in arcseconds.
"""
def __init__(
self,
mode: str,
exptime: float,
mag: float,
template: str = "flat",
band: str = "g",
airmass: float = 1.1,
seeing: float = 0.5,
nmoon: str = "0",
nexp: int = 1,
slitwidth: float = 1.0,
binspat: str = "3",
binspec: str = "1",
extract_ap: float = 1.5,
):
Sig2NoiseLCO.__init__(
self,
"MAGE",
"MAGELLAN1",
exptime,
mag,
template,
band,
airmass,
seeing,
nmoon,
nexp,
slitwidth,
binspat,
binspec,
extract_ap,
)
if mode not in lco_options["MAGE_mode"]:
raise KeyError(f"{mode} not one of {lco_options['MAGE_mode']}")
self.mode = mode
class Sig2NoiseMIKE(Sig2NoiseLCO):
"""
Magellan/MIKE S/N Query (http://www.lco.cl/scripts/lcoetc/lcoetc_sspec.html)
:param mode: MIKE mode. Valid options are "BLUE" and "RED".
:param exptime: Exposure time in seconds
:param mag: Magnitude of source
:param template: Spectral template. For valid options see s2n.lco_options['template'].
:param band: Magnitude band. For valid options see s2n.lco_options['filter'].
:param airmass: Airmass of observation
:param seeing: Seeing (FWHM) of observation in arcseconds
:param nmoon: Days since new moon. For valid options see s2n.lco_options['nmoon'].
:param nexp: Number of exposures
:param slitwidth: Width of slit in arcseconds
:param binspat: Binning in the spatial direction. For valid options see s2n.lco_options['binspat'].
:param binspec: Binning in the spectral direction. For valid options see s2n.lco_options['binspec'].
:param extract_ap: Size of extraction aperture in arcseconds.
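A minimal usage sketch ("BLUE" is listed as a valid mode above; other values
are illustrative)::

    mike = Sig2NoiseMIKE("BLUE", exptime=2400, mag=16.5)
    snr = mike.query_s2n()  # 2 x N array: row 0 wavelength (Angstrom), row 1 S/N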
"""
def __init__(
self,
mode: str,
exptime: float,
mag: float,
template: str = "flat",
band: str = "g",
airmass: float = 1.1,
seeing: float = 0.5,
nmoon: str = "0",
nexp: int = 1,
slitwidth: float = 1.0,
binspat: str = "3",
binspec: str = "1",
extract_ap: float = 1.5,
):
Sig2NoiseLCO.__init__(
self,
"MIKE",
"MAGELLAN2",
exptime,
mag,
template,
band,
airmass,
seeing,
nmoon,
nexp,
slitwidth,
binspat,
binspec,
extract_ap,
)
if mode not in lco_options["MIKE_mode"]:
raise KeyError(f"{mode} not one of {lco_options['MIKE_mode']}")
self.mode = mode
class Sig2NoiseLDSS3(Sig2NoiseLCO):
"""
Magellan/LDSS-3 S/N Query (http://www.lco.cl/scripts/lcoetc/lcoetc_sspec.html)
:param mode: LDSS-3 mode. Valid options are "VPHALL", "VPHBLUE", and "VPHRED".
:param exptime: Exposure time in seconds
:param mag: Magnitude of source
:param template: Spectral template. For valid options see s2n.lco_options['template'].
:param band: Magnitude band. For valid options see s2n.lco_options['filter'].
:param airmass: Airmass of observation
:param seeing: Seeing (FWHM) of observation in arcseconds
:param nmoon: Days since new moon. For valid options see s2n.lco_options['nmoon'].
:param nexp: Number of exposures
:param slitwidth: Width of slit in arcseconds
:param binspat: Binning in the spatial direction. For valid options see s2n.lco_options['binspat'].
:param binspec: Binning in the spectral direction. For valid options see s2n.lco_options['binspec'].
:param extract_ap: Size of extraction aperture in arcseconds.
"""
def __init__(
self,
mode: str,
exptime: float,
mag: float,
template: str = "flat",
band: str = "g",
airmass: float = 1.1,
seeing: float = 0.5,
nmoon: str = "0",
nexp: int = 1,
slitwidth: float = 1.0,
binspat: str = "3",
binspec: str = "1",
extract_ap: float = 1.5,
):
Sig2NoiseLCO.__init__(
self,
"LDSS3",
"MAGELLAN2",
exptime,
mag,
template,
band,
airmass,
seeing,
nmoon,
nexp,
slitwidth,
binspat,
binspec,
extract_ap,
)
if mode not in lco_options["LDSS3_mode"]:
raise KeyError(f"{mode} not one of {lco_options['LDSS3_mode']}")
self.mode = mode
def calculate_mods_snr(
F: np.ndarray,
wave: np.ndarray,
t_exp: float,
airmass: float = 1.1,
slitloss: float = 0.76,
mode: str = "dichroic",
side: Optional[str] = None,
) -> np.ndarray:
"""
Calculate S/N for LBT/MODS. Based on the calculations and data presented here:
https://sites.google.com/a/lbto.org/mods/preparing-to-observe/sensitivity
:param np.ndarray F: Flux (ergs s^-1 Angstrom^-1 cm^-2)
:param np.ndarray wave: Wavelength array (Angstrom)
:param float t_exp: Exposure time in seconds
:param float airmass: Airmass of observation
:param float slitloss: Slit loss factor (i.e., the fraction of the flux that makes it through the slit).
:param str mode: "dichroic" for both red and blue detectors or "direct" for just one or the other.
:param Optional[str] side: Detector to use if mode="direct". Must be either "red" or "blue".
:return np.ndarray: S/N as a function of wavelength for LBT/MODS
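A minimal usage sketch with a flat input spectrum (flux level and exposure time
are illustrative; assumes the bundled LBTO/MODS throughput files are available)::

    wave = np.linspace(3500.0, 10000.0, 2000)   # Angstrom
    flux = np.full_like(wave, 1e-16)            # erg s^-1 Angstrom^-1 cm^-2
    wave_out, snr = calculate_mods_snr(flux, wave, t_exp=1200.0, mode="dichroic")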
"""
if mode not in ["dichroic", "direct"]:
raise KeyError("mode must be either 'dichroic' or 'direct'.")
if mode == "direct" and side not in ["red", "blue"]:
raise KeyError("side must be either 'red' or 'blue' if mode is 'direct'.")
if len(F) != len(wave):
raise ValueError("Flux and wavelength must be the same length.")
if airmass < 1.0:
raise ValueError("Airmass must be greater than or equal to 1.")
log_t_exp = np.log10(t_exp)
log_F = np.log10(F)
g_blue = 2.5 # electron / ADU
g_red = 2.6 # electron / ADU
sigma_RO_red = 2.5 # electron
sigma_RO_blue = 2.5 # electron
A_per_pix_red = 0.85
A_per_pix_blue = 0.50
atm_extinct_curve = np.genfromtxt(etc_file_dir.joinpath("LBTO_atm_extinct.txt")).T
atm_extinct = interp1d(
x=atm_extinct_curve[0],
y=atm_extinct_curve[1],
bounds_error=False,
fill_value="extrapolate",
)
log_S_0_red = np.genfromtxt(etc_file_dir.joinpath("MODS_red_S_0.txt")).T
log_S_0_blue = np.genfromtxt(etc_file_dir.joinpath("MODS_blue_S_0.txt")).T
g = np.zeros_like(wave)
if mode == "dichroic":
log_S_0_r = interp1d(
log_S_0_red[0], log_S_0_red[2], bounds_error=False, fill_value="extrapolate"
)
log_S_0_b = interp1d(
log_S_0_blue[0],
log_S_0_blue[2],
bounds_error=False,
fill_value="extrapolate",
)
log_S_red = (
log_S_0_r(wave) + log_F + log_t_exp - 0.4 * atm_extinct(wave) * airmass
)
log_S_blue = (
log_S_0_b(wave) + log_F + log_t_exp - 0.4 * atm_extinct(wave) * airmass
)
S_red = 10 ** log_S_red * slitloss * A_per_pix_red
S_blue = 10 ** log_S_blue * slitloss * A_per_pix_blue
snr_red = g_red * S_red / np.sqrt(g_red * S_red + sigma_RO_red ** 2)
snr_blue = g_blue * S_blue / np.sqrt(g_blue * S_blue + sigma_RO_blue ** 2)
snr = np.max([snr_red, snr_blue], axis=0)
elif mode == "direct":
if side == "red":
log_S_0_r = interp1d(
log_S_0_red[0],
log_S_0_red[1],
bounds_error=False,
fill_value="extrapolate",
)
log_S_red = (
log_S_0_r(wave) + log_F + log_t_exp - 0.4 * atm_extinct(wave) * airmass
)
S_red = 10 ** log_S_red * slitloss * A_per_pix_red
snr = g_red * S_red / np.sqrt(g_red * S_red + sigma_RO_red ** 2)
elif side == "blue":
log_S_0_b = interp1d(
log_S_0_blue[0],
log_S_0_blue[1],
bounds_error=False,
fill_value="extrapolate",
)
log_S_blue = (
log_S_0_b(wave) + log_F + log_t_exp - 0.4 * atm_extinct(wave) * airmass
)
S_blue = 10 ** log_S_blue * slitloss * A_per_pix_blue
snr = g_blue * S_blue / np.sqrt(g_blue * S_blue + sigma_RO_blue ** 2)
else:
raise RuntimeError("Improper side argument")
else:
raise RuntimeError("Improper mode argument")
return np.array([wave, snr])
def calculate_fobos_snr(
spec_file: Optional[str] = None,
spec_wave: Union[str, float] = "WAVE",
spec_wave_units: str = "angstrom",
spec_flux: Union[str, float] = "FLUX",
spec_flux_units: Optional[str] = None,
spot_fwhm: float = 5.8,
spec_res_indx: Optional[Union[str, float]] = None,
spec_res_value: Optional[float] = None,
spec_table: Optional[Union[str, float]] = None,
mag: float = 24.0,
mag_band: str = "g",
mag_system: str = "AB",
sky_mag: Optional[float] = None,
sky_mag_band: str = "g",
sky_mag_system: str = "AB",
redshift: float = 0.0,
emline: Optional[str] = None,
sersic: Optional[Tuple[float, float, float, float]] = None,
uniform: bool = False,
exptime: float = 3600.0,
fwhm: float = 0.65,
airmass: float = 1.0,
snr_units: str = "pixel",
sky_err: float = 0.1,
print_summary: bool = True,
) -> np.ndarray:
"""
This is slightly modified code from https://github.com/Keck-FOBOS/enyo/blob/master/python/enyo/scripts/fobos_etc.py
:param Optional[str] spec_file: A fits or ascii file with the object spectrum to use. If None, a flat spectrum is used.
:param Union[str,float] spec_wave: Extension or column number with the wavelengths.
:param str spec_wave_units: Wavelength units
:param Union[str,float] spec_flux: Extension or column number with the flux.
:param Optional[str] spec_flux_units: Input units of the flux density. Must be interpretable by astropy.units.Unit.
Assumes 1e-17 erg / (cm2 s angstrom) if units are not provided.
:param float spot_fwhm: FWHM of the monochromatic spot size on the detector in pixels.
:param Optional[Union[str,float]] spec_res_indx: Extension or column number with the spectral resolution.
:param Optional[float] spec_res_value: Single value for the spectral resolution (R = lambda/dlambda) for the full spectrum.
:param Optional[Union[str,float]] spec_table: Extension in the fits file with the binary table data.
:param float mag: Total apparent magnitude of the source
:param str mag_band: Broad-band used for the provided magnitude. Must be u, g, r, i, or z.
:param str mag_system: Magnitude system. Must be either AB or Vega.
:param Optional[float] sky_mag: Surface brightness of the sky in mag/arcsec^2 in the defined broadband.
If not provided, default dark-sky spectrum is used.
:param str sky_mag_band: Broad-band used for the provided sky surface brightness. Must be u, g, r, i, or z.
:param str sky_mag_system: Magnitude system. Must be either AB or Vega.
:param float redshift: Redshift of the object, z
:param Optional[str] emline: File with emission lines to add to the spectrum.
:param Optional[Tuple[float,float,float,float]] sersic: Use a Sersic profile to describe the object surface-brightness distribution; order
must be effective radius, Sersic index, ellipticity (1-b/a), position angle (deg).
:param bool uniform: Instead of a point source or Sersic profile,
assume the surface brightness distribution is uniform over the fiber face.
If set, the provided magnitude is assumed to be a surface brightness.
See the ``mag`` parameter.
:param float exptime: Exposure time (s)
:param float fwhm: On-sky PSF FWHM (arcsec)
:param float airmass: Airmass
:param str snr_units: The units for the S/N. Options are pixel, angstrom, resolution.
:param float sky_err: The fraction of the Poisson error in the sky incurred when subtracting the sky from the observation.
Set to 0 for a sky subtraction that adds no error to the sky-subtracted spectrum;
set to 1 for a sky-subtraction error that is the same as the Poisson error in the sky spectrum
acquired during the observation.
:param bool print_summary: If True, prints a summary of the calculations.
:return np.ndarray: S/N as a function of wavelength for Keck/FOBOS
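A minimal usage sketch (assumes the FOBOS ETC package ``enyo`` is installed and
the ENYO_DIR environment variable is set; a flat source spectrum is used when
no spec_file is given; values are illustrative)::

    snr = calculate_fobos_snr(mag=22.0, exptime=1800.0, print_summary=False)
    wave, s2n = snr[0], snr[1]  # wavelength and S/N arrays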
"""
try:
from enyo.etc import (
spectrum,
efficiency,
telescopes,
aperture,
detector,
extract,
)
from enyo.etc.observe import Observation
from enyo.scripts.fobos_etc import (
get_wavelength_vector,
read_emission_line_database,
get_spectrum,
get_sky_spectrum,
get_source_distribution,
)
except ImportError:
raise ImportError(
"To calculate FOBOS S/N you must first install the FOBOS ETC.\n "
+ "See <> for installation instructions."
)
if sky_err < 0 or sky_err > 1:
    raise ValueError("sky_err must be a value between 0 and 1.")
# Constants:
resolution = 3500.0 # lambda/dlambda
fiber_diameter = 0.8 # Arcsec
rn = 2.0 # Detector readnoise (e-)
dark = 0.0 # Detector dark-current (e-/s)
# Temporary numbers that assume a given spectrograph PSF and LSF.
# Assume 3 pixels per spectral and spatial FWHM.
spatial_fwhm = spot_fwhm
spectral_fwhm = spot_fwhm
# Get source spectrum in 1e-17 erg/s/cm^2/angstrom. Currently, the
# source spectrum is assumed to be
# - normalized by the total integral of the source flux
# - independent of position within the source
dw = 1 / spectral_fwhm / resolution / np.log(10)
wavelengths = [3100, 10000, dw]
wave = get_wavelength_vector(wavelengths[0], wavelengths[1], wavelengths[2])
emline_db = None if emline is None else read_emission_line_database(emline)
spec = get_spectrum(
wave,
mag,
mag_band=mag_band,
mag_system=mag_system,
spec_file=spec_file,
spec_wave=spec_wave,
spec_wave_units=spec_wave_units,
spec_flux=spec_flux,
spec_flux_units=spec_flux_units,
spec_res_indx=spec_res_indx,
spec_res_value=spec_res_value,
spec_table=spec_table,
emline_db=emline_db,
redshift=redshift,
resolution=resolution,
)
t = time.perf_counter()
# Get the source distribution. If the source is uniform, onsky is None.
onsky = get_source_distribution(fwhm, uniform, sersic)
# Get the sky spectrum
sky_spectrum = get_sky_spectrum(
sky_mag, mag_band=sky_mag_band, mag_system=sky_mag_system
)
# Get the atmospheric throughput
atmospheric_throughput = efficiency.AtmosphericThroughput(airmass=airmass)
# Set the telescope. Defines the aperture area and throughput
# (nominally 3 aluminum reflections for Keck)
telescope = telescopes.KeckTelescope()
# Define the observing aperture; fiber diameter is in arcseconds,
# center is 0,0 to put the fiber on the target center. "resolution"
# sets the resolution of the fiber rendering; it has nothing to do
# with spatial or spectral resolution of the instrument
fiber = aperture.FiberAperture(0, 0, fiber_diameter, resolution=100)
# Get the spectrograph throughput (circa June 2018; needs to
# be updated). Includes fibers + foreoptics + FRD + spectrograph +
# detector QE (not sure about ADC). Because this is the total
# throughput, define a generic efficiency object.
thru_db = np.genfromtxt(
os.path.join(os.environ["ENYO_DIR"], "data/efficiency", "fobos_throughput.db")
)
spectrograph_throughput = efficiency.Efficiency(thru_db[:, 1], wave=thru_db[:, 0])
# System efficiency combines the spectrograph and the telescope
system_throughput = efficiency.SystemThroughput(
wave=spec.wave,
spectrograph=spectrograph_throughput,
telescope=telescope.throughput,
)
# Instantiate the detector; really just a container for the rn and
# dark current for now. QE is included in fobos_throughput.db file,
# so I set it to 1 here.
det = detector.Detector(rn=rn, dark=dark, qe=1.0)
# Extraction: makes simple assumptions about the detector PSF for
# each fiber spectrum and mimics a "perfect" extraction, including
# an assumption of no cross-talk between fibers. Ignore the
# "spectral extraction".
extraction = extract.Extraction(
det,
spatial_fwhm=spatial_fwhm,
spatial_width=1.5 * spatial_fwhm,
spectral_fwhm=spectral_fwhm,
spectral_width=spectral_fwhm,
)
# Perform the observation
obs = Observation(
telescope,
sky_spectrum,
fiber,
exptime,
det,
system_throughput=system_throughput,
atmospheric_throughput=atmospheric_throughput,
airmass=airmass,
onsky_source_distribution=onsky,
source_spectrum=spec,
extraction=extraction,
snr_units=snr_units,
)
# Construct the S/N spectrum
snr = obs.snr(sky_sub=True, sky_err=sky_err)
snr_label = "S/N per {0}".format(
"R element" if snr_units == "resolution" else snr_units
)
if print_summary:
# Report
g = efficiency.FilterResponse(band="g")
r = efficiency.FilterResponse(band="r")
iband = efficiency.FilterResponse(band="i")
print("-" * 70)
print("{0:^70}".format("FOBOS S/N Calculation (v0.2)"))
print("-" * 70)
print("Compute time: {0} seconds".format(time.perf_counter() - t))
print(
"Object g- and r-band AB magnitude: {0:.1f} {1:.1f}".format(
spec.magnitude(band=g), spec.magnitude(band=r)
)
)
print(
"Sky g- and r-band AB surface brightness: {0:.1f} {1:.1f}".format(
sky_spectrum.magnitude(band=g), sky_spectrum.magnitude(band=r)
)
)
print("Exposure time: {0:.1f} (s)".format(exptime))
if not uniform:
print("Aperture Loss: {0:.1f}%".format((1 - obs.aperture_factor) * 100))
print(
"Extraction Loss: {0:.1f}%".format(
(1 - obs.extraction.spatial_efficiency) * 100
)
)
print("Median {0}: {1:.1f}".format(snr_label, np.median(snr.flux)))
print(
"g-band weighted mean {0} {1:.1f}".format(
snr_label, np.sum(g(snr.wave) * snr.flux) / np.sum(g(snr.wave))
)
)
print(
"r-band weighted mean {0} {1:.1f}".format(
snr_label, np.sum(r(snr.wave) * snr.flux) / np.sum(r(snr.wave))
)
)
print(
"i-band weighted mean {0} {1:.1f}".format(
snr_label, np.sum(iband(snr.wave) * snr.flux) / np.sum(iband(snr.wave))
)
)
return np.vstack([snr.wave, snr.flux])
def calculate_wfos_snr(
spec_file: Optional[str] = None,
spec_wave: Union[str, float] = "WAVE",
spec_wave_units: str = "angstrom",
spec_flux: Union[str, float] = "FLUX",
spec_flux_units: Optional[str] = None,
#spot_fwhm: float = 5.8, # Not used
spec_res_indx: Optional[Union[str, float]] = None,
spec_res_value: Optional[float] = None,
spec_table: Optional[Union[str, float]] = None,
mag: float = 24.0,
mag_band: str = "g",
mag_system: str = "AB",
#sky_mag: Optional[float] = None, # Not used
#sky_mag_band: str = "g", # Not used
#sky_mag_system: str = "AB", # Not used
redshift: float = 0.0,
emline: Optional[str] = None,
sersic: Optional[Tuple[float, float, float, float]] = None,
uniform: bool = False,
exptime: float = 3600.0,
fwhm: float = 0.65,
airmass: float = 1.0,
snr_units: str = "pixel",
sky_err: float = 0.1,
# WFOS specifics
refl: str = "req",
blue_grat: str = "B1210",
blue_wave: Optional[float] = None,
blue_angle: Optional[float] = None,
blue_binning: Optional[Tuple[int, int]] = (1, 1),
red_grat: str = "R680",
red_wave: Optional[float] = None,
red_angle: Optional[float] = None,
red_binning: Optional[Tuple[int, int]] = (1, 1),
slit: Optional[Tuple[float, float, float, float, float]] = (
0.0,
0.0,
0.75,
5.0,
0.0,
),
extract_size: Optional[float] = None,
return_R: bool = False,
print_summary: bool = True,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
This is slightly modified code from https://github.com/Keck-FOBOS/enyo/blob/master/enyo/scripts/wfos_etc.py
:param Optional[str] spec_file: A fits or ascii file with the object spectrum to use. If None, a flat spectrum is used.
:param Union[str,float] spec_wave: Extension or column number with the wavelengths.
:param str spec_wave_units: Wavelength units
:param Union[str,float] spec_flux: Extension or column number with the flux.
:param Optional[str] spec_flux_units: Input units of the flux density. Must be interpretable by astropy.units.Unit.
Assumes 1e-17 erg / (cm2 s angstrom) if units are not provided.
:param Optional[Union[str,float]] spec_res_indx: Extension or column number with the spectral resolution.
:param Optional[float] spec_res_value: Single value for the spectral resolution (R = lambda/dlambda) for the full spectrum.
:param Optional[Union[str,float]] spec_table: Extension in the fits file with the binary table data.
:param float mag: Total apparent magnitude of the source
:param str mag_band: Broad-band used for the provided magnitude. Must be u, g, r, i, or z.
:param str mag_system: Magnitude system. Must be either AB or Vega.
:param float redshift: Redshift of the object, z
:param Optional[str] emline: File with emission lines to add to the spectrum.
:param Optional[Tuple[float,float,float,float]] sersic: Use a Sersic profile to describe the object surface-brightness distribution; order
must be effective radius, Sersic index, ellipticity (1-b/a), position angle (deg).
:param bool uniform: Instead of a point source or Sersic profile,
        assume the surface brightness distribution is uniform over the slit aperture.
        If set, the provided magnitude is assumed to be a surface brightness.
        See the mag parameter.
:param float exptime: Exposure time (s)
:param float fwhm: On-sky PSF FWHM (arcsec)
:param float airmass: Airmass
:param str snr_units: The units for the S/N. Options are pixel, angstrom, resolution.
:param float sky_err: The fraction of the Poisson error in the sky incurred when subtracting the sky from the observation.
Set to 0 for a sky subtraction that adds no error to the sky-subtracted spectrum;
set to 1 for a sky-subtraction error that is the same as the Poisson error in the sky spectrum
acquired during the observation.
:param str refl: Select the reflectivity curve for TMT.
Must be either 'req' or 'goal' for the required or goal reflectivity performance.
:param str blue_grat: Grating to use in the blue arm.
For valid options see enyo.etc.spectrographs.WFOSGrating.available_gratings.keys()
:param Optional[float] blue_wave: Central wavelength for the blue arm.
If None, will use the peak-efficiency wavelength.
:param Optional[float] blue_angle: Grating angle for blue grating.
        If None, will use the angle that provides the best efficiency for the on-axis spectrum.
:param Optional[Tuple[int,int]] blue_binning: On-chip binning for the blue grating. Order is spectral then spatial.
        I.e., to bin 2 pixels spectrally with no spatial binning, use (2, 1).
:param str red_grat: Grating to use in the red arm.
For valid options see enyo.etc.spectrographs.WFOSGrating.available_gratings.keys()
:param Optional[float] red_wave: Central wavelength for the red arm.
If None, will use the peak-efficiency wavelength.
:param Optional[float] red_angle: Grating angle for red grating.
        If None, will use the angle that provides the best efficiency for the on-axis spectrum.
:param Optional[Tuple[int,int]] red_binning: On-chip binning for the red grating. Order is spectral then spatial.
        I.e., to bin 2 pixels spectrally with no spatial binning, use (2, 1).
:param Optional[Tuple[float,float,float,float,float]] slit: Slit properties:
x field center, y field center, width, length, rotation.
The rotation is in degrees, everything else is in on-sky arcsec.
The slit width is in the *unrotated* frame, meaning the effective slit width for a rotated slit is
slit_width/cos(rotation). For the field center, x is along the dispersion direction with a valid range of
+/- 90 arcsec, and y is in the cross-dispersion direction with a valid range of +/- 249 arcsec.
Coordinate (0,0) is on axis.
:param Optional[float] extract_size: Extraction aperture in arcsec *along the slit* centered on the source.
At the detector, the extraction aperture is narrower by cos(slit rotation).
If not provided, set to the FWHM of the seeing disk.
:param bool return_R: If True, also returns the resolution as a function of wavelength.
:param bool print_summary: If True, prints a summary of the calculations.
:return Union[np.ndarray,Tuple[np.ndarray,np.ndarray]]: S/N as a function of wavelength for Keck/WFOS.
If return_R, a tuple of S/N and resolving power as a function of wavelength.
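
    Example (illustrative sketch; requires the enyo WFOS ETC to be installed and, since no
    spec_file is given, uses the default flat AB reference spectrum)::

        snr = calculate_wfos_snr(mag=24.0, mag_band="g", exptime=3600.0, print_summary=False)
        wave, sn = snr[0], snr[1]  # wavelength (angstrom) and S/N per pixel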
"""
try:
from enyo.etc import (
spectrum,
efficiency,
telescopes,
aperture,
detector,
extract,
)
from enyo.etc.observe import Observation
from enyo.etc.spectrographs import TMTWFOSBlue, TMTWFOSRed, WFOSGrating
from enyo.scripts.wfos_etc import (
get_source_distribution,
get_wavelength_vector,
observed_spectrum,
read_emission_line_database,
read_spectrum,
)
except ImportError:
raise ImportError(
"To calculate WFOS S/N you must first install the WFOS ETC.\n "
+ "See <> for installation instructions."
)
if sky_err < 0 or sky_err > 1:
raise ValueError("--sky_err option must provide a value between 0 and 1.")
# Extract the slit properties for clarity
slit_x, slit_y, slit_width, slit_length, slit_rotation = slit
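    # Slit width projected along the dispersion direction; a rotated slit presents a wider effective width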
effective_slit_width = slit_width / np.cos(np.radians(slit_rotation))
_extract_length = fwhm if extract_size is None else extract_size
# Slit aperture. This representation of the slit is *always*
# centered at (0,0). Set the aperture based on the extraction
# length for now.
slit = aperture.SlitAperture(
0.0, 0.0, slit_width, _extract_length, rotation=slit_rotation
)
# Get the source distribution. If the source is uniform, onsky is None.
onsky = get_source_distribution(fwhm, uniform, sersic)
# Sky spectrum and atmospheric throughput
sky_spectrum = spectrum.MaunakeaSkySpectrum()
atmospheric_throughput = efficiency.AtmosphericThroughput(airmass=airmass)
# Emission lines to add
emline_db = None if emline is None else read_emission_line_database(emline)
# Setup the raw object spectrum
if spec_file is None:
wavelengths = [3100, 10000, 1e-5]
wave = get_wavelength_vector(*wavelengths)
obj_spectrum = spectrum.ABReferenceSpectrum(wave, log=True)
else:
obj_spectrum = read_spectrum(
spec_file,
spec_wave,
spec_wave_units,
spec_flux,
spec_flux_units,
spec_res_indx,
spec_res_value,
spec_table,
)
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# Setup the instrument arms
# -------------------------------------------------------------------
# Blue Arm
blue_arm = TMTWFOSBlue(
reflectivity=refl,
grating=blue_grat,
cen_wave=blue_wave,
grating_angle=blue_angle,
)
# Pixels per resolution element
blue_res_pix = (
blue_arm.resolution_element(slit_width=effective_slit_width, units="pixels")
/ blue_binning[0]
)
# Get the wavelength range for each arm
blue_wave_lim = blue_arm.wavelength_limits(slit_x, slit_y, add_grating_limits=True)
# Setup dummy wavelength vectors to get something appropriate for sampling
max_resolution = blue_arm.resolution(
blue_wave_lim[1], x=slit_x, slit_width=effective_slit_width
)
# Set the wavelength vector to allow for a regular, logarithmic binning
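    # dw is the step in log10(wavelength) that gives blue_res_pix pixels per resolution element at the arm's maximum resolution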
dw = 1 / blue_res_pix / max_resolution / np.log(10)
blue_wave = get_wavelength_vector(blue_wave_lim[0], blue_wave_lim[1], dw)
resolution = blue_arm.resolution(
blue_wave, x=slit_x, slit_width=effective_slit_width
)
blue_spec = observed_spectrum(
obj_spectrum,
blue_wave,
resolution,
mag=mag,
mag_band=mag_band,
mag_system=mag_system,
redshift=redshift,
emline_db=emline_db,
)
blue_R_interp = interp1d(
blue_wave,
resolution,
fill_value=(resolution[0], resolution[-1]),
bounds_error=False,
)
# Resample to linear to better match what's expected for the detector
blue_ang_per_pix = (
blue_arm.resolution_element(
wave=blue_wave_lim, slit_width=effective_slit_width, units="angstrom"
)
/ blue_res_pix
)
blue_wave = get_wavelength_vector(
blue_wave_lim[0], blue_wave_lim[1], np.mean(blue_ang_per_pix), linear=True
)
blue_spec = blue_spec.resample(wave=blue_wave, log=False)
# Spectrograph arm efficiency (this doesn't include the telescope)
blue_arm_eff = blue_arm.efficiency(
blue_spec.wave, x=slit_x, y=slit_y, same_type=False
)
# System efficiency combines the spectrograph and the telescope
blue_thru = efficiency.SystemThroughput(
wave=blue_spec.wave,
spectrograph=blue_arm_eff,
telescope=blue_arm.telescope.throughput,
)
# Extraction: makes simple assumptions about the monochromatic
# image and extracts the flux within the aperture, assuming the
# flux from both the object and sky is uniformly distributed across
# all detector pixels (incorrect!).
# Extraction width in pixels
spatial_width = (
slit.length
* np.cos(np.radians(slit.rotation))
/ blue_arm.pixelscale
/ blue_binning[1]
)
blue_ext = extract.Extraction(
blue_arm.det, spatial_width=spatial_width, profile="uniform"
)
# Perform the observation
blue_obs = Observation(
blue_arm.telescope,
sky_spectrum,
slit,
exptime,
blue_arm.det,
system_throughput=blue_thru,
atmospheric_throughput=atmospheric_throughput,
airmass=airmass,
onsky_source_distribution=onsky,
source_spectrum=blue_spec,
extraction=blue_ext,
snr_units=snr_units,
)
# Construct the S/N spectrum
blue_snr = blue_obs.snr(sky_sub=True, sky_err=sky_err)
blue_R = blue_R_interp(blue_snr.wave)
# -------------------------------------------------------------------
# Red Arm
red_arm = TMTWFOSRed(
reflectivity=refl, grating=red_grat, cen_wave=red_wave, grating_angle=red_angle
)
# Pixels per resolution element
red_res_pix = (
red_arm.resolution_element(slit_width=effective_slit_width, units="pixels")
/ red_binning[0]
)
# Get the wavelength range for each arm
red_wave_lim = red_arm.wavelength_limits(slit_x, slit_y, add_grating_limits=True)
# Setup dummy wavelength vectors to get something appropriate for sampling
max_resolution = red_arm.resolution(
red_wave_lim[1], x=slit_x, slit_width=effective_slit_width
)
# Set the wavelength vector to allow for a regular, logarithmic binning
dw = 1 / red_res_pix / max_resolution / np.log(10)
red_wave = get_wavelength_vector(red_wave_lim[0], red_wave_lim[1], dw)
resolution = red_arm.resolution(red_wave, x=slit_x, slit_width=effective_slit_width)
red_spec = observed_spectrum(
obj_spectrum,
red_wave,
resolution,
mag=mag,
mag_band=mag_band,
mag_system=mag_system,
redshift=redshift,
emline_db=emline_db,
)
# Resample to linear to better match what's expected for the detector
red_ang_per_pix = (
red_arm.resolution_element(
wave=red_wave_lim, slit_width=effective_slit_width, units="angstrom"
)
/ red_res_pix
)
red_wave = get_wavelength_vector(
red_wave_lim[0], red_wave_lim[1], np.mean(red_ang_per_pix), linear=True
)
    red_spec = red_spec.resample(wave=red_wave, log=False)
# Spectrograph arm efficiency (this doesn't include the telescope)
red_arm_eff = red_arm.efficiency(red_spec.wave, x=slit_x, y=slit_y, same_type=False)
# System efficiency combines the spectrograph and the telescope
red_thru = efficiency.SystemThroughput(
wave=red_spec.wave,
spectrograph=red_arm_eff,
telescope=red_arm.telescope.throughput,
)
# Extraction: makes simple assumptions about the monochromatic
# image and extracts the flux within the aperture, assuming the
# flux from both the object and sky is uniformly distributed across
# all detector pixels (incorrect!).
# Extraction width in pixels
spatial_width = (
slit.length
* np.cos(np.radians(slit.rotation))
/ red_arm.pixelscale
/ red_binning[1]
)
red_ext = extract.Extraction(
red_arm.det, spatial_width=spatial_width, profile="uniform"
)
# Perform the observation
red_obs = Observation(
red_arm.telescope,
sky_spectrum,
slit,
exptime,
red_arm.det,
system_throughput=red_thru,
atmospheric_throughput=atmospheric_throughput,
airmass=airmass,
onsky_source_distribution=onsky,
source_spectrum=red_spec,
extraction=red_ext,
snr_units=snr_units,
)
# Construct the S/N spectrum
red_snr = red_obs.snr(sky_sub=True, sky_err=sky_err)
# Set the wavelength vector
dw = 1 / (5 if red_res_pix > 5 else red_res_pix) / max_resolution / np.log(10)
red_wave = get_wavelength_vector(red_wave_lim[0], red_wave_lim[1], dw)
resolution = red_arm.resolution(red_wave, x=slit_x, slit_width=effective_slit_width)
red_spec = observed_spectrum(
obj_spectrum,
red_wave,
resolution,
mag=mag,
mag_band=mag_band,
mag_system=mag_system,
redshift=redshift,
emline_db=emline_db,
)
red_R = interp1d(
red_wave,
resolution,
fill_value=(resolution[0], resolution[-1]),
bounds_error=False,
)(red_snr.wave)
# -------------------------------------------------------------------
# -------------------------------------------------------------------
snr_label = "S/N per {0}".format(
"R element" if snr_units == "resolution" else snr_units
)
if print_summary:
g = efficiency.FilterResponse(band="g")
r = efficiency.FilterResponse(band="r")
print("-" * 70)
print("{0:^70}".format("WFOS S/N Calculation (v0.1)"))
print("-" * 70)
print(
"Object g- and r-band AB magnitude: {0:.1f} {1:.1f}".format(
obj_spectrum.magnitude(band=g), obj_spectrum.magnitude(band=r)
)
)
print(
"Sky g- and r-band AB surface brightness: {0:.1f} {1:.1f}".format(
sky_spectrum.magnitude(band=g), sky_spectrum.magnitude(band=r)
)
)
print("Exposure time: {0:.1f} (s)".format(exptime))
if not uniform:
print("Aperture Loss: {0:.1f}%".format((1 - red_obs.aperture_factor) * 100))
if blue_snr.wave.max() > red_snr.wave.min():
bwave_overlap = blue_snr.wave[blue_snr.wave > red_snr.wave.min()]
rwave_overlap = red_snr.wave[red_snr.wave < blue_snr.wave.max()]
bsnr_overlap = blue_snr.flux[blue_snr.wave > red_snr.wave.min()]
rsnr_overlap = red_snr.flux[red_snr.wave < blue_snr.wave.max()]
diff = np.sqrt(
(bwave_overlap[:, np.newaxis] - rwave_overlap[np.newaxis, :]) ** 2
+ (bsnr_overlap[:, np.newaxis] - rsnr_overlap[np.newaxis, :]) ** 2
)
bintersect_ind, rintersect_ind = np.unravel_index(
np.argmin(diff, axis=None), diff.shape
)
i = 0
while bwave_overlap[bintersect_ind] > rwave_overlap[rintersect_ind + i]:
i += 1
bind = find_nearest_idx(blue_snr.wave, bwave_overlap[bintersect_ind])
rind = find_nearest_idx(red_snr.wave, rwave_overlap[rintersect_ind + i])
else:
bind = len(blue_snr.wave)
rind = 0
snr = np.vstack(
[
np.concatenate([blue_snr.wave[:bind], red_snr.wave[rind:]]),
np.concatenate([blue_snr.flux[:bind], red_snr.flux[rind:]]),
]
)
resolution = np.vstack(
[
np.concatenate([blue_snr.wave[:bind], red_snr.wave[rind:]]),
np.concatenate([blue_R[:bind], red_R[rind:]]),
]
)
if return_R:
return snr, resolution
else:
return snr
def calculate_muse_snr(
wave: np.ndarray,
flux: np.ndarray,
exptime: float,
nexp: int,
blueMUSE: bool = False,
airmass: float = 1.0,
seeing: float = 0.8,
moon: str = "d",
pointsource: bool = True,
nspatial: float = 3,
nspectral: float = 1,
):
"""
Calculates the S/N for VLT/MUSE and VLT/blueMUSE.
This code is adapted from https://git-cral.univ-lyon1.fr/johan.richard/BlueMUSE-ETC.
:param np.ndarray wave: Wavelength array corresponding to flux array
:param np.ndarray flux: Flux of source (ergs s^-1 Angstrom^-1 cm^-2)
:param float exptime: Exposure time in seconds
:param int nexp: Number of exposures
:param bool blueMUSE: If True, performs calculation for blueMUSE. If False, performs calculation for MUSE.
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param str moon: "d" for dark conditions. "g" for grey conditions.
:param bool pointsource: True if source is a point source. False if source is extended.
    :param int nspatial: Binning in the spatial direction
:param int nspectral: Binning in the spectral direction
:return np.ndarray: S/N as a function of wavelength for VLT/(blue)MUSE
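
    Example (illustrative sketch; wave and flux below are a hypothetical flat spectrum covering
    the MUSE wavelength range)::

        import numpy as np
        wave = np.arange(4750.0, 9350.0, 1.0)   # angstrom
        flux = np.full_like(wave, 1e-17)        # erg s^-1 cm^-2 angstrom^-1
        result = calculate_muse_snr(wave, flux, exptime=3600.0, nexp=3)
        wrange, sn = result[0], result[1]       # instrument wavelength grid and S/N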
"""
MUSE_etc_dir = etc_file_dir.joinpath("MUSE")
muse_files = [
MUSE_etc_dir.joinpath("NewBlueMUSE_noatm.txt"),
MUSE_etc_dir.joinpath("radiance_airmass1.0_0.5moon.txt"),
MUSE_etc_dir.joinpath("radiance_airmass1.0_newmoon.txt"),
MUSE_etc_dir.joinpath("transmission_airmass1.txt"),
MUSE_etc_dir.joinpath("WFM_NONAO_N.dat.txt"),
]
if not all([file.exists() for file in muse_files]):
download_bluemuse_files()
ron = 3.0 # readout noise (e-)
    dcurrent = 3.0  # dark current (e-/pixel/hour)
nbiases = 11 # number of biases used in calibration
tarea = 485000.0 # squared centimeters
teldiam = 8.20 # diameter in meters
h = 6.626196e-27 # erg.s
if blueMUSE:
spaxel = 0.3 # spaxel scale (arcsecs)
fins = 0.2 # Instrument image quality (arcsecs)
lmin = 3500.0 # minimum wavelength
lmax = 6000.0 # maximum wavelength
lstep = 0.66 # spectral sampling (Angstroms)
lsf = lstep * 2.0 # in Angstroms
musetrans = np.loadtxt(MUSE_etc_dir.joinpath("NewBlueMUSE_noatm.txt"))
wmusetrans = musetrans[:, 0] * 10.0 # in Angstroms
valmusetrans = musetrans[:, 1]
else:
warn("We recommend chemicalc.s2n.Sig2NoiseMUSE to directly query the VLT/MUSE ETC.", UserWarning)
spaxel = 0.2 # spaxel scale (arcsecs)
fins = 0.15 # Instrument image quality (arcsecs)
lmin = 4750.0 # minimum wavelength
lmax = 9350.0 # maximum wavelength
lstep = 1.25 # spectral sampling (Angstroms)
lsf = 2.5 # in Angstroms
musetrans = np.loadtxt(MUSE_etc_dir.joinpath("WFM_NONAO_N.dat.txt"))
polysky = [
-6.32655161e-12,
1.94056813e-08,
-2.25416420e-05,
1.19349511e-02,
-1.50077035e00,
]
psky = np.polyval(
polysky, musetrans[:, 0]
) # sky transmission fit over MUSE wavelength
wmusetrans = musetrans[:, 0] * 10.0 # in Angstroms
valmusetrans = musetrans[:, 1] / psky
wrange = np.arange(lmin, lmax, lstep)
waveinput = wave
fluxinput = flux
flux = np.interp(wrange, waveinput, fluxinput)
pixelarea = nspatial * nspatial * spaxel * spaxel # in arcsec^2
npixels = nspatial * nspatial * nspectral
# Compute image quality as a function of seeing, airmass and wavelength
iq = np.zeros(wrange.shape)
frac = np.zeros(wrange.shape)
snratio = np.zeros(wrange.shape)
sky = np.zeros(wrange.shape)
skyelectrons = np.zeros(wrange.shape)
shape = np.array((101, 101)) # in 0.05 arcsec pixels
yy, xx = np.mgrid[: shape[0], : shape[1]]
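    # Circular Moffat PSF on the 0.05-arcsec grid, centered at pixel (50, 50); the parameters
    # n, a, and norm are set inside the wavelength loop below before each evaluation.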
def moffat(p, q):
xdiff = p - 50.0
ydiff = q - 50.0
return norm * (1 + (xdiff / a) ** 2 + (ydiff / a) ** 2) ** (-n)
posmin = 50 - int(nspatial * spaxel / 2.0 / 0.05) # in 0.05 arcsec pixels
posmax = posmin + int(nspatial * spaxel / 0.05) # in 0.05 arcsec pixels
# For point sources compute the fraction of the total flux of the source
if pointsource:
for k in range(wrange.shape[0]):
# All of this is based on ESO atmosphere turbulence model (ETC)
ftel = 0.0000212 * wrange[k] / teldiam # Diffraction limit in arcsec
r0 = (
0.100 * (seeing ** -1) * (wrange[k] / 5000.0) ** 1.2 * (airmass) ** -0.6
) # Fried parameter in meters
fkolb = -0.981644
if r0 < 5.4:
fatm = (
seeing
* (airmass ** 0.6)
* (wrange[k] / 5000.0) ** (-0.2)
* np.sqrt((1.0 + fkolb * 2.183 * (r0 / 46.0) ** 0.356))
)
else:
fatm = 0.0
# Full image quality FWHM in arcsecs
iq[k] = np.sqrt(fatm * fatm + ftel * ftel + fins * fins)
fwhm = iq[k] / 0.05 # FWHM of PSF in 0.05 arcsec pixels
n = 2.5
a = fwhm / (2 * np.sqrt(2 ** (1.0 / n) - 1.0))
norm = (n - 1) / (np.pi * a * a)
psf = moffat(yy, xx)
# fraction of spatial PSF within extraction aperture
frac[k] = np.sum(psf[posmin:posmax, posmin:posmax])
# sky spectrum (grey moon)
if moon == "g":
skyemtable = np.loadtxt(
MUSE_etc_dir.joinpath("radiance_airmass1.0_0.5moon.txt")
)
skyemw = skyemtable[:, 0] * 10.0 # in Angstroms
skyemflux = (
skyemtable[:, 1] * airmass
) # in photons / s / m2 / micron / arcsec2 approximated at given airmass
elif moon == "d": # dark conditions - no moon
skyemtable = np.loadtxt(
MUSE_etc_dir.joinpath("radiance_airmass1.0_newmoon.txt")
        )  # sky spectrum (dark) - new moon
skyemw = skyemtable[:, 0] * 10.0 # in Angstroms
skyemflux = skyemtable[:, 1] * airmass # in photons / s / m2 / micron / arcsec2
else:
raise KeyError("moon must be either 'd' (dark) or 'g' (grey)")
# Interpolate sky spectrum at instrumental wavelengths
sky = np.interp(wrange, skyemw, skyemflux)
# loads sky transmission
atmtrans = np.loadtxt(MUSE_etc_dir.joinpath("transmission_airmass1.txt"))
atmtransw = atmtrans[:, 0] * 10.0 # In Angstroms
atmtransval = atmtrans[:, 1]
atm = np.interp(wrange, atmtransw, atmtransval)
# Interpolate transmission including sky transmission at corresponding airmass
# Note: ESO ETC includes a 60% margin for MUSE
transm = np.interp(wrange, wmusetrans, valmusetrans) * (atm ** (airmass))
transmnoatm = np.interp(wrange, wmusetrans, valmusetrans)
dit = exptime
ndit = nexp
for k in range(wrange.shape[0]):
kmin = 1 + np.max([-1, int(k - nspectral / 2)])
kmax = 1 + np.min([wrange.shape[0] - 1, int(k + nspectral / 2)])
if pointsource:
signal = (
(
np.sum(flux[kmin:kmax] * transm[kmin:kmax] * frac[kmin:kmax])
* lstep
/ (h * 3e18 / wrange[k])
)
* tarea
* dit
* ndit
) # in electrons
else: # extended source, flux is per arcsec2
signal = (
(
np.sum(flux[kmin:kmax] * transm[kmin:kmax])
* lstep
/ (h * 3e18 / wrange[k])
)
* tarea
* dit
* ndit
* pixelarea
) # in electrons
skysignal = (
(np.sum(sky[kmin:kmax] * transmnoatm[kmin:kmax]) * lstep / 10000.0)
* (tarea / 10000.0)
* dit
* ndit
* pixelarea
) # lstep converted in microns, tarea in m2 #in electrons
skyelectrons[k] = skysignal
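        # Total noise (electrons): readout noise over the binned pixels (with the 1/nbiases term
        # from master-bias subtraction), dark current over the total integration in hours, and
        # Poisson noise from the object and sky signals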
noise = np.sqrt(
ron * ron * npixels * (1.0 + 1.0 / nbiases) * ndit
+ dcurrent * (dit * ndit / 3600.0) * npixels
+ signal
+ skysignal
) # in electrons
snratio[k] = signal / noise
return np.vstack([wrange, snratio])
|