import requests
import aiohttp
class taromaru():
"""
If you're looking for async, use `taromaruasync()`
Example:
```py
taromaru = taromaru("Apikey")
print(taromaru.image(taromaru))
```
Result:
```json
{"error": "false", "image": "url"}
```
Current api calls:
- image(type)
"""
def __init__(self, apikey: str):
self.apikey = apikey
def image(self, type):
r = requests.get(f'https://taromaruapi.cu.ma/api/{type}/', params={
"apikey": self.apikey
})
return r.json()
class taromaruasync():
"""
If you do not want async, use `taromaru()`
Example:
```py
taromaru = taromaru("Apikey")
print(taromaru.image(taromaru))
```
Result:
```json
{"error": "false", "image": "url"}
```
Current api calls:
- image(type)
"""
def __init__(self, apikey: str):
self.apikey = apikey
async def image(self, type):
async with aiohttp.ClientSession() as session:
async with session.get(f'https://taromaruapi.cu.ma/api/{type}/', params={
"apikey": self.apikey
}) as resp:
return await resp.json()
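# Illustrative usage sketch (not part of the original wrapper): "your-api-key"
# and the "type" argument are placeholders for a real key and endpoint type.
if __name__ == "__main__":
    import asyncio

    sync_client = taromaru("your-api-key")
    print(sync_client.image("type"))

    async def main():
        async_client = taromaruasync("your-api-key")
        print(await async_client.image("type"))

    asyncio.run(main())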
|
import logging
import numpy as np
from mms.utils.mxnet import image
from skimage import transform
import mxnet as mx
import cv2 as cv
from mxnet_model_service import MXNetModelService
# One time initialization of Haar Cascade Classifier to extract and crop out face
face_detector = cv.CascadeClassifier('haarcascade_frontalface.xml')
# Classifier parameter specifying how much the image size is reduced at each image scale
scale_factor = 1.3
# Classifier parameter how many neighbors each candidate rectangle should have to retain it
min_neighbors = 5
def crop_face(image):
"""Attempts to identify a face in the input image.
Parameters
----------
image : array representing a BGR image
Returns
-------
array
The cropped face, transformed to grayscale. If no face found returns None
"""
gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
face_roi_list = face_detector.detectMultiScale(gray_image, scale_factor, min_neighbors)
if len(face_roi_list) > 0:
(x,y,w,h) = face_roi_list[0]
return gray_image[y:y+h,x:x+w]
else:
return None
def compute_norm_matrix(width, height):
# normalization matrix used in image pre-processing
x = np.arange(width)
y = np.arange(height)
X, Y = np.meshgrid(x, y)
X = X.flatten()
Y = Y.flatten()
A = np.array([X * 0 + 1, X, Y]).T
A_pinv = np.linalg.pinv(A)
return A, A_pinv
def normalize(img, width, height):
A, A_pinv = compute_norm_matrix(width, height)
# compute image histogram
img_flat = img.flatten()
img_flat = img_flat.astype(int)
img_hist = np.bincount(img_flat, minlength=256)
# cumulative distribution function
cdf = img_hist.cumsum()
cdf = cdf * (2.0 / cdf[-1]) - 1.0 # normalize
# histogram equalization
img_eq = cdf[img_flat]
diff = img_eq - np.dot(A, np.dot(A_pinv, img_eq))
# after plane fitting, the mean of diff is already 0
std = np.sqrt(np.dot(diff, diff) / diff.size)
if std > 1e-6:
diff = diff / std
return diff.reshape(img.shape)
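# Illustrative example: applying the plane-fit normalization to a random
# 64x48 grayscale image; the output keeps the image's shape and is
# standardized towards zero mean and unit variance.
#
#   demo = np.random.randint(0, 256, size=(48, 64)).astype(np.uint8)
#   out = normalize(demo, width=64, height=48)
#   out.shape  # -> (48, 64)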
class FERService(MXNetModelService):
"""
Defines custom pre and post processing for the Facial Emotion Recognition model
"""
def preprocess(self, request):
"""
Pre-process requests by attempting to extract face image, and transforming to fit the model's input
Returns
-------
list of NDArray
Processed images in the model's expected input shape
"""
# Reset the error state for this request
self.error = None
img_list = []
input_shape = self.signature['inputs'][0]['data_shape']
[height, width] = input_shape[2:]
param_name = self.signature['inputs'][0]['data_name']
# Iterate over all input images provided with the request, transform and append for inference
for idx, data in enumerate(request):
# Extract the input image
img = data.get(param_name)
if img is None:
img = data.get("body")
if img is None:
img = data.get("data")
if img is None or len(img) == 0:
self.error = "Empty image input"
return None
try:
img_arr = image.read(img).asnumpy()
except Exception as e:
logging.warning(e, exc_info=True)
self.error = "Corrupted image input"
return None
# Try to identify face to crop
face = crop_face(img_arr)
if face is not None:
face = transform.resize(face, (height, width))
# If no face identified - use the entire input image
else:
face = cv.cvtColor(img_arr, cv.COLOR_BGR2GRAY)
# Transform image into tensor of the required shape
face = np.resize(face, input_shape)
face = normalize(face, height, width)
face = mx.nd.array(face)
img_list.append(face)
return img_list
def postprocess(self, data):
"""
Post-process inference result to normalize probabilities and render with labels
Parameters
----------
data : list of NDArray
Inference output.
Returns
-------
list of object
list of outputs to be sent back.
"""
if self.error is not None:
return [self.error]
# Iterating over inference results to render the normalized probabilities
response = []
for inference_result in data:
softmax_result = inference_result.softmax().asnumpy()
for idx, label in enumerate(self.labels):
response.append({label: float(softmax_result[0][idx])})
return [response]
_service = FERService()
def handle(data, context):
"""
Entry point for the service, called by MMS for every incoming inference request
"""
# Lazy initialization, so that we preserve resources until model is actually needed
if not _service.initialized:
_service.initialize(context)
if data is None:
return None
return _service.handle(data, context)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'zoo_AboutWin.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AboutWin(object):
def setupUi(self, AboutWin):
AboutWin.setObjectName("AboutWin")
AboutWin.resize(600, 800)
self.gridLayout = QtWidgets.QGridLayout(AboutWin)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(AboutWin)
font = QtGui.QFont()
font.setFamily("Liberation Serif")
font.setPointSize(18)
font.setBold(False)
font.setWeight(50)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.textBrowser = QtWidgets.QTextBrowser(AboutWin)
self.textBrowser.setObjectName("textBrowser")
self.verticalLayout.addWidget(self.textBrowser)
self.buttonBox = QtWidgets.QDialogButtonBox(AboutWin)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(True)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.gridLayout.addLayout(self.verticalLayout, 1, 1, 1, 1)
self.retranslateUi(AboutWin)
self.buttonBox.accepted.connect(AboutWin.accept)
self.buttonBox.rejected.connect(AboutWin.reject)
QtCore.QMetaObject.connectSlotsByName(AboutWin)
def retranslateUi(self, AboutWin):
_translate = QtCore.QCoreApplication.translate
AboutWin.setWindowTitle(_translate("AboutWin", "Dialog"))
self.label.setText(_translate("AboutWin", "Network Zoo v0.007"))
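# Illustrative preview snippet (any edit to this generated file is lost on
# regeneration): the standard way to host the generated Ui class in a QDialog.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_AboutWin()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())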
|
import lcd, image, time
bg = (236, 36, 36)
lcd.init(freq=15000000)
lcd.direction(lcd.YX_RLDU)
lcd.clear(lcd.RED)
time.sleep(1)
lcd.draw_string(120, 120, "hello maixpy", lcd.WHITE, lcd.RED)
time.sleep(2)
img = image.Image()
img.draw_string(60, 100, "hello maixpy", scale=2)
img.draw_rectangle((120, 120, 30, 30))
lcd.display(img)
lcd.init(type=1, freq=15000000)
# lcd.init(type=2, freq=20000000)
# lcd.init(type=1, width=320, height=240, invert=True, freq=20000000)
img = image.Image(size=(240,240))
img.draw_rectangle(0,0,30, 240, fill=True, color=(0xff, 0xff, 0xff))
img.draw_rectangle(30,0,30, 240, fill=True, color=(250, 232, 25))
img.draw_rectangle(60,0,30, 240, fill=True, color=(106, 198, 218))
img.draw_rectangle(90,0,30, 240, fill=True, color=(98, 177, 31))
img.draw_rectangle(120,0,30, 240, fill=True, color=(180, 82, 155))
img.draw_rectangle(150,0,30, 240, fill=True, color=(231, 47, 29))
img.draw_rectangle(180,0,30, 240, fill=True, color=(32, 77, 158))
img.draw_rectangle(210,0,30, 240, fill=True, color=(27, 28, 32))
lcd.display(img)
count = 500
while count > 0:
t = time.ticks_ms()
lcd.display(img)
# print(time.ticks_ms() - t)
count -= 1
|
from flask import Flask, render_template, request, redirect, send_file
from scrapper import get_jobs
app = Flask("SuperScrapper")
from exporter import save_to_file
db = {}
@app.route("/")
def home():
return render_template("home.html")
@app.route("/report")
def report():
word = request.args.get('word')
if word:
word = word.lower()
existingJob = db.get(word)
if existingJob:
jobs = existingJob
else:
jobs = get_jobs(word)
db[word] = jobs
else:
return redirect("/")
return render_template(
"report.html",
searchingBy=word,
resultsNumber=len(jobs),
jobs=jobs
)
@app.route("/export")
def export():
try:
word = request.args.get('word')
if not word:
raise Exception()
word = word.lower()
jobs = db.get(word)
if not jobs:
raise Exception()
save_to_file(jobs)
return send_file("jobs.csv")
except:
return redirect("/")
app.run(host="0.0.0.0")
|
# Project Quex (http://quex.sourceforge.net); License: MIT;
# (C) 2005-2020 Frank-Rene Schaefer;
#_______________________________________________________________________________
#! /usr/bin/env python
import os
import sys
sys.path.insert(0, os.environ["QUEX_PATH"])
from quex.engine.state_machine.core import DFA
from quex.engine.state_machine.TEST_help.many_shapes import *
from quex.engine.analyzer.examine.acceptance import RecipeAcceptance
from quex.engine.analyzer.examine.core import Examiner
if "--hwut-info" in sys.argv:
print("Categorize into Linear and Mouth States")
print("CHOICES: %s;" % get_sm_shape_names())
sys.exit()
sm, state_n, pic = get_sm_shape_by_name(sys.argv[1])
examiner = Examiner(sm, RecipeAcceptance)
examiner.categorize()
if "pic" in sys.argv:
print(pic)
print("Linear States:", list(examiner.linear_db.keys()))
print("Mouth States:", list(examiner.mouth_db.keys()))
|
"""Main entry point for application"""
import os
import logging
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from fastapi import FastAPI, status, Request
from .api import auth_api, client_api, provider_api, blockchain_api
import uuid
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
logging.info("Starting")
# Default to no extra middleware so `middleware` is always defined below
middleware = []
if 'ENVIRONMENT' not in os.environ or os.environ['ENVIRONMENT'] == 'development':
logging.info("Development CORS policy enabled")
middleware = [ Middleware(
CORSMiddleware,
allow_origins=['http://localhost:3000', 'http://localhost:*', 'https://app.dev.blockmedisolutions.com'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*']
)]
app = FastAPI(middleware=middleware)
@app.middleware("http")
async def add_correlation_header(request: Request, call_next):
correlation_id = str(uuid.uuid4())
response = await call_next(request)
response.headers["X-Correlation-Id"] = correlation_id
return response
app.include_router(auth_api)
app.include_router(client_api)
app.include_router(provider_api)
app.include_router(blockchain_api)
@app.get('/api/health', status_code=status.HTTP_200_OK)
def health():
"""Health check endpoint for use by ECS"""
return True
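# Illustrative smoke-test sketch (the TestClient usage below is an assumption,
# not part of this app):
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   assert client.get("/api/health").status_code == 200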
|
import os
import sys
import shutil
from configparser import ConfigParser
import zipfile
import requests
from git import Repo
import subprocess
first = True
command = "req"
InstallMode = "Auto"
config = ConfigParser()
try:
os.environ["GIT_PYTHON_GIT_EXECUTABLE"] = os.getcwd() + "/Git/cmd"
config.read("config.ini")
vs_url = str(config["Settings"]["vs_link"])
update = config["Settings"]["update"]
git_url = config["Settings"]["git_link"]
cmake_url = config["Settings"]["cmake_link"]
skia_url = config["Settings"]["skia_link"]
ninja_url = config["Settings"]["ninja_link"]
n_p = config["Settings"]["ninja_path"]
aseprite_path = config["Settings"]["aseprite_path"]
except Exception as e:
print("Config File Is Corrupted or does not Exist!" + e)
if update == "True":
if os.path.isdir("Git"):
os.remove("Git")
r_vs = requests.get(vs_url)
r_git = requests.get(git_url)
r_cmake = requests.get(cmake_url)
r_skia = requests.get(skia_url)
r_ninja = requests.get(ninja_url)
os.mkdir("Git")
open("Git.zip", "wb").write(r_git.content)
open("vs.exe", "wb").write(r_vs.content)
open("cmake.msi", "wb").write(r_cmake.content)
open("skia.zip", "wb").write(r_skia.content)
open("ninja.zip", "wb").write(r_ninja.content)
with zipfile.ZipFile("Git.zip", "r") as zf:
zf.extractall("Git")
os.remove("Git.zip")
os.system("cmake.msi")
os.remove("cmake.msi")
os.system("vs.exe")
os.remove("vs.exe")
config.set("Settings", "update", "False")
with open("config.ini", "w") as configfile:
config.write(configfile)
def change_install_mode(mode):
global InstallMode
InstallMode = mode
print("Success! Install-Mode is now: " + InstallMode)
def Install():
Repo.clone_from("https://github.com/aseprite/aseprite.git", aseprite_path + "aseprite", recursive = True)
os.mkdir(aseprite_path + "deps")
os.mkdir(aseprite_path + "aseprite/build")
skia_path = "skia.zip"
ninja_path = "ninja.zip"
try:
with zipfile.ZipFile(skia_path, "r") as zf:
zf.extractall(aseprite_path + "deps/skia")
with zipfile.ZipFile(ninja_path, "r") as zf:
zf.extractall(n_p)
except Exception as e:
print(e)
if os.path.isdir("C:/Program Files/Microsoft Visual Studio/2022/Community/Common7/Tools"):
_extracted_from_Install_21(
'call "C:/Program Files/Microsoft Visual Studio/2022/Community/Common7/Tools/VsDevCmd.bat" -arch=x64'
)
elif os.path.isdir("C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/Common7/Tools"):
_extracted_from_Install_21(
'call "C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/Common7/Tools/VsDevCmd.bat" -arch=x64'
)
else:
print("No Visual Studio installation found", "No Visual Studio installation found. Please refer to https://github.com/TheLiteCrafter/AsepriteTool")
os.system('shortcut /a:c /f:"C:/ProgramData/Microsoft/Windows/Start Menu/Programs/Aseprite.lnk" /t:"' + aseprite_path + 'build/bin/aseprite.exe"')
print("Done! Finisched Compiling Aseprite! It can be found by searching for aseprite in the start menu")
os.remove("cmd.bat")
# TODO Rename this here and in `Install`
def _extracted_from_Install_21(arg0):
with open("cmd.bat", "w") as f:
f.write(arg0 + "\n")
f.write("cd " + aseprite_path + "aseprite" + "\n")
f.write("mkdir build" + "\n")
f.write("cd build" + "\n")
f.write("cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DLAF_BACKEND=skia -DSKIA_DIR=" + aseprite_path + "deps/skia" + " -DSKIA_LIBRARY_DIR=" + aseprite_path + "deps/skia/out/Release-x64" + " -DSKIA_LIBRARY=" + aseprite_path + "deps/skia/out/Release-x64/skia.lib" + " -G Ninja .." + "\n")
f.write("ninja aseprite")
subprocess.call(["cmd.bat"])
def Update():
repo = Repo(aseprite_path + "aseprite")
o = repo.remotes.origin
o.pull()
for submodule in repo.submodules:
submodule.update(init=True, recursive=True)
if os.path.isdir("C:/Program Files/Microsoft Visual Studio/2022/Community/Common7/Tools"):
_extracted_from_Update_8(
'call "C:/Program Files/Microsoft Visual Studio/2022/Community/Common7/Tools/VsDevCmd.bat" -arch=x64'
)
elif os.path.isdir("C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/Common7/Tools"):
_extracted_from_Update_8(
'call "C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/Common7/Tools/VsDevCmd.bat" -arch=x64'
)
else:
print("No Visual Studio installation found", "No Visual Studio installation found. Please refer to https://github.com/TheLiteCrafter/AsepriteTool")
os.system('shortcut /a:c /f:"C:/ProgramData/Microsoft/Windows/Start Menu/Programs/Aseprite.lnk" /t:"' + aseprite_path + 'build/bin/aseprite.exe"')
print("Done! Finisched Compiling Aseprite! It can be found by searching for aseprite in the start menu")
os.remove("cmd.bat")
# TODO Rename this here and in `Update`
def _extracted_from_Update_8(arg0):
with open("cmd.bat", "w") as f:
f.write(arg0 + "\n")
f.write("cd " + aseprite_path + "aseprite/build" + "\n")
f.write("cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DLAF_BACKEND=skia -DSKIA_DIR=C:\deps\skia -DSKIA_LIBRARY_DIR=C:\deps\skia\out\Release-x64 -DSKIA_LIBRARY=C:\deps\skia\out\Release-x64\skia.lib -G Ninja .." + "\n")
#f.write("cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DLAF_BACKEND=skia -DSKIA_DIR=" + aseprite_path + "deps/skia" + " -DSKIA_LIBRARY_DIR=" + aseprite_path + "deps/skia/out/Release-x64" + " -DSKIA_LIBRARY=" + aseprite_path + "deps/skia/out/Release-x64/skia.lib" + " -G Ninja .." + "\n")
f.write("ninja aseprite")
subprocess.call(["cmd.bat"])
while 1:
if not first:
command = input("Please Enter a Command: ")
command = str(command).lower()
if command == "help":
print("List of avilable commands:")
print("help - Shows a List of all avilable commands")
print("start - Starts the install/update process")
print("exit - Exists the programm")
print("req - Shows all requierments")
print("InstallMode Auto/Update/Install - Changes the Installation-Mode")
elif command == "installmode auto":
change_install_mode("Auto")
elif command == "installmode install":
change_install_mode("Install")
elif command == "installmode update":
change_install_mode("Update")
elif command == "exit":
sys.exit()
elif command == "start":
if InstallMode == "Auto":
if os.path.isdir(aseprite_path + "aseprite") and os.path.isdir(aseprite_path + "deps"):
print("Update Mode detected.")
Update()
else:
print("Install mode detected.")
Install()
elif InstallMode == "Install":
Install()
elif InstallMode == "Update":
Update()
elif command == "req":
print("Requierments: ")
print("")
print("Visual Studio and Cmake will automatically be downloaded. On Cmake dont forget to select add to Path for all Users, and on Visual Studio the Desktop Development with C++ and under Individual Items (Check on Aseprite Guide: https://github.com/aseprite/aseprite/blob/main/INSTALL.md#windows-dependencies)")
first = False
|
# -*- coding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1426544011.476607
_enable_loop = True
_template_filename = '/Users/Nate/chf_dmp/catalog/scripts/index.jsm'
_template_uri = 'index.jsm'
_source_encoding = 'ascii'
import os, os.path, re
_exports = []
def render_body(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
request = context.get('request', UNDEFINED)
__M_writer = context.writer()
__M_writer(
"$(function() {\n // update the time every 1 seconds\n window.setInterval(function() {\n $('.browser-time').text('The current browser time is ' + new Date() + '.');\n }, ")
__M_writer(str(request.urlparams[1] or 1000))
__M_writer(
");\n\n // update button\n $('#server-time-button').click(function() {\n $('.server-time').load('/catalog/index.get_time');\n });\n});\n")
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"filename": "/Users/Nate/chf_dmp/catalog/scripts/index.jsm", "source_encoding": "ascii", "uri": "index.jsm", "line_map": {"16": 0, "24": 5, "30": 24, "22": 1, "23": 5}}
__M_END_METADATA
"""
|
"""常用网络函数库封装"""
import json
import uuid
import http.client
import http.cookiejar
import gzip
import zlib
import os.path
import urllib.parse as urlparse
import urllib.request as request
from collections import namedtuple
import xml.etree.cElementTree as et
from typing import Tuple, Dict, Optional, Union
from . import file
from ..algorithm import Tree
CHROME_HEADER = {
"Accept-Encoding":
"deflate, gzip",
"Accept-Language":
"zh-CN,zh;q=0.8,en;q=0.6",
"Cache-Control":
"no-cache",
"Connection":
"Keep-Alive",
"Pragma":
"no-cache",
"Accept": ("text/html,application/xhtml+xml"
",application/xml;q=0.9,image/webp,*/*;q=0.8"),
"User-Agent": (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/76.0.3809.100 Safari/537.36"
)
} # Default request header
_DEFAULT_XML_ENCODING = "utf8" # Default XML encoding
_DEFAULT_XML_ROOT_NAME = "xml" # Default XML root node name
_DECOMPRESS = "Content-Encoding" # Header key for the compression method
_CHARSET = "Content-Type" # Header key for the charset
_DEFAULT_CHARSET = "utf-8" # Default page encoding
# HTTP Content header when the body is a plain byte stream
def _TEXT_HEADER():
return {'Content-Type': "text/plain"}
# HTTP Content header for form-encoded parameters
def _PARAMS_HEADER():
return {'Content-Type': 'application/x-www-form-urlencoded'}
# HTTP Content header for multipart file uploads
def _FILE_HEADER(boundry):
return {'Content-Type': 'multipart/form-data; boundary=' + boundry}
# Result type returned by the GET/POST helpers below
WebResult: object = namedtuple("WebResult", ("data", "response"))
class WebTools:
"""一些网络交互过程需要的工具"""
@staticmethod
def urlencode(raw_url: str) -> str:
"""将网址按照 Unicode 编码"""
encoded_url = request.quote(raw_url)
return encoded_url
@staticmethod
def urldecode(encoded_url: str) -> str:
"""将网址按照 Unicode 解码"""
raw_url = request.unquote(encoded_url)
return raw_url
@staticmethod
def JSONparser(json_: str) -> dict:
"""根据JSON字符创创建字典"""
data = json.loads(json_)
return data
@staticmethod
def JSONcreater(data: dict) -> str:
"""根据字典创建JSON字符串"""
# ensure_ascii = False - 不检查 ascii 转换错误
json_string = json.dumps(data, ensure_ascii=False)
return json_string
@staticmethod
def XMLparser(xmlstr: str) -> dict:
"""根据XML字符创创建字典"""
# 创建 xmlTree 对象解析节点信息
xmltree = et.fromstring(xmlstr)
# 将 xmlTree 对象转换为内部 tree 对象
# 这里仅仅记载 xml 中 text 的内容
# 标签中 attribute 的部分将被丢弃
root = Tree.fromxml(xmltree, True, False)
itree = Tree(root)
return itree.todict()
@staticmethod
def XMLcreater(data: dict, encoding: str = _DEFAULT_XML_ENCODING) -> str:
"""
根据传入的字典创建XML字符串:
encoding 参数传入 False 时无 <xml version="1.0" ?>
"""
# 创建 tree.Tree 对象
root = Tree.fromdict(data)
itree = Tree(root)
# 创建 XMLTree 对象
xmltree = itree.toxml()
xmlstr = et.tostring(xmltree, encoding=encoding, method="xml")
return xmlstr.decode()
@staticmethod
def _encode_files(boundry: str, files: Dict[str, str]) -> bytes:
"""
对传入的文件进行编码:
按照 RFC1867 对传入的文件列表进行编码
会首先尝试推测文件的 mimetype
将信息保存后会按照字节流的方式将文件编码至 Bytes 对象
*boundry: str: 编码边界
*files: Dict[str: 文件名, str: 文件绝对路径]
"""
# 文件名模板
_BOUNDRY = ("--" + boundry).encode()
_DISPOSITION = "Content-Disposition: form-data; "
_FILE_FORMAT = "name='{0}'; filename='{1}'"
_CONTENT_TYPE = "\nContent-Type: {type}"
# 生成缓冲区保存编码的文件
buffer = list()
for filename, location in files.items():
# 添加文件的基本信息
rawname = os.path.basename(location)
mimetype = file.mimetype(location)
buffer.append(_DISPOSITION.encode())
format_ = _FILE_FORMAT.format(rawname, filename)
buffer.append(format_.encode())
type_ = _CONTENT_TYPE.format(type=mimetype)
buffer.append(type_.encode())
# 添加文件内容
handler = open(location, "rb")
buffer.append(b"\r\n" + handler.read())
buffer.append(_BOUNDRY)
handler.close()
# 最后添加的一个 boundry 需要补充两个横线 --
buffer.append("--".encode())
return b''.join(buffer)
@staticmethod
def _encode_paras_str(data: str) -> bytes:
"""
对传入的参数列表进行编码:
按照 RFC2616 标准对传入的参数字典进行编码
判断传入的 data 判断是字符串/字典:
当为字符串时直接 encode 添加
当为字典时使用 urlencode 函数处理 encode 添加
"""
return data.encode()
@staticmethod
def _encode_paras_dict(params: dict) -> bytes:
"""
对传入的参数列表进行编码:
按照 RFC2616 标准对传入的参数字典进行编码
判断传入的 data 判断是字符串/字典:
当为字符串时直接 encode 添加
当为字典时使用 urlencode 函数处理 encode 添加
"""
urlencoded: str = urlparse.urlencode(params)
return urlencoded.encode()
@staticmethod
def encode(payload: Union[dict, str] = None,
files: Optional[dict] = None) -> Tuple[bytes, dict]:
"""对传入的信息进行符合 RFC HTTP/1.1 通讯格式要求的编码"""
# 生成数据分割 boundry 并对 Header 头进行修改
buffer = bytes()
boundry = uuid.uuid4().hex
additional_header: dict = dict()
# 检查数据格式修改 Content 头
if isinstance(payload, dict):
buffer += WebTools._encode_paras_dict(payload)
additional_header = _PARAMS_HEADER()
if isinstance(payload, str):
buffer += WebTools._encode_paras_str(payload)
additional_header = _TEXT_HEADER()
# 检查是否有文件要发送
if not files is None:
buffer += WebTools._encode_files(bool, files)
additional_header = _FILE_HEADER(boundry)
# 返回所有数据流和 HTTP 请求头
return buffer, additional_header
@staticmethod
def HTTPopener(headers: dict) -> request.OpenerDirector:
"""构建一个支持 cookie 的 HTTPopener - 基于urllib.request"""
# 添加 cookie 支持
cookie_jar = http.cookiejar.CookieJar()
cookie_support = request.HTTPCookieProcessor(cookie_jar)
# 构建 opener
opener = request.build_opener(cookie_support, request.HTTPHandler)
request.install_opener(opener)
opener.addheaders = list(headers.items())
return opener
@staticmethod
def HTTPConnector(target: str, key: Optional[str] = None,
cert: Optional[str] = None) -> http.client.HTTPConnection:
"""
一个 http/https 链接器
*target 若为 https 开头则进行 HTTPS 链接
*cert 若传入则为需要进行连接的证书
"""
# 获取连接的 host 主机名
address: tuple = urlparse.urlparse(target)
host: str = address.netloc
# 分情况建立连接
if target.startswith("https://"):
connection = http.client.HTTPSConnection(
host, key_file=key, cert_file=cert)
else:
connection = http.client.HTTPConnection(host)
return connection
@staticmethod
def _url_combiner(path: str, params: Optional[dict] = None) -> str:
"""url 连接器 - 将传入的 host 和参数合成为 url"""
# 将连接分解为 host, path, ...
infomations = urlparse.urlparse(path)
path = infomations.path
# 如果 params 为 None 则不进行合并
if params is None:
return path
# 将给定的参数全部合并
query: str = infomations.query + "&" + urlparse.urlencode(params)
# 连接 host 和参数
url: str = urlparse.urljoin(path, '?' + query)
return url
@staticmethod
def _get_charset(response: http.client.HTTPResponse) -> str:
"""尝试从回显 HTTP 头中获得网页编码信息"""
# Content-Type 格式 "text/html; charset=GB2312"
content = response.headers.get(_CHARSET)
# 当 headers-content 中没有提供编码时直接返回默认的 charset
if content is None:
return _DEFAULT_CHARSET
# 当提供了 charset 信息时尝试寻找
try:
info = content.split(';')[1]
info = info.strip(' ')
charset = info.split('=')[1]
except IndexError:
# 获取不到有效编码信息时
return _DEFAULT_CHARSET
else:
return charset
return _DEFAULT_CHARSET
@staticmethod
def _get_compress(response: http.client.HTTPResponse) -> str:
"""尝试获取网页数据的压缩类型"""
return response.headers.get(_DECOMPRESS, '')
@staticmethod
def decompress(rawdata: bytes,
response: http.client.HTTPResponse) -> str:
"""尝试解压缩 bytes 类型的 http 数据包"""
# 获取压缩和编码类型
compress_method = WebTools._get_compress(response)
charset = WebTools._get_charset(response)
# 进行解压
decompressed = rawdata
if compress_method == "gzip":
decompressed = gzip.decompress(rawdata)
if compress_method == "deflate":
decompressed = zlib.decompress(rawdata, -zlib.MAX_WBITS)
# 进行解编码
return decompressed.decode(charset, "ignore")
@staticmethod
def get(target: str, params: Optional[dict] = None,
cert: Optional[Tuple[str, str]] = None,
header: Optional[dict] = None)\
-> Tuple[str, http.client.HTTPResponse]:
"""
以 Get 方式请求一个 HTTP 目标
*params 为链接中需要携带的参数
*cert 为当进行 https 链接时需要的证书:
首先是 key 文件, 之后是 cert 文件
*header 为需要指定的 UA
"""
# 判断是否没有传入 header
if header is None:
header = dict()
# 拼接链接 - 获取 url
url = WebTools._url_combiner(target, params)
if cert:
connection = WebTools.HTTPConnector(target, *cert)
else:
connection = WebTools.HTTPConnector(target)
connection.request("GET", url, headers=header)
# 获取响应
response = connection.getresponse()
rawdata = response.read()
data = WebTools.decompress(rawdata, response)
response.close()
return WebResult(data=data, response=response)
@staticmethod
def post(target: str, params: Optional[dict],
forms: Union[dict, str] = None,
files: Optional[dict] = None,
cert: Optional[Tuple[str, str]] = None,
header: Optional[dict] = None) \
-> Tuple[str, http.client.HTTPResponse]:
"""
以 POST 方式请求一个 HTTP 目标
*params 为链接中需要携带的参数
*forms 为需要携带的 form 参数/字符串参数
*files 为需要携带的文件参数
*cert 为当进行 https 链接时需要的证书:
首先是 key 文件, 之后是 cert 文件
*header 为需要指定的 UA
"""
# 判断是否没有传入 header
if header is None:
header = dict()
# 获取请求需要的 url, payload, 额外 header
url = WebTools._url_combiner(target, params)
payload, optional = WebTools.encode(forms, files)
if cert:
connection = WebTools.HTTPConnector(target, *cert)
else:
connection = WebTools.HTTPConnector(target)
# 进行请求
header.update(optional)
connection.request("POST", url, payload, header)
# 获取响应
response = connection.getresponse()
rawdata = response.read()
data = WebTools.decompress(rawdata, response)
response.close()
return WebResult(data=data, response=response)
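# Illustrative usage sketch (example.com is only a placeholder target):
# a GET with query parameters, then a POST with a form payload.
if __name__ == "__main__":
    page = WebTools.get("http://example.com/", params={"q": "hello"},
                        header=dict(CHROME_HEADER))
    print(page.response.status, len(page.data))
    posted = WebTools.post("http://example.com/post", params=None,
                           forms={"field": "value"})
    print(posted.response.status)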
|
from random import choices
import string
from os.path import isfile
from celery import shared_task
from django.core.files import File
from groups.libs.latex import Renderer, DocumentPresenter
from groups.models import Document
@shared_task
def render_document(document_id):
document = Document.objects.get(id=document_id)
latex = Renderer()
latex.render(DocumentPresenter.present(document))
name = ''.join(choices(string.ascii_letters + string.digits, k=12))
if isfile(latex.output_filename()):
document.rendered_file.save(name + ".pdf", File(latex.output_file()))
document.save()
else:
raise Exception("Can't render latex")
latex.clean()
|
"""Constant values needed for string parsing"""
intervals = {
"intervalToTuple": {
"1": (1,0),
"2": (2,2),
"b3": (3,3),
"3": (3,4),
"4": (4,5),
"b5": (5,6),
"5": (5,7),
"#5": (5,8),
"6": (6,9),
"bb7": (7,9),
"b7": (7,10),
"7": (7,11),
"9": (9,14),
"b9": (9,13),
"#9": (9,15),
"11": (11,17),
"#11": (11,18),
"b13": (13,20),
"13": (13,21),
"s5": (5,8),
"s9": (9,15),
"s11": (11,18)
},
"shorthandToIntervals": {
"maj": ("1", "3", "5"),
"min": ("1", "b3", "5"),
"dim": ("1", "b3", "b5"),
"aug": ("1", "3", "#5"),
"maj7": ("1", "3", "5", "7"),
"min7": ("1", "b3", "5", "b7"),
"7": ("1", "3", "5", "b7"),
"dim7": ("1", "b3", "b5", "bb7"),
"hdim7": ("1", "b3", "b5", "b7"),
"minmaj7": ("1", "b3", "5", "7"),
"maj6": ("1", "3", "5", "6"),
"min6": ("1", "b3", "5", "6"),
"9": ("1", "3", "5", "b7", "9"),
"maj9": ("1", "3", "5", "7", "9"),
"min9": ("1", "b3", "5", "b7", "9"),
"9sus": ("1", "4", "5", "b7", "9"),
"sus9": ("1", "4", "5", "b7", "9"),
"11": ("1", "3", "5", "b7", "11"),
"13": ("1", "3", "5", "b7", "13"),
"sus2": ("1", "2", "5"),
"sus4": ("1", "4", "5"),
# Unofficial aliases, comment out for stricter interpretation
"6": ("1", "3", "5", "6"),
"hdim": ("1", "b3", "b5", "b7")
}
}
notes = {
"naturalToStep": {
"C": 0,
"D": 1,
"E": 2,
"F": 3,
"G": 4,
"A": 5,
"B": 6,
},
"naturalToHalfStep": {
"Cbb": 10,
"Cb": 11,
"C": 0,
"Dbb": 0,
"Db": 1,
"D": 2,
"Ebb": 2,
"Eb": 3,
"E": 4,
"Fb": 4,
"F": 5,
"Gb": 6,
"G": 7,
"Abb": 7,
"Ab": 8,
"A": 9,
"Bbb": 9,
"Bb": 10,
"B": 11,
"C#": 1,
"C##": 2,
"D#": 3,
"E#": 5,
"F#": 6,
"G#": 8,
"A#": 10,
"B#": 12,
},
"stepToNatural": {
"0": "C",
"1": "D",
"2": "E",
"3": "F",
"4": "G",
"5": "A",
"6": "B",
},
"natural": [
"A",
"B",
"C",
"D",
"E",
"F",
"G"
],
"flat": [
"C",
"Db",
"D",
"Eb",
"E",
"F",
"Gb",
"G",
"Ab",
"A",
"Bb",
"B"
],
"sharp": [
"C",
"C#",
"D",
"D#",
"E",
"F",
"F#",
"G",
"G#",
"A",
"A#",
"B"
]
}
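# Illustrative lookup (pure dictionary access, no extra logic assumed):
# resolve a "min7" chord on C to half steps above the root.
if __name__ == "__main__":
    root = "C"
    quality = "min7"
    root_half_step = notes["naturalToHalfStep"][root]
    chord_half_steps = [
        (root_half_step + intervals["intervalToTuple"][interval][1]) % 12
        for interval in intervals["shorthandToIntervals"][quality]
    ]
    print(chord_half_steps)  # [0, 3, 7, 10] -> C, Eb, G, Bb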
|
import supybot.utils as utils
import re
from supybot.commands import *
import sys
import supybot.plugins as plugins
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('RandKicks')
except ImportError:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x: x
def isChan(chan, checkprefix):
if not chan:
return False
elif chan.startswith("#"):
return True
elif checkprefix and len(chan) >= 2 and not chan[0].isalnum() and chan[1] == "#":
return True
else:
return False
class RandKicks(callbacks.Plugin):
"""RandKicks"""
threaded = True
def doPrivmsg(self,irc,msg):
if not isChan(msg.args[0], True):
return
FunDet = self.registryValue('funDet',msg.args[0])
kickstr = "KICK"
funregexes = []
funkickmsg = []
funregexes.append('something')
funkickmsg.append("something else")
if FunDet:
for i in range(0, len(funregexes)):
if re.match(funregexes[i],' '.join(msg.args[1:]), re.IGNORECASE):
irc.queueMsg(ircmsgs.IrcMsg(prefix='', command=kickstr,
args=(msg.args[0], msg.nick, funkickmsg[i]), msg=None))
return
Class = RandKicks
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
"""Message handler functions."""
from stickerfinder.models import (
Change,
Sticker,
StickerSet,
)
from stickerfinder.session import message_wrapper
from stickerfinder.enum import TagMode
from stickerfinder.logic.tag import (
handle_next,
tag_sticker,
current_sticker_tags_message,
handle_request_reply,
)
from stickerfinder.telegram.keyboard import (
get_tag_this_set_keyboard,
get_nsfw_ban_keyboard,
)
@message_wrapper()
def handle_private_text(bot, update, session, chat, user):
"""Read all messages and handle the tagging of stickers."""
# Handle tag input while the chat is in full-set or random tagging mode
if chat.tag_mode in [TagMode.sticker_set.value, TagMode.random.value]:
# Try to tag the sticker. Return early if it didn't work.
tag_sticker(
session,
update.message.text,
chat.current_sticker,
user,
tg_chat=update.message.chat,
chat=chat,
message_id=update.message.message_id,
)
session.commit()
handle_next(session, bot, chat, update.message.chat, user)
elif chat.tag_mode == TagMode.single_sticker.value:
tag_sticker(
session,
update.message.text,
chat.current_sticker,
user,
tg_chat=update.message.chat,
chat=chat,
message_id=update.message.message_id,
)
chat.cancel(bot)
return "Sticker tags adjusted."
@message_wrapper()
def handle_private_sticker(bot, update, session, chat, user):
"""Read all stickers.
- Handle initial sticker addition.
- Handle sticker tagging
"""
incoming_sticker = update.message.sticker
set_name = incoming_sticker.set_name
# The sticker is no longer associated to a sticker set
if set_name is None:
return "This sticker doesn't belong to a sticker set."
sticker_set = StickerSet.get_or_create(session, set_name, chat, user)
if sticker_set.reviewed is False:
sticker_set.furry = user.furry
return f"Set {sticker_set.name} is going to be added soon ☺️"
# Notify if they are still in a tagging process
if chat.tag_mode in [TagMode.sticker_set.value, TagMode.random.value]:
chat.cancel(bot)
sticker = session.query(Sticker).get(incoming_sticker.file_unique_id)
if sticker is None:
sticker_set.scan_scheduled = True
return "I don't know this specific sticker yet. Please wait a few minutes and try again ☺️"
chat.current_sticker = sticker
chat.tag_mode = TagMode.single_sticker.value
sticker_tags_message = current_sticker_tags_message(sticker, user)
# Send inline keyboard to allow fast tagging of the sticker's set
keyboard = get_tag_this_set_keyboard(sticker.sticker_set, user)
update.message.chat.send_message(
f"Just send the new tags for this sticker.\n{sticker_tags_message}",
reply_markup=keyboard,
)
@message_wrapper(send_message=False)
def handle_group_sticker(bot, update, session, chat, user):
"""Read all stickers.
- Handle initial sticker addition.
- Detect whether a sticker set is used in a chat or not.
"""
tg_sticker = update.message.sticker
set_name = tg_sticker.set_name
# The sticker is no longer associated to a sticker set
if set_name is None:
return
# Handle maintenance and newsfeed sticker sets
if chat.is_maintenance or chat.is_newsfeed:
sticker_set = StickerSet.get_or_create(session, set_name, chat, user)
if not sticker_set.complete:
return "Sticker set is not yet reviewed"
message = f'StickerSet "{sticker_set.title}" ({sticker_set.name})'
keyboard = get_nsfw_ban_keyboard(sticker_set)
update.message.chat.send_message(message, reply_markup=keyboard)
return
# Handle replies to #request messages and tag those stickers with the request tags
handle_request_reply(tg_sticker.file_unique_id, update, session, chat, user)
# Right now we only want to add animated stickers
if not tg_sticker.is_animated:
return
sticker_set = StickerSet.get_or_create(session, set_name, chat, user)
if sticker_set not in chat.sticker_sets:
chat.sticker_sets.append(sticker_set)
# Stickerset is not yet completed
if not sticker_set.complete:
return
# Set the send sticker to the current sticker for tagging or report.
sticker = session.query(Sticker).get(tg_sticker.file_unique_id)
if sticker is None:
sticker_set.scan_scheduled = True
return
chat.current_sticker = sticker
return
@message_wrapper()
def handle_edited_messages(bot, update, session, chat, user):
"""Read edited messages and check whether the user corrected some tags."""
message = update.edited_message
# Try to find a Change with this message
change = (
session.query(Change)
.filter(Change.chat == chat)
.filter(Change.message_id == message.message_id)
.order_by(Change.created_at.desc())
.limit(1)
.one_or_none()
)
if change is None:
return
tag_sticker(
session,
message.text,
change.sticker,
user,
tg_chat=message.chat,
chat=chat,
message_id=message.message_id,
single_sticker=True,
)
return "Sticker tags edited."
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def diameterOfBinaryTree(self, root: TreeNode) -> int:
self.result = 0
def depth(root):
if root is None: return 0
left = depth(root.left)
right = depth(root.right)
self.result = max(self.result, left + right)
return max(left, right) + 1
depth(root)
return self.result
|
from github import Github as PyGithub
from github.Organization import Organization as PyGithubOrganization
from github_team_organizer.classes.github import GitHubWrapper
class Organization:
__instance = None
def __new__(cls, *args, **kwargs):
if Organization.__instance is None:
Organization.__instance = super().__new__(cls)
return Organization.__instance
def __init__(self, name: str, github: PyGithub = None):
if not github:
github = GitHubWrapper()
self.github_organization: PyGithubOrganization = github.get_organization(name)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A place to store information about events, such as format strings, etc."""
# TODO: move this class to events/definitions.py or equiv.
class EventTimestamp(object):
"""Class to manage event data."""
# The timestamp_desc values.
ACCESS_TIME = u'Last Access Time'
CHANGE_TIME = u'Metadata Modification Time'
CREATION_TIME = u'Creation Time'
MODIFICATION_TIME = u'Content Modification Time'
ENTRY_MODIFICATION_TIME = u'Metadata Modification Time'
# Added time and Creation time are considered the same.
ADDED_TIME = u'Creation Time'
# Written time and Modification time are considered the same.
WRITTEN_TIME = u'Content Modification Time'
EXIT_TIME = u'Exit Time'
LAST_RUNTIME = u'Last Time Executed'
DELETED_TIME = u'Content Deletion Time'
FILE_DOWNLOADED = u'File Downloaded'
PAGE_VISITED = u'Page Visited'
# TODO: change page visited into last visited time.
LAST_VISITED_TIME = u'Last Visited Time'
LAST_CHECKED_TIME = u'Last Checked Time'
EXPIRATION_TIME = u'Expiration Time'
START_TIME = u'Start Time'
END_TIME = u'End Time'
FIRST_CONNECTED = u'First Connection Time'
LAST_CONNECTED = u'Last Connection Time'
LAST_PRINTED = u'Last Printed Time'
LAST_RESUME_TIME = u'Last Resume Time'
# Note that the unknown time is used for date and time values
# of which the exact meaning is unknown and being researched.
# For most cases do not use this timestamp description.
UNKNOWN = u'Unknown Time'
|
from bert_serving.client import BertClient
import numpy as np
bc = BertClient()
strings = ['hej hopp', 'hej hopp']
enc = bc.encode(strings)
def cosine_sim(s1, s2):
return np.dot(s1[0,:],s2[0,:])/(np.linalg.norm(s1)*np.linalg.norm(s2)+1e-9)
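# Illustrative check: the two encoded strings are identical, so their cosine
# similarity should be ~1.0 (requires a running bert-serving server).
print(cosine_sim(enc[0:1], enc[1:2]))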
|
from crcapp.models import Employee,Store
from django.contrib.auth.hashers import make_password, check_password
from django.core.exceptions import ValidationError
from django.contrib.sessions.models import Session
from django.utils import timezone
# Create functions related to staff
class StaffController:
# Developer: Sam
# Creating a new staff account. If a username exist in the employee
# database, do not create the account. else, create the account for
# the employee
def createStaff(request):
staffObj = Employee.objects.raw("SELECT employeeID FROM `crcapp_employee` ORDER BY employeeID DESC LIMIT 1")[0]
empID = staffObj.employeeID
empID = empID[1:]
empID = int(empID)+1
emploID = str(empID).zfill(5)
try:
employeeID_ = "E"+emploID
firstName_ = request.POST.get("firstName")
lastName_ = request.POST.get("lastName")
streetAddress_ = request.POST.get("streetAddress")
cityAddress_ = request.POST.get("city")
postCodeAddress_ = request.POST.get("postalCode")
stateAddress_ = request.POST.get("state")
DOB_ = request.POST.get("dob")
TFN_ = request.POST.get("TFN")
phoneNumber_ = request.POST.get("phoneNumber")
email_ = request.POST.get("email")
userName_ = "NULL"
password_ = "NULL"
userType_ = request.POST.get("userType")
dateJoined_ = timezone.now()
lastLogin_ = timezone.now()
storeID_ = request.POST.get("storeID")
store = Store.objects.get(storeID=storeID_)
# Sam change this according to your create function for now Iam adding this to test
staff = Employee(employeeID = employeeID_,
firstName = firstName_,
lastName = lastName_,
streetAddress = streetAddress_,
cityAddress = cityAddress_,
postCodeAddress = postCodeAddress_,
stateAddress = stateAddress_,
DOB = DOB_,
TFN = TFN_,
phoneNumber = phoneNumber_,
email = email_,
userName = userName_,
password = password_,
userType = userType_,
dateJoined = dateJoined_,
lastLogin = lastLogin_,
storeID = store)
# full_clean() raises ValidationError on invalid data and returns None otherwise
staff.full_clean()
staff.save()
return True
except ValidationError as e:
return e
# Developer: Sam
# modifying the staff values
def modify(request):
employeeID_ = request.POST.get("employeeID")
firstName_ = request.POST.get("firstName")
lastName_ = request.POST.get("lastName")
streetAddress_ = request.POST.get("streetAddress")
cityAddress_ = request.POST.get("city")
postCodeAddress_ = request.POST.get("postalCode")
stateAddress_ = request.POST.get("state")
DOB_ = request.POST.get("dob")
TFN_ = request.POST.get("TFN")
phoneNumber_ = request.POST.get("phoneNumber")
email_ = request.POST.get("email")
userType_ = request.POST.get("userType")
storeID_ = request.POST.get("storeID")
store = Store.objects.get(storeID=storeID_)
staff = Employee.objects.get(employeeID = employeeID_)
try:
if(firstName_ != ""):
staff.firstName = firstName_
if(lastName_ != ""):
staff.lastName = lastName_
if(streetAddress_ != ""):
staff.streetAddress = streetAddress_
if(cityAddress_ != ""):
staff.cityAddress = cityAddress_
if(postCodeAddress_ != ""):
staff.postCodeAddress = postCodeAddress_
if(stateAddress_ != ""):
staff.stateAddress = stateAddress_
if(DOB_ != ""):
staff.DOB = DOB_
if(TFN_ != ""):
staff.TFN = TFN_
if(phoneNumber_ != ""):
staff.phoneNumber = phoneNumber_
if(email_ != ""):
staff.email = email_
if(userType_ != ""):
staff.userType = userType_
if(storeID_ != ""):
staff.storeID = store
# full_clean() raises ValidationError on invalid data and returns None otherwise
staff.full_clean()
staff.save()
return True
except ValidationError as e:
return e
# Developer: Aidan
def changeLoginDetails(request, pw):
try:
employeeID_ = request.POST.get("empID")
userName_ = request.POST.get("username")
password_ = pw
# updating certain values
staff = Employee.objects.get(employeeID=employeeID_)
staff.userName=userName_
staff.password = password_
# full_clean() raises ValidationError on invalid data and returns None otherwise
staff.full_clean()
staff.save()
return True
except ValidationError as e:
return e
# Developer: Sam
def search(request, arg):
if (arg == False):
for each in Employee.objects.all():
return(
each.employeeID,
each.firstName,
each.lastName,
each.streetAddress,
each.cityAddress,
each.postCodeAddress,
each.stateAddress,
each.DOB,
each.TFN,
each.phoneNumber,
each.email,
each.userName,
each.password,
each.userType,
each.dateJoined,
each.lastLogin,
each.storeID)
if (arg == True):
employeeID_min = request.POST.get("employeeID_min")
employeeID_max = request.POST.get("employeeID_max")
firstName = request.POST.get("firstName")
lastName = request.POST.get("lastName")
streetAddress = request.POST.get("streetAddress")
cityAddress = request.POST.get("cityAddress")
postCodeAddress_min = request.POST.get("postCodeAddress_min")
postCodeAddress_max = request.POST.get("postCodeAddress_max")
stateAddress = request.POST.get("stateAddress")
DOB_min = request.POST.get("DOB_min")
DOB_max = request.POST.get("DOB_max")
TFN = request.POST.get("TFN")
phoneNumber = request.POST.get("phoneNumber")
email = request.POST.get("email")
userName = request.POST.get("userName")
password = request.POST.get("password")
userType = request.POST.get("userType")
dateJoined_min = request.POST.get("dateJoined_min")
dateJoined_max = request.POST.get("dateJoined_max")
lastLogin_min = request.POST.get("lastLogin_min")
lastLogin_max = request.POST.get("lastLogin_max")
storeID = request.POST.get("storeID")
condition = " "
if (employeeID_min != ""):
condition = condition + "employeeID >= \'" + employeeID_min + "\' AND "
if (employeeID_max != ""):
condition = condition + "employeeID <= \'" + employeeID_max + "\' AND "
if (firstName != ""):
condition = condition + "firstName LIKE \'%" + firstName + "%\' AND "
if (lastName != ""):
condition = condition + "lastName LIKE \'%" + lastName + "%\' AND "
if (streetAddress != ""):
condition = condition + "streetAddress LIKE \'%" + streetAddress + "%\' AND "
if (cityAddress != ""):
condition = condition + "cityAddress LIKE \'%" + cityAddress + "%\' AND "
if (postCodeAddress_min != ""):
condition = condition + "postCodeAddress >= \'" + postCodeAddress_min + "\' AND "
if (postCodeAddress_max != ""):
condition = condition + "postCodeAddress <= \'" + postCodeAddress_max + "\' AND "
if (stateAddress != ""):
condition = condition + "stateAddress LIKE \'%" + stateAddress + "%\' AND "
if (DOB_min != ""):
condition = condition + "DOB >= \'" + DOB_min + "\' AND "
if (DOB_max != ""):
condition = condition + "DOB <= \'" + DOB_max + "\' AND "
if (TFN != ""):
condition = condition + "TFN = \'" + TFN + "\' AND "
if (phoneNumber != ""):
condition = condition + "phoneNumber = \'" + phoneNumber + "\' AND "
if (email != ""):
condition = condition + "email LIKE \'%" + email + "%\' AND "
if (userName != ""):
condition = condition + "userName LIKE \'%" + userName + "%\' AND "
if (userType != ""):
condition = condition + "userType = \'" + userType + "\' AND "
if (dateJoined_min != ""):
condition = condition + "dateJoined >= \'" + dateJoined_min + "\' AND "
if (dateJoined_max != ""):
condition = condition + "dateJoined <= \'" + dateJoined_max + "\' AND "
if (lastLogin_min != ""):
condition = condition + "lastLogin >= \'" + lastLogin_min + "\' AND "
if (lastLogin_max != ""):
condition = condition + "lastLogin_max <= \'" + lastLogin_max + "\' AND "
if (storeID != ""):
condition = condition + "storeID = \'" + storeID + "\' AND "
query = 'SELECT * FROM carrentaldb.crcapp_employee WHERE' + condition[:-5] +';'
for each in Employee.objects.raw(query):
return(
each.employeeID,
each.firstName,
each.lastName,
each.streetAddress,
each.cityAddress,
each.postCodeAddress,
each.stateAddress,
each.DOB,
each.TFN,
each.phoneNumber,
each.email,
each.userName,
each.password,
each.userType,
each.dateJoined,
each.lastLogin,
each.storeID)
def changePW(request):
employeeID_ = request.POST.get("employeeID")
password_ = make_password(request.POST.get('password', ''))
existingEmployee = Employee.objects.get(employeeID=employeeID_)
try:
existingEmployee.password = password_
# full_clean() raises ValidationError on invalid data and returns None otherwise
existingEmployee.full_clean()
existingEmployee.save()
return True
except ValidationError as e:
return e.message_dict
|
# Warning: this is messy!
import requests
import datetime
import dateutil.parser
from functools import reduce, lru_cache
from itertools import groupby
import data
def greenline(apikey, stop):
"""
Return processed green line data for a stop.
"""
# Only green line trips
filter_route = "Green-B,Green-C,Green-D,Green-E"
# Include vehicle and trip data
include = "vehicle,trip"
# API request
p = {"filter[route]": filter_route, "include": include, "filter[stop]": stop}
result = requests.get("https://api-v3.mbta.com/predictions", params=p).json()
return processGreenlinePredictions(result)
def processGreenlinePredictions(result):
"""Process MBTA API data on predictions for display."""
def eta(arrival, departure):
"""Take an arrival, departure datetime string and turn into eta.
returns (hours, minutes, seconds, is_departure)
"""
if not (arrival or departure):
return (None, None, None, None)
if arrival:
is_departure = False
pred = dateutil.parser.parse(arrival)
else:
is_departure = True
pred = dateutil.parser.parse(departure)
now = datetime.datetime.now(datetime.timezone.utc)
td = pred - now
hours, remainder = divmod(td.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return (hours, minutes, seconds, is_departure)
def vehicle(vehicleObject):
"""Takes vehicle object and processes."""
def whichAccessible(state):
"""
Verbal description of which cars are accessible from
a tuple of (True, False, ...) for a consist.
"""
# 1 car
if len(state) == 1:
if state[0]:
return "any"
else:
return "none"
# 2 cars
elif len(state) == 2:
back, front = state
if back and not front:
return "back"
elif front and not back:
return "front"
elif front and back:
return "any"
else:
return "none"
# 3 cars
elif len(state) == 3:
answer = ""
back, middle, front = state
if back:
answer += "back "
if middle:
answer += "middle "
if front:
answer += "front "
if not any(state):
answer = "none"
if all(state):
answer = "any"
return answer
# L O N G B O I
# 3+ cars aren't run in regular service
else:
raise ValueError("This vehicle is too long")
# If there is data on the vehicle then we can process it
if vehicleObject['data']:
vid = vehicleObject['data']['id']
# Search included objects from API call for vehicle ID
vehicle = [v for v in result['included'] if v['type'] == 'vehicle' and v['id'] == vid][0]
cars = vehicle['attributes']['label'].split('-')[::-1]
# Magic number: 38xx, 39xx series car numbers are accessible
accessibility = [int(cid[0:2]) >= 38 for cid in cars]
# Find correct trip from included objects in API call
trip = [t for t in result['included']
if t['type'] == 'trip'
and t['relationships']['vehicle']['data']
and t['relationships']['vehicle']['data']['id'] == vid][0]
headsign = trip['attributes'].get('headsign') or "No Data"
# Do some abbreviation
headsign = headsign.replace("Street", "St").replace("Government", "Gov't").replace("Circle", "Cir")
return {
"headsign": headsign,
"cars": list(zip(cars,accessibility)),
"n": len(cars),
"nAccessible": len(accessibility),
"whichAccessible": whichAccessible(accessibility)
}
else:
return
output = []
# Iterate over each prediction
for prediction in result['data']:
thisEta = eta(prediction['attributes']['arrival_time'], prediction['attributes']['departure_time'])
thisVehicle = vehicle(prediction['relationships']['vehicle'])
# Only process vehicles with ETAs
if any(thisEta):
thisPrediction = {
"arrival": prediction['attributes']['arrival_time'],
"departure": prediction['attributes']['departure_time'],
"eta": thisEta,
"direction": prediction['attributes']['direction_id'],
"route": prediction['relationships']['route']['data']['id'],
"vehicle": thisVehicle
}
output.append(thisPrediction)
# Sort by inbound/outbound
presorted = sorted(output, key=lambda a: a['direction'])
# And group
return groupby(presorted, lambda a: a['direction'])
def accessibilityAlerts(apikey, filter_activity="USING_WHEELCHAIR,USING_ESCALATOR", filter_stop=""):
"""Get accessibility alerts for wheelchairs and escalators."""
# Do API call
p = {"filter[activity]": filter_activity}
result = requests.get("https://api-v3.mbta.com/alerts", params=p)
result = result.json()
allStations, processed = processAlerts(result['data'])
output = {
"n": len(result['data']),
"allStations": allStations,
"alerts": processed
}
return output
def processAlerts(alerts):
"""Process alert data from MBTA API for display."""
output = []
# Iterate over alerts
for alert in alerts:
attrs = alert['attributes']
# Get location ID
location = [place for place in [e['stop'] for e in attrs['informed_entity']] if 'place-' in place]
# Otherwise "unknown"
if len(location):
location = data.places.get(location[0]) or "Unknown"
else:
location = "Unknown"
# Get relevant GTFS Activities
activities = set(reduce(list.__add__, [e['activities'] for e in attrs['informed_entity']]))
activities = [data.activityMap.get(a) for a in activities]
# Craft response
thisAlert= {
'location': location,
'active_for': attrs['active_period'],
'txt_description': attrs['description'],
'txt_blurb': attrs['header'],
'txt_active_for': attrs['timeframe'],
'txt_effect': attrs['service_effect'],
'activities': activities
}
output.append(thisAlert)
# Sort and group by location
presort = sorted(output, key=lambda a: a['location'])
allStations = sorted(set([a['location'] for a in presort]))
return allStations, groupby(presort, lambda a: a['location'])
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.flatpages.sitemaps import FlatPageSitemap
from django.contrib.sitemaps import views
from django.urls import path, include
from django.views.decorators.cache import cache_page
from rest_framework.routers import DefaultRouter
from convocatorias.feeds import ÚltimasConvocatoriasFeed
from convocatorias.sitemaps import ConvocatoriaSitemap
from convocatorias.views import ConvocatoriaDetailView, ConvocatoriaListView
from convocatorias.views import ConvocatoriaViewset
from planes.views import PlanDetailView, PlanListView
from .sitemaps import StaticViewSitemap
from .views import FechasPasadasView, HomepageView, PróximaFechaView
admin.autodiscover()
sitemaps = {
'convocatorias': ConvocatoriaSitemap,
'flatpages': FlatPageSitemap,
'static': StaticViewSitemap,
}
router = DefaultRouter()
router.register('convocatorias', ConvocatoriaViewset)
API_TITLE = 'Convocatorias PISLEA API'
API_DESCRIPTION = 'API para consultar las convocatorias contaminadas y ejemplares'
urlpatterns = [
path('admin/', admin.site.urls),
path('convocatorias/feed', ÚltimasConvocatoriasFeed()),
path('convocatorias/', ConvocatoriaListView.as_view(), name='convocatoria-list'),
path('convocatorias/<slug:slug>', ConvocatoriaDetailView.as_view(), name='convocatoria-detail'),
path('planes/', PlanListView.as_view(), name='plan-list'),
path('planes/<int:pk>', PlanDetailView.as_view(), name='plan-detail'),
path('fechas-pasadas', FechasPasadasView.as_view(), name='fechas-pasadas'),
path('proxima-fecha', PróximaFechaView.as_view(), name='proxima-fecha'),
path('pages/', include('django.contrib.flatpages.urls')),
path('', HomepageView.as_view(), name="homepage"),
path('sitemap.xml', cache_page(86400)(views.index), {'sitemaps': sitemaps}),
path('sitemap-<section>.xml', cache_page(86400)(views.sitemap), {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
path('api/', include(router.urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import torch
import torch.nn.functional as F
# NOTE: the GradCAM base class is assumed to be defined or imported elsewhere in this package
class SmoothGradCAMpp(GradCAM):
"""
Smooth Grad-CAM++, inherit from GradCAM
"""
def __init__(self, model_dict):
super(SmoothGradCAMpp, self).__init__(model_dict)
def forward(self, input_image, class_idx=None, param_n=35, mean=0, sigma=2, retain_graph=False):
b, c, h, w = input_image.size()
logit = self.model_arch(input_image)
if class_idx is None:
score = logit[:, logit.max(1)[-1]].squeeze()
else:
score = logit[:, class_idx].squeeze()
if torch.cuda.is_available():
score = score.cuda()
logit = logit.cuda()
self.model_arch.zero_grad()
score.backward(retain_graph=retain_graph)
gradients = self.gradients['value']
activations = self.activations['value']
b, k, u, v = gradients.size()
if torch.cuda.is_available():
activations = activations.cuda()
gradients = gradients.cuda()
# Hyperparameters are taken from the arguments: param_n, mean and sigma
grad_2, grad_3 = torch.zeros_like(activations), torch.zeros_like(activations)
for i in range(param_n):
noise = input_image.data.new(input_image.size()).normal_(mean, sigma ** 2)
noisy_input = input_image + noise
if torch.cuda.is_available():
noisy_input = noisy_input.cuda()
out = self.model_arch(noisy_input)
score = out[:, out.max(1)[-1]].squeeze()
self.model_arch.zero_grad()
score.backward(retain_graph=retain_graph)
gradient = self.gradients['value']
grad_2.add_(gradient.pow(2))
grad_3.add_(gradient.pow(3))
grad_2.div_(param_n)
grad_3.div_(param_n)
# Alpha coefficient for each pixel
global_sum = activations.view(b, k, u * v).sum(-1, keepdim=True).view(b, k, 1, 1)
alpha_num = grad_2
alpha_denom = grad_2.mul(2) + global_sum.mul(grad_3)
alpha_denom = torch.where(alpha_denom != 0.0, alpha_denom, torch.ones_like(alpha_denom))
alpha = alpha_num.div(alpha_denom + 1e-7)
positive_gradients = F.relu(score.exp() * gradients)
weights = (alpha * positive_gradients).view(b, k, u * v).sum(-1).view(b, k, 1, 1)
saliency_map = (weights * activations).sum(1, keepdim=True)
saliency_map = F.relu(saliency_map)
saliency_map = F.interpolate(saliency_map, size=(224, 224), mode='bilinear', align_corners=False)
saliency_map_min, saliency_map_max = saliency_map.min(), saliency_map.max()
saliency_map = (saliency_map - saliency_map_min).div(saliency_map_max - saliency_map_min).data
return saliency_map
    def __call__(self, input, class_idx=None, retain_graph=False):
        # pass retain_graph by keyword so it is not mistaken for param_n
        return self.forward(input, class_idx, retain_graph=retain_graph)
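# --- Usage sketch (added for illustration, not part of the original implementation) ---
# The exact keys of model_dict depend on the GradCAM base class; the ones below
# ('arch', 'layer_name', 'input_size') are assumptions based on common Grad-CAM ports.
#
#     import torchvision.models as models
#     model = models.resnet18(pretrained=True).eval()
#     cam = SmoothGradCAMpp({'arch': model, 'layer_name': 'layer4', 'input_size': (224, 224)})
#     saliency = cam(torch.randn(1, 3, 224, 224))   # (1, 1, 224, 224) map scaled to [0, 1]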
|
from icontrol.session import iControlRESTSession
from icontrol.exceptions import iControlUnexpectedHTTPError
from requests.exceptions import HTTPError
import argparse
import json
import os
import sys
import logging
# /mgmt/shared/authz/roles/iControl_REST_API_User
# /mgmt/shared/authz/resource-groups
# /mgmt/shared/authz/roles
class IcrRbac(object):
def __init__(self,
target_user,
host="192.168.1.245",
username="admin",
password="admin",
token=None,
sync_group=None,
log_level="info",
trace=False,
persist=False,
remote_user=False):
self._username = username
self._password = password
if "http" not in host:
self.base_url = "https://%s/mgmt" %(host)
else:
self.base_url = host
self.sync_group = sync_group
self.log_level = log_level
self.trace = trace
self.persist = persist
self.target_user = target_user
if token:
self.icr = iControlRESTSession(username, password, token='tmos')
else:
self.icr = iControlRESTSession(username, password)
if remote_user:
self.remote_user = True
else:
self.remote_user = False
def _get(self,uri):
try:
return self.icr.get(self.base_url + uri)
except HTTPError as exc:
# override icontrol 404 error
if exc.response.status_code == 404 or exc.response.status_code == 204:
return exc.response
else:
raise
def _delete(self,uri):
try:
return self.icr.delete(self.base_url + uri)
except HTTPError as exc:
# override icontrol 404 error
if exc.response.status_code == 404 or exc.response.status_code == 204:
return exc.response
else:
raise
def _post(self,uri,data):
try:
return self.icr.post(self.base_url + uri,data=data)
except HTTPError as exc:
# override icontrol 404 error
if exc.response.status_code == 404 or exc.response.status_code == 204:
return exc.response
else:
raise
def _put(self,uri,data):
try:
return self.icr.put(self.base_url + uri,data=data)
except HTTPError as exc:
# override icontrol 404 error
if exc.response.status_code == 404 or exc.response.status_code == 204:
return exc.response
else:
raise
def create_resource_group(self,name='eventDrivenResourceGroup'):
rg = """{"name":"%s", "resources":[ {"restMethod":"GET", "resourceMask":"/mgmt/tm/net/self" },
{"restMethod":"GET", "resourceMask":"/mgmt/shared/service-discovery/task" },
{"restMethod":"GET", "resourceMask":"/mgmt/shared/service-discovery/task/**" },
{"restMethod":"POST", "resourceMask":"/mgmt/shared/service-discovery/task/**" },
{"restMethod":"GET", "resourceMask":"/mgmt/shared/appsvcs/info" } ]}""" %(name)
resp = self._post('/shared/authz/resource-groups',data=rg)
return resp
def delete_resource_group(self,name='eventDrivenResourceGroup'):
resp = self._get('/shared/authz/resource-groups')
id = None
for item in resp.json()['items']:
if item['name'] == name:
id = item['id']
if id:
resp = self._delete('/shared/authz/resource-groups/%s' %(id))
return resp
def create_custom_role(self,name='eventRole',username=None,resource_group='eventDrivenResourceGroup'):
if not username:
username = self.target_user
user_ref = "https://localhost/mgmt/shared/authz/users/%s" %(username)
if self.remote_user:
resp = self._get('/cm/system/authn/providers/tmos')
id = resp.json()['items'][0]['id']
resp = self._get('/cm/system/authn/providers/tmos/%s/users' %(id))
user_ref = None
for item in resp.json()['items']:
if item['name'] == username:
user_ref = item['selfLink']
resp = self._get('/shared/authz/resource-groups')
id = None
for item in resp.json()['items']:
if item['name'] == resource_group:
id = item['id']
role = """{"name":"%s", "userReferences":[ {"link":"%s"} ], "resourceGroupReferences":[{"link":"https://localhost/mgmt/shared/authz/resource-groups/%s"}]}""" %(name,user_ref,id)
resp = self._post('/shared/authz/roles',data=role)
return resp
def delete_custom_role(self,name='eventRole'):
resp = self._get('/shared/authz/roles')
id = None
for item in resp.json()['items']:
if item['name'] == name:
id = item['name']
if id:
resp = self._delete('/shared/authz/roles/%s' %(id))
return resp
def remove_user_from_role(self,username=None,role='iControl_REST_API_User'):
if not username:
username = self.target_user
# find user
# local
# client._get('/shared/authz/users').json()['items']
# remote
# client._get('/cm/system/authn/providers/tmos').json()
# client._get('/cm/system/authn/providers/tmos').json()['items'][0]['id']
resp = self._get('/shared/authz/roles/%s' %(role))
output = resp.json()
orig = resp.json()['userReferences']
if self.remote_user:
resp = self._get('/cm/system/authn/providers/tmos')
id = resp.json()['items'][0]['id']
resp = self._get('/cm/system/authn/providers/tmos/%s/users' %(id))
user_ref = None
for item in resp.json()['items']:
if item['name'] == username:
user_ref = item['id']
resp = self._get('/shared/authz/roles/%s' %(role))
updated = [a for a in resp.json()['userReferences'] if not a['link'].endswith("/%s" %(user_ref))]
else:
updated = [a for a in resp.json()['userReferences'] if not a['link'].endswith("/%s" %(username))]
if orig != updated:
output['userReferences'] = updated
resp = self._put('/shared/authz/roles/%s' %(role),data=json.dumps(output))
return resp
def add_user_to_role(self,username=None,role='eventRole'):
if not username:
username = self.target_user
# find user
# local
# client._get('/shared/authz/users').json()['items']
# remote
# client._get('/cm/system/authn/providers/tmos').json()
# client._get('/cm/system/authn/providers/tmos').json()['items'][0]['id']
resp = self._get('/shared/authz/roles/%s' %(role))
output = resp.json()
orig = resp.json()['userReferences']
if self.remote_user:
resp = self._get('/cm/system/authn/providers/tmos')
id = resp.json()['items'][0]['id']
resp = self._get('/cm/system/authn/providers/tmos/%s/users' %(id))
user_ref = None
for item in resp.json()['items']:
if item['name'] == username:
user_ref = item['id']
resp = self._get('/shared/authz/roles/%s' %(role))
updated = [a for a in resp.json()['userReferences'] if not a['link'].endswith("/%s" %(user_ref))]
else:
user_ref = "https://localhost/mgmt/shared/authz/users/%s" %(username)
updated = [a for a in resp.json()['userReferences'] if not a['link'].endswith("/%s" %(username))]
if orig == updated:
updated.append({'link':user_ref})
output['userReferences'] = updated
resp = self._put('/shared/authz/roles/%s' %(role),data=json.dumps(output))
return resp
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Script to manage iControl REST RBAC for a target user')
parser.add_argument("--host", help="The IP/Hostname of the BIG-IP device",default='https://192.168.1.245/mgmt')
parser.add_argument("-u", "--username",default='admin')
parser.add_argument("-p", "--password",default='admin')
parser.add_argument("--password-file", help="The BIG-IP password stored in a file", dest='password_file')
parser.add_argument("-a","--action",help="deploy,dry-run,delete,stub,redeploy,list(partitions),list-tenants,list-ages")
parser.add_argument("--token",help="use token (remote auth)",action="store_true",default=False)
parser.add_argument("--remote-user",help="target remote user",action="store_true",default=False,dest='remote_user')
parser.add_argument("--target-user",dest='target_user',default='event')
parser.add_argument("-f","--file",help="declaration JSON file")
args = parser.parse_args()
username = args.username
password = args.password
if 'F5_USERNAME' in os.environ:
username = os.environ['F5_USERNAME']
if 'F5_PASSWORD' in os.environ:
password = os.environ['F5_PASSWORD']
if args.password_file:
password = open(args.password_file).readline().strip()
if 'F5_HOST' in os.environ:
host = os.environ['F5_HOST']
else:
host = args.host
kwargs = {'host':host,
'username':username,
'password':password,
'token':args.token,
'remote_user':args.remote_user}
client = IcrRbac(args.target_user,**kwargs)
if args.action == 'add_user':
# remove from iControl_REST_API_User
client.remove_user_from_role()
# add to eventRole
client.add_user_to_role()
elif args.action == 'cleanup':
client.delete_custom_role()
client.delete_resource_group()
elif args.action == 'setup':
client.create_resource_group()
client.create_custom_role()
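# --- Example invocations (added for illustration; the script name, host and credentials are placeholders) ---
#
#   Create the resource group and custom role:
#       python icr_rbac.py --host 192.0.2.10 -u admin -p admin --target-user event -a setup
#
#   Move the target user out of iControl_REST_API_User and into eventRole:
#       python icr_rbac.py --host 192.0.2.10 -u admin -p admin --target-user event -a add_user
#
#   Remove the custom role and resource group again:
#       python icr_rbac.py --host 192.0.2.10 -u admin -p admin --target-user event -a cleanup
#
# Credentials can also come from the F5_USERNAME / F5_PASSWORD / F5_HOST
# environment variables handled above.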
|
# Generated by Django 2.2.24 on 2021-09-29 10:54
import dashboard.models
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
def modify_job_template(apps, schema_editor):
if not settings.MIGRATE_INITIAL_DATA:
return
JobTemplate = apps.get_model('dashboard', 'JobTemplate')
JobTemplate.objects.filter(job_template_type="dpushtrans").update(
job_template_json_str='{"job":{"ci_pipeline":"%PIPELINE_UUID%","exception":"raise","execution":"sequential",'
'"name":"download and push translations","package":"%PACKAGE_NAME%","return_type":"json",'
'"tasks":[{"download":[{"name":"Translation files"},{"target_langs":"%TARGET_LANGS%"},'
'{"type":"%REPO_TYPE%"},{"branch":"%REPO_BRANCH%"}]},{"upload":[{"name":"Push files"},'
'{"target_langs":"%TARGET_LANGS%"},{"prehook":"skip"},{"posthook":"skip"},'
'{"import_settings":"project"},{"update":false},{"prepend_branch":false}]}],'
'"type":"dpushtrans"}}'
)
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0013_update_job_templates_20210709'),
]
operations = [
migrations.CreateModel(
name='PipelineConfig',
fields=[
('pipeline_config_id', models.AutoField(primary_key=True, serialize=False)),
('pipeline_config_event', models.CharField(max_length=1000)),
('pipeline_config_active', models.BooleanField(default=False)),
('pipeline_config_json_str', models.TextField(unique=True)),
('pipeline_config_repo_branches', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=1000), default=list, size=None, verbose_name='Repo Branches')),
('pipeline_config_created_on', models.DateTimeField(null=True)),
('pipeline_config_updated_on', models.DateTimeField(null=True)),
('pipeline_config_last_accessed', models.DateTimeField(null=True)),
('pipeline_config_created_by', models.EmailField(max_length=254, null=True)),
('ci_pipeline', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='dashboard.CIPipeline', verbose_name='CI Pipeline')),
],
options={
'verbose_name': 'Pipeline Config',
'db_table': 'ts_cipipelineconfig',
},
bases=(dashboard.models.ModelMixin, models.Model),
),
migrations.RunPython(modify_job_template, reverse_code=migrations.RunPython.noop),
]
|
from factory.django import DjangoModelFactory
import factory
from ..models import *
class DepartmentFactory(DjangoModelFactory):
FACTORY_FOR = Department
name = factory.Sequence(lambda n: 'Department_%s' % n)
class ApplicationFactory(DjangoModelFactory):
FACTORY_FOR = Application
name = factory.Sequence(lambda n: 'Application_%s' % n)
department = factory.SubFactory(DepartmentFactory)
class EnvironmentFactory(DjangoModelFactory):
FACTORY_FOR = Environment
name = factory.Sequence(lambda n: 'Environment_%s' % n)
application = factory.SubFactory(ApplicationFactory)
class ServerRoleFactory(DjangoModelFactory):
FACTORY_FOR = ServerRole
name = factory.Sequence(lambda n: 'ServerRole_%s' % n)
department = factory.SubFactory(DepartmentFactory)
class ServerFactory(DjangoModelFactory):
FACTORY_FOR = Server
name = factory.Sequence(lambda n: 'Server_%s' % n)
host = 'localhost'
user = 'user'
environment = factory.SubFactory(EnvironmentFactory)
@factory.post_generation
def roles(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for role in extracted:
self.roles.add(role)
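# --- Usage sketch (added for illustration; requires a configured Django test database) ---
#
#     server = ServerFactory()   # builds Department -> Application -> Environment -> Server
#     role = ServerRoleFactory(department=server.environment.application.department)
#     server_with_roles = ServerFactory(roles=[role])   # the post_generation hook attaches the roles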
|
import os.path
from urllib.parse import urlparse
import colander
import pyramid.httpexceptions as exc
from pyramid.view import view_config, view_defaults
from ..models import Group
from .predicates import load_tag, load_file, load_group
from .validation import errors_to_angular
def normpath(path):
    # urlparse is imported as a function above; _replace returns a new tuple,
    # so the result must be reassigned before rebuilding the URL.
    url = urlparse(path)
    url = url._replace(path=os.path.normpath(url.path))
    return url.geturl()
class GroupSchema(colander.MappingSchema):
name = colander.SchemaNode(colander.String())
abs_path = colander.SchemaNode(colander.String(),
preparer=normpath)
web_path = colander.SchemaNode(colander.String(),
preparer=normpath)
thumbnail_path = colander.SchemaNode(colander.String(),
preparer=normpath)
@view_defaults(renderer='json')
class GroupView(object):
def __init__(self, request):
self.request = request
@view_config(route_name='groups', request_method='GET')
def get(self):
query = self.request.dbsession.query(Group)
groups = query.order_by(Group.group_id).all()
lis = []
for g in groups:
lis.append({
'id': g.group_id,
'name': g.name,
'abs_path': g.abs_path,
'web_path': g.web_path,
'thumbnail_path': g.thumbnail_path,
})
return {
'groups': lis
}
@view_config(route_name='groups', request_method='POST')
def post(self):
try:
data = GroupSchema().deserialize(self.request.json_body)
except colander.Invalid as e:
self.request.response.status = 400
return {'errors': errors_to_angular(e.asdict())}
name = data['name']
c = self.request.dbsession.query(Group)\
.filter_by(name=name).one_or_none()
if c:
self.request.response.status = 400
return {
'errors': {
# existName is the same key as angular validator
'name': {'existName': {'value': name}}
}
}
g = Group()
for k, v in data.items():
setattr(g, k, v)
self.request.dbsession.add(g)
return {
'id': g.group_id,
'name': g.name,
'abs_path': g.abs_path,
'web_path': g.web_path,
'thumbnail_path': g.thumbnail_path,
}
@view_config(route_name='group', request_method='PUT')
def put(self):
group = self.request.matchdict['group']
try:
data = GroupSchema().deserialize(self.request.json_body)
except colander.Invalid as e:
self.request.response.status = 400
return {'errors': errors_to_angular(e.asdict())}
for k, v in data.items():
setattr(group, k, v)
return exc.HTTPNoContent()
@view_defaults(renderer='json')
class FileView(object):
def __init__(self, request):
self.request = request
@view_config(route_name='files', request_method='GET')
def get(self):
group = self.request.matchdict['group']
lis = []
for f in group.files:
lis.append({
'id': f.file_id,
'rel_path': f.rel_path,
'thumbnail_path': f.thumbnail_path,
'web_path': f.web_path,
'creation_date': f.creation_date,
'creation_author': f.creation_author,
'modification_date': f.modification_date,
'modification_author': f.modification_author,
'tags': [{'name': t.name, 'id': t.tag_id} for t in f.tags],
})
return {
'files': lis
}
@view_config(route_name='file_tag', request_method='PUT')
def tag(self):
f = self.request.matchdict['file']
t = self.request.matchdict['tag']
if t in f.tags:
self.request.response.status = 409
else:
f.tags.append(t)
return {'tag': {'name': t.name, 'id': t.tag_id}}
@view_config(route_name='file_tag', request_method='DELETE')
def remove_tag(self):
f = self.request.matchdict['file']
t = self.request.matchdict['tag']
if t not in f.tags:
raise exc.HTTPNotFound()
f.tags.remove(t)
return exc.HTTPNoContent()
def includeme(config):
    config.add_route('files', r'/api/groups/{group_id:\d+}/files',
                     custom_predicates=(load_group,))
    config.add_route('file_tag', r'/api/files/{file_id:\d+}/tags/{tag_id:\d+}',
                     custom_predicates=(load_file, load_tag))
    config.add_route('groups', '/api/groups')
    config.add_route('group', r'/api/groups/{group_id:\d+}',
                     custom_predicates=(load_group,))
config.scan(__name__)
|
# -----------------------------------------------------------------------------
# This file contains the API for the WWL kernel computations
#
# December 2019, M. Togninalli
# -----------------------------------------------------------------------------
from .propagation_scheme import WeisfeilerLehman, ContinuousWeisfeilerLehman
from sklearn.metrics.pairwise import laplacian_kernel, cosine_similarity
import ot
import numpy as np
def _compute_wasserstein_distance(label_sequences, sinkhorn=False,
categorical=False, sinkhorn_lambda=1e-2):
'''
Generate the Wasserstein distance matrix for the graphs embedded
in label_sequences
'''
# Get the iteration number from the embedding file
n = len(label_sequences)
M = np.zeros((n,n))
# Iterate over pairs of graphs
for graph_index_1, graph_1 in enumerate(label_sequences):
# Only keep the embeddings for the first h iterations
labels_1 = label_sequences[graph_index_1]
for graph_index_2, graph_2 in enumerate(label_sequences[graph_index_1:]):
labels_2 = label_sequences[graph_index_2 + graph_index_1]
# Get cost matrix
ground_distance = 'hamming' if categorical else 'euclidean'
costs = ot.dist(labels_1, labels_2, metric=ground_distance)
if sinkhorn:
mat = ot.sinkhorn(np.ones(len(labels_1))/len(labels_1),
np.ones(len(labels_2))/len(labels_2), costs, sinkhorn_lambda,
numItermax=50)
M[graph_index_1, graph_index_2 + graph_index_1] = np.sum(np.multiply(mat, costs))
else:
M[graph_index_1, graph_index_2 + graph_index_1] = \
ot.emd2([], [], costs)
M = (M + M.T)
return M
def pairwise_wasserstein_distance(X, node_features = None, num_iterations=3, sinkhorn=False, enforce_continuous=False):
"""
Pairwise computation of the Wasserstein distance between embeddings of the
graphs in X.
args:
X (List[ig.graphs]): List of graphs
node_features (array): Array containing the node features for continuously attributed graphs
num_iterations (int): Number of iterations for the propagation scheme
sinkhorn (bool): Indicates whether sinkhorn approximation should be used
"""
# First check if the graphs are continuous vs categorical
categorical = True
if enforce_continuous:
        print('Enforce continuous flag is on, using CONTINUOUS propagation scheme.')
categorical = False
elif node_features is not None:
print('Continuous node features provided, using CONTINUOUS propagation scheme.')
categorical = False
else:
for g in X:
            if 'label' not in g.vs.attribute_names():
                print('No label attribute on graphs, using node degree instead and the CONTINUOUS propagation scheme.')
categorical = False
break
if categorical:
print('Categorically-labelled graphs, using CATEGORICAL propagation scheme.')
# Embed the nodes
if categorical:
es = WeisfeilerLehman()
node_representations = es.fit_transform(X, num_iterations=num_iterations)
else:
es = ContinuousWeisfeilerLehman()
node_representations = es.fit_transform(X, node_features=node_features, num_iterations=num_iterations)
# Compute the Wasserstein distance
pairwise_distances = _compute_wasserstein_distance(node_representations, sinkhorn=sinkhorn,
categorical=categorical, sinkhorn_lambda=1e-2)
return pairwise_distances
def wwl(X, node_features=None, num_iterations=3, sinkhorn=False, gamma=None):
"""
Pairwise computation of the Wasserstein Weisfeiler-Lehman kernel for graphs in X.
"""
D_W = pairwise_wasserstein_distance(X, node_features = node_features,
num_iterations=num_iterations, sinkhorn=sinkhorn)
wwl = laplacian_kernel(D_W, gamma=gamma)
return wwl
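# --- Usage sketch (added for illustration; assumes igraph graphs with a 'label' vertex attribute) ---
#
#     import igraph as ig
#     graphs = [ig.Graph.Erdos_Renyi(n=20, p=0.2) for _ in range(5)]
#     for g in graphs:
#         g.vs['label'] = [str(d) for d in g.degree()]   # categorical labels -> CATEGORICAL scheme
#     K = wwl(graphs, num_iterations=2)                  # (5, 5) kernel matrix
#     D = pairwise_wasserstein_distance(graphs, num_iterations=2)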
#######################
# Class implementation
#######################
class PairwiseWWL():
def __init__(self, X, node_features=None, enforce_continuous=False, num_iterations=3, sinkhorn=False):
self.num_iterations = num_iterations
self.sinkhorn = sinkhorn
self.enforce_continuous = enforce_continuous
self.node_features = node_features
self.X = X
self._distance_cache = {}
self._compute_node_representation()
def _compute_node_representation(self):
# First check if the graphs are continuous vs categorical
self.categorical = True
if self.enforce_continuous:
            print('Enforce continuous flag is on, using CONTINUOUS propagation scheme.')
self.categorical = False
elif self.node_features is not None:
print('Continuous node features provided, using CONTINUOUS propagation scheme.')
self.categorical = False
else:
for g in self.X:
                if 'label' not in g.vs.attribute_names():
                    print('No label attribute on graphs, using node degree instead and the CONTINUOUS propagation scheme.')
self.categorical = False
break
if self.categorical:
print('Categorically-labelled graphs, using CATEGORICAL propagation scheme.')
# Embed the nodes
if self.categorical:
es = WeisfeilerLehman()
node_representations = es.fit_transform(self.X, num_iterations=self.num_iterations)
else:
es = ContinuousWeisfeilerLehman()
node_representations = es.fit_transform(self.X, node_features=self.node_features, num_iterations=self.num_iterations)
self.node_representations = node_representations
def wwl_distance(self, idx_1, idx_2, sinkhorn_lambda=1e-2):
# make idx_1 <= idx_2
if idx_1 > idx_2:
idx_1, idx_2 = idx_2, idx_1
if (idx_1, idx_2) in self._distance_cache:
return self._distance_cache[(idx_1, idx_2)]
labels_1 = self.node_representations[idx_1]
labels_2 = self.node_representations[idx_2]
# Get cost matrix
ground_distance = 'hamming' if self.categorical else 'euclidean'
costs = ot.dist(labels_1, labels_2, metric=ground_distance)
if self.sinkhorn:
mat = ot.sinkhorn(np.ones(len(labels_1))/len(labels_1),
np.ones(len(labels_2))/len(labels_2), costs + 1e-6, sinkhorn_lambda,
numItermax=50)
distance = np.sum(np.multiply(mat, costs))
else:
distance = ot.emd2([], [], costs)
self._distance_cache[(idx_1, idx_2)] = distance
return distance
def __getitem__(self, indices):
idx_1, idx_2 = indices
return self.wwl_distance(idx_1, idx_2)
@property
def shape(self):
return (len(self.node_representations), len(self.node_representations))
class PairwiseOverlap(PairwiseWWL):
def overlap_distance(self, idx_1, idx_2):
# make idx_1 <= idx_2
if idx_1 > idx_2:
idx_1, idx_2 = idx_2, idx_1
if (idx_1, idx_2) in self._distance_cache:
return self._distance_cache[(idx_1, idx_2)]
labels_1 = self.node_representations[idx_1]
labels_2 = self.node_representations[idx_2]
(smaller_label, smaller_max_axis) = (labels_1, 1) if len(labels_1) <= len(labels_2) else (labels_2, 0)
similarity = cosine_similarity(labels_1, labels_2)
distance = 1 - np.sum(np.max(similarity, axis=smaller_max_axis)) / len(smaller_label)
self._distance_cache[(idx_1, idx_2)] = distance
return distance
def __getitem__(self, indices):
idx_1, idx_2 = indices
return self.overlap_distance(idx_1, idx_2)
|
# ORDERING is a dictionary that describes the required ordering of Web ACL rules for a
# given web acl ID. Map the Web ACL ID to an ordered tuple of Web ACL rule IDs
# Example usage:
# ORDERING = {
#     'WebAclId-123': ('FirstRuleId', 'SecondRuleId', 'ThirdRuleId'),
# }
ORDERING = {
"EXAMPLE_WEB_ACL_ID": ("EXAMPLE_RULE_1_ID", "EXAMPLE_RULE_2_ID"),
}
def policy(resource):
# Check if Web ACL rule ordering is being enforced
if resource["WebACLId"] not in ORDERING:
return True
web_acl_rules = resource["Rules"]
# Check that the Web ACL has the correct number of rules
if len(ORDERING[resource["WebACLId"]]) != len(web_acl_rules):
return False
# Confirm that each rule is ordered correctly
for web_acl_rule in web_acl_rules:
        # Rules are not necessarily listed in their priority order in the rules list.
        # This determines their priority order, offset by one so it indexes the tuple starting at 0.
priority_order = web_acl_rule["Priority"] - 1
if web_acl_rule["RuleId"] != ORDERING[resource["WebACLId"]][priority_order]:
return False
# The rules all matched correctly, return True
return True
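# Illustrative self-test (added as an example; the resource shape mirrors the fields
# the policy reads and is not taken from a real Web ACL).
if __name__ == "__main__":
    example_resource = {
        "WebACLId": "EXAMPLE_WEB_ACL_ID",
        "Rules": [
            {"RuleId": "EXAMPLE_RULE_2_ID", "Priority": 2},
            {"RuleId": "EXAMPLE_RULE_1_ID", "Priority": 1},
        ],
    }
    # Rules are listed out of order but their priorities match ORDERING, so this passes.
    print(policy(example_resource))  # True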
|
import json
from pprint import pprint
filename = input("Input filename: ")
with open(filename) as f:
data = json.load(f)
pprint(data)
ipV4Neighbors_list = data['ipV4Neighbors']
ipV4Neighbors_dict = {}
for dicts in ipV4Neighbors_list:
for descriptions, val in dicts.items():
print(descriptions, val)
address = dicts['address']
MACs = dicts['hwAddress']
ipV4Neighbors_dict[address] = MACs
pprint(ipV4Neighbors_dict)
|
from django.db import models
# Create your models here.
class Session(models.Model):
    # on_delete is required from Django 2.0 onward; CASCADE matches the old implicit default
    owner = models.ForeignKey('auth.User', on_delete=models.CASCADE, null=False, blank=False, related_name="session_owner")
    uuid = models.CharField(max_length=36, null=False, blank=False)
    #major_step = models.IntegerField()
    #minor_step = models.IntegerField()
    startTime = models.DateTimeField(auto_now_add=True)
|
from giotto.programs import Program, Manifest
from giotto.programs.shell import shell
from giotto.programs.tables import syncdb, flush
from giotto.views import BasicView
management_manifest = Manifest({
'syncdb': Program(
name="Make Tables",
controllers=['cmd'],
model=[syncdb],
view=BasicView()
),
'flush': Program(
name="Blast Tables",
controllers=['cmd'],
model=[flush],
view=BasicView(),
),
'shell': Program(
name="Giotto Shell",
controllers=['cmd'],
model=[shell],
view=BasicView(),
),
})
|
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('registration', '0004_supervisedregistrationprofile'),
]
operations = [
migrations.AlterField(
model_name='registrationprofile',
name='activation_key',
field=models.CharField(max_length=64, verbose_name='activation key'),
),
]
|
from os import walk
import os.path
readme = open('./Todo.md', 'w+')
readme.write('# TODOs\n\n')
for (dirpath, dirnames, filenames) in walk('..\\src\\'):
    for filename in filenames:
        file = open(os.path.join(dirpath, filename))
line = file.readline()
lineNumber = 1
todosInFile = ''
while (line != ''):
search = line.find('@TODO')
if (search != -1):
linePrefix = line[:search]
todosInFile += '\n[Line ' + str(lineNumber) + ']('
todosInFile += 'https://github.com/JarateKing/0ngine/blob/master/src/' + dirpath[7:].replace('\\','/') + '/' + filename + '#L' + str(lineNumber) + ')\n'
todosInFile += '```\n'
while (line != '' and line.startswith(linePrefix)):
todosInFile += line[search:]
line = file.readline()
lineNumber += 1
todosInFile += '```\n'
else:
line = file.readline()
lineNumber += 1
if (len(todosInFile) > 0):
readme.write('## ' + dirpath[7:] + '\\' + filename + '\n' + todosInFile + '\n\n')
file.close()
readme.close()
|
"""
class mask, data handler for the function rn in rn.py, used to permit a more user-friendly approach
to RN object instantiation
"""
# Imports
from fractions import Fraction
from decimal import Decimal
from numpy import array
from rnenv110.rn.mathfuncs.funcs import fraction_from_float
# mask class
class Mask:
"""
    RN mask class: takes the arguments passed to the rn function in rn.py, parses them,
    and returns the data needed to build an RN object.
Following the matrix_rn_representation:
accepted parameters:
INTEGER:
int
SINGLE UNIT INTEGER:
int, int, int
OTHERS:
build as expression
ALGORITHM:
validate args
args length = 1:
integer
args length = 3:
single unit
"""
PERMITTED_PARAMETERS = [('int', ), ('int', 'int', 'int')]
PERMITTED_TYPES = [int, float, Fraction, Decimal]
ERROR_MSG = 'Bad user argument, must be one of {}, got {} instead'
def __init__(self, *args):
"""
validate parameters,
:param args: mask parameters
"""
# validate parameters
self.__validate_parameters(args)
self.data = args
def __validate_parameters(self, args):
"""
validate that args match with one of the PERMITTED_PARAMETER
:param args: arguments passed
:return: None
"""
if len(args) == 1 or len(args) == 3:
if not all(isinstance(data, int) for data in args):
raise ValueError(self.ERROR_MSG.format(self.PERMITTED_PARAMETERS, args))
else:
raise ValueError(self.ERROR_MSG.format(self.PERMITTED_PARAMETERS, args))
def associated_rn(self):
"""
Returns the actual real number array and index ready to
instantiate the object
if args length is 1:
return integer
else: (args length is 3)
return unit
:return: array
"""
# parse args
if len(self.data) == 1:
            ar = array([[[self.data[0], 1, 1]], [[1, 1, 1]]])
else:
ar = array([[[self.data[0], self.data[1], self.data[2]]], [[1, 1, 1]]])
return ar
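# Illustrative usage (added as an example; run only when this module is executed directly).
# The three-integer form follows the project's matrix_rn_representation; the sample
# values below are arbitrary placeholders, not a documented meaning.
if __name__ == '__main__':
    print(Mask(4).associated_rn())        # INTEGER form
    print(Mask(2, 3, 2).associated_rn())  # SINGLE UNIT INTEGER form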
|
from .Negamax import Negamax
from .NonRecursiveNegamax import NonRecursiveNegamax
from .TranspositionTable import TranspositionTable
from .solving import solve_with_iterative_deepening, solve_with_depth_first_search
from .MTdriver import mtd
from .SSS import SSS
from .DUAL import DUAL
from .HashTranspositionTable import HashTranspositionTable
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import os
from typing import Any, Callable, Dict, List, Optional, Union
import uuid
import airflow
from airflow.exceptions import AirflowException
if airflow.__version__ > "2.0":
from airflow.hooks.base import BaseHook
else:
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import great_expectations as ge
from great_expectations.checkpoint import LegacyCheckpoint
from great_expectations.checkpoint.types.checkpoint_result import CheckpointResult
from great_expectations.data_context.types.base import (
DataContextConfig,
GCSStoreBackendDefaults,
)
from great_expectations.data_context import BaseDataContext
class GreatExpectationsOperator(BaseOperator):
"""
An operator to leverage Great Expectations as a task in your Airflow DAG.
Current list of expectations types:
https://docs.greatexpectations.io/en/latest/reference/glossary_of_expectations.html
How to create expectations files:
https://docs.greatexpectations.io/en/latest/guides/tutorials/how_to_create_expectations.html
:param run_name: Identifies the validation run (defaults to timestamp if not specified)
:type run_name: Optional[str]
:param data_context_root_dir: Path of the great_expectations directory
:type data_context_root_dir: Optional[str]
:param data_context: A great_expectations `DataContext` object
:type data_context: Optional[BaseDataContext]
:param expectation_suite_name: The name of the Expectation Suite to use for validation
:type expectation_suite_name: Optional[str]
:param batch_kwargs: The batch_kwargs to use for validation
:type batch_kwargs: Optional[dict]
:param assets_to_validate: A list of dictionaries of batch_kwargs + Expectation Suites to use for validation
:type assets_to_validate: Optional[list[dict]]
:param checkpoint_name: A Checkpoint name to use for validation
:type checkpoint_name: Optional[str]
:param validation_operator_name: name of a Great Expectations validation operator, defaults to action_list_operator
:type validation_operator_name: Optional[str]
:param fail_task_on_validation_failure: Fail the Airflow task if the Great Expectation validation fails
    :type fail_task_on_validation_failure: Optional[bool]
:param validation_failure_callback: Called when the Great Expectations validation fails
:type validation_failure_callback: Callable[[CheckpointResult], None]
:param **kwargs: kwargs
:type **kwargs: Optional[dict]
"""
ui_color = "#AFEEEE"
ui_fgcolor = "#000000"
template_fields = (
"checkpoint_name",
"batch_kwargs",
"assets_to_validate",
"data_context_root_dir",
)
@apply_defaults
def __init__(
self,
*,
run_name: Optional[str] = None,
data_context_root_dir: Optional[Union[str, bytes, os.PathLike]] = None,
data_context: Optional[BaseDataContext] = None,
expectation_suite_name: Optional[str] = None,
batch_kwargs: Optional[Dict] = None,
assets_to_validate: Optional[List[Dict]] = None,
checkpoint_name: Optional[str] = None,
validation_operator_name: Optional[str] = None,
fail_task_on_validation_failure: Optional[bool] = True,
validation_failure_callback: Optional[
Callable[[CheckpointResult], None]
] = None,
**kwargs
):
super().__init__(**kwargs)
self.run_name: Optional[str] = run_name
# Check that only one of the arguments is passed to set a data context (or none)
if data_context_root_dir and data_context:
raise ValueError(
"Only one of data_context_root_dir or data_context can be specified."
)
self.data_context_root_dir: Optional[str] = data_context_root_dir
self.data_context: Optional[BaseDataContext] = data_context
# Check that only the correct args to validate are passed
# this doesn't cover the case where only one of expectation_suite_name or batch_kwargs is specified
# along with one of the others, but I'm ok with just giving precedence to the correct one
if (
sum(
bool(x)
for x in [
(expectation_suite_name and batch_kwargs),
assets_to_validate,
checkpoint_name,
]
)
!= 1
):
raise ValueError(
"Exactly one of expectation_suite_name + batch_kwargs, "
"assets_to_validate, or checkpoint_name is required to run validation."
)
self.expectation_suite_name: Optional[str] = expectation_suite_name
self.batch_kwargs: Optional[Dict] = batch_kwargs
self.assets_to_validate: Optional[List[Dict]] = assets_to_validate
self.checkpoint_name: Optional[str] = checkpoint_name
self.validation_operator_name: Optional[str] = validation_operator_name
self.fail_task_on_validation_failure = fail_task_on_validation_failure
self.validation_failure_callback = validation_failure_callback
def create_data_context(self) -> BaseDataContext:
"""Create and return the :class:`~ge.data_context.DataContext` to be used
during validation.
Subclasses should override this to provide custom logic around creating a
`DataContext`. This is called at task execution time, which defers connecting
to the meta database and allows for the use of templated variables.
"""
if self.data_context_root_dir:
return ge.data_context.DataContext(
context_root_dir=self.data_context_root_dir
)
else:
return ge.data_context.DataContext()
def execute(self, context: Any) -> CheckpointResult:
self.log.info("Ensuring data context exists...")
if not self.data_context:
self.log.info("Data context does not exist, creating now.")
self.data_context: Optional[BaseDataContext] = self.create_data_context()
self.log.info("Running validation with Great Expectations...")
batches_to_validate = []
if self.batch_kwargs and self.expectation_suite_name:
batch = {
"batch_kwargs": self.batch_kwargs,
"expectation_suite_names": [self.expectation_suite_name],
}
batches_to_validate.append(batch)
elif self.checkpoint_name:
checkpoint = self.data_context.get_checkpoint(self.checkpoint_name)
for batch in checkpoint.batches:
batch_kwargs = batch["batch_kwargs"]
for suite_name in batch["expectation_suite_names"]:
batch = {
"batch_kwargs": batch_kwargs,
"expectation_suite_names": [suite_name],
}
batches_to_validate.append(batch)
elif self.assets_to_validate:
for asset in self.assets_to_validate:
batch = {
"batch_kwargs": asset["batch_kwargs"],
"expectation_suite_names": [asset["expectation_suite_name"]],
}
batches_to_validate.append(batch)
result = LegacyCheckpoint(
name="_temp_checkpoint",
data_context=self.data_context,
validation_operator_name=self.validation_operator_name,
batches=batches_to_validate,
).run(run_name=self.run_name)
self.handle_result(result)
return result
def handle_result(self, result: CheckpointResult) -> None:
"""Handle the given validation result.
If the validation failed, this method will:
- call :attr:`~validation_failure_callback`, if set
- raise an :exc:`airflow.exceptions.AirflowException`, if
:attr:`~fail_task_on_validation_failure` is `True`, otherwise, log a warning
message
If the validation succeeded, this method will simply log an info message.
:param result: The validation result
:type result: CheckpointResult
"""
if not result["success"]:
if self.validation_failure_callback:
self.validation_failure_callback(result)
if self.fail_task_on_validation_failure:
raise AirflowException("Validation with Great Expectations failed.")
else:
self.log.warning(
"Validation with Great Expectations failed. "
"Continuing DAG execution because "
"fail_task_on_validation_failure is set to False."
)
else:
self.log.info("Validation with Great Expectations successful.")
class GreatExpectationsBigQueryOperator(GreatExpectationsOperator):
"""
An operator that allows you to use Great Expectations to validate data Expectations
against a BigQuery table or the result of a SQL query.
The Expectations need to be stored in a JSON file sitting in an accessible GCS
bucket. The validation results are output to GCS in both JSON and HTML formats.
:param gcp_project: The GCP project of the bucket holding the Great Expectations
artifacts.
:type gcp_project: str
:param gcs_bucket: GCS bucket holding the Great Expectations artifacts.
:type gcs_bucket: str
:param gcs_expectations_prefix: GCS prefix where the Expectations file can be
found. For example, "ge/expectations".
:type gcs_expectations_prefix: str
:param gcs_validations_prefix: GCS prefix where the validation output files should
        be saved. For example, "ge/validations".
:type gcs_validations_prefix: str
:param gcs_datadocs_prefix: GCS prefix where the validation datadocs files should
        be saved. For example, "ge/data_docs".
:type gcs_datadocs_prefix: str
:param query: The SQL query that defines the set of data to be validated. If the
query parameter is filled in then the `table` parameter cannot be.
:type query: Optional[str]
:param table: The name of the BigQuery table with the data to be validated. If the
table parameter is filled in then the `query` parameter cannot be.
:type table: Optional[str]
:param bq_dataset_name: The name of the BigQuery data set where any temp tables
will be created that are needed as part of the GE validation process.
:type bq_dataset_name: str
:param bigquery_conn_id: ID of the connection with the credentials info needed to
connect to BigQuery.
:type bigquery_conn_id: str
"""
ui_color = "#AFEEEE"
ui_fgcolor = "#000000"
template_fields = GreatExpectationsOperator.template_fields + (
"bq_dataset_name",
"gcp_project",
"gcs_bucket",
)
@apply_defaults
def __init__(
self,
*,
gcp_project: str,
gcs_bucket: str,
gcs_expectations_prefix: str,
gcs_validations_prefix: str,
gcs_datadocs_prefix: str,
query: Optional[str] = None,
table: Optional[str] = None,
bq_dataset_name: str,
bigquery_conn_id: str = "bigquery_default",
**kwargs
):
self.query: Optional[str] = query
self.table: Optional[str] = table
self.bigquery_conn_id = bigquery_conn_id
self.bq_dataset_name = bq_dataset_name
self.gcp_project = gcp_project
self.gcs_bucket = gcs_bucket
self.gcs_expectations_prefix = gcs_expectations_prefix
self.gcs_validations_prefix = gcs_validations_prefix
self.gcs_datadocs_prefix = gcs_datadocs_prefix
super().__init__(batch_kwargs=self.get_batch_kwargs(), **kwargs)
def create_data_context(self) -> BaseDataContext:
"""Create and return the `DataContext` with a BigQuery `DataSource`."""
# Get the credentials information for the BigQuery data source from the BigQuery
# Airflow connection
conn = BaseHook.get_connection(self.bigquery_conn_id)
connection_json = conn.extra_dejson
credentials_path = connection_json.get("extra__google_cloud_platform__key_path")
data_context_config = DataContextConfig(
config_version=2,
datasources={
"bq_datasource": {
"credentials": {
"url": "bigquery://"
+ self.gcp_project
+ "/"
+ self.bq_dataset_name
+ "?credentials_path="
+ credentials_path
},
"class_name": "SqlAlchemyDatasource",
"module_name": "great_expectations.datasource",
"data_asset_type": {
"module_name": "great_expectations.dataset",
"class_name": "SqlAlchemyDataset",
},
}
},
store_backend_defaults=GCSStoreBackendDefaults(
default_bucket_name=self.gcs_bucket,
default_project_name=self.gcp_project,
validations_store_prefix=self.gcs_validations_prefix,
expectations_store_prefix=self.gcs_expectations_prefix,
data_docs_prefix=self.gcs_datadocs_prefix,
),
)
return BaseDataContext(project_config=data_context_config)
def get_batch_kwargs(self) -> Dict:
# Tell GE where to fetch the batch of data to be validated.
batch_kwargs = {
"datasource": "bq_datasource",
}
# Check that only one of the arguments is passed to set a data context (or none)
if self.query and self.table:
raise ValueError("Only one of query or table can be specified.")
if self.query:
batch_kwargs["query"] = self.query
batch_kwargs["data_asset_name"] = self.bq_dataset_name
batch_kwargs["bigquery_temp_table"] = self.get_temp_table_name(
"ge_" + datetime.datetime.now().strftime("%Y%m%d") + "_", 10
)
elif self.table:
batch_kwargs["table"] = self.table
batch_kwargs["data_asset_name"] = self.bq_dataset_name
self.log.info("batch_kwargs: " + str(batch_kwargs))
return batch_kwargs
def get_temp_table_name(
self, desired_prefix: str, desired_length_of_random_portion: int
) -> str:
random_string = str(uuid.uuid4().hex)
random_portion_of_name = random_string[:desired_length_of_random_portion]
full_name = desired_prefix + random_portion_of_name
self.log.info("Generated name for temporary table: %s", full_name)
return full_name
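# --- DAG usage sketch (added for illustration; dag/task names, datasource name and paths are placeholders) ---
#
#     from airflow import DAG
#
#     with DAG("ge_validation_example", start_date=datetime.datetime(2021, 1, 1),
#              schedule_interval=None) as dag:
#         validate = GreatExpectationsOperator(
#             task_id="validate_orders",
#             expectation_suite_name="orders.warning",
#             batch_kwargs={"path": "/data/orders.csv", "datasource": "files_datasource"},
#             data_context_root_dir="/usr/local/airflow/great_expectations",
#         )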
|
from hubcheck.pageobjects.po_generic_page import GenericPage
class TagsPage(GenericPage):
"""tags page"""
def __init__(self,browser,catalog):
super(TagsPage,self).__init__(browser,catalog)
self.path = "/tags"
# load hub's classes
TagsPage_Locators = self.load_class('TagsPage_Locators')
Tags = self.load_class('Tags')
# update this object's locator
self.locators.update(TagsPage_Locators.locators)
# setup page object's components
self.tags = Tags(self,{'base':'tags'})
def goto_faq(self):
self.tags.goto_faq()
def search_for_content(self,termlist):
return self.tags.search_for_content(termlist)
def search_for_tags(self,terms):
return self.tags.search_for_tags(terms)
def get_recently_used_tags(self):
return self.tags.get_recently_used_tags()
def get_top_100_tags(self):
return self.tags.get_top_100_tags()
def goto_recently_used_tag(self,tagname):
return self.tags.goto_recently_used_tag(tagname)
def goto_top_100_tag(self,tagname):
return self.tags.goto_top_100_tag(tagname)
def goto_all_tags(self):
return self.tags.goto_all_tags()
class TagsPage_Locators_Base(object):
"""locators for TagsPage object"""
locators = {
'tags' : "css=#content",
}
|
import numpy as np
def lerp(a, b, x):
return a * (1-x) + b * x
def clamp(x, max_x, min_x):
if x > max_x:
return max_x
elif x < min_x:
return min_x
return x
def softmax(x, k):
c = np.max(x)
exp_a = np.exp(k * (x-c))
sum_exp_a = np.sum(exp_a)
y = exp_a / sum_exp_a
return y
def softmax_pow(x, k):
c = x / np.max(x)
c = c ** k
return c / np.sum(c)
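# Quick sanity check (added as an example); a higher k sharpens the distribution.
if __name__ == '__main__':
    scores = np.array([1.0, 2.0, 3.0])
    print(softmax(scores, k=1))       # soft weighting, sums to 1
    print(softmax(scores, k=10))      # nearly one-hot on the max entry
    print(softmax_pow(scores, k=3))   # power-based alternative, also sums to 1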
|
'''
https://practice.geeksforgeeks.org/problems/subarray-with-given-sum/0/?ref=self
Given an unsorted array of non-negative integers, find a
continuous sub-array which adds to a given number.
Input:
The first line of input contains an integer T denoting the number of test cases.
Then T test cases follow. Each test case consists of two lines.
The first line of each test case is N and S, where N is the size of array and S is the sum.
The second line of each test case contains N space separated integers denoting the array
elements
'''
def find_contiguous_adds_to(A, s):
    # Sliding window over non-negative integers; returns a 1-indexed (start, end)
    # pair for a contiguous subarray summing to s, or -1 if none exists.
    if len(A) == 0:
        return -1
    curSum = A[0]
    start = 0
    for end in range(1, len(A) + 1):
        # shrink the window from the left while it overshoots the target
        while curSum > s and start < end - 1:
            curSum -= A[start]
            start += 1
        if curSum == s:
            return start + 1, end
        if end < len(A):
            curSum += A[end]
    return -1
def find_contiguous_posneg_adds_to(A, s):
if len(A) == 0:
return -1
elif len(A) == 1:
return (0,0) if A[0] == s else -1
sumMap = dict()
curSum = 0
for i, num in enumerate(A):
curSum += num
if curSum == s:
return (0, i)
elif sumMap.get(curSum - s, None) is not None:
return sumMap.get(curSum - s) + 1, i
#if value of difference between current sum and s is in map, exclude that value (subtract it)
#and return index of solution as 1+ index of subarray from 0..A[sumMap[curSum - s]]
        sumMap[curSum] = i
    # no contiguous subarray sums to s
    return -1
if __name__ == '__main__':
A = [1,2,-3, 3, 3, 7,5]
s = 12
#print(find_contiguous_adds_to(A,s))
print(find_contiguous_posneg_adds_to(A,s))
|
"""Module for the auth login command"""
from ostorlab.cli.auth.login import login
|
import re
import time
from Jumpscale import j
ZEROTIER_FIREWALL_ZONE_REGEX = re.compile(r"^firewall\.@zone\[(\d+)\]\.name='zerotier'$")
FORWARDING_FIREWALL_REGEX = re.compile(r"^firewall\.@forwarding\[(\d+)\].*?('\w+')?$")
class BuilderZeroBoot(j.baseclasses.builder):
def install(self, network_id, token, zos_version="v.1.4.1", zos_args="", reset=False):
if not reset and self._done_check("install"):
return
# update zerotier config
j.builders.network.zerotier.build(install=True, reset=reset)
# Remove sample_config
rc, _, _ = j.sal.process.execute("uci show zerotier.sample_config", die=False)
if rc == 0:
j.sal.process.execute("uci delete zerotier.sample_config")
j.sal.process.execute("uci commit")
# Add our config
if reset:
zerotier_reinit = True
else:
rc, out, _ = j.sal.process.execute("uci show zerotier.config", die=False)
zerotier_reinit = rc # rc == 1 if configuration is not present
if not zerotier_reinit:
# Check if the configuration matches our expectations
if not "zerotier.config.join='{}'".format(network_id) in out:
zerotier_reinit = True
if zerotier_reinit:
# Start zerotier at least one time to generate config files
j.sal.process.execute("uci set zerotier.config=zerotier")
j.sal.process.execute("uci set zerotier.config.enabled='1'")
j.sal.process.execute("uci set zerotier.config.interface='wan'") # restart ZT when wan status changed
j.sal.process.execute("uci add_list zerotier.config.join='{}'".format(network_id)) # Join zerotier network
j.sal.process.execute("uci set zerotier.config.secret='generate'") # Generate secret on the first start
j.sal.process.execute("uci commit")
j.sal.process.execute("/etc/init.d/zerotier enable")
j.sal.process.execute("/etc/init.d/zerotier start")
# Join Network
zerotier_client = j.clients.zerotier.get(data={"token_": token})
j.builders.network.zerotier.network_join(network_id, zerotier_client=zerotier_client)
# update TFTP and DHCP
j.sal.process.execute("uci set dhcp.@dnsmasq[0].enable_tftp='1'")
j.sal.process.execute("uci set dhcp.@dnsmasq[0].tftp_root='/opt/storage/'")
j.sal.process.execute("uci set dhcp.@dnsmasq[0].dhcp_boot='pxelinux.0'")
j.sal.process.execute("uci commit")
j.core.tools.dir_ensure("/opt/storage")
j.sal.process.execute("opkg install curl ca-bundle")
j.sal.process.execute("curl https://download.gig.tech/pxe.tar.gz -o /opt/storage/pxe.tar.gz")
j.sal.process.execute("tar -xzf /opt/storage/pxe.tar.gz -C /opt/storage")
j.sal.process.execute("cp -r /opt/storage/pxe/* /opt/storage")
j.sal.process.execute("rm -rf /opt/storage/pxe")
j.sal.process.execute(
'sed -i "s|a84ac5c10a670ca3|%s/%s|g" /opt/storage/pxelinux.cfg/default' % (network_id, zos_args)
)
j.sal.process.execute('sed -i "s|zero-os-master|%s|g" /opt/storage/pxelinux.cfg/default' % zos_version)
# this is needed to make sure that network name is ready
for _ in range(12):
try:
network_device_name = j.builders.network.zerotier.get_network_interface_name(network_id)
break
except KeyError:
time.sleep(5)
else:
raise j.exceptions.Base("Unable to join network within 60 seconds!")
j.sal.process.execute("uci set network.{0}=interface".format(network_device_name))
j.sal.process.execute("uci set network.{0}.proto='none'".format(network_device_name))
j.sal.process.execute("uci set network.{0}.ifname='{0}'".format(network_device_name))
try:
zone_id = self.get_zerotier_firewall_zone()
except KeyError:
j.sal.process.execute("uci add firewall zone")
zone_id = -1
j.sal.process.execute("uci set firewall.@zone[{0}]=zone".format(zone_id))
j.sal.process.execute("uci set firewall.@zone[{0}].input='ACCEPT'".format(zone_id))
j.sal.process.execute("uci set firewall.@zone[{0}].output='ACCEPT'".format(zone_id))
j.sal.process.execute("uci set firewall.@zone[{0}].name='zerotier'".format(zone_id))
j.sal.process.execute("uci set firewall.@zone[{0}].forward='ACCEPT'".format(zone_id))
j.sal.process.execute("uci set firewall.@zone[{0}].masq='1'".format(zone_id))
j.sal.process.execute("uci set firewall.@zone[{0}].network='{1}'".format(zone_id, network_device_name))
self.add_forwarding("lan", "zerotier")
self.add_forwarding("zerotier", "lan")
j.sal.process.execute("uci commit")
self._done_set("install")
def get_zerotier_firewall_zone(self):
_, out, _ = j.sal.process.execute("uci show firewall")
for line in out.splitlines():
m = ZEROTIER_FIREWALL_ZONE_REGEX.match(line)
if m:
return int(m.group(1))
raise j.exceptions.NotFound("Zerotier zone in firewall configuration was not found!")
def add_forwarding(self, dest, src):
_, out, _ = j.sal.process.execute("uci show firewall")
forwards = dict()
for line in out.splitlines():
m = FORWARDING_FIREWALL_REGEX.match(line)
if m:
if line.endswith("=forwarding"):
forwards[m.group(1)] = dict()
elif ".dest=" in line:
forwards[m.group(1)]["dest"] = m.group(2)
elif ".src=" in line:
forwards[m.group(1)]["src"] = m.group(2)
if {"dest": "'%s'" % dest, "src": "'%s'" % src} in forwards.values():
return
j.sal.process.execute("uci add firewall forwarding")
j.sal.process.execute("uci set firewall.@forwarding[-1]=forwarding")
j.sal.process.execute("uci set firewall.@forwarding[-1].dest='%s'" % dest)
j.sal.process.execute("uci set firewall.@forwarding[-1].src='%s'" % src)
|
import numpy as np
import random
import cv2
from augraphy.base.augmentation import Augmentation
from augraphy.base.augmentationresult import AugmentationResult
class FoldingAugmentation(Augmentation):
"""Emulates folding effect from perspective transformation
    :param fold_count: Number of applied foldings
    :type fold_count: int, optional
:param fold_noise: Level of noise added to folding area. Range from
value of 0 to 1.
:type fold_noise: float, optional
:param gradient_width: Tuple (min, max) Measure of the space affected
by fold prior to being warped (in units of
percentage of width of page)
:type gradient_width: tuple, optional
:param gradient_height: Tuple (min, max) Measure of depth of fold (unit
measured as percentage page height)
:type gradient_height: tuple, optional
:param p: The probability this Augmentation will be applied.
:type p: float, optional
"""
def __init__(
self,
fold_count=2,
fold_noise=0.1,
gradient_width=(0.1,0.2),
gradient_height=(0.01,0.02),
p=0.5
):
super().__init__(p=p)
self.fold_count = fold_count
self.fold_noise = fold_noise
self.gradient_width = gradient_width
self.gradient_height = gradient_height
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"FoldingAugmentation(fold_count={self.fold_count}, fold_noise={self.fold_noise}, gradient_width={self.gradient_width}, gradient_height={self.gradient_height},p={self.p})"
# Perspective transform based on 4 points
def four_point_transform(self, image,pts,dst, xs,ys):
M = cv2.getPerspectiveTransform(pts, dst)
img_warped = cv2.warpPerspective(image, M, (xs, ys))
# return the warped image
return img_warped
# Transform left side of folding area
def warp_fold_left_side(self, img, ysize, fold_noise, fold_x, fold_width_one_side, fold_y_shift):
img_fuse = img.copy()
        # 4 vertices of folding area
xs = 0 # xleft
xe = fold_width_one_side # xright
ys = 0 # ytop
ye = ysize # ybottom
# before distortion
top_left = [xs, ys]
top_right = [xe, ys]
bottom_left = [xs,ye]
bottom_right = [xe, ye]
# after distortion
dtop_left = [xs, ys]
dtop_right = [xe, ys+fold_y_shift]
dbottom_left = [xs,ye]
dbottom_right = [xe, ye+fold_y_shift]
# image cropping points
cxs = fold_x
cxe = fold_x + fold_width_one_side
cys = 0
cye = ysize
# points of folding area
source_pts = np.array([top_left,bottom_left,bottom_right, top_right], dtype=np.float32)
destination_pts = np.array([dtop_left,dbottom_left, dbottom_right, dtop_right], dtype=np.float32)
# crop section of folding area
img_crop= img[cys:cye, cxs:cxe]
# get image dimension of cropped image
if len(img_crop.shape)>2:
cysize,cxsize,cdim = img_crop.shape
else:
cysize,cxsize = img_crop.shape
cdim=2
# warp folding area
img_warped = self.four_point_transform(img_crop,source_pts,destination_pts, cxsize, cysize+fold_y_shift)
img_warped = self.add_noise(img_warped, 1, fold_noise/2)
if cdim>2:
img_fuse[cys:cye,cxs:cxe,:] = img_warped[:-fold_y_shift ,:,:]
else:
img_fuse[cys:cye,cxs:cxe] = img_warped[:-fold_y_shift ,:]
return img_fuse
# Transform right side of folding area
def warp_fold_right_side(self, img, ysize, fold_noise, fold_x, fold_width_one_side, fold_y_shift):
img_fuse = img.copy()
        # 4 vertices of folding area
xs = 0 # xleft
xe = fold_width_one_side # xright
ys = 0 # ytop
ye = ysize # ybottom
# before distortion
top_left = [xs, ys]
top_right = [xe, ys]
bottom_left = [xs,ye]
bottom_right = [xe, ye]
# after distortion
dtop_left = [xs, ys+(fold_y_shift)]
dtop_right = [xe, ys]
dbottom_left = [xs,ye+(fold_y_shift)]
dbottom_right = [xe, ye]
# image cropping points
cxs = fold_x + fold_width_one_side
cxe = fold_x + (fold_width_one_side*2)
cys = 0
cye = ysize
# points of folding area
source_pts = np.array([top_left,bottom_left,bottom_right, top_right], dtype=np.float32)
destination_pts = np.array([dtop_left,dbottom_left, dbottom_right, dtop_right], dtype=np.float32)
# crop section of folding area
img_crop= img[cys:cye, cxs:cxe]
# get image dimension of cropped image
if len(img_crop.shape)>2:
cysize,cxsize,cdim = img_crop.shape
else:
cysize,cxsize = img_crop.shape
cdim = 2
# warp folding area
img_warped = self.four_point_transform(img_crop,source_pts,destination_pts, cxsize, cysize+fold_y_shift)
img_warped = self.add_noise(img_warped, 0, fold_noise/2)
if cdim>2:
img_fuse[cys:cye,cxs:cxe,:] = img_warped[:-fold_y_shift ,:,:]
else:
img_fuse[cys:cye,cxs:cxe] = img_warped[:-fold_y_shift ,:]
return img_fuse
# Generate noise to edges of folding
def add_noise(self, img, side, p = 0.1):
# side = flag to put more noise at certain side
# 0 = left side
# 1 = right side
# get image dimension
if len(img.shape)>2:
ysize,xsize,dim = img.shape
else:
ysize,xsize = img.shape
dim=2
for y in range(ysize):
for x in range(xsize):
if (side): # more noise on right side
p_score = (((x) / xsize)**3) * p # non linear score with power
else: # more noise on left side
p_score = (((xsize-x) / xsize)**3) * p # non linear score with power
if (p_score>random.random()):
img[y,x] = 0
return img
# Apply perspective transform 2 times and get single folding effect
def apply_folding(self, img, ysize, xsize, gradient_width, gradient_height, fold_noise):
min_fold_x = int(gradient_width[0] * xsize)
max_fold_x = int(gradient_width[1] * xsize)
fold_width_one_side = int(random.randint(min_fold_x, max_fold_x)/2) # folding width from left to center of folding, or from right to center of folding
fold_x = random.randint(fold_width_one_side+1, xsize-fold_width_one_side-1) # center of folding
fold_y_shift_min = int(gradient_height[0] * ysize)
fold_y_shift_max = int(gradient_height[1] * ysize)
fold_y_shift = random.randint(fold_y_shift_min, fold_y_shift_max) # y distortion in folding (support positive y value for now)
img_fold_l = self.warp_fold_left_side(img, ysize, fold_noise, fold_x, fold_width_one_side, fold_y_shift)
img_fold_r = self.warp_fold_right_side(img_fold_l, ysize, fold_noise, fold_x, fold_width_one_side, fold_y_shift)
return img_fold_r
# Applies the Augmentation to input data.
def __call__(self, data, force=False):
if force or self.should_run():
img = data["ink"][-1].result.copy()
# get image dimension
if len(img.shape)>2:
ysize,xsize,_ = img.shape
else:
ysize,xsize = img.shape
# apply folding multiple times
img_fold = img.copy()
for _ in range(self.fold_count):
img_fold = self.apply_folding(img_fold, ysize, xsize, self.gradient_width, self.gradient_height, self.fold_noise)
data["ink"].append(AugmentationResult(self, img_fold))
|
"""Common NCDR Alerts Data class used by both sensor and entity."""
import logging
import json
from aiohttp.hdrs import USER_AGENT
import requests
import http
from .const import (
ALERTS_TYPE,
ALERTS_AREA,
BASE_URL,
HA_USER_AGENT,
REQUEST_TIMEOUT
)
_LOGGER = logging.getLogger(__name__)
class NcdrAlertData:
"""Get alerts data from NCDR. """
def __init__(self, hass, alerts_type):
"""Initialize the data object."""
self._hass = hass
# Holds the current data from the NCDR
self.data = []
self.alerts = None
self.alert_name = None
self.alerts_type = alerts_type
self.alert_type = None
self.uri = None
async def async_update_alerts(self):
"""Async wrapper for getting alert data."""
return await self._hass.async_add_executor_job(self._update_alerts)
def get_data_for_alert(self, alert_type, data):
""" return data """
self._update_alerts()
return self.data
def _parser_json(self, alert_type, text):
""" parser json """
the_dict = json.loads(text)
data = {}
value = {}
if "entry" in the_dict:
if isinstance(the_dict["entry"], dict):
value["updated"] = the_dict["updated"]
value["title"] = the_dict["entry"]["title"]
value["author"] = the_dict["entry"]["author"]["name"]
value["text"] = the_dict["entry"]["summary"].get("#text", None)
else:
value["updated"] = the_dict["updated"]
value["title"] = the_dict["entry"][-1]["title"]
value["author"] = the_dict["entry"][-1]["author"]["name"]
value["text"] = the_dict["entry"][-1]["summary"].get("#text", None)
data[alert_type] = value
return data
def _update_alerts(self):
"""Return the alert json."""
headers = {USER_AGENT: HA_USER_AGENT}
for i in self.alerts_type:
if i in ALERTS_AREA:
self.uri = "{}County={}".format(BASE_URL, i)
else:
self.uri = "{}AlertType={}".format(BASE_URL, i)
req = None
try:
req = requests.post(
self.uri,
headers=headers,
timeout=REQUEST_TIMEOUT)
except requests.exceptions.RequestException as err:
_LOGGER.error("Failed fetching data for %s", ALERTS_TYPE[i])
if req and req.status_code == http.HTTPStatus.OK:
self.data.append(self._parser_json(i, req.text))
if self.alert_name is None:
self.alert_name = "ncdr"
self.alert_name = self.alert_name + "-" + i
else:
_LOGGER.error("Received error from NCDR")
return self.alert_name
async def async_update(self):
"""Async wrapper for update method."""
return await self._hass.async_add_executor_job(self._update)
def _update(self):
"""Get the latest data from NCDR."""
if self.alerts_type is None:
_LOGGER.error("No NCDR held, check logs for problems")
return
try:
alerts = self.get_data_for_alert(
self.alert_type, self.data
)
self.alerts = alerts
except (ValueError) as err:
_LOGGER.error("Check NCDR connection: %s", err.args)
self.alert_name = None
|
from io import StringIO
import pandas as pd
import pytest
from calliope import locations, utils
class TestLocations:
@pytest.fixture
def sample_locations(self):
setup = StringIO("""
test:
techs: ['demand', 'unmet_demand', 'ccgt']
override:
demand:
x_map: 'a: test'
constraints:
r: file=demand.csv
r_scale_to_peak: -10
ccgt:
constraints:
e_cap.max: 10
test1:
within: test
techs: ['csp']
""")
return utils.AttrDict.from_yaml(setup)
@pytest.fixture
def sample_unexploded_locations(self):
setup = StringIO("""
1,2,3:
foo:
a,b,c:
foo:
10--15:
foo:
10-20:
foo:
21--23,25,z:
foo:
x:
foo:
y:
foo:
""")
return utils.AttrDict.from_yaml(setup)
@pytest.fixture
def sample_nested_locations(self):
setup = StringIO("""
1,2,3:
techs: ['foo']
foo:
techs: ['foo']
10,11,12:
within: 1
techs: ['foo']
20,21,22:
within: 2
techs: ['foo']
bar,baz:
within: foo
techs: ['foo']
qux:
within: bar
techs: ['foo']
""")
return utils.AttrDict.from_yaml(setup)
@pytest.fixture
def sample_overlapping_locations(self):
setup = StringIO("""
1,2,3:
techs: ['foo']
1:
override:
bar: baz
""")
return utils.AttrDict.from_yaml(setup)
def test_generate_location(self, sample_locations):
location = 'test'
items = sample_locations[location]
techs = ['demand', 'unmet_demand', 'ccgt']
result = locations._generate_location(location, items, techs)
wanted_cols = ['_level', '_location',
'_override.ccgt.constraints.e_cap.max',
'_override.demand.constraints.r',
'_override.demand.constraints.r_scale_to_peak',
'_override.demand.x_map',
'_within', 'ccgt', 'demand', 'unmet_demand']
assert sorted(result.keys()) == wanted_cols
def test_generate_location_lacking_techs(self, sample_locations):
location = 'test'
items = sample_locations[location]
techs = ['unmet_demand', 'ccgt']
result = locations._generate_location(location, items, techs)
wanted_cols = ['_level', '_location',
'_override.ccgt.constraints.e_cap.max',
'_override.demand.constraints.r',
'_override.demand.constraints.r_scale_to_peak',
'_override.demand.x_map',
'_within', 'ccgt', 'unmet_demand']
assert sorted(result.keys()) == wanted_cols
def test_explode_location_single(self):
assert locations.explode_locations('a') == ['a']
def test_explode_location_range(self):
assert locations.explode_locations('1--3') == ['1', '2', '3']
def test_explode_location_range_backwards(self):
with pytest.raises(KeyError):
locations.explode_locations('3--1')
def test_explode_location_range_nonnumeric(self):
with pytest.raises(ValueError):
locations.explode_locations('a--c')
def test_explore_location_list(self):
assert locations.explode_locations('1,2,3') == ['1', '2', '3']
def test_explode_location_mixed(self):
assert (locations.explode_locations('a,b,1--3,c')
== ['a', 'b', '1', '2', '3', 'c'])
def test_explode_location_empty(self):
with pytest.raises(KeyError):
assert locations.explode_locations('')
def test_explode_location_invalid(self):
with pytest.raises(AssertionError):
assert locations.explode_locations(['a', 'b'])
def test_process_locations(self, sample_unexploded_locations):
fixture = sample_unexploded_locations
o = locations.process_locations(fixture)
assert '10' in o
assert 'x' in o
assert len(list(o.keys())) == 20
def test_process_locations_overlap(self, sample_overlapping_locations):
fixture = sample_overlapping_locations
o = locations.process_locations(fixture)
assert o['1'].level == 0
assert o['1'].override.bar == 'baz'
def test_process_locations_levels(self, sample_nested_locations):
fixture = sample_nested_locations
o = locations.process_locations(fixture)
assert o['1'].level == 0
assert o['bar'].level == 1
assert o['qux'].level == 2
def test_generate_location_matrix_cols(self, sample_locations):
techs = ['demand', 'unmet_demand', 'ccgt', 'csp']
df = locations.generate_location_matrix(sample_locations, techs)
wanted_cols = ['_level',
'_override.ccgt.constraints.e_cap.max',
'_override.demand.constraints.r',
'_override.demand.constraints.r_scale_to_peak',
'_override.demand.x_map',
'_within', 'ccgt', 'csp', 'demand', 'unmet_demand']
assert sorted(df.columns) == wanted_cols
def test_generate_location_matrix_index(self, sample_locations):
techs = ['demand', 'unmet_demand', 'ccgt', 'csp']
df = locations.generate_location_matrix(sample_locations, techs)
assert df.index.tolist() == ['test', 'test1']
def test_generate_location_matrix_values(self, sample_locations):
techs = ['demand', 'unmet_demand', 'ccgt', 'csp']
df = locations.generate_location_matrix(sample_locations, techs)
assert df.at['test', 'demand'] == 1
assert df.at['test1', 'demand'] == 0
assert (df.at['test', '_override.demand.constraints.r']
== 'file=demand.csv')
assert pd.isnull(df.at['test1', '_override.demand.constraints.r'])
def test_generate_location_matrix_additional_techs(self, sample_locations):
techs = ['demand', 'unmet_demand', 'ccgt', 'csp', 'foo']
df = locations.generate_location_matrix(sample_locations, techs)
assert sum(df['foo']) == 0
def test_generate_location_matrix_missing_techs_cols(self, sample_locations):
techs = ['ccgt']
df = locations.generate_location_matrix(sample_locations, techs)
wanted_cols = ['_level',
'_override.ccgt.constraints.e_cap.max',
'_override.demand.constraints.r',
'_override.demand.constraints.r_scale_to_peak',
'_override.demand.x_map',
'_within', 'ccgt']
assert sorted(df.columns) == wanted_cols
def test_generate_location_matrix_within_only_strings(self,
sample_nested_locations):
techs = ['foo']
df = locations.generate_location_matrix(sample_nested_locations, techs)
for i in df['_within'].tolist():
assert (i is None or isinstance(i, str))
|
from flask import Flask, render_template, request, Request
from werkzeug.formparser import parse_form_data
class InputProcessed():
"""A file like object that just raises an error when it is read."""
def read(self, *args):
        raise EOFError(
            'The wsgi.input stream has already been consumed. Check '
            'environ["wsgi._post_form"] and environ["wsgi._post_files"] '
            'for previously processed form data.'
        )
readline = readlines = __iter__ = read
class MethodSpooferMiddleware():
"""
A WSGI middleware that checks for a method spoofing form field
and overrides the request method accordingly.
"""
def __init__(self, app, input_name='_method'):
self.app = app
self.input_name = input_name
def __call__(self, environ, start_response):
# We only want to spoof if the request method is POST
if environ['REQUEST_METHOD'].upper() == 'POST':
stream, form, files = parse_form_data(environ)
# Replace the wsgi.input stream with an object that will raise an error if
# it is read again, and explaining how to get previously processed form data.
environ['wsgi.input'] = InputProcessed()
# Set the processed form data on environ so it can be retrieved again inside
# the app without having to process the form data again.
environ['wsgi._post_form'] = form
environ['wsgi._post_files'] = files
method = form.get(self.input_name)
if method:
# Override the request method _if_ there was a method spoofing field.
environ['REQUEST_METHOD'] = method
return self.app(environ, start_response)
class CustomRequest(Request):
"""
A custom request object that checks for previously processed form data
instead of possibly processing form data twice.
"""
@property
def form(self):
if 'wsgi._post_form' in self.environ:
# If cached form data exists.
return self.environ['wsgi._post_form']
# Otherwise return the normal dict like object you would usually use.
return super().form
@property
def files(self):
if 'wsgi._post_files' in self.environ:
# If cached files data exists.
return self.environ['wsgi._post_files']
# Otherwise return the normal dict like object you would usually use.
return super().files
app = Flask(__name__)
app.request_class = CustomRequest
app.wsgi_app = MethodSpooferMiddleware(app.wsgi_app)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/delete', methods=['DELETE'])
def delete_the_thing():
foo = request.form.get('foo')
return f'We made it to the delete route and we can still get form data, foo: {foo}'
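# Minimal form sketch for templates/home.html that exercises the spoofing
# middleware above; the field name must match input_name ('_method' by default)
# and the markup below is an illustrative assumption, not taken from the project:
#
#     <form method="POST" action="/delete">
#         <input type="hidden" name="_method" value="DELETE">
#         <input type="text" name="foo" value="bar">
#         <button type="submit">Delete the thing</button>
#     </form>
if __name__ == '__main__':
    # Optional local run for manual testing.
    app.run(debug=True)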
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 18:02:38 2022
@author: marco
"""
import subprocess
def name_generator(old, shock_nr, test='no'):
"""
Generates a unique name for .gro, .top and .tpr files according to the cycle
number (shock_nr).
Parameters:
-----------
old: string
old file name
    shock_nr: integer
        shock number
    test: string
        if not 'no', renaming is skipped and the new file name is returned
    Returns:
    --------
    the file is saved under the new name in the folder 'shockfiles';
    in test mode the new file name is returned instead
    """
no_ext = old.split('.')[0]
ext = old.split('.')[1]
new_name = no_ext + '_' + 's' + str(shock_nr) + '.' + ext
if test == 'no':
name_change_command = 'mv ' + old + ' ' + new_name
subprocess.call(name_change_command, shell=True)
dir_change = 'mv ' + new_name + ' shockfiles'
subprocess.call(dir_change, shell=True)
else:
return new_name
def mdp_value_changer(old, new, parameter, value, test='no'):
"""
Modifies a value in the mdp file by writing a new file and deleting the old
one.
Parameters:
-----------
old: string
path to the old mdp file
new: string
path to the new mdp file
parameter: string
parameter name to be modified
value: integer
new parameter value
test: string
        name change and file replacement not executed if a test is run
Returns:
--------
writes new mdp file with modified parameters
"""
with open(old) as cur_mdp:
lines_mdp = cur_mdp.readlines()
nr_mdp_lines = len(lines_mdp)
for i in range(nr_mdp_lines):
spl = lines_mdp[i].split(' ')
if spl[0] != parameter:
with open(new, 'a') as new_mdp:
new_mdp.write(lines_mdp[i])
else:
spl[-1] = str(value) + '\n'
new_spl = ' '.join(spl)
with open(new, 'a') as new_mdp:
new_mdp.write(new_spl)
if test == 'no':
mdp_remove_command = 'rm ' + old
mdp_name_change_command = 'mv ' + new + ' ' + old
subprocess.call(mdp_remove_command, shell=True)
subprocess.call(mdp_name_change_command, shell=True)
def xtc_maker(all_atoms, lipids, shock_nr, temp_gro_name, rp, test='no'):
"""
Generates xtc files from gro files each pumping cycle in order to be able
to concatenate all files at the end of the process.
    Parameters:
    -----------
    all_atoms: MDAnalysis atomgroup of all atoms in the system
    lipids: MDAnalysis atomgroup of the lipids in the system
    shock_nr: integer
        shock number
    temp_gro_name: string
        name of the temporary gro file to write
    rp: string
        if 'no', only the lipids are written, otherwise all atoms are written
    test: string
        conversion and cleanup commands not executed if a test is run
    Returns:
    --------
    generates an xtc file; in test mode the new xtc file name is returned
    """
if rp == 'no':
lipids.write(temp_gro_name)
else:
all_atoms.write(temp_gro_name)
if len(str(shock_nr)) == 1:
new_name = 'vesicle_sA' + str(shock_nr) + '_t.xtc'
elif len(str(shock_nr)) == 2:
new_name = 'vesicle_sB' + str(shock_nr) + '_t.xtc'
else:
new_name = 'vesicle_sC' + str(shock_nr) + '_t.xtc'
if test=='no':
convcommand = 'gmx trjconv -f ' + temp_gro_name + ' -o ' + new_name
subprocess.call(convcommand, shell=True)
rmcommand = 'rm ' + temp_gro_name
subprocess.call(rmcommand, shell=True)
else:
return new_name
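# Minimal usage sketch; test mode skips the shell calls, and the file names
# below are illustrative assumptions rather than project files:
#
#     name_generator('vesicle.gro', shock_nr=3, test='yes')
#     # -> 'vesicle_s3.gro'
#     mdp_value_changer('md.mdp', 'md_new.mdp', 'nsteps', 50000, test='yes')
#     # writes 'md_new.mdp' with the nsteps line replaced, keeping 'md.mdp'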
|
from __future__ import print_function
import argparse
import time
import os
import sys
import datetime
import math
from distutils.version import LooseVersion
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import LambdaLR
from torchvision import datasets, transforms, models
import torch.utils.data.distributed
from torchsummary import summary
import cifar_resnet as resnet
import horovod.torch as hvd
from tqdm import tqdm
from utils import *
import kfac
STEP_FIRST = LooseVersion(torch.__version__) < LooseVersion('1.1.0')
# Training settings
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Example')
parser.add_argument('--model', type=str, default='resnet32',
help='ResNet model to use [20, 32, 56]')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
help='input batch size for testing (default: 128)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train (default: 200)')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='WE',
help='number of warmup epochs (default: 5)')
parser.add_argument('--batches-per-allreduce', type=int, default=1,
help='number of batches processed locally before '
'executing allreduce across workers; it multiplies '
'total batch size.')
# Optimizer Parameters
parser.add_argument('--base-lr', type=float, default=0.1, metavar='LR',
help='base learning rate (default: 0.1)')
parser.add_argument('--lr-decay', nargs='+', type=int, default=[100, 150],
help='epoch intervals to decay lr')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=5e-4, metavar='W',
help='SGD weight decay (default: 5e-4)')
# KFAC Parameters
parser.add_argument('--kfac-update-freq', type=int, default=10,
help='iters between kfac inv ops (0 for no kfac updates) (default: 10)')
parser.add_argument('--kfac-cov-update-freq', type=int, default=1,
help='iters between kfac cov ops (default: 1)')
parser.add_argument('--kfac-update-freq-alpha', type=float, default=10,
help='KFAC update freq multiplier (default: 10)')
parser.add_argument('--kfac-update-freq-schedule', nargs='+', type=int, default=None,
help='KFAC update freq schedule (default None)')
parser.add_argument('--stat-decay', type=float, default=0.95,
help='Alpha value for covariance accumulation (default: 0.95)')
parser.add_argument('--damping', type=float, default=0.003,
                    help='KFAC damping factor (default: 0.003)')
parser.add_argument('--damping-alpha', type=float, default=0.5,
help='KFAC damping decay factor (default: 0.5)')
parser.add_argument('--damping-schedule', nargs='+', type=int, default=None,
help='KFAC damping decay schedule (default None)')
parser.add_argument('--kl-clip', type=float, default=0.001,
help='KL clip (default: 0.001)')
parser.add_argument('--diag-blocks', type=int, default=1,
help='Number of blocks to approx layer factor with (default: 1)')
parser.add_argument('--diag-warmup', type=int, default=5,
help='Epoch to start diag block approximation at (default: 5)')
parser.add_argument('--distribute-layer-factors', action='store_true', default=False,
help='Compute A and G for a single layer on different workers')
# Other Parameters
parser.add_argument('--log-dir', default='./logs',
help='TensorBoard log directory')
parser.add_argument('--dir', type=str, default='/tmp/cifar10', metavar='D',
help='directory to download cifar10 dataset to')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--fp16-allreduce', action='store_true', default=False,
help='use fp16 compression during allreduce')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
verbose = True if hvd.rank() == 0 else False
if args.cuda:
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
args.log_dir = os.path.join(args.log_dir,
"cifar10_{}_kfac{}_gpu_{}_{}".format(
args.model, args.kfac_update_freq, hvd.size(),
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')))
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(args.log_dir) if verbose else None
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(4)
kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
download = True if hvd.local_rank() == 0 else False
if not download: hvd.allreduce(torch.tensor(1), name="barrier")
train_dataset = datasets.CIFAR10(root=args.dir, train=True,
download=download, transform=transform_train)
test_dataset = datasets.CIFAR10(root=args.dir, train=False,
download=download, transform=transform_test)
if download: hvd.allreduce(torch.tensor(1), name="barrier")
# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size * args.batches_per_allreduce,
sampler=train_sampler, **kwargs)
# Horovod: use DistributedSampler to partition the test data.
test_sampler = torch.utils.data.distributed.DistributedSampler(
test_dataset, num_replicas=hvd.size(), rank=hvd.rank())
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=args.test_batch_size, sampler=test_sampler, **kwargs)
if args.model.lower() == "resnet20":
model = resnet.resnet20()
elif args.model.lower() == "resnet32":
model = resnet.resnet32()
elif args.model.lower() == "resnet44":
model = resnet.resnet44()
elif args.model.lower() == "resnet56":
model = resnet.resnet56()
elif args.model.lower() == "resnet110":
model = resnet.resnet110()
if args.cuda:
model.cuda()
if verbose:
summary(model, (3, 32, 32))
criterion = nn.CrossEntropyLoss()
args.base_lr = args.base_lr * hvd.size()
use_kfac = True if args.kfac_update_freq > 0 else False
optimizer = optim.SGD(model.parameters(), lr=args.base_lr, momentum=args.momentum,
weight_decay=args.weight_decay)
if use_kfac:
preconditioner = kfac.KFAC(model, lr=args.base_lr, factor_decay=args.stat_decay,
damping=args.damping, kl_clip=args.kl_clip,
fac_update_freq=args.kfac_cov_update_freq,
kfac_update_freq=args.kfac_update_freq,
diag_blocks=args.diag_blocks,
diag_warmup=args.diag_warmup,
distribute_layer_factors=args.distribute_layer_factors)
kfac_param_scheduler = kfac.KFACParamScheduler(preconditioner,
damping_alpha=args.damping_alpha,
damping_schedule=args.damping_schedule,
update_freq_alpha=args.kfac_update_freq_alpha,
update_freq_schedule=args.kfac_update_freq_schedule)
# KFAC guarentees grads are equal across ranks before opt.step() is called
# so if we do not use kfac we need to wrap the optimizer with horovod
compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
optimizer = hvd.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Average,
backward_passes_per_step=args.batches_per_allreduce)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
lrs = create_lr_schedule(hvd.size(), args.warmup_epochs, args.lr_decay)
lr_scheduler = [LambdaLR(optimizer, lrs)]
if use_kfac:
lr_scheduler.append(LambdaLR(preconditioner, lrs))
def train(epoch):
model.train()
train_sampler.set_epoch(epoch)
train_loss = Metric('train_loss')
train_accuracy = Metric('train_accuracy')
if STEP_FIRST:
for scheduler in lr_scheduler:
scheduler.step()
if use_kfac:
kfac_param_scheduler.step(epoch)
with tqdm(total=len(train_loader),
desc='Epoch {:3d}/{:3d}'.format(epoch + 1, args.epochs),
disable=not verbose) as t:
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
for i in range(0, len(data), args.batch_size):
data_batch = data[i:i + args.batch_size]
target_batch = target[i:i + args.batch_size]
output = model(data_batch)
loss = criterion(output, target_batch)
with torch.no_grad():
train_loss.update(loss)
train_accuracy.update(accuracy(output, target_batch))
loss.div_(math.ceil(float(len(data)) / args.batch_size))
loss.backward()
optimizer.synchronize()
if use_kfac:
preconditioner.step(epoch=epoch)
with optimizer.skip_synchronize():
optimizer.step()
t.set_postfix_str("loss: {:.4f}, acc: {:.2f}%".format(
train_loss.avg.item(), 100*train_accuracy.avg.item()))
t.update(1)
if not STEP_FIRST:
for scheduler in lr_scheduler:
scheduler.step()
if use_kfac:
kfac_param_scheduler.step(epoch)
if log_writer:
log_writer.add_scalar('train/loss', train_loss.avg, epoch)
log_writer.add_scalar('train/accuracy', train_accuracy.avg, epoch)
def test(epoch):
model.eval()
test_loss = Metric('val_loss')
test_accuracy = Metric('val_accuracy')
with tqdm(total=len(test_loader),
bar_format='{l_bar}{bar}|{postfix}',
desc=' '.format(epoch + 1, args.epochs),
disable=not verbose) as t:
with torch.no_grad():
for i, (data, target) in enumerate(test_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
test_loss.update(criterion(output, target))
test_accuracy.update(accuracy(output, target))
t.update(1)
if i + 1 == len(test_loader):
t.set_postfix_str("\b\b test_loss: {:.4f}, test_acc: {:.2f}%".format(
test_loss.avg.item(), 100*test_accuracy.avg.item()),
refresh=False)
if log_writer:
log_writer.add_scalar('test/loss', test_loss.avg, epoch)
log_writer.add_scalar('test/accuracy', test_accuracy.avg, epoch)
start = time.time()
for epoch in range(args.epochs):
train(epoch)
test(epoch)
if verbose:
print("\nTraining time:", str(datetime.timedelta(seconds=time.time() - start)))
|
"""Tests for the :mod:`campy.util.randomgenerator` module."""
from campy.util.randomgenerator import RandomGenerator
def test_feed_int():
rgen = RandomGenerator.get_instance()
rgen._feed_int(5)
assert rgen.randint(0, 10) == 5
|
import cv2 as cv
src = cv.imread("D:/Images/lena.jpg")
cv.namedWindow("rgb", cv.WINDOW_NORMAL)
cv.imshow("rgb", src)
# RGB to HSV
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)
cv.namedWindow("hsv", cv.WINDOW_NORMAL)
cv.imshow("hsv", hsv)
# RGB to YUV
yuv = cv.cvtColor(src, cv.COLOR_BGR2YUV)
cv.namedWindow("yuv", cv.WINDOW_NORMAL)
cv.imshow("yuv", yuv)
# RGB to YCrCb
ycrcb = cv.cvtColor(src, cv.COLOR_BGR2YCrCb)
cv.namedWindow("ycrcb", cv.WINDOW_NORMAL)
cv.imshow("ycrcb", ycrcb)
# Mask operations
src1 = cv.imread("D:/Images/tinygreen.png")
cv.namedWindow("src1", cv.WINDOW_NORMAL)
cv.imshow("src1", src1)
# Convert the color space to HSV
hsv = cv.cvtColor(src1, cv.COLOR_BGR2HSV)
cv.namedWindow("dst1", cv.WINDOW_NORMAL)
cv.imshow("dst1", hsv)
# Build the mask; the range below corresponds to green in HSV space
mask = cv.inRange(hsv, (35, 43, 46), (77, 255, 255))
# Get the inverted mask
negMask = cv.bitwise_not(mask)
cv.namedWindow("mask", cv.WINDOW_NORMAL)
cv.namedWindow("negMask", cv.WINDOW_NORMAL)
cv.imshow("mask", mask)
cv.imshow("negMask", negMask)
# Use the mask with bitwise AND to extract the green-screen background
dst = cv.bitwise_and(src1, src1, mask=mask)
cv.namedWindow("dst", cv.WINDOW_NORMAL)
cv.imshow("dst", dst)
# Use negMask with bitwise AND to extract the foreground subject
dst2 = cv.bitwise_and(src1, src1, mask=negMask)
cv.namedWindow("dst2", cv.WINDOW_NORMAL)
cv.imshow("dst2", dst2)
cv.waitKey(0)
cv.destroyAllWindows()
|
from collections import namedtuple, defaultdict
from datetime import datetime, date
import pytest
from freezegun import freeze_time
from flask import current_app
from app.exceptions import DVLAException, NotificationTechnicalFailureException
from app.models import (
NotificationHistory,
NOTIFICATION_CREATED,
NOTIFICATION_DELIVERED,
NOTIFICATION_SENDING,
NOTIFICATION_TEMPORARY_FAILURE,
NOTIFICATION_TECHNICAL_FAILURE,
DailySortedLetter
)
from app.celery.tasks import (
check_billable_units,
get_billing_date_in_bst_from_filename,
persist_daily_sorted_letter_counts,
process_updates_from_file,
update_letter_notifications_statuses,
update_letter_notifications_to_error,
update_letter_notifications_to_sent_to_dvla,
record_daily_sorted_counts
)
from app.dao.daily_sorted_letter_dao import dao_get_daily_sorted_letter_by_billing_day
from tests.app.db import create_notification, create_service_callback_api, create_notification_history
from tests.conftest import set_config
@pytest.fixture
def notification_update():
"""
Returns a namedtuple to use as the argument for the check_billable_units function
"""
NotificationUpdate = namedtuple('NotificationUpdate', ['reference', 'status', 'page_count', 'cost_threshold'])
return NotificationUpdate('REFERENCE_ABC', 'sent', '1', 'cost')
def test_update_letter_notifications_statuses_raises_for_invalid_format(notify_api, mocker):
invalid_file = 'ref-foo|Sent|1|Unsorted\nref-bar|Sent|2'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=invalid_file)
with pytest.raises(DVLAException) as e:
update_letter_notifications_statuses(filename='NOTIFY-20170823160812-RSP.TXT')
assert 'DVLA response file: {} has an invalid format'.format('NOTIFY-20170823160812-RSP.TXT') in str(e.value)
def test_update_letter_notification_statuses_when_notification_does_not_exist_updates_notification_history(
sample_letter_template,
mocker
):
valid_file = 'ref-foo|Sent|1|Unsorted'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=valid_file)
notification = create_notification_history(sample_letter_template, reference='ref-foo', status=NOTIFICATION_SENDING,
billable_units=1)
update_letter_notifications_statuses(filename="NOTIFY-20170823160812-RSP.TXT")
updated_history = NotificationHistory.query.filter_by(id=notification.id).one()
assert updated_history.status == NOTIFICATION_DELIVERED
def test_update_letter_notifications_statuses_raises_dvla_exception(notify_api, mocker, sample_letter_template):
valid_file = 'ref-foo|Failed|1|Unsorted'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=valid_file)
create_notification(sample_letter_template, reference='ref-foo', status=NOTIFICATION_SENDING,
billable_units=0)
with pytest.raises(DVLAException) as e:
update_letter_notifications_statuses(filename="failed.txt")
failed = ["ref-foo"]
assert "DVLA response file: {filename} has failed letters with notification.reference {failures}".format(
filename="failed.txt", failures=failed
) in str(e.value)
def test_update_letter_notifications_statuses_calls_with_correct_bucket_location(notify_api, mocker):
s3_mock = mocker.patch('app.celery.tasks.s3.get_s3_object')
with set_config(notify_api, 'NOTIFY_EMAIL_DOMAIN', 'foo.bar'):
update_letter_notifications_statuses(filename='NOTIFY-20170823160812-RSP.TXT')
s3_mock.assert_called_with('{}-ftp'.format(
current_app.config['NOTIFY_EMAIL_DOMAIN']),
'NOTIFY-20170823160812-RSP.TXT'
)
def test_update_letter_notifications_statuses_builds_updates_from_content(notify_api, mocker):
valid_file = 'ref-foo|Sent|1|Unsorted\nref-bar|Sent|2|Sorted'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=valid_file)
update_mock = mocker.patch('app.celery.tasks.process_updates_from_file')
update_letter_notifications_statuses(filename='NOTIFY-20170823160812-RSP.TXT')
update_mock.assert_called_with('ref-foo|Sent|1|Unsorted\nref-bar|Sent|2|Sorted')
def test_update_letter_notifications_statuses_builds_updates_list(notify_api, mocker):
valid_file = 'ref-foo|Sent|1|Unsorted\nref-bar|Sent|2|Sorted'
updates = process_updates_from_file(valid_file)
assert len(updates) == 2
assert updates[0].reference == 'ref-foo'
assert updates[0].status == 'Sent'
assert updates[0].page_count == '1'
assert updates[0].cost_threshold == 'Unsorted'
assert updates[1].reference == 'ref-bar'
assert updates[1].status == 'Sent'
assert updates[1].page_count == '2'
assert updates[1].cost_threshold == 'Sorted'
def test_update_letter_notifications_statuses_persisted(notify_api, mocker, sample_letter_template):
sent_letter = create_notification(sample_letter_template, reference='ref-foo', status=NOTIFICATION_SENDING,
billable_units=1)
failed_letter = create_notification(sample_letter_template, reference='ref-bar', status=NOTIFICATION_SENDING,
billable_units=2)
create_service_callback_api(service=sample_letter_template.service, url="https://original_url.com")
valid_file = '{}|Sent|1|Unsorted\n{}|Failed|2|Sorted'.format(
sent_letter.reference, failed_letter.reference)
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=valid_file)
with pytest.raises(expected_exception=DVLAException) as e:
update_letter_notifications_statuses(filename='NOTIFY-20170823160812-RSP.TXT')
assert sent_letter.status == NOTIFICATION_DELIVERED
assert sent_letter.billable_units == 1
assert sent_letter.updated_at
assert failed_letter.status == NOTIFICATION_TEMPORARY_FAILURE
assert failed_letter.billable_units == 2
assert failed_letter.updated_at
assert "DVLA response file: {filename} has failed letters with notification.reference {failures}".format(
filename="NOTIFY-20170823160812-RSP.TXT", failures=[format(failed_letter.reference)]) in str(e.value)
def test_update_letter_notifications_does_not_call_send_callback_if_no_db_entry(notify_api, mocker,
sample_letter_template):
sent_letter = create_notification(sample_letter_template, reference='ref-foo', status=NOTIFICATION_SENDING,
billable_units=0)
valid_file = '{}|Sent|1|Unsorted\n'.format(sent_letter.reference)
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=valid_file)
send_mock = mocker.patch(
'app.celery.service_callback_tasks.send_delivery_status_to_service.apply_async'
)
update_letter_notifications_statuses(filename='NOTIFY-20170823160812-RSP.TXT')
send_mock.assert_not_called()
def test_update_letter_notifications_to_sent_to_dvla_updates_based_on_notification_references(
client,
sample_letter_template
):
first = create_notification(sample_letter_template, reference='first ref')
second = create_notification(sample_letter_template, reference='second ref')
dt = datetime.utcnow()
with freeze_time(dt):
update_letter_notifications_to_sent_to_dvla([first.reference])
assert first.status == NOTIFICATION_SENDING
assert first.sent_by == 'dvla'
assert first.sent_at == dt
assert first.updated_at == dt
assert second.status == NOTIFICATION_CREATED
def test_update_letter_notifications_to_error_updates_based_on_notification_references(
sample_letter_template
):
first = create_notification(sample_letter_template, reference='first ref')
second = create_notification(sample_letter_template, reference='second ref')
create_service_callback_api(service=sample_letter_template.service, url="https://original_url.com")
dt = datetime.utcnow()
with freeze_time(dt):
with pytest.raises(NotificationTechnicalFailureException) as e:
update_letter_notifications_to_error([first.reference])
assert first.reference in str(e.value)
assert first.status == NOTIFICATION_TECHNICAL_FAILURE
assert first.sent_by is None
assert first.sent_at is None
assert first.updated_at == dt
assert second.status == NOTIFICATION_CREATED
def test_check_billable_units_when_billable_units_matches_page_count(
client,
sample_letter_template,
mocker,
notification_update
):
mock_logger = mocker.patch('app.celery.tasks.current_app.logger.error')
create_notification(sample_letter_template, reference='REFERENCE_ABC', billable_units=1)
check_billable_units(notification_update)
mock_logger.assert_not_called()
def test_check_billable_units_when_billable_units_does_not_match_page_count(
client,
sample_letter_template,
mocker,
notification_update
):
mock_logger = mocker.patch('app.celery.tasks.current_app.logger.exception')
notification = create_notification(sample_letter_template, reference='REFERENCE_ABC', billable_units=3)
check_billable_units(notification_update)
mock_logger.assert_called_once_with(
'Notification with id {} has 3 billable_units but DVLA says page count is 1'.format(notification.id)
)
@pytest.mark.parametrize('filename_date, billing_date', [
('20170820230000', date(2017, 8, 21)),
('20170120230000', date(2017, 1, 20))
])
def test_get_billing_date_in_bst_from_filename(filename_date, billing_date):
filename = 'NOTIFY-{}-RSP.TXT'.format(filename_date)
result = get_billing_date_in_bst_from_filename(filename)
assert result == billing_date
@freeze_time("2018-01-11 09:00:00")
def test_persist_daily_sorted_letter_counts_saves_sorted_and_unsorted_values(client, notify_db_session):
letter_counts = defaultdict(int, **{'unsorted': 5, 'sorted': 1})
persist_daily_sorted_letter_counts(date.today(), "test.txt", letter_counts)
day = dao_get_daily_sorted_letter_by_billing_day(date.today())
assert day.unsorted_count == 5
assert day.sorted_count == 1
def test_record_daily_sorted_counts_persists_daily_sorted_letter_count(
notify_api,
notify_db_session,
mocker,
):
valid_file = 'Letter1|Sent|1|uNsOrTeD\nLetter2|Sent|2|SORTED\nLetter3|Sent|2|Sorted'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=valid_file)
assert DailySortedLetter.query.count() == 0
record_daily_sorted_counts(filename='NOTIFY-20170823160812-RSP.TXT')
daily_sorted_counts = DailySortedLetter.query.all()
assert len(daily_sorted_counts) == 1
assert daily_sorted_counts[0].sorted_count == 2
assert daily_sorted_counts[0].unsorted_count == 1
def test_record_daily_sorted_counts_raises_dvla_exception_with_unknown_sorted_status(
notify_api,
mocker,
):
file_contents = 'ref-foo|Failed|1|invalid\nrow_2|Failed|1|MM'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=file_contents)
filename = "failed.txt"
with pytest.raises(DVLAException) as e:
record_daily_sorted_counts(filename=filename)
assert "DVLA response file: {} contains unknown Sorted status".format(filename) in e.value.message
assert "'mm'" in e.value.message
assert "'invalid'" in e.value.message
def test_record_daily_sorted_counts_persists_daily_sorted_letter_count_with_no_sorted_values(
notify_api,
mocker,
notify_db_session
):
valid_file = 'Letter1|Sent|1|Unsorted\nLetter2|Sent|2|Unsorted'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=valid_file)
record_daily_sorted_counts(filename='NOTIFY-20170823160812-RSP.TXT')
daily_sorted_letter = dao_get_daily_sorted_letter_by_billing_day(date(2017, 8, 23))
assert daily_sorted_letter.unsorted_count == 2
assert daily_sorted_letter.sorted_count == 0
def test_record_daily_sorted_counts_can_run_twice_for_same_file(
notify_api,
mocker,
notify_db_session
):
valid_file = 'Letter1|Sent|1|sorted\nLetter2|Sent|2|Unsorted'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=valid_file)
record_daily_sorted_counts(filename='NOTIFY-20170823160812-RSP.TXT')
daily_sorted_letter = dao_get_daily_sorted_letter_by_billing_day(date(2017, 8, 23))
assert daily_sorted_letter.unsorted_count == 1
assert daily_sorted_letter.sorted_count == 1
updated_file = 'Letter1|Sent|1|sorted\nLetter2|Sent|2|Unsorted\nLetter3|Sent|2|Unsorted'
mocker.patch('app.celery.tasks.s3.get_s3_file', return_value=updated_file)
record_daily_sorted_counts(filename='NOTIFY-20170823160812-RSP.TXT')
daily_sorted_letter = dao_get_daily_sorted_letter_by_billing_day(date(2017, 8, 23))
assert daily_sorted_letter.unsorted_count == 2
assert daily_sorted_letter.sorted_count == 1
|
# Module that defines the basic interface that wrappers must implement.
#
# Author: Fernando García <ga.gu.fernando@gmail.com>
#
from abc import ABCMeta, abstractmethod
class WrapperBase:
"""
Base class for wrapper classes.
"""
__metaclass__ = ABCMeta
def __repr__(self):
return "WrapperBase"
def __str__(self):
return self.__repr__()
@property
@abstractmethod
def algorithms(self):
"""
Return all fitted algorithms.
"""
pass
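# Minimal sketch of a concrete wrapper; the class below is illustrative only
# and assumes the wrapper simply stores the algorithms it has fitted:
class _ExampleWrapper(WrapperBase):
    def __init__(self, fitted_algorithms):
        # Keep a copy of whatever algorithms were fitted elsewhere.
        self._algorithms = list(fitted_algorithms)
    @property
    def algorithms(self):
        """Return all fitted algorithms."""
        return self._algorithms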
|
'''
Description: densechen@foxmail.com
version: 0.0
Author: Dense Chen
Date: 1970-01-01 08:00:00
LastEditors: Dense Chen
LastEditTime: 2020-08-12 20:44:20
'''
import random
import cv2
import numpy as np
import pytorch3d.transforms as transforms3d
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.ops import iterative_closest_point
from pytorch3d.ops.points_alignment import SimilarityTransform
import utils
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, settings):
self.settings = settings
def dilated_mask(self, mask):
# without batch
# mask should be cv2 image or numpy array
mask = np.array(mask * 255, dtype=np.uint8)
if self.settings.DEBUG:
cv2.imwrite(
"{}/before_dilate.png".format(self.settings.DEBUG_PATH), mask)
kernel = np.ones((self.settings.DILATED_KERNEL_SIZE,
self.settings.DILATED_KERNEL_SIZE), np.uint8)
mask = cv2.dilate(mask, kernel)
if self.settings.DEBUG:
cv2.imwrite("{}/after_dilate.png".format(self.settings.DEBUG_PATH),
mask)
return np.array(mask > 127, dtype=np.uint8)
def add_noise_to_pose(self, pose: utils.Pose):
# with batch
# RANDOM POSE
euler = torch.tensor([
random.choice([1, -1]) * random.random(),
random.choice([1, -1]) * random.random(),
random.choice([1, -1]) * random.random()
]).view(1, 3) * self.settings.NOISE_ROT
trans = torch.tensor([
random.choice([1, -1]) * random.random(),
random.choice([1, -1]) * random.random(),
random.choice([1, -1]) * random.random()
]).view(1, 3) * self.settings.NOISE_TRANS
delta_pose = utils.Pose(Rotation=utils.build_rotation(euler,
format="euler"),
Translation=utils.build_translation(trans))
return utils.apply_transform_to_pose(pose, delta_pose)
def multi_angle_icp(self, source_points, target_points):
# build initial transform
angles = torch.tensor([[i, j, k] for i in range(0, 360, 180)
for j in range(0, 360, 180)
for k in range(0, 360, 180)]).float() / 360.0
batch_size = len(angles)
T = torch.mean(target_points, dim=0).unsqueeze(0).repeat(batch_size, 1)
init_transform = SimilarityTransform(R=utils.build_rotation(
angles, format="euler").matrix,
T=T,
s=torch.ones(batch_size))
source_points, target_points = source_points.unsqueeze(0).repeat(
batch_size, 1,
1), target_points.unsqueeze(0).repeat(batch_size, 1, 1)
icp = iterative_closest_point(source_points,
target_points,
init_transform=init_transform,
allow_reflection=True)
index = torch.min(icp.rmse, dim=0)[1]
RTs = icp.RTs
return utils.Pose(utils.build_rotation(RTs.R[index].unsqueeze(0)),
utils.build_translation(RTs.T[index].unsqueeze(0)))
def load_data(self, index):
raise NotImplementedError
def __getitem__(self, index):
mesh, image, depth, intrinsic, mask, target_pose, model_points = self.load_data(
index)
if self.settings.INIT_POSE_METHOD == "NOISE":
init_pose = self.add_noise_to_pose(
utils.unsqueeze_namedtuple(target_pose, dim=0))
init_pose = utils.squeeze_namedtuple(init_pose, dim=0)
elif self.settings.INIT_POSE_METHOD == "ICP":
raise NotImplementedError
return {
"mesh":
mesh,
"data":
utils.RawData(image, depth, intrinsic, mask, target_pose,
init_pose, model_points)
}
|
"""Constants for the MagicMirror integration."""
from logging import Logger, getLogger
LOGGER: Logger = getLogger(__package__)
DOMAIN = "magicmirror"
PLATFORMS = ["binary_sensor", "switch", "number"]
|
# Error.py
#
# Copyright (C) 2018 OSIsoft, LLC. All rights reserved.
#
# THIS SOFTWARE CONTAINS CONFIDENTIAL INFORMATION AND TRADE SECRETS OF
# OSIsoft, LLC. USE, DISCLOSURE, OR REPRODUCTION IS PROHIBITED WITHOUT
# THE PRIOR EXPRESS WRITTEN PERMISSION OF OSIsoft, LLC.
#
# RESTRICTED RIGHTS LEGEND
# Use, duplication, or disclosure by the Government is subject to restrictions
# as set forth in subparagraph (c)(1)(ii) of the Rights in Technical Data and
# Computer Software clause at DFARS 252.227.7013
#
# OSIsoft, LLC
# 1600 Alvarado St, San Leandro, CA 94577
class Error(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
# Generated by Django 3.1.6 on 2021-02-05 20:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sales', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='license',
name='activated',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='product',
name='price',
field=models.IntegerField(default=-1),
),
migrations.AlterField(
model_name='license',
name='activation_date',
field=models.DateField(null=True),
),
]
|
from glob import glob
from nltk.corpus import wordnet as wn
import numpy as np
import pickle
IMAGENET_DIR = "/media/jedrzej/Seagate/DATA/ILSVRC2012/TRAIN/"
folders_list = glob(IMAGENET_DIR+"*")
categories_list = []
all_parents = []
for category in folders_list:
category_no = category.split("/")[-1][1:]
categories_list.append(category_no)
s = wn._synset_from_pos_and_offset('n',int(category_no))
parents = s.hypernyms()
unique_parents = []
while(parents):
if( parents[0] not in unique_parents): unique_parents.append(parents[0])
if( parents[0] not in all_parents): all_parents.append(parents[0])
parents.extend(parents[0].hypernyms())
parents.remove(parents[0])
print(len(all_parents))
##########################################################
frequency = np.zeros(len(all_parents))
for category in folders_list:
category_no = category.split("/")[-1][1:]
categories_list.append(category_no)
s = wn._synset_from_pos_and_offset('n',int(category_no))
parents = s.hypernyms()
unique_parents = []
while(parents):
if( parents[0] not in unique_parents): unique_parents.append(parents[0])
parents.extend(parents[0].hypernyms())
parents.remove(parents[0])
for u_p in unique_parents:
idx = all_parents.index(u_p)
frequency[idx] +=1
f1 = np.where(frequency >1)[0]
f2 = np.where(frequency <1000)[0]
f_tot = np.intersect1d(f1,f2)
#print(np.max(frequency))
new_parents = [all_parents[i] for i in f_tot]
print(len(new_parents))
##########################################################
positive_wordnets = dict()
for p in new_parents:
positive_wordnets[str(p.lemmas()[0].name())] = []
for category in folders_list:
category_no = category.split("/")[-1][1:]
categories_list.append(category_no)
s = wn._synset_from_pos_and_offset('n',int(category_no))
parents = s.hypernyms()
unique_parents = []
while(parents):
if( parents[0] not in unique_parents): unique_parents.append(parents[0])
parents.extend(parents[0].hypernyms())
parents.remove(parents[0])
for u_p in unique_parents:
if(u_p in new_parents):
positive_wordnets[str(u_p.lemmas()[0].name())].append('n'+category_no)
#positive_wordnets[str(u_p.lemmas()[0].name())].append(str(s.lemmas()[0].name()))
print(positive_wordnets)
pickle.dump(positive_wordnets, open("./hierarchy.p", "wb"))
#print(categories_list)
|
"""
Just enough CRUD operations to get you going
"""
import hashlib
from sqlalchemy.orm import Session
from sqlalchemy.sql.functions import mode
from app.db import models, schemas
""" These constants need to be moved to a config """
SALT = "This is my salt. There are many like it, but this one is mine."
HMAC_ITER = 100000
def get_user(db: Session, user_id: int):
"""Get a user by user id"""
return db.query(models.User).filter(models.User.id == user_id).first()
def get_user_by_email(db: Session, email: str):
"""Get us user by email"""
return db.query(models.User).filter(models.User.email == email).first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
"""Get all users, takes an offset and limit"""
return db.query(models.User).offset(skip).limit(limit).all()
def create_user(db: Session, user: schemas.UserCreate):
"""Store user in DB with hashed_password"""
hashed_password = hashlib.pbkdf2_hmac(
'sha256',
user.password.encode('utf-8'),
SALT.encode('utf-8'),
HMAC_ITER
)
db_user = models.User(email=user.email, hashed_password=hashed_password)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def get_items(db: Session, skip: int = 0, limit: int = 100):
    """Get all items, takes an offset and limit"""
    return db.query(models.Item).offset(skip).limit(limit).all()
def create_user_item(db: Session, item: schemas.ItemCreate, user_id: int):
"""Store itme in the DB"""
db_item = models.Item(**item.dict(), owner_id=user_id)
db.add(db_item)
db.commit()
db.refresh(db_item)
return db_item
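# Minimal usage sketch, assuming a SQLAlchemy session factory (SessionLocal)
# and the pydantic schemas from app.db; the field values are illustrative:
#
#     db = SessionLocal()
#     new_user = create_user(db, schemas.UserCreate(email="a@b.c", password="secret"))
#     fetched = get_user_by_email(db, "a@b.c")
#     items = get_items(db, skip=0, limit=10)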
|
#!/usr/bin/env python
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington CoMotion, email: license@uw.edu.
## @file /GUIs/pyrosetta_toolkit/window_modules/vicinity_options/vicinity_window.py
## @brief Vicinity dialog window
## @author Jared Adolf-Bryfogle (jadolfbr@gmail.com)
from tkinter import *
import tkinter.simpledialog
from os import getcwd
pwd = getcwd()
class vicinityrelaxwindow(tkinter.simpledialog.Dialog):
"""
    This is the Vicinity Relax Window, used to specify options while running relax. It may be used in other protocols if possible.
    It is set up as a tkSimpleDialog, but this can be changed fairly easily to allow broader use.
"""
def body(self, main):
#self.main = Toplevel(main)
self.main = main
#self.main.title("Neighbor Options")
#self.column = column; #Column to begin Grid (Int)
#self.row = row; #Row to begin Grid (Int)
#self.pwd = pwd
row = 0; column= 0
#Options:
self.FixTarget = StringVar(); self.FixTarget.set("Open")
self.FixVaccinity = StringVar(); self.FixVaccinity.set("Open")
self.FixBoth = StringVar(); self.FixBoth.set("UnSet")
#Set Tk
print(pwd)
#Photo
        VacPhoto = PhotoImage(file=(pwd + "/Media/Vaccinity_Smaller.gif"))
self.Photo = Label(self.main, image=VacPhoto)
self.Photo.image = VacPhoto
#Button/Labels
#self.setOptionsbutton_ = Button(self.main, text = "Continue...", command = lambda: self.setOptions())
self.control = Label(self.main, text = "Control.")
#Fix Options
self.FixBBlab = Label(self.main, text = " Loop/Target ")
self.FixBBOpt = OptionMenu(self.main, self.FixTarget, "Fix", "Fix BB", "Fix Chi", "Open")
self.FixChilabel_ = Label(self.main, text = " Vaccinity ")
self.FixChiOpt = OptionMenu(self.main, self.FixVaccinity, "Fix", "Fix BB", "Fix Chi", "Open")
self.FixBotlabel_ = Label(self.main, text = "Fix BackBone and Rotamers")
self.FixBothOpt = OptionMenu(self.main, self.FixBoth, "UnSet", "Target", "Vaccinity")
#ShoTk
self.Photo.grid(row =row, column = column+1, rowspan=17, columnspan=17)
self.FixBBlab.grid(row = row+5, column= column)
self.FixBBOpt.grid(row = row+6, column = column, sticky="ew")
self.FixChilabel_.grid(row=row+5, column = column+18)
self.FixChiOpt.grid(row=row+6, column = column+18, sticky="ew")
self.FixBotlabel_.grid(row=row, column = column+8+1)
self.FixBothOpt.grid(row = row+1, column = column+8+1)
self.control.grid(row=row+18, column=column+8+1)
#self.setOptionsbutton_.grid(row = row +19, column = column+8+1)
def apply(self):
fixloo = self.FixTarget.get()
fixvac = self.FixVaccinity.get()
fixboth = self.FixBoth.get()
self.result = (fixloo, fixvac, fixboth)
#self.main.destroy()
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import numpy as np
from numpy import random
#
from functools import partial
from multiprocessing import Pool
#
from scipy.spatial.distance import pdist, cdist
from scipy.stats import kstwobign, pearsonr
from scipy.stats import genextreme, chi2, norm
from scipy.interpolate import interp1d
from numba import jit
# starvine imports
from starvine.bvcopula.pc_base import PairCopula
def gauss_copula_test(x1, y1, wgts=None, nboot=8000, dist='ks',
alpha=0.05, procs=4, resample=8):
"""!
@brief Tests if a gaussian copula is a good description of the
dep structure of a bivaraiate data set.
@param x1 ndarray, shape (n1, ) where n1 is number of samples
@param y1 ndarray, shape (n1, )
@param wgts ndarray, shape (n1, )
    @param dist str. one of ('ad', 'ad-avg', 'ks', 'ks-avg'):
        'ad' for Anderson-Darling, 'ks' for Kolmogorov-Smirnov
@param procs int. number of processes to use. Default=4
    @param resample int. Bootstrap resample size. Only used if wgts are supplied.
@param alpha float. test significance level. Default=0.05
@return (p_val, d_0, h_dict)
p_val float. p-value of test
d_0 float. Distance metric
h_dict dict. {'h0': Bool} Result of hypothesis test
@note Also works with weighted samples by resampling original data
with replacement.
let h0 be the hypothesis that the gaussian copula fits the data
Malevergne, Y. and Sornette, D. Testing the Gaussian Copula
Hypothesis for Financial Asset Dependencies.
Quantitative Finance. Vol 3. pp. 231-250, 2001.
"""
    assert nboot >= 80  # require adequate sample size for the hypothesis test
if wgts is not None:
        # resample weighted data with replacement
pc = PairCopula(x1, y1, weights=wgts, resample=resample)
else:
pc = PairCopula(x1, y1)
# standard normal transform
y_hat_1 = norm.ppf(pc.UU)
y_hat_2 = norm.ppf(pc.VV)
y_hat = np.array([y_hat_1, y_hat_2]).T
# compute cov matrix and pre-compute inverse
cov_hat = np.cov(y_hat.T, bias=True)
cov_hat_inv = np.linalg.inv(cov_hat)
assert cov_hat_inv.shape == (2, 2,)
# est orig distance metric
d_0 = dist_measure(y_hat, cov_hat_inv, dist)
print("KS-Gauss Dist= %f)" % d_0)
# estimate p-value by boostrap resampling
d = np.zeros(nboot)
if procs > 1:
pool = Pool(procs)
d = pool.map(partial(sample_d,
cov_hat=cov_hat,
cov_hat_inv=cov_hat_inv,
dist=dist,
N=len(x1)
),
list(range(nboot)))
d = np.array(d)
pool.close()
else:
for i in range(nboot):
d[i] = sample_d(i, cov_hat, cov_hat_inv, dist, len(x1))
print("KS-Gauss Empirical Dist Range= (%f, %f))" % (np.min(d), np.max(d)))
# compute p-val
# p_val = 1 - d_cdf(d_0)
p_val = (d >= d_0).sum() / len(d)
h_dict = {'h0': p_val > alpha}
return p_val, d_0, h_dict
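# Minimal usage sketch for gauss_copula_test on synthetic correlated Gaussian
# data; the toy data below is an assumption, not part of the module:
#
#     rho = 0.6
#     xy = np.random.multivariate_normal([0., 0.], [[1., rho], [rho, 1.]], 500)
#     p_val, d_0, h = gauss_copula_test(xy[:, 0], xy[:, 1], nboot=200, procs=1)
#     # h['h0'] is True when the Gaussian copula hypothesis is not rejected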
def sample_d(i, cov_hat, cov_hat_inv, dist, N):
y_sampled = \
np.random.multivariate_normal(mean=[0., 0.],
cov=cov_hat, size=N)
d = dist_measure(y_sampled, cov_hat_inv, dist)
return d
def dist_measure(y_hat, cov_hat_inv, dist):
# gen z^2 RV which should be distributed according to a chi-squared
# distribution if h0 is true (Malevergne 2001)
z_hat_sqrd = test_z_vector(y_hat, cov_hat_inv)
# compute empirical CDF of z_hat_sqrd
F_z_x, F_z_y = ecdf(z_hat_sqrd)
# dof should be ndim (pp. 9 in Malevergrne 2001)
ndim = y_hat.shape[1]
chi2_frozen = chi2(df=ndim, loc=0., scale=1.0)
F_z_chi2 = chi2_frozen.cdf(z_hat_sqrd)
# order lowest to higest (enforce cdf monotone)
F_z_chi2_ = np.array([z_hat_sqrd, F_z_chi2]).T
sorted_F_chi2 = F_z_chi2_[F_z_chi2_[:, 0].argsort()]
F_chi2 = sorted_F_chi2[:, 1]
# check dims
assert len(F_z_y) == len(F_chi2)
# Kolmogorov-Smirnov distance
dist_map_dict = {'ks': 1, 'ks-avg': 2, 'ad': 3, 'ad-avg': 4}
dist_int = dist_map_dict[dist]
d = ks_ad_dist(F_z_y, F_chi2, dist_int)
return d
@jit(nopython=True)
def ks_ad_dist(F_z_y, F_chi2, dist=1):
d = 0.0
if dist == 1:
d = np.max(np.abs(F_z_y - F_chi2))
elif dist == 2:
# more robust to outliers
d = np.mean(np.abs(F_z_y - F_chi2))
else:
numer = np.abs(F_z_y - F_chi2)
denom = np.sqrt(F_chi2 * (1. - F_chi2))
if dist == 3:
d = np.max(numer / denom)
else:
# more robust to outliers
d = np.mean(numer / denom)
return d
@jit(nopython=True)
def test_z_vector(y_hat, cov_inv):
"""!
@brief Helper function for dist_measure
"""
z_hat_sqrd = np.zeros(y_hat.shape[0])
for k in range(y_hat.shape[0]):
for i in range(2):
for j in range(2):
z_hat_sqrd[k] += y_hat[:, i][k] * cov_inv[i, j] * y_hat[:, j][k]
return z_hat_sqrd
@jit(nopython=True)
def ecdf(x):
"""!
@brief Empirical cdf
@param x np_1darray
@return np_1darray empirical cdf
"""
xs = np.sort(x)
ys = np.arange(1, len(xs)+1)/float(len(xs))
return xs, ys
def ks2d2s(x1, y1, x2, y2, nboot=None):
"""!
@brief Two-dimensional Kolmogorov-Smirnov test on two samples.
@param x1 ndarray, shape (n1, )
@param y1 ndarray, shape (n1, )
@param x2 ndarray, shape (n2, )
@param y2 ndarray, shape (n2, )
@return tuple of floats (p-val, KS_stat)
Two-tailed p-value,
KS statistic
@note This is the two-sided K-S test. Small p-values means that the two
samples are significantly different. Note that the p-value is only an
    approximation as the analytic distribution is unknown. The approximation is
accurate enough when N > ~20 and p-value < ~0.20 or so.
When p-value > 0.20 the value may not be accurate but it implies that the two
samples are not significantly different. (cf. Press 2007)
Peacock, J.A. 1983, Two-Dimensional Goodness-of-Fit Testing in Astronomy,
Monthly Notices of the Royal Astronomical Society, vol. 202, pp. 615-627
Fasano, G. and Franceschini, A. 1987, A Multidimensional Version of the
Kolmogorov-Smirnov Test, Monthly Notices of the Royal Astronomical Society,
vol. 225, pp. 155-170 Press, W.H. et al. 2007, Numerical Recipes, section
14.8
"""
assert (len(x1) == len(y1)) and (len(x2) == len(y2))
n1, n2 = len(x1), len(x2)
D = avgmaxdist(x1, y1, x2, y2)
if nboot is None:
sqen = np.sqrt(n1 * n2 / (n1 + n2))
r1 = pearsonr(x1, y1)[0]
r2 = pearsonr(x2, y2)[0]
r = np.sqrt(1 - 0.5 * (r1**2 + r2**2))
d = D * sqen / (1 + r * (0.25 - 0.75 / sqen))
p = kstwobign.sf(d)
else:
n = n1 + n2
x = np.concatenate([x1, x2])
y = np.concatenate([y1, y2])
d = np.empty(nboot, 'f')
for i in range(nboot):
idx = random.choice(n, n, replace=True)
ix1, ix2 = idx[:n1], idx[n1:]
#ix1 = random.choice(n, n1, replace=True)
#ix2 = random.choice(n, n2, replace=True)
d[i] = avgmaxdist(x[ix1], y[ix1], x[ix2], y[ix2])
p = np.sum(d > D).astype('f') / nboot
return p, D
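# Minimal usage sketch for ks2d2s, comparing two synthetic 2-D samples; the
# toy data below is an assumption:
#
#     a = np.random.multivariate_normal([0, 0], [[1., .5], [.5, 1.]], 300)
#     b = np.random.multivariate_normal([0, 0], [[1., 0.], [0., 1.]], 300)
#     p, D = ks2d2s(a[:, 0], a[:, 1], b[:, 0], b[:, 1])
#     # a small p suggests the two samples differ significantly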
def avgmaxdist(x1, y1, x2, y2):
D1 = maxdist(x1, y1, x2, y2)
D2 = maxdist(x2, y2, x1, y1)
return (D1 + D2) / 2
@jit(nopython=True)
def maxdist(x1, y1, x2, y2):
n1 = len(x1)
D1 = np.empty((n1, 4))
for i in range(n1):
a1, b1, c1, d1 = quadct(x1[i], y1[i], x1, y1)
a2, b2, c2, d2 = quadct(x1[i], y1[i], x2, y2)
D1[i] = [a1 - a2, b1 - b2, c1 - c2, d1 - d2]
# re-assign the point to maximize difference,
# the discrepancy is significant for N < ~50
D1[:, 0] -= 1 / n1
dmin, dmax = -D1.min(), D1.max() + 1 / n1
return max(dmin, dmax)
@jit(nopython=True)
def quadct(x, y, xx, yy):
n = len(xx)
ix1, ix2 = xx <= x, yy <= y
a = np.sum(ix1 & ix2) / n
b = np.sum(ix1 & ~ix2) / n
c = np.sum(~ix1 & ix2) / n
d = 1 - a - b - c
return a, b, c, d
def mardias_test(x, wgts=None, alpha=0.05, cov_bias=False):
"""!
@brief computes multivariate Mardia's tests for normality.
    Mardia, K. V. (1970), Measures of multivariate skewness and kurtosis with
applications. Biometrika, 57(3):519-530.
@param x np_2d array with shape = (n_obs, n_dim)
        Each col represents a variable, each row is a single
        observation of all of those vars
@param wgts observation weights np_1darray with shape = (n_obs,)
TODO: Does not support weighted samples yet
@param alpha float. significance level (default == 0.05)
@param cov_bias bool. argument passed to np.cov for covar matrix normalization
@return p1, p1c, p2, h_dict
p1: (float) skewness test p-val
        p1c: (float) skewness test p-val adjusted for small sample size ~N < 50
p2: (float) kurtosis test p-val
let h0 be the null hypothesis that the data follows a multivar Gaussian
hdict: dict of hypothesis test results
{'alpha': (float) significance level,
'skew_h0': (bool) if true we can accept h0 wrt. skewness test
'skew_small_sample_h0': (bool) if true we can accept h0 even if N < 50
'kurt_h0': (bool) if true we can accept h0 wrt. kurtosis test
'h0': (bool) if true we can accept h0 wrt skew and kurt
}
"""
b1p, b2p, cov = mvar_skew_kurt(x, wgts, cov_bias)
n, p = x.shape[0], x.shape[1]
k = ((p + 1) * (n + 1) * (n + 3)) / (n * (((n + 1) * (p + 1.)) - 6))
# dof of chi2 rv
dof = (p * (p + 1.) * (p + 2.)) / 6.
g1c = (n * b1p * k) / 6.
g1 = (n * b1p) / 6.
p1 = 1 - chi2.cdf(g1, dof)
p1c = 1 - chi2.cdf(g1c, dof)
g2 = (b2p - (p * (p + 2)))/(np.sqrt((8. * p * (p + 2.))/n))
p2 = 2 * (1 - norm.cdf(abs(g2)))
    # hypothesis result dict
h_dict = {'alpha': alpha,
'skew_h0': p1 >= alpha, # false if skew null hypoth is false
              'skew_small_sample_h0': p1c >= alpha,
'kurt_h0': p2 >= alpha, # false if kurtosis null hypoth is false
'h0': (p1 > alpha) & (p2 > alpha), # false if either test fails
'cov': cov # covar matrix of data
}
return p1, p1c, p2, h_dict
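# Minimal usage sketch for mardias_test on a (n_obs, n_dim) array; the toy
# data below is an assumption:
#
#     x = np.random.multivariate_normal([0, 0, 0], np.eye(3), 400)
#     p1, p1c, p2, h = mardias_test(x)
#     # h['h0'] is True when multivariate normality is not rejected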
def mvar_skew_kurt(x, wgts=None, cov_bias=False):
"""!
@brief computes multivariate skewness and kurtosis
@param x np_2d array with shape = (n_obs, n_dim)
        Each col represents a variable, each row is a single
observation of all of those vars
@param cov_bias bool. argument passed to np.cov for covar matrix normalization
(default is to normalize cov matrix by N-1)
"""
# compute average vector
mvar_mu = np.average(x, weights=wgts, axis=0)
# compute covar matrix
cov = np.cov(x.T, bias=cov_bias)
cov_inv = np.linalg.inv(cov)
# compute multivar skewness
mvar_skew = (1. / (np.shape(x)[0] ** 2.)) * interior_sum_b1(x, mvar_mu, cov_inv)
# compute multivar kurtosis
mvar_kurt = (1 / x.shape[0]) * interior_sum_b2(x, mvar_mu, cov_inv)
return mvar_skew, mvar_kurt, cov
@jit(nopython=True)
def interior_sum_b1(x, mu, cov_inv):
"""!
@brief Helper function for mvar_skew_kurt
"""
sum_b1 = 0.0
for i in range(x.shape[0]):
for j in range(x.shape[0]):
sum_b1 += np.dot(x[i, :] - mu, np.dot(cov_inv, (x[j, :] - mu))) ** 3.0
return sum_b1
@jit(nopython=True)
def interior_sum_b2(x, mu, cov_inv):
"""!
@brief Helper function for mvar_skew_kurt
"""
sum_b2 = 0.0
for i in range(x.shape[0]):
sum_b2 += np.dot(x[i, :] - mu, np.dot(cov_inv, (x[i, :] - mu))) ** 2.0
return sum_b2
def estat2d(x1, y1, x2, y2, **kwds):
return estat(np.c_[x1, y1], np.c_[x2, y2], **kwds)
def estat(x, y, nboot=1000, replace=False, method='log', fitting=False):
"""!
    @brief Energy distance test.
Aslan, B, Zech, G (2005) Statistical energy as a tool for binning-free
multivariate goodness-of-fit tests, two-sample comparison and unfolding.
Nuc Instr and Meth in Phys Res A 537: 626-636
Szekely, G, Rizzo, M (2014) Energy statistics: A class of statistics
based on distances. J Stat Planning & Infer 143: 1249-1272
Energy test by Brian Lau:
multdist: https://github.com/brian-lau/multdist
"""
n, N = len(x), len(x) + len(y)
stack = np.vstack([x, y])
stack = (stack - stack.mean(0)) / stack.std(0)
if replace:
rand = lambda x: random.randint(x, size=x)
else:
rand = random.permutation
en = energy(stack[:n], stack[n:], method)
en_boot = np.zeros(nboot, 'f')
for i in range(nboot):
idx = rand(N)
en_boot[i] = energy(stack[idx[:n]], stack[idx[n:]], method)
if fitting:
param = genextreme.fit(en_boot)
p = genextreme.sf(en, *param)
return p, en, param
else:
p = (en_boot >= en).sum() / nboot
return p, en, en_boot
def energy(x, y, method='log'):
dx, dy, dxy = pdist(x), pdist(y), cdist(x, y)
n, m = len(x), len(y)
if method == 'log':
dx, dy, dxy = np.log(dx), np.log(dy), np.log(dxy)
elif method == 'gaussian':
raise NotImplementedError
elif method == 'linear':
raise NotImplementedError
else:
raise ValueError
z = dxy.sum() / (n * m) - dx.sum() / n**2 - dy.sum() / m**2
# z = ((n*m)/(n+m)) * z # ref. SR
return z
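# A minimal usage sketch, assuming this module's top-level imports (numpy as np,
# scipy's chi2/norm/genextreme, pdist/cdist, numba's jit) are present as they are
# for the functions above. Sample sizes and the seed are illustrative only.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    gauss = rng.multivariate_normal([0., 0.], [[1., 0.2], [0.2, 1.]], size=200)
    p1, p1c, p2, h_dict = mardias_test(gauss)
    print("Mardia skewness p=%.3f, kurtosis p=%.3f, accept h0: %s" % (p1, p2, h_dict['h0']))
    # two-sample energy test between two bivariate samples
    other = rng.multivariate_normal([0.5, 0.], [[1., 0.], [0., 1.]], size=200)
    p, en, _ = estat2d(gauss[:, 0], gauss[:, 1], other[:, 0], other[:, 1], nboot=200)
    print("energy test p-value: %.3f" % p)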
|
#!/usr/bin/python
import random
import os
import time
import sys
secure_random = random.SystemRandom()
if len(sys.argv) == 2:
gpu = sys.argv[1]
elif len(sys.argv) > 2:
sys.exit('Unknown input')
else:
gpu = '0'
for _ in range(1):
#seed = str(random.randint(1, 10**6))
seed = secure_random.choice(['125894'])
dc = secure_random.choice(['0.02'])
invT = secure_random.choice(['1e3'])
anneal, c = secure_random.choice([('1.005', 'sa'), ('1.005', 'em'), ('1.005', 'sghmc'), ('1.0', 'sa')])
v0, v1, sparse = secure_random.choice([('0.5', '1e-3', '0.3'), ('0.1', '5e-4', '0.5'), ('0.1', '5e-5', '0.7'), ('0.005', '1e-5', '0.9')])
#os.system('python bayes_cnn.py -prune 0 -save 1 -lr 2e-6 ' + ' -seed ' + seed + ' -sparse 0 ' + ' -invT 1e9 -gpu ' + gpu + ' -anneal 1 ' + ' > ./output/resnet20_cifar10_invT_' + invT + '_anneal_1_pretrain_rand_' + seed)
os.system('python bayes_cnn.py -c ' + c + ' -dc ' + dc + ' -prune 1 ' + ' -v0 ' + v0 + ' -v1 ' + v1 + ' -seed ' + seed + ' -sparse ' + sparse + ' -invT ' + invT + ' -gpu ' + gpu + ' -anneal ' + anneal + ' > ./output/resnet20_cifar10_dc_' + dc + '_v0_' + v0 + '_v1_' + v1 + '_invT_' + invT + '_anneal_' + anneal + '_sparse_' + sparse + '_' + c + '_gilb_rand_' + seed)
|
def generate_discount_codes_producer():
pass
|
from django.shortcuts import render
from django.utils import timezone
from .models import Post
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
def homepage_view(request):
return render(request, 'blog/index.html', {})
def contact_view(request):
return render(request, 'blog/contact.html', {})
|
import discord
from discord.ext import commands
import aiohttp
import requests
class Image(commands.Cog, name='Image'):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def cat(self, ctx):
"""Gives You Random Image Of Cat"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('http://aws.random.cat/meow') as r:
data = await r.json()
em = discord.Embed(
title='Cat', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['file'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def dog(self, ctx):
"""Gives You Random Image Of Dog"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('http://random.dog/woof.json') as r:
data = await r.json()
em = discord.Embed(
title='Dog', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['url'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def fox(self, ctx):
"""Gives You Random Image Of Fox"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/fox') as r:
data = await r.json()
em = discord.Embed(
title='Fox', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def panda(self, ctx):
"""Gives You Random Image Of Panda"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/panda') as r:
data = await r.json()
em = discord.Embed(
title='Panda', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def red_panda(self, ctx):
"""Gives You Random Image Of Red Panda"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/red_panda') as r:
data = await r.json()
em = discord.Embed(
title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def bird(self, ctx):
"""Gives You Random Image Of Bird"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/birb') as r:
data = await r.json()
em = discord.Embed(
title='Bird', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
    @commands.command(aliases=['kola'])
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def koala(self, ctx):
        """Gives You Random Image Of Koala"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/koala') as r:
data = await r.json()
em = discord.Embed(
                title='Koala', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def pikachu(self, ctx):
"""Gives You Random Image Or GIF Of Pikachu"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/pikachu') as r:
data = await r.json()
em = discord.Embed(
title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
# @commands.command()
# @commands.cooldown(1, 10, commands.BucketType.user)
# async def yt(self,ctx,comment:str):
# """Comments On Youtube"""
# url = f"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}"
# em = discord.Embed(color = ctx.author.color)
# em.set_image(url=url)
# em.set_footer(text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url)
# await ctx.send(embed=em)
def setup(bot):
bot.add_cog(Image(bot))
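# Illustrative wiring (token and prefix are placeholders): the embeds above
# assume the bot object exposes a `color` attribute, so a minimal launcher
# might look like the sketch below.
#
# bot = commands.Bot(command_prefix='!')
# bot.color = discord.Color.blurple()
# bot.add_cog(Image(bot))   # or load this file as an extension, which calls setup()
# bot.run('YOUR_TOKEN')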
|
# coding: utf-8
from euclid3 import Vector2 as vec
vec.__repr__ = lambda self: 'vec({0}, {1})'.format(self.x, self.y)
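# Quick sanity check of the patched repr (assumes the euclid3 package is installed).
if __name__ == '__main__':
    v = vec(1.5, -2.0)
    print(repr(v))  # -> vec(1.5, -2.0)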
|
import json, requests, os, logging
from rtpipe.parsecands import read_candidates
from elasticsearch import Elasticsearch
import activegit
from rflearn.features import stat_features
from rflearn.classify import calcscores
from IPython.display import Image
from IPython.core.display import HTML
logging.basicConfig()
es = Elasticsearch(['136.152.227.149:9200']) # index on berkeley macbook
def readandpush(candsfile, push=True, addscores=True, tag=None, command='index'):
""" Read, classify, and push candidates to realfast index.
Optionally push to index with scores.
Optionally can add string to 'tag' field.
"""
datalist = readcandsfile(candsfile, tag=tag)
    if addscores:
        scores = classify(datalist)
        for i in range(len(datalist)):
            datalist[i]['rbscore'] = scores[i]
if push:
res = pushdata(datalist, command=command)
logging.info('Post status: {0}'.format(res))
else:
return datalist
def readcandsfile(candsfile, plotdir='/users/claw/public_html/plots', tag=None):
""" Read candidates from pickle file and format as list of dictionaries
plotdir is path to png plot files which are required in order to keep in datalist
"""
if tag:
        assert isinstance(tag, str)
loc, prop, state = read_candidates(candsfile, returnstate=True)
fileroot = state['fileroot']
if plotdir:
logging.info('Filtering data based on presence of png files in {0}'.format(plotdir))
else:
logging.info('Appending all data to datalist.')
datalist = []
for i in range(len(loc)):
data = {}
data['obs'] = fileroot
for feat in state['featureind']:
col = state['featureind'].index(feat)
data[feat] = loc[i][col]
for feat in state['features']:
col = state['features'].index(feat)
data[feat] = prop[i][col]
uniqueid = dataid(data)
data['candidate_png'] = 'cands_{0}.png'.format(uniqueid)
data['labeled'] = '0'
if tag:
data['tag'] = tag
else:
data['tag'] = ''
if plotdir:
if os.path.exists(os.path.join(plotdir, data['candidate_png'])):
datalist.append(data)
else:
datalist.append(data)
return datalist
def indextodatalist(unlabeled=True):
""" Get all from index and return datalist """
# add logic to filter for certain tag (e.g., labelled) or presence of certain field (e.g, rbscore)
# fields = ','.join(features + featureind + ['obs', 'candidate_png'])
count = es.count()['count']
if unlabeled:
res = es.search(index='realfast', doc_type='cand', body={"query": {"term": {"labeled": "0"}}, "size": count})
else:
res = es.search(index='realfast', doc_type='cand', body={"query": {"match_all": {}}, "size": count})
return [hit['_source'] for hit in res['hits']['hits']]
def restorecands(datalist, features=['snr1', 'immax1', 'l1', 'm1', 'specstd', 'specskew', 'speckurtosis', 'imskew', 'imkurtosis'],
featureind=['scan', 'segment', 'int', 'dmind', 'dtind', 'beamnum']):
""" Take list of dicts and forms as list of lists in rtpipe standard order
Order of features and featureind lists is important.
"""
obslist = []
loclist = []
proplist = []
for data in datalist:
# build features per data dict
loc = []
prop = []
for fi in featureind:
loc.append(data[fi])
for fe in features:
prop.append(data[fe])
# append data
obslist.append(data['obs'])
loclist.append(tuple(loc))
proplist.append(tuple(prop))
return obslist, loclist, proplist
def classify(datalist, agpath='/users/claw/code/alnotebook'):
""" Applies activegit repo classifier to datalist """
obslist, loc, prop = restorecands(datalist)
statfeats = stat_features(prop)
scores = calcscores(statfeats, agpath=agpath)
return scores
def pushdata(datalist, index='realfast', doc_type='cand', command='index'):
""" Pushes list of data to index
command can be 'index' or 'delete' (update by indexing with same key)
"""
status = []
for data in datalist:
uniqueid = dataid(data)
if command == 'index':
res = es.index(index=index, doc_type=doc_type, id=uniqueid, body=data)
elif command == 'delete':
res = es.delete(index=index, doc_type=doc_type, id=uniqueid)
status.append(res['_shards']['successful'])
return status
def dataid(data):
""" Returns id string for given data dict """
return '{0}_sc{1}-seg{2}-i{3}-dm{4}-dt{5}'.format(data['obs'], data['scan'], data['segment'], data['int'], data['dmind'], data['dtind'])
def getids():
""" Gets candidates from realfast index and returns them as list """
count = es.count()['count']
res = es.search(index='realfast', doc_type='cand', fields=['_id'], body={"query": {"match_all": {}}, "size": count})
return [hit['_id'] for hit in res['hits']['hits']]
def postjson(cleanjson, url='http://136.152.227.149:9200/realfast/cand/_bulk?'):
""" **Deprecated** Post json to elasticsearch instance """
# jsonStr = json.dumps(postdata,separators=(',', ':'))
# cleanjson = jsonStr.replace('}},','}}\n').replace('},','}\n').replace(']','').replace('[','') + '\n'
# return cleanjson
r = requests.post(url, data=cleanjson)
logging.info('Post status: {0}'.format(r))
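# Illustrative usage only (the pickle filename below is hypothetical): read a
# candidates file without pushing, then push explicitly once the realfast
# elasticsearch index configured above is reachable.
if __name__ == '__main__':
    example_candsfile = 'cands_example_merge.pkl'  # placeholder path
    datalist = readcandsfile(example_candsfile, plotdir=None)
    print('Read {0} candidates'.format(len(datalist)))
    # pushdata(datalist)  # uncomment to index the candidates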
|
import torch
import numpy as np
import argparse
import time
import util
import os
import matplotlib.pyplot as plt
import torch.nn as nn
import pandas as pd
from fastprogress import progress_bar
import torch.nn.functional as F
from model_stgat import stgat
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
parser = argparse.ArgumentParser(description='STGAT')
parser.add_argument('--adj_path', type=str, default='data/sensor_graph/adj_mx_distance_normalized.csv',
help='adj data path')
parser.add_argument('--data_path', type=str, default='data/METR-LA12_shuffle', help='data path')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes')
parser.add_argument('--num_layers', type=int, default=1, help='layers of gat')
parser.add_argument('--in_dim', type=int, default=2, help='number of nodes features')
parser.add_argument('--num_hidden', type=int, default=8, help='number of hidden in gat')
parser.add_argument('--out_dim', type=int, default=8, help='number of out_dim')
parser.add_argument('--heads', type=int, default=8, help='number of attention heads')
parser.add_argument('--feat_drop', type=float, default=0.6, help='feature dropout rate')
parser.add_argument('--attn_drop', type=float, default=0.6, help='attention dropout rate')
parser.add_argument('--negative_slope', type=float, default=0.2, help='negative slope of LeakyReLU')
parser.add_argument('--activation', action="store_true", default=F.elu, help=' ')
parser.add_argument('--residual', action="store_true", default=False, help=' ')
parser.add_argument('--interval', type=int, default=100, help='')
parser.add_argument('--num_epochs', type=int, default=100, help='')
parser.add_argument('--save', type=str, default='./experiment/combine/continue_train_la_shuffle2/', help='save path')
parser.add_argument('--expid', type=int, default=1, help='experiment id')
parser.add_argument('--seq_len', type=int, default=12, help='time length of inputs')
parser.add_argument('--pre_len', type=int, default=12, help='time length of prediction')
args = parser.parse_args()
if torch.cuda.is_available():
device = torch.device("cuda:0")
print("Let's use {} GPU!".format(device))
else:
device = torch.device("cpu")
def evaluate_all(pred, target):
mape = util.masked_mape(pred, target, 0.0).item()
rmse = util.masked_rmse(pred, target, 0.0).item()
mae = util.masked_mae(pred, target, 0.0).item()
return mape, rmse, mae
def run_demo(best_path, record_save_path):
print("============Begin Testing============")
test_record_path = f'{record_save_path}/stgat_test_record.csv'
dataloader = util.load_dataset(device, args.data_path, args.batch_size, args.batch_size, args.batch_size)
g_temp = util.add_nodes_edges(adj_filename=args.adj_path, num_of_vertices=args.num_nodes)
scaler = dataloader['scaler']
run_gconv = 1
lr_decay_rate = 0.97
model = stgat(g=g_temp, run_gconv=run_gconv)
model.to(device)
model.zero_grad()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
optimizer.zero_grad()
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lambda epoch: lr_decay_rate ** epoch)
model.load_state_dict(torch.load(best_path))
outputs = []
target = torch.Tensor(dataloader['y_test']).to(device)
target = target[:, :, :, 0]
for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
testx = torch.Tensor(x).to(device).transpose(1, 3)
testx = nn.functional.pad(testx, (1, 0, 0, 0))
with torch.no_grad():
pred = model.forward(testx).squeeze(3)
outputs.append(pred)
yhat = torch.cat(outputs, dim=0)
yhat = yhat[:target.size(0), ...]
test_record, amape, armse, amae = [], [], [], []
pred = scaler.inverse_transform(yhat)
for i in range(12):
pred_t = pred[:, i, :]
real_target = target[:, i, :]
evaluation = evaluate_all(pred_t, real_target)
log = 'test for horizon {:d}, Test MAPE: {:.4f}, Test RMSE: {:.4f}, Test MAE: {:.4f}'
print(log.format(i + 1, evaluation[0], evaluation[1], evaluation[2]))
amape.append(evaluation[0])
armse.append(evaluation[1])
amae.append(evaluation[2])
test_record.append([x for x in evaluation])
test_record_df = pd.DataFrame(test_record, columns=['mape', 'rmse', 'mae']).rename_axis('t')
test_record_df.round(3).to_csv(test_record_path)
log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
print("=" * 10)
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
print("--- New Folder: ", path)
else:
print("--- Folder already exists:", path)
if __name__ == "__main__":
base_path = './pre_train_model/BAY_dataset' # or ./pre_train_model/LA_dataset/
best_path = f'{base_path}/stgat_1.45.pkl'
# best_path = './pre_train_model/BAY_dataset/stgat_2.88.pkl'
record_save_path = f'{base_path}/stgat'
mkdir(record_save_path)
run_demo(best_path, record_save_path)
|
"""Command executor router class."""
from __future__ import annotations
from typing import Union
from opentrons.util.helpers import utc_now
from opentrons.hardware_control.api import API as HardwareAPI
from ..state import StateView
from ..errors import ProtocolEngineError, UnexpectedProtocolError
from .. import resources, command_models as cmd
from .equipment import EquipmentHandler
from .movement import MovementHandler
from .pipetting import PipettingHandler
class CommandExecutor:
"""
CommandExecutor class.
A CommandExecutor manages triggering the side-effects of a given command
and collecting the results of those side-effects.
"""
_equipment_handler: EquipmentHandler
_movement_handler: MovementHandler
_pipetting_handler: PipettingHandler
@classmethod
def create(
cls,
hardware: HardwareAPI,
state: StateView
) -> CommandExecutor:
"""Create a CommandExecutor instance."""
id_generator = resources.IdGenerator()
labware_data = resources.LabwareData()
equipment_handler = EquipmentHandler(
state=state,
id_generator=id_generator,
labware_data=labware_data,
hardware=hardware,
)
movement_handler = MovementHandler(
state=state,
hardware=hardware
)
pipetting_handler = PipettingHandler(
state=state,
hardware=hardware,
movement_handler=movement_handler,
)
return cls(
equipment_handler=equipment_handler,
movement_handler=movement_handler,
pipetting_handler=pipetting_handler,
)
def __init__(
self,
equipment_handler: EquipmentHandler,
movement_handler: MovementHandler,
pipetting_handler: PipettingHandler,
) -> None:
"""
Initialize a CommandExecutor.
This constructor does not inject provider implementations. Prefer the
CommandExecutor.create factory classmethod.
"""
self._equipment_handler = equipment_handler
self._movement_handler = movement_handler
self._pipetting_handler = pipetting_handler
async def execute_command(
self,
command: cmd.RunningCommandType,
) -> Union[cmd.CompletedCommandType, cmd.FailedCommandType]:
"""Execute a Command, returning a CompletedCommand or FailedCommand."""
try:
return await self._try_to_execute_command(command)
except ProtocolEngineError as error:
return command.to_failed(error, utc_now())
except Exception as unhandled_error:
return command.to_failed(
UnexpectedProtocolError(unhandled_error),
utc_now()
)
# TODO(mc, 2020-11-12): this routing logic is not scaling well. Re-work
# the base command model interface so that a command contains a method
# needed to execute itself, and the CommandExecutor calls that method.
async def _try_to_execute_command(
self,
command: cmd.RunningCommandType,
) -> cmd.CompletedCommandType:
"""Execute commands by routing to specific handlers."""
# call to correct implementation based on command request type
# load labware
if isinstance(command.request, cmd.LoadLabwareRequest):
lw_res = await self._equipment_handler.handle_load_labware(
command.request
)
return command.to_completed(lw_res, utc_now())
# load pipette
elif isinstance(command.request, cmd.LoadPipetteRequest):
pip_res = await self._equipment_handler.handle_load_pipette(
command.request,
)
return command.to_completed(pip_res, utc_now())
# move to well
elif isinstance(command.request, cmd.MoveToWellRequest):
move_res = await self._movement_handler.handle_move_to_well(
command.request
)
return command.to_completed(move_res, utc_now())
# pick up tip
elif isinstance(command.request, cmd.PickUpTipRequest):
pick_up_res = await self._pipetting_handler.handle_pick_up_tip(
command.request
)
return command.to_completed(pick_up_res, utc_now())
raise NotImplementedError(f"{type(command.request)} not implemented")
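# Hypothetical sketch (names invented, not part of this package) of the rework
# suggested in the TODO above: each request type would expose its own `execute`
# coroutine, so the executor could drop the isinstance routing.
#
# class SelfExecutingRequest(Protocol):
#     async def execute(self, handlers: "HandlerBundle") -> "CommandResult": ...
#
# async def _try_to_execute_command(self, command):
#     result = await command.request.execute(self._handlers)
#     return command.to_completed(result, utc_now())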
|
from redcmd.api import maincmd, execute_commandline
from .polls import Polls
@maincmd
def cli(poll_name, id=None):
params = {}
params['post_id'] = id
polls = Polls()
polls.run(poll_name, params=params)
def main():
execute_commandline()
|
import json
import os
import shutil
import stat
import sys
import platform
import tarfile
import zipfile
from enum import Enum
import requests
import urllib3
import managed_tool
from maintenance import Maintenance
from tool import Tool
HOME_PATH = os.environ.get("HOME")
DEVTC_HOME_PATH = "{}/.devtc".format(HOME_PATH)
DEVTC_BIN_PATH = "{}/bin".format(DEVTC_HOME_PATH)
DEVTC_ENV_VARIABLE = "DEVTC_HOME"
class Process(Enum):
INSTALL = 'install'
REMOVE = 'remove'
def modify_env_variables(install=True):
option = "Setting" if install else "Resetting"
print("{} devtc environment variable {}={}...".format(option, DEVTC_ENV_VARIABLE, DEVTC_HOME_PATH))
bash_rc_path = "{}/.bashrc".format(HOME_PATH)
bash_rc_backup_path = "{}.bak".format(bash_rc_path)
env_variable_content = '\n' + 'export {}={}\nexport PATH=$PATH:${}/bin\n'.format(DEVTC_ENV_VARIABLE, DEVTC_HOME_PATH, DEVTC_ENV_VARIABLE)
try:
file_exists = os.path.isfile(bash_rc_path)
if file_exists:
shutil.copyfile(bash_rc_path, bash_rc_backup_path)
with open(bash_rc_path, 'r') as bash_rc_file:
bash_rc_content = bash_rc_file.read()
with open(bash_rc_path, 'wt') as bash_rc_file:
set_env_var = install and DEVTC_ENV_VARIABLE not in bash_rc_content
new_bash_rc_content = bash_rc_content + env_variable_content if set_env_var else bash_rc_content.replace(env_variable_content, '')
bash_rc_file.write(new_bash_rc_content)
print("devtc environment successfully modified")
else:
print("Could not set devtc environment variable - missing .bashrc user configuration")
except RuntimeError as e:
if os.path.isfile(bash_rc_backup_path):
os.rename(bash_rc_backup_path, bash_rc_path)
print("Could not set devtc environment variable", e)
def install_devtc():
try:
if HOME_PATH is None:
raise ValueError("Could not install devtc - as no HOME Variable is set")
if is_devtc_installed():
print("devtc already installed in {}".format(DEVTC_HOME_PATH))
return
print("Installing devtc in {}...".format(DEVTC_HOME_PATH))
os.mkdir(DEVTC_HOME_PATH)
os.mkdir(DEVTC_BIN_PATH)
modify_env_variables()
print("devtc installed successfully")
except RuntimeError as e:
print("Could not install devtc", e)
def remove_devtc():
if not is_devtc_installed():
print("devtc is already removed!")
return
shutil.rmtree(DEVTC_HOME_PATH, ignore_errors=True)
modify_env_variables(False)
print("devtc successfully removed")
def is_devtc_installed():
return os.path.isdir(DEVTC_HOME_PATH)
def process_devtc(process):
try:
install_devtc() if Process(process) == Process.INSTALL else remove_devtc()
except ValueError: # in case of no valid input of process (install, remove) Enum generation will raise this exception
print("Did you mean install?")
usage()
def install_tool(name, release_version, download_url, package_extension, package_binary_path):
try:
if not is_devtc_installed():
print("Please install devtc first!")
return
if is_tool_installed(name):
print("Last version of {} {} is already installed".format(name, release_version))
return
print("Installing {}...".format(name))
tool_home_path = "{}/{}".format(DEVTC_HOME_PATH, name)
tool_last_version_path = "{}/{}".format(tool_home_path, release_version)
os.mkdir(tool_home_path)
os.mkdir(tool_last_version_path)
download(download_url, tool_last_version_path, package_extension)
tool_sym_link_path = "{}/default".format(tool_home_path)
os.symlink(tool_last_version_path, tool_sym_link_path)
tool_binary_path = "{}/{}".format(tool_sym_link_path, package_binary_path)
os.chmod(tool_binary_path, os.stat(tool_binary_path).st_mode | stat.S_IEXEC) # guarantee execution
devtc_tool_sym_link_path = "{}/{}".format(DEVTC_BIN_PATH, name)
os.symlink(tool_binary_path, devtc_tool_sym_link_path)
print("{} successfully installed in {}/{}".format(name, DEVTC_BIN_PATH, name))
except RuntimeError as e:
print("Could not install {}".format(name), e)
def remove_tool(name):
if not is_tool_installed(name):
print("{} is not installed!".format(name))
return
print("Removing {}...".format(name))
tool_home_path = "{}/{}".format(DEVTC_HOME_PATH, name)
shutil.rmtree(tool_home_path, ignore_errors=True)
tool_link_path = "{}/{}".format(DEVTC_BIN_PATH, name)
if os.path.islink(tool_link_path):
os.unlink(tool_link_path)
print("{} successfully removed".format(name))
def process_tool(name, process):
linux = "Linux"
windows = "Windows"
darwin = "Darwin"
platform_code = platform.system()
platform_param = ""
if platform_code == linux:
platform_param = linux
elif platform_code == windows:
platform_param = windows
elif platform_code == darwin:
platform_param = darwin
    else:
        print("Unknown platform <{}>!".format(platform_code))
        return
try:
mt = retrieve_managed_tool(name, platform_param)
if mt is None:
print("Unknown tool <{}>!".format(name))
return
install_tool(mt.name, mt.last_release_version, mt.download_url, mt.package_extension, mt.package_binary_path) if Process(process) == Process.INSTALL else remove_tool(name)
except ValueError: # in case of no valid input of process (install, remove) Enum generation will raise this exception
print("Did you mean install?")
usage()
def process_t(tool):
url = "http://localhost:9090/tools"
requests.post(url, tool.json(), headers={'Content-Type': "application/json"})
def process_m(tool_name, maintenance):
url = "http://localhost:9090/tools/{}/maintenance".format(tool_name)
requests.post(url, maintenance.json(), headers={'Content-Type': "application/json"})
def download(url, last_version_path, package_extension):
print("Downloading tool to {}...".format(last_version_path))
package_path = get_package_path(last_version_path, package_extension)
http = urllib3.PoolManager()
response = http.request('GET', url, preload_content=False)
try:
with open(package_path, 'wb') as out:
while True:
data = response.read(65536)
if not data:
break
out.write(data)
response.release_conn()
extract_binaries(last_version_path, package_extension)
os.remove(package_path)
except RuntimeError as e:
print("Could not download and extract binaries successfully - ", e)
def retrieve_managed_tool(name, platform_param):
params = {'platform': platform_param}
url = "http://localhost:9090/toolchain/{}".format(name)
return managed_tool.single_mapper(requests.get(url, params).json())
def retrieve_all_managed_tools(platform_param):
params = {'platform': platform_param}
url = "http://localhost:9090/toolchain"
return managed_tool.mapper(requests.get(url, params).json())
def get_package_path(last_version_path, package_extension):
return "{}/package.{}".format(last_version_path, package_extension)
def extract_binaries(last_version_path, package_extension):
print("Extracting Tool binaries...")
package_path = get_package_path(last_version_path, package_extension)
if package_extension == "zip":
opener, mode = zipfile.ZipFile, 'r'
elif package_extension == "tar":
opener, mode = tarfile.open, 'r:'
elif package_extension == "tar.gz":
opener, mode = tarfile.open, 'r:gz'
elif package_extension == "tar.bz2":
opener, mode = tarfile.open, 'r:bz2'
else:
raise ValueError("Could not extract `%s` as no appropriate extractor is found" % package_path)
with opener(package_path, mode) as file:
file.extractall(last_version_path)
print("Tool binaries are extracted to {}".format(last_version_path))
def is_tool_installed(name):
home_path = "{}/{}".format(DEVTC_HOME_PATH, name)
return os.path.isdir(home_path)
def usage():
print("Usage: devtc_app.py [global options] [tool_to_install]\n" +
"\n" +
"The available commands are listed below.\n" +
"Before installing any tool, you have to install devtc first.\n" +
"\n" +
"Examples:\n" +
"devtc_app.py install Installs devtc\n" +
"devtc_app.py remove Removes devtc with all installed tools\n" +
"devtc_app.py install terraform Installs terraform tool\n" +
"devtc_app.py remove terraform Removes terraform tool\n" +
"devtc_app.py t Creates a new tool\n" +
"devtc_app.py m Creates a new maintenance for a tool\n" +
"\n" +
"Main commands:\n" +
"install Installs a tool\n" +
"remove Removes a tool\n" +
"t Creates a new tool\n" +
"m Creates a new maintenance"
)
commands = sys.argv
number_of_commands = len(commands)
if number_of_commands == 1:
usage()
elif number_of_commands == 2:
cm = commands[1]
if cm == "devtc":
process_devtc(cm)
elif cm == "t":
tool_name_in = input("Enter tool name:\n")
process_t(Tool(tool_name_in))
elif cm == "m":
tool_name_in = input("Enter tool name:\n")
maintainer_name = input("Enter maintainer name:\n")
docs_url = input("Enter docs url:\n")
download_url_template = input("Enter download url template for example {}:\n".format("https://host/tool-{release-version}-{platform-code}.{package-extension}"))
package_binary_path_template = input("Enter binary path template for example {}:\n".format("tool-{release-version}"))
package_extension = input("Enter package extension of binaries:\n")
release_version = input("Enter last release version:\n")
supported_platform_codes = json.loads(input("Enter platform codes as json array for example {}:\n".format(["x64-linux", "x64-windows"])))
instructions = None
process_m(tool_name_in, Maintenance(maintainer_name, docs_url, download_url_template, package_binary_path_template, package_extension, release_version, supported_platform_codes, instructions))
elif number_of_commands == 3:
    process_tool(commands[2], commands[1])
else:
print("Too many arguments!")
usage()
|
# -*- encoding: utf-8 -*-
#
#
# Copyright (C) 2004-2006 André Wobst <wobsta@users.sourceforge.net>
#
# This file is part of PyX (http://pyx.sourceforge.net/).
#
# PyX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import datetime
from pyx.graph import style
from pyx.graph.axis import axis, rater
"""some experimental code for creating a time axis
- it needs python 2.3 to be used (it is based on the new datetime data type)
- a timeaxis is always based on the datetime data type (there is no distinction between times and dates)
"""
class timeaxis(axis.linear):
"time axis mapping based "
# TODO: how to deal with reversed timeaxis?
def __init__(self, parter=None, rater=rater.linear(), **args):
axis._regularaxis.__init__(self, divisor=None, **args)
self.parter = parter
self.rater = rater
def convert(self, data, x):
# XXX float division of timedelta instances
def mstimedelta(td):
"return the timedelta in microseconds"
return td.microseconds + 1000000*(td.seconds + 3600*24*td.days)
return mstimedelta(x - data.min) / float(mstimedelta(data.max - data.min))
# we could store float(mstimedelta(self.dx)) instead of self.dx, but
# I prefer a different solution (not based on huge integers) for the
# future
zero = datetime.timedelta(0)
class timetick(datetime.datetime):
def __init__(self, year, month, day, ticklevel=0, labellevel=0, label=None, labelattrs=[], **kwargs):
datetime.datetime.__init__(self, year, month, day, **kwargs)
self.ticklevel = ticklevel
self.labellevel = labellevel
self.label = label
self.labelattrs = labelattrs[:]
def merge(self, other):
if self.ticklevel is None or (other.ticklevel is not None and other.ticklevel < self.ticklevel):
self.ticklevel = other.ticklevel
if self.labellevel is None or (other.labellevel is not None and other.labellevel < self.labellevel):
self.labellevel = other.labellevel
class timetexter:
def __init__(self, format="%c"):
self.format = format
def labels(self, ticks):
for tick in ticks:
if tick.labellevel is not None and tick.label is None:
tick.label = tick.strftime(self.format)
|
#!/usr/local/bin/python3
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ds_codes = {
0: "FixedIDDefault",
1: "FixedIDByte",
3: "FixedIDInt",
4: "FixedIDNone",
2: "FixedIDShort",
10: "CacheableLinkedList",
11: "Properties",
17: "PdxType",
26: "BooleanArray",
27: "CharArray",
41: "NullObj",
42: "CacheableString",
43: "Class",
44: "JavaSerializable",
45: "DataSerializable",
46: "CacheableBytes",
47: "CacheableInt16Array",
48: "CacheableInt32Array",
49: "CacheableInt64Array",
50: "CacheableFloatArray",
51: "CacheableDoubleArray",
52: "CacheableObjectArray",
53: "CacheableBoolean",
54: "CacheableCharacter",
55: "CacheableByte",
56: "CacheableInt16",
57: "CacheableInt32",
58: "CacheableInt64",
59: "CacheableFloat",
60: "CacheableDouble",
61: "CacheableDate",
63: "CacheableFileName",
64: "CacheableStringArray",
65: "CacheableArrayList",
66: "CacheableHashSet",
67: "CacheableHashMap",
68: "CacheableTimeUnit",
69: "CacheableNullString",
70: "CacheableHashTable",
71: "CacheableVector",
72: "CacheableIdentityHashMap",
73: "CacheableLinkedHashSet",
74: "CacheableStack",
87: "CacheableASCIIString",
88: "CacheableASCIIStringHuge",
89: "CacheableStringHuge",
92: "InternalDistributedMember",
94: "CacheableEnum",
38: "ClientProxyMembershipId",
39: "CacheableUserData",
38: "CacheableUserData2",
37: "CacheableUserData4",
93: "PDX",
94: "PDX_ENUM",
37: "InterestResultPolicy",
}
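# Note: keys 37, 38 and 94 each appear twice in the literal above. In a Python
# dict the later entry silently wins, so lookups resolve to "InterestResultPolicy",
# "CacheableUserData2" and "PDX_ENUM" respectively.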
|
import unittest
from nzmath.group import *
from nzmath.finitefield import FinitePrimeFieldElement, FinitePrimeField
from nzmath.intresidue import *
from nzmath.permute import Permute, PermGroup
a1 = GroupElement(Permute([2, 4, 1, 3])) #Multiplication Group
a2 = GroupElement(Permute([3, 1, 4, 2]))
aa1 = a1.getGroup()
b1 = GroupElement(IntegerResidueClass(4, 30)) #additive Group
b2 = GroupElement(IntegerResidueClass(8, 30))
bb1 = b1.getGroup()
c1_a = GroupElement(FinitePrimeFieldElement(20, 37)) #Field
cc1_a = c1_a.getGroup()
c2 = GroupElement(FinitePrimeFieldElement(15, 37))
c1_m = GroupElement(c1_a.entity)
cc1_m = c1_m.getGroup()
c1_m.setOperation(1)
cc1_m.setOperation(1)
bg = AbelianGenerate([b1, b2])
class GroupTest (unittest.TestCase):
def testEqual(self):
assert(aa1 == Group(Permute([2, 4, 1, 3]), 1))
assert(bb1 == Group(IntegerResidueClassRing(30)))
assert(cc1_a == Group(FinitePrimeField(37)))
assert(cc1_m == Group(FinitePrimeField(37), 1))
def testidentity(self):
assert(GroupElement(Permute([1, 2, 3, 4]), 1) == aa1.identity())
assert(GroupElement(IntegerResidueClass(0, 30)) == bb1.identity())
assert(GroupElement(FinitePrimeFieldElement(0, 37)) == cc1_a.identity())
assert(GroupElement(FinitePrimeFieldElement(1, 37), 1) == cc1_m.identity())
def testGroupOrder(self):
assert(24 == aa1.grouporder())
assert(30 == bb1.grouporder())
assert(37 == cc1_a.grouporder())
assert(36 == cc1_m.grouporder())
class GroupElementTest(unittest.TestCase):
def testEqual(self):
assert(a1 == GroupElement(Permute([2, 4, 1, 3]), 1))
assert(b1 == GroupElement(IntegerResidueClass(4, 30)))
assert(c1_a == GroupElement(FinitePrimeFieldElement(20, 37)))
assert(c1_m == GroupElement(FinitePrimeFieldElement(20, 37), 1))
def testOpe(self):
assert(GroupElement(Permute([1, 2, 3, 4]), 1) == a1.ope(a2))
assert(GroupElement(IntegerResidueClass(12, 30)) == b1.ope(b2))
assert(GroupElement(FinitePrimeFieldElement(35, 37)) == c1_a.ope(c2))
assert(GroupElement(FinitePrimeFieldElement(4, 37), 1) == c1_m.ope(c2))
def testOpe2(self):
assert(GroupElement(Permute([3, 1, 4, 2]), 1) == a1.ope2(3))
assert(GroupElement(IntegerResidueClass(2, 30)) == b1.ope2(8))
assert(GroupElement(FinitePrimeFieldElement(3, 37)) == c1_a.ope2(2))
assert(GroupElement(FinitePrimeFieldElement(30, 37), 1) == c1_m.ope2(2))
def testInverse(self):
assert(GroupElement(Permute([3, 1, 4, 2]), 1) == a1.inverse())
assert(GroupElement(IntegerResidueClass(26, 30)) == b1.inverse())
assert(GroupElement(FinitePrimeFieldElement(17, 37)) == c1_a.inverse())
assert(GroupElement(FinitePrimeFieldElement(13, 37), 1) == c1_m.inverse())
def testOrder(self):
assert(4 == a1.order())
assert(15 == b1.order())
assert(37 == c1_a.order())
assert(36 == c1_m.order())
def testT_Order(self):
assert(4 == a1.t_order())
        assert(15 == b1.t_order())
assert(37 == c1_a.t_order())
assert(36 == c1_m.t_order())
def testGetGroup(self):
assert(Group(PermGroup([1, 2, 3, 4]), 1) == a1.getGroup())
assert(Group(IntegerResidueClassRing(30)) == b1.getGroup())
assert(Group(FinitePrimeField(37)) == c1_a.getGroup())
assert(Group(FinitePrimeField(37), 1) == c1_m.getGroup())
class AbelianGenerateTest(unittest.TestCase):
def testRelationLattice(self):
result = bg.relationLattice()
assert(((4 * result[1, 1] + 8 * result[2, 1]) % 30) == 0)
assert(((4 * result[1, 2] + 8 * result[2, 2]) % 30) == 0)
def testComputeStructure(self):
        assert(([IntegerResidueClass(4, 30)], 15) == bg.computeStructure())
def suite(suffix = "Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
|
# -*- coding: utf-8 -*-
'''
Tests to try out packeting. Potentially ephemeral
'''
# pylint: skip-file
# pylint: disable=C0103
from ioflo.base.odicting import odict
from salt.transport.road.raet import (raeting, nacling, packeting,
devicing, transacting, stacking)
def test():
'''
Test packeting.
'''
data = odict(hk=1, bk=raeting.bodyKinds.json)
body = odict(msg='Hello Raet World', extra='what is this')
packet0 = packeting.TxPacket(embody=body, data=data, )
print packet0.body.data
packet0.pack()
print packet0.packed
stuff = []
for i in range(300):
stuff.append(str(i).rjust(4, " "))
stuff = "".join(stuff)
data.update(bk=raeting.bodyKinds.raw)
packet0 = packeting.TxPacket(embody=stuff, data=data, )
packet0.pack()
print packet0.packed
rejoin = []
if packet0.segmented:
for index, segment in packet0.segments.items():
print index, segment.packed
rejoin.append(segment.body.packed)
rejoin = "".join(rejoin)
print stuff == rejoin
signer = nacling.Signer()
masterSignKeyHex = signer.keyhex
masterVerKeyHex = signer.verhex
privateer = nacling.Privateer()
masterPriKeyHex = privateer.keyhex
masterPubKeyHex = privateer.pubhex
signer = nacling.Signer()
minionSignKeyHex = signer.keyhex
minionVerKeyHex = signer.verhex
privateer = nacling.Privateer()
minionPriKeyHex = privateer.keyhex
minionPubKeyHex = privateer.pubhex
#master stack
device = devicing.LocalDevice(did=1,
sigkey=masterSignKeyHex,
prikey=masterPriKeyHex)
stack0 = stacking.StackUdp(device=device)
remote1 = devicing.RemoteDevice( did=2,
verkey=minionVerKeyHex,
pubkey=minionPubKeyHex,)
stack0.addRemoteDevice(remote1)
#minion stack
device = devicing.LocalDevice( did=2,
ha=("", raeting.RAET_TEST_PORT),
sigkey=minionSignKeyHex,
prikey=minionPriKeyHex,)
stack1 = stacking.StackUdp(device=device)
remote0 = devicing.RemoteDevice( did=1,
ha=('127.0.0.1', raeting.RAET_PORT),
verkey=masterVerKeyHex,
pubkey=masterPubKeyHex,)
stack1.addRemoteDevice(remote0)
remote0.publee = nacling.Publican(key=remote1.privee.pubhex)
remote1.publee = nacling.Publican(key=remote0.privee.pubhex)
data.update(sd=1, dd=2, bk=raeting.bodyKinds.raw, fk=raeting.footKinds.nacl)
packet0 = packeting.TxPacket(stack=stack0, embody=stuff, data=data, )
packet0.pack()
print packet0.packed
rejoin = []
if packet0.segmented:
for index, segment in packet0.segments.items():
print index, segment.packed
rejoin.append(segment.coat.packed)
rejoin = "".join(rejoin)
print stuff == rejoin
segmentage = None
if packet0.segmented:
for segment in packet0.segments.values():
packet = packeting.RxPacket(stack=stack1, packed=segment.packed)
packet.parseOuter()
if packet.segmentive:
if not segmentage:
segmentage = packeting.RxPacket(stack=packet.stack,
data=packet.data)
segmentage.parseSegment(packet)
if segmentage.desegmentable():
segmentage.desegmentize()
break
if segmentage:
if not stack1.parseInner(segmentage):
print "*******BAD SEGMENTAGE********"
return
print segmentage.body.packed
print segmentage.body.data
print segmentage.body.packed == packet0.body.packed
body = odict(stuff=stuff)
print body
data.update(sd=1, dd=2, bk=raeting.bodyKinds.json, fk=raeting.footKinds.nacl)
packet0 = packeting.TxPacket(stack=stack0, embody=body, data=data, )
packet0.pack()
print packet0.packed
segmentage = None
if packet0.segmented:
for segment in packet0.segments.values():
packet = packeting.RxPacket(stack=stack1, packed=segment.packed)
packet.parseOuter()
if packet.segmentive:
if not segmentage:
segmentage = packeting.RxPacket(stack=packet.stack,
data=packet.data)
segmentage.parseSegment(packet)
if segmentage.desegmentable():
segmentage.desegmentize()
break
if segmentage:
if not stack1.parseInner(segmentage):
print "*******BAD SEGMENTAGE********"
return
print segmentage.body.packed
print segmentage.body.data
print segmentage.body.packed == packet0.body.packed
body = odict(stuff=stuff)
print body
data.update(sd=1, dd=2,
bk=raeting.bodyKinds.json,
ck=raeting.coatKinds.nacl,
fk=raeting.footKinds.nacl)
packet0 = packeting.TxPacket(stack=stack0, embody=body, data=data, )
packet0.pack()
print "Body"
print packet0.body.size, packet0.body.packed
print "Coat"
print packet0.coat.size, packet0.coat.packed
print "Head"
print packet0.head.size, packet0.head.packed
print "Foot"
print packet0.foot.size, packet0.foot.packed
print "Packet"
print packet0.size, packet0.packed
segmentage = None
if packet0.segmented:
for segment in packet0.segments.values():
packet = packeting.RxPacket(stack=stack1, packed=segment.packed)
packet.parseOuter()
if packet.segmentive:
if not segmentage:
segmentage = packeting.RxPacket(stack=packet.stack,
data=packet.data)
segmentage.parseSegment(packet)
if segmentage.desegmentable():
segmentage.desegmentize()
break
if segmentage:
if not stack1.parseInner(segmentage):
print "*******BAD SEGMENTAGE********"
print segmentage.body.packed
print segmentage.body.data
print segmentage.body.packed == packet0.body.packed
if __name__ == "__main__":
test()
|
from fastapi import APIRouter, Depends, Form
from typing import List, Dict, Optional
from instagrapi import Client
from instagrapi.mixins.insights import POST_TYPE, TIME_FRAME, DATA_ORDERING
from dependencies import ClientStorage, get_clients
router = APIRouter(
prefix="/insights",
tags=["insights"],
responses={404: {"description": "endpoint tidak ditemukan."}},
)
@router.post("/media_feed_all", response_model=List[Dict])
async def media_feed_all(sessionid: str = Form(...),
post_type: POST_TYPE = "ALL",
time_frame: TIME_FRAME = "TWO_YEARS",
data_ordering: DATA_ORDERING = "REACH_COUNT",
count: int = 0,
clients: ClientStorage = Depends(get_clients)) -> List[Dict]:
"""Return medias with insights
"""
cl = clients.get(sessionid)
return cl.insights_media_feed_all(post_type, time_frame, data_ordering, count, sleep=2)
@router.post("/account", response_model=Dict)
async def account(sessionid: str = Form(...),
clients: ClientStorage = Depends(get_clients)) -> Dict:
"""Get insights for account
"""
cl = clients.get(sessionid)
return cl.insights_account()
@router.post("/media", response_model=Dict)
async def media(sessionid: str = Form(...),
media_pk: int = Form(...),
clients: ClientStorage = Depends(get_clients)) -> Dict:
"""Get insights data for media
"""
cl = clients.get(sessionid)
return cl.insights_media(media_pk)
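# Illustrative client-side helper (not used by the router): the /insights
# endpoints above expect the sessionid as form data. The base URL is an
# assumption about where the FastAPI app mounting this router is served.
def example_account_insights(sessionid: str, base_url: str = "http://127.0.0.1:8000") -> Dict:
    import requests  # local import; only needed for this example
    resp = requests.post(f"{base_url}/insights/account", data={"sessionid": sessionid})
    resp.raise_for_status()
    return resp.json()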
|
suma = 0.0
fraccion = 0.1
for c in range(10):
    suma = suma + fraccion
    print(suma)
if (suma == 1.0):
    print("The sum is 1")
else:
    print("The sum is not 1")
|
# Created by Patrick Kao
import itertools
from pathlib import Path
import stable_baselines
from stable_baselines import DQN, A2C, ACER, ACKTR, PPO2
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.policies import MlpPolicy
from gameRL.game_simulators.blackjack import BlackjackCustomEnv
from gameRL.game_simulators.blackjack_count import BlackjackEnvwithRunningCount
from gameRL.training_scripts.utils import LargeEvalCallback
def train_multi(params):
Path("saved_models").mkdir(parents=True, exist_ok=True)
eval_callback = LargeEvalCallback(n_steps=params["TIMESTEPS_PER_MODEL"] // 100)
for (name, model_gen), rho, num_decks, max_hand_sum in itertools.product(
params["models_to_train"],
params["RHO_TO_TRY"],
params["DECKS_TO_TRY"],
params["MAX_HAND_SUM_TO_TRY"]):
        # to save time, only run configs where at least two of the three sweep values sit at their middle setting
rho_match = params["RHO_TO_TRY"].index(rho) == 1
deck_match = params["DECKS_TO_TRY"].index(num_decks) == 1
hand_match = params["MAX_HAND_SUM_TO_TRY"].index(max_hand_sum) == 1
if params.get("reduce_runs", True) and sum([rho_match, deck_match, hand_match]) < 2:
continue
descriptor = f"{name}/sum_{max_hand_sum}/rho_{rho}_nd_{num_decks}"
log = f"./runs/{descriptor}"
env = BlackjackEnvwithRunningCount(num_decks, natural_bonus=True, rho=rho,
max_hand_sum=max_hand_sum, allow_observe=True)
# env = BlackjackCustomEnv(num_decks, natural_bonus=True, rho=rho,
# max_hand_sum=max_hand_sum, simple_game=True)
model = model_gen(env, log)
model.learn(total_timesteps=params["TIMESTEPS_PER_MODEL"], callback=eval_callback)
# test game
reward, std = evaluate_policy(model, env, n_eval_episodes=2000)
print(
f"Average reward for model {name} with: rho={rho}, num decks={num_decks}, max hand sum="
f"{max_hand_sum}: {reward}")
# save
model.save(f"saved_models/{descriptor.replace('/', '_')}")
env.close()
if __name__ == "__main__":
params = {
"TIMESTEPS_PER_MODEL": int(7e5),
"RHO_TO_TRY": [0.25, 0.75, 0.95],
"DECKS_TO_TRY": [1, 3, 10],
"MAX_HAND_SUM_TO_TRY": [19, 21, 24],
# for each model, name of mode, model
"models_to_train": [
(
"dqn",
lambda use_env, log_name: DQN(stable_baselines.deepq.policies.MlpPolicy, use_env,
tensorboard_log=log_name)),
("a2c", lambda use_env, log_name: A2C(MlpPolicy, use_env, tensorboard_log=log_name)),
("acer", lambda use_env, log_name: ACER(MlpPolicy, use_env,
tensorboard_log=log_name)),
(
"acktr",
lambda use_env, log_name: ACKTR(MlpPolicy, use_env, tensorboard_log=log_name)),
("ppo2", lambda use_env, log_name: PPO2(MlpPolicy, use_env,
tensorboard_log=log_name)),
],
}
# params = {
# "TIMESTEPS_PER_MODEL": int(7e5),
# "RHO_TO_TRY": [0.95],
# "DECKS_TO_TRY": [3],
# "MAX_HAND_SUM_TO_TRY": [21],
# "reduce_runs": False,
# # for each model, name of mode, model
# "models_to_train": [
# (
# "dqn",
# lambda use_env, log_name: DQN(stable_baselines.deepq.policies.MlpPolicy, use_env,
# tensorboard_log=log_name)),
# ("a2c", lambda use_env, log_name: A2C(MlpPolicy, use_env, tensorboard_log=log_name)),
# ("acer", lambda use_env, log_name: ACER(MlpPolicy, use_env, tensorboard_log=log_name)),
# (
# "acktr",
# lambda use_env, log_name: ACKTR(MlpPolicy, use_env, tensorboard_log=log_name)),
# ("ppo2", lambda use_env, log_name: PPO2(MlpPolicy, use_env, tensorboard_log=log_name)),
# ],
# }
train_multi(params)
|
# -*- coding=utf8 -*-
## process the images from `raw/` into single letters, save them in `processed/`
from PIL import Image
from os import walk
from rawimg import randomName
RAW_PATH = 'raw/'
TEMP_PATH = 'monochrome/'
TARGET_PATH = 'processed/'
THRESHOLD = 150 # gray pixel threshold
# SURROUNDING = [(x, y) for y in [-1,0,1] for x in [-1,0,1]]
SURROUNDING = [(0,1), (0,-1), (-1,0), (1,0)] # define the isolated points
def rotate(img, angle):
width, height = img.size
background = Image.new('L', (width+100, height+100), 255)
# put it onto a large white background,
# in order to avoid black rim after rotation
background.paste(img, (50, 50))
background = background.rotate(angle)
background = background.crop((50, 50, 50+width, 50+height))
return background
# An island is some black pixels which are not connected to the rest.
def remove_island(img):
width, height = img.size
pix = img.load()
all_black, queue, island = [],[],[]
all_black = [(x,y) for x in range(width) for y in range(height) if pix[x,y]==0]
# print(all_black)
while len(all_black):
index = 0
queue = [all_black[0], ]
all_black = all_black[1:] # just like shift
while index < len(queue):
now = queue[index]
for sr in [tuple(map(sum, zip(a, now))) for a in SURROUNDING]:
if sr in all_black:
queue.append(sr)
all_black.remove(sr)
index += 1
if len(queue) < 5:
island.extend(queue)
for (x,y) in island:
pix[x, y] = 255
# print(island)
# convert an image object to a binarized (black and white) image.
def toBin(img):
# crop first
width, height = img.size
gray = img.convert('L') # to gray
pix = gray.load()
# remove the rim
for x in range(width):
pix[x, 0] = pix[x, height-1] = 255
for y in range(height):
pix[0, y] = pix[width-1, y] = 255
# binarization
for y in range(height):
for x in range(width):
pix[x, y] = 255 if pix[x, y] > THRESHOLD else 0
# rotation makes the letter upright
gray = rotate(gray, 7)
pix = gray.load() # reload pixels
    # Forcibly split letters that run wider than the maximum width
MAXIMUN_SIZE = 13 # max width/height is 13px
black_count = 0
for x in range(width):
s = sum([pix[x, y]==0 for y in range(height)])
if s == 0:
black_count = 0
elif black_count == MAXIMUN_SIZE:
for hei in range(height):
pix[x+1, hei] = 255
black_count = 0
else:
black_count+=1
remove_island(gray)
return gray
# crop a image to four letter images
def cropLetters(img):
MAXIMUN_SIZE = 13 # max width/height is 13px
# find vertical lines
width, height = img.size
    # an earlier version hard-coded the gap positions between letters:
    # gaps = [8+i*MAXIMUN_SIZE for i in range(4)]
    # instead, the gaps are detected dynamically below
pix = img.load()
gaps = [] # contain four letters' start position on width axes
onLetter = False
for x in range(width):
s = sum([pix[x, y]==0 for y in range(height)])
# if (s != 0 and onLetter == False) or (len(gaps) and x-gaps[-1] > MAXIMUN_SIZE):
if (s != 0 and onLetter == False):
gaps.append(x)
onLetter = True
elif s!=0 and onLetter and x-gaps[-1] >= MAXIMUN_SIZE-1:
# too wide letter
for hei in range(height):
pix[x+1, hei] = 255
elif s==0 and onLetter == True and x-gaps[-1] >= MAXIMUN_SIZE-1:
onLetter = False
if len(gaps)<4:
return False, gaps
letters = []
# crop to four letters
for l in gaps[:4]:
# 13 is the max width of one letter
letter = img.crop((l, 0, l+MAXIMUN_SIZE, height))
lpix = letter.load()
        # scan from bottom to top to find the lowest black row (handles letters like 'J' and 'I')
for y in range(height)[::-1]:
s = sum([lpix[x, y]==0 for x in range(MAXIMUN_SIZE)]) # [0, 13), the width
if s!=0:
# 13 is the max height of one letter, interesting :)
if y-MAXIMUN_SIZE+1 < 0:
letters.append(letter.crop((0, 0, MAXIMUN_SIZE, MAXIMUN_SIZE)))
else:
letters.append(letter.crop((0, y-MAXIMUN_SIZE+1, MAXIMUN_SIZE, y+1)))
break
return True, letters
def main():
f = []
for (dirpath, dirnames, filenames) in walk(RAW_PATH):
f.extend(filenames)
break
# using len() has better controllability
err_count = 0
for i in range(len(f)):
img = Image.open(RAW_PATH + f[i])
bimg = toBin(img) # convert to `1` mode
# bimg.save(TEMP_PATH + f[i], 'JPEG') # just for testing toBin()
success, letters = cropLetters(bimg)
if success:
for l in letters:
l.save(TARGET_PATH + randomName(9) + '.jpg', 'JPEG')
else:
err_count+=1
# print('Error crop: {name}, {lines}'.format(name=f[i], lines=str(letters)))
print('Error:', err_count)
print('Now do the classify by hand, and then exec img2feature.py.')
if __name__ == '__main__':
main()
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def main():
return 'Hello, Solar Pi'
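# For quick local testing the app can also be run directly; the host and port
# below are arbitrary choices, and `flask run` works just as well.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)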
|
from typing import Dict, NewType, Tuple, Union
import torch
import torch.nn as nn
# Load all model builders
from .poolings.stats import STAT_POOLINGS
class ClassificationHead(nn.Module):
def __init__(self,
num_classes: int,
input_features_chan: int,
head_hidden_layers=[
(256, 0.5, "ReLU"),
]):
super(ClassificationHead, self).__init__()
input_channels = input_features_chan
sequential = []
for ind, (num_units, dropout_rate, activ) in enumerate(head_hidden_layers):
sequential.append(nn.Linear(input_features_chan, num_units, bias=True))
sequential.append(nn.BatchNorm1d(num_units))
input_features_chan = num_units
if activ is not None:
sequential.append(getattr(nn, activ)())
if dropout_rate > 0:
sequential.append(nn.Dropout(p=dropout_rate))
if num_classes is not None:
sequential.append(nn.Linear(head_hidden_layers[-1][0], num_classes, bias=True))
self.fc_net = nn.Sequential(*sequential)
def forward(self, x):
return self.fc_net(x)
def build_sequential_fcnet(
input_features_chan: int,
layers=[(256, 0.5, "ReLU")],
):
sequential = []
for ind, (num_units, dropout_rate, activ) in enumerate(layers):
sequential.append(nn.Linear(input_features_chan, num_units, bias=True))
sequential.append(nn.BatchNorm1d(num_units))
input_features_chan = num_units
if activ is not None:
sequential.append(getattr(nn, activ)())
if dropout_rate > 0:
sequential.append(nn.Dropout(p=dropout_rate))
return nn.Sequential(*sequential)
class MultiTaskClassificationHead(nn.Module):
def __init__(
self,
head_setups: Dict[str, Union[int, Tuple[int, float], Dict[str, object]]],
input_features_chan: int,
head_hidden_layers=[
# (num_units, dropout, activation)
(256, 0.5, "ReLU"),
],
return_embeddings=False):
super(MultiTaskClassificationHead, self).__init__()
sequential = []
self.return_embeddings = return_embeddings
shared_hidden_layers = head_hidden_layers
self.fc_net = build_sequential_fcnet(input_features_chan, shared_hidden_layers)
def generate_head(head_setup, input_features_chan):
dropout_rate = 0.0
head_layers = []
if isinstance(head_setup, int):
num_classes = head_setup
elif isinstance(head_setup, tuple) or isinstance(head_setup, list):
num_classes, dropout_rate = head_setup
elif isinstance(head_setup, dict):
num_classes = head_setup["num_classes"]
dropout_rate = head_setup.get("dropout_rate", 0.0)
if "hidden_layers" in head_setup:
head_layers.append(
build_sequential_fcnet(input_features_chan, head_setup["hidden_layers"]))
input_features_chan = head_setup["hidden_layers"][-1][0]
if dropout_rate > 0.0:
head_layers.append(nn.Dropout(p=dropout_rate))
head_layers.append(nn.Linear(input_features_chan, num_classes, bias=True))
return nn.Sequential(*head_layers)
input_features_chan = shared_hidden_layers[-1][0]
self.heads = nn.ModuleDict(
modules={
head_name: generate_head(head_setup, input_features_chan)
for head_name, head_setup in head_setups.items()
})
def forward(self, x):
x = self.fc_net(x)
if self.return_embeddings:
return dict([(head_name, self.heads[head_name](x))
for head_name in self.heads.keys()]), x
else:
return dict([(head_name, self.heads[head_name](x)) for head_name in self.heads.keys()])
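# Minimal shape check (illustrative task names and sizes) showing the three
# accepted head_setups forms that generate_head handles above.
if __name__ == "__main__":
    _setups = {
        "task_int": 10,                          # int -> num_classes only
        "task_tuple": (3, 0.3),                  # (num_classes, dropout_rate)
        "task_dict": {"num_classes": 7, "dropout_rate": 0.2,
                      "hidden_layers": [(128, 0.2, "ReLU")]},
    }
    _mt = MultiTaskClassificationHead(_setups, input_features_chan=512)
    _out = _mt(torch.randn(4, 512))
    print({name: logits.shape for name, logits in _out.items()})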
class MultiInMultiOut(nn.Module):
def __init__(
self,
branches_setup=[
{
"input_path": [0, -1], # for example
"output_name": "denoising_mask_logits",
"module": object
},
{
"input_path": [0, -1], # for example
"output_name": "vad_logits",
"module": object
},
]):
super(MultiInMultiOut, self).__init__()
module_dict = {}
input_map = {}
for branch_setup in branches_setup:
branch_name = branch_setup["output_name"]
branch_input_path = branch_setup["input_path"]
input_map[branch_name] = branch_input_path
module_dict[branch_name] = resolve_model_or_conf(branch_setup["module"])
self.module_dict = nn.ModuleDict(modules=module_dict)
        print(self.module_dict)
self.input_map = input_map
def forward(self, inputs):
outputs = {}
for branch_name in self.input_map.keys():
x = inputs
if self.input_map[branch_name] is None:
x = x
else:
for path_part in self.input_map[branch_name]:
x = x[path_part]
outputs[branch_name] = self.module_dict[branch_name](x)
return outputs
class StatsPooling2D(nn.Module):
def __init__(self, mode="var"):
super(StatsPooling2D, self).__init__()
self.convert_mode_on = False
self.mode = mode
def forward(self, x):
s = x.size()
# x = x.view(s[0],s[1]*s[2],s[3])
# x = x.reshape(s[0],s[1]*s[2],s[3])
# x = torch.reshape(x,(int(s[0]),int(s[1]*s[2]),int(s[3])))
if self.convert_mode_on:
print(f"RESHAPE -> SPLIT+CONCAT")
x = torch.cat(torch.split(x, 1, dim=1), dim=2)[:, 0, :, :]
else:
x = torch.reshape(x, (int(s[0]), int(s[1] * s[2]), int(s[3])))
x = STAT_POOLINGS[self.mode](x, dim=2)
return x
class StatsPooling1D(nn.Module):
def __init__(self, mode="var"):
super(StatsPooling1D, self).__init__()
self.mode = mode
def forward(self, x):
s = x.size()
x = STAT_POOLINGS[self.mode](x, dim=2)
return x
class Permute(nn.Module):
def __init__(self, permutation):
        super(Permute, self).__init__()
self.permutation = permutation
def forward(self, x):
return x.permute(self.permutation)
class Transpose(nn.Module):
def __init__(self, perm):
super(Transpose, self).__init__()
self.perm = perm
def forward(self, x):
        return x.transpose(*self.perm)  # perm is expected to be a (dim0, dim1) pair
def get_params_count(model: nn.Module):
trainable_params_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
all_params_count = sum(p.numel() for p in model.parameters())
return trainable_params_count, all_params_count
ModuleOrConfig = NewType("ModuleOrConfig", Union[nn.Module, Dict])
def resolve_model_or_conf(mod_or_conf: ModuleOrConfig):
print(mod_or_conf)
if mod_or_conf is None:
return mod_or_conf
if isinstance(mod_or_conf, dict):
module = eval(mod_or_conf["type"])(**mod_or_conf["params"])
trainable = mod_or_conf.get("trainable", True)
if trainable is not None:
for param in module.parameters():
param.requires_grad = trainable
print(
f"{mod_or_conf['type']} trainable {trainable}, params counts : {get_params_count(module)}"
)
return module
elif isinstance(mod_or_conf, nn.Module):
return mod_or_conf
else:
        raise NotImplementedError()
class NormalizeAudio(nn.Module):
def __init__(self):
super(NormalizeAudio, self).__init__()
def forward(self, x):
        val_range = (x.max(dim=-1, keepdim=True).values -
                     x.min(dim=-1, keepdim=True).values + 1e-8) / 2
        return (x - x.mean(dim=-1, keepdim=True)) / val_range
class AudioClassificationModel(nn.Module):
def __init__(self,
features: ModuleOrConfig = None,
backbone: ModuleOrConfig = None,
pooling: ModuleOrConfig = None,
cls_head: ModuleOrConfig = None,
spec_augs: ModuleOrConfig = None):
super(AudioClassificationModel, self).__init__()
self.features = resolve_model_or_conf(features)
self.backbone = resolve_model_or_conf(backbone)
self.pooling = resolve_model_or_conf(pooling)
self.cls_head = resolve_model_or_conf(cls_head)
self.spec_augs = resolve_model_or_conf(spec_augs)
def forward(self, x):
if self.features is not None:
x = self.features(x)
if self.spec_augs is not None:
if self.training:
with torch.no_grad():
x = self.spec_augs(x)
if self.backbone is not None:
x = self.backbone(x)
if self.pooling is not None:
x = self.pooling(x)
if self.cls_head is not None:
x = self.cls_head(x)
return x
class SequentialModel(nn.Module):
def __init__(self, submodules=[]):
super(SequentialModel, self).__init__()
self.submodules = nn.Sequential(*[resolve_model_or_conf(sm) for sm in submodules])
def forward(self, x):
return self.submodules(x)
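# A minimal usage sketch (dimensions, pooling mode and config keys are illustrative assumptions):
# head = ClassificationHead(num_classes=10, input_features_chan=128)
# logits = head(torch.randn(32, 128))   # -> shape (32, 10)
# Sub-modules can also be passed to AudioClassificationModel as config dicts, which
# resolve_model_or_conf() instantiates by name, e.g.
# pooling = resolve_model_or_conf({"type": "StatsPooling1D", "params": {"mode": "var"}})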
|
from pyhocon import ConfigFactory
conf = ConfigFactory.parse_file("application.conf")
static_dir = conf.get("static_dir")
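# A hypothetical application.conf entry that the lookup above expects (HOCON syntax):
#   static_dir = "/srv/app/static"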
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from pipeline.param.base_param import BaseParam
from pipeline.param import consts
class EncodeParam(BaseParam):
"""
Define the encode method for raw intersect
Parameters
----------
    salt: str, appended to the source data string before hashing (str = str + salt), default ''
    encode_method: str, the hash method applied to the source data string; supports md5, sha1, sha224, sha256, sha384, sha512, sm3; default 'none'
    base64: bool, if True, the hash result is base64-encoded, default False
"""
def __init__(self, salt='', encode_method='none', base64=False):
super().__init__()
self.salt = salt
self.encode_method = encode_method
self.base64 = base64
def check(self):
if type(self.salt).__name__ != "str":
raise ValueError(
"encode param's salt {} not supported, should be str type".format(
self.salt))
descr = "encode param's "
self.encode_method = self.check_and_change_lower(self.encode_method,
["none", consts.MD5, consts.SHA1, consts.SHA224,
consts.SHA256, consts.SHA384, consts.SHA512, consts.SM3],
descr)
if type(self.base64).__name__ != "bool":
raise ValueError(
"hash param's base64 {} not supported, should be bool type".format(self.base64))
return True
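# A minimal usage sketch (values are illustrative):
# encode_param = EncodeParam(salt='my_salt', encode_method='sha256', base64=True)
# encode_param.check()  # returns True when every field passes validation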
class RSAParam(BaseParam):
"""
Define the hash method for RSA intersect method
Parameters
----------
    salt: str, appended to the source data string before hashing (str = str + salt), default ''
    hash_method: str, the hash method applied to the source data string; supports sha256, sha384, sha512, sm3; default 'sha256'
    final_hash_method: str, the hash method applied to the result data string; supports md5, sha1, sha224, sha256, sha384, sha512, sm3; default 'sha256'
    split_calculation: bool, if True, Host & Guest split operations for faster performance; recommended on large data sets
    random_base_fraction: positive float; if not None, generate (fraction * public key id count) values of r for encryption and reuse the generated r;
        note that a value greater than 0.99 is taken as 1, and a value less than 0.01 is rounded up to 0.01
    key_length: positive int, bit length of the RSA key, default 1024
"""
def __init__(self, salt='', hash_method='sha256', final_hash_method='sha256',
split_calculation=False, random_base_fraction=None, key_length=1024):
super().__init__()
self.salt = salt
self.hash_method = hash_method
self.final_hash_method = final_hash_method
self.split_calculation = split_calculation
self.random_base_fraction = random_base_fraction
self.key_length = key_length
def check(self):
if type(self.salt).__name__ != "str":
raise ValueError(
"rsa param's salt {} not supported, should be str type".format(
self.salt))
descr = "rsa param's hash_method "
self.hash_method = self.check_and_change_lower(self.hash_method,
[consts.SHA256, consts.SHA384, consts.SHA512, consts.SM3],
descr)
descr = "rsa param's final_hash_method "
self.final_hash_method = self.check_and_change_lower(self.final_hash_method,
[consts.MD5, consts.SHA1, consts.SHA224,
consts.SHA256, consts.SHA384, consts.SHA512, consts.SM3],
descr)
descr = "rsa param's split_calculation"
self.check_boolean(self.split_calculation, descr)
descr = "rsa param's random_base_fraction"
if self.random_base_fraction:
self.check_positive_number(self.random_base_fraction, descr)
self.check_decimal_float(self.random_base_fraction, descr)
descr = "rsa param's key_length"
self.check_positive_integer(self.key_length, descr)
return True
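# A configuration sketch for the RSA intersect path (values are illustrative):
# rsa_param = RSAParam(salt='my_salt', hash_method='sha256', final_hash_method='sha256', key_length=2048)
# rsa_param.check()  # validates hash methods, key length, and optional random_base_fraction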
class IntersectCache(BaseParam):
def __init__(self, use_cache=False, id_type=consts.PHONE, encrypt_type=consts.SHA256):
super().__init__()
self.use_cache = use_cache
self.id_type = id_type
self.encrypt_type = encrypt_type
def check(self):
if type(self.use_cache).__name__ != "bool":
raise ValueError(
"IntersectCache param's use_cache {} not supported, should be bool type".format(
self.use_cache))
descr = "intersect cache param's "
self.check_and_change_lower(self.id_type,
[consts.PHONE, consts.IMEI],
descr)
self.check_and_change_lower(self.encrypt_type,
[consts.MD5, consts.SHA256],
descr)
class IntersectParam(BaseParam):
"""
Define the intersect method
Parameters
----------
    intersect_method: str, supports 'rsa' and 'raw', default 'raw'
    random_bit: positive int, defines the encryption length of the rsa algorithm; effective only when intersect_method is 'rsa'
    sync_intersect_ids: bool. For rsa, True means guest or host will send the intersect results to the other party, while False means they will not;
        for raw, True means the role given by "join_role" will send the intersect results and the other parties will receive them.
        Default True.
    join_role: str, the role that joins ids; supports "guest" and "host" only and is effective only for raw. If "guest", the host sends its ids to the guest and the
        intersection is computed on the guest side; if "host", the guest sends its ids to the host. Default "guest".
    with_encode: bool, if True, a hash method is applied to the intersect ids. Effective only for "raw".
    encode_params: EncodeParam, effective only when with_encode is True
    rsa_params: RSAParam, effective for the rsa method only
    only_output_key: bool, if False, the intersection results include both the key and the value from the input data; if True, they include only the key,
        and the value is empty or a placeholder such as "intersect_id"
    repeated_id_process: bool, if True, the intersection handles ids that may be repeated
    repeated_id_owner: str, the role that owns the repeated ids
    with_sample_id: bool, whether the data carries a sample id, default False; setting this param to True may lead to unexpected behavior
"""
def __init__(self, intersect_method: str = consts.RAW, random_bit=128, sync_intersect_ids=True,
join_role=consts.GUEST,
with_encode=False, only_output_key=False, encode_params=EncodeParam(),
rsa_params=RSAParam(),
intersect_cache_param=IntersectCache(), repeated_id_process=False, repeated_id_owner=consts.GUEST,
with_sample_id=False,
allow_info_share: bool = False, info_owner=consts.GUEST):
super().__init__()
self.intersect_method = intersect_method
self.random_bit = random_bit
self.sync_intersect_ids = sync_intersect_ids
self.join_role = join_role
self.with_encode = with_encode
self.encode_params = copy.deepcopy(encode_params)
self.rsa_params = copy.deepcopy(rsa_params)
self.only_output_key = only_output_key
self.intersect_cache_param = intersect_cache_param
self.repeated_id_process = repeated_id_process
self.repeated_id_owner = repeated_id_owner
self.allow_info_share = allow_info_share
self.info_owner = info_owner
self.with_sample_id = with_sample_id
def check(self):
descr = "intersect param's "
self.intersect_method = self.check_and_change_lower(self.intersect_method,
[consts.RSA, consts.RAW],
descr)
if type(self.random_bit).__name__ not in ["int"]:
raise ValueError("intersect param's random_bit {} not supported, should be positive integer".format(
self.random_bit))
if type(self.sync_intersect_ids).__name__ != "bool":
raise ValueError(
"intersect param's sync_intersect_ids {} not supported, should be bool type".format(
self.sync_intersect_ids))
self.join_role = self.check_and_change_lower(self.join_role,
[consts.GUEST, consts.HOST],
descr+"join_role")
if type(self.with_encode).__name__ != "bool":
raise ValueError(
"intersect param's with_encode {} not supported, should be bool type".format(
self.with_encode))
if type(self.only_output_key).__name__ != "bool":
raise ValueError(
"intersect param's only_output_key {} not supported, should be bool type".format(
self.only_output_key))
if type(self.repeated_id_process).__name__ != "bool":
raise ValueError(
"intersect param's repeated_id_process {} not supported, should be bool type".format(
self.repeated_id_process))
self.repeated_id_owner = self.check_and_change_lower(self.repeated_id_owner,
[consts.GUEST],
descr+"repeated_id_owner")
if type(self.allow_info_share).__name__ != "bool":
raise ValueError(
"intersect param's allow_info_sync {} not supported, should be bool type".format(
self.allow_info_share))
self.info_owner = self.check_and_change_lower(self.info_owner,
[consts.GUEST, consts.HOST],
descr+"info_owner")
self.check_boolean(self.with_sample_id, descr+"with_sample_id")
self.encode_params.check()
self.rsa_params.check()
return True
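# A combined configuration sketch (values are illustrative; consts.* members come from the pipeline package):
# intersect_param = IntersectParam(intersect_method=consts.RSA,
#                                  rsa_params=RSAParam(hash_method='sha256', key_length=2048),
#                                  only_output_key=True)
# intersect_param.check()  # also recursively checks encode_params and rsa_params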
|
#!/usr/bin/env python3
import logging
import time
from sqlalchemy.sql.expression import func
from ngk.schema import Comment, Post, ScopedSession, User
from ngk.html_util import normalize_text
def main() -> None:
BATCH_SIZE = 50000
total_count = 0
with ScopedSession() as session:
offset = 0
db_comments_count = session.query(func.count(Comment.comment_id)).scalar()
parsed_comments = set()
while True:
print(f'Offset: {offset}')
q = session.query(Comment).order_by(Comment.comment_id).limit(BATCH_SIZE).offset(offset)
for comment in q:
comment.text = normalize_text(comment.text)
parsed_comments.add(comment.comment_id)
total_count += 1
offset += BATCH_SIZE
if offset > db_comments_count:
break
print(f'Total_count: {total_count}, db_count: {db_comments_count}, parsed_comments_len: {len(parsed_comments)}')
if __name__ == '__main__':
main()
|
# Copyright (c) WiPhy Development Team
# This library is released under the MIT License, see LICENSE.txt
import unittest
import numpy as np
from wiphy.code.modulator import *
from wiphy.util.general import getMinimumEuclideanDistance
class Test(unittest.TestCase):
def test_PSK(self):
for L in 2 ** np.arange(1, 8, 1):
symbols = generatePSKSymbols(L)
meanNorm = np.mean(np.square(np.abs(symbols)))
self.assertAlmostEqual(meanNorm, 1.0, msg="The mean power of PSK(" + str(L) + ") symbols differs from 1.0")
med = getMinimumEuclideanDistance(symbols.reshape(L, 1, 1))
self.assertGreater(med, 0, msg="The minimum Euclidean distance of PSK(" + str(L) + ") symbols is too small")
def test_QAM(self):
for L in 2 ** np.arange(2, 8, 2):
symbols = generateQAMSymbols(L)
meanNorm = np.mean(np.square(np.abs(symbols)))
self.assertAlmostEqual(meanNorm, 1.0, msg="The mean power of QAM(" + str(L) + ") symbols differs from 1.0")
med = getMinimumEuclideanDistance(symbols.reshape(L, 1, 1))
self.assertGreater(med, 0, msg="The minimum Euclidean distance of QAM(" + str(L) + ") symbols is too small")
def test_StarQAM(self):
for L in 2 ** np.arange(1, 8, 1):
symbols = generateStarQAMSymbols(L)
meanNorm = np.mean(np.square(np.abs(symbols)))
self.assertAlmostEqual(meanNorm, 1.0,
msg="The mean power of StarQAM(" + str(L) + ") symbols differs from 1.0")
med = getMinimumEuclideanDistance(symbols.reshape(L, 1, 1))
self.assertGreater(med, 0,
msg="The minimum Euclidean distance of StarQAM(" + str(L) + ") symbols is too small")
if __name__ == '__main__':
unittest.main()
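# Background for the assertions above: a unit-energy L-PSK constellation lies on the unit
# circle, so mean |s|^2 = 1 by construction, and its minimum Euclidean distance is
# 2*sin(pi/L) (2 for BPSK, ~1.414 for QPSK). A quick numpy sanity check, independent of
# the wiphy helpers (values are illustrative):
# qpsk = np.exp(1j * 2 * np.pi * np.arange(4) / 4)
# np.mean(np.abs(qpsk) ** 2)           # -> 1.0
# np.min(np.abs(qpsk[0] - qpsk[1:]))   # -> ~1.414 == 2 * np.sin(np.pi / 4)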
|
# app/home/__init__.py
from . import views
#from flask import Blueprint
#home = Blueprint('home', __name__)
|
import requests, json, re, time, Config
from bs4 import BeautifulSoup as BS
def Get_Grades(UserName, Password):
headers = {}
Login_data = {
'UserName': UserName,
'Password': Password}
session = requests.Session()
# Get SAMLRequest and RelayState
response = session.get('https://duval.focusschoolsoftware.com/focus/Modules.php?modname=misc/Portal.php', headers=headers)
doc = BS(response.content, 'html.parser')
SAMLRequest_and_RelayState = {
doc.input.attrs["name"]: doc.input.attrs["value"],
doc.find_all('input')[1].attrs["name"]: doc.find_all('input')[1].attrs["value"]
}
# RESPONSE 2 CONTAINS "MSISSamlRequest", "MSISSamlRequest1", "MSISSamlRequest2", AND "MSISSamlRequest3"
response2 = session.post('https://fs.duvalschools.org/adfs/ls/', headers=headers, data=SAMLRequest_and_RelayState)
ID_for_session = BS(response2.content, 'html.parser').form.attrs["action"].split("=")[1]
# This gets the new SAMLRequest and RelayState
response3 = session.post(f'https://fs.duvalschools.org/adfs/ls/?client-request-id={ID_for_session}&RedirectToIdentityProvider=AD+AUTHORITY',
headers=headers, data=Login_data, cookies=response2.cookies)
doc = BS(response3.content, 'html.parser')
SAMLRequest_and_RelayState = {
doc.input.attrs["name"]: doc.input.attrs["value"],
doc.find_all('input')[1].attrs["name"]: doc.find_all('input')[1].attrs["value"]
}
# This gets the new "PHPSESSID" cookie
response4 = session.post('https://duval.focusschoolsoftware.com/focus/sso/saml2/acs.php',
headers=headers, params={'id': 'saml'}, cookies=response.cookies.get_dict(), data=SAMLRequest_and_RelayState)
# This gets the Gradebook from the main page
response5 = requests.get('https://duval.focusschoolsoftware.com/focus/Modules.php',
headers=headers, cookies={"PHPSESSID": response4.cookies.get_dict()["PHPSESSID"]})
return json.loads((re.search('{"methods[A-z\W_0-9]+}}};', str(response5.text))[0])[:-1])
# Now create a function that gets all the assignments and grades
def Get_All_Assignments(UserName, Password):
headers = {}
Login_data = {
'UserName': UserName,
'Password': Password}
session = requests.Session()
# Get SAMLRequest and RelayState
response = session.get('https://duval.focusschoolsoftware.com/focus/Modules.php?modname=misc/Portal.php', headers=headers)
doc = BS(response.content, 'html.parser')
SAMLRequest_and_RelayState = {
doc.input.attrs["name"]: doc.input.attrs["value"],
doc.find_all('input')[1].attrs["name"]: doc.find_all('input')[1].attrs["value"]
}
# RESPONSE 2 CONTAINS "MSISSamlRequest", "MSISSamlRequest1", "MSISSamlRequest2", AND "MSISSamlRequest3"
response2 = session.post('https://fs.duvalschools.org/adfs/ls/', headers=headers, data=SAMLRequest_and_RelayState)
ID_for_session = BS(response2.content, 'html.parser').form.attrs["action"].split("=")[1]
# This gets the new SAMLRequest and RelayState
response3 = session.post(f'https://fs.duvalschools.org/adfs/ls/?client-request-id={ID_for_session}&RedirectToIdentityProvider=AD+AUTHORITY',
headers=headers, data=Login_data, cookies=response2.cookies)
doc = BS(response3.content, 'html.parser')
SAMLRequest_and_RelayState = {
doc.input.attrs["name"]: doc.input.attrs["value"],
doc.find_all('input')[1].attrs["name"]: doc.find_all('input')[1].attrs["value"]
}
# This gets the new "PHPSESSID" cookie
response4 = session.post('https://duval.focusschoolsoftware.com/focus/sso/saml2/acs.php',
headers=headers, params={'id': 'saml'}, cookies=response.cookies.get_dict(), data=SAMLRequest_and_RelayState)
# This gets the Gradebook from the main page
response5 = requests.get('https://duval.focusschoolsoftware.com/focus/Modules.php',
headers=headers, cookies={"PHPSESSID": response4.cookies.get_dict()["PHPSESSID"]})
Grade_Information = json.loads((re.search('{"methods[A-z\W_0-9]+}}};', str(response5.text))[0])[:-1])
# Collect all the href for each course
hrefs = []
Current_Quarter = Grade_Information["initial_contexts"]["PortalController"]["data"]["enrollments"][0]["grades"]["mps"][0]["key"]
for i in Grade_Information["initial_contexts"]["PortalController"]["data"]["enrollments"][0]["grades"]["rows"]:
#print(i[Current_Quarter+"_mp_grade_href"])
hrefs.append(i[Current_Quarter+"_mp_grade_href"])
# Now get all the assignments for each course
Assignments = []
for i in hrefs:
Assignments.append(Get_Assignments_Internel(i, {"PHPSESSID": response4.cookies.get_dict()["PHPSESSID"]}))
return [Grade_Information, Assignments]
def Get_Assignments_Internel(url, cookies):
Course_period_id = re.findall("course_period_id=[0-9]+", url)[0].split("=")[1]
result1 = requests.get(url, headers={}, cookies=cookies)
Session_id = (re.findall('"[A-z0-9+=/]+', re.findall('session_id.*\n.*";', result1.text)[0])[0])[1:]
Call = str({"requests":[{"controller":"StudentGBGradesController","method":"getGradebookGrid","args":[int(Course_period_id)]}]}).replace(' ', '').replace("'", '"')
headers = {
'authorization': 'Bearer ' + Session_id,
'content-type': 'multipart/form-data; boundary=----WebKitFormBoundaryaaaaaaaaaaaaaaaa',
}
Token = re.findall('token = "([A-Za-z0-9_\./\\-]*)"', result1.text)[1]
data = '------WebKitFormBoundaryaaaaaaaaaaaaaaaa\r\nContent-Disposition: form-data; name="course_period_id"\r\n\r\n'+Course_period_id+'\r\n------WebKitFormBoundaryaaaaaaaaaaaaaaaa\r\nContent-Disposition: form-data; name="__call__"\r\n\r\n'+Call+'\r\n------WebKitFormBoundaryaaaaaaaaaaaaaaaa\r\nContent-Disposition: form-data; name="__token__"\r\n\r\n'+Token+'\r\n------WebKitFormBoundaryaaaaaaaaaaaaaaaa--\r\n'
result2 = requests.post('https://duval.focusschoolsoftware.com/focus/classes/FocusModule.class.php',
headers=headers, cookies=cookies, data=data)
#print(result2.text)
return result2
|
# Simple django settings module (required when importing management commands).
SECRET_KEY = 'fake-key'
INSTALLED_APPS = (
'organisms',
'genes',
)
|
from pygluu.containerlib.document.rclone import RClone # noqa: F401
|
# Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import logging
from steelscript.common import timeutils
from steelscript.appresponse.core.types import ServiceClass, ResourceObject
from steelscript.common.exceptions import RvbdHTTPException
logger = logging.getLogger(__name__)
class CertificateService(ServiceClass):
"""Interface to manage SSL certificates."""
SERVICE_NAME = 'npm.https'
def __init__(self, appresponse):
self.appresponse = appresponse
self.servicedef = None
self.certificate = None
def _bind_resources(self):
# Init service
self.servicedef = self.appresponse.find_service(self.SERVICE_NAME)
# Init resource
self.certificate = self.servicedef.bind('certificate')
def get_certificate(self):
"""Get SSL Certificate available on AppResponse appliance."""
try:
resp = self.certificate.execute('get')
return Certificate(data=resp.data, datarep=resp)
except RvbdHTTPException as e:
            if str(e).startswith('404'):
                raise ValueError('No certificate object found')
            raise
def import_certificate(self, obj):
"""
Import a Certificate on the AppResponse appliance.
:param obj: Certificate object. { "pem": any, "passphrase": string }
:return : Certificate object.
"""
resp = self.certificate.execute('import', _data=obj)
return Certificate(data=resp.data, datarep=resp)
def generate_certificate(self, obj):
"""
Generate a Certificate on the AppResponse appliance.
:param obj: Distinguished name data object.
{
"common_name": string,
"organization": string,
"organizational_unit": string,
"locality": string,
"state": string,
"country": string,
"email": string
}
:return : Certificate object.
"""
resp = self.certificate.execute('generate', _data=obj)
return Certificate(data=resp.data, datarep=resp)
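# A usage sketch (assumes an already-initialized AppResponse handle; _bind_resources is
# normally invoked by the service framework, and the field values are illustrative):
# svc = CertificateService(appresponse)
# svc._bind_resources()
# cert = svc.generate_certificate({
#     "common_name": "ar11.example.com",
#     "organization": "Example Corp",
#     "organizational_unit": "IT",
#     "locality": "San Francisco",
#     "state": "CA",
#     "country": "US",
#     "email": "admin@example.com",
# })
# print(cert.expires_at())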
class Certificate(ResourceObject):
resource = 'certificate'
property_names = ['Subject', 'Fingerprint', 'Key', 'Issuer',
'Valid at', 'Expires at', 'PEM']
def __str__(self):
return '<Certificate {}/{}>'.format(self.issuer(),
self.expires_at())
def __repr__(self):
return '<%s issuer: %s, expires at: %s>' % (self.__class__.__name__,
self.issuer(),
self.expires_at())
def get_properties(self):
return self.__dict__
def get_property_values(self):
return [
self.subject(), self.fingerprint(),
self.key(), self.issuer(),
timeutils.string_to_datetime(self.valid_at()),
timeutils.string_to_datetime(self.expires_at()),
self.pem()
]
def issuer(self):
return self.data.get('issuer', None)
def subject(self):
return self.data.get('subject', None)
def valid_at(self):
return self.data.get('valid_at', None)
def expires_at(self):
return self.data.get('expires_at', None)
def fingerprint(self):
return self.data.get('fingerprint', None)
def key(self):
return self.data.get('key', None)
def pem(self):
return self.data.get('pem', None)
|
# -*- coding: utf-8 -*-
"""
koordinates.sets
================
The `Sets API <https://help.koordinates.com/api/publisher-admin-api/sets-api/>`_
is used for grouping layers, tables and documents together.
"""
import logging
from koordinates.permissions import PermissionObjectMixin
from koordinates.users import Group
from koordinates.metadata import Metadata, MetadataManager
from koordinates import base
from koordinates.utils import is_bound
from .publishing import Publish
logger = logging.getLogger(__name__)
class SetManager(base.Manager):
"""
Accessor for querying Sets.
Access via the ``sets`` property of a :py:class:`koordinates.client.Client` instance.
"""
_URL_KEY = "SET"
def __init__(self, client):
super(SetManager, self).__init__(client)
# Inner model managers
self.versions = SetVersionManager(client, self)
self._data = SetDataManager(client, self)
self._metadata = MetadataManager(self, client)
def list_drafts(self):
"""
A filterable list view of sets, returning the draft version of each set.
If the most recent version has been published already, it won’t be returned here.
"""
target_url = self.client.get_url("SET", "GET", "multidraft")
return base.Query(self, target_url)
def create(self, set):
"""
Creates a new Set.
"""
target_url = self.client.get_url("SET", "POST", "create")
r = self.client.request("POST", target_url, json=set._serialize())
return set._deserialize(r.json(), self)
def list_versions(self, set_id):
"""
Filterable list of versions of a set, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source_revision=value)``. Specific values depend on the source type.
Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter
using ``<`` or ``>=`` operators respectively.
"""
target_url = self.client.get_url("SET_VERSION", "GET", "multi", {"id": set_id})
return base.Query(
self,
target_url,
valid_filter_attributes=("data",),
valid_sort_attributes=(),
)
def get_version(self, set_id, version_id, expand=[]):
"""
Get a specific version of a set.
"""
target_url = self.client.get_url(
"SET_VERSION", "GET", "single", {"id": set_id, "version_id": version_id},
)
return self._get(target_url, expand=expand)
def get_draft(self, set_id, expand=[]):
"""
Get the current draft version of a set.
:raises NotFound: if there is no draft version.
"""
target_url = self.client.get_url("SET_VERSION", "GET", "draft", {"id": set_id})
return self._get(target_url, expand=expand)
def get_published(self, set_id, expand=[]):
"""
Get the latest published version of this set.
:raises NotFound: if there is no published version.
"""
target_url = self.client.get_url(
"SET_VERSION", "GET", "published", {"id": set_id}
)
return self._get(target_url, expand=expand)
def create_draft(self, set_id):
"""
Creates a new draft version.
:rtype: Client
:return: the new version
:raises 409 Conflict: if there is already a draft version for this set.
"""
target_url = self.client.get_url(
"SET_VERSION", "POST", "create", {"id": set_id}
)
r = self.client.request("POST", target_url, json={})
return self.create_from_result(r.json())
def set_metadata(self, set_id, fp):
"""
Set the XML metadata on a set.
:param file fp: file-like object to read the XML metadata from.
"""
base_url = self.client.get_url("SET", "GET", "single", {"id": set_id})
self._metadata.set(base_url, fp)
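# A usage sketch (token and set id are placeholders; ``client.sets`` is the documented
# entry point, and the Client constructor arguments are an assumption about the client API):
# client = koordinates.Client(host="labs.koordinates.com", token="YOUR_API_TOKEN")
# draft_sets = client.sets.list_drafts()
# versions = client.sets.list_versions(set_id=123).filter(data__source_revision__gte=5)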
class Set(base.Model, PermissionObjectMixin):
"""
Represents a single set grouping of layers, tables, and documents.
"""
class Meta:
manager = SetManager
serialize_skip = ("permissions",)
deserialize_skip = ("permissions",)
def _deserialize(self, data, manager):
super(Set, self)._deserialize(data, manager)
self.group = (
Group()._deserialize(data["group"], manager.client.get_manager(Group))
if data.get("group")
else None
)
self.metadata = (
Metadata()._deserialize(data["metadata"], manager._metadata, self)
if data.get("metadata")
else None
)
self.version = (
SetVersion()._deserialize(data["version"], manager.versions, self)
if data.get("version")
else None
)
return self
@is_bound
def set_metadata(self, fp, version_id=None):
"""
Set the XML metadata on this draft version.
:param file fp: file-like object to read the XML metadata from.
:raises NotAllowed: if this version is already published.
"""
if not version_id:
version_id = self.version.id
base_url = self._client.get_url(
"SET_VERSION", "GET", "single", {"id": self.id, "version_id": version_id},
)
self._manager._metadata.set(base_url, fp)
# reload myself
r = self._client.request("GET", base_url)
return self._deserialize(r.json(), self._manager)
@property
def is_published_version(self):
""" Return if this version is the published version of a layer """
pub_ver = getattr(self, "published_version", None)
this_ver = getattr(self, "this_version", None)
return this_ver and pub_ver and (this_ver == pub_ver)
@property
def is_draft_version(self):
""" Return if this version is the draft version of a layer """
pub_ver = getattr(self, "published_version", None)
latest_ver = getattr(self, "latest_version", None)
this_ver = getattr(self, "this_version", None)
return (
this_ver
and latest_ver
and (this_ver == latest_ver)
and (latest_ver != pub_ver)
)
@is_bound
def list_versions(self):
"""
Filterable list of versions of a set, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source_revision=value)``. Specific values depend on the source type.
Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter
using ``<`` or ``>=`` operators respectively.
"""
target_url = self._client.get_url(
"SET_VERSION", "GET", "multi", {"id": self.id}
)
return base.Query(
self._manager,
target_url,
valid_filter_attributes=("data",),
valid_sort_attributes=(),
)
@is_bound
def get_version(self, version_id, expand=()):
"""
Get a specific version of this set
"""
target_url = self._client.get_url(
"SET_VERSION", "GET", "single", {"id": self.id, "version_id": version_id},
)
return self._manager._get(target_url, expand=expand)
@is_bound
def get_draft_version(self, expand=()):
"""
Get the current draft version of this set.
:raises NotFound: if there is no draft version.
"""
target_url = self._client.get_url(
"SET_VERSION", "GET", "draft", {"id": self.id}
)
return self._manager._get(target_url, expand=expand)
@is_bound
def get_published_version(self, expand=()):
"""
Get the latest published version of this set.
:raises NotFound: if there is no published version.
"""
target_url = self._client.get_url(
"SET_VERSION", "GET", "published", {"id": self.id}
)
return self._manager._get(target_url, expand=expand)
@is_bound
def publish(self, version_id=None):
"""
Creates a publish task for this version.
:return: the publish task
:rtype: Publish
:raises Conflict: If the version is already published, or already has a publish job.
"""
if not version_id:
version_id = self.version.id
target_url = self._client.get_url(
"SET_VERSION", "POST", "publish", {"id": self.id, "version_id": version_id},
)
r = self._client.request("POST", target_url, json={})
return self._client.get_manager(Publish).create_from_result(r.json())
@is_bound
def save(self):
"""
Edits this draft version.
:raises NotAllowed: if the version is already published.
"""
target_url = self._client.get_url(
"SET_VERSION",
"PUT",
"edit",
{"id": self.id, "version_id": self.version.id},
)
r = self._client.request("PUT", target_url, json=self._serialize())
return self._deserialize(r.json(), self._manager)
@is_bound
def delete_version(self, version_id=None):
"""
Deletes this draft version (revert to published)
:raises NotAllowed: if this version is already published.
:raises Conflict: if this version is already deleted.
"""
if not version_id:
version_id = self.version.id
target_url = self._client.get_url(
"SET_VERSION",
"DELETE",
"single",
{"id": self.id, "version_id": version_id},
)
self._client.request("DELETE", target_url)
class SetVersionManager(base.InnerManager):
_URL_KEY = "SET_VERSION"
class SetVersion(base.InnerModel):
"""
Represents the ``version`` property of a :py:class:`koordinates.client.Client` instance.
"""
class Meta:
manager = SetVersionManager
class SetDataManager(base.InnerManager):
_URL_KEY = "DATA"
class SetData(base.InnerModel):
"""
Represents the ``data`` property of a :py:class:`koordinates.client.Client` instance.
"""
class Meta:
manager = SetDataManager
|
import numpy as np
def binstep(y, th=0):
return [1 if i > th else 0 for i in y]
def hebb_assoc_train(s, t):
w = np.zeros((len(s[0]), len(t[0])))
for r, row in enumerate(s):
for c, col in enumerate(row):
w[c] = [w[c, i] + col * t[r, i] for i in range(len(t[r]))]
return w
def hebb_assoc_train_mat(s, t):
p = [np.outer(np.reshape(s[i], (-1, 1)), t[i]) for i in range(len(s))]
return np.sum(p, 0)
def hebb_assoc_test(x, w):
y = [np.dot(x, w[:, i]) for i in range(len(w[0]))]
return binstep(y)
def hebb_assoc_test_mat(x, w):
return binstep(np.matmul(x, w))
if __name__ == '__main__':
s = [[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 1]]
t = np.array([[1, 0],
[1, 0],
[0, 1],
[0, 1]])
w = hebb_assoc_train_mat(s, t)
y = hebb_assoc_test([1, 1, 0, 0], w)
print(w)
print(y)
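# Worked result for the training pairs above: the outer-product rule W = sum_i outer(s_i, t_i)
# gives
#   W = [[2, 0],
#        [1, 0],
#        [0, 1],
#        [0, 2]]
# and the test input [1, 1, 0, 0] yields net input [3, 0], which binstep maps to [1, 0].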
|
from datetime import datetime
import dateutil.parser
from botocore.exceptions import ClientError
from autobot_helpers import context_helper, boto3_helper, policy_helper
from services.aws.utils import Constants, Helpers
class EC2:
def __init__(self, region_name=Constants.AWSRegions.VIRGINIA.value):
self.client = boto3_helper.get_client('ec2', region_name=region_name)
self.autoscaling_client = boto3_helper.get_client('autoscaling', region_name=region_name)
self.region_name = region_name
def get_instances_details(self, instance_ids=None):
results = []
filters = []
if instance_ids:
if not isinstance(instance_ids, list):
instance_ids = [instance_ids]
filters = [{'Name': 'instance-id', 'Values': instance_ids}]
response = self.client.describe_instances(Filters=filters)
results.extend(response['Reservations'])
while 'NextToken' in response:
next_token = response['NextToken']
            response = self.client.describe_instances(Filters=filters, NextToken=next_token)
results.extend(response['Reservations'])
next_token = response['NextToken'] if response.get('NextToken') else False
autoscaling_instances = self.autoscaling_client.describe_auto_scaling_instances()
instance_data_list = []
for reservation in results:
for instance in reservation['Instances']:
try:
instance_data = {'id': instance['InstanceId'], 'isTerminationProtected': False,
'launchedOn': instance['LaunchTime'].isoformat(),
'state': instance['State']['Name'],
'isEbsOptimized': instance['EbsOptimized'],
'securityGroups': [], 'region': self.region_name}
if 'StateTransitionReason' in instance and instance['StateTransitionReason']:
state_trans_reason = instance['StateTransitionReason']
instance_data['isLastStateChangeUserInitiated'] = \
True if "User initiated" in state_trans_reason else False
if "(" in state_trans_reason:
date_string = state_trans_reason[
state_trans_reason.find("(") + 1:state_trans_reason.find(")")]
instance_data['lastStateChangedOn'] = \
datetime.strptime(date_string, "%Y-%m-%d %H:%M:%S %Z").isoformat()
if 'PrivateIpAddress' in instance:
instance_data['privateIp'] = instance['PrivateIpAddress']
if 'VpcId' in instance:
instance_data['vpcId'] = instance['VpcId']
instance_data['subnetId'] = instance['SubnetId']
if 'Tags' in instance:
instance_data['tags'] = instance['Tags']
for tag in instance_data['tags']:
if tag['Key'].lower() == 'name':
instance_data['name'] = tag['Value']
for security_group in instance['SecurityGroups']:
instance_data['securityGroups'].append({'id': security_group['GroupId'],
'name': security_group['GroupName']})
if "StateReason" in instance:
if instance['StateReason'].get('Code') and instance['StateReason'].get('Message'):
instance_data['reasonForLastStateChange'] = {'code': instance['StateReason'].get('Code'),
'message': instance['StateReason'].get(
'Message')}
if 'IamInstanceProfile' in instance:
instance_data['iamProfileId'] = instance['IamInstanceProfile']
ec2_protection = self.client.describe_instance_attribute(Attribute='disableApiTermination',
InstanceId=instance['InstanceId'])
if ec2_protection['DisableApiTermination']['Value']:
instance_autoscaling = next((asg_instance for asg_instance in
autoscaling_instances['AutoScalingInstances']
if asg_instance["InstanceId"] == instance['InstanceId']), None)
if not instance_autoscaling:
instance_data['isTerminationProtected'] = ec2_protection['DisableApiTermination']['Value']
else:
instance_data['autoScaled'] = True
instance_data['autoScalingGroupName'] = instance_autoscaling['AutoScalingGroupName']
instance_data['autoScalingHealthStatus'] = instance_autoscaling['HealthStatus']
instance_data_list.append(instance_data)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting Ec2Instance=%s, %s",
instance['InstanceId'], e)
return instance_data_list if instance_data_list else None
def get_security_groups_details(self):
results = []
response = self.client.describe_security_groups()
results.extend(response['SecurityGroups'])
while 'NextToken' in response:
next_token = response['NextToken']
response = self.client.describe_security_groups(NextToken=next_token)
results.extend(response['SecurityGroups'])
next_token = response['NextToken'] if response.get('NextToken') else False
security_groups = []
for sec_group in results:
try:
security_group = {'id': sec_group['GroupId'], 'name': sec_group['GroupName'],
'tags': None, 'vpcId': None, 'region': self.region_name, 'ingressRules': [],
'egressRules': []}
if 'Tags' in sec_group:
security_group['tags'] = sec_group['Tags']
if 'VpcId' in sec_group:
security_group['vpcId'] = sec_group['VpcId']
for ingress in sec_group['IpPermissions']:
security_group['ingressRules'].append(
{'fromPort': ingress.get('FromPort') if ingress.get('FromPort') else None,
'toPort': ingress.get('ToPort') if ingress.get('ToPort') else None,
'ipRange': ingress['IpRanges']})
for egress in sec_group['IpPermissionsEgress']:
security_group['egressRules'].append(
{'fromPort': egress.get('FromPort') if egress.get('FromPort') else None,
'toPort': egress.get('ToPort') if egress.get('ToPort') else None,
'ipRange': egress['IpRanges'] if egress['IpRanges'] else None})
security_groups.append(security_group)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting SecurityGroup=%s, %s",
sec_group['GroupId'], e)
return security_groups if security_groups else None
def get_stale_security_groups(self, vpc_id):
results = []
response = self.client.describe_stale_security_groups(VpcId=vpc_id)
results.extend(response['StaleSecurityGroupSet'])
while 'NextToken' in response:
next_token = response['NextToken']
response = self.client.describe_stale_security_groups(VpcId=vpc_id, NextToken=next_token)
results.extend(response['StaleSecurityGroupSet'])
next_token = response['NextToken'] if response.get('NextToken') else False
stale_sg_list = []
for stale_sg in results:
stale_sg_list.append({'id': stale_sg['GroupId'], 'name': stale_sg['GroupName'], 'region': self.region_name})
return stale_sg_list
def get_volume_details(self):
results = []
response = self.client.describe_volumes()
results.extend(response['Volumes'])
while 'NextToken' in response:
next_token = response['NextToken']
response = self.client.describe_volumes(NextToken=next_token)
results.extend(response['Volumes'])
next_token = response['NextToken'] if response.get('NextToken') else False
volumes_data = []
for volume in results:
try:
volume_data = {'id': volume['VolumeId'], 'type': volume['VolumeType'], 'size': volume['Size'],
'availabilityZone': volume['AvailabilityZone'],
'createdOn': volume['CreateTime'].isoformat(),
'attachments': [], 'region': self.region_name, 'isEncrypted': volume['Encrypted']}
if 'Iops' in volume:
volume_data['iops'] = volume['Iops']
if 'Tags' in volume:
volume_data['tags'] = volume['Tags']
for tag in volume_data['tags']:
if tag['Key'].lower() == 'name':
volume_data['name'] = tag['Value']
for attachment in volume['Attachments']:
volume_data['attachments'].append({'attachedOn': attachment['AttachTime'].isoformat(),
'instanceId': attachment['InstanceId']})
volumes_data.append(volume_data)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting Volume=%s, %s",
volume['VolumeId'], e)
return volumes_data if volumes_data else None
def get_snapshot_details(self):
results = []
response = self.client.describe_snapshots(
OwnerIds=[context_helper.get_current_session()['attributes']['accountNumber']])
results.extend(response['Snapshots'])
while 'NextToken' in response:
next_token = response['NextToken']
response = self.client.describe_snapshots(
OwnerIds=[context_helper.get_current_session()['attributes']['accountNumber']], NextToken=next_token)
results.extend(response['Snapshots'])
next_token = response['NextToken'] if response.get('NextToken') else False
snapshots = []
for snapshot in results:
try:
snapshot_data = {'id': snapshot['SnapshotId'], 'volumeId': snapshot['VolumeId'],
'isEncrypted': snapshot['Encrypted'], 'volumeSize': snapshot['VolumeSize'],
'createdOn': snapshot['StartTime'].isoformat(),
'description': snapshot['Description'], 'region': self.region_name}
if 'Tags' in snapshot:
snapshot_data['tags'] = snapshot['Tags']
for tag in snapshot_data['tags']:
if tag['Key'].lower() == 'name':
snapshot_data['name'] = tag['Value']
snapshots.append(snapshot_data)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting Snapshot=%s, %s",
snapshot['SnapshotId'], e)
return snapshots if snapshots else None
def get_eip_details(self):
addresses = self.client.describe_addresses()
eips = []
for address in addresses['Addresses']:
try:
eip = {'ip': address['PublicIp'], 'domain': address['Domain'], 'allocationId': None,
'region': self.region_name, 'id': address['PublicIp']}
if address['Domain'] == 'vpc':
eip['allocationId'] = address['AllocationId']
if 'AssociationId' in address:
eip['networkInterfaceId'] = address['NetworkInterfaceId']
if 'InstanceId' in address and address['InstanceId']:
eip['instanceId'] = address['InstanceId']
if 'Tags' in address:
eip['tags'] = address['Tags']
for tag in eip['tags']:
if tag['Key'].lower() == 'name':
eip['name'] = tag['Value']
eips.append(eip)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting ElasticIP=%s, %s",
address['PublicIp'], e)
return eips if eips else None
def get_eni_details(self):
response = self.client.describe_network_interfaces()
enis = []
for interface in response['NetworkInterfaces']:
try:
eni = {'id': interface['NetworkInterfaceId'], 'status': interface['Status'],
'subnetId': interface['SubnetId'], 'availabilityZone': interface['AvailabilityZone'],
'association': None, 'attachment': None,
'type': interface['InterfaceType'], 'region': self.region_name}
if interface['Description']:
eni['description'] = interface['Description']
if 'TagSet' in interface:
eni['tags'] = interface['TagSet']
for tag in eni['tags']:
if tag['Key'].lower() == 'name':
eni['name'] = tag['Value']
if 'Association' in interface and 'AssociationId' in interface['Association']:
eni['association'] = {'id': interface['Association']['AssociationId'],
'publicIp': interface['Association']['PublicIp'],
'allocationId': interface['Association']['AllocationId']}
if 'Attachment' in interface:
eni['attachment'] = {'id': interface['Attachment']['AttachmentId'],
'instanceId': interface['Attachment'].get('InstanceId'),
'status': interface['Attachment']['Status'],
'attachedOn': interface['Attachment'].get(
'AttachTime').isoformat() if 'AttachTime' in interface[
'Attachment'] else None
}
enis.append(eni)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting ENI=%s, %s",
interface['NetworkInterfaceId'], e)
return enis if enis else None
def __get_vpc_nat_gateways(self):
results = []
response = self.client.describe_nat_gateways()
results.extend(response['NatGateways'])
while 'NextToken' in response:
next_token = response['NextToken']
response = self.client.describe_nat_gateways(NextToken=next_token)
results.extend(response['NatGateways'])
next_token = response['NextToken'] if response.get('NextToken') else False
return results
def get_vpc_endpoint_details(self):
'''
        Sample Output:
"vpcEndpoints": [
{
"id": "vpce-02d61e5816f638f9b",
"serviceName": "com.amazonaws.us-east-1.s3",
"status": "available",
"type": "Gateway",
"vpcId": "vpc-78ef0b02"
}
]
'''
results = []
response = self.client.describe_vpc_endpoints(Filters=[{'Name': 'vpc-endpoint-state', 'Values': ['available']}])
results.extend(response['VpcEndpoints'])
while 'NextToken' in response:
next_token = response['NextToken']
response = self.client.describe_vpc_endpoints(Filters=[{'Name': 'vpc-endpoint-state',
'Values': ['available']}], NextToken=next_token)
results.extend(response['VpcEndpoints'])
next_token = response['NextToken'] if response.get('NextToken') else False
vpc_end_points = []
for endpoint in results:
try:
endpoint_detail = {'id': endpoint['VpcEndpointId'], 'vpcId': endpoint['VpcId'],
'serviceName': endpoint['ServiceName'], 'status': endpoint['State'],
'type': endpoint['VpcEndpointType']}
vpc_end_points.append(endpoint_detail)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting VPCEndpoint=%s, %s",
endpoint['VpcEndpointId'], e)
return vpc_end_points
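    # A consumption sketch (region and credentials are assumed to be wired up via boto3_helper;
    # the filter below is illustrative):
    # ec2 = EC2(region_name='us-east-1')
    # endpoints = ec2.get_vpc_endpoint_details()
    # gateway_endpoints = [e for e in endpoints if e['type'] == 'Gateway']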
def __get_egress_only_gateways(self):
results = []
response = self.client.describe_egress_only_internet_gateways()
results.extend(response['EgressOnlyInternetGateways'])
while 'NextToken' in response:
next_token = response['NextToken']
response = self.client.describe_egress_only_internet_gateways(NextToken=next_token)
results.extend(response['EgressOnlyInternetGateways'])
next_token = response['NextToken'] if response.get('NextToken') else False
return results
def __describe_flow_logs(self):
results = []
response = self.client.describe_flow_logs()
results.extend(response['FlowLogs'])
while 'NextToken' in response:
next_token = response['NextToken']
response = self.client.describe_flow_logs(NextToken=next_token)
results.extend(response['FlowLogs'])
next_token = response['NextToken'] if response.get('NextToken') else False
return results
def get_vpc_details(self):
vpcs = self.client.describe_vpcs()
vpc_gateways = self.__get_vpc_nat_gateways()
egress_only_gateways = self.__get_egress_only_gateways()
flow_logs = self.__describe_flow_logs()
vpc_data_list = []
for vpc in vpcs['Vpcs']:
try:
vpc_data = {'id': vpc['VpcId'], 'hasIPv6Association': False, 'isDefault': vpc['IsDefault'],
'hasEgressOnlyInternetGateways': False, 'region': self.region_name, 'natGateways': [],
'flowLogs': []}
try:
if vpc['Ipv6CidrBlockAssociationSet']:
vpc_data['hasIPv6Association'] = True
except KeyError as e:
pass
if 'Tags' in vpc:
vpc_data['tags'] = vpc['Tags']
for tag in vpc_data['tags']:
if tag['Key'].lower() == 'name':
vpc_data['name'] = tag['Value']
vpc_data['staleSecurityGroups'] = self.get_stale_security_groups(vpc['VpcId'])
for vpc_gateway in vpc_gateways:
if vpc_gateway["VpcId"] == vpc['VpcId']:
nat_gateway = {'id': vpc_gateway['NatGatewayId'],
'createdOn': vpc_gateway['CreateTime'].isoformat()}
if 'State' in vpc_gateway:
nat_gateway['state'] = vpc_gateway['State']
vpc_data['natGateways'].append(nat_gateway)
for egress_only_gateway in egress_only_gateways:
if egress_only_gateway['Attachments'][0]['VpcId'] == vpc['VpcId']:
vpc_data['hasEgressOnlyInternetGateways'] = True
for flow_log in flow_logs:
if flow_log["ResourceId"] == vpc['VpcId']:
vpc_data['flowLogs'].append({'id': flow_log['FlowLogId'], 'name': flow_log['LogGroupName'],
'status': flow_log['FlowLogStatus'],
'hasError': True if (
'DeliverLogsErrorMessage' in flow_log) else False})
vpc_data_list.append(vpc_data)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting VPC=%s, %s", vpc['VpcId'], e)
return vpc_data_list if vpc_data_list else None
def get_subnet_details(self):
response = self.client.describe_subnets()
subnets = []
for subnet in response['Subnets']:
subnet_data = {'id': subnet['SubnetId'], 'vpcId': subnet['VpcId'],
'availabilityZone': subnet['AvailabilityZone'], 'state': subnet['State']}
subnets.append(subnet_data)
return subnets
def get_route_table_details(self):
response = self.client.describe_route_tables(Filters=[
{
'Name': 'association.main',
'Values': ['false']
}
])
route_table_list = []
for route in response['RouteTables']:
try:
route_table_detail = {'id': route['RouteTableId'], 'associations': [], 'region': self.region_name}
if 'VpcId' in route:
route_table_detail['vpcId'] = route['VpcId']
if 'Tags' in route:
route_table_detail['tags'] = route['Tags']
for tag in route_table_detail['tags']:
if tag['Key'].lower() == 'name':
route_table_detail['name'] = tag['Value']
if 'Associations' in route:
route_table_detail['associations'] = route['Associations']
route_table_list.append(route_table_detail)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting RouteTable=%s, %s",
route['RouteTableId'], e)
return route_table_list if route_table_list else None
def get_internet_gateway_details(self):
response = self.client.describe_internet_gateways()
internet_gateways = []
for igw in response['InternetGateways']:
try:
internet_gateway = {'id': igw['InternetGatewayId'], 'region': self.region_name}
if 'Tags' in igw:
internet_gateway['tags'] = igw['Tags']
for tag in internet_gateway['tags']:
if tag['Key'].lower() == 'name':
internet_gateway['name'] = tag['Value']
if 'Attachments' in igw:
internet_gateway['attachments'] = igw['Attachments']
internet_gateways.append(internet_gateway)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting InternetGateway=%s, %s",
igw['InternetGatewayId'], e)
return internet_gateways if internet_gateways else None
def get_vpn_gateways(self):
response = self.client.describe_vpn_gateways()
vpn_gateways = []
for vpngw in response['VpnGateways']:
try:
vpn_gateway = {'id': vpngw['VpnGatewayId'], 'state': vpngw['State'], 'type': vpngw['Type'],
'region': self.region_name}
if 'AvailabilityZone' in vpngw:
vpn_gateway['availabilityZone'] = vpngw['AvailabilityZone']
if 'VpcAttachments' in vpngw:
vpn_gateway['vpcAttachments'] = vpngw['VpcAttachments']
if 'Tags' in vpngw:
vpn_gateway['tags'] = vpngw['Tags']
for tag in vpn_gateway['tags']:
if tag['Key'].lower() == 'name':
vpn_gateway['name'] = tag['Value']
vpn_gateways.append(vpn_gateway)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting VPNGateway=%s, %s",
vpngw['VpnGatewayId'], e)
return vpn_gateways if vpn_gateways else None
def get_ami_details(self):
response = self.client.describe_images(Filters=[
{
'Name': 'owner-id',
'Values': [context_helper.get_current_session()['attributes']['accountNumber']]
}
])
amis = []
for image in response['Images']:
try:
ami = {'id': image['ImageId'], 'state': image['State'], 'region': self.region_name, 'snapshots': []}
if 'Tags' in image:
ami['tags'] = image['Tags']
for tag in ami['tags']:
if tag['Key'].lower() == 'name':
ami['name'] = tag['Value']
if image['Name']:
ami['name'] = image['Name']
try:
created_on = datetime.strptime(image['CreationDate'], '%Y-%m-%dT%H:%M:%S.000Z')
ami['createdOn'] = created_on.isoformat()
date_diff = datetime.now() - created_on
ami['age'] = date_diff.days
for block_device_mapping in image['BlockDeviceMappings']:
ami['snapshots'].append(block_device_mapping['Ebs']['SnapshotId'])
except BaseException as e:
pass
amis.append(ami)
except BaseException as e:
context_helper.logger().exception("Some exception occurred while getting AMI=%s, %s", image['ImageId'],
e)
return amis if amis else None
def delete_volume(self, volume_id):
if not volume_id:
return {'success': False, 'error_code': 'EC2_NO_VOLUME_ID', 'message': 'Volume ID not provided'}
try:
self.client.delete_volume(
VolumeId=volume_id,
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'EC2_UNAUTHORIZED', 'message': repr(e)}
try:
self.client.delete_volume(
VolumeId=volume_id,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting Volume=%s, %s",
volume_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_eip(self, public_ip, allocation_id):
if not allocation_id and not public_ip:
return {'success': False, 'error_code': 'EC2_INVALID_ARGS', 'message': 'AllocationId or '
'PublicIP not provided'}
try:
if allocation_id:
self.client.release_address(
AllocationId=allocation_id,
DryRun=True
)
else:
self.client.release_address(
PublicIp=public_ip,
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'EC2_UNAUTHORIZED', 'message': repr(e)}
try:
if allocation_id:
self.client.release_address(
AllocationId=allocation_id,
DryRun=False
)
else:
self.client.release_address(
PublicIp=public_ip,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting EIP=%s, %s",
allocation_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_ami(self, image_id):
if not image_id:
return {'success': False, 'error_code': 'EC2_NO_IMAGE_ID', 'message': 'Image ID not provided'}
try:
self.client.deregister_image(
ImageId=image_id,
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'EC2_UNAUTHORIZED', 'message': repr(e)}
try:
self.client.deregister_image(
ImageId=image_id,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting Image=%s, %s",
image_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def terminate_instances(self, instance_ids):
if not instance_ids:
return {'success': False, 'error_code': 'EC2_NO_INSTANCE_ID', 'message': 'InstanceID(s) not provided'}
if not isinstance(instance_ids, list):
instance_ids = [instance_ids]
try:
self.client.terminate_instances(
InstanceIds=instance_ids,
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'EC2_UNAUTHORIZED',
'message': repr(e)}
try:
self.client.terminate_instances(
InstanceIds=instance_ids,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while terminating Instances=%s, %s",
''.join(instance_ids), e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def stop_waiter(self, instance_ids):
waiter = self.client.get_waiter('instance_stopped')
if not isinstance(instance_ids, list):
instance_ids = [instance_ids]
waiter.wait(InstanceIds=instance_ids)
def start_waiter(self, instance_ids):
waiter = self.client.get_waiter('instance_running')
if not isinstance(instance_ids, list):
instance_ids = [instance_ids]
waiter.wait(InstanceIds=instance_ids)
def stop_instances(self, instance_ids):
if not instance_ids:
return {'success': False, 'error_code': 'EC2_NO_INSTANCE_ID', 'message': 'InstanceID(s) not provided'}
if not isinstance(instance_ids, list):
instance_ids = [instance_ids]
try:
self.client.stop_instances(
InstanceIds=instance_ids,
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'EC2_UNAUTHORIZED',
'message': repr(e)}
try:
self.client.stop_instances(
InstanceIds=instance_ids,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while stopping Instances=%s, %s",
''.join(instance_ids), e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def start_instances(self, instance_ids):
if not instance_ids:
return {'success': False, 'error_code': 'EC2_NO_INSTANCE_ID', 'message': 'InstanceID(s) not provided'}
if not isinstance(instance_ids, list):
instance_ids = [instance_ids]
try:
self.client.start_instances(
InstanceIds=instance_ids,
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'EC2_UNAUTHORIZED',
'message': repr(e)}
try:
self.client.start_instances(
InstanceIds=instance_ids,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while starting Instances=%s, %s",
', '.join(instance_ids), e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_security_group(self, security_group_id):
if not security_group_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'Security Group ID not provided'}
try:
self.client.delete_security_group(
GroupId=security_group_id,
DryRun=True
)
except ClientError as e:
context_helper.logger().debug("Dry-run check for delete_security_group raised: %s", e)
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'UNAUTHORIZED',
'message': repr(e)}
try:
self.client.delete_security_group(
GroupId=security_group_id,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting SecurityGroup=%s, %s",
security_group_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_eni(self, eni_id):
if not eni_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'NetworkInterfaceID not provided'}
try:
self.client.delete_network_interface(
DryRun=True,
NetworkInterfaceId=eni_id
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'UNAUTHORIZED', 'message': repr(e)}
try:
self.client.delete_network_interface(
DryRun=False,
NetworkInterfaceId=eni_id
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting ENI=%s, %s",
eni_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_route_table(self, route_table_id):
if not route_table_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'RouteTableID not provided'}
try:
self.client.delete_route_table(
DryRun=True,
RouteTableId=route_table_id
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'UNAUTHORIZED', 'message': repr(e)}
try:
self.client.delete_route_table(
DryRun=False,
RouteTableId=route_table_id
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting RouteTable=%s, %s",
route_table_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_snapshot(self, snapshot_id):
if not snapshot_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'SnapshotID not provided'}
try:
self.client.delete_snapshot(
SnapshotId=snapshot_id,
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'UNAUTHORIZED', 'message': repr(e)}
try:
self.client.delete_snapshot(
SnapshotId=snapshot_id,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting Snapshot=%s, %s",
snapshot_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_internet_gateway(self, internet_gateway_id):
if not internet_gateway_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'InternetGatewayID not provided'}
try:
self.client.delete_internet_gateway(
DryRun=True,
InternetGatewayId=internet_gateway_id
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'UNAUTHORIZED',
'message': repr(e)}
try:
self.client.delete_internet_gateway(
DryRun=False,
InternetGatewayId=internet_gateway_id
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting InternetGateway=%s, %s",
internet_gateway_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_vpn_gateway(self, vpn_gateway_id):
if not vpn_gateway_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'VPNGatewayID not provided'}
try:
self.client.delete_vpn_gateway(
VpnGatewayId=vpn_gateway_id,
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'UNAUTHORIZED', 'message': repr(e)}
try:
self.client.delete_vpn_gateway(
VpnGatewayId=vpn_gateway_id,
DryRun=False
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting VPNGateway=%s, %s",
vpn_gateway_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def get_vpc_endpoint_s3_service_name(self, region_id):
# service_names = self.client.describe_vpc_endpoint_services()
return 'com.amazonaws.' + region_id + '.s3'
def create_s3_endpoint(self, vpc_id, region_id):
if not vpc_id or not region_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'VpcId or RegionId not provided'}
try:
self.client.create_vpc_endpoint(
VpcId=vpc_id,
ServiceName=self.get_vpc_endpoint_s3_service_name(region_id)
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while creating s3 endpoint "
"for VPC=%s, %s",
vpc_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def enable_termination_protection(self, instance_id):
if not instance_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'InstanceID not provided'}
try:
self.client.modify_instance_attribute(
DisableApiTermination={
'Value': True
},
InstanceId=instance_id,
DryRun=True,
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'UNAUTHORIZED',
'message': repr(e)}
try:
self.client.modify_instance_attribute(
DisableApiTermination={
'Value': True
},
InstanceId=instance_id,
DryRun=False,
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while enabling "
"TerminationProtection for Instnace=%s, %s",
instance_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def enable_ebs_optimise(self, instance_id):
if not instance_id:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'InstanceID not provided'}
result = self.stop_instances(instance_id)
if not result['success']:
return result
self.stop_waiter(instance_id)
try:
self.client.modify_instance_attribute(
EbsOptimized={
'Value': True
},
InstanceId=instance_id,
DryRun=True,
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
return {'success': False, 'error_code': 'UNAUTHORIZED',
'message': repr(e)}
try:
self.client.modify_instance_attribute(
EbsOptimized={
'Value': True
},
InstanceId=instance_id,
DryRun=False,
)
result = self.start_instances(instance_id)
self.start_waiter(instance_id)
if not result['success']:
result['message'] = 'Unable to start the instance. Please manually start the InstanceID=' + instance_id
return result
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while enabling "
"EBSOptimize for InstanceID=%s, %s",
instance_id, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def delete_instance_tags(self, instance_ids, tags):
if not instance_ids:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'InstanceID not provided'}
if not isinstance(instance_ids, list):
instance_ids = [instance_ids]
try:
self.client.delete_tags(
DryRun=False,
Resources=instance_ids,
Tags=tags
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while deleting tags "
"for InstanceID=%s, %s",
instance_ids, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
def set_instance_tags(self, instance_ids, tags):
if not instance_ids:
return {'success': False, 'error_code': 'VALUE_ERROR', 'message': 'InstanceID not provided'}
if not isinstance(instance_ids, list):
instance_ids = [instance_ids]
try:
self.client.create_tags(
DryRun=False,
Resources=instance_ids,
Tags=tags
)
return {'success': True}
except BaseException as e:
context_helper.logger().exception("Some exception occurred while creating tags "
"for InstanceID=%s, %s",
instance_ids, e)
return {'success': False, 'error_code': 'EXCEPTION', 'message': repr(e)}
@staticmethod
def get_unused_volume_count(volumes):
unused_volume_count = 0
unused_list = []
cost = 0
for volume in volumes:
if not volume['attachments']:
unused_volume_count += 1
unused_list.append(volume['id'])
cost += EC2.__calculate_volume_cost(volume)
return unused_volume_count, unused_list, cost
@staticmethod
def __calculate_volume_cost(volume):
costItems = Constants.cost_matrix['volumes'][volume['type']]
cost = 0
for costItem in costItems:
cost += volume[costItem] * costItems[costItem]
return cost
@staticmethod
def get_unused_eips_count(eips):
unused_count = 0
unused_list = []
cost = 0
for eip in eips:
if 'instanceId' not in eip and not eip.get('networkInterfaceId'):
unused_count += 1
unused_list.append(eip['ip'])
cost += Constants.cost_matrix['eips']
return unused_count, unused_list, cost
@staticmethod
def get_unused_amis_count(amis):
unused_count = 0
unused_list = []
for ami in amis:
if 'age' in ami and ami['age'] > policy_helper.get_policy().max_ami_age():
unused_count += 1
unused_list.append(ami['id'])
return unused_count, unused_list
@staticmethod
def get_stopped_instance_count(instances):
unused_count = 0
unused_list = []
for instance in instances:
if instance['state'] == 'stopped' and 'lastStateChangedOn' in instance:
diff = datetime.now() - dateutil.parser.parse(instance['lastStateChangedOn']).replace(tzinfo=None)
if diff.days > policy_helper.get_policy().max_stopped_instance_age():
unused_count += 1
unused_list.append(instance['id'])
return unused_count, unused_list
@staticmethod
def get_unused_security_group_count(security_groups, instances):
unused_count = 0
unused_security_groups = []
def is_security_group_used(instances, sec_group_id):
if instances:
for instance in instances:
for security_group in instance['securityGroups']:
if security_group['id'] == sec_group_id:
return True
return False
for sec_grp in security_groups:
if sec_grp['name'] != 'default' and not is_security_group_used(instances, sec_grp['id']) and \
'ElasticMapReduce' not in sec_grp['name']:
unused_security_groups.append(sec_grp['id'])
unused_count += 1
return unused_count, unused_security_groups
@staticmethod
def get_unused_enis_count(enis):
unused_count = 0
unused_list = []
if enis:
for eni in enis:
if eni['status'] == 'available' and (eni.get('description') != 'RDSNetworkInterface'):
unused_count += 1
unused_list.append(eni['id'])
return unused_count, unused_list
@staticmethod
def get_unused_snapshots_count(snapshots, volumes, amis):
unused_count = 0
unused_list = []
def find_volume(volume_id):
if volumes:
for vlm in volumes:
if vlm['id'] == volume_id:
return vlm
return False
def find_image(ami_id):
if amis:
for ami in amis:
if ami['id'] == ami_id:
return ami
return False
for snapshot in snapshots:
instance_id, image_id = Helpers.parse_snapshot_description(snapshot['description'])
image = None
if image_id:
image = find_image(image_id)
volume = find_volume(snapshot['volumeId'])
if (image_id and not image) and not volume:
unused_count += 1
unused_list.append(snapshot['id'])
return unused_count, unused_list
@staticmethod
def get_unused_route_tables(route_tables):
unused_count = 0
unused_list = []
for route_table in route_tables:
if len(route_table['associations']) == 0:
unused_count += 1
unused_list.append(route_table['id'])
return unused_count, unused_list
@staticmethod
def get_unused_internet_gateways(internet_gateways):
unused_count = 0
unused_list = []
for internet_gateway in internet_gateways:
if 'attachments' not in internet_gateway or not internet_gateway['attachments']:
unused_count += 1
unused_list.append(internet_gateway['id'])
return unused_count, unused_list
@staticmethod
def get_unused_vpn_gateways(vpn_gateways):
unused_count = 0
unused_list = []
cost = 0
for vpn_gateway in vpn_gateways:
if vpn_gateway['state'] == 'available' and not vpn_gateway.get('vpcAttachments'):
unused_count += 1
unused_list.append(vpn_gateway['id'])
cost += Constants.cost_matrix['vpnGateways']
return unused_count, unused_list, cost
@staticmethod
def get_ec2_without_iams(ec2s):
count = 0
item_list = []
for ec2 in ec2s:
if 'iamProfileId' not in ec2 or not ec2['iamProfileId']:
count += 1
item_list.append(ec2['id'])
return count, item_list
@staticmethod
def get_security_groups_with_insecure_open_ports(security_groups):
count = 0
item_list = []
for security_group in security_groups:
if EC2.__check_sg_has_vulnerable_open_port(security_group,
policy_helper.get_policy().common_vulnerable_open_ports()):
count += 1
item_list.append(security_group['id'])
return count, item_list
@staticmethod
def get_security_groups_with_open_ssh_port(security_groups):
count = 0
item_list = []
for security_group in security_groups:
if EC2.__check_sg_has_vulnerable_open_port(security_group,
[22]):
count += 1
item_list.append(security_group['id'])
return count, item_list
@staticmethod
def __check_sg_has_vulnerable_open_port(security_group, ports):
if 'ingressRules' in security_group:
for ingress_rule in security_group['ingressRules']:
if 'ipRange' in ingress_rule and ingress_rule['ipRange']:
for ip_range in ingress_rule['ipRange']:
if 'CidrIp' in ip_range and ip_range['CidrIp'] == '0.0.0.0/0':
if (ingress_rule['fromPort'] or ingress_rule['toPort']) and \
(ingress_rule['fromPort'] == '-1' or ingress_rule['toPort'] == "-1" or
ingress_rule['fromPort'] in ports or ingress_rule['toPort'] in ports):
return True
return False
@staticmethod
def get_ec2s_without_TP(ec2s):
count = 0
item_list = []
for ec2 in ec2s:
if not ec2['isTerminationProtected']:
count += 1
item_list.append(ec2['id'])
return count, item_list
@staticmethod
def get_stale_sec_groups(vpcs):
count = 0
item_list = []
for vpc in vpcs:
for stale_sg in vpc['staleSecurityGroups']:
count += 1
item_list.append(stale_sg['id'])
return count, item_list
@staticmethod
def get_failing_nat_gateways(vpcs):
count = 0
item_list = []
for vpc in vpcs:
for nat_gateway in vpc['natGateways']:
if not nat_gateway['state'] or nat_gateway['state'] == 'failed':
count += 1
item_list.append(nat_gateway['id'])
return count, item_list
@staticmethod
def get_ipv6_vpc_wo_egress_igw(vpcs):
count = 0
item_list = []
for vpc in vpcs:
if vpc['hasEgressOnlyInternetGateways']:
count += 1
item_list.append(vpc['id'])
return count, item_list
@staticmethod
def get_vpc_wo_private_subnet(vpcs):
count = 0
item_list = []
for vpc in vpcs:
if not vpc.get('natGateways'):
count += 1
item_list.append(vpc['id'])
return count, item_list
@staticmethod
def get_classic_ec2s(ec2s):
count = 0
item_list = []
for ec2 in ec2s:
if not ec2.get('vpcId'):
count += 1
item_list.append(ec2['id'])
return count, item_list
@staticmethod
def filter_ec2s_wo_ebs_optimised(ec2s):
count = 0
item_list = []
for ec2 in ec2s:
if not ec2.get('isEbsOptimized'):
count += 1
item_list.append(ec2['id'])
return count, item_list
@staticmethod
def filter_volumes_unencrypted(volumes):
count = 0
item_list = []
for volume in volumes:
if not volume.get('isEncrypted'):
count += 1
item_list.append(volume['id'])
return count, item_list
@staticmethod
def get_vpcs_without_s3_endpoints(vpcs, vpc_endpoints):
count = 0
item_list = []
def vpc_has_endpoint(vpc_id, vpc_endpoints):
for endpoint in vpc_endpoints:
if endpoint['vpcId'] == vpc_id:
return True
return False
for vpc in vpcs:
if not vpc_has_endpoint(vpc['id'], vpc_endpoints):
count += 1
item_list.append(vpc['id'])
return count, item_list
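# Illustrative sketch (not part of the original module): every mutating method above
# follows the same boto3 pattern -- call the API once with DryRun=True to verify IAM
# permissions (a DryRunOperation error means the call would be allowed), then repeat
# the call for real. The helper below shows that pattern in isolation; the client and
# instance ids are assumed to be supplied by the caller.
def _example_stop_with_dry_run(client, instance_ids):
    from botocore.exceptions import ClientError
    try:
        # Permission check only; AWS raises DryRunOperation when the caller is allowed.
        client.stop_instances(InstanceIds=instance_ids, DryRun=True)
    except ClientError as e:
        if 'DryRunOperation' not in str(e):
            return {'success': False, 'error_code': 'EC2_UNAUTHORIZED', 'message': repr(e)}
    # The real call, mirroring the success/failure dicts used throughout the class.
    client.stop_instances(InstanceIds=instance_ids, DryRun=False)
    return {'success': True}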
|
import os.path
import json
class Probe:
service = "service"
request_url = ""
units = "metric"
response = {}
api_key = ""
user_location = ()
# Mock variables
response_stub = {}
request_sent = False
request_sent_to = ""
def get_weather_data(self):
self.response = self.make_request()
return self.response
def set_api_key(self, api_key):
self.api_key = api_key
def set_user_location(self, user_location):
self.user_location = user_location
def create_request_url(self):
self.request_url = f"https://api.openweathermap.org/data/2.5/onecall?lat={self.user_location[0]}&lon={self.user_location[1]}&exclude=minutely&appid={self.api_key}&units={self.units}"
def make_request(self):
self.request_sent = True
self.request_sent_to = self.request_url
return json.loads(self.read_response_stub())
# Mock methods
def read_response_stub(self):
with open(os.path.join("tests", "response_stub.json"), "r") as f:
file_contents = f.read()
return file_contents
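# Illustrative usage sketch (assumption: run from the project root so that
# tests/response_stub.json is readable). Because make_request() is stubbed, the class
# only records the URL it would have requested and returns the canned JSON response.
def _example_probe_usage():
    probe = Probe()
    probe.set_api_key("dummy-key")          # hypothetical API key
    probe.set_user_location((51.5, -0.12))  # hypothetical lat/lon
    probe.create_request_url()
    data = probe.get_weather_data()
    return probe.request_sent_to, data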
|
from spynnaker.pyNN import *
__version__ = "2016.001"
|
from django.contrib import admin
# Register your models here.
from .models import Blocked, Follower, Following, Muted
admin.site.register(Following)
admin.site.register(Follower)
admin.site.register(Muted)
admin.site.register(Blocked)
|
from django.conf.urls import url
from .consumers import ChatConsumer
websocket_urlpatterns = [
url(r"^messages/(?P<username>[\w.@+-]+)/$", ChatConsumer),
]
|
import re
import socket
import platform
import sshuttle.helpers as helpers
import sshuttle.client as client
import sshuttle.firewall as firewall
import sshuttle.hostwatch as hostwatch
import sshuttle.ssyslog as ssyslog
from sshuttle.options import parser, parse_ipport
from sshuttle.helpers import family_ip_tuple, log, Fatal
from sshuttle.sudoers import sudoers
def main():
opt = parser.parse_args()
if opt.sudoers or opt.sudoers_no_modify:
if platform.platform().startswith('OpenBSD'):
log('Automatic sudoers does not work on BSD')
exit(1)
if not opt.sudoers_filename:
log('--sudoers-file must be set or omitted.')
exit(1)
sudoers(
user_name=opt.sudoers_user,
no_modify=opt.sudoers_no_modify,
file_name=opt.sudoers_filename
)
if opt.daemon:
opt.syslog = 1
if opt.wrap:
import sshuttle.ssnet as ssnet
ssnet.MAX_CHANNEL = opt.wrap
helpers.verbose = opt.verbose
try:
if opt.firewall:
if opt.subnets or opt.subnets_file:
parser.error('exactly zero arguments expected')
return firewall.main(opt.method, opt.syslog)
elif opt.hostwatch:
return hostwatch.hw_main(opt.subnets, opt.auto_hosts)
else:
includes = opt.subnets + opt.subnets_file
excludes = opt.exclude
if not includes and not opt.auto_nets:
parser.error('at least one subnet, subnet file, '
'or -N expected')
remotename = opt.remote
if remotename == '' or remotename == '-':
remotename = None
nslist = [family_ip_tuple(ns) for ns in opt.ns_hosts]
if opt.seed_hosts:
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
elif opt.auto_hosts:
sh = []
else:
sh = None
if opt.listen:
ipport_v6 = None
ipport_v4 = None
lst = opt.listen.split(",")
for ip in lst:
family, ip, port = parse_ipport(ip)
if family == socket.AF_INET6:
ipport_v6 = (ip, port)
else:
ipport_v4 = (ip, port)
else:
# parse_ipport4('127.0.0.1:0')
ipport_v4 = "auto"
# parse_ipport6('[::1]:0')
ipport_v6 = "auto" if not opt.disable_ipv6 else None
if opt.syslog:
ssyslog.start_syslog()
ssyslog.stderr_to_syslog()
return_code = client.main(ipport_v6, ipport_v4,
opt.ssh_cmd,
remotename,
opt.python,
opt.latency_control,
opt.dns,
nslist,
opt.method,
sh,
opt.auto_hosts,
opt.auto_nets,
includes,
excludes,
opt.daemon,
opt.to_ns,
opt.pidfile,
opt.user)
if return_code == 0:
log('Normal exit code, exiting...')
else:
log('Abnormal exit code %d detected, failing...' % return_code)
return return_code
except Fatal as e:
log('fatal: %s\n' % e)
return 99
except KeyboardInterrupt:
log('\n')
log('Keyboard interrupt: exiting.\n')
return 1
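# Illustrative invocations (comments only, not part of the original module); the
# hostname and subnets below are placeholders:
#
#   sshuttle -r user@gateway.example.com 192.168.0.0/16 -vv
#   sshuttle --dns -r user@gateway.example.com 0/0
#
# Both map onto the options parsed above: -r sets opt.remote, the positional
# arguments become opt.subnets, --dns toggles opt.dns, and each -v raises
# helpers.verbose.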
|
from .arch import ( # noqa # NOTE: If you want to add your architecture, please add YourCustomArchConfig class in this line.
ArchConfig,
Resnet34Config,
Resnet50Config,
Resnet56Config,
Wideresnet40Config,
)
from .datamodule import ( # noqa # NOTE: If you want to add your datamodule, please add YourCustomDataModuleConfig class in this line.
Cifar10DataModuleConfig,
DataModuleConfig,
Imagenet1kDataModuleConfig,
)
from .env import DefaultEnvConfig, EnvConfig, NogpuEnvConfig # noqa
from .optimizer import AdamConfig, OptimizerConfig, SgdConfig # noqa
from .scheduler import ( # noqa
CosinConfig,
MultistepConfig,
PlateauConfig,
SchedulerConfig,
)
|
'''
This is internal definitions used by the latextools_plugin module
This separate module is required because ST's reload semantics make it
impossible to implement something like this within that module itself.
'''
import re
_REGISTRY = None
# list of tuples consisting of a path and a glob to load in the plugin_loaded()
# method to handle the case where `add_plugin_path` is called before this
# module has been fully loaded.
_REGISTERED_PATHS_TO_LOAD = []
# list of tuples consisting of names and class objects to load in the
# plugin_loaded() method to handle the case where a plugin is defined before
# the registry has been created
_REGISTERED_CLASSES_TO_LOAD = []
# a list of tuples consisting of a module name and a module object used in the
# _latextools_modules_hack context manager to provide an API for adding modules
_WHITELIST_ADDED = []
# LaTeXToolsPlugin - base class for all plugins
class LaTeXToolsPluginMeta(type):
'''
Metaclass for plugins which will automatically register them with the
plugin registry
'''
def __init__(cls, name, bases, attrs):
try:
super(LaTeXToolsPluginMeta, cls).__init__(name, bases, attrs)
except TypeError:
# occurs on reload
return
if cls == LaTeXToolsPluginMeta or cls is None:
return
try:
if not any(
(True for base in bases if issubclass(base, LaTeXToolsPlugin))
):
return
except NameError:
return
registered_name = _classname_to_internal_name(name)
_REGISTERED_CLASSES_TO_LOAD.append((registered_name, cls))
if _REGISTRY is not None:
_REGISTRY[registered_name] = cls
LaTeXToolsPlugin = LaTeXToolsPluginMeta('LaTeXToolsPlugin', (object,), {})
LaTeXToolsPlugin.__doc__ = '''
Base class for LaTeXTools plugins. Implementation details will depend on where
this plugin is supposed to be loaded. See the documentation for details.
'''
def _classname_to_internal_name(s):
'''
Converts a Python class name in to an internal name
The intention here is to mirror how ST treats *Command objects, i.e., by
converting them from CamelCase to under_scored. Similarly, we will chop
"Plugin" off the end of the plugin, though it isn't necessary for the class
to be treated as a plugin.
E.g.,
SomeClass will become some_class
ReferencesPlugin will become references
BibLaTeXPlugin will become biblatex
'''
if not s:
return s
# little hack to support LaTeX or TeX in the plugin name
while True:
match = re.search(r'(?:Bib)?(?:La)?TeX', s)
if match:
s = s.replace(
match.group(0),
match.group(0)[0] + match.group(0)[1:].lower()
)
else:
break
# pilfered from http://code.activestate.com/recipes/66009/
s = re.sub(r'(?<=[a-z])[A-Z]|(?<!^)[A-Z](?=[a-z])', r"_\g<0>", s).lower()
if s.endswith('_plugin'):
s = s[:-7]
return s
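# Illustrative sketch (not part of the original module): any class deriving from
# LaTeXToolsPlugin is picked up by the metaclass above and queued for registration
# under its converted name, so the hypothetical plugin below would end up registered
# as "my_references" once the registry exists.
#
#   class MyReferencesPlugin(LaTeXToolsPlugin):
#       def run(self):
#           pass
#
#   assert _classname_to_internal_name('MyReferencesPlugin') == 'my_references'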
|
__all__ = ["OAuth2Token", "get_storage_handler"]
import datetime
import json
import logging
import pathlib
from abc import abstractmethod
from typing import Optional, Type
from . import errors
_logger = logging.getLogger('coresender')
_storage_handlers = {}
def get_storage_handler(name: str) -> Type['OAuth2TokenStorage']:
try:
token_storage = _storage_handlers[name]
except KeyError:
raise errors.CoresenderError("Unknown token storage: %s" % name)
return token_storage
class OAuth2Token:
__slots__ = ('_token_type', '_expires_on', '_access_token', '_refresh_token')
def __init__(self):
self._token_type = None
self._expires_on = None
self._access_token = None
self._refresh_token = None
def is_valid(self) -> bool:
return self.expires_on > datetime.datetime.now()
@property
def token_type(self) -> Optional[str]:
return self._token_type
@token_type.setter
def token_type(self, value: str):
self._token_type = str(value)
@property
def expires_on(self) -> Optional[datetime.datetime]:
return self._expires_on
@expires_on.setter
def expires_on(self, value: datetime.datetime):
self._expires_on = value
def expire_in(self, value: str):
self.expires_on = datetime.datetime.now() + datetime.timedelta(seconds=int(value))
@property
def access_token(self) -> Optional[str]:
return self._access_token
@access_token.setter
def access_token(self, value: str):
self._access_token = value
@property
def refresh_token(self) -> Optional[str]:
return self._refresh_token
@refresh_token.setter
def refresh_token(self, value: str):
self._refresh_token = value
@classmethod
def from_rq_json(cls, data: dict) -> 'OAuth2Token':
r = cls()
r.access_token = data['access_token']
r.refresh_token = data['refresh_token']
r.token_type = data['token_type']
r.expire_in(data['expires_in'])
return r
@classmethod
def from_json(cls, data: dict) -> 'OAuth2Token':
r = cls()
r.access_token = data['access_token']
r.refresh_token = data['refresh_token']
r.token_type = data['token_type']
r.expires_on = datetime.datetime.fromisoformat(data['expires_on'])
return r
def to_json(self) -> dict:
data = {
'access_token': self.access_token,
'refresh_token': self.refresh_token,
'token_type': self.token_type,
'expires_on': self.expires_on.isoformat(),
}
return data
def __repr__(self):
r = ', '.join(['%s="%s"' % (item[1:], getattr(self, item)) for item in self.__slots__])
r = '<OAuth2Token ' + r + '>'
return r
class OAuth2TokenStorage:
def __init_subclass__(cls, storage_name: str, **kwargs):
super().__init_subclass__(**kwargs)
global _storage_handlers
_storage_handlers[storage_name] = cls
def __init__(self, params: dict):
self.params = params
@abstractmethod
def read(self) -> Optional[OAuth2Token]:
raise NotImplementedError()
@abstractmethod
def save(self, token: OAuth2Token) -> None:
raise NotImplementedError()
class OAuth2TokenFileStorage(OAuth2TokenStorage, storage_name='file'):
default_storage_path = '~/.coresender.token'
def _get_path(self):
path = self.params.get('path', self.default_storage_path)
path = pathlib.Path(path).expanduser()
return path
def read(self) -> Optional[OAuth2Token]:
path = self._get_path()
if not path.exists():
return
with path.open('r') as fh:
try:
token = json.load(fh)
except Exception as exc:
_logger.exception("Cannot read saved token from %s", path, exc_info=exc)
return
token = OAuth2Token.from_json(token)
_logger.debug("Success reading cached token data from %s", path)
return token
def save(self, token: OAuth2Token) -> None:
path = self._get_path()
with path.open('w') as fh:
json.dump(token.to_json(), fh)
_logger.debug("OAuth2Token successfully dumped into %s" % path)
|
import csv
import json
import os.path
from ConfigParser import SafeConfigParser, NoOptionError, NoSectionError
from decorator import decorator
from logger import logger
from perfrunner.helpers.misc import uhex
REPO = 'https://github.com/couchbase/perfrunner'
@decorator
def safe(method, *args, **kargs):
try:
return method(*args, **kargs)
except (NoSectionError, NoOptionError), e:
logger.warn('Failed to get option from config: {}'.format(e))
class Config(object):
def __init__(self):
self.config = SafeConfigParser()
self.name = ''
def parse(self, fname, override):
if override:
override = [x for x in csv.reader(
' '.join(override).split(','), delimiter='.')]
logger.info('Reading configuration file: {}'.format(fname))
if not os.path.isfile(fname):
logger.interrupt('File doesn\'t exist: {}'.format(fname))
self.config.optionxform = str
self.config.read(fname)
for section, option, value in override:
if not self.config.has_section(section):
self.config.add_section(section)
self.config.set(section, option, value)
basename = os.path.basename(fname)
self.name = os.path.splitext(basename)[0]
@safe
def _get_options_as_dict(self, section):
if section in self.config.sections():
return {p: v for p, v in self.config.items(section)}
else:
return {}
class ClusterSpec(Config):
@safe
def yield_clusters(self):
for cluster_name, servers in self.config.items('clusters'):
yield cluster_name, [s.split(',', 1)[0] for s in servers.split()]
@safe
def yield_masters(self):
for _, servers in self.yield_clusters():
yield servers[0]
@safe
def yield_servers(self):
for _, servers in self.yield_clusters():
for server in servers:
yield server
@safe
def yield_hostnames(self):
for _, servers in self.yield_clusters():
for server in servers:
yield server.split(':')[0]
@safe
def yield_servers_by_role(self, role):
for name, servers in self.config.items('clusters'):
has_service = []
for server in servers.split():
if role in server.split(',')[1:]:
has_service.append(server.split(',')[0])
yield name, has_service
@property
@safe
def roles(self):
server_roles = {}
for _, node in self.config.items('clusters'):
for server in node.split():
name = server.split(',', 1)[0]
if ',' in server:
server_roles[name] = server.split(',', 1)[1]
else: # For backward compatibility, set to kv if not specified
server_roles[name] = 'kv'
return server_roles
@property
@safe
def workers(self):
return self.config.get('clients', 'hosts').split()
@property
@safe
def gateways(self):
return self.config.get('gateways', 'hosts').split()
@property
@safe
def gateloads(self):
return self.config.get('gateloads', 'hosts').split()
@property
@safe
def client_credentials(self):
return self.config.get('clients', 'credentials').split(':')
@property
@safe
def paths(self):
data_path = self.config.get('storage', 'data')
index_path = self.config.get('storage', 'index')
return data_path, index_path
@property
@safe
def rest_credentials(self):
return self.config.get('credentials', 'rest').split(':')
@property
@safe
def ssh_credentials(self):
return self.config.get('credentials', 'ssh').split(':')
@property
def parameters(self):
return self._get_options_as_dict('parameters')
class TestConfig(Config):
@property
def test_case(self):
options = self._get_options_as_dict('test_case')
return TestCaseSettings(options)
@property
def cluster(self):
options = self._get_options_as_dict('cluster')
return ClusterSettings(options)
@property
def bucket(self):
options = self._get_options_as_dict('bucket')
return BucketSettings(options)
@property
def buckets(self):
return [
'bucket-{}'.format(i + 1) for i in range(self.cluster.num_buckets)
]
@property
def emptybuckets(self):
return [
'bucket-{}'.format(i + 1) for i in range(self.cluster.num_buckets,
self.cluster.num_buckets +
self.cluster.emptybuckets)
]
@property
def max_buckets(self):
return [
'bucket-{}'.format(i + 1) for i in range(self.cluster.max_num_buckets)
]
@property
def compaction(self):
options = self._get_options_as_dict('compaction')
return CompactionSettings(options)
@property
def watermark_settings(self):
return self._get_options_as_dict('watermarks')
@property
def load_settings(self):
options = self._get_options_as_dict('load')
return LoadSettings(options)
@property
def hot_load_settings(self):
options = self._get_options_as_dict('hot_load')
hot_load = HotLoadSettings(options)
load = self.load_settings
hot_load.doc_gen = load.doc_gen
hot_load.doc_partitions = load.doc_partitions
hot_load.size = load.size
return hot_load
@property
def xdcr_settings(self):
options = self._get_options_as_dict('xdcr')
return XDCRSettings(options)
@property
def index_settings(self):
options = self._get_options_as_dict('index')
return IndexSettings(options)
@property
def spatial_settings(self):
options = self._get_options_as_dict('spatial')
return SpatialSettings(options)
@property
def secondaryindex_settings(self):
options = self._get_options_as_dict('secondary')
return SecondaryIndexSettings(options)
@property
def n1ql_settings(self):
options = self._get_options_as_dict('n1ql')
return N1QLSettings(options)
@property
def access_settings(self):
options = self._get_options_as_dict('access')
access = AccessSettings(options)
access.resolve_subcategories(self)
load = self.load_settings
access.doc_gen = load.doc_gen
access.doc_partitions = load.doc_partitions
access.size = load.size
return access
@property
def rebalance_settings(self):
options = self._get_options_as_dict('rebalance')
return RebalanceSettings(options)
@property
def stats_settings(self):
options = self._get_options_as_dict('stats')
return StatsSettings(options)
@property
def internal_settings(self):
return self._get_options_as_dict('internal')
@property
def gateway_settings(self):
options = self._get_options_as_dict('gateway')
return GatewaySettings(options)
@property
def gateload_settings(self):
options = self._get_options_as_dict('gateload')
return GateloadSettings(options)
@property
def worker_settings(self):
options = self._get_options_as_dict('worker_settings')
return WorkerSettings(options)
def get_n1ql_query_definition(self, query_name):
return self._get_options_as_dict('n1ql-{}'.format(query_name))
class TestCaseSettings(object):
USE_WORKERS = 1
LEVEL = 'Basic' # deprecated, alt: Advanced
def __init__(self, options):
self.test_module = '.'.join(options.get('test').split('.')[:-1])
self.test_class = options.get('test').split('.')[-1]
self.test_summary = options.get('summary')
self.metric_title = options.get('title')
self.larger_is_better = options.get('larger_is_better')
self.monitor_clients = options.get('monitor_clients', False)
self.level = options.get('level', self.LEVEL)
self.use_workers = int(options.get('use_workers', self.USE_WORKERS))
self.use_backup_wrapper = options.get('use_backup_wrapper', False)
class ClusterSettings(object):
NUM_BUCKETS = 1
NUM_EMPTYBUCKETS = 0
MIN_NUM_BUCKETS = 1
MAX_NUM_BUCKETS = 10
INCR_NUM_BUCKETS = 1
GROUP_NUMBER = 1
NUM_CPUS = 0 # Use defaults
RUN_CBQ = 0
SFWI = 0
TCMALLOC_AGGRESSIVE_DECOMMIT = 0
INDEX_MEM_QUOTA = 256
def __init__(self, options):
self.mem_quota = int(options.get('mem_quota'))
self.index_mem_quota = int(options.get('index_mem_quota', self.INDEX_MEM_QUOTA))
self.initial_nodes = [
int(nodes) for nodes in options.get('initial_nodes').split()
]
self.num_buckets = int(options.get('num_buckets', self.NUM_BUCKETS))
self.emptybuckets = int(options.get('emptybuckets', self.NUM_EMPTYBUCKETS))
self.min_num_buckets = int(options.get('min_num_buckets',
self.MIN_NUM_BUCKETS))
self.max_num_buckets = int(options.get('max_num_buckets',
self.MAX_NUM_BUCKETS))
self.incr_num_buckets = int(options.get('incr_num_buckets',
self.INCR_NUM_BUCKETS))
self.num_vbuckets = options.get('num_vbuckets')
self.group_number = int(options.get('group_number', self.GROUP_NUMBER))
self.num_cpus = int(options.get('num_cpus', self.NUM_CPUS))
self.disable_moxi = options.get('disable_moxi')
self.run_cbq = options.get('run_cbq', self.RUN_CBQ)
self.sfwi = options.get('sfwi', self.SFWI)
self.tcmalloc_aggressive_decommit = options.get('tcmalloc_aggressive_decommit',
self.TCMALLOC_AGGRESSIVE_DECOMMIT)
class StatsSettings(object):
CBMONITOR = {'host': 'cbmonitor.sc.couchbase.com', 'password': 'password'}
ENABLED = 1
POST_TO_SF = 0
INTERVAL = 5
SECONDARY_STATSFILE = '/root/statsfile'
LAT_INTERVAL = 1
POST_RSS = 0
POST_CPU = 0
SERIESLY = {'host': 'cbmonitor.sc.couchbase.com'}
SHOWFAST = {'host': 'showfast.sc.couchbase.com', 'password': 'password'}
def __init__(self, options):
self.cbmonitor = {'host': options.get('cbmonitor_host',
self.CBMONITOR['host']),
'password': options.get('cbmonitor_password',
self.CBMONITOR['password'])}
self.enabled = int(options.get('enabled', self.ENABLED))
self.post_to_sf = int(options.get('post_to_sf', self.POST_TO_SF))
self.interval = int(options.get('interval', self.INTERVAL))
self.lat_interval = int(options.get('lat_interval', self.LAT_INTERVAL))
self.secondary_statsfile = options.get('secondary_statsfile', self.SECONDARY_STATSFILE)
self.post_rss = int(options.get('post_rss', self.POST_RSS))
self.post_cpu = int(options.get('post_cpu', self.POST_CPU))
self.seriesly = {'host': options.get('seriesly_host',
self.SERIESLY['host'])}
self.showfast = {'host': options.get('showfast_host',
self.SHOWFAST['host']),
'password': options.get('showfast_password',
self.SHOWFAST['password'])}
class BucketSettings(object):
PASSWORD = 'password'
MAX_NUM_SHARDS = -1
MAX_THREADS = -1
WARMUP_MIN_MEMORY_THRESHOLD = -1
REPLICA_NUMBER = 1
REPLICA_INDEX = 0
EVICTION_POLICY = 'valueOnly' # alt: fullEviction
EXPIRY_PAGER_SLEEP_TIME = -1
DEFRAGMENTER_ENABLED = -1
HT_LOCKS = -1
BFILTER_ENABLED = None
def __init__(self, options):
self.password = options.get('password', self.PASSWORD)
self.max_num_shards = int(
options.get('max_num_shards', self.MAX_NUM_SHARDS)
)
self.max_threads = int(
options.get('max_threads', self.MAX_THREADS)
)
self.warmup_min_memory_threshold = int(
options.get('warmup_min_memory_threshold',
self.WARMUP_MIN_MEMORY_THRESHOLD)
)
self.replica_number = int(
options.get('replica_number', self.REPLICA_NUMBER)
)
self.replica_index = int(
options.get('replica_index', self.REPLICA_INDEX)
)
self.eviction_policy = \
options.get('eviction_policy', self.EVICTION_POLICY)
self.defragmenter_enabled = options.get('defragmenter_enabled',
self.DEFRAGMENTER_ENABLED)
self.threads_number = options.get('threads_number') # 2.x
self.exp_pager_stime = int(options.get('exp_pager_stime',
self.EXPIRY_PAGER_SLEEP_TIME))
self.ht_locks = int(options.get('ht_locks', self.HT_LOCKS))
self.bfilter_enabled = options.get('bfilter_enabled', self.BFILTER_ENABLED)
class CompactionSettings(object):
DB_PERCENTAGE = 30
VIEW_PERCENTAGE = 30
PARALLEL = True
def __init__(self, options):
self.db_percentage = options.get('db_percentage', self.DB_PERCENTAGE)
self.view_percentage = options.get('view_percentage', self.VIEW_PERCENTAGE)
self.parallel = options.get('parallel', self.PARALLEL)
def __str__(self):
return str(self.__dict__)
class TargetSettings(object):
def __init__(self, host_port, bucket, password, prefix):
self.password = password
self.node = host_port
self.bucket = bucket
self.prefix = prefix
class RebalanceSettings(object):
SWAP = 0 # Don't swap by default
FAILOVER = 0 # No failover by default
GRACEFUL_FAILOVER = 0
DELTA_RECOVERY = 0 # Full recovery by default
SLEEP_AFTER_FAILOVER = 600
START_AFTER = 1200
STOP_AFTER = 1200
def __init__(self, options):
self.nodes_after = [int(_) for _ in options.get('nodes_after').split()]
self.swap = int(options.get('swap', self.SWAP))
self.failover = int(options.get('failover', self.FAILOVER))
self.graceful_failover = int(options.get('graceful_failover',
self.GRACEFUL_FAILOVER))
self.sleep_after_failover = int(options.get('sleep_after_failover',
self.SLEEP_AFTER_FAILOVER))
self.delta_recovery = int(options.get('delta_recovery',
self.DELTA_RECOVERY))
self.start_after = int(options.get('start_after', self.START_AFTER))
self.stop_after = int(options.get('stop_after', self.STOP_AFTER))
class PhaseSettings(object):
CREATES = 0
READS = 0
UPDATES = 0
DELETES = 0
CASES = 0
OPS = 0
THROUGHPUT = float('inf')
QUERY_THROUGHPUT = float('inf')
N1QL_THROUGHPUT = float('inf')
DOC_GEN = 'old'
DOC_PARTITIONS = 1
ITEMS = 0
SIZE = 2048
EXPIRATION = 0
WORKING_SET = 100
WORKING_SET_ACCESS = 100
WORKERS = 12
QUERY_WORKERS = 0
N1QL_WORKERS = 0
N1QL_OP = 'read'
DCP_WORKERS = 0
SEQ_READS = False
SEQ_UPDATES = False
TIME = 3600 * 24
ASYNC = False
ITERATIONS = 1
def __init__(self, options):
self.creates = int(options.get('creates', self.CREATES))
self.reads = int(options.get('reads', self.READS))
self.updates = int(options.get('updates', self.UPDATES))
self.deletes = int(options.get('deletes', self.DELETES))
self.cases = int(options.get('cases', self.CASES))
self.ops = float(options.get('ops', self.OPS))
self.throughput = float(options.get('throughput', self.THROUGHPUT))
self.query_throughput = float(options.get('query_throughput',
self.QUERY_THROUGHPUT))
self.n1ql_throughput = float(options.get('n1ql_throughput',
self.N1QL_THROUGHPUT))
self.doc_gen = options.get('doc_gen', self.DOC_GEN)
self.doc_partitions = int(options.get('doc_partitions',
self.DOC_PARTITIONS))
self.size = int(options.get('size', self.SIZE))
self.items = int(options.get('items', self.ITEMS))
self.expiration = int(options.get('expiration', self.EXPIRATION))
self.working_set = float(options.get('working_set', self.WORKING_SET))
self.working_set_access = int(options.get('working_set_access',
self.WORKING_SET_ACCESS))
self.workers = int(options.get('workers', self.WORKERS))
self.query_workers = int(options.get('query_workers',
self.QUERY_WORKERS))
self.n1ql_workers = int(options.get('n1ql_workers',
self.N1QL_WORKERS))
self.n1ql_op = options.get('n1ql_op', self.N1QL_OP)
self.dcp_workers = int(options.get('dcp_workers', self.DCP_WORKERS))
self.n1ql_queries = []
if 'n1ql_queries' in options:
self.n1ql_queries = options.get('n1ql_queries').strip().split(',')
self.seq_reads = self.SEQ_READS
self.seq_updates = self.SEQ_UPDATES
self.ddocs = None
self.index_type = None
self.qparams = {}
self.n1ql = None
self.time = int(options.get('time', self.TIME))
self.async = bool(int(options.get('async', self.ASYNC)))
self.iterations = int(options.get('iterations', self.ITERATIONS))
self.filename = None
def resolve_subcategories(self, config):
subcategories = self.n1ql_queries
query_specs = []
for subcategory in subcategories:
query_specs.append(config.get_n1ql_query_definition(subcategory))
self.n1ql_queries = query_specs
def __str__(self):
return str(self.__dict__)
class LoadSettings(PhaseSettings):
CREATES = 100
SEQ_UPDATES = True
class HotLoadSettings(PhaseSettings):
SEQ_READS = True
SEQ_UPDATES = False
def __init__(self, options):
if 'size' in options:
logger.interrupt(
"The document `size` may only be set in the [load] "
"and not in the [hot_load] section")
super(HotLoadSettings, self).__init__(options)
class XDCRSettings(object):
XDCR_REPLICATION_TYPE = 'bidir'
XDCR_REPLICATION_PROTOCOL = None
XDCR_USE_SSL = False
WAN_ENABLED = False
FILTER_EXPRESSION = None
def __init__(self, options):
self.replication_type = options.get('replication_type',
self.XDCR_REPLICATION_TYPE)
self.replication_protocol = options.get('replication_protocol',
self.XDCR_REPLICATION_PROTOCOL)
self.use_ssl = int(options.get('use_ssl', self.XDCR_USE_SSL))
self.wan_enabled = int(options.get('wan_enabled', self.WAN_ENABLED))
self.filter_expression = options.get('filter_expression', self.FILTER_EXPRESSION)
def __str__(self):
return str(self.__dict__)
class IndexSettings(object):
VIEWS = '[1]'
DISABLED_UPDATES = 0
PARAMS = '{}'
def __init__(self, options):
self.views = eval(options.get('views', self.VIEWS))
self.params = eval(options.get('params', self.PARAMS))
self.disabled_updates = int(options.get('disabled_updates',
self.DISABLED_UPDATES))
self.index_type = options.get('index_type')
def __str__(self):
return str(self.__dict__)
class SpatialSettings(object):
def __init__(self, options):
if not options:
return
self.indexes = []
if 'indexes' in options:
self.indexes = options.get('indexes').strip().split('\n')
self.disabled_updates = int(options.get('disabled_updates', 0))
self.dimensionality = int(options.get('dimensionality', 0))
self.data = options.get('data', None)
if 'view_names' in options:
self.view_names = options.get('view_names').strip().split('\n')
self.queries = options.get('queries', None)
self.workers = int(options.get('workers', 0))
self.throughput = float(options.get('throughput', float('inf')))
self.params = json.loads(options.get('params', "{}"))
def __str__(self):
return str(self.__dict__)
class SecondaryIndexSettings(object):
NAME = 'noname'
FIELD = 'nofield'
DB = ''
STALE = 'true'
def __init__(self, options):
self.name = str(options.get('name', self.NAME))
self.field = str(options.get('field', self.FIELD))
self.db = str(options.get('db', self.DB))
self.stale = str(options.get('stale', self.STALE))
for name in self.name.split(","):
index_partition_name = "index_{}_partitions".format(name)
val = str(options.get(index_partition_name, ''))
if val:
setattr(self, index_partition_name, val)
self.settings = {
'indexer.settings.inmemory_snapshot.interval': 200,
'indexer.settings.log_level': 'info',
'indexer.settings.max_cpu_percent': 2400,
'indexer.settings.persisted_snapshot.interval': 5000,
'indexer.settings.scan_timeout': 0,
'projector.settings.log_level': 'info'
}
for option in options:
if option.startswith('indexer.settings') or \
option.startswith('projector.settings') or \
option.startswith('queryport.client.settings'):
value = options.get(option)
try:
if '.' in value:
self.settings[option] = float(value)
else:
self.settings[option] = int(value)
continue
except ValueError:
pass
self.settings[option] = value
def __str__(self):
return str(self.__dict__)
class N1QLSettings(object):
def __init__(self, options):
self.indexes = []
if 'indexes' in options:
self.indexes = options.get('indexes').strip().split('\n')
self.settings = {}
for option in options:
if option.startswith('query.settings'):
key = option.split('.')[2]
value = options.get(option)
try:
if '.' in value:
self.settings[key] = float(value)
else:
self.settings[key] = int(value)
continue
except ValueError:
pass
self.settings[key] = value
def __str__(self):
return str(self.__dict__)
class AccessSettings(PhaseSettings):
OPS = float('inf')
def __init__(self, options):
if 'size' in options:
logger.interrupt(
"The document `size` may only be set in the [load] "
"and not in the [access] section")
super(AccessSettings, self).__init__(options)
class Experiment(object):
def __init__(self, fname):
logger.info('Reading experiment file: {}'.format(fname))
if not os.path.isfile(fname):
logger.interrupt('File doesn\'t exist: {}'.format(fname))
else:
self.name = os.path.splitext(os.path.basename(fname))[0]
with open(fname) as fh:
self.template = json.load(fh)
class GatewaySettings(object):
COMPRESSION = 'true'
CONN_IN = 0
CONN_DB = 16
NUM_NODES = 1
LOGGING_VERBOSE = 'false'
SHADOW = 'false'
# allow customization of the GODEBUG environment variable
# see http://golang.org/pkg/runtime/
GO_DEBUG = ''
# the only allowed urls are git.io urls, ie: http://git.io/b9PK, and only the
# the last part should be passed, not the full url. So to tell it it find the
# config at http://git.io/b9PK, use gateway.config_url.b9PK
CONFIG_URL = ''
def __init__(self, options):
self.conn_in = int(options.get('conn_in', self.CONN_IN))
self.conn_db = int(options.get('conn_db', self.CONN_DB))
self.compression = options.get('compression', self.COMPRESSION)
self.num_nodes = int(options.get('num_nodes', self.NUM_NODES))
self.logging_verbose = options.get('logging_verbose', self.LOGGING_VERBOSE)
self.shadow = options.get('shadow', self.SHADOW)
self.config_url = options.get('config_url', self.CONFIG_URL)
self.go_debug = options.get('go_debug', self.GO_DEBUG)
self.node0_cache_writer = options.get('node0_cache_writer', 'false')
self.node1_cache_writer = options.get('node1_cache_writer', 'false')
self.node2_cache_writer = options.get('node2_cache_writer', 'false')
def __str__(self):
return str(self.__dict__)
class GateloadSettings(object):
PULLER = 3500
PUSHER = 1500
DOC_SIZE = 0
SEND_ATTACHMENT = 'false'
CHANNEL_ACTIVE_USERS = 40
CHANNEL_CONCURRENT_USERS = 40
SLEEP_TIME = 10 # In seconds, 10 seconds
RUN_TIME = 3600 # In seconds. 1 hr
RAMPUP_INTERVAL = 900 # In seconds, 15 minutes
P95_AVG_CRITERIA = 3
P99_AVG_CRITERIA = 5
SERIESLY_HOST = '172.23.106.228'
LOGGING_VERBOSE = 'false'
AUTH_TYPE = 'basic'
PASSWORD = ''
def __init__(self, options):
self.pullers = int(options.get('pullers', self.PULLER))
self.pushers = int(options.get('pushers', self.PUSHER))
self.doc_size = int(options.get('doc_size', self.DOC_SIZE))
self.send_attachment = options.get('send_attachment', self.SEND_ATTACHMENT)
self.channel_active_users = int(options.get('channel_active_users',
self.CHANNEL_ACTIVE_USERS))
self.channel_concurrent_users = int(options.get('channel_concurrent_users',
self.CHANNEL_CONCURRENT_USERS))
self.sleep_time = int(options.get('sleep_time', self.SLEEP_TIME))
self.p95_avg_criteria = int(options.get('p95_avg_criteria',
self.P95_AVG_CRITERIA))
self.p99_avg_criteria = int(options.get('p99_avg_criteria',
self.P99_AVG_CRITERIA))
self.run_time = int(options.get('run_time', self.RUN_TIME))
self.rampup_interval = int(options.get('rampup_interval',
self.RAMPUP_INTERVAL))
self.logging_verbose = options.get('logging_verbose', self.LOGGING_VERBOSE)
self.seriesly_host = options.get('seriesly_host', self.SERIESLY_HOST)
self.auth_type = options.get('auth_type', self.AUTH_TYPE)
self.password = options.get('password', self.PASSWORD)
def __str__(self):
return str(self.__dict__)
class WorkerSettings(object):
REUSE_WORKSPACE = 'false'
WORKSPACE_DIR = '/tmp/{}'.format(uhex()[:12])
def __init__(self, options):
self.reuse_worker = options.get('reuse_workspace', self.REUSE_WORKSPACE)
self.worker_dir = options.get('workspace_location', self.WORKSPACE_DIR)
def __str__(self):
return str(self.__dict__)
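# Illustrative usage sketch (not part of the original module): the file name and the
# override below are placeholders. Overrides are dot-separated "section.option.value"
# triples, which Config.parse() splits with a csv reader before applying them on top
# of the .test file's sections.
def _example_load_test_config():
    test_config = TestConfig()
    test_config.parse('tests/kv_example.test', override=['cluster.mem_quota.20480'])
    cluster = test_config.cluster          # ClusterSettings built from the [cluster] section
    access = test_config.access_settings   # AccessSettings built from [access] plus [load]
    return cluster.mem_quota, access.workers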
|