content stringlengths 5 1.05M |
|---|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDmrcate(RPackage):
    """Methylation array and sequencing spatial analysis methods.

    De novo identification and extraction of differentially methylated regions
    (DMRs) from the human genome using Whole Genome Bisulfite Sequencing (WGBS)
    and Illumina Infinium Array (450K and EPIC) data. Provides functionality
    for filtering probes possibly confounded by SNPs and cross-hybridisation.
    Includes GRanges generation and plotting functions."""

    bioc = "DMRcate"

    version("2.8.5", commit="c65dc79a33a047c10932a98b3383709a6bcb8903")
    version("2.4.1", commit="bc6242a0291a9b997872f575a4417d38550c9550")

    # Minimum R version; 2.8.5+ requires R 4.0.
    depends_on("r@3.6.0:", type=("build", "run"))
    depends_on("r@4.0.0:", type=("build", "run"), when="@2.8.5:")
    # R package dependencies.
    depends_on("r-experimenthub", type=("build", "run"))
    depends_on("r-bsseq", type=("build", "run"))
    depends_on("r-genomeinfodb", type=("build", "run"))
    depends_on("r-limma", type=("build", "run"))
    depends_on("r-edger", type=("build", "run"))
    depends_on("r-dss", type=("build", "run"))
    depends_on("r-minfi", type=("build", "run"))
    depends_on("r-missmethyl", type=("build", "run"))
    depends_on("r-genomicranges", type=("build", "run"))
    depends_on("r-plyr", type=("build", "run"))
    depends_on("r-gviz", type=("build", "run"))
    depends_on("r-iranges", type=("build", "run"))
    depends_on("r-s4vectors", type=("build", "run"))
    depends_on("r-summarizedexperiment", type=("build", "run"))
|
"""
Gate Class (based on Gameobject).
"""
import pygame
from game_object import GameObject
class Gate(GameObject):
    """A colored rectangular gate belonging to one player."""

    def __init__(self, x, y, width, height, color, player):
        """Create a gate at (x, y) with the given size, fill color, and owner."""
        super().__init__(x, y, width, height)
        self.color = color
        self.player = player

    def draw(self, surface):
        """Render the gate as a filled rectangle onto *surface*."""
        pygame.draw.rect(surface, self.color, self.rect)
|
from .api import APITestCase, StandardAPITestCases
from .base import MockedResponse, TestCase
from .filtersets import BaseFilterSetTests, ChangeLoggedFilterSetTests
from .functions import load_json
from .views import ViewTestCases
# Public API of the test-helpers package, sorted alphabetically
# (unsorted __all__ is flagged by ruff RUF022 and is harder to audit
# against the imports above).
__all__ = (
    "APITestCase",
    "BaseFilterSetTests",
    "ChangeLoggedFilterSetTests",
    "MockedResponse",
    "StandardAPITestCases",
    "TestCase",
    "ViewTestCases",
    "load_json",
)
|
"""
Using Evaluation Metrics in Model Selection
-------------------------------------------
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
from IPython.display import display
from sklearn.svm import SVC
from sklearn.model_selection import (
cross_val_score,
train_test_split,
GridSearchCV,
)
from sklearn.datasets import load_digits
from sklearn.metrics import roc_auc_score
from sklearn.metrics.scorer import SCORERS
digits = load_digits()
# Default scoring is using model accuracy
print 'Default scoring: {}'.format(
cross_val_score(SVC(), digits['data'], digits['target'] == 9))
# Setting scoring='accuracy' does not change result
explicit_accuracy = cross_val_score(
SVC(), digits['data'], digits['target'] == 9, scoring='accuracy')
print 'Explicit accuracy scoring: {}'.format(explicit_accuracy)
roc_auc = cross_val_score(
SVC(), digits['data'], digits['target'] == 9, scoring='roc_auc')
print 'AUC scoring: {}'.format(roc_auc)
# Example of how you can use the different metrics to select parameters
# for the model
X_train, X_test, y_train, y_test = \
train_test_split(digits['data'], digits['target'] == 9, random_state=0)
param_grid = {'gamma': [.0001, .01, 1, 10]}
grid = GridSearchCV(SVC(), param_grid=param_grid)
grid.fit(X_train, y_train)
print 'Grid Search with accuracy'
print 'Best parameters: {}'.format(grid.best_params_)
print 'Best cross-validation score (accuracy): {:.3f}'.format(grid.best_score_)
print 'Test set AUC: {:.3f}'.format(
roc_auc_score(y_test, grid.decision_function(X_test)))
print 'Test set accuracy: {:.3f}'.format(grid.score(X_test, y_test))
# Grid Search with accuracy
# Best parameters: {'gamma': 0.0001}
# Best cross - validation score(accuracy): 0.970
# Test set AUC: 0.992
# Test set accuracy: 0.973
grid = GridSearchCV(
SVC(), param_grid=param_grid, scoring='roc_auc')
grid.fit(X_train, y_train)
print 'Grid Search with AUC'
print 'Best parameters: {}'.format(grid.best_params_)
print 'Best cross-validation score (accuracy): {:.3f}'.format(grid.best_score_)
print 'Test set AUC: {:.3f}'.format(
roc_auc_score(y_test, grid.decision_function(X_test)))
print 'Test set accuracy: {:.3f}'.format(grid.score(X_test, y_test))
# Grid Search with AUC
# Best parameters: {'gamma': 0.01}
# Best cross - validation score(accuracy): 0.997
# Test set AUC: 1.000
# Test set accuracy: 1.000
# Here we see using AUC on imbalanced data let to a better AUC score
# and even a better accuracy score
print 'Available scores:\n{}'.format(sorted(SCORERS.keys()))
# Different scoring metrics available
|
# 2.3 Delete Middle Node
# Implement an algorithm to delete a node in the middle of a singly linked list,
# given only access to that node.
# (i.e. any node but the first and last, not necessarily the exact middle)
class Node:
    """Singly linked list node supporting CTCI 2.3 "delete middle node"."""

    def __init__(self, data):
        self.data = data  # payload
        self.next = None  # successor node, None for the tail

    def delete_middle(self, middle):
        """Remove *middle* from its list given only a reference to that node.

        Works by copying the successor's payload into *middle* and unlinking
        the successor. Returns None without modifying anything when *middle*
        is the tail, which cannot be deleted this way.
        """
        successor = middle.next
        if successor is None:
            return None
        middle.data, middle.next = successor.data, successor.next
|
# -*- coding: utf-8 -*-
# @Time : 2021/1/1 15:53
# @Author : 咸鱼型233
# @File : test1.py
# @Software: PyCharm
# @Function:
import socket
def send_msg(udp_socket):
    """Prompt for a destination (ip, port) and a message, then send it via UDP."""
    # Input order matters: ip first, then port, then the message text.
    target = (input("请输入对方的ip:"), int(input("请输入对方的端口号:")))
    message = input("请输入发送信息:")
    udp_socket.sendto(message.encode("utf-8"), target)
def recv_msg(udp_socket):
    """Receive one datagram (up to 1024 bytes) and print sender plus payload."""
    payload, sender = udp_socket.recvfrom(1024)
    print("从(ip, 端口号)为{0}的主机收到消息:{1} ".format(str(sender), payload.decode("utf-8")))
def main():
    """Bind a UDP socket on port 9222 and loop: send a message, await a reply."""
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Empty host = listen on all local interfaces.
    udp_socket.bind(("", 9222))
    while True:
        send_msg(udp_socket)
        recv_msg(udp_socket)


if __name__ == '__main__':
    main()
|
"""Support for STIB-MIVB (Brussels public transport) information."""
import logging
from pyodstibmivb import ODStibMivb
import voluptuous as vol
import pytz
import datetime
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, DEVICE_CLASS_TIMESTAMP # TIME_MINUTES
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Attribution string required for data from the STIB-MIVB open-data portal.
ATTRIBUTION = "Data provided by opendata.stib-mivb.be"
# Configuration keys for this platform.
CONF_STOPS = "stops"
CONF_STOP_ID = "stop_id"
CONF_API_KEY = "api_key"
CONF_LANG = "lang"
CONF_MESSAGE_LANG = "message_lang"
CONF_LINE_NUMBER = "line_number"
DEFAULT_NAME = "Stib-Mivb"
# Languages accepted for stop/line names and for service messages.
SUPPORTED_LANGUAGES = ["nl", "fr"]
SUPPORTED_MESSAGE_LANGUAGES = ["en", "nl", "fr"]
# Line-type code (as returned by the API) -> Material Design icon.
TYPE_ICONS = {
    "0": "mdi:tram",
    "1": "mdi:subway",
    "3": "mdi:bus",
}
# Line-type code -> human-readable vehicle type.
TYPES = {
    "0": "tram",
    "1": "subway",
    "3": "bus",
}
# One entry per monitored stop/line combination.
STOP_SCHEMA = vol.Schema(
    {vol.Required(CONF_STOP_ID): cv.string, vol.Required(CONF_LINE_NUMBER): cv.string}
)
STOPS_SCHEMA = vol.All(cv.ensure_list, [STOP_SCHEMA])
# Platform configuration: API key and name language are mandatory,
# message language and the stop list are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_LANG): vol.In(SUPPORTED_LANGUAGES),
        vol.Optional(CONF_MESSAGE_LANG): vol.In(SUPPORTED_MESSAGE_LANGUAGES),
        vol.Optional(CONF_STOPS): STOPS_SCHEMA,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create one StibMivbSensor per configured stop/line pair."""
    api_key = config[CONF_API_KEY]
    # CONF_MESSAGE_LANG is declared vol.Optional in PLATFORM_SCHEMA:
    # config[CONF_MESSAGE_LANG] raised KeyError when it was omitted, so use
    # .get() and fall back to the (required) name language.
    if config.get(CONF_MESSAGE_LANG):
        message_lang = config[CONF_MESSAGE_LANG]
    else:
        message_lang = config[CONF_LANG]
    session = async_get_clientsession(hass)
    api = ODStibMivb(api_key, session)
    sensors = []
    # CONF_STOPS is optional as well; default to an empty list rather than
    # iterating over None.
    for stop in config.get(CONF_STOPS, []):
        sensors.append(
            StibMivbSensor(
                api,
                stop.get(CONF_STOP_ID),
                stop.get(CONF_LINE_NUMBER),
                config[CONF_LANG],
                message_lang,
            )
        )
    async_add_entities(sensors, True)
class StibMivbSensor(Entity):
    """Sensor reporting minutes until the next passage for one STIB-MIVB stop/line."""

    def __init__(self, api, stop_id, line_id, lang, message_lang):
        """Initialize the sensor."""
        self.api = api                    # ODStibMivb API client (shared session)
        self.stop_id = stop_id            # stop whose waiting times are polled
        self.line_id = line_id            # line used to filter the passing times
        self.lang = lang                  # "nl"/"fr": language for stop/line names
        self.message_lang = message_lang  # language for service messages
        self._attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
        self._state = None      # minutes to next passage; set in async_update()
        self._stop_name = None  # cached stop name; fetched once on first update
        # right now only available in dev
        self._unit = "min"
        #self._unit = TIME_MINUTES

    async def async_update(self):
        """Get the latest data from the StibMivb API."""
        if self._stop_name is None:
            # First refresh only: resolve and cache static metadata.
            # NOTE(review): get_point_detail() is called with line_id, not
            # stop_id — confirm against the pyodstibmivb API that this is
            # intended and not a copy-paste slip.
            stop_name = await self.api.get_point_detail(self.line_id)
            self._stop_name = stop_name["points"][0]["name"][self.lang]
            self._attributes["stop_name"] = self._stop_name
            self._name = self._stop_name + " line " + self.line_id
            line_name = await self.api.get_line_long_name(self.line_id)
            if self.lang == 'nl':
                line_name = await self.api.get_translation_nl(line_name)
            self._attributes["line_name"] = line_name
        # Current service messages for the line, one attribute per message.
        # NOTE(review): messages use self.lang, not self.message_lang — verify.
        response = await self.api.get_message_by_line(self.line_id)
        for i, message in enumerate(response["messages"]):
            self._attributes[f"message_{i}"] = message["content"][0]["text"][0][
                self.lang
            ]
        # `type` shadows the builtin; kept unchanged here.
        type = await self.api.get_line_type(self.line_id)
        self._attributes["line_type"] = TYPES[type]
        self.__icon = TYPE_ICONS[type]
        self._attributes["line_color"] = await self.api.get_line_color(self.line_id)
        self._attributes["line_text_color"] = await self.api.get_line_text_color(
            self.line_id
        )
        # Upcoming passages at the stop; keep only those matching our line.
        response = await self.api.get_waiting_time(self.stop_id)
        state_set = False
        for i, passing_time in enumerate(response["points"][0]["passingTimes"]):
            if passing_time["lineId"] == self.line_id:
                if state_set == False:
                    # The first matching passage drives the sensor state:
                    # rounded minutes from now until the expected arrival.
                    next_passing_time = pytz.utc.normalize(
                        datetime.datetime.fromisoformat(
                            passing_time["expectedArrivalTime"]
                        )
                    )
                    state_set = True
                    now = pytz.utc.normalize(pytz.utc.localize(datetime.datetime.utcnow()))
                    self._state = round((next_passing_time - now).total_seconds()/60)
                self._attributes[f"next_passing_time_{i}"] = passing_time[
                    "expectedArrivalTime"
                ]
                self._attributes[f"next_passing_destination_{i}"] = passing_time[
                    "destination"
                ][self.lang]

    @property
    def device_class(self):
        """Return the device class."""
        # NOTE(review): the state is a number of minutes ("min"), yet the
        # device class is timestamp — confirm which representation is wanted.
        return DEVICE_CLASS_TIMESTAMP

    @property
    def name(self):
        """Return the name of the sensor."""
        # NOTE(review): self._name is only assigned during the first
        # async_update(); reading it before that raises AttributeError.
        return self._name

    @property
    def state(self):
        """Return the state of the sensor (minutes until the next passage)."""
        return self._state

    @property
    def icon(self):
        """Return the icon of the sensor (depends on the line's vehicle type)."""
        return self.__icon

    @property
    def device_state_attributes(self):
        """Return attributes for the sensor."""
        return self._attributes
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pdb import set_trace
from lxml import etree
import os
import argparse
import sys
import pprint
from teixml2lib.ualog import Log
from teixml2lib.xml_const import *
import re
__date__ = "20-04-2021"
__version__ = "0.2.1"
__author__ = "Marta Materni"
logerr = Log("w")
loginfo = Log("w")
def pp_data(data):
    """Pretty-format *data* as a compact string (no indent, width 80)."""
    return pprint.pformat(data, indent=0, width=80)
"""
splitta path_xml_in negli episodi ci scribe in dir_out
produce <man>_list.xml con l'elenco degli episodi e scrive in dir_out
produce <man>_list.txt con l'elenco degli episodi e scrice i dir_out
"""
class XmlSplitEps:
    """Split a TEI XML manuscript file into one XML file per episode.

    The body of ``path_xml_in`` is cut into episode subtrees written to
    ``dir_out``, and a ``<sigla_man>_list.xml`` index of all episodes is
    produced alongside them.
    """

    def __init__(self, path_xml_in, dir_out, sigla_man):
        self.path_xml_in = path_xml_in  # source TEI XML file
        self.dir_out = dir_out          # output directory for episode files
        self.sigla_man = sigla_man      # manuscript siglum used in file names
        # path_err = dir_out + "_eps_ERR_.log"
        path_err = os.path.join(dir_out, "split_ERR.log")
        logerr.open(path_err, liv=1)
        # NOTE(review): "spli.log" looks like a typo for "split.log";
        # kept as-is because it is a runtime path other tooling may expect.
        path_info = os.path.join(dir_out, "spli.log")
        loginfo.open(path_info, liv=0)
        self.body = None  # <body> subtree, populated by set_body_back()
        self.back = None  # optional <back> subtree, populated by set_body_back()

    def set_body_back(self):
        """Extract the body and (optional) back sections from the source XML.

        The document is serialized to a string and the sections are located
        with the BODY_*/BACK_* regex patterns; exits the process on failure.
        """
        try:
            root = etree.parse(self.path_xml_in)
            xml = etree.tostring(root,
                                 method='xml',
                                 xml_declaration=None,
                                 encoding='unicode',
                                 with_tail=True,
                                 pretty_print=False,
                                 strip_text=False
                                 )
            # Cut the <body> span out of the serialized document.
            m = re.search(BODY_TOP_PATTERN, xml)
            p0 = m.start()
            m = re.search(BODY_BOTTOM_PATTERN, xml)
            p1 = m.end()
            xml_body = xml[p0:p1]
            loginfo.log(xml_body)
            self.body = etree.fromstring(xml_body)
            # The <back> section is optional: stop quietly when absent.
            m = re.search(BACK_TOP, xml)
            if m is None:
                return
            p0 = m.start()
            m = re.search(BACK_BOTTOM, xml)
            p1 = m.end()
            xml_back = xml[p0:p1]
            loginfo.log(xml_back)
            self.back = etree.fromstring(xml_back)
        except Exception as e:
            logerr.log("splitteixml.py set_body_back()")
            logerr.log(str(e))
            sys.exit(1)

    # write xml/par/eps<n>
    def write_eps_xml(self, nd, name_ou):
        """Serialize node *nd* to file *name_ou* (mode 0o666); exit on failure."""
        try:
            src = etree.tostring(nd,
                                 method='xml',
                                 xml_declaration=None,
                                 encoding='unicode',
                                 with_tail=True,
                                 pretty_print=True)
            with open(name_ou, "w+") as fw:
                fw.write(src)
            os.chmod(name_ou, 0o666)
        except Exception as e:
            logerr.log("splitteixml.py write_eps_xml()")
            s = str(e)
            logerr.log(s)
            sys.exit(1)

    # write xml/par/par.xml
    def writ_eps_xml_lst(self, eps_lst, xml_path):
        """Write the episode-list fragments to *xml_path*, one per line."""
        xml_src = os.linesep.join(eps_lst)
        with open(xml_path, "w+") as fw:
            fw.write(xml_src)
        os.chmod(xml_path, 0o666)

    # <div type="episode" ref="#ep1">
    def node_src(self, nd):
        """Return *nd* as a self-closing tag string with its attributes."""
        tag = nd.tag
        ks = self.node_attrs(nd)
        s = "<" + tag
        for k in ks:
            v = ks[k]
            s = s + ' %s="%s"' % (k, v)
        s = s + " />"
        return s

    def node_attrs(self, nd):
        """Return *nd*'s attributes as a dict with namespace prefixes stripped."""
        attrs = {}
        if nd.attrib is None:
            return attrs
        # BUGFIX: dict.iteritems() is Python 2 only and raised AttributeError
        # under Python 3; use items().
        for k, v in nd.attrib.items():
            # Drop a leading "{namespace}" from the attribute name.
            px = k.find('}')
            if px > -1:
                k = k[px + 1:]
            attrs[k] = v
        return attrs

    def build_episode_name(self, eps):
        """Build the output path for episode file *eps*.

        NOTE(review): uses dirname(dir_out), so a trailing slash on dir_out
        changes the target directory — confirm the expected layout.
        """
        f = self.dir_out
        dirname = os.path.dirname(f)
        s = str(dirname)
        path = os.path.join(s, eps)
        return path

    def build_list_name(self, ext):
        """Return dir_out/<sigla_man>_list<ext>."""
        name = self.sigla_man + "_list" + ext
        path = os.path.join(self.dir_out, name)
        return path

    def get_notes(self):
        """Concatenate the serialized <teimed_note> elements of the back section."""
        # TODO get_root
        if self.back is None:
            return ""
        root_back = self.back
        note = root_back.find('div')
        nds = note.findall('teimed_note')
        ls = []
        for nd in nds:
            xml_node = etree.tostring(nd,
                                      method='xml',
                                      xml_declaration=None,
                                      encoding='unicode',
                                      with_tail=True,
                                      pretty_print=True)
            ls.append(xml_node.strip())
        s = "".join(ls)
        return s

    def node_tag(self, nd):
        """Return *nd*'s tag without namespace prefix, or "XXX" for non-element nodes."""
        try:
            tag = nd.tag
            # Comments/PIs have a non-string .tag; flag them as "XXX".
            tag = tag if type(nd.tag) is str else "XXX"
            pid = tag.find('}')
            if pid > 0:
                tag = tag[pid + 1:]
            return tag.strip()
        except Exception as e:
            logerr.log("ERROR in xml")
            logerr.log(str(e))
            return "XXX"

    def prn_node(self, nd):
        """Log a node's tag and attributes (currently disabled)."""
        # TODO: node logging intentionally disabled by this early return;
        # the code below is kept for when it is re-enabled.
        return
        tag = self.node_tag(nd)
        ks = self.node_attrs(nd)
        s = pp_data(ks)
        loginfo.log(tag + " " + s)

    def get_child(self, nd, tag=None):
        """Return the first child of *nd*, filtered by *tag* when given."""
        child = None
        # BUGFIX: the original passed tag=None to iterchildren(), silently
        # ignoring the caller-supplied tag filter.
        for d in nd.iterchildren(tag=tag):
            child = d
            break
        return child

    """
    Find the current episode, check whether it starts with a pb,
    find the previous episode and its last page/column break, and
    insert copies of them at the start of the current episode.
    """
    def get_prev_pb_cb(self, nd):
        """Return duplicated [pb, cb] nodes from the episode preceding *nd*."""
        def build_node(nd):
            # Clone the break element with a "b" suffix on its xml:id.
            tag = self.node_tag(nd)
            attrs = self.node_attrs(nd)
            id = attrs.get('id', '')
            n = attrs.get('n', '')
            id = id + 'b'
            s = f'<{tag} xml:id="{id}" n="{n}" />'
            nd = etree.XML(s)
            return nd
        try:
            ep_prev = nd.getprevious()
            if ep_prev is None:
                raise Exception("Node previus Not Found.")
            # Last page break of the previous episode.
            pb = None
            for d in ep_prev.iterdescendants(tag="pb"):
                pb = d
            if pb is None:
                raise Exception("pb Not Found")
            pb = build_node(pb)
            # Last column break of the previous episode.
            cb = None
            for d in ep_prev.iterdescendants(tag="cb"):
                cb = d
            if cb is None:
                raise Exception("cb Not Found")
            cb = build_node(cb)
        except Exception as e:
            logerr.log("ERROR splixml.py get_prev_pb_cb()")
            logerr.log(str(e))
            sys.exit(1)
        return [pb, cb]

    def begin_pag_dupl(self, nd):
        """Return [pb, cb] to prepend to *nd*, or None when it starts a page."""
        def find_begin_pag(nd):
            # True when the episode's first child is a page break.
            rt = True
            for d in nd.iterchildren(tag=None):
                tag = self.node_tag(d)
                if tag != 'pb':
                    rt = False
                break
            return rt
        pb = find_begin_pag(nd)
        # Episode already starts on a fresh page: nothing to duplicate.
        if (pb):
            return None
        pb_cb = self.get_prev_pb_cb(nd)
        return pb_cb

    def write_episode_lst(self):
        """Split the body into episode files and write the episode list."""
        self.set_body_back()
        root_body = self.body
        ls = root_body.findall('div')
        eps_lst = []
        eps_num_lst = []
        # Null wrapper div holding the episode list.
        eps_lst.append(NULL_TAG_START)
        for xml_node in ls:
            ks = self.node_attrs(xml_node)
            src = self.node_src(xml_node)
            # Index page entry for this episode.
            eps_lst.append(src)
            # Episode number from the ref attribute ("#ep1" -> "ep1").
            eps_num = ks['ref'].replace('#', '')
            eps_num_lst.append(eps_num)
            # Episode subtree: make sure it begins with a page/column break.
            pbcb = self.begin_pag_dupl(xml_node)
            if pbcb is not None:
                # Not the first page: prepend breaks from the previous episode.
                pb = pbcb[0]
                cb = pbcb[1]
                self.prn_node(pb)
                self.prn_node(cb)
                ch = self.get_child(xml_node, 'lg')
                self.prn_node(ch)
                ch.addprevious(pb)
                ch.addprevious(cb)
            xml_eps_path = self.build_episode_name(eps_num + '.xml')
            self.write_eps_xml(xml_node, xml_eps_path)
        # Append back-matter notes, then close the wrapper div.
        s = self.get_notes()
        loginfo.log(s)
        eps_lst.append(s)
        eps_lst.append(NULL_TAG_END)
        # Episode list written as <sigla_man>_list.xml.
        xml_list_path = self.build_list_name(".xml")
        self.writ_eps_xml_lst(eps_lst, xml_list_path)
def do_main(path_in, dir_out, sigla_man):
    """Split *path_in* into episode files under *dir_out* for manuscript *sigla_man*."""
    splitter = XmlSplitEps(path_in, dir_out, sigla_man)
    splitter.write_episode_lst()
if __name__ == "__main__":
"""
es.
dir input: xml/par/file.xml
dir out : xml/par/par/
sigla_man: par
"""
parser = argparse.ArgumentParser()
if len(sys.argv) == 1:
print("release: %s %s" % (__version__, __date__))
parser.print_help()
sys.exit()
parser.add_argument(
'-i',
dest="src",
required=True,
metavar="",
help="-i <file.xml input>")
parser.add_argument(
'-o',
dest="ou",
required=True,
metavar="",
help="-o <dir out/<sigla>/")
parser.add_argument(
'-m',
dest="man",
required=True,
metavar="",
help="-m <sigla_maoscritto>")
args = parser.parse_args()
do_main(args.src, args.ou, args.man)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import authenticate, login, logout as django_logout
from django.contrib.auth.models import User
from blog.models import Post
# Create your views here.
def index(request):
    """Landing view for the users app: a plain-text module banner."""
    return HttpResponse("User authentication module")
def auth(request):
    """Show the login form (GET) or attempt to log the user in (POST)."""
    if request.method == 'GET':
        return render(request, 'login.html')
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user is None:
            # Bad credentials: send the visitor to the error page.
            return redirect('/users/error')
        login(request, user)
        posts = Post.objects.all().order_by('-created_at')
        return render(request, 'posts.html', {'posts': posts, 'message': 'You successfully logged in'})
def error(request):
    """Render the login form with a generic bad-credentials error message."""
    return render(request, 'login.html', {'error': 'User and/or password are wrong.'})
def logout(request):
    """Log the current user out (if authenticated) and redirect to the site root.

    NOTE(review): ``is_authenticated()`` is invoked as a method, which only
    works on older Django (it became a plain property in 1.10 and lost
    callability in 3.0) — confirm the project's Django version before
    changing or keeping this call.
    """
    if request.user.is_authenticated():
        django_logout(request)
    return redirect('/')
def register(request):
    """Show the signup form (GET) or create a new user account (POST).

    On a duplicate username/email the form is re-rendered with an error
    message; on success the visitor is redirected to the site root.
    """
    if request.method == 'GET':
        return render(request, 'register.html')
    elif request.method == 'POST':
        username = request.POST['username']
        email = request.POST['email']
        password = request.POST['password']
        try:
            # create_user() hashes the password and saves the row itself,
            # so no separate save() call is needed.
            User.objects.create_user(username=username, email=email, password=password)
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt); typically an IntegrityError
            # from a duplicate username or email.
            return render(request, 'register.html', {'error': 'Username and/or email already exist'})
        return redirect('/')
|
import nltk
import sys

# Tokenize the file named on the command line, line by line, with NLTK.
tokens = []  # accumulates one token list per input line
with open(sys.argv[1], 'r') as f:
    for line in f.readlines():
        line_tokens = nltk.word_tokenize(line)  # tokenize this line
        tokens.append(line_tokens)              # collect the line's tokens
        print(line_tokens)                      # show the result
|
import json
import logging
from typing import Dict
import boto3
from botocore.exceptions import ClientError
from flask import Blueprint, current_app, jsonify
blueprint = Blueprint("api", __name__, url_prefix="/api")
_logger = logging.getLogger(__name__)
def get_quicksight_embedded_dashboard_url(
    aws_account_id: str,
    aws_access_key_id: str,
    aws_secret_access_key: str,
    aws_iam_role_arn: str,
    aws_region: str,
    quicksight_dashboard_id: str,
    session_name: str,
    reset_disabled: bool = False,
    undo_redo_disabled: bool = False,
) -> Dict:
    """Generate a URL of the QuickSight dashboard that could be used to embed it into a web page.

    :param aws_account_id: AWS account ID
    :type aws_account_id: str
    :param aws_access_key_id: AWS API access key
    :type aws_access_key_id: str
    :param aws_secret_access_key: AWS API secret key
    :type aws_secret_access_key: str
    :param aws_iam_role_arn: ARN of the AIM role allowing to embed QuickSight dashboards
    :type aws_iam_role_arn: str
    :param aws_region: AWS region
    :type aws_region: str
    :param quicksight_dashboard_id: QuickSight dashboard's ID
    :type quicksight_dashboard_id: str
    :param session_name: Session name - must be equal to the QuickSight user's email
    :type session_name: str
    :param reset_disabled: Boolean value indicating whether Disable Reset button is available in the embedded dashboard
    :type reset_disabled: bool
    :param undo_redo_disabled: Boolean value indicating whether
        Disable Undo/Redo buttons are available in the embedded dashboard
    :type undo_redo_disabled: bool
    :return: Python dictionary containing the URL of the QuickSight dashboard
        that could be used to embed it to a web page along with the metadata
    :rtype: Dict
    """
    try:
        # Assume the IAM role that is allowed to embed QuickSight dashboards.
        sts_client = boto3.client(
            "sts",
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
        )
        assumed_aim_role = sts_client.assume_role(
            RoleArn=aws_iam_role_arn,
            RoleSessionName=session_name,
        )
    except ClientError:
        _logger.exception(
            f"An unexpected exception occurred while trying to assume role {aws_iam_role_arn}"
        )
        raise
    else:
        # Build a session from the assumed role's temporary credentials.
        assumed_aim_role_session = boto3.Session(
            aws_access_key_id=assumed_aim_role["Credentials"]["AccessKeyId"],
            aws_secret_access_key=assumed_aim_role["Credentials"]["SecretAccessKey"],
            aws_session_token=assumed_aim_role["Credentials"]["SessionToken"],
        )
        try:
            quicksight_client = assumed_aim_role_session.client(
                "quicksight", region_name=aws_region
            )
            response = quicksight_client.get_dashboard_embed_url(
                AwsAccountId=aws_account_id,
                DashboardId=quicksight_dashboard_id,
                IdentityType="IAM",
                SessionLifetimeInMinutes=600,
                UndoRedoDisabled=undo_redo_disabled,
                ResetDisabled=reset_disabled,
            )
            return {
                "statusCode": 200,
                "headers": {
                    "Access-Control-Allow-Origin": "*",
                    "Access-Control-Allow-Headers": "Content-Type",
                },
                "body": json.dumps(response),
                # BUGFIX: was bool("false"), which evaluates to True because
                # any non-empty string is truthy; the body is plain JSON,
                # so this flag must be False.
                "isBase64Encoded": False,
            }
        except ClientError:
            _logger.exception(
                "An unexpected error occurred while trying to generate an embedded URL"
            )
            raise
@blueprint.route("/dashboard_url", methods=("GET",))
def dashboard_url():
result = get_quicksight_embedded_dashboard_url(
aws_account_id=current_app.config["AWS_ACCOUNT_ID"],
aws_access_key_id=current_app.config["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=current_app.config["AWS_SECRET_ACCESS_KEY"],
aws_iam_role_arn=current_app.config["AWS_IAM_ROLE"],
aws_region=current_app.config["AWS_REGION"],
quicksight_dashboard_id=current_app.config["QUICKSIGHT_DASHBOARD_ID"],
session_name=current_app.config["QUICKSIGHT_USER_EMAIL"],
)
return jsonify(result)
|
import pyodbc
import json
def run_script(server, uid, pwd, connect_to_db, sql):
    """Run *sql* against *connect_to_db* through master.dbo.DbExec.

    Prints the result set as JSON when one is produced, "(No results)"
    otherwise. Returns 0 on success, -1 on failure.
    """
    return_code = 0
    # connect to datasource
    conn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+connect_to_db+';UID='+uid+';PWD='+ pwd)
    # create cursor associated with connection
    cursor = conn.cursor()
    command = "EXEC master.dbo.DbExec @DbNamePattern = ?, @SQL = ?"
    try:
        cursor.execute(command, (connect_to_db, sql))
        try:
            result = cursor.fetchall()
            # Zip column names from the cursor description onto each row.
            result = [dict((cursor.description[i][0], value) for i, value in enumerate(row)) for row in result]
            print(json.dumps(result))
        except pyodbc.ProgrammingError:
            # fetchall() raises ProgrammingError when the statement
            # produced no result set.
            print("(No results)")
        print("Command has been run successfully")
        conn.commit()
    except (ValueError, pyodbc.Error) as exc:
        # BUGFIX: the original called sys.exc_info() without importing sys
        # (NameError) and passed "%s" as a second print() argument instead
        # of formatting; it also never caught pyodbc errors, so the -1
        # return code was unreachable for database failures.
        print("Error !!!!! %s" % exc)
        return_code = -1
    finally:
        # close and delete cursor
        cursor.close()
        del cursor
        # close Connection
        conn.close()
    return return_code
def run_file_script(server, uid, pwd, connect_to_db, script_path):
    """Run the SQL passed as *script_path* against *connect_to_db*.

    The body was a byte-for-byte duplicate of run_script() (only the
    parameter name differed), so delegate to it to keep the execution
    and error-handling logic in one place. Returns 0 on success, -1 on
    failure, exactly as before.
    """
    return run_script(server, uid, pwd, connect_to_db, script_path)
import bisect
import copy
import json
import os
import re
from pathlib import Path
import xmltodict
from PIL import Image
from toolz.curried import groupby
from torch.utils.data import Dataset
from torchvision.datasets.utils import download_url, check_integrity
from horch.datasets.utils import download_google_drive
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/voc.py
# Archive metadata for the trainval split of each VOC release.
DATASET_YEAR_DICT = {
    '2012': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
        'filename': 'VOCtrainval_11-May-2012.tar',
        'md5': '6cd6e144f989b92b3379bac3b3de84fd',
        'base_dir': 'VOCdevkit/VOC2012',
        # "ann_file_url": "https://drive.google.com/open?id=1v98GB2D7oc6OoP8NdIHayZbt-8V6Fc5Q",
        "ann_file_url": "https://drive.google.com/open?id=1f_MTZr4ypkY83yahZ61zCkLb6z3Bo_7Y",
    },
    '2011': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar',
        'filename': 'VOCtrainval_25-May-2011.tar',
        'md5': '6c3384ef61512963050cb5d687e5bf1e',
        'base_dir': 'TrainVal/VOCdevkit/VOC2011'
    },
    '2010': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
        'filename': 'VOCtrainval_03-May-2010.tar',
        'md5': 'da459979d0c395079b5c75ee67908abb',
        'base_dir': 'VOCdevkit/VOC2010'
    },
    '2009': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar',
        'filename': 'VOCtrainval_11-May-2009.tar',
        'md5': '59065e4b188729180974ef6572f6a212',
        'base_dir': 'VOCdevkit/VOC2009'
    },
    '2008': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar',
        # BUGFIX: the filename was copy-pasted from the 2012 entry
        # ('VOCtrainval_11-May-2012.tar'); it must match the archive the
        # URL actually serves (torchvision uses the same name/md5 pair).
        'filename': 'VOCtrainval_14-Jul-2008.tar',
        'md5': '2629fa636546599198acfcfbfcf1904a',
        'base_dir': 'VOCdevkit/VOC2008'
    },
    '2007': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
        'filename': 'VOCtrainval_06-Nov-2007.tar',
        'md5': 'c52e279531787c972589f7e41ab4ae64',
        'base_dir': 'VOCdevkit/VOC2007',
        "ann_file_url": "https://drive.google.com/open?id=189LC78-tuvJXKawqwirFlFzflqvAI9oA",
        # "ann_file_url": "https://drive.google.com/open?id=1dwgWLM4qxe5aT3o46y0Jz10DKmh17w6W",
    }
}
# The test split is only distributed for VOC2007.
TEST_DATASET_YEAR_DICT = {
    '2007': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
        'filename': 'VOCtest_06-Nov-2007.tar',
        'md5': 'b6e924de25625d8de591ea690078ad9f',
        'base_dir': 'VOCdevkit/VOC2007',
        'ann_file_url': "https://drive.google.com/open?id=1sAT2wgrMNFqDsUWom4foQ-WtxwA_IS7e",
        # "ann_file_url": "https://drive.google.com/open?id=1BGSle9xH6B_voeUE4Mp0YYYFmKZfxu0B",
    }
}
# The 20 VOC object categories plus a background class at index 0.
VOC_CATEGORIES = [
    "__background__",
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
]
# Category name -> contiguous index (background = 0).
VOC_CATEGORY_TO_IDX = {name: i for i, name in enumerate(VOC_CATEGORIES)}
class VOCDetection(Dataset):
    """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset.

    Annotations are loaded from a single COCO-format JSON file (fetched from
    Google Drive) rather than the original per-image XML files.

    Args:
        root (string): Root directory of the VOC Dataset.
        year (string, optional): The dataset year, supports years 2007 to 2012.
        image_set (string, optional): Select the image_set to use, ``trainval`` or ``test``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
    """

    def __init__(self,
                 root,
                 year='2012',
                 image_set='trainval',
                 download=False,
                 transform=None):
        self.root = Path(root).expanduser().absolute()
        self.year = year
        self.image_set = image_set
        # The 2007 test split ships in a separate archive with its own metadata.
        if image_set == 'test':
            dataset_dict = TEST_DATASET_YEAR_DICT
        else:
            dataset_dict = DATASET_YEAR_DICT
        self.url = dataset_dict[year]['url']
        self.filename = dataset_dict[year]['filename']
        self.md5 = dataset_dict[year]['md5']
        base_dir = dataset_dict[year]['base_dir']
        self.voc_root = self.root / base_dir
        self.image_dir = self.voc_root / 'JPEGImages'
        self.ann_file_url = dataset_dict[year]['ann_file_url']
        self.ann_file = self.voc_root / ("%s%s.json" % (image_set, year))
        if download:
            self.download()
        # Local import keeps hpycocotools an optional dependency.
        from hpycocotools.coco import COCO
        with open(self.ann_file, 'r') as f:
            self.data = json.load(f)
        self.coco = COCO(self.data, verbose=False)
        self.ids = list(self.coco.imgs.keys())
        self.transform = transform

    def to_coco(self, indices=None):
        """Return the COCO-format dict, optionally restricted to *indices*."""
        if indices is None:
            return self.data
        ids = [self.ids[i] for i in indices]
        images = self.coco.loadImgs(ids)
        ann_ids = self.coco.getAnnIds(ids)
        annotations = self.coco.loadAnns(ann_ids)
        return {
            **self.data,
            "images": images,
            "annotations": annotations,
        }

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        anns = coco.loadAnns(ann_ids)
        path = coco.loadImgs([img_id])[0]['file_name']
        img = Image.open(self.image_dir / path).convert('RGB')
        if self.transform is not None:
            img, anns = self.transform(img, anns)
        return img, anns

    def __len__(self):
        return len(self.ids)

    def download(self):
        """Download/extract the image archive and fetch the annotation JSON."""
        import tarfile
        if self.voc_root.is_dir() and self.ann_file.exists():
            print("Dataset found. Skip download or extract.")
            return
        if not self.voc_root.is_dir():
            download_url(self.url, self.root, self.filename, self.md5)
            with tarfile.open(self.root / self.filename, "r") as tar:
                tar.extractall(path=self.root)
        if not self.ann_file.exists():
            google_drive_match = re.match(
                r"https://drive.google.com/open\?id=(.*)", self.ann_file_url)
            # BUGFIX: fail with a clear message instead of an AttributeError
            # on None when the URL is not a Google Drive link.
            if google_drive_match is None:
                raise ValueError(
                    "Unsupported annotation file URL: %s" % self.ann_file_url)
            file_id = google_drive_match.group(1)
            download_google_drive(file_id, self.voc_root, self.ann_file.name)

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Year: {}\n'.format(self.year)
        fmt_str += '    ImageSet: {}\n'.format(self.image_set)
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(
            tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
# Augmented segmentation masks ("trainaug" split), hosted on Google Drive.
TRAINAUG_FILE = {
    "name": "trainaug.tar",
    "md5": "7677cd72fdefc1f4d23beb556c0e87dc",
    "url": "https://drive.google.com/open?id=1inOFikLz9oOW85s4nuAlCZ_1XesU_zn2",
}
class VOCSegmentation(Dataset):
    """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
    Args:
        root (string): Root directory of the VOC Dataset.
        year (string, optional): The dataset year, supports years 2007 to 2012.
        image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val`` or ``trainaug``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
    """

    def __init__(self,
                 root,
                 year='2012',
                 image_set='train',
                 download=False,
                 transform=None):
        self.root = Path(root).expanduser().absolute()
        self.year = year
        # Download metadata for the requested VOC release.
        self.url = DATASET_YEAR_DICT[year]['url']
        self.filename = DATASET_YEAR_DICT[year]['filename']
        self.md5 = DATASET_YEAR_DICT[year]['md5']
        self.transform = transform
        self.image_set = image_set
        # 'trainaug' selects the augmented mask directory instead of the
        # stock SegmentationClass masks.
        self.augmented = image_set == 'trainaug'
        base_dir = DATASET_YEAR_DICT[year]['base_dir']
        self.voc_root = self.root / base_dir
        image_dir = self.voc_root / 'JPEGImages'
        mask_dir = self.voc_root / 'SegmentationClass'
        if self.augmented:
            mask_dir = self.voc_root / 'SegmentationClassAug'
        if download:
            self.download()
        splits_dir = self.voc_root / 'ImageSets' / 'Segmentation'
        split_f = splits_dir / (image_set.rstrip('\n') + '.txt')
        if not split_f.exists():
            # A missing split file is treated as an invalid image_set name.
            raise ValueError(
                'Wrong image_set entered! Please use image_set="train" '
                'or image_set="trainval" or image_set="val" or image_set="trainaug"')
        with open(split_f, "r") as f:
            file_names = [x.strip() for x in f.readlines()]
        # Pre-compute image/mask path pairs; the two lists stay index-aligned.
        self.images = [image_dir / (x + ".jpg") for x in file_names]
        self.masks = [mask_dir / (x + ".png") for x in file_names]
        assert (len(self.images) == len(self.masks))

    def download(self):
        """Download and extract the VOC archive (plus extra masks for 'trainaug')."""
        import tarfile
        if self.voc_root.is_dir():
            print("VOC found. Skip download or extract")
        else:
            download_url(self.url, self.root, self.filename, self.md5)
            with tarfile.open(self.root / self.filename, "r") as tar:
                tar.extractall(path=self.root)
        if self.augmented:
            mask_dir = self.voc_root / 'SegmentationClassAug'
            if mask_dir.is_dir():
                print("SBT found. Skip download or extract")
            else:
                # TRAINAUG_FILE['url'] is a Google Drive share link; pull out
                # the file id for the downloader.
                file_id = re.match(
                    r"https://drive.google.com/open\?id=(.*)", TRAINAUG_FILE['url']).group(1)
                filename = TRAINAUG_FILE['name']
                download_google_drive(
                    file_id, self.voc_root, filename, TRAINAUG_FILE['md5'])
                file_path = self.voc_root / filename
                with tarfile.open(file_path, "r") as tar:
                    tar.extractall(path=self.voc_root)
                # The archive ships trainaug.txt at the top level; move it next
                # to the other split files so __init__ can find it.
                split_f = self.voc_root / 'trainaug.txt'
                splits_dir = self.voc_root / 'ImageSets' / 'Segmentation'
                split_f.rename(splits_dir / split_f.name)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is the image segmentation.
        """
        img = Image.open(self.images[index]).convert('RGB')
        target = Image.open(self.masks[index])
        if self.transform is not None:
            # Joint transform: applied to the image and mask together so that
            # spatial augmentations stay in sync.
            img, target = self.transform(img, target)
        return img, target

    def __len__(self):
        """Return the number of image/mask pairs in the selected split."""
        return len(self.images)
class VOCDetectionConcat(Dataset):
    """
    Dataset to concatenate multiple datasets.
    Purpose: useful to assemble different existing datasets, possibly
    large-scale datasets as the concatenation operation is done in an
    on-the-fly manner.
    Arguments:
        datasets (sequence): List of datasets to be concatenated
    """

    @staticmethod
    def cumsum(sequence):
        """Running totals of dataset lengths, e.g. [3, 5] for lengths 3 and 2."""
        totals = []
        running = 0
        for dataset in sequence:
            running += len(dataset)
            totals.append(running)
        return totals

    def __init__(self, datasets):
        super().__init__()
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        self.cumulative_sizes = self.cumsum(self.datasets)
        # Merged COCO-style annotation dict plus a per-image-id index into
        # its annotation list.
        self._data = merge_coco(datasets)
        self._img_anns = groupby(lambda x: x['image_id'], self._data['annotations'])

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        # Support negative indices the same way lists do.
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx += len(self)
        # Locate the source dataset, then the sample offset inside it.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        offset = self.cumulative_sizes[dataset_idx - 1] if dataset_idx else 0
        image = self.datasets[dataset_idx][idx - offset][0]
        # NOTE(review): annotations are looked up by the *global* index, which
        # assumes merged image ids coincide with concatenated indices — verify
        # against merge_coco's id assignment.
        return image, self._img_anns[idx]

    def to_coco(self):
        """Return a deep copy of the merged COCO-style annotation dict."""
        return copy.deepcopy(self._data)
def merge_coco(datasets):
    """Merge the COCO-style dicts of several datasets into a single dict.

    The first dataset's images/annotations keep their ids; images and
    annotations of every following dataset get fresh ids continuing after
    the first dataset's highest id. Each following dataset must use image
    ids 0..n-1 (asserted) and all datasets must share identical category
    lists (asserted).

    :param datasets: sequence of objects exposing ``to_coco()`` returning a
        dict with 'categories', 'images' and 'annotations' keys.
    :return: merged COCO-style dict.
    """
    all_annotations = [ds.to_coco() for ds in datasets]
    first = all_annotations[0]
    # All datasets must agree on the category definitions.
    for other in all_annotations[1:]:
        assert first['categories'] == other['categories']
    images = first['images']
    annotations = first['annotations']
    # Continue id counters after the first dataset; start at 0 when it is
    # empty instead of crashing on images[-1] / annotations[-1].
    image_id = images[-1]['id'] + 1 if images else 0
    ann_id = annotations[-1]['id'] + 1 if annotations else 0
    for d in all_annotations[1:]:
        d_images = d['images']
        n = len(d_images)
        assert [img['id'] for img in d_images] == list(range(n))
        # Group annotations by image id (stdlib grouping; images that have
        # no annotations simply get no entry here).
        img_anns = {}
        for ann in d['annotations']:
            img_anns.setdefault(ann['image_id'], []).append(ann)
        for img in d_images:
            # Bug fix: use .get with a default so images without any
            # annotations no longer raise KeyError.
            anns = img_anns.get(img['id'], [])
            for ann in anns:
                annotations.append({
                    **ann,
                    'id': ann_id,
                    'image_id': image_id
                })
                ann_id += 1
            images.append({
                **img,
                'id': image_id,
            })
            image_id += 1
    return {
        **first,
        'images': images,
        'annotations': annotations
    }
|
# -*- coding: utf-8 -*-
"""
Input plug-in for a KKR calculation.
"""
import os
from numpy import pi, array
from aiida.orm.calculation.job import JobCalculation
from aiida_kkr.calculations.voro import VoronoiCalculation
from aiida.common.utils import classproperty
from aiida.common.exceptions import InputValidationError, ValidationError
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.orm import DataFactory
from aiida.common.exceptions import UniquenessError
from aiida_kkr.tools.common_workfunctions import (generate_inputcard_from_structure,
check_2Dinput_consistency, update_params_wf,
vca_check)
from aiida_kkr.tools.common_functions import get_alat_from_bravais, get_Ang2aBohr
from aiida_kkr.tools.tools_kkrimp import make_scoef
from aiida_kkr.tools.kkr_params import __kkr_default_params__
#define aiida structures from DataFactory of aiida
RemoteData = DataFactory('remote')
ParameterData = DataFactory('parameter')
StructureData = DataFactory('structure')
KpointsData = DataFactory('array.kpoints')

# Module metadata (picked up by the plugin registry / packaging).
__copyright__ = (u"Copyright (c), 2017, Forschungszentrum Jülich GmbH, "
                 "IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.6"
__contributors__ = ("Jens Broeder", "Philipp Rüßmann")
class KkrCalculation(JobCalculation):
"""
AiiDA calculation plugin for a KKR calculation
.
"""
    def _init_internal_params(self):
        """
        Init internal parameters at class load time
        """
        # reuse base class function
        super(KkrCalculation, self)._init_internal_params()
        # calculation plugin version
        self._CALCULATION_PLUGIN_VERSION = __version__
        # Default input and output files
        self._DEFAULT_INPUT_FILE = 'inputcard' # will be shown with inputcat
        self._DEFAULT_OUTPUT_FILE = 'out_kkr' # verdi shell output will be shown with outputcat
        # same as _DEFAULT_OUTPUT_FILE: piped output of kkr execution to this file
        self._OUTPUT_FILE_NAME = self._DEFAULT_OUTPUT_FILE
        # List of mandatory input files
        self._INPUT_FILE_NAME = self._DEFAULT_INPUT_FILE
        self._POTENTIAL = 'potential'
        # List of optional input files (may be mandatory for some settings in inputcard)
        self._SHAPEFUN = 'shapefun' # mandatory if nonspherical calculation
        self._SCOEF = 'scoef' # mandatory for KKRFLEX calculation and some functionalities
        self._NONCO_ANGLES = 'nonco_angles.dat' # mandatory if noncollinear directions are used that are not (theta, phi)= (0,0) for all atoms
        self._NONCO_ANGLES_IMP = 'nonco_angles_imp.dat' # mandatory for GREENIMP option (scattering code)
        self._SHAPEFUN_IMP = 'shapefun_imp' # mandatory for GREENIMP option (scattering code)
        self._POTENTIAL_IMP = 'potential_imp' # mandatory for GREENIMP option (scattering code)
        # List of output files that should always be present
        self._OUT_POTENTIAL = 'out_potential'
        self._OUTPUT_0_INIT = 'output.0.txt'
        self._OUTPUT_000 = 'output.000.txt'
        self._OUTPUT_2 = 'output.2.txt'
        self._OUT_TIMING_000 = 'out_timing.000.txt'
        self._NONCO_ANGLES_OUT = 'nonco_angles_out.dat'
        # special files (some runs)
        # DOS files (printf-style templates, filled per atom / per spin)
        self._COMPLEXDOS = 'complex.dos'
        self._DOS_ATOM = 'dos.atom%i'
        self._LMDOS = 'lmdos.%2i.%i.dat'
        # qdos files (bandstructure mode)
        self._QVEC = 'qvec.dat'
        self._QDOS_ATOM = 'qdos.%2i.%i.dat'
        # kkrflex files for impurity calculation
        self._KKRFLEX_GREEN = 'kkrflex_green'
        self._KKRFLEX_TMAT = 'kkrflex_tmat'
        self._KKRFLEX_ATOMINFO = 'kkrflex_atominfo'
        self._KKRFLEX_INTERCELL_REF = 'kkrflex_intercell_ref'
        self._KKRFLEX_INTERCELL_CMOMS = 'kkrflex_intercell_cmoms'
        self._ALL_KKRFLEX_FILES = [self._KKRFLEX_GREEN, self._KKRFLEX_TMAT, self._KKRFLEX_ATOMINFO, self._KKRFLEX_INTERCELL_REF, self._KKRFLEX_INTERCELL_CMOMS]
        # template.product entry point defined in setup.json
        self._default_parser = 'kkr.kkrparser'
        # files that will be copied from local computer if parent was KKR calc
        self._copy_filelist_kkr = [self._SHAPEFUN, self._OUT_POTENTIAL]
        # list of keywords that are not allowed to be modified (new calculation
        # starting from structure and voronoi run is needed instead):
        self._do_never_modify = ['ALATBASIS', 'BRAVAIS', 'NAEZ', '<RBASIS>', 'CARTESIAN',
                                 'INTERFACE', '<NLBASIS>', '<RBLEFT>', 'ZPERIODL',
                                 '<NRBASIS>', '<RBRIGHT>', 'ZPERIODR', 'KSHAPE', '<SHAPE>',
                                 '<ZATOM>', 'NATYP', '<SITE>', '<CPA-CONC>', '<KAOEZL>', '<KAOEZR>']
        #TODO implement workfunction to modify structure (e.g. to use VCA)
    @classproperty
    def _use_methods(cls):
        """
        Add use_* methods for calculations.
        Code below enables the usage
        my_calculation.use_parameters(my_parameters)
        """
        use_dict = JobCalculation._use_methods
        # NOTE(review): `update` mutates the dict obtained from the base
        # class; this assumes JobCalculation._use_methods returns a fresh
        # copy on each access (the usual AiiDA pattern) — confirm against
        # the installed aiida version.
        use_dict.update({
            "parameters": {
                'valid_types': ParameterData,
                'additional_parameter': None,
                'linkname': 'parameters',
                'docstring':
                ("Use a node that specifies the input parameters ")
            },
            "parent_folder": {
                'valid_types': RemoteData,
                'additional_parameter': None,
                'linkname': 'parent_calc_folder',
                'docstring': (
                    "Use a remote or local repository folder as parent folder "
                    "(also for restarts and similar). It should contain all the "
                    "needed files for a KKR calc, only edited files should be "
                    "uploaded from the repository.")
            },
            "impurity_info": {
                'valid_types': ParameterData,
                'additional_parameter': None,
                'linkname': 'impurity_info',
                'docstring': ("Use a Parameter node that specifies properties "
                              "for a follwoing impurity calculation (e.g. setting "
                              "of impurity cluster in scoef file that is "
                              "automatically created).")
            },
            "kpoints": {
                'valid_types': KpointsData,
                'additional_parameter': None,
                'linkname': 'kpoints',
                'docstring': ("Use a KpointsData node that specifies the kpoints for which a "
                              "bandstructure (i.e. 'qdos') calculation should be performed.")
            },
        })
        return use_dict
def _prepare_for_submission(self, tempfolder, inputdict):
"""
Create input files.
:param tempfolder: aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: dictionary of the input nodes as they would
be returned by get_inputs_dict
"""
has_parent = False
local_copy_list = []
# Check inputdict
try:
parameters = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
raise InputValidationError("No parameters specified for this calculation")
if not isinstance(parameters, ParameterData):
raise InputValidationError("parameters not of type ParameterData")
try:
imp_info = inputdict.pop(self.get_linkname('impurity_info'))
found_imp_info = True
except KeyError:
imp_info = None
found_imp_info = False
if found_imp_info and not isinstance(imp_info, ParameterData):
raise InputValidationError("impurity_info not of type ParameterData")
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this calculation")
# get qdos inputs
try:
kpath = inputdict.pop(self.get_linkname('kpoints'))
found_kpath = True
except KeyError:
found_kpath = False
try:
parent_calc_folder = inputdict.pop(self.get_linkname('parent_folder'))
except KeyError:
raise InputValidationError("Voronoi or previous KKR files needed for KKR calculation, "
"you need to provide a Parent Folder/RemoteData node.")
#TODO deal with data from folder data if calculation is continued on a different machine
if not isinstance(parent_calc_folder, RemoteData):
raise InputValidationError("parent_calc_folder must be of type RemoteData")
# extract parent calculation
parent_calcs = parent_calc_folder.get_inputs(node_type=JobCalculation)
n_parents = len(parent_calcs)
if n_parents != 1:
raise UniquenessError(
"Input RemoteData is child of {} "
"calculation{}, while it should have a single parent"
"".format(n_parents, "" if n_parents == 0 else "s"))
parent_calc = parent_calcs[0]
has_parent = True
if n_parents == 1:
parent_calc = parent_calcs[0]
has_parent = True
# check if parent is either Voronoi or previous KKR calculation
self._check_valid_parent(parent_calc)
# extract parent input parameter dict for following check
try:
parent_inp_dict = parent_calc.inp.parameters.get_dict()
except:
self.logger.error("Failed trying to find input parameter of parent {}".format(parent_calc))
raise InputValidationError("No parameter node found of parent calculation.")
# check if no keys are illegally overwritten (i.e. compare with keys in self._do_never_modify)
for key in parameters.get_dict().keys():
value = parameters.get_dict()[key]
#self.logger.info("Checking {} {}".format(key, value))
if not value is None:
if key in self._do_never_modify:
oldvalue = parent_inp_dict[key]
if oldvalue is None and key in __kkr_default_params__:
oldvalue = __kkr_default_params__.get(key)
if value != oldvalue:
self.logger.error("You are trying to set keyword {} = {} but this is not allowed since the structure would be modified. Please use a suitable workfunction instead.".format(key, value))
raise InputValidationError("You are trying to modify a keyword that is not allowed to be changed! (key={}, oldvalue={}, newvalue={})".format(key, oldvalue, value))
#TODO check for remote folder (starting from folder data not implemented yet)
# if voronoi calc check if folder from db given, or get folder from rep.
# Parent calc does not has to be on the same computer.
# so far we copy every thing from local computer ggf if kkr we want to copy remotely
# get StructureData node from Parent if Voronoi
structure = None
self.logger.info("KkrCalculation: Get structure node from voronoi parent")
if isinstance(parent_calc, VoronoiCalculation):
self.logger.info("KkrCalculation: Parent is Voronoi calculation")
try:
structure, voro_parent = VoronoiCalculation.find_parent_structure(parent_calc)
except:
self.logger.error('KkrCalculation: Could not get structure from Voronoi parent.')
raise ValidationError("Cound not find structure node")
elif isinstance(parent_calc, KkrCalculation):
self.logger.info("KkrCalculation: Parent is KKR calculation")
try:
self.logger.info('KkrCalculation: extract structure from KKR parent')
structure, voro_parent = VoronoiCalculation.find_parent_structure(parent_calc)
except:
self.logger.error('Could not get structure from parent.')
raise ValidationError('Cound not find structure node starting from parent {}'.format(parent_calc))
else:
self.logger.error("KkrCalculation: Parent is neither Voronoi nor KKR calculation!")
raise ValidationError('Cound not find structure node')
if inputdict:
self.logger.error('KkrCalculation: Unknown inputs for structure lookup')
raise ValidationError("Unknown inputs")
# for VCA: check if input structure and parameter node define VCA structure
vca_structure = vca_check(structure, parameters)
###################################
# prepare scoef file if impurity_info was given
write_scoef = False
runopt = parameters.get_dict().get('RUNOPT', None)
kkrflex_opt = False
if runopt is not None:
if 'KKRFLEX' in runopt:
kkrflex_opt = True
if kkrflex_opt:
write_scoef = True
elif found_imp_info:
self.logger.info('Found impurity_info in inputs of the calculation, automatically add runopt KKRFLEX')
write_scoef = True
runopt = parameters.get_dict().get('RUNOPT', [])
runopt.append('KKRFLEX')
parameters = update_params_wf(parameters, ParameterData(dict={'RUNOPT':runopt, 'nodename': 'update_KKRFLEX', 'nodedesc':'Update Parameter node with KKRFLEX runopt'}))
if found_imp_info and write_scoef:
scoef_filename = os.path.join(tempfolder.get_abs_path(''), self._SCOEF)
imp_info_dict = imp_info.get_dict()
Rcut = imp_info_dict.get('Rcut', None)
hcut = imp_info_dict.get('hcut', -1.)
cylinder_orient = imp_info_dict.get('cylinder_orient', [0., 0., 1.])
ilayer_center = imp_info_dict.get('ilayer_center', 0)
for i in range(len(cylinder_orient)):
try:
len(cylinder_orient[i])
vec_shape = False
except TypeError:
vec_shape = True
if ilayer_center > len(structure.sites) - 1:
raise IndexError('Index of the reference site is out of range! Possible values: 0 to {}.'.format(len(structure.sites) - 1))
elif Rcut < 0:
raise ValueError('Cutoff radius has to be positive!')
elif vec_shape == False or len(cylinder_orient) != 3:
raise TypeError('Input orientation vector ({}) has the wrong shape! It needs to be a 3D-vector!'.format(cylinder_orient))
else:
print('Input parameters for make_scoef read in correctly!')
make_scoef(structure, Rcut, scoef_filename, hcut, cylinder_orient, ilayer_center)
elif write_scoef:
self.logger.info('Need to write scoef file but no impurity_info given!')
raise ValidationError('Found RUNOPT KKRFLEX but no impurity_info in inputs')
# Check for 2D case
twoDimcheck, msg = check_2Dinput_consistency(structure, parameters)
if not twoDimcheck:
raise InputValidationError(msg)
# set shapes array either from parent voronoi run or read from inputcard in kkrimporter calculation
if parent_calc.get_parser_name() != 'kkr.kkrimporterparser':
# get shapes array from voronoi parent
shapes = voro_parent.res.shapes
else:
# extract shapes from input parameters node constructed by kkrimporter calculation
shapes = voro_parent.inp.parameters.get_dict().get('<SHAPE>')
#
use_alat_input = parameters.get_dict().get('use_input_alat', False)
# qdos option, ensure low T, E-contour, qdos run option and write qvec.dat file
if found_kpath:
# check qdos settings
change_values = []
runopt = parameters.get_dict().get('RUNOPT')
if runopt is None: runopt = []
runopt = [i.strip() for i in runopt]
if 'qdos' not in runopt:
runopt.append('qdos')
change_values.append(['RUNOPT', runopt])
tempr = parameters.get_dict().get('TEMPR')
if tempr is None or tempr>100.:
change_values.append(['TEMPR', 50.])
N1 = parameters.get_dict().get('TEMPR')
if N1 is None or N1>0:
change_values.append(['NPT1', 0])
N2 = parameters.get_dict().get('NPT2')
if N2 is None:
change_values.append(['NPT2', 100])
N3 = parameters.get_dict().get('NPT3')
if N3 is None or N3>0.:
change_values.append(['NPT3', 0])
NPOL = parameters.get_dict().get('NPOL')
if NPOL is None or NPOL>0.:
change_values.append(['NPOL', 0])
if change_values != []:
new_params = {'nodename': 'changed_params_qdos', 'nodedesc': 'Changed parameters to mathc qdos mode. Changed values: {}'.format(change_values)}
for key, val in change_values:
new_params[key] = val
new_params_node = ParameterData(dict=new_params)
parameters = update_params_wf(parameters, new_params_node)
# write qvec.dat file
kpath_array = kpath.get_kpoints()
# convert automatically to internal units
if use_alat_input:
alat = parameters.get_dict().get('ALATBASIS')
else:
alat = get_alat_from_bravais(array(structure.cell), is3D=structure.pbc[2]) * get_Ang2aBohr()
kpath_array = kpath_array * (alat/2./pi)
qvec = ['%i\n'%len(kpath_array)]
qvec+=['%e %e %e\n'%(kpt[0], kpt[1], kpt[2]) for kpt in kpath_array]
qvecpath = tempfolder.get_abs_path(self._QVEC)
with open(qvecpath, 'w') as file:
file.writelines(qvec)
# Prepare inputcard from Structure and input parameter data
input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
natom, nspin, newsosol = generate_inputcard_from_structure(parameters, structure, input_filename, parent_calc, shapes=shapes, vca_structure=vca_structure, use_input_alat=use_alat_input)
#################
# Decide what files to copy based on settings to the code (e.g. KKRFLEX option needs scoef)
if has_parent:
# copy the right files #TODO check first if file, exists and throw
# warning, now this will throw an error
outfolderpath = parent_calc.out.retrieved.folder.abspath
outfolderpath = os.path.join(outfolderpath, 'path')
self.logger.info("out folder path {}".format(outfolderpath))
copylist = []
if isinstance(parent_calc, KkrCalculation):
copylist = self._copy_filelist_kkr
# TODO ggf copy remotely...
if isinstance(parent_calc, VoronoiCalculation):
copylist = [parent_calc._SHAPEFUN]
# copy either overwrite potential or voronoi output potential
# (voronoi caclualtion retreives only one of the two)
if parent_calc._POTENTIAL_IN_OVERWRITE in os.listdir(outfolderpath):
copylist.append(parent_calc._POTENTIAL_IN_OVERWRITE)
else:
copylist.append(parent_calc._OUT_POTENTIAL_voronoi)
#change copylist in case the calculation starts from an imported calculation
if parent_calc.get_parser_name() == 'kkr.kkrimporterparser':
copylist = []
if not os.path.exists(os.path.join(outfolderpath, self._OUT_POTENTIAL)):
copylist.append(self._POTENTIAL)
else:
copylist.append(self._OUT_POTENTIAL)
if os.path.exists(os.path.join(outfolderpath, self._SHAPEFUN)):
copylist.append(self._SHAPEFUN)
# create local_copy_list from copylist and change some names automatically
for file1 in copylist:
filename = file1
if (file1 == 'output.pot' or file1 == self._OUT_POTENTIAL or
(isinstance(parent_calc, VoronoiCalculation) and file1 == parent_calc._POTENTIAL_IN_OVERWRITE)):
filename = self._POTENTIAL
local_copy_list.append((
os.path.join(outfolderpath, file1),
os.path.join(filename)))
# for set-ef option:
ef_set = parameters.get_dict().get('ef_set', None)
if ef_set is not None:
print('local copy list before change: {}'.format(local_copy_list))
print("found 'ef_set' in parameters: change EF of potential to this value")
potcopy_info = [i for i in local_copy_list if i[1]==self._POTENTIAL][0]
with open(potcopy_info[0]) as file:
# change potential and copy list
local_copy_list.remove(potcopy_info)
pot_new_name = tempfolder.get_abs_path(self._POTENTIAL+'_new_ef')
local_copy_list.append((pot_new_name, self._POTENTIAL))
# change potential
txt = file.readlines()
potstart = []
for iline in range(len(txt)):
line = txt[iline]
if 'exc:' in line:
potstart.append(iline)
for ipotstart in potstart:
tmpline = txt[ipotstart+3]
tmpline = tmpline.split()
newline = '%10.5f%20.14f%20.14f\n'%(float(tmpline[0]), ef_set, float(tmpline[-1]))
txt[ipotstart+3] = newline
# write new file
pot_new_ef = open(pot_new_name, 'w')
pot_new_ef.writelines(txt)
pot_new_ef.close()
# TODO different copy lists, depending on the keywors input
print('local copy list: {}'.format(local_copy_list))
self.logger.info('local copy list: {}'.format(local_copy_list))
# Prepare CalcInfo to be returned to aiida
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = []
# TODO retrieve list needs some logic, retrieve certain files,
# only if certain input keys are specified....
calcinfo.retrieve_list = [self._DEFAULT_OUTPUT_FILE,
self._INPUT_FILE_NAME,
self._POTENTIAL,
self._SHAPEFUN,
self._SCOEF,
self._NONCO_ANGLES_OUT,
self._OUT_POTENTIAL,
self._OUTPUT_0_INIT,
self._OUTPUT_000,
self._OUTPUT_2,
self._OUT_TIMING_000]
# for special cases add files to retireve list:
# 1. dos calculation, add *dos* files if NPOL==0
retrieve_dos_files = False
print('NPOL in parameter input:', parameters.get_dict()['NPOL'])
if 'NPOL' in parameters.get_dict().keys():
if parameters.get_dict()['NPOL'] == 0:
retrieve_dos_files = True
if 'TESTOPT' in parameters.get_dict().keys():
testopts = parameters.get_dict()['TESTOPT']
if testopts is not None :
stripped_test_opts = [i.strip() for i in testopts]
if 'DOS' in stripped_test_opts:
retrieve_dos_files = True
if retrieve_dos_files:
print('adding files for dos output', self._COMPLEXDOS, self._DOS_ATOM, self._LMDOS)
add_files = [self._COMPLEXDOS]
for iatom in range(natom):
add_files.append(self._DOS_ATOM%(iatom+1))
for ispin in range(nspin):
add_files.append((self._LMDOS%(iatom+1, ispin+1)).replace(' ','0'))
calcinfo.retrieve_list += add_files
# 2. KKRFLEX calculation
retrieve_kkrflex_files = False
if 'RUNOPT' in parameters.get_dict().keys():
runopts = parameters.get_dict()['RUNOPT']
if runopts is not None :
stripped_run_opts = [i.strip() for i in runopts]
if 'KKRFLEX' in stripped_run_opts:
retrieve_kkrflex_files = True
if retrieve_kkrflex_files:
add_files = self._ALL_KKRFLEX_FILES
print('adding files for KKRFLEX output', add_files)
calcinfo.retrieve_list += add_files
# 3. qdos claculation
retrieve_qdos_files = False
if 'RUNOPT' in parameters.get_dict().keys():
runopts = parameters.get_dict()['RUNOPT']
if runopts is not None :
stripped_run_opts = [i.strip() for i in runopts]
if 'qdos' in stripped_run_opts:
retrieve_qdos_files = True
if retrieve_qdos_files:
print('adding files for qdos output', self._QDOS_ATOM, self._QVEC)
add_files = [self._QVEC]
for iatom in range(natom):
for ispin in range(nspin):
add_files.append((self._QDOS_ATOM%(iatom+1, ispin+1)).replace(' ','0'))
calcinfo.retrieve_list += add_files
codeinfo = CodeInfo()
codeinfo.cmdline_params = []
codeinfo.code_uuid = code.uuid
codeinfo.stdout_name = self._DEFAULT_OUTPUT_FILE
calcinfo.codes_info = [codeinfo]
return calcinfo
def _check_valid_parent(self, calc):
"""
Check that calc is a valid parent for a FleurCalculation.
It can be a VoronoiCalculation, KKRCalculation
"""
try:
if (((not isinstance(calc, VoronoiCalculation)))
and (not isinstance(calc, KkrCalculation))):
raise ValueError("Parent calculation must be a VoronoiCalculation or a KkrCalculation")
except ImportError:
if ((not isinstance(calc, KkrCalculation)) ):
raise ValueError("Parent calculation must be a VoronoiCalculation or a KkrCalculation")
def _set_parent_remotedata(self, remotedata):
"""
Used to set a parent remotefolder in the restart of fleur.
"""
if not isinstance(remotedata,RemoteData):
raise ValueError('remotedata must be a RemoteData')
# complain if another remotedata is already found
input_remote = self.get_inputs(node_type=RemoteData)
if input_remote:
raise ValidationError("Cannot set several parent calculation to a KKR calculation")
self.use_parent_folder(remotedata)
|
# Example course from the Advent of Code 2021 day 2 problem statement
# (with move(): horizontal 15, depth 10; with move2(): horizontal 15, depth 60).
test_input = """
forward 5
down 5
forward 8
up 3
down 8
forward 2
"""
puzzle_input = """
forward 7
down 2
forward 7
down 6
forward 1
forward 7
down 3
up 5
forward 7
forward 6
down 8
down 1
up 5
up 1
down 2
forward 8
forward 3
down 8
down 9
down 1
forward 4
down 8
down 7
forward 3
up 5
up 3
forward 9
forward 5
forward 5
down 9
up 2
down 4
up 4
down 9
forward 7
down 9
up 7
forward 4
down 2
down 6
up 3
down 2
down 4
up 5
forward 7
up 8
down 4
forward 8
down 5
forward 1
forward 3
up 9
forward 5
down 4
forward 6
forward 2
up 3
down 5
down 6
forward 8
up 6
up 9
down 8
down 2
down 6
forward 2
forward 8
forward 1
forward 5
forward 3
down 8
down 5
forward 3
up 7
down 9
up 9
forward 7
forward 6
forward 4
down 5
forward 1
down 9
forward 9
forward 6
down 8
down 5
forward 5
forward 4
forward 3
up 6
up 7
forward 2
up 2
up 9
forward 8
up 3
forward 8
down 8
forward 1
forward 7
forward 4
down 5
forward 8
down 2
down 2
down 3
forward 3
forward 3
up 3
forward 4
up 9
up 8
forward 1
down 8
up 6
down 5
up 3
up 2
forward 1
up 8
down 7
up 5
down 2
forward 5
down 3
down 1
forward 2
forward 6
forward 7
forward 1
forward 5
forward 4
down 9
forward 6
down 9
up 8
forward 9
forward 5
up 2
up 7
up 2
down 1
down 7
down 1
forward 2
down 8
down 3
forward 1
down 5
forward 7
forward 5
forward 6
up 6
forward 6
forward 1
down 2
forward 5
down 7
up 1
down 5
down 4
down 8
up 2
down 2
up 6
forward 2
down 2
up 9
down 7
down 3
down 6
forward 5
up 5
forward 2
forward 7
down 9
up 3
forward 4
forward 4
down 6
down 2
down 4
forward 6
down 2
down 8
up 2
forward 9
down 8
forward 4
down 2
up 4
down 6
forward 3
forward 2
forward 7
down 7
forward 3
forward 7
down 9
up 6
down 4
forward 4
down 6
down 8
down 4
forward 3
up 5
up 4
up 9
forward 9
down 1
forward 3
forward 9
up 3
down 5
forward 2
down 9
down 9
forward 1
forward 4
forward 8
forward 9
down 4
forward 3
down 3
forward 9
down 1
down 3
down 9
down 3
down 2
down 1
up 2
down 3
up 7
forward 7
down 9
up 6
down 1
down 7
down 7
up 7
forward 8
down 1
down 7
down 8
up 4
down 6
down 7
forward 5
down 9
forward 2
down 6
down 8
down 5
down 4
forward 8
down 4
forward 8
down 3
down 6
forward 6
forward 1
up 5
down 2
down 2
forward 7
forward 1
up 3
down 6
down 3
down 9
up 6
forward 4
down 1
forward 4
up 3
forward 6
down 7
down 2
up 3
down 1
up 7
down 7
forward 5
up 9
up 1
up 2
forward 4
forward 9
up 3
down 8
up 2
down 9
forward 8
up 2
down 5
up 5
down 2
up 8
down 6
down 8
up 7
forward 9
forward 6
forward 5
forward 8
forward 7
down 2
forward 1
forward 6
down 3
down 7
up 1
forward 7
up 7
down 2
down 9
up 4
forward 2
down 3
up 8
up 3
down 9
down 2
forward 4
forward 9
forward 8
forward 2
up 2
forward 3
forward 8
down 2
down 4
up 8
up 2
forward 4
forward 7
up 8
forward 8
forward 1
forward 9
down 9
up 3
forward 9
down 5
down 9
down 2
forward 1
forward 6
forward 3
up 7
down 8
down 2
up 6
down 5
forward 4
up 7
down 5
down 3
forward 5
forward 5
up 4
down 7
down 5
up 1
down 4
down 6
forward 6
forward 3
down 9
forward 6
forward 4
down 8
up 5
down 7
forward 6
forward 7
down 9
forward 3
forward 3
forward 4
down 6
forward 2
forward 9
up 2
forward 7
up 5
forward 6
down 8
down 7
forward 1
down 6
forward 3
down 9
forward 7
forward 2
forward 1
down 9
down 2
up 8
down 1
down 3
up 6
down 5
up 2
down 2
down 8
forward 7
down 8
forward 6
up 5
down 8
down 4
down 1
forward 1
forward 9
down 3
forward 9
up 2
down 2
forward 9
up 2
up 2
down 8
down 1
up 4
down 9
down 6
up 7
down 6
forward 7
forward 3
forward 9
forward 2
down 9
down 8
down 5
forward 4
forward 1
forward 3
forward 3
forward 1
forward 6
forward 7
down 7
down 1
up 4
up 2
forward 9
up 7
down 1
forward 5
down 8
forward 3
down 9
up 4
up 1
forward 7
down 1
forward 4
up 6
down 9
forward 2
forward 7
down 1
forward 2
forward 1
down 2
forward 6
down 4
up 7
down 6
forward 1
down 9
up 8
up 6
forward 4
down 5
up 8
down 5
up 9
forward 1
forward 6
down 4
up 5
forward 4
forward 2
down 6
forward 9
down 7
down 2
forward 1
up 2
forward 4
forward 4
forward 7
down 5
up 1
down 7
down 1
forward 3
forward 6
forward 7
down 5
down 4
down 2
down 3
up 3
forward 7
down 3
up 2
forward 7
down 9
up 4
forward 9
forward 4
forward 2
down 9
forward 1
down 5
forward 3
forward 5
up 7
down 9
up 7
down 5
down 2
up 5
up 1
forward 8
forward 3
up 5
forward 2
down 2
forward 5
forward 3
forward 4
up 4
forward 3
up 4
forward 1
down 2
forward 5
down 9
forward 8
forward 2
forward 5
forward 1
up 3
up 8
forward 2
forward 9
down 7
up 5
up 2
forward 1
forward 4
up 4
forward 5
down 5
forward 5
down 2
down 8
forward 4
down 3
forward 7
down 7
forward 6
down 9
down 2
up 4
up 5
down 2
down 7
forward 3
down 1
down 5
down 6
forward 8
forward 7
down 3
forward 4
forward 8
forward 2
down 8
down 3
forward 8
down 2
up 2
forward 3
up 2
down 7
down 4
forward 8
forward 7
down 9
forward 7
down 8
up 3
forward 1
up 5
forward 6
down 7
forward 8
forward 3
forward 1
forward 5
down 8
up 8
forward 9
down 7
up 8
up 8
forward 9
up 6
forward 2
down 8
forward 6
down 6
down 6
forward 8
up 9
forward 9
down 8
down 8
forward 3
forward 3
down 8
up 7
down 1
forward 5
up 6
forward 6
up 8
down 7
down 3
down 4
forward 7
down 2
forward 4
forward 6
down 2
down 6
up 2
down 9
down 8
forward 6
up 8
up 4
forward 1
forward 2
down 8
forward 6
down 2
down 7
down 1
down 2
forward 9
forward 5
down 2
down 8
down 9
up 6
forward 6
up 2
down 9
down 4
down 9
up 7
forward 2
up 9
down 7
forward 2
down 7
up 6
down 3
up 1
down 8
down 4
forward 1
up 5
up 4
down 2
down 8
forward 8
forward 7
up 1
down 8
forward 2
forward 7
down 4
forward 4
down 3
down 7
forward 8
down 7
down 3
down 3
forward 8
forward 8
up 1
forward 8
forward 6
down 9
up 1
down 7
down 7
forward 7
forward 7
up 5
down 7
down 6
down 6
forward 8
down 3
forward 8
down 8
forward 7
forward 2
up 6
down 6
down 8
forward 1
forward 8
down 9
down 7
up 5
down 1
forward 6
down 9
forward 5
up 2
up 9
down 6
down 8
down 6
up 1
forward 7
down 9
up 2
forward 3
down 7
up 5
forward 3
forward 8
up 2
forward 1
down 6
down 7
up 4
down 5
up 8
forward 9
forward 5
up 7
down 3
forward 2
up 7
up 2
down 3
up 9
down 9
down 8
up 8
down 6
forward 9
up 7
forward 4
forward 7
up 7
down 6
forward 5
up 2
up 4
down 1
down 2
down 9
forward 5
forward 3
forward 9
up 7
forward 7
down 5
down 2
up 9
forward 4
forward 4
up 5
up 3
forward 5
forward 9
forward 4
forward 8
down 2
up 4
down 1
forward 9
forward 9
up 7
down 3
forward 2
forward 4
down 6
up 1
forward 6
down 4
up 9
down 4
forward 3
down 9
up 9
down 8
up 6
forward 9
forward 1
forward 2
up 2
forward 8
forward 9
forward 3
forward 5
down 5
down 7
forward 7
forward 5
down 3
up 2
forward 4
down 3
up 6
down 6
up 6
forward 1
forward 2
down 5
down 8
down 3
forward 5
up 4
forward 6
forward 9
forward 6
forward 1
forward 4
up 1
forward 3
forward 3
up 3
forward 9
forward 1
forward 7
forward 8
forward 1
forward 9
forward 7
up 9
forward 9
up 4
down 4
up 9
down 5
down 8
down 3
forward 6
down 7
forward 5
forward 6
forward 8
forward 7
down 7
down 5
forward 4
down 6
down 4
down 6
down 1
forward 3
down 3
down 7
forward 6
forward 3
up 2
forward 1
forward 8
down 9
down 3
down 3
up 6
down 7
down 3
forward 2
down 7
down 2
forward 1
down 7
down 3
forward 9
down 4
down 3
forward 9
up 2
up 4
forward 4
down 4
up 2
down 2
forward 8
down 1
up 9
down 5
down 7
forward 3
forward 9
forward 7
forward 1
forward 7
forward 1
forward 7
up 7
down 6
forward 6
forward 4
forward 6
up 3
down 5
down 5
down 3
down 6
down 3
down 3
up 2
down 4
up 8
down 4
up 2
down 7
forward 9
up 9
down 1
forward 8
forward 7
forward 6
forward 8
up 6
up 6
down 5
forward 6
down 3
forward 6
forward 9
down 2
down 6
down 4
down 5
forward 7
forward 4
up 3
down 6
down 6
forward 1
forward 4
down 6
up 3
forward 1
down 3
down 7
down 4
down 8
down 8
up 8
down 2
up 8
down 3
down 3
forward 3
down 3
down 7
up 6
forward 8
down 4
forward 1
down 7
down 3
forward 5
forward 8
up 1
forward 2
down 7
down 7
forward 1
up 7
down 3
up 3
forward 5
forward 9
down 3
down 7
down 5
forward 7
"""
def parse_positions(pos):
    """Parse newline-separated "<direction> <amount>" commands.

    Returns a list of (direction, amount) tuples with amount as int.
    """
    commands = []
    for raw in pos.strip().splitlines():
        direction, amount = raw.split()
        commands.append((direction, int(amount)))
    return commands
def move(positions):
    """Apply AoC day-2 part-1 semantics: 'forward' moves horizontally,
    'down'/'up' change depth directly.

    :param positions: iterable of (direction, amount) pairs
    :returns: (horizontal_position, depth) tuple
    :raises ValueError: on an unknown direction
    """
    x, y = 0, 0
    # Renamed loop variables: the original 'dir' shadowed the builtin dir().
    for direction, amount in positions:
        if direction == 'forward':
            x += amount
        elif direction == 'down':
            y += amount
        elif direction == 'up':
            y -= amount
        else:
            raise ValueError(f"Invalid direction {direction}")
    return x, y
def move2(positions):
    """Apply AoC day-2 part-2 semantics: 'down'/'up' adjust aim, and
    'forward' moves horizontally while changing depth by aim * amount.

    :param positions: iterable of (direction, amount) pairs
    :returns: (horizontal_position, depth) tuple
    :raises ValueError: on an unknown direction
    """
    x, y, aim = 0, 0, 0
    # Renamed loop variables: the original 'dir' shadowed the builtin dir().
    for direction, amount in positions:
        if direction == 'forward':
            x += amount
            y += aim * amount
        elif direction == 'down':
            aim += amount
        elif direction == 'up':
            aim -= amount
        else:
            raise ValueError(f"Invalid direction {direction}")
    return x, y
# ---- Day 2 driver ---------------------------------------------------------
# NOTE(review): test_input and puzzle_input are module-level strings defined
# earlier in the file. The puzzle answer for each part is the product of the
# final horizontal position and depth.
print("Day 2")
print("Part 1")
print("Test input")
test_positions = parse_positions(test_input)
print(test_positions)
test_x, test_y = move(test_positions)
print(test_x, test_y)
print(test_x*test_y)
print("Puzzle input")
positions = parse_positions(puzzle_input)
# print(positions)
x, y = move(positions)
print(x, y)
print(x*y)
print("Part 2")
print("Test input")
test_positions = parse_positions(test_input)
test_x, test_y = move2(test_positions)
print(test_x, test_y)
print(test_x*test_y)
print("Puzzle input")
x, y = move2(positions)
print(x, y)
print(x*y)
|
################################################################################
# Modules and functions import statements
################################################################################
import logging
import socket
import jinja2
from datetime import datetime
from os import getenv
from jinja2 import Template, Environment, FileSystemLoader
from app_helpers import appconfig
################################################################################
# Function decorators
################################################################################
# N/A
################################################################################
# Basic functions
################################################################################
########################################
# Define filter functions
########################################
def get_year(value):
    """Jinja2 filter: return the year of a datetime/date value.

    NOTE(review): no validation is performed; assumes ``value`` has a
    ``year`` attribute.
    """
    return value.year
########################################
# Define global functions
########################################
def get_datetime_now():
    """Jinja2 global: the current local wall-clock time."""
    now = datetime.now()
    return now
def get_datetime_utcnow():
    """Jinja2 global: the current time in UTC (naive datetime).

    NOTE(review): datetime.utcnow() is deprecated as of Python 3.12;
    switching to datetime.now(timezone.utc) would return an aware value,
    so callers must be reviewed before changing this.
    """
    utc_now = datetime.utcnow()
    return utc_now
########################################
# Define core functions
########################################
def os_env(key):
    """Jinja2 filter: look up an environment variable (None when unset)."""
    logging.debug("IN os_env()")
    value = getenv(key)
    return value
def get_jinja2_env():
    """Build the application's Jinja2 environment.

    Loads templates from ./templates and registers the globals and filters
    the templates rely on.

    :returns: a configured jinja2.Environment
    """
    logging.debug("Setting up jinja2 environment")
    env = Environment(loader=FileSystemLoader('./templates'))
    # Derive a short host name for templates. socket.getfqdn() may return an
    # empty string, and "".split(".") still yields [""] -- so the original
    # `len(fqdn) > 0` test was always true and the "unknown" fallback was
    # unreachable. Guard on the first component instead.
    fqdn_parts = socket.getfqdn().split(".")
    if fqdn_parts and fqdn_parts[0]:
        computer_name = fqdn_parts[0].upper()
    else:
        computer_name = "unknown"
    # Global variables usable in all Jinja2 templates.
    env.globals['COMPUTERNAME'] = computer_name
    env.globals['APPLICATION_NAME'] = appconfig["application"]["application_name"]
    env.globals['SITE_NAME'] = appconfig["application"]["site_name"]
    env.globals['VERSION'] = appconfig["application"]["version"]
    env.globals['VERSION_DATE'] = appconfig["application"]["version_date"]
    # Global callables: resolved at render time, so each render sees a fresh
    # timestamp.
    env.globals['datetime'] = get_datetime_now
    env.globals['datetime_utc'] = get_datetime_utcnow
    # Filters.
    env.filters['os_env'] = os_env
    env.filters['year'] = get_year
    return env
################################################################################
# Variables dependent on Application basic functions
################################################################################
# Module-level singleton: built once at import time so all callers share
# the same configured environment.
jinja2_env = get_jinja2_env()
################################################################################
# Main function
################################################################################
if __name__ == '__main__':
pass |
class Node:
    """A single element of a doubly linked list.

    Attributes:
        data: payload stored at this node.
        next: following node, or None at the tail.
        prev: preceding node, or None at the head.
    """

    def __init__(self, data):
        self.data = data
        # Links are wired up by the list that owns the node.
        self.next = None
        self.prev = None
class DoublyLinkedList:
    """Doubly linked list of Node objects with head and tail pointers."""

    def __init__(self):
        self.head = None  # first node, or None when empty
        self.tail = None  # last node, or None when empty

    def insertFront(self, new_node):
        """Insert a value at the front (head) of the list."""
        node = Node(new_node)
        if self.head is None:
            # Empty list: the new node is both head and tail.
            self.head = node
            self.tail = node
        else:
            node.next = self.head
            self.head.prev = node
            self.head = node

    def insertBack(self, new_node):
        """Insert a value at the back (tail) of the list."""
        node = Node(new_node)
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.tail.next = node
            node.prev = self.tail
            self.tail = node

    def insertIndex(self, new_node, index):
        """Insert a value so it ends up at 0-based position ``index``.

        Fixes over the previous version:
        - the spliced node's ``prev`` links are now maintained, so backward
          traversal stays consistent;
        - ``index <= 0`` (previously a silent no-op) inserts at the front;
        - an index past the end appends at the tail and updates ``tail``.
        """
        if self.head is None or index <= 0:
            self.insertFront(new_node)
            return
        # Walk to the node the new node will be spliced after
        # (position index - 1, or the last node if index is out of range).
        anchor = self.head
        pos = 0
        while anchor.next is not None and pos < index - 1:
            anchor = anchor.next
            pos += 1
        node = Node(new_node)
        node.prev = anchor
        node.next = anchor.next
        if anchor.next is not None:
            anchor.next.prev = node
        else:
            # Spliced after the old tail: the new node becomes the tail.
            self.tail = node
        anchor.next = node

    def printDll(self):
        """Print each node's data from head to tail."""
        temp_iterator = self.head
        if self.head is None:
            print('Nothing here')
        while temp_iterator is not None:
            print(temp_iterator.data)
            temp_iterator = temp_iterator.next
if __name__ == '__main__':
    # Initializing the list
    doubly_linked_list = DoublyLinkedList()
    # Adding elements to the front of the list
    doubly_linked_list.insertFront(5)
    doubly_linked_list.insertFront(8)
    doubly_linked_list.insertFront(10)
    # Adding elements to the rear of the list
    doubly_linked_list.insertBack(11)
    doubly_linked_list.insertBack(99)
    # Printing the list
    doubly_linked_list.printDll()  # expected order: 10 8 5 11 99
    print('---')
    # Adding a desired number to a desired index
    doubly_linked_list.insertIndex(10,3)
    doubly_linked_list.printDll()
|
"""
The implementation of PSPNet based on Tensorflow.
@Author: Yang Lu
@Github: https://github.com/luyanger1799
@Project: https://github.com/luyanger1799/amazing-semantic-segmentation
"""
from amazingutils import layers as custom_layers
from amazingmodels import Network
import tensorflow as tf
# Short aliases for the Keras sub-namespaces used throughout this module.
layers = tf.keras.layers
models = tf.keras.models
backend = tf.keras.backend
class PSPNet(Network):
    """Pyramid Scene Parsing Network.

    Pools the encoder output at four pyramid scales, projects each pooled
    map to 512 channels, upsamples the maps back to the feature resolution,
    concatenates everything with the original features and classifies per
    pixel.
    """
    def __init__(self, num_classes, version='PSPNet', base_model='ResNet50', **kwargs):
        """
        The initialization of PSPNet.
        :param num_classes: the number of predicted classes.
        :param version: 'PSPNet'
        :param base_model: the backbone model
        :param kwargs: other parameters
        """
        # Backbone dilation rates; together with the `// 8` in _pspnet this
        # implies an encoder output stride of 8.
        dilation = [2, 4]
        base_model = 'ResNet50' if base_model is None else base_model
        assert version == 'PSPNet'
        assert base_model in ['VGG16',
                              'VGG19',
                              'ResNet50',
                              'ResNet101',
                              'ResNet152',
                              'DenseNet121',
                              'DenseNet169',
                              'DenseNet201',
                              'DenseNet264',
                              'MobileNetV1',
                              'MobileNetV2',
                              'Xception-DeepLab']
        super(PSPNet, self).__init__(num_classes, version, base_model, dilation, **kwargs)
    def __call__(self, inputs=None, input_size=None, **kwargs):
        # Accept either a ready-made input tensor or a (height, width) tuple.
        assert inputs is not None or input_size is not None
        if inputs is None:
            assert isinstance(input_size, tuple)
            inputs = layers.Input(shape=input_size + (3,))
        return self._pspnet(inputs)
    def _pspnet(self, inputs):
        """Assemble the PSPNet graph and return a compiled keras Model."""
        num_classes = self.num_classes
        _, inputs_h, inputs_w, _ = backend.int_shape(inputs)
        # Encoder feature resolution (input downsampled by 8).
        h, w = inputs_h // 8, inputs_w // 8
        x = self.encoder(inputs)
        # The 1x1/2x2/3x3/6x6 pooling grid only tiles evenly when the
        # feature map is divisible by 6.
        if not (h % 6 == 0 and w % 6 == 0):
            raise ValueError('\'pyramid pooling\' size must be divided by 6, but received {size}'.format(size=(h, w)))
        # Pooling windows producing 1x1, 2x2, 3x3 and 6x6 output grids.
        pool_size = [(h, w),
                     (h // 2, w // 2),
                     (h // 3, w // 3),
                     (h // 6, w // 6)]
        # pyramid pooling: each branch is pool -> 1x1 conv (512) -> BN ->
        # ReLU -> upsample back to (h, w).
        x1 = custom_layers.GlobalAveragePooling2D(keep_dims=True)(x)
        x1 = layers.Conv2D(512, 1, strides=1, kernel_initializer='he_normal')(x1)
        x1 = layers.BatchNormalization()(x1)
        x1 = layers.ReLU()(x1)
        x1 = layers.UpSampling2D(size=pool_size[0])(x1)
        x2 = layers.AveragePooling2D(pool_size=pool_size[1])(x)
        x2 = layers.Conv2D(512, 1, strides=1, kernel_initializer='he_normal')(x2)
        x2 = layers.BatchNormalization()(x2)
        x2 = layers.ReLU()(x2)
        x2 = layers.UpSampling2D(size=pool_size[1])(x2)
        x3 = layers.AveragePooling2D(pool_size=pool_size[2])(x)
        x3 = layers.Conv2D(512, 1, strides=1, kernel_initializer='he_normal')(x3)
        x3 = layers.BatchNormalization()(x3)
        x3 = layers.ReLU()(x3)
        x3 = layers.UpSampling2D(size=pool_size[2])(x3)
        x6 = layers.AveragePooling2D(pool_size=pool_size[3])(x)
        x6 = layers.Conv2D(512, 1, strides=1, kernel_initializer='he_normal')(x6)
        x6 = layers.BatchNormalization()(x6)
        x6 = layers.ReLU()(x6)
        x6 = layers.UpSampling2D(size=pool_size[3])(x6)
        # Fuse original features with all pyramid branches.
        x = layers.Concatenate()([x, x1, x2, x3, x6])
        x = layers.Conv2D(512, 3, strides=1, padding='same', kernel_initializer='he_normal')(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)
        # Per-pixel classifier, then upsample back to input resolution.
        x = layers.Conv2D(num_classes, 1, strides=1, kernel_initializer='he_normal')(x)
        x = layers.BatchNormalization()(x)
        x = layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(x)
        outputs = x
        return models.Model(inputs, outputs, name=self.version)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-10-14 09:20
# Manually modified to serve as input for the data migration 0009_migrate_to_datetimes
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wafer.snippets.markdown_field
class Migration(migrations.Migration):
    """Schema changes preparing the move to datetime-based scheduling.

    Introduces ScheduleBlock and adds new datetime start/end fields to
    Slot, keeping the old fields under renamed columns so the follow-up
    data migration (0009_migrate_to_datetimes) can copy values across.
    """
    dependencies = [
        ('schedule', '0007_venue_add_video'),
    ]
    operations = [
        # New model: a contiguous block of schedule (e.g. one conference day).
        migrations.CreateModel(
            name='ScheduleBlock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_time', models.DateTimeField(blank=True, null=True)),
                ('end_time', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'ordering': ['start_time'],
            },
        ),
        migrations.AlterModelOptions(
            name='slot',
            options={'ordering': ['end_time', 'start_time']},
        ),
        # Keep the legacy time fields under old_* names for the data migration.
        migrations.RenameField(
            model_name='slot',
            old_name='start_time',
            new_name='old_start_time',
        ),
        migrations.RenameField(
            model_name='slot',
            old_name='end_time',
            new_name='old_end_time',
        ),
        # New datetime fields; nullable until 0009 populates them.
        migrations.AddField(
            model_name='slot',
            name='end_time',
            field=models.DateTimeField(help_text='Slot end time', null=True),
        ),
        migrations.AddField(
            model_name='slot',
            name='start_time',
            field=models.DateTimeField(blank=True, help_text='Start time (if no previous slot selected)', null=True),
        ),
        migrations.AlterField(
            model_name='slot',
            name='previous_slot',
            field=models.ForeignKey(blank=True, help_text='Previous slot if applicable (slots should have either a previous slot OR a start time set)',
                                    null=True, on_delete=django.db.models.deletion.CASCADE, to='schedule.Slot'),
        ),
        migrations.AddField(
            model_name='venue',
            name='blocks',
            field=models.ManyToManyField(help_text='Blocks (days) on which this venue will be used.', to='schedule.ScheduleBlock'),
        ),
    ]
|
from django import forms
from .choices import *
from common.FrontendTexts import FrontendTexts
# Localized UI strings for the providers app, loaded once at import time.
view_texts = FrontendTexts('providers')
class ProviderForm(forms.Form):
    """Provider creation/edit form; labels come from the localized
    'creator' component texts."""
    labels = view_texts.getComponent()['creator']['labels']
    name = forms.CharField(max_length=255, label=labels['name'])
    category = forms.ChoiceField(choices=CATEGORY_CHOICES,
                                 label=labels['category'],
                                 initial='',
                                 widget=forms.Select(),
                                 required=True)
    specialty = forms.CharField(max_length=255, label=labels['specialty'])
    webpage = forms.CharField(max_length=255, label=labels['webpage'])
    contactNames = forms.CharField(max_length=255, label=labels['contactNames'])
    emailAddresses = forms.EmailField(label=labels['emailAddresses'])
    address = forms.CharField(max_length=255, label=labels['address'])
    country = forms.CharField(max_length=255, label=labels['country'])
    city = forms.CharField(max_length=255, label=labels['city'])
    phone = forms.CharField(max_length=255, label=labels['phone'])
    taxId = forms.CharField(max_length=255, label=labels['taxId'])
    coordinates = forms.CharField(max_length=255, label=labels['coordinates'])
class ProviderFinderForm(forms.Form):
    """Lookup form: find a provider by its code."""
    labels = view_texts.getComponent()['finder']['labels']
    # NOTE(review): `labels` is loaded but not applied to the field below --
    # confirm whether `code` should carry a localized label.
    code = forms.CharField(max_length=255)
class SelectorForm(forms.Form):
    """Select an action to perform on a provider identified by code."""
    labels = view_texts.getComponent()['selector']['labels']
    code = forms.CharField(max_length=255, label=labels['code'])
    action = forms.ChoiceField(choices=ACTION_CHOICES,
                               label=labels['action'],
                               initial='',
                               widget=forms.Select(),
                               required=True)
class CommentForm(forms.Form):
    """Comment entry form for a provider (date, issuer and free text)."""
    labels = view_texts.getComponent()['comment']['labels']
    date = forms.CharField(max_length=255, label=labels['date'])
    issuer = forms.CharField(max_length=255, label=labels['issuer'])
    text = forms.CharField(max_length=500, label=labels['text'])
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Tuple
import torch
import torch.distributed as dist
from torch.distributed import ProcessGroup
# torch.distributed.nn.functional.all_reduce (autograd-aware) exists from
# torch 1.8. Compare numeric (major, minor) tuples: the previous comparison
# of string lists was lexicographic, so ["1", "10"] >= ["1", "8"] evaluated
# False and torch >= 1.10 wrongly fell back to the copied implementation.
if tuple(int(v) for v in torch.__version__.split(".")[:2]) >= (1, 8):
    from torch.distributed.nn.functional import all_reduce as differentiable_all_reduce
else:
    # Copied from https://github.com/pytorch/pytorch/blob/v1.8.1/torch/distributed/nn/functional.py
    class _AllReduce(torch.autograd.Function):
        """all_reduce whose backward pass all_reduces the gradient."""

        @staticmethod
        def forward(ctx, op, group, tensor):  # type: ignore
            ctx.group = group
            ctx.op = op
            tensor = tensor.clone()
            dist.all_reduce(tensor, op=op, group=group)
            return tensor

        @staticmethod
        def backward(ctx, grad_output):  # type: ignore
            # Gradient of a SUM all_reduce is itself an all_reduce.
            return (None, None) + (_AllReduce.apply(ctx.op, ctx.group, grad_output),)

    def differentiable_all_reduce(tensor, op=dist.ReduceOp.SUM, group=dist.group.WORLD):  # type: ignore
        return _AllReduce.apply(op, group, tensor)
def _forward(
    input: torch.Tensor,
    affine: bool,
    track_running_stats: bool,
    mean: torch.Tensor,
    meansqr: torch.Tensor,
    momentum: float,
    eps: float,
    weight: torch.Tensor,
    bias: torch.Tensor,
    running_mean: torch.Tensor,
    running_var: torch.Tensor,
    total_count: torch.Tensor,
) -> torch.Tensor:
    """Normalize ``input`` given pre-reduced statistics.

    ``mean`` and ``meansqr`` are the globally averaged E[x] and E[x^2];
    the variance is derived as E[x^2] - E[x]^2. When
    ``track_running_stats`` is set, ``running_mean``/``running_var`` are
    updated in place (without grad) using the unbiased variance. Returns
    the normalized (and, if ``affine``, scaled and shifted) tensor.
    """
    var = meansqr - mean * mean
    if track_running_stats:
        with torch.no_grad():
            # Bessel's correction: population variance -> unbiased estimate.
            unbiased_var = var * (total_count / (total_count - 1))
            running_mean += momentum * (mean.reshape(-1) - running_mean)
            running_var += momentum * (unbiased_var.reshape(-1) - running_var)
    invstd = torch.rsqrt(var + eps)
    if affine:
        return (input - mean) * invstd * weight.reshape(mean.shape) + bias.reshape(mean.shape)
    else:
        return (input - mean) * invstd
# Compare numeric (major, minor) tuples: the previous string-list comparison
# was lexicographic ("10" < "7"), silently disabling scripting on torch 1.10+.
if tuple(int(v) for v in torch.__version__.split(".")[:2]) >= (1, 7):
    _forward = torch.jit.script(_forward)  # type: ignore
class SyncBatchNorm(torch.nn.BatchNorm2d):
    """
    Fast re-implementation of ``torch.nn.SyncBatchNorm`` that can achieve a speedup
    of 5x or more over the default implementation depending on size of the input
    and number of distributed workers.
    """
    def __init__(
        self, *args: Tuple[Any, ...], process_group: Optional[ProcessGroup] = None, **kwargs: Dict[str, Any]
    ) -> None:
        super().__init__(*args, **kwargs)  # type: ignore
        # Default to the global process group when none is supplied.
        self._process_group = process_group if process_group is not None else dist.group.WORLD
    def forward(self, input: torch.Tensor) -> torch.Tensor:  # type: ignore
        # Outside distributed training (or in eval mode), behave exactly
        # like a plain BatchNorm2d.
        if not dist.is_initialized() or not self.training:
            return super().forward(input)
        # Reduce over every dimension except channels (dim 1).
        dim = [d for d in range(input.ndim) if d != 1]
        count = torch.full((1,), input.numel() // input.size(1), device=input.device, dtype=input.dtype)
        total_count = count.clone()
        # Kick off the count reduction asynchronously and overlap it with
        # the local moment computation below.
        handle = dist.all_reduce(total_count, group=self._process_group, async_op=True)
        mean = torch.mean(input, dim=dim, keepdim=True)
        meansqr = torch.mean(input * input, dim=dim, keepdim=True)
        # Fuse both moments into a single differentiable all_reduce.
        vec = torch.cat([mean, meansqr])
        handle.wait()
        # Pre-scale by this rank's share so the SUM reduction yields a
        # count-weighted global average.
        vec = vec * (count / total_count)
        mean, meansqr = differentiable_all_reduce(vec, group=self._process_group).chunk(2)  # type: ignore
        return _forward(
            input,
            self.affine,
            self.track_running_stats,
            mean,
            meansqr,
            self.momentum,
            self.eps,
            self.weight,
            self.bias,
            self.running_mean,
            self.running_var,
            total_count,
        )
    @classmethod
    def convert_sync_batchnorm(
        cls, module: torch.nn.Module, process_group: Optional[ProcessGroup] = None
    ) -> torch.nn.Module:
        r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
        :class:`fairscale.experimental.nn.SyncBatchNorm` layers.
        Args:
            module (nn.Module): module containing one or more attr:`BatchNorm*D` layers
            process_group (optional): process group to scope synchronization,
                default is the whole world
        Returns:
            The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
            layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
            a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
            instead.
        Example::
            >>> # Network with nn.BatchNorm layer
            >>> module = torch.nn.Sequential(
            >>>            torch.nn.Linear(20, 100),
            >>>            torch.nn.BatchNorm1d(100),
            >>>          ).cuda()
            >>> # creating process group (optional)
            >>> # ranks is a list of int identifying rank ids.
            >>> ranks = list(range(8))
            >>> r1, r2 = ranks[:4], ranks[4:]
            >>> # Note: every rank calls into new_group for every
            >>> # process group created, even if that rank is not
            >>> # part of the group.
            >>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
            >>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
            >>> sync_bn_module = fairscale.experimental.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
        """
        module_output = module
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module_output = SyncBatchNorm(
                module.num_features,  # type: ignore
                module.eps,  # type: ignore
                module.momentum,  # type: ignore
                module.affine,  # type: ignore
                module.track_running_stats,  # type: ignore
                process_group=process_group,
            )
            # Carry over learned parameters and accumulated statistics.
            if module.affine:
                with torch.no_grad():
                    module_output.weight = module.weight
                    module_output.bias = module.bias
            module_output.running_mean = module.running_mean
            module_output.running_var = module.running_var
            module_output.num_batches_tracked = module.num_batches_tracked
            if hasattr(module, "qconfig"):
                module_output.qconfig = module.qconfig
        # Recurse into children, converting any nested BatchNorm layers.
        for name, child in module.named_children():
            module_output.add_module(name, cls.convert_sync_batchnorm(child, process_group))
        del module
        return module_output
|
#! /usr/bin/python
"""Xmlgame: does xmlstuff
jsgame: does js stuff
App: gets clipboard and runs UI
Right now everythign depends on the UI being up and running. This won't always be the case, and isn't necessary.
"""
import Tkinter
import os
import re
from Tkconstants import *
import sys
import json
from xml.dom import minidom
def cb():
    """Read and return the clipboard contents via a throwaway Tk root."""
    root = Tkinter.Tk()
    contents = root.clipboard_get()
    root.destroy()
    return contents
class badXmlgame():
    """ I want to make this inherit from minidom
    It's not working yet"""
    # Maps charId attribute values to character names.
    chars = { "2":"Thorpin", "1":"Featherwell", "3":"Ganelon", "4":"Pippen"}
    def __init__(self,parsed_json):
        # Drill down json -> response -> properties -> value, which holds
        # the XML document as a string.
        tmp = parsed_json['json']
        tmp = tmp['response']
        tmp = tmp['properties']
        tmp = tmp['value']
        self.xml = minidom.parseString(tmp)
    def show_size(self):
        # Print each <game> element's owning character and serialized size.
        games = self.xml.getElementsByTagName("game")
        for i in games:
            charName = self.chars[i.parentNode.getAttribute("charId")]
            length = len(i.toxml())
            print "%s %d" % (charName, length)
    def toxml(self):
        # Serialize the whole parsed document back to XML text.
        return self.xml.toxml()
class Xmlgame():
    """holds the interpreted xml from the json"""
    # Maps charId attribute values to character names.
    chars = { "2":"Thorpin", "1":"Featherwell", "3":"Ganelon", "4":"Pippen"}
    def __init__(self,parsed_json):
        # Drill down json -> response -> properties -> value, which holds
        # the XML document as a string.
        tmp = parsed_json['json']
        tmp = tmp['response']
        tmp = tmp['properties']
        tmp = tmp['value']
        self.xml = minidom.parseString(tmp)
    def getChar(self,name):
        """get a character given the character name.
        Not case sensistive"""
        mychars = self.xml.getElementsByTagName("data")
        for i in mychars:
            charnum = i.getAttribute("charId")
            charname = Xmlgame.chars[charnum]
            # Case-insensitive prefix match against the requested name.
            if re.match("(?i)%s" %(charname), name):
                return i
        raise NameError, "%s is not a valid name" % (name)
    def show_size(self):
        # Print each <game> element's owning character and serialized size.
        games = self.xml.getElementsByTagName("game")
        for i in games:
            charName = self.chars[i.parentNode.getAttribute("charId")]
            length = len(i.toxml())
            print "%s %d" % (charName, length)
    def toxml(self):
        # Serialize the whole parsed document back to XML text.
        return self.xml.toxml()
class App:
    """Tk UI: buttons to exit, say hi, and parse game stats from the
    clipboard."""
    def __init__(self, master):
        self.master = master # This is not legitimate use of "master"
        cbstats = "Get Stats from the clipboard"
        frame = Tkinter.Frame(master, relief = RIDGE, borderwidth=2)
        frame.pack(fill = BOTH, expand=1)
        label = Tkinter.Label(frame, text="hello world")
        label.pack(fill=X, expand=1)
        # NOTE(review): command=u.destroy references the module-global Tk
        # root `u`, not `master` -- breaks if App is built on another root.
        button = Tkinter.Button( frame, text="exit", command=u.destroy)
        button2 = Tkinter.Button( frame, text="hello", command=self.say_hi)
        button.pack(side=BOTTOM)
        button2.pack(side=BOTTOM)
        button3 = Tkinter.Button( frame, text=cbstats, command=self.blah)
        button3.pack(side=RIGHT)
    def blah(self):
        # Parse the clipboard as JSON and print per-character game sizes.
        tmp = self.master.clipboard_get()
        print "size is %d" % (len(tmp))
        g = json.loads(tmp)
        game = Xmlgame(g)
        game.show_size()
        # print game.toxml()
    def say_hi(self):
        print "hi there"
def savecb(filename):
    """Save the current clipboard contents to `filename`.

    Refuses to overwrite: raises NameError if the file already exists.
    """
    stuff = cb()
    if os.path.exists(filename):
        raise NameError, "file exists"
    with open(filename,'w') as f:
        f.write(stuff)
def getcb():
    """Build an Xmlgame from the JSON currently on the clipboard."""
    return Xmlgame(json.loads(cb()))
def getfile(fname):
    """Build an Xmlgame from a JSON file on disk."""
    with open(fname) as handle:
        parsed = json.load(handle)
    return Xmlgame(parsed)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        # No file argument: fall back to the interactive UI.
        # NOTE(review): no % args are applied, so the literal "%s" prints.
        print "usage: %s <filename>"
        u = Tkinter.Tk()
        app = App(u)
        u.mainloop()
        exit(1)
    # File mode: read the JSON dump and report per-character game sizes.
    with open(sys.argv[1]) as f:
        tmp = f.read()
    parsed_json = json.loads(tmp)
    print "Total length %d" % ( len(tmp))
    blah = Xmlgame(parsed_json)
    blah.show_size()
|
import re
from .. import logger as log
from .base import BaseFilter
class RegexParser(BaseFilter):
    """Filter that extracts named regex groups from one event field.

    For the configured `key`, every match of `pattern` in the field's
    content yields one dict of the pattern's named groups. Events without
    the key yield nothing.
    """
    def __call__(self, event):
        key = self.options["key"]
        if key in event:
            content = event[key]
            # NOTE(review): the pattern is recompiled for every event;
            # hoisting the compile out of __call__ would avoid the cost.
            pattern = re.compile(self.options["pattern"])
            if pattern.search(content) is None:
                # Log (but do not raise) when nothing matches; the loop
                # below then simply yields no results.
                log.error(self.options["pattern"])
                log.error("正则表达式无法匹配: {}".format(content))
            for item in pattern.finditer(content):
                yield item.groupdict()
|
#!/usr/bin/env python
## category Conversion
## desc Converts to BAM format (unmapped)
'''
Convert FASTQ to BAM. This doesn't perform any mapping, it simply stores the
read sequences in BAM format as unmapped reads. If given two files, the reads
will be correctly flagged as pairs.
'''
import os
import sys
import itertools
import pysam
from ngsutils.fastq import FASTQ
def export_bam(outbam, read1, read2, quiet=False):
    """Write FASTQ reads to an open BAM file as unmapped records.

    outbam -- an open pysam.Samfile in write mode
    read1  -- FASTQ object for read 1
    read2  -- FASTQ object for read 2, or None for single-end data

    Paired reads share a qname and are flagged is_read1 / is_read2.
    """
    if read2:
        def gen():
            # Paired mode: iterate both FASTQ files in lockstep.
            for r1, r2 in itertools.izip(read1.fetch(quiet=quiet), read2.fetch(quiet=True)):
                yield (r1, r2)
    else:
        def gen():
            for r1 in read1.fetch(quiet=quiet):
                yield (r1, None)
    for r1, r2 in gen():
        record1 = pysam.AlignedRead()
        record1.qname = r1.name
        record1.seq = r1.seq
        record1.qual = r1.qual
        if r2:
            record1.is_paired = True
            record1.is_read1 = True
            record2 = pysam.AlignedRead()
            # Pairs share the read name, but the sequence and qualities
            # must come from read 2 -- the previous version wrote r1's
            # seq/qual into both records, corrupting the mate.
            record2.qname = r1.name
            record2.seq = r2.seq
            record2.qual = r2.qual
            record2.is_paired = True
            record2.is_read2 = True
        outbam.write(record1)
        if r2:
            outbam.write(record2)
def usage():
print """Usage: fastqutils tobam {opts} outfile.bam read1.fastq{.gz} {read2.fastq}
Note: If two FASTQ files are given, they are assumed to be paired end reads.
Options:
-f Force overwriting output file
"""
sys.exit(1)
if __name__ == '__main__':
    outname = None
    read1_fname = None
    read2_fname = None
    force = False
    # Hand-rolled arg parsing: first non-flag arg is the output BAM, then
    # up to two existing FASTQ files.
    for arg in sys.argv[1:]:
        if arg == '-f':
            force = True
        elif not outname:
            if not force and os.path.exists(arg):
                # NOTE(review): verify usage() accepts a message argument --
                # it is called with one here.
                usage('Output file exists! (Use -f to force overwriting): %s' % arg)
            outname = arg
        elif not read1_fname and os.path.exists(arg):
            read1_fname = arg
        elif not read2_fname and os.path.exists(arg):
            read2_fname = arg
    if not outname or not read1_fname:
        usage()
    read1 = FASTQ(read1_fname)
    read2 = FASTQ(read2_fname) if read2_fname else None
    bam = pysam.Samfile(outname, 'wb')
    export_bam(bam, read1, read2)
    bam.close()
    read1.close()
    # NOTE(review): read2 is None for single-end runs, so this close()
    # would raise AttributeError -- needs a guard.
    read2.close()
|
# -*- coding: utf-8 -*-
"""
The ``sensortoolkit.evaluation_objs`` subpackage contains modules defining
the ``sensortoolkit.SensorEvaluation`` and ``sensortoolkit.PerformanceReport``
evaluation objects.
================================================================================
@Author:
| Samuel Frederick, NSSC Contractor (ORAU)
| U.S. EPA / ORD / CEMM / AMCD / SFSB
Created:
Thu Oct 13 16:15:00 2021
Last Updated:
Thu Oct 13 16:15:00 2021
"""
from ._sensor_eval import SensorEvaluation
from ._performance_report import PerformanceReport
|
from pybind11 import get_cmake_dir
from pybind11.setup_helpers import Pybind11Extension, build_ext
from setuptools import Extension
from .utils import get_incs, get_srcs
# libi2c_module = Extension('pylibi2c', include_dirs=[
# 'ext_modules/libi2c/src'], sources=get_srcs('ext_modules/libi2c/src'))
# Path to the prebuilt libmaix shared libraries for the R329 target.
ext_so = "./ext_modules/libmaix/components/libmaix/lib/arch/r329"
# Core C extension: wraps ext_modules/_maix against the libmaix headers.
_maix_module = Extension('_maix', include_dirs=['ext_modules/_maix/include', 'ext_modules/libmaix/components/libmaix/include'],
                         sources=get_srcs('ext_modules/_maix'),
                         libraries=[
                             "jpeg"
                         ],
                         )
# python3.8 -m pip install pybind11
# _maix_vivo_module = Pybind11Extension("_maix_vivo",
# include_dirs=[
# get_incs(
# 'ext_modules/libmaix/components/libmaix/include')
# ],
# sources=get_srcs(
# 'ext_modules/_maix_vivo'),
# libraries=[
# # "dl",
# # "rt",
# # "log",
# # "ion",
# "pthread",
# # "cdc_base",
# # "MemAdapter",
# # "media_utils",
# # "mpp_vi",
# # "mpp_isp",
# # "ISP",
# # "venc_base",
# # "mpp_component",
# # "adecoder",
# # "asound",
# # "venc_base",
# # "hwdisplay",
# # "maix_utils",
# # "maix_cam",
# # "maix_image",
# ],
# library_dirs=[ ext_so, ],
# extra_link_args=[-Wl,-rpath=/usr/lib/python3.8/site-packages/maix -DR329],
# # define_macros=[('V831Camera', None)],
# )
# python3.8 -m pip install pybind11
# OpenCV bindings: links against the full prebuilt OpenCV 4 stack shipped
# with libmaix for the R329 target.
_maix_opencv_module = Pybind11Extension(
    name = "_maix_opencv",
    include_dirs=[get_incs('ext_modules/libmaix/components/libmaix/lib/arch/r329/include/opencv4/')],
    sources=get_srcs('ext_modules/_maix_opencv'),
    libraries=[
        "opencv_aruco",
        "opencv_dnn",
        "opencv_hfs",
        "opencv_optflow",
        "opencv_shape",
        "opencv_videoio",
        "opencv_bgsegm",
        "opencv_dpm",
        "opencv_highgui",
        "opencv_phase_unwrapping",
        "opencv_stereo",
        "opencv_video",
        "opencv_bioinspired",
        "opencv_face",
        "opencv_imgcodecs",
        "opencv_photo",
        "opencv_stitching",
        "opencv_videostab",
        "opencv_calib3d",
        "opencv_features2d",
        "opencv_img_hash",
        "opencv_plot",
        "opencv_structured_light",
        "opencv_ccalib",
        "opencv_flann",
        "opencv_imgproc",
        "opencv_quality",
        "opencv_superres",
        "opencv_ximgproc",
        "opencv_core",
        "opencv_freetype",
        "opencv_line_descriptor",
        "opencv_reg",
        "opencv_surface_matching",
        "opencv_xobjdetect",
        "opencv_datasets",
        "opencv_fuzzy",
        "opencv_ml",
        "opencv_rgbd",
        "opencv_text",
        "opencv_xphoto",
        "opencv_dnn_objdetect",
        "opencv_objdetect",
        "opencv_saliency",
        "opencv_tracking"
    ],
    library_dirs=["./ext_modules/libmaix/components/libmaix/lib/arch/r329/opencv4", ],
    # rpath points at the installed package dir so the bundled .so files
    # are found at runtime.
    extra_link_args=["-Wl,-rpath=/usr/local/lib/python3.9/dist-packages/maix/_maix_opencv"],
    extra_compile_args=['-std=c++11', '-std=gnu++11' ],
)
# Camera bindings: only pthread and the libmaix camera library are linked;
# the commented entries are kept for reference from other targets.
_maix_camera_module = Pybind11Extension(
    name = '_maix_camera',
    include_dirs=['ext_modules/_maix_camera/include', 'ext_modules/libmaix/components/libmaix/include'],
    sources=get_srcs('ext_modules/_maix_camera'),
    libraries=[
        # "dl",
        # "rt",
        # "log",
        # "ion",
        "pthread",
        # "cdc_base",
        # "MemAdapter",
        # "media_utils",
        # "mpp_vi",
        # "mpp_isp",
        # "ISP",
        # "venc_base",
        # "mpp_component",
        # "adecoder",
        # "asound",
        # "venc_base",
        # "hwdisplay",
        # "maix_utils",
        "maix_cam",
        # "maix_image",
    ],
    library_dirs=["/lib", "/usr/lib", ext_so, ],
    # extra_link_args = [ "-Wl,-z,origin", "-Wl,-rpath='$ORIGIN/maix'" ]
    extra_compile_args=['-DR329Camera', '-std=c++11', '-std=gnu++11' ],
    extra_link_args=["-Wl,-rpath=/usr/local/lib/python3.9/dist-packages/maix"]
)
# Display bindings: links pthread and the libmaix display library.
_maix_display_module = Pybind11Extension(
    name = "_maix_display",
    include_dirs=['ext_modules/_maix_display/include', 'ext_modules/libmaix/components/libmaix/include'],
    sources=get_srcs('ext_modules/_maix_display'),
    libraries=[
        # "dl",
        # "rt",
        # "log",
        # "ion",
        "pthread",
        # "cdc_base",
        # "maix_utils",
        "maix_disp",
        # "maix_image",
    ],
    library_dirs=["/lib", "/usr/lib", ext_so, ],
    extra_compile_args=['-DR329Display', '-std=c++11', '-std=gnu++11' ],
    extra_link_args=["-Wl,-rpath=/usr/local/lib/python3.9/dist-packages/maix"]
)
# max_nn_srcs = get_srcs('ext_modules/_maix_nn/src')
# max_nn_srcs.extend(get_srcs('ext_modules/libmaix/components/libmaix/src'))
# max_nn_srcs.remove("ext_modules/libmaix/components/libmaix/src/libmaix.c")
# _maix_nn_module = Extension('_maix_nn', include_dirs=['ext_modules/_maix_nn/include', 'ext_modules/libmaix/components/libmaix/include'],
# sources=max_nn_srcs,
# libraries=[
# "maix_utils", "maix_nn",
# ],
# library_dirs=["/lib", "/usr/lib", ext_so, ],
# # extra_link_args = [ "-Wl,-z,origin", "-Wl,-rpath='$ORIGIN/maix'" ]
# extra_link_args=[-Wl,-rpath=/usr/lib/python3.8/site-packages/maix -DR329]
# )
# All C/C++ extensions to build (commented entries are not ported yet).
_maix_modules = [
    # libi2c_module,
    _maix_module,
    # _maix_vivo_module,
    _maix_opencv_module,
    _maix_camera_module,
    _maix_display_module,
    # _maix_nn_module
]
# Prebuilt shared objects installed alongside the package.
_maix_data_files = [
    ('/maix', get_srcs(ext_so, ['so'])),
    ('/maix/_maix_opencv/', get_srcs("ext_modules/libmaix/components/libmaix/lib/arch/r329/opencv4", ['so'])), # depend system provide
]
_maix_py_modules = [
"Pillow",
"rpyc",
"gpiod",
"evdev",
"spidev",
"pyserial"
"zbarlight",
]
|
from .rest_api_base import *
# First Alert Methods appear in API 3.2
class AlertMethods32():
    """Data-driven alert endpoints introduced in Tableau REST API 3.2.

    Wraps a TableauRestApiBase32 instance and proxies unknown attributes
    to it, so the base helpers (query_resource, build_api_url, ...) are
    usable directly on this object.
    """
    def __init__(self, rest_api_base: TableauRestApiBase32):
        self.rest_api_base = rest_api_base
    def __getattr__(self, attr):
        # Delegate anything not defined here to the wrapped base object.
        return getattr(self.rest_api_base, attr)
    def query_data_driven_alerts(self) -> ET.Element:
        """Return all data-driven alerts visible to the session."""
        self.start_log_block()
        alerts = self.query_resource("dataAlerts")
        self.end_log_block()
        return alerts
    def query_data_driven_alerts_for_view(self, view_luid: str) -> ET.Element:
        """Return the data-driven alerts defined on one view."""
        self.start_log_block()
        alerts = self.query_resource("dataAlerts?filter=viewId:eq:{}".format(view_luid))
        self.end_log_block()
        return alerts
    def query_data_driven_alert_details(self, data_alert_luid: str) -> ET.Element:
        """Return the full record for a single data-driven alert."""
        self.start_log_block()
        alert_details = self.query_resource("dataAlerts/{}".format(data_alert_luid))
        self.end_log_block()
        return alert_details
    def delete_data_driven_alert(self, data_alert_luid: str):
        """Delete the given data-driven alert."""
        self.start_log_block()
        url = self.build_api_url("dataAlerts/{}".format(data_alert_luid))
        self.send_delete_request(url)
        self.end_log_block()
    def add_user_to_data_driven_alert(self, data_alert_luid: str, username_or_luid: str):
        """Subscribe a user (by name or LUID) to an alert."""
        self.start_log_block()
        user_luid = self.query_user_luid(username_or_luid)
        tsr = ET.Element("tsRequest")
        u = ET.Element("user")
        u.set("id", user_luid)
        tsr.append(u)
        url = self.build_api_url('dataAlerts/{}/users'.format(data_alert_luid))
        self.send_add_request(url, tsr)
        self.end_log_block()
    def update_data_driven_alert(self, data_alert_luid: str, subject: Optional[str] = None,
                                 frequency: Optional[str] = None,
                                 owner_username_or_luid: Optional[str] = None) -> ET.Element:
        """Update an alert's subject, frequency and/or owner.

        Only the supplied fields are changed. Frequency is validated
        against the values the REST API accepts.
        """
        self.start_log_block()
        tsr = ET.Element("tsRequest")
        d = ET.Element("dataAlert")
        if subject is not None:
            d.set("subject", subject)
        if frequency is not None:
            frequency = frequency.lower()
            allowed_frequency = ('once', 'frequently', 'hourly', 'daily', 'weekly')
            if frequency not in allowed_frequency:
                raise InvalidOptionException('frequency must be once, frequently, hourly, daily or weekly')
            d.set('frequency', frequency)
        if owner_username_or_luid is not None:
            owner_luid = self.query_user_luid(owner_username_or_luid)
            o = ET.Element('owner')
            o.set("id", owner_luid)
            d.append(o)
        tsr.append(d)
        url = self.build_api_url("dataAlerts/{}".format(data_alert_luid))
        response = self.send_update_request(url, tsr)
        self.end_log_block()
        return response
    def delete_user_from_data_driven_alert(self, data_alert_luid: str, username_or_luid: str):
        """Unsubscribe a user (by name or LUID) from an alert."""
        self.start_log_block()
        user_luid = self.query_user_luid(username_or_luid)
        url = self.build_api_url('dataAlerts/{}/users/{}'.format(data_alert_luid, user_luid))
        self.send_delete_request(url)
        self.end_log_block()
class AlertMethods33(AlertMethods32):
    # API 3.3: no alert-specific changes; inherits everything from 3.2.
    def __init__(self, rest_api_base: TableauRestApiBase33):
        self.rest_api_base = rest_api_base
class AlertMethods34(AlertMethods33):
    # API 3.4: no alert-specific changes; inherits everything from 3.3.
    def __init__(self, rest_api_base: TableauRestApiBase34):
        self.rest_api_base = rest_api_base
class AlertMethods35(AlertMethods34):
    # API 3.5: no alert-specific changes; inherits everything from 3.4.
    def __init__(self, rest_api_base: TableauRestApiBase35):
        self.rest_api_base = rest_api_base
class AlertMethods36(AlertMethods35):
def __init__(self, rest_api_base: TableauRestApiBase36):
self.rest_api_base = rest_api_base |
'''
Decentralized Parallel ICA (“dpICA”) : COINSTAC simulator
This script computes pICA using the INFOMAX criteria in decentralized environment.
Creator : Chan Panichvatana (cpanichvatana1@student.gsu.edu)
Reference: Parallel Independent Component Analysis (pICA): (Liu et al. 2009)
'''
from coinstac_computation import COINSTACPyNode, ComputationPhase, PhaseEndWithSuccess
import remote_utils
class PhaseGlobalCompute(ComputationPhase):
    """Remote (aggregator) phase that runs one global dpICA step per iteration."""

    def _initialize(self):
        # Seed the shared cache with a log buffer for this phase.
        self.cache['logs'] = ["==Global logging==" + "\n"]

    def compute(self):
        """Run the global dpICA computation; returns an empty output payload."""
        out = {}
        self.cache['logs'].append('==Global Calling dpica starting.==' + "\n")
        # Called for its side effects on the node state; the previous code bound
        # the return value to an unused local (`data`), which is dropped here.
        remote_utils.calling_dpica(self, self.input_args, self.state)
        self.cache['logs'].append('==Global Calling dpica completed.==' + "\n")
        # Reset the log buffer at the end of every global iteration.
        self.cache['logs'] = []
        return out
class PhaseSendAggregatedResults(ComputationPhase):
    # Final remote phase: appends the closing log line, flushes the cached logs
    # via remote_utils.log, and clears the cached results.
    def compute(self):
        out = {}
        # NOTE(review): assumes cache['logs'] is still a list here — PhaseGlobalCompute
        # resets it to [] at the end of each iteration, so only this line is flushed.
        self.cache['logs'].append('==End of COINSTAC simulator.==' + "\n")
        remote_utils.log(self.cache, self.state, "output", "log_remote_output")
        self.cache['results'] = []
        return out
# Assemble the remote (aggregator) node: iterate the global compute phase until
# convergence, then flush aggregated results and signal success to COINSTAC.
remote = COINSTACPyNode(mode='remote', debug=True)
remote.add_phase(PhaseGlobalCompute, multi_iterations=True)
remote.add_phase(PhaseSendAggregatedResults, local_only=True)
remote.add_phase(PhaseEndWithSuccess)
# When run directly (COINSTAC simulator), communicate over stdin/stdout.
if __name__ == "__main__":
    remote.to_stdout()
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import numpy as np
from sklearn.exceptions import UndefinedMetricWarning
from ... import execute
from ... import tensor as mt
from ..utils.validation import (
check_array,
check_consistent_length,
column_or_1d,
_num_samples,
)
def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
    """Check that y_true and y_pred belong to the same regression task.

    Parameters
    ----------
    y_true : array-like
    y_pred : array-like
    multioutput : array-like or string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    Returns
    -------
    type_true : one of {'continuous', 'continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'.
    y_true : array-like of shape (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples, n_outputs)
        Estimated target values.
    multioutput : array-like of shape (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    dtype : str or list, default="numeric"
        the dtype argument passed to check_array.
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
    y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
    # Promote 1-D targets to a single-output column so downstream code can
    # always assume a (n_samples, n_outputs) shape.
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))
    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError(
            "y_true and y_pred have different number of output "
            "({0}!={1})".format(y_true.shape[1], y_pred.shape[1])
        )
    n_outputs = y_true.shape[1]
    allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted")
    if isinstance(multioutput, str):
        if multioutput not in allowed_multioutput_str:
            raise ValueError(
                "Allowed 'multioutput' string values are {}. "
                "You provided multioutput={!r}".format(
                    allowed_multioutput_str, multioutput
                )
            )
    elif multioutput is not None:
        # Array-like multioutput: validate it as per-output weights.
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(
                ("There must be equally many custom weights (%d) as outputs (%d).")
                % (len(multioutput), n_outputs)
            )
    y_type = "continuous" if n_outputs == 1 else "continuous-multioutput"
    return y_type, y_true, y_pred, multioutput
def r2_score(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
    session=None,
    run_kwargs=None
):
    """:math:`R^2` (coefficient of determination) regression score function.

    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). A constant model that always
    predicts the expected value of y, disregarding the input features,
    would get a :math:`R^2` score of 0.0.
    Read more in the :ref:`User Guide <r2_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, \
            array-like of shape (n_outputs,) or None, default='uniform_average'
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default is "uniform_average".
        'raw_values' :
            Returns a full set of scores in case of multioutput input.
        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.
        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.
    session : optional
        Mars session used to execute the computation.
    run_kwargs : dict, optional
        Extra keyword arguments forwarded to the Mars execution.

    Returns
    -------
    z : float or tensor of floats
        The :math:`R^2` score or ndarray of scores if 'multioutput' is
        'raw_values'.

    Notes
    -----
    This is not a symmetric function.
    Unlike most other scores, :math:`R^2` score may be negative (it need not
    actually be the square of a quantity R).
    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from mars.learn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred,
    ...          multioutput='variance_weighted')
    0.938...
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> r2_score(y_true, y_pred)
    -3.0
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)
    # R^2 needs at least two samples to define the variance in the denominator.
    if _num_samples(y_pred) < 2:
        msg = "R^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float("nan")
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.0
    # Residual sum of squares and total sum of squares, per output column.
    numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64)
    denominator = (
        weight * (y_true - mt.average(y_true, axis=0, weights=sample_weight)) ** 2
    ).sum(axis=0, dtype=np.float64)
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    output_scores = mt.ones([y_true.shape[1]])
    output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            # return scores individually
            return output_scores
        elif multioutput == "uniform_average":
            # passing None as weights results is uniform mean
            avg_weights = None
        elif multioutput == "variance_weighted":
            avg_weights = denominator
            # avoid fail on constant y or one-element arrays
            cond1 = mt.any(nonzero_denominator)
            execute(
                cond1, nonzero_denominator, session=session, **(run_kwargs or dict())
            )
            if not cond1.fetch():
                if not mt.any(nonzero_numerator).to_numpy(
                    session=session, **(run_kwargs or dict())
                ):
                    return 1.0
                else:
                    return 0.0
    else:
        avg_weights = multioutput
    return mt.average(output_scores, weights=avg_weights).execute(
        session=session, **(run_kwargs or dict())
    )
|
#!/usr/bin/python
"""
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from apricot import TestWithServers
from general_utils import pcmd, run_task
class SSDSocketTest(TestWithServers):
    """Test Class Description: Verify NVMe NUMA socket values.
    This test covers the requirement SRS-10-0034.
    dmg supports listing of available storage (NVDIMMs, SSD) and network adapters
    and in all cases shows socket affinity
    Call dmg storage scan --verbose to obtain NUMA socket value (Socket ID) of
    each NVMe disk. Verify against the value in
    /sys/class/pci_bus/<PCI Address Head>/device/numa_node
    where PCI Address Head is the first two hex numbers separated by colon.
    e.g., 0000:5e:00.0 -> PCI Address Head is 0000:5e
    :avocado: recursive
    """
    def debug_numa_node(self, pci_addr_heads):
        """Debug numa_node file by searching it in /sys and call hwloc-ls.
        Args:
            pci_addr_heads (list): List of PCI address head.
        """
        # Locate each PCI address head under /sys on every server host, so the
        # failure logs show where (or whether) the numa_node file exists.
        for pci_addr_head in pci_addr_heads:
            self.log.debug(
                "----- Search PCI Addr Head %s in /sys -----", pci_addr_head)
            task = run_task(
                hosts=self.hostlist_servers,
                command="find /sys -name \"{}\"".format(pci_addr_head))
            for output, _ in task.iter_buffers():
                self.log.debug(output)
        # Another way to obtain the Socket ID is to use hwloc-ls --whole-io
        # --verbose. It contains something like:
        # Bridge Host->PCI L#9 (P#2 buses=0000:[80-81])
        #   Bridge PCI->PCI (P#524320 busid=0000:80:02.0 id=8086:2f04
        # class=0604(PCI_B) buses=0000:[81-81])
        #     PCI 8086:2701 (P#528384 busid=0000:81:00.0 class=0108(NVMExp)
        # PCISlot=801)
        # In this case, the PCI address was 0000:81:00.0. We can figure out
        # which NUMA node section these lines are in. This approach is clearly
        # much more cumbersome than reading the numa_node, so it's called here
        # for mainly debugging purpose.
        self.log.debug("----- Show PCI Address in hwloc-ls -----")
        pcmd(
            hosts=self.hostlist_servers,
            command="hwloc-ls --whole-io --verbose")
    def test_scan_ssd(self):
        """
        JIRA ID: DAOS-3584
        Test Description: Verify NVMe NUMA socket values.
        :avocado: tags=all,small,full_regression,hw,control,ssd_socket
        """
        # Call dmg storage scan --verbose and get the PCI addresses.
        data = self.get_dmg_command().storage_scan(verbose=True)
        pci_addrs = data[self.hostlist_servers[0]]["nvme"].keys()
        self.log.info("Testing PCI addresses: %s", pci_addrs)
        pci_addr_heads = []
        errors = []
        # For every PCI address, verify its Socket ID against its NUMA socket
        # ID.
        for pci_addr in pci_addrs:
            # Get the PCI Address Head and construct the path to numa_node.
            cmd_socket_id = data[self.hostlist_servers[0]]["nvme"][pci_addr]\
                ["socket"]
            pci_addr_values = pci_addr.split(":")
            pci_addr_head = "{}:{}".format(
                pci_addr_values[0], pci_addr_values[1])
            pci_addr_heads.append(pci_addr_head)
            numa_node_path = "/sys/class/pci_bus/{}/device/numa_node".format(
                pci_addr_head)
            # Call cat on the server host, not necessarily the local test host.
            task = run_task(
                hosts=[self.hostlist_servers[0]],
                command="cat {}".format(numa_node_path))
            # Obtain the numa_node content.
            fs_socket_id = ""
            for output, _ in task.iter_buffers():
                # NOTE(review): takes the last line of the stringified buffer;
                # assumes cat's value is on the final line — confirm against
                # the run_task buffer format.
                fs_socket_id = str(output).splitlines()[-1]
            # Test that the content is expected.
            # NOTE(review): string comparison — assumes cmd_socket_id is also a
            # str (not int) in the dmg output dict; verify.
            if fs_socket_id != cmd_socket_id:
                errors.append(
                    "Unexpected socket ID! Cmd: {}; FS: {}".format(
                        cmd_socket_id, fs_socket_id))
        if errors:
            # Since we're dealing with system files and we don't have access to
            # them in CI, we need some debugging info when the test fails to
            # better understand the result.
            self.debug_numa_node(pci_addr_heads)
            self.fail("Error found!\n{}".format("\n".join(errors)))
|
import sys
import subprocess
import numpy as np
from tqdm import tqdm
# Everything after this script's name is the command line used to launch the
# solution under test; bail out with usage help when nothing was supplied.
argline = sys.argv[1:]
if not argline:
    print('Usage:')
    print(' $ python 6_random_check.py <command_to_run_your_solution>')
    print('Example:')
    print(' $ python 6_random_check.py python 6_my_solution.py')
    exit()
def check(failure, msg, inp, out):
    """Abort the checker with diagnostics when *failure* is truthy; no-op otherwise."""
    if not failure:
        return
    print()
    print('Input:', inp, sep='\n')
    print('Output:', out, sep='\n')
    print(msg)
    exit()
def is_singular(mat):
    """Return True when *mat* is singular over GF(2), i.e. det(mat) mod 2 != 1."""
    det_mod2 = np.linalg.det(mat) % 2
    return not np.allclose(det_mod2, 1)
def mat2str(mat):
    """Format a 2-D matrix as space-separated values, one row per line."""
    formatted_rows = []
    for row in mat:
        formatted_rows.append(' '.join(str(cell) for cell in row))
    return '\n'.join(formatted_rows)
# Fuzz the solution: generate 150 random invertible GF(2) matrices of growing
# size, feed each to the solution process, and validate the returned L, U, P
# factors structurally and by recomputing the product over GF(2).
print('Checking the program on random matrices...')
for i in tqdm(range(150)):
    # Ramp matrix size up with the iteration count: 2, 3, 4, 5, 6, then 7.
    size = 2 if i < 5 else (3 if i < 15 else (4 if i < 60 else (5 if i < 100 else (6 if i < 140 else 7))))
    # Rejection-sample a random 0/1 matrix until it is invertible over GF(2).
    while True:
        mat_in = np.round(np.random.random((size, size))).astype('b')
        if not is_singular(mat_in):
            break
    inp = mat2str(mat_in)
    # Run the solution with the matrix on stdin; capture stdout/stderr as text.
    proc = subprocess.run(argline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=inp, encoding='ascii')
    (out, err) = proc.stdout.strip(), proc.stderr
    if err != '':
        print()
        print('Input:', inp, sep='\n')
        print('Error:', err, sep='\n')
        exit()
    if out == '':
        print()
        print('Input:', inp, sep='\n')
        print('Output is empty!')
        exit()
    # Structural checks: 3*size lines of single-space-separated 0/1 tokens.
    check(len(out.split('\n')) != 3 * size, 'Incorrect size of output, must be ' + str(3 * size) + ' lines', inp, out)
    check(sum(sum(x == '' for x in row.split(' ')) for row in out.split('\n')) != 0, 'Incorrect spacing in output', inp, out)
    check(sum(sum(x != '0' and x != '1' for x in row.split(' ')) for row in out.split('\n')) != 0, 'Unexpected tokens in output, only 0 and 1 are allowed', inp, out)
    data = [[int(x) for x in row.split(' ')] for row in out.split('\n')]
    # The output stacks L, U, P vertically, size rows each.
    l, u, p = np.array(data[:size]), np.array(data[size:2*size]), np.array(data[2*size:])
    check(is_singular(l), 'L is singular', inp, out)
    check(is_singular(u), 'U is singular', inp, out)
    check(is_singular(p), 'P is singular', inp, out)
    check(not np.allclose(l, np.tril(l)), 'L is not a lower triangle matrix', inp, out)
    check(not np.allclose(u, np.triu(u)), 'U is not an upper triangle matrix', inp, out)
    # A permutation matrix has exactly one 1 in every row and column.
    check(np.any(p.sum(axis=0) - 1) or np.any(p.sum(axis=1) - 1), 'P is not a permutation matrix', inp, out)
    # Recompute the product over GF(2) (& 1 reduces the integer product mod 2).
    got = (l @ u @ p) & 1
    if not np.allclose(got, mat_in):
        print()
        print('Product of LUP is not A, decomposition is not correct')
        print('Input:', inp, sep='\n')
        print('Product of LUP:')
        np.savetxt(sys.stdout, got, fmt='%d')
        print('Output:', out, sep='\n')
        exit()
print()
print('Seems to be correct!')
#
# Generated with StressJointSegmentBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.moao import MOAOBlueprint
class StressJointSegmentBlueprint(MOAOBlueprint):
    """Generated blueprint describing the attributes of a RIFLEX StressJointSegment.

    NOTE: this file is generated (see header); edit the generator, not this class.
    """
    def __init__(self, name="StressJointSegment", package_path="sima/riflex", description=""):
        super().__init__(name,package_path,description)
        # Common MOAO identification attributes.
        self.attributes.append(Attribute("name","string","",default=""))
        self.attributes.append(Attribute("description","string","",default=""))
        self.attributes.append(Attribute("_id","string","",default=""))
        self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
        # Segment geometry and discretization.
        self.attributes.append(Attribute("length","number","Length of the segment.",default=0.0))
        self.attributes.append(Attribute("numElements","integer","Number of elements",default=10))
        self.attributes.append(Attribute("extDiameterEnd2","number","External diameter at second end of actual segment.",default=0.0))
        self.attributes.append(Attribute("wallThicknessEnd2","number","Wall thickness at second end.",default=0.0))
        # Material properties.
        self.attributes.append(Attribute("elasticModulus","number","Elastic modulus.",default=0.0))
        self.attributes.append(Attribute("materialDensity","number","Density of pipe material.",default=0.0))
# Standard libraries
import logging
import os
import json
import datetime
import math
import copy
# External libraries
import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.diagnostic import het_breuschpagan
from statsmodels.stats.diagnostic import het_white
import matplotlib.pyplot as plt
# Internal libraries
from generate_profile.constants import LOG_FORMAT, DATE_FORMAT
class LinearModel:
    """Develop an OLS regression model that predicts a transformer power
    profile from exogenous parameters (mainly weather data) and
    disaggregates the prediction into per-customer-class profiles.
    """
    def __init__(self,
                 dt_dataframe,
                 customer_dataframe,
                 weather_dataframe,
                 date_dataframe,
                 ):
        """Store the input dataframes and set up logging.

        Args:
            dt_dataframe: time-indexed transformer power profiles, one column per DT.
            customer_dataframe: per-customer energy data with 'Transformer Name'
                and 'Customer Type' columns plus monthly energy columns.
            weather_dataframe: time-indexed weather observations.
            date_dataframe: time-indexed calendar features (month, half-hour index, ...).
        """
        # setup logger
        self.logger = logging.getLogger()
        logging.basicConfig(format=LOG_FORMAT, level='DEBUG')
        self.logger.info('Reading data .......')
        # Keep all raw inputs in one dict for uniform access by name.
        self.data = {
            'weatherdata': weather_dataframe,
            'dtprofile': dt_dataframe,
            'consumerenergydata': customer_dataframe,
            'datedata': date_dataframe
        }
        self.logger.info('Imported data successfully.')
    def create_dataframe(self, dt_name, start_date, end_date):
        """Assemble the model dataframe for one transformer over [start_date, end_date]."""
        self.dt_name = dt_name
        self.logger.info(f"start_date: {start_date}, end date: {end_date}")
        if start_date != '' and end_date != '':
            self.timelist = [date for date in self.data['dtprofile'].index
                             if date >= start_date and date <= end_date]
        self.energy_proportion = self.generate_energy_proportion_dataframe(self.dt_name)
        self.dataformodel = pd.concat([
            self.data['weatherdata'].loc[self.timelist],
            self.data['datedata'].loc[self.timelist],
            self.energy_proportion
        ], axis=1, sort=False)
        self.dataformodel['TransformerPower'] = self.data['dtprofile'][self.dt_name] \
            .loc[self.timelist]
        # Clamp non-positive powers to a small epsilon so np.log() in the
        # model formulas stays defined.
        self.dataformodel['TransformerPower'] = [el if el > 0 else 0.01
                                                 for el in self.dataformodel['TransformerPower']]
        self.logger.info(f'Created dataframe successfully : {self.dt_name}')
    def normalizedtprofile(self):
        """Normalize the transformer power profile to its maximum (in place)."""
        trans_power = self.dataformodel['TransformerPower'].tolist()
        self.dataformodel['TransformerPower'] = [x / max(trans_power) for x in trans_power]
        self.logger.info('Transformer power is normalized')
    def get_dataformodel(self):
        """Return the assembled model dataframe."""
        return self.dataformodel
    def summary_totext(self):
        """Write the fitted model summary to 'texts.txt'."""
        # Context manager guarantees the file is closed even if writing fails.
        with open('texts.txt', 'w') as textfile:
            textfile.write(self.result.summary().as_text())
    def lm_model(self, group_name, model):
        """Fit an OLS model and predict the profile contribution of one group.

        Args:
            group_name: one of 'Domestic', 'Commercial', 'Industrial'.
            model: statsmodels/patsy formula string.
        """
        self.group_name = group_name
        # develop a statistical model
        self.dataformodel = self.dataformodel.fillna(0)
        self.model = smf.ols(model, data=self.dataformodel)
        self.logger.info(f'Model developed --> "{model}"')
        # fit and predict model
        self.result = self.model.fit()
        self.summary_totext()
        # The formulas predict log(power), so invert with exp().
        self.trans_prediction = self.result.predict(self.dataformodel)
        self.trans_prediction = [np.exp(el) for el in self.trans_prediction]
        # Predict for the target group alone: force its indicator column to 1
        # and the other groups to 0.
        self.copydata = copy.deepcopy(self.dataformodel)
        temp_dict = {'Domestic': 0, 'Commercial': 0, 'Industrial': 0}
        temp_dict[self.group_name] = 1
        for key, value in temp_dict.items():
            self.copydata[key] = [value] * len(self.copydata)
        self.prediction = self.result.predict(self.copydata)
        self.prediction = [np.exp(x) for x in self.prediction]
        # Total predicted transformer energy per calendar month.
        monthly_total_energy_dict = {}
        for tpower, date in zip(self.trans_prediction, self.dataformodel.index):
            key_name = f'{date.year}-{date.month}'
            if key_name not in monthly_total_energy_dict:
                monthly_total_energy_dict[key_name] = 0
            if not math.isnan(tpower):
                monthly_total_energy_dict[key_name] += tpower
        # Predicted group energy per calendar month.
        monthly_energy_group_dict = {}
        for date, power in zip(self.dataformodel.index, self.prediction):
            key_name = f'{date.year}-{date.month}'
            if key_name not in monthly_energy_group_dict:
                monthly_energy_group_dict[key_name] = 0
            if not math.isnan(power):
                monthly_energy_group_dict[key_name] += power
        # Monthly energy-share coefficient of the group (the proportion column
        # is constant within a month, so the last sample of the month is used).
        contribution_coefficient_dict = {}
        for date in self.dataformodel.index:
            key_name = f'{date.year}-{date.month}'
            contribution_coefficient_dict[key_name] = self.dataformodel[self.group_name][date]
        # Rescale so the group's monthly energy equals its share of the
        # predicted transformer total.
        for key, value in monthly_energy_group_dict.items():
            monthly_energy_group_dict[key] = monthly_total_energy_dict[key] \
                * contribution_coefficient_dict[key] / value
        self.prediction_mod = []
        for power, date in zip(self.prediction, self.dataformodel.index):
            key_name = f'{date.year}-{date.month}'
            self.prediction_mod.append(power * monthly_energy_group_dict[key_name])
        self.logger.info(f'Model used for predicting "{group_name}" group')
    def get_group_prediction(self):
        """Return the rescaled prediction for the most recently fitted group."""
        return self.prediction_mod
    def get_transformer_prediction(self):
        """Return the whole-transformer prediction from the most recent fit."""
        return self.trans_prediction
    def get_dommodel(self):
        """Return the OLS formula for the Domestic customer group."""
        return "np.log(TransformerPower) ~ C(Hhindex)*Domestic*C(Month)\
            + Temperature*Domestic*C(Month) + Humidity*Domestic*C(Month) \
            + C(Hhindex)*C(Hday)*C(Month)"
    def get_ndommodel(self):
        """Return the OLS formula for the Commercial (non-domestic) group."""
        return "np.log(TransformerPower) ~ C(Hhindex)*Commercial*C(Month)\
            + Temperature*Commercial*C(Month) + Humidity*Commercial*C(Month) \
            + C(Hhindex)*C(Hday)*C(Month)"
    def get_indmodel(self):
        """Return the OLS formula for the Industrial customer group."""
        return "np.log(TransformerPower) ~ C(Hhindex)*Industrial*C(Month)\
            + Temperature*Industrial*C(Month) + Humidity*Industrial*C(Month) \
            + C(Month)*C(Hhindex)*C(Hday)"
    def generate_qqplot(self):
        """Show a Q-Q plot of the fitted model residuals."""
        sm.qqplot(self.result.resid, line="45")
        plt.show()
    def check_heteroskedasticity(self):
        """Run White's test for heteroskedasticity on the residuals and log it."""
        white_test = het_white(self.result.resid, [self.dataformodel['Temperature']])
        labels = ['LM Statistic', 'LM-Test p-value', 'F-Statistic', 'F-Test p-value']
        self.logger.info(dict(zip(labels, white_test)))
    def execute_all_lm(self):
        """Fit and predict every customer group present for the transformer."""
        model_dict = {
            'Domestic': self.get_dommodel(),
            'Commercial': self.get_ndommodel(),
            'Industrial': self.get_indmodel()
        }
        self.prediction_result = {}
        # class_keys is populated by generate_energy_proportion_dataframe().
        for group in self.class_keys:
            self.lm_model(group, model_dict[group])
            self.prediction_result[group] = self.get_group_prediction()
        self.prediction_result['TransformerPrediction'] = self.get_transformer_prediction()
        self.logger.info('finished executing lms')
    def export_all(self):
        """Collect the original profile and all predictions into one dataframe."""
        df = pd.DataFrame({'TransformerOriginal': self.dataformodel['TransformerPower']})
        for group, result in self.prediction_result.items():
            df[group] = result
        df.index = self.dataformodel.index
    def generate_energy_proportion_dataframe(self, dt_name):
        """For a specified dt, generate a time-series dataframe with the
        proportion of energy consumption for three customer classes:
        domestic, commercial (non-domestic) and industrial.
        """
        # Extract dataframe for a dt
        energy_data_grouped = self.data['consumerenergydata'].groupby('Transformer Name')
        energydata = energy_data_grouped.get_group(dt_name)
        # Monthly energy totals per customer class.
        group_byclass = energydata.groupby('Customer Type')
        class_dict = {group: [] for group in group_byclass.groups}
        for key in class_dict.keys():
            energydata_byclass = group_byclass.get_group(key)
            for date in self.timelist:
                col_name = str(date.month) + '/1/' + str(date.year)
                if col_name in energydata_byclass:
                    class_dict[key].append(sum(energydata_byclass[col_name].tolist()))
                else:
                    # Missing month column: assume one nominal unit of energy.
                    class_dict[key].append(1)
        mapper = {'domestic': 'Domestic', 'commercial': 'Commercial', 'industrial': 'Industrial'}
        df = pd.DataFrame()
        # 'idx' instead of 'id' to avoid shadowing the builtin.
        for idx, key in enumerate(class_dict):
            temp_arr = []
            for x in zip(*[v for k, v in class_dict.items()]):
                x_mod = [xs if xs > 0 else 0 for xs in x]
                contribution_pu = x_mod[idx] / sum(x_mod) if sum(x_mod) != 0 else 0
                temp_arr.append(contribution_pu)
            # Avoid exact zeros so later division by the proportion is safe.
            temp_arr = [el if el != 0 else 0.01 for el in temp_arr]
            df[mapper[key]] = temp_arr
        df.index = self.timelist
        # fill with zeros if a class is not present
        for keys, values in mapper.items():
            if values not in df.columns:
                df[values] = [0] * len(df)
        self.class_keys = [mapper[key] for key in class_dict.keys()]
        self.logger.info(f"Developed energy proportion dataframe for transformer {dt_name}")
        return df
# Module is import-only; no CLI entry point is provided.
if __name__ == '__main__':
    pass
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) ARIADNEXT.
# Distributed under the terms of the Modified BSD License.
"""Annotator widget and associated models."""
from __future__ import annotations
import asyncio
from typing import Callable, Dict as TpDict, NoReturn, Union
from uuid import uuid4
from ipywidgets import CallbackDispatcher, DOMWidget, Widget, widget_serialization
from ipywidgets.widgets.widget_media import _Media
from traitlets import (
Bool,
CUnicode,
Dict,
Enum,
HasTraits,
Instance,
List,
Set,
Unicode,
)
from ._frontend import module_name, module_version
class Author(Widget):
    """Annotation author"""
    # ipywidgets comm identification for the frontend model.
    _model_name = Unicode("AuthorModel").tag(sync=True)
    _model_module = Unicode(module_name).tag(sync=True)
    _model_module_version = Unicode(module_version).tag(sync=True)
    # Author identity, synchronized with the frontend annotation library.
    id = CUnicode(default_value="", help="Annotation author ID.").tag(sync=True)
    displayName = CUnicode(
        default_value="", help="Annotation author display name."
    ).tag(sync=True)
class Annotator(_Media):
"""Annotation tool.
The image is stored as `value` attribute. It accepts a byte string.
The byte string is the raw image data that you want the browser to annotate.
You can explicitly define the format of the byte string using the `format` trait
(which defaults to "png").
If you pass `"url"` to the `"format"` trait, `value` will be interpreted
as a URL as bytes encoded in UTF-8.
"""
_model_name = Unicode("AnnotoriusModel").tag(sync=True)
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_view_name = Unicode("AnnotoriusView").tag(sync=True)
_view_module = Unicode(module_name).tag(sync=True)
_view_module_version = Unicode(module_version).tag(sync=True)
format = Unicode("png", help="The format of the image.").tag(sync=True)
width = CUnicode(
help="""Width of the image in pixels.
Use layout.width for styling the widget."""
).tag(sync=True)
height = CUnicode(
help="""Height of the image in pixels.
Use layout.height for styling the widget."""
).tag(sync=True)
annotations = List(
Dict,
help="""List of annotations on the image.
This list must not be changed directly. Use `append_annotation`, `update_annotation`
and `remove_annotation`.""",
default_value=[],
read_only=True,
)
author = Instance(Author, allow_none=True).tag(sync=True, **widget_serialization)
default_tags = Set(
Unicode,
default_value=set(),
help="Set of default tags to propose for annotations.",
).tag(sync=True)
drawingTool = Enum(
["rect", "polygon"],
default_value="rect",
help="Drawing tool. Available values are `rect` and `polygon`.",
).tag(sync=True)
headless = Bool(
default_value=False, help="Whether to disable the editor popup or not."
).tag(sync=True)
# FIXME
# readOnly = Bool(
# default_value=False, help="Whether to display the annotations as read-only."
# ).tag(sync=True)
template = List(
trait=Dict,
default_value=[],
help="""Annotation content to add on new annotation shape.
The structure is a list of dictionaries containing keys: type, value and purpose.
Example: [
{"type": "TextualBody", "value": "My comment", "purpose": "commenting"},
{"type": "TextualBody", "value": "my-tag", "purpose": "tagging"}
]
""",
).tag(sync=True)
_create_annotation_callbacks = Instance(CallbackDispatcher, args=())
_delete_annotation_callbacks = Instance(CallbackDispatcher, args=())
_select_annotation_callbacks = Instance(CallbackDispatcher, args=())
_update_annotation_callbacks = Instance(CallbackDispatcher, args=())
@classmethod
def from_file(cls, filename: str, **kwargs) -> "Annotator":
"""Create a annotation widget from a image filename."""
return cls._from_file("image", filename, **kwargs)
def __init__(self, *args, **kwargs) -> NoReturn:
super().__init__(*args, **kwargs)
self.__image_snippets: TpDict[str, asyncio.Future] = {}
self.on_msg(self._handle_frontend_event)
def __repr__(self) -> str:
return self._get_repr(Annotator)
def append_annotation(self, annotation: dict) -> NoReturn:
"""Add an annotation."""
if "id" not in annotation:
annotation["id"] = f"#{uuid4()!s}"
self.update_annotation(annotation)
def get_annotation_image(self, annotation: Union[dict, str, None] = None) -> asyncio.Future:
"""Extract the annotation image snippet.
Args:
annotation: The annotation to extract; default is to extract the currently
selected one.
Returns:
A Future resolving in a byte image when the annotation snippet is available.
"""
uid = str(uuid4())
loop = asyncio.get_running_loop()
self.__image_snippets[uid] = future = loop.create_future()
self.send({"action": "image_snippet", "annotation": annotation, "uid": uid})
return future
def update_annotation(self, annotation: dict) -> NoReturn:
"""Update an annotation."""
indexes = [a["id"] for a in self.annotations]
try:
index = indexes.index(annotation.get("id", ""))
except ValueError:
self.annotations.append(annotation)
else:
self.annotations[index] = annotation
self.send({"action": "update", "annotation": annotation})
def remove_annotation(self, annotation: Union[dict, str]) -> NoReturn:
"""Remove an annotation given the annotation description or id."""
if isinstance(annotation, str):
annotation = list(filter(lambda a: a["id"] == annotation, self.annotations))
if len(annotation) == 0:
raise ValueError(f"Annotation '{annotation}' not in list.")
else:
annotation = annotation[0]
self.annotations.remove(annotation)
self.send({"action": "delete", "annotation": annotation})
def on_create_annotation(
self, callback: Callable[[dict], NoReturn], remove: bool = False
) -> NoReturn:
"""Add a callback on create annotation event."""
self._create_annotation_callbacks.register_callback(callback, remove=remove)
def on_delete_annotation(
self, callback: Callable[[dict], NoReturn], remove: bool = False
) -> NoReturn:
"""Add a callback on delete annotation event."""
self._delete_annotation_callbacks.register_callback(callback, remove=remove)
def on_select_annotation(
self, callback: Callable[[dict], NoReturn], remove: bool = False
) -> NoReturn:
"""Add a callback on select annotation event."""
self._select_annotation_callbacks.register_callback(callback, remove=remove)
def on_update_annotation(
self, callback: Callable[[dict, dict], NoReturn], remove: bool = False
) -> NoReturn:
"""Add a callback on update annotation event.
Args:
callback: Callback function will received the new and the previous annotations (in that order)
remove: Whether to remove or add the callback?
"""
self._update_annotation_callbacks.register_callback(callback, remove=remove)
def _handle_frontend_event(
    self, _: "Widget", content: dict, buffers: list
) -> NoReturn:
    """Handle custom frontend events.

    ``content`` carries the event name under "event" and its keyword
    arguments under "args"; ``buffers`` holds binary payloads (only used
    by the image-snippet reply).
    """
    event = content.get("event")
    args = content.get("args", {})
    if event is None:
        # Not an event message; nothing to dispatch.
        return
    if event == "onModelIsReady":
        # A (new) view attached: replay every known annotation so it catches up.
        for annotation in self.annotations:
            self.append_annotation(annotation)
    elif event == "onCreateAnnotation":
        self.append_annotation(
            args["annotation"]
        )  # Propagate annotation addition to all views
        self._create_annotation_callbacks(**args)
    elif event == "onDeleteAnnotation":
        # Mirror the frontend deletion into the local list, then notify listeners.
        self.remove_annotation(args["annotation"])
        self._delete_annotation_callbacks(**args)
    elif event == "onSelectAnnotation":
        self._select_annotation_callbacks(**args)
    elif event == "onUpdateAnnotation":
        self.update_annotation(
            args["annotation"]
        )  # Propagate annotation addition to all views
        self._update_annotation_callbacks(**args)
    elif event == "imageSnippet":
        # Resolve the future that was created when the snippet was requested.
        uid = content["uid"]
        future = self.__image_snippets.pop(uid)
        if future.cancelled():
            # Requester gave up; drop the payload silently.
            return
        if buffers:
            future.set_result(bytes(buffers[0]))
        else:
            future.set_result(bytes())
|
# -*- encoding: utf-8 -*-
"""
Created by Ênio Viana at 22/09/2021 at 23:03:27
Project: py_dss_tools [set, 2021]
"""
from .PCElement import PCElement
# Writable OpenDSS properties exposed by IndMach012 (read-only columns such as
# 'basefreq' or 'bus1' are handled by the PCElement base class).
_INDMACH012_FIELDS = (
    'conn', 'd', 'daily', 'debugtrace', 'duty', 'h', 'kv', 'kva', 'kw',
    'maxslip', 'pf', 'purr', 'purs', 'puxm', 'puxr', 'puxs', 'slip',
    'slipoption', 'yearly',
)


def _passthrough_property(mangled_name):
    """Build a property that reads/writes one name-mangled instance slot."""
    def fget(self):
        return getattr(self, mangled_name)

    def fset(self, value):
        setattr(self, mangled_name, value)

    return property(fget, fset)


class IndMach012(PCElement):
    """DSS IndMach012 (induction machine) element.

    Pure data container: each writable OpenDSS property is exposed as a
    plain Python property backed by a private instance attribute.
    """

    name = "IndMach012"
    name_plural = "IndMach012s"
    columns = ['basefreq', 'bus1', 'conn', 'd', 'daily', 'debugtrace', 'duty', 'enabled', 'h', 'kv', 'kva', 'kw',
               'like', 'maxslip', 'pf', 'phases', 'purr', 'purs', 'puxm', 'puxr', 'puxs', 'slip', 'slipoption',
               'spectrum', 'yearly']

    def __init__(self):
        super().__init__()
        # Create the same attributes the hand-written version produced via
        # `self.__<field>` name mangling, all initialised to None.
        for field in _INDMACH012_FIELDS:
            setattr(self, '_IndMach012__' + field, None)


# Attach one passthrough property per writable field.
for _field in _INDMACH012_FIELDS:
    setattr(IndMach012, _field,
            _passthrough_property('_IndMach012__' + _field))
|
from typing import Optional
from returns.maybe import Maybe, Nothing, Some
def test_bind_some():
    """Ensures that bind works correctly."""
    def factory(inner_value: int) -> Maybe[int]:
        return Some(inner_value * 2)

    original = 5
    chained = Some(original).bind(factory)
    # Binding must be equivalent to applying the factory directly.
    assert chained == factory(original)
    assert str(chained) == '<Some: 10>'
def test_bind_optional():
    """Ensures that bind_optional works correctly."""
    def factory(inner_value: int) -> Optional[int]:
        # Falsy (zero) values map to None.
        return inner_value or None

    assert Some(1).bind_optional(factory) == Some(1)
    assert Some(0).bind_optional(factory) == Nothing
    assert Nothing.bind_optional(factory) == Nothing
|
import csv
import folium
from folium.plugins import MarkerCluster
from bike_service_proxy import BikeServiceProxy
# CSV export with one row per bike station (Polish column headers).
PATH_TO_LOCATIONS = 'locations.csv'
# Initial map center: Gdańsk, as [latitude, longitude].
GDANSK_CENTER_POSITION = [54.346320, 18.649246]
# Marker colour thresholds: > 50% green, > 20% orange, otherwise red.
BATTERY_LOW_PERCENT = 20
BATTERY_MEDIUM_PERCENT = 50
def get_available_bikes_number(station_row):
    """Number of bikes currently available at the station (CSV column)."""
    raw_count = station_row['DOSTĘPNE ROWERY']
    return int(raw_count)
def get_coordinates(station_row):
    """Parse the station's 'lat, lon' CSV field into [latitude, longitude]."""
    pieces = station_row['WSPÓŁRZĘDNE'].split(', ')
    return [float(pieces[0]), float(pieces[1])]
def get_available_bikes_ids(station_row):
    """Comma-separated bike ids from the CSV row, as a list of strings."""
    raw_ids = station_row['NUMERY DOSTĘPNYCH ROWERÓW']
    return raw_ids.split(',')
def get_battery_level_info(battery_level):
    """Human-readable battery text; Polish 'unknown value' when level is None."""
    return 'Nieznana wartość' if battery_level is None else f'{battery_level}%'
def get_marker_color(battery_level):
    """Map a battery percentage (or None) to a folium marker colour."""
    if battery_level is None:
        return 'gray'
    # Scan thresholds from high to low; first one exceeded wins.
    for limit, color in ((BATTERY_MEDIUM_PERCENT, 'green'),
                         (BATTERY_LOW_PERCENT, 'orange')):
        if battery_level > limit:
            return color
    return 'red'
def prepare_bike_marker(coordinates, bike_id, battery_info, marker_color):
    """Build a folium marker (bicycle icon) describing a single bike."""
    popup_text = f'ID: {bike_id} Bateria: {battery_info}'
    icon = folium.Icon(icon='bicycle', prefix='fa', color=marker_color)
    return folium.Marker(location=coordinates, popup=popup_text, icon=icon)
def generate_map():
    """Build a clustered folium map of available bikes and save it to disk.

    Reads station rows from PATH_TO_LOCATIONS, queries the bike service for
    each bike's battery level, and writes the result to 'bikes_map.html'.
    """
    bike_service_proxy = BikeServiceProxy()
    bikes_map = folium.Map(location=GDANSK_CENTER_POSITION, zoom_start=10)
    markers_cluster = MarkerCluster()
    with open(PATH_TO_LOCATIONS, mode='r') as locations_file:
        locations_reader = csv.DictReader(locations_file)
        for station_row in locations_reader:
            available_bikes = get_available_bikes_number(station_row)
            if available_bikes > 0:
                available_bikes_ids = get_available_bikes_ids(station_row)
                coordinates = get_coordinates(station_row)
                # One marker per bike; all bikes of a station share the
                # station's coordinates, the cluster groups them visually.
                for bike_id in available_bikes_ids:
                    battery_level = bike_service_proxy.battery_info_for_bike(bike_id)
                    battery_info = get_battery_level_info(battery_level)
                    marker_color = get_marker_color(battery_level)
                    bike_marker = prepare_bike_marker(coordinates, bike_id, battery_info, marker_color)
                    markers_cluster.add_child(bike_marker)
    bikes_map.add_child(markers_cluster)
    bikes_map.save('bikes_map.html')
# Entry point: the map is built and saved immediately when the script runs
# (no __main__ guard, so importing this module also triggers generation).
generate_map()
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.nn."""
import threading
from absl.testing import absltest
from flax import nn
import jax
from jax import random
from jax import test_util as jtu
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class DummyModule(nn.Module):
  """Adds a ones-initialized 'bias' parameter to its input."""

  def apply(self, x):
    ones_bias = self.param('bias', x.shape, initializers.ones)
    return x + ones_bias
class NestedModule(nn.Module):
  """Chains two distinctly named DummyModule children."""

  def apply(self, x):
    x = DummyModule(x, name='dummy_0')
    return DummyModule(x, name='dummy_1')
class NestedModel(nn.Module):
  """Applies a DummyModule, then a caller-provided model named 'inner_model'."""

  def apply(self, x, model):
    x = DummyModule(x, name='dummy_0')
    return model(x, name='inner_model')
class DataDependentInitModule(nn.Module):
  """Module whose parameter initializer depends on the input data."""

  def apply(self, x):
    def init_fn(rng, shape):
      # Ignores rng/shape: the initial value is derived from x itself.
      return x + 1.
    bias = self.param('bias', x.shape, init_fn)
    return x + bias
class CollectionModule(nn.Module):
  """Bias module that can record its output in an activations Collection."""

  def apply(self, x, activations=None):
    y = x + self.param('bias', x.shape, initializers.ones)
    if not activations:
      return y, None
    # Hand back the previously stored activation, then store the new one.
    previous_activation = activations.retrieve()
    activations.store(y)
    return y, previous_activation
class LoopModule(nn.Module):
  """Applies one shared CollectionModule twice in sequence."""

  def apply(self, x, activations=None):
    shared = CollectionModule.shared(activations=activations, name='dummy')
    x, _ = shared(x)
    x, _ = shared(x)
    return x
class ModuleTest(absltest.TestCase):
  """Behavioral tests for the deprecated `flax.nn.Module`/`nn.Model` API.

  Covers init/call symmetry, shape-based init, module sharing, name
  collisions, partial application, nested models, output capture,
  truncation, module methods, mutable state, and parameter RNG folding.
  """

  def test_init_module(self):
    # `init` returns (output, params); `call` with those params must match.
    rng = random.PRNGKey(0)
    x = jnp.array([1.])
    y, params = DummyModule.init(rng, x)
    y2 = DummyModule.call(params, x)
    self.assertEqual(y, y2)
    self.assertEqual(y, jnp.array([2.]))
    self.assertEqual(params, {'bias': jnp.array([1.])})

  def test_init_by_shape_module(self):
    # Shape-based init only sees (shape, dtype) pairs, never concrete data.
    rng = random.PRNGKey(0)
    x = jnp.array([1.])
    y, params = DummyModule.init_by_shape(rng, [(x.shape, x.dtype)])
    y2 = DummyModule.call(params, x)
    self.assertEqual(y.shape, y2.shape)
    self.assertEqual(y2, jnp.array([2.]))
    self.assertEqual(params, {'bias': jnp.array([1.])})

  def test_model(self):
    # nn.Model wraps (module, params) into a callable, jit-compatible object.
    rng = random.PRNGKey(0)
    x = jnp.array([1.])
    _, params = DummyModule.init(rng, x)
    model = nn.Model(DummyModule, params)
    y = model(x)
    self.assertEqual(y, jnp.array([2.]))
    y2 = jax.jit(model)(x)
    self.assertEqual(y2, jnp.array([2.]))

  def test_shared_module(self):
    rng = random.PRNGKey(0)
    x = jnp.array([1.])
    _, initial_params = LoopModule.init(rng, x)
    model = nn.Model(LoopModule, initial_params)
    y = model(x)
    self.assertEqual(y, jnp.array([3.]))
    # Both loop iterations reuse one 'dummy' parameter set.
    self.assertEqual(model.params, {'dummy': {'bias': jnp.array([1.])}})

  def test_name_collsion(self):
    # Reusing a child name without `shared` must raise.
    class FaultyModule(nn.Module):
      def apply(self, x):
        for _ in range(2):
          DummyModule(x, name='dummy')

    x = jnp.array([1.])
    with self.assertRaises(ValueError):
      FaultyModule.init(random.PRNGKey(0), x)

  def test_sharing_name_collsion(self):
    # A shared module's name must not clash either.
    class FaultyModule(nn.Module):
      def apply(self, x):
        for _ in range(2):
          module = DummyModule.shared(name='dummy')
          module(x)

    x = jnp.array([1.])
    with self.assertRaises(ValueError):
      FaultyModule.init(random.PRNGKey(0), x)

  def test_sharing_name_on_apply(self):
    # Passing a name at apply time to an already-named shared module fails.
    class FaultyModule(nn.Module):
      def apply(self, x):
        module = DummyModule.shared(name='dummy')
        for _ in range(2):
          module(x, name='dummy2')

    x = jnp.array([1.])
    with self.assertRaises(ValueError):
      FaultyModule.init(random.PRNGKey(0), x)

  def test_shared_module_called_in_other_frame(self):
    """Test that shared modules only appear once in parameters.

    Concretely, create a shared submodule, then pass it in to
    a child module and apply it there. Test that the parameters
    are only stored once, in the frame where the shared module
    was created.
    """
    class SubModule(nn.Module):
      def apply(self):
        self.param('params', (), initializers.zeros)

    class UseSharedModule(nn.Module):
      def apply(self, submodule):
        submodule()

    class TopLevel(nn.Module):
      def apply(self):
        submodule = SubModule.shared(name='shared')
        submodule()
        UseSharedModule(submodule, name='use_shared')

    _, params = TopLevel.init(random.PRNGKey(0))
    self.assertEqual({
        'shared': {'params': jnp.zeros(())},
        'use_shared': {},
    }, params)

  def test_module_decorator(self):
    # @nn.module turns a plain function into a Module subclass.
    @nn.module
    def MyModule(x):  # pylint: disable=invalid-name
      return DummyModule(x)

    self.assertEqual(MyModule.__name__, 'MyModule')
    self.assertTrue(issubclass(MyModule, nn.Module))
    rng = random.PRNGKey(0)
    x = jnp.array([1.])
    y, params = MyModule.init(rng, x)
    y2 = MyModule.call(params, x)
    self.assertEqual(y, y2)
    self.assertEqual(y, jnp.array([2.]))

  def test_partial_application(self):
    rng = random.PRNGKey(0)
    x = jnp.array([1.])
    dummy_module = DummyModule.partial(x=x)  # partially apply the inputs
    # Partial application must preserve the module's identity metadata.
    self.assertEqual(DummyModule.__name__, dummy_module.__name__)
    self.assertEqual(DummyModule.__qualname__, dummy_module.__qualname__)
    y, initial_params = dummy_module.init(rng)
    model = nn.Model(dummy_module, initial_params)
    y2 = model()
    self.assertEqual(y.shape, y2.shape)
    self.assertEqual(y2, jnp.array([2.]))

  def test_nested_model(self):
    # A Model instance can be passed as an argument into another module.
    x = jnp.array([1.])
    _, inner_initial_params = DummyModule.init(random.PRNGKey(0), x)
    inner_model = nn.Model(DummyModule, inner_initial_params)
    _, initial_params = NestedModel.init(random.PRNGKey(1), x, inner_model)
    model = nn.Model(NestedModel, initial_params)
    y = model(x, inner_model)
    self.assertEqual(y, jnp.array([3.]))

  def test_capture_module_outputs(self):
    # capture_module_outputs records every submodule's output by path.
    x = jnp.array([1.])
    _, initial_params = NestedModule.init(random.PRNGKey(0), x)
    model = nn.Model(NestedModule, initial_params)
    with nn.capture_module_outputs() as activations:
      model(x)
    expected_activations = {
        '/': [x + 2],
        '/dummy_0': [x + 1],
        '/dummy_1': [x + 2],
    }
    self.assertEqual(activations.as_dict(), expected_activations)

  def test_nested_model_capture_outputs(self):
    x = jnp.array([1.])
    _, inner_initial_params = DummyModule.init(random.PRNGKey(0), x)
    inner_model = nn.Model(DummyModule, inner_initial_params)
    _, initial_params = NestedModel.init(random.PRNGKey(1), x, inner_model)
    model = nn.Model(NestedModel, initial_params)
    with nn.capture_module_outputs() as activations:
      model(x, inner_model)
    expected_activations = {
        '/': [x + 2],
        '/dummy_0': [x + 1],
        '/inner_model': [x + 2],
    }
    self.assertEqual(activations.as_dict(), expected_activations)

  def test_truncated_module(self):
    # truncate_at returns the intermediate output(s) at the given path.
    x = jnp.array([1.])
    _, initial_params = NestedModule.init(random.PRNGKey(0), x)
    model = nn.Model(NestedModule, initial_params)
    model = model.truncate_at('/dummy_0')
    y = model(x)
    self.assertEqual(y, [x + 1])

  def test_call_module_method(self):
    # nn.module_method exposes extra methods that reuse the module's params.
    class MultiMethod(nn.Module):
      def apply(self, x):
        return x + self.param('bias', x.shape, initializers.ones)

      @nn.module_method
      def l2(self):
        return jnp.sum(self.get_param('bias') ** 2)

    class MultiMethodModel(nn.Module):
      def apply(self, x):
        layer = MultiMethod.shared()
        layer(x)  # init
        return layer.l2()

    self.assertEqual(
        MultiMethod.l2.__qualname__,
        MultiMethod.__qualname__ + '.l2')
    x = jnp.array([1., 2.])
    _, params = MultiMethod.init(random.PRNGKey(0), x)
    model = nn.Model(MultiMethod, params)
    self.assertEqual(model.l2(), 2.)
    y, _ = MultiMethodModel.init(random.PRNGKey(0), x)
    self.assertEqual(y, 2.)

  def test_module_state(self):
    class StatefulModule(nn.Module):
      def apply(self, x, coll=None):
        state = self.state('state', x.shape, nn.initializers.zeros,
                           collection=coll)
        state.value += x

    x = jnp.array([1.,])
    # no collection should raise an error
    with self.assertRaises(ValueError):
      StatefulModule.call({}, x)
    # pass collection explicitly
    with nn.Collection().mutate() as state:
      self.assertEqual(state.as_dict(), {})
      StatefulModule.init(random.PRNGKey(0), x, state)
      self.assertEqual(state.as_dict(), {'/': {'state': x}})
    self.assertEqual(state.as_dict(), {'/': {'state': x}})
    with state.mutate() as new_state:
      # assert new_state is a clone of state
      self.assertEqual(new_state.as_dict(), state.as_dict())
      StatefulModule.call({}, x, new_state)
      self.assertEqual(new_state.as_dict(), {'/': {'state': x + x}})
    # use stateful
    with nn.stateful() as state:
      self.assertEqual(state.as_dict(), {})
      StatefulModule.init(random.PRNGKey(0), x)
      self.assertEqual(state.as_dict(), {'/': {'state': x}})
    with nn.stateful(state) as new_state:
      # assert new_state is a clone of state
      self.assertEqual(new_state.as_dict(), state.as_dict())
      StatefulModule.call({}, x)
      self.assertEqual(new_state.as_dict(), {'/': {'state': x + x}})
    self.assertEqual(new_state.as_dict(), {'/': {'state': x + x}})

  def test_parameter_rng(self):
    # Parameter RNGs are derived by folding module and parameter names
    # into the root key; reproduce that folding manually and compare.
    @nn.module
    def model(x):
      return nn.Dense(x, features=2, name='dummy',
                      bias_init=nn.initializers.normal())

    rng = random.PRNGKey(0)
    _, params = model.init(rng, jnp.ones((1, 1)))
    dense_rng = nn.base._fold_in_str(rng, 'dummy')
    kernel_rng = nn.base._fold_in_str(dense_rng, 'kernel')
    bias_rng = nn.base._fold_in_str(dense_rng, 'bias')
    kernel = nn.linear.default_kernel_init(kernel_rng, (1, 2))
    bias = nn.initializers.normal()(bias_rng, (2,))
    np.testing.assert_allclose(kernel, params['dummy']['kernel'])
    np.testing.assert_allclose(bias, params['dummy']['bias'])
class CollectionTest(absltest.TestCase):
  """Tests for `nn.Collection`: mutation scopes, lookup, and error cases."""

  def test_collection_store_and_retrieve(self):
    rng = random.PRNGKey(0)
    x = jnp.array([1.])
    with nn.Collection().mutate() as activations:
      (_, y), initial_params = CollectionModule.init(rng, x, activations)
      model = nn.Model(CollectionModule, initial_params)
      # First call: nothing stored yet, so the retrieved value is None.
      self.assertEqual(y, None)
    with activations.mutate() as new_activations:
      # Second call retrieves what the first call stored.
      _, y2 = model(x, new_activations)
      self.assertEqual(y2, jnp.array([2.]))

  def test_collection_multiple_calls(self):
    rng = random.PRNGKey(0)
    with nn.Collection().mutate() as activations:
      x = jnp.array([1.])
      _, _ = LoopModule.init(rng, x, activations)
    # The shared 'dummy' module stores once per call; last value wins.
    expected_state = {
        '/dummy': jnp.array([3.]),
    }
    self.assertEqual(activations.state, expected_state)

  def test_collection_multiple_roots(self):
    rng = random.PRNGKey(0)
    with nn.Collection().mutate() as activations:
      x = jnp.array([1.])
      LoopModule.init(rng, x, activations, name='a')
      LoopModule.init(rng, x, activations, name='b')
    expected_state = {
        '/a/dummy': jnp.array([3.]),
        '/b/dummy': jnp.array([3.]),
    }
    self.assertEqual(activations.state, expected_state)
    # Two anonymous roots would collide at the same path: must raise.
    with self.assertRaises(ValueError):
      with nn.Collection().mutate() as activations:
        x = jnp.array([1.])
        LoopModule.init(rng, x, activations)
        LoopModule.init(rng, x, activations)

  def test_mutable_collection_cannot_be_passed_to_jax(self):
    # Mutable collections are not valid jax types (not a pytree leaf).
    with nn.Collection().mutate() as collection:
      def fn(col):
        return col
      with self.assertRaises(ValueError):
        jax.jit(fn)(collection)

  def test_collection_lookup(self):
    state = {
        '/dummy/sub': 1,
    }
    collection = nn.Collection(state=state)
    # Build a fake module stack so the path lookup resolves.
    root = nn.base._ModuleFrame(None)
    frame = nn.base._ModuleFrame('dummy', parent=root)
    with nn.base._module_stack.frame(root):
      with nn.base._module_stack.frame(frame):
        self.assertEqual(collection['/dummy/sub'], 1)

  def test_collection_inside_module(self):
    class NestedCollection(nn.Module):
      def apply(self, x):
        with nn.Collection().mutate() as activations:
          LoopModule(x, activations, name='a')
          LoopModule(x, activations, name='b')
        return activations

    rng = random.PRNGKey(0)
    x = jnp.array([1.])
    activations, _ = NestedCollection.init(rng, x, name='nested')
    # Paths are relative to the collection's creation scope, not the root.
    expected_state = {
        '/a/dummy': jnp.array([3.]),
        '/b/dummy': jnp.array([3.]),
    }
    self.assertEqual(activations.as_dict(), expected_state)

  def test_collection_store_fails_if_not_in_module(self):
    @nn.module
    def test():
      with nn.Collection().mutate() as coll:
        coll.store(1)

    pattern = 'State should be stored from within a module'
    with self.assertRaisesRegex(ValueError, pattern):
      test.init(random.PRNGKey(0))

  def test_collection_store_fails_if_out_of_scope(self):
    @nn.module
    def stateful_module(coll):
      coll.store(1)

    @nn.module
    def test_inner(f):
      with nn.Collection().mutate() as coll:
        # this should fail because f is a shared module defined
        # in the parent. Therefore we cannot capture in the scope
        # of this Module.
        f(coll)

    @nn.module
    def test():
      f = stateful_module.shared()
      test_inner(f)

    pattern = 'Trying to capture state outside the scope'
    with self.assertRaisesRegex(ValueError, pattern):
      test.init(random.PRNGKey(0))

  # TODO(jheek): re-introduce this test when the tracer check is revived.
  # def test_jax_transform_of_stateful_function(self):
  #   test = self
  #   class NestedTransform(nn.Module):
  #     def apply(self, state, y):
  #       def inner_fn(x):
  #         # constants should be storable
  #         state.store(1.)
  #         # values in the same trace should be storable
  #         state.store({'a': y})
  #         with test.assertRaises(ValueError):
  #           # values depending on the vmap should not be storable
  #           state.store({'a': y, 'b': x})
  #       jax.vmap(inner_fn)(jnp.ones((2,)))
  #   def outer_fn(x):
  #     with nn.Collection().mutate() as state:
  #       NestedTransform.init(random.PRNGKey(0), state, x)
  #   outer_fn(1.)
  #   jax.jit(outer_fn)(1.)
class UtilsTest(absltest.TestCase):
  """Tests for `nn.utils.CallStack`: nesting, thread-locality, unwinding."""

  def test_call_stack_happy_path(self):
    stack = nn.utils.CallStack()
    self.assertFalse(stack)
    with stack.frame({'id': 1}):
      self.assertTrue(stack)
      self.assertEqual(stack[-1], {'id': 1})
      with stack.frame({'id': 2}):
        self.assertEqual(list(stack), [{'id': 1}, {'id': 2}])
      # Inner frame popped on context exit.
      self.assertEqual(list(stack), [{'id': 1}])

  def test_call_stack_multithreading(self):
    stack = nn.utils.CallStack()
    self.assertFalse(stack)
    with stack.frame({'id': 1}):
      self.assertEqual(stack[-1], {'id': 1})
      def _main():
        # Each thread should have its own stack.
        self.assertFalse(stack)
        with stack.frame({'id': 2}):
          self.assertEqual(stack[-1], {'id': 2})
      thread = threading.Thread(target=_main)
      thread.start()
      thread.join()

  def test_call_stack_error_path(self):
    # A frame must be popped even when its body raises.
    stack = nn.utils.CallStack()
    with stack.frame({'id': 1}):
      with self.assertRaises(ValueError):
        with stack.frame({'id': 2}):
          raise ValueError('dummy')
      self.assertEqual(list(stack), [{'id': 1}])
class PoolTest(absltest.TestCase):
  """Tests for pooling ops: custom reductions, avg/max pool, and gradients."""

  def test_pool_custom_reduce(self):
    x = jnp.full((1, 3, 3, 1), 2.)
    mul_reduce = lambda x, y: x * y
    # 2x2 window of 2s reduced by multiplication -> 2**4 per output cell.
    y = nn.pooling.pool(x, 1., mul_reduce, (2, 2), (1, 1), 'VALID')
    np.testing.assert_allclose(y, np.full((1, 2, 2, 1), 2. ** 4))

  def test_avg_pool(self):
    x = jnp.full((1, 3, 3, 1), 2.)
    pool = lambda x: nn.avg_pool(x, (2, 2))
    y = pool(x)
    np.testing.assert_allclose(y, np.full((1, 2, 2, 1), 2.))
    # Gradient weight equals how many windows each input cell belongs to / 4.
    y_grad = jax.grad(lambda x: pool(x).sum())(x)
    expected_grad = jnp.array([
        [0.25, 0.5, 0.25],
        [0.5, 1., 0.5],
        [0.25, 0.5, 0.25],
    ]).reshape((1, 3, 3, 1))
    np.testing.assert_allclose(y_grad, expected_grad)

  def test_max_pool(self):
    x = jnp.arange(9).reshape((1, 3, 3, 1)).astype(jnp.float32)
    pool = lambda x: nn.max_pool(x, (2, 2))
    expected_y = jnp.array([
        [4., 5.],
        [7., 8.],
    ]).reshape((1, 2, 2, 1))
    y = pool(x)
    np.testing.assert_allclose(y, expected_y)
    # Gradient flows only to each window's maximum element.
    y_grad = jax.grad(lambda x: pool(x).sum())(x)
    expected_grad = jnp.array([
        [0., 0., 0.],
        [0., 1., 1.],
        [0., 1., 1.],
    ]).reshape((1, 3, 3, 1))
    np.testing.assert_allclose(y_grad, expected_grad)

  def test_max_pool_explicit_pads(self):
    x = jnp.arange(9).reshape((1, 3, 3, 1)).astype(jnp.float32)
    pool = lambda x: nn.max_pool(x, (2, 2), padding=((1,1),(1,1)))
    expected_y = jnp.array([
        [0.,1.,2.,2.],
        [3.,4.,5.,5.],
        [6.,7.,8.,8.],
        [6.,7.,8.,8.],
    ]).reshape((1, 4, 4, 1))
    y = pool(x)
    np.testing.assert_allclose(y, expected_y)
    y_grad = jax.grad(lambda x: pool(x).sum())(x)
    expected_grad = jnp.array([
        [1., 1., 2.],
        [1., 1., 2.],
        [2., 2., 4.],
    ]).reshape((1, 3, 3, 1))
    np.testing.assert_allclose(y_grad, expected_grad)
class NormalizationTest(absltest.TestCase):
  """Tests for BatchNorm, LayerNorm and GroupNorm against manual formulas."""

  def test_batch_norm(self):
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng)
    x = random.normal(key1, (4, 3, 2))
    model_cls = nn.BatchNorm.partial(momentum=0.9)
    with nn.stateful() as state_0:
      y, initial_params = model_cls.init(key2, x)
      model = nn.Model(model_cls, initial_params)
    # Output must be normalized over the non-feature axes.
    mean = y.mean((0, 1))
    var = y.var((0, 1))
    np.testing.assert_allclose(mean, np.array([0., 0.]), atol=1e-4)
    np.testing.assert_allclose(var, np.array([1., 1.]), rtol=1e-4)
    with nn.stateful(state_0) as state:
      y = model(x)
    ema = state['/']
    # EMA update: new = momentum * old + (1 - momentum) * batch statistic
    # (initial mean is 0, initial var is 1).
    np.testing.assert_allclose(
        ema['mean'], 0.1 * x.mean((0, 1), keepdims=False), atol=1e-4)
    np.testing.assert_allclose(
        ema['var'], 0.9 + 0.1 * x.var((0, 1), keepdims=False), rtol=1e-4)

  def test_layer_norm(self):
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng)
    e = 1e-5
    x = random.normal(key1, (2, 3, 4))
    y, _ = nn.LayerNorm.init(key2, x, bias=False, scale=False, epsilon=e)
    assert x.shape == y.shape
    input_type = type(x)
    assert isinstance(y, input_type)
    # Compare against the closed-form normalization over the last axis.
    y_one_liner = ((x - x.mean(axis=-1, keepdims=True)) *
                   jax.lax.rsqrt(x.var(axis=-1, keepdims=True) + e))
    np.testing.assert_allclose(y_one_liner, y, atol=1e-4)

  def test_group_norm(self):
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng)
    e = 1e-5
    x = random.normal(key1, (2, 5, 4, 4, 32))
    y, _ = nn.GroupNorm.init(key2, x, num_groups=2,
                             bias=True, scale=True, epsilon=e)
    self.assertEqual(x.shape, y.shape)
    self.assertIsInstance(y, type(x))
    # Reproduce group norm: split 32 channels into 2 groups of 16 and
    # normalize each group over all spatial axes + its channels.
    x_gr = x.reshape([2, 5, 4, 4, 2, 16])
    y_test = ((x_gr - x_gr.mean(axis=[1, 2, 3, 5], keepdims=True)) *
              jax.lax.rsqrt(x_gr.var(axis=[1, 2, 3, 5], keepdims=True) + e))
    y_test = y_test.reshape([2, 5, 4, 4, 32])
    np.testing.assert_allclose(y_test, y, atol=1e-4)
# TODO(flax-dev): add integration tests for RNN cells
class RecurrentTest(absltest.TestCase):
  """Tests for RNN cells: carry shapes, outputs, and parameter layouts."""

  def test_lstm(self):
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng)
    x = random.normal(key1, (2, 3))
    c0, h0 = nn.LSTMCell.initialize_carry(rng, (2,), 4)
    self.assertEqual(c0.shape, (2, 4))
    self.assertEqual(h0.shape, (2, 4))
    (carry, y), initial_params = nn.LSTMCell.init(key2, (c0, h0), x)
    lstm = nn.Model(nn.LSTMCell, initial_params)
    self.assertEqual(carry[0].shape, (2, 4))
    self.assertEqual(carry[1].shape, (2, 4))
    # The cell's output is its hidden state.
    np.testing.assert_allclose(y, carry[1])
    param_shapes = jax.tree_map(np.shape, lstm.params)
    # Input projections have no bias; hidden projections do.
    self.assertEqual(param_shapes, {
        'ii': {'kernel': (3, 4)},
        'if': {'kernel': (3, 4)},
        'ig': {'kernel': (3, 4)},
        'io': {'kernel': (3, 4)},
        'hi': {'kernel': (4, 4), 'bias': (4,)},
        'hf': {'kernel': (4, 4), 'bias': (4,)},
        'hg': {'kernel': (4, 4), 'bias': (4,)},
        'ho': {'kernel': (4, 4), 'bias': (4,)},
    })

  def test_gru(self):
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng)
    x = random.normal(key1, (2, 3))
    carry0 = nn.GRUCell.initialize_carry(rng, (2,), 4)
    self.assertEqual(carry0.shape, (2, 4))
    (carry, y), initial_params = nn.GRUCell.init(key2, carry0, x)
    gru = nn.Model(nn.GRUCell, initial_params)
    self.assertEqual(carry.shape, (2, 4))
    np.testing.assert_allclose(y, carry)
    param_shapes = jax.tree_map(np.shape, gru.params)
    self.assertEqual(param_shapes, {
        'ir': {'kernel': (3, 4), 'bias': (4,)},
        'iz': {'kernel': (3, 4), 'bias': (4,)},
        'in': {'kernel': (3, 4), 'bias': (4,)},
        'hr': {'kernel': (4, 4)},
        'hz': {'kernel': (4, 4)},
        'hn': {'kernel': (4, 4), 'bias': (4,)},
    })

  def test_conv2dlstm(self):
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng)
    x = random.normal(key1, (2, 4, 4, 3))
    c0, h0 = nn.ConvLSTM.initialize_carry(rng, (2,), (4, 4, 6))
    self.assertEqual(c0.shape, (2, 4, 4, 6))
    self.assertEqual(h0.shape, (2, 4, 4, 6))
    (carry, y), initial_params = nn.ConvLSTM.init(
        key2, (c0, h0), x, features=6, kernel_size=(3, 3))
    lstm = nn.Model(nn.ConvLSTM, initial_params)
    self.assertEqual(carry[0].shape, (2, 4, 4, 6))
    self.assertEqual(carry[1].shape, (2, 4, 4, 6))
    np.testing.assert_allclose(y, carry[1])
    param_shapes = jax.tree_map(np.shape, lstm.params)
    # All four gates are fused into a single conv (hence the 6*4 channels).
    self.assertEqual(param_shapes, {
        'hh': {'bias': (6*4,), 'kernel': (3, 3, 6, 6*4)},
        'ih': {'bias': (6*4,), 'kernel': (3, 3, 3, 6*4)},
    })

  def test_optimized_lstm_cell_matches_regular(self):
    # Create regular LSTMCell.
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng)
    x = random.normal(key1, (2, 3))
    c0, h0 = nn.LSTMCell.initialize_carry(rng, (2,), 4)
    self.assertEqual(c0.shape, (2, 4))
    self.assertEqual(h0.shape, (2, 4))
    (carry, y), initial_params = nn.LSTMCell.init(key2, (c0, h0), x)
    lstm = nn.Model(nn.LSTMCell, initial_params)
    # Create OptimizedLSTMCell.
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng)
    x = random.normal(key1, (2, 3))
    c0, h0 = nn.OptimizedLSTMCell.initialize_carry(rng, (2,), 4)
    self.assertEqual(c0.shape, (2, 4))
    self.assertEqual(h0.shape, (2, 4))
    (carry, y_opt), initial_params = nn.OptimizedLSTMCell.partial(
        name='LSTMCell').init(key2, (c0, h0), x)
    lstm_opt = nn.Model(nn.OptimizedLSTMCell.partial(name='LSTMCell'),
                        initial_params)
    # Same seed + same (renamed) layout => identical outputs and params.
    np.testing.assert_allclose(y, y_opt, rtol=1e-6)
    jtu.check_eq(lstm.params, lstm_opt.params)
class StochasticTest(absltest.TestCase):
  """Tests for the `nn.stochastic` RNG scope and `nn.make_rng`."""

  def test_make_rng_requires_stochastic(self):
    # make_rng outside a stochastic scope must fail.
    with self.assertRaises(ValueError):
      nn.make_rng()

  def test_stochastic_rngs(self):
    rng = random.PRNGKey(0)
    with nn.stochastic(rng):
      r1 = nn.make_rng()
      r2 = nn.make_rng()
    # Each call folds an incrementing counter into the scope's key.
    self.assertTrue(np.all(r1 == random.fold_in(rng, 1)))
    self.assertTrue(np.all(r2 == random.fold_in(rng, 2)))

  # TODO(jheek): re-introduce this test when the tracer check is revived.
  # def test_make_rng_in_jax_transform_check(self):
  #   with nn.stochastic(random.PRNGKey(0)):
  #     with self.assertRaises(ValueError):
  #       jax.jit(nn.make_rng)()

  def test_init_by_shape_lifts_stochastic(self):
    class StochasticModule(nn.Module):
      def apply(self):
        return nn.make_rng()

    with nn.stochastic(random.PRNGKey(0)):
      rng, _ = StochasticModule.init_by_shape(random.PRNGKey(1), [])
    # init_by_shape adds one extra fold-in around the module's own fold-in.
    expected_rng = random.fold_in(random.PRNGKey(0), 1)
    expected_rng = random.fold_in(expected_rng, 1)
    self.assertTrue(np.all(rng == expected_rng))
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
  absltest.main()
|
import requests
import urllib.parse
from bs4 import BeautifulSoup
from geopy.geocoders import Nominatim
def parseCoord(coord):
    """Parse text like '(19.43, -99.23)' into a (lat, lon) float tuple.

    Skips any prefix before the first digit or minus sign and drops the
    final character (the closing parenthesis of the onclick handler).
    """
    first = 0
    for position, character in enumerate(coord):
        if character.isnumeric() or character == '-':
            first = position
            break
    pieces = coord[first:-1].split(", ")
    return (float(pieces[0]), float(pieces[1]))
def parseAddr(address):
    """Normalize a comma-separated address to 'part, part, ...' form.

    Each comma-separated piece is stripped of surrounding whitespace and
    the pieces are rejoined with ', '.

    Bug fix: the original rebuilt the string with a trailing ', ' and then
    sliced off THREE characters (`finale[:-3]`) instead of two, silently
    dropping the last character of every address.
    """
    return ", ".join(part.strip() for part in address.split(','))
def getAddress(url):
    """Scrape a listing page for its address and (lat, lon) coordinates.

    Returns:
        (address, (lat, lon)) on success, or None when no coordinates could
        be obtained (best-effort; progress is reported via print).
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')
    address = str()
    # The address is the sibling of the card titled 'Ubicación'.
    highFives = soup.find_all('h5', attrs={'class': 'card-title'})
    for i in highFives:
        if i.text == 'Ubicación':
            address = i.next_sibling.text
            break
    address = parseAddr(address)
    # First try the static-map onclick handler embedded in the page.
    # Fix: narrowed the original bare `except:` clauses to `except Exception`
    # so SystemExit/KeyboardInterrupt are no longer swallowed.
    try:
        coord = soup.find('img', attrs={'class': 'img-static-map'})['onclick']
        coord = parseCoord(coord)
        return (address, coord)
    except Exception:
        pass
    # Fall back to geocoding the scraped address text.
    try:
        print(address, ": No coordinates found, will attempt to find coordinates through address...")
        geolocator = Nominatim(user_agent="trobify")
        location = geolocator.geocode(address)
        coord = (float(location.latitude), float(location.longitude))
        print(address, ": Coordinates found")
        return (address, coord)
    except Exception:
        print(address, ": Couldn't find coordinates, entry will be ignored")
        return None
# Smoke test: scrape one known listing and show what was extracted.
addr = getAddress("https://century21mexico.com/propiedad/402980_casa-en-venta-en-bosque-de-echegaray-naucalpan-estado-de-mexico-mexico")
print(addr)
# Fix: getAddress returns None when no coordinates were found; the original
# unconditionally indexed addr[1][0] and crashed in that case.
if addr is not None:
    print(addr[1][0])
#!/usr/bin/env python
# Copyright (c) 2017 Facebook, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
import ctypes as ct
import unittest
from bcc import BPF
from netaddr import IPAddress
class KeyV4(ct.Structure):
    """ctypes mirror of the kernel LPM-trie IPv4 key: prefix length + 4 octets."""
    _fields_ = [("prefixlen", ct.c_uint),
                ("data", ct.c_ubyte * 4)]
class KeyV6(ct.Structure):
    """ctypes mirror of the LPM-trie IPv6 key: prefix length + address data.

    Uses 8 x u16 (matching `IPAddress(...).words`) where the BPF-side struct
    declares u32 data[4]; both are 16 bytes.
    NOTE(review): presumably relies on the two layouts matching byte-for-byte
    on this platform's endianness — confirm if ported.
    """
    _fields_ = [("prefixlen", ct.c_uint),
                ("data", ct.c_ushort * 8)]
class TestLpmTrie(unittest.TestCase):
    """Exercises BPF longest-prefix-match trie maps via bcc (needs a kernel
    with BPF_MAP_TYPE_LPM_TRIE and root privileges)."""

    def test_lpm_trie_v4(self):
        test_prog1 = """
        BPF_F_TABLE("lpm_trie", u64, int, trie, 16, BPF_F_NO_PREALLOC);
        """
        b = BPF(text=test_prog1)
        t = b["trie"]

        # Insert two overlapping prefixes: 192.168.0.0/24 and /28.
        k1 = KeyV4(24, (192, 168, 0, 0))
        v1 = ct.c_int(24)
        t[k1] = v1

        k2 = KeyV4(28, (192, 168, 0, 0))
        v2 = ct.c_int(28)
        t[k2] = v2

        # Longest-prefix match: .15 is inside the /28, .127 only in the /24.
        k = KeyV4(32, (192, 168, 0, 15))
        self.assertEqual(t[k].value, 28)

        k = KeyV4(32, (192, 168, 0, 127))
        self.assertEqual(t[k].value, 24)

        # An address matching neither prefix must miss.
        with self.assertRaises(KeyError):
            k = KeyV4(32, (172, 16, 1, 127))
            v = t[k]

    def test_lpm_trie_v6(self):
        test_prog1 = """
        struct key_v6 {
            u32 prefixlen;
            u32 data[4];
        };
        BPF_F_TABLE("lpm_trie", struct key_v6, int, trie, 16, BPF_F_NO_PREALLOC);
        """
        b = BPF(text=test_prog1)
        t = b["trie"]

        # Insert a /64 and a more specific /96 prefix.
        k1 = KeyV6(64, IPAddress('2a00:1450:4001:814:200e::').words)
        v1 = ct.c_int(64)
        t[k1] = v1

        k2 = KeyV6(96, IPAddress('2a00:1450:4001:814::200e').words)
        v2 = ct.c_int(96)
        t[k2] = v2

        # Longest-prefix match resolution, then a guaranteed miss.
        k = KeyV6(128, IPAddress('2a00:1450:4001:814::1024').words)
        self.assertEqual(t[k].value, 96)

        k = KeyV6(128, IPAddress('2a00:1450:4001:814:2046::').words)
        self.assertEqual(t[k].value, 64)

        with self.assertRaises(KeyError):
            k = KeyV6(128, IPAddress('2a00:ffff::').words)
            v = t[k]
# Run the LPM-trie tests when executed directly (requires root + BPF support).
if __name__ == "__main__":
    unittest.main()
|
class ActivityBase:
    """Minimal record of an activity: its id plus x/y/h placement values."""

    def __init__(self, activityId, x, y, h):
        self.activityId = activityId
        self.x = x
        self.y = y
        self.h = h

    def __str__(self):
        # Same rendering as before, built in a single expression.
        return ('<ActivityBase activityId=%d, ' % self.activityId
                + 'x=%d, y=%d, h=%d>' % (self.x, self.y, self.h))

    def __repr__(self):
        return str(self)
|
#!/usr/bin/env python3
## Simple talker demo that publishes std_msgs/String messages
## to the 'chatter' topic
import rospy
from std_msgs.msg import String
def talker():
rospy.init_node('talker', anonymous=True)
pub = rospy.Publisher('chatter', String, queue_size=10)
r = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
str = "hello world %s"%rospy.get_time()
rospy.loginfo(str)
pub.publish(str)
r.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException: pass
|
# -*- coding: utf-8 -*-
"""
Paraxial optical calculations
"""
|
from typing import List, Optional, Union
import pandas as pd
from sklearn.utils.validation import check_is_fitted
from feature_engine.creation.base_creation import BaseCreation
from feature_engine.docstrings import (
Substitution,
_drop_original_docstring,
_feature_names_in_docstring,
_fit_not_learn_docstring,
_fit_transform_docstring,
_missing_values_docstring,
_n_features_in_docstring,
_variables_numerical_docstring,
)
from feature_engine.variable_manipulation import _find_or_check_numerical_variables
# Names of the pandas DataFrame binary operators that RelativeFeatures is
# allowed to apply (each maps to a pd.DataFrame method of the same name).
_PERMITTED_FUNCTIONS = [
    "add",
    "sub",
    "mul",
    "div",
    "truediv",
    "floordiv",
    "mod",
    "pow",
]
@Substitution(
    variables=_variables_numerical_docstring,
    missing_values=_missing_values_docstring,
    drop_original=_drop_original_docstring,
    feature_names_in_=_feature_names_in_docstring,
    n_features_in_=_n_features_in_docstring,
    fit=_fit_not_learn_docstring,
    transform=BaseCreation._transform_docstring,
    fit_transform=_fit_transform_docstring,
)
class RelativeFeatures(BaseCreation):
    """
    RelativeFeatures() applies basic mathematical operations between a group
    of variables and one or more reference features. It adds the resulting features
    to the dataframe.

    In other words, RelativeFeatures() adds, subtracts, multiplies, performs the
    division, true division, floor division, module or exponentiation of a group of
    features to / by a group of reference variables. The features resulting from these
    functions are added to the dataframe.

    This transformer works only with numerical variables. It uses the pandas methods
    `pd.DataFrame.add`, `pd.DataFrame.sub`, `pd.DataFrame.mul`, `pd.DataFrame.div`,
    `pd.DataFrame.truediv`, `pd.DataFrame.floordiv`, `pd.DataFrame.mod` and
    `pd.DataFrame.pow`.

    Find out more in `pandas documentation
    <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.add.html>`_.

    More details in the :ref:`User Guide <relative_features>`.

    Parameters
    ----------
    variables: list
        The list of numerical variables to combine with the reference variables.

    reference: list
        The list of reference variables that will be added, subtracted, multiplied,
        used as denominator for division and module, or exponent for the exponentiation.

    func: list
        The list of functions to be used in the transformation. The list can contain
        one or more of the following strings: 'add', 'mul', 'sub', 'div', 'truediv',
        'floordiv', 'mod', 'pow'.

    {missing_values}

    {drop_original}

    Attributes
    ----------
    {feature_names_in_}

    {n_features_in_}

    Methods
    -------
    {fit}

    {fit_transform}

    {transform}

    Notes
    -----
    Although the transformer allows us to combine any feature with any function, we
    recommend its use to create domain knowledge variables. Typical examples within the
    financial sector are:

    - Ratio between income and debt to create the debt_to_income_ratio.
    - Subtraction of rent from income to obtain the disposable_income.
    """

    def __init__(
        self,
        variables: List[Union[str, int]],
        reference: List[Union[str, int]],
        func: List[str],
        missing_values: str = "ignore",
        drop_original: bool = False,
    ) -> None:
        # Validate variables: a list of unique str/int column identifiers.
        if (
            not isinstance(variables, list)
            or not all(isinstance(var, (int, str)) for var in variables)
            or len(set(variables)) != len(variables)
        ):
            raise ValueError(
                "variables must be a list of strings or integers. "
                f"Got {variables} instead."
            )
        # Validate reference: same requirements as variables.
        if (
            not isinstance(reference, list)
            or not all(isinstance(var, (int, str)) for var in reference)
            or len(set(reference)) != len(reference)
        ):
            raise ValueError(
                "reference must be a list of strings or integers. "
                f"Got {reference} instead."
            )
        # Validate func: only supported operator names, no duplicates.
        if (
            not isinstance(func, list)
            or any(fun not in _PERMITTED_FUNCTIONS for fun in func)
            or len(set(func)) != len(func)
        ):
            raise ValueError(
                "At least one of the entered functions is not supported or you entered "
                "duplicated functions. "
                "Supported functions are {}. ".format(", ".join(_PERMITTED_FUNCTIONS))
            )

        super().__init__(missing_values, drop_original)
        self.variables = variables
        self.reference = reference
        self.func = func

    def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
        """
        This transformer does not learn any parameter.

        Parameters
        ----------
        X: pandas dataframe of shape = [n_samples, n_features]
            The training input samples. Can be the entire dataframe, not just the
            variables to transform.

        y: pandas Series, or np.array. Default=None.
            It is not needed in this transformer. You can pass y or None.
        """
        # Common checks and attributes (handled by the base class).
        X = super().fit(X, y)

        # check reference variables are numerical
        self.reference = _find_or_check_numerical_variables(X, self.reference)

        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Add new features.

        Parameters
        ----------
        X: pandas dataframe of shape = [n_samples, n_features]
            The data to transform.

        Returns
        -------
        X_new: Pandas dataframe
            The input dataframe plus the new variables.
        """
        X = super().transform(X)

        # Dispatch table from function name to the method that applies it.
        methods_dict = {
            "add": self._add,
            "mul": self._mul,
            "sub": self._sub,
            "div": self._div,
            "truediv": self._truediv,
            "floordiv": self._floordiv,
            "mod": self._mod,
            "pow": self._pow,
        }

        for func in self.func:
            methods_dict[func](X)

        if self.drop_original:
            X.drop(
                columns=set(self.variables + self.reference),
                inplace=True,
            )

        return X

    def _check_not_zero(self, X, reference):
        """Raise if the reference column contains zeros (invalid denominator).

        Shared by all division-like operations (_div, _truediv, _floordiv,
        _mod), which previously duplicated this check inline.
        """
        if (X[reference] == 0).any():
            raise ValueError(
                "Some of the reference variables contain 0 as values. Check and "
                "remove those before using this transformer."
            )

    def _sub(self, X):
        # variable - reference, one new column per (variable, reference) pair.
        for reference in self.reference:
            varname = [f"{var}_sub_{reference}" for var in self.variables]
            X[varname] = X[self.variables].sub(X[reference], axis=0)
        return X

    def _div(self, X):
        # variable / reference; zero denominators are rejected up front.
        for reference in self.reference:
            self._check_not_zero(X, reference)
            varname = [f"{var}_div_{reference}" for var in self.variables]
            X[varname] = X[self.variables].div(X[reference], axis=0)
        return X

    def _add(self, X):
        # variable + reference.
        for reference in self.reference:
            varname = [f"{var}_add_{reference}" for var in self.variables]
            X[varname] = X[self.variables].add(X[reference], axis=0)
        return X

    def _mul(self, X):
        # variable * reference.
        for reference in self.reference:
            varname = [f"{var}_mul_{reference}" for var in self.variables]
            X[varname] = X[self.variables].mul(X[reference], axis=0)
        return X

    def _truediv(self, X):
        # variable / reference (true division); zero denominators rejected.
        for reference in self.reference:
            self._check_not_zero(X, reference)
            varname = [f"{var}_truediv_{reference}" for var in self.variables]
            X[varname] = X[self.variables].truediv(X[reference], axis=0)
        return X

    def _floordiv(self, X):
        # variable // reference; zero denominators rejected.
        for reference in self.reference:
            self._check_not_zero(X, reference)
            varname = [f"{var}_floordiv_{reference}" for var in self.variables]
            X[varname] = X[self.variables].floordiv(X[reference], axis=0)
        return X

    def _mod(self, X):
        # variable % reference; zero denominators rejected.
        for reference in self.reference:
            self._check_not_zero(X, reference)
            varname = [f"{var}_mod_{reference}" for var in self.variables]
            X[varname] = X[self.variables].mod(X[reference], axis=0)
        return X

    def _pow(self, X):
        # variable ** reference.
        for reference in self.reference:
            varname = [f"{var}_pow_{reference}" for var in self.variables]
            X[varname] = X[self.variables].pow(X[reference], axis=0)
        return X

    def get_feature_names_out(self, all: bool = False) -> List:
        """Get output feature names for transformation.

        Parameters
        ----------
        all: bool, default=False
            Whether to return all or only the new features. If False, returns the names
            of the new features. If True, returns the names of all features in the
            transformed dataframe.

        Returns
        -------
        feature_names_out: list
            The feature names.
        """
        check_is_fitted(self)

        # Names of new features, in the same order transform() creates them.
        feature_names = [
            f"{var}_{fun}_{reference}"
            for fun in self.func
            for reference in self.reference
            for var in self.variables
        ]

        if all is True:
            if self.drop_original is True:
                # Remove names of variables to drop.
                original = [
                    f
                    for f in self.feature_names_in_
                    if f not in self.variables + self.reference
                ]
                feature_names = original + feature_names
            else:
                feature_names = self.feature_names_in_ + feature_names

        return feature_names
|
from wurst.geo import geomatcher
from . import DATA_DIR
REGION_MAPPING_FILEPATH = DATA_DIR / "regionmappingH12.csv"
def get_IAM_geomatcher():
    """
    Return the wurst `geomatcher`, patched with IMAGE and REMIND definitions.

    Geographical boundaries for IMAGE regions are initially included in
    geomatcher. However, they are not properly labelled: they are keyed by
    the long region names, so we re-key them under the IMAGE acronyms, then
    register REMIND regions from the CSV mapping file.

    NOTE: `geomatcher` is a module-level object and is mutated in place.
    """
    d_image_regions = {
        "BRA": "Brazil",
        "CAN": "Canada",
        "CEU": "Central Europe",
        "CHN": "China Region",
        "EAF": "Eastern Africa",
        "INDIA": "India",
        "INDO": "Indonesia Region",
        "JAP": "Japan",
        "KOR": "Korea Region",
        "ME": "Middle east",
        "MEX": "Mexico",
        "NAF": "Northern Africa",
        "OCE": "Oceania",
        "RCAM": "Central America",
        "RSAF": "Rest of Southern Africa",
        "RSAM": "Rest of South America",
        "RSAS": "Rest of South Asia",
        "RUS": "Russia Region",
        "SAF": "South Africa",
        "SEAS": "South Asia",
        "STAN": "Central Asia",
        "TUR": "Turkey",
        "UKR": "Ukraine region",
        "USA": "USA",
        "WAF": "Western Africa",
        "WEU": "Western Europe",
    }

    # Map ("IMAGE", long name) keys to ("IMAGE", acronym) keys.
    d_map = {("IMAGE", v): ("IMAGE", k) for k, v in d_image_regions.items()}

    # Collect the existing IMAGE entries under their new acronym keys.
    new_def = dict()
    for k, v in geomatcher.items():
        if isinstance(k, tuple):
            if k[0] == "IMAGE" and k[1] in list(d_image_regions.values()):
                new_def[d_map[k]] = v

    # `geo` is an alias: removals and updates happen on the shared geomatcher.
    geo = geomatcher
    for k in list(geomatcher.keys()):
        if k[0] == "IMAGE" and k[1] in list(d_image_regions.values()):
            geomatcher.pop(k)
    geo.update(new_def)

    with open(REGION_MAPPING_FILEPATH) as f:
        f.readline()  # skip the header row
        csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
        iso_region_pairs = [(x[1], x[2]) for x in csv_list]

    # List of countries not found
    countries_not_found = ["CC", "CX", "GG", "JE", "BL"]

    rmnd_to_iso = {}
    iso_to_rmnd = {}
    # Build a dictionary that maps region names (used by REMIND) to ISO country codes
    # And a reverse dictionary that maps ISO country codes to region names
    for ISO, region in iso_region_pairs:
        if ISO not in countries_not_found:
            rmnd_to_iso.setdefault(region, []).append(ISO)
            # BUGFIX: previously wrote `iso_to_rmnd[region] = ISO`, which built
            # a region -> last-ISO map instead of the documented ISO -> region.
            iso_to_rmnd[ISO] = region

    geo.add_definitions(rmnd_to_iso, "REMIND")

    return geo
class Geomap:
    """
    Map ecoinvent locations to IAM regions and vice-versa.
    """

    def __init__(self):
        # Build the patched geomatcher once and cache it on the instance.
        self.geo = get_IAM_geomatcher()

    def iam_to_ecoinvent_location(self, location, contained=False):
        """
        Find the corresponding ecoinvent region(s) given an IAM region.

        :param location: name of a IAM region
        :type location: str
        :param contained: if True, return only regions fully contained in
            `location`; otherwise return all intersecting regions.
        :return: names of ecoinvent regions (may be empty if none found)
        :rtype: list
        """
        if location == "World":
            return ["GLO"]

        ecoinvent_locations = []
        searchfunc = self.geo.contained if contained else self.geo.intersects

        # The location may exist under either IAM namespace; a miss raises
        # KeyError, which we skip. (The original issued the same lookup twice;
        # one call is sufficient.)
        for iam in ("REMIND", "IMAGE"):
            try:
                for r in searchfunc((iam, location)):
                    # Tuple results are IAM-namespaced keys, not ecoinvent names.
                    if not isinstance(r, tuple):
                        ecoinvent_locations.append(r)
            except KeyError:
                pass

        if len(ecoinvent_locations) == 0:
            print("Can't find location {} using the geomatcher.".format(location))

        return ecoinvent_locations
|
"""API response model for PyTautulli Api.."""
from __future__ import annotations
from enum import Enum
from typing import Any
from .activity import PyTautulliApiActivity
from .base import APIResponseType, PyTautulliApiBaseModel
from .session import PyTautulliApiSession
from .user import PyTautulliApiUser
class APIResult(str, Enum):
    """Possible values of the `result` field in a Tautulli API response."""

    SUCCESS = "success"
    ERROR = "error"
class PyTautulliApiResponse(PyTautulliApiBaseModel):
    """API response model for PyTautulli Api."""

    # Payload of the response: raw dict(s) or, once `_generate_data` has run,
    # instances of the model class declared by `_datatype`.
    data: dict[str, Any] | list[
        dict[str, Any]
    ] | PyTautulliApiActivity | PyTautulliApiSession | list[
        PyTautulliApiUser
    ] | None = None
    # Human-readable message returned by the API (typically set on error).
    message: str | None = None
    # Overall status of the call (success/error).
    result: APIResult | None = None

    def _generate_data(self, data: dict[str, Any] | list[dict[str, Any]]) -> Any:
        """Convert the raw payload into the model type declared by `_datatype`.

        Returns the raw `data` unchanged when no datatype is configured;
        a list of model instances for LIST responses; a single instance
        otherwise. (BUGFIX: the return annotation previously said `-> None`
        although every branch returns a value.)
        """
        if self._datatype is None:
            return data
        if self._datatype._responsetype == APIResponseType.LIST:
            return [self._datatype(item, self._datatype) for item in data]
        return self._datatype(data, self._datatype)
|
#!/usr/bin/env python
"""Download files from Cohesity backups using Python"""

# usage: ./downloadFile.py -v mycluster -u myusername -d mydomain.net -o myserver -f 'mypath/myfile' -p /Users/myusername/Downloads

from pyhesity import *
import sys
import os

# identify python version and import quote_plus accordingly.
# BUGFIX: `from urllib import quote_plus` only works on Python 2; the function
# moved to urllib.parse in Python 3 (which the rest of the script supports).
if sys.version_info[0] < 3:
    pyversion = 2
    from urllib import quote_plus
else:
    pyversion = 3
    from urllib.parse import quote_plus

# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)  # the Cohesity cluster to connect to
parser.add_argument('-u', '--username', type=str, required=True)  # the Cohesity username to use
parser.add_argument('-d', '--domain', type=str, default='local')  # the Cohesity domain to use
parser.add_argument('-o', '--objectname', type=str, required=True)  # the protected object to search
parser.add_argument('-f', '--filesearch', type=str, required=True)  # partial filename to search for
parser.add_argument('-p', '--destinationpath', type=str, required=True)  # local path to download file to
args = parser.parse_args()

vip = args.vip
username = args.username
domain = args.domain
objectname = args.objectname
filesearch = args.filesearch
destinationpath = args.destinationpath


def prompt_for_selection(option_count):
    """Prompt the user for a numeric selection in [0, option_count); exit on invalid input.

    Shared by the file-selection and version-selection prompts, which
    previously duplicated this logic.
    """
    if pyversion == 2:
        selected = raw_input('\nSelection: ')
    else:
        selected = input('\nSelection: ')
    if selected.isdigit() is False:
        print('Invalid selection')
        exit()
    selected = int(selected)
    if selected >= option_count:
        print('Invalid selection')
        exit()
    return selected


# authenticate
apiauth(vip, username, domain)

# find entity
entities = api('get', '/entitiesOfType?environmentTypes=kVMware&environmentTypes=kPhysical&environmentTypes=kView&isProtected=true&physicalEntityTypes=kHost&viewEntityTypes=kView&vmwareEntityTypes=kVirtualMachine')
entity = [entity for entity in entities if entity['displayName'].lower() == objectname.lower()]
if len(entity) == 0:
    print('Object %s not found' % objectname)
    exit()

# find file results
encodedfilename = quote_plus(filesearch)
fileresults = api('get', '/searchfiles?entityIds=%s&filename=%s' % (entity[0]['id'], encodedfilename))
if fileresults['count'] > 10:
    print('%s results found. Please narrow your search' % fileresults['count'])
    exit()
else:
    print('\nPlease select which file to recover or press CTRL-C to exit\n')
    for idx, fileresult in enumerate(fileresults['files']):
        print('%s %s' % (idx, fileresult['fileDocument']['filename']))

# prompt user to select file
selected = prompt_for_selection(len(fileresults['files']))

# gather details for download
selectedfile = fileresults['files'][selected]
clusterId = selectedfile['fileDocument']['objectId']['jobUid']['clusterId']
clusterIncarnationId = selectedfile['fileDocument']['objectId']['jobUid']['clusterIncarnationId']
jobId = selectedfile['fileDocument']['objectId']['jobUid']['objectId']
viewBoxId = selectedfile['fileDocument']['viewBoxId']
filePath = selectedfile['fileDocument']['filename']
encodedfilePath = quote_plus(filePath)
filename = os.path.split(filePath)[1]
outpath = os.path.join(destinationpath, filename)

# find versions
versions = api('get', '/file/versions?clusterId=%s&clusterIncarnationId=%s&entityId=%s&filename=%s&fromObjectSnapshotsOnly=false&jobId=%s' % (clusterId, clusterIncarnationId, entity[0]['id'], encodedfilePath, jobId))
print('\nPlease select a version of the file to recover\n')
for idx, version in enumerate(versions['versions']):
    print('%s %s' % (idx, usecsToDate(version['instanceId']['jobStartTimeUsecs'])))

# prompt user to select version
selected = prompt_for_selection(len(versions['versions']))

# gather version info
version = versions['versions'][selected]
attemptNum = version['instanceId']['attemptNum']
jobInstanceId = version['instanceId']['jobInstanceId']
jobStartTimeUsecs = version['instanceId']['jobStartTimeUsecs']

# download the file
print('Downloading %s to %s' % (filename, destinationpath))
fileDownload('/downloadfiles?attemptNum=%s&clusterId=%s&clusterIncarnationId=%s&entityId=%s&filepath=%s&jobId=%s&jobInstanceId=%s&jobStartTimeUsecs=%s&viewBoxId=%s' % (attemptNum, clusterId, clusterIncarnationId, entity[0]['id'], encodedfilePath, jobId, jobInstanceId, jobStartTimeUsecs, viewBoxId), outpath)
|
# Current holding: unit cost price and share count.
current_unit_cost = 78.1
current_amount = 1300


# Compute the averaged-down position after an additional purchase.
def calc_stock_new_cost(add_buy_amount, add_buy_unit_cost,
                        held_amount=None, held_unit_cost=None):
    """Compute and print the position after buying more shares.

    Parameters
    ----------
    add_buy_amount : number of shares bought in this purchase.
    add_buy_unit_cost : price per share of this purchase.
    held_amount, held_unit_cost : optional current holding; default to the
        module-level `current_amount` / `current_unit_cost` (the original
        behavior, which read the globals directly).

    Returns
    -------
    (new_unit_cost, new_value, pct_change, profit_loss) as floats.
    """
    if held_amount is None:
        held_amount = current_amount
    if held_unit_cost is None:
        held_unit_cost = current_unit_cost
    # Cost of the additional purchase.
    buy_stock_cost = add_buy_amount * add_buy_unit_cost
    # Total invested = held amount * held unit cost + new purchase cost.
    new_stock_cost = held_amount * held_unit_cost + buy_stock_cost
    # Total share count after the purchase.
    new_stock_amount = held_amount + add_buy_amount
    # New average unit cost = total invested / total shares.
    new_stock_unit_cost = new_stock_cost / new_stock_amount
    # Market value at the purchase price.
    new_stock_value = add_buy_unit_cost * new_stock_amount
    # Percentage change = (market value - total invested) / total invested.
    value_diff_cost = new_stock_value - new_stock_cost
    stock_rate = value_diff_cost / new_stock_cost * 100
    print("本次补仓买入成本: %.2f, 总买入成本: %.2f, 新成本单价: %.2f" % (buy_stock_cost, new_stock_cost, new_stock_unit_cost))
    print("新市值: %.2f, 新涨跌幅: %.2f, 新盈亏额: %.2f " % (new_stock_value, stock_rate, value_diff_cost))
    return new_stock_unit_cost, new_stock_value, stock_rate, value_diff_cost


# 2021.07.28: projected cost after buying 2000 more shares at 53.3.
calc_stock_new_cost(2000, 53.3)
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.kythe.tasks.indexable_java_targets import IndexableJavaTargets
class IndexJava(NailgunTask):
  """Runs the Kythe Java indexer over .kindex files to produce entries files."""

  # Fully-qualified main class of the Kythe Java indexer JVM tool.
  _KYTHE_INDEXER_MAIN = 'com.google.devtools.kythe.analyzers.java.JavaIndexer'

  # Cache results per target directory so valid targets can be skipped.
  cache_target_dirs = True

  @classmethod
  def implementation_version(cls):
    # Bump this version to invalidate all past artifacts generated by this task.
    return super(IndexJava, cls).implementation_version() + [('IndexJava', 6), ]

  @classmethod
  def product_types(cls):
    """Products this task provides to downstream tasks."""
    return ['kythe_entries_files']

  @classmethod
  def prepare(cls, options, round_manager):
    """Declare the upstream product (.kindex files) this task consumes."""
    super(IndexJava, cls).prepare(options, round_manager)
    round_manager.require_data('kindex_files')

  @classmethod
  def register_options(cls, register):
    """Register the (deprecated) --force flag and the indexer JVM tool."""
    super(IndexJava, cls).register_options(register)
    register('--force', type=bool, fingerprint=True,
             help='Re-index all targets, even if they are valid.',
             removal_version='1.6.0.dev0', removal_hint='Use --cache-ignore instead.')
    cls.register_jvm_tool(register,
                          'kythe-indexer',
                          main=cls._KYTHE_INDEXER_MAIN)

  def execute(self):
    """Index each invalid target's .kindex file and register the entries files."""
    def entries_file(_vt):
      # Output path for one target's Kythe entries, inside its results dir.
      return os.path.join(_vt.results_dir, 'index.entries')

    indexable_targets = IndexableJavaTargets.get(self.context)
    with self.invalidated(indexable_targets, invalidate_dependents=True) as invalidation_check:
      kindex_files = self.context.products.get_data('kindex_files')
      # TODO(John Sirois): `vts_to_index` should be inlined to `invalidation_check.invalid_vts`
      # when the deprecation cycle for `--force` is completed.
      vts_to_index = (invalidation_check.all_vts if self.get_options().force
                      else invalidation_check.invalid_vts)
      indexer_cp = self.tool_classpath('kythe-indexer')
      # Kythe jars embed a copy of Java 9's com.sun.tools.javac and javax.tools, for use on JDK8.
      # We must put these jars on the bootclasspath, ahead of any others, to ensure that we load
      # the Java 9 versions, and not the runtime's versions.
      jvm_options = ['-Xbootclasspath/p:{}'.format(':'.join(indexer_cp))]
      jvm_options.extend(self.get_options().jvm_options)
      for vt in vts_to_index:
        self.context.log.info('Kythe indexing {}'.format(vt.target.address.spec))
        kindex_file = kindex_files.get(vt.target)
        if not kindex_file:
          raise TaskError('No .kindex file found for {}'.format(vt.target.address.spec))
        args = [kindex_file, '--out', entries_file(vt)]
        result = self.runjava(classpath=indexer_cp, main=self._KYTHE_INDEXER_MAIN,
                              jvm_options=jvm_options,
                              args=args, workunit_name='kythe-index',
                              workunit_labels=[WorkUnitLabel.COMPILER])
        if result != 0:
          raise TaskError('java {main} ... exited non-zero ({result})'.format(
            main=self._KYTHE_INDEXER_MAIN, result=result))
      # Register entries files for ALL targets (valid ones already have them on disk).
      for vt in invalidation_check.all_vts:
        self.context.products.get_data('kythe_entries_files', dict)[vt.target] = entries_file(vt)
|
#! /usr/bin/python
# usage: python topics.py [beta-file] [vocab-file] [num words] [result-file]
#
# [beta-file] is output from the dln-c code
# [vocab-file] is a list of words, one per line
# [num words] is the number of words to print from each topic
# [result-file] is file to write top [num words] words
import sys
import numpy as np
def print_topics(beta_file, vocab_file, nwords, result_file):
    """Write the top `nwords` words of each topic in `beta_file` to `result_file`.

    beta_file: one topic per line, whitespace-separated per-word weights.
    vocab_file: one word per line, aligned with the beta columns.
    nwords: number of top words to emit per topic.
    result_file: output path; each topic is a 'topic NNN' header followed by
        one ' word \\t\\t weight' line per top word and a blank line.
    """
    # get the vocabulary; `with` guarantees the handle is closed
    # (the original left both input file objects open).
    with open(vocab_file, 'r') as fp_vocab:
        vocab = [line.strip() for line in fp_vocab]

    with open(result_file, 'w') as fp_result, open(beta_file, 'r') as fp_topics:
        for topic_no, topic in enumerate(fp_topics):
            fp_result.write('topic %03d\n' % (topic_no))
            weights = np.array(list(map(float, topic.split())))
            # argsort of the negated weights -> indices in descending order.
            indices = np.argsort(-weights)
            for i in range(nwords):
                fp_result.write(' %s \t\t %f\n' % (vocab[indices[i]], weights[indices[i]]))
            fp_result.write('\n')
if __name__ == '__main__':
    # Validate the argument count up front, then run directly on sys.argv.
    if len(sys.argv) != 5:
        print('usage: python topics.py [beta-file] [vocab-file] [num words] [result-file]')
        sys.exit(1)
    print_topics(sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4])
|
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
import torch.nn.functional as F
class mCrossentropyND(nn.Module):
    """
    Margin-augmented, focal cross-entropy for N-d segmentation logits.

    Network has to have NO NONLINEARITY! Raw logits are expected: a large
    margin is subtracted from selected true-class logits, then softmax and a
    focal weighting are applied internally.

    Parameters:
        asy: 0/1/2 - selects symmetric vs. asymmetric variants, i.e. which
            classes receive the margin / are exempt from the focal reduction.
        gama: focal exponent (gamma).
        margin: additive large margin subtracted from true-class logits.
        weights: extra weight applied to class 2 in the one-hot target.

    NOTE(review): the hard-coded `r` vectors assume exactly 3 classes.
    """
    def __init__(self, asy, gama, margin, weights):
        super(mCrossentropyND, self).__init__()
        self.asy = asy
        self.gama = gama
        self.margin = margin
        self.weights = weights

    def forward(self, inp, target):
        """Return (summed loss, margin-adjusted logits, focal factors).

        inp: [N, C, ...] raw logits; target: matching integer labels.
        """
        target = target.long()
        num_classes = inp.size()[1]

        # Move the channel axis to the end; this is ugly but torch only
        # allows to transpose two axes at once, hence the loop.
        i0 = 1
        i1 = 2
        while i1 < len(inp.shape):
            inp = inp.transpose(i0, i1)
            i0 += 1
            i1 += 1

        inp = inp.contiguous()
        inp = inp.view(-1, num_classes)
        target = target.view(-1,)
        # now inp is [N,C], target is [N,]

        # GENERALIZED: place tensors on the input's device instead of
        # hard-coding .cuda(), so the loss also runs on CPU; results on
        # CUDA inputs are unchanged.
        device = inp.device
        y_one_hot = self.one_hot_embedding(target.data.cpu(), num_classes)
        y_one_hot = y_one_hot.to(device)
        y_one_hot[:, 2] = y_one_hot[:, 2] * self.weights

        ################### Symmetric/ asymmetric large margin loss ###################
        # q is obtained by applying softmax to the margin-adjusted logits.
        if self.asy == 0:
            # have margin on all classes.
            r = [1, 1, 1]
        if self.asy == 1:
            # have margin only on foreground class
            r = [0, 1, 1]
        if self.asy == 2:
            # have margin only on the tumor class
            r = [0, 0, 1]
        # extend r from [1,C] to [N,C]
        r = torch.reshape(torch.tensor(r), [1, len(r)])
        rRepeat = torch.cat(inp.shape[0] * [r])
        # this is the input to softmax, which will give us q
        inppost = inp - rRepeat.float().to(device) * y_one_hot * self.margin

        # do the softmax and get q
        p_y_given_x_train = torch.softmax(inppost, 1)
        e1 = 1e-6  # without the small epsilon, log() leads to nan after several epochs
        log_p_y_given_x_train = (p_y_given_x_train + e1).log()

        ################### Symmetric/ asymmetric focal loss ###################
        if self.asy == 0:
            # have focal reduction on all classes.
            r = [0, 0, 0]
        if self.asy == 1:
            # have focal reduction only on 0 class
            r = [0, 1, 1]
        if self.asy == 2:
            # have focal reduction only on 0 and 1 class
            r = [0, 0, 1]
        # extend r from [1,C] to [N,C]
        r = torch.reshape(torch.tensor(r), [1, len(r)])
        rRepeat = torch.cat(log_p_y_given_x_train.shape[0] * [r])
        focal_conduct_active = (1 - p_y_given_x_train + e1) ** self.gama
        focal_conduct_inactive = torch.ones(p_y_given_x_train.size())
        focal_conduct = focal_conduct_active * (1 - rRepeat.float().to(device)) + focal_conduct_inactive.to(device) * rRepeat.float().to(device)
        m_log_p_y_given_x_train = focal_conduct * log_p_y_given_x_train
        # focal_conduct is also returned so it can be reused by the DSC loss,
        # which is calculated outside.

        num_samples = m_log_p_y_given_x_train.size()[0]
        loss = - (1. / num_samples) * m_log_p_y_given_x_train * y_one_hot
        return loss.sum(), inppost, focal_conduct

    def one_hot_embedding(self, labels, num_classes):
        '''Embedding labels to one-hot form.

        Args:
          labels: (LongTensor) class labels, sized [N,].
          num_classes: (int) number of classes.

        Returns:
          (tensor) encoded labels, sized [N,#classes].
        '''
        y = torch.eye(num_classes)  # [D,D]
        return y[labels]  # [N,D]
import matplotlib.pyplot as plt
import pandas as pd
import argparse

# command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--log-file', '-l', default='reward.log', type=str,
                    help='reward log file name')
args = parser.parse_args()

# Load the reward log; first column is the x axis, second the reward values.
df = pd.read_csv(args.log_file)
x = df.columns[0]
y = df.columns[1]

# Scatter plot of the raw rewards.
ax = df.plot(kind='scatter', x=x, y=y)

# BUGFIX: pd.rolling_mean() was removed from pandas (0.23+); the equivalent
# modern API is Series.rolling(window).mean().
df[y] = df[y].rolling(window=20).mean()

# Overlay the 20-sample moving average on the same axes.
df.plot(kind='line', x=x, y=y, ax=ax)
plt.show()
|
###
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from builtins import str
import logging
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from django.conf import settings
from time import sleep
from google_helpers.bigquery.cohort_support import BigQuerySupport
from cohorts.metadata_helpers import *
from visualizations.data_access_views_v2 import get_confirmed_project_ids_for_cohorts
from visualizations.feature_access_views_v2 import build_feature_ids
from bq_data_access.v2.plot_data_support import get_merged_feature_vectors
from bq_data_access.v2.data_access import FeatureVectorBigQueryBuilder
from google_helpers.bigquery.service_v2 import BigQueryServiceSupport
from cohorts.models import Project, Program
logger = logging.getLogger('main_logger')
def get_program_set_for_oncoprint(cohort_id_array):
    """Return the active, public TCGA program(s); OncoPrint data is TCGA-only.

    `cohort_id_array` is accepted for call-site symmetry but not consulted.
    """
    tcga_only = {'name': 'TCGA', 'is_public': True, 'active': True}
    return Program.objects.filter(**tcga_only)
def is_valid_genomic_build(genomic_build_param):
    """
    Returns: True if given genomic build is valid, otherwise False.
    """
    return genomic_build_param in ("HG19", "HG38")
@login_required
def oncoprint_view_data(request):
try:
gene_list_str = request.GET.get('gene_list', None)
gene_array = gene_list_str.split(',')
genomic_build = request.GET.get('genomic_build', None)
cohort_id_param_array = request.GET.getlist('cohort_id', None)
if not is_valid_genomic_build(genomic_build):
return JsonResponse({'error': 'Invalid genomic build'}, status=400)
cohort_id_array = []
for cohort_id in cohort_id_param_array:
try:
cohort_id = int(cohort_id)
cohort_id_array.append(cohort_id)
except Exception as e:
return JsonResponse({'error': 'Invalid cohort parameter'}, status=400)
if len(cohort_id_array) == 0:
return JsonResponse({'error': 'No cohorts specified'}, status=400)
program_set = get_program_set_for_oncoprint(cohort_id_array)
confirmed_project_ids, user_only_study_ids = get_confirmed_project_ids_for_cohorts(cohort_id_array)
# Only samples in projects from a data type's valid programs should be queried
projects_this_program_set = Project.objects.filter(id__in=confirmed_project_ids,program__in=program_set).values_list('id', flat=True)
if not len(program_set):
return JsonResponse(
{'message': "The chosen cohorts do not contain samples from programs with Gene Mutation data."})
query_template = """
#standardSQL
SELECT cs.case_barcode, sm.Hugo_Symbol, sm.Alteration, sm.Type
FROM (
SELECT case_barcode
FROM `{cohort_table}`
WHERE cohort_id IN ({cohort_id_list})
AND (project_id IS NULL{project_clause})
AND case_barcode LIKE 'TCGA-%'
GROUP BY case_barcode
) cs
LEFT JOIN (
SELECT
case_barcode, Hugo_Symbol,
CASE
WHEN Protein_position IS NOT NULL AND Protein_position NOT LIKE '-/%' THEN
CONCAT(
COALESCE(REGEXP_EXTRACT(Amino_acids,r'^([A-Za-z*\-]+)'),'-'),
COALESCE(REGEXP_EXTRACT(Protein_position,r'^([0-9]+)'), '-'),
CASE
WHEN Variant_Classification IN ('Frame_Shift_Del', 'Frame_Shift_Ins') OR {conseq_col} LIKE '%frameshift%' THEN '_fs'
WHEN Variant_Classification IN ('Splice_Site', 'Splice_Region') THEN '_splice'
WHEN Amino_acids LIKE '%/%' THEN REGEXP_EXTRACT(Amino_acids,r'^.*/([A-Za-z*-]+)')
ELSE '-'
END
)
ELSE
CASE
WHEN {conseq_col} LIKE '%splice_%_variant%' THEN REGEXP_EXTRACT({conseq_col},r'^(splice_[^_]+_variant)')
WHEN {conseq_col} LIKE '%intron_variant%' THEN 'intron_variant'
WHEN Variant_Classification = 'IGR' THEN 'Intergenic'
ELSE Variant_Classification
END
END AS Alteration,
CASE
WHEN (Amino_acids IS NOT NULL AND REGEXP_EXTRACT(Amino_acids,r'^.*/([A-Za-z*-]+)$') = '*') OR Variant_Classification IN ('Frame_Shift_Del', 'Frame_Shift_Ins', 'Splice_Site', 'Splice_Region') THEN 'TRUNC'
WHEN Variant_Classification = 'Nonsense_Mutation' AND {conseq_col} LIKE 'stop_gained%' THEN 'TRUNC'
WHEN Variant_Classification = 'Nonstop_Mutation' OR (Variant_Classification = 'Missense_Mutation' AND Variant_Type IN ('DEL','INS')) OR (Variant_Classification = 'Translation_Start_Site') THEN 'MISSENSE'
WHEN (Variant_Classification = 'Missense_Mutation' AND Variant_Type IN ('ONP','SNP', 'TNP')) OR (Variant_Classification IN ('In_Frame_Del','In_Frame_Ins')) OR {conseq_col} LIKE '%inframe%' THEN 'INFRAME'
WHEN Variant_Classification IN ("RNA","IGR", "3\'UTR","3\'Flank","5\'UTR","5\'Flank") THEN
CASE
WHEN {conseq_col} LIKE '%intergenic%' THEN 'INTERGENIC'
WHEN {conseq_col} LIKE '%regulatory%' THEN 'REGULATORY'
WHEN {conseq_col} LIKE '%miRNA%' THEN 'miRNA'
WHEN {conseq_col} LIKE '%transcript%' THEN 'TRANSCRIPT'
WHEN {conseq_col} LIKE '%downstream%' THEN 'DOWNSTREAM'
WHEN {conseq_col} LIKE '%upstream%' THEN 'UPSTREAM'
ELSE UPPER(Variant_Classification)
END
ELSE UPPER(Variant_Classification)
END AS Type
FROM `{bq_data_project_id}.{dataset_name}.{table_name}`
WHERE Variant_Classification NOT IN ('Silent') {filter_clause}
AND case_barcode IN (
SELECT case_barcode
FROM `{cohort_table}`
WHERE cohort_id IN ({cohort_id_list})
AND (project_id IS NULL{project_clause})
GROUP BY case_barcode
)
GROUP BY case_barcode, Hugo_Symbol, Alteration, Type
ORDER BY case_barcode
) sm
ON sm.case_barcode = cs.case_barcode
;
"""
project_id_stmt = ""
if projects_this_program_set and len(projects_this_program_set):
project_id_stmt = ', '.join([str(project_id) for project_id in projects_this_program_set])
project_clause = " OR project_id IN ({})".format(project_id_stmt) if projects_this_program_set else ""
gene_list_stm = ''
if gene_array is not None:
gene_list_stm = ', '.join('\'{0}\''.format(gene) for gene in gene_array)
filter_clause = "AND Hugo_Symbol IN ({})".format(gene_list_stm) if gene_list_stm != "" else ""
cohort_id_list = ', '.join([str(cohort_id) for cohort_id in cohort_id_array])
cohort_table_id = "{project_name}.{dataset_id}.{table_id}".format(
project_name=settings.BIGQUERY_PROJECT_ID,
dataset_id=settings.BIGQUERY_COHORT_DATASET_ID,
table_id=settings.BIGQUERY_COHORT_TABLE_ID)
bq_table_info = BQ_MOLECULAR_ATTR_TABLES['TCGA'][genomic_build]
somatic_mut_query = query_template.format(
bq_data_project_id = settings.BIGQUERY_DATA_PROJECT_ID,
dataset_name=bq_table_info['dataset'],
table_name=bq_table_info['table'],
conseq_col=("one_consequence" if genomic_build == "hg38" else 'consequence'),
cohort_table=cohort_table_id,
filter_clause=filter_clause,
cohort_id_list=cohort_id_list,
project_clause=project_clause
)
somatic_mut_query_job = BigQuerySupport.insert_query_job(somatic_mut_query)
plot_data = []
genes_with_no_cnvr = []
attempts = 0
job_is_done = BigQuerySupport.check_job_is_done(somatic_mut_query_job)
while attempts < settings.BQ_MAX_ATTEMPTS and not job_is_done:
job_is_done = BigQuerySupport.check_job_is_done(somatic_mut_query_job)
sleep(1)
attempts += 1
if job_is_done:
results = BigQuerySupport.get_job_results(somatic_mut_query_job['jobReference'])
#Only add plot_data if gene info is not missing
if results and len(results) > 0:
for row in results:
if row['f'][1]['v']:
plot_data.append("{}\t{}\t{}\t{}".format(str(row['f'][0]['v']),str(row['f'][1]['v']),str(row['f'][2]['v']),str(row['f'][3]['v'])))
# Build the CNVR features
for gene in gene_array:
feature = build_feature_ids(
"CNVR", {'value_field': 'segment_mean', 'gene_name': gene, 'genomic_build': genomic_build}
)
if not feature or not len(feature):
logger.warn("[WARNING] No internal feature ID found for CNVR, gene {}, build {}.".format(gene,genomic_build))
genes_with_no_cnvr.append(gene)
continue
feature = feature[0]['internal_feature_id']
fvb = FeatureVectorBigQueryBuilder.build_from_django_settings(BigQueryServiceSupport.build_from_django_settings())
data = get_merged_feature_vectors(fvb, feature, None, None, cohort_id_array, None, projects_this_program_set, program_set=program_set)['items']
if data and len(data):
for item in data:
# 01A are tumor samples, which is what we want
if item['sample_id'].split('-')[-1] == '01A':
seg_mean = float(item['x'])
if seg_mean > 0.112 or seg_mean < -0.112:
cnvr_result = "AMP" if seg_mean > 1 else "GAIN" if seg_mean > 0.62 else "HOMDEL" if seg_mean < -1 else "HETLOSS"
plot_data.append("{}\t{}\t{}\t{}".format(item['case_id'],gene,cnvr_result,"CNA"))
if len(plot_data):
plot_message = \
'' if not genes_with_no_cnvr \
else "No internal feature ID found for CNVR, gene [{}], build {}."\
.format(', '.join(genes_with_no_cnvr), genomic_build)
return JsonResponse({
'plot_data': plot_data,
'gene_list': gene_array,
'bq_tables': ["{bq_data_project_id}:{dataset_name}.{table_name}".format(
bq_data_project_id=settings.BIGQUERY_DATA_PROJECT_ID,
dataset_name=bq_table_info['dataset'],
table_name=bq_table_info['table'])],
'plot_message': plot_message,
})
else:
return JsonResponse(
{'message': "The chosen genes and cohorts do not contain any samples with Gene Mutation data."})
except Exception as e:
logger.error("[ERROR] In oncoprint_view_data: ")
logger.exception(e)
return JsonResponse({'Error': str(e)}, status=500)
|
from enum import IntEnum
import os  # bug fix: `os` was used in the fallback branch below but never imported
from struct import unpack_from

try:
    from OgreHardwareBuffer import OgreFakeHardwareBuffer
except ImportError as e:
    # Fallback: locate OgreHardwareBuffer.py next to this file and execute it
    # inline so OgreFakeHardwareBuffer is defined in this namespace.
    directory = os.path.dirname(os.path.realpath(__file__))
    print("Import error: " + str(e) + " manual compilation")
    srcfile = "OgreHardwareBuffer.py"
    exec(compile(open(os.path.join(directory, srcfile)).read(), srcfile, 'exec'))
class OgreVertexBuffer(OgreFakeHardwareBuffer):
    """Simulates a vertex buffer living in graphics-card memory."""

    def __init__(self, vertexSize, numVertices):
        OgreFakeHardwareBuffer.__init__(self)
        self._vertexSize = vertexSize
        self._numVertices = numVertices

    @property
    def vertexSize(self):
        """Size in bytes of a single vertex."""
        return self._vertexSize

    @property
    def numVertices(self):
        """Number of vertices held by this buffer."""
        return self._numVertices

    @property
    def sizeInBytes(self):
        """Total size of the buffer: per-vertex size times vertex count."""
        return self._numVertices * self._vertexSize
class OgreVertexElementSemantic(IntEnum):
    """
    Vertex element semantics, used to identify the meaning of vertex buffer
    contents.
    """

    VES_UNKNOWN = 0
    # Position, 3 reals per vertex
    VES_POSITION = 1
    # Blending weights
    VES_BLEND_WEIGHTS = 2
    # Blending indices
    VES_BLEND_INDICES = 3
    # Normal, 3 reals per vertex
    VES_NORMAL = 4
    # Diffuse colours
    VES_DIFFUSE = 5
    # Specular colours
    VES_SPECULAR = 6
    # Texture coordinates
    VES_TEXTURE_COORDINATES = 7
    # Binormal (Y axis if normal is Z)
    VES_BINORMAL = 8
    # Tangent (X axis if normal is Z)
    VES_TANGENT = 9
    # The number of VertexElementSemantic elements (note - the first value
    # VES_POSITION is 1).  This is an alias of VES_TANGENT (same value).
    VES_COUNT = 9

    def toStr(ves):
        """
        Return the symbolic name of *ves* (an OgreVertexElementSemantic member
        or a plain int), or None when the value matches no known semantic.

        Value 9 reports "VES_TANGENT": VES_COUNT is an enum alias of it, which
        matches the ordering of the original if/elif chain.
        """
        try:
            return OgreVertexElementSemantic(ves).name
        except ValueError:
            return None
class OgreVertexElementType(IntEnum):
    """
    Vertex element type, used to identify the base types of the vertex
    contents.
    """

    VET_FLOAT1 = 0
    VET_FLOAT2 = 1
    VET_FLOAT3 = 2
    VET_FLOAT4 = 3
    # alias to more specific colour type - use the current rendersystem's colour packing
    VET_COLOUR = 4
    VET_SHORT1 = 5
    VET_SHORT2 = 6
    VET_SHORT3 = 7
    VET_SHORT4 = 8
    VET_UBYTE4 = 9
    # D3D style compact colour
    VET_COLOUR_ARGB = 10
    # GL style compact colour
    VET_COLOUR_ABGR = 11
    VET_DOUBLE1 = 12
    VET_DOUBLE2 = 13
    VET_DOUBLE3 = 14
    VET_DOUBLE4 = 15
    VET_USHORT1 = 16
    VET_USHORT2 = 17
    VET_USHORT3 = 18
    VET_USHORT4 = 19
    VET_INT1 = 20
    VET_INT2 = 21
    VET_INT3 = 22
    VET_INT4 = 23
    VET_UINT1 = 24
    VET_UINT2 = 25
    VET_UINT3 = 26
    VET_UINT4 = 27

    def toStr(vet):
        """
        Return the symbolic name of *vet*, or None for unknown values.

        Bug fix: the original if/elif chain returned mislabelled strings for
        the double/int/uint families (e.g. "VET_COLOUR_DOUBLE1" for
        VET_DOUBLE1, "VET_COLOUR_INT1" for VET_INT1) - names that do not even
        exist as constants.  The enum member name is the intended string.
        """
        try:
            return OgreVertexElementType(vet).name
        except ValueError:
            return None
class OgreVertexElement:
    """
    Declares the usage of a single vertex buffer as a component of a complete
    VertexDeclaration.

    Several vertex buffers can supply the input geometry for a rendering
    operation, and a buffer can be used in different ways for different
    operations; the buffer itself does not define the semantics (position,
    normal, etc.) - this class does.
    """

    def __init__(self, source, offset, theType, semantic, index):
        """
        source   -- index of the vertex buffer this element reads from
        offset   -- byte offset of this element inside one vertex
        theType  -- an OgreVertexElementType value
        semantic -- an OgreVertexElementSemantic value
        index    -- item index (e.g. texture-coordinate set number)
        """
        # Bug fix: the original asserted `type(source) is int` twice; the
        # intent was clearly to validate source, offset and index.
        assert type(source) is int and type(offset) is int and type(index) is int
        self._source = source
        self._offset = offset
        self._type = theType
        self._semantic = semantic
        self._index = index

    def getType(self):
        """The OgreVertexElementType of this element."""
        return self._type

    @property
    def semantic(self):
        return self._semantic

    @property
    def index(self):
        return self._index

    @property
    def offset(self):
        return self._offset

    @property
    def source(self):
        return self._source

    def getTypeSize(t):
        """Return the size in bytes of one element of type *t* (0 if unknown)."""
        VET = OgreVertexElementType
        sizes = {
            VET.VET_COLOUR: 4, VET.VET_COLOUR_ABGR: 4, VET.VET_COLOUR_ARGB: 4,
            VET.VET_UBYTE4: 4,
            VET.VET_FLOAT1: 4, VET.VET_FLOAT2: 8, VET.VET_FLOAT3: 12, VET.VET_FLOAT4: 16,
            VET.VET_DOUBLE1: 8, VET.VET_DOUBLE2: 16, VET.VET_DOUBLE3: 24, VET.VET_DOUBLE4: 32,
            VET.VET_SHORT1: 2, VET.VET_SHORT2: 4, VET.VET_SHORT3: 6, VET.VET_SHORT4: 8,
            VET.VET_USHORT1: 2, VET.VET_USHORT2: 4, VET.VET_USHORT3: 6, VET.VET_USHORT4: 8,
            VET.VET_INT1: 4, VET.VET_INT2: 8, VET.VET_INT3: 12, VET.VET_INT4: 16,
            VET.VET_UINT1: 4, VET.VET_UINT2: 8, VET.VET_UINT3: 12, VET.VET_UINT4: 16,
        }
        return sizes.get(t, 0)

    def getTypeCount(t):
        """
        Return the number of components of type *t*.

        Raises ValueError for unknown types.  NOTE: VET_UBYTE4 was not listed
        in the original if/elif chain and therefore raised ValueError; that
        behaviour is preserved here.
        """
        VET = OgreVertexElementType
        counts = {
            VET.VET_COLOUR: 1, VET.VET_COLOUR_ABGR: 1, VET.VET_COLOUR_ARGB: 1,
            VET.VET_FLOAT1: 1, VET.VET_DOUBLE1: 1, VET.VET_SHORT1: 1,
            VET.VET_USHORT1: 1, VET.VET_INT1: 1, VET.VET_UINT1: 1,
            VET.VET_FLOAT2: 2, VET.VET_DOUBLE2: 2, VET.VET_SHORT2: 2,
            VET.VET_USHORT2: 2, VET.VET_INT2: 2, VET.VET_UINT2: 2,
            VET.VET_FLOAT3: 3, VET.VET_DOUBLE3: 3, VET.VET_SHORT3: 3,
            VET.VET_USHORT3: 3, VET.VET_INT3: 3, VET.VET_UINT3: 3,
            VET.VET_FLOAT4: 4, VET.VET_DOUBLE4: 4, VET.VET_SHORT4: 4,
            VET.VET_USHORT4: 4, VET.VET_INT4: 4, VET.VET_UINT4: 4,
        }
        try:
            return counts[t]
        except KeyError:
            raise ValueError("OgreVertexElement.getTypeCount(type): Invalid type")

    def getTypePythonUnpackStr(t):
        """
        Return the struct.unpack format string for type *t*
        (e.g. 'fff' for VET_FLOAT3).  Colour types are not supported.
        """
        VET = OgreVertexElementType
        if (t == VET.VET_COLOUR or t == VET.VET_COLOUR_ABGR or t == VET.VET_COLOUR_ARGB):
            raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Color unsupported yet")
        families = (
            ((VET.VET_FLOAT1, VET.VET_FLOAT2, VET.VET_FLOAT3, VET.VET_FLOAT4), 'f'),
            ((VET.VET_DOUBLE1, VET.VET_DOUBLE2, VET.VET_DOUBLE3, VET.VET_DOUBLE4), 'd'),
            ((VET.VET_SHORT1, VET.VET_SHORT2, VET.VET_SHORT3, VET.VET_SHORT4), 'h'),
            ((VET.VET_USHORT1, VET.VET_USHORT2, VET.VET_USHORT3, VET.VET_USHORT4), 'H'),
            ((VET.VET_INT1, VET.VET_INT2, VET.VET_INT3, VET.VET_INT4), 'i'),
            ((VET.VET_UINT1, VET.VET_UINT2, VET.VET_UINT3, VET.VET_UINT4), 'I'),
        )
        for members, code in families:
            if t in members:
                return code * OgreVertexElement.getTypeCount(t)
        raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Invalid type")

    def getBestCoulourVertexElementType():
        # Blender uses OpenGL, so the GL-style packed colour is preferred.
        # (The misspelled name is kept: callers reference it as-is.)
        return OgreVertexElementType.VET_COLOUR_ABGR

    def __eq__(self, other):
        # Bug fix: the original compared `self._offet`, a nonexistent
        # attribute, so every comparison raised AttributeError.
        return (self._source == other._source
                and self._index == other._index
                and self._offset == other._offset
                and self._semantic == other._semantic
                and self._type == other._type)

    def getSize(self):
        """Size in bytes of this element."""
        return OgreVertexElement.getTypeSize(self._type)

    def extractFromBuffer(self, vertexBufferBinding, dest, endianess):
        """
        Unpack this element for every vertex of its source buffer and append
        the resulting tuples to *dest*.
        """
        buf = vertexBufferBinding.getBuffer(self.source)
        # FIXME: explicit endianness selection is not working; native byte
        # order ('=') is used regardless of the `endianess` argument.
        cmd = "=" + OgreVertexElement.getTypePythonUnpackStr(self.getType())
        print(cmd)  # debug output kept from the original implementation
        data = buf.data[self.offset:]
        for i in range(buf.numVertices):
            v = unpack_from(cmd, data, i * buf.vertexSize)
            dest.append(v)
class OgreVertexDeclaration:
    """
    Declares the format of a set of vertex inputs, which can be issued to the
    rendering API through a RenderOperation.

    For maximum compatibility with older DirectX cards, elements should be
    ordered (position, blending weights, normals, diffuse colours, specular
    colours, texture coordinates), buffers should contain no unused gaps, and
    no two elements may overlap.  Like the other classes in this area, these
    declarations should be created and destroyed via the
    HardwareBufferManager.
    """

    def __init__(self):
        self._elementList = []

    def getElements(self):
        return self._elementList

    def addElement(self, source, offset, theType, semantic, index):
        """Append a new element; VET_COLOUR is normalised to the platform colour type."""
        if theType == OgreVertexElementType.VET_COLOUR:
            theType = OgreVertexElement.getBestCoulourVertexElementType()
        element = OgreVertexElement(source, offset, theType, semantic, index)
        self._elementList.append(element)
        return element

    def insertElement(self, atPosition, source, offset, theType, semantic, index):
        """
        Insert a new element at *atPosition* (appends when the position is
        past the end).

        Bug fixes: the original referenced `_elementList` without `self`
        (NameError at runtime) and returned the LAST element of the list
        instead of the one just inserted.
        """
        if atPosition >= len(self._elementList):
            return self.addElement(source, offset, theType, semantic, index)
        element = OgreVertexElement(source, offset, theType, semantic, index)
        self._elementList.insert(atPosition, element)
        return element

    def getElement(self, index):
        return self._elementList[index]

    def removeElement(self, index):
        del self._elementList[index]

    def removeElementWithSemantic(self, semantic, index):
        """
        Remove the first element matching (semantic, index).

        Bug fix: the original iterated `range(self._elementList)` (TypeError);
        the intended call was `range(len(self._elementList))`.
        """
        for i in range(len(self._elementList)):
            e = self._elementList[i]
            if e.semantic == semantic and e.index == index:
                del self._elementList[i]
                break

    def removeAllElements(self):
        self._elementList = []

    def findElementBySemantic(self, sem, index):
        """Return the first element matching (sem, index), or None."""
        for e in self._elementList:
            if e.semantic == sem and e.index == index:
                return e
        return None

    def findElementsBySemantic(self, sem):
        """Return all elements with the given semantic."""
        return [e for e in self._elementList if e.semantic == sem]

    def findElementBySource(self, source):
        """Return all elements reading from buffer *source*."""
        return [e for e in self._elementList if e.source == source]

    def getVertexSize(self, source):
        """Total per-vertex size in bytes contributed by buffer *source*."""
        return sum(e.getSize() for e in self._elementList if e.source == source)

    def vertexElementLess(e1, e2):
        """Strict ordering on (source, semantic, index); kept for compatibility."""
        if e1.source < e2.source:
            return True
        if e1.source == e2.source:
            if e1.semantic < e2.semantic:
                return True
            if e1.semantic == e2.semantic and e1.index < e2.index:
                return True
        return False

    def sort(self):
        # Bug fix: Python 3 `list.sort` has no `cmp` argument, so the original
        # raised TypeError.  This key reproduces the vertexElementLess order.
        self._elementList.sort(key=lambda e: (e.source, e.semantic, e.index))

    def closeGapInSource(self):
        if not self._elementList:
            return
        self.sort()
        raise NotImplementedError
class OgreVertexBufferBinding:
    """
    Legacy of the Ogre design: vertex declarations and vertex buffers are
    stored separately in the mesh file, so this map associates each
    declaration source index with its buffer.
    """

    def __init__(self):
        self._bindingMap = {}

    def setBinding(self, index, vbuffer):
        # Keys are stringified so int and str source indices are equivalent.
        self._bindingMap[str(index)] = vbuffer

    def getBuffer(self, source):
        return self._bindingMap[str(source)]

    def unsetAllBindings(self):
        self._bindingMap = {}
|
import signal
import logging
import email
import os
import time
from datetime import datetime
from email.policy import default
from paho.mqtt import publish
from emqtt.plugins import EmailProcessor
from emqtt.plugins import PluginManager
from emqtt.mqtt import mqtt_packet
log = logging.getLogger('emqtt')
class EMQTTHandler:
    """SMTP handler that republishes incoming e-mails as MQTT messages.

    Intended to be passed to an aiosmtpd-style server (it implements the
    ``handle_DATA`` hook); a plugin chosen by the sender address converts
    each e-mail into an MQTT topic/payload pair.
    """

    def __init__(self, loop, config):
        # config: dict-like with MQTT_* and SAVE_* settings; loop: event loop
        # used to schedule topic resets.
        self.config = config
        self.loop = loop
        # Seconds after which a triggered topic is reset (falsy disables resets).
        self.reset_time = self.config['MQTT_RESET_TIME']
        # topic -> timer handle for that topic's pending reset.
        self.handles = {}
        self.quit = False
        # Allow a clean shutdown on SIGTERM/SIGINT.
        signal.signal(signal.SIGTERM, self.set_quit)
        signal.signal(signal.SIGINT, self.set_quit)
        if self.config['SAVE_ATTACHMENTS']:
            log.info('Configured to save attachments')

    async def handle_DATA(self, server, session, envelope):
        """SMTP DATA hook: convert the message via a plugin, publish it to
        MQTT, optionally persist the raw message and image attachments, and
        schedule a reset of the topic."""
        log.debug('Message from %s', envelope.mail_from)
        email_message = email.message_from_bytes(
            envelope.original_content,
            policy=default
        )
        log.debug(
            'Message data (truncated): %s',
            email_message.as_string()[:250]
        )
        # If enabled this saves the message content as a string to disk;
        # this is only useful for debugging or recording messages to
        # be used in tests.
        if self.config['SAVE_RAW_MESSAGES']:
            # NOTE(review): the file name comes straight from the e-mail
            # subject, so odd/malicious subjects could produce bad paths --
            # confirm inputs are trusted.
            msg_filename = email_message['subject']
            log.debug( "Saving message content: %s", msg_filename )
            file_path = os.path.join('messages', msg_filename)
            with open(file_path, 'w+') as f:
                f.write( email_message.as_string() )
        # Check the dynamic plugins; the plugin is selected by sender address.
        actions = EmailProcessor.get_plugins()
        log.debug( "Loaded processor plugins: %s", actions )
        mqtt_msg = None
        plugin = PluginManager().get_plugin( email_message['from'] )
        mqtt_msg = plugin.mqtt_message( email_message )
        self.mqtt_publish( mqtt_msg.topic, mqtt_msg.payload )
        # Save attached files if configured to do so.
        if self.config['SAVE_ATTACHMENTS'] and (
            # Don't save them during reset time unless configured to do so.
            mqtt_msg.topic not in self.handles
            or self.config['SAVE_ATTACHMENTS_DURING_RESET_TIME']):
            log.debug(
                'Saving attachments. Topic "%s" aldready triggered: %s, '
                'Save attachment override: %s',
                mqtt_msg.topic,
                mqtt_msg.topic in self.handles,
                self.config['SAVE_ATTACHMENTS_DURING_RESET_TIME']
            )
            for att in email_message.iter_attachments():
                # Just save images.
                if not att.get_content_type().startswith('image'):
                    continue
                filename = att.get_filename()
                image_data = att.get_content()
                file_path = os.path.join(self.config['ATTACHMENTS_DIRECTORY'], filename)
                log.info('Saving attached file %s to %s', filename, file_path)
                with open(file_path, 'wb') as f:
                    f.write(image_data)
        else:
            log.debug('Not saving attachments')
        log.debug(self.handles)
        # Cancel any current scheduled resets of this topic.
        if mqtt_msg.topic in self.handles:
            self.handles.pop(mqtt_msg.topic).cancel()
        if self.reset_time:
            # Schedule a reset of this topic.
            log.debug( "Sheduling reset in %ds for %s", self.reset_time, mqtt_msg.topic )
            self.handles[mqtt_msg.topic] = self.loop.call_later(
                self.reset_time,
                self.reset,
                mqtt_msg.topic
            )
        return '250 Message accepted for delivery'

    def mqtt_publish(self, topic, payload):
        """Publish a single MQTT message; failures are logged, not raised."""
        log.info('Publishing "%s" to %s', payload, topic)
        try:
            publish.single(
                topic,
                payload,
                hostname=self.config['MQTT_HOST'],
                port= self.config['MQTT_PORT'],
                # Only authenticate when a username is configured.
                auth={
                    'username': self.config['MQTT_USERNAME'],
                    'password': self.config['MQTT_PASSWORD']
                } if self.config['MQTT_USERNAME'] else None
            )
        except Exception as e:
            log.exception('Failed publishing')

    def reset(self, topic):
        """Timer callback: publish the configured reset payload to *topic*."""
        log.info(f'Resetting topic {topic}')
        self.handles.pop(topic)
        self.mqtt_publish(topic, self.config['MQTT_RESET_PAYLOAD'])

    def set_quit(self, *args):
        """Signal handler: mark the service for shutdown."""
        log.info('Quitting...')
        self.quit = True
"""A standard Weasel menu providing functions to download and upload DICOM data from/to XNAT."""
def main(weasel):
    """Build the XNAT menu with its download and upload entries.

    Bug fix: the original ended each ``menu.item(...)`` statement with a stray
    trailing comma, turning every statement into a discarded one-element
    tuple; the commas are removed here (behaviour is otherwise unchanged).
    """
    menu = weasel.menu("XNAT")
    menu.item(
        label = 'Download DICOM from XNAT',
        tooltip = 'Prompts the user to choose the images to download from XNAT',
        icon = 'Documents/images/XNAT-LOGO.png',
        pipeline = 'XNAT__App',
        functionName = 'download')
    menu.item(
        label = 'Upload DICOM to XNAT',
        tooltip = 'Prompts the user to upload the selected images to XNAT',
        icon = 'Documents/images/XNAT-LOGO.png',
        pipeline = 'XNAT__App',
        functionName = 'upload')
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import sys
from datetime import datetime
from awsglue.transforms import ApplyMapping, SelectFields, ResolveChoice
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
# System arguments
# Names of the job parameters that must be supplied on the Glue job command line.
args_list = ["job_type", "ddb_issues_table_name", "ddb_data_hierarchy_table_name", "glue_db_name", "glue_issues_table_name", "glue_data_hierarchy_table_name", "glue_output_bucket"]
args = getResolvedOptions(sys.argv, args_list) # NOSONAR: python:S4823
# Either "issues" or "hierarchy"; selects which DynamoDB table is exported (validated in main()).
JOB_TYPE = args["job_type"]
# Source DynamoDB table names.
DDB_ISSUES_TABLE_NAME = args["ddb_issues_table_name"]
DDB_DATA_HIERARCHY_TABLE_NAME = args["ddb_data_hierarchy_table_name"]
# Target Glue Data Catalog table names.
GLUE_ISSUES_TABLE_NAME = args["glue_issues_table_name"]
GLUE_DATA_HIERARCHY_TABLE_NAME = args["glue_data_hierarchy_table_name"]
GLUE_DB_NAME = args["glue_db_name"]
GLUE_OUTPUT_BUCKET = args["glue_output_bucket"]
# Sets Glue context and logging
spark_context = SparkContext()
glue_context = GlueContext(spark_context)
job = Job(glue_context)
class JobInputException(Exception):
    """Raised when the input supplied to the job is not valid."""
def log_message(msg):
    """Print *msg* (a string or list of strings) as a timestamped log entry.

    Each line is indented with two spaces so the entry is easy to read in the
    Glue job's CloudWatch logs.  Glue routes Python ``logging`` output to the
    error stream, so plain ``print`` is used instead to keep messages in the
    normal Logs section.
    """
    if not isinstance(msg, list):
        msg = [msg]
    entry = [f'****** LOG_MSG {datetime.now()} ******']
    entry.extend(f'  {line}' for line in msg)
    entry.append('')  # trailing empty line between entries
    print('\n'.join(entry))
def get_column_mapping(column_name):
    """
    Map a column from the Glue Data Catalog (generated by crawling the
    DynamoDB table) to the column of the table created in Glue, defaulting to
    string when no other data type is specified.

    Returns a 4-tuple (source_name, source_type, target_name, target_type) as
    expected by ApplyMapping.
    """
    bigint_const = "bigint##bigint"
    # Per-job-type type overrides; any column not listed is treated as string.
    # Initialising to {} also fixes the NameError the original raised when
    # JOB_TYPE was neither value (main() rejects such jobs anyway).
    data_types = {}
    if JOB_TYPE == "issues":
        data_types = {
            "version": bigint_const,
            "createddateutc": "date##date",
            "acknowledgedtime": bigint_const,
            "resolutiontime": bigint_const
        }
    elif JOB_TYPE == "hierarchy":
        data_types = {
            "version": bigint_const,
            "rootcauses": "array##string",
            "filterPolicy": "struct##string"
        }
    # Bug fix: the original tested `column_name in data_types is not None`,
    # an accidental comparison chain; plain membership is what was intended.
    if column_name in data_types:
        source_type, target_type = data_types[column_name].split("##")
        return (column_name, source_type, column_name, target_type)
    return (column_name, "string", column_name, "string")
def main():
    """This script will load data from the supplied DynamoDB Table to S3 so it can be analyzed with Athena"""
    # Pick the source DynamoDB table, target Glue table and expected columns
    # for the requested job type.
    if (JOB_TYPE == "issues"):
        DDB_TABLE_NAME = DDB_ISSUES_TABLE_NAME
        GLUE_TABLE_NAME = GLUE_ISSUES_TABLE_NAME
        # Column names as crawled from the issues table (composite sort keys
        # appear as '#'-joined names).
        FIELD_PATHS = [
            "eventid",
            "acknowledged",
            "created",
            "sitename",
            "issuesource",
            "priority",
            "areaname#status#processname#eventdescription#stationname#devicename#created",
            "version",
            "devicename",
            "devicename#eventid",
            "createdat",
            "areaname",
            "processname",
            "createddateutc",
            "eventdescription",
            "areaname#status#processname#stationname#devicename#created",
            "stationname",
            "id",
            "acknowledgedtime",
            "status",
            "updatedat",
            "closed",
            "resolutiontime",
            "createdby",
            "acknowledgedby",
            "closedby",
            "rejectedby",
            "additionaldetails"
        ]
    elif (JOB_TYPE == "hierarchy"):
        DDB_TABLE_NAME = DDB_DATA_HIERARCHY_TABLE_NAME
        GLUE_TABLE_NAME = GLUE_DATA_HIERARCHY_TABLE_NAME
        # Column names as crawled from the data-hierarchy table.
        FIELD_PATHS = [
            "createdat",
            "name",
            "description",
            "id",
            "devicestationid",
            "type",
            "version",
            "parentid",
            "updatedat",
            "areasiteid",
            "eventprocessid",
            "eventtype",
            "priority",
            "rootcauses",
            "sms",
            "eventimgkey",
            "email",
            "protocol",
            "endpoint",
            "filterpolicy",
            "subscriptionarn",
            "stationareaid",
            "processareaid",
            "alias"
        ]
    else:
        raise JobInputException(f"JOB_TYPE was invalid ({JOB_TYPE}). Expecting either \"issues\" or \"hierarchy\"")
    log_message([
        "Running with the following context:",
        f"DDB_TABLE_NAME: {DDB_TABLE_NAME}",
        f"GLUE_TABLE_NAME: {GLUE_TABLE_NAME}",
        f"GLUE_DB_NAME: {GLUE_DB_NAME}",
        f"GLUE_OUTPUT_BUCKET: {GLUE_OUTPUT_BUCKET}"
    ])
    # The crawler names catalog tables after the DynamoDB table, lowercased
    # with dashes replaced by underscores.
    DDB_TABLE_NAME_FORMATTED = DDB_TABLE_NAME.lower().replace('-', '_')
    log_message("Mapping columns")
    COLUMN_MAPPINGS = list(map(lambda x: get_column_mapping(x), FIELD_PATHS))
    log_message("Creating a Dynamic Frame from the DynamoDB table schema")
    datasource0 = glue_context.create_dynamic_frame.from_catalog(
        database = GLUE_DB_NAME,
        table_name = DDB_TABLE_NAME_FORMATTED,
        transformation_ctx = "datasource0"
    )
    log_message("Applying column mappings")
    applymapping1 = ApplyMapping.apply(
        frame = datasource0,
        mappings = COLUMN_MAPPINGS,
        transformation_ctx = "applymapping1"
    )
    log_message("Selecting fields")
    selectfields2 = SelectFields.apply(
        frame = applymapping1,
        paths = FIELD_PATHS,
        transformation_ctx = "selectfields2"
    )
    log_message("Resolving")
    # First resolve ambiguous types against the target catalog table, then
    # make structs from anything that is still ambiguous.
    resolvechoice3 = ResolveChoice.apply(
        frame = selectfields2,
        choice = "MATCH_CATALOG",
        database = GLUE_DB_NAME,
        table_name = GLUE_TABLE_NAME,
        transformation_ctx = "resolvechoice3"
    )
    resolvechoice4 = ResolveChoice.apply(
        frame = resolvechoice3,
        choice = "make_struct",
        transformation_ctx = "resolvechoice4"
    )
    log_message("Persisting data in S3")
    glue_context.write_dynamic_frame.from_catalog(
        frame = resolvechoice4,
        database = GLUE_DB_NAME,
        table_name = GLUE_TABLE_NAME,
        transformation_ctx = "datasink5"
    )
    job.commit()
    log_message("Done")
if __name__ == '__main__':
main() |
# THIS FUNCTION PARSES THE CIGAR STRING OF A READ, DISPATCHING EACH SEGMENT (MATCH/MISMATCH,
# INSERTION, DELETION, CLIP) TO ITS HANDLER AND INCREMENTING A SPECIFIC PILEUP DICTIONARY ACCORDINGLY
#
# INPUT : -PILEUP : (DICT) THE DICTIONNARY CONTAINING COUNTERS THAT ARE CHROMOSOME-POSITION-BASE-STRAND SPECIFIC
# -UMI : (STR) UMI SEQUENCE OF THE READ
# -STRAND : (INT) THE STRAND OF THE READ (0 = FORWARD | 1 = REVERSE)
# -CHROM : (STR) THE CHROMOSOME MAPPED TO THE READ
# -START : (INT) THE START POSITION OF THE READ
# -CIGAR : (STR) THE CIGAR SEQUENCE OF THE READ
# -SEQ : (STR) THE SEQUENCE OF THE READ
# -QUAL : (STR) THE QUALITY STRING OF THE READ
# -QUALS : (DICT) A DICTIONNARY FOR THE CONVERSION OF THE QUALITIES FROM ASCII TO INT
# -MIN_BASE_QUALITY : (INT) MINIMUM QUALITY SCORE OF THE BASE FOR IT TO ADDED TO THE PILEUP DICTIONNARY
# -ALL_UMIS : (DICT) A DICTIONNARY CONTAINING UMIS INDEXES
#
# VALUE : NONE
#
import re
from AddMatches import *
from AddInsertions import *
from AddDeletions import *
def AddRead(pileup, umi, strand, chrom, start, cigar, seq, qual, quals, MIN_BASE_QUALITY, ALL_UMIS):
    """
    Parse a read's CIGAR string and update the pileup dictionary.

    pileup           -- dict of chromosome/position/base/strand counters
    umi              -- (str) UMI sequence of the read
    strand           -- (int) 0 = forward, 1 = reverse
    chrom            -- (str) chromosome the read maps to
    start            -- (int) start position of the read
    cigar            -- (str) CIGAR string of the read
    seq              -- (str) sequence of the read
    qual             -- (str) quality string of the read
    quals            -- dict converting qualities from ASCII to int
    MIN_BASE_QUALITY -- (int) minimum base quality for a base to be counted
    ALL_UMIS         -- dict containing UMI indexes

    Returns False when the CIGAR contains an unexpected character; otherwise
    returns None once the pileup has been updated.
    """
    # Lengths of the consecutive CIGAR events: splitting on the event letters
    # leaves a trailing empty string when the CIGAR is well-formed.
    cigar_lengths = re.split('M|S|I|D|H', cigar)
    try:
        cigar_lengths.remove('')
    except ValueError:
        # No trailing empty element: a character outside [M,S,I,D,H] ended the
        # CIGAR and we don't know how to handle it -- skip the read.
        print('\n')
        PrintTime("error", "\t\tUnexpected character found in CIGAR ("+cigar+")... Read skipped !\n")
        return False
    # The event letters themselves, in order (leading '' removed).
    cigar_chars = re.split('[0-9]+', cigar)
    cigar_chars.remove('')
    # Drop N (skipped region) events and their lengths.  Generalised from the
    # original single-removal `if` to handle several N segments.
    # NOTE(review): with the split pattern above an N length stays glued to its
    # neighbouring tokens (e.g. "5N3"), so reads containing N are likely
    # mishandled upstream of this point -- behaviour kept as-is, to confirm.
    while "N" in cigar_chars:
        del cigar_lengths[cigar_chars.index('N')]
        cigar_chars.remove('N')
    # Two separate cursors are needed: insertions advance in the sequence
    # without moving the reference position, while deletions advance the
    # position without moving in the sequence.
    cursor_pos = 0
    cursor_seq = 0
    # Current reference position; the Add* helpers return the position where
    # their segment ended.  Initialising it to `start` replaces the original
    # try/except probe for an unbound variable (read beginning with an
    # insertion).
    position = start
    # For each CIGAR event (M|S|I|D|H)
    for i in range(0, len(cigar_chars)):
        event = cigar_chars[i]
        if event == "M":
            # Match/mismatch segment.
            maxx = int(cigar_lengths[i])
            value = AddMatches(pileup, umi, chrom, start, seq, strand, cursor_pos, cursor_seq, maxx, qual, quals, MIN_BASE_QUALITY, ALL_UMIS)
            position = value[0]
            cursor_seq = value[1]
            cursor_pos = value[2]
        elif event == "I":
            # Insertion: recorded at the position where the previous segment
            # ended (or at `start` when the read begins with an insertion).
            maxx = int(cigar_lengths[i])
            value = AddInsertions(pileup, umi, chrom, position, seq, strand, cursor_pos, cursor_seq, maxx, qual, quals, MIN_BASE_QUALITY, ALL_UMIS)
            position = value[0]
            cursor_seq = value[1]
            cursor_pos = value[2]
        elif event == "D":
            # Deletion segment.
            maxx = int(cigar_lengths[i])
            value = AddDeletions(pileup, umi, chrom, start, seq, strand, cursor_pos, cursor_seq, maxx, ALL_UMIS)
            position = value[0]
            cursor_seq = value[1]
            cursor_pos = value[2]
        elif event == "S":
            # Soft clip: clipped bases are present in `seq` but the aligner
            # did not adjust the start position.  A clip can only occur at the
            # start or the end of the read.
            if cursor_pos > 0:
                # End-of-read clip: nothing to do.  (Replaces the original
                # `20 / cursor_pos` division-by-zero probe.)
                continue
            # Start-of-read clip: correct the start position and skip the
            # clipped bases.
            clipped = int(cigar_lengths[i])
            start -= clipped
            cursor_pos += clipped
            cursor_seq += clipped
        elif event == "H":
            # Hard clip: clipped bases are absent from `seq` and the start
            # position is already corrected by the aligner.
            continue
|
import jwt
from . import GopublishTestCase
class TestApiToken(GopublishTestCase):
    """Exercise the /api/token/create endpoint: validation errors and the happy path."""

    def test_get_token_no_body(self, client):
        """A POST with no body at all is rejected with a 400."""
        response = client.post("/api/token/create")
        assert response.status_code == 400
        assert response.json.get("error") == "Missing body"

    def test_get_token_no_username(self, client):
        """A body missing the username is rejected with a 400."""
        response = client.post("/api/token/create", json={"password": "xxx"})
        assert response.status_code == 400
        assert response.json.get("error") == "Missing either username or password in body"

    def test_get_token_no_password(self, client):
        """A body missing the password is rejected with a 400."""
        response = client.post("/api/token/create", json={"username": "xxx"})
        assert response.status_code == 400
        assert response.json.get("error") == "Missing either username or password in body"

    def test_get_token(self, app, client):
        """Valid credentials yield a JWT whose payload carries the username."""
        credentials = {"username": "root", "password": "xxx"}
        response = client.post("/api/token/create", json=credentials)
        assert response.status_code == 200
        token = response.json.get("token")
        assert token
        payload = jwt.decode(token, app.config['SECRET_KEY'], algorithms=["HS256"])
        assert payload['username'] == "root"
|
"""Support for WiLight Fan."""
from __future__ import annotations
from pywilight.const import (
FAN_V1,
ITEM_FAN,
WL_DIRECTION_FORWARD,
WL_DIRECTION_OFF,
WL_DIRECTION_REVERSE,
WL_SPEED_HIGH,
WL_SPEED_LOW,
WL_SPEED_MEDIUM,
)
from homeassistant.components.fan import DIRECTION_FORWARD, FanEntity, FanEntityFeature
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
)
from . import DOMAIN, WiLightDevice
ORDERED_NAMED_FAN_SPEEDS = [WL_SPEED_LOW, WL_SPEED_MEDIUM, WL_SPEED_HIGH]
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up WiLight lights from a config entry."""
    parent = hass.data[DOMAIN][entry.entry_id]

    # Handle a discovered WiLight device: only V1 fan items become entities;
    # other item types and sub-types are ignored.
    new_entities = [
        WiLightFan(parent.api, item["index"], item["name"])
        for item in parent.api.items
        if item["type"] == ITEM_FAN and item["sub_type"] == FAN_V1
    ]
    async_add_entities(new_entities)
class WiLightFan(WiLightDevice, FanEntity):
    """Representation of a WiLights fan."""

    _attr_supported_features = FanEntityFeature.SET_SPEED | FanEntityFeature.DIRECTION

    def __init__(self, api_device, index, item_name):
        """Initialize the device."""
        super().__init__(api_device, index, item_name)
        # Last commanded (or default) spin direction; used to restore the
        # fan when it is switched back on without an explicit direction.
        self._direction = WL_DIRECTION_FORWARD

    @property
    def icon(self) -> str:
        """Return the icon of device based on its type."""
        return "mdi:fan"

    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        return self._status.get("direction", WL_DIRECTION_OFF) != WL_DIRECTION_OFF

    @property
    def percentage(self) -> int | None:
        """Return the current speed percentage (0 when off, None when unknown)."""
        if (
            "direction" in self._status
            and self._status["direction"] == WL_DIRECTION_OFF
        ):
            return 0
        if (wl_speed := self._status.get("speed")) is None:
            return None
        return ordered_list_item_to_percentage(ORDERED_NAMED_FAN_SPEEDS, wl_speed)

    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        return len(ORDERED_NAMED_FAN_SPEEDS)

    @property
    def current_direction(self) -> str:
        """Return the current direction of the fan."""
        if (
            "direction" in self._status
            and self._status["direction"] != WL_DIRECTION_OFF
        ):
            # Cache the last non-off direction reported by the device.
            self._direction = self._status["direction"]
        return self._direction

    async def async_turn_on(
        self,
        # BUG FIX: implicit Optional (`percentage: int = None`) is invalid
        # under PEP 484; the default of None requires an explicit `| None`.
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs,
    ) -> None:
        """Turn on the fan."""
        if percentage is None:
            # No speed requested: resume spinning in the last known direction.
            await self._client.set_fan_direction(self._index, self._direction)
        else:
            await self.async_set_percentage(percentage)

    async def async_set_percentage(self, percentage: int) -> None:
        """Set the speed of the fan (0 turns it off)."""
        if percentage == 0:
            await self._client.set_fan_direction(self._index, WL_DIRECTION_OFF)
            return
        if (
            "direction" in self._status
            and self._status["direction"] == WL_DIRECTION_OFF
        ):
            # The fan is off: start it before applying the requested speed.
            await self._client.set_fan_direction(self._index, self._direction)
        wl_speed = percentage_to_ordered_list_item(ORDERED_NAMED_FAN_SPEEDS, percentage)
        await self._client.set_fan_speed(self._index, wl_speed)

    async def async_set_direction(self, direction: str) -> None:
        """Set the direction of the fan."""
        wl_direction = WL_DIRECTION_REVERSE
        if direction == DIRECTION_FORWARD:
            wl_direction = WL_DIRECTION_FORWARD
        await self._client.set_fan_direction(self._index, wl_direction)

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the fan off."""
        await self._client.set_fan_direction(self._index, WL_DIRECTION_OFF)
|
def _fix_fracs(string):
substrs = string.split("\\frac")
new_str = substrs[0]
if len(substrs) > 1:
substrs = substrs[1:]
for substr in substrs:
new_str += "\\frac"
if substr[0] == "{":
new_str += substr
else:
try:
assert len(substr) >= 2
except:
return string
a = substr[0]
b = substr[1]
if b != "{":
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}{" + b + "}" + post_substr
else:
new_str += "{" + a + "}{" + b + "}"
else:
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}" + b + post_substr
else:
new_str += "{" + a + "}" + b
string = new_str
return string
def _fix_a_slash_b(string):
if len(string.split("/")) != 2:
return string
a = string.split("/")[0]
b = string.split("/")[1]
try:
a = int(a)
b = int(b)
assert string == "{}/{}".format(a, b)
new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
return new_string
except:
return string
def _remove_right_units(string):
# "\\text{ " only ever occurs (at least in the val set) when describing units
if "\\text{ " in string:
splits = string.split("\\text{ ")
assert len(splits) == 2
return splits[0]
else:
return string
def _fix_sqrt(string):
if "\\sqrt" not in string:
return string
splits = string.split("\\sqrt")
new_string = splits[0]
for split in splits[1:]:
if split[0] != "{":
a = split[0]
new_substr = "\\sqrt{" + a + "}" + split[1:]
else:
new_substr = "\\sqrt" + split
new_string += new_substr
return new_string
def _strip_string(string):
    """Canonicalise a LaTeX answer string for equality comparison.

    Applies an ORDER-DEPENDENT pipeline of textual normalisations (strip
    layout commands, units, degrees, spaces; normalise fractions, roots and
    leading zeros).  Used by `is_equiv`; not a general LaTeX parser.
    """
    # linebreaks
    string = string.replace("\n", "")
    # remove inverse spaces
    string = string.replace("\\!", "")
    # replace \\ with \ (collapses double backslashes from escaped sources)
    string = string.replace("\\\\", "\\")
    # replace tfrac and dfrac with frac
    string = string.replace("tfrac", "frac")
    string = string.replace("dfrac", "frac")
    # remove \left and \right
    string = string.replace("\\left", "")
    string = string.replace("\\right", "")
    # Remove circ (degrees)
    string = string.replace("^{\\circ}", "")
    string = string.replace("^\\circ", "")
    # remove dollar signs
    string = string.replace("\\$", "")
    # remove units (on the right)
    string = _remove_right_units(string)
    # remove percentage (both the escaped and raw-backslash spellings)
    string = string.replace("\\%", "")
    string = string.replace("\%", "")
    # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
    string = string.replace(" .", " 0.")
    string = string.replace("{.", "{0.")
    # if empty, return empty string
    if len(string) == 0:
        return string
    if string[0] == ".":
        string = "0" + string
    # to consider: get rid of e.g. "k = " or "q = " at beginning
    # (only strips a short left-hand side such as "k=" — at most 2 chars)
    if len(string.split("=")) == 2:
        if len(string.split("=")[0]) <= 2:
            string = string.split("=")[1]
    # fix sqrt3 --> sqrt{3}
    string = _fix_sqrt(string)
    # remove spaces (must happen before _fix_fracs so arguments are adjacent)
    string = string.replace(" ", "")
    # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
    string = _fix_fracs(string)
    # manually change 0.5 --> \frac{1}{2}
    if string == "0.5":
        string = "\\frac{1}{2}"
    # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
    string = _fix_a_slash_b(string)
    return string
def is_equiv(str1, str2, verbose=False):
    """Compare two LaTeX answer strings for equivalence after normalisation.

    Both strings are canonicalised with `_strip_string`; if normalisation
    fails for any reason, fall back to a raw string comparison.  Two missing
    (None) answers compare equal; exactly one missing answer does not.
    """
    if str1 is None and str2 is None:
        print("WARNING: Both None")
        return True
    if str1 is None or str2 is None:
        return False
    try:
        ss1 = _strip_string(str1)
        ss2 = _strip_string(str2)
        if verbose:
            print(ss1, ss2)
        return ss1 == ss2
    # BUG FIX: narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit during batch evaluation.
    except Exception:
        return str1 == str2
|
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
FILES = ['send_log.txt', 'recv_log.txt']
if __name__ == '__main__':
    send_list = []
    recv_list = []
    # Read both logs in lockstep: one line from each file per loop iteration.
    file_handles = {filename: open(filename, 'r') for filename in FILES}
    while 1:
        for filename, fi in file_handles.items():
            line = next(fi, None)
            if line is not None:
                line = line.rstrip('\n')
                # BUG FIX: the second field is a *string*, so the original
                # comparison `!= 0` (against the integer 0) was always true
                # and zero-valued entries were never filtered out.
                # TODO(review): confirm the log's field format — this assumes
                # a literal "0" marks an entry to skip.
                if line.split(" ")[1] != "0":
                    if filename == FILES[0]:
                        send_list.append(line.split(" ")[1])
                    else:
                        recv_list.append(line.split(" ")[1])
            else:
                fi.close()
                break
        if line is None:
            break
    data_0 = []  # original
    data_1 = []  # 2 failures
    data_2 = []  # 10 failures
    result_1 = []
    result_2 = []
    print(send_list)
    print(recv_list)
    # Entries alternate in groups of 10 per 30-sample cycle:
    # baseline, 2-failure run, 10-failure run.
    for i in range(len(send_list)):
        diff = float(send_list[i]) - float(recv_list[i])
        if i % 30 <= 9: data_0.append(diff)
        elif i % 30 <= 19: data_1.append(diff)
        else: data_2.append(diff)
    for i in range(len(data_0)):
        if data_0[i] <= 0.05:  # erase some abnormal data (normally 3s to 5s) due to record deviations
            result_1.append(0)
            result_2.append(0)
        else:
            # Percentage increase relative to the baseline latency; negative
            # values (clock skew artefacts) are clamped to 100.
            add_value_1 = (data_1[i]/data_0[i])*100
            if add_value_1 < 0:
                result_1.append(100)
            else:
                result_1.append(add_value_1)
            add_value_2 = (data_2[i]/data_0[i])*100
            if add_value_2 < 0:
                result_2.append(100)
            else:
                result_2.append(add_value_2)
    print(max(result_1))
    print(max(result_2))
    result_1.sort()
    result_2.sort()
    freq_1 = np.array(result_1)
    freq_2 = np.array(result_2)
    pdf_1 = freq_1/np.sum(freq_1)
    pdf_2 = freq_2/np.sum(freq_2)
    cdf_1 = np.cumsum(pdf_1)
    cdf_2 = np.cumsum(pdf_2)
    # BUG FIX: converted the two remaining Python 2 print statements to
    # print() calls; the rest of this script already uses the function form.
    print(cdf_1)
    print(cdf_2)
    # only plot percentage increases (skip everything up to the first
    # sample exceeding 100%)
    index = 0
    for i in range(len(result_1)):
        if result_1[i] > 100:
            index = i
            break
    my_x_ticks = np.arange(100, 1800, 400)
    my_y_ticks = np.arange(0.8, 1.02, 0.02)
    ax = plt.subplot()
    ax.scatter(freq_1[index:], cdf_1[index:], marker='o', s=70, label="Packet Latency (2 failures)")
    ax.scatter(freq_2[index:], cdf_2[index:], marker='D', s=70, label="Packet Latency (10 failures)")
    plt.ylabel('CDF over packets')
    plt.xlabel('Percentage Increase in Latency')
    plt.xticks(my_x_ticks, [0, 400, 800, 1200, 1600], fontsize=22)
    plt.yticks(my_y_ticks, fontsize=22)
    plt.ylim((0.8, 1.01))
    plt.xlim((100, 1800))
    plt.grid(linestyle='-.')
    plt.legend(loc="lower right", fontsize=18)
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.savefig("./pktLatency/figure_2.png")
|
#!/usr/bin/env python
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
# All resources below are served under this common versioned prefix.
api = Api(app, prefix="/api/v1")
# subscriber key value store
# In-memory map of subscriber name -> email; contents are lost on restart.
subscribers = {}
class Subscriber(Resource):
    """REST resource for a single subscriber, addressed by name.

    Backed by the module-level `subscribers` dict (name -> email).
    """

    # Shared body parser: every write operation requires an "email" field.
    parser = reqparse.RequestParser()
    parser.add_argument("email", type=str, required=True,
                        help="This field must not be blank.")

    def make_resp(self, name):
        """Build the canonical single-subscriber response payload.

        Raises KeyError when `name` is unknown; callers handle that.
        """
        subscriber = {name: subscribers[name]}
        return {"subscriber": subscriber}

    def get(self, name):
        """Return the subscriber, or an empty mapping when unknown."""
        try:
            return self.make_resp(name)
        # BUG FIX: narrowed from a bare `except:`; only a missing key is
        # expected here, and a bare clause would hide real errors.
        except KeyError:
            return {"subscriber": {}}

    def delete(self, name):
        """Remove the subscriber; report whether it existed."""
        try:
            del subscribers[name]
            return {"message": "[{}] deleted".format(name)}
        # BUG FIX: narrowed from a bare `except:` (see `get`).
        except KeyError:
            return {"message": "[{}] does not exist".format(name)}

    def put(self, name):
        """Create or replace the subscriber's email (idempotent upsert)."""
        args = self.parser.parse_args()
        # Replaces the original try/del/except-pass dance with a tolerant pop.
        subscribers.pop(name, None)
        subscribers[name] = args['email']
        return self.make_resp(name)

    def post(self, name):
        """Create the subscriber if absent; an existing entry is returned untouched."""
        args = self.parser.parse_args()
        if name not in subscribers:
            subscribers[name] = args['email']
        return self.make_resp(name)
class Subscribers(Resource):
    """Read-only collection view over every stored subscriber."""

    def get(self):
        """Return the whole name -> email store."""
        return {"subscriber": subscribers}
api.add_resource(Subscriber, '/subscriber/<string:name>')
# BUG FIX: the collection route was registered with the undefined lowercase
# name `subscriber`, which raises NameError at import time; the collection
# resource is the `Subscribers` class.
api.add_resource(Subscribers, '/subscriber')
if __name__ == '__main__':
    app.run(port=5000, debug=True)
|
class EventWaitHandleAuditRule(AuditRule):
    """
    Represents a set of access rights to be audited for a user or group. This class cannot be inherited.
    EventWaitHandleAuditRule(identity: IdentityReference,eventRights: EventWaitHandleRights,flags: AuditFlags)
    """
    # NOTE(review): this appears to be an auto-generated interop stub for the
    # .NET System.Security.AccessControl type of the same name — the real
    # behaviour lives in the CLR, so the Python bodies are placeholders.
    @staticmethod
    def __new__(self,identity,eventRights,flags):
        """ __new__(cls: type,identity: IdentityReference,eventRights: EventWaitHandleRights,flags: AuditFlags) """
        pass
    # Stub property triple (getter, setter, deleter): the getter returns a
    # fresh placeholder object and the setter/deleter are inert.
    AccessMask=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the access mask for this rule.
    """
    EventWaitHandleRights=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the access rights affected by the audit rule.
    Get: EventWaitHandleRights(self: EventWaitHandleAuditRule) -> EventWaitHandleRights
    """
|
__author__ = "jalfaizy"
# code for https://www.hackerrank.com/challenges/handshake
#!/bin/python
# NOTE: Python 2 source (raw_input, xrange and the `print` statement).
import sys
# T: number of test cases; each case supplies an attendee count N.
T = int(raw_input().strip())
for a0 in xrange(T):
    N = int(raw_input().strip())
    # Every pair of the N attendees shakes hands exactly once:
    # C(N, 2) = N * (N - 1) / 2 (integer division under Python 2).
    ans = (N*(N - 1))/2
    print ans
|
#!/bin/env python
#
# -*- coding: utf-8 -*-
#
# Copyright (C) University of Manchester 2012
# Julian Selley <j.selley@manchester.ac.uk>
################################################################################
"""
Test Enzyme Module
******************
This is a test suite for the enzyme module. It tests the elements of the enzyme
module.
Overview
========
@todo 201112130955 JNS: write the documentation for this test suite
"""
# Metadata
__version__ = '0.1.0'
# BUG FIX: the author string was missing its closing ">" (compare
# __copyright__ below, which has the balanced form).
__author__ = 'Julian Selley <j.selley@manchester.ac.uk>'
__copyright__ = 'Copyright 2011 Julian Selley <j.selley@manchester.ac.uk>'
__license__ = 'The Artistic License 2.0 (see the file LICENSE included with the distribution)'
# This is a bit of a *hack*
# to make this code work as part of the package.
from os.path import join as pjoin
# check ../.. for the mascot module to test
import sys
sys.path.append(pjoin('..', '..'))
# Imports
import proteomics.enzyme
import unittest
# There is no test for the struct classes as these simply store data, without
# any methods attached to test.
class DigestionEnzymeTestCase(unittest.TestCase):
    """Template for testing DigestionEnzyme sub-classes.

    DigestionEnzyme itself is abstract, so calling its `digest` must raise
    NotImplementedError; that is the single check performed here.  Concrete
    enzyme test cases extend this class and reuse `self.sequence`.
    """

    def setUp(self):
        """Provide the protein sequence shared by all digestion tests."""
        self.sequence = (
            "MAGKKGQKKSGLGNHGKNSDMDVEDRLQAVVLTDSYETRFMPLTAVKPRCLLPLANVPLI"
            "EYTLEFLAKAGVHEVFLICSSHANQINDYIENSKWNLPWSPFKITTIMSPEARCTGDVMR"
        )

    def test_abstract_static_digest(self):
        """Digesting via the abstract base class must raise NotImplementedError."""
        with self.assertRaises(NotImplementedError):
            proteomics.enzyme.DigestionEnzyme.digest(self.sequence)
class TrypsinTestCase(DigestionEnzymeTestCase):
    """Checks the Trypsin DigestionEnzyme against a known digestion."""

    def setUp(self):
        """Digest the shared sequence and record the expected fragments."""
        super(TrypsinTestCase, self).setUp()
        self.expected_peptides = (
            "MAGK K GQK K SGLGNHGK NSDMDVEDR LQAVVLTDSYETR FMPLTAVKPR "
            "CLLPLANVPLIEYTLEFLAK AGVHEVFLICSSHANQINDYIENSK WNLPWSPFK "
            "ITTIMSPEAR CTGDVMR"
        ).split()
        self.observed_peptides = proteomics.enzyme.Trypsin.digest(self.sequence)

    def test_trypsin_digestion(self):
        """The observed digest must match the expected peptides exactly."""
        self.assertEqual(len(self.observed_peptides),
                         len(self.expected_peptides),
                         'check the length of digested peptides')
        self.assertEqual(self.observed_peptides,
                         self.expected_peptides,
                         'check all elements are equal')
# if this test is being run from the command line, generate the relevant suites,
# combine them together and then run them.
if __name__ == '__main__':
    # Build one combined suite from both test cases and run it verbosely.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(
        loader.loadTestsFromTestCase(case)
        for case in (DigestionEnzymeTestCase, TrypsinTestCase)
    )
    unittest.TextTestRunner(verbosity=2).run(suite)
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, UpdateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from .permissions import IsStaffOrSelf
from .serializers import UserSerializer
User = get_user_model()
class UserViewSet(RetrieveModelMixin, ListModelMixin, UpdateModelMixin, GenericViewSet):
    """User API: retrieve, list and update users, plus `me` and group assignment."""

    serializer_class = UserSerializer
    permission_classes = [IsStaffOrSelf]
    queryset = User.objects.all()
    lookup_field = "username"

    @action(detail=False, methods=["GET"])
    def me(self, request):
        """Return the serialized representation of the requesting user."""
        data = UserSerializer(request.user, context={"request": request}).data
        return Response(status=status.HTTP_200_OK, data=data)

    @action(detail=True, methods=["POST"])
    def groups(self, request, username=None):
        """Replace the target user's group membership with a single named group."""
        target: User = self.get_object()
        # Users may not edit their own group assignments.
        if target == request.user:
            raise ValidationError(
                detail={"detail": "You cannot change your assigned groups"}
            )
        if "group" not in request.data:
            raise ValidationError(detail={"group": "This field is required"})
        group_name = request.data["group"]
        if group_name == "User":
            # "User" is the implicit default group: clearing membership suffices.
            target.groups.clear()
        else:
            # Look up the group *before* clearing, so an unknown name 404s
            # without modifying the user's current membership.
            group: Group = get_object_or_404(Group, name=group_name)
            target.groups.clear()
            group.user_set.add(target)
        return Response(status=status.HTTP_202_ACCEPTED)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
# This module is part of the package MOM.DBW.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# MOM.DBW._DBS_
#
# Purpose
# Encapsulate db-specific functionality
#
# Revision Dates
# 23-Jun-2010 (CT) Creation
# 30-Nov-2010 (CT) `Fatal_Exceptions` added (here, an empty tuple)
# 14-Jun-2011 (MG) `url` add the `query` of the `db_url` to
# `scheme_auth` (to allow specification of the the mysql
# socket file)
# 15-Jun-2011 (MG) `url` fixed (only add `query` if it is not empty)
# 27-Apr-2012 (MG) `reserve_cid` added
# ««revision-date»»···
#--
from _MOM import MOM
from _TFL import TFL
from _TFL import sos
import _MOM._DBW._Manager_
import _TFL._Meta.Object
import _TFL.Url
class _M_DBS_ (TFL.Meta.Object.__class__) :
    """Meta class for DBS classes."""
    def __init__ (cls, name, bases, dct) :
        ### Register every concrete (non-underscore-prefixed) DBS class in
        ### the manager's scheme -> class map, keyed by the URL scheme the
        ### class declares, so backends can be looked up from a db URL.
        cls.__m_super.__init__ (name, bases, dct)
        if not name.startswith ("_") :
            MOM.DBW._Manager_.DBS_map [cls.scheme] = cls
    # end def __init__
# end class _M_DBS_
class _DBS_ (TFL.Meta.Object, metaclass = _M_DBS_) :
    """Base class for DBS classes.

    Provides no-op defaults for backend-specific hooks; concrete database
    backends override what they need.
    """
    ### Exceptions that should be treated as fatal; none for the base class.
    Fatal_Exceptions = ()
    @classmethod
    def create_database (cls, db_url, manager) :
        ### Default: no explicit work needed to create the database;
        ### subclasses override as required.
        pass
    # end def create_database
    @classmethod
    def delete_database (cls, db_url, manager) :
        ### Best effort: remove the database file; a missing file is ignored.
        try :
            sos.unlink (db_url.path)
        except OSError :
            pass
    # end def delete_database
    @classmethod
    def reserve_cid (cls, connection, cid) :
        ### Hook for backends that need to reserve `cid`; no-op by default.
        pass
    # end def reserve_cid
    @classmethod
    def reserve_pid (cls, connection, pid) :
        ### Hook for backends that need to reserve `pid`; no-op by default.
        pass
    # end def reserve_pid
    @classmethod
    def Url (cls, value, ANS, default_path = None) :
        ### Parse `value` into a TFL.Url, falling back to `default_path`
        ### when the URL carries no path.  `scheme_auth` (scheme+authority,
        ### plus any query — e.g. a mysql socket file specification) is
        ### precomputed for the backends' connection setup.
        result = TFL.Url (value, fs_path = True)
        if not result.path and default_path is not None :
            result = TFL.Url.new (result, path = default_path, fs_path = True)
        result.scheme_auth = "://".join ((result.scheme, result.authority))
        if result.query :
            result.scheme_auth = "?".join ((result.scheme_auth, result.query))
        result.create = False
        return result
    # end def Url
# end class _DBS_
# Export the base class through the MOM namespace machinery whenever the
# module is imported as part of the package (never when run directly).
if __name__ != "__main__" :
    MOM.DBW._Export ("_DBS_")
### __END__ MOM.DBW._DBS_
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import matplotlib as mpl
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from ..cache import get_memory
from ..cache.cloudpickle_backend import register_backend as register_cloudpickle_backend
from ..data import *
from ..logging_config import enable_logging
from ..qstat import get_ncpus
from .analysis import *
from .cx1_fitting import CX1Fit
from .plotting import *
__all__ = ("rf_time_lag_grid_search",)
logger = logging.getLogger(__name__)
register_cloudpickle_backend()
memory = get_memory("time_lags", backend="cloudpickle", verbose=100)
# Creating the Data Structures used for Fitting
@memory.cache
def get_data(shift_months=None, selection_variables=None, masks=None):
    """Assemble and preprocess the climatological fire datasets.

    Results are cached on disk via `memory.cache`, so the argument values
    double as cache keys.

    Args:
        shift_months: Optional iterable of month offsets; for each offset a
            temporally shifted copy of the shiftable datasets is added.
        selection_variables: Optional list of variable names to keep.  When
            None, a default set (plus shifted variants when `shift_months`
            is given) is used.  Variables required by the transformations
            below are always included.
        masks: Forwarded to `data_processing`.
            NOTE(review): presumably additional spatial masks — confirm
            against `data_processing`'s signature.

    Returns:
        Tuple of (endog_data, exog_data, master_mask, filled_datasets,
        masked_datasets, land_mask), as produced by `data_processing`.
    """
    target_variable = "GFED4 BA"
    # Creation of new variables.
    transformations = {
        "Temp Range": lambda exog_data: (exog_data["Max Temp"] - exog_data["Min Temp"])
    }
    # Variables to be deleted after the aforementioned transformations.
    deletions = ("Min Temp",)
    # Variables required for the above.
    required_variables = ["Max Temp", "Min Temp", target_variable]
    # Carry out transformations, replacing old variables in the process.
    # log_var_names = ["Temp Range", "Dry Day Period"]
    # sqrt_var_names = [
    #     # "Lightning Climatology",
    #     "popd"
    # ]
    # Dataset selection.
    # TODO: Make this selection process more elegant.
    selection_datasets = [
        AvitabileThurnerAGB(),
        ERA5_Temperature(),
        Copernicus_SWI(),
        ERA5_CAPEPrecip(),
        ESA_CCI_Landcover_PFT(),
        GFEDv4(),
        # GlobFluo_SIF(),  # TODO: Fix regridding!!
        HYDE(),
    ]
    # These datasets will potentially be shifted.
    datasets_to_shift = [ERA5_DryDayPeriod(), MOD15A2H_LAI_fPAR(), VODCA()]
    selection_datasets += datasets_to_shift
    if shift_months is not None:
        for shift in shift_months:
            for shift_dataset in datasets_to_shift:
                # Negative `months` shifts the data back in time, so month t
                # carries the value from t - shift.
                selection_datasets.append(
                    shift_dataset.get_temporally_shifted_dataset(
                        months=-shift, deep=False
                    )
                )
    if selection_variables is None:
        selection_variables = [
            "AGB Tree",
            "Max Temp",
            "Min Temp",
            "SWI(1)",
            "CAPE x Precip",
            "Dry Day Period",
            "ShrubAll",
            "TreeAll",
            "pftCrop",
            "pftHerb",
            # "SIF",  # TODO: Fix regridding!
            "popd",
            "FAPAR",
            "LAI",
            "VOD Ku-band",
        ]
        if shift_months is not None:
            for shift in shift_months:
                selection_variables.extend(
                    [
                        f"LAI {-shift} Month",
                        f"FAPAR {-shift} Month",
                        f"Dry Day Period {-shift} Month",
                        f"VOD Ku-band {-shift} Month",
                    ]
                )
    # Always retain the variables the transformations depend on.
    selection_variables = list(set(selection_variables).union(required_variables))
    selection = Datasets(selection_datasets).select_variables(selection_variables)
    (
        endog_data,
        exog_data,
        master_mask,
        filled_datasets,
        masked_datasets,
        land_mask,
    ) = data_processing(
        selection,
        which="climatology",
        transformations=transformations,
        deletions=deletions,
        # log_var_names=log_var_names,
        # sqrt_var_names=sqrt_var_names,
        use_lat_mask=False,
        use_fire_mask=False,
        target_variable=target_variable,
        masks=masks,
    )
    return (
        endog_data,
        exog_data,
        master_mask,
        filled_datasets,
        masked_datasets,
        land_mask,
    )
def print_header(ncol=70, char="#", fill=""):
    """Print a three-line banner: two full rules around a centred `fill` label.

    Note: the middle line is always built from "#" regardless of `char`,
    matching the existing behaviour.
    """
    rule = char * ncol
    print(rule)
    banner = np.array(list("#" * ncol))
    pad = len(fill)
    lo = int((ncol / 2) - (pad / 2)) - 1
    hi = lo + pad + 2
    banner[lo:hi] = list(f" {fill} ")
    print("".join(banner))
    print(rule)
def rf_time_lag_grid_search():
    """Grid-search random-forest hyperparameters on CX1 for lagged/unlagged data.

    For each of the two dataset variants (no temporal shifts vs. shifts of
    1/3/6/12/24 months) this submits a CX1 fitting job, waits up to an hour
    for the best model, then reports R2 scores, feature importances, VIFs
    and partial-dependence plots.

    NOTE(review): `plt`, `print_vifs`, `FigureSaver` and
    `partial_dependence_plot` are not imported by name above — presumably
    re-exported via the `import *` modules; confirm.
    """
    # Hyperparameter Optimisation Using CX1
    for shift_months, data_name in zip(
        (None, [1, 3, 6, 12, 24]), ("full_no_shift", "full_shift")
    ):
        logger.info(f"RF with data: {data_name}.")
        print_header(fill=data_name)
        (
            endog_data,
            exog_data,
            master_mask,
            filled_datasets,
            masked_datasets,
            land_mask,
        ) = get_data(shift_months=shift_months)
        # Define the training and test data.
        X_train, X_test, y_train, y_test = train_test_split(
            exog_data, endog_data, random_state=1, shuffle=True, test_size=0.3
        )
        # Define the parameter space.
        parameters_RF = {
            "n_estimators": [10, 50, 100],
            "max_depth": [None, 10, 20],
            "min_samples_split": [2, 5, 10],
            "min_samples_leaf": [3, 10, 20],
            "max_features": ["auto"],
            "bootstrap": [False, True],
            "random_state": [1],
        }
        fitting = CX1Fit(
            X_train, y_train, data_name=data_name, param_grid=parameters_RF
        )
        fitting.run_job()
        # Block (up to 1 hour) waiting for the remote job's best model.
        output = fitting.get_best_model(timeout=60 * 60)
        if output:
            logger.info("Output found")
            regr = output["model"]
            regr.n_jobs = get_ncpus()
            print("RF n_jobs:", regr.n_jobs)
            print(regr)
            regr.predict(X_test)
            # Carry out predictions on the training dataset to diagnose overfitting.
            regr.predict(X_train)
            results = {}
            results["R2_train"] = regr.score(X_train, y_train)
            results["R2_test"] = regr.score(X_test, y_test)
            model_name = "RF"
            print(f"{model_name} R2 train: {results['R2_train']}")
            print(f"{model_name} R2 test: {results['R2_test']}")
            importances = regr.feature_importances_
            # Spread of per-tree importances, as a stability indicator.
            std = np.std(
                [tree.feature_importances_ for tree in regr.estimators_], axis=0
            )
            importances_df = pd.DataFrame(
                {
                    "Name": exog_data.columns.values,
                    "Importance": importances,
                    "Importance STD": std,
                    "Ratio": np.array(std) / np.array(importances),
                }
            )
            print(
                "\n"
                + str(
                    importances_df.sort_values("Importance", ascending=False).to_string(
                        index=False, float_format="{:0.3f}".format, line_width=200
                    )
                )
            )
            print("VIFs")
            print_vifs(exog_data)
            with FigureSaver(
                [f"pdp_{data_name}_{feature}" for feature in X_test.columns]
            ):
                fig_axes = partial_dependence_plot(
                    regr,
                    X_test,
                    X_test.columns,
                    n_cols=4,
                    grid_resolution=70,
                    coverage=0.05,
                    predicted_name="burned area",
                    single_plots=True,
                )
                plt.subplots_adjust(wspace=0.16)
        else:
            logger.info("No output found")
if __name__ == "__main__":
    # Standalone run: fit one RF on a fixed, hand-picked set of (mostly
    # time-shifted) climatological variables and report diagnostics.
    enable_logging()
    # NOTE(review): `warnings` and `plt` are not imported by name above —
    # presumably re-exported via the `import *` modules; confirm.
    warnings.filterwarnings("ignore", ".*Collapsing a non-contiguous coordinate.*")
    warnings.filterwarnings("ignore", ".*DEFAULT_SPHERICAL_EARTH_RADIUS*")
    warnings.filterwarnings("ignore", ".*guessing contiguous bounds*")
    normal_coast_linewidth = 0.5
    mpl.rc("figure", figsize=(14, 6))
    mpl.rc("font", size=9.0)
    np.random.seed(1)
    shift_months = [1, 3, 6, 12, 24]
    selection_variables = (
        "VOD Ku-band -3 Month",
        "SIF",
        "VOD Ku-band -1 Month",
        "Dry Day Period -3 Month",
        "FAPAR",
        "pftHerb",
        "LAI -1 Month",
        "popd",
        "Dry Day Period -24 Month",
        "pftCrop",
        "FAPAR -1 Month",
        "FAPAR -24 Month",
        "Max Temp",
        "Dry Day Period -6 Month",
        "VOD Ku-band -6 Month",
        # Extra 5 split.
        # "Dry Day Period -1 Month",
        # "FAPAR -6 Month",
        # "ShrubAll",
        # "SWI(1)",
        # "TreeAll",
    )
    (
        endog_data,
        exog_data,
        master_mask,
        filled_datasets,
        masked_datasets,
        land_mask,
    ) = get_data(shift_months=shift_months, selection_variables=selection_variables)
    n_vars = len(exog_data.columns)
    data_name = f"clim_{n_vars}{'_shifted' if shift_months is not None else ''}"
    # Figures are written under ~/tmp/time_lags/<data_name>/.
    FigureSaver.directory = os.path.expanduser(
        os.path.join("~", "tmp", "time_lags", data_name)
    )
    os.makedirs(FigureSaver.directory, exist_ok=True)
    FigureSaver.debug = True
    # Define the training and test data.
    X_train, X_test, y_train, y_test = train_test_split(
        exog_data, endog_data, random_state=1, shuffle=True, test_size=0.3
    )
    # Define the parameter space.
    parameters_RF = {
        "n_estimators": 100,
        "max_depth": None,
        "min_samples_split": 2,
        "min_samples_leaf": 3,
        "max_features": "auto",
        "bootstrap": True,
        "random_state": 1,
    }
    regr = RandomForestRegressor(**parameters_RF, n_jobs=get_ncpus())
    # Refit the model on all the data and store this as well.
    regr.fit(X_train, y_train)
    y_pred = regr.predict(X_test)
    # Carry out predictions on the training dataset to diagnose overfitting.
    y_pred_train = regr.predict(X_train)
    print(regr)
    results = {}
    results["R2_train"] = regr.score(X_train, y_train)
    results["R2_test"] = regr.score(X_test, y_test)
    model_name = "RF"
    print(f"{model_name} R2 train: {results['R2_train']}")
    print(f"{model_name} R2 test: {results['R2_test']}")
    importances = regr.feature_importances_
    # Spread of per-tree importances, as a stability indicator.
    std = np.std([tree.feature_importances_ for tree in regr.estimators_], axis=0)
    importances_df = pd.DataFrame(
        {
            "Name": exog_data.columns.values,
            "Importance": importances,
            "Importance STD": std,
            "Ratio": np.array(std) / np.array(importances),
        }
    )
    print(
        "\n"
        + str(
            importances_df.sort_values("Importance", ascending=False).to_string(
                index=False, float_format="{:0.3f}".format, line_width=200
            )
        )
    )
    print("VIFs")
    print_vifs(exog_data)
    with FigureSaver([f"pdp_{data_name}_{feature}" for feature in X_test.columns]):
        fig_axes = partial_dependence_plot(
            regr,
            X_test,
            X_test.columns,
            n_cols=4,
            grid_resolution=50,
            coverage=0.6,
            predicted_name="burned area",
            single_plots=True,
            log_x_scale=("Dry Day Period", "popd"),
            X_train=X_train,
            plot_range=False,
        )
        plt.subplots_adjust(wspace=0.16)
|
# @lint-ignore-every PYTHON3COMPATIMPORTS
from .linear_relu import LinearReLU
from .conv_relu import ConvReLU2d, ConvReLU3d
__all__ = [
'LinearReLU',
'ConvReLU2d',
'ConvReLU3d',
]
|
from time import sleep
import dbus
class Spotify:
    """Wrapper around Spotify's MPRIS D-Bus interface.

    If Spotify (or the session bus) is unavailable, the instance degrades
    gracefully: `isSpotifyPlaying` is False, `currentSong` holds a fallback
    message, and every control method becomes a no-op.
    """

    def __init__(self):
        try:
            self.spotify_bus = dbus.SessionBus().get_object("org.mpris.MediaPlayer2.spotify",
                                                            "/org/mpris/MediaPlayer2")
        # BUG FIX: narrowed from a bare `except:`; a bare clause would also
        # swallow KeyboardInterrupt/SystemExit.
        except Exception:
            self.isSpotifyPlaying = False
        else:
            self.isSpotifyPlaying = True
        if self.isSpotifyPlaying:
            self.spotify_properties = dbus.Interface(self.spotify_bus, "org.freedesktop.DBus.Properties")
            self.spotify_player = dbus.Interface(self.spotify_bus, "org.mpris.MediaPlayer2.Player")
            self.metadata = self.spotify_properties.Get("org.mpris.MediaPlayer2.Player", "Metadata")
            self.currentSong = self.to_song()
        else:
            self.currentSong = "Spotify is not playing anything"

    def update_song(self):
        """Refresh `currentSong` from D-Bus; returns self when playing, else None."""
        if self.isSpotifyPlaying:
            self.metadata = self.spotify_properties.Get("org.mpris.MediaPlayer2.Player", "Metadata")
            self.currentSong = self.to_song()
            return self
        else:
            # Intentionally returns None in this branch, as before.
            self.currentSong = "Spotify is not playing anything"

    def to_song(self):
        """Build a Song from the current MPRIS metadata (None when idle)."""
        if self.isSpotifyPlaying:
            album = str(self.metadata["xesam:album"])
            artist = str(self.metadata["xesam:artist"][0].strip())
            title = str(self.metadata["xesam:title"])
            spotifyURL = str(self.metadata["xesam:url"])
            return Song(title, artist, spotifyURL, album)

    def PlayPause(self):
        """Toggle playback (no-op when Spotify is unavailable)."""
        if self.isSpotifyPlaying:
            self.spotify_player.PlayPause()

    def Next(self):
        """Skip forward, then re-read the track after a short settle delay."""
        if self.isSpotifyPlaying:
            self.spotify_player.Next()
            sleep(0.5)
            self.update_song()

    def Previous(self):
        """Skip backward, then re-read the track after a short settle delay."""
        if self.isSpotifyPlaying:
            self.spotify_player.Previous()
            sleep(0.5)
            self.update_song()
class Song:
    """Simple record of a single track's display metadata."""

    def __init__(self, title, artist, spotifyURL="", album=""):
        self.title, self.artist = title, artist
        self.album, self.spotifyURL = album, spotifyURL

    def __str__(self):
        return f"""\
    Album: {self.album}
    Artist: {self.artist}
    Title: {self.title}
    Spotify URL: {self.spotifyURL}"""

    def __repr__(self):
        return "repr: \n" + str(self)
# Manual smoke test: print whatever Spotify reports as the current track
# (or the fallback message when Spotify is not reachable over D-Bus).
if __name__ == "__main__":
    print(Spotify().currentSong)
|
# encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
# --- run configuration & raw data loading ----------------------------------
start_time=time.time()
print("Starting job at time:", time.time())
# debug=True loads a small deterministic sample (100k shuffled train rows,
# 1k rows of everything else) for fast iteration.
debug = True
print("loading data ...")
# Only these two columns are needed from the (very large) *_active files.
used_cols = ["item_id", "user_id"]
if debug == False:
    train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
    y = train_df["deal_probability"]
    test_df = pd.read_csv("../input/test.csv", parse_dates=["activation_date"])
    # suppl
    train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
    test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
    train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
    test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
    # Shuffle with a fixed seed, then keep the first 100k rows as the sample.
    train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
    train_df = shuffle(train_df, random_state=1234);
    train_df = train_df.iloc[:100000]
    y = train_df["deal_probability"]
    test_df = pd.read_csv("../input/test.csv", nrows=1000, parse_dates=["activation_date"])
    # suppl
    train_active = pd.read_csv("../input/train_active.csv", nrows=1000, usecols=used_cols)
    test_active = pd.read_csv("../input/test_active.csv", nrows=1000, usecols=used_cols)
    train_periods = pd.read_csv("../input/periods_train.csv", nrows=1000, parse_dates=["date_from", "date_to"])
    test_periods = pd.read_csv("../input/periods_test.csv", nrows=1000, parse_dates=["date_from", "date_to"])
print("loading data done!")
# =============================================================================
# Add image quality: by steeve
# =============================================================================
# Each pickle holds {'features': array, 'ids': image ids}; scores are joined
# onto train/test via the "image" column. The same pattern repeats for
# blurriness, whiteness and dullness below.
import pickle
with open('../input/inception_v3_include_head_max_train.p', 'rb') as f:
    x = pickle.load(f)
train_features = x['features']
train_ids = x['ids']
with open('../input/inception_v3_include_head_max_test.p', 'rb') as f:
    x = pickle.load(f)
test_features = x['features']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_features, columns=['image_quality'])
incep_test_image_df = pd.DataFrame(test_features, columns=[f'image_quality'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
# Blurriness scores from the shared image-features pickle.
with open('../input/train_image_features.p', 'rb') as f:
    x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
    x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns=['blurinesses'])
incep_test_image_df = pd.DataFrame(test_blurinesses, columns=[f'blurinesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding whitenesses ...')
with open('../input/train_image_features.p', 'rb') as f:
    x = pickle.load(f)
train_whitenesses = x['whitenesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
    x = pickle.load(f)
test_whitenesses = x['whitenesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_whitenesses, columns=['whitenesses'])
incep_test_image_df = pd.DataFrame(test_whitenesses, columns=[f'whitenesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding dullnesses ...')
with open('../input/train_image_features.p', 'rb') as f:
    x = pickle.load(f)
train_dullnesses = x['dullnesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
    x = pickle.load(f)
test_dullnesses = x['dullnesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_dullnesses, columns=['dullnesses'])
incep_test_image_df = pd.DataFrame(test_dullnesses, columns=[f'dullnesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
# =============================================================================
# add geo info: https://www.kaggle.com/frankherfert/avito-russian-region-cities/data
# =============================================================================
# tmp = pd.read_csv("../input/avito_region_city_features.csv", usecols=["region", "city", "latitude","longitude"])
# train_df = train_df.merge(tmp, on=["city","region"], how="left")
# train_df["lat_long"] = train_df["latitude"]+train_df["longitude"]
# test_df = test_df.merge(tmp, on=["city","region"], how="left")
# test_df["lat_long"] = test_df["latitude"]+test_df["longitude"]
# del tmp; gc.collect()
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/region_income.csv", sep=";", names=["region", "income"])
train_df = train_df.merge(tmp, on="region", how="left")
test_df = test_df.merge(tmp, on="region", how="left")
del tmp;
gc.collect()
# =============================================================================
# Add city population (header said "region-income" again — copy/paste slip)
# =============================================================================
tmp = pd.read_csv("../input/city_population_wiki_v3.csv")
train_df = train_df.merge(tmp, on="city", how="left")
test_df = test_df.merge(tmp, on="city", how="left")
del tmp;
gc.collect()
# =============================================================================
# Here Based on https://www.kaggle.com/bminixhofer/aggregated-features-lightgbm/code
# =============================================================================
# Per-user activity aggregates built from train+test plus the *_active
# supplementary listings and their posting periods.
all_samples = pd.concat([train_df, train_active, test_df, test_active]).reset_index(drop=True)
all_samples.drop_duplicates(["item_id"], inplace=True)
del train_active, test_active;
gc.collect()
all_periods = pd.concat([train_periods, test_periods])
del train_periods, test_periods;
gc.collect()
# How long each item was up, and how many times it was (re)posted.
all_periods["days_up"] = (all_periods["date_to"] - all_periods["date_from"]).dt.days
gp = all_periods.groupby(["item_id"])[["days_up"]]
gp_df = pd.DataFrame()
gp_df["days_up_sum"] = gp.sum()["days_up"]
gp_df["times_put_up"] = gp.count()["days_up"]
gp_df.reset_index(inplace=True)
# NOTE(review): rename() returns a new frame that is discarded here; it is a
# no-op anyway since reset_index already emitted an "item_id" column.
gp_df.rename(index=str, columns={"index": "item_id"})
all_periods.drop_duplicates(["item_id"], inplace=True)
all_periods = all_periods.merge(gp_df, on="item_id", how="left")
all_periods = all_periods.merge(all_samples, on="item_id", how="left")
# Per-user means of the item-level aggregates, plus listing counts.
gp = all_periods.groupby(["user_id"])[["days_up_sum", "times_put_up"]].mean().reset_index() \
    .rename(index=str, columns={"days_up_sum": "avg_days_up_user",
                                "times_put_up": "avg_times_up_user"})
n_user_items = all_samples.groupby(["user_id"])[["item_id"]].count().reset_index() \
    .rename(index=str, columns={"item_id": "n_user_items"})
gp = gp.merge(n_user_items, on="user_id", how="outer")  # left
del all_samples, all_periods, n_user_items
gc.collect()
train_df = train_df.merge(gp, on="user_id", how="left")
test_df = test_df.merge(gp, on="user_id", how="left")
agg_cols = list(gp.columns)[1:]
del gp;
gc.collect()
# Users absent from the periods data get -1 for every aggregate.
for col in agg_cols:
    train_df[col].fillna(-1, inplace=True)
    test_df[col].fillna(-1, inplace=True)
print("merging supplimentary data done!")
# =============================================================================
# done! go to the normal steps
# =============================================================================
def rmse(predictions, targets):
    """Root-mean-squared error between two aligned numeric arrays."""
    print("calculating RMSE ...")
    diff = predictions - targets
    return np.sqrt(np.mean(diff ** 2))
def read_stopwords():
    """Load the local Russian stop-word list (one word per line) as a set."""
    with open('RussianStopWords.txt',encoding='utf-8') as fin:
        return {line.strip() for line in fin}
def text_preprocessing(text):
    """Lower-case *text*, strip escaped-unicode artifacts, pad digit runs
    with spaces, and collapse whitespace. Behaviour-preserving rewrite.
    """
    text = str(text).lower()
    text = re.sub(r"(\\u[0-9A-Fa-f]+)", r"", text)
    text = re.sub(r"===", r" ", text)
    # Separate digit runs from surrounding text with spaces.
    # https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
    pieces = re.split('(\d+)', text)
    text = " ".join(map(str.strip, pieces))
    # NOTE(review): Python `re` does not support POSIX classes; this pattern
    # actually matches "a char not in '[:alph'" followed by a literal ']'.
    # Kept byte-identical to preserve the features the model was tuned on.
    regex = re.compile(u'[^[:alpha:]]')
    text = regex.sub(" ", text)
    return " ".join(text.split())
def text_preprocessing_v2(text):
    '''
    Newer preprocessing function: lower-cases, removes a handful of junk
    symbols/emoji, pads known brand tokens with spaces so they survive
    tokenisation, then applies the same digit-splitting cleanup as
    text_preprocessing. Commented-out substitutions are kept as an
    experiment log (they did not improve validation RMSE).
    :param text: any value; coerced with str()
    :return: cleaned, space-normalised string
    '''
    text = str(text)
    text = text.lower()
    text = re.sub(r'\\xa0', ' ', text)
    text = re.sub(r'\●', ' ', text)
    text = re.sub(r'\😎', ' ', text)
    text = re.sub(r'\👍', ' ', text)
    text = re.sub(r'\»', ' ', text)
    text = re.sub(r'\«', ' ', text)
    text = re.sub(r'\↓', ' ', text)
    text = re.sub(r'iphone', ' iphone ', text)
    text = re.sub(r'samsung', ' samsung ', text)
    text = re.sub(r'apple', ' apple ', text)
    text = re.sub(r'dell', ' dell ', text)
    text = re.sub(r'seilmann', ' steilmann ', text)
    text = re.sub(r'multipad', ' multipad ', text)
    text = re.sub(r'triple', ' triple ', text)
    text = re.sub(r'philip', ' philip ', text)
    # the substitutions above gave a ~0.0001 improvement
    # another ~0.0001 improvement
    text = re.sub(r'ipod', ' ipod ', text)
    # text = re.sub(r'ip4200', ' canon4200 ', text)
    # text = re.sub(r'ip4300', ' canon4300 ', text)
    # text = re.sub(r'ip4500', ' canon4500 ', text)
    # text = re.sub(r'mp500', ' canon500 ', text)
    # text = re.sub(r'mp530', ' canon530 ', text)
    # text = re.sub(r'mp610', ' canon610 ', text)
    # # the above had no effect
    # #
    #
    # text = re.sub(r'hamburg', ' hamburg ', text)
    # text = re.sub(r'lumia', ' lumia ', text)
    # text = re.sub(r'seagate', ' seagate ', text)
    #
    # text = re.sub(r'512mb', ' 512mb ', text)
    # text = re.sub(r'128mb', ' 128mb ', text)
    # text = re.sub(r'256mb', ' 256mb ', text)
    # text = re.sub(r'16gb', ' 16gb ', text)
    # text = re.sub(r'32gb', ' 32gb ', text)
    # text = re.sub(r'64gb', ' 64gb ', text)
    # text = re.sub(r'500gb', ' 500gb ', text)
    # text = re.sub(r'260gb', ' 260gb ', text)
    # text = re.sub(r'250gb', ' 250gb ', text)
    # text = re.sub(r'320gb', ' 320gb ', text)
    # text = re.sub(r'1000gb', ' 1000gb ', text)
    # text = re.sub(r'20gb', ' 20gb ', text)
    #
    # text = re.sub(r'\®', ' ', text)
    # text = re.sub(r'intel', ' intel ', text)
    #
    # text = re.sub(r'canon', ' canon ', text)
    # text = re.sub(r'adidas', ' adidas ', text)
    # text = re.sub(r'gucci', ' gucci ', text)
    # # no real improvement; left disabled
    # text = re.sub(r'\\u200b', ' ', text)
    # text = re.sub(r'\\u200d', ' ', text)
    # text = re.sub(r'\квартира', ' \квартира ', text)
    # text = re.sub(r'nokia', ' nokia ', text)
    # text = re.sub(r'sony', ' sony ', text)
    # text = re.sub(r'xiaomi', ' xiaomi ', text)
    text = re.sub(r'asusintel', ' asus intel ', text)
    text = re.sub(r'00asus', ' asus ', text)
    text = re.sub(r'chevrolet', ' chevrolet ', text)
    text = re.sub(r'nikenike', ' nike ', text)
    # panasoni, 0.236955
    text = re.sub(r'\™', ' ', text)
    # text = re.sub(r'panasoni', ' panasonic ', text)
    # mean rmse is: 0.2369177999350502
    text = re.sub(r'compac', ' compac ', text)
    # text = re.sub(r'tomy', ' tomy ', text)
    # text = re.sub(r'✔', ' ', text)
    # text = re.sub(r'👌', ' ', text)
    # text = re.sub(r'💰', ' ', text)
    # text = re.sub(r'❤', ' ', text)
    # text = re.sub(r'htc', ' htc ', text)
    #
    # text = re.sub(r'playstation', ' playstation ', text)
    #
    # text = re.sub(r'huawei', ' huawei ', text)
    #
    # text = re.sub(r'motorola', ' motorola ', text)
    # text = re.sub(r'meizu', ' meizu ', text)
    # text = re.sub(r'nikon', ' nikon ', text)
    #
    # #
    # text = re.sub(r'toshiba', ' toshiba ', text)
    text = re.sub(r'gtx', ' gtx ', text)
    text = re.sub(r"(\\u[0-9A-Fa-f]+)",r"", text)
    text = re.sub(r"===",r" ", text)
    # https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
    text = " ".join(map(str.strip, re.split('(\d+)',text)))
    # NOTE(review): POSIX classes are not supported by `re`; see the note in
    # text_preprocessing — this pattern does not do what it appears to do.
    regex = re.compile(u'[^[:alpha:]]')
    text = regex.sub(" ", text)
    text = " ".join(text.split())
    return text
def split_rus(text):
    """Insert a single space at every boundary between ASCII (ord < 256)
    and non-ASCII runs, e.g. "ххab" -> "хх ab".

    Quirk preserved from the original: no separator is ever inserted
    before an ASCII char at positions 0-1 (the ``idx > 1`` test).
    """
    out = []
    prev_was_ascii = False
    for idx, ch in enumerate(text):
        if ord(ch) < 256:
            if not prev_was_ascii and idx > 1:
                out.append(' ')
            prev_was_ascii = True
        else:
            if prev_was_ascii:
                out.append(' ')
            prev_was_ascii = False
        out.append(ch)
    return ''.join(out)
def feature_engineering(df):
    """Run all in-place feature-engineering steps on *df* and return it.

    Mutates df: fills NAs, builds hashed param-text columns, label-encodes
    categoricals, adds count/date/text-statistic features and drops
    activation_date / item_id.

    FIX(review): the original decorated this function with @contextmanager
    even though it is not a generator — any ``with feature_engineering(...)``
    use would crash, and the call site only relied on the in-place mutation
    (the helper object returned was discarded). The decorator has been
    removed so the function now plainly returns df.
    """
    def Do_Text_Hash(df):
        """Concatenate param_1..3 into combined text features, then clean
        all text columns with text_preprocessing."""
        print("feature engineering -> hash text ...")
        df["text_feature"] = df.apply(lambda row: " ".join([str(row["param_1"]),
                                                            str(row["param_2"]), str(row["param_3"])]), axis=1)
        df["text_feature_2"] = df.apply(lambda row: " ".join([str(row["param_2"]), str(row["param_3"])]), axis=1)
        # df["title_description"] = df.apply(lambda row: " ".join([str(row["title"]), str(row["description"])]), axis=1)
        print("feature engineering -> preprocess text ...")
        df["text_feature"] = df["text_feature"].apply(lambda x: text_preprocessing(x))
        df["text_feature_2"] = df["text_feature_2"].apply(lambda x: text_preprocessing(x))
        df["description"] = df["description"].apply(lambda x: text_preprocessing(x))
        df["title"] = df["title"].apply(lambda x: text_preprocessing(x))
        # df["title_description"] = df["title_description"].apply(lambda x: text_preprocessing_v2(x))
        # new feature
        # df["description"] = df["description"].apply(lambda x: split_rus(x))
        # df["title"] = df["title"].apply(lambda x: split_rus(x))

    def Do_Datetime(df):
        """Add the posting weekday (0=Mon .. 6=Sun) as uint8."""
        print("feature engineering -> date time ...")
        df["wday"] = df["activation_date"].dt.weekday
        df["wday"] = df["wday"].astype(np.uint8)

    def Do_Label_Enc(df):
        """Label-encode every categorical column (stringified first so NaN
        becomes the literal 'nan' category)."""
        print("feature engineering -> lable encoding ...")
        lbl = LabelEncoder()
        cat_col = ["user_id", "region", "city", "parent_category_name",
                   "category_name", "user_type", "image_top_1",
                   "param_1", "param_2", "param_3", "image",
                   ]
        for col in cat_col:
            df[col] = lbl.fit_transform(df[col].astype(str))
        gc.collect()

    import string
    # Count how many characters of l1 appear in the collection l2.
    # NOTE(review): callers pass "[A-Z]" / "[А-Я]" as l2 — that is literal
    # character membership (counts only '[', 'A', '-', 'Z', ']'), NOT a
    # regex range. Kept as-is: "fixing" it would change trained features.
    count = lambda l1, l2: sum([1 for x in l1 if x in l2])

    def Do_NA(df):
        """Fill missing values with sentinel strings / -1 so downstream
        encoders and vectorizers never see NaN."""
        print("feature engineering -> fill na ...")
        df["image_top_1"].fillna(-1, inplace=True)
        df["image"].fillna("noinformation", inplace=True)
        df["param_1"].fillna("nicapotato", inplace=True)
        df["param_2"].fillna("nicapotato", inplace=True)
        df["param_3"].fillna("nicapotato", inplace=True)
        df["title"].fillna("nicapotato", inplace=True)
        df["description"].fillna("nicapotato", inplace=True)
        # price vs income
        # df["price_vs_city_income"] = df["price"] / df["income"]
        # df["price_vs_city_income"].fillna(-1, inplace=True)

    def Do_Count(df):
        """Punctuation/caps counts plus keyword-presence flags for a set of
        condition words (Russian) and brand names."""
        print("feature engineering -> do count ...")
        # some count
        df["num_desc_punct"] = df["description"].apply(lambda x: count(x, set(string.punctuation))).astype(np.uint16)
        df["num_desc_capE"] = df["description"].apply(lambda x: count(x, "[A-Z]")).astype(np.uint16)
        df["num_desc_capP"] = df["description"].apply(lambda x: count(x, "[А-Я]")).astype(np.uint16)
        df["num_title_punct"] = df["title"].apply(lambda x: count(x, set(string.punctuation))).astype(np.uint16)
        df["num_title_capE"] = df["title"].apply(lambda x: count(x, "[A-Z]")).astype(np.uint16)
        df["num_title_capP"] = df["title"].apply(lambda x: count(x, "[А-Я]")).astype(np.uint16)
        # good, used, bad ... condition keywords
        df["is_in_desc_хорошо"] = df["description"].str.contains("хорошо").map({True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_Плохо"] = df["description"].str.contains("Плохо").map({True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_новый"] = df["description"].str.contains("новый").map({True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_старый"] = df["description"].str.contains("старый").map({True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_используемый"] = df["description"].str.contains("используемый").map({True: 1, False: 0}).astype(
            np.uint8)
        df["is_in_desc_есплатная_доставка"] = df["description"].str.contains("есплатная доставка").map(
            {True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_есплатный_возврат"] = df["description"].str.contains("есплатный возврат").map(
            {True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_идеально"] = df["description"].str.contains("идеально").map({True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_подержанный"] = df["description"].str.contains("подержанный").map({True: 1, False: 0}).astype(
            np.uint8)
        df["is_in_desc_пСниженные_цены"] = df["description"].str.contains("Сниженные цены").map(
            {True: 1, False: 0}).astype(np.uint8)
        # new brand-presence features (title)
        df["is_in_desc_iphone"] = df["title"].str.contains("iphone").map({True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_ipod"] = df["title"].str.contains("ipod").map({True: 1, False: 0}).astype(np.uint8)
        df["is_in_desc_samsung"] = df["title"].str.contains("samsung").map({True: 1, False: 0}).astype(np.uint8)
        # further brand flags were tried and disabled (no improvement):
        # df["is_in_desc_philip"] = df["title"].str.contains("philip").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_canon"] = df["title"].str.contains("canon").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_gtx"] = df["title"].str.contains("gtx").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_gucci"] = df["title"].str.contains("gucci").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_adidas"] = df["title"].str.contains("adidas").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_panasonic"] = df["title"].str.contains("panasonic").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_intel"] = df["title"].str.contains("intel").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_nokia"] = df["title"].str.contains("nokia").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_sony"] = df["title"].str.contains("sony").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_xiaomi"] = df["title"].str.contains("xiaomi").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_asus"] = df["title"].str.contains("asus").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_playstation"] = df["title"].str.contains("playstation").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_motorola"] = df["title"].str.contains("motorola").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_meizu"] = df["title"].str.contains("meizu").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_nikon"] = df["title"].str.contains("nikon").map({True: 1, False: 0}).astype(np.uint8)
        # df["is_in_desc_toshiba"] = df["title"].str.contains("toshiba").map({True: 1, False: 0}).astype(np.uint8)
        df["num_title_Exclamation"] = df["title"].apply(lambda x: count(x, "!")).astype(np.int16)
        df["num_title_Question"] = df["title"].apply(lambda x: count(x, "?")).astype(np.int16)
        df["num_desc_Exclamation"] = df["description"].apply(lambda x: count(x, "!")).astype(np.int16)
        df["num_desc_Question"] = df["description"].apply(lambda x: count(x, "?")).astype(np.int16)

    def Do_Drop(df):
        """Drop columns that must not reach the model."""
        df.drop(["activation_date", "item_id"], axis=1, inplace=True)

    def Do_Stat_Text(df):
        """Char/word counts and unique-word ratios for every text column."""
        print("feature engineering -> statistics in text ...")
        textfeats = ["text_feature", "text_feature_2", "description", "title"]
        for col in textfeats:
            df[col + "_num_chars"] = df[col].apply(len)
            df[col + "_num_words"] = df[col].apply(lambda comment: len(comment.split()))
            df[col + "_num_unique_words"] = df[col].apply(lambda comment: len(set(w for w in comment.split())))
            df[col + "_words_vs_unique"] = df[col + "_num_unique_words"] / df[col + "_num_words"] * 100
        gc.collect()

    # choose which functions to run — order matters: NAs must be filled
    # before hashing/encoding, and Do_Drop must run last.
    Do_NA(df)
    Do_Text_Hash(df)
    Do_Label_Enc(df)
    Do_Count(df)
    Do_Datetime(df)
    Do_Stat_Text(df)
    Do_Drop(df)
    gc.collect()
    return df
def data_vectorize(df):
    """Fit/apply the text vectorizers over all rows of *df*.

    Returns (df with text columns dropped and NAs filled with -1,
    sparse text-feature matrix, flat vocabulary list).
    """
    russian_stop1 = set(stopwords.words("russian"))
    russian_stop2 = read_stopwords()
    # Intersection of NLTK's list and the local file scored best
    # (see the experiment log at the bottom of this script).
    russian_stop=list(set(russian_stop1).intersection(set(russian_stop2)))
    tfidf_para = {
        "stop_words": russian_stop,
        "analyzer": "word",
        "token_pattern": r"\w{1,}",
        "sublinear_tf": True,
        "dtype": np.float32,
        "norm": "l2",
        # "min_df":5,
        # "max_df":.9,
        "smooth_idf": False
    }
    # Character-level variant; only used by the disabled title_char branch.
    tfidf_para2 = {
        "stop_words": russian_stop,
        "analyzer": "char",
        "token_pattern": r"\w{1,}",
        "sublinear_tf": True,
        "dtype": np.float32,
        "norm": "l2",
        # "min_df":5,
        # "max_df":.9,
        "smooth_idf": False
    }
    def get_col(col_name): return lambda x: x[col_name]
    vectorizer = FeatureUnion([
        ("description", TfidfVectorizer(
            ngram_range=(1, 2),
            max_features=40000,  # 40000,18000
            **tfidf_para,
            preprocessor=get_col("description"))
         ),
        # ("title_description", TfidfVectorizer(
        #     ngram_range=(1, 2),#(1,2)
        #     max_features=1800,#40000,18000
        #     **tfidf_para,
        #     preprocessor=get_col("title_description"))
        #  ),
        ("text_feature", CountVectorizer(
            ngram_range=(1, 2),
            preprocessor=get_col("text_feature"))
         ),
        ("title", TfidfVectorizer(
            ngram_range=(1, 2),
            **tfidf_para,
            preprocessor=get_col("title"))
         ),
        # newly added text processors: title2 (unigram) and title_char
        ("title2", TfidfVectorizer(
            ngram_range=(1, 1),
            **tfidf_para,
            preprocessor=get_col("title"))
         ),
        # the char branch doubles the running time — disabled
        # ("title_char", TfidfVectorizer(
        #
        #     ngram_range=(1, 4),  # (1, 4),(1,6)
        #     max_features=16000,  # 16000
        #     **tfidf_para2,
        #     preprocessor=get_col("title"))
        #  ),
        # # added 2018-6-3, very slow — disabled
        # ("description_feature", CountVectorizer(
        #     ngram_range=(1, 2),
        #     stop_words= russian_stop,
        #     max_features=8000,
        #     preprocessor=get_col("description"))
        #  ),
    ])
    vectorizer.fit(df.to_dict("records"))
    ready_full_df = vectorizer.transform(df.to_dict("records"))
    # NOTE(review): get_feature_names was removed in scikit-learn >= 1.2
    # (use get_feature_names_out there) — confirm the pinned version.
    tfvocab = vectorizer.get_feature_names()
    df.drop(["text_feature", "text_feature_2", "description", "title",
             # "title_description"
             ], axis=1, inplace=True)
    df.fillna(-1, inplace=True)
    return df, ready_full_df, tfvocab
# =============================================================================
# Ridge feature https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
# =============================================================================
class SklearnWrapper(object):
    """Adapter exposing a uniform train/predict API over an sklearn estimator.

    ``seed`` is written into ``params`` as ``random_state`` (mutating the
    caller's dict) unless ``seed_bool`` is falsy; the estimator class
    ``clf`` is then constructed from ``params``.
    """

    def __init__(self, clf, seed=0, params=None, seed_bool=True):
        if seed_bool == True:
            params['random_state'] = seed
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        """Fit the wrapped estimator."""
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        """Predict with the (already fitted) estimator."""
        return self.clf.predict(x)
NFOLDS = 10  # number of ridge OOF folds (was 5)
SEED = 42    # shared RNG seed for fold splits and the ridge model
def get_oof(clf, x_train, y, x_test):
    """Out-of-fold predictions for a SklearnWrapper-style model.

    Uses the module-level fold iterator ``kf`` plus the globals
    ``len_train``, ``len_test`` and ``NFOLDS``. Returns
    (oof_train, oof_test) as column vectors; the test predictions are the
    mean over all fold models.
    """
    oof_train = np.zeros((len_train,))
    oof_test = np.zeros((len_test,))
    fold_test_preds = np.empty((NFOLDS, len_test))
    for fold, (trn_idx, val_idx) in enumerate(kf):
        # print('Ridege oof Fold {}'.format(fold))
        y_arr = np.array(y)
        clf.train(x_train[trn_idx], y_arr[trn_idx])
        oof_train[val_idx] = clf.predict(x_train[val_idx])
        fold_test_preds[fold, :] = clf.predict(x_test)
    oof_test[:] = fold_test_preds.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
# Stack train+test so every transform is applied identically; remember the
# split point and the test item_ids for the submission index.
full_df = pd.concat([train_df, test_df])
sub_item_id = test_df["item_id"]
len_train = len(train_df)
len_test = len(test_df)
# NOTE(review): this is the legacy sklearn.cross_validation KFold API
# (positional n + n_folds); the model_selection KFold imported further down
# later shadows this name for the LightGBM loop.
kf = KFold(len_train, n_folds=NFOLDS, shuffle=True, random_state=SEED)
# =============================================================================
# handle price
# =============================================================================
def feature_Eng_On_Price_SEQ(df):
    """Log-scale ``price`` and add coarse integer buckets, in place.

    Adds/overwrites: price (float32 log(price+0.001), NaN -> -1),
    price+ (int16 bucket, factor 2.8 — was 4.8) and item_seq_number+
    (int16, /100). Returns the same df.
    """
    print('feature engineering -> on price and SEQ ...')
    log_price = np.log(df["price"] + 0.001).astype("float32")
    df["price"] = log_price
    df["price"].fillna(-1, inplace=True)
    df["price+"] = np.round(df["price"] * 2.8).astype(np.int16)  # 4.8
    df["item_seq_number+"] = np.round(df["item_seq_number"] / 100).astype(np.int16)
    return df
# Hold out 10% of the training slice of full_df (taken before the in-place
# feature engineering below); note this rebinds train_df.
train_df, val_df = train_test_split(
    full_df.iloc[:len_train], test_size=0.1, random_state=42)  # 23
def feature_Eng_On_Deal_Prob(df, df_train):
    """Placeholder for target-aggregate (deal-probability median) features.

    Every aggregate is currently commented out, so this just prints a
    message and returns *df* unchanged; kept as an experiment log.
    """
    print('feature engineering -> on price deal prob +...')
    df2 = df
    # tmp = df_train.groupby(["price+"], as_index=False)['deal_probability'].median().rename(columns={'deal_probability':'median_deal_probability_price+'})
    # df = pd.merge(df, tmp, how='left', on=["price+"])
    # df2['median_deal_probability_price+'] = df['median_deal_probability_price+']
    # df2['median_deal_probability_price+'] =df2['median_deal_probability_price+'].astype(np.float32)
    # del tmp; gc.collect()
    #
    # tmp = df_train.groupby(["item_seq_number+"], as_index=False)['deal_probability'].median().rename(columns={'deal_probability':'median_deal_probability_item_seq_number+'})
    # df = pd.merge(df, tmp, how='left', on=["item_seq_number+"])
    # df2['median_deal_probability_item_seq_number+'] = df['median_deal_probability_item_seq_number+']
    # df2['median_deal_probability_item_seq_number+'] =df2['median_deal_probability_item_seq_number+'].astype(np.float32)
    # tmp = df.groupby(["image_top_1"], as_index=False)['price'].median().rename(columns={'price':'median_price_image_top_1'})
    # df = pd.merge(df, tmp, how='left', on=["image_top_1"])
    # df2['median_price_image_top_1'] = df['median_price_image_top_1']
    # df2['median_price_image_top_1'] = df2['median_price_image_top_1'].astype(np.float32)
    # df2['median_price_image_top_1'] = df2['median_price_image_top_1']
    # df2.fillna(-1, inplace=True)
    # del tmp; gc.collect()
    return df2
# Drop the target from the stacked frame before feature engineering.
del full_df['deal_probability'];
gc.collect()
# =============================================================================
# run feature engineering (mutates full_df in place) + additional image data
# =============================================================================
feature_engineering(full_df)
feature_Eng_On_Price_SEQ(full_df)
feature_Eng_On_Price_SEQ(train_df)
# mean-based target aggregates intentionally disabled
# feature_Eng_On_Deal_Prob(full_df, train_df)
del train_df, test_df;
gc.collect()
full_df, ready_full_df, tfvocab = data_vectorize(full_df)
# 'alpha':20.0
# NOTE(review): 'normalize' was removed from Ridge in sklearn 1.2 —
# confirm the pinned scikit-learn version supports it.
ridge_params = {'alpha': 20.0, 'fit_intercept': True, 'normalize': False, 'copy_X': True,
                'max_iter': None, 'tol': 0.001, 'solver': 'auto', 'random_state': SEED}
ridge = SklearnWrapper(clf=Ridge, seed=SEED, params=ridge_params)
ready_df = ready_full_df
# Ridge OOF meta-features: one over the dense frame, one over the sparse
# text matrix; both clipped into [0, 1].
print('ridge 1 oof ...')
ridge_oof_train, ridge_oof_test = get_oof(ridge, np.array(full_df)[:len_train], y, np.array(full_df)[len_train:])
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
full_df['ridge_preds_1'] = ridge_preds
full_df['ridge_preds_1'].clip(0.0, 1.0, inplace=True)
print('ridge 2 oof ...')
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:len_train], y, ready_df[len_train:])
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
full_df['ridge_preds_2'] = ridge_preds
full_df['ridge_preds_2'].clip(0.0, 1.0, inplace=True)
del ridge_oof_train, ridge_oof_test, ridge_preds, ridge, ready_df
gc.collect()
print("Modeling Stage ...")
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(full_df.iloc[:len_train]), ready_full_df[:len_train]])  # Sparse Matrix
tfvocab = full_df.columns.tolist() + tfvocab
X_test_full = full_df.iloc[len_train:]
X_test_ready = ready_full_df[len_train:]
del ready_full_df, full_df
gc.collect()
print("Feature Names Length: ", len(tfvocab))
# Columns LightGBM should treat as categorical (label-encoded earlier).
cat_col = [
    "user_id",
    "region",
    "city",
    "parent_category_name",
    "category_name",
    "user_type",
    "image_top_1",
    "param_1",
    "param_2",
    "param_3",
    "price+",
    "item_seq_number+",
]
# NOTE(review): this re-import shadows the sklearn.cross_validation.KFold
# used for the ridge OOF above with the modern model_selection API.
from sklearn.model_selection import KFold
kf = KFold(n_splits=10, random_state=42, shuffle=True)
numIter = 0
rmse_sume = 0.
# Only the first numLimit of the 10 folds are actually trained (see loop).
numLimit = 5
# 10-fold CV over the training rows; folds beyond numLimit fall through the
# `pass` branch, so 5 models are trained and 5 submissions written.
for train_index, valid_index in kf.split(y):
    numIter += 1
    if numIter >= numLimit + 1:
        pass
    else:
        print("Modeling Stage ...")
        X_train, X_valid = X.tocsr()[train_index], X.tocsr()[valid_index]
        y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
        gc.collect()
        # NOTE(review): "tree_method" is not a LightGBM parameter (LightGBM
        # itself warns "Unknown parameter" — see the log below); it is ignored.
        lgbm_params = {
            "tree_method": "feature",
            "num_threads": 7,
            "task": "train",
            "boosting_type": "gbdt",
            "objective": "regression",
            "metric": "rmse",
            # "max_depth": 15,
            "num_leaves": 500,  # 280,360,500,32
            "feature_fraction": 0.2,  # 0.4
            "bagging_fraction": 0.2,  # 0.4
            "learning_rate": 0.015,  # 0.015
            "verbose": -1,
            'lambda_l1': 1,
            'lambda_l2': 1,
            "max_bin": 200,
        }
        lgtrain = lgb.Dataset(X_train, y_train,
                              feature_name=tfvocab,
                              categorical_feature=cat_col)
        lgvalid = lgb.Dataset(X_valid, y_valid,
                              feature_name=tfvocab,
                              categorical_feature=cat_col)
        lgb_clf = lgb.train(
            lgbm_params,
            lgtrain,
            num_boost_round=32000,
            valid_sets=[lgtrain, lgvalid],
            valid_names=["train", "valid"],
            early_stopping_rounds=200,
            verbose_eval=100,  # 200
        )
        print("save model ...")
        joblib.dump(lgb_clf, "lgb_{}.pkl".format(numIter))
        ## load model
        # lgb_clf = joblib.load("lgb.pkl")
        print("Model Evaluation Stage")
        print("RMSE:", rmse(y_valid, lgb_clf.predict(X_valid, num_iteration=lgb_clf.best_iteration)))
        # Per-fold submission over the held-out test rows.
        test = hstack([csr_matrix(X_test_full), X_test_ready])  # Sparse Matrix
        lgpred = lgb_clf.predict(test, num_iteration=lgb_clf.best_iteration)
        lgsub = pd.DataFrame(lgpred, columns=["deal_probability"], index=sub_item_id)
        lgsub["deal_probability"].clip(0.0, 1.0, inplace=True)  # Between 0 and 1
        lgsub.to_csv("ml_lgb_sub_{}.csv".format(numIter), index=True, header=True)
        rmse_sume += rmse(y_valid, lgb_clf.predict(X_valid, num_iteration=lgb_clf.best_iteration))
        del X_train, X_valid, y_train, y_valid, lgtrain, lgvalid
        gc.collect()
print("mean rmse is:", rmse_sume / numLimit)
# Feature importances of the last trained fold model.
print("Features importance...")
bst = lgb_clf
gain = bst.feature_importance("gain")
ft = pd.DataFrame({"feature": bst.feature_name(), "split": bst.feature_importance("split"),
                   "gain": 100 * gain / gain.sum()}).sort_values("gain", ascending=False)
print(ft.head(50))
#
# plt.figure()
# ft[["feature","gain"]].head(50).plot(kind="barh", x="feature", y="gain", legend=False, figsize=(10, 20))
# plt.gcf().savefig("features_importance.png")
print("time costs:{}".format(time.time()-start_time))
print("All Done.")
"""
10w mengfei的特征
calculating RMSE ...
mean rmse is: 0.227470908737
calculating RMSE ...
mean rmse is: 0.227452125002
10w 仅仅统计iPhone等3个特征
calculating RMSE ...
mean rmse is: 0.227380797381
停用词的并集
calculating RMSE ...
mean rmse is: 0.227500445884
停用词的交集
calculating RMSE ...
mean rmse is: 0.227282540687
1w去掉char功能
calculating RMSE ...
mean rmse is: 0.2358583443750936
time costs:206.2117302417755
1w 不加新特征
calculating RMSE ...
mean rmse is: 0.235880369348843
mean rmse is: 0.2358012922087179
1w 新特征 5折
calculating RMSE ...
mean rmse is: 0.23640032088836488
calculating RMSE ...
mean rmse is: 0.236380033591672
calculating RMSE ...
mean rmse is: 0.23623534673154473
5w 不要新特征
calculating RMSE ...
mean rmse is: 0.23150961548765264
5w 要新特征
calculating RMSE ...
mean rmse is: 0.23133951850101137
10w 原来 5折平均
calculating RMSE ...
mean rmse is: 0.2275115776789228
改进后 5折平均
calculating RMSE ...
mean rmse is: 0.22747084242905377
mean rmse is: 0.22719179805910664
time costs:4384.6224999427795
calculating RMSE ...
mean rmse is: 0.22723049318423594
time costs:4685.339641332626
calculating RMSE ...
mean rmse is: 0.22724368413992296
time costs:3650.90975856781
10w
Training until validation scores don't improve for 200 rounds.
[100] train's rmse: 0.217028 valid's rmse: 0.234943
[200] train's rmse: 0.192643 valid's rmse: 0.233402
[300] train's rmse: 0.173163 valid's rmse: 0.233263
[400] train's rmse: 0.157665 valid's rmse: 0.234255
Early stopping, best iteration is:
[239] train's rmse: 0.18445 valid's rmse: 0.23315
save model ...
Model Evaluation Stage
calculating RMSE ...
RMSE: 0.23315031644628909
/home/deepcam/anaconda2/envs/py36/lib/python3.6/site-packages/lightgbm/basic.py:447: UserWarning: Converting data to scipy sparse matrix.
warnings.warn('Converting data to scipy sparse matrix.')
calculating RMSE ...
Modeling Stage ...
[LightGBM] [Warning] Unknown parameter: tree_method
Training until validation scores don't improve for 200 rounds.
[100] train's rmse: 0.215289 valid's rmse: 0.247115
[200] train's rmse: 0.191162 valid's rmse: 0.246157
[300] train's rmse: 0.171891 valid's rmse: 0.246581
Early stopping, best iteration is:
[162] train's rmse: 0.199329 valid's rmse: 0.246073
save model ...
Model Evaluation Stage
calculating RMSE ...
RMSE: 0.24607253175351204
calculating RMSE ...
Modeling Stage ...
[LightGBM] [Warning] Unknown parameter: tree_method
[LightGBM] [Warning] Met negative value in categorical features, will convert it to NaN
[LightGBM] [Warning] Met negative value in categorical features, will convert it to NaN
Training until validation scores don't improve for 200 rounds.
[100] train's rmse: 0.215796 valid's rmse: 0.240517
[200] train's rmse: 0.19131 valid's rmse: 0.238724
[300] train's rmse: 0.172705 valid's rmse: 0.239144
[400] train's rmse: 0.157289 valid's rmse: 0.239466
Early stopping, best iteration is:
[208] train's rmse: 0.189765 valid's rmse: 0.238603
save model ...
Model Evaluation Stage
calculating RMSE ...
RMSE: 0.2386025086637869
calculating RMSE ...
Modeling Stage ...
[LightGBM] [Warning] Unknown parameter: tree_method
Training until validation scores don't improve for 200 rounds.
[100] train's rmse: 0.216788 valid's rmse: 0.234751
[200] train's rmse: 0.191797 valid's rmse: 0.231529
[300] train's rmse: 0.172888 valid's rmse: 0.230917
[400] train's rmse: 0.157764 valid's rmse: 0.230966
[500] train's rmse: 0.145616 valid's rmse: 0.230711
[600] train's rmse: 0.134631 valid's rmse: 0.230809
Early stopping, best iteration is:
[487] train's rmse: 0.147029 valid's rmse: 0.230622
full data
fold 1: [2787] train's rmse: 0.17505 valid's rmse: 0.215004
fold 2: [2586] train's rmse: 0.176207 valid's rmse: 0.214477
100k data
-------------------------------------------------------------------------------------------
mean rmse is: 0.22698800710415235 - reduce max_bin to 200 - 2folds LB:0.2207
mean rmse is: 0.22710252553320615 - add median_price_image_top_1 #在2208版去除
mean rmse is: 0.22723174589672093 - add image information ---------------------------------
mean rmse is: 0.22736661491719415 - add price_vs_city_income #在2fold-2207版本去除
mean rmse is: 0.22768780227534266 - price+ 4.8 to 2.8 -------------------------------------
mean rmse is: 0.22780198548953648 - original - 2folds LB 0.2198
[200] train's rmse: 0.216075 valid's rmse: 0.220928
[300] train's rmse: 0.211498 valid's rmse: 0.218822
[400] train's rmse: 0.208222 valid's rmse: 0.217763
[500] train's rmse: 0.205433 valid's rmse: 0.217029
[600] train's rmse: 0.202888 valid's rmse: 0.216503
[700] train's rmse: 0.200629 valid's rmse: 0.216155
[800] train's rmse: 0.198475 valid's rmse: 0.215904
[900] train's rmse: 0.196621 valid's rmse: 0.215725
[1000] train's rmse: 0.194879 valid's rmse: 0.215554
[1100] train's rmse: 0.193265 valid's rmse: 0.215432
[1200] train's rmse: 0.191686 valid's rmse: 0.215331
[1300] train's rmse: 0.19016 valid's rmse: 0.215254
[1400] train's rmse: 0.188807 valid's rmse: 0.215205
[1500] train's rmse: 0.187473 valid's rmse: 0.215139
[1600] train's rmse: 0.18622 valid's rmse: 0.215099
[1700] train's rmse: 0.185041 valid's rmse: 0.215064
[1800] train's rmse: 0.183911 valid's rmse: 0.21503
[1900] train's rmse: 0.182841 valid's rmse: 0.215007
[2000] train's rmse: 0.181771 valid's rmse: 0.214982
[2100] train's rmse: 0.180755 valid's rmse: 0.21495
[2200] train's rmse: 0.179723 valid's rmse: 0.214936
[2300] train's rmse: 0.178825 valid's rmse: 0.21492
[2400] train's rmse: 0.177879 valid's rmse: 0.214896
[2500] train's rmse: 0.176977 valid's rmse: 0.21488
[2600] train's rmse: 0.176063 valid's rmse: 0.214868
[2700] train's rmse: 0.17521 valid's rmse: 0.214865
""" |
# -*- coding: utf-8 -*-
import logging
import smtplib
from contextlib import contextmanager
from functools import wraps
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import encoders
from etutorservice.utils.config_helper import config
from etutorservice.utils.datetime_helper import get_now
logger = logging.getLogger(__name__)
class _SmtpManager(object):
def __init__(self):
self.__client = None
@staticmethod
def __get_mail_client():
smtp_config = config.data['smtp']
server_host = smtp_config['host']
server_port = smtp_config['port']
user_name = smtp_config['user_name']
password = smtp_config['password']
timeout = smtp_config.get('timeout', 30)
client = smtplib.SMTP(server_host, server_port, timeout=timeout)
client.starttls()
client.login(user_name, password)
return client
def __is_connected(self):
try:
status = self.__client.noop()[0]
except smtplib.SMTPServerDisconnected:
status = -1
return True if status == 250 else False
def get_mail_client(self):
if not self.__client or not self.__is_connected():
self.__client = self.__get_mail_client()
return self.__client
smtp_manager = _SmtpManager()
class MailSender(object):
    """Sends e-mail through the shared ``smtp_manager`` SMTP connection."""

    def __init__(self):
        # Populated from smtp_manager on each send.
        self.__client = None

    def send_email(self, to_addresses, title, content, from_address=None,
                   content_type='html', charset='utf-8', files=None):
        """
        Send an e-mail.

        :param to_addresses: recipient addresses (iterable of strings; joined
            with ';' for the ``To`` header and passed as-is to ``sendmail``)
        :param title: message subject
        :param content: message body
        :param content_type: MIME subtype of the body (default ``html``)
        :param charset: body character set
        :param from_address: sender address; defaults to the configured
            SMTP user name
        :param files: attachment payload.  NOTE(review): despite the plural
            name, ``__attach_file`` calls ``files.getvalue()``, so this must
            be a single in-memory file-like object (StringIO/BytesIO), not a
            list of file names -- confirm against callers.
        :return: None
        """
        self.__client = smtp_manager.get_mail_client()
        if not from_address:
            from_address = config.data['smtp']['user_name']
        if files:
            message = self.__attach_file(files)
            message.attach(MIMEText(content, _subtype=content_type,
                                    _charset=charset))
        else:
            message = MIMEText(content, _subtype=content_type, _charset=charset)
        message['Subject'] = title
        message['From'] = from_address
        message['To'] = ';'.join(to_addresses)
        self.__client.sendmail(from_address, to_addresses, message.as_string())

    @staticmethod
    def __attach_file(virtual_file, file_name=None):
        """Wrap ``virtual_file``'s contents as a base64 attachment.

        ``virtual_file`` must expose ``getvalue()`` (in-memory buffer).
        The default attachment name is today's date with a ``.csv`` suffix.
        """
        if not file_name:
            file_name = get_now().format('YYYY-MM-DD') + '.csv'
        message = MIMEMultipart()
        msg = MIMEBase('application', 'octet-stream')
        msg.set_payload(virtual_file.getvalue())
        encoders.encode_base64(msg)
        msg.add_header('Content-Disposition', 'attachment', filename=file_name)
        message.attach(msg)
        return message
|
class Solution:
    def canReach(self, arr: list, start: int) -> bool:
        """Jump Game III: from index i you may jump to i + arr[i] or
        i - arr[i].  Return True if any index holding 0 is reachable from
        ``start``.

        Visited indices are marked in-place with -1, so ``arr`` is mutated.
        Fixes: the original annotated ``List[int]`` without importing
        ``typing.List`` (NameError at definition time) and used the bound
        ``pos2 <= len(arr)`` instead of ``< len(arr)``.
        """
        found = False

        def dfs(index: int) -> None:
            nonlocal found
            if found or arr[index] == -1:
                return
            if arr[index] == 0:
                found = True
                return
            step = arr[index]
            arr[index] = -1  # mark visited before recursing
            for nxt in (index + step, index - step):
                if 0 <= nxt < len(arr):
                    dfs(nxt)

        dfs(start)
        return found
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 14:50:11 2018
@author: mahsayedsalem
"""
from imblearn.under_sampling import RandomUnderSampler
import numpy as np
import os
from random import shuffle
import cv2
from keras.utils.np_utils import to_categorical
import gc
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
def one_hot(Y_train, Y_test, n_classes):
    """One-hot encode the train and test label vectors for ``n_classes``."""
    encoded_train = to_categorical(Y_train, num_classes=n_classes)
    encoded_test = to_categorical(Y_test, num_classes=n_classes)
    return encoded_train, encoded_test
def under_sampling(X_train, X_test, Y_train, Y_test, height, width, channels, n_classes):
    """Random-undersample the train and test sets to balance classes.

    Images are flattened to 2-D for the sampler and reshaped back to
    ``(n, height, width, channels)`` afterwards.

    :return: resampled train images, one-hot train labels, resampled test
        images, one-hot test labels, and class weights of the resampled
        training labels.
    """
    # Import with an alias so the module is not shadowed by a local variable
    # (the original rebound the name ``class_weight`` to an array).
    from sklearn.utils import class_weight as cw

    ros = RandomUnderSampler(ratio='auto')
    # Flatten each image so the sampler sees a (samples, features) matrix.
    n_features_train = X_train.shape[1] * X_train.shape[2] * X_train.shape[3]
    n_features_test = X_test.shape[1] * X_test.shape[2] * X_test.shape[3]
    X_train_flat = X_train.reshape(X_train.shape[0], n_features_train)
    X_test_flat = X_test.reshape(X_test.shape[0], n_features_test)
    X_train_ros, Y_train_ros = ros.fit_sample(X_train_flat, Y_train)
    X_test_ros, Y_test_ros = ros.fit_sample(X_test_flat, Y_test)
    Y_train_hot, Y_test_hot = one_hot(Y_train_ros, Y_test_ros, n_classes)
    # Restore the image shape once (the original repeated this inside a
    # do-nothing loop, len(X) times).
    X_train_reshaped = X_train_ros.reshape(len(X_train_ros), height, width, channels)
    X_test_reshaped = X_test_ros.reshape(len(X_test_ros), height, width, channels)
    old_weights = cw.compute_class_weight('balanced', np.unique(Y_train), Y_train)
    print("Old Class Weights: ", old_weights)
    new_weights = cw.compute_class_weight('balanced', np.unique(Y_train_ros), Y_train_ros)
    print("New Class Weights: ", new_weights)
    return X_train_reshaped, Y_train_hot, X_test_reshaped, Y_test_hot, new_weights
def memory_management(deleted):
    """Drop this frame's reference to ``deleted`` and force a GC sweep.

    Note: ``del`` only removes the local binding; a caller that still holds
    its own reference keeps the object alive.
    """
    if deleted is not None:
        del deleted
    gc.collect()
def label_img(Dir_num, num):
    """Return a one-hot list of length ``Dir_num`` with a 1 at index ``num``.

    Replaces the original element-by-element append loop with the list
    multiplication idiom.
    """
    labels = [0] * Dir_num
    labels[num] = 1
    return labels
def create_train_data_from_folders(Directories, labels, IMG_SIZE, channels, percent):
    """Build shuffled train/test arrays from one image folder per class.

    :param Directories: list of folder paths, one per class
    :param labels: human-readable class names, parallel to ``Directories``
    :param IMG_SIZE: images are resized to IMG_SIZE x IMG_SIZE
    :param channels: channel count used when reshaping the arrays
    :param percent: fraction of the shuffled data held out as the test split
    :return: (x_train, y_train, x_test, y_test, label_decoder); the arrays
        are also saved as .npy files in the working directory.

    Fixes: no longer shadows the builtin ``dir`` or reuses ``img`` for both
    the file name and the decoded image; the constant per-class one-hot
    label is computed once per folder instead of once per image.
    """
    n_classes = len(Directories)
    full_data = []
    label_decoder = []
    for class_idx, directory in enumerate(Directories):
        one_hot_label = label_img(n_classes, class_idx)
        for file_name in tqdm(os.listdir(directory)):
            path = os.path.join(directory, file_name)
            img = cv2.imread(path)
            img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
            full_data.append([np.array(img), np.array(one_hot_label)])
            # One decoder entry per image, matching the original output.
            label_decoder.append([np.array(one_hot_label), labels[class_idx]])
    shuffle(full_data)
    train_range = int(len(full_data) * (1 - percent))
    train = full_data[:train_range]
    test = full_data[train_range:]
    x_train = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, channels)
    y_train = np.array([i[1] for i in train])
    x_test = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, channels)
    y_test = np.array([i[1] for i in test])
    np.save('label_decoder.npy', label_decoder)
    np.save('x_train.npy', x_train)
    np.save('y_train.npy', y_train)
    np.save('x_test.npy', x_test)
    np.save('y_test.npy', y_test)
    return x_train, y_train, x_test, y_test, label_decoder
def normalize(x_train, x_test):
    """Scale pixel intensities from [0, 255] into [0, 1]."""
    return x_train / 255, x_test / 255
from random import choice
from typing import Dict, List, Tuple
import numpy as np
import pandas
from hts._t import MethodT, NAryTreeT
from hts.hierarchy import make_iterable
def to_sum_mat(ntree: NAryTreeT) -> Tuple[np.ndarray, List[str]]:
    """Build the summing matrix used by the bottom-up and optimal-combination
    reconciliation approaches (see Rob Hyndman's "Forecasting: principles and
    practice", Section 9.4).

    Parameters
    ----------
    ntree : NAryTreeT

    Returns
    -------
    numpy.ndarray
        The summing matrix: total row first, then one row per intermediate
        label, then the bottom-level identity block.
    List[str]
        Hierarchy label for each row, in row order.
    """
    nodes = ntree.level_order_traversal()
    node_labels = ntree.get_level_order_labels()
    num_bottom = list(map(sum, nodes))[-1]
    # Bottom-level labels; their order fixes the column order of the matrix.
    bottom_labels = node_labels[-1]

    # One row per unique label strictly between the root and bottom levels.
    middle_rows = []
    middle_labels = []
    for level in node_labels[1:-1]:
        for label in level:
            if label in middle_labels:
                continue  # the tree may repeat a label; keep one row each
            parts = label.split("_")
            # A bottom element contributes when every part of the label
            # occurs in it.
            row = [1 if all(p in bl for p in parts) else 0 for bl in bottom_labels]
            middle_rows.append(np.array(row))
            middle_labels.append(label)

    # Assemble top-down by prepending: bottom identity, middle rows (the
    # last-discovered label ends up closest to the top), then the total row.
    sum_mat = np.identity(num_bottom)
    for row in middle_rows:
        sum_mat = np.vstack((row, sum_mat))
    sum_mat = np.vstack((np.ones(num_bottom), sum_mat))
    # Reverse so the label list matches the final row order.
    middle_labels.reverse()
    return sum_mat, ["total"] + middle_labels + bottom_labels
def project(
    hat_mat: np.ndarray, sum_mat: np.ndarray, optimal_mat: np.ndarray
) -> np.ndarray:
    """Map each row of ``hat_mat`` through ``optimal_mat``; the result has
    one row per forecast step and one column per row of ``sum_mat``."""
    projected = np.empty([hat_mat.shape[0], sum_mat.shape[0]])
    for row_idx, row in enumerate(hat_mat):
        projected[row_idx, :] = np.dot(optimal_mat, np.transpose(row))
    return projected
def y_hat_matrix(forecasts, keys=None):
    """Stack the ``yhat`` series of ``forecasts[key]`` into a 2-D array with
    one column per key, in ``keys`` order.

    ``keys`` defaults to all keys of ``forecasts`` (an empty ``keys`` also
    falls back to all keys, as before).

    Fixes: the original used ``np.all(y_hat_mat == 0)`` as an "uninitialized"
    sentinel, which silently overwrote a leading all-zero forecast column.
    """
    if not keys:
        keys = forecasts.keys()
    columns = [np.array(forecasts[key].yhat)[:, np.newaxis] for key in keys]
    return np.concatenate(columns, axis=1)
def optimal_combination(
    forecasts: Dict[str, pandas.DataFrame],
    sum_mat: np.ndarray,
    method: str,
    mse: Dict[str, float],
):
    """Produce the optimal combination of forecasts by trace minimization
    (Wickramasuriya, Athanasopoulos, Hyndman, "Optimal Forecast
    Reconciliation for Hierarchical and Grouped Time Series Through Trace
    Minimization").

    Parameters
    ----------
    forecasts : dict
        Dictionary of pandas.DataFrames containing the future predictions
    sum_mat : np.ndarray
        The summing matrix
    method : str
        One of:
            - OLS (ordinary least squares)
            - WLSS (structurally weighted least squares)
            - WLSV (variance weighted least squares)
    mse
        Per-key mean squared errors; used by WLSV only.

    Returns
    -------
    """
    hat_mat = y_hat_matrix(forecasts)
    s_transpose = np.transpose(sum_mat)
    if method == MethodT.OLS.name:
        # S (S'S)^-1 S'
        projection = np.dot(
            np.dot(sum_mat, np.linalg.inv(np.dot(s_transpose, sum_mat))),
            s_transpose,
        )
        return project(hat_mat=hat_mat, sum_mat=sum_mat, optimal_mat=projection)
    if method == MethodT.WLSS.name:
        # Structural weights: row sums of the summing matrix.
        diag = np.diag(np.transpose(np.sum(sum_mat, axis=1)))
    elif method == MethodT.WLSV.name:
        # Variance weights: per-series MSEs (small epsilon avoids a
        # singular matrix).
        variances = [mse[key] for key in mse.keys()]
        diag = np.diag(np.flip(np.hstack(variances) + 0.0000001, 0))
    else:
        raise ValueError("Invalid method")
    # S (S' W^-1 S)^-1 S' W^-1, with W = diag.
    w_inv = np.linalg.inv(diag)
    optimal_mat = np.dot(
        np.dot(
            np.dot(
                sum_mat,
                np.linalg.inv(np.dot(np.dot(s_transpose, w_inv), sum_mat)),
            ),
            s_transpose,
        ),
        w_inv,
    )
    return project(hat_mat=hat_mat, sum_mat=sum_mat, optimal_mat=optimal_mat)
def proportions(nodes, forecasts, sum_mat, method=MethodT.PHA.name):
    """Disaggregate the top-level forecast to the bottom level using
    historical proportions.

    ``method`` selects AHP (average of the per-period bottom/top ratios) or
    PHA (ratio of the historical sums).
    """
    n_cols = len(list(forecasts.keys()))
    # First entry of the forecasts dict is taken as the top-level series --
    # presumably guaranteed by the caller; confirm.
    fcst = forecasts[list(forecasts.keys())[0]].yhat
    fcst = fcst[:, np.newaxis]
    num_bts = sum_mat.shape[1]
    # Keys of the bottom-level nodes: the last ``num_bts`` in level order.
    cols = [n.key for n in [nodes] + nodes.traversal_level()][
        (n_cols - num_bts) : n_cols
    ]
    bts_dat = nodes.to_pandas()[cols]
    if method == MethodT.AHP.name:
        # Average historical proportions: mean over time of bottom/top.
        divs = np.divide(np.transpose(np.array(bts_dat)), np.array(nodes.get_series()))
        props = divs.mean(1)
        props = props[:, np.newaxis]
    elif method == MethodT.PHA.name:
        # Proportions of the historical averages: sum(bottom) / sum(top).
        bts_sum = bts_dat.sum(0)
        top_sum = sum(nodes.get_series())
        props = bts_sum / top_sum
        props = props[:, np.newaxis]
    else:
        raise ValueError("Invalid method")
    return np.dot(np.array(fcst), np.transpose(props))
def forecast_proportions(forecasts, nodes):
    """Reconcile forecasts top-down by forecasted proportions: each parent's
    revised forecast is split among its children proportionally to the
    children's base forecasts.

    Cons:
        Produces biased revised forecasts even if base forecasts are unbiased

    Fixes: removed a leftover debug ``print`` and an unused enumerate index;
    hoisted a line duplicated in both branches.
    """
    n_cols = len(list(forecasts.keys())) + 1
    levels = nodes.get_height()
    column = 0
    first_node = 1
    key = choice(list(forecasts.keys()))
    new_mat = np.empty([len(forecasts[key].yhat), n_cols - 1])
    new_mat[:, 0] = forecasts[key].yhat
    as_iterable = make_iterable(nodes, prop=None)
    for level in range(levels - 1):
        # Each entry of the level-order traversal is a node's child count.
        for num_child in nodes.level_order_traversal()[level]:
            last_node = first_node + num_child
            base_fcst = np.array(
                [forecasts[k.key].yhat[:] for k in as_iterable[first_node:last_node]]
            )
            fore_sum = np.sum(base_fcst, axis=0)
            fore_sum = fore_sum[:, np.newaxis]
            # Parent forecast: the original total for the root, otherwise the
            # already-revised column of this parent.
            if column == 0:
                rev_top = np.array(forecasts["total"].yhat)
            else:
                rev_top = np.array(new_mat[:, column])
            rev_top = rev_top[:, np.newaxis]
            new_mat[:, first_node:last_node] = np.divide(
                np.multiply(np.transpose(base_fcst), rev_top), fore_sum
            )
            column += 1
            first_node += num_child
    return new_mat
|
# Get input
import glob
import CSGM.dcgan.dcgan_utils as dcgan_utils
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
def mnist_data(hparams, num_batches):
    """Create input tensors: load every image matching ``hparams.train_data``
    and return them as ``num_batches`` batches of flattened, rescaled
    vectors."""
    paths = sorted(glob.glob(hparams.train_data))
    size = hparams.input_size
    images = [
        dcgan_utils.get_image(p, size, True, size, True) for p in paths
    ]
    # Flatten each image and map values with x/2 + 0.5 (presumably from
    # [-1, 1] into [0, 1]; get_image's output range is defined elsewhere).
    flat = np.array([img.reshape([size * size * 1]) / 2 + 0.5 for img in images])
    batch_size = hparams.batch_size
    batches = [
        flat[i * batch_size:(i + 1) * batch_size] for i in range(num_batches)
    ]
    return np.array(batches)
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
# Keras
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from tensorflow.keras.applications.imagenet_utils import decode_predictions
from werkzeug.utils import secure_filename
#from gevent.pywsgi import WSGIServer
# Define a flask app
app = Flask(__name__)
# Path to weights saved with Keras model.save()
MODEL_PATH ='model_mobilenetv2.h5'
# Load the trained model.  NOTE(review): ``weights`` is given a local .h5
# path; this only works if the file contains weights matching the default
# MobileNetV2 architecture -- confirm, otherwise use load_model().
model = MobileNetV2(weights = MODEL_PATH)
import numpy as np
import re
import base64
import numpy as np
from PIL import Image
from io import BytesIO
def base64_to_pil(img_base64):
    """
    Convert base64 image data to PIL image
    """
    # Strip the "data:image/...;base64," prefix before decoding.
    payload = re.sub('^data:image/.+;base64,', '', img_base64)
    return Image.open(BytesIO(base64.b64decode(payload)))
def np_to_base64(img_np):
    """
    Convert numpy image (RGB) to base64 string
    """
    buffer = BytesIO()
    Image.fromarray(img_np.astype('uint8'), 'RGB').save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("ascii")
    return u"data:image/png;base64," + encoded
#def model_predict(img_path, model):
def model_predict(img, model):
    """Run ``model`` on a single image and return a readable label string.

    :param img: image array of arbitrary size; resized to 224x224x3 here
    :param model: a Keras model producing ImageNet-style class scores
    :return: "The result is <top-1 label>"

    Fixes: removed debug prints, large commented-out alternatives, and the
    unused ``pred_proba`` computation.
    """
    # Resize/reshape to the network's expected input.
    img = np.resize(img, (224, 224, 3))
    x = image.img_to_array(img).astype('float32')
    x = x / 255.  # scale pixels to [0, 1]
    x = np.expand_dims(x, axis=0)
    preds = model.predict(x)
    pred_class = decode_predictions(preds, top=1)
    result = str(pred_class[0][0][1])
    result = result.replace('_', ' ').capitalize()
    return f"The result is {result}"
@app.route('/', methods=['GET'])
def index():
    """Serve the main page."""
    # Main page
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
    """Decode an uploaded image and return the model's prediction string.

    Returns None (empty response) for non-POST requests, matching the
    original behavior.

    Fixes: replaced the deprecated ``np.fromstring`` with ``np.frombuffer``,
    moved the cv2 import to the top of the branch, and removed debug prints
    and dead commented-out code.
    """
    if request.method == 'POST':
        import cv2  # local import kept from the original module layout

        raw = request.files['file'].read()
        pixels = np.frombuffer(raw, np.uint8)
        img = cv2.imdecode(pixels, cv2.IMREAD_COLOR)
        return model_predict(img, model)
    return None
if __name__ == '__main__':
    # Flask development server; debug=True is not suitable for production.
    app.run(port=5001,debug=True)
|
# coding: utf-8
import gym
import numpy as np
import xtools as xt
import xtools.simulation as xs
from .base import BaseEnv
from ..models.lvaircraft import LVAircraftEx
class LVAircraftAltitudeV0(BaseEnv):
    """Gym-style altitude-tracking environment for the LVAircraftEx model.

    The observation is the model's observation vector with the current
    target altitude appended as the last element; the action is the model's
    2-element command (the model is built with elevator and throttle
    ranges).
    """

    def __init__(
        self,
        dt,
        range_target=[-2000, 2000],
        sampling_interval=1,
        name="LVAircraftAltitudeV0"
    ):
        # NOTE(review): ``range_target`` is a mutable default argument (only
        # read here, but shared across instances); ``sampling_interval`` is
        # accepted but never used in this class.
        super().__init__(dt, name=name)
        self._model = LVAircraftEx(
            dt,
            range_elevator=xt.d2r([-5, 5]),
            range_throttle=[-1.0, 1.0],
            name=name
        )
        # Indices into the model observation vector.
        self.IX_U = self._model.IX_U
        self.IX_H = self._model.IX_H
        self.IX_u = self._model.IX_u
        self.IX_w = self._model.IX_w
        self.IX_T = self._model.IX_T
        self.IX_q = self._model.IX_q
        # Index of the appended target-altitude element.
        self.IX_C = len(self._model.get_observation())
        self.range_target = range_target
        self.target_altitude = 0
        # env params
        self.action_space = xs.BaseModel.generate_space(self._model.act_low, self._model.act_high)
        # Extend the model observation bounds with the target-altitude bounds.
        self._obs_low = np.concatenate([self._model.obs_low, [self._model.H0 + np.min(self.range_target)]])
        self._obs_high = np.concatenate([self._model.obs_high, [self._model.H0 + np.max(self.range_target)]])
        self.observation_space = xs.BaseModel.generate_space(self._obs_low, self._obs_high)
        self.viewer = None

    def reset(self):
        """Reset the model and draw a new random target altitude."""
        self._model.reset()
        self.target_altitude = float(self._model.H0 + np.random.randint(
            np.min(self.range_target),
            np.max(self.range_target)
        ))
        return self.get_observation()

    def step(self, action):
        """Advance the model one step.  ``done`` is always False."""
        action = np.asanyarray(action).astype(self._model.dtype)
        assert action.shape == (2,), "action size must be 2 and only single action is accepted"
        next_obs = self._model(action)
        reward = self.calc_reward(next_obs)
        done = False
        info = {}
        return self.get_observation(), reward, done, info

    def calc_reward(self, obs):
        """Negative sum of squared normalized altitude error and squared
        pitch term (obs[IX_T])."""
        reward_position = obs[self.IX_H] - self.target_altitude
        reward_position = reward_position / np.max(self.range_target)
        reward_attitude = obs[self.IX_T]
        reward = - reward_position ** 2 - reward_attitude ** 2
        return reward

    def get_observation(self):
        """Model observation with the target altitude appended."""
        obs = self._model.get_observation()
        return np.concatenate([obs, [self.target_altitude]])

    def render(self, mode='human'):
        """Draw the target line and aircraft with gym's classic viewer."""
        screen_width = 500
        screen_height = 500
        aircraft_length = 30
        aircraft_width = 10
        target_width = np.abs(self.range_target[0] - self.range_target[1]) * 2
        target_ratio = screen_height / target_width
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            # target line
            l, r, t, b = 0, screen_width, 1, -1
            target_line = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            target_line.set_color(0.4, 0.6, 0.8)
            self.target_trans = rendering.Transform()
            target_line.add_attr(self.target_trans)
            self.viewer.add_geom(target_line)
            # aircraft
            aircraft = rendering.make_capsule(aircraft_length, aircraft_width)
            aircraft.set_color(0.8, 0.4, 0.4)
            self.aircraft_trans = rendering.Transform()
            aircraft.add_attr(self.aircraft_trans)
            self.viewer.add_geom(aircraft)
        # NOTE(review): this local ``xs`` shadows the module alias
        # ``xtools.simulation as xs`` inside this method.
        xs = self.get_observation()
        # target line
        ht = self.target_altitude - self._model.H0
        ht = ht * target_ratio + screen_height / 2
        self.target_trans.set_translation(0, ht)
        # aircraft
        h = xs[self.IX_H] - self._model.H0
        h = h * target_ratio + screen_height / 2
        x = (screen_width - aircraft_length) / 2
        self.aircraft_trans.set_translation(x, h)
        T = xs[self.IX_T]
        self.aircraft_trans.set_rotation(T)
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')

    def close(self):
        """Close the viewer if one was opened."""
        if self.viewer:
            self.viewer.close()
            self.viewer = None
|
import pandas as pd
from sklearn.model_selection import train_test_split
import joblib
import lightgbm as lgb
import numpy as np
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class ModelTraining:
    """
    Train models

    Trains LightGBM, random-forest and bagged-KNN (random subspace)
    classifiers on pre-split CSVs and pickles each fitted model under
    ``<output_folder>/<roi_name>/FOLD<fold>/<ups_method>/``.

    Args:
        train_df_path: str Path to train_df
        test_df_path: str Path to test_df
        roi_name: str Name of roi
        fold: int Fold number
        ups_method: str Upsampling method
        output_folder: str Root folder for saved models
    """
    def __init__(self, train_df_path: str,
                 test_df_path: str,
                 roi_name: str,
                 fold: int,
                 ups_method: str,
                 output_folder: str
                 ):
        self.roi_name = roi_name
        self.fold = fold
        self.ups_method = ups_method
        self.output_folder = output_folder
        # Features are every column except the "label" target.
        train_data = pd.read_csv(train_df_path)
        test_data = pd.read_csv(test_df_path)
        self.y_train = train_data["label"]
        self.X_train = train_data.drop(["label"], axis=1)
        self.y_test = test_data["label"]
        self.X_test = test_data.drop(["label"], axis=1)

    def train_lgbm(self,):
        """Train a LightGBM model on an internal 80/20 split and pickle it."""
        logger.info("Training LightGBM")
        SEARCH_PARAMS = {'learning_rate': 0.4,
                         'max_depth': 15,
                         'num_leaves': 32,
                         'feature_fraction': 0.8,
                         'subsample': 0.2}
        # NOTE(review): early_stopping_rounds (300) exceeds num_boost_round
        # (200), so early stopping can never trigger -- confirm intent.
        FIXED_PARAMS={'objective': 'binary',
                      'metric': 'auc',
                      'is_unbalance':False,
                      'bagging_freq':5,
                      'boosting':'dart',
                      'num_boost_round':200,
                      'early_stopping_rounds':300}
        X_train, X_test, y_train, y_test = train_test_split(self.X_train, self.y_train, test_size=0.2,stratify=self.y_train,random_state=50)
        train_data = lgb.Dataset(X_train, label=y_train)
        valid_data = lgb.Dataset(X_test, label=y_test, reference=train_data)
        params = {'metric':FIXED_PARAMS['metric'],
                  'objective':FIXED_PARAMS['objective'],
                  **SEARCH_PARAMS}
        model = lgb.train(params, train_data,
                          valid_sets=[valid_data],
                          num_boost_round=FIXED_PARAMS['num_boost_round'],
                          early_stopping_rounds=FIXED_PARAMS['early_stopping_rounds'],
                          valid_names=['valid'])
        # NOTE(review): ``score`` is computed but never used or persisted.
        score = model.best_score['valid']['auc']
        path = os.path.join(self.output_folder, self.roi_name, f"FOLD{self.fold }", self.ups_method,'lgbm.pkl')
        os.makedirs(os.path.dirname(path), exist_ok=True)
        joblib.dump(model, path)

    def train_rf(self,):
        """Randomized hyperparameter search over a random forest; pickle the
        best estimator."""
        logger.info("Training Random Forest")
        rf = RandomForestClassifier()
        # Number of trees in random forest
        n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1000, num = 100)]
        # Number of features to consider at every split
        max_features = ['auto', 'sqrt', 'log2']
        # Maximum number of levels in tree
        max_depth = [int(x) for x in np.linspace(10, 110, num = 22)]
        max_depth.append(None)
        # Minimum number of samples required to split a node
        min_samples_split = [2, 5, 10,20]
        # Minimum number of samples required at each leaf node
        min_samples_leaf = [1, 2, 4]
        # Method of selecting samples for training each tree
        bootstrap = [True, False]
        # Create the random grid
        random_grid = {'n_estimators': n_estimators,
                       'max_features': max_features,
                       'max_depth': max_depth,
                       'min_samples_split': min_samples_split,
                       'min_samples_leaf': min_samples_leaf,
                       'bootstrap': bootstrap}
        rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 500, cv = 4, scoring = 'roc_auc', verbose=1, random_state=42, n_jobs = -1)
        # Fit the random search model
        rf_random.fit(self.X_train, self.y_train)
        model = rf_random.best_estimator_
        path = os.path.join(self.output_folder, self.roi_name, f"FOLD{self.fold }", self.ups_method,'rf.pkl')
        os.makedirs(os.path.dirname(path), exist_ok=True)
        joblib.dump(model, path)

    def train_subspaceKNN(self,):
        """Randomized search over a bagged-KNN (random subspace) ensemble;
        pickle the best estimator."""
        logger.info("Training subspace KNN")
        # Method of selecting samples for training each tree
        bootstrap = [True, False]
        # NOTE(review): ``k_range`` is currently unused -- n_neighbors is
        # fixed at 5 below and its grid entry is commented out.
        k_range= list(range(1,15))
        # Create the random grid
        random_grid = {'max_features': [10,12,15],
                       'max_samples': [10,20,30,40,50,60,70,80,90,100],
                       'n_estimators': [50,100,150,200,250,300,400,500],
                       #'n_neighbors': k_range,
                       #'max_depth': max_depth,
                       #'min_samples_split': min_samples_split,
                       #'min_samples_leaf': min_samples_leaf,
                       'bootstrap': bootstrap}
        knn = KNeighborsClassifier(n_neighbors=5)
        knn_classifier = BaggingClassifier(base_estimator = knn ,
                                           max_samples = 10,
                                           n_estimators = 100)
        knn_random = RandomizedSearchCV(estimator = knn_classifier,
                                        param_distributions = random_grid,
                                        n_iter = 1000,
                                        cv = 4,
                                        verbose=2,
                                        random_state=42,
                                        n_jobs = -1,
                                        scoring='roc_auc')
        # Fit the random search model
        knn_random.fit(self.X_train, self.y_train)
        model = knn_random.best_estimator_
        path = os.path.join(self.output_folder, self.roi_name, f"FOLD{self.fold }", self.ups_method,'subspaceKNN.pkl')
        os.makedirs(os.path.dirname(path), exist_ok=True)
        joblib.dump(model, path)

    def train_all(self,):
        """Train and persist all three model families."""
        self.train_lgbm()
        self.train_rf()
        self.train_subspaceKNN()
# -*- coding: utf-8 -*-
import logging
from django.test.client import Client
from mock import patch
from networkapi.api_vip_request.models import VipRequest
from networkapi.api_vip_request.serializers.v3 import VipRequestV3Serializer
from networkapi.api_vip_request.tasks.deploy import deploy
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.usuario.models import Usuario
log = logging.getLogger(__name__)
class VipRequestAsyncPostDeploySuccessTestCase(NetworkApiTestCase):
    """Async deploy of a VIP request: success path."""

    def setUp(self):
        self.client = Client()

    def tearDown(self):
        pass

    @patch('networkapi.api_vip_request.facade.v3.create_real_vip_request')
    @patch('networkapi.api_vip_request.facade.v3.get_vip_request_by_id')
    @patch('networkapi.usuario.models.Usuario.objects.get')
    @patch('networkapi.api_vip_request.tasks.deploy.deploy.update_state')
    def test_task_id_create_in_post_deploy_one_vip_request_success(self, *args):
        """Test success of id task generate for vip request post deploy success."""
        # Decorators apply bottom-up: args[0] is update_state, args[1] is
        # Usuario.objects.get, args[2] get_vip_request_by_id, args[3]
        # create_real_vip_request.
        mock_get_user = args[1]
        mock_get_vip = args[2]
        mock_create_real_vip = args[3]
        user = Usuario(id=1, nome='test')
        vip = VipRequest(id=1)
        vip_serializer = VipRequestV3Serializer(
            vip, include=('ports__identifier',))
        mock_create_real_vip.return_value = vip
        mock_get_vip.return_value = vip
        mock_get_user.return_value = user
        deploy(vip.id, user.id)
        # The task must serialize the vip and hand it to the facade.
        mock_create_real_vip.assert_called_with(
            [vip_serializer.data], user)
class VipRequestAsyncPostDeployErrorTestCase(NetworkApiTestCase):
    """Async deploy of a VIP request: error path (currently a placeholder)."""

    def setUp(self):
        self.client = Client()

    def tearDown(self):
        pass

    def test_task_id_create_in_post_deploy_one_vip_request_error(self):
        """Test success of id task generate for vip request post deploy error."""
        # TODO: implement the error-path assertions; currently a no-op.
        pass
|
# Tiny quiz: map two yes/no answers onto a Ted Lasso character.
print()
print("Which Ted Lasso character are you?")
teastr = input("Do you like tea? (y/n) ")
optimiststr = input("Are you an optimist? (y/n) ")
print()
# Fixed: combine comparisons with boolean 'and' instead of bitwise '&'
# ('&' happens to work on bools but has surprising precedence and intent).
if teastr == "y" and optimiststr == "y":
    print("You are Becks!")
elif teastr == "y" and optimiststr == "n":
    print("You are Roy!")
elif teastr == "n" and optimiststr == "y":
    print("You are Ted!")
else:
    print("You are Beard!")
|
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from polls import apiviews
class TestPoll(APITestCase):
    """Smoke test for the poll list endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        # Route GET requests to the viewset's ``list`` action.
        self.view = apiviews.PollViewSet.as_view({'get': 'list'})
        self.uri = '/polls/'

    def test_list(self):
        """Listing polls should return HTTP 200."""
        request = self.factory.get(self.uri)
        response = self.view(request)
        self.assertEqual(response.status_code, 200,
                         'Expected Response Code 200, received {0} instead.'
                         .format(response.status_code))
|
import argparse
import dill as pickle
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import FormatStrFormatter
from nes.ensemble_selection.config import BUDGET, PLOT_EVERY
matplotlib.use("Agg")
import os
from pathlib import Path
import numpy as np
import pandas as pd
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.linestyle'] = 'dotted'
plt.rcParams['font.size'] = 15
# Command-line interface for the plotting script.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--esa",
    type=str,
    default="beam_search",
    help="Ensemble selection algorithm. See nes/ensemble_selection/esas.py. Default: beam_search.",
)
parser.add_argument(
    "--save_dir", type=str, help="Directory to save plots.",
)
parser.add_argument(
    "--load_plotting_data_dir",
    type=str,
    help="Directory where outputs of evaluate_ensembles.py are saved.",
)
parser.add_argument(
    "--methods", type=str, nargs="+", help="A sequence of method names to plot."
)
parser.add_argument(
    "--dataset", choices=["cifar10", "cifar100", "fmnist", "imagenet", "tiny"], type=str, help="Dataset."
)
parser.add_argument(
    "--runs", type=str, default=[''], nargs='+', help="Subdirectories in load_plotting_data_dir over which to average runs."
)
parser.add_argument(
    "--plot_type", type=str, choices=["budget", "ensemble_size", "severity"], help="Which type of plots to make."
)
args = parser.parse_args()
SAVE_DIR = args.save_dir
data_types = ["val", "test"]
ens_attrs = ["evals", "avg_baselearner_evals", "oracle_evals"]
# Corruption severities 0-5 exist only for these datasets; otherwise only 0.
severities = range(6) if (args.dataset in ["cifar10", "cifar100", "tiny"]) else range(1)
dataset_to_budget = {
    "cifar10": 400,
    "cifar100": 400,
    "fmnist": 400,
    "tiny": 200,
    "imagenet": 1000
}
# NOTE(review): these overwrite the BUDGET / PLOT_EVERY values imported from
# nes.ensemble_selection.config at the top of the file.
BUDGET = dataset_to_budget[args.dataset]
PLOT_EVERY = 25
plot_individual_lines = False
# ---------------------------------------------------------------------------- #
# Helper functions #
# ---------------------------------------------------------------------------- #
def merge_and_fill_trajectories(pandas_data_frames, default_value=None):
# merge all trajectories keeping all time steps
df = pd.DataFrame().join(pandas_data_frames, how='outer')
# forward fill to make it a propper step function
df = df.fillna(method='ffill')
if default_value is None:
# backward fill to replace the NaNs for the early times by the
# performance of a random configuration
df = df.fillna(method='bfill')
else:
df = df.fillna(default_value)
return df
def get_trajectories(losses, iterations):
'''
methods_dict (dict):
key (str): method name; should be one in methods
values (dict): key -> str: 'losses' or 'iterations';
values -> list of lists with all the evaluated metrics
'''
dfs = []
for i in range(len(losses)):
loss = losses[i]
iteration = iterations[i]
# print('Run %d, Min: %f'%(i, loss))
df = pd.DataFrame({str(i): loss}, index=iteration)
dfs.append(df)
df = merge_and_fill_trajectories(dfs, default_value=None)
if df.empty:
pass
return np.array(df.index), np.array(df.T)
# ===================================
# Plot things
# ===================================
plt.rcParams["axes.grid"] = True
plt.rcParams["grid.linestyle"] = "dotted"
plt.rcParams["font.size"] = 15
# Display names for the plotted metrics.
metric_label = {"loss": "NLL", "error": "Error", "ece": "ECE"}
# Per-method line colors.
colors = {
    "nes_rs": "forestgreen",
    "deepens_rs": "dodgerblue",
    "nes_re": "crimson",
    "deepens_darts": "black",
    "deepens_darts_anchor": "aqua",
    "deepens_gdas": "cyan",
    "deepens_minimum": "darkgoldenrod",
    "deepens_amoebanet": "darkorange",
    "darts_esa": "black",
    "amoebanet_esa": "darkorange",
    "nes_rs_esa": "dodgerblue",
    "darts_rs": "mediumorchid",
    "darts_hyper": "aqua",
    "joint": "darkorange",
}
# Per-method marker shapes.
markers = {
    'nes_rs': 'v',
    'deepens_rs': 'h',
    'nes_re': 'x',
    'deepens_minimum': '^',
    'deepens_darts': '<',
    'deepens_darts_anchor': '*',
    'deepens_gdas': '.',
    'deepens_amoebanet': '>',
    "darts_esa": "o",
    "amoebanet_esa": "o",
    "nes_rs_esa": "o",
    "darts_rs": "h",
    "darts_hyper": ">",
    "joint": "*",
}
# Legend labels per method.
label_names = {
    'nes_rs': 'NES-RS',
    'deepens_rs': 'DeepEns (RS)',
    'nes_re': 'NES-RE',
    'deepens_minimum': 'DeepEns (best arch.)',
    'deepens_darts': 'DeepEns (DARTS)',
    'deepens_darts_anchor': 'AnchoredEns (DARTS)',
    'deepens_gdas': 'DeepEns (GDAS)',
    'deepens_amoebanet': 'DeepEns (AmoebaNet)',
    "darts_esa": "DeepEns+ES (DARTS)",
    "amoebanet_esa": "DeepEns+ES (AmoebaNet)",
    "nes_rs_esa": "DeepEns+ES (RS)",
    "darts_rs": "NES-RS (depth, width)",
    "joint": "NES + HyperEns",
    "darts_hyper": "HyperEns",
}
ens_attr_to_title = {
    "evals": "Ensemble",
    "avg_baselearner_evals": "Average baselearner",
    "oracle_evals": "Oracle ensemble",
}
dataset_to_title = {
    "tiny": "Tiny ImageNet",
    "cifar10": "CIFAR-10",
    "cifar100": "CIFAR-100",
    "fmnist": "FMNIST",
    "imagenet": "ImageNet16",
}
# NOTE(review): despite its name this dict holds display labels (a subset of
# label_names), not matplotlib linestyles -- confirm intended usage.
linestyle_method = {
    'deepens_minimum': 'DeepEns (best arch.)',
    'deepens_darts': 'DeepEns (DARTS)',
    'deepens_darts_anchor': 'AnchoredEns (DARTS)',
    'deepens_gdas': 'DeepEns (GDAS)',
    'deepens_amoebanet': 'DeepEns (AmoebaNet)',
}
# Validation-set sizes swept in the plots (log-spaced up to the full set).
if args.dataset == "tiny":
    validation_sizes = [10, 19, 39, 79, 158, 315, 629, 1256, 2506, 5000]
else:
    validation_sizes = [10, 21, 46, 100, 215, 464, 1000, 2154, 4641, 10000]
# Line alphas fade with increasing severity (reversed: severity 0 darkest).
alphas = list(np.linspace(0.4, 1, len(severities)))
alphas.reverse()
if args.plot_type == "budget":
    # "budget" plots: one figure per (data split, ensemble attribute, method,
    # severity); each curve shows metric vs. evaluation budget, one curve per
    # validation-set size (faded by size via alpha).
    alphas = np.linspace(0.2, 1, len(validation_sizes))
    for data_type in data_types:
        for ens_attr in ens_attrs:
            metric = "loss"
            for pool_name in args.methods:
                M = 10
                for i, severity in enumerate(severities):
                    fig, ax = plt.subplots(
                        1,
                        1,
                        figsize=(5., 5.5),
                        sharex="col",
                        sharey=False,
                    )
                    for val_size in validation_sizes:
                        xs = []
                        ys = []
                        runs = args.runs
                        # Aggregate the pickled plotting data across all runs
                        # (seeds); x grids must agree across runs.
                        for plot_dir in [os.path.join(args.load_plotting_data_dir, p) for p in runs]:
                            with open(
                                os.path.join(
                                    plot_dir,
                                    f"plotting_data__esa_{args.esa}_M_{M}_pool_{pool_name}_valsize_{val_size}.pickle",
                                ),
                                "rb",
                            ) as f:
                                plotting_data = pickle.load(f)
                            x = plotting_data[str(M)][str(severity)][ens_attr][args.esa][
                                pool_name
                            ].x
                            yy = plotting_data[str(M)][str(severity)][ens_attr][args.esa][
                                pool_name
                            ].y
                            y = [item[data_type][str(severity)][metric] for item in yy]
                            xs.append(x)
                            ys.append(y)
                        assert len(xs) == len(ys)
                        # All runs must share the same x grid.
                        assert len(set(xs)) == 1
                        x = xs[0]
                        # Mean/std of the metric across runs.
                        y = np.array(ys).mean(axis=0)
                        err = np.array(ys).std(axis=0)
                        label = "Val. size = {}".format(val_size)
                        alpha = alphas[list(validation_sizes).index(val_size)]
                        ax.plot(x, y, label=label, color=colors[pool_name],
                                linewidth=2, marker=markers[pool_name],
                                markersize=7, markevery=1 if args.dataset=="tiny" else 2,
                                alpha=alpha)
                        #ax.fill_between(x, y - 1.96*err/np.sqrt(len(xs)), y
                        #+ 1.96*err/np.sqrt(len(xs)),
                        #color=colors[pool_name], alpha=0.15)
                    ax.set_xlabel("# nets evaluated")
                    if data_type == "val":
                        title_label = "Validation"
                    else:
                        title_label = "Test"
                    ax.set_title(f"{dataset_to_title[args.dataset]}, "+title_label)
                    # NOTE(review): sev_level is computed but never used here.
                    sev_level = (
                        "(no shift)" if severity == 0 else f"(severity = {severity})"
                    )
                    ax.set_ylabel(
                        "{}".format(ens_attr_to_title[ens_attr])
                        + f" {metric_label[metric]}", fontsize=17
                    )
                    ax.legend(framealpha=0.6, fontsize=10)
                    plt.setp(ax, xlim=(PLOT_EVERY, BUDGET))
                    plt.setp(ax.xaxis.get_majorticklabels(), ha="right")
                    ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
                    Path(os.path.join(SAVE_DIR, data_type, ens_attr)).mkdir(
                        exist_ok=True, parents=True
                    )
                    plt.tight_layout()
                    fig.savefig(
                        os.path.join(SAVE_DIR, data_type, ens_attr,
                                     f"metric_{metric}_sev_{severity}_M_{M}_{pool_name}.pdf"),
                        bbox_inches="tight",
                        pad_inches=0.01,
                    )
                    print("Plot saved.")
                    plt.close("all")
elif args.plot_type == "severity":
    # "severity" plots: one figure per metric; each curve is a method at a
    # given shift severity, showing the final (largest-budget) test metric as
    # a function of the validation-set size, with a 95% CI band.
    for metric in ["loss", "error"]:
        fig, ax = plt.subplots(1, 1, figsize=(4, 4.5), sharex="col", sharey=False)
        for pool_name in args.methods:
            for i, severity in enumerate(severities):
                y_mean = []
                y_err = []
                for val_size in validation_sizes:
                    xs = []
                    ys = []
                    runs = args.runs
                    for plot_dir in [os.path.join(args.load_plotting_data_dir, p) for p in runs]:
                        with open(
                            os.path.join(
                                plot_dir,
                                f"plotting_data__esa_{args.esa}_M_10_pool_{pool_name}_valsize_{val_size}.pickle",
                            ),
                            "rb",
                        ) as f:
                            plotting_data = pickle.load(f)
                        x = plotting_data["10"][str(severity)]["evals"][args.esa][
                            pool_name
                        ].x
                        yy = plotting_data["10"][str(severity)]["evals"][args.esa][
                            pool_name
                        ].y
                        y = [item["test"][str(severity)][metric] for item in yy]
                        xs.append(x)
                        ys.append(y)
                    assert len(xs) == len(ys)
                    assert len(set(xs)) == 1
                    x = xs[0]
                    y = np.array(ys).mean(axis=0)
                    err = np.array(ys).std(axis=0)
                    # Keep only the value at the final evaluation budget.
                    y_mean.append(y[-1])
                    y_err.append(1.96*err[-1]/np.sqrt(len(xs)))
                # Only label the no-shift curve so the legend has one entry
                # per method.
                if severity == 0:
                    label = f"{label_names[pool_name]}"
                else:
                    label = None
                    #label = "Severity = {}".format(severity)
                color = colors[pool_name]
                marker = markers[pool_name]
                alpha = alphas[severities.index(severity)]
                y_mean = np.array(y_mean)
                y_err = np.array(y_err)
                ax.plot(validation_sizes, y_mean,
                        label=label,
                        color=color,
                        marker=marker, linewidth=2, markersize=10,
                        markevery=1, ls="-", alpha=alpha)
                ax.fill_between(validation_sizes, y_mean - y_err, y_mean + y_err,
                                color=color, alpha=0.1)
        ax.set_xlabel("Validation size", fontsize=17)
        ax.set_title(f"{dataset_to_title[args.dataset]}")
        # NOTE(review): sev_level is computed but never used here.
        sev_level = (
            "(no shift)" if severity == 0 else f"(severity = {severity})"
        )
        ax.set_ylabel(
            "{}".format(ens_attr_to_title["evals"])
            + f" {metric_label[metric]}", fontsize=17
        )
        ax.legend(fontsize=14, framealpha=0.6)
        ax.set_xticks(validation_sizes)
        ax.set_xticklabels(validation_sizes)
        plt.setp(ax, xlim=(10, max(validation_sizes)))
        plt.setp(ax.xaxis.get_majorticklabels(), ha="right")
        ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
        ax.set_xscale('log')
        Path(os.path.join(SAVE_DIR, "test", "evals")).mkdir(
            exist_ok=True, parents=True
        )
        plt.tight_layout()
        fig.savefig(
            os.path.join(SAVE_DIR, "test", "evals",
                         f"metric_{metric}_M_10.pdf"),
            bbox_inches="tight",
            pad_inches=0.01,
        )
        print("Plot saved.")
        plt.close("all")
#for metric in ["loss", "error"]:
#for pool_name in args.methods:
#fig, ax = plt.subplots(1, 1, figsize=(6, 6.5), sharex="col", sharey=False)
#for val_size in validation_sizes:
#y_mean = []
#y_err = []
#for i, severity in enumerate(severities):
#xs = []
#ys = []
#
#runs = args.runs
#
#for plot_dir in [os.path.join(args.load_plotting_data_dir, p) for p in runs]:
#
#with open(
#os.path.join(
#plot_dir,
#f"plotting_data__esa_{args.esa}_M_10_pool_{pool_name}_valsize_{val_size}.pickle",
#),
#"rb",
#) as f:
#plotting_data = pickle.load(f)
#
#x = plotting_data["10"][str(severity)]["evals"][args.esa][
#pool_name
#].x
#yy = plotting_data["10"][str(severity)]["evals"][args.esa][
#pool_name
#].y
#y = [item["test"][str(severity)][metric] for item in yy]
#
#xs.append(x)
#ys.append(y)
#
#assert len(xs) == len(ys)
#assert len(set(xs)) == 1
#
#x = xs[0]
#y = np.array(ys).mean(axis=0)
#err = np.array(ys).std(axis=0)
#
#y_mean.append(y[-1])
#y_err.append(1.96*err[-1]/np.sqrt(len(xs)))
#
##label = f"{label_names[pool_name]}"
#label = "Val. size = {}".format(val_size)
#color = colors[pool_name]
#marker = markers[pool_name]
#alpha = alphas[list(validation_sizes).index(val_size)]
#
#y_mean = np.array(y_mean)
#y_err = np.array(y_err)
#print(y_mean)
#
#ls = "-"
#
#ax.plot(severities, y_mean, label=label, color=color,
#marker=marker, linewidth=2, markersize=10,
#markevery=1, ls=ls, alpha=alpha)
##ax.fill_between(severities, y_mean - y_err, y_mean + y_err,
##color=color, alpha=0.15)
#
#ax.set_xlabel("Shift severity", fontsize=20)
#ax.set_title(f"{dataset_to_title[args.dataset]}, {label_names[pool_name]}")
#
#ax.set_ylabel(
#"{}".format(ens_attr_to_title["evals"])
#+ f" {metric_label[metric]}"
#)
#sev_level = (
#"(no shift)" if severity == 0 else f"(severity = {severity})"
#)
#ax.set_ylabel(
#"{}".format(ens_attr_to_title["evals"])
#+ f" {metric_label[metric]}", fontsize=17
#)
#
#ax.legend(fontsize=12)
#
#ax.set_xticks(severities)
#ax.set_xticklabels(severities)
#plt.setp(ax, xlim=(0, max(severities)))
#plt.setp(ax.xaxis.get_majorticklabels(), ha="right")
#
#ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
#
#Path(os.path.join(SAVE_DIR, "test", "evals")).mkdir(
#exist_ok=True, parents=True
#)
#plt.tight_layout()
#fig.savefig(
#os.path.join(SAVE_DIR, "test", "evals",
#f"metric_{metric}_M_10_{pool_name}.pdf"),
#bbox_inches="tight",
#pad_inches=0.01,
#)
#
#print("Plot saved.")
#plt.close("all")
|
from flask import Blueprint, render_template
errors= Blueprint('errors', __name__)
# Flask also offers Blueprint.errorhandler(), but that would register the
# handlers for this blueprint only. We use app_errorhandler() so the handlers
# apply application-wide.
@errors.app_errorhandler(404)
def error_404(error):
    """Render the custom 404 (Not Found) error page."""
    return render_template('errors/404.html'), 404  # 404 is specified to give the correct error code response.
@errors.app_errorhandler(403)
def error_403(error):
    """Render the custom 403 (Forbidden) error page."""
    return render_template('errors/403.html'), 403  # 403 is specified to give the correct error code response.
@errors.app_errorhandler(500)
def error_500(error):
    """Render the custom 500 (Internal Server Error) error page."""
    return render_template('errors/500.html'), 500  # 500 is specified to give the correct error code response.
|
"""Sanity check image sizes and svg viewboxes."""
from PIL import Image
from pathlib import Path
from lxml import etree
def _check_image(base_dir, image_dir):
    """Verify every file in *image_dir* has the pixel size encoded in the
    directory name (e.g. png/128 must hold 128x128 images).

    Prints one ``bad_dim`` line per mismatching file and returns the pair
    ``(num_bad, num_good)``.
    """
    assert image_dir.is_dir()
    side = int(image_dir.name)
    expected_size = (side, side)
    num_bad = 0
    num_good = 0
    for image_file in image_dir.iterdir():
        with Image.open(image_file) as image:
            actual_size = image.size
        if actual_size == expected_size:
            num_good += 1
            continue
        print(f"bad_dim {image_file.relative_to(base_dir)} actual {actual_size} expected {expected_size}")
        num_bad += 1
    return num_bad, num_good
def _check_svg(base_dir, svg_dir):
    """Verify every emoji_u*.svg in *svg_dir* declares the expected
    ``viewBox="0 0 128 128"``.

    Prints one ``bad_dim`` line per mismatching file and returns the pair
    ``(num_bad, num_good)``.
    """
    expected_viewbox = (0.0, 0.0, 128.0, 128.0)
    num_bad = 0
    num_good = 0
    for svg_file in svg_dir.iterdir():
        # Only emoji source files follow the checked convention.
        if not svg_file.name.startswith("emoji_u"):
            continue
        assert svg_file.is_file()
        with open(svg_file) as f:
            actual_viewbox = etree.parse(f).getroot().attrib["viewBox"]
        # NOTE(review): assumes the viewBox value is strictly space-separated
        # (SVG also allows commas) -- confirm the inputs are normalized.
        actual_viewbox = tuple(float(s) for s in actual_viewbox.split(" "))
        if expected_viewbox != actual_viewbox:
            print(f"bad_dim {svg_file.relative_to(base_dir)} actual {actual_viewbox} expected {expected_viewbox}")
            num_bad += 1
        else:
            num_good += 1
    return num_bad, num_good
def main():
    """Check every png/<size> directory and the svg directory next to this
    script, printing a per-directory issue summary."""
    base_dir = Path(__file__).parent
    image_dir = base_dir / "png"
    svg_dir = base_dir / "svg"
    assert image_dir.is_dir()
    assert svg_dir.is_dir()
    # Each subdirectory of png/ is named after the pixel size it must contain.
    for size_dir in image_dir.iterdir():
        num_bad, num_good = _check_image(base_dir, size_dir)
        print(f"{num_bad}/{num_bad+num_good} issues with {size_dir}")
    num_bad, num_good = _check_svg(base_dir, svg_dir)
    print(f"{num_bad}/{num_bad+num_good} issues with {svg_dir}")


if __name__ == "__main__":
    main()
"""View of QuotaWindowView."""
from gi.repository import Gtk
from lib.mvc.bases import WindowViewBase
from lib.exception_feedback import add_default_exception_handling
class QuotaWindowView(Gtk.Window, WindowViewBase):
    """View of QuotaWindowView: a window listing files and their sizes."""

    def __init__(self, app, model):
        """Ctor of QuotaWindowView."""
        Gtk.Window.__init__(self)
        WindowViewBase.__init__(self, app, model)
        # Optional callbacks fired when the window is shown / closed.
        self.on_open = None
        self.on_close = None

    @add_default_exception_handling('Failed to initialize Quota Window')
    def initialize(self):
        """Create the actual view with all widgets."""
        self.connect("delete-event", self.cb_close)
        # create tree view, sorted ascending by the size column (index 1)
        sorted_model = Gtk.TreeModelSort(model=self.model.create_model())
        sorted_model.set_sort_column_id(1, Gtk.SortType.ASCENDING)
        self.tree_view = Gtk.TreeView(model=sorted_model)
        self.create_columns(self.tree_view)
        # create a grid and attach the treeview to it
        self.grid = Gtk.Grid()
        self.grid.attach(self.tree_view, 0, 0, 1, 1)
        # attach grid to window
        self.add(self.grid)

    @add_default_exception_handling('Failed to open Quota Window')
    def cb_show(self, w, data):
        """On show: rebuild the model so the view displays fresh data."""
        self.set_icon_from_file(self.getIcon())
        if self.on_open is not None:
            self.on_open()
        sorted_model = Gtk.TreeModelSort(model=self.model.create_model())
        sorted_model.set_sort_column_id(1, Gtk.SortType.ASCENDING)
        self.tree_view.set_model(model=sorted_model)
        self.show_all()
        return True

    @add_default_exception_handling('Failed to close Quota Window')
    def cb_close(self, w, data):
        """On window close: hide instead of destroying the window."""
        if self.on_close is not None:
            self.on_close()
        self.hide()
        # Returning True stops the default delete-event handler, so the
        # window survives and can be re-shown later.
        return True

    @add_default_exception_handling('Failed to update Quota Window')
    def on_update(self):
        """On update."""
        # NOTE(review): unlike cb_show(), this installs the raw model without
        # a Gtk.TreeModelSort wrapper -- confirm whether losing the sort on
        # update is intended.
        self.tree_view.set_model(self.model.create_model())

    @add_default_exception_handling()
    def register_on_open(self, func):
        """Register on open event."""
        self.on_open = func

    @add_default_exception_handling()
    def register_on_close(self, func):
        """Register on close event."""
        self.on_close = func

    @add_default_exception_handling('Failed to display storage information')
    def create_columns(self, tree_view):
        """Create the columns of the TreeView (file name and size in MB)."""
        rendererText = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("File", rendererText, text=0)
        column.set_sort_column_id(0)
        tree_view.append_column(column)
        rendererText = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("Size [MB]", rendererText, text=1)
        column.set_sort_column_id(1)
        # Render the numeric size with two decimal places.
        column.set_cell_data_func(
            rendererText, lambda col, cell, model, iter,
            unused: cell.set_property(
                "text", '{0:.2f}'.format(model.get(iter, 1)[0])))
        tree_view.append_column(column)
|
from hippy.builtin_klass import wrap_method
from hippy.builtin import ThisUnwrapper, Optional
from hippy.klass import def_class
from hippy.objects.base import W_Root
from hippy.objects.instanceobject import W_InstanceObject
from hippy import consts
class W_ApplevelArrayIterator(W_InstanceObject):
    # Interpreter-level instance class backing PHP ArrayIterator objects;
    # no behavior beyond a plain instance object yet.
    pass
@wrap_method(['interp', ThisUnwrapper(W_ApplevelArrayIterator),
             Optional(W_Root)],
            name='ArrayIterator::__construct')
def ArrayIterator_construct(interp, this, w_arr=None):
    """ArrayIterator::__construct -- store the given array (or a fresh empty
    array when none is passed) in the private 'storage' attribute."""
    if w_arr is None:
        w_arr = interp.space.new_array_from_list([])
    this.setattr(interp, "storage", w_arr, k_ArrayIterator)
# The Iterator-protocol methods below are deliberately empty stubs: they are
# registered on the class so it satisfies the interface, but the actual
# iteration behavior is not implemented yet.
@wrap_method([], name='ArrayIterator::current')
def ArrayIterator_current():
    pass

@wrap_method([], name='ArrayIterator::next')
def ArrayIterator_next():
    pass

@wrap_method([], name='ArrayIterator::key')
def ArrayIterator_key():
    pass

@wrap_method([], name='ArrayIterator::rewind')
def ArrayIterator_rewind():
    pass

@wrap_method([], name='ArrayIterator::valid')
def ArrayIterator_valid():
    pass
# PHP-level ArrayIterator class definition: binds the methods above, declares
# the private 'storage' attribute, and advertises the Iterator interface.
k_ArrayIterator = def_class('ArrayIterator', [
    ArrayIterator_construct,
    ArrayIterator_current,
    ArrayIterator_next,
    ArrayIterator_key,
    ArrayIterator_rewind,
    ArrayIterator_valid],
    [('storage', consts.ACC_PRIVATE)],
    instance_class=W_ApplevelArrayIterator,
    implements=["Iterator"]
)
|
"""Module for Genomic DelIns Translation."""
from variation.translators.translator import Translator
from variation.schemas.classification_response_schema import ClassificationType
from variation.schemas.token_response_schema import GenomicDelInsToken
class GenomicDelIns(Translator):
    """The Genomic DelIns Translator class."""

    def can_translate(self, type: ClassificationType) -> bool:
        """Return if classification type is Genomic DelIns."""
        return type == ClassificationType.GENOMIC_DELINS

    def is_token_instance(self, token) -> bool:
        """Return if the token is a Genomic DelIns token instance."""
        return isinstance(token, GenomicDelInsToken)
|
# -*- coding: utf-8 -*-
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
from sklearn_porter import Porter
# Fit a brute-force 3-NN classifier on the Iris data set.
iris_data = load_iris()
X = iris_data.data
y = iris_data.target

clf = KNeighborsClassifier(algorithm='brute',
                           n_neighbors=3,
                           weights='uniform')
clf.fit(X, y)

# Transpile the fitted estimator to standalone JavaScript via sklearn-porter
# and print the generated source (the expected output is shown in the
# module-level string below).
porter = Porter(clf, language='js')
output = porter.export()
print(output)
"""
var KNeighborsClassifier = function(nNeighbors, nClasses, power, X, y) {
this.nNeighbors = nNeighbors;
this.nTemplates = y.length;
this.nClasses = nClasses;
this.power = power;
this.X = X;
this.y = y;
var Neighbor = function(clazz, dist) {
this.clazz = clazz;
this.dist = dist;
};
var compute = function(temp, cand, q) {
var dist = 0.,
diff;
for (var i = 0, l = temp.length; i < l; i++) {
diff = Math.abs(temp[i] - cand[i]);
if (q==1) {
dist += diff;
} else if (q==2) {
dist += diff*diff;
} else if (q==Number.POSITIVE_INFINITY) {
if (diff > dist) {
dist = diff;
}
} else {
dist += Math.pow(diff, q);
}
}
if (q==1 || q==Number.POSITIVE_INFINITY) {
return dist;
} else if (q==2) {
return Math.sqrt(dist);
} else {
return Math.pow(dist, 1. / q);
}
};
this.predict = function(features) {
var classIdx = 0, i;
if (this.nNeighbors == 1) {
var minDist = Number.POSITIVE_INFINITY,
curDist;
for (i = 0; i < this.nTemplates; i++) {
curDist = compute(this.X[i], features, this.power);
if (curDist <= minDist) {
minDist = curDist;
classIdx = this.y[i];
}
}
} else {
var classes = new Array(this.nClasses).fill(0);
var dists = [];
for (i = 0; i < this.nTemplates; i++) {
dists.push(new Neighbor(this.y[i], compute(this.X[i], features, this.power)));
}
dists.sort(function compare(n1, n2) {
return (n1.dist < n2.dist) ? -1 : 1;
});
for (i = 0; i < this.nNeighbors; i++) {
classes[dists[i].clazz]++;
}
for (i = 0; i < this.nClasses; i++) {
classIdx = classes[i] > classes[classIdx] ? i : classIdx;
}
}
return classIdx;
};
};
if (typeof process !== 'undefined' && typeof process.argv !== 'undefined') {
if (process.argv.length - 2 === 4) {
// Features:
var features = process.argv.slice(2);
// Parameters:
var X = [[5.0999999999999996, 3.5, 1.3999999999999999, 0.20000000000000001], [4.9000000000000004, 3.0, 1.3999999999999999, 0.20000000000000001], [4.7000000000000002, 3.2000000000000002, 1.3, 0.20000000000000001], [4.5999999999999996, 3.1000000000000001, 1.5, 0.20000000000000001], [5.0, 3.6000000000000001, 1.3999999999999999, 0.20000000000000001], [5.4000000000000004, 3.8999999999999999, 1.7, 0.40000000000000002], [4.5999999999999996, 3.3999999999999999, 1.3999999999999999, 0.29999999999999999], [5.0, 3.3999999999999999, 1.5, 0.20000000000000001], [4.4000000000000004, 2.8999999999999999, 1.3999999999999999, 0.20000000000000001], [4.9000000000000004, 3.1000000000000001, 1.5, 0.10000000000000001], [5.4000000000000004, 3.7000000000000002, 1.5, 0.20000000000000001], [4.7999999999999998, 3.3999999999999999, 1.6000000000000001, 0.20000000000000001], [4.7999999999999998, 3.0, 1.3999999999999999, 0.10000000000000001], [4.2999999999999998, 3.0, 1.1000000000000001, 0.10000000000000001], [5.7999999999999998, 4.0, 1.2, 0.20000000000000001], [5.7000000000000002, 4.4000000000000004, 1.5, 0.40000000000000002], [5.4000000000000004, 3.8999999999999999, 1.3, 0.40000000000000002], [5.0999999999999996, 3.5, 1.3999999999999999, 0.29999999999999999], [5.7000000000000002, 3.7999999999999998, 1.7, 0.29999999999999999], [5.0999999999999996, 3.7999999999999998, 1.5, 0.29999999999999999], [5.4000000000000004, 3.3999999999999999, 1.7, 0.20000000000000001], [5.0999999999999996, 3.7000000000000002, 1.5, 0.40000000000000002], [4.5999999999999996, 3.6000000000000001, 1.0, 0.20000000000000001], [5.0999999999999996, 3.2999999999999998, 1.7, 0.5], [4.7999999999999998, 3.3999999999999999, 1.8999999999999999, 0.20000000000000001], [5.0, 3.0, 1.6000000000000001, 0.20000000000000001], [5.0, 3.3999999999999999, 1.6000000000000001, 0.40000000000000002], [5.2000000000000002, 3.5, 1.5, 0.20000000000000001], [5.2000000000000002, 3.3999999999999999, 1.3999999999999999, 0.20000000000000001], 
[4.7000000000000002, 3.2000000000000002, 1.6000000000000001, 0.20000000000000001], [4.7999999999999998, 3.1000000000000001, 1.6000000000000001, 0.20000000000000001], [5.4000000000000004, 3.3999999999999999, 1.5, 0.40000000000000002], [5.2000000000000002, 4.0999999999999996, 1.5, 0.10000000000000001], [5.5, 4.2000000000000002, 1.3999999999999999, 0.20000000000000001], [4.9000000000000004, 3.1000000000000001, 1.5, 0.10000000000000001], [5.0, 3.2000000000000002, 1.2, 0.20000000000000001], [5.5, 3.5, 1.3, 0.20000000000000001], [4.9000000000000004, 3.1000000000000001, 1.5, 0.10000000000000001], [4.4000000000000004, 3.0, 1.3, 0.20000000000000001], [5.0999999999999996, 3.3999999999999999, 1.5, 0.20000000000000001], [5.0, 3.5, 1.3, 0.29999999999999999], [4.5, 2.2999999999999998, 1.3, 0.29999999999999999], [4.4000000000000004, 3.2000000000000002, 1.3, 0.20000000000000001], [5.0, 3.5, 1.6000000000000001, 0.59999999999999998], [5.0999999999999996, 3.7999999999999998, 1.8999999999999999, 0.40000000000000002], [4.7999999999999998, 3.0, 1.3999999999999999, 0.29999999999999999], [5.0999999999999996, 3.7999999999999998, 1.6000000000000001, 0.20000000000000001], [4.5999999999999996, 3.2000000000000002, 1.3999999999999999, 0.20000000000000001], [5.2999999999999998, 3.7000000000000002, 1.5, 0.20000000000000001], [5.0, 3.2999999999999998, 1.3999999999999999, 0.20000000000000001], [7.0, 3.2000000000000002, 4.7000000000000002, 1.3999999999999999], [6.4000000000000004, 3.2000000000000002, 4.5, 1.5], [6.9000000000000004, 3.1000000000000001, 4.9000000000000004, 1.5], [5.5, 2.2999999999999998, 4.0, 1.3], [6.5, 2.7999999999999998, 4.5999999999999996, 1.5], [5.7000000000000002, 2.7999999999999998, 4.5, 1.3], [6.2999999999999998, 3.2999999999999998, 4.7000000000000002, 1.6000000000000001], [4.9000000000000004, 2.3999999999999999, 3.2999999999999998, 1.0], [6.5999999999999996, 2.8999999999999999, 4.5999999999999996, 1.3], [5.2000000000000002, 2.7000000000000002, 3.8999999999999999, 
1.3999999999999999], [5.0, 2.0, 3.5, 1.0], [5.9000000000000004, 3.0, 4.2000000000000002, 1.5], [6.0, 2.2000000000000002, 4.0, 1.0], [6.0999999999999996, 2.8999999999999999, 4.7000000000000002, 1.3999999999999999], [5.5999999999999996, 2.8999999999999999, 3.6000000000000001, 1.3], [6.7000000000000002, 3.1000000000000001, 4.4000000000000004, 1.3999999999999999], [5.5999999999999996, 3.0, 4.5, 1.5], [5.7999999999999998, 2.7000000000000002, 4.0999999999999996, 1.0], [6.2000000000000002, 2.2000000000000002, 4.5, 1.5], [5.5999999999999996, 2.5, 3.8999999999999999, 1.1000000000000001], [5.9000000000000004, 3.2000000000000002, 4.7999999999999998, 1.8], [6.0999999999999996, 2.7999999999999998, 4.0, 1.3], [6.2999999999999998, 2.5, 4.9000000000000004, 1.5], [6.0999999999999996, 2.7999999999999998, 4.7000000000000002, 1.2], [6.4000000000000004, 2.8999999999999999, 4.2999999999999998, 1.3], [6.5999999999999996, 3.0, 4.4000000000000004, 1.3999999999999999], [6.7999999999999998, 2.7999999999999998, 4.7999999999999998, 1.3999999999999999], [6.7000000000000002, 3.0, 5.0, 1.7], [6.0, 2.8999999999999999, 4.5, 1.5], [5.7000000000000002, 2.6000000000000001, 3.5, 1.0], [5.5, 2.3999999999999999, 3.7999999999999998, 1.1000000000000001], [5.5, 2.3999999999999999, 3.7000000000000002, 1.0], [5.7999999999999998, 2.7000000000000002, 3.8999999999999999, 1.2], [6.0, 2.7000000000000002, 5.0999999999999996, 1.6000000000000001], [5.4000000000000004, 3.0, 4.5, 1.5], [6.0, 3.3999999999999999, 4.5, 1.6000000000000001], [6.7000000000000002, 3.1000000000000001, 4.7000000000000002, 1.5], [6.2999999999999998, 2.2999999999999998, 4.4000000000000004, 1.3], [5.5999999999999996, 3.0, 4.0999999999999996, 1.3], [5.5, 2.5, 4.0, 1.3], [5.5, 2.6000000000000001, 4.4000000000000004, 1.2], [6.0999999999999996, 3.0, 4.5999999999999996, 1.3999999999999999], [5.7999999999999998, 2.6000000000000001, 4.0, 1.2], [5.0, 2.2999999999999998, 3.2999999999999998, 1.0], [5.5999999999999996, 2.7000000000000002, 4.2000000000000002, 
1.3], [5.7000000000000002, 3.0, 4.2000000000000002, 1.2], [5.7000000000000002, 2.8999999999999999, 4.2000000000000002, 1.3], [6.2000000000000002, 2.8999999999999999, 4.2999999999999998, 1.3], [5.0999999999999996, 2.5, 3.0, 1.1000000000000001], [5.7000000000000002, 2.7999999999999998, 4.0999999999999996, 1.3], [6.2999999999999998, 3.2999999999999998, 6.0, 2.5], [5.7999999999999998, 2.7000000000000002, 5.0999999999999996, 1.8999999999999999], [7.0999999999999996, 3.0, 5.9000000000000004, 2.1000000000000001], [6.2999999999999998, 2.8999999999999999, 5.5999999999999996, 1.8], [6.5, 3.0, 5.7999999999999998, 2.2000000000000002], [7.5999999999999996, 3.0, 6.5999999999999996, 2.1000000000000001], [4.9000000000000004, 2.5, 4.5, 1.7], [7.2999999999999998, 2.8999999999999999, 6.2999999999999998, 1.8], [6.7000000000000002, 2.5, 5.7999999999999998, 1.8], [7.2000000000000002, 3.6000000000000001, 6.0999999999999996, 2.5], [6.5, 3.2000000000000002, 5.0999999999999996, 2.0], [6.4000000000000004, 2.7000000000000002, 5.2999999999999998, 1.8999999999999999], [6.7999999999999998, 3.0, 5.5, 2.1000000000000001], [5.7000000000000002, 2.5, 5.0, 2.0], [5.7999999999999998, 2.7999999999999998, 5.0999999999999996, 2.3999999999999999], [6.4000000000000004, 3.2000000000000002, 5.2999999999999998, 2.2999999999999998], [6.5, 3.0, 5.5, 1.8], [7.7000000000000002, 3.7999999999999998, 6.7000000000000002, 2.2000000000000002], [7.7000000000000002, 2.6000000000000001, 6.9000000000000004, 2.2999999999999998], [6.0, 2.2000000000000002, 5.0, 1.5], [6.9000000000000004, 3.2000000000000002, 5.7000000000000002, 2.2999999999999998], [5.5999999999999996, 2.7999999999999998, 4.9000000000000004, 2.0], [7.7000000000000002, 2.7999999999999998, 6.7000000000000002, 2.0], [6.2999999999999998, 2.7000000000000002, 4.9000000000000004, 1.8], [6.7000000000000002, 3.2999999999999998, 5.7000000000000002, 2.1000000000000001], [7.2000000000000002, 3.2000000000000002, 6.0, 1.8], [6.2000000000000002, 2.7999999999999998, 
4.7999999999999998, 1.8], [6.0999999999999996, 3.0, 4.9000000000000004, 1.8], [6.4000000000000004, 2.7999999999999998, 5.5999999999999996, 2.1000000000000001], [7.2000000000000002, 3.0, 5.7999999999999998, 1.6000000000000001], [7.4000000000000004, 2.7999999999999998, 6.0999999999999996, 1.8999999999999999], [7.9000000000000004, 3.7999999999999998, 6.4000000000000004, 2.0], [6.4000000000000004, 2.7999999999999998, 5.5999999999999996, 2.2000000000000002], [6.2999999999999998, 2.7999999999999998, 5.0999999999999996, 1.5], [6.0999999999999996, 2.6000000000000001, 5.5999999999999996, 1.3999999999999999], [7.7000000000000002, 3.0, 6.0999999999999996, 2.2999999999999998], [6.2999999999999998, 3.3999999999999999, 5.5999999999999996, 2.3999999999999999], [6.4000000000000004, 3.1000000000000001, 5.5, 1.8], [6.0, 3.0, 4.7999999999999998, 1.8], [6.9000000000000004, 3.1000000000000001, 5.4000000000000004, 2.1000000000000001], [6.7000000000000002, 3.1000000000000001, 5.5999999999999996, 2.3999999999999999], [6.9000000000000004, 3.1000000000000001, 5.0999999999999996, 2.2999999999999998], [5.7999999999999998, 2.7000000000000002, 5.0999999999999996, 1.8999999999999999], [6.7999999999999998, 3.2000000000000002, 5.9000000000000004, 2.2999999999999998], [6.7000000000000002, 3.2999999999999998, 5.7000000000000002, 2.5], [6.7000000000000002, 3.0, 5.2000000000000002, 2.2999999999999998], [6.2999999999999998, 2.5, 5.0, 1.8999999999999999], [6.5, 3.0, 5.2000000000000002, 2.0], [6.2000000000000002, 3.3999999999999999, 5.4000000000000004, 2.2999999999999998], [5.9000000000000004, 3.0, 5.0999999999999996, 1.8]];
var y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2];
// Estimator:
var clf = new KNeighborsClassifier(3, 3, 2, X, y);
var prediction = clf.predict(features);
console.log(prediction);
}
}
"""
|
"""Add indexes to models
Revision ID: 440a8a3c0d96
Revises: 4d1e2e13e514
Create Date: 2020-02-04 17:15:37.150756
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "440a8a3c0d96"
down_revision = "4d1e2e13e514"
def upgrade():
    """Create lookup indexes on component_builds and module_builds."""
    # NOTE(review): the index name says "build_id" but the columns are
    # ("module_id", "nvr") -- the name is misleading, yet renaming it now
    # would diverge from deployed schemas; confirm before changing.
    op.create_index(
        "idx_component_builds_build_id_nvr",
        "component_builds",
        ["module_id", "nvr"],
        unique=True,
    )
    # Same naming caveat as above: columns are ("module_id", "task_id").
    op.create_index(
        "idx_component_builds_build_id_task_id",
        "component_builds",
        ["module_id", "task_id"],
        unique=True,
    )
    op.create_index(
        op.f("ix_component_builds_batch"), "component_builds", ["batch"], unique=False
    )
    # Composite unique index covering the full module-build NSVC identity.
    op.create_index(
        "idx_module_builds_name_stream_version_context",
        "module_builds",
        ["name", "stream", "version", "context"],
        unique=True,
    )
    op.create_index(
        op.f("ix_module_builds_name"), "module_builds", ["name"], unique=False
    )
    op.create_index(
        op.f("ix_module_builds_state"), "module_builds", ["state"], unique=False
    )
    op.create_index(
        op.f("ix_module_builds_koji_tag"), "module_builds", ["koji_tag"], unique=False
    )
def downgrade():
    """Drop the indexes created by upgrade(), in reverse creation order."""
    op.drop_index(op.f("ix_module_builds_koji_tag"), table_name="module_builds")
    op.drop_index(op.f("ix_module_builds_state"), table_name="module_builds")
    op.drop_index(op.f("ix_module_builds_name"), table_name="module_builds")
    op.drop_index(
        "idx_module_builds_name_stream_version_context", table_name="module_builds"
    )
    op.drop_index(op.f("ix_component_builds_batch"), table_name="component_builds")
    op.drop_index(
        "idx_component_builds_build_id_task_id", table_name="component_builds"
    )
    op.drop_index("idx_component_builds_build_id_nvr", table_name="component_builds")
|
from .bars import *
from .feature_mat import *
from .frac_diff import *
from .filters import * |
import unittest
import ray
from ray.rllib.agents.dqn import DQNTrainer, DEFAULT_CONFIG
from ray.rllib.utils.test_utils import check, framework_iterator
class TestPolicy(unittest.TestCase):
    """Checks that an RLlib policy's state can be saved and restored."""

    @classmethod
    def setUpClass(cls) -> None:
        # One shared Ray runtime for the whole test case.
        ray.init()

    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()

    def test_policy_save_restore(self):
        """Train one iteration, then restore the pre-training policy state
        and verify get_state() round-trips exactly."""
        config = DEFAULT_CONFIG.copy()
        for _ in framework_iterator(config):
            trainer = DQNTrainer(config=config, env="CartPole-v0")
            policy = trainer.get_policy()
            state1 = policy.get_state()
            trainer.train()
            state2 = policy.get_state()
            # Training must have advanced the exploration timestep ...
            check(
                state1["_exploration_state"]["last_timestep"],
                state2["_exploration_state"]["last_timestep"],
                false=True,
            )
            # ... and the global timestep (false=True asserts inequality).
            check(state1["global_timestep"], state2["global_timestep"], false=True)
            # Reset policy to its original state and compare.
            policy.set_state(state1)
            state3 = policy.get_state()
            # Make sure everything is the same.
            check(state1, state3)
if __name__ == "__main__":
    # Delegate to pytest so this file runs the same way standalone as it
    # does under the project's test harness.
    import pytest
    import sys

    sys.exit(pytest.main(["-v", __file__]))
|
from check.serializers import PingSerializer, PongSerializer
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
class PongViewSet(ListAPIView):
    """Health-check endpoint returning a static message.

    Fix: ``get`` now declares ``request`` explicitly. The original signature
    ``get(self, format=None)`` only worked by accident -- the framework passes
    the request positionally, so it was silently bound to ``format``.
    """

    serializer_class = PongSerializer
    # NOTE(review): the Pong endpoint replies "PING" while Ping replies
    # "PONG" -- presumably an intentional call/response mirror; confirm
    # before changing either message.
    message = "PING: The API is running!"

    def get(self, request, format=None):
        """Return the static health-check message."""
        return Response(self.message)
class PingViewSet(ListAPIView):
    """Health-check endpoint returning a static message.

    Fix: ``get`` now declares ``request`` explicitly. The original signature
    ``get(self, format=None)`` only worked by accident -- the framework passes
    the request positionally, so it was silently bound to ``format``.
    """

    serializer_class = PingSerializer
    message = "PONG: The API is running"

    def get(self, request, format=None):
        """Return the static health-check message."""
        return Response(self.message)
# Module-level view callables -- presumably referenced from a URLconf
# elsewhere in the project (confirm against urls.py).
pong_viewset = PongViewSet.as_view()
ping_viewset = PingViewSet.as_view()
|
import unittest
import xmlrunner
def runner(output='python_test_xml'):
    """Build an XMLTestRunner writing JUnit-style XML reports to *output*."""
    return xmlrunner.XMLTestRunner(
        output=output
    )
def find_test():
    """Discover all unittest test cases under the 'pystache' package."""
    return unittest.TestLoader().discover('pystache')
if __name__ == '__main__':
    # Bug fix: the discovery helper above is named `find_test`; the original
    # code called the undefined name `find_tests`, raising NameError at
    # runtime before any test could run.
    runner().run(find_test())
# Read the trip distance in km from the user (prompts/outputs in Portuguese).
D = float(input('Qual é a distância da sua viagem?'))
print(f'Você está prestes a começar uma viagem de {D:.1f}Km')
# Fare rule: trips longer than 200 km get the discounted rate of R$0.45/km,
# otherwise the standard rate of R$0.50/km applies.
if D > 200:
    print(f'E o preço da sua passagem será de R${D*0.45:.2f}')
else:
    print(f'E o preço da sua passagem será de R${D*0.50:.2f}')
|
from typing import Dict, Any
import copy
class Label:
    """Read-only wrapper around a raw label mapping.

    The constructor deep-copies the input and ``to_raw`` deep-copies the
    output, so neither the caller's dict nor values returned to callers can
    mutate this object's internal state.
    """

    def __init__(self, raw_label: Dict[str, Any]):
        """Store a private deep copy of *raw_label*."""
        self._raw = copy.deepcopy(raw_label)

    def name(self) -> str:
        """Return the label's name (raises KeyError if 'name' is missing)."""
        return self._raw["name"]

    def to_raw(self) -> Dict[str, Any]:
        """Return a deep copy of the underlying mapping; callers may mutate
        the result freely."""
        return copy.deepcopy(self._raw)

    def __repr__(self) -> str:
        # Debug-friendly representation showing the wrapped mapping.
        return f"{type(self).__name__}({self._raw!r})"
|
# Demo of Python list slicing (console output intentionally in Russian).
lst = ["Москва", "Санкт-Петербург", "Тверь", "Казань"]
print(lst)
print(lst[1:3])
lst2 = lst[2:4]
print(lst2)
lst3 = lst[-2:-1]  # the right boundary is excluded from the slice
print(lst3)
# does slicing create a copy of the list? (compare ids below)
c = lst[:]
print(c, id(c))
print(lst, id(lst))
# Building a copy via the list() constructor:
print("Создание списка через функцию-конструктор list(lst): ")
c = list(lst)
print(c, id(c))
# Assigning new values to a slice replaces that range in place:
print("# Присваивание срезам новых значений: ")
lst[1:3] = "Владимир", "Астрахань"
print(lst)
import numpy as np
import pytest
from psydac.core.bsplines import make_knots
from psydac.fem.basic import FemField
from psydac.fem.splines import SplineSpace
from psydac.fem.tensor import TensorFemSpace
from psydac.fem.vector import ProductFemSpace
from psydac.feec.derivatives import DirectionalDerivativeOperator
from psydac.feec.derivatives import Derivative_1D, Gradient_2D, Gradient_3D
from psydac.feec.derivatives import ScalarCurl_2D, VectorCurl_2D, Curl_3D
from psydac.feec.derivatives import Divergence_2D, Divergence_3D
from mpi4py import MPI
#==============================================================================
# These tests exercise the DirectionalDerivativeOperator structurally; they do
# not verify that it actually computes derivatives (the gradient/curl/
# divergence tests below already cover that).
def run_directional_derivative_operator(comm, domain, ncells, degree, periodic, direction, negative, transposed, seed, matrix_assembly=False):
    """Structurally test DirectionalDerivativeOperator on a tensor spline space.

    Builds a B-spline space V0 on *domain* with the given *ncells*, *degree*
    and *periodic* flags, reduces the degree along *direction* to get V1, and
    checks diffop.dot() (with and without ``out=``) against a hand-computed
    finite-difference reference; with *matrix_assembly* it also checks the
    stencil/sparse conversions.
    """
    # determinize tests
    np.random.seed(seed)

    breaks = [np.linspace(*lims, num=n+1) for lims, n in zip(domain, ncells)]
    Ns = [SplineSpace(degree=d, grid=g, periodic=p, basis='B') \
            for d, g, p in zip(degree, breaks, periodic)]

    # original space
    V0 = TensorFemSpace(*Ns, comm=comm)
    # reduced space
    V1 = V0.reduce_degree(axes=[direction], basis='M')

    diffop = DirectionalDerivativeOperator(V0.vector_space, V1.vector_space, direction, negative=negative, transposed=transposed)

    # some boundary, and transposed handling
    vpads = np.array(V0.vector_space.pads, dtype=int)
    pads = np.array(V1.vector_space.pads, dtype=int)

    # transpose, if needed (the roles of domain/codomain swap)
    if transposed:
        V0, V1 = V1, V0

    counts = np.array(V1.vector_space.ends, dtype=int) - np.array(V1.vector_space.starts, dtype=int) + 1
    diffadd = np.zeros((len(ncells),), dtype=int)
    diffadd[direction] = 1
    # Slice selecting the local (non-ghost) part of the codomain data.
    localslice = tuple([slice(p,-p) for p in V1.vector_space.pads])

    # random vector, scaled-up data (with fixed seed)
    v = V0.vector_space.zeros()
    v._data[:] = np.random.random(v._data.shape) * 100
    v.update_ghost_regions()

    # compute reference solution (do it element-wise for now...)
    # (but we only test small domains here)
    ref = V1.vector_space.zeros()
    outslice = tuple([slice(s, s+c) for s,c in zip(pads, counts)])
    idslice = tuple([slice(s, s+c) for s,c in zip(vpads, counts)])
    # idslice shifted by one along *direction*: the forward-difference partner.
    diffslice = tuple([slice(s+d, s+c+d) for s,c,d in zip(vpads, counts, diffadd)])
    if transposed:
        ref._data[idslice] -= v._data[outslice]
        ref._data[diffslice] += v._data[outslice]

        # we need to account for the ghost region write which diffslice does,
        # i.e. the part which might be sent to another process, or even swapped to the other side
        # since the ghost layers of v are updated, we update the data on the other side
        # (also update_ghost_regions won't preserve the data we wrote there)
        ref_restslice = [c for c in idslice]
        ref_restslice[direction] = slice(vpads[direction], vpads[direction] + 1)
        v_restslice = [c for c in outslice]
        v_restslice[direction] = slice(pads[direction] - 1, pads[direction])
        ref._data[tuple(ref_restslice)] += v._data[tuple(v_restslice)]
    else:
        # forward difference along *direction*
        ref._data[outslice] = v._data[diffslice] - v._data[idslice]
    if negative:
        ref._data[localslice] = -ref._data[localslice]
    ref.update_ghost_regions()

    # compute and compare
    # case one: dot(v, out=None)
    res1 = diffop.dot(v)
    assert np.allclose(ref._data[localslice], res1._data[localslice])

    # case two: dot(v, out=w) must write into and return the given vector
    out = V1.vector_space.zeros()
    res2 = diffop.dot(v, out=out)
    assert res2 is out
    assert np.allclose(ref._data[localslice], res2._data[localslice])

    # flag to skip matrix assembly if it takes too long or fails
    if matrix_assembly:
        # case three: tokronstencil().tostencil().dot(v)
        # (i.e. test matrix conversion)
        matrix = diffop.tokronstencil().tostencil()
        res3 = matrix.dot(v)
        assert np.allclose(ref._data[localslice], res3._data[localslice])

        # compare matrix assembly (in non-parallel case at least)
        if not diffop.domain.parallel:
            assert np.array_equal(diffop.toarray_nopads(), matrix.toarray(with_pads=False))

        # case four: tosparse().dot(v._data)
        res4 = diffop.tosparse().dot(v._data.flatten())
        assert np.allclose(ref._data[localslice], res4.reshape(ref._data.shape)[localslice])
def compare_diff_operators_by_matrixassembly(lo1, lo2):
    """Assert that two directional-derivative operators assemble to the same stencil matrix.

    Both operators are converted via ``tokronstencil().tostencil()``, their ghost
    regions are synchronized, and the raw data buffers are compared element-wise.
    """
    assembled = []
    for op in (lo1, lo2):
        mat = op.tokronstencil().tostencil()
        # synchronize ghost layers before comparing raw buffers
        mat.update_ghost_regions()
        assembled.append(mat)
    assert np.allclose(assembled[0]._data, assembled[1]._data)
def test_directional_derivative_operator_invalid_wrongsized1():
    """Constructor must reject incorrectly-sized spaces.

    Here ``V0.vector_space.npts != V1.vector_space.npts`` because the degree is
    reduced along axis 1 while the derivative direction is axis 0.
    (NOTE: if periodic was [True, True], this test would most likely pass.)
    """
    # two non-periodic degree-3 B-spline spaces on [0, 1] with 8 cells each
    grids = [np.linspace(0, 1, num=9) for _ in range(2)]
    spaces = [SplineSpace(degree=3, grid=g, periodic=False, basis='B') for g in grids]

    # original space
    V0 = TensorFemSpace(*spaces)
    # reduced space: degree lowered along axis 1 only
    V1 = V0.reduce_degree(axes=[1], basis='M')

    # size mismatch must be detected at construction time
    with pytest.raises(AssertionError):
        _ = DirectionalDerivativeOperator(V0.vector_space, V1.vector_space, 0, negative=False)
def test_directional_derivative_operator_invalid_wrongspace2():
    """Constructor must reject spaces whose pads differ.

    The second space is built directly with degree lowered by one (instead of
    via ``reduce_degree``), so its pads do not match those of the first space.
    """
    degrees = [3, 3]
    grids = [np.linspace(0, 1, num=9) for _ in range(2)]

    # original space: degree-3 B-spline spaces
    Ns = [SplineSpace(degree=d, grid=g, periodic=False, basis='B')
          for d, g in zip(degrees, grids)]
    # second space: one degree lower, hence different pads
    Ms = [SplineSpace(degree=d - 1, grid=g, periodic=False, basis='B')
          for d, g in zip(degrees, grids)]

    V0 = TensorFemSpace(*Ns)
    V1 = TensorFemSpace(*Ms)

    # pad mismatch must be detected at construction time
    with pytest.raises(AssertionError):
        _ = DirectionalDerivativeOperator(V0.vector_space, V1.vector_space, 0, negative=False)
def test_directional_derivative_operator_transposition_correctness():
    """Transposing the operator must agree with transposing its assembled matrices."""
    # two non-periodic degree-3 B-spline spaces on [0, 1] with 8 cells each
    grids = [np.linspace(0, 1, num=9) for _ in range(2)]
    Ns = [SplineSpace(degree=3, grid=g, periodic=False, basis='B') for g in grids]

    # original space and space reduced along the derivative direction (axis 0)
    V0 = TensorFemSpace(*Ns)
    V1 = V0.reduce_degree(axes=[0], basis='M')

    diff = DirectionalDerivativeOperator(V0.vector_space, V1.vector_space, 0,
                                         negative=False, transposed=False)

    # stencil assembly: operator transpose == matrix transpose (both directions)
    M = diff.tokronstencil().tostencil()
    MT = diff.T.tokronstencil().tostencil()
    assert np.allclose(M.T._data, MT._data)
    assert np.allclose(M._data, MT.T._data)

    # sparse assembly: the COO structure must match entry-wise in both directions
    sparseM = diff.tosparse().tocoo()
    sparseMT = diff.T.tosparse().tocoo()
    for lhs, rhs in ((sparseMT, sparseM.T.tocoo()),
                     (sparseM, sparseMT.T.tocoo())):
        assert np.array_equal(lhs.col, rhs.col)
        assert np.array_equal(lhs.row, rhs.row)
        assert np.array_equal(lhs.data, rhs.data)
def test_directional_derivative_operator_interface():
    """Negation and transposition must behave as their methods suggest.

    Builds all four (negative, transposed) flavours explicitly and checks that
    every ``.T`` / unary-minus combination lands on the matching operator, by
    assembling and comparing matrices.
    """
    grids = [np.linspace(0, 1, num=9) for _ in range(2)]
    Ns = [SplineSpace(degree=3, grid=g, periodic=False, basis='B') for g in grids]

    # original and reduced space (degree lowered along the derivative axis 0)
    V0 = TensorFemSpace(*Ns)
    V1 = V0.reduce_degree(axes=[0], basis='M')

    def make(negative, transposed):
        # helper: one operator flavour, all other arguments fixed
        return DirectionalDerivativeOperator(V0.vector_space, V1.vector_space, 0,
                                             negative=negative, transposed=transposed)

    diff = make(False, False)
    diffT = make(False, True)
    diffN = make(True, False)
    diffNT = make(True, True)

    # every derived operator (left) must equal its explicitly-built twin (right)
    pairs = [
        (diff.T, diffT), (-diff, diffN), (-diff.T, diffNT),
        (diffT.T, diff), (-diffT, diffNT), (-diffT.T, diffN),
        (diffN.T, diffNT), (-diffN, diff), (-diffN.T, diffT),
        (diffNT.T, diffN), (-diffNT, diffT), (-diffNT.T, diff),
    ]
    for derived, explicit in pairs:
        compare_diff_operators_by_matrixassembly(derived, explicit)
@pytest.mark.parametrize('domain', [(0, 1), (-2, 3)])
@pytest.mark.parametrize('ncells', [11, 37])
@pytest.mark.parametrize('degree', [2, 3, 4, 5])
@pytest.mark.parametrize('periodic', [True, False])
@pytest.mark.parametrize('direction', [0])
@pytest.mark.parametrize('negative', [True, False])
@pytest.mark.parametrize('transposed', [True, False])
@pytest.mark.parametrize('seed', [1,3])
def test_directional_derivative_operator_1d_ser(domain, ncells, degree, periodic, direction, negative, transposed, seed):
    """Serial (no MPI communicator) 1D run of the directional derivative operator test."""
    # Scalar parameters are wrapped in one-element lists, since the runner is per-axis.
    # The trailing True presumably enables the matrix-assembly checks
    # (see run_directional_derivative_operator's flag — TODO confirm, def is above).
    run_directional_derivative_operator(None, [domain], [ncells], [degree], [periodic], direction, negative, transposed, seed, True)
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8])])
@pytest.mark.parametrize('ncells', [(10, 9), (27, 15)])
@pytest.mark.parametrize('degree', [(3, 2), (4, 5)])
@pytest.mark.parametrize('periodic', [(True, False), (False, True)])
@pytest.mark.parametrize('direction', [0,1])
@pytest.mark.parametrize('negative', [True, False])
@pytest.mark.parametrize('transposed', [True, False])
@pytest.mark.parametrize('seed', [1,3])
def test_directional_derivative_operator_2d_ser(domain, ncells, degree, periodic, direction, negative, transposed, seed):
    """Serial 2D run; the trailing True presumably enables matrix-assembly checks — TODO confirm."""
    run_directional_derivative_operator(None, domain, ncells, degree, periodic, direction, negative, transposed, seed, True)
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8], [-0.5, 0.5])])
@pytest.mark.parametrize('ncells', [(4, 5, 7)])
@pytest.mark.parametrize('degree', [(3, 2, 5), (2, 4, 7)])
@pytest.mark.parametrize('periodic', [( True, False, False),
                                      (False,  True, False),
                                      (False, False,  True)])
@pytest.mark.parametrize('direction', [0,1,2])
@pytest.mark.parametrize('negative', [True, False])
@pytest.mark.parametrize('transposed', [True, False])
@pytest.mark.parametrize('seed', [1,3])
def test_directional_derivative_operator_3d_ser(domain, ncells, degree, periodic, direction, negative, transposed, seed):
    """Serial 3D run.

    NOTE(review): unlike the 1D/2D serial variants this does not pass the
    trailing matrix-assembly flag — presumably 3D assembly is too expensive;
    confirm against run_directional_derivative_operator's default.
    """
    run_directional_derivative_operator(None, domain, ncells, degree, periodic, direction, negative, transposed, seed)
@pytest.mark.parametrize('domain', [(0, 1), (-2, 3)])
@pytest.mark.parametrize('ncells', [29, 37])
@pytest.mark.parametrize('degree', [2, 3, 4, 5])
@pytest.mark.parametrize('periodic', [True, False])
@pytest.mark.parametrize('direction', [0])
@pytest.mark.parametrize('negative', [True, False])
@pytest.mark.parametrize('transposed', [True, False])
@pytest.mark.parametrize('seed', [1,3])
@pytest.mark.parallel
def test_directional_derivative_operator_1d_par(domain, ncells, degree, periodic, direction, negative, transposed, seed):
    """Parallel (MPI.COMM_WORLD) 1D run; matrix assembly disabled (see TODO below)."""
    # TODO: re-enable KroneckerStencilMatrix assembly here (fails right now sometimes when transposing)
    run_directional_derivative_operator(MPI.COMM_WORLD, [domain], [ncells], [degree], [periodic], direction, negative, transposed, seed)
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8])])
@pytest.mark.parametrize('ncells', [(10, 11), (27, 15)])
@pytest.mark.parametrize('degree', [(3, 2), (4, 5)])
@pytest.mark.parametrize('periodic', [(True, False), (False, True)])
@pytest.mark.parametrize('direction', [0,1])
@pytest.mark.parametrize('negative', [True, False])
@pytest.mark.parametrize('transposed', [True, False])
@pytest.mark.parametrize('seed', [1,3])
@pytest.mark.parallel
def test_directional_derivative_operator_2d_par(domain, ncells, degree, periodic, direction, negative, transposed, seed):
    """Parallel (MPI.COMM_WORLD) 2D run; matrix assembly disabled (see TODO below)."""
    # TODO: re-enable KroneckerStencilMatrix assembly here (fails right now sometimes when transposing)
    run_directional_derivative_operator(MPI.COMM_WORLD, domain, ncells, degree, periodic, direction, negative, transposed, seed)
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8], [-0.5, 0.5])])
@pytest.mark.parametrize('ncells', [(5, 5, 7)])
@pytest.mark.parametrize('degree', [(2, 2, 3)])
@pytest.mark.parametrize('periodic', [( True, False, False),
                                      (False,  True, False),
                                      (False, False,  True)])
@pytest.mark.parametrize('direction', [0,1,2])
@pytest.mark.parametrize('negative', [True, False])
@pytest.mark.parametrize('transposed', [True, False])
@pytest.mark.parametrize('seed', [3])
@pytest.mark.parallel
def test_directional_derivative_operator_3d_par(domain, ncells, degree, periodic, direction, negative, transposed, seed):
    """Parallel (MPI.COMM_WORLD) 3D run; reduced parameter grid to keep runtime bounded."""
    run_directional_derivative_operator(MPI.COMM_WORLD, domain, ncells, degree, periodic, direction, negative, transposed, seed)
# (higher dimensions are not tested here for now)
#==============================================================================
@pytest.mark.parametrize('domain', [(0, 1), (-2, 3)])
@pytest.mark.parametrize('ncells', [11, 37])
@pytest.mark.parametrize('degree', [2, 3, 4, 5])
@pytest.mark.parametrize('periodic', [True, False])
@pytest.mark.parametrize('seed', [1,3])
def test_Derivative_1D(domain, ncells, degree, periodic, seed):
    """Check that Derivative_1D maps a random spline field to its exact derivative.

    Since differentiation of a spline is exact in the reduced space, the error
    is expected to be at machine precision (relative max-norm <= 1e-14).
    """
    # determinize tests
    np.random.seed(seed)

    breaks = np.linspace(*domain, num=ncells+1)
    knots = make_knots(breaks, degree, periodic)

    # H1 space (0-forms)
    N = SplineSpace(degree=degree, knots=knots, periodic=periodic, basis='B')
    V0 = TensorFemSpace(N)

    # L2 space (1-forms)
    V1 = V0.reduce_degree(axes=[0], basis='M')

    # Linear operator: 1D derivative
    grad = Derivative_1D(V0, V1)

    # Create random field in V0 (fill only the locally-owned coefficients)
    u0 = FemField(V0)
    s, = V0.vector_space.starts
    e, = V0.vector_space.ends
    u0.coeffs[s:e+1] = np.random.random(e-s+1)

    # Compute gradient (=derivative) of u0
    u1 = grad(u0)

    # Create evaluation grid, and check if ∂/∂x u0(x) == u1(x)
    xgrid = np.linspace(*N.domain, num=11)
    vals_grad_u0 = np.array([u0.gradient(x)[0] for x in xgrid])
    vals_u1 = np.array([u1(x) for x in xgrid])

    # Test if relative max-norm of error is <= TOL
    maxnorm_field = abs(vals_u1).max()
    maxnorm_error = abs(vals_u1 - vals_grad_u0).max()
    assert maxnorm_error / maxnorm_field <= 1e-14
#==============================================================================
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8])]) # 1 case
@pytest.mark.parametrize('ncells', [(10, 9), (27, 15)]) # 2 cases
@pytest.mark.parametrize('degree', [(3, 2), (4, 5)]) # 2 cases
@pytest.mark.parametrize('periodic', [(True, False), (False, True)]) # 2 cases
@pytest.mark.parametrize('seed', [1,3])
def test_Gradient_2D(domain, ncells, degree, periodic, seed):
    """Check that Gradient_2D maps a random 0-form field to its exact gradient."""
    # determinize tests
    np.random.seed(seed)

    # Compute breakpoints along each direction
    breaks = [np.linspace(*lims, num=n+1) for lims, n in zip(domain, ncells)]

    # H1 space (0-forms)
    Nx, Ny = [SplineSpace(degree=d, grid=g, periodic=p, basis='B') \
              for d, g, p in zip(degree, breaks, periodic)]
    V0 = TensorFemSpace(Nx, Ny)

    # H-curl space (1-forms): degree reduced along the differentiated axis
    DxNy = V0.reduce_degree(axes=[0], basis='M')
    NxDy = V0.reduce_degree(axes=[1], basis='M')
    V1 = ProductFemSpace(DxNy, NxDy)

    # Linear operator: 2D gradient
    grad = Gradient_2D(V0, V1)

    # Create random field in V0 (fill only the locally-owned coefficients)
    u0 = FemField(V0)
    s1, s2 = V0.vector_space.starts
    e1, e2 = V0.vector_space.ends
    u0.coeffs[s1:e1+1, s2:e2+1] = np.random.random((e1-s1+1, e2-s2+1))

    # Compute gradient of u0
    u1 = grad(u0)

    # x and y components of u1 vector field
    u1x = u1.fields[0]
    u1y = u1.fields[1]

    # Create evaluation grid, and check if
    # ∂/∂x u0(x, y) == u1x(x, y)
    # ∂/∂y u0(x, y) == u1y(x, y)
    xgrid = np.linspace(*domain[0], num=11)
    ygrid = np.linspace(*domain[1], num=11)
    vals_grad_u0 = np.array([[u0.gradient(x, y) for x in xgrid] for y in ygrid])
    vals_u1 = np.array([[[u1x(x, y), u1y(x, y)] for x in xgrid] for y in ygrid])

    # Test if relative max-norm of error is <= TOL
    maxnorm_field = abs(vals_u1).max()
    maxnorm_error = abs(vals_u1 - vals_grad_u0).max()
    assert maxnorm_error / maxnorm_field <= 1e-14
#==============================================================================
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8], [-0.5, 0.5])]) # 1 case
@pytest.mark.parametrize('ncells', [(4, 5, 7)]) # 1 case
@pytest.mark.parametrize('degree', [(3, 2, 5), (2, 4, 7)]) # 2 cases
@pytest.mark.parametrize('periodic', [( True, False, False), # 3 cases
                                      (False,  True, False),
                                      (False, False,  True)])
@pytest.mark.parametrize('seed', [1,3])
def test_Gradient_3D(domain, ncells, degree, periodic, seed):
    """Check that Gradient_3D maps a random 0-form field to its exact gradient."""
    # determinize tests
    np.random.seed(seed)

    # Compute breakpoints along each direction
    breaks = [np.linspace(*lims, num=n+1) for lims, n in zip(domain, ncells)]

    # H1 space (0-forms)
    Nx, Ny, Nz = [SplineSpace(degree=d, grid=g, periodic=p, basis='B') \
                  for d, g, p in zip(degree, breaks, periodic)]
    V0 = TensorFemSpace(Nx, Ny, Nz)

    # H-curl space (1-forms): degree reduced along the differentiated axis
    DxNyNz = V0.reduce_degree(axes=[0], basis='M')
    NxDyNz = V0.reduce_degree(axes=[1], basis='M')
    NxNyDz = V0.reduce_degree(axes=[2], basis='M')
    V1 = ProductFemSpace(DxNyNz, NxDyNz, NxNyDz)

    # Linear operator: 3D gradient
    grad = Gradient_3D(V0, V1)

    # Create random field in V0 (fill only the locally-owned coefficients)
    u0 = FemField(V0)
    s1, s2, s3 = V0.vector_space.starts
    e1, e2, e3 = V0.vector_space.ends
    u0.coeffs[s1:e1+1, s2:e2+1, s3:e3+1] = np.random.random((e1-s1+1, e2-s2+1, e3-s3+1))

    # Compute gradient of u0
    u1 = grad(u0)

    # Components of u1 vector field
    u1x, u1y, u1z = u1.fields

    # Create random evaluation points (x, y, z) for evaluating fields
    npts = 1000
    xyz_pts = [[lims[0]+s*(lims[1]-lims[0]) for s, lims in zip(np.random.random(3), domain)]
               for i in range(npts)]

    # Check if
    # ∂/∂x u0(x, y, z) == u1x(x, y, z)
    # ∂/∂y u0(x, y, z) == u1y(x, y, z)
    # ∂/∂z u0(x, y, z) == u1z(x, y, z)
    vals_grad_u0 = np.array([u0.gradient(*xyz) for xyz in xyz_pts])
    vals_u1 = np.array([[u1x(*xyz), u1y(*xyz), u1z(*xyz)] for xyz in xyz_pts])

    # Test if relative max-norm of error is <= TOL
    maxnorm_field = abs(vals_u1).max()
    maxnorm_error = abs(vals_u1 - vals_grad_u0).max()
    assert maxnorm_error / maxnorm_field <= 1e-14
#==============================================================================
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8])]) # 1 case
@pytest.mark.parametrize('ncells', [(10, 9), (27, 15)]) # 2 cases
@pytest.mark.parametrize('degree', [(3, 2), (4, 5)]) # 2 cases
@pytest.mark.parametrize('periodic', [(True, False), (False, True)]) # 2 cases
@pytest.mark.parametrize('seed', [1,3])
def test_ScalarCurl_2D(domain, ncells, degree, periodic, seed):
    """Check that ScalarCurl_2D maps a random 1-form field to its exact scalar curl."""
    # determinize tests
    np.random.seed(seed)

    # Compute breakpoints along each direction
    breaks = [np.linspace(*lims, num=n+1) for lims, n in zip(domain, ncells)]

    # H1 space (0-forms)
    Nx, Ny = [SplineSpace(degree=d, grid=g, periodic=p, basis='B') \
              for d, g, p in zip(degree, breaks, periodic)]
    V0 = TensorFemSpace(Nx, Ny)

    # H-curl space (1-forms)
    DxNy = V0.reduce_degree(axes=[0], basis='M')
    NxDy = V0.reduce_degree(axes=[1], basis='M')
    V1 = ProductFemSpace(DxNy, NxDy)

    # L2 space (2-forms)
    DxDy = V0.reduce_degree(axes=[0, 1], basis='M')
    V2 = DxDy

    # Linear operator: curl
    curl = ScalarCurl_2D(V1, V2)

    # ...
    # Create random field in V1 (fill locally-owned coefficients per component)
    u1 = FemField(V1)

    s1, s2 = V1.vector_space[0].starts
    e1, e2 = V1.vector_space[0].ends
    u1.coeffs[0][s1:e1+1, s2:e2+1] = np.random.random((e1-s1+1, e2-s2+1))

    s1, s2 = V1.vector_space[1].starts
    e1, e2 = V1.vector_space[1].ends
    u1.coeffs[1][s1:e1+1, s2:e2+1] = np.random.random((e1-s1+1, e2-s2+1))
    # ...

    # Compute curl of u1
    u2 = curl(u1)

    # Components of vector field u1
    u1x, u1y = u1.fields

    # Create random evaluation points (x, y) for evaluating fields
    npts = 1000
    xyz_pts = [[lims[0]+s*(lims[1]-lims[0]) for s, lims in zip(np.random.random(2), domain)]
               for i in range(npts)]

    # Check if
    # ∂/∂x u1y(x, y) - ∂/∂y u1x(x, y) == u2(x, y)
    def eval_curl(fx, fy, *eta):
        # scalar curl in 2D: dFy/dx - dFx/dy
        dfx_dx, dfx_dy = fx.gradient(*eta)
        dfy_dx, dfy_dy = fy.gradient(*eta)
        return dfy_dx - dfx_dy

    vals_curl_u1 = np.array([eval_curl(u1x, u1y, *xyz) for xyz in xyz_pts])
    vals_u2 = np.array([u2(*xyz) for xyz in xyz_pts])

    # Test if relative max-norm of error is <= TOL
    maxnorm_field = abs(vals_u2).max()
    maxnorm_error = abs(vals_u2 - vals_curl_u1).max()
    assert maxnorm_error / maxnorm_field <= 1e-14
#==============================================================================
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8])]) # 1 case
@pytest.mark.parametrize('ncells', [(10, 9), (27, 15)]) # 2 cases
@pytest.mark.parametrize('degree', [(3, 2), (4, 5)]) # 2 cases
@pytest.mark.parametrize('periodic', [(True, False), (False, True)]) # 2 cases
@pytest.mark.parametrize('seed', [1,3])
def test_VectorCurl_2D(domain, ncells, degree, periodic, seed):
    """Check that VectorCurl_2D maps a random 0-form field to its exact vector curl."""
    # determinize tests
    np.random.seed(seed)

    # Compute breakpoints along each direction
    breaks = [np.linspace(*lims, num=n+1) for lims, n in zip(domain, ncells)]

    # H1 space (0-forms)
    Nx, Ny = [SplineSpace(degree=d, grid=g, periodic=p, basis='B') \
              for d, g, p in zip(degree, breaks, periodic)]
    V0 = TensorFemSpace(Nx, Ny)

    # Hdiv space (1-forms)
    NxDy = V0.reduce_degree(axes=[1], basis='M')
    DxNy = V0.reduce_degree(axes=[0], basis='M')
    V1 = ProductFemSpace(NxDy, DxNy)

    # Linear operator: 2D vector curl
    curl = VectorCurl_2D(V0, V1)

    # Create random field in V0 (fill only the locally-owned coefficients)
    u0 = FemField(V0)
    s1, s2 = V0.vector_space.starts
    e1, e2 = V0.vector_space.ends
    u0.coeffs[s1:e1+1, s2:e2+1] = np.random.random((e1-s1+1, e2-s2+1))

    # Compute curl of u0
    u1 = curl(u0)

    # x and y components of u1 vector field
    u1x = u1.fields[0]
    u1y = u1.fields[1]

    # Create evaluation grid, and check if
    #  ∂/∂y u0(x, y) == u1x(x, y)
    # -∂/∂x u0(x, y) == u1y(x, y)
    def eval_curl(f, *eta):
        # rotated gradient: (dF/dy, -dF/dx)
        df_dx, df_dy = f.gradient(*eta)
        return [df_dy, -df_dx]

    xgrid = np.linspace(*domain[0], num=11)
    ygrid = np.linspace(*domain[1], num=11)
    vals_curl_u0 = np.array([[eval_curl(u0, x, y) for x in xgrid] for y in ygrid])
    vals_u1 = np.array([[[u1x(x, y), u1y(x, y)] for x in xgrid] for y in ygrid])

    # Test if relative max-norm of error is <= TOL
    maxnorm_field = abs(vals_u1).max()
    maxnorm_error = abs(vals_u1 - vals_curl_u0).max()
    assert maxnorm_error / maxnorm_field <= 1e-14
#==============================================================================
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8], [-0.5, 0.5])]) # 1 case
@pytest.mark.parametrize('ncells', [(4, 5, 7)]) # 1 case
@pytest.mark.parametrize('degree', [(3, 2, 5), (2, 4, 7)]) # 2 cases
@pytest.mark.parametrize('periodic', [( True, False, False), # 3 cases
                                      (False,  True, False),
                                      (False, False,  True)])
@pytest.mark.parametrize('seed', [1,3])
def test_Curl_3D(domain, ncells, degree, periodic, seed):
    """Check that Curl_3D maps a random 1-form field to its exact curl."""
    # determinize tests
    np.random.seed(seed)

    # Compute breakpoints along each direction
    breaks = [np.linspace(*lims, num=n+1) for lims, n in zip(domain, ncells)]

    # H1 space (0-forms)
    Nx, Ny, Nz = [SplineSpace(degree=d, grid=g, periodic=p, basis='B') \
                  for d, g, p in zip(degree, breaks, periodic)]
    V0 = TensorFemSpace(Nx, Ny, Nz)

    # H-curl space (1-forms)
    DxNyNz = V0.reduce_degree(axes=[0], basis='M')
    NxDyNz = V0.reduce_degree(axes=[1], basis='M')
    NxNyDz = V0.reduce_degree(axes=[2], basis='M')
    V1 = ProductFemSpace(DxNyNz, NxDyNz, NxNyDz)

    # H-div space (2-forms)
    NxDyDz = V0.reduce_degree(axes=[1, 2], basis='M')
    DxNyDz = V0.reduce_degree(axes=[2, 0], basis='M')
    DxDyNz = V0.reduce_degree(axes=[0, 1], basis='M')
    V2 = ProductFemSpace(NxDyDz, DxNyDz, DxDyNz)

    # Linear operator: curl
    curl = Curl_3D(V1, V2)

    # ...
    # Create random field in V1 (fill locally-owned coefficients per component)
    u1 = FemField(V1)

    s1, s2, s3 = V1.vector_space[0].starts
    e1, e2, e3 = V1.vector_space[0].ends
    u1.coeffs[0][s1:e1+1, s2:e2+1, s3:e3+1] = np.random.random((e1-s1+1, e2-s2+1, e3-s3+1))

    s1, s2, s3 = V1.vector_space[1].starts
    e1, e2, e3 = V1.vector_space[1].ends
    u1.coeffs[1][s1:e1+1, s2:e2+1, s3:e3+1] = np.random.random((e1-s1+1, e2-s2+1, e3-s3+1))

    s1, s2, s3 = V1.vector_space[2].starts
    e1, e2, e3 = V1.vector_space[2].ends
    u1.coeffs[2][s1:e1+1, s2:e2+1, s3:e3+1] = np.random.random((e1-s1+1, e2-s2+1, e3-s3+1))
    # ...

    # Compute curl of u1
    u2 = curl(u1)

    # Components of vector fields u1 and u2
    u1x, u1y, u1z = u1.fields
    u2x, u2y, u2z = u2.fields

    # Create random evaluation points (x, y, z) for evaluating fields
    npts = 1000
    xyz_pts = [[lims[0]+s*(lims[1]-lims[0]) for s, lims in zip(np.random.random(3), domain)]
               for i in range(npts)]

    # Check if
    # ∂/∂y u1z(x, y, z) - ∂/∂z u1y(x, y, z) == u2x(x, y, z)
    # ∂/∂z u1x(x, y, z) - ∂/∂x u1z(x, y, z) == u2y(x, y, z)
    # ∂/∂x u1y(x, y, z) - ∂/∂y u1x(x, y, z) == u2z(x, y, z)
    def eval_curl(fx, fy, fz, *eta):
        # component-wise curl from the three gradients
        dfx_dx, dfx_dy, dfx_dz = fx.gradient(*eta)
        dfy_dx, dfy_dy, dfy_dz = fy.gradient(*eta)
        dfz_dx, dfz_dy, dfz_dz = fz.gradient(*eta)
        return [dfz_dy - dfy_dz,
                dfx_dz - dfz_dx,
                dfy_dx - dfx_dy]

    vals_curl_u1 = np.array([eval_curl(u1x, u1y, u1z, *xyz) for xyz in xyz_pts])
    vals_u2 = np.array([[u2x(*xyz), u2y(*xyz), u2z(*xyz)] for xyz in xyz_pts])

    # Test if relative max-norm of error is <= TOL
    maxnorm_field = abs(vals_u2).max()
    maxnorm_error = abs(vals_u2 - vals_curl_u1).max()
    assert maxnorm_error / maxnorm_field <= 1e-14
#==============================================================================
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8])]) # 1 case
@pytest.mark.parametrize('ncells', [(10, 9), (27, 15)]) # 2 cases
@pytest.mark.parametrize('degree', [(3, 2), (4, 5)]) # 2 cases
@pytest.mark.parametrize('periodic', [(True, False), (False, True)]) # 2 cases
@pytest.mark.parametrize('seed', [1,3])
def test_Divergence_2D(domain, ncells, degree, periodic, seed):
    """Check that Divergence_2D maps a random 1-form field to its exact divergence."""
    # determinize tests
    np.random.seed(seed)

    # Compute breakpoints along each direction
    breaks = [np.linspace(*lims, num=n+1) for lims, n in zip(domain, ncells)]

    # H1 space (0-forms)
    Nx, Ny = [SplineSpace(degree=d, grid=g, periodic=p, basis='B') \
              for d, g, p in zip(degree, breaks, periodic)]
    V0 = TensorFemSpace(Nx, Ny)

    # H-div space (1-forms)
    NxDy = V0.reduce_degree(axes=[1], basis='M')
    DxNy = V0.reduce_degree(axes=[0], basis='M')
    V1 = ProductFemSpace(NxDy, DxNy)

    # L2 space (2-forms)
    V2 = V0.reduce_degree(axes=[0, 1], basis='M')

    # Linear operator: divergence
    div = Divergence_2D(V1, V2)

    # ...
    # Create random field in V1 (fill locally-owned coefficients per component)
    u1 = FemField(V1)

    s1, s2 = V1.vector_space[0].starts
    e1, e2 = V1.vector_space[0].ends
    u1.coeffs[0][s1:e1+1, s2:e2+1] = np.random.random((e1-s1+1, e2-s2+1))

    s1, s2 = V1.vector_space[1].starts
    e1, e2 = V1.vector_space[1].ends
    u1.coeffs[1][s1:e1+1, s2:e2+1] = np.random.random((e1-s1+1, e2-s2+1))
    # ...

    # Compute divergence of u1
    u2 = div(u1)

    # Components of vector field u1
    u1x, u1y = u1.fields

    # Create random evaluation points (x, y) for evaluating fields.
    # BUGFIX: previously drew np.random.random(3) here even though the domain is
    # 2D; zip() silently discarded the third draw. Use 2 draws per point, as in
    # test_ScalarCurl_2D.
    npts = 1000
    xyz_pts = [[lims[0]+s*(lims[1]-lims[0]) for s, lims in zip(np.random.random(2), domain)]
               for i in range(npts)]

    # Check if
    # ∂/∂x u1x(x, y) + ∂/∂y u1y(x, y) == u2(x, y)
    def eval_div(fx, fy, *eta):
        # divergence in 2D: dFx/dx + dFy/dy
        dfx_dx, dfx_dy = fx.gradient(*eta)
        dfy_dx, dfy_dy = fy.gradient(*eta)
        return dfx_dx + dfy_dy

    vals_div_u1 = np.array([eval_div(u1x, u1y, *xyz) for xyz in xyz_pts])
    vals_u2 = np.array([u2(*xyz) for xyz in xyz_pts])

    # Test if relative max-norm of error is <= TOL
    maxnorm_field = abs(vals_u2).max()
    maxnorm_error = abs(vals_u2 - vals_div_u1).max()
    assert maxnorm_error / maxnorm_field <= 1e-14
#==============================================================================
@pytest.mark.parametrize('domain', [([-2, 3], [6, 8], [-0.5, 0.5])]) # 1 case
@pytest.mark.parametrize('ncells', [(4, 5, 7)]) # 1 case
@pytest.mark.parametrize('degree', [(3, 2, 5), (2, 4, 7)]) # 2 cases
@pytest.mark.parametrize('periodic', [( True, False, False), # 3 cases
                                      (False,  True, False),
                                      (False, False,  True)])
@pytest.mark.parametrize('seed', [1,3])
def test_Divergence_3D(domain, ncells, degree, periodic, seed):
    """Check that Divergence_3D maps a random 2-form field to its exact divergence."""
    # determinize tests
    np.random.seed(seed)

    # Compute breakpoints along each direction
    breaks = [np.linspace(*lims, num=n+1) for lims, n in zip(domain, ncells)]

    # H1 space (0-forms)
    Nx, Ny, Nz = [SplineSpace(degree=d, grid=g, periodic=p, basis='B') \
                  for d, g, p in zip(degree, breaks, periodic)]
    V0 = TensorFemSpace(Nx, Ny, Nz)

    # H-div space (2-forms)
    NxDyDz = V0.reduce_degree(axes=[1, 2], basis='M')
    DxNyDz = V0.reduce_degree(axes=[2, 0], basis='M')
    DxDyNz = V0.reduce_degree(axes=[0, 1], basis='M')
    V2 = ProductFemSpace(NxDyDz, DxNyDz, DxDyNz)

    # L2 space (3-forms)
    V3 = V0.reduce_degree(axes=[0, 1, 2], basis='M')

    # Linear operator: divergence
    div = Divergence_3D(V2, V3)

    # ...
    # Create random field in V2 (fill locally-owned coefficients per component)
    u2 = FemField(V2)

    s1, s2, s3 = V2.vector_space[0].starts
    e1, e2, e3 = V2.vector_space[0].ends
    u2.coeffs[0][s1:e1+1, s2:e2+1, s3:e3+1] = np.random.random((e1-s1+1, e2-s2+1, e3-s3+1))

    s1, s2, s3 = V2.vector_space[1].starts
    e1, e2, e3 = V2.vector_space[1].ends
    u2.coeffs[1][s1:e1+1, s2:e2+1, s3:e3+1] = np.random.random((e1-s1+1, e2-s2+1, e3-s3+1))

    s1, s2, s3 = V2.vector_space[2].starts
    e1, e2, e3 = V2.vector_space[2].ends
    u2.coeffs[2][s1:e1+1, s2:e2+1, s3:e3+1] = np.random.random((e1-s1+1, e2-s2+1, e3-s3+1))
    # ...

    # Compute divergence of u2
    u3 = div(u2)

    # Components of vector field u2
    u2x, u2y, u2z = u2.fields

    # Create random evaluation points (x, y, z) for evaluating fields
    npts = 1000
    xyz_pts = [[lims[0]+s*(lims[1]-lims[0]) for s, lims in zip(np.random.random(3), domain)]
               for i in range(npts)]

    # Check if
    # ∂/∂x u2x(x, y, z) + ∂/∂y u2y(x, y, z) + ∂/∂z u2z(x, y, z) == u3(x, y, z)
    def eval_div(fx, fy, fz, *eta):
        # divergence in 3D: dFx/dx + dFy/dy + dFz/dz
        dfx_dx, dfx_dy, dfx_dz = fx.gradient(*eta)
        dfy_dx, dfy_dy, dfy_dz = fy.gradient(*eta)
        dfz_dx, dfz_dy, dfz_dz = fz.gradient(*eta)
        return dfx_dx + dfy_dy + dfz_dz

    vals_div_u2 = np.array([eval_div(u2x, u2y, u2z, *xyz) for xyz in xyz_pts])
    vals_u3 = np.array([u3(*xyz) for xyz in xyz_pts])

    # Test if relative max-norm of error is <= TOL
    maxnorm_field = abs(vals_u3).max()
    maxnorm_error = abs(vals_u3 - vals_div_u2).max()
    assert maxnorm_error / maxnorm_field <= 1e-14
#==============================================================================
if __name__ == '__main__':
    # Ad-hoc smoke run of the derivative-operator tests with a single,
    # fixed configuration each. Every test seeds its own RNG, so the
    # calls are mutually independent.
    test_Derivative_1D(domain=[0, 1], ncells=12, degree=3, periodic=False, seed=1)
    test_Derivative_1D(domain=[0, 1], ncells=12, degree=3, periodic=True, seed=1)

    # shared argument sets for the 2D and 3D checks
    args_2d = dict(
        domain=([0, 1], [0, 1]),
        ncells=(10, 15),
        degree=(3, 2),
        periodic=(False, True),
        seed=1,
    )
    args_3d = dict(
        domain=([0, 1], [0, 1], [0, 1]),
        ncells=(5, 8, 4),
        degree=(3, 2, 3),
        periodic=(False, True, True),
        seed=1,
    )

    for check_2d in (test_Gradient_2D, test_ScalarCurl_2D,
                     test_VectorCurl_2D, test_Divergence_2D):
        check_2d(**args_2d)

    for check_3d in (test_Gradient_3D, test_Curl_3D, test_Divergence_3D):
        check_3d(**args_3d)
|
# SQL statements for the sparkify star schema: one fact table (songplays)
# and four dimension tables (users, songs, artists, time).

# DROP TABLES

songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"

# CREATE TABLES

# Fact table: one row per song play event; songplay_id is auto-generated.
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (songplay_id serial PRIMARY KEY,
                                      user_id int NOT NULL, song_id varchar, artist_id varchar,
                                      session_id int NOT NULL, start_time timestamp NOT NULL,
                                      level varchar, location varchar,
                                      user_agent text)
""")

# NOTE(review): users columns are camelCase (userId, firstName, ...) while every
# other table uses snake_case — presumably mirroring the source JSON field
# names; renaming would require touching the ETL, so left as-is.
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (userId int PRIMARY KEY, firstName varchar,
                                  lastName varchar, gender varchar, level varchar)
""")

song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (song_id varchar PRIMARY KEY, artist_id varchar,
                                  title varchar, year int, duration numeric)
""")

artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists (artist_id varchar PRIMARY KEY, name varchar,
                                    artist_location varchar, artist_latitude numeric,
                                    artist_longitude numeric)
""")

time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (start_time timestamp PRIMARY KEY, hour int, day int,
                                 week int, month int, year int, weekday varchar)
""")

# INSERT RECORDS

# songplays: plain insert — songplay_id is serial, so no conflict handling needed.
songplay_table_insert = ("""
INSERT INTO songplays (user_id, song_id, artist_id, session_id, start_time, level, location, user_agent)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
""")

# users: upsert — a returning user may have changed subscription level.
user_table_insert = ("""
INSERT INTO users (userId, firstName, lastName, gender, level)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT (userId) DO UPDATE set level = EXCLUDED.level
""")

# songs: idempotent insert — song metadata is assumed immutable.
song_table_insert = ("""
INSERT INTO songs (song_id, artist_id, title, year, duration)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT (song_id) DO NOTHING
""")

# artists: upsert — location/coordinates are refreshed on re-insert.
artist_table_insert = ("""
INSERT INTO artists (artist_id, name, artist_location, artist_latitude, artist_longitude)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT (artist_id) DO UPDATE SET
artist_location=EXCLUDED.artist_location, artist_latitude=EXCLUDED.artist_latitude,
artist_longitude=EXCLUDED.artist_longitude
""")

# time: idempotent insert — a timestamp always breaks down the same way.
time_table_insert = ("""
INSERT INTO time (start_time, hour, day, week, month, year, weekday)
VALUES(%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (start_time) DO NOTHING
""")

# FIND SONGS

# Look up song/artist ids by (title, artist name, duration) for fact-table rows.
song_select = ("""SELECT songs.song_id songid, artists.artist_id artistid
FROM songs
JOIN artists ON songs.artist_id=artists.artist_id
WHERE songs.title=%s
AND artists.name=%s
AND songs.duration=%s
""")

# QUERY LISTS

create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.