content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for Weather Symbols Trees."""
import pytest
from improver.wxcode.utilities import WX_DICT, get_parameter_names
from improver.wxcode.wxcode_decision_tree import START_NODE, wxcode_decision_tree
from improver.wxcode.wxcode_decision_tree_global import (
START_NODE_GLOBAL,
wxcode_decision_tree_global,
)
from . import check_diagnostic_lists_consistency
TREE_NAMES = ["high_resolution", "global"]
TREES = {
"high_resolution": wxcode_decision_tree(),
"global": wxcode_decision_tree_global(),
}
START_NODES = {"high_resolution": START_NODE, "global": START_NODE_GLOBAL}
REQUIRED_KEY_WORDS = [
"succeed",
"fail",
"probability_thresholds",
"threshold_condition",
"condition_combination",
"diagnostic_fields",
"diagnostic_thresholds",
"diagnostic_conditions",
]
OPTIONAL_KEY_WORDS = ["diagnostic_missing_action", "diagnostic_gamma"]
THRESHOLD_CONDITIONS = ["<=", "<", ">", ">="]
CONDITION_COMBINATIONS = ["AND", "OR"]
DIAGNOSTIC_CONDITIONS = ["below", "above"]
KEYWORDS_DIAGNOSTIC_MISSING_ACTION = ["succeed", "fail"]
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_basic(tree_name):
    """Each named decision tree must be a dictionary."""
    assert isinstance(TREES[tree_name], dict)
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_keywords(tree_name):
    """Every key on every node must be a permitted keyword."""
    permitted = REQUIRED_KEY_WORDS + OPTIONAL_KEY_WORDS
    for query in TREES[tree_name].values():
        for keyword in query:
            assert keyword in permitted
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_start_node_in_tree(tree_name):
    """The configured start node must exist as a node of its tree."""
    assert START_NODES[tree_name] in TREES[tree_name]
def test_keywords_diagnostic_missing():
    """Test only set keywords are used in diagnostic_missing_action.
    This only exists in the 'high_resolution' tree."""
    for query in TREES["high_resolution"].values():
        if "diagnostic_missing_action" in query:
            # Only the two routing keywords are valid targets.
            assert query["diagnostic_missing_action"] in KEYWORDS_DIAGNOSTIC_MISSING_ACTION
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_condition_combination(tree_name):
    """Two-diagnostic nodes need a valid combinator; others need none."""
    for query in TREES[tree_name].values():
        combinator = query["condition_combination"]
        if len(query["diagnostic_fields"]) == 2:
            assert combinator in CONDITION_COMBINATIONS
        else:
            # Single-diagnostic nodes must leave the combinator empty/falsy.
            assert not combinator
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_threshold_condition(tree_name):
    """Every node's threshold_condition must be a known comparator."""
    for query in TREES[tree_name].values():
        assert query["threshold_condition"] in THRESHOLD_CONDITIONS
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_diagnostic_condition(tree_name):
    """Every diagnostic condition must be 'below' or 'above'."""
    for query in TREES[tree_name].values():
        conditions = query["diagnostic_conditions"]
        if isinstance(conditions[0], list):
            # One level of nesting is possible; flatten before checking.
            flattened = [entry for group in conditions for entry in group]
        else:
            flattened = conditions
        for entry in flattened:
            assert entry in DIAGNOSTIC_CONDITIONS
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_node_points_to_valid_value(tree_name):
    """Succeed/fail targets must be existing nodes or valid weather codes."""
    valid_codes = list(WX_DICT.keys())
    tree = TREES[tree_name]
    for query in tree.values():
        for target in (query["succeed"], query["fail"]):
            if isinstance(target, str):
                # String targets route to another node in the same tree.
                assert target in tree
            else:
                # Numeric targets are terminal weather codes.
                assert target in valid_codes
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_diagnostic_len_match(tree_name):
    """Diagnostic fields/thresholds/conditions must share nesting structure."""
    for query in TREES[tree_name].values():
        check_diagnostic_lists_consistency(query)
@pytest.mark.parametrize("tree_name", TREE_NAMES)
def test_probability_len_match(tree_name):
    """Test probability_thresholds list is right shape.

    Each node must carry one numeric probability threshold per diagnostic
    parameter name referenced by its diagnostic_fields entry.
    """
    tree = TREES[tree_name]
    for query in tree.values():
        thresholds = query["probability_thresholds"]
        # Generator expression avoids materializing a throwaway list in all().
        assert all(isinstance(x, (int, float)) for x in thresholds)
        assert len(thresholds) == len(get_parameter_names(query["diagnostic_fields"]))
|
from rest_framework import serializers
from django.contrib.auth import get_user_model
from ...models import OverallRating, Rating
class UserSerializer(serializers.ModelSerializer):
    """Serialize the active user model, exposing camelCase field aliases."""

    # camelCase API names mapped onto the model's snake_case attributes.
    shortName = serializers.CharField(source='short_name')
    fullName = serializers.CharField(source='full_name')

    class Meta:
        model = get_user_model()
        fields = [
            'pk',
            'shortName',
            'email',
            'fullName',
        ]
class RatingSerializer(serializers.ModelSerializer):
    """Serialize a single Rating, embedding its author via UserSerializer."""

    # Nested read of the rating's author (read-only by default for nested
    # serializers without an explicit create/update).
    user = UserSerializer()

    class Meta:
        model = Rating
        fields = [
            'pk',
            'rating',
            'comment',
            'category',
            'user',
        ]
class OverallRatingSerializer(serializers.ModelSerializer):
    """Serialize an OverallRating together with its individual ratings."""

    # All component Rating records attached to this aggregate.
    ratings = RatingSerializer(many=True)
    # Computed fields, resolved by the get_<field> methods below.
    userStatus = serializers.SerializerMethodField()
    category = serializers.SerializerMethodField()

    class Meta:
        model = OverallRating
        fields = [
            'pk',
            'rating',
            'category',
            'ratings',
            'userStatus',
        ]

    def get_userStatus(self, obj):
        """Return the requesting user's status for this overall rating.

        NOTE(review): assumes 'request' is always present in the serializer
        context; if it is absent, ``context.get('request')`` returns None and
        the ``.user`` access raises AttributeError — confirm callers always
        pass the request.
        """
        user = self.context.get('request').user
        return obj.user_status(user)

    def get_category(self, obj):
        """Return the category upper-cased with dashes replaced by underscores."""
        return obj.category.upper().replace('-', '_')
|
from django.contrib import admin
# import inventory.models
from .models import Fixture, Source, Source_Type, Fixture_Type, Manufacturer
# Register your models here.
# Register the inventory models with the default admin site, preserving the
# original registration order.
for _model in (Fixture, Fixture_Type, Source, Source_Type, Manufacturer):
    admin.site.register(_model)
|
import claripy
import nose
from claripy.vsa import MaybeResult, BoolResult, DiscreteStridedIntervalSet, StridedInterval, RegionAnnotation
def vsa_model(a):
    """Return the VSA backend's internal model object for the AST *a*."""
    return claripy.backends.vsa.convert(a)
def test_fucked_extract():
    """Extracts from reversed/zero-extended concats must keep a positive max."""
    vsa = claripy.backends.vsa

    # Two symbolic stdin bytes, concatenated then byte-reversed.
    two_bytes = claripy.Reverse(
        claripy.Concat(
            claripy.BVS('file_/dev/stdin_6_0_16_8', 8, explicit_name=True),
            claripy.BVS('file_/dev/stdin_6_1_17_8', 8, explicit_name=True),
        )
    )
    assert vsa.max(two_bytes) > 0

    extended = claripy.ZeroExt(16, two_bytes)
    swapped = claripy.Reverse(extended)
    assert vsa.max(swapped) > 0

    # Extracting the upper half of the reversed value must still be non-trivial.
    upper_half = swapped[31:16]
    assert vsa.max(upper_half) > 0

    # Another case: arithmetic on a reversed concat, then a byte extract.
    low_digit = (
        (
            claripy.Reverse(
                claripy.Concat(
                    claripy.BVS('w', 8),
                    claripy.BVS('x', 8),
                    claripy.BVS('y', 8),
                    claripy.BVS('z', 8),
                )
            ) & claripy.BVV(15, 32)
        ) + claripy.BVV(48, 32)
    )[7:0]
    assert vsa.max(low_digit) > 0
def test_reversed_concat():
    """Concat never produces a lazily-reversed model, whatever the operands."""
    a = claripy.SI('a', 32, lower_bound=10, upper_bound=0x80, stride=10)
    b = claripy.SI('b', 32, lower_bound=1, upper_bound=0xff, stride=1)
    reversed_a = claripy.Reverse(a)
    reversed_b = claripy.Reverse(b)

    # First let's check if the reversing makes sense
    nose.tools.assert_equal(claripy.backends.vsa.min(reversed_a), 0xa000000)
    nose.tools.assert_equal(claripy.backends.vsa.max(reversed_a), 0x80000000)
    nose.tools.assert_equal(claripy.backends.vsa.min(reversed_b), 0x1000000)
    nose.tools.assert_equal(claripy.backends.vsa.max(reversed_b), 0xff000000)

    # Every plain/reversed operand combination concats to a non-reversed model.
    operand_pairs = (
        (a, b),
        (reversed_a, b),
        (a, reversed_b),
        (reversed_a, reversed_b),
    )
    for lhs, rhs in operand_pairs:
        concatenated = claripy.Concat(lhs, rhs)
        nose.tools.assert_equal(concatenated._model_vsa._reversed, False)
def test_simple_cardinality():
    """A strided BVS covering two values reports cardinality 2."""
    interval = claripy.BVS('x', 32, 0xa, 0x14, 0xa)
    nose.tools.assert_equal(interval.cardinality, 2)
def test_wrapped_intervals():
    """Exercise wrapped strided-interval semantics: signed/unsigned bound
    conversion, pole splitting, arithmetic (add/sub/mul/div), zero/sign
    extension, and signed vs unsigned comparisons."""
    #SI = claripy.StridedInterval
    # Disable the use of DiscreteStridedIntervalSet
    claripy.vsa.strided_interval.allow_dsis = False
    #
    # Signedness/unsignedness conversion
    #
    # The full 32-bit range splits into non-negative and negative signed bands.
    si1 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=0xffffffff)
    nose.tools.assert_equal(vsa_model(si1)._signed_bounds(), [ (0x0, 0x7fffffff), (-0x80000000, -0x1) ])
    nose.tools.assert_equal(vsa_model(si1)._unsigned_bounds(), [ (0x0, 0xffffffff) ])
    #
    # Pole-splitting
    #
    # south-pole splitting: an interval crossing 0 splits at the 0/-1 boundary
    si1 = claripy.SI(bits=32, stride=1, lower_bound=-1, upper_bound=1)
    si_list = vsa_model(si1)._ssplit()
    nose.tools.assert_equal(len(si_list), 2)
    nose.tools.assert_true(
        si_list[0].identical(vsa_model(claripy.SI(bits=32, stride=1, lower_bound=-1, upper_bound=-1))))
    nose.tools.assert_true(
        si_list[1].identical(vsa_model(claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=1))))
    # north-pole splitting: splits at the signed min/max boundary (0x7fffffff/0x80000000)
    si1 = claripy.SI(bits=32, stride=1, lower_bound=-1, upper_bound=-3)
    si_list = vsa_model(si1)._nsplit()
    nose.tools.assert_equal(len(si_list), 2)
    nose.tools.assert_true(
        si_list[0].identical(vsa_model(claripy.SI(bits=32, stride=1, lower_bound=-1, upper_bound=0x7fffffff))))
    nose.tools.assert_true(
        si_list[1].identical(vsa_model(claripy.SI(bits=32, stride=1, lower_bound=0x80000000, upper_bound=-3))))
    # north-pole splitting, episode 2: stride preserved across the split
    si1 = claripy.SI(bits=32, stride=3, lower_bound=3, upper_bound=0)
    si_list = vsa_model(si1)._nsplit()
    nose.tools.assert_equal(len(si_list), 2)
    nose.tools.assert_true(
        si_list[0].identical(vsa_model(claripy.SI(bits=32, stride=3, lower_bound=3, upper_bound=0x7ffffffe))))
    nose.tools.assert_true(
        si_list[1].identical(vsa_model(claripy.SI(bits=32, stride=3, lower_bound=0x80000001, upper_bound=0))))
    # bipolar splitting: splits at both poles, yielding three pieces
    si1 = claripy.SI(bits=32, stride=1, lower_bound=-2, upper_bound=-8)
    si_list = vsa_model(si1)._psplit()
    nose.tools.assert_equal(len(si_list), 3)
    nose.tools.assert_true(
        si_list[0].identical(vsa_model(claripy.SI(bits=32, stride=1, lower_bound=-2, upper_bound=-1))))
    nose.tools.assert_true(
        si_list[1].identical(vsa_model(claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=0x7fffffff))))
    nose.tools.assert_true(
        si_list[2].identical(vsa_model(claripy.SI(bits=32, stride=1, lower_bound=0x80000000, upper_bound=-8))))
    #
    # Addition
    #
    # Plain addition
    si1 = claripy.SI(bits=32, stride=1, lower_bound=-1, upper_bound=1)
    si2 = claripy.SI(bits=32, stride=1, lower_bound=-1, upper_bound=1)
    si3 = claripy.SI(bits=32, stride=1, lower_bound=-2, upper_bound=2)
    nose.tools.assert_true(claripy.backends.vsa.identical(si1 + si2, si3))
    # [-2, 2] is the same interval expressed with unsigned bounds.
    si4 = claripy.SI(bits=32, stride=1, lower_bound=0xfffffffe, upper_bound=2)
    nose.tools.assert_true(claripy.backends.vsa.identical(si1 + si2, si4))
    # [2, -2] is the complementary (wrapped) interval and must NOT match.
    si5 = claripy.SI(bits=32, stride=1, lower_bound=2, upper_bound=-2)
    nose.tools.assert_false(claripy.backends.vsa.identical(si1 + si2, si5))
    # Addition with overflowing cardinality
    si1 = claripy.SI(bits=8, stride=1, lower_bound=0, upper_bound=0xfe)
    si2 = claripy.SI(bits=8, stride=1, lower_bound=0xfe, upper_bound=0xff)
    nose.tools.assert_true(vsa_model((si1 + si2)).is_top)
    # Addition that shouldn't get a TOP
    si1 = claripy.SI(bits=8, stride=1, lower_bound=0, upper_bound=0xfe)
    si2 = claripy.SI(bits=8, stride=1, lower_bound=0, upper_bound=0)
    nose.tools.assert_false(vsa_model((si1 + si2)).is_top)
    #
    # Subtraction
    #
    si1 = claripy.SI(bits=8, stride=1, lower_bound=10, upper_bound=15)
    si2 = claripy.SI(bits=8, stride=1, lower_bound=11, upper_bound=12)
    si3 = claripy.SI(bits=8, stride=1, lower_bound=-2, upper_bound=4)
    nose.tools.assert_true(claripy.backends.vsa.identical(si1 - si2, si3))
    #
    # Multiplication
    #
    # integer multiplication
    si1 = claripy.SI(bits=32, to_conv=0xffff)
    si2 = claripy.SI(bits=32, to_conv=0x10000)
    si3 = claripy.SI(bits=32, to_conv=0xffff0000)
    nose.tools.assert_true(claripy.backends.vsa.identical(si1 * si2, si3))
    # intervals multiplication
    si1 = claripy.SI(bits=32, stride=1, lower_bound=10, upper_bound=15)
    si2 = claripy.SI(bits=32, stride=1, lower_bound=20, upper_bound=30)
    si3 = claripy.SI(bits=32, stride=1, lower_bound=200, upper_bound=450)
    nose.tools.assert_true(claripy.backends.vsa.identical(si1 * si2, si3))
    #
    # Division
    #
    # integer division
    si1 = claripy.SI(bits=32, to_conv=10)
    si2 = claripy.SI(bits=32, to_conv=5)
    si3 = claripy.SI(bits=32, to_conv=2)
    nose.tools.assert_true(claripy.backends.vsa.identical(si1 / si2, si3))
    si3 = claripy.SI(bits=32, to_conv=0)
    nose.tools.assert_true(claripy.backends.vsa.identical(si2 / si1, si3))
    # intervals division
    si1 = claripy.SI(bits=32, stride=1, lower_bound=10, upper_bound=100)
    si2 = claripy.SI(bits=32, stride=1, lower_bound=10, upper_bound=20)
    si3 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=10)
    nose.tools.assert_true(claripy.backends.vsa.identical(si1 / si2, si3))
    #
    # Extension
    #
    # zero-extension
    si1 = claripy.SI(bits=8, stride=1, lower_bound=0, upper_bound=0xfd)
    si_zext = si1.zero_extend(32 - 8)
    si_zext_ = claripy.SI(bits=32, stride=1, lower_bound=0x0, upper_bound=0xfd)
    nose.tools.assert_true(claripy.backends.vsa.identical(si_zext, si_zext_))
    # sign-extension: [0, 0xfd] as signed 8-bit covers [-0x80, 0x7f]
    si1 = claripy.SI(bits=8, stride=1, lower_bound=0, upper_bound=0xfd)
    si_sext = si1.sign_extend(32 - 8)
    si_sext_ = claripy.SI(bits=32, stride=1, lower_bound=0xffffff80, upper_bound=0x7f)
    nose.tools.assert_true(claripy.backends.vsa.identical(si_sext, si_sext_))
    #
    # Comparisons
    #
    # -1 == 0xff
    si1 = claripy.SI(bits=8, stride=1, lower_bound=-1, upper_bound=-1)
    si2 = claripy.SI(bits=8, stride=1, lower_bound=0xff, upper_bound=0xff)
    nose.tools.assert_true(claripy.backends.vsa.is_true(si1 == si2))
    # -2 != 0xff
    si1 = claripy.SI(bits=8, stride=1, lower_bound=-2, upper_bound=-2)
    si2 = claripy.SI(bits=8, stride=1, lower_bound=0xff, upper_bound=0xff)
    nose.tools.assert_true(claripy.backends.vsa.is_true(si1 != si2))
    # [-2, -1] < [1, 2] (signed arithmetic)
    si1 = claripy.SI(bits=8, stride=1, lower_bound=1, upper_bound=2)
    si2 = claripy.SI(bits=8, stride=1, lower_bound=-2, upper_bound=-1)
    nose.tools.assert_true(claripy.backends.vsa.is_true(si2.SLT(si1)))
    # [-2, -1] <= [1, 2] (signed arithmetic)
    nose.tools.assert_true(claripy.backends.vsa.is_true(si2.SLE(si1)))
    # [0xfe, 0xff] > [1, 2] (unsigned arithmetic)
    nose.tools.assert_true(claripy.backends.vsa.is_true(si2.UGT(si1)))
    # [0xfe, 0xff] >= [1, 2] (unsigned arithmetic)
    nose.tools.assert_true(claripy.backends.vsa.is_true(si2.UGE(si1)))
def test_join():
    """Unions of concrete 8-bit values produce correctly-strided intervals.

    The original bound ``b = claripy.backends.vsa`` was dead code — it was
    immediately shadowed by ``b = claripy.SI(...)`` below — so it is removed.
    """
    # Restrict solving to the VSA backend for this test.
    claripy.solver_backends = [ ]
    SI = claripy.SI
    a = claripy.SI(bits=8, to_conv=2)
    b = claripy.SI(bits=8, to_conv=10)
    c = claripy.SI(bits=8, to_conv=120)
    d = claripy.SI(bits=8, to_conv=130)
    e = claripy.SI(bits=8, to_conv=132)
    f = claripy.SI(bits=8, to_conv=135)
    # union a, b, c, d, e => [2, 132] with a stride of 2
    tmp1 = a.union(b)
    nose.tools.assert_true(claripy.backends.vsa.identical(tmp1, SI(bits=8, stride=8, lower_bound=2, upper_bound=10)))
    tmp2 = tmp1.union(c)
    nose.tools.assert_true(claripy.backends.vsa.identical(tmp2, SI(bits=8, stride=2, lower_bound=2, upper_bound=120)))
    tmp3 = tmp2.union(d).union(e)
    nose.tools.assert_true(claripy.backends.vsa.identical(tmp3, SI(bits=8, stride=2, lower_bound=2, upper_bound=132)))
    # union a, b, c, d, e, f => [2, 135] with a stride of 1
    tmp = a.union(b).union(c).union(d).union(e).union(f)
    nose.tools.assert_true(claripy.backends.vsa.identical(tmp, SI(bits=8, stride=1, lower_bound=2, upper_bound=135)))
    a = claripy.SI(bits=8, to_conv=1)
    b = claripy.SI(bits=8, to_conv=10)
    c = claripy.SI(bits=8, to_conv=120)
    d = claripy.SI(bits=8, to_conv=130)
    e = claripy.SI(bits=8, to_conv=132)
    f = claripy.SI(bits=8, to_conv=135)
    g = claripy.SI(bits=8, to_conv=220)
    h = claripy.SI(bits=8, to_conv=50)
    # union a, b, c, d, e, f, g, h => a wrapped interval [220, 135] with a stride of 1
    tmp = a.union(b).union(c).union(d).union(e).union(f).union(g).union(h)
    nose.tools.assert_true(claripy.backends.vsa.identical(tmp, SI(bits=8, stride=1, lower_bound=220, upper_bound=135)))
    # Members of the wrapped interval (it crosses 0xff -> 0x00).
    nose.tools.assert_true(220 in vsa_model(tmp).eval(255))
    nose.tools.assert_true(225 in vsa_model(tmp).eval(255))
    nose.tools.assert_true(0 in vsa_model(tmp).eval(255))
    nose.tools.assert_true(135 in vsa_model(tmp).eval(255))
    # 138 lies in the gap (136..219) and must be excluded.
    nose.tools.assert_false(138 in vsa_model(tmp).eval(255))
def test_vsa():
# Set backend
b = claripy.backends.vsa
SI = claripy.SI
VS = claripy.ValueSet
BVV = claripy.BVV
# Disable the use of DiscreteStridedIntervalSet
claripy.vsa.strided_interval.allow_dsis = False
def is_equal(ast_0, ast_1):
return claripy.backends.vsa.identical(ast_0, ast_1)
si1 = claripy.TSI(32, name="foo", explicit_name=True)
nose.tools.assert_equal(vsa_model(si1).name, b"foo")
# Normalization
si1 = SI(bits=32, stride=1, lower_bound=10, upper_bound=10)
nose.tools.assert_equal(vsa_model(si1).stride, 0)
# Integers
si1 = claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)
si2 = claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)
si3 = claripy.SI(bits=32, stride=0, lower_bound=28, upper_bound=28)
# Strided intervals
si_a = claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=20)
si_b = claripy.SI(bits=32, stride=2, lower_bound=-100, upper_bound=200)
si_c = claripy.SI(bits=32, stride=3, lower_bound=-100, upper_bound=200)
si_d = claripy.SI(bits=32, stride=2, lower_bound=50, upper_bound=60)
si_e = claripy.SI(bits=16, stride=1, lower_bound=0x2000, upper_bound=0x3000)
si_f = claripy.SI(bits=16, stride=1, lower_bound=0, upper_bound=255)
si_g = claripy.SI(bits=16, stride=1, lower_bound=0, upper_bound=0xff)
si_h = claripy.SI(bits=32, stride=0, lower_bound=0x80000000, upper_bound=0x80000000)
nose.tools.assert_true(is_equal(si1, claripy.SI(bits=32, to_conv=10)))
nose.tools.assert_true(is_equal(si2, claripy.SI(bits=32, to_conv=10)))
nose.tools.assert_true(is_equal(si1, si2))
# __add__
si_add_1 = si1 + si2
nose.tools.assert_true(is_equal(si_add_1, claripy.SI(bits=32, stride=0, lower_bound=20, upper_bound=20)))
si_add_2 = si1 + si_a
nose.tools.assert_true(is_equal(si_add_2, claripy.SI(bits=32, stride=2, lower_bound=20, upper_bound=30)))
si_add_3 = si_a + si_b
nose.tools.assert_true(is_equal(si_add_3, claripy.SI(bits=32, stride=2, lower_bound=-90, upper_bound=220)))
si_add_4 = si_b + si_c
nose.tools.assert_true(is_equal(si_add_4, claripy.SI(bits=32, stride=1, lower_bound=-200, upper_bound=400)))
# __add__ with overflow
si_add_5 = si_h + 0xffffffff
nose.tools.assert_true(is_equal(si_add_5, claripy.SI(bits=32, stride=0, lower_bound=0x7fffffff, upper_bound=0x7fffffff)))
# __sub__
si_minus_1 = si1 - si2
nose.tools.assert_true(is_equal(si_minus_1, claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0)))
si_minus_2 = si_a - si_b
nose.tools.assert_true(is_equal(si_minus_2, claripy.SI(bits=32, stride=2, lower_bound=-190, upper_bound=120)))
si_minus_3 = si_b - si_c
nose.tools.assert_true(is_equal(si_minus_3, claripy.SI(bits=32, stride=1, lower_bound=-300, upper_bound=300)))
# __neg__ / __invert__ / bitwise not
si_neg_1 = ~si1
nose.tools.assert_true(is_equal(si_neg_1, claripy.SI(bits=32, to_conv=-11)))
si_neg_2 = ~si_b
nose.tools.assert_true(is_equal(si_neg_2, claripy.SI(bits=32, stride=2, lower_bound=-201, upper_bound=99)))
# __or__
si_or_1 = si1 | si3
nose.tools.assert_true(is_equal(si_or_1, claripy.SI(bits=32, to_conv=30)))
si_or_2 = si1 | si2
nose.tools.assert_true(is_equal(si_or_2, claripy.SI(bits=32, to_conv=10)))
si_or_3 = si1 | si_a # An integer | a strided interval
nose.tools.assert_true(is_equal(si_or_3 , claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=30)))
si_or_3 = si_a | si1 # Exchange the operands
nose.tools.assert_true(is_equal(si_or_3, claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=30)))
si_or_4 = si_a | si_d # A strided interval | another strided interval
nose.tools.assert_true(is_equal(si_or_4, claripy.SI(bits=32, stride=2, lower_bound=50, upper_bound=62)))
si_or_4 = si_d | si_a # Exchange the operands
nose.tools.assert_true(is_equal(si_or_4, claripy.SI(bits=32, stride=2, lower_bound=50, upper_bound=62)))
si_or_5 = si_e | si_f #
nose.tools.assert_true(is_equal(si_or_5, claripy.SI(bits=16, stride=1, lower_bound=0x2000, upper_bound=0x30ff)))
si_or_6 = si_e | si_g #
nose.tools.assert_true(is_equal(si_or_6, claripy.SI(bits=16, stride=1, lower_bound=0x2000, upper_bound=0x30ff)))
# Shifting
si_shl_1 = si1 << 3
nose.tools.assert_equal(si_shl_1.size(), 32)
nose.tools.assert_true(is_equal(si_shl_1, claripy.SI(bits=32, stride=0, lower_bound=80, upper_bound=80)))
# Multiplication
si_mul_1 = si1 * 3
nose.tools.assert_equal(si_mul_1.size(), 32)
nose.tools.assert_true(is_equal(si_mul_1, claripy.SI(bits=32, stride=0, lower_bound=30, upper_bound=30)))
si_mul_2 = si_a * 3
nose.tools.assert_equal(si_mul_2.size(), 32)
nose.tools.assert_true(is_equal(si_mul_2, claripy.SI(bits=32, stride=6, lower_bound=30, upper_bound=60)))
si_mul_3 = si_a * si_b
nose.tools.assert_equal(si_mul_3.size(), 32)
nose.tools.assert_true(is_equal(si_mul_3, claripy.SI(bits=32, stride=2, lower_bound=-2000, upper_bound=4000)))
# Division
si_div_1 = si1 / 3
nose.tools.assert_equal(si_div_1.size(), 32)
nose.tools.assert_true(is_equal(si_div_1, claripy.SI(bits=32, stride=0, lower_bound=3, upper_bound=3)))
si_div_2 = si_a / 3
nose.tools.assert_equal(si_div_2.size(), 32)
nose.tools.assert_true(is_equal(si_div_2, claripy.SI(bits=32, stride=1, lower_bound=3, upper_bound=6)))
# Modulo
si_mo_1 = si1 % 3
nose.tools.assert_equal(si_mo_1.size(), 32)
nose.tools.assert_true(is_equal(si_mo_1, claripy.SI(bits=32, stride=0, lower_bound=1, upper_bound=1)))
si_mo_2 = si_a % 3
nose.tools.assert_equal(si_mo_2.size(), 32)
nose.tools.assert_true(is_equal(si_mo_2, claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=2)))
#
# Extracting the sign bit
#
# a negative integer
si = claripy.SI(bits=64, stride=0, lower_bound=-1, upper_bound=-1)
sb = si[63: 63]
nose.tools.assert_true(is_equal(sb, claripy.SI(bits=1, to_conv=1)))
# non-positive integers
si = claripy.SI(bits=64, stride=1, lower_bound=-1, upper_bound=0)
sb = si[63: 63]
nose.tools.assert_true(is_equal(sb, claripy.SI(bits=1, stride=1, lower_bound=0, upper_bound=1)))
# Extracting an integer
si = claripy.SI(bits=64, stride=0, lower_bound=0x7fffffffffff0000, upper_bound=0x7fffffffffff0000)
part1 = si[63 : 32]
part2 = si[31 : 0]
nose.tools.assert_true(is_equal(part1, claripy.SI(bits=32, stride=0, lower_bound=0x7fffffff, upper_bound=0x7fffffff)))
nose.tools.assert_true(is_equal(part2, claripy.SI(bits=32, stride=0, lower_bound=0xffff0000, upper_bound=0xffff0000)))
# Concatenating two integers
si_concat = part1.concat(part2)
nose.tools.assert_true(is_equal(si_concat, si))
# Extracting a claripy.SI
si = claripy.SI(bits=64, stride=0x9, lower_bound=0x1, upper_bound=0xa)
part1 = si[63 : 32]
part2 = si[31 : 0]
nose.tools.assert_true(is_equal(part1, claripy.SI(bits=32, stride=0, lower_bound=0x0, upper_bound=0x0)))
nose.tools.assert_true(is_equal(part2, claripy.SI(bits=32, stride=9, lower_bound=1, upper_bound=10)))
# Concatenating two claripy.SIs
si_concat = part1.concat(part2)
nose.tools.assert_true(is_equal(si_concat, si))
# Concatenating two SIs that are of different sizes
si_1 = SI(bits=64, stride=1, lower_bound=0, upper_bound=0xffffffffffffffff)
si_2 = SI(bits=32, stride=1, lower_bound=0, upper_bound=0xffffffff)
si_concat = si_1.concat(si_2)
nose.tools.assert_true(is_equal(si_concat, SI(bits=96, stride=1,
lower_bound=0,
upper_bound=0xffffffffffffffffffffffff)))
# Zero-Extend the low part
si_zeroextended = part2.zero_extend(32)
nose.tools.assert_true(is_equal(si_zeroextended, claripy.SI(bits=64, stride=9, lower_bound=1, upper_bound=10)))
# Sign-extension
si_signextended = part2.sign_extend(32)
nose.tools.assert_true(is_equal(si_signextended, claripy.SI(bits=64, stride=9, lower_bound=1, upper_bound=10)))
# Extract from the result above
si_extracted = si_zeroextended[31:0]
nose.tools.assert_true(is_equal(si_extracted, claripy.SI(bits=32, stride=9, lower_bound=1, upper_bound=10)))
# Union
si_union_1 = si1.union(si2)
nose.tools.assert_true(is_equal(si_union_1, claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)))
si_union_2 = si1.union(si3)
nose.tools.assert_true(is_equal(si_union_2, claripy.SI(bits=32, stride=18, lower_bound=10, upper_bound=28)))
si_union_3 = si1.union(si_a)
nose.tools.assert_true(is_equal(si_union_3, claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=20)))
si_union_4 = si_a.union(si_b)
nose.tools.assert_true(is_equal(si_union_4, claripy.SI(bits=32, stride=2, lower_bound=-100, upper_bound=200)))
si_union_5 = si_b.union(si_c)
nose.tools.assert_true(is_equal(si_union_5, claripy.SI(bits=32, stride=1, lower_bound=-100, upper_bound=200)))
# Intersection
si_intersection_1 = si1.intersection(si1)
nose.tools.assert_true(is_equal(si_intersection_1, si2))
si_intersection_2 = si1.intersection(si2)
nose.tools.assert_true(is_equal(si_intersection_2, claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)))
si_intersection_3 = si1.intersection(si_a)
nose.tools.assert_true(is_equal(si_intersection_3, claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)))
si_intersection_4 = si_a.intersection(si_b)
nose.tools.assert_true(is_equal(si_intersection_4, claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=20)))
si_intersection_5 = si_b.intersection(si_c)
nose.tools.assert_true(is_equal(si_intersection_5, claripy.SI(bits=32, stride=6, lower_bound=-100, upper_bound=200)))
# More intersections
t0 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=0x27)
t1 = claripy.SI(bits=32, stride=0x7fffffff, lower_bound=0x80000002, upper_bound=1)
si_is_6 = t0.intersection(t1)
nose.tools.assert_true(is_equal(si_is_6, claripy.SI(bits=32, stride=0, lower_bound=1, upper_bound=1)))
t2 = claripy.SI(bits=32, stride=5, lower_bound=20, upper_bound=30)
t3 = claripy.SI(bits=32, stride=1, lower_bound=27, upper_bound=0xffffffff)
si_is_7 = t2.intersection(t3)
nose.tools.assert_true(is_equal(si_is_7, claripy.SI(bits=32, stride=0, lower_bound=30, upper_bound=30)))
t4 = claripy.SI(bits=32, stride=5, lower_bound=-400, upper_bound=400)
t5 = claripy.SI(bits=32, stride=1, lower_bound=395, upper_bound=-395)
si_is_8 = t4.intersection(t5)
nose.tools.assert_true(is_equal(si_is_8, claripy.SI(bits=32, stride=5, lower_bound=-400, upper_bound=400)))
# Sign-extension
si = claripy.SI(bits=1, stride=0, lower_bound=1, upper_bound=1)
si_signextended = si.sign_extend(31)
nose.tools.assert_true(is_equal(si_signextended, claripy.SI(bits=32, stride=0, lower_bound=0xffffffff, upper_bound=0xffffffff)))
# Comparison between claripy.SI and BVV
si = claripy.SI(bits=32, stride=1, lower_bound=-0x7f, upper_bound=0x7f)
si._model_vsa.uninitialized = True
bvv = BVV(0x30, 32)
comp = (si < bvv)
nose.tools.assert_true(vsa_model(comp).identical(MaybeResult()))
# Better extraction
# si = <32>0x1000000[0xcffffff, 0xdffffff]R
si = claripy.SI(bits=32, stride=0x1000000, lower_bound=0xcffffff, upper_bound=0xdffffff)
si_byte0 = si[7: 0]
si_byte1 = si[15: 8]
si_byte2 = si[23: 16]
si_byte3 = si[31: 24]
nose.tools.assert_true(is_equal(si_byte0, claripy.SI(bits=8, stride=0, lower_bound=0xff, upper_bound=0xff)))
nose.tools.assert_true(is_equal(si_byte1, claripy.SI(bits=8, stride=0, lower_bound=0xff, upper_bound=0xff)))
nose.tools.assert_true(is_equal(si_byte2, claripy.SI(bits=8, stride=0, lower_bound=0xff, upper_bound=0xff)))
nose.tools.assert_true(is_equal(si_byte3, claripy.SI(bits=8, stride=1, lower_bound=0xc, upper_bound=0xd)))
# Optimization on bitwise-and
si_1 = claripy.SI(bits=32, stride=1, lower_bound=0x0, upper_bound=0xffffffff)
si_2 = claripy.SI(bits=32, stride=0, lower_bound=0x80000000, upper_bound=0x80000000)
si = si_1 & si_2
nose.tools.assert_true(is_equal(si, claripy.SI(bits=32, stride=0x80000000, lower_bound=0, upper_bound=0x80000000)))
si_1 = claripy.SI(bits=32, stride=1, lower_bound=0x0, upper_bound=0x7fffffff)
si_2 = claripy.SI(bits=32, stride=0, lower_bound=0x80000000, upper_bound=0x80000000)
si = si_1 & si_2
nose.tools.assert_true(is_equal(si, claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0)))
# Concatenation: concat with zeros only increases the stride
si_1 = claripy.SI(bits=8, stride=0xff, lower_bound=0x0, upper_bound=0xff)
si_2 = claripy.SI(bits=8, stride=0, lower_bound=0, upper_bound=0)
si = si_1.concat(si_2)
nose.tools.assert_true(is_equal(si, claripy.SI(bits=16, stride=0xff00, lower_bound=0, upper_bound=0xff00)))
# Extract from a reversed value
si_1 = claripy.SI(bits=64, stride=0xff, lower_bound=0x0, upper_bound=0xff)
si_2 = si_1.reversed[63 : 56]
nose.tools.assert_true(is_equal(si_2, claripy.SI(bits=8, stride=0xff, lower_bound=0x0, upper_bound=0xff)))
#
# ValueSet
#
def VS(name=None, bits=None, region=None, val=None):
region = 'foobar' if region is None else region
return claripy.ValueSet(bits, region=region, region_base_addr=0, value=val, name=name)
vs_1 = VS(bits=32, val=0)
vs_1 = vs_1.intersection(VS(bits=32, val=1))
nose.tools.assert_true(vsa_model(vs_1).is_empty)
# Test merging two addresses
vsa_model(vs_1)._merge_si('global', 0, vsa_model(si1))
vsa_model(vs_1)._merge_si('global', 0, vsa_model(si3))
nose.tools.assert_true(vsa_model(vs_1).get_si('global').identical(vsa_model(SI(bits=32, stride=18, lower_bound=10, upper_bound=28))))
# Length of this ValueSet
nose.tools.assert_equal(len(vsa_model(vs_1)), 32)
vs_1 = VS(name='boo', bits=32, val=0).intersection(VS(name='makeitempty', bits=32, val=1))
vs_2 = VS(name='foo', bits=32, val=0).intersection(VS(name='makeitempty', bits=32, val=1))
nose.tools.assert_true(claripy.backends.vsa.identical(vs_1, vs_1))
nose.tools.assert_true(claripy.backends.vsa.identical(vs_2, vs_2))
vsa_model(vs_1)._merge_si('global', 0, vsa_model(si1))
nose.tools.assert_false(claripy.backends.vsa.identical(vs_1, vs_2))
vsa_model(vs_2)._merge_si('global', 0, vsa_model(si1))
nose.tools.assert_true(claripy.backends.vsa.identical(vs_1, vs_2))
nose.tools.assert_true(claripy.backends.vsa.is_true((vs_1 & vs_2) == vs_1))
vsa_model(vs_1)._merge_si('global', 0, vsa_model(si3))
nose.tools.assert_false(claripy.backends.vsa.identical(vs_1, vs_2))
# Subtraction
# Subtraction of two pointers yields a concrete value
vs_1 = VS(name='foo', region='global', bits=32, val=0x400010)
vs_2 = VS(name='bar', region='global', bits=32, val=0x400000)
si = vs_1 - vs_2
nose.tools.assert_is(type(vsa_model(si)), StridedInterval)
nose.tools.assert_true(claripy.backends.vsa.identical(si, claripy.SI(bits=32, stride=0, lower_bound=0x10, upper_bound=0x10)))
#
# IfProxy
#
si = claripy.SI(bits=32, stride=1, lower_bound=10, upper_bound=0xffffffff)
if_0 = claripy.If(si == 0, si, si - 1)
nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_0))
nose.tools.assert_false(claripy.backends.vsa.identical(if_0, si))
# max and min on IfProxy
si = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=0xffffffff)
if_0 = claripy.If(si == 0, si, si - 1)
max_val = b.max(if_0)
min_val = b.min(if_0)
nose.tools.assert_equal(max_val, 0xffffffff)
nose.tools.assert_equal(min_val, 0x00000000)
# identical
nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_0))
nose.tools.assert_true(claripy.backends.vsa.identical(if_0, si))
if_0_copy = claripy.If(si == 0, si, si - 1)
nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_0_copy))
if_1 = claripy.If(si == 1, si, si - 1)
nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_1))
si = SI(bits=32, stride=0, lower_bound=1, upper_bound=1)
if_0 = claripy.If(si == 0, si, si - 1)
if_0_copy = claripy.If(si == 0, si, si - 1)
nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_0_copy))
if_1 = claripy.If(si == 1, si, si - 1)
nose.tools.assert_false(claripy.backends.vsa.identical(if_0, if_1))
if_1 = claripy.If(si == 0, si + 1, si - 1)
nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_1))
if_1 = claripy.If(si == 0, si, si)
nose.tools.assert_false(claripy.backends.vsa.identical(if_0, if_1))
# if_1 = And(VS_2, IfProxy(si == 0, 0, 1))
vs_2 = VS(region='global', bits=32, val=0xFA7B00B)
si = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=1)
if_1 = (vs_2 & claripy.If(si == 0, claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0), claripy.SI(bits=32, stride=0, lower_bound=0xffffffff, upper_bound=0xffffffff)))
nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_1.ite_excavated.args[1]) == vsa_model(VS(region='global', bits=32, val=0))))
nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_1.ite_excavated.args[2]) == vsa_model(vs_2)))
# if_2 = And(VS_3, IfProxy(si != 0, 0, 1)
vs_3 = VS(region='global', bits=32, val=0xDEADCA7)
si = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=1)
if_2 = (vs_3 & claripy.If(si != 0, claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0), claripy.SI(bits=32, stride=0, lower_bound=0xffffffff, upper_bound=0xffffffff)))
nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_2.ite_excavated.args[1]) == vsa_model(VS(region='global', bits=32, val=0))))
nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_2.ite_excavated.args[2]) == vsa_model(vs_3)))
# Something crazy is gonna happen...
#if_3 = if_1 + if_2
#nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_3.ite_excavated.args[1]) == vsa_model(vs_3)))
#nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_3.ite_excavated.args[1]) == vsa_model(vs_2)))
def test_vsa_constraint_to_si():
    """Check constraint_to_si(): for each constraint AST, the VSA backend should
    report satisfiability of the true/false sides and propose replacement SIs
    for the constrained variable."""
    # Set backend
    b = claripy.backends.vsa
    s = claripy.SolverVSA()  # pylint:disable=unused-variable
    SI = claripy.SI
    BVV = claripy.BVV
    # These tests exercise plain StridedIntervals, so keep DSIS off.
    claripy.vsa.strided_interval.allow_dsis = False

    #
    # If(SI == 0, 1, 0) == 1
    #
    s1 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.If(s1 == BVV(0, 32), BVV(1, 1), BVV(0, 1)) == BVV(1, 1))
    ast_false = (claripy.If(s1 == BVV(0, 32), BVV(1, 1), BVV(0, 1)) != BVV(1, 1))
    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s1)
    # True side: claripy.SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.is_true(trueside_replacement[0][1] == claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0)))
    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s1)
    # False side; claripy.SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # If(SI == 0, 1, 0) <= 1
    #
    s1 = SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.If(s1 == BVV(0, 32), BVV(1, 1), BVV(0, 1)) <= BVV(1, 1))
    ast_false = (claripy.If(s1 == BVV(0, 32), BVV(1, 1), BVV(0, 1)) > BVV(1, 1))
    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)  # Always satisfiable
    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, False)  # Not sat

    #
    # If(SI == 0, 20, 10) > 15
    #
    s1 = SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.If(s1 == BVV(0, 32), BVV(20, 32), BVV(10, 32)) > BVV(15, 32))
    ast_false = (claripy.If(s1 == BVV(0, 32), BVV(20, 32), BVV(10, 32)) <= BVV(15, 32))
    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s1)
    # True side: SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=0, lower_bound=0, upper_bound=0))
    )
    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s1)
    # False side; SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # If(SI == 0, 20, 10) >= 15
    #
    s1 = SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.If(s1 == BVV(0, 32), BVV(15, 32), BVV(10, 32)) >= BVV(15, 32))
    ast_false = (claripy.If(s1 == BVV(0, 32), BVV(15, 32), BVV(10, 32)) < BVV(15, 32))
    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s1)
    # True side: SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=0, lower_bound=0, upper_bound=0))
    )
    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s1)
    # False side: SI<32>1[1, 2] (the stale copy-pasted comment here used to say [0,0])
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # Extract(0, 0, Concat(BVV(0, 63), If(SI == 0, 1, 0))) == 1
    #
    s2 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.Extract(0, 0, claripy.Concat(BVV(0, 63), claripy.If(s2 == 0, BVV(1, 1), BVV(0, 1)))) == 1)
    ast_false = (claripy.Extract(0, 0, claripy.Concat(BVV(0, 63), claripy.If(s2 == 0, BVV(1, 1), BVV(0, 1)))) != 1)
    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s2)
    # True side: claripy.SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=0, lower_bound=0, upper_bound=0))
    )
    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s2)
    # False side; claripy.SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # Extract(0, 0, ZeroExt(32, If(SI == 0, BVV(1, 32), BVV(0, 32)))) == 1
    #
    s3 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.Extract(0, 0, claripy.ZeroExt(32, claripy.If(s3 == 0, BVV(1, 32), BVV(0, 32)))) == 1)
    ast_false = (claripy.Extract(0, 0, claripy.ZeroExt(32, claripy.If(s3 == 0, BVV(1, 32), BVV(0, 32)))) != 1)
    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s3)
    # True side: claripy.SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=0, lower_bound=0, upper_bound=0))
    )
    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s3)
    # False side; claripy.SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # Extract(0, 0, ZeroExt(32, If(Extract(32, 0, (SI & claripy.SI)) < 0, BVV(1, 1), BVV(0, 1))))
    #
    s4 = claripy.SI(bits=64, stride=1, lower_bound=0, upper_bound=0xffffffffffffffff)
    ast_true = (
        claripy.Extract(0, 0, claripy.ZeroExt(32, claripy.If(claripy.Extract(31, 0, (s4 & s4)).SLT(0), BVV(1, 32), BVV(0, 32)))) == 1)
    ast_false = (
        claripy.Extract(0, 0, claripy.ZeroExt(32, claripy.If(claripy.Extract(31, 0, (s4 & s4)).SLT(0), BVV(1, 32), BVV(0, 32)))) != 1)
    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    # The replaced expression is the 32-bit extract, not s4 itself (relies on AST caching for 'is').
    nose.tools.assert_true(trueside_replacement[0][0] is s4[31:0])
    # True side: SLT(0) holds, so the low 32 bits are negative: SI<32>1[-0x80000000, -1]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=1, lower_bound=-0x80000000, upper_bound=-1))
    )
    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s4[31:0])
    # False side: low 32 bits are non-negative: SI<32>1[0, 0x7fffffff]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=0, upper_bound=0x7fffffff))
    )

    #
    # TOP_SI != -1
    #
    s5 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=0xffffffff)
    ast_true = (s5 == claripy.SI(bits=32, stride=1, lower_bound=0xffffffff, upper_bound=0xffffffff))
    ast_false = (s5 != claripy.SI(bits=32, stride=1, lower_bound=0xffffffff, upper_bound=0xffffffff))
    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_true(trueside_sat)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s5)
    nose.tools.assert_true(claripy.backends.vsa.identical(trueside_replacement[0][1],
                                                          SI(bits=32, stride=1, lower_bound=0xffffffff,
                                                             upper_bound=0xffffffff)
                                                          )
                           )
    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_true(falseside_sat)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s5)
    nose.tools.assert_true(claripy.backends.vsa.identical(falseside_replacement[0][1],
                                                          SI(bits=32, stride=1, lower_bound=0,
                                                             upper_bound=0xfffffffe)
                                                          )
                           )

    # TODO: Add some more insane test cases
def test_vsa_discrete_value_set():
    """
    Test cases for DiscreteStridedIntervalSet: union, intersection, comparison
    and arithmetic over sets of strided intervals.
    """
    # Set backend
    b = claripy.backends.vsa
    s = claripy.SolverVSA()  # pylint:disable=unused-variable
    SI = claripy.SI
    BVV = claripy.BVV

    # Allow the use of DiscreteStridedIntervalSet (cuz we wanna test it!)
    claripy.vsa.strided_interval.allow_dsis = True
    # try/finally so a failing assertion cannot leak allow_dsis=True into
    # every test that runs after this one (the flag is module-global).
    try:
        #
        # Union
        #
        val_1 = BVV(0, 32)
        val_2 = BVV(1, 32)
        r = val_1.union(val_2)
        nose.tools.assert_true(isinstance(vsa_model(r), DiscreteStridedIntervalSet))
        # Bug fix: the original passed the expected SI as assert_true's *msg*
        # argument, so collapse() was never actually compared against anything.
        nose.tools.assert_true(
            vsa_model(r).collapse().identical(
                vsa_model(claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=1))))

        r = r.union(BVV(3, 32))
        ints = b.eval(r, 4)
        nose.tools.assert_equal(len(ints), 3)
        nose.tools.assert_equal(ints, [0, 1, 3])

        #
        # Intersection
        #
        val_1 = BVV(0, 32)
        val_2 = BVV(1, 32)
        r = val_1.intersection(val_2)
        # Intersection of two distinct concrete values is the empty interval.
        nose.tools.assert_true(isinstance(vsa_model(r), StridedInterval))
        nose.tools.assert_true(vsa_model(r).is_empty)

        val_1 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=10)
        val_2 = claripy.SI(bits=32, stride=1, lower_bound=10, upper_bound=20)
        val_3 = claripy.SI(bits=32, stride=1, lower_bound=15, upper_bound=50)
        r = val_1.union(val_2)
        nose.tools.assert_true(isinstance(vsa_model(r), DiscreteStridedIntervalSet))
        r = r.intersection(val_3)
        nose.tools.assert_equal(sorted(b.eval(r, 100)), [15, 16, 17, 18, 19, 20])

        #
        # Some logical operations
        #
        val_1 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=10)
        val_2 = claripy.SI(bits=32, stride=1, lower_bound=5, upper_bound=20)
        r_1 = val_1.union(val_2)
        val_3 = claripy.SI(bits=32, stride=1, lower_bound=20, upper_bound=30)
        val_4 = claripy.SI(bits=32, stride=1, lower_bound=25, upper_bound=35)
        r_2 = val_3.union(val_4)
        nose.tools.assert_true(isinstance(vsa_model(r_1), DiscreteStridedIntervalSet))
        nose.tools.assert_true(isinstance(vsa_model(r_2), DiscreteStridedIntervalSet))
        # r_1 ([0,20]) overlaps r_2 ([20,35]) only at 20, hence Maybe/True/False below.
        # r_1 < r_2
        nose.tools.assert_true(BoolResult.is_maybe(vsa_model(r_1 < r_2)))
        # r_1 <= r_2
        nose.tools.assert_true(BoolResult.is_true(vsa_model(r_1 <= r_2)))
        # r_1 >= r_2
        nose.tools.assert_true(BoolResult.is_maybe(vsa_model(r_1 >= r_2)))
        # r_1 > r_2
        nose.tools.assert_true(BoolResult.is_false(vsa_model(r_1 > r_2)))
        # r_1 == r_2
        nose.tools.assert_true(BoolResult.is_maybe(vsa_model(r_1 == r_2)))
        # r_1 != r_2
        nose.tools.assert_true(BoolResult.is_maybe(vsa_model(r_1 != r_2)))

        #
        # Some arithmetic operations
        #
        val_1 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=10)
        val_2 = claripy.SI(bits=32, stride=1, lower_bound=5, upper_bound=20)
        r_1 = val_1.union(val_2)
        val_3 = claripy.SI(bits=32, stride=1, lower_bound=20, upper_bound=30)
        val_4 = claripy.SI(bits=32, stride=1, lower_bound=25, upper_bound=35)
        r_2 = val_3.union(val_4)
        nose.tools.assert_true(isinstance(vsa_model(r_1), DiscreteStridedIntervalSet))
        nose.tools.assert_true(isinstance(vsa_model(r_2), DiscreteStridedIntervalSet))
        # r_1 + r_2
        r = r_1 + r_2
        nose.tools.assert_true(isinstance(vsa_model(r), DiscreteStridedIntervalSet))
        nose.tools.assert_true(vsa_model(r).collapse().identical(vsa_model(SI(bits=32, stride=1, lower_bound=20, upper_bound=55))))
        # r_2 - r_1
        r = r_2 - r_1
        nose.tools.assert_true(isinstance(vsa_model(r), DiscreteStridedIntervalSet))
        nose.tools.assert_true(vsa_model(r).collapse().identical(vsa_model(SI(bits=32, stride=1, lower_bound=0, upper_bound=35))))
    finally:
        # Disable it in the end
        claripy.vsa.strided_interval.allow_dsis = False
def test_solution():
    """solver.solution() membership checks for StridedIntervals and ValueSets."""
    # Set backend
    solver_type = claripy.SolverVSA
    s = solver_type()

    def VS(name=None, bits=None, region=None, val=None):
        # Helper: build a single-value ValueSet; region defaults to 'foobar'.
        region = 'foobar' if region is None else region
        return claripy.ValueSet(bits, region=region, region_base_addr=0, value=val, name=name)

    # [32, 320] with stride 10: members are exactly 32, 42, ..., 320.
    si = claripy.SI(bits=32, stride=10, lower_bound=32, upper_bound=320)
    nose.tools.assert_true(s.solution(si, si))
    nose.tools.assert_true(s.solution(si, 32))
    nose.tools.assert_false(s.solution(si, 31))

    # A singleton interval [3, 3].
    si2 = claripy.SI(bits=32, stride=0, lower_bound=3, upper_bound=3)
    nose.tools.assert_true(s.solution(si2, si2))
    nose.tools.assert_true(s.solution(si2, 3))
    nose.tools.assert_false(s.solution(si2, 18))
    nose.tools.assert_false(s.solution(si2, si))

    vs = VS(region='global', bits=32, val=0xDEADCA7)
    nose.tools.assert_true(s.solution(vs, 0xDEADCA7))
    nose.tools.assert_false(s.solution(vs, 0xDEADBEEF))

    # Union of two ValueSets over the same region behaves like the union of
    # their underlying intervals for solution() queries.
    si = claripy.SI(bits=32, stride=0, lower_bound=3, upper_bound=3)
    si2 = claripy.SI(bits=32, stride=10, lower_bound=32, upper_bound=320)
    vs = VS(bits=si.size(), region='foo', val=si._model_vsa)
    # vs = vs.annotate(RegionAnnotation('foo', 0, si2))
    vs2 = VS(bits=si2.size(), region='foo', val=si2._model_vsa)
    vs = vs.union(vs2)
    nose.tools.assert_true(s.solution(vs, 3))
    nose.tools.assert_true(s.solution(vs, 122))
    nose.tools.assert_true(s.solution(vs, si))
    nose.tools.assert_false(s.solution(vs, 2))
    nose.tools.assert_false(s.solution(vs, 322))
def test_reasonable_bounds():
    """max()/min() of signed-bounded SIs are reported as unsigned 32-bit values."""
    backend = claripy.backends.vsa

    # Entirely negative interval: bounds map to their two's-complement images.
    interval = claripy.SI(bits=32, stride=1, lower_bound=-20, upper_bound=-10)
    assert backend.max(interval) == 0xfffffff6
    assert backend.min(interval) == 0xffffffec

    # Interval straddling zero: unsigned range spans from 0 up to UINT_MAX.
    interval = claripy.SI(bits=32, stride=1, lower_bound=-20, upper_bound=10)
    assert backend.max(interval) == 0xffffffff
    assert backend.min(interval) == 0
def test_shifting():
    """Logical/arithmetic shifts and bit extraction on strided intervals."""
    SI = claripy.SI
    identical = claripy.backends.vsa.identical

    # <32>1[2,4] LShR 1 = <32>1[1,2]
    interval = SI(bits=32, stride=1, lower_bound=2, upper_bound=4)
    nose.tools.assert_true(identical(interval.LShR(1), SI(bits=32, stride=1, lower_bound=1, upper_bound=2)))

    # <32>4[15,11] LShR 4 = <32>1[0, 0xfffffff]
    interval = SI(bits=32, stride=4, lower_bound=15, upper_bound=11)
    nose.tools.assert_true(identical(interval.LShR(4), SI(bits=32, stride=1, lower_bound=0, upper_bound=0xfffffff)))

    # Extracting the top 28 bits of the same interval matches the shift above.
    interval = SI(bits=32, stride=4, lower_bound=15, upper_bound=11)
    nose.tools.assert_true(identical(interval[31:4], SI(bits=28, stride=1, lower_bound=0, upper_bound=0xfffffff)))

    # <32>1[-4,-2] >> 1 = <32>1[-2,-1]  (arithmetic shift preserves the sign)
    interval = SI(bits=32, stride=1, lower_bound=-4, upper_bound=-2)
    nose.tools.assert_true(identical(interval >> 1, SI(bits=32, stride=1, lower_bound=-2, upper_bound=-1)))

    # <32>1[-4,-2] LShR 1 = <32>1[0x7ffffffe,0x7fffffff]  (logical shift does not)
    interval = SI(bits=32, stride=1, lower_bound=-4, upper_bound=-2)
    nose.tools.assert_true(identical(interval.LShR(1), SI(bits=32, stride=1, lower_bound=0x7ffffffe, upper_bound=0x7fffffff)))
def test_reverse():
    """intersection() must see through .reversed on either or both operands."""
    top = claripy.SI(name="TOP", bits=64, lower_bound=0, upper_bound=0xffffffffffffffff, stride=1)  # TOP
    bounded = claripy.SI(name="range", bits=64, lower_bound=0, upper_bound=1337, stride=1)  # [0, 1337]

    # Every reversed/unreversed combination must yield the same bounded result.
    candidates = (
        top.intersection(bounded),
        top.reversed.intersection(bounded),
        top.intersection(bounded.reversed).reversed,
        top.reversed.intersection(bounded.reversed).reversed,
    )
    for candidate in candidates:
        nose.tools.assert_equal(candidate._model_vsa.max, 1337)
if __name__ == '__main__':
    # Run the whole suite when invoked directly, in the original order.
    for _test in (
        test_reasonable_bounds,
        test_reversed_concat,
        test_fucked_extract,
        test_simple_cardinality,
        test_wrapped_intervals,
        test_join,
        test_vsa,
        test_vsa_constraint_to_si,
        test_vsa_discrete_value_set,
        test_solution,
        test_shifting,
        test_reverse,
    ):
        _test()
|
import unittest
class WindowsSanityTestCase(unittest.TestCase):
    """
    This test case is a no-op, and exists only to ensure that windows paths work
    as part of the windows sanity ci test.
    """

    def test_windows_sanity(self):
        # Emitting output proves the test actually executed on the CI runner.
        message = 'sanity passed - test was run'
        print(message)
|
# flake8: noqa
from .bot_setting import BotSettingViewSet
from .deleted_message import DeletedMessageViewSet
from .documentation_link import DocumentationLinkViewSet
from .infraction import InfractionViewSet
from .nomination import NominationViewSet
from .off_topic_channel_name import OffTopicChannelNameViewSet
from .offensive_message import OffensiveMessageViewSet
from .reminder import ReminderViewSet
from .role import RoleViewSet
from .tag import TagViewSet
from .user import UserViewSet
|
import os
from montreal_forced_aligner.g2p.trainer import PyniniTrainer as Trainer
from montreal_forced_aligner.dictionary import Dictionary
from montreal_forced_aligner.exceptions import ArgumentError
from montreal_forced_aligner.config import TEMP_DIR
from montreal_forced_aligner.utils import get_available_dict_languages, get_dictionary_path
def train_g2p(args):
    """Train a Pynini G2P model from the dictionary referenced in *args*.

    Uses args.temp_directory if given (user-expanded), otherwise TEMP_DIR,
    optionally validating the trainer before training.
    """
    temp_dir = os.path.expanduser(args.temp_directory) if args.temp_directory else TEMP_DIR
    dictionary = Dictionary(args.dictionary_path, '')
    trainer = Trainer(dictionary, args.output_model_path, temp_directory=temp_dir,
                      order=args.order, num_jobs=args.num_jobs,
                      use_mp=not args.disable_mp)
    if args.validate:
        trainer.validate()
    trainer.train()
def validate(args, download_dictionaries=None):
    """Validate the dictionary path in *args*, resolving known downloadable names.

    :param args: parsed arguments with a ``dictionary_path`` attribute; it is
        rewritten in place when the path names a downloadable dictionary.
    :param download_dictionaries: iterable of known downloadable dictionary
        names; ``None`` is treated as "no downloadable dictionaries".
    :raises ArgumentError: if the path does not exist or is not a regular file.
    """
    # Bug fix: the original crashed with TypeError when called with the
    # declared default download_dictionaries=None ('x in None' is invalid).
    if download_dictionaries is None:
        download_dictionaries = []
    if args.dictionary_path.lower() in download_dictionaries:
        args.dictionary_path = get_dictionary_path(args.dictionary_path.lower())
    if not os.path.exists(args.dictionary_path):
        raise ArgumentError('Could not find the dictionary file {}'.format(args.dictionary_path))
    if not os.path.isfile(args.dictionary_path):
        raise ArgumentError('The specified dictionary path ({}) is not a text file.'.format(args.dictionary_path))
def run_train_g2p(args, download_dictionaries=None):
    """Entry point: validate the arguments, then train the G2P model."""
    if download_dictionaries is None:
        # Fall back to the full set of downloadable dictionary names.
        download_dictionaries = get_available_dict_languages()
    validate(args, download_dictionaries)
    train_g2p(args)
if __name__ == '__main__':  # pragma: no cover
    # Imported lazily so merely importing this module has no CLI side effects.
    from montreal_forced_aligner.command_line.mfa import train_g2p_parser, fix_path, unfix_path, dict_languages
    train_args = train_g2p_parser.parse_args()
    # fix_path()/unfix_path() bracket the run -- presumably they adjust the
    # environment for bundled binaries; confirm against command_line/mfa.py.
    fix_path()
    run_train_g2p(train_args, dict_languages)
    unfix_path()
|
# Copyright 2020 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
from __future__ import annotations
import functools
import warnings
import re
import json
import urllib.parse
import threading
import pathlib
import requests
import logging
from typing import (Optional, Tuple, Union, Container, Hashable, Iterator,
Iterable, Any, Dict, FrozenSet, Callable, Type, Sequence)
import flask
from marley import constants
from marley.worlds.grid_royale.core import get_games_folder
from marley.jamswank.jamming import server as jamming_server
from marley.jamswank.jamming.jam_file_database import JamFileDatabase
from marley.jamswank import SingletonSwank, SimpleField
class ArezzoUserSession(SingletonSwank):
    """Singleton swank record persisting the user's session state (favorites)."""
    # Favorited items; the lambda factory gives each session its own fresh list
    # instead of a shared mutable default.
    favorites = SimpleField(lambda: [])
|
# -*- coding: utf-8 -*-
import json
import pytest
from mock import patch, Mock
from sceptre.connection_manager import ConnectionManager
from sceptre.exceptions import UnsupportedTemplateFileTypeError
from sceptre_migration_tool import template
from sceptre_migration_tool.exceptions import ImportFailureError
from sceptre_migration_tool.migration_environment import MigrationEnvironment
class TestTemplate(object):
    """Tests for sceptre_migration_tool.template import/normalize/write helpers."""

    class MockConfig(dict):
        # dict subclass so tests can also attach attributes (e.g. sceptre_dir).
        pass

    def setup_method(self, test_method):
        # Fresh MigrationEnvironment with a mocked AWS connection per test.
        connection_manager = Mock(spec=ConnectionManager)
        environment_config = self.MockConfig()
        # NOTE(review): 'fake-spectre-dir' (sic) -- the same spelling is
        # asserted below, so it is internally consistent.
        environment_config.sceptre_dir = 'fake-spectre-dir'
        environment_config['user_variables'] = {}
        self.migration_environment = MigrationEnvironment(
            connection_manager, environment_config)

    @patch("sceptre_migration_tool.template._write_template")
    @patch("sceptre_migration_tool.template._normalize_template_for_write")
    def test_import_template__json_template_new_json_target(
            self, mock_normalize, mock_write):
        # Importing an AWS stack's JSON template into a new .json file should
        # fetch it, normalize it, and write it under the sceptre dir.
        fake_template_body = {
            'TemplateBody': {
                'Key': 'Value'
            }
        }
        fake_template_body_string = \
            json.dumps(fake_template_body['TemplateBody'])
        mock_connection_manager =\
            self.migration_environment.connection_manager
        mock_connection_manager.call.return_value = fake_template_body
        mock_normalize.return_value = fake_template_body_string
        template.import_template(
            self.migration_environment,
            'fake-aws-stack-name',
            'templates/fake-template-path.json'
        )
        mock_connection_manager.call.assert_called_once_with(
            service='cloudformation',
            command='get_template',
            kwargs={
                'StackName': 'fake-aws-stack-name',
                'TemplateStage': 'Original'
            }
        )
        mock_normalize.assert_called_once_with(
            fake_template_body['TemplateBody'],
            '.json'
        )
        mock_write.assert_called_once_with(
            'fake-spectre-dir/templates/fake-template-path.json',
            fake_template_body_string
        )

    def test__normalize_template_for_write_json_to_json(self):
        result = template._normalize_template_for_write(
            {'Key': 'Value'},
            ".json"
        )
        assert result == '{"Key": "Value"}'

    def test__normalize_template_for_write_yaml_to_json(self):
        result = template._normalize_template_for_write(
            'Key: Value\n',
            ".json"
        )
        assert result == '{"Key": "Value"}'

    def test__normalize_template_for_write_json_to_yaml(self):
        result = template._normalize_template_for_write(
            {'Key': 'Value'},
            ".yaml"
        )
        assert result == 'Key: Value\n'

    def test__normalize_template_for_write_yaml_to_yaml(self):
        result = template._normalize_template_for_write(
            'Key: Value\n',
            ".yaml"
        )
        assert result == 'Key: Value\n'

    def test__normalize_template_for_write_yaml_to_unsupported(self):
        with pytest.raises(UnsupportedTemplateFileTypeError):
            template._normalize_template_for_write('Key: Value\n', ".txt")

    @patch("sceptre_migration_tool.template.open")
    @patch("os.makedirs")
    @patch("os.path.isfile")
    def test__write_template__new_file(
        self, mock_isfile, mock_makedirs, mock_open
    ):
        mock_isfile.return_value = False
        template._write_template('fake-path/fake-file', 'fake-body')
        mock_makedirs.assert_called_once_with('fake-path')
        # NOTE(review): 'called_once_with' (without 'assert_') is a no-op --
        # Mock auto-creates the attribute and nothing is verified. These
        # should be assert_called_once_with(...) with the correct args.
        mock_open.called_once_with('fake-path')
        mock_open.return_value.__enter__.return_value\
            .write.called_once_with('fake-body/fake-file', 'w')

    @patch("sceptre_migration_tool.template.open")
    @patch("os.path.isfile")
    def test__write_template__existing_same_file(self, mock_isfile, mock_open):
        mock_isfile.return_value = True
        mock_open.return_value.__enter__.return_value\
            .read.return_value = 'fake-body: !Ref value'
        template._write_template('fake-path', 'fake-body: !Ref value')
        # NOTE(review): the two 'called_once...' lines below are no-op Mock
        # attribute accesses, not assertions (missing 'assert_' prefix).
        mock_open.called_once_with('fake-path', 'r')
        mock_open.return_value.read.called_once()
        mock_open.return_value.__enter__.return_value\
            .write.assert_not_called()

    @patch("sceptre_migration_tool.template.open")
    @patch("os.path.isfile")
    def test__write_template__existing_diff_file(self, mock_isfile, mock_open):
        mock_isfile.return_value = True
        mock_open.return_value.__enter__.return_value\
            .read.return_value = 'fake-diff-body'
        with pytest.raises(ImportFailureError):
            template._write_template('fake-path', 'fake-body')
        # NOTE(review): 'called_once_with'/'called_once' below are no-ops
        # (missing 'assert_' prefix); the last line also checks .write on the
        # mock itself rather than on the opened file handle.
        mock_open.called_once_with('fake-path')
        mock_open.return_value.read.called_once()
        mock_open.write.assert_not_called()
|
from data_structures.graph import Graph
from data_structures.tests.test_union_find import equal_sets_collection
def test_basic():
    """Vertices and edges added one at a time reproduce the adjacency list."""
    expected = {
        1: [],
        2: [3, 4],
        3: [1, 3],
        4: [],
    }
    graph = Graph()
    for vertex, neighbours in expected.items():
        graph.add_vertex(vertex)
        for neighbour in neighbours:
            graph.add_edge(vertex, neighbour)
    result = graph.adjacency_list
    assert result == expected, result
def test_is_undirected():
    """is_undirected() is true exactly when every edge has its reverse."""
    # 2->4 has no matching 4->2, so this graph is directed.
    directed = {
        1: [],
        2: [3, 4],
        3: [1, 3],
        4: [],
    }
    graph = Graph(adjacency_list=directed)
    assert graph.adjacency_list == directed
    assert not graph.is_undirected()

    # Every edge below appears in both directions (3->3 is its own reverse).
    undirected = {
        1: [3],
        2: [3, 4],
        3: [1, 3, 2],
        4: [2],
    }
    graph = Graph(adjacency_list=undirected)
    assert graph.adjacency_list == undirected
    assert graph.is_undirected()
def test_components():
    """get_components() partitions an undirected graph into connected parts."""
    adjacency = {
        0: [5, 7],
        1: [3],
        2: [3, 4],
        3: [1, 3, 2],
        4: [2],
        5: [0],
        6: [],
        7: [0],
    }
    graph = Graph(adjacency_list=adjacency)
    assert graph.adjacency_list == adjacency
    assert graph.is_undirected()

    components = graph.get_components()
    assert equal_sets_collection([{0, 5, 7}, {1, 2, 3, 4}, {6}], components), components
# NOTE(review): runs test_components() at import time -- likely leftover from
# manual debugging, since the test runner already collects it; confirm and remove.
test_components()
|
#!/usr/bin/env python
"""
Removes a dataset file ( which was first renamed by appending _purged to the file name ) from disk.
Usage: python remove_renamed_datasets_from_disk.py renamed.log
"""
from __future__ import print_function
import os
import sys
# Refuse to run on interpreters older than Python 2.4.
assert sys.version_info[:2] >= (2, 4)
def usage(prog):
    """Print a short usage banner for *prog* to stdout."""
    banner = "usage: %s file" % prog
    details = """
Removes a set of files from disk. The input file should contain a list of files
to be deleted, one per line. The full path must be specified and must begin
with /var/opt/galaxy.
A log of files deleted is created in a file with the same name as that input but
with .removed.log appended.
"""
    print(banner)
    print(details)
def main():
    """Unlink every dataset file listed in the input log.

    Expects exactly one command-line argument: a file listing absolute paths
    (one per line, each starting with /var/opt/galaxy) to remove. A log of
    removals is written to <input>.removed.log.
    """
    # Bug fix: the original compared the whole sys.argv *list* against the
    # strings "-h"/"--help" (always False), so the help flags never worked.
    if len(sys.argv) != 2 or sys.argv[1] in ("-h", "--help"):
        usage(sys.argv[0])
        sys.exit()
    infile = sys.argv[1]
    outfile = infile + ".removed.log"
    removed_files = 0
    # 'with' guarantees both files are closed even if an unlink raises
    # (the original leaked both handles).
    with open(outfile, 'w') as out, open(infile) as in_handle:
        print("# The following renamed datasets have been removed from disk", file=out)
        for line in in_handle:
            line = line.rstrip('\r\n')
            # Safety guard: only remove paths under the expected Galaxy root.
            if line and line.startswith('/var/opt/galaxy'):
                try:
                    os.unlink(line)
                    print(line, file=out)
                    removed_files += 1
                except Exception as exc:
                    # Best-effort removal: log the failure and keep going.
                    print("# Error, exception " + str(exc) + " caught attempting to remove " + line, file=out)
        print("# Removed " + str(removed_files) + " files", file=out)
if __name__ == "__main__":
main()
|
from .inference_methods import (inference_qpbo, inference_dai, inference_lp,
inference_ad3, inference_ogm,
inference_dispatch, get_installed,
compute_energy)
__all__ = ["inference_qpbo", "inference_dai", "inference_lp", "inference_ad3",
"inference_dispatch", "get_installed", "compute_energy",
"inference_ogm"]
|
import tensorflow as tf
def gated_linear_layer(inputs, gates, name=None):
    """Gated linear unit: element-wise inputs * sigmoid(gates)."""
    return tf.multiply(x=inputs, y=tf.sigmoid(gates), name=name)
# Why not BN but IN? original article use BN (IN is for CycleGAN-VC)
def instance_norm_layer(inputs, epsilon=1e-05, activation_fn=None, name=None):
    """Instance normalization with learnable scale and center (tf.contrib)."""
    # The original bound the result to a local shadowing the function's own
    # name; a distinct name avoids that confusion.
    normalized = tf.contrib.layers.instance_norm(
        inputs=inputs,
        center=True,
        scale=True,
        epsilon=epsilon,
        activation_fn=activation_fn,
        scope=name,
    )
    return normalized
def conv2d_layer(inputs, filters, kernel_size, strides, padding: list = None, activation=None, kernel_initializer=None, name=None):
    """2-D convolution preceded by explicit symmetric zero padding.

    :param padding: two-element list [pad_h, pad_w] applied symmetrically to
        the height and width axes. Bug fix: the original crashed with
        TypeError when padding was left at its declared default of None;
        None now means "no padding".
    :param name: layer name; also used as the prefix of the pad op's name
        (the original crashed on name=None when building that string).
    """
    pad_h, pad_w = (0, 0) if padding is None else (padding[0], padding[1])
    prefix = name or ''
    p = tf.constant([[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]])
    out = tf.pad(inputs, p, name=prefix + 'conv2d_pad')
    # 'valid' here because the padding above already provides the border.
    conv_layer = tf.layers.conv2d(
        inputs=out,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding='valid',
        activation=activation,
        kernel_initializer=kernel_initializer,
        name=name)
    return conv_layer
# GLU + Conv2d + IN
def downsample2d_block(inputs, filters, kernel_size, strides, padding: list = None, name_prefix='downsample2d_block_'):
    """Downsampling block: conv2d + instance norm on two parallel paths,
    combined by a gated linear unit (GLU)."""
    def _conv_then_norm(conv_suffix, norm_suffix):
        # One branch: padded convolution followed by instance normalization.
        conv = conv2d_layer(
            inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
            padding=padding, activation=None, name=name_prefix + conv_suffix)
        return instance_norm_layer(inputs=conv, activation_fn=None, name=name_prefix + norm_suffix)

    content = _conv_then_norm('h1_conv', 'h1_norm')
    gates = _conv_then_norm('h1_gates', 'h1_norm_gates')
    return gated_linear_layer(inputs=content, gates=gates, name=name_prefix + 'h1_glu')
# Used only for unsampling in Generator
# Used only for upsampling in the Generator.
def upsample2d_block(inputs, filters, kernel_size, strides, name_prefix='upsample2d_block_'):
    """Upsampling block: two transposed-conv + instance-norm paths merged by a GLU."""
    value_path = tf.keras.layers.Conv2DTranspose(filters, kernel_size, strides, padding='same')(inputs)
    value_norm = tf.contrib.layers.instance_norm(value_path, scope=name_prefix + 'instance1')
    gate_path = tf.keras.layers.Conv2DTranspose(filters, kernel_size, strides, padding='same')(inputs)
    gate_norm = tf.contrib.layers.instance_norm(gate_path, scope=name_prefix + 'instance2')
    return gated_linear_layer(value_norm, gate_norm)
# Generator
# Generator
def generator_gatedcnn(inputs, speaker_id=None, reuse=False, scope_name='generator_gatedcnn'):
    """Gated-CNN generator conditioned on a target-speaker one-hot vector.

    Args:
        inputs: feature map, shape [batch, h, w, c].
        speaker_id: one-hot target speaker vector, shape [batch, n_speakers]
            (e.g. [0, 1, 0, 0]).
        reuse: reuse variables of an already-built scope.
        scope_name: variable scope holding all generator variables.

    Returns:
        Converted feature map with a single output channel.
    """
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        else:
            assert scope.reuse is False
        # Downsampling stack (Conv2d + IN + GLU blocks).
        d1 = downsample2d_block(inputs, filters=32, kernel_size=[3, 9], strides=[1, 1], padding=[1, 4], name_prefix='down_1')
        d2 = downsample2d_block(d1, filters=64, kernel_size=[4, 8], strides=[2, 2], padding=[1, 3], name_prefix='down_2')
        d3 = downsample2d_block(d2, filters=128, kernel_size=[4, 8], strides=[2, 2], padding=[1, 3], name_prefix='down_3')
        d4 = downsample2d_block(d3, filters=64, kernel_size=[3, 5], strides=[1, 1], padding=[1, 2], name_prefix='down_4')
        d5 = downsample2d_block(d4, filters=5, kernel_size=[9, 5], strides=[9, 1], padding=[1, 2], name_prefix='down_5')
        # Upsampling: reshape the one-hot vector to [batch, 1, 1, n_speakers],
        # broadcast it over the spatial dims, and concatenate it on the
        # channel axis before every upsample block.
        speaker_id = tf.convert_to_tensor(speaker_id, dtype=tf.float32)
        c_cast = tf.cast(tf.reshape(speaker_id, [-1, 1, 1, speaker_id.shape.dims[-1].value]), tf.float32)
        c = tf.tile(c_cast, [1, d5.shape.dims[1].value, d5.shape.dims[2].value, 1])
        # fix: removed leftover debug `print(c.shape.as_list())`
        concated = tf.concat([d5, c], axis=-1)  # channel axis is last in TF
        u1 = upsample2d_block(concated, 64, kernel_size=[9, 5], strides=[9, 1], name_prefix='gen_up_u1')
        c1 = tf.tile(c_cast, [1, u1.shape.dims[1].value, u1.shape.dims[2].value, 1])
        u1_concat = tf.concat([u1, c1], axis=-1)
        u2 = upsample2d_block(u1_concat, 128, [3, 5], [1, 1], name_prefix='gen_up_u2')
        c2 = tf.tile(c_cast, [1, u2.shape[1], u2.shape[2], 1])
        u2_concat = tf.concat([u2, c2], axis=-1)
        u3 = upsample2d_block(u2_concat, 64, [4, 8], [2, 2], name_prefix='gen_up_u3')
        c3 = tf.tile(c_cast, [1, u3.shape[1], u3.shape[2], 1])
        u3_concat = tf.concat([u3, c3], axis=-1)
        u4 = upsample2d_block(u3_concat, 32, [4, 8], [2, 2], name_prefix='gen_up_u4')
        c4 = tf.tile(c_cast, [1, u4.shape[1], u4.shape[2], 1])
        u4_concat = tf.concat([u4, c4], axis=-1)
        u5 = tf.layers.Conv2DTranspose(filters=1, kernel_size=[3, 9], strides=[1, 1], padding='same', name='generator_last_deconv')(u4_concat)
        return u5
def discriminator(inputs, speaker_id, reuse=False, scope_name='discriminator'):
    """Conditional discriminator.

    inputs: [batch, height, width, channels]. speaker_id is reshaped to
    [batch, 1, 1, n], tiled over the spatial dims, and concatenated on the
    channel axis before every downsampling block. Returns the mean of the
    final one-channel conv map (kept as a rank-preserving tensor via
    keepdims=True).
    """
    with tf.variable_scope(scope_name) as scope:
        # Discriminator would be reused in CycleGAN
        if reuse:
            scope.reuse_variables()
        else:
            assert scope.reuse is False
        #convert data type to float32
        c_cast = tf.cast(tf.reshape(speaker_id, [-1, 1, 1, speaker_id.shape[-1]]), tf.float32)
        c = tf.tile(c_cast, [1, inputs.shape[1], inputs.shape[2], 1])
        concated = tf.concat([inputs, c], axis=-1)
        # Downsample
        d1 = downsample2d_block(
            inputs=concated, filters=32, kernel_size=[3, 9], strides=[1, 1], padding=[1, 4], name_prefix='downsample2d_dis_block1_')
        c1 = tf.tile(c_cast, [1, d1.shape[1], d1.shape[2], 1])
        d1_concat = tf.concat([d1, c1], axis=-1)
        d2 = downsample2d_block(
            inputs=d1_concat, filters=32, kernel_size=[3, 8], strides=[1, 2], padding=[1, 3], name_prefix='downsample2d_dis_block2_')
        c2 = tf.tile(c_cast, [1, d2.shape[1], d2.shape[2], 1])
        d2_concat = tf.concat([d2, c2], axis=-1)
        d3 = downsample2d_block(
            inputs=d2_concat, filters=32, kernel_size=[3, 8], strides=[1, 2], padding=[1, 3], name_prefix='downsample2d_dis_block3_')
        c3 = tf.tile(c_cast, [1, d3.shape[1], d3.shape[2], 1])
        d3_concat = tf.concat([d3, c3], axis=-1)
        # NOTE(review): 'diss' below looks like a typo for 'dis', but it is a
        # variable-scope name — renaming it would break saved checkpoints.
        d4 = downsample2d_block(
            inputs=d3_concat, filters=32, kernel_size=[3, 6], strides=[1, 2], padding=[1, 2], name_prefix='downsample2d_diss_block4_')
        c4 = tf.tile(c_cast, [1, d4.shape[1], d4.shape[2], 1])
        d4_concat = tf.concat([d4, c4], axis=-1)
        # c1 is rebound here to the final conv output (shadows the tile above,
        # which is no longer used at this point).
        c1 = conv2d_layer(d4_concat, filters=1, kernel_size=[36, 5], strides=[36, 1], padding=[0, 1], name='discriminator-last-conv')
        c1_red = tf.reduce_mean(c1, keepdims=True)
        return c1_red
# Why not strided-Conv but max-pooling? In G & D, you use strided-Conv
def domain_classifier(inputs, reuse=False, scope_name='classifier'):
    """Classify the speaker domain from a slice of the input feature map.

    Unlike the generator/discriminator this uses max-pooling between conv
    layers. Returns a [batch, 1, 1, C] tensor of pooled class activations.
    """
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        else:
            assert scope.reuse is False
        # Only the first 8 rows of the input are classified
        # (input is assumed [batch, 8, 512, 1] per the original note — confirm).
        band = inputs[:, 0:8, :, :]
        conv1 = tf.layers.conv2d(band, 8, kernel_size=[4, 4], padding='same', name=scope_name + '_conv2d01')
        pool1 = tf.layers.max_pooling2d(conv1, [2, 2], strides=[2, 2], name=scope_name + 'p1')
        conv2 = tf.layers.conv2d(pool1, 16, [4, 4], padding='same', name=scope_name + '_conv2d02')
        pool2 = tf.layers.max_pooling2d(conv2, [2, 2], strides=[2, 2], name=scope_name + 'p2')
        conv3 = tf.layers.conv2d(pool2, 32, [4, 4], padding='same', name=scope_name + '_conv2d03')
        pool3 = tf.layers.max_pooling2d(conv3, [2, 2], strides=[2, 2], name=scope_name + 'p3')
        conv4 = tf.layers.conv2d(pool3, 16, [3, 4], padding='same', name=scope_name + '_conv2d04')
        pool4 = tf.layers.max_pooling2d(conv4, [1, 2], strides=[1, 2], name=scope_name + 'p4')
        conv5 = tf.layers.conv2d(pool4, 4, [1, 4], padding='same', name=scope_name + '_conv2d05')
        pool5 = tf.layers.max_pooling2d(conv5, [1, 2], strides=[1, 2], name=scope_name + 'p5')
        # Global average pool collapses the spatial dims, then reshape back
        # to a [batch, 1, 1, channels] map for downstream consumers.
        pooled = tf.keras.layers.GlobalAveragePooling2D()(pool5)
        return tf.reshape(pooled, [-1, 1, 1, pooled.shape.dims[1].value])
|
"""
Tests the performance of all of the solutions listed in the following article:
https://therenegadecoder.com/code/how-to-convert-an-integer-to-a-string-in-python/
"""
from test_bench import test_bench
def control(_):
    """Empty control scenario for the benchmark.

    Measures only the call overhead shared by all candidates.

    :param _: placeholder for the int input
    :return: None
    """
def convert_by_type_casting(integer: int) -> str:
    """Convert *integer* to its decimal string via the str() constructor.

    :param integer: an integer
    :return: the integer as a string
    """
    result = str(integer)
    return result
def convert_by_f_string(integer: int) -> str:
    """Convert *integer* to its decimal string via an f-string.

    :param integer: an integer
    :return: the integer as a string
    """
    result = f"{integer}"
    return result
def convert_by_interpolation(integer: int) -> str:
    """Convert *integer* to its decimal string via %-interpolation.

    :param integer: an integer
    :return: the integer as a string
    """
    result = "%s" % integer
    return result
def main() -> None:
    """
    Tests the performance of all the functions defined in this file.
    """
    # test_bench pairs every candidate function with every named input set.
    test_bench(
        [
            control,
            convert_by_type_casting,
            convert_by_f_string,
            convert_by_interpolation
        ],
        {
            "Zero": [0],
            "Single Digit": [5],
            "Small Number": [1107321],
            "Massive Number": [2 ** 64]
        }
    )
# Script entry point.
if __name__ == '__main__':
    main()
|
from flasker.redis_client import RedisConn
# Redis BF.ADD / BF.EXISTS reply values, named for readability.
ITEM_ADDED_SUCCESSFULLY_BLOOM_FILTER = 1
ITEM_FOUND_IN_BLOOM_FILTER = 1
ITEM_NOT_FOUND_IN_BLOOM_FILTER = 0
def add_bloom_filter(bloom_filter, item):
    """
    Adds item to specified bloom filter.
    Keyword arguments:
    bloom_filter -- the bloom filter
    item -- the item to add
    Returns:
    1 if added, 0 if not added
    0 may indicate the item was previously added
    Throws:
    AssertionError on None value args
    """
    assert bloom_filter is not None
    assert item is not None
    bf_commands = RedisConn().get_client().bf()
    return bf_commands.add(bloom_filter, item)
def add_to_bloom_filter_format_result(bloom_filter, item):
    """
    Adds items to specified bloom filter and formats return as boolean.
    Keyword arguments:
    bloom_filter -- the bloom filter
    item -- the item to add
    Return:
    boolean indicating success
    Throws:
    AssertionError on None value args
    """
    return add_bloom_filter(bloom_filter, item) == ITEM_ADDED_SUCCESSFULLY_BLOOM_FILTER
def is_in_bloom_filter(bloom_filter, item):
    """
    Checks for item in specified bloom filter.
    Keyword arguments:
    bloom_filter -- the bloom filter
    item -- the item to check
    Returns:
    1 if found, 0 if not found
    Throws:
    AssertionError on None value args
    """
    assert bloom_filter is not None
    assert item is not None
    bf_commands = RedisConn().get_client().bf()
    return bf_commands.exists(bloom_filter, item)
def is_unique_bloom_filter(bloom_filter, item):
    """
    Converts Redis results to boolean representing if item was unique (aka not found).
    Keyword arguments:
    bloom_filter -- the bloom filter
    item -- the item to check
    Returns:
    boolean -- True if unique (aka not found)
    Throws:
    AssertionError on None value args
    """
    return is_in_bloom_filter(bloom_filter, item) == ITEM_NOT_FOUND_IN_BLOOM_FILTER
|
from google.cloud import datastore
# fix: the client below was referenced but never created (NameError at run).
datastore_client = datastore.Client()
# The kind for the new entity
kind = 'Task'
# The name/ID for the new entity
name = 'sampletask1'
# The Cloud Datastore key for the new entity
task_key = datastore_client.key(kind, name)
# Prepares the new entity
task = datastore.Entity(key=task_key)
task['description'] = 'Buy milk'
# Saves the entity
datastore_client.put(task)
print('Saved {}: {}'.format(task.key.name, task['description']))
class DatastoreManager(object):
    """Thin wrapper pairing a Datastore client with an entity id."""

    def __init__(self, id):
        # NOTE(review): `id` shadows the builtin; kept for caller compatibility.
        self._id = id
        self._client = datastore.Client()

    def has_id(self):
        """Stub: always reports that no id is present."""
        return False
|
#!/usr/bin/env python
import mapnik
# Map canvas: 1000x1000 pixels, projected in WGS 84 (EPSG:4326).
m = mapnik.Map(1000, 1000, '+init=epsg:4326')
m.background = mapnik.Color('#87CEFF')  # background colour
# Style/rule containers for the three layers.
ssStyles = mapnik.Style()      # rules for Study Site
ssRules = mapnik.Rule()        # symbolizers for Study Site
citiesStyles = mapnik.Style()  # rules for Cities
citiesRules = mapnik.Rule()    # symbolizers for Cities
pStyles = mapnik.Style()       # rules for Peru
pRules = mapnik.Rule()         # symbolizers for Peru
# Shapefile data sources and their layers.
studySiteDS = mapnik.Shapefile(file='StudySiteRegion.shp')
studySiteLyr = mapnik.Layer('Study Site', '+init=epsg:4326')
studySiteLyr.datasource = studySiteDS
peruDS = mapnik.Shapefile(file='GRC_adm1.shp')
peruLyr = mapnik.Layer('Peru', '+init=epsg:4326')
peruLyr.datasource = peruDS
cityNames = mapnik.Shapefile(file='places.shp')
citiesLyr = mapnik.Layer('Cities', '+init=epsg:4326')
citiesLyr.datasource = cityNames
# City labels: text symbolizer on the 'name' attribute.
t = mapnik.TextSymbolizer(mapnik.Expression('[name]'), 'DejaVu Sans Book', 12, mapnik.Color('#000000'))
t.fill = mapnik.Color('#000000')
t.avoid_edges = True
t.label_placement = mapnik.label_placement.POINT_PLACEMENT  # default placement
citiesRules.symbols.append(t)
citiesStyles.rules.append(citiesRules)
# Peru polygons: fill plus outline.
polygon_symbolizer_peru = mapnik.PolygonSymbolizer(mapnik.Color('#F0FFF0'))
pRules.symbols.append(polygon_symbolizer_peru)
line_symbolizer_peru = mapnik.LineSymbolizer(mapnik.Color('#000000'), 1)
pRules.symbols.append(line_symbolizer_peru)
pStyles.rules.append(pRules)
# Study-site polygons: opaque red fill plus black outline.
polygon_symbolizer_studysite = mapnik.PolygonSymbolizer(mapnik.Color('#FF0000'))
polygon_symbolizer_studysite.fill = mapnik.Color('#FF0000')
polygon_symbolizer_studysite.fill_opacity = 1
ssRules.symbols.append(polygon_symbolizer_studysite)
line_symbolizer_studysite = mapnik.LineSymbolizer(mapnik.Color('#000000'), 1)
ssRules.symbols.append(line_symbolizer_studysite)
ssStyles.rules.append(ssRules)
# Styles are given names only as they are applied to the map.
m.append_style('StudySite Style', ssStyles)
m.append_style('Peru Style', pStyles)
m.append_style('Cities Style', citiesStyles)
studySiteLyr.styles.append('StudySite Style')
peruLyr.styles.append('Peru Style')
citiesLyr.styles.append('Cities Style')
m.layers.append(peruLyr)
m.layers.append(studySiteLyr)
m.layers.append(citiesLyr)
# fix: was a Python-2-only print statement (SyntaxError under Python 3).
print("Study Site BBOX: ", studySiteLyr.envelope())
bbox = mapnik.Envelope(mapnik.Coord(21.0, 36), mapnik.Coord(26, 41))
m.zoom_to_box(bbox)
# NOTE(review): some mapnik builds need an explicit `import mapnik.printing`
# before this submodule is available — confirm on the target install.
page = mapnik.printing.PDFPrinter(pagesize=(0.15, 0.16), margin=0.0075, resolution=150, preserve_aspect=True, centering=5, is_latlon=False, use_ocg_layers=False)
page.render_map(m, "StudySiteMapZoom.pdf")
mapCTX = page.get_context()
page.render_on_map_scale(m)
page.finish()
|
"Testcases for photo messages"
from .. import Case
from bobot.Rule import Rule
from bobot.Response import Photo
# Rule responds with a photo only (no caption); expectation mirrors the rule.
responsePhotoWithoutCaption = Case.Case([
    Rule({
        'match': 'p',
        'response': {
            'photo': {
                'photo': './test/files/image.png'
            }
        }
    })
], [
    {
        'expected': [Case.Photo('./test/files/image.png', photoId='test').value()],
        'message': Case.Message('p', photoId='test').value()
    }
])
# Same as above, plus a caption on the photo response.
responsePhotoWithCaption = Case.Case([
    Rule({
        'match': 'p',
        'response': {
            'photo': {
                'photo': './test/files/image.png',
                'caption': 'Hello'
            }
        }
    })
], [
    {
        'expected': [Case.Photo('./test/files/image.png', caption='Hello', photoId='test').value()],
        'message': Case.Message('p', photoId='test', caption='Hello').value()
    }
])
# Rule points at a non-existent file ('no name ').
# NOTE(review): 'expected' still holds the success payload here — presumably
# the harness exercises only the error path; confirm against the Case runner.
responsePhotoErrorFileNotFound = Case.Case([
    Rule({
        'match': 'p',
        'response': {
            'photo': {
                'photo': 'no name '
            }
        }
    })
], [
    {
        'expected': [Case.Photo('./test/files/image.png', caption='Hello', photoId='test').value()],
        'message': Case.Message('p', photoId='test', caption='Hello').value()
    }
])
# The same three scenarios, with the response given as a Photo object
# instead of a plain dict.
responsePhotoWithoutCaptionAsPhoto = Case.Case([
    Rule({
        'match': 'p',
        'response': Photo('./test/files/image.png')
    })
], [
    {
        'expected': [Case.Photo('./test/files/image.png', photoId='test').value()],
        'message': Case.Message('p', photoId='test').value()
    }
])
responsePhotoWithCaptionAsPhoto = Case.Case([
    Rule({
        'match': 'p',
        'response': Photo('./test/files/image.png', 'Hello')
    })
], [
    {
        'expected': [Case.Photo('./test/files/image.png', caption='Hello', photoId='test').value()],
        'message': Case.Message('p', photoId='test', caption='Hello').value()
    }
])
responsePhotoErrorFileNotFoundAsPhoto = Case.Case([
    Rule({
        'match': 'p',
        'response': Photo('no name')
    })
], [
    {
        'expected': [Case.Photo('./test/files/image.png', caption='Hello', photoId='test').value()],
        'message': Case.Message('p', photoId='test', caption='Hello').value()
    }
])
|
from django.conf import (
settings,
)
from django.core.management.base import (
BaseCommand,
)
from django.utils import (
timezone,
)
from ...models import (
EmailTracker,
MonthlyStat,
)
class Command(BaseCommand):
    help = 'Updates monthly stats with data from email trackers and deletes outdated trackers'
    def handle(self, *args, **options):
        """Rebuild this month's email-stat HTML table from trackers.

        Deletes trackers older than settings.EMAILER_TRACKING_MONTHS
        (default 3), attaches remaining trackers to the current
        MonthlyStat as 'current' or 'older', then regenerates
        monthly_stat.stat_data as HTML table rows and saves it.
        """
        current_datetime = timezone.now()
        # `created` flag is unused; get_or_create just ensures the row exists.
        monthly_stat, created = MonthlyStat.objects.get_or_create(
            year_int=current_datetime.year,
            month_int=current_datetime.month,
        )
        try:
            tracking_months = settings.EMAILER_TRACKING_MONTHS
        except AttributeError:
            tracking_months = 3  # default retention when the setting is absent
        for tracker in EmailTracker.objects.all():
            ''' Find and delete outdated trackers '''
            # Compare linearized month counts (year*12 + month) to decide
            # whether the tracker has exceeded its retention window.
            deletion_datetime_months = tracker.send_complete.year * 12 + tracker.send_complete.month + tracking_months
            current_datetime_months = current_datetime.year * 12 + current_datetime.month + 1
            if current_datetime_months > deletion_datetime_months:
                tracker.delete()
            else:
                ''' Find and attach any appropriate unattached trackers as current or older '''
                # json_data values look like [year, month, ...] open events —
                # attach the tracker if it has an open in the current month.
                if tracker not in monthly_stat.current_trackers.all() \
                    and tracker not in monthly_stat.older_trackers.all() \
                    and tracker.json_data:
                    for key, value in tracker.json_data.items():
                        year = value[0]
                        month = value[1]
                        if year == current_datetime.year and month == current_datetime.month:
                            if current_datetime.year == tracker.send_complete.year \
                                and current_datetime.month == tracker.send_complete.month:
                                monthly_stat.current_trackers.add(tracker)
                            else:
                                monthly_stat.older_trackers.add(tracker)
                            break
        ''' Create sorted list of subscription names '''
        subscription_names = []
        month_trackers = monthly_stat.current_trackers.all()
        for tracker in month_trackers:
            if tracker.subscription_name not in subscription_names:
                subscription_names.append(tracker.subscription_name)
        subscription_names.sort()
        ''' Append 'Older emails' to list of subscriptions if appropriate '''
        if monthly_stat.older_trackers.exists():
            subscription_names += ['Older emails']
        ''' Reset stat data and tallies '''
        monthly_stat.stat_data = ''
        for name in subscription_names:
            sent_tally = 0
            open_tally = 0
            # Section header row for this subscription group.
            monthly_stat.stat_data = \
                '{} \
                <tr id="emailer_title_row"> \
                <td> </td> \
                <td>{}</td> \
                <td id="emailer_numerical">Sent</td> \
                <td id="emailer_numerical">Opens</td> \
                <td></td> \
                </tr>'.format(monthly_stat.stat_data, name)
            ''' Get appropriate group of trackers '''
            if name == 'Older emails':
                subscription_trackers = monthly_stat.older_trackers.all()
            else:
                subscription_trackers = month_trackers.filter(
                    subscription_name=name,
                )
            stat_dict = {}
            for tracker in subscription_trackers:
                ''' Calculate tracker's opened number for current month '''
                if tracker.json_data:
                    opens = 0
                    for key, value in tracker.json_data.items():
                        year = value[0]
                        month = value[1]
                        if year == current_datetime.year and month == current_datetime.month:
                            opens += 1
                    ''' Create a list of stat data and add to stat dictionary '''
                    stat_dict[tracker.pk] = [
                        opens,
                        tracker.number_sent,
                        tracker.subject,
                        tracker.send_complete_string(),
                    ]
            ''' Sort stat dictionary '''
            # Sorts lexicographically on [opens, number_sent, subject, date],
            # descending — i.e. most-opened emails first.
            sorted_by_value = sorted(stat_dict.items(), key=lambda kv: kv[1], reverse=True)
            if sorted_by_value:
                ''' Format data as rows '''
                row_number = 1
                for email in sorted_by_value:
                    # Alternate row ids for odd/even striping in the template.
                    if row_number & 1:
                        row_id = 'emailer_row_odd'
                    else:
                        row_id = 'emailer_row_even'
                    row_number_str = '{}.'.format(str(row_number))
                    tracker_data = email[1]
                    opens = '{:,}'.format(tracker_data[0])
                    sent = '{:,}'.format(tracker_data[1])
                    if tracker_data[0] != 0 and tracker_data[1] != 0:
                        percentage = '{:.1%}'.format(tracker_data[0] / tracker_data[1])
                    else:
                        percentage = '0%'
                    subject = tracker_data[2]
                    distribution_date = tracker_data[3]
                    monthly_stat.stat_data = \
                        '{} \
                        <tr id="{}"> \
                        <td id="emailer_numerical">{}</td> \
                        <td>{}<br>{}</td> \
                        <td id="emailer_numerical">{}</td> \
                        <td id="emailer_numerical">{}</td> \
                        <td id="emailer_numerical">{}</td> \
                        </tr>'.format(
                            monthly_stat.stat_data,
                            row_id,
                            row_number_str,
                            subject,
                            distribution_date,
                            sent,
                            opens,
                            percentage,
                        )
                    row_number += 1
                    sent_tally += tracker_data[1]
                    open_tally += tracker_data[0]
            # Totals row for this subscription group.
            total_opens = '{:,}'.format(open_tally)
            total_sent = '{:,}'.format(sent_tally)
            if open_tally != 0 and sent_tally != 0:
                total_percentage = '{:.1%}'.format(open_tally / sent_tally)
            else:
                total_percentage = '0%'
            monthly_stat.stat_data = \
                '{} \
                <tr id="emailer_title_row"> \
                <td><br><br></td> \
                <td id="emailer_numerical">Totals:<br><br></td> \
                <td id="emailer_numerical">{}<br><br></td> \
                <td id="emailer_numerical">{}<br><br></td> \
                <td id="emailer_numerical">{}<br><br></td> \
                </tr>'.format(
                    monthly_stat.stat_data,
                    total_sent,
                    total_opens,
                    total_percentage,
                )
        monthly_stat.save()
|
# -*- coding: utf-8 -*-
from ._version import version as __version__
from .data import *
from .era5_download import *
from .era5_tables import *
from .logging_config import *
from .processing_workers import *
# `from ._version import ...` also binds the `_version` submodule as an
# attribute of this package; remove it so it is not part of the public API.
del _version
|
import re
from tqdm import tqdm
from awa.data_source import DataSource
from awa.cached import Cached
class AIHRCReports(DataSource):
    """Scraper for AIHRC report listings on refworld.org."""
    def __init__(self):
        DataSource.__init__(
            self,
            "Afghanistan Independent Human Rights Commission",
            "https://www.refworld.org/publisher/AIHRC.html",
        )
        # Pagination links look like 'AIHRC,<commas><digits>.html'.
        # NOTE(review): the '.' before 'html' is an unescaped regex wildcard
        # and ',+' allows any number of commas — presumably matching
        # refworld's comma-delimited URLs; confirm against real links.
        self.segment_link_pattern = re.compile("AIHRC,+\\d+.html")
        self.segment_name_pattern = re.compile("\\d+")
    def is_result_page_link(self, link):
        # A result-page link has a matching href AND a numeric link text
        # (the page number shown in the pagination bar).
        href = link.get("href")
        link_text = link.text
        return (
            self.segment_link_pattern.search(href) is not None
            and self.segment_name_pattern.search(link_text) is not None
        )
    def find_links(self):
        # Yields (title, absolute_url) pairs for every report item across
        # the first page plus all discovered pagination pages.
        parsed = self.soup()
        links = [x for x in parsed.find_all("a") if x.get("href") is not None]
        other_parts = [x for x in links if self.is_result_page_link(x)]
        urls = [self.url] + [
            self.relative_url(link.get("href")) for link in other_parts
        ]
        cacheable = [Cached(u) for u in urls]
        with tqdm(total=len(urls), position=0) as pbar:
            for gettable in cacheable:
                soup = gettable.soup()
                # Report entries carry the 'itemlink' CSS class.
                links = [
                    x
                    for x in soup.find_all("a")
                    if x.get("href") is not None
                    and "itemlink" in x.get_attribute_list("class")
                ]
                for link in links:
                    title = link.text.strip().replace("\n", "")
                    href = gettable.relative_url(link.get("href"))
                    yield title, href
                pbar.update(1)
|
import logging
import logging.config
import os

from scrapy.utils.log import configure_logging
# Scrapy settings for scrapy_scout project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Project identity: used by Scrapy for logging and the default User-Agent.
BOT_NAME = 'scrapy_scout'
SPIDER_MODULES = ['scrapy_scout.spiders']
NEWSPIDER_MODULE = 'scrapy_scout.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapy_scout (+http://www.yourdomain.com)'
# robots.txt is deliberately ignored for this project.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'scrapy_scout.middlewares.ScrapyScoutSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'scrapy_scout.middlewares.ScrapyScoutDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Item pipeline that persists scraped items (see scrapy_scout/pipelines.py).
ITEM_PIPELINES = {
    'scrapy_scout.pipelines.ScrapyScoutPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Runtime configuration pulled from the environment (with local defaults).
env = os.environ
DATABASE = env.get('DATABASE_URL', 'sqlite:///scrapy_scout.db')
SENTRY_DSN = env.get('SENTRY_DSN')
MAILGUN_API_KEY = env.get('MAILGUN_API_KEY')
MAILGUN_DOMAIN = env.get('MAILGUN_DOMAIN')
RECEIVERS = env.get('RECEIVERS')
# dictConfig-style logging: everything to the console, WARNING+ to Sentry.
LOG_SETTINGS = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'console': {
            'format': '[%(asctime)s][%(levelname)s] %(name)s '
                      '%(filename)s:%(funcName)s:%(lineno)d | %(message)s',
            'datefmt': '%H:%M:%S',
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'console'
        },
        'sentry': {
            'level': 'WARNING',
            'class': 'raven.handlers.logging.SentryHandler',
            'dsn': SENTRY_DSN,
        },
    },
    'loggers': {
        '': {
            'handlers': ['console', 'sentry'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'scrapy_scout': {
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}
# Optional local overrides; a missing local_settings module is fine.
try:
    from scrapy_scout.local_settings import *
except ImportError:
    pass
# Replace Scrapy's default root handler with our dictConfig setup.
# Requires `import logging.config` (logging.config is a submodule that
# `import logging` alone does not load).
configure_logging(install_root_handler=False)
logging.config.dictConfig(LOG_SETTINGS)
|
from typing import *
if TYPE_CHECKING:
from django_hint import *
# Names exported by `from <module> import *`.
__all__ = [
    "get_ip", "get_previous_site"
]
def get_ip(request: "RequestType") -> str:
    """Return the client IP, preferring the first X-Forwarded-For hop."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def get_previous_site(request: "RequestType") -> Optional[str]:
    """Return the Referer header value, or None when it is absent."""
    referer = request.META.get("HTTP_REFERER")
    return referer
|
from django.apps import AppConfig
class HatvanhatConfig(AppConfig):
    """Django application configuration for the 'hatvanhat' app."""
    name = 'hatvanhat'
|
import click
import json
import operator
def pwdpolicy(passwords, minlength=0, need_lower=0, need_upper=0, need_number=0, need_special=0, need_families=0):
    """Filter raw password bytes against a composition policy.

    Args:
        passwords: iterable of password byte strings; empty entries skipped.
        minlength: minimum length (checked on the raw bytes).
        need_lower / need_upper / need_number / need_special: minimum count
            of characters in each class ("family").
        need_families: minimum number of distinct character classes present.

    Returns:
        List of decoded (str) passwords that satisfy every requirement.

    Fixes:
        - `need_number` was previously accepted but never enforced.
        - removed the per-password debug print that corrupted the tool's
          stdout output.
    """
    valid = []
    for password in passwords:
        if not password:
            continue
        if len(password) < minlength:
            continue
        password = password.decode(errors='ignore')
        are_lower = 0
        are_upper = 0
        are_number = 0
        are_special = 0
        families = 0
        for ch in password:
            if ch.islower():
                are_lower += 1
                if are_lower == 1:
                    families += 1
                continue
            if ch.isupper():
                are_upper += 1
                if are_upper == 1:
                    families += 1
                continue
            if ch.isnumeric():
                are_number += 1
                if are_number == 1:
                    families += 1
                continue
            # Anything that is not lower/upper/numeric counts as special.
            are_special += 1
            if are_special == 1:
                families += 1
        if need_families > families:
            continue
        if need_lower > are_lower:
            continue
        if need_upper > are_upper:
            continue
        if need_number > are_number:  # fix: this requirement was never checked
            continue
        if need_special > are_special:
            continue
        valid.append(password)
    return valid
# CLI wrapper: read newline-separated passwords (as bytes) from INFILE
# (default stdin) and print those that satisfy the policy, one per line.
# (Kept as a comment rather than a docstring: click would surface a
# docstring as --help text and change the CLI output.)
@click.command()
@click.argument('infile', type=click.File('rb', errors='ignore'), default='-')
@click.option('--length', 'minlength', default=0)
@click.option('--number', 'need_number', default=0)
@click.option('--lower', 'need_lower', default=0)
@click.option('--upper', 'need_upper', default=0)
@click.option('--special', 'need_special', default=0)
@click.option('--families', 'need_families', default=0)
def cmd_pwd_policy(infile, minlength, need_number, need_lower, need_upper, need_special, need_families):
    passwords = infile.read().split(b'\n')
    valid = pwdpolicy(passwords, minlength=minlength, need_number=need_number, need_lower=need_lower, need_upper=need_upper, need_special=need_special, need_families=need_families)
    print('\n'.join(valid))
# Script entry point.
if __name__ == '__main__':
    cmd_pwd_policy()
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import theanets
from utils import load_mnist, plot_layers
# Load labelled MNIST; the third split (presumably test — confirm against
# utils.load_mnist) is discarded.
train, valid, _ = load_mnist(labels=True)
# N*N hidden units so the learned features tile into an N x N image grid.
N = 10
e = theanets.Experiment(
    theanets.Classifier,
    layers=(784, N * N, ('softmax', 10)),
)
# Train until validation improvement falls below 0.1%.
e.train(train, valid, min_improvement=0.001)
# Visualize the input->hidden and hidden->output weight matrices.
plot_layers([e.network.find('hid1', 'w'), e.network.find('out', 'w')])
plt.tight_layout()
plt.show()
|
Given the head of a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list. Return the linked list sorted as well.
Example 1:
Input: head = [1,2,3,3,4,4,5]
Output: [1,2,5]
Example 2:
Input: head = [1,1,1,2,3]
Output: [2,3]
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Remove every value that appears more than once in a sorted list.

        Builds a fresh list containing only the values that occur exactly
        once. Dicts preserve insertion order and the input is sorted, so
        the rebuilt list stays sorted. O(n) time, O(n) space.
        """
        # First pass: count occurrences of each value.
        counts = {}
        node = head
        while node is not None:  # idiom fix: was `head != None`
            counts[node.val] = counts.get(node.val, 0) + 1
            node = node.next
        # Second pass: keep only the values that occurred exactly once.
        dummy = tail = ListNode()
        for val, count in counts.items():  # was `Dict.keys()` membership + shadowed name
            if count == 1:
                tail.next = ListNode(val)
                tail = tail.next
        return dummy.next
|
from django.test.utils import setup_test_environment
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# Prepare Django's test environment for the live-server test below.
setup_test_environment()
class PairProgramTest(StaticLiveServerTestCase):
    """Browser end-to-end test: log in and create a pair-programming session."""
    # Pre-loaded user accounts used by the login step.
    fixtures = ['users.json']
    def setUp(self):
        # Headless PhantomJS browser; generous implicit wait for async UI.
        self.browser = webdriver.PhantomJS()
        self.browser.set_window_size(1400, 1000)
        self.browser.implicitly_wait(10)
    def tearDown(self):
        self.browser.quit()
    def test_can_reach_pair_program_page(self):
        """Log in, open the Pair Programming page, and create a session."""
        self.browser.get(self.live_server_url)
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Codango', body.text)
        # logging in username and password
        username_field = self.browser.find_element_by_name('username')
        username_field.send_keys('lade')
        password_field = self.browser.find_element_by_name('password')
        password_field.send_keys('password')
        password_field.send_keys(Keys.RETURN)
        # username and password accepted
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Share', body.text)
        # View Sessions page
        self.browser.find_element_by_link_text('Pair Programming').click()
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Create a new Programming session', body.text)
        # create a new session (wait for the modal dialog to appear)
        self.browser.find_element_by_link_text(
            'Create a new Programming session').click()
        block = WebDriverWait(self.browser, 60)
        block.until(
            EC.visibility_of_element_located(
                (By.CLASS_NAME, 'modal')
            )
        )
        self.browser.find_element_by_name(
            'session_name').send_keys('Pairing Session with the boss')
        self.browser.find_element_by_xpath(
            "//button[contains(text(),'Create')]").click()
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Pairing Session with the boss', body.text)
"""A setuptools based setup module."""
from os import path
from setuptools import setup, find_packages
from io import open
# Absolute path of the directory containing this setup script (unused below
# but conventional for reading e.g. a README as long_description).
here = path.abspath(path.dirname(__file__))
setup(
    name='hass-wh-triggers',
    version='0.0.7',
    description='HASS-WH-Triggers',
    long_description='https://github.com/danielperna84/hass-wh-triggers',
    url='https://github.com/danielperna84/hass-wh-triggers',
    author='Daniel Perna',
    author_email='danielperna84@gmail.com',
    license='MIT',
    classifiers=[
        'Programming Language :: Python :: 3.7',
    ],
    keywords='Home Assistant FIDO2 WebAuthn TOTP',
    packages=find_packages(),
    # Runtime dependencies. NOTE(review): cbor2 is pinned exactly —
    # presumably for FIDO2/WebAuthn compatibility; confirm before bumping.
    install_requires=[
        "cbor2==4.1.2",
        "cryptography",
        "Flask",
        "Flask-Login",
        "Flask-SQLAlchemy",
        "Flask-WTF",
        "future",
        "pyotp",
        "pyOpenSSL",
        "setuptools",
        "setuptools-scm",
        "six",
        "SQLAlchemy",
        "wheel",
        "WTForms",
    ],
    include_package_data=True,
    data_files=[]
)
|
# The first version was licensed as "Original Source License"(see below).
# Several enhancements and at UW Robot Learning Lab
#
# Original Source License:
#
# Copyright (c) 2019 Georgia Tech Robot Learning Lab
# Licensed under the MIT License.
"""
configs for ploting
"""
from matplotlib import cm
from itertools import chain
SET2COLORS = cm.get_cmap('Set2').colors
SET2 = {'darkgreen': SET2COLORS[0],
'orange': SET2COLORS[1],
'blue': SET2COLORS[2],
'pink': SET2COLORS[3],
'lightgreen': SET2COLORS[4],
'gold': SET2COLORS[5],
'brown': SET2COLORS[6],
'grey': SET2COLORS[7],
}
SET1COLORS = cm.get_cmap('Set1').colors
SET1 = {
'red': SET1COLORS[0],
'blue': SET1COLORS[1],
'green': SET1COLORS[2],
'purple': SET1COLORS[3],
'orange': SET1COLORS[4],
'yellow': SET1COLORS[5],
'brown': SET1COLORS[6],
'pink': SET1COLORS[7],
'grey': SET1COLORS[8]
}
code_configs = {
'bc-nn': (r'\textsc{BC+NN}', SET1['blue']),
'bc-rmp': (r'\textsc{BC+RMP}', SET1['purple']),
'code-nn': (r'\textsc{CODE+NN}', SET1['green']),
'code-rmp': (r'\textsc{CODE+RMP}', SET2['lightgreen']),
'order': [
'bc-nn', 'bc-rmp', 'code-nn', 'code-rmp']
}
rmp2_configs = {
'rmp': (r'\textsc{RMP}', SET2['lightgreen']),
'rmp-obs-feat': (r'\textsc{RMP-RESIDUAL}', SET1['blue']),
'nn': (r'\textsc{NN}', 'gray'), # SET1['grey']),
'nn-residual': (r'\textsc{NN-RESIDUAL}', 'indianred'), # SET1['red']),
'order': [
'rmp-obs-feat', 'rmp', 'nn-residual', 'nn']
}
gtc_configs = {
'rmp-obs-feat': (r'\textsc{STRUCTURED}', [0.4627451, 0.7254902, 0.]),
'nn': (r'\textsc{NN}', 'gray'), # SET1['grey']),
'order': [
'rmp-obs-feat', 'nn']
}
class Configs(object):
    """Maps experiment directory names to legend labels and colours.

    With a known *style*, look-ups come from the matching module-level
    ``<style>_configs`` dict; otherwise colours are drawn sequentially
    from matplotlib palettes and labels fall back to the raw name.
    """

    def __init__(self, style=None, colormap=None):
        if style:
            self.configs = globals()[style + '_configs']
            # Every name listed in 'order' must have a style entry.
            for name in self.configs['order']:
                assert name in self.configs, 'Unknown exp: {}'.format(name)
            return
        self.configs = None
        if colormap is None:
            # Chain three qualitative palettes for a long colour cycle.
            self.colors = chain(iter(cm.get_cmap('Set1').colors),
                                iter(cm.get_cmap('Set2').colors),
                                iter(cm.get_cmap('Set3').colors))
        else:
            self.colors = iter(cm.get_cmap(colormap).colors)

    def color(self, exp_name):
        """Colour for *exp_name* (next free palette colour when unconfigured)."""
        if self.configs is not None:
            return self.configs[exp_name][1]
        return next(self.colors)

    def label(self, exp_name):
        """Legend label for *exp_name* (the raw name when unconfigured)."""
        return exp_name if self.configs is None else self.configs[exp_name][0]

    def sort_dirs(self, dirs):
        """Sort *dirs* by the configured 'order'; unknown names go last."""
        if self.configs is None:
            return dirs
        order = self.configs['order']
        return sorted(dirs, key=lambda d: order.index(d) if d in order else 100)
|
#!/usr/bin/env python
from setuptools import setup

# Trove classifiers describing the package.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Framework :: Setuptools Plugin',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: English',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 3',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Software Development :: Build Tools',
    'Topic :: Software Development :: Internationalization',
    'Topic :: Software Development :: Localization',
    'Topic :: Software Development :: User Interfaces',
    'Topic :: System :: Software Distribution',
]

setup(
    name='setup_qt',
    version='1.0.0',
    description='Compile Qt resource files, UI files and translations in setup.py',
    long_description=open('README.rst').read(),
    author='Michal Krenek (Mikos)',
    author_email='m.krenek@gmail.com',
    url='https://github.com/xmikos/setup_qt',
    license='MIT',
    py_modules=['setup_qt'],
    install_requires=['setuptools'],
    classifiers=CLASSIFIERS,
)
|
import os
import azure.cognitiveservices.speech as speechsdk

# SECURITY: the subscription key was previously hard-coded in source control.
# Prefer the AZURE_SPEECH_KEY / AZURE_SPEECH_REGION environment variables;
# the literals remain only as a fallback and should be rotated and removed.
speech_key = os.environ.get("AZURE_SPEECH_KEY", "e0c8f2481be144a5963b9d6b8d24dfac")
service_region = os.environ.get("AZURE_SPEECH_REGION", "westus2")

# Translate spoken Mandarin to English.
from_language = 'zh-CN'
to_language = 'en'
def translate_speech_to_speech():
    """Recognize one utterance from the default microphone, translate it
    from `from_language` to `to_language`, print the result, and speak the
    translation aloud through the default speaker.

    Uses the module-level `speech_key` / `service_region` credentials.
    """
    translation_config = speechsdk.translation.SpeechTranslationConfig(
        subscription=speech_key, region=service_region)
    translation_config.speech_recognition_language = from_language
    translation_config.add_target_language(to_language)
    recognizer = speechsdk.translation.TranslationRecognizer(
        translation_config=translation_config)
    print('Say something...')
    # Blocks until a single utterance has been recognized (or times out).
    result = recognizer.recognize_once()
    print(get_result_text(reason=result.reason, result=result))
    # Synthesize the translated text with an English neural voice.
    speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
    speech_config.speech_synthesis_voice_name = "en-US-AriaNeural"
    audio_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
    # .get() waits for synthesis to finish before returning.
    speech_synthesizer.speak_text_async(result.translations[to_language]).get()
def get_result_text(reason, result):
    """Format a recognition/translation result for display.

    BUG FIX: the original built a dict whose f-string values were ALL
    evaluated eagerly, so e.g. a NoMatch result still evaluated
    ``result.translations[to_language]`` and could raise. Branch lazily
    instead. (Also removes a stray doubled quote in the TRANSLATED line.)
    """
    if reason == speechsdk.ResultReason.TranslatedSpeech:
        return (f'RECOGNIZED "{from_language}": {result.text}\n'
                f'TRANSLATED into "{to_language}": {result.translations[to_language]}')
    if reason == speechsdk.ResultReason.RecognizedSpeech:
        return f'Recognized: "{result.text}"'
    if reason == speechsdk.ResultReason.NoMatch:
        return f'No speech could be recognized: {result.no_match_details}'
    if reason == speechsdk.ResultReason.Canceled:
        return f'Speech Recognition canceled: {result.cancellation_details}'
    return 'Unable to recognize speech'
# Script entry point: run one speech-to-speech translation pass.
translate_speech_to_speech()
import numpy as np
def histo(img):
    """Return a 2x256 intensity histogram of a single-channel image.

    Row 0 holds the intensity values 0..255; row 1 holds the number of
    pixels with each intensity.
    """
    # np.int was removed in NumPy >= 1.24; use an explicit width.
    hist = np.zeros([2, 256], dtype=np.int64)
    hist[0] = np.arange(256)
    for i in range(256):
        # BUG FIX: the original summed np.where(img == i), which returns the
        # *indices* of matching pixels — that summed index values rather than
        # counting occurrences. Count the matching pixels instead.
        hist[1, i] = np.count_nonzero(img == i)
    return hist
def calculate_metrics(img):
    """Return (mean, variance, histogram) for a grayscale or RGB image.

    For a 2-D image the histogram is 2x256; for an RGB image the three
    per-channel histograms are stacked along the last axis (2x256x3).
    """
    num = np.prod(img.shape)
    mean = img.sum() / num
    # Population variance (mean squared deviation).
    sd = ((img - mean) ** 2).sum() / num
    # BUG FIX: the original wrote `len = img.shape` and then `if len == 2:`,
    # comparing a shape *tuple* to an int — always False, so grayscale images
    # crashed in the RGB branch. Test the rank instead.
    if img.ndim == 2:
        hist = histo(img)
    else:
        hist = np.stack((histo(img[:, :, 0]),
                         histo(img[:, :, 1]),
                         histo(img[:, :, 2])), axis=2)
    return mean, sd, hist
def normalisation(img, pn=True, pc=False, ps=False):
    """Normalise an image and report statistics of the result.

    Exactly one mode flag should be set:
      pn -- min-max scale to [0, 1] (default)
      pc -- subtract the mean (centre)
      ps -- subtract the mean and divide by the variance

    Returns (normalised image, its mean, [max, min] of it, its variance).
    Raises ValueError if no mode flag is set.
    """
    # Avoid shadowing the builtins range/len/sum as the original did.
    value_range = np.max(img) - np.min(img)
    num = np.prod(img.shape)
    mean = img.sum() / num
    # NOTE: this is the population *variance*, not the standard deviation,
    # matching the original computation used by the ps branch.
    sd = ((img - mean) ** 2).sum() / num
    if pn:
        norm = (img - np.min(img)) / value_range
    elif pc:
        norm = img - mean
    elif ps:
        norm = (img - mean) / sd
    else:
        # BUG FIX: the original fell through with `norm` unbound and raised
        # an opaque UnboundLocalError; fail with a clear message instead.
        raise ValueError("one of pn, pc or ps must be True")
    mean_output_image = norm.sum() / num
    range_output_image = np.array([np.max(norm), np.min(norm)])
    # BUG FIX: the original returned the element-wise array
    # ((norm - mean)**2)/num; the variance is the scalar sum of that.
    variance_output_image = ((norm - mean_output_image) ** 2).sum() / num
    return norm, mean_output_image, range_output_image, variance_output_image
|
from django.apps import AppConfig
class MecabNerConfig(AppConfig):
    """Django application configuration for the ``mecab_ner`` app."""
    # Dotted module path Django uses to locate this application.
    name = 'mecab_ner'
|
import bisect
from math import fabs
def index(a, x):
    """Return the position of *x* in sorted sequence *a*, or -1 if absent."""
    pos = bisect.bisect_left(a, x)
    if pos == len(a) or a[pos] != x:
        return -1
    return pos
# Read test cases until EOF: a count n, a line of book prices, and a target
# sum m. Report the pair of prices summing exactly to m whose difference is
# smallest ("Exact Sum" style problem).
while True:
    try:
        n = int(input())
        line = input()
        m = int(input())
        array = []
        # Keep the prices sorted as they are inserted.
        for i in line.split():
            bisect.insort_right(array, int(i))
        sol = None
        min_diff = 10000000
        for i in range(len(array)) :
            # Binary-search the complement of array[i] among the later items.
            j = index(array[i+1:], m - array[i])
            if j >= 0:
                # Convert from the slice's local index to a global index.
                j += i + 1
                diff = array[i] - array[j]
                if diff < 0:
                    diff *= -1
                if diff < min_diff:
                    min_diff = diff
                    sol = (i, j)
        # NOTE(review): assumes every test case has at least one valid pair;
        # `sol` stays None otherwise and the next line raises TypeError.
        b_i = array[sol[0]]
        b_j = array[sol[1]]
        print("Peter should buy books whose prices are {} and {}.".format(b_i, b_j))
        print()
        # Consume the blank line separating test cases.
        input()
    except(EOFError):
        break
|
import sqlite3
import os

# Path of the SQLite database this script recreates from scratch.
DB_FILE_NAME = "simple-e-commerce.db"

# Start clean: remove any database left over from a previous run.
if os.path.exists(DB_FILE_NAME):
    os.remove(DB_FILE_NAME)

connection = sqlite3.connect(DB_FILE_NAME)
cursor = connection.cursor()
# BUG FIX: `desc` is an SQL keyword (DESC); it must be quoted to be usable
# as a column name.
cursor.execute("""
    CREATE TABLE products
    (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        catId INTEGER,
        priority INTEGER,
        name TEXT,
        priceMin INTEGER,
        priceMax INTEGER,
        imgUrl TEXT,
        activeImg INTEGER,
        briefImgScale INTEGER,
        briefImgCroppedX INTEGER,
        briefImgCroppedY INTEGER,
        "desc" TEXT,
        briefImgUrl TEXT
    )
""")
connection.commit()
|
from discord.errors import ClientException, DiscordException

# BUG FIX: the original `__all__ = ('others')` was a plain string — the
# trailing comma needed for a 1-tuple was missing — and it named a symbol
# that does not exist in this module. Export the classes actually defined.
__all__ = (
    'CommandError',
    'other',
    'QuestionMarkError',
)
class CommandError(DiscordException):
    r"""The base exception type for all command related errors.
    This inherits from :exc:`discord.DiscordException`.
    This exception and exceptions inherited from it are handled
    in a special way as they are caught and passed into a special event
    from :class:`.Bot`\, :func:`on_command_error`.
    """
    def __init__(self, message=None, *args):
        if message is None:
            super().__init__(*args)
            return
        # Neutralise mass mentions with a zero-width space before the
        # text can reach chat output.
        sanitized = message.replace('@everyone', '@\u200beveryone')
        sanitized = sanitized.replace('@here', '@\u200bhere')
        super().__init__(sanitized, *args)
class other(CommandError):
    """Generic catch-all command error (name kept lowercase for compatibility)."""
    pass
class QuestionMarkError(other):
    """Raised for question-mark related command failures."""
    pass
# Generated by Django 2.0.1 on 2018-02-03 07:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make club description and image optional."""

    dependencies = [
        ('club', '0002_club_position'),
    ]

    operations = [
        # Allow the description to be blank in forms and NULL in the database.
        migrations.AlterField(
            model_name='club',
            name='club_description',
            field=models.TextField(blank=True, null=True, verbose_name='club description'),
        ),
        # Same relaxation for the club image; uploads go to a dated path.
        migrations.AlterField(
            model_name='club',
            name='club_image',
            field=models.ImageField(blank=True, null=True, upload_to='profile/%Y/%m/%d/'),
        ),
    ]
|
"""
All main classes of the library, in general related to internal types and whatnot. Reading-related classes are at the
:mod:`~py2df.reading` module.
"""
from .abc import *
from .subcollections import *
from .dataclass import *
from .mc_types import *
from .collections import *
from .variable import *
|
# lambdas
# list comprehension
# dict comprehension
|
import sys
from io import StringIO
from discord import Embed
from discord.ext.commands import (Cog, Context, command)
from util.colorUtil import bot_color, red_color
class EvalCommand(Cog):
    """Cog exposing an `evalpy` command that executes Python code from a
    chat message and replies with the captured stdout or the failure."""

    def __init__(self, bot):
        self.bot = bot

    @command(name='evalpy')
    async def evaluate_command(self, ctx: Context, *, code):
        """Delete the invoking message, exec() the posted code and reply
        with an embed containing the code and its output (or an error)."""
        await ctx.message.delete()
        # Strip the markdown code fence, keeping only the code itself.
        code = code.replace('```py', '').replace('```', '')
        old_stdout = sys.stdout
        redirected_out = sys.stdout = StringIO()
        failed = False
        try:
            # SECURITY: exec() runs arbitrary code with the bot's privileges;
            # this command must remain restricted to trusted users.
            exec(code)
        except Exception:
            # BUG FIX: the original bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt, and left sys.stdout redirected
            # whenever the user's code raised.
            failed = True
        finally:
            sys.stdout = old_stdout
        if failed:
            await ctx.message.channel.send(
                embed=Embed(
                    title='Python Code Evaluation',
                    description='Your code generated an exception!',
                    color=red_color
                ).set_footer(text=f'Code By: {ctx.message.author.name}', icon_url=ctx.message.author.avatar_url).add_field(
                    name='Your Code:',
                    value=f'```py\n{code}```'
                )
            )
            return
        await ctx.message.channel.send(
            embed=Embed(
                title='Python Code Evaluation',
                color=bot_color
            ).set_footer(text=f'Code By: {ctx.message.author.name}', icon_url=ctx.message.author.avatar_url).add_field(
                inline=False,
                name='Your Code:',
                value=f'```py\n{code}```'
            ).add_field(
                inline=False,
                name='Output:',
                value=f'```\n{redirected_out.getvalue()}```'
            )
        )
def setup(bot):
    """Extension entry point: register the EvalCommand cog on the bot."""
    bot.add_cog(EvalCommand(bot))
|
import random
import math
import json
import copy
class MockMorphParser:
    """
    When created, an object of this class calculates the number
    of different lexemes based on the corpus size, generates the
    lexemes, and then works as a morphological parser, taking a
    Wordform object and adding analyses to it.
    """

    def __init__(self, settings, n_lexemes):
        # settings: parsed settings.json with 'constants' and 'grammar' sections.
        self.settings = settings
        self.n_lexemes = n_lexemes
        # Triangular distribution of word lengths around MEAN_WORD_LENGTH:
        # length MEAN+i is repeated (3 - |i|) times for i in [-2, 2],
        # excluding lengths below 3.
        lengths = [(3 - abs(i)) * [settings['constants']['MEAN_WORD_LENGTH'] + i]
                   for i in range(-2, 3)
                   if settings['constants']['MEAN_WORD_LENGTH'] + i >= 3]
        self.lengths = []
        for l in lengths:
            self.lengths += l
        # All parts of speech declared in the grammar section.
        self.pos = [gr for gr in settings['grammar']]
        self.generate_probabilities()
        self.generate_lexemes()

    @staticmethod
    def ddistr(probabilities):
        """Sample from a discrete distribution given as (value, p) pairs.

        Builds a cumulative distribution and returns the largest value whose
        cumulative mass does not exceed a uniform draw.
        NOTE(review): values may include None, which the comparison chain
        treats as smaller than any other value — confirm intended semantics.
        """
        cdf = [(i, sum(p for j, p in probabilities
                       if j is None or (j is not None and i is not None and j < i)))
               for i, _ in probabilities]
        r = random.random()
        return max(i for i, c in cdf if (c <= r))

    def generate_probabilities(self):
        """Precompute the sampling distributions used by the parser."""
        self.prob = {}
        # Poisson distribution (lambda = MEAN_AMBIGUITY) for the number of
        # analyses assigned to one word form, truncated at 5 * floor(lambda).
        l = self.settings['constants']['MEAN_AMBIGUITY']
        self.prob['n_ana'] = [(k, l ** k * math.exp(-l) / math.factorial(k))
                              for k in range(math.floor(l) * 5)]
        for pos in self.settings['grammar']:
            for cat in self.settings['grammar'][pos]:
                values = self.settings['grammar'][pos][cat]
                if len(values) < 8:
                    # Few values: uniform distribution over them.
                    self.prob[cat] = [(v, 1 / len(values)) for v in values]
                else:
                    # Many values: Zipf-like 1/rank distribution.
                    norm_coef = sum(1 / i for i in range(1, len(values) + 1))
                    self.prob[cat] = []
                    for i in range(len(values)):
                        self.prob[cat].append((values[i], 1 / (norm_coef * (i + 1))))
                    print(cat, self.prob[cat])

    def one_lexeme(self):
        """
        Generate and return one random lexeme.
        """
        l = {}
        lLen = random.choice(self.lengths)
        # Random lemma of the chosen length over the configured alphabet.
        l['lex'] = ''.join(random.choice(self.settings['constants']['ALPHABET'])
                           for _ in range(lLen))
        l['gr.pos'] = random.choice(self.pos)
        return l

    def generate_lexemes(self):
        """
        Generate random lexemes and store them in self.lexemes.
        """
        self.lexemes = [self.one_lexeme()
                        for _ in range(self.n_lexemes)]

    def get_random_cat_value(self, cat):
        # Draw one value for grammatical category `cat` from its distribution.
        return MockMorphParser.ddistr(self.prob[cat])

    def generate_analysis(self):
        """Return a random analysis: a copied lexeme with random values for
        every category its part of speech allows."""
        lex = copy.deepcopy(random.choice(self.lexemes))
        for cat in self.settings['grammar'][lex['gr.pos']]:
            lex['gr.' + cat] = self.get_random_cat_value(cat)
        return lex

    def add_analysis(self, wf):
        """
        Add a random analysis (or analyses) to a Wordform object.
        Returns the number of analyses added.
        """
        n = MockMorphParser.ddistr(self.prob['n_ana'])
        wf.ana = [self.generate_analysis()
                  for _ in range(n)]
        # print(n)
        return n
if __name__ == '__main__':
    # Smoke test: build a parser from settings.json, analyse 20 random word
    # forms, and report the mean ambiguity and distinct-lexeme count.
    f = open('settings.json', 'r', encoding='utf-8')
    settings = json.loads(f.read())
    f.close()
    mp = MockMorphParser(settings, 30000)
    from gen_wfms_with_repr import WordForm
    n = 0
    for i in range(20):
        wf = WordForm(1)
        n += mp.add_analysis(wf)
        print(wf.ana)
    # Average number of analyses per word form.
    print(n / 20)
    print(len(set((l['lex'], l['gr.pos']) for l in mp.lexemes)), 'lexemes total.')
|
import json
import random

INPUT = 'factbank.json'
OUTPUT = 'factbank.html'

# Render the factbank as a simple HTML page: one heading + shuffled,
# ordered list of facts per task.
html_output = "<html><head><title>Factbank</title></head><body>"
# BUG FIX: the original used json.load(open(INPUT)), leaking the file handle;
# a context manager closes it deterministically.
with open(INPUT) as infile:
    factbank = json.load(infile)
for task, facts in factbank.items():
    random.shuffle(facts)
    html_output += f"<h1>Task {task}</h1><ol>"
    for fact in facts:
        # NOTE(review): fact text is interpolated without HTML escaping —
        # confirm the facts are trusted/plain text.
        html_output += f"<li>{fact}</li>"
    html_output += "</ol>"
html_output += "</body></html>"
with open(OUTPUT, 'w') as f:
    f.write(html_output)
|
"""
To find the duplicate character from the string, we count the occurrence of each character in the string. If count is greater than 1, it implies that a character has a duplicate entry in the string. In above example, the characters highlighted in green are duplicate characters.
"""
string=input('Enter string here: ').lower()
print("Duplicate characters in a given string: ");
#Counts each character present in the string
for i in range(0, len(string)):
count = 1;
for j in range(i+1, len(string)):
if(string[i] == string[j] and string[i] != ' '):
count = count + 1;
#Set string[j] to 0 to avoid printing visited character
string = string[:j] + '0' + string[j+1:];
#A character is considered as duplicate if count is greater than 1
if(count > 1 and string[i] != '0'):
print(string[i]);
|
import os
import sys
import h5py
import numpy as np
import scipy.io as sio
import find_caffe as find_caffe
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '..'))
import data_analysis.get_feature_from_model as feature
import data_analysis.pre_process as pre
import data_analysis.hyperspectral_datasets as HSI
import data_analysis.train_test_split as train_test_split
caffe_root = find_caffe.caffe_root
class ExpConfigInfo(object):
    """Collects every file-system path and hyperparameter needed for one
    hyperspectral-classification Caffe experiment."""

    def __init__(self, name, label_unique, new_dir_name, gpus=0, net_name='bn_net', exp_index=0, spatial_info='1x1_mean', train_nums=200):
        """
        :param name: dataset name, e.g. 'indian_pines' or 'salina'
        :param label_unique: unique class labels of the dataset
        :param new_dir_name: run subdirectory under ../result/<name>/
        :param gpus: GPU id(s) used for training
        :param net_name: network architecture identifier
        :param exp_index: repetition index keeping repeated runs apart
        :param spatial_info: spatial feature variant, e.g. '1x1_mean'
        :param train_nums: number of training samples
        """
        self.name = name
        # Generated-artifact directories for this run.
        self.proto_dir = '../result/{}/{}/proto'.format(self.name, new_dir_name)
        self.model_dir = '../result/{}/{}/model'.format(self.name, new_dir_name)
        self.result_dir = '../result/{}/{}/result'.format(self.name, new_dir_name)
        # Caffe prototxt definitions (train/test/deploy nets + solver).
        self.train_net_file = '{}/{}_{}_train.prototxt'.format(self.proto_dir, self.name, spatial_info)
        self.test_net_file = '{}/{}_{}_test.prototxt'.format(self.proto_dir, self.name, spatial_info)
        self.deploy_net_file = '{}/{}_{}_deploy.prototxt'.format(self.proto_dir, self.name, spatial_info)
        self.solver_file = '{}/{}_{}_solver.prototxt'.format(self.proto_dir, self.name, spatial_info)
        # Text files listing the HDF5 files consumed by Caffe's HDF5Data layers.
        self.train_data_list_file = '{}/{}_train_file.txt'.format(self.proto_dir, spatial_info)
        self.test_data_list_file = '{}/{}_test_file.txt'.format(self.proto_dir, spatial_info)
        # Source dataset directory and the HDF5 train/test splits derived from it.
        self.data_dir = os.path.expanduser('../hyperspectral_datas/{}/data/'.format(self.name))
        self.train_data_file = self.data_dir + '{}_train_{}.h5'.format(self.name, spatial_info)
        self.test_data_file = self.data_dir + '{}_test_{}.h5'.format(self.name, spatial_info)
        self.snapshot_prefix = '{}/{}_models_time_{}'.format(self.model_dir, spatial_info, exp_index)
        self.deploy_file = os.getcwd() + '/' + self.deploy_net_file
        # set hyperparameters
        self.channels = 224
        self.CK_channels = 448
        self.kernel_size = 24
        self.CK_kernel_size = 48
        self.max_iter = 30000
        self.train_nums = train_nums
        # The wider 'CK' variant is selected for mean+std spatial features —
        # NOTE(review): confirm this intended mapping.
        self.use_CK = spatial_info in ['3x3_mean_std', '5x5_mean_std']
        # save results and final model
        self.gpus = gpus
        self.net_name = net_name
        self.label_unique = label_unique
        self.spatial_info = spatial_info
        self.log_file = "{}/{}_{}_{}_{}.log".format(os.getcwd() + '/' + self.result_dir, self.net_name, self.name,
                                                    self.spatial_info,
                                                    exp_index)
        self.result_mat_file = self.result_dir + '/{}_pred_'.format(exp_index) + spatial_info + '_model_{}.mat'.format(self.name)
        self.result_dat_file = self.result_dir + '/{}_pred_'.format(exp_index) + spatial_info + '_model_{}.dat'.format(self.name)
        # Filled in later by set_final_model() / set_data().
        self.final_model = ''
        self.test_nums = 0
        self.max_class = 0

    def set_data(self):
        """Create the output directories and write the train/test HDF5 splits."""
        mkdir_if_not_exist(self.proto_dir)
        mkdir_if_not_exist(self.model_dir)
        mkdir_if_not_exist(self.result_dir)
        self.test_nums, self.max_class = get_train_test_data(label_unique=self.label_unique,
                                                             dataset_name=self.name, spatial_info=self.spatial_info,
                                                             train_nums=self.train_nums, data_set_dir=self.data_dir)

    def set_final_model(self):
        """Record the absolute path of the final training snapshot."""
        self.final_model = os.getcwd() + '/' + self.snapshot_prefix + '_iter_{}.caffemodel.h5'.format(
            self.max_iter)
def mkdir_if_not_exist(the_dir):
    """Create *the_dir* (including parents) if it does not already exist.

    Uses exist_ok=True so the check and creation are a single call,
    removing the race between isdir() and makedirs() in the original.
    """
    os.makedirs(the_dir, exist_ok=True)
def get_train_test_data(label_unique, dataset_name = 'indian_pines', spatial_info='5x5_mean_std', train_nums=200, data_set_dir=''):
    """Split a hyperspectral .mat dataset into train/test sets and write both
    splits to HDF5 files next to the source data.

    :param label_unique: unique class labels present in the dataset
    :param dataset_name: one of 'indian_pines' or 'salina'
    :param spatial_info: spatial feature variant used in the file names
    :param train_nums: number of training samples for the split
    :param data_set_dir: directory holding the .mat source files
    :returns: (number of test samples, max class label + 1)
    """
    assert dataset_name in ['indian_pines', 'salina']
    assert spatial_info in ['1x1_mean', '3x3_mean', '3x3_mean_std', '5x5_mean', '5x5_mean_std']
    # NOTE(review): a bare class is used as an ad-hoc namespace here; the
    # attributes live on the class object itself, so they are effectively
    # shared globals across calls.
    class data_set_info:pass
    data_set_info.data = sio.loadmat(data_set_dir + '/' + dataset_name + '_' + spatial_info + '.mat')['data']
    data_set_info.labels = sio.loadmat(data_set_dir + '/' + dataset_name + '_' + spatial_info + '.mat')['labels']
    data_set_info.h5train = data_set_dir + '/' + dataset_name + '_train_' + spatial_info + '.h5'
    data_set_info.h5test = data_set_dir + '/' + dataset_name + '_test_' + spatial_info + '.h5'
    (train_label, train_index, train_data), (test_label, test_index, test_data) = train_test_split.train_test_split(
        data_set_info.data, data_set_info.labels,
        label_unique=label_unique,
        train=train_nums)
    put_data_to_h5file({'data': train_data, 'labels': train_label, 'index': train_index}, data_set_info.h5train)
    put_data_to_h5file({'data': test_data, 'labels': test_label, 'index': test_index}, data_set_info.h5test)
    return len(test_label), max(label_unique)+1
def put_data_to_h5file(data, file_name, isRPCA=False):
    """Write the 'data', 'labels' and 'index' arrays to an HDF5 file,
    replacing any existing file at *file_name*.

    :param data: dict with 'data', 'labels' and 'index' numpy arrays
    :param file_name: destination .h5 path
    :param isRPCA: when True, write 'data' as-is instead of reshaping
    """
    if isRPCA:
        write_data = data['data']
    else:
        # Caffe HDF5 layers expect 4-D (N, C, H, W); lift flat features
        # to N x 1 x D x 1.
        write_data = data['data'].reshape(data['data'].shape[0], 1, data['data'].shape[1], 1)
    write_label = data['labels'].reshape(data['labels'].shape[0], 1)
    write_index = data['index']
    if os.path.exists(file_name):
        os.remove(file_name)
    # BUG FIX: use a context manager so the file is closed even when one of
    # the dataset writes raises (the original leaked the handle on error).
    with h5py.File(file_name, 'w') as f:
        f.create_dataset('data', shape=write_data.shape, dtype=np.float32, data=write_data)
        f.create_dataset('label', shape=write_label.shape, dtype=np.float32, data=write_label)
        f.create_dataset('index', dtype=np.float32, data=write_index)
def get_y_pred_from_model(model, mode='test', score_layer_name = 'ip2'):
    """Run the trained Caffe model on its train or test split and return a
    dict of predictions and evaluation metrics.

    :param model: ExpConfigInfo-like object with deploy/model/data paths
    :param mode: 'test' (default) or 'train' — which split to evaluate
    :param score_layer_name: name of the network's score layer
    """
    assert os.path.exists(model.deploy_file) and os.path.exists(model.final_model)
    model_feature = feature.GetFeatureFromCaffe(deploy_file=model.deploy_file, pretrained_model=model.final_model, score_layer_name=score_layer_name)
    # BUG FIX: `mode is 'test'` compared object identity, not equality; it
    # only worked via CPython string interning and breaks for computed or
    # sliced strings. Use == for string comparison.
    if mode == 'test':
        assert os.path.exists(model.test_data_file)
        model_feature.get_h5_data(model.test_data_file)
    elif mode == 'train':
        assert os.path.exists(model.train_data_file)
        model_feature.get_h5_data(model.train_data_file)
    model_feature.get_metric()
    return {
        'classify_report' : model_feature.classify_report,
        'confusion_matrix' : model_feature.confusion_matrix,
        'y_true' : model_feature.y_true,
        'y_pred' : model_feature.y_pred,
        'y_index' : model_feature.index,
        'OA' : model_feature.overall_accuracy,
        'AA' : model_feature.average_accuracy,
        'ACC' : model_feature.acc_for_each_class
    }
def get_feature_from_model(model, mode='test', score_layer_name = 'ip1'):
    """Extract intermediate features from the trained Caffe model for the
    train or test split.

    :param model: ExpConfigInfo-like object with deploy/model/data paths
    :param mode: 'test' (default) or 'train' — which split to process
    :param score_layer_name: layer whose activations are extracted
    :returns: dict with 'y_feature' and 'y_index'
    """
    assert os.path.exists(model.deploy_file) and os.path.exists(model.final_model)
    model_feature = feature.GetFeatureFromCaffe(deploy_file=model.deploy_file, pretrained_model=model.final_model, score_layer_name=score_layer_name)
    # BUG FIX: replaced `mode is 'test'` / `mode is 'train'` identity checks
    # with equality — `is` on string literals relies on interning.
    if mode == 'test':
        assert os.path.exists(model.test_data_file)
        model_feature.get_h5_data(model.test_data_file)
    elif mode == 'train':
        assert os.path.exists(model.train_data_file)
        model_feature.get_h5_data(model.train_data_file)
    model_feature.get_y_pred()
    return {
        'y_feature' : model_feature.feature,
        'y_index' : model_feature.index,
    }
import numpy as np
class Subclone:
    """A subclonal population in a tumour-evolution simulation.

    :attr label: identifier, e.g. 'A', 'B' or 'S'
    :attr fitness: current fitness, clamped to [0, 1]
    :attr prop: current proportion of the total population
    """
    def __init__(self, lbl, c, alpha, prop=0.333, parent=None, birthtime=None, color=None):
        self.label = lbl
        self.fitness = 0.0
        self.prop = prop
        # Cost term subtracted from fitness (printed as 'Resistant' in log()).
        self.c = c
        self.parent = parent
        # Per-treatment sensitivity coefficients (dotted with the treatment
        # vector in update_fitness).
        self.alpha = alpha
        self.bt = birthtime
        # BUG FIX: the original assigned `self.color = None`, silently
        # discarding the `color` argument.
        self.color = color

    def __str__(self):
        return self.label

    def update_fitness(self, treatment):
        """
        Returns the fitness with the given environment for subclone [type]
        @ param treatment: 1d np.ndarray of shape (num_treatments) for intensity of treatment
        """
        # Fitness = 1 - cost - sensitivity . treatment, clamped to [0, 1].
        self.fitness = max(0, min(1, 1 - self.c - np.dot(self.alpha, treatment)))
        return self.fitness

    def log(self):
        """Print a human-readable summary of this subclone."""
        print("Node: ", self.label)
        print("Birthtime: ", self.bt)
        print(f'\t \t Alpha: {self.alpha}')
        print(f'\t \t Prop: {self.prop}')
        print(f'\t \t Resistant: {self.c}')
        print(f'\t \t Fitness: {self.fitness}')
|
from __future__ import print_function
import os
import os.path
import sys
import torch
import torch.utils.data as data
import numpy as np
import scipy.spatial as spatial
import random
REAL_DATA = True
TRAINING = False
def load_shape(point_filename, normals_filename, curv_filename, pidx_filename, clean_points_filename, outliers_filename):
    """Load a point cloud plus optional per-point attributes from .npy files
    and wrap everything in a Shape.

    Each *_filename is a path without the '.npy' extension, or None when the
    corresponding attribute is absent.
    """
    pts = np.load(point_filename+'.npy')
    # Idiom fix: compare against None with `is (not)`, not `!=`.
    if normals_filename is not None:
        normals = np.load(normals_filename+'.npy')
    else:
        normals = None
    if curv_filename is not None:
        curvatures = np.load(curv_filename+'.npy')
    else:
        curvatures = None
    if pidx_filename is not None:
        patch_indices = np.load(pidx_filename+'.npy')
    else:
        patch_indices = None#np.load(point_filename[:-4]+'.pidx.npy')
    if clean_points_filename is not None:
        clean_points = np.load(clean_points_filename+'.npy')
    else:
        clean_points = None
    if outliers_filename is not None:
        outliers = np.load(outliers_filename+'.npy')
    else:
        outliers = None
    # KD-tree construction is recursive; raise the limit for large clouds.
    sys.setrecursionlimit(int(max(1000, round(pts.shape[0]/10)))) # otherwise KDTree construction may run out of recursions
    kdtree = spatial.cKDTree(pts, 10)
    clean_points_kdtree = None
    if clean_points is not None:
        clean_points_kdtree = spatial.cKDTree(clean_points, 10)
    sh = Shape(pts=pts, kdtree=kdtree, normals=normals, curv=curvatures, pidx=patch_indices, clean_points = clean_points,
               clean_kdtree = clean_points_kdtree, outliers = outliers, point_filename=point_filename)
    return sh
class SequentialPointcloudPatchSampler(data.sampler.Sampler):
    """Yields every patch index of every shape, in order, 0..total-1."""

    def __init__(self, data_source):
        self.data_source = data_source
        # Total number of patches across all shapes in the dataset.
        self.total_patch_count = sum(
            self.data_source.shape_patch_count[ind]
            for ind, _ in enumerate(self.data_source.shape_names))

    def __iter__(self):
        return iter(range(self.total_patch_count))

    def __len__(self):
        return self.total_patch_count
class SequentialShapeRandomPointcloudPatchSampler(data.sampler.Sampler):
    """Samples up to `patches_per_shape` random patches per shape, keeping all
    patches of one shape adjacent in the output (cache-friendly ordering)."""

    def __init__(self, data_source, patches_per_shape, seed=None, sequential_shapes=False, identical_epochs=False):
        self.data_source = data_source
        self.patches_per_shape = patches_per_shape
        self.sequential_shapes = sequential_shapes  # keep shape order fixed if True
        self.seed = seed
        self.identical_epochs = identical_epochs    # reuse the same permutation each epoch
        self.total_patch_count = None
        self.shape_patch_inds = None
        if self.seed is None:
            # BUG FIX: np.random.random_integers was deprecated and removed in
            # NumPy 2.0; randint with an exclusive upper bound draws the same
            # inclusive range [0, 2**32 - 1].
            self.seed = int(np.random.randint(0, 2**32, dtype=np.int64))
        self.rng = np.random.RandomState(self.seed)
        self.total_patch_count = 0
        for shape_ind, _ in enumerate(self.data_source.shape_names):
            self.total_patch_count = self.total_patch_count + min(self.patches_per_shape, self.data_source.shape_patch_count[shape_ind])

    def __iter__(self):
        # optionally always pick the same permutation (mainly for debugging)
        if self.identical_epochs:
            self.rng.seed(self.seed)
        # global point index offset for each shape
        shape_patch_offset = list(np.cumsum(self.data_source.shape_patch_count))
        shape_patch_offset.insert(0, 0)
        shape_patch_offset.pop()
        shape_inds = range(len(self.data_source.shape_names))
        if not self.sequential_shapes:
            shape_inds = self.rng.permutation(shape_inds)
        # return a permutation of the points in the dataset where all points in the same shape are adjacent (for performance reasons):
        # first permute shapes, then concatenate a list of permuted points in each shape
        self.shape_patch_inds = [[]]*len(self.data_source.shape_names)
        point_permutation = []
        for shape_ind in shape_inds:
            start = shape_patch_offset[shape_ind]
            end = shape_patch_offset[shape_ind]+self.data_source.shape_patch_count[shape_ind]
            global_patch_inds = self.rng.choice(range(start, end), size=min(self.patches_per_shape, end-start), replace=False)
            point_permutation.extend(global_patch_inds)
            # save indices of shape point subset
            self.shape_patch_inds[shape_ind] = global_patch_inds - start
        return iter(point_permutation)

    def __len__(self):
        return self.total_patch_count
class RandomPointcloudPatchSampler(data.sampler.Sampler):
    """Samples a global random subset of patches, drawing at most
    `patches_per_shape` patches' worth from each shape overall."""

    def __init__(self, data_source, patches_per_shape, seed=None, identical_epochs=False):
        self.data_source = data_source
        self.patches_per_shape = patches_per_shape
        self.seed = seed
        self.identical_epochs = identical_epochs  # reuse the same draw each epoch
        self.total_patch_count = None
        if self.seed is None:
            # BUG FIX: np.random.random_integers was deprecated and removed in
            # NumPy 2.0; randint with an exclusive upper bound draws the same
            # inclusive range [0, 2**32 - 1].
            self.seed = int(np.random.randint(0, 2**32, dtype=np.int64))
        self.rng = np.random.RandomState(self.seed)
        self.total_patch_count = 0
        for shape_ind, _ in enumerate(self.data_source.shape_names):
            self.total_patch_count = self.total_patch_count + min(self.patches_per_shape, self.data_source.shape_patch_count[shape_ind])

    def __iter__(self):
        # optionally always pick the same permutation (mainly for debugging)
        if self.identical_epochs:
            self.rng.seed(self.seed)
        return iter(self.rng.choice(sum(self.data_source.shape_patch_count), size=self.total_patch_count, replace=False))

    def __len__(self):
        return self.total_patch_count
class Shape():
    """Container for one point cloud with its KD-trees and per-point data."""

    def __init__(self, pts, kdtree, normals=None, curv=None, pidx=None, clean_points=None, clean_kdtree=None, outliers = None, point_filename=None):
        self.pts = pts
        self.kdtree = kdtree
        self.clean_kdtree = clean_kdtree
        self.normals = normals
        self.clean_points = clean_points
        self.curv = curv
        # Per-point flags: 0 = inlier, 1 = outlier (see index masks below).
        self.outliers = outliers
        # Fixed seed so the balanced index subset is reproducible.
        seed = 3627473
        self.pidx = pidx # patch center points indices (None means all points are potential patch centers)
        indexes = np.array(range(len(pts)))
        inlier_idx = indexes[outliers==0]
        outlier_idx = indexes[outliers==1]
        random.seed(seed)
        # Undersample the majority class so patch centres are balanced
        # between inliers and outliers.
        if len(inlier_idx)>len(outlier_idx):
            majority = inlier_idx
            minority = outlier_idx
        else:
            majority = outlier_idx
            minority = inlier_idx
        balanced_idx = random.sample(list(majority), len(minority))
        balanced_idx += list(minority)
        random.shuffle(balanced_idx)
        # balance data distribution at training time (optional)
        if TRAINING:
            self.pidx = balanced_idx
            # Persist the balanced subset next to the source point file.
            np.save(point_filename[:-4]+'.pidx.npy', balanced_idx)
class Cache():
    """Least-recently-used cache mapping element ids to loaded elements."""

    def __init__(self, capacity, loader, loadfunc):
        self.elements = {}   # element_id -> cached element
        self.used_at = {}    # element_id -> logical time of last access
        self.capacity = capacity
        # Opaque context object passed through to loadfunc on a miss;
        # loadfunc(loader, element_id) must return the loaded element.
        self.loader = loader
        self.loadfunc = loadfunc
        # Monotonically increasing access clock used for recency ordering.
        self.counter = 0

    def get(self, element_id):
        """Return the element for *element_id*, loading it on a cache miss."""
        if element_id not in self.elements:
            # cache miss
            # if at capacity, throw out least recently used item
            if len(self.elements) >= self.capacity:
                remove_id = min(self.used_at, key=self.used_at.get)
                del self.elements[remove_id]
                del self.used_at[remove_id]
            # load element
            self.elements[element_id] = self.loadfunc(self.loader, element_id)
        # Refresh recency on access so eviction tracks least-recent use.
        self.used_at[element_id] = self.counter
        self.counter += 1
        return self.elements[element_id]
class PointcloudPatchDataset(data.Dataset):
# patch radius as fraction of the bounding box diagonal of a shape
def __init__(self, root, shapes_list_file, patch_radius, points_per_patch, patch_features,
seed=None, identical_epochs=False, use_pca=True, center='point', point_tuple=1, cache_capacity=1, point_count_std=0.0, sparse_patches=False, eval=False):
# initialize parameters
self.root = root
self.shapes_list_file = shapes_list_file
self.patch_features = patch_features
self.patch_radius = patch_radius
self.points_per_patch = points_per_patch
self.identical_epochs = identical_epochs
self.use_pca = use_pca
self.sparse_patches = sparse_patches
self.center = center
self.point_tuple = point_tuple
self.point_count_std = point_count_std
self.seed = seed
self.include_normals = False
self.include_curvatures = False
self.include_clean_points = False
self.include_original = False
self.include_outliers = False
for pfeat in self.patch_features:
if pfeat == 'normal':
self.include_normals = True
elif pfeat == 'max_curvature' or pfeat == 'min_curvature':
self.include_curvatures = True
elif pfeat == 'clean_points':
self.include_clean_points = True
elif pfeat == 'original':
self.include_original = True
elif pfeat == 'outliers':
self.include_outliers = True
else:
raise ValueError('Unknown patch feature: %s' % (pfeat))
# self.loaded_shape = None
self.load_iteration = 0
self.shape_cache = Cache(cache_capacity, self, PointcloudPatchDataset.load_shape_by_index)
# get all shape names in the dataset
self.shape_names = []
with open(os.path.join(root, self.shapes_list_file)) as f:
self.shape_names = f.readlines()
self.shape_names = [x.strip() for x in self.shape_names]
self.shape_names = list(filter(None, self.shape_names))
# initialize rng for picking points in a patch
if self.seed is None:
self.seed = np.random.random_integers(0, 2**32-1, 1)[0]
self.rng = np.random.RandomState(self.seed)
# get basic information for each shape in the dataset
self.shape_patch_count = []
self.patch_radius_absolute = []
for shape_ind, shape_name in enumerate(self.shape_names):
print('getting information for shape %s' % (shape_name))
# load from text file and save in more efficient numpy format
point_filename = os.path.join(self.root, shape_name+'.xyz')
pts = np.loadtxt(point_filename).astype('float32')
np.save(point_filename+'.npy', pts)
if self.include_normals:
normals_filename = os.path.join(self.root, shape_name+'.normals')
normals = np.loadtxt(normals_filename).astype('float32')
np.save(normals_filename+'.npy', normals)
else:
normals_filename = None
if self.include_outliers:
outliers_filename = os.path.join(self.root, shape_name + ".outliers")
outliers = np.loadtxt(outliers_filename).astype('float32')
np.save(outliers_filename + '.npy', outliers)
if self.include_clean_points:
clean_points_filename = os.path.join(self.root, shape_name + ".clean_xyz")
clean_points = np.loadtxt(clean_points_filename).astype('float32')
np.save(clean_points_filename + '.npy', clean_points)
else:
clean_points_filename = None
if self.include_curvatures:
curv_filename = os.path.join(self.root, shape_name+'.curv')
curvatures = np.loadtxt(curv_filename).astype('float32')
np.save(curv_filename+'.npy', curvatures)
else:
curv_filename = None
if self.sparse_patches:
pidx_filename = os.path.join(self.root, shape_name+'.pidx')
patch_indices = np.loadtxt(pidx_filename).astype('int')
np.save(pidx_filename+'.npy', patch_indices)
else:
pidx_filename = None
shape = self.shape_cache.get(shape_ind)
if eval:
shape.pidx = None
if shape.pidx is None:
self.shape_patch_count.append(shape.pts.shape[0])
else:
self.shape_patch_count.append(len(shape.pidx))
if REAL_DATA:
bbdiag = float(np.linalg.norm(shape.pts.max(0) - shape.pts.min(0), 2))
self.patch_radius_absolute.append([bbdiag * rad for rad in self.patch_radius])
else:
# find the radius of the ground truth points
real_points = shape.pts[[True if x==0 else False for x in outliers]]
bbdiag = float(np.linalg.norm(real_points.max(0) - real_points.min(0), 2))
self.patch_radius_absolute.append([bbdiag*1 * rad for rad in self.patch_radius])
def select_patch_points(self, patch_radius, global_point_index, center_point_ind, shape, radius_index,
                        scale_ind_range, patch_pts_valid, patch_pts, clean_points=False):
    """Fill one radius-slice of the pre-allocated `patch_pts` tensor with the neighborhood of a center point.

    Queries the shape's kd-tree for all points within `patch_radius` of the center
    point, optionally subsamples them, writes them (centered and radius-normalized)
    into rows [start:end) of `patch_pts`, and records the occupied row range.

    Parameters:
        patch_radius: absolute query radius for the ball search.
        global_point_index: dataset-wide point index; only used to reseed the RNG
            when `self.identical_epochs` is set.
        center_point_ind: index of the patch center within the shape.
        shape: cached shape record providing `pts`/`kdtree` (and `clean_points` /
            `clean_kdtree` when `clean_points` is True).
        radius_index: which radius slot of the multi-scale patch to fill.
        scale_ind_range: (num_radii, 2) int array, updated in place with [start, end).
        patch_pts_valid: list extended in place with the valid (non-padded) row indices.
        patch_pts: pre-zeroed (points_per_patch * num_radii, 3) tensor, written in place.
        clean_points: query the noise-free point set instead of the noisy one.

    Returns:
        (patch_pts, patch_pts_valid, scale_ind_range) — the same objects that were
        passed in, after in-place mutation.
    """
    if clean_points:
        patch_point_inds = np.array(shape.clean_kdtree.query_ball_point(shape.clean_points[center_point_ind, :], patch_radius))
        # patch_point_inds = np.array(shape.clean_kdtree.query_ball_point(shape.pts[center_point_ind, :], patch_radius))
    else:
        patch_point_inds = np.array(shape.kdtree.query_ball_point(shape.pts[center_point_ind, :], patch_radius))

    # optionally always pick the same points for a given patch index (mainly for debugging)
    if self.identical_epochs:
        self.rng.seed((self.seed + global_point_index) % (2**32))

    point_count = min(self.points_per_patch, len(patch_point_inds))

    # randomly decrease the number of points to get patches with different point densities
    if self.point_count_std > 0:
        point_count = max(5, round(point_count * self.rng.uniform(1.0-self.point_count_std*2)))
        point_count = min(point_count, len(patch_point_inds))

    # if there are too many neighbors, pick a random subset
    if point_count < len(patch_point_inds):
        patch_point_inds = patch_point_inds[self.rng.choice(len(patch_point_inds), point_count, replace=False)]

    # rows [start:end) of this radius-slice hold real points; the rest stays zero-padded
    start = radius_index*self.points_per_patch
    end = start+point_count
    scale_ind_range[radius_index, :] = [start, end]
    patch_pts_valid += list(range(start, end))

    if clean_points:
        points_base = shape.clean_points
    else:
        points_base = shape.pts

    # convert points to torch tensors
    patch_pts[start:end, :] = torch.from_numpy(points_base[patch_point_inds, :])

    # center patch (central point at origin - but avoid changing padded zeros)
    if self.center == 'mean':
        patch_pts[start:end, :] = patch_pts[start:end, :] - patch_pts[start:end, :].mean(0)
    elif self.center == 'point':
        # NOTE(review): centers on the *noisy* center point even when clean_points
        # is True — presumably intentional; confirm.
        patch_pts[start:end, :] = patch_pts[start:end, :] - torch.from_numpy(shape.pts[center_point_ind, :])
    elif self.center == 'none':
        pass # no centering
    else:
        raise ValueError('Unknown patch centering option: %s' % (self.center))

    # normalize size of patch (scale with 1 / patch radius)
    patch_pts[start:end, :] = patch_pts[start:end, :] / patch_radius

    return patch_pts, patch_pts_valid, scale_ind_range
def get_gt_point(self, index):
    """Return the ground-truth coordinates of the patch center for a dataset-wide point index."""
    shape_ind, patch_ind = self.shape_index(index)
    shape = self.shape_cache.get(shape_ind)
    # Sparse patches store an explicit point-index list; otherwise the
    # shape-local patch index is the point index itself.
    center_point_ind = patch_ind if shape.pidx is None else shape.pidx[patch_ind]
    return shape.pts[center_point_ind]
# returns a patch centered at the point with the given global index
# and the ground truth normal at the patch center
def __getitem__(self, index):
    """Assemble one multi-scale patch sample for dataset-wide point index `index`.

    Returns `(patch_pts,) + patch_feats + (trans,)` where `patch_feats` depends on
    `self.patch_features` (normals, curvatures, clean points, original point,
    outlier label) and `trans` is the PCA rotation applied to the patch
    (identity when `use_pca` is False).

    BUG FIX: the clean-points branch unpacked FOUR values from
    `select_patch_points` (`patch_clean_points, _, _, _ = ...`), but that method
    returns exactly three, so enabling `include_clean_points` raised
    ValueError. It now unpacks three.
    """
    # find shape that contains the point with given global index
    shape_ind, patch_ind = self.shape_index(index)
    shape = self.shape_cache.get(shape_ind)
    if shape.pidx is None:
        center_point_ind = patch_ind
    else:
        # sparse patches: patch_ind indexes the shape's pre-selected point subset
        center_point_ind = shape.pidx[patch_ind]

    # get neighboring points (within euclidean distance patch_radius), one
    # zero-padded slice of rows per radius
    patch_pts = torch.FloatTensor(self.points_per_patch*len(self.patch_radius_absolute[shape_ind]), 3).zero_()
    patch_pts_valid = []
    scale_ind_range = np.zeros([len(self.patch_radius_absolute[shape_ind]), 2], dtype='int')
    for radius_index, patch_radius in enumerate(self.patch_radius_absolute[shape_ind]):
        patch_pts, patch_pts_valid, scale_ind_range = self.select_patch_points(
            patch_radius, index, center_point_ind, shape, radius_index,
            scale_ind_range, patch_pts_valid, patch_pts)

    if self.include_normals:
        patch_normal = torch.from_numpy(shape.normals[center_point_ind, :])
    if self.include_curvatures:
        patch_curv = torch.from_numpy(shape.curv[center_point_ind, :])
        # scale curvature to match the scaled vertices (curvature*s matches position/s):
        patch_curv = patch_curv * self.patch_radius_absolute[shape_ind][0]
    if self.include_original:
        original = shape.pts[center_point_ind]
    if self.include_clean_points:
        tmp = []
        patch_clean_points = torch.FloatTensor(self.points_per_patch, 3).zero_()
        scale_clean_ind_range = np.zeros([len(self.patch_radius_absolute[shape_ind]), 2], dtype='int')
        clean_patch_radius = max(self.patch_radius_absolute[shape_ind])
        # Fixed: select_patch_points returns three values, not four.
        patch_clean_points, _, _ = self.select_patch_points(
            clean_patch_radius, index, center_point_ind, shape, 0,
            scale_clean_ind_range, tmp, patch_clean_points, clean_points=True)
    if self.include_outliers:
        outlier = shape.outliers[center_point_ind]

    if self.use_pca:
        # compute pca of points in the patch:
        # center the patch around the mean:
        pts_mean = patch_pts[patch_pts_valid, :].mean(0)
        patch_pts[patch_pts_valid, :] = patch_pts[patch_pts_valid, :] - pts_mean

        trans, _, _ = torch.svd(torch.t(patch_pts[patch_pts_valid, :]))
        patch_pts[patch_pts_valid, :] = torch.mm(patch_pts[patch_pts_valid, :], trans)

        cp_new = -pts_mean # since the patch was originally centered, the original cp was at (0,0,0)
        cp_new = torch.matmul(cp_new, trans)

        # re-center on original center point
        patch_pts[patch_pts_valid, :] = patch_pts[patch_pts_valid, :] - cp_new

        if self.include_normals:
            patch_normal = torch.matmul(patch_normal, trans)
    else:
        trans = torch.eye(3).float()

    # get point tuples from the current patch
    if self.point_tuple > 1:
        patch_tuples = torch.FloatTensor(self.points_per_patch*len(self.patch_radius_absolute[shape_ind]), 3*self.point_tuple).zero_()
        for s, rad in enumerate(self.patch_radius_absolute[shape_ind]):
            start = scale_ind_range[s, 0]
            end = scale_ind_range[s, 1]
            point_count = end - start

            tuple_count = point_count**self.point_tuple

            # get linear indices of the tuples
            if tuple_count > self.points_per_patch:
                patch_tuple_inds = self.rng.choice(tuple_count, self.points_per_patch, replace=False)
                tuple_count = self.points_per_patch
            else:
                patch_tuple_inds = np.arange(tuple_count)

            # linear tuple index to index for each tuple element
            patch_tuple_inds = np.unravel_index(patch_tuple_inds, (point_count,)*self.point_tuple)

            for t in range(self.point_tuple):
                patch_tuples[start:start+tuple_count, t*3:(t+1)*3] = patch_pts[start+patch_tuple_inds[t], :]

        patch_pts = patch_tuples

    # assemble the requested per-patch feature tuple in order
    patch_feats = ()
    for pfeat in self.patch_features:
        if pfeat == 'normal':
            patch_feats = patch_feats + (patch_normal,)
        elif pfeat == 'max_curvature':
            patch_feats = patch_feats + (patch_curv[0:1],)
        elif pfeat == 'min_curvature':
            patch_feats = patch_feats + (patch_curv[1:2],)
        elif pfeat == 'clean_points':
            patch_feats = patch_feats + (patch_clean_points,)
        elif pfeat == "original":
            patch_feats = patch_feats + (original,patch_radius)
        elif pfeat == "outliers":
            patch_feats = patch_feats + (outlier,)
        else:
            raise ValueError('Unknown patch feature: %s' % (pfeat))

    return (patch_pts,) + patch_feats + (trans,)
def __len__(self):
    """Total number of patches in the dataset: one per selected point, summed over shapes."""
    total = 0
    for patch_count in self.shape_patch_count:
        total += patch_count
    return total
# translate global (dataset-wide) point index to shape index & local (shape-wide) point index
def shape_index(self, index):
    """Map a dataset-wide patch index to (shape index, shape-local patch index)."""
    offset = 0
    shape_ind = None
    for shape_ind, patch_count in enumerate(self.shape_patch_count):
        if offset <= index < offset + patch_count:
            shape_patch_ind = index - offset
            break
        offset = offset + patch_count
    return shape_ind, shape_patch_ind
# load shape from a given shape index
def load_shape_by_index(self, shape_ind):
    """Load one shape (points plus any enabled optional channels) by its index."""
    name = self.shape_names[shape_ind]

    def _path(extension, enabled=True):
        # Build the file path for one data channel, or None when the channel is disabled.
        return os.path.join(self.root, name + extension) if enabled else None

    return load_shape(
        _path('.xyz'),
        _path('.normals', self.include_normals),
        _path('.curv', self.include_curvatures),
        _path('.pidx', self.sparse_patches),
        _path('.clean_xyz', self.include_clean_points),
        _path('.outliers', self.include_outliers))
|
from .r_dependencies import *
from .r_base import r_base
from math import sqrt
class r_svd(r_base):
    """Wrapper around R's svd()/robustSvd() (pcaMethods) driven through rpy2.

    Matrices live in the embedded R workspace under caller-chosen variable
    names; decomposition happens in R and the u/d/v components are copied
    back into numpy arrays.
    """

    def calculate_pcaMethods_svd(self,
                                 data_var_I,
                                 data_var_O,):
        '''Run a conventional SVD in R: data_var_O <- svd(data_var_I).

        data_var_I: R workspace name of the input matrix.
        data_var_O: R workspace name that receives the decomposition.
        '''
        try:
            r_statement = ('%s <- svd(%s)' %(
                data_var_O,data_var_I))
            ans = robjects.r(r_statement);
        except Exception as e:
            # NOTE(review): any R-side failure terminates the whole process.
            print(e);
            exit(-1);

    def calculate_pcaMethods_robustSvd(self,
                                       data_var_I,
                                       data_var_O,):
        '''Run robustSvd (pcaMethods) in R: data_var_O <- robustSvd(data_var_I).

        The robust SVD of the matrix is x = u d v'.
        d A vector containing the singular values of x.
        u A matrix whose columns are the left singular vectors of x.
        v A matrix whose columns are the right singular vectors of x
        '''
        try:
            r_statement = ('%s <- robustSvd(%s)' %(
                data_var_O,data_var_I))
            ans = robjects.r(r_statement);
        except Exception as e:
            print(e);
            exit(-1);

    def extract_svd(self,
                    data_var_I,):
        '''Copy the u/d/v components of an R svd result back as numpy arrays.

        d
        a vector containing the singular values of x, of length min(n, p).
        u
        a matrix whose columns contain the left singular vectors of x, present if nu > 0. Dimension c(n, nu)
        v
        a matrix whose columns contain the right singular vectors of x, present if nv > 0. Dimension c(p, nv)
        '''
        try:
            r_statement = ('%s$u' %(data_var_I))
            ans = robjects.r(r_statement);
            u = np.array(ans);
            #r_statement = ('diag(%s$d)' %(data_var_I))
            r_statement = ('%s$d' %(data_var_I))
            ans = robjects.r(r_statement);
            d = np.array(ans);
            r_statement = ('%s$v' %(data_var_I))
            ans = robjects.r(r_statement);
            v = np.array(ans);
            return u,d,v;
        except Exception as e:
            print(e);
            exit(-1);

    def detect_outliers_svd(self,data_I):
        '''Run both svd and robustSvd on the data for outlier inspection.

        Returns (u, d, v, ur, dr, vr) — plain then robust components — or
        implicitly None when the data contain missing values.

        R reference example (pcaMethods):
        ## Load a complete sample metabolite data set and mean center the data
        data(metaboliteDataComplete)
        mdc <- prep(metaboliteDataComplete, center=TRUE, scale="none")
        ## Now create 5% of outliers.
        cond <- runif(length(mdc)) < 0.05;
        mdcOut <- mdc
        mdcOut[cond] <- 10
        ## Now we do a conventional SVD and a robustSvd on both, the original and the
        ## data with outliers.
        resSvd <- svd(mdc)
        resSvdOut <- svd(mdcOut)
        resRobSvd <- robustSvd(mdc)
        resRobSvdOut <- robustSvd(mdcOut)
        ## Now we plot the results for the original data against those with outliers
        ## We can see that robustSvd is hardly affected by the outliers.
        plot(resSvd$v[,1], resSvdOut$v[,1])
        plot(resRobSvd$v[,1], resRobSvdOut$v[,1])
        '''
        # format into R matrix and list objects
        # convert data dict to matrix filling in missing values
        # with 'NA'
        listdict = listDict(data_I);
        concentrations,cn_sorted,sns_sorted,row_variables,column_variables = listdict.convert_listDict2dataMatrixList_pd(
            row_label_I='component_name',
            column_label_I='sample_name_short',
            value_label_I='calculated_concentration',
            row_variables_I=['component_group_name'],
            column_variables_I=['sample_name_abbreviation'],
            na_str_I="NA");
        cgn = row_variables['component_group_name'];
        sna = column_variables['sample_name_abbreviation'];
        sna_unique = listdict.get_uniqueValues_list(sna);
        # check if there were any missing values in the data set in the first place
        mv = 0;
        mv = listdict.count_missingValues_pivotTable();
        if mv==0:
            # Call to R
            try:
                # clear the workspace
                self.clear_workspace();
                # convert lists to R matrix
                self.make_matrixFromList(concentrations,len(cn_sorted),len(sns_sorted),'concentrations_m');
                # convert to the transpose
                self.transpose_matrix('concentrations_mt','concentrations_m');
                # calculate svd
                self.calculate_pcaMethods_svd('concentrations_mt','resSVD');
                # extract svd matrices
                u,d,v = self.extract_svd('resSVD');
                # calculate robustSVD
                self.calculate_pcaMethods_robustSvd('concentrations_mt','resRobustSVD');
                # extract out robustSVD matrices
                ur,dr,vr = self.extract_svd('resRobustSVD');
                #plot(resSvd$v[,1], resSvdOut$v[,1])
                #plot(resRobSvd$v[,1], resRobSvdOut$v[,1])
                #s$u %*% D %*% t(s$v) # X = U D V'
                #robjects.r('resSVD$u %*% diag(resSVD$d) %*% t(resSVD$v)')
                #import np as np
                #import matplotlib.pyplot as plt
                #plt.scatter(v[:,1], vr[:,1]);
                #plt.show()
                #x = np.array(robjects.r('concentrations_mt'))
                #xsvd = np.multiply(np.multiply(u,np.diag(d)),np.transpose(v));
            except Exception as e:
                print(e);
                exit(-1);
            return u,d,v,ur,dr,vr;
        else:
            # NOTE(review): falls through and returns None when missing
            # values are present — callers must handle that case.
            print('missing values found!');

    def calculate_svd(self,data_I,svd_method_I,svd_options_I={}):
        '''Calculate an SVD of the data and reformat the output to listDicts.

        data_I: list of row dicts (component/sample/concentration records).
        svd_method_I: 'svd' or 'robustSvd'.
        svd_options_I: passed through to reformat_udv (currently unused).
            NOTE(review): mutable default argument; never mutated here, but fragile.

        Returns (u_O, d_O, v_O) listDicts; empty lists on unrecognized method
        or missing values.
        '''
        u_O,d_O,v_O = [],[],[];
        # format into R matrix and list objects
        # convert data dict to matrix filling in missing values
        # with 'NA'
        listdict = listDict(data_I);
        concentrations,cn_sorted,sns_sorted,row_variables,column_variables = listdict.convert_listDict2dataMatrixList_pd(
            row_label_I='component_name',
            column_label_I='sample_name_short',
            value_label_I='calculated_concentration',
            row_variables_I=['component_group_name'],
            column_variables_I=['sample_name_abbreviation'],
            na_str_I="NA");
        cgn = row_variables['component_group_name'];
        sna = column_variables['sample_name_abbreviation'];
        sna_unique = listdict.get_uniqueValues_list(sna);
        # check if there were any missing values in the data set in the first place
        mv = 0;
        mv = listdict.count_missingValues_pivotTable();
        if mv==0:
            # Call to R
            try:
                # clear the workspace
                self.clear_workspace();
                # convert lists to R matrix
                self.make_matrixFromList(concentrations,len(cn_sorted),len(sns_sorted),'concentrations_m');
                # convert to the transpose
                self.transpose_matrix('concentrations_mt','concentrations_m');
                # calculate svd
                if svd_method_I == 'svd':
                    self.calculate_pcaMethods_svd('concentrations_mt','resSVD');
                elif svd_method_I == 'robustSvd':
                    self.calculate_pcaMethods_robustSvd('concentrations_mt','resSVD');
                else:
                    print('svd method not recognized');
                    return u_O,d_O,v_O;
                # extract svd matrices
                u,d,v = self.extract_svd('resSVD');
                # reformat svd output
                u_O,d_O,v_O = self.reformat_udv(u,d,v,cn_sorted,sns_sorted,cgn,sna,svd_method_I,svd_options_I);
            except Exception as e:
                print(e);
                exit(-1);
            return u_O,d_O,v_O;
        else:
            print('missing values found!');
            return u_O,d_O,v_O ;

    def reformat_udv(self,u,d,v,cn,sns,cgn,sna,svd_method_I,svd_options_I):
        '''reformat SVD u, d, and v matrices to listDicts
        INPUT:
        u, d, v = SVD components as numpy arrays
        cn, cgn = component names / component group names (rows of v)
        sns, sna = sample names short / abbreviations (rows of u)
        svd_method_I, svd_options_I = stored verbatim on every output record
        OUTPUT
        u_O = one dict per (sample, singular value) cell of u
        d_O = one dict per singular value, with fraction and cumulative fraction
        v_O = one dict per (component, singular value) cell of v
        '''
        # extract out the U matrix
        u_O = [];
        cnt=0;
        for r in range(u.shape[0]):
            for c in range(u.shape[1]):
                data_tmp = {};
                data_tmp['sample_name_short'] = sns[r];
                data_tmp['sample_name_abbreviation'] = sna[r];
                data_tmp['u_matrix'] = u[r,c];
                data_tmp['singular_value_index'] = c+1;
                data_tmp['svd_method'] = svd_method_I;
                data_tmp['svd_options'] = {
                    };
                u_O.append(data_tmp);
                cnt+=1;
        # extract out the V matrix
        v_O = [];
        cnt=0;
        for r in range(v.shape[0]):
            for c in range(v.shape[1]): #comp
                data_tmp = {};
                data_tmp['component_name'] = cn[r]; #need to double check
                data_tmp['component_group_name'] = cgn[r];
                data_tmp['v_matrix'] = v[r,c]; #need to double check
                data_tmp['singular_value_index'] = c+1;
                data_tmp['svd_method'] = svd_method_I;
                data_tmp['svd_options'] = {
                    };
                v_O.append(data_tmp);
                cnt+=1;
        # extract out d vector
        d_fraction,d_fraction_cumulative = self.make_svd_dFractionCumulative(d);
        d_O = [];
        cnt=0;
        for r in range(d.shape[0]):
            data_tmp = {};
            data_tmp['d_vector'] = d[r];
            data_tmp['d_fraction'] = d_fraction[r];
            data_tmp['d_fraction_cumulative'] = d_fraction_cumulative[r];
            data_tmp['singular_value_index'] = r+1;
            data_tmp['svd_method'] = svd_method_I;
            data_tmp['svd_options'] = {
                };
            d_O.append(data_tmp);
            cnt+=1;
        return u_O,d_O,v_O;

    def make_svd_dFractionCumulative(self,d):
        '''make singular value fraction and fraction_cumulative vectors
        INPUT:
        d = d vector (singular values)
        OUTPUT
        d_fraction = each singular value divided by the total
        d_fraction_cumulative = running sum of d_fraction
        '''
        d_fraction = np.zeros_like(d);
        d_fraction_cumulative = np.zeros_like(d);
        d_total = np.sum(d);
        for i in range(d.shape[0]):
            fraction = d[i]/d_total;
            if i==0:
                d_fraction[i] = fraction;
                d_fraction_cumulative[i] = fraction;
            else:
                d_fraction[i] = fraction;
                d_fraction_cumulative[i] = d_fraction_cumulative[i-1] + fraction;
        return d_fraction,d_fraction_cumulative;
#!/usr/local/bin/python3
"""
Fetch a list of flaky tests from a CircleCI project.
Searches the last 30 builds that have failed on the master branch and reports
the tests that have failed.
Branch name, number of builds, and number of results are all customizable.
"""
import argparse
import collections
import os
import sys
# External dependencies
try:
import requests
except ImportError:
sys.exit("Could not find `requests` package. Please install it with `pip3 install requests`")
def get_token():
    """Return the CircleCI API token from the CIRCLECI_TOKEN environment variable.

    Exits the process with an explanatory message when the variable is unset.
    """
    token = os.environ.get("CIRCLECI_TOKEN")
    if token is None:
        sys.exit(
            # Fixed typo: the period used to come after the newline ("set\n.").
            "No CIRCLECI_TOKEN environment variable set.\n"
            "Visit https://circleci.com/account/api to create a new token.\n"
            "Then invoke this command with CIRCLECI_TOKEN=your_token"
        )
    return token
def circleci_fetch(api, **kwargs):
    """GET an endpoint of the CircleCI v1.1 API and return the decoded JSON body."""
    url = "https://circleci.com/api/v1.1" + api
    # Caller-supplied params take precedence, matching the original dict merge.
    params = {"circle-token": get_token()}
    params.update(kwargs)
    return requests.get(url, params=params).json()
def fetch_failed_builds(project_name, branch, builds):
    """Fetch up to `builds` recent failed builds for a branch (capped at the API page limit of 100)."""
    MAX_LIMIT = 100
    endpoint = f"/project/github/{project_name}/tree/{branch}"
    return circleci_fetch(endpoint, filter="failed", limit=min(builds, MAX_LIMIT))
def fetch_test_failures(project_name, build_number):
    """Yield 'classname.name' identifiers for every failed test in one build."""
    test_results = circleci_fetch(f"/project/github/{project_name}/{build_number}/tests")
    for test in test_results["tests"]:
        if test["result"] != "failure":
            continue
        yield "{}.{}".format(test["classname"], test["name"])
def flaky(project_name, branch, builds, top):
    """Print the most frequently failing tests across the recent failed builds of a branch."""
    failure_names = []
    builds_by_failure = collections.defaultdict(list)
    for build in fetch_failed_builds(project_name, branch, builds):
        for failure in fetch_test_failures(project_name, build["build_num"]):
            failure_names.append(failure)
            builds_by_failure[failure].append(build)
    # Report each flaky test with its failure count and the builds it failed in.
    for failure, count in collections.Counter(failure_names).most_common(top):
        print(f"{failure} failed {count} times")
        for failed_build in builds_by_failure[failure]:
            print(f"{failed_build['build_url']}")
        print()
def get_parser():
    """Build the argument parser for the flaky-test checker CLI."""
    parser = argparse.ArgumentParser(description='Check a CircleCI project for flaky tests')
    parser.add_argument('project', type=str, help='the project to check')
    parser.add_argument('--branch', dest='branch', type=str, default='master',
                        help='branch to check for failures (default: master)')
    parser.add_argument('--builds', dest='builds', type=int, default=30, metavar='N',
                        help='number of failed builds to check (default: 30)')
    parser.add_argument('--top', dest='top', type=int, default=None, metavar='N',
                        help='limit results to the top N (default: all)')
    return parser
def main():
    """Entry point: parse CLI arguments and report flaky tests.

    The previous version duplicated get_token()'s missing-token check here;
    that branch was dead code (get_token() either returns the token string or
    exits the process itself), so it has been removed.
    """
    parser = get_parser()
    args = parser.parse_args()
    flaky(args.project, args.branch, args.builds, args.top)
if __name__ == "__main__":
main()
|
"""Utility functions for search"""
from contextlib import contextmanager
import json
from tempfile import NamedTemporaryFile
def traverse_mapping(mapping, parent_key):
    """
    Depth-first traversal of a mapping, yielding each nested dict.

    Args:
        mapping (dict): The mapping itself, or a nested dict
        parent_key (str): The key for the mapping

    Returns:
        generator: A generator of (key, dict) pairs within the mapping
    """
    yield parent_key, mapping
    for child_key, child in mapping.items():
        if not isinstance(child, dict):
            continue
        yield from traverse_mapping(child, child_key)
class _JsonStream:
"""
Handles storing large amounts of newline separated JSON data
"""
def __init__(self, file):
self.file = file
def write_stream(self, gen):
"""
Write objects to the JSON file
"""
self.file.seek(0)
for obj in gen:
self.file.write(json.dumps(obj))
self.file.write("\n")
def read_stream(self):
"""
Reads stream of json objects from a file
"""
self.file.seek(0)
for line in self.file.readlines():
yield json.loads(line)
@contextmanager
def open_json_stream():
    """
    Context manager yielding a _JsonStream backed by a temporary file.

    The temporary file (and everything written to it) is discarded on exit.
    """
    with NamedTemporaryFile("w+") as backing_file:
        yield _JsonStream(backing_file)
def fix_nested_filter(query, parent_key):
    """
    Fix the invalid 'filter' in Elasticsearch queries by renaming it to 'query'
    wherever it appears directly under a 'nested' clause.

    Args:
        query (dict): An Elasticsearch query (or a fragment of one)
        parent_key (any): The key under which this fragment was found

    Returns:
        dict: An updated Elasticsearch query with filter replaced with query
    """
    if isinstance(query, list):
        # List elements are visited with their index as the parent key.
        return [fix_nested_filter(item, index) for index, item in enumerate(query)]
    if not isinstance(query, dict):
        return query
    if parent_key == 'nested' and 'filter' in query:
        fixed = dict(query)
        if 'query' in fixed:
            raise Exception("Unexpected 'query' found")
        fixed['query'] = fixed.pop('filter')
        return fixed
    return {key: fix_nested_filter(value, key) for key, value in query.items()}
|
""" Create simulated reads with barcodes from a given id file, with information about
correct barcodes in the read headers for a posteriori verification of demultiplexing.
"""
import argparse
import random
import shutil
def chooseEvent(prop_inserts=1, prop_deletions=1, prop_modifications=18):
    """Randomly pick a mutation event type weighted by the given proportions.

    Returns 'i' (insertion), 'd' (deletion) or 'm' (modification), each with
    probability prop_x / (prop_inserts + prop_deletions + prop_modifications).

    BUG FIX: the original comparisons were off by one — `p <= prop_inserts`
    gave insertions (prop_inserts + 1) outcomes out of `total` instead of
    prop_inserts (with the defaults, 2/20 instead of 1/20), and deletions
    correspondingly too few. Half-open ranges restore the exact weighting.
    """
    total = prop_inserts + prop_deletions + prop_modifications
    p = random.randrange(0, total)
    if p < prop_inserts:
        return 'i'
    elif p < prop_inserts + prop_deletions:
        return 'd'
    else:
        return 'm'
def getRandomBase():
    """Return one of the four DNA bases, chosen uniformly at random."""
    return random.choice('ACGT')
def messupBarcode(barcode, errorFreq, mutation_model=None):
    """Apply simulated sequencing errors to a barcode.

    For each of `errorFreq` potential error events, a position in 0..99 is
    drawn (errors are spread over a 100 bp read); events landing inside the
    barcode are applied as insertions, deletions, or modifications weighted by
    `mutation_model`, a (prop_inserts, prop_deletions, prop_modifications)
    triple. None keeps chooseEvent's default weights.

    Returns [mutated_barcode, quality_string]: error positions get quality
    '?', untouched bases get 'F'.

    BUG FIX: `mutation_model=None` used to be replaced with `(None,)`, which
    made `chooseEvent(*mutation_model)` pass None as a proportion and raise
    TypeError on the first error event. An empty tuple correctly falls back
    to chooseEvent's defaults. Also replaced `== None` with `is None`.
    """
    res = ''
    qual = ''
    eventList = dict()
    if mutation_model is None:
        mutation_model = ()
    for i in range(errorFreq):
        # Will this error hit the barcode?
        pos = random.randrange(0, 100)
        if pos < len(barcode):
            # Hit!
            event = chooseEvent(*mutation_model)
            eventList[pos] = event
    i = 0
    while i < len(barcode):
        if i in eventList:
            event = eventList[i]
            if event == 'i':
                # Insertion: emit a random base, then revisit this position.
                res += getRandomBase()
                qual += '?'
                del eventList[i]
            elif event == 'm':
                # Modification: replace the base with a random one.
                res += getRandomBase()
                qual += '?'
                i += 1
            else:
                # Deletion!
                i += 1
        else:
            res += barcode[i]
            qual += 'F'
            i += 1
    return [res, qual]
def main(fastqFile, index, reads, errorFreq, mutation_model=None):
    """Write `reads` simulated fastq entries with error-laden barcodes.

    Each read consists of 10 random bases + the mutated barcode + 10 random
    bases + the original barcode (quality '!'), with the full id line carried
    in the fastq header so demultiplexing can be verified a posteriori. A copy
    of the output is saved as '<fastqFile>_pair'.

    FIX: the id file handle was never closed; both files are now managed with
    a context manager. The `index` parameter is no longer shadowed by the
    per-read line variable.
    """
    with open(index, 'r') as ids, open(fastqFile, 'w') as outF:
        for _ in range(reads):
            line = ids.readline()
            if not line:
                # Wrap around when we run out of barcodes.
                ids.seek(0)
                line = ids.readline()
            # Get the barcode (first tab-separated field).
            barcode, _sep, _rest = line.rstrip().partition('\t')
            barcode, bQual = messupBarcode(barcode, errorFreq, mutation_model)
            # Print this within a fastq, pos 10
            entry = '@Test_string\t{}\n'.format(line.rstrip())
            read = ''
            qual = ''
            for _ in range(10):
                read += getRandomBase()
                qual += 'F'
            read += barcode
            qual += bQual
            for _ in range(10):
                read += getRandomBase()
                qual += 'F'
            # Append the uncorrupted barcode with sentinel quality '!' for verification.
            true_barcode = line.rstrip().split('\t')[0]
            read += true_barcode
            qual += '!' * len(true_barcode)
            entry += read + '\n+\n' + qual + '\n'
            outF.write(entry)
    shutil.copy(fastqFile, fastqFile + '_pair')
if __name__ == '__main__':
    # Usage: python makeTestErrorFrequency.py out_fw.fastq index.txt number_of_reads errorFreq
    # (this usage note used to be a stray no-op string literal; now a comment)
    parser = argparse.ArgumentParser()
    parser.add_argument("out_fw_fastq", \
        help="Output fastq file with forward reads.")
    parser.add_argument("index_txt", \
        help="ID file with barcodes.")
    parser.add_argument("number_of_reads", \
        help="Number of reads to simulate.", type=int)
    parser.add_argument("errorFreq", \
        help="Frequency of errors in simulated reads (percentage)", \
        type=int)
    parser.add_argument("-m", "--mutation_model", nargs="+", default=None, type=int, \
        help="Model for mutation events, input as three integers, given "\
             "as an 'inserts deletions modifications' triple. Then e.g. " \
             "the proportion of inserts will be inserts / (inserts + " \
             "deletions + modifications). Default values are '1 1 18'.")
    args = parser.parse_args()
    main(args.out_fw_fastq, args.index_txt, args.number_of_reads, \
         args.errorFreq, args.mutation_model)
|
#! /usr/bin/env python
"""Read data from a GEBCO NetCDF file into a RasterModelGrid."""
try:
import netCDF4 as nc4
except ImportError:
import warnings
warnings.warn("Unable to import netCDF4.", ImportWarning)
from scipy.io import netcdf as nc
from landlab import RasterModelGrid
from landlab.io.netcdf.errors import NotRasterGridError
_COORDINATE_NAMES = ["x_range", "y_range", "z_range", "spacing", "dimension"]
_AXIS_NAMES = ["x", "y"]
def _read_netcdf_grid_shape(root):
    """Read the grid shape from a GEBCO NetCDF file.

    Parameters
    ----------
    root : netcdf_file
        A NetCDF file.

    Returns
    -------
    tuple of int
        The shape of the grid as number of rows, then columns.
    """
    dimension = root.variables["dimension"]
    return dimension[:]
def _read_netcdf_grid_spacing(root):
    """Read the grid spacing from a GEBCO NetCDF file.

    Parameters
    ----------
    root : netcdf_file
        A NetCDF file.

    Returns
    -------
    tuple of float
        The spacing of the grid between rows, then columns.
    """
    spacing = root.variables["spacing"]
    return spacing[:]
def _read_netcdf_structured_data(root):
    """Read the grid data from a GEBCO NetCDF file.

    Parameters
    ----------
    root : netcdf_file
        A NetCDF file.

    Returns
    -------
    dict
        The data fields as numpy arrays. Keys are the variable names, and
        values are the data (flattened, with scale factor and offset applied).
    """
    fields = dict()
    for name, variable in root.variables.items():
        if name in _COORDINATE_NAMES:
            continue
        values = variable[:] * variable.scale_factor + variable.add_offset
        values.shape = (values.size,)
        fields[name] = values
    return fields
def read_netcdf(nc_file, just_grid=False):
    """Read a GEBCO-formatted NetCDF file.

    Reads the NetCDF file *nc_file*, and writes it to the fields of a new
    RasterModelGrid, which it then returns. Check the names of the fields
    in the returned grid with grid.at_nodes.keys().

    Parameters
    ----------
    nc_file : str
        Name of the NetCDF file.
    just_grid : bool, optional
        If ``True``, just read the grid information and forget the data.
        Otherwise add the data as fields.

    Returns
    -------
    RasterModelGrid
        A newly-created :any:`RasterModelGrid`.

    Raises
    ------
    NotRasterGridError
        If the row and column spacing differ (the grid is not square-celled).
    """
    # Prefer the scipy reader; fall back to netCDF4 for files it cannot open.
    try:
        root = nc.netcdf_file(nc_file, "r", version=2)
    except TypeError:
        root = nc4.Dataset(nc_file, "r", format="NETCDF4")

    shape = _read_netcdf_grid_shape(root)
    spacing = _read_netcdf_grid_spacing(root)
    assert len(shape) == 2
    assert len(spacing) == 2
    if spacing[0] != spacing[1]:
        raise NotRasterGridError()

    grid = RasterModelGrid(shape, xy_spacing=spacing)
    if not just_grid:
        for name, values in _read_netcdf_structured_data(root).items():
            grid.add_field("node", name, values)

    root.close()
    return grid
|
import sys
import time
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
Dataset,
Experiment,
Organism,
ProcessorJob,
ProcessorJobDatasetAssociation,
)
from data_refinery_common.utils import queryset_page_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism: Organism):
    """Returns a quantpendia job for the provided organism."""
    quantpendia_job = ProcessorJob()
    quantpendia_job.pipeline_applied = ProcessorPipeline.CREATE_QUANTPENDIA.value
    quantpendia_job.save()

    # The dataset only needs quant.sf files, aggregated per experiment,
    # with no scaling or normalization applied.
    dataset = Dataset()
    dataset.data = build_dataset(organism)
    dataset.scale_by = "NONE"
    dataset.aggregate_by = "EXPERIMENT"
    dataset.quantile_normalize = False
    dataset.quant_sf_only = True
    dataset.svd_algorithm = "NONE"
    dataset.save()

    # Link the dataset to the processor job.
    association = ProcessorJobDatasetAssociation()
    association.processor_job = quantpendia_job
    association.dataset = dataset
    association.save()

    return quantpendia_job
def build_dataset(organism: Organism):
    """Map experiment accession codes to the sample accessions that have quant.sf files."""
    data = {}
    experiments = Experiment.objects.filter(organisms=organism, technology="RNA-SEQ",).distinct()

    for experiment_page in queryset_page_iterator(experiments):
        for experiment in experiment_page:
            # only include the samples from the target organism that have quant.sf files
            experiment_samples = experiment.samples.filter(organism=organism, technology="RNA-SEQ")

            # split the query into two so to avoid timeouts.
            # assume processed rna-seq samples have a quant.sf file
            processed_codes = experiment_samples.filter(is_processed=True).values_list(
                "accession_code", flat=True
            )

            # and only check for quant file for unprocessed samples
            unprocessed_codes = (
                experiment_samples.filter(
                    is_processed=False, results__computedfile__filename="quant.sf"
                )
                .values_list("accession_code", flat=True)
                .distinct()
            )

            accession_codes = list(processed_codes) + list(unprocessed_codes)
            if accession_codes:
                data[experiment.accession_code] = accession_codes

        # Pause between pages to go easy on the database.
        time.sleep(5)

    return data
class Command(BaseCommand):
    """Management command that queues one quantpendia-creation job per organism."""

    def add_arguments(self, parser):
        parser.add_argument(
            "--organisms", type=str, help=("Comma separated list of organism names.")
        )
        parser.add_argument(
            "--organisms-exclude",
            type=str,
            help=("Comma separated list of organism names that we want to exclude from the list"),
        )

    def handle(self, *args, **options):
        """Create a quantpendia for one or more organisms."""
        candidates = Organism.objects.all()

        # Names are normalized to the stored form: uppercase with underscores.
        if options["organisms"] is not None:
            wanted = options["organisms"].upper().replace(" ", "_").split(",")
            candidates = candidates.filter(name__in=wanted)

        if options["organisms_exclude"]:
            unwanted = options["organisms_exclude"].upper().replace(" ", "_").split(",")
            candidates = candidates.exclude(name__in=unwanted)

        logger.debug("Generating quantpendia for organisms", organisms=candidates)

        for organism in candidates:
            # only generate the quantpendia for organisms that have some samples
            # with quant.sf files.
            has_quantsf_files = organism.sample_set.filter(
                technology="RNA-SEQ", results__computedfile__filename="quant.sf"
            ).exists()
            if not has_quantsf_files:
                continue

            job = create_job_for_organism(organism)
            logger.info(
                "Sending compendia job for Organism", job_id=str(job.pk), organism=str(organism)
            )
            send_job(ProcessorPipeline.CREATE_QUANTPENDIA, job)

        sys.exit(0)
|
from lib_imports import *
import tensorflow as tf
from logger import logger
from utils.geometry import rects_intersection
class ObjectDetectionRate(tf.keras.metrics.Metric):
    """Keras metric accumulating per-class detection statistics.

    Counts IoU-thresholded true/false positives for predicted classes against
    ground-truth annotations; `result()` returns a per-class
    recall*precision/(recall+precision+eps) score, excluding class 0
    (interpreted as "no object").

    BUG FIX: the counters used `dtype=np.float`, an alias that was deprecated
    in NumPy 1.20 and removed in 1.24, which makes `reset_states()` raise
    AttributeError on modern NumPy. Replaced with `np.float64` (the value the
    alias referred to), so results are numerically unchanged.
    """

    def __init__(self, name='object_detection_rate', num_classes=6, iou_threshold=0.5, **kwargs):
        super(ObjectDetectionRate, self).__init__(name=name, **kwargs)
        self.num_classes = num_classes
        self.iou_threshold = iou_threshold
        self.reset_states()

    def update_state(self, y_pred, anns, rois):
        """Accumulate statistics for one batch.

        y_pred: predicted class id per region of interest (0 = no object).
        anns: ground-truth annotation dicts with 'category_id' and 'bbox'
            (bbox last two entries are width and height).
        rois: regions of interest aligned with y_pred (last two entries are
            width and height).
        """
        was_detected = [False] * len(anns)
        for image_y_pred, roi in zip(y_pred, rois):
            if image_y_pred == 0:
                # Class 0 means "nothing predicted" — not counted either way.
                continue
            roi_square = roi[-2] * roi[-1]
            detected = False
            for ind, ann in enumerate(anns):
                if image_y_pred == ann['category_id']:
                    # IoU = intersection / (union of the two rectangles).
                    intersection_square = rects_intersection(ann['bbox'], roi)
                    iou_score = float(intersection_square) / (roi_square + ann['bbox'][-2] * ann['bbox'][-1] - intersection_square)
                    if iou_score >= self.iou_threshold:
                        detected = True
                        was_detected[ind] = True
                        break
            if detected:
                self.true_positives[image_y_pred] += 1
            else:
                self.false_positives[image_y_pred] += 1
        for dataset_object_detected, ann in zip(was_detected, anns):
            self.total_in_dataset[ann['category_id']] += 1
            if not dataset_object_detected:
                continue
            self.dataset_objects_detected[ann['category_id']] += 1

    @tf.autograph.experimental.do_not_convert
    def result(self):
        """Per-class score for classes 1..num_classes-1 (class 0 excluded)."""
        logger.log('true_positives: ' + str(self.true_positives[1:]))
        logger.log('false_positives: ' + str(self.false_positives[1:]))
        logger.log('total_in_dataset: ' + str(self.total_in_dataset[1:]))
        logger.log('dataset_objects_detected: ' + str(self.dataset_objects_detected[1:]))
        recall = self.true_positives[1:] / self.total_in_dataset[1:]
        # Small epsilon guards against division by zero when a class was never predicted.
        precision = self.true_positives[1:] / (self.true_positives[1:] + self.false_positives[1:] + 0.0001)
        logger.log('recall: ' + str(recall))
        logger.log('precision: ' + str(precision))
        return recall * precision / (recall + precision + 0.0001)

    def reset_states(self):
        # np.float64 replaces the removed np.float alias (same dtype).
        self.true_positives = np.zeros((self.num_classes,), dtype=np.float64)
        self.false_positives = np.zeros((self.num_classes,), dtype=np.float64)
        self.total_in_dataset = np.zeros((self.num_classes,), dtype=np.float64)
        self.dataset_objects_detected = np.zeros((self.num_classes,), dtype=np.float64)
|
from datetime import datetime
from sqlalchemy import ForeignKey, Table, Column
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from freshwater import db, app
from flask_security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin
# Takes a db table (model class) and returns a list of dictionaries, one per
# existing record (each row is a dictionary, column titles are keys),
# ordered by primary id.
def model_to_list_of_dicts(model):
    """Return every record in `model`'s table as a list of dicts, sorted by id.

    Each row is converted with the model class's own `dict()` method, so any
    model used here must define one.

    FIX: removed a leftover debug `print(".", end='')` and the surrounding
    commented-out tracing, which polluted stdout on every call.
    """
    records_in_model = model.query.all()
    rows = [record.dict() for record in records_in_model]
    # Order the list of dictionaries by id, smallest to largest.
    rows.sort(key=lambda row: row["id"])
    return rows  # Remember this needs to be jsonified to pass it to html
class Messages(db.Model):
    """Direct message between two users, optionally tied to a listing."""

    __tablename__ = "Messages"

    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): despite the "fk" prefix these are strings, not integer
    # foreign keys — presumably user identifiers/emails; confirm against
    # the code that writes them. "Reciever" spelling kept: it is part of
    # the schema and the dict() keys below.
    fkSender = db.Column(db.String(255))
    fkReciever = db.Column(db.String(255))
    message = db.Column(db.String(400))
    timeCreated = db.Column(db.DateTime, default=datetime.utcnow)
    unread = db.Column(db.Integer)  # 0 is read, 1 is unread
    fk_listing_id = db.Column(db.Integer)

    def dict(self):
        """Serializable view of one row (note: omits ``unread``)."""
        return {"id": self.id,
                "fkSender": self.fkSender,
                "fkReciever": self.fkReciever,
                "message": self.message,
                "timeCreated": self.timeCreated,
                "fk_listing_id": self.fk_listing_id
                }

    @staticmethod
    def list_of_dicts():
        """All Messages rows as dicts, ordered by id."""
        return model_to_list_of_dicts(Messages)
class Images(db.Model):  # Db where all Image paths are stored
    """One image file attached to a listing, stored as a relative path."""

    # __bind_key__ = 'db1'
    __tablename__ = "Images"  # Name of table

    id = db.Column(db.Integer, primary_key=True)
    # All images must be associated with the owner (User) and the listing.
    fk_user_id = db.Column(db.Integer, ForeignKey('User.id'))  # owning user
    user = relationship("User")
    fk_listing_id = db.Column(db.Integer, ForeignKey('Listings.id'))  # owning listing
    listing = relationship("Listings")
    # sellOrRent = db.Column(db.String)
    path = db.Column(db.String(255))  # Relative file path of image

    def dict(self):
        """Serializable view of one row."""
        return {"id": self.id,
                "fk_user_id": self.fk_user_id,
                # "fkEmail": self.fkEmail,
                "fk_listing_id": self.fk_listing_id,
                # "sellOrRent": self.sellOrRent,
                "path": self.path}

    @staticmethod
    def list_of_dicts():
        """All Images rows as dicts, ordered by id."""
        return model_to_list_of_dicts(Images)
class Listings(db.Model):
    """A housing listing (sale or rental) posted by a user."""

    # __bind_key__ = 'db1'
    __tablename__ = "Listings"

    id = db.Column(db.Integer, primary_key=True)
    fk_user_id = db.Column(db.Integer)  # owner's User.id (no DB-level FK constraint)
    # fkEmail = db.Column(db.String)
    timeCreated = db.Column(db.DateTime, default=datetime.utcnow)
    title = db.Column(db.String(255))
    houseType = db.Column(db.String(255))
    sellOrRent = db.Column(db.String(255))
    petsAllowed = db.Column(db.Integer)  # presumably a 0/1 flag — confirm against writers
    city = db.Column(db.String(255))
    postalCode = db.Column(db.Integer)
    street_address = db.Column(db.String(255))
    distance_from_SFSU = db.Column(db.Float)
    # houseNum = db.Column(db.Integer)
    # gps = db.Column(db.String)
    description = db.Column(db.String(8000))
    price = db.Column(db.Integer)
    sqft = db.Column(db.Integer)
    bedroomNum = db.Column(db.Integer)
    bathroomNum = db.Column(db.Integer)
    adminAppr = db.Column(db.Integer)  # admin approval status — presumably 0/1; verify

    def dict(self):
        """Serializable view of one row."""
        return {
            "id": self.id,
            "fk_user_id": self.fk_user_id,
            # "fkEmail": self.fkEmail,
            "timeCreated": self.timeCreated,
            "title": self.title,
            "houseType": self.houseType,
            "sellOrRent": self.sellOrRent,
            "petsAllowed": self.petsAllowed,
            "city": self.city,
            "postalCode": self.postalCode,
            "street_address": self.street_address,
            "distance_from_SFSU": self.distance_from_SFSU,
            # "houseNum": self.houseNum,
            # "gps": self.gps,
            "description": self.description,
            "price": self.price,
            "sqft": self.sqft,
            "bedroomNum": self.bedroomNum,
            "bathroomNum": self.bathroomNum,
            "adminAppr": self.adminAppr,
        }

    @staticmethod
    def list_of_dicts():
        """All Listings rows as dicts, ordered by id."""
        return model_to_list_of_dicts(Listings)
# Define models
# Association table linking users to their roles (many-to-many),
# required by Flask-Security's SQLAlchemyUserDatastore.
roles_users = db.Table('roles_users',
                       db.Column('user_id', db.Integer(), db.ForeignKey('User.id')),
                       db.Column('role_id', db.Integer(), db.ForeignKey('Role.id')),
                       # info={'bind_key': 'user'}
                       )


class Role(db.Model, RoleMixin):
    """Flask-Security role (e.g. admin) assignable to users."""

    # __bind_key__ = 'user'
    __tablename__ = "Role"

    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))
class User(db.Model, UserMixin):
    """Flask-Security user account."""

    # __bind_key__ = 'user'
    __tablename__ = "User"

    id = db.Column(db.Integer, primary_key=True)
    # first_name = db.Column(db.String(255))
    # last_name = db.Column(db.String(255))
    # phone_number = db.Column(db.String(25))
    email = db.Column(db.String(255), unique=True)
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    sfsu_confirmed = db.Column(db.Integer())  # 0 no, 1 yes
    date_registered = db.Column(db.DateTime(), default=datetime.utcnow)
    roles = db.relationship('Role',
                            secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))

    def dict(self):
        """Serializable view of one row.

        NOTE(review): this exposes the stored ``password`` value — make
        sure callers never ship it to templates or API responses.
        """
        return {
            'id': self.id,
            # 'first_name': self.first_name,
            # 'last_name' : self.last_name,
            # 'phone_number': self.phone_number,
            'email': self.email,
            'password': self.password,
            'active': self.active,
            'sfsu_confirmed': self.sfsu_confirmed,
            'date_registered': self.date_registered,
            'roles': self.roles
        }

    @staticmethod
    def list_of_dicts():
        """All User rows as dicts, ordered by id."""
        return model_to_list_of_dicts(User)
# Wire Flask-Security to the User/Role models defined above.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
app.security = Security(app, user_datastore)
|
def DBAPIQueryHelper_test():
    """Placeholder test hook; intentionally does nothing yet."""
|
from django.apps import AppConfig
class GaleriasConfig(AppConfig):
name = 'galerias'
|
import math
import time
def time_decorator(funct):
    """Decorator that times one call of *funct*.

    The wrapped function returns ``(result, elapsed_seconds)``.

    Bug fix: the original read ``time.perf_counter()`` only once, *after*
    the call, so it returned an absolute clock value rather than the
    call's duration; a start reading is now taken before the call.
    """
    def wrapper(*arg):
        start = time.perf_counter()
        result = funct(*arg)
        elapsed = time.perf_counter() - start
        return result, elapsed
    return wrapper
@time_decorator
def factorial_counter(x, y):
    """Return the greatest common divisor of x! and y! (timed)."""
    return math.gcd(math.factorial(x), math.factorial(y))
if __name__ == "__main__":
    # Benchmark: repeat the same gcd-of-factorials computation ten times
    # and print each result with its timer value.
    for i in range(10):
        output = factorial_counter(10000, 10)
        print(f"GCD = {output[0]}, Timer = {output[1]}")
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import random
from math import *
import time
import copy
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR, MultiStepLR
# In[2]:
# NOTE(review): raises on CUDA-less hosts even though CUDA availability
# is checked further below — consider guarding this call too.
torch.cuda.set_device(0)
# In[3]:
# All newly created tensors default to float64.
torch.set_default_tensor_type('torch.DoubleTensor')
# Definition of the activation function (SiLU / "swish").
def activation(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
# In[5]:
# Fully connected ResNet with a single residual block.
class Net(nn.Module):
    """MLP input_size -> width -> width -> width -> 1 with one skip
    connection wrapped around the two hidden layers."""

    def __init__(self, input_size, width):
        super().__init__()
        self.layer_in = nn.Linear(input_size, width)
        self.layer_1 = nn.Linear(width, width)
        self.layer_2 = nn.Linear(width, width)
        self.layer_out = nn.Linear(width, 1)

    def forward(self, x):
        hidden = self.layer_in(x)
        # residual block 1
        skip = activation(self.layer_2(activation(self.layer_1(hidden))))
        hidden = hidden + skip
        return self.layer_out(hidden)
# In[6]:
input_size = 1  # dimensionality of the PDE domain (1-D problem)
width = 4  # hidden-layer width of the network
# In[7]:
# Exact solution of the 1-D test problem: u(x) = sin(pi x).
def u_ex(x):
    """Analytic solution used to measure the network's error."""
    return torch.sin(x * pi)
# In[8]:
# Right-hand side matching u_ex: f(x) = pi^2 sin(pi x), i.e. -u''(x).
def f(x):
    """Source term of the Poisson problem -u'' = f on (0, 1)."""
    return (pi ** 2) * torch.sin(pi * x)
# In[9]:
# Uniform grid of grid_num + 1 nodes on [0, 1]: x_i = i / grid_num.
grid_num = 200
x = torch.zeros(grid_num + 1, input_size)
for index in range(grid_num + 1):
    x[index] = index * 1 / grid_num
# In[10]:
CUDA = torch.cuda.is_available()
# print('CUDA is: ', CUDA)
if CUDA:
    # Move both the network and the sample grid to the GPU.
    net = Net(input_size,width).cuda()
    x = x.cuda()
else:
    net = Net(input_size,width)
    x = x  # no-op, kept only for symmetry with the CUDA branch
# In[11]:
def model(x):
    """Trial solution x (x - 1) net(x); the polynomial factor enforces the
    homogeneous Dirichlet conditions u(0) = u(1) = 0 exactly."""
    boundary_factor = x * (x - 1.0)
    return boundary_factor * net(x)
# In[12]:
optimizer = optim.Adam(net.parameters())
# In[13]:
# Xavier normal initialization for weights:
# mean = 0 std = gain * sqrt(2 / fan_in + fan_out)
# zero initialization for biases
def initialize_weights(self):
    """Xavier-normal initialization for every Linear layer's weights and
    zeros for the biases.

    Fix: ``nn.init.xavier_normal`` (no trailing underscore) is the
    long-deprecated alias and has been removed from recent PyTorch; the
    supported in-place spelling is ``xavier_normal_``.
    """
    for m in self.modules():
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()
# In[14]:
initialize_weights(net)  # apply the Xavier/zero initialization defined above
# In[15]:
# loss function to DGM by auto differential
def loss_function(x):
    """DGM loss: squared PDE residual (u'' + f)^2 integrated over [0, 1].

    The integral is approximated with the composite Simpson rule
    h/6 * [r(a) + 4 * sum(midpoints) + 2 * sum(interior nodes) + r(b)],
    where every residual r is obtained by double auto-differentiation of
    the trial solution ``model`` at that point.

    NOTE(review): ``model(x_temp)`` is re-evaluated several times per
    point (once for outputs and once per grad_outputs shape); hoisting it
    into a local would save a significant amount of work.
    """
    h = 1 / grid_num
    sum_0 = 0.0
    sum_1 = 0.0  # residuals at interval midpoints (Simpson weight 4)
    sum_2 = 0.0  # residuals at interior nodes (Simpson weight 2)
    sum_a = 0.0  # residual at the left endpoint
    sum_b = 0.0  # residual at the right endpoint
    # Midpoints of all grid_num sub-intervals.
    for index in range(grid_num):
        x_temp = x[index] + h / 2
        x_temp.requires_grad = True
        if CUDA:
            grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape).cuda(), create_graph = True)
            grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape).cuda(), create_graph = True)
        else:
            grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
            grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
        sum_1 += ((grad_grad_x_temp[0])[0] + f(x_temp)[0])**2
    # Interior grid nodes (endpoints handled separately below).
    for index in range(1, grid_num):
        x_temp = x[index]
        x_temp.requires_grad = True
        if CUDA:
            grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape).cuda(), create_graph = True)
            grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape).cuda(), create_graph = True)
        else:
            grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
            grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
        sum_2 += ((grad_grad_x_temp[0])[0] + f(x_temp)[0])**2
    # Left endpoint x = 0.
    x_temp = x[0]
    x_temp.requires_grad = True
    if CUDA:
        grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape).cuda(), create_graph = True)
        grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape).cuda(), create_graph = True)
    else:
        grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
        grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
    sum_a = ((grad_grad_x_temp[0])[0] + f(x_temp)[0])**2
    # Right endpoint x = 1.
    x_temp = x[grid_num]
    x_temp.requires_grad = True
    if CUDA:
        grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape).cuda(), create_graph = True)
        grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape).cuda(), create_graph = True)
    else:
        grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
        grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
    sum_b = ((grad_grad_x_temp[0])[0] + f(x_temp)[0])**2
    # Composite Simpson combination of all sampled residuals.
    sum_0 = h / 6 * (sum_a + 4 * sum_1 + 2 * sum_2 + sum_b)
    return sum_0
# In[16]:
def error_function(x):
    """Mean squared error between the trial solution ``model`` and the
    exact solution ``u_ex`` over the sample points in *x*."""
    squared_errors = 0.0
    for x_point in x:
        squared_errors += (model(x_point)[0] - u_ex(x_point)[0]) ** 2
    return squared_errors / len(x)
# In[17]:
param_num = sum(x.numel() for x in net.parameters())
# In[19]:
def get_weights(net):
    """Return the raw parameter tensors of *net* as a list."""
    return [param.data for param in net.parameters()]
# In[20]:
def set_weights(net, weights, directions=None, step=None):
    """
    Overwrite the network's parameters with *weights*, or move them away
    from *weights* along one or two directions scaled by *step*.
    """
    if directions is None:
        # Plain weight list: copy it straight into the parameters.
        # (A step length without a direction makes no sense.)
        for param, target in zip(net.parameters(), weights):
            param.data.copy_(target.type(type(param.data)))
        return
    assert step is not None, 'If a direction is specified then step must be specified as well'
    if len(directions) == 2:
        dx, dy = directions
        changes = [d0 * step[0] + d1 * step[1] for (d0, d1) in zip(dx, dy)]
    else:
        changes = [d * step for d in directions[0]]
    for param, base, delta in zip(net.parameters(), weights, changes):
        param.data = base + torch.Tensor(delta).type(type(base))
# In[21]:
def set_states(net, states, directions=None, step=None):
    """
    Load *states* into the network, optionally shifted along one or two
    directions scaled by *step*.
    """
    if directions is None:
        net.load_state_dict(states)
        return
    assert step is not None, 'If direction is provided then the step must be specified as well'
    if len(directions) == 2:
        dx, dy = directions
        changes = [d0 * step[0] + d1 * step[1] for (d0, d1) in zip(dx, dy)]
    else:
        changes = [d * step for d in directions[0]]
    shifted = copy.deepcopy(states)
    assert (len(shifted) == len(changes))
    for (key, value), delta in zip(shifted.items(), changes):
        delta = torch.tensor(delta)
        value.add_(delta.type(value.type()))
    net.load_state_dict(shifted)
# In[22]:
def get_random_weights(weights):
    """
    Build a random direction: one Gaussian tensor per entry of *weights*,
    each matching that entry's shape.
    """
    return [torch.randn(weight.size()) for weight in weights]
# In[23]:
def get_random_states(states):
    """
    Build a random direction: one Gaussian tensor per state_dict entry
    (including BN running_mean/var), each matching that entry's shape.
    """
    return [torch.randn(tensor.size()) for tensor in states.values()]
# In[24]:
def get_diff_weights(weights, weights2):
    """Direction pointing from *weights* to *weights2* (elementwise w2 - w)."""
    return [after - before for before, after in zip(weights, weights2)]
# In[25]:
def get_diff_states(states, states2):
    """Direction pointing from *states* to *states2*, entry by entry."""
    return [v2 - v1 for v1, v2 in zip(states.values(), states2.values())]
# In[26]:
def normalize_direction(direction, weights, norm='filter'):
    """
    Rescale *direction* so its norm is comparable to the corresponding
    model *weights*, at the chosen granularity.

    Args:
        direction: the random direction for one layer (iterable of
            per-filter tensors for the 'filter'/'dfilter' modes, a single
            tensor otherwise)
        weights: the matching layer of the original model
        norm: normalization method, 'filter' | 'layer' | 'weight'
            | 'dfilter' | 'dlayer'

    Raises:
        ValueError: for an unrecognized *norm* (previously an unknown
            mode was silently ignored, leaving the direction unscaled).
    """
    if norm == 'filter':
        # Rescale the filters (weights in group) in 'direction' so that each
        # filter has the same norm as its corresponding filter in 'weights'.
        for d, w in zip(direction, weights):
            d.mul_(w.norm() / (d.norm() + 1e-10))
    elif norm == 'layer':
        # Rescale the layer variables in the direction so that each layer has
        # the same norm as the layer variables in weights.
        direction.mul_(weights.norm() / direction.norm())
    elif norm == 'weight':
        # Rescale the entries in the direction so that each entry has the same
        # scale as the corresponding weight.
        direction.mul_(weights)
    elif norm == 'dfilter':
        # Rescale the entries in the direction so that each filter direction
        # has the unit norm.
        for d in direction:
            d.div_(d.norm() + 1e-10)
    elif norm == 'dlayer':
        # Rescale the entries in the direction so that each layer direction has
        # the unit norm.
        direction.div_(direction.norm())
    else:
        raise ValueError(f"unknown norm mode: {norm!r}")
# In[27]:
def normalize_directions_for_weights(direction, weights, norm='filter', ignore='biasbn'):
    """
    Scale each direction entry according to the matching weight entry;
    1-D entries (biases / BN parameters) are zeroed or copied instead.
    """
    assert(len(direction) == len(weights))
    for d, w in zip(direction, weights):
        if d.dim() > 1:
            normalize_direction(d, w, norm)
        elif ignore == 'biasbn':
            d.fill_(0)  # ignore directions for weights with 1 dimension
        else:
            d.copy_(w)  # keep directions for weights/bias that are only 1 per node
# In[28]:
def normalize_directions_for_states(direction, states, norm='filter', ignore='ignore'):
    """Scale direction entries relative to the matching state_dict entries;
    1-D entries are zeroed (ignore='biasbn') or copied from the state."""
    assert(len(direction) == len(states))
    for d, (key, w) in zip(direction, states.items()):
        if d.dim() > 1:
            normalize_direction(d, w, norm)
        elif ignore == 'biasbn':
            d.fill_(0)  # zero out 1-D (bias / BN) directions
        else:
            d.copy_(w)  # carry the state entry itself as the direction
# In[29]:
def ignore_biasbn(directions):
    """Zero every 1-D (bias / batch-norm) entry in *directions*."""
    for entry in directions:
        if entry.dim() <= 1:
            entry.fill_(0)
# In[30]:
def create_random_direction(net, dir_type='weights', ignore='biasbn', norm='filter'):
    """
    Draw a random, normalized direction matching either the weights or
    the full state_dict of *net*.

    Args:
        net: the given trained model
        dir_type: 'weights' or 'states' (states include BN running mean/var)
        ignore: 'biasbn' zeroes bias/BN entries of the direction
        norm: 'filter' | 'layer' | 'weight' | 'dlayer' | 'dfilter'

    Returns:
        A random direction with the same per-entry shapes as the chosen
        reference (weights or states).
    """
    if dir_type == 'weights':
        reference = get_weights(net)
        direction = get_random_weights(reference)
        normalize_directions_for_weights(direction, reference, norm, ignore)
    elif dir_type == 'states':
        reference = net.state_dict()
        direction = get_random_states(reference)
        normalize_directions_for_states(direction, reference, norm, ignore)
    return direction
# In[31]:
# Load pretrained DRM parameters and splice them into net.
pretrained_dict = torch.load('net_params_DRM.pkl')
# get state_dict
net_state_dict = net.state_dict()
# keep only keys that belong to net_state_dict
pretrained_dict_1 = {k: v for k, v in pretrained_dict.items() if k in net_state_dict}
# update dict
net_state_dict.update(pretrained_dict_1)
# set new dict back to net
net.load_state_dict(net_state_dict)
# In[32]:
# loss_function(x)
# In[33]:
# error_function(x)
# In[34]:
# Reference weights/state of the pretrained model (centre of the plot).
weights_temp = get_weights(net)
states_temp = net.state_dict()
# In[35]:
# print(weights_temp)
# In[36]:
# Sample the DGM loss on a 21x21 grid of perturbations around the
# pretrained parameters.
step_size = 0.001
grid = np.arange(-0.01, 0.01 + step_size, step_size)
loss_matrix = np.zeros((len(grid), len(grid)))
time_start = time.time()
for dx in grid:
    for dy in grid:
        # Exact float equality is safe: dx/dy are drawn from grid itself.
        itemindex_1 = np.argwhere(grid == dx)
        itemindex_2 = np.argwhere(grid == dy)
        # NOTE(review): these bind aliases, not copies, of the reference
        # weights/state.
        weights = weights_temp
        states = states_temp
        step = [dx, dy]
        # NOTE(review): a *new* random direction pair is drawn for every
        # grid point; loss-landscape plots normally draw the two
        # directions once, outside the loops — confirm this is intended.
        direction_1 = create_random_direction(net, dir_type='weights', ignore='biasbn', norm='filter')
        normalize_directions_for_states(direction_1, states, norm='filter', ignore='ignore')
        direction_2 = create_random_direction(net, dir_type='weights', ignore='biasbn', norm='filter')
        normalize_directions_for_states(direction_2, states, norm='filter', ignore='ignore')
        directions = [direction_1, direction_2]
        set_states(net, states, directions, step)
        loss_temp = loss_function(x)
        loss_matrix[itemindex_1[0][0], itemindex_2[0][0]] = loss_temp
        # Restore the pretrained parameters before the next grid point.
        # get state_dict
        net_state_dict = net.state_dict()
        # keep only keys that belong to net_state_dict
        pretrained_dict_1 = {k: v for k, v in pretrained_dict.items() if k in net_state_dict}
        # update dict
        net_state_dict.update(pretrained_dict_1)
        # set new dict back to net
        net.load_state_dict(net_state_dict)
        weights_temp = get_weights(net)
        states_temp = net.state_dict()
time_end = time.time()
print('total time is: ', time_end-time_start, 'seconds')
np.save("loss_matrix_DGM_DRM.npy",loss_matrix)
# In[37]:
# print(loss_matrix)
# In[38]:
# Loss at the unperturbed centre of the grid.
print(loss_matrix[10, 10])
# In[ ]:
# In[ ]:
|
import json
import psutil
from datetime import datetime as dt
from django.shortcuts import render
from django.contrib import admin
from django.conf import settings
from django import forms
from django.apps import apps
from .models import Natural
from django.urls import path
from pyecharts.charts import Bar, Gauge
from pyecharts import options as opts
from flyadmin.widget.forms import SelectBoxWidget, TimelineWidget, EditorWidget, DateTimeWidget, UploadImagesWidget, InputNumberWidget, UploadFileWidget, StepsWidget, StepsNormalWidget
from flyadmin.views.charts import bar, pie, line
class NaturalAdmin(admin.ModelAdmin):
    """Admin integration that replaces the Natural change list with a
    live system-resource dashboard (memory/disk/network pies plus a CPU
    trend line rendered with pyecharts)."""

    model = Natural

    def admin_view_natural(self, request):
        """Build the dashboard charts and render the xplots template."""
        # bytes -> gigabytes, one decimal place
        _f1 = lambda x:round(x/1024/1024/1024, 1)
        # Collect the live host metrics shown on the dashboard (do not modify).
        mem = psutil.virtual_memory()
        #cpu = psutil.cpu_percent()
        disk = psutil.disk_usage('/')
        network = psutil.net_io_counters()
        naturals = Natural.objects.order_by('create_time').all()
        print([_f1(i.ava) for i in naturals])
        c = line('cpu趋势(G)', [i.create_time.strftime('%Y-%m-%d %H') for i in naturals], {'占用比率':[i.ava for i in naturals]})
        p = pie("内存占比(G)", ('全部','可用'), (_f1(mem.total), _f1(mem.available)))
        d = pie("硬盘占比(G)", ('全部','使用', '可用'), (_f1(disk.total), _f1(disk.used), _f1(disk.free)))
        n = pie("网络情况(G)", ('接收','发送'), (_f1(network.bytes_recv), _f1(network.bytes_sent)))
        html = ['<el-row><div class="grid-content">']
        html.append('<div class="el-col el-col-8">{}</div>'.format(p))
        html.append('<div class="el-col el-col-8">{}</div>'.format(d))
        html.append('<div class="el-col el-col-8">{}</div>'.format(n))
        html.append('</div></el-row>')
        html.append('<el-row><div class="grid-content"><div class="el-col el-col-24">{}</div></div></el-row>'.format(c))
        xplots = ''.join(html)  # consumed by the template via locals()
        return render(request, "admin/xplots.html", locals())

    def changelist_view(self, request, extra_context=None):
        """Serve the dashboard instead of the default change list.

        Bug fix: Django invokes this hook with the keyword argument
        ``extra_context``; the original signature misspelled it as
        ``extra_content`` and would raise TypeError whenever the
        framework supplied it.
        """
        return self.admin_view_natural(request)
admin.site.register(Natural, NaturalAdmin)
|
import logging
import os
import time
from typing import Any, Dict, Tuple
from trezorlib import transport as trezor_transport
from trezorlib.transport import bridge as trezor_bridge
from tilapia.lib.conf import settings
from tilapia.lib.hardware import exceptions, proxy
from tilapia.lib.hardware.callbacks import helper
from tilapia.lib.hardware.callbacks import host as host_callback
from tilapia.lib.hardware.callbacks import terminal as terminal_callback
logger = logging.getLogger("app.hardware")
_CLIENTS: Dict[str, Tuple[proxy.HardwareProxyClient, int]] = {}
def enumerate_all_devices() -> Dict[str, trezor_transport.Transport]:
    """Map device path -> transport for every reachable hardware device.

    Prefers bridge enumeration; falls back to generic transport
    enumeration, and returns an empty mapping on total failure.
    """
    devices = []
    try:
        try:
            bridge_infos = trezor_bridge.call_bridge("enumerate").json()
        except Exception as e:
            logger.debug(f"Error in enumerating devices from bridge, try others. error: {e}", exc_info=True)
            devices = trezor_transport.enumerate_devices()
        else:
            is_legacy = trezor_bridge.is_legacy_bridge()
            devices = [trezor_bridge.BridgeTransport(info, is_legacy) for info in bridge_infos]
    except Exception as e:
        logger.exception(f"Error in enumerating devices. error: {e}")
        devices = []
    return {device.get_path(): device for device in devices}
def _create_client(device: trezor_transport.Transport) -> proxy.HardwareProxyClient:
    """Wrap *device* in a proxy client with the runtime-appropriate UI callback."""
    if settings.runtime == "host":
        ui_callback = host_callback.HostCallback()
    else:
        pin_on_device = os.environ.get("HARDWARE_PIN_ON_DEVICE") == "True"
        ui_callback = terminal_callback.TerminalCallback(
            always_prompt=True, pin_on_device=pin_on_device
        )
    return proxy.HardwareProxyClient(device, ui_callback)
def get_client(hardware_device_path: str, force_check: bool = False) -> proxy.HardwareProxyClient:
    """Return a (possibly cached) proxy client for the device at *hardware_device_path*.

    Cached entries carry a ~10-second freshness window; within it the
    client is returned without re-probing unless *force_check* is set.

    :raises exceptions.NoAvailableDevice: if the path no longer enumerates.
    """
    client, expired_at = _CLIENTS.get(hardware_device_path) or (None, None)
    # Fast path: fresh cache hit and the caller did not force a re-probe.
    if not force_check and expired_at is not None and expired_at > time.time():
        return client
    if not client:
        devices = enumerate_all_devices()
        devices = [device for path, device in devices.items() if path == hardware_device_path]
        client = _create_client(devices[0]) if devices else None
    if not client:
        raise exceptions.NoAvailableDevice()
    # Probe the device, then (re)cache the client with a fresh 10 s TTL.
    client.ensure_device()
    _CLIENTS[hardware_device_path] = (client, int(time.time() + 10))
    return client
def ping(hardware_device_path: str, message: str) -> str:
    """Echo *message* through the device to check connectivity."""
    return get_client(hardware_device_path).ping(message)


def get_feature(hardware_device_path: str, force_refresh: bool = False) -> dict:
    """Return the device feature dict, bypassing caches when *force_refresh*."""
    return get_client(hardware_device_path, force_check=force_refresh).get_feature(force_refresh)


def get_key_id(hardware_device_path: str) -> str:
    """Return the device's key identifier."""
    return get_client(hardware_device_path).get_key_id()


def apply_settings(hardware_device_path: str, settings: dict) -> bool:
    """Apply a settings dict to the device.

    NOTE(review): the ``settings`` parameter shadows the imported
    ``tilapia.lib.conf.settings`` module inside this function.
    """
    return get_client(hardware_device_path).apply_settings(settings)


def setup_mnemonic_on_device(
    hardware_device_path: str,
    language: str = "english",
    label: str = "OneKey",
    mnemonic_strength: int = 128,
) -> bool:
    """Generate and store a mnemonic directly on the device."""
    return get_client(hardware_device_path).setup_mnemonic_on_device(language, label, mnemonic_strength)


def setup_or_change_pin(hardware_device_path: str) -> bool:
    """Start the device's PIN setup/change flow."""
    return get_client(hardware_device_path).setup_or_change_pin()


def wipe_device(hardware_device_path: str) -> bool:
    """Factory-reset the device."""
    return get_client(hardware_device_path).wipe_device()


def dump_hardware_agent() -> dict:
    """Snapshot the hardware callback agent's state."""
    return helper.dump_agent()


def update_hardware_agent(attr_name: str, value: Any):
    """Set one attribute on the hardware callback agent."""
    helper.set_value_to_agent(attr_name, value)
|
import numpy as np
import pandas as pd
from src.biota_models.coral.model.coral_constants import CoralConstants
from src.biota_models.coral.model.coral_model import Coral
from src.core import RESHAPE
from src.core.common.space_time import DataReshape
class Photosynthesis:
    """Photosynthesis.

    Combines light, thermal and flow dependencies into a single
    photosynthetic efficiency, written to ``coral.photo_rate``.
    """

    def __init__(
        self, light_in, first_year, constants: CoralConstants = CoralConstants()
    ):
        """
        Photosynthetic efficiency based on photosynthetic dependencies.

        :param light_in: incoming light-intensity at the water-air interface [umol photons m-2 s-1]
        :param first_year: first year of the model simulation
        :param constants: coral constants (NOTE: the default instance is
            created once at import time and shared by every caller that
            omits this argument)

        :type light_in: float, list, tuple, numpy.ndarray
        :type first_year: bool
        """
        self.I0 = RESHAPE().variable2matrix(light_in, "time")
        self.first_year = first_year
        # Partial efficiencies, updated by the *_dependency methods below.
        self.pld = 1  # light dependency
        self.ptd = 1  # thermal dependency
        self.pfd = 1  # flow dependency
        self.constants = constants

    def photo_rate(self, coral, environment, year):
        """Photosynthetic efficiency.

        :param coral: coral animal
        :param environment: environmental conditions
        :param year: year of simulation

        :type coral: Coral
        :type environment: Environment
        :type year: int
        """
        # components
        self.light_dependency(coral, "qss")
        self.thermal_dependency(coral, environment, year)
        self.flow_dependency(coral)
        # combined
        coral.photo_rate = self.pld * self.ptd * self.pfd

    def light_dependency(self, coral, output):
        """Photosynthetic light dependency.

        :param coral: coral animal
        :param output: type of output; only "qss" (quasi-steady state)
            is currently implemented

        :type coral: Coral
        :type output: str
        """

        def photo_acclimation(x_old, param):
            """Photo-acclimation."""
            # input check
            params = ("Ik", "Pmax")
            if param not in params:
                message = f"{param} not in {params}."
                raise ValueError(message)
            # parameter definitions
            x_max = self.constants.ik_max if param == "Ik" else self.constants.pm_max
            beta_x = self.constants.betaI if param == "Ik" else self.constants.betaP
            # calculations
            xs = x_max * (coral.light / self.I0) ** beta_x
            if output == "qss":
                return xs
            elif output == "new":
                return xs + (x_old - xs) * np.exp(-self.constants.iota)

        # # parameter definitions
        if output == "qss":
            ik = photo_acclimation(None, "Ik")
            p_max = photo_acclimation(None, "Pmax")
        else:
            msg = f"Only the quasi-steady state solution is currently implemented; use key-word 'qss'."
            raise NotImplementedError(msg)
        # # calculations
        self.pld = p_max * (
            np.tanh(coral.light / ik) - np.tanh(self.constants.Icomp * self.I0 / ik)
        )

    def thermal_dependency(self, coral: Coral, env, year):
        """Photosynthetic thermal dependency.

        :param coral: coral animal
        :param env: environmental conditions
        :param year: year of simulation

        :type coral: Coral
        :type env: Environment
        :type year: int
        """
        _reshape = RESHAPE()

        def thermal_acc():
            """Thermal-acclimation: derive coral.Tlo/Thi from the rolling
            statistics of the monthly-mean temperature extremes."""
            if self.constants.tme:
                if self.first_year:
                    env.tmeMMMmin = (
                        pd.DataFrame(
                            data=pd.concat(
                                [env.temp_mmm["min"]] * _reshape.space, axis=1
                            ).values,
                            columns=[np.arange(_reshape.space)],
                        )
                        + coral.dTc
                    )
                    env.tmeMMMmax = (
                        pd.DataFrame(
                            data=pd.concat(
                                [env.temp_mmm["max"]] * _reshape.space, axis=1
                            ).values,
                            columns=[np.arange(_reshape.space)],
                        )
                        + coral.dTc
                    )
                else:
                    env.tmeMMMmin[env.tmeMMM.index == year] += coral.dTc
                    # NOTE(review): ``env.tmeMMm`` looks like a typo for
                    # ``env.tmeMMM`` (cf. the preceding line) and would
                    # raise AttributeError when this branch runs — confirm.
                    env.tmeMMMmax[env.tmeMMm.index == year] += coral.dTc
                mmm_min = env.tmeMMMmin[
                    np.logical_and(
                        env.tmeMMM.index < year,
                        env.tmeMMM.index >= year - int(self.constants.nn / coral.Csp),
                    )
                ]
                m_min = mmm_min.mean(axis=0)
                s_min = mmm_min.std(axis=0)
                mmm_max = env.tmeMMMmax[
                    np.logical_and(
                        env.tmeMMM.index < year,
                        env.tmeMMM.index >= year - int(self.constants.nn / coral.Csp),
                    )
                ]
                m_max = mmm_max.mean(axis=0)
                s_max = mmm_max.std(axis=0)
            else:
                mmm = env.temp_mmm[
                    np.logical_and(
                        env.temp_mmm.index < year,
                        env.temp_mmm.index >= year - int(self.constants.nn / coral.Csp),
                    )
                ]
                m_min, m_max = mmm.mean(axis=0)
                s_min, s_max = mmm.std(axis=0)
            coral.Tlo = m_min - self.constants.k_var * s_min
            coral.Thi = m_max + self.constants.k_var * s_max

        def adapted_temp():
            """Adapted temperature response."""

            def spec():
                """Specialisation term."""
                return 4e-4 * np.exp(-0.33 * (delta_temp - 10))

            response = -(coral.temp - coral.Tlo) * (
                (coral.temp - coral.Tlo) ** 2 - delta_temp ** 2
            )
            temp_cr = coral.Tlo - (1 / np.sqrt(3)) * delta_temp
            try:
                if self.constants.tme:
                    response[coral.temp <= temp_cr] = -(
                        (2 / (3 * np.sqrt(3))) * delta_temp[coral.temp <= temp_cr] ** 3
                    )
                else:
                    response[coral.temp <= temp_cr] = -(
                        (2 / (3 * np.sqrt(3))) * delta_temp ** 3
                    )
            except TypeError:
                # Scalar fallback when response is not indexable.
                # NOTE(review): unlike the array branches above, this
                # value is not negated — check whether the minus sign
                # was dropped here.
                if coral.temp <= temp_cr:
                    response = (2 / (3 * np.sqrt(3))) * delta_temp ** 3
            return response * spec()

        def thermal_env():
            """Thermal envelope (Arrhenius-type factor)."""
            return np.exp(
                (self.constants.Ea / self.constants.R) * (1 / 300 - 1 / temp_opt)
            )

        # # parameter definitions
        thermal_acc()
        delta_temp = coral.Thi - coral.Tlo
        temp_opt = coral.Tlo + (1 / np.sqrt(3)) * delta_temp
        # # calculations
        f1 = adapted_temp()
        f2 = thermal_env()
        self.ptd = f1 * f2

    def flow_dependency(self, coral: Coral):
        """Photosynthetic flow dependency.

        :param coral: coral animal
        :type coral: Coral
        """
        if self.constants.pfd:
            pfd = self.constants.pfd_min + (1 - self.constants.pfd_min) * np.tanh(
                2 * coral.ucm / self.constants.ucr
            )
            self.pfd = RESHAPE().variable2matrix(pfd, "space")
        else:
            self.pfd = 1
|
# MaixPy (K210) license-plate detector: runs a YOLOv2 kmodel on camera
# frames, draws detections on the LCD, and signals over UART1.
import sensor,image,lcd
import KPU as kpu
from fpioa_manager import fm
from machine import UART
from board import board_info
import utime

# Camera + LCD setup: 224x224 window matches the model's input size.
lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
sensor.run(1)

# Map UART1 to pins 15 (TX) / 17 (RX).
fm.register(board_info.PIN15,fm.fpioa.UART1_TX)
fm.register(board_info.PIN17,fm.fpioa.UART1_RX)
#fm.register(board_info.PIN5,fm.fpioa.UART1_TX)
#fm.register(board_info.PIN4,fm.fpioa.UART1_RX)
#uart_A = UART(UART.UART1, 115200, 8, None, 1, timeout=1000, read_buf_len=4096)
uart_A = UART(UART.UART1, 9600, 8, None, 1, timeout=1000, read_buf_len=4096)

# Load the YOLOv2 model from SD card; anchors must match training.
classes = ["license"]
task = kpu.load('/sd/m3.kmodel')
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828)
a = kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)

while(True):
    img = sensor.snapshot()
    #.rotation_corr(z_rotation=90.0)
    #a = img.pix_to_ai()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            a = img.draw_rectangle(i.rect(),color = (0, 255, 0))
            a = img.draw_string(i.x(),i.y(), classes[i.classid()], color=(255,0,0), scale=3)
            # NOTE(review): b'0xAA' sends the four ASCII characters
            # "0xAA"; if a single 0xAA byte was intended, it would be
            # b'\xAA' — confirm against the receiving side.
            uart_A.write(b'0xAA')
        a = lcd.display(img)
        a = lcd.draw_string(50, 50, "Detected", lcd.BLACK, lcd.WHITE)
        utime.sleep(5)
        #break
    else:
        a = lcd.display(img)
        #uart_A.write(b'0x55')

# Unreachable cleanup: the loop above never breaks (its break is
# commented out), so these lines never run.
a = kpu.deinit(task)
uart_A.deinit()
del uart_A
|
from django.db import models
from api.models.photo import Photo
from api.models.user import User, get_deleted_user
class AlbumPlace(models.Model):
    """Album that groups photos by geographic place, owned by one user."""

    title = models.CharField(max_length=512, db_index=True)
    photos = models.ManyToManyField(Photo)
    geolocation_level = models.IntegerField(db_index=True, null=True)
    favorited = models.BooleanField(default=False, db_index=True)
    # Keep the album when its owner is deleted: ownership is reassigned
    # to the sentinel "deleted" user instead of cascading.
    owner = models.ForeignKey(
        User, on_delete=models.SET(get_deleted_user), default=None
    )
    shared_to = models.ManyToManyField(User, related_name="album_place_shared_to")

    class Meta:
        unique_together = ("title", "owner")

    @property
    def cover_photos(self):
        """Up to four non-hidden photos used as the album cover."""
        return self.photos.filter(hidden=False)[:4]

    def __str__(self):
        return "%d: %s" % (self.id, self.title)
def get_album_place(title, owner):
    """Fetch (or create) the AlbumPlace for this (title, owner) pair."""
    album, _created = AlbumPlace.objects.get_or_create(title=title, owner=owner)
    return album
|
import gym
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, \
TYPE_CHECKING, Union
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.jax.jax_modelv2 import JAXModelV2
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy import TorchPolicy
from ray.rllib.utils import add_mixins, force_list, NullContextManager
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.framework import try_import_torch, try_import_jax
from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY
from ray.rllib.utils.torch_ops import convert_to_non_torch_type
from ray.rllib.utils.typing import ModelGradients, TensorType, \
TrainerConfigDict
if TYPE_CHECKING:
from ray.rllib.evaluation.episode import Episode # noqa
jax, _ = try_import_jax()
torch, _ = try_import_torch()
# TODO: (sven) Unify this with `build_tf_policy` as well.
@DeveloperAPI
def build_policy_class(
        name: str,
        framework: str,
        *,
        loss_fn: Optional[Callable[[
            Policy, ModelV2, Type[TorchDistributionWrapper], SampleBatch
        ], Union[TensorType, List[TensorType]]]],
        get_default_config: Optional[Callable[[], TrainerConfigDict]] = None,
        stats_fn: Optional[Callable[[Policy, SampleBatch], Dict[
            str, TensorType]]] = None,
        postprocess_fn: Optional[Callable[[
            Policy, SampleBatch, Optional[Dict[Any, SampleBatch]], Optional[
                "Episode"]
        ], SampleBatch]] = None,
        extra_action_out_fn: Optional[Callable[[
            Policy, Dict[str, TensorType], List[TensorType], ModelV2,
            TorchDistributionWrapper
        ], Dict[str, TensorType]]] = None,
        extra_grad_process_fn: Optional[Callable[[
            Policy, "torch.optim.Optimizer", TensorType
        ], Dict[str, TensorType]]] = None,
        # TODO: (sven) Replace "fetches" with "process".
        extra_learn_fetches_fn: Optional[Callable[[Policy], Dict[
            str, TensorType]]] = None,
        optimizer_fn: Optional[Callable[[Policy, TrainerConfigDict],
                                        "torch.optim.Optimizer"]] = None,
        validate_spaces: Optional[Callable[
            [Policy, gym.Space, gym.Space, TrainerConfigDict], None]] = None,
        before_init: Optional[Callable[
            [Policy, gym.Space, gym.Space, TrainerConfigDict], None]] = None,
        before_loss_init: Optional[Callable[[
            Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
        ], None]] = None,
        after_init: Optional[Callable[
            [Policy, gym.Space, gym.Space, TrainerConfigDict], None]] = None,
        _after_loss_init: Optional[Callable[[
            Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
        ], None]] = None,
        action_sampler_fn: Optional[Callable[[TensorType, List[
            TensorType]], Tuple[TensorType, TensorType]]] = None,
        action_distribution_fn: Optional[Callable[[
            Policy, ModelV2, TensorType, TensorType, TensorType
        ], Tuple[TensorType, type, List[TensorType]]]] = None,
        make_model: Optional[Callable[[
            Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
        ], ModelV2]] = None,
        make_model_and_action_dist: Optional[Callable[[
            Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
        ], Tuple[ModelV2, Type[TorchDistributionWrapper]]]] = None,
        compute_gradients_fn: Optional[Callable[[Policy, SampleBatch], Tuple[
            ModelGradients, dict]]] = None,
        apply_gradients_fn: Optional[Callable[
            [Policy, "torch.optim.Optimizer"], None]] = None,
        mixins: Optional[List[type]] = None,
        get_batch_divisibility_req: Optional[Callable[[Policy], int]] = None
) -> Type[TorchPolicy]:
    """Helper function for creating a new Policy class at runtime.

    Supports frameworks JAX and PyTorch.

    Args:
        name (str): name of the policy (e.g., "PPOTorchPolicy")
        framework (str): Either "jax" or "torch".
        loss_fn (Optional[Callable[[Policy, ModelV2,
            Type[TorchDistributionWrapper], SampleBatch], Union[TensorType,
            List[TensorType]]]]): Callable that returns a loss tensor.
        get_default_config (Optional[Callable[[None], TrainerConfigDict]]):
            Optional callable that returns the default config to merge with any
            overrides. If None, uses only(!) the user-provided
            PartialTrainerConfigDict as dict for this Policy.
        postprocess_fn (Optional[Callable[[Policy, SampleBatch,
            Optional[Dict[Any, SampleBatch]], Optional["Episode"]],
            SampleBatch]]): Optional callable for post-processing experience
            batches (called after the super's `postprocess_trajectory` method).
        stats_fn (Optional[Callable[[Policy, SampleBatch],
            Dict[str, TensorType]]]): Optional callable that returns a dict of
            values given the policy and training batch. If None,
            will use `TorchPolicy.extra_grad_info()` instead. The stats dict is
            used for logging (e.g. in TensorBoard).
        extra_action_out_fn (Optional[Callable[[Policy, Dict[str, TensorType],
            List[TensorType], ModelV2, TorchDistributionWrapper]], Dict[str,
            TensorType]]]): Optional callable that returns a dict of extra
            values to include in experiences. If None, no extra computations
            will be performed.
        extra_grad_process_fn (Optional[Callable[[Policy,
            "torch.optim.Optimizer", TensorType], Dict[str, TensorType]]]):
            Optional callable that is called after gradients are computed and
            returns a processing info dict. If None, will call the
            `TorchPolicy.extra_grad_process()` method instead.
        # TODO: (sven) dissolve naming mismatch between "learn" and "compute.."
        extra_learn_fetches_fn (Optional[Callable[[Policy],
            Dict[str, TensorType]]]): Optional callable that returns a dict of
            extra tensors from the policy after loss evaluation. If None,
            will call the `TorchPolicy.extra_compute_grad_fetches()` method
            instead.
        optimizer_fn (Optional[Callable[[Policy, TrainerConfigDict],
            "torch.optim.Optimizer"]]): Optional callable that returns a
            torch optimizer given the policy and config. If None, will call
            the `TorchPolicy.optimizer()` method instead (which returns a
            torch Adam optimizer).
        validate_spaces (Optional[Callable[[Policy, gym.Space, gym.Space,
            TrainerConfigDict], None]]): Optional callable that takes the
            Policy, observation_space, action_space, and config to check for
            correctness. If None, no spaces checking will be done.
        before_init (Optional[Callable[[Policy, gym.Space, gym.Space,
            TrainerConfigDict], None]]): Optional callable to run at the
            beginning of `Policy.__init__` that takes the same arguments as
            the Policy constructor. If None, this step will be skipped.
        before_loss_init (Optional[Callable[[Policy, gym.spaces.Space,
            gym.spaces.Space, TrainerConfigDict], None]]): Optional callable to
            run prior to loss init. If None, this step will be skipped.
        after_init (Optional[Callable[[Policy, gym.Space, gym.Space,
            TrainerConfigDict], None]]): DEPRECATED: Use `before_loss_init`
            instead.
        _after_loss_init (Optional[Callable[[Policy, gym.spaces.Space,
            gym.spaces.Space, TrainerConfigDict], None]]): Optional callable to
            run after the loss init. If None, this step will be skipped.
            This will be deprecated at some point and renamed into `after_init`
            to match `build_tf_policy()` behavior.
        action_sampler_fn (Optional[Callable[[TensorType, List[TensorType]],
            Tuple[TensorType, TensorType]]]): Optional callable returning a
            sampled action and its log-likelihood given some (obs and state)
            inputs. If None, will either use `action_distribution_fn` or
            compute actions by calling self.model, then sampling from the
            so parameterized action distribution.
        action_distribution_fn (Optional[Callable[[Policy, ModelV2, TensorType,
            TensorType, TensorType], Tuple[TensorType,
            Type[TorchDistributionWrapper], List[TensorType]]]]): A callable
            that takes the Policy, Model, the observation batch, an
            explore-flag, a timestep, and an is_training flag and returns a
            tuple of a) distribution inputs (parameters), b) a dist-class to
            generate an action distribution object from, and c) internal-state
            outputs (empty list if not applicable). If None, will either use
            `action_sampler_fn` or compute actions by calling self.model,
            then sampling from the parameterized action distribution.
        make_model (Optional[Callable[[Policy, gym.spaces.Space,
            gym.spaces.Space, TrainerConfigDict], ModelV2]]): Optional callable
            that takes the same arguments as Policy.__init__ and returns a
            model instance. The distribution class will be determined
            automatically. Note: Only one of `make_model` or
            `make_model_and_action_dist` should be provided. If both are None,
            a default Model will be created.
        make_model_and_action_dist (Optional[Callable[[Policy,
            gym.spaces.Space, gym.spaces.Space, TrainerConfigDict],
            Tuple[ModelV2, Type[TorchDistributionWrapper]]]]): Optional
            callable that takes the same arguments as Policy.__init__ and
            returns a tuple of model instance and torch action distribution
            class.
            Note: Only one of `make_model` or `make_model_and_action_dist`
            should be provided. If both are None, a default Model will be
            created.
        compute_gradients_fn (Optional[Callable[
            [Policy, SampleBatch], Tuple[ModelGradients, dict]]]): Optional
            callable that takes the sampled batch and computes the gradients
            w.r.t. the loss function.
            If None, will call the `TorchPolicy.compute_gradients()` method
            instead.
        apply_gradients_fn (Optional[Callable[[Policy,
            "torch.optim.Optimizer"], None]]): Optional callable that
            takes a grads list and applies these to the Model's parameters.
            If None, will call the `TorchPolicy.apply_gradients()` method
            instead.
        mixins (Optional[List[type]]): Optional list of any class mixins for
            the returned policy class. These mixins will be applied in order
            and will have higher precedence than the TorchPolicy class.
        get_batch_divisibility_req (Optional[Callable[[Policy], int]]):
            Optional callable that returns the divisibility requirement for
            sample batches. If None, will assume a value of 1.

    Returns:
        Type[TorchPolicy]: TorchPolicy child class constructed from the
            specified args.
    """
    # Capture all arguments so `with_updates()` can rebuild a variant class.
    original_kwargs = locals().copy()
    parent_cls = TorchPolicy
    base = add_mixins(parent_cls, mixins)

    class policy_cls(base):
        def __init__(self, obs_space, action_space, config):
            # Set up the config from possible default-config fn and given
            # config arg.
            if get_default_config:
                config = dict(get_default_config(), **config)
            self.config = config

            # Set the DL framework for this Policy.
            self.framework = self.config["framework"] = framework

            # Validate observation- and action-spaces.
            if validate_spaces:
                validate_spaces(self, obs_space, action_space, self.config)

            # Do some pre-initialization steps.
            if before_init:
                before_init(self, obs_space, action_space, self.config)

            # Model is customized (use default action dist class).
            if make_model:
                assert make_model_and_action_dist is None, \
                    "Either `make_model` or `make_model_and_action_dist`" \
                    " must be None!"
                self.model = make_model(self, obs_space, action_space, config)
                dist_class, _ = ModelCatalog.get_action_dist(
                    action_space, self.config["model"], framework=framework)
            # Model and action dist class are customized.
            elif make_model_and_action_dist:
                self.model, dist_class = make_model_and_action_dist(
                    self, obs_space, action_space, config)
            # Use default model and default action dist.
            else:
                dist_class, logit_dim = ModelCatalog.get_action_dist(
                    action_space, self.config["model"], framework=framework)
                self.model = ModelCatalog.get_model_v2(
                    obs_space=obs_space,
                    action_space=action_space,
                    num_outputs=logit_dim,
                    model_config=self.config["model"],
                    framework=framework)

            # Make sure, we passed in a correct Model factory.
            model_cls = TorchModelV2 if framework == "torch" else JAXModelV2
            # BUG FIX: the old message always said "TorchModelV2", even when
            # `framework == "jax"` requires a JAXModelV2.
            assert isinstance(self.model, model_cls), \
                "ERROR: Generated Model must be a {} object!".format(
                    model_cls.__name__)

            # Call the framework-specific Policy constructor.
            self.parent_cls = parent_cls
            self.parent_cls.__init__(
                self,
                observation_space=obs_space,
                action_space=action_space,
                config=config,
                model=self.model,
                loss=None if self.config["in_evaluation"] else loss_fn,
                action_distribution_class=dist_class,
                action_sampler_fn=action_sampler_fn,
                action_distribution_fn=action_distribution_fn,
                max_seq_len=config["model"]["max_seq_len"],
                get_batch_divisibility_req=get_batch_divisibility_req,
            )

            # Merge Model's view requirements into Policy's.
            self.view_requirements.update(self.model.view_requirements)

            # `after_init` is the deprecated alias of `before_loss_init`.
            _before_loss_init = before_loss_init or after_init
            if _before_loss_init:
                _before_loss_init(self, self.observation_space,
                                  self.action_space, config)

            # Perform test runs through postprocessing- and loss functions.
            self._initialize_loss_from_dummy_batch(
                auto_remove_unneeded_view_reqs=True,
                stats_fn=None if self.config["in_evaluation"] else stats_fn,
            )

            if _after_loss_init:
                _after_loss_init(self, obs_space, action_space, config)

            # Got to reset global_timestep again after this fake run-through.
            self.global_timestep = 0

        @override(Policy)
        def postprocess_trajectory(self,
                                   sample_batch,
                                   other_agent_batches=None,
                                   episode=None):
            # Do all post-processing always with no_grad().
            # Not using this here will introduce a memory leak
            # in torch (issue #6962).
            with self._no_grad_context():
                # Call super's postprocess_trajectory first.
                sample_batch = super().postprocess_trajectory(
                    sample_batch, other_agent_batches, episode)
                if postprocess_fn:
                    return postprocess_fn(self, sample_batch,
                                          other_agent_batches, episode)
                return sample_batch

        @override(parent_cls)
        def extra_grad_process(self, optimizer, loss):
            """Called after optimizer.zero_grad() and loss.backward() calls.

            Allows for gradient processing before optimizer.step() is called.
            E.g. for gradient clipping.
            """
            if extra_grad_process_fn:
                return extra_grad_process_fn(self, optimizer, loss)
            else:
                return parent_cls.extra_grad_process(self, optimizer, loss)

        @override(parent_cls)
        def extra_compute_grad_fetches(self):
            if extra_learn_fetches_fn:
                fetches = convert_to_non_torch_type(
                    extra_learn_fetches_fn(self))
                # Auto-add empty learner stats dict if needed.
                return dict({LEARNER_STATS_KEY: {}}, **fetches)
            else:
                return parent_cls.extra_compute_grad_fetches(self)

        @override(parent_cls)
        def compute_gradients(self, batch):
            if compute_gradients_fn:
                return compute_gradients_fn(self, batch)
            else:
                return parent_cls.compute_gradients(self, batch)

        @override(parent_cls)
        def apply_gradients(self, gradients):
            if apply_gradients_fn:
                apply_gradients_fn(self, gradients)
            else:
                parent_cls.apply_gradients(self, gradients)

        @override(parent_cls)
        def extra_action_out(self, input_dict, state_batches, model,
                             action_dist):
            with self._no_grad_context():
                if extra_action_out_fn:
                    stats_dict = extra_action_out_fn(
                        self, input_dict, state_batches, model, action_dist)
                else:
                    stats_dict = parent_cls.extra_action_out(
                        self, input_dict, state_batches, model, action_dist)
                return self._convert_to_non_torch_type(stats_dict)

        @override(parent_cls)
        def optimizer(self):
            if optimizer_fn:
                optimizers = optimizer_fn(self, self.config)
            else:
                optimizers = parent_cls.optimizer(self)
            optimizers = force_list(optimizers)
            # Let the exploration object wrap/extend the optimizers if needed.
            if getattr(self, "exploration", None):
                optimizers = self.exploration.get_exploration_optimizer(
                    optimizers)
            return optimizers

        @override(parent_cls)
        def extra_grad_info(self, train_batch):
            with self._no_grad_context():
                if stats_fn:
                    stats_dict = stats_fn(self, train_batch)
                else:
                    stats_dict = self.parent_cls.extra_grad_info(
                        self, train_batch)
                return self._convert_to_non_torch_type(stats_dict)

        def _no_grad_context(self):
            # JAX has no grad-tape to disable; use a no-op context there.
            if self.framework == "torch":
                return torch.no_grad()
            return NullContextManager()

        def _convert_to_non_torch_type(self, data):
            if self.framework == "torch":
                return convert_to_non_torch_type(data)
            return data

    def with_updates(**overrides):
        """Creates a Torch|JAXPolicy cls based on settings of another one.

        Keyword Args:
            **overrides: The settings (passed into `build_torch_policy`) that
                should be different from the class that this method is called
                on.

        Returns:
            type: A new Torch|JAXPolicy sub-class.

        Examples:
        >> MySpecialDQNPolicyClass = DQNTorchPolicy.with_updates(
        ..    name="MySpecialDQNPolicyClass",
        ..    loss_function=[some_new_loss_function],
        .. )
        """
        return build_policy_class(**dict(original_kwargs, **overrides))

    policy_cls.with_updates = staticmethod(with_updates)
    policy_cls.__name__ = name
    policy_cls.__qualname__ = name
    return policy_cls
|
from hummingbird.physics.fixed_wing_dynamics import FixedWingDynamics
# from hummingbird.physics.mav_dynamics import MavDynamics
from hummingbird.physics.sensors.sensors import Sensors
from hummingbird.parameters.aerosonde_parameters import MavParameters
from hummingbird.parameters.sensor_parameters import SensorParameters
from hummingbird.parameters.simulation_parameters import SimulationParameters
class FixedWing:
    """Fixed-wing MAV entity bundling the dynamics model and its sensors."""

    # Shared, class-level simulation timing parameters.
    sim_p = SimulationParameters()

    def __init__(self,
                 mav_p=MavParameters(),
                 sensor_p=SensorParameters(),
                 obs_p=None,
                 pfollow_p=None,
                 pmanager_p=None,
                 pplanner_p=None,
                 dt_dynamics=sim_p.dt_simulation,
                 dt_simu=sim_p.dt_simulation,
                 # BUG FIX: previously defaulted to the whole SimulationParameters
                 # object (`sim_p`) instead of a timestep — aligned with
                 # `dt_dynamics` / `dt_simu`.
                 dt_sensors=sim_p.dt_simulation):
        # NOTE(review): `mav_p` and `sensor_p` defaults are single instances
        # created at class-definition time and shared by all default-constructed
        # FixedWing objects; `obs_p`/`pfollow_p`/`pmanager_p`/`pplanner_p` and
        # the dt_* arguments are currently unused in this constructor.
        self.dynamics = FixedWingDynamics()
        self.sensors = Sensors(mav_p=mav_p,
                               sensor_p=sensor_p,
                               dt_simulation=self.sim_p.dt_simulation,
                               initial_state=self.dynamics.true_state,
                               initial_forces=self.dynamics._forces)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 19:59:07 2021
@author: Alexander Southan
"""
import numpy as np
import unittest
from src.pyController import pid_control
class TestController(unittest.TestCase):
    """Tests for src.pyController.pid_control."""

    def test_pid(self):
        # TODO: placeholder — no assertions implemented for pid_control yet.
        pass
|
from mock import patch
from binascii import unhexlify
from tests.serial_mock import SerialMock
from .packets import BGAPIPacketBuilder
def uuid_to_bytearray(uuid_str):
    """Convert a dashed UUID string to its raw binary representation.

    uuid_str -- UUID in the form "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX".

    Returns the packed binary form of the UUID.
    """
    hex_digits = uuid_str.replace('-', '')
    return unhexlify(hex_digits)
class MockBGAPISerialDevice(object):
    """Mock BGAPI BLE adapter behind a fake serial port.

    Patches `serial.Serial` so code under test talks to a SerialMock, and
    lets tests "stage" the response/event packets the fake adapter should
    emit for each BGAPI operation.
    """

    def __init__(self, serial_port_name='mock'):
        self.serial_port_name = serial_port_name
        self.mocked_serial = SerialMock(self.serial_port_name, 0.25)
        # BUG FIX: keep the patcher object (not the mock returned by
        # `.start()`), otherwise `stop()` silently never unpatches.
        self.patcher = patch('serial.Serial',
                             return_value=self.mocked_serial)
        self.patcher.start()

    def stop(self):
        """Undo the serial.Serial patch."""
        self.patcher.stop()

    @staticmethod
    def _get_connection_status_flags_byte(flags):
        """Pack connection-status flag names into the BGAPI flags bitmask."""
        flags_byte = 0x00
        if 'connected' in flags:
            flags_byte |= 0x01
        if 'encrypted' in flags:
            flags_byte |= 0x02
        if 'completed' in flags:
            flags_byte |= 0x04
        if 'parameters_change' in flags:
            flags_byte |= 0x08
        return flags_byte

    def stage_disconnected_by_remote(
            self, connection_handle=0x00):
        """Stage a disconnect event initiated by the remote side."""
        # Stage ble_evt_connection_disconnected (terminated by remote user)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.connection_disconnected(
                connection_handle, 0x0213))

    def stage_disconnect_packets(self, connected, fail, connection_handle=0x00):
        """Stage the response/event sequence for a local disconnect."""
        if connected:
            if fail:
                raise NotImplementedError()
            # Stage ble_rsp_connection_disconnect (success)
            self.mocked_serial.stage_output(
                BGAPIPacketBuilder.connection_disconnect(
                    connection_handle, 0x0000))
            # Stage ble_evt_connection_disconnected (success by local user)
            self.mocked_serial.stage_output(
                BGAPIPacketBuilder.connection_disconnected(
                    connection_handle, 0x0000))
        else:  # not connected always fails
            # Stage ble_rsp_connection_disconnect (fail, not connected)
            self.mocked_serial.stage_output(
                BGAPIPacketBuilder.connection_disconnect(
                    connection_handle, 0x0186))

    def stage_run_packets(self, connection_handle=0x00):
        """Stage the packets emitted during adapter start-up/reset."""
        # Stage ble_rsp_connection_disconnect (not connected, fail)
        self.stage_disconnect_packets(False, True)
        # Stage ble_rsp_gap_set_mode (success)
        self.mocked_serial.stage_output(BGAPIPacketBuilder.gap_set_mode(0x0000))
        # Stage ble_rsp_gap_end_procedure (fail, device in wrong state)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.gap_end_procedure(0x0181))
        # Stage ble_rsp_sm_set_bondable_mode (always success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.sm_set_bondable_mode())

    def stage_connect_packets(self, addr, flags, connection_handle=0x00):
        """Stage a successful direct-connect exchange to `addr`."""
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.sm_set_bondable_mode())
        # Stage ble_rsp_gap_connect_direct (success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.gap_connect_direct(connection_handle, 0x0000))
        # Stage ble_evt_connection_status
        flags_byte = self._get_connection_status_flags_byte(flags)
        self.mocked_serial.stage_output(BGAPIPacketBuilder.connection_status(
            addr, flags_byte, connection_handle, 0,
            0x0014, 0x0006, 0x0000, 0xFF))

    def stage_get_rssi_packets(self, connection_handle=0x00,
                               rssi=-80):
        """Stage the RSSI query response."""
        # Stage ble_rsp_connection_get_rssi
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.connection_get_rssi(connection_handle, rssi))

    def stage_bond_packets(self, addr, flags,
                           connection_handle=0x00, bond_handle=0x01):
        """Stage a successful bonding (encryption start) exchange."""
        # Stage ble_rsp_sm_set_bondable_mode (always success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.sm_set_bondable_mode())
        # Stage ble_rsp_sm_encrypt_start (success)
        self.mocked_serial.stage_output(BGAPIPacketBuilder.sm_encrypt_start(
            connection_handle, 0x0000))
        # Stage ble_evt_sm_bond_status
        self.mocked_serial.stage_output(BGAPIPacketBuilder.sm_bond_status(
            bond_handle, 0x00, 0x00, 0x00))
        # Stage ble_evt_connection_status
        flags_byte = self._get_connection_status_flags_byte(flags)
        self.mocked_serial.stage_output(BGAPIPacketBuilder.connection_status(
            addr, flags_byte, connection_handle, 0,
            0x0014, 0x0006, 0x0000, 0xFF))

    def stage_clear_bonds_packets(
            self, bonds, disconnects=False):
        """bonds -- list of 8-bit integer bond handles"""
        if disconnects:
            self.stage_disconnected_by_remote()
        # Stage ble_rsp_get_bonds
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.sm_get_bonds(len(bonds)))
        # Stage ble_evt_sm_bond_status (bond handle)
        for b in bonds:
            if disconnects:
                self.stage_disconnected_by_remote()
            self.mocked_serial.stage_output(BGAPIPacketBuilder.sm_bond_status(
                b, 0x00, 0x00, 0x00))
        # Stage ble_rsp_sm_delete_bonding (success)
        for b in bonds:
            if disconnects:
                self.stage_disconnected_by_remote()
            self.mocked_serial.stage_output(
                BGAPIPacketBuilder.sm_delete_bonding(0x0000))

    def stage_scan_packets(self, scan_responses=None):
        """Stage a scan cycle yielding the given scan-response dicts."""
        # BUG FIX: avoid a mutable default argument.
        if scan_responses is None:
            scan_responses = []
        # Stage ble_rsp_gap_set_scan_parameters (success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.gap_set_scan_parameters(0x0000))
        # Stage ble_rsp_gap_discover (success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.gap_discover(0x0000))
        for srp in scan_responses:
            # Stage ble_evt_gap_scan_response
            self.mocked_serial.stage_output(
                BGAPIPacketBuilder.gap_scan_response(
                    srp['rssi'], srp['packet_type'], srp['bd_addr'],
                    srp['addr_type'], srp['bond'],
                    [len(srp['data']) + 1] + srp['data']))
        # Stage ble_rsp_gap_end_procedure (success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.gap_end_procedure(0x0000))

    def stage_discover_characteristics_packets(
            self, uuid_handle_list, connection_handle=0x00):
        """uuid_handle_list -- flat [uuid_str, handle, uuid_str, handle, ...]"""
        # Stage ble_rsp_attclient_find_information (success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.attclient_find_information(
                connection_handle, 0x0000))
        # BUG FIX: use integer division — `len(...)/2` is a float on Python 3
        # and raises a TypeError inside range().
        for i in range(len(uuid_handle_list) // 2):
            uuid = uuid_to_bytearray(uuid_handle_list[2 * i])
            handle = uuid_handle_list[2 * i + 1]
            # Stage ble_evt_attclient_find_information_found
            u = [len(uuid) + 1]
            # BUG FIX: iterate byte values directly (bytearray yields ints on
            # both Python 2 and 3); `ord()` on ints fails on Python 3.
            self.mocked_serial.stage_output(
                BGAPIPacketBuilder.attclient_find_information_found(
                    connection_handle, handle,
                    (u + list(reversed(bytearray(uuid))))))
        # Stage ble_evt_attclient_procedure_completed (success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.attclient_procedure_completed(
                connection_handle, 0x0000, 0xFFFF))

    def stage_char_read_packets(
            self, att_handle, att_type, value, connection_handle=0x00):
        """Stage a characteristic read returning `value` (list of bytes)."""
        # Stage ble_rsp_attclient_read_by_handle (success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.attclient_read_by_handle(
                connection_handle, 0x0000))
        # Stage ble_evt_attclient_attribute_value
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.attclient_attribute_value(
                connection_handle, att_handle, att_type, [len(value) + 1] + value))

    def stage_char_write_packets(
            self, handle, value, connection_handle=0x00):
        """Stage a successful characteristic write."""
        # Stage ble_rsp_attclient_attribute_write (success)
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.attclient_attribute_write(
                connection_handle, 0x0000))
        # Stage ble_evt_attclient_procedure_completed
        self.mocked_serial.stage_output(
            BGAPIPacketBuilder.attclient_procedure_completed(
                connection_handle, 0x0000, handle))

    def stage_indication_packets(
            self, handle, packet_values, connection_handle=0x00):
        """Stage one attribute-value event per entry in `packet_values`."""
        # Stage ble_evt_attclient_attribute_value
        for value in packet_values:
            val = list(value)
            self.mocked_serial.stage_output(
                BGAPIPacketBuilder.attclient_attribute_value(
                    connection_handle, handle, 0x00, value=[len(val) + 1] + val))
|
#
# Copyright (C) 2020 Satoru SATOH <satoru.satoh@gmail.com>.
# SPDX-License-Identifier: MIT
#
# pylint: disable=missing-function-docstring
#
"""User and group tests.
"""
import os
import pwd
import infraspec.common.user_group as TT
# Resolve the current login name. os.getlogin() raises OSError when the
# process has no controlling terminal (e.g. under CI or cron), so fall back
# to the passwd entry of the effective uid.
try:
    USERNAME = os.getlogin()
except OSError:
    USERNAME = pwd.getpwuid(os.getuid()).pw_name
def test_get_all_groups_itr():
    """The group iterator must yield at least one group."""
    groups = list(TT.get_all_groups_itr())
    assert groups
def test_get_all_groups():
    """get_all_groups returns a non-empty list that includes our own gid."""
    groups = TT.get_all_groups()
    assert groups
    own_gid = os.getgid()
    assert any(grp.get("gid") == own_gid for grp in groups)
    TT.get_all_groups.cache_clear()
def test_get_user_by_name():
    """Looking up the current user by name succeeds."""
    user = TT.get_user_by_name(USERNAME)
    assert user
    TT.get_user_by_name.cache_clear()
def test_get_user_by_name_with_group():
    """With group=True the user record carries a non-empty group list."""
    user = TT.get_user_by_name(USERNAME, group=True)
    assert user
    assert user["groups"]
    TT.get_user_by_name.cache_clear()
def test_get_group_by_name():
    """Every group of the current user can be looked up by name."""
    user = TT.get_user_by_name(USERNAME, group=True)
    groups = user["groups"]
    assert groups
    for group_name in groups:
        assert TT.get_group_by_name(group_name)
    TT.get_user_by_name.cache_clear()
    TT.get_group_by_name.cache_clear()
# vim:sw=4:ts=4:et:
|
"""
Data loader for everything. Word vectors are loaded later.
1. Words -> Attributes (use AttributesDataLoader)
2. Imsitu + Attributes -> imsitu labels (use ImsituAttributesDataLoader)
4. Definitions dataset -> counts (use DefinitionsDataLoader)
"""
import os
import random
import numpy as np
import pandas as pd
from config import ATTRIBUTES_PATH, ATTRIBUTES_SPLIT, IMSITU_VERBS, GLOVE_PATH, GLOVE_TYPE, \
DEFNS_PATH, IMSITU_VAL_LIST, DATA_PATH
import torch
from torch.autograd import Variable
import zipfile
from six.moves.urllib.request import urlretrieve
from tqdm import trange, tqdm
### From older torchtext
URL = {
'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',
'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',
'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip'
}
def reporthook(t):
    """Wrap a tqdm bar `t` as an urlretrieve-style progress callback.

    Adapted from https://github.com/tqdm/tqdm
    """
    last_block = [0]  # closure cell: last block index seen

    def inner(b=1, bsize=1, tsize=None):
        """
        b: int, optional
            Number of blocks just transferred [default: 1].
        bsize: int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize: int, optional
            Total size (in tqdm units). If [default: None] remains unchanged.
        """
        if tsize is not None:
            t.total = tsize
        delta_blocks = b - last_block[0]
        t.update(delta_blocks * bsize)
        last_block[0] = b

    return inner
def load_word_vectors(root, wv_type, dim):
    """Load word vectors from a path, trying .pt, .txt, and .zip extensions.

    :param root: directory the vector files live in (created when downloading).
    :param wv_type: vector family name, e.g. a key of URL such as 'glove.6B',
        or a local file prefix.
    :param dim: dimensionality as int (converted to e.g. '300d') or string.
    :return: whatever was serialized into the cached '<fname>.pt' file —
        callers expect a (wv_dict, wv_arr, wv_size) tuple.
    """
    if isinstance(dim, int):
        dim = str(dim) + 'd'
    fname = os.path.join(root, wv_type + '.' + dim)
    # Fast path: a pre-serialized torch file next to the raw vectors.
    if os.path.isfile(fname + '.pt'):
        fname_pt = fname + '.pt'
        print('loading word vectors from', fname_pt)
        return torch.load(fname_pt)
    if os.path.isfile(fname + '.txt'):
        fname_txt = fname + '.txt'
        # NOTE(review): this handle is never closed, and after reading the
        # lines into `cm` the function falls through and implicitly returns
        # None — the .txt parsing branch appears truncated; verify callers.
        cm = open(fname_txt, 'rb')
        cm = [line for line in cm]
    elif os.path.basename(wv_type) in URL:
        # No local copy: download the zip, extract it, then retry loading.
        url = URL[wv_type]
        print('downloading word vectors from {}'.format(url))
        filename = os.path.basename(fname)
        if not os.path.exists(root):
            os.makedirs(root)
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
            fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))
            with zipfile.ZipFile(fname, "r") as zf:
                print('extracting word vectors into {}'.format(root))
                zf.extractall(root)
        if not os.path.isfile(fname + '.txt'):
            raise RuntimeError('no word vectors of requested dimension found')
        # Recurse once: the extracted .txt (or .pt) is now on disk.
        return load_word_vectors(root, wv_type, dim)
    else:
        raise RuntimeError('unable to load word vectors')
#######
# Seed both numpy's and Python's RNGs so splits/shuffles are reproducible.
np.random.seed(123456)
random.seed(123)
# Attribute column names shared by the loaders below (e.g. used to build
# `Attributes.domains` and `Attributes.atts_matrix`).
COLUMNS = ['intrans', 'trans_pers', 'trans_obj', 'atomicity', 'energy',
           'time', 'solitary', 'bodyparts_Arms', 'bodyparts_Head',
           'bodyparts_Legs', 'bodyparts_Torso', 'bodyparts_other',
           'intrans_effect_0', 'intrans_effect_1', 'intrans_effect_2',
           'intrans_effect_3', 'trans_obj_effect_0', 'trans_obj_effect_1',
           'trans_obj_effect_2', 'trans_obj_effect_3', 'trans_pers_effect_0',
           'trans_pers_effect_1', 'trans_pers_effect_2', 'trans_pers_effect_3']
def invert_permutation(p):
    '''Invert a permutation p of 0, 1, ..., len(p)-1.

    Returns a dict s where s[x] is the index of x in p.
    '''
    return {value: idx for idx, value in enumerate(p)}
def get_lemma_to_infinitive():
    """Map each lemmatized imsitu verb to its original (non-lemmatized) form."""
    with open(IMSITU_VERBS, 'r') as f:
        lemmas = f.read().splitlines()
    with open(IMSITU_VAL_LIST, 'r') as f:
        idx_to_verb = {int(line.split(' ')[1]): line.split('_')[0]
                       for line in f.read().splitlines()}
    # Order the non-lemmatized verbs by their imsitu index 0..503.
    infinitives = [idx_to_verb[i] for i in range(504)]
    l2f = dict(zip(lemmas, infinitives))
    assert len(l2f) == 504
    return l2f
def _load_imsitu_verbs():
    """
    :return: the list of 504 imsitu verbs, one per line of IMSITU_VERBS
    """
    with open(IMSITU_VERBS, 'r') as f:
        imsitu_verbs = f.read().splitlines()
    assert len(imsitu_verbs) == 504
    # assert imsitu_verbs == sorted(imsitu_verbs)
    return imsitu_verbs
def _load_attributes(imsitu_only=False):
    """
    Load the verb-attribute table merged with the train/val/test split info.

    :param imsitu_only: if true, only return data for (single-word) verbs in
        imsitu, reordered to follow the canonical imsitu verb list.
    :return: a pandas dataframe containing attributes along with split info
    """
    attributes_df = pd.read_csv(ATTRIBUTES_PATH)
    split_df = pd.read_csv(ATTRIBUTES_SPLIT)
    merged_df = pd.merge(split_df, attributes_df, on='verb', how='inner')
    if imsitu_only:
        # Keep only in-imsitu rows whose template is a single word.
        imsitu_part = merged_df[merged_df['in_imsitu'] & ~merged_df['template'].str.contains(' ')]
        merged_df = imsitu_part.reset_index(drop=True)
        # permute with imsitu verbs
        imsitu_verbs = _load_imsitu_verbs()
        for v in imsitu_verbs:
            if v not in list(merged_df.template):
                print("NO {}".format(v))
        merged_df = pd.DataFrame([
            merged_df.iloc[merged_df[merged_df.template == v].index[0]].T.rename(idx) for idx, v in enumerate(imsitu_verbs)
        ])
    # Remove the in_imsitu and verb information (only templates are relevant).
    # FIX: use the axis= keyword — the positional axis argument to drop()
    # is deprecated and removed in pandas >= 2.0.
    merged_df = merged_df.drop(['in_imsitu', 'verb'], axis=1)
    return merged_df
def attributes_split(imsitu_only=False):
    """
    Split the attribute table into train/val/test frames indexed by template.

    :param imsitu_only: if true, only return data for verbs in imsitu
    :return: train, test, val dataframes
    """
    df = _load_attributes(imsitu_only).reset_index()
    # FIX: use the axis= keyword — the positional axis argument to drop()
    # is deprecated and removed in pandas >= 2.0.
    split_cols = ['train', 'val', 'test']
    train_atts = df[df['train']].drop(split_cols, axis=1).set_index('template')
    val_atts = df[df['val']].drop(split_cols, axis=1).set_index('template')
    test_atts = df[df['test']].drop(split_cols, axis=1).set_index('template')
    return train_atts, val_atts, test_atts
def _load_defns(atts_df, is_test=False):
    """
    Loads a dataframe with the definition joined to the attributes.

    At test time, we only use the first definition per template.
    Importantly, some of the templates might lack definitions. To avoid this problem, we
    drop the particle if this occurs. This works for all of the verbs in the test set :)
    However, it means that some training verbs (such as "Unpocket") can't be used because they
    don't have definitions.

    :param atts_df: Dataframe with attributes
    :param is_test: If true, we'll drop everything except the first definition.
    :return: A dataframe with the definitions.
    """
    verb_defns = pd.read_csv(DEFNS_PATH)
    if is_test:
        # Keep only the first definition per template at test time.
        verb_defns = verb_defns.groupby('template').first().reset_index()
    # Some phrases aren't going to have definitions. The fix is to drop off the
    # particle...
    verbs_with_defns = set(verb_defns['template'])
    verbs_we_want = set(atts_df.index)
    for v in (verbs_we_want - verbs_with_defns):
        if len(v.split(' ')) == 1 and is_test:
            raise ValueError("{} has no definition".format(v))
        else:
            # Reuse the head verb's definitions for the particle phrase.
            append_defns = verb_defns[verb_defns['template'] == v.split(' ')[0]].copy()
            append_defns['template'] = v
            verb_defns = pd.concat((verb_defns, append_defns), ignore_index=True)
    missing_verbs = verbs_we_want - set(verb_defns['template'])
    if len(missing_verbs) != 0:
        if is_test:
            raise ValueError("Some verbs are missing: {}".format(missing_verbs))
        else:
            print("Some verbs are missing definitions: {}".format(missing_verbs))
    joined_df = verb_defns.join(atts_df, 'template', how='inner')
    # FIX: use the axis= keyword — the positional axis argument to drop()
    # is deprecated and removed in pandas >= 2.0.
    joined_df = joined_df.drop(['POS'], axis=1).set_index('template')
    return joined_df
def _get_template_emb(template, wv_dict, wv_arr):
"""
Ideally, we'll get the word embedding directly. Otherwise, presumably it's a multiword
expression, and we'll get the average of the expressions. If these don't work, and it starts
with "un", we'll split on that.
:param template: Possibly a multiword template
:param wv_dict: dictionary mapping tokens -> indices
:param wv_arr: Array of word embeddings
:return: The embedding for template
"""
wv_index = wv_dict.get(template, None)
if wv_index is not None:
return wv_arr[wv_index]
if len(template.split(' ')) > 1:
t0, t1 = template.split(' ')
ind0 = wv_dict.get(t0, None)
ind1 = wv_dict.get(t1, None)
if (ind0 is None) or (ind1 is None):
raise ValueError("Error on {}".format(template))
return (wv_arr[ind0] + wv_arr[ind1]) / 2.0
if template.startswith('un'):
print("un-ning {}".format(template))
ind0 = wv_dict.get('un', None)
ind1 = wv_dict.get(template[2:], None)
if (ind0 is None) or (ind1 is None):
raise ValueError("Error on {}".format(template))
return (wv_arr[ind0] + wv_arr[ind1]) / 2.0
if template == 'cheerlead':
return (wv_arr[wv_dict.get('cheer', None)] + wv_arr[wv_dict.get('lead', None)]) / 2.0
if template == 'intermingle':
return (wv_arr[wv_dict.get('inter', None)] + wv_arr[wv_dict.get('mingle', None)]) / 2.0
if template == 'moisturize':
return wv_arr[wv_dict.get('moisture', None)]
else:
print("Problem with {}".format(template))
return torch.FloatTensor(np.random.randn(300))
raise ValueError("Problem with {}".format(template))
def _load_glove(words):
    """
    Build a (len(words), 300) tensor of GloVe embeddings, one row per word.

    :param words: sequence of tokens/templates to embed
    :return: torch.Tensor of shape (len(words), 300)
    """
    wv_dict, wv_arr, _ = load_word_vectors(GLOVE_PATH, GLOVE_TYPE, 300)
    embeds = torch.Tensor(len(words), 300).zero_()
    for row, word in enumerate(words):
        embeds[row] = _get_template_emb(word, wv_dict, wv_arr)
    return embeds
def _load_counterfit(words):
    """
    Build a (len(words), 300) tensor of counter-fitted embeddings, one row per word.

    :param words: sequence of tokens/templates to embed
    :return: torch.Tensor of shape (len(words), 300)
    """
    wv_dict, wv_arr, _ = load_word_vectors(DATA_PATH, 'ffdnglove', 300)
    # wv_dict, wv_arr, _ = load_word_vectors(DATA_PATH, 'cfv', 300)
    embeds = torch.Tensor(len(words), 300).zero_()
    for row, word in enumerate(words):
        embeds[row] = _get_template_emb(word, wv_dict, wv_arr)
    return embeds
class Attributes(object):
    """
    Attribute matrix + word embeddings for every verb template in the chosen
    split(s), so test-time predictions can be scored against all candidates.
    """

    def __init__(self, vector_type='glove', word_type='lemma', use_train=False, use_val=False, use_test=False, imsitu_only=False,
                 use_defns=False, first_defn_at_test=True):
        """
        Use this class to represent a chunk of attributes for each of the test labels.
        This is needed because at test time we'll need to compare against all of the attributes

        :param vector_type: 'glove' or 'counterfit' embedding source
        :param word_type: 'lemma' or 'infinitive' form for embedding lookup
        :param use_train: include the training split (at least one split required)
        :param use_val: include the validation split
        :param use_test: include the test split
        :param imsitu_only: restrict attributes to the imSitu subset
        :param use_defns: attach dictionary definitions to each template
        :param first_defn_at_test: at val/test time keep only the first definition
        """
        assert use_train or use_val or use_test
        self.atts_df = pd.concat([a for a, use_a in zip(attributes_split(imsitu_only),
                                                        (use_train, use_val, use_test)) if use_a])
        self.use_defns = use_defns
        if self.use_defns:
            self.atts_df = _load_defns(self.atts_df,
                                       is_test=(use_val or use_test) and first_defn_at_test)
        # perm is a permutation from the normal index to the new one.
        # This can be used for getting the attributes for Imsitu
        # NOTE(review): DataFrame.as_matrix() only exists in old pandas
        # (removed in 0.25; .values is equivalent) — confirm the pinned version.
        self.ind_perm = invert_permutation(self.atts_df['index'].as_matrix())
        # (column name, cardinality) for each attribute column.
        self.domains = [(c, len(self.atts_df[c].unique())) for c in COLUMNS]
        # NOTE(review): Variable(volatile=...) is pre-0.4 PyTorch autograd API.
        self.atts_matrix = Variable(torch.LongTensor(self.atts_df[COLUMNS].as_matrix()),
                                    volatile=not use_train)
        # LOAD THE VECTORS
        assert word_type in ('lemma', 'infinitive')
        if word_type == 'lemma':
            all_words = self.atts_df.index.values
        else:
            # Map each lemma to its infinitive form before embedding lookup.
            l2i = get_lemma_to_infinitive()
            all_words = [l2i[w] for w in self.atts_df.index.values]
        if vector_type == 'glove':
            matrix = _load_glove(all_words)
        elif vector_type == 'counterfit':
            matrix = _load_counterfit(all_words)
        else:
            raise ValueError("unspecified vector type")
        self.embeds = Variable(matrix, volatile=not use_train)

    @property
    def _balanced_inds(self):
        # Returns the inds that balance the dataset: templates with fewer
        # definitions are oversampled (random repeats) up to the max count.
        counts = self.atts_df.groupby('template').defn.nunique()
        max_count = max(counts)
        all_inds = []
        for template, inds in self.atts_df.groupby('template').indices.items():
            all_inds.append(inds)
            all_inds.append(np.random.choice(inds, size=max_count-len(inds)))
        all_inds = np.concatenate(all_inds, 0)
        np.random.shuffle(all_inds)
        return all_inds

    def __len__(self):
        # Number of attribute rows (templates, or template/definition pairs).
        return self.atts_df.shape[0]

    def __getitem__(self, index):
        # With definitions: (attribute row, embedding, definition string);
        # otherwise just (attribute row, embedding).
        if self.use_defns:
            return self.atts_matrix[index], self.embeds[index], self.atts_df.defn.iloc[index]
        return self.atts_matrix[index], self.embeds[index]

    def cuda(self, device_id=None):
        # Move the attribute matrix and embeddings to the given GPU in place.
        self.atts_matrix = self.atts_matrix.cuda(device_id)
        self.embeds = self.embeds.cuda(device_id)

    @classmethod
    def splits(cls, cuda=True, **kwargs):
        # Construct (train, val, test) Attributes objects with shared kwargs,
        # optionally moving all three to the GPU.
        train = cls(use_train=True, use_val=False, use_test=False, **kwargs)
        val = cls(use_train=False, use_val=True, use_test=False, **kwargs)
        test = cls(use_train=False, use_val=False, use_test=True, **kwargs)
        if cuda:
            train.cuda()
            val.cuda()
            test.cuda()
        return train, val, test
|
import time
import aiohttp
from async_utility import asyncio_run
class Product:
    """Fetches the product list from the example API and filters it by rating."""

    # Populated by get_products() and get_highly_rated() respectively.
    all_products: list
    highly_rated_products: list
    # REST endpoint serving the product list.
    api_products_endpoint = "https://example.com/api/products"

    def __init__(self):
        # NOTE(review): the session is created inside one asyncio_run call and
        # used in later ones — this presumably relies on async_utility keeping
        # a single event loop alive across calls; confirm.
        self.session = asyncio_run(self.init_session())
        asyncio_run(self.get_products())

    def __exit__(self, *args):
        # NOTE(review): __exit__ without a matching __enter__ — the class is
        # not usable as a context manager as written; confirm intended usage.
        asyncio_run(self.close_session())

    async def init_session(self):
        """Create the shared aiohttp client session."""
        return aiohttp.ClientSession()

    async def get_products(self):
        """Gets all available products"""
        try:
            async with self.session.get(self.api_products_endpoint) as response:
                if response.status == 200:
                    json_response = await response.json()
                    if isinstance(json_response, list):
                        self.all_products = json_response
                    else:
                        # Unexpected payload shape -> treat as no products.
                        self.all_products = []
                else:
                    self.all_products = []
        except Exception as error:
            # Network/JSON failures degrade to an empty product list.
            print("Error!", error.__class__, "occurred.")
            self.all_products = []
        return self.all_products

    def get_highly_rated(self, rate_limit: float = 4.0):
        """Gets products with high rating starting from a specific rating"""
        try:
            self.highly_rated_products = []
            all_products_length = len(self.all_products)
            if all_products_length > 0:
                for product in self.all_products:
                    if product["rating"] >= rate_limit:
                        self.highly_rated_products.append(product)
                    # NOTE(review): 1-microsecond sleep per item — presumably
                    # deliberate throttling; confirm it is intentional.
                    time.sleep(0.000001)
            return self.highly_rated_products
        except ValueError:
            print("Error! Invalid Value.")
            return None
        except TypeError:
            # e.g. a product entry is not a dict, or "rating" is not comparable.
            print("Error! Type error.")
            return None
        except Exception as error:
            print("Error! ", error.__class__, " occurred.")
            return None

    @staticmethod
    def is_key_valid(d_key: str):
        """Return True if d_key is one of the known product dictionary keys."""
        product_dic_keys = ["product", "price", "rating"]
        return d_key in product_dic_keys

    async def close_session(self):
        """Close the shared aiohttp session."""
        await self.session.close()
|
from collections import Counter
from collections import OrderedDict
class LeagueTable:
    """
    Tracks per-player results and ranks players.

    Ranking rules: higher total score first; ties broken by fewer games
    played; remaining ties broken alphabetically by player name.
    """

    def __init__(self, players):
        # Preserve the given player order; each player maps to a stats Counter
        # with 'games_played' and 'score' keys (missing keys default to 0).
        self.standings = OrderedDict([(player, Counter()) for player in players])

    def record_result(self, player, score):
        """Record one game result for ``player``, adding ``score`` points."""
        self.standings[player]['games_played'] += 1
        self.standings[player]['score'] += score

    def player_rank(self, rank):
        """
        Return the name of the player at 1-based position ``rank``.

        This was a stub that always returned None. Returns None for an
        out-of-range rank (backward compatible with the stub's behaviour).
        """
        ordered = sorted(
            self.standings.items(),
            key=lambda item: (-item[1]['score'], item[1]['games_played'], item[0]),
        )
        if not 1 <= rank <= len(ordered):
            return None
        return ordered[rank - 1][0]
if __name__ == "__main__":
    # Small demo: three players, four recorded results, print the top rank.
    table = LeagueTable(['Mike', 'Chris', 'Arnold'])
    for player, score in (('Mike', 2), ('Mike', 3), ('Arnold', 5), ('Chris', 5)):
        table.record_result(player, score)
    print(table.player_rank(1))
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import ssl
import socket
import six
import unittest2
from oslo_config import cfg
from st2common.transport import utils as transport_utils
from st2tests.fixturesloader import get_fixtures_base_path
__all__ = [
    'RabbitMQTLSListenerTestCase'
]
# Directory containing the self-signed certificates used by the TLS tests.
CERTS_FIXTURES_PATH = os.path.join(get_fixtures_base_path(), 'ssl_certs/')
# True when running under CI (ST2_CI env var set to "true").
ST2_CI = (os.environ.get('ST2_CI', 'false').lower() == 'true')
# Standard RabbitMQ listener ports: plain AMQP and AMQPS (TLS).
NON_SSL_LISTENER_PORT = 5672
SSL_LISTENER_PORT = 5671
# NOTE: We only run those tests on the CI provider because at the moment, local
# vagrant dev VM doesn't expose RabbitMQ SSL listener by default
@unittest2.skipIf(not ST2_CI,
                  'Skipping tests because ST2_CI environment variable is not set to "true"')
class RabbitMQTLSListenerTestCase(unittest2.TestCase):
    """
    Integration tests for connecting to the RabbitMQ TLS (AMQPS) listener.

    These tests talk to a live broker on 127.0.0.1:5671/5672 and therefore
    only run on CI where the SSL listener is exposed.
    """

    def setUp(self):
        # Set default values
        # (reset every messaging SSL option so each test starts clean).
        cfg.CONF.set_override(name='ssl', override=False, group='messaging')
        cfg.CONF.set_override(name='ssl_keyfile', override=None, group='messaging')
        cfg.CONF.set_override(name='ssl_certfile', override=None, group='messaging')
        cfg.CONF.set_override(name='ssl_ca_certs', override=None, group='messaging')
        cfg.CONF.set_override(name='ssl_cert_reqs', override=None, group='messaging')

    def test_non_ssl_connection_on_ssl_listener_port_failure(self):
        """Plain AMQP against the TLS port must be rejected by the broker."""
        connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/')
        # The exact error text varies across broker / client library versions.
        expected_msg_1 = '[Errno 104]'  # followed by: ' Connection reset by peer' or ' ECONNRESET'
        expected_msg_2 = 'Socket closed'
        expected_msg_3 = 'Server unexpectedly closed connection'
        try:
            connection.connect()
        except Exception as e:
            self.assertFalse(connection.connected)
            self.assertIsInstance(e, (IOError, socket.error))
            self.assertTrue(expected_msg_1 in six.text_type(e) or
                            expected_msg_2 in six.text_type(e) or
                            expected_msg_3 in six.text_type(e))
        else:
            self.fail('Exception was not thrown')
        if connection:
            connection.release()

    def test_ssl_connection_on_ssl_listener_success(self):
        """TLS connections succeed via URL query param and via config option."""
        # Using query param notation
        urls = 'amqp://guest:guest@127.0.0.1:5671/?ssl=true'
        connection = transport_utils.get_connection(urls=urls)
        try:
            self.assertTrue(connection.connect())
            self.assertTrue(connection.connected)
        finally:
            if connection:
                connection.release()
        # Using messaging.ssl config option
        cfg.CONF.set_override(name='ssl', override=True, group='messaging')
        connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/')
        try:
            self.assertTrue(connection.connect())
            self.assertTrue(connection.connected)
        finally:
            if connection:
                connection.release()

    def test_ssl_connection_ca_certs_provided(self):
        """Server cert verification honors ssl_ca_certs + ssl_cert_reqs combos."""
        ca_cert_path = os.path.join(CERTS_FIXTURES_PATH, 'ca/ca_certificate_bundle.pem')
        cfg.CONF.set_override(name='ssl', override=True, group='messaging')
        cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging')
        # 1. Validate server cert against a valid CA bundle (success) - cert required
        cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging')
        connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/')
        try:
            self.assertTrue(connection.connect())
            self.assertTrue(connection.connected)
        finally:
            if connection:
                connection.release()
        # 2. Validate server cert against other CA bundle (failure)
        # CA bundle which was not used to sign the server cert
        ca_cert_path = os.path.join('/etc/ssl/certs/thawte_Primary_Root_CA.pem')
        cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging')
        cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging')
        connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/')
        expected_msg = r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed'
        # NOTE(review): assertRaisesRegexp is a deprecated alias of assertRaisesRegex.
        self.assertRaisesRegexp(ssl.SSLError, expected_msg, connection.connect)
        # 3. Validate server cert against other CA bundle (failure)
        ca_cert_path = os.path.join('/etc/ssl/certs/thawte_Primary_Root_CA.pem')
        cfg.CONF.set_override(name='ssl_cert_reqs', override='optional', group='messaging')
        cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging')
        connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/')
        expected_msg = r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed'
        self.assertRaisesRegexp(ssl.SSLError, expected_msg, connection.connect)
        # 4. Validate server cert against other CA bundle (failure)
        # We use invalid bundle but cert_reqs is none
        ca_cert_path = os.path.join('/etc/ssl/certs/thawte_Primary_Root_CA.pem')
        cfg.CONF.set_override(name='ssl_cert_reqs', override='none', group='messaging')
        cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging')
        connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/')
        try:
            self.assertTrue(connection.connect())
            self.assertTrue(connection.connected)
        finally:
            if connection:
                connection.release()

    def test_ssl_connect_client_side_cert_authentication(self):
        """Client-side certificate auth: valid cert succeeds, mismatched pair fails."""
        # 1. Success, valid client side cert provided
        ssl_keyfile = os.path.join(CERTS_FIXTURES_PATH, 'client/private_key.pem')
        ssl_certfile = os.path.join(CERTS_FIXTURES_PATH, 'client/client_certificate.pem')
        ca_cert_path = os.path.join(CERTS_FIXTURES_PATH, 'ca/ca_certificate_bundle.pem')
        cfg.CONF.set_override(name='ssl_keyfile', override=ssl_keyfile, group='messaging')
        cfg.CONF.set_override(name='ssl_certfile', override=ssl_certfile, group='messaging')
        cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging')
        cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging')
        connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/')
        try:
            self.assertTrue(connection.connect())
            self.assertTrue(connection.connected)
        finally:
            if connection:
                connection.release()
        # 2. Invalid client side cert provided - failure
        # (server certificate paired with the client private key -> mismatch)
        ssl_keyfile = os.path.join(CERTS_FIXTURES_PATH, 'client/private_key.pem')
        ssl_certfile = os.path.join(CERTS_FIXTURES_PATH, 'server/server_certificate.pem')
        ca_cert_path = os.path.join(CERTS_FIXTURES_PATH, 'ca/ca_certificate_bundle.pem')
        cfg.CONF.set_override(name='ssl_keyfile', override=ssl_keyfile, group='messaging')
        cfg.CONF.set_override(name='ssl_certfile', override=ssl_certfile, group='messaging')
        cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging')
        cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging')
        connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/')
        expected_msg = r'\[X509: KEY_VALUES_MISMATCH\] key values mismatch'
        self.assertRaisesRegexp(ssl.SSLError, expected_msg, connection.connect)
|
# -*- coding: utf-8 -*-
# web2py model: 'account' table with a required name and balance.
# NOTE(review): IS_NOT_EMPTY() on a float field is unusual — a range validator
# (e.g. IS_FLOAT_IN_RANGE) may be intended; confirm.
db.define_table('account',
    Field('name',type='string', requires=IS_NOT_EMPTY()),
    Field('balance',type='float', requires= IS_NOT_EMPTY()))
|
import sys
import os
import os.path
import subprocess
import re
from setuptools import setup
from setuptools.command.install_lib import install_lib
from setuptools.command.install import install
from setuptools.command.build_ext import build_ext
import setuptools.command.bdist_egg
import sys
import distutils.spawn
import numpy as np
from Cython.Build import cythonize
#from numpy.distutils.core import setup as numpy_setup, Extension as numpy_Extension
# Per-compiler build settings applied by build_ext_compile_args below.
# NOTE(review): gcc/clang clamp optimization levels above -O3 to -O3, so
# "-O5" presumably behaves as -O3 — confirm it is intentional.
extra_compile_args = {
    "msvc": ["/openmp","/Dfloat64_t=double"],
    #"unix": ["-O0", "-g", "-Wno-uninitialized"], # Replace the line below with this line to enable debugging of the compiled extension
    "unix": ["-fopenmp","-O5","-Wno-uninitialized"],
    "clang": ["-fopenmp","-O5","-Wno-uninitialized"],
}
# Extra include directories (current dir + NumPy headers for all compilers).
extra_include_dirs = {
    "msvc": [".", np.get_include() ],
    "unix": [".", np.get_include() ],
    "clang": [".", np.get_include() ],
}
# Extra libraries to link (GNU OpenMP runtime is only needed for gcc).
extra_libraries = {
    "msvc": [],
    "unix": ["gomp",],
    "clang": [],
}
# Extra linker flags (clang links OpenMP via a flag rather than a library).
extra_link_args = {
    "msvc": [],
    "unix": [],
    "clang": ["-fopenmp=libomp"],
}
class build_ext_compile_args(build_ext):
    """build_ext variant that injects per-compiler flags, includes and libraries."""

    def build_extensions(self):
        ctype = self.compiler.compiler_type
        # Unknown compilers fall back to the "unix" parameter set.
        key = ctype if ctype in extra_compile_args else "unix"
        for ext in self.extensions:
            ext.extra_compile_args = extra_compile_args[key]
            ext.extra_link_args = extra_link_args[key]
            ext.include_dirs.extend(list(extra_include_dirs[key]))
            ext.libraries.extend(list(extra_libraries[key]))
        build_ext.build_extensions(self)
# Non-Python data files shipped inside the greensinversion package.
greensinversion_package_files = [ "pt_steps/*" ]
# Compile all Cython sources in the package into extension modules.
ext_modules=cythonize("greensinversion/*.pyx")
# Map module name -> extension object (kept for the commented tweaks below).
emdict=dict([ (module.name,module) for module in ext_modules])
#gi_fillholes_pyx_ext=emdict['greensinversion.fillholes']
#gi_fillholes_pyx_ext.sources.append("greensinversion/fillholes_c.c")
#gi_fillholes_pyx_ext.extra_compile_args=['-g']
#gi_fillholes_pyx_ext.extra_compile_args=['-fopenmp','-O3']
#gi_fillholes_pyx_ext.extra_link_args=['-lgomp']
setup(name="greensinversion",
      description="greensinversion",
      author="Stephen D. Holland",
      url="http://thermal.cnde.iastate.edu",
      zip_safe=False,
      ext_modules=ext_modules,
      packages=["greensinversion"],
      cmdclass={
          # Use the flag-injecting build_ext subclass defined above.
          "build_ext": build_ext_compile_args,
      },
      package_data={"greensinversion": greensinversion_package_files},
      entry_points={"limatix.processtrak.step_url_search_path": [ "limatix.share.pt_steps = greensinversion:getstepurlpath" ]})
|
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import Context, loader
from ctfweb.models import *
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.models import User
from django.contrib.auth import logout, views
from ctfweb.support import *
def index(request):
    """Fallback landing view; users are expected to enter via the CTF board."""
    message = "You shouldn't be here... Go to http://ctfboard.ctf/ and start over please..."
    return HttpResponse(message)
def allchallenges(request):
    """List all challenge categories for a logged-in player."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect("/ctfweb/login/")
    auth_player = currentplayer(request)
    if not gameisrunning(currentgame()):
        return genericerror(request, "Game Not Running!")
    category_list = Category.objects.order_by("name")
    context = {'category_list': category_list, 'auth_player': auth_player}
    return render(request, "ctfweb/challenges.html", context)
def challenge(request, challenge_id):
    """Render a single challenge page with its hints and solved state."""
    if not gameisrunning(currentgame()):
        return genericerror(request, "Game Not Running!")
    if not request.user.is_authenticated():
        return HttpResponseRedirect("/ctfweb/login/")
    else:
        auth_player = currentplayer(request)
        chall = get_object_or_404(Challenge, pk=challenge_id)
        # Any Solved row for this player + challenge means it's already solved.
        numsolved = Solved.objects.filter(challenge=chall, competitor=auth_player)
        if numsolved:
            unsolved = False
        else:
            unsolved = True
        # NOTE(review): QuerySet.filter() never raises DoesNotExist (it returns
        # an empty queryset), so the except branch below appears dead — confirm
        # the template handles both an empty queryset and 0.
        try:
            hints = Hint.objects.filter(challenge=challenge_id, active=1)
        except Hint.DoesNotExist:
            hints = 0
        return render(request, 'ctfweb/challenge.html', {'chall': chall, 'hints' : hints, 'unsolved': unsolved})
def competitordetail(request, comp_id):
    """Show a competitor's profile and the challenges they have solved."""
    if not gameisrunning(currentgame()):
        return genericerror(request, "Game Not Running!")
    if not request.user.is_authenticated():
        return HttpResponseRedirect("/ctfweb/login/")
    auth_player = currentplayer(request)
    competitor = get_object_or_404(Competitor, pk=comp_id)
    numsolved = Solved.objects.filter(competitor=competitor).count()
    # Empty string (not an empty queryset) when nothing is solved, matching
    # what the template historically received.
    solved_list = Solved.objects.filter(competitor=competitor) if numsolved else ''
    context = {'numsolved': numsolved, 'competitor': competitor, 'solved_list': solved_list}
    return render(request, 'ctfweb/competitordetail.html', context)
def submitkey(request, challenge_id):
    """Check a submitted flag: award points on success, count bad keys otherwise."""
    if not gameisrunning(currentgame()):
        return genericerror(request, "Game Not Running!")
    if not request.user.is_authenticated():
        return HttpResponseRedirect("/ctfweb/login/")
    else:
        auth_player = currentplayer(request)
        # NOTE(review): Model.objects.get raises DoesNotExist for an unknown id
        # (it never returns None), so the final else branch looks unreachable
        # — confirm whether get_object_or_404 was intended here.
        chall = Challenge.objects.get(id=challenge_id)
        if chall:
            if chall.active:
                # Keys are normalised with washstring() before comparison.
                if washstring(chall.key) == washstring(request.POST['keysubmit']) :
                    if not hassolved(auth_player, chall):
                        solve(auth_player, chall)
                        return genericerror(request, "Correct Key! Challenge : " + chall.name + " solved\n " + str(chall.points) + " points awarded!")
                    else:
                        return genericerror(request, "Already Solved")
                else:
                    # Wrong key: track per-player bad submissions.
                    comp = auth_player
                    comp.bad_keys += 1
                    comp.save()
                    return genericerror(request, "Bad Key!")
            else: #not active
                return genericerror(request, "Not active! Go Away")
        else:
            return genericerror(request, "No challenge by that name! Go Away")
def scoreboard(request):
    """Render the scoreboard together with the game's time status."""
    score_list = Competitor.objects.order_by('points').exclude(active=0).reverse()
    game = currentgame()
    now = datetime.datetime.now()
    if now < game.start_time:
        status = " not running yet! Starts in: " + str(game.start_time - now)
    elif now > game.end_time:
        status = "Game Over!"
    else:
        status = str(game.end_time - now) + " remaining"
    return render(request, "ctfweb/scoreboard.html", {'score_list': score_list, 'status' : status})
def genericerror(request, errorstring):
    """Render the shared error template showing ``errorstring``."""
    context = {'errorstring': errorstring}
    return render(request, "ctfweb/genericerror.html", context)
def registerform(request):
    """Show the registration form (rejected if already logged in)."""
    if request.user.is_authenticated():
        return genericerror(request, "You're already authenticated... GTFO")
    game = currentgame()
    return render(request, "ctfweb/registerform.html", {'usingregcodes': game.require_regcodes})
def registerformerror(request, fieldstring):
    """Re-render the registration form flagging the invalid fields."""
    game = currentgame()
    errorrecall = True
    if request.user.is_authenticated():
        return genericerror(request, "You're already authenticated... GTFO")
    context = {'fieldstring': fieldstring, 'errorrecall': errorrecall, 'usingregcodes': game.require_regcodes}
    return render(request, "ctfweb/registerform.html", context)
def registerprocess(request):
    """
    Validate the registration POST and create the User/Competitor pair.

    Every failing field is appended to ``errorstring``; if anything failed
    the form is re-rendered, otherwise the user and competitor are created.
    NOTE(review): assumes all referenced POST keys are always submitted; a
    missing key would raise KeyError — confirm the form guarantees them.
    """
    game = currentgame()
    error = False
    errorstring =''
    if request.POST['username'] == '':
        errorstring += ' username'
        error=True
    if request.POST['password'] == '':
        errorstring += ' password'
        error=True
    if request.POST['passwordconfirm'] == '':
        errorstring += ' passwordconfirm'
        error=True
    if request.POST['displayname'] == '':
        errorstring += ' displayname'
        error=True
    if (len(request.POST['displayname']) >= 50) or (len(request.POST['affiliation']) >= 50) :
        errorstring += ' really long displayname or affiliation - be nice'
        error=True
    if request.POST['email'] == '':
        errorstring += ' email'
        error=True
    if request.POST['passwordconfirm'] != request.POST['password'] :
        errorstring += ' ** password confirmation does not match password**'
        error=True
    if User.objects.filter(username__exact=request.POST['username']).count():
        errorstring += ' duplicate username: ' + request.POST['username']
        error=True
    if game.require_regcodes :
        try:
            code = RegCodes.objects.get(code=request.POST['regcode'])
            if code.used :
                errorstring += ' registration code has been used'
                error = True
        except RegCodes.DoesNotExist:
            errorstring += ' invalid registration code'
            error = True
    if Competitor.objects.filter(display_name=request.POST['displayname']).count():
        errorstring += " duplicate displayname: " + request.POST['displayname']
        error=True
    if error:
        return registerformerror(request, errorstring)
    else:
        user = User.objects.create_user(request.POST['username'], request.POST['email'], request.POST['password'])
        comp = Competitor(game=currentgame(), user=user, display_name=request.POST['displayname'], affiliation=request.POST['affiliation'], url=request.POST['url'], points=0, bad_keys=0, active=1, ipaddr=get_client_ip(request), regcode=None)
        # ``code`` is only bound when regcodes are required; any lookup failure
        # above set ``error`` and returned earlier, so this access is safe.
        if game.require_regcodes and code :
            comp.regcode = code
            code.used = 1
            code.save()
        comp.save()
        return genericerror (request, "User Created - Please Login")
def logout_view(request):
    """Log the current user out and show a confirmation page."""
    message = "Logged Out..."
    logout(request)
    return genericerror(request, message)
# Create your views here.
|
import unittest
import jupyter_kernel_test
class XeusEchoKernelTests(jupyter_kernel_test.KernelTests):
    """jupyter_kernel_test suite for the xeus echo test kernel."""

    # Kernel spec to launch and the language it reports.
    kernel_name = "test_echo_kernel"
    language_name = "cpp"
    # The echo kernel returns every input verbatim as its execute result.
    code_hello_world = "hello, world"
    code_page_something = "?"
    code_execute_result = [
        {'code': '6*7', 'result': '6*7'},
        {'code': 'test', 'result': 'test'}
    ]
    completion_samples = [
        {'text': 'a.', 'matches': ['a.echo1', 'a.echo2']}
    ]
    # Inputs the kernel reports as complete / incomplete / invalid.
    complete_code_samples = ["complete"]
    incomplete_code_samples = ["incomplete"]
    invalid_code_samples = ["invalid"]
    code_inspect_sample = "invalid"

    def test_xeus_stderr(self):
        """The 'error' input must be echoed on the stderr stream."""
        reply, output_msgs = self.execute_helper(code='error')
        self.assertEqual(output_msgs[0]['msg_type'], 'stream')
        self.assertEqual(output_msgs[0]['content']['name'], 'stderr')
        self.assertEqual(output_msgs[0]['content']['text'], 'error')
# Allow running this test module directly with ``python``.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luc LEGER / Coopérative ARTEFACTS <artefacts.lle@gmail.com>
from django.views.generic.base import View
import francoralite.apps.francoralite_front.tools as tools
class InstitutionDelete(View):
    """View that deletes an institution record via the shared API helpers."""

    def get(self, request, *args, **kwargs):
        """Handle GET by delegating the deletion to ``tools.delete``."""
        response = tools.delete('institution', request, *args, **kwargs)
        return response
|
from setuptools import setup, find_packages
# Packaging metadata for the keeptrackd crawler/tracker utility.
setup(
    name='keeptrackd',
    version='1.0.0',
    description='always keep tracked to your interest',
    url='https://github.com/delihiros/keeptrackd',
    author='delihiros',
    author_email='delihiros@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='web crawler utilities tracker checker',
    install_requires=['selenium', 'chromedriver-binary', 'requests'],
    packages=['keeptrackd'],
    entry_points={
        # Console entry point: ``keeptrackd`` runs keeptrackd.main().
        'console_scripts': ['keeptrackd=keeptrackd:main']
    }
)
|
import numpy as np
import pycuda.autoinit # NOQA:401
import pycuda.gpuarray as gpuarray
from cufinufft import cufinufft
import utils
def _test_type1(dtype, shape=(16, 16, 16), M=4096, tol=1e-3):
    """
    Run a type-1 (nonuniform -> uniform) NUFFT on random data and compare one
    output mode against a direct evaluation.

    :param dtype: real dtype (np.float32 or np.float64)
    :param shape: uniform grid shape
    :param M: number of nonuniform points
    :param tol: requested NUFFT accuracy
    """
    complex_dtype = utils._complex_dtype(dtype)
    dim = len(shape)
    k = utils.gen_nu_pts(M, dim=dim).astype(dtype)
    c = utils.gen_nonuniform_data(M).astype(complex_dtype)
    k_gpu = gpuarray.to_gpu(k)
    c_gpu = gpuarray.to_gpu(c)
    fk_gpu = gpuarray.GPUArray(shape, dtype=complex_dtype)
    # Plan, bind the nonuniform points, then transform on the GPU.
    plan = cufinufft(1, shape, eps=tol, dtype=dtype)
    plan.set_pts(k_gpu[0], k_gpu[1], k_gpu[2])
    plan.execute(c_gpu, fk_gpu)
    fk = fk_gpu.get()
    # Spot-check one arbitrary mode against the direct computation.
    ind = int(0.1789 * np.prod(shape))
    fk_est = fk.ravel()[ind]
    fk_target = utils.direct_type1(c, k, shape, ind)
    type1_rel_err = np.abs(fk_target - fk_est) / np.abs(fk_target)
    print('Type 1 relative error:', type1_rel_err)
    assert type1_rel_err < 0.01
def test_type1_32(shape=(16, 16, 16), M=4096, tol=1e-3):
    """Type-1 accuracy check in single precision."""
    return _test_type1(np.float32, shape=shape, M=M, tol=tol)
def test_type1_64(shape=(16, 16, 16), M=4096, tol=1e-3):
    """Type-1 accuracy check in double precision."""
    return _test_type1(np.float64, shape=shape, M=M, tol=tol)
def _test_type2(dtype, shape=(16, 16, 16), M=4096, tol=1e-3):
    """
    Run a type-2 (uniform -> nonuniform) NUFFT and compare one output value
    against a direct evaluation.

    :param dtype: real dtype (np.float32 or np.float64)
    :param shape: uniform grid shape
    :param M: number of nonuniform points
    :param tol: requested NUFFT accuracy
    """
    complex_dtype = utils._complex_dtype(dtype)
    # NOTE(review): unlike _test_type1, gen_nu_pts is called without dim here
    # — presumably its default matches the 3-D shape; confirm.
    k = utils.gen_nu_pts(M).astype(dtype)
    fk = utils.gen_uniform_data(shape).astype(complex_dtype)
    k_gpu = gpuarray.to_gpu(k)
    fk_gpu = gpuarray.to_gpu(fk)
    c_gpu = gpuarray.GPUArray(shape=(M,), dtype=complex_dtype)
    plan = cufinufft(2, shape, eps=tol, dtype=dtype)
    plan.set_pts(k_gpu[0], k_gpu[1], k_gpu[2])
    plan.execute(c_gpu, fk_gpu)
    c = c_gpu.get()
    # Spot-check the middle nonuniform point against the direct computation.
    ind = M // 2
    c_est = c[ind]
    c_target = utils.direct_type2(fk, k[:, ind])
    type2_rel_err = np.abs(c_target - c_est) / np.abs(c_target)
    print('Type 2 relative error:', type2_rel_err)
    assert type2_rel_err < 0.01
def test_type2_32(shape=(16, 16, 16), M=4096, tol=1e-3):
    """Type-2 accuracy check in single precision."""
    return _test_type2(np.float32, shape=shape, M=M, tol=tol)
def test_type2_64(shape=(16, 16, 16), M=4096, tol=1e-3):
    """Type-2 accuracy check in double precision."""
    return _test_type2(np.float64, shape=shape, M=M, tol=tol)
def test_opts(shape=(8, 8, 8), M=32, tol=1e-3):
    """
    Type-1 accuracy check that also exercises plan keyword options
    (gpu_sort disabled, custom gpu_maxsubprobsize).
    """
    dtype = np.float32
    complex_dtype = utils._complex_dtype(dtype)
    dim = len(shape)
    k = utils.gen_nu_pts(M, dim=dim).astype(dtype)
    c = utils.gen_nonuniform_data(M).astype(complex_dtype)
    k_gpu = gpuarray.to_gpu(k)
    c_gpu = gpuarray.to_gpu(c)
    fk_gpu = gpuarray.GPUArray(shape, dtype=complex_dtype)
    # Pass algorithm-tuning options through to the plan constructor.
    plan = cufinufft(1, shape, eps=tol, dtype=dtype, gpu_sort=False,
                     gpu_maxsubprobsize=10)
    plan.set_pts(k_gpu[0], k_gpu[1], k_gpu[2])
    plan.execute(c_gpu, fk_gpu)
    fk = fk_gpu.get()
    # Spot-check one arbitrary mode against the direct computation.
    ind = int(0.1789 * np.prod(shape))
    fk_est = fk.ravel()[ind]
    fk_target = utils.direct_type1(c, k, shape, ind)
    type1_rel_err = np.abs(fk_target - fk_est) / np.abs(fk_target)
    assert type1_rel_err < 0.01
def main():
    """Run the full GPU accuracy test battery."""
    for case in (test_type1_32, test_type2_32, test_type1_64, test_type2_64):
        case()


if __name__ == '__main__':
    main()
|
import os
from flask import Flask, make_response, request
from content_store.api.database import DB
from content_store.api.config import DevelopmentConfig
def create_app(config=None):
    """
    application factory
    :param config: override config (object or import path); when omitted,
        falls back to the APP_SETTINGS env var, then DevelopmentConfig
    :return: application
    """
    app = Flask(__name__)
    # basic configurations setup for now
    if config:
        app.config.from_object(config)
    else:
        if "APP_SETTINGS" in os.environ:
            # e.g. an import path like "content_store.api.config.ProductionConfig"
            app.config.from_object(os.environ["APP_SETTINGS"])
        else:
            app.config.from_object(DevelopmentConfig)
    DB.init_app(app)
    # could move to blueprint
    @app.route("/ping")
    def _ping():
        """
        simple pingpong responder
        :return: response with content "pong"
        """
        resp = make_response("pong")
        resp.headers["Cache-Control"] = "no-store, must-revalidate"
        resp.headers["Content-Type"] = "text/plain; charset=utf-8"
        # HTTP/1.0 clients ignore Cache-Control, so force immediate expiry too.
        if request.environ.get("SERVER_PROTOCOL") == "HTTP/1.0":
            resp.headers["Expires"] = 0
        return resp
    return app
|
import sqlite3, datetime, os
# Location of the agency loan-level project and its SQLite database.
app_dir = "/run/media/james/MediaB/projects/agency_loan_level/"
db_name = "mortgage.db"

# Open (or create) the database and grab a cursor for the scripts below.
conn = sqlite3.connect(app_dir + db_name)
c = conn.cursor()

# DDL scripts that create the raw loan-level staging tables.
raw_scripts = [
    "raw_fannie_origination.sql",
    "raw_fannie_performance.sql",
    "raw_freddie_origination.sql",
    "raw_freddie_performance.sql",
]
|
# Copyright 2019 Silverbackhq
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Third Party Library
from django.views import View
from django.urls import reverse
from django.utils.translation import gettext as _
# Local Library
from app.controllers.controller import Controller
from app.modules.core.metric import Metric as MetricModule
from app.modules.core.decorators import allow_if_authenticated
class Metrics(View, Controller):
    """Create and List Metrics Private Endpoint Controller"""

    def __init__(self):
        # Metric business-logic module shared by both endpoints.
        self.__metric = MetricModule()

    def __validation_rules(self, request_data):
        """
        Build the sanitization/validation spec for the metric form.

        Extracted from post() so the (long) rule table is separate from the
        request-handling flow.

        :param request_data: dict of raw POST values keyed by field name
        :return: dict suitable for self.form().add_inputs()
        """
        return {
            'title': {
                'value': request_data["title"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 60],
                        'error': _('Error! Metric title must be 1 to 60 characters long.')
                    }
                }
            },
            'description': {
                'value': request_data["description"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [0, 150],
                        'error': _('Error! Metric description must be less than 150 characters long.')
                    },
                    'optional': {}
                }
            },
            'source': {
                'value': request_data["source"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'any_of': {
                        'param': [["newrelic"]],
                        'error': _('Error! Source is invalid.')
                    }
                }
            },
            'application': {
                'value': request_data["application"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 60],
                        'error': _('Error! Application must be 1 to 60 characters long.')
                    }
                }
            },
            'metric': {
                'value': request_data["metric"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 60],
                        'error': _('Error! Metric must be 1 to 60 characters long.')
                    }
                }
            },
            'x_axis': {
                'value': request_data["x_axis"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 40],
                        'error': _('Error! X-Axis label must be 1 to 40 characters long.')
                    }
                }
            },
            'y_axis': {
                'value': request_data["y_axis"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 40],
                        'error': _('Error! Y-Axis label must be 1 to 40 characters long.')
                    }
                }
            }
        }

    @allow_if_authenticated
    def post(self, request):
        """Create a new metric from the submitted form data."""
        request_data = self.get_request_data(request, "post", {
            "title": "",
            "description": "",
            "source": "",
            "application": "",
            "metric": "",
            "x_axis": "",
            "y_axis": ""
        })
        self.form().add_inputs(self.__validation_rules(request_data))
        self.form().process()
        if not self.form().is_passed():
            return self.json(self.form().get_errors())
        # Metric titles must be unique.
        if self.__metric.get_one_by_title(self.form().get_sinput("title")):
            return self.json([{
                "type": "error",
                "message": _("Error! Metric title is used before.")
            }])
        result = self.__metric.insert_one({
            "title": self.form().get_sinput("title"),
            "description": self.form().get_sinput("description"),
            "source": self.form().get_sinput("source"),
            "x_axis": self.form().get_sinput("x_axis"),
            "y_axis": self.form().get_sinput("y_axis"),
            # application/metric are stored as a JSON payload in "data".
            "data": '{"application":"%s", "metric":"%s"}' % (
                self.form().get_sinput("application"),
                self.form().get_sinput("metric")
            )
        })
        if result:
            return self.json([{
                "type": "success",
                "message": _("Metric created successfully.")
            }])
        return self.json([{
            "type": "error",
            "message": _("Error! Something goes wrong while creating metric.")
        }])

    @allow_if_authenticated
    def get(self, request):
        """List metrics with offset/limit pagination metadata."""
        request_data = self.get_request_data(request, "get", {
            "offset": 0,
            "limit": 20
        })
        try:
            offset = int(request_data["offset"])
            limit = int(request_data["limit"])
        except Exception:
            # Malformed pagination params fall back to the defaults.
            offset = 0
            limit = 20
        return self.json([], {
            'metrics': self.__format_metrics(self.__metric.get_all(offset, limit)),
            'metadata': {
                'offset': offset,
                'limit': limit,
                'count': self.__metric.count_all()
            }
        })

    def __format_metrics(self, metrics):
        """Serialize metric model rows for the admin listing table."""
        metrics_list = []
        for metric in metrics:
            metrics_list.append({
                "id": metric.id,
                "title": metric.title,
                "source": metric.source.title(),
                "created_at": metric.created_at.strftime("%b %d %Y %H:%M:%S"),
                "edit_url": reverse("app.web.admin.metric.edit", kwargs={'metric_id': metric.id}),
                "delete_url": reverse("app.api.private.v1.admin.metric.endpoint", kwargs={'metric_id': metric.id})
            })
        return metrics_list
class Metric(View, Controller):
    """Update and Delete Metric Private Endpoint Controller"""

    def __init__(self):
        # Data-access module for metric records.
        self.__metric = MetricModule()

    @allow_if_authenticated
    def post(self, request, metric_id):
        """Validate the submitted form and update metric `metric_id`.

        Returns a JSON list of {type, message} notifications, or the form's
        field errors when validation fails.
        """
        # Raw request payload with a default for every expected field.
        request_data = self.get_request_data(request, "post", {
            "title": "",
            "description": "",
            "source": "",
            "application": "",
            "metric": "",
            "x_axis": "",
            "y_axis": ""
        })
        # Declare sanitization + validation rules for each field.
        self.form().add_inputs({
            'title': {
                'value': request_data["title"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 60],
                        'error': _('Error! Metric title must be 1 to 60 characters long.')
                    }
                }
            },
            'description': {
                'value': request_data["description"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [0, 150],
                        'error': _('Error! Metric description must be less than 150 characters long.')
                    },
                    'optional': {}
                }
            },
            'source': {
                'value': request_data["source"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    # Only the "newrelic" source is supported here.
                    'any_of': {
                        'param': [["newrelic"]],
                        'error': _('Error! Source is invalid.')
                    }
                }
            },
            'application': {
                'value': request_data["application"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 60],
                        'error': _('Error! Application must be 1 to 60 characters long.')
                    }
                }
            },
            'metric': {
                'value': request_data["metric"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 60],
                        'error': _('Error! Metric must be 1 to 60 characters long.')
                    }
                }
            },
            'x_axis': {
                'value': request_data["x_axis"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 40],
                        'error': _('Error! X-Axis label must be 1 to 40 characters long.')
                    }
                }
            },
            'y_axis': {
                'value': request_data["y_axis"],
                'sanitize': {
                    'strip': {}
                },
                'validate': {
                    'length_between': {
                        'param': [1, 40],
                        'error': _('Error! Y-Axis label must be 1 to 40 characters long.')
                    }
                }
            }
        })
        self.form().process()
        if not self.form().is_passed():
            return self.json(self.form().get_errors())
        # Reject a title that already belongs to a *different* metric.
        current_metric = self.__metric.get_one_by_title(self.form().get_sinput("title"))
        if current_metric and not current_metric["id"] == metric_id:
            # NOTE(review): `metric_id` comes from the URL and may be a str
            # while the stored id is likely an int — confirm the comparison
            # never produces a false mismatch.
            return self.json([{
                "type": "error",
                "message": _("Error! Metric title is used before.")
            }])
        result = self.__metric.update_one_by_id(metric_id, {
            "title": self.form().get_sinput("title"),
            "description": self.form().get_sinput("description"),
            "source": self.form().get_sinput("source"),
            "x_axis": self.form().get_sinput("x_axis"),
            "y_axis": self.form().get_sinput("y_axis"),
            # The source-specific selector is stored as a JSON string.
            "data": '{"application":"%s", "metric":"%s"}' % (
                self.form().get_sinput("application"),
                self.form().get_sinput("metric")
            )
        })
        if result:
            return self.json([{
                "type": "success",
                "message": _("Metric updated successfully.")
            }])
        else:
            return self.json([{
                "type": "error",
                "message": _("Error! Something goes wrong while updating metric.")
            }])

    @allow_if_authenticated
    def delete(self, request, metric_id):
        """Delete metric `metric_id` and report success or failure as JSON."""
        if self.__metric.delete_one_by_id(metric_id):
            return self.json([{
                "type": "success",
                "message": _("Metric deleted successfully.")
            }])
        else:
            return self.json([{
                "type": "error",
                "message": _("Error! Something goes wrong while deleting metric.")
            }])
class NewRelicApps(View, Controller):
    """List NewRelic Apps Private Endpoint Controller"""

    def __init__(self):
        self.__metric = MetricModule()

    @allow_if_authenticated
    def get(self, request):
        """Fetch the list of applications known to New Relic."""
        apps = False
        try:
            apps = self.__metric.get_new_relic_apps()
        except Exception as error:
            # Log the failure; `apps` stays False so the error branch fires.
            self.logger().error(_("Error while listing newrelic applications: %(error)s") % {
                "error": str(error)
            })
        if apps is False:
            return self.json([{
                "type": "error",
                "message": _("Error! Connecting to New Relic.")
            }])
        return self.json([], {'apps': apps})
|
from parsed_args import args
import torch
from tqdm import tqdm
from vae_train.vae_utils import *
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, roc_auc_score
# TODO? A lot of these handlers are very similar; however, I think it's simpler to keep them seperate?
def full_iterator(unlabeled_train_iter, unlabeled_trainloader, vocab, model, n_labels):
    """Run one full pass of batches through the VAE `model` and collect results.

    Returns four parallel lists (one entry per batch): content vectors,
    argmax-ed one-hot strategy vectors, document labels, and the message ids
    fetched with each batch.

    `unlabeled_trainloader` is kept for interface compatibility; the function
    now simply drains `unlabeled_train_iter`.
    """
    content_vectors = []
    strat_vectors = []
    doc_labels = []
    orig_fetch = []
    while True:
        # BUG FIX: the original used Python-2-style `.next()` and, on
        # exhaustion, restarted the iterator but unpacked only 9 of the 10
        # yielded values; the resulting ValueError was swallowed by a bare
        # `except` (which also fetched and discarded one extra batch).
        # Draining with builtin next() performs exactly one clean pass.
        try:
            x_u, l_u, y_u, mask1_u, mask2_u, mask3_u, mask4_u, mid_u, sent_len_u, doc_len_u = next(unlabeled_train_iter)
        except StopIteration:
            break
        # torch.cat over a single tensor is a no-op, kept for parity with the
        # multi-source variants of this loop.
        x = torch.cat([x_u], dim = 0)
        l = torch.cat([l_u], dim = 0)
        y = torch.cat([y_u.long()], dim = 0)
        mask1 = torch.cat([mask1_u], dim = 0)
        mask2 = torch.cat([mask2_u], dim = 0)
        mask3 = torch.cat([mask3_u], dim = 0)
        mask4 = torch.cat([mask4_u], dim = 0)
        doc_len = torch.cat([doc_len_u], dim = 0)
        sent_len = torch.cat([sent_len_u], dim = 0)
        batch_size = l.shape[0]
        seq_num = x.shape[1]
        seq_len = x.shape[2]
        mid = mid_u
        # One-hot encode per-sentence strategy labels.
        # NOTE(review): 10 is assumed to be the "no label" marker — confirm
        # against the dataset definition.
        temp = l.view(-1, 1).long()
        l_one_hot = torch.zeros(batch_size*seq_num, n_labels).cuda()
        for i in range(0, len(temp)):
            if temp[i] != 10:
                l_one_hot[i][temp[i]] = 1
        l_one_hot = l_one_hot.view(batch_size, seq_num, n_labels)
        xs, ys = (x.view(batch_size*seq_num, seq_len), l.view(batch_size*seq_num))
        prob = create_generator_inputs(xs, vocab, train = False)
        x, prob, l_one_hot, y, l = x.cuda(), prob.cuda(), l_one_hot.cuda(), y.cuda(), l.cuda()
        mask1, mask2 = mask1.cuda(), mask2.cuda()
        logits, kld_z, q_y, q_y_softmax, t, strategy_embedding, y_in2, content_vec = model(x, prob,
            args.tau, mask1, mask2, args.hard, l_one_hot, doc_len = doc_len, sent_len = sent_len)
        # Keep only the argmax strategy per sentence, rescaled by the row sum.
        max_idxs = y_in2.argmax(axis=1)
        argmaxed = torch.zeros(y_in2.shape)
        argmaxed[torch.arange(y_in2.shape[0]),max_idxs] = 1
        y_in2 = (argmaxed.T.cpu() * y_in2.sum(axis=1).cpu()).T
        # Reshape the flattened VAE content output back to (batch, seq, feature).
        last_dim = int((content_vec.shape[0] * content_vec.shape[1]) / (batch_size * seq_num))
        content_vectors.append(content_vec.reshape((batch_size, seq_num, last_dim)).tolist())
        curr_strats = y_in2.reshape(batch_size, seq_num, n_labels).tolist()
        strat_vectors.append(curr_strats)
        doc_labels.append(y.tolist())
        orig_fetch.append(mid)
    return content_vectors, strat_vectors, doc_labels, orig_fetch
def get_content_strat_vector_details(content_vectors, strat_vectors, doc_labels,
                                     all_mids, attn_content_lstm, return_rate=False):
    """Score pre-computed content/strategy vectors with the attention LSTM.

    Returns (macro-F1, attention dict, per-batch labels, strategy tensors,
    X) where X is the positive-prediction rate when `return_rate` is True and
    `all_mids` otherwise.

    CLEANUP: removed the unused `acc` accumulator and the redundant
    enumerate-then-index pattern of the original (behavior unchanged).
    """
    attns = {
        "content": [],
        "strategy": [],
        "document": []
    }
    labels = []
    strategy_orders = []
    all_corr = []
    all_out = []
    with torch.no_grad():
        for content_b, strat_b, label_b in zip(content_vectors, strat_vectors, doc_labels):
            sigmoid_out, content_attn, strategy_attn, s_score = attn_content_lstm(
                torch.tensor(content_b).cuda().float(),
                torch.tensor(strat_b).cuda().float())
            # Threshold sigmoid activations into hard predictions.
            sigmoid_out = sigmoid_out > .5
            attns["document"].append(s_score)
            attns["content"].append(content_attn)
            attns["strategy"].append(strategy_attn)
            out = sigmoid_out.squeeze().tolist()
            correct = (np.array(label_b) == 1).tolist()
            all_corr += correct
            all_out += out
            strategy_orders.append(torch.tensor(strat_b))
            labels.append(correct)
    # orig-fetch is the same as all mids for the other dataloaders - we technically already compute it, but
    # i'm passing it in again just so the return signature is the same :')
    if return_rate:
        return f1_score(all_corr, all_out, average="macro"), attns, labels, strategy_orders, (sum(all_out) / len(all_out))
    return f1_score(all_corr, all_out, average="macro"), attns, labels, strategy_orders, all_mids
def get_dataloader_details(dataloader, vae_model, attn_content_lstm, n_labels, vocab):
    """Run `dataloader` through the VAE then the attention LSTM, print metrics,
    and return (macro-F1, attention dict, per-batch labels, strategy tensors,
    message ids)."""
    attns = {
        "content": [],
        "strategy": [],
        "document": []
    }
    labels = []
    strat_orders = []
    all_correct = []
    all_out = []
    all_mids = []
    with torch.no_grad():
        for batch_idx, (x, l, y, mask1, mask2, mask3, mask4, mid, sent_len, doc_len) in \
                tqdm(enumerate(dataloader), position=0, leave=True):
            # first, we're going to run our data through a VAE to get content and strategy.
            batch_size = l.shape[0]
            seq_num = x.shape[1]
            seq_len = x.shape[2]
            # One-hot encode per-sentence strategy labels.
            # NOTE(review): 10 is assumed to be the "no label" marker — confirm
            # against the dataset definition.
            temp = l.view(-1, 1).long()
            l_one_hot = torch.zeros(batch_size * seq_num, n_labels).cuda()
            for i in range(0, len(temp)):
                if temp[i] != 10:
                    l_one_hot[i][temp[i]] = 1
            l_one_hot = l_one_hot.view(batch_size, seq_num, n_labels)
            xs, ys = (x.view(batch_size * seq_num, seq_len), l.view(batch_size * seq_num))
            prob = create_generator_inputs(xs, vocab, train = False)
            x, prob, l_one_hot, y, l = x.cuda(), prob.cuda(), l_one_hot.cuda(), y.cuda(), l.cuda()
            mask1, mask2 = mask1.cuda(), mask2.cuda()
            logits, kld_z, q_y, q_y_softmax, t, strategy_embedding, y_in2, content_vec = vae_model(x,
                prob, args.tau, mask1, mask2, args.hard, l_one_hot, doc_len = doc_len, sent_len = sent_len)
            # Reshape the flattened VAE content output back to (batch, seq, feature).
            last_dim = int((content_vec.shape[0] * content_vec.shape[1]) / (batch_size * seq_num))
            content_vec = content_vec.reshape((batch_size, seq_num, last_dim))
            y_in2 = y_in2.reshape(batch_size, seq_num, n_labels)
            # next, we're going to pass it through our LSTM
            sigmoid_out, content_attn,strategy_attn, s_score = attn_content_lstm(content_vec, y_in2)
            strat_orders.append(y_in2)
            # Threshold sigmoid activations into hard predictions.
            sigmoid_out = sigmoid_out > .5
            attns["document"].append(s_score)
            attns["content"].append(content_attn)
            attns["strategy"].append(strategy_attn)
            # "mid" is just a message id -- this is useful if we want to go pick out samples from our dataset.
            all_mids.append(mid)
            out = sigmoid_out.squeeze().tolist()
            correct = (y == 1).tolist()
            labels.append(correct)
            all_correct += correct
            all_out += out
    print(str(f1_score(all_correct, all_out, average="macro")) + " f1")
    print(str(precision_score(all_correct, all_out, average="macro")) + " p")
    print(str(recall_score(all_correct, all_out, average="macro")) + " r")
    print(str(accuracy_score(all_correct, all_out)) + " acc")
    print(str(roc_auc_score(all_correct, all_out)) + " roc auc")
    return f1_score(all_correct, all_out, average="macro"), attns, labels, strat_orders, all_mids
# Driver script: run part 1 of the Diffie-Hellman key exchange exercise.
import diffie_hellman_key_exchange as dh

dh.part1()
from .create import Create # noqa
from .module import Module # noqa
|
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import sys
import json
# %%
import os
import numpy as np
from data_extraction.data_reader.data_loader import Loader
from data_extraction.data_reader.data_reader import Reader
from program_helper.program_reverse_map import ProgramRevMapper
from trainer_vae.infer import BayesianPredictor
class embedding_server():
    """Embeds program lists into the latent space of a trained trainer_vae model."""

    def __init__(self, save_path):
        # Predictor loads the checkpoint at save_path; batch size and seed fixed.
        self.encoder = BayesianPredictor(save_path, batch_size=5, seed=0)
        self.config_path = os.path.join(save_path, 'config.json')
        self.data_path = save_path
        self.prog_mapper = ProgramRevMapper(self.encoder.config.vocab)
        # Accumulated latent vectors, one per program.
        self.psi_list = []
        return

    def reset(self):
        """Clear accumulated program mappings and embeddings."""
        self.prog_mapper.reset()
        self.psi_list.clear()

    def getEmbeddings(self, logdir):
        """Read `logdir`/L4TestProgramList.json, embed every batch, and write
        the latent vectors to `logdir`/EmbeddedProgramList.json."""
        dump_data_path = self.data_path
        reader = Reader(dump_data_path=dump_data_path,
                        infer=True,
                        infer_vocab_path=self.config_path)
        reader.read_file(filename=logdir + '/L4TestProgramList.json')
        reader.wrangle()
        reader.log_info()
        reader.dump()
        loader = Loader(dump_data_path, self.encoder.config)
        # Drain the loader batch by batch until it signals exhaustion.
        while True:
            try:
                batch = loader.next_batch()
            except StopIteration:
                break
            psi = self.encoder.get_initial_state_from_next_batch(batch)
            psi_ = np.transpose(np.array(psi), [1, 0, 2])  # batch_first
            self.psi_list.extend(psi_)
            self.prog_mapper.add_batched_data(batch)
        print('\nWriting to {}...'.format(''), end='\n')
        with open(logdir + '/EmbeddedProgramList.json', 'w') as f:
            json.dump({'embeddings': [psi.tolist() for psi in self.psi_list]}, fp=f, indent=2)
        return
if __name__=="__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--python_recursion_limit', type=int, default=10000,
help='set recursion limit for the Python interpreter')
parser.add_argument('--save', type=str, default='/home/ubuntu/savedSearchModel',
help='checkpoint model during training here')
clargs = parser.parse_args()
sys.setrecursionlimit(clargs.python_recursion_limit)
|
import csv
import sys
from typing import List
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict
from arguments_parser import parse_arguments
class InferenceConfiguration(TypedDict):
    """Per-row stream counts and batch sizes expected in an inference report."""
    streams: List[int]
    batches: List[int]

# Baseline rows every report is expected to start with.
INITIAL_CONFIGURATION = InferenceConfiguration(
    streams=[1, 4],
    batches=[1, 1],
)
# Full expected configuration: the baseline plus the additional stream/batch rows.
REFERENCE_CONFIGURATION = InferenceConfiguration(
    streams=[*INITIAL_CONFIGURATION['streams'], 3, 5, 7, 3, 5],
    batches=[*INITIAL_CONFIGURATION['batches'], 2, 2, 2, 3, 3],
)
def check_inference_report_file(inference_report_path: str, reference_configuration: dict) -> int:
    """Validate a ';'-delimited inference report CSV against a reference configuration.

    Raises AssertionError on the first mismatching header column, stream, batch,
    throughput, or latency value; returns 0 when the whole report matches.
    """
    allowed_columns = ('Stream', 'Batch size', 'Throughput', 'Latency')
    with open(inference_report_path, 'r', encoding='utf-8') as report_file:
        rows = csv.reader(report_file, delimiter=';')
        header = next(rows)
        for column in header:
            assert column in allowed_columns
        for index_row, row in enumerate(rows):
            stream, batch = int(row[0]), int(row[1])
            throughput, latency = float(row[2]), float(row[3])
            assert stream and batch, 'Stream and/or batch are not numeric. Stream: {}, Batch: {}'.format(stream, batch)
            expected_stream = reference_configuration['streams'][index_row]
            assert expected_stream == stream, \
                'Streams are not from the expected configuration. Actual: {}, Expected: {}' \
                .format(stream, expected_stream)
            expected_batch = reference_configuration['batches'][index_row]
            assert expected_batch == batch, \
                'Batches are not from the expected configuration. Actual: {}, Expected: {}' \
                .format(batch, expected_batch)
            assert throughput and latency, 'Throughput and/or latency are not numeric. Throughput: {}, latency: {}' \
                .format(throughput, latency)
    return 0
if __name__ == '__main__':
    # Exit code 0 when the report matches REFERENCE_CONFIGURATION; a failed
    # assertion inside the checker propagates (non-zero exit) otherwise.
    args = parse_arguments()
    sys.exit(check_inference_report_file(args.inference_report_path, REFERENCE_CONFIGURATION))
|
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from trac.web.auth import BasicAuthentication
#: This is used as the password for synced Trac users.
# NOTE(review): Django conventionally treats password fields starting with "!"
# as unusable hashes, so these shadow accounts presumably cannot log in via
# the standard model backend — confirm that is the intent.
FAKE_PASSWORD = "!TRAC!"
class TracAuthBackend(ModelBackend):
    """
    Authentication backend backed by Trac's htpasswd file.

    Credentials are checked via Trac's ``BasicAuthentication`` (requires the
    :setting:`TICKETEER_TRAC_HTPASSWD_PATH` setting). Users present in Trac but
    unknown to Django are created on first login; Trac-synced Django users that
    have disappeared from Trac are deleted.
    """

    def get_trac_auth(self):
        """Build a BasicAuthentication over the configured htpasswd file."""
        filename = settings.TICKETEER_TRAC_HTPASSWD_PATH
        return BasicAuthentication(filename, None)

    def authenticate(self, username=None, password=None):
        # Guard clause: both credentials are required.
        if username is None or password is None:
            return None
        auth = self.get_trac_auth()
        if not auth.test(username, password):
            # Wrong password, or the account vanished from Trac entirely. In
            # the latter case drop the Django shadow account we created
            # earlier (identified by the sentinel password value).
            if username not in auth.hash:
                try:
                    stale = User.objects.get(username=username)
                except User.DoesNotExist:
                    pass
                else:
                    if stale.password == FAKE_PASSWORD:
                        stale.delete()
            return None
        try:
            return User.objects.get(username=username)
        except User.DoesNotExist:
            return User.objects.create(username=username, password=FAKE_PASSWORD)
# Desafio 74 - Aula 16 : Programa que deve gerar 5 numeros aleatórios e adicionalos a uma TUPLA.
# Depois disso mostre a listagem dos numeros gerados e indique qual é o MAIOR e qual é o MENOR.
from random import randint
# Draw five random integers in [1, 10] into an immutable tuple.
tupla = tuple(randint(1, 10) for _ in range(5))
# Print the values space-separated, matching the original "n " per item output.
print(*tupla, end=' ')
print(f'\nO maior valor da tupla é {max(tupla)} e o menor é {min(tupla)}.')
|
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Eduardo S. Scarpellini
# @author: Luiz Ozaki
__copyright__ = "Copyright 2012, Locaweb IDC"
from netl2api.l2api.utils import *
from netl2api.l2api.brocade.netironchecks import *
from netl2api.l2api.brocade.netironexceptions import *
__all__ = ["parse_interface_id", "get_interface_name", "get_short_ifname", "expand_brocade_interface_ids"]
def parse_interface_id(transport, interface_id):
    """Validate a 'stack/port' interface id against the switch and return it."""
    try:
        stack_str, port_str = interface_id.split("/")
        port = int(port_str)
        stack = int(stack_str)
    except (AttributeError, IndexError, ValueError, TypeError):
        # Non-string input, missing '/', or non-numeric pieces.
        raise NetIronInvalidParam("Invalid interface => '%s'" % interface_id)
    check_stackunit_id(stack)
    check_port_id(port)
    # The switch must report the interface under exactly the same id.
    resolved = get_interface_name(transport, stack, port)
    if resolved != interface_id:
        raise NetIronInvalidParam("No such interface => '%s'" % interface_id)
    return resolved
def get_interface_name(transport, stack, port):
    """Look up the switch's canonical name for interface `stack/port`.

    Raises NetIronInvalidParam when no brief-listing row mentions the id.
    """
    ifid = "%s/%s" % (stack, port)
    try:
        matched = [line
                   for line in transport.execute("show interfaces brief | include %s/%s"
                                                 % (stack, port)).splitlines()
                   if ifid in line][0]
    except IndexError:
        raise NetIronInvalidParam("No such interface => '%s/%s'" % (stack, port))
    # The interface name is the first whitespace-delimited token of the row.
    return matched.split(" ")[0]
get_short_ifname = lambda i: i.split(" ")[1] if " " in i else i
def expand_brocade_interface_ids(ifrange):
    """Expand a Brocade interface range expression into individual ids.

    Accepts forms like "ethe 1/1 to 1/4 2/1" (or with "ethernet"/hyphen
    ranges) and returns a flat list such as ["1/1", ..., "1/4", "2/1"].
    """
    normalized = (ifrange.replace("ethernet", "")
                         .replace("ethe", "")
                         .replace(" to ", "-")
                         .strip())
    interfaces = []
    for token in normalized.split():
        if "-" not in token:
            if token:
                interfaces.append(token)
            continue
        # "1/1-1/4" -> module "1", ports expanded from "1-4".
        range_start, range_end = token.split("-")
        module, start_port = range_start.split("/")
        end_port = range_end.split("/")[1]
        interfaces.extend("%s/%s" % (module, p)
                          for p in expand_int_ranges("%s-%s" % (start_port, end_port)))
    return interfaces
|
import os
import warnings
import numpy as np
import cutde.gpu as cluda
from .TDdispFS import TDdispFS
source_dir = os.path.dirname(os.path.realpath(__file__))
def py_disp(obs_pt, tri, slip, nu):
    """Pure-Python reference path: displacement at `obs_pt` due to one
    triangular dislocation `tri` with slip vector `slip` and Poisson's
    ratio `nu` (delegates to TDdispFS)."""
    return TDdispFS(obs_pt, tri, slip, nu)
def solve_types(obs_pts, tris, slips):
    """Choose a common float dtype and return C-contiguous arrays in that dtype.

    The dtype of the first array processed (obs_pts) decides: float32 stays
    float32; float64 and int64 promote to float64; int32 promotes to float32.
    On an OpenCL backend lacking the cl_khr_fp64 extension, float64 is
    downgraded to float32 with a warning. Mismatched-dtype and
    Fortran-ordered arrays are converted (with a warning); conforming arrays
    pass through unchanged.

    Returns (float_type, [obs_pts, tris, slips]).
    Raises ValueError for non-numeric dtypes.
    """
    type_map = {
        np.int32: np.float32,
        np.int64: np.float64,
        np.float32: np.float32,
        np.float64: np.float64,
    }
    float_type = None
    out_arrs = []
    for name, arr in [("obs_pts", obs_pts), ("tris", tris), ("slips", slips)]:
        dtype = arr.dtype.type
        if dtype not in type_map:
            raise ValueError(
                f"The {name} input array has type {arr.dtype} but must have a float or"
                " integer dtype."
            )
        if float_type is None:
            float_type = type_map[dtype]
            # If we're using OpenCL, we need to check if float64 is allowed.
            # If not, convert to float32.
            if cluda.ocl_backend:
                import cutde.opencl

                cutde.opencl.ensure_initialized()
                extensions = (
                    cutde.opencl.gpu_ctx.devices[0].extensions.strip().split(" ")
                )
                if "cl_khr_fp64" not in extensions and float_type is np.float64:
                    warnings.warn(
                        "The OpenCL implementation being used does not support "
                        "float64. This will require converting arrays to float32."
                    )
                    float_type = np.float32
        if dtype != float_type:
            warnings.warn(
                f"The {name} input array has type {arr.dtype} but needs to be converted"
                f" to dtype {np.dtype(float_type)}. Converting {name} to "
                f"{np.dtype(float_type)} may be expensive."
            )
            out_arrs.append(arr.astype(float_type))
        elif arr.flags.f_contiguous:
            warnings.warn(
                f"The {name} input array has Fortran ordering. "
                "Converting to C ordering. This may be expensive."
            )
            out_arrs.append(np.ascontiguousarray(arr))
        else:
            out_arrs.append(arr)
    return float_type, out_arrs
def check_inputs(obs_pts, tris, slips):
    """Validate the shapes of the obs_pts/tris/slips arrays.

    Raises ValueError with a descriptive message on the first violated
    constraint; returns None when all shapes are consistent.
    """
    def _require(condition, message):
        # Shared guard: checks are evaluated one at a time, in order, so a
        # failing earlier constraint never touches later shape dimensions.
        if not condition:
            raise ValueError(message)

    _require(
        obs_pts.shape[1] == 3,
        "The second dimension of the obs_pts array must be 3 because the "
        "observation points should be locations in three-dimensional space.",
    )
    _require(
        tris.shape[1] == 3,
        "The second dimension of the tris array must be 3 because there must be "
        "three vertices per triangle.",
    )
    _require(
        tris.shape[2] == 3,
        "The third dimension of the tris array must be 3 because the triangle "
        "vertices should be locations in three-dimensional space.",
    )
    _require(
        slips.shape[0] == tris.shape[0],
        "The number of input slip vectors must be equal to the number of input"
        " triangles.",
    )
    _require(
        slips.shape[1] == 3,
        "The second dimension of the slips array must be 3 because each row "
        "should be a vector in the TDE coordinate system (strike-slip, dip-slip,"
        " tensile-slip).",
    )
def call_clu(obs_pts, tris, slips, nu, fnc_name, out_dim):
    """Launch GPU kernel `fnc_name`, pairing observation point i with triangle i.

    Requires exactly one observation point per triangle; returns an
    (n, out_dim) array of results.
    """
    if tris.shape[0] != obs_pts.shape[0]:
        raise ValueError("There must be one input observation point per triangle.")
    check_inputs(obs_pts, tris, slips)
    float_type, (obs_pts, tris, slips) = solve_types(obs_pts, tris, slips)
    n = obs_pts.shape[0]
    block_size = 128
    n_blocks = int(np.ceil(n / block_size))
    # Template args specialize the kernel source for block size and dtype.
    gpu_config = dict(block_size=block_size, float_type=cluda.np_to_c_type(float_type))
    module = cluda.load_gpu("fullspace.cu", tmpl_args=gpu_config, tmpl_dir=source_dir)
    gpu_results = cluda.empty_gpu(n * out_dim, float_type)
    gpu_obs_pts = cluda.to_gpu(obs_pts, float_type)
    gpu_tris = cluda.to_gpu(tris, float_type)
    gpu_slips = cluda.to_gpu(slips, float_type)
    getattr(module, fnc_name)(
        gpu_results,
        np.int32(n),
        gpu_obs_pts,
        gpu_tris,
        gpu_slips,
        float_type(nu),
        grid=(n_blocks, 1, 1),
        block=(block_size, 1, 1),
    )
    out = gpu_results.get().reshape((n, out_dim))
    return out
def call_clu_all_pairs(obs_pts, tris, slips, nu, fnc_name, out_dim):
    """Launch the all-pairs variant of `fnc_name`: every observation point
    against every source triangle, over a 2-D launch grid.

    Returns an (n_obs, n_src, out_dim) array.
    """
    check_inputs(obs_pts, tris, slips)
    float_type, (obs_pts, tris, slips) = solve_types(obs_pts, tris, slips)
    n_obs = obs_pts.shape[0]
    n_src = tris.shape[0]
    block_size = 16
    n_obs_blocks = int(np.ceil(n_obs / block_size))
    n_src_blocks = int(np.ceil(n_src / block_size))
    gpu_config = dict(block_size=block_size, float_type=cluda.np_to_c_type(float_type))
    module = cluda.load_gpu("fullspace.cu", tmpl_args=gpu_config, tmpl_dir=source_dir)
    gpu_results = cluda.empty_gpu(n_obs * n_src * out_dim, float_type)
    gpu_obs_pts = cluda.to_gpu(obs_pts, float_type)
    gpu_tris = cluda.to_gpu(tris, float_type)
    gpu_slips = cluda.to_gpu(slips, float_type)
    getattr(module, fnc_name + "_all_pairs")(
        gpu_results,
        np.int32(n_obs),
        np.int32(n_src),
        gpu_obs_pts,
        gpu_tris,
        gpu_slips,
        float_type(nu),
        grid=(n_obs_blocks, n_src_blocks, 1),
        block=(block_size, block_size, 1),
    )
    out = gpu_results.get().reshape((n_obs, n_src, out_dim))
    return out
def disp(obs_pts, tris, slips, nu):
    """Displacement (3 components) pairing obs point i with triangle i."""
    return call_clu(obs_pts, tris, slips, nu, "disp_fullspace", 3)

def strain(obs_pts, tris, slips, nu):
    """Strain (6 components) pairing obs point i with triangle i."""
    return call_clu(obs_pts, tris, slips, nu, "strain_fullspace", 6)

def disp_all_pairs(obs_pts, tris, slips, nu):
    """Displacement for every (obs point, triangle) pair."""
    return call_clu_all_pairs(obs_pts, tris, slips, nu, "disp_fullspace", 3)

def strain_all_pairs(obs_pts, tris, slips, nu):
    """Strain for every (obs point, triangle) pair."""
    return call_clu_all_pairs(obs_pts, tris, slips, nu, "strain_fullspace", 6)
def strain_to_stress(strain, mu, nu):
    """Convert rows of 6-component strain vectors to stress via isotropic
    linear elasticity (shear modulus `mu`, Poisson's ratio `nu`).

    The first three columns are the normal components, the last three the
    shear components.
    """
    lame_lambda = 2 * mu * nu / (1 - 2 * nu)
    volumetric = np.sum(strain[:, :3], axis=1)
    stress = np.empty_like(strain)
    # Normal components pick up the lambda * tr(eps) volumetric term.
    stress[:, :3] = 2 * mu * strain[:, :3] + lame_lambda * volumetric[:, np.newaxis]
    # Shear components scale by 2*mu only.
    stress[:, 3:] = 2 * mu * strain[:, 3:]
    return stress
|
#-------------gravity.py-------------------------------------------------------#
#
# Purpose: this file intends to provide a quick demonstration of how to use
# blender python to make animations. In particular, we plan to make
# an animation of a ball dropping to the ground and one of a
# solar system
#
# Notes: To create the animation, use the following command:
# blender -b -P gravity.py
# This should create a video for you to play. You can also open the
# animation in blender by running:
# blender -P gravity.py
#
#------------------------------------------------------------------------------#
import bpy
import numpy as np
class particle:
    """A colored sphere particle: position (x, y, z), RGB color (r, g, b),
    and a radius-like size used when the sphere mesh is created.

    CLEANUP: the original also declared every field as a class attribute,
    which only shadowed the instance attributes assigned in __init__; the
    duplicates are removed so state lives solely on instances.
    """
    def __init__(self, xloc, yloc, zloc, rcolor, gcolor, bcolor, s):
        self.x = xloc
        self.y = yloc
        self.z = zloc
        self.r = rcolor
        self.g = gcolor
        self.b = bcolor
        self.size = s
def new_sphere(p, ob_id):
    """Add a UV sphere for particle `p`, name it str(ob_id), and attach a
    material in the particle's (r, g, b) color."""
    sphere = bpy.ops.mesh.primitive_uv_sphere_add(segments = 32,
                                                  ring_count = 16,
                                                  size = p.size,
                                                  location = (p.x, p.y, p.z),
                                                  rotation = (0, 0, 0))
    # The add operator makes the new sphere the active object.
    ob = bpy.context.active_object
    ob.name = str(ob_id)
    # NOTE(review): create_new_material is not defined in the visible portion
    # of this file — presumably a helper defined elsewhere; confirm it exists.
    mat = create_new_material(ob.name, (p.r, p.g, p.b))
    ob.data.materials.append(mat)
    return sphere
def clear_scene(scene):
    """Remove every object from `scene` except the camera and the lamp.

    BUG FIX: the original iterated scene.objects directly while unlinking
    from it; mutating a collection during iteration skips entries, so some
    objects survived. Iterating a snapshot removes them all.
    """
    for ob in list(scene.objects):
        if ob.name != 'Camera' and ob.name != 'Lamp':
            scene.objects.unlink(ob)
def create_scene(scale, scene):
    """Position and orient the camera (offsets scaled by `scale`), move the
    lamp, and black out the world background. Returns the same scene."""
    # Creating multiplicative factors to scale by
    scene.camera.location.x = scale * 0.43
    scene.camera.location.y = scale * 0.634
    scene.camera.location.z = scale * 0.24
    scene.camera.rotation_mode = 'XYZ'
    # Euler angles are specified in degrees and converted to radians here.
    scene.camera.rotation_euler[0] = (np.pi/180.0) * 75
    scene.camera.rotation_euler[1] = (np.pi/180.0) * 0
    scene.camera.rotation_euler[2] = (np.pi/180.0) * 145
    # change lamp position
    bpy.data.objects["Lamp"].location = (0,0,5)
    # Set background to be black
    bpy.data.worlds['World'].horizon_color = (0,0,0)
    return scene
def set_render_options(scene, res_x, res_y):
    """Configure still-image and FFMPEG movie render settings.

    NOTE(review): besides setting options this also renders one still frame
    (the bpy.ops.render.render call below) and rebinds the `scene` parameter
    to the current context scene — confirm both side effects are intentional.
    """
    # Scene resolution
    scene.render.resolution_x = res_x
    scene.render.resolution_y = res_y
    scene = bpy.context.scene
    bpy.data.scenes[0].render.image_settings.file_format="PNG"
    #bpy.data.scenes[0].render.filepath = "images/image%.5d"
    bpy.ops.render.render( write_still=True )
    scene.sequence_editor_create()
    # Movie output: 60 fps FFMPEG, near-lossless, fixed filename.
    bpy.data.scenes["Scene"].render.resolution_percentage = 100
    bpy.data.scenes["Scene"].render.fps = 60
    bpy.data.scenes["Scene"].render.image_settings.file_format = 'FFMPEG'
    #bpy.data.scenes["Scene"].render.ffmpeg.audio_codec = 'NONE'
    bpy.data.scenes["Scene"].render.ffmpeg.constant_rate_factor='PERC_LOSSLESS'
    bpy.data.scenes["Scene"].render.filepath = 'out.mkv'
    bpy.data.scenes["Scene"].render.use_file_extension = False
# Renders movie
def render_movie(scene):
    """Render the full animation using the previously configured settings."""
    print("rendering movie")
    bpy.ops.render.render( animation=True )
|
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
# Accumulator for scraped tag names and their question counts.
df = {'Language':[],'Tag_Count':[]}

def extract_tagged(url):
    """Scrape one Stack Overflow tags page, appending tag names and counts
    (as text) to the module-level `df` accumulator."""
    #print('Extracting Content')
    response=requests.get(url,timeout=5)
    content=bs(response.content, "html.parser")
    for tag in content.find_all('a',attrs={'class':'post-tag'}):
        df['Language'].append(tag.text)
    for count in content.find_all('span',attrs={'class':'item-multiplier-count'}):
        df['Tag_Count'].append(count.text)
# Scrape the first two "popular" tag pages, coerce counts to int, and print
# the result as a DataFrame.
for i in range(1,3):
    extract_tagged('http://stackoverflow.com/tags?page='+str(i)+'&tab=popular')
df['Tag_Count']=[int(i) for i in df['Tag_Count']]
df2= pd.DataFrame(df)
print(df2)
|
from datetime import datetime, timedelta
import dateutil.tz
import pytest
from plenum.common.constants import VERSION
from indy_common.constants import START, FORCE
from indy_node.test import waits
from indy_node.test.upgrade.helper import bumpedVersion, \
checkUpgradeScheduled, bumpVersion, sdk_ensure_upgrade_sent
from stp_core.loop.eventually import eventually
@pytest.fixture(scope='module')
def nodeIds(nodeSet):
    """Ids of all nodes in the pool."""
    return nodeSet[0].poolManager.nodeIds

@pytest.fixture(scope='module')
def validUpgrade(nodeIds, tconf):
    """A valid upgrade request: per-node start times begin ~100s out and are
    spaced acceptableDiff + 3 seconds apart, i.e. safely above the configured
    MinSepBetweenNodeUpgrades."""
    schedule = {}
    unow = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
    startAt = unow + timedelta(seconds=100)
    acceptableDiff = tconf.MinSepBetweenNodeUpgrades + 1
    for i in nodeIds:
        schedule[i] = datetime.isoformat(startAt)
        startAt = startAt + timedelta(seconds=acceptableDiff + 3)
    return dict(name='upgrade-13', version=bumpedVersion(), action=START,
                schedule=schedule,
                # sha256=get_valid_code_hash(),
                sha256='db34a72a90d026dae49c3b3f0436c8d3963476c77468ad955845a1ccf7b03f55',
                timeout=1)

@pytest.fixture(scope='module')
def validUpgradeExpForceFalse(validUpgrade):
    """Copy of validUpgrade with FORCE explicitly False and a bumped version."""
    nup = validUpgrade.copy()
    nup.update({FORCE: False})
    nup.update({VERSION: bumpVersion(validUpgrade[VERSION])})
    return nup

@pytest.fixture(scope='module')
def validUpgradeExpForceTrue(validUpgradeExpForceFalse):
    """Forced variant of the upgrade request, version bumped once more."""
    nup = validUpgradeExpForceFalse.copy()
    nup.update({FORCE: True})
    nup.update({VERSION: bumpVersion(validUpgradeExpForceFalse[VERSION])})
    return nup
@pytest.fixture(scope="module")
def validUpgradeSent(looper, nodeSet, tdir, sdk_pool_handle, sdk_wallet_trustee,
validUpgrade):
sdk_ensure_upgrade_sent(looper, sdk_pool_handle,
sdk_wallet_trustee, validUpgrade)
@pytest.fixture(scope="module")
def validUpgradeSentExpForceFalse(
looper,
nodeSet,
tdir,
sdk_pool_handle,
sdk_wallet_trustee,
validUpgradeExpForceFalse):
sdk_ensure_upgrade_sent(looper, sdk_pool_handle,
sdk_wallet_trustee, validUpgradeExpForceFalse)
@pytest.fixture(scope="module")
def validUpgradeSentExpForceTrue(looper, nodeSet, tdir, sdk_pool_handle,
sdk_wallet_trustee, validUpgradeExpForceTrue):
sdk_ensure_upgrade_sent(looper, sdk_pool_handle,
sdk_wallet_trustee, validUpgradeExpForceTrue)
@pytest.fixture(scope="module")
def upgradeScheduled(validUpgradeSent, looper, nodeSet, validUpgrade):
looper.run(
eventually(
checkUpgradeScheduled,
nodeSet,
validUpgrade[VERSION],
retryWait=1,
timeout=waits.expectedUpgradeScheduled()))
@pytest.fixture(scope="module")
def upgradeScheduledExpForceFalse(validUpgradeSentExpForceFalse, looper,
nodeSet, validUpgradeExpForceFalse):
looper.run(eventually(checkUpgradeScheduled, nodeSet,
validUpgradeExpForceFalse[VERSION], retryWait=1,
timeout=waits.expectedUpgradeScheduled()))
@pytest.fixture(scope="module")
def upgradeScheduledExpForceTrue(validUpgradeSentExpForceTrue, looper, nodeSet,
validUpgradeExpForceTrue):
looper.run(eventually(checkUpgradeScheduled, nodeSet,
validUpgradeExpForceTrue[VERSION], retryWait=1,
timeout=waits.expectedUpgradeScheduled()))
@pytest.fixture(scope='module')
def invalidUpgrade(nodeIds, tconf):
    """An invalid upgrade request: per-node start times are spaced
    acceptableDiff - 3 seconds apart (2s *below* the configured
    MinSepBetweenNodeUpgrades), so scheduling should be rejected."""
    schedule = {}
    unow = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
    startAt = unow + timedelta(seconds=60)
    acceptableDiff = tconf.MinSepBetweenNodeUpgrades + 1
    for i in nodeIds:
        schedule[i] = datetime.isoformat(startAt)
        startAt = startAt + timedelta(seconds=acceptableDiff - 3)
    return dict(name='upgrade-14', version=bumpedVersion(), action=START,
                schedule=schedule,
                # sha256=get_valid_code_hash(),
                sha256='46c715a90b1067142d548cb1f1405b0486b32b1a27d418ef3a52bd976e9fae50',
                timeout=10)
|
import pmpmanager.db_devices as model
import logging
from base_calls import job_exec as bass_job_exec
import json
import datetime
import uuid
def Property(func):
    """Decorator implementing the 'property factory' recipe: `func` is called
    once with no arguments and must return a dict containing fget/fset/fdel/doc
    entries (typically via `return locals()`), which is splatted into
    the builtin `property`."""
    return property(**func())
class job_exec(bass_job_exec):
def __init__(self):
    # NOTE(review): the base class (bass_job_exec) __init__ is never invoked
    # here — confirm the base class tolerates that.
    self.remotePrefix = None
    self.log = logging.getLogger("job_exec.lsblk_read")
    # Command-line template; None until configured by a caller.
    self.cmdln_template = None
@Property
def session():
    # Property-factory block: returns locals(), which Property(...) splats
    # into property(fget=..., fset=..., fdel=..., doc=...). The accessors
    # proxy the private _session attribute.
    # NOTE(review): the `doc` text "Remote upload prefix" looks copy-pasted
    # from another property — this one manages the DB session used by
    # process_device; confirm and correct the doc string separately.
    doc = "Remote upload prefix"
    def fget(self):
        return self._session
    def fset(self, value):
        self._session = value
    def fdel(self):
        del self._session
    return locals()
def process_device(self, *args, **kwargs):
session = kwargs.get('session', None)
if session == None:
session = self.session
if session == None:
self.log.error("No session set")
return False
device = kwargs.get('device', None)
if device == None:
device = self.device
if device == None:
self.log.error("No device set")
return False
device = kwargs.get('device', None)
if device == None:
device = self.device
details = kwargs.get('details', None)
if details == None:
self.log.error("No details set")
return False
#for key in details.keys():
# print key,details[key]
found_FSTYPE = details.get("FSTYPE")
found_GROUP = details.get("GROUP")
found_KNAME = details.get("KNAME")
found_MAJ_MIN = details.get("MAJ:MIN")
found_MODE = details.get("MODE")
found_MOUNTPOINT = details.get("MOUNTPOINT")
found_NAME = details.get("NAME")
found_OWNER = details.get("OWNER")
found_PARTUUID = details.get("PARTUUID")
found_RM = details.get("RM")
found_SERIAL = details.get("SERIAL")
found_SIZE = details.get("SIZE")
found_UUID = details.get("UUID")
found_VENDOR = details.get("VENDOR")
found_WWN = details.get("WWN")
found_MAJ = None
found_MIN = None
if found_MAJ_MIN != None:
found_MAJ,found_MIN = found_MAJ_MIN.split(":")
self.log.info("found_NAME=%s" % (found_NAME))
self.log.info("found_KNAME=%s" % (found_KNAME))
devPath = "/dev/%s" % found_KNAME
instance_query = session.query(model.Block).\
filter(model.Block.devPath == devPath)
if instance_query.count() == 0:
self.log.info("Adding device:%s" % (found_KNAME))
newblock = model.Block()
newblock.devPath = devPath
session.add(newblock)
session.commit()
instance_query = session.query(model.Block).\
filter(model.Block.devPath == devPath)
blockinDb = instance_query.one()
changed = False
if found_KNAME != None:
if blockinDb.devPath != devPath:
blockinDb.devPath = devPath
changed = True
self.log.info("Updating device '%s' devName with:%s" % (device,found_KNAME))
if found_MAJ != None:
if blockinDb.devicenodes_major != found_MAJ:
blockinDb.devicenodes_major = found_MAJ
changed = True
self.log.info("Updating device '%s' devicenodes_major with:%s" % (device,found_MAJ))
if found_MIN != None:
if blockinDb.devicenodes_minor != found_MIN:
blockinDb.devicenodes_minor = found_MIN
changed = True
self.log.info("Updating device '%s' devicenodes_minor with:%s" % (device,found_MIN))
if found_RM != None:
if blockinDb.device_removable != found_RM:
blockinDb.device_removable = found_RM
changed = True
self.log.info("Updating device '%s' devicenodes_minor with:%s" % (device,found_RM))
if changed:
session.add(blockinDb)
session.commit()
information = model.BlockUpdateLsblk()
information.fk_block = blockinDb.id
information.created = datetime.datetime.now()
information.lsblk_fstype = found_FSTYPE
information.lsblk_group = found_GROUP
information.lsblk_mode = found_MODE
information.lsblk_mountpoint = found_MOUNTPOINT
information.lsblk_name = found_NAME
information.lsblk_owner = found_OWNER
information.lsblk_partuuid = found_PARTUUID
information.lsblk_rm = found_RM
information.lsblk_serial = found_SERIAL
information.lsblk_size = found_SIZE
information.lsblk_uuid = found_UUID
information.lsblk_vendor = found_VENDOR
information.lsblk_wwn = found_WWN
session.add(information)
session.commit()
def run(self, *args, **kwargs):
self.log = logging.getLogger("job_exec.mount_read")
session = kwargs.get('session', None)
if session == None:
session = self.session
if session == None:
self.log.error("No session set")
return False
json_input = json.loads(self.inputjson)
block_mounts_unfiltered_found = set()
mounts_known = set()
for item in json_input.keys():
#self.log.error("json_input[%s]=%s" % (item,json_input[item]))
block_mounts_unfiltered_found.add(item)
blocks_known = set()
block_query = session.query(model.Block)
for item in block_query:
blocks_known.add(item.devPath)
#self.log.debug("blocks_known=%s" % blocks_known)
block_mounts_filtered_found = blocks_known.intersection(block_mounts_unfiltered_found)
self.log.error("block_mounts_filtered_found=%s" % block_mounts_filtered_found)
filesystem_type_found = set()
mountpoints_found = set()
block_mountpoint_pair = set()
for item in block_mounts_filtered_found:
#self.log.error("process=%s" % item)
mountpoint = json_input[item]["mountpoint"]
mountpoints_found.add(mountpoint)
filesystem_type = json_input[item]["filesystem"]
filesystem_type_found.add(filesystem_type)
block_mountpoint_pair.add((item,mountpoint))
self.log.error("filesystem_type_found=%s" % filesystem_type_found)
self.log.error("mountpoints_found=%s" % mountpoints_found)
self.log.error("block_mounts_filtered_found=%s" % block_mounts_filtered_found)
self.log.error("block_mountpoint_pair=%s" % block_mountpoint_pair)
mountpoint_queried = set()
mountpoint_query = session.query(model.MountPoint)
for item in mountpoint_query:
mountpoint_queried.add(item.MountPoint)
mountpoint_extra = mountpoint_queried.difference(mountpoints_found)
mountpoint_missing = mountpoints_found.difference(mountpoint_queried)
self.log.error("mountpoint_extra=%s" % mountpoint_extra)
self.log.error("mountpoint_missing=%s" % mountpoint_missing)
# update mountpints
changed = False
for item in mountpoint_extra:
foundMp = session.query(model.MountPoint).\
filter(model.MountPoint.mountpoint == item)
for mp in foundMp:
session.delete(mp)
changed = True
for item in mountpoint_missing:
newMp = model.MountPoint()
newMp.mountpoint = item
session.add(newMp)
changed = True
if changed:
session.commit()
# Delete extra mounts:
Changed = False
mount_query = session.query(model.Block,model.MountPoint,model.Mount).\
filter(model.MountPoint.id == model.Mount.fk_mountpoint).\
filter(model.Mount.fk_block == model.Block.id)
for item in mount_query:
block = item[0]
mpount = item[1]
mount = item[2]
if block.devPath in block_mounts_filtered_found:
if mpount.mountpoint in mountpoints_found:
continue
session.delete(mount)
Changed = True
if Changed:
session.commit()
for item in block_mountpoint_pair:
str_device = item[0]
str_mountPoint = item[1]
self.log.error("adding=%s,%s" % (item))
device_query = session.query(model.Block).\
filter(model.Block.devPath == str_device)
device = device_query.one()
mountPoint_query = session.query(model.MountPoint).\
filter(model.MountPoint.mountpoint == str_mountPoint)
mountPoint = mountPoint_query.one()
newMount = model.Mount()
newMount.fk_block = device.id
newMount.fk_mountpoint = mountPoint.id
session.add(newMount)
session.commit()
output = []
for device in json_input.keys():
cmdln = "udevadm info -q all -n /dev/%s" % (device)
device_output = {
"cmdln" : cmdln
}
output.append(device_output)
self.outputjson = json.dumps(output,sort_keys=True, indent=4)
self.returncode = 0
self.stdout = ""
|
"""This is a stand-alone script installed as **pywgrib2**.
It provides the following functionality:
1. List content of a GRIB2 file (same as **wgrib2** without options).
2. Create inventory files.
3. List content of inventory files.
4. Convert GRIB2 files to netCDF4.
5. Convert GRIB2 files to zarr.
6. Emulates **wgrib2** executable.
"""
from functools import partial
import getopt
import glob
from multiprocessing.pool import Pool
import pickle
import os
import sys
from typing import List, Optional # , Sequence
from .inventory import (
FileMetaData,
MetaData,
load_inventory,
make_inventory,
save_inventory,
)
from .template import make_template
from .wgrib2 import wgrib
from .xarray_store import open_dataset
# File-name extension used for binary (pickled) inventory files.
_inv_ext = ".binv"

# Help text printed by main() when the -h flag is given.
USAGE = """USAGE: pywgrib2 [-h] | [command [option ...]] argument ...
where:
-h
Print this message and exit.
When command is not specified, the script emulates wgrib2 executable.
command:
list_inv
Displays content of GRIB file(s).
Options:
-L
Long listing (all metadata). Default is short
Arguments:
gribfile ...
One or more GRIB files.
make_inv
Makes inventory file[s]
Options:
-i inv-dir
Directory for inventory files. Will be created if does not exist.
Intended for read-only GRIB directories.
-n num-procs
Number of processes in multiprocessing mode. Default is 1.
-p pattern
"glob.glob" pattern.
-r
Recursively search grib-dir subdirectories.
Arguments:
gribfile ...
Zero (only if -r or -p is specified) or more GRIB files.
cat_inv
Lists content of inventory file[s].
Use when inventories coexist with GRIB files.
Options:
-d dir
Directory of GRIB files
-r
Recursively search subdirectories for inventory files.
Arguments:
gribfile ...
Zero or more GRIB files. The extension ".binv" does not need
to be included.
The final list of files comprises directory entries and explicit
paths.
cat_hash
Lists content of inventory file[s].
Use when inventories are not collocated with GRIB files
(i.e. -i inv_dir was specified for make_inv).
Arguments:
invfile ...
One or more GRIB files. The extension ".binv" does not need
to be included.
template:
Writes template file.
Options:
-i inv_dir
Location of inventory files, if different from GRIB files.
-t reftime
Reference time, necessary when GRIB files have messages
with more than one reference time.
-o template
Output file name. Must be specified.
Arguments:
gribfile ...
One or more GRIB files.
to_nc:
Writes netcdf file.
Options:
-c
Compress file, with zlib and compression level 1.
-o ncfile
Output file name. Must be specified.
-T template
Template file name. Must be specified.
Arguments:
gribfile ...
One or more GRIB files.
to_zarr:
Writes zarr group.
Options:
-c level
Compression level, an integer between 1 and 4. Default is 1.
-o store
Output directory. Must be specified.
-T template
Template file name. Must be specified.
Arguments:
gribfile ...
One or more GRIB files.
"""
def _print_inventory(inventory: Optional[List[MetaData]], listing: str) -> None:
    """Print an inventory in one of three styles.

    ``"full"`` prints every message's metadata individually; ``"long"``
    prints the repr of the file-level summary; anything else prints the
    short file-level summary. Empty or None inventories are ignored.
    """
    if not inventory:
        return
    if listing == "full":
        for entry in inventory:
            print(entry)
        return
    file_inv = FileMetaData(inventory[0].file, inventory)
    print(repr(file_inv) if listing == "long" else file_inv)
def list_inv(args: List[str]) -> None:
    """List the content of one or more GRIB files.

    Option -L selects the long listing; the default is short. Non-file
    arguments and files without GRIB messages are reported on stdout.
    """
    opts, pargs = getopt.getopt(args, "L")
    kwds = dict(opts)
    # BUG FIX: "-L" is a value-less flag, so dict(opts) maps it to "";
    # the old kwds.get("-L", "short") therefore never yielded "long".
    listing = "long" if "-L" in kwds else "short"
    for p in pargs:
        if os.path.isfile(p):
            inventory = make_inventory(p)
            if inventory:
                _print_inventory(inventory, listing)
            else:
                print("No GRIB messages in {:s}".format(p))
        else:
            print("{:s} is not a file".format(p))
def _f(p, d):
    # Module-level helper (must be picklable for multiprocessing.Pool):
    # build the inventory of GRIB file `p` and save it under directory `d`.
    save_inventory(make_inventory(p), p, d)


def make_inv(args: List[str]) -> None:
    """Create inventory files for GRIB files.

    Options: -i inventory directory, -n number of processes (1..4),
    -p glob pattern, -r recursive glob. Remaining arguments are
    explicit GRIB file paths.

    Raises ValueError when the number of processes is out of range.
    """
    # BUG FIX: "r" was missing from the option string, so the documented
    # -r (recursive) flag raised getopt.GetoptError.
    opts, pargs = getopt.getopt(args, "hi:n:p:r")
    kwds = dict(opts)
    recursive = "-r" in kwds
    inv_dir = kwds.get("-i")
    num_processes = int(kwds.get("-n", 1))
    if not 1 <= num_processes <= 4:
        raise ValueError("Number of processes must be between 1 and 4")
    pattern = kwds.get("-p")
    if pattern:
        files = [
            p
            for p in glob.glob(pattern, recursive=recursive)
            if os.path.isfile(p) and not p.endswith(_inv_ext)
        ]
    else:
        files = []
    files.extend(pargs)
    fun = partial(_f, d=inv_dir)
    if num_processes == 1:
        for file in files:
            fun(file)
    else:
        with Pool(num_processes) as pool:
            pool.map(fun, files)
def cat_inv(args: List[str]) -> None:
    """List the content of inventory files that live next to GRIB files.

    Options: -d data directory to scan for inventories, -r recursive
    scan, -L long listing. Remaining arguments are explicit paths (the
    ".binv" extension is stripped before loading).
    """
    opts, pargs = getopt.getopt(args, "d:hi:Lr")
    kwds = dict(opts)
    recursive = "-r" in kwds
    data_dir = kwds.get("-d")
    # BUG FIX: "-L" carries no value (dict(opts) maps it to ""), so test
    # for its presence instead of reading its value.
    listing = "long" if "-L" in kwds else "short"
    if data_dir:
        if recursive:
            pattern = os.path.join(data_dir, "**", "*" + _inv_ext)
        else:
            pattern = os.path.join(data_dir, "*" + _inv_ext)
        files = glob.glob(pattern, recursive=recursive)
    else:
        files = []
    files.extend(pargs)
    for file in files:
        base, ext = os.path.splitext(file)
        # load_inventory expects the path without the .binv extension.
        f = base if ext == _inv_ext else file
        inventory = load_inventory(f)
        _print_inventory(inventory, listing)
def cat_hash(args: List[str]) -> None:
    """List the content of explicitly named inventory files.

    Use when inventories are not collocated with the GRIB files. The
    ".binv" extension is stripped before loading. Option -L selects the
    long listing.
    """
    opts, pargs = getopt.getopt(args, "L")
    kwds = dict(opts)
    # BUG FIX: "-L" is a value-less flag; dict(opts) maps it to "", so
    # kwds.get("-L", "short") could never yield "long".
    listing = "long" if "-L" in kwds else "short"
    for arg in pargs:
        base, ext = os.path.splitext(arg)
        file = base if ext == _inv_ext else arg
        inventory = load_inventory(file)
        _print_inventory(inventory, listing)
def mk_tmpl(args) -> None:
    """Build a template from GRIB file arguments and pickle it to disk.

    Options: -i inventory directory, -t reference time, -v vertical
    levels, -o output file (mandatory).

    Raises ValueError when the mandatory -o output file is missing.
    """
    opts, pargs = getopt.getopt(args, "i:o:t:v:")
    kwds = dict(opts)
    inv_dir = kwds.get("-i")
    tmplfile = kwds.get("-o")
    if tmplfile is None:
        raise ValueError("Missing output file")
    template = make_template(
        pargs,
        reftime=kwds.get("-t"),
        invdir=inv_dir,
        vertlevels=kwds.get("-v"),
    )
    with open(tmplfile, "wb") as fp:
        pickle.dump(template, fp)
def to_nc(args: List[str]) -> None:
    """Convert GRIB files to a netCDF4 file.

    Options: -c enable zlib compression (level 1), -o output file
    (mandatory), -T template file (mandatory).

    Raises ValueError when a mandatory option is missing.
    """
    opts, pargs = getopt.getopt(args, "co:T:")
    kwds = dict(opts)
    compress = "-c" in kwds
    ncfile = kwds.get("-o")
    if ncfile is None:
        raise ValueError("Missing output file")
    tmplfile = kwds.get("-T")
    if tmplfile is None:
        raise ValueError("Missing template file")
    with open(tmplfile, "rb") as fp:
        template = pickle.load(fp)
    ds = open_dataset(pargs, template=template)
    # Renamed from `vars` to avoid shadowing the builtin.
    var_names = list(ds.data_vars.keys())
    var_names.extend(["longitude", "latitude"])
    encoding = (
        dict.fromkeys(var_names, {"zlib": True, "complevel": 1})
        if compress
        else None
    )
    ds.to_netcdf(ncfile, engine="netcdf4", encoding=encoding)
def to_zarr(args: List[str]) -> None:
    """Convert GRIB files to a consolidated zarr store.

    Options: -c Blosc/zstd compression level, -o output directory
    (mandatory), -T template file (mandatory).

    Raises ValueError when the compression level is out of range or a
    mandatory option is missing.
    """
    opts, pargs = getopt.getopt(args, "c:o:T:")
    kwds = dict(opts)
    clevel = int(kwds.get("-c", 1))
    # NOTE(review): USAGE says the level must be between 1 and 4, but the
    # code accepts the full Blosc range 1..9 — confirm the intended limit.
    if not 1 <= clevel <= 9:
        raise ValueError("Invalid compression level {:d}".format(clevel))
    zarrdir = kwds.get("-o")
    if zarrdir is None:
        raise ValueError("Missing output file")
    tmplfile = kwds.get("-T")
    if tmplfile is None:
        raise ValueError("Missing template file")
    # Import zarr only after the cheap argument validation has passed, so
    # usage errors are reported even when zarr is not installed.
    import zarr

    with open(tmplfile, "rb") as fp:
        template = pickle.load(fp)
    ds = open_dataset(pargs, template=template)
    compressor = zarr.Blosc(cname="zstd", shuffle=-1, clevel=clevel)
    # Renamed from `vars` to avoid shadowing the builtin.
    var_names = list(ds.data_vars.keys())
    var_names.extend(["longitude", "latitude"])
    encoding = dict.fromkeys(var_names, {"compressor": compressor})
    ds.to_zarr(zarrdir, consolidated=True, encoding=encoding)
# Dispatch table mapping sub-command names to their handler functions;
# consulted by main() before falling back to wgrib2 emulation.
commands = {
    "list_inv": list_inv,
    "make_inv": make_inv,
    "cat_inv": cat_inv,
    "cat_hash": cat_hash,
    "template": mk_tmpl,
    "to_nc": to_nc,
    "to_zarr": to_zarr,
}
def main(argv: Optional[List[str]] = None):
    """Entry point for the **pywgrib2** script.

    :param argv: Argument list; when None, ``sys.argv[1:]`` is used so
        the function can also be called programmatically with explicit
        arguments.
    """
    if argv is None:
        # BUG FIX: the old `if not argv` also replaced an explicitly
        # empty argument list with sys.argv; only fall back when the
        # caller supplied nothing at all.
        argv = sys.argv[1:]
    if argv:
        if argv[0] == "-h":
            print(USAGE)
            raise SystemExit
        f = commands.get(argv[0])
        if f:
            f(argv[1:])
        else:
            # Unknown sub-command: treat all arguments as a wgrib2 call.
            wgrib(*argv)


if __name__ == "__main__":
    main()
|
#!/usr/local/hoplite/bin/python3
import unittest
import os
import sys
import socket
sys.path.insert(0, os.path.realpath(os.path.join("..", "lib")))
import influxdb2
# Well-formed /api/v2/orgs response document used by the happy-path test.
VALID_JSON = """
{
"links": {
"self": "/api/v2/orgs/1",
"members": "/api/v2/orgs/1/members",
"owners": "/api/v2/orgs/1/owners",
"labels": "/api/v2/orgs/1/labels",
"secrets": "/api/v2/orgs/1/secrets",
"buckets": "/api/v2/buckets?org=myorg",
"tasks": "/api/v2/tasks?org=myorg",
"dashboards": "/api/v2/dashboards?org=myorg"
},
"id": "abcdef0123456789",
"name": "example.com",
"description": "Example Company",
"createdAt": "2019-08-24T14:15:22Z",
"updatedAt": "2019-08-24T14:15:22.4485Z",
"status": "inactive"
}
"""

# Syntactically valid JSON whose "updatedAt" value is empty — must be
# rejected by Org.from_json with a ValueError.
INVALID_JSON_VALUES = """
{
"links": {
"self": "/api/v2/orgs/1",
"members": "/api/v2/orgs/1/members",
"owners": "/api/v2/orgs/1/owners",
"labels": "/api/v2/orgs/1/labels",
"secrets": "/api/v2/orgs/1/secrets",
"buckets": "/api/v2/buckets?org=myorg",
"tasks": "/api/v2/tasks?org=myorg",
"dashboards": "/api/v2/dashboards?org=myorg"
},
"id": "abcdef0123456789",
"name": "example.com",
"description": "Example Company",
"createdAt": "2019-08-24T14:15:22Z",
"updatedAt": "",
"status": "inactive"
}
"""

# Syntactically broken JSON — must also raise ValueError.
INVALID_JSON = """
[}
"""
class TestOrgs(unittest.TestCase):
    """Tests for influxdb2.obj.org.Org: defaults, validation and JSON
    parsing behaviour."""

    def test_empty(self):
        """A freshly constructed Org has defaults and no identity."""
        org = influxdb2.obj.org.Org()
        with self.assertRaises(ValueError):
            org.name
        with self.assertRaises(ValueError):
            org.id
        self.assertIsNone(org.description)
        self.assertEqual(org.updated, 0.0)
        self.assertEqual(org.created, 0.0)
        self.assertEqual(org.active, True)

    def test_invalid(self):
        """Assigning None to the name is rejected."""
        org = influxdb2.obj.org.Org()
        with self.assertRaises(ValueError):
            org.name = None

    def test_valid_json(self):
        """Parsing a well-formed org document populates every field."""
        org = influxdb2.obj.org.Org()
        org.from_json(VALID_JSON)
        self.assertEqual(org.id, "abcdef0123456789")
        self.assertEqual(org.name, "example.com")
        self.assertEqual(org.description, "Example Company")
        self.assertEqual(org.created, 1566656122.0)
        self.assertEqual(org.updated, 1566656122.4485)
        self.assertEqual(org.active, False)
        self.assertEqual(org.links["self"], "/api/v2/orgs/1")

    def test_invalid_json(self):
        """Bad JSON raises ValueError and leaves the object reset."""
        org = influxdb2.obj.org.Org()
        with self.assertRaises(ValueError):
            org.from_json(INVALID_JSON)
        with self.assertRaises(ValueError):
            org.from_json(INVALID_JSON_VALUES)
        # After a failed parse the object must be back at its defaults
        # (exercises the internal _reset behaviour).
        with self.assertRaises(ValueError):
            org.name
        with self.assertRaises(ValueError):
            org.id
        self.assertIsNone(org.description)
        self.assertEqual(org.updated, 0.0)
        self.assertEqual(org.created, 0.0)
        self.assertEqual(org.active, True)
        self.assertEqual(len(org.links), 0)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ObsPy client for the IRIS Syngine service.
:copyright:
The ObsPy Development Team (devs@obspy.org)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from future.utils import native_str
import collections
import io
import zipfile
import numpy as np
import obspy
from obspy.core import AttribDict
from obspy.core import compatibility
from ..base import WaveformClient, HTTPClient, DEFAULT_USER_AGENT, \
ClientHTTPException
class Client(WaveformClient, HTTPClient):
"""
Client for the IRIS Syngine service.
"""
def __init__(self, base_url="http://service.iris.edu/irisws/syngine/1",
             user_agent=DEFAULT_USER_AGENT, debug=False, timeout=20):
    """
    Initializes a Syngine Client.

    :param base_url: The base URL of the service.
    :type base_url: str
    :param user_agent: The user agent sent along the HTTP request.
    :type user_agent: str
    :param debug: Debug on/off.
    :type debug: bool
    :param timeout: The socket timeout.
    :type timeout: float
    """
    HTTPClient.__init__(self, debug=debug, timeout=timeout,
                        user_agent=user_agent)
    # Store the base URL without a trailing slash so _get_url() can
    # safely join it with endpoint paths.
    self._base_url = base_url.rstrip("/")
def _get_url(self, path):
    # Join the service base URL (no trailing slash) with an endpoint path.
    return "%s/%s" % (self._base_url, path)
def _handle_requests_http_error(self, r):
    """Raise a ClientHTTPException describing a failed HTTP download."""
    body = compatibility.get_text_from_response(r)
    msg = "HTTP code %i when downloading '%s':\n\n%s" % (
        r.status_code, r.url, body)
    raise ClientHTTPException(msg.strip())
def get_model_info(self, model_name):
    """
    Get some information about a particular model.

    .. rubric:: Example

    >>> from obspy.clients.syngine import Client
    >>> c = Client()
    >>> db_info = c.get_model_info(model_name="ak135f_5s")
    >>> print(db_info.period)
    5.125

    :param model_name: The name of the model. Case insensitive.
    :type model_name: str
    :returns: A dictionary with more information about any model.
    :rtype: :class:`obspy.core.util.attribdict.AttribDict`
    """
    normalized_name = model_name.strip().lower()
    response = self._download(self._get_url("info"),
                              params={"model": normalized_name})
    info = AttribDict(compatibility.get_json_from_response(response))
    # Slip and slip-rate arrive as plain lists; convert them to numpy
    # arrays for easier downstream handling.
    info.slip = np.array(info.slip, dtype=np.float64)
    info.sliprate = np.array(info.sliprate, dtype=np.float64)
    return info
def get_available_models(self):
    """
    Get information about all available velocity models.
    """
    response = self._download(self._get_url("models"))
    return compatibility.get_json_from_response(response)
def get_service_version(self):
    """
    Get the service version of the remote Syngine server.
    """
    response = self._download(self._get_url("version"))
    return compatibility.get_text_from_response(response)
def _convert_parameters(self, model, **kwargs):
    """
    Convert and type-check user-supplied keyword arguments into the
    flat parameter dictionary sent to the Syngine service.

    :param model: Model name; mandatory, stripped and lower-cased.
    :param kwargs: Any of the query parameters accepted by
        :meth:`get_waveforms`; ``None`` values are skipped entirely.
    :returns: dict mapping Syngine query parameter names to values.
    :raises ValueError: If ``model`` is empty or a string argument is
        an empty string after stripping.
    """
    model = model.strip().lower()
    if not model:
        raise ValueError("Model must be given.")

    params = {"model": model}

    # Error handling is mostly delegated to the actual Syngine service.
    # Here we just check that the types are compatible.
    str_arguments = ["network", "station", "networkcode", "stationcode",
                     "locationcode", "eventid", "label", "components",
                     "units", "format"]
    float_arguments = ["receiverlatitude", "receiverlongitude",
                       "sourcelatitude", "sourcelongitude",
                       "sourcedepthinmeters", "scale", "dt"]
    int_arguments = ["kernelwidth"]
    time_arguments = ["origintime"]

    # Coerce each present, non-None argument to its expected type.
    for keys, t in ((str_arguments, native_str),
                    (float_arguments, float),
                    (int_arguments, int),
                    (time_arguments, obspy.UTCDateTime)):
        for key in keys:
            try:
                value = kwargs[key]
            except KeyError:
                continue
            if value is None:
                continue
            value = t(value)
            # String arguments are stripped and empty strings are not
            # allowed.
            if t is native_str:
                value = value.strip()
                if not value:
                    raise ValueError("String argument '%s' must not be "
                                     "an empty string." % key)
            params[key] = t(value)

    # These can be absolute times, relative times or phase relative times.
    temporal_bounds = ["starttime", "endtime"]
    for key in temporal_bounds:
        try:
            value = kwargs[key]
        except KeyError:
            continue
        if value is None:
            continue
        # If a number, convert to a float.
        elif isinstance(value, (int, float)):
            value = float(value)
        # If a string like object, attempt to parse it to a datetime
        # object, otherwise assume it's a phase-relative time and let the
        # Syngine service deal with the error handling.
        elif isinstance(value, (str, native_str)):
            try:
                value = obspy.UTCDateTime(value)
            except Exception:
                pass
        # Last but not least just try to pass it to the datetime
        # constructor without catching the error.
        else:
            value = obspy.UTCDateTime(value)
        params[key] = native_str(value)

    # These all have to be lists of floats. Otherwise it fails.
    # Each is serialised as a comma-separated list of %g-formatted floats.
    source_mecs = ["sourcemomenttensor",
                   "sourcedoublecouple",
                   "sourceforce"]
    for key in source_mecs:
        try:
            value = kwargs[key]
        except KeyError:
            continue
        if value is None:
            continue
        value = ",".join(["%g" % float(_i) for _i in value])
        params[key] = value

    return params
def __read_to_stream(self, r):
    """Parse an HTTP response body into an ObsPy Stream.

    First tries obspy.read() directly on the payload; on failure the
    payload is treated as a SAC zip archive and every member except the
    service log file is read into the stream.
    """
    with io.BytesIO(r.content) as buf:
        try:
            # Plain payload (e.g. miniSEED) that obspy.read understands.
            return obspy.read(buf)
        except Exception:
            pass
        # Fall back to saczip. Rewind first, as the failed read attempt
        # may already have consumed some bytes.
        buf.seek(0, 0)
        st = obspy.Stream()
        archive = zipfile.ZipFile(buf)
        for member in archive.namelist():
            # Skip the log file.
            if member.lower() == "syngine.log":
                continue
            with io.BytesIO(archive.read(member)) as member_buf:
                st += obspy.read(member_buf)
    return st
def get_waveforms(
        self, model, network=None, station=None,
        receiverlatitude=None, receiverlongitude=None,
        networkcode=None, stationcode=None, locationcode=None,
        eventid=None, sourcelatitude=None, sourcelongitude=None,
        sourcedepthinmeters=None, sourcemomenttensor=None,
        sourcedoublecouple=None, sourceforce=None, origintime=None,
        starttime=None, endtime=None, label=None, components=None,
        units=None, scale=None, dt=None, kernelwidth=None,
        format="miniseed", filename=None):
    """
    Request waveforms using the Syngine service.

    This method is strongly tied to the actual implementation on the
    server side. The default values and all the exception handling are
    deferred to the service. Please see `the Syngine documentation
    <https://ds.iris.edu/ds/products/syngine/>`_ for more details and the
    default values of all parameters.

    .. rubric:: Example

    >>> from obspy.clients.syngine import Client
    >>> client = Client()
    >>> st = client.get_waveforms(model="ak135f_5s", network="IU",
    ...                           station="ANMO",
    ...                           eventid="GCMT:C201002270634A")
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    IU.ANMO.SE.MXZ | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples
    IU.ANMO.SE.MXN | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples
    IU.ANMO.SE.MXE | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples

    :param model: Specify the model.
    :type model: str
    :param network: Specify a network code combined with ``station`` to
        identify receiver coordinates of an operating station.
    :type network: str
    :param station: Specify a station code combined with ``network`` to
        identify receiver coordinates of an operating station.
    :type station: str
    :param receiverlatitude: Specify the receiver latitude in degrees.
    :type receiverlatitude: float
    :param receiverlongitude: Specify the receiver longitude in degrees.
    :type receiverlongitude: float
    :param networkcode: Specify the network code for the synthetics.
        Optional when using ``receiverlatitude`` and ``receiverlongitude``.
    :type networkcode: str
    :param stationcode: Specify the station code for the synthetics.
        Optional when using ``receiverlatitude`` and ``receiverlongitude``.
    :type stationcode: str
    :param locationcode: Specify the location code for the synthetics.
        Optional in any usage.
    :type locationcode: str
    :param eventid: Specify an event identifier in the form
        [catalog]:[eventid]. The centroid time and location and moment
        tensor of the solution will be used as the source.
    :type eventid: str
    :param sourcelatitude: Specify the source latitude.
    :type sourcelatitude: float
    :param sourcelongitude: Specify the source longitude.
    :type sourcelongitude: float
    :param sourcedepthinmeters: Specify the source depth in meters.
    :type sourcedepthinmeters: float
    :param sourcemomenttensor: Specify a source in moment tensor
        components as a list: ``Mrr``, ``Mtt``, ``Mpp``, ``Mrt``, ``Mrp``,
        ``Mtp`` with values in Newton meters (*Nm*).
    :type sourcemomenttensor: list of floats
    :param sourcedoublecouple: Specify a source as a double couple. The
        list of values are ``strike``, ``dip``, ``rake`` [, ``M0`` ],
        where strike, dip and rake are in degrees and M0 is the scalar
        seismic moment in Newton meters (Nm). If not specified, a value
        of *1e19* will be used as the scalar moment.
    :type sourcedoublecouple: list of floats
    :param sourceforce: Specify a force source as a list of ``Fr``, ``Ft``,
        ``Fp`` in units of Newtons (N).
    :type sourceforce: list of floats
    :param origintime: Specify the source origin time. This must be
        specified as an absolute date and time.
    :type origintime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param starttime: Specifies the desired start time for the synthetic
        trace(s). This may be specified as either:

        * an absolute date and time
        * a phase-relative offset
        * an offset from origin time in seconds

        If the value is recognized as a date and time, it is interpreted
        as an absolute time. If the value is in the form
        ``phase[+-]offset`` it is interpreted as a phase-relative time,
        for example ``P-10`` (meaning P wave arrival time minus 10
        seconds). If the value is a numerical value it is interpreted as an
        offset, in seconds, from the ``origintime``.
    :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`, str, or
        float
    :param endtime: Specifies the desired end time for the synthetic
        trace(s). This may be specified as either:

        * an absolute date and time
        * a phase-relative offset
        * an offset from start time in seconds

        If the value is recognized as a date and time, it is interpreted
        as an absolute time. If the value is in the form
        ``phase[+-]offset`` it is interpreted as a phase-relative time,
        for example ``P+10`` (meaning P wave arrival time plus 10
        seconds). If the value is a numerical value it is interpreted as an
        offset, in seconds, from the ``starttime``.
    :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`, str,
        or float
    :param label: Specify a label to be included in file names and HTTP
        file name suggestions.
    :type label: str
    :param components: Specify the orientation of the synthetic
        seismograms as a list of any combination of ``Z`` (vertical),
        ``N`` (north), ``E`` (east), ``R`` (radial), ``T`` (transverse)
    :type components: str or list of strings.
    :param units: Specify either ``displacement``, ``velocity`` or
        ``acceleration`` for the synthetics. The length unit is meters.
    :type units: str
    :param scale: Specify an amplitude scaling factor. The default
        amplitude length unit is meters.
    :type scale: float
    :param dt: Specify the sampling interval in seconds. Only upsampling
        is allowed so this value must be larger than the intrinsic interval
        of the model database.
    :type dt: float
    :param kernelwidth: Specify the width of the sinc kernel used for
        resampling to requested sample interval (``dt``), relative to the
        original sampling rate.
    :type kernelwidth: int
    :param format: Specify output file to be either miniSEED or a ZIP
        archive of SAC files, either ``miniseed`` or ``saczip``.
    :type format: str
    :param filename: Will download directly to the specified file. If
        given, this method will return nothing.
    :type filename: str or file-like object
    """
    # Collect every keyword argument; _convert_parameters() drops the
    # None entries and validates/serialises the rest.
    arguments = {
        "network": network,
        "station": station,
        "receiverlatitude": receiverlatitude,
        "receiverlongitude": receiverlongitude,
        "networkcode": networkcode,
        "stationcode": stationcode,
        "locationcode": locationcode,
        "eventid": eventid,
        "sourcelatitude": sourcelatitude,
        "sourcelongitude": sourcelongitude,
        "sourcedepthinmeters": sourcedepthinmeters,
        "sourcemomenttensor": sourcemomenttensor,
        "sourcedoublecouple": sourcedoublecouple,
        "sourceforce": sourceforce,
        "origintime": origintime,
        "starttime": starttime,
        "endtime": endtime,
        "label": label,
        "components": components,
        "units": units,
        "scale": scale,
        "dt": dt,
        "kernelwidth": kernelwidth,
        "format": format,
        "filename": filename}
    params = self._convert_parameters(model=model, **arguments)
    r = self._download(url=self._get_url("query"), params=params,
                       filename=filename)
    # A given filename will write directly to a file.
    if filename:
        return
    return self.__read_to_stream(r=r)
def get_waveforms_bulk(
        self, model, bulk,
        eventid=None, sourcelatitude=None, sourcelongitude=None,
        sourcedepthinmeters=None, sourcemomenttensor=None,
        sourcedoublecouple=None, sourceforce=None, origintime=None,
        starttime=None, endtime=None, label=None, components=None,
        units=None, scale=None, dt=None, kernelwidth=None,
        format="miniseed", filename=None, data=None):
    """
    Request waveforms for multiple receivers simultaneously.

    This method is strongly tied to the actual implementation on the
    server side. The default values and all the exception handling are
    deferred to the service. Please see the `Syngine documentation
    <https://ds.iris.edu/ds/products/syngine/>`_ for more details and the
    default values of all parameters.

    This method uses the POST functionalities of the Syngine service.

    .. rubric:: Example

    The `bulk` parameter is a list of either other lists/tuples or
    dictionaries. Each item specifies one receiver. Items can be
    specified in a number of different ways:

    >>> from obspy.clients.syngine import Client
    >>> c = Client()
    >>> bulk = [
    ...     {"network": "IU", "station": "ANMO"},  # net/sta codes
    ...     {"latitude": 47.0, "longitude": 12.1},  # coordinates
    ...     {"latitude": 47.0, "longitude": 12.1,
    ...      "networkcode": "AA", "stationcode": "BB",
    ...      "locationcode": "CC"},  # optional net/sta/loc
    ...     ["IU", "ANTO"],  # net/sta as list
    ...     [33.2, -123.5]  # lat/lon as list/tuple
    ... ]

    Just pass that on to the bulk waveform method and retrieve the data.

    >>> st = c.get_waveforms_bulk(
    ...     model="ak135f_5s", bulk=bulk, sourcelatitude=12.3,
    ...     sourcelongitude=75.3, sourcedepthinmeters=54321,
    ...     sourcemomenttensor=[1E19, 1E19, 1E19, 0, 0, 0],
    ...     components="Z")
    >>> print(st.sort())  # doctest: +ELLIPSIS
    5 Trace(s) in Stream:
    AA.BB.CC.MXZ | 1900-01-01T00:00:00... - ... | 4.0 Hz, 15520 samples
    IU.ANMO.SE.MXZ | 1900-01-01T00:00:00... - ... | 4.0 Hz, 15520 samples
    IU.ANTO.SE.MXZ | 1900-01-01T00:00:00... - ... | 4.0 Hz, 15520 samples
    XX.S0001.SE.MXZ | 1900-01-01T00:00:00... - ... | 4.0 Hz, 15520 samples
    XX.S0002.SE.MXZ | 1900-01-01T00:00:00... - ... | 4.0 Hz, 15520 samples

    :param model: Specify the model.
    :type model: str
    :param bulk: Specify the receivers to download in bulk.
    :type bulk: list of lists, tuples, or dictionaries
    :param eventid: Specify an event identifier in the form
        [catalog]:[eventid]. The centroid time and location and moment
        tensor of the solution will be used as the source.
    :type eventid: str
    :param sourcelatitude: Specify the source latitude.
    :type sourcelatitude: float
    :param sourcelongitude: Specify the source longitude.
    :type sourcelongitude: float
    :param sourcedepthinmeters: Specify the source depth in meters.
    :type sourcedepthinmeters: float
    :param sourcemomenttensor: Specify a source in moment tensor
        components as a list: ``Mrr``, ``Mtt``, ``Mpp``, ``Mrt``, ``Mrp``,
        ``Mtp`` with values in Newton meters (*Nm*).
    :type sourcemomenttensor: list of floats
    :param sourcedoublecouple: Specify a source as a double couple. The
        list of values are ``strike``, ``dip``, ``rake`` [, ``M0`` ],
        where strike, dip and rake are in degrees and M0 is the scalar
        seismic moment in Newton meters (Nm). If not specified, a value
        of *1e19* will be used as the scalar moment.
    :type sourcedoublecouple: list of floats
    :param sourceforce: Specify a force source as a list of ``Fr``, ``Ft``,
        ``Fp`` in units of Newtons (N).
    :type sourceforce: list of floats
    :param origintime: Specify the source origin time. This must be
        specified as an absolute date and time.
    :type origintime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param starttime: Specifies the desired start time for the synthetic
        trace(s). This may be specified as either:

        * an absolute date and time
        * a phase-relative offset
        * an offset from origin time in seconds

        If the value is recognized as a date and time, it is interpreted
        as an absolute time. If the value is in the form
        ``phase[+-]offset`` it is interpreted as a phase-relative time,
        for example ``P-10`` (meaning P wave arrival time minus 10
        seconds). If the value is a numerical value it is interpreted as an
        offset, in seconds, from the ``origintime``.
    :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`, str,
        or float
    :param endtime: Specifies the desired end time for the synthetic
        trace(s). This may be specified as either:

        * an absolute date and time
        * a phase-relative offset
        * an offset from start time in seconds

        If the value is recognized as a date and time, it is interpreted
        as an absolute time. If the value is in the form
        ``phase[+-]offset`` it is interpreted as a phase-relative time,
        for example ``P+10`` (meaning P wave arrival time plus 10
        seconds). If the value is a numerical value it is interpreted as an
        offset, in seconds, from the ``starttime``.
    :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`, str,
        or float
    :param label: Specify a label to be included in file names and HTTP
        file name suggestions.
    :type label: str
    :param components: Specify the orientation of the synthetic
        seismograms as a list of any combination of ``Z`` (vertical),
        ``N`` (north), ``E`` (east), ``R`` (radial), ``T`` (transverse)
    :type components: str or list of strings.
    :param units: Specify either ``displacement``, ``velocity`` or
        ``acceleration`` for the synthetics. The length unit is meters.
    :type units: str
    :param scale: Specify an amplitude scaling factor. The default
        amplitude length unit is meters.
    :type scale: float
    :param dt: Specify the sampling interval in seconds. Only upsampling
        is allowed so this value must be larger than the intrinsic interval
        of the model database.
    :type dt: float
    :param kernelwidth: Specify the width of the sinc kernel used for
        resampling to requested sample interval (``dt``), relative to the
        original sampling rate.
    :type kernelwidth: int
    :param format: Specify output file to be either miniSEED or a ZIP
        archive of SAC files, either ``miniseed`` or ``saczip``.
    :type format: str
    :param filename: Will download directly to the specified file. If
        given, this method will return nothing.
    :type filename: str or file-like object
    :param data: If specified this will be sent directly sent to the
        Syngine service as a POST payload. All other parameters except the
        ``filename`` parameter will be silently ignored. Likely not that
        useful for most people.
    :type data: dictionary, bytes, or file-like object
    """
    # NOTE: the original code used ``collections.Mapping`` and
    # ``collections.Container``, aliases that were removed in
    # Python 3.10 -- import the ABCs from ``collections.abc`` instead.
    # ``Collection`` (Sized + Iterable + Container) is used for the
    # iterable branch because ``len(item)`` is called on it below.
    from collections.abc import Collection, Mapping

    # Send data straight via POST if given.
    if data:
        r = self._download(url=self._get_url("query"),
                           data=data, filename=filename)
        if filename:
            return
        return self.__read_to_stream(r=r)

    if not bulk:
        raise ValueError("Some bulk download information must be given.")

    arguments = {
        "eventid": eventid,
        "sourcelatitude": sourcelatitude,
        "sourcelongitude": sourcelongitude,
        "sourcedepthinmeters": sourcedepthinmeters,
        "sourcemomenttensor": sourcemomenttensor,
        "sourcedoublecouple": sourcedoublecouple,
        "sourceforce": sourceforce,
        "origintime": origintime,
        "starttime": starttime,
        "endtime": endtime,
        "label": label,
        "components": components,
        "units": units,
        "scale": scale,
        "dt": dt,
        "kernelwidth": kernelwidth,
        "format": format,
        "filename": filename}
    params = self._convert_parameters(model=model, **arguments)

    # Assemble the bulk file understood by the POST endpoint: a
    # "key=value" header section followed by one receiver per line.
    with io.BytesIO() as buf:
        # Write the header.
        buf.write("model={model}\n".format(**params).encode())
        del params["model"]
        # Sorted keys make the payload deterministic.
        for key in sorted(params.keys()):
            value = params[key]
            buf.write("{key}={value}\n".format(key=key,
                                              value=value).encode())
        _map = {"networkcode": "NETCODE",
                "stationcode": "STACODE",
                "locationcode": "LOCCODE"}
        # Write the bulk content.
        for item in bulk:
            # Dictionary like items.
            if isinstance(item, Mapping):
                if "latitude" in item or "longitude" in item:
                    # Coordinates must come as a pair.
                    if not ("latitude" in item and "longitude" in item):
                        raise ValueError(
                            "Item '%s' in bulk must contain both "
                            "latitude and longitude if either is given." %
                            str(item))
                    bulk_item = "{latitude} {longitude}".format(**item)
                    # Optional net/sta/loc codes for coordinate items.
                    for _i in ("networkcode", "stationcode",
                               "locationcode"):
                        if _i in item:
                            bulk_item += " %s=%s" % (_map[_i], item[_i])
                elif "station" in item and "network" in item:
                    bulk_item = "{network} {station}".format(
                        **item)
                else:
                    raise ValueError("Item '%s' in bulk is malformed." %
                                     str(item))
            # Iterable items: either (net, sta) or (lat, lon).
            elif isinstance(item, Collection):
                if len(item) != 2:
                    raise ValueError("Item '%s' in bulk must have two "
                                     "entries." % str(item))
                bulk_item = "%s %s" % (item[0], item[1])
            else:
                raise ValueError("Item '%s' in bulk cannot be parsed." %
                                 str(item))
            buf.write((bulk_item.strip() + "\n").encode())
        buf.seek(0, 0)

        r = self._download(url=self._get_url("query"),
                           data=buf, filename=filename)

    # A given filename will write directly to a file.
    if filename:
        return
    return self.__read_to_stream(r=r)
|
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.find_list_command import FindListCommand
from pyopenproject.business.services.command.project.project_command import ProjectCommand
from pyopenproject.model.budget import Budget
class FindBudgets(ProjectCommand):
    """Command that fetches all budgets attached to a single project."""

    def __init__(self, connection, project):
        """Create the command.

        :param connection: The API connection used to issue the request.
        :param project: The project whose budgets are to be found; its
            ``id`` is interpolated into the request URL and its ``name``
            is used in error messages.
        """
        super().__init__(connection)
        self.project = project

    def execute(self):
        """Run the lookup and return the project's budgets.

        :return: the result of ``FindListCommand`` over the
            ``/budgets`` endpoint, materialized as ``Budget`` objects.
        :raises BusinessError: if the underlying HTTP request fails.
        """
        try:
            request = GetRequest(self.connection,
                                 f"{self.CONTEXT}/{self.project.id}/budgets")
            return FindListCommand(self.connection, request, Budget).execute()
        except RequestError as re:
            # The message previously said "by id" while showing the project
            # name; report the project name consistently.
            raise BusinessError(
                f"Error finding budgets for project: {self.project.name}"
            ) from re
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.