content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import rospy
import message_filters
#from std_msgs.msg import String ## we need it stamped or we cant use TimeSynchronizer
from ros_example_torch_classifier.msg import StringStamped
from ros_example_torch_classifier.utils import check_remap
from std_srvs.srv import Trigger, TriggerResponse
#def check_remap(listo):
# for name in listo:
# rospy.logdebug("the name that %s is resolving to: %s"%(name, rospy.resolve_name(name)))
class ClassifierRotator():
    """Context manager that rotates classifier instances behind a ROS service.

    Each call to the ~get_next service tears down the running classifier (if
    any) and starts a fresh one built from the supplied prototype/factory.
    """

    def __init__(self, classifier=None):
        # `classifier` is a zero-argument factory (e.g. the RosClassifier class).
        rospy.init_node("cfr", anonymous=True, log_level=rospy.DEBUG)
        self.classifier_prototype = classifier
        self.classifier = None

    def __enter__(self):
        # Advertise the rotation trigger as a private ~get_next service.
        self.get_next = rospy.Service("~get_next", Trigger, self.do_one_thing)
        return self

    def __exit__(self, *exc):
        reason = "\n\texc list: {}\n, {}".format(*exc, exc[0])
        self.get_next.shutdown(reason)

    def do_one_thing(self, req):
        """Service handler: stop the current classifier and start a new one."""
        if self.classifier is not None:
            self.classifier.stop("Next instance called.")
        fresh = self.classifier_prototype()
        self.classifier = fresh
        fresh.start()
        return TriggerResponse(success=True, message="response")

    def do_something_else(self):
        pass
class RosClassifier():
    """Buffers time-synchronized (input, label) message pairs for training and
    testing, and exposes stub ~classify / ~predict services."""
    def __init__(self):
        self.train_data = []  # list of (data, label) StringStamped pairs
        self.test_data = []   # list of (data, label) StringStamped pairs
        ### this is awkward; ideally we would have them published together with a custom message,
        ### TODO: consider if custom messages is better here.
        ##need to set training input
        ##need to set label topic
        check_remap(["train_in", "train_label", "test_in", "test_label"])
        rospy.logdebug("the name that train_in is resolving to: %s"%rospy.resolve_name("train_in"))
        self.training_input = message_filters.Subscriber('train_in', StringStamped)
        self.training_label = message_filters.Subscriber('train_label', StringStamped)
        train_sub_pair = [self.training_input, self.training_label]
        # Exact-timestamp pairing (queue size 10); requires stamped messages.
        self.train_ts = message_filters.TimeSynchronizer(train_sub_pair , 10)
        #self.ts = message_filters.ApproximateTimeSynchronizer(train_sub_pair , 10, 0.1, allow_headerless=True)
        self.train_ts.registerCallback(self.train_callback)
        ##need to set test topic
        ##need to set test labels
        self.test_input = message_filters.Subscriber('test_in', StringStamped)
        self.test_label = message_filters.Subscriber('test_label', StringStamped)
        test_sub_pair = [self.test_input, self.test_label]
        self.test_ts = message_filters.TimeSynchronizer(test_sub_pair , 10)
        #self.test_ts = message_filters.ApproximateTimeSynchronizer(test_sub_pair , 10, 0.1, allow_headerless=True)
        self.test_ts.registerCallback(self.test_callback)
    def train_callback(self, data, label):
        """Buffer one synchronized (data, label) training pair."""
        #maybe we just collect everything until we get a do_train
        #this will fail for big dataset strategies where we do not want to keep
        #the whole thing ever, but just
        rospy.logwarn_once("I'm receiving data.")
        self.train_data.append((data,label))
        #pass
    def test_callback(self, data, label):
        """Buffer one synchronized (data, label) test pair."""
        self.test_data.append((data,label))
    def __enter__(self):
        self.start()
        return self
    def start(self):
        """Advertise the (stub) ~classify and ~predict services."""
        self.clf_do = rospy.Service("~classify", Trigger, self.do_train)
        self.clf_predict = rospy.Service("~predict", Trigger, self.do_predict)
    def stop(self, reason = "No reason given."):
        """Shut both services down, passing `reason` to pending clients."""
        self.clf_do.shutdown(reason)
        self.clf_predict.shutdown(reason)
    def __exit__(self, *exc):
        reason = "\n\texc list: {}\n, {}".format(*exc,exc[0])
        self.stop(reason = reason)
    def do_train(self, req):
        """~classify handler (stub); requires at least one buffered pair.

        NOTE(review): assert is stripped under `python -O`; consider raising.
        """
        assert(self.train_data)
        rospy.logwarn("training not implemented")
        return TriggerResponse(success=True, message= "Stub classification done")
    def do_predict(self, req):
        """~predict handler (stub); requires at least one buffered test pair."""
        assert(self.test_data)
        rospy.logwarn("prediction not implemented")
        return TriggerResponse(success=True, message= "Stub prediction done")
if __name__ == '__main__':
    try:
        # Pass the class itself; the rotator instantiates it on each ~get_next.
        with ClassifierRotator(RosClassifier) as cr:
            rospy.spin()
    except rospy.ROSInterruptException:
        pass
|
from __future__ import unicode_literals
import logging
#: sentinel used to distinguish "not provided" from an explicit ``None``
NOT_SET = object()
class BaseTableModel(object):
    """Base model for table

    Subclasses must set :attr:`TABLE` to the SQLAlchemy mapped class they
    manage; all queries run against ``factory.session``.
    """
    #: the table object (SQLAlchemy mapped class); set by subclasses
    TABLE = None

    def __init__(self, factory, logger=None):
        self.logger = logger or logging.getLogger(__name__)
        self.factory = factory
        self.session = factory.session
        assert self.TABLE is not None, 'Table is not set'

    def init(self):
        """Called after all models in factory are created to initialized
        something like event subscription
        """

    def get(self, record_id, raise_error=False):
        """Get a record by id

        :param record_id: primary key of the record
        :param raise_error: when True, raise ``KeyError`` if the record does
            not exist instead of returning ``None``
        """
        # NOTE(review): Query.get() is the legacy (pre-2.0) SQLAlchemy API;
        # Session.get() is the modern equivalent — confirm before migrating.
        record = (
            self.session
            .query(self.TABLE)
            .get(record_id)
        )
        if raise_error and record is None:
            raise KeyError(
                '{0} {1} does not exist'
                .format(self.TABLE.__name__, record_id)
            )
        return record

    def get_list(self, ids=None, offset=None, limit=None):
        """Get record list query

        :param ids: optional iterable of primary keys to restrict to
        :param offset: optional query offset
        :param limit: optional query limit
        :return: a SQLAlchemy query (not yet executed)
        """
        from sqlalchemy.orm import class_mapper
        query = (
            self.session
            .query(self.TABLE)
        )
        if ids is not None:
            # Filter on the (single-column) primary key.
            pk = class_mapper(self.TABLE).primary_key[0]
            query = query.filter(pk.in_(ids))
        if offset is not None:
            query = query.offset(offset)
        if limit is not None:
            query = query.limit(limit)
        return query

    def count(self, ids=None):
        """Count records, optionally restricted to the given primary keys"""
        # Reuse get_list so the id-filtering logic lives in exactly one place
        # (the original duplicated the class_mapper/pk.in_ block here).
        return self.get_list(ids=ids).count()
|
from django.contrib.auth.models import Group
from dalme_api.serializers import (AgentSerializer, ContentClassSerializer, ContentTypeSerializer,
CountryReferenceSerializer, GroupSerializer, LanguageReferenceSerializer,
LocaleReferenceSerializer, PlaceSerializer, RightsPolicySerializer, SimpleAttributeSerializer)
from dalme_app.models import (Agent, Attribute, Content_class, Content_type, CountryReference,
LanguageReference, LocaleReference, Place, RightsPolicy)
from dalme_api.access_policies import AgentAccessPolicy, GeneralAccessPolicy, RightsAccessPolicy, LocaleAccessPolicy, PlaceAccessPolicy
from ._common import DALMEBaseViewSet
from dalme_api.filters import ContenTypeFilter
class Agents(DALMEBaseViewSet):
    """API endpoint for managing agents."""
    permission_classes = (AgentAccessPolicy,)
    queryset = Agent.objects.all()
    serializer_class = AgentSerializer
    # DRF list configuration: filterable/searchable fields and default order.
    filterset_fields = ['id', 'type']
    search_fields = ['id', 'standard_name', 'notes']
    ordering_fields = ['id', 'standard_name', 'type']
    ordering = ['type', 'standard_name']
class Attributes(DALMEBaseViewSet):
    """API endpoint for managing attributes."""
    permission_classes = (GeneralAccessPolicy,)
    queryset = Attribute.objects.all().order_by('attribute_type')
    serializer_class = SimpleAttributeSerializer
class ContentClasses(DALMEBaseViewSet):
    """API endpoint for managing content classes."""
    permission_classes = (GeneralAccessPolicy,)
    queryset = Content_class.objects.all()
    serializer_class = ContentClassSerializer
class ContentTypes(DALMEBaseViewSet):
    """API endpoint for managing content types."""
    permission_classes = (GeneralAccessPolicy,)
    queryset = Content_type.objects.all()
    serializer_class = ContentTypeSerializer
    # NOTE(review): "ContenTypeFilter" (missing 't') matches the import at the
    # top of the file — the typo lives in dalme_api.filters, not here.
    filterset_class = ContenTypeFilter
class Countries(DALMEBaseViewSet):
    """API endpoint for managing countries."""
    permission_classes = (GeneralAccessPolicy,)
    queryset = CountryReference.objects.all()
    serializer_class = CountryReferenceSerializer
    filterset_fields = ['id', 'name', 'alpha_3_code', 'alpha_2_code', 'num_code']
    search_fields = ['id', 'name', 'alpha_3_code', 'alpha_2_code', 'num_code']
    ordering_fields = ['id', 'name', 'alpha_3_code', 'alpha_2_code', 'num_code']
    ordering = ['name']
class Groups(DALMEBaseViewSet):
    """API endpoint for managing user groups."""
    permission_classes = (GeneralAccessPolicy,)
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
    filterset_fields = ['id', 'name', 'properties__type']
    search_fields = ['name']
    ordering_fields = ['id', 'name']
    ordering = ['name']
class Languages(DALMEBaseViewSet):
    """API endpoint for managing languages."""
    permission_classes = (GeneralAccessPolicy,)
    queryset = LanguageReference.objects.all()
    serializer_class = LanguageReferenceSerializer
    filterset_fields = ['id', 'name', 'type', 'parent__name', 'iso6393', 'glottocode']
    search_fields = ['id', 'name', 'type', 'parent__name', 'iso6393', 'glottocode']
    ordering_fields = ['id', 'name', 'type', 'parent', 'iso6393', 'glottocode']
    ordering = ['name']
class Locales(DALMEBaseViewSet):
    """API endpoint for managing locales."""
    permission_classes = (LocaleAccessPolicy,)
    queryset = LocaleReference.objects.all()
    serializer_class = LocaleReferenceSerializer
    filterset_fields = ['id', 'name', 'administrative_region', 'country__name']
    search_fields = ['id', 'name', 'administrative_region', 'country__name']
    ordering_fields = ['id', 'name', 'administrative_region', 'country', 'latitude', 'longitude']
    ordering = ['name']
class Places(DALMEBaseViewSet):
    """API endpoint for managing places."""
    permission_classes = (PlaceAccessPolicy,)
    queryset = Place.objects.all()
    serializer_class = PlaceSerializer
    filterset_fields = ['id']
    search_fields = ['id', 'standard_name', 'notes', 'locale__name', 'locale__country__name']
    ordering_fields = ['id', 'standard_name', 'locale__name']
    ordering = ['locale__name', 'standard_name']
class Rights(DALMEBaseViewSet):
    """API endpoint for managing rights policies."""
    permission_classes = (RightsAccessPolicy,)
    queryset = RightsPolicy.objects.all()
    serializer_class = RightsPolicySerializer
|
from pretalx.event.models import Organiser, Team
def create_organiser_with_user(*, name, slug, user):
    """Create an organiser plus a fully-privileged team containing *user*.

    :return: the ``(organiser, team)`` pair
    """
    organiser = Organiser.objects.create(name=name, slug=slug)
    team = Team.objects.create(
        organiser=organiser,
        name=f'Team {name}',
        can_create_events=True,
        can_change_teams=True,
        can_change_organiser_settings=True,
    )
    team.members.add(user)
    return organiser, team
|
# TODO: handle file mode?
import io
import sys
from ..interface import Contract, ContractNotRespected
from ..syntax import (add_contract, add_keyword, Keyword, W)
# Python 2's builtin `file` type does not exist in Python 3, where all
# streams derive from io.IOBase — pick the isinstance() target accordingly.
inPy2 = sys.version_info[0] == 2
if inPy2:
    file_type = (file, io.IOBase)
else:
    file_type = io.IOBase
class File(Contract):
    """Contract satisfied by any file-like object (``file``/``io.IOBase``)."""

    def __init__(self, where=None):
        Contract.__init__(self, where)

    def check_contract(self, context, value, silent):
        # `silent` is part of the Contract interface but unused here.
        if isinstance(value, file_type):
            return
        msg = 'Expected a file, got %r.' % value.__class__.__name__
        raise ContractNotRespected(contract=self, error=msg,
                                   value=value, context=context)

    def __str__(self):
        return 'file'

    def __repr__(self):
        return 'File()'

    @staticmethod
    def parse_action(s, loc, _):
        # Build the contract node, remembering where it appeared in the spec.
        return File(where=W(s, loc))
# Register the `file` keyword with the contracts grammar.
file_contract = Keyword('file')
file_contract.setParseAction(File.parse_action)
add_contract(file_contract)
add_keyword('file')
|
import pyshark
import py
import os
# config.ini ships inside the installed pyshark package directory.
CONFIG_PATH = os.path.join(os.path.dirname(pyshark.__file__), 'config.ini')
def get_config():
    """Load pyshark's bundled config.ini as a py.iniconfig IniConfig."""
    return py.iniconfig.IniConfig(CONFIG_PATH)
from pathlib import Path
from magnus.integration import BaseIntegration
class LocalContainerComputeS3Catalog(BaseIntegration):
    """
    Integration pattern between Local container and S3 catalog
    """
    mode_type = 'local-container'
    service_type = 'catalog'  # One of secret, catalog, datastore
    service_provider = 's3'  # The actual implementation of the service

    def configure_for_traversal(self, **kwargs):
        """Mount the host's AWS credentials file read-only into the container."""
        write_to = self.service.get_aws_credentials_file()
        # Docker volume spec: resolved host path -> /root/.aws/credentials (ro).
        self.executor.volumes[str(Path(write_to).resolve())] = {
            'bind': '/root/.aws/credentials',
            'mode': 'ro'
        }
|
"""
Created on @Time:2019/7/9 15:34
@Author:sliderSun
@FileName: generate_char.py
"""
from qaPairsRelationClassification.utils.preprocess import MyVocabularyProcessor
import numpy as np
def generate(x2_text, x1_text, max_document_length):
    """Build a character vocabulary over both text columns and dump it to disk.

    :param x2_text: array-like of strings (second column)
    :param x1_text: array-like of strings (first column)
    :param max_document_length: max tokens per document for the processor
    """
    print("Building vocabulary")
    vocab_processor = MyVocabularyProcessor(max_document_length, min_frequency=0)
    vocab_processor.fit_transform(np.concatenate((x2_text, x1_text), axis=0))
    # Context manager guarantees the file is flushed and closed even if a
    # vocabulary lookup raises (the original leaked the handle).
    with open("F:\python_work\github\pynlp\qaPairsRelationClassification\data\\vocab.txt", "w",
              encoding="utf-8") as f:
        for i in range(len(vocab_processor.vocabulary_)):
            f.write(vocab_processor.vocabulary_.reverse(i)+"\n")
    print("Length of loaded vocabulary ={}".format(len(vocab_processor.vocabulary_)))
|
# -*- coding: utf-8 -*-
import sys
import gc
import time
try:
    import msvcrt

    def kbhit():
        """Return the pending key (str) or None when no key is waiting.

        msvcrt.kbhit() only returns a bool, but the caller in loop() tests
        the result with ``is not None`` — a bool is never None, so the old
        Windows variant made loop() break immediately on every iteration.
        Returning the character (or None) matches the ESP implementation.
        """
        return msvcrt.getwch() if msvcrt.kbhit() else None

    def getch():
        """Block until a key is pressed and return it."""
        return msvcrt.getwch()
except ImportError:
    print("Assuming on ESP")
    import uselect

    def kbhit():
        """Return one pending character from stdin, or None."""
        spoll = uselect.poll()
        spoll.register(sys.stdin, uselect.POLLIN)
        kbch = sys.stdin.read(1) if spoll.poll(0) else None
        spoll.unregister(sys.stdin)
        return(kbch)

    def getch():
        """Block until a character is available on stdin."""
        while True:
            tmp = sys.stdin.read(1)
            if tmp is not None:
                return tmp
# Optional I2C transport: scan for mining slaves when the driver is present.
try:
    from i2ct import i2ct
    i2 = i2ct()
    print("Available Slaves:", i2.con.scan())
except ImportError:
    print("no i2ct")
from duclas import cserv
from duclas import ccon
# Module-level state: identity, open connections and the pool server handle.
username = "targon"
rigname = "None"
myCons = []
serv = cserv()
def newCon(targ, tarnam):
    """Open a pool connection for slave `targ` (named `tarnam`) and track it."""
    myCons.append(ccon(targ, tarnam, serv.pool_address, serv.pool_port, rigname))
def get_config():
    """Read the rig configuration from conf.txt.

    Line 1 is the rig name; each further line is "<target-addr> <name>" and
    spawns one connection via newCon().
    """
    global rigname
    # Context manager: the original opened conf.txt and never closed it.
    with open("conf.txt", 'r') as conf:
        rigname = conf.readline().rstrip()
        print(rigname)
        for l in conf.readlines():
            # Split only once: the name may itself contain spaces.
            s = l.rstrip().split(" ", 1)
            print(s[1]+"<")
            newCon(int(s[0]), s[1])
def overview():
    """Print one status line per connection, with ms since its job started."""
    print("cons", len(myCons))
    now = time.ticks_ms()
    for c in myCons:
        tim = time.ticks_diff(now, c.jobStartTim)
        print(c.target, c.getSlStat(), c.sta, c.reqAnz, '/', c.reqAnzTop, "sin", tim)
def info():
    """Print slave scan, memory stats, rig name and per-connection details."""
    gc.collect()
    # NOTE(review): `i2` only exists when the i2ct import above succeeded;
    # on "no i2ct" systems this raises NameError — confirm intent.
    print("Available Slaves:", i2.con.scan())
    print("Mem Alloc", gc.mem_alloc(), 'Free', gc.mem_free())
    print("Rigname", rigname, 'with', len(myCons), "Cons:")
    for c in myCons:
        c.coninfo()
def loop(top=0):
    """Drive all connections' state machines until done or a key is pressed.

    :param top: 0 runs unlimited; otherwise each connection stops after
        `top` jobs ("zings" when all report state 'Z').
    """
    global myCons
    allbusy = 0  # counter subsequent loops
    zings = 0  # jobs terminated
    ms = '?'
    now = time.ticks_ms()
    for c in myCons:  # in case of break
        c.jobStartTim = now
        if top == 0:  # run unlimited
            c.reqAnzTop = 0
        else:  # run top jobs (pi)
            c.reqAnz = 0
            c.reqAnzTop = top
    while True:
        allbusy = allbusy + 1
        zings = 1
        for c in myCons:
            ms = c.mach()
            if ms != 'B':
                allbusy = 0
            if ms != 'Z':
                zings = 0
        # All connections finished their job quota.
        if zings > 0:
            print("Zinged")
            break
        # Any keypress aborts the loop (kbhit yields a char or None).
        if kbhit() is not None:
            break
        # Everyone busy: use the idle time for GC and back off briefly.
        if allbusy > 0:
            print("All Bus ", allbusy)
            gc.collect()  # something useful
            time.sleep(0.1)
    print("Loop Done")
def menu():
    """Interactive single-key console.

    Digits accumulate into `inp` (an index/argument); any other key runs the
    corresponding command against myCons. 'x' exits.
    """
    global myCons
    inpAkt = False  # True while a multi-digit number is being typed
    inp = 0         # accumulated numeric argument
    myc = 0         # "my connection" index, set with 'a', used by 'w'
    verbose = True
    print(username + ", welcome to mydu. Use s to start:")
    print("... then l to loop:")
    if len(myCons) == 0:  # re-running config would duplicate cons
        get_config()
        loop()
    while True:
        if not inpAkt: print(rigname, ">", end='')
        ch = getch()
        if ((ch >= '0') and (ch <= '9')):
            # Build up the decimal argument digit by digit.
            if (inpAkt):
                inp = inp * 10 + (ord(ch) - 48);
            else:
                inpAkt = True;
                inp = ord(ch) - 48;
            print(ch, end='')
        else:
            print(ch)
            inpAkt = False
            try:
                if ch == "a":
                    myc = inp
                    print("myc=", myc)
                elif ch == "c":
                    myCons[inp].conn()
                elif ch == "d":
                    myCons[inp].close()
                elif ch == "D":
                    # Close all connections (but keep them listed).
                    for c in myCons:
                        c.close()
                elif ch == "e":
                    # Close and forget connection `inp`.
                    myCons[inp].close()
                    myCons.pop(inp)
                elif ch == "h":
                    myCons[inp].sendRate = not myCons[inp].sendRate
                    print("sendRate ", myCons[inp].sendRate)
                elif ch == "i":
                    info()
                elif ch == "l":
                    loop(0)
                elif ch == "m":
                    # Single state-machine step for connection `inp`.
                    myCons[inp].mach()
                elif ch == "o":
                    overview()
                elif ch == "q":
                    print(myCons[inp].getSlStat())
                elif ch == "s":
                    get_config()
                elif ch == "u":
                    myCons[inp].statReset()
                    print("stats reset for", inp)
                elif ch == "v":
                    verbose = not verbose
                    for c in myCons:
                        c.setVerbose(verbose)
                elif ch == "w":
                    inp = myCons[myc].getResult()
                    print("inp", inp)
                elif ch == "x":
                    # Clean shutdown of every connection, then leave the menu.
                    for c in myCons:
                        c.close()
                    print("Thanks for using mydu")
                    return
                elif ch == "y":
                    loop(inp)
                elif ch == "z":
                    # Let every connection finish `inp` more jobs, then loop.
                    for c in myCons:
                        c.reqAnzTop = c.reqAnz + inp
                    print("Zinging")
                    loop()
                else:
                    print("else" + str(ord(ch)))
            except Exception as inst:
                print("menu Exception " + str(inst))
                raise  # remove when perfect
menu()
|
# -*- coding: utf-8 -*-
from setuptools import setup
from pyHMI import __version__
# Runtime dependencies are listed one-per-line in requirements.txt.
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(
    name='pyHMI',
    python_requires='>=3.2',
    version=__version__,
    description='A set of class for easy build tkinter HMI with Python',
    long_description='',
    author='Loic Lefebvre',
    author_email='loic.celine@free.fr',
    license='MIT',
    url='https://github.com/sourceperl/pyHMI',
    packages=['pyHMI'],
    platforms='any',
    install_requires=required,
)
|
from ctypes import cast, c_uint, c_uint32, c_float, CDLL, POINTER
# Pointer aliases used when casting tensor data pointers below.
c_uint32p = POINTER(c_uint32)
c_floatp = POINTER(c_float)
import sys
import torch
from data import examples
# CUDA model shared library: init() is called once, model() once per example.
so = CDLL('cuda/libmodel.so')
init = so.init
init.restype = None
init.argtypes = []
model = so.model
model.restype = None
# Signature (see call site below):
# (num_nodes, num_edges, num_rules, nodes, sources, targets, rules, out_scores)
model.argtypes = [
    c_uint32,
    c_uint32,
    c_uint32,
    c_uint32p,
    c_uint32p,
    c_uint32p,
    c_uint32p,
    c_floatp
]
if __name__ == '__main__':
    init()
    # Each example yields graph tensors plus the target label y.
    test = examples(sys.argv[1], self_loops=False)
    for nodes, sources, targets, rules, y in test:
        num_nodes = c_uint32(len(nodes))
        num_edges = c_uint32(len(sources))
        num_rules = c_uint32(len(rules))
        # Convert to int32 so data_ptr() matches the c_uint32 ABI above.
        nodes = nodes.int()
        sources = sources.int()
        targets = targets.int()
        rules = rules.int()
        nodes_ptr = cast(nodes.data_ptr(), c_uint32p)
        sources_ptr = cast(sources.data_ptr(), c_uint32p)
        targets_ptr = cast(targets.data_ptr(), c_uint32p)
        rules_ptr = cast(rules.data_ptr(), c_uint32p)
        # Output buffer: one float score per rule, filled by the library.
        results = (c_float * len(rules))()
        results_ptr = cast(results, c_floatp)
        model(
            num_nodes,
            num_edges,
            num_rules,
            nodes_ptr,
            sources_ptr,
            targets_ptr,
            rules_ptr,
            results_ptr
        )
        # Normalize raw scores into a probability distribution over rules.
        policy = torch.softmax(torch.tensor(list(results)), dim=0)
        print(policy)
        print(y)
|
import itertools
import math
def read_input():
    """Parse the puzzle input into a list of [l, w, h] integer triples."""
    # Context manager closes the handle (the original leaked it) and the
    # rename avoids shadowing Python 2's `file` builtin.
    with open('input/2015/day2-input.txt', 'r') as lines:
        return [[int(side) for side in line.split('x')] for line in lines.readlines()]
def wrapping(lengths):
    """Paper needed: full surface area plus slack (area of the smallest face).

    >>> wrapping([2, 3, 4])
    58
    >>> wrapping([1, 1, 10])
    43
    """
    face_areas = [a * b for a, b in itertools.combinations(lengths, 2)]
    slack = min(face_areas)
    return sum(face_areas) * 2 + slack
def ribbon(lengths):
    """Ribbon needed: smallest face perimeter plus the bow (the volume).

    >>> ribbon([2, 3, 4])
    34
    >>> ribbon([1, 1, 10])
    14
    """
    half_perimeters = [a + b for a, b in itertools.combinations(lengths, 2)]
    bow = math.prod(lengths)
    return min(half_perimeters) * 2 + bow
def part1(data):
    """Total wrapping paper over all parcels.

    >>> part1(read_input())
    1598415
    """
    return sum(map(wrapping, data))
def part2(data):
    """Total ribbon over all parcels.

    >>> part2(read_input())
    3812909
    """
    return sum(map(ribbon, data))
def main():
    """Solve both parts against the real puzzle input."""
    data = read_input()
    print(part1(data))
    print(part2(data))
if __name__ == "__main__":
    main()
|
"""Tests replicating
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import numpy as np
import logging
import io
import multiprocessing
import functools
if True: # Ugly, but makes pylint happy
# pylint:disable=import-error
from six.moves import range # pylint: disable=redefined-builtin
from pyexperiment import state
from pyexperiment import Logger
from pyexperiment import log
# from pyexperiment.utils.stdout_redirector import stdout_err_redirector
from pyexperiment.replicate import replicate, collect_results, TargetCreator
from pyexperiment.replicate import SUBSTATE_KEY_PATTERN
# Shared exception instance so tests can compare the raised error by identity.
FAKE_ERROR = RuntimeError("Foo")
"""Fake error for testing
"""
def experiment():
    """Test experiment, needs to be defined at top level for multiprocessing
    """
    # Writes into pyexperiment's global state; replicate() namespaces this
    # under a per-replicate substate key (see assertions below).
    state['result'] = "bla"
def experiment2():
    """Test experiment, needs to be defined at top level for multiprocessing
    """
    # Set, read, then delete — the replicate substate should end up empty.
    state['result'] = "bla"
    _bla = state['result']
    del state['result']
def experiment3():
    """Test experiment, needs to be defined at top level for multiprocessing
    """
    # Always fails with the shared FAKE_ERROR instance.
    raise FAKE_ERROR
def experiment4():
    """Test experiment, needs to be defined at top level for multiprocessing
    """
    # Re-seed per process so parallel replicates get distinct random draws.
    np.random.seed()
    state['result'] = np.random.rand(1)
class TestReplicate(unittest.TestCase):
    """Test the replicate function, serial and parallel
    """
    def tearDown(self):
        """Teardown test fixture
        """
        # Drop all replicate substates between tests.
        state.reset_instance()
    def test_setting_state(self):
        """Test setting
        """
        no_replicates = 25
        replicate(experiment, no_replicates)
        # Each replicate writes into its own substate namespace.
        for i in range(no_replicates):
            self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])
            self.assertEqual(state[SUBSTATE_KEY_PATTERN % i]['result'], "bla")
    def test_getting_state(self):
        """Test getting
        """
        no_replicates = 25
        replicate(experiment2, no_replicates)
        # experiment2 deletes its key again, so no result should remain.
        for i in range(no_replicates):
            self.assertNotIn('result', state[SUBSTATE_KEY_PATTERN % i])
    def test_raises(self):
        """Test raising exception in replicate
        """
        no_replicates = 25
        try:
            replicate(experiment3, no_replicates)
        except RuntimeError as err:
            # The exact FAKE_ERROR instance must propagate unchanged.
            self.assertEqual(err, FAKE_ERROR)
        else:
            assert False
    def test_setting_state_parallel(self):
        """Test setting in parallel
        """
        no_replicates = 25
        replicate(experiment, no_replicates, parallel=True, no_processes=2)
        for i in range(no_replicates):
            self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])
            self.assertEqual(state[SUBSTATE_KEY_PATTERN % i]['result'], "bla")
    def test_getting_state_parallel(self):
        """Test getting in parallel
        """
        no_replicates = 25
        replicate(experiment2, no_replicates, parallel=True, no_processes=2)
        for i in range(no_replicates):
            self.assertNotIn(SUBSTATE_KEY_PATTERN % i + '.result', state)
    # Does not work properly yet
    # def test_raises_parallel(self):
    #     """Test raising exception in parallel (should log error)
    #     """
    #     log_stream = io.StringIO()
    #     buf_out = io.StringIO()
    #     buf_err = io.StringIO()
    #     Logger.CONSOLE_STREAM_HANDLER = logging.StreamHandler(log_stream)
    #     log.reset_instance()
    #     log.initialize(console_level=logging.ERROR)
    #     no_replicates = 2
    #     with stdout_err_redirector(buf_out, buf_err):
    #         try:
    #             replicate(experiment3, no_replicates, parallel=True)
    #         except:
    #             pass
    #     log.close()
    #     # Should have logged errors
    #     # self.assertNotEqual(len(log_stream.getvalue()), 0)
    #     print("log", log_stream.getvalue())
    #     print("out", buf_out.getvalue())
    #     print("err", buf_err.getvalue())
    def test_collecting(self):
        """Test collecting results
        """
        no_replicates = 25
        replicate(experiment4, no_replicates)
        for i in range(no_replicates):
            self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])
        results = collect_results('result', no_replicates=no_replicates)
        self.assertEqual(len(results), no_replicates)
        # Re-seeded RNG per replicate: all pairwise results must differ.
        for i, r_1 in enumerate(results):
            for k, r_2 in enumerate(results):
                if not i == k:
                    self.assertFalse((r_1 == r_2).all())
    def test_collecting_parallel(self):
        """Test collecting results
        """
        no_replicates = 25
        replicate(experiment4, no_replicates, parallel=True, no_processes=2)
        for i in range(no_replicates):
            self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])
        results = collect_results('result', no_replicates=no_replicates)
        self.assertEqual(len(results), no_replicates)
        for i, r_1 in enumerate(results):
            for k, r_2 in enumerate(results):
                if not i == k:
                    self.assertFalse((r_1 == r_2).all())
class TestTargetCreator(unittest.TestCase):
    """Test the target creator
    """
    def tearDown(self):
        """Clean up after the test
        """
        log.reset_instance()
    def test_basic_functionality(self):
        """Test the basic function of the TargetCreator
        """
        log_stream = io.StringIO()
        Logger.CONSOLE_STREAM_HANDLER = logging.StreamHandler(log_stream)
        log.reset_instance()
        log.initialize(console_level=logging.DEBUG)
        queue = multiprocessing.Queue()
        target_fun = TargetCreator(lambda: None, queue, 'bla')
        target_fun()
        # Should have logged running
        self.assertNotEqual(len(log_stream.getvalue()), 0)
        # NOTE(review): assertRegexpMatches is a deprecated alias removed in
        # Python 3.12; assertRegex is the py3 name. Kept here because this
        # file still maintains Python 2 compatibility (six imports above).
        self.assertRegexpMatches(log_stream.getvalue(), r'Running bla')
        self.assertTrue(queue.get())
    def test_partial(self):
        """Test the basic function of the TargetCreator on a partial target
        """
        log_stream = io.StringIO()
        Logger.CONSOLE_STREAM_HANDLER = logging.StreamHandler(log_stream)
        log.reset_instance()
        log.initialize(console_level=logging.DEBUG)
        def target(_):
            """Target function
            """
            pass
        queue = multiprocessing.Queue()
        target_fun = TargetCreator(functools.partial(target, None),
                                   queue,
                                   'bla')
        target_fun()
        # Should have logged running
        self.assertNotEqual(len(log_stream.getvalue()), 0)
        self.assertRegexpMatches(log_stream.getvalue(), r'Running bla')
        self.assertTrue(queue.get())
    def test_raises_exception(self):
        """Test the TargetCreator with a function that raises an exception
        """
        def target():
            """Test function
            """
            raise RuntimeError("bla")
        log_stream = io.StringIO()
        Logger.CONSOLE_STREAM_HANDLER = logging.StreamHandler(log_stream)
        log.reset_instance()
        log.initialize(console_level=logging.DEBUG)
        queue = multiprocessing.Queue()
        target_fun = TargetCreator(target, queue, 'bla')
        self.assertRaises(RuntimeError, target_fun)
        # Should have logged running
        self.assertNotEqual(len(log_stream.getvalue()), 0)
        self.assertRegexpMatches(log_stream.getvalue(), r'Running bla')
        self.assertRegexpMatches(log_stream.getvalue(),
                                 r'Error in sub-process')
        self.assertRegexpMatches(log_stream.getvalue(), r'RuntimeError: bla')
        self.assertTrue(queue.get())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
class Board():
def __init__(self):
self.size = 8 |
import functools
import importlib
from threading import Lock
from .di import Di
class MapLoader:
    """Lazily builds and caches objects from a ``name -> "module.Class"`` map.

    Thread-safe; circular dependencies between mapped objects are detected
    via an in-progress name stack.
    """
    def __init__(self, di: "Di", map_: dict = None):
        self._di = di        # DI container handed to every built object
        self._map = {}       # name -> "module.Class" dotted path
        self._loaded = {}    # name -> built object (cache)
        self._stack = []     # names currently being built (cycle detection)
        self._lock = Lock()
        if map_:
            self.append(map_)
    def add(self, name, path):
        """
        Add a single map entry
        :param name: object name
        :param path: object path
        :return: none
        """
        with self._lock:
            self._map[name] = path
    def remove(self, name):
        """Forget a mapping and drop any cached instance for it."""
        with self._lock:
            if name in self._map:
                del self._map[name]
            if name in self._loaded:
                del self._loaded[name]
    def append(self, map: dict):
        """
        Add multiple map entries from a dict
        :param map: dict with name:path
        :return: none
        """
        with self._lock:
            for k, v in map.items():
                self._map[k] = v
    def contains(self, name):
        """
        Check if a given name exists
        :param name: name to check
        :return: bool
        """
        return name in self._map
    def get(self, name: str):
        """
        Retrieve an object from the map, building and caching it on first use
        :param name: name to retrieve
        :return: object
        :raises ValueError: if the name is not mapped
        :raises RuntimeError: on circular dependencies or load failures
        """
        # NOTE: the original decorated this method with functools.lru_cache,
        # which (a) keeps every MapLoader instance alive for the process
        # lifetime and (b) kept serving stale objects after remove(); the
        # explicit _loaded cache below already provides the memoization.
        if name in self._stack:
            raise RuntimeError("get(): circular dependency on object %s" % name)
        with self._lock:
            if name in self._loaded:
                return self._loaded[name]
            if name not in self._map:
                raise ValueError("get(): name '%s' does not exist in map" % name)
            self._stack.append(name)
        try:
            path = self._map[name]
            module_path, cls_name = path.rsplit('.', 1)
            try:
                module = importlib.import_module(module_path)
            except ModuleNotFoundError as e:
                raise RuntimeError(
                    "get(): mapped module '%s' not found when discovering path %s"
                    % (module_path, path)) from e
            cls = getattr(module, cls_name, None)
            if cls is None:
                raise RuntimeError("get(): cannot find class '%s' in module '%s'" % (cls_name, module_path))
            obj = self.build(cls)
            with self._lock:
                self._loaded[name] = obj
            return obj
        finally:
            # Always unwind the cycle-detection stack — the original leaked
            # the name on class-lookup or build() failures, making every
            # retry report a bogus "circular dependency".
            with self._lock:
                self._stack.remove(name)
    def build(self, cls) -> object:
        """
        Builds the object
        :param cls: class
        :return: object
        """
        return cls(self._di)
|
# -*- coding: utf-8 -*-
__author__ = 'idbord'
from cmm.lexer import Lexer
def lexer(path):
    """Run lexical analysis on the CMM source file at *path*.

    :return: list of per-token strings (token plus line info), or None when
        analysis fails (the error is printed, matching the original contract)
    """
    result = []
    try:
        # `with` guarantees the stream is closed even if analysis raises
        # (the original leaked the handle on error).
        with open(path, 'r') as stream:
            token_list = Lexer.lexer_analysis(stream)
        for i in token_list:
            result.append(i.to_string_with_line())
        return result
    except Exception as e:
        # print() works on Python 2 and 3; the original `print e` statement
        # is a SyntaxError under Python 3.
        print(e)
|
from flask_script import Manager, Server
from app import app
# flask_script CLI: `python <this file> run` serves the app on 0.0.0.0:5000.
# NOTE(review): Flask-Script is unmaintained; Flask's built-in CLI is the
# usual replacement — confirm before upgrading Flask.
manager = Manager(app)
manager.add_command("run", Server(host='0.0.0.0', port=5000, use_debugger=True))
if __name__ == '__main__':
    manager.run()
|
#!/usr/bin/env python3
import canvas_grab_gui
# Delegate straight to the GUI entry point.
if __name__ == '__main__':
    canvas_grab_gui.Main().main()
from django.contrib import admin
from .models import Api, Service, Parameter, ParameterGroup, TagSignature
class ParameterGroupTabularInline(admin.TabularInline):
    """Inline editor for a service's parameter groups."""
    model = ParameterGroup
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
    """Admin for services, with parameter groups editable inline."""
    inlines = [ParameterGroupTabularInline]
    list_display = ('name', 'description', 'tag', 'http_method')
    class Meta:
        # NOTE(review): ModelAdmin does not consult an inner Meta class;
        # this appears to be a no-op — confirm before removing.
        model = Service
class ParameterTabularInline(admin.TabularInline):
    """Inline editor for individual parameters."""
    model = Parameter
"""
@admin.register(ParameterGroup)
class ParameterGroupAdmin(admin.ModelAdmin):
inlines = [ParameterTabularInline]
#list_display = ('name', 'description', 'http_method')
class Meta:
model = ParameterGroup
"""
# Plain registrations (default ModelAdmin); Service is registered above via
# the @admin.register decorator.
admin.site.register(Api)
# admin.site.register(Service, ServiceAdmin)
admin.site.register(ParameterGroup)
admin.site.register(Parameter)
admin.site.register(TagSignature)
|
from flask import Flask, abort, request, jsonify
from redis import Redis, RedisError
from message_pb2 import Request, Response
import os
import socket
import logging
import json
# Shared Redis handle (not used by the routes below) and the Flask app.
redis = Redis(host="redis", port=6379, db=0, socket_connect_timeout=2, socket_timeout=2)
app = Flask(__name__)
def play_station(station):
    """Ask the radio daemon to switch to *station*.

    :param station: dict with 'name' and 'stream_url' keys
    :return: True when the daemon reports success
    """
    logging.debug("Sending change station request")
    request_message = Request()
    request_message.type = Request.SET_CHANNEL
    request_message.channel = station['stream_url']
    radio_answer = _send_message_to_radio(request_message)
    # logging uses %-style lazy formatting; the original passed the values as
    # extra positional args with no placeholders, which makes the logging
    # machinery raise a formatting error instead of logging them.
    logging.debug("Playing %s from url:\n%s", station['name'], station['stream_url'])
    return radio_answer.success
def get_info():
    """Query the radio daemon for metadata about the current station."""
    logging.debug("Sending get info request")
    info_request = Request()
    info_request.type = Request.INFO
    info_answer = _send_message_to_radio(info_request)
    return {
        "success": True,
        "station_info": {
            "name": info_answer.name,
            # The daemon does not echo the stream URL back on INFO.
            "stream_url": None,
            "bitrate": info_answer.bitrate,
            "codec": info_answer.codec,
            "title": info_answer.title,
            "location": info_answer.location,
            # `extra` arrives as a JSON string, or empty when absent.
            "extra": json.loads(info_answer.extra) if info_answer.extra else None,
            "stereo": info_answer.stereo
        }
    }
def _send_message_to_radio(request_message):
    """Send a serialized Request over the daemon's unix socket, return the
    parsed Response."""
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        s.connect('/var/sockets/radio-sock')
        s.send(request_message.SerializeToString())
        # Half-close signals end-of-request; the daemon replies, then closes.
        s.shutdown(socket.SHUT_WR)
        response_message = []
        while True:
            data = s.recv(4096)
            if not data: break
            response_message.append(data)
    finally:
        # Always release the socket — the original leaked it whenever
        # connect/send/recv raised.
        s.close()
    response = Response()
    response.ParseFromString(b''.join(response_message))
    return response
# Used for changing station, or pausing
@app.route('/api/station/', methods=['GET', 'POST'])
def current_station():
    """GET: report the current station; POST: switch to the submitted one."""
    if request.method == 'GET':
        return jsonify(get_info())
    elif request.method == 'POST':
        logging.debug("Recieved POST request")
        stream_url = request.form['stream_url']
        name = request.form['name']
        new_station = {'name': name, 'stream_url': stream_url}
        success = play_station(new_station)
        logging.info("Station changed successfully = {}".format(success))
        # bitrate/codec are unknown until the daemon reports them via GET.
        new_station['bitrate'] = None
        new_station['codec'] = None
        response = {
            "success": success,
            "station_info": new_station
        }
        return jsonify(response)
@app.route('/api/stop', methods=["GET"])
def stop_playing():
    """Ask the radio daemon to stop playback."""
    stop_request = Request()
    stop_request.type = Request.STOP
    answer = _send_message_to_radio(stop_request)
    logging.debug("Stop request sent")
    return jsonify(answer.success)
@app.route('/api/start', methods=["GET"])
def start_playing():
    """Ask the radio daemon to (re)start playback."""
    start_request = Request()
    start_request.type = Request.PLAY
    answer = _send_message_to_radio(start_request)
    logging.debug("Start request sent")
    return jsonify(answer.success)
@app.route('/api/pause', methods=["GET"])
def pause_playing():
    """Ask the radio daemon to pause playback."""
    pause_request = Request()
    # NOTE(review): sends Request.STOP, identical to /api/stop — confirm the
    # protocol really has no distinct PAUSE message type.
    pause_request.type = Request.STOP
    answer = _send_message_to_radio(pause_request)
    logging.debug("Pause request sent")
    return jsonify(answer.success)
# Development entry point: verbose logging, debug server on port 80.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    app.run(host="0.0.0.0", port=80, debug=True)
|
import pytest
from .utils import reset_config
from fastapi_jwt_auth import AuthJWT
from fastapi import FastAPI, Depends
from fastapi.testclient import TestClient
from datetime import timedelta
@pytest.fixture(scope='function')
def client():
    """Fresh FastAPI test client exposing one JWT-protected route."""
    app = FastAPI()
    @app.get('/protected')
    def protected(Authorize: AuthJWT = Depends()):
        Authorize.jwt_required()
    client = TestClient(app)
    return client
def test_default_config():
    """Defaults: 15-min access / 30-day refresh tokens, HS256, no blacklist."""
    reset_config()
    assert AuthJWT._access_token_expires.__class__ == timedelta
    assert int(AuthJWT._access_token_expires.total_seconds()) == 900
    assert AuthJWT._refresh_token_expires.__class__ == timedelta
    assert int(AuthJWT._refresh_token_expires.total_seconds()) == 2592000
    assert AuthJWT._blacklist_enabled is None
    assert AuthJWT._secret_key is None
    assert AuthJWT._algorithm == 'HS256'
    assert AuthJWT._token_in_blacklist_callback is None
    assert AuthJWT._token is None
def test_token_with_other_value(monkeypatch):
    """Expiry env vars (in seconds) override the defaults."""
    monkeypatch.setenv("AUTHJWT_ACCESS_TOKEN_EXPIRES","60")
    monkeypatch.setenv("AUTHJWT_REFRESH_TOKEN_EXPIRES","86400")
    reset_config()
    assert int(timedelta(minutes=1).total_seconds()) == int(AuthJWT._access_token_expires)
    assert int(timedelta(days=1).total_seconds()) == int(AuthJWT._refresh_token_expires)
def test_token_config_not_int(monkeypatch):
monkeypatch.setenv("AUTHJWT_ACCESS_TOKEN_EXPIRES","test")
monkeypatch.setenv("AUTHJWT_REFRESH_TOKEN_EXPIRES","test")
reset_config()
with pytest.raises(ValueError,match=r"AUTHJWT_ACCESS_TOKEN_EXPIRES"):
AuthJWT.create_access_token(identity='test')
with pytest.raises(ValueError,match=r"AUTHJWT_REFRESH_TOKEN_EXPIRES"):
AuthJWT.create_refresh_token(identity='test')
def test_state_class_with_other_value_except_token(monkeypatch):
    """Non-expiry settings are taken verbatim from the environment (no casting)."""
    monkeypatch.setenv("AUTHJWT_BLACKLIST_ENABLED","test")
    monkeypatch.setenv("AUTHJWT_SECRET_KEY","test")
    monkeypatch.setenv("AUTHJWT_ALGORITHM","test")
    reset_config()
    assert AuthJWT._blacklist_enabled == 'test'
    assert AuthJWT._secret_key == 'test'
    assert AuthJWT._algorithm == 'test'

def test_secret_key_not_exist(client):
    """Both token creation and verification fail loudly without a secret key."""
    reset_config()
    with pytest.raises(RuntimeError,match=r"AUTHJWT_SECRET_KEY"):
        AuthJWT.create_access_token(identity='test')
    with pytest.raises(RuntimeError,match=r"AUTHJWT_SECRET_KEY"):
        client.get('/protected',headers={"Authorization":"Bearer test"})
def test_blacklist_enabled_without_callback(monkeypatch,client):
    """Blacklisting only kicks in when AUTHJWT_BLACKLIST_ENABLED == "true";
    once enabled, a missing token_in_blacklist_loader raises RuntimeError."""
    # set authjwt_secret_key for create token
    monkeypatch.setenv("AUTHJWT_SECRET_KEY","secret-key")
    reset_config()
    token = AuthJWT.create_access_token(identity='test')
    response = client.get('/protected',headers={"Authorization": f"Bearer {token.decode('utf-8')}"})
    assert response.status_code == 200
    # AuthJWT blacklist won't trigger if value
    # env variable AUTHJWT_BLACKLIST_ENABLED not true
    monkeypatch.setenv("AUTHJWT_BLACKLIST_ENABLED","false")
    reset_config()
    response = client.get('/protected',headers={"Authorization": f"Bearer {token.decode('utf-8')}"})
    assert response.status_code == 200
    monkeypatch.setenv("AUTHJWT_BLACKLIST_ENABLED","true")
    reset_config()
    with pytest.raises(RuntimeError,match=r"@AuthJWT.token_in_blacklist_loader"):
        response = client.get('/protected',headers={"Authorization": f"Bearer {token.decode('utf-8')}"})
|
import re
NUMBER_REGEX = r"\d+(\.\d+)?"
def _create_regex(name, specifier, number_regex=NUMBER_REGEX):
return r"(?P<{name}>{number}){specifier}".format(
name=name, specifier=specifier, number=number_regex
)
# Full time-period pattern such as "1d2h30min": every unit is optional but
# must appear in days → hours → minutes → seconds → milliseconds order.
# Short/long specifier ambiguity (e.g. "m" vs "min") is resolved by regex
# backtracking against the anchored "$".
TIME_PERIOD_REGEX = (
    r"^"
    r"({days})?"
    r"({hours})?"
    r"({minutes})?"
    r"({seconds})?"
    r"({milliseconds})?"
    r"$"
).format(
    days=_create_regex("days", specifier="(d|day)"),
    hours=_create_regex("hours", specifier="(h|hour)"),
    minutes=_create_regex("minutes", specifier="(m|min)"),
    seconds=_create_regex("seconds", specifier="(s|sec)"),
    milliseconds=_create_regex("milliseconds", specifier="(ms|msec)"),
)

# Pre-compiled once at import time; unit values are read from the named groups.
TIME_PERIOD_PATTERN = re.compile(TIME_PERIOD_REGEX)
|
#!/Library/Frameworks/Python.framework/Versions/Current/bin/python
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.api import HPlotContainer, ArrayPlotData, Plot
from chaco.default_colormaps import color_map_name_dict
from chaco.tools.api import ZoomTool
from enable.component import Component
from pyface.constant import OK
from pyface.file_dialog import FileDialog
from traits.api import HasTraits, Int, Enum, File, Instance, Button, Float, Str, on_trait_change, Bool, Color, List
from traitsui.api import View, Item, VGroup, HGroup, ListEditor, InstanceEditor
# ============= standard library imports ========================
from PIL import Image
from numpy import sum, zeros_like, where, array
# ============= local library imports ==========================
from chaco.tools.image_inspector_tool import ImageInspectorTool, \
ImageInspectorOverlay
from enable.component_editor import ComponentEditor
import os
import sys
from chaco.tools.pan_tool import PanTool
class Band(HasTraits):
    """One gray-level band to highlight: pixels in [center-threshold, center+threshold]
    are painted with *color* when *use* is set."""
    center = Int(enter_set=True, auto_set=False)
    threshold = Int(enter_set=True, auto_set=False)
    color = Color
    use = Bool(False)

    def traits_view(self):
        # Single-row editor: enable checkbox, band bounds, and a color swatch.
        v = View(HGroup(Item('use', show_label=False,), Item('center'), Item('threshold'), Item('color', style='custom', show_label=False)))
        return v
class BandwidthImager(HasTraits):
    """Interactive gray-level band inspector built on Chaco/Traits.

    Loads an image, shows it with a selectable colormap, highlights
    configurable gray-value bands (center ± threshold) in color, reports the
    %-area of pixels inside a threshold band, and exports plots to PDF/PNG.
    """
    use_threshold = Bool(False)
    low = Int(120, enter_set=True, auto_set=False)
    high = Int(150, enter_set=True, auto_set=False)
    contrast_low = Int(2, enter_set=True, auto_set=False)
    contrast_high = Int(98, enter_set=True, auto_set=False)
    histogram_equalize = Bool(False)
    container = Instance(HPlotContainer)
    plot = Instance(Component)
    oplot = Instance(Component)
    highlight = Int(enter_set=True, auto_set=False)
    highlight_threshold = Int(enter_set=True, auto_set=False)
    area = Float
    colormap_name_1 = Str('gray')
    colormap_name_2 = Str('gray')
    save_button = Button('Save')
    save_mode = Enum('both', 'orig', 'thresh')
    path = File
    # save_both = Bool
    # save_orig = Bool
    # save_thresh = Bool
    # calc_area_button = Button
    calc_area_value = Int(auto_set=False, enter_set=True)
    calc_area_threshold = Int(4, auto_set=False, enter_set=True)
    contrast_equalize = Bool(False)
    highlight_bands = List(Band)

    @on_trait_change('highlight+')
    def _highlight_changed(self):
        # Re-open the source image and paint pixels whose gray value falls in
        # (highlight - highlight_threshold, highlight + highlight_threshold) red.
        im = Image.open(self.path)
        ndim = array(im.convert('L'))
        im = array(im.convert('RGB'))
        low = self.highlight - self.highlight_threshold
        high = self.highlight + self.highlight_threshold
        mask = where((ndim > low) & (ndim < high))
        im[mask] = [255, 0, 0]
        # im = Image.fromarray(im)
        plot = self.oplot
        imgplot = plot.plots['plot0'][0]
        # Keep the existing tools/overlays and re-attach them to the new plot.
        tools = imgplot.tools
        overlays = imgplot.overlays
        plot.delplot('plot0')
        plot.data.set_data('img', im)
        img_plot = plot.img_plot('img')[0]
        for ti in tools:
            ti.component = img_plot
        for oi in overlays:
            oi.component = img_plot
        img_plot.tools = tools
        img_plot.overlays = overlays
        plot.request_redraw()

    # def _histogram_equalize_changed(self):
    #    if not (self.oplot and self.plot):
    #        return
    #    if self.histogram_equalize:
    #        plot = self.plot
    #        pychron = self._ndim
    #        self._hdim = hdim = equalize(pychron) * 255
    #        plot.data.set_data('img', hdim)
    #        plot.request_redraw()
    #
    #    elif self.path:
    #        self._load_image(self.path)
    #
    #    self.container.request_redraw()
    # @on_trait_change('contrast+')
    # def _contrast_changed(self):
    # # if self.path:
    # # self._load_image(self.path)
    #    if not (self.oplot and self.plot):
    #        return
    #
    #    if self.contrast_equalize:
    #        plot = self.plot
    #        pychron = self._ndim
    #        img_rescale = self._contrast_equalize(pychron)
    #        plot.data.set_data('img', img_rescale)
    #        plot.request_redraw()
    #
    #    else:
    #        if self.path:
    #            self._load_image(self.path)
    # # img_rescale = self._ndim
    # #
    #
    # def _contrast_equalize(self, pychron):
    #    p2 = percentile(pychron, self.contrast_low)
    #    p98 = percentile(pychron, self.contrast_high)
    #    img_rescale = rescale_intensity(pychron,
    #                                    in_range=(p2, p98)
    #                                    )
    #    return img_rescale

    def _path_changed(self):
        # Selecting a new file reloads the whole display.
        self._load_image(self.path)

    @on_trait_change('highlight_bands:[center,threshold,color]')
    def _refresh_highlight_bands(self, obj, name, old, new):
        """Repaint the original plot, coloring every enabled Band's pixel range."""
        if self.path:
            plot = self.oplot
            im = Image.open(self.path)
            rgb_arr = array(im.convert('RGB'))
            # im_arr=array(im)
            gray_im = array(im.convert('L'))
            for band in self.highlight_bands:
                if band.use:
                    low = band.center - band.threshold
                    high = band.center + band.threshold
                    mask = where((gray_im > low) & (gray_im < high))
                    # print band.color[:3]
                    rgb_arr[mask] = band.color[:3]
            plot.delplot('plot0')
            plot.data.set_data('img', rgb_arr)
            # NOTE(review): unlike _highlight_changed, tools/overlays are not
            # re-attached here — confirm whether inspection should survive this path.
            img_plot = plot.img_plot('img', colormap=color_map_name_dict[self.colormap_name_1])[0]
            plot.request_redraw()

    @on_trait_change('calc_area+')
    def _calc_area(self):
        # Derive low/high from the requested value ± threshold, then refresh once
        # (trait_change_notify=False avoids triggering _refresh twice).
        self.trait_set(low=self.calc_area_value - self.calc_area_threshold,
                       high=self.calc_area_value + self.calc_area_threshold,
                       trait_change_notify=False)
        self._refresh()

    def _save_button_fired(self):
        # Ask for a destination and export the selected plot(s) as PDF.
        dlg = FileDialog(action='save as')
        if dlg.open() == OK:
            path = dlg.path
            if self.save_mode == 'orig':
                p = self.oplot
            elif self.save_mode == 'thresh':
                p = self.plot
            else:
                p = self.container
            self.render_pdf(p, path)

    def render_pdf(self, obj, path):
        """Render component *obj* to a PDF at *path*, stamping the area/low/high values."""
        from chaco.pdf_graphics_context import PdfPlotGraphicsContext
        if not path.endswith('.pdf'):
            path += '.pdf'
        gc = PdfPlotGraphicsContext(filename=path)
        # opad = obj.padding_bottom
        # obj.padding_bottom = 60
        obj.do_layout(force=True)
        gc.render_component(obj, valign='center')
        gc.gc.drawString(600, 5, 'area:{:0.3f}% low={} high={}'.format(self.area, self.low, self.high))
        gc.save()
        # obj.padding_bottom = opad

    def render_pic(self, obj, path):
        """Render component *obj* to a PNG at *path*."""
        from chaco.plot_graphics_context import PlotGraphicsContext
        gc = PlotGraphicsContext((int(obj.outer_width), int(obj.outer_height)))
        # obj.use_backbuffer = False
        gc.render_component(obj)
        # obj.use_backbuffer = True
        if not path.endswith('.png'):
            path += '.png'
        gc.save(path)

    def _load_image(self, path):
        """Rebuild the container with a grayscale plot of the image at *path*."""
        self.container = self._container_factory()
        im = Image.open(path)
        # oim = array(im)
        im = im.convert('L')
        odim = ndim = array(im)
        # if self.contrast_equalize:
        #    ndim = self._contrast_equalize(ndim)
        # self._ndim = ndim
        # low = self.low
        # high = self.high
        # if self.use_threshold:
        #    tim = zeros_like(ndim)
        #    tim[where((ndim > low) & (ndim < high))] = 255
        #    self.area = (sum(tim) / (ndim.shape[0] * ndim.shape[1])) / 255.
        # else:
        #    tim = ndim
        pd = ArrayPlotData()
        pd.set_data('img', odim)
        plot = Plot(data=pd, padding=[30, 5, 5, 30], default_origin='top left')
        img_plot = plot.img_plot('img',
                                 colormap=color_map_name_dict[self.colormap_name_1]
                                 )[0]
        self.add_inspector(img_plot)
        self.add_tools(img_plot)
        self.oplot = plot
        # pd = ArrayPlotData()
        # pd.set_data('img', tim)
        # plot = Plot(data=pd,
        #             padding=[30, 5, 5, 30], default_origin='top left')
        # img_plot = plot.img_plot('img', colormap=color_map_name_dict[self.colormap_name_2])[0]
        # self.add_inspector(img_plot)
        # self.plot = plot
        #
        # self.plot.range2d = self.oplot.range2d
        self.container.add(self.oplot)
        # self.container.add(self.plot)
        self.container.request_redraw()

    def add_inspector(self, img_plot):
        # Pixel-value readout under the cursor.
        imgtool = ImageInspectorTool(img_plot)
        img_plot.tools.append(imgtool)
        overlay = ImageInspectorOverlay(component=img_plot, image_inspector=imgtool,
                                        bgcolor="white", border_visible=True)
        img_plot.overlays.append(overlay)

    def add_tools(self, img_plot):
        # Box-zoom and pan on the image plot.
        zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
        pan = PanTool(component=img_plot, restrict_to_data=True)
        img_plot.tools.append(pan)
        img_plot.overlays.append(zoom)

    @on_trait_change('low,high, use_threshold')
    def _refresh(self):
        """Recompute the thresholded image and %-area when low/high change.

        NOTE(review): self._hdim / self._ndim are only assigned in code that is
        currently commented out (_load_image / _histogram_equalize_changed), so
        this path would raise AttributeError as-is — confirm before enabling.
        """
        if self.use_threshold:
            pd = self.plot.data
            low = self.low
            high = self.high
            if self.histogram_equalize:
                ndim = self._hdim
            else:
                ndim = self._ndim
            tim = zeros_like(ndim)
            mask = where((ndim > low) & (ndim < high))
            tim[mask] = 255
            # Fraction of pixels inside the band (tim is 0/255, hence the /255.).
            self.area = (sum(tim) / (ndim.shape[0] * ndim.shape[1])) / 255.
            pd.set_data('img', tim)
            self.plot.request_redraw()

    def _colormap_name_1_changed(self):
        # Rebuild the original plot with the newly chosen colormap, keeping
        # the attached tools and overlays.
        cmap = color_map_name_dict[self.colormap_name_1]
        plot = self.oplot.plots['plot0'][0]
        tools = plot.tools
        overlays = plot.overlays
        self.oplot.delplot('plot0')
        im = Image.open(self.path)
        ndim = array(im.convert('L'))
        self.oplot.data.set_data('img', ndim)
        img_plot = self.oplot.img_plot('img', colormap=cmap)[0]
        for ti in tools:
            ti.component = img_plot
        for oi in overlays:
            oi.component = img_plot
        img_plot.tools = tools
        img_plot.overlays = overlays
        # self.add_inspector(img_plot)
        # self.add_tools(img_plot)
        self.oplot.request_redraw()

    def _colormap_name_2_changed(self):
        # NOTE(review): self.plot is only populated by commented-out code in
        # _load_image — verify before relying on this handler.
        cmap = color_map_name_dict[self.colormap_name_2]
        # self.plot.colormap = cmp
        self.plot.delplot('plot0')
        img_plot = self.plot.img_plot('img', colormap=cmap)[0]
        self.add_inspector(img_plot)
        self.plot.request_redraw()

    def _highlight_bands_default(self):
        # Three disabled bands with distinct default colors.
        return [Band(color='red'), Band(color='green'), Band(color='blue')]

    def traits_view(self):
        ctrl_grp = VGroup(Item('path', show_label=False),
                          Item('highlight_bands', editor=ListEditor(mutable=False,
                               style='custom', editor=InstanceEditor()))
                          )
        v = View(
            ctrl_grp,
            Item('container', show_label=False,
                 editor=ComponentEditor()),
            #
            title='Color Inspector',
            resizable=True,
            height=800,
            width=900
        )
        return v

        # def traits_view(self):
        #    lgrp = VGroup(Item('low'),
        #                  Item('low', show_label=False, editor=RangeEditor(mode='slider', low=0, high_name='high')))
        #    hgrp = VGroup(Item('high'),
        #                  Item('high', show_label=False, editor=RangeEditor(mode='slider', low_name='low', high=255)))
        #    savegrp = HGroup(Item('save_button', show_label=False),
        #                     Item('save_mode', show_label=False))
        #    ctrlgrp = VGroup(
        #        Item('path', show_label=False),
        #        HGroup(Item('use_threshold'), Item('contrast_equalize'),
        #               HGroup(Item('contrast_low'), Item('contrast_high'), enabled_when='contrast_equalize'),
        #               Item('histogram_equalize')
        #               ),
        #        HGroup(Item('highlight'), Item('highlight_threshold')),
        #        HGroup(spring,
        #               lgrp,
        #               hgrp,
        #               VGroup(savegrp,
        #                      Item('calc_area_value', label='Calc. Area For.',
        #                           tooltip='Calculate %area for all pixels with this value'
        #                           ),
        #                      Item('calc_area_threshold', label='Threshold +/- px',
        #                           tooltip='bandwidth= calc_value-threshold to calc_value+threshold'
        #                           )
        #
        #                      )
        #               ),
        #        HGroup(spring, Item('area', style='readonly', width= -200)),
        #        HGroup(
        #            Item('colormap_name_1', show_label=False,
        #                 editor=EnumEditor(values=color_map_name_dict.keys())),
        #            spring,
        #            Item('colormap_name_2', show_label=False,
        #                 editor=EnumEditor(values=color_map_name_dict.keys()))),
        #    )
        #    v = View(ctrlgrp,
        #             Item('container', show_label=False,
        #                  editor=ComponentEditor()),
        #
        #             title='Color Inspector',
        #             resizable=True,
        #             height=800,
        #             width=900
        #
        #             )
        # NOTE(review): unreachable — traits_view already returned above;
        # this return belongs to the commented-out alternative view.
        return v

    def _container_factory(self):
        pc = HPlotContainer(padding=[5, 5, 5, 20])
        return pc

    def _container_default(self):
        return self._container_factory()
if __name__ == '__main__':
    d = BandwidthImager()
    if len(sys.argv) > 1:
        # Image path supplied on the command line, resolved against the CWD.
        d.path = os.path.join(os.getcwd(), sys.argv[1])
    else:
        # Developer sample image used only when no argument is given.
        # (Previously this assignment ran unconditionally and clobbered the
        # CLI-provided path.)
        d.path = '/Users/argonlab2/Sandbox/R2-03 closeup_1_BSE_1 zoomed2.png'
    d.configure_traits()
# ============= EOF =============================================
|
import tensorflow as tf
from idas.models.hyperbolic import hyp_ops
import numpy as np
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
# def hyp_pixel_embedding(incoming, radius=1.0):
#
# def _embed_pixel_by_pixel(x_in, chs_in):
#
# if len(x_in.get_shape()) > 1:
# # in tf.map_fn the parameter parallel_iterations defaults to 10
# return tf.map_fn(lambda x: _embed_pixel_by_pixel(x, chs_in), x_in, back_prop=True)# TODO ?, parallel_iterations=1)
# else:
# x_in = tf.expand_dims(x_in, axis=0) # add one dimension for compatibility with tf_exp_map_zero()
# x_out = hyp_ops.tf_exp_map_zero(v=x_in, c=radius)
# x_out = tf.squeeze(x_out, axis=0) # remove the extra dimension and return
# return x_out
#
# # the output matrix will have shape [None, W, H, C]
# channels_in = incoming.get_shape()[-1]
# out_matrix = _embed_pixel_by_pixel(incoming, channels_in)
#
# return out_matrix
from idas.utils.utils import print_yellow_text
def hyp_pixel_embedding(incoming, radius=1.0):
    """Map each pixel's channel vector onto the Poincaré ball of the given radius.

    The input [None, W, H, C] tensor is flattened to one [N, C] matrix so the
    exponential map at the origin runs in a single vectorized op, then the
    original layout is restored.
    """
    _, width, height, channels = incoming.get_shape().as_list()
    flat_pixels = tf.reshape(incoming, shape=[-1, channels])
    embedded = hyp_ops.tf_exp_map_zero(v=flat_pixels, c=radius)
    restored_shape = tf.convert_to_tensor([-1, width, height, channels])
    return tf.reshape(embedded, shape=restored_shape)
def hyp_pixel_classification(incoming, num_classes, radius=1.0):
    """Per-pixel hyperbolic multinomial logistic regression.

    Flattens the [None, W, H, C] input to [N, C], runs hyp_mlr once over all
    pixels, and reshapes the logits back to [None, W, H, num_classes].
    """
    shape = incoming.get_shape().as_list()
    width, height, channels = shape[1], shape[2], shape[3]
    flat_pixels = tf.reshape(incoming, shape=[-1, channels])
    flat_logits = hyp_mlr(flat_pixels, before_mlr_dim=channels, num_classes=num_classes, radius=radius, mlr_geom='hyp')
    return tf.reshape(flat_logits, shape=[-1, width, height, num_classes])
# def hyp_pixel_classification(incoming, num_classes, radius=1.0):
#
# # number of input channels
# channels_in = incoming.get_shape()[-1]
#
# # batch on last dimension
# incoming = tf.transpose(incoming, [1, 2, 3, 0])
#
# def _classify_pixel_by_pixel(x_in):
#
# if len(x_in.get_shape()) > 2:
# # in tf.map_fn the parameter parallel_iterations defaults to 10
# return tf.map_fn(lambda x: _classify_pixel_by_pixel(x), x_in, back_prop=True)# TODO ?, parallel_iterations=1)
# else:
#
# # put back batch axis on first dimension:
# x_in = tf.transpose(x_in, [1, 0]) # now shape is: [None, K]
#
# x_out = hyp_mlr(x_in, before_mlr_dim=channels_in, num_classes=num_classes, radius=radius)
#
# # put back batch axis on last dimension:
# x_out = tf.transpose(x_out, [1, 0]) # now shape is: [None, K]
#
# return x_out
#
# # the output matrix will have shape [None, W, H, C]
# out_matrix = _classify_pixel_by_pixel(incoming)
#
# # batch on first dimension again
# out_matrix = tf.transpose(out_matrix, [3, 0, 1, 2])
#
# return out_matrix
def hyp_conv2d(incoming):
    """2-D convolution in hyperbolic space — placeholder, not implemented yet."""
    raise NotImplementedError
def hyp_dense(incoming, shape, activation='id', radius=1.0, bias_geom='hyp', mlr_geom='hyp', dropout=1.0):
    """Fully connected layer in hyperbolic space (Möbius matmul + Möbius bias).

    :param incoming: incoming tensor (points on the Poincaré ball)
    :param shape: [hidden_dim, before_mlr_dim] for the weight matrix
    :param activation: activation function. Supported values are in ['id', 'relu', 'tanh', 'sigmoid']
    :param radius: radius of the Poincaré ball
    :param bias_geom: 'hyp' means the bias already lives on the ball; 'eucl'
        means it is Euclidean and is exp-mapped onto the ball before adding
    :param mlr_geom: geometry expected by the downstream MLR — when 'hyp' and
        dropout is active, the output is mapped back onto the ball at the end
    :param dropout: dropout keep probability (defaults to 1.0, no dropout). Values greater than 1.0 are treated as 1.0
    :return: the layer output tensor
    """
    with tf.variable_scope('HyperbolicDenseLayer'):
        hidden_dim, before_mlr_dim = shape

        # Define variables for the feed-forward layer: W * x + b
        W = tf.get_variable('W_hyp', dtype=tf.float32, shape=[hidden_dim, before_mlr_dim],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable('b_hyp', dtype=tf.float32, shape=[1, before_mlr_dim],
                            initializer=tf.constant_initializer(0.0))

        # matrix multiplication (Möbius version):
        mat_mul = hyp_ops.tf_mob_mat_mul(W, incoming, radius)

        # add bias (exp-map a Euclidean bias onto the ball first if needed):
        if bias_geom == 'eucl':
            b = hyp_ops.tf_exp_map_zero(b, radius)
        output = hyp_ops.tf_mob_add(mat_mul, b, radius)

        # apply activation function; hyp_output controls whether the result
        # stays on the ball or is returned in the tangent (Euclidean) space:
        output = hyp_ops.tf_hyp_non_lin(output, non_lin=activation,
                                        hyp_output=(mlr_geom == 'hyp' and dropout == 1.0),
                                        c=radius)

        # Mobius dropout
        if dropout < 1.0:
            # If we are here, then output should be Euclidean.
            output = tf.nn.dropout(output, keep_prob=dropout)
            if mlr_geom == 'hyp':  # TODO check MLR (Multi-class logistic regression) or move to hyp_mlr(.)
                output = hyp_ops.tf_exp_map_zero(output, radius)

    return output
def hyp_mlr(incoming, before_mlr_dim, num_classes, radius=1.0, reuse=tf.AUTO_REUSE,
            scope='HyperbolicMLR', mlr_geom='hyp'):
    """
    Multi-logistic regression in hyperbolic space.

    One (A, P) pair of variables is created per class; the per-class logit is
    the (hyperbolic or Euclidean) signed distance of the input to the class
    hyperplane, stacked into a [batch_size, num_classes] logits tensor.

    :param incoming: incoming tensor with shape [batch_size x before_mlr_dim]
    :param before_mlr_dim: last dimension of the incoming tensor
    :param num_classes: number of output classes
    :param radius: radius of the Poincaré ball
    :param reuse: variable-scope reuse mode (defaults to tf.AUTO_REUSE)
    :param scope: scope for the operation
    :param mlr_geom: 'hyp' computes logits with Möbius operations on the ball;
        'eucl' uses a plain Euclidean dot product
    :return: logits tensor of shape [batch_size, num_classes]
    """
    with tf.variable_scope(scope, reuse=reuse):
        A_mlr = []  # per-class normal directions
        P_mlr = []  # per-class hyperplane offsets
        logits_list = []
        for cl in range(num_classes):
            A_mlr.append(tf.get_variable('A_mlr' + str(cl),
                                         dtype=tf.float32,
                                         shape=[1, before_mlr_dim],
                                         initializer=tf.contrib.layers.xavier_initializer()))
            P_mlr.append(tf.get_variable('P_mlr' + str(cl),
                                         dtype=tf.float32,
                                         shape=[1, before_mlr_dim],
                                         initializer=tf.constant_initializer(0.0)))
            if mlr_geom == 'eucl':
                logits_list.append(tf.reshape(hyp_ops.tf_dot(-P_mlr[cl] + incoming, A_mlr[cl]), [-1]))
            elif mlr_geom == 'hyp':
                # Hyperbolic distance-to-hyperplane logit (Ganea et al. formulation).
                minus_p_plus_x = hyp_ops.tf_mob_add(-P_mlr[cl], incoming, radius)
                norm_a = hyp_ops.tf_norm(A_mlr[cl])
                lambda_px = hyp_ops.tf_lambda_x(minus_p_plus_x, radius)
                px_dot_a = hyp_ops.tf_dot(minus_p_plus_x, tf.nn.l2_normalize(A_mlr[cl]))
                logit = 2. / np.sqrt(radius) * norm_a * tf.asinh(np.sqrt(radius) * px_dot_a * lambda_px)
                logits_list.append(tf.reshape(logit, [-1]))

        logits = tf.stack(logits_list, axis=1)
    return logits
|
#!/usr/bin/env python
""" Run a few tests using the King James Bible plain text version as
available from:
http://www.gutenberg.org/etext/10
"""
import re, time
from mx import TextTools, Tools
# Location of the text file
KJB = '/tmp/kjv10.txt'
# Iterations to use for benchmarking
COUNT = 100
def search_bench(word, text):
    """Benchmark three ways of counting occurrences of *word* in *text*.

    Runs mx.TextSearch.findall, re.findall, and str.count COUNT times each
    and prints the mean per-call time in milliseconds plus the match count.
    """
    iterations = Tools.trange(COUNT)
    # fixed typo: "occurences" -> "occurrences"
    print('Searching for all occurrences of %r using ...' % word)

    t0 = time.time()
    searcher = TextTools.TextSearch(word)
    for _ in iterations:
        matches = searcher.findall(text)
    t1 = time.time()
    count = len(matches)
    print(' - mx.TextSearch.TextSearch().findall(): %5.3f ms (%i)' %
          ((t1 - t0) / COUNT * 1000.0, count))

    t0 = time.time()
    pattern = re.compile(word)
    for _ in iterations:
        matches = pattern.findall(text)
    t1 = time.time()
    count = len(matches)
    print(' - re.compile().findall(): %5.3f ms (%i)' %
          ((t1 - t0) / COUNT * 1000.0, count))

    t0 = time.time()
    for _ in iterations:
        count = text.count(word)
    t1 = time.time()
    print(' - text.count(): %5.3f ms (%i)' %
          ((t1 - t0) / COUNT * 1000.0, count))
if __name__ == '__main__':
    # Read the corpus once with a context manager (the file handle was
    # previously never closed) and reuse it for every benchmark word.
    with open(KJB) as corpus:
        text = corpus.read()
    for word in ('God', 'Jesus', 'devil', 'love', 'hate'):
        search_bench(word, text)
|
import random

# Randomly split the combined data file line-by-line into train (50%),
# test (25%), and validation (25%) sets. Context managers guarantee the
# handles are closed even if an I/O error occurs mid-split (the original
# left all four files open on failure).
_BASE = "D:/Users/tsyac/Documents/GitHub/personal/6883-SOD/data/combined/"

with open(_BASE + "fin.txt", 'rb') as fin, \
        open(_BASE + "f_train.txt", 'wb') as f_train, \
        open(_BASE + "f_test.txt", 'wb') as f_test, \
        open(_BASE + "f_val.txt", 'wb') as f_val:
    for line in fin:
        r = random.random()
        if r <= 0.5:
            f_train.write(line)
        elif r < 0.75:  # r is already > 0.5 here
            f_test.write(line)
        else:
            f_val.write(line)
def merge(arr, l, mid, r):
    """Merge the two sorted runs arr[l..mid] and arr[mid+1..r] in place.

    Fixes vs. the original:
    - left-run length was `mid - l + r` (should be `mid - l + 1`)
    - the loop guards tested `l < n1` / `r < n2` instead of the cursors
      `i < n1` / `j < n2`, so the merge never advanced correctly.
    """
    n1 = mid - l + 1  # length of the left run
    n2 = r - mid      # length of the right run
    temp1 = [arr[l + i] for i in range(n1)]
    temp2 = [arr[mid + 1 + i] for i in range(n2)]
    i = 0
    j = 0
    k = l
    # Repeatedly take the smaller head of the two runs.
    while i < n1 and j < n2:
        if temp1[i] < temp2[j]:
            arr[k] = temp1[i]
            i += 1
        else:
            arr[k] = temp2[j]
            j += 1
        k += 1
    # Copy whichever run still has elements left.
    while i < n1:
        arr[k] = temp1[i]
        i += 1
        k += 1
    while j < n2:
        arr[k] = temp2[j]
        j += 1
        k += 1


def mergeSort(arr, l, r):
    """Sort arr[l..r] in place with top-down merge sort."""
    if l < r:
        # `//` — plain `/` yields a float index under Python 3.
        mid = (l + r) // 2
        mergeSort(arr, l, mid)
        mergeSort(arr, mid + 1, r)
        merge(arr, l, mid, r)
# Driver: first stdin line is the element count, second line the values.
# NOTE(review): n bounds the sort — extra values beyond n are read but not sorted.
n = int(input())
arr = list(map(int,input().split()))
mergeSort(arr,0,n-1)
print(arr)
from setuptools import find_packages, setup
# Packaging metadata for the auto_argparse distribution.
setup(
    name="auto_argparse",
    version="0.0.7",
    url="https://github.com/neighthan/auto-argparse",
    author="Nathan Hunt",
    author_email="neighthan.hunt@gmail.com",
    description="Automatically create argparse parsers.",
    license="MIT",
    packages=find_packages(),
)
|
"""Scan text and check if it contains a keyword
"""
from prowl.utility.file import read_file_by_line
def contains_keywords(text: str, keywords: list) -> bool:
    """Check whether *text* contains at least one keyword.

    :param text: text to analyze
    :param keywords: list of substrings to look for
    :return: True as soon as one keyword matches, False otherwise
    """
    return any(keyword in text for keyword in keywords)
def get_keywords(filename: str) -> list:
    """Read keywords from a file, one per line, and return them as a list of strings.

    :param filename: path to file to read from
    :return: list of keywords
    """
    return read_file_by_line(filename)
|
from django.test import TestCase
from .models import Article, Website
class ArticleTestCase(TestCase):
    """Article model: creation and the parent_website foreign key."""

    def setUp(self):
        # Fixed malformed scheme: was "https:://reuters.com".
        self.website_url = "https://reuters.com"
        self.website = Website.objects.create(url=self.website_url)
        self.article = Article.objects.create(
            title="Testing",
            text="Testing Text",
            url="https://reuters.com/testing",
            parent_website=self.website,
        )

    def test_website_article_fk(self):
        # parent_website is a Website instance, so compare against the object
        # (the old assertion compared it to a URL string and could never pass).
        self.assertEqual(self.article.parent_website, self.website)
        self.assertEqual(self.article.parent_website.url, self.website_url)
class WebsiteTestCase(TestCase):
    """Website model: creation of independent rows."""

    def setUp(self):
        # Keep distinct references — the original assigned both creations to
        # self.website, silently discarding the first one.
        self.website = Website.objects.create(url="https://reuters.com")
        self.other_website = Website.objects.create(url="https://apnews.com")
|
import argparse
import sys
def contains_crlf(filename):
    """Return True if any line of *filename* ends with a CRLF sequence."""
    with open(filename, mode='rb') as checked:
        return any(line.endswith(b'\r\n') for line in checked)
def removes_crlf_in_file(filename):
    """Rewrite *filename* in place, converting every CRLF line ending to LF."""
    with open(filename, mode='rb') as source:
        content = source.read()
    with open(filename, mode='wb') as target:
        target.write(content.replace(b'\r\n', b'\n'))
def main(argv=None):
    """Pre-commit entry point: strip CRLF endings from the given files.

    Returns 1 (abort the commit so changes can be reviewed) when any file was
    rewritten, 0 when everything was already clean.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='filenames to check')
    args = parser.parse_args(argv)

    files_with_crlf = [name for name in args.filenames if contains_crlf(name)]
    for name in files_with_crlf:
        print('Removing CRLF end-lines in: {}'.format(name))
        removes_crlf_in_file(name)

    if not files_with_crlf:
        return 0
    print('')
    print('CRLF end-lines have been successfully removed. Now aborting the commit.')
    print('You can check the changes made. Then simply "git add --update ." and re-commit')
    return 1
if __name__ == '__main__':
    # Explicitly forwards sys.argv[1:]; argparse would default to it anyway.
    sys.exit(main(sys.argv[1:]))
|
import pickle
from collections import deque
import pytest
from validx import exc
NoneType = type(None)
def test_lazyref(module):
    """LazyRef resolves its alias recursively up to maxdepth, then raises."""
    v = module.Dict(
        {"x": module.Int(), "y": module.LazyRef("foo", maxdepth=2)},
        alias="foo",
        optional=["x", "y"],
    )
    data = {"x": 1}
    assert v(data) == data

    data = {"y": {"x": 1}}
    assert v(data) == data

    data = {"y": {"y": {"x": 1}}}
    assert v(data) == data

    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v

    # Three levels of nesting exceeds maxdepth=2.
    with pytest.raises(exc.SchemaError) as info:
        v({"y": {"y": {"y": {"x": 1}}}})
    assert len(info.value) == 1
    assert isinstance(info.value[0], exc.RecursionMaxDepthError)
    assert info.value[0].context == deque(["y", "y", "y"])
    assert info.value[0].expected == 2
    assert info.value[0].actual == 3


def test_lazyref_context(module):
    """LazyRef passes the validation context through to the aliased validator
    and tracks recursion depth in it when maxdepth is set."""
    class MarkContext(module.Validator):
        def __call__(self, value, __context=None):
            __context["marked"] = True
            return value

    MarkContext(alias="foo")

    v = module.LazyRef("foo")
    context = {}
    v(None, context)
    assert context["marked"]

    v = module.LazyRef("foo", maxdepth=1)
    context = {}
    v(None, context)
    assert context["marked"]
    assert context["foo.recursion_depth"] == 0
# =============================================================================
def test_type(module):
    """Type validates isinstance and reports expected/actual on mismatch."""
    v = module.Type(int)
    assert v(5) == 5
    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v

    with pytest.raises(exc.InvalidTypeError) as info:
        v(5.0)
    assert info.value.expected == int
    assert info.value.actual == float


def test_type_metaclass(module):
    """Types built from a custom metaclass are accepted and passed through."""
    class MetaClass(type):
        pass

    CustomType = MetaClass("CustomType", (), {})
    v = module.Type(CustomType)
    obj = CustomType()
    assert v(obj) is obj


@pytest.mark.parametrize("nullable", [None, False, True])
def test_type_nullable(module, nullable):
    """None passes only when nullable=True."""
    v = module.Type(int, nullable=nullable)
    assert v(5) == 5
    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v

    if nullable:
        assert v(None) is None
    else:
        with pytest.raises(exc.InvalidTypeError) as info:
            v(None)
        assert info.value.expected == int
        assert info.value.actual == NoneType


@pytest.mark.parametrize("coerce", [None, False, True])
def test_type_coerce(module, coerce):
    """coerce=True converts convertible values; unconvertible ones still raise."""
    v = module.Type(int, coerce=coerce)
    assert v(5) == 5
    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v

    with pytest.raises(exc.InvalidTypeError) as info:
        v("abc")
    assert info.value.expected == int
    assert info.value.actual == str

    if coerce:
        assert v(5.5) == 5
        assert v("5") == 5
    else:
        with pytest.raises(exc.InvalidTypeError) as info:
            v(5.5)
        assert info.value.expected == int
        assert info.value.actual == float

        with pytest.raises(exc.InvalidTypeError) as info:
            v("5")
        assert info.value.expected == int
        assert info.value.actual == str


@pytest.mark.parametrize("min", [None, 0])
@pytest.mark.parametrize("max", [None, 10])
def test_type_min_max(module, min, max):
    """min/max bounds raise MinValueError/MaxValueError when violated."""
    v = module.Type(int, min=min, max=max)
    assert v(5) == 5
    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v

    if min is None:
        assert v(-1) == -1
    else:
        with pytest.raises(exc.MinValueError) as info:
            v(-1)
        assert info.value.expected == min
        assert info.value.actual == -1

    if max is None:
        assert v(11) == 11
    else:
        with pytest.raises(exc.MaxValueError) as info:
            v(11)
        assert info.value.expected == max
        assert info.value.actual == 11
@pytest.mark.parametrize("minlen", [None, 2])
@pytest.mark.parametrize("maxlen", [None, 5])
def test_type_minlen_maxlen(module, minlen, maxlen):
    """Length bounds require a sized type; int is rejected, bytes is checked."""
    if minlen or maxlen:
        with pytest.raises(TypeError) as info:
            module.Type(int, minlen=minlen, maxlen=maxlen)
        assert info.value.args[0] == "Type %r does not provide method '__len__()'" % int

    v = module.Type(bytes, minlen=minlen, maxlen=maxlen)
    assert v(b"abc") == b"abc"
    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v

    if minlen is None:
        assert v(b"a") == b"a"
    else:
        with pytest.raises(exc.MinLengthError) as info:
            v(b"a")
        assert info.value.expected == minlen
        assert info.value.actual == 1

    if maxlen is None:
        assert v(b"abcdef") == b"abcdef"
    else:
        with pytest.raises(exc.MaxLengthError) as info:
            v(b"abcdef")
        assert info.value.expected == maxlen
        assert info.value.actual == 6


@pytest.mark.parametrize("options", [None, [5, 6]])
def test_type_options(module, options):
    """options restricts accepted values to the given set."""
    v = module.Type(int, options=options)
    assert v(5) == 5
    assert v(6) == 6
    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v

    if options is None:
        assert v(4) == 4
    else:
        with pytest.raises(exc.OptionsError) as info:
            v(4)
        assert info.value.expected == frozenset(options)
        assert info.value.actual == 4


# =============================================================================


def test_const(module):
    """Const accepts exactly one value and rejects everything else."""
    v = module.Const(1)
    assert v(1) == 1
    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v

    with pytest.raises(exc.OptionsError) as info:
        v(2)
    assert info.value.expected == [1]
    assert info.value.actual == 2


# =============================================================================


def test_any(module):
    """Any passes every value through untouched."""
    v = module.Any()
    assert v(None) is None
    assert v(True) is True
    assert v(1) == 1
    assert v("x") == "x"
    assert v([1, "x"]) == [1, "x"]
    assert v.clone() == v
    assert pickle.loads(pickle.dumps(v)) == v
|
from gym import Wrapper
from mujoco_py import MjViewer, MjRenderContextOffscreen
class RenderEnv(Wrapper):
    """Gym wrapper that configures MuJoCo camera views and offscreen rendering.

    NOTE(review): structure reconstructed from a whitespace-mangled source —
    viewer_setup/_get_viewer/close below read as local functions that __init__
    monkey-patches onto the unwrapped env; confirm against the original file.
    """

    def __init__(self, env,
                 cam_id=None, cam_pos=None, cam_angle=None, cameras=None,
                 view="left", zoom=1.0,
                 width=84, height=None, ):
        super().__init__(env)
        self.env = env
        self.cam_id = cam_id
        self.cam_pos = cam_pos
        self.cam_angle = cam_angle
        self.cameras = cameras or []
        self.width = width
        self.height = height
        # Stash view parameters on the unwrapped env so the patched
        # viewer_setup (bound to unwrapped, below) can read them.
        self.unwrapped._view = view
        self.unwrapped._zoom = zoom

        def viewer_setup(self):
            """Position the camera for the requested named view."""
            # note: this is the front (left side) view. rotate
            # side-ways for stereo or to avoid occlusion with
            # some of the tasks
            # Some of the reference camera views can be found
            # here: https://github.com/rlworkgroup/metaworld/issues/35
            if self._view == "frontal":
                self.viewer.cam.azimuth = 270
                self.viewer.cam.elevation = -40
                self.viewer.cam.distance = 0.6 * self._zoom
                self.viewer.cam.lookat[0] = 0
                self.viewer.cam.lookat[1] = 0.9
                self.viewer.cam.lookat[2] = 0.3
            elif self._view == "left":
                self.viewer.cam.azimuth = 0
                self.viewer.cam.elevation = -20
                self.viewer.cam.distance = 0.4 * self._zoom
                self.viewer.cam.lookat[0] = -0.4
                self.viewer.cam.lookat[1] = 0.575
                self.viewer.cam.lookat[2] = 0.3

        # this is the most damming change
        def _get_viewer(self, mode):
            """Create (and cache) the on-screen or offscreen viewer for *mode*."""
            self.viewer = self._viewers.get(mode)
            if self.viewer is None:
                if mode == 'human':
                    self.viewer = MjViewer(self.sim)
                else:
                    self.viewer = MjRenderContextOffscreen(self.sim, -1)
                self.viewer_setup()
                self._viewers[mode] = self.viewer
            self.viewer_setup()
            return self.viewer

        def close(self):
            self.viewer = None
            self._viewers.clear()
            # NOTE(review): _viewers was cleared on the line above, so this
            # loop never runs — and .items() yields (key, viewer) tuples, so
            # viewer.window would raise if it did. The glfw teardown looks
            # dead; confirm the intended behavior.
            for viewer in self._viewers.items():
                import glfw
                glfw.destroy_window(viewer.window)

        # monkey patch here
        from functools import partial
        self.unwrapped.viewer_setup = partial(viewer_setup, self=self.unwrapped)
        self.unwrapped._get_viewer = lambda mode: _get_viewer(self.unwrapped, mode)
        self.unwrapped.close = lambda: close(self)

    def reset(self):
        """Reset the env and re-apply the camera setup to every cached viewer."""
        env = self.unwrapped
        obs = self.env.reset()
        old = env.viewer
        # Temporarily point env.viewer at each cached viewer so the patched
        # viewer_setup configures all of them, then restore the original.
        for env.viewer in env._viewers.values():
            self.viewer_setup()
        env.viewer = old
        return obs

    def render(self, mode="human", width=None, height=None):
        """Render on-screen for mode 'human', otherwise return an RGB array
        (row order flipped to image convention) at the requested size."""
        width = width or self.width
        height = height or self.height or width
        if mode == "human":
            return self.unwrapped.render(mode, width=None, height=None)
        viewer = self.unwrapped._get_viewer(mode)
        viewer.render(width, height)
        data = viewer.read_pixels(width, height, depth=False)
        return data[::-1]

    def close(self):
        # Best-effort cleanup; swallows all errors.
        # NOTE(review): `unwrapped` is normally a read-only property, so the
        # delattr likely raises and is silently ignored by the bare except.
        try:
            self.unwrapped.close()
            delattr(self, "unwrapped")
        except:
            pass
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 22:37:10 2019
@author: Ghayasuddin Adam
"""
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 07:41:43 2019
@author: Ghayasuddin Adam
"""
import re
import tweepy
from tweepy import Stream
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler

# insert Twitter Keys
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_KEY = ''
ACCESS_SECRET = ''

auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)

screen_name = 'BarcaWorldwide'

# Part 1: accounts that mentioned @screen_name (via the Search API).
print("\nPeople Who Has mentioned me Search API")
allmention = []
for tweet in tweepy.Cursor(api.search, q='@' + screen_name, result_type='recent', timeout=999999).items(1000):
    # BUG FIX: User objects have no `.screen` attribute — the original
    # `tweet.user.screen` raised AttributeError; use `.screen_name`.
    if tweet.in_reply_to_status_id is None and tweet.retweeted is False and tweet.user.screen_name != screen_name:
        if len(tweet.entities["user_mentions"]) > 0:
            allmention.append(tweet)

mentions = set()
for tweet in allmention:
    # skip replies directed at us; keep only organic mentions
    if tweet.in_reply_to_screen_name != screen_name:
        mentions.add(tweet.user.screen_name)
print(mentions)

# Part 2: accounts that @screen_name itself mentioned (full timeline walk).
print("\n People Who I Have mentioned")
alltweets = []
new_tweets = api.user_timeline(screen_name=screen_name, count=200)
alltweets.extend(new_tweets)
# BUG FIX: guard the empty-timeline case before indexing alltweets[-1]
if alltweets:
    oldest = alltweets[-1].id - 1
    while len(new_tweets) > 0:
        # page backwards 200 tweets at a time using max_id
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)
        if alltweets:
            oldest = alltweets[-1].id - 1

mention = set()
for tweet in alltweets:
    if tweet.in_reply_to_status_id is None and tweet.retweeted is False:
        if len(tweet.entities["user_mentions"]) > 0:
            for entry in tweet.entities["user_mentions"]:
                mention.add(entry["screen_name"])
print(mention)
|
import os
from typing import List, Optional
from urllib.request import urlopen, Request
from datetime import date
from time import sleep
from pathlib import Path
import json
from bs4 import BeautifulSoup
def wait_for_input(day: Optional[int] = None, year: Optional[int] = None) -> None:
    """Sleep until December `day` of `year` (the AoC puzzle unlock date).

    Both arguments default to today's day/year. Polls once per second and
    prints a progress line roughly once per minute.
    """
    now = date.today()
    if day is None:
        day = now.day
    if year is None:
        year = now.year
    # dates are keyed as year*384 + month*32 + day for a single comparison;
    # the target month is always December (12)
    target = year * 32 * 12 + 12 * 32 + day
    waits = 0
    while True:
        now = date.today()
        if now.year * 32 * 12 + now.month * 32 + now.day >= target:
            return
        sleep(1)
        if waits % 60 == 0:
            print('Waiting until {:0>4}-12-{:0>2}...'.format(year, day))
        waits += 1
def input_text(day: Optional[int] = None, year: Optional[int] = None) -> str:
    """Return the puzzle input for an AoC day, caching it on first download.

    Args:
        day: puzzle day (1-25); defaults to today's day of month.
        year: event year; defaults to the current year.

    Returns:
        The input text with its trailing newline stripped.

    Raises:
        FileNotFoundError: if no cache exists and session.cookie is missing.
        urllib.error.URLError / HTTPError: on network or auth failure.
    """
    today = date.today()
    if day is None:
        day = today.day
    if year is None:
        year = today.year
    file_path = Path(__file__).parent.parent.joinpath('inputs/{0:0>4}/input-{0:0>4}{1:0>2}.txt'.format(year, day))
    file_path.parent.mkdir(parents=True, exist_ok=True)
    file_name = file_path.resolve()
    try:
        with open(file_name, 'r') as inp_file:
            return inp_file.read()
    except FileNotFoundError:
        req = Request('https://adventofcode.com/{}/day/{}/input'.format(year, day))
        # puzzle inputs are per-user: authenticate with the stored session cookie
        with open(os.path.join(os.path.dirname(__file__), 'session.cookie'), 'r') as cookie:
            req.add_header('cookie', 'session=' + cookie.read())
        with urlopen(req) as conn:
            inp = conn.read().decode('utf-8')[:-1]  # drop the trailing newline
        # BUG FIX: `file_name` is already absolute (resolve()), so the original
        # os.path.join(dirname(__file__), file_name) silently discarded its
        # first argument — and would misplace the cache were the path ever
        # relative. Write to the resolved cache path directly.
        with open(file_name, 'w') as out_file:
            out_file.write(inp)
        return inp
def find_test_cases(day: Optional[int] = None, year: Optional[int] = None, cached=False) -> List[str]:
    """Scrape candidate test inputs (the text of every <pre> block) from an
    AoC puzzle page, caching the result as JSON under inputs/<year>/.

    With cached=True a previous scrape is reused when present and valid,
    avoiding the network round-trip entirely.
    """
    today = date.today()
    day = today.day if day is None else day
    year = today.year if year is None else year
    cache_path = Path(__file__).parent.parent.joinpath('inputs/{0:0>4}/testcases-{0:0>4}{1:0>2}.json'.format(year, day))
    cache_path.parent.mkdir(parents=True, exist_ok=True)
    resolved = cache_path.resolve()
    if cached:
        try:
            with open(resolved, 'r') as handle:
                return ['\n'.join(tc) for tc in json.load(handle)]
        except (FileNotFoundError, json.JSONDecodeError):
            pass  # no usable cache — fall through to a fresh scrape
    req = Request('https://adventofcode.com/{}/day/{}'.format(year, day))
    try:
        with open(os.path.join(os.path.dirname(__file__), 'session.cookie'), 'r') as cookie:
            req.add_header('cookie', 'session=' + cookie.read())
    except FileNotFoundError:
        pass  # puzzle pages are public; the cookie only unlocks extra text
    with urlopen(req) as conn:
        page = BeautifulSoup(conn.read().decode('utf-8'), 'html.parser')
    candidates = [elem.get_text().strip() for elem in page.find_all('pre')]
    with open(resolved, 'w') as handle:
        json.dump([tc.splitlines() for tc in candidates], handle, indent=2)
    return candidates
|
#!/usr/bin/env python
########################
# author:terry #
# 27/11/15 #
########################
import time
import os
import getopt
import sys
filename = ''
name = '' #author name
hasdes = False #whether has description
cusdate = '' #custom date
def usage():
    """Print the CLI help text and exit with status 1 (Python 2 script)."""
    lines = [
        "FILESIGNER Auth:Terry",
        " Version:1.1.0",
        " Date:2015-12-27",
        "",
        "Usage: ./filesigner.py -f test.txt -a terry -d",
        "",
        "-h,--help -show the usage.",
        "-f,--file -the file need to sign.",
        "-a,--author -the file author name.",
        "-d -need to write a description.",
        "-c,--cusdate -enter the custom date for file.",
        "",
        "",
    ]
    # one write instead of thirteen print statements; output is identical
    sys.stdout.write("\n".join(lines) + "\n")
    sys.exit(1)
def main():
global filename
global name
global hasdes
global cusdate
if not len(sys.argv[1:]):
usage()
try:
opts,args = getopt.getopt(sys.argv[1:],'hf:a:dc:',
['help','file=','author=','cusdate='])
except getopt.GetoptError,e:
print str(e)
usage()
for o,a in opts:
if o in ('-h','--help'):
usage()
elif o in ('-f','--file'):
filename = a
elif o in ('-a','--author'):
name = 'Author:'+a
elif o in ('-d'):
hasdes = True
elif o in ('-c','--cusdate'):
cusdate = a
else:
assert False,'Unhandled Option.'
#filename = raw_input("please input your filename:")
#name = "autor:"+raw_input("please input your name:")
#description = raw_input("please input your description:")
#date = time.strftime("%d/%m/%Y")
#print name,filename,date
if hasdes:
description = raw_input("Please input your description:")
if cusdate is '':
date = 'Date:'+time.strftime("%d/%m/%Y")
else:
date = 'Date:'+cusdate
markfile = file(filename,"a+")
length = max(len(name),len(filename),len(date))*3/2
marklist = ["#"*length,forstr(length,name),forstr(length,date),"#"*length]
for i in marklist:
markfile.write(i+"\n")
markfile.close()
#formatstring,make the string has same length as ### line
def forstr(length,string):
    """Frame `string` as '# <string><padding>#' so the line is `length`
    characters wide, matching the surrounding '#' border lines."""
    # 3 = the '# ' prefix plus the trailing '#'
    padding = " " * (length - len(string) - 3)
    return "# " + string + padding + "#"
# Run the CLI entry point only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
"""
Classes
-------
Additional Classes used in different places around the code.
Mostly to help to get data around.
"""
import numpy as np
from plofeld.utils.constants import RealNumber
class Vector(np.ndarray):
    """A named 2D/3D coordinate vector.

    Thin numpy.ndarray subclass of length 2 or 3 with x/y/z accessors.
    Subclassing ndarray is somewhat peculiar, see:
    https://numpy.org/doc/stable/user/basics.subclassing.html
    """

    def __new__(cls, x: RealNumber, y: RealNumber, z: RealNumber = None):
        # omit z entirely for a 2D vector rather than storing a placeholder
        components = [x, y] if z is None else [x, y, z]
        return np.asarray(components).view(cls)

    @property
    def x(self):
        return self[0]

    @property
    def y(self):
        return self[1]

    @property
    def z(self):
        # 2D vectors have no third component; mirror that with None
        if len(self) > 2:
            return self[2]
        return None

    def toarray(self):
        """Return a plain numpy array copy of this vector."""
        return np.array(self)

    def norm(self) -> float:
        """Euclidean (l2) length of this vector."""
        return float(np.sqrt(np.sum(np.square(self))))

    def unit(self) -> 'Vector':
        """This vector scaled to unit length."""
        return self / self.norm()

    def distance(self, other: 'Vector') -> float:
        """Euclidean (l2) distance between this point and `other`."""
        return (other - self).norm()

    def __str__(self):
        parts = f"x: {self.x}, y: {self.y}"
        if self.z is None:
            return parts
        return parts + f", z: {self.z}"

    def __repr__(self):
        return f"Vector({str(self)})"
|
from __future__ import absolute_import
import pytest
from django.conf import settings
from django.test import RequestFactory
from django.dispatch import receiver
from importlib import import_module
from django_cas_ng.models import SessionTicket
from django_cas_ng.backends import CASBackend
from django_cas_ng.signals import cas_user_authenticated, cas_user_logout
from django_cas_ng.views import login, logout
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
@pytest.mark.django_db
def test_signal_when_user_logout_manual(monkeypatch, django_user_model):
    """cas_user_logout must fire with user, session and ticket on a manual
    logout through the logout view."""
    session = SessionStore()
    session['fake_session_key'] = 'fake-session_value'
    session.save()
    assert SessionStore(session_key=session.session_key) is not None

    request = RequestFactory().get('/logout')
    request.session = session
    # Persist a ticket bound to this session so logout can look it up.
    SessionTicket.objects.create(
        session_key=session.session_key,
        ticket='fake-ticket'
    )
    user = django_user_model.objects.create_user('test@example.com', '')
    assert user is not None
    request.user = user

    captured = {}

    @receiver(cas_user_logout)
    def on_logout(sender, session, **kwargs):
        captured.update(kwargs)
        captured['session'] = dict(session)

    logout(request)
    assert request.user.is_anonymous() is True
    assert 'user' in captured
    assert captured['user'] == user
    assert 'session' in captured
    assert captured['session'].get('fake_session_key') == 'fake-session_value'
    assert 'ticket' in captured
    assert captured['ticket'] == 'fake-ticket'
@pytest.mark.django_db
def test_signal_when_user_logout_slo(monkeypatch, django_user_model, settings):
    """cas_user_logout must fire on a SAML single-log-out POST hitting the
    login view, resolving the session via the stored ticket."""
    slo_payload = {'logoutRequest': '<samlp:LogoutRequest '
                   'xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol">'
                   '<samlp:SessionIndex>fake-ticket'
                   '</samlp:SessionIndex></samlp:LogoutRequest>'
                   }
    settings.CAS_VERSION = 'CAS_2_SAML_1_0'
    request = RequestFactory().post('/login', slo_payload)
    # user session and current request's session are different
    request.session = {}
    user = django_user_model.objects.create_user('test@example.com', '')
    assert user is not None

    session = SessionStore()
    session['fake_session_key'] = 'fake-session_value'
    session.save()
    assert SessionStore(session_key=session.session_key) is not None
    # Persist a ticket bound to the *other* session so SLO can find it.
    SessionTicket.objects.create(
        session_key=session.session_key,
        ticket='fake-ticket'
    )

    captured = {}

    @receiver(cas_user_logout)
    def on_logout(sender, session, **kwargs):
        captured.update(kwargs)
        captured['session'] = dict(session)

    login(request)
    assert 'user' in captured
    assert 'session' in captured
    assert captured['session'].get('fake_session_key') == 'fake-session_value'
    assert 'ticket' in captured
    assert captured['ticket'] == 'fake-ticket'
@pytest.mark.django_db
def test_signal_when_user_is_created(monkeypatch, django_user_model):
    """
    Test that when CAS authentication creates a user, the signal is called with
    `created = True`
    """
    factory = RequestFactory()
    request = factory.get('/login/')
    request.session = {}

    def mock_verify(ticket, service):
        return 'test@example.com', {'ticket': ticket, 'service': service}, None

    callback_values = {}

    @receiver(cas_user_authenticated)
    def callback(sender, **kwargs):
        callback_values.update(kwargs)

    # we mock out the verify method so that we can bypass the external http
    # calls needed for real authentication since we are testing the logic
    # around authentication.
    monkeypatch.setattr('cas.CASClientV2.verify_ticket', mock_verify)
    # sanity check: the user must not exist yet
    assert not django_user_model.objects.filter(
        username='test@example.com',
    ).exists()
    backend = CASBackend()
    user = backend.authenticate(
        ticket='fake-ticket', service='fake-service', request=request,
    )
    assert 'user' in callback_values
    assert callback_values.get('user') == user
    # `created` is a bool flag: use identity, not `== True` (PEP 8 / E712)
    assert callback_values.get('created') is True
    assert 'attributes' in callback_values
    assert 'ticket' in callback_values
    assert 'service' in callback_values
@pytest.mark.django_db
def test_signal_when_user_already_exists(monkeypatch, django_user_model):
    """
    Test that when CAS authentication finds an existing user, the signal is
    called with `created = False`.
    """
    factory = RequestFactory()
    request = factory.get('/login/')
    request.session = {}

    def mock_verify(ticket, service):
        return 'test@example.com', {'ticket': ticket, 'service': service}, None

    callback_values = {}

    @receiver(cas_user_authenticated)
    def callback(sender, **kwargs):
        callback_values.update(kwargs)

    # we mock out the verify method so that we can bypass the external http
    # calls needed for real authentication since we are testing the logic
    # around authentication.
    monkeypatch.setattr('cas.CASClientV2.verify_ticket', mock_verify)
    # pre-create the user so authentication cannot create it
    existing_user = django_user_model.objects.create_user(
        'test@example.com', '',
    )
    backend = CASBackend()
    user = backend.authenticate(
        ticket='fake-ticket', service='fake-service', request=request,
    )
    assert 'user' in callback_values
    assert callback_values.get('user') == user == existing_user
    # `created` is a bool flag: use identity, not `== False` (PEP 8 / E712)
    assert callback_values.get('created') is False
    assert 'attributes' in callback_values
    assert 'ticket' in callback_values
    assert 'service' in callback_values
@pytest.mark.django_db
def test_signal_not_fired_if_auth_fails(monkeypatch, django_user_model):
    """
    cas_user_authenticated must stay silent when CAS ticket verification
    fails.
    """
    request = RequestFactory().get('/login/')
    request.session = {}

    def failing_verify(ticket, service):
        # a failed CAS verification yields no username and no attributes
        return None, {}, None

    seen = {}

    @receiver(cas_user_authenticated)
    def on_auth(sender, **kwargs):
        seen.update(kwargs)

    # bypass the real HTTP round-trip to the CAS server
    monkeypatch.setattr('cas.CASClientV2.verify_ticket', failing_verify)
    backend = CASBackend()
    user = backend.authenticate(
        ticket='fake-ticket', service='fake-service', request=request,
    )
    assert user is None
    assert seen == {}
|
import os
import tempfile
from mock import patch
import dusty.constants
from dusty.systems.known_hosts import ensure_known_hosts
from ....testcases import DustyTestCase
@patch('dusty.systems.known_hosts._get_known_hosts_path')
@patch('dusty.systems.known_hosts.check_output')
class TestKnownHostsSystem(DustyTestCase):
    """ensure_known_hosts behavior against a temp known_hosts file.

    The class decorators inject the mocks into every test method:
    check_output fakes the ssh-keyscan result, _get_known_hosts_path points
    at the temp file created in setUp.
    """

    # seed content shared by every scenario
    INITIAL_CONTENT = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA'

    def setUp(self):
        super(TestKnownHostsSystem, self).setUp()
        self.temp_hosts_path = tempfile.mkstemp()[1]

    def tearDown(self):
        super(TestKnownHostsSystem, self).tearDown()
        os.remove(self.temp_hosts_path)

    def _run_ensure(self, fake_check_output, fake_get_known_hosts, keyscan_result, hosts):
        """Seed the temp file, wire the mocks, run ensure_known_hosts and
        return the resulting file contents (factored from the three tests)."""
        fake_get_known_hosts.return_value = self.temp_hosts_path
        fake_check_output.return_value = keyscan_result
        with open(self.temp_hosts_path, 'w') as f:
            f.write(self.INITIAL_CONTENT)
        ensure_known_hosts(hosts)
        with open(self.temp_hosts_path, 'r') as f:
            return f.read()

    def test_preserves_existing_content(self, fake_check_output, fake_get_known_hosts):
        result = self._run_ensure(fake_check_output, fake_get_known_hosts,
                                  'dusty.host:SOMESHA', ['dusty.host'])
        self.assertEqual(result, self.INITIAL_CONTENT + '\ndusty.host:SOMESHA')

    def test_not_modified(self, fake_check_output, fake_get_known_hosts):
        # host already present: file must remain untouched
        result = self._run_ensure(fake_check_output, fake_get_known_hosts,
                                  'prev.known.host.1:SOMESHA', ['prev.known.host.1'])
        self.assertEqual(result, self.INITIAL_CONTENT)

    def test_redundant_additions(self, fake_check_output, fake_get_known_hosts):
        # the same new host listed three times must be appended only once
        result = self._run_ensure(fake_check_output, fake_get_known_hosts,
                                  'dusty.host:SOMESHA', ['dusty.host', 'dusty.host', 'dusty.host'])
        self.assertEqual(result, self.INITIAL_CONTENT + '\ndusty.host:SOMESHA')
|
__version__ = '0.1dev'
from ..config import __NTHREADS__
from ..config import __USE_NUMEXPR__
if __USE_NUMEXPR__:
import numexpr
numexpr.set_num_threads(__NTHREADS__)
else:
#import numpy ufunc (c-coded for speed-up)
from numpy import subtract, divide, power, add
import numpy
from numpy import log, exp
from . import stellib
from . import extinction
from . import photometry
from ..tools.decorators import timeit
from ..import proba
def getFluxAttenuation(law, lamb, **kwargs):
    """ Evaluate tau from a given extinction law such that
    redflux = flux * exp(-tau).

    INPUTS:
        law     extinction.ExtinctionLaw    instance of extinction law
        lamb    np.ndarray[float, ndim=1]   array of wavelengths in AA
    KEYWORDS:
        **kwargs forwarded to the law call: law(lamb, **kwargs)
    OUTPUTS:
        tau     np.ndarray[float, ndim=1]   optical depth values
    """
    assert(isinstance(law, extinction.ExtinctionLaw))
    # the law expects wavelengths in microns, lamb comes in Angstroms
    return law.function( lamb * 1e-4, Alambda=False, **kwargs)
def computeLogLikelihood(flux, fluxerr, fluxmod, mask=None, normed=False, **kwargs):
    """ Compute the log chi2 likelihood between data with uncertainties and
    perfectly known models.

    INPUTS:
        flux:    np.ndarray[float, ndim=1]  observed fluxes
        fluxerr: 1-d array for symmetric (Normal) errors, or a (2, N) pair
                 (err-, err+) for a Split Normal (asymmetric) likelihood
        fluxmod: np.ndarray[float, ndim=2]  model fluxes (Nfilters, Nmodels)
    KEYWORDS:
        normed:  bool  if set, normalize the result
        mask:    np.ndarray[bool, ndim=1]  applied during the calculation,
                 assuming mask.shape == flux.shape
    OUTPUTS:
        ln(L)    np.ndarray[float, ndim=1]  (Nmodels,), L propto exp(-chi2/2)
    """
    err_shape = numpy.shape(fluxerr)
    asymmetric = (len(err_shape) == 2) and (err_shape[0] == 2)
    if asymmetric:
        # (err-, err+) pair -> Split Normal likelihood
        errm, errp = fluxerr
        lnp = proba.SN_logLikelihood(flux, errm, errp, fluxmod, mask=mask)
    else:
        # symmetric errors -> Normal likelihood
        lnp = proba.N_logLikelihood(flux, fluxerr, fluxmod, mask=mask)
    if normed is True:
        lnp -= log(proba.getNorm_lnP(lnp))
    return lnp
def computeChi2WithASTs(flux, fluxerr, fluxmod, ASTs_bias, ASTs_err):
    """ compute the non-reduced chi2 between data with uncertainties and
    perfectly known models
    inputs:
        flux        np.ndarray[float, ndim=1]   array of fluxes
        fluxerr     np.ndarray[float, ndim=1]   array of flux errors
        fluxmod     np.ndarray[float, ndim=2]   modeled fluxes (Nmodels, Nfilters)
        ASTs_bias   np.ndarray[float, ndim=2]   modeled photometry biases
        ASTs_err    np.ndarray[float, ndim=2]   modeled photometry errors
    outputs:
        chi2        np.ndarray[float, ndim=1]   array of chi2 values (Nmodels)

    Using ASTs models:
        chi2 = (flux - (fluxmod + ASTs_bias)) ** 2 / (fluxerr ** 2 + ASTs_err ** 2)
    note: using ufuncs because they are written in C and linked into numpy.
    """
    # make sure errors are not null (avoid division by zero)
    fluxerr[fluxerr == 0.] = 1.
    if __USE_NUMEXPR__:
        # BUG FIX: the original expression opened with 'sum((' but only closed
        # one of those parentheses, so numexpr raised a syntax error.
        return numexpr.evaluate('sum((flux - fluxmod - ASTs_bias) ** 2 / (fluxerr ** 2 + ASTs_err ** 2), axis=1)',
                                local_dict={'flux': flux, 'fluxmod': fluxmod, 'fluxerr': fluxerr,
                                            'ASTs_bias': ASTs_bias, 'ASTs_err': ASTs_err})
    else:
        # Avoid extra array copies by working in-place on scratch buffers.
        # BUG FIX: the buffers were allocated 1-d (len(fluxmod),) while the
        # ufuncs below write full 2-d results; allocate fluxmod.shape instead.
        # numerator: df ** 2
        tmpu = numpy.empty(fluxmod.shape, dtype=float)
        add(fluxmod, ASTs_bias, tmpu)           # fluxmod + bias
        subtract(flux[None, :], tmpu, tmpu)     # flux - (fluxmod + bias)
        power(tmpu, 2, tmpu)
        # denominator: err ** 2
        tmpb = numpy.empty(fluxmod.shape, dtype=float)
        power(ASTs_err, 2, tmpb)
        add( power(fluxerr, 2)[None, :], tmpb, tmpb)
        # ratio, then reduce over the filter axis
        divide( tmpu, tmpb, tmpu )
        return tmpu.sum(axis=1)
def computeLogLikelihoodWithASTs(flux, fluxerr, fluxmod, ASTs_bias, ASTs_err, normed=False, mask=None, lnp_threshold=1000.):
    """ Compute the log of the chi2 likelihood between data with uncertainties and
    perfectly known models, folding in AST (artificial star test) biases/errors.
    INPUTS:
        flux        np.ndarray[float, ndim=1]   array of fluxes
        fluxerr     np.ndarray[float, ndim=1]   array of flux errors
        fluxmod     np.ndarray[float, ndim=2]   array of modeled fluxes (Nfilters , Nmodels)
        ASTs_bias   np.ndarray[float, ndim=2]   array of modeled photometry biases (Nfilters , Nmodels)
        ASTs_err    np.ndarray[float, ndim=2]   array of modeled photometry errors (Nfilters , Nmodels)
    KEYWORDS:
        normed          bool    if set normalize the result
        mask            np.ndarray[bool, ndim=1]    mask array to apply during the calculations
                        mask.shape = flux.shape
        lnp_threshold   float   cut the values outside -x, x in lnp
    OUTPUTS:
        ln(L)   np.ndarray[float, ndim=1]   array of ln(L) values (Nmodels)
                with L = 1/[sqrt(2pi) * sig**2 ] * exp ( - 0.5 * chi2 )
                L propto exp ( - 0.5 * chi2 )
    Using ASTs models the uncertainties differently:
        chi2 = (flux - (fluxmod + ASTs_bias) ) **2 / (fluxerr ** 2 + ASTs_err ** 2)
    Note: using ufunc because ufuncs are written in C (for speed) and linked into NumPy.
    TODO: function(s) that creates ASTs bias and error from raw ASTs.
    """
    # sanitize inputs so the chi2 division is always defined (note: mutates
    # the caller's arrays in the non-numexpr branch)
    if __USE_NUMEXPR__:
        fluxerr = numexpr.evaluate('where((fluxerr==0.), 1., fluxerr)', local_dict={'fluxerr': fluxerr})
        flux = numexpr.evaluate('where((flux==0.), 1e-5, flux)', local_dict={'flux': flux})
    else:
        fluxerr[fluxerr == 0.] = 1.
        flux[flux == 0.] = 1e-5
    # ASTs values could be 0 without numerical problems.
    if not mask is None:
        # restrict the chi2 to the unmasked filters; dof = number kept
        _m = ~mask
        dof = _m.sum()
        if __USE_NUMEXPR__:
            chi2 = numexpr.evaluate('- 0.5 / b * a', local_dict={'a': computeChi2WithASTs( flux[_m], fluxerr[_m], fluxmod[:, _m], ASTs_bias[:, _m], ASTs_err[:, _m] ), 'b': dof})
        else:
            chi2 = - 0.5 / dof * computeChi2WithASTs( flux[_m], fluxerr[_m], fluxmod[:, _m], ASTs_bias[:, _m], ASTs_err[:, _m] )
    else:
        dof = len(flux)
        if __USE_NUMEXPR__:
            chi2 = numexpr.evaluate('- 0.5 / b * a', local_dict={'a': computeChi2WithASTs( flux, fluxerr, fluxmod, ASTs_bias, ASTs_err ), 'b': dof})
        else:
            chi2 = - 0.5 / dof * computeChi2WithASTs( flux, fluxerr, fluxmod, ASTs_bias, ASTs_err )
    if normed is True:
        # normalize in linear space, clipping infs to the largest finite value
        if __USE_NUMEXPR__:
            expchi2 = numexpr.evaluate('exp(chi2)', local_dict={'chi2': chi2})
            # not really sure take works as expected with the inf values...
            # if it does, then if I follow the documentation we only need:
            # expchi2 = expchi2.take(numpy.isfinite(expchi2), mode='clip')
            expchi2[numpy.isinf(expchi2)] = expchi2.take(numpy.isfinite(expchi2), mode='clip').max()
        else:
            expchi2 = exp(chi2)
            expchi2[numpy.isinf(expchi2)] = expchi2[ numpy.isfinite(expchi2) ].max()
        psum = expchi2.sum()
        if __USE_NUMEXPR__:
            lnp = numexpr.evaluate('chi2 - log(psum)', local_dict={'chi2': chi2, 'psum': psum})
        else:
            lnp = chi2 - log(psum)
    else:
        lnp = chi2
    # clamp very small log-probabilities at -lnp_threshold
    if __USE_NUMEXPR__:
        return numexpr.evaluate('where( (lnp < -thresh), -thresh, lnp)', local_dict={'lnp': lnp, 'thresh': lnp_threshold})
    else:
        lnp[ lnp < -lnp_threshold] = -lnp_threshold
        return lnp
def multi_job(lamb, flux, fluxerr, mask, fluxmod, extLaw, **kwargs):
    """ Shortcut to compute the log likelihood of multiple SEDs against the
    models for a given extinction parameter set.
    INPUTS:
        lamb    np.ndarray[float, ndim=1]   wavelengths in AA (Nfilters)
        flux    np.ndarray[float, ndim=2]   fluxes (Nobs, Nfilters)
        fluxerr np.ndarray[float, ndim=2]   flux errors (Nobs, Nfilters)
        mask    np.ndarray[bool, ndim=1]    mask applied during the calculations
        fluxmod np.ndarray[float, ndim=2]   modeled fluxes (Nfilters, Nmodels)
        extLaw  extinction.ExtinctionLaw    instance of extinction law
    KEYWORDS:
        **kwargs forwarded to the getFluxAttenuation call
    OUTPUTS:
        ln(L)   np.ndarray[float, ndim=2]   (Nobs, Nmodels)
    """
    tau = getFluxAttenuation(extLaw, lamb, **kwargs)
    # hoist the exponential out of the loop: dereddening multiplies by exp(tau)
    atten = exp(tau)
    nobs = flux.shape[0]
    lnp = numpy.empty( (nobs, fluxmod.shape[0]), dtype=float)
    # candidate for parallelisation
    for k in xrange(nobs):
        lnp[k, :] = computeLogLikelihood(flux[k, :] * atten, fluxerr[k, :], fluxmod, mask=mask)
    return lnp
def job(lamb, flux, fluxerr, mask, fluxmod, extLaw, **kwargs):
    """ Shortcut to compute the log likelihood of one SED against the models
    for a given extinction parameter set.
    INPUTS:
        lamb    np.ndarray[float, ndim=1]   wavelengths in AA
        flux    np.ndarray[float, ndim=1]   fluxes
        fluxerr np.ndarray[float, ndim=1]   flux errors
        mask    np.ndarray[bool, ndim=1]    mask applied during the calculations
                mask.shape = flux.shape
        fluxmod np.ndarray[float, ndim=2]   modeled fluxes (Nfilters , Nmodels)
        extLaw  extinction.ExtinctionLaw    instance of extinction law
    KEYWORDS:
        **kwargs forwarded to the getFluxAttenuation call
    OUTPUTS:
        ln(L)   np.ndarray[float, ndim=1]   array of ln(L) values (Nmodels)
    """
    #get attenuation
    tau = getFluxAttenuation(extLaw, lamb, **kwargs)
    #deredden the observed flux (faster than adding reddening to all models)
    deredflux = flux * exp(tau)
    # CLEANUP: removed the dead statements `deredflux[ind] = deredflux[ind]`
    # and `fluxmod[ind] = fluxmod[ind]` behind positivity masks — they
    # assigned each element to itself and had no effect.
    lnp = computeLogLikelihood(deredflux, fluxerr, fluxmod, normed=False, mask=mask)
    return lnp
def getSEDs(filter_names, lamb, specs):
    """
    Extract integrated fluxes of each spectrum through each filter.
    INPUTS:
        filter_names    list    filter names according to the filter lib
        lamb            wavelength grid of the spectra
        specs           sequence of spectra
    OUTPUTS:
        lf  np.ndarray[float, ndim=1]   central wavelength of each filter
        r   np.ndarray[float, ndim=2]   integrated fluxes (Nspecs, Nfilters)
    """
    flist = photometry.load_filters(filter_names)
    nspec, nfilt = len(specs), len(flist)
    r = numpy.empty( (nspec, nfilt), dtype=float)
    lf = numpy.empty( nfilt, dtype=float )
    for kf, filt in enumerate(flist):
        lf[kf] = filt.cl
        for ks in range(nspec):
            r[ks, kf] = filt.getFlux(lamb, specs[ks])
    return lf, r
def test_specs():
    """End-to-end sanity run on full spectra: build a reddened fake star from
    the BaSeL library, scan Av values, and plot the marginalized posterior
    over (Teff, logg). Returns the (Nmodels, NAv) array of ln(L) values."""
    from tools import figure
    osl = stellib.BaSeL()
    oAv = extinction.Cardelli()
    #fake DATA: pick one solar-metallicity dwarf as ground truth
    #fakein = 2000  # random between 0 & 4523, no idea what this is :p
    idx = osl.grid.where('(Teff >= 3.6) & (Teff <= 3.8) & (logG >= 4.5) & (logG <= 4.7) & (Z == 0.02)')
    fakein = idx[0][0]
    fakesed = numpy.copy(osl.spectra[fakein, :])
    Av0 = 0.1
    lamb = osl.wavelength
    tau = getFluxAttenuation(oAv, lamb, Av=Av0, Rv=3.1)
    fakesed *= exp(-tau)
    #magerr = 0.05
    #fakeerr = fakesed * (1. - 10**(-0.4*magerr) )
    fakeerr = 0.5 * fakesed
    #get Models
    # will be replaced by broad-band SEDs but structure will be identical
    seds = numpy.copy(osl.spectra)
    #idx = osl.grid.where('(Z == 0.02)')
    #seds = osl.spectra[idx]
    lamb = osl.wavelength
    Av = numpy.arange(0, 1, 0.1)
    r = numpy.empty( (seds.shape[0], len(Av)), dtype=float )
    with timeit('Likelihood'):
        for k in range(len(Av)):
            # BUG FIX: `job` takes (lamb, flux, fluxerr, mask, fluxmod, extLaw);
            # the original call omitted `mask`, shifting `seds` into the mask
            # slot and raising TypeError. Pass mask=None explicitly
            # (presumably the likelihood helpers accept None — TODO confirm).
            r[:, k] = job(lamb, fakesed, fakeerr, None, seds, oAv, Av=Av[k], Rv=3.1)

    def plot(_r, idx=None):
        """Histogram the posterior over (Teff, logg), raw and corrected by
        the grid's sampling density P0."""
        import pylab as plt
        if _r.ndim == 2:
            r = _r.sum(1)  # marginalize over the Av scan
        else:
            r = _r
        if idx is None:
            idx = numpy.arange(len(r))
        # n0: model counts per (Teff, logg) cell; n: posterior-weighted counts
        n0, bT, bg = numpy.histogram2d(osl.Teff[idx], osl.logg[idx], bins=[25, 11])
        n, bT, bg = numpy.histogram2d(osl.Teff[idx], osl.logg[idx], bins=[bT, bg], weights=exp(r) )
        n0 = n0.astype(float) / n0.sum()
        n = n.astype(float) / n.sum()
        n1 = numpy.copy(n[:])
        # divide out the grid density to correct for uneven model sampling
        ind = (n0 > 0.)
        n[ind] /= n0[ind]
        n /= n.sum()
        n1 = numpy.ma.masked_where( n0 == 0, n1 )
        n = numpy.ma.masked_where( n0 == 0, n )
        plt.figure(1, figsize=(10, 10))
        plt.clf()
        ax0 = plt.subplot(221)
        ax0.imshow(n1.T, extent=[min(bT), max(bT), min(bg), max(bg)],
                   vmin=0., vmax=numpy.max([n1.max(), n.max()]),
                   origin='lower', aspect='auto')
        ax0.plot([osl.Teff[fakein]], [osl.logg[fakein]], 'o', mec='#ff0000', mfc='None', mew=2., ms=10.)
        ax0.set_xlabel('logT')
        ax0.set_ylabel('logg')
        ax0.set_xlim(ax0.get_xlim()[::-1])
        ax0.set_ylim(ax0.get_ylim()[::-1])
        ax0.set_title('Raw: P0(logT,logg) = K')
        ax1 = plt.subplot(222, sharex=ax0, sharey=ax0)
        ax1.imshow(n.T, extent=[min(bT), max(bT), min(bg), max(bg)],
                   vmin=0., vmax=numpy.max([n1.max(), n.max()]),
                   origin='lower', aspect='auto')
        ax1.plot([osl.Teff[fakein]], [osl.logg[fakein]], 'o', mec='#ff0000', mfc='None', mew=2., ms=10.)
        ax1.set_xlabel('logT')
        ax1.set_ylabel('logg')
        ax1.set_xlim(ax1.get_xlim()[::-1])
        ax1.set_ylim(ax1.get_ylim()[::-1])
        ax1.set_title('Corrected: P/P0(logT,logg) = K')
        ax2 = plt.subplot(223)
        x = 0.5 * (bT[1:] + bT[:-1])
        #ax2.step( bT[:-1], n1.sum(1), where='pre', lw=2.)
        #ax2.step( bT[:-1], n.sum(1), where='pre', lw=2.)
        ax2.plot( x, n1.sum(1), lw=2., label='raw')
        ax2.plot( x, n.sum(1), lw=2., label='cor')
        ylim = ax2.get_ylim()
        ax2.vlines([osl.Teff[fakein]], ylim[0], ylim[1])
        ax2.set_ylim(ylim)
        ax2.set_xlabel('log(Teff)')
        ax2.set_ylabel(r'P( data $\mid$ log(Teff) ) P(Teff) / P0(log(Teff))')
        ax2.legend(loc=0, frameon=False, borderaxespad=2., prop={'size': 14})
        ax3 = plt.subplot(224)
        x = 0.5 * (bg[1:] + bg[:-1])
        #ax3.step( bg[:-1], n1.sum(0), where='pre', lw=2.)
        #ax3.step( bg[:-1], n.sum(0), where='pre', lw=2.)
        ax3.plot( x, n1.sum(0), lw=2.)
        ax3.plot( x, n.sum(0), lw=2.)
        ylim = ax3.get_ylim()
        ax3.vlines([osl.logg[fakein]], ylim[0], ylim[1])
        ax3.set_ylim(ylim)
        ax3.set_xlabel('log(g)')
        ax3.set_ylabel(r'P( data $\mid$ log(g) ) P(log(g)) / P0(log(g))')
        figure.theme(ax=ax0)
        figure.theme(ax=ax1)
        figure.theme(ax=ax2)
        figure.theme(ax=ax3)

    plot(r)
    return r
# Module is library/import-only; nothing runs when executed as a script.
if __name__ == '__main__':
    pass
def getFake(g, Av0=1., Rv0=3.1, err=0.1):
    """Build a fake reddened observation from one model of grid `g`.

    Picks a model near logT~4.0, logg~1.9, reddens it with a Cardelli law
    (Av0, Rv0) and attaches fractional errors `err`.
    Returns (model index, wavelengths, fake SED, fake errors).
    """
    oAv = extinction.Cardelli()
    #idx = g.grid.where('(logT >= 3.6) & (logT <= 3.8) & (logg >= 4.5) & (logg <= 4.7) & (Z == 0.02)')
    idx = g.grid.where('(logT >= 3.95) & (logT <= 4.05) & (logg >= 1.85) & (logg <= 1.95) ')
    fakein = idx[0][0]
    lamb = g.lamb
    fakesed = numpy.copy(g.seds[fakein, :])
    # apply the reddening: flux -> flux * exp(-tau)
    tau = getFluxAttenuation(oAv, lamb, Av=Av0, Rv=Rv0)
    fakesed *= exp(-tau)
    #magerr = 0.05
    #fakeerr = fakesed * (1. - 10**(-0.4*magerr) )
    fakeerr = err * fakesed
    return fakein, lamb, fakesed, fakeerr
def test_seds(err=0.1, Av0=1., Z0=0.02):
    """Fit a reddened fake SED drawn from the grid over a coarse Av scan.

    Returns everything plotPDFs needs:
    (grid, lnL array, Av grid, true index, lamb, fake SED, fake errors, Av0, Z0).
    """
    import grid
    #filters = 'hst_wfc3_f225w hst_wfc3_f336w hst_acs_hrc_f475w hst_acs_hrc_f814w hst_wfc3_f110w hst_wfc3_f160w'.upper().split()
    #osl = stellib.BaSeL()
    oAv = extinction.Cardelli()
    g = grid.FileSpectralGrid('libs/SEDs_basel_padovaiso.fits')
    lamb = g.lamb  # *1e6
    # fake observation with known truth
    fakein, l, fakesed, fakeerr = getFake(g, Av0, 3.1, err=err)
    # mask out two bands from the fit
    mask = numpy.zeros(fakesed.shape, dtype=bool)
    mask[2:4] = True
    Av = numpy.arange(0., 3., 0.1)
    r = numpy.empty( (g.seds.shape[0], len(Av)), dtype=float )
    with timeit('Likelihood'):
        for k, av in enumerate(Av):
            # copies protect the fake data/models from in-place edits downstream
            r[:, k] = job(lamb[:], numpy.copy(fakesed), numpy.copy(fakeerr), mask, numpy.copy(g.seds), oAv, Av=av, Rv=3.1)
    return g, r, Av, fakein, lamb, fakesed, fakeerr, Av0, Z0
def plotPDFs(g, r, Av, Av0, Z0, fakein, Q='logg logT logL logM logA Av Z' ):
    """Plot 1-d marginalized posterior histograms for each quantity in `Q`,
    marking the true (input) value with a red vertical line.

    Python-2 code (print statement, integer division).
    """
    from tools import figure
    _q = Q.split()
    # posterior weights, normalized over the whole (model, Av) plane
    _r = exp(r)
    _r /= _r.sum()
    def plotPDF(ax, qk, *args, **kargs):
        # Av is the externally scanned axis, so its PDF marginalizes over
        # models (axis 0); every other quantity marginalizes over Av (axis 1)
        if qk.upper() != 'AV':
            ax.hist(g.grid[qk], weights=_r.sum(1), bins=30)
            ax.set_xlabel(qk)
            ax.set_ylabel('P(data $\mid$ %s)' % qk )
            ylim = ax.get_ylim()
            ax.vlines(g.grid[qk][fakein], ylim[0], ylim[1], color='#ff0000')
            ax.set_ylim(ylim)
            ax.set_xlim(min(g.grid[qk]), max(g.grid[qk]))
        else:
            ax.hist(Av, weights=_r.sum(0), bins=numpy.linspace(min(Av), max(Av), len(Av) + 1))
            ax.set_xlabel(qk)
            ax.set_ylabel('P(data $\mid$ %s)' % qk )
            ax.set_xlim(min(Av), max(Av))
            ylim = ax.get_ylim()
            ax.vlines(Av0, ylim[0], ylim[1], color='#ff0000')
            ax.set_ylim(ylim)
    ncol = 3
    # NOTE(review): py2 integer division; len/ncol + len%ncol over-allocates
    # rows for some counts but never under-allocates for the 7 defaults
    nl = len(_q) / ncol + len(_q) % ncol
    k = 0
    for k in range(len(_q)):
        # echo the true (input) value for this quantity
        print _q[k] + ": " + str(g.grid[_q[k]][fakein])
        ax = figure.subplot(nl, ncol, k + 1)
        plotPDF(ax, _q[k])
    figure.show()
|
#!/usr/bin/env python3
# so that script can be run from Brickman
import termios, tty, sys
from ev3dev.ev3 import *
# attach large motors to ports C and D, medium motors to ports A and B
motor_left = LargeMotor('outC')   # left drive wheel (negative speed in left())
motor_right = LargeMotor('outD')  # right drive wheel
motor_a = MediumMotor('outA')     # attachment motor, paired with B in up()/down()
motor_b = MediumMotor('outB')     # attachment motor, driven opposite to A
def getch():
    """Read a single character from stdin without waiting for Enter.

    Switches the terminal to cbreak mode only for the duration of the read
    and always restores the previous settings — even if the read raises
    (e.g. KeyboardInterrupt) — so the terminal is never left unusable.
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setcbreak(fd)
        ch = sys.stdin.read(1)
    finally:
        # Restore cooked mode even on exceptions.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def forward():
    # Drive both wheels forward at the same speed until another command arrives.
    motor_left.run_forever(speed_sp=200)
    motor_right.run_forever(speed_sp=200)
#==============================================
def back():
    # Drive both wheels in reverse at the same speed.
    motor_left.run_forever(speed_sp=-200)
    motor_right.run_forever(speed_sp=-200)
#==============================================
def left():
    # Spin in place to the left: left wheel backward, right wheel forward.
    motor_left.run_forever(speed_sp=-200)
    motor_right.run_forever(speed_sp=200)
#==============================================
def right():
    # Spin in place to the right: left wheel forward, right wheel backward.
    motor_left.run_forever(speed_sp=200)
    motor_right.run_forever(speed_sp=-200)
#==============================================
def stop():
    # Halt all four motors by commanding zero speed.
    # NOTE(review): Motor.stop() is the usual way to halt in ev3dev;
    # run_forever(speed_sp=0) leaves the controller in "run" mode —
    # confirm this is intended.
    motor_left.run_forever(speed_sp=0)
    motor_right.run_forever(speed_sp=0)
    motor_a.run_forever(speed_sp=0)
    motor_b.run_forever(speed_sp=0)
#==============================================
def up():
    # Run the attachment motors in opposite directions (A forward, B reverse).
    motor_a.run_forever(speed_sp=200)
    motor_b.run_forever(speed_sp=-200)
def down():
    # Reverse of up(): A backward, B forward.
    motor_a.run_forever(speed_sp=-200)
    motor_b.run_forever(speed_sp=200)
# Key-to-action dispatch: WASD drives, space stops, o/p move the
# attachment motors, q quits.  Unknown keys are ignored.
KEY_ACTIONS = {
    's': back,
    'w': forward,
    'd': right,
    'a': left,
    ' ': stop,
    'o': up,
    'p': down,
}
while True:
    key = getch()
    print(key)
    if key == 'q':
        exit()
    action = KEY_ACTIONS.get(key)
    if action is not None:
        action()
import crosscat.tests.component_model_extensions.ContinuousComponentModel as ccmext
import random
import math
import numpy
import six
import unittest
def main():
    # Delegate to unittest's discovery and runner for this module.
    unittest.main()
class TestContinuousComponentModelExtensions_Constructors(unittest.TestCase):
    """Validate argument checking in the from_parameters and from_data
    constructors of the continuous component model: good inputs construct,
    malformed/missing/out-of-range parameters and hyperparameters raise."""

    def setUp(self):
        N = 10
        self.N = N
        random.seed(0)
        self.X = numpy.array([[random.normalvariate(0.0, 1.0)] for i in range(N)])
        # Model parameter fixtures (mu: mean, rho: precision, must be > 0).
        self.params_good = dict(rho=1.0, mu=0.0)
        self.params_empty = dict()
        self.params_missing_rho = dict(mu=0.0)
        # BUG FIX: this fixture was dict(mu=0.0) — an exact duplicate of
        # params_missing_rho, so the missing-mu case was never exercised.
        # A dict that is missing mu must still carry rho.
        self.params_missing_mu = dict(rho=1.0)
        self.params_not_dict = [0.0, 1.0]
        self.params_negative_rho = dict(rho=-1.0, mu=0.0)
        self.params_zero_rho = dict(rho=0.0, mu=0.0)
        # Hyperparameter fixtures (mu, nu, r, s; nu/r/s must be positive).
        self.hypers_good = dict(mu=0.0, nu=1.0, r=1.0, s=1.0)
        self.hypers_missing_mu = dict(nu=1.0, r=1.0, s=1.0)
        self.hypers_missing_nu = dict(mu=0.0, r=1.0, s=1.0)
        self.hypers_missing_r = dict(mu=0.0, nu=1.0, s=1.0)
        self.hypers_missing_s = dict(mu=0.0, nu=1.0, r=1.0)
        self.hypers_low_nu = dict(mu=0.0, nu=-1.0, r=1.0, s=1.0)
        self.hypers_low_r = dict(mu=0.0, nu=1.0, r=-1.0, s=1.0)
        self.hypers_low_s = dict(mu=0.0, nu=1.0, r=1.0, s=-1.0)
        self.hypers_not_dict = [0, 1, 2, 3]

    # Test from_parameters constructor
    def test_from_parameters_contructor_with_good_complete_params_and_hypers(self):
        m = ccmext.p_ContinuousComponentModel.from_parameters(self.N,
            data_params=self.params_good,
            hypers=self.hypers_good,
            gen_seed=0)
        assert m is not None

    def test_from_parameters_contructor_with_no_params_and_hypers(self):
        # Omitting params/hypers entirely should fall back to defaults.
        m = ccmext.p_ContinuousComponentModel.from_parameters(self.N, gen_seed=0)
        assert m is not None

    def test_from_parameters_contructor_with_bad_params_and_good_hypers(self):
        # Missing keys -> KeyError; wrong container -> TypeError;
        # non-positive precision -> ValueError.
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_empty,
            hypers=self.hypers_good,
            gen_seed=0)
        self.assertRaises(TypeError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_not_dict,
            hypers=self.hypers_good,
            gen_seed=0)
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_missing_mu,
            hypers=self.hypers_good,
            gen_seed=0)
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_missing_rho,
            hypers=self.hypers_good,
            gen_seed=0)
        self.assertRaises(ValueError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_negative_rho,
            hypers=self.hypers_good,
            gen_seed=0)
        self.assertRaises(ValueError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_zero_rho,
            hypers=self.hypers_good,
            gen_seed=0)

    def test_from_parameters_contructor_with_good_params_and_bad_hypers(self):
        # Each hyperparameter must be present (KeyError), nu/r/s positive
        # (ValueError), and the container must be a dict (TypeError).
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_good,
            hypers=self.hypers_missing_mu,
            gen_seed=0)
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_good,
            hypers=self.hypers_missing_nu,
            gen_seed=0)
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_good,
            hypers=self.hypers_missing_r,
            gen_seed=0)
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_good,
            hypers=self.hypers_missing_s,
            gen_seed=0)
        self.assertRaises(ValueError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_good,
            hypers=self.hypers_low_nu,
            gen_seed=0)
        self.assertRaises(ValueError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_good,
            hypers=self.hypers_low_r,
            gen_seed=0)
        self.assertRaises(ValueError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_good,
            hypers=self.hypers_low_s,
            gen_seed=0)
        self.assertRaises(TypeError, ccmext.p_ContinuousComponentModel.from_parameters, self.N,
            data_params=self.params_good,
            hypers=self.hypers_not_dict,
            gen_seed=0)

    # From data constructor
    def test_from_data_contructor_with_good_complete_hypers(self):
        m = ccmext.p_ContinuousComponentModel.from_data(self.X,
            hypers=self.hypers_good,
            gen_seed=0)
        assert m is not None

    def test_from_data_contructor_with_no_params_and_hypers(self):
        m = ccmext.p_ContinuousComponentModel.from_data(self.X, gen_seed=0)
        assert m is not None

    def test_from_data_contructor_with_bad_hypers(self):
        # Same hyperparameter validation as from_parameters, driven by data.
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_data, self.X,
            hypers=self.hypers_missing_mu,
            gen_seed=0)
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_data, self.X,
            hypers=self.hypers_missing_nu,
            gen_seed=0)
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_data, self.X,
            hypers=self.hypers_missing_r,
            gen_seed=0)
        self.assertRaises(KeyError, ccmext.p_ContinuousComponentModel.from_data, self.X,
            hypers=self.hypers_missing_s,
            gen_seed=0)
        self.assertRaises(ValueError, ccmext.p_ContinuousComponentModel.from_data, self.X,
            hypers=self.hypers_low_nu,
            gen_seed=0)
        self.assertRaises(ValueError, ccmext.p_ContinuousComponentModel.from_data, self.X,
            hypers=self.hypers_low_r,
            gen_seed=0)
        self.assertRaises(ValueError, ccmext.p_ContinuousComponentModel.from_data, self.X,
            hypers=self.hypers_low_s,
            gen_seed=0)
        self.assertRaises(TypeError, ccmext.p_ContinuousComponentModel.from_data, self.X,
            hypers=self.hypers_not_dict,
            gen_seed=0)
class TestContinuousComponentModelExtensions_FromParametersConstructor(unittest.TestCase):
    """Behavioral checks on a model instance built with from_parameters."""

    def setUp(self):
        n_samples = 10
        random.seed(0)
        self.X = numpy.array([[random.normalvariate(0.0, 1.0)] for _ in range(n_samples)])
        self.component_model = ccmext.p_ContinuousComponentModel.from_parameters(n_samples, gen_seed=0)

    def test_all_hyperparameters_intialized(self):
        hypers = self.component_model.get_hypers()
        # every expected hyperparameter key must be present (keys are bytes)
        for name in (b'mu', b'nu', b'r', b's'):
            self.assertIn(name, hypers)

    def test_all_suffstats_intialized(self):
        suffstats = self.component_model.get_suffstats()
        for name in (b'sum_x', b'sum_x_squared'):
            self.assertIn(name, suffstats)

    def test_draw_component_model_params(self):
        draw = self.component_model.sample_parameters_given_hyper()
        self.assertIs(type(draw), dict)
        # fetched as in the original test; the result itself is unused
        model_parameter_bounds = self.component_model.get_model_parameter_bounds()
        for key, value in six.iteritems(draw):
            self.assertIn(key, ('mu', 'rho'))
            self.assertIn(type(value), (float, numpy.float64))
            self.assertFalse(math.isnan(value))
            self.assertFalse(math.isinf(value))
            if key == 'rho':
                # precision must be strictly positive
                self.assertGreater(value, 0.0)

    def test_uncollapsed_likelihood(self):
        expected = -14.248338610116935
        log_likelihood = self.component_model.uncollapsed_likelihood(self.X, {'mu': 0.0, 'rho': 1.0})
        self.assertLess(log_likelihood, 0.0)
        self.assertLess(math.fabs(expected - log_likelihood), 1e-8)
class TestContinuousComponentModelExtensions_FromDataConstructor(unittest.TestCase):
    """Behavioral checks on a model instance built with from_data."""

    def setUp(self):
        n_samples = 10
        random.seed(0)
        self.X = numpy.array([[random.normalvariate(0.0, 1.0)] for _ in range(n_samples)])
        self.component_model = ccmext.p_ContinuousComponentModel.from_data(self.X, gen_seed=0)

    def test_all_hyperparameters_intialized(self):
        hypers = self.component_model.get_hypers()
        # every expected hyperparameter key must be present (keys are bytes)
        for name in (b'mu', b'nu', b'r', b's'):
            self.assertIn(name, hypers)

    def test_all_suffstats_intialized(self):
        suffstats = self.component_model.get_suffstats()
        for name in (b'sum_x', b'sum_x_squared'):
            self.assertIn(name, suffstats)

    def test_draw_component_model_params(self):
        draw = self.component_model.sample_parameters_given_hyper()
        self.assertIs(type(draw), dict)
        for key, value in six.iteritems(draw):
            self.assertIn(key, ('mu', 'rho'))
            self.assertIn(type(value), (float, numpy.float64))
            self.assertFalse(math.isnan(value))
            self.assertFalse(math.isinf(value))
            if key == 'rho':
                # precision must be strictly positive
                self.assertGreater(value, 0.0)

    def test_uncollapsed_likelihood(self):
        expected = -20.971295328329504
        log_likelihood = self.component_model.uncollapsed_likelihood(self.X, {'mu': 0.0, 'rho': 1.0})
        self.assertLess(log_likelihood, 0.0)
        self.assertLess(math.fabs(expected - log_likelihood), 1e-8)
class TestContinuousComponentModelExtensions_static(unittest.TestCase):
    """Exercise the static/class-level helpers of the continuous model."""

    def setUp(self):
        n_samples = 10
        random.seed(0)
        self.X = numpy.array([[random.normalvariate(0.0, 1.0)] for _ in range(n_samples)])
        self.component_class = ccmext.p_ContinuousComponentModel

    def test_log_likelihood(self):
        data = numpy.array([[1], [0]])
        # Spot-check two parameter settings against known regression values.
        for params, expected in (
                (dict(mu=0.0, rho=1.0), -2.3378770664093453),
                (dict(mu=2.2, rho=12.1), -37.338671613806667)):
            log_likelihood = self.component_class.log_likelihood(data, params)
            self.assertLess(log_likelihood, 0.0)
            self.assertLess(math.fabs(expected - log_likelihood), 1e-8)

    def test_log_pdf(self):
        data = numpy.array([[1], [0]])
        # Known per-point densities for two parameter settings.
        cases = (
            (dict(mu=0.0, rho=1.0), (-1.4189385332046727, -0.91893853320467267)),
            (dict(mu=2.2, rho=12.1), (-8.38433580690333, -28.954335806903334)),
        )
        for params, expected in cases:
            log_pdf = self.component_class.log_pdf(data, params)
            self.assertEqual(len(log_pdf), 2)
            for row, value in enumerate(expected):
                self.assertLess(math.fabs(value - log_pdf[row, 0]), 1e-8)
        # Density must decrease monotonically moving away from the mean.
        params = dict(mu=0.0, rho=1.0)
        points = numpy.array([[x] for x in numpy.linspace(0, 10, num=20)])
        log_pdf = self.component_class.log_pdf(points, params)
        self.assertEqual(len(log_pdf), 20)
        for n in range(1, 20):
            self.assertGreater(log_pdf[n - 1, 0], log_pdf[n, 0])

    def test_generate_discrete_support(self):
        params = dict(mu=0.0, rho=1.0)
        support = self.component_class.generate_discrete_support(params, support=0.95, nbins=100)
        self.assertIs(type(support), list)
        self.assertEqual(len(support), 100)
        # Support is symmetric about the mean: matching end points and a
        # matching pair straddling the center.
        self.assertEqual(support[0], -support[-1])
        self.assertEqual(support[49], -support[50])
        self.assertLess(math.fabs(support[0] + 1.959963984540054), 1e-8)
        self.assertLess(math.fabs(support[-1] - 1.959963984540054), 1e-8)

    def _check_hyper_draw(self, draw):
        # Shared validation for one hyperparameter draw.
        self.assertIs(type(draw), dict)
        for key, value in six.iteritems(draw):
            self.assertIn(key, ('mu', 'nu', 'r', 's'))
            self.assertIn(type(value), (float, numpy.float64))
            self.assertFalse(math.isnan(value))
            self.assertFalse(math.isinf(value))
            if key in ('nu', 's', 'r'):
                self.assertGreater(value, 0.0)

    def test_draw_component_model_hyperparameters_single(self):
        draw_list = self.component_class.draw_hyperparameters(self.X)
        self.assertIs(type(draw_list), list)
        self.assertIs(type(draw_list[0]), dict)
        self._check_hyper_draw(draw_list[0])

    def test_draw_component_model_hyperparameters_multiple(self):
        draw_list = self.component_class.draw_hyperparameters(self.X, n_draws=3)
        self.assertIs(type(draw_list), list)
        self.assertEqual(len(draw_list), 3)
        for draw in draw_list:
            self._check_hyper_draw(draw)

    def test_generate_data_from_parameters(self):
        n_samples = 10
        params = dict(mu=0.0, rho=1.0)
        X = self.component_class.generate_data_from_parameters(params, n_samples)
        self.assertEqual(type(X), numpy.ndarray)
        self.assertEqual(len(X), n_samples)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 23 20:02:25 2022
@author: dv516
"""
import numpy as np
import math
from scipy.optimize import minimize
from typing import List, Tuple
from Functions.test_functions import f_d2, f_d3
def optimizer_dummy(f, N_x: int, bounds: List[Tuple[float]], N: int = 100) -> Tuple[float, List[float]]:
    '''
    Optimizer aims to optimize a black-box function 'f' using the dimensionality
    'N_x', and box-'bounds' on the decision vector
    Input:
    f: function: taking as input a list of size N_x and outputing a float
    N_x: int: number of dimensions
    N: int: optional: Evaluation budget
    bounds: List of size N_x where each element i is a tuple consisting of 2 floats
    (lower, upper) serving as box-bounds on the ith element of x
    Return:
    tuple: 1st element: lowest value found for f, f_min
    2nd element: list/array of size N_x giving the decision variables
    associated with f_min

    Note: the return annotation was previously the tuple literal
    (float, List[float]), which is not a valid PEP 484 type hint.
    '''
    if N_x != len(bounds):
        raise ValueError('Nbr of variables N_x does not match length of bounds')
    # Grid resolution per dimension for the initial design, capped so the
    # mesh does not swallow more than roughly half the evaluation budget.
    ND = min(math.ceil(20 / N_x), int(np.sqrt(N / 2)))
    BO_test = BO(f, N_x, bounds, ND=ND)
    # NOTE(review): BO.collect_data evaluates a full ND**N_x mesh, but only
    # ND**2 evaluations are deducted here — exact only for N_x == 2; confirm
    # the intended budget accounting for other dimensionalities.
    x_opt, y_opt = BO_test.opt_loop(N=N - ND**2)
    x = x_opt.flatten().tolist()
    return f(x), x
def kernel(x, y, l2=0.1, sigma_f=1):
    '''
    Squared-exponential (RBF) kernel.

    @Input
    x: D x Nx array (each sample is a column vector)
    y: D x Ny array
    l2: squared length scale
    sigma_f: signal standard deviation
    @Return: Nx x Ny Gram matrix
    '''
    # Pairwise squared Euclidean distances via the expansion
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    x_sq = np.sum(x ** 2, 0).reshape(-1, 1)
    y_sq = np.sum(y ** 2, 0).reshape(1, -1)
    sq_dist = x_sq + y_sq - 2 * (x.T @ y)
    return sigma_f ** 2 * np.exp(-0.5 * (1 / l2) * sq_dist)
def posterior(X, X_test, y, l2=0.1, sigma_y=1e-3):
    '''
    GP posterior mean and covariance via the standard Cholesky-based
    algorithm (two triangular solves instead of an explicit inverse).

    @Input
    X:      D x N array of training inputs (samples are columns)
    X_test: D x M array of query inputs
    y:      1 x N array of training targets
    l2:     squared kernel length scale
    sigma_y: observation noise standard deviation
    @Return
    expect: posterior mean at the query points
    cov:    posterior covariance at the query points
    '''
    _, n_train = np.shape(X)
    K = kernel(X, X, l2)
    # Cholesky factor of the noisy training Gram matrix.
    L = np.linalg.cholesky(K + sigma_y ** 2 * np.eye(n_train))
    # alpha = K^-1 y via forward then backward substitution.
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y.T))
    K_s = kernel(X, X_test, l2)
    expect = K_s.T @ alpha
    v = np.linalg.solve(L, K_s)
    K_ss = kernel(X_test, X_test, l2)
    cov = np.diag(K_ss) - v.T @ v
    return expect, cov
def define_lcb(X, y, l2=0.1, sigma_y=0, kappa=5):
    '''
    Build a lower-confidence-bound acquisition function over the GP
    posterior defined by the training data, for use in a minimization.

    @Input
    X: D x N training inputs (samples are columns)
    y: 1 x N training targets
    kappa: exploration weight on the posterior standard deviation
    @Return
    callable mapping a flat query point to (posterior mean - kappa * std)
    '''
    def lower_confidence_bound(X_test):
        query = X_test.reshape(-1, 1)
        mean, cov = posterior(X, query, y, l2, sigma_y)
        std = np.sqrt(cov)
        return mean.item() - kappa * std.item()
    return lower_confidence_bound
class BO:
    '''
    Minimal Bayesian Optimization class.

    Builds an initial mesh of objective evaluations over the box bounds,
    then iteratively minimizes a lower-confidence-bound acquisition
    function to choose the next sample point.

    TODO: replace sampling with some low-discrepancy methods e.g. sobol
    TODO: rescale
    TODO: tune hyperparameters
    TODO: Rewrite in Casadi
    '''
    def __init__(self, black_fn, Nx, bounds, ND=5):
        # black_fn: black-box objective; list of Nx floats -> float
        # Nx:       input dimensionality
        # bounds:   list of (lower, upper) tuples, one per dimension
        # ND:       grid points per dimension for the initial design
        self.black_fn = black_fn
        self.Nx = Nx
        self.ND = ND
        self.bounds = bounds
        X_train, Y_train = self.collect_data()
        self.X_train = X_train
        self.Y_train = Y_train

    def collect_data(self):
        # Evaluate the objective on a full ND**Nx mesh over the bounds.
        # Returns (X, Y) with samples as columns: X is Nx x ND**Nx,
        # Y is 1 x ND**Nx.
        Nx = self.Nx
        ND = self.ND
        black_fn = self.black_fn
        range_list = [np.linspace(bound[0], bound[1], ND) for bound in self.bounds]
        grid_list = (np.array(np.meshgrid(*range_list)).T.reshape(-1, Nx)).tolist()
        output_list = []
        for grid in grid_list:
            output_list += [black_fn(grid)]
        return np.array(grid_list).T, np.array(output_list).reshape(1, -1)

    def opt_loop(self, N=100):
        '''Run N iterations of evaluate -> update data -> minimize LCB.

        Returns (x_opt, y_opt): the best point *evaluated* during the loop
        and its objective value.  Points from the initial mesh are not
        considered for the returned optimum.
        '''
        ## initialize with a random point
        x_k = np.array([np.random.uniform(bound[0], bound[1], 1) for bound in self.bounds])
        for i in range(N):
            ## calculate f(x) for the current candidate
            x_k_list = x_k.flatten().tolist()
            y_k = np.array(self.black_fn(x_k_list)).reshape(-1, 1)
            ## update training set
            self.X_train = np.hstack([self.X_train, x_k])
            self.Y_train = np.hstack([self.Y_train, y_k])
            ## update the incumbent optimum.  BUG FIX: this must happen
            ## before x_k is overwritten with the next candidate — the
            ## original recorded x_opt AFTER the reassignment, so x_opt
            ## pointed at a not-yet-evaluated location that did not
            ## correspond to y_opt.
            if i == 0 or y_k.item() <= y_opt.item():
                y_opt = y_k
                x_opt = x_k
            ## acquisition fn for the next x: GP lower confidence bound,
            ## warm-started from the current sample
            lcb_fn = define_lcb(self.X_train, self.Y_train, l2=0.1, sigma_y=1e-3, kappa=2)
            res = minimize(lcb_fn, x_k_list, method='SLSQP', bounds=self.bounds)
            x_k = (res.x).reshape(-1, 1)
        return x_opt, y_opt
# N_x = 2
# bounds = [(-2.0, 2.0) for i in range(N_x)]
# test1 = optimizer_dummy(f_d2, N_x, bounds, 100)
# N_x = 3
# bounds = [(-2.0, 2.0) for i in range(N_x)]
# test2 = optimizer_dummy(f_d3, N_x, bounds, 100)
# np.testing.assert_array_less(test1[0], 1e-3)
# np.testing.assert_array_less(test2[0], 1e-3)
|
#
# PySNMP MIB module ITOUCH-EVENT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ITOUCH-EVENT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:57:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
DateTime, iTouch = mibBuilder.importSymbols("ITOUCH-MIB", "DateTime", "iTouch")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter64, TimeTicks, Unsigned32, Bits, ModuleIdentity, MibIdentifier, Integer32, Counter32, IpAddress, Gauge32, NotificationType, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter64", "TimeTicks", "Unsigned32", "Bits", "ModuleIdentity", "MibIdentifier", "Integer32", "Counter32", "IpAddress", "Gauge32", "NotificationType", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Root object identifier for the event MIB subtree: 1.3.6.1.4.1.33.33.
xEvent = MibIdentifier((1, 3, 6, 1, 4, 1, 33, 33))
class EventGroup(Integer32):
    # Enumerated integer type (1..58) naming each event group; used as the
    # index type of eventGroupTable and eventTextTable below.
    # Generated by pysmi from the MIB's EventGroup textual convention —
    # do not edit the value lists by hand.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58))
    namedValues = NamedValues(("appleTalk", 1), ("appleTalkArps", 2), ("appleTalkRtmp", 3), ("appleTalkZip", 4), ("appleTalkNbp", 5), ("appleTalkTraffic", 6), ("atm", 7), ("backup", 8), ("pcmcia", 9), ("chassis", 10), ("circuit", 11), ("clns", 12), ("decNet", 13), ("decNetTraffic", 14), ("egp", 15), ("esis", 16), ("fddi", 17), ("fddiTraffic", 18), ("frame", 19), ("frameRelay", 20), ("hubManagement", 21), ("interface", 22), ("ip", 23), ("ipRip", 24), ("ipRoutes", 25), ("ipTraffic", 26), ("ipx", 27), ("ipxRip", 28), ("ipxSap", 29), ("isdn", 30), ("isdnQ931", 31), ("isdnTrace", 32), ("isis", 33), ("isisHello", 34), ("isisLsp", 35), ("link", 36), ("lmb", 37), ("lqm", 38), ("ospf", 39), ("ospfHello", 40), ("ospfLsaPacket", 41), ("ospfSpf", 42), ("param", 43), ("ppp", 44), ("session", 45), ("spanningTree", 46), ("snmp", 47), ("switchForwarding", 48), ("switchLoopDetect", 49), ("switchManagement", 50), ("system", 51), ("tcp", 52), ("time", 53), ("tokenRingManagement", 54), ("udp", 55), ("ui", 56), ("vlmp", 57), ("x25", 58))
# ---- Scalar configuration / control objects (xEvent.1 .. xEvent.7) ----
# pysmi-generated definitions; the setDescription texts come from the MIB
# source and are only loaded when mibBuilder.loadTexts is enabled.
eventTableSize = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 800)).clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventTableSize.setStatus('mandatory')
if mibBuilder.loadTexts: eventTableSize.setDescription('Controls the size of the event table in number of entries. Event storage begins with entry number one and continues to the upper bound. When the table becomes full, event storeage begins again with entry number one, overwriting the previously stored entry. A newly defined table size will not take effect until the unit is reinitialized.')
eventSeverity = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("discard", 1), ("low", 2), ("medium", 3), ("high", 4))).clone('low')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: eventSeverity.setDescription('The severity of the event to be logged. All events fall into one of the above severity levels. Events are added to the event table if and only if the current value of this object is less than or equal to the severity of the event. If this object is set to discard, no events are logged to the table.')
eventTimestamp = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("date", 2), ("time", 3), ("datetime", 4))).clone('datetime')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventTimestamp.setStatus('mandatory')
if mibBuilder.loadTexts: eventTimestamp.setDescription('This object controls the timestamp embedded into the actual text of the event for event table text object eventTextText. If this object is set to none, no timestamp will be embedded in the text. This object has no effect on the event table text object eventTextDateTime.')
eventLanguage = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("english", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventLanguage.setStatus('mandatory')
if mibBuilder.loadTexts: eventLanguage.setDescription('This object indicates the language of the event text in the table.')
eventClearLog = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ready", 1), ("execute", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventClearLog.setStatus('mandatory')
if mibBuilder.loadTexts: eventClearLog.setDescription('When this object is set to execute, all events are cleared from the event table. Setting this object to ready has no effect.')
eventEnableAll = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ready", 1), ("execute", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventEnableAll.setStatus('mandatory')
if mibBuilder.loadTexts: eventEnableAll.setDescription('When this object is set to execute, all events groups are enabled. Setting this object to ready has no effect.')
eventDisableAll = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ready", 1), ("execute", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventDisableAll.setStatus('mandatory')
if mibBuilder.loadTexts: eventDisableAll.setDescription('When this object is set to execute, all events groups are disabled. Setting this object to ready has no effect.')
# ---- Per-group enable/disable table (xEvent.8), indexed by EventGroup ----
eventGroupTable = MibTable((1, 3, 6, 1, 4, 1, 33, 33, 8), )
if mibBuilder.loadTexts: eventGroupTable.setStatus('mandatory')
if mibBuilder.loadTexts: eventGroupTable.setDescription('Table of descriptive and status information about event groups.')
eventGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 33, 33, 8, 1), ).setIndexNames((0, "ITOUCH-EVENT-MIB", "eventGroupIndex"))
if mibBuilder.loadTexts: eventGroupEntry.setStatus('mandatory')
if mibBuilder.loadTexts: eventGroupEntry.setDescription('An entry in the table, containing information about an event group.')
eventGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 8, 1, 1), EventGroup()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventGroupIndex.setStatus('mandatory')
if mibBuilder.loadTexts: eventGroupIndex.setDescription('This variable identifies the event group.')
eventGroupState = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventGroupState.setStatus('mandatory')
if mibBuilder.loadTexts: eventGroupState.setDescription('This variable controls whether events from a particular event group may be logged to the event table.')
# ---- Logged event text table (xEvent.9), indexed by (group, event) ----
eventTextTable = MibTable((1, 3, 6, 1, 4, 1, 33, 33, 9), )
if mibBuilder.loadTexts: eventTextTable.setStatus('mandatory')
if mibBuilder.loadTexts: eventTextTable.setDescription('Table of descriptive and status information about an event.')
eventTextEntry = MibTableRow((1, 3, 6, 1, 4, 1, 33, 33, 9, 1), ).setIndexNames((0, "ITOUCH-EVENT-MIB", "eventTextGroupIndex"), (0, "ITOUCH-EVENT-MIB", "eventTextEventIndex"))
if mibBuilder.loadTexts: eventTextEntry.setStatus('mandatory')
if mibBuilder.loadTexts: eventTextEntry.setDescription('An entry in the table, containing information about an event.')
eventTextGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 1), EventGroup()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextGroupIndex.setStatus('mandatory')
if mibBuilder.loadTexts: eventTextGroupIndex.setDescription('This variable identifies the event group.')
eventTextEventIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextEventIndex.setStatus('mandatory')
if mibBuilder.loadTexts: eventTextEventIndex.setDescription('This variable identifies the event of the desired group. This number is arbitrary, and translates to nth event of the specified group. This value wraps at the 32 bit maximum.')
eventTextText = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextText.setStatus('mandatory')
if mibBuilder.loadTexts: eventTextText.setDescription('The text of the event in the language defined by eventLanguage.')
eventTextDateTime = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 4), DateTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextDateTime.setStatus('mandatory')
if mibBuilder.loadTexts: eventTextDateTime.setDescription('The timestamp of when the event was posted.')
eventTextSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4))).clone(namedValues=NamedValues(("low", 2), ("medium", 3), ("high", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: eventTextSeverity.setDescription('The severity of the event.')
# Register every symbol defined by this module with the MIB builder.
mibBuilder.exportSymbols("ITOUCH-EVENT-MIB", eventTextDateTime=eventTextDateTime, eventGroupEntry=eventGroupEntry, eventSeverity=eventSeverity, eventTextText=eventTextText, eventTextTable=eventTextTable, eventTextSeverity=eventTextSeverity, eventLanguage=eventLanguage, EventGroup=EventGroup, eventGroupIndex=eventGroupIndex, eventGroupState=eventGroupState, eventTextEventIndex=eventTextEventIndex, eventTextEntry=eventTextEntry, eventTableSize=eventTableSize, eventClearLog=eventClearLog, eventGroupTable=eventGroupTable, eventTextGroupIndex=eventTextGroupIndex, xEvent=xEvent, eventDisableAll=eventDisableAll, eventEnableAll=eventEnableAll, eventTimestamp=eventTimestamp)
|
#
# PySNMP MIB module ASCEND-MIBATMATOM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBATMATOM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:10:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# ---------------------------------------------------------------------------
# ASCEND-MIBATMATOM-MIB: machine-generated by pysmi (see header above).
# Do not hand-edit object identifiers or constraints; regenerate from the
# ASN.1 source instead.
# ---------------------------------------------------------------------------

# Resolve symbols exported by the MIB modules this module builds on.
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Gauge32, Counter64, IpAddress, Integer32, Counter32, ObjectIdentity, Unsigned32, ModuleIdentity, TimeTicks, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Gauge32", "Counter64", "IpAddress", "Integer32", "Counter32", "ObjectIdentity", "Unsigned32", "ModuleIdentity", "TimeTicks", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")

# NOTE(review): this deliberately shadows the DisplayString imported just
# above — pysmi re-declares textual conventions used by the source MIB
# locally. Confirm before "fixing" the shadowing.
class DisplayString(OctetString):
    pass

# Sub-tree roots for the VCL (…529.23.39) and VPL (…529.23.38) profile
# tables under the Ascend enterprise OID (1.3.6.1.4.1.529).
mibatmVclProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 39))
mibatmVplProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 38))

# --- ATM VCL profile table: indexed by physical address + VPI + VCI -------
mibatmVclProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 39, 1), )
if mibBuilder.loadTexts: mibatmVclProfileTable.setStatus('mandatory')
mibatmVclProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1), ).setIndexNames((0, "ASCEND-MIBATMATOM-MIB", "atmVclProfile-Id-Address-PhysicalAddress-Shelf"), (0, "ASCEND-MIBATMATOM-MIB", "atmVclProfile-Id-Address-PhysicalAddress-Slot"), (0, "ASCEND-MIBATMATOM-MIB", "atmVclProfile-Id-Address-PhysicalAddress-ItemNumber"), (0, "ASCEND-MIBATMATOM-MIB", "atmVclProfile-Id-Address-LogicalItem"), (0, "ASCEND-MIBATMATOM-MIB", "atmVclProfile-Id-Vpi"), (0, "ASCEND-MIBATMATOM-MIB", "atmVclProfile-Id-Vci"))
if mibBuilder.loadTexts: mibatmVclProfileEntry.setStatus('mandatory')
atmVclProfile_Id_Address_PhysicalAddress_Shelf = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 1), Integer32()).setLabel("atmVclProfile-Id-Address-PhysicalAddress-Shelf").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_Id_Address_PhysicalAddress_Shelf.setStatus('mandatory')
atmVclProfile_Id_Address_PhysicalAddress_Slot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 2), Integer32()).setLabel("atmVclProfile-Id-Address-PhysicalAddress-Slot").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_Id_Address_PhysicalAddress_Slot.setStatus('mandatory')
atmVclProfile_Id_Address_PhysicalAddress_ItemNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 3), Integer32()).setLabel("atmVclProfile-Id-Address-PhysicalAddress-ItemNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_Id_Address_PhysicalAddress_ItemNumber.setStatus('mandatory')
atmVclProfile_Id_Address_LogicalItem = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 4), Integer32()).setLabel("atmVclProfile-Id-Address-LogicalItem").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_Id_Address_LogicalItem.setStatus('mandatory')
atmVclProfile_Id_Vpi = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 5), Integer32()).setLabel("atmVclProfile-Id-Vpi").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_Id_Vpi.setStatus('mandatory')
atmVclProfile_Id_Vci = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 6), Integer32()).setLabel("atmVclProfile-Id-Vci").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_Id_Vci.setStatus('mandatory')
atmVclProfile_RxTrafficDesc = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 7), Integer32()).setLabel("atmVclProfile-RxTrafficDesc").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_RxTrafficDesc.setStatus('mandatory')
atmVclProfile_TxTrafficDesc = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 8), Integer32()).setLabel("atmVclProfile-TxTrafficDesc").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_TxTrafficDesc.setStatus('mandatory')
atmVclProfile_AalType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("notPresent", 1), ("aal1", 2), ("aal34", 3), ("aal5", 4), ("aalOther", 5), ("aalUnknown", 6), ("aal2", 7)))).setLabel("atmVclProfile-AalType").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_AalType.setStatus('mandatory')
atmVclProfile_TxSduSize = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 10), Integer32()).setLabel("atmVclProfile-TxSduSize").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_TxSduSize.setStatus('mandatory')
atmVclProfile_RxSduSize = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 11), Integer32()).setLabel("atmVclProfile-RxSduSize").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_RxSduSize.setStatus('mandatory')
atmVclProfile_Aal5Encaps = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("vcmuxRouted", 2), ("vcmuxBridged8023", 3), ("vcmuxBridged8025", 4), ("vcmuxBridged8026", 5), ("vcmuxLanemul8023", 6), ("vcmuxLanemul8025", 7), ("llcEncapsulation", 8), ("multiFrameRelaySscs", 9), ("otherEncapsulation", 10), ("unknownEncapsulation", 11)))).setLabel("atmVclProfile-Aal5Encaps").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_Aal5Encaps.setStatus('mandatory')
atmVclProfile_McastType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4))).clone(namedValues=NamedValues(("p2p", 2), ("p2mproot", 3), ("p2mpleaf", 4)))).setLabel("atmVclProfile-McastType").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_McastType.setStatus('mandatory')
atmVclProfile_CallKind = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("pvc", 2), ("svcIncoming", 3), ("svcOutgoing", 4), ("spvcInitiator", 5), ("spvcTarget", 6)))).setLabel("atmVclProfile-CallKind").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVclProfile_CallKind.setStatus('mandatory')
# The only writable column: triggers create/delete of the whole profile row.
atmVclProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 39, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("atmVclProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmVclProfile_Action_o.setStatus('mandatory')

# --- ATM VPL profile table: same layout as the VCL table, minus the VCI ---
mibatmVplProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 38, 1), )
if mibBuilder.loadTexts: mibatmVplProfileTable.setStatus('mandatory')
mibatmVplProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1), ).setIndexNames((0, "ASCEND-MIBATMATOM-MIB", "atmVplProfile-Id-Address-PhysicalAddress-Shelf"), (0, "ASCEND-MIBATMATOM-MIB", "atmVplProfile-Id-Address-PhysicalAddress-Slot"), (0, "ASCEND-MIBATMATOM-MIB", "atmVplProfile-Id-Address-PhysicalAddress-ItemNumber"), (0, "ASCEND-MIBATMATOM-MIB", "atmVplProfile-Id-Address-LogicalItem"), (0, "ASCEND-MIBATMATOM-MIB", "atmVplProfile-Id-Vpi"))
if mibBuilder.loadTexts: mibatmVplProfileEntry.setStatus('mandatory')
atmVplProfile_Id_Address_PhysicalAddress_Shelf = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 1), Integer32()).setLabel("atmVplProfile-Id-Address-PhysicalAddress-Shelf").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_Id_Address_PhysicalAddress_Shelf.setStatus('mandatory')
atmVplProfile_Id_Address_PhysicalAddress_Slot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 2), Integer32()).setLabel("atmVplProfile-Id-Address-PhysicalAddress-Slot").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_Id_Address_PhysicalAddress_Slot.setStatus('mandatory')
atmVplProfile_Id_Address_PhysicalAddress_ItemNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 3), Integer32()).setLabel("atmVplProfile-Id-Address-PhysicalAddress-ItemNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_Id_Address_PhysicalAddress_ItemNumber.setStatus('mandatory')
atmVplProfile_Id_Address_LogicalItem = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 4), Integer32()).setLabel("atmVplProfile-Id-Address-LogicalItem").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_Id_Address_LogicalItem.setStatus('mandatory')
atmVplProfile_Id_Vpi = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 5), Integer32()).setLabel("atmVplProfile-Id-Vpi").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_Id_Vpi.setStatus('mandatory')
atmVplProfile_RxTrafficDesc = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 6), Integer32()).setLabel("atmVplProfile-RxTrafficDesc").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_RxTrafficDesc.setStatus('mandatory')
atmVplProfile_TxTrafficDesc = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 7), Integer32()).setLabel("atmVplProfile-TxTrafficDesc").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_TxTrafficDesc.setStatus('mandatory')
atmVplProfile_McastType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4))).clone(namedValues=NamedValues(("p2p", 2), ("p2mproot", 3), ("p2mpleaf", 4)))).setLabel("atmVplProfile-McastType").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_McastType.setStatus('mandatory')
atmVplProfile_CallKind = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("pvc", 2), ("svcIncoming", 3), ("svcOutgoing", 4), ("spvcInitiator", 5), ("spvcTarget", 6)))).setLabel("atmVplProfile-CallKind").setMaxAccess("readonly")
if mibBuilder.loadTexts: atmVplProfile_CallKind.setStatus('mandatory')
atmVplProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 38, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("atmVplProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmVplProfile_Action_o.setStatus('mandatory')

# Register all ASCEND-MIBATMATOM-MIB objects with the MIB builder.
mibBuilder.exportSymbols("ASCEND-MIBATMATOM-MIB", atmVplProfile_McastType=atmVplProfile_McastType, atmVclProfile_CallKind=atmVclProfile_CallKind, atmVclProfile_Aal5Encaps=atmVclProfile_Aal5Encaps, atmVplProfile_RxTrafficDesc=atmVplProfile_RxTrafficDesc, atmVclProfile_Id_Vci=atmVclProfile_Id_Vci, atmVplProfile_Id_Vpi=atmVplProfile_Id_Vpi, atmVplProfile_Id_Address_PhysicalAddress_Slot=atmVplProfile_Id_Address_PhysicalAddress_Slot, mibatmVplProfile=mibatmVplProfile, mibatmVclProfileTable=mibatmVclProfileTable, atmVclProfile_Id_Address_LogicalItem=atmVclProfile_Id_Address_LogicalItem, atmVclProfile_Id_Vpi=atmVclProfile_Id_Vpi, atmVclProfile_Id_Address_PhysicalAddress_Slot=atmVclProfile_Id_Address_PhysicalAddress_Slot, atmVplProfile_CallKind=atmVplProfile_CallKind, DisplayString=DisplayString, atmVclProfile_TxTrafficDesc=atmVclProfile_TxTrafficDesc, atmVclProfile_RxSduSize=atmVclProfile_RxSduSize, atmVclProfile_RxTrafficDesc=atmVclProfile_RxTrafficDesc, mibatmVplProfileTable=mibatmVplProfileTable, atmVplProfile_Id_Address_PhysicalAddress_Shelf=atmVplProfile_Id_Address_PhysicalAddress_Shelf, atmVclProfile_AalType=atmVclProfile_AalType, atmVplProfile_TxTrafficDesc=atmVplProfile_TxTrafficDesc, mibatmVclProfile=mibatmVclProfile, mibatmVclProfileEntry=mibatmVclProfileEntry, atmVclProfile_McastType=atmVclProfile_McastType, atmVplProfile_Id_Address_LogicalItem=atmVplProfile_Id_Address_LogicalItem, mibatmVplProfileEntry=mibatmVplProfileEntry, atmVplProfile_Action_o=atmVplProfile_Action_o, atmVplProfile_Id_Address_PhysicalAddress_ItemNumber=atmVplProfile_Id_Address_PhysicalAddress_ItemNumber, atmVclProfile_TxSduSize=atmVclProfile_TxSduSize, atmVclProfile_Action_o=atmVclProfile_Action_o, atmVclProfile_Id_Address_PhysicalAddress_ItemNumber=atmVclProfile_Id_Address_PhysicalAddress_ItemNumber, atmVclProfile_Id_Address_PhysicalAddress_Shelf=atmVclProfile_Id_Address_PhysicalAddress_Shelf)
|
#!/usr//bin/env python3
# T. Vuillaume, 12/09/2019
# merge and copy DL1 data after production
# Modifications by E. Garcia, 21/01/2020
# 1. check job_logs
# 2. check that all files have been created in DL1 based on training and testing lists
# 3. move DL1 files in final place
# 4. merge DL1 files
# 5. move running_dir
import os
import shutil
import argparse
from lstmcpipe.data_management import (
check_job_logs,
read_lines_file,
check_files_in_dir_from_file,
query_continue,
check_and_make_dir,
move_dir_content
)
# Command-line interface: a single positional argument pointing at the
# running-analysis directory holding the freshly produced DL1 files.
_DESCRIPTION = (
    "Merge and copy DL1 data after production. \n"
    " 1. check job_logs \n"
    " 2. check that all files have been created in DL1 based on training "
    "and testing lists \n"
    " 3. move DL1 files in final place \n"
    " 4. merge DL1 files \n"
    " 5. move running_dir "
)

parser = argparse.ArgumentParser(description=_DESCRIPTION)
parser.add_argument(
    'input_dir',
    type=str,
    help='path to the DL1 files directory to merge, copy and move',
)
def main(input_dir, flag_full_workflow=False, particle2jobs_dict=None, particle=None, flag_merge=False,
         flag_no_image=True, prod_id=None, gamma_offset=None, source_environment=None):
    """
    Merge and copy DL1 data after production.

        1. check job_logs
        2. check that all files have been created in DL1 based on training and testing lists
        3. move DL1 files in final place
        4. merge DL1 files
        5. move running_dir

    Parameters
    ----------
    input_dir : str
        path to the DL1 files directory to merge, copy and move. Compulsory argument.
    flag_full_workflow : bool
        Boolean flag to indicate if this script is run as part of the workflow that converts r0 to dl2 files.
    particle2jobs_dict : dict or None
        Dictionary used to retrieve the r0 to dl1 jobids that were sent in the previous step of the r0-dl3 workflow.
        This script will NOT start until all the jobs sent before have finished.
        COMPULSORY argument when flag_full_workflow is set to True.
        Defaults to None (treated as an empty dict) — a literal ``{}`` default
        would be a shared mutable default argument.
    particle : str
        Type of particle used to create the log and dictionary
        COMPULSORY argument when flag_full_workflow is set to True.
    flag_merge : bool
        Flag to indicate whether the `--smart` argument of the `lstchain_merge_hdf5_files.py` script must be set to
        True (smart merge) or False (auto merge).
        Default set to False.
    flag_no_image : bool
        Flag to indicate whether the `--no-image` argument of the `lstchain_merge_hdf5_files.py` script must be set to
        True (--no-image True) or False (--no-image False).
        Default set to True.
    prod_id : str
        prod_id for output filename.
    gamma_offset : str
        if gamma files have various off0.Xdeg observations, include the offset within the filename for completeness.
    source_environment : str
        path to a .bashrc file to source (can be configurable for custom runs @ mc_r0_to_dl3 script)
        to activate a certain conda environment.
        ! NOTE : train_pipe AND dl1_to_dl2 **MUST** be run with the same environment.

    Returns
    -------
    log_merge : dict (if flag_full_workflow is True)
        dictionary of dictionaries containing the log information of this script and the jobid of the batched job,
        separated by particle
        - log_merge[particle][set_type].keys() = ['logs_script_test or logs_script_train',
        'train_path_and_outname_dl1 or test_path_and_outname_dl1', 'jobid']
        **** otherwise : (if flag_full_workflow is False, by default) ****
        None is returned -- THIS IS APPLIED FOR THE ARGUMENTS SHOWN BELOW TOO
    return_jobids4train : str (if flag_full_workflow is True)
        jobid of the batched job to be send (for dependencies purposes) to the next stage of the workflow
        (train_pipe), by particle
    return_jobids_debug : str
        jobids to store in log_reduced.txt - Mainly debug purposes.
    """
    # Bind a fresh dict per call instead of sharing a mutable default.
    if particle2jobs_dict is None:
        particle2jobs_dict = {}

    if flag_full_workflow:
        log_merge = {particle: {'training': {}, 'testing': {}}}
        wait_r0_dl1_jobs = particle2jobs_dict[particle]
        return_jobids4train = []
        return_jobids_debug = []
        # Short per-particle job names shown in the slurm queue.
        job_name = {'electron': 'e_merge',
                    'gamma': 'g_merge',
                    'gamma-diffuse': 'gd_merge',
                    'proton': 'p_merge',
                    'gamma_off0.0deg': 'g0.0_merge',
                    'gamma_off0.4deg': 'g0.4_merge'
                    }
    else:
        print(f"\n ==== START {os.path.basename(__file__)} ==== \n")

    # Directory layout produced by the previous (r0 -> dl1) stage.
    JOB_LOGS = os.path.join(input_dir, 'job_logs')
    training_filelist = os.path.join(input_dir, 'training.list')
    testing_filelist = os.path.join(input_dir, 'testing.list')
    running_DL1_dir = os.path.join(input_dir, 'DL1')
    DL1_training_dir = os.path.join(running_DL1_dir, 'training')
    DL1_testing_dir = os.path.join(running_DL1_dir, 'testing')
    final_DL1_dir = input_dir.replace('running_analysis', 'DL1')
    logs_destination_dir = input_dir.replace('running_analysis', 'analysis_logs')

    # 1. check job logs
    check_job_logs(JOB_LOGS)

    # 2. check that all files have been created in DL1 based on training and testing lists
    # just check number of files first:
    if not len(os.listdir(DL1_training_dir)) == len(read_lines_file(training_filelist)):
        tf = check_files_in_dir_from_file(DL1_training_dir, training_filelist)
        if tf != [] and not flag_full_workflow:
            query_continue("{} files from the training list are not in the `DL1/training` directory:\n{} "
                           "Continue ?".format(len(tf), tf))

    if not len(os.listdir(DL1_testing_dir)) == len(read_lines_file(testing_filelist)):
        tf = check_files_in_dir_from_file(DL1_testing_dir, testing_filelist)
        if tf != [] and not flag_full_workflow:
            query_continue("{} files from the testing list are not in the `DL1/testing directory:\n{} "
                           "Continue ?".format(len(tf), tf))

    if not flag_full_workflow:
        # Standalone mode: run every step interactively on this host.
        print("\tmerging starts")

        # 3. merge DL1 files
        for set_type in ['testing', 'training']:
            tdir = os.path.join(running_DL1_dir, set_type)
            # dl1 files should (must otherwise you are not trying to merge) already been created
            output_filename = os.listdir(tdir)[0]
            output_filename = 'dl1_' + os.path.basename(output_filename.split('_run')[0])
            if particle == 'gamma-diffuse':
                output_filename = output_filename.replace('gamma', 'gamma-diffuse')
            if gamma_offset is not None:
                output_filename += f'_{gamma_offset}'
            if prod_id is not None:
                output_filename += f'_{prod_id}'
            output_filename += f'_{set_type}.h5'
            output_filename = os.path.join(running_DL1_dir, output_filename)
            print(f"\t\tmerge output: {output_filename}")

            # 3.1 sbatch the jobs (or send them interactively depending) if the script is(not) run as part of the
            # whole workflow
            cmd = f"lstchain_merge_hdf5_files -d {tdir} -o {output_filename} --no-image {flag_no_image} " \
                  f"--smart {flag_merge}"
            os.system(cmd)

        # 4. move DL1 files in final place
        check_and_make_dir(final_DL1_dir)
        move_dir_content(running_DL1_dir, final_DL1_dir)
        print(f"\tDL1 files have been moved to {final_DL1_dir}")

        # copy lstchain config file there too. HiPeRTA configs are *.txt
        config_files = [os.path.join(input_dir, f) for f in os.listdir(input_dir) if f.endswith(('.json', '.txt'))]
        for file in config_files:
            shutil.copyfile(file, os.path.join(final_DL1_dir, os.path.basename(file)))

        # 5. move running_dir as logs
        check_and_make_dir(logs_destination_dir)
        move_dir_content(input_dir, logs_destination_dir)
        print(f"\tLOGS have been moved to {logs_destination_dir}")
        print(f"\n ==== END {os.path.basename(__file__)} ==== \n")

    else:  # flag_full_workflow == True !
        # Workflow mode: everything is batched through slurm with dependencies.
        print(f"\n\tmerging starts - {particle}")

        # 3. merge DL1 files
        wait_both_merges = []
        for set_type in ['testing', 'training']:
            tdir = os.path.join(running_DL1_dir, set_type)

            # just need to take the base name of the file, so we read a processed bunch and take first file
            # NOTE(review): the training list is read for BOTH set types; only the
            # run-independent basename is used, so this looks intentional — confirm.
            with open(training_filelist, 'r') as f:
                output_filename = f.readline()
            output_filename = 'dl1_' + os.path.basename(output_filename.split('_run')[0])
            if particle == 'gamma-diffuse':
                output_filename = output_filename.replace('gamma', 'gamma-diffuse')
            if '_off' in particle:
                output_filename += f'_{gamma_offset}'
            output_filename += f'_{prod_id}_{set_type}'
            output_filename += '.h5'
            output_filename = os.path.join(running_DL1_dir, output_filename)
            print(f"\t\tmerge output: {output_filename}")

            # After the workflow the files will be moved, will not stay at output_filename
            if set_type == 'training':
                log_merge[particle][set_type]['train_path_and_outname_dl1'] = os.path.join(
                    final_DL1_dir, os.path.basename(output_filename))
            else:
                log_merge[particle][set_type]['test_path_and_outname_dl1'] = os.path.join(
                    final_DL1_dir, os.path.basename(output_filename))

            cmd = 'sbatch --parsable -p short'
            if wait_r0_dl1_jobs != '':
                cmd += ' --dependency=afterok:' + wait_r0_dl1_jobs
            # BUGFIX: stderr (-e) now goes to the .e file and stdout (-o) to the
            # .o file — the extensions were swapped here, unlike every other
            # sbatch call in this function.
            cmd += f' -J {job_name[particle]} -e slurm-{job_name[particle]}-{set_type}.e ' \
                   f'-o slurm-{job_name[particle]}-{set_type}.o --wrap="{source_environment} ' \
                   f'lstchain_merge_hdf5_files -d {tdir} -o {output_filename} --no-image {flag_no_image} ' \
                   f'--smart {flag_merge}"'
            jobid_merge = os.popen(cmd).read().strip('\n')
            log_merge[particle][set_type][jobid_merge] = cmd
            print(f'\t\tSubmitted batch job {jobid_merge} -- {particle}, {set_type}')
            wait_both_merges.append(jobid_merge)
            return_jobids_debug.append(jobid_merge)

        # Out of testing/training loop !
        # NOTE(review): from here on, `set_type` keeps its last loop value
        # ('training'); the follow-up jobids are logged under that key.

        # 4., 5. & 6. in the case of the full workflow are done in a separate sbatch to wait merge, the three steps:
        #  4 --> move DL1 files in final place
        #  5 --> copy lstchain config file in final_dir too
        #  6 --> move running_dir as logs

        print(f"\tDL1 files will be moved to {final_DL1_dir}")

        base_cmd = 'sbatch --parsable -p short -J {} -e {} -o {} --dependency=afterok:{} ' \
                   '--wrap="python batch_dl1_utils-merge_and_copy.py -s {} -d {} --copy_conf {}"'
        wait_both_merges = ','.join(wait_both_merges)

        # 4 --> move DL1 files in final place
        batch_mv_dl1 = base_cmd.format(job_name[particle].split('_')[0]+'_mv_dl1',
                                       f'slurm-{job_name[particle].split("_")[0]}_mv_DL1_files.e',
                                       f'slurm-{job_name[particle].split("_")[0]}_mv_DL1_files.o',
                                       wait_both_merges,
                                       running_DL1_dir,
                                       final_DL1_dir,
                                       'False'
                                       )
        jobid_move_dl1 = os.popen(batch_mv_dl1).read().strip('\n')
        log_merge[particle][set_type][jobid_move_dl1] = batch_mv_dl1
        print(f'\t\tSubmitted batch job {jobid_move_dl1}. It will move dl1 files when {wait_both_merges} finish.')

        # 5 --> copy lstchain config file in final_dir too
        batch_copy_conf = base_cmd.format(job_name[particle].split('_')[0] + '_cp_conf',
                                          f'slurm-{job_name[particle].split("_")[0]}_cp_config.e',
                                          f'slurm-{job_name[particle].split("_")[0]}_cp_config.o',
                                          jobid_move_dl1,
                                          input_dir,
                                          final_DL1_dir,
                                          'True'
                                          )
        jobid_copy_conf = os.popen(batch_copy_conf).read().strip('\n')
        log_merge[particle][set_type][jobid_copy_conf] = batch_copy_conf
        print(f'\t\tSubmitted batch job {jobid_copy_conf}. It will copy the used config when {jobid_move_dl1} finish.')

        # 6 --> move running_dir to final analysis_logs
        batch_mv_dir = base_cmd.format(job_name[particle].split('_')[0]+'_mv_dir',
                                       f'slurm-{job_name[particle].split("_")[0]}_mv_DL1_direct.e',
                                       f'slurm-{job_name[particle].split("_")[0]}_mv_DL1_direct.o',
                                       jobid_copy_conf,
                                       input_dir,
                                       logs_destination_dir,
                                       'False'
                                       )
        jobid_move_log = os.popen(batch_mv_dir).read().strip('\n')
        log_merge[particle][set_type][jobid_move_log] = batch_mv_dir
        print(f'\t\tSubmitted batch job {jobid_move_log}. It will move running_dir when {jobid_copy_conf} finish.')

        return_jobids4train.append(jobid_move_dl1)
        return_jobids_debug.append(jobid_move_dl1)
        return_jobids_debug.append(jobid_move_log)
        return_jobids_debug.append(jobid_copy_conf)

        print(f"\tLOGS will be moved to {logs_destination_dir}")

        # Little clarification (it will not be clear in log). These keys are stored here for 2 purposes:
        # 1 - train_pipe recover final dl1 names and path.
        # 2 - dl1_to_dl2 recover the jobids of the merged dl1 files; (all dl1 files MUST be merged and moved
        # to dl1_dir), so instead of storing the jobid that merges all the *particle*_dl1 (jobid_merge), it will
        # be store the jobid that move the dl1 final file to dl1_dir. Until this step is not finished, the workflow
        # cannot continue.
        return_jobids4train = ','.join(return_jobids4train)
        return_jobids_debug = ','.join(return_jobids_debug)

        return log_merge, return_jobids4train, return_jobids_debug
# Script entry point: parse the CLI and run the standalone (non-workflow)
# merge, which executes all steps interactively on this host.
if __name__ == '__main__':
    args = parser.parse_args()
    main(args.input_dir)
|
#!/usr/bin/env python
import os
from setuptools import setup
from hyperledger import version
# Packaging metadata for the hyperledger Python client.
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)

requirements = [
    'requests >= 2.5.0',
    'six >= 1.4.0',
    # 'websocket-client >= 0.32.0',
]

# NOTE: this exec re-binds `version` (imported above as a module) to the
# version *string* defined inside hyperledger/version.py, without importing
# the full package. The resulting string is what setup() receives below.
exec(open('hyperledger/version.py').read())

with open('README.md') as f:
    long_description = f.read()

# Strip newlines and drop blank/comment lines; the raw line iterator would
# hand setuptools requirement strings with trailing '\n'.
with open('./test-requirements.txt') as test_reqs_txt:
    test_requirements = [
        line.strip() for line in test_reqs_txt
        if line.strip() and not line.strip().startswith('#')
    ]

setup(
    name='hyperledger',
    version=version,
    keywords=('hyperledger', 'blockchain'),
    license='Apache License v2.0',
    description="Python client for Hyperledger.",
    long_description=long_description,
    author='Baohua Yang',
    author_email='yangbaohua@gmail.com',
    url='https://github.com/yeasy/hyperledger-py/',
    packages=[
        'hyperledger', 'hyperledger.api', 'hyperledger.auth',
        'hyperledger.ssladapter', 'hyperledger.utils',
    ],
    platforms='any',
    install_requires=requirements,
    tests_require=test_requirements,
    zip_safe=False,
    test_suite='tests',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Utilities',
        'License :: OSI Approved :: Apache Software License',
    ],
)
|
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack.commands
import json
from stack.exception import CommandError
class __Plugin(stack.commands.Plugin, stack.commands.Command):
    """Import `group` entries from the JSON document held by the owner command."""

    def provides(self):
        return 'group'

    def requires(self):
        return [ 'software', 'environment' ]

    def run(self, args):
        # Only act when group data was requested; with no args at all the
        # user wants everything imported, including groups.
        if args and 'group' not in args:
            return

        # self.owner.data contains the data from the json file defined in init
        if 'group' not in self.owner.data:
            self.owner.log.info('no group data in json file')
            return
        import_data = self.owner.data['group']

        self.notify('\n\tLoading group')

        for entry in import_data:
            self.owner.try_command('add.group', [ entry['name'] ], f'adding group {entry["name"]}', 'exists')
|
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2022 NVDA Contributors <http://www.nvda-project.org/>
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
"""global variables module
@var foregroundObject: holds the current foreground object. The object for the last foreground event received.
@var focusObject: holds the current focus object
@var mouseObject: holds the object that is at the position of the mouse pointer
@var mouseOldX: the last x coordinate of the mouse pointer before its current position
@type mouseOldX: int
@var mouseOldY: the last y coordinate of the mouse pointer before its current position
@type mouseOldY: int
@var navigatorObject: holds the current navigator object
"""
import argparse
import os
import typing
if typing.TYPE_CHECKING:
import NVDAObjects # noqa: F401 used for type checking only
class DefautAppArgs(argparse.Namespace):
    """Typed default values for NVDA's command-line arguments.

    Used as the argparse namespace so every option has a well-typed default
    even when it is not supplied on the command line.
    NOTE(review): the class name is misspelled ("Defaut") but it is
    instantiated as `appArgs` at module level and likely referenced
    elsewhere — renaming would touch every consumer.
    """

    # Invocation-control flags (names mirror the CLI options; see the
    # argument parser elsewhere for their exact semantics).
    quit: bool = False
    check_running: bool = False
    # Logging configuration.
    logFileName: typing.Optional[os.PathLike] = ""
    logLevel: int = 0
    configPath: typing.Optional[os.PathLike] = None
    language: str = "en"
    minimal: bool = False
    secure: bool = False
    disableAddons: bool = False
    debugLogging: bool = False
    noLogging: bool = False
    changeScreenReaderFlag: bool = True
    # Install / portable-copy related options.
    install: bool = False
    installSilent: bool = False
    createPortable: bool = False
    createPortableSilent: bool = False
    portablePath: typing.Optional[os.PathLike] = None
    launcher: bool = False
    enableStartOnLogon: typing.Optional[bool] = None
    copyPortableConfig: bool = False
    easeOfAccess: bool = False
# --- Mutable global state shared across NVDA modules (see module docstring) ---

# Process start time; 0 until presumably set at startup — confirm where it is assigned.
startTime = 0
# Currently tracked NVDA objects; None until the corresponding event arrives.
desktopObject: typing.Optional['NVDAObjects.NVDAObject'] = None
foregroundObject: typing.Optional['NVDAObjects.NVDAObject'] = None
focusObject: typing.Optional['NVDAObjects.NVDAObject'] = None
focusAncestors: typing.List['NVDAObjects.NVDAObject'] = []
# NOTE(review): presumably an index into focusAncestors marking where the last
# focus change diverged — verify against the focus-handling code.
focusDifferenceLevel = None
mouseObject: typing.Optional['NVDAObjects.NVDAObject'] = None
# Previous mouse pointer coordinates (ints per the module docstring; None before first move).
mouseOldX = None
mouseOldY = None
navigatorObject: typing.Optional['NVDAObjects.NVDAObject'] = None
# Review-cursor state; concrete types are not visible from this module.
reviewPosition = None
reviewPositionObj = None
lastProgressValue = 0
# Parsed command-line arguments and any unrecognised extras.
appArgs = DefautAppArgs()
unknownAppArgs: typing.List[str] = []
settingsRing = None
speechDictionaryProcessing = True
exitCode = 0
|
# Interactive even/odd identifier: loops until the user declines to continue.
print("----------------------------------------------")
print("          Even/Odd Number Identifier")
print("----------------------------------------------")

play_again = 'Y'
while play_again == 'Y':
    user_num = input("Enter any whole number: ")
    # Robustness fix: the original crashed with ValueError on non-numeric input.
    try:
        user_int = int(user_num)
    except ValueError:
        print("That was not a whole number, please try again.")
        print()
        continue

    if user_int % 2 == 0:
        print("The number entered is EVEN!")
    else:
        print("The number entered is ODD!")
    print()
    print()

    # Normalise the answer so lowercase 'y'/'n' work too (the original
    # silently exited on 'y' and skipped the goodbye on 'n').
    play_again = input("Do you want to try again? (Y/N)").strip().upper()

if play_again == 'N':
    print("Thank you for playing!")
    print()
    print()
|
from fifo_animal_shelter import __version__
from fifo_animal_shelter.fifo_animal_shelter import AnimalShelter,Cat,Dog
def test_version():
    """Package version should match the pinned release number."""
    expected = '0.1.0'
    assert __version__ == expected
def test_adding_to_shelter():
    '''
    testing enqueue is working fine
    '''
    home = AnimalShelter()
    kitten = Cat('jojo')
    result = home.enqueue(kitten)
    assert result != "You can't add other animal than dogs or cats"
def test_adding_to_shelter_multi_dogs_and_cats():
    '''
    testing enqueue is working fine with adding multi dogs and cats opjects
    '''
    home = AnimalShelter()
    residents = [Cat('smoor'), Cat('tota'), Dog('toto'), Dog('smer')]
    for animal in residents:
        assert home.enqueue(animal) != "You can't add other animal than dogs or cats"
def test_dequeue_from_shelter_dog_or_cat():
    '''
    testing dequeue removes and returns dog or cat objects
    '''
    shelter = AnimalShelter()
    shelter.enqueue(Cat('smoor'))
    shelter.enqueue(Dog('toto'))
    shelter.enqueue(Cat('tota'))
    shelter.enqueue(Dog('smer'))
    assert isinstance(shelter.dequeue('dog'), Dog)
    # `is not None` is the idiomatic identity check (was `!= None`).
    assert shelter.dequeue('cat') is not None
    assert isinstance(shelter.dequeue('cat'), Cat)
    assert isinstance(shelter.dequeue('dog'), Dog)
def test_dequeue_from_shelter_another_animal():
    '''
    testing dequeue returns None when asked for an unsupported animal
    '''
    # The original created four Cat/Dog objects it never used and called
    # dequeue('hamster') once before asserting the same call again — both
    # added nothing to the behaviour under test.
    shelter = AnimalShelter()
    # `is None` is the idiomatic identity check (was `== None`).
    assert shelter.dequeue('hamster') is None
|
class When(object):
    """Tiny fluent matcher: ``When(x).of(p).then(r).otherwise(fallback)``.

    ``of`` matches first by equality, then by ``isinstance`` when the pattern
    is a type; ``then`` records its result only after a successful match;
    ``otherwise`` returns the recorded result, or evaluates the fallback when
    nothing ever matched.
    """

    state = None           # outcome of the most recent of()
    used = False           # True once any then() has fired
    callableObject = None  # result recorded by the firing then()

    def __init__(self, obj):
        self.obj = obj

    def of(self, item):
        matched = self.obj == item
        if not matched:
            # Fall back to a type test; isinstance may raise when `item` is
            # not a type, which simply means "no match".
            try:
                matched = isinstance(self.obj, item)
            except Exception:
                pass
        self.state = matched
        return self

    def then(self, operationToDo, *args, **kwargs):
        if self.state:
            self.used = True
            if callable(operationToDo):
                self.callableObject = operationToDo(self.obj, *args, **kwargs)
            else:
                self.callableObject = operationToDo
        return self

    def otherwise(self, operationToDo, *args, **kwargs):
        # De Morgan of the original: a recorded/pending match wins.
        if self.used or self.state:
            return self.callableObject
        if callable(operationToDo):
            return operationToDo(self.obj, *args, **kwargs)
        return operationToDo
# Self-tests, run only when the module is executed directly.
if __name__ == '__main__':
    def firstTest():
        # A falsy list must still match `of([])` by equality, not truthiness.
        testArgument = []

        def itsStr(n):
            return "its str"

        def itsInt(n):
            return "its int"

        def pop(n):
            return "just dance"

        def tellMeAboutIt(*arg):
            return "wrong!"

        value = (
            When(testArgument)
            .of(str).then(itsStr)
            .of(10).then(itsInt)
            .of([]).then(pop)
            .otherwise(tellMeAboutIt)
        )
        assert value == "just dance"

    def secondTest():
        # Fixed local-name typo: myTestArugment -> myTestArgument.
        myTestArgument = 100
        value = (
            When(myTestArgument)
            .of(25 * 4).then("you got it right.")
            .otherwise("I think you missed the point.")
        )
        assert value == "you got it right."
        value2 = (
            When(10)
            .of(15).then("yep!")
            .otherwise(None)
        )
        # `is None` is the idiomatic identity check (was `== None`).
        assert value2 is None

    def thirdTest():
        # Fixed local-name typo: mutliplyBy -> multiplyBy.
        def multiplyBy(_, value):
            return _ * value

        def echoToTheScreen(_):
            return "To the Screen!" + _

        def castToInt(_):
            return int(_)

        value = (
            When(13.2)
            .of(int).then(multiplyBy, 10)
            .of(str).then(echoToTheScreen)
            .of(float).then(castToInt)
            .otherwise(None)
        )
        assert value == 13

    firstTest()
    secondTest()
    thirdTest()
|
"""
Parsing mnemonics of a LAS file
===============================
Often, a mnemonic aliasing process looks like the following:
#. List all the mnemonics in the file
#. Group synonymous mnemonics under a single label
#. Make dictionaries with mnemonics and labels
#. Feed dictionaries into welly
The class alaska.Aliaser takes input mnemonics from a LAS file
or a directory of LAS files, and aliases the mnemonics so that
synonymous mnemonics are grouped under the same label.
See example below.
"""
import os
from welly import Project
from alaska import Alias, get_data_path
path = str(get_data_path("testcase1.las"))
# initialize aliaser with default settings
a = Alias()
# the parameters can also be customized as such:
# a = Alias(dictionary=True, keyword_extractor=True,
#           model=True, prob_cutoff=.5)
# parse() returns two dictionaries: mnemonics the aliaser resolved,
# and mnemonics it could not find
parsed, not_found = a.parse(path)
# show the aliased mnemonics
print("*" * 10, "Aliased dictionary", "*" * 10)
for mnemonic, label in parsed.items():
    print("{}: {}".format(mnemonic, label))
print("Not parsed with Aliaser:", not_found)
# hand the alias map to welly; the unaliased mnemonics are left alone
p = Project.from_las(path)
data = p.df(keys=list(parsed.keys()), alias=parsed)
print(data)
# visualize the aliasing results as a heatmap
a.heatmap()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Blue Box Group, Inc.
# Copyright 2014, Craig Tracey <craigtracey@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import traceback
from hashlib import md5
from jinja2 import Environment
# Jinja2 template for an upstart job configuration file; rendered by main()
# with the module parameters (user, cmd, args, pidfile, envs, ...).
UPSTART_TEMPLATE = """
start on {{ start_on }}
stop on {{ stop_on }}
{% if description -%}
description {{ description }}
{% endif -%}
{% if envs -%}
{% for env in envs %}
env {{ env }}
{% endfor %}
{% endif -%}
{% if prestart_script -%}
pre-start script
{{ prestart_script }}
end script
{% endif -%}
{% if respawn -%}
respawn
{% endif -%}
{% if expect -%}
expect {{ expect }}
{% endif -%}
exec start-stop-daemon --start --chuid {{ user }} {{ pidfile }} --exec {{ cmd }} {{ args }}
"""
def main():
    """Ansible module entry point: render, install, or remove an upstart job.

    state=present renders UPSTART_TEMPLATE to /etc/init/<name>.conf (or the
    `path` override) and symlinks /etc/init.d/<name> to upstart-job;
    state=absent removes both. Always exits via module.exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(default=None, required=True),
            cmd=dict(default=None, required=True),
            args=dict(default=None),
            user=dict(default=None, required=True),
            config_dirs=dict(default=None),
            config_files=dict(default=None),
            description=dict(default=None),
            expect=dict(default=None),
            envs=dict(default=None, required=False, type='list'),
            state=dict(default='present'),
            start_on=dict(default='runlevel [2345]'),
            stop_on=dict(default='runlevel [!2345]'),
            prestart_script=dict(default=None),
            # FIX: without type='bool' any non-empty string (even "no") was
            # truthy, so the template always emitted `respawn`.
            respawn=dict(default=True, type='bool'),
            path=dict(default=None),
            pidfile=dict(default=None)
        )
    )
    try:
        changed = False
        if not module.params['path']:
            service_path = '/etc/init/%s.conf' % module.params['name']
        else:
            service_path = module.params['path']
        symlink = os.path.join('/etc/init.d/', module.params['name'])

        if module.params['state'] == 'absent':
            # Remove the service file and its sysv-compat symlink.
            if os.path.exists(service_path):
                os.remove(service_path)
                changed = True
            if os.path.exists(symlink):
                os.remove(symlink)
                changed = True
            if not changed:
                module.exit_json(changed=False, result="ok")
            else:
                module.exit_json(changed=True, result="changed")

        # Optional start-stop-daemon pidfile flags.
        pidfile = ''
        if module.params['pidfile'] and len(module.params['pidfile']):
            pidfile = '--make-pidfile --pidfile %s' % module.params['pidfile']

        # Arguments passed through to the daemon itself (after `--`).
        args = ''
        if module.params['args'] or module.params['config_dirs'] or \
                module.params['config_files']:
            args = '-- '
            if module.params['args']:
                args += module.params['args']
            if module.params['config_dirs']:
                for directory in module.params['config_dirs'].split(','):
                    args += '--config-dir %s ' % directory
            if module.params['config_files']:
                for filename in module.params['config_files'].split(','):
                    args += '--config-file %s ' % filename

        template_vars = module.params
        template_vars['pidfile'] = pidfile
        template_vars['args'] = args
        env = Environment().from_string(UPSTART_TEMPLATE)
        rendered_service = env.render(template_vars)

        # Skip the write when the on-disk file already matches the template.
        if os.path.exists(service_path):
            # FIX: close the file handle instead of leaking it.
            with open(service_path, 'rb') as existing:
                file_hash = md5(existing.read()).hexdigest()
            # FIX: md5() requires bytes; hashing the rendered text directly
            # raises TypeError on Python 3.
            template_hash = md5(rendered_service.encode('utf-8')).hexdigest()
            if file_hash == template_hash:
                module.exit_json(changed=False, result="ok")

        with open(service_path, "w") as fh:
            fh.write(rendered_service)

        if not os.path.exists(symlink):
            os.symlink('/lib/init/upstart-job', symlink)

        module.exit_json(changed=True, result="created")
    except Exception as e:
        # FIX: the original captured the traceback but never used it.
        module.fail_json(msg="creating the service failed: %s\n%s"
                             % (str(e), traceback.format_exc()))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
main()
|
# version 1.0
# 07/21/2018
# switch to new database structure
# stream name: attribute name
# key: attribute value
# value: entire record
# version 0.3
# 06/26/2018
# use fixed query to benchmark
# added graphic chart
# version 0.2.1
# 06/25/2018
# added validation function
# version 0.2
# 06/21/2018
# implementation of hash pointers
# version 0.11
# 06/21/2018
# clean up the code
# 06/20/2018
# baseline implementation
# Insertion: insert n (n = number of attributes) copies into the blockchain, using each attribute as key and the entire line as value
# Range query: query from the start time, increasing the timestamp by 1 each time until the end time. The total number of queries needed is (end - start)
# And operation: query using a single attribute and do the AND operation locally
###########################################################################
###########################################################################
import re
import json
# from time import sleep
from random import sample
from util import measure, getAPI
import util
from config import *
from pathlib import Path
import logging
from itertools import combinations
#########################################################################
######################  implementation file   ###########################
#########################################################################
# from baseline0_2 import *
# import baseline0_2 as baseline
import importlib
baseline = None
def loadBaseline(file):
    """Import the named baseline implementation module and make it active."""
    global baseline
    module = importlib.import_module(file)
    baseline = module
#########################################################################
###################### benchmark ###########################
#########################################################################
# Number of random test cases drawn for each query type.
TESTCASE_CONFIG = {
    "pointQuery": 8,
    "rangeQuery": 1,
    "andQuery": 1
}
# 483730
MAX_HEIGHT = 8
# Range-query window sizes: 10^4 .. 10^(MAX_HEIGHT-1).
RANGE_SCALE = [10**i for i in range(4, MAX_HEIGHT)]
# RANGE_SCALE = [100000]
AND_FIELDS = ATTRIBUTE_NAME
MAX_ROUND = 1
# Aggregated benchmark results; written to disk by save2Json().
output_json = {'num_nodes': NUM_NODE, 'file_size': FILE_SIZE, 'insertion': 0,
               'point_query': {}, 'range_query': {}, 'and_query': {}, 'storage': 0}
dir_path = Path.cwd()
log = logging.getLogger("benchmark")
log.setLevel('INFO')
# log.setLevel('DEBUG')
nodes = None  # API handles, populated by init()
database = util.database
testcases = {key: [] for key in TESTCASE_CONFIG}
index = 0
def init():
    """Load the dataset, connect the API nodes, and create baseline streams.

    Must be called after loadBaseline(); aborts otherwise.
    """
    if baseline is None:
        log.error("Please call loadBaseline() to load baseline first")
        # FIX: bare `exit` (no call) was a no-op, so execution continued and
        # crashed later; abort for real.
        raise SystemExit(1)
    # size = sum(1 for line in open(datadir+'test0.txt'))
    database.buildFromFiles([Path(datadir).joinpath(
        'test'+str(i)+'.txt') for i in range(NUM_NODE)])
    log.info("database size: %d", len(database))
    global nodes
    nodes = getAPI(auth, NUM_NODE)
    baseline.createStreams(nodes[0])
    log.info("File Size: %d" % FILE_SIZE)
def loadTestCases(testfile='testcases.json'):
    """Populate the global `testcases`, reusing a saved file when present.

    If `testfile` exists it is loaded verbatim; otherwise fresh cases are
    sampled from the database, assigned per TESTCASE_CONFIG, and saved.
    """
    global testcases
    if Path(dir_path).joinpath(testfile).is_file():
        with open(testfile, 'r') as read_file:
            testcases = json.load(read_file)
        return
    # FIX: only sample the database when fresh cases are actually needed;
    # the original sampled unconditionally even when reloading from disk.
    temp = [database[i]
            for i in sample(range(len(database)), sum(TESTCASE_CONFIG.values()))]
    count = 0
    for key in TESTCASE_CONFIG.keys():
        for i in range(TESTCASE_CONFIG[key]):
            testcases[key].append(temp[count])
            count += 1
    with open(testfile, 'w') as write_file:
        json.dump(testcases, write_file)
# Mapping tables reserved for compressing Activity/Resource field values;
# currently unused.
activities = {}
resources = {}
def insertionTest():
    """Time the insertion of every node's data file into the blockchain.

    Records the per-node average in output_json['insertion'].
    """
    log.info("Insertion Test:")
    total = 0
    for i in range(NUM_NODE):
        # FIX: read via a context manager so the file handle is closed
        # (the original left `open(...)` dangling inside a comprehension).
        with open(Path(datadir).joinpath('test'+str(i)+'.txt')) as f:
            data = [line.rstrip() for line in f]
        elapsed = measure(baseline.insert, nodes[i], data)
        total += elapsed
        log.info('Node %d Insertion time: %f' % (i, elapsed))
    log.info("total insertion time: %f " % total)
    log.info("average insertion time: %f" % (total/NUM_NODE))
    output_json['insertion'] = total/NUM_NODE
def getAverageNodeRound(func, *args, rounds=MAX_ROUND, nnode=NUM_NODE):
    """Run `func` on the first `nnode` nodes for `rounds` rounds.

    Returns the mean elapsed time over all (round, node) pairs.
    """
    total_elapsed = 0
    for _ in range(rounds):
        for node_index in range(nnode):
            total_elapsed += measure(func, nodes[node_index], *args)
    return total_elapsed / (rounds * nnode)
def pointQueryTest():
    """Time a point query per attribute and record averages in output_json.

    Each attribute i is queried with the i-th sampled point-query record's
    i-th field as the value.
    """
    log.info("Point Field Query Test:")
    total = 0
    for i in range(len(ATTRIBUTE)):
        fields = testcases['pointQuery'][i].split(" ")
        qtime = getAverageNodeRound(baseline.pointQuery,
                                    ATTRIBUTE_NAME[i], fields[i], rounds=10)
        total += qtime
        log.info('Q%d[%s]: %f' % (i+1, ATTRIBUTE_NAME[i], qtime))
        output_json['point_query'][ATTRIBUTE_NAME[i]] = qtime
    # NOTE: the original also added `elapsed / (MAX_ROUND * NUM_NODE)` to
    # `total`, but `elapsed` was always 0, so removing that dead code does
    # not change the reported average.
    log.info('Average Query Time: %f' %
             (total / TESTCASE_CONFIG['pointQuery']))
def rangeQueryTest():
    """Time range queries at each scale in RANGE_SCALE on a single node."""
    log.info("Range Query Test:")
    # The first field of the record serves as its timestamp.
    start = testcases['rangeQuery'][0].split(" ")[0]
    log.debug(testcases['rangeQuery'])
    log.debug(start)
    total = 0
    begin = int(start)
    for scale in RANGE_SCALE:
        qtime = getAverageNodeRound(
            baseline.rangeQuery, begin, begin + scale, rounds=MAX_ROUND, nnode=1)
        total += qtime
        log.info('Range %.0E: %f' % (scale, qtime))
        output_json['range_query'][scale] = qtime
def andQueryTest():
    """Time AND queries over every attribute combination of each size."""
    log.info("And Query Test:")
    fields = testcases['andQuery'][0].split(" ")
    for size in range(2, len(AND_FIELDS)+1):
        qtime_sum = 0
        combo_count = 0
        for combo in combinations(range(len(AND_FIELDS)), size):
            # Pair each chosen attribute name with its value from the record.
            clauses = [(ATTRIBUTE_NAME[idx], fields[idx]) for idx in combo]
            qtime = getAverageNodeRound(baseline.andQuery, clauses, rounds=1)
            log.debug("%s(%d): %f" % ([AND_FIELDS[i]
                                       for i in combo], size, qtime))
            qtime_sum += qtime
            combo_count += 1
        log.info("%d And Query: %f" % (size, qtime_sum/combo_count))
        output_json['and_query'][size] = qtime_sum/combo_count
def andRangeQueryTest():
    """Times combined AND + range queries for every attribute-combination
    size at every range scale (single node, one round).

    Results are only logged; unlike the other tests nothing is written to
    output_json.
    """
    log.info("And + Range Query Test:")
    # print(AND_FIELDS)
    # input()
    # The first field of the record serves as its timestamp (range start).
    start = testcases['rangeQuery'][0].split(" ")[0]
    total = 0  # NOTE(review): never accumulated below; kept for parity.
    fields = testcases['andQuery'][0].split(" ")
    for scale in RANGE_SCALE:
        for r in range(2, len(AND_FIELDS)+1):
            total_qtime = 0
            count = 0
            for attr_index_list in combinations(range(len(AND_FIELDS)), r):
                attributes = []
                values = []
                for attr in attr_index_list:
                    # Field 0 is presumably the timestamp, already covered by
                    # the range bounds, so it is skipped here — TODO confirm.
                    if attr == 0:
                        continue
                    attributes.append(ATTRIBUTE_NAME[attr])
                    values.append(fields[attr])
                # print(attr_index_list)
                qtime = getAverageNodeRound(
                    baseline.andRangeQuery, int(start), int(start) + scale, list(zip(attributes, values)), rounds=1, nnode=1)
                log.info("Range %.0E, %s(%d): %f" % (scale, [AND_FIELDS[i]
                                                             for i in attr_index_list], r, qtime))
                total_qtime += qtime
                count += 1
            # NOTE(review): total_qtime/count is computed but never logged or
            # stored (andQueryTest does both) — confirm whether intended.
    # qtime = getAverageNodeRound(
    #     baseline.rangeQuery, int(start), int(start) + scale, rounds=MAX_ROUND, nnode=1)
def storageTest():
    """Sum block sizes on node 0 and record the total in output_json."""
    log.info("Storage Usage:")
    api = nodes[0]
    num_blocks = api.getinfo()["result"]["blocks"]
    total_size = 0
    for block in api.listblocks(str(-num_blocks), True)["result"]:
        # Skip blocks that carry only a single transaction.
        if block["txcount"] > 1:
            total_size += block["size"]
    log.info(total_size)
    output_json['storage'] = total_size
def save2Json(file='benchmark.json'):
    """Dump the collected benchmark results to a JSON file."""
    with open(file, 'w') as out:
        json.dump(output_json, out)
|
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Provides info about registered luci services."""
import logging
from google.appengine.ext import ndb
from components import config
from components import net
from components import utils
from components.config.proto import service_config_pb2
import common
import storage
import validation
class DynamicMetadataError(Exception):
    """Raised when a service metadata endpoint response is bad."""
@ndb.tasklet
def get_services_async():
    """Returns a list of registered luci services.

    The list is stored in services/luci-config:services.cfg. Never returns
    None. Cached.

    Returns:
        A list of service_config_pb2.Service.
    """
    cfg = yield storage.get_self_config_async(
        common.SERVICES_REGISTRY_FILENAME, service_config_pb2.ServicesCfg)
    # An empty/missing services field yields an empty list, never None.
    raise ndb.Return(cfg.services or [])
def _dict_to_dynamic_metadata(data):
    """Converts a metadata dict into a ServiceDynamicMetadata message.

    Validation failures surface as DynamicMetadataError via the raising
    validation context.
    """
    ctx = config.validation.Context.raise_on_error(
        exc_type=DynamicMetadataError)
    validation.validate_service_dynamic_metadata_blob(data, ctx)

    msg = service_config_pb2.ServiceDynamicMetadata()
    validation_meta = data.get('validation')
    if validation_meta:
        msg.validation.url = validation_meta['url']
        for entry in validation_meta.get('patterns', []):
            pattern = msg.validation.patterns.add()
            pattern.config_set = entry['config_set']
            pattern.path = entry['path']
    return msg
@ndb.tasklet
def get_metadata_async(service_id):
    """Returns service_config_pb2.ServiceDynamicMetadata for a service.

    Raises:
        DynamicMetadataError if metadata is not available or no such service.
    """
    entity = yield storage.ServiceDynamicMetadata.get_by_id_async(service_id)
    if not entity:
        raise DynamicMetadataError('No dynamic metadata for "%s"' % service_id)
    msg = service_config_pb2.ServiceDynamicMetadata()
    # An entity can exist with empty metadata (e.g. no metadata_url);
    # in that case an empty message is returned.
    if entity.metadata:
        msg.ParseFromString(entity.metadata)
    raise ndb.Return(msg)
def call_service_async(service, url, method='GET', payload=None):
    """Sends JSON RPC request to a service, with authentication.

    Args:
        service: service_config_pb2.Service message.
        url: full URL to send the request to.
        method: HTTP method to use.
        payload: JSON-serializable body to send in PUT/POST requests.

    Returns:
        Deserialized JSON response.

    Raises:
        net.Error on errors.
    """
    return net.json_request_async(
        url,
        method=method,
        payload=payload,
        deadline=50,
        # Services declaring jwt_auth are called with a JWT for their
        # audience; all others get an OAuth token with the email scope.
        scopes=None if service.HasField('jwt_auth') else net.EMAIL_SCOPE,
        use_jwt_auth=service.HasField('jwt_auth'),
        audience=service.jwt_auth.audience or None)
@ndb.tasklet
def _update_service_metadata_async(service):
    """Fetches a service's dynamic metadata and stores it if it changed.

    Raises DynamicMetadataError when the metadata endpoint cannot be reached
    or returns a bad blob.
    """
    entity = storage.ServiceDynamicMetadata(id=service.id)
    if service.metadata_url:
        try:
            res = yield call_service_async(service, service.metadata_url)
        except net.Error as ex:
            # NOTE: ex.message is Python 2 idiom (this is a GAE py2 module).
            raise DynamicMetadataError('Net error: %s' % ex.message)
        entity.metadata = _dict_to_dynamic_metadata(res).SerializeToString()
    # Only write when the metadata actually changed, avoiding needless puts.
    prev_entity = yield storage.ServiceDynamicMetadata.get_by_id_async(service.id)
    if not prev_entity or prev_entity.metadata != entity.metadata:
        yield entity.put_async()
        logging.info('Updated service metadata for %s', service.id)
def cron_request_metadata():
    """Refreshes dynamic metadata for every registered service in parallel."""
    services = get_services_async().get_result()
    futures = [_update_service_metadata_async(svc) for svc in services]
    ndb.Future.wait_all(futures)
    for svc, future in zip(services, futures):
        try:
            future.check_success()
        except DynamicMetadataError:
            logging.exception('Could not load dynamic metadata for %s', svc.id)
|
import argparse
import logging
from pathlib import Path
import pandas as pd
logger = logging.getLogger(__name__)


def ensemble(models_dir: Path):
    """Combine per-model test predictions into a single ensemble CSV.

    Each immediate subdirectory of `models_dir` must contain a
    `test_predictions.csv` with an `ID` column plus per-class score columns.
    The per-class scores are summed across models and each row is
    renormalized to sum to 1; the result is written to
    `models_dir/test_ensemble.csv`.

    Raises:
        ValueError: if `models_dir` contains no subdirectories (the original
            crashed here with a cryptic IndexError).
    """
    df_parts = []
    for model_dir in [d for d in models_dir.iterdir() if d.is_dir()]:
        predictions_file = model_dir / 'test_predictions.csv'
        logger.info('Loading %s', predictions_file)
        df_parts.append(pd.read_csv(predictions_file).set_index('ID'))
    if not df_parts:
        raise ValueError('No model output directories found in %s' % models_dir)
    df = df_parts[0]
    for df_part in df_parts[1:]:
        df = df + df_part
    # Renormalize each row so the summed class scores form a distribution.
    df = df.div(df.sum(axis=1), axis=0)
    df.reset_index(inplace=True)
    logger.info('Combined %d model predictions', len(df_parts))
    df.to_csv(models_dir / 'test_ensemble.csv', index=False)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # FIX: required=True — without it a missing --models crashed later with
    # `Path(None)` instead of a clean argparse usage error.
    parser.add_argument('--models', metavar='MODELS_DIR', required=True,
                        help='Directory with any number of model outputs generated by train.py.')
    args = parser.parse_args()
    models_dir = Path(args.models).resolve()
    # Log both to a file inside the models directory and to the console.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S',
        handlers=[
            logging.FileHandler(models_dir / 'ensemble.log', mode='w'),
            logging.StreamHandler(),
        ])
    ensemble(models_dir)
|
from app import db, models
# (url, student) pairs for every course blog to register.
pairs = [('https://adamska426.wordpress.com/','Katelyn'),('https://computerscience183923392.wordpress.com','Jessie'),
         ('https://youngdublog.wordpress.com/','Dustin'),('https://waughblogs.wordpress.com','Chandler'),
         ('https://basantfoss.wordpress.com','Basanta'),('https://mysoftwarejourney.wordpress.com','Malachi'),
         ('https://gbondo.wordpress.com/','Moses'),('https://awyoonisj.wordpress.com/','Jamal'),
         ('https://opensourcewithdove.wordpress.com/','Dove'),('https://johnsone978745682.wordpress.com/','Evan'),
         ('https://halfeatenapple608323536.wordpress.com','Abi'),('https://csc426blog.wordpress.com/','Cameron')]
students = {'Katelyn':[],'Jessie':[],'Dustin':[],
            'Chandler':[],'Basanta':[],'Malachi':[],
            'Moses':[],'Jamal':[],'Dove':[],'Evan':[],
            'Abi':[],'Cameron':[]}
for url, student in pairs:
    blog = models.Blog()
    blog.student = student
    blog.url = url
    # Special case: Moses's blog renders post titles with 'h2' tags.
    blog.titletag = 'h2' if student == 'Moses' else 'h1'
    blog.titleclass = 'entry-title'
    blog.bodytag = 'div'
    blog.bodyclass = 'entry-content'
    db.session.add(blog)
db.session.commit()
|
import unittest
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
import numpy as np
from sklearn.utils.estimator_checks import check_estimator
from ITMO_FS.filters.unsupervised import *
from ITMO_FS.filters.univariate import *
np.random.seed(42)
class TestCases(unittest.TestCase):  # TODO: add TraceRatioLaplacian tests and tests without target
    """Smoke tests for the unsupervised ITMO_FS filters (MCFS, UDFS)."""
    data, target = np.random.randint(10, size=(100, 20)), np.random.randint(10, size=(100,))

    def test_MCFS(self):
        # MCFS keeps every sample while reducing the feature count.
        reduced = MCFS(10).fit_transform(self.data, self.target)
        assert self.data.shape[0] == reduced.shape[0]
        print("MCFS:", self.data.shape, '--->', reduced.shape)

    def test_UDFS(self):
        # UDFS keeps every sample while reducing the feature count.
        reduced = UDFS(10).fit_transform(self.data, self.target)
        assert self.data.shape[0] == reduced.shape[0]
        print("UDFS:", self.data.shape, '--->', reduced.shape)

    def test_df(self):
        # DataFrame and ndarray inputs must give identical selections.
        for selector in (MCFS(10), UDFS(10)):
            from_frame = selector.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target))
            from_array = selector.fit_transform(self.data, self.target)
            np.testing.assert_array_equal(from_frame, from_array)

    def test_pipeline(self):
        # FS alone
        pipe = Pipeline([('FS1', MCFS(10))])
        pipe.fit(self.data, self.target)
        reduced = pipe.transform(self.data)
        assert self.data.shape[0] == reduced.shape[0] and reduced.shape[1] == 10
        # FS followed by an estimator
        pipe = Pipeline([('FS1', UDFS(10)), ('E1', LogisticRegression())])
        pipe.fit(self.data, self.target)
        assert 0 <= pipe.score(self.data, self.target) <= 1
        # FS chained with a second FS
        pipe = Pipeline([('FS1', MCFS(10)), ('FS2', UDFS(5))])
        pipe.fit(self.data, self.target)
        reduced = pipe.transform(self.data)
        assert self.data.shape[0] == reduced.shape[0] and reduced.shape[1] == 5
        # FS, FS, then an estimator
        pipe = Pipeline([('FS1', UDFS(10)), ('FS2', MCFS(5)), ('E1', LogisticRegression())])
        pipe.fit(self.data, self.target)
        assert 0 <= pipe.score(self.data, self.target) <= 1

    def test_est(self):
        # Both filters must satisfy the scikit-learn estimator contract.
        for selector in (MCFS(2), UDFS(2)):
            check_estimator(selector)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
import indieweb_utils
from bs4 import Comment
import datetime
import logging
import mf2py
import json
def add_to_database(full_url, published_on, doc_title, meta_description, heading_info, page,
    pages_indexed, page_content, outgoing_links, crawl_budget, nofollow_all, main_page_content,
    original_h_card, hash, thin_content=False):
    """Build an index record for a crawled page and append it to results.json.

    :param full_url: canonical URL of the page
    :param published_on: dict possibly containing a 'datetime' ISO string
    :param page: the HTTP response (or "" when unavailable)
    :param page_content: BeautifulSoup document; mutated (scripts/styles and
        HTML comments are removed)
    :param nofollow_all: whether every link on the page is nofollow
    :param hash: page content hash (NOTE(review): shadows the builtin)
    :return: the incremented pages_indexed count
    """
    # Values populated (best effort) before the document is indexed.
    category = None
    special_snippet = {}
    h_card = None
    favicon = ""
    length = len(page_content)
    is_homepage = False
    date_to_record = ""
    contains_javascript = False
    # FIX: the original unconditionally reset nofollow_all to "false" here,
    # clobbering the caller's argument so the later `== True` check could
    # never fire and page_is_nofollow was always "false".
    nofollow_all = "true" if nofollow_all == True else "false"

    # prefer the server-reported content length when headers are available
    if page != "" and page.headers:
        # NOTE(review): may be None if the header is absent — confirm.
        length = page.headers.get("content-length")

    # remove script and style tags so they do not pollute rankings
    for script in page_content(["script", "style"]):
        script.decompose()

    # remove HTML comments for the same reason
    comments = page_content.findAll(text=lambda text:isinstance(text, Comment))
    [comment.extract() for comment in comments]

    # get site favicon
    favicon = page_content.find("link", rel="shortcut icon")
    if favicon:
        favicon = favicon.get("href")

    h_entry_object = mf2py.parse(page_content)

    # special_snippet, h_card = identify_special_snippet.find_snippet(page_content, h_card)

    # NOTE(review): h_card is None here, never [], so this branch is dead and
    # discover_author() is never called — confirm the intended condition.
    if h_card == []:
        h_card = indieweb_utils.discover_author(h_card, h_entry_object, full_url, original_h_card)

    page_as_h_entry = None
    mf2_property_type = None
    for item in h_entry_object["items"]:
        if item["type"] and item["type"] == ["h-entry"]:
            page_as_h_entry = item
            if item.get("category"):
                category = ", ".join(item["category"])
            # record which interaction type the entry represents, if any
            if item.get("like-of"):
                mf2_property_type = "like-of"
            elif item.get("repost-of"):
                mf2_property_type = "repost-of"
            elif item.get("bookmark-of"):
                mf2_property_type = "bookmark-of"
            break

    if type(published_on) == dict and published_on.get("datetime") != None:
        date_to_record = published_on["datetime"].split("T")[0]

    # a URL with no path segments is the site's home page
    if full_url.replace("https://", "").replace("http://", "").strip("/").count("/") == 0:
        is_homepage = True

    # get featured image if one is available (may appear in snippets)
    # NOTE(review): find(".u-featured") matches a *tag* literally named
    # ".u-featured"; for CSS classes these should be select_one(...) —
    # left as-is to preserve behavior, confirm and fix with tests.
    featured_image = None
    if page_content.find(".u-featured"):
        featured_image = page_content.find(".u-featured").get("src")
    elif page_content.find(".u-photo"):
        featured_image = page_content.find(".u-photo").get("src")
    elif page_content.find("meta", property="og:image"):
        featured_image = page_content.find("meta", property="og:image").get("src")
    elif page_content.find("meta", property="twitter:image"):
        featured_image = page_content.find("meta", property="twitter:image").get("src")
    else:
        featured_image = ""

    # use p-name in place of title tag if one is available
    if page_content.select(".p-name"):
        title = page_content.select(".p-name")[0].text
    else:
        title = doc_title

    # flag pages that contain javascript
    if page_content.find("script"):
        contains_javascript = True

    record = {
        "title": title,
        "meta_description": meta_description,
        "url": full_url,
        "published_on": date_to_record,
        "h1": ", ".join(heading_info["h1"]),
        "h2": ", ".join(heading_info["h2"]),
        "h3": ", ".join(heading_info["h3"]),
        "h4": ", ".join(heading_info["h4"]),
        "h5": ", ".join(heading_info["h5"]),
        "h6": ", ".join(heading_info["h6"]),
        "length": length,
        "page_content": str(page_content),
        "incoming_links": 0,
        "page_text": page_content.get_text(),
        "outgoing_links": outgoing_links,
        "domain": full_url.split("/")[2],
        "word_count": len(main_page_content.get_text().split(" ")),
        "last_crawled": datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
        "favicon": favicon,
        "referring_domains_to_site": 0, # updated when index is rebuilt
        "internal_incoming_links": 0, # not actively used
        "http_headers": str(page.headers),
        "page_is_nofollow": nofollow_all,
        "h_card": json.dumps(h_card),
        "is_homepage": is_homepage,
        "category": category,
        "featured_image": featured_image,
        "thin_content": thin_content,
        "contains_javascript": contains_javascript,
        "page_hash": hash,
        "special_snippet": special_snippet,
        "mf2_property_type": mf2_property_type
    }

    if page_as_h_entry != None:
        post_type = indieweb_utils.get_post_type(page_as_h_entry)
        record["post_type"] = post_type

    with open("results.json", "a+") as f:
        f.write(json.dumps(record))
        f.write("\n")

    logging.info("indexed new page {} ({}/{})".format(full_url, pages_indexed, crawl_budget))

    pages_indexed += 1
    return pages_indexed
#!/usr/bin/python
#coding:utf-8
import scrapy
from tutorial.items import TutorialItem
from scrapy.http import Request
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
import json
import time
import random
import redis
from scrapy.conf import settings
# scriptslug spider (adapted from a zhipin job-listing crawler)
class ScriptSlug(scrapy.Spider):
    """Crawls scriptslug.com listing pages and downloads each script's PDF."""
    name = "scriptslug"
    allowed_domains = ["scriptslug.com"]
    current_page = 1  # starting page number
    max_page = 15  # maximum page number to crawl
    start_urls = [
        "https://scriptslug.com/scripts?pg=1",
    ]
    custom_settings = {
        # "ITEM_PIPELINES":{
        #     'tutorial.pipelines.ScriptSlugPipeline': 300,
        # },
        # "DOWNLOADER_MIDDLEWARES":{
        #     'tutorial.middlewares.ScriptSlugMiddleware': 299,
        #     # 'tutorial.middlewares.ProxyMiddleware':301
        # },
        "DEFAULT_REQUEST_HEADERS":{
            'Accept': 'application/json',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'User-Agent':'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Mobile Safari/537.36',
            'Referer':'https://scriptslug.com/',
            'X-Requested-With':"XMLHttpRequest",
            "cookie":"lastCity=101020100; JSESSIONID=""; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1532401467,1532435274,1532511047,1532534098; __c=1532534098; __g=-; __l=l=%2Fwww.zhipin.com%2F&r=; toUrl=https%3A%2F%2Fwww.zhipin.com%2Fc101020100-p100103%2F; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1532581213; __a=4090516.1532500938.1532516360.1532534098.11.3.7.11"
        }
    }

    def parse(self, response):
        """Extract each script's detail-page link, then paginate."""
        # js = json.loads(response.body)
        # html = js['html']
        items = response.xpath('//article/a/@href')
        print(items)
        host = 'https://scriptslug.com'
        x = 1
        y = 1
        for item in items:
            detail_url = item.extract()
            print('extracting href from alink')
            print(item.extract())
            # print(item.extract_first())
            # position_name = item.css('h4::text').extract_first() # position name
            # salary = item.css('.salary::text').extract_first() or '' # salary
            # work_year = item.css('.msg em:nth-child(2)::text').extract_first() or 'any' # years of experience
            # educational = item.css('.msg em:nth-child(3)::text').extract_first() # education level
            # meta = {
            #     "position_name":position_name,
            #     "salary":salary,
            #     "work_year":work_year,
            #     "educational":educational
            # }
            #
            # # time.sleep(int(random.uniform(50, 70)))
            # # initialize redis
            # pool= redis.ConnectionPool(host='localhost',port=6379,decode_responses=True)
            # r=redis.Redis(connection_pool=pool)
            # key = settings.get('REDIS_POSITION_KEY')
            # position_id = url.split("/")[-1].split('.')[0]
            # print('further url:', detail_url)
            # print('key:', key, "value:", position_id);
            # print('parsing item: ...\n')
            # print(meta)
            yield Request(detail_url,callback=self.parse_item)
            # if (r.sadd(key,position_id)) == 1:
            #     yield Request(url,callback=self.parse_item,meta=meta)
        # Follow the next listing page until max_page is reached.
        if self.current_page < self.max_page:
            self.current_page += 1
            api_url = "https://scriptslug.com/scripts"+"?pg="+str(self.current_page)
            # brief random pause between listing pages
            time.sleep(int(random.uniform(1, 5)))
            yield Request(api_url,callback=self.parse)
        pass

    def parse_item(self,response):
        """Find the PDF download link on a script detail page."""
        target = response.css('.script-single__download').xpath('./@href').extract_first()
        print('target pdf...')
        print(target)
        # item = TutorialItem()
        # q = response.css
        # # item['address'] = q('.location-address::text').extract_first()
        # # item['create_time'] = q('.job-tags .time::text').extract_first()
        # # item['body'] = q('.text').xpath('string(.)').extract_first()
        # # # item['body'] = item['body'].encode('utf-8')
        # # # print(item['body'])
        # # item['company_name'] = q('.business-info h4::text').extract_first()
        # # item['postion_id'] = response.url.split("/")[-1].split('.')[0]
        # # item = dict(item, **response.meta )
        # pdf_url = q('.script-single__download').extract_first()
        # print("parsing PDF...:")
        # print(item)
        # yield item
        yield Request(
            url=target,
            callback=self.save_pdf
        )

    def save_pdf(self, response):
        """Write the downloaded PDF body to a file named after the URL tail."""
        path = response.url.split('/')[-1]
        self.logger.info('Saving PDF %s', path)
        with open(path, 'wb') as f:
            f.write(response.body)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds personal-information consent fields to CustomOrder."""

    dependencies = [
        ('shopping', '0027_product_items_in_stock'),
    ]

    operations = [
        migrations.AddField(
            model_name='customorder',
            name='personal_information_consent',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='customorder',
            name='personal_information_consent_date',
            field=models.DateTimeField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name='customorder',
            name='personal_information_consent_years',
            field=models.PositiveIntegerField(default=70),
        ),
    ]
|
"""
The collision engine of the game.
"""
import pygame as pg
from pygame.math import Vector2
from xcape.common.loader import SFX_RESOURCES
from xcape.common.object import GameObject
from xcape.components.audio import AudioComponent
class CollisionEngine(GameObject):
"""
A specialised (non-scalable) collision engine that handles collisions
between all entities in a scene.
"""
def __init__(self, scene):
    """
    :param scene: Scene Class, representing a level.
    """
    self.scene = scene
    # Audio is triggered manually (co-op jump "explosion"), not on load.
    self.audio = AudioComponent(self, isAutoPlay=False)
    self.audio.add("explosion", SFX_RESOURCES["cat_coop_jump"])
def __str__(self):
    # Identifier string for this engine.
    return "collision_engine"
def eventHandler(self, event):
    """Handles key presses: RETURN checks doors; either player's co-op
    jump key triggers the player 'explosion' and its sound effect."""
    if event.type != pg.KEYDOWN:
        return
    if event.key == pg.K_RETURN:
        self.resolveDoorCollisions()
    try:
        playerOne, playerTwo = self.scene.players
        coopKeys = (playerOne.keybinds["coop_jump"],
                    playerTwo.keybinds["coop_jump"])
        if event.key in coopKeys:
            self.resolvePlayerCollisions(30)
            self.audio.state = "explosion"
    except ValueError:
        # Scene without exactly two players: co-op jump does not apply.
        pass
def update(self):
    """Runs all per-frame collision checks, then updates the audio."""
    # Terrain first (walls, platforms), then hazards and the level boundary.
    self.resolveWallCollisions()
    self.resolveSwitchCollisions()
    self.resolveSPlatformCollisions()
    self.resolveDPlatformCollisions()
    self.resolveMPlatformCollisions()
    self.resolveSpikeCollisions()
    self.resolveBossCollisions()
    self.resolveBoundaryCollision()
    self.audio.update()
def resolvePlayerCollisions(self, explosionSpeed):
    """
    Pushes overlapping players apart with an 'explosion' impulse.

    :param explosionSpeed: Integer, the speed at which both players fly
    away from each other in the x-axis and y-axis respectively.
    """
    try:
        playerOne, playerTwo = self.scene.players
        if pg.sprite.collide_rect(playerOne, playerTwo):
            # Both players get an upward kick.
            playerOne.physics.addVelocityY("collision", -explosionSpeed)
            playerTwo.physics.addVelocityY("collision", -explosionSpeed)
            # Horizontally, each player is pushed away from the other.
            if playerTwo.rect.x > playerOne.rect.x:
                leftward, rightward = playerOne, playerTwo
            else:
                leftward, rightward = playerTwo, playerOne
            leftward.physics.addVelocityX("collision", -explosionSpeed)
            rightward.physics.addVelocityX("collision", explosionSpeed)
    except ValueError:
        pass
def resolveWallCollisions(self):
    """Stops players from passing through walls."""
    for somePlayer in self.scene.players:
        self._resolveBasicCollision(somePlayer, self.scene.walls)
def resolveSPlatformCollisions(self):
    """Stops players at static platforms."""
    for somePlayer in self.scene.players:
        self._resolveBasicCollision(somePlayer, self.scene.sPlatforms)
def resolveDPlatformCollisions(self):
    """
    Resolves any directional platform collisions.

    Directional platforms only block from above: a player landing on top
    (within a 30px tolerance) is snapped to the platform surface, while a
    player jumping up through it passes freely.
    """
    for player in self.scene.players:
        hits = pg.sprite.spritecollide(player, self.scene.dPlatforms, False)
        for platform in hits:
            direction = self._checkCollisionDirection(player, platform)
            if direction == "bottom":
                tol = abs(player.rect.bottom - platform.rect.top)
                if tol < 30:
                    player.rect.bottom = platform.rect.top
                    player.isOnGround = True
                    # Allows conservation of velocity if the player jumps through:
                    # only downward (falling) velocity is cancelled.
                    if player.physics.velocity.y > 0:
                        player.physics.velocity.y = 0
def resolveMPlatformCollisions(self):
    """Blocks players at moving platforms and carries them along."""
    for somePlayer in self.scene.players:
        touching = pg.sprite.spritecollide(somePlayer, self.scene.mPlatforms, False)
        self._resolveBasicCollision(somePlayer, self.scene.mPlatforms)
        # Transfer the platform's per-frame displacement to the player.
        for mPlatform in touching:
            somePlayer.physics.addDisplacementX("platform", mPlatform.dx)
            somePlayer.physics.addDisplacementY("platform", mPlatform.dy)
def resolveSwitchCollisions(self):
    """Turns off any active switch that a moving player touches."""
    activeSwitches = [switch for switch in self.scene.switches if switch.isOn]
    for switch in activeSwitches:
        for somePlayer in self.scene.players:
            if not pg.sprite.collide_rect(somePlayer, switch):
                continue
            isMoving = (somePlayer.physics.velocity.x != 0 or
                        somePlayer.physics.velocity.y != 0)
            if isMoving:
                switch.turnOff()
def resolveDoorCollisions(self):
    """Signals level completion when a player reaches a door while no
    door remains closed."""
    for somePlayer in self.scene.players:
        touched = pg.sprite.spritecollide(somePlayer, self.scene.doors, False)
        closedDoors = [door for door in self.scene.doors if door.isClosed]
        if touched and not closedDoors:
            self.messageScene("complete")
def resolveSpikeCollisions(self):
"""
Resolves any spike collisions.
"""
for player in self.scene.players:
hits = pg.sprite.spritecollide(player, self.scene.spikes, False)
if hits:
self.messageScene("death", player.num)
def resolveBossCollisions(self):
"""
Resolves any boss collisions.
"""
for player in self.scene.players:
hits = pg.sprite.spritecollide(player, self.scene.bosses, False)
if hits:
self.messageScene("death", player.num)
def resolveBoundaryCollision(self):
"""
Checks if the players have 'fallen' out of the level.
"""
w, h = self.scene.rect.size
boundary = pg.Rect(-1000, -1000, w+2000, h+2000)
for player in self.scene.players:
if not pg.Rect.contains(boundary, player):
self.messageScene("death", player.num)
def _resolveBasicCollision(self, moving, group):
"""
Resolves any collisions between a moving object and a group of
objects such that the moving object cannot pass through such objects.
:param moving: GameObject instance, representing a moving scene entity.
:param group: List, containing GameObject instance in a scene.
:return:
"""
hits = pg.sprite.spritecollide(moving, group, False)
for wall in hits:
direction = self._checkCollisionDirection(moving, wall)
if direction == "bottom":
moving.rect.bottom = wall.rect.top
moving.physics.velocity.y = 0
moving.isOnGround = True
elif direction == "left":
moving.rect.left = wall.rect.right
moving.physics.velocity.x = 0
elif direction == "top":
moving.rect.top = wall.rect.bottom
moving.physics.velocity.y = 0
elif direction == "right":
moving.rect.right = wall.rect.left
moving.physics.velocity.x = 0
    def _checkCollisionDirection(self, moving, static):
        """
        Checks if the moving game object has collided with the static game
        object, and determines the direction of collision.

        The rect of the static object is divided into four angular sectors
        (by the vectors from its center to its corners); the sector that the
        center-to-center displacement falls into names the collided side.

        :param moving: GameObject instance, representing a moving game object.
        :param static: GameObject instance, representing a static game object.
        :return: String, whether 'bottom', 'left', 'top', or 'right'.
            NOTE(review): implicitly returns None when the rects do not
            overlap, or when the displacement angle lands exactly on a
            sector boundary — confirm callers treat None as "no collision".
        """
        if pg.sprite.collide_rect(moving, static):
            # Defining points on the static game object
            x, y = static.rect.center
            S00 = static.rect.topleft
            S10 = static.rect.topright
            S11 = static.rect.bottomright
            S01 = static.rect.bottomleft
            # Defining points on the moving game object
            u, v = moving.rect.center
            M00 = moving.rect.topleft
            M10 = moving.rect.topright
            M11 = moving.rect.bottomright
            M01 = moving.rect.bottomleft
            # Defining vectors on the static game object which will be used in
            # accurate collision handling. The vectors are from the center of
            # the game object to its corners.
            vec_M00 = Vector2(x - S00[0], y - S00[1])
            vec_M10 = Vector2(x - S10[0], y - S10[1])
            vec_M11 = Vector2(x - S11[0], y - S11[1])
            vec_M01 = Vector2(x - S01[0], y - S01[1])
            # Defining variables for our new coordinate system based on angles
            # (which is mathematically equivalent to bearings)
            FULL_ROTATION = 360
            # All sector angles are measured relative to the top-left corner
            # vector, so angle_00 below is always 0 by construction.
            origin = vec_M00
            # Calculating angles of the static game object vectors
            angle_00 = origin.angle_to(vec_M00) % FULL_ROTATION
            angle_10 = origin.angle_to(vec_M10) % FULL_ROTATION
            angle_11 = origin.angle_to(vec_M11) % FULL_ROTATION
            angle_01 = origin.angle_to(vec_M01) % FULL_ROTATION
            # Calculating the displacement angle between the moving and
            # static game objects
            displacement = Vector2(x - u, y - v)
            angle = origin.angle_to(displacement) % FULL_ROTATION
            # Calculating direction of the collision: each strict inequality
            # pair selects one angular sector of the static rect.
            isCollideBottom = angle_00 < angle < angle_10
            isCollideLeft = angle_10 < angle < angle_11
            isCollideTop = angle_11 < angle < angle_01
            isCollideRight = angle_01 < angle
            if isCollideBottom:
                return "bottom"
            elif isCollideLeft:
                return "left"
            elif isCollideTop:
                return "top"
            elif isCollideRight:
                return "right"
|
from core.advbase import *
def module():
    """Return the adventurer class defined in this module."""
    return Templar_Hope
class Templar_Hope(Adv):
    # Simulation configuration for the adventurer "Templar Hope".
    conf = {}
    # Default equipment for the 'a' slots (names reference data defined
    # elsewhere in the simulator).
    conf['slots.a'] = [
        'The_Shining_Overlord',
        'Flash_of_Genius',
        'Felyne_Hospitality',
        'Sisters_of_the_Anvil',
        'His_Clever_Brother'
    ]
    # Alternate 'a' slot loadout used for the poison scenario.
    conf['slots.poison.a'] = [
        'The_Shining_Overlord',
        'Flash_of_Genius',
        'Brothers_in_Arms',
        'The_Plaguebringer',
        'His_Clever_Brother'
    ]
    # Dragon slot.
    conf['slots.d'] = 'Vayu'
    # Action priority list evaluated by the simulator, highest priority first.
    conf['acl'] = """
        `dragon(c3-s-end), cancel
        `s3, not buff(s3)
        `s4
        `s2, cancel
        `s1, fsc
        `fs, x=2
    """
    # Co-ability providers and skill-share source.
    conf['coabs'] = ['Blade','Dragonyule_Xainfried','Akasha']
    conf['share'] = ['Xander']
if __name__ == '__main__':
    # Fix: `sys` was only available via the star-import side effects of
    # core.advbase; import it explicitly so the script entry point does not
    # depend on another module's import list.
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
import BF_v2
import search_filter
import pickle
from bitarray import bitarray
import os
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from Bio import SeqIO, SeqRecord, Seq
import csv
import Classifier
from OXA_Table import OXATable
#from Bio.Seq import Seq
# Help-Script to generate new default Data, manually train new Bloomfilter, generate svm-data and test the tools
def write_file():
    """Pickles the list of supported international clone (IC) labels to disk."""
    # https://stackoverflow.com/questions/899103/writing-a-list-to-a-file-with-python/899176
    labels = ['IC' + str(n) for n in range(1, 9)]
    with open(r'filter/FilterClonetypes.txt', 'wb') as fp:
        pickle.dump(labels, fp)
def write_file2():
    """Pickles the list of supported bla OXA gene-family labels to disk."""
    # https://stackoverflow.com/questions/899103/writing-a-list-to-a-file-with-python/899176
    oxa_numbers = (23, 24, 51, 58, 134, 143, 211, 213, 214, 229, 286)
    labels = ['bla OXA-' + str(n) for n in oxa_numbers]
    with open(r'filter/OXAs/FilterOXA.txt', 'wb') as fp:
        pickle.dump(labels, fp)
def write_file3():
    """Pickles the list of supported Acinetobacter species epithets to disk."""
    # https://stackoverflow.com/questions/899103/writing-a-list-to-a-file-with-python/899176
    # Species epithets; the order matters because other code indexes into
    # this list to build a confusion matrix.
    itemlist = ["albensis", "apis", "baretiae", "baumannii", "baylyi", "beijerinckii", "bereziniae",
                "bohemicus", "boissieri", "bouvetii", "brisouii", "calcoaceticus",
                "celticus", "chengduensis", "chinensis", "colistiniresistens","courvalinii", "cumulans",
                "defluvii", "dispersus", "equi", "gandensis", "gerneri","gs06","gs16", "guerrae",
                "guillouiae", "gyllenbergii", "haemolyticus", "halotolerans", "harbinensis", "idrijaensis", "indicus",
                "johnsonii", "junii", "kanungonis", "kookii", "kyonggiensis", "lactucae", "lanii", "larvae",
                "lwoffii", "marinus", "modestus", "nectaris", "nosocomialis", "oleivorans", "parvus",
                "piscicola", "pittii", "pollinis", "populi", "portensis", "pseudolwoffii", "pullicarnis",
                "pragensis", "proteolyticus","puyangensis",
                "qingfengensis", "radioresistens", "rathckeae", "rongchengensis", "rudis", "schindleri", "seifertii",
                "seohaensis", "shaoyimingii", "sichuanensis", "soli", "stercoris", "tandoii", "terrae",
                "terrestris", "tianfuensis", "tjernbergiae", "towneri", "ursingii","variabilis", "venetianus",
                "vivianii", "wanghuae", "wuhouensis"]
    with open(r'filter/FilterSpecies.txt', 'wb') as fp:
        pickle.dump(itemlist, fp)
def train():
    """Trains one single-clonetype Bloom filter per OXA-gene fasta file and
    saves each trained filter to disk."""
    fasta_files = [f for f in os.listdir(r'I:\OXA-Gene') if 'fasta' in f]
    for fasta in fasta_files:
        bf = BF_v2.AbaumanniiBloomfilter(80000)
        bf.set_arraysize(80000)
        bf.set_clonetypes(1)
        bf.set_hashes(7)
        bf.set_k(20)
        source = r'I:/OXA-Gene/' + fasta
        name = fasta[:-6] + '.txt'  # strip the '.fasta' extension
        print(name)
        result = r'C:/Users/SG/Desktop/' + name
        bf.train_sequence(source, 0)
        bf.save_clonetypes(result)
        bf.cleanup()
#changed directory
def train_Core():
    """trains (concatenated-)genomes into BF and saves them"""
    genome_files = [f for f in os.listdir(r'F:\project\genomes\totrain')
                    if 'fna' in f or 'fasta' in f]
    for genome in genome_files:
        # Set the Bloom filter parameters for one genome.
        bf = BF_v2.AbaumanniiBloomfilter(115000000)
        bf.set_arraysize(115000000)
        bf.set_clonetypes(1)
        bf.set_hashes(7)
        bf.set_k(20)
        source = r'F:/project/genomes/totrain/' + genome
        # Keep the second-to-last dotted component as the output name.
        name = genome.split('.')[-2] + '.txt'
        print(name)
        result = r'F:/project/results/' + name
        bf.train_sequence(source, 0)
        bf.save_clonetypes(result)
        bf.cleanup()
def opene():
    """Prints the pickled OXA filter label list."""
    with open(r'C:\Users\SG\Desktop\a.baumannii Filter\FilterOXA.txt', 'rb') as fp:
        print(pickle.load(fp))
def openspec():
    """Prints the pickled species labels, one per line."""
    with open(r'C:\Users\Dominik\Uni\SoSe21\PraktikumBA-Arbeit\ClAssT-Acinetobacter-baumannii-Clone-type-Assignment-Tool-master\ClAssT-Acinetobacter-baumannii-Clone-type-Assignment-Tool-master\filter\FilterSpecies.txt', 'rb') as fp:
        species = pickle.load(fp)
    for label in species:
        print(label)
def pw():
    """Prints bcrypt password hashes for the hard-coded credentials
    'user' and 'pwd' (used to seed the web app's login data)."""
    from flask_bcrypt import Bcrypt
    bcrypt = Bcrypt()
    print(bcrypt.generate_password_hash('user'))
    print(bcrypt.generate_password_hash('pwd'))
def Test():
    """Loads a raw bitarray filter file and prints its bit length."""
    bits = bitarray(0)
    with open(r'filter\OXA51_IC1.txt', 'rb') as fh:
        bits.fromfile(fh)
    print(len(bits))
def Test_Core_for_OXA():
    """Looks up known bla OXA-51-like gene sequences against the per-IC
    core-genome Bloom filters and prints one score vector per gene file.
    """
    # Clonetype labels that pair index-wise with the filter files below.
    with open(r'filter/FilterClonetypes.txt', 'rb') as fp:
        clonetypes = pickle.load(fp)
    BF = BF_v2.AbaumanniiBloomfilter(22000000)
    BF.set_arraysize(22000000)
    BF.set_hashes(7)
    BF.set_k(20)
    # User Options
    BF.set_reads(1000)
    # One trained core-genome filter per international clone (IC1-IC8).
    paths = [r'filter/CoreIC1.txt',
             r'filter/CoreIC2.txt',
             r'filter/CoreIC3.txt',
             r'filter/CoreIC4.txt',
             r'filter/CoreIC5.txt',
             r'filter/CoreIC6.txt',
             r'filter/CoreIC7.txt',
             r'filter/CoreIC8.txt']
    BF.read_clonetypes(paths, clonetypes)
    # Known OXA variants grouped by the IC they are associated with.
    Oxa_paths = [r'H:\bla-51-like\IC1\OXA69.fasta',
                 r'H:\bla-51-like\IC1\OXA92.fasta',
                 r'H:\bla-51-like\IC1\OXA107.fasta',
                 r'H:\bla-51-like\IC1\OXA110.fasta',
                 r'H:\bla-51-like\IC2\OXA66.fasta',
                 r'H:\bla-51-like\IC2\OXA82.fasta',
                 r'H:\bla-51-like\IC2\OXA172.fasta',
                 r'H:\bla-51-like\IC2\OXA201.fasta',
                 r'H:\bla-51-like\IC2\OXA202.fasta',
                 r'H:\bla-51-like\IC3\OXA71.fasta',
                 r'H:\bla-51-like\IC3\OXA113.fasta',
                 r'H:\bla-51-like\IC4\OXA51.fasta',
                 r'H:\bla-51-like\IC4\OXA219.fasta',
                 r'H:\bla-51-like\IC5\OXA65.fasta',
                 r'H:\bla-51-like\IC6\OXA90.fasta',
                 r'H:\bla-51-like\IC6\OXA200.fasta',
                 r'H:\bla-51-like\IC7\OXA64.fasta',
                 r'H:\bla-51-like\IC8\OXA68.fasta',
                 r'H:\bla-51-like\IC8\OXA128.fasta']
    for path in Oxa_paths:
        BF.lookup_sequence(path)
        score = BF.get_score()
        print(score)
def csv_helper():
    """Writes the fasta/fna file names of the test set into a helper CSV,
    one file name per row.

    Bug fix: ``writer.writerows(files)`` treated each file name *string* as
    a row sequence, exploding it into one character per cell; each name is
    now wrapped in its own single-column row. Also opens the file with
    ``newline=''`` as the csv module requires (and as test_genomes already
    does) to avoid blank lines on Windows.
    """
    files = [f for f in os.listdir(r'F:\project\test-set')
             if 'fna' in f or 'fasta' in f]
    with open(r'F:/project/csv/help.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerows([name] for name in files)
def distinct_kmer():
    """Creates a fasta file with the distinct k-mers of every species.

    Bug fixes: (1) the output file was opened for append but the *path* was
    passed to ``SeqIO.write``, which reopened it in write mode and ignored
    (and silently discarded) the handle — the handle is now written through,
    honouring append mode; (2) the inner sliding-window loop reused the
    outer loop index ``i``, shadowing it.
    """
    files = [f for f in os.listdir(r'F:\project\genomes\coverage')
             if 'fna' in f or 'fasta' in f]
    paths = [r'F:/project/genomes/coverage/' + f for f in files]
    counter = 0
    for k in [41, 51]:  # k-mer lengths to extract
        for idx in range(len(files)):
            counter += 1
            print(files[idx])
            records = []
            kmers = []
            for sequence in SeqIO.parse(paths[idx], "fasta"):
                # Slide a window of length k over the sequence.
                for pos in range(len(sequence.seq) - k + 1):
                    kmers.append(str(sequence.seq[pos: pos + k]))
            # dict.fromkeys removes duplicates while preserving order.
            distinct = list(dict.fromkeys(kmers))
            print(len(distinct))
            for kmer in distinct:
                records.append(SeqRecord.SeqRecord(Seq.Seq(kmer)))
            out_path = (r"F:/project/genomes/coverage/result/distinct_complete_"
                        + str(k) + "kmer_" + str(counter) + ".fasta")
            with open(out_path, "a") as output_handle:
                SeqIO.write(records, output_handle, "fasta")
def coverage_plot(min_coverage=7, max_coverage=1000):
    """creates a coverage plot from a histo-file, used for kmer-specific research

    :param min_coverage: lower slice bound applied to the coverage/frequency
        columns read from each histo file.
    :param max_coverage: upper slice bound for the same columns.
    """
    #http://voorloopnul.com/blog/kmer-analysis-with-python-part-1/
    files = os.listdir(r'F:\project\genomes\coverage\result\results\histo')
    paths = files[:]
    for i in range(len(files)):
        paths[i] = r'F:/project/genomes/coverage/result/results/histo/' + paths[i]
    for i in range(len(files)):
        ext = files[i].split('.')[-2]
        # Skip selected datasets (identified by substring in the file name).
        if "14" in ext or "16" in ext or "18" in ext:
            continue
        else:
            with open(paths[i]) as file:
                data = file.readlines()
            # Each histo line is "<coverage> <frequency>".
            dataset = [entry.replace("\n", "") for entry in data]
            dataset = [entry.split(" ") for entry in dataset]
            #print(dataset)
            coverage = [int(entry[0]) for entry in dataset][min_coverage:max_coverage]
            frequency = [int(entry[1]) for entry in dataset][min_coverage:max_coverage]
            higher_frequency = max(frequency)
            plt.plot(coverage, frequency, label="%s"%(ext[-6:],))
    # NOTE(review): higher_frequency holds the maximum of the *last* plotted
    # file only; confirm that is the intended y-axis limit.
    leg = plt.legend(loc='lower right', ncol=1, shadow=True, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.ylabel('Frequency')
    plt.xlabel('Coverage')
    #plt.title(ext)
    plt.ylim(0, higher_frequency)
    plt.title("Acinetobacter-Coverage comparison with different kmer-lengths")
    plt.show()
def count_kmer():
    """Creates multiple coverage plots from a set of genomes.

    Bug fix: the run-length scan over ``kmers_freq_sorted`` iterated ``i``
    up to the last index while comparing against ``[i + 1]``, raising
    IndexError whenever the frequencies never stopped decreasing; the range
    now ends one element early. Inner loops no longer shadow the outer
    file index.
    """
    # https://stackoverflow.com/questions/2600191/how-can-i-count-the-occurrences-of-a-list-item
    # https://stackoverflow.com/questions/20950650/how-to-sort-counter-by-value-python
    # https://pythonexamples.org/python-sort-list-of-tuples/
    files = [f for f in os.listdir(r'F:\project\genomes\totrain')
             if 'fna' in f or 'fasta' in f]
    paths = [r'F:/project/genomes/totrain/' + f for f in files]
    for idx in range(len(files)):
        print(files[idx])
        ext = files[idx].split('.')[-2]
        kmers = []
        intermediat_kmers = []
        # Collect every 20-mer of every sequence in the genome.
        for sequence in SeqIO.parse(paths[idx], "fasta"):
            for pos in range(len(sequence.seq) - 20 + 1):
                kmers.append(str(sequence.seq[pos: pos + 20]))
        kmers_freq = Counter(kmers)
        # Frequency-of-frequencies: how many distinct kmers occur n times.
        kmers_freq_freq = []
        kmers_no_duplicates = list(dict.fromkeys(kmers))
        for n in range(len(kmers_freq)):
            kmers_freq_freq.append(kmers_freq[kmers_no_duplicates[n]])
        kmers_freq_freq = Counter(kmers_freq_freq)
        kmers_freq_sorted = kmers_freq_freq.most_common()
        kmers_freq_sorted.sort(key=lambda x: x[0])
        kmer_index = 0
        print(kmers_freq_freq)
        print(kmers_freq_sorted)
        # Walk while the frequency curve is strictly decreasing.
        for n in range(1, len(kmers_freq_sorted) - 1):
            if kmers_freq_sorted[n][1] > kmers_freq_sorted[n + 1][1]:
                kmer_index += 1
            else:
                break
        for y in kmers:
            if kmers_freq[y] > kmer_index:
                intermediat_kmers.append(y)
        intermediat_kmers = list(dict.fromkeys(intermediat_kmers))
        x_achse = [pair[0] for pair in kmers_freq_sorted]
        y_achse = [pair[1] for pair in kmers_freq_sorted]
        plt.plot(x_achse[kmer_index - 1:-1], y_achse[kmer_index - 1:-1])
        plt.ylabel('frequency')
        plt.xlabel('kmer-coverage')
        plt.title('Distribution of ' + ext + '-kmers in Acinetobacter species')
        plt.savefig(r'F:/project/kmere-distr/kmer-coverage/full/' + (ext + ".png"))
        plt.clf()
def test_genomes():
    """performs a BF-lookup for a set of genomes for testing purpose

    Reads every fasta/fna genome from the test-set directory, classifies it
    with the species Bloom filters + Classifier, accumulates a row-normalized
    confusion matrix keyed by ``itemlist``, and writes one CSV line per
    genome (GCF number, score vector, true name, prediction).
    """
    # Species labels; index-aligned with the confusion matrix below.
    itemlist = ["albensis", "apis", "baretiae", "baumannii", "baylyi", "beijerinckii", "bereziniae",
                "bohemicus", "boissieri", "bouvetii", "brisouii", "calcoaceticus",
                "celticus", "chengduensis", "chinensis", "colistiniresistens","courvalinii", "cumulans",
                "defluvii", "dispersus", "equi", "gandensis", "gerneri","gs06","gs16", "guerrae",
                "guillouiae", "gyllenbergii", "haemolyticus", "halotolerans", "harbinensis", "idrijaensis", "indicus",
                "johnsonii", "junii", "kanungonis", "kookii", "kyonggiensis", "lactucae", "lanii", "larvae",
                "lwoffii", "marinus", "modestus", "nectaris", "nosocomialis", "oleivorans", "parvus",
                "piscicola", "pittii", "pollinis", "populi", "portensis", "pseudolwoffii", "pullicarnis",
                "pragensis", "proteolyticus","puyangensis",
                "qingfengensis", "radioresistens", "rathckeae", "rongchengensis", "rudis", "schindleri", "seifertii",
                "seohaensis", "shaoyimingii", "sichuanensis", "soli", "stercoris", "tandoii", "terrae",
                "terrestris", "tianfuensis", "tjernbergiae", "towneri", "ursingii","variabilis", "venetianus",
                "vivianii", "wanghuae", "wuhouensis", "sp."]
    # NOTE(review): best_match is only referenced from commented-out code below.
    best_match = ["GCF_000587995",
                  "GCF_000588015",
                  "GCF_000588095",
                  "GCF_000588255",
                  "GCF_000588335",
                  "GCF_000588395",
                  "GCF_000588415",
                  "GCF_000588515",
                  "GCF_000588535",
                  "GCF_000588595",
                  "GCF_000588715",
                  "GCF_000589115",
                  "GCF_000589135",
                  "GCF_000589155",
                  "GCF_000589175",
                  "GCF_000589215",
                  "GCF_000589235",
                  "GCF_000589255",
                  "GCF_000589335",
                  "GCF_000620525",
                  "GCF_000681815",
                  "GCF_000682155",
                  "GCF_000682355",
                  "GCF_000682615",
                  "GCF_000705635",
                  "GCF_000788125",
                  "GCF_000800865",
                  "GCF_001005515",
                  "GCF_001005525",
                  "GCF_001054715",
                  "GCF_001077515",
                  "GCF_001276505",
                  "GCF_001278715",
                  "GCF_001423205",
                  "GCF_001425285",
                  "GCF_001471615",
                  "GCF_001541635",
                  "GCF_001592855",
                  "GCF_001605865",
                  "GCF_001720685",
                  "GCF_001729365",
                  "GCF_001866125",
                  "GCF_001878775",
                  "GCF_002018895",
                  "GCF_002018925",
                  "GCF_002251565",
                  "GCF_002412355",
                  "GCF_002797085",
                  "GCF_002797235",
                  "GCF_002797255",
                  "GCF_002899995",
                  "GCF_002918965",
                  "GCF_002919885",
                  "GCF_002934965",
                  "GCF_900110445",
                  "GCF_900110525"
                  ]
    print("Preparing BloomFilter...")
    BF = search_filter.pre_processing()
    print("Collecting Input-Data...")
    #files = os.listdir(r'F:\project\genomes\all_genomes_edit')
    files = os.listdir(r'F:\project\test-set')
    #files = os.listdir(r'F:\project\test-set\not_Acinetobacter')
    #files = os.listdir(r'F:\project\genomes\unclassified')
    # Keep only fasta/fna files (iterate backwards so deletion is safe).
    for i in range(len(files) -1, -1, -1):
        if 'fna' in files[i] or 'fasta' in files[i]:
            continue
        else:
            del files[i]
    paths = files[:]
    for i in range(len(files)):
        #paths[i] = r'F:/project/genomes/all_genomes_edit/' + paths[i]
        paths[i] = r'F:/project/test-set/' + paths[i]
        #paths[i] = r'F:/project/test-set/not_Acinetobacter/' + paths[i]
        #paths[i] = r'F:/project/genomes/unclassified/' + paths[i]
    names = []
    print("Saving Species-Names...")
    # The species epithet is assumed to be the third token of the fasta
    # header line. NOTE(review): the bare except also hides I/O errors.
    for i in range(len(files)):
        with open(paths[i]) as file:
            head = file.readline()
            head = head.split()
        try:
            names.append(head[2])
        except:
            names.append("NameError")
    GCF_numbers = []
    print("Saving GCF_Numbers...")
    # The accession (e.g. GCF_000587995) precedes the first dot.
    for i in range(len(files)):
        GCF = files[i].split('.')[0]
        GCF_numbers.append(GCF)
    print("Starting Taxonomic Assignment on Species-Level...")
    predictions = []
    scores = []
    #scores_plot
    # Confusion matrix: rows = true label, columns = predicted label.
    # NOTE(review): dimension 83 should match len(itemlist) — confirm.
    test = [[0 for i in range(83)] for j in range(83)]
    for i in range(len(files)):
        # Coarse progress indicator at ~1/6, 1/3, 1/2, 2/3, 5/6.
        if i == int(len(files)/6) or i == int(len(files)/3) or i == int(len(files)/2) or i == int(len(files)/1.5) or i == int(len(files)/1.2):
            print("...")
        # Reset per-genome lookup state before sampling kmers.
        BF.number_of_kmeres = 0
        BF.hits_per_filter = [0] * BF.clonetypes
        # Sample one kmer every 500 bases of every sequence.
        for sequence in SeqIO.parse(paths[i], "fasta"):
            for j in range(0, len(sequence.seq) - BF.k, 500):
                BF.number_of_kmeres += 1
                BF.lookup(str(sequence.seq[j: j + BF.k]))
        score = BF.get_score()
        score_edit = [str(x) for x in score]
        score_edit = ",".join(score_edit)
        scores.append(score_edit)
        prediction = Classifier.classify(r'Training_data/Training_data_spec+none_new.csv', score, False)
        predictions.append(prediction)
        # if GCF_numbers[i] in best_match:
        #names[i] = prediction
        if names[i] in itemlist:
            test[itemlist.index(names[i])][itemlist.index(prediction)] += 1
        else:
            # Header name not in the known species list: excluded from the matrix.
            print(GCF_numbers[i])
            print("Falscher Name, aus Test ausgeschlossen")
        #scores_plot.append([name[i]] + [prediction] + score)
    #scores_plot = sorted(scores_plot, key = lambda h: h[0])
    #for i in range(len(files)):
        #for j in range(len(files)):
            #if scores_plot[i][0] = scores_plot[j][0]:
                # ## TODO:
            #else:
                # break
    # Row-normalize the confusion matrix; empty rows (sum 0) are skipped.
    for i in range(len(test)):
        summe = sum(test[i])
        for j in range(len(test[0])):
            try:
                test[i][j] = test[i][j] / summe
            except:
                continue
    print("Assignment Done...")
    print("Prepare Result-Data...")
    excel = []
    for i in range(len(files)):
        excel.append(GCF_numbers[i] + "," + scores[i] + "," + names[i] + "," + predictions[i])
    for i in range(len(excel)):
        excel[i] = [excel[i]]
    with open(r'F:/project/csv/Test-Set_20.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(excel)
    print("Finished!")
def test():
    """Renders an (all-zero) confusion-matrix heatmap labelled with the
    species list — a quick visual smoke test of the seaborn layout."""
    itemlist = ["albensis", "apis", "baretiae", "baumannii", "baylyi", "beijerinckii", "bereziniae",
                "bohemicus", "boissieri", "bouvetii", "brisouii", "calcoaceticus",
                "celticus", "chengduensis", "chinensis", "colistiniresistens","courvalinii", "cumulans",
                "defluvii", "dispersus", "equi", "gandensis", "gerneri","gs06","gs16", "guerrae",
                "guillouiae", "gyllenbergii", "haemolyticus", "halotolerans", "harbinensis", "idrijaensis", "indicus",
                "johnsonii", "junii", "kanungonis", "kookii", "kyonggiensis", "lactucae", "lanii", "larvae",
                "lwoffii", "marinus", "modestus", "nectaris", "nosocomialis", "oleivorans", "parvus",
                "piscicola", "pittii", "pollinis", "populi", "portensis", "pseudolwoffii", "pullicarnis",
                "pragensis", "proteolyticus","puyangensis",
                "qingfengensis", "radioresistens", "rathckeae", "rongchengensis", "rudis", "schindleri", "seifertii",
                "seohaensis", "shaoyimingii", "sichuanensis", "soli", "stercoris", "tandoii", "terrae",
                "terrestris", "tianfuensis", "tjernbergiae", "towneri", "ursingii","variabilis", "venetianus",
                "vivianii", "wanghuae", "wuhouensis", "sp."]
    # NOTE(review): x is never used — the heatmap below plots the zero matrix.
    x = [[2,1,1,1,],
         [1,1,5,1,],
         [1,1,1,1,],
         [1,1,1,1,],
         [1,1,1,3,]]
    test = [[0 for i in range(83)] for j in range(83)]
    plt.figure(figsize=(20,15))
    ax = sns.heatmap(test, xticklabels = itemlist, yticklabels = itemlist, cmap = "Blues", linewidth=1)
    ax.set_yticklabels(ax.get_yticklabels(), fontsize=7)
    plt.show()
def main():
    """Dispatcher for the manual helper tasks in this script: uncomment the
    task(s) to run. Currently only pw() (print bcrypt hashes) is enabled."""
    #Test_Core_for_OXA()
    #write_file()
    #opene()
    #openspec()
    #write_file2()
    pw()
    #train_Core()
    #write_file3()
    #write_file4()
    #histo()
    #count_kmer()
    #distinct_kmer()
    #coverage_plot()
    #csv_helper()
    #test_genomes()
    #test()
# Script entry point: run whichever helper is currently enabled in main().
if __name__ == '__main__':
    main()
|
__author__ = 'ipetrash'
|
import logging
import os
import subprocess
import sys
from .cd import current_directory
from .filesystem import TempDir
from .utility import Style
def __log_check_output(cmd, verbosity, **kwargs):
    """Logs cmd at the given verbosity, runs it with stderr discarded, and
    returns its decoded stdout. A string command is run through the shell;
    a list is executed directly."""
    use_shell = not isinstance(cmd, list)
    with open(os.devnull, 'w') as devnull:
        logging.log(verbosity, __format_command(cmd))
        raw = subprocess.check_output(cmd, stderr=devnull, shell=use_shell, **kwargs)
    return raw.decode()
def __log_check_call(cmd, verbosity, **kwargs):
    """Logs and runs cmd, raising on a nonzero exit code. Output is
    silenced unless verbosity meets the root logger's effective level."""
    use_shell = not isinstance(cmd, list)
    with open(os.devnull, 'w') as devnull:
        logging.log(verbosity, __format_command(cmd))
        quiet = verbosity < logging.getLogger().getEffectiveLevel()
        if quiet:
            subprocess.check_call(cmd, stderr=devnull, shell=use_shell,
                                  stdout=devnull, **kwargs)
        else:
            subprocess.check_call(cmd, stderr=subprocess.STDOUT,
                                  shell=use_shell, **kwargs)
def command(cmd, verbosity=logging.INFO, environment_overrides=None):
    """Log and run *cmd*, raising CalledProcessError on failure.

    :param cmd: command string (run via shell) or argument list.
    :param verbosity: logging level used for the command echo.
    :param environment_overrides: optional mapping merged over os.environ.

    Bug fix: the default was a mutable ``{}`` shared across calls and
    mutated downstream (``__environment`` inserts 'PWD'); use the None
    sentinel instead.
    """
    __log_check_call(cmd, verbosity, env=__environment(environment_overrides or {}))
def command_output(cmd, verbosity=logging.INFO, environment_overrides=None):
    """Log and run *cmd*, returning its decoded stdout.

    :param cmd: command string (run via shell) or argument list.
    :param verbosity: logging level used for the command echo.
    :param environment_overrides: optional mapping merged over os.environ.

    Bug fixes: mutable-default ``{}`` replaced with the None sentinel, and
    the duplicate ``logging.log`` call removed — ``__log_check_output``
    already logs the command, so it was echoed twice per invocation.
    """
    return __log_check_output(cmd, verbosity, env=__environment(environment_overrides or {}))
def command_sequence(cmds, verbosity=logging.INFO, environment_overrides=None):
    """Run a list of shell command lines as one script, raising on failure.

    On Windows the lines are written to a temporary ``script.cmd`` and run
    via ``cmd /c call``; elsewhere they are fed to ``sh -c`` prefixed with
    ``set -ex`` so the script echoes and stops on the first error.

    :param cmds: list of command-line strings.
    :param verbosity: logging level controlling whether output is shown.
    :param environment_overrides: optional mapping merged over os.environ.

    Bug fix: mutable-default ``{}`` replaced with the None sentinel (the
    dict is mutated downstream by ``__environment``).
    """
    environment_overrides = environment_overrides or {}
    with open(os.devnull, 'w') as devnull:
        # Silence output unless the requested verbosity is visible.
        stderr = devnull if verbosity < logging.getLogger().getEffectiveLevel() else subprocess.STDOUT
        stdout = devnull if verbosity < logging.getLogger().getEffectiveLevel() else None
        if sys.platform == 'win32':
            with TempDir() as d:
                path = os.path.join(d, 'script.cmd')
                with open(path, 'wb') as f:
                    f.write('\r\n'.join(cmds).encode())
                subprocess.check_call(['cmd', '/c', 'call', path], stderr=stderr, stdout=stdout, env=__environment(environment_overrides))
        else:
            subprocess.check_call(['sh', '-c', '\n'.join(['set -ex'] + cmds)], stderr=stderr, stdout=stdout, env=__environment(environment_overrides))
def __environment(environment_overrides):
    """Return a string-valued environment dict: os.environ overlaid with
    *environment_overrides* plus a 'PWD' entry for the current directory.

    Bug fix: the caller's dict was mutated in place (the 'PWD' insertion
    leaked back into caller-owned — and previously default-shared — dicts);
    work on a copy instead.
    """
    overrides = dict(environment_overrides)
    overrides['PWD'] = current_directory()
    env = os.environ.copy()
    env.update(overrides)
    # subprocess requires all env values to be strings.
    return {key: str(value) for key, value in env.items()}
def __format_command(cmd):
    """Wraps the command in bright terminal styling for log output."""
    return f"{Style.BRIGHT}{cmd}{Style.RESET_ALL}"
|
import datetime

from flask import render_template, request, redirect, url_for, abort
from flask_login import login_required, current_user

from .. import db, photos
from ..models import User, Blog, Category, Comment, Subcription
from ..requests import get_quotes
from . import main
from .forms import UpdateProfile, BlogForm, CategoryForm, CommentForm, SubscribeForm
@main.route('/')
def index():
    '''
    View root page function that returns the index page and its data
    '''
    new_category = Category.query.all()
    blogs = Blog.query.all()
    # Bug fix: the original called get_quotes(category) with an undefined
    # name, raising NameError on every request.
    # NOTE(review): confirm get_quotes takes no required arguments.
    quote = get_quotes()
    return render_template('index.html', quote=quote, blogs=blogs, new_category=new_category)
@main.route('/add/category', methods=['GET', 'POST'])
@login_required
def new_category():
    '''
    view new group route function that returns a page with a form to create a category
    '''
    form = CategoryForm()
    if form.validate_on_submit():
        category = Category(name=form.name.data)
        category.save_category()
        return redirect(url_for('.index'))
    return render_template('new_category.html', Category_form=form, title='New category')
@main.route('/categories/<int:id>')
def category(id):
    '''Displays the category with the given id and all blogs filed under it.'''
    found = Category.query.get(id)
    category_blogs = Blog.query.filter_by(category=id).all()
    return render_template('category.html', blogs=category_blogs, category=found)
@main.route('/categories/view_blog/add/<int:id>', methods=['GET', 'POST'])
@login_required
def new_blog(id):
    '''
    Handles the new-blog form for the category with the given id.
    '''
    target = Category.query.filter_by(id=id).first()
    if target is None:
        abort(404)
    form = BlogForm()
    if form.validate_on_submit():
        blog = Blog(content=form.content.data, category=target.id,
                    user_id=current_user.id)
        blog.save_blog()
        return redirect(url_for('.category', id=target.id))
    return render_template('new_blog.html', title='New Blog',
                           blog_form=form, category=target)
@main.route('/user/<uname>/blogs', methods=['GET', 'POST'])
def user_blogs(uname):
    '''Displays all blogs written by the given user.

    Robustness fix: an unknown username previously raised AttributeError
    (``user.id`` on None); it now returns 404, consistent with profile().
    '''
    user = User.query.filter_by(username=uname).first()
    if user is None:
        abort(404)
    blogs = Blog.query.filter_by(user_id=user.id).all()
    return render_template('blog.html', user=user, blogs=blogs)
@main.route('/categories/view_blog/<int:id>', methods=['GET', 'POST'])
@login_required
def view_blog(id):
    '''
    Shows a single blog with its comments and handles new comment submission.

    Bug fixes vs. the original: it referenced undefined names (``blog``,
    ``Comments``, ``return_template``), called a nonexistent method
    ``xxxtime`` (clearly a mangled ``strftime``), and returned before the
    comment form was ever processed, leaving the rest unreachable.
    '''
    blogs = Blog.query.get(id)
    if blogs is None:
        abort(404)
    # NOTE(review): assumes Blog.posted is a datetime — confirm in models.
    posted_date = blogs.posted.strftime('%b,%d,%Y')
    form = CommentForm()
    if form.validate_on_submit():
        new_comment = Comment(comment=form.text.data, user=current_user,
                              blog_id=blogs.id)
        new_comment.save_comment()
    comments = Comment.get_comments(id)
    return render_template('blog.html', blogs=blogs, comment=comments,
                           category_id=id, comment_form=form, date=posted_date)
@main.route('/user/<uname>')
def profile(uname):
    '''Renders the profile page for the given username, or 404s.'''
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    return render_template("profile/profile.html", user=account)
@main.route('/subscribe/', methods=['GET', 'POST'])
def subscribe():
    '''
    Lets a visitor subscribe to the blog by email.

    Bug fix: the original instantiated ``Subscription``, but the model
    imported at the top of this module is named ``Subcription`` — the
    mismatch raised NameError on every submit. ``SubscribeForm`` is now
    imported from .forms (see the import block).
    NOTE(review): confirm the model's (misspelled) name in ..models.
    '''
    form = SubscribeForm()
    if form.validate_on_submit():
        subscription = Subcription(email=form.email.data)
        db.session.add(subscription)
        db.session.commit()
        return redirect(url_for('main.index'))
    return render_template('subscribe.html', form=form)
@main.route('/delete/<int:id>', methods=['GET', 'POST'])
def delete(id):
    '''Deletes the blog with the given id and returns to the index.

    Robustness fix: a missing id previously passed None to
    ``db.session.delete``; it now returns 404.
    NOTE(review): this route has no login/ownership check — anyone can
    delete any blog; consider adding @login_required and an author check.
    '''
    blog = Blog.query.filter_by(id=id).first()
    if blog is None:
        abort(404)
    db.session.delete(blog)
    db.session.commit()
    return redirect(url_for('.index'))
@main.route('/user/<uname>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname):
    '''Lets a logged-in user update the bio on their profile.'''
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    form = UpdateProfile()
    if not form.validate_on_submit():
        return render_template('profile/update.html', form=form)
    account.bio = form.bio.data
    db.session.add(account)
    db.session.commit()
    return redirect(url_for('.profile', uname=account.username))
@main.route('/user/<uname>/update/pic', methods=['POST'])
@login_required
def update_pic(uname):
    '''Saves an uploaded profile photo for the given user.

    Bug fix: the stored path was the literal string ``photos/(unknown)``
    for every upload (a mangled f-string); it now records the actual saved
    file name. Also 404s on an unknown username instead of raising
    AttributeError.
    '''
    user = User.query.filter_by(username=uname).first()
    if user is None:
        abort(404)
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        user.profile_pic_path = f'photos/{filename}'
        db.session.commit()
    return redirect(url_for('main.profile', uname=uname))
|
from bs4 import BeautifulSoup
from checklib import *
import secrets
import requests
PORT = 5000
class CheckMachine:
    """HTTP driver for a service checker: registers and logs in users,
    creates threads, posts flags, and retrieves them via thread links and
    invite links."""

    @property
    def url(self):
        # Base URL of the checked service.
        return f'http://{self.host}:{self.port}'

    def __init__(self, host, port):
        # Target host/port of the service instance under check.
        self.host = host
        self.port = port

    def register_in_service(self):
        """Registers a fresh random user; returns the credential dict."""
        register_url = f'{self.url}/register'
        login = secrets.choice(('Sunlover', 'Pomo', 'Johnny', 'alagunto', 'Kekov'))
        login = login + '_' + rnd_username()
        password = rnd_password()
        data = {
            'login': login,
            'password': password
        }
        # A successful registration redirects (302).
        r = requests.post(url=register_url, data=data, allow_redirects=False)
        assert_eq(r.status_code, 302, "Can't register in service")
        return data

    def login_in_service(self, login, password):
        """Logs in with the given credentials; returns the authenticated session."""
        login_url = f'{self.url}/login'
        session = requests.Session()
        # Fetch the login page first (also primes session cookies).
        r = session.get(url=login_url, allow_redirects=True)
        assert_eq(r.status_code, 200, "Can't login in service")
        login_data = {
            'login': login,
            'password': password
        }
        r = session.post(url=login_url, data=login_data, allow_redirects=True)
        assert_eq(r.status_code, 200, "Can't login in service")
        return session

    def create_threads_and_get_links(self, session):
        """Creates a randomly named thread and returns all thread links
        found on the resulting page."""
        thread_name = 'Thread_' + rnd_string(10)
        create_url = f'{self.url}/create'
        r = session.post(url=create_url, data={ 'name': thread_name })
        assert_eq(r.status_code, 200, "Can't create thread")
        html = r.text
        soup = BeautifulSoup(html, 'html.parser')
        links = soup.find_all('a')
        result_links = []
        for link in links:
            if 'threads' in link['href']:
                result_links.append(link['href'])
        return result_links

    def put_flags_and_get_inv_links(self, session, links, flag):
        """Posts the flag into a thread and returns its invite link.

        NOTE(review): the return sits inside the loop, so only the FIRST
        link receives the flag and the cquit below only runs when ``links``
        is empty — confirm whether all links should be processed.
        """
        for link in links:
            thread_url = self.url + link
            r = session.post(thread_url, data={ 'text': flag })
            assert_eq(r.status_code, 200, "Can't add message to thread")
            soup = BeautifulSoup(r.text, 'html.parser')
            # The invite link is assumed to be the last anchor on the page.
            invite = soup.find_all('a')[-1]['href']
            return invite
        cquit(Status.MUMBLE, "Can't get invite link")

    def find_by_thread_link(self, session, link):
        """Returns the flag strings scraped from a thread page."""
        thread_url = self.url + link
        r = session.get(thread_url)
        assert_eq(r.status_code, 200, "Can't find thread page")
        soup = BeautifulSoup(r.text, 'html.parser')
        # Messages are <h4> elements; the last one is excluded, and the flag
        # is the last whitespace-separated token of each message.
        flags = soup.find_all('h4')[:-1]
        flags = [x.string for x in flags]
        flags = [x.split()[-1] for x in flags]
        return flags

    def find_flags_by_login_and_pass(self, session):
        """Returns the flags of the session's most recently listed thread."""
        list_url = f'{self.url}/list'
        r = session.get(list_url)
        assert_eq(r.status_code, 200, "Can't get list of threads")
        soup = BeautifulSoup(r.text, 'html.parser')
        links = soup.find_all('a')
        # The newest thread is assumed to be the last anchor on the page.
        thread = links[-1]['href']
        return self.find_by_thread_link(session, thread)

    def find_threads_by_invite(self, session, invite):
        """Joins a thread via its invite link and returns the thread link."""
        inv_url = self.url + invite
        r = session.get(inv_url)
        assert_eq(r.status_code, 200, "Can't get thread by invite link")
        r = session.get(f'{self.url}/list')
        soup = BeautifulSoup(r.text, 'html.parser')
        thread_link = soup.find_all('a')[-1]['href']
        return thread_link

    def file_upload(self, session, file_content):
        """Uploads content under a random file name; returns that name."""
        file_name = rnd_string(15)
        files = { 'file': (file_name, file_content) }
        upload_url = f'{self.url}/upload'
        # A successful upload redirects (302).
        r = session.post(upload_url, files=files, allow_redirects=False)
        assert_eq(r.status_code, 302, "Can't upload file")
        return file_name

    def file_download(self, session, file_name):
        """Downloads a previously uploaded file and returns its text."""
        r = session.get(f'{self.url}/uploads/{file_name}')
        assert_eq(r.status_code, 200, "Can't download file")
        return r.text
|
import pytest
from app import ic
from app.auth import Group, UserMod
from app.tests.auth_test import VERIFIED_EMAIL_DEMO
@pytest.mark.fixtures
@pytest.mark.skip
def test_fixtures(loop, client, passwd):
    """Seeds the app via its fixture endpoints (groups/permissions, users,
    options) when missing, then logs in the demo user.

    Currently @skip'd; relies on the ``loop``, ``client`` and ``passwd``
    fixtures provided by the test suite's conftest.
    """
    async def group_count():
        return await Group.all().count()
    async def get_user():
        return await UserMod.get_or_none(email=VERIFIED_EMAIL_DEMO).only('id')

    # Only initialize groups/permissions when the table is empty.
    count = loop.run_until_complete(group_count())
    if not count:
        # Init
        res = client.get('/fixtures/init')
        data = res.json()
        assert data
        ic('SUCCESS: Groups and Permissions')

    # Only create the demo user when it does not exist yet.
    usermod = loop.run_until_complete(get_user())
    if not usermod:
        # Users
        res = client.get('/fixtures/users')
        data = res.json()
        assert isinstance(data, dict)
        ic('SUCCESS: Users data created')
        userid = data.get("id")
        ic(userid)

        # Options
        res = client.get('/fixtures/options')
        data = res.json()
        assert data
        ic('SUCCESS: Options data created')

    # Login with the verified demo account and show the issued token.
    d = dict(username=VERIFIED_EMAIL_DEMO, password=passwd)
    res = client.post('/authentication/login', data=d)
    data = res.json()
    ic(f'SUCCESS: Login completed.')
    access_token = data.get('access_token')
    ic(access_token)
|
"""
flatlanddb.py - Loads the existing flatland database
"""
import sys
import logging
import logging.config
from pathlib import Path
from sqlalchemy import create_engine, MetaData
from sqlalchemy import event
from sqlalchemy.engine import Engine
from sqlite3 import Connection as SQLite3Connection
@event.listens_for(Engine, "connect")
def _set_sqlite_pragma(dbapi_connection, connection_record):
    """Enables SQLite foreign-key enforcement on every new DBAPI connection
    (SQLite ships with it off by default); other backends are untouched."""
    if not isinstance(dbapi_connection, SQLite3Connection):
        return
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()
def Create_relvars():
    """
    A relvar is a relational variable as defined by C.J. Date and Hugh Darwen.
    In the world of SQL it is effectively a table. Here we define all the relvars
    and then have the corresponding table schemas populated into the Sqlalchemy
    metadata.
    """
    # Imported locally, presumably to break an import cycle between this
    # module and flatland.database.relvars — TODO confirm
    from flatland.database import relvars
    # relvars.define returns the {relvar name: Sqlalchemy Table} dictionary
    FlatlandDB.Relvars = relvars.define(FlatlandDB)
    # Emit CREATE TABLE for every table registered in the metadata
    FlatlandDB.MetaData.create_all(FlatlandDB.Engine)
def Populate():
    """
    Assign a value to each Flatland relvar (table). A value consists of a set of
    relations; in Sqlalchemy terms each table is bulk-inserted with its initial
    row data.

    Population modules live in subdirectories of the adjacent ``population``
    directory. Each is named ``<relvar>_instances.py`` and exposes a
    ``population`` attribute holding the rows to insert.
    """
    # The relvar dictionary doesn't record which subdirectory holds each
    # population module, so every subdirectory is added to the module
    # search path before importing by bare module name.
    population_root = Path(__file__).parent / "population"
    for subdir in ("connector", "decorator", "drawing", "node"):
        sys.path.append(str(population_root / subdir))
    # Import each population module and bulk-insert its rows into its table.
    for name, relvar in FlatlandDB.Relvars.items():
        module = __import__(name + '_instances')
        FlatlandDB.Connection.execute(relvar.insert(), module.population)
class FlatlandDB:
    """
    Flatland database containing all predefined Flatland data. We want to avoid having any predefined
    data declared in the code itself.
    Here we use Sqlalchemy to create the database engine and connection
    Attributes
    - File -- Local directory location of the sqlite3 database file
    - LogFile -- Local directory location of the sql statement log file
    - MetaData -- Sqlalchemy metadata
    - Connection -- Sqlalchemy database connection
    - Engine -- Sqlalchemy database engine
    - Relvars -- Dictionary of all relvar names and values (table names and row populations)
    """
    # Shared (class-level) handles: the DB is a process-wide singleton, so
    # every instance writes into these class attributes rather than self.
    File = Path(__file__).parent / "flatland.db"
    LogFile = Path(__file__).parent / "db.log"
    MetaData = None
    Connection = None
    Engine = None
    Relvars = None
    def __init__(self, rebuild: bool):
        """
        Create the sqlite3 database using Sqlalchemy
        :param rebuild: During development this will usually be true. For deployment it should be false.
        """
        self.logger = logging.getLogger(__name__)
        self.rebuild = rebuild
        if self.rebuild:  # DB rebuild requested
            # Start with a fresh database
            if FlatlandDB.File.exists():
                FlatlandDB.File.unlink()
        else:  # No rebuild requested
            if FlatlandDB.File.exists():
                self.logger.info("Using existing database")
            else:  # We're going to have to rebuild it anyway
                self.rebuild = True
                self.logger.info("No db file, rebuilding flatland database")
        db_path_str = str( FlatlandDB.File )
        # Configure sql logger: statements go to a dedicated file so they
        # don't flood the main flatland log
        db_file_handler = logging.FileHandler(FlatlandDB.LogFile, 'w')
        # db_file_handler.setLevel(logging.DEBUG)
        dblogger = logging.getLogger('sqlalchemy.engine')
        dblogger.setLevel(logging.DEBUG)
        dblogger.addHandler(db_file_handler)
        dblogger.propagate = False  # To keep sql events from bleeding into the flatland log
        FlatlandDB.Engine = create_engine(f'sqlite:///{db_path_str}', echo=False)
        FlatlandDB.Connection = FlatlandDB.Engine.connect()
        FlatlandDB.MetaData = MetaData(FlatlandDB.Engine)
        if self.rebuild:
            # Fresh file: define every relvar schema, then load initial rows
            self.logger.info(f"Re-creating database file at: {db_path_str}")
            Create_relvars()
            Populate()
        else:
            # Just interrogate the existing database to get all the relvar/table names
            FlatlandDB.MetaData.reflect()
if __name__ == "__main__":
    # Rebuild the database.
    # BUGFIX: FlatlandDB.__init__ requires the `rebuild` flag; the previous
    # bare FlatlandDB() call raised TypeError (missing positional argument).
    # Running as a script is the "rebuild from scratch" workflow, so pass True.
    FlatlandDB(rebuild=True)
|
# Importing 3-rd party modules:
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Importing base web objects:
from base_objects import BaseWebPageResponse, BaseWebPageIngestionEngine
class EDGARResultsPageResponse(BaseWebPageResponse):
    """
    This is a class that inherits from the BaseWebPageResponse object and represents
    the results of the SEC's EDGAR response page.

    It contains all of the internal methods for extracting relevant data from
    the EDGAR Search Results page of a specific ticker url. The url used to
    initialize the object can be input directly or built using an external
    CIK# to url API. The Attributes listed below are the additional attributes
    that are added to the Base class BaseWebPageResponse.

    Attributes:
        _addr_business (str): A string representing the Business Address of the
            company extracted from the BeautifulSoup object via the
            __extract_address() method.
        _addr_mail (str): A string representing the Mailing Address of the
            company extracted from the BeautifulSoup object via the
            __extract_address() method.
        _reports_tbl (pandas dataframe): A dataframe containing all the contents
            that were extracted from the main search results table on the EDGAR
            company reports page. The dataframe contains the following columns:
    ---------------------------------------------------------------------------------------------------------
    |filing|filing_description|filing_date|file_id|report_contents_html|report_contents_txt|report_data_href|
    |------------------------------------------------------------------|-------------------|----------------|
    | str  |       str        |    str    |  str  |BeautifulSoup Object|        str        |      str       |
    ---------------------------------------------------------------------------------------------------------
    """
    def __init__(self, url, **kwargs):
        """Initialize the base page response then extract EDGAR-specific data.

        Args:
            url (str): The EDGAR search results url for a specific company.
            **kwargs: Passed through unchanged to BaseWebPageResponse.
        """
        # Re-defining url and kwargs to facilitate pass-through to parent object:
        self.kwargs = kwargs
        self._url = url
        # Initializing the base class (builds self._html_body — TODO confirm
        # against BaseWebPageResponse):
        super().__init__(url, **kwargs)
        # Company Header Information. BUGFIX: the address block is parsed once
        # and both entries read from the resulting dict; previously
        # __extract_address() was called twice, re-parsing the soup redundantly.
        addresses = self.__extract_address()
        self._addr_mail = addresses['mailing'] or 'NaN'
        self._addr_business = addresses['business'] or 'NaN'
        # Company Filing Table Information:
        self._reports_tbl = self.__extract_company_report_data()  # Pandas dataframe
    def __extract_address(self):
        '''
        The internal method that parses the main BeautifulSoup object for the
        <div> tags containing the strings of the company's address information.

        The EDGAR result page contains two addresses (Business and Mailing
        Address). This method extracts both Business and Mailing address div
        tags and concats each of these addresses into a len(2) dictionary as:
        {'mailing': mailing_address, 'business': business_address}.

        Returns:
            dict: The two length dict containing both the business and mailing address.
        '''
        # Searching the main soup for the tag <div class='mailer'>:
        mailer_div_tags = self._html_body.find_all('div', class_='mailer')
        # Iterating through each of the <div class='mailer'> and concating string
        # via list comprehension: [Mailing Address, Business Address]
        concat_addr_str = [
            ' '.join(mailer_div_tag.text.split()) for mailer_div_tag in mailer_div_tags]
        # Returning the dict {'mailing', 'business'}:
        return {
            # Formatting address string to remove 'title markers':
            'mailing': concat_addr_str[0].replace('Mailing Address', ''),
            'business': concat_addr_str[1].replace('Business Address', '')}
    def __extract_company_report_data(self):
        '''
        Method extracts and pre-processes all the data associated with the
        table of reports filed by the company on the EDGAR web page.

        In addition to extracting the textual data from basic rows: 'Filings',
        'Filing Date' etc the method also navigates to the pages containing the
        associated full document in HTML format as well as the summary information
        provided in the 'Interactive Data' page stored in .csv format. Both the
        document and the .csv are stored within a pandas dataframe alongside the
        basic textual data.

        Returns:
            pandas dataframe: The dataframe containing all relevant information
            extracted from the table of reports filed on the EDGAR results page.

        Raises:
            AssertionError: If the table headers do not match the expected
                layout (a signal the EDGAR page format has changed).
        '''
        # Extracting the table from the main webpage soup:
        html_table = self._html_body.find('table', class_='tableFile2')
        # Extracting a list of table row objects <tr> from the table:
        tbl_row_lst = html_table.find_all('tr')
        # First <tr> in tbl_row_lst are headers. Using headers to authenticate
        # rest of table:
        tbl_headers = [
            # Creating a list of the text of each table row:
            tbl_row.text for tbl_row in tbl_row_lst.pop(0).find_all('th')]
        # Validating the headers up front (guard clause keeps the extraction
        # loop un-nested):
        if tbl_headers != ['Filings', 'Format', 'Description', 'Filing Date', 'File/Film Number']:
            raise AssertionError('Table Headers Do Not Match- EDGAR Reports table format may have changed')
        # TODO: If Logging- Log the headers validation passed.
        # Creating list to be populated by each row:
        row_lst = []
        # Building a list of lists for each row in the table [[row_1],... [row_n]]
        # where each row = [cell_1, cell_2, cell_3, cell_4, str(cell_4), cell_5]:
        for table_row in tbl_row_lst:
            # Extracting all cells for each row:
            table_cells = table_row.find_all('td')
            # Formatting 'Description' Cell:
            description = " ".join(table_cells[2].text.split())
            # Formatting Filing Date cell:
            filing_date = table_cells[3].text
            # Formatting File Number:
            file_num = table_cells[4].text or 'NaN'
            # Extracting the data from the 'Format' cell:
            format_doc = self.__extract_report_html(
                table_cells[1].find('a', id='documentsbutton')['href'])
            # BUGFIX: only a real soup object supports get_text(); the old
            # `or 'NaN'` fallback produced a str that then crashed on
            # .get_text(). Also '\n' replaces the previous '/n' separator typo.
            if format_doc:
                format_doc_txt = format_doc.get_text('\n') or 'NaN'
            else:
                format_doc = 'NaN'
                format_doc_txt = 'NaN'
            # The 'Interactive Data' button may be absent: find() returns None
            # and the subscript raises TypeError. The narrowed except replaces
            # a bare `except:` that could mask unrelated errors:
            try:
                format_data_interactive = self.__extract_report_csv(
                    table_cells[1].find('a', id='interactiveDataBtn')['href'])
            except (TypeError, KeyError):
                format_data_interactive = 'NaN'
            # Creating internal list of cells from table_cells list of len(5):
            row = [
                table_cells[0].text,  # 'Filings' cell needs no Formatting
                description,  # The description of the Report
                filing_date,  # The date the report was filed
                file_num,  # The SEC internal file number for the report
                format_doc,  # The full HTML contents of the report.
                format_doc_txt,  # The textual content of the report.
                format_data_interactive  # The link to the tabular data of the report
            ]
            # Adding row to the list of rows:
            row_lst.append(row)
        # Converting the list of rows into a pandas dataframe:
        report_df = pd.DataFrame(row_lst, columns=[
            'filing', 'filing_description', 'filing_date', 'file_id',
            'report_contents_html', 'report_contents_txt', 'report_data_href'])
        return report_df
    def __extract_report_html(self, report_href):
        '''
        A method that takes the href to the full report document extracted by the
        __extract_company_report_data() method and extracts the full HTML file of
        the report.

        The href parameter routes to the Document Format Files page. The method
        navigates to the href on said page that leads to the report, displayed in
        the SEC's Inline XBRL Viewer. It manipulates the href into a direct link
        to the html version of the report. This html page is extracted and returned
        as a bs4 object.

        Args:
            report_href (str): The href extracted from a previous bs4 object in
                string format. This method operates under the assumption that the
                href is extracted from the main EDGAR results page and leads to
                the SEC's Inline XBRL Viewer.

        Returns:
            BeautifulSoup obj: A bs4 object containing all the html contents of the
            report.

        Raises:
            AssertionError: If the Document Format Files table header does not
                match the expected layout.
        '''
        # Appending href onto core url to make functional url:
        doc_selector_url = 'https://www.sec.gov' + report_href
        # Sending GET request to new webpage and converting contents to bs4 object:
        docs_page = BeautifulSoup(requests.get(doc_selector_url).content, 'html.parser')
        # Extracting the <table summary = 'Document Format Files'> from the page:
        doc_format_table = docs_page.find('table', summary='Document Format Files')
        # Extracting the .htm href from the Document format table. Row=2, Col=3:
        table_rows = doc_format_table.find_all('tr')
        # Converting table header to list of strings for validation:
        table_header = [header.text for header in table_rows[0].find_all('th')]
        # Using the headers of the tables: table_rows[0] for validation (guard
        # clause, consistent with __extract_company_report_data):
        if table_header != ['Seq', 'Description', 'Document', 'Type', 'Size']:
            raise AssertionError('The Table Header for Document Format Files Failed. The Layout May have changed')
        # Extracting the href from the second cell of Row 2 of the table:
        document_url = 'https://www.sec.gov' + table_rows[1].find('a')['href']
        # Dropping the '/ix?doc=' from the href if it is there so that only HTML
        # content is returned:
        if '/ix?doc=' in document_url:
            document_url = document_url.replace('/ix?doc=', '')
        # Performing a GET request for the full report in HTML:
        report_response = requests.get(document_url)
        # returning the bs4 object of the HTTP response's content:
        return BeautifulSoup(report_response.content, 'html.parser')
    def __extract_report_csv(self, report_csv_href):
        '''
        A method that extracts a .csv of all summary data from the report href.

        The method takes the an href to the report's 'Interactive Data' page. It
        extracts the href to the .xlsx file containing the tabular data from the
        main report (previously extracted via the __extract_report_html() method).
        It returns the full url for the .xlsx file.

        Args:
            report_csv_href (str): The href to the 'Filings Data' page. It is
                assumed that hrefs passed into this method are extracted from the
                main company reports page's 'Interactive Data' button.

        Returns:
            str: The full url of the .xlsx file.
        '''
        # Building a full url to the 'filing Data' page:
        filing_data_url = 'https://www.sec.gov' + report_csv_href
        # Sending GET request to the page and converting contents to bs4 object:
        filing_data_page = BeautifulSoup(requests.get(filing_data_url).content,
            'html.parser')
        # Parsing the filing data page for the .xlsx download href:
        # Assumes only two <a class='xbrlviewer'> on page:
        xlsx_href = filing_data_page.find_all('a', class_='xbrlviewer')[1]['href']
        # Creating and returning the download url for the .xlsx file:
        return 'https://www.sec.gov' + xlsx_href
# Test:
# test = EDGARResultsPageResponse('https://www.sec.gov/cgi-bin/browse-edgar', params={'CIK':'0000320193'})
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-20 05:40
from __future__ import unicode_literals
import books.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: Profile.photo now derives its upload path via
    # the callable books.models.getProfilePath instead of a static upload_to.

    dependencies = [
        ('books', '0008_auto_20170420_0531'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='photo',
            # blank=True keeps the field optional in forms/validation; the
            # callable upload_to defers path construction to getProfilePath.
            field=models.ImageField(blank=True, upload_to=books.models.getProfilePath),
        ),
    ]
|
import copy
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import seaborn as sns
from sklearn.metrics import r2_score
import torch
from tqdm import tqdm
from behavenet import get_user_dir
from behavenet import make_dir_if_not_exists
from behavenet.fitting.utils import experiment_exists
from behavenet.fitting.utils import get_best_model_and_data
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_lab_example
from behavenet.fitting.utils import get_session_dir
from behavenet.fitting.cond_ae_utils import apply_masks, collect_data
def get_predicted_labels(lrs, latents):
    """Predict every label from the latents and stack predictions column-wise.

    Parameters
    ----------
    lrs : list
        fitted regression objects, one per label; each exposes ``predict``
    latents : array-like, shape (n_frames, n_latents)

    Returns
    -------
    np.ndarray
        predictions of shape (n_frames, n_labels)
    """
    columns = [regressor.predict(latents)[:, None] for regressor in lrs]
    return np.hstack(columns)
def fit_regression(model, data_generator, label_names, dtype='val', fit_full=False):
    """Fit regression model from latent space to markers.

    One RidgeCV regression per label is fit on the training split of the
    model's latents, then R^2/MSE are evaluated per trial on the ``dtype``
    split. For every trial/label a constant-mean "baseline" row (MSE of
    predicting the training mean) is appended as well.

    Parameters
    ----------
    model : model whose latents are regressed onto the labels
    data_generator : generator consumed by ``collect_data``
    label_names : list of str, one entry per label dimension
    dtype : str, data split to evaluate on (e.g. 'val', 'test')
    fit_full : bool, forwarded to ``collect_data`` — presumably selects the
        full latent space rather than a subspace; TODO confirm

    Returns
    -------
    tuple
        - pandas DataFrame with Trial/Session/Label/R2/MSE/Model columns
        - list of fitted regression objects, one per label
    """
    from sklearn.linear_model import RidgeCV as Ridge
    n_labels = len(label_names)
    print('collecting training labels and latents')
    ys_tr, zs_tr, masks_tr, trials_tr, sessions_tr = collect_data(
        data_generator, model, dtype='train', fit_full=fit_full)
    print('done')
    print('collecting %s labels and latents' % dtype)
    ys, zs, masks, trials, sessions = collect_data(
        data_generator, model, dtype=dtype, fit_full=fit_full)
    print('done')
    print('fitting linear regression model with training data')
    # Flatten the per-trial lists into single (n_frames, ...) matrices
    ys_mat = np.concatenate(ys_tr, axis=0)
    zs_mat = np.concatenate(zs_tr, axis=0)
    masks_mat = np.concatenate(masks_tr, axis=0)
    lrs = []
    for i in range(n_labels):
        print('label %i/%i' % (i + 1, n_labels))
        # Fit on only the frames where label i is valid (per-label mask);
        # RidgeCV picks the regularization strength by 5-fold CV
        lrs.append(Ridge(alphas=(0.01, 0.1, 1, 10, 100, 1000, 10000, 100000), cv=5).fit(
            apply_masks(zs_mat, masks_mat[:, i]), apply_masks(ys_mat[:, i], masks_mat[:, i])))
    print('done')
    # y_baseline = np.mean(ys_mat, axis=0)
    # Baseline predictor: per-label training mean over the masked frames
    y_baseline = np.array(
        [np.mean(apply_masks(ys_mat[:, i], masks_mat[:, i]), axis=0) for i in range(n_labels)])
    print('computing r2 on %s data' % dtype)
    metrics_df = []
    for i_test in tqdm(range(data_generator.n_tot_batches[dtype])):
        for i in range(n_labels):
            y_true = apply_masks(ys[i_test][:, i], masks[i_test][:, i])
            # Skip tiny trials: R^2 on <= 10 points is too noisy to report
            if len(y_true) > 10:
                y_pred = lrs[i].predict(apply_masks(zs[i_test], masks[i_test][:, i]))
                r2 = r2_score(y_true, y_pred, multioutput='variance_weighted')
                mse = np.mean(np.square(y_true - y_pred))
            else:
                r2 = np.nan
                mse = np.nan
            metrics_df.append(pd.DataFrame({
                'Trial': trials[i_test],
                'Session': sessions[i_test][0],
                'Label': label_names[i],
                'R2': r2,
                'MSE': mse,
                'Model': model.hparams['model_class']}, index=[0]))
            # NOTE(review): the baseline MSE is computed even for the tiny
            # trials skipped above (using the short y_true) — confirm intended
            mse_base = np.mean(np.square(y_true - y_baseline[i]))
            metrics_df.append(pd.DataFrame({
                'Trial': trials[i_test],
                'Session': sessions[i_test][0],
                'Label': label_names[i],
                'R2': 0,
                'MSE': mse_base,
                'Model': 'baseline'}, index=[0]))
    print('done')
    return pd.concat(metrics_df, sort=True), lrs
def compute_r2(
        type, hparams, model, data_generator, version, label_names, dtype='val', overwrite=False,
        save_results=True):
    """Compute (or load cached) per-trial R^2/MSE between labels and latents.

    Results are cached as ``r2_<type>.csv`` inside the model's version
    directory; fitted regression models (for the 'unsupervised'/'full' types)
    are pickled alongside. When the csv already exists and ``overwrite`` is
    False, everything is loaded from disk instead of recomputed.

    Parameters
    ----------
    type
        'supervised' | 'unsupervised' | 'full'
        (NOTE: shadows the ``type`` builtin; kept for caller compatibility)
    hparams
        dict with at least 'expt_dir'
    model
        trained model; 'supervised' mode calls ``get_transformed_latents``
    data_generator
    version
        model version number used to locate the results directory
    label_names
        list of str, one per label dimension
    dtype
        data split to evaluate on
    overwrite
        recompute even when cached results exist
    save_results
        write csv (and pickled regressions) after computing

    Returns
    -------
    tuple
        (metrics DataFrame, list of regression models or None)
    """
    n_labels = len(label_names)
    save_file = os.path.join(hparams['expt_dir'], 'version_%i' % version, 'r2_%s.csv' % type)
    # Only the regression-based modes produce models worth pickling
    if type == 'unsupervised':
        model_file = os.path.join(os.path.dirname(save_file), 'regressions.pkl')
    elif type == 'full':
        model_file = os.path.join(os.path.dirname(save_file), 'regressions_full.pkl')
    else:
        model_file = None
    if not os.path.exists(save_file) or overwrite:
        if not os.path.exists(save_file):
            print('R^2 metrics do not exist; computing from scratch')
        else:
            print('overwriting metrics at %s' % save_file)
        if type == 'supervised':
            # The model predicts labels directly from its supervised latents,
            # so no post-hoc regression is fit (lrs stays None)
            metrics_df = []
            lrs = None
            data_generator.reset_iterators(dtype)
            for i_test in tqdm(range(data_generator.n_tot_batches[dtype])):
                # get next minibatch and put it on the device
                data, sess = data_generator.next_batch(dtype)
                x = data['images'][0]
                y = data['labels'][0].cpu().detach().numpy()
                if 'labels_masks' in data:
                    n = data['labels_masks'][0].cpu().detach().numpy()
                else:
                    # No mask provided: treat every label entry as valid
                    n = np.ones_like(y)
                z = model.get_transformed_latents(x, dataset=sess)
                for i in range(n_labels):
                    y_true = apply_masks(y[:, i], n[:, i])
                    y_pred = apply_masks(z[:, i], n[:, i])
                    # Skip tiny trials: R^2 on <= 10 points is too noisy
                    if len(y_true) > 10:
                        r2 = r2_score(y_true, y_pred, multioutput='variance_weighted')
                        mse = np.mean(np.square(y_true - y_pred))
                    else:
                        r2 = np.nan
                        mse = np.nan
                    metrics_df.append(pd.DataFrame({
                        'Trial': data['batch_idx'].item(),
                        'Session': sess,
                        'Label': label_names[i],
                        'R2': r2,
                        'MSE': mse,
                        'Model': model.hparams['model_class']}, index=[0]))
            metrics_df = pd.concat(metrics_df)
        elif type == 'unsupervised':
            metrics_df, lrs = fit_regression(
                model, data_generator, label_names, dtype=dtype)
        elif type == 'full':
            metrics_df, lrs = fit_regression(
                model, data_generator, label_names, dtype=dtype, fit_full=True)
        else:
            raise NotImplementedError
        print('done')
        if save_results:
            print('saving results to %s' % save_file)
            metrics_df.to_csv(save_file, index=False, header=True)
            if type == 'unsupervised' or type == 'full':
                print('saving models to %s' % model_file)
                with open(model_file, 'wb') as f:
                    pickle.dump(lrs, f)
    else:
        # Cached path: load metrics (and regressions when applicable)
        print('loading results from %s' % save_file)
        metrics_df = pd.read_csv(save_file)
        if model_file is not None:
            print('loading regression models from %s' % model_file)
            with open(model_file, 'rb') as f:
                lrs = pickle.load(f)
        else:
            lrs = None
    return metrics_df, lrs
def plot_reconstruction_traces(
        traces, names, save_file=None, xtick_locs=None, frame_rate=None, format='png',
        scale=0.5, max_traces=8, add_r2=False, add_legend=True):
    """Plot latents and their neural reconstructions.

    The first entry of ``traces`` is treated as the reference (ground truth,
    drawn in black with a thicker line); all traces are centered/scaled by the
    reference's statistics and offset vertically per dimension.

    Parameters
    ----------
    traces : :obj:`list`
        each entry is of shape (n_frames, n_dims)
    names : :obj:`list` of :obj:`str`
        one legend label per entry of ``traces``
    save_file : :obj:`str`, optional
        full save file (path and filename)
    xtick_locs : :obj:`array-like`, optional
        tick locations in units of bins
    frame_rate : :obj:`float`, optional
        frame rate of behavorial video; to properly relabel xticks
    format : :obj:`str`, optional
        any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'
    scale : :obj:`float`, optional
        scale magnitude of traces
    max_traces : :obj:`int`, optional
        maximum number of traces to plot, for easier visualization
        NOTE(review): currently unused by the function body
    add_r2 : :obj:`bool`, optional
        print R2 value on plot (currently disabled; see commented block below)
    add_legend : :obj:`bool`, optional
        draw a legend mapping colors to ``names``

    Returns
    -------
    :obj:`matplotlib.figure.Figure`
        matplotlib figure handle
    """
    import matplotlib.pyplot as plt
    import matplotlib.lines as mlines
    import seaborn as sns
    sns.set_style('white')
    sns.set_context('poster')
    assert len(traces) == len(names)
    # Normalize by the reference trace's per-dim mean and overall std
    means = np.nanmean(traces[0], axis=0)
    stds = np.nanstd(traces[0]) / scale  # scale for better visualization
    # Fall back to the second trace's mean for dims that are all-NaN in the
    # reference (mutates `means` in place)
    for m, mean in enumerate(means):
        if np.isnan(mean):
            means[m] = np.nanmean(traces[1][:, m])
    traces_sc = []
    for trace in traces:
        traces_sc.append((trace - means) / stds)
    fig = plt.figure(figsize=(12, 8))  # (12, 6) for ps-vae paper
    # Reference trace is plotted first in black and thicker; remaining traces
    # cycle through the default color cycle
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color'][:(len(traces) - 1)]
    colors.insert(0, '#000000')
    linewidths = [2] * len(colors)
    linewidths[0] = 4
    for trace_sc, color, linewidth in zip(traces_sc, colors, linewidths):
        # Adding arange offsets each dimension vertically onto its own lane
        plt.plot(trace_sc + np.arange(trace_sc.shape[1]), linewidth=linewidth, color=color)
    # add legend
    if add_legend:
        lines = []
        for color in colors:
            lines.append(mlines.Line2D([], [], color=color, linewidth=3, alpha=0.7))
        plt.legend(
            lines, names, loc='lower right', frameon=True, framealpha=0.7, edgecolor=[1, 1, 1])
    # if add_r2:
    #     from sklearn.metrics import r2_score
    #     r2 = r2_score(traces_ae, traces_neural, multioutput='variance_weighted')
    #     plt.text(
    #         0.05, 0.06, '$R^2$=%1.3f' % r2, horizontalalignment='left', verticalalignment='bottom',
    #         transform=plt.gca().transAxes,
    #         bbox=dict(facecolor='white', alpha=0.7, edgecolor=[1, 1, 1]))
    # Relabel x axis in seconds when a frame rate is available
    if xtick_locs is not None and frame_rate is not None:
        if xtick_locs[0] / frame_rate < 1:
            # Sub-second ticks: keep fractional labels
            plt.xticks(xtick_locs, (np.asarray(xtick_locs) / frame_rate))
        else:
            plt.xticks(xtick_locs, (np.asarray(xtick_locs) / frame_rate).astype('int'))
        plt.xlabel('Time (s)')
    else:
        plt.xlabel('Time (bins)')
    plt.ylabel('Latent state')
    plt.yticks([])
    if save_file is not None:
        make_dir_if_not_exists(save_file)
        plt.savefig(save_file + '.' + format, dpi=300, format=format)
    plt.show()
    return fig
def plot_label_latent_regressions(
        lab, expt, animal, session, alpha, beta, gamma, n_ae_latents, rng_seed_model,
        label_names, sssvae_experiment_name, vae_experiment_name, delta=None,
        models=('sss-vae-s', 'vae'),
        dtype='test', measure='r2', save_results=True, overwrite=False,
        save_file=None, format='pdf', **kwargs):
    """Regress labels on latents for several model variants and plot R^2/MSE.

    For each entry of ``models`` the corresponding trained model is loaded,
    ``compute_r2`` is run ('supervised', 'unsupervised' or 'full' depending on
    the variant suffix), and the per-trial metrics are combined into a single
    dataframe (with one shared 'baseline' entry taken from the vae results)
    and shown as a seaborn bar plot.

    Parameters
    ----------
    lab, expt, animal, session : identifiers used to locate the session data
    alpha, beta, gamma, delta : model hyperparameters
    n_ae_latents : int, number of unsupervised latents (labels are added on top)
    rng_seed_model : int, model seed (NOTE: overridden to 0 for 'ps-*' models,
        preserving historical behavior)
    label_names : list of str
    sssvae_experiment_name, vae_experiment_name : experiment names on disk
    models : sequence of str, e.g. 'vae', 'sss-vae-s', 'ps-vae-u', 'msps-vae'
        (default changed from a mutable list to a tuple; callers may still
        pass lists)
    dtype : data split to evaluate on
    measure : 'r2' to plot R^2 (positive values only), anything else for MSE
    save_results, overwrite : forwarded to ``compute_r2``
    save_file, format : optional figure output location/format
    **kwargs : forwarded to the hparams builders

    Returns
    -------
    pandas DataFrame of the combined per-trial metrics
    """
    # ------------------------------------------
    # perform regressions
    # ------------------------------------------
    metrics_df = {}
    for model in models:
        print()
        print(model)
        # get model
        if model == 'vae':
            # BUGFIX: this branch used _get_psvae_hparams without importing it;
            # the local imports only happened in the branches below, so a
            # models list starting with 'vae' raised NameError.
            from behavenet.plotting.cond_ae_utils import _get_psvae_hparams
            hparams = _get_psvae_hparams(
                model_class='ps-vae', alpha=alpha, beta=beta, gamma=gamma,
                n_ae_latents=n_ae_latents,
                experiment_name=sssvae_experiment_name, rng_seed_model=rng_seed_model, **kwargs)
            hparams_vae = copy.deepcopy(hparams)
            hparams_vae['model_class'] = 'vae'
            # The vanilla vae gets the full latent budget (unsupervised +
            # label + background dims) so capacities are comparable
            hparams_vae['n_ae_latents'] = \
                n_ae_latents + len(label_names) + hparams.get('n_background', 0)
            hparams_vae['experiment_name'] = vae_experiment_name
            hparams_vae['vae.beta'] = 1
            hparams_vae['vae.beta_anneal_epochs'] = 100
            # programmatically fill out other hparams options
            get_lab_example(hparams_vae, lab, expt)
            if 'sessions_csv' not in hparams_vae:
                hparams_vae['animal'] = animal
                hparams_vae['session'] = session
            hparams_vae['session_dir'], sess_ids = get_session_dir(hparams_vae)
            hparams_vae['expt_dir'] = get_expt_dir(hparams_vae)
            version = 0
            hparams_vae['n_sessions_per_batch'] = 1
            # use data_gen from another model so labels are loaded
            # NOTE(review): data_gen is only created by the non-vae branches,
            # so 'vae' must not be the first entry of `models` — confirm callers
            model_vae, _ = get_best_model_and_data(hparams_vae, load_data=False, version=version)
            model_vae.eval()
        elif model[:3] == 'sss':
            from behavenet.plotting.cond_ae_utils import _get_sssvae_hparams
            hparams = _get_sssvae_hparams(
                model_class='sss-vae', alpha=alpha, beta=beta, gamma=gamma,
                n_ae_latents=n_ae_latents,
                experiment_name=sssvae_experiment_name, rng_seed_model=rng_seed_model, **kwargs)
            hparams['n_ae_latents'] += len(label_names) + hparams.get('n_background', 0)
            # programmatically fill out other hparams options
            get_lab_example(hparams, lab, expt)
            if 'sessions_csv' not in hparams:
                hparams['animal'] = animal
                hparams['session'] = session
            hparams['session_dir'], sess_ids = get_session_dir(hparams)
            hparams['expt_dir'] = get_expt_dir(hparams)
            _, version = experiment_exists(hparams, which_version=True)
            hparams['n_sessions_per_batch'] = 1
            model_ae, data_gen = get_best_model_and_data(hparams, load_data=True, version=version)
            model_ae.eval()
        elif model[:2] == 'ps':
            from behavenet.plotting.cond_ae_utils import _get_psvae_hparams
            rng_seed_model = 0
            hparams = _get_psvae_hparams(
                model_class='ps-vae', alpha=alpha, beta=beta, gamma=gamma,
                n_ae_latents=n_ae_latents,
                experiment_name=sssvae_experiment_name, rng_seed_model=rng_seed_model, **kwargs)
            hparams['n_ae_latents'] += len(label_names) + hparams.get('n_background', 0)
            # programmatically fill out other hparams options
            get_lab_example(hparams, lab, expt)
            if 'sessions_csv' not in hparams:
                hparams['animal'] = animal
                hparams['session'] = session
            hparams['session_dir'], sess_ids = get_session_dir(hparams)
            hparams['expt_dir'] = get_expt_dir(hparams)
            _, version = experiment_exists(hparams, which_version=True)
            hparams['n_sessions_per_batch'] = 1
            model_ae, data_gen = get_best_model_and_data(hparams, load_data=True, version=version)
            model_ae.eval()
        elif model[:4] == 'msps':
            from behavenet.plotting.cond_ae_utils import _get_psvae_hparams
            hparams = _get_psvae_hparams(
                model_class='msps-vae', alpha=alpha, beta=beta, delta=delta,
                n_ae_latents=n_ae_latents,
                experiment_name=sssvae_experiment_name, rng_seed_model=rng_seed_model, **kwargs)
            hparams['n_ae_latents'] += len(label_names) + hparams['n_background']
            # programmatically fill out other hparams options
            get_lab_example(hparams, lab, expt)
            # hparams['animal'] = animal
            # hparams['session'] = session
            hparams['session_dir'], sess_ids = get_session_dir(hparams)
            hparams['expt_dir'] = get_expt_dir(hparams)
            _, version = experiment_exists(hparams, which_version=True)
            hparams['n_sessions_per_batch'] = 1
            model_ae, data_gen = get_best_model_and_data(hparams, load_data=True, version=version)
            model_ae.eval()
        else:
            raise Exception
        # Dispatch to the right compute_r2 mode for this model variant
        if model == 'vae':
            m, lrs_vae = compute_r2(
                'unsupervised', hparams_vae, model_vae, data_gen, version, label_names,
                dtype=dtype, overwrite=overwrite, save_results=save_results)
            metrics_df[model] = m
        elif model == 'sss-vae-u' or model == 'ps-vae-u' or model == 'msps-vae-u':
            m, lrs_sss = compute_r2(
                'unsupervised', hparams, model_ae, data_gen, version, label_names,
                dtype=dtype, overwrite=overwrite, save_results=save_results)
            metrics_df[model] = m
        elif model == 'sss-vae-s' or model == 'ps-vae-s' or model == 'msps-vae-s':
            metrics_df[model], _ = compute_r2(
                'supervised', hparams, model_ae, data_gen, version, label_names,
                dtype=dtype, overwrite=overwrite, save_results=save_results)
        elif model == 'sss-vae' or model == 'ps-vae' or model == 'msps-vae':
            metrics_df[model], lrs_sssf = compute_r2(
                'full', hparams, model_ae, data_gen, version, label_names,
                dtype=dtype, overwrite=overwrite, save_results=save_results)
        else:
            raise Exception
    # ------------------------------------------
    # collect results
    # ------------------------------------------
    trials = metrics_df[models[0]].Trial.unique()
    m0 = 'vae'  # models[0] if (models[0] != 'sss-vae-s' or models[0] != 'ps-vae-s') else models[1]
    # make new dataframe that combines two outputs
    metrics_dfs = []
    for label in label_names:
        for j in trials:
            for model in models:
                # Map the internal variant keys to human-readable legend names
                if model == 'sss-vae-u':
                    model_ = 'sss-vae (unsuper. subspace)'
                elif model == 'sss-vae-s':
                    model_ = 'sss-vae (super. subspace)'
                elif model == 'ps-vae-u':
                    model_ = 'ps-vae (unsuper. subspace)'
                elif model == 'ps-vae-s':
                    model_ = 'ps-vae (super. subspace)'
                elif model == 'msps-vae-u':
                    model_ = 'msps-vae (unsuper. subspace)'
                elif model == 'msps-vae-s':
                    model_ = 'msps-vae (super. subspace)'
                else:
                    model_ = model
                df = metrics_df[model][
                    (metrics_df[model].Trial == j)
                    & (metrics_df[model].Label == label)
                    & ~(metrics_df[model].Model == 'baseline')]
                sessions = df.Session.unique()
                if len(sessions) > 1:
                    # Multi-session fits keep the session id in the output
                    for sess in sessions:
                        mse = df[df.Session == sess].MSE.values[0]
                        r2 = df[df.Session == sess].R2.values[0]
                        metrics_dfs.append(pd.DataFrame({
                            'Trial': j,
                            'Session': int(sess),
                            'Label': label,
                            'R2': r2,
                            'MSE': mse,
                            'Model': model_}, index=[0]))
                else:
                    mse = df.MSE.values[0]
                    r2 = df.R2.values[0]
                    metrics_dfs.append(pd.DataFrame({
                        'Trial': j,
                        'Label': label,
                        'R2': r2,
                        'MSE': mse,
                        'Model': model_}, index=[0]))
                if model_ == 'vae':
                    # construct baseline once (taken from the vae run's metrics)
                    df = metrics_df[m0][
                        (metrics_df[m0].Trial == j)
                        & (metrics_df[m0].Label == label)
                        & (metrics_df[m0].Model == 'baseline')]
                    if len(sessions) > 1:
                        for sess in sessions:
                            mse = df[df.Session == sess].MSE.values[0]
                            r2 = df[df.Session == sess].R2.values[0]
                            metrics_dfs.append(pd.DataFrame({
                                'Trial': j,
                                'Session': int(sess),
                                'Label': label,
                                'R2': r2,
                                'MSE': mse,
                                'Model': 'baseline'}, index=[0]))
                    else:
                        mse = df.MSE.values[0]
                        r2 = df.R2.values[0]
                        metrics_dfs.append(pd.DataFrame({
                            'Trial': j,
                            'Label': label,
                            'R2': r2,
                            'MSE': mse,
                            'Model': 'baseline'}, index=[0]))
    metrics_dfs = pd.concat(metrics_dfs)
    # ------------------------------------------
    # plot data
    # ------------------------------------------
    sns.set_style('white')
    sns.set_context('talk')
    if measure == 'r2':
        # Negative R^2 rows (worse than baseline / skipped trials) are dropped
        data_queried = metrics_dfs[(metrics_dfs.R2 > 0)]
        splt = sns.catplot(x='Label', y='R2', hue='Model', data=data_queried, kind='bar')
        splt.ax.set_ylabel('$R^2$')
        splt.ax.set_xlabel('Label')
    else:
        data_queried = metrics_dfs
        splt = sns.catplot(x='Label', y='MSE', hue='Model', data=data_queried, kind='bar')
        splt.ax.set_xlabel('Label')
        splt.ax.set_ylabel('MSE')
        splt.ax.set_yscale('log')
    splt.set_xticklabels(rotation=45, horizontalalignment='right')
    if save_file is not None:
        make_dir_if_not_exists(save_file)
        plt.savefig(save_file + '.' + format, dpi=300, format=format)
    return metrics_dfs
def plot_reconstruction_traces_wrapper(
        lab, expt, animal, session, alpha, beta, gamma, n_ae_latents, rng_seed_model,
        label_names, sssvae_experiment_name, vae_experiment_name, trials,
        models=['sss-vae-s', 'vae'], xtick_locs=None, frame_rate=None, scale=0.5, add_legend=True,
        save_file=None, format='pdf', **kwargs):
    """Plot label-reconstruction traces for one session across several models.

    Loads the best sss-vae/ps-vae model for the given hyperparameters, then,
    depending on `models`, also loads a vae and fits post-hoc linear
    regressors from latents to labels (via `compute_r2`). For each trial in
    `trials`, plots the true labels against the various predictions with
    `plot_reconstruction_traces` and optionally saves each figure to
    `save_file + '_trial-<i>.<format>'`.

    NOTE(review): mutable default for `models` kept for interface
    compatibility. `models` must contain a consistent combination; an
    unrecognized entry raises a bare Exception.
    """
    # choose the hparams builder by model family ('sss-vae' vs 'ps-vae')
    if any([m.find('sss') > -1 for m in models]):
        from behavenet.plotting.cond_ae_utils import _get_sssvae_hparams
        hparams = _get_sssvae_hparams(
            model_class='sss-vae', alpha=alpha, beta=beta, gamma=gamma, n_ae_latents=n_ae_latents,
            experiment_name=sssvae_experiment_name, rng_seed_model=rng_seed_model, **kwargs)
    else:
        from behavenet.plotting.cond_ae_utils import _get_psvae_hparams
        hparams = _get_psvae_hparams(
            model_class='ps-vae', alpha = alpha, beta=beta, gamma=gamma, n_ae_latents=n_ae_latents,
            experiment_name=sssvae_experiment_name, rng_seed_model=rng_seed_model, **kwargs)
    # total latent dim = unsupervised latents + one per supervised label
    hparams['n_ae_latents'] += len(label_names)
    # programmatically fill out other hparams options
    get_lab_example(hparams, lab, expt)
    hparams['animal'] = animal
    hparams['session'] = session
    hparams['session_dir'], sess_ids = get_session_dir(hparams)
    hparams['expt_dir'] = get_expt_dir(hparams)
    _, version_sss = experiment_exists(hparams, which_version=True)
    model_sss, data_gen = get_best_model_and_data(
        hparams, Model=None, load_data=True, version=version_sss)
    model_sss.eval()
    # load any auxiliary models / fit post-hoc regressors requested by `models`
    for model in models:
        print()
        print(model)
        if model == 'vae':
            # matched-size vae trained separately; reuses the sss-vae data_gen
            hparams_vae = copy.deepcopy(hparams)
            hparams_vae['model_class'] = 'vae'
            hparams_vae['n_ae_latents'] = n_ae_latents + len(label_names)
            hparams_vae['experiment_name'] = vae_experiment_name
            hparams_vae['vae.beta'] = 1
            hparams_vae['vae.beta_anneal_epochs'] = 100
            hparams_vae['expt_dir'] = get_expt_dir(hparams_vae)
            version = 0
            model_vae, _ = get_best_model_and_data(
                hparams_vae, Model=None, load_data=False, version=version)
            model_vae.eval()
            # linear regressors: vae latents -> labels
            _, lrs_vae = compute_r2(
                'unsupervised', hparams_vae, model_vae, data_gen, version, label_names)
        elif model == 'sss-vae-u' or model == 'ps-vae-u':
            # regressors from the model's unsupervised latents only
            _, lrs_sss = compute_r2(
                'unsupervised', hparams, model_sss, data_gen, version_sss, label_names)
        elif model == 'sss-vae-s' or model == 'ps-vae-s':
            # use sss-vae model instead of post-hoc regression model
            pass
        elif model == 'sss-vae' or model == 'ps-vae':
            # regressors from the full (supervised + unsupervised) latent space
            _, lrs_sssf = compute_r2(
                'full', hparams, model_sss, data_gen, version_sss, label_names)
        else:
            raise Exception
    # loop over trials to plot
    for trial in trials:
        batch = data_gen.datasets[0][trial]
        labels_og = batch['labels'].detach().cpu().numpy() # [:, 2:]
        # direct label predictions from the sss-vae's supervised subspace
        labels_pred_sss_vae = model_sss.get_predicted_labels(
            batch['images'].to(hparams['device'])).detach().cpu().numpy()
        if 'labels_masks' in batch:
            # nan-out untracked labels so they are skipped when plotting
            labels_masks = batch['labels_masks'].detach().cpu().numpy()
            labels_og[labels_masks == 0] = np.nan
        if save_file is not None:
            save_file_trial = save_file + '_trial-%i' % trial
        else:
            save_file_trial = None
        # pick the comparison layout based on which models were requested
        if 'vae' in models and 'sss-vae-u' in models and 'sss-vae-s' in models:
            # vae
            if hparams_vae['model_class'] == 'ae':
                latents, _, _ = model_vae.encoding(batch['images'].to(hparams['device']))
            else:
                latents, _, _, _ = model_vae.encoding(batch['images'].to(hparams['device']))
            labels_pred_vae = get_predicted_labels(lrs_vae, latents.detach().cpu().numpy())
            # sss-vae-s
            _, latents, _, _, _ = model_sss.encoding(batch['images'].to(hparams['device']))
            labels_pred_sss_vae_s = get_predicted_labels(lrs_sss, latents.detach().cpu().numpy())
            plot = plot_reconstruction_traces(
                [labels_og, labels_pred_vae, labels_pred_sss_vae, labels_pred_sss_vae_s],
                ['original', 'vae', 'sss-vae (super)', 'sss-vae (unsuper)'],
                scale=scale, xtick_locs=xtick_locs, frame_rate=frame_rate, add_legend=add_legend,
                save_file=save_file_trial, format=format)
            # plot = plot_reconstruction_traces(
            #     [labels_og, labels_pred_vae, labels_pred_sss_vae],
            #     ['original', 'vae', 'sss-vae (super)'],
            #     scale=0.25)
        elif 'vae' in models:
            if hparams_vae['model_class'] == 'ae':
                latents, _, _ = model_vae.encoding(batch['images'].to(hparams['device']))
            else:
                latents, _, _, _ = model_vae.encoding(batch['images'].to(hparams['device']))
            labels_pred_vae = get_predicted_labels(lrs_vae, latents.detach().cpu().numpy())
            # plot = plot_reconstruction_traces(
            #     [labels_og, labels_pred_vae, labels_pred_sss_vae],
            #     ['original', 'vae', 'sss-vae'],
            #     scale=0.25)
            plot = plot_reconstruction_traces(
                [labels_og, labels_pred_sss_vae, labels_pred_vae],
                ['original', 'ps-vae', 'vae'],
                scale=scale, add_legend=add_legend, xtick_locs=xtick_locs, frame_rate=frame_rate,
                save_file=save_file_trial, format=format)
        elif 'sss-vae' in models or 'ps-vae' in models:
            if 'sss-vae' in models:
                titles = ['original', 'sss-vae-full', 'sss-vae-s']
            else:
                titles = ['original', 'ps-vae-full', 'ps-vae-s']
            # full latent space = supervised (y) + unsupervised (w) latents
            y, w, _, _, _ = model_sss.encoding(batch['images'].to(hparams['device']))
            latents = np.hstack([y.detach().cpu().numpy(), w.detach().cpu().numpy()])
            labels_pred_full = get_predicted_labels(lrs_sssf, latents)
            plot = plot_reconstruction_traces(
                [labels_og, labels_pred_full, labels_pred_sss_vae],
                titles,
                scale=scale, add_legend=add_legend, xtick_locs=xtick_locs, frame_rate=frame_rate,
                save_file=save_file_trial, format=format)
        else:  # compare supervised and unsupervised trace reconstructions
            if 'sss-vae-u' in models:
                titles = ['original', 'sss-vae-u', 'sss-vae-s']
            else:
                titles = ['original', 'ps-vae-u', 'ps-vae-s']
            _, latents, _, _, _ = model_sss.encoding(batch['images'].to(hparams['device']))
            labels_pred = get_predicted_labels(lrs_sss, latents.detach().cpu().numpy())
            plot = plot_reconstruction_traces(
                [labels_og, labels_pred, labels_pred_sss_vae],
                titles,
                scale=scale, add_legend=add_legend, xtick_locs=xtick_locs, frame_rate=frame_rate,
                save_file=save_file_trial, format=format)
def plot_msps_reconstruction_traces_wrapper(
        lab, expt, alpha, beta, delta, n_ae_latents, rng_seed_model,
        label_names, mspsvae_experiment_name, vae_experiment_name, trials, sess_idxs,
        models=['msps-vae-s', 'vae'], xtick_locs=None, frame_rate=None, scale=0.5, add_legend=True,
        save_file=None, format='pdf', **kwargs):
    """Plot label-reconstruction traces for a multi-session ps-vae (msps-vae).

    Loads the best msps-vae for the given hyperparameters and, if requested,
    a matched-size vae plus post-hoc linear regressors from its latents to
    the labels. For each session index in `sess_idxs` and trial in `trials`,
    plots the true labels against the msps-vae's direct predictions (and the
    vae regressor predictions) with `plot_reconstruction_traces`, optionally
    saving to `save_file + '_sess-<s>_trial-<t>.<format>'`.

    NOTE(review): mutable default for `models` kept for interface
    compatibility; an unrecognized entry raises a bare Exception.
    """
    from behavenet.plotting.cond_ae_utils import _get_psvae_hparams
    hparams = _get_psvae_hparams(
        model_class='msps-vae', alpha=alpha, beta=beta, delta=delta, n_ae_latents=n_ae_latents,
        experiment_name=mspsvae_experiment_name, rng_seed_model=rng_seed_model, **kwargs)
    # total latent dim = unsupervised + supervised + background latents
    hparams['n_ae_latents'] += len(label_names) + hparams['n_background']
    # programmatically fill out other hparams options
    get_lab_example(hparams, lab, expt)
    hparams['session_dir'], sess_ids = get_session_dir(hparams)
    hparams['expt_dir'] = get_expt_dir(hparams)
    # BUGFIX: keep the msps-vae version in its own variable; previously the
    # 'vae' branch below overwrote `version` (with 0) before it was used in
    # the msps compute_r2 calls
    _, version_msps = experiment_exists(hparams, which_version=True)
    hparams['n_sessions_per_batch'] = 1
    model_msps, data_gen = get_best_model_and_data(hparams, load_data=True, version=version_msps)
    model_msps.eval()
    # load any auxiliary models / fit post-hoc regressors requested by `models`
    for model in models:
        print()
        print(model)
        if model == 'vae':
            hparams_vae = copy.deepcopy(hparams)
            hparams_vae['model_class'] = 'vae'
            hparams_vae['n_ae_latents'] = n_ae_latents + len(label_names) + hparams['n_background']
            hparams_vae['experiment_name'] = vae_experiment_name
            hparams_vae['vae.beta'] = 1
            hparams_vae['vae.beta_anneal_epochs'] = 100
            hparams_vae['expt_dir'] = get_expt_dir(hparams_vae)
            version = 0
            model_vae, _ = get_best_model_and_data(hparams_vae, load_data=False, version=version)
            model_vae.eval()
            # linear regressors: vae latents -> labels
            _, lrs_vae = compute_r2(
                'unsupervised', hparams_vae, model_vae, data_gen, version, label_names)
        elif model == 'msps-vae-u':
            # BUGFIX: was `model_ae` (an undefined name -> NameError); use the
            # loaded msps model and its version, mirroring the sss-vae wrapper
            _, lrs_msps = compute_r2(
                'unsupervised', hparams, model_msps, data_gen, version_msps, label_names)
        elif model == 'msps-vae-s':
            # use msps-vae model directly instead of post-hoc regression model
            pass
        elif model == 'msps-vae':
            # BUGFIX: was `model_ae` (undefined); see above
            _, lrs_mspsf = compute_r2(
                'full', hparams, model_msps, data_gen, version_msps, label_names)
        else:
            raise Exception
    # loop over sessions and trials to plot
    for sess_idx in sess_idxs:
        if save_file is not None:
            save_file_sess = save_file + '_sess-%i' % sess_idx
        for trial in trials:
            batch = data_gen.datasets[sess_idx][trial]
            labels_og = batch['labels'].detach().cpu().numpy()  # [:, 2:]
            # direct label predictions from the msps-vae's supervised subspace
            labels_pred_msps_vae = model_msps.get_predicted_labels(
                batch['images'].to(hparams['device'])).detach().cpu().numpy()
            if 'labels_masks' in batch:
                # nan-out untracked labels so they are skipped when plotting
                labels_masks = batch['labels_masks'].detach().cpu().numpy()
                labels_og[labels_masks == 0] = np.nan
            if save_file is not None:
                save_file_trial = save_file_sess + '_trial-%i' % trial
            else:
                save_file_trial = None
            if 'vae' in models:
                if hparams_vae['model_class'] == 'ae':
                    latents, _, _ = model_vae.encoding(batch['images'].to(hparams['device']))
                else:
                    latents, _, _, _ = model_vae.encoding(batch['images'].to(hparams['device']))
                labels_pred_vae = get_predicted_labels(lrs_vae, latents.detach().cpu().numpy())
                plot = plot_reconstruction_traces(
                    [labels_og, labels_pred_msps_vae, labels_pred_vae],
                    ['original', 'msps-vae', 'vae'],
                    scale=scale, add_legend=add_legend, xtick_locs=xtick_locs,
                    frame_rate=frame_rate, save_file=save_file_trial, format=format)
|
import datetime
import dateutil.parser
from django.utils import timezone
from . import models
from . import signals
from . import util
from . import fixed_data
from tatl.models import TatlVerb
def _format_tuple(x):
if hasattr(x, "process_kind"):
return (x.kind, str(x.id), "")
elif hasattr(x, "group_kind"):
if x.group_kind == "Published Artifact Group":
return (x.kind, str(x.id), x.published_name)
else:
return (x.kind, str(x.id), x.dice_name)
else:
return (x.kind, str(x.id), x.dice_name)
def handle_testmetadata(form, user=None, api_o=None, request=None):
    """Create or update a MajoraMetaRecord from a validated metadata form.

    The record is keyed on (artifact, group, process, tag, name); its value,
    timestamp and restricted flag are always refreshed and saved.

    Returns (record, created, updated) where `updated` reflects whether the
    stored value differed from the submitted one.
    """
    cleaned = form.cleaned_data
    tag = cleaned.get("tag")
    value = cleaned.get("value")
    # a tag is restricted when it appears in the optional fixed_data allowlist
    restricted = bool(
        hasattr(fixed_data, "RESTRICTED_METADATA")
        and tag in fixed_data.RESTRICTED_METADATA
    )
    mr, created = models.MajoraMetaRecord.objects.get_or_create(
        artifact=cleaned.get("artifact"),
        group=cleaned.get("group"),
        process=cleaned.get("process"),
        meta_tag=tag,
        meta_name=cleaned.get("name"),
        value_type="str",
    )
    updated = mr.value != value
    mr.value = value
    mr.timestamp = cleaned.get("timestamp")
    mr.restricted = restricted
    mr.save()
    return mr, created, updated
def handle_testsequencing(form, user=None, api_o=None, request=None):
    """Create or update a DNASequencingProcess (a sequencing run).

    Resolves the run by primary key (`sequencing_id`) when given, otherwise
    by `run_name`. On first creation stamps who/when; on update aborts if a
    concurrent CREATE has not finished. Always refreshes the run's
    instrument/flowcell/timing fields, links the library to the run via a
    DNASequencingProcessRecord, and maintains placeholder basecalling and
    downstream-pipeline process objects.

    Returns (process, sequencing_created), or (None, False) when the run
    cannot be resolved or is mid-creation by another request.
    """
    sequencing_id = form.cleaned_data.get("sequencing_id")
    if sequencing_id:
        # prefer an explicit run name, falling back to the id itself
        if form.cleaned_data.get("run_name"):
            run_name = form.cleaned_data.get("run_name")
        else:
            run_name = str(sequencing_id)
        p, sequencing_created = models.DNASequencingProcess.objects.get_or_create(pk=sequencing_id, run_name=run_name)
    else:
        run_name = form.cleaned_data["run_name"]
        p, sequencing_created = models.DNASequencingProcess.objects.get_or_create(run_name=run_name)
    if not p:
        return None, False
    if sequencing_created:
        # Try and infer a date from the library name...
        _dt = util.try_date(run_name)
        # creation time preference: explicit start_time > inferred date > now
        created_dt = None
        if p.start_time:
            created_dt = p.start_time
        elif _dt:
            created_dt = _dt
        else:
            created_dt = timezone.now()
        p.who = user
        p.when = created_dt
        if api_o:
            api_o["new"].append(_format_tuple(p))
            TatlVerb(request=request.treq, verb="CREATE", content_object=p).save()
    else:
        if not p.who:
            # abort as CREATE has not finished
            # we can tell because p.who should be set here
            return None, False
        if api_o:
            api_o["updated"].append(_format_tuple(p))
            TatlVerb(request=request.treq, verb="UPDATE", content_object=p).save()
    # Fill in process
    run_group = form.cleaned_data.get("run_group")
    if not run_group:
        run_group = run_name
    p.run_group = run_group
    p.instrument_make = form.cleaned_data["instrument_make"]
    p.instrument_model = form.cleaned_data["instrument_model"]
    p.flowcell_type = form.cleaned_data["flowcell_type"]
    p.flowcell_id = form.cleaned_data["flowcell_id"]
    p.start_time = form.cleaned_data["start_time"]
    p.end_time = form.cleaned_data["end_time"]
    if p.start_time and p.end_time:
        # NOTE(review): `duration` is computed but never stored or used
        duration = p.end_time - p.start_time
    p.save()
    # Create placeholder digitalgroup
    dgroup, dgroup_created = models.DigitalResourceGroup.objects.get_or_create(
        unique_name="sequencing-dummy-tree-%s" % run_name,
        dice_name="sequencing-dummy-tree-%s" % run_name,
        current_name="sequencing-dummy-tree-%s" % run_name,
        physical=False
    )
    # Create a DNASequencingProcessRecord to link library to run
    # One time I put this under the dgroup_created if statement and put thousands
    # of genomes down the back of the proverbial sofa so don't do that
    # See https://github.com/COG-UK/dipi-group/issues/193
    # Assign a unique_name to try and prevent race conditions adding the same
    # process record multiple times
    in_library = form.cleaned_data.get("library_name")
    rec, rec_created = models.DNASequencingProcessRecord.objects.get_or_create(
        process=p,
        in_artifact=in_library,
        out_group=dgroup,
        unique_name="%s-%s" % (run_name, in_library.dice_name),
    )
    rec.save()
    if dgroup_created:
        # Placeholder basecalling
        bio = models.AbstractBioinformaticsProcess(
            who = user,
            when = p.when,
            pipe_kind = "Basecalling",
        )
        bio.save()
        # dummy reads artifact produced by the placeholder basecalling
        a = models.DigitalResourceArtifact(
            dice_name="sequencing-dummy-reads-%s" % run_name,
            current_name="sequencing-dummy-reads-%s" % run_name,
            current_kind="dummy",
        )
        a.save()
        rec2 = models.MajoraArtifactProcessRecord(
            process=bio,
            in_group=dgroup,
            out_artifact=a,
        )
        a.created = bio
        a.save()
        rec2.save()
        if api_o:
            api_o["new"].append(_format_tuple(dgroup))
            api_o["new"].append(_format_tuple(a))
            TatlVerb(request=request.treq, verb="CREATE", content_object=dgroup).save()
            TatlVerb(request=request.treq, verb="CREATE", content_object=a).save()
    # Placeholder downstream pipeline
    # This will allow us to accept and overwrite basic bioinformatics information
    # see https://github.com/SamStudio8/majora/issues/45
    pipe, pipe_created = models.AbstractBioinformaticsProcess.objects.get_or_create(
        pipe_kind = "Pipeline",
        hook_name = "bioinfo-%s" % run_name,
    )
    if pipe_created:
        pipe.who = user
        pipe.when = p.when
        # It would be nice to inject the dummy reads as the sources of this process
        # but the records have a 1:1 mapping with the FASTA and BAM which won't be
        # in scope until Elan sets them up later...
        #pipe_record, pipe_record_created = models.MajoraArtifactProcessRecord.objects.get_or_create(
        #    process = pipe,
        #    in_artifact = sa,
        #)
        #pipe_record.save()
        if api_o:
            api_o["new"].append(_format_tuple(pipe))
            TatlVerb(request=request.treq, verb="CREATE", content_object=pipe).save()
    # pipeline name/version may be (re)submitted on any request
    pipe.pipe_name = form.cleaned_data.get("bioinfo_pipe_name")
    pipe.pipe_version = form.cleaned_data.get("bioinfo_pipe_version")
    if not pipe_created:
        if api_o:
            api_o["updated"].append(_format_tuple(pipe))
            TatlVerb(request=request.treq, verb="UPDATE", content_object=pipe).save()
    pipe.save()
    return p, sequencing_created
def handle_testlibrary(form, user=None, api_o=None, request=None):
    """Create or update a LibraryArtifact keyed on its dice_name.

    On first creation, also creates the LibraryPoolingProcess that the
    library's biosamples are later linked through, and records the CREATE
    verb. On update, aborts with (None, None) if the library exists but its
    pooling process has not been attached yet (caught mid-creation).

    Returns (library, library_created).
    """
    library_name = form.cleaned_data["library_name"]
    library, library_created = models.LibraryArtifact.objects.get_or_create(
                dice_name=library_name)
    if library_created:
        if api_o:
            api_o["new"].append(_format_tuple(library))
            TatlVerb(request=request.treq, verb="CREATE", content_object=library).save()
        # Try and infer a date from the library name...
        _dt = util.try_date(library_name)
        # Create the pooling event
        pool_p = models.LibraryPoolingProcess(
            who = user,
            when = _dt if _dt else timezone.now() # useful for sorting
        )
        pool_p.save()
        library.created = pool_p
    else:
        # 20220221
        # Abort if we fetched the library without `created` as it means we have
        # caught the library between being created and finally finished
        # If I had a time machine I would have made the process first and then
        # saved the library with created as a kwarg, but `created` was an afterthought
        if not library.created:
            return None, None
    # Note that deep=False prevents fetching the biosamples, meaning we'll only
    # compare the attributes of the library (and any k:v metadata)
    library_as_struct = library.as_struct(deep=False)
    library.layout_config = form.cleaned_data.get("library_layout_config")
    library.layout_read_length = form.cleaned_data.get("library_layout_read_length")
    library.layout_insert_length = form.cleaned_data.get("library_layout_insert_length")
    library.seq_kit = form.cleaned_data.get("library_seq_kit")
    library.seq_protocol = form.cleaned_data.get("library_seq_protocol")
    updated_library_as_struct = library.as_struct(deep=False)
    # Crudely check if the library has changed before saving, largely avoiding
    # the race condition of two requests attempting to create the same library object
    if library_as_struct != updated_library_as_struct or library_created:
        library.save()
        if api_o and not library_created:
            # Add the Updated verb if the library was updated and wasn't created
            api_o["updated"].append(_format_tuple(library))
            TatlVerb(request=request.treq, verb="UPDATE", content_object=library).save()
    return library, library_created
def handle_testlibraryrecord(form, user=None, api_o=None, request=None):
    """Link a biosample into a library's pooling process.

    Requires the library's pooling process (`library.created`) to exist;
    otherwise bails with (None, False). Refreshes the record's library_*
    metadata fields on every call and logs an UPDATE verb only when the
    record was newly created.

    Returns (pooling_record, created).
    """
    cleaned = form.cleaned_data
    biosample = cleaned.get("central_sample_id")  # will return a biosample object
    library = cleaned.get("library_name")  # will actually return a library object
    if not library.created:
        # the library is still mid-creation; do not attach records yet
        return None, False
    pool_rec, created = models.LibraryPoolingProcessRecord.objects.get_or_create(
        process=library.created,
        bridge_artifact=biosample,
        in_artifact=biosample,
        out_artifact=library
    )
    # always refresh the sequencing metadata carried on the record
    for field in (
            "library_source",
            "library_selection",
            "library_strategy",
            "library_protocol",
            "library_primers",
            "sequencing_org_received_date"):
        setattr(pool_rec, field, cleaned.get(field))
    pool_rec.save()
    if api_o and created:
        api_o["updated"].append(_format_tuple(biosample))
        TatlVerb(request=request.treq, verb="UPDATE", content_object=biosample).save()
    return pool_rec, created
def handle_testdigitalresource(form, user=None, api_o=None, request=None):
    """Register (or update) a DigitalResourceArtifact for an uploaded file.

    Resolves (or builds) the directory group hierarchy under the node,
    creates or fetches the artifact (by uuid when provided, else by
    path/name), optionally attaches a bioinformatics process linking source
    groups/artifacts, and optionally publishes the artifact into a
    PublishedArtifactGroup.

    Returns (resource, created) where `created` reflects whether the
    DigitalResourceArtifact itself was newly created.
    """
    res_updated = False
    node = form.cleaned_data["node_name"]
    path = form.cleaned_data["path"]
    # path components between the node root and the file name itself
    lpath = path.split( form.cleaned_data["sep"] )[1:-1]
    # Get the directory
    parent = util.get_mag(node.node_name, path, sep=form.cleaned_data["sep"], artifact=True, by_hard_path=True, prefetch=False)
    if not parent:
        if api_o:
            api_o["messages"].append("MAG not found from hard path")
        # walk the path from the node root, creating missing directory groups
        parent = node
        for i, dir_name in enumerate(lpath):
            dir_g, dir_created = models.DigitalResourceGroup.objects.get_or_create(
                #group_path=form.cleaned_data["sep"].join(lpath),
                current_name=dir_name,
                root_group=node,
                parent_group=parent,
                physical=True)
            parent = dir_g
    if form.cleaned_data.get("artifact_uuid"):
        # caller supplied a stable uuid; look up by id and refresh location
        res, res_created = models.DigitalResourceArtifact.objects.get_or_create(
            id = form.cleaned_data["artifact_uuid"],
        )
        res.primary_group = parent
        res.current_name = form.cleaned_data["current_name"]
        res.current_extension = form.cleaned_data["current_fext"]
    else:
        res, res_created = models.DigitalResourceArtifact.objects.get_or_create(
            primary_group = parent,
            current_name = form.cleaned_data["current_name"],
            current_extension = form.cleaned_data["current_fext"],
        )
    # detect content changes so callers can be told the file was replaced
    if res.current_hash != form.cleaned_data["current_hash"] or res.current_size != form.cleaned_data["current_size"]:
        res_updated = True
    res.dice_name = str(res.id)
    res.current_path = path
    res.current_hash = form.cleaned_data["current_hash"]
    res.current_size = form.cleaned_data["current_size"]
    res.current_kind = form.cleaned_data["resource_type"]
    res.save()
    if len(form.cleaned_data.get("source_group")) > 0 or len(form.cleaned_data.get("source_artifact")) > 0:
        try:
            bio, b_created = models.AbstractBioinformaticsProcess.objects.get_or_create(
                hook_name = form.cleaned_data["pipe_hook"],
            )
        # BUGFIX: was a bare `except:` (also swallowing SystemExit /
        # KeyboardInterrupt) and appended to api_o without a None guard
        except Exception:
            if api_o:
                api_o["messages"].append("Race condition caught trying to add another ABP. Dropping to GET. End users can ignore this message.")
            bio, b_created = models.AbstractBioinformaticsProcess.objects.get_or_create(
                hook_name = form.cleaned_data["pipe_hook"],
            )
        bio.who = user # use the uploading user, not the sequencing submitting user
        bio.when = timezone.now()
        if b_created:
            bio.pipe_kind = form.cleaned_data["pipe_kind"]
            bio.pipe_name = form.cleaned_data["pipe_name"]
            bio.pipe_version = form.cleaned_data["pipe_version"]
            if not bio.pipe_kind or len(bio.pipe_kind) == 0:
                bio.pipe_kind = "Pipeline" # for ease of finding later
        bio.save()
        # link every declared source group/artifact into the process
        for sg in form.cleaned_data.get("source_group"):
            bior, _bior_created = models.MajoraArtifactProcessRecord.objects.get_or_create(
                process = bio,
                in_group = sg,
                out_artifact = res,
            )
            bior.bridge_artifact = form.cleaned_data.get("bridge_artifact")
            bior.save()
        for sa in form.cleaned_data.get("source_artifact"):
            bior, _bior_created = models.MajoraArtifactProcessRecord.objects.get_or_create(
                process = bio,
                in_artifact = sa,
                out_artifact = res,
            )
            bior.bridge_artifact = form.cleaned_data.get("bridge_artifact")
            bior.save()
            try:
                # best-effort: backdate the process to the source's creation
                bio.when = sa.created.when
                bio.save()
            except Exception:
                pass
        # BUGFIX: previously tested the loop variable `created` (clobbered by
        # the process-record get_or_create calls above), not the resource's
        # own creation flag
        if res_created:
            res.created = bio
            res.save()
    if form.cleaned_data.get("publish_group"):
        #TODO handle versioning using res_updated
        #TODO Likely that only the FASTA changes, how to know to drag the BAM across?
        # Perhaps we need the users to give us a version number?
        pag, pag_created = models.PublishedArtifactGroup.objects.get_or_create(
            published_name=form.cleaned_data.get("publish_group"),
            published_version=1,
            is_latest=True,
            # NOTE(review): assumes res.created has been set (requires a
            # source_group/source_artifact on first sight) -- confirm
            owner=res.created.who,
        )
        if not pag.published_date:
            pag.published_date = timezone.now().date()
        res.groups.add(pag)
        pag.save()
        if form.cleaned_data.get("bridge_artifact"):
            b = form.cleaned_data.get("bridge_artifact")
            b.groups.add(pag)
            b.save()
        if pag_created and api_o:
            api_o["new"].append(_format_tuple(pag))
            TatlVerb(request=request.treq, verb="CREATE", content_object=pag).save()
            if form.cleaned_data.get("bridge_artifact"):
                api_o["updated"].append(_format_tuple(b))
                TatlVerb(request=request.treq, verb="UPDATE", content_object=b).save()
    if res and not res_created and api_o:
        api_o["updated"].append(_format_tuple(res))
        TatlVerb(request=request.treq, verb="UPDATE", content_object=res).save()
    if res_created and api_o:
        api_o["new"].append(_format_tuple(res))
        TatlVerb(request=request.treq, verb="CREATE", content_object=res).save()
    # BUGFIX: guard api_o before use (previously raised on api_o=None)
    # NOTE(review): when the artifact already existed and changed, the first
    # branch above has already logged an UPDATE -- confirm dedup intent
    elif res_updated and api_o:
        api_o["updated"].append(_format_tuple(res))
        TatlVerb(request=request.treq, verb="UPDATE", content_object=res).save()
    return res, res_created
|
import argparse
import pymongo
import config
from twitter_streamer import TwitterStreamer
# Handles to the `tweets` database on the two configured Mongo clients:
# a local instance and a (currently unused here) Atlas cluster; the clients
# themselves are constructed in config.py.
LOCAL_DB = config.LOCAL_CLIENT.tweets
ATLAS_DB = config.ATLAS_CLIENT.tweets
class Load_DB:
    """Buffers streamed tweets and bulk-inserts them into MongoDB in batches."""

    def __init__(self, batch_size, limit):
        self.batch_size = batch_size  # tweets per bulk insert
        self.buffer = []              # tweets collected but not yet written
        self.limit = limit            # total number of tweets to collect
        self.counter = 0              # tweets already written to the database

    def load_tweets(self):
        """Flush the buffer into the local `tweet_dicts` collection.

        The collection is created automatically on first insert.
        """
        # config.ATLAS_CLIENT.tweets.tweet_dicts.insert_many(self.buffer) #OLD SETUP FOR AWS SERVER
        LOCAL_DB.tweet_dicts.insert_many(self.buffer)

    def collect_tweets(self, tweet):
        """Streamer callback: buffer one tweet, flushing full batches.

        Handles a batch size larger than the limit and a limit that is not a
        multiple of the batch size by shrinking the final batch.
        """
        self.buffer.append(tweet)
        remaining = self.limit - self.counter
        if remaining < self.batch_size:
            self.batch_size = remaining
        if len(self.buffer) >= self.batch_size:
            self.load_tweets()
            print(f"loaded {int(self.counter + self.batch_size)} tweets of {int(self.limit)}")
            self.buffer = []
            self.counter += self.batch_size
def populate_database(batch_size, limit, keywords):
    """Stream up to `limit` tweets matching `keywords` into the database.

    Wires a Load_DB buffer into the streamer: Load_DB.collect_tweets() is
    the per-tweet callback in the TwitterStreamer.
    """
    loader = Load_DB(batch_size, limit)
    streamer = TwitterStreamer(keywords)
    streamer.stream_tweets(limit, loader.collect_tweets)
if __name__ == '__main__':
    """
    To view argument parser help in the command line:
    'python load_database.py -h'
    """
    # command-line entry point: parse options, then stream tweets into Mongo
    parser = argparse.ArgumentParser(description='Collect tweets and put them into a database')
    parser.add_argument(
        '-k', '--keyword_list', nargs='+', required=True,
        help='<Required> Enter any keywords (separated by spaces; no punctuation) that should be included in streamed tweets.')
    parser.add_argument(
        '-b', '--batch_size', type=int, default=10,
        help='How many tweets do you want to grab at a time?')
    parser.add_argument(
        '-n', '--total_number', type=int, default=300,
        help='How many total tweets do you want to get?')
    args = parser.parse_args()
    print("loading data to database...\n")
    populate_database(args.batch_size, args.total_number, args.keyword_list)
|
import logging
log_level = logging.getLogger().level
import gym
logging.getLogger().setLevel(log_level)
from rllab.envs.base import Env, Step
from rllab.spaces.box import Box
from rllab.core.serializable import Serializable
from conopt.experiments.A7_particle_imitation_two import Experiment
import numpy as np
from sandbox.rocky.analogy.utils import unwrap, using_seed
from rllab.envs.gym_env import convert_gym_space
from conopt import cost
from cached_property import cached_property
from conopt.worldgen.objs import Obj
def fast_residual2cost(r, metric):
    """Return the scalar cost of residual `r` under `metric`.

    Args:
        r: residual array; a 1-D vector is promoted to shape (1, n).
        metric: only "L2" is supported -> 0.5 * sum of squared entries.

    Raises:
        NotImplementedError: for any metric other than "L2".
    """
    if len(r.shape) == 1:
        r = np.expand_dims(r, 0)
    if metric == "L2":
        return 0.5 * np.sum(np.square(r))
    # BUGFIX: was `import ipdb; ipdb.set_trace()` debugger residue, which
    # then fell through and returned None; fail loudly instead
    raise NotImplementedError("unsupported metric: %s" % metric)
def fast_compute_cost(reward_fn, s):
    """Recursively evaluate a conopt cost expression against state dict `s`.

    Args:
        reward_fn: a conopt cost node (MulCost / AddCost / DistCost /
            PenaltyCost).
        s: mapping from cost-node element names to state arrays.

    Raises:
        NotImplementedError: for an unrecognized cost node type.
    """
    if isinstance(reward_fn, cost.MulCost):
        # scalar multiple of a sub-cost
        return reward_fn.b * fast_compute_cost(reward_fn.a_cost, s)
    elif isinstance(reward_fn, cost.AddCost):
        # BUGFIX: AddCost is the sum of its two sub-costs; this previously
        # multiplied them
        return fast_compute_cost(reward_fn.a_cost, s) + fast_compute_cost(reward_fn.b_cost, s)
    elif isinstance(reward_fn, cost.DistCost):
        return fast_residual2cost(s[reward_fn.a] - s[reward_fn.b], reward_fn.metric)
    elif isinstance(reward_fn, cost.PenaltyCost):
        return fast_residual2cost(s[reward_fn.element], reward_fn.metric)
    else:
        # BUGFIX: was `import ipdb; ipdb.set_trace()` debugger residue
        raise NotImplementedError("unsupported cost type: %r" % type(reward_fn))
class ConoptParticleEnv(Env, Serializable):
    """rllab Env wrapper around a conopt two-particle imitation scenario.

    Each "trial" draws fresh random seeds and rebuilds the underlying conopt
    experiment/scenario/env; `reset`/`step` then delegate to that env.
    """
    def __init__(self, seed=None, target_seed=None, obs_type='state'):
        # NOTE(review): `obs_type` is accepted but never read here, and
        # `reset_trial()` immediately overwrites both seeds with random
        # draws -- confirm the constructor seeds are intentionally unused
        Serializable.quick_init(self, locals())
        self.seed = seed
        self.target_seed = target_seed
        self.conopt_exp = None
        self.conopt_scenario = None
        self.conopt_env = None
        self.curr_target_idx = 0
        self.reset_trial()
    def reset_trial(self):
        """Resample seeds, rebuild the conopt scenario/env, and reset it."""
        seed = np.random.randint(np.iinfo(np.int32).max)
        self.seed = seed
        target_seed = np.random.randint(np.iinfo(np.int32).max)
        self.target_seed = target_seed
        exp = Experiment()
        # scenario construction is randomized; pin it to this trial's seed
        with using_seed(self.seed):
            scenario = exp.make_scenario(trial_index=seed)
            env = scenario.to_env()
        self.conopt_exp = exp
        self.conopt_scenario = scenario
        self.conopt_env = env
        return self.reset()
    def reset(self):
        # delegate to the wrapped conopt env; returns its initial observation
        return self.conopt_env.reset()
    @cached_property
    def observation_space(self):
        # translate the gym space advertised by the conopt env into rllab's
        return convert_gym_space(self.conopt_env.observation_space)
    @cached_property
    def action_space(self):
        # box bounded by the simulator model's actuator control ranges
        bounds = self.model.actuator_ctrlrange
        lb = bounds[:, 0]
        ub = bounds[:, 1]
        return Box(lb, ub)
    def render(self, mode='human', close=False):#*args, **kwargs):
        """Render the current state; returns the RGB frame (any mode).

        With close=True, tears down the viewer (if one was created) instead.
        """
        env = self.conopt_env
        if close:
            if 'viewer' in env.__dict__:
                env.viewer.close()
                del env.viewer
        else:
            img = env.world.model.render(np.expand_dims(env.x, 0))[0]
            if mode == 'human':
                #import cv2
                #img = cv2.resize(img, (50, 50))
                # lazily create an on-screen viewer the first time
                if not 'viewer' in env.__dict__:
                    from gym.envs.classic_control.rendering import SimpleImageViewer
                    env.viewer = SimpleImageViewer()
                env.viewer.imshow(img)
                return img
            else:
                return img
    @cached_property
    def model(self):
        # inner simulator model object of the conopt world
        return self.conopt_env.world.model.model
    def step(self, action):
        """Apply `action` (reshaped to the env's action shape).

        Note the wrapped env's `infos` dict is intentionally dropped.
        """
        env = self.conopt_env
        action = action.reshape(env.action_space.shape)
        next_obs, rew, done, infos = env.step(action)
        return Step(next_obs, rew, done)
|
import unittest
import json
from pyanalysis.mysql import Conn
from pyanalysis.moment import moment as m
from pyghostbt.strategy import Strategy
from pyghostbt.tool.order import *
from pyghostbt.tool.runtime import Runtime
from pyghostbt.const import *
from dateutil import tz
# Backtest configuration for the example strategy below: okex quarterly
# btc_usd futures, 10x lever, 1-minute interval.
strategy_1st_config = {
    "mode": "backtest",
    "symbol": "btc_usd",
    "exchange": "okex",
    "contract_type": "quarter",
    "trade_type": "future",
    "unit_amount": 100,  # contract face value -- presumably USD per contract; confirm
    "lever": 10,
    "interval": "1min",
    "db_name": "test",
    "db_name_kline": "ghost-etl",  # database holding kline (candlestick) data
    "timezone": "Asia/Shanghai",
    "param": {
        "position": 0.5,  # fraction of account committed per entry
        "max_abs_loss": 0.05,  # presumably max tolerated loss fraction; confirm
    },
    "order": {}
}
class Strategy1st(Strategy):
    """Example breakout strategy: open at the highest high of the last 20 days."""
    def __init__(self, kw):
        super().__init__(kw)
    def get_wait_open(self, timestamp):
        """Build the opening order list for `timestamp` (ms epoch).

        Queries 20 days of daily klines ending at the local-midnight floor of
        `timestamp`, sizes the position from the last known account asset and
        the configured position fraction, and returns a single taker future
        order priced at the 20-day high.
        """
        moment = m.get(timestamp)
        the_last_day = moment.to(self["timezone"] or "Asia/Shanghai").floor("day")
        the_start_day = the_last_day.shift(days=-20)
        results = self._kline.query(
            the_start_day.millisecond_timestamp,
            the_last_day.millisecond_timestamp,
            KLINE_INTERVAL_1DAY,
            standard=True,
        )
        # NOTE(review): mixes self["a"] here with self._a below for the
        # account helper -- presumably the same object; confirm
        self["a"].init_account(10)
        price = max([result["high"] for result in results])
        asset = self._a.get_last_asset(timestamp)["total_account_asset"]
        # contracts = asset * price * position_fraction / 1e8 / unit_amount
        # (1e8 presumably converts satoshi-denominated asset; confirm)
        amount = int(asset * price * self["param"]["position"] / 100000000 / self["unit_amount"])
        return [FutureOrder(
            trade_type=self["trade_type"],
            place_type=ORDER_PLACE_TYPE_T_TAKER,
            db_name=self["db_name"],
            mode=self["mode"],
            symbol=self["symbol"],
            exchange=self["exchange"],
            contract_type=self["contract_type"],
            instance_id=self["instance_id"],
            sequence=0,
            backtest_id=self["backtest_id"],
            price=price,
            amount=amount,
            lever=self["lever"],
        )]
    def get_opening(self, timestamp):
        # not implemented for this example strategy
        pass
    def get_wait_liquidate(self, timestamp):
        # not implemented for this example strategy
        pass
    def get_liquidating(self, timestamp):
        # not implemented for this example strategy
        pass
class TestStrategy(unittest.TestCase):
    """Smoke tests for strategy runtime construction."""
    def test_turtles(self):
        # Only checks that Runtime is constructible from the config and
        # json-serialisable (Runtime presumably behaves like a dict -- confirm)
        rt = Runtime(strategy_1st_config)
        print(json.dumps(rt))
        # s_1st = Strategy1st(strategy_1st_config)
        # s_1st.get_wait_open(1572246247000)
    # def test_tool_kline(self):
    #     pass
|
import numpy as np
import cv2
import copy
import math
import os
import time
def double_raster(imgTakein, startRow):
    """Two-pass connected-component labelling on a binary image slice.

    First raster pass assigns provisional labels (starting at 2, since 0/1
    are background/foreground) using the up and left neighbours and records
    label equivalences in `eq`. Second pass merges equivalent labels,
    collects per-component pixel coordinates (row-offset by `startRow` so
    slices can be stitched), and returns the component centres.

    NOTE(review): relies on module-level helpers `addCoordinates` and
    `get_center`, and on a fixed `coordinates` capacity of 50 components.
    """
    # take in binary image; startRow is the start row of the current image slice
    img = normalize(imgTakein)
    cur_label=2
    coordinates = [None] * 50
    # eq[label] = smaller equivalent label (0 means no equivalence recorded)
    eq=[]
    for i in range(0,len(img)*len(img[0])):
        eq.append(0)
    # first pass: provisional labels from up/left neighbours
    for row in range(0,len(img)):
        for col in range(0,len(img[row])):
            if(img[row][col]==1):
                if(row>0):
                    up=img[row-1][col]
                else:
                    up=0
                if(col>0):
                    left=img[row,col-1]
                else:
                    left=0
                if(up==0 and left==0):
                    # isolated from previous pixels: start a new label
                    img[row][col]=cur_label
                    cur_label=cur_label+1
                elif(up!=0 and left!=0):
                    # both neighbours labelled: take the smaller, record the
                    # equivalence and compress its chain
                    img[row][col]=min(up,left)
                    if(up!=left):
                        eq[max(up,left)]=min(up,left)
                        a=min(up,left)
                        while(eq[a]!=0):
                            eq[max(up,left)]=eq[a]
                            a=eq[a]
                elif(up==0 or left==0):
                    # exactly one labelled neighbour: inherit its label
                    img[row][col]=max(up,left)
    # changed nested for loop of the second sweep to below, faster for 5-6 second
    max_label = cur_label # record the max label number
    labelPixNumber = [0] * max_label # The number of pixels in each label
    coorAdded = False # switch of whether the coordinates has been recorded
    for label in range(0, max_label):
        labelCoor = np.argwhere(img == label) # get the coordinates of pixels with same label
        coorNum = len(labelCoor)
        labelPixNumber[label] = coorNum
        if (eq[label] != 0):
            eqLabel = eq[label]
            # NOTE(review): adds eqLabel to every pixel currently labelled
            # `label` (result value label+eqLabel), presumably to fold the
            # class into its equivalent -- confirm intended relabelling
            img = eqLabel * (img == label) + img
            # Add the number of pixels of the current label to the equiv label
            # and set the current label pixel number to 0
            labelPixNumber[eqLabel] = labelPixNumber[eqLabel] + labelPixNumber[label]
            labelPixNumber[label] = 0
            addCoordinates(coorNum, eqLabel, labelCoor, startRow, coordinates)
            coorAdded = True
        if not coorAdded:
            addCoordinates(coorNum, label, labelCoor, startRow, coordinates)
        coorAdded = False
    centers = get_center(coordinates)
    #print("finished double raster for one slice of image")
    return centers
def sobel_thresh(img, orient='x', min=0, max=255):
    """Binary mask of the scaled absolute Sobel gradient within [min, max].

    `orient` selects the derivative axis ('x' or 'y'); any other value
    raises NameError, matching the historical behaviour. The `min`/`max`
    parameter names shadow the builtins but are kept for interface
    compatibility.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # rescale so the strongest gradient maps to 255
    scaled = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    mask = np.zeros_like(scaled)
    mask[(scaled >= min) & (scaled <= max)] = 1
    return mask
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of the Sobel gradient magnitude within a threshold band.

    The `mag_thresh` parameter shadows the function name; kept for
    interface compatibility.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(gx ** 2 + gy ** 2)
    # rescale so the strongest magnitude maps to 255
    magnitude = (magnitude / (np.max(magnitude) / 255)).astype(np.uint8)
    lo, hi = mag_thresh
    mask = np.zeros_like(magnitude)
    mask[(magnitude >= lo) & (magnitude <= hi)] = 1
    return mask
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask where the gradient direction falls within `thresh` (radians)."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # direction from absolute derivatives, so angles lie in [0, pi/2]
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    lo, hi = thresh
    mask = np.zeros_like(direction)
    mask[(direction >= lo) & (direction <= hi)] = 1
    return mask
def luv_select(img, channel='l', thresh=(0, 255)):
    """Binary mask from one LUV channel within (thresh[0], thresh[1]].

    `channel` is 'l' or 'u'; anything else selects the V channel.
    """
    luv = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
    # 'l' -> 0, 'u' -> 1, everything else -> v (2)
    chan = luv[:, :, {'l': 0, 'u': 1}.get(channel, 2)]
    mask = np.zeros_like(chan)
    mask[(chan > thresh[0]) & (chan <= thresh[1])] = 1
    return mask
def lab_select(img, channel='l', thresh=(0, 255)):
    """Binary mask from one LAB channel: 1 where thresh[0] < value <= thresh[1].

    channel 'l' selects plane 0, 'a' plane 1; anything else falls back to b.
    """
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    plane = lab[:, :, {'l': 0, 'a': 1}.get(channel, 2)]
    mask = np.zeros_like(plane)
    mask[(plane > thresh[0]) & (plane <= thresh[1])] = 1
    return mask
def hls_select(img, channel='l', thresh=(0, 255)):
    """Binary mask from one HLS channel: 1 where thresh[0] < value < thresh[1].

    Note the strict upper bound (unlike luv_select/lab_select, which use <=).
    channel 'h' selects plane 0, 'l' plane 1; anything else falls back to s.
    """
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    plane = hls[:, :, {'h': 0, 'l': 1}.get(channel, 2)]
    mask = np.zeros_like(plane)
    mask[(plane > thresh[0]) & (plane < thresh[1])] = 1
    return mask
def rgb_select(img, thresh=(0, 255)):
    """Binary mask: 1 where all three color channels are strictly inside thresh."""
    low, high = thresh
    # Every channel of the pixel must be in the open interval (low, high).
    within = (img > low) & (img < high)
    return within.all(axis=2).astype(img.dtype)
def get_middle(img):
    """Crop to the middle half of the image's columns, keeping all rows.

    (Keeps columns [mid - cols//4, mid + cols//4).)
    """
    total_rows = img.shape[0]
    total_cols = img.shape[1]
    quarter = total_cols // 4
    center = total_cols // 2
    return img[0:total_rows, center - quarter:center + quarter]
def dilation(img):
    """Dilate the binary mask once with a 17x17 all-ones kernel.

    Used to thicken the white line after thresholding.
    """
    structuring_element = np.ones((17, 17), np.uint8)
    return cv2.dilate(img, structuring_element, iterations=1)
def normalize(img):
    """Map pixel value 255 to 1 and every other value to 0 (dtype preserved)."""
    return (img == 255).astype(img.dtype)
def get_center(coordinates):
    """Average each coordinate group into one (row, col) center.

    ``coordinates`` holds lists of (row, col) pairs; a ``None`` entry acts
    as a terminator (the convention used by addCoordinates). The original
    scanned with a while loop and raised IndexError when no None sentinel
    was present; this version also stops cleanly at the end of the list.

    :param coordinates: list of coordinate groups, optionally None-terminated.
    :return: list of (row, col) integer centers, one per group before the
        first None.
    """
    centers = []
    for group in coordinates:
        if group is None:
            # Sentinel: nothing past the first None is considered.
            break
        row_sum = sum(row for row, _ in group)
        col_sum = sum(col for _, col in group)
        # Floor division matches the original int(math.floor(...)) result.
        centers.append((row_sum // len(group), col_sum // len(group)))
    return centers
# Coordinate helper for the OpenCV drawing functions, which take (x, y).
def switchRowCol(origCoor):
    """Return [col, row] for a (row, col) pair."""
    return [origCoor[1], origCoor[0]]
# add the coordinates of same label to "coordinates"
def addCoordinates(coorNum, label, labelCoor, startRow, coordinates):
    """Append the first ``coorNum`` label coordinates into coordinates[label-2].

    Each labelCoor[index] is a mutable [row, col] pair; its row is shifted
    in place by ``startRow`` (the segment's offset in the full image), then
    the pair is stored as [col, row] for the OpenCV drawing functions.

    :param coorNum: number of leading labelCoor entries to process.
    :param label: connected-component label; labels start at 2, so the
        target bucket is coordinates[label - 2].
    :param labelCoor: list of [row, col] pairs (mutated: rows offset).
    :param startRow: row offset of the current image segment.
    :param coordinates: bucket list; ``None`` marks an empty bucket.
    """
    for index in range(coorNum):
        labelCoor[index][0] = labelCoor[index][0] + startRow
        # Store as [col, row] (x, y order) for drawing.
        swapped = [labelCoor[index][1], labelCoor[index][0]]
        if coordinates[label - 2] is not None:
            coordinates[label - 2].append(swapped)
        else:
            coordinates[label - 2] = [swapped]
def row_segment_centor(img, NUM_SEGS):
    """Split img into NUM_SEGS horizontal bands and locate white-pixel centers.

    Each band is thresholded; the mean position of its white (255) pixels
    becomes the band center, falling back to the center of the whole image
    when a band has no white pixels. ``double_raster`` (defined elsewhere in
    this module) additionally extracts per-blob centers within each band.

    :param img: color image array.
    :param NUM_SEGS: number of horizontal segments.
    :return: (segmentCentors, blockCenters) — one (col, row) tuple per band,
        plus per-band lists of blob centers from double_raster.
    """
    # Segment the original image into numSegs horizontal bands.
    numSegs = NUM_SEGS
    numRows = img.shape[0]
    numCols = img.shape[1]
    rowInterval = numRows//numSegs
    segmentCentors = [None] * numSegs
    blockCenters = []
    startRow = 0
    for i in range(0, numSegs):
        imgSeg = img[startRow:startRow+rowInterval, 0:numCols]
        # Threshold the segment and compute the mean white-pixel position.
        imgSegThreshed = thresholding(imgSeg)
        coor = np.argwhere(imgSegThreshed == 255)
        if len(coor) == 0:
            # No line pixels in this band: use the full-image center.
            rmean = img.shape[0]//2
            cmean = img.shape[1]//2
        else:
            rmean=int(math.floor(np.mean(coor[:,0])))
            cmean=int(math.floor(np.mean(coor[:,1])))
        # Centers are stored (col, row) to match OpenCV drawing order.
        segmentCentors[i] = (cmean, startRow+rmean)
        startRow = startRow + rowInterval # update row
        # NOTE(review): startRow has already been advanced here, so
        # double_raster receives the NEXT band's offset — confirm intended.
        blockCenters.append(double_raster(imgSegThreshed, startRow))
    return segmentCentors, blockCenters
def thresholding(img):
    """Combine RGB and HLS-lightness thresholds into a 0/255 white-line mask."""
    rgb_mask = rgb_select(img, (150, 255))
    lightness_mask = hls_select(img, channel='l', thresh=(180, 240))
    # A pixel survives only when both selectors agree; output is 0/255.
    combined = np.zeros_like(lightness_mask)
    combined[(lightness_mask == 1) & (rgb_mask == 1)] = 255
    return combined
def img_process(img):
    """Blur, threshold, crop and dilate; return (binary mask, cropped color image)."""
    blurred = cv2.GaussianBlur(img, (5, 5), 0)
    mask = thresholding(blurred)
    # Only the middle half of the columns is relevant for line following.
    mask = get_middle(mask)
    mask = dilation(mask)
    cropped = get_middle(blurred)
    return mask, cropped
def decide_way(img):
    """Decide 'Left'/'Right'/'Straight' from the white-line mask of img.

    :param img: BGR camera frame.
    :return: (command string, annotated cropped frame, binary mask).
    """
    mask, cropped = img_process(img)
    white_pixels = np.argwhere(mask == 255)
    if len(white_pixels) == 0:
        # No line detected: fall back to the image center.
        row_mean = cropped.shape[0] // 2
        col_mean = cropped.shape[1] // 2
    else:
        row_mean = int(math.floor(np.mean(white_pixels[:, 0])))
        col_mean = int(math.floor(np.mean(white_pixels[:, 1])))
    center_col = cropped.shape[1] // 2
    # A 30-pixel dead band around center keeps the command stable.
    if col_mean < center_col - 30:
        command = 'Left'
    elif col_mean > center_col + 30:
        command = 'Right'
    else:
        command = 'Straight'
    cv2.putText(cropped, command, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
    cv2.rectangle(cropped, (col_mean - 20, row_mean - 20), (col_mean + 20, row_mean + 20), (0, 255, 0), 3)
    return command, cropped, mask
def capture_and_decide(filename):
    """Grab one webcam frame, decide a direction, and save annotated images.

    Writes the annotated frame to ``filename`` and the binary mask to
    ``"output" + filename``; prints and returns the decided command.
    """
    capture = cv2.VideoCapture(0)
    _, frame = capture.read()
    capture.release()
    command, annotated, mask = decide_way(frame)
    cv2.imwrite(filename, annotated)
    cv2.imwrite("output" + filename, mask)
    print(command)
    return command
#folder='mobot/'
#video='output2.avi'
#image='./sample_pictures/320.jpg'
#start=time.time()
#img=cv2.imread(image)
#command,img,blur=decide_way(img)
#end=time.time()
#print(end-start)
#cv2.imshow('frame',img)
#cv2.imshow('f1',blur)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
'''
cap=cv2.VideoCapture(video)
#img=cv2.imread(os.path.join(folder,filename))
while(cap.isOpened()):
ret,frame=cap.read()
command,img,blur=decide_way(frame)
cv2.imshow('frame',blur)
cv2.imshow('img',img)
if cv2.waitKey(1) & 0xFF==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
'''
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas_rv2.rhino import get_scene
from compas.utilities import flatten
from compas_rv2.rhino import rv2_undo
__commandname__ = "RV2pattern_move_vertices"
@rv2_undo
def RunCommand(is_interactive):
    """Move selected Pattern vertices, chosen manually or by continuous edges.

    :param is_interactive: part of the Rhino command signature (unused here).
    """
    scene = get_scene()
    if not scene:
        return

    # scene.get may return None or an empty list; the original indexed [0]
    # first and crashed before its own "no Pattern" check could run.
    patterns = scene.get("pattern")
    if not patterns or not patterns[0]:
        print("There is no Pattern in the scene.")
        return
    pattern = patterns[0]

    options = ["ByContinuousEdges", "Manual"]
    option = compas_rhino.rs.GetString("Selection Type.", strings=options)
    if not option:
        return

    # Initialize so an unexpected option string cannot leave keys unbound.
    keys = []
    if option == "ByContinuousEdges":
        temp = pattern.select_edges()
        keys = list(set(flatten([pattern.datastructure.vertices_on_edge_loop(key) for key in temp])))
    elif option == "Manual":
        keys = pattern.select_vertices()

    if keys:
        if pattern.move_vertices(keys):
            scene.update()
# ==============================================================================
# Main
# ==============================================================================

if __name__ == "__main__":
    # Run interactively when executed directly inside Rhino.
    RunCommand(True)
|
import pytest
from dataclasses import dataclass
from gemma import (
Surveyor,
Course,
Attr,
Item,
Compass,
NonNavigableError,
Call,
SuppressedErrors,
PORT,
)
def test_surveyor_chart_raises_navigable():
    """chart() must raise NonNavigableError for a type no compass handles."""
    dict_only_compass = Compass(target_types=dict)
    surveyor = Surveyor(compasses=[dict_only_compass])
    with pytest.raises(NonNavigableError):
        surveyor.chart([1, 2, 3])
def test_surveyor_suppress_error():
    """With exceptions=False, chart_iter yields what it can and batches errors.

    The two list values cannot be navigated by a dict-only compass, so two
    NonNavigableErrors must be suppressed and raised together at the end.
    """
    compass = Compass(target_types=dict)
    surveyor = Surveyor(compasses=[compass])
    data = {"a": "a value", "list": [1, 2, 3], "list2": [1, 2, 3], "b": "b value"}
    courses = list()
    raised = None
    with pytest.raises(SuppressedErrors):
        try:
            for x in surveyor.chart_iter(data, exceptions=False):
                courses.append(x)
        except SuppressedErrors as error:
            # Capture for inspection below, then re-raise for pytest.raises.
            raised = error
            raise error
    assert len(raised.errors) == 2
    assert isinstance(raised.errors[0], NonNavigableError)
    assert isinstance(raised.errors[1], NonNavigableError)
    # The navigable (non-list) entries were still yielded, in order.
    assert courses == [
        (PORT / "[a]", "a value"),
        (PORT / "list", [1, 2, 3]),
        (PORT / "list2", [1, 2, 3]),
        (PORT / "[b]", "b value"),
    ]
def test_surveyor_chart_simple(
    surveyor_generic: Surveyor, data_simple, course_empty: Course
):
    """A flat object charts to one (course, value) pair per attribute."""
    assert surveyor_generic.chart(data_simple) == [
        (course_empty / Attr("a"), "a data"),
        (course_empty / Attr("b"), "b data"),
        (course_empty / Attr("one"), 1),
        (course_empty / Attr("two"), 2),
    ]
def test_surveyor_chart_nested(
    data_structure_1,
    surveyor_generic: Surveyor,
    course_empty: Course,
    data_dict: dict,
    data_list: list,
):
    """Nested dicts/lists chart depth-first: container first, then each member."""
    root = course_empty
    assert surveyor_generic.chart(data_structure_1) == [
        (root / Attr("a"), "a data"),
        (root / Attr("b"), "b data"),
        (root / Attr("one"), 1),
        (root / Attr("two"), 2),
        (root / Attr("dict_data"), data_dict),
        (root / Attr("dict_data") / Item("a dict"), "a value"),
        (root / Attr("dict_data") / Item("b dict"), "b value"),
        (root / Attr("dict_data") / Item("one dict"), 1),
        (root / Attr("dict_data") / Item("two dict"), 2),
        (root / Attr("dict_data") / Item(3), "three int"),
        (root / Attr("dict_data") / Item(4), "four int"),
        (root / Attr("list_data"), data_list),
        (root / Attr("list_data") / Item(0), "zero list"),
        (root / Attr("list_data") / Item(1), "one list"),
        (root / Attr("list_data") / Item(2), "two list"),
        (root / Attr("list_data") / Item(3), "three list"),
        # The dict embedded in the list recurses one level deeper.
        (root / Attr("list_data") / Item(4), data_dict),
        (root / Attr("list_data") / Item(4) / Item("a dict"), "a value"),
        (root / Attr("list_data") / Item(4) / Item("b dict"), "b value"),
        (root / Attr("list_data") / Item(4) / Item("one dict"), 1),
        (root / Attr("list_data") / Item(4) / Item("two dict"), 2),
        (root / Attr("list_data") / Item(4) / Item(3), "three int"),
        (root / Attr("list_data") / Item(4) / Item(4), "four int"),
        (root / Attr("caller_set_value"), None),
    ]
def test_surveyor_chart_callable(data_callable, course_empty):
    """Methods named in Compass(calls=...) are charted as Call entries."""
    compass = Compass(target_types=type(data_callable), calls=["get_a", "two_class"])
    surveyor = Surveyor(compasses=[compass, Compass()])
    root = course_empty
    assert surveyor.chart(data_callable) == [
        (root / Attr("a"), "a value"),
        (root / Attr("b"), "b value"),
        (root / Call("get_a"), "a value called"),
        (root / Call("two_class"), 2),
    ]
def test_surveyor_add_endpoint():
    """Types listed in end_points_extra are charted as leaves, not recursed into."""
    @dataclass
    class A:
        text: str = "string"

    data = {"one": 1, "two": 2, "a": A()}
    expected = [(PORT / "one", 1), (PORT / "two", 2), (PORT / "a", A())]
    surveyor = Surveyor(end_points_extra=(A,))
    assert surveyor.chart(data) == expected
|
from setuptools import setup, find_packages
from os import remove
from pathlib import Path
from json import dump
from ravenml.utils.git import is_repo, git_sha, git_patch_tracked, git_patch_untracked
pkg_name = 'ravenml'
rml_dir = Path(__file__).resolve().parent

# PyPI long description comes straight from the README.
with open(rml_dir / 'README.md', encoding='utf-8') as f:
    long_description = f.read()

# attempt to write git data to file
# NOTE: does NOT work in the GitHub tarball installation case
# this will work in 3/4 install cases:
#   1. PyPI
#   2. GitHub clone
#   3. Local (editable), however NOTE in this case there is no need
#      for the file, as ravenml will find git information at runtime
#      in order to include patch data
repo_root = is_repo(rml_dir)
if repo_root:
    # Snapshot the repo state so installed packages carry provenance info.
    info = {
        'ravenml_git_sha': git_sha(repo_root),
        'ravenml_tracked_git_patch': git_patch_tracked(repo_root),
        'ravenml_untracked_git_patch': git_patch_untracked(repo_root)
    }
    with open(rml_dir / pkg_name / 'git_info.json', 'w') as f:
        dump(info, f, indent=2)

setup(
    name=pkg_name,
    version='1.2',
    description='ML Training CLI Tool',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    license='MIT',
    author='Carson Schubert, Abhi Dhir, Pratyush Singh',
    author_email='carson.schubert14@gmail.com',
    keywords= ['machine learning', 'data science'],
    download_url = 'https://github.com/autognc/ravenML/archive/v1.2.tar.gz',
    packages=find_packages(),
    # Ship the git snapshot (written above) inside the package.
    package_data={pkg_name: ['git_info.json']},
    install_requires=[
        'Click>=7.0',
        'questionary>=1.0.2',
        'boto3>=1.9.86',
        'shortuuid>=0.5.0',
        'halo>=0.0.26',
        'colorama>=0.3.9',
        'pyaml>=19.4.1',
    ],
    tests_require=[
        'pytest',
        'moto'
    ],
    entry_points={
        'console_scripts': [f'{pkg_name}={pkg_name}.cli:cli'],
    }
)

# destroy git file after install
# NOTE: this is pointless for GitHub clone case, since the clone is deleted
# after install. It is necessary for local (editable) installs to prevent
# the file from corrupting the git repo, and when creating a dist for PyPI
# for the same reason.
if repo_root:
    remove(rml_dir / pkg_name / 'git_info.json')
|
import open3d as o3d
import numpy as np
from numba import jit
def dataset_intrinsics(dataset='tartanair'):
    """Return camera intrinsics (fx, fy, cx, cy) for a known dataset, else None."""
    known = {
        'kitti': (707.0912, 707.0912, 601.8873, 183.1104),
        'euroc': (458.6539916992, 457.2959899902, 367.2149963379, 248.3750000000),
        'tartanair': (320.0, 320.0, 320.0, 240.0),
    }
    return known.get(dataset)
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def depth2pcd(depthImg, point3D, cx=320, cy=240, fx=320, fy=320, thresh=10.0): # Function is compiled to machine code when called the first time
    # Fill point3D (H*W x 3, preallocated by the caller) with camera-frame
    # points from a depth map. Pixels with |depth| >= thresh are skipped,
    # so their rows keep whatever the buffer held before.
    # Axis convention: x = (v-cx)*d/fx (right), y = d (depth), z = -(u-cy)*d/fy
    # — NOTE(review): the negated z puts "up" positive; confirm against consumers.
    for u in range(depthImg.shape[0]): # Numba likes loops
        for v in range(depthImg.shape[1]):
            d = depthImg[u,v]
            if abs(d) < thresh:
                point3D[u*depthImg.shape[1]+v, :] = [(v-cx)*d/fx, d, -(u-cy)*d/fy ]
def image_to_cloud(depth, cx=320, cy=240, f=320) -> o3d.geometry.PointCloud:
    """Convert a depth image into an Open3D point cloud.

    The return annotation previously *called* PointCloud(), constructing a
    throwaway instance at definition time; it now names the type. The buffer
    is also sized from the actual image instead of a hard-coded 480x640.

    :param depth: 2-D depth array (rows x cols).
    :param cx: principal point x (defaults match the TartanAir camera).
    :param cy: principal point y.
    :param f: focal length used for both axes.
    :return: point cloud with one point per pixel.
    """
    pcd = np.zeros(shape=(depth.shape[0] * depth.shape[1], 3), dtype=np.float32)
    depth2pcd(depth, pcd, cx=cx, cy=cy, fx=f, fy=f)
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(pcd)
    return cloud
|
"""Caching for this package."""
from __future__ import annotations
import json
import os.path
from typing import cast
import pywikibot
from pywikibot_extensions.page import get_redirects
import nfcbot
def _get_cache_directory() -> str:
"""Return the cache directory."""
loc = os.environ.get("XDG_CACHE_HOME") or os.path.expanduser("~/.cache")
return os.path.abspath(os.path.join(loc, "nfcbot"))
def build_cache(site: pywikibot.site.APISite) -> Store:
    """Build the cache of template titles (plus redirects) per category."""
    pywikibot.output("Building cache ...")
    store = Store()
    for category_name in (nfcbot.NFUR_TPL_CAT, nfcbot.FILE_TPL_CAT):
        category = pywikibot.Category(site, category_name)
        templates = [
            page
            for page in category.articles(recurse=True, namespaces=10)
            # Exclude the generic Information template from review.
            if page.title() != "Template:Information"
        ]
        with_redirects = get_redirects(frozenset(templates), namespaces=10)
        titles = frozenset(page.title() for page in with_redirects)
        store[category_name] = cast(frozenset[str], titles)
    pywikibot.output("Cache built.")
    return store
def clear_cache() -> None:
    """Clear the on-disk cache store."""
    store = Store()
    store.clear()
    pywikibot.output("Cache cleared.")
def get_cache(site: pywikibot.site.APISite) -> Store:
    """Return the cache, building it first if the store is empty."""
    store = Store()
    # An empty Store is falsy, which triggers a full rebuild.
    if store:
        return store
    return build_cache(site)
class Store(dict[str, frozenset[str]]):
    """Cache store: a dict persisted to <cache dir>/store.json.

    Every read/write round-trips through the JSON file, so separate Store
    instances observe each other's committed changes (last write wins;
    there is no file locking).
    """

    def __init__(self) -> None:
        """Initialize, creating the cache directory/file if needed."""
        super().__init__()
        directory = _get_cache_directory()
        if not os.path.exists(directory):
            os.makedirs(directory, exist_ok=True)
        self._file = os.path.join(directory, "store.json")
        if os.path.exists(self._file):
            self._read()
        else:
            # First use: materialize an empty store on disk.
            self._write()

    def __getitem__(self, key: str) -> frozenset[str]:
        """Get the item, re-reading the file first for freshness."""
        self._read()
        return super().__getitem__(key)

    def __setitem__(self, key: str, value: frozenset[str]) -> None:
        """Set the item and persist; re-reads first so other keys survive."""
        self._read()
        super().__setitem__(key, value)
        self._write()

    def clear(self) -> None:
        """Clear the store in memory and on disk."""
        super().clear()
        self._write()

    def _read(self) -> None:
        """Replace in-memory contents with the file's contents."""
        super().clear()
        with open(self._file, encoding="utf-8") as f:
            for k, v in json.load(f).items():
                # JSON stores lists; restore the frozenset value type.
                v = cast(frozenset[str], frozenset(v))
                super().__setitem__(k, v)

    def _write(self) -> None:
        """Write the in-memory contents to the file (sets become sorted-key JSON lists)."""
        with open(self._file, "w", encoding="utf-8") as f:
            json.dump(
                {k: list(v) for k, v in self.items()},
                f,
                indent=4,
                sort_keys=True,
            )
|
from datetime import datetime
from datetime import timezone
import pytest
from aioresponses import aioresponses
from freezegun import freeze_time
from senor_octopus.sources.whistle import whistle
# Canned /api/pets response used to stub out the Whistle API in the test
# below. Field layout mirrors the real API; ids, coordinates and addresses
# are fake.
mock_payload = {
    "pets": [
        {
            "id": 123456,
            "gender": "f",
            "name": "Rex",
            "profile_photo_url_sizes": {
                "60x60": None,
                "100x100": None,
                "200x200": None,
                "750x750": None,
            },
            "realtime_channel": {"channel": "private-dog-123456", "service": "Pusher"},
            "subscription_status": "active",
            "partner_service_status": None,
            # Tracker hardware state — the battery/check-in/tracking fields
            # here are the ones the source turns into events.
            "device": {
                "model_id": "W04B",
                "serial_number": "W04-1234567",
                "last_check_in": "2021-03-25T15:52:19-07:00 America/Los_Angeles",
                "firmware_version": "0.47-200807 :: 0.47-200807 :: 0.47-200807",
                "battery_level": 96,
                "battery_status": "on",
                "pending_locate": False,
                "tracking_status": "not_tracking",
                "has_gps": True,
                "requires_subscription": True,
                "flashlight_status": {
                    "state": "off",
                    "pattern": None,
                    "brightness": None,
                },
                "partner_record": None,
            },
            "activity_summary": {
                "activity_start_date": "2020-08-22",
                "activity_enabled": True,
                "current_streak": 0,
                "current_minutes_active": 10,
                "current_minutes_rest": 827,
                "similar_dogs_minutes_active": 44.8529743653839,
                "similar_dogs_minutes_rest": 1083.1920639333,
                "suggested_activity_range_lower": 26.0,
                "suggested_activity_range_upper": 52.0,
                "current_activity_goal": {
                    "minutes": 36,
                    "started_at": "2020-08-22T07:00:00Z",
                    "time_zone": "America/Los_Angeles",
                },
                "upcoming_activity_goal": {
                    "minutes": 36,
                    "started_at": "2020-08-22T07:00:00Z",
                    "time_zone": "America/Los_Angeles",
                },
            },
            # Lat/long here drives the location and geohash events.
            "last_location": {
                "latitude": 45.0,
                "longitude": -135.0,
                "timestamp": "2021-03-25T22:52:03Z",
                "uncertainty_meters": 0.0,
                "reason": "back_in_beacon",
                "place": {
                    "distance": 0,
                    "distance_units": "feet",
                    "id": 123456,
                    "status": "in_beacon_range",
                    "units": "feet",
                },
                "description": {
                    "address": "0 Fool's St",
                    "place": "Nowhere",
                    "postcode": "12345",
                    "region": "California",
                    "country": "United States",
                },
            },
            "profile": {
                "breed": {"id": 1870, "name": "Italian Greyhound Mix"},
                "date_of_birth": "2008-05-22",
                "age_in_months": 10,
                "age_in_years": 12,
                "time_zone_name": "America/Los_Angeles",
                "weight": 30.0,
                "weight_type": "pounds",
                "species": "dog",
                "overdue_task_occurrence_count": 4,
                "is_fixed": True,
                "body_condition_score": None,
                "pet_food": None,
            },
        },
    ],
}
@freeze_time("2021-01-01")
@pytest.mark.asyncio
async def test_whistle() -> None:
    """whistle() logs in, fetches pets, and emits one event per metric.

    aioresponses stubs the Whistle HTTP API with mock_payload; freeze_time
    pins every event timestamp to 2021-01-01 UTC.
    """
    with aioresponses() as mock_response:
        mock_response.post(
            "https://app.whistle.com/api/login",
            payload={"auth_token": "XXX"},
        )
        mock_response.get("https://app.whistle.com/api/pets", payload=mock_payload)
        events = [event async for event in whistle("username", "password")]
        print(sorted(events, key=lambda e: e["name"]))
        # Sort by name so the comparison is independent of emission order.
        assert sorted(events, key=lambda e: e["name"]) == [
            {
                "timestamp": datetime(2021, 1, 1, 0, 0, tzinfo=timezone.utc),
                "name": "hub.whistle.Rex.battery_level",
                "value": 96,
            },
            {
                "timestamp": datetime(2021, 1, 1, 0, 0, tzinfo=timezone.utc),
                "name": "hub.whistle.Rex.battery_status",
                "value": "on",
            },
            {
                "timestamp": datetime(2021, 1, 1, 0, 0, tzinfo=timezone.utc),
                "name": "hub.whistle.Rex.geohash",
                "value": "c00000000000",
            },
            {
                "timestamp": datetime(2021, 1, 1, 0, 0, tzinfo=timezone.utc),
                "name": "hub.whistle.Rex.last_check_in",
                "value": "2021-03-25T15:52:19-07:00 America/Los_Angeles",
            },
            {
                "timestamp": datetime(2021, 1, 1, 0, 0, tzinfo=timezone.utc),
                "name": "hub.whistle.Rex.location",
                "value": (45.0, -135.0),
            },
            {
                "timestamp": datetime(2021, 1, 1, 0, 0, tzinfo=timezone.utc),
                "name": "hub.whistle.Rex.tracking_status",
                "value": "not_tracking",
            },
        ]
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pprint
import fire
import torch
from fastspeech import DEFAULT_DEVICE
from fastspeech import hparam as hp
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.text_dataset import TextDataset
from fastspeech.inferencer.fastspeech_inferencer import FastSpeechInferencer
from fastspeech.model.fastspeech import Fastspeech
from fastspeech.trt.fastspeech_trt_inferencer import FastSpeechTRTInferencer
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy
from collections import OrderedDict
import sys
import numpy as np
from torch.nn import functional as F
# import multiprocessing
# multiprocessing.set_start_method('spawn', True)
# Wide pretty-printer and unabridged numpy printing so large activation
# tensors are dumped in full during verification.
pp = pprint.PrettyPrinter(indent=4, width=1000)
np.set_printoptions(threshold=sys.maxsize)

# Default sentence synthesized when no text is supplied.
SAMPLE_TEXT = "the more you buy, the more you save."
def verify(hparam="trt.yaml",
           text=SAMPLE_TEXT,
           **kwargs):
    """Run the same text through the TRT and PyTorch inferencers and diff activations.

    :param hparam: hparams YAML file to load.
    :param text: input sentence to synthesize.
    :param kwargs: hparam overrides forwarded to hp.set_hparam.
    """
    hp.set_hparam(hparam, kwargs)
    tprint("Hparams:\n{}".format(pp.pformat(hp)))
    tprint("Device count: {}".format(torch.cuda.device_count()))

    outs_trt, acts_trt = infer_trt(text)
    outs, acts = infer_pytorch(text)

    # Activations present in both runs are compared element-wise below.
    both, pytorch, trt = join_dict(acts, acts_trt)

    # print diff
    print("## Diff ##\n\n")
    for name, (act, act_trt) in both.items():
        # Compare in float32 regardless of the runs' native precision.
        act = act.float()
        act_trt = act_trt.float()
        diff = act.reshape(-1) - act_trt.reshape(-1)
        is_identical = diff.eq(0).all()
        errors = diff[diff.ne(0)]
        max_error = torch.max(torch.abs(errors)) if len(errors) > 0 else 0
        print("# {} #\n\n[PyTorch]\n{}\n\n[TRT]: \n{}\n\n[Diff]: \n{}\n\n[Errors]: \n{}\n- identical? {}\n- {} errors out of {}\n- max: {}\n\n".format(name,
              act,
              act_trt,
              diff,
              errors,
              is_identical,
              len(errors),
              len(diff),
              max_error,
              ))

    # print("## PyTorch ##\n\n")
    # for name, act in pytorch.items():
    #     print("[{}]\npytorch:\n{}\n\n".format(name, act))
    # print("## TRT ##\n\n")
    # for name, act in trt.items():
    #     print("[{}]\ttrt:\n{}\n\n".format(name, act_trt))
def join_dict(acts, acts_trt):
    """Partition two dicts by key overlap.

    :return: (both, left, right) — shared keys mapped to (acts value,
        acts_trt value) pairs, keys only in acts, and keys only in acts_trt.
    """
    both = {key: (acts[key], acts_trt[key]) for key in acts if key in acts_trt}
    left = {key: value for key, value in acts.items() if key not in acts_trt}
    right = {key: value for key, value in acts_trt.items() if key not in acts}
    return both, left, right
def infer_trt(text):
    """Synthesize ``text`` with the TensorRT inferencer.

    Builds a Fastspeech model from hparams, batches hp.batch_size copies of
    the text, and runs FastSpeechTRTInferencer with accuracy validation on.

    :param text: input sentence.
    :return: (outputs, dict of collected activations).
    """
    # model — all architecture hyperparameters come from the loaded hp config.
    model = Fastspeech(
        max_seq_len=hp.max_seq_len,
        d_model=hp.d_model,
        phoneme_side_n_layer=hp.phoneme_side_n_layer,
        phoneme_side_head=hp.phoneme_side_head,
        phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
        phoneme_side_output_size=hp.phoneme_side_output_size,
        mel_side_n_layer=hp.mel_side_n_layer,
        mel_side_head=hp.mel_side_head,
        mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
        mel_side_output_size=hp.mel_side_output_size,
        duration_predictor_filter_size=hp.duration_predictor_filter_size,
        duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
        fft_conv1d_kernel=hp.fft_conv1d_kernel,
        fft_conv1d_padding=hp.fft_conv1d_padding,
        dropout=hp.dropout,
        n_mels=hp.num_mels,
        fused_layernorm=hp.fused_layernorm
    )

    # dataset — the same sentence repeated to fill one batch.
    dataset = TextDataset([text for _ in range(hp.batch_size)])
    data_loader = PadDataLoader(dataset,
                                batch_size=hp.batch_size,
                                num_workers=hp.n_workers,
                                drop_last=False)

    # inferencer
    inferencer = FastSpeechTRTInferencer('fastspeech',
                                         model,
                                         data_loader=data_loader,
                                         ckpt_path=hp.checkpoint_path,
                                         trt_max_ws_size=hp.trt_max_ws_size,
                                         trt_file_path=hp.trt_file_path,
                                         trt_force_build=hp.trt_force_build,
                                         use_fp16=hp.use_fp16,
                                         trt_max_input_seq_len=hp.trt_max_input_seq_len,
                                         trt_max_output_seq_len=hp.trt_max_output_seq_len,
                                         validate_accuracy=True,
                                         )
    with inferencer:
        # acts is filled in-place with named intermediate activations.
        acts = dict()
        outs = inferencer.infer(acts=acts)
    return outs, acts
def infer_pytorch(text):
    """Synthesize ``text`` with the reference PyTorch inferencer on CUDA.

    Mirrors infer_trt's model/dataset construction so activations are
    comparable; sequence lengths are clamped to the TRT maxima.

    :param text: input sentence.
    :return: (outputs, dict of collected activations).
    """
    # model — identical construction to infer_trt for a fair comparison.
    model = Fastspeech(
        max_seq_len=hp.max_seq_len,
        d_model=hp.d_model,
        phoneme_side_n_layer=hp.phoneme_side_n_layer,
        phoneme_side_head=hp.phoneme_side_head,
        phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
        phoneme_side_output_size=hp.phoneme_side_output_size,
        mel_side_n_layer=hp.mel_side_n_layer,
        mel_side_head=hp.mel_side_head,
        mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
        mel_side_output_size=hp.mel_side_output_size,
        duration_predictor_filter_size=hp.duration_predictor_filter_size,
        duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
        fft_conv1d_kernel=hp.fft_conv1d_kernel,
        fft_conv1d_padding=hp.fft_conv1d_padding,
        dropout=hp.dropout,
        n_mels=hp.num_mels,
        fused_layernorm=hp.fused_layernorm
    )

    # dataset
    dataset = TextDataset([text for _ in range(hp.batch_size)])
    data_loader = PadDataLoader(dataset,
                                batch_size=hp.batch_size,
                                num_workers=hp.n_workers,
                                drop_last=False)

    # inferencer — no_grad: inference only, no autograd bookkeeping.
    with torch.no_grad():
        inferencer = FastSpeechInferencer('fastspeech',
                                          model,
                                          data_loader=data_loader,
                                          ckpt_path=hp.checkpoint_path,
                                          device='cuda',
                                          use_fp16=hp.use_fp16,
                                          )
        acts = dict()
        # Pad to the TRT sequence limits so shapes match the TRT run.
        outs = inferencer.infer(acts=acts,
                                seq_input_len=hp.trt_max_input_seq_len,
                                seq_output_len=hp.trt_max_output_seq_len)
    return outs, acts
if __name__ == '__main__':
    torch.backends.cudnn.enabled = True
    # NOTE(review): benchmark disabled — presumably for reproducible
    # activation comparisons between runs; confirm before changing.
    torch.backends.cudnn.benchmark = False
    fire.Fire(verify)
|
#!/usr/bin/env python
import sys
from fs.opener import opener
from fs.commands.runner import Command
from fs.utils import print_fs
class FSTree(Command):
    """fstree command: render a filesystem subtree as an ascii tree or GUI."""

    usage = """fstree [OPTION]... [PATH]
Recursively display the contents of PATH in an ascii tree"""

    def get_optparse(self):
        """Extend the base option parser with tree-specific flags."""
        optparse = super(FSTree, self).get_optparse()
        optparse.add_option('-l', '--level', dest='depth', type="int", default=5,
                            help="Descend only LEVEL directories deep", metavar="LEVEL")
        optparse.add_option('-g', '--gui', dest='gui', action='store_true', default=False,
                            help="browse the tree with a gui")
        optparse.add_option('-a', '--all', dest='all', action='store_true', default=False,
                            help="do not hide dot files")
        optparse.add_option('-d', '--dirsfirst', dest='dirsfirst', action='store_true', default=False,
                            help="List directories before files")
        return optparse

    def do_run(self, options, args):
        """Render each requested path; returns 1 if a path is not a directory."""
        if not args:
            # Default to the current directory.
            args = ['.']
        for fs, path, is_dir in self.get_resources(args, single=True):
            if not is_dir:
                self.error(u"'%s' is not a dir\n" % path)
                return 1
            # Hint the filesystem to cache metadata for the recursive walk.
            fs.cache_hint(True)
            if options.gui:
                from fs.browsewin import browse
                if path:
                    fs = fs.opendir(path)
                browse(fs, hide_dotfiles=not options.all)
            else:
                print_fs(fs, path or '',
                         file_out=self.output_file,
                         max_levels=options.depth,
                         terminal_colors=self.terminal_colors,
                         hide_dotfiles=not options.all,
                         dirs_first=options.dirsfirst)
def run():
    """Entry point: execute the fstree command and return its exit status."""
    command = FSTree()
    return command.run()
if __name__ == "__main__":
    # Exit with the command's return code when run as a script.
    sys.exit(run())
|
from fortunae.fortunae import * #get_stocks, get_fiis, br_stocks, br_fiis |
# -*- coding: utf-8 -*-
import uuid
import importlib
import logging
import time
import functools
from threading import local
try:
from celery import (
Celery,
signals,
)
except ImportError:
raise ImportError('To use queue module, you must install celery.')
try:
from sqlalchemy import event
except:
event = None
logger = logging.getLogger(__name__)

# Thread-local storage; holds tasks registered to be sent after DB commit.
async_ctx = local()
class AsyncTask(object):
    """A deferred call to ``module_name.func_name`` dispatched through celery.

    Instances either send themselves immediately via ``send`` or are parked
    in the thread-local ``async_ctx`` (via ``register``) to be flushed after
    the surrounding DB transaction commits.
    """

    __slots__ = (
        'task_id', 'module_name', 'func_name', 'args', 'kwargs',
        'countdown', 'send_after_commit', 'extra_celery_kwargs', 'apply_queue',
    )

    def __init__(
        self, module_name, func_name, args=None, kwargs=None,
        countdown=0, send_after_commit=False,
        apply_queue='queue', extra_celery_kwargs=None,
    ):
        """Capture call parameters after validating the target endpoint.

        :raises ValueError: if ``func_name`` is not an attribute of
            ``module_name`` (checked eagerly so bad endpoints fail at
            creation time, not at send time).
        """
        mod = importlib.import_module(module_name)
        if not hasattr(mod, func_name):
            raise ValueError('Invalid API Endpoint is provided.')
        self.task_id = uuid.uuid1().hex
        self.module_name = module_name
        self.func_name = func_name
        self.args = args if args is not None else ()
        self.kwargs = kwargs if kwargs is not None else {}
        # Negative countdowns are clamped to "send immediately".
        self.countdown = countdown if countdown >= 0 else 0
        self.send_after_commit = bool(send_after_commit)
        self.extra_celery_kwargs = extra_celery_kwargs if extra_celery_kwargs is not None else {}
        self.apply_queue = apply_queue

    def register(self):
        """Park this task in thread-local storage for post-commit sending.

        :raises ValueError: if the task was not created with
            ``send_after_commit=True``.
        """
        if self.send_after_commit:
            if hasattr(async_ctx, 'reged_tasks'):
                async_ctx.reged_tasks.add(self)
            else:
                async_ctx.reged_tasks = {self}
        else:
            raise ValueError('Cannot register task without send_after_commit flag.')

    def send(self, async_api):
        """Dispatch now through the bound celery task ``async_api``.

        :param async_api: the celery task produced by register_to_celery.
        :return: the celery AsyncResult from apply_async.
        """
        return async_api.si(
            self.module_name, self.func_name,
            *self.args,
            **self.kwargs
        ).apply_async(
            countdown=self.countdown,
            queue=self.apply_queue,
            **self.extra_celery_kwargs
        )
def make_send_task(async_api, apply_queue):
    """Bind send_task to a concrete celery task and queue name."""
    bound = functools.partial(send_task, async_api=async_api, apply_queue=apply_queue)
    return bound
def send_task(module_name, api_name, *args, countdown=0, async_api=None, apply_queue=None, send_after_commit=False, extra_celery_kwargs=None, **kwargs):
    """Create an AsyncTask and either register it (post-commit) or send it now.

    Intended to be used through make_send_task, which binds async_api and
    apply_queue; calling it unbound is an error.

    :return: the generated task id.
    :raises RuntimeError: when async_api/apply_queue were not bound.
    """
    if not (async_api and apply_queue):
        raise RuntimeError('create send_task using make_send_task.')
    task = AsyncTask(
        module_name=module_name, func_name=api_name,
        args=args, kwargs=kwargs, countdown=countdown,
        send_after_commit=send_after_commit,
        extra_celery_kwargs=extra_celery_kwargs, apply_queue=apply_queue,
    )
    if send_after_commit:
        # Defer until the DB transaction commits (see register_to_celery).
        task.register()
    else:
        task.send(async_api)
    return task.task_id
def make_async_task(function_executor, retry_wait=5):
    """Build a celery task body that runs target functions via function_executor.

    The returned callable accepts an optional per-call ``retry_wait`` kwarg
    that overrides the default retry delay.
    """
    def _f(self, module_name, api_name, *args, **kwargs):
        # Pop the per-call override (if any) so it is not forwarded on.
        wait = kwargs.pop('retry_wait', retry_wait)
        return async_task(self, module_name, api_name, wait, function_executor, *args, **kwargs)
    return _f
def async_task(self, module_name, api_name, retry_wait=5, func_executor=None, *args, **kwargs):
    """Celery task body: import module_name and run api_name via func_executor.

    Bound to celery with bind=True, so ``self`` is the task instance.
    On any exception a retry is scheduled after ``retry_wait`` seconds.
    NOTE(review): callers must pass retry_wait/func_executor positionally
    (as make_async_task does) — positional *args after keyword defaults
    make this signature easy to misuse; confirm before calling directly.
    """
    try:
        mod = importlib.import_module(module_name)
        func = getattr(mod, api_name)
        return func_executor(func)(*args, **kwargs)
    except Exception as e:
        # Broad catch is deliberate: any failure schedules a celery retry.
        self.retry(exc=e, countdown=retry_wait)
def register_to_celery(celery_broker, celery_config, async_task, max_retries=12, DBSession=None):
    """Create the celery app and bind ``async_task`` as its retryable task.

    :param celery_broker: mapping with user/password/host/port/vhost keys.
    :param celery_config: extra celery configuration applied to app.conf.
    :param async_task: task body (see make_async_task) to register.
    :param max_retries: celery retry limit for the bound task.
    :param DBSession: optional sqlalchemy session/factory; when given,
        tasks registered via AsyncTask.register are sent on after_commit.
    :return: (celery app, bound async api task).
    :raises ImportError: if DBSession is given but sqlalchemy is missing.
    """
    def send_after_commit_tasks(session):
        # Flush every task parked by AsyncTask.register on this thread.
        if not hasattr(async_ctx, 'reged_tasks'):
            return
        for task in async_ctx.reged_tasks:
            task.send(async_api)
        delattr(async_ctx, 'reged_tasks')

    broker = 'amqp://{user}:{password}@{host}:{port}/{vhost}'.\
        format(**celery_broker)
    app = Celery(broker=broker)
    app.conf.update(**celery_config)
    # bind=True gives the task body access to self.retry.
    async_api = app.task(max_retries=max_retries, bind=True)(async_task)
    if DBSession:
        if event:
            event.listens_for(DBSession, 'after_commit')(send_after_commit_tasks)
        else:
            raise ImportError('You must install sqlalchemy first.')
    return app, async_api
def init_celery_log(loglevel=logging.INFO, **kwargs):
    """Attach a formatted StreamHandler to the root logger and set its level.

    :param loglevel: level applied to the root logger.
    :param kwargs: accepted for interface compatibility (unused).
    :return: the root logger.
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s [%(process)d] [%(levelname)s] %(name)s: %(message)s'))
    root_logger = logging.getLogger('')
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(loglevel)
    return root_logger
|
"""
Dot product of two vectors implemented as parallel lists
"""
from operator import add, mul
from pyske.core.util import fun
__all__ = ['opt_dot_product', 'dot_product']
# ------------------- Dot Product Variant Example -------------
def dot_product(vector1, vector2):
    """
    Compute the dot product of two vectors via map2 then reduce.

    :param vector1: list of numbers representing a vector
    :param vector2: list of numbers representing a vector
    :return: the dot product of the two vectors
    """
    products = vector1.map2(mul, vector2)
    return products.reduce(add, 0)
# ------------------- Dot Product Example -------------------
def opt_dot_product(vector1, vector2, uncurry=fun.uncurry):
    """
    Compute the dot product of two vectors.

    Variant of ``dot_product`` implemented with zip/map/reduce instead of
    map2.

    :param vector1: list of numbers representing a vector
    :param vector2: list of numbers representing a vector
    :param uncurry: (optional) adapts a binary function to take a pair
    :return: the dot product of the two vectors

    Examples::

        >>> from pyske.core import PList
        >>> vector_1 = PList.init(lambda x: x, 10)
        >>> vector_2 = PList.init(lambda x: 1, 10)
        >>> opt_dot_product(vector_1, vector_2)
        45

        >>> vector_1 = PList.init(lambda x: x, 10)
        >>> vector_2 = PList.init(lambda x: 9 - x, 10)
        >>> opt_dot_product(vector_1, vector_2)
        120
    """
    # Fix: the doctests previously exercised dot_product, leaving this
    # function without any example coverage.
    return vector1.zip(vector2).map(uncurry(mul)).reduce(add, 0)
|
import sys
sys.path.append('../')
sys.path.append('../../binary_classifier')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import itertools
from tqdm import tqdm
import pickle
from sklearn.cluster import KMeans
import tensorflow as tf
from tensorflow.keras import models
reward_table = []
def blockshaped(arr, nrows, ncols):
    """
    Return an array of shape (n, nrows, ncols) where
    n * nrows * ncols = arr.size

    If arr is a 2D array, the returned array should look like n subblocks with
    each subblock preserving the "physical" layout of arr.
    """
    height, width = arr.shape
    # Split rows and columns into tiles, then bring the two tile axes together.
    tiled = arr.reshape(height // nrows, nrows, width // ncols, ncols)
    return tiled.transpose(0, 2, 1, 3).reshape(-1, nrows, ncols)
class Quantum_T4_2D:  # 2D
    """RL environment over a 2D grid of image blocks.

    The scan image is padded and cut into bh x bw blocks; each block is
    summarised by 18 statistics (9 sub-block means + 9 stds) that form the
    observation.  A CNN classifier marks blocks containing bias triangles
    ("quantum" locations); reaching one ends an episode with a bonus reward.

    In the test environment we start at a particular pixel location, so the
    blocks cannot be pre-defined and are constructed from that start point.
    """

    def possible_actions_from_location(self, location=None):
        """Return the legal action ids (0-5) at `location` (default: current).

        0/1 decrease/increase d1, 2/3 decrease/increase d2,
        4 increases both, 5 decreases both (matching step()).
        When isRepeat is False, already-visited targets are excluded.
        """
        if location is None:
            location = self.current_pos
        irow, icol = location
        possible_actions = []
        if self.isRepeat is True:
            if irow > 0:  # 0: decrease d1
                possible_actions.append(0)
            if irow < self.dim[0] - 1:  # 1: increase d1
                possible_actions.append(1)
            if icol > 0:  # 2: decrease d2
                possible_actions.append(2)
            if icol < self.dim[1] - 1:  # 3: increase d2
                possible_actions.append(3)
            # Fix: the original comments had actions 4/5 swapped; 4 increases
            # both coordinates and 5 decreases both (see step()).
            if irow < (self.dim[0] - 1) and icol < (self.dim[1] - 1):  # 4: increase d1 and d2
                possible_actions.append(4)
            if (irow > 0) and (icol > 0):  # 5: decrease d1 and d2
                possible_actions.append(5)
        else:
            # Same geometric checks, restricted to unvisited destinations.
            if irow > 0 and self.visit_map[irow - 1, icol] == 0:  # up
                possible_actions.append(0)
            if irow < self.dim[0] - 1 and self.visit_map[irow + 1, icol] == 0:  # down
                possible_actions.append(1)
            if icol > 0 and self.visit_map[irow, icol - 1] == 0:  # left
                possible_actions.append(2)
            if icol < self.dim[1] - 1 and self.visit_map[irow, icol + 1] == 0:  # right
                possible_actions.append(3)
            if irow < (self.dim[0] - 1) and icol < (self.dim[1] - 1) and self.visit_map[
                irow + 1, icol + 1] == 0:  # increase d1 and d2
                possible_actions.append(4)
            if (irow > 0) and (icol > 0) and self.visit_map[irow - 1, icol - 1] == 0:  # decrease d1 and d2
                possible_actions.append(5)
        return possible_actions

    def construct_block_given_starting_locations(self, starting_pixel_locs):
        """Cut the padded image into blocks and precompute per-block features.

        :param starting_pixel_locs: [row, col] pixel index of the start point.
        :return: (patch_data, [nDim1, nDim2], range_data,
                  image_largepatch_data, image_smallpatch_data)
        """
        w1 = self.bh
        w2 = self.bw
        [idx1, idx2] = starting_pixel_locs
        img = self.image
        extended_img = self.extended_image
        self.max_d1, self.max_d2 = img.shape
        # Fix: np.int was removed in NumPy >= 1.24; the builtin int() is the
        # documented replacement and truncates identically here.
        self.current_pos = np.copy([int(idx1 / w1), int(idx2 / w2)])
        self.starting_loc = np.copy(self.current_pos)
        # scale the data to 0-1
        range_data = [np.min(extended_img), np.max(extended_img)]
        nDim1 = math.ceil(self.max_d1 / w1)
        nDim2 = math.ceil(self.max_d2 / w2)
        count = 0
        patch_data = [0] * nDim1
        image_largepatch_data = [0] * nDim1
        image_smallpatch_data = [0] * nDim1
        MaxExtImg_d1 = extended_img.shape[0]
        MaxExtImg_d2 = extended_img.shape[1]
        # Running extrema of sub-patch means/stds, used for normalisation.
        maxMu, minMu, maxSig, minSig = 0, 10, 0, 10
        for ii in range(nDim1):
            patch_data[ii] = [0] * nDim2
            image_largepatch_data[ii] = [0] * nDim2
            image_smallpatch_data[ii] = [0] * nDim2
            for jj in range(nDim2):
                # Expand each block to a 3x-sized window, clipped at borders.
                patch = extended_img[max(0, ii * w1 - w1):min(ii * w1 + w1 + w1, MaxExtImg_d1),
                        max(0, jj * w2 - w2):min(jj * w2 + w2 + w2, MaxExtImg_d2)]  # 2D
                count += 1
                size_patch = patch.shape
                # find p1,p2,p3 equally between l1,l2,l3
                mypp = [0] * 3
                pp_block = [0] * 2  # 2D
                for dd in range(2):  # number of dimensions
                    mypp[dd] = [0] * 5
                    temp = np.linspace(0, size_patch[dd], num=5)
                    temp = temp.astype(int)
                    mypp[dd] = temp.tolist()
                    pp_block[dd] = [[mypp[dd][0], mypp[dd][2]], [mypp[dd][1], mypp[dd][3]],
                                    [mypp[dd][2], mypp[dd][4]]]
                # 3 x 3 overlapping sub-windows per patch.
                pp_blocks = list(itertools.product(*pp_block))
                temp_patch = []
                image_smallpatch_data[ii][jj] = [0] * len(pp_blocks)
                for kk, mypp in enumerate(pp_blocks):
                    temp = patch[mypp[0][0]:mypp[0][1],
                           mypp[1][0]:mypp[1][1]]
                    temp2 = temp
                    temp_patch += [np.mean(temp2), np.std(temp2)]
                    image_smallpatch_data[ii][jj][kk] = np.copy(temp2)
                    minMu = min(minMu, np.mean(temp2))
                    maxMu = max(maxMu, np.mean(temp2))
                    minSig = min(minSig, np.std(temp2))
                    maxSig = max(maxSig, np.std(temp2))
                patch_data[ii][jj] = temp_patch
                image_largepatch_data[ii][jj] = patch
                # The raw mean/std list is replaced by the normalised 18-dim
                # block statistics.
                patch_data[ii][jj] = self.block_splitting(image_largepatch_data[ii][jj], minMu, maxMu, minSig, maxSig)
        return patch_data, [nDim1, nDim2], range_data, image_largepatch_data, image_smallpatch_data

    def block_splitting(self, measurement, minMu, maxMu, minSig, maxSig):
        """Return an 18-feature vector (9 means + 9 stds, range-normalised)
        from 9 overlapping sub-blocks of `measurement`.
        """
        measurement_size = np.shape(measurement)[0]
        n_over_2 = math.floor(measurement_size / 2.0)
        n_over_4 = math.floor(measurement_size / 4.0)
        n_3_over_4 = math.floor(3 * measurement_size / 4.0)
        # Split into blocks: four quadrants, centre, and four edge-centred bands.
        block_1 = measurement[0:n_over_2, 0:n_over_2]
        block_2 = measurement[0:n_over_2, n_over_2:measurement_size]
        block_3 = measurement[n_over_2:measurement_size, 0:n_over_2]
        block_4 = measurement[n_over_2:measurement_size, n_over_2:measurement_size]
        block_5 = measurement[n_over_4:n_3_over_4, n_over_4:n_3_over_4]
        block_6 = measurement[n_over_4:n_3_over_4, 0:n_over_2]
        block_7 = measurement[n_over_4:n_3_over_4, n_over_2:measurement_size]
        block_8 = measurement[0:n_over_2, n_over_4:n_3_over_4]
        block_9 = measurement[n_over_2:measurement_size, n_over_4:n_3_over_4]
        # Concatenate data into single 18-feature array:
        mean_current = np.array(
            [np.mean(block_1), np.mean(block_2), np.mean(block_3), np.mean(block_4), np.mean(block_5), np.mean(block_6),
             np.mean(block_7), np.mean(block_8), np.mean(block_9)])
        stds_current = np.array(
            [np.std(block_1), np.std(block_2), np.std(block_3), np.std(block_4), np.std(block_5), np.std(block_6),
             np.std(block_7), np.std(block_8), np.std(block_9)])
        normalised_mean = np.zeros_like(mean_current)
        normalised_stds = np.zeros_like(stds_current)
        for i in range(9):
            normalised_mean[i] = mean_current[i] / (maxMu - minMu)
            normalised_stds[i] = stds_current[i] / (maxSig - minSig)
        current_statistics = np.concatenate((normalised_mean, normalised_stds))
        return current_statistics

    def take_reward_table(self):
        """Reward grid: small step penalty everywhere, +5 at quantum blocks."""
        reward_table = (-0.05) * np.ones((self.dim))
        [id1, id2] = np.where(self.isquantum == 1)
        reward_table[id1, id2] = 5
        return reward_table

    def __init__(self, file_name="", image=None, file=True, starting_pixel_loc=(0, 0), bh=18, bw=18, isRepeat=True, offset=0.0):
        """Build the environment from a file (`file=True`) or an array.

        :param file_name: basename under ../data/ (.p pickle or .npy).
        :param image: 2D array used directly when file is False.
        :param starting_pixel_loc: [row, col] pixel indices (not block indices).
            Default changed from a mutable list to an equivalent tuple.
        :param bh, bw: block height / width in pixels.
        :param isRepeat: allow re-selecting visited locations.
        :param offset: additive offset applied to the loaded image.
        """
        self.isRepeat = isRepeat  # allow reselect visited location
        self.bw = bw  # block width
        self.bh = bh  # block height
        # action space
        self.K = 6
        self.offset = offset
        if file == False:
            self.image = image
        else:
            # load a data scan into memory; try pickle first, then .npy
            try:
                strFile = "../data/{}.p".format(file_name)
                self.image = pickle.load(open(strFile, "rb"))
            except Exception:  # narrowed from a bare except
                strFile = "../data/{}.npy".format(file_name)
                self.image = np.load(strFile)
        self.image = self.image + self.offset
        # Replace negative readings with the smallest positive value, then
        # rescale the whole image to [0, 1].
        idxPos = np.where(self.image > 0)
        min_pos = np.min(self.image[idxPos])
        idxNegative = np.where(self.image < 0)
        self.image[idxNegative] = min_pos
        self.image = (self.image - np.min(self.image)) / (np.max(self.image) - np.min(self.image))
        self.img_dim = self.image.shape
        # padding in all four directions (edge values, half a block each side)
        self.extended_image = np.pad(self.image, (int(self.bh / 2), int(self.bw / 2)), 'edge')
        # based on this location, construct the block data
        self.data, self.dim, self.range_data, self.image_largepatch_data, self.image_smallpatch_data = \
            self.construct_block_given_starting_locations(starting_pixel_loc)
        # D is the dimension of each patch; self.dim is the block-grid shape
        self.D = len(self.data[0][0])
        self.current_pos = np.copy(self.starting_loc)
        self.pre_classify()
        self.where_is_quantum()
        self.reward_table = self.take_reward_table()
        self.visit_map = np.zeros_like(self.reward_table)

    def pre_classify(self):
        """Derive per-image thresholds from the two centre line traces."""
        self.mid_point_x = math.floor(len(self.image[:, 0]) / 2.0)
        self.mid_point_y = math.floor(len(self.image[0, :]) / 2.0)
        self.trace_x = self.image[self.mid_point_x, :]
        self.trace_y = self.image[:, self.mid_point_y]
        self.trace_range = max(self.trace_x) - min(self.trace_x)
        self.threshold_1 = self.trace_range * 0.3
        self.threshold_2 = self.trace_range * 0.02
    def get_state_and_location(self):
        """Reset the visit map and return (observation, current position)."""
        id1, id2 = self.current_pos
        self.visit_map = np.zeros_like(self.reward_table)
        return np.reshape(self.data[id1][id2], (-1, 2 * 9)), np.copy(self.current_pos)
    def get_state(self, positions):
        """Observation (1 x 18) for an arbitrary block position."""
        id1, id2 = positions
        return np.reshape(self.data[id1][id2], (-1, 2 * 9))
    def current_state(self):
        """Observation (1 x 18) for the current position."""
        id1, id2 = self.current_pos
        return np.reshape(self.data[id1][id2], (-1, 2 * 9))
    def get_reward(self, positions):
        """Table reward minus a revisit penalty of 0.5 per prior visit."""
        id1, id2 = positions
        r = self.reward_table[id1, id2]
        r = r - 0.5 * self.visit_map[id1, id2]
        return r

    def get_neightborMapIndividual(self, location):
        """6-dim normalised visit counts of the six reachable neighbours
        (0 for off-grid; unvisited cells are encoded as -1/norm_factor).
        """
        id1, id2 = location
        norm_factor = 5.0
        output = []
        if id1 == 0:  # decrease d1
            output.append(0)
        else:
            output.append(self.visit_map[id1 - 1, id2] / norm_factor)
        if id1 == self.dim[0] - 1:  # increase d1
            output.append(0)
        else:
            output.append(self.visit_map[id1 + 1, id2] / norm_factor)
        if id2 == 0:  # decrease d2
            output.append(0)
        else:
            output.append(self.visit_map[id1, id2 - 1] / norm_factor)
        if id2 == self.dim[1] - 1:  # increase d2
            output.append(0)
        else:
            output.append(self.visit_map[id1, id2 + 1] / norm_factor)
        if id1 < self.dim[0] - 1 and id2 < self.dim[1] - 1:  # increase d1 and d2
            output.append(self.visit_map[id1 + 1, id2 + 1] / norm_factor)
        else:
            output.append(0)
        if id1 > 0 and id2 > 0:  # decrease d1 and d2
            output.append(self.visit_map[id1 - 1, id2 - 1] / norm_factor)
        else:
            output.append(0)
        # replace zero by -1 (unvisited marker)
        output2 = [-1 / norm_factor if o == 0 else o * 1 for o in output]
        return output2
    def get_neighborMap(self, locations):
        """Vectorised get_neightborMapIndividual over one or many locations."""
        locations = np.asarray(locations)
        if len(locations.shape) == 1:  # 1 data point
            output = self.get_neightborMapIndividual(locations)
        else:
            output = np.apply_along_axis(self.get_neightborMapIndividual, 1, locations)
        return output
    def set_session(self, session):
        # Stored for callers that drive a TF session externally.
        self.session = session

    def step(self, action):
        """Apply an action and return (obs, reward, done, location).

        Actions: 0 decrease d1, 1 increase d1, 2 decrease d2, 3 increase d2,
        4 increase both, 5 decrease both.  Stepping off the grid terminates
        with reward -8; reaching a quantum block adds +10 and terminates.
        """
        flagoutside = 0
        if action == 0:
            if self.current_pos[0] == 0:
                flagoutside = 1
                print("cannot decrease d1")
            else:
                self.current_pos[0] = self.current_pos[0] - 1
        elif action == 1:
            if self.current_pos[0] == self.dim[0] - 1:
                flagoutside = 1
                print("cannot increase d1")
            else:
                self.current_pos[0] = self.current_pos[0] + 1
        elif action == 2:
            if self.current_pos[1] == 0:
                flagoutside = 1
                print("cannot decrease d2")
            else:
                self.current_pos[1] = self.current_pos[1] - 1
        elif action == 3:
            if self.current_pos[1] == self.dim[1] - 1:
                flagoutside = 1
                # Fix: message said "decrease" for the increase action.
                print("cannot increase d2")
            else:
                self.current_pos[1] = self.current_pos[1] + 1
        elif action == 4:
            if self.current_pos[0] < self.dim[0] - 1 and self.current_pos[1] < self.dim[1] - 1:
                self.current_pos[1] = self.current_pos[1] + 1
                self.current_pos[0] = self.current_pos[0] + 1
            else:
                flagoutside = 1
                print("cannot increase both d1 and d2")
        elif action == 5:
            if self.current_pos[0] > 0 and self.current_pos[1] > 0:
                self.current_pos[1] = self.current_pos[1] - 1
                self.current_pos[0] = self.current_pos[0] - 1
            else:
                flagoutside = 1
                print("cannot decrease both d1 and d2")
        else:
            print("action is 0-6")
        id1, id2 = self.current_pos
        if flagoutside == 1:
            loc_x = np.copy(self.current_pos)
            r = -8  # terminate
            done = True
            obs = self.data[id1][id2]
        else:
            if self.visit_map[id1, id2] == 1:
                # Revisited cell: zero observation, no reward, episode goes on.
                r = 0
                obs = np.zeros_like(self.data[id1][id2])
                done = False
                loc_x = np.copy(self.current_pos)
                return obs, r, done, loc_x
            r = self.get_reward(self.current_pos)
            self.visit_map[id1, id2] += 1
            done = False
            obs = self.data[id1][id2]
            if self.isquantum is None:
                self.where_is_quantum()
            if self.isquantum[id1, id2] == 1:
                r += 10
                done = True
            loc_x = np.copy(self.current_pos)
        return obs, r, done, loc_x

    def normalise(self, x):
        """Min-max scale an array to [0, 1]."""
        x_max = np.amax(x)
        x_min = np.amin(x)
        y = (x - x_min) / (x_max - x_min)
        return y
    def load_cnn(self):
        """Load the pre-trained bias-triangle binary classifier (Keras)."""
        model_binary_classifier = models.load_model(
            '../../classifier/bias_triangle_binary_classifier.h5')
        return model_binary_classifier

    def check_for_bias_triangle(self, ii, jj):
        """Return 1 when block (ii, jj) likely holds a bias triangle.

        Cheap pre-filter first (any sub-block mean within the trace-derived
        thresholds), then the CNN on the resized 32x32 large patch.
        """
        statistics = self.data[ii][jj]
        means = statistics[:9]
        for mean in means:
            if (mean > self.threshold_2) and (mean < self.threshold_1):
                self.threshold_test[ii, jj] += 1
        if self.threshold_test[ii, jj] == 0:
            return 0
        large_patch = self.image_largepatch_data[ii][jj]
        x, y = np.shape(large_patch)
        test_image = tf.image.resize(self.normalise(np.array(large_patch)).reshape(-1, x, y, 1), (32, 32))
        self.prediction[ii, jj] = self.model_binary_classifier.predict(test_image, steps=1)
        # 0.7 confidence cut-off on the classifier output.
        if self.prediction[ii, jj] > 0.7:
            return 1
        else:
            return 0

    def where_is_quantum(self):
        """Classify every block; fills and returns the 0/1 quantum map."""
        self.model_binary_classifier = self.load_cnn()
        ndim1, ndim2 = self.dim
        self.isquantum = np.zeros(self.dim)
        self.threshold_test = np.zeros(self.dim)
        self.prediction = np.zeros(self.dim)
        for ii in tqdm(range(ndim1)):
            for jj in range(ndim2):
                self.isquantum[ii, jj] = self.check_for_bias_triangle(ii, jj)
        return self.isquantum

    def reset_at_rand_loc(self):
        """Reset to a uniformly random block; returns (obs, position)."""
        self.current_pos = [np.random.randint(0, self.dim[0]), np.random.randint(0, self.dim[1])]
        id1, id2 = self.current_pos
        self.visit_map = np.zeros_like(self.reward_table)
        return self.data[id1][id2], np.copy(self.current_pos)
    def reset_at_loc(self, loc):
        """Reset to a given block (also becomes the new starting location)."""
        self.starting_loc = loc
        self.current_pos = loc
        id1, id2 = self.current_pos
        self.visit_map = np.zeros_like(self.reward_table)
        return self.data[id1][id2], np.copy(self.current_pos)
    def reset(self):
        """Reset to the original starting location."""
        self.current_pos = np.copy(self.starting_loc)
        id1, id2 = self.current_pos
        self.visit_map = np.zeros_like(self.reward_table)
        return self.data[id1][id2], np.copy(self.current_pos)
|
#Simple assignment
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
import re
import json
import time
import random
# Pattern capturing the BV id from a bilibili video URL.
bvre = r"https:\/\/www\.bilibili\.com\/video\/(?P<bv>BV\w+)"
options = Options()
# 'eager' returns as soon as DOMContentLoaded fires instead of waiting for
# every sub-resource — faster crawling.
options.page_load_strategy = 'eager'
# NOTE(review): passing the driver path positionally is deprecated in
# Selenium 4; prefer Service(executable_path=...).
driver = Chrome('D:/zf-download/chromedriver',options=options)
#sub_areas = ['digital', 'application', 'computer_tech', 'industry', 'diy']
#sub_areas = ['science','social_science','humanity_history','business','campus','career','design','skill']
#sub_areas = ['career','design','skill']
#areas = ['douga','music','dance','game','knowledge','tech','sports','car','life','food','animal','fashion','information','ent']
# Top-level areas to crawl in this run.
areas = ['game','knowledge','tech','sports','car','food','animal','fashion','information','ent']
def getURL(area: str = 'tech', sub_area: str = '', page=1, period=['09', '11']):
    """Build a bilibili listing URL.

    With a sub_area: the paginated all/click listing restricted to the
    2021 month window given by `period`; otherwise the area landing page.
    """
    base = f'https://www.bilibili.com/v/{area}/'
    if not sub_area:
        return base
    return base + f'{sub_area}/#/all/click/0/{page}/2021-{period[0]}-01,2021-{period[1]}-30'
def getBV(url: str):
    """Extract the BV id from a bilibili video URL.

    Fix: previously relied on the module-level `bvre` and raised an opaque
    AttributeError (`None.group`) on non-matching URLs; now the pattern is
    local and a descriptive ValueError is raised instead.
    """
    match = re.match(r"https:\/\/www\.bilibili\.com\/video\/(?P<bv>BV\w+)", url)
    if match is None:
        raise ValueError(f"not a bilibili video URL: {url}")
    return match.group('bv')
def get_sub_areas(hrefs):
    """Map anchor elements to the second-to-last path segment of their href
    (the sub-area slug, since listing URLs end with a trailing slash)."""
    slugs = []
    for anchor in hrefs:
        slugs.append(anchor.get_attribute('href').split('/')[-2])
    return slugs
# Crawl every area: open its landing page, collect the sub-area links, then
# walk up to 50 result pages per sub-area, appending "<BV id> <title>" lines
# to data/<area>.txt.
for area in areas:
    with open(f'data/{area}.txt','a', encoding='utf-8') as dataFile:
        driver.get(getURL(area=area))
        hrefs = []
        # The sub-navigation renders asynchronously; poll until it appears.
        while not len(hrefs):
            hrefs = driver.find_elements(by='css selector',value='#subnav a')[1:]
        sub_areas = get_sub_areas(hrefs)
        for sub_area in sub_areas:
            if sub_area in ['match']:
                # NOTE(review): `break` abandons all remaining sub-areas of
                # this area, not just 'match' — confirm `continue` wasn't
                # intended; behaviour kept as-is.
                break
            empty = False
            for page in range(50):
                driver.get(getURL(page=page+1, sub_area=sub_area, area=area))
                hrefs = []
                trial = 0
                while(len(hrefs)==0):
                    time.sleep(1+random.random()*2)
                    if len(driver.find_elements(by='css selector',value='.empty')) or len(driver.find_elements(by='css selector',value='.error-404')):
                        empty = True
                        break
                    hrefs = driver.find_elements(by='css selector',value='.vd-list-cnt .title')
                    trial += 1
                    if trial>60:
                        # Fix: the retry reload previously omitted `area=area`
                        # and silently fell back to the default 'tech' area.
                        driver.get(getURL(page=page+1, sub_area=sub_area, area=area))
                        trial = 0
                if empty:
                    break
                for href in hrefs:
                    print(getBV(href.get_attribute('href')), href.text,file=dataFile)
import numpy as np
import cv2 as cv
from modules.Node import Node
def calculate_perimiter(bbox_image):
    """Count contour pixels (value == 1) in a binary boundary image."""
    return sum(1 for row in bbox_image for pix in row if pix == 1)
def calculate_area(segment):
    """Segment area = number of pixel coordinates it contains."""
    coordinates = segment["cordinates"]
    return len(coordinates)
def calculate_Malinowska_ratio(area, perimeter):
    """Malinowska shape coefficient: 0 for a perfect circle, growing with
    boundary irregularity (perimeter relative to an equal-area circle)."""
    circle_perimeter = 2 * np.sqrt(np.pi * area)
    return perimeter / circle_perimeter - 1
def calculate_moment(p, q, segment):
    """Raw geometric moment m_pq summed over the segment's pixel nodes
    (each node exposes .row and .col)."""
    return sum(node.row ** p * node.col ** q for node in segment["cordinates"])
def find_center(segment):
    """Centroid of the segment as (row, col) = (m10/m00, m01/m00)."""
    m00 = calculate_moment(0, 0, segment)
    row_center = calculate_moment(1, 0, segment) / m00
    col_center = calculate_moment(0, 1, segment) / m00
    return row_center, col_center
def calculate_central_moment(segment, p, q, row_center=-1, col_center=-1):
    """Central moment mu_pq about (row_center, col_center); -1 for either
    center means "compute the centroid" (valid centroids are non-negative)."""
    if row_center == -1 or col_center == -1:
        row_center, col_center = find_center(segment)
    return sum((node.row - row_center) ** p * (node.col - col_center) ** q
               for node in segment["cordinates"])
def calculate_invariants(segment):
    """Compute the normalised moment invariants M1..M10 of a segment.

    results[0] is unused (kept so results[i] is Mi); M is the 4x4 matrix of
    central moments mu_pq.

    Fix: M10 previously used ``M[0,3]*M[2,1] - M[1,2]`` where the standard
    invariant formula requires ``M[1,2]**2``.
    """
    results = [0] * 11
    m00 = calculate_moment(0, 0, segment)
    row_center, col_center = find_center(segment)
    # Precompute all central moments up to order 3 about the centroid.
    M = np.zeros((4, 4))
    for p in range(4):
        for q in range(4):
            M[p, q] = calculate_central_moment(segment, p, q, row_center, col_center)
    #M1
    results[1] = (M[2, 0] + M[0, 2]) / m00**2
    #M2
    results[2] = ((M[2, 0] + M[0, 2])**2 + 4 * M[1, 1] ** 2) / m00**4
    #M3
    results[3] = ((M[3, 0] - 3 * M[1, 2])**2 + (3 * M[2, 1] - M[0, 3])**2) / m00**5
    #M4
    results[4] = ((M[3, 0] + M[1, 2])**2 + (M[2, 1] + M[0, 3])**2) / m00**5
    #M5
    results[5] = ((M[3, 0] - 3 * M[1, 2]) * (M[3, 0] + M[1, 2]) *
                  ((M[3, 0] + M[1, 2])**2 - 3 * (M[2, 1] + M[0, 3])**2) +
                  (3 * M[2, 1] - M[0, 3]) * (M[2, 1] + M[0, 3]) *
                  (3 * (M[3, 0] + M[1, 2])**2 - (M[2, 1] + M[0, 3])**2)
                  ) / m00** 10
    #M6
    results[6] = ((M[2, 0] - M[0, 2])*((M[3, 0] + M[1, 2])**2 - (M[2, 1] + M[0, 3])**2) +
                  4 * M[1, 1] * (M[3, 0] + M[1, 2]) * (M[2, 1] + M[0, 3])) / m00**7
    #M7
    results[7] = (M[2, 0] * M[0, 2] - M[1, 1]**2) / m00**4
    #M8
    results[8] = (M[3, 0] * M[1, 2] + M[2, 1] * M[0, 3] - M[1, 2]**2 - M[2, 1]**2) / pow(m00, 5)
    #M9
    results[9] = (M[2, 0] * (M[2, 1] * M[0, 3] - M[1, 2]**2) +
                  M[0, 2] * (M[0, 3] * M[1, 2] - M[2, 1]**2) -
                  M[1, 1] * (M[3, 0] * M[0, 3] - M[2, 1] * M[1, 2])) / m00**7
    #M10 (second factor corrected: M[1,2]**2, not M[1,2])
    results[10] = ((M[3, 0] * M[0, 3] - M[1, 2] * M[2, 1])**2 -
                   4*(M[3, 0]*M[1, 2] - M[2, 1]**2)*(M[0, 3] * M[2, 1] - M[1, 2]**2)) / m00**10
    return results
def check_segment(features, base_features, std_deviations):
    """Accept a segment when enough features lie near the reference.

    Each feature within 5 sigma of its base value votes +1, otherwise -1;
    the segment passes when the total is at least len(features) - 4.
    """
    votes = [1 if abs(feat - base) < 5 * sigma else -1
             for feat, base, sigma in zip(features, base_features, std_deviations)]
    return sum(votes) >= len(features) - 4
import random
from time import sleep
# Guess-the-die mini game (pt-BR prompts kept verbatim).
nome = input('Olá, qual é o seu nome? ')
print('Olá {}, seja bem vindo ao nosso canal de jogos!!'.format(nome))
jogo = input('{}, sorteamos um jogo legal para que você conheça um pouco sobre a nossa plataforma de games, o jogo do dado. Você gostaria de conhecer esse jogo? '.format(nome))
# Fix: `jogo == ('sim' and 'Sim' and 'SIM')` evaluated to `jogo == 'SIM'`,
# so typing "sim" or "Sim" was rejected; compare case-insensitively instead.
if jogo.strip().lower() == 'sim':
    print('Que legal, vamos começar a se divertir!!!')
    sleep(1)
    adv = input('Qual numero você acha que vai cair? ')
    print('Jogue o dado {}!'.format(nome))
    sorteio = random.choice(range(1, 11))
    sleep(2)
    print('jogando dado...')
    sleep(3)
    # Fix: the original compared the raw input string to an int, which could
    # never be equal; compare both as text.
    if adv.strip() == str(sorteio):
        print('O dado caiu com o numero {} virado para cima, você acertou!'.format(sorteio))
    else:
        print('Que pena, você errou, tente me superar!!!')
else:
    print('Tudo bem, volte outra hora que estarei aqui para brincarmos!!')
|
# Assume you have a method isSubstring which checks if one word is a substring of another. given two strings, s1 and s2,
# write code to check if s2 is a rotation of s1 using only one call to isSubstring(e.g. "waterbottle" is a rotation of "erbottlewat")
def isSubtring(original, substring):
    """Return True if *substring* occurs inside *original*."""
    return substring in original

def isRotation(text1, text2):
    """Return True iff text2 is a rotation of text1, using a single
    isSubtring call (e.g. "erbottlewat" is a rotation of "waterbottle").

    Fix: without the length guard, any substring of text1+text1 (e.g.
    "a" vs "ab") was wrongly accepted as a rotation.
    """
    if len(text1) != len(text2):
        return False
    return isSubtring(text1 + text1, text2)

print(isRotation("waterbottle", "erbottlewat"))
import collections
import pathlib
import numpy as np
import torch
import yolov3
def load_network(
    config_path: pathlib.Path,
    weights_path: pathlib.Path = None,
    device: str = "cuda",
) -> yolov3.Darknet:
    """Build a Darknet model from a config file and optional weights, placed
    on `device`.

    :param config_path: path to the yolov3 .cfg file.
    :param weights_path: optional path to darknet .weights; skipped if None.
    :param device: torch device string ("cuda", "cuda:0", "cpu", ...).
    """
    model = yolov3.Darknet(config_path, device=device)
    if weights_path is not None:
        model.load_weights(weights_path)
    # Fix: the unconditional model.cuda(device=device) ignored/broke the
    # device parameter for non-CUDA devices; .to() honours it.
    model.to(device)
    return model
def get_test_input(model: "yolov3.Darknet", device: str = "cuda"):
    """Return a random image batch tensor (1, 3, H, W) in [0, 1] matching the
    network's configured input size."""
    height = model.net_info["height"]
    width = model.net_info["width"]
    pixels = np.random.randint(0, 255, (3, height, width))
    batch = pixels[np.newaxis, ...].astype(np.float32) / 255.0
    return torch.tensor(batch, device=device)
def rectify_state_dicts(
    dict1: collections.OrderedDict, dict2: collections.OrderedDict
) -> collections.OrderedDict:
    """Keep only entries of dict1 whose key also exists in dict2 with an
    identical tensor shape (order of dict1 preserved)."""
    compatible = collections.OrderedDict(
        (key, value)
        for key, value in dict1.items()
        if key in dict2 and value.shape == dict2[key].shape
    )
    return compatible
if __name__ == "__main__":
    # Transfer the weights of a pretrained yolov3 network into a new xView
    # variant: layers whose name and shape match are copied, the rest keep
    # their fresh initialisation.
    orig_config_path = "../../pytorch-yolov3/models/yolov3.cfg"
    new_config_path = "yolov3_xview.cfg"
    weights_path = "../../pytorch-yolov3/models/yolov3.weights"
    net1 = load_network(orig_config_path, weights_path)
    # The new network is built without weights (random init).
    net2 = load_network(new_config_path)
    state_dict1 = net1.state_dict()
    state_dict2 = net2.state_dict()
    rectified_state_dict = rectify_state_dicts(state_dict1, state_dict2)
    # strict=False: keys absent from the rectified dict stay untouched.
    net2.load_state_dict(rectified_state_dict, strict=False)
    # Save the new state dict.
    torch.save(net2.state_dict(), "yolov3_xview.pth")
|
from time import sleep
from robit.core.alert import Alert
from robit.core.clock import Clock
from robit.core.web_client import post_worker_data_to_monitor
from robit.job.group import Group
from robit.core.health import Health
from robit.core.id import Id
from robit.core.name import Name
from robit.core.status import Status
from robit.worker.web_server import WorkerWebServer
class Worker:
    """Runs groups of scheduled jobs, optionally serving a status web UI and
    pushing health data to an external monitor."""

    def __init__(
            self,
            name: str,
            web_server: bool = False,
            web_server_address: str = '127.0.0.1',
            web_server_port: int = 8100,
            key: str = None,
            monitor_address: str = None,
            monitor_port: int = 8200,
            monitor_key: str = None,
            utc_offset: int = 0,
            **kwargs,
    ):
        self.id = Id()
        self.name = Name(name)
        self.clock = Clock(utc_offset=utc_offset)
        self.health = Health()
        self.status = Status()
        # The embedded web server is optional; None disables it below.
        if web_server:
            self.web_server = WorkerWebServer(
                address=web_server_address,
                port=web_server_port,
                key=key,
                html_replace_dict={'title': str(self.name)}
            )
        else:
            self.web_server = None
        self.monitor_address = monitor_address
        self.monitor_port = monitor_port
        self.monitor_key = monitor_key
        # An Alert is created only when an alert_method was supplied.
        self.alert = Alert(**kwargs) if 'alert_method' in kwargs else None
        self.group_dict = dict()

    def add_group(self, name, **kwargs):
        """Create the named job group once; later calls are no-ops."""
        if name in self.group_dict:
            return
        self.group_dict[name] = Group(name=name, utc_offset=self.clock.utc_offset, **kwargs)

    def add_job(self, name, method, group='Default', **kwargs):
        """Register a job, creating its group on demand."""
        self.add_group(group)
        self.group_dict[group].add_job(name, method, **kwargs)

    def as_dict(self):
        """Full worker state for the worker's own web UI."""
        return {
            'id': str(self.id),
            'name': str(self.name),
            'groups': self.calculate_groups_to_list(),
            'health': str(self.health),
            'status': str(self.status),
            'clock': self.clock.as_dict(),
            'job_details': self.job_detail_dict()
        }

    def as_dict_to_monitor(self):
        """Reduced state pushed to the external monitor."""
        return {
            'id': str(self.id),
            'name': str(self.name),
            'health': str(self.health),
            'clock': self.clock.as_dict(),
        }

    def calculate_groups_to_list(self):
        """Serialize every group to a list of dicts."""
        return [grp.as_dict() for grp in self.group_dict.values()]

    def calculate_health(self):
        """Recompute worker health as the running average of group health."""
        self.health.reset()
        for grp in self.group_dict.values():
            self.health.average(grp.health.percentage)

    def job_detail_dict(self):
        """Merge the full job listings of all groups into one dict."""
        details = dict()
        for grp in self.group_dict.values():
            details.update(grp.job_list_as_dict_full())
        return details

    def restart(self):
        pass

    def run_group_dict(self):
        """Start every group's job scheduler."""
        for grp in self.group_dict.values():
            grp.start()

    def start(self):
        """Start groups (and web server), then loop forever refreshing
        health, alerts, the API dict and the monitor push once per second."""
        if self.web_server:
            self.web_server.start()
        self.run_group_dict()
        while True:
            self.calculate_health()
            if self.alert:
                self.alert.check_health_threshold(f'Worker "{self.name}"', self.health)
            if self.web_server:
                self.web_server.update_api_dict(self.as_dict())
            if self.monitor_address:
                post_worker_data_to_monitor(self.monitor_address, self.monitor_key, self.as_dict_to_monitor())
            sleep(1)

    def stop(self):
        pass
|
from flask import Flask,render_template,request,redirect,session,url_for,flash
from flask_mysqldb import MySQL
from datetime import datetime,time,date
from flask_login import LoginManager
from flask_login import login_required
from flask_mail import Mail, Message
from dateutil import relativedelta
import uuid
import re
app=Flask(__name__)
# NOTE(review): this name is rebound by the `login` view function below —
# confirm that is intended.
login = LoginManager(app)
mail=Mail(app)
# NOTE(review): placeholder secret key — replace before deployment.
app.secret_key = 'your secret key'
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'MYDB'
mysql = MySQL(app)
#app.config['MAIL_SERVER']='smtp.gmail.com'
#app.config['MAIL_PORT'] = 465
#app.config['MAIL_USERNAME'] = 'cyz@gmail.com'
#app.config['MAIL_PASSWORD'] = "uhhgksglkd"
#app.config['MAIL_USE_TLS'] = False
#app.config['MAIL_USE_SSL'] = True
#mail = Mail(app)
# Loose email-format check used by the signup view. (Not a raw string, but
# '\w' and '\.' survive as intended escape sequences.)
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
app.static_folder = 'static'
@app.route('/send')
def send():
    """Render the password-recovery ("forgot") form."""
    return render_template('forgot.html')
@app.route("/mail",methods=['POST','GET'])
def s_mail():
    """Handle the password-recovery form: build a notification mail for the
    submitted address."""
    if(request.method=='POST'):
        name=request.form['username']  # also validates the field is present
        email=request.form['email']
        # Fix: Flask-Mail expects `recipients` to be a LIST of addresses; a
        # bare string is iterated character by character.
        msg = Message('Hello', sender = 'cyz@gmail.com', recipients = [email])
        msg.body = "Hello Flask message sent from Flask-Mail"
        # NOTE(review): the message is built but never sent (the mail-server
        # config above is commented out) — call mail.send(msg) once enabled.
        return "hello"
    # Fix: GET requests previously returned None (a 500 error).
    return render_template('forgot.html')
@app.route('/')
def start():
    """Render the landing page (front.html)."""
    return render_template('front.html')
@app.route('/signup')
def signup():
    """Render the account-creation form (signin.html)."""
    return render_template('signin.html')
def calculate_age(born):
    """Return the age in completed years for a birth date/datetime `born`.

    Fix: the previous element-wise ``timetuple`` comparison required *every*
    field (month AND day AND time-of-day...) to be >=, so e.g. a May-20
    birthday evaluated in December-05 was miscounted by one year.
    """
    today = datetime.today()
    years = today.year - born.year
    # Subtract one if this year's birthday hasn't occurred yet.
    if (today.month, today.day) < (born.month, born.day):
        years -= 1
    return years
@app.route('/signin',methods=['POST','GET'])
def create():
    """Validate the signup form and insert a new `person` row.

    Field-specific flash categories drive per-field error display in
    signin.html; on success the user is returned to the login page.
    """
    if request.method=='POST':
        f_name=request.form['first']
        m_name=request.form['middle']
        l_name=request.form['last']
        email=request.form['email']
        password=request.form['password']
        city=request.form['city']
        state=request.form['state']
        zip_code=request.form['zip']  # renamed: `zip` shadowed the builtin
        aadhar=request.form['aadhar']
        mobile=request.form['mobile']
        username=request.form['name']
        date=request.form['date']
        if(len(f_name)==0 or len(email)==0 or len(password)==0 or len(city)==0 or len(state)==0 or len(zip_code)==0 or len(aadhar)==0 or len(mobile)==0 or len(username)==0 or len(date)==0):
            flash('*Fill in all the information','all')
            return render_template('signin.html')
        else:
            if(re.search(regex,email)):
                print("Valid Email")
            else:
                flash('Enter a Valid Email','email')
                return render_template('signin.html')
            # Password policy: length >= 8, lower + upper case, one of _@$,
            # no whitespace.
            flag = 0
            if (len(password)<8):
                flag = -1
            elif not re.search("[a-z]", password):
                flag = -1
            elif not re.search("[A-Z]", password):
                flag = -1
            elif not re.search("[_@$]", password):
                flag = -1
            elif re.search(r"\s", password):
                flag = -1
            else:
                flag = 0
                print("Valid Password")
            if flag ==-1:
                flash('Length>7 must Contain [@_-!],[A-Z],[a-z]','password')
                return render_template('signin.html')
            if(len(mobile)!=10):
                flash('Enter a Valid Mobile No.','mob')
                return render_template('signin.html')
            if(len(zip_code)!=6) :
                flash('Invalid','zip')
                return render_template('signin.html')
            if(len(aadhar)!=12):
                flash('Invalid','aadhar')
                return render_template('signin.html')
            cur = mysql.connection.cursor()
            try:
                # NOTE(review): the password is stored in plain text — hash it
                # (e.g. werkzeug.security.generate_password_hash) before storing.
                query="INSERT INTO person(first,middle,last,email,password,City,State,Zip,Aadhar,Mobile,name,date,acc_type) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
                cur.execute(query,(f_name,m_name,l_name,email,password,city,state,zip_code,aadhar,mobile,username,date,'User'))
                mysql.connection.commit()
                cur.close()
                flash('Account Created Please LogIn')
                return render_template('front.html')
                # Fix: an unreachable second `return render_template("signin.html")`
                # after the return above has been removed.
            except Exception:
                # Narrowed from a bare `except:`. NOTE(review): any DB failure
                # (not only duplicate emails) still lands here.
                return "This Email has already been registered"
@app.route('/login',methods=['POST','GET'])
def login():
    """Authenticate a user or admin against the `person` table.

    NOTE(review): this view rebinds the module-level `login` (LoginManager
    instance); the route still registers, but consider renaming one of them.
    """
    if(request.method=='POST'):
        user=request.form['tvalue']  # account type: 'User' or 'Admin'
        name=request.form['username']
        email=request.form['email']
        password=request.form['password']
        if(len(name)==0 or len(email)==0 or len(password)==0):
            flash('*Fill in all the information')
            return render_template('front.html')
        else:
            cur = mysql.connection.cursor()
            query="SELECT * FROM person WHERE email=%s AND name=%s AND acc_type=%s"
            data=(email,name,user)
            cur.execute(query,data)
            fetchdata=cur.fetchall()
            if(len(fetchdata)==0):
                flash('*Please enter valid details')
                return render_template('front.html')
            # NOTE(review): passwords are compared in plain text (column 5).
            if(user=='User'):
                if(fetchdata[0][5]==password):
                    session['loggedin'] = True
                    session['id'] = fetchdata[0][0]
                    session['username'] = fetchdata[0][11]
                    page=0
                    return redirect(url_for('sidebar',name=fetchdata[0][11],email=fetchdata[0][4],page=page))
                else:
                    flash('*Please enter valid details')
                    return render_template('front.html')
            elif(user=='Admin'):
                if(fetchdata[0][5]==password):
                    session['loggedin'] = True
                    session['id'] = fetchdata[0][0]
                    session['username'] = fetchdata[0][11]
                    return redirect(url_for('admin',name=fetchdata[0][11],ap=1))
                else:
                    flash('*Please enter valid details')
                    return render_template('front.html')
    # Fix: GET requests and unknown account types previously fell through and
    # returned None, which Flask turns into a 500 error.
    return render_template('front.html')
def diff_month(d1, d2):
    """Whole-month difference d1 - d2 (positive when d1 is later).

    Fix: removed a stray debug print of d1.year.
    """
    return (d1.year - d2.year) * 12 + d1.month - d2.month
@app.route("/sidebar/<name>/<email>/<int:page>")
def sidebar(name,email,page):
    """User dashboard dispatcher.

    Renders one of five templates selected by `page`:
    0 sidebar.html (overview), 1 perm.html, 2 renewal.html, 3 lost.html,
    4 details.html.  Requires the session user to match `name`.
    """
    #try:
    if(name==session['username']):
        cur = mysql.connection.cursor()
        query="SELECT * FROM person WHERE email=%s AND name=%s"
        data=(email ,name)
        cur.execute(query,data)
        fetchdata=cur.fetchall()
        # Column 12 holds the date of birth.
        age=calculate_age(fetchdata[0][12])
        al=0
        # Application/card state for this user, if any row exists.
        query1="SELECT apply,permanent,renewal,uni_id,date,lost FROM users WHERE email=%s AND name=%s"
        data=(email,name)
        cur.execute(query1,data)
        fetch1=cur.fetchall()
        print(fetch1)
        if(page==0):
            # Overview: pass application flags when a users row exists,
            # zeros otherwise.
            if(len(fetch1)!=0):
                return render_template('sidebar.html',l=list(fetchdata[0]),age=age,al=fetch1[0][0],per=fetch1[0][1],re=fetch1[0][2],id=fetch1[0][3],dt=fetch1[0][4],page=0)
            else:
                return render_template('sidebar.html',l=list(fetchdata[0]),age=age,al=0,per=0,re=0,page=0)
        elif(page==1):
            # Permanent-card page: also reports months since the application date.
            if(len(fetch1)!=0):
                today=datetime.today()
                print(today.year)
                print(fetch1[0][4].year)
                mn=diff_month(today,fetch1[0][4])
                print(mn)
                return render_template('perm.html',l=list(fetchdata[0]),age=age,al=fetch1[0][0],per=fetch1[0][1],re=fetch1[0][2],id=fetch1[0][3],dt=fetch1[0][4],mn=mn,page=1)
            else:
                return render_template('perm.html',l=list(fetchdata[0]),age=age,al=0,per=0,re=0,page=1)
        elif(page==2):
            # Renewal page.
            if(len(fetch1)!=0):
                # tm=calculate_age(fetch1[0][4])
                #print(tm)
                tm=0
                return render_template('renewal.html',l=list(fetchdata[0]),age=age,al=fetch1[0][0],per=fetch1[0][1],re=fetch1[0][2],id=fetch1[0][3],dt=fetch1[0][4],tm=tm,page=2)
            else:
                return render_template('renewal.html',l=list(fetchdata[0]),age=age,al=0,per=0,re=0,page=2)
        elif(page==3):
            # Lost-card page (extra `lo` flag from column 5 of users).
            if(len(fetch1)!=0):
                # tm=calculate_age(fetch1[0][4])
                #print(tm)
                tm=0
                return render_template('lost.html',l=list(fetchdata[0]),age=age,al=fetch1[0][0],per=fetch1[0][1],re=fetch1[0][2],id=fetch1[0][3],dt=fetch1[0][4],tm=tm,lo=fetch1[0][5],page=3)
            else:
                return render_template('lost.html',l=list(fetchdata[0]),age=age,al=0,per=0,re=0,page=3)
        elif(page==4):
            # Details page; NOTE(review): fetchone() returns only the FIRST
            # person row, not this user's — confirm intended.
            query="SELECT * FROM person"
            cur.execute(query)
            fet=cur.fetchone()
            print(fet)
            return render_template('details.html',l=list(fetchdata[0]),l1=list(fet),page=4)
    else:
        return "Please Log in Dude"
    #except:
    # return "error 404"
#admin---------------------------------------------------------admin----------------------------admin-----------------------
@app.route('/admin/<name>/<int:ap>')
def admin(name, ap):
    """Admin dashboard.

    ``ap`` selects the tab: 1-6 all render the full user list (the original
    code had six byte-identical branches, consolidated here); 8 renders the
    statistics tab with several aggregate counts. Any other value (including
    7, which is produced by the separate ``check`` view) is rejected
    explicitly instead of silently returning ``None``.
    """
    # session.get avoids a KeyError when nobody is logged in at all.
    if name != session.get('username'):
        return "not logged in"
    cur = mysql.connection.cursor()
    if ap in (1, 2, 3, 4, 5, 6):
        cur.execute("SELECT * FROM users")
        fetchdata = cur.fetchall()
        print(len(fetchdata))
        return render_template('admin.html', l=list(fetchdata), name=name, ap=ap)
    elif ap == 8:
        cur.execute("SELECT * FROM users")
        fetchdata = cur.fetchall()
        # Registrations made today.
        today = date.today()
        cur.execute("SELECT count(*) FROM users WHERE date=%s", (today,))
        cnt = cur.fetchone()
        # Registrations in 2019 (hard-coded year kept from the original).
        cur.execute("SELECT count(*) from users WHERE YEAR(date)=%s", ("2019",))
        cnt1 = cur.fetchone()
        # Approved applications / permanent cards (the duplicate permanent
        # query in the original ran twice into the same variable; run once).
        cur.execute("SELECT count(*) from users WHERE apply=2")
        cnt2 = cur.fetchone()
        cur.execute("SELECT count(*) from users WHERE permanent=2")
        cnt3 = cur.fetchone()
        # Total user accounts, and user accounts in Kanpur.
        cur.execute("SELECT count(*) from person WHERE acc_type=%s", ("User",))
        cnt4 = cur.fetchone()
        cur.execute(
            "SELECT count(*) from person WHERE City=%s AND acc_type=%s",
            ("Kanpur", "User"),
        )
        cnt5 = cur.fetchone()
        return render_template(
            'admin.html', l=list(fetchdata), name=name, ap=8,
            cnt=cnt, cnt1=cnt1, cnt2=cnt2, cnt3=cnt3, cnt4=cnt4, cnt5=cnt5,
        )
    return "invalid admin page"
@app.route('/check/<name>/<int:ap>', methods=['POST', 'GET'])
def check(name, ap):
    """Look up a user by registration number and render their person record.

    POST with form field ``reg`` (the 12-digit uni_id). Renders the admin
    template on tab 7. The original returned ``None`` on GET (a 500 in
    Flask) and crashed with IndexError on an unknown registration number;
    both cases now return an explicit message.
    """
    if request.method == 'POST':
        cur = mysql.connection.cursor()
        reg = request.form['reg']
        cur.execute("SELECT * FROM users WHERE uni_id=%s", (reg,))
        fet = cur.fetchall()
        if not fet:
            return "no user found for that registration number"
        # fet[0][1] is the user's name column; join over to the person table.
        cur.execute("SELECT * FROM person WHERE name=%s", (fet[0][1],))
        fet = cur.fetchone()
        cur.execute("SELECT * FROM users")
        fetchdata = cur.fetchone()
        return render_template('admin.html', l=list(fetchdata), name=name,
                               ap=7, l1=list(fet))
    return "use the search form (POST) to check a registration number"
@app.route('/admina/<aname>/<name>/<email>/<int:ap>')
def admina(aname, name, email, ap):
    """Admin approval action: set one request column to 2 (approved) for a user.

    ``ap`` selects which request type is approved. The original had six
    near-identical branches; they differed only in the updated column, so
    they collapse to a lookup table. Per the original, ap values 4, 5 and 6
    all update the ``lost`` column.
    """
    columns = {1: "apply", 2: "permanent", 3: "renewal",
               4: "lost", 5: "lost", 6: "lost"}
    column = columns.get(ap)
    if column is None:
        return "unknown admin action"
    if aname != session.get('username'):
        print(name, email)
        return "not logged in"
    cur = mysql.connection.cursor()
    # The column name comes from the fixed map above, never from user input,
    # so interpolating it is safe; all values remain parameterized.
    cur.execute(
        "UPDATE users SET {}=%s WHERE name=%s AND email=%s".format(column),
        (2, name, email),
    )
    mysql.connection.commit()
    cur.close()
    return redirect(url_for('admin', name=aname, ap=ap))
#<admin>------------------------------------------------<admin>-------------------------------<admin>------------------------
@app.route('/apply/<name>/<email>', methods=['POST', 'GET'])
def apply(name, email):
    """Create a library-card application for the user, if one does not exist.

    Inserts a users row (apply=1, other flags 0) with a fresh 12-digit id
    derived from a UUID, plus a matching request row, then redirects back to
    the sidebar. Idempotent: an existing users row means no insert happens.
    The original returned ``None`` on GET; now it just redirects.
    """
    if request.method == 'POST':
        cur = mysql.connection.cursor()
        cur.execute("SELECT * FROM users WHERE email=%s AND name=%s",
                    (email, name))
        existing = cur.fetchall()
        print(len(existing))
        if len(existing) == 0:
            try:
                today = date.today()
                # First 12 digits of a random UUID serve as the card number.
                uni_id = str(uuid.uuid4().int)[:12]
                cur.execute(
                    "INSERT INTO users(name,email,apply,permanent,renewal,"
                    "lost,uni_id,date) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
                    (name, email, 1, 0, 0, 0, uni_id,
                     today.strftime("%Y-%m-%d")),
                )
                mysql.connection.commit()
                cur.execute("INSERT INTO request(reg,name) VALUES (%s,%s)",
                            (uni_id, name))
                mysql.connection.commit()
                cur.close()
            except Exception:
                # Narrowed from a bare except; still best-effort as before.
                return "cannot be inserted"
    return redirect(url_for('sidebar', name=name, email=email, page=0))
@app.route('/permanent/<name>/<email>')
def permanent(name, email):
    """Mark the user's permanent-card request as pending (permanent=1).

    The original also ran a SELECT on person whose result was never used;
    that dead query is removed.
    """
    print(name, email)
    cur = mysql.connection.cursor()
    cur.execute("UPDATE users SET permanent=%s WHERE name=%s AND email=%s",
                (1, name, email))
    mysql.connection.commit()
    cur.close()
    return redirect(url_for('sidebar', name=name, email=email, page=1))
@app.route('/renewal/<name>/<email>')
def renewal(name, email):
    """Mark the user's renewal request as pending (renewal=1).

    The original also ran a SELECT on person whose result was never used;
    that dead query is removed.
    """
    print(name, email)
    cur = mysql.connection.cursor()
    cur.execute("UPDATE users SET renewal=%s WHERE name=%s AND email=%s",
                (1, name, email))
    mysql.connection.commit()
    cur.close()
    return redirect(url_for('sidebar', name=name, email=email, page=2))
@app.route('/lost/<name>/<email>', methods=['POST', 'GET'])
def lost(name, email):
    """Record a lost-card report (lost=1); requires an F.I.R number.

    A POST with an empty ``fir`` field flashes a warning and changes
    nothing. The original returned ``None`` on GET (a 500 in Flask); now it
    redirects back to the lost-card tab. The unused SELECT on person was
    removed.
    """
    if request.method == 'POST':
        if len(request.form['fir']) == 0:
            flash('*Fill F.I.R Num')
            return redirect(url_for('sidebar', name=name, email=email, page=3))
        cur = mysql.connection.cursor()
        cur.execute("UPDATE users SET lost=%s WHERE name=%s AND email=%s",
                    (1, name, email))
        mysql.connection.commit()
        cur.close()
    return redirect(url_for('sidebar', name=name, email=email, page=3))
@app.route('/logout')
def logout():
    """Clear every session key set at login, then return to the start page."""
    for key in ('loggedin', 'id', 'username'):
        session.pop(key, None)
    return redirect(url_for('start'))
# Development entry point; debug=True enables the reloader and the
# interactive debugger, so it must not be used in production.
if __name__=='__main__':
    app.run(debug=True)
|
from .view3d import view3d
from .chemspace import chemspace
from .landing import landing
# Registry of app pages: display title -> (page object, flag).
# NOTE(review): the boolean's meaning is not visible in this file — only
# "Landing" is True, so it presumably marks the default/landing page;
# confirm against whatever consumes `pages`.
pages = {
    "3DView Page": (view3d, False),
    "Chemspace": (chemspace, False),
    "Landing": (landing, True)
}
"""Code here is only intended for use by developers"""
import time
import pandas as pd
import numpy as np
import copy
from scipy.stats import norm
import random
def parameter_recovery_sweep(sweep_θ_true, model, design_thing, target_param_name):
    """Run one simulated experiment per row of ``sweep_θ_true``.

    For each row, a fresh copy of ``model`` gets that row as its true
    parameter values and a deep copy of ``design_thing`` is run through a
    full simulated trial loop. Returns a tuple of (concatenated
    final-trial summary stats for ``target_param_name``, list of the
    per-run design objects returned by the trial loop).
    """
    print("starting parameter recovery sweep")
    n_rows = sweep_θ_true.shape[0]
    final_trial_stats = []
    all_trial_stats = []
    for i in range(n_rows):
        # Fresh copies so runs cannot contaminate each other.
        model_copy = copy.copy(model)
        design_copy = copy.deepcopy(design_thing)
        model_copy.θ_true = sweep_θ_true.loc[[i]]
        fitted, per_run = simulated_experiment_trial_loop(design_copy, model_copy)
        final_trial_stats.append(fitted.get_θ_summary_stats(target_param_name))
        all_trial_stats.append(per_run)
    return (pd.concat(final_trial_stats), all_trial_stats)
def simulated_experiment_trial_loop(design_thing, fitted_model, response_model=None):
    """Run a simulated experiment until the design generator is exhausted.

    Each iteration: ask ``design_thing`` for the next design (based on
    ``fitted_model``), simulate a response, record the pair, and update the
    fitted model's beliefs from the accumulated data.

    When ``response_model`` is given, responses come from it while
    ``fitted_model`` is the one being fit — useful for studying model
    misspecification. Otherwise the same model generates data and is fit.
    Returns (fitted_model, design_thing).
    """
    if response_model is None:
        response_model = fitted_model
    if response_model.θ_true is None:
        raise ValueError("response_model must have θ_true values set")
    while True:
        design = design_thing.get_next_design(fitted_model)
        if design is None:
            # No more trials to run.
            return (fitted_model, design_thing)
        response = response_model.simulate_y(design, display=True)
        design_thing.enter_trial_design_and_response(design, response)
        fitted_model.update_beliefs(design_thing.data)
def simulated_multi_experiment(design_thing, models_to_fit, response_model):
    """Simulate an experiment with one response model and several fitted models.

    Each trial, a randomly chosen model from ``models_to_fit`` supplies the
    design, ``response_model`` generates the response, and every candidate
    model's beliefs are updated from the accumulated data. The loop ends
    when the design generator is exhausted (or at the safety cap).
    Returns (models_to_fit, design_thing).
    """
    if response_model.θ_true is None:
        raise ValueError("response_model must have θ_true values set")
    n_models = len(models_to_fit)
    max_trials = 666  # safety cap; normally the loop ends when designs run out
    for trial in range(max_trials):
        # Pick the design-generating model at random each trial.
        m = random.randrange(n_models)
        design = design_thing.get_next_design(models_to_fit[m])
        if design is None:
            break
        print(f"trial {trial}, design from model: {m}")
        response = response_model.simulate_y(design)
        design_thing.enter_trial_design_and_response(design, response)
        # Belief updates are side effects, so use a plain loop instead of the
        # original throwaway list comprehension.
        for model in models_to_fit:
            model.update_beliefs(design_thing.data)
    return models_to_fit, design_thing
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.