# one possible solution
# if it yields a bad result, rerun the code (k-means is sensitive to the random initialization)
import numpy as np
import matplotlib.pyplot as plt
class KM():
def __init__(self, k):
self.k = k
def distances(self, X):
"""Makes a distance matrix to the centroids of shape (n_samples x n_centroids)"""
return np.vstack([np.sum((X-self.centroids[i,:])**2, axis=1) for i in range(self.k)]).T
def assign(self, X):
"""Selects the index of the distance matrix which has the smallest entry"""
dist = self.distances(X)
return np.argmin(dist, axis=1)
def update(self, X):
"""Updates centroid to mean of its constituents. If it has no constituents, respawn randomly"""
for j in range(self.k):
new_c = X[self.y==j,:].mean(axis=0)
if np.any(np.isnan(new_c)):
self.centroids[j,:] = np.random.uniform(X.min(0), X.max(0), X.shape[1])
else: self.centroids[j, :] = new_c
def __call__(self, X):
n, d = X.shape
self._converged = False
self.centroids = X[np.random.randint(0,n, self.k), :] # initialize by random selection of samples
self.y = np.zeros(shape=n) # empty initialize
while True: #repeat until convergence
#old_centroids = self.centroids
old_y = self.y
self.y = self.assign(X)
if np.all(self.y == old_y): return self.y
self.update(X)
return self.y
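# Hedged demo data (not part of the original snippet): the usage below assumes a
# 2-D sample matrix `X` already exists in the session. A toy stand-in could be
# built from a few Gaussian blobs like this:
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(loc=c, scale=0.5, size=(100, 2))
               for c in [(0, 0), (4, 4), (0, 5), (5, 0), (2, 2)]])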
km = KM(5)
y = km(X)
plt.scatter(X[:,0], X[:,1], c=y)
plt.scatter(km.centroids[:,0], km.centroids[:,1], marker='x', s=150, c='red') |
import os
from dotenv import load_dotenv
from discord_key_bot import bot
from discord_key_bot.keyparse import parse_key
load_dotenv()
bot.run(os.environ["TOKEN"])
|
import random, time
def sleep_random(duration: tuple) -> None:
"""
sleep_random is designed to emulate a human user by pausing execution of the
program (sleeping) for a random duration of time between `a` and `b` seconds.
:param duration: tuple containing two floats: `a` is the minimum sleep duration while `b` is the maximum duration
"""
a: float = 0
b: float = 0
try:
a, b = duration
except ValueError as e:
# a malformed `duration` tuple is logged and falls through to a zero-second sleep
print(e)
sleep_time = random.uniform(a, b)
print(f'Pausing execution flow for {sleep_time} seconds...')
time.sleep(sleep_time)
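# Hedged usage example (illustrative only): pause for somewhere between 0.5 and 2 seconds.
if __name__ == '__main__':
    sleep_random((0.5, 2.0))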
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
def setup_python3():
# Taken from "distribute" setup.py
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join
tmp_src = join("build", "src")
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, tmp_src)
return tmp_src
# Find version. We have to do this because we can't import the package in Python 3
# until it's been automatically converted by 2to3 during the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
__version__ = find_version('rdfextras/__init__.py')
config = dict(
name = 'rdfextras',
version = __version__,
description = "RDFExtras provide tools, extra stores and such for RDFLib.",
author = "Niklas Lindström",
author_email = "lindstream@gmail.com",
url = "http://github.com/RDFLib/rdfextras",
license = "BSD",
platforms = ["any"],
classifiers = ["Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
],
packages = ['rdfextras',
'rdfextras.sparql',
'rdfextras.sparql.results',
'rdfextras.store',
'rdfextras.store.FOPLRelationalModel',
'rdfextras.tools',
'rdfextras.utils']
)
if sys.version_info[0] >= 3:
from setuptools import setup
config.update({'use_2to3': True})
config.update({'src_root': setup_python3()})
install_requires = ['rdflib >= 3.2.1', 'pyparsing']
else:
try:
from setuptools import setup
config.update({'test_suite' : "nose.collector"})
except ImportError:
from distutils.core import setup
install_requires = ['rdflib >= 3.2.1', 'pyparsing<=1.5.7']
tests_require = install_requires + \
[]
extras_require = {
}
config.update(
entry_points = {
'console_scripts': [
'rdfpipe = rdfextras.tools.rdfpipe:main',
'csv2rdf = rdfextras.tools.csv2rdf:main',
'rdf2dot = rdfextras.tools.rdf2dot:main',
'rdfs2dot = rdfextras.tools.rdfs2dot:main',
],
'rdf.plugins.queryprocessor': [
'sparql = rdfextras.sparql.processor:Processor',
],
'rdf.plugins.queryresult': [
'sparql = rdfextras.sparql.query:SPARQLQueryResult',
],
'rdf.plugins.resultserializer': [
'xml = rdfextras.sparql.results.xmlresults:XMLResultSerializer',
'json = rdfextras.sparql.results.jsonresults:JSONResultSerializer',
],
'rdf.plugins.resultparser': [
'xml = rdfextras.sparql.results.xmlresults:XMLResultParser',
'json = rdfextras.sparql.results.jsonresults:JSONResultParser',
],
},
#namespace_packages = ['rdfextras'], # TODO: really needed?
install_requires = install_requires,
tests_require = tests_require,
extras_require = extras_require
)
setup(**config)
|
import json
from django.http import HttpResponse
from django.template import loader, Template
from django.utils.datastructures import SortedDict
from urllib import urlencode
from tiote import forms, sa
from tiote.utils import *
import base
def browse(request):
conn_params = fns.get_conn_params(request, update_db=True)
# row(s) deletion request handling
if request.method == 'POST':
if request.GET.get('upd8') == 'delete':
l = request.POST.get('where_stmt').strip().split(';')
query_data = fns.qd(request.GET)
query_data['conditions'] = fns.get_conditions(l)
return qry.rpr_query(conn_params, 'delete_row', query_data)
else:
return edit(request)
tbl_data = qry.browse_table(conn_params, request.GET, request.POST)
# build table
c = base.TableView(tbl_data=tbl_data,
tbl_props = {'with_checkboxes': True, 'display_row': True, },
tbl_store = {'total_count':tbl_data['total_count'], 'pg': tbl_data['pg'],
'limit': tbl_data['limit'] },
show_tbl_optns = True,
tbl_optn_type='data',
empty_err_msg="This table contains no items",
# columns_desc=tbl_structure,
)
return c.get(request)
def base_struct(request, **kwargs):
conn_params = fns.get_conn_params(request)
# get url prefix
dest_url = SortedDict(); _d = {'sctn':'tbl','v':'struct'}
for k in _d: dest_url[k] = _d[k] # init this way to maintain order
for k in ('db', 'schm','tbl',):
if request.GET.get(k): dest_url[k] = request.GET.get(k)
url_prefix = urlencode(dest_url)
subnav_list = ['cols', 'cons', ] # manually updated as more features are implemented
if conn_params['dialect'] == 'postgresql': subnav_list.append('deps')
props = {'props_table': True }
if kwargs.get('tbl_props'):
props.update(kwargs.get('tbl_props'))
kwargs.pop('tbl_props')
c = base.CompositeTableView(
url_prfx = url_prefix,
subnav_list = subnav_list,
tbl_props = props,
**kwargs)
return c.get(request)
def cols_struct(request):
# inits and first queries
conn_params = fns.get_conn_params(request, update_db=True)
# only needed in the MySQL dialect
if conn_params['dialect'] == 'mysql':
charset_list = qry.common_query(conn_params, 'charset_list')['rows']
supported_engines = qry.common_query(conn_params, 'supported_engines')['rows']
else:
supported_engines = None
charset_list = None
tbl_names = sa.get_table_names(conn_params, request.GET)
tbl_cols = qry.rpr_query(conn_params, 'table_structure', fns.qd(request.GET))
# column editing/deleting
if request.method == 'POST' and request.GET.get('upd8'):
# format the where_stmt to a mapping of columns to values (a dict)
l = request.POST.get('where_stmt').strip().split(';')
conditions = fns.get_conditions(l)
# determine query to run
if request.GET.get('upd8') in ('delete', 'drop',):
q = 'drop_column'
query_data = {'conditions': conditions}
for k in ['db', 'tbl',]: query_data[k] = request.GET.get(k)
if request.GET.get('schm'):
query_data['schm'] = request.GET.get('schm')
return qry.rpr_query(conn_params, q, query_data)
# handle submission of new column form
elif request.method == 'POST':
form = forms.ColumnForm(conn_params['dialect'], engines=supported_engines, charsets=charset_list,
existing_tables=tbl_names, existing_columns=tbl_cols['rows'], label_suffix=':', data=request.POST)
# handle invalid forms
if not form.is_valid():
return fns.response_shortcut(request, template='form_errors',
extra_vars={'form':form,})
# prep form fields: add all type_ together
if conn_params['dialect'] == 'postgresql':
keys = [key for key in form.cleaned_data.keys() if key.startswith('type_')]
for key in keys:
_temp = form.cleaned_data[key].split('|')
form.cleaned_data[key] = _temp[0] # first index is the base type
if _temp[-1] != '_default':
form.cleaned_data[key] += _temp[1] # the array specifier literal
# do column creation and return error
ret = qry.create_column(conn_params, request.GET, form.cleaned_data)
return HttpResponse(json.dumps(ret))
# table view
http_resp = base_struct(request, tbl_data=tbl_cols, show_tbl_optns=True,
tbl_props= {'keys': (('column', 'key'), )}, tbl_optn_type='tbl_like',
subv='cols', empty_err_msg="Table contains no columns")
form = forms.ColumnForm(conn_params['dialect'], engines=supported_engines, charsets=charset_list,
existing_tables=tbl_names, existing_columns=tbl_cols['rows'], label_suffix=':')
form_html= fns.render_template(request, 'tbl/tt_col.html', context={'form': form, 'edit':False,
'table_fields': ['name', 'engine', 'charset', 'inherit', 'of_type'],
'odd_fields': ['type','key','charset', 'not null'],
'dialect': conn_params['dialect'],
# 'table_with_columns': table_with_columns,
}, is_file=True)
http_resp.content += form_html
return http_resp
def cons_struct(request):
conn_params = fns.get_conn_params(request, update_db=True)
if request.method == "POST" and request.GET.has_key('upd8'):
# only dropping foreign keys is currently supported
# format the where_stmt to a mapping of keys to values (a dict)
l = request.POST.get('where_stmt').strip().split(';')
query_data = {'conditions': fns.get_conditions(l) }
for k in ['db', 'tbl', 'schm']:
if request.GET.has_key(k): query_data[k] = request.GET.get(k)
# decide query to run
if request.GET.get('upd8') in ('drop', 'delete',): query_type = 'drop_constraint'
# run and return status of the executed query
return qry.rpr_query(conn_params, query_type, query_data)
# view and creation things
tbl_idxs = qry.rpr_query(conn_params, 'indexes', fns.qd(request.GET))
return base_struct(request, tbl_data=tbl_idxs, show_tbl_optns=True, subv='cons',
empty_err_msg="Table contains no constraints",
tbl_props = {'keys': (('name', 'key'), ('type', 'key'), )},
tbl_optn_type= 'tbl_like',
)
def deps_struct(request):
conn_params = fns.get_conn_params(request, update_db=True)
# view and deletion things
tbl_deps = qry.get_dependencies(conn_params, fns.qd(request.GET))
return base_struct(request, tbl_data=tbl_deps, show_tbl_optns=False,
subv='deps', empty_err_msg="This table has no dependents")
def insert(request):
# make queries and inits
conn_params = fns.get_conn_params(request, update_db=True)
tbl_struct_data = qry.rpr_query(conn_params, 'raw_table_structure', fns.qd(request.GET))
# keys = ['column','type','null','default','character_maximum_length','numeric_precision','numeric_scale']
tbl_indexes_data = qry.rpr_query(conn_params, 'indexes', fns.qd(request.GET))
if request.method == 'POST':
# the form is a submission, so it doesn't require initialization from a database query;
# every needed field is already in the form (the same applies to forms in the 'edit' view)
form = forms.InsertForm(tbl_struct=tbl_struct_data, dialect=conn_params['dialect'],
tbl_indexes=tbl_indexes_data['rows'], data=request.POST)
# validate form
if form.is_valid():
ret = qry.insert_row(conn_params, fns.qd(request.GET),
fns.qd(request.POST))
return HttpResponse(json.dumps(ret))
else: # form contains error
ret = {'status': 'fail',
'msg': fns.render_template(request,"tt_form_errors.html",
{'form': form}, is_file=True).replace('\n','')
}
return HttpResponse(json.dumps(ret))
form = forms.InsertForm(tbl_struct=tbl_struct_data, dialect=conn_params['dialect'],
tbl_indexes=tbl_indexes_data['rows'])
return fns.response_shortcut(request, extra_vars={'form':form,}, template='form')
def edit(request):
# GET requests are not allowed: the POST fields that were used to initialize the form
# - would not be available. Redirect the page to the parent page ('v' of request.GET)
if request.method == 'GET':
h = HttpResponse(''); d = SortedDict()
for key in ('sctn', 'v', 'db', 'schm', 'tbl'):
if request.GET.get(key): d[key] = request.GET.get(key)
h.set_cookie('TT_NEXT', str( urlencode(d) ) )
return h
# inits and queries
conn_params = fns.get_conn_params(request, update_db=True)
tbl_struct_data = qry.rpr_query(conn_params, 'raw_table_structure', fns.qd(request.GET))
# keys = ['column','type','null','default','character_maximum_length','numeric_precision','numeric_scale']
tbl_indexes_data = qry.rpr_query(conn_params, 'primary_keys', fns.qd(request.GET))
# generate the form(s)
if request.method == 'POST' and request.POST.get('where_stmt'):
# parse the POST structure and generate a list of dict.
l = request.POST.get('where_stmt').strip().split(';')
conditions = fns.get_conditions(l)
# loop through the conditions, request the row which has _dict as its where clause
# - and use that information to bind the EditForm
_l_forms = []
for _dict in conditions:
single_row_data = qry.rpr_query(conn_params, 'get_single_row',
fns.qd(request.GET), _dict
)
# make single row data a dict mapping of columns to rows
init_data = dict( zip( single_row_data['columns'], single_row_data['rows'][0] ) )
# create the form and store it in the forms list
f = forms.EditForm(tbl_struct=tbl_struct_data, dialect=conn_params['dialect'],
tbl_indexes=tbl_indexes_data['rows'], initial=init_data)
_l_forms.append(f)
return fns.response_shortcut(request, extra_vars={'forms':_l_forms,}, template='multi_form')
# submissions of a form
else:
f = forms.EditForm(tbl_struct=tbl_struct_data, dialect=conn_params['dialect'],
tbl_indexes=tbl_indexes_data['rows'], data = request.POST)
if not f.is_valid():
# format and return form errors
ret = {'status': 'fail',
'msg': fns.render_template(request,"tt_form_errors.html",
{'form': f}, is_file=True).replace('\n','')
}
return HttpResponse(json.dumps(ret))
# from here on we are working on a valid form
# two options during submission: update_row or insert_row
if f.cleaned_data['save_changes_to'] == 'insert_row':
# pretty straight forward (lifted from insert view above)
ret = qry.insert_row(conn_params, fns.qd(request.GET),
f.cleaned_data)
return HttpResponse(json.dumps(ret))
else:
indexed_cols = fns.parse_indexes_query(tbl_indexes_data['rows'])
ret = qry.update_row(conn_params, indexed_cols,
fns.qd(request.GET), f.cleaned_data)
return HttpResponse(json.dumps(ret))
def get_ops_form(conn_params, get_data, data=None):
context = {}
if conn_params['dialect'] == 'postgresql':
# table edit form
schema_list = qry.common_query(conn_params, 'schema_list', get_data)['rows']
tblEditForm = forms.get_dialect_form('TableEditForm', conn_params['dialect'])
context['tbl_edit_form'] = tblEditForm( tbl_name = get_data.get('tbl'),
tbl_schema = get_data.get('schm'),
schemas = schema_list,
data = data
)
# table vacuum form
context['tbl_vacuum_form'] = forms.TableVacuumForm(data=data)
elif conn_params['dialect'] == 'mysql':
# table edit form
charset_list = qry.common_query(conn_params, 'charset_list', get_data)['rows']
tblEditForm = forms.get_dialect_form('TableEditForm', conn_params['dialect'])
context['tbl_edit_form'] = tblEditForm(tbl_name= get_data.get('tbl'),
charsets = charset_list,
data = data
)
# run validation if data is passed for the forms
if data is not None:
for f in context.values(): f.is_valid()
return context
def ops(request):
conn_params = fns.get_conn_params(request, update_db=True)
extra_context = SortedDict({'dialect': conn_params['dialect']})
if request.method == 'POST':
form_contxt = get_ops_form(conn_params, request.GET, data=request.POST)
if request.POST.get('form_type'):
if request.POST.get('form_type') in ('tbl_vacuum_form', 'tbl_edit_form'):
form_data = form_contxt[request.POST.get('form_type')].cleaned_data
else:
form_data = {} # the other operations don't submit forms
msg = qry.run_tbl_operations(conn_params, request.POST.get('form_type'), request.GET, form_data)
return HttpResponse(json.dumps(msg))
else:
pass
else:
form_contxt = get_ops_form(conn_params, request.GET)
extra_context.update(form_contxt)
retrn = fns.render_template(request, 'tbl/tt_ops.html', extra_context, is_file=True )
return HttpResponse(retrn)
# view router
def route(request):
if request.GET.get('v') == 'browse':
if request.GET.get('subv') == 'edit':
return edit(request)
return browse(request)
elif request.GET.get('v') in ('structure', 'struct'):
if request.GET.get('subv') == 'cons':
return cons_struct(request)
elif request.GET.get('subv') == 'deps':
return deps_struct(request)
return cols_struct(request) # default
elif request.GET.get('v') in ('insert', 'ins'):
return insert(request)
elif request.GET.get('v') in ('operations', 'ops'):
return ops(request)
else:
return fns.http_500('malformed URL of section "table"')
|
from django.db import models
from django.utils import timezone
import json
class Spy(models.Model):
CREATE = 'create'
CHANGE = 'change'
DELETE = 'delete'
ACTION_CHOICES = (
(CREATE, 'CREATE'),
(CHANGE, 'CHANGE'),
(DELETE, 'DELETE'),
)
id = models.AutoField(primary_key=True)
user = models.ForeignKey('common.User', null=True, blank=True, on_delete=models.SET_NULL)
action = models.CharField(max_length=25, choices=ACTION_CHOICES)
object_name = models.CharField(max_length=100, blank=True, null=True)
object_id = models.IntegerField(null=True)
object_str = models.CharField(max_length=255, null=True, blank=True)
changes = models.TextField(blank=True, null=True)
client = models.ForeignKey('clients.Client', null=True, blank=True, default=None, on_delete=models.SET_NULL)
time = models.DateTimeField(blank=True, null=True)
class Meta:
managed = True
db_table = 'spy_log'
def changed(self, instance=None, old_instance=None, form=None, request=None):
try:
user = request.user
except AttributeError:
user = None
self.log(object=instance,
old_object=old_instance,
form=form,
user=user,
action=Spy.CHANGE)
def created(self, instance=None, form=None, request=None, client=None):
try:
user = request.user
except AttributeError:
user = None
self.log(object=instance, form=form, action=Spy.CREATE, user=user, client=client)
def deleted(self, instance=None, request=None, client=None):
self.log(object=instance, user=request.user, client=client, action=Spy.DELETE)
def log(self, object=None, old_object=None, form=None, user=None, action=None, client=None):
changes = {}
if form:
for field_name in form.changed_data:
changes[field_name] = {'new': str(form.cleaned_data[field_name])}
if old_object is not None:
changes[field_name]['old'] = str(getattr(old_object, field_name))
if client:
client_id = client.pk
elif hasattr(object, 'related_client_id'):
client_id = getattr(object, 'related_client_id')
else:
client_id = None
# Заполняем все поля
self.object_name = object.__class__.__name__
self.object_str = str(object)
self.object_id = object.pk
self.user = user
self.action = action
if len(changes) > 0:
self.changes = json.dumps(changes, ensure_ascii=False)
self.client_id = client_id
self.time = timezone.now()
self.save()
def json_changes(self):
if self.changes:
return self.changes.replace("\\\"", '\\\\"')
else:
return None
|
import os
from google.cloud.storage import Bucket
from google.cloud import storage
from google.api_core.exceptions import NotFound
from ...Models.Interfaces.IStorageClientWrapper import IStorageClientWrapper
from ..DTOs.CloudStorageMetadataDTO import CloudStorageMetadataDTO
from ...Models.Converters.JSONConverter import deserialize_json_to_object, serialize_object_to_json
class GoogleCloudClientWrapper(IStorageClientWrapper):
def __init__(self, json_auth_file_path: str, prj_root_dir: str):
self.__storage_client = storage.Client.from_service_account_json(json_auth_file_path)
self.__prj_root_dir = prj_root_dir
# --------- ABSTRACT METHOD IMPLEMENTATIONS ----------------------------------------------
def download_from_storage_client(self, **kwargs):
"""Instructs the google cloud client to download a by (JSON/user) parameters specified blob from to a
specified or default bucket. """
storage_retrieval_script_path = kwargs.get("srs_path")
folder_path = kwargs.get("folder")
bucket_name = kwargs.get('bucket')
blob_name = kwargs.get('blob')
destination = kwargs.get("download_to") # path of the dir to download the file to; if this is -
# specified it overrides the file-name identifiers
if bucket_name is None:
bucket_name = os.path.split(self.__prj_root_dir)[-1]
print(bucket_name)
if storage_retrieval_script_path is not None:
# download from json file
with open(str(storage_retrieval_script_path), 'r') as fp:
json_model: CloudStorageMetadataDTO = deserialize_json_to_object(fp) # read in the json file
fp.close()
file_name = json_model.filename
blob_name = file_name
destination_file_name = self.__return_destination(destination, blob_name)
directory = os.path.split(destination_file_name)[-2]
dir_exists = os.path.isdir(directory) # check if the directory exists
if not dir_exists: # if directory doesn't exist create it
os.makedirs(directory)
self.__download_blob(json_model.bucketname, file_name, destination_file_name)
elif folder_path is not None:
# Download all files in a bucket that come from the same folder
bucket_obj = self.__return_bucket_obj_by_name(bucket_name)
# get all blob names
all_blob_names = self.__get_blobs_names_in_bucket(bucket_obj)
for blob_name in all_blob_names:
if folder_path in blob_name:
destination_file_name = self.__return_destination(destination, blob_name)
directory = os.path.split(destination_file_name)[-2]
dir_exists = os.path.isdir(directory) # check if the directory exists
if not dir_exists: # if directory doesn't exist create it
os.makedirs(directory)
self.__download_blob(bucket_obj, blob_name, destination_file_name)
elif folder_path is None and blob_name is None:
# Download all files in bucket
print("No folder or file specified, downloading all files in bucket: ", bucket_name)
# if bucket_name is None:
# raise ValueError( 'Specify Bucket to download folder from!' )
bucket_obj = self.__return_bucket_obj_by_name(bucket_name)
all_blob_names = self.__get_blobs_names_in_bucket(bucket_obj)
for blob_name in all_blob_names:
destination_file_name = self.__return_destination(destination, blob_name)
directory = os.path.split(destination_file_name)[-2]
dir_exists = os.path.isdir(directory) # check if the directory exists
if not dir_exists: # if directory doesn't exist create it
os.makedirs(directory)
self.__download_blob(bucket_obj, blob_name, destination_file_name)
else:
# download by bucket and blob name
if blob_name is None:
raise ValueError('Specify file name to download!')
bucket_obj = self.__return_bucket_obj_by_name(bucket_name)
destination_file_name = self.__return_destination(destination, blob_name)
directory = os.path.split(destination_file_name)[-2]
dir_exists = os.path.isdir(directory) # check if the directory exists
if not dir_exists: # if directory doesn't exist create it
os.makedirs(directory)
# self.client.download_blob(bucket_obj, blob_name, destination_file_name)
bucket = self.__storage_client.get_bucket(bucket_obj)
blob = bucket.blob(blob_name)
blob.download_to_filename(destination_file_name)
print('Blob {} downloaded to {}.'.format(blob_name, destination_file_name))
def upload_to_storage_client(self, **kwargs):
"""Instructs the google cloud client to upload a user specified blob from to a specified or default bucket."""
current_bucket_names: [tuple] = [(b.id, b) for b in self.__get_buckets()]
if current_bucket_names is None:
raise ValueError('Could not index the current bucket names inside the google cloud!')
path_of_file_to_be_uploaded = kwargs.get('abs_path_to_file')
if path_of_file_to_be_uploaded is None:
raise ValueError('Specify path of the file - or folder - to be uploaded to Google Cloud!')
bucket_name = kwargs.get('bucket_name') # see if the user specified a bucket
bucket = None
if bucket_name is not None:
# if a bucket IS specified see if it exists and return the object.
if self.__check_bucket_exists(bucket_name):
bucket = self.__return_bucket_obj_by_name(bucket_name)
else:
# if no bucket was named, derive the bucket name from the project root directory
bucket_name = os.path.split(self.__prj_root_dir)[-1]
print('When a bucket isn\'t named, the project\'s root folder will be used as the bucket:',
bucket_name)
bucket_exists = self.__check_bucket_exists(bucket_name)
if bucket_exists:
# if the bucket exists return its obj
bucket = self.__return_bucket_obj_by_name(bucket_name)
else:
# if the bucket doesn't exist, create it
bucket = self.__storage_client.create_bucket(bucket_name)
# Now that we have our bucket, we can upload the file(s)
if os.path.isdir(path_of_file_to_be_uploaded):
# if the file path is to a folder, upload all files in the folder
# print("is folder")
self.__recursive_folder_upload(path_of_file_to_be_uploaded, bucket)
else:
to_be_stored_blob_name = kwargs.get('blob_name')
if to_be_stored_blob_name is None:
# to_be_stored_blob_name = path_of_file_to_be_uploaded.split( '\\' )[-1]
filepath = os.path.join(path_of_file_to_be_uploaded)
to_be_stored_blob_name = filepath.replace(self.__prj_root_dir, "")
print('User did not set a custom file/blob-name to upload under, using default file name: {}'.format(
to_be_stored_blob_name))
# self.client.upload_blob(bucket, path_of_file_to_be_uploaded, to_be_stored_blob_name)
# upload_blob:
if self.__check_bucket_exists(bucket_name):
bucket = self.__return_bucket_obj_by_name(bucket_name)
else:
bucket = self.__storage_client.create_bucket(bucket_name)
blob = bucket.blob(to_be_stored_blob_name)
blob.upload_from_filename(path_of_file_to_be_uploaded)
print('File {} uploaded to {}.'.format(path_of_file_to_be_uploaded, to_be_stored_blob_name))
# identifiers = to_be_stored_blob_name.split("\\")[:-1]
metadata = {'project_bucket_name': bucket_name, 'file_name': to_be_stored_blob_name,
'generation': self.__getBlobGeneration(bucket, to_be_stored_blob_name),
'updated': self.__getBlobUpdated(bucket, to_be_stored_blob_name)}
self.generate_json_upload_parameters(**metadata)
def show_files_stored_by_storage_client(self):
"""Instructs the google cloud client to display all blob's within the wrapper managed bucket."""
all_buckets = list(self.__storage_client.list_buckets())
for b in all_buckets:
self.__list_blobs(b)
def delete_file_managed_by_storage_client(self, **kwargs):
"""Instructs the google cloud client to delete a user specified blob from a bucket."""
if 'bucket_name' in kwargs:
bucket_name = kwargs.get('bucket_name')
else:
raise ValueError("To delete a file from the google cloud a \'bucket_name\' must be specified.")
if 'blob_name' in kwargs:
blob_name = kwargs.get('blob_name')
else:
raise ValueError("To delete a file from the google cloud a \'blob_name\' must be specified.")
bucket_exists = self.__check_bucket_exists(bucket_name)
if bucket_exists:
bucket_obj = self.__return_bucket_obj_by_name(bucket_name)
self.__delete_blob(bucket_obj, blob_name)
else:
raise ValueError("Specified deletion bucket or blob target does not exist!")
def generate_json_upload_parameters(self, **kwargs):
prj_bucket = kwargs.get("project_bucket_name")
identifiers = kwargs.get("identifiers")
file_name = kwargs.get("file_name")
generation = kwargs.get("generation")
updated = kwargs.get("updated")
out_name = os.path.basename(file_name).split(".")[0] + ".json" # the name of the output json file
# dir_path = os.path.join( "json files" ) # will return a path for the json files folder
dir_path = os.path.join(self.__prj_root_dir, "json_files")
json_model = CloudStorageMetadataDTO(filename=file_name,
bucketname=prj_bucket,
upload_date_time=str(updated),
generationKey=generation)
if os.path.isdir(dir_path): # if the folder exists
with open(os.path.join(dir_path, out_name), 'w') as fp:
serialize_object_to_json(json_model, fp)
print("Generated retrieval parameters for file {} in JSON Format. Stored in: {}".format(file_name,
dir_path))
else: # if the folder doesn't exist yet.
os.makedirs(dir_path)
with open(os.path.join(dir_path, out_name), 'w') as fp:
serialize_object_to_json(json_model, fp)
print("Generated retrieval parameters for file {} in JSON Format. Stored in: {}".format(file_name,
dir_path))
# --------- HELPER METHODS ----------------------------------------------
def __get_blobs_names_in_bucket(self, bucket: object):
blobs = self.__get_blobs(bucket)
return blobs
def __return_destination(self, destination: str, blob_name: str):
if destination is not None:
return os.path.join(destination, os.path.split(blob_name)[-1])
else:
return os.path.join(self.__prj_root_dir + blob_name)
def __get_buckets(self):
buckets = list(self.__storage_client.list_buckets())
return buckets
def __get_bucket(self, bucket: object):
return self.__storage_client.get_bucket(bucket)
def __upload_blob(self, bucket: object, source_file_name: str, destination_blob_name: str):
"""Uploads a file to the bucket."""
bucket = self.__storage_client.get_bucket(bucket)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
print('File {} uploaded to {}.'.format(
source_file_name,
destination_blob_name))
def __download_blob(self, bucket: object, source_blob_name: str, destination_file_name: str):
"""Downloads a blob from the bucket."""
bucket = self.__storage_client.get_bucket(bucket)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
print('Blob {} downloaded to {}.'.format(
source_blob_name,
destination_file_name))
def __list_blobs(self, bucket: object):
"""Lists all the blobs in the bucket."""
# Note: Client.list_blobs requires at least package version 1.17.0.
blobs = self.__storage_client.list_blobs(bucket)
print(bucket, ":")
for blob in blobs:
print("\t", blob.name)
def __get_blobs(self, bucket: object):
blobs = self.__storage_client.list_blobs(bucket)
return [blob.name for blob in blobs]
@staticmethod
def __delete_blob(bucket: Bucket, blob_name: str):
"""Deletes a blob from the bucket."""
# bucket_name = "your-bucket-name"
# blob_name = "your-object-name"
blob = bucket.blob(blob_name)
try:
blob.delete()
print("Blob {} deleted from bucket: {}.".format(blob_name, bucket))
except NotFound:
print("File:", blob_name, "doesn't exists in bucket:", bucket)
@staticmethod
def __getBlobGeneration(bucket: Bucket, blobName):
"""
Takes a bucket object and blob name and returns the blob's generation key
:param bucket: Bucket
:param blobName: String
:return:
"""
return bucket.get_blob(blobName).generation
@staticmethod
def __getBlobUpdated(bucket: Bucket, blobName):
"""
Takes a bucket object and blob name and returns the blob's last-updated timestamp
:param bucket: Bucket
:param blobName: String
:return:
"""
return bucket.get_blob(blobName).updated
# interesting conflict here: method is private, but pycharm suggests it to be static
# a method is suggested as static when it doesn't use instance variables
def __getBlobMetadata(self, bucket, blobName: str):
"""Prints a blobs/objects metadata"""
b = self.__storage_client.get_bucket(bucket) # get a bucket by name
blob = b.get_blob(blobName) # get a blob/object in that bucket by name
# build metadata_dict so these attributes can be read by key (metadata_dict["Blob"], metadata_dict["Content-type"], etc.)
# This gives easy access to storage metadata for outside use where a template is not available.
metadata_dict = {"Blob": blob.name, "Bucket": blob.bucket.name, "Storage class": blob.storage_class,
"ID": blob.id, "Size (bytes)": blob.size, "Updated": blob.updated,
"Generation": blob.generation, "Metageneration": blob.metageneration, "Etag": blob.etag,
"Owner": blob.owner, "Component count": blob.component_count, "Crc32c": blob.crc32c,
"md5_hash": blob.md5_hash, "Cache-control": blob.cache_control,
"Content-type": blob.content_type, "Content-disposition": blob.content_disposition,
"Metadata": blob.metadata}
if blob.temporary_hold:
metadata_dict["Temporary hold"] = "enabled"
else:
metadata_dict["Temporary hold"] = "disabled"
if blob.event_based_hold:
metadata_dict["Event based hold"] = "enabled"
else:
metadata_dict["Event based hold"] = "disabled"
if blob.retention_expiration_time:
metadata_dict["RetentionExpirationTime"] = blob.retention_expiration_time
return metadata_dict
@staticmethod
def __view_bucket_iam_members(bucket: Bucket): # type inference on bucket, to make method more protected.
policy = bucket.get_iam_policy()
for role in policy:
members = policy[role]
print('Role: {}, Members: {}'.format(role, members))
def __list_blobs_with_prefix(self, bucket: object, prefix, delimiter=None):
"""Lists all the blobs in the bucket that begin with the prefix.
This can be used to list all blobs in a "folder", e.g. "public/".
The delimiter argument can be used to restrict the results to only the
"files" in the given "folder". Without the delimiter, the entire tree under
the prefix is returned. For example, given these blobs:
/a/1.txt
/a/b/2.txt
If you just specify prefix = '/a', you'll get back:
/a/1.txt
/a/b/2.txt
However, if you specify prefix='/a' and delimiter='/', you'll get back:
/a/1.txt
"""
# Note: Client.list_blobs requires at least package version 1.17.0.
blobs = self.__storage_client.list_blobs(bucket, prefix=prefix,
delimiter=delimiter)
print('Blobs:')
for blob in blobs:
print(blob.name)
if delimiter:
print('Prefixes:')
for prefix in blobs.prefixes:
print(prefix)
def __check_bucket_exists(self, bucketName: str):
all_buckets = self.__get_buckets()
for bucket in all_buckets:
if bucket.name == bucketName:
return True
return False
def __return_bucket_obj_by_name(self, bucket_name: str):
"""Returns the bucket object - assuming it exists"""
all_buckets = [(b.id, b) for b in self.__get_buckets()]
for x in all_buckets:
if x[0] == bucket_name:
return x[1]
return None
def __recursive_folder_upload(self, path_to_folder, bucket: object):
"""If we choose to upload a folder then there might be other folders nested as such we need to recursively
upload each one. """
for filename in os.listdir(path_to_folder):
if os.path.isdir(os.path.join(path_to_folder, filename)):
self.__recursive_folder_upload(os.path.join(path_to_folder, filename), bucket)
else:
filepath = os.path.join(path_to_folder, filename)
upload_name = filepath.replace(self.__prj_root_dir, "")
self.__upload_blob(bucket, filepath, upload_name)
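# Hedged usage sketch (illustrative only; the paths, bucket and blob names below are
# placeholders, not values from the original module):
#   wrapper = GoogleCloudClientWrapper(json_auth_file_path="service_account.json",
#                                      prj_root_dir="/path/to/project")
#   wrapper.upload_to_storage_client(abs_path_to_file="/path/to/project/data/model.bin")
#   wrapper.download_from_storage_client(bucket="my-bucket", blob="data/model.bin",
#                                        download_to="/tmp/downloads")
#   wrapper.show_files_stored_by_storage_client()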
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version
if torch.__version__ == 'parrots':
TORCH_VERSION = torch.__version__
else:
# torch.__version__ could be 1.3.1+cu92, we only need the first two
# for comparison
TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
def adaptive_avg_pool2d(input, output_size):
"""Handle empty batch dimension to adaptive_avg_pool2d.
Args:
input (tensor): 4D tensor.
output_size (int, tuple[int,int]): the target output size.
"""
if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
if isinstance(output_size, int):
output_size = [output_size, output_size]
output_size = [*input.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(input, output_size)
return empty
else:
return F.adaptive_avg_pool2d(input, output_size)
class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
"""Handle empty batch dimension to AdaptiveAvgPool2d."""
def forward(self, x):
# PyTorch 1.9 does not support empty tensor inference yet
if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
output_size = self.output_size
if isinstance(output_size, int):
output_size = [output_size, output_size]
else:
output_size = [
v if v is not None else d
for v, d in zip(output_size,
x.size()[-2:])
]
output_size = [*x.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(x, output_size)
return empty
return super().forward(x)
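# Hedged example (not part of the original module): shows the empty-batch case the
# wrapper exists for; on any supported torch version the output keeps the 0-sized
# batch dimension.
if __name__ == '__main__':
    pool = AdaptiveAvgPool2d((7, 7))
    empty_batch = torch.zeros(0, 256, 32, 32)  # batch dimension of size 0
    print(pool(empty_batch).shape)             # torch.Size([0, 256, 7, 7])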
|
import os
import sys
lib_path = os.path.realpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'lib'))
if lib_path not in sys.path:
sys.path[0:0] = [lib_path]
PORT = 9090
import redis
REDIS_SYNC = redis.StrictRedis(db=0)
import brukva
REDIS_ASYNC = brukva.Client(selected_db=0)
import celery
CELERY = celery.Celery('spider', broker='redis://localhost:6379/0')
STORE = os.path.realpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'store'))
CONFIDENCE = 0.5
SCORE = 1000.0
try:
from settings_local import *
except ImportError:
pass
|
from netbox.api import OrderedDefaultRouter
from . import views
router = OrderedDefaultRouter()
router.APIRootView = views.TopologyViewsRootView
router.register('preselectdeviceroles', views.PreSelectDeviceRolesViewSet)
router.register('preselecttags', views.PreSelectTagsViewSet)
router.register('search', views.SearchViewSet, basename='search')
router.register('save-coords', views.SaveCoordsViewSet, basename='save_coords')
urlpatterns = router.urls |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (
Union,
AsyncIterator,
Awaitable,
Callable,
Optional,
Set,
)
from google.cloud.pubsub_v1.subscriber.message import Message
from google.cloud.pubsublite.cloudpubsub.internal.single_subscriber import (
AsyncSubscriberFactory,
AsyncSingleSubscriber,
)
from google.cloud.pubsublite.cloudpubsub.subscriber_client_interface import (
AsyncSubscriberClientInterface,
)
from google.cloud.pubsublite.types import (
SubscriptionPath,
FlowControlSettings,
Partition,
)
from overrides import overrides
async def _iterate_subscriber(
subscriber: AsyncSingleSubscriber, on_failure: Callable[[], Awaitable[None]]
) -> AsyncIterator[Message]:
try:
while True:
batch = await subscriber.read()
for message in batch:
yield message
except: # noqa: E722
await on_failure()
raise
class MultiplexedAsyncSubscriberClient(AsyncSubscriberClientInterface):
_underlying_factory: AsyncSubscriberFactory
_live_clients: Set[AsyncSingleSubscriber]
def __init__(self, underlying_factory: AsyncSubscriberFactory):
self._underlying_factory = underlying_factory
self._live_clients = set()
@overrides
async def subscribe(
self,
subscription: Union[SubscriptionPath, str],
per_partition_flow_control_settings: FlowControlSettings,
fixed_partitions: Optional[Set[Partition]] = None,
) -> AsyncIterator[Message]:
if isinstance(subscription, str):
subscription = SubscriptionPath.parse(subscription)
subscriber = self._underlying_factory(
subscription, fixed_partitions, per_partition_flow_control_settings
)
await subscriber.__aenter__()
self._live_clients.add(subscriber)
return _iterate_subscriber(
subscriber, lambda: self._try_remove_client(subscriber)
)
@overrides
async def __aenter__(self):
return self
async def _try_remove_client(self, client: AsyncSingleSubscriber):
if client in self._live_clients:
self._live_clients.remove(client)
await client.__aexit__(None, None, None)
@overrides
async def __aexit__(self, exc_type, exc_value, traceback):
live_clients = self._live_clients
self._live_clients = set()
for client in live_clients:
await client.__aexit__(None, None, None)
|
from typing import Dict, Union, Generator, Set
from .Grid import Grid, Coordinate
from .PuzzleErrors import *
class Puzzle:
@staticmethod
def __get_block_index(value):
return (value - 1) // 3 + 1
@staticmethod
def __conflicting_cells(position: Coordinate) -> Generator[Coordinate, None, None]:
block = Puzzle.__get_block_index(position.x), Puzzle.__get_block_index(position.y)
for col in range(1, 10):
col_block = Puzzle.__get_block_index(col)
if col_block == block[0]:
if col == position.x:
for y in (y for y in range(1, 10) if y != position.y):
yield Coordinate(col, y)
else:
for y in range(1, 4):
yield Coordinate(col, y + (block[1] - 1) * 3)
else:
yield Coordinate(col, position.y)
@staticmethod
def __all_cells_generator():
return (Coordinate(x, y) for x in range(1, 10) for y in range(1, 10))
def __from_starting_cell_generator(self, start: Coordinate):
next_cell = self.grid.get_next_coordinate(start)
while next_cell:
yield next_cell
next_cell = self.grid.get_next_coordinate(next_cell)
def get_solved_positions(self):
return self.__get_solves(self.__all_cells_generator())
def get_unsolved_positions(self):
return self.__get_possibilities(self.__all_cells_generator())
def get_next_unsolved(self, position):
for cell in self.__from_starting_cell_generator(position):
values = self.get(cell)
if isinstance(values, set):
return cell, values
return None
def is_solved(self):
return sum(value for pos, value in self.get_solved_positions()) == 405
def __get_solves(self, pos_iter):
return ((pos, solves) for (pos, solves) in ((pos, self.grid.get(pos)) for pos in pos_iter) if
isinstance(solves, int))
def __get_possibilities(self, pos_iter):
return ((pos, possibilities) for (pos, possibilities) in ((pos, self.grid.get(pos)) for pos in pos_iter) if
isinstance(possibilities, set))
@classmethod
def from_string(cls, text: str):
index = 0
p = Puzzle()
for val in str(text).strip("[] ").split(','):
if val.strip():
p.set(Coordinate(index % 9 + 1, index // 9 + 1), int(val))
index += 1
return p
@classmethod
def from_dict(cls, values: Dict[Coordinate, int]):
p = Puzzle()
for pos, value in values.items():
p.set(pos, value)
return p
def __init__(self):
self.grid = Grid(9, 9)
for x in range(1, 10):
for y in range(1, 10):
self.grid.set(Coordinate(x, y), set(range(1, 10)))
def set(self, position, value: int):
if isinstance(self.get(position), int):
self.unset(position)
if not (0 < value < 10):
raise ValueError("Cannot set {} to {}".format(position, value))
self.__update_puzzle(position, value)
def unset(self, position: Coordinate):
new_valid_values = set(range(1, 10)).difference(
val for pos, val in self.__get_solves(self.__conflicting_cells(position)))
self.grid.set(position, new_valid_values)
for other_pos in self.__conflicting_cells(position):
if not isinstance(self.get(other_pos), int):
self.grid.set(other_pos,
set(range(1, 10)).difference(
val for pos, val in self.__get_solves(self.__conflicting_cells(other_pos))))
def get(self, position) -> Union[int, Set[int]]:
return self.grid.get(position)
def __update_puzzle(self, position, value):
for pos, val in self.__get_solves(self.__conflicting_cells(position)):
if val == value:
raise DuplicateError(position, pos, value)
for pos, val in self.__get_possibilities(self.__conflicting_cells(position)):
new_vals = self.grid.get(pos).difference({value})
if len(new_vals) == 0:
raise InvalidSetError(position, pos, value)
self.grid.set(pos, new_vals)
self.grid.set(position, value)
def __get_str(self, position: Coordinate):
val = self.grid.get(position)
if val is None or not isinstance(val, int):
return "_"
return str(val)
def __str__(self):
return '\n'.join(''.join(self.__get_str(Coordinate(x, y)) for x in range(1, 10)) for y in range(1, 10))
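# Hedged usage sketch (illustrative only, not in the original module):
#   p = Puzzle()
#   p.set(Coordinate(1, 1), 5)        # place a 5 at column 1, row 1
#   p.set(Coordinate(2, 1), 3)        # place a 3 at column 2, row 1
#   print(p)                          # unsolved cells print as '_'
#   print(p.get(Coordinate(3, 1)))    # remaining candidate set for column 3, row 1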
|
from turtle import *
from colorsys import *
sides = 2
tracer(5)
speed(1)
delay(0)
hideturtle()
bgcolor("black")
hue=0
for i in range(1000):
color(hsv_to_rgb(hue,1,1))
hue+=0.003
fd(i * 3 // sides + i)
lt(360 / sides + 1)
width(i * sides / 250)
done()
|
#!/usr/bin/env python
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from collections import defaultdict
import csv
from GenericTsvReader import GenericTsvReader
from shared_utils import count_lines
def parseOptions():
# Process arguments
desc = '''Create gene table that handles the total_alterations_in_gene and tissue_types_affected'''
epilog = '''
'''
parser = ArgumentParser(description=desc, formatter_class=RawDescriptionHelpFormatter, epilog=epilog)
parser.add_argument("ds_file", type=str, help="COSMIC datasource filename. For example, 'CosmicCompleteExport_v62_261112.tsv' ")
parser.add_argument("output_file", type=str, help="TSV filename for output. File will be overwritten if it already exists.")
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parseOptions()
inputFilename = args.ds_file
outputFilename = args.output_file
outputHeaders = ['gene', 'total_alterations_in_gene', 'tissue_types_affected']
tsvReader = GenericTsvReader(inputFilename)
headers = tsvReader.getFieldNames()
print('Found headers (input): ' + str(headers))
if "Gene name" not in headers:
raise NotImplementedError("Could not find Gene name column in the input file.")
if 'Primary site' not in headers:
raise NotImplementedError("Could not find Primary site column in the input file.")
# Construct dictionary that is [gene][histology/tissue type] = count, where count is the total for that histology
# and that gene
last_i = 0
num_lines = count_lines(inputFilename)
geneDictionary = defaultdict(dict)
for i, line in enumerate(tsvReader):
gene = line['Gene name']
# Skip blank genes
if gene is None or gene.strip() == "":
continue
site = line['Primary site']
if site not in geneDictionary[gene].keys():
geneDictionary[gene][site] = 0
geneDictionary[gene][site] += 1
# Progress...
if i - last_i > round(float(num_lines)/100.0):
print("{:.0f}% complete".format(100 * float(i)/float(num_lines)))
last_i = i
# Write tsv output file.
print("Writing output...")
tsvWriter = csv.DictWriter(open(outputFilename, 'w'), outputHeaders, delimiter='\t', lineterminator="\n")
tsvWriter.fieldnames = outputHeaders
tsvWriter.writeheader()
sortedGenes = sorted(geneDictionary.keys())
for g in sortedGenes:
row = dict()
# Generate
row['gene'] = g
tissues = []
total = 0
for h in sorted(geneDictionary[g].keys()):
tissues.append(h + "(" + str(geneDictionary[g][h]) + ")")
total += geneDictionary[g][h]
row['total_alterations_in_gene'] = str(total)
row['tissue_types_affected'] = "|".join(tissues)
tsvWriter.writerow(row)
print("Done")
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import (
absolute_import,
print_function
)
import io
from glob import glob
from os.path import (
basename,
dirname,
join,
splitext
)
from setuptools import find_packages, setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='zentweepy',
version='0.1.0',
license='', # Recommended: 'Apache 2.0'
description='',
author='',
author_email='',
url='', # Repository (bitbucket, github, gitlab...)
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
install_requires=[
'zentropi>=0.1.3, <0.2.0',
],
entry_points={
'console_scripts': [
'zentweepy = zentweepy.cli:main',
]
},
)
|
from CommonServerPython import *
def main():
params = {k: v for k, v in demisto.params().items() if v is not None}
params['indicator_type'] = FeedIndicatorType.IP
params['url'] = 'http://www.malwaredomainlist.com/hostslist/ip.txt'
params['indicator'] = json.dumps({
"regex": r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",
})
# Call the main execution of the HTTP API module.
feed_main('Malware Domain List Active IPs Feed', params, 'malwaredomainlist')
from HTTPFeedApiModule import * # noqa: E402
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
|
import random
from collections import namedtuple
from utils.Segment_tree import SumSegmentTree, MinSegmentTree
import numpy as np
class Memory:
def __init__(self, size):
self.size = size
self.currentPosition = 0
self.states = []
self.actions = []
self.rewards = []
self.newStates = []
self.finals = []
def getMiniBatch(self, size):
indices = random.sample(population=range(len(self.states)), k=min(size, len(self.states)))
miniBatch = []
for index in indices:
miniBatch.append({'state': self.states[index], 'action': self.actions[index], 'reward': self.rewards[index],
'newState': self.newStates[index], 'isFinal': self.finals[index]})
return miniBatch
def getCurrentSize(self):
return len(self.states)
def getMemory(self, index):
return {'state': self.states[index], 'action': self.actions[index], 'reward': self.rewards[index],
'newState': self.newStates[index], 'isFinal': self.finals[index]}
def addMemory(self, state, action, reward, newState, isFinal):
if (self.currentPosition >= self.size - 1):
self.currentPosition = 0
if (len(self.states) > self.size):
self.states[self.currentPosition] = state
self.actions[self.currentPosition] = action
self.rewards[self.currentPosition] = reward
self.newStates[self.currentPosition] = newState
self.finals[self.currentPosition] = isFinal
else:
self.states.append(state)
self.actions.append(action)
self.rewards.append(reward)
self.newStates.append(newState)
self.finals.append(isFinal)
self.currentPosition += 1
Supervised_Experience = namedtuple('Supervised_Experience', ['state', 'label', 'action'])
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'done', 'next_state'])
class ExperienceReplay:
"""
This class provides an abstraction to store the [s, a, r, s'] elements of each iteration,
using an Experience object which holds the s-a-r-s' transition information in an object-oriented way
"""
def __init__(self, size):
self.size = size
self.currentPosition = 0
self.buffer = []
def add_memory(self, state, action, reward, next_state, is_done):
exp = Experience(state, action, reward, is_done, next_state)
if len(self.buffer) < self.size:
self.buffer.append(exp)
else:
self.buffer[self.currentPosition] = exp
self.currentPosition = (self.currentPosition + 1) % self.size
def getMiniBatch(self, batch_size):
indices = random.sample(population=range(len(self.buffer)), k=min(batch_size, len(self.buffer)))
return [self.buffer[index] for index in indices]
class Supervised_ExperienceReplay(ExperienceReplay):
"""
This class provides an object to store the [state, Supervised_Q_value] values of each iteration
"""
def __init__(self, size):
super(Supervised_ExperienceReplay, self).__init__(size)
def add_memory(self, state, Supervised_Q_value, action):
exp = Supervised_Experience(state, Supervised_Q_value, action)
if len(self.buffer) < self.size:
self.buffer.append(exp)
else:
self.buffer[self.currentPosition] = exp
self.currentPosition = (self.currentPosition + 1) % self.size
class ExperienceReplayMultistep(ExperienceReplay):
"""
Multi-step experience replay as reviewed in the Rainbow paper; this is basically the TD(lambda)-style n-step return taught in Silver's course.
We accumulate N steps in an episode buffer, then store a single transition built from the first and last state of the N-step series;
this is triggered by the add_to_buffer signal, which tells us when to store a transition in the buffer.
Note:
if N is very large it can introduce high variance into the training phase, so be careful with the number of steps.
"""
def __init__(self, size, gamma):
super(ExperienceReplayMultistep, self).__init__(size)
self.gamma = gamma
self.episode_buffer = []
def create_first_last_exp(self):
if self.episode_buffer[-1].done and len(self.episode_buffer) <= 1: # address a special case at restart
last_state = None
else:
last_state = self.episode_buffer[-1].next_state
total_reward = 0.0
for exp in reversed(self.episode_buffer):
total_reward *= self.gamma
total_reward += exp.reward
first_exp = self.episode_buffer[0]
exp = Experience(state=first_exp.state, action=first_exp.action, reward=total_reward, next_state=last_state, done=self.episode_buffer[-1].done)
self.buffer.append(exp)
if len(self.buffer) > self.size:
self.buffer.pop(0)
def add_memory(self, state, action, reward, next_state, is_done, add_to_buffer):
exp = Experience(state, action, reward, is_done, next_state)
self.episode_buffer.append(exp)
if add_to_buffer or is_done:
self.create_first_last_exp()
self.episode_buffer.clear()
class PrioritizedExperienceReplay(ExperienceReplay):
"""
taken from https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
with adjustments to our code
"""
def __init__(self, size, alpha):
super(PrioritizedExperienceReplay, self).__init__(size)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size: # it_capacity is a power of 2
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add_memory(self, state, action, reward, next_state, is_done):
idx = self.currentPosition
super(PrioritizedExperienceReplay, self).add_memory( state, action, reward, next_state, is_done)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
p_total = self._it_sum.sum(0, len(self.buffer) - 1) # total priority sum in tree
every_range_len = p_total / batch_size # divide into segments, we pick a transition from each segment
for i in range(batch_size):
mass = random.random() * every_range_len + i * every_range_len # mass: a random point inside segment i of the cumulative priority sum
idx = self._it_sum.find_prefixsum_idx(mass) # the index of the transition in the tree
res.append(idx)
return res
def getMiniBatch(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns:
1) batch with s-a-r-s-a transitions represented as experience object
2) weights for each of those transitions
3) their indexes
"""
assert beta > 0
N = len(self.buffer)
# sample indices in proportion to the transitions' priorities
indexes = self._sample_proportional(batch_size)
weights = []
batch_transitions = []
sum = self._it_sum.sum()
prob_min = self._it_min.min() / sum
max_weight = (prob_min * N) ** (-beta) # according to PER paper,
# max weight is used to normalize the weights
for idx in indexes:
prob_sample = self._it_sum[idx] / sum
weight = (prob_sample * N) ** (-beta) # importance-sampling weight: corrects the bias that high-probability transitions introduce
weights.append(weight)
batch_transitions.append(self.buffer[idx])
weights = np.array(weights) / max_weight # normalize by the maximum weight
return batch_transitions, weights, indexes
def update_priorities(self,indexes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index indexes[i] in buffer
to priorities[i].
Parameters
----------
indexes: [int]
List of indexes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled indexes denoted by
the `indexes` argument.
"""
assert len(indexes) == len(priorities)
for index, priority in zip(indexes, priorities):
assert priority > 0 and 0 <= index < len(self.buffer)
self._it_sum[index] = priority ** self._alpha
self._it_min[index] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
class MultiStepPrioritizedExperienceReplay(PrioritizedExperienceReplay):
def __init__(self, size, alpha, gamma):
super(MultiStepPrioritizedExperienceReplay, self).__init__(size=size, alpha=alpha)
self.gamma = gamma
self.episode_buffer = []
def create_first_last_exp(self):
if self.episode_buffer[-1].done and len(self.episode_buffer) <= 1: # address a special case at restart
last_state = None
else:
last_state = self.episode_buffer[-1].next_state
total_reward = 0.0
for exp in reversed(self.episode_buffer):
total_reward *= self.gamma
total_reward += exp.reward
first_exp = self.episode_buffer[0]
super(MultiStepPrioritizedExperienceReplay, self).add_memory(state=first_exp.state,
action=first_exp.action,
reward=total_reward,
next_state=last_state,
is_done=self.episode_buffer[-1].done)
def add_memory(self, state, action, reward, next_state, is_done, add_to_buffer):
exp = Experience(state, action, reward, is_done, next_state)
self.episode_buffer.append(exp)
if add_to_buffer or is_done:
self.create_first_last_exp()
self.episode_buffer.clear()
class Supervised_Prioritzed_ExperienceReplay(PrioritizedExperienceReplay):
"""
This class provides an object to store the [state, Supervised_Q_value] values of each iteration
also, this class incorporates Priority for the experience replay for better training with policy_distillation
"""
def __init__(self, size, alpha):
super(Supervised_Prioritzed_ExperienceReplay, self).__init__(size=size, alpha=alpha)
def add_memory(self, state, Supervised_Q_value, action):
exp = Supervised_Experience(state, Supervised_Q_value, action)
idx = self.currentPosition
if len(self.buffer) < self.size:
self.buffer.append(exp)
else:
self.buffer[self.currentPosition] = exp
self.currentPosition = (self.currentPosition + 1) % self.size
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
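# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal view of the prioritized-replay loop, assuming the base class
# exposes add_memory(state, action, reward, next_state, is_done) as suggested
# by MultiStepPrioritizedExperienceReplay.add_memory. The names s, a, r, s2,
# done and td_errors are hypothetical placeholders.
#
# buffer = PrioritizedExperienceReplay(size=100000, alpha=0.6)
# buffer.add_memory(state=s, action=a, reward=r, next_state=s2, is_done=done)
# transitions, weights, indexes = buffer.getMiniBatch(batch_size=32, beta=0.4)
# loss = (weights * td_errors ** 2).mean()                      # importance-sampling correction
# buffer.update_priorities(indexes, np.abs(td_errors) + 1e-6)   # keep priorities strictly positive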
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import submitit
from compert.train import train_compert, parse_arguments
import json
import sys
if __name__ == "__main__":
json_file = sys.argv[1]
with open(json_file, "r") as f:
commands = [json.loads(line) for line in f.readlines()]
executor = submitit.SlurmExecutor(folder="/checkpoint/dlp/sweep_jsonl/")
executor.update_parameters(
time=3 * 24 * 60,
gpus_per_node=1,
array_parallelism=512,
cpus_per_task=4,
comment="Deadline nat biotech this week",
partition="priority")
executor.map_array(train_compert, commands)
|
import sqlite3
class DataBase:
def __init__(self):
self.connection = sqlite3.connect("VKMessages.db")
self.cursor = self.connection.cursor()
self.create_table()
def create_table(self):
self.cursor.execute("""CREATE TABLE IF NOT EXISTS messages (
message TEXT,
user TEXT,
date TEXT)
""")
self.connection.commit()
def insert_data(self, message_value, user, date):
self.cursor.execute("""INSERT INTO messages(
message,
user,
date
) VALUES (?, ?, ?)
""", (message_value, user, date))
self.connection.commit()
def get_cursor(self):
return self.cursor
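# Usage sketch (illustrative, not part of the original module): the table is
# created on construction, so inserting and reading back is just:
#
# db = DataBase()
# db.insert_data("hello", "alice", "2020-01-01")
# print(db.get_cursor().execute("SELECT * FROM messages").fetchall())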
|
import pytest
from solid_toolbox.units import Vec, Vec2d
def test_vec_ops():
assert Vec(1, 2, 3) + Vec(4, 5, 6) == Vec(5, 7, 9)
assert Vec(1, 2, 3) - Vec(4, 5, 6) == Vec(-3, -3, -3)
assert Vec(1, 2, 3) * Vec(4, 5, 6) == Vec(4, 10, 18)
assert Vec(1, 2, 3) / Vec(4, 5, 6) == Vec(0.25, 0.4, 0.5)
assert Vec(1, 2, 3) // Vec(4, 5, 6) == Vec(0, 0, 0)
assert Vec(17, 17, 17) // Vec(2, 3, 4) == Vec(8, 5, 4)
assert Vec(1, 2, 3) @ Vec(4, 5, 6) == 32
assert Vec(1, 3, -5) @ Vec(4, -2, -1) == 3
def test_vec_scalar_ops():
assert Vec(1, 2, 3) * 10 == Vec(10, 20, 30)
assert 10 * Vec(1, 2, 3) == Vec(10, 20, 30)
assert Vec(1, 2, 3) / 10 == Vec(0.1, 0.2, 0.3)
assert 10 / Vec(1, 2, 4) == Vec(10, 5, 2.5)
assert Vec(1, 2, 3) // 10 == Vec(0, 0, 0)
assert 10 // Vec(1, 2, 4) == Vec(10, 5, 2)
with pytest.raises(TypeError):
Vec(1, 2, 3) + 10
with pytest.raises(TypeError):
10 + Vec(1, 2, 3)
with pytest.raises(TypeError):
Vec(1, 2, 3) - 10
with pytest.raises(TypeError):
10 - Vec(1, 2, 3)
def test_mutation_forbidden():
foo = Vec(1, 2, 3)
with pytest.raises(TypeError):
foo[0] = 5
with pytest.raises(AttributeError):
foo.x = 6
assert foo == Vec(1, 2, 3)
assert foo.x == 1
def test_bad_add():
with pytest.raises(TypeError):
Vec(1, 2, 3) + Vec2d(1, 2)
with pytest.raises(TypeError):
Vec2d(1, 2) + Vec(1, 2, 3)
def test_type_preservation():
assert type(Vec(1, 2, 3) + Vec(4, 5, 6)) == Vec
assert type(Vec2d(1, 2) + Vec2d(4, 5)) == Vec2d
def test_hashing():
assert hash(Vec(1, 2, 3)) == hash(Vec(1, 2, 3))
assert hash(Vec(1, 2, 3)) != hash(Vec(3, 2, 1))
assert hash(Vec2d(1, 2)) == hash(Vec2d(1, 2))
assert hash(Vec2d(1, 2)) != hash(Vec2d(2, 1))
assert hash(Vec2d(1, 2)) != hash(Vec(1, 2, 0))
|
"""Generic testing tools that do NOT depend on Twisted.
In particular, this module exposes a set of top-level assert* functions that
can be used in place of nose.tools.assert* in method generators (the ones in
nose can not, at least as of nose 0.10.4).
Note: our testing package contains testing.util, which does depend on Twisted
and provides utilities for tests that manage Deferreds. All testing support
tools that only depend on nose, IPython or the standard library should go here
instead.
Authors
-------
- Fernando Perez <Fernando.Perez@berkeley.edu>
"""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import sys
import tempfile
from contextlib import contextmanager
from io import StringIO
try:
# These tools are used by parts of the runtime, so we make the nose
# dependency optional at this point. Nose is a hard dependency to run the
# test suite, but NOT to use ipython itself.
import nose.tools as nt
has_nose = True
except ImportError:
has_nose = False
from IPython.config.loader import Config
from IPython.utils.process import find_cmd, getoutputerror
from IPython.utils.text import list_strings, getdefaultencoding
from IPython.utils.io import temp_pyfile, Tee
from IPython.utils import py3compat
from . import decorators as dec
from . import skipdoctest
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Make a bunch of nose.tools assert wrappers that can be used in test
# generators. This will expose an assert* function for each one in nose.tools.
_tpl = """
def %(name)s(*a,**kw):
return nt.%(name)s(*a,**kw)
"""
if has_nose:
for _x in [a for a in dir(nt) if a.startswith('assert')]:
        exec(_tpl % dict(name=_x))
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
# The docstring for full_path doctests differently on win32 (different path
# separator) so just skip the doctest there. The example remains informative.
doctest_deco = skipdoctest.skip_doctest if sys.platform == 'win32' else dec.null_deco
@doctest_deco
def full_path(startPath,files):
"""Make full paths for all the listed files, based on startPath.
Only the base part of startPath is kept, since this routine is typically
used with a script's __file__ variable as startPath. The base of startPath
is then prepended to all the listed files, forming the output list.
Parameters
----------
startPath : string
Initial path to use as the base for the results. This path is split
using os.path.split() and only its first component is kept.
files : string or list
One or more files.
Examples
--------
>>> full_path('/foo/bar.py',['a.txt','b.txt'])
['/foo/a.txt', '/foo/b.txt']
>>> full_path('/foo',['a.txt','b.txt'])
['/a.txt', '/b.txt']
If a single file is given, the output is still a list:
>>> full_path('/foo','a.txt')
['/a.txt']
"""
files = list_strings(files)
base = os.path.split(startPath)[0]
return [ os.path.join(base,f) for f in files ]
def parse_test_output(txt):
"""Parse the output of a test run and return errors, failures.
Parameters
----------
txt : str
Text output of a test run, assumed to contain a line of one of the
following forms::
'FAILED (errors=1)'
'FAILED (failures=1)'
'FAILED (errors=1, failures=1)'
Returns
-------
nerr, nfail: number of errors and failures.
"""
err_m = re.search(r'^FAILED \(errors=(\d+)\)', txt, re.MULTILINE)
if err_m:
nerr = int(err_m.group(1))
nfail = 0
return nerr, nfail
fail_m = re.search(r'^FAILED \(failures=(\d+)\)', txt, re.MULTILINE)
if fail_m:
nerr = 0
nfail = int(fail_m.group(1))
return nerr, nfail
both_m = re.search(r'^FAILED \(errors=(\d+), failures=(\d+)\)', txt,
re.MULTILINE)
if both_m:
nerr = int(both_m.group(1))
nfail = int(both_m.group(2))
return nerr, nfail
# If the input didn't match any of these forms, assume no error/failures
return 0, 0
# So nose doesn't think this is a test
parse_test_output.__test__ = False
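# Example (illustrative): given a nose/unittest summary, the counts come from
# the FAILED line, e.g. parse_test_output("...\nFAILED (errors=1, failures=2)\n")
# returns (1, 2), and text with no FAILED line returns (0, 0).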
def default_argv():
"""Return a valid default argv for creating testing instances of ipython"""
return ['--quick', # so no config file is loaded
# Other defaults to minimize side effects on stdout
'--colors=NoColor', '--no-term-title','--no-banner',
'--autocall=0']
def default_config():
"""Return a config object with good defaults for testing."""
config = Config()
config.TerminalInteractiveShell.colors = 'NoColor'
    config.TerminalInteractiveShell.term_title = False
config.TerminalInteractiveShell.autocall = 0
config.HistoryManager.hist_file = tempfile.mktemp(u'test_hist.sqlite')
config.HistoryManager.db_cache_size = 10000
return config
def ipexec(fname, options=None):
"""Utility to call 'ipython filename'.
    Starts IPython with a minimal and safe configuration to make startup as fast
as possible.
Note that this starts IPython in a subprocess!
Parameters
----------
fname : str
Name of file to be executed (should have .py or .ipy extension).
options : optional, list
Extra command-line flags to be passed to IPython.
Returns
-------
(stdout, stderr) of ipython subprocess.
"""
if options is None: options = []
# For these subprocess calls, eliminate all prompt printing so we only see
# output from script execution
prompt_opts = [ '--PromptManager.in_template=""',
'--PromptManager.in2_template=""',
'--PromptManager.out_template=""'
]
cmdargs = ' '.join(default_argv() + prompt_opts + options)
_ip = get_ipython()
test_dir = os.path.dirname(__file__)
ipython_cmd = find_cmd('ipython3' if py3compat.PY3 else 'ipython')
# Absolute path for filename
full_fname = os.path.join(test_dir, fname)
full_cmd = '%s %s %s' % (ipython_cmd, cmdargs, full_fname)
#print >> sys.stderr, 'FULL CMD:', full_cmd # dbg
out = getoutputerror(full_cmd)
# `import readline` causes 'ESC[?1034h' to be the first output sometimes,
# so strip that off the front of the first line if it is found
if out:
first = out[0]
m = re.match(r'\x1b\[[^h]+h', first)
if m:
# strip initial readline escape
out = list(out)
out[0] = first[len(m.group()):]
out = tuple(out)
return out
def ipexec_validate(fname, expected_out, expected_err='',
options=None):
"""Utility to call 'ipython filename' and validate output/error.
This function raises an AssertionError if the validation fails.
Note that this starts IPython in a subprocess!
Parameters
----------
fname : str
Name of the file to be executed (should have .py or .ipy extension).
expected_out : str
Expected stdout of the process.
expected_err : optional, str
Expected stderr of the process.
options : optional, list
Extra command-line flags to be passed to IPython.
Returns
-------
None
"""
import nose.tools as nt
out, err = ipexec(fname, options)
#print 'OUT', out # dbg
#print 'ERR', err # dbg
    # If there are any errors, we must check those before stdout, as they may be
# more informative than simply having an empty stdout.
if err:
if expected_err:
nt.assert_equals(err.strip(), expected_err.strip())
else:
raise ValueError('Running file %r produced error: %r' %
(fname, err))
# If no errors or output on stderr was expected, match stdout
nt.assert_equals(out.strip(), expected_out.strip())
class TempFileMixin(object):
"""Utility class to create temporary Python/IPython files.
Meant as a mixin class for test cases."""
def mktmp(self, src, ext='.py'):
"""Make a valid python temp file."""
fname, f = temp_pyfile(src, ext)
self.tmpfile = f
self.fname = fname
def tearDown(self):
if hasattr(self, 'tmpfile'):
# If the tmpfile wasn't made because of skipped tests, like in
# win32, there's nothing to cleanup.
self.tmpfile.close()
try:
os.unlink(self.fname)
except:
# On Windows, even though we close the file, we still can't
# delete it. I have no clue why
pass
pair_fail_msg = ("Testing {0}\n\n"
"In:\n"
" {1!r}\n"
"Expected:\n"
" {2!r}\n"
"Got:\n"
" {3!r}\n")
def check_pairs(func, pairs):
"""Utility function for the common case of checking a function with a
sequence of input/output pairs.
Parameters
----------
func : callable
The function to be tested. Should accept a single argument.
pairs : iterable
A list of (input, expected_output) tuples.
Returns
-------
None. Raises an AssertionError if any output does not match the expected
value.
"""
name = getattr(func, "func_name", getattr(func, "__name__", "<unknown>"))
for inp, expected in pairs:
out = func(inp)
assert out == expected, pair_fail_msg.format(name, inp, expected, out)
if py3compat.PY3:
MyStringIO = StringIO
else:
# In Python 2, stdout/stderr can have either bytes or unicode written to them,
# so we need a class that can handle both.
class MyStringIO(StringIO):
def write(self, s):
s = py3compat.cast_unicode(s, encoding=getdefaultencoding())
super(MyStringIO, self).write(s)
notprinted_msg = """Did not find {0!r} in printed output (on {1}):
{2!r}"""
class AssertPrints(object):
"""Context manager for testing that code prints certain text.
Examples
--------
>>> with AssertPrints("abc", suppress=False):
... print "abcd"
... print "def"
...
abcd
def
"""
def __init__(self, s, channel='stdout', suppress=True):
self.s = s
self.channel = channel
self.suppress = suppress
def __enter__(self):
self.orig_stream = getattr(sys, self.channel)
self.buffer = MyStringIO()
self.tee = Tee(self.buffer, channel=self.channel)
setattr(sys, self.channel, self.buffer if self.suppress else self.tee)
def __exit__(self, etype, value, traceback):
self.tee.flush()
setattr(sys, self.channel, self.orig_stream)
printed = self.buffer.getvalue()
assert self.s in printed, notprinted_msg.format(self.s, self.channel, printed)
return False
class AssertNotPrints(AssertPrints):
"""Context manager for checking that certain output *isn't* produced.
Counterpart of AssertPrints"""
def __exit__(self, etype, value, traceback):
self.tee.flush()
setattr(sys, self.channel, self.orig_stream)
printed = self.buffer.getvalue()
assert self.s not in printed, notprinted_msg.format(self.s, self.channel, printed)
return False
@contextmanager
def mute_warn():
from IPython.utils import warn
save_warn = warn.warn
warn.warn = lambda *a, **kw: None
try:
yield
finally:
warn.warn = save_warn
@contextmanager
def make_tempfile(name):
""" Create an empty, named, temporary file for the duration of the context.
"""
f = open(name, 'w')
f.close()
try:
yield
finally:
os.unlink(name)
|
import logging
from typing import NamedTuple
from uuid import uuid4
from dsm.epaxos.cmd.state import Command, Checkpoint
from dsm.epaxos.inst.state import Slot, Ballot, State, Stage
from dsm.epaxos.inst.store import InstanceStoreState
from dsm.epaxos.net.packet import Packet, PACKET_CLIENT, PACKET_LEADER, PACKET_ACCEPTOR, ClientRequest
from dsm.epaxos.replica.acceptor.main import AcceptorCoroutine
from dsm.epaxos.replica.client.main import ClientsActor
from dsm.epaxos.replica.corout import coroutiner
from dsm.epaxos.replica.leader.ev import LeaderStart
from dsm.epaxos.replica.leader.main import LeaderCoroutine
from dsm.epaxos.replica.main.ev import Reply, Wait, Tick
from dsm.epaxos.replica.net.ev import Send
from dsm.epaxos.replica.net.main import NetActor, ClientNetComm
from dsm.epaxos.replica.config import ReplicaState
from dsm.epaxos.replica.state.ev import LoadCommandSlot, Load, Store, InstanceState
from dsm.epaxos.replica.state.main import StateActor
logger = logging.getLogger(__name__)
STATE_MSGS = (LoadCommandSlot, Load, Store)
STATE_EVENTS = (InstanceState,)
LEADER_MSGS = (LeaderStart,)
NET_MSGS = (Send,)
class Unroutable(Exception):
def __init__(self, payload):
self.payload = payload
super().__init__(payload)
pass
class MainCoroutine(NamedTuple):
state: None
clients: None
leader: None
acceptor: None
net: None
trace: bool = True
def route(self, req, d=0):
if isinstance(req, STATE_MSGS):
yield from self.run_sub(self.state, req, d)
elif isinstance(req, LEADER_MSGS):
yield from self.run_sub(self.leader, req, d)
elif isinstance(req, NET_MSGS):
yield from self.run_sub(self.net, req, d)
elif isinstance(req, Reply):
yield req
else:
self._trace(f'Unroutable {req}')
raise Unroutable(req)
def _trace(self, *args):
# print(*args)
pass
def run_sub(self, corout, ev, d=1):
self._trace(' ' * d + 'BEGIN', ev)
rep = coroutiner(corout, ev)
self._trace(' ' * (d + 1) + '>', rep)
prev_rep = None
# When do we assume that
while not isinstance(rep, Wait):
prev_rep = rep
# reqx = None
try:
zzz_iter = self.route(rep, d + 2)
rep2 = next(zzz_iter)
while not isinstance(rep2, Reply):
rep3 = yield rep2
rep2 = zzz_iter.send(rep3)
reqx = rep2
assert isinstance(reqx, Reply), reqx
self._trace(' ' * (d + 1) + '>', 'DONE', reqx)
except Unroutable as e:
self._trace(' ' * (d + 1) + '>', 'UNROUTABLE', rep)
reqx = yield rep
self._trace(' ' * (d + 1) + '>', 'UNROUTABLE RTN', reqx)
if not isinstance(reqx, Reply):
corout.throw(Unroutable(reqx))
# assert isinstance(reqx, Reply), reqx
rep = coroutiner(corout, reqx.payload)
self._trace(' ' * (d + 1) + '>', rep)
assert isinstance(prev_rep, Reply)
self._trace(' ' * d + 'END', prev_rep)
yield Reply(prev_rep.payload)
def run(self):
while True:
ev = yield Wait()
if isinstance(ev, Tick):
yield from self.run_sub(self.acceptor, ev)
elif isinstance(ev, Packet):
if isinstance(ev.payload, PACKET_CLIENT):
yield from self.run_sub(self.clients, ev)
elif isinstance(ev.payload, PACKET_LEADER):
yield from self.run_sub(self.leader, ev)
elif isinstance(ev.payload, PACKET_ACCEPTOR):
yield from self.run_sub(self.acceptor, ev)
else:
assert False, ev
elif isinstance(ev, STATE_EVENTS):
yield from self.run_sub(self.clients, ev)
else:
assert False, ev
def main():
st = ReplicaState(
0,
0,
[1, 2, 3, 4],
[1, 2, 3, 4],
)
state = StateActor().run()
clients = ClientsActor().run()
leader = LeaderCoroutine(st).run()
acceptor = AcceptorCoroutine(st).run()
net = NetActor().run()
next(state)
next(clients)
next(leader)
next(acceptor)
next(net)
m = MainCoroutine(
state,
clients,
leader,
acceptor,
net,
).run()
# print('S', next(m))
while True:
x = next(m)
assert isinstance(x, Wait)
x = m.send(
Packet(
0,
0,
'ClientRequest',
ClientRequest(
Command(
uuid4(),
Checkpoint(
1
)
)
)
)
)
while isinstance(x, ClientNetComm):
print(x)
x = m.send(Reply(True))
print('>>>>>>>>', x)
x = m.send(
Reply(True)
)
print(next(m))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import sys
import platform
from glob import glob
VERSION = "1.2.2"
APP = ['kickstart.py']
COPYRIGHT = "Copyright 2016-2017 by Revar Desmera"
with open('README.rst') as f:
LONG_DESCR = f.read()
extra_options = {}
data_files = []
py2app_options = dict(
argv_emulation=True,
includes=[
'belfrywidgets', 'mudclientprotocol', 'pymuv', 'appdirs',
'six', 'packaging', 'packaging.requirements',
'packaging.version', 'packaging.specifiers'
],
plist=dict(
CFBundleIconFile="MufSim.icns",
CFBundleIdentifier="com.belfry.mufsimulator",
CFBundleGetInfoString="MufSimulator v%s, %s" % (VERSION, COPYRIGHT),
NSHumanReadableCopyright=COPYRIGHT,
NSHighResolutionCapable=True,
CFBundleDocumentTypes=[
dict(
CFBundleTypeName="MUF File",
CFBundleTypeRole="Viewer",
LSHandlerRank="Alternate",
CFBundleTypeMIMETypes=["text/x-muf", "application/x-muf"],
LSItemContentTypes=["org.fuzzball.muf"],
CFBundleTypeExtensions=["muf"],
),
dict(
CFBundleTypeName="MUV File",
CFBundleTypeRole="Viewer",
LSHandlerRank="Alternate",
CFBundleTypeMIMETypes=["text/x-muv", "application/x-muv"],
LSItemContentTypes=["com.belfry.muv"],
CFBundleTypeExtensions=["muv"],
),
]
)
)
py2exe_options = dict(
bundle_files=2,
dist_dir='dist-win',
excludes=["tests", "dist", "build", "docs"],
)
if platform.system() == 'Windows':
import py2exe
data_files.append(
(
"Microsoft.VC90.CRT",
glob(r'C:\Windows\WinSxS\x86_microsoft.vc90.crt_*\*.*')
)
)
sys.path.append(
glob(r'C:\Windows\WinSxS\x86_microsoft.vc90.crt_*')
)
extra_options['windows'] = APP
extra_options['zipfile'] = None
elif platform.system() == 'Darwin':
extra_options['app'] = APP
setup(
name='MufSim',
version=VERSION,
description='Muf language simulator and debugger.',
long_description=LONG_DESCR,
author='Revar Desmera',
author_email='revarbat@gmail.com',
url='https://github.com/revarbat/mufsim',
download_url='https://github.com/revarbat/mufsim/archive/master.zip',
packages=find_packages(
exclude=[
'build', 'dist', 'docs', 'examples', 'icons',
'tests', 'tools',
]
),
license='MIT License',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Compilers',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Interpreters',
'Topic :: Software Development :: Testing',
],
keywords='muf muv debugger development',
entry_points={
'console_scripts': ['mufsim=mufsim.console:main'],
'gui_scripts': ['mufsimgui=mufsim.gui:main']
},
install_requires=[
'setuptools',
'belfrywidgets>=1.0.3',
'mudclientprotocol>=0.1.0',
'ssltelnet>=0.9.2',
'pymuv>=0.9.8',
'appdirs>=1.4.0',
'six',
'packaging',
],
data_files=data_files,
options={
'py2app': py2app_options,
'py2exe': py2exe_options,
},
# setup_requires=['py2app'],
**extra_options
)
|
import re
from configparser import RawConfigParser
import os
class ConfigObject:
"""
"""
def __init__(self):
self.headers = {}
self.raw_config_parser = None # set by calling parse_file
def __getitem__(self, x):
return self.get_header(x)
def add_header(self, header_name, header):
"""Adds the given header into this object with the name `header_name`"""
self.headers[header_name] = header
return header
def add_header_name(self, header_name, is_indexed=False):
"""
Adds a new header with the name header_name
:param header_name: The name of the header as it would appear in the config
:param is_indexed: If true that means that the same value is spread across an indexed list.
:return: The newly created header.
"""
header = ConfigHeader()
header.is_indexed = is_indexed
self.headers[header_name] = header
return header
def set_value(self, header_name, option, value, index=None):
self.get_header(header_name).set_value(option, value, index)
def get_header(self, header_name):
"""
Returns a header with that name, creates it if it does not exist.
"""
if header_name in self.headers:
return self.headers[header_name]
return self.add_header_name(header_name)
def get(self, section, option, index=None):
return self.get_header(section).get(option, index=index)
def getint(self, section, option, index=None):
return self.get_header(section).getint(option, index=index)
def getboolean(self, section, option, index=None):
return self.get_header(section).getboolean(option, index=index)
def getfloat(self, section, option, index=None):
return self.get_header(section).getfloat(option, index=index)
def init_indices(self, max_index):
for header in self.headers.values():
header.init_indices(max_index)
def parse_file(self, config, max_index=None):
"""
Parses the file internally setting values
:param config: an instance of RawConfigParser or a string to a .cfg file
:return: None
"""
if isinstance(config, str):
if not os.path.isfile(config):
raise FileNotFoundError(config)
self.raw_config_parser = RawConfigParser()
self.raw_config_parser.read(config)
config = self.raw_config_parser
elif isinstance(config, RawConfigParser):
self.raw_config_parser = config
elif not isinstance(config, ConfigObject):
raise TypeError("The config should be a String, RawConfigParser of a ConfigObject")
for header_name, header in self.headers.items():
try:
header.parse_file(config[header_name], max_index=max_index)
except KeyError:
pass # skip this header as it does not exist
return self
def reset(self):
for header_name in self.headers:
header = self.headers[header_name]
header.reset()
def __str__(self):
string = ''
for header_name, header in self.headers.items():
string += '[' + header_name + ']\n' + str(header) + '\n'
return string
def copy(self):
new_object = ConfigObject()
for header_name, header in self.headers.items():
new_object.add_header(header_name, header.copy())
return new_object
def has_section(self, header_name):
"""Returns true if the header exists"""
return header_name in self.headers
def get_raw_file(self):
"""Returns the raw file from the parser so it can be used to be parsed by other config objects."""
return self.raw_config_parser
class ConfigHeader:
def __init__(self):
self.values = {}
self.is_indexed = False # if True then indexes will be applied to all values otherwise they will not be
self.max_index = -1
def __getitem__(self, x):
return self.values[x]
def add_value(self, name, value_type, default=None, description=None, value=None):
"""
Adds a new value to this config header
:param name: The name of the value as it would appear in a config file
:param value_type: The type of value: bool, str, int, float
:param default: The value used when the config does not set any value.
:param description: The human readable description of the value
:param value: An optional value, if this header is indexed then the value needs to be a list.
:return: an instance of itself so that you can chain adding values together.
"""
if description is None:
description = name
if value is not None and self.is_indexed and not isinstance(value, list):
raise Exception('Indexed values must be a list')
self.values[name] = ConfigValue(value_type, default=default, description=description, value=value)
return self
def add_config_value(self, name, value):
self.values[name] = value
def set_value(self, option, value, index=None):
"""
Sets the value on the given option.
:param option: The name of the option as it appears in the config file
:param value: The value that is being applied, if this section is indexed value must be a list
:return: an instance of itself so that you can chain setting values together.
"""
# Should raise error if indexed and there's no list or if indexed and no index given
if self.is_indexed and index is None:
if not isinstance(value, list):
raise TypeError("Value should be a list when not giving an index in an indexed header")
else:
raise IndexError("Index cannot be None when not giving a list in an indexed header")
self.values[option].set_value(value=value, index=index)
return self
def get(self, option, index=None):
return self.values[option].get_value(index=index)
def getint(self, option, index=None):
return int(self.values[option].get_value(index=index))
def getboolean(self, option, index=None):
return bool(self.values[option].get_value(index=index))
def getfloat(self, option, index=None):
return float(self.values[option].get_value(index=index))
def init_indices(self, max_index):
if not self.is_indexed:
return
self.max_index = max_index
for value_name in self.values:
self.values[value_name].init_indices(max_index)
def parse_file(self, config_parser, max_index=None):
if self.is_indexed and max_index is None:
return # if we do not know the index lets skip instead of crashing
if not self.is_indexed:
max_index = None
else:
self.max_index = max_index
for value_name in self.values:
self.values[value_name].parse_file(config_parser, value_name, max_index=max_index)
def reset(self):
for value_name in self.values:
self.values[value_name].reset()
def __str__(self):
string = ''
for value_name in self.values:
if self.is_indexed:
string += self.get_indexed_string(value_name)
string += '\n'
else:
string += self.get_string(value_name)
return string
def copy(self):
new_header = ConfigHeader()
new_header.is_indexed = self.is_indexed
new_header.max_index = self.max_index
for value_name, value in self.values.items():
new_header.values[value_name] = value.copy()
return new_header
def get_indexed_string(self, value_name):
value = self.values[value_name]
string = value.comment_description() + '\n'
for i in range(self.max_index):
string += value_name + '_' + str(i) + ' = ' + str(value.get_value(index=i)) + '\n'
return string
def get_string(self, value_name):
value = self.values[value_name]
string = value.comment_description() + '\n'
string += value_name + ' = ' + str(value.get_value()) + '\n'
return string
class ConfigValue:
def __init__(self, value_type, default=None, description="", value=None):
self.type = value_type
self.value = value
self.default = default
self.description = description
def get_value(self, index=None):
"""
Returns the default if value is none.
:param index:
:return: A value.
"""
if self.value is None:
return self.default
if index is not None:
if self.value[index] is None:
return self.default
else:
value = self.value[index]
else:
value = self.value
return value
def comment_description(self):
return '# ' + re.sub(r'\n\s*', '\n# ', str(self.description))
def __str__(self):
return str(self.get_value()) + ' ' + self.comment_description()
def copy(self):
return ConfigValue(self.type, self.default, self.description, self.value)
def init_indices(self, max_index):
self.value = [None]*max_index
def parse_file(self, config_parser, value_name, max_index=None):
if isinstance(config_parser, ConfigHeader):
self.value = config_parser[value_name].value
if max_index is None:
value = self.get_parser_value(config_parser, value_name)
self.value = value
else:
self.value = []
for i in range(max_index):
self.value.append(self.get_parser_value(config_parser, value_name + '_' + str(i)))
def get_parser_value(self, config_parser, value_name):
if self.type == bool:
return config_parser.getboolean(value_name)
if self.type == int:
return config_parser.getint(value_name)
if self.type == float:
return config_parser.getfloat(value_name)
return config_parser.get(value_name)
def set_value(self, value, index=None):
if index is not None:
self.value[index] = value
else:
self.value = value
def reset(self):
self.value = None
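# Usage sketch (illustrative, not part of the original module): declare the
# expected headers and values, then parse a .cfg file into them. The file name
# 'settings.cfg' is hypothetical.
#
# config = ConfigObject()
# header = config.add_header_name('Bot')
# header.add_value('name', str, default='bot', description='Display name')
# header.add_value('retries', int, default=3, description='Number of retries')
# config.parse_file('settings.cfg')
# print(config.get('Bot', 'name'), config.getint('Bot', 'retries'))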
|
m, n = map(int, input().split())
a = [int(i) for i in input().split()]
|
from random import randrange as rr
from math import sqrt
import timeit
def distance(l):
s=0
for i in range(1,len(l)):
x1,y1=dic[l[i-1]][0],dic[l[i-1]][1]
x2,y2=dic[l[i]][0],dic[l[i]][1]
s+=sqrt((x2-x1)**2+(y2-y1)**2)
return s
def dist(i,j):
return float(format(sqrt((i[1]-j[1])**2+(i[0]-j[0])**2),".1f"))
dic={}
siz=int(input())
for _ in range(siz):
inp=list(map(float,input().strip().split()))
dic[int(inp[0])]=(inp[1],inp[2])
dic[siz+1]=dic[1]
l=[i+1 for i in range(siz)]
l=l+[siz+1]
#l=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
cities=list(l)
len_c=len(cities)
len_c-=1
population=len_c*6
# permutations
paths=[]
i=0
while i<population:
r1=rr(1,len_c)
r2=rr(1,len_c)
while r1==r2:
r1=rr(1,len_c)
r2=rr(1,len_c)
cities[r1],cities[r2]=cities[r2],cities[r1]
if cities not in paths:
paths.append(list(cities))
i+=1
ll=list(l)
"""
# greedy cell------------------------++++++++++++++++++++++++++++++++++++++++
for i in range(0,len_c-1):
c=distance([l[i],l[i+1]])
jj=i+1
for j in range(i+2,len_c-1):
if distance([l[i],l[j]])<c:
jj=j
c=distance([l[i],l[j]])
l[i+1],l[jj]=l[jj],l[i+1]
paths.append(list(l))
l=list(ll)
# greedy cell------------------------++++++++++++++++++++++++++++++++++++++++
#b & b -----------------------------------------++++++++++++++++++++++++++++
n=len(l)
n-=1
costm=[]
for i in range(1,n+1):
te=[]
for j in range(1,n+1):
if i==j:
te.append("inf")
else:
te.append(dist(dic[i],dic[j]))
costm.append(te)
def reduc(matr,n):
cost=0
for i in range(n):
min_num=10000000
for j in range(n):
if matr[i][j]!='inf':
if min_num>matr[i][j]:
min_num=matr[i][j]
if min_num!=0 and min_num!=10000000:
for j in range(n):
if matr[i][j]!='inf':
matr[i][j]-=min_num
cost+=min_num
for i in range(n):
min_num=10000000
for j in range(n):
if matr[j][i]!='inf':
if min_num>matr[j][i]:
min_num=matr[j][i]
if min_num!=0 and min_num!=10000000:
for j in range(n):
if matr[j][i]!='inf':
matr[j][i]-=min_num
cost+=min_num
return cost
cost1=reduc(costm,n)
def makeinf(m,n,x,y):
for i in range(n):
m[x][i]='inf'
m[i][y]='inf'
m[y][0]='inf'
for i in range(0,n-1):
mij=costm[l[i]-1][l[i+1]-1]
newmat=[list(costm[_]) for _ in range(n)]
makeinf(newmat,n,l[i]-1,l[i+1]-1)
if mij=='inf':
mij=100000
costmin=cost1+reduc(newmat,n)+ mij
z=i+1
confmat=[list(newmat[_]) for _ in range(n)]
for j in range(i+2,n):
mij=costm[l[i]-1][l[j]-1]
if mij=='inf':
mij=100000
newmat=[list(costm[_]) for _ in range(n)]
makeinf(newmat,n,l[i]-1,l[j]-1)
curcost=(cost1+reduc(newmat,n)+ mij)
if curcost<costmin:
costmin=curcost
z=j
confmat=[list(newmat[_]) for _ in range(n)]
cost1=costmin
costm=[list(confmat[_]) for _ in range(n)]
l[i+1],l[z]=l[z],l[i+1]
print(l)
paths.append(l)
l=list(ll)
"""
#b&b----------------------------------------------------------++++++++++++++++++++
#crossovers
def crossover():
j=0
crossovers=[]
parents=[]
while j<population//2:
r1=rr(0,population)
r2=rr(0,population)
while r1==r2 and r1 in parents and r2 in parents:
r1=rr(0,population)
r2=rr(0,population)
parents.append(r1)
parents.append(r2)
p1=list(paths[r1])
p2=list(paths[r2])
r1=rr(1,len_c)
r2=rr(1,len_c)
while r1==r2:
r1=rr(1,len_c)
r2=rr(1,len_c)
p1[r1:r2+1],p2[r1:r2+1]=p2[r1:r2+1],p1[r1:r2+1]
xtra=list(set(l)-set(p1))
len_x=len(xtra)
for i in range(len(p1)):
if p1.count(p1[i])>1:
x=0#rr(0,len_x)
p1[i]=xtra[x]
del xtra[x]
len_x-=1
xtra=list(set(l)-set(p2))
len_x=len(xtra)
for i in range(len(p2)):
if p2.count(p2[i])>1:
x=0#rr(0,len_x)
p2[i]=xtra[x]
del xtra[x]
len_x-=1
if p1 not in crossovers and p2 not in crossovers and p1 not in paths and p2 not in paths:
crossovers.append(list(p1))
crossovers.append(list(p2))
j+=1
#mutation
mutations=[]
i=0
while i<(population):
mutagens=list(crossovers[i])
r1=rr(1,len_c)
r2=rr(1,len_c)
while r1==r2:
r1=rr(1,len_c)
r2=rr(1,len_c)
mutagens[r1],mutagens[r2]=mutagens[r2],mutagens[r1]
if mutagens not in paths and mutagens not in crossovers and mutagens not in mutations:
mutations.append(list(mutagens))
i+=1
ultimatelist=paths+crossovers+mutations
distances={}
for i in range(len(ultimatelist)):
distances[distance(ultimatelist[i])]=ultimatelist[i]
dissort=sorted(distances.keys())
print(dissort[0])
for i in range(min(population,len(dissort))):
paths[i]=distances[dissort[i]]
c=distance(paths[0])
for i in range(len_c-1,0,-1):
jj=i
for j in range(1,len_c):
paths[0][i],paths[0][j]=paths[0][j],paths[0][i]
            if distance(paths[0])<c:
jj=j
c=distance(paths[0])
paths[0][i],paths[0][j]=paths[0][j],paths[0][i]
paths[0][i],paths[0][jj]=paths[0][jj],paths[0][i]
generations=int(input())
start=timeit.default_timer()
while generations>0:
crossover()
generations-=1
print(paths[0])
stop=timeit.default_timer()
print(stop-start)
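# Input format sketch (illustrative, inferred from the input() calls above):
# the script reads from stdin
#   <number_of_cities>
#   <city_index> <x> <y>        (one line per city, indexes starting at 1)
#   ...
#   <number_of_generations>
# and prints the best tour length found on each generation plus the best tour.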
|
"""
MIT License
Copyright (c) 2017 s0hvaperuna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
from collections import deque
import disnake
from aiohttp import ClientSession
from disnake import DMChannel
from disnake.ext.commands import cooldown
from bot.bot import command
from bot.paginator import Paginator
from cogs.cog import Cog
logger = logging.getLogger('terminal')
class SearchItem:
def __init__(self, **kwargs):
self.url = kwargs.pop('link', 'None')
self.title = kwargs.pop('title', 'Untitled')
def __str__(self):
return '{0.url}'.format(self)
class Search(Cog):
def __init__(self, bot):
super().__init__(bot)
self.last_search = deque()
self.key = bot.config.google_api_key
self.cx = self.bot.config.custom_search
@command(aliases=['im', 'img'])
@cooldown(2, 5)
async def image(self, ctx, *, query):
"""Google search an image"""
#logger.debug('Image search query: {}'.format(query))
if ctx.guild and ctx.guild.id == 217677285442977792 and query.strip().lower() == 'penile hemorrhage':
return
safe = 'off' if not isinstance(ctx.channel, DMChannel) and ctx.channel.nsfw else 'active'
return await self._search(ctx, query, True, safe=safe)
@command()
@cooldown(2, 5)
async def google(self, ctx, *, query):
#logger.debug('Web search query: {}'.format(query))
safe = 'off' if not isinstance(ctx.channel, DMChannel) and ctx.channel.nsfw else 'active'
return await self._search(ctx, query, safe=safe)
async def _search(self, ctx, query, image=False, safe='off'):
params = {'key': self.key,
'cx': self.cx,
'q': query,
'safe': safe}
if image:
params['searchType'] = 'image'
async with ClientSession() as client:
async with client.get('https://www.googleapis.com/customsearch/v1', params=params) as r:
if r.status == 200:
json = await r.json()
if 'error' in json:
reason = json['error'].get('message', 'Unknown reason')
return await ctx.send('Failed to search because of an error\n```{}```'.format(reason))
#logger.debug('Search result: {}'.format(json))
total_results = json['searchInformation']['totalResults']
if int(total_results) == 0:
return await ctx.send('No results with the keywords "{}"'.format(query))
if 'items' in json:
items = []
for item in json['items']:
items.append(SearchItem(**item))
def gen_page(i: int):
return str(items[i])
paginator = Paginator(items, show_stop_button=True, generate_page=gen_page)
try:
await paginator.send(ctx)
except disnake.HTTPException:
pass
return
elif r.status == 403:
return await ctx.send('Search quota filled for today. Resets every day at midnight Pacific Time (PT)')
else:
return await ctx.send('Http error {}'.format(r.status))
def setup(bot):
bot.add_cog(Search(bot))
|
import torch
import numpy as np
import SimpleITK as sitk
num_images = 10
class MyDataset:
def __init__(self, paths):
self.paths = paths
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
image = sitk.ReadImage(self.paths[index])
resampler = sitk.ResampleImageFilter()
resampler.SetInterpolator(sitk.sitkNearestNeighbor)
resampler.SetReferenceImage(image)
resampler.SetDefaultPixelValue(0.0)
resampler.SetOutputPixelType(sitk.sitkFloat32)
print('Resampling...')
resampled = resampler.Execute(image)
print('Resampled!')
array = sitk.GetArrayFromImage(resampled)
return array
paths = []
for i in range(num_images):
image = sitk.GetImageFromArray(np.random.rand(10, 20, 30))
path = f'/tmp/image_{i}.nii.gz'
sitk.WriteImage(image, path)
paths.append(path)
my_dataset = MyDataset(paths)
loader_sp = torch.utils.data.DataLoader(my_dataset, batch_size=4, num_workers=0)
loader_mp = torch.utils.data.DataLoader(my_dataset, batch_size=4, num_workers=2)
print('Extracting batch using one worker...')
batch_sp = next(iter(loader_sp))
print(batch_sp.shape)
print('Extracting batch using two workers...')
batch_mp = next(iter(loader_mp))
print(batch_mp.shape)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module of TinyCSSParser interface.
"""
from urllib.parse import urljoin
import requests
import tinycss
from tinycss.css21 import RuleSet
from hatemile import helper
from hatemile.util.css.stylesheetparser import StyleSheetParser
from hatemile.util.html.htmldomparser import HTMLDOMParser
from .tinycssrule import TinyCSSRule
class TinyCSSParser(StyleSheetParser):
"""
The TinyCSSParser class is official implementation of
:py:class:`hatemile.util.css.stylesheetparser.StyleSheetParser` for
tinycss.
"""
def __init__(self, css_or_hp, current_url=None):
"""
Initializes a new object that encapsulate the tinycss.
:param css_or_hp: The HTML parser or CSS code of page.
:type css_or_hp: str or hatemile.util.html.htmldomparser.HTMLDOMParser
:param current_url: The current URL of page.
:type current_url: str
"""
helper.require_not_none(css_or_hp)
helper.require_valid_type(css_or_hp, str, HTMLDOMParser)
helper.require_valid_type(current_url, str)
if isinstance(css_or_hp, str):
self.stylesheet = tinycss.make_parser().parse_stylesheet(css_or_hp)
else:
self._create_parser(css_or_hp, current_url)
def _create_parser(self, html_parser, current_url):
"""
Create the tinycss stylesheet.
:param html_parser: The HTML parser.
:type html_parser: hatemile.util.html.htmldomparser.HTMLDOMParser
:param current_url: The current URL of page.
:type current_url: str
"""
css_code = ''
elements = html_parser.find(
'style,link[rel="stylesheet"]'
).list_results()
for element in elements:
if element.get_tag_name() == 'STYLE':
css_code = css_code + element.get_text_content()
else:
css_code = css_code + requests.get(
urljoin(current_url, element.get_attribute('href'))
).text
self.stylesheet = tinycss.make_parser().parse_stylesheet(css_code)
def get_rules(self, properties):
rules = list()
for rule in self.stylesheet.rules:
if isinstance(rule, RuleSet):
auxiliar_rule = TinyCSSRule(rule)
for property_name in properties:
if auxiliar_rule.has_property(property_name):
rules.append(auxiliar_rule)
break
return rules
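# Usage sketch (illustrative, not part of the original module): parse inline
# CSS and keep only the rule sets that declare the properties of interest.
#
# parser = TinyCSSParser('a { color: red; } p { margin: 0; }')
# rules = parser.get_rules(['color'])   # -> [TinyCSSRule wrapping the 'a' rule set]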
|
import typing
from PIL import ImageDraw
from PIL import Image
import numpy as np
from .data_enhancer import DataEnhancer
class EyeVectorProjEnhancer(DataEnhancer):
"""Draws projections of eye vectors on a 2d white surface"""
def __init__(self, **kwargs):
"""Constructor"""
super().__init__(**kwargs)
def process(self, pic: Image.Image, np_points: np.ndarray,
eye_vector_left: np.ndarray = None,
eye_vector_right: np.ndarray = None) -> typing.Tuple[Image.Image, dict]:
"""Apply - Draw projections...
:param pic:
:param np_points: facial landmarks
:param eye_vector_left:
:param eye_vector_right:
:return:
"""
        # use arrays so the later "*= 1000" scales the vector instead of repeating a list
        if eye_vector_right is None:
            eye_vector_right = np.array([0.0, 0.0, 1.0])
        if eye_vector_left is None:
            eye_vector_left = np.array([0.0, 0.0, 1.0])
pic, output = super().process(pic, np_points)
drawer = ImageDraw.Draw(pic)
drawer.rectangle([0, 0, 1920, 1080], fill=(255, 255, 255))
middle = [960, 540]
eye_vector_right *= 1000
eye_vector_left *= 1000
drawer.line([middle[0], middle[1], middle[0] + eye_vector_left[0],
middle[1] + eye_vector_left[2]], fill=(0, 255, 0))
drawer.line([middle[0], middle[1], middle[0] + eye_vector_right[0],
middle[1] + eye_vector_right[2]], fill=(0, 255, 0))
return pic, output
|
# system imports
import numpy as np
import os
import argparse
# BEAST imports
from beast.tools import beast_settings, setup_batch_beast_trim
from beast.tools.run import create_filenames
from difflib import SequenceMatcher
def make_trim_scripts(
beast_settings_info,
num_subtrim=1,
nice=None,
prefix=None,
):
"""
`setup_batch_beast_trim.py` uses file names to create batch trim files. This
generates all of the file names for that function.
NOTE: This assumes you're using source density or background dependent noise
models.
Parameters
----------
beast_settings_info : string or beast.tools.beast_settings.beast_settings instance
if string: file name with beast settings
if class: beast.tools.beast_settings.beast_settings instance
num_subtrim : int (default = 1)
number of trim batch jobs
nice : int (default = None)
set this to an integer (-20 to 20) to prepend a "nice" level
to the trimming command
prefix : string (default=None)
Set this to a string (such as 'source activate astroconda') to prepend
to each batch file (use '\n's to make multiple lines)
Returns
-------
job_files : list of strings
Names of the newly created job files
"""
# process beast settings info
if isinstance(beast_settings_info, str):
settings = beast_settings.beast_settings(beast_settings_info)
elif isinstance(beast_settings_info, beast_settings.beast_settings):
settings = beast_settings_info
else:
raise TypeError(
"beast_settings_info must be string or beast.tools.beast_settings.beast_settings instance"
)
# make lists of file names
file_dict = create_filenames.create_filenames(
settings, use_sd=True, nsubs=settings.n_subgrid,
)
# extract some useful ones
photometry_files = file_dict["photometry_files"]
modelsedgrid_files = file_dict["modelsedgrid_files"]
noise_files = file_dict["noise_files"]
modelsedgrid_trim_files = file_dict["modelsedgrid_trim_files"]
noise_trim_files = file_dict["noise_trim_files"]
# the unique sets of things
unique_sedgrid = [
x for i, x in enumerate(modelsedgrid_files) if i == modelsedgrid_files.index(x)
]
# save the list of job files
job_file_list = []
# iterate through each model grid
for i in range(settings.n_subgrid):
# indices for this model grid
grid_ind = [
ind
for ind, mod in enumerate(modelsedgrid_files)
if mod == unique_sedgrid[i]
]
# create corresponding files for each of those
input_noise = [noise_files[ind] for ind in grid_ind]
input_phot = [photometry_files[ind] for ind in grid_ind]
# to get the trim prefix, find the common string between trimmed noise
# files and trimmed SED files
input_trim_prefix = []
for ind in grid_ind:
str1 = modelsedgrid_trim_files[ind]
str2 = noise_trim_files[ind]
# find longest match
match = SequenceMatcher(None, str1, str2).find_longest_match(
0, len(str1), 0, len(str2)
)
# grab that substring (and remove trailing "_")
input_trim_prefix.append(str1[match.a : match.a + match.size][:-1])
# check if the trimmed grids exist before moving on
check_trim = [os.path.isfile(noise_trim_files[ind]) for ind in grid_ind]
# if any aren't trimmed for this model grid, set up trimming
if np.sum(check_trim) < len(input_noise):
job_path = "./{0}/trim_batch_jobs/".format(settings.project)
if settings.n_subgrid > 1:
file_prefix = "BEAST_gridsub" + str(i)
if settings.n_subgrid == 1:
file_prefix = "BEAST"
# generate trimming at-queue commands
setup_batch_beast_trim.generic_batch_trim(
unique_sedgrid[i],
input_noise,
input_phot,
input_trim_prefix,
settings.obs_colnames,
job_path=job_path,
file_prefix=file_prefix,
num_subtrim=num_subtrim,
nice=nice,
prefix=prefix,
)
job_file_list.append(job_path + file_prefix + "_batch_trim.joblist")
return job_file_list
if __name__ == "__main__": # pragma: no cover
# commandline parser
parser = argparse.ArgumentParser()
parser.add_argument(
"beast_settings_file",
type=str,
help="file name with beast settings",
)
parser.add_argument(
"--num_subtrim", type=int, default=1, help="number of trim batch jobs",
)
parser.add_argument(
"--nice",
type=int,
default=None,
help="prepend a 'nice' level to the trimming command",
)
parser.add_argument(
"--prefix", type=str, default=None, help="string to prepend to each batch file",
)
args = parser.parse_args()
make_trim_scripts(
beast_settings_info=args.beast_settings_file,
num_subtrim=args.num_subtrim,
nice=args.nice,
prefix=args.prefix,
)
|
#!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
__author__ = 'wesc+api@google.com (Wesley Chun)'
"""
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.ext import ndb
from conference import ConferenceApi
from conference import MEMCACHE_FEATURED_SPEAKER_KEY
from models import Session
from models import Speaker
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SetFeaturedSpeakerHandler(webapp2.RequestHandler):
def post(self):
"""Set Featured Speaker in Memcache."""
print self.request
conf = ndb.Key(urlsafe=self.request.get('conf_key')).get()
spkr = ndb.Key(Speaker, self.request.get('speakers')).get()
sessions = Session.query(ancestor=conf.key)
sessions = sessions.filter(
Session.speakerKeys == self.request.get('speakers'))
if sessions.count() > 1:
# If there are more than one session with the same speaker,
# format featured speaker and set it in memcache
            featured_speakers = '%s will speak at %s during the ' \
                                'following sessions: %s' % (
spkr.displayName, conf.name,
(', '.join(sess.name for sess in sessions))
)
memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, featured_speakers)
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
'Hi, you have created a following ' # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/set_featured_speaker', SetFeaturedSpeakerHandler),
], debug=True)
|
from typing import Union
from datetime import datetime
import numpy as np
import pandas as pd
Timepoint = Union[datetime, np.datetime64, pd.Timestamp, str]
class Task:
def __init__(
self,
name: str,
start: Timepoint,
end: Timepoint,
**tags
):
"""Individual task to be plotted on a Gantt chart.
:param name: Name of task
:type name: str
:param start: Start time of this task
:type start: Timepoint
:param end: End time of this task
:type end: Timepoint
:param tags: Tags to apply to task for styling (optional)
"""
self.name = name
self.start = pd.Timestamp(start)
self.end = pd.Timestamp(end)
self.tags = tags
if self.start > self.end:
raise ValueError("Start must come before end.")
def to_dict(self):
ret_dict = {
"name": self.name,
"start": self.start,
"end": self.end
}
ret_dict.update({tag: value for tag, value in self.tags.items()})
return ret_dict
def __str__(self) -> str:
tag_str = " "
        if self.tags:
tag_list = [f"{k}: {v}" for k, v in self.tags.items()]
tag_str = f" [{','.join(tag_list)}] "
return f"Task ({self.name}){tag_str}: {self.start} - {self.end}"
def __repr__(self) -> str:
return self.__str__()
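# Usage sketch (illustrative, not part of the original module):
#
# t = Task("write report", "2021-01-04", "2021-01-08", team="docs")
# print(t)           # Task (write report) [team: docs] : 2021-01-04 00:00:00 - 2021-01-08 00:00:00
# row = t.to_dict()  # {'name': 'write report', 'start': Timestamp(...), 'end': Timestamp(...), 'team': 'docs'}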
|
# Copyright 2002-2011 Nick Mathewson. See LICENSE for licensing information.
"""mixminion.server.EventStats
Classes to gather time-based server statistics"""
__all__ = [ 'EventLog', 'NilEventLog' ]
import os
from threading import RLock
from time import time
from mixminion.Common import formatTime, LOG, previousMidnight, floorDiv, \
createPrivateDir, MixError, readPickled, tryUnlink, writePickled
# _EVENTS: a list of all recognized event types.
_EVENTS = [ 'ReceivedPacket',
'ReceivedConnection',
'AttemptedConnect', 'SuccessfulConnect', 'FailedConnect',
'AttemptedRelay', 'SuccessfulRelay',
'FailedRelay', 'UnretriableRelay',
'AttemptedDelivery', 'SuccessfulDelivery',
'FailedDelivery', 'UnretriableDelivery',
]
class NilEventLog:
"""Null implementation of EventLog interface: ignores all events and
logs nothing.
"""
def __init__(self):
pass
def save(self, now=None):
"""Flushes this eventlog to disk."""
pass
def rotate(self, now=None):
"""Move the pending events from this EventLog into a
summarized text listing, and start a new pool. Requires
that it's time to rotate.
"""
pass
def getNextRotation(self):
"""Return a time after which it's okay to rotate the log."""
return 0
def _log(self, event, arg=None):
"""Notes that an event has occurred.
event -- the type of event to note
arg -- an optional topic of the event.
"""
pass
def receivedPacket(self, arg=None):
"""Called whenever a packet is received via MMTP."""
self._log("ReceivedPacket", arg)
def receivedConnection(self, arg=None):
"""Called whenever we get an incoming MMTP connection."""
self._log("ReceivedConnection", arg)
def attemptedConnect(self, arg=None):
"""Called whenever we try to connect to an MMTP server."""
self._log("AttemptedConnect", arg)
def successfulConnect(self, arg=None):
"""Called whenever we successfully connect to an MMTP server."""
self._log("SuccessfulConnect", arg)
def failedConnect(self, arg=None):
"""Called whenever we fail to connect to an MMTP server."""
self._log("FailedConnect", arg)
def attemptedRelay(self, arg=None):
"""Called whenever we attempt to relay a packet via MMTP."""
self._log("AttemptedRelay", arg)
def successfulRelay(self, arg=None):
"""Called whenever packet delivery via MMTP succeeds"""
self._log("SuccessfulRelay", arg)
def failedRelay(self, arg=None):
"""Called whenever packet delivery via MMTP fails retriably"""
self._log("FailedRelay", arg)
def unretriableRelay(self, arg=None):
"""Called whenever packet delivery via MMTP fails unretriably"""
self._log("UnretriableRelay", arg)
def attemptedDelivery(self, arg=None):
"""Called whenever we attempt to deliver a message via an exit
module.
"""
self._log("AttemptedDelivery", arg)
def successfulDelivery(self, arg=None):
"""Called whenever we successfully deliver a message via an exit
module.
"""
self._log("SuccessfulDelivery", arg)
def failedDelivery(self, arg=None):
"""Called whenever an attempt to deliver a message via an exit
module fails retriably.
"""
self._log("FailedDelivery", arg)
def unretriableDelivery(self, arg=None):
"""Called whenever an attempt to deliver a message via an exit
module fails unretriably.
"""
self._log("UnretriableDelivery", arg)
BOILERPLATE = """\
# Mixminion server statistics
#
# NOTE: These statistics _do not_ necessarily cover the current interval
# of operation. To see pending statistics that have not yet been flushed
# to this file, run 'mixminion server-stats'.
"""
class EventLog(NilEventLog):
"""An EventLog records events, aggregates them according to some time
periods, and logs the totals to disk.
Currently we retain two log files: one holds an interval-by-interval
human-readable record of past intervals; the other holds a pickled
record of events in the current interval.
We take some pains to avoid flushing the statistics when too
little time has passed. We only rotate an aggregated total to disk
when:
- An interval has passed since the last rotation time
AND
- We have accumulated events for at least 75% of an interval's
worth of time.
The second requirement prevents the following unpleasant failure mode:
- We set the interval to '1 day'. At midnight on Monday,
we rotate. At 00:05, we go down. At 23:55 we come back
up. At midnight at Tuesday, we noticing that it's been one
day since the last rotation, and rotate again -- thus making
a permanent record that reflects 10 minutes worth of traffic,
potentially exposing more about individual users than we should.
"""
### Fields:
# count: a map from event name -> argument|None -> total events received.
# lastRotation: the time at which we last flushed the log to disk and
# reset the log.
# filename, historyFile: Names of the pickled and long-term event logs.
# rotateInterval: Interval after which to flush the current statistics
# to disk.
# _lock: a threading.RLock object that must be held when modifying this
# object.
# accumulatedTime: number of seconds since last rotation that we have
# been logging events.
# lastSave: last time we saved the file.
### Pickled format:
# Map from {"count","lastRotation","accumulatedTime"} to the values
# for those fields.
def __init__(self, filename, historyFile, interval):
"""Initializes an EventLog that caches events in 'filename', and
periodically writes to 'historyFile' every 'interval' seconds."""
NilEventLog.__init__(self)
if os.path.exists(filename):
self.__dict__.update(readPickled(filename))
assert self.count is not None
assert self.lastRotation is not None
assert self.accumulatedTime is not None
for e in _EVENTS:
if not self.count.has_key(e):
self.count[e] = {}
else:
self.count = {}
for e in _EVENTS:
self.count[e] = {}
self.lastRotation = time()
self.accumulatedTime = 0
self.filename = filename
self.historyFilename = historyFile
for fn in filename, historyFile:
parent = os.path.split(fn)[0]
createPrivateDir(parent)
self.rotateInterval = interval
self.lastSave = time()
self._setNextRotation()
self._lock = RLock()
self.save()
def save(self, now=None):
"""Write the statistics in this log to disk, rotating if necessary."""
try:
self._lock.acquire()
self._save(now)
finally:
self._lock.release()
def _save(self, now=None):
"""Implements 'save' method. For internal use. Must hold self._lock
to invoke."""
LOG.debug("Syncing statistics to disk")
if not now: now = time()
tmpfile = self.filename + "_tmp"
tryUnlink(tmpfile)
self.accumulatedTime += int(now-self.lastSave)
self.lastSave = now
writePickled(self.filename, { 'count' : self.count,
'lastRotation' : self.lastRotation,
'accumulatedTime' : self.accumulatedTime,
})
def _log(self, event, arg=None):
try:
self._lock.acquire()
try:
self.count[event][arg] += 1
except KeyError:
try:
self.count[event][arg] = 1
except KeyError:
raise KeyError("No such event: %r" % event)
finally:
self._lock.release()
def getNextRotation(self):
return self.nextRotation
def rotate(self,now=None):
if now is None: now = time()
if now < self.nextRotation:
raise MixError("Not ready to rotate event stats")
try:
self._lock.acquire()
self._rotate(now)
finally:
self._lock.release()
def _rotate(self, now=None):
"""Flush all events since the last rotation to the history file,
and clears the current event log."""
# Must hold lock
LOG.debug("Flushing statistics log")
if now is None: now = time()
starting = not os.path.exists(self.historyFilename)
f = open(self.historyFilename, 'a')
if starting:
f.write(BOILERPLATE)
self.dump(f, now)
f.close()
self.count = {}
for e in _EVENTS:
self.count[e] = {}
self.lastRotation = now
self._save(now)
self.accumulatedTime = 0
self._setNextRotation(now)
def dump(self, f, now=None):
"""Write the current data to a file handle 'f'."""
if now is None: now = time()
try:
self._lock.acquire()
startTime = self.lastRotation
endTime = now
print >>f, "========== From %s to %s:" % (formatTime(startTime,1),
formatTime(endTime,1))
for event in _EVENTS:
count = self.count[event]
if len(count) == 0:
print >>f, " %s: 0" % event
continue
elif len(count) == 1 and count.keys()[0] is None:
print >>f, " %s: %s" % (event, count[None])
continue
print >>f, " %s:" % event
total = 0
args = count.keys()
args.sort()
length = max([ len(str(arg)) for arg in args ])
length = max((length, 10))
fmt = " %"+str(length)+"s: %s"
for arg in args:
v = count[arg]
if arg is None: arg = "{Unknown}"
print >>f, fmt % (arg, v)
total += v
print >>f, fmt % ("Total", total)
finally:
self._lock.release()
def _setNextRotation(self, now=None):
"""Helper function: calculate the time when we next rotate the log."""
# ???? Lock to 24-hour cycle
# This is a little weird. We won't save *until*:
# - .75 * rotateInterval seconds are accumulated.
# AND - rotateInterval seconds have elapsed since the last
# rotation.
#
# IF the rotation interval is divisible by one hour, we also
# round to the hour, up to 5 minutes down and 55 up.
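# Worked example of the hourly rounding (illustrative): if nextRotation
# falls at 14:50, previousMidnight gives 00:00, rest = 53400s, and
# floorDiv(53400 + 55*60, 3600) = 15, so nextRotation becomes 15:00.
# A value of 15:04 would round down to 15:00; 15:06 would round up to 16:00.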
if not now: now = time()
accumulatedTime = self.accumulatedTime + (now - self.lastSave)
secToGo = max(0, self.rotateInterval * 0.75 - accumulatedTime)
self.nextRotation = max(self.lastRotation + self.rotateInterval,
now + secToGo)
if self.nextRotation < now:
self.nextRotation = now
if (self.rotateInterval % 3600) == 0:
mid = previousMidnight(self.nextRotation)
rest = self.nextRotation - mid
self.nextRotation = mid + 3600 * floorDiv(rest+55*60, 3600)
def configureLog(config):
"""Given a configuration file, set up the log. May replace the log global
variable.
"""
global log
if config['Server']['LogStats']:
LOG.info("Enabling statistics logging")
statsfile = config.getStatsFile()
if not os.path.exists(os.path.split(statsfile)[0]):
# create parent if needed.
os.makedirs(os.path.split(statsfile)[0], 0700)
workfile = os.path.join(config.getWorkDir(), "stats.tmp")
log = EventLog(
workfile, statsfile, config['Server']['StatsInterval'].getSeconds())
import mixminion.MMTPClient
mixminion.MMTPClient.useEventStats()
LOG.info("Statistics logging enabled")
else:
log = NilEventLog()
LOG.info("Statistics logging disabled")
# Global variable: The currently configured event log.
log = NilEventLog()
|
'''https://leetcode.com/problems/climbing-stairs/
70. Climbing Stairs
Easy
You are climbing a staircase. It takes n steps to reach the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
Example 1:
Input: n = 2
Output: 2
Explanation: There are two ways to climb to the top.
1. 1 step + 1 step
2. 2 steps
Example 2:
Input: n = 3
Output: 3
Explanation: There are three ways to climb to the top.
1. 1 step + 1 step + 1 step
2. 1 step + 2 steps
3. 2 steps + 1 step
Constraints:
1 <= n <= 45'''
# for given n steps stairs
# Each time you can either climb 1 or 2 steps.
# In how many distinct ways can you climb to the top?
def brute_force(n):
if n == 1 or n == 2:
return n
return brute_force(n-1)+brute_force(n-2)
memo = {}
def recursion_memo(n, memo):
if n == 1 or n == 2:
return n
if n in memo:
return memo[n]
value = recursion_memo(n-1, memo) + recursion_memo(n-2, memo)
memo[n] = value
return memo[n]
def dynamic(n):
if n == 1 or n == 2:
return n
result = [0]*(n)
result[0] = 1
result[1] = 2
for i in range(2, n):
result[i] = result[i-1]+result[i-2]
return result[n-1]
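# A constant-space variant (an addition, not one of the original three
# solutions): the count is the (n+1)-th Fibonacci number, so two rolling
# variables are enough.
def constant_space(n):
    if n == 1 or n == 2:
        return n
    prev, curr = 1, 2
    for _ in range(3, n + 1):
        prev, curr = curr, prev + curr
    return curr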
if __name__ == '__main__':
print(recursion_memo(10, memo))
|
#!/usr/bin/env python
# encoding: utf-8
import datetime
from unittest2 import TestCase
from tempodb import DataPoint
class DataPointTest(TestCase):
def test_init(self):
now = datetime.datetime.now()
dp = DataPoint(now, 12.34)
self.assertEqual(dp.ts, now)
self.assertEqual(dp.value, 12.34)
def test_to_json(self):
ts = datetime.datetime(2012, 3, 27, 1, 2, 3, 4)
dp = DataPoint(ts, 12.34)
expected = {
't': '2012-03-27T01:02:03.000004',
'v': 12.34
}
json = dp.to_json()
self.assertEqual(json, expected)
def test_from_json(self):
json = {
't': '2012-03-27T01:02:03.000004',
'v': 12.34
}
dp = DataPoint.from_json(json)
ts = datetime.datetime(2012, 3, 27, 1, 2, 3, 4)
expected = DataPoint(ts, 12.34)
self.assertEqual(dp, expected)
|
import aiorun
from .server import Server
from backup.config import Config
from backup.module import BaseModule
from injector import Injector
async def main():
config = Config.fromEnvironment()
module = BaseModule(config, override_dns=False)
injector = Injector(module)
await injector.get(Server).start()
if __name__ == '__main__':
print("Starting")
aiorun.run(main())
|
#!/usr/bin/env python
import sys
import logging
from xml.dom.minidom import parseString
from screensketch.screenspec.reader import TextReader
from screensketch.screenspec.writer import TextWriter
from screensketch.screenspec.writer import XMLWriter
from screensketch.screenspec.visualization import HTMLRenderer
FORMAT = '%(levelname)-7s %(message)s'
logging.basicConfig(format = FORMAT, level = logging.INFO)
#logging.disable(logging.DEBUG)
logger = logging.getLogger("root")
logger.info("START")
number = 2 # see data folder
frmt = "txt" # txt or xml
input_data = open("../samples/screenspec/example%02d.txt" % number).read()
retval = TextReader(input_data).execute()
#TextWriter(retval).execute(sys.stdout)
#XMLWriter(retval).execute(sys.stdout)
#print retval.children[0].children[1].identifier
#retval.children[0].children[1]._grid = [["a", "b"], ["c", "d"]]
#print retval.children[0].children[0].values
#f = open("tar.xml",'w');
#XMLWriter(retval).execute(f)
#f.close();
HTMLRenderer(retval).execute(sys.stdout)
logger.info("END")
|
# Project Euler 3: find the largest prime factor of 600851475143.
n = 600851475143
prime_factor = 1
i = 2
# Trial division: strip each factor i out of n completely before moving on,
# so every value stored in prime_factor is prime.
while i * i <= n:
if n % i == 0:
prime_factor = i
n //= i  # integer division keeps n an int (avoids float drift)
else:
i += 1
# Whatever remains of n is either 1 or the largest prime factor.
if prime_factor < n:
prime_factor = n
print(prime_factor) |
# Copyright (c) 2019, Corey Smith
# Distributed under the MIT License.
# See LICENCE file in root directory for full terms.
"""
Neural network latent matrix factorization library.
"""
import torch
import torch.nn as nn
# from tqdm import tqdm
from pathlib import Path
from ..recommender.nn_layers import ScaledEmbedding, ZeroEmbedding
class NNMatrixFactorization(torch.nn.Module):
"""Matrix factorization using pytorch."""
def __init__(
self,
n_users,
n_products,
n_factors=20,
optimizer=torch.optim.SGD,
lr=0.001,
l2=0,
momentum=0,
loss_fn=nn.BCEWithLogitsLoss,
activation=nn.Sigmoid,
):
"""
Initialize the user and product embedding vectors in latent space.
Args:
n_users (int): Number of users with prior purchases.
n_products (int): Total number of products purchased.
n_factors (integer, optional): Dimension of the latent embedding space.
"""
super(NNMatrixFactorization, self).__init__()
self.l2 = l2
self.lr = lr
self.momentum = momentum
self.user_factors = ScaledEmbedding(n_users, n_factors)
self.product_factors = ScaledEmbedding(n_products, n_factors)
self.user_bias = ZeroEmbedding(n_users, 1)
self.product_bias = ZeroEmbedding(n_products, 1)
self.activation = activation()
self.loss_fn = loss_fn()
self.optimizer = optimizer(
self.parameters(), lr=self.lr, weight_decay=self.l2, momentum=self.momentum
)
def forward(self, user, item):
"""
Matrix multiplication between user and product
embedding vectors.
"""
item_emb = self.product_factors(item.view(-1)) + self.product_bias(
item.view(-1)
)
user_emb = self.user_factors(user.view(-1)) + self.user_bias(user.view(-1))
mat_mult = (item_emb * user_emb).sum(1)
return mat_mult
def _prob_to_class(self, forward):
"""
Convert the probabilities from the final activation into a
binary classification.
"""
predict_pos = self.activation(forward)
predict_neg = 1 - predict_pos
return torch.stack((predict_neg, predict_pos)).argmax(0).float()
def prediction(self, user, item):
"""
Use product and user embedding vectors to calculate
a probability for positive interaction.
"""
return self._prob_to_class(self(user, item))
def loss(self, forward, rating):
"""Calculate the loss of the predicted ratings."""
return self.loss_fn(forward, rating.float().view(-1))
def compute_accuracy(self, data_loader):
"""
Compute the accuracy of our predictions against the true ratings.
"""
correct = 0
total = 0
self.eval()
with torch.no_grad():
for user, item, true_rating in data_loader:
forward = self(user, item)
predicted = self._prob_to_class(forward)
total += predicted.numel()
correct += (predicted == true_rating.view(-1)).sum().item()
return total, correct
def train_model(self, data_loader):
"""
Train the model on the data generated by the dataloader and compute
the training loss and training accuracy.
"""
train_squared_loss = 0
correct = 0
total = 0
self.train()
for user, item, rating in data_loader:
self.optimizer.zero_grad()
forward = self(user, item)
predicted = self._prob_to_class(forward)
loss = self.loss(forward, rating)
train_squared_loss += loss.item() * len(user)
total += predicted.numel()
correct += (predicted == rating.view(-1)).sum().item()
loss.backward()
self.optimizer.step()
mean_loss = train_squared_loss / total
return mean_loss, f"{(100 * correct / total):.2f}"
def evaluate(self, dataloader):
"""
Calculate the loss and accuracy of the model on the validation
or test data set.
"""
squared_loss = 0
correct = 0
total = 0
self.eval()
with torch.no_grad():
for user, item, rating in dataloader:
forward = self(user, item)
predicted = self._prob_to_class(forward)
squared_loss += self.loss(forward, rating).item() * len(user)
total += predicted.numel()
correct += (predicted == rating.view(-1)).sum().item()
mean_loss = squared_loss / total
return mean_loss, f"{(100 * correct / total):.2f}"
@classmethod
def load(
cls,
saved_filename,
optimizer=torch.optim.SGD,
lr=0.001,
l2=0,
momentum=0,
loss_fn=nn.BCEWithLogitsLoss,
activation=nn.Sigmoid,
):
"""
"""
if not Path(saved_filename).exists():
raise ValueError("Filename does not exist.")
pass
# model_save = torch.load(saved_filename)
# # before training user-item interaction matrix
# user_em = model_save["user_factors.weight"]
# item_em = model_save["product_factors.weight"]
# user_b = model_save["user_bias.weight"]
# item_b = model_save["product_bias.weight"]
# user_item_array = ((item_em + item_b) @ (user_em + user_b).transpose(0,1))
# pre_probs = model.activation(user_item_array).numpy()
# pre_preds = model._prob_to_class(user_item_array).numpy()
# cls(
# n_users,
# n_products,
# n_factors=20,
# optimizer=torch.optim.SGD,
# lr=0.001,
# l2=0,
# momentum=0,
# loss_fn=nn.BCEWithLogitsLoss,
# activation=nn.Sigmoid,
# )
def create_user_item_array(self):
"""
Use the trained embedding vectors to compute the predicted
interaction for all users.
"""
user_em = self.user_factors.weight.detach()
item_em = self.product_factors.weight.detach()
user_b = self.user_bias.weight.detach()
item_b = self.product_bias.weight.detach()
user_item_array = (item_em + item_b) @ (user_em + user_b).transpose(0, 1)
preds = self._prob_to_class(user_item_array).numpy()
return preds
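# Minimal usage sketch (illustrative only): the sizes, the random binary
# interactions, and the DataLoader below are assumptions for demonstration,
# not part of the library. Run as a module so the relative imports resolve.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset

    n_users, n_products = 100, 50
    model = NNMatrixFactorization(n_users, n_products, n_factors=8, lr=0.01)
    users = torch.randint(0, n_users, (512,))
    items = torch.randint(0, n_products, (512,))
    ratings = torch.randint(0, 2, (512,))  # binary targets for BCEWithLogitsLoss
    loader = DataLoader(TensorDataset(users, items, ratings), batch_size=64)
    for epoch in range(3):
        mean_loss, accuracy = model.train_model(loader)
        print(f"epoch {epoch}: loss={mean_loss:.4f}, accuracy={accuracy}%")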
|
import json
from adapters.base_adapter import Adapter
from devices.switch.on_off_switch import OnOffSwitch
class WeiserLock(Adapter):
def __init__(self, devices):
super().__init__(devices)
self.switch = OnOffSwitch(devices, 'switch', 'state')
self.devices.append(self.switch)
def convert_message(self, message):
message = super().convert_message(message)
if 'state' in message.raw:
state = message.raw['state']
message.raw['state'] = 'ON' if state == 'LOCK' else 'OFF'
return message
def handle_command(self, alias, device, command, level, color):
device_data = self._get_legacy_device_data()
self.switch.handle_command(device_data, command, level, color)
return {
'topic': device_data['friendly_name'] + '/set',
'payload': json.dumps({
"state": 'LOCK' if command.upper() == 'ON' else 'UNLOCK'
})
}
|
"""Tests for AIOSkybell."""
import pathlib
EMAIL = "test@test.com"
PASSWORD = "securepass"
def load_fixture(filename) -> str:
"""Load a fixture."""
return (
pathlib.Path(__file__)
.parent.joinpath("fixtures", filename)
.read_text(encoding="utf8")
)
|
from heapq import heappush, heappop
def find_kth_largest(nums: list, k: int) -> int:
# Push negated values so the min-heap behaves like a max-heap,
# then discard the top k-1 entries; the next pop is the kth largest.
h = []
for num in nums:
heappush(h, -num)
for _ in range(k - 1):
heappop(h)
return -heappop(h)
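# An equivalent alternative using the standard library directly (another
# option, not a replacement for the heap walk above):
def find_kth_largest_nlargest(nums: list, k: int) -> int:
    from heapq import nlargest
    return nlargest(k, nums)[-1]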
if __name__ == "__main__":
nums = list(map(int,input("Enter elements of array: ").split(' ')))
k = int(input("Enter the number k: "))
largestElement = find_kth_largest(nums,k)
print("The Kth ("+str(k)+") Largest element is",largestElement)
|
import FWCore.ParameterSet.Config as cms
import TrackingTools.TrackFitters.KFTrajectoryFitter_cfi
LooperTrajectoryFitter = TrackingTools.TrackFitters.KFTrajectoryFitter_cfi.KFTrajectoryFitter.clone(
ComponentName = cms.string('LooperFitter'),
Propagator = cms.string('PropagatorWithMaterialForLoopers')
)
import TrackingTools.TrackFitters.KFTrajectorySmoother_cfi
LooperTrajectorySmoother = TrackingTools.TrackFitters.KFTrajectorySmoother_cfi.KFTrajectorySmoother.clone(
ComponentName = cms.string('LooperSmoother'),
Propagator = cms.string('PropagatorWithMaterialForLoopers'),
errorRescaling = cms.double(10.0),
)
import TrackingTools.TrackFitters.KFFittingSmoother_cfi
LooperFittingSmoother = TrackingTools.TrackFitters.KFFittingSmoother_cfi.KFFittingSmoother.clone(
ComponentName = cms.string('LooperFittingSmoother'),
Fitter = cms.string('LooperFitter'),
Smoother = cms.string('LooperSmoother'),
EstimateCut = cms.double(20.0),
# ggiurgiu@fnal.gov : Any value lower than -15 turns off this cut.
# Recommended default value: -14.0. This will reject only the worst hits with negligible loss in track efficiency.
LogPixelProbabilityCut = cms.double(-14.0),
MinNumberOfHits = cms.int32(3)
)
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('admin', '0001_initial'),
('testapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LogEntryWithGroup',
fields=[
('logentry_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='admin.LogEntry')),
('group', models.ForeignKey(blank=True, to='auth.Group', null=True)),
],
options={
},
bases=('admin.logentry',),
),
] |
from scrapfishin.schema import Recipe
_base = {
'source': 'Hello Fresh',
'prep_time': 10,
'difficulty': 'level 1',
'tags': [{'descriptor': 'spice mix'}],
}
tuscan_heat_spice = Recipe.parse_obj({
**_base,
'title': 'Tuscan Heat Spice',
'ingredient_amounts': [
{'ingredient': {'food': 'basil'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'rosemary'}, 'amount': '2', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'oregano'}, 'amount': '2', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'garlic powder'}, 'amount': '2', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'cayenne'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground fennel'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}}
]
})
blackening_spice = Recipe.parse_obj({
**_base,
'title': 'Blackening Spice',
'ingredient_amounts': [
{'ingredient': {'food': 'smoked paprika'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'paprika'}, 'amount': '1.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'onion powder'}, 'amount': '1.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'garlic powder'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'white pepper'}, 'amount': '0.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'black pepper'}, 'amount': '0.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'thyme'}, 'amount': '0.25', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'oregano'}, 'amount': '0.25', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'cayenne'}, 'amount': '0.125', 'measurement': {'unit': 'teaspoon'}}
]
})
smoky_cinnamon_paprika_spice = Recipe.parse_obj({
**_base,
'title': 'Smoky Cinnamon Paprika Spice',
'ingredient_amounts': [
{'ingredient': {'food': 'ground cloves'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'onion powder'}, 'amount': '8', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground cinnamon'}, 'amount': '8', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'smoked paprika'}, 'amount': '6', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'mustard powder'}, 'amount': '16', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'sweet paprika'}, 'amount': '24', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'white granulated sugar'}, 'amount': '24', 'measurement': {'unit': 'teaspoon'}}
]
})
fall_harvest_spice_blend = Recipe.parse_obj({
**_base,
'title': 'Fall Harvest Spice Blend',
'ingredient_amounts': [
{'ingredient': {'food': 'thyme'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground sage'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'garlic powder'}, 'amount': '2', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'onion powder'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
]
})
southwest_spice_blend = Recipe.parse_obj({
**_base,
'title': 'Southwest Spice Blend',
'ingredient_amounts': [
{'ingredient': {'food': 'garlic powder'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'cumin'}, 'amount': '2', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'chili powder'}, 'amount': '2', 'measurement': {'unit': 'teaspoon'}}
]
})
tunisian_spice_blend = Recipe.parse_obj({
**_base,
'title': 'Tunisian Spice Blend',
'ingredient_amounts': [
{'ingredient': {'food': 'ground caraway seed'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground coriander'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'smoked paprika'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'turmeric'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'chili powder'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'garlic powder'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'cayenne'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground cinnamon'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'black pepper'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}}
]
})
steak_spice_blend = Recipe.parse_obj({
**_base,
'title': 'Steak Spice Blend',
'ingredient_amounts': [
{'ingredient': {'food': 'red chili flake'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground coriander seed'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground dill seed'}, 'amount': '2', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground mustard seed'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'garlic powder'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'black pepper'}, 'amount': '4', 'measurement': {'unit': 'teaspoon'}}
]
})
mexican_spice_blend = Recipe.parse_obj({
**_base,
'title': 'Mexican Spice Blend',
'ingredient_amounts': [
{'ingredient': {'food': 'chili powder'}, 'amount': '6', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'cumin'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'salt'}, 'amount': '1.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground black pepper'}, 'amount': '1.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'paprika'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'red pepper flakes'}, 'amount': '0.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'oregano'}, 'amount': '0.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'garlic powder'}, 'amount': '0.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'onion powder'}, 'amount': '0.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground cayenne pepper'}, 'amount': '0.25', 'measurement': {'unit': 'teaspoon'}}
]
})
zaatar_spice_blend = Recipe.parse_obj({
**_base,
'title': 'Za\'atar Spice Blend',
'ingredient_amounts': [
{'ingredient': {'food': 'toasted sesame seeds'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'salt'}, 'amount': '0.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground cumin'}, 'amount': '0.5', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'dried thyme'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'oregano'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'marjoram'}, 'amount': '3', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'sumac'}, 'amount': '9', 'measurement': {'unit': 'teaspoon'}}
]
})
mediterranean_spice_blend = Recipe.parse_obj({
**_base,
'title': 'Mediterranean Spice Blend',
'ingredient_amounts': [
{'ingredient': {'food': 'dried oregano'}, 'amount': '2', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'dried mint'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'sumac'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}},
{'ingredient': {'food': 'ground coriander'}, 'amount': '1', 'measurement': {'unit': 'teaspoon'}}
]
})
# TODO: further reading:
# https://www.reddit.com/r/hellofresh/comments/bawnby/hello_fresh_diy_spice_blends/
#
# SELECT i.ingredient
# FROM ingredient AS i
# WHERE i.ingredient LIKE '%spice%'
#
# ranch spice
# fajita spice blend
# enchilada spice blend
# taco spice blend
# cajun spice blend
spices = [
tuscan_heat_spice,
blackening_spice,
smoky_cinnamon_paprika_spice,
fall_harvest_spice_blend,
southwest_spice_blend,
tunisian_spice_blend,
steak_spice_blend,
mexican_spice_blend,
zaatar_spice_blend,
mediterranean_spice_blend
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
import json
import logging
import operator
import pickle
import smtplib
import sys
import time
from collections import defaultdict
from datetime import date, datetime
from email.message import EmailMessage
from io import BytesIO
from pathlib import Path
from typing import (Any, Dict, Iterable, List, MutableMapping, Optional, Set,
Tuple, Union)
from uuid import uuid4
from zipfile import ZipFile
import dns.rdatatype
import dns.resolver
from har2tree import CrawledTree, Har2TreeError, HarFile, HostNode, URLNode
from PIL import Image # type: ignore
from pymisp import MISPAttribute, MISPEvent, MISPObject
from pymisp.tools import FileObject, URLObject
from redis import ConnectionPool, Redis
from redis.connection import UnixDomainSocketConnection
from werkzeug.useragents import UserAgent
from .capturecache import CaptureCache
from .context import Context
from .exceptions import (LookylooException, MissingCaptureDirectory,
MissingUUID, NoValidHarFile)
from .helpers import (CaptureStatus, get_captures_dir, get_config,
get_email_template, get_homedir, get_resources_hashes,
get_socket_path, get_splash_url, get_taxonomies,
load_pickle_tree, remove_pickle_tree, try_make_file,
uniq_domains)
from .indexing import Indexing
from .modules import (MISP, PhishingInitiative, SaneJavaScript, UniversalWhois,
UrlScan, VirusTotal, Phishtank)
class Lookyloo():
def __init__(self) -> None:
self.logger = logging.getLogger(f'{self.__class__.__name__}')
self.logger.setLevel(get_config('generic', 'loglevel'))
self.indexing = Indexing()
self.is_public_instance = get_config('generic', 'public_instance')
self.public_domain = get_config('generic', 'public_domain')
self.taxonomies = get_taxonomies()
self.redis_pool: ConnectionPool = ConnectionPool(connection_class=UnixDomainSocketConnection,
path=get_socket_path('cache'), decode_responses=True)
self.capture_dir: Path = get_captures_dir()
self.splash_url: str = get_splash_url()
self._priority = get_config('generic', 'priority')
# Initialize 3rd party components
self.pi = PhishingInitiative(get_config('modules', 'PhishingInitiative'))
if not self.pi.available:
self.logger.warning('Unable to setup the PhishingInitiative module')
self.vt = VirusTotal(get_config('modules', 'VirusTotal'))
if not self.vt.available:
self.logger.warning('Unable to setup the VirusTotal module')
self.sanejs = SaneJavaScript(get_config('modules', 'SaneJS'))
if not self.sanejs.available:
self.logger.warning('Unable to setup the SaneJS module')
self.misp = MISP(get_config('modules', 'MISP'))
if not self.misp.available:
self.logger.warning('Unable to setup the MISP module')
self.uwhois = UniversalWhois(get_config('modules', 'UniversalWhois'))
if not self.uwhois.available:
self.logger.warning('Unable to setup the UniversalWhois module')
self.urlscan = UrlScan(get_config('modules', 'UrlScan'))
if not self.urlscan.available:
self.logger.warning('Unable to setup the UrlScan module')
self.phishtank = Phishtank(get_config('modules', 'Phishtank'))
if not self.phishtank.available:
self.logger.warning('Unable to setup the Phishtank module')
self.context = Context(self.sanejs)
self._captures_index: Dict[str, CaptureCache] = {}
@property
def redis(self):
return Redis(connection_pool=self.redis_pool)
def _get_capture_dir(self, capture_uuid: str, /) -> Path:
'''Use the cache to get a capture directory from a capture UUID'''
capture_dir: Optional[str]
to_return: Path
# Try to get from the in-class cache
if capture_uuid in self._captures_index:
to_return = self._captures_index[capture_uuid].capture_dir
if to_return.exists():
return to_return
self.redis.delete(str(to_return))
self._captures_index.pop(capture_uuid)
# Try to get from the recent captures cache in redis
capture_dir = self.redis.hget('lookup_dirs', capture_uuid)
if capture_dir:
to_return = Path(capture_dir)
if to_return.exists():
return to_return
# The capture was either removed or archived, cleaning up
self.redis.hdel('lookup_dirs', capture_uuid)
self.redis.delete(capture_dir)
# Try to get from the archived captures cache in redis
capture_dir = self.redis.hget('lookup_dirs_archived', capture_uuid)
if capture_dir:
to_return = Path(capture_dir)
if to_return.exists():
return to_return
self.redis.hdel('lookup_dirs_archived', capture_uuid)
# The capture was removed, remove the UUID
self.logger.warning(f'UUID ({capture_uuid}) linked to a missing directory ({capture_dir}).')
raise MissingCaptureDirectory(f'UUID ({capture_uuid}) linked to a missing directory ({capture_dir}).')
raise MissingUUID(f'Unable to find UUID {capture_uuid}.')
def _cache_capture(self, capture_uuid: str, /) -> CrawledTree:
'''Generate the pickle, set the cache, add capture in the indexes'''
def _ensure_meta(capture_dir: Path, tree: CrawledTree) -> None:
'''Make sure the meta file is present, it contains information about the User Agent used for the capture.'''
metafile = capture_dir / 'meta'
if metafile.exists():
return
ua = UserAgent(tree.root_hartree.user_agent)
to_dump = {}
if ua.platform:
to_dump['os'] = ua.platform
if ua.browser:
if ua.version:
to_dump['browser'] = f'{ua.browser} {ua.version}'
else:
to_dump['browser'] = ua.browser
if ua.language:
to_dump['language'] = ua.language
if not to_dump:
# UA not recognized
self.logger.info(f'Unable to recognize the User agent: {ua}')
to_dump['user_agent'] = ua.string
with metafile.open('w') as f:
json.dump(to_dump, f)
capture_dir = self._get_capture_dir(capture_uuid)
har_files = sorted(capture_dir.glob('*.har'))
lock_file = capture_dir / 'lock'
pickle_file = capture_dir / 'tree.pickle'
if try_make_file(lock_file):
# Lock created, we can process
with lock_file.open('w') as f:
f.write(datetime.now().isoformat())
else:
# The pickle is being created somewhere else, wait until it's done.
while lock_file.exists():
time.sleep(5)
keep_going = 5
while (ct := load_pickle_tree(capture_dir)) is None:
keep_going -= 1
if not keep_going:
raise LookylooException(f'Unable to get tree for {capture_uuid}')
time.sleep(5)
return ct
# NOTE: We only index the public captures
index = True
try:
ct = CrawledTree(har_files, capture_uuid)
_ensure_meta(capture_dir, ct)
self._resolve_dns(ct)
self.context.contextualize_tree(ct)
cache = self.capture_cache(capture_uuid)
if not cache:
raise LookylooException(f'Broken cache for {capture_dir}')
if self.is_public_instance:
if cache.no_index:
index = False
if index:
self.indexing.index_cookies_capture(ct)
self.indexing.index_body_hashes_capture(ct)
self.indexing.index_url_capture(ct)
categories = list(self.categories_capture(capture_uuid).keys())
self.indexing.index_categories_capture(capture_uuid, categories)
except Har2TreeError as e:
raise NoValidHarFile(e)
except RecursionError as e:
raise NoValidHarFile(f'Tree too deep, probably a recursive refresh: {e}.\n Append /export to the URL to get the files.')
else:
with pickle_file.open('wb') as _p:
# Some pickles require a pretty high recursion limit, this kind of fixes it.
# If the capture is really broken (generally a refresh to self), the capture
# is discarded in the RecursionError above.
default_recursion_limit = sys.getrecursionlimit()
sys.setrecursionlimit(int(default_recursion_limit * 1.1))
try:
pickle.dump(ct, _p)
except RecursionError as e:
raise NoValidHarFile(f'Tree too deep, probably a recursive refresh: {e}.\n Append /export to the URL to get the files.')
sys.setrecursionlimit(default_recursion_limit)
finally:
lock_file.unlink(missing_ok=True)
return ct
def _set_capture_cache(self, capture_dir: Path):
'''Populate the redis cache for a capture. Mostly used on the index page.
NOTE: Doesn't require the pickle.'''
with (capture_dir / 'uuid').open() as f:
uuid = f.read().strip()
cache: Dict[str, Union[str, int]] = {'uuid': uuid, 'capture_dir': str(capture_dir)}
if (capture_dir / 'error.txt').exists():
# Something went wrong
with (capture_dir / 'error.txt').open() as _error:
content = _error.read()
try:
error_to_cache = json.loads(content)
if isinstance(error_to_cache, dict) and error_to_cache.get('details'):
error_to_cache = error_to_cache.get('details')
except json.decoder.JSONDecodeError:
# old format
error_to_cache = content
cache['error'] = f'The capture {capture_dir.name} has an error: {error_to_cache}'
if (har_files := sorted(capture_dir.glob('*.har'))):
try:
har = HarFile(har_files[0], uuid)
cache['title'] = har.initial_title
cache['timestamp'] = har.initial_start_time
cache['url'] = har.root_url
if har.initial_redirects and har.need_tree_redirects:
# try to load tree from disk, get redirects
if (ct := load_pickle_tree(capture_dir)):
cache['redirects'] = json.dumps(ct.redirects)
cache['incomplete_redirects'] = 0
else:
# Pickle not available
cache['redirects'] = json.dumps(har.initial_redirects)
cache['incomplete_redirects'] = 1
else:
cache['redirects'] = json.dumps(har.initial_redirects)
cache['incomplete_redirects'] = 0
except Har2TreeError as e:
cache['error'] = str(e)
else:
cache['error'] = f'No har files in {capture_dir.name}'
if (cache.get('error')
and isinstance(cache['error'], str)
and 'HTTP Error' not in cache['error']):
self.logger.warning(cache['error'])
if (capture_dir / 'categories').exists():
with (capture_dir / 'categories').open() as _categories:
cache['categories'] = json.dumps([c.strip() for c in _categories.readlines()])
if (capture_dir / 'no_index').exists():
# If the folders claims anonymity
cache['no_index'] = 1
if (capture_dir / 'parent').exists():
# The capture was initiated from another one
with (capture_dir / 'parent').open() as f:
cache['parent'] = f.read().strip()
p = self.redis.pipeline()
p.hset('lookup_dirs', uuid, str(capture_dir))
p.hmset(str(capture_dir), cache)
p.execute()
self._captures_index[uuid] = CaptureCache(cache)
def _resolve_dns(self, ct: CrawledTree):
'''Resolves all domains of the tree, keeps A (IPv4), AAAA (IPv6), and CNAME entries
and store them in ips.json and cnames.json, in the capture directory.
Updates the nodes of the tree accordingly so the information is available.
'''
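# Illustrative shapes of the cached files (example values are assumptions,
# derived from the types used below):
#   cnames.json: {"www.example.com": "cdn.example.net", "cdn.example.net": null}
#   ips.json:    {"cdn.example.net": ["198.51.100.7", "2001:db8::7"]}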
def _build_cname_chain(known_cnames: Dict[str, Optional[str]], hostname) -> List[str]:
'''Returns a list of CNAMEs starting from one hostname.
The CNAME resolutions are made in `_resolve_dns`. A hostname can have a CNAME entry,
and that CNAME entry can have another CNAME entry, and so on, multiple times.
This method follows the chain of hostnames until there are no more CNAMEs.'''
cnames: List[str] = []
to_search = hostname
while True:
if known_cnames.get(to_search) is None:
break
# At this point, known_cnames[to_search] must exist and be a str
cnames.append(known_cnames[to_search]) # type: ignore
to_search = known_cnames[to_search]
return cnames
cnames_path = ct.root_hartree.har.path.parent / 'cnames.json'
ips_path = ct.root_hartree.har.path.parent / 'ips.json'
host_cnames: Dict[str, Optional[str]] = {}
if cnames_path.exists():
with cnames_path.open() as f:
host_cnames = json.load(f)
host_ips: Dict[str, List[str]] = {}
if ips_path.exists():
with ips_path.open() as f:
host_ips = json.load(f)
for node in ct.root_hartree.hostname_tree.traverse():
if node.name not in host_cnames or node.name not in host_ips:
# Resolve and cache
try:
response = dns.resolver.resolve(node.name, search=True)
for answer in response.response.answer:
if answer.rdtype == dns.rdatatype.RdataType.CNAME:
host_cnames[str(answer.name).rstrip('.')] = str(answer[0].target).rstrip('.')
else:
host_cnames[str(answer.name).rstrip('.')] = None
if answer.rdtype in [dns.rdatatype.RdataType.A, dns.rdatatype.RdataType.AAAA]:
host_ips[str(answer.name).rstrip('.')] = list(set(str(b) for b in answer))
except Exception:
host_cnames[node.name] = None
host_ips[node.name] = []
cnames = _build_cname_chain(host_cnames, node.name)
if cnames:
node.add_feature('cname', cnames)
if cnames[-1] in host_ips:
node.add_feature('resolved_ips', host_ips[cnames[-1]])
elif node.name in host_ips:
node.add_feature('resolved_ips', host_ips[node.name])
with cnames_path.open('w') as f:
json.dump(host_cnames, f)
with ips_path.open('w') as f:
json.dump(host_ips, f)
return ct
def add_context(self, capture_uuid: str, /, urlnode_uuid: str, *, ressource_hash: str,
legitimate: bool, malicious: bool, details: Dict[str, Dict[str, str]]):
'''Adds context information to a capture or a URL node'''
if malicious:
self.context.add_malicious(ressource_hash, details['malicious'])
if legitimate:
self.context.add_legitimate(ressource_hash, details['legitimate'])
def add_to_legitimate(self, capture_uuid: str, /, hostnode_uuid: Optional[str]=None, urlnode_uuid: Optional[str]=None):
'''Mark a full capture as legitimate.
Iterates over all the nodes and marks them all as legitimate too.'''
ct = self.get_crawled_tree(capture_uuid)
self.context.mark_as_legitimate(ct, hostnode_uuid, urlnode_uuid)
def remove_pickle(self, capture_uuid: str, /) -> None:
'''Remove the pickle from a specific capture.'''
capture_dir = self._get_capture_dir(capture_uuid)
remove_pickle_tree(capture_dir)
def rebuild_cache(self) -> None:
'''Flush and rebuild the redis cache. Doesn't remove the pickles.
The cached captures will be rebuilt when loading the index.'''
self.redis.flushdb()
def rebuild_all(self) -> None:
'''Flush and rebuild the redis cache, and delete all the pickles.
The captures will be rebuilt by the background indexer'''
[remove_pickle_tree(capture_dir) for capture_dir in self.capture_dir.iterdir() if capture_dir.is_dir()] # type: ignore
self.rebuild_cache()
def get_urlnode_from_tree(self, capture_uuid: str, /, node_uuid: str) -> URLNode:
'''Get a URL node from a tree, by UUID'''
ct = self.get_crawled_tree(capture_uuid)
return ct.root_hartree.get_url_node_by_uuid(node_uuid)
def get_hostnode_from_tree(self, capture_uuid: str, /, node_uuid: str) -> HostNode:
'''Get a host node from a tree, by UUID'''
ct = self.get_crawled_tree(capture_uuid)
return ct.root_hartree.get_host_node_by_uuid(node_uuid)
def get_statistics(self, capture_uuid: str, /) -> Dict[str, Any]:
'''Get the statistics of a capture.'''
ct = self.get_crawled_tree(capture_uuid)
return ct.root_hartree.stats
def get_info(self, capture_uuid: str, /) -> Dict[str, Any]:
'''Get basic information about the capture.'''
ct = self.get_crawled_tree(capture_uuid)
to_return = {'url': ct.root_url, 'title': ct.root_hartree.har.initial_title,
'capture_time': ct.start_time.isoformat(), 'user_agent': ct.user_agent,
'referer': ct.referer}
return to_return
def get_meta(self, capture_uuid: str, /) -> Dict[str, str]:
'''Get the meta information from a capture (mostly details about the User Agent used).'''
capture_dir = self._get_capture_dir(capture_uuid)
meta = {}
if (capture_dir / 'meta').exists():
with open((capture_dir / 'meta'), 'r') as f:
meta = json.load(f)
return meta
def categories_capture(self, capture_uuid: str, /) -> Dict[str, Any]:
'''Get all the categories related to a capture, in MISP Taxonomies format'''
capture_dir = self._get_capture_dir(capture_uuid)
# get existing categories if possible
if (capture_dir / 'categories').exists():
with (capture_dir / 'categories').open() as f:
current_categories = [line.strip() for line in f.readlines()]
return {e: self.taxonomies.revert_machinetag(e) for e in current_categories}
return {}
def categorize_capture(self, capture_uuid: str, /, category: str) -> None:
'''Add a category (MISP Taxonomy tag) to a capture.'''
if not get_config('generic', 'enable_categorization'):
return
# Make sure the category is mappable to a taxonomy.
self.taxonomies.revert_machinetag(category)
capture_dir = self._get_capture_dir(capture_uuid)
# get existing categories if possible
if (capture_dir / 'categories').exists():
with (capture_dir / 'categories').open() as f:
current_categories = set(line.strip() for line in f.readlines())
else:
current_categories = set()
current_categories.add(category)
with (capture_dir / 'categories').open('w') as f:
f.writelines(f'{t}\n' for t in current_categories)
def uncategorize_capture(self, capture_uuid: str, /, category: str) -> None:
'''Remove a category (MISP Taxonomy tag) from a capture.'''
if not get_config('generic', 'enable_categorization'):
return
capture_dir = self._get_capture_dir(capture_uuid)
# get existing categories if possible
if (capture_dir / 'categories').exists():
with (capture_dir / 'categories').open() as f:
current_categories = set(line.strip() for line in f.readlines())
else:
current_categories = set()
current_categories.remove(category)
with (capture_dir / 'categories').open('w') as f:
f.writelines(f'{t}\n' for t in current_categories)
def trigger_modules(self, capture_uuid: str, /, force: bool=False, auto_trigger: bool=False) -> Dict:
'''Launch the 3rd party modules on a capture.
It uses the cached result *if* the module was triggered the same day.
The `force` flag re-triggers the module regardless of the cache.'''
try:
ct = self.get_crawled_tree(capture_uuid)
except LookylooException:
self.logger.warning(f'Unable to trigger the modules unless the tree ({capture_uuid}) is cached.')
return {'error': f'UUID {capture_uuid} is either unknown or the tree is not ready yet.'}
self.uwhois.capture_default_trigger(ct, force=force, auto_trigger=auto_trigger)
to_return: Dict[str, Dict] = {'PhishingInitiative': {}, 'VirusTotal': {}, 'UrlScan': {}}
capture_cache = self.capture_cache(capture_uuid)
to_return['PhishingInitiative'] = self.pi.capture_default_trigger(ct, force=force, auto_trigger=auto_trigger)
to_return['VirusTotal'] = self.vt.capture_default_trigger(ct, force=force, auto_trigger=auto_trigger)
to_return['UrlScan'] = self.urlscan.capture_default_trigger(
self.get_info(capture_uuid),
visibility='unlisted' if (capture_cache and capture_cache.no_index) else 'public',
force=force, auto_trigger=auto_trigger)
to_return['Phishtank'] = self.phishtank.capture_default_trigger(ct, auto_trigger=auto_trigger)
return to_return
def get_modules_responses(self, capture_uuid: str, /) -> Optional[Dict[str, Any]]:
'''Get the responses of the modules from the cached responses on the disk'''
try:
ct = self.get_crawled_tree(capture_uuid)
except LookylooException:
self.logger.warning(f'Unable to get the modules responses unless the tree ({capture_uuid}) is cached.')
return None
to_return: Dict[str, Any] = {}
if self.vt.available:
to_return['vt'] = {}
if ct.redirects:
for redirect in ct.redirects:
to_return['vt'][redirect] = self.vt.get_url_lookup(redirect)
else:
to_return['vt'][ct.root_hartree.har.root_url] = self.vt.get_url_lookup(ct.root_hartree.har.root_url)
if self.pi.available:
to_return['pi'] = {}
if ct.redirects:
for redirect in ct.redirects:
to_return['pi'][redirect] = self.pi.get_url_lookup(redirect)
else:
to_return['pi'][ct.root_hartree.har.root_url] = self.pi.get_url_lookup(ct.root_hartree.har.root_url)
if self.phishtank.available:
to_return['phishtank'] = {}
if ct.redirects:
for redirect in ct.redirects:
to_return['phishtank'][redirect] = self.phishtank.get_url_lookup(redirect)
else:
to_return['phishtank'][ct.root_hartree.har.root_url] = self.phishtank.get_url_lookup(ct.root_hartree.har.root_url)
if self.urlscan.available:
info = self.get_info(capture_uuid)
to_return['urlscan'] = {'submission': {}, 'result': {}}
to_return['urlscan']['submission'] = self.urlscan.get_url_submission(info)
if to_return['urlscan']['submission'] and 'uuid' in to_return['urlscan']['submission']:
# The submission was done, try to get the results
result = self.urlscan.url_result(info)
if 'error' not in result:
to_return['urlscan']['result'] = result
return to_return
def hide_capture(self, capture_uuid: str, /) -> None:
"""Add the capture in the hidden pool (not shown on the front page)
NOTE: it won't remove the correlations until they are rebuilt.
"""
capture_dir = self._get_capture_dir(capture_uuid)
self.redis.hset(str(capture_dir), 'no_index', 1)
(capture_dir / 'no_index').touch()
if capture_uuid in self._captures_index:
self._captures_index[capture_uuid].no_index = True
@property
def capture_uuids(self) -> List[str]:
'''All the capture UUIDs present in the cache.'''
return self.redis.hkeys('lookup_dirs')
def sorted_capture_cache(self, capture_uuids: Optional[Iterable[str]]=None) -> List[CaptureCache]:
'''Get all the captures in the cache, sorted by timestamp (new -> old).'''
if capture_uuids is None:
# Sort all captures
capture_uuids = self.capture_uuids
if not capture_uuids:
# No captures at all on the instance
return []
all_cache: List[CaptureCache] = [self._captures_index[uuid] for uuid in capture_uuids
if (uuid in self._captures_index
and not self._captures_index[uuid].incomplete_redirects)]
captures_to_get = set(capture_uuids) - set(self._captures_index.keys())
if captures_to_get:
p = self.redis.pipeline()
for directory in self.redis.hmget('lookup_dirs', *captures_to_get):
if not directory:
continue
p.hgetall(directory)
for uuid, c in zip(captures_to_get, p.execute()):
try:
if not c:
c = self.capture_cache(uuid)
if not c:
continue
else:
c = CaptureCache(c)
except LookylooException as e:
self.logger.warning(e)
continue
if hasattr(c, 'timestamp'):
all_cache.append(c)
self._captures_index[c.uuid] = c
all_cache.sort(key=operator.attrgetter('timestamp'), reverse=True)
return all_cache
def get_capture_status(self, capture_uuid: str, /) -> CaptureStatus:
if self.redis.zrank('to_capture', capture_uuid) is not None:
return CaptureStatus.QUEUED
elif self.redis.hexists('lookup_dirs', capture_uuid):
return CaptureStatus.DONE
elif self.redis.sismember('ongoing', capture_uuid):
return CaptureStatus.ONGOING
return CaptureStatus.UNKNOWN
def try_error_status(self, capture_uuid: str, /) -> Optional[str]:
return self.redis.get(f'error_{capture_uuid}')
def capture_cache(self, capture_uuid: str, /) -> Optional[CaptureCache]:
"""Get the cache from redis."""
if capture_uuid in self._captures_index and not self._captures_index[capture_uuid].incomplete_redirects:
return self._captures_index[capture_uuid]
try:
capture_dir = self._get_capture_dir(capture_uuid)
cached = self.redis.hgetall(str(capture_dir))
if not cached or cached.get('incomplete_redirects') == '1':
self._set_capture_cache(capture_dir)
else:
self._captures_index[capture_uuid] = CaptureCache(cached)
except MissingCaptureDirectory as e:
# The UUID is in the captures but the directory is not on the disk.
self.logger.warning(e)
return None
except MissingUUID:
if self.get_capture_status(capture_uuid) not in [CaptureStatus.QUEUED, CaptureStatus.ONGOING]:
self.logger.warning(f'Unable to find {capture_uuid} (not in the cache and/or missing capture directory).')
return None
except LookylooException as e:
self.logger.warning(e)
return None
except Exception as e:
self.logger.critical(e)
return None
else:
return self._captures_index[capture_uuid]
def get_crawled_tree(self, capture_uuid: str, /) -> CrawledTree:
'''Get the generated tree in ETE Toolkit format.
Loads the pickle if it exists, creates it otherwise.'''
capture_dir = self._get_capture_dir(capture_uuid)
ct = load_pickle_tree(capture_dir)
if not ct:
ct = self._cache_capture(capture_uuid)
return ct
def enqueue_capture(self, query: MutableMapping[str, Any], source: str, user: str, authenticated: bool) -> str:
'''Enqueue a query in the capture queue (used by the UI and the API for asynchronous processing)'''
def _get_priority(source: str, user: str, authenticated: bool) -> int:
src_prio: int = self._priority['sources'][source] if source in self._priority['sources'] else -1
if not authenticated:
usr_prio = self._priority['users']['_default_anon']
# reduce priority for anonymous users making lots of captures
queue_size = self.redis.zscore('queues', f'{source}|{authenticated}|{user}')
if queue_size is None:
queue_size = 0
usr_prio -= int(queue_size / 10)
else:
usr_prio = self._priority['users'][user] if self._priority['users'].get(user) else self._priority['users']['_default_auth']
return src_prio + usr_prio
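# Illustrative computation (the actual priority values come from the
# configuration, so the numbers here are assumptions): an anonymous user
# with a '_default_anon' priority of 0, submitting from a source with
# priority 1 and 30 captures already queued, ends up with 1 + (0 - 3) = -2.
# Anything below -10 additionally gets delisted (see the check below).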
priority = _get_priority(source, user, authenticated)
perma_uuid = str(uuid4())
p = self.redis.pipeline()
for key, value in query.items():
if isinstance(value, bool):
query[key] = 1 if value else 0
if isinstance(value, list):
query[key] = json.dumps(value)
if priority < -10:
# Someone is probably abusing the system with useless URLs, remove them from the index
query['listing'] = 0
p.hmset(perma_uuid, query)
p.zadd('to_capture', {perma_uuid: priority})
p.zincrby('queues', 1, f'{source}|{authenticated}|{user}')
p.set(f'{perma_uuid}_mgmt', f'{source}|{authenticated}|{user}')
p.execute()
return perma_uuid
def send_mail(self, capture_uuid: str, /, email: str='', comment: str='') -> None:
'''Send an email notification regarding a specific capture'''
if not get_config('generic', 'enable_mail_notification'):
return
redirects = ''
initial_url = ''
cache = self.capture_cache(capture_uuid)
if cache:
initial_url = cache.url
if cache.redirects:
redirects = "Redirects:\n"
redirects += '\n'.join(cache.redirects)
else:
redirects = "No redirects."
email_config = get_config('generic', 'email')
msg = EmailMessage()
msg['From'] = email_config['from']
if email:
msg['Reply-To'] = email
msg['To'] = email_config['to']
msg['Subject'] = email_config['subject']
body = get_email_template()
body = body.format(
recipient=msg['To'].addresses[0].display_name,
domain=self.public_domain,
uuid=capture_uuid,
initial_url=initial_url,
redirects=redirects,
comment=comment,
sender=msg['From'].addresses[0].display_name,
)
msg.set_content(body)
try:
s = smtplib.SMTP(email_config['smtp_host'], email_config['smtp_port'])
s.send_message(msg)
s.quit()
except Exception as e:
self.logger.exception(e)
self.logger.warning(msg.as_string())
def _get_raw(self, capture_uuid: str, /, extension: str='*', all_files: bool=True) -> BytesIO:
'''Get file(s) from the capture directory'''
try:
capture_dir = self._get_capture_dir(capture_uuid)
except MissingUUID:
return BytesIO(f'Capture {capture_uuid} is not available yet, try again later.'.encode())
except MissingCaptureDirectory:
return BytesIO(f'No capture {capture_uuid} on the system (directory missing).'.encode())
all_paths = sorted(list(capture_dir.glob(f'*.{extension}')))
if not all_files:
# Only get the first one in the list
with open(all_paths[0], 'rb') as f:
return BytesIO(f.read())
to_return = BytesIO()
# Add the uuid file to the export; it allows keeping the same UUID across platforms.
all_paths.append(capture_dir / 'uuid')
with ZipFile(to_return, 'w') as myzip:
for path in all_paths:
if path.name.endswith('pickle'):
continue
myzip.write(path, arcname=f'{capture_dir.name}/{path.name}')
to_return.seek(0)
return to_return
def get_html(self, capture_uuid: str, /, all_html: bool=False) -> BytesIO:
'''Get rendered HTML'''
return self._get_raw(capture_uuid, 'html', all_html)
def get_cookies(self, capture_uuid: str, /, all_cookies: bool=False) -> BytesIO:
'''Get the cookie(s)'''
return self._get_raw(capture_uuid, 'cookies.json', all_cookies)
def get_screenshot(self, capture_uuid: str, /) -> BytesIO:
'''Get the screenshot(s) of the rendered page'''
return self._get_raw(capture_uuid, 'png', all_files=False)
def get_screenshot_thumbnail(self, capture_uuid: str, /, for_datauri: bool=False, width: int=64) -> Union[str, BytesIO]:
'''Get the thumbnail of the rendered page. Always crop to a square.'''
to_return = BytesIO()
size = width, width
try:
s = self.get_screenshot(capture_uuid)
orig_screenshot = Image.open(s)
to_thumbnail = orig_screenshot.crop((0, 0, orig_screenshot.width, orig_screenshot.width))
except Image.DecompressionBombError as e:
# The image is most probably too big: https://pillow.readthedocs.io/en/stable/reference/Image.html
self.logger.warning(f'Unable to generate the screenshot thumbnail of {capture_uuid}: image too big ({e}).')
error_img: Path = get_homedir() / 'website' / 'web' / 'static' / 'error_screenshot.png'
to_thumbnail = Image.open(error_img)
to_thumbnail.thumbnail(size)
to_thumbnail.save(to_return, 'png')
to_return.seek(0)
if for_datauri:
return base64.b64encode(to_return.getvalue()).decode()
else:
return to_return
def get_capture(self, capture_uuid: str, /) -> BytesIO:
'''Get all the files related to this capture.'''
return self._get_raw(capture_uuid)
def get_urls_rendered_page(self, capture_uuid: str, /):
ct = self.get_crawled_tree(capture_uuid)
return sorted(set(ct.root_hartree.rendered_node.urls_in_rendered_page)
- set(ct.root_hartree.all_url_requests.keys()))
def get_body_hash_investigator(self, body_hash: str, /) -> Tuple[List[Tuple[str, str]], List[Tuple[str, float]]]:
'''Returns all the captures related to a hash (sha512), used in the web interface.'''
total_captures, details = self.indexing.get_body_hash_captures(body_hash, limit=-1)
cached_captures = self.sorted_capture_cache([d[0] for d in details])
captures = [(cache.uuid, cache.title) for cache in cached_captures]
domains = self.indexing.get_body_hash_domains(body_hash)
return captures, domains
def get_body_hash_full(self, body_hash: str, /) -> Tuple[Dict[str, List[Dict[str, str]]], BytesIO]:
'''Returns a lot of information about the hash (sha512) and the hits in the instance.
Also contains the data (base64 encoded)'''
details = self.indexing.get_body_hash_urls(body_hash)
body_content = BytesIO()
# get the body from the first entry in the details list
for _, entries in details.items():
ct = self.get_crawled_tree(entries[0]['capture'])
urlnode = ct.root_hartree.get_url_node_by_uuid(entries[0]['urlnode'])
if urlnode.body_hash == body_hash:
# the hash we're looking for is the whole file
body_content = urlnode.body
else:
# The hash is an embedded resource
for _, blobs in urlnode.embedded_ressources.items():
for h, b in blobs:
if h == body_hash:
body_content = b
break
break
return details, body_content
def get_latest_url_capture(self, url: str, /) -> Optional[CaptureCache]:
'''Get the most recent capture with this URL'''
captures = self.sorted_capture_cache(self.indexing.get_captures_url(url))
if captures:
return captures[0]
return None
def get_url_occurrences(self, url: str, /, limit: int=20) -> List[Dict]:
'''Get the most recent captures and URL nodes where the URL has been seen.'''
captures = self.sorted_capture_cache(self.indexing.get_captures_url(url))
to_return: List[Dict] = []
for capture in captures[:limit]:
ct = self.get_crawled_tree(capture.uuid)
to_append: Dict[str, Union[str, Dict]] = {'capture_uuid': capture.uuid,
'start_timestamp': capture.timestamp.isoformat(),
'title': capture.title}
urlnodes: Dict[str, Dict[str, str]] = {}
for urlnode in ct.root_hartree.url_tree.search_nodes(name=url):
urlnodes[urlnode.uuid] = {'start_time': urlnode.start_time.isoformat(),
'hostnode_uuid': urlnode.hostnode_uuid}
if hasattr(urlnode, 'body_hash'):
urlnodes[urlnode.uuid]['hash'] = urlnode.body_hash
to_append['urlnodes'] = urlnodes
to_return.append(to_append)
return to_return
def get_hostname_occurrences(self, hostname: str, /, with_urls_occurrences: bool=False, limit: int=20) -> List[Dict]:
'''Get the most recent captures and URL nodes where the hostname has been seen.'''
captures = self.sorted_capture_cache(self.indexing.get_captures_hostname(hostname))
to_return: List[Dict] = []
for capture in captures[:limit]:
ct = self.get_crawled_tree(capture.uuid)
to_append: Dict[str, Union[str, List, Dict]] = {'capture_uuid': capture.uuid,
'start_timestamp': capture.timestamp.isoformat(),
'title': capture.title}
hostnodes: List[str] = []
if with_urls_occurrences:
urlnodes: Dict[str, Dict[str, str]] = {}
for hostnode in ct.root_hartree.hostname_tree.search_nodes(name=hostname):
hostnodes.append(hostnode.uuid)
if with_urls_occurrences:
for urlnode in hostnode.urls:
urlnodes[urlnode.uuid] = {'start_time': urlnode.start_time.isoformat(),
'url': urlnode.name,
'hostnode_uuid': urlnode.hostnode_uuid}
if hasattr(urlnode, 'body_hash'):
urlnodes[urlnode.uuid]['hash'] = urlnode.body_hash
to_append['hostnodes'] = hostnodes
if with_urls_occurrences:
to_append['urlnodes'] = urlnodes
to_return.append(to_append)
return to_return
def get_cookie_name_investigator(self, cookie_name: str, /) -> Tuple[List[Tuple[str, str]], List[Tuple[str, float, List[Tuple[str, float]]]]]:
'''Returns all the captures related to a cookie name entry, used in the web interface.'''
cached_captures = self.sorted_capture_cache([entry[0] for entry in self.indexing.get_cookies_names_captures(cookie_name)])
captures = [(cache.uuid, cache.title) for cache in cached_captures]
domains = [(domain, freq, self.indexing.cookies_names_domains_values(cookie_name, domain))
for domain, freq in self.indexing.get_cookie_domains(cookie_name)]
return captures, domains
def hash_lookup(self, blob_hash: str, url: str, capture_uuid: str) -> Tuple[int, Dict[str, List[Tuple[str, str, str, str, str]]]]:
'''Search all the captures in which a specific hash was seen.
If a URL is given, it splits the results depending on whether the hash was seen on the same URL or on another one.
The capture UUID avoids duplicates on the same capture.'''
captures_list: Dict[str, List[Tuple[str, str, str, str, str]]] = {'same_url': [], 'different_url': []}
total_captures, details = self.indexing.get_body_hash_captures(blob_hash, url, filter_capture_uuid=capture_uuid)
for h_capture_uuid, url_uuid, url_hostname, same_url in details:
cache = self.capture_cache(h_capture_uuid)
if cache:
if same_url:
captures_list['same_url'].append((h_capture_uuid, url_uuid, cache.title, cache.timestamp.isoformat(), url_hostname))
else:
captures_list['different_url'].append((h_capture_uuid, url_uuid, cache.title, cache.timestamp.isoformat(), url_hostname))
return total_captures, captures_list
def get_ressource(self, tree_uuid: str, /, urlnode_uuid: str, h: Optional[str]) -> Optional[Tuple[str, BytesIO, str]]:
'''Get a specific resource from a URL node. If a hash is also given, we want an embedded resource'''
try:
url = self.get_urlnode_from_tree(tree_uuid, urlnode_uuid)
except IndexError:
# unable to find the uuid, the cache is probably in a weird state.
return None
if url.empty_response:
return None
if not h or h == url.body_hash:
# we want the body
return url.filename if url.filename else 'file.bin', url.body, url.mimetype
# We want an embedded resource
if h not in url.resources_hashes:
return None
for mimetype, blobs in url.embedded_ressources.items():
for ressource_h, blob in blobs:
if ressource_h == h:
return 'embedded_ressource.bin', blob, mimetype
return None
def __misp_add_ips_to_URLObject(self, obj: URLObject, hostname_tree: HostNode) -> None:
hosts = obj.get_attributes_by_relation('host')
if hosts:
hostnodes = hostname_tree.search_nodes(name=hosts[0].value)
if hostnodes and hasattr(hostnodes[0], 'resolved_ips'):
obj.add_attributes('ip', *hostnodes[0].resolved_ips)
def __misp_add_vt_to_URLObject(self, obj: MISPObject) -> Optional[MISPObject]:
urls = obj.get_attributes_by_relation('url')
url = urls[0]
self.vt.url_lookup(url.value)
report = self.vt.get_url_lookup(url.value)
if not report:
return None
vt_obj = MISPObject('virustotal-report', standalone=False)
vt_obj.add_attribute('first-submission', value=datetime.fromtimestamp(report['attributes']['first_submission_date']), disable_correlation=True)
vt_obj.add_attribute('last-submission', value=datetime.fromtimestamp(report['attributes']['last_submission_date']), disable_correlation=True)
vt_obj.add_attribute('permalink', value=f"https://www.virustotal.com/gui/url/{report['id']}/detection", disable_correlation=True)
obj.add_reference(vt_obj, 'analysed-with')
return vt_obj
def __misp_add_urlscan_to_event(self, capture_uuid: str, visibility: str) -> Optional[MISPAttribute]:
response = self.urlscan.url_submit(self.get_info(capture_uuid), visibility)
if 'result' in response:
attribute = MISPAttribute()
attribute.value = response['result']
attribute.type = 'link'
return attribute
return None
def misp_export(self, capture_uuid: str, /, with_parent: bool=False) -> Union[List[MISPEvent], Dict[str, str]]:
'''Export a capture in MISP format. You can POST the return of this method
directly to a MISP instance and it will create an event.'''
cache = self.capture_cache(capture_uuid)
if not cache:
return {'error': 'UUID missing in cache, try again later.'}
if cache.incomplete_redirects:
ct = self._cache_capture(capture_uuid)
cache = self.capture_cache(capture_uuid)
if not cache:
return {'error': 'UUID missing in cache, try again later.'}
else:
ct = self.get_crawled_tree(capture_uuid)
event = MISPEvent()
event.info = f'Lookyloo Capture ({cache.url})'
lookyloo_link: MISPAttribute = event.add_attribute('link', f'https://{self.public_domain}/tree/{capture_uuid}') # type: ignore
if not self.is_public_instance:
lookyloo_link.distribution = 0
initial_url = URLObject(cache.url)
initial_url.comment = 'Submitted URL'
self.__misp_add_ips_to_URLObject(initial_url, ct.root_hartree.hostname_tree)
redirects: List[URLObject] = []
for nb, url in enumerate(cache.redirects):
if url == cache.url:
continue
obj = URLObject(url)
obj.comment = f'Redirect {nb}'
self.__misp_add_ips_to_URLObject(obj, ct.root_hartree.hostname_tree)
redirects.append(obj)
if redirects:
redirects[-1].comment = f'Last redirect ({nb})'
if redirects:
prec_object = initial_url
for u_object in redirects:
prec_object.add_reference(u_object, 'redirects-to')
prec_object = u_object
initial_obj = event.add_object(initial_url)
initial_obj.add_reference(lookyloo_link, 'captured-by', 'Capture on lookyloo')
for u_object in redirects:
event.add_object(u_object)
final_redirect = event.objects[-1]
screenshot: MISPAttribute = event.add_attribute('attachment', 'screenshot_landing_page.png', data=self.get_screenshot(capture_uuid), disable_correlation=True) # type: ignore
try:
fo = FileObject(pseudofile=ct.root_hartree.rendered_node.body, filename=ct.root_hartree.rendered_node.filename)
fo.comment = 'Content received for the final redirect (before rendering)'
fo.add_reference(final_redirect, 'loaded-by', 'URL loading that content')
fo.add_reference(screenshot, 'rendered-as', 'Screenshot of the page')
event.add_object(fo)
except Har2TreeError:
pass
except AttributeError:
# No `body` in rendered node
pass
if self.vt.available:
for e_obj in event.objects:
if e_obj.name != 'url':
continue
vt_obj = self.__misp_add_vt_to_URLObject(e_obj)
if vt_obj:
event.add_object(vt_obj)
if self.urlscan.available:
urlscan_attribute = self.__misp_add_urlscan_to_event(
capture_uuid,
visibility='unlisted' if (cache and cache.no_index) else 'public')
if urlscan_attribute:
event.add_attribute(**urlscan_attribute)
if with_parent and cache.parent:
parent = self.misp_export(cache.parent, with_parent)
if isinstance(parent, dict):
# Something bad happened
return parent
event.extends_uuid = parent[-1].uuid
parent.append(event)
return parent
return [event]
def get_misp_occurrences(self, capture_uuid: str, /) -> Optional[Dict[str, Set[str]]]:
if not self.misp.available:
return None
try:
ct = self.get_crawled_tree(capture_uuid)
except LookylooException:
self.logger.warning(f'Unable to get the modules responses unless the tree ({capture_uuid}) is cached.')
return None
nodes_to_lookup = ct.root_hartree.rendered_node.get_ancestors() + [ct.root_hartree.rendered_node]
to_return: Dict[str, Set[str]] = defaultdict(set)
for node in nodes_to_lookup:
hits = self.misp.lookup(node, ct.root_hartree.get_host_node_by_uuid(node.hostnode_uuid))
for event_id, values in hits.items():
if not isinstance(values, set):
continue
to_return[event_id].update(values)
return to_return
def get_hashes(self, tree_uuid: str, /, hostnode_uuid: Optional[str]=None, urlnode_uuid: Optional[str]=None) -> Set[str]:
"""Return hashes of resources.
Only tree_uuid: All the hashes
tree_uuid and hostnode_uuid: hashes of all the resources in that hostnode (including embedded resources)
tree_uuid, hostnode_uuid, and urlnode_uuid: hash of the URL node body, and embedded resources
"""
container: Union[CrawledTree, HostNode, URLNode]
if urlnode_uuid:
container = self.get_urlnode_from_tree(tree_uuid, urlnode_uuid)
elif hostnode_uuid:
container = self.get_hostnode_from_tree(tree_uuid, hostnode_uuid)
else:
container = self.get_crawled_tree(tree_uuid)
return get_resources_hashes(container)
def get_hostnames(self, tree_uuid: str, /, hostnode_uuid: Optional[str]=None, urlnode_uuid: Optional[str]=None) -> Set[str]:
"""Return all the unique hostnames:
* of a complete tree if no hostnode_uuid and urlnode_uuid are given
* of a HostNode if hostnode_uuid is given
* of a URLNode if urlnode_uuid is given
"""
if urlnode_uuid:
node = self.get_urlnode_from_tree(tree_uuid, urlnode_uuid)
return {node.hostname}
elif hostnode_uuid:
node = self.get_hostnode_from_tree(tree_uuid, hostnode_uuid)
return {node.name}
else:
ct = self.get_crawled_tree(tree_uuid)
return {node.name for node in ct.root_hartree.hostname_tree.traverse()}
def get_urls(self, tree_uuid: str, /, hostnode_uuid: Optional[str]=None, urlnode_uuid: Optional[str]=None) -> Set[str]:
"""Return all the unique URLs:
* of a complete tree if no hostnode_uuid and urlnode_uuid are given
* of a HostNode if hostnode_uuid is given
* of a URLNode if urlnode_uuid is given
"""
if urlnode_uuid:
node = self.get_urlnode_from_tree(tree_uuid, urlnode_uuid)
return {node.name}
elif hostnode_uuid:
node = self.get_hostnode_from_tree(tree_uuid, hostnode_uuid)
return {urlnode.name for urlnode in node.urls}
else:
ct = self.get_crawled_tree(tree_uuid)
return {node.name for node in ct.root_hartree.url_tree.traverse()}
def get_hostnode_investigator(self, capture_uuid: str, /, node_uuid: str) -> Tuple[HostNode, List[Dict[str, Any]]]:
'''Gather all the information needed to display the Hostnode investigator popup.'''
def _normalize_known_content(h: str, /, known_content: Dict[str, Any], url: URLNode) -> Tuple[Optional[Union[str, List[Any]]], Optional[Tuple[bool, Any]]]:
''' There are a few different sources to figure out known vs. legitimate content,
this method normalizes them for the web interface.'''
known: Optional[Union[str, List[Any]]] = None
legitimate: Optional[Tuple[bool, Any]] = None
if h not in known_content:
return known, legitimate
if known_content[h]['type'] in ['generic', 'sanejs']:
known = known_content[h]['details']
elif known_content[h]['type'] == 'legitimate_on_domain':
legit = False
if url.hostname in known_content[h]['details']:
legit = True
legitimate = (legit, known_content[h]['details'])
elif known_content[h]['type'] == 'malicious':
legitimate = (False, known_content[h]['details'])
return known, legitimate
ct = self.get_crawled_tree(capture_uuid)
hostnode = ct.root_hartree.get_host_node_by_uuid(node_uuid)
known_content = self.context.find_known_content(hostnode)
self.uwhois.query_whois_hostnode(hostnode)
urls: List[Dict[str, Any]] = []
for url in hostnode.urls:
# For the popup, we need:
# * https vs http
# * everything after the domain
# * the full URL
to_append: Dict[str, Any] = {
'encrypted': url.name.startswith('https'),
'url_path': url.name.split('/', 3)[-1],
'url_object': url,
}
if not url.empty_response:
# Index lookup
# %%% Full body %%%
freq = self.indexing.body_hash_fequency(url.body_hash)
to_append['body_hash_details'] = freq
if freq and 'hash_freq' in freq and freq['hash_freq'] and freq['hash_freq'] > 1:
to_append['body_hash_details']['other_captures'] = self.hash_lookup(url.body_hash, url.name, capture_uuid)
# %%% Embedded ressources %%%
if hasattr(url, 'embedded_ressources') and url.embedded_ressources:
to_append['embedded_ressources'] = {}
for mimetype, blobs in url.embedded_ressources.items():
for h, blob in blobs:
if h in to_append['embedded_ressources']:
# Skip duplicates
continue
freq_embedded = self.indexing.body_hash_fequency(h)
to_append['embedded_ressources'][h] = freq_embedded
to_append['embedded_ressources'][h]['body_size'] = blob.getbuffer().nbytes
to_append['embedded_ressources'][h]['type'] = mimetype
if freq_embedded['hash_freq'] > 1:
to_append['embedded_ressources'][h]['other_captures'] = self.hash_lookup(h, url.name, capture_uuid)
for h in to_append['embedded_ressources'].keys():
known, legitimate = _normalize_known_content(h, known_content, url)
if known:
to_append['embedded_ressources'][h]['known_content'] = known
elif legitimate:
to_append['embedded_ressources'][h]['legitimacy'] = legitimate
known, legitimate = _normalize_known_content(url.body_hash, known_content, url)
if known:
to_append['known_content'] = known
elif legitimate:
to_append['legitimacy'] = legitimate
# Optional: Cookies sent to server in request -> map to nodes who set the cookie in response
if hasattr(url, 'cookies_sent'):
to_display_sent: Dict[str, Set[Iterable[Optional[str]]]] = defaultdict(set)
for cookie, contexts in url.cookies_sent.items():
if not contexts:
# Locally created?
to_display_sent[cookie].add(('Unknown origin', ))
continue
for context in contexts:
to_display_sent[cookie].add((context['setter'].hostname, context['setter'].hostnode_uuid))
to_append['cookies_sent'] = to_display_sent
# Optional: Cookies received from server in response -> map to nodes who send the cookie in request
if hasattr(url, 'cookies_received'):
to_display_received: Dict[str, Dict[str, Set[Iterable[Optional[str]]]]] = {'3rd_party': defaultdict(set), 'sent': defaultdict(set), 'not_sent': defaultdict(set)}
for domain, c_received, is_3rd_party in url.cookies_received:
if c_received not in ct.root_hartree.cookies_sent:
# This cookie is never sent.
if is_3rd_party:
to_display_received['3rd_party'][c_received].add((domain, ))
else:
to_display_received['not_sent'][c_received].add((domain, ))
continue
for url_node in ct.root_hartree.cookies_sent[c_received]:
if is_3rd_party:
to_display_received['3rd_party'][c_received].add((url_node.hostname, url_node.hostnode_uuid))
else:
to_display_received['sent'][c_received].add((url_node.hostname, url_node.hostnode_uuid))
to_append['cookies_received'] = to_display_received
urls.append(to_append)
return hostnode, urls
def get_stats(self) -> Dict[str, List]:
'''Gather statistics about the lookyloo instance'''
today = date.today()
calendar_week = today.isocalendar()[1]
stats_dict = {'submissions': 0, 'submissions_with_redirects': 0, 'redirects': 0}
stats: Dict[int, Dict[int, Dict[str, Any]]] = {}
weeks_stats: Dict[int, Dict] = {}
for cache in self.sorted_capture_cache():
date_submission: datetime = cache.timestamp
if date_submission.year not in stats:
stats[date_submission.year] = {}
if date_submission.month not in stats[date_submission.year]:
stats[date_submission.year][date_submission.month] = defaultdict(dict, **stats_dict)
stats[date_submission.year][date_submission.month]['uniq_urls'] = set()
stats[date_submission.year][date_submission.month]['submissions'] += 1
stats[date_submission.year][date_submission.month]['uniq_urls'].add(cache.url)
if len(cache.redirects) > 0:
stats[date_submission.year][date_submission.month]['submissions_with_redirects'] += 1
stats[date_submission.year][date_submission.month]['redirects'] += len(cache.redirects)
stats[date_submission.year][date_submission.month]['uniq_urls'].update(cache.redirects)
if ((date_submission.year == today.year and calendar_week - 1 <= date_submission.isocalendar()[1] <= calendar_week)
or (calendar_week == 1 and date_submission.year == today.year - 1 and date_submission.isocalendar()[1] in [52, 53])):
if date_submission.isocalendar()[1] not in weeks_stats:
weeks_stats[date_submission.isocalendar()[1]] = defaultdict(dict, **stats_dict)
weeks_stats[date_submission.isocalendar()[1]]['uniq_urls'] = set()
weeks_stats[date_submission.isocalendar()[1]]['submissions'] += 1
weeks_stats[date_submission.isocalendar()[1]]['uniq_urls'].add(cache.url)
if len(cache.redirects) > 0:
weeks_stats[date_submission.isocalendar()[1]]['submissions_with_redirects'] += 1
weeks_stats[date_submission.isocalendar()[1]]['redirects'] += len(cache.redirects)
weeks_stats[date_submission.isocalendar()[1]]['uniq_urls'].update(cache.redirects)
statistics: Dict[str, List] = {'weeks': [], 'years': []}
for week_number in sorted(weeks_stats.keys()):
week_stat = weeks_stats[week_number]
urls = week_stat.pop('uniq_urls')
week_stat['week_number'] = week_number
week_stat['uniq_urls'] = len(urls)
week_stat['uniq_domains'] = len(uniq_domains(urls))
statistics['weeks'].append(week_stat)
for year in sorted(stats.keys()):
year_stats: Dict[str, Union[int, List]] = {'year': year, 'months': [], 'yearly_submissions': 0, 'yearly_redirects': 0}
for month in sorted(stats[year].keys()):
month_stats = stats[year][month]
urls = month_stats.pop('uniq_urls')
month_stats['month_number'] = month
month_stats['uniq_urls'] = len(urls)
month_stats['uniq_domains'] = len(uniq_domains(urls))
year_stats['months'].append(month_stats) # type: ignore
year_stats['yearly_submissions'] += month_stats['submissions']
year_stats['yearly_redirects'] += month_stats['redirects']
statistics['years'].append(year_stats)
return statistics
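# Rough shape of the dict returned by get_stats (derived from the code above; values
# are counts unless noted):
#   {'weeks': [{'week_number', 'submissions', 'submissions_with_redirects',
#               'redirects', 'uniq_urls', 'uniq_domains'}, ...],
#    'years': [{'year', 'yearly_submissions', 'yearly_redirects',
#               'months': [{'month_number', 'submissions', 'submissions_with_redirects',
#                           'redirects', 'uniq_urls', 'uniq_domains'}, ...]}, ...]}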
|
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
VERSION = (0, 0, 3)
__version__ = ".".join(map(str, VERSION))
def get_comment_model():
setting = getattr(settings, "TREE_COMMENTS_TREE_COMMENT_MODEL", "tree_comments.TreeComment")
try:
return django_apps.get_model(setting, require_ready=False)
except ValueError:
raise ImproperlyConfigured("TREE_COMMENTS_TREE_COMMENT_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"TREE_COMMENTS_TREE_COMMENT_MODEL refers to model '%s' that has not been installed" % setting
)
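# Minimal usage sketch (assuming this module is importable as `tree_comments`; the
# model name below is hypothetical):
#
#   # settings.py
#   TREE_COMMENTS_TREE_COMMENT_MODEL = "myapp.MyTreeComment"
#
#   # anywhere after app loading
#   from tree_comments import get_comment_model
#   TreeComment = get_comment_model()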
|
"""Util."""
import csv
import logging
import os
from decimal import ROUND_CEILING, ROUND_FLOOR, Decimal
from pathlib import Path
from typing import Any
logger = logging.getLogger(__name__)
def get_config_path() -> Path:
"""Get configuration file's path from the environment variable."""
config = os.getenv('TOM_CONFIG', '')
return Path(config)
def get_datadir() -> Path:
"""Get data directory path from the environment variable."""
config = os.getenv('TOM_DATA', '')
return Path(config)
def load_csv(path: Path) -> Any:
"""Load data from csv file."""
with open(path, newline='') as csvfile:
reader = csv.DictReader(csvfile)
items = list(reader)
return items
def round_up(amount: Decimal) -> Decimal:
"""Round number to upper with cent precision."""
return Decimal(amount.quantize(Decimal('.01'), rounding=ROUND_CEILING))
def round_down(amount: Decimal) -> Decimal:
"""Round number to smaller with cent precision."""
return Decimal(amount.quantize(Decimal('.01'), rounding=ROUND_FLOOR))
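if __name__ == '__main__':
    # Quick illustrative checks (not part of the original module): ROUND_CEILING
    # rounds toward +infinity and ROUND_FLOOR toward -infinity, at cent precision.
    assert round_up(Decimal('1.231')) == Decimal('1.24')
    assert round_down(Decimal('1.239')) == Decimal('1.23')
    assert round_up(Decimal('-1.239')) == Decimal('-1.23')
    assert round_down(Decimal('-1.231')) == Decimal('-1.24')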
|
#Dennis Durant
import re
def print_starsystems(content: str) -> list:
    """Return all star system names found in `content` (text containing "StarSystem":"..." entries)."""
    pattern = re.compile(r'"StarSystem":"(.+?)"')
    result = pattern.findall(content)
    systems = []
    if result:
        for r in result:
            systems.append(r)
    return systems
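if __name__ == '__main__':
    # Small illustrative run on synthetic journal-style content (not real game data).
    sample = '{"StarSystem":"Sol"} {"StarSystem":"Alpha Centauri"}'
    assert print_starsystems(sample) == ['Sol', 'Alpha Centauri']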
|
#!/usr/bin/env python3
import fnmatch
import os
from pathlib import Path
from time import perf_counter
import mistune
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader, StrictUndefined
from minicli import cli, run, wrap
HERE = Path(".")
environment = Env(loader=FileSystemLoader(str(HERE / "src")), undefined=StrictUndefined)
markdown = mistune.create_markdown(escape=False)
def each_markdown_from(source_dir, file_name="*.md"):
"""Walk across the `source_dir` and return the md file paths."""
for filename in fnmatch.filter(os.listdir(source_dir), file_name):
yield os.path.join(source_dir, filename), filename
def build_responses(source_dir):
"""Extract and convert markdown from a `source_dir` directory into a dict."""
responses = {}
for file_path, filename in each_markdown_from(source_dir):
html_content = markdown.read(file_path)
# Remove empty comments set to hack markdown rendering
# when we do not want paragraphs.
html_content = html_content.replace("<!---->", "")
responses[filename[: -len(".md")]] = html_content
return responses
@cli
def index():
"""Build the index with contents from markdown dedicated folder."""
template = environment.get_template("template.html")
responses = build_responses(Path("") / "contenus")
content = template.render(**responses)
with open(HERE / "src" / "index.html", "w") as output_file:
    output_file.write(content)
@wrap
def perf_wrapper():
start = perf_counter()
yield
elapsed = perf_counter() - start
print(f"Done in {elapsed:.5f} seconds.")
if __name__ == "__main__":
run()
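# Typical invocation (hypothetical script name): `python build.py index` runs the
# minicli-registered index() command, rendering src/template.html with the markdown
# found in contenus/ and writing the result to src/index.html.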
|
from imageai.Detection.Custom import DetectionModelTrainer
trainer = DetectionModelTrainer()
trainer.setModelTypeAsYOLOv3()
trainer.setDataDirectory(data_directory="Pothole")
trainer.setTrainConfig(object_names_array=["Pothole Severity Low","Pothole Severity High","Pothole Severity Medium"], batch_size=4, num_experiments=30, train_from_pretrained_model="pretrained-yolov3.h5") #download pre-trained model via https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/pretrained-yolov3.h5
trainer.trainModel()
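# Note (assumption, based on ImageAI's custom detection training convention): the
# "Pothole" data_directory is expected to contain train/ and validation/ folders,
# each with images/ and annotations/ (Pascal VOC XML) sub-folders.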
|
import numpy as np
from knn import KNN
from metrics import binary_classification_metrics, multiclass_accuracy
def calc_accuracy_multiclass(train_X,train_y, test_X, test_y,num_folds,K):
knn_classifier = KNN(k=K)
knn_classifier.fit(train_X, train_y)
predict = knn_classifier.predict(test_X)
# print('predicted', predict)
# print('real value', test_y)
accuracy = multiclass_accuracy(predict, test_y)
print("Accuracy: %4.2f" % accuracy)
return accuracy
def culc_f1_score(train_folds_X,train_folds_y, val_X, val_y,num_folds,K):
binary_train_mask = (train_folds_y == 0) | (train_folds_y == 9)
binary_train_X = train_folds_X[binary_train_mask] #test
#print('binary_train_X (new data set)=', binary_train_X.shape) #expect new_size (~121), 32,32,3
binary_train_y_test = train_folds_y[binary_train_mask]
#print('binary_train_y_test shape =', binary_train_y_test.shape)
#print('binary_train_y_test[0-10, new label set]', binary_train_y_test[0:10]) #expect 0s and 9s
#print('binary_train_y_test[0] type', type(binary_train_y_test[0])) #expect numpy.uint8
binary_train_y = train_folds_y[binary_train_mask] == 0
#print('binary_train_y shape =', binary_train_y.shape) #expect 121,
#print('binary_train_y[0:10]', binary_train_y[:10]) #expect False, True
binary_test_mask = (val_y == 0) | (val_y == 9)
binary_test_X = val_X[binary_test_mask]
#print('binary_test_X.shape =', binary_test_X.shape) #expect !16
binary_test_y = val_y[binary_test_mask] == 0
# Reshape to 1-dimensional array [num_samples, 32*32*3]
#print('binary_train_x shape before =', binary_train_X.shape) #expect 161,32,32,3
binary_train_X = binary_train_X.reshape(binary_train_X.shape[0], -1)
#print('binary_train_X.shape[0]',binary_train_X.shape[0]) #expect 121
#print('binary_train_x shape after =', binary_train_X.shape) #expect 121,32*32*3 = 3072
binary_test_X = binary_test_X.reshape(binary_test_X.shape[0], -1)
#print('binary_test_x shape after =', binary_test_X.shape) #expect 16,32*32*3 = 3072
#print('------------classify ')
# Create the classifier and call fit to train the model
# KNN just remembers all the data
knn_classifier = KNN(k=K)
knn_classifier.fit(binary_train_X, binary_train_y)
#print('----------------calculate the dists, no loops ')
dists = knn_classifier.compute_distances_no_loops(binary_test_X)
#print(dists)
#print(dists.shape)
assert np.isclose(dists[0, 10], np.sum(np.abs(binary_test_X[0] - binary_train_X[10])))
#print('----------------calculate the time ')
# Lets look at the performance difference
#%timeit knn_classifier.compute_distances_two_loops(binary_test_X)
#%timeit knn_classifier.compute_distances_one_loop(binary_test_X)
#%timeit knn_classifier.compute_distances_no_loops(binary_test_X)
prediction = knn_classifier.predict(binary_test_X)
#print('real value=', binary_test_y)
#print('predicted ',prediction)
#print('----------------calculate metrics ')
precision, recall, f1, accuracy = binary_classification_metrics(prediction, binary_test_y)
#print("KNN with k = %s" % knn_classifier.k)
#print("Accuracy: %4.2f, Precision: %4.2f, Recall: %4.2f, F1: %4.2f" % (accuracy, precision, recall, f1))
return f1 |
import pytest
from hamcrest import *
from vinyldns_python import VinylDNSClient
def test_health(shared_zone_test_context):
"""
Tests that the health check endpoint works
"""
client = shared_zone_test_context.ok_vinyldns_client
client.health()
|
import py
import tox
from tox.reporter import error, info, verbosity0, verbosity2, warning
from tox.util.lock import hold_lock
from .builder import build_package
from .local import resolve_package
from .view import create_session_view
@tox.hookimpl
def tox_package(session, venv):
"""Build an sdist at first call return that for all calls"""
if not hasattr(session, "package"):
session.package, session.dist = get_package(session)
return session.package
def get_package(session):
""""Perform the package operation"""
config = session.config
if config.skipsdist:
info("skipping sdist step")
return None
lock_file = session.config.toxworkdir.join("{}.lock".format(session.config.isolated_build_env))
with hold_lock(lock_file, verbosity0):
package = acquire_package(config, session)
session_package = create_session_view(package, config.temp_dir)
return session_package, package
def acquire_package(config, session):
"""acquire a source distribution (either by loading a local file or triggering a build)"""
if not config.option.sdistonly and (config.sdistsrc or config.option.installpkg):
path = get_local_package(config)
else:
try:
path = build_package(config, session)
except tox.exception.InvocationError as exception:
error("FAIL could not package project - v = {!r}".format(exception))
return None
sdist_file = config.distshare.join(path.basename)
if sdist_file != path:
info("copying new sdistfile to {!r}".format(str(sdist_file)))
try:
sdist_file.dirpath().ensure(dir=1)
except py.error.Error:
warning("could not copy distfile to {}".format(sdist_file.dirpath()))
else:
path.copy(sdist_file)
return path
def get_local_package(config):
path = config.option.installpkg
if not path:
path = config.sdistsrc
py_path = py.path.local(resolve_package(path))
info("using package {!r}, skipping 'sdist' activity ".format(str(py_path)))
return py_path
@tox.hookimpl
def tox_cleanup(session):
for tox_env in session.venv_dict.values():
if hasattr(tox_env, "package") and isinstance(tox_env.package, py.path.local):
package = tox_env.package
if package.exists():
verbosity2("cleanup {}".format(package))
package.remove()
py.path.local(package.dirname).remove(ignore_errors=True)
|
#!/usr/bin/python3
"""
Class Primer contains all methods needed to get the primer the user needs.
"""
class Primer:
def __init__(self):
"""\
Init method gathers the primer datafile and uses it in do_primerdict
"""
self.primerfile = "./deps/primer.data"
self.primerdict = self.do_primerdict(self.primerfile)
@staticmethod
def do_primerdict(primerfile):
"""/
Method loads the data of the primer datafileself.
This file is structured as name:sequence:amplicon
The method reads the datafile and condenses the data to a dictionary
with the primer name as key and sequence and amplicon as values
"""
primerlist=[]
primerdict={}
with open(primerfile,'r') as primers:
for line in primers:
if not line.isspace():
primerlist.append(line.strip("\n").split(':'))
for primer in primerlist:
primerdict[primer[0]] = [primer[1],primer[2]]
return primerdict
def check_input(self, name, primer, fragment):
"""/
Method checks if primer input by user is in the right formatself.
It checks all characters in the primer for invalid characters
It checks if the name of the primer is already added before
It checks if the sequence has already been added before under a
different name
It checks the length of the primer (>6)
It checks if the provided amplicon is valid (ITS1 or ITS2)
If none are violated, the method returns True. It returns False
for the first violation.
"""
goodchars = ['A','C','G','T','R','Y','W','D','S','B','V','K','H','M']
goodfrags = ['its1','its2']
for char in primer:
if char not in goodchars:
return False, "Sequence invalid"
if name in self.primerdict:
return False, "Name already present"
present_primers = [x[0] for x in self.primerdict.values()]
if primer in present_primers:
present = list(self.primerdict)[present_primers.index(primer)]
return False, "Primer already present ({primer})".format(primer=present)
if len(primer) < 6:
return False, "Primer too short, please add a sequence of >6 bases"
if fragment.lower() not in goodfrags:
return False, "Fragment not recognized. Please use ITS1 or ITS2"
return True,"Primer added"
def add_primer(self, name, primer, fragment):
"""/
Method adds primers to the primer datafile. It gets a verdict
on validity from the check_input method. If check_input returned True
the primer is written to the primer datafile and to the dictionary
containing the datafile on runtime.
"""
proper, message = self.check_input(name, primer, fragment)
if proper:
with open(self.primerfile,'a') as writefile:
writefile.write("%s:%s:%s\n"%(name, primer,fragment.upper()))
self.primerdict[name] = [primer, fragment.upper()]  # keep the runtime dict consistent with the name -> [sequence, amplicon] layout
return proper, message
def get_fragment(self, name):
"""/
Method returns the amplicon of a primer based on the primer name.
If the primer name does not exist, it returns False
"""
try:
return self.primerdict[name][1]
except KeyError:
return False
def get_primer(self, name):
"""/
Method returns sequence of a primer based on primer name. If the name
does not exist, the method returns False.
"""
try:
return self.primerdict[name][0]
except KeyError:
return False
def get_revcomp(self, name):
"""/
Method returns reverse complement of a sequence based on the complement
dictionary containing all valid nucleotides and wildcards
"""
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'R':'Y','Y':'R',
'S':'S', 'W':'W', 'K':'M','B':'T','V':'B','D':'H','H':'D'}
reverse_complement = "".join(complement.get(base, base) for base in reversed(self.primerdict[name][0]))
return reverse_complement
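if __name__ == '__main__':
    # Standalone sketch of the reverse-complement logic used in get_revcomp, run on a
    # literal sequence instead of a stored primer (so no primer.data file is needed).
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    assert "".join(complement.get(base, base) for base in reversed("ACCGT")) == "ACGGT"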
|
#!/usr/bin/env python
import xml.etree.ElementTree
from conf import breathe_default_project, breathe_projects
doxyxml_dir = breathe_projects[breathe_default_project]
index_xml = '{}/index.xml'.format(doxyxml_dir)
tree = xml.etree.ElementTree.parse(index_xml).getroot()
with open('Structs.rst', 'w') as file:
file.write('===========\n')
file.write('Structures\n')
file.write('===========\n')
file.write('\n')
for node in tree.findall('compound'):
if node.attrib['kind'] == 'struct':
name = 'unknown'
members = []
for child in list(node):
if child.tag == 'name':
name = child.text
elif child.tag == 'member':
for member in list(child):
members.append(member.text)
file.write('.. doxygenstruct:: {}\n'.format(name))
file.write(' :project: {}\n'.format(breathe_default_project))
file.write(' :members: \n')
file.write('\n')
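# The generated Structs.rst ends up looking like this (struct names come from
# Doxygen's index.xml; <project> is breathe_default_project):
#
#   ===========
#   Structures
#   ===========
#
#   .. doxygenstruct:: SomeStruct
#      :project: <project>
#      :members: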
|
def merge_dict_list(dst, src):
stack = [(dst, src)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if isinstance(current_src[key], dict) and isinstance(current_dst[key], dict) :
stack.append((current_dst[key], current_src[key]))
elif isinstance(current_src[key], list) and isinstance(current_dst[key], list) :
current_dst[key] += current_src[key]
elif isinstance(current_src[key], tuple) and isinstance(current_dst[key], tuple) :
current_dst[key] += current_src[key]
else:
current_dst[key] = current_src[key]
return dst
if __name__ == '__main__':
dst = dict(a=1,b=2,c=dict(ca=31, cc=33, cd=dict(cca=1)), d=4, f=6, g=7, l=[1,2,3])
src = dict(b='u2',c=dict(cb='u32', cd=dict(cda=dict(cdaa='u3411', cdab='u3412'))), e='u5', h=dict(i='u4321'), l=[4,5,6])
r = merge_dict_list(dst, src)
assert r is dst
assert r['a'] == 1 and r['d'] == 4 and r['f'] == 6
assert r['b'] == 'u2' and r['e'] == 'u5'
assert dst['c'] is r['c']
assert dst['c']['cd'] is r['c']['cd']
assert r['c']['cd']['cda']['cdaa'] == 'u3411'
assert r['c']['cd']['cda']['cdab'] == 'u3412'
assert r['g'] == 7
assert src['h'] is r['h']
assert r['l'] == [1,2,3,4,5,6]
from pprint import pprint
pprint(r) |
from django.db import models
# Create your models here.
class TestPlan(models.Model):
id = models.CharField(max_length=16, verbose_name='Test Plan ID', primary_key=True)
name = models.CharField(null=True, max_length=100, verbose_name='Plan Name')
comment = models.CharField(null=True, max_length=200, verbose_name='comment')
tearDown = models.CharField(max_length=8, default='true', verbose_name='tearDown_on_shutdown')
serialize = models.CharField(max_length=8, default='true', verbose_name='serialize_threadgroups')
type = models.IntegerField(default=1, verbose_name='run type, 0-Thread, 1-TPS')
schedule = models.IntegerField(default=0, verbose_name='schedule type, 0-Regular, 1-Crontab')
server_num = models.IntegerField(default=1, verbose_name='pressure server number')
target_num = models.IntegerField(default=1, verbose_name='target num')
duration = models.IntegerField(default=600, verbose_name='duration (second)')
time_setting = models.CharField(null=True, max_length=8, verbose_name='time setting run')
is_valid = models.CharField(max_length=8, verbose_name='true, false')
variables = models.JSONField(null=True, verbose_name='variables')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='Create time')
update_time = models.DateTimeField(auto_now=True, verbose_name='Update time')
operator = models.CharField(max_length=50, verbose_name='operator')
objects = models.Manager()
class Meta:
db_table = 'jmeter_test_plan'
class ThreadGroup(models.Model):
id = models.CharField(max_length=16, verbose_name='thread group id', primary_key=True)
plan = models.ForeignKey(TestPlan, on_delete=models.PROTECT, verbose_name='test plan id')
name = models.CharField(null=True, max_length=100, verbose_name='thread group name')
is_valid = models.CharField(max_length=8, verbose_name='true, false')
num_threads = models.CharField(default='${num_threads}', max_length=20, verbose_name='num_threads')
ramp_time = models.IntegerField(null=True, verbose_name='ramp_time')
duration = models.IntegerField(default=10, verbose_name='duration (seconds)')
scheduler = models.CharField(default='true', max_length=8, verbose_name='scheduler')
cookie = models.JSONField(null=True, verbose_name='cookie')
file = models.JSONField(null=True, verbose_name='CSVDataSet')
comment = models.CharField(null=True, max_length=200, verbose_name='comment')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='Create time')
update_time = models.DateTimeField(auto_now=True, verbose_name='Update time')
operator = models.CharField(max_length=50, verbose_name='operator')
objects = models.Manager()
class Meta:
db_table = 'jmeter_thread_group'
class TransactionController(models.Model):
id = models.CharField(max_length=16, verbose_name='Controller id', primary_key=True)
thread_group = models.ForeignKey(ThreadGroup, on_delete=models.CASCADE, verbose_name='thread group id')
name = models.CharField(null=True, max_length=100, verbose_name='Controller name')
is_valid = models.CharField(max_length=8, verbose_name='true, false')
comment = models.CharField(null=True, max_length=200, verbose_name='comment')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='Create time')
update_time = models.DateTimeField(auto_now=True, verbose_name='Update time')
operator = models.CharField(max_length=50, verbose_name='operator')
objects = models.Manager()
class Meta:
db_table = 'jmeter_controller'
class HTTPRequestHeader(models.Model):
id = models.CharField(max_length=16, verbose_name='http header id', primary_key=True)
name = models.CharField(null=True, max_length=100, verbose_name='name')
method = models.CharField(max_length=8, verbose_name='method')
value = models.JSONField(null=True, verbose_name='value')
comment = models.CharField(null=True, max_length=200, verbose_name='comment')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='Create time')
update_time = models.DateTimeField(auto_now=True, verbose_name='Update time')
operator = models.CharField(max_length=50, verbose_name='operator')
objects = models.Manager()
class Meta:
db_table = 'jmeter_http_header'
class HTTPSampleProxy(models.Model):
id = models.CharField(max_length=16, verbose_name='http sample id', primary_key=True)
controller = models.ForeignKey(TransactionController, on_delete=models.CASCADE, verbose_name='controller id')
name = models.CharField(null=True, max_length=100, verbose_name='http sample name')
is_valid = models.CharField(max_length=8, verbose_name='true, false')
comment = models.CharField(null=True, max_length=200, verbose_name='comment')
domain = models.CharField(null=True, max_length=20, verbose_name='domain or host')
port = models.CharField(null=True, max_length=6, verbose_name='port')
protocol = models.CharField(max_length=8, verbose_name='protocol')
path = models.CharField(null=True, max_length=64, verbose_name='path')
method = models.CharField(max_length=8, verbose_name='request method')
contentEncoding = models.CharField(null=True, max_length=8, verbose_name='contentEncoding')
argument = models.JSONField(null=True, verbose_name='request arguments')
http_header = models.ForeignKey(HTTPRequestHeader, on_delete=models.PROTECT, verbose_name='http header id')
assert_type = models.IntegerField(null=True, verbose_name='test type, 2-contain, 1-match, 8-equal')
assert_content = models.CharField(null=True, max_length=32, verbose_name='test strings')
extractor = models.JSONField(null=True, verbose_name='post extractor')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='Create time')
update_time = models.DateTimeField(auto_now=True, verbose_name='Update time')
operator = models.CharField(max_length=50, verbose_name='operator')
objects = models.Manager()
class Meta:
db_table = 'jmeter_http_sample'
class PerformanceTestTask(models.Model):
id = models.CharField(max_length=16, verbose_name='task id', primary_key=True)
plan = models.ForeignKey(TestPlan, on_delete=models.CASCADE, verbose_name='plan id')
number_samples = models.IntegerField(default=1, verbose_name='number of http samples')
ratio = models.FloatField(verbose_name='ratio, target_num * ratio')
status = models.IntegerField(verbose_name='status, 0-pending run, 1-running, 2-success, 3-stop, 4-failure')
samples = models.IntegerField(default=0, verbose_name='# Samples')
average_rt = models.FloatField(null=True, verbose_name='Average Response Time (ms)')
tps = models.FloatField(null=True, verbose_name='TPS (/s)')
error = models.FloatField(default=0, verbose_name='error(%)')
path = models.CharField(null=True, max_length=64, verbose_name='all files using test, *.zip')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='Create time')
update_time = models.DateTimeField(auto_now=True, verbose_name='Update time')
start_time = models.DateTimeField(null=True, verbose_name='task start time')
end_time = models.DateTimeField(null=True, verbose_name='task end time')
operator = models.CharField(max_length=50, verbose_name='operator')
objects = models.Manager()
class Meta:
db_table = 'jmeter_test_task'
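# Relationship sketch derived from the ForeignKey fields above:
#   TestPlan 1--* ThreadGroup 1--* TransactionController 1--* HTTPSampleProxy
#   HTTPSampleProxy *--1 HTTPRequestHeader
#   TestPlan 1--* PerformanceTestTask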
|
from tqdm import tqdm
import csv
import logging
import multiprocessing as mp
import os
import shutil
import time
import types
from typing import List
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from mpi4py.MPI import COMM_WORLD
from mpi4py.futures import MPIPoolExecutor
import gym
import numpy as np
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from stable_baselines import PPO1
from stable_baselines.bench import Monitor
from stable_baselines.common.callbacks import BaseCallback
from stable_baselines.common.policies import FeedForwardPolicy
from stable_baselines.results_plotter import load_results, ts2xy
from simulator import network
from simulator.constants import BYTES_PER_PACKET
from simulator.trace import generate_trace, Trace, generate_traces
from common.utils import set_tf_loglevel, pcc_aurora_reward
from plot_scripts.plot_packet_log import PacketLog, plot
from udt_plugins.testing.loaded_agent import LoadedModel
if type(tf.contrib) != types.ModuleType: # if it is LazyLoader
tf.contrib._warning = None
set_tf_loglevel(logging.FATAL)
class MyMlpPolicy(FeedForwardPolicy):
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch,
reuse=False, **_kwargs):
super(MyMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env,
n_steps, n_batch, reuse, net_arch=[
{"pi": [32, 16], "vf": [32, 16]}],
feature_extraction="mlp", **_kwargs)
class SaveOnBestTrainingRewardCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using
``EvalCallback``).
:param check_freq: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
It must contain the file created by the ``Monitor`` wrapper.
:param verbose: (int)
"""
def __init__(self, itx, aurora, check_freq: int, log_dir: str, val_traces: List = [],
verbose=0, steps_trained=0, config_file=None, tot_trace_cnt=100, total_timesteps=1):
super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
self.itx = itx
self.progress_bar = tqdm(total=total_timesteps)
self.aurora = aurora
self.check_freq = check_freq
self.log_dir = log_dir
# self.save_path = os.path.join(log_dir, 'saved_models')
self.save_path = log_dir
self.best_mean_reward = -np.inf
self.val_traces = val_traces
self.config_file = config_file
self.tot_trace_cnt=tot_trace_cnt
if self.aurora.comm.Get_rank() == 0:
self.val_log_writer = csv.writer(
open(os.path.join(log_dir, 'validation_log_{}.csv'.format(itx)), 'w', 1),
delimiter='\t', lineterminator='\n')
self.val_log_writer.writerow(
['n_calls', 'num_timesteps', 'mean_validation_reward', 'loss',
'throughput', 'latency', 'sending_rate', 'tot_t_used(min)',
'val_t_used(min)', 'train_t_used(min)'])
else:
self.val_log_writer = None
self.best_val_reward = -np.inf
self.val_times = 0
self.t_start = time.time()
self.prev_t = time.time()
self.steps_trained = steps_trained
def _init_callback(self) -> None:
# Create folder if needed
if self.save_path is not None:
os.makedirs(self.save_path, exist_ok=True)
def _on_step(self) -> bool:
# print("Steps trained = {}".format(self.steps_trained))
self.progress_bar.update(1)
self.steps_trained += 1
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
# x, y = ts2xy(load_results(self.log_dir), 'timesteps')
# if len(x) > 0:
# # Mean training reward over the last 100 episodes
# mean_reward = np.mean(y[-100:])
# if self.verbose > 0:
# print("Num timesteps: {}".format(self.num_timesteps))
# print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(
# self.best_mean_reward, mean_reward))
#
# # New best model, you could save the agent here
# if mean_reward > self.best_mean_reward:
# self.best_mean_reward = mean_reward
# # Example for saving best model
# if self.verbose > 0:
# print("Saving new best model to {}".format(self.save_path))
# # self.model.save(self.save_path)
if self.aurora.comm.Get_rank() == 0 and self.val_log_writer is not None:
with self.model.graph.as_default():
saver = tf.train.Saver()
saver.save(
self.model.sess, os.path.join(self.save_path, "bo_{}_model_step_{}.ckpt".format(self.itx, self.n_calls)))
avg_tr_bw = []
avg_tr_min_rtt = []
avg_tr_loss = []
avg_rewards = []
avg_losses = []
avg_tputs = []
avg_delays = []
avg_send_rates = []
val_start_t = time.time()
for idx, val_trace in tqdm(enumerate(self.val_traces), total=len(self.val_traces)):
# print(np.mean(val_trace.bandwidths))
avg_tr_bw.append(val_trace.avg_bw)
avg_tr_min_rtt.append(val_trace.avg_bw)
ts_list, val_rewards, loss_list, tput_list, delay_list, \
send_rate_list, action_list, obs_list, mi_list, pkt_log = self.aurora.test(
val_trace, self.log_dir)
# pktlog = PacketLog.from_log(pkt_log)
avg_rewards.append(np.mean(np.array(val_rewards)))
avg_losses.append(np.mean(np.array(loss_list)))
avg_tputs.append(float(np.mean(np.array(tput_list))))
avg_delays.append(np.mean(np.array(delay_list)))
avg_send_rates.append(
float(np.mean(np.array(send_rate_list))))
# avg_rewards.append(pktlog.get_reward())
# avg_losses.append(pktlog.get_loss_rate())
# avg_tputs.append(np.mean(pktlog.get_throughput()[1]))
# avg_delays.append(np.mean(pktlog.get_rtt()[1]))
# avg_send_rates.append(np.mean(pktlog.get_sending_rate()[1]))
cur_t = time.time()
self.val_log_writer.writerow(
map(lambda t: "%.3f" % t,
[float(self.n_calls), float(self.num_timesteps),
np.mean(np.array(avg_rewards)),
np.mean(np.array(avg_losses)),
np.mean(np.array(avg_tputs)),
np.mean(np.array(avg_delays)),
np.mean(np.array(avg_send_rates)),
(cur_t - self.t_start) / 60,
(cur_t - val_start_t) / 60, (val_start_t - self.prev_t) / 60]))
self.prev_t = cur_t
return True
def save_model_to_serve(model, export_dir):
if os.path.exists(export_dir):
shutil.rmtree(export_dir)
with model.graph.as_default():
pol = model.policy_pi # act_model
obs_ph = pol.obs_ph
act = pol.deterministic_action
sampled_act = pol.action
obs_input = tf.saved_model.utils.build_tensor_info(obs_ph)
outputs_tensor_info = tf.saved_model.utils.build_tensor_info(act)
stochastic_act_tensor_info = tf.saved_model.utils.build_tensor_info(
sampled_act)
signature = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"ob": obs_input},
outputs={"act": outputs_tensor_info,
"stochastic_act": stochastic_act_tensor_info},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
signature_map = {tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature}
model_builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
model_builder.add_meta_graph_and_variables(
model.sess, tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map=signature_map,
clear_devices=True)
model_builder.save(as_text=True)
class Aurora():
cc_name = 'aurora'
def __init__(self, seed: int, log_dir: str, timesteps_per_actorbatch: int,
pretrained_model_path=None, gamma: float = 0.99,
tensorboard_log=None, delta_scale=1):
init_start = time.time()
self.comm = COMM_WORLD
self.delta_scale = delta_scale
self.seed = seed
self.log_dir = log_dir
self.pretrained_model_path = pretrained_model_path
self.steps_trained = 0
dummy_trace = generate_trace(
(10, 10), (2, 2), (2, 2), (50, 50), (0, 0), (1, 1), (0, 0), (0, 0))
env = gym.make('PccNs-v0', traces=[dummy_trace],
train_flag=True, delta_scale=self.delta_scale)
# Load pretrained model
# print('create_dummy_env,{}'.format(time.time() - init_start))
if pretrained_model_path is not None:
if pretrained_model_path.endswith('.ckpt'):
model_create_start = time.time()
self.model = PPO1(MyMlpPolicy, env, verbose=1, seed=seed,
optim_stepsize=0.001, schedule='constant',
timesteps_per_actorbatch=timesteps_per_actorbatch,
optim_batchsize=int(
timesteps_per_actorbatch/12),
optim_epochs=12, gamma=gamma,
tensorboard_log=tensorboard_log,
n_cpu_tf_sess=1)
# print('create_ppo1,{}'.format(time.time() - model_create_start))
tf_restore_start = time.time()
with self.model.graph.as_default():
saver = tf.train.Saver()
saver.restore(self.model.sess, pretrained_model_path)
try:
self.steps_trained = int(os.path.splitext(
pretrained_model_path)[0].split('_')[-1])
except:
self.steps_trained = 0
# print('tf_restore,{}'.format(time.time()-tf_restore_start))
else:
# model is a tensorflow model to serve
self.model = LoadedModel(pretrained_model_path)
else:
self.model = PPO1(MyMlpPolicy, env, verbose=1, seed=seed,
optim_stepsize=0.001, schedule='constant',
timesteps_per_actorbatch=timesteps_per_actorbatch,
optim_batchsize=int(timesteps_per_actorbatch/12),
optim_epochs=12, gamma=gamma,
tensorboard_log=tensorboard_log, n_cpu_tf_sess=1)
self.timesteps_per_actorbatch = timesteps_per_actorbatch
def train(self, itx, config_file,
# training_traces, validation_traces,
total_timesteps, tot_trace_cnt,
tb_log_name=""):
assert isinstance(self.model, PPO1)
training_traces = generate_traces(config_file, tot_trace_cnt,
duration=30, constant_bw=False)
for i, train_trace in enumerate(training_traces):
from pathlib import Path
Path(os.path.join(self.log_dir, "train_trace")).mkdir(exist_ok=True, parents=True)
train_trace.dump(os.path.join(self.log_dir, "train_trace", "%03d.json"%i))
# generate validation traces
validation_traces = generate_traces(
config_file, 20, duration=30, constant_bw=False)
env = gym.make('PccNs-v0', traces=training_traces,
train_flag=True, delta_scale=self.delta_scale, config_file=config_file)
env.seed(self.seed)
# env = Monitor(env, self.log_dir)
self.model.set_env(env)
# Create the callback: check every n steps and save best model
callback = SaveOnBestTrainingRewardCallback(itx,
self, check_freq=self.timesteps_per_actorbatch, log_dir=self.log_dir,
steps_trained=self.steps_trained, val_traces=validation_traces,
config_file=config_file, tot_trace_cnt=tot_trace_cnt, total_timesteps=total_timesteps)
self.model.learn(total_timesteps=total_timesteps,
tb_log_name=tb_log_name, callback=callback)
def test_on_traces(self, traces: List[Trace], save_dirs: List[str]):
results = []
pkt_logs = []
for trace, save_dir in zip(traces, save_dirs):
ts_list, reward_list, loss_list, tput_list, delay_list, \
send_rate_list, action_list, obs_list, mi_list, pkt_log = self.test(
trace, save_dir)
result = list(zip(ts_list, reward_list, send_rate_list, tput_list,
delay_list, loss_list, action_list, obs_list, mi_list))
pkt_logs.append(pkt_log)
results.append(result)
return results, pkt_logs
# results = []
# pkt_logs = []
# n_proc=mp.cpu_count()//2
# arguments = [(self.pretrained_model_path, trace, save_dir, self.seed) for trace, save_dir in zip(traces, save_dirs)]
# with mp.Pool(processes=n_proc) as pool:
# for ts_list, reward_list, loss_list, tput_list, delay_list, \
# send_rate_list, action_list, obs_list, mi_list, pkt_log in pool.starmap(test_model, arguments):
# result = list(zip(ts_list, reward_list, send_rate_list, tput_list,
# delay_list, loss_list, action_list, obs_list, mi_list))
# pkt_logs.append(pkt_log)
# results.append(result)
# return results, pkt_logs
# results = []
# pkt_logs = []
# with MPIPoolExecutor(max_workers=4) as executor:
# iterable = ((trace, save_dir) for trace, save_dir in zip(traces, save_dirs))
# for ts_list, reward_list, loss_list, tput_list, delay_list, \
# send_rate_list, action_list, obs_list, mi_list, pkt_log in executor.starmap(self.test, iterable):
# result = list(zip(ts_list, reward_list, send_rate_list, tput_list,
# delay_list, loss_list, action_list, obs_list, mi_list))
# pkt_logs.append(pkt_log)
# results.append(result)
# return results, pkt_logs
# results = []
# pkt_logs = []
# size = self.comm.Get_size()
# count = int(len(traces) / size)
# remainder = int(len(traces) % size)
# rank = self.comm.Get_rank()
# start = rank * count + min(rank, remainder)
# stop = (rank + 1) * count + min(rank + 1, remainder)
# for i in range(start, stop):
# ts_list, reward_list, loss_list, tput_list, delay_list, \
# send_rate_list, action_list, obs_list, mi_list, pkt_log = self.test(
# traces[i], save_dirs[i])
# result = list(zip(ts_list, reward_list, send_rate_list, tput_list,
# delay_list, loss_list, action_list, obs_list, mi_list))
# pkt_logs.append(pkt_log)
# results.append(result)
# results = self.comm.gather(results, root=0)
# pkt_logs = self.comm.gather(pkt_logs, root=0)
# # need to call reduce to retrieve all return values
# return results, pkt_logs
def save_model(self):
raise NotImplementedError
def load_model(self):
raise NotImplementedError
def test(self, trace: Trace, save_dir: str, plot_flag=False):
reward_list = []
loss_list = []
tput_list = []
delay_list = []
send_rate_list = []
ts_list = []
action_list = []
mi_list = []
obs_list = []
os.makedirs(save_dir, exist_ok=True)
with open(os.path.join(save_dir, 'aurora_simulation_log.csv'), 'w', 1) as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(['timestamp', "target_send_rate", "send_rate",
'recv_rate', 'max_recv_rate', 'latency',
'loss', 'reward', "action", "bytes_sent",
"bytes_acked", "bytes_lost", "MI",
"send_start_time",
"send_end_time", 'recv_start_time',
'recv_end_time', 'latency_increase',
"packet_size", 'min_lat', 'sent_latency_inflation',
'latency_ratio', 'send_ratio',
'bandwidth', "queue_delay",
'packet_in_queue', 'queue_size', 'cwnd',
'ssthresh', "rto", "recv_ratio", "srtt"])
env = gym.make(
'PccNs-v0', traces=[trace], delta_scale=self.delta_scale)
env.seed(self.seed)
obs = env.reset()
# print(obs)
# heuristic = my_heuristic.MyHeuristic()
while True:
pred_start = time.time()
if isinstance(self.model, LoadedModel):
obs = obs.reshape(1, -1)
action = self.model.act(obs)
action = action['act'][0]
else:
if env.net.senders[0].got_data:
action, _states = self.model.predict(
obs, deterministic=True)
else:
action = np.array([0])
# print("pred,{}".format(time.time() - pred_start))
# print(env.senders[0].rate * 1500 * 8 / 1e6)
# get the new MI and stats collected in the MI
# sender_mi = env.senders[0].get_run_data()
sender_mi = env.senders[0].history.back() #get_run_data()
# if env.net.senders[0].got_data:
# action = heuristic.step(obs, sender_mi)
# # action = my_heuristic.stateless_step(env.senders[0].send_rate,
# # env.senders[0].avg_latency, env.senders[0].lat_diff, env.senders[0].start_stage,
# # env.senders[0].max_tput, env.senders[0].min_rtt, sender_mi.rtt_samples[-1])
# # action = my_heuristic.stateless_step(*obs)
# else:
# action = np.array([0])
# max_recv_rate = heuristic.max_tput
max_recv_rate = env.senders[0].max_tput
throughput = sender_mi.get("recv rate") # bits/sec
send_rate = sender_mi.get("send rate") # bits/sec
latency = sender_mi.get("avg latency")
loss = sender_mi.get("loss ratio")
avg_queue_delay = sender_mi.get('avg queue delay')
sent_latency_inflation = sender_mi.get('sent latency inflation')
latency_ratio = sender_mi.get('latency ratio')
send_ratio = sender_mi.get('send ratio')
recv_ratio = sender_mi.get('recv ratio')
reward = pcc_aurora_reward(
throughput / 8 / BYTES_PER_PACKET, latency, loss,
np.mean(trace.bandwidths) * 1e6 / 8 / BYTES_PER_PACKET, np.mean(trace.delays) * 2/ 1e3)
writer.writerow([
env.net.get_cur_time(), round(env.senders[0].rate * BYTES_PER_PACKET * 8, 0),
round(send_rate, 0), round(throughput, 0), round(max_recv_rate), latency, loss,
reward, action.item(), sender_mi.bytes_sent, sender_mi.bytes_acked,
sender_mi.bytes_lost, sender_mi.send_end - sender_mi.send_start,
sender_mi.send_start, sender_mi.send_end,
sender_mi.recv_start, sender_mi.recv_end,
sender_mi.get('latency increase'), sender_mi.packet_size,
sender_mi.get('conn min latency'), sent_latency_inflation,
latency_ratio, send_ratio,
env.links[0].get_bandwidth(
env.net.get_cur_time()) * BYTES_PER_PACKET * 8,
avg_queue_delay, env.links[0].pkt_in_queue, env.links[0].queue_size,
env.senders[0].cwnd, env.senders[0].ssthresh, env.senders[0].rto, recv_ratio, env.senders[0].estRTT])
reward_list.append(reward)
loss_list.append(loss)
delay_list.append(latency * 1000)
tput_list.append(throughput / 1e6)
send_rate_list.append(send_rate / 1e6)
ts_list.append(env.net.get_cur_time())
action_list.append(action.item())
mi_list.append(sender_mi.send_end - sender_mi.send_start)
obs_list.append(obs.tolist())
step_start = time.time()
obs, rewards, dones, info = env.step(action)
# print("step,{}".format(time.time() - step_start))
if dones:
break
with open(os.path.join(save_dir, "aurora_packet_log.csv"), 'w', 1) as f:
pkt_logger = csv.writer(f, lineterminator='\n')
pkt_logger.writerow(['timestamp', 'packet_event_id', 'event_type',
'bytes', 'cur_latency', 'queue_delay',
'packet_in_queue', 'sending_rate', 'bandwidth'])
pkt_logger.writerows(env.net.pkt_log)
if plot_flag:
pkt_log = PacketLog.from_log(env.net.pkt_log)
plot(trace, pkt_log, save_dir, "aurora")
return ts_list, reward_list, loss_list, tput_list, delay_list, send_rate_list, action_list, obs_list, mi_list, env.net.pkt_log
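# Rough driver sketch (hypothetical paths and numbers, derived from the class API above):
#
#   aurora = Aurora(seed=42, log_dir="results", timesteps_per_actorbatch=7200)
#   aurora.train(itx=0, config_file="config.json", total_timesteps=72000, tot_trace_cnt=100)
#   results, pkt_logs = aurora.test_on_traces(traces, save_dirs)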
|
import math
import random
import cv2
import numpy as np
from ..utils import *
def seg_augmentation_wo_kpts(img, seg):
img_h, img_w = img.shape[:2]
fg_mask = seg.copy()
coords1 = np.where(fg_mask)
img_top, img_bot = np.min(coords1[0]), np.max(coords1[0])
shift_range_ratio = 0.2
down_shift = not fg_mask[0, :].any()
if down_shift:
down_space = int((img_h - img_top) * shift_range_ratio)
old_bot = img_h
down_offset = random.randint(0, down_space)
old_bot -= down_offset
old_top = 0
cut_height = old_bot - old_top
new_bot = img_h
new_top = new_bot - cut_height
else:
old_bot, old_top = img_h, 0
new_bot, new_top = old_bot, old_top
coords2 = np.where(fg_mask[old_top:old_bot, :])
img_left, img_right = np.min(coords2[1]), np.max(coords2[1])
left_shift = not fg_mask[old_top:old_bot, -1].any()
right_shift = not fg_mask[old_top:old_bot, 0].any()
if left_shift and right_shift:
if random.random() > 0.5:
right_shift = False
else:
left_shift = False
if left_shift:
left_space = int(img_right * shift_range_ratio)
old_left = 0
left_offset = random.randint(0, left_space)
old_left += left_offset
old_right = img_w
cut_width = old_right - old_left
new_left = 0
new_right = new_left + cut_width
if right_shift:
right_space = int((img_w - img_left) * shift_range_ratio)
old_right = img_w
right_offset = random.randint(0, right_space)
old_right -= right_offset
old_left = 0
cut_width = old_right - old_left
new_right = img_w
new_left = new_right - cut_width
if not (left_shift or right_shift):
old_left, old_right = 0, img_w
new_left, new_right = old_left, old_right
img_new = np.zeros_like(img)
seg_new = np.zeros_like(seg)
img_new[new_top:new_bot, new_left:new_right] = img[
old_top:old_bot, old_left:old_right
]
seg_new[new_top:new_bot, new_left:new_right] = seg[
old_top:old_bot, old_left:old_right
]
return img_new, seg_new
def random_bg_augment(img, img_path="", brightness_aug=True, flip_aug=True):
if brightness_aug:
brightness_val = random.randint(50, 225)
img = change_mean_brightness(img, None, brightness_val, 20, img_path)
img = img.astype("uint8")
if flip_aug:
do_flip = bool(random.getrandbits(1))
if do_flip:
img = cv2.flip(img, 1)
return img
def resize_bg(fg_shape, bg_img):
fg_h, fg_w = fg_shape[:2]
bg_h, bg_w = bg_img.shape[:2]
if bg_h < fg_h or bg_w < fg_w:
fb_h_ratio = float(fg_h) / bg_h
fb_w_ratio = float(fg_w) / bg_w
bg_resize_ratio = max(fb_h_ratio, fb_w_ratio)
bg_img = cv2.resize(
bg_img,
(
int(math.ceil(bg_img.shape[1] * bg_resize_ratio)),
int(math.ceil(bg_img.shape[0] * bg_resize_ratio)),
),
)
bg_h, bg_w = bg_img.shape[:2]
bg_h_offset_range = max(bg_h - fg_h, 0)
bg_w_offset_range = max(bg_w - fg_w, 0)
bg_h_offset = random.randint(0, bg_h_offset_range)
bg_w_offset = random.randint(0, bg_w_offset_range)
bg_img = bg_img[
bg_h_offset : bg_h_offset + fg_h, bg_w_offset : bg_w_offset + fg_w, :3
]
return bg_img
def add_alpha_image_to_bg(alpha_img, bg_img):
alpha_s = np.repeat((alpha_img[:, :, 3] / 255.0)[:, :, np.newaxis], 3, axis=2)
alpha_l = 1.0 - alpha_s
combined_img = np.multiply(alpha_s, alpha_img[:, :, :3]) + np.multiply(
alpha_l, bg_img
)
return combined_img
def add_alpha_border(hand_img):
fg_mask = (hand_img[:, :, -1] == 0).astype(np.uint8)
fg_mask = cv2.dilate(fg_mask, np.ones((3, 3)))
alpha_mask = fg_mask * 255
alpha_mask = 255 - cv2.GaussianBlur(alpha_mask, (7, 7), 0)
hand_img[:, :, -1] = alpha_mask
hand_seg = alpha_mask > 200
hand_all_seg = alpha_mask > 0
return hand_img, hand_seg, hand_all_seg
def merge_hands(top_hand_img, bot_hand_img, bg_img, bg_resize=True):
if top_hand_img is not None and bot_hand_img is not None:
bot_hand_img, _, _ = add_alpha_border(bot_hand_img)
top_hand_img, _, _ = add_alpha_border(top_hand_img)
bg_img_resized = resize_bg(bot_hand_img.shape, bg_img) if bg_resize else bg_img
combined_hand_img = add_alpha_image_to_bg(bot_hand_img, bg_img_resized)
combined_hand_img = add_alpha_image_to_bg(top_hand_img, combined_hand_img)
    else:
        # only one hand image was provided; composite whichever one is not None
        single_hand_img = top_hand_img if top_hand_img is not None else bot_hand_img
        single_hand_img, _, _ = add_alpha_border(single_hand_img)
        bg_img_resized = resize_bg(single_hand_img.shape, bg_img) if bg_resize else bg_img
        combined_hand_img = add_alpha_image_to_bg(single_hand_img, bg_img_resized)
return combined_hand_img, bg_img_resized
def change_mean_brightness(img, seg, brightness_val, jitter_range=20, img_path=""):
if seg is not None:
old_mean_val = np.mean(img[seg])
else:
old_mean_val = np.mean(img)
assert old_mean_val != 0, f"ERROR: {img_path} has mean of 0"
new_mean_val = brightness_val + random.uniform(-jitter_range / 2, jitter_range / 2)
    # multiply out-of-place so integer (e.g. uint8) images are promoted to float instead of
    # raising a casting error from the in-place multiply
    img = img * (new_mean_val / old_mean_val)
img = np.clip(img, 0, 255)
return img
def random_smoothness(img, smooth_rate=0.3):
smooth_rate_tick = smooth_rate / 5
rand_val = random.random()
if rand_val < smooth_rate:
if rand_val < smooth_rate_tick:
kernel_size = 3
elif rand_val < smooth_rate_tick * 2:
kernel_size = 5
elif rand_val < smooth_rate_tick * 3:
kernel_size = 7
elif rand_val < smooth_rate_tick * 4:
kernel_size = 9
else:
kernel_size = 11
img[:, :, :3] = cv2.blur(img[:, :, :3], (kernel_size, kernel_size))
return img
def normalize_tensor(tensor, mean, std):
for t in tensor:
t.sub_(mean).div_(std)
return tensor
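# Usage sketch for normalize_tensor (assumption: `tensor` is an iterable of torch.Tensors,
# e.g. a batched image tensor where iterating yields one sample at a time, and mean/std are
# scalars or broadcastable tensors):
#   normalize_tensor(batch, mean=0.5, std=0.25)   # applies (x - mean) / std in place per sample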
def gen_e2h_eval_mask(left_seg_path, right_seg_path, save_path):
left_seg = cv2.imread(left_seg_path, cv2.IMREAD_GRAYSCALE)
right_seg = cv2.imread(right_seg_path, cv2.IMREAD_GRAYSCALE)
seg = np.zeros_like(left_seg)
seg[left_seg > 0] = 255
seg[right_seg > 0] = 127
cv2.imwrite(save_path, seg)
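# Minimal smoke test (assumption: this module already imports numpy as np, cv2, and random at
# the top, as the functions above require). It runs the brightness/flip and smoothing
# augmentations on a synthetic image so the pipeline can be sanity-checked without real data.
if __name__ == "__main__":
    demo_img = np.random.randint(0, 255, size=(64, 64, 3)).astype("uint8")
    demo_img = random_bg_augment(demo_img)
    demo_img = random_smoothness(demo_img)
    print("augmented demo image:", demo_img.shape, demo_img.dtype)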
|
from __future__ import print_function
# ensure print() behaves as a function on both Python 2 and Python 3
#declare two numbers
number1 = 5
number2 = 10
#perform addition of two numbers
answer = number1 + number2
#print answer
print("Answer is:"+str(answer))
|
##Copyright (c) 2014 - 2020, The Trustees of Indiana University.
##
##Licensed under the Apache License, Version 2.0 (the "License");
##you may not use this file except in compliance with the License.
##You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
##Unless required by applicable law or agreed to in writing, software
##distributed under the License is distributed on an "AS IS" BASIS,
##WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##See the License for the specific language governing permissions and
##limitations under the License.
#!/usr/bin/python3
import os
import random
import numpy
import pandas
import sklearn.model_selection
from sklearn.utils import class_weight
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from random import shuffle
import seaborn as sns
def Hdf5_generator(X, y, batch_size, nsamples):
start_idx = 0
while True:
if start_idx + batch_size > nsamples:
start_idx = 0
x_batch = X[start_idx:start_idx+batch_size, ...][:,:,0:5]
y_batch = y[start_idx:start_idx+batch_size, ...]
start_idx += batch_size
yield numpy.asarray(x_batch), numpy.asarray(y_batch)
def get_class_weight(y_train):
label_weights = class_weight.compute_class_weight('balanced', numpy.unique(y_train), y_train)
class_weight_dict = dict(enumerate(label_weights))
return class_weight_dict
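# Usage sketch (assumption: X_train / y_train are in-memory arrays or h5py datasets that
# support slicing, and `model` is a Keras-style model defined elsewhere):
#   gen = Hdf5_generator(X_train, y_train, batch_size=32, nsamples=len(y_train))
#   weights = get_class_weight(y_train)
#   model.fit_generator(gen, steps_per_epoch=len(y_train) // 32, epochs=10, class_weight=weights)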
def get_lables_train_valid_filenames(data_dir):
label_file = os.path.join(data_dir, "label.csv")
file_data = pandas.read_csv(label_file, delimiter=',', header=None)
data_files = file_data[0].tolist()
labels = file_data[1].tolist()
train_data_files, validation_data_files, train_labels, validation_labels = sklearn.model_selection.train_test_split(data_files, labels, test_size=0.2, shuffle=False)
return train_data_files, validation_data_files, train_labels, validation_labels
def split_data_single(data_files, labels):
train_data_files = data_files[0:int(0.8*len(data_files))]
train_labels = labels[0:int(0.8*len(labels))]
validation_data_files = data_files[int(0.8*len(data_files)):]
validation_labels = labels[int(0.8*len(labels)):]
test_data_files = []
test_labels = []
return train_data_files, validation_data_files, test_data_files, train_labels, validation_labels, test_labels
def split_data_single_2(data_files, labels):
train_data_files = []
train_labels = []
validation_data_files = []
validation_labels = []
test_data_files = data_files
test_labels = labels
return train_data_files, validation_data_files, test_data_files, train_labels, validation_labels, test_labels
def split_data(data_files, labels):
train_data_files = data_files[0:int(0.6*len(data_files))]
train_labels = labels[0:int(0.6*len(labels))]
validation_data_files = data_files[int(0.6*len(data_files)):int(0.8*len(data_files))]
validation_labels = labels[int(0.6*len(labels)):int(0.8*len(labels))]
test_data_files = data_files[int(0.8*len(data_files)):]
test_labels = labels[int(0.8*len(labels)):]
return train_data_files, validation_data_files, test_data_files, train_labels, validation_labels, test_labels
def split_data_train(data_files, labels):
train_data_files = data_files[0:int(0.8*len(data_files))]
train_labels = labels[0:int(0.8*len(labels))]
validation_data_files = data_files[int(0.8*len(data_files)):]
validation_labels = labels[int(0.8*len(labels)):]
return train_data_files, validation_data_files, train_labels, validation_labels
def shuffle_data(labels_dict):
data_files = list(labels_dict.keys())
labels = list(labels_dict.values())
c = list(zip(data_files, labels))
shuffle(c)
data_files, labels = zip(*c)
return list(data_files), list(labels)
def shuffle_data_test(labels_dict):
data_files = list(labels_dict.keys())
labels = list(labels_dict.values())
c = list(zip(data_files, labels))
#shuffle(c)
data_files, labels = zip(*c)
return list(data_files), list(labels)
def get_param_dict(data_dir):
param_file = os.path.join(data_dir, "parameters.csv")
params_pd = pandas.read_csv(param_file, delimiter=',', header=None)
params = dict([(i,[a,b,c]) for i, a,b,c in zip(params_pd[0], params_pd[1],params_pd[2],params_pd[3])])
return params
def get_label_dict(data_dir):
label_file = os.path.join(data_dir, "label.csv")
label_pd = pandas.read_csv(label_file, delimiter=',', header=None)
labels = dict([(i,a) for i, a in zip(label_pd[0], label_pd[1])])
return labels
def shortlist_dictionaries(params, labels):
rm_keys = []
for k, v in params.items():
if v[1] == "C" or v[1] == "Z+1" or v[1] == "A" or v[1] == "X" or v[1] == "B-Water" or v[1] == "Y-Water" or v[1] == "B-Ammonia" or v[1] == "Y-Ammonia" or v[1] == "B-1" or v[1] == "Y-1" or v[1] == "B+1" or v[1] == "Y+1":
rm_keys.append(k)
for k in rm_keys:
del params[k]
del labels[k]
def shuffle_split_data(a, b):
combined = list(zip(a, b))
random.shuffle(combined)
a[:], b[:] = zip(*combined)
def update_dict(data_dir, params):
new_keys = []
for k, v in params.items():
new_keys.append(os.path.join(data_dir, k[2:]))
d1 = dict( zip( list(params.keys()), new_keys) )
return {d1[oldK]: value for oldK, value in params.items()}
def create_output_directory(dir_name):
ouput_dir = os.path.join(os.getcwd(), dir_name)
    if not os.path.isdir(ouput_dir):
os.mkdir(ouput_dir)
return ouput_dir
def shortlist_data(test_data, test_labels, test_params, anno):
shortlisted_test_data = []
shortlisted_test_labels = []
for idx in range(0, len(test_labels)):
param = test_params[idx].split(',')
if param[2] == anno:
shortlisted_test_data.append(test_data[idx])
shortlisted_test_labels.append(test_labels[idx])
x_train = numpy.stack(shortlisted_test_data)
y_train = numpy.array(shortlisted_test_labels)
return x_train, y_train
def print_training_history(history, ouput_dir):
""" Print Losses and Accuracy from model training and validation into CSV file """
training_file_name = os.path.join(ouput_dir, "TrainingHistory.txt")
f = open(training_file_name, "w")
f.writelines("Training Data History") # Write a string to a file
f.writelines("\nTraining_Accuracy :" + str(history.history['acc']) + " \nValidation_Accuracy :" + str(history.history['val_acc']) ) # Write a string to a file
f.writelines("\nTraining_Loss :" + str(history.history['loss']) + " \nValidation_Loss :" + str(history.history['val_loss']) ) # Write a string to a file
f.close()
def plot_training_graphs(history, ouput_dir):
""" Calls functions to draw loss and accuracy graphs """
_plot_loss_graph(history, ouput_dir)
_plot_accuracy_graph(history, ouput_dir)
def _plot_loss_graph(history, ouput_dir):
""" Plot Losses from model training and validation """
graph_file_name = os.path.join(ouput_dir, "ModelLoss.png")
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig(graph_file_name, dpi=250)
plt.close()
def _plot_accuracy_graph(history, ouput_dir):
""" Plot Accuracy from model training and validation """
graph_file_name = os.path.join(ouput_dir, "ModelAccuracy.png")
plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig(graph_file_name, dpi=250)
plt.close()
|
import requests
import html2text as ht
import time
import os
import json
class dropin_wiki_detail(object):
    '''Fetch the detailed info of a single entry from the Duopin (多频) encyclopedia.'''
def wiki_spider(self, typeId, dataId):
time.sleep(2.31)
#http://duopin_app_api.hearinmusic.com/app/ency/encyDetail?typeId=13&dataId=377
url = 'http://duopin_app_api.hearinmusic.com/app/ency/encyDetail?'
params = {
'typeId':str(typeId),
'dataId':str(dataId)
}
headers = {'User-Agent':'Mozilla/5.0 (Linux; Android 6.0; NEM-AL10 Build/HONORNEM-AL10; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043906 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060133) NetType/WIFI Language/zh_CN'}
response = requests.get(url=url, headers=headers, params=params).json()['data']
#print(response)
return response
def save_style(self, wiki):
        '''Save a "style" entry as a Markdown (.md) file.'''
        if not os.path.exists('./style/md'):
            os.makedirs('./style/md')
text_maker = ht.HTML2Text()
text = text_maker.handle(wiki['content'])
md_path = f"./style/md/{wiki['name']}.md"
with open(md_path, 'w+', encoding='utf8') as fp:
fp.write(f"#{wiki['name']}\n")
fp.write(f'##基本信息\n')
fp.write(f"###发源时间:{wiki['startTimeText']}\n###风格类型:{wiki['styleCategory']}\n")
fp.write(f'##详情\n')
fp.write(text)
fp.write(f"###{wiki['remarks']}\n")
fp.write(f"###{wiki['url']}\n")
print(f"{wiki['name']}.md 保存完成")
def typeId_chinese(self, typeId):
        typeId_dict = {
            '12': '艺人',    # artists (max dataId 10000)
            '13': '风格',    # styles (max dataId 379)
            '14': '厂牌',    # labels (max dataId 1933)
            '15': '场所',    # venues (max dataId 40)
            '16': '电音节',  # electronic music festivals (max dataId 200)
            '17': '事件',    # events (max dataId 3)
            '11': '其他'     # other (max dataId 45)
        }
return typeId_dict[typeId]
def save_json(self, wiki, typeId):
        '''Save the entry as a JSON file.'''
        path = f'./{self.typeId_chinese(typeId)}'
        if not os.path.exists(f'{path}/json'):
            os.makedirs(f'{path}/json')
json_path = f"{path}/json/{wiki['name']}.json"
fp = open(json_path, 'w', encoding='utf8')
json.dump(wiki, fp = fp, ensure_ascii=False, indent=True)
print(f"{wiki['name']}.json 保存完成")
def run(self, typeId, dataId):
response = self.wiki_spider(typeId, dataId)
        # sanitize the entry name so it can be used as a filename
intab = "?*/\|.:><\""
outtab = " "
trantab = str.maketrans(intab, outtab)
response['name'] = response['name'].translate(trantab)
self.save_json(response, str(typeId))
'''
if typeId == 13:
# style
#self.save_style(response)
self.save_json(response, str(typeId))
elif typeId == 12:
# music
#self.save_musician(response)
self.save_json(response)
'''
if __name__ == "__main__":
dropin_music = dropin_wiki_detail()
#typeId, dataId
dropin_music.run(13, 154)
|
from ibeatles.step1.plot import Step1Plot
from ibeatles.utilities.retrieve_data_infos import RetrieveGeneralFileInfos, RetrieveSelectedFileDataInfos
class Step3GuiHandler(object):
def __init__(self, parent=None):
self.parent = parent
def load_normalized_changed(self, tab_index=0):
if tab_index == 0:
data_preview_box_label = "Sample Image Preview"
o_general_infos = RetrieveGeneralFileInfos(parent = self.parent,
data_type = 'sample')
o_selected_infos = RetrieveSelectedFileDataInfos(parent = self.parent,
data_type = 'sample')
else:
data_preview_box_label = "Open Beam Image Preview"
o_general_infos = RetrieveGeneralFileInfos(parent = self.parent,
data_type = 'ob')
o_selected_infos = RetrieveSelectedFileDataInfos(parent = self.parent,
data_type = 'ob')
self.parent.ui.data_preview_box.setTitle(data_preview_box_label)
o_general_infos.update()
o_selected_infos.update()
def select_normalized_row(self, row=0):
self.parent.ui.list_normalized.setCurrentRow(row)
# o_step1_plot = Step1Plot(parent = self.parent)
# o_step1_plot.display_2d_preview()
def check_time_spectra_widgets(self):
time_spectra_data = self.parent.data_metadata['time_spectra']['normalized_folder']
if self.parent.ui.material_display_checkbox_2.isChecked():
if time_spectra_data == []:
_display_error_label = True
else:
_display_error_label = False
else:
_display_error_label = False
self.parent.ui.display_warning_2.setVisible(_display_error_label)
def check_widgets(self):
if self.parent.data_files['normalized'] == []:
status = False
else:
status = True
        self.parent.ui.actionRotate_Images.setEnabled(status)
|
import discord
from redbot.core import commands
from .pcx_lib import type_message
class bully(commands.Cog):
    # This Cog takes the previous message and rewrites it in aLtErNaTiNg CaSe (mocking "sarcasm" casing)
@commands.command(aliases=["b"])
async def sarcasm(self, ctx: commands.Context):
#Define the command for RedBot
message = (await ctx.channel.history(limit=2).flatten())[1].content
if not message:
message = "I can't translate that!"
await type_message(
ctx.channel,
self.sarcog_string(message),
allowed_mentions=discord.AllowedMentions(
everyone=False, users=False, roles=False),
)
@staticmethod
def sarcog_string(x):
        # Apply alternating lower/upper casing and return the string
output = []
for let in range(len(x)):
if let%2==0:
output.append(x[let].lower())
else:
output.append(x[let].upper())
return "".join(output)
|
#!/usr/bin/env python
#
# Usage:
# CC6UL SBC with PCF8574 Multiplexer connected on I2C1 (pin 33, 34 of RGB connector)
#
import time
import sys
from subprocess import call
CMD = "i2cset"
# I2C-0
BUS = "%d" % 0
# address on bus is 0x38
ADDR = "0x%0.2X" % 0x38
DELAYSHORT = 0.060
DELAYMIN = 0.005
test = call(["which", CMD])
if (test == 1):
sys.exit("\nERROR: ic2set not found!\n")
print("\nSupercar Kitt LED\n")
while (True):
j = 0
for l in [1, 3, 2, 6, 4, 12, 8, 24, 16]:
a = call([CMD, BUS, ADDR, "0", "0x%0.2X" % l, "-y"])
if (j == 0):
time.sleep(DELAYSHORT)
j = 1
else:
time.sleep(DELAYMIN)
j = 0
j = 0
for l in [16, 24, 8, 12, 4, 6, 2, 3, 1]:
a = call([CMD, BUS, ADDR, "0", "0x%0.2X" % l, "-y"])
if (j == 0):
time.sleep(DELAYSHORT)
j = 1
else:
time.sleep(DELAYMIN)
j = 0
print("done!\n") |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 23:56:26 2021
@author: babraham
"""
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import os
# ===========PLOTTING PROPERTIES/VARIABLES=====================#
D3 = px.colors.qualitative.D3
PLOTLY = px.colors.qualitative.Plotly
# plotting properties defined below
# ======= PLOTTING PROPERTIES ==============#
def load_lookup_dicts():
lookup = dict()
# dictionary mapping time labels to days (or all)
lookup["time_dict"] = {"today": 1,
"last week": 7,
"last month": 30,
"last 3 months": 30.4 * 3,
"last 6 months": 30.4 * 6, # last 182.5 days
"last year": 365,
                           # assuming all VAs occurred in the last 30 years
"all": "all"}
# dictionary mapping demographic variable names to corresponding VA survey columns
lookup["demo_to_col"] = {
"age group": "age_group",
"sex": "Id10019",
"place of death": "Id10058",
}
# colors used for plotting - override with whatever color scheme you'd like
lookup["color_list"] = [
"rgb(24,162,185)", # turquoise
"rgb(201,0,1)", # burgandy
"rgb(8,201,0)", # green
"rgb(240,205,21)", # gold
"rgb(187,21,240)", # purple
"rgb(162,162,162)", # gray
"rgb(239,86,59)", # dark orange
"rgb(20,72,123)", # midnight blue,
"rgb(9,112,13)", # dark green
"rgb(239,49,255)" # fuschia
]
# colorscale used for map
lookup["colorscales"] = {
"primary": [
(0.0, "rgb(255,255,255)"),
(1e-20, "rgb(0, 147, 146)"),
(0.167, "rgb(0, 147, 146)"),
(0.167, "rgb(57, 177, 133)"),
(0.333, "rgb(57, 177, 133)"),
(0.333, "rgb(156, 203, 134)"),
(0.5, "rgb(156, 203, 134)"),
(0.5, "rgb(233, 226, 156)"),
(0.667, "rgb(233, 226, 156)"),
(0.667, "rgb(238, 180, 121)"),
(0.833, "rgb(238, 180, 121)"),
(0.833, "rgb(232, 132, 113)"),
(1.0, "rgb(232, 132, 113)"),
],
"secondary": [
(0.0, "rgb(255,255,255)"),
(0.001, "rgb(230,230,230)"),
(1.0, "rgb(230,230,230)"),
],
}
lookup["line_colors"] = {"primary": "black", "secondary": "gray"}
# dictionary mapping certain concepts to display names
lookup["display_names"] = {
# Internal CODs to human-readable titles
"all": "All Causes",
"Coded VAs": "Coded VAs",
"Mean Age of Death": "Mean Age of Death",
"HIV/AIDS related death": "HIV/AIDS",
"Diabetes mellitus": "Diabetes Mellitus",
"Acute resp infect incl pneumonia": "Pneumonia",
"Other and unspecified cardiac dis": "Other Cardiac",
"Diarrhoeal diseases": "Diarrhoeal Diseases",
"Other and unspecified neoplasms": "Unspecified Neoplasm",
"Renal failure": "Renal Failure",
"Liver cirrhosis": "Liver Cirrhosis",
"Digestive neoplasms": "Digestive Neoplasm",
"Other and unspecified infect dis": "Other infectious disease",
"Other and unspecified maternal CoD": "Other maternal CoD",
"Pregnancy-induced hypertension": "Hypertension from Pregnancy",
# internal cod group labels to plot titles
"All CODs": "All Causes",
"ncd": "Non-Communicable",
"parasitic": "Parasitic",
"infectious": "Infectious",
"zambia_notifiable_disease": "Zambian Notifiable",
"respiratory": "Respiratory",
# place of death names to more human-readable names
"on_route_to_hospital_or_facility": "En Route to Facility",
"dk": "Unknown",
"other_health_facility": "Other Health Facility",
"ref": "Refused to Answer"
}
# formats for montly, weekly, and yearly dates
lookup["date_display_formats"] = {
"week": "%d/%m/%Y",
"month": "%m/%Y",
"year": "%Y",
}
# Toolbar configurations for plots
# map config
lookup['graph_config'] = {"displayModeBar": True,
"scrollZoom": True, "displaylogo": False,
"modeBarButtonsToRemove":["zoomInGeo", "zoomOutGeo", "select2d", "lasso2d"]}
# chart config (for all charts)
lookup['chart_config'] = {"displayModeBar": True,
"displaylogo":False,
"modeBarButtonsToRemove":["pan2d", "zoom2d", "select2d", \
"lasso2d", "zoomIn2d", "zoomOut2d", "autoScale2d", "resetScale2d"]}
return lookup
LOOKUP = load_lookup_dicts()
# get counts of a categorical field from va data. Field name can either be a
# column name in the dataframe or a demographic lookup key. Change final column name with display_name argument.
def get_field_counts(va_df, field_name, full_labels=False, display_name=None):
if field_name not in va_df.columns:
# if no matching column in va_df for field name, try lookup in the demo_to_col dict.
if not display_name:
# if no display name provided, use original key
display_name = field_name
field_name = LOOKUP["demo_to_col"].get(field_name, field_name)
assert field_name in va_df.columns
va_df = (
va_df[field_name]
.value_counts()
.reset_index()
.assign(index=lambda df: df["index"].str.capitalize())
.rename(columns={field_name: "count", "index": "group"})
.assign(
percent=lambda df: df.apply(
lambda x: np.round(100 * x["count"] / df["count"].sum(), 1), axis=1
)
)
.assign(label=lambda df: df["percent"].astype(str) + "%")
)
if display_name:
va_df = va_df.rename(columns={"group": display_name})
if full_labels:
va_df["label"] = (
va_df["count"].astype(str) + "<br> (" + va_df["percent"].astype(str) + "%)"
)
return va_df
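# Example (hypothetical data): counting the "sex" demographic with a prettier column name.
#   df = pd.DataFrame({"Id10019": ["male", "female", "female"]})
#   get_field_counts(df, "sex", display_name="gender")
#   # -> one row per value with columns gender, count, percent, label (e.g. "Female", 2, 66.7, "66.7%")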
# ===========DEMOGRAPHIC PLOT LOGIC=========================#
# create a multiplot of va counts by gender, age, and place of death
def demographic_plot(va_df, no_grids=True, column_widths=None, height=600, title=None):
if not column_widths:
first_width = 0.4
column_widths = [first_width, 1 - first_width]
comb_fig = make_subplots(
rows=2,
cols=2,
specs=[[{"type": "bar"}, {"type": "bar"}], [{"colspan": 2}, None]],
subplot_titles=("Gender", "Age Group", "Place of Death"),
column_widths=column_widths,
vertical_spacing=0.15,
)
if va_df.size > 0:
# gender
sex_df = get_field_counts(va_df, "sex", display_name="gender")
comb_fig.add_trace(
go.Bar(
name="Gender",
x=sex_df["gender"],
y=sex_df["count"],
text=sex_df["label"],
textposition="auto",
showlegend=False,
marker_color=PLOTLY,
),
row=1,
col=1,
)
# age groups
age_df = get_field_counts(va_df, "age group", display_name="age_group")
comb_fig.add_trace(
go.Bar(
name="Age Group",
x=age_df["age_group"],
y=age_df["count"],
text=age_df["label"],
textposition="auto",
showlegend=False,
marker_color=D3,
),
row=1,
col=2,
)
# place of death
loc_cts = get_field_counts(va_df, "place of death", display_name="location")
loc_cts["location"] = loc_cts["location"].apply(
lambda x: LOOKUP["display_names"].get(x.lower(), x.capitalize())
)
comb_fig.add_trace(
go.Bar(
name="Place of Death",
x=loc_cts["count"],
y=loc_cts["location"],
orientation="h",
showlegend=False,
text=loc_cts["label"],
textposition="auto",
marker_color=D3[4],
),
row=2,
col=1,
)
else:
title = "No Data for Selected Criteria"
no_grids = False
gender_labels, gender_counts = [''], [0]
comb_fig.add_trace(
go.Bar(
name="Gender",
x=gender_labels,
y=gender_counts,
textposition="auto",
showlegend=False,
),
)
age_labels, age_counts = [''], [0]
comb_fig.add_trace(
go.Bar(
name="Age Group",
x=age_labels,
y=age_counts,
#text=age_labels,
#textposition="auto",
showlegend=False,
marker_color=D3,
),
row=1, col=2,
)
place_labels, place_counts = [''], [0]
comb_fig.add_trace(
go.Bar(
name="Place of Death",
x=place_counts,
y=place_labels,
orientation="h",
showlegend=False,
),
row=2,col=1,
)
comb_fig.update_xaxes(range=[0,1])
comb_fig.update_yaxes(range=[0,1])
if no_grids:
comb_fig.update_xaxes(showgrid=False)
comb_fig.update_yaxes(showgrid=False)
if title:
comb_fig.update_layout(title_text=title)
return comb_fig.update_layout(height=height)
# ===========CAUSE OF DEATH PLOTTING LOGIC=========================#
# function to load cod groupings csv file used for cod group plotting.
# each cod is a row, and each column is a possible group.
def load_cod_groupings(data_dir=None, grouping_file="cod_groupings.csv"):
group_data = pd.DataFrame()
if not data_dir:
data_dir = "va_explorer/va_analytics/dash_apps/dashboard_data"
if not grouping_file.endswith(".csv"):
raise AttributeError("Must provide grouping file in csv format")
fname = f"{data_dir}/{grouping_file}"
if os.path.isfile(fname):
group_data = pd.read_csv(fname)
else:
        print(f"WARNING: couldn't find {fname}")
return group_data
# get all vas with cods in a certain cod group
def cod_group_data(va_df, group, cod_groups=pd.DataFrame(), N=10):
va_filtered = pd.DataFrame()
if cod_groups.size == 0:
data_dir = "va_explorer/va_analytics/dash_apps/dashboard_data"
cod_groups = pd.read_csv(f"{data_dir}/cod_groupings.csv")
group = group.lower()
if group == 'neonatal':
age_col = 'ageInYears' if 'ageInYears' in va_df.columns else 'ageinyears'
va_df[age_col] = va_df[age_col].astype(float, errors='ignore')
# definition from 2016 WHO VA form
va_filtered = va_df[va_df[age_col] < 1]
else:
# don't filter if group starts with 'all'
if group.startswith('all'):
top_cods = va_df['cause'].value_counts().sort_values(ascending=False).head(N).index
va_filtered = va_df[va_df["cause"].isin(set(top_cods))]
elif group in cod_groups.columns:
cod_group = cod_groups.loc[cod_groups[group]==1, 'cod']
va_filtered = va_df[va_df['cause'].isin(cod_group)]
return va_filtered
# turn two columns from a va dataframe into a pivot table
def get_pivot_counts(va_df, index_col, factor_col):
if factor_col.lower() in ['all', 'overall']:
counts = va_df.groupby(index_col).count().iloc[:,[1]]
counts.columns = ['overall']
else:
if factor_col not in va_df.columns:
factor_col = LOOKUP["demo_to_col"][factor_col]
assert factor_col in va_df.columns and index_col in va_df.columns
counts = va_df.pivot_table(
index=index_col,
columns=factor_col,
values="id",
aggfunc=pd.Series.nunique,
fill_value=0,
margins=True,
)
return counts
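# Example (hypothetical VA dataframe with "cause", "Id10019" and "id" columns):
#   get_pivot_counts(va_df, "cause", "sex")
#   # -> rows indexed by cause, one column per sex value plus an "All" margin of unique id counts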
def cod_group_plot(va_df, cod_groups=[], demographic="overall", N=10, height=None, vertical_spacing=.15,\
chosen_cod="all"):
figure = go.Figure()
# if no demographic chosen (i.e. overall), color-code by cause-of-death group. Otherwise, color-code by demographic
demographic = demographic.lower()
if demographic in ['overall', 'all']:
color_keys = cod_groups
else:
demo_column = LOOKUP["demo_to_col"].get(demographic, demographic)
color_keys = va_df[demo_column].unique().tolist()
color_dict = {color_key: LOOKUP["color_list"][i] for i, color_key in enumerate(color_keys)}
# build subplots incrementally and store them in a list of dictionaries
subplots = []
legend_names = set()
if va_df.size > 0:
# create 1 subplot per group and store data
for i, cod_group in enumerate(cod_groups):
# default values for group's height and traces.
group_height, group_traces = 1, []
# filter va data down to only group of interest
cod_data = cod_group_data(va_df, cod_group, N=N)
# only proceed if any group data
if cod_data.size > 0:
# get demographic pivot and remove row total (not a COD)
cod_pivot = get_pivot_counts(cod_data, "cause", demographic).query("cause != 'All'")
# get top N by total count (last column of pivot table) and flip order for plotting
cod_pivot = cod_pivot.sort_values(by=cod_pivot.columns[-1], ascending=False).head(N).iloc[::-1]
cod_pivot.index = [LOOKUP["display_names"].get(x,x) for x in cod_pivot.index]
cod_pivot["cod"] = cod_pivot.index
demo_groups = sorted(list(cod_pivot.columns.difference(['All', 'cod'])))
# set relative group height to at most N
group_height = cod_pivot.shape[0]
if cod_group.lower().startswith("all"):
group_title = "Top CODs Overall"
else:
                    group_title = "Top <b>{}</b> CODs".format(LOOKUP['display_names'].get(cod_group, cod_group.capitalize()))
# make a subplot trace for each demographic
for j, demo_group in enumerate(demo_groups):
if demographic not in ['overall', 'all']:
group_key = demo_group
denominators = cod_pivot[demo_groups].sum(axis=1)
else:
group_key = cod_group
denominators = cod_pivot[demo_group].sum()
counts = cod_pivot.loc[cod_pivot[demo_group] > 0, ["cod", demo_group]]
counts = (counts[counts["cod"] != "All"])#.sort_values(by=demo_group, ascending=True))
counts[f"{demo_group}_pct"] = np.round(100 * counts[demo_group] / denominators, 2)
counts["text"] = f"<i>{demo_group}</i><br>" +\
counts[demo_group].astype(str) +\
" (" + counts[f"{demo_group}_pct"].astype(str) + " %)"
lines = [LOOKUP["line_colors"]["secondary"]] * counts.shape[0]
widths = np.repeat(1, len(counts['cod']))
# if a specific cod is chosen from global dropdown, highlight it if present in group CODs
if chosen_cod != "all":
chosen_cod = LOOKUP["display_names"].get(chosen_cod, chosen_cod)
# only highlight if chosen_cod present
if chosen_cod in counts.index:
chosen_idx = counts.index.get_loc(chosen_cod)
lines[chosen_idx] = "#e0b816"
widths[chosen_idx] = 3
counts["cod"][chosen_idx] = "<b>" + counts["cod"][chosen_idx] + "</b>"
# create traces (for both counts and %s) and add it to group's traces
for k, trace_type in enumerate([demo_group, f"{demo_group}_pct"]):
show_legend = (group_key not in legend_names and k==0)
group_legend_name = LOOKUP["display_names"].get(group_key, group_key.capitalize())
trace = go.Bar(
x=counts[trace_type],
y=counts["cod"],
text=counts["text"],
name=group_legend_name,
visible=(k==0), # only show counts first
showlegend=show_legend,
orientation="h",
hovertemplate = "<b>%{y}</b> <br>%{text}<extra></extra>",
marker=dict(
color= color_dict[group_key],
line=dict(color=lines, width=widths)
),
)
group_traces.insert(0, trace)
legend_names.add(group_key)
else:
group_title = f"No Observed <b>{cod_group.capitalize()}</b> CODs"
# store all data needed to create group subplot in a single dictionary
subplot = {"group": cod_group, "title": group_title, "data": group_traces, "height": group_height}
subplots.insert(0, subplot)
# sort subplots by height (i.e. amount of data)
subplots = sorted(subplots, key=lambda subplot: subplot["height"])
## Step 2: Combine info across groups to form plot layout, data, and annotation variables
data, axes, title_annotations = [], {}, []
y_min, y_max = 0,0
total_height = sum(subplot["height"] for subplot in subplots)
# total area to allocate for subplots, accounting for title spaces
total_available_area = 1 - vertical_spacing * len(subplots)
for i, subplot in enumerate(subplots):
# create axes
axis_idx = "" if i == 0 else i+1
xaxis_i = {"domain": [0,1], "anchor": f"y{axis_idx}"}
subplot_height = total_available_area * subplot["height"] / total_height
# update upper limit of y range to reflect subplot height
y_max = np.round(y_max + subplot_height, 3)
yaxis_i = {"domain": [y_min, y_max], "anchor": f"x{axis_idx}"}
# add new axes to axes dict
axes[f"xaxis{axis_idx}"] = xaxis_i
axes[f"yaxis{axis_idx}"] = yaxis_i
# add each trace to data list
for trace in subplot["data"]:
# add axes references to current trace
trace["xaxis"] = f"x{axis_idx}"
trace["yaxis"] = f"y{axis_idx}"
data.insert(0, trace)
# make annotation for group title and add to plot annotations
title_annotation = {'text': subplot["title"], 'font': {'size': 16}, 'showarrow': False,
'x': 0.5, 'xref': 'paper','y': y_max, 'yanchor': 'bottom','yref': 'paper'}
title_annotations.insert(0, title_annotation)
# add spacing for title to y_max
y_max += vertical_spacing
# move y_min forward for next plot
y_min = y_max
# create figure with necessary data and axes
figure = go.Figure(data=data, layout=axes)
# add titles and other finishing touches to plot
figure.update_layout(annotations=title_annotations,
barmode="stack",
height=height,
legend=dict(traceorder='normal', yanchor="top", y=1-vertical_spacing),
margin={'t':30})
# add count/percent buttons
buttons = create_percent_count_buttons(num_groups=int(len(figure.data)/2), y=1, traces=figure.data)
figure.update_layout(updatemenus=[buttons])
else:
figure.update_xaxes(range=[0,1])
figure.update_yaxes(range=[0,1])
figure.update_layout(title_text="No Data for Selected Criteria")
return figure
# plot top N causes of death in va_data either overall or by factor/demographic
def cause_of_death_plot(va_df, factor, N=10, chosen_cod="all", title=None, height=None):
figure, factor, factor_title = go.Figure(), factor.lower(), "Overall"
if factor not in ["all", "overall"]:
factor_title = "by " + factor.capitalize()
plot_title = "Top Causes of Death {}".format(factor_title) if not title else title
# get cause counts by chosen factor (by default, overall counts)
counts = get_pivot_counts(va_df, "cause", factor).rename(columns={"overall": "All"})
if va_df.size > 0:
# make index labels pretty
counts.index = [LOOKUP["display_names"].get(x, x) for x in counts.index]
counts["cod"] = counts.index
counts = (counts[counts["cod"] != "All"].sort_values(by="All", ascending=False).head(N))
groups = list(set(counts.columns).difference(set(["cod"])))
lines = [LOOKUP["line_colors"]["secondary"]] * counts.shape[0]
widths = np.repeat(1, len(counts['cod']))
if chosen_cod != "all":
chosen_cod = LOOKUP["display_names"].get(chosen_cod, chosen_cod.capitalize())
if chosen_cod in counts.index:
chosen_idx = counts.index.get_loc(chosen_cod)
lines[chosen_idx] = "#e0b816"
widths[chosen_idx] = 4
counts["cod"][chosen_idx] = "<b>" + counts["cod"][chosen_idx] + "</b>"
if factor not in ["all", "overall"]:
groups.remove("All")
for i, group in enumerate(groups):
if factor in ["all", "overall"]:
# calculate percent as % of all cods (column-wise calculation)
counts[f"{group}_pct"] = np.round(100 * counts[group] / counts[group].sum(), 1)
else:
# calculate percent as % across groups for specific cod (row-wise calculation)
counts[f"{group}_pct"] = counts.apply(lambda row: np.round(100 * row[group] / row[groups].sum(), 1), axis=1)
counts["text"] = f"<i>{group}</i><br>" + counts[group].astype(str) + " (" + counts[f"{group}_pct"].astype(str) + " %)"
# add traces for counts and percents to enable toggling
for j, trace_type in enumerate([group, f"{group}_pct"]):
figure.add_trace(
go.Bar(
x=counts[trace_type],
y=counts["cod"],
text=counts["text"],
name=group.capitalize(),
orientation="h",
visible = (j==0),
hovertemplate = "<b>%{x}</b> <br>%{text}<extra></extra>",
marker=dict(
color= LOOKUP["color_list"][i],
line=dict(color=lines, width=widths),
),
)
)
figure.update_layout(
barmode="stack",
title_text=plot_title,
#xaxis_tickangle=-45,
xaxis_title="Count",
height=height,
updatemenus=[
create_percent_count_buttons(num_groups=len(groups))
])
else:
        cod_labels, cod_counts = [''], [0]
# counts
figure.add_trace(
go.Bar(
y=cod_counts,
x=cod_labels,
text=cod_labels,
name="no data",
orientation="v",
)
)
figure.update_xaxes(range=[0,1])
figure.update_yaxes(range=[0,1])
figure.update_layout(
barmode="stack",
title_text="No Data for Selected Criteria",
xaxis_tickangle=-45,
yaxis_title="Count",
)
return figure
# create toggle to switch between counts and percents for barcharts
def create_percent_count_buttons(num_groups, x=1, y=1.2, traces=None):
buttons = dict(
type="buttons",
direction="right",
x=x, y=y,
active=0,
buttons=list([
dict(label="Counts",
method="update",
args=[{"visible": [True, False] * num_groups}
]),
dict(label="Percents",
method="update",
args = [{"visible": [False, True] * num_groups},
]),
]))
# if traces provided, add logic to figure out which traces to show in legend when toggling between counts and percents
if traces:
# initial vector of which count traces to display in legend
counts_show_legend = [trace.showlegend for trace in traces]
# for percents, shift values one trace forward so percent equivalents are shown in legend
percents_show_legend = counts_show_legend.copy()
percents_show_legend.insert(0, False)
percents_show_legend.pop(-1)
# add showlegends argument to count button update function
buttons['buttons'][0]['args'][0]["showlegend"] = counts_show_legend
# add showlegends argument to count percent update function
buttons['buttons'][1]['args'][0]["showlegend"] = percents_show_legend
return buttons
# ========TREND/TIMESERIES PLOT LOGIC======================#
def va_trend_plot(va_df, group_period, factor="All", title=None, search_term_ids=None, height=None):
#figure = go.Figure()
group_period = group_period.lower()
aggregate_title = group_period.capitalize()
factor = factor.lower()
plot_fn = go.Scatter
if va_df.size > 0:
va_df["date"] = pd.to_datetime(va_df["date"])
va_df["timegroup"] = pd.to_datetime(va_df["date"])
if group_period == "week":
va_df["timegroup"] = pd.to_datetime(
va_df["date"].dt.to_period("W").apply(lambda x: x.strftime("%Y-%m-%d"))
)
elif group_period == "month":
va_df["timegroup"] = pd.to_datetime(
va_df["date"].dt.to_period("M").apply(lambda x: x.strftime("%Y-%m"))
)
elif group_period == "year":
va_df["timegroup"] = va_df["date"].dt.to_period("Y").astype(str)
if not search_term_ids:
search_term_ids = {"All CODs": va_df.index.tolist()}
# build and store traces for later
subplots = []
legend_names = set()
for term, ids in search_term_ids.items():
subplot_traces = []
trace_df = va_df.loc[ids,:]
# build pivot based on filtered data
if factor not in ["all", "overall"]:
assert factor in LOOKUP["demo_to_col"]
factor_col = LOOKUP["demo_to_col"][factor]
trend_counts = trace_df.pivot_table(
index="timegroup",
columns=factor_col,
values="id",
aggfunc=pd.Series.nunique,
fill_value=0,
margins=False,
)
else:
trend_counts = (
trace_df[["timegroup", "id"]]
.groupby("timegroup")
.count()
.rename(columns={"id": "all"})
)
# iterate through groups and add their traces to the plot
for i, demo_group in enumerate(trend_counts.columns.tolist()):
demo_group_name = LOOKUP["display_names"].get(demo_group.lower(), demo_group.capitalize())
show_legend = (demo_group not in ["all", "overall"]) and (demo_group_name not in legend_names)
trace_data = plot_fn(
y=trend_counts[demo_group],
x=trend_counts.index,
name=demo_group_name,
showlegend=show_legend,
marker=dict(
color=LOOKUP["color_list"][i],
line=dict(color=LOOKUP["color_list"][i], width=1),
),
)
subplot_traces.insert(0, trace_data)
legend_names.add(demo_group_name)
if "." in term:
term_name, term_type = term.split(".")
else:
term_name, term_type = term, ""
term_title = LOOKUP["display_names"].get(term_name, term_name.capitalize())
if term_type == "group":
term_title += " CODs"
subplot_title = f"<b>{term_title}</b> by {aggregate_title}"
subplot = {"group": term, "title": subplot_title, "data": subplot_traces}
subplots.insert(0, subplot)
# make figure with one subplot per search term
figure = make_subplots(
rows=len(search_term_ids.keys()),cols=1,
specs=[[{"type": "scatter"}]] * len(search_term_ids.keys()),
subplot_titles=[s["title"] for s in subplots],
vertical_spacing=0.15,
)
# add subplot data to figure one at a time
for i, subplot in enumerate(subplots):
for trace in subplot["data"]:
figure.add_trace(trace, row=(i+1), col=1)
else:
figure.update_xaxes(range=[0,1])
figure.update_yaxes(range=[0,1])
figure.update_layout(
title_text="No Data for Selected Criteria",
yaxis_title="Count",
)
figure.update_layout(height=height)
return figure
# load options for VA trends time series
def load_ts_options(va_data, cod_groups=pd.DataFrame()):
if cod_groups.empty:
data_dir = "va_explorer/va_analytics/dash_apps/dashboard_data"
cod_groups = load_cod_groupings(data_dir=data_dir)
# load cod groups
all_options = [(cod_group, "group") for cod_group in cod_groups.columns[2:].tolist()]
# load unique cods in selected data
va_data = pd.DataFrame(va_data)
if va_data.size > 0:
unique_cods = va_data["cause"].unique().tolist()
all_options += [(cod_name, "cod") for cod_name in unique_cods]
# always load all-cause option
all_options.append(("All Causes", "All causes.all"))
return all_options
|
def dogcal(age):
    """Convert a dog's age to 'human years' using the common 1:7 rule of thumb."""
    hum = age * 7
    return hum
age = int(input("Dog age?"))
print(dogcal(age)) |
# -*- coding: utf-8 -*-
import json
import os
import re
import tempfile
from hashlib import md5
from oslo.config import cfg
from ping import quiet_ping
from dnsdb.constant.constant import ZONE_MAP, VIEW_ZONE, NORMAL_TO_VIEW
from . import commit_on_success
from . import db
from .models import DnsHeader
from .models import DnsRecord
from .models import DnsSerial
from .models import IpPool
from .models import ViewDomainNameState
from .models import ViewIsps
from .models import ViewRecords
from ..library.exception import BadParam
from ..library.log import getLogger
log = getLogger(__name__)
CONF = cfg.CONF
VIEW_TO_CNAME = {NORMAL_TO_VIEW[zone]: cname_zone for zone, cname_zone in ZONE_MAP.iteritems()}
def _make_glbs_cname(domain, abbr):
for zone, cname_zone in VIEW_TO_CNAME.iteritems():
if domain.endswith('.' + zone):
return '%s.%s.%s' % (domain.replace('.' + zone, ''), abbr, cname_zone)
raise BadParam('Domain name format wrong: %s' % domain)
class ZoneRecordDal(object):
@staticmethod
def list_zone_header():
zones = DnsHeader.query.all()
results = [zone.zone_name for zone in zones if not zone.zone_name.endswith('.IN-ADDR.ARPA')]
return sorted(results)
@staticmethod
def list_zone_ttl():
pattern = re.compile(r'\$TTL\s+(\d+)\s?')
zone_ttl = {}
for zone, header in db.session.query(DnsHeader.zone_name, DnsHeader.header_content):
if pattern.search(header) is None:
                raise BadParam('Can not get ttl of zone %s' % zone, msg_ch=u'无法获取zone %s 的ttl' % zone)
zone_ttl[zone] = pattern.search(header).group(1)
return zone_ttl
@staticmethod
def select_zone(domain):
zones = set([zone.zone_name for zone in DnsSerial.query.all()])
for index in range(1, len(domain.split('.'))):
best_match = domain.split('.', index)[-1]
if best_match in zones:
return best_match
return None
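    # Matching example (hypothetical zones): with zones {"example.com", "sub.example.com"},
    # select_zone("a.b.sub.example.com") tries the suffixes "b.sub.example.com",
    # "sub.example.com", ... and returns the first (longest) suffix found, "sub.example.com".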
@staticmethod
def get_zone_header(zone_name):
header = DnsHeader.query.filter_by(zone_name=zone_name).first()
if not header:
raise BadParam('not header for zone: %s' % zone_name)
serial = DnsSerial.query.filter_by(zone_name=zone_name).first()
if not serial:
raise BadParam('not serial for zone: %s' % zone_name)
return dict(header=header.header_content, serial_num=serial.serial_num)
@staticmethod
def get_zone_need_update(group_name):
return [item.zone_name for item in (DnsSerial.query.
filter_by(zone_group=group_name).
filter(DnsSerial.update_serial_num < DnsSerial.serial_num))]
@staticmethod
def _get_records_of_view_zone(isp_map):
        # These are 'normal' records (stored in DnsRecord), so they should be included for every ISP.
cname_ttl = CONF.view.cname_ttl
records = (DnsRecord.query.
filter_by(zone_name=VIEW_ZONE).
order_by(DnsRecord.domain_name, DnsRecord.record).all())
res = {isp: [] for isp in isp_map.keys()}
for record in records:
for isp in isp_map.iterkeys():
res[isp].append({"name": record.domain_name, "record": record.record,
"type": record.record_type, "ttl": record.ttl})
states = ViewDomainNameState.query.order_by(ViewDomainNameState.domain_name).all()
for state in states:
res[state.isp].append(
{"name": state.domain_name, "record": _make_glbs_cname(state.domain_name, isp_map[state.isp]),
"type": 'CNAME', "ttl": cname_ttl})
return res
@staticmethod
def _get_records_of_view_domain(zone, isp_map):
states = ViewDomainNameState.query.all()
records = (ViewRecords.query.
filter_by(zone_name=zone).
order_by(ViewRecords.domain_name, ViewRecords.property).
all())
merge_states = {}
for state in states:
if state.domain_name not in merge_states:
merge_states[state.domain_name] = []
state.enabled_rooms = json.loads(state.enabled_rooms)
merge_states[state.domain_name].append(state)
active_records = []
for record in records:
if record.record_type != 'A' and record.record_type != 'CNAME':
raise BadParam('ViewRecord type error: only [A, CNAME] allow.')
states = merge_states[record.domain_name]
for state in states:
if state.state == 'disabled':
continue
# It's an A record and state record indicates that the A record is being used.
if state.state == 'A' and record.record_type == 'A' and record.property in state.enabled_rooms:
active_records.append({"name": _make_glbs_cname(state.domain_name, isp_map[state.isp]),
"record": record.record, "type": record.record_type, "ttl": record.ttl})
                # It's a CNAME record and the state record says this CNAME record is in use.
elif record.record_type == 'CNAME' and str(record.id) == state.state:
active_records.append({"name": _make_glbs_cname(state.domain_name, isp_map[state.isp]),
"record": record.record, "type": record.record_type, "ttl": record.ttl})
return sorted(active_records, key=lambda record: (record['name'], record['record']))
@staticmethod
def _get_records_of_ordinary_zone(zone_name):
records = DnsRecord.query.filter_by(zone_name=zone_name).all()
record_info = []
for record in records:
if record.ttl != 0:
record_info.append(
{"name": record.domain_name, "record": record.record,
"type": record.record_type, 'ttl': record.ttl})
else:
record_info.append({"name": record.domain_name, "record": record.record, "type": record.record_type})
return sorted(record_info, key=lambda x: (x['name'], x['record']))
@staticmethod
def get_zone_records(zone_name):
isp_map = {item.name_in_english: item.abbreviation for item in ViewIsps.query.all()}
if zone_name == VIEW_ZONE:
return ZoneRecordDal._get_records_of_view_zone(isp_map)
elif zone_name in ZONE_MAP.values():
return ZoneRecordDal._get_records_of_view_domain(zone_name, isp_map)
else:
return ZoneRecordDal._get_records_of_ordinary_zone(zone_name)
@staticmethod
@commit_on_success
def update_serial_num(zone_name):
item = DnsSerial.query.filter_by(zone_name=zone_name).first()
if not item:
raise BadParam('No such zone: %s' % zone_name)
serial_num = item.serial_num
item.update_serial_num = serial_num
return serial_num
@staticmethod
def has_no_mx_txt_record(zone, domain_name):
        # check whether the domain already has MX/TXT records in the zone header
        zone_header = ZoneRecordDal.get_zone_header(zone)['header']
        pattern = r'\s{0}[\s\d]+IN\s+MX|\s{0}[\s\d]+IN\s+TXT'.format(domain_name.replace('.' + zone, ''))
        if re.search(pattern, zone_header):
raise BadParam('%s has mx or txt record in zone: %s' % (domain_name, zone))
@staticmethod
def check_dns_restriction(zone, domain_name, record_type):
"""
1 有TXT/MX/A/CNAME 任意一种记录,都不允许添加cname
2 有cname记录不允许添加a记录
"""
if record_type == 'A' and DnsRecord.query.filter_by(domain_name=domain_name, record_type='CNAME').first():
raise BadParam('%s has CNAME record.' % domain_name, msg_ch=u'域名已有CNAME记录')
        if record_type == 'CNAME':
            if DnsRecord.query.filter_by(domain_name=domain_name).first():
                raise BadParam('%s already has records, can not add a CNAME.' % domain_name,
                               msg_ch=u'域名只能有一条CNAME记录')
ZoneRecordDal.has_no_mx_txt_record(zone, domain_name)
@staticmethod
def check_zone_syntax(zone_name, header_content):
        '''
        named-checkzone zone zone-file
        On a non-master host this check fails when the zone name matches the domain of an NS
        record inside it, because the header file has no A record for that NS, so the zone
        name is prefixed with "check-" when running the check.
        '''
tmp_file = os.path.join(CONF.tmp_dir, zone_name)
with open(tmp_file, 'w') as f:
f.write(header_content)
if CONF.etc.env != 'dev':
err_file = tempfile.mktemp(prefix='err_', dir='/tmp')
if os.system("named-checkzone -k fail %s %s >%s 2>&1" % (
zone_name, tmp_file, err_file)) != 0:
with open(err_file) as f:
error_log = f.read()
raise BadParam('Check header failed:%s' % error_log)
@staticmethod
def check_zone_header(zone_name, header_content):
headers = DnsHeader.query.filter_by(zone_name=zone_name).all()
if len(headers) != 1:
raise BadParam('No this zone header: %s' % zone_name, msg_ch=u'没有相关记录')
zone_name = zone_name.strip()
if 'pre_serial' not in header_content:
raise BadParam('check header failed: no pre_serial', msg_ch=u'文件中必须包含pre_serial,用于serial占位')
if not header_content.endswith('\n'):
raise BadParam('check header failed: end line must be line break', msg_ch=u'文件末尾需要有空行')
header_content = header_content.replace('pre_serial', '1')
ZoneRecordDal.check_zone_syntax(zone_name, header_content)
        # check whether any MX/TXT records in the header conflict with existing CNAME records
pattern = r'(?<=[\s;])(([\w-]+\.)*[-@\w]+)(\s+\d+)?\s+IN\s+(MX|TXT)'
if re.search(r'\sIN\s(MX|TXT)\s', header_content):
domains = set(
['{}.{}'.format(domain.group(1), zone_name) for domain in re.finditer(pattern, header_content)]
)
records = DnsRecord.query.filter(DnsRecord.domain_name.in_(domains)).filter(
DnsRecord.record_type == 'CNAME').all()
if records:
conflict_domain = [dns.name for dns in records]
                raise BadParam('%s already has cname record, can not add MX or TXT record.' % conflict_domain,
                               msg_ch=u'域名%s已有CNAME记录,不能有MX/TXT记录' % conflict_domain)
@staticmethod
@commit_on_success
def increase_serial_num(zone_name):
serials = DnsSerial.query.filter_by(zone_name=zone_name).all()
if len(serials) != 1:
raise BadParam('Zone serial should be unique: %s' % zone_name, msg_ch=u'zone serial记录不存在或者不唯一')
serial = serials[0]
serial.serial_num += 1
return serial.serial_num
@staticmethod
def update_zone_header(zone_name, header_content):
ZoneRecordDal.check_zone_header(zone_name, header_content)
old_header = DnsHeader.query.filter_by(zone_name=zone_name).first().header_content
if md5(header_content).hexdigest() == md5(old_header).hexdigest():
raise BadParam('No change for this header: %s' % zone_name, msg_ch=u'内容没有变化')
        # update the header stored in the database
with db.session.begin(subtransactions=True):
headers = DnsHeader.query.filter_by(zone_name=zone_name).all()
if len(headers) != 1:
raise BadParam('Zone header should be unique: %s' % zone_name, msg_ch=u'header记录不存在或者不唯一')
DnsHeader.query.filter_by(zone_name=zone_name).update({
'header_content': header_content
})
serial_num = ZoneRecordDal.increase_serial_num(zone_name)
return serial_num
@staticmethod
@commit_on_success
def add_record(domain_name, record, record_type, ttl, username):
zone = ZoneRecordDal.select_zone(domain_name)
ZoneRecordDal.check_dns_restriction(zone, domain_name, record_type)
        # make sure the (domain name, record) pair is unique
        if DnsRecord.query.filter_by(domain_name=domain_name, record=record).first():
            raise BadParam("This domain name and record pair already exists.", msg_ch=u'记录已存在')
other_record = DnsRecord.query.filter_by(domain_name=domain_name).first()
        # if other records exist, default the ttl to match them
if other_record:
if ttl == 0:
ttl = other_record.ttl
insert_record = DnsRecord(domain_name=domain_name, record=record,
zone_name=zone, update_user=username,
record_type=record_type, ttl=ttl)
db.session.add(insert_record)
        # bump the zone serial so the change is propagated
return ZoneRecordDal.increase_serial_num(zone)
@staticmethod
@commit_on_success
def auto_add_record(domain_name, region, username):
zone = ZoneRecordDal.select_zone(domain_name)
        # Select an unused IP in @region, i.e. one that no existing DnsRecord points to.
records = IpPool.query.outerjoin(DnsRecord, DnsRecord.record == IpPool.fixed_ip).add_columns(
IpPool.fixed_ip,
DnsRecord.record).filter(IpPool.region == region, DnsRecord.record.is_(None),
IpPool.allocated.is_(True)).order_by(IpPool.fixed_ip)
for item in records:
ip = item.fixed_ip
            # Send 8 ICMP packets of 64 bytes each with a 0.1s timeout to check whether the IP
            # is alive: quiet_ping returns the percentage of lost packets first, and anything
            # below 100% loss means the IP answered, so it is treated as in use and skipped.
if CONF.etc.env != 'dev' and quiet_ping(ip, 0.1, 8, 64)[0] != 100:
IpPool.query.filter_by(fixed_ip=ip).update({'allocated': False})
log.error("%s should have been set allocated=False since it is ping-able." % ip)
continue
with db.session.begin(subtransactions=True):
try:
                    iprecord = IpPool.query.filter_by(fixed_ip=ip).with_for_update(nowait=True, of=IpPool).first()
except Exception:
log.error("%s has been locked by other process" % ip)
continue
if DnsRecord.query.filter_by(record=ip).first():
continue
insert_record = DnsRecord(domain_name=domain_name, record=ip,
zone_name=zone, update_user=username,
record_type='A')
db.session.add(insert_record)
return ZoneRecordDal.increase_serial_num(zone)
else:
raise BadParam("No unused ip for region:%s." % region, msg_ch=u'没有可用的ip')
@staticmethod
@commit_on_success
def modify_record(domain_name, origin_record, update_dict, username):
zone = ZoneRecordDal.select_zone(domain_name)
if update_dict.get('record_type', None) == 'CNAME':
ZoneRecordDal.has_no_mx_txt_record(zone, domain_name)
records = DnsRecord.query.filter_by(domain_name=domain_name, record=origin_record).all()
if len(records) > 1:
raise BadParam("More than one record,check database!")
if len(records) == 0:
raise BadParam("Can not find this record!")
update_dict.pop('check_record')
update_dict['update_user'] = username
DnsRecord.query.filter_by(domain_name=domain_name, record=origin_record).update(update_dict)
if update_dict.get('record_type') == 'A':
            # make sure there is no duplicate A record
            if DnsRecord.query.filter_by(domain_name=domain_name, record=update_dict['record']).count() > 1:
                raise BadParam('Domain %s already has record %s' % (domain_name, update_dict['record']),
                               msg_ch=u'记录已存在')
        # keep the ttl consistent across all records of the same domain name
ttl = update_dict.get('ttl')
if ttl is not None:
DnsRecord.query.filter_by(domain_name=domain_name).update({'ttl': ttl})
        # if the record type is changed to CNAME, delete all existing A records
if update_dict.get('record_type') == 'CNAME':
DnsRecord.query.filter_by(domain_name=domain_name, record_type='A').delete()
return ZoneRecordDal.increase_serial_num(zone)
@staticmethod
@commit_on_success
def delete_record(domain_name, record, record_type):
dns_records = DnsRecord.query.filter_by(domain_name=domain_name, record=record, record_type=record_type).all()
if len(dns_records) == 0:
raise BadParam("No such a record:[domain name:%s, record:%s, type:%s]" %
(domain_name, record, record_type), msg_ch=u'记录不存在')
zone = dns_records[0].zone_name
DnsRecord.query.filter_by(domain_name=domain_name, record=record, record_type=record_type).delete()
return ZoneRecordDal.increase_serial_num(zone)
@staticmethod
def get_domain_records(**kwargs):
return [item.json_serialize() for item in DnsRecord.query.filter_by(**kwargs)]
@staticmethod
def search_domain_records(field, pattern):
return [item.json_serialize() for item in DnsRecord.query.filter(getattr(DnsRecord, field).like(pattern))]
|
from django.conf import settings
from django.contrib.auth.backends import ModelBackend as BaseModelBackend
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_model
class ModelBackend(BaseModelBackend):
def authenticate(self, username=None, password=None):
try:
user = self.user_class.objects.get(username__iexact=username)
if user.check_password(password):
return user
except self.user_class.DoesNotExist:
return None
def get_user(self, user_id):
try:
return self.user_class.objects.get(pk=user_id)
except self.user_class.DoesNotExist:
return None
@property
def user_class(self):
if not hasattr(self, '_user_class'):
self._user_class = get_model(*settings.USER_MODEL.split('.', 2))
if not self._user_class:
raise ImproperlyConfigured('Could not get custom user model')
return self._user_class
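# Hypothetical settings this backend relies on (the setting name USER_MODEL comes from the code
# above; the dotted backend path below is only an example, not taken from this project):
#   USER_MODEL = "accounts.User"
#   AUTHENTICATION_BACKENDS = ("myproject.backends.ModelBackend",)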
|
import copy
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.links import link_acl_list
from mayan.apps.documents.permissions import permission_document_view
from mayan.apps.navigation.classes import Link
from mayan.apps.navigation.utils import get_cascade_condition
from .icons import (
icon_review_list, icon_review_create, icon_review_edit, icon_review_delete, icon_candidate_create,
icon_review_view, icon_candidate_list, icon_candidate_edit, icon_candidate_delete
)
from .permissions import (
permission_review_create, permission_review_view, permission_candidate_create, permission_candidate_view
)
# Review Links
link_review_create = Link(
icon=icon_review_create, permissions=(permission_review_create,),
text=_('Create Review'), view='reviews:review_create'
)
link_review_list = Link(
condition=get_cascade_condition(
app_label='reviews', model_name='ReviewForm',
object_permission=permission_review_view,
), icon=icon_review_list,
text=_('All Reviews'), view='reviews:review_list'
)
link_review_delete = Link(
args='object.pk', icon=icon_review_delete,
tags='dangerous', text=_('Delete'),
view='reviews:review_delete'
)
link_review_view = Link(
args='object.pk', icon=icon_review_view,
text=_('Details'), view='reviews:review_detail'
)
link_review_edit = Link(
args='object.pk', icon=icon_review_edit,
text=_('Edit'), view='reviews:review_edit'
)
# Candidate Links
link_candidate_create = Link(
icon=icon_candidate_create, permissions=(permission_candidate_create,),
text=_('Create Candidate'), view='reviews:candidate_create'
)
link_candidate_list = Link(
condition=get_cascade_condition(
app_label='reviews', model_name='Candidate',
object_permission=permission_candidate_view,
), icon=icon_candidate_list,
text=_('All Candidates'), view='reviews:candidate_list'
)
link_candidate_edit = Link(
args='object.pk', icon=icon_candidate_edit,
text=_('Edit'), view='reviews:candidate_edit'
)
link_candidate_delete = Link(
args='object.pk', icon=icon_candidate_delete,
tags='dangerous', text=_('Delete'),
view='reviews:candidate_delete'
)
link_candidate_review_list = Link(
args='object.id', icon=icon_review_list,
text=_('Reviews'), view='reviews:candidate_review_list'
) |
import sys
from app import app
# Init httplib2
import httplib2
h = httplib2.Http(".cache")
# HTTP GET /vin:Winery
url = "%svin:Winery" % app.REST_API_URL
print "GET %s" % url
resp, content = h.request(url, "GET")
print content
# Benchmark
from time import time
start = time()
count = 80
for i in range(count):
resp, content = h.request(url, "GET")
sys.stdout.write(".")
end = time()
print("")
print("BENCHMARK")
print("HTTP GET: %s requests / sec" % (count/(end-start)))
|
import sugartensor as tf
__author__ = 'namju.kim@kakaobrain.com'
#
# hyper parameters
#
latent_dim = 400 # hidden layer dimension
num_blocks = 3 # dilated blocks
# residual block
@tf.sg_sugar_func
def sg_res_block(tensor, opt):
# default rate
opt += tf.sg_opt(size=3, rate=1, causal=False, is_first=False)
# input dimension
in_dim = tensor.get_shape().as_list()[-1]
with tf.sg_context(name='block_%d_%d' % (opt.block, opt.rate)):
# reduce dimension
input_ = (tensor
.sg_bypass(act='relu', ln=(not opt.is_first), name='bypass') # do not
.sg_conv1d(size=1, dim=in_dim/2, act='relu', ln=True, name='conv_in'))
# 1xk conv dilated
out = (input_
.sg_aconv1d(size=opt.size, rate=opt.rate, causal=opt.causal, act='relu', ln=True, name='aconv'))
# dimension recover and residual connection
out = out.sg_conv1d(size=1, dim=in_dim, name='conv_out') + tensor
return out
# inject residual multiplicative block
tf.sg_inject_func(sg_res_block)
#
# encode graph ( atrous convolution )
#
def encode(x):
with tf.sg_context(name='encoder'):
res = x
# loop dilated conv block
for i in range(num_blocks):
res = (res
.sg_res_block(size=5, block=i, rate=1, is_first=True)
.sg_res_block(size=5, block=i, rate=2)
.sg_res_block(size=5, block=i, rate=4)
.sg_res_block(size=5, block=i, rate=8)
.sg_res_block(size=5, block=i, rate=16))
return res
#
# decode graph ( causal convolution )
#
def decode(x, voca_size):
with tf.sg_context(name='decoder'):
res = x
# loop dilated causal conv block
for i in range(num_blocks):
res = (res
.sg_res_block(size=3, block=i, rate=1, causal=True, is_first=True)
.sg_res_block(size=3, block=i, rate=2, causal=True)
.sg_res_block(size=3, block=i, rate=4, causal=True)
.sg_res_block(size=3, block=i, rate=8, causal=True)
.sg_res_block(size=3, block=i, rate=16, causal=True))
# final fully convolution layer for softmax
res = res.sg_conv1d(size=1, dim=voca_size, name='conv_final')
return res
|
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use("ggplot")
iris = datasets.load_iris() # loads as a Bunch, similar to a dictionary
X = iris.data # neat way to drop target: X = df.drop('target', axis=1).values
y = iris.target
df = pd.DataFrame(X, columns=iris.feature_names)
scatter_matrix = pd.plotting.scatter_matrix(
df, c=y, figsize=[8, 8], s=80, marker="D"
)  # c = color, s = marker size
knn = KNeighborsClassifier(n_neighbors=6)
assert (
iris["data"].shape[0] == iris["target"].shape[0]
) # same number of observations in features and target
clf = knn.fit(
iris["data"], iris["target"]
) # features must be continuous not categorical
X_new = np.array(
[
[5.6, 2.8, 3.9, 1.1],
[5.7, 2.6, 3.8, 1.3],
[4.7, 3.2, 1.3, 0.3],
]
) # 3 new observations, each with 4 features
assert (
iris["data"].shape[1] == X_new.shape[1]
) # same number of features trained on and in new
y_pred = knn.predict(X_new)
# better approach is to test and split the data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=23, stratify=y
)
# stratified sampling aims at splitting a data set so that each split is similar with respect to something
# i.e. the labels are distributed in train and test as they are in the original dataset
# y is the array containing the labels
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
knn.score(X_test, y_test)
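# Added sketch: verify that the stratified split preserved the class proportions.
# The label frequencies in the full data, the training split and the test split
# should be nearly identical.
for name, labels in [("full", y), ("train", y_train), ("test", y_test)]:
    print(name, np.bincount(labels) / len(labels))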
|
"""Simulation of random streams of data.
This module defines:
- a generator object `data` modeling an infinite stream of integers
- a function `make_finite_stream()` that creates finite streams of data
The probability distribution underlying the integers is Gaussian-like with a
mean of 42 and a standard deviation of 8. The left tail of the distribution is
cut off meaning that the streams only produce non-negative numbers. Further,
one in a hundred random numbers has an increased chance to be an outlier.
"""
import itertools as _itertools
import random as _random
_random.seed(87)
def _infinite_stream():
"""Internal generator function to simulate an infinite stream of data."""
while True:
number = max(0, int(_random.gauss(42, 8)))
if _random.randint(1, 100) == 1:
number *= 2
yield number
def make_finite_stream(min_=5, max_=15):
"""Simulate a finite stream of data.
The returned stream is finite, but the number of elements to be produced
by it is still random. This default behavior may be turned off by passing
in `min_` and `max_` arguments with `min_ == max_`.
Args:
        min_ (optional, int): minimum number of elements in the stream; defaults to 5
        max_ (optional, int): maximum number of elements in the stream; defaults to 15
Returns:
finite_stream (generator)
Raises:
ValueError: if max_ < min_
"""
stream = _infinite_stream()
n = _random.randint(min_, max_)
yield from _itertools.islice(stream, n)
data = _infinite_stream()
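if __name__ == "__main__":
    # Minimal demo sketch (not part of the original module): draw a few numbers
    # from the infinite stream and exhaust one finite stream.
    print([next(data) for _ in range(5)])
    print(list(make_finite_stream()))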
|
# Generated from /Users/nhphung/Documents/fromSamsungLaptop/Monhoc/KS-NNLT/Materials/Assignments/MC/MC1-Python/Assignment2/upload/src/main/mc/parser/MC.g4 by ANTLR 4.7.1
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
from lexererr import *
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\20")
buf.write("N\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3\3\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\5\6\5/\n\5\r\5\16\5\60\3\6\6\6\64")
buf.write("\n\6\r\6\16\6\65\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13")
buf.write("\3\13\3\f\6\fC\n\f\r\f\16\fD\3\f\3\f\3\r\3\r\3\16\3\16")
buf.write("\3\17\3\17\2\2\20\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23")
buf.write("\13\25\f\27\r\31\16\33\17\35\20\3\2\5\4\2C\\c|\3\2\62")
buf.write(";\5\2\13\f\17\17\"\"\2P\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3")
buf.write("\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2")
buf.write("\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2")
buf.write("\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\3\37\3\2\2\2\5")
buf.write("$\3\2\2\2\7(\3\2\2\2\t.\3\2\2\2\13\63\3\2\2\2\r\67\3\2")
buf.write("\2\2\179\3\2\2\2\21;\3\2\2\2\23=\3\2\2\2\25?\3\2\2\2\27")
buf.write("B\3\2\2\2\31H\3\2\2\2\33J\3\2\2\2\35L\3\2\2\2\37 \7o\2")
buf.write("\2 !\7c\2\2!\"\7k\2\2\"#\7p\2\2#\4\3\2\2\2$%\7k\2\2%&")
buf.write("\7p\2\2&\'\7v\2\2\'\6\3\2\2\2()\7x\2\2)*\7q\2\2*+\7k\2")
buf.write("\2+,\7f\2\2,\b\3\2\2\2-/\t\2\2\2.-\3\2\2\2/\60\3\2\2\2")
buf.write("\60.\3\2\2\2\60\61\3\2\2\2\61\n\3\2\2\2\62\64\t\3\2\2")
buf.write("\63\62\3\2\2\2\64\65\3\2\2\2\65\63\3\2\2\2\65\66\3\2\2")
buf.write("\2\66\f\3\2\2\2\678\7*\2\28\16\3\2\2\29:\7+\2\2:\20\3")
buf.write("\2\2\2;<\7}\2\2<\22\3\2\2\2=>\7\177\2\2>\24\3\2\2\2?@")
buf.write("\7=\2\2@\26\3\2\2\2AC\t\4\2\2BA\3\2\2\2CD\3\2\2\2DB\3")
buf.write("\2\2\2DE\3\2\2\2EF\3\2\2\2FG\b\f\2\2G\30\3\2\2\2HI\13")
buf.write("\2\2\2I\32\3\2\2\2JK\13\2\2\2K\34\3\2\2\2LM\13\2\2\2M")
buf.write("\36\3\2\2\2\6\2\60\65D\3\b\2\2")
return buf.getvalue()
class MCLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
INTTYPE = 2
VOIDTYPE = 3
ID = 4
INTLIT = 5
LB = 6
RB = 7
LP = 8
RP = 9
SEMI = 10
WS = 11
ERROR_CHAR = 12
UNCLOSE_STRING = 13
ILLEGAL_ESCAPE = 14
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'main'", "'int'", "'void'", "'('", "')'", "'{'", "'}'", "';'" ]
symbolicNames = [ "<INVALID>",
"INTTYPE", "VOIDTYPE", "ID", "INTLIT", "LB", "RB", "LP", "RP",
"SEMI", "WS", "ERROR_CHAR", "UNCLOSE_STRING", "ILLEGAL_ESCAPE" ]
ruleNames = [ "T__0", "INTTYPE", "VOIDTYPE", "ID", "INTLIT", "LB", "RB",
"LP", "RP", "SEMI", "WS", "ERROR_CHAR", "UNCLOSE_STRING",
"ILLEGAL_ESCAPE" ]
grammarFileName = "MC.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
|
# MIT License
#
# Copyright (c) 2022 Quandela
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import perceval as pcvl
import quandelibc as qc
import thewalrus
import numpy as np
import time
# benchmark inspired from https://the-walrus.readthedocs.io/en/latest/gallery/permanent_tutorial.html
a0 = 300.
anm1 = 2
n = 28
r = (anm1/a0)**(1./(n-1))
nreps = [int(a0 * r**i) for i in range(n)]
times_walrus = np.empty(n)
times_qc_1 = np.empty(n)
times_qc_4 = np.empty(n)
times_qc_0 = np.empty(n)
for ind, reps in enumerate(nreps):
#print(ind+1,reps)
matrices = []
for i in range(reps):
size = ind+1
nth = 1
matrices.append(pcvl.Matrix.random_unitary(size))
start_walrus = time.time()
for matrix in matrices:
res = thewalrus.perm(matrix)
end_walrus = time.time()
start_qc_1 = time.time()
for matrix in matrices:
res = qc.permanent_cx(matrix, 2)
end_qc_1 = time.time()
start_qc_4 = time.time()
for matrix in matrices:
res = qc.permanent_cx(matrix, 4)
end_qc_4 = time.time()
start_qc_0 = time.time()
for matrix in matrices:
res = qc.permanent_cx(matrix, 0)
end_qc_0 = time.time()
times_walrus[ind] = (end_walrus - start_walrus)/reps
times_qc_1[ind] = (end_qc_1 - start_qc_1)/reps
times_qc_4[ind] = (end_qc_4 - start_qc_4)/reps
times_qc_0[ind] = (end_qc_0 - start_qc_0)/reps
print(ind+1, times_walrus[ind], times_qc_1[ind], times_qc_4[ind], times_qc_0[ind])
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_formats=['svg']
plt.semilogy(np.arange(1,n+1),times_walrus,"+")
plt.semilogy(np.arange(1,n+1),times_qc_1,"*")
plt.semilogy(np.arange(1,n+1),times_qc_4,"-")
plt.semilogy(np.arange(1,n+1),times_qc_0,"x")
plt.xlabel(r"Matrix size $n$")
|
import math
import datetime
import os
import pandas
import matplotlib.pyplot as plt
import numpy
import random
from unityagents import UnityEnvironment
import torch
import torch.nn
import torch.optim
from collections import deque, namedtuple
import click
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, buffer_size):
self.memory = deque(maxlen=buffer_size)
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self, batch_size: int, device):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=batch_size)
states = torch.from_numpy(numpy.vstack([e.state for e in experiences])).float().to(device)
actions = torch.from_numpy(numpy.vstack([e.action for e in experiences])).long().to(device)
rewards = torch.from_numpy(numpy.vstack([e.reward for e in experiences])).float().to(device)
next_states = torch.from_numpy(numpy.vstack([e.next_state for e in experiences])).float().to(device)
dones = torch.from_numpy(numpy.vstack([e.done for e in experiences]).astype(numpy.uint8)).float().to(device)
return states, actions, rewards, next_states, dones
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
class QNet(torch.nn.Module):
""" Deep Q Network approximating the state-action value function """
def __init__(self, input_dim: int, action_no):
super().__init__()
self._net = torch.nn.Sequential(
torch.nn.Linear(input_dim, 96),
torch.nn.ReLU(),
torch.nn.Linear(96, 96),
torch.nn.ReLU(),
torch.nn.Linear(96, action_no)
)
def forward(self, x):
return self._net(x)
class Agent0:
LEARNING_RATE = 0.0005
UPDATE_EVERY = 4
REPLAY_BUFFER_SIZE = 100_000
BATCH_SIZE = 128
GAMMA = 0.99
def __init__(self, state_space_dim: int, no_actions: int, device):
self.state_space_dim = state_space_dim
self.no_actions = no_actions
self.device = device
self.q_net = QNet(self.state_space_dim, self.no_actions)
self.q_net.to(device)
self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=self.LEARNING_RATE)
self.loss = torch.nn.MSELoss()
self._replay_buffer = ReplayBuffer(self.REPLAY_BUFFER_SIZE)
self.t = 1 # counts the calls to the learn() method
def load_weights(self, file_name: str):
""" Loads the DQN weights from a file and sets the Agent to test mode """
self.q_net.load_state_dict(torch.load(file_name))
self.q_net.eval()
self.t = 1800.0*300.0
def save_weights(self, file_name: str):
""" Save DQN weights to file """
torch.save(self.q_net.state_dict(), file_name)
def epsilon(self):
""" Returns the probability of taking a random action during the training time """
return math.exp(-self.t*0.00002)
def get_action(self, state):
""" Produce an optimal action for a given state """
if random.random() <= self.epsilon():
return random.randint(0, self.no_actions-1)
state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)
self.q_net.eval()
with torch.no_grad():
action_values = self.q_net(state)
self.q_net.train()
return numpy.argmax(action_values.cpu().detach().numpy())
def learn(self, state, action, reward, next_state, done):
self.t += 1
self._replay_buffer.add(state, action, reward, next_state, done)
if self.t % self.UPDATE_EVERY != 0:
return
if len(self._replay_buffer) < self.BATCH_SIZE:
return
states, actions, rewards, next_states, dones = self._replay_buffer.sample(self.BATCH_SIZE, self.device)
        # Get max predicted Q values (for next states); no separate target network is used here
q_targets_next = self.q_net(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
q_targets = rewards + (self.GAMMA * q_targets_next * (1 - dones))
# Get expected Q values from local model
q_expected = self.q_net(states).gather(1, actions)
loss_value = self.loss(q_expected, q_targets)
self.optimizer.zero_grad()
loss_value.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
# self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class UnityEnvWrapper:
""" This class provides gym-like wrapper around the unity environment """
def __init__(self, env_file: str = 'Banana_Linux_NoVis/Banana.x86_64'):
self._env = UnityEnvironment(file_name=env_file)
self._brain_name = self._env.brain_names[0]
self._brain = self._env.brains[self._brain_name]
env_info = self._env.reset(train_mode=True)[self._brain_name]
state = env_info.vector_observations[0]
self.state_space_dim = len(state)
self.action_space_size = self._brain.vector_action_space_size
def reset(self, train_mode: bool = False):
env_info = self._env.reset(train_mode)[self._brain_name]
state = env_info.vector_observations[0]
return state
def step(self, action):
env_info = self._env.step(action)[self._brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
return next_state, reward, done, None
def close(self):
self._env.close()
def train(max_episodes: int):
""" Train the agent using a head-less environment and save the DQN weights when done """
env = UnityEnvWrapper('Banana_Linux_NoVis/Banana.x86_64')
agent = Agent0(env.state_space_dim, env.action_space_size, DEVICE)
data = []
scores = []
for episode in range(1, max_episodes):
state = env.reset(train_mode=True)
score = 0
for step in range(1, 300):
action = agent.get_action(state)
next_state, reward, done, _ = env.step(action)
agent.learn(state, action, reward, next_state, done)
score += reward
state = next_state
if done:
break
scores.append(score)
rolling_average_score = sum(scores[-100:])/min(episode, 100)
data.append([score, rolling_average_score])
print(f'Episode {episode}. Final score {score}. Average score (last 100 episodes) {rolling_average_score}.')
# Save weights and score series
now_str = datetime.datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
os.makedirs('runs', exist_ok=True)
agent.save_weights(f'runs/weights-{now_str}.bin')
# Plot average scores
df = pandas.DataFrame(data=data, index=range(1, max_episodes), columns=['score', 'rolling_avg_score'])
df.to_csv(f'runs/scores-{now_str}.csv')
plt.figure(figsize=(8, 6), dpi=120)
plt.tight_layout()
df['rolling_avg_score'].plot(grid=True, colormap='cubehelix')
plt.savefig(f'runs/scores-{now_str}.png')
def test(weights_file_name: str):
""" Load DQN weights and run the agent """
env = UnityEnvWrapper('Banana_Linux/Banana.x86_64')
agent = Agent0(env.state_space_dim, env.action_space_size, DEVICE)
agent.load_weights(weights_file_name)
state = env.reset(train_mode=False)
score = 0
for step in range(1, 300):
action = agent.get_action(state)
next_state, reward, done, _ = env.step(action)
print(f'Step {step}. Action {action}. Reward {reward}.')
score += reward
state = next_state
if done:
break
print(f'Final score {score}.')
env.close()
@click.group()
@click.version_option()
def cli():
""" deep_banana_eater -- command line interface """
@cli.command('train')
@click.option('--max-episodes', type=click.INT, default=2000)
def train_command(max_episodes: int):
""" Train the agent using a head-less environment and save the DQN weights when done """
train(max_episodes)
@cli.command('test')
@click.option('--load-weights-from', type=click.Path(dir_okay=False, file_okay=True, readable=True, exists=True))
def test_command(load_weights_from: str):
""" Load DQN weights and run the agent """
test(load_weights_from)
if __name__ == '__main__':
cli()
|
from flopy.mbase import Package
class Mt3dPhc(Package):
'''
PHC package class for PHT3D
'''
def __init__(self, model, os=2, temp=25, asbin=0, eps_aqu=0, eps_ph=0,
scr_output=1, cb_offset=0, smse=['pH', 'pe'], mine=[], ie=[],
surf=[], mobkin=[], minkin=[], surfkin=[], imobkin=[],
extension='phc'):
#Call ancestor's init to set self.parent, extension, name and
#unit number
Package.__init__(self, model, extension, 'PHC', 38)
self.os = os
self.temp = temp
self.asbin = asbin
self.eps_aqu = eps_aqu
self.eps_ph = eps_ph
self.scr_output = scr_output
self.cb_offset = cb_offset
self.smse = smse
self.nsmse = len(self.smse)
self.mine = mine
self.nmine = len(self.mine)
self.ie = ie
self.nie = len(self.ie)
self.surf = surf
self.nsurf = len(self.surf)
self.mobkin = mobkin
self.nmobkin = len(self.mobkin)
        # minkin is expected as [mineral_names, mineral_parameters]; guard the empty default
        self.minkin = minkin[0] if minkin else []
        self.minkin_parms = minkin[1] if minkin else []
        self.nminkin = len(self.minkin)
self.surfkin = surfkin
self.nsurfkin = len(self.surfkin)
self.imobkin = imobkin
self.nimobkin = len(self.imobkin)
self.parent.add_package(self)
return
def __repr__( self ):
return 'PHC package class for PHT3D'
def write_file(self):
# Open file for writing
f_phc = open(self.fn_path, 'w')
f_phc.write('%3d%10f%3d%10f%10f%3d\n' % (self.os, self.temp,
self.asbin, self.eps_aqu,
self.eps_ph, self.scr_output))
f_phc.write('%10f\n' % (self.cb_offset))
f_phc.write('%3d\n' % (self.nsmse))
f_phc.write('%3d\n' % (self.nmine))
f_phc.write('%3d\n' % (self.nie))
f_phc.write('%3d\n' % (self.nsurf))
f_phc.write('%3d%3d%3d%3d\n' % (self.nmobkin, self.nminkin,
self.nsurfkin, self.nimobkin))
for s in self.smse:
f_phc.write('%s\n' % (s))
i = 0
for m in self.minkin:
f_phc.write('%s %d\n' % (m, len(self.minkin_parms[i])))
for n in self.minkin_parms[i]:
f_phc.write('\t%10f\n' % (n))
i = i + 1
f_phc.close()
return
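# Minimal usage sketch (hypothetical model object and mineral values, shown only to
# illustrate the expected `minkin` structure, i.e. [mineral_names, mineral_parameters]):
#
#     phc = Mt3dPhc(mt3d_model, smse=['pH', 'pe'],
#                   minkin=[['Calcite'], [[1.0e-6, 0.5]]])
#     phc.write_file()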
|
from logging import getLogger
import numpy as np
import pandas as pd
logger = getLogger('predict').getChild('BaseDataTranslater')
if 'ConfigReader' not in globals():
from ..ConfigReader import ConfigReader
if 'LikeWrapper' not in globals():
from ..commons.LikeWrapper import LikeWrapper
class BaseDataTranslater(ConfigReader, LikeWrapper):
def __init__(self):
pass
def _calc_raw_data(self):
train_path = self.configs['data']['train_path']
test_path = self.configs['data']['test_path']
delim = self.configs['data'].get('delimiter')
if delim:
train_df = pd.read_csv(train_path, delimiter=delim)
test_df = pd.read_csv(test_path, delimiter=delim)
else:
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
self.pred_df = train_df[self.pred_cols]
self.train_df = train_df.drop(self.pred_cols, axis=1)
self.test_df = test_df
self.raw_pred_df = self.pred_df.copy()
self.raw_train_df = self.train_df.copy()
self.raw_test_df = self.test_df.copy()
return
def get_df_data(self):
output = {
'raw_train_df': self.raw_train_df,
'raw_test_df': self.raw_test_df,
'raw_pred_df': self.raw_pred_df,
'train_df': self.train_df,
'test_df': self.test_df,
'pred_df': self.pred_df,
}
return output
def write_train_data(self):
savename = self.configs['pre'].get('savename')
if not savename:
logger.warning('NO SAVENAME')
return
savename += '.npy'
output_path = self.configs['data']['output_dir']
np.save(
f'{output_path}/feature_columns_{savename}',
self.feature_columns)
np.save(f'{output_path}/train_ids_{savename}', self.train_ids)
np.save(f'{output_path}/test_ids_{savename}', self.test_ids)
np.save(f'{output_path}/X_train_{savename}', self.X_train)
np.save(f'{output_path}/Y_train_{savename}', self.Y_train)
np.save(f'{output_path}/X_test_{savename}', self.X_test)
return savename
def get_train_data(self):
logger.info(f'X_train shape: {self.X_train.shape}')
logger.info(f'Y_train shape: {self.Y_train.shape}')
logger.info(f'X_test shape: {self.X_test.shape}')
output = {
'feature_columns': self.feature_columns,
'train_ids': self.train_ids,
'test_ids': self.test_ids,
'X_train': self.X_train,
'Y_train': self.Y_train,
'X_test': self.X_test,
}
return output
def get_pre_processers(self):
output = {}
        if hasattr(self, 'target_encoding_model'):
            output['encoding_model'] = \
                self.target_encoding_model
return output
def get_post_processers(self):
output = {}
return output
def _translate_y_pre(self):
if self.configs['pre']['train_mode'] != 'reg':
return
y_pre = self.configs['pre'].get('y_pre')
if not y_pre:
return
logger.info('translate y_train with %s' % y_pre)
if y_pre == 'log':
self.Y_train = np.log(self.Y_train)
else:
logger.error('NOT IMPLEMENTED FIT Y_PRE: %s' % y_pre)
raise Exception('NOT IMPLEMENTED')
return
|
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from nose import SkipTest
import numpy as np
from numpy.testing import assert_allclose, assert_raises, assert_equal
from scipy.sparse import isspmatrix
from scipy.spatial.distance import cdist, pdist, squareform
from megaman.geometry import (Geometry, compute_adjacency_matrix, Adjacency,
adjacency_methods)
try:
import pyflann as pyf
NO_PYFLANN = False
except ImportError:
NO_PYFLANN = True
def test_adjacency_methods():
assert_equal(set(adjacency_methods()),
{'auto', 'pyflann', 'ball_tree',
'cyflann', 'brute', 'kd_tree'})
def test_adjacency_input_validation():
X = np.random.rand(20, 3)
# need to specify radius or n_neighbors
assert_raises(ValueError, compute_adjacency_matrix, X)
# cannot specify both radius and n_neighbors
assert_raises(ValueError, compute_adjacency_matrix, X,
radius=1, n_neighbors=10)
def test_adjacency():
X = np.random.rand(100, 3)
Gtrue = {}
exact_methods = [m for m in Adjacency.methods()
if not m.endswith('flann')]
def check_kneighbors(n_neighbors, method):
if method == 'pyflann' and NO_PYFLANN:
raise SkipTest("pyflann not installed")
G = compute_adjacency_matrix(X, method=method,
n_neighbors=n_neighbors)
assert isspmatrix(G)
assert G.shape == (X.shape[0], X.shape[0])
if method in exact_methods:
assert_allclose(G.toarray(), Gtrue[n_neighbors].toarray())
def check_radius(radius, method):
if method == 'pyflann' and NO_PYFLANN:
raise SkipTest("pyflann not installed")
G = compute_adjacency_matrix(X, method=method,
radius=radius)
assert isspmatrix(G)
assert G.shape == (X.shape[0], X.shape[0])
if method in exact_methods:
assert_allclose(G.toarray(), Gtrue[radius].toarray())
for n_neighbors in [5, 10, 15]:
Gtrue[n_neighbors] = compute_adjacency_matrix(X, method='brute',
n_neighbors=n_neighbors)
for method in Adjacency.methods():
yield check_kneighbors, n_neighbors, method
for radius in [0.1, 0.5, 1.0]:
Gtrue[radius] = compute_adjacency_matrix(X, method='brute',
radius=radius)
for method in Adjacency.methods():
yield check_radius, radius, method
def test_unknown_method():
X = np.arange(20).reshape((10, 2))
assert_raises(ValueError, compute_adjacency_matrix, X, 'foo')
def test_all_methods_close():
rand = np.random.RandomState(36)
X = rand.randn(10, 2)
D_true = squareform(pdist(X))
D_true[D_true > 0.5] = 0
def check_method(method):
kwargs = {}
if method == 'pyflann':
try:
import pyflann as pyf
except ImportError:
raise SkipTest("pyflann not installed.")
flindex = pyf.FLANN()
flindex.build_index(X, algorithm='kmeans',
target_precision=0.9)
kwargs['flann_index'] = flindex
this_D = compute_adjacency_matrix(X, method=method, radius=0.5,
**kwargs)
assert_allclose(this_D.toarray(), D_true, rtol=1E-5)
for method in ['auto', 'cyflann', 'pyflann', 'brute']:
yield check_method, method
def test_custom_adjacency():
class CustomAdjacency(Adjacency):
name = "custom"
def adjacency_graph(self, X):
return squareform(pdist(X))
rand = np.random.RandomState(42)
X = rand.rand(10, 2)
D = compute_adjacency_matrix(X, method='custom', radius=1)
assert_allclose(D, cdist(X, X))
Adjacency._remove_from_registry("custom")
|
import matplotlib.pyplot as plt
from pkg_resources import resource_filename
from .analysis import model_history_plot, prediction_parity_plot
from .augmentations import (
mixture_animation,
prototypical_spectra_plot,
single_source_animation,
)
from .base import _colorbar, eem_plot
from .preprocessing import (
absorbance_plot,
calibration_curves_plot,
preprocessing_routine_plot,
water_raman_peak_animation,
water_raman_peak_plot,
water_raman_timeseries,
)
pyeem_base_style = resource_filename("pyeem.plots", "pyeem_base.mplstyle")
plt.style.use(pyeem_base_style)
__all__ = [
"eem_plot",
"absorbance_plot",
"water_raman_peak_plot",
"water_raman_peak_animation",
"water_raman_timeseries",
"preprocessing_routine_plot",
"calibration_curves_plot",
"prototypical_spectra_plot",
"single_source_animation",
"mixture_animation",
"model_history_plot",
"prediction_parity_plot",
]
|
import csv
LINES_CSV = 'lines.csv'
MVR_CSV = 'mvr.csv'
OUT_CSV = 'out.csv'
WORD_LEN = 4
INJURY_WORDS = ['injury', 'fatal', 'pi', 'homicide', 'death']
with open(OUT_CSV, 'w', newline='') as out_csvfile:
fieldnames = ['svc_code', 'description', 'augusta_risk_type', 'bodily_injury']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames, delimiter='\t')
writer.writeheader()
with open(MVR_CSV, newline='') as mvr_csvfile:
mvr_reader = csv.DictReader(mvr_csvfile, delimiter='\t')
for mvr_row in mvr_reader:
matching_indexes = []
injury_flag = False
with open(LINES_CSV, newline='') as lines_csvfile:
lines_reader = csv.DictReader(lines_csvfile, delimiter='\t')
for lines_row in lines_reader:
lines_words = [
''.join(ch for ch in x if ch.isalnum())
for x in lines_row['line_def'].split(' ')
if len(x) > WORD_LEN
]
if any([word in mvr_row['desc'] for word in lines_words]):
# Add line index to outputs for this row
matching_indexes.append(lines_row['index'])
injury_flag = any([word in mvr_row['desc'] for word in INJURY_WORDS])
if not matching_indexes:
matching_indexes.append('Unknown')
writer.writerow({
'svc_code': mvr_row['svc_code'],
'description': mvr_row['desc'].upper(),
'augusta_risk_type': ', '.join(matching_indexes),
'bodily_injury': injury_flag
})
|
import pytest
import asyncio
from async_generator import asynccontextmanager
import ssz
from p2p.peer import (
MsgBuffer,
)
from eth2.beacon.types.blocks import (
BeaconBlock,
)
from trinity.constants import TO_NETWORKING_BROADCAST_CONFIG
from trinity.protocol.bcc.commands import (
BeaconBlocks,
)
from trinity.protocol.bcc.events import GetBeaconBlocksEvent
from trinity.protocol.bcc.servers import BCCRequestServer
from trinity.protocol.bcc.peer import BCCPeerPoolEventServer
from trinity.tools.bcc_factories import (
BeaconBlockFactory,
BeaconContextFactory,
AsyncBeaconChainDBFactory,
BCCPeerPoolFactory,
BCCPeerPairFactory,
)
from tests.core.integration_test_helpers import (
run_peer_pool_event_server,
)
from eth2.beacon.fork_choice.higher_slot import higher_slot_scoring
from eth2.beacon.state_machines.forks.serenity import SERENITY_CONFIG
@asynccontextmanager
async def get_request_server_setup(request, event_loop, event_bus, chain_db):
genesis = await chain_db.coro_get_canonical_block_by_slot(
SERENITY_CONFIG.GENESIS_SLOT,
BeaconBlock,
)
alice_chain_db = AsyncBeaconChainDBFactory(blocks=(genesis,))
alice_context = BeaconContextFactory(chain_db=alice_chain_db)
bob_context = BeaconContextFactory(chain_db=chain_db)
peer_pair = BCCPeerPairFactory(
alice_peer_context=alice_context,
bob_peer_context=bob_context,
event_bus=event_bus,
)
async with peer_pair as (alice, bob):
async with BCCPeerPoolFactory.run_for_peer(bob) as bob_peer_pool: # noqa: E501
response_buffer = MsgBuffer()
alice.add_subscriber(response_buffer)
async with run_peer_pool_event_server(
event_bus, bob_peer_pool, handler_type=BCCPeerPoolEventServer
):
bob_request_server = BCCRequestServer(
event_bus, TO_NETWORKING_BROADCAST_CONFIG, bob_context.chain_db)
asyncio.ensure_future(bob_request_server.run())
await event_bus.wait_until_all_endpoints_subscribed_to(GetBeaconBlocksEvent)
def finalizer():
event_loop.run_until_complete(bob_request_server.cancel())
request.addfinalizer(finalizer)
yield alice, response_buffer
@pytest.mark.asyncio
async def test_get_single_block_by_slot(request, event_loop, event_bus):
block = BeaconBlockFactory()
chain_db = AsyncBeaconChainDBFactory(blocks=(block,))
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
        alice.sub_proto.send_get_blocks(block.slot, 1, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload == {
"request_id": 5,
"encoded_blocks": (ssz.encode(block),),
}
@pytest.mark.asyncio
async def test_get_single_block_by_root(request, event_loop, event_bus):
block = BeaconBlockFactory()
chain_db = AsyncBeaconChainDBFactory(blocks=(block,))
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
        alice.sub_proto.send_get_blocks(block.signing_root, 1, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload == {
"request_id": 5,
"encoded_blocks": (ssz.encode(block),),
}
@pytest.mark.asyncio
async def test_get_no_blocks(request, event_loop, event_bus):
block = BeaconBlockFactory()
chain_db = AsyncBeaconChainDBFactory(blocks=(block,))
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
alice.sub_proto.send_get_blocks(block.slot, 0, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload == {
"request_id": 5,
"encoded_blocks": (),
}
@pytest.mark.asyncio
async def test_get_unknown_block_by_slot(request, event_loop, event_bus):
block = BeaconBlockFactory()
chain_db = AsyncBeaconChainDBFactory(blocks=(block,))
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
alice.sub_proto.send_get_blocks(block.slot + 100, 1, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload == {
"request_id": 5,
"encoded_blocks": (),
}
@pytest.mark.asyncio
async def test_get_unknown_block_by_root(request, event_loop, event_bus):
block = BeaconBlockFactory()
chain_db = AsyncBeaconChainDBFactory(blocks=(block,))
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
alice.sub_proto.send_get_blocks(b"\x00" * 32, 1, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload == {
"request_id": 5,
"encoded_blocks": (),
}
@pytest.mark.asyncio
async def test_get_canonical_block_range_by_slot(request, event_loop, event_bus):
chain_db = AsyncBeaconChainDBFactory(blocks=())
genesis = BeaconBlockFactory()
base_branch = BeaconBlockFactory.create_branch(3, root=genesis)
non_canonical_branch = BeaconBlockFactory.create_branch(
3,
root=base_branch[-1],
state_root=b"\x00" * 32,
)
canonical_branch = BeaconBlockFactory.create_branch(
4,
root=base_branch[-1],
state_root=b"\x11" * 32,
)
for branch in [[genesis], base_branch, non_canonical_branch, canonical_branch]:
scorings = (higher_slot_scoring for block in branch)
await chain_db.coro_persist_block_chain(branch, BeaconBlock, scorings)
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
alice.sub_proto.send_get_blocks(genesis.slot + 2, 4, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload["request_id"] == 5
blocks = tuple(
ssz.decode(block, BeaconBlock) for block in response.payload["encoded_blocks"])
assert len(blocks) == 4
assert [block.slot for block in blocks] == [genesis.slot + s for s in [2, 3, 4, 5]]
assert blocks == base_branch[1:] + canonical_branch[:2]
@pytest.mark.asyncio
async def test_get_canonical_block_range_by_root(request, event_loop, event_bus):
chain_db = AsyncBeaconChainDBFactory(blocks=())
genesis = BeaconBlockFactory()
base_branch = BeaconBlockFactory.create_branch(3, root=genesis)
non_canonical_branch = BeaconBlockFactory.create_branch(
3,
root=base_branch[-1],
state_root=b"\x00" * 32,
)
canonical_branch = BeaconBlockFactory.create_branch(
4,
root=base_branch[-1],
state_root=b"\x11" * 32,
)
for branch in [[genesis], base_branch, non_canonical_branch, canonical_branch]:
scorings = (higher_slot_scoring for block in branch)
await chain_db.coro_persist_block_chain(branch, BeaconBlock, scorings)
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
alice.sub_proto.send_get_blocks(base_branch[1].signing_root, 4, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload["request_id"] == 5
blocks = tuple(
ssz.decode(block, BeaconBlock) for block in response.payload["encoded_blocks"])
assert len(blocks) == 4
assert [block.slot for block in blocks] == [genesis.slot + s for s in [2, 3, 4, 5]]
assert blocks == base_branch[1:] + canonical_branch[:2]
@pytest.mark.asyncio
async def test_get_incomplete_canonical_block_range(request, event_loop, event_bus):
chain_db = AsyncBeaconChainDBFactory(blocks=())
genesis = BeaconBlockFactory()
base_branch = BeaconBlockFactory.create_branch(3, root=genesis)
non_canonical_branch = BeaconBlockFactory.create_branch(
3,
root=base_branch[-1],
state_root=b"\x00" * 32,
)
canonical_branch = BeaconBlockFactory.create_branch(
4,
root=base_branch[-1],
state_root=b"\x11" * 32,
)
for branch in [[genesis], base_branch, non_canonical_branch, canonical_branch]:
scorings = (higher_slot_scoring for block in branch)
await chain_db.coro_persist_block_chain(branch, BeaconBlock, scorings)
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
alice.sub_proto.send_get_blocks(genesis.slot + 3, 10, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload["request_id"] == 5
blocks = tuple(
ssz.decode(block, BeaconBlock) for block in response.payload["encoded_blocks"])
assert len(blocks) == 5
assert [block.slot for block in blocks] == [genesis.slot + s for s in [3, 4, 5, 6, 7]]
assert blocks == base_branch[-1:] + canonical_branch
@pytest.mark.asyncio
async def test_get_non_canonical_branch(request, event_loop, event_bus):
chain_db = AsyncBeaconChainDBFactory(blocks=())
genesis = BeaconBlockFactory()
base_branch = BeaconBlockFactory.create_branch(3, root=genesis)
non_canonical_branch = BeaconBlockFactory.create_branch(
3,
root=base_branch[-1],
state_root=b"\x00" * 32,
)
canonical_branch = BeaconBlockFactory.create_branch(
4,
root=base_branch[-1],
state_root=b"\x11" * 32,
)
for branch in [[genesis], base_branch, non_canonical_branch, canonical_branch]:
scorings = (higher_slot_scoring for block in branch)
await chain_db.coro_persist_block_chain(branch, BeaconBlock, scorings)
async with get_request_server_setup(
request, event_loop, event_bus, chain_db
) as (alice, response_buffer):
alice.sub_proto.send_get_blocks(non_canonical_branch[1].signing_root, 3, request_id=5)
response = await response_buffer.msg_queue.get()
assert isinstance(response.command, BeaconBlocks)
assert response.payload["request_id"] == 5
blocks = tuple(
ssz.decode(block, BeaconBlock) for block in response.payload["encoded_blocks"])
assert len(blocks) == 1
assert blocks[0].slot == genesis.slot + 5
assert blocks[0] == non_canonical_branch[1]
|
# Generated by Django 2.0 on 2019-01-31 07:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0050_auto_20190129_0808'),
]
operations = [
migrations.AddField(
model_name='projecttodoitem',
name='personal_list_order',
field=models.FloatField(null=True),
),
migrations.AddField(
model_name='projecttodoitem',
name='project_list_order',
field=models.FloatField(null=True),
),
]
|
import threading
class StoppableThread(threading.Thread):
"""
Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition.
"""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
print(f"{type(self).__name__} has stopped")
def stopped(self):
return self._stop_event.is_set()
def ProgressHook(hook):
pass
class YDTLThread(StoppableThread):
def __init__(self, url, options, logger, debug=False):
super(YDTLThread, self).__init__()
self.debug = debug
self.url = url
self.options = options
self.options["progress_hooks"] = ProgressHook
self.logger = logger
def start(self):
pass
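if __name__ == "__main__":
    # Minimal demo sketch (not part of the original module): a worker subclass that
    # polls `stopped()` regularly, as required by StoppableThread.
    import time

    class _DemoWorker(StoppableThread):
        def run(self):
            while not self.stopped():
                time.sleep(0.1)  # do a small unit of work, then re-check the flag

    worker = _DemoWorker()
    worker.start()
    time.sleep(0.3)
    worker.stop()
    worker.join()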
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from towhee.dataframe.dataframe import DFIterator, DataFrame
from towhee.engine.operator_context import OperatorContext
class Variable:
"""
A Variable can be part of an Operator's inputs or outputs.
"""
def __init__(self, name: str, df: DataFrame, iter: DFIterator, op_ctx: OperatorContext):
"""
Args:
name: the Variable's name
df: the DataFrame this Variable belongs to
iter: the DataFrame's iterator
op_ctx: the OperatorContext this Variable belongs to.
"""
self.name = name
self.df = df
self.iter = iter
self.op_ctx = op_ctx |
ACCESS_KEY = '@@{cred_aws.username}@@'
SECRET_KEY = '@@{cred_aws.secret}@@'
AWS_REGION = '@@{clusters_geolocation}@@'
INSTANCE_ID = '@@{ec2_instance_id}@@'
import boto3
from botocore.exceptions import ClientError
boto3.setup_default_session(
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
region_name=AWS_REGION
)
client = boto3.client('ec2')
try:
    # Allocate an Elastic IP
allocation = client.allocate_address(Domain='vpc')
print("Allocation Id: "+ allocation['AllocationId'] + " Public IP: " + allocation['PublicIp'])
# Associate Elastic IP with an ec2 instance
response = client.associate_address(AllocationId=allocation['AllocationId'],
InstanceId=INSTANCE_ID)
print(response)
except ClientError as e:
print(e)
|
#!/usr/bin/env python3
import re
MASK_RE = re.compile(r'mask\s*=\s*([01X]+)\s*$')
MEM_RE = re.compile(r'mem\[(\d+)\]\s*=\s*(\d+)\s*$')
zero_mask = 0
one_mask = 0
mem = {}
with open('input.txt', 'r') as instructions:
for inst in instructions:
if inst.startswith('mask'):
m = MASK_RE.match(inst)
zero_mask = 0
one_mask = 0
for c in m[1]:
if c == '0':
zero_mask = zero_mask * 2
one_mask = one_mask * 2
elif c == '1':
zero_mask = zero_mask * 2 + 1
one_mask = one_mask * 2 + 1
else:
if c != 'X':
print("Surprised character", c, inst)
zero_mask = zero_mask * 2 + 1
one_mask = one_mask * 2
elif inst.startswith('mem'):
m = MEM_RE.match(inst)
value = int(m[2])
value = value | one_mask
value = value & zero_mask
print(m[2], hex(int(m[2])), hex(value))
mem[int(m[1])] = value
else:
print("UNKNOWN INSTRUCTION", inst)
print(sum(mem.values())) |
import boto3
import botocore
import os
import sys
from urllib.parse import unquote_plus
import urllib.request
import requests
import json
import datetime
from pytz import timezone
from dateutil import parser
from requests.auth import HTTPBasicAuth
try:
requests.packages.urllib3.disable_warnings()
except:
pass
thousandeyes_user = os.environ['THOUSANDEYES_USER']
thousandeyes_token = os.environ['THOUSANDEYES_TOKEN']
tousanndeyes_agentname = 'thousandeyes-va'
if ('THOUSANDEYES_AGENTNAME' in os.environ):
tousanndeyes_agentname = os.environ['THOUSANDEYES_AGENTNAME']
# LINE notify's API
LINE_TOKEN = os.environ['LINE_TOKEN']
LINE_NOTIFY_URL = "https://notify-api.line.me/api/notify"
line_ok_msg = os.environ['LINE_OK_MSG']
line_ng_msg = os.environ['LINE_NG_MSG']
s3_bucket = os.environ['S3_BUCKET']
s3_client = boto3.client('s3')
s3 = boto3.resource('s3')
def send_info(msg):
method = "POST"
headers = {"Authorization": "Bearer %s" % LINE_TOKEN}
payload = {"message": msg}
try:
payload = urllib.parse.urlencode(payload).encode("utf-8")
req = urllib.request.Request(
url=LINE_NOTIFY_URL, data=payload, method=method, headers=headers)
urllib.request.urlopen(req)
except Exception as e:
print ("Exception Error: ", e)
sys.exit(1)
def agent_status():
url = "https://api.thousandeyes.com/v6/agents.json"
# Initialize the requests session
api_session = requests.Session()
response = api_session.request("GET", url, auth=HTTPBasicAuth(thousandeyes_user, thousandeyes_token), verify=False)
    # If successfully able to get the list of agents
if (response.status_code == 200):
agents = json.loads(response.content)["agents"]
for agent in agents:
            #print(json.dumps(agent, indent=4)) # formatted print
if (agent['agentName'] == tousanndeyes_agentname):
print(agent['agentState'])
print(agent['lastSeen'])
lastSeen_JST = parser.parse(agent['lastSeen'] + '+00:00').astimezone(timezone('Asia/Tokyo'))
print(lastSeen_JST)
if (agent['agentState'] == 'Offline'):
print('Offline line_ng_msg: %s' % line_ng_msg)
try:
s3.Object(s3_bucket, 'status_ng').load()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
# The object does not exist.
print('send NG message')
s3.Object(s3_bucket, 'status_ng').put()
msg = tousanndeyes_agentname + ' ' + line_ng_msg + ' lastSeen_JST: ' + str(lastSeen_JST)
print(msg)
send_info(msg)
else:
# Something else has gone wrong.
raise
else:
# The object does exist.
print('continue NG')
elif (agent['agentState'] == 'Online'):
print('Online line_ok_msg: %s' % line_ok_msg)
try:
s3.Object(s3_bucket, 'status_ng').load()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
# The object does not exist.
print('continue OK')
else:
# Something else has gone wrong.
raise
else:
# The object does exist.
print('send OK message')
s3.Object(s3_bucket, 'status_ng').delete()
msg = tousanndeyes_agentname + ' ' + line_ok_msg + ' lastSeen_JST: ' + str(lastSeen_JST)
print(msg)
send_info(msg)
else:
print("An error has ocurred, while fetching agentlists, with the following code {}".format(response.status_code))
def lambda_handler(event, context):
agent_status()
|
"""
PyRSM is a Python package for exoplanet detection which applies the Regime Switching Model (RSM) framework to ADI (and ADI+SDI) sequences (see Dahlqvist et al. A&A, 2020, 633, A95).
The RSM map algorithm relies on one or several PSF subtraction techniques to process one or multiple ADI sequences before computing a final probability map. Considering the large
set of parameters needed for the computation of the RSM detection map (parameters for the selected PSF-subtraction techniques as well as the RSM algorithm itself), a parameter selection framework
called auto-RSM (Dahlqvist et al., 2021 in prep) is proposed to automatically select the optimal parametrization. The proposed multi-step parameter optimization framework can be divided into
three main steps, (i) the selection of the optimal set of parameters for the considered PSF-subtraction techniques, (ii) the optimization of the RSM approach parametrization, and (iii) the
selection of the optimal set of PSF-subtraction techniques and ADI sequences to be considered when generating the final detection map.
The add_cube and add_method functions allow considering several cubes and models to generate
the cube of residuals used to compute the RSM map. The cubes should be provided by the same instrument
or rescaled to a unique pixel size. The class can be used with both ADI and ADI+SDI sequences. A specific PSF should
be provided for each cube. In the case of ADI+SDI, a single PSF should be provided per cube (typically the PSF
averaged over the set of frequencies). Five different models and two forward model variants are available.
Each model can be parametrized separately.
The function like_esti allows the estimation of a cube of likelihoods containing for each pixel
and each frame the likelihood of being in the planetary or the speckle regime. These likelihood cubes
are then used by the probmap_esti function to provide the final probability map based on the RSM framework.
The second set of functions regroups the four main functions used by the auto-RSM/auto-S/N framework.
The opti_model function allows the optimization of the PSF-subtraction technique parameters based on the
minimisation of the average contrast. The opti_RSM function takes care of the optimization of the parameters
of the RSM framework (all related to the computation of the likelihood associated with every pixel and frame). The
third function, RSM_combination, relies on a greedy selection algorithm to define the optimal set of
ADI sequences and PSF-subtraction techniques to consider when generating the final detection map using the RSM
approach. Finally, the opti_map function computes the final RSM detection map. The optimization of
the parameters can be done using reversed parallactic angles, blurring potential planetary signals while
keeping the main characteristics of the speckle noise. An S/N-map-based code is also proposed and encompasses
the opti_model, RSM_combination and opti_map functions. For the last two functions, the SNR
parameter should be set to True.
Parts of the code have been directly inspired by the VIP and PyKLIP packages for the estimation of the cube
of residuals and the forward model PSF.
"""
__author__ = 'Carl-Henrik Dahlqvist'
import numpy as np
from scipy.optimize import curve_fit
from skimage.draw import circle
import vip_hci as vip
from vip_hci.var import get_annulus_segments,frame_center
from vip_hci.preproc import frame_crop,cube_derotate,cube_crop_frames,cube_collapse
from vip_hci.metrics import cube_inject_companions, frame_inject_companion, normalize_psf
from hciplot import plot_frames
from vip_hci.conf.utils_conf import pool_map, iterable
from multiprocessing import Pool, RawArray
import photutils
from scipy import stats
import pickle
import multiprocessing as mp
import sklearn.gaussian_process as gp
from scipy.stats import norm
from .utils import (llsg_adisdi,loci_adisdi,do_pca_patch,_decompose_patch,
annular_pca_adisdi,NMF_patch,nmf_adisdi,LOCI_FM,KLIP_patch,perturb,KLIP,
get_time_series,poly_fit,interpolation,remove_outliers,check_delta_sep ,rot_scale)
var_dict = {}
def init_worker(X, X_shape):
var_dict['X'] = X
var_dict['X_shape'] = X_shape
class PyRSM:
def __init__(self,fwhm,minradius,maxradius,pxscale=0.1,ncore=1,max_r_fm=None,opti_mode='full-frame',inv_ang=True,opti_type='Contrast',trunc=None,imlib='opencv', interpolation='lanczos4'):
"""
        Initialization of the PyRSM object on which the add_cube and add_method
        functions will be applied to parametrize the model. The functions
        like_esti and probmap_esti will then allow the computation of the
        likelihood cubes and the final RSM map respectively.
Parameters
----------
fwhm: int
Full width at half maximum for the instrument PSF
minradius : int
Center radius of the first annulus considered in the RSM probability
map estimation. The radius should be larger than half
the value of the 'crop' parameter
maxradius : int
Center radius of the last annulus considered in the RSM probability
map estimation. The radius should be smaller or equal to half the
size of the image minus half the value of the 'crop' parameter
pxscale : float
Value of the pixel in arcsec/px. Only used for printing plots when
``showplot=True`` in like_esti.
ncore : int, optional
Number of processes for parallel computing. By default ('ncore=1')
the algorithm works in single-process mode.
max_r_fm: int, optional
Largest radius for which the forward model version of KLIP or LOCI
are used, when relying on forward model versions of RSM. Forward model
versions of RSM have a higher performance at close separation, considering
their computation time, their use should be restricted to small angular distances.
Default is None, i.e. the foward model version are used for all considered
angular distance.
opti_mode: str, optional
In the 'full-frame' mode, the parameter optimization is based on a reduced
set of angular separations and a single global set of parameters is selected
(the one maximizing the global normalized average contrast). In 'annular' mode,
            a separate optimization is done for every consecutive annulus of width equal to
one FWHM and separated by a distance of one FWHM. For each annulus, a separate
optimal set of parameters is computed. Default is 'full-frame'.
inv_ang: bool, optional
If True, the sign of the parallactic angles of all ADI sequence is flipped for
the entire optimization procedure. Default is True.
opti_type: str, optional
'Contrast' for an optimization based on the average contrast and 'RSM' for
an optimization based on the ratio of the peak probability of the injected
fake companion on the peak (noise) probability in the remaining of the
considered annulus (much higher computation time). Default is 'Contrast'.
trunc: int, optional
            Maximum angular distance considered for the full-frame parameter optimization. Default is None.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
"""
self.ncore = ncore
self.minradius = minradius
self.maxradius = maxradius
self.fwhm = fwhm
self.pxscale = pxscale
self.imlib=imlib
self.interpolation=interpolation
self.opti_mode=opti_mode
self.inv_ang=inv_ang
self.param_opti_mode=opti_type
self.trunc=trunc
if max_r_fm is not None:
self.max_r=max_r_fm
else:
self.max_r=maxradius
self.psf = []
self.cube=[]
self.pa=[]
self.scale_list=[]
self.model = []
self.delta_rot = []
self.delta_sep = []
self.nsegments = []
self.ncomp = []
self.rank = []
self.tolerance = []
self.asize=[]
self.opti_bound=[]
self.psf_fm=[]
self.intensity = []
self.distri = []
self.distrifit=[]
self.var = []
self.interval=[]
self.crop=[]
self.crop_range=[]
self.like_fin=[]
self.flux_FMMF=[]
self.distrisel=[]
self.mixval=[]
self.fiterr=[]
self.probmap=None
self.param=None
self.opti=False
self.contrast=[]
self.ini_esti=[]
self.opti_sel=None
self.threshold=None
def add_cube(self,psf, cube, pa, scale_list=None):
"""
        Function used to add an ADI sequence to the set of cubes considered for
the RSM map estimation.
Parameters
----------
psf : numpy ndarray 2d
2d array with the normalized PSF template, with an odd shape.
The PSF image must be centered wrt to the array! Therefore, it is
recommended to run the function ``normalize_psf`` to generate a
centered and flux-normalized PSF template.
cube : numpy ndarray, 3d or 4d
Input cube (ADI sequences), Dim 1 = temporal axis, Dim 2-3 = spatial axis
Input cube (ADI + SDI sequences), Dim 1 = temporal axis, Dim 2=wavelength
Dim 3-4 = spatial axis
pa : numpy ndarray, 1d
Parallactic angles for each frame of the ADI sequences.
scale_list: numpy ndarray, 1d, optional
Scaling factors in case of IFS data (ADI+mSDI cube). Usually, the
scaling factors are the central channel wavelength divided by the
shortest wavelength in the cube (more thorough approaches can be used
            to get the scaling factors). These scaling factors are used to re-scale
the spectral channels and align the speckles. Default is None
"""
if cube.shape[-1]%2==0:
raise ValueError("Images should have an odd size")
self.psf.append(psf)
self.cube.append(cube)
self.pa.append(pa)
self.scale_list.append(scale_list)
self.like_fin.append([])
self.flux_FMMF.append([])
self.distrisel.append([])
self.mixval.append([])
self.fiterr.append([])
self.psf_fm.append([])
def add_method(self, model,delta_rot=0.5,delta_sep=0.1,asize=5,nsegments=1,ncomp=20,rank=5,tolerance=1e-2,interval=[5],intensity='Annulus',distri='A',var='ST',distrifit=False,crop_size=5, crop_range=1,ini_esti=60,opti_bound=None):
"""
Function used to add a model to the set of post-processing techniques used to generate
the cubes of residuals on which is based the computation of the likelihood of being
in either the planetary of the background regime. These likelihood matrices allow
eventually the definition of the final RSM map.
Parameters
----------
model : str
Selected ADI-based post-processing techniques used to
generate the cubes of residuals feeding the Regime Switching model.
'APCA' for annular PCA, NMF for Non-Negative Matrix Factorization, LLSG
for Local Low-rank plus Sparse plus Gaussian-noise decomposition, LOCI
for locally optimized combination of images and'KLIP' for Karhunen-Loeve
            Image Projection. There exist forward model variants of KLIP and LOCI called
respectively 'FM KLIP' and 'FM LOCI'.
delta_rot : float, optional
Factor for tunning the parallactic angle threshold, expressed in FWHM.
Default is 0.5 (excludes 0.5xFHWM on each side of the considered frame).
delta_sep : float, optional
The threshold separation in terms of the mean FWHM (for ADI+mSDI data).
Default is 0.1.
asize : int, optional
            Width in pixels of each annulus. Default is 5.
        nsegments : int, optional
The number of segments for each annulus. Default is 1 as we work annulus-wise.
ncomp : int, optional
Number of components used for the low-rank approximation of the
speckle field with 'APCA', 'KLIP' and 'NMF'. Default is 20.
rank : int, optional
Expected rank of the L component of the 'LLSG' decomposition. Default is 5.
tolerance: float, optional
Tolerance level for the approximation of the speckle field via a linear
combination of the reference images in the LOCI algorithm. Default is 1e-2.
interval: list of float or int, optional
            List of values taken by the delta parameter defining, when multiplied by the
            standard deviation, the strength of the planetary signal in the Regime Switching model.
            Default is [5]. The different delta parameters are tested and the optimal value
            is selected via maximum likelihood.
        intensity: str, optional
If 'Pixel', the intensity parameter used in the RSM framework is computed
pixel-wise via a gaussian maximum likelihood by comparing the set of observations
and the PSF or the forward model PSF in the case of 'FM KLIP' and 'FM LOCI'.
If 'Annulus', the intensity parameter is estimated annulus-wise and defined as
            a multiple of the annulus residual noise variance. If multiple multiplicative parameters
are provided in PyRSM init (multi_factor), the multiplicative factor applied to the noise
variance is selected via the maximisation of the total likelihood of the regime switching
model for the selected annulus. Default is 'Annulus'.
distri: str, optional
Probability distribution used for the estimation of the likelihood
of both regimes (planetary or noise) in the Regime Switching framework.
            Default is 'A' (automatic selection); four other possibilities exist: Gaussian 'G',
            Laplacian 'L', Huber loss 'H', and mix 'M'.
'A': allow the automatic selection of the optimal distribution ('Laplacian'
or 'Gaussian') depending on the fitness of these distributions compared to
the empirical distribution of the residual noise in the considered annulus.
            For each cube and ADI-based post-processing technique, the distribution
leading to the lowest fitness error is automatically selected.
            'M': use both the 'Gaussian' and 'Laplacian' distributions to get closer to
the empirical distribution by fitting a mix parameter providing the ratio
of 'Laplacian' distribution compared to the 'Gaussian' one.
var: str, optional
            Model used for the residual noise variance estimation. Five different approaches
            are proposed: 'ST', 'SM', 'FR', 'FM', and 'TE'. While all five can be used when
            intensity='Annulus', only the last three can be used when intensity='Pixel'.
            When using an ADI+SDI dataset, only 'FR' and 'FM' can be used. Default is 'ST'.
'ST': consider every frame and pixel in the selected annulus with a
width equal to asize (default approach)
'SM': consider for every pixel a segment of the selected annulus with
a width equal to asize. The segment is centered on the selected pixel and has
a size of three FWHM. A mask of one FWHM is applied on the selected pixel and
its surroundings. Every frame is considered.
'FR': consider the pixels in the selected annulus with a width equal to asize
but separately for every frame.
'FM': consider the pixels in the selected annulus with a width
equal to asize but separately for every frame. Apply a mask one FWHM
on the selected pixel and its surrounding.
'TE': rely on the method developed in PACO to estimate the
residual noise variance (take the pixels in a region of one FWHM around
the selected pixel, considering every frame in the derotated cube of residuals
except for the selected frame)
distrifit: bool, optional
If True, the estimation of the mean and variance of the selected distribution
is done via a best fit on the empirical distribution. If False, basic
estimation of the mean and variance using the set of observations
contained in the considered annulus, without taking into account the selected
distribution.
modtocube: bool, optional
Parameter defining if the concatenated cube feeding the RSM model is created
considering first the model or the different cubes. If 'modtocube=False',
the function will select the first cube then test all models on it and move
to the next one. If 'modtocube=True', the function will select one model and apply
it to every cube before moving to the next model. Default is True.
crop_size: int, optional
Part of the PSF template considered in the estimation of the RSM map.
crop_range: int, optional
Range of crop sizes considered in the estimation of the RSM map, starting with crop_size
and increasing the crop size incrementally by 2 pixels up to a crop size of
crop_size + 2 x (crop_range-1).
ini_esti: int, optional
Number of loss function computations (average contrast) to initialize the Gaussian
process used during the Bayesian optimization of the PSF-subtraction technique parameters
(APCA, LOCI, FM KLIP and FM LOCI). Default is 60.
opti_bound: list, optional
List of boundaries used for the parameter optimization.
- For APCA: [[L_ncomp,U_ncomp],[L_nseg,U_nseg],[L_delta_rot,U_delta_rot]]
Default is [[15,45],[1,4],[0.25,1]]
- For NMF: [[L_ncomp,U_ncomp]]
Default is [[2,20]]
- For LLSG: [[L_ncomp,U_ncomp],[L_nseg,U_nseg]]
Default is [[1,10],[1,4]]
- For LOCI: [[L_tolerance,U_tolerance],[L_delta_rot,U_delta_rot]]
Default is [[1e-3,1e-2],[0.25,1]]
- For FM KLIP: [[L_ncomp,U_ncomp],[L_delta_rot,U_delta_rot]]
Default is [[15,45],[0.25,1]]
- For FM LOCI: [[L_tolerance,U_tolerance],[L_delta_rot,U_delta_rot]]
Default is [[1e-3,1e-2],[0.25,1]]
with L_ the lower bound and U_ the upper bound.
"""
if crop_size+2*(crop_range-1)>=2*round(self.fwhm)+1:
raise ValueError("Maximum cropsize should be lower or equal to two FWHM, please change accordingly either 'crop_size' or 'crop_range'")
if any(var in myvar for myvar in ['ST','SM','FM','TE','FR'])==False:
raise ValueError("'var' not recognized")
if any(distri in mydistri for mydistri in ['G','L','A','M','H'])==False:
raise ValueError("'distri' not recognized")
if intensity=='Pixel' and any(var in myvar for myvar in ['FR','FM','TE'])==False:
raise ValueError("'var' not recognized for intensity='Pixel'. 'var' should be 'FR','FM' or 'TE'")
for c in range(len(self.cube)):
if self.cube[c].ndim==4:
if var in ['ST','SM','TE']:
raise ValueError("'var' not recognized for ADI+SDI cube. 'var' should be 'FR' or 'FM'")
if model in ['FM KLIP','FM LOCI','KLIP']:
raise ValueError("ADI+SDI sequences can only be used with APCA, NMF, LLSG and LOCI")
check_delta_sep(self.scale_list[c],delta_sep,self.minradius,self.fwhm,c)
self.model.append(model)
self.delta_rot.append(np.array([np.repeat(delta_rot,(len(self.cube)))]*(self.maxradius+asize)))
self.delta_sep.append(np.array([np.repeat(delta_sep,(len(self.cube)))]*(self.maxradius+asize)))
self.nsegments.append(np.array([np.repeat(nsegments,(len(self.cube)))]*(self.maxradius+asize)))
self.ncomp.append(np.array([np.repeat(ncomp,(len(self.cube)))]*(self.maxradius+asize)))
self.rank.append(np.array([np.repeat(rank,(len(self.cube)))]*(self.maxradius+asize)))
self.tolerance.append(np.array([np.repeat(tolerance,(len(self.cube)))]*(self.maxradius+asize)))
self.interval.append(np.array([[np.repeat(interval,(len(self.cube)))]]*(self.maxradius+asize)))
self.asize.append(asize)
self.opti_bound.append(opti_bound)
self.distrifit.append(np.array([np.repeat(distrifit,(len(self.cube)))]*(self.maxradius+asize)))
self.intensity.append(np.array([np.repeat(intensity,(len(self.cube)))]*(self.maxradius+asize)))
self.var.append(np.array([np.repeat(var,(len(self.cube)))]*(self.maxradius+asize)))
self.crop.append(np.array([np.repeat(crop_size,(len(self.cube)))]*(self.maxradius+asize)))
self.crop_range.append(crop_range)
self.distri.append(distri)
self.ini_esti.append(ini_esti)
for i in range(len(self.cube)):
if model=='FM KLIP' or model=='FM LOCI':
self.psf_fm[i].append(list([None]*(self.maxradius+1)))
else:
self.psf_fm[i].append(None)
self.like_fin[i].append(None)
self.flux_FMMF[i].append(None)
self.distrisel[i].append(None)
self.mixval[i].append(None)
self.fiterr[i].append(None)
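# Illustrative sketch (not part of the class): each call to add_model stores, for most
# parameters, a (maxradius+asize) x n_cubes matrix built with the np.repeat pattern used
# above, so that the value can later be read per annulus and per ADI sequence as
# parameter[modn][ann_center, cuben]. Minimal stand-alone example with assumed sizes:
import numpy as np
n_cubes, maxradius_demo, asize_demo, delta_rot_demo = 2, 10, 5, 0.5
delta_rot_matrix = np.array([np.repeat(delta_rot_demo, n_cubes)]*(maxradius_demo+asize_demo))
print(delta_rot_matrix.shape)   # (15, 2): one row per annulus center, one column per cube
print(delta_rot_matrix[7, 1])   # value used for annulus 7 of the second ADI sequence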
def save_parameters(self,folder,name):
with open(folder+name+'.pickle', "wb") as save:
pickle.dump([self.model, self.delta_rot,self.nsegments,self.ncomp,self.rank,self.tolerance,self.asize,self.psf_fm,self.intensity,self.distri,self.distrifit,self.var,self.crop,self.crop_range,self.opti_sel,self.threshold,self.opti_mode,self.flux_opti,self.opti_theta,self.interval],save)
def load_parameters(self,name):
with open(name+'.pickle', "rb") as read:
saved_param = pickle.load(read)
self.model= saved_param[0]
self.delta_rot = saved_param[1]
self.nsegments = saved_param[2]
self.ncomp = saved_param[3]
self.rank = saved_param[4]
self.tolerance = saved_param[5]
self.asize= saved_param[6]
self.psf_fm= saved_param[7]
self.intensity = saved_param[8]
self.distri = saved_param[9]
self.distrifit= saved_param[10]
self.var = saved_param[11]
self.crop= saved_param[12]
self.crop_range= saved_param[13]
self.opti_sel=saved_param[14]
self.threshold=saved_param[15]
self.opti_mode=saved_param[16]
self.flux_opti=saved_param[17]
self.opti_theta=saved_param[18]
self.interval=saved_param[19]
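# Minimal sketch of the pickle round trip implemented by save_parameters and
# load_parameters above: the whole parameter set travels as a single pickled list, so the
# two methods must keep the exact same ordering of entries. Stand-alone example with a
# hypothetical three-entry subset (model, delta_rot, asize):
import pickle
params_demo = [['APCA'], 0.5, 5]
with open('rsm_parameters_demo.pickle', 'wb') as save:
    pickle.dump(params_demo, save)
with open('rsm_parameters_demo.pickle', 'rb') as read:
    model_demo, delta_rot_demo, asize_demo = pickle.load(read)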
def likelihood(self,ann_center,cuben,modn,mcube,cube_fc=None,verbose=True):
if type(mcube) is not np.ndarray:
mcube = np.frombuffer(var_dict['X']).reshape(var_dict['X_shape'])
if mcube.ndim==4:
z,n,y,x=mcube.shape
else:
n,y,x=mcube.shape
z=1
range_int=len(self.interval[modn][ann_center,cuben])
likemap=np.zeros(((n*z)+1,x,y,range_int,2,self.crop_range[modn]))
def likfcn(cuben,modn,mean,var,mixval,max_hist,mcube,ann_center,distrim,evals=None,evecs_matrix=None, KL_basis_matrix=None,refs_mean_sub_matrix=None,sci_mean_sub_matrix=None,resicube_klip=None,probcube=0,var_f=None, ind_ref_list=None,coef_list=None):
phi=np.zeros(2)
n,y,x=mcube.shape
ceny, cenx = frame_center(mcube[0])
indicesy,indicesx=get_time_series(mcube,ann_center)
range_int=len(self.interval[modn][ann_center,cuben])
if self.model[modn]=='FM KLIP' or self.model[modn]=='FM LOCI':
if self.psf_fm[cuben][modn][ann_center] is not None:
psf_formod=True
else:
psf_formod=False
psf_fm_out=np.zeros((len(indicesx),mcube.shape[0],2*round(self.fwhm)+1,2*round(self.fwhm)+1))
if (self.crop[modn][ann_center,cuben]+2*(self.crop_range[modn]-1))!=self.psf[cuben].shape[-1]:
psf_temp=frame_crop(self.psf[cuben],int(self.crop[modn][ann_center,cuben]+2*(self.crop_range[modn]-1)),cenxy=[int(self.psf[cuben].shape[1]/2),int(self.psf[cuben].shape[1]/2)],verbose=False)
else:
psf_temp=self.psf[cuben]
for i in range(0,len(indicesy)):
psfm_temp=None
cubind=0
poscenty=indicesy[i]
poscentx=indicesx[i]
#PSF forward model computation for KLIP
if self.model[modn]=='FM KLIP':
an_dist = np.sqrt((poscenty-ceny)**2 + (poscentx-cenx)**2)
theta = np.degrees(np.arctan2(poscenty-ceny, poscentx-cenx))
if psf_formod==False:
model_matrix=cube_inject_companions(np.zeros_like(mcube), self.psf[cuben], self.pa[cuben], flevel=1, plsc=0.1,rad_dists=an_dist, theta=theta, n_branches=1,verbose=False)
pa_threshold = np.rad2deg(2 * np.arctan(self.delta_rot[modn][ann_center,cuben] * self.fwhm / (2 * ann_center)))
mid_range = np.abs(np.amax(self.pa[cuben]) - np.amin(self.pa[cuben])) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
pa_threshold = float(mid_range - mid_range * 0.1)
psf_map=np.zeros_like(model_matrix)
indices = get_annulus_segments(mcube[0], ann_center-int(self.asize[modn]/2),int(self.asize[modn]),1)
for b in range(0,n):
psf_map_temp = perturb(b,model_matrix[:, indices[0][0], indices[0][1]], self.ncomp[modn][ann_center,cuben],evals_matrix, evecs_matrix,
KL_basis_matrix,sci_mean_sub_matrix,refs_mean_sub_matrix, self.pa[cuben], self.fwhm, pa_threshold, ann_center)
psf_map[b,indices[0][0], indices[0][1]]=psf_map_temp-np.mean(psf_map_temp)
psf_map_der = cube_derotate(psf_map, self.pa[cuben], imlib='opencv',interpolation='lanczos4')
psfm_temp=cube_crop_frames(psf_map_der,2*round(self.fwhm)+1,xy=(poscentx,poscenty),verbose=False)
psf_fm_out[i,:,:,:]=psfm_temp
else:
psfm_temp=self.psf_fm[cuben][modn][ann_center][i,:,:,:]
psf_fm_out[i,:,:,:]=psfm_temp
#PSF forward model computation for LOCI
if self.model[modn]=='FM LOCI':
an_dist = np.sqrt((poscenty-ceny)**2 + (poscentx-cenx)**2)
theta = np.degrees(np.arctan2(poscenty-ceny, poscentx-cenx))
if psf_formod==False:
model_matrix=cube_inject_companions(np.zeros_like(mcube), self.psf[cuben], self.pa[cuben], flevel=1, plsc=0.1,
rad_dists=an_dist, theta=theta, n_branches=1,verbose=False)
indices = get_annulus_segments(self.cube[cuben][0], ann_center-int(self.asize[modn]/2),int(self.asize[modn]),1)
values_fc = model_matrix[:, indices[0][0], indices[0][1]]
cube_res_fc=np.zeros_like(model_matrix)
matrix_res_fc = np.zeros((values_fc.shape[0], indices[0][0].shape[0]))
for e in range(values_fc.shape[0]):
recon_fc = np.dot(coef_list[e], values_fc[ind_ref_list[e]])
matrix_res_fc[e] = values_fc[e] - recon_fc
cube_res_fc[:, indices[0][0], indices[0][1]] = matrix_res_fc
cube_der_fc = cube_derotate(cube_res_fc-np.mean(cube_res_fc), self.pa[cuben], imlib='opencv', interpolation='lanczos4')
psfm_temp=cube_crop_frames(cube_der_fc,2*round(self.fwhm)+1,xy=(poscentx,poscenty),verbose=False)
psf_fm_out[i,:,:,:]=psfm_temp
else:
psfm_temp=self.psf_fm[cuben][modn][ann_center][i,:,:,:]
psf_fm_out[i,:,:,:]=psfm_temp
#Flux parameter estimation via Gaussian maximum likelihood (matched filtering)
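# Under the Gaussian noise assumption the maximum-likelihood intensity has the usual
# matched-filter closed form: with residual patches x_j, (forward-)model patches m_j and
# per-frame noise variances s_j, flux_hat = sum_j(<x_j, m_j>/s_j) / sum_j(<m_j, m_j>/s_j),
# which is exactly the sum(num)/sum(denom) ratio computed below.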
if self.intensity[modn][ann_center,cuben]=='Pixel':
flux_esti=np.zeros((self.crop_range[modn]))
for v in range(0,self.crop_range[modn]):
cropf=int(self.crop[modn][ann_center,cuben]+2*v)
num=[]
denom=[]
for j in range(n):
if self.var[modn][ann_center,cuben]=='FR':
svar=var_f[j,v]
elif self.var[modn][ann_center,cuben]=='FM' :
svar=var_f[i,j,v]
elif self.var[modn][ann_center,cuben]=='TE':
svar=var_f[i,j,v]
if psfm_temp is not None:
psfm_temp2=psfm_temp[j]
else:
psfm_temp2=psf_temp
if psfm_temp2.shape[0]==cropf:
psfm=psfm_temp2
else:
psfm=frame_crop(psfm_temp2,cropf,cenxy=[int(psfm_temp2.shape[0]/2),int(psfm_temp2.shape[0]/2)],verbose=False)
num.append(np.multiply(frame_crop(mcube[j],cropf,cenxy=[poscentx,poscenty],verbose=False),psfm).sum()/svar)
denom.append(np.multiply(psfm,psfm).sum()/svar)
flux_esti[v]=sum(num)/sum(denom)
probcube[n,indicesy[i],indicesx[i],0,0,v]=flux_esti[v]
# Reverse the temporal direction when moving from one patch to the next one to respect the temporal proximity of the pixels and limit potential non-linearity
if i%2==1:
range_sel=range(n)
else:
range_sel=range(n-1,-1,-1)
# Likelihood computation for the patch i
for j in range_sel:
for m in range(range_int):
if psfm_temp is not None:
psfm_temp2=psfm_temp[j]
else:
psfm_temp2=psf_temp
for v in range(0,self.crop_range[modn]):
cropf=int(self.crop[modn][ann_center,cuben]+2*v)
if psfm_temp2.shape[0]==cropf:
psfm=psfm_temp2
else:
psfm=frame_crop(psfm_temp2,cropf,cenxy=[int(psfm_temp2.shape[1]/2),int(psfm_temp2.shape[1]/2)],verbose=False)
if self.var[modn][ann_center,cuben]=='ST':
svar=var[v]
alpha=mean[v]
mv=mixval[v]
sel_distri=distrim[v]
maxhist=max_hist[v]
phi[1]=self.interval[modn][ann_center,cuben][m]*np.sqrt(svar)
elif self.var[modn][ann_center,cuben]=='FR':
svar=var[j,v]
alpha=mean[j,v]
mv=mixval[j,v]
sel_distri=distrim[j,v]
maxhist=max_hist[j,v]
phi[1]=self.interval[modn][ann_center,cuben][m]*np.sqrt(svar)
elif self.var[modn][ann_center,cuben]=='SM':
svar=var[i,v]
alpha=mean[i,v]
mv=mixval[i,v]
sel_distri=distrim[i,v]
maxhist=max_hist[i,v]
phi[1]=self.interval[modn][ann_center,cuben][m]*np.sqrt(svar)
elif self.var[modn][ann_center,cuben]=='FM' :
svar=var[i,j,v]
alpha=mean[i,j,v]
mv=mixval[i,j,v]
sel_distri=distrim[i,j,v]
maxhist=max_hist[i,j,v]
phi[1]=self.interval[modn][ann_center,cuben][m]*np.sqrt(svar)
elif self.var[modn][ann_center,cuben]=='TE':
svar=var[i,j,v]
alpha=mean[i,j,v]
mv=mixval[i,j,v]
sel_distri=distrim[i,j,v]
maxhist=max_hist[i,j,v]
phi[1]=self.interval[modn][ann_center,cuben][m]*np.sqrt(svar)
if self.intensity[modn][ann_center,cuben]=='Pixel':
phi[1]=np.where(flux_esti[v]<=0,0,flux_esti[v])
if self.intensity[modn][ann_center,cuben]=='Annulus' and (self.model[modn]=='FM KLIP' or self.model[modn]=='FM LOCI'):
phi[1]=5*phi[1]
for l in range(0,2):
#Likelihood estimation
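# l=0 is the background regime (phi[0]=0, no planetary signal) and l=1 the planetary
# regime (phi[1] set to the tested intensity). The residual patch minus phi[l]*psfm minus
# the noise mean alpha is evaluated under the selected distribution (0: Gaussian,
# 1: Laplacian, 2: Gaussian/Laplacian mixture, 3: Huber loss) and summed over the patch
# to give the likelihood stored in probcube.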
ff=frame_crop(mcube[cubind],cropf,cenxy=[poscentx,poscenty],verbose=False)-phi[l]*psfm-alpha
if sel_distri==0:
cftemp=(1/np.sqrt(2 * np.pi*svar))*np.exp(-0.5*np.multiply(ff,ff)/svar)
elif sel_distri==1:
cftemp=1/(np.sqrt(2*svar))*np.exp(-abs(ff)/np.sqrt(0.5*svar))
elif sel_distri==2:
cftemp=(mv*(1/np.sqrt(2 * np.pi*svar))*np.exp(-0.5*np.multiply(ff,ff)/svar)+(1-mv)*1/(np.sqrt(2*svar))*np.exp(-abs(ff)/np.sqrt(0.5*svar)))
elif sel_distri==3:
abs_x=abs(ff)
cftemp=maxhist*np.exp(-np.where(abs_x < svar, mv * abs_x**2,2*svar*mv*abs_x -mv*svar**2))
probcube[int(cubind),int(indicesy[i]),int(indicesx[i]),int(m),l,v]=cftemp.sum()
cubind+=1
if self.model[modn]=='FM KLIP' or self.model[modn]=='FM LOCI':
return probcube,psf_fm_out
else:
return probcube
if verbose==True:
print("Radial distance: "+"{}".format(ann_center))
#Estimation of the KLIP cube of residuals for the selected annulus
evals_matrix=[]
evecs_matrix=[]
KL_basis_matrix=[]
refs_mean_sub_matrix=[]
sci_mean_sub_matrix=[]
resicube_klip=None
ind_ref_list=None
coef_list=None
if self.opti==True and cube_fc is not None:
cube_test=cube_fc+self.cube[cuben]
else:
cube_test=self.cube[cuben]
if self.model[modn]=='FM KLIP' or (self.opti==True and self.model[modn]=='KLIP'):
resicube_klip=np.zeros_like(self.cube[cuben])
pa_threshold = np.rad2deg(2 * np.arctan(self.delta_rot[modn][ann_center,cuben] * self.fwhm / (2 * (ann_center))))
mid_range = np.abs(np.amax(self.pa[cuben]) - np.amin(self.pa[cuben])) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
pa_threshold = float(mid_range - mid_range * 0.1)
indices = get_annulus_segments(self.cube[cuben][0], ann_center-int(self.asize[modn]/2),int(self.asize[modn]),1)
for k in range(0,self.cube[cuben].shape[0]):
evals_temp,evecs_temp,KL_basis_temp,sub_img_rows_temp,refs_mean_sub_temp,sci_mean_sub_temp =KLIP_patch(k,cube_test[:, indices[0][0], indices[0][1]], self.ncomp[modn][ann_center,cuben], self.pa[cuben], self.asize[modn], pa_threshold, ann_center)
resicube_klip[k,indices[0][0], indices[0][1]] = sub_img_rows_temp
evals_matrix.append(evals_temp)
evecs_matrix.append(evecs_temp)
KL_basis_matrix.append(KL_basis_temp)
refs_mean_sub_matrix.append(refs_mean_sub_temp)
sci_mean_sub_matrix.append(sci_mean_sub_temp)
mcube=cube_derotate(resicube_klip,self.pa[cuben],imlib=self.imlib, interpolation=self.interpolation)
elif self.model[modn]=='FM LOCI':
resicube, ind_ref_list,coef_list=LOCI_FM(cube_test, self.psf[cuben], ann_center, self.pa[cuben],None, self.asize[modn], self.fwhm, self.tolerance[modn][ann_center,cuben],self.delta_rot[modn][ann_center,cuben],None)
mcube=cube_derotate(resicube,self.pa[cuben])
# Computation of the annular LOCI (used during the parameter optimization)
elif (self.opti==True and self.model[modn]=='LOCI'):
cube_rot_scale,angle_list,scale_list=rot_scale('ini',cube_test,None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)
if scale_list is not None:
resicube=np.zeros_like(cube_rot_scale)
for i in range(int(max(scale_list)*ann_center/self.asize[modn])):
indices = get_annulus_segments(cube_rot_scale[0], ann_center-int(self.asize[modn]/2)+self.asize[modn]*i,int(self.asize[modn]),int(self.nsegments[modn][ann_center,cuben]))
resicube[:,indices[0][0], indices[0][1]]=LOCI_FM(cube_rot_scale, self.psf[cuben], ann_center,angle_list,scale_list, self.asize[modn], self.fwhm, self.tolerance[modn][ann_center,cuben],self.delta_rot[modn][ann_center,cuben],self.delta_sep[modn][ann_center,cuben])[0][:,indices[0][0], indices[0][1]]
else:
resicube, ind_ref_list,coef_list=LOCI_FM(cube_rot_scale, self.psf[cuben], ann_center, self.pa[cuben],None, self.fwhm, self.fwhm, self.tolerance[modn][ann_center,cuben],self.delta_rot[modn][ann_center,cuben],None)
mcube=rot_scale('fin',self.cube[cuben],resicube,angle_list,scale_list,self.imlib, self.interpolation)
# Computation of the annular APCA (used during the parameter optimization)
elif (self.opti==True and self.model[modn]=='APCA'):
cube_rot_scale,angle_list,scale_list=rot_scale('ini',cube_test,None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)
resicube=np.zeros_like(cube_rot_scale)
if scale_list is not None:
range_adisdi=range(int(max(scale_list)*ann_center/self.asize[modn]))
else:
range_adisdi=range(1)
for i in range_adisdi:
pa_threshold = np.rad2deg(2 * np.arctan(self.delta_rot[modn][ann_center,cuben] * self.fwhm / (2 * (ann_center+self.asize[modn]*i))))
mid_range = np.abs(np.amax(self.pa[cuben]) - np.amin(self.pa[cuben])) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
pa_threshold = float(mid_range - mid_range * 0.1)
indices = get_annulus_segments(cube_rot_scale[0], ann_center-int(self.asize[modn]/2)+self.asize[modn]*i,int(self.asize[modn]),int(self.nsegments[modn][ann_center,cuben]))
for k in range(0,cube_rot_scale.shape[0]):
for l in range(self.nsegments[modn][ann_center,cuben]):
resicube[k,indices[l][0], indices[l][1]],v_resi,data_shape=do_pca_patch(cube_rot_scale[:, indices[l][0], indices[l][1]], k, angle_list,scale_list, self.fwhm, pa_threshold,self.delta_sep[modn][ann_center,cuben], ann_center+self.asize[modn]*i,
svd_mode='lapack', ncomp=self.ncomp[modn][ann_center,cuben],min_frames_lib=2, max_frames_lib=200, tol=1e-1,matrix_ref=None)
mcube=rot_scale('fin',self.cube[cuben],resicube,angle_list,scale_list,self.imlib, self.interpolation)
# Computation of the annular NMF (used during the parameter optimization)
elif (self.opti==True and self.model[modn]=='NMF'):
cube_rot_scale,angle_list,scale_list=rot_scale('ini',cube_test,None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)
resicube=np.zeros_like(cube_rot_scale)
if scale_list is not None:
range_adisdi=range(int(max(scale_list)*ann_center/self.asize[modn]))
else:
range_adisdi=range(1)
for i in range_adisdi:
indices = get_annulus_segments(cube_rot_scale[0], ann_center-int(self.asize[modn]/2)+self.asize[modn]*i,int(self.asize[modn]),int(self.nsegments[modn][ann_center,cuben]))
for l in range(self.nsegments[modn][ann_center,cuben]):
resicube[:,indices[l][0], indices[l][1]]= NMF_patch(cube_rot_scale[:, indices[l][0], indices[l][1]], ncomp=self.ncomp[modn][ann_center,cuben], max_iter=100,random_state=None)
mcube=rot_scale('fin',self.cube[cuben],resicube,angle_list,scale_list,self.imlib, self.interpolation)
# Computation of the annular LLSG (used during the parameter optimization)
elif (self.opti==True and self.model[modn]=='LLSG'):
cube_rot_scale,angle_list,scale_list=rot_scale('ini',cube_test,None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)
resicube=np.zeros_like(cube_rot_scale)
if scale_list is not None:
range_adisdi=range(int(max(scale_list)*ann_center/self.asize[modn]))
else:
range_adisdi=range(1)
for i in range_adisdi:
indices = get_annulus_segments(cube_rot_scale[0], ann_center-int(self.asize[modn]/2)+self.asize[modn]*i,int(self.asize[modn]),int(self.nsegments[modn][ann_center,cuben]))
for l in range(self.nsegments[modn][ann_center,cuben]):
resicube[:,indices[l][0], indices[l][1]]= _decompose_patch(indices,l, cube_rot_scale,self.nsegments[modn][ann_center,cuben],
self.rank[modn][ann_center,cuben], low_rank_ref=False, low_rank_mode='svd', thresh=1,thresh_mode='soft', max_iter=40, auto_rank_mode='noise', cevr=0.9,
residuals_tol=1e-1, random_seed=10, debug=False, full_output=False).T
mcube=rot_scale('fin',self.cube[cuben],resicube,angle_list,scale_list,self.imlib, self.interpolation)
zero_test=abs(mcube.sum(axis=1).sum(axis=1))
if np.min(zero_test)==0:
mcube[np.argmin(zero_test),:,:]=mcube.mean(axis=0)
# Fitness error computation for the noise distribution(s),
# if automated probability distribution selection is activated (var='A')
# the fitness errors allow the determination of the optimal distribution
def vm_esti(modn,arr,var_e,mean_e):
def gaus(x,x0,var):
return 1/np.sqrt(2 * np.pi*var)*np.exp(-(x-x0)**2/(2*var))
def lap(x,x0,var):
bap=np.sqrt(var/2)
return (1/(2*bap))*np.exp(-np.abs(x-x0)/bap)
def mix(x,x0,var,a):
bap=np.sqrt(var/2)
return a*(1/(2*bap))*np.exp(-np.abs(x-x0)/bap)+(1-a)*1/np.sqrt(2 * np.pi*var)*np.exp(-(x-x0)**2/(2*var))
def huber_loss(x,x0,delta,a):
abs_x=abs(x-x0)
return np.exp(-np.where(abs_x < delta, a * abs_x ** 2, 2*a*delta*abs_x -a*delta**2))
def te_f_mh(func1,func2,bin_m,hist,p0_1,bounds_1,p0_2,bounds_2,mean,var,distri):
try:
popt,pcov = curve_fit(func1,bin_m,hist,p0=p0_1,bounds=bounds_1)
fiter=sum(abs(func1(bin_m,*popt)-hist))
mean,var,a=popt
except (RuntimeError, ValueError, RuntimeWarning):
try:
popt,pcov = curve_fit(func2,bin_m,hist,p0=p0_2,bounds=bounds_2)
fiter=sum(abs(func2(bin_m,*popt)-hist))
a=popt
except (RuntimeError, ValueError, RuntimeWarning):
a=1
fiter=sum(abs(func2(bin_m,a)-hist))
return mean,a,var,fiter,distri
def te_f_gl(func,bin_m,hist,p0_1,bounds_1,mean,var,distri):
try:
popt,pcov = curve_fit(func,bin_m,hist,p0=p0_1,bounds=bounds_1)
mean,var=popt
fiter=sum(abs(func(bin_m,*popt)-hist))
except (RuntimeError, ValueError, RuntimeWarning):
mean,var=[mean,var]
fiter=sum(abs(func(bin_m,mean,var)-hist))
return mean,None,var,fiter,distri
def te_h(func1,func2,bin_m,hist,p0_1,bounds_1,p0_2,bounds_2,mean,delta,distri):
try:
popt,pcov = curve_fit(func1,bin_m,hist,p0=p0_1,bounds=bounds_1)
delta,a=popt
fiter=sum(abs(func1(bin_m,*popt)-hist))
except (RuntimeError, ValueError, RuntimeWarning):
try:
popt,pcov = curve_fit(func2,bin_m,hist,p0=p0_2,bounds=bounds_2)
fiter=sum(abs(func2(bin_m,*popt)-hist))
a=popt
except (RuntimeError, ValueError, RuntimeWarning):
a=1
fiter=sum(abs(func2(bin_m,a)-hist))
print('error huber loss')
return mean,a,delta,fiter,distri
def te_m(func,bin_m,hist,p0,bounds,mean,var,distri):
try:
popt,pcov = curve_fit(func,bin_m,hist,p0=p0,bounds=bounds)
mixval=popt
return mean,mixval,var,sum(abs(func(bin_m,*popt)-hist)),distri
except (RuntimeError, ValueError, RuntimeWarning):
return mean,0.5,var,sum(abs(func(bin_m,0.5)-hist)),distri
def te_gl(func,bin_m,hist,mean,var,distri):
return mean,None,var,sum(abs(func(bin_m,mean,var)-hist)),distri
mixval_temp=None
hist, bin_edge =np.histogram(arr,bins='auto',density=True)
bin_mid=(bin_edge[0:(len(bin_edge)-1)]+bin_edge[1:len(bin_edge)])/2
if self.distrifit[modn][ann_center,cuben]==False:
fixmix = lambda binm, mv: mix(binm,mean_e,var_e,mv)
hl1 = lambda binm, delta,a: huber_loss(binm,mean_e,delta,a)
hl2 = lambda binm, a: huber_loss(binm,mean_e,np.mean(abs(bin_mid)),a)
if self.distri[modn]=='G':
mean_temp,mixval_temp,var_temp,fiterr_temp,distrim_temp=te_gl(gaus,bin_mid,hist,mean_e,var_e,0)
elif self.distri[modn]=='L':
mean_temp,mixval_temp,var_temp,fiterr_temp,distrim_temp=te_gl(lap,bin_mid,hist,mean_e,var_e,1)
elif self.distri[modn]=='M':
mean_temp,mixval_temp,var_temp,fiterr_temp,distrim_temp=te_m(fixmix,bin_mid,hist,[0.5],[(0),(1)],mean_e,var_e,2)
elif self.distri[modn]=='H':
mean_temp,mixval_temp,var_temp,fiterr_temp,distrim_temp=te_h(hl1,hl2,bin_mid,hist/max(hist),[np.mean(abs(bin_mid)),0.15],[(min(abs(bin_mid)),0.0001),(max(abs(bin_mid)),2)],[0.15],[(0.0001),(2)],mean_e,1,3)
fiterr_temp=fiterr_temp*max(hist)
elif self.distri[modn]=='A':
res=[]
res.append(te_gl(gaus,bin_mid,hist,mean_e,var_e,0))
res.append(te_gl(lap,bin_mid,hist,mean_e,var_e,1))
res.append(te_m(fixmix,bin_mid,hist,[0.5],[(0),(1)],mean_e,var_e,2))
res.append(te_h(hl1,hl2,bin_mid,hist/max(hist),[np.mean(abs(bin_mid)),0.15],[(min(abs(bin_mid)),0.0001),(max(abs(bin_mid)),2)],[0.15],[(0.0001),(2)],mean_e,1,3))
fiterr=list([res[0][3],res[1][3],res[2][3],res[3][3]*max(hist)])
distrim_temp=fiterr.index(min(fiterr))
fiterr_temp=min(fiterr)
mean_temp=res[distrim_temp][0]
mixval_temp=res[distrim_temp][1]
var_temp=res[distrim_temp][2]
fiterr_temp=res[distrim_temp][3]
else:
fixmix = lambda binm, mv: mix(binm,mean_e,var_e,mv)
hl = lambda binm, a: huber_loss(binm,mean_e,np.mean(abs(bin_mid)),a)
if self.distri[modn]=='G':
mean_temp,mixval_temp,var_temp,fiterr_temp,distrim_temp=te_f_gl(gaus,bin_mid,hist,[mean_e,var_e],[(mean_e-np.sqrt(var_e),0),(mean_e+np.sqrt(var_e),2*var_e)],mean_e,var_e,0)
elif self.distri[modn]=='L':
mean_temp,mixval_temp,var_temp,fiterr_temp,distrim_temp=te_f_gl(lap,bin_mid,hist,[mean_e,var_e],[(mean_e-np.sqrt(var_e),0),(mean_e+np.sqrt(var_e),2*var_e)],mean_e,var_e,1)
elif self.distri[modn]=='M':
mean_temp,mixval_temp,var_temp,fiterr_temp,distrim_temp=te_f_mh(mix,fixmix,bin_mid,hist,[mean_e,var_e,0.5],[(mean_e-np.sqrt(var_e),0,0),(mean_e+np.sqrt(var_e),4*var_e,1)],[0.5],[(0),(1)],mean_e,var_e,2)
elif self.distri[modn]=='H':
mean_temp,mixval_temp,var_temp,fiterr_temp,distrim_temp=te_f_mh(huber_loss,hl,bin_mid,hist/max(hist),[mean_e,np.mean(abs(bin_mid)),0.15],[(mean_e-np.sqrt(var_e),min(abs(bin_mid)),0.0001),(mean_e+np.sqrt(var_e),max(abs(bin_mid)),2)],[0.15],[(0.0001),(2)],mean_e,1,3)
fiterr_temp=fiterr_temp*max(hist)
elif self.distri[modn]=='A':
res=[]
res.append(te_f_gl(gaus,bin_mid,hist,[mean_e,var_e],[(mean_e-np.sqrt(var_e),0),(mean_e+np.sqrt(var_e),2*var_e)],mean_e,var_e,0))
res.append(te_f_gl(lap,bin_mid,hist,[mean_e,var_e],[(mean_e-np.sqrt(var_e),0),(mean_e+np.sqrt(var_e),2*var_e)],mean_e,var_e,1))
res.append(te_f_mh(mix,fixmix,bin_mid,hist,[mean_e,var_e,0.5],[(mean_e-np.sqrt(var_e),0,0),(mean_e+np.sqrt(var_e),4*var_e,1)],[0.5],[(0),(1)],mean_e,var_e,2))
res.append(te_f_mh(huber_loss,hl,bin_mid,hist/max(hist),[mean_e,np.mean(abs(bin_mid)),0.15],[(mean_e-np.sqrt(var_e),min(abs(bin_mid)),0.0001),(mean_e+np.sqrt(var_e),max(abs(bin_mid)),2)],[0.15],[(0.0001),(2)],mean_e,1,3))
fiterr=list([res[0][3],res[1][3],res[2][3],res[3][3]*max(hist)])
distrim_temp=fiterr.index(min(fiterr))
fiterr_temp=min(fiterr)
mean_temp=res[distrim_temp][0]
mixval_temp=res[distrim_temp][1]
var_temp=res[distrim_temp][2]
fiterr_temp=res[distrim_temp][3]
return mean_temp,var_temp,fiterr_temp,mixval_temp,distrim_temp,max(hist)
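# Minimal stand-alone sketch of the fitting step performed by vm_esti above: fit a Gaussian
# pdf to the empirical histogram of the residuals with scipy's curve_fit and use the summed
# absolute deviation as the fitness error (synthetic data, illustrative only).
import numpy as np
from scipy.optimize import curve_fit

def gaus_demo(x, x0, var):
    return 1/np.sqrt(2*np.pi*var)*np.exp(-(x-x0)**2/(2*var))

arr_demo = np.random.normal(0.0, 0.1, 2000)                    # stand-in for the annulus residuals
hist_demo, bin_edge_demo = np.histogram(arr_demo, bins='auto', density=True)
bin_mid_demo = (bin_edge_demo[:-1]+bin_edge_demo[1:])/2
popt_demo, _ = curve_fit(gaus_demo, bin_mid_demo, hist_demo,
                         p0=[arr_demo.mean(), arr_demo.var()],
                         bounds=([-1.0, 1e-6], [1.0, 1.0]))
fit_err_demo = np.abs(gaus_demo(bin_mid_demo, *popt_demo)-hist_demo).sum()   # fitness error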
# Noise distribution parameter estimation considering the selected region (ST, FR, FM, SM or TE)
var_f=None
if self.var[modn][ann_center,cuben]=='ST':
var=np.zeros(self.crop_range[modn])
mean=np.zeros(self.crop_range[modn])
mixval=np.zeros(self.crop_range[modn])
fiterr=np.zeros(self.crop_range[modn])
distrim=np.zeros(self.crop_range[modn])
max_hist=np.zeros(self.crop_range[modn])
for v in range(0,self.crop_range[modn]):
cropf=int(self.crop[modn][ann_center,cuben]+2*v)
indices = get_annulus_segments(mcube[0], ann_center-int(cropf/2),cropf,1)
poscentx=indices[0][1]
poscenty=indices[0][0]
arr = np.ndarray.flatten(mcube[:,poscenty,poscentx])
mean[v],var[v],fiterr[v],mixval[v],distrim[v],max_hist[v]=vm_esti(modn,arr,np.var(mcube[:,poscenty,poscentx]),np.mean(mcube[:,poscenty,poscentx]))
elif self.var[modn][ann_center,cuben]=='FR':
var=np.zeros(((n*z),self.crop_range[modn]))
var_f=np.zeros(((n*z),self.crop_range[modn]))
mean=np.zeros(((n*z),self.crop_range[modn]))
mixval=np.zeros(((n*z),self.crop_range[modn]))
fiterr=np.zeros(((n*z),self.crop_range[modn]))
distrim=np.zeros(((n*z),self.crop_range[modn]))
max_hist=np.zeros(((n*z),self.crop_range[modn]))
for v in range(0,self.crop_range[modn]):
cropf=int(self.crop[modn][ann_center,cuben]+2*v)
indices = get_annulus_segments(mcube[0], ann_center-int(cropf/2),cropf,1)
poscentx=indices[0][1]
poscenty=indices[0][0]
for a in range((n*z)):
arr = np.ndarray.flatten(mcube[a,poscenty,poscentx])
mean[a,v],var[a,v],fiterr[a,v],mixval[a,v],distrim[a,v],max_hist[a,v]=vm_esti(modn,arr,np.var(mcube[a,poscenty,poscentx]),np.mean(mcube[a,poscenty,poscentx]))
if self.intensity[modn][ann_center,cuben]=='Pixel':
var_f[a,v]=np.var(mcube[a,poscenty,poscentx])
elif self.var[modn][ann_center,cuben]=='SM':
indicesy,indicesx=get_time_series(mcube,ann_center)
var=np.zeros((len(indicesy),self.crop_range[modn]))
mean=np.zeros((len(indicesy),self.crop_range[modn]))
mixval=np.zeros((len(indicesy),self.crop_range[modn]))
fiterr=np.zeros((len(indicesy),self.crop_range[modn]))
distrim=np.zeros((len(indicesy),self.crop_range[modn]))
max_hist=np.zeros((len(indicesy),self.crop_range[modn]))
size_seg=2
for v in range(0,self.crop_range[modn]):
cropf=int(self.crop[modn][ann_center,cuben]+2*v)
for a in range(len(indicesy)):
if (a+int(cropf*3/2)+size_seg)>(len(indicesy)-1):
posup= a+int(cropf*3/2)+size_seg-len(indicesy)-1
else:
posup=a+int(cropf*3/2)+size_seg
indc=circle(indicesy[a], indicesx[a],cropf/2)
radist_b=np.sqrt((indicesx[a-int(cropf*3/2)-size_seg-1]-int(x/2))**2+(indicesy[a-int(cropf*3/2)-size_seg-1]-int(y/2))**2)
if (indicesx[a-int(cropf*3/2)-size_seg-1]-int(x/2))>=0:
ang_b= np.arccos((indicesy[a-int(cropf*3/2)-size_seg-1]-int(y/2))/radist_b)/np.pi*180
else:
ang_b= 360-np.arccos((indicesy[a-int(cropf*3/2)-size_seg-1]-int(y/2))/radist_b)/np.pi*180
radist_e=np.sqrt((indicesx[posup]-int(x/2))**2+(indicesy[posup]-int(y/2))**2)
if (indicesx[posup]-int(x/2))>=0:
ang_e= np.arccos((indicesy[posup]-int(y/2))/radist_e)/np.pi*180
else:
ang_e= 360-np.arccos((indicesy[posup]-int(y/2))/radist_e)/np.pi*180
if ang_e<ang_b:
diffang=(360-ang_b)+ang_e
else:
diffang=ang_e-ang_b
indices = get_annulus_segments(mcube[0], ann_center-int(cropf/2),cropf,int(360/diffang),ang_b)
positionx=[]
positiony=[]
for k in range(0,len(indices[0][1])):
if len(set(np.where(indices[0][1][k]==indc[1])[0]) & set(np.where(indices[0][0][k]==indc[0])[0]))==0:
positionx.append(indices[0][1][k])
positiony.append(indices[0][0][k])
arr = np.ndarray.flatten(mcube[:,positiony,positionx])
mean[a,v],var[a,v],fiterr[a,v],mixval[a,v],distrim[a,v],max_hist[a,v]=vm_esti(modn,arr,np.var(mcube[:,positiony,positionx]),np.mean(mcube[:,positiony,positionx]))
elif self.var[modn][ann_center,cuben]=='FM' :
indicesy,indicesx=get_time_series(mcube,ann_center)
var=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
var_f=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
mean=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
mixval=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
fiterr=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
distrim=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
max_hist=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
for v in range(0,self.crop_range[modn]):
cropf=int(self.crop[modn][ann_center,cuben]+2*v)
indices = get_annulus_segments(mcube[0], ann_center-int(cropf/2),cropf,1)
for a in range(0,len(indicesy)):
indc=circle(indicesy[a], indicesx[a],3)
positionx=[]
positiony=[]
for k in range(0,len(indices[0][1])):
if len(set(np.where(indices[0][1][k]==indc[1])[0]) & set(np.where(indices[0][0][k]==indc[0])[0]))==0:
positionx.append(indices[0][1][k])
positiony.append(indices[0][0][k])
for b in range((n*z)):
arr = np.ndarray.flatten(mcube[b,positiony,positionx])
mean[a,b,v],var[a,b,v],fiterr[a,b,v],mixval[a,b,v],distrim[a,b,v],max_hist[a,b,v]=vm_esti(modn,arr,np.var(np.asarray(mcube[b,positiony,positionx])),np.mean(np.asarray(mcube[b,positiony,positionx])))
if self.intensity[modn][ann_center,cuben]=='Pixel':
var_f[a,b,v]=np.var(mcube[b,positiony,positionx])
elif self.var[modn][ann_center,cuben]=='TE' :
indicesy,indicesx=get_time_series(mcube,ann_center)
var=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
var_f=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
mean=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
mixval=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
fiterr=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
distrim=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
max_hist=np.zeros((len(indicesy),(n*z),self.crop_range[modn]))
if self.cube[cuben].ndim==4:
pa_temp=np.hstack([self.pa[cuben]]*self.cube[cuben].shape[0])
else:
pa_temp=self.pa[cuben]
mcube_derot=cube_derotate(mcube,-pa_temp)
for v in range(0,self.crop_range[modn]):
cropf=int(self.crop[modn][ann_center,cuben]+2*v)
for a in range(0,len(indicesy)):
radist=np.sqrt((indicesx[a]-int(x/2))**2+(indicesy[a]-int(y/2))**2)
if (indicesy[a]-int(y/2))>=0:
ang_s= np.arccos((indicesx[a]-int(x/2))/radist)/np.pi*180
else:
ang_s= 360-np.arccos((indicesx[a]-int(x/2))/radist)/np.pi*180
for b in range((n*z)):
twopi=2*np.pi
sigposy=int(y/2 + np.sin((ang_s-pa_temp[b])/360*twopi)*radist)
sigposx=int(x/2+ np.cos((ang_s-pa_temp[b])/360*twopi)*radist)
y0 = int(sigposy - int(cropf/2))
y1 = int(sigposy + int(cropf/2)+1) # +1 cause endpoint is excluded when slicing
x0 = int(sigposx - int(cropf/2))
x1 = int(sigposx + int(cropf/2)+1)
mask = np.ones(mcube_derot.shape[0],dtype=bool)
mask[b]=False
mcube_sel=mcube_derot[mask,y0:y1,x0:x1]
arr = np.ndarray.flatten(mcube_sel)
mean[a,b,v],var[a,b,v],fiterr[a,b,v],mixval[a,b,v],distrim[a,b,v],max_hist[a,b,v]=vm_esti(modn,arr,np.var(np.asarray(mcube_sel)),np.mean(np.asarray(mcube_sel)))
if self.intensity[modn][ann_center,cuben]=='Pixel':
var_f[a,b,v]=np.var(np.asarray(mcube_sel))
#Estimation of the cube of likelihoods
#print(np.bincount(distrim.flatten(order='C').astype(int)))
res=likfcn(cuben,modn,mean,var,mixval,max_hist,mcube,ann_center,distrim,evals_matrix,evecs_matrix, KL_basis_matrix,refs_mean_sub_matrix,sci_mean_sub_matrix,resicube_klip,likemap,var_f,ind_ref_list,coef_list)
indicesy,indicesx=get_time_series(mcube,ann_center)
if self.model[modn]=='FM KLIP' or self.model[modn]=='FM LOCI':
return ann_center,res[0][:,indicesy,indicesx],res[1]
else:
return ann_center,res[:,indicesy,indicesx]
def lik_esti(self, sel_cube=None,showplot=False,verbose=True):
"""
Function allowing the estimation of the likelihood of being in either the planetary regime
or the background regime for the different cubes. The likelihood computation is based on
the residual cubes generated with the considered set of models.
Parameters
----------
showplot: bool, optional
If True, provides the plots of the final residual frames for the selected
ADI-based post-processing techniques along with the final RSM map. Default is False.
fulloutput: bool, optional
If True, provides the selected distribution, the fitness errors and the mixval
(for distri='mix') for every annulus in respectively obj.distrisel, obj.fiterr
and obj.mixval (the length of these lists is equal to maxradius - minradius, the
size of the matrix for each annulus depends on the approach selected for the variance
estimation, see var in add_model)
verbose : bool, optional
If True prints intermediate info. Default is True.
"""
def init(probi):
global probcube
probcube = probi
for i in range(len(self.model)):
for j in range(len(self.cube)):
#Computation of the cube of residuals
if sel_cube is None or [j,i] in sel_cube:
if self.opti==False:
if self.model[i]=='APCA':
print("Annular PCA estimation")
residuals_cube_, frame_fin = annular_pca_adisdi(self.cube[j], self.pa[j], self.scale_list[j], fwhm=self.fwhm, ncomp=self.ncomp[i][0,j], asize=self.asize[i],
delta_rot=self.delta_rot[i][0,j],delta_sep=self.delta_sep[i][0,j],radius_int=self.minradius, svd_mode='lapack', n_segments=int(self.nsegments[i][0,j]), nproc=self.ncore,full_output=True,verbose=False)
if showplot:
plot_frames(frame_fin,title='APCA', colorbar=True,ang_scale=True, axis=False,pxscale=self.pxscale,ang_legend=True,show_center=True)
elif self.model[i]=='NMF':
print("NMF estimation")
residuals_cube_, frame_fin = nmf_adisdi(self.cube[j], self.pa[j], self.scale_list[j], ncomp=self.ncomp[i][0,j], max_iter=100, random_state=0, mask_center_px=None,full_output=True,verbose=False)
if showplot:
plot_frames(frame_fin,title='NMF', colorbar=True,ang_scale=True, axis=False,pxscale=self.pxscale,ang_legend=True,show_center=True)
elif self.model[i]=='LLSG':
print("LLSGestimation")
residuals_cube_, frame_fin = llsg_adisdi(self.cube[j], self.pa[j],self.scale_list[j], self.fwhm, rank=self.rank[i][0,j],asize=self.asize[i], thresh=1,n_segments=int(self.nsegments[i][0,j]), max_iter=40, random_seed=10, nproc=self.ncore,full_output=True,verbose=False)
if showplot:
plot_frames(frame_fin,title='LLSG', colorbar=True,ang_scale=True, axis=False,pxscale=self.pxscale,ang_legend=True,show_center=True)
elif self.model[i]=='LOCI':
print("LOCI estimation")
residuals_cube_,frame_fin=loci_adisdi(self.cube[j], self.pa[j],self.scale_list[j], fwhm=self.fwhm,asize=self.asize[i],radius_int=self.minradius, n_segments=int(self.nsegments[i][0,j]),tol=self.tolerance[i][0,j], nproc=self.ncore, optim_scale_fact=2,delta_rot=self.delta_rot[i][0,j],delta_sep=self.delta_sep[i][0,j],verbose=False,full_output=True)
if showplot:
plot_frames(frame_fin,title='LOCI', colorbar=True,ang_scale=True, axis=False,pxscale=self.pxscale,ang_legend=True,show_center=True)
elif self.model[i]=='KLIP':
print("KLIP estimation")
cube_out, residuals_cube_, frame_fin = KLIP(self.cube[j], self.pa[j], ncomp=self.ncomp[i][0,j], fwhm=self.fwhm, asize=self.asize[i],
delta_rot=self.delta_rot[i][0,j],full_output=True,verbose=False)
if showplot:
plot_frames(frame_fin,title='KLIP', colorbar=True,ang_scale=True, axis=False,pxscale=self.pxscale,ang_legend=True,show_center=True)
elif self.model[i]=='FM LOCI' or self.model[i]=='FM KLIP':
residuals_cube_=np.zeros_like(self.cube[j])
frame_fin=np.zeros_like(self.cube[j][0])
zero_test=abs(residuals_cube_.sum(axis=1).sum(axis=1))
if np.min(zero_test)==0:
residuals_cube_[np.argmin(zero_test),:,:]=residuals_cube_.mean(axis=0)
else:
residuals_cube_=np.zeros_like(rot_scale('ini',self.cube[j],None,self.pa[j],self.scale_list[j],self.imlib, self.interpolation)[0])
frame_fin=np.zeros_like(residuals_cube_[0])
#Likelihood computation for the different models and cubes
if self.model[i]=='FM KLIP' or self.model[i]=='FM LOCI':
max_rad=self.max_r+1
else:
max_rad=self.maxradius+1
like_temp=np.zeros(((residuals_cube_.shape[0]+1),residuals_cube_.shape[1],residuals_cube_.shape[2],len(self.interval[i][0,j]),2,self.crop_range[i]))
X_shape=residuals_cube_.shape
X = RawArray('d', int(np.prod(X_shape)))
X_np = np.frombuffer(X).reshape(X_shape)
np.copyto(X_np, residuals_cube_)
time_out=10/250*residuals_cube_.shape[0]*max_rad
results=[]
pool=Pool(processes=self.ncore, initializer=init_worker, initargs=(X, X_shape))
for e in range(self.minradius,max_rad):
results.append(pool.apply_async(self.likelihood,args=(e,j,i,0,None,True)))
[result.wait(timeout=time_out) for result in results]
it=self.minradius
for result in results:
try:
res=result.get(timeout=1)
indicesy,indicesx=get_time_series(self.cube[0],res[0])
if self.model[i]=='FM LOCI' or self.model[i]=='FM KLIP':
like_temp[:,indicesy,indicesx,:,:,:]=res[1]
self.psf_fm[j][i][res[0]]=res[2]
else:
like_temp[:,indicesy,indicesx,:,:,:]=res[1]
except mp.TimeoutError:
pool.terminate()
pool.join()
res=self.likelihood(it,j,i,residuals_cube_,None,True)
indicesy,indicesx=get_time_series(self.cube[0],res[0])
if self.model[i]=='FM LOCI' or self.model[i]=='FM KLIP':
like_temp[:,indicesy,indicesx,:,:,:]=res[1]
self.psf_fm[j][i][res[0]]=res[2]
else:
like_temp[:,indicesy,indicesx,:,:,:]=res[1]
it+=1
like=[]
SNR_FMMF=[]
for k in range(self.crop_range[i]):
like.append(like_temp[0:residuals_cube_.shape[0],:,:,:,:,k])
SNR_FMMF.append(like_temp[residuals_cube_.shape[0],:,:,0,0,k])
self.like_fin[j][i]=like
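# Minimal stand-alone sketch of the shared-memory pattern used above (RawArray + frombuffer):
# the residual cube is copied once into a raw shared buffer and every worker rebuilds a
# numpy view of it without pickling the full cube. The worker below is a hypothetical
# stand-in that just sums one frame.
import numpy as np
from multiprocessing import Pool, RawArray

_shared_demo = {}

def _init_worker_demo(raw, shape):
    # keep the shared buffer and its shape so each worker can rebuild a zero-copy view
    _shared_demo['raw'] = raw
    _shared_demo['shape'] = shape

def _frame_sum_demo(k):
    cube = np.frombuffer(_shared_demo['raw']).reshape(_shared_demo['shape'])
    return cube[k].sum()

if __name__ == '__main__':
    cube_demo = np.random.rand(4, 8, 8)
    raw_demo = RawArray('d', cube_demo.size)
    np.copyto(np.frombuffer(raw_demo).reshape(cube_demo.shape), cube_demo)
    with Pool(processes=2, initializer=_init_worker_demo, initargs=(raw_demo, cube_demo.shape)) as pool:
        print(pool.map(_frame_sum_demo, range(cube_demo.shape[0])))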
def likfcn(self,ann_center,like_cube,estimator,ns):
if type(like_cube) is not np.ndarray:
like_cube = np.frombuffer(var_dict['X']).reshape(var_dict['X_shape'])
from vip_hci.var import frame_center
def forback(obs,Trpr,prob_ini):
#Forward backward model relying on past and future observation to
#compute the probability based on a two-states Markov chain
scalefact_fw=np.zeros(obs.shape[1])
scalefact_bw=np.zeros(obs.shape[1])
prob_fw=np.zeros((2,obs.shape[1]))
prob_bw=np.zeros((2,obs.shape[1]))
prob_fin=np.zeros((2,obs.shape[1]))
prob_pre_fw=0
prob_pre_bw=0
lik=0
for i in range(obs.shape[1]):
if obs[:,i].sum()!=0:
j=obs.shape[1]-1-i
if i==0:
prob_cur_fw=np.dot(np.diag(obs[:,i]),Trpr).dot(prob_ini)
prob_cur_bw=np.dot(Trpr,np.diag(obs[:,j])).dot(prob_ini)
else:
prob_cur_fw=np.dot(np.diag(obs[:,i]),Trpr).dot(prob_pre_fw)
prob_cur_bw=np.dot(Trpr,np.diag(obs[:,j])).dot(prob_pre_bw)
scalefact_fw[i]=prob_cur_fw.sum()
if scalefact_fw[i]==0:
prob_fw[:,i]=0
else:
prob_fw[:,i]=prob_cur_fw/scalefact_fw[i]
prob_pre_fw=prob_fw[:,i]
scalefact_bw[j]=prob_cur_bw.sum()
if scalefact_bw[j]==0:
prob_bw[:,j]=0
else:
prob_bw[:,j]=prob_cur_bw/scalefact_bw[j]
prob_pre_bw=prob_bw[:,j]
scalefact_fw_tot=(scalefact_fw).sum()
scalefact_bw_tot=(scalefact_bw).sum()
for k in range(obs.shape[1]):
if (prob_fw[:,k]*prob_bw[:,k]).sum()==0:
prob_fin[:,k]=0
else:
prob_fin[:,k]=(prob_fw[:,k]*prob_bw[:,k])/(prob_fw[:,k]*prob_bw[:,k]).sum()
lik = scalefact_fw_tot+scalefact_bw_tot
return prob_fin, lik
def RSM_esti(obs,Trpr,prob_ini):
#Original RSM approach involving a forward two-states Markov chain to compute the probabilities
prob_fin=np.zeros((2,obs.shape[1]))
prob_pre=0
lik=0
for i in range(obs.shape[1]):
if obs[:,i].sum()!=0:
if i==0:
cf=obs[:,i]*np.dot(Trpr,prob_ini)
else:
cf=obs[:,i]*np.dot(Trpr,prob_pre)
f=sum(cf)
lik+=np.log(f)
prob_fin[:,i]=cf/f
prob_pre=prob_fin[:,i]
else:
prob_fin[:,i]=np.nan
return prob_fin, lik
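# Minimal stand-alone sketch of the forward regime-switching filter implemented by RSM_esti
# above, run on synthetic likelihoods (row 0: background regime, row 1: planetary regime).
import numpy as np

def forward_rsm_demo(obs, Trpr, prob_ini):
    prob = np.zeros_like(obs)
    prev = prob_ini
    loglik = 0.0
    for t in range(obs.shape[1]):
        cf = obs[:, t]*Trpr.dot(prev)   # propagate with the transition matrix, weight by likelihoods
        f = cf.sum()
        loglik += np.log(f)
        prob[:, t] = cf/f               # normalized regime probabilities at time t
        prev = prob[:, t]
    return prob, loglik

Trpr_demo = np.array([[0.99, 0.3], [0.01, 0.7]])   # columns: previous regime, rows: current regime
prob_ini_demo = np.array([0.97, 0.03])
obs_demo = np.random.rand(2, 20)+1e-3              # synthetic likelihoods of each observation
prob_demo, loglik_demo = forward_rsm_demo(obs_demo, Trpr_demo, prob_ini_demo)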
probmap = np.zeros((like_cube.shape[0],like_cube.shape[1],like_cube.shape[2]))
ceny, cenx = frame_center(like_cube[0,:,:,0,0])
indicesy,indicesx=get_time_series(like_cube[:,:,:,0,0],ann_center)
npix = len(indicesy)
pini=[1-ns/(like_cube.shape[0]*(npix)),1/(like_cube.shape[0]*ns),ns/(like_cube.shape[0]*(npix)),1-1/(like_cube.shape[0]*ns)]
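# The transition matrix is built so that roughly ns switches towards the planetary regime
# are expected among the n_frames*npix observations of the annulus (probability
# ns/(n_frames*npix) of entering the planetary regime), while a planetary sequence lasts on
# average n_frames*ns observations (probability 1/(n_frames*ns) of leaving it).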
prob=np.reshape([pini],(2, 2))
Trpr= prob
#Initialization of the Regime Switching model
# Stationary probabilities of the two-state Markov chain: stack (I - prob) with a row of ones
mA=np.concatenate((np.diag(np.repeat(1,2))-prob,[np.repeat(1,2)]))
# Right-hand side imposing (I - prob) a = 0 together with the normalization sum(a) = 1
vE=np.repeat([0,1],[2,1])
# Least-squares solution of the overdetermined system mA*a=vE: a = (mA'mA)^(-1) mA' vE
prob_ini=np.dot(np.dot(np.linalg.inv(np.dot(mA.T,mA)),mA.T),vE)
cf=np.zeros((2,len(indicesy)*like_cube.shape[0],like_cube.shape[3]))
totind=0
for i in range(0,len(indicesy)):
poscenty=indicesy[i]
poscentx=indicesx[i]
for j in range(0,like_cube.shape[0]):
for m in range(0,like_cube.shape[3]):
cf[0,totind,m]=like_cube[j,poscenty,poscentx,m,0]
cf[1,totind,m]=like_cube[j,poscenty,poscentx,m,1]
totind+=1
#Computation of the probability cube via the regime switching framework
prob_fin=[]
lik_fin=[]
for n in range(like_cube.shape[3]):
if estimator=='Forward':
prob_fin_temp,lik_fin_temp=RSM_esti(cf[:,:,n],Trpr,prob_ini)
elif estimator=='Forward-Backward':
prob_fin_temp,lik_fin_temp=forback(cf[:,:,n],Trpr,prob_ini)
prob_fin.append(prob_fin_temp)
lik_fin.append(lik_fin_temp)
cub_id1=0
for i in range(0,len(indicesy)):
cub_id2=0
for j in range(like_cube.shape[0]):
probmap[cub_id2,indicesy[i],indicesx[i]]=prob_fin[lik_fin.index(max(lik_fin))][1,cub_id1]
cub_id1+=1
cub_id2+=1
return probmap[:,indicesy,indicesx],ann_center
def probmap_esti(self,modthencube=True,ns=1,sel_crop=None, estimator='Forward',colmode='median',ann_center=None,sel_cube=None):
"""
Function allowing the estimation of the final RSM map based on the likelihood computed with
the lik_esti function for the different cubes and different post-processing techniques
used to generate the speckle field model. The RSM map estimation may be based on a forward
or forward-backward approach.
Parameters
----------
modthencube: bool, optional
Parameter defining if the concatenated cube feeding the RSM model is created
considering first the model or the different cubes. If 'modthencube=False',
the function will select the first cube then test all models on it and move
to the next one. If 'modthencube=True', the function will select one model and apply
it to every cube before moving to the next model. Default is True.
ns: float, optional
Number of regime switches. Default is one regime switch per annulus but
smaller values may be used to reduce the impact of noise or disk structures
on the final RSM probability map.
sel_crop: list of int or None, optional
Selected crop sizes from proposed crop_range (crop size = crop_size + 2 x (sel_crop)).
A specific sel_crop should be provided for each mode. Default is crop size = [crop_size]
estimator: str, optional
Approach used for the probability map estimation either a 'Forward' model
(approach used in the original RSM map algorithm) which considers only the
past observations to compute the current probability or 'Forward-Backward' model
which relies on both past and future observations to compute the current probability
colmode: str, optional
Method used to generate the final probability map from the three-dimensional cube
of probabilities generated by the RSM approach. It is possible to choose between the 'mean',
the 'median', the 'sum' or the 'max' value of the probabilities along the time axis. Default is 'median'.
ann_center:int, optional
Selected annulus if the probabilities are computed for a single annulus
(Used by the optimization framework). Default is None
sel_cube: list of arrays,optional
List of selected PSF-subtraction techniques and ADI sequences used to generate
the final probability map. [[i1,j1],[i2,j2],...] with i1 the first considered PSF-subtraction
technique and j1 the first considered ADI sequence, i2 the second considered PSF-subtraction
technique, etc. Default is None, which implies that all PSF-subtraction techniques and all
ADI sequences are used to compute the final probability map.
"""
import numpy as np
if type(sel_crop)!=np.ndarray:
sel_crop=np.zeros(len(self.model)*len(self.cube))
if sel_cube is None:
if modthencube==True:
for i in range(len(self.model)):
for j in range(len(self.cube)):
if (i+j)==0:
if self.like_fin[j][i][int(sel_crop[i])].shape[3]==1:
like_cube=np.repeat(self.like_fin[j][i][int(sel_crop[i])],len(self.interval[i][0,j]),axis=3)
else:
like_cube=self.like_fin[j][i][int(sel_crop[i])]
else:
if self.like_fin[j][i][int(sel_crop[i])].shape[3]==1:
like_cube=np.append(like_cube,np.repeat(self.like_fin[j][i][int(sel_crop[i])],len(self.interval[i][0,j]),axis=3),axis=0)
else:
like_cube=np.append(like_cube,self.like_fin[j][i][int(sel_crop[i])],axis=0)
else:
for i in range(len(self.cube)):
for j in range(len(self.model)):
if (i+j)==0:
if self.like_fin[i][j][int(sel_crop[j])].shape[3]==1:
like_cube=np.repeat(self.like_fin[i][j][int(sel_crop[j])],len(self.interval[j][0,i]),axis=3)
else:
like_cube=self.like_fin[i][j][int(sel_crop[j])]
else:
if self.like_fin[i][j][int(sel_crop[j])].shape[3]==1:
like_cube=np.append(like_cube,np.repeat(self.like_fin[i][j][int(sel_crop[j])],len(self.interval[j][0,i]),axis=3),axis=0)
else:
like_cube=np.append(like_cube,self.like_fin[i][j][int(sel_crop[j])],axis=0)
else:
for i in range(len(sel_cube)):
if i==0:
if self.like_fin[sel_cube[i][0]][sel_cube[i][1]][int(sel_crop[i])].shape[3]==1:
like_cube=np.repeat(self.like_fin[sel_cube[i][0]][sel_cube[i][1]][int(sel_crop[i])],len(self.interval[sel_cube[i][1]][0,sel_cube[i][0]]),axis=3)
else:
like_cube=self.like_fin[sel_cube[i][0]][sel_cube[i][1]][int(sel_crop[i])]
else:
if self.like_fin[sel_cube[i][0]][sel_cube[i][1]][int(sel_crop[i])].shape[3]==1:
like_cube=np.append(like_cube,np.repeat(self.like_fin[sel_cube[i][0]][sel_cube[i][1]][int(sel_crop[i])],len(self.interval[sel_cube[i][1]][0,sel_cube[i][0]]),axis=3),axis=0)
else:
like_cube=np.append(like_cube,self.like_fin[sel_cube[i][0]][sel_cube[i][1]][int(sel_crop[i])],axis=0)
n,y,x,l_int,r_n =like_cube.shape
probmap = np.zeros((like_cube.shape[0],like_cube.shape[1],like_cube.shape[2]))
if ann_center is not None:
indicesy,indicesx=get_time_series(self.cube[0],ann_center)
probmap[:,indicesy,indicesx]=self.likfcn(ann_center,like_cube,estimator,ns)[0]
else:
X_shape=like_cube.shape
X = RawArray('d', int(np.prod(X_shape)))
X_np = np.frombuffer(X).reshape(X_shape)
np.copyto(X_np, like_cube)
time_out=5/250*like_cube.shape[0]*self.maxradius
results=[]
pool=Pool(processes=self.ncore, initializer=init_worker, initargs=(X, X_shape))
for e in range(self.minradius,self.maxradius+1):
results.append(pool.apply_async(self.likfcn,args=(e,0,estimator,ns)))
[result.wait(timeout=time_out) for result in results]
it=self.minradius
for result in results:
try:
res=result.get(timeout=1)
indicesy,indicesx=get_time_series(self.cube[0],res[1])
probmap[:,indicesy,indicesx]=res[0]
except mp.TimeoutError:
pool.terminate()
pool.join()
res=self.likfcn(it,like_cube,estimator,ns)
indicesy,indicesx=get_time_series(self.cube[0],res[1])
probmap[:,indicesy,indicesx]=res[0]
it+=1
if colmode == 'mean':
self.probmap= np.nanmean(probmap, axis=0)
elif colmode == 'median':
self.probmap= np.nanmedian(probmap, axis=0)
elif colmode == 'sum':
self.probmap= np.sum(probmap, axis=0)
elif colmode == 'max':
self.probmap= np.max(probmap, axis=0)
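# Minimal sketch of the final collapse performed just above: the observation axis of the
# probability cube is reduced with the selected colmode to produce the 2-D RSM detection map
# (synthetic cube, illustrative only).
import numpy as np
probcube_demo = np.random.rand(40, 100, 100)       # (n_obs, y, x) planetary-regime probabilities
rsm_map_median = np.nanmedian(probcube_demo, axis=0)
rsm_map_mean = np.nanmean(probcube_demo, axis=0)
rsm_map_max = np.max(probcube_demo, axis=0)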
def model_esti(self,modn,cuben,ann_center,cube):
"""
Function used during the optimization process to compute the cube of residuals
for a given annulus whose center is defined by ann_center. The PSF-subtraction
technique index is given by modn and the ADI sequence index by cuben.
"""
if self.model[modn]=='FM KLIP' or self.model[modn]=='KLIP':
resicube=np.zeros_like(self.cube[cuben])
pa_threshold = np.rad2deg(2 * np.arctan(self.delta_rot[modn][ann_center,cuben] * self.fwhm / (2 * ann_center)))
mid_range = np.abs(np.amax(self.pa[cuben]) - np.amin(self.pa[cuben])) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
pa_threshold = float(mid_range - mid_range * 0.1)
indices = get_annulus_segments(self.cube[cuben][0], ann_center-int(self.asize[modn]/2),int(self.asize[modn]),1)
for k in range(0,self.cube[cuben].shape[0]):
evals_temp,evecs_temp,KL_basis_temp,sub_img_rows_temp,refs_mean_sub_temp,sci_mean_sub_temp =KLIP_patch(k,cube[:, indices[0][0], indices[0][1]], self.ncomp[modn][ann_center,cuben], self.pa[cuben], self.crop[modn][ann_center,cuben], pa_threshold, ann_center)
resicube[k,indices[0][0], indices[0][1]] = sub_img_rows_temp
resicube_der=cube_derotate(resicube,self.pa[cuben])
frame_fin=cube_collapse(resicube_der, mode='median')
elif self.model[modn]=='FM LOCI':
resicube, ind_ref_list,coef_list=LOCI_FM(cube, self.psf[cuben], ann_center, self.pa[cuben],None, self.asize[modn], self.fwhm, self.tolerance[modn][ann_center,cuben],self.delta_rot[modn][ann_center,cuben],None)
resicube_der=cube_derotate(resicube,self.pa[cuben])
frame_fin=cube_collapse(resicube_der, mode='median')
elif self.model[modn]=='LOCI':
cube_rot_scale,angle_list,scale_list=rot_scale('ini',cube,None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)
if scale_list is not None:
resicube=np.zeros_like(cube_rot_scale)
for i in range(int(max(scale_list)*ann_center/self.asize[modn])):
indices = get_annulus_segments(cube_rot_scale[0], ann_center-int(self.asize[modn]/2)+self.asize[modn]*i,int(self.asize[modn]),int(self.nsegments[modn][ann_center,cuben]))
resicube[:,indices[0][0], indices[0][1]]=LOCI_FM(cube_rot_scale, self.psf[cuben], ann_center,angle_list,scale_list, self.asize[modn], self.fwhm, self.tolerance[modn][ann_center,cuben],self.delta_rot[modn][ann_center,cuben],self.delta_sep[modn][ann_center,cuben])[0][:,indices[0][0], indices[0][1]]
else:
resicube, ind_ref_list,coef_list=LOCI_FM(cube_rot_scale, self.psf[cuben], ann_center, self.pa[cuben],None, self.fwhm, self.fwhm, self.tolerance[modn][ann_center,cuben],self.delta_rot[modn][ann_center,cuben],None)
resicube_der=rot_scale('fin',self.cube[cuben],resicube,angle_list,scale_list,self.imlib, self.interpolation)
frame_fin=cube_collapse(resicube_der, mode='median')
elif self.model[modn]=='APCA':
cube_rot_scale,angle_list,scale_list=rot_scale('ini',cube,None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)
resicube=np.zeros_like(cube_rot_scale)
if scale_list is not None:
range_adisdi=range(int(max(scale_list)*ann_center/self.asize[modn]))
else:
range_adisdi=range(1)
for i in range_adisdi:
pa_threshold = np.rad2deg(2 * np.arctan(self.delta_rot[modn][ann_center,cuben] * self.fwhm / (2 * (ann_center+self.asize[modn]*i))))
mid_range = np.abs(np.amax(self.pa[cuben]) - np.amin(self.pa[cuben])) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
pa_threshold = float(mid_range - mid_range * 0.1)
indices = get_annulus_segments(cube_rot_scale[0], ann_center-int(self.asize[modn]/2)+self.asize[modn]*i,int(self.asize[modn]),int(self.nsegments[modn][ann_center,cuben]))
for k in range(0,cube_rot_scale.shape[0]):
for l in range(self.nsegments[modn][ann_center,cuben]):
resicube[k,indices[l][0], indices[l][1]],v_resi,data_shape=do_pca_patch(cube_rot_scale[:, indices[l][0], indices[l][1]], k, angle_list,scale_list, self.fwhm, pa_threshold,self.delta_sep[modn][ann_center,cuben], ann_center+self.asize[modn]*i,
svd_mode='lapack', ncomp=self.ncomp[modn][ann_center,cuben],min_frames_lib=2, max_frames_lib=200, tol=1e-1,matrix_ref=None)
resicube_der=rot_scale('fin',self.cube[cuben],resicube,angle_list,scale_list,self.imlib, self.interpolation)
frame_fin=cube_collapse(resicube_der, mode='median')
elif self.model[modn]=='NMF':
cube_rot_scale,angle_list,scale_list=rot_scale('ini',cube,None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)
resicube=np.zeros_like(cube_rot_scale)
if self.opti_mode=='full-frame':
nfr = cube_rot_scale.shape[0]
matrix = np.reshape(cube_rot_scale, (nfr, -1))
res= NMF_patch(matrix, ncomp=self.ncomp[modn][ann_center,cuben], max_iter=100,random_state=None,sklearn=True)
resicube=np.reshape(res,(cube_rot_scale.shape[0],cube_rot_scale.shape[1],cube_rot_scale.shape[2]))
else:
if scale_list is not None:
range_adisdi=range(int(max(scale_list)*ann_center/self.asize[modn]))
else:
range_adisdi=range(1)
for i in range_adisdi:
indices = get_annulus_segments(cube_rot_scale[0], ann_center-int(self.asize[modn]/2)+self.asize[modn]*i,int(self.asize[modn]),int(self.nsegments[modn][ann_center,cuben]))
for l in range(self.nsegments[modn][ann_center,cuben]):
resicube[:,indices[l][0], indices[l][1]]= NMF_patch(cube_rot_scale[:, indices[l][0], indices[l][1]], ncomp=self.ncomp[modn][ann_center,cuben], max_iter=100,random_state=None)
resicube_der=rot_scale('fin',self.cube[cuben],resicube,angle_list,scale_list,self.imlib, self.interpolation)
frame_fin=cube_collapse(resicube_der, mode='median')
elif self.model[modn]=='LLSG':
cube_rot_scale,angle_list,scale_list=rot_scale('ini',cube,None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)
resicube=np.zeros_like(cube_rot_scale)
if scale_list is not None:
range_adisdi=range(int(max(scale_list)*ann_center/self.asize[modn]))
else:
range_adisdi=range(1)
for i in range_adisdi:
indices = get_annulus_segments(cube_rot_scale[0], ann_center-int(self.asize[modn]/2)+self.asize[modn]*i,int(self.asize[modn]),int(self.nsegments[modn][ann_center,cuben]))
for l in range(self.nsegments[modn][ann_center,cuben]):
resicube[:,indices[l][0], indices[l][1]]= _decompose_patch(indices,l, cube_rot_scale,self.nsegments[modn][ann_center,cuben],
self.rank[modn][ann_center,cuben], low_rank_ref=False, low_rank_mode='svd', thresh=1,thresh_mode='soft', max_iter=40, auto_rank_mode='noise', cevr=0.9,
residuals_tol=1e-1, random_seed=10, debug=False, full_output=False).T
resicube_der=rot_scale('fin',self.cube[cuben],resicube,angle_list,scale_list,self.imlib, self.interpolation)
frame_fin=cube_collapse(resicube_der, mode='median')
return frame_fin,resicube_der
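# Stand-alone sketch of the small-sample-statistics detection threshold used in
# contrast_esti below (Mawet et al. 2014): the 5-sigma Gaussian false-alarm level is
# converted into an equivalent Student-t threshold for the limited number of independent
# apertures at the considered separation. Values are assumed for illustration only.
import numpy as np
from scipy import stats

n_aper_demo = 12                                          # one-FWHM apertures fitting in the annulus
fluxes_demo = np.random.normal(0.0, 1e-4, n_aper_demo)    # hypothetical aperture photometry of the residual frame
ss_corr_demo = np.sqrt(1+1/(n_aper_demo-1))               # small-sample penalty on the noise estimate
sigma_corr_demo = stats.t.ppf(stats.norm.cdf(5), n_aper_demo)*ss_corr_demo
flux_demo = sigma_corr_demo*np.std(fluxes_demo)           # flux of the fake companions injected to probe the throughput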
def contrast_esti(self,param):
"""
Function used during the PSF-subtraction techniques optimization process
to compute the average contrast for a given annulus via multiple injection
of fake companions, relying on the approach developed by Mawet et al. (2014),
Gonzales et al. (2017) and Dahlqvist et al. (2021). The PSF-subtraction
technique index is given by self.param[1], the ADI sequence index by self.param[0]
and the annulus center by self.param[2]. The parameters for the PSF-subtraction
technique are contained in param.
"""
cuben=self.param[0]
modn=self.param[1]
ann_center=self.param[2]
if self.model[modn]=='APCA':
self.ncomp[modn][ann_center,cuben]=int(param[0])
self.nsegments[modn][ann_center,cuben]=int(param[1])
self.delta_rot[modn][ann_center,cuben]=abs(param[2])
elif self.model[modn]=='NMF':
self.ncomp[modn][ann_center,cuben]=abs(int(param))
elif self.model[modn]=='LLSG':
self.rank[modn][ann_center,cuben]=int(param[0])
self.nsegments[modn][ann_center,cuben]=int(param[1])
elif self.model[modn]=='LOCI' or self.model[modn]=='FM LOCI':
self.tolerance[modn][ann_center,cuben]=abs(param[0])
self.delta_rot[modn][ann_center,cuben]=abs(param[1])
elif self.model[modn]=='KLIP' or self.model[modn]=='FM KLIP':
self.ncomp[modn][ann_center,cuben]=abs(int(param[0]))
self.delta_rot[modn][ann_center,cuben]=abs(param[1])
ceny, cenx = frame_center(self.cube[cuben])
frame_nofc=self.model_esti(modn,cuben,ann_center,self.cube[cuben])[0]
psf_template = normalize_psf(self.psf[cuben], fwhm=self.fwhm, verbose=False,size=self.psf[cuben].shape[1])
if self.cube[cuben].ndim==4:
psf_template =np.stack([psf_template]*self.cube[cuben].shape[0])
# Noise computation using the approach proposed by Mawet et al. (2014)
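# Sketch of the steps below (small-sample statistics, cf. Mawet et al. 2014):
#   - place ~2*pi*ann_center/fwhm FWHM-sized apertures along the annulus,
#   - measure their aperture fluxes and take their standard deviation as the noise,
#   - correct the 5-sigma threshold for the small number of apertures with a Student-t
#     quantile and the factor sqrt(1 + 1/(n_aper-1)),
#   - the resulting 'flux' is the companion flux injected further down to probe the throughput.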
ang_step=360/((np.deg2rad(360)*ann_center)/self.fwhm)
tempx=[]
tempy=[]
for l in range(int(((np.deg2rad(360)*ann_center)/self.fwhm))):
newx = ann_center * np.cos(np.deg2rad(ang_step * l+self.opti_theta[cuben,ann_center]))
newy = ann_center * np.sin(np.deg2rad(ang_step * l+self.opti_theta[cuben,ann_center]))
tempx.append(newx)
tempy.append(newy)
tempx=np.array(tempx)
tempy = np.array(tempy) +int(ceny)
tempx = np.array(tempx) + int(cenx)
apertures = photutils.CircularAperture(np.array((tempx, tempy)).T, round(self.fwhm/2))
fluxes = photutils.aperture_photometry(frame_nofc, apertures)
fluxes = np.array(fluxes['aperture_sum'])
n_aper = len(fluxes)
ss_corr = np.sqrt(1 + 1/(n_aper-1))
sigma_corr = stats.t.ppf(stats.norm.cdf(5), n_aper)*ss_corr
noise = np.std(fluxes)
flux = sigma_corr*noise
fc_map = np.ones((self.cube[cuben].shape[-1],self.cube[cuben].shape[-1])) * 1e-6
fcy=[]
fcx=[]
cube_fc =self.cube[cuben]
# Average contrast computation via multiple injections of fake companions
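# Sketch: fake companions are injected every ~360/min(n_aper/2, 8) degrees at the separation
# ann_center; for each injection the throughput is recovered_flux/injected_flux (measured in a
# FWHM/2 aperture on the processed frames) and the contrast estimate is flux/throughput.
# The average over all injected positions gives contrast_mean.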
ang_fc=range(int(self.opti_theta[cuben,ann_center]),int(360+self.opti_theta[cuben,ann_center]),int(360//min((len(fluxes)/2),8)))
for i in range(len(ang_fc)):
cube_fc = cube_inject_companions(cube_fc, psf_template,
self.pa[cuben], flux, self.pxscale,
rad_dists=ann_center,
theta=ang_fc[i],
verbose=False)
y = int(ceny) + ann_center * np.sin(np.deg2rad(
ang_fc[i]))
x = int(cenx) + ann_center * np.cos(np.deg2rad(
ang_fc[i]))
if self.cube[cuben].ndim==4:
fc_map = frame_inject_companion(fc_map, psf_template[0], y, x,
flux)
else:
fc_map = frame_inject_companion(fc_map, psf_template, y, x,
flux)
fcy.append(y)
fcx.append(x)
frame_fc=self.model_esti(modn,cuben,ann_center,cube_fc)[0]
contrast=[]
for j in range(len(ang_fc)):
apertures = photutils.CircularAperture(np.array(([fcx[j],fcy[j]])), round(self.fwhm/2))
injected_flux = photutils.aperture_photometry(fc_map, apertures)['aperture_sum']
recovered_flux = photutils.aperture_photometry((frame_fc - frame_nofc), apertures)['aperture_sum']
throughput = float(recovered_flux / injected_flux)
if flux/throughput>0:
contrast.append(flux / throughput)
if len(contrast)!=0:
contrast_mean=np.mean(contrast)
else:
contrast_mean=-1
if self.param_opti_mode=='Contrast':
return np.where(contrast_mean<0,0,1/contrast_mean), contrast_mean,param
elif self.param_opti_mode=='RSM':
# When self.param_opti_mode=='RSM', the average contrast is replaced by
# the ratio of the peak probability of the fake companion injected at the median
# flux position with the average contrast defined previously, and the peak (noise)
# probability in the remainder of the annulus
if contrast_mean>0:
self.flux_opti[cuben,modn,ann_center]=contrast_mean
self.RSM_test(cuben,modn,ann_center,self.opti_theta[cuben,ann_center],contrast_mean)
contrast=[]
for i in range(self.crop_range[modn]):
self.probmap_esti(modthencube=True,ns=1,sel_crop=[i], estimator='Forward',colmode='median',ann_center=ann_center,sel_cube=[[cuben,modn]])
contrast.append(self.perf_esti(cuben,modn,ann_center,self.opti_theta[cuben,ann_center]))
return np.asarray(contrast)/(flux/throughput),flux/throughput,param
else:
return np.where(contrast_mean<0,0,1/contrast_mean), contrast_mean,param
def bayesian_optimisation(self,n_iters, loss_function, bounds,param_type, prev_res=None, n_random_esti=40, random_search=100 ,ncore=1):
""" bayesian_optimisation
Uses Gaussian Processes to optimise the loss function `loss_function`.
Parameters
----------
n_iters: integer.
Number of iterations used for the Bayesian optimization.
loss_function: function.
Function to be optimised, in our case the contrast_esti function.
bounds: numpy ndarray, 2d
Lower and upper bounds of the parameters used to compute the cube of
residuals allowing the estimation of the average contrast.
param_type: list
Type of parameters used by the PSF-subtraction technique to compute the cube
of residuals allowing the estimation of the average contrast ('int' or 'float')
prev_res: None or numpy ndarray, 2d
Parameter sets and corresponding average contrasts generated at the previous
angular distance. Allows smoothing of the transition from one annulus to another
during the optimization process for the annular mode.
n_random_esti: int, optional
Number of sets of parameters for which the loss function is computed to
initialize the Gaussian process. Default is 40.
random_search: int, optional
Number of random searches for the selection of the next set of parameters to
sample based on the maximisation of the expected improvement. Default is 100.
"""
def expected_improvement(x, gauss_proc, eval_loss, space_dim):
x_p = x.reshape(-1, space_dim)
mu, sigma = gauss_proc.predict(x_p, return_std=True)
opti_loss = np.max(eval_loss)
# In case sigma equals zero
with np.errstate(divide='ignore'):
Z = (mu - opti_loss) / sigma
expected_improvement = (mu - opti_loss) * norm.cdf(Z) + sigma * norm.pdf(Z)
expected_improvement[sigma == 0.0] = 0.0
return -1* expected_improvement
if prev_res is None:
x_ini = []
y_ini = []
else:
x_ini = prev_res[0]
y_ini = prev_res[1]
flux_fin=[]
space_dim = bounds.shape[0]
ann_center=self.param[2]
modn=self.param[1]
cuben=self.param[0]
if self.opti_mode=='full-frame':
params_m=[]
for i in range(len(param_type)):
if param_type[i]=='int':
params_m.append(np.random.random_integers(bounds[i, 0], bounds[i, 1], (n_random_esti)))
else:
params_m.append(np.random.uniform(bounds[i, 0], bounds[i, 1], (n_random_esti)))
if self.model[modn]=='FM KLIP' or self.model[modn]=='FM LOCI':
max_rad=self.max_r+1
else:
max_rad=self.maxradius+1
# Determination of the considered angular distances for the optimization process
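# Sketch of the radial sampling used to initialise the Gaussian process in full-frame mode:
# every annulus (step asize) over the first ~3 annuli from minradius, then every 2*asize up to
# ~7 annuli, then every 4*asize further out, plus the outermost usable annulus; this limits the
# number of contrast evaluations while still covering the whole field.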
if self.trunc is not None:
max_rad=min(self.trunc*self.asize[modn],max_rad)
if max_rad>self.minradius+3*self.asize[modn]+self.asize[modn]//2:
range_sel = list(range(self.minradius+self.asize[modn]//2,self.minradius+3*self.asize[modn]+self.asize[modn]//2,self.asize[modn]))
if max_rad>self.minradius+7*self.asize[modn]:
range_sel.extend(list(range(self.minradius+3*self.asize[modn]+self.asize[modn]//2,self.minradius+7*self.asize[modn],2*self.asize[modn])))
range_sel.extend(list(range(self.minradius+7*self.asize[modn]+self.asize[modn]//2,max_rad-3*self.asize[modn]//2-1,4*self.asize[modn])))
range_sel.append(self.minradius+(max_rad-self.minradius)//self.asize[modn]*self.asize[modn]-self.asize[modn]//2-1)
else:
range_sel.extend(list(range(self.minradius+3*self.asize[modn]+self.asize[modn]//2,max_rad-self.asize[modn]//2,2*self.asize[modn])))
if max_rad==self.minradius+7*self.asize[modn]:
range_sel.append(self.minradius+(max_rad-self.minradius)//self.asize[modn]*self.asize[modn]-self.asize[modn]//2-1)
else:
range_sel=list(range(self.minradius+self.asize[modn]//2,max_rad-self.asize[modn]//2,self.asize[modn]))
it=0
for j in range_sel:
self.param[2]=j
res_param = pool_map(ncore, loss_function, iterable(np.array(params_m).T))
res_mean_temp=[]
for res_temp in res_param:
res_mean_temp.append(res_temp[0])
self.mean_opti[cuben,modn,j]=np.mean(np.asarray(res_mean_temp))
y_ini_temp=[]
for res_temp in res_param:
if j==self.minradius+self.asize[modn]//2:
x_ini.append(res_temp[2])
y_ini_temp.append(res_temp[0]/self.mean_opti[cuben,modn,j])
y_ini.append([y_ini_temp])
it+=1
print(self.model[modn]+' Gaussian process initialization: annulus {} done!'.format(j))
y_ini=list(np.asarray(y_ini).sum(axis=0))
else:
self.param[2]=ann_center
params_m=[]
for i in range(len(param_type)):
if param_type[i]=='int':
params_m.append(np.random.random_integers(bounds[i, 0], bounds[i, 1], (n_random_esti)))
else:
params_m.append(np.random.uniform(bounds[i, 0], bounds[i, 1], (n_random_esti)))
res_param = pool_map(ncore, loss_function, iterable(np.array(params_m).T))
res_mean_temp=[]
for res_temp in res_param:
res_mean_temp.append(res_temp[0])
self.mean_opti[cuben,modn,self.param[2]]=np.mean(np.asarray(res_mean_temp))
for res_temp in res_param:
if res_temp[0].prod()>0:
if prev_res is not None:
del x_ini[0]
del y_ini[0]
x_ini.append(res_temp[2])
y_ini.append(res_temp[0]/self.mean_opti[cuben,modn,self.param[2]])
# Creation of the Gaussian process
kernel = gp.kernels.RBF(1.0, length_scale_bounds=(0.5,5))
model = gp.GaussianProcessRegressor( kernel,
alpha=1e-2,
n_restarts_optimizer=0,
normalize_y=False)
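# Design note (added): the surrogate is a Gaussian process with an RBF kernel whose length scale
# is constrained to [0.5, 5]; alpha=1e-2 adds a small diagonal noise term to the kernel matrix,
# which stabilises the fit to the noisy contrast evaluations.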
param_f=[]
flux_f=[]
optires=[]
if self.param_opti_mode=='Contrast':
crop_r=1
else:
crop_r=self.crop_range[self.param[1]]
self.crop_range[self.param[1]]=1
for j in range(crop_r):
x_fin = []
y_fin = []
params_m=[]
self.param[2]=ann_center
if self.opti_mode=='full-frame':
if self.param_opti_mode=='Contrast':
y_i=y_ini[0]
else:
y_i=y_ini[0][:,j]
else:
if self.param_opti_mode=='Contrast':
y_i=np.array(np.array(y_ini))
else:
y_i=np.array(np.array(y_ini)[:,j])
x_cop=x_ini.copy()
y_cop=list(y_i.copy())
if self.opti_mode=='full-frame':
it2=0
flux_fin=np.zeros((len(range_sel),n_iters))
for n in range(n_iters):
if len(x_fin)>0:
x_cop.append(x_fin[-1])
y_cop.append(y_fin[-1])
model.fit(np.array(x_cop), np.asarray(y_cop))
else:
model.fit(np.array(x_ini),y_i)
# Selection of next parameter set via maximisation of the expected improvement
if random_search:
x_random=[]
for i in range(len(param_type)):
if param_type[i]=='int':
x_random.append(np.random.random_integers(bounds[i, 0], bounds[i, 1], (random_search)))
else:
x_random.append(np.random.uniform(bounds[i, 0], bounds[i, 1], (random_search)))
x_random=np.array(x_random).T
ei = -1 * expected_improvement(x_random, model, y_i, space_dim=space_dim)
params_m = x_random[np.argmax(ei), :]
if self.opti_mode=='full-frame':
it1=0
y_fin_temp=[]
for k in range_sel:
self.param[2]=k
res_temp =loss_function(params_m)
flux_fin[it1,it2]=res_temp[1]
if k==self.minradius+self.asize[modn]//2:
x_fin.append(res_temp[2])
y_fin_temp.append((res_temp[0]/self.mean_opti[cuben,modn,self.param[2]]))
it1+=1
y_fin.append(np.asarray(y_fin_temp).sum(axis=0))
it2+=1
else:
res_temp = loss_function(params_m)
x_fin.append(res_temp[2])
y_fin.append(res_temp[0]/self.mean_opti[cuben,modn,self.param[2]])
flux_fin.append(res_temp[1])
if self.opti_mode=='full-frame':
param_f.append(x_fin[np.argmax(np.array(y_fin))])
flux_f.append(flux_fin[:,np.argmax(np.array(y_fin))])
optires.append(max(y_fin))
else:
param_f.append(x_fin[np.argmax(np.array(y_fin))])
flux_f.append(flux_fin[np.argmax(np.array(y_fin))])
optires.append(max(y_fin))
if self.param_opti_mode=='RSM':
self.crop[self.param[1]][ann_center,self.param[0]]=self.crop[self.param[1]][ann_center,self.param[0]]+optires.index(max(optires)) *2
self.crop_range[self.param[1]]=crop_r
if self.opti_mode=='full-frame':
print(self.model[modn]+' Bayesian optimization done!')
return param_f[optires.index(max(optires))],max(optires), [x_ini, y_ini],flux_f[optires.index(max(optires))]
def opti_model(self,maxiter=60,filt=True):
"""
Function allowing the optimization of the PSF-subtraction techniques parameters
relying either on the 'full-frame' or 'annular' mode (for more details see Dahlqvist et al. 2021)
Parameters
----------
maxiter: int, optional
Maximum number of iterations of the Bayesian optimization algorithm for
APCA, LOCI, FM KLIP and FM LOCI. Default is 60.
filt: bool, optional
If True, a Hampel Filter is applied on the set of parameters for the annular mode
in order to avoid outliers due to potential bright artefacts.
"""
if (any('FM KLIP'in mymodel for mymodel in self.model) or any('FM LOCI'in mydistri for mydistri in self.model)) and len(self.model)==1:
self.max_r=self.maxradius
elif(any('FM KLIP'in mymodel for mymodel in self.model) and any('FM LOCI'in mydistri for mydistri in self.model)) and len(self.model)==2:
self.max_r=self.maxradius
self.opti=True
if self.inv_ang==True:
for n in range(len(self.cube)):
self.pa[n]=-self.pa[n]
self.opti_theta=np.zeros((len(self.cube),self.maxradius+5))
self.contrast=np.zeros((len(self.cube),len(self.model),self.maxradius+5))
self.flux_opti=np.zeros((len(self.cube),len(self.model),self.maxradius+5))
self.mean_opti=np.zeros((len(self.cube),len(self.model),self.maxradius+5))
for k in range(len(self.cube)):
for j in range(len(self.model)):
simures=None
self.ini=True
if (self.opti_mode=='full-frame' or self.opti_mode=='annular') and self.max_r%self.asize[j]>0:
raise ValueError("For opti_mode equal to 'full-frame' or 'annular', max_r_fm should be a multiple of asize.")
if self.model[j]=='FM KLIP' or self.model[j]=='FM LOCI':
max_rad=self.max_r+1
else:
max_rad=self.maxradius+1
# Determination of the considered angular distances for the optimization process
if self.opti_mode=='full-frame':
if self.trunc is not None:
max_rad=min(self.trunc*self.asize[j],max_rad)
if max_rad>self.minradius+3*self.asize[j]+self.asize[j]//2:
range_sel = list(range(self.minradius+self.asize[j]//2,self.minradius+3*self.asize[j]+self.asize[j]//2,self.asize[j]))
if max_rad>self.minradius+7*self.asize[j]:
range_sel.extend(list(range(self.minradius+3*self.asize[j]+self.asize[j]//2,self.minradius+7*self.asize[j],2*self.asize[j])))
range_sel.extend(list(range(self.minradius+7*self.asize[j]+self.asize[j]//2,max_rad-3*self.asize[j]//2-1,4*self.asize[j])))
range_sel.append(self.minradius+(max_rad-self.minradius)//self.asize[j]*self.asize[j]-self.asize[j]//2-1)
else:
range_sel.extend(list(range(self.minradius+3*self.asize[j]+self.asize[j]//2,max_rad-self.asize[j]//2,2*self.asize[j])))
if max_rad==self.minradius+7*self.asize[j]:
range_sel.append(self.minradius+(max_rad-self.minradius)//self.asize[j]*self.asize[j]-self.asize[j]//2-1)
else:
range_sel=list(range(self.minradius+self.asize[j]//2,max_rad-self.asize[j]//2,self.asize[j]))
elif self.opti_mode=='annular':
range_sel=range(self.minradius+self.asize[j]//2,max_rad-self.asize[j]//2,self.asize[j])
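# Computation (for each considered annulus) of the azimuth of the median-flux position in the
# derotated ADI sequence; this azimuth is stored in self.opti_theta and later used to place the
# fake companions during the contrast estimation.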
for i in range_sel:
indicesy,indicesx=get_time_series(self.cube[k],i)
cube_derot,angle_list,scale_list=rot_scale('ini',self.cube[k],None,self.pa[k],self.scale_list[k], self.imlib, self.interpolation)
cube_derot=rot_scale('fin',self.cube[k],cube_derot,angle_list,scale_list, self.imlib, self.interpolation)
apertures = photutils.CircularAperture(np.array((indicesx, indicesy)).T, round(self.fwhm/2))
fluxes = photutils.aperture_photometry(cube_derot.sum(axis=0), apertures)
fluxes = np.array(fluxes['aperture_sum'])
x_sel=indicesx[np.argsort(fluxes)[len(fluxes)//2]]
y_sel=indicesy[np.argsort(fluxes)[len(fluxes)//2]]
ceny, cenx = frame_center(cube_derot[0])
self.opti_theta[k,i]=np.degrees(np.arctan2(y_sel-ceny, x_sel-cenx))
for i in range_sel:
if self.model[j]=='APCA':
self.param=[k,j,i]
if self.opti_bound[j] is None:
bounds=np.array([[15,45],[1,4],[0.25,1]])
else:
bounds=np.array(self.opti_bound[j])
param_type=['int','int','float']
if self.opti_mode=='full-frame':
opti_param,self.contrast[k,j,range_sel],simures,self.flux_opti[k,j,range_sel]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100,ncore=self.ncore)
self.ncomp[j][:,k]=int(opti_param[0])
self.nsegments[j][:,k]=opti_param[1]
self.delta_rot[j][:,k]=opti_param[2]
break
else:
opti_param,self.contrast[k,j,i],simures,self.flux_opti[k,j,i]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100, ncore=self.ncore)
self.ncomp[j][i,k]=int(opti_param[0])
self.nsegments[j][i,k]=opti_param[1]
self.delta_rot[j][i,k]=opti_param[2]
print('APCA Bayesian optimization: annulus {} done!'.format(i))
elif self.model[j]=='NMF':
self.param=[k,j,i]
optires=[]
flux=[]
test_param=[]
sel_param=[]
if self.opti_bound[j] is None:
bounds=[2,20]
else:
bounds=self.opti_bound[j][0]
if self.opti_mode=='full-frame':
param_range=range(bounds[0],bounds[1]+1)
flux=np.zeros((len(range_sel),len(param_range)))
it1=0
for h in range_sel:
self.param=[k,j,h]
res_param=[]
for l in param_range:
res_param.append(self.contrast_esti(l))
res_mean=[]
for res_temp in res_param:
res_mean.append(res_temp[0])
if self.param_opti_mode=='Contrast':
res_mean=np.mean(np.asarray(res_mean))
else:
res_mean=[np.mean(np.asarray(res_mean)[:,k]) for k in range(len(res_mean[0]))]
it2=0
for res_temp in res_param:
flux[it1,it2]=res_temp[1]
if h==self.minradius+self.asize[j]//2:
sel_param.append(res_temp[2])
optires.append(np.asarray(res_temp[0])/np.asarray(res_mean))
else:
if self.param_opti_mode=='Contrast':
optires[it2]+=res_temp[0]/res_mean
else:
optires[it2]=[optires[it2][k] + res_temp[0][k]/res_mean[k] for k in range(len(res_temp[0]))]
it2+=1
it1+=1
print('NMF optimization done!')
optires=np.array(optires)
if self.param_opti_mode!='Contrast':
self.crop[j][:,k]=self.crop[j][i,k]+np.unravel_index(optires.argmax(), optires.shape)[1]*2
self.ncomp[j][:,k]=sel_param[np.unravel_index(optires.argmax(), optires.shape)[0]]
self.contrast[k,j,range_sel]=optires.max()
self.flux_opti[k,j,range_sel]=flux[:,np.unravel_index(optires.argmax(), optires.shape)[0]]
break
else:
if self.opti_bound[j] is None:
bounds=[2,20]
else:
bounds=self.opti_bound[j][0]
param_range=range(bounds[0],bounds[1]+1)
res_param = pool_map(self.ncore, self.contrast_esti, iterable(param_range))
for res_temp in res_param:
optires.append(res_temp[0])
flux.append(res_temp[1])
sel_param.append(res_temp[2])
optires=np.array(optires)
if self.param_opti_mode!='Contrast':
self.crop[j][i,k]=self.crop[j][i,k]+np.unravel_index(optires.argmax(), optires.shape)[1]*2
self.ncomp[j][i,k]=sel_param[np.unravel_index(optires.argmax(), optires.shape)[0]]
self.contrast[k,j,i]=optires.max()
self.flux_opti[k,j,i]=flux[np.unravel_index(optires.argmax(), optires.shape)[0]]
print('NMF optimization: annulus {} done!'.format(i))
elif self.model[j]=='LLSG':
self.param=[k,j,i]
optires=[]
flux=[]
test_param=[]
sel_param=[]
if self.opti_bound[j] is None:
bounds=[[1,10],[1,4]]
else:
bounds=self.opti_bound[j]
for l in range(bounds[0][0],bounds[0][1]+1):
for m in range(bounds[1][0],bounds[1][1]+1):
test_param.append([l,m])
if self.opti_mode=='full-frame':
flux=np.zeros((len(range_sel),len(test_param)))
it1=0
for h in range_sel:
self.param=[k,j,h]
res_param = pool_map(self.ncore, self.contrast_esti, iterable(np.array(test_param)))
res_mean=[]
for res_temp in res_param:
res_mean.append(res_temp[0])
if self.param_opti_mode=='Contrast':
res_mean=np.mean(np.asarray(res_mean))
else:
res_mean=[np.mean(np.asarray(res_mean)[:,k]) for k in range(len(res_mean[0]))]
it2=0
for res_temp in res_param:
flux[it1,it2]=res_temp[1]
if h==self.minradius+self.asize[j]//2:
sel_param.append(res_temp[2])
optires.append(np.asarray(res_temp[0])/np.asarray(res_mean))
else:
if self.param_opti_mode=='Contrast':
optires[it2]+=res_temp[0]/res_mean
else:
optires[it2]=[optires[it2][k] + res_temp[0][k]/res_mean[k] for k in range(len(res_temp[0]))]
it2+=1
it1+=1
print('LLSG optimization done!')
optires=np.array(optires)
if self.param_opti_mode!='Contrast':
self.crop[j][:,k]=self.crop[j][i,k]+np.unravel_index(optires.argmax(), optires.shape)[1]*2
self.rank[j][:,k]=sel_param[np.unravel_index(optires.argmax(), optires.shape)[0]][0]
self.nsegments[j][:,k]=sel_param[np.unravel_index(optires.argmax(), optires.shape)[0]][1]
self.contrast[k,j,range_sel]=optires.max()
self.flux_opti[k,j,range_sel]=flux[:,np.unravel_index(optires.argmax(), optires.shape)[0]]
break
else:
res_param = pool_map(self.ncore, self.contrast_esti, iterable(np.array(test_param)))
for res_temp in res_param:
optires.append(res_temp[0])
flux.append(res_temp[1])
sel_param.append(res_temp[2])
optires=np.array(optires)
if self.param_opti_mode!='Contrast':
self.crop[j][i,k]=self.crop[j][i,k]+np.unravel_index(optires.argmax(), optires.shape)[0]*2
self.rank[j][i,k]=sel_param[np.unravel_index(optires.argmax(), optires.shape)[0]][0]
self.nsegments[j][i,k]=sel_param[np.unravel_index(optires.argmax(), optires.shape)[0]][1]
self.contrast[k,j,i]=optires.max()
self.flux_opti[k,j,i]=flux[np.unravel_index(optires.argmax(), optires.shape)[0]]
print('LLSG optimization: annulus {} done!'.format(i))
elif self.model[j]=='LOCI':
self.param=[k,j,i]
if self.opti_bound[j] is None:
bounds=np.array([[1e-3,1e-2],[0.25,1]])
else:
bounds=np.array(self.opti_bound[j])
param_type=['float','float']
if self.opti_mode=='full-frame':
opti_param,self.contrast[k,j,range_sel],simures,self.flux_opti[k,j,range_sel]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100,ncore=self.ncore)
self.tolerance[j][:,k]=opti_param[0]
self.delta_rot[j][:,k]=opti_param[1]
break
else:
opti_param,self.contrast[k,j,i],simures,self.flux_opti[k,j,i]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100, ncore=self.ncore)
self.tolerance[j][i,k]=opti_param[0]
self.delta_rot[j][i,k]=opti_param[1]
print('LOCI Bayesian optimization: annulus {} done!'.format(i))
elif self.model[j]=='FM LOCI':
opti_mode=np.copy(self.param_opti_mode)
self.param_opti_mode='Contrast'
self.param=[k,j,i]
if self.opti_bound[j] is None:
bounds=np.array([[1e-3,1e-2],[0.25,1]])
else:
bounds=np.array(self.opti_bound[j])
param_type=['float','float']
if self.opti_mode=='full-frame':
opti_param,self.contrast[k,j,range_sel],simures,self.flux_opti[k,j,range_sel]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100, ncore=self.ncore)
self.tolerance[j][:,k]=opti_param[0]
self.delta_rot[j][:,k]=opti_param[1]
break
else:
opti_param,self.contrast[k,j,i],simures,self.flux_opti[k,j,i]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100, ncore=self.ncore)
self.tolerance[j][i,k]=opti_param[0]
self.delta_rot[j][i,k]=opti_param[1]
print('FM LOCI Bayesian optimization: annulus {} done!'.format(i))
self.param_opti_mode=opti_mode
elif self.model[j]=='KLIP':
self.param=[k,j,i]
if self.opti_bound[j] is None:
bounds=np.array([[15,45],[0.25,1]])
else:
bounds=np.array(self.opti_bound[j])
param_type=['int','float']
if self.opti_mode=='full-frame':
opti_param,self.contrast[k,j,range_sel],simures,self.flux_opti[k,j,range_sel]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100, ncore=self.ncore)
self.ncomp[j][:,k]=opti_param[0]
self.delta_rot[j][:,k]=opti_param[1]
break
else:
opti_param,self.contrast[k,j,i],simures,self.flux_opti[k,j,i]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100,ncore=self.ncore)
self.ncomp[j][i,k]=opti_param[0]
self.delta_rot[j][i,k]=opti_param[1]
print('KLIP Bayesian optimization: annulus {} done!'.format(i))
elif self.model[j]=='FM KLIP':
opti_mode=np.copy(self.param_opti_mode)
self.param_opti_mode='Contrast'
self.param=[k,j,i]
if self.opti_bound[j] is None:
bounds=np.array([[15,45],[0.25,1]])
else:
bounds=np.array(self.opti_bound[j])
param_type=['int','float']
if self.opti_mode=='full-frame':
opti_param,self.contrast[k,j,range_sel],simures,self.flux_opti[k,j,range_sel]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100, ncore=self.ncore)
self.ncomp[j][:,k]=opti_param[0]
self.delta_rot[j][:,k]=opti_param[1]
break
else:
opti_param,self.contrast[k,j,i],simures,self.flux_opti[k,j,i]=self.bayesian_optimisation(maxiter, self.contrast_esti, bounds,param_type, prev_res=simures, n_random_esti=self.ini_esti[j],
random_search=100, ncore=self.ncore)
self.ncomp[j][i,k]=opti_param[0]
self.delta_rot[j][i,k]=opti_param[1]
print('FM KLIP Bayesian optimization: annulus {} done!'.format(i))
self.param_opti_mode=opti_mode
print('Model parameters selection: Cube {} : Model {} : Radius {} done!'.format(k, j,i))
if self.opti_mode=='annular' and filt==True:
self.ncomp[j][range_sel,k]=remove_outliers(self.ncomp[j][:,k],range_sel)
self.rank[j][range_sel,k]=remove_outliers(self.rank[j][:,k],range_sel)
self.nsegments[j][range_sel,k]=remove_outliers(self.nsegments[j][:,k],range_sel).astype(int)
self.nsegments[j][:,k]=np.where(self.nsegments[j][:,k]==0,1,self.nsegments[j][:,k])
self.delta_rot[j][range_sel,k]=remove_outliers(self.delta_rot[j][:,k],range_sel)
self.tolerance[j][range_sel,k]=remove_outliers(self.tolerance[j][:,k],range_sel)
for j in range(len(self.model)):
if self.param_opti_mode=='RSM':
self.crop_range[j]=1
if self.inv_ang==True:
for n in range(len(self.cube)):
self.pa[n]=-self.pa[n]
def RSM_test(self,cuben,modn,ann_center,sel_theta,sel_flux,thresh=False):
"""
Function computing the cube of likelihoods for a given PSF-subtraction
technique 'modn', a given ADI sequence 'cuben' and a given angular distance 'ann_center',
with or without the injection of a fake companion (resp. thresh=False and thresh=True).
sel_theta indicates the azimuth of the injected fake companion and sel_flux the flux
associated with the fake companion. This function is used by the RSM optimization function (opti_RSM).
"""
if thresh==False:
psf_template = normalize_psf(self.psf[cuben], fwhm=self.fwhm, verbose=False,size=self.psf[cuben].shape[1])
if self.cube[cuben].ndim==4:
psf_template =np.stack([psf_template]*self.cube[cuben].shape[0])
cube_fc= cube_inject_companions(np.zeros_like(self.cube[cuben]), psf_template,
self.pa[cuben], sel_flux, self.pxscale,
rad_dists=ann_center,
theta=sel_theta,
verbose=False)
result = self.likelihood(ann_center,cuben,modn,np.zeros_like(self.cube[cuben]),cube_fc,False)
else:
result = self.likelihood(ann_center,cuben,modn,np.zeros_like(self.cube[cuben]),None,False)
like_temp=np.zeros(((rot_scale('ini',self.cube[cuben],None,self.pa[cuben],self.scale_list[cuben],self.imlib, self.interpolation)[0].shape[0]+1),self.cube[cuben].shape[-2],self.cube[cuben].shape[-1],len(self.interval[modn][ann_center,cuben]),2,self.crop_range[modn]))
indicesy,indicesx=get_time_series(self.cube[cuben],ann_center)
if self.model[modn]=='FM LOCI' or self.model[modn]=='FM KLIP':
like_temp[:,indicesy,indicesx,:,:,:]=result[1]
self.psf_fm[cuben][modn][result[0]]=result[2]
else:
like_temp[:,indicesy,indicesx,:,:,:]=result[1]
like=[]
flux_FMMF=[]
for k in range(self.crop_range[modn]):
like.append(like_temp[0:(like_temp.shape[0]-1),:,:,:,:,k])
flux_FMMF.append(like_temp[(like_temp.shape[0]-1),:,:,0,0,k])
self.like_fin[cuben][modn]=like
self.flux_FMMF[cuben][modn]=flux_FMMF
def perf_esti(self,cuben,modn,ann_center,sel_theta):
"""
Function computing the performance index used for the RSM optimization
based on the cube of likelihoods generated by a PSF-subtraction
technique 'modn', relying on the ADI sequence 'cuben'. The performance
index is defined as the ratio of the peak probability of
an injected fake companion (injected at an angular distance 'ann_center'
and an azimuth 'sel_theta') to the peak (noise) probability in the
remainder of the considered annulus.
This function is used by the RSM optimization function (opti_RSM).
"""
ceny, cenx = frame_center(self.cube[cuben])
twopi=2*np.pi
sigposy=int(ceny + np.sin(sel_theta/360*twopi)*ann_center)
sigposx=int(cenx+ np.cos(sel_theta/360*twopi)*ann_center)
indc = circle(sigposy, sigposx,int(self.fwhm/2)+1)
max_detect=self.probmap[indc[0],indc[1]].max()
self.probmap[indc[0],indc[1]]=0
indicesy,indicesx=get_time_series(self.cube[cuben],ann_center)
bg_noise=np.max(self.probmap[indicesy,indicesx])
return max_detect/bg_noise
def opti_RSM_crop(self,ann_center,cuben,modn,estimator,colmode):
"""
Function computing the performance index of the RSM detection map for a given range
of crop sizes self.crop_range for the annulus ann_center on the cube of likelihoods generated
by a PSF-subtraction technique 'modn', relying on the ADI sequence 'cuben'.
The detection map on which the performance index is computed uses the
estimator ('Forward' or 'Forward-Backward') probability computation mode and
the colmode ('mean', 'median' or 'max') to sum the obtained probabilities along the time axis.
This function is used by the RSM optimization function (opti_RSM).
"""
opti_res=[]
self.RSM_test(cuben,modn,ann_center,self.opti_theta[cuben,ann_center],self.flux_opti[cuben,modn,ann_center])
for l in range(self.crop_range[modn]):
self.probmap_esti(modthencube=True,ns=1,sel_crop=[l], estimator=estimator,colmode=colmode,ann_center=ann_center,sel_cube=[[cuben,modn]])
opti_res.append(self.perf_esti(cuben,modn,ann_center,self.opti_theta[cuben,ann_center]))
return np.asarray(opti_res),ann_center
def opti_RSM_var_annular(self,ann_center,cuben,modn,estimator,colmode):
"""
Function computing the performance index of the RSM detection map for the different
possible regions used to compute the noise mean and variance ('ST','FR','FM','SM','TE')
and the two estimation modes (Gaussian maximum likelihood or variance-based estimator, resp.
flux=True or False) for the annulus ann_center on the cube of likelihoods generated
by a PSF-subtraction technique 'modn', relying on the ADI sequence 'cuben' and the annular
optimization mode. The detection map on which the performance index is computed uses the
estimator ('Forward' or 'Forward-Backward') probability computation mode and
the colmode ('mean', 'median' or 'max') to sum the obtained probabilities along the time axis.
This function is used by the RSM optimization function (opti_RSM).
"""
optires=np.zeros((4))
if self.cube[cuben].ndim==4:
var_list=[['FM',],['FM']]
else:
var_list=[['FM','TE','ST','SM'],['FM','TE']]
if self.intensity[modn][ann_center,cuben]=='Pixel':
n=1
else:
n=0
for m in range(len(var_list[0])):
if (n==1 and m<2) or n==0:
self.var[modn][ann_center,cuben]=var_list[n][m]
self.RSM_test(cuben,modn,ann_center,self.opti_theta[cuben,ann_center],self.flux_opti[cuben,modn,ann_center])
self.probmap_esti(modthencube=True,ns=1,sel_crop=[0], estimator=estimator,colmode=colmode,ann_center=ann_center,sel_cube=[[cuben,modn]])
optires[m]=self.perf_esti(cuben,modn,ann_center,self.opti_theta[cuben,ann_center])
return optires,ann_center
def opti_RSM_var_full(self,ann_center,cuben,modn,estimator,colmode):
"""
Function computing the performance index of the RSM detection map for the different
possible regions used to compute the noise mean and variance ('ST','FR','FM','SM','TE')
and the two estimation modes (Gaussian maximum likelihood or variance-based estimator, resp.
flux=True or False) for the annulus ann_center on the cube of likelihoods generated
by a PSF-subtraction technique 'modn', relying on the ADI sequence 'cuben' and the full-frame
optimization mode. The detection map on which the performance index is computed uses the
estimator ('Forward' or 'Forward-Backward') probability computation mode and
the colmode ('mean', 'median' or 'max') to sum the obtained probabilities along the time axis.
This function is used by the RSM optimization function (opti_RSM).
"""
self.RSM_test(cuben,modn,ann_center,self.opti_theta[cuben,ann_center],self.flux_opti[cuben,modn,ann_center])
self.probmap_esti(modthencube=True,ns=1,sel_crop=[0], estimator=estimator,colmode=colmode,ann_center=ann_center,sel_cube=[[cuben,modn]])
optires=self.perf_esti(cuben,modn,ann_center,self.opti_theta[cuben,ann_center])
return optires,ann_center
def opti_RSM(self,estimator='Forward',colmode='median'):
"""
Function optimizing five parameters of the RSM algorithm, the crop size, the method used
to compute the intensity parameter (pixel-wise Gaussian maximum likelihood or annulus-wise,
variance-based estimation), the region used for the computation of the noise mean and variance
('ST', 'FR', 'FM', 'SM', 'TE', see Dahlqvist et al. 2021 for the definitions), and whether
the noise mean and variance estimation should be performed empirically or via best fit.
For the variance-based estimation of the intensity parameter, an additional parameter,
the multiplicative factor, is also optimized. The detection map on which the performance index
is computed uses the estimator ('Forward' or 'Forward-Backward') probability computation mode and
the colmode ('mean', 'median' or 'max') to sum the obtained probabilities along the time axis.
Parameters
----------
estimator: str, optional
Approach used for the probability map estimation: either a 'Forward' model
(approach used in the original RSM map algorithm) which considers only the
past observations to compute the current probability, or a 'Forward-Backward' model
which relies on both past and future observations to compute the current probability.
colmode: str, optional
Method used to generate the final probability map from the three-dimensional cube
of probabilities generated by the RSM approach. It is possible to choose between the 'mean',
the 'median' or the 'max' value of the probabilities along the time axis. Default is 'median'.
"""
if (any('FM KLIP'in mymodel for mymodel in self.model) or any('FM LOCI'in mydistri for mydistri in self.model)) and len(self.model)==1:
self.max_r=self.maxradius
elif(any('FM KLIP'in mymodel for mymodel in self.model) and any('FM LOCI'in mydistri for mydistri in self.model)) and len(self.model)==2:
self.max_r=self.maxradius
self.opti=True
if self.inv_ang==True:
for n in range(len(self.cube)):
self.pa[n]=-self.pa[n]
self.distri[:]=np.repeat('A',len(self.model))
self.crop_noflux=self.crop.copy()
self.crop_flux=self.crop.copy()
self.opti_theta=np.zeros((len(self.cube),self.maxradius+5))
for j in range(len(self.model)):
for i in range(len(self.cube)):
# Determination of the considered angular distances for the optimization process
if self.model[j]=='FM KLIP' or self.model[j]=='FM LOCI':
max_rad=self.max_r+1
else:
max_rad=self.maxradius+1
interval=int(self.interval[j][0,i])
res_interval_crop=np.zeros((max_rad,interval,self.crop_range[j]))
if self.opti_mode=='full-frame':
if self.trunc is not None:
max_rad=min(self.trunc*self.asize[j],max_rad)
if max_rad>self.minradius+3*self.asize[j]+self.asize[j]//2:
range_sel = list(range(self.minradius+self.asize[j]//2,self.minradius+3*self.asize[j]+self.asize[j]//2,self.asize[j]))
if max_rad>self.minradius+7*self.asize[j]:
range_sel.extend(list(range(self.minradius+3*self.asize[j]+self.asize[j]//2,self.minradius+7*self.asize[j],2*self.asize[j])))
range_sel.extend(list(range(self.minradius+7*self.asize[j]+self.asize[j]//2,max_rad-3*self.asize[j]//2-1,4*self.asize[j])))
range_sel.append(self.minradius+(max_rad-self.minradius)//self.asize[j]*self.asize[j]-self.asize[j]//2-1)
else:
range_sel.extend(list(range(self.minradius+3*self.asize[j]+self.asize[j]//2,max_rad-self.asize[j]//2,2*self.asize[j])))
if max_rad==self.minradius+7*self.asize[j]:
range_sel.append(self.minradius+(max_rad-self.minradius)//self.asize[j]*self.asize[j]-self.asize[j]//2-1)
else:
range_sel=list(range(self.minradius+self.asize[j]//2,max_rad-self.asize[j]//2,self.asize[j]))
elif self.opti_mode=='annular':
range_sel=range(self.minradius+self.asize[j]//2,max_rad-self.asize[j]//2,self.asize[j])
# Computation of the median flux position in the original ADI sequence which will be used during the RSM optimization
for k in range_sel:
indicesy,indicesx=get_time_series(self.cube[i],k)
cube_derot,angle_list,scale_list=rot_scale('ini',self.cube[i],None,self.pa[i],self.scale_list[i], self.imlib, self.interpolation)
cube_derot=rot_scale('fin',self.cube[i],cube_derot,angle_list,scale_list, self.imlib, self.interpolation)
apertures = photutils.CircularAperture(np.array((indicesx, indicesy)).T, round(self.fwhm/2))
fluxes = photutils.aperture_photometry(cube_derot.sum(axis=0), apertures)
fluxes = np.array(fluxes['aperture_sum'])
x_sel=indicesx[np.argsort(fluxes)[len(fluxes)//2]]
y_sel=indicesy[np.argsort(fluxes)[len(fluxes)//2]]
ceny, cenx = frame_center(cube_derot[0])
self.opti_theta[i,k]=np.degrees(np.arctan2(y_sel-ceny, x_sel-cenx))
# Step-1: Selection of the optimal crop size, the intensity parameter estimator, and the multiplicative factor for the variance based estimator
self.distrifit[j][:,i]=False
self.var[j][:,i]='FR'
for k in range(1,interval+1):
self.interval[j][:,i]=[k]
res_param=pool_map(self.ncore, self.opti_RSM_crop, iterable(range_sel),i,j,estimator,colmode)
for res_temp in res_param:
res_interval_crop[res_temp[1],k-1,:]=res_temp[0]
if self.opti_mode=='full-frame':
interval_crop_sum=np.asarray(res_interval_crop).sum(axis=0)
self.interval[j][:,i]=np.unravel_index(interval_crop_sum.argmax(), interval_crop_sum.shape)[0]+1
self.crop_noflux[j][:,i]=self.crop[j][0,i]+2*np.unravel_index(interval_crop_sum.argmax(), interval_crop_sum.shape)[1]
if self.opti_mode=='annular':
interval_crop_inter=np.copy(res_interval_crop)
for n in range(interval):
for m in range(self.crop_range[j]):
interval_crop_inter[range_sel[0]:(range_sel[-1]+1),n,m]=interpolation(interval_crop_inter[:,n,m],range_sel)
for l in range(self.minradius,max_rad):
self.interval[j][l,i]=np.unravel_index(interval_crop_inter[l,:,:].argmax(), interval_crop_inter[l,:,:].shape)[0]+1
self.crop_noflux[j][l,i]=self.crop[j][0,i]+2*np.unravel_index(interval_crop_inter[l,:,:].argmax(), interval_crop_inter[l,:,:].shape)[1]
print('Interval selected: Cube {}, Model {}'.format(i,j))
self.intensity[j][:,i]='Pixel'
res_param=pool_map(self.ncore, self.opti_RSM_crop, iterable(range_sel),i,j,estimator,colmode)
crop_sel=np.zeros((max_rad,self.crop_range[j]))
for res_temp in res_param:
crop_sel[res_temp[1],:]=res_temp[0]
if self.opti_mode=='full-frame':
crop_sum=np.asarray(crop_sel).sum(axis=0)
self.crop_flux[j][:,i]=self.crop[j][0,i]+2*crop_sum.argmax()
if self.opti_mode=='annular':
for m in range(self.crop_range[j]):
crop_sel[range_sel[0]:(range_sel[-1]+1),m]=interpolation(crop_sel[:,m],range_sel)
for l in range(self.minradius,max_rad):
self.crop_flux[j][l,i]=self.crop[j][0,i]+2*crop_sel[l,:].argmax()
print('Crop size selected: Cube {} : Model {}'.format(i,j))
# Step-2: selection of the optimal region to compute the noise mean and variance
optires=np.zeros((5))
var_sel=np.zeros((max_rad,5))
if self.opti_mode=='full-frame':
if interval_crop_sum.max()>crop_sum.max():
self.intensity[j][:,i]='Annulus'
self.crop[j][:,i]=self.crop_noflux[j][:,i]
optires[0]=interval_crop_sum.max()
else:
self.intensity[j][:,i]='Pixel'
self.crop[j][:,i]=self.crop_flux[j][:,i]
optires[0]=crop_sum.max()
if self.opti_mode=='annular':
for l in range(self.minradius,max_rad):
if interval_crop_inter[l,:,:].max()>crop_sel[l,:].max():
self.intensity[j][l,i]='Annulus'
self.crop[j][l,i]=self.crop_noflux[j][l,i]
var_sel[l,0]=interval_crop_inter[l,:,:].max()
else:
self.intensity[j][l,i]='Pixel'
self.crop[j][l,i]=self.crop_flux[j][l,i]
var_sel[l,0]=crop_sel[l,:].max()
crop_range_temp=np.copy(self.crop_range[j])
self.crop_range[j]=1
fit_sel=np.zeros((max_rad,2))
if self.opti_mode=='full-frame':
if self.cube[i].ndim==4:
var_list=[['FR','FM',],['FR','FM']]
else:
var_list=[['FR','FM','TE','ST','SM'],['FR','FM','TE']]
if self.intensity[j][0,i]=='Pixel':
n=1
else:
n=0
for m in range(1,len(var_list[0])):
if (n==1 and m<3) or n==0:
self.var[j][:,i]=var_list[n][m]
res_param=pool_map(self.ncore, self.opti_RSM_var_full, iterable(range_sel),i,j,estimator,colmode)
opti_temp=0
for res_temp in res_param:
opti_temp+=res_temp[0]
optires[m]=opti_temp
self.var[j][:,i]=['FR','FM','TE','ST','SM'][optires.argmax()]
fit_sel[0,0]=optires.max()
if self.opti_mode=='annular':
res_param=pool_map(self.ncore, self.opti_RSM_var_annular, iterable(range_sel),i,j,estimator,colmode)
for res_temp in res_param:
var_sel[res_temp[1],1:5]=res_temp[0]
for m in range(5):
var_sel[range_sel[0]:(range_sel[-1]+1),m]=interpolation(var_sel[:,m],range_sel)
for l in range(self.minradius,max_rad):
self.var[j][l,i]=['FR','FM','TE','ST','SM'][var_sel[l,:].argmax()]
if self.intensity[j][l,i]=='Pixel' and (self.var[j][l,i]=='ST' or self.var[j][l,i]=='SM'):
self.var[j][l,i]='FR'
fit_sel[l,0]=var_sel[l,:].max()
print('Variance estimation method selected: Cube {} : Model {}'.format(i,j))
# Step-3: definition of the approach to compute the noise mean and variance, either empirically or via best fit
self.distrifit[j][:,i]=True
res_param=pool_map(self.ncore, self.opti_RSM_var_full, iterable(range_sel),i,j,estimator,colmode)
for res_temp in res_param:
fit_sel[res_temp[1],1]=res_temp[0]
if self.opti_mode=='full-frame':
fit_sum=np.asarray(fit_sel).sum(axis=0)
self.distrifit[j][:,i]=[False,True][fit_sum.argmax()]
if self.opti_mode=='annular':
fit_sel[range_sel[0]:(range_sel[-1]+1),1]=interpolation(fit_sel[:,1],range_sel)
for l in range(self.minradius,max_rad):
self.distrifit[j][l,i]=[False,True][fit_sel[l,:].argmax()]
print('Fit method selected: Cube {} : Model {}'.format(i,j))
self.crop_range[j]=crop_range_temp
self.crop_range[j]=1
if self.inv_ang==True:
for n in range(len(self.cube)):
self.pa[n]=-self.pa[n]
def opti_combi_full(self,l,estimator,colmode,op_sel,SNR=False):
"""
Function computing the performance index of the RSM detection map for the tested sets of
likelihood cubes/residuals cubes used by the optimal subset selection function in the case of the full-frame
optimization mode. The performance index is computed based on the set of likelihood cubes op_sel
for the radial distance l, relying either on the RSM algorithm or the S/N map to compute the detection map.
When using the RSM algorithm to generate the final probability map, the detection map is computed
using the estimator ('Forward' or 'Forward-Backward') probability computation mode and the colmode
('mean', 'median' or 'max') to sum the obtained probabilities along the time axis.
This function is used by the optimal subset of likelihood cubes selection function (RSM_combination).
"""
if SNR==True:
mod_sel=[]
for k in range(len(op_sel)):
i=op_sel[k][0]
j=op_sel[k][1]
if (self.model[j]!='FM KLIP' and self.model[j]!='FM LOCI') or (self.model[j]=='FM KLIP' and l<self.max_r) or (self.model[j]=='FM LOCI' and l<self.max_r):
mod_sel.append(op_sel[k])
if len(mod_sel)>0:
return self.contrast_multi_snr(l,mod_sel=mod_sel)
else:
return 0
else:
mod_del=[]
if self.contrast_sel=='Max':
opti_pos=np.unravel_index(self.flux_opti[:,:,l].argmax(), self.flux_opti[:,:,l].shape)
elif self.contrast_sel=='Median':
opti_pos=np.unravel_index(np.argmin(abs(self.flux_opti[:,:,l]-np.median(self.flux_opti[:,:,l]))), self.flux_opti[:,:,l].shape)
elif self.contrast_sel=='Min':
opti_pos=np.unravel_index(self.flux_opti[:,:,l].argmin(), self.flux_opti[:,:,l].shape)
flux_opti=self.flux_opti[opti_pos[0],opti_pos[1],l]
opti_theta=self.opti_theta[opti_pos[0],l]
for k in range(len(op_sel)):
i=op_sel[k][0]
j=op_sel[k][1]
if (self.model[j]=='FM KLIP' and l>=self.max_r) or (self.model[j]=='FM LOCI' and l>=self.max_r):
mod_del.append(k)
else:
self.RSM_test(i,j,l,opti_theta,flux_opti)
if len(mod_del)>0:
for index in sorted(mod_del, reverse=True):
del op_sel[index]
if len(op_sel)>0:
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=l,sel_cube=op_sel)
return self.perf_esti(i,j,l,opti_theta)
else:
return 0
def opti_combi_annular(self,k,estimator,colmode,SNR=False):
"""
Function computing the performance index of the RSM detection map for the tested sets of
likelihood cubes/residuals cubes used by the optimal subset selection function in the case of the annular
optimization mode. The performance index is computed based on the sets of likelihood cubes tested
for the radial distance k, relying either on the RSM algorithm or the S/N map to compute the detection map.
When using the RSM algorithm to generate the final probability map, the detection map is computed
using the estimator ('Forward' or 'Forward-Backward') probability computation mode and the colmode
('mean', 'median' or 'max') to sum the obtained probabilities along the time axis.
This function is used by the optimal subset of likelihood cubes selection function (RSM_combination).
"""
op_sel=[]
res_sep=[]
sel_cube=[]
if self.contrast_sel=='Max':
opti_pos=np.unravel_index(self.flux_opti[:,:,k].argmax(), self.flux_opti[:,:,k].shape)
elif self.contrast_sel=='Median':
opti_pos=np.unravel_index(np.argmin(abs(self.flux_opti[:,:,k]-np.median(self.flux_opti[:,:,k]))), self.flux_opti[:,:,k].shape)
elif self.contrast_sel=='Min':
opti_pos=np.unravel_index(self.flux_opti[:,:,k].argmin(), self.flux_opti[:,:,k].shape)
if self.combination=='Top-Down':
mod_sel=[[0,0]]*(len(self.cube)*len(self.model))
it=0
for i in range(len(self.cube)):
for j in range(len(self.model)):
mod_sel[it]=[i,j]
it+=1
for i in range(len(self.cube)):
for j in range(len(self.model)):
if SNR==True:
mod_sel=[]
for k in range(len(op_sel)):
i=op_sel[k][0]
j=op_sel[k][1]
if (self.model[j]=='FM KLIP' and k>=self.max_r) or (self.model[j]=='FM LOCI' and k>=self.max_r):
del mod_sel[mod_sel.index([i,j])]
if len(mod_sel)>0:
res_opti=self.contrast_multi_snr(k,mod_sel=mod_sel)
else:
res_opti=0
else:
if (self.model[j]!='FM KLIP' and self.model[j]!='FM LOCI') or (self.model[j]=='FM KLIP' and k<self.max_r) or (self.model[j]=='FM LOCI' and k<self.max_r):
self.RSM_test(i,j,k,self.opti_theta[opti_pos[0],k] ,self.flux_opti[opti_pos[0],opti_pos[1],k])
else:
del mod_sel[mod_sel.index([i,j])]
if SNR==False:
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=k,sel_cube=mod_sel)
res_opti=self.perf_esti(i,j,k,self.opti_theta[opti_pos[0],k])
prev_res_opti=0
while res_opti>prev_res_opti:
prev_res_opti=res_opti
res_temp=[]
for i in range(len(mod_sel)):
temp_sel=mod_sel.copy()
del temp_sel[i]
if SNR==True:
res_temp.append(self.contrast_multi_snr(k,mod_sel=temp_sel))
else:
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=k,sel_cube=temp_sel)
res_temp.append(self.perf_esti(i,j,k,self.opti_theta[opti_pos[0],k]) )
res_opti=max(res_temp)
if res_opti>prev_res_opti:
del mod_sel[np.argmax(res_temp)]
op_sel=mod_sel
elif self.combination=='Bottom-Up':
for i in range(len(self.cube)):
for j in range(len(self.model)):
if SNR==True:
res_sep.append(self.contrast_multi_snr(k,mod_sel=[[i,j]]))
sel_cube.append([i,j])
else:
if (self.model[j]!='FM KLIP' and self.model[j]!='FM LOCI') or (self.model[j]=='FM KLIP' and k<self.max_r) or (self.model[j]=='FM LOCI' and k<self.max_r):
self.RSM_test(i,j,k,self.opti_theta[opti_pos[0],k] ,self.flux_opti[opti_pos[0],opti_pos[1],k])
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=k,sel_cube=[[i,j]])
res_sep.append(self.perf_esti(i,j,k,self.opti_theta[opti_pos[0],k]))
sel_cube.append([i,j])
op_sel.append(sel_cube[np.argmax(np.array(res_sep))])
opti_res=max(res_sep)
del sel_cube[np.argmax(np.array(res_sep))]
prev_opti_res=0
while opti_res>prev_opti_res and len(sel_cube)>0:
res_temp=[]
mod_del=[]
for l in range(len(sel_cube)):
op_sel.append(sel_cube[l])
if SNR==True:
res_temp.append(self.contrast_multi_snr(k,mod_sel=op_sel))
else:
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=k,sel_cube=op_sel)
res_temp.append(self.perf_esti(i,j,k,self.opti_theta[opti_pos[0],k]))
del op_sel[len(op_sel)-1]
if res_temp[-1]<opti_res:
mod_del.append(l)
if max(res_temp)>opti_res:
prev_opti_res=opti_res
opti_res=max(res_temp)
op_sel.append(sel_cube[np.argmax(np.array(res_temp))])
mod_del.append(np.argmax(np.array(res_temp)))
if len(mod_del)>0:
for index in sorted(mod_del, reverse=True):
del sel_cube[index]
else:
prev_opti_res=opti_res
print('Greedy selection: Radius {} done!'.format(k))
return op_sel
def opti_combination(self,estimator='Forward',colmode='median',threshold=True,contrast_sel='Max',combination='Bottom-Up',SNR=False):
"""
Function selecting the sets of likelihood cubes/residuals cubes maximizing the annulus-wise
performance index for the annular optimization mode, or the global performance index for the
full-frame optimization mode, for respectively the auto-RSM and auto-S/N frameworks.
Parameters
----------
estimator: str, optional
Approach used for the probability map estimation: either a 'Forward' model
(approach used in the original RSM map algorithm) which considers only the
past observations to compute the current probability, or a 'Forward-Backward' model
which relies on both past and future observations to compute the current probability.
colmode: str, optional
Method used to generate the final probability map from the three-dimensional cube
of probabilities generated by the RSM approach. It is possible to choose between the 'mean',
the 'median' or the 'max' value of the probabilities along the time axis. Default is 'median'.
threshold: bool, optional
When True, a radial threshold is computed on the final detection map with parallactic angles reversed.
For a given angular separation, the radial threshold is defined as the maximum probability observed
within the annulus. The radial thresholds are checked for outliers and smoothed via a Hampel filter.
Only used when relying on the auto-RSM framework. Default is True.
contrast_sel: str,optional
Contrast and azimuth definition for the optimal likelihood cubes / residuals cubes selection.
If 'Max' ('Min' or 'Median'), the largest (smallest or median) contrast obtained during the
PSF-subtraction techniques optimization will be chosen along the corresponding
azimuthal position for the likelihood cubes selection. Default is 'Max'.
combination: str,optional
Type of greedy selection algorithm used for the selection of the optimal set of cubes
of likelihoods/cubes of residuals (either 'Bottom-Up' or 'Top-Down'). For more details
see Dahlqvist et al. (2021). Default is 'Bottom-Up'.
SNR: bool,optional
If True, the auto-S/N framework is used, resulting in an optimized final S/N map when
subsequently using opti_map. If False, the auto-RSM framework is used, providing an optimized
probability map when subsequently using opti_map.
"""
if (any('FM KLIP'in mymodel for mymodel in self.model) or any('FM LOCI'in mydistri for mydistri in self.model)) and len(self.model)==1:
self.maxradius=self.max_r
elif(any('FM KLIP'in mymodel for mymodel in self.model) and any('FM LOCI'in mydistri for mydistri in self.model)) and len(self.model)==2:
self.maxradius=self.max_r
self.opti=True
if self.inv_ang==True:
for n in range(len(self.cube)):
self.pa[n]=-self.pa[n]
self.opti_sel=list(np.zeros((self.maxradius+1)))
self.threshold=np.zeros((self.maxradius+1))
self.contrast_sel=contrast_sel
self.combination=combination
# Selection of the optimal set of cubes of likelihoods / cubes of residuals via top-down or bottom-up greedy selection
if self.opti_mode=='full-frame':
# Determination of the considered angular distances for the optimization process
if self.trunc is not None:
max_rad=min(self.trunc*self.asize[0],self.maxradius+1)
else:
max_rad=self.maxradius+1
if max_rad>self.minradius+3*self.asize[0]+self.asize[0]//2:
range_sel = list(range(self.minradius+self.asize[0]//2,self.minradius+3*self.asize[0]+self.asize[0]//2,self.asize[0]))
if max_rad>self.minradius+7*self.asize[0]:
range_sel.extend(list(range(self.minradius+3*self.asize[0]+self.asize[0]//2,self.minradius+7*self.asize[0],2*self.asize[0])))
range_sel.extend(list(range(self.minradius+7*self.asize[0]+self.asize[0]//2,max_rad-3*self.asize[0]//2-1,4*self.asize[0])))
range_sel.append(self.minradius+(max_rad-self.minradius)//self.asize[0]*self.asize[0]-self.asize[0]//2-1)
else:
range_sel.extend(list(range(self.minradius+3*self.asize[0]+self.asize[0]//2,max_rad-self.asize[0]//2,2*self.asize[0])))
if max_rad==self.minradius+7*self.asize[0]:
range_sel.append(self.minradius+(max_rad-self.minradius)//self.asize[0]*self.asize[0]-self.asize[0]//2-1)
else:
range_sel=list(range(self.minradius+self.asize[0]//2,max_rad-self.asize[0]//2,self.asize[0]))
if self.combination=='Top-Down':
mod_sel=[[0,0]]*(len(self.cube)*len(self.model))
it=0
for i in range(len(self.cube)):
for j in range(len(self.model)):
mod_sel[it]=[i,j]
it+=1
results=pool_map(self.ncore, self.opti_combi_full,iterable(range_sel),estimator,colmode,mod_sel,SNR)
res_opti=sum(results)
prev_res_opti=0
print('Initialization done!')
while res_opti>prev_res_opti:
prev_res_opti=res_opti
res_temp=[]
for i in range(len(mod_sel)):
temp_sel=mod_sel.copy()
del temp_sel[i]
results=pool_map(self.ncore, self.opti_combi_full,iterable(range_sel),estimator,colmode,temp_sel,SNR)
res_temp.append(sum(results))
res_opti=max(res_temp)
if res_opti>prev_res_opti:
del mod_sel[np.argmax(res_temp)]
print('Round done!')
self.opti_sel=list([mod_sel]*(self.maxradius+1))
print('Greedy selection done!')
elif self.combination=='Bottom-Up':
op_sel=[]
res_sep=[]
sel_cube=[]
for i in range(len(self.cube)):
for j in range(len(self.model)):
results=pool_map(self.ncore, self.opti_combi_full,iterable(range_sel),estimator,colmode,[[i,j]],SNR)
res_sep.append(sum(results))
sel_cube.append([i,j])
print('Initialization done!')
op_sel.append(sel_cube[np.argmax(np.array(res_sep))])
opti_res=max(res_sep)
del sel_cube[np.argmax(np.array(res_sep))]
prev_opti_res=0
while opti_res>prev_opti_res and len(sel_cube)>0:
res_temp=[]
mod_del=[]
for l in range(len(sel_cube)):
op_sel.append(sel_cube[l])
results=pool_map(self.ncore, self.opti_combi_full,iterable(range_sel),estimator,colmode,op_sel,SNR)
res_temp.append(sum(results))
if sum(results)<opti_res:
mod_del.append(l)
del op_sel[len(op_sel)-1]
if max(res_temp)>opti_res:
prev_opti_res=opti_res
opti_res=max(res_temp)
op_sel.append(sel_cube[np.argmax(np.array(res_temp))])
mod_del.append(np.argmax(np.array(res_temp)))
if len(mod_del)>0:
for index in sorted(mod_del, reverse=True):
del sel_cube[index]
else:
prev_opti_res=opti_res
print('Round done!')
self.opti_sel=list([op_sel]*(self.maxradius+1))
print('Greedy selection done!')
elif self.opti_mode=='annular':
range_sel=range(self.minradius+self.asize[0]//2,self.maxradius+1-self.asize[0]//2,self.asize[0])
results=pool_map(self.ncore, self.opti_combi_annular,iterable(range_sel),estimator,colmode,SNR)
it=0
for result in results:
for l in range(self.asize[0]):
self.opti_sel[(self.minradius+it*self.asize[0]):(self.minradius+(it+1)*self.asize[0])]=self.asize[0]*[result]
it+=1
# Computation of the radial thresholds
if threshold==True and SNR==False:
# if sum(self.threshold[(self.max_r+1):(self.maxradius+1)])==0:
# range_sel= range(self.minradius,self.max_r+1)
#else:
self.threshold_esti(estimator=estimator,colmode=colmode,Full=False)
if self.inv_ang==True:
for n in range(len(self.cube)):
self.pa[n]=-self.pa[n]
def threshold_esti(self,estimator='Forward',colmode='median',Full=False):
if (any('FM KLIP'in mymodel for mymodel in self.model) or any('FM LOCI'in mydistri for mydistri in self.model)) and len(self.model)==1:
self.maxradius=self.max_r
elif(any('FM KLIP'in mymodel for mymodel in self.model) and any('FM LOCI'in mydistri for mydistri in self.model)) and len(self.model)==2:
self.maxradius=self.max_r
if self.opti_sel is None:
self.opti_sel=list([[[0,0]]]*(self.maxradius+1))
if Full==True:
mod_sel=[[0,0]]*(len(self.cube)*len(self.model))
it=0
for i in range(len(self.cube)):
for j in range(len(self.model)):
mod_sel[it]=[i,j]
it+=1
self.opti_sel=list([mod_sel]*(self.maxradius+1))
range_sel= range(self.minradius,self.maxradius+1)
self.opti=False
if self.inv_ang==False:
for n in range(len(self.cube)):
self.pa[n]=-self.pa[n]
self.opti_map(estimator=estimator,colmode=colmode,threshold=False,Full=False)
if self.inv_ang==False:
for n in range(len(self.cube)):
self.pa[n]=-self.pa[n]
for k in range_sel:
indicesy,indicesx=get_time_series(self.cube[0],k)
self.threshold[k]=np.max(self.final_map[indicesy,indicesx])
if self.opti_mode=='full-frame':
self.threshold=poly_fit(self.threshold,range_sel,3)
print('Threshold determination done!')
def opti_map(self,estimator='Forward',colmode='median',threshold=True,Full=False,SNR=False):
"""
Function computing the final detection map using the optimal set of parameters for the
PSF-subtraction techniques (and for the RSM algorithm in the case of auto-RSM) and the
optimal set of cubes of likelihoods/ cubes of residuals for respectively the auto-RSM
and auto-S/N frameworks.
Parameters
----------
estimator: str, optional
Approach used for the probability map estimation: either a 'Forward' model
(approach used in the original RSM map algorithm) which considers only the
past observations to compute the current probability, or a 'Forward-Backward' model
which relies on both past and future observations to compute the current probability.
colmode: str, optional
Method used to generate the final probability map from the three-dimensional cube
of probabilities generated by the RSM approach. It is possible to choose between the 'mean',
the 'median' or the 'max' value of the probabilities along the time axis. Default is 'median'.
threshold: bool, optional
When True the radial treshold is computed during the RSM_combination is applied on the
final detection map with the original parallactic angles. Only used when relying on the auto-RSM
framework. Default is True.
Full: bool,optional
If True, the entire set of ADI-sequences and PSF-subtraction techniques are used to
generate the final detection map. If performed after RSM_combination, the obtained optimal set
is repkaced by the entire set of cubes. Please make ure you have saved the optimal set
via the save_parameter function. Default is 'False'.
SNR: bool,optional
If True, the auto-S/N framework is used, resulting in an optimizated final S/N map when using
subsequently the opti_map. If False the auto-RSM framework is used, providing an optimized
probability map when using subsequently the opti_map.
"""
if (any('FM KLIP' in mymodel for mymodel in self.model) or any('FM LOCI' in mydistri for mydistri in self.model)) and len(self.model)==1:
self.maxradius=self.max_r
elif (any('FM KLIP' in mymodel for mymodel in self.model) and any('FM LOCI' in mydistri for mydistri in self.model)) and len(self.model)==2:
self.maxradius=self.max_r
self.final_map=np.zeros((self.cube[0].shape[-2],self.cube[0].shape[-1]))
if self.opti_sel==None:
self.opti_sel=list([[[0,0]]]*(self.maxradius+1))
if type(self.threshold)!=np.ndarray:
self.threshold=np.zeros((self.maxradius+1))
if Full==True:
mod_sel=[[0,0]]*(len(self.cube)*len(self.model))
it=0
for i in range(len(self.cube)):
for j in range(len(self.model)):
mod_sel[it]=[i,j]
it+=1
self.opti_sel=list([mod_sel]*(self.maxradius+1))
# Computation of the final detection map for the auto-RSM or auto-S/N for the annular case
if self.opti_mode=='annular':
if SNR==True:
self.final_map=self.SNR_esti_annular(sel_cube=self.opti_sel,verbose=True)
else:
self.opti=False
it=0
for k in range(self.minradius+self.asize[0]//2,self.maxradius+1-self.asize[0]//2,self.asize[0]):
for m in range(len(self.opti_sel[k])):
residuals_cube_=np.zeros_like(rot_scale('ini',self.cube[self.opti_sel[k][m][0]],None,self.pa[self.opti_sel[k][m][0]],self.scale_list[self.opti_sel[k][m][0]],self.imlib, self.interpolation)[0])
for l in range(k-self.asize[self.opti_sel[k][m][1]],k+self.asize[self.opti_sel[k][m][1]]+1,self.asize[self.opti_sel[k][m][1]]):
indices = get_annulus_segments(residuals_cube_[0], l-int(self.asize[self.opti_sel[k][m][1]]/2),int(self.asize[self.opti_sel[k][m][1]]),1)
residuals_cube_temp=self.model_esti(self.opti_sel[k][m][1],self.opti_sel[k][m][0],l,self.cube[self.opti_sel[k][m][0]])[1]
residuals_cube_[:,indices[0][0],indices[0][1]]=residuals_cube_temp[:,indices[0][0],indices[0][1]]
range_sel=range((self.minradius+it*self.asize[self.opti_sel[k][m][1]]),(self.minradius+(it+1)*self.asize[self.opti_sel[k][m][1]]))
like_temp=np.zeros(((residuals_cube_.shape[0]+1),self.cube[self.opti_sel[k][m][0]].shape[-2],self.cube[self.opti_sel[k][m][0]].shape[-1],len(self.interval[self.opti_sel[k][m][1]][k,self.opti_sel[k][m][0]]),2,self.crop_range[self.opti_sel[k][m][1]]))
time_out=120/250*residuals_cube_.shape[0]*self.asize[0]
results=[]
pool=Pool(processes=self.ncore)
for e in range_sel:
results.append(pool.apply_async(self.likelihood,args=(e,self.opti_sel[k][m][0],self.opti_sel[k][m][1],residuals_cube_,None,True)))
[result.wait(timeout=time_out) for result in results]
it1=k-self.asize[self.opti_sel[k][m][1]]
for result in results:
try:
res=result.get(timeout=1)
indicesy,indicesx=get_time_series(self.cube[0],res[0])
if self.model[self.opti_sel[k][m][1]]=='FM LOCI' or self.model[self.opti_sel[k][m][1]]=='FM KLIP':
like_temp[:,indicesy,indicesx,:,:,:]=res[1]
self.psf_fm[self.opti_sel[k][m][0]][self.opti_sel[k][m][1]][res[0]]=res[2]
else:
like_temp[:,indicesy,indicesx,:,:,:]=res[1]
except mp.TimeoutError:
pool.terminate()
pool.join()
res=self.likelihood(it1,self.opti_sel[k][m][0],self.opti_sel[k][m][1],residuals_cube_,None,True)
indicesy,indicesx=get_time_series(self.cube[0],res[0])
if self.model[self.opti_sel[k][m][1]]=='FM LOCI' or self.model[self.opti_sel[k][m][1]]=='FM KLIP':
like_temp[:,indicesy,indicesx,:,:,:]=res[1]
self.psf_fm[self.opti_sel[k][m][0]][self.opti_sel[k][m][1]][res[0]]=res[2]
else:
like_temp[:,indicesy,indicesx,:,:,:]=res[1]
it1+=1
like=[]
for n in range(self.crop_range[self.opti_sel[k][m][1]]):
like.append(like_temp[0:residuals_cube_.shape[0],:,:,:,:,n])
self.like_fin[self.opti_sel[k][m][0]][self.opti_sel[k][m][1]]=like
for l in range_sel:
indicesy,indicesx=get_time_series(self.cube[0],l)
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=l,sel_cube=self.opti_sel[l])
if threshold==True:
self.final_map[indicesy,indicesx]=self.probmap[indicesy,indicesx]-self.threshold[l]
else:
self.final_map[indicesy,indicesx]=self.probmap[indicesy,indicesx]
it+=1
# Computation of the final detection map for the auto-RSM or auto-S/N for the full-frame case
elif self.opti_mode=='full-frame':
if SNR==True:
self.final_map=self.SNR_esti_full(sel_cube=self.opti_sel[0],verbose=True)
else:
range_sel= range(self.minradius,self.maxradius+1)
self.opti=False
mod_sel=self.opti_sel.copy()
self.lik_esti(sel_cube=mod_sel[0])
if 'FM KLIP' not in self.model and 'FM LOCI' not in self.model:
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=None,sel_cube=mod_sel[0])
for k in range_sel:
indicesy,indicesx=get_time_series(self.cube[0],k)
if threshold==True:
self.final_map[indicesy,indicesx]=self.probmap[indicesy,indicesx]-self.threshold[k]
else:
self.final_map[indicesy,indicesx]=self.probmap[indicesy,indicesx]
else:
for k in range_sel:
if k>=self.max_r:
try:
del mod_sel[list(np.asarray(mod_sel)[:,1]).index(self.model.index('FM KLIP'))]
except (ValueError,IndexError):
pass
try:
del mod_sel[list(np.asarray(mod_sel)[:,1]).index(self.model.index('FM LOCI'))]
except (ValueError,IndexError):
pass
if len(mod_sel)==0:
break
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=k,sel_cube=mod_sel[0])
else:
self.probmap_esti(ns=1, estimator=estimator,colmode=colmode,ann_center=k,sel_cube=mod_sel[0])
indicesy,indicesx=get_time_series(self.cube[0],k)
if threshold==True:
self.final_map[indicesy,indicesx]=self.probmap[indicesy,indicesx]-self.threshold[k]
else:
self.final_map[indicesy,indicesx]=self.probmap[indicesy,indicesx]
print('Final RSM map computation done!')
def contrast_multi_snr(self,ann_center,mod_sel=[[0,0]]):
"""
Function computing the performance index of the S/N detection map (the contrast) when using multiple cubes
of residuals to generate the final S/N map (auto-S/N framework). The final S/N map is obtained
by averaging the S/N maps of the selected cubes of residuals. The performance index is computed for a
radial distance ann_center using the set of cubes of residuals mod_sel, [[i1,j1],[i2,j2],...] with i1
the first considered ADI sequence and j1 the first considered PSF-subtraction technique, i2 the
second considered ADI sequence, etc. This function is used by the optimal subset of
likelihood cubes selection function (RSM_combination).
"""
ceny, cenx = frame_center(self.cube[0])
init_angle = 0
ang_step=360/((np.deg2rad(360)*ann_center)/self.fwhm)
tempx=[]
tempy=[]
for l in range(int(((np.deg2rad(360)*ann_center)/self.fwhm))):
newx = ann_center * np.cos(np.deg2rad(ang_step * l+init_angle))
newy = ann_center * np.sin(np.deg2rad(ang_step * l+init_angle))
tempx.append(newx)
tempy.append(newy)
tempx=np.array(tempx)
tempy = np.array(tempy) +int(ceny)
tempx = np.array(tempx) + int(cenx)
apertures = photutils.CircularAperture(np.array((tempx, tempy)).T, round(self.fwhm/2))
flux=np.zeros(len(mod_sel))
injected_flux=np.zeros((len(mod_sel),min(len(apertures)//2,8)))
recovered_flux=np.zeros((len(mod_sel),min(len(apertures)//2,8)))
for k in range(len(mod_sel)):
cuben=mod_sel[k][0]
modn=mod_sel[k][1]
frame_nofc=self.model_esti(modn,cuben,ann_center,self.cube[cuben])[0]
apertures = photutils.CircularAperture(np.array((tempx, tempy)).T, round(self.fwhm/2))
fluxes = photutils.aperture_photometry(frame_nofc, apertures)
fluxes = np.array(fluxes['aperture_sum'])
n_aper = len(fluxes)
ss_corr = np.sqrt(1 + 1/(n_aper-1))
sigma_corr = stats.t.ppf(stats.norm.cdf(5), n_aper)*ss_corr
noise = np.std(fluxes)
flux[k] = sigma_corr*noise
psf_template = normalize_psf(self.psf[cuben], fwhm=self.fwhm, verbose=False,size=self.psf[cuben].shape[1])
if self.cube[cuben].ndim==4:
psf_template =np.stack([psf_template]*self.cube[cuben].shape[0])
fc_map = np.ones((self.cube[cuben].shape[-2],self.cube[cuben].shape[-1])) * 1e-6
fcy=[]
fcx=[]
cube_fc =self.cube[cuben]
ang_fc=range(int(init_angle),int(360+init_angle),int(360//min((len(fluxes)//2),8)))
for i in range(len(ang_fc)):
cube_fc = cube_inject_companions(cube_fc, psf_template,
self.pa[cuben], flux[k], self.pxscale,
rad_dists=ann_center,
theta=ang_fc[i],
verbose=False)
y = int(ceny) + ann_center * np.sin(np.deg2rad(
ang_fc[i]))
x = int(cenx) + ann_center * np.cos(np.deg2rad(
ang_fc[i]))
fc_map = frame_inject_companion(fc_map, psf_template[0], y, x,
flux[k])
fcy.append(y)
fcx.append(x)
frame_fc=self.model_esti(modn,cuben,ann_center,cube_fc)[0]
for j in range(len(ang_fc)):
apertures = photutils.CircularAperture(np.array(([fcx[j],fcy[j]])), round(self.fwhm/2))
injected_flux[k,j] = photutils.aperture_photometry(fc_map, apertures)['aperture_sum']
recovered_flux[k,j] = photutils.aperture_photometry((frame_fc - frame_nofc), apertures)['aperture_sum']
contrast=[]
for j in range(len(ang_fc)):
recovered_flux_conso=0
injected_flux_conso=0
if len(mod_sel)==1:
recovered_flux_conso=recovered_flux[0,j]
injected_flux_conso=injected_flux[0,j]
else:
for k in range(len(mod_sel)):
temp_list=np.array(range(len(mod_sel)))
temp_list=np.delete(temp_list,k)
recovered_flux_conso+=recovered_flux[k,j]*np.prod(flux[temp_list])
injected_flux_conso+=injected_flux[k,j]*np.prod(flux[temp_list])
throughput = float(recovered_flux_conso / injected_flux_conso)
if np.prod(flux)/throughput>0:
contrast.append(np.mean(flux) / throughput)
if len(contrast)!=0:
contrast_mean=np.mean(contrast)
else:
contrast_mean=-1
return np.where(contrast_mean<0,0,1/contrast_mean)
def SNR_esti_full(self, sel_cube=[[0,0]],verbose=True):
"""
Function computing the final S/N detection map, in the case of the full-frame optimization mode,
after optimization of the PSF-subtraction techniques and the optimal selection of the residual cubes.
The final S/N map is obtained by averaging the S/N maps of the selected cubes of residuals provided
by sel_cube, [[i1,j1],[i2,j2],...] with i1 the first considered ADI sequence and j1 the
first considered PSF-subtraction technique, i2 the second considered ADI sequence, etc.
This function is used by the final detection map computation function (opti_map).
"""
snr_temp=[]
for k in range(len(sel_cube)):
j=sel_cube[k][0]
i=sel_cube[k][1]
#Computation of the SNR maps
if self.model[i]=='APCA':
print("Annular PCA estimation")
residuals_cube_, frame_fin = annular_pca_adisdi(self.cube[j], self.pa[j], self.scale_list[j], fwhm=self.fwhm, ncomp=self.ncomp[i][0,j], asize=self.asize[i],
delta_rot=self.delta_rot[i][0,j],delta_sep=self.delta_sep[i][0,j], svd_mode='lapack', n_segments=int(self.nsegments[i][0,j]), nproc=self.ncore,full_output=True,verbose=False)
snr_temp.append(vip.metrics.snrmap(frame_fin, fwhm=self.fwhm, approximated=False, plot=False,nproc=self.ncore, verbose=False))
elif self.model[i]=='NMF':
print("NMF estimation")
residuals_cube_, frame_fin = nmf_adisdi(self.cube[j], self.pa[j], self.scale_list[j], ncomp=self.ncomp[i][0,j], max_iter=100, random_state=0, mask_center_px=None,full_output=True,verbose=False)
snr_temp.append(vip.metrics.snrmap(frame_fin, fwhm=self.fwhm, approximated=False, plot=False,nproc=self.ncore, verbose=False))
elif self.model[i]=='LLSG':
print("LLSGestimation")
residuals_cube_, frame_fin = llsg_adisdi(self.cube[j], self.pa[j],self.scale_list[j], self.fwhm, rank=self.rank[i][0,j],asize=self.asize[i], thresh=1,n_segments=int(self.nsegments[i][0,j]), max_iter=40, random_seed=10, nproc=self.ncore,full_output=True,verbose=False)
snr_temp.append(vip.metrics.snrmap(frame_fin, fwhm=self.fwhm, approximated=False, plot=False,nproc=self.ncore, verbose=False))
elif self.model[i]=='LOCI':
print("LOCI estimation")
residuals_cube_,frame_fin=loci_adisdi(self.cube[j], self.pa[j],self.scale_list[j], fwhm=self.fwhm,asize=self.asize[i], n_segments=int(self.nsegments[i][0,j]),tol=self.tolerance[i][0,j], nproc=self.ncore, optim_scale_fact=2,delta_rot=self.delta_rot[i][0,j],delta_sep=self.delta_sep[i][0,j],verbose=False,full_output=True)
snr_temp.append(vip.metrics.snrmap(frame_fin, fwhm=self.fwhm, approximated=False, plot=False,nproc=self.ncore, verbose=False))
elif self.model[i]=='KLIP':
print("KLIP estimation")
cube_out, residuals_cube_, frame_fin = KLIP(self.cube[j], self.pa[j], ncomp=self.ncomp[i][0,j], fwhm=self.fwhm, asize=self.asize[i],
delta_rot=self.delta_rot[i][0,j],full_output=True,verbose=False)
snr_temp.append(vip.metrics.snrmap(frame_fin, fwhm=self.fwhm, approximated=False, plot=False,nproc=self.ncore, verbose=False))
return np.array(snr_temp).mean(axis=0)
def SNR_esti_annular(self,sel_cube=[[0,0]],verbose=True):
"""
Function computing the final S/N detection map, in the case of the annular optimization mode,
after optimization of the PSF-subtraction techniques and the optimal selection of the residual cubes.
The final S/N map is obtained by averaging the S/N maps of the selected cubes of residuals provided
by sel_cube, [[i1,j1],[i2,j2],...] with i1 the first considered ADI sequence and j1 the
first considered PSF-subtraction technique, i2 the second considered ADI sequence, etc.
This function is used by the final detection map computation function (opti_map).
"""
self.opti_sel=sel_cube
snrmap_array = np.zeros((self.cube[0].shape[-2],self.cube[0].shape[-1]))
for k in range(self.minradius+self.asize[0]//2,self.maxradius+1-self.asize[0]//2,self.asize[0]):
snr_temp=[]
for m in range(len(self.opti_sel[k])):
snrmap_array_temp = np.zeros((self.cube[0].shape[-2],self.cube[0].shape[-1]))
residuals_cube_=self.model_esti(self.opti_sel[k][m][1],self.opti_sel[k][m][0],k,self.cube[self.opti_sel[k][m][0]])[1]
mask = get_annulus_segments(residuals_cube_[0], k-int(self.asize[self.opti_sel[k][m][1]]/2),int(self.asize[self.opti_sel[k][m][1]]), mode="mask")[0]
mask = np.ma.make_mask(mask)
yy, xx = np.where(mask)
coords = zip(xx, yy)
res = pool_map(self.ncore, vip.metrics.snr, residuals_cube_.mean(axis=0), iterable(coords), self.fwhm, True,None, False)
res = np.array(res)
yy = res[:, 0]
xx = res[:, 1]
snr_value = res[:, -1]
snrmap_array_temp[yy.astype('int'), xx.astype('int')] = snr_value
snr_temp.append(snrmap_array_temp)
snrmap_array[yy.astype('int'), xx.astype('int')]=np.asarray(snr_temp).mean(axis=0)[yy.astype('int'), xx.astype('int')]
return snrmap_array
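# Hedged usage sketch (not part of the original code): assuming `rsm_obj` is an
# already-configured instance of the auto-RSM class defined above (its constructor
# is not shown in this excerpt), the optimized detection map could be produced with
# the opti_map method documented above, e.g.:
#
#     rsm_obj.opti_map(estimator='Forward', colmode='median', threshold=True, Full=False, SNR=False)
#     detection_map = rsm_obj.final_map   # final detection map filled in by opti_map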
|
"""
Blocks for noise processing
"""
import numpy as np
from ... import read_config
from ...preprocess.filter import butterworth
from ...geometry import (n_steps_neigh_channels,
order_channels_by_distance)
from ...preprocess import standarize
from . import util
def covariance(recordings, temporal_size, neigbor_steps):
"""Compute noise spatial and temporal covariance
Parameters
----------
recordings: matrix
    Multi-channel recordings (n observations x n channels)
temporal_size: int
    Size of the temporal window, in samples, used to compute the temporal covariance
neigbor_steps: int
    Number of steps from the multi-channel geometry to consider two
    channels as neighbors
"""
CONFIG = read_config()
# get the neighbor channels at a max "neigbor_steps" steps
neigh_channels = n_steps_neigh_channels(CONFIG.neighChannels,
neigbor_steps)
# sum neighbor flags for every channel, this gives the number of neighbors
# per channel, then find the channel with the most neighbors
# TODO: why are we selecting this one?
channel = np.argmax(np.sum(neigh_channels, 0))
# get the neighbor channels for "channel"
(neighbords_idx,) = np.where(neigh_channels[channel])
# order neighbors by distance
neighbords_idx, temp = order_channels_by_distance(channel, neighbords_idx,
CONFIG.geom)
# from the multi-channel recordings, get the neighbor channels
# (this includes the channel with the most neighbors itself)
rec = recordings[:, neighbords_idx]
# filter recording
if CONFIG.doFilter == 1:
rec = butterworth(rec, CONFIG.filterLow,
CONFIG.filterHighFactor,
CONFIG.filterOrder,
CONFIG.srate)
# standardize recording
sd_ = standarize.sd(rec, CONFIG.srate)
rec = standarize.standarize(rec, sd_)
# compute and return spatial and temporal covariance
return util.covariance(rec, temporal_size, neigbor_steps,
CONFIG.spikeSize)
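# Hedged, simplified illustration (not part of the original module): a numpy-only
# sketch of what spatial and temporal noise covariance estimates can look like on
# synthetic standardized recordings. It does NOT reproduce util.covariance, which
# additionally uses CONFIG.spikeSize to exclude spike waveforms from the estimate.
def _toy_noise_covariance(recordings, temporal_size):
    """recordings: (n_observations, n_channels) array of standardized noise."""
    n_obs, n_chan = recordings.shape
    # spatial covariance: covariance between channels over all observations
    spatial_cov = np.cov(recordings, rowvar=False)      # shape (n_chan, n_chan)
    # temporal covariance: covariance between time lags inside fixed-size windows,
    # computed here on the first channel only for simplicity
    n_win = n_obs // temporal_size
    windows = recordings[:n_win * temporal_size, 0].reshape(n_win, temporal_size)
    temporal_cov = np.cov(windows, rowvar=False)        # shape (temporal_size, temporal_size)
    return spatial_cov, temporal_cov
# Example: _toy_noise_covariance(np.random.standard_normal((10000, 7)), temporal_size=40)
# returns a (7, 7) spatial covariance and a (40, 40) temporal covariance.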
|
import random
import string
from junit_xml import TestSuite, TestCase
def rand_duration():
return random.randint(0, 120) + random.random()
def rand_string(prefix, size=40):
text = "".join(
[random.choice(string.ascii_letters + ' ') for _ in range(size)])
return "{} {}".format(prefix, text)
def _gen_cases(n_passes, n_fails, n_skips, n_errors):
result = []
for i in range(n_passes):
case = TestCase(name='TestPassed%s' % i,
classname='generated.xml.test.case.passes',
elapsed_sec=rand_duration())
result.append(case)
for i in range(n_skips):
case = TestCase(name='TestSkipped%s' % i,
classname='generated.xml.test.case.skips',
elapsed_sec=rand_duration())
case.add_skipped_info(message=rand_string('skipped!'))
result.append(case)
for i in range(n_fails):
case = TestCase(name='TestFailed%s' % i,
classname='generated.xml.test.case.fails',
elapsed_sec=rand_duration())
case.add_failure_info(message=rand_string('failure!'))
result.append(case)
for i in range(n_errors):
case = TestCase(name='TestErrored%s' % i,
classname='generated.xml.test.case.errors',
elapsed_sec=rand_duration())
case.add_error_info(message=rand_string('error!'))
result.append(case)
return result
def get_junit_xml_string(n_passes=1, n_fails=1, n_skips=1, n_errors=1):
cases = _gen_cases(n_passes, n_fails, n_skips, n_errors)
suite = TestSuite("fake-junit-xml-suite", cases)
return TestSuite.to_xml_string([suite])
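# Hedged usage example (not part of the original module): generate and print a fake
# JUnit XML report; the counts below are arbitrary.
if __name__ == '__main__':
    print(get_junit_xml_string(n_passes=3, n_fails=1, n_skips=1, n_errors=0))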
|
import pytest
from src.client import _Client
@pytest.fixture
def mock_config():
class Mock_Config:
def __init__(self):
self._data = {}
def load(self, read_env_data=False):
self._data = {
"portal_user": "aperture",
"portal_passwd": "dummy",
"portal_hostname": "LaLaLand",
"portal_db_name": "portal",
"pipeline_user": "pipeline",
"pipeline_passwd": "fake",
"pipeline_hostname": "localhost",
"pipeline_db_name": "hive"
}
return True
def get_value(self, value):
if value in self._data:
return self._data[value]
return Mock_Config()
@pytest.fixture
def instance_fixture(mock_config):
client_instance = _Client(read_env_data=False)
client_instance.config = mock_config
return client_instance
def test_instance_fixture(instance_fixture):
    # smoke test: the fixture can be built without raising
    assert instance_fixture is not None
def test_create_hive(instance_fixture):
class Custom_Table():
def __init__(self):
self.value = 0
def create_table(self):
self.value += 1
custom = Custom_Table()
instance_fixture.flags = custom
instance_fixture.service_periods = custom
instance_fixture.flagged = custom
instance_fixture.create_hive()
assert custom.value == 3
|
"""SynapsePay client library for the SynapsePay platform.
This client library is designed to support the SynapsePay API for creating
users, linking nodes (accounts), creating transactions between users, and adding subnets. Read
more at https://docs.synapsepay.com
"""
from .client import Client
from .api import Users, Nodes, Trans, Subnets, Subscriptions, ClientEndpoint, Atms
from .models import User, Node, Transaction, Subnet, Subscription, PublicKey, Atm
|
from .tep import *
from .utils import *
from .race_analysis import *
|
import cv2
from flask import Flask, request, make_response
import numpy as np
import urllib.request
app = Flask(__name__)
@app.route('/endpoint', methods=['GET'])
def process():
    image_url = request.args.get('imageurl')
    requested_url = urllib.request.urlopen(image_url)
    image_array = np.asarray(bytearray(requested_url.read()), dtype=np.uint8)
    img = cv2.imdecode(image_array, -1)
    # Do some processing on img to obtain output_img (kept as a pass-through here)
    output_img = img
    # Encode the processed image and return it as a PNG response
    retval, buffer = cv2.imencode('.png', output_img)
    response = make_response(buffer.tobytes())
    response.headers['Content-Type'] = 'image/png'
    return response
if __name__ == '__main__':
app.run(debug=True) |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'JobQueueState',
]
class JobQueueState(str, Enum):
DISABLED = "DISABLED"
ENABLED = "ENABLED"
|
import logging
import pandas as pd
import requests
import redis
from . import APP, RQ_CLIENT
REDIS_CLIENT = redis.Redis(host=APP.config.get('REDIS_HOST'), db=0)
GH_HEADERS = {'Authorization': 'token ' + APP.config.get('GITHUB_TOKEN', '')}
# https://stackoverflow.com/questions/17622439/how-to-use-github-api-token-in-python-for-requesting
# https://github.com/pandas-dev/pandas/issues/10526
@RQ_CLIENT.job()
def github_users(since=0, counter=0):
''' get users data from github api '''
req = requests.get(f'https://api.github.com/users?since={since}', headers=GH_HEADERS)
df = pd.DataFrame(req.json())
for _, v in df.iterrows():
github_each_user.queue(v['login'])
since = v['id']
# queue next job request
if counter+1 < 10:
github_users.queue(since=since, counter=counter+1)
@RQ_CLIENT.job()
def github_each_user(login):
''' get data for specific user '''
req = requests.get(f'https://api.github.com/users/{login}', headers=GH_HEADERS)
ds = pd.Series(req.json()).to_dict()
keys = ['public_repos', 'public_gists', 'followers', 'following']
ds2 = {key: ds[key] for key in keys}
logging.info(ds2)
REDIS_CLIENT.hmset(login, ds2)
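# Hedged usage sketch (not part of the original module): with the Flask app and an RQ
# worker running, the crawl defined above could be started by queuing the first page
# of users, e.g. from an application shell:
#
#     github_users.queue(since=0)
#
# Each github_users job fans out one github_each_user job per returned login and
# re-queues itself for the next page (up to 10 pages) via the `counter` argument.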
|