import json
import pandas as pd
from os import path
import pickle
from collections import defaultdict
def load_json(file):
with open(file, "r") as f:
output = json.load(f)
return output
def load_pickle(file):
with open(file, "rb") as f:
output = pickle.load(f)
return output
class Train:
def __init__(self, data_path="."):
self.route_data = load_json(f"{data_path}/model_build_inputs/route_data.json")
self.actual_sequences = load_json(
f"{data_path}/model_build_inputs/actual_sequences.json"
)
self.route_id = list(self.route_data.keys())
self.all_stations = pd.Series([self.get_station(i) for i in range(len(self))])
self.data_path = data_path
def get_station(self, idx) -> str:
route_id = self[idx]
station = self.route_data[route_id]["station_code"]
return station
def __len__(self) -> int:
return len(self.route_data)
def __getitem__(self, idx) -> str:
return self.route_id[idx]
def _build_history(self) -> None:
output = {}
for station in self.all_stations.unique():
all_result = []
for idx in self.all_stations[self.all_stations == station].index:
route_id = self[idx]
stops = [
[k, *v.values()]
for k, v in self.route_data[route_id]["stops"].items()
]
for i, k in enumerate(
self.actual_sequences[route_id]["actual"].values()
):
stops[i].append(k)
stops.sort(key=lambda x: x[-1])
                result = []
                for x in [x[-2] for x in stops if not pd.isna(x[-2])]:
                    if x not in result:
                        result.append(x)
result = [station] + result
all_result.append(result)
output[station] = all_result
with open(f"{self.data_path}/model_build_outputs/history.json", "w") as f:
json.dump(output, f)
# G = defaultdict(lambda: 0)
# for k, T in output.items():
# for s in T:
# for i in range(len(s) - 1):
# x = s[i].split(".")[0]
# y = s[i + 1].split(".")[0]
# if x != y:
# G[(k, x, y)] += 1
# G = dict(G)
# with open(f"{self.data_path}/model_build_outputs/main_zone_map.pkl", "wb") as f:
# pickle.dump(G, f)
def __call__(self) -> None:
self._build_history()
if __name__ == "__main__":
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
print(BASE_DIR)
trainer = Train(data_path=f"{BASE_DIR}/data/")
trainer()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
class Nudity:
def __init__(self):
base_path = os.path.dirname(os.path.abspath(__file__))
model_file = os.path.abspath(base_path + "/files/retrained_graph.pb")
input_name = "import/input"
output_name = "import/final_result"
self.input_height = 224
self.input_width = 224
self.input_mean = 128
self.input_std = 128
self.graph = self.load_graph(model_file)
self.input_operation = self.graph.get_operation_by_name(input_name)
self.output_operation = self.graph.get_operation_by_name(output_name)
def read_tensor_from_image_file(
self, file_name, input_height=299, input_width=299, input_mean=0, input_std=255
):
        tf.compat.v1.disable_eager_execution()
        with tf.compat.v1.Session() as sess:
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.io.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(
file_reader, channels=3, name="png_reader"
)
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(
tf.image.decode_gif(file_reader, name="gif_reader")
)
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
else:
image_reader = tf.image.decode_jpeg(
file_reader, channels=3, name="jpeg_reader"
)
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.compat.v1.image.resize_bilinear(
dims_expander, [input_height, input_width]
)
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
result = sess.run(normalized)
return result
# def load_graph(self, model_file):
# graph = tf.Graph()
# graph_def = tf.GraphDef()
# with open(model_file, "rb") as f:
# graph_def.ParseFromString(f.read())
# with graph.as_default():
# tf.import_graph_def(graph_def)
# return graph
def load_graph(self, model_file):
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def score(self, file_name):
t = self.read_tensor_from_image_file(
file_name,
input_height=self.input_height,
input_width=self.input_width,
input_mean=self.input_mean,
input_std=self.input_std,
)
with tf.compat.v1.Session(graph=self.graph) as sess:
results = sess.run(
self.output_operation.outputs[0], {self.input_operation.outputs[0]: t}
)
results = np.squeeze(results)
return results[1].item()
def has(self, file_name):
return self.score(file_name) >= 0.8
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
args = parser.parse_args()
if not args.image:
print(
"--image is missing. please set image to be processed with --image='path'"
)
return
nudity = Nudity()
print(nudity.has(args.image))
if __name__ == "__main__":
main()
|
from . import question
from flask import jsonify, request
import json
import pymongo
from bson import BSON, json_util
dept_data = [
{
'name': '部门1',
'id': 12345
},
{
'name': '部门2',
'id': 12346
}
]
MONGO_URI = 'localhost'
MONGO_DATABASE = 'zhihu'
client = pymongo.MongoClient(MONGO_URI)
db = client[MONGO_DATABASE]
collection = 'question'
# GET method: return one page of questions
@question.route('/<int:page>', methods=['GET', ])
def get_question(page):
datas = db[collection].find({}, {'_id': 0}).skip(20 * page).limit(20)
users = []
for data in datas:
user = json.dumps(data, default=json_util.default)
user = json.loads(user)
users.append(user)
return json.dumps(users)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from loguru import logger
import pytest
import os.path as op
import discon
import discon.discon_tools
path_to_script = op.dirname(op.abspath(__file__))
def test_noarch_from_text():
assert discon.discon_tools.check_meta_yaml_for_noarch(None, "adfadf\n noarch: python \n") is True, "Check noarch in meta.yaml"
assert discon.discon_tools.check_meta_yaml_for_noarch(None, "adfadf\n # noarch: python \n") is False, "Check noarch in meta.yaml"
def test_noarch_from_file():
pth = op.join(path_to_script, "../conda-recipe/meta.yaml")
# pth = op.join(path_to_script, "../../io3d/conda-recipe/meta.yaml")
assert discon.discon_tools.check_meta_yaml_for_noarch(pth) is False, "Check noarch in meta.yaml"
|
import os
from collections import namedtuple
from box import Box
import yaml
from termcolor import colored
from cerberus import Validator
from sgains.configuration.schema import sgains_schema
def _dict_to_namedtuple(input_dict, dict_name="root"):
CONFIG_TUPLE = namedtuple(dict_name, input_dict.keys())
for key, value in input_dict.items():
if isinstance(value, dict):
input_dict[key] = _dict_to_namedtuple(value, key)
elif isinstance(value, list):
input_dict[key] = [
_dict_to_namedtuple(item)
if isinstance(item, dict)
else item
for item in value
]
return CONFIG_TUPLE(*input_dict.values()) # type: ignore
def _dict_to_box(input_dict):
return Box(input_dict, frozen_box=True)
class SgainsValidator(Validator):
def _normalize_coerce_abspath(self, value: str) -> str:
work_directory = self._config["work_dirname"]
if not os.path.isabs(value):
value = os.path.join(work_directory, value)
return os.path.normpath(value)
class Config:
def __init__(self, config):
self.config = config
self.verbose = 0
self.config_file = None
self.dry_run = False
self.force = False
self.parallel = 1
@staticmethod
def parse_argv(argv):
if '-c' in argv:
index = argv.index('-c')
elif '--config' in argv:
index = argv.index('--config')
else:
return None
index += 1
if index < 0 or index >= len(argv):
raise ValueError('config filename not found')
filename = argv[index]
config = Config.parse(filename)
return config
@staticmethod
def check_sge_argv(argv):
if '--sge' in argv:
return True
else:
return False
@staticmethod
def parse(filename):
assert os.path.exists(filename)
with open(filename, "r") as infile:
config_dict = yaml.safe_load(infile)
conf_dirname = os.path.dirname(filename)
return Config.from_dict(config_dict, conf_dirname)
@staticmethod
def from_dict(config_dict, work_dirname):
work_dirname = os.path.abspath(work_dirname)
config_dict["work_dirname"] = work_dirname
validator = SgainsValidator(
sgains_schema, work_dirname=work_dirname)
assert validator.validate(config_dict), validator.errors
return Config(_dict_to_box(validator.document))
# return Config(
# _dict_to_namedtuple(validator.document, "sgains"),
# validator)
@property
def schema(self):
return sgains_schema
def to_dict(self):
return self.config.to_dict()
def to_box(self):
return Box(self.to_dict(), default_box=True)
def check_nonempty_workdir(self, dirname):
if not os.path.exists(dirname):
return
if len(os.listdir(dirname)) and \
not self.force and not self.dry_run:
print(colored(
"ERROR: non-empty output directory and no --force option",
"red"))
raise ValueError(f"Non empty directory {dirname}")
def mappable_regions_filename(self, chrom=None):
mname = self.config.mappable_regions.mappable_file
if chrom:
mname = "{}_{}".format(
chrom, self.config.mappable_regions.mappable_file)
filename = os.path.join(
self.mappable_regions.mappable_dir,
mname
)
return filename
def bins_boundaries_filename(self, chrom=None):
bname = self.config.bins.bins_file
if chrom:
bname = "{}_{}".format(
chrom, self.config.bins.bins_file)
filename = os.path.join(
self.config.bins.bins_dir,
bname
)
return filename
def __getattr__(self, attr_name):
# FIXME Temporary hack to enable default values
# only for public attributes
if attr_name[0:2] == "__":
raise AttributeError()
if attr_name not in self.schema.keys():
raise ValueError(f"Unexpected attribute {attr_name}")
return getattr(self.config, attr_name)
@staticmethod
def cellname(filename):
return os.path.basename(filename).split(os.extsep, 1)[0]
def varbin_filename(self, cellname):
os.makedirs(self.config.varbin.varbin_dir, exist_ok=True)
outfile = os.path.join(
self.config.varbin.varbin_dir,
"{}{}".format(cellname, self.config.varbin.varbin_suffix)
)
return outfile
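
# Hedged usage sketch (not part of the original module): it only illustrates how
# _dict_to_box gives the rest of Config attribute-style access to a validated
# dict; the keys mirror the "bins" section used above, but the values are made up.
if __name__ == "__main__":
    _demo = _dict_to_box({"bins": {"bins_dir": "bins", "bins_file": "demo.txt"}})
    print(_demo.bins.bins_file)  # -> "demo.txt"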
|
import html
from pprint import pprint
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import gettext_lazy as _
from allauth.account.utils import send_email_confirmation
from newstream.functions import get_site_settings_from_default_site, set_default_from_email
from donations.functions import getDonationEmail
from donations.templates.donations.email_templates.plain_texts import (
    get_donation_revoked_admin_text, get_donation_revoked_donor_text,
    get_new_donation_admin_text, get_donation_receipt_text,
    get_donation_status_change_text, get_new_recurring_admin_text,
    get_new_recurring_donor_text, get_recurring_rescheduled_admin_text,
    get_recurring_rescheduled_donor_text, get_subscription_status_change_text,
    get_new_renewal_text, get_renewal_receipt_text,
    get_recurring_adjusted_admin_text, get_recurring_adjusted_donor_text,
    get_recurring_paused_admin_text, get_recurring_paused_donor_text,
    get_recurring_resumed_admin_text, get_recurring_resumed_donor_text,
    get_recurring_cancelled_admin_text, get_recurring_cancel_request_admin_text,
    get_recurring_cancelled_donor_text, get_account_created_admin_text,
    get_account_deleted_admin_text, get_account_deleted_donor_text,
    get_donation_error_admin_text,
)
def setDonorLanguagePreference(user):
if user.language_preference:
translation.activate(user.language_preference)
def sendEmailNotificationsToDonor(user_email, subject, textStr, htmlStr):
# setDonorLanguagePreference(user)
site_settings = get_site_settings_from_default_site()
# default_from_name is an I18nCharField
if str(site_settings.default_from_name):
from_email = '%s <%s>' % (str(site_settings.default_from_name), site_settings.default_from_email)
else:
from_email = site_settings.default_from_email
try:
send_mail(
str(subject),
textStr,
from_email,
[user_email],
html_message=htmlStr
)
except Exception as e:
print("Cannot send '"+str(subject)+"' to '" +
user_email+"': "+str(e), flush=True)
def sendEmailNotificationsToAdmins(site_settings, subject, textStr, htmlStr):
# set default language for admins' emails
# translation.activate(settings.LANGUAGE_CODE)
admin_list = [
admin_email.email for admin_email in site_settings.admin_emails.all()]
# default_from_name is an I18nCharField
if str(site_settings.default_from_name):
from_email = '%s <%s>' % (str(site_settings.default_from_name), site_settings.default_from_email)
else:
from_email = site_settings.default_from_email
try:
send_mail(
str(subject),
textStr,
from_email,
admin_list, # requires admin list to be set in site_settings
html_message=htmlStr
)
except Exception as e:
print("Cannot send '"+str(subject) +
"' emails to admins: "+str(e), flush=True)
def sendDonationErrorNotifToAdmins(donation, error_title, error_description):
siteSettings = get_site_settings_from_default_site()
mail_title = _("Donation Error")
if siteSettings.admin_receive_donation_error_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_donation_error_admin_text(donation, error_title, error_description), render_to_string(
'donations/email_templates/donation_error_admin.html', context={'donation': donation, 'mail_title': mail_title, 'error_title': error_title, 'error_description': error_description}))
def sendDonationNotifToAdmins(donation):
siteSettings = get_site_settings_from_default_site()
mail_title = _("New One-off Donation")
if siteSettings.admin_receive_checkout_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_new_donation_admin_text(donation), render_to_string(
'donations/email_templates/new_donation.html', context={'donation': donation, 'mail_title': mail_title}))
def sendDonationReceiptToDonor(donation):
mail_title = _("Thank you for your Donation")
sendEmailNotificationsToDonor(getDonationEmail(donation), mail_title, get_donation_receipt_text(donation), render_to_string('donations/email_templates/donation_receipt.html', context={'donation': donation, 'mail_title': mail_title}))
def sendDonationRevokedToAdmins(donation):
siteSettings = get_site_settings_from_default_site()
mail_title = _("A Donation is revoked")
if siteSettings.admin_receive_revoked_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_donation_revoked_admin_text(donation), render_to_string(
'donations/email_templates/donation_revoked_admin.html', context={'donation': donation, 'mail_title': mail_title}))
def sendDonationRevokedToDonor(donation):
mail_title = _("Your Donation is Revoked")
sendEmailNotificationsToDonor(getDonationEmail(donation), mail_title, get_donation_revoked_donor_text(donation), render_to_string('donations/email_templates/donation_revoked_donor.html', context={'donation': donation, 'mail_title': mail_title}))
def sendDonationStatusChangeToDonor(donation):
mail_title = _("Your Donation Status is Updated")
sendEmailNotificationsToDonor(getDonationEmail(donation), mail_title, get_donation_status_change_text(donation), render_to_string('donations/email_templates/donation_status_change.html', context={'donation': donation, 'mail_title': mail_title}))
def sendSubscriptionStatusChangeToDonor(subscription):
mail_title = _("Your Recurring Donation Status is Updated")
sendEmailNotificationsToDonor(subscription.user.email, mail_title, get_subscription_status_change_text(subscription), render_to_string('donations/email_templates/subscription_status_change.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRenewalNotifToAdmins(donation):
siteSettings = get_site_settings_from_default_site()
mail_title = _("New Renewal Donation")
if siteSettings.admin_receive_renewal_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_new_renewal_text(donation), render_to_string(
'donations/email_templates/new_renewal.html', context={'donation': donation, 'mail_title': mail_title}))
def sendRenewalReceiptToDonor(donation):
mail_title = _("Thank you for your Monthly Donation")
sendEmailNotificationsToDonor(donation.user.email, mail_title, get_renewal_receipt_text(donation), render_to_string('donations/email_templates/renewal_receipt.html', context={'donation': donation, 'mail_title': mail_title}))
def sendRecurringAdjustedNotifToAdmins(subscription):
siteSettings = get_site_settings_from_default_site()
mail_title = _("A Recurring Donation Amount is Adjusted")
if siteSettings.admin_receive_adjusted_recurring_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_recurring_adjusted_admin_text(subscription), render_to_string(
'donations/email_templates/recurring_adjusted_admin.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringAdjustedNotifToDonor(subscription):
mail_title = _("Your Recurring Donation Amount is Adjusted")
sendEmailNotificationsToDonor(subscription.user.email, mail_title, get_recurring_adjusted_donor_text(subscription), render_to_string('donations/email_templates/recurring_adjusted_donor.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendNewRecurringNotifToAdmins(subscription):
siteSettings = get_site_settings_from_default_site()
mail_title = _("New Recurring Donation")
if siteSettings.admin_receive_new_recurring_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_new_recurring_admin_text(subscription), render_to_string(
'donations/email_templates/new_recurring_donation.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendNewRecurringNotifToDonor(subscription):
mail_title = _("Thank you for setting up a Recurring Donation")
sendEmailNotificationsToDonor(subscription.user.email, mail_title, get_new_recurring_donor_text(subscription), render_to_string('donations/email_templates/recurring_new_donor.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringRescheduledNotifToAdmins(subscription):
siteSettings = get_site_settings_from_default_site()
mail_title = _("A Recurring Donation is Rescheduled")
if siteSettings.admin_receive_rescheduled_recurring_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_recurring_rescheduled_admin_text(subscription), render_to_string(
'donations/email_templates/recurring_rescheduled_admin.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringRescheduledNotifToDonor(subscription):
mail_title = _("Your Recurring Donation is Rescheduled")
sendEmailNotificationsToDonor(subscription.user.email, mail_title, get_recurring_rescheduled_donor_text(subscription), render_to_string('donations/email_templates/recurring_rescheduled_donor.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringPausedNotifToAdmins(subscription):
siteSettings = get_site_settings_from_default_site()
mail_title = _("A Recurring Donation is paused")
if siteSettings.admin_receive_pause_recurring_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_recurring_paused_admin_text(subscription), render_to_string(
'donations/email_templates/recurring_paused_admin.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringPausedNotifToDonor(subscription):
mail_title = _("Your Recurring Donation is Paused")
sendEmailNotificationsToDonor(subscription.user.email, mail_title, get_recurring_paused_donor_text(subscription), render_to_string('donations/email_templates/recurring_paused_donor.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringResumedNotifToAdmins(subscription):
siteSettings = get_site_settings_from_default_site()
mail_title = _("A Recurring Donation is resumed")
if siteSettings.admin_receive_resume_recurring_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_recurring_resumed_admin_text(subscription), render_to_string(
'donations/email_templates/recurring_resumed_admin.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringResumedNotifToDonor(subscription):
mail_title = _("Your Recurring Donation is Resumed")
sendEmailNotificationsToDonor(subscription.user.email, mail_title, get_recurring_resumed_donor_text(subscription), render_to_string('donations/email_templates/recurring_resumed_donor.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringCancelledNotifToAdmins(subscription):
siteSettings = get_site_settings_from_default_site()
mail_title = _("A Recurring Donation is cancelled")
if siteSettings.admin_receive_cancel_recurring_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_recurring_cancelled_admin_text(subscription), render_to_string(
'donations/email_templates/recurring_cancelled_admin.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringCancelRequestNotifToAdmins(subscription):
siteSettings = get_site_settings_from_default_site()
mail_title = _("Cancellation to a Recurring Donation is requested")
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_recurring_cancel_request_admin_text(subscription), render_to_string(
'donations/email_templates/recurring_cancel_request_admin.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendRecurringCancelledNotifToDonor(subscription):
mail_title = _("Your Recurring Donation is Cancelled")
sendEmailNotificationsToDonor(subscription.user.email, mail_title, get_recurring_cancelled_donor_text(subscription), render_to_string('donations/email_templates/recurring_cancelled_donor.html', context={'subscription': subscription, 'mail_title': mail_title}))
def sendAccountCreatedNotifToAdmins(user):
siteSettings = get_site_settings_from_default_site()
mail_title = _("A Donor Account is created")
if siteSettings.admin_receive_account_created_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_account_created_admin_text(user), render_to_string(
'donations/email_templates/account_created_admin.html', context={'user': user, 'mail_title': mail_title}))
def sendAccountDeletedNotifToAdmins(user):
siteSettings = get_site_settings_from_default_site()
mail_title = _("A Donor Account is deleted")
if siteSettings.admin_receive_account_deleted_emails:
sendEmailNotificationsToAdmins(siteSettings, mail_title, get_account_deleted_admin_text(user), render_to_string(
'donations/email_templates/account_deleted_admin.html', context={'user': user, 'mail_title': mail_title}))
def sendAccountDeletedNotifToDonor(user):
mail_title = _("Your Account is Deleted")
sendEmailNotificationsToDonor(user.email, mail_title, get_account_deleted_donor_text(user), render_to_string('donations/email_templates/account_deleted_donor.html', context={'user': user, 'mail_title': mail_title}))
def sendVerificationEmail(user):
set_default_from_email()
# allauth's email confirmation uses DEFAULT_FROM_EMAIL
send_email_confirmation(user, True)
|
from json import JSONDecodeError
from django.http import JsonResponse
from requests.exceptions import InvalidSchema
from rest_framework import viewsets, status
from rest_framework.response import Response
from opendp_apps.dataverses.dataverse_client import DataverseClient
from opendp_apps.dataverses.dv_user_handler import DataverseUserHandler, DataverseResponseError
from opendp_apps.dataverses.models import DataverseHandoff
from opendp_apps.dataverses.serializers import DataverseUserSerializer
from opendp_apps.user.models import DataverseUser
from opendp_apps.utils.view_helper import get_json_error, get_json_success, get_object_or_error_response
from opendp_project.views import BaseModelViewSet
class DataverseUserView(BaseModelViewSet):
def get_serializer(self, instance=None):
return DataverseUserSerializer(context={'request': instance})
def create(self, request, *args, **kwargs):
"""Expects JSON. Given object_ids for OpenDPUser and DataverseHandoff objects,
retrieve the user's information from Dataverse and create a DataverseUser"""
# ----------------------------------
# Validate the input
# ----------------------------------
# print(f"data: {request.data}")
request_data = request.data.copy()
user_id = request.data.get('user')
handoff_id = request.data.get('dv_handoff')
request_data['handoff'] = handoff_id
request_data['user'] = user_id
handoff_obj = get_object_or_error_response(DataverseHandoff, object_id=handoff_id)
try:
dataverse_user = DataverseUser.objects.get(user__object_id=user_id,
dv_installation=handoff_obj.dv_installation)
opendp_user = dataverse_user.user
except DataverseUser.DoesNotExist:
# ----------------------------------
# Create the DataverseUser object
# ----------------------------------
dataverse_user_serializer = DataverseUserSerializer(data=request_data, context={'request': request})
if not dataverse_user_serializer.is_valid():
# print("INVALID SERIALIZER")
return Response(dataverse_user_serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
try:
dataverse_user = dataverse_user_serializer.save()
except DataverseHandoff.DoesNotExist:
return Response(dataverse_user_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
except DataverseUser.DoesNotExist:
return Response(dataverse_user_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
opendp_user = dataverse_user_serializer.validated_data.get('user')
# ----------------------------------
# Call the Dataverse API
# ----------------------------------
site_url = handoff_obj.dv_installation.dataverse_url
# print('-- site_url', site_url)
api_general_token = dataverse_user.dv_general_token
dataverse_client = DataverseClient(site_url, api_general_token)
try:
dataverse_response = dataverse_client.get_user_info(user_api_token=api_general_token)
except InvalidSchema:
return Response(get_json_error(f'The Site {site_url} is not valid'), status=status.HTTP_400_BAD_REQUEST)
except JSONDecodeError:
return Response(get_json_error(f'Error reading data from {site_url}'), status=status.HTTP_400_BAD_REQUEST)
if dataverse_response.success is not True:
return Response(get_json_error(dataverse_response.message), status=status.HTTP_400_BAD_REQUEST)
try:
handler = DataverseUserHandler(opendp_user.id, site_url,
api_general_token,
dataverse_response.__dict__)
update_response = handler.update_dataverse_user()
except DataverseResponseError as ex:
return Response(get_json_error(f'Error {ex}'), status=status.HTTP_400_BAD_REQUEST)
return Response(get_json_success('success', data={'dv_user': dataverse_user.object_id}),
status=status.HTTP_201_CREATED)
def update(self, request, object_id=None, *args, **kwargs):
"""NOT REALLY USED!!! e.g. create is really create_or_update"""
"""Update the Dataverse User. Expects JSON"""
# ----------------------------------
# Validate the input
# ----------------------------------
print(f"data: {request.data}\tpk: {object_id}")
dataverse_user = get_object_or_error_response(DataverseUser, object_id=object_id)
opendp_user = dataverse_user.user
request.data['user'] = opendp_user.object_id
dataverse_user_serializer = DataverseUserSerializer(data=request.data, context={'request': request})
if dataverse_user_serializer.is_valid():
try:
dataverse_user = dataverse_user_serializer.update(dataverse_user, request.data)
except DataverseHandoff.DoesNotExist:
return JsonResponse({'success': False, 'message': 'No such DataVerse exists'},
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(dataverse_user_serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
opendp_user = dataverse_user_serializer.validated_data.get('user')
if not opendp_user:
return Response({'success': False, 'message': 'No OpenDP user found'})
# ----------------------------------
# Call the Dataverse API
# ----------------------------------
dv_handoff = get_object_or_error_response(DataverseHandoff, object_id=request.data['dv_handoff'])
site_url = dv_handoff.dv_installation.dataverse_url
api_general_token = dataverse_user.dv_general_token
dataverse_client = DataverseClient(site_url, api_general_token)
try:
dataverse_response = dataverse_client.get_user_info(user_api_token=api_general_token)
except InvalidSchema:
return JsonResponse(get_json_error(f'The Site {site_url} is not valid'),
status=400)
except JSONDecodeError:
return JsonResponse(get_json_error(f'Error reading data from {site_url}'),
status=status.HTTP_400_BAD_REQUEST)
if dataverse_response.success is not True:
return JsonResponse(get_json_error(dataverse_response.message),
status=400)
# ----------------------------------
# Update the DataverseUser object
# ----------------------------------
try:
handler = DataverseUserHandler(opendp_user.id, site_url,
api_general_token,
dataverse_response.__dict__)
update_resp = handler.update_dataverse_user()
if update_resp.success:
updated_dv_user = update_resp.data
updated_dv_user.save()
else:
return JsonResponse(get_json_error(update_resp.message), status=status.HTTP_400_BAD_REQUEST)
except DataverseResponseError as ex:
return JsonResponse(get_json_error(f'Error {ex}'),
status=status.HTTP_400_BAD_REQUEST)
return JsonResponse(get_json_success('updated',
data=dict(dv_user=updated_dv_user.object_id)),
status=201)
|
#!/usr/bin/python
from distsys.services import services
s = services()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# vim:ts=4:sw=4:expandtab
import pygame
import pprint
from cb import *
from timer import *
from textobj import *
from wiimote import *
from questions import *
from view_question import *
from pygame.locals import *
STATE_IDLE = 0
STATE_PLAYER1 = 1
STATE_PLAYER2 = 2
STATE_PLAYER3 = 3
STATE_PLAYER4 = 4
STATE_DONE = 5
class ViewPlayers:
def __init__(self, screen):
print "initiating view"
font = pygame.font.Font(None, 72)
self.font = font
self.res = None
self.headline = TextObj(screen, font, "Spieler einstellen", "center", 50)
font = pygame.font.Font(None, 36)
self.sfont = font
self.state = STATE_IDLE
self.player_desc = [
TextObj(screen, font, "Spieler 1:", 100, 200),
TextObj(screen, font, "Spieler 2:", 100, 300),
TextObj(screen, font, "Spieler 3:", 100, 400),
TextObj(screen, font, "Spieler 4:", 100, 500)
]
self.player_names = [
TextObj(screen, font, "", 250, 200),
TextObj(screen, font, "", 250, 300),
TextObj(screen, font, "", 250, 400),
TextObj(screen, font, "", 250, 500)
]
self.screen = screen
def handle_key(self, key):
print "handling key"
if key == K_DOWN:
print "user pressed down"
def handle_key_u(self, key):
if key == u'1':
self.state = STATE_PLAYER1
elif key == u'2':
self.state = STATE_PLAYER2
elif key == u'3':
self.state = STATE_PLAYER3
elif key == u'4':
self.state = STATE_PLAYER4
elif key == u'0':
self.state = STATE_IDLE
else:
n = self.player_names[self.state - 1]
if self.state < STATE_PLAYER1 or self.state > STATE_PLAYER4:
if key == u'\r':
print "done with player setup"
q = questions.next_question(self.screen)
self.res = ViewQuestion(self.screen, q, 1)
for c in range(0, 4):
if len(self.player_names[c].get_text()) > 0:
players.add_player(self.player_names[c].get_text())
players.dump()
return
print "printable char %s entered" % key
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(key)
# Handle backspace
if key == u'\x08':
n.set_text(n.get_text()[0:-1])
elif key == u'\r':
w = None
c = 0
while w == None:
TextObj(self.screen, self.font, u"Jetzt 1+2 drücken! (" + str(c) + ")", "center", "center").render()
pygame.display.flip()
try:
w = Wiimote(n.get_text(), None)
w.set_cb(CB.get_cb())
except:
w = None
c += 1
self.state = STATE_IDLE
print "done"
else:
n.set_text(n.get_text() + key)
def render(self, screen):
screen.fill((0, 0, 0))
self.headline.render()
for t in self.player_desc + self.player_names:
t.render()
if self.state >= STATE_PLAYER1 and self.state <= STATE_PLAYER4:
TextObj(self.screen, self.sfont, ">", 50, 200 + ((self.state - 1) * 100)).render()
return self.res
|
import plotly.graph_objects as go
import plotly
import sys
import common
import table_single_provers
import figure_createall
def plot(prover_dict):
systems = ["D","T","S4","S5"]
quants = ["const","cumul","vary"]
configurations = [(a,b) for a in systems for b in quants]
fig = plotly.subplots.make_subplots(
rows=2, cols=3,
#shared_yaxes=True,
horizontal_spacing = 0.05,
vertical_spacing = 0.10,
row_heights=[0.3]*2,
column_widths=[0.3]*3,
subplot_titles=(["D/const","T/const","S4/const","S5/const","S5/cumul"])
)
for row in range(2):
for column in range(3):
if row == 1 and column == 2:
continue
if row == 0:
sys = systems[column]
else:
sys = "S5"
if row == 1 and column == 1:
quant = "cumul"
else:
quant = "const"
mlean_sys = sys+"all"
mlean_quant = quant+"all"
mlean_t_cpu = sorted(list(map(lambda p: p.cpu,prover_dict["mleancop"][mlean_sys][mlean_quant]['csa_single'])))
mlean_t_wc = sorted(list(map(lambda p: p.wc,prover_dict["mleancop"][mlean_sys][mlean_quant]['csa_single'])))
mlean_x = [x for x in range(1,len(mlean_t_wc)+1)]
nitpick_sys = sys+"sem"
nitpick_quant = quant+"all"
if quant == "vary":
nitpick_quant = quant+"all"
if sys == "S5" and quant != "vary":
nitpick_sys = "S5Usem"
if sys == "D" and quant == "const":
nitpick_sys = "Tsyn"
nitpick_t_cpu = sorted(list(map(lambda p: p.cpu,prover_dict["nitpick"][nitpick_sys][nitpick_quant]['csa_single'])))
nitpick_t_wc = sorted(list(map(lambda p: p.wc,prover_dict["nitpick"][nitpick_sys][nitpick_quant]['csa_single'])))
nitpick_x = [x for x in range(1,len(nitpick_t_wc)+1)]
show_legend = False
if row == 0 and column == 0:
show_legend = True
mlean_plot_cpu = go.Scatter(name="MLeanCop CPU",showlegend=show_legend,y=mlean_t_cpu,x=mlean_x,marker=dict(color=figure_createall.COL_MLEANCOP_PRIMARY, size=figure_createall.SIZE_LINE_TIME))
mlean_plot_wc = go.Scatter(name="MLeanCop WC",showlegend=show_legend,y=mlean_t_wc,x=mlean_x,marker=dict(color=figure_createall.COL_MLEANCOP_PRIMARY, size=figure_createall.SIZE_LINE_TIME))
nitpick_plot_cpu = go.Scatter(name="Nitpick CPU",showlegend=show_legend,y=nitpick_t_cpu,x=nitpick_x,marker=dict(color=figure_createall.COL_NITPICK_PRIMARY, size=figure_createall.SIZE_LINE_TIME))
nitpick_plot_wc = go.Scatter(name="Nitpick WC",showlegend=show_legend,y=nitpick_t_wc,x=nitpick_x,marker=dict(color=figure_createall.COL_NITPICK_SECONDARY, size=figure_createall.SIZE_LINE_TIME))
fig.append_trace(mlean_plot_cpu,row=(row+1),col=(column+1))
fig.append_trace(mlean_plot_wc,row=(row+1),col=(column+1))
fig.append_trace(nitpick_plot_cpu,row=(row+1),col=(column+1))
fig.append_trace(nitpick_plot_wc,row=(row+1),col=(column+1))
yaxis_dict = dict(titlefont_size=figure_createall.SIZE_FONT,
titlefont_color="black",
tickfont_color="black",
tickfont_size=figure_createall.SIZE_FONT,
range=[0, 240], row=row+1, col=column+1)
xaxis_dict = dict(titlefont_size=figure_createall.SIZE_FONT,
titlefont_color="black",
tickfont_color="black",
tickfont_size=figure_createall.SIZE_FONT,
range=[0, 450], row=row+1, col=column+1)
if column == 0:
yaxis_dict["title_text"]="time (s)"
if row == 1:
xaxis_dict["title_text"]="number of counter models"
fig.update_yaxes(**yaxis_dict)
fig.update_xaxes(**xaxis_dict)
fig.update_layout(
#title='US Export of Plastic Scrap',
font_color="black",
font_size=figure_createall.SIZE_FONT,
legend_orientation="h",
legend=dict(
y=-0.15,
font_size=figure_createall.SIZE_FONT
# x=0,
# y=1.0,
# bgcolor='rgba(255, 255, 255, 0)',
# bordercolor='rgba(255, 255, 255, 0)'
)
)
for i in fig['layout']['annotations']:
i['font'] = dict(size=figure_createall.SIZE_FONT,color='black')
return fig
def main_helper(prover_dict):
fig = plot(prover_dict)
#fig.show()
path="/home/tg/master_thesis/thesis/plots/csa_comparison_time.png"
fig.write_image(path,width=1600, height=1200)
def main(csv_file_list):
problem_list = common.accumulate_csv(csv_file_list)
prover_dict = table_single_provers.getTableData(problem_list)
table_single_provers.createOptHo(prover_dict)
main_helper(prover_dict)
if __name__ == "__main__":
main(sys.argv[1:])
|
#!/usr/bin/python3
# This will prompt for the desktop type and launch the desktop of your choice in the TTY
from os import ttyname, listdir, system
from sys import stdout, stdin
from re import search
from io import open
from subprocess import getstatusoutput
# Make sure requirements are meet
# Have sx installed
if getstatusoutput('sx')[0] != 0:
print('Error! You need to have sx installed. Please install sx package to start x session')
exit(-11)
# Get the TTY name
TTY = str(ttyname(stdout.fileno())).split('/')[2].replace('tty','')
# Get the desktop environments installed on this system
DESKTOPS = listdir('/usr/share/xsessions')
# Present the list of desktops available to the user
print('Enter the number for the desktop you wish to use:\n')
INDEX = 0
for desktop in DESKTOPS:
print('[' + str(INDEX) + ']' + ' ' + str(desktop).replace('.desktop', ''))
INDEX = INDEX+1
# Get the desktop selected by the user
ENTEREDINDEX = False
SEARCH = None
SELECTEDINDEX = None
EXEC = None
while SELECTEDINDEX is None:
    USERENTERED = input()
    # Check input: accept only a number that maps to a listed desktop
    SEARCH = search(r"\d+", USERENTERED)
    if SEARCH and int(SEARCH.group(0)) < len(DESKTOPS):
        SELECTEDINDEX = int(SEARCH.group(0))
    else:
        print("Please enter a valid number")
print('\nSelected: ' + DESKTOPS[SELECTEDINDEX])
# Now get the command needed to start the desktop session
FILE = open('/usr/share/xsessions/' + DESKTOPS[SELECTEDINDEX], "rb")
for line in FILE:
try:
if line.decode('utf8').startswith("Exec="):
EXEC = ' '.join(line.decode('utf8').split('=')[1:]).rstrip()
except:
# Ignore the line
continue
# Start the desktop session
system('sx ' + EXEC + " -- :1 vt" + TTY)
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import glob
import os
import re
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Get new version.
with open(os.path.join(root, "pyro", "__init__.py")) as f:
for line in f:
if line.startswith("version_prefix ="):
new_version = line.strip().split()[-1]
# Collect potential files.
filenames = []
for path in ["examples", "tutorial/source"]:
for ext in ["*.py", "*.ipynb"]:
filenames.extend(glob.glob(os.path.join(root, path, "**", ext),
recursive=True))
filenames.sort()
# Update version string.
pattern = re.compile("assert pyro.__version__.startswith\\('[^']*'\\)")
text = f"assert pyro.__version__.startswith({new_version})"
for filename in filenames:
with open(filename) as f:
old_text = f.read()
new_text = pattern.sub(text, old_text)
if new_text != old_text:
print("updating {}".format(filename))
with open(filename, "w") as f:
f.write(new_text)
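
# Hedged sanity check (illustrative only, not part of the release script): shows
# the substitution performed above on a made-up assertion line; "0.0.0" is a
# placeholder, and new_version keeps the quotes read from pyro/__init__.py.
if __name__ == "__main__":
    _sample = "assert pyro.__version__.startswith('0.0.0')"
    print(pattern.sub(text, _sample))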
|
import tensorflow as tf
import sys
sys.path.append("../tf")
from backend.op import conv_bn, conv_dw, basic_rfb, separable_conv
def basic_conv(x, out_ch, kernel_size, stride=(1, 1), padding=0, dilation=1, relu=True,
bn=True, prefix='basic_conv'):
    if padding > 0:
out = tf.keras.layers.ZeroPadding2D(padding=padding, name=f'{prefix}_padding')(x)
else:
out = x
out = tf.keras.layers.Conv2D(out_ch,
kernel_size,
strides=stride,
dilation_rate=dilation,
use_bias=(not bn),
name=f'{prefix}_conv')(out)
if bn:
out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}_bn')(out)
if relu:
out = tf.keras.layers.ReLU(name=f'{prefix}_relu')(out)
return out
def basic_rfb(x, in_ch, out_ch, stride=1, scale=0.1, map_reduce=8, vision=1, prefix='basic_rfb'):
inter_ch = in_ch // map_reduce
branch0 = basic_conv(x, inter_ch, kernel_size=1, stride=1, relu=False,
prefix=f'{prefix}.branch0.0')
branch0 = basic_conv(branch0, 2 * inter_ch, kernel_size=3, stride=stride, padding=1,
prefix=f'{prefix}.branch0.1')
branch0 = basic_conv(branch0, 2 * inter_ch, kernel_size=3, stride=1, dilation=vision + 1,
padding=vision + 1, relu=False, prefix=f'{prefix}.branch0.2')
branch1 = basic_conv(x, inter_ch, kernel_size=1, stride=1, relu=False,
prefix=f'{prefix}.branch1.0')
branch1 = basic_conv(branch1, 2 * inter_ch, kernel_size=3, stride=stride, padding=1,
prefix=f'{prefix}.branch1.1')
branch1 = basic_conv(branch1, 2 * inter_ch, kernel_size=3, stride=1, dilation=vision + 2,
padding=vision + 2, relu=False, prefix=f'{prefix}.branch1.2')
branch2 = basic_conv(x, inter_ch, kernel_size=1, stride=1, relu=False,
prefix=f'{prefix}.branch2.0')
branch2 = basic_conv(branch2, (inter_ch // 2) * 3, kernel_size=3, stride=1, padding=1,
prefix=f'{prefix}.branch2.1')
branch2 = basic_conv(branch2, 2 * inter_ch, kernel_size=3, stride=stride, padding=1,
prefix=f'{prefix}.branch2.2')
branch2 = basic_conv(branch2, 2 * inter_ch, kernel_size=3, stride=1, dilation=vision + 4,
padding=vision + 4, relu=False, prefix=f'{prefix}.branch2.3')
out = tf.keras.layers.Concatenate(axis=-1, name=f'{prefix}_cat')([branch0, branch1, branch2])
out = basic_conv(out, out_ch, kernel_size=1, stride=1, relu=False, prefix=f'{prefix}.convlinear')
shortcut = basic_conv(x, out_ch, kernel_size=1, stride=stride, relu=False, prefix=f'{prefix}.shortcut')
out = tf.multiply(out, scale, name=f'{prefix}_mul')
out = tf.keras.layers.Add(name=f'{prefix}_add')([out, shortcut])
out = tf.keras.layers.ReLU(name=f'{prefix}_relu')(out)
return out
def separable_conv(x, out_ch, kernel_size, stride, padding, prefix='separable_conv'):
out = tf.keras.layers.ZeroPadding2D(padding=padding, name=f'{prefix}_dconv_padding')(x)
out = tf.keras.layers.DepthwiseConv2D(kernel_size,
strides=stride,
name=f'{prefix}_dconvbias')(out)
out = tf.keras.layers.ReLU(name=f'{prefix}_relu')(out)
out = tf.keras.layers.Conv2D(out_ch, 1,
name=f'{prefix}_convbias')(out)
return out
def conv_bn(x, out_ch, stride, padding=1, prefix='conv_bn'):
out = tf.keras.layers.ZeroPadding2D(padding=padding, name=f'{prefix}.0_padding')(x)
out = tf.keras.layers.Conv2D(out_ch,
(3, 3),
strides=stride,
use_bias=False,
name=f'{prefix}.0_conv')(out)
out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.1_bn')(out)
out = tf.keras.layers.ReLU(name=f'{prefix}.2_relu')(out)
return out
def conv_dw(x, out_ch, stride, padding=1, prefix='conv_dw'):
out = tf.keras.layers.ZeroPadding2D(padding=padding, name=f'{prefix}.0_padding')(x)
out = tf.keras.layers.DepthwiseConv2D(3, strides=stride,
use_bias=False,
name=f'{prefix}.0_dconv')(out)
out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.1_bn')(out)
out = tf.keras.layers.ReLU(name=f'{prefix}.2_relu')(out)
out = tf.keras.layers.Conv2D(out_ch, 1, use_bias=False, name=f'{prefix}.3_conv')(out)
out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.4_bn')(out)
out = tf.keras.layers.ReLU(name=f'{prefix}.5_relu')(out)
return out
def create_rfb_net(input_shape, base_channel, num_classes):
input_node = tf.keras.layers.Input(shape=(input_shape[0], input_shape[1], 3))
net = conv_bn(input_node, base_channel, stride=2, prefix='basenet.0') # 120x160
net = conv_dw(net, base_channel * 2, stride=1, prefix='basenet.1')
net = conv_dw(net, base_channel * 2, stride=2, prefix='basenet.2') # 60x80
net = conv_dw(net, base_channel * 2, stride=1, prefix='basenet.3')
net = conv_dw(net, base_channel * 4, stride=2, prefix='basenet.4') # 30x40
net = conv_dw(net, base_channel * 4, stride=1, prefix='basenet.5')
net = conv_dw(net, base_channel * 4, stride=1, prefix='basenet.6')
header_0 = basic_rfb(net, base_channel * 4, base_channel * 4, stride=1, scale=1.0, prefix='basenet.7')
net = conv_dw(header_0, base_channel * 8, stride=2, prefix='basenet.8') # 15x20
net = conv_dw(net, base_channel * 8, stride=1, prefix='basenet.9')
header_1 = conv_dw(net, base_channel * 8, stride=1, prefix='basenet.10')
net = conv_dw(header_1, base_channel * 16, stride=2, prefix='basenet.11') # 8x10
header_2 = conv_dw(net, base_channel * 16, stride=1, prefix='basenet.12')
out = tf.keras.layers.Conv2D(base_channel * 4, 1, padding='SAME', name='extras_convbias')(header_2)
out = tf.keras.layers.ReLU(name='extras_relu1')(out)
out = separable_conv(out, base_channel * 16, kernel_size=3, stride=2, padding=1,
prefix='extras_sep')
header_3 = tf.keras.layers.ReLU(name='extras_relu2')(out)
reg_0 = separable_conv(header_0, 3 * 4, kernel_size=3, stride=1, padding=1,
prefix='reg_0_sep')
cls_0 = separable_conv(header_0, 3 * num_classes, kernel_size=3, stride=1, padding=1,
prefix='cls_0_sep')
reg_1 = separable_conv(header_1, 2 * 4, kernel_size=3, stride=1, padding=1,
prefix='reg_1_sep')
cls_1 = separable_conv(header_1, 2 * num_classes, kernel_size=3, stride=1, padding=1,
prefix='cls_1_sep')
reg_2 = separable_conv(header_2, 2 * 4, kernel_size=3, stride=1, padding=1,
prefix='reg_2_sep')
cls_2 = separable_conv(header_2, 2 * num_classes, kernel_size=3, stride=1, padding=1,
prefix='cls_2_sep')
reg_3 = tf.keras.layers.Conv2D(3 * 4, kernel_size=3, padding='SAME',
name='reg_3_convbias')(header_3)
cls_3 = tf.keras.layers.Conv2D(3 * num_classes, kernel_size=3, padding='SAME',
name='cls_3_convbias')(header_3)
reg_list = [tf.keras.layers.Reshape([-1, 4])(reg) for reg in [reg_0, reg_1, reg_2, reg_3]]
cls_list = [tf.keras.layers.Reshape([-1, num_classes])(cls) for cls in [cls_0, cls_1, cls_2, cls_3]]
reg = tf.keras.layers.Concatenate(axis=1, name='face_boxes')(reg_list)
cls = tf.keras.layers.Concatenate(axis=1)(cls_list)
cls = tf.keras.layers.Softmax(axis=-1, name='face_scores')(cls)
model = tf.keras.Model(inputs=[input_node], outputs=[reg, cls])
model.summary()
return model
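
# Hedged usage sketch (assumed values, not the original training config): build
# the detector for a 240x320 RGB input with base_channel=16 and two classes
# (background / face), matching the 120x160 -> 8x10 feature-map comments above.
if __name__ == "__main__":
    create_rfb_net(input_shape=(240, 320), base_channel=16, num_classes=2)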
|
#!/usr/bin/env python
import hail
from hail.expr import TStruct
from pprint import pprint
def flatten_struct(struct, root='', leaf_only=True):
result = {}
for f in struct.fields:
path = '%s.%s' % (root, f.name)
if isinstance(f.typ, TStruct):
result.update(flatten_struct(f.typ, path))
if not leaf_only:
result[path] = f
else:
result[path] = f
return result
hc = hail.HailContext(log="/hail.log")
genomes_vds = hc.read('gs://gnomad-public/release/2.0.2/vds/genomes/gnomad.genomes.r2.0.2.sites.vds')
as_filter_status_fields=['va.info.AS_FilterStatus']
as_filter_status_attributes = flatten_struct(genomes_vds.variant_schema, root="va")
as_filter_status_expression = ['%s = %s.map(x => orMissing(isDefined(x), if(x.isEmpty()) "PASS" else x.toArray.mkString("|")))' % (x, x) for x in as_filter_status_fields]
genomes_vds = genomes_vds.annotate_variants_expr(as_filter_status_expression)
pprint(genomes_vds.variant_schema)
genomes_vds.export_vcf('gs://gnomad-browser/genomes/sept-2017-release-202-parts/gnomad.genomes.r2.0.2.sites.parts.vcf.bgz', parallel=True)
|
from itertools import groupby
from pprint import pformat
def name_stack_repr(name_stack):
segments = []
for name, group in groupby(name_stack):
        group_len = len(list(group))
        segments.append(f"{name}x{group_len}" if group_len > 1 else name)
return segments[::-1]
class CellDSLError(Exception):
"""Base Cell DSL error"""
def __init__(self, message, action_num=None, action_lst=None, name_stack=None, action=None, save_points=None):
self.message = message
self.action_num = action_num
self.action_lst = action_lst
self.name_stack = name_stack
self.action = action
self.save_points = save_points
def __str__(self):
segments = []
if self.name_stack is not None:
segments.append(f"Name stack: {pformat(name_stack_repr(self.name_stack))}")
if self.action_lst is not None and self.action_num is not None:
segments.append(
f"Adjacent actions: {pformat(self.action_lst[max(self.action_num - 10, 0):self.action_num + 10])}"
)
if self.action_num is not None:
segments.append(f"Action num: {pformat(self.action_num)}")
if self.action is not None:
segments.append(f"Triggering action: {self.action}")
if self.save_points is not None:
segments.append(f"Save points already present: {pformat(self.save_points)}")
additional_info = "\n".join(segments)
full_message = [self.message]
if additional_info:
full_message.append(f"Additional info:\n{additional_info}")
return "\n".join(full_message)
class MovementCellDSLError(CellDSLError):
"""A Cell DSL error triggered by invalid movement"""
class ExecutionCellDSLError(CellDSLError):
"""A Cell DSL error triggered by an exception during execution."""
|
import unittest
import pso
class initPopulationTest(unittest.TestCase):
def test_initializePopulation(self):
pop = pso.initPopulation(10, {})
self.assertTrue(len(pop) == 10)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, Fabian Girrbach, Social Robotics Lab, University of Freiburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Dependencies for ROS
import rospy
import rospkg
from std_msgs.msg import Float32, Bool
# Other dependencies
import yaml
from collections import namedtuple
from time import time, sleep
import subprocess
from signal import SIGKILL, SIGTERM
import os
from multiprocessing import Process, Pipe
# PySMAC
import pysmac
# definition of parameters which should be optimized
parameters_to_optimize = dict(
    track_is_mature_after_total_num_matches=("integer", [1, 300], 100),  # (type, [min, max], default)
    max_occlusions_before_deletion_of_mature_track=("integer", [1, 250], 50),
    max_occlusions_before_deletion=("integer", [1, 250], 20),
    process_noise_level=("real", [1e-5, 2.5], 0.1),
    measurement_noise=("real", [1e-5, 2.5], 0.1),
)
# list of files where the parameters for the optimization should be found
parameter_files_to_search = ['/launch/params/occlusion_manager_basic_tuning.yaml', '/launch/params/ekf_tuning.yaml',]
# list of ros package names where parameters can be found
packages_for_paramteres = ['srl_nearest_neighbor_tracker']
# roslaunch commands for several instances where tuning should happen
roslaunch_commands = [['roslaunch', 'srl_nearest_neighbor_tracker' , 'pedsim_tuning.launch']]
# List of forbidden configuations
forbidden_clauses = ['{(max_occlusions_before_deletion_of_mature_track < max_occlusions_before_deletion)}']
parameter_list = []
fail_result = -1.0
mota_result = -1.0
def find_parameters():
# get an instance of RosPack with the default search paths
rospack = rospkg.RosPack()
for package_name in packages_for_paramteres:
package_path = rospack.get_path(package_name)
for file in parameter_files_to_search:
param_file = package_path + file
try:
params_in_file = yaml.load(open(param_file))
for param_key in parameters_to_optimize:
if param_key in params_in_file:
print 'Found {} in {} paramfile {}'.format(param_key, param_file, parameters_to_optimize[param_key])
new_param = {'name':param_key, 'path':param_file, 'default':parameters_to_optimize[param_key][-1], 'current':50}
parameter_list.append(new_param)
except:
pass
def write_parameters():
print "inside write parameters"
for param in parameter_list:
print "current param {}".format(param)
with open(param['path'], 'r') as param_file:
params_in_file = yaml.load(param_file)
params_in_file[param['name']] = param['current']
with open(param['path'], 'w') as param_file:
param_file.write(yaml.dump(params_in_file, default_flow_style=True) )
def resultCallback(result):
print "PyMot results received {}".format(result)
    global mota_result, process_sim
mota_result = result.data
os.killpg(process_sim.pid, SIGTERM)
sleep(3)
rospy.signal_shutdown('Mota result received')
def clean_ros_logs():
# After many iterations ros gathered a lot of log files therfore we clean them after each iteration
subprocess.call(['rosclean','purge','-y'])
def start_node(child_conn, ros_command):
clean_ros_logs()
# launch tracker and everything which comes with it
global process_sim, mota_result
mota_result = fail_result
process_sim = subprocess.Popen(ros_command, preexec_fn=os.setsid)
node = rospy.init_node('tuning_node', anonymous=True)
while rospy.is_shutdown():
print 'Waiting for ROS to start'
sleep(1)
rospy.Subscriber("/pymot_result", Float32, resultCallback)
rospy.spin()
sleep(3)
tuning_object = dict()
tuning_object['result'] = mota_result
child_conn.send(tuning_object)
def optimize_parameters(**kwargs):
print "Function was called with arguments: {}".format(kwargs)
# Modify values in parameter list depending on passed values
for arg in kwargs.keys():
print "Current key argument: {}".format(arg)
if arg == "instance":
roslaunch_command = roslaunch_commands[kwargs[arg]]
print "Current ROS Launch command is {}".format(roslaunch_command)
continue
try:
current = next(param for param in parameter_list if param['name']==arg)
current['current'] = kwargs[arg]
except:
pass
write_parameters()
sleep(0.5)
parent_conn, child_conn = Pipe()
p = Process(target=start_node, args=(child_conn,roslaunch_command,))
p.start()
result = parent_conn.recv()
print 'Received current result {}'.format(result['result'])
p.join()
p.terminate()
return -result['result']
def init_optimization():
opt = pysmac.SMAC_optimizer(working_directory= '/home/fabian/tuning_deletion_logic/',persistent_files=True, debug = False)
parameter_definition= parameters_to_optimize
print parameter_definition
value, parameters = opt.minimize(optimize_parameters # the function to be minimized
, 500 # the maximum number of function evaluations
, parameter_definition # dict of parmaeter definition
, forbidden_clauses=forbidden_clauses # list of forbidden clauses
, t_limit_function_s=360 # time limit cor one tuning iteration
, num_runs=2 # number of independent tuning runs
, num_train_instances=len(roslaunch_commands) # number of datasets used for tuning
, deterministic=True) # deterministic results
print('The minimum value %f was found for the configurations %s'%(value, parameters))
for param_key in parameters.keys():
try:
current = next(param for param in parameter_list if param['name']==param_key)
current['current'] = parameters[param_key]
except:
pass
print("Writing best parameter configuration to param file(s) {}".format(parameter_files_to_search))
write_parameters()
print("Exited sucessfully!")
if __name__ == '__main__':
try:
find_parameters()
init_optimization()
except rospy.ROSInterruptException:
pass
|
from __future__ import print_function
# bustersAgents.py
# ----------------
from builtins import str
from builtins import range
from builtins import object
import util
from game import Agent
from game import Directions
from keyboardAgents import KeyboardAgent
import inference
import busters
class NullGraphics(object):
"Placeholder for graphics"
def initialize(self, state, isBlue = False):
pass
def update(self, state):
pass
def pause(self):
pass
def draw(self, state):
pass
def updateDistributions(self, dist):
pass
def finish(self):
pass
class KeyboardInference(inference.InferenceModule):
"""
Basic inference module for use with the keyboard.
"""
def initializeUniformly(self, gameState):
"Begin with a uniform distribution over ghost positions."
self.beliefs = util.Counter()
for p in self.legalPositions: self.beliefs[p] = 1.0
self.beliefs.normalize()
def observe(self, observation, gameState):
noisyDistance = observation
emissionModel = busters.getObservationDistribution(noisyDistance)
pacmanPosition = gameState.getPacmanPosition()
allPossible = util.Counter()
for p in self.legalPositions:
trueDistance = util.manhattanDistance(p, pacmanPosition)
if emissionModel[trueDistance] > 0:
allPossible[p] = 1.0
allPossible.normalize()
self.beliefs = allPossible
def elapseTime(self, gameState):
pass
def getBeliefDistribution(self):
return self.beliefs
class BustersAgent(object):
"An agent that tracks and displays its beliefs about ghost positions."
def __init__( self, index = 0, inference = "ExactInference", ghostAgents = None, observeEnable = True, elapseTimeEnable = True):
inferenceType = util.lookup(inference, globals())
self.inferenceModules = [inferenceType(a) for a in ghostAgents]
self.observeEnable = observeEnable
self.elapseTimeEnable = elapseTimeEnable
def registerInitialState(self, gameState):
"Initializes beliefs and inference modules"
import __main__
self.display = __main__._display
for inference in self.inferenceModules:
inference.initialize(gameState)
self.ghostBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
self.firstMove = True
def observationFunction(self, gameState):
"Removes the ghost states from the gameState"
agents = gameState.data.agentStates
gameState.data.agentStates = [agents[0]] + [None for i in range(1, len(agents))]
return gameState
def getAction(self, gameState):
"Updates beliefs, then chooses an action based on updated beliefs."
for index, inf in enumerate(self.inferenceModules):
if not self.firstMove and self.elapseTimeEnable:
inf.elapseTime(gameState)
self.firstMove = False
if self.observeEnable:
inf.observeState(gameState)
self.ghostBeliefs[index] = inf.getBeliefDistribution()
self.display.updateDistributions(self.ghostBeliefs)
return self.chooseAction(gameState)
def chooseAction(self, gameState):
"By default, a BustersAgent just stops. This should be overridden."
return Directions.STOP
class BustersKeyboardAgent(BustersAgent, KeyboardAgent):
"An agent controlled by the keyboard that displays beliefs about ghost positions."
def __init__(self, index = 0, inference = "KeyboardInference", ghostAgents = None):
KeyboardAgent.__init__(self, index)
BustersAgent.__init__(self, index, inference, ghostAgents)
def getAction(self, gameState):
return BustersAgent.getAction(self, gameState)
def chooseAction(self, gameState):
return KeyboardAgent.getAction(self, gameState)
from distanceCalculator import Distancer
from game import Actions
from game import Directions
import random, sys
'''Random PacMan Agent'''
class RandomPAgent(BustersAgent):
def registerInitialState(self, gameState):
BustersAgent.registerInitialState(self, gameState)
self.distancer = Distancer(gameState.data.layout, False)
''' Example of counting something'''
def countFood(self, gameState):
food = 0
for width in gameState.data.food:
for height in width:
if(height == True):
food = food + 1
return food
''' Print the layout'''
def printGrid(self, gameState):
table = ""
##print(gameState.data.layout) ## Print by terminal
for x in range(gameState.data.layout.width):
for y in range(gameState.data.layout.height):
food, walls = gameState.data.food, gameState.data.layout.walls
table = table + gameState.data._foodWallStr(food[x][y], walls[x][y]) + ","
table = table[:-1]
return table
def printLineData(self,gameState):
'''Observations of the state
print(str(gameState.livingGhosts))
print(gameState.data.agentStates[0])
print(gameState.getNumFood())
print (gameState.getCapsules())
width, height = gameState.data.layout.width, gameState.data.layout.height
print(width, height)
print(gameState.data.ghostDistances)
print(gameState.data.layout)'''
'''END Observations of the state'''
print(gameState)
weka_line = ""
for i in gameState.livingGhosts:
weka_line = weka_line + str(i) + ","
weka_line = weka_line + str(gameState.getNumFood()) + ","
for i in gameState.getCapsules():
weka_line = weka_line + str(i[0]) + "," + str(i[1]) + ","
for i in gameState.data.ghostDistances:
weka_line = weka_line + str(i) + ","
weka_line = weka_line + str(gameState.data.score) + "," +\
str(len(gameState.data.capsules)) + "," + str(self.countFood(gameState)) +\
"," + str(gameState.data.agentStates[0].configuration.pos[0]) + "," +\
            str(gameState.data.agentStates[0].configuration.pos[1]) +\
"," + str(gameState.data.agentStates[0].scaredTimer) + "," +\
self.printGrid(gameState) + "," +\
str(gameState.data.agentStates[0].numReturned) + "," +\
str(gameState.data.agentStates[0].getPosition()[0]) + "," +\
str(gameState.data.agentStates[0].getPosition()[1])+ "," +\
str(gameState.data.agentStates[0].numCarrying)+ "," +\
str(gameState.data.agentStates[0].getDirection())
print(weka_line)
def chooseAction(self, gameState):
move = Directions.STOP
legal = gameState.getLegalActions(0) ##Legal position from the pacman
move_random = random.randint(0, 3)
self.printLineData(gameState)
if ( move_random == 0 ) and Directions.WEST in legal: move = Directions.WEST
if ( move_random == 1 ) and Directions.EAST in legal: move = Directions.EAST
if ( move_random == 2 ) and Directions.NORTH in legal: move = Directions.NORTH
if ( move_random == 3 ) and Directions.SOUTH in legal: move = Directions.SOUTH
return move
class GreedyBustersAgent(BustersAgent):
"An agent that charges the closest ghost."
def registerInitialState(self, gameState):
"Pre-computes the distance between every two points."
BustersAgent.registerInitialState(self, gameState)
self.distancer = Distancer(gameState.data.layout, False)
def chooseAction(self, gameState):
"""
First computes the most likely position of each ghost that has
not yet been captured, then chooses an action that brings
Pacman closer to the closest ghost (according to mazeDistance!).
To find the mazeDistance between any two positions, use:
self.distancer.getDistance(pos1, pos2)
To find the successor position of a position after an action:
successorPosition = Actions.getSuccessor(position, action)
livingGhostPositionDistributions, defined below, is a list of
util.Counter objects equal to the position belief
distributions for each of the ghosts that are still alive. It
is defined based on (these are implementation details about
which you need not be concerned):
1) gameState.getLivingGhosts(), a list of booleans, one for each
agent, indicating whether or not the agent is alive. Note
that pacman is always agent 0, so the ghosts are agents 1,
onwards (just as before).
2) self.ghostBeliefs, the list of belief distributions for each
of the ghosts (including ghosts that are not alive). The
indices into this list should be 1 less than indices into the
gameState.getLivingGhosts() list.
"""
pacmanPosition = gameState.getPacmanPosition()
legal = [a for a in gameState.getLegalPacmanActions()]
livingGhosts = gameState.getLivingGhosts()
livingGhostPositionDistributions = \
[beliefs for i, beliefs in enumerate(self.ghostBeliefs)
if livingGhosts[i+1]]
"*** YOUR CODE HERE ***"
return Directions.EAST
|
import FWCore.ParameterSet.Config as cms
hltPhase2L3MuonPSetPvClusterComparerForIT = cms.PSet(
track_chi2_max = cms.double(20.0),
track_prob_min = cms.double(-1.0),
track_pt_max = cms.double(100.0),
track_pt_min = cms.double(1.0)
)
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for authbox.gpio_button"""
import sys
import unittest
import Queue
import authbox.gpio_button
from authbox import fake_gpio_for_testing
from RPi import GPIO
class BlinkTest(unittest.TestCase):
def setUp(self):
self.fake = fake_gpio_for_testing.FakeGPIO()
self.q = Queue.Queue()
self.b = authbox.gpio_button.Button(self.q, 'b', '1', '2', on_down=self.on_down)
def on_down(self):
pass
def test_on(self):
self.b.on()
self.b.run_inner()
# 2 is output
self.fake.compare_log([(0, 2, True)])
# 1 is input
self.assertEqual(GPIO.FALLING, self.fake.events[1][0])
self.fake.events[1][1](None)
self.assertEqual(self.q.get(block=False), (self.on_down, self.b))
def test_blinking_thread(self):
# TODO: Improve this test to not take 1.5 seconds of wall time by faking
# Queue.get timeouts.
self.b.start()
self.b.blink()
for i in range(4):
self.b.run_inner()
self.fake.compare_log([
(0.0, 2, True), (0.5, 2, False), (1.0, 2, True), (1.5, 2, False)])
|
import hubitatmaker as hm
def test_normal_lock_codes(mocker) -> None:
hub = mocker.MagicMock()
device = mocker.MagicMock()
device.attributes = {
hm.ATTR_LOCK_CODES: hm.Attribute(
{
"name": hm.ATTR_LOCK_CODES,
"currentValue": '{"1":{"name":"Test","code":"1234"}}',
}
)
}
from custom_components.hubitat.lock import HubitatLock
lock = HubitatLock(hub=hub, device=device)
assert isinstance(lock.codes, dict)
def test_encrypted_lock_codes(mocker) -> None:
hub = mocker.MagicMock()
device = mocker.MagicMock()
device.attributes = {
hm.ATTR_LOCK_CODES: hm.Attribute(
{"name": hm.ATTR_LOCK_CODES, "currentValue": "abc1235Qbxyz"}
)
}
from custom_components.hubitat.lock import HubitatLock
lock = HubitatLock(hub=hub, device=device)
# A lock with encrypted codes should return a string for the `codes`
# property
assert isinstance(lock.codes, str)
|
# encoding: utf-8
from tkinter.messagebox import askquestion
import logging_helper
from uiutil.window.dynamic import DynamicRootWindow
from configurationutil import Configuration, cfg_params
from uiutil._metadata import __version__, __authorshort__, __module_name__
from uiutil.resources import templates
from uiutil.frame import dynamic
from uiutil.frame._dynamic_base import LINE_DEFAULT
logging = logging_helper.setup_logging()
# Register Config details (These are expected to be overwritten by an importing app)
cfg_params.APP_NAME = __module_name__
cfg_params.APP_AUTHOR = __authorshort__
cfg_params.APP_VERSION = __version__
# Set the config initialisation parameters
dynamic.add_layout_config(templates.example_ui_layout)
TEST_LAYOUT_CFG = u'example_config'
BUTTONS = u'widget_layouts.device_config_button_layout'
DEFAULT_DEVICE = {
u'ip': u'0.0.0.0',
u'port': 22,
u'active': True,
u'connect': False,
u'remote': False,
u'html': False
}
class ExampleDeviceConfigWindow(DynamicRootWindow):
def __init__(self,
*args,
**kwargs):
super(ExampleDeviceConfigWindow, self).__init__(*args, **kwargs)
# Button Helpers
def update_item(self,
widget_var_name,
widget_var):
logging.debug(u'WIDGET_VAR_NAME: {n}'.format(n=widget_var_name))
logging.debug(u'WIDGET_VAR: {v}'.format(v=widget_var.get()))
widget_name_parts = widget_var_name.split(u'_')
# Split the device name off the widget name
item_name = u'_'.join(widget_name_parts[4:-1])
logging.debug(u'ITEM_NAME: {v}'.format(v=item_name))
key = u'{c}.{i}.{v}'.format(c=TEST_LAYOUT_CFG,
i=item_name,
v=widget_name_parts[2])
logging.debug(u'KEY: {v}'.format(v=key))
self.cfg[key] = widget_var.get()
def add_device_name_trace(self,
widget_var_name,
widget_var):
logging.debug(u'WIDGET_VAR_NAME: {n}'.format(n=widget_var_name))
logging.debug(u'WIDGET_VAR: {v}'.format(v=widget_var.get()))
# Split the device name off the widget name
item_name = widget_var.get()
logging.debug(u'NAME: {v}'.format(v=item_name))
self.dynamic_frame.item_dict_name = item_name
def add_edit_trace(self,
widget_var_name,
widget_var):
logging.debug(u'WIDGET_VAR_NAME: {n}'.format(n=widget_var_name))
logging.debug(u'WIDGET_VAR: {v}'.format(v=widget_var.get()))
widget_name_parts = widget_var_name.split(u'_')
# Split the device name off the widget name
item_name = widget_name_parts[2]
logging.debug(u'ITEM_NAME: {v}'.format(v=item_name))
item_value = widget_var.get()
logging.debug(u'ITEM_VALUE: {v}'.format(v=item_value))
self.dynamic_frame.item_dict[item_name] = item_value
def _add_edit(self,
edit=False):
if edit:
# Set the key for the edit layout
key = u'{c}.root_layouts.edit_device_layout'.format(c=dynamic.LAYOUT_CFG)
# Load item to be edited
selected = self.dynamic_frame.selected.get()
logging.debug(u'SELECTED: {d}'.format(d=selected))
item_key = u'{c}.{i}'.format(c=TEST_LAYOUT_CFG,
i=selected)
self.dynamic_frame.item_dict_name = selected
self.dynamic_frame.item_dict = self.cfg[item_key].copy()
else:
# Set the key for the add layout
key = u'{c}.root_layouts.add_device_layout'.format(c=dynamic.LAYOUT_CFG)
# Load a blank item
self.dynamic_frame.item_dict = DEFAULT_DEVICE
# Change the DynamicFrame layout to use the add/edit layout
self.dynamic_frame.layout = self.cfg[key]
# Call refresh to redraw with new layout
self.dynamic_frame.refresh()
def _return_to_root_layout(self):
self.dynamic_frame.layout = self.cfg[self.key]
self.dynamic_frame.item_dict_name = u''
self.dynamic_frame.item_dict = None
self.dynamic_frame.refresh()
# Button Methods
def add(self):
self._add_edit()
def edit(self):
self._add_edit(edit=True)
def delete(self):
selected = self.dynamic_frame.selected.get()
logging.debug(u'SELECTED: {d}'.format(d=selected))
result = askquestion(u"Delete Device",
u"Are you sure you want to delete: {item}?".format(item=selected),
parent=self._main_frame)
if result == u'yes':
key = u'{c}.{i}'.format(c=TEST_LAYOUT_CFG,
i=selected)
del self.cfg[key]
            new_selected = list(self.cfg[TEST_LAYOUT_CFG].keys())[0]
select_key = u'{c}.{k}'.format(c=TEST_LAYOUT_CFG,
k=new_selected)
default = self.dynamic_frame.default.get()
if default == selected:
self.cfg[u'{c}.default'.format(c=select_key)] = True
self.dynamic_frame.default.set(new_selected)
self.dynamic_frame.selected.set(new_selected)
self.dynamic_frame.refresh()
def set_default(self):
selected = self.dynamic_frame.selected.get()
logging.debug(u'SELECTED: {d}'.format(d=selected))
for item in self.cfg[TEST_LAYOUT_CFG]:
key = u'{c}.{i}.{d}'.format(c=TEST_LAYOUT_CFG,
i=item,
d=LINE_DEFAULT)
self.cfg[key] = True if item == selected else False
# Call refresh to redraw with new layout
self.dynamic_frame.refresh()
def cancel(self):
self._return_to_root_layout()
def save(self):
key = u'{c}.{i}'.format(c=TEST_LAYOUT_CFG,
i=self.dynamic_frame.item_dict_name)
self.cfg[key] = self.dynamic_frame.item_dict
self._return_to_root_layout()
# Register configuration
Configuration().register(config=TEST_LAYOUT_CFG,
config_type=cfg_params.CONST.json,
template=templates.example_config)
cw = ExampleDeviceConfigWindow(layout_key=u'{c}.root_layouts.device_config_layout'.format(c=dynamic.LAYOUT_CFG),
window_title=u'Device Config')
|
class Node:
def __init__(self, element):
self.item = element
self.next_link = None
class StackLL:
def __init__(self):
self.head = None
def is_empty(self):
if self.head is None:
print("Error! The list is empty!")
return True
else:
return False
def get_head(self):
if StackLL.is_empty(self):
return False
else:
print(self.head.item)
return self.head.item
def print_stack(self):
if StackLL.is_empty(self):
return False
else:
node = self.head
while node is not None:
print(node.item)
node = node.next_link
def search_item(self, element):
if not StackLL.is_empty(self):
node = self.head
while node is not None:
if node.item == element:
print("Found")
return True
node = node.next_link
print("Not found")
return False
def push(self, element):
if self.head is None:
self.head = Node(element)
else:
new_node = Node(element)
new_node.next_link = self.head
self.head = new_node
def pop(self):
if StackLL.is_empty(self):
return False
pop_head = self.head
self.head = self.head.next_link
pop_head.next_link = None
print(pop_head.item)
return pop_head
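# A small usage sketch (values chosen arbitrarily) exercising the stack above.
if __name__ == "__main__":
    stack = StackLL()
    for value in (1, 2, 3):
        stack.push(value)
    stack.print_stack()   # prints 3, 2, 1 (LIFO order)
    stack.search_item(2)  # prints "Found"
    stack.pop()           # removes and prints 3
    stack.get_head()      # prints 2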
|
import json
from collections import defaultdict
from datetime import datetime
from pathlib import Path
import click
from cumulusci.core.exceptions import FlowNotFoundError
from cumulusci.core.utils import format_duration
from cumulusci.utils import document_flow, flow_ref_title_and_intro
from cumulusci.utils.yaml.safer_loader import load_yaml_data
from .runtime import pass_runtime
from .ui import CliTable
from .utils import group_items
@click.group("flow", help="Commands for finding and running flows for a project")
def flow():
pass
@flow.command(name="doc", help="Exports RST format documentation for all flows")
@click.option(
"--project", "project", is_flag=True, help="Include project-specific flows only"
)
@pass_runtime(require_project=False, require_keychain=True)
def flow_doc(runtime, project=False):
flow_info_path = Path(__file__, "..", "..", "..", "docs", "flows.yml").resolve()
with open(flow_info_path, "r", encoding="utf-8") as f:
flow_info = load_yaml_data(f)
click.echo(flow_ref_title_and_intro(flow_info["intro_blurb"]))
flow_info_groups = list(flow_info["groups"].keys())
universal_flows = runtime.universal_config.list_flows()
if project:
flows = [
flow
for flow in runtime.project_config.list_flows()
if flow not in universal_flows
]
else:
flows = universal_flows
flows_by_group = group_items(flows)
flow_groups = sorted(
flows_by_group.keys(),
key=lambda group: flow_info_groups.index(group)
if group in flow_info_groups
else 100,
)
for group in flow_groups:
click.echo(f"{group}\n{'-' * len(group)}")
if group in flow_info["groups"]:
click.echo(flow_info["groups"][group]["description"])
for flow in sorted(flows_by_group[group]):
flow_name, flow_description = flow
try:
flow_coordinator = runtime.get_flow(flow_name)
except FlowNotFoundError as e:
raise click.UsageError(str(e))
additional_info = None
if flow_name in flow_info.get("flows", {}):
additional_info = flow_info["flows"][flow_name]["rst_text"]
click.echo(
document_flow(
flow_name,
flow_description,
flow_coordinator,
additional_info=additional_info,
)
)
click.echo("")
@flow.command(name="list", help="List available flows for the current context")
@click.option("--plain", is_flag=True, help="Print the table using plain ascii.")
@click.option("--json", "print_json", is_flag=True, help="Print a json string")
@pass_runtime(require_project=False)
def flow_list(runtime, plain, print_json):
plain = plain or runtime.universal_config.cli__plain_output
flows = runtime.get_available_flows()
if print_json:
click.echo(json.dumps(flows))
return None
flow_groups = group_items(flows)
for group, flows in flow_groups.items():
data = [["Flow", "Description"]]
data.extend(sorted(flows))
table = CliTable(
data,
group,
)
table.echo(plain)
click.echo(
"Use "
+ click.style("cci flow info <flow_name>", bold=True)
+ " to get more information about a flow."
)
@flow.command(name="info", help="Displays information for a flow")
@click.argument("flow_name")
@pass_runtime(require_keychain=True)
def flow_info(runtime, flow_name):
try:
coordinator = runtime.get_flow(flow_name)
output = coordinator.get_summary(verbose=True)
click.echo(output)
except FlowNotFoundError as e:
raise click.UsageError(str(e))
@flow.command(name="run", help="Runs a flow")
@click.argument("flow_name")
@click.option(
"--org",
help="Specify the target org. By default, runs against the current default org",
)
@click.option(
"--delete-org",
is_flag=True,
help="If set, deletes the scratch org after the flow completes",
)
@click.option(
"--debug", is_flag=True, help="Drops into pdb, the Python debugger, on an exception"
)
@click.option(
"-o",
nargs=2,
multiple=True,
help="Pass task specific options for the task as '-o taskname__option value'. You can specify more than one option by using -o more than once.",
)
@click.option(
"--no-prompt",
is_flag=True,
help="Disables all prompts. Set for non-interactive mode use such as calling from scripts or CI systems",
)
@pass_runtime(require_keychain=True)
def flow_run(runtime, flow_name, org, delete_org, debug, o, no_prompt):
# Get necessary configs
org, org_config = runtime.get_org(org)
if delete_org and not org_config.scratch:
raise click.UsageError("--delete-org can only be used with a scratch org")
# Parse command line options
options = defaultdict(dict)
if o:
for key, value in o:
if "__" in key:
task_name, option_name = key.split("__")
options[task_name][option_name] = value
else:
raise click.UsageError(
"-o option for flows should contain __ to split task name from option name."
)
# Create the flow and handle initialization exceptions
try:
coordinator = runtime.get_flow(flow_name, options=options)
start_time = datetime.now()
coordinator.run(org_config)
duration = datetime.now() - start_time
click.echo(f"Ran {flow_name} in {format_duration(duration)}")
except Exception:
runtime.alert(f"Flow error: {flow_name}")
raise
finally:
# Delete the scratch org if --delete-org was set
if delete_org:
try:
org_config.delete_org()
except Exception as e:
click.echo(
"Scratch org deletion failed. Ignoring the error below to complete the flow:"
)
click.echo(str(e))
runtime.alert(f"Flow Complete: {flow_name}")
|
import unittest
import unittest.mock
import edifice._component as component
import edifice.engine as engine
import edifice.base_components as base_components
from edifice.qt import QT_VERSION
if QT_VERSION == "PyQt5":
from PyQt5 import QtWidgets
else:
from PySide2 import QtWidgets
if QtWidgets.QApplication.instance() is None:
app = QtWidgets.QApplication(["-platform", "offscreen"])
class MockComponent(component.Component):
@component.register_props
def __init__(self, recursion_level):
super().__init__()
self.will_unmount = unittest.mock.MagicMock()
self.did_mount = unittest.mock.MagicMock()
self.did_render = unittest.mock.MagicMock()
def render(self):
if self.props.recursion_level == 1:
return base_components.Label("Test")
else:
return base_components.View()(
MockComponent(self.props.recursion_level + 1)
)
class ComponentLifeCycleTestCase(unittest.TestCase):
def test_mount_and_dismount(self):
component = MockComponent(0)
app = engine.RenderEngine(component)
render_results = app._request_rerender([component])
render_results.run()
component.did_mount.assert_called_once()
component.did_render.assert_called_once()
class OtherMockComponent(component.Component):
@component.register_props
def __init__(self):
super().__init__()
class MockController(object):
_request_rerender = unittest.mock.MagicMock()
self._controller = MockController()
class MockBrokenComponent(component.Component):
@component.register_props
def __init__(self):
super().__init__()
class MockController(object):
def _request_rerender(*args, **kwargs):
raise ValueError("I am broken")
self._controller = MockController()
class StorageManagerTestCase(unittest.TestCase):
def test_record(self):
class A(object):
value = 0
obj = A()
with engine._storage_manager() as storage_manager:
storage_manager.set(obj, "value", 1)
self.assertEqual(obj.value, 1)
self.assertEqual(obj.value, 1)
    def test_record_rollback_on_exception(self):
class A(object):
value = 0
obj = A()
try:
with engine._storage_manager() as storage_manager:
storage_manager.set(obj, "value", 1)
self.assertEqual(obj.value, 1)
raise ValueError
except ValueError:
pass
self.assertEqual(obj.value, 0)
class ComponentTestCase(unittest.TestCase):
def test_render_changes(self):
a = OtherMockComponent()
a.foo = 1
a.bar = 2
with a.render_changes():
a.foo = 3
self.assertEqual(a.foo, 3)
a.bar = 0
self.assertEqual(a.foo, 3)
self.assertEqual(a.bar, 0)
a._controller._request_rerender.assert_called_once()
a._controller._request_rerender.reset_mock()
try:
with a.render_changes():
a.bar = 1
self.assertEqual(a.bar, 1)
a.foo = 1 / 0
except ZeroDivisionError:
pass
self.assertEqual(a.foo, 3)
self.assertEqual(a.bar, 0)
a._controller._request_rerender.assert_not_called()
def test_state_change_unwind(self):
a = MockBrokenComponent()
a.foo = 1
a.bar = 2
exception_thrown = False
try:
with a.render_changes():
a.foo = 3
self.assertEqual(a.foo, 3)
a.bar = 0
except ValueError as e:
if str(e) == "I am broken":
exception_thrown = True
self.assertTrue(exception_thrown)
self.assertEqual(a.foo, 1)
self.assertEqual(a.bar, 2)
exception_thrown = False
try:
a.set_state(foo=3, bar=0)
except ValueError as e:
if str(e) == "I am broken":
exception_thrown = True
self.assertTrue(exception_thrown)
self.assertEqual(a.foo, 1)
self.assertEqual(a.bar, 2)
class MakeComponentTestCase(unittest.TestCase):
def test_make_component(self):
@component.make_component
def Component1234(self, prop1, prop2, children):
return 1234
self.assertEqual(Component1234.__name__, "Component1234")
comp = Component1234(1, 2)
self.assertEqual(comp.__class__, Component1234)
self.assertEqual(comp.props._d, {"prop1": 1, "prop2": 2, "children": []})
self.assertEqual(comp.render(), 1234)
|
# Generated by Django 3.0.6 on 2021-02-20 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('security', '0012_auto_20210202_1002'),
]
operations = [
migrations.CreateModel(
name='CveList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cve_id', models.CharField(max_length=25)),
('affected_packages', models.TextField()),
('status', models.SmallIntegerField(choices=[('NOT_FIXED', 0), ('FIXED', 1), ('IGNORED', 2), ('DEFERRED', 3)], default=0)),
('next_check_date', models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name='VulsScanResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('server_name', models.CharField(max_length=255, unique=True)),
('family', models.CharField(max_length=255, null=True)),
('release', models.CharField(max_length=255, null=True)),
('scan_date', models.DateTimeField()),
('cve_list', models.ManyToManyField(to='security.CveList')),
],
),
]
|
# Django
from django.contrib import admin
# Register your models here.
from .models import NewCommunity, JoinUser
admin.site.register(NewCommunity)
admin.site.register(JoinUser)
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from threading import RLock
from collections import deque
from sawtooth_validator.journal.block_cache import BlockCache
from sawtooth_validator.journal.block_wrapper import BlockWrapper
from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
from sawtooth_validator.journal.timed_cache import TimedCache
from sawtooth_validator.protobuf.batch_pb2 import Batch
from sawtooth_validator.protobuf.block_pb2 import Block
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_validator.protobuf.client_pb2 import ClientBatchSubmitRequest
from sawtooth_validator.protobuf import network_pb2
from sawtooth_validator.networking.dispatch import Handler
from sawtooth_validator.networking.dispatch import HandlerResult
from sawtooth_validator.networking.dispatch import HandlerStatus
LOGGER = logging.getLogger(__name__)
class Completer(object):
"""
The Completer is responsible for making sure blocks are formally
complete before they are delivered to the chain controller. A formally
complete block is a block whose predecessor is in the block cache and all
the batches are present in the batch list and in the order specified by the
block header. If the predecessor or a batch is missing, a request message
    is sent out over the gossip network. It also checks that all batches
    have their dependencies satisfied, otherwise it will request the batch that
has the missing transaction.
"""
def __init__(self,
block_store,
gossip,
cache_keep_time=300,
cache_purge_frequency=30,
requested_keep_time=1200):
"""
:param block_store (dictionary) The block store shared with the journal
:param gossip (gossip.Gossip) Broadcasts block and batch request to
peers
:param cache_keep_time (float) Time in seconds to keep values in
TimedCaches.
:param cache_purge_frequency (float) Time between purging the
               TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids of
               requested blocks and batches before they can be re-requested.
        """
self.gossip = gossip
self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
self.block_cache = BlockCache(block_store,
cache_keep_time,
cache_purge_frequency)
self._block_store = block_store
# avoid throwing away the genesis block
self.block_cache[NULL_BLOCK_IDENTIFIER] = None
self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
self._incomplete_batches = TimedCache(cache_keep_time,
cache_purge_frequency)
self._incomplete_blocks = TimedCache(cache_keep_time,
cache_purge_frequency)
self._requested = TimedCache(requested_keep_time,
cache_purge_frequency)
self._on_block_received = None
self._on_batch_received = None
self.lock = RLock()
def _complete_block(self, block):
""" Check the block to see if it is complete and if it can be passed to
the journal. If the block's predecessor is not in the block_cache
            the predecessor is requested and the current block is added to
            the incomplete_block cache. If the block.batches and
block.header.batch_ids are not the same length, the batch_id list
is checked against the batch_cache to see if the batch_list can be
built. If any batches are missing from the block and we do not have
the batches in the batch_cache, they are requested. The block is
then added to the incomplete_block cache. If we can complete the
block, a new batch list is created in the correct order and added
to the block. The block is now considered complete and is returned.
If block.batches and block.header.batch_ids are the same length,
the block's batch list needs to be in the same order as the
block.header.batch_ids list. If the block has all of its expected
            batches but they are not in the correct order, the batch list is rebuilt
and added to the block. Once a block has the correct batch list it
is added to the block_cache and is returned.
"""
if block.header_signature in self.block_cache:
LOGGER.debug("Drop duplicate block: %s", block)
return None
if block.previous_block_id not in self.block_cache:
if block.previous_block_id not in self._incomplete_blocks:
self._incomplete_blocks[block.previous_block_id] = [block]
elif block not in self._incomplete_blocks[block.previous_block_id]:
self._incomplete_blocks[block.previous_block_id] += [block]
# We have already requested the block, do not do so again
if block.previous_block_id in self._requested:
return None
LOGGER.debug("Request missing predecessor: %s",
block.previous_block_id)
self._requested[block.previous_block_id] = None
self.gossip.broadcast_block_request(block.previous_block_id)
return None
# Check for same number of batch_ids and batches
        # If the counts differ, try to build the full batch list; if there are
        # more batches than batch_ids, a batch does not belong and the block
        # should be dropped.
if len(block.batches) > len(block.header.batch_ids):
LOGGER.debug("Block has extra batches. Dropping %s", block)
return None
# used to supplement batch_cache, contains batches already in block
temp_batches = {}
for batch in block.batches:
temp_batches[batch.header_signature] = batch
# The block is missing batches. Check to see if we can complete it.
if len(block.batches) != len(block.header.batch_ids):
building = True
for batch_id in block.header.batch_ids:
if batch_id not in self.batch_cache and \
batch_id not in temp_batches:
# Request all missing batches
if batch_id not in self._incomplete_blocks:
self._incomplete_blocks[batch_id] = [block]
elif block not in self._incomplete_blocks[batch_id]:
self._incomplete_blocks[batch_id] += [block]
# We have already requested the batch, do not do so again
if batch_id in self._requested:
return None
self._requested[batch_id] = None
self.gossip.broadcast_batch_by_batch_id_request(batch_id)
building = False
if not building:
# The block cannot be completed.
return None
batches = self._finalize_batch_list(block, temp_batches)
del block.batches[:]
# reset batches with full list batches
block.batches.extend(batches)
if block.header_signature in self._requested:
del self._requested[block.header_signature]
return block
else:
batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if batches are in the correct order.
if batch_id_list == list(block.header.batch_ids):
if block.header_signature in self._requested:
del self._requested[block.header_signature]
return block
# Check to see if the block has all batch_ids and they can be put
# in the correct order
elif sorted(batch_id_list) == sorted(list(block.header.batch_ids)):
batches = self._finalize_batch_list(block, temp_batches)
# Clear batches from block
del block.batches[:]
# reset batches with full list batches
if batches is not None:
block.batches.extend(batches)
else:
return None
if block.header_signature in self._requested:
del self._requested[block.header_signature]
return block
else:
LOGGER.debug("Block.header.batch_ids does not match set of "
"batches in block.batches Dropping %s", block)
return None
def _finalize_batch_list(self, block, temp_batches):
batches = []
for batch_id in block.header.batch_ids:
if batch_id in self.batch_cache:
batches.append(self.batch_cache[batch_id])
elif batch_id in temp_batches:
batches.append(temp_batches[batch_id])
else:
return None
return batches
def _complete_batch(self, batch):
valid = True
dependencies = []
for txn in batch.transactions:
txn_header = TransactionHeader()
txn_header.ParseFromString(txn.header)
for dependency in txn_header.dependencies:
# Check to see if the dependency has been seen or is in the
# current chain (block_store)
if dependency not in self._seen_txns and not \
self.block_cache.block_store.has_transaction(
dependency):
LOGGER.debug("Transaction %s in batch %s has "
"unsatisfied dependency: %s",
txn.header_signature,
batch.header_signature,
dependency)
# Check to see if the dependency has already been requested
if dependency not in self._requested:
dependencies.append(dependency)
self._requested[dependency] = None
if dependency not in self._incomplete_batches:
self._incomplete_batches[dependency] = [batch]
elif batch not in self._incomplete_batches[dependency]:
self._incomplete_batches[dependency] += [batch]
valid = False
if not valid:
self.gossip.broadcast_batch_by_transaction_id_request(
dependencies)
return valid
def _add_seen_txns(self, batch):
for txn in batch.transactions:
self._seen_txns[txn.header_signature] = batch.header_signature
def _process_incomplete_batches(self, key):
# Keys are transaction_id
if key in self._incomplete_batches:
batches = self._incomplete_batches[key]
for batch in batches:
self.add_batch(batch)
del self._incomplete_batches[key]
def _process_incomplete_blocks(self, key):
# Keys are either a block_id or batch_id
if key in self._incomplete_blocks:
to_complete = deque()
to_complete.append(key)
while to_complete:
my_key = to_complete.popleft()
if my_key in self._incomplete_blocks:
inc_blocks = self._incomplete_blocks[my_key]
for inc_block in inc_blocks:
if self._complete_block(inc_block):
self.block_cache[inc_block.header_signature] = \
inc_block
self._on_block_received(inc_block)
to_complete.append(inc_block.header_signature)
del self._incomplete_blocks[my_key]
def set_on_block_received(self, on_block_received_func):
self._on_block_received = on_block_received_func
def set_on_batch_received(self, on_batch_received_func):
self._on_batch_received = on_batch_received_func
def add_block(self, block):
with self.lock:
blkw = BlockWrapper(block)
block = self._complete_block(blkw)
if block is not None:
self.block_cache[block.header_signature] = blkw
self._on_block_received(blkw)
self._process_incomplete_blocks(block.header_signature)
def add_batch(self, batch):
with self.lock:
if batch.header_signature in self.batch_cache:
return
if self._complete_batch(batch):
self.batch_cache[batch.header_signature] = batch
self._add_seen_txns(batch)
self._on_batch_received(batch)
self._process_incomplete_blocks(batch.header_signature)
if batch.header_signature in self._requested:
del self._requested[batch.header_signature]
# If there was a batch waiting on this transaction, process
# that batch
for txn in batch.transactions:
if txn.header_signature in self._incomplete_batches:
if txn.header_signature in self._requested:
del self._requested[txn.header_signature]
self._process_incomplete_batches(txn.header_signature)
def get_chain_head(self):
"""Returns the block which is the current head of the chain.
Returns:
BlockWrapper: The head of the chain.
"""
with self.lock:
return self._block_store.chain_head
def get_block(self, block_id):
with self.lock:
if block_id in self.block_cache:
return self.block_cache[block_id]
return None
def get_batch(self, batch_id):
with self.lock:
if batch_id in self.batch_cache:
return self.batch_cache[batch_id]
else:
block_store = self.block_cache.block_store
try:
return block_store.get_batch(batch_id)
except ValueError:
return None
def get_batch_by_transaction(self, transaction_id):
with self.lock:
if transaction_id in self._seen_txns:
batch_id = self._seen_txns[transaction_id]
return self.get_batch(batch_id)
else:
block_store = self.block_cache.block_store
try:
return block_store.get_batch_by_transaction(transaction_id)
except ValueError:
return None
class CompleterBatchListBroadcastHandler(Handler):
def __init__(self, completer, gossip):
self._completer = completer
self._gossip = gossip
def handle(self, connection_id, message_content):
request = ClientBatchSubmitRequest()
request.ParseFromString(message_content)
for batch in request.batches:
if batch.trace:
LOGGER.debug("TRACE %s: %s", batch.header_signature,
self.__class__.__name__)
self._completer.add_batch(batch)
self._gossip.broadcast_batch(batch)
return HandlerResult(status=HandlerStatus.PASS)
class CompleterGossipHandler(Handler):
def __init__(self, completer):
self._completer = completer
def handle(self, connection_id, message_content):
gossip_message = network_pb2.GossipMessage()
gossip_message.ParseFromString(message_content)
if gossip_message.content_type == "BLOCK":
block = Block()
block.ParseFromString(gossip_message.content)
self._completer.add_block(block)
elif gossip_message.content_type == "BATCH":
batch = Batch()
batch.ParseFromString(gossip_message.content)
self._completer.add_batch(batch)
return HandlerResult(
status=HandlerStatus.PASS)
class CompleterGossipBlockResponseHandler(Handler):
def __init__(self, completer):
self._completer = completer
def handle(self, connection_id, message_content):
block_response_message = network_pb2.GossipBlockResponse()
block_response_message.ParseFromString(message_content)
block = Block()
block.ParseFromString(block_response_message.content)
self._completer.add_block(block)
return HandlerResult(status=HandlerStatus.PASS)
class CompleterGossipBatchResponseHandler(Handler):
def __init__(self, completer):
self._completer = completer
def handle(self, connection_id, message_content):
batch_response_message = network_pb2.GossipBatchResponse()
batch_response_message.ParseFromString(message_content)
batch = Batch()
batch.ParseFromString(batch_response_message.content)
self._completer.add_batch(batch)
return HandlerResult(status=HandlerStatus.PASS)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import
## batteries
import os
import sys
import shutil
import tempfile
import pytest
## 3rd party
import pandas as pd
## package
from pyTecanFluent import Fluent
from pyTecanFluent import Map2Robot
from pyTecanFluent import Utils
# data dir
test_dir = os.path.join(os.path.dirname(__file__))
data_dir = os.path.join(test_dir, 'data')
# tests
def test_help(script_runner):
ret = script_runner.run('pyTecanFluent', 'map2robot', '-h')
assert ret.success
def test_basic(script_runner, tmp_path):
output_prefix = os.path.join(str(tmp_path), 'basic')
map_file = os.path.join(data_dir, 'basic_96well.txt')
ret = script_runner.run('pyTecanFluent', 'map2robot', '--prefix',
output_prefix, map_file)
assert ret.success
def test_single_barcode(script_runner, tmp_path):
output_prefix = os.path.join(str(tmp_path), 'single-barcode')
map_file = os.path.join(data_dir, 'mapping_file_fecal_stability.txt')
ret = script_runner.run('pyTecanFluent', 'map2robot', '--prefix',
output_prefix, map_file)
assert ret.success
|
import unittest
import torch
import numpy as np
from leanai.core.indexed_tensor_helpers import *
class TestIndexedTensorHelpers(unittest.TestCase):
def setUp(self) -> None:
pass
def test_map_per_batch(self):
values = torch.tensor([1, 2, 3, 4, 5])
indices = torch.tensor([0, 0, 1, 1, 2])
def fun(start, stop, batch):
return batch + start
result = map_per_batch(fun, values, indices)
self.assertListEqual(list(result), [1,2,5,6,9])
def test_sliced_per_batch(self):
values = torch.tensor([1, 2, 3, 4, 5])
indices = torch.tensor([0, 0, 1, 1, 2])
target = [
(0, 2, [1, 2]),
(2, 4, [3, 4]),
(4, 5, [5])
]
result = [(a, b, list(c)) for a, b, c in sliced_per_batch(values, indices)]
self.assertListEqual(result, target)
def test_sliced_per_batch_np(self):
values = np.array([1, 2, 3, 4, 5])
indices = np.array([0, 0, 1, 1, 2])
target = [
(0, 2, [1, 2]),
(2, 4, [3, 4]),
(4, 5, [5])
]
result = [(a, b, list(c)) for a, b, c in sliced_per_batch_np(values, indices)]
self.assertListEqual(result, target)
if __name__ == "__main__":
unittest.main()
|
from abc import ABC, abstractmethod
import os.path
from typing import Iterable, List, NamedTuple, Optional, Tuple
import numpy as np
import tensorflow as tf
from tensorflow.data import Dataset
from clfw.util import save_results
Array = np.ndarray
class Task(NamedTuple):
train: Dataset
valid: Optional[Dataset]
test: Dataset
labels: Iterable[int]
def to_one_hot(task: Task, nlabels: int) -> Task:
train, valid, test = (task.train, task.valid, task.test)
def one_hot(feature: tf.Tensor, label: tf.Tensor):
return feature, tf.one_hot(label, nlabels)
train = train.map(one_hot)
return Task(train=train, valid=valid, test=test, labels=task.labels)
def evaluate(model: "Model", evaluation_sets: List[Dataset]) -> List[float]:
return [model.evaluate(eval_set) for eval_set in evaluation_sets]
class Model(ABC):
""" Base class for a model for continual learning """
@abstractmethod
def train(self,
training_set: Dataset,
validation_set: Dataset,
labels: Iterable[int]) -> None:
pass
@abstractmethod
def evaluate(self, test_set: Dataset) -> float:
""" Evaluate the model on a test set.
:return: Accuracy. Between 0 and 1.
"""
pass
@abstractmethod
def reset(self) -> None:
""" Reset the model to its initial state. """
pass
class TaskSequence:
""" Sequence of tasks to test a continual learning algorithm.
Attributes:
feature_dim: the dimension of a feature
labels_per_task: list of labels each task's training set contains
nlabels: number of total possible labels
ntasks: total number of tasks
training_sets: list of training sets
test_sets: list of test sets
"""
def __init__(self, nlabels: int, one_hot: bool = True,
tasks: Optional[Iterable[Task]] = None) -> None:
self.labels_per_task: List[Iterable[int]] = []
self.one_hot = one_hot
self.nlabels = nlabels
self.training_sets: List[Dataset] = []
self.validation_sets: List[Dataset] = []
self.test_sets: List[Dataset] = []
self.ntasks = 0
self.one_hot = one_hot
if tasks is not None:
for task in tasks:
self.append(task)
@property
def feature_dim(self) -> List[int]:
if not self.training_sets:
raise ValueError("There are no tasks yet.")
sample, _ = next(iter(self.training_sets[0]))
return [s.value for s in sample.shape]
def append(self, task: Task) -> None:
""" Append a training set test set pair to the sequence. """
self.ntasks += 1
if self.one_hot:
task = to_one_hot(task, self.nlabels)
self.training_sets.append(task.train)
self.validation_sets.append(task.valid)
self.test_sets.append(task.test)
self.labels_per_task.append(task.labels)
def _evaluate(self, model: Model, test: bool = False,
logdir: Optional[str] = None) -> Tuple[Array, Array]:
""" Evaluate the model using the given sequence of tasks.
Returns:
1. average_accuracy measured after learning each task
2. accuracy per task measured after learning each task
Assume there are N tasks.
            1 is a length N vector whose i-th element is the average accuracy over the
            evaluation sets of all tasks seen so far, measured after training on task i.
2 is a (N + 1) x N matrix whose (i, j)-th element is the accuracy on test set
of task j after training up to task i - 1.
"""
if test:
evaluation_sets = self.test_sets
else:
evaluation_sets = self.validation_sets
# initialize to -1
accuracy_matrix = np.zeros((self.ntasks + 1, self.ntasks)) - 1
average_accuracy = np.zeros((self.ntasks,)) - 1
accuracy_matrix[0, :] = evaluate(model, evaluation_sets)
parameters = zip(
self.training_sets, self.validation_sets, self.labels_per_task)
eval_sets_until_now = None
for train_idx, (params, eval_set) in enumerate(zip(parameters,
evaluation_sets)):
model.train(*params)
if train_idx == 0:
eval_sets_until_now = eval_set
else:
eval_sets_until_now = eval_sets_until_now.concatenate(eval_set)
accuracy_matrix[train_idx + 1, :] = evaluate(model, evaluation_sets)
average_accuracy[train_idx] = model.evaluate(eval_sets_until_now)
save_results(logdir=logdir, test=test,
average_accuracy=average_accuracy,
accuracy_matrix=accuracy_matrix)
return average_accuracy, accuracy_matrix
def validate(self, model: Model,
logdir: Optional[str] = None) -> Tuple[Array, Array]:
""" Evaluate the model using validation set. """
return self._evaluate(model=model, test=False, logdir=logdir)
def test(self, model: Model,
logdir: Optional[str] = None) -> Tuple[Array, Array]:
""" Evaluate the model using test set. """
return self._evaluate(model=model, test=True, logdir=logdir)
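# A hedged, comment-only sketch of how the classes above fit together
# (MyModel and the task objects are hypothetical):
#
#   seq = TaskSequence(nlabels=10, tasks=[task0, task1, task2])
#   avg_acc, acc_matrix = seq.validate(MyModel(), logdir="runs/exp1")
#   # acc_matrix[i + 1, j] is the accuracy on task j after training task i;
#   # row 0 holds the accuracies measured before any training.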
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ErrorBatch'
db.create_table('djangodblog_errorbatch', (
('status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_column='is_resolved')),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)),
('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)),
('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')()),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('djangodblog', ['ErrorBatch'])
# Adding unique constraint on 'ErrorBatch', fields ['logger', 'server_name', 'checksum']
db.create_unique('djangodblog_errorbatch', ['logger', 'server_name', 'checksum'])
# Adding model 'Error'
db.create_table('djangodblog_error', (
('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)),
('class_name', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('djangodblog', ['Error'])
def backwards(self, orm):
# Deleting model 'ErrorBatch'
db.delete_table('djangodblog_errorbatch')
# Removing unique constraint on 'ErrorBatch', fields ['logger', 'server_name', 'checksum']
db.delete_unique('djangodblog_errorbatch', ['logger', 'server_name', 'checksum'])
# Deleting model 'Error'
db.delete_table('djangodblog_error')
models = {
'djangodblog.error': {
'Meta': {'object_name': 'Error'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'djangodblog.errorbatch': {
'Meta': {'unique_together': "(('logger', 'server_name', 'checksum'),)", 'object_name': 'ErrorBatch'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_column': "'is_resolved'"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['djangodblog']
|
import sys
class Queue(object):
"""Abstract class for creating backend-specific queue implementations.
Your implementation should override all private methods and alter any
class attributes (e.g. FIFO) that do not apply to your backend."""
FIFO = True
SUPPORTS_DELAY = True
RECLAIMS_TO_BACK_OF_QUEUE = True
    MAX_PUSH_BATCH_SIZE = sys.maxsize
    MAX_POP_BATCH_SIZE = sys.maxsize
    MAX_COMPLETE_BATCH_SIZE = sys.maxsize
def __init__(self, *args, **kwargs):
raise NotImplementedError()
def _push(self, item):
raise NotImplementedError()
def _push_batch(self, items):
raise NotImplementedError()
def _pop(self):
raise NotImplementedError()
def _pop_batch(self, batch_size):
raise NotImplementedError()
def _touch(self, envelope, seconds):
"""Increase the visibility timeout on an
in-flight envelope by seconds."""
raise NotImplementedError()
def _complete(self, envelope):
raise NotImplementedError()
def _complete_batch(self, envelopes):
"""Returns a list of (envelope, success) where success
is a Boolean indicating whether the envelope was
completed successfully."""
raise NotImplementedError()
def _flush(self):
raise NotImplementedError()
def _stats(self):
"""Should return a dictionary with as many of the following
stat keys as the queue can report on:
- available
- in_flight
- delayed
"""
raise NotImplementedError()
def push(self, item):
return self._push(item)
def push_batch(self, items):
if len(items) > self.MAX_PUSH_BATCH_SIZE:
raise ValueError("Batch size cannot exceed {}.".format(self.MAX_PUSH_BATCH_SIZE))
return self._push_batch(items)
def pop(self):
return self._pop()
def pop_batch(self, batch_size):
if batch_size > self.MAX_POP_BATCH_SIZE:
raise ValueError("Batch size cannot exceed {}.".format(self.MAX_POP_BATCH_SIZE))
return self._pop_batch(batch_size)
def touch(self, envelope, seconds=10):
return self._touch(envelope, seconds)
def complete(self, envelope):
return self._complete(envelope)
def complete_batch(self, envelopes):
if len(envelopes) > self.MAX_COMPLETE_BATCH_SIZE:
raise ValueError("Batch size cannot exceed {}.".format(self.MAX_COMPLETE_BATCH_SIZE))
return self._complete_batch(envelopes)
def flush(self):
return self._flush()
def stats(self):
return self._stats()
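# A minimal, hypothetical in-memory backend, included only to illustrate the
# contract described above; the class name and deque-based storage are
# illustrative assumptions, not part of any real backend.
from collections import deque


class InMemoryQueue(Queue):
    """Toy FIFO backend keeping items in a deque (sketch, not production code)."""

    SUPPORTS_DELAY = False

    def __init__(self):
        # Intentionally do not call Queue.__init__, which raises.
        self._items = deque()

    def _push(self, item):
        self._items.append(item)

    def _push_batch(self, items):
        self._items.extend(items)

    def _pop(self):
        return self._items.popleft() if self._items else None

    def _pop_batch(self, batch_size):
        count = min(batch_size, len(self._items))
        return [self._items.popleft() for _ in range(count)]

    def _touch(self, envelope, seconds):
        pass  # nothing to extend; items are not tracked in flight

    def _complete(self, envelope):
        pass  # popped items are already removed

    def _complete_batch(self, envelopes):
        return [(envelope, True) for envelope in envelopes]

    def _flush(self):
        self._items.clear()

    def _stats(self):
        return {"available": len(self._items), "in_flight": 0, "delayed": 0}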
|
from strava_client import StravaClient
strava_client = StravaClient()
strava_client.sync(direction="forward")
|
import matplotlib.pyplot as plt
import numpy as np
#import dnpdata
from .dnpData import dnpdata
figure = plt.figure
legend = plt.legend
xlim = plt.xlim
ylim = plt.ylim
gca = plt.gca
dark_green = '#46812B'
light_green = '#67AE3E'
dark_grey = '#4D4D4F'
light_grey = '#A7A9AC'
orange = '#F37021'
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['axes.prop_cycle'] = plt.cycler(color = [orange, dark_green, light_green, dark_grey, light_grey])
def imshow(data, *args, **kwargs):
'''Image Plot for dnpdata object
Args:
data (dnpdata): dnpdata object for image plot
args: args for matplotlib imshow function
kwargs: kwargs for matplotlib imshow function
Example::
# Plotting a dnpdata object
dnp.dnpResults.plt.figure()
dnp.dnpResults.imshow(data)
dnp.dnpResults.plt.show()
# Plotting a workspace (dnpdata_collection)
dnp.dnpResults.plt.figure()
dnp.dnpResults.imshow(ws['proc'])
dnp.dnpResults.plt.show()
'''
dims = data.dims
x_coord = data.coords[dims[1]]
y_coord = data.coords[dims[0]]
x_min = np.min(x_coord)
x_max = np.max(x_coord)
y_min = np.min(y_coord)
y_max = np.max(y_coord)
    plt.imshow(data.values, *args, aspect = 'auto', extent = [x_min, x_max, y_max, y_min], **kwargs)
plt.xlabel(dims[1])
plt.ylabel(dims[0])
def plot(data, *args, **kwargs):
'''Plot function for dnpdata object
Args:
data (dnpdata): dnpdata object for matplotlib plot function
args: args for matplotlib plot function
kwargs: kwargs for matplotlib plot function
Example::
# Plotting a dnpdata object
dnp.dnpResults.plt.figure()
dnp.dnpResults.plot(data)
dnp.dnpResults.plt.show()
# Plotting a workspace (dnpdata_collection)
dnp.dnpResults.plt.figure()
dnp.dnpResults.plot(ws['proc'])
dnp.dnpResults.plt.show()
# Plotting two curves on the same figure
dnp.dnpResults.plt.figure()
dnp.dnpResults.plot(ws['proc1'])
dnp.dnpResults.plot(ws['proc2'])
dnp.dnpResults.plt.show()
# Plotting with some custom parameters
dnp.dnpResults.plt.figure()
dnp.dnpResults.plot(ws['proc'], 'k-', linewidth = 3.0, alpha = 0.5)
dnp.dnpResults.plt.show()
'''
coord = data.coords[0]
dim = data.dims[0]
plt.plot(coord, data.values, *args, **kwargs)
show = plt.show
if __name__ == '__main__':
pass
|
import numpy as np
import ctypes
import click
import logging
import os
import sys
sys.path.append('src/')
from frequentDirections import FrequentDirections
from randomProjections import RandomProjections
from randomSums import RandomSums
from rowSampler import RowSampler
from svdEmbedding import SVDEmbedding
from log_transforms import log_ppr, log_ppr_maxone, log_ppr_plusone
from scipy.io import loadmat
from tqdm import tqdm
try:
from telepyth import TelepythClient
except ImportError:
from utils import LazyClass as TelepythClient
TPC = TelepythClient()
DATASETS = ['POS', 'blogcatalog', 'Homo_sapiens', 'flickr', 'academic_coa_2014', 'academic_confs', 'vk2016']
SKETCHERS = {'fd': FrequentDirections, 'rp': RandomProjections, 's': RowSampler, 'h': RandomSums, 'svd': SVDEmbedding}
LOG_TRANSFORMS = {'log': log_ppr, 'add': log_ppr_plusone, 'max': log_ppr_maxone}
@click.command()
@click.argument('method', type=click.Choice(SKETCHERS.keys()))
@click.argument('dataset', type=click.Choice(DATASETS))
@click.argument('log', type=click.Choice(LOG_TRANSFORMS))
@click.argument('d', type=int)
@click.argument('datapath', type=click.Path(exists=True))
@click.option('--random_seed', type=int, default=-1, help='Random seed. If -1, use random one.')
@click.option('--random_order', type=bool, default=True, help='If True, shuffle rows before feeding them to a sketch (default: True).')
@click.option('--rotate', type=bool, default=True, help='Rotate (default: True).')
@click.option('--take_root', type=bool, default=True, help='Take root (default: True).')
@click.option('--left_vectors', type=bool, default=False, help='Which vectors to take if sketcher is SVD (default: True).')
@click.option('--algo', type=str, default='full', help='Which algo to use for SVD (default: full).')
@click.option('--ctrl_threads', type=bool, default=True, help='Whether or not to control the number of used CPU cores (default: True).')
@click.option('--ncores', type=int, default=1, help='The number of CPU cores to use (default: 1).')
def main(method, dataset, log, d, datapath, random_seed, random_order, rotate, take_root, left_vectors, algo, ctrl_threads, ncores):
TPC.send_text(f'Calculating embeddings for {dataset} with method {method}')
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
root.info(f'Calculating embeddings for {dataset} with method {method} rotate_{rotate} takeroot_{take_root}')
root.info(f'd: {d}, random seed: {random_seed}')
if method == 'svd':
root.info(f'using left vectors: {left_vectors}, algorithm: {algo}')
filename = f'/d_{d}_log_{log}_seed_{random_seed}_rotate_{rotate}_takeroot_{take_root}'
if method == 'svd':
filename = filename + f'_leftvectors_{left_vectors}_algo_{algo}'
directory = f'results/{method}/{dataset}'
if os.path.exists(directory+filename):
root.info(f'File exists! Aborting')
return
# random seed
if random_seed != -1:
np.random.seed(random_seed)
# control the number of cores to be used
if ctrl_threads:
mkl_rt = ctypes.CDLL('libmkl_rt.so')
mkl_get_max_threads = mkl_rt.mkl_get_max_threads
def mkl_set_num_threads(cores):
mkl_rt.mkl_set_num_threads(ctypes.byref(ctypes.c_int(cores)))
mkl_set_num_threads(ncores)
root.info(f'CPUs used: {mkl_get_max_threads()}')
# load dataset matfile
matf = loadmat(datapath + f'/{dataset}.mat')
G = matf['network']
n = G.shape[0]
if d > n:
        root.error(f'd is too high for the given dataset (n={n}).')
        raise Exception(f'd is too high for the given dataset (n={n}).')
# get precomputed PPR for the dataset
PPR = np.fromfile(datapath + f'/ppr/{dataset}.bin', dtype=np.float32).reshape(n, n) # n,n catches errors when the matrix is of unexpected size
log_transformer = LOG_TRANSFORMS[log]
log_transformer(PPR, n)
# reorder rows
ordering = np.arange(n)
if random_order:
np.random.shuffle(ordering)
# compute a sketch
if method == 'svd':
sketcher = SKETCHERS[method](n, d, left_vectors, algo)
else:
sketcher = SKETCHERS[method](n, d)
if method == 'svd':
sketcher.compute(PPR)
else:
for i in tqdm(range(n)):
sketcher.append(PPR[ordering[i], :])
# get embeddings
if method == 'fd':
embs = sketcher.get(rotate=rotate, take_root=take_root)
elif method == 'svd':
embs = sketcher.get(take_root=take_root)
else:
embs = sketcher.get()
if take_root and rotate:
[_, s, Vt] = np.linalg.svd(embs, full_matrices=False)
embs = np.diag(np.sqrt(s)) @ Vt
# save embeddings
if not os.path.exists(directory):
os.makedirs(directory)
root.info(embs.shape)
embs.T.tofile(directory + filename) # return n x d matrix
logging.info('Embeddings saved.')
TPC.send_text('Embeddings calculated and saved.')
if __name__ == '__main__':
main()
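# Example invocation (hypothetical script name and paths; assumes <datapath>/<dataset>.mat
# and a precomputed PPR matrix at <datapath>/ppr/<dataset>.bin already exist):
#   python <this_script>.py fd blogcatalog log 128 /path/to/data --ncores 4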
|
#!/usr/bin/env python
from __future__ import print_function
import gdb
class SkiplistPrintCommand(gdb.Command):
"""Iterate and print a list.
skip <EXPR> [MAX]
Given a list EXPR, iterate though the list nodes' ->next pointers, printing
each node iterated. We will iterate thorugh MAX list nodes, to prevent
infinite loops with corrupt lists. If MAX is zero, we will iterate the
entire list.
List nodes types are expected to have a member named "next". List types
may be the same as node types, or a separate type with an explicit
head node, called "head"."""
MAX_ITER = 10
def __init__(self):
super(SkiplistPrintCommand, self).__init__("skiplist-print", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
def invoke(self, _args, from_tty):
args = gdb.string_to_argv(_args)
start_node = args[0]
if len(args) > 1:
max_iter = int(args[1])
else:
max_iter = self.MAX_ITER
if len(args) > 2:
lvl = int(args[2])
else:
lvl = 0
p_node_t = gdb.lookup_type('node_t').pointer()
long_t = gdb.lookup_type('long')
node = gdb.parse_and_eval(start_node)
        print(node)
        for i in range(max_iter):
            nexts = node['next']
            # Clear the low bit (used as a mark/flag) before dereferencing the next pointer.
            nxt = gdb.Value(nexts[lvl]).cast(long_t)
            nxt = nxt & ~1
            node = gdb.Value(nxt).cast(p_node_t).dereference()
            nexts = node['next']
            print(node['k'], node['level'], node['inserting'], end=' ')
            k = 0
            while k < node['level']:
                print(nexts[k], end=' ')
                k += 1
            print("")
SkiplistPrintCommand()
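# Example gdb session (hypothetical; assumes a binary that defines `node_t` and a
# skiplist head variable named `sl_head`):
#   (gdb) source skiplist_print.py
#   (gdb) skiplist-print sl_head 20 0
# prints up to 20 nodes by following the level-0 next pointers.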
|
from lib.die import Die
import pygal
''' Create a histogram: a bar chart that shows how often each possible result occurs. '''
# Create two D8 dice.
die_1 = Die(8)  # Create a Die instance with eight sides.
die_2 = Die(8)  # Create a Die instance with eight sides.
# Make some rolls and store the results in a list.
results = []
for roll_num in range(100000):  # Roll the dice 100,000 times and store each result in a list.
    result = die_1.roll() + die_2.roll()  # Sum of the two dice on each roll.
    results.append(result)
# Analyze the results.
frequencies = []  # Stores how many times each value was rolled.
max_result = die_1.num_sides + die_2.num_sides  # The largest possible sum is 16; the smallest is 2.
for value in range(2, max_result + 1):  # Loop over the possible values.
    frequency = results.count(value)  # Count how many times each value appears in results.
    frequencies.append(frequency)  # Append the count to the frequencies list.
# Visualize the results.
hist = pygal.Bar()  # Create a Bar chart instance and store it in hist.
# Set the title, x_labels, axis titles, data series, and output file.
hist.title = 'Results of rolling two D8 dice 100000 times.'
hist.x_labels = list(range(2, max_result + 1))
hist.x_title = 'Result'
hist.y_title = 'Frequency of result'
hist.add('D8 + D8', frequencies)  # Add a series of values to the chart.
hist.render_to_file('different1_dice_visual.svg')  # Render the chart to an SVG file;
                                                   # render_to_file expects a filename with a .svg extension.
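# Optional sanity check (not part of the original script): every roll falls
# between 2 and max_result, so the tallied frequencies sum to the number of rolls.
#   assert sum(frequencies) == len(results)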
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import sys
sys.path.insert(1,"../../../") # allow us to run this standalone
import numpy as np
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid import H2OGridSearch
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
from tests import pyunit_utils as pu
seed = 1
def test_models_not_predicting_some_classes_dont_corrupt_resulting_SE_model():
def unique(fr):
return np.unique(fr.as_data_frame().values).tolist()
def scores_and_preds(models, test):
retval = lambda d: d
if not isinstance(models, list):
models = [models]
retval = lambda d: next(iter(d.values()))
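            # retval unwraps the single-entry dicts built below when only one model
            # was passed; for a list of models the full per-model dicts are returned.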
training_scores = {m.key: m.mean_per_class_error() for m in models}
cv_scores = {m.key: m.mean_per_class_error(xval=True) for m in models}
test_scores = {m.key: m.model_performance(test).mean_per_class_error() for m in models}
test_predictions = {m.key: m.predict(test) for m in models}
test_pclasses = {m.key: unique(test_predictions[m.key]['predict']) for m in models}
return pu.ns(
training_scores=retval(training_scores),
cv_scores=retval(cv_scores),
test_scores=retval(test_scores),
test_pclasses=retval(test_pclasses),
)
def setup_data():
        # MNIST is a multinomial classification problem
train_full = h2o.import_file(pu.locate("bigdata/laptop/mnist/train.csv.gz"))
# test_full = h2o.import_file(pu.locate("bigdata/laptop/mnist/test.csv.gz"))
# train = train_full
# test = test_full
train, test, _ = train_full.split_frame(ratios=[.05, .1], seed=seed)
x = train.columns[:-1]
y = -1
for fr in [train]:
fr[y] = fr[y].asfactor()
domain = unique(train[y])
print(domain)
return pu.ns(x=x, y=y, train=train, test=test, domain=domain)
def train_base_models(data):
grid = H2OGridSearch(H2OGradientBoostingEstimator,
search_criteria=dict(
strategy='RandomDiscrete',
max_models=5,
seed=seed,
),
hyper_params=dict(
learn_rate=[0.5, 0.8, 1.0],
max_depth=[2, 3, 4, 5],
ntrees=[5, 10, 15],
),
)
grid.train(data.x, data.y, data.train,
nfolds=5,
fold_assignment='Modulo',
keep_cross_validation_predictions=True)
return grid.models
def train_bad_model(data):
glm = H2OGeneralizedLinearEstimator(family='multinomial',
missing_values_handling='MeanImputation',
alpha=[0.0, 0.2, 0.4, 0.6, 0.8, 1.0],
lambda_search=True,
nfolds=5,
fold_assignment='Modulo',
keep_cross_validation_predictions=True,
seed=seed)
glm.train(data.x, data.y, data.train, max_runtime_secs=2)
return glm
def check_stackedensemble_with_AUTO_metalearner(data, models):
se = H2OStackedEnsembleEstimator(base_models=models,
metalearner_nfolds=5,
seed=seed)
se.train(data.x, data.y, data.train)
results = scores_and_preds(se, data.test)
print(results)
assert data.domain == results.test_pclasses, "expected predicted classes {} but got {}".format(data.domain, results.test_pclasses)
def check_stackedensemble_with_DRF_metalearner(data, models):
se = H2OStackedEnsembleEstimator(base_models=models,
metalearner_algorithm='DRF',
metalearner_nfolds=5,
seed=seed)
se.train(data.x, data.y, data.train)
results = scores_and_preds(se, data.test)
print(results)
assert data.domain == results.test_pclasses, "expected predicted classes {} but got {}".format(data.domain, results.test_pclasses)
def check_stackedensemble_with_GLM_metalearner(data, models):
se = H2OStackedEnsembleEstimator(base_models=models,
metalearner_algorithm='GLM',
metalearner_nfolds=5,
seed=seed)
se.train(data.x, data.y, data.train)
results = scores_and_preds(se, data.test)
print(results)
assert data.domain != results.test_pclasses, "expected predictions not to include all target domain"
assert len(results.test_pclasses) == 1
def check_stackedensemble_with_GLM_metalearner_with_standardization_disabled(data, models):
se = H2OStackedEnsembleEstimator(base_models=models,
metalearner_algorithm='GLM',
metalearner_nfolds=5,
metalearner_params=dict(standardize=False),
seed=seed)
se.train(data.x, data.y, data.train)
results = scores_and_preds(se, data.test)
print(results)
assert data.domain == results.test_pclasses, "expected predicted classes {} but got {}".format(data.domain, results.test_pclasses)
data = setup_data()
base_models = train_base_models(data)
bad_model = train_bad_model(data)
# print(scores_and_preds(bad_model, data.test))
all_models = base_models + [bad_model]
check_stackedensemble_with_AUTO_metalearner(data, all_models)
check_stackedensemble_with_DRF_metalearner(data, all_models)
check_stackedensemble_with_GLM_metalearner(data, all_models)
check_stackedensemble_with_GLM_metalearner_with_standardization_disabled(data, all_models)
pu.run_tests([
test_models_not_predicting_some_classes_dont_corrupt_resulting_SE_model
])
|
import atexit
import random
import signal
import sys
from typing import Any, List, Optional
from ape.api import ReceiptAPI, TestProviderAPI, TransactionAPI, UpstreamProvider, Web3Provider
from ape.api.config import ConfigItem
from ape.exceptions import ContractLogicError, OutOfGasError, TransactionError, VirtualMachineError
from ape.logging import logger
from ape.utils import gas_estimation_error_message
from web3 import HTTPProvider, Web3
from web3.gas_strategies.rpc import rpc_gas_price_strategy
from .exceptions import HardhatProviderError, HardhatSubprocessError
from .process import HardhatProcess
EPHEMERAL_PORTS_START = 49152
EPHEMERAL_PORTS_END = 60999
HARDHAT_START_NETWORK_RETRIES = [0.1, 0.2, 0.3, 0.5, 1.0] # seconds between network retries
HARDHAT_START_PROCESS_ATTEMPTS = 3 # number of attempts to start subprocess before giving up
DEFAULT_PORT = 8545
def _signal_handler(signum, frame):
"""Runs on SIGTERM and SIGINT to force ``atexit`` handlers to run."""
atexit._run_exitfuncs()
sys.exit(143 if signum == signal.SIGTERM else 130)
class HardhatForkConfig(ConfigItem):
upstream_provider: Optional[str] = None
block_number: Optional[int] = None
class HardhatNetworkConfig(ConfigItem):
# --port <INT, default from Hardhat is 8545, but our default is to assign a random port number>
port: Optional[int] = None
# Retry strategy configs, try increasing these if you're getting HardhatSubprocessError
network_retries: List[float] = HARDHAT_START_NETWORK_RETRIES
process_attempts: int = HARDHAT_START_PROCESS_ATTEMPTS
# For setting the values in --fork and --fork-block-number command arguments.
# Used only in HardhatMainnetForkProvider.
mainnet_fork: Optional[HardhatForkConfig] = None
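    # Example (hypothetical) `hardhat` section of an ape-config.yaml using the
    # fields above; the exact keys accepted depend on your ape / ape-hardhat versions:
    #
    #   hardhat:
    #     port: 8555
    #     network_retries: [0.1, 0.2, 0.5]
    #     process_attempts: 5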
class HardhatProvider(Web3Provider, TestProviderAPI):
def __post_init__(self):
self._hardhat_web3 = (
None # we need to maintain a separate per-instance web3 client for Hardhat
)
self.port = self.config.port
self.process = None
self._config_manager = self.network.config_manager
self._base_path = self._config_manager.PROJECT_FOLDER
# When the user did not specify a port and we are attempting to start
# the process ourselves, we first try the default port of 8545. Otherwise,
# we pick a random port in an ephemeral range.
self._tried_default_port = False
# register atexit handler to make sure disconnect is called for normal object lifecycle
atexit.register(self.disconnect)
# register signal handlers to make sure atexit handlers are called when the parent python
# process is killed
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)
config = self._config_manager.get_config("test")
if hasattr(config, "mnemonic"):
mnemonic = config.mnemonic
number_of_accounts = config.number_of_accounts
else:
self._failing_to_load_test_plugins = True
logger.error("Failed to load config from 'ape-test' plugin, using default values.")
from ape_test import Config as TestConfig
_test_config_cls = TestConfig
mnemonic = _test_config_cls.__defaults__["mnemonic"]
number_of_accounts = _test_config_cls.__defaults__["number_of_accounts"]
self._mnemonic = mnemonic
self._number_of_accounts = number_of_accounts
def connect(self):
"""Start the hardhat process and verify it's up and accepting connections."""
if self.process:
raise HardhatProviderError(
"Cannot connect twice. Call disconnect before connecting again."
)
if self.port:
self._set_web3()
if not self._web3:
self._start_process()
self._set_web3()
else:
# We get here when user configured a port and the hardhat process
# was already running.
logger.info(f"Connecting to existing Hardhat node at port '{self.port}'.")
else:
for _ in range(self.config.process_attempts):
try:
self._start_process()
break
except HardhatSubprocessError as exc:
logger.info("Retrying hardhat subprocess startup: %r", exc)
self.port = None
self._set_web3()
def _set_web3(self):
self._web3 = Web3(HTTPProvider(self.uri))
if not self._web3.isConnected():
self._web3 = None
return
        # Verify this is actually a Hardhat provider;
        # otherwise skip it so we can possibly try another port.
client_version = self._web3.clientVersion
if "hardhat" in client_version.lower():
self._web3.eth.set_gas_price_strategy(rpc_gas_price_strategy)
else:
# This will trigger the plugin to try another port
# (provided the user did not request a specific port).
self._web3 = None
def _start_process(self):
if not self.port:
if not self._tried_default_port:
# Try port 8545 first.
self.port = DEFAULT_PORT
self._tried_default_port = True
else:
# Pick a random port if one isn't configured and 8545 is taken.
self.port = random.randint(EPHEMERAL_PORTS_START, EPHEMERAL_PORTS_END)
self.process = self._create_process()
self.process.start()
def _create_process(self):
"""
Sub-classes may override this to specify alternative values in the process,
such as using mainnet-fork mode.
"""
return HardhatProcess(self._base_path, self.port, self._mnemonic, self._number_of_accounts)
@property
def uri(self) -> str:
if not self.port:
raise HardhatProviderError("Can't build URI before `connect()` is called.")
return f"http://127.0.0.1:{self.port}"
@property # type: ignore
def _web3(self):
"""
This property overrides the ``EthereumProvider._web3`` class variable to return our
instance variable.
"""
return self._hardhat_web3
@_web3.setter
def _web3(self, value):
"""
Redirect the base class's assignments of self._web3 class variable to our instance variable.
"""
self._hardhat_web3 = value
@property
def priority_fee(self) -> int:
"""
Priority fee not needed in development network.
"""
return 0
def disconnect(self):
self._web3 = None
if self.process:
self.process.stop()
self.process = None
self.port = None
def _make_request(self, rpc: str, args: list) -> Any:
return self._web3.manager.request_blocking(rpc, args) # type: ignore
def set_block_gas_limit(self, gas_limit: int) -> bool:
return self._make_request("evm_setBlockGasLimit", [hex(gas_limit)])
def sleep(self, seconds: int) -> int:
return int(self._make_request("evm_increaseTime", [seconds]))
def mine(self, timestamp: Optional[int] = None) -> str:
return self._make_request("evm_mine", [timestamp] if timestamp else [])
def snapshot(self) -> str:
result = self._make_request("evm_snapshot", [])
return str(result)
def revert(self, snapshot_id: str):
if isinstance(snapshot_id, str) and snapshot_id.isnumeric():
snapshot_id = int(snapshot_id) # type: ignore
return self._make_request("evm_revert", [snapshot_id])
def unlock_account(self, address: str) -> bool:
return self._make_request("hardhat_impersonateAccount", [address])
def estimate_gas_cost(self, txn: TransactionAPI) -> int:
"""
Generates and returns an estimate of how much gas is necessary
to allow the transaction to complete.
The transaction will not be added to the blockchain.
"""
try:
return super().estimate_gas_cost(txn)
except ValueError as err:
tx_error = _get_vm_error(err)
# If this is the cause of a would-be revert,
# raise ContractLogicError so that we can confirm tx-reverts.
if isinstance(tx_error, ContractLogicError):
raise tx_error from err
message = gas_estimation_error_message(tx_error)
raise TransactionError(base_err=tx_error, message=message) from err
def send_transaction(self, txn: TransactionAPI) -> ReceiptAPI:
"""
Creates a new message call transaction or a contract creation
for signed transactions.
"""
try:
receipt = super().send_transaction(txn)
except ValueError as err:
raise _get_vm_error(err) from err
receipt.raise_for_status(txn)
return receipt
class HardhatMainnetForkProvider(HardhatProvider):
"""
A Hardhat provider that uses ``--fork``, like:
``npx hardhat node --fork <upstream-provider-url>``.
Set the ``upstream_provider`` in the ``hardhat.mainnet_fork`` config
    section of your ``ape-config.yaml`` file to specify which provider
to use as your archive node.
"""
def _create_process(self) -> HardhatProcess:
mainnet_fork = self.config.mainnet_fork or {} # type: ignore
upstream_provider_name = mainnet_fork.get("upstream_provider")
fork_block_num = mainnet_fork.get("block_number")
mainnet = self.network.ecosystem.mainnet
# NOTE: if `upstream_provider_name` is `None`, this gets the default mainnet provider.
upstream_provider = mainnet.get_provider(provider_name=upstream_provider_name)
if not isinstance(upstream_provider, UpstreamProvider):
raise HardhatProviderError(
f"Provider '{upstream_provider_name}' is not an upstream provider."
)
fork_url = upstream_provider.connection_str
if not fork_url:
raise HardhatProviderError("Upstream provider does not have a ``connection_str``.")
return HardhatProcess(
self._base_path,
self.port,
self._mnemonic,
self._number_of_accounts,
fork_url=fork_url,
fork_block_number=fork_block_num,
)
def _get_vm_error(web3_value_error: ValueError) -> TransactionError:
if not len(web3_value_error.args):
return VirtualMachineError(base_err=web3_value_error)
err_data = web3_value_error.args[0]
if not isinstance(err_data, dict):
return VirtualMachineError(base_err=web3_value_error)
message = str(err_data.get("message"))
if not message:
return VirtualMachineError(base_err=web3_value_error)
    # Handle `ContractLogicError` similarly to other providers in `ape`,
    # by stripping off the unnecessary prefix that hardhat adds to reverts.
hardhat_prefix = (
"Error: VM Exception while processing transaction: reverted with reason string "
)
if message.startswith(hardhat_prefix):
message = message.replace(hardhat_prefix, "").strip("'")
return ContractLogicError(revert_message=message)
elif "Transaction reverted without a reason string" in message:
return ContractLogicError()
elif message == "Transaction ran out of gas":
return OutOfGasError()
return VirtualMachineError(message=message)
|
import numpy as np
import tensorflow as tf
import torch
tf.compat.v1.enable_eager_execution()
### Model
def check_model():
from run_nerf_helpers import init_nerf_model
model = init_nerf_model(use_viewdirs=True)
print(model.summary())
print("--- Pytorch ---")
from run_nerf_helpers_torch import NeRF
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
model_torch = NeRF(use_viewdirs=True)
print(model_torch)
print(f"Number of parameters: {count_parameters(model_torch)}")
def check_get_rays():
from load_blender import load_blender_data as load_blender_data_tf
from run_nerf_helpers import get_rays
datadir = './test_data/nerf_synthetic/lego'
half_res = True
testskip = 1
white_bkgd = True
images, poses, render_poses, hwf, i_split = load_blender_data_tf(datadir, half_res, testskip)
if white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
i_train, i_val, i_test = i_split
near = 2.
far = 6.
H, W, focal = hwf
H, W = int(H), int(W)
hwf = [H, W, focal]
img_i = np.random.choice(i_train)
target = images[img_i]
pose = poses[img_i, :3,:4]
N_rand = 2
print(f"H, W, focal: {H}, {W}, {focal}")
rays_o, rays_d = get_rays(H, W, focal, pose)
print(f"rays_d, rays_o: {rays_d.shape}, {rays_o.shape}")
coords = tf.stack(tf.meshgrid(tf.range(H), tf.range(W), indexing='ij'), -1)
coords = tf.reshape(coords, [-1,2])
select_inds = np.random.choice(coords.shape[0], size=[N_rand], replace=False)
print(f"select_inds before gather: {select_inds}")
select_inds = tf.gather_nd(coords, select_inds[:,tf.newaxis])
print(f"select_inds after gather: {select_inds}")
rays_o = tf.gather_nd(rays_o, select_inds)
rays_d = tf.gather_nd(rays_d, select_inds)
batch_rays = tf.stack([rays_o, rays_d], 0)
target_s = tf.gather_nd(target, select_inds)
def check_preprocessing_one_image():
from run_nerf_helpers_torch import get_rays, get_rays_np
H, W, focal = int(400 / 40), int(400 / 40), 555.555 / 40
hwf = [H, W, focal]
pose = np.array([
[-0.9305, 0.1170, -0.3469, -1.3986],
[-0.3661, -0.2975, 0.8817, 3.554],
[0, 0.9475, 0.3197, 1.288]
])
# Sample inds of pixels
N_rand = 10
select_inds = np.random.choice(H, size=[N_rand], replace=False)
# tf
rays_o, rays_d = get_rays_np(H, W, focal, pose)
coords = tf.stack(tf.meshgrid(tf.range(H), tf.range(W), indexing='ij'), -1)
coords = tf.reshape(coords, [-1,2])
select_coords = tf.gather_nd(coords, select_inds[:,tf.newaxis])
rays_o = tf.gather_nd(rays_o, select_coords)
rays_d = tf.gather_nd(rays_d, select_coords)
rays_d = tf.cast(rays_d, tf.float64)
batch_rays = tf.stack([rays_o, rays_d], 0)
# torch
rays_o, rays_d = get_rays(H, W, focal, torch.Tensor(pose))
coords = torch.stack(torch.meshgrid(torch.linspace(0, H-1, H), torch.linspace(0, W-1, W)), -1) # (H, W, 2)
coords = torch.reshape(coords, [-1,2])
select_coords = coords[select_inds].long() # (N_rand, 2)
rays_o = rays_o[select_coords[:, 0], select_coords[:, 1]] # (N_rand, 3)
rays_d = rays_d[select_coords[:, 0], select_coords[:, 1]] # (N_rand, 3)
batch_rays_torch = torch.stack([rays_o, rays_d], 0)
assert np.allclose(batch_rays, batch_rays_torch.numpy())
check_preprocessing_one_image()
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2014 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
import jsonapi
@jsonapi.PrimaryResource
class Reviews(object):
"""The reviews in this system."""
name = "reviews"
value_class = api.review.Review
exceptions = (api.review.InvalidReviewId, api.repository.RepositoryError)
lists = ("issues", "notes")
@staticmethod
def json(value, parameters):
"""Review {
"id": integer,
"state": string,
"summary": string,
"description": string or null,
"repository": integer,
"branch": integer,
"owners": integer[],
"active_reviewers": integer[],
"assigned_reviewers": integer[],
"watchers": integer[],
"partitions": Partition[],
"issues": integer[],
"notes": integer[],
"pending_rebase": integer or null,
"progress": float,
"progress_per_commit": CommitChangeCount[],
}
Partition {
"commits": integer[],
"rebase": integer or null,
}
CommitChangeCount {
"commit_id": integer,
"total_changes": integer,
"reviewed_changes": integer,
}"""
def change_counts_as_dict(change_counts):
return [{
"commit_id": change_count.commit_id,
"total_changes": change_count.total_changes,
"reviewed_changes": change_count.reviewed_changes,
} for change_count in change_counts]
partitions = []
def add_partition(partition):
if partition.following:
partition_rebase = partition.following.rebase
else:
partition_rebase = None
partitions.append({ "commits": list(partition.commits.topo_ordered),
"rebase": partition_rebase })
if partition.following:
add_partition(partition.following.partition)
add_partition(value.first_partition)
return parameters.filtered(
"reviews", { "id": value.id,
"state": value.state,
"summary": value.summary,
"description": value.description,
"repository": value.repository,
"branch": value.branch,
"owners": jsonapi.sorted_by_id(value.owners),
"active_reviewers": jsonapi.sorted_by_id(
value.active_reviewers),
"assigned_reviewers": jsonapi.sorted_by_id(
value.assigned_reviewers),
"watchers": jsonapi.sorted_by_id(value.watchers),
"partitions": partitions,
"issues": jsonapi.sorted_by_id(value.issues),
"notes": jsonapi.sorted_by_id(value.notes),
"pending_rebase": value.pending_rebase,
"progress": value.total_progress,
"progress_per_commit":
change_counts_as_dict(value.progress_per_commit)})
@staticmethod
def single(parameters, argument):
"""Retrieve one (or more) reviews in this system.
REVIEW_ID : integer
Retrieve a review identified by its unique numeric id."""
return Reviews.setAsContext(parameters, api.review.fetch(
parameters.critic, review_id=jsonapi.numeric_id(argument)))
@staticmethod
def multiple(parameters):
"""Retrieve all reviews in this system.
repository : REPOSITORY : -
Include only reviews in one repository, identified by the
repository's unique numeric id or short-name.
state : STATE[,STATE,...] : -
Include only reviews in the specified state. Valid values are:
<code>open</code>, <code>closed</code>, <code>dropped</code>."""
repository = jsonapi.deduce("v1/repositories", parameters)
state_parameter = parameters.getQueryParameter("state")
if state_parameter:
state = set(state_parameter.split(","))
invalid = state - api.review.Review.STATE_VALUES
if invalid:
raise jsonapi.UsageError(
"Invalid review state values: %s"
% ", ".join(map(repr, sorted(invalid))))
else:
state = None
return api.review.fetchAll(
parameters.critic, repository=repository, state=state)
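    # Example requests (hypothetical paths, matching the parameters documented above):
    #   GET .../v1/reviews/123
    #   GET .../v1/reviews?repository=critic&state=open,dropped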
@staticmethod
def create(parameters, value, values, data):
critic = parameters.critic
path = parameters.subresource_path
review = value
if review:
if path == ["issues"] or path == ["notes"]:
Reviews.setAsContext(parameters, review)
if path == ["issues"]:
comment_type = "issue"
else:
comment_type = "note"
jsonapi.ensure(data, "type", comment_type)
raise jsonapi.InternalRedirect("v1/comments")
raise jsonapi.UsageError("Review creation not yet supported")
@staticmethod
def deduce(parameters):
review = parameters.context.get("reviews")
review_parameter = parameters.getQueryParameter("review")
if review_parameter is not None:
if review is not None:
raise jsonapi.UsageError(
"Redundant query parameter: review=%s" % review_parameter)
review = api.review.fetch(
parameters.critic,
review_id=jsonapi.numeric_id(review_parameter))
return review
@staticmethod
def setAsContext(parameters, review):
parameters.setContext(Reviews.name, review)
# Also set the review's repository and branch as context.
jsonapi.v1.repositories.Repositories.setAsContext(
parameters, review.repository)
jsonapi.v1.branches.Branches.setAsContext(parameters, review.branch)
return review
|
#
# Copyright (C) 2018 The Android Open Source Project
# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# refer to tanh_v1_dynamic.mod.py for the structure
# This adds reshape as the first op in a model and
# returns output of reshape, which is dynamic tensor
'''
Testing Gather op when the input1 is dynamic.
input1 [1, 2, 3, 4] shape [4] (value of shape will be [1, 2, 3, 4])
| |
+-------------+
|
Reshape (added by DynamicInputGenerator since it generates its output to be dynamic)
|
| axis = 0 input2 [2]
| | |
+-------------+-------------+
|
|
| dynamic tensor at compilation time but the shape will be [2, 2, 3, 4] at execution time
|
Gather
|
output (dynamic tensor, [2, 2, 3, 4] at execution time)
'''
import dynamic_tensor
model = Model()
input1_shape = [1, 2, 3, 4]
dynamic_layer = dynamic_tensor.DynamicInputGenerator(model, input1_shape, "TENSOR_FLOAT32")
node_input = dynamic_layer.getTestNodeInput()
input2 = Input("intput2", "TENSOR_INT32", "{2}")
axis = Int32Scalar("axis", 0)
output = Output("output", "TENSOR_FLOAT32", "{2,2,3,4}")
model = model.Operation("GATHER", node_input, axis, input2).To(output)
input1_data = [1.123456789123456789, 2.123456789123456789, 3.123456789123456789, 4.123456789123456789,
5.123456789123456789, 6.123456789123456789, 7.123456789123456789, 8.123456789123456789,
9.123456789123456789, 10.123456789123456789, 11.123456789123456789, 12.123456789123456789,
13.123456789123456789, 14.123456789123456789, 15.123456789123456789, 16.123456789123456789,
17.123456789123456789, 18.123456789123456789, 19.123456789123456789, 20.123456789123456789,
21.123456789123456789, 22.123456789123456789, 23.123456789123456789, 24.123456789123456789
]
input0 = {
dynamic_layer.getModelInput() : input1_data, # input 1
dynamic_layer.getShapeInput() : input1_shape,
input2 : [0, 0] # input 2
}
output0 = {
output: # output
[1.123456789123456789, 2.123456789123456789, 3.123456789123456789, 4.123456789123456789,
5.123456789123456789, 6.123456789123456789, 7.123456789123456789, 8.123456789123456789,
9.123456789123456789, 10.123456789123456789, 11.123456789123456789, 12.123456789123456789,
13.123456789123456789, 14.123456789123456789, 15.123456789123456789, 16.123456789123456789,
17.123456789123456789, 18.123456789123456789, 19.123456789123456789, 20.123456789123456789,
21.123456789123456789, 22.123456789123456789, 23.123456789123456789, 24.123456789123456789,
1.123456789123456789, 2.123456789123456789, 3.123456789123456789, 4.123456789123456789,
5.123456789123456789, 6.123456789123456789, 7.123456789123456789, 8.123456789123456789,
9.123456789123456789, 10.123456789123456789, 11.123456789123456789, 12.123456789123456789,
13.123456789123456789, 14.123456789123456789, 15.123456789123456789, 16.123456789123456789,
17.123456789123456789, 18.123456789123456789, 19.123456789123456789, 20.123456789123456789,
21.123456789123456789, 22.123456789123456789, 23.123456789123456789, 24.123456789123456789]
}
# Instantiate an example
Example((input0, output0))
|
import pytest
import pytz
from django import forms
from timezone_field import TimeZoneFormField
@pytest.fixture
def Form():
class _Form(forms.Form):
tz = TimeZoneFormField()
tz_opt = TimeZoneFormField(required=False)
yield _Form
@pytest.fixture
def FormInvalidChoice(invalid_tz):
class _FormInvalidChoice(forms.Form):
tz = TimeZoneFormField(
choices=(
[(tz, tz) for tz in pytz.all_timezones]
+ [(invalid_tz, pytz.utc)]
)
)
yield _FormInvalidChoice
def test_form_valid1(Form, pst, pst_tz):
form = Form({'tz': pst})
assert form.is_valid()
assert form.cleaned_data['tz'] == pst_tz
assert form.cleaned_data['tz_opt'] is None
def test_form_valid2(Form, gmt, gmt_tz, utc, utc_tz):
form = Form({'tz': gmt, 'tz_opt': utc})
assert form.is_valid()
assert form.cleaned_data['tz'] == gmt_tz
assert form.cleaned_data['tz_opt'] == utc_tz
@pytest.mark.parametrize('tz, tz_invalid_choice', [
[pytest.lazy_fixture('invalid_tz'), None],
[None, pytest.lazy_fixture('invalid_tz')],
[pytest.lazy_fixture('uncommon_tz'), None],
])
def test_form_invalid(Form, tz, tz_invalid_choice):
form = Form({'tz': tz, 'tz_invalid_choice': tz_invalid_choice})
assert not form.is_valid()
def test_form_default_human_readable_choices_dont_have_underscores(Form):
form = Form()
for choice in form.fields['tz'].choices:
assert '_' not in choice[1]
def test_form_invalid_choice_valid(FormInvalidChoice, pst, pst_tz):
form = FormInvalidChoice({'tz': pst})
assert form.is_valid()
assert form.cleaned_data['tz'] == pst_tz
def test_form_invalid_choice_invalid_choice(FormInvalidChoice, invalid_tz):
form = FormInvalidChoice({'tz': invalid_tz})
assert not form.is_valid()
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import yaml
import os
import webbrowser
from robot.server import HTTPFrontend
from robot.mxnet_robot import MXNetRobot
parser = argparse.ArgumentParser()
parser.add_argument('--host', default='localhost', help='host to listen to')
parser.add_argument('--port', '-p', type=int, default=8080,
help='Port the web server should listen on (default 8080).')
args = parser.parse_args()
# Open web frontend and serve model
webbrowser.open('http://{}:{}/'.format(args.host, args.port), new=2)
go_robot = MXNetRobot('model_zoo/thetago_standard', 100)
go_server = HTTPFrontend(bot=go_robot, port=args.port)
go_server.run()
|
import pygame
from Data.scripts.Entity import *
from Data.scripts.Animation import *
from Data.scripts.coreFunctions import *
class Player(entity):
def __init__(self,x,y,startTexture):
super().__init__(x,y,startTexture)
animationList={"Idle":loadAnimationFromFolder("data/res/Player/Idle",frameTime=10),
"Move":loadAnimationFromFolder("data/res/Player/Move",frameTime=10),
"Fall":loadAnimationFromFolder("data/res/Player/Jump",frameTime=10)}
self.animContoller=AnimatonController(animationList,"Idle")
self.hsp=0.0
self.vsp=0.0
self.jumpHeight=0.9
self.grv=0.13
self.cayotiTime=10
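        # "Coyote time": number of grace frames after leaving a platform during
        # which a jump input is still accepted.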
self.airTime=0
self.jumpInput=False
self.jumpTime=0.2*60
self.jumpTimer=0
self.acc=0.1
self.speed=1
self.horizontalInput=0
def InputPressed(self):
pressed_keys=pygame.key.get_pressed()
self.horizontalInput=0
if pressed_keys[pygame.K_RIGHT]:
self.horizontalInput+=1
if pressed_keys[pygame.K_LEFT]:
self.horizontalInput-=1
def InputDown(self,e):
if e.type==pygame.KEYUP:
if e.key==pygame.K_SPACE:
self.jumpInput=False
if e.type==pygame.KEYDOWN:
            if e.key==pygame.K_SPACE and self.airTime<3+self.cayotiTime: # 3 because after vsp is set to 0 it takes 3 frames to touch the block again
self.jumpInput=True
self.jumpTimer=self.jumpTime
self.airTime+=self.cayotiTime
#self.vsp=-self.jumpHeight
def Run(self,tiles,window):
self.jumpTimer-=1
if self.jumpTimer<0:
self.jumpInput=False
self.hsp=lerp(self.hsp,self.horizontalInput*self.speed,self.acc)
self.vsp+=self.grv
if self.jumpInput:
self.vsp=-self.jumpHeight
print("X:",str(self.pos[0]))
print("Y:",str(self.pos[1]))
print("AirTime:",str(self.airTime))
if self.hsp<0.05 and self.hsp> -0.05:
self.animContoller.setState("Idle")
else:
if self.hsp>0:
self.animContoller.flipAnims(False,False)
else:
self.animContoller.flipAnims(True,False)
self.animContoller.setState("Move")
if self.vsp>0.6:
self.animContoller.setState("Fall")
collisions=self.move([self.hsp,self.vsp],tiles)
if collisions["right"] or collisions["left"]:
self.hsp=0
if collisions["top"] or collisions["bottom"]:
self.vsp=0
if collisions["bottom"]:
self.airTime=0
else:
self.airTime+=1
self.sprite=self.animContoller.Run()
self.render(window)
|
__all__ = [
'AddCellConnToPoints',
'PointsToTube',
'LonLatToUTM',
'RotatePoints',
'ExtractPoints',
'RotationTool',
'ExtractCellCenters',
'AppendCellCenters',
'IterateOverPoints',
'ConvertUnits',
'BuildSurfaceFromPoints',
]
__displayname__ = 'Point/Line Sets'
from datetime import datetime
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import pyvista
from .. import _helpers, interface
from ..base import FilterBase, FilterPreserveTypeBase
# import CreateTensorMesh for its cell string parsing
from ..model_build import CreateTensorMesh
# NOTE: internal import of pyproj in LonLatToUTM
###############################################################################
#---- Cell Connectivity ----#
class AddCellConnToPoints(FilterBase):
"""This filter will add linear cell connectivity between scattered points.
You have the option to add ``VTK_Line`` or ``VTK_PolyLine`` connectivity.
``VTK_Line`` connectivity makes a straight line between the points in order
(either in the order by index or using a nearest neighbor calculation).
The ``VTK_PolyLine`` adds a poly line connectivity between all points as
one spline (either in the order by index or using a nearest neighbor
calculation). Type map is specified in `vtkCellType.h`.
**Cell Connectivity Types:**
- 4: Poly Line
- 3: Line
"""
__displayname__ = 'Add Cell Connectivity to Points'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterBase.__init__(self,
nInputPorts=1, inputType='vtkPolyData',
nOutputPorts=1, outputType='vtkPolyData')
# Parameters
self.__cell_type = kwargs.get('cell_type', vtk.VTK_POLY_LINE)
self.__usenbr = kwargs.get('nearest_nbr', False)
self.__close_loop = kwargs.get('close_loop', False)
self.__keep_vertices = kwargs.get('keep_vertices', False)
self.__unique = kwargs.get('unique', False)
def _connect_cells(self, pdi, pdo, log_time=False):
"""Internal helper to perfrom the connection"""
# NOTE: Type map is specified in vtkCellType.h
cell_type = self.__cell_type
if log_time:
start_time = datetime.now()
# Get the Points over the NumPy interface
pdi = pyvista.wrap(pdi)
        points = np.copy(pdi.points)  # New NumPy array of points so we don't destroy the input
if self.__unique:
# Remove repeated points
indexes = np.unique(points, return_index=True, axis=0)[1]
points = np.array(points[sorted(indexes)])
def _find_min_path(points):
try:
# sklearn's KDTree is faster: use it if available
from sklearn.neighbors import KDTree as Tree
except ImportError:
from scipy.spatial import cKDTree as Tree
_compute_dist = lambda pt0, pt1: np.linalg.norm(pt0-pt1)
ind, min_dist = None, np.inf
tree = Tree(points)
for pt in points:
cur_ind = tree.query([pt], k=len(points))[1].ravel()
dist = 0.
for i in range(len(cur_ind)-1):
dist += _compute_dist(points[cur_ind[i]], points[cur_ind[i+1]])
if dist < min_dist:
ind = cur_ind
min_dist = dist
return ind.ravel()
if self.__usenbr:
ind = _find_min_path(points)
else:
ind = np.arange(len(points), dtype=int)
if self.__keep_vertices:
poly = pyvista.PolyData(np.copy(points))
else:
poly = pyvista.PolyData()
poly.points = np.copy(points)
if cell_type == vtk.VTK_LINE:
lines = np.c_[np.full(len(ind)-1, 2), ind[0:-1], ind[1:]]
if self.__close_loop:
app = np.append(lines, [[2, ind[-1], ind[0]],], axis=0)
lines = app
poly.lines = lines
elif cell_type == vtk.VTK_POLY_LINE:
cells = vtk.vtkCellArray()
cell = vtk.vtkPolyLine()
if self.__close_loop:
cell.GetPointIds().SetNumberOfIds(len(ind) + 1)
else:
cell.GetPointIds().SetNumberOfIds(len(ind))
for i in ind:
cell.GetPointIds().SetId(i, ind[i])
if self.__close_loop:
cell.GetPointIds().SetId(i+1, ind[0])
cells.InsertNextCell(cell)
poly.SetLines(cells)
else:
raise _helpers.PVGeoError('Cell type ({}) not supported'.format(cell_type))
for key, val in pdi.point_arrays.items():
poly.point_arrays[key] = val
pdo.DeepCopy(poly)
if log_time:
print("exectuted in {}".format(datetime.now() - start_time))
return pdo
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output data object
"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
        # Perform task
self._connect_cells(pdi, pdo)
return 1
    #### Setters and Getters ####
def set_cell_type(self, cell_type):
"""Set the cell typ by the integer id as specified in `vtkCellType.h`
"""
if cell_type != self.__cell_type:
self.__cell_type = cell_type
self.Modified()
def set_use_nearest_nbr(self, flag):
"""Set a flag on whether to a KDTree nearest neighbor
algorithm to sort the points to before adding linear connectivity.
"""
if flag != self.__usenbr:
self.__usenbr = flag
self.Modified()
def set_use_unique_points(self, flag):
"""Set a flag on whether to only use unique points"""
if flag != self.__unique:
self.__unique = flag
self.Modified()
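# Minimal usage sketch (assumed, outside of ParaView) for AddCellConnToPoints;
# 'scattered_points.vtp' is a hypothetical file of unconnected points:
#
#   import pyvista
#   pts = pyvista.read('scattered_points.vtp')
#   f = AddCellConnToPoints(nearest_nbr=True)
#   f.SetInputDataObject(pts)
#   f.Update()
#   line = pyvista.wrap(f.GetOutputDataObject(0))  # polyline through the points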
###############################################################################
class PointsToTube(AddCellConnToPoints):
"""Takes points from a vtkPolyData object and constructs a line of those
points then builds a polygonal tube around that line with some specified
radius and number of sides.
"""
__displayname__ = 'Points to Tube'
__category__ = 'filter'
def __init__(self, num_sides=20, radius=10.0, capping=False, **kwargs):
AddCellConnToPoints.__init__(self, **kwargs)
# Additional Parameters
# NOTE: CellType should remain vtk.VTK_POLY_LINE (4) connection
self.__numSides = num_sides
self.__radius = radius
self.__capping = capping
def _connect_cells(self, pdi, pdo, log_time=False):
"""This uses the parent's ``_connect_cells()`` to build a tub around
"""
AddCellConnToPoints._connect_cells(self, pdi, pdo, log_time=log_time)
tube = vtk.vtkTubeFilter()
tube.SetInputData(pdo)
# User Defined Parameters
tube.SetCapping(self.__capping)
tube.SetRadius(self.__radius)
tube.SetNumberOfSides(self.__numSides)
# apply the filter
tube.Update()
pdo.ShallowCopy(tube.GetOutput())
return pdo
    #### Setters and Getters ####
def set_radius(self, radius):
"""Set the radius of the tube
"""
if self.__radius != radius:
self.__radius = radius
self.Modified()
def set_number_of_sides(self, num):
"""Set the number of sides (resolution) for the tube
"""
if self.__numSides != num:
self.__numSides = num
self.Modified()
def set_capping(self, flag):
"""Set a boolean flag on whether or not to cap the ends of the tube
"""
if self.__capping != flag:
self.__capping = flag
self.Modified()
###############################################################################
#---- LonLat to Cartesian ----#
class LonLatToUTM(FilterPreserveTypeBase):
"""Converts Points from Lon Lat to UTM
"""
__displayname__ = 'Lat Lon To UTM'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterPreserveTypeBase.__init__(self, inputType='vtkDataSet', **kwargs)
        self.__zone = 11
self.__ellps = 'WGS84'
self.set_zone(kwargs.get('zone', 11)) # User defined
self.set_ellps(kwargs.get('ellps', 'WGS84')) # User defined
@staticmethod
def get_available_ellps(idx=None):
"""Returns the available ellps
"""
import pyproj
        ellps = list(pyproj.pj_ellps.keys())  # list() so the entries can be reordered below (Python 3 compatibility)
# Now migrate WGSXX to front so that 84 is always default
wgs = ['WGS60','WGS66','WGS72', 'WGS84']
for i, name in enumerate(wgs):
oldindex = ellps.index(name)
ellps.insert(0, ellps.pop(oldindex))
if idx is not None:
return ellps[idx]
return ellps
def __convert_2d(self, lon, lat, elev):
"""Converts 2D Lon Lat coords to 2D XY UTM points"""
import pyproj
p = pyproj.Proj(proj='utm', zone=self.__zone, ellps=self.__ellps)
utm_x, utm_y = p(lon, lat)
return np.c_[utm_x, utm_y, elev]
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = pyvista.wrap(self.GetInputData(inInfo, 0, 0))
pdo = self.GetOutputData(outInfo, 0)
        #### Perform task ####
if not hasattr(pdi, 'points'):
raise _helpers.PVGeoError('Input data object does not have points to convert.')
        coords = pdi.points.copy()  # New NumPy array of points so we don't destroy the input
        # Now convert the points
points = self.__convert_2d(coords[:, 0], coords[:, 1], coords[:, 2])
output = pdi.copy()
output.points = points
pdo.DeepCopy(output)
return 1
def set_zone(self, zone):
"""Set the UTM zone number"""
if zone < 1 or zone > 60:
raise _helpers.PVGeoError('Zone (%d) is invalid.' % zone)
if self.__zone != zone:
self.__zone = int(zone)
self.Modified()
def set_ellps(self, ellps):
"""Set the ellipsoid type"""
if isinstance(ellps, int):
ellps = self.get_available_ellps(idx=ellps)
if not isinstance(ellps, str):
raise _helpers.PVGeoError('Ellps must be a string.')
if self.__ellps != ellps:
self.__ellps = ellps
self.Modified()
###############################################################################
class RotationTool(object):
"""A class that holds a set of methods/tools for performing and estimating
coordinate rotations.
"""
__displayname__ = 'Rotation Tool'
__category__ = 'filter'
def __init__(self, decimals=6):
# Parameters
self.RESOLUTION = np.pi / 3200.0
self.DECIMALS = decimals
@staticmethod
def _get_rotation_matrix(theta):
"""Internal helper to generate a rotation matrix given a rotation angle"""
xx = np.cos(theta)
xy = -np.sin(theta)
yx = np.sin(theta)
yy = np.cos(theta)
if not isinstance(theta, np.ndarray):
return np.array([[xx, xy],
[yx, yy]])
        # Otherwise populate the array manually
mat = np.zeros((len(theta), 2, 2))
mat[:, 0, 0] = xx
mat[:, 0, 1] = xy
mat[:, 1, 0] = yx
mat[:, 1, 1] = yy
return mat
@staticmethod
def rotate_around(pts, theta, origin):
"""Rotate points around an origins given an anlge on the XY plane
"""
xarr, yarr = pts[:,0], pts[:,1]
ox, oy = origin[0], origin[1]
qx = ox + np.cos(theta) * (xarr - ox) - np.sin(theta) * (yarr - oy)
qy = oy + np.sin(theta) * (xarr - ox) + np.cos(theta) * (yarr - oy)
return np.vstack((qx, qy)).T
@staticmethod
def rotate(pts, theta):
"""Rotate points around (0,0,0) given an anlge on the XY plane
"""
rot = RotationTool._get_rotation_matrix(theta)
rotated = pts.dot(rot)
if not isinstance(theta, np.ndarray):
return rotated
return np.swapaxes(rotated, 0, 1)
@staticmethod
def distance_between(pts):
"""Gets the distance between two points
"""
if pts.ndim < 3:
return np.sqrt((pts[0,0] - pts[1,0])**2 + (pts[0,1] - pts[1,1])**2)
return np.sqrt((pts[:, 0,0] - pts[:, 1,0])**2 + (pts[:, 0,1] - pts[:, 1,1])**2)
@staticmethod
def cos_between(pts):
"""Gets the cosine between two points
"""
if pts.ndim < 3:
xdiff = abs(pts[0,0] - pts[1,0])
dist = RotationTool.distance_between(pts)
return np.arccos(xdiff/dist)
# Otherwise we have a set of points
xdiff = abs(pts[:, 0,0] - pts[:, 1,0])
dist = RotationTool.distance_between(pts)
return np.arccos(xdiff/dist)
@staticmethod
def sin_between(pts):
"""Calculate the sin angle between two points"""
ydiff = abs(pts[0,1] - pts[1,1])
dist = RotationTool.distance_between(pts)
return np.arcsin(ydiff/dist)
@staticmethod
def rotation_matrix(vector_orig, vector_fin):
"""Calculate the rotation matrix required to rotate from one vector to another.
        For the rotation of one vector to another, there is an infinite number of rotation matrices
        possible. Due to axial symmetry, the rotation axis can be any vector lying in the symmetry
plane between the two vectors. Hence the axis-angle convention will be used to construct the
matrix with the rotation axis defined as the cross product of the two vectors. The rotation
angle is the arccosine of the dot product of the two unit vectors.
Given a unit vector parallel to the rotation axis, w = [x, y, z] and the rotation angle a,
the rotation matrix R is::
| 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z |
R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z |
| -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) |
Args:
            vector_orig (numpy array, len 3): The unrotated vector defined in the reference frame.
vector_fin (numpy array, len 3): The rotated vector defined in the reference frame.
Note:
This code was adopted from `printipi`_ under the MIT license.
.. _printipi: https://github.com/Wallacoloo/printipi/blob/master/util/rotation_matrix.py
"""
from math import acos, cos, sin
from numpy import cross, dot
from numpy.linalg import norm
R = np.zeros((3,3))
# Convert the vectors to unit vectors.
vector_orig = vector_orig / norm(vector_orig)
vector_fin = vector_fin / norm(vector_fin)
# The rotation axis (normalised).
axis = cross(vector_orig, vector_fin)
axis_len = norm(axis)
if axis_len != 0.0:
axis = axis / axis_len
# Alias the axis coordinates.
x = axis[0]
y = axis[1]
z = axis[2]
# The rotation angle.
angle = acos(dot(vector_orig, vector_fin))
# Trig functions (only need to do this maths once!).
ca = cos(angle)
sa = sin(angle)
# Calculate the rotation matrix elements.
R[0,0] = 1.0 + (1.0 - ca)*(x**2 - 1.0)
R[0,1] = -z*sa + (1.0 - ca)*x*y
R[0,2] = y*sa + (1.0 - ca)*x*z
R[1,0] = z*sa+(1.0 - ca)*x*y
R[1,1] = 1.0 + (1.0 - ca)*(y**2 - 1.0)
R[1,2] = -x*sa+(1.0 - ca)*y*z
R[2,0] = -y*sa+(1.0 - ca)*x*z
R[2,1] = x*sa+(1.0 - ca)*y*z
R[2,2] = 1.0 + (1.0 - ca)*(z**2 - 1.0)
return R
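    # Minimal sketch (assumed usage): rotating the x-axis onto the y-axis should
    # recover a 90-degree rotation about z, i.e.
    #   R = RotationTool.rotation_matrix(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
    #   np.allclose(R.dot([1., 0., 0.]), [0., 1., 0.])  # -> True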
# def _converge_angle2(self, pt1, pt2):
# """internal use only: pts should only be a two neighboring points"""
# # Make the theta range up to 90 degrees to rotate points through
# #- angles = [0.0, 90.0)
# angles = np.arange(0.0, np.pi/2, self.RESOLUTION)
# pts = self.rotate(np.vstack((pt1, pt2)), angles)
# # Get the angles between the points
# c = self.cos_between(pts)
# dist = self.distance_between(pts)
#
# # Find angles that satisfy grid conditions
# xidx = np.argwhere(abs(c - np.pi/2.0) < (1 * 10**-self.DECIMALS))
# yidx = np.argwhere(abs(c - 0.0) < (1 * 10**-self.DECIMALS))
# if len(xidx) == 1 and len(yidx) == 0:
# return 0, np.pi/2-angles[xidx], dist[xidx]
# #return angles[xidx[0][0]]
# elif len(yidx) == 1 and len(xidx) == 0:
# return 1, np.pi/2-angles[yidx], dist[yidx]
# else:
# raise _helpers.PVGeoError('No angle found')
def _converge_angle(self, pt1, pt2):
"""Internal use only: pts should only be a two neighboring points.
"""
# Make the theta range up to 90 degrees to rotate points through
#- angles = [0.0, 90.0)
angles = np.arange(0.0, np.pi/2, self.RESOLUTION)
# nang = len(angles) # Number of rotations
# if pt1.ndim == pt2.ndim == 3:
# # uh-oh
# raise RuntimeError()
pts = np.vstack((pt1, pt2))
rotated = self.rotate(pts, angles) # Points rotated for all angles
cosbtw = self.cos_between(rotated)
distbtw = self.distance_between(rotated)
# Now find minimum
# X axis
xmin = np.argwhere(np.abs(cosbtw - np.pi/2.0) < (1 * 10**-self.DECIMALS)).flatten()
ymin = np.argwhere(np.abs(cosbtw - 0.0) < (1 * 10**-self.DECIMALS)).flatten()
# Protection to make sure we can converge
if len(xmin) == 0 and len(ymin) == 0:
# Uh-oh... lets decrease the precision
#- lets try again with lower precision
self.DECIMALS -= 1
if self.DECIMALS < 0:
self.DECIMALS = 0
raise _helpers.PVGeoError('No angle found.')
return self._converge_angle(pt1, pt2)
        # Figure out whether the two points share the x axis or the y axis and return
if len(xmin) > 0 and len(ymin) > 0:
raise RuntimeError('Invalid solution')
elif len(xmin) > 0:
xidx = np.mean(xmin, dtype=int)
return 0, angles[xidx], distbtw[xidx]
elif len(ymin) > 0:
yidx = np.mean(ymin, dtype=int)
return 1, angles[yidx], distbtw[yidx]
# No solution found.
raise _helpers.PVGeoError('No angle found. Precision too low/high.')
def _estimate_angle_and_spacing(self, pts, sample=0.5):
"""internal use only
"""
try:
# sklearn's KDTree is faster: use it if available
from sklearn.neighbors import KDTree as Tree
except ImportError:
from scipy.spatial import cKDTree as Tree
        # Create the indexing range for searching the points:
num = len(pts)
rng = np.linspace(0, num-1, num=num, dtype=int)
N = int(num*sample) + 1
rng = np.random.choice(rng, N)
angles = np.zeros(len(rng))
tree = Tree(pts)
distances = [[],[]]
#######################################################################
#######################################################################
# Find nearest point
distall, ptsiall = tree.query(pts, k=2)
pt1all, pt2all = pts[ptsiall[:, 0]], pts[ptsiall[:, 1]]
#######################################################################
idx = 0
for i in rng:
# OPTIMIZE
ax, angles[idx], dist = self._converge_angle(pt1all[i], pt2all[i])
distances[ax].append(dist)
idx += 1
#######################################################################
#TODO??? angles, distances = self._converge_angle(pt1all, pt2all)
#######################################################################
#######################################################################
dx, dy = distances[0], distances[1]
if len(dx) == 0:
dx = dy
elif len(dy) == 0:
dy = dx
TOLERANCE = np.min(np.append(dx, dy)) / 2.0
angle = np.average(np.unique(angles))
dx = np.unique(np.around(dx / TOLERANCE)) * TOLERANCE
dy = np.unique(np.around(dy / TOLERANCE)) * TOLERANCE
# Now round to decimals
dx = np.around(dx, self.DECIMALS)
        dy = np.around(dy, self.DECIMALS)  # round dy (not dx) to the configured precision
# print('Recovered: ', dx, dy)
return angle, dx[0], dy[0]
def estimate_and_rotate(self, x, y, z):
"""A method to estimate the rotation of a set of points and correct
that rotation on the XY plane
"""
if not (len(x) == len(y) == len(z)):
raise AssertionError('Must have same number of coordinates for all components.')
idxs = np.argwhere(z == z[0])
pts = np.hstack((x[idxs], y[idxs]))
angle, dx, dy = self._estimate_angle_and_spacing(pts)
inv = self.rotate(np.vstack((x, y)).T, angle)
return inv[:,0], inv[:,1], z, dx, dy, angle
#---- Coordinate Rotations ----#
class RotatePoints(FilterBase):
"""Rotates XYZ coordinates in `vtkPolyData` around an origin at a given
angle on the XY plane.
"""
__displayname__ = 'Rotate Points'
__category__ = 'filter'
def __init__(self, angle=45.0, origin=None, use_corner=True):
FilterBase.__init__(self,
nInputPorts=1, inputType='vtkPolyData',
nOutputPorts=1, outputType='vtkPolyData')
# Parameters
self.__angle = angle
if origin is None:
origin = [0.0, 0.0]
self.__origin = origin
self.__use_corner = use_corner
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output.
"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
        #### Perform task ####
        # Get the Points over the NumPy interface
        wpdi = dsa.WrapDataObject(pdi) # NumPy wrapped input
        points = np.array(wpdi.Points)  # New NumPy array of points so we don't destroy the input
origin = self.__origin
if self.__use_corner:
idx = np.argmin(points[:,0])
origin = [points[idx,0], points[idx,1]]
points[:,0:2] = RotationTool.rotate_around(points[:,0:2], self.__angle, origin)
pdo.DeepCopy(pdi)
pts = pdo.GetPoints()
for i, pt in enumerate(points):
pts.SetPoint(i, pt)
return 1
def set_rotation_degrees(self, theta):
"""Sets the rotational angle in degrees.
"""
theta = np.deg2rad(theta)
if self.__angle != theta:
self.__angle = theta
self.Modified()
def set_origin(self, xo, yo):
"""Sets the origin to perform the rotate around.
"""
if self.__origin != [xo, yo]:
self.__origin = [xo, yo]
self.Modified()
def set_use_corner(self, flag):
"""A flag to use a corner of the input data set as the rotational
origin.
"""
if self.__use_corner != flag:
self.__use_corner = flag
self.Modified()
###############################################################################
class ExtractPoints(FilterBase):
"""Extracts XYZ coordinates and point/cell data from an input ``vtkDataSet``
"""
__displayname__ = 'Extract Points'
__category__ = 'filter'
def __init__(self):
FilterBase.__init__(self,
nInputPorts=1, inputType='vtkDataSet',
nOutputPorts=1, outputType='vtkPolyData')
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
        #### Perform task ####
# Get the Points over the NumPy interface
wpdi = dsa.WrapDataObject(pdi) # NumPy wrapped input
if not hasattr(wpdi, 'Points'):
raise _helpers.PVGeoError('Input data object does not have XYZ points.')
        points = np.array(wpdi.Points)  # New NumPy array of points so we don't destroy the input
# Now transfer data
f = vtk.vtkCellDataToPointData()
f.SetInputData(pdi)
f.Update()
d = f.GetOutput()
pdo.ShallowCopy(interface.points_to_poly_data(points))
_helpers.copy_arrays_to_point_data(d, pdo, 0) # 0 is point data
return 1
class ExtractCellCenters(FilterBase):
__displayname__ = 'Extract Cell Centers'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterBase.__init__(self, nInputPorts=1, inputType='vtkDataSet',
nOutputPorts=1, outputType='vtkPolyData', **kwargs)
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Find cell centers
filt = vtk.vtkCellCenters()
filt.SetInputDataObject(pdi)
filt.Update()
centers = dsa.WrapDataObject(filt.GetOutput()).Points
# Get CellData
wpdi = dsa.WrapDataObject(pdi)
celldata = wpdi.CellData
keys = celldata.keys()
# Make poly data of Cell centers:
pdo.DeepCopy(interface.points_to_poly_data(centers))
for i, name in enumerate(keys):
pdo.GetPointData().AddArray(pdi.GetCellData().GetArray(name))
return 1
class AppendCellCenters(FilterPreserveTypeBase):
__displayname__ = 'Append Cell Centers'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterPreserveTypeBase.__init__(self, **kwargs)
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Find cell centers
filt = vtk.vtkCellCenters()
filt.SetInputDataObject(pdi)
filt.Update()
        # I use the dataset adapter/numpy interface because it's easy
centers = dsa.WrapDataObject(filt.GetOutput()).Points
centers = interface.convert_array(centers)
centers.SetName('Cell Centers')
# Copy input data and add cell centers as tuple array
pdo.DeepCopy(pdi)
pdo.GetCellData().AddArray(centers)
return 1
class IterateOverPoints(FilterBase):
"""Iterate over points in a time varying manner.
"""
__displayname__ = 'Iterate Over Points'
__category__ = 'filter'
def __init__(self, dt=1.0):
FilterBase.__init__(self, nInputPorts=1, inputType='vtkPolyData',
nOutputPorts=1, outputType='vtkPolyData')
# Parameters
self.__dt = dt
self.__timesteps = None
self.__original = 2
self.__tindex = None
self.__n = 2
self.__decimate = 100
# The point/normal that gets updated on every iteration
self.__point = (0.0, 0.0, 0.0)
self.__normal = (1.0, 0.0, 0.0)
def _update_time_steps(self):
"""For internal use only
"""
self.__timesteps = _helpers.update_time_steps(self, self.__n, self.__dt)
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
# Get number of points
pdo = self.GetOutputData(outInfo, 0)
        #### Perform task ####
# Get the Points over the NumPy interface
# wpdi = dsa.WrapDataObject(pdi) # NumPy wrapped input
# Get requested time index
i = _helpers.get_requested_time(self, outInfo)
# Now grab point at this timestep
pt = pdi.GetPoints().GetPoint(self.__tindex[i])
# Calculate normal
pts1 = self.__point
pts2 = pt
x1, y1, z1 = pts1[0], pts1[1], pts1[2]
x2, y2, z2 = pts2[0], pts2[1], pts2[2]
normal = [x2-x1, y2-y1, z2-z1]
self.__point = pt
self.__normal = normal
poly = interface.points_to_poly_data(np.array(pt))
pdo.ShallowCopy(poly)
return 1
def RequestInformation(self, request, inInfo, outInfo):
"""Used by pipeline to set the time information
"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
# Get number of points
self.__original = pdi.GetNumberOfPoints()
self.set_decimate(self.__decimate)
# register time:
self._update_time_steps()
return 1
#### Public Getters / Setters ####
def set_decimate(self, percent):
"""Set the percent (1 to 100) to decimate
"""
if percent > 100 or percent < 1:
return
self.__decimate = percent
self.__n = int(self.__original * (percent/100.0))
self.__tindex = np.linspace(0, self.__original-1, self.__n, dtype=int)
self._update_time_steps()
self.Modified()
def set_time_delta(self, dt):
"""
Set the time step interval in seconds
"""
if self.__dt != dt:
self.__dt = dt
self._update_time_steps()
self.Modified()
def get_time_step_values(self):
"""Use this in ParaView decorator to register timesteps
"""
return self.__timesteps.tolist() if self.__timesteps is not None else None
def get_point(self):
"""Get the current point"""
return list(self.__point)
def get_normal(self):
"""Get the current normal vector"""
return list(self.__normal)
class ConvertUnits(FilterPreserveTypeBase):
"""Convert points in an input data object to from meters to feet or vice versa.
This simply uses a ``vtkTransformFilter`` and scales input data object with
common conversions.
"""
__displayname__ = 'Convert XYZ Units'
__category__ = 'filter'
def __init__(self, conversion='meter_to_feet', **kwargs):
FilterPreserveTypeBase.__init__(self, **kwargs)
self.__conversion = conversion
@staticmethod
def lookup_conversions(get_keys=False):
"""All Available conversions
Return:
dict: dictionary of conversion units
"""
convs = dict(
meter_to_feet=3.2808399,
feet_to_meter=1/3.2808399,
)
if get_keys:
return convs.keys()
return convs
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
# Get number of points
pdo = self.GetOutputData(outInfo, 0)
        #### Perform task ####
filt = vtk.vtkTransformFilter()
trans = vtk.vtkTransform()
trans.Scale(self.get_conversion(), self.get_conversion(), self.get_conversion())
filt.SetTransform(trans)
filt.SetInputDataObject(pdi)
filt.Update()
scaled = filt.GetOutputDataObject(0)
pdo.DeepCopy(scaled)
return 1
    def set_conversion(self, key):
        """Set the conversion via a lookup table"""
        convs = self.lookup_conversions()
        if isinstance(key, str):
            if key.lower() not in convs.keys():
                raise _helpers.PVGeoError('Conversion `%s` not available.' % key)
        elif isinstance(key, int):
            # dict views are not indexable in Python 3, so convert to a list first
            key = list(convs.keys())[key]
        if self.__conversion != key:
            self.__conversion = key
            self.Modified()
        return 1
def get_conversion(self):
"""Get the conversion value"""
convs = self.lookup_conversions()
return convs[self.__conversion]
class BuildSurfaceFromPoints(FilterBase):
"""From the sorted x, y, and z station locations in the input PolyData,
create a surface to project down from the line of those points. Use the
Z cells to control the size of the mesh surface
"""
__displayname__ = 'Build Surface From Points'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterBase.__init__(self, inputType='vtkPolyData',
outputType='vtkStructuredGrid', **kwargs)
self.__zcoords = CreateTensorMesh._read_cell_line('0. 50.')
zcoords = kwargs.get('zcoords', self.__zcoords)
if not isinstance(zcoords, (str, list, tuple, np.ndarray)):
raise TypeError('zcoords of bad type.')
if isinstance(zcoords, str):
self.set_z_coords_str(zcoords)
else:
self.set_z_coords(zcoords)
@staticmethod
def create_surface(points, z_range):
"""From the sorted x, y, and z station locations, create a surface
to display a seismic recording/migration on in space. The result is
defined in the X,Y,Z-z_range 3D space.
The z_range should be treated as relative coordinates to the values
given on the third column of the points array. If you want the values
in the z_range to be treated as the absolute coordinates, simply
do not pass any Z values in the points array - if points is N by 2,
then the values in z_range will be inferred as absolute.
        Args:
            points (np.ndarray): array-like of the station x and y locations
                (npts by 2-3)
            z_range (np.ndarray): The linear space of the z dimension. This
                will be filled out for every station location.
        Return:
            pyvista.StructuredGrid
"""
if hasattr(points, 'values'):
# This will extract data from pandas dataframes if those are given
points = points.values
points = np.array(points)
z_range = np.array(z_range)
xloc = points[:,0]
yloc = points[:,1]
if points.shape[1] > 2:
zloc = points[:,2]
else:
val = np.nanmax(z_range)
z_range = val - np.flip(z_range)
zloc = np.full(xloc.shape, val)
if not len(xloc) == len(yloc) == len(zloc):
raise AssertionError('Coordinate shapes do not match.')
nt = len(xloc)
ns = len(z_range)
# Extrapolate points to a 2D surface
# repeat the XY locations across
points = np.repeat(np.c_[xloc,yloc,zloc], ns, axis=0)
# repeat the Z locations across
tp = np.repeat(z_range.reshape((-1, len(z_range))), nt, axis=0)
tp = zloc[:,None] - tp
points[:,-1] = tp.ravel()
# Produce the output
output = pyvista.StructuredGrid()
output.points = points
output.dimensions = [ns, nt, 1]
return output
def RequestData(self, request, inInfo, outInfo):
"""Execute on pipeline"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
# Get number of points
pdo = self.GetOutputData(outInfo, 0)
        #### Perform task ####
data = pyvista.wrap(pdi)
output = BuildSurfaceFromPoints.create_surface(data.points, np.array(self.__zcoords))
pdo.DeepCopy(output)
return 1
def set_z_coords(self, zcoords):
"""Set the spacings for the cells in the Z direction
Args:
zcoords (list or np.array(floats)): the spacings along the Z-axis"""
if len(zcoords) != len(self.__zcoords) or not np.allclose(self.__zcoords, zcoords):
self.__zcoords = zcoords
self.Modified()
def set_z_coords_str(self, zcoordstr):
"""Set the spacings for the cells in the Z direction
Args:
zcoordstr (str) : the spacings along the Z-axis in the UBC style"""
zcoords = CreateTensorMesh._read_cell_line(zcoordstr)
self.set_z_coords(zcoords)
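
###############################################################################
# A minimal, hedged usage sketch of ``BuildSurfaceFromPoints.create_surface``.
# It relies only on ``numpy`` and ``pyvista`` (already used by this module);
# the station coordinates and z spacing below are made up for illustration.
if __name__ == '__main__':
    _stations = np.array([[0.0, 0.0], [50.0, 10.0], [100.0, 25.0]])  # npts by 2
    _z_range = np.linspace(0.0, 100.0, 11)
    # With 2-column points, the z_range values are treated as absolute coordinates
    _surf = BuildSurfaceFromPoints.create_surface(_stations, _z_range)
    print(_surf.dimensions)  # expected: (len(_z_range), npts, 1) -> (11, 3, 1)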
|
def contributors():
"""
Add your name in the list below, in the same PR as the signed CLA.
If your contribution is sponsored by an organization, you can also add the following entry:
"organization=[YOUR ORG]".
"""
return {
"1": ["name=Mihai Bojin", "email=mihai.bojin@gmail.com"],
}
|
#-*-coding:utf-8-*-
"""
@FileName:
base_communication.py
@Description:
base communication class for behavior-driven simulation
@Authors:
Hanbo Sun(sun-hb17@mails.tsinghua.edu.cn)
@CreateTime:
2022/05/07 17:38
"""
from mnsim_noc.utils.component import Component
from mnsim_noc.Tile import BaseTile
from mnsim_noc.Wire import WireNet
class BaseCommunication(Component):
"""
base communication class for behavior-driven simulation
"""
REGISTRY = "communication"
NAME = "behavior-driven"
def __init__(self, input_tile: BaseTile, output_tile: BaseTile, wire_net: WireNet):
"""
init base communication
data from input tile to output tile
"""
super(BaseCommunication, self).__init__()
# set input tile and output tile
self.input_tile = input_tile
self.output_tile = output_tile
self.wire_net = wire_net
# input buffer and output buffer, for tile
self.output_buffer = self.input_tile.output_buffer
self.input_buffer = self.output_tile.input_buffer
self.target_tile_id = self.output_tile.tile_id
self.source_tile_id = self.input_tile.tile_id
# state
self.running_state = False
self.communication_end_time = float("inf")
self.communication_range_time = []
# transfer data and path
self.transfer_data = None
self.transfer_path = None
# set communication id
self.communication_id = \
f"{input_tile.task_id},{input_tile.tile_id}"+\
f"->{output_tile.task_id},{output_tile.tile_id}"
def update(self, current_time):
"""
        since there may be multiple communications,
        only change the running state from True to False here
"""
if self.running_state:
if current_time >= self.communication_end_time:
# PHASE COMMUNICATION END
# NO next communication
self.running_state = False
self.input_buffer.add_data_list(self.transfer_data, self.source_tile_id)
# clear transfer data path
self.wire_net.set_data_path_state(
self.transfer_path, False, self.communication_id, current_time
)
def check_communication_ready(self):
"""
check if this communication can transfer data
"""
        # TODO: the tile input buffer may need to be larger
if self.running_state:
return False
# PHASE COMMUNICATION JUDGE
self.transfer_data = self.output_buffer.next_transfer_data(self.target_tile_id)
if self.transfer_data is not None \
and self.input_buffer.check_enough_space(self.transfer_data, self.source_tile_id):
return True
return False
    def set_communication_task(self, current_time, transfer_path, transfer_time):
        """
        transfer_path can be None, which means no communication
        """
        if transfer_path is None:
            if not self.running_state:
                self.communication_end_time = float("inf")
            return None
        assert not self.running_state, "communication should be idle"
        # PHASE COMMUNICATION START
        self.running_state = True
        self.transfer_path = transfer_path
# set buffer
self.input_buffer.add_transfer_data_list(self.transfer_data, self.source_tile_id)
self.output_buffer.delete_data_list(self.transfer_data, self.target_tile_id)
        # get transfer time
self.communication_end_time = current_time + transfer_time
self.communication_range_time.append((current_time, self.communication_end_time))
# set wire state, in schedule
self.wire_net.set_data_path_state(self.transfer_path, True, self.communication_id, current_time)
return None
def get_communication_end_time(self):
"""
get the end time of the communication
"""
if self.running_state:
return self.communication_end_time
else:
return float("inf")
def get_communication_range(self):
"""
get the range of the communication
"""
return self.communication_range_time
def check_finish(self):
"""
        check if the communication is finished
"""
        assert not self.running_state, "communication should be idle"
assert self.communication_end_time == float("inf"), \
"communication end time should be inf"
def get_running_rate(self, end_time):
"""
get the simulation result
"""
self.check_finish()
communication_time = sum([
end - start for start, end in self.communication_range_time
])
return communication_time * 1. / end_time
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
""" Calculation OBO score
This module calculate obo score and rank.
Return score ranking as below.
"""
class Calculation(object):
def __init__(self, contest_no):
self.contest_no = contest_no
def calculate(self, scores):
s = {}
tec = {}
pre = {}
for score in scores:
player = score["player_no"]
user = score["judge_user"]
_tec = score["technical_merit"]
_pre = score["presentation"]
if player not in tec:
tec[player] = {user: _tec}
pre[player] = {user: _pre}
else:
tec[player][user] = _tec
pre[player][user] = _pre
s["technical_merit"] = tec
s["presentation"] = pre
scores = []
for n in s["technical_merit"].keys():
result = {}
judge_num = len(s["technical_merit"][n].keys())
tec_sum = sum(s["technical_merit"][n].values())
pre_sum = sum(s["presentation"][n].values())
result["player_no"] = n
result["technical_merit"] = round(
tec_sum / judge_num * 0.1, 1)
result["presentation"] = round(
pre_sum / judge_num * 0.1, 1)
result["total"] = round((result["technical_merit"] +
result["presentation"]) / 2.0, 1)
result["judge_num"] = judge_num
scores.append(result)
import pprint
pp = pprint.PrettyPrinter(indent=2)
ranks = sorted(scores, key=lambda x: x["total"], reverse=True)
pp.pprint(ranks)
return s
if __name__ == "__main__":
from loader import Loader
l = Loader(1)
r = l.load()
c = Calculation(1)
s = c.calculate(r)
|
from django import template
from ..models import Category
from django.template import RequestContext
register = template.Library()
@register.inclusion_tag("inc/category_navbar.html", takes_context=True)
def category_navbar(context):
request = context['request']
return {
"categories": Category.objects.filter(status=True),
'request': request
}
|
from random import random
class LogicBase:
def __init__(self, pop):
self.pop = pop
@property
def can_work(self):
return True
    def has_good(self, good, amount):
        "Returns True if the Pop has at least `amount` of a particular Good in their inventory"
        inv = self.pop.inventory.get(good)
        if inv is None:
            return False
        # the docstring implies an "at least" check, so compare with >=
        return inv.amount >= amount
def get_good(self, good):
"Gets a particular Good in a Pops inventory"
i = self.pop.inventory.get(good)
if i is None or i.amount == 0:
return None
return i
def charge_idle_money(self, charge=2):
"Change a Pop's money"
# print("{} charged {} for being idle".format(self.pop.pop_job.title, charge))
self.pop.money -= charge
def consume(self, good, amount, chance=1):
"Consumes a good in a chance"
# print("{} consumed {} {}".format(self.pop.pop_job.title, amount, good.title))
if random() <= chance:
# print('consume', good, amount)
return self.pop.inventory.subtract(good, amount)
def produce(self, good, amount, chance=1):
"Produces a good in a chance"
# print("{} produced {} {}".format(self.pop.pop_job.title, amount, good.title))
if random() <= chance:
# print('produce', good, amount)
return self.pop.inventory.add(good, amount)
def perform(self):
        raise NotImplementedError("LogicBase.perform must be implemented in subclasses")
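
# --- Hedged usage sketch (illustration only) --------------------------------
# The real Pop/Inventory classes live elsewhere in the project; the tiny
# stand-ins below only mimic the attributes LogicBase actually touches
# (inventory.get/add/subtract returning objects with an ``amount``, and
# ``pop.money``), so every name and number here is hypothetical.
if __name__ == '__main__':
    class _Stock:
        def __init__(self, amount):
            self.amount = amount

    class _StubInventory:
        def __init__(self):
            self._goods = {}

        def get(self, good):
            return self._goods.get(good)

        def add(self, good, amount):
            self._goods.setdefault(good, _Stock(0)).amount += amount
            return self._goods[good]

        def subtract(self, good, amount):
            self._goods[good].amount -= amount
            return self._goods[good]

    class _StubPop:
        def __init__(self):
            self.inventory = _StubInventory()
            self.money = 10

    logic = LogicBase(_StubPop())
    logic.produce('grain', 5)          # always succeeds with chance=1
    print(logic.has_good('grain', 3))  # True: at least 3 grain in stock
    logic.consume('grain', 2)
    logic.charge_idle_money()
    print(logic.pop.money)             # 8 after the default idle charge of 2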
|
from gym_snake.envs.constants import GridType
from gym_snake.envs.snake_env import SnakeEnv
class Snake_4x4_DeadApple(SnakeEnv):
def __init__(self):
super().__init__(grid_size=4, initial_snake_size=2, done_apple=True)
class Snake_8x8_DeadApple(SnakeEnv):
def __init__(self):
super().__init__(grid_size=8, done_apple=True)
class Snake_16x16_DeadApple(SnakeEnv):
def __init__(self):
super().__init__(grid_size=16, done_apple=True)
class Snake_Hex_4x4_DeadApple(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=4, initial_snake_size=2, done_apple=True)
class Snake_Hex_8x8_DeadApple(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=8, done_apple=True)
class Snake_Hex_16x16_DeadApple(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=16, done_apple=True)
class Snake_4x4(SnakeEnv):
def __init__(self):
super().__init__(grid_size=4, initial_snake_size=2)
class Snake_8x8(SnakeEnv):
def __init__(self):
super().__init__(grid_size=8)
class Snake_16x16(SnakeEnv):
def __init__(self):
super().__init__(grid_size=16)
class Snake_Hex_4x4(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=4, initial_snake_size=2)
class Snake_Hex_8x8(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=8)
class Snake_Hex_16x16(SnakeEnv):
def __init__(self):
        super().__init__(grid_type=GridType.hex, grid_size=16)
class Snake_4x4_4a(SnakeEnv):
def __init__(self):
super().__init__(grid_size=4, initial_snake_size=2, num_apples=4)
class Snake_8x8_4a(SnakeEnv):
def __init__(self):
super().__init__(grid_size=8, num_apples=4)
class Snake_16x16_4a(SnakeEnv):
def __init__(self):
super().__init__(grid_size=16, num_apples=4)
class Snake_Hex_4x4_4a(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=4, initial_snake_size=2, num_apples=4)
class Snake_Hex_8x8_4a(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=8, num_apples=4)
class Snake_Hex_16x16_4a(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=16, num_apples=4)
class Snake_4x4_Expand(SnakeEnv):
def __init__(self):
super().__init__(grid_size=4, initial_snake_size=2, reward_none=1, num_apples=0)
class Snake_8x8_Expand(SnakeEnv):
def __init__(self):
super().__init__(grid_size=8, always_expand=True, reward_none=1, num_apples=0)
class Snake_16x16_Expand(SnakeEnv):
def __init__(self):
super().__init__(grid_size=16, always_expand=True, reward_none=1, num_apples=0)
class Snake_Hex_4x4_Expand(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=4, initial_snake_size=2, reward_none=1, num_apples=0)
class Snake_Hex_8x8_Expand(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=8, always_expand=True, reward_none=1, num_apples=0)
class Snake_Hex_16x16_Expand(SnakeEnv):
def __init__(self):
super().__init__(grid_type=GridType.hex, grid_size=16, always_expand=True, reward_none=1, num_apples=0)
class Snake_4x4_DeadApple_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=4, initial_snake_size=2, done_apple=True)
class Snake_8x8_DeadApple_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=8, done_apple=True)
class Snake_16x16_DeadApple_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=16, done_apple=True)
class Snake_Hex_4x4_DeadApple_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=4, initial_snake_size=2, done_apple=True)
class Snake_Hex_8x8_DeadApple_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=8, done_apple=True)
class Snake_Hex_16x16_DeadApple_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=16, done_apple=True)
class Snake_4x4_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=4, initial_snake_size=2)
class Snake_8x8_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=8)
class Snake_16x16_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=16)
class Snake_Hex_4x4_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=4, initial_snake_size=2)
class Snake_Hex_8x8_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=8)
class Snake_Hex_16x16_2s(SnakeEnv):
def __init__(self):
        super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=16)
class Snake_4x4_4a_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=4, initial_snake_size=2, num_apples=4)
class Snake_8x8_4a_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=8, num_apples=4)
class Snake_16x16_4a_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=16, num_apples=4)
class Snake_Hex_4x4_4a_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=4, initial_snake_size=2, num_apples=4)
class Snake_Hex_8x8_4a_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=8, num_apples=4)
class Snake_Hex_16x16_4a_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=16, num_apples=4)
class Snake_4x4_Expand_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=4, initial_snake_size=2, reward_none=1, num_apples=0)
class Snake_8x8_Expand_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=8, always_expand=True, reward_none=1, num_apples=0)
class Snake_16x16_Expand_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_size=16, always_expand=True, reward_none=1, num_apples=0)
class Snake_Hex_4x4_Expand_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=4, initial_snake_size=2, reward_none=1, num_apples=0)
class Snake_Hex_8x8_Expand_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=8, always_expand=True, reward_none=1, num_apples=0)
class Snake_Hex_16x16_Expand_2s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=2, grid_type=GridType.hex, grid_size=16, always_expand=True, reward_none=1, num_apples=0)
class Snake_4x4_DeadApple_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=4, initial_snake_size=2, done_apple=True)
class Snake_8x8_DeadApple_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=8, done_apple=True)
class Snake_16x16_DeadApple_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=16, done_apple=True)
class Snake_Hex_4x4_DeadApple_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=4, initial_snake_size=2, done_apple=True)
class Snake_Hex_8x8_DeadApple_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=8, done_apple=True)
class Snake_Hex_16x16_DeadApple_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=16, done_apple=True)
class Snake_4x4_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=4, initial_snake_size=2)
class Snake_8x8_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=8)
class Snake_16x16_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=16)
class Snake_Hex_4x4_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=4, initial_snake_size=2)
class Snake_Hex_8x8_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=8)
class Snake_Hex_16x16_3s(SnakeEnv):
def __init__(self):
        super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=16)
class Snake_4x4_4a_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=4, initial_snake_size=2, num_apples=4)
class Snake_8x8_4a_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=8, num_apples=4)
class Snake_16x16_4a_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=16, num_apples=4)
class Snake_Hex_4x4_4a_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=4, initial_snake_size=2, num_apples=4)
class Snake_Hex_8x8_4a_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=8, num_apples=4)
class Snake_Hex_16x16_4a_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=16, num_apples=4)
class Snake_4x4_Expand_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=4, initial_snake_size=2, reward_none=1, num_apples=0)
class Snake_8x8_Expand_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=8, always_expand=True, reward_none=1, num_apples=0)
class Snake_16x16_Expand_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_size=16, always_expand=True, reward_none=1, num_apples=0)
class Snake_Hex_4x4_Expand_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=4, initial_snake_size=2, reward_none=1, num_apples=0)
class Snake_Hex_8x8_Expand_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=8, always_expand=True, reward_none=1, num_apples=0)
class Snake_Hex_16x16_Expand_3s(SnakeEnv):
def __init__(self):
super().__init__(num_snakes=3, grid_type=GridType.hex, grid_size=16, always_expand=True, reward_none=1, num_apples=0)
|
from colossalai.nn.optimizer import HybridAdam
from colossalai.zero.shard_utils import (BucketTensorShardStrategy,
TensorShardStrategy)
from model import GPT2_small_pipeline_hybrid
BATCH_SIZE = 8
NUM_EPOCHS = 60
SEQ_LEN = 1024
NUM_MICRO_BATCHES = 4
HIDDEN_SIZE = 768
TENSOR_SHAPE = (BATCH_SIZE // NUM_MICRO_BATCHES, SEQ_LEN, HIDDEN_SIZE)
zero = dict(
model_config=dict(
tensor_placement_policy='cpu',
shard_strategy=BucketTensorShardStrategy()
),
optimizer_config=dict()
)
optimizer = dict(
type=HybridAdam,
lr=0.00015,
weight_decay=1e-2,
)
model = dict(
type=GPT2_small_pipeline_hybrid,
checkpoint=True,
num_chunks=1
)
parallel = dict(
pipeline=2,
tensor=dict(size=2, mode='1d'),
)
|
from .. import BaseTest
from sentry import app
from sentry.client import ClientProxy
from sentry.client.logging import LoggingSentryClient
class ClientTest(BaseTest):
def test_client_proxy(self):
proxy = ClientProxy(app)
app.config['CLIENT'] = 'sentry.client.logging.LoggingSentryClient'
self.assertTrue(isinstance(proxy._ClientProxy__get_client(), LoggingSentryClient))
self.assertEquals(proxy._ClientProxy__get_client(), proxy._ClientProxy__get_client())
app.config['CLIENT'] = 'sentry.client.base.SentryClient'
self.assertFalse(isinstance(proxy._ClientProxy__get_client(), LoggingSentryClient))
self.assertEquals(proxy._ClientProxy__get_client(), proxy._ClientProxy__get_client())
|
"""Реализация базовых команд для асинхронного запроса к базе
данных Postgres через Django ORM.
"""
import os
import sys
from pathlib import Path
from typing import List, Optional, Union
from asgiref.sync import sync_to_async
ROOT_DIR = Path(__file__).parents[3]
MODEL_PATH = os.path.join(ROOT_DIR, "django_project")
sys.path.append(MODEL_PATH)
from iroxin.models import Product, Subscriber
@sync_to_async
def add_subscriber(
user_id: Union[str, int],
username: str,
first_name: Optional[str],
last_name: Optional[str],
) -> Subscriber:
"""Добавить подписчика."""
sub = Subscriber.objects.create(
user_id=int(user_id),
first_name=first_name,
last_name=last_name,
username=username,
)
sub.save()
return sub
@sync_to_async
def get_all_subscribers() -> List[Subscriber]:
"""Получить всех подписчиков."""
users = Subscriber.objects.all()
return users
@sync_to_async
def get_subscriber(user_id: int) -> Optional[Subscriber]:
"""Получить подписчика по user_id."""
if Subscriber.objects.filter(user_id=user_id).exists():
subscriber = Subscriber.objects.get(user_id=user_id)
return subscriber
return None
async def get_then_update(
user_id: int, name: str, email: str, phone: str
) -> Subscriber:
"""Получение и обновление подписчика."""
subscriber = await get_subscriber(user_id)
if not subscriber.first_name:
subscriber.first_name = name
if not subscriber.email:
subscriber.email = email
if not subscriber.phone:
subscriber.phone = phone
    # Django's ORM save() is synchronous, so run it through sync_to_async here
    await sync_to_async(subscriber.save)()
return subscriber
@sync_to_async
def get_count_subscribers() -> int:
"""Получить число зарегистрированных подписчиков."""
total = Subscriber.objects.all().count()
return total
@sync_to_async
def get_product(title: str) -> Optional[Product]:
"""Получить товары title."""
prod = Product.objects.get(title=title)
return prod
@sync_to_async
def get_all_products() -> List[Product]:
"""Получить все товары."""
prods = Product.objects.all()
return prods
@sync_to_async
def get_count_products() -> int:
"""Получить количество товаров."""
count = Product.objects.all().count()
return count
async def get_page(page: int = 1) -> Product:
    """Get the product for the given page."""
    max_val = await get_count_products()
    if page >= max_val:
        # clamp to the last valid index to avoid an IndexError
        page = max_val - 1
    arr = await get_all_products()
    return arr[page]
|
class Animal:
def __init__(self):
print("================= Animal ctor")
def eat(self):
print("Animal eat meat")
class Cat(Animal):
    """ Derived class Cat """
    def __init__(self):
        #######################################
        # Call the base class __init__ via super().
        # It is the preferred way to do it
        #######################################
        super().__init__()
        print("================= Cat ctor")
#######################################
# Override Animal eat method
def eat(self):
print("Cats eat fishes")
#######################################
# First method to call base eat
Animal.eat(self)
#######################################
# Second method to call base eat
super(Cat, self).eat()
#######################################
# Third method to call base eat.
# It is a preferred way to use
#######################################
super().eat()
def __str__(self):
s = super().__str__()
print("==================================")
print(s)
print("==================================")
return "Mars cat"
cat = Cat()
#######################################
# Call the overridden eat method
cat.eat()
print(cat)
|
import os
import hashlib
import functools
from wpull.database.sqltable import SQLiteURLTable
from wpull.document.html import HTMLReader
from wpull.processor.rule import ProcessingRule
from libgrabsite import dupespotter
from libgrabsite.dupes import DupesOnDisk
def response_body_size(response) -> int:
try:
return response.body.size()
except Exception:
return 0
class NoFsyncSQLTable(SQLiteURLTable):
@classmethod
def _apply_pragmas_callback(cls, connection, record):
super()._apply_pragmas_callback(connection, record)
connection.execute('PRAGMA synchronous=OFF')
class DupeSpottingProcessingRule(ProcessingRule):
def __init__(self, *args, **kwargs):
self.dupes_db = kwargs.pop('dupes_db', None)
super().__init__(*args, **kwargs)
def scrape_document(self, item_session):
response = item_session.response
url_info = item_session.request.url_info
url = url_info.raw
if response_body_size(response) < 30 * 1024 * 1024:
dupes_db = self.dupes_db
body = response.body.content()
if HTMLReader.is_response(response):
body = dupespotter.process_body(body, url)
digest = hashlib.md5(body).digest()
if dupes_db is not None:
dupe_of = dupes_db.get_old_url(digest)
else:
dupe_of = None
if dupe_of is not None:
# Don't extract links from pages we've already seen
# to avoid loops that descend a directory endlessly
print("DUPE {}\n OF {}".format(url, dupe_of))
return
else:
if dupes_db is not None:
dupes_db.set_old_url(digest, url)
super().scrape_document(item_session)
def activate(app_session):
app_session.factory.class_map['URLTableImplementation'] = NoFsyncSQLTable
if int(os.environ["DUPESPOTTER_ENABLED"]):
dupes_db_location = os.path.join(os.environ["GRAB_SITE_WORKING_DIR"], "dupes_db")
dupes_db = DupesOnDisk(dupes_db_location)
app_session.factory.class_map['ProcessingRule'] = \
functools.partial(DupeSpottingProcessingRule, dupes_db=dupes_db)
|
import numpy as np
# import os
from PIL import Image
import matplotlib.pyplot as plt
import sklearn.metrics as sm
import csv
def compute_precision_recall(score_A_np):
array_5 = np.where(score_A_np[:, 1] == 5.0)
array_7 = np.where(score_A_np[:, 1] == 7.0)
print("len(array_5), ", len(array_5))
print("len(array_7), ", len(array_7))
mean_5 = np.mean((score_A_np[array_5])[:, 0])
mean_7 = np.mean((score_A_np[array_7])[:, 0])
medium = (mean_5 + mean_7) / 2.0
print("mean_5, ", mean_5)
print("mean_7, ", mean_7)
print("medium, ", medium)
array_upper = score_A_np[:, 0] >= medium
array_lower = score_A_np[:, 0] < medium
print("np.sum(array_upper.astype(np.float32)), ", np.sum(array_upper.astype(np.float32)))
print("np.sum(array_lower.astype(np.float32)), ", np.sum(array_lower.astype(np.float32)))
array_5_tf = score_A_np[:, 1] == 5.0
array_7_tf = score_A_np[:, 1] == 7.0
print("np.sum(array_5_tf.astype(np.float32)), ", np.sum(array_5_tf.astype(np.float32)))
print("np.sum(array_7_tf.astype(np.float32)), ", np.sum(array_7_tf.astype(np.float32)))
tn = np.sum(np.equal(array_lower, array_5_tf).astype(np.int32))
tp = np.sum(np.equal(array_upper, array_7_tf).astype(np.int32))
fp = np.sum(np.equal(array_upper, array_5_tf).astype(np.int32))
fn = np.sum(np.equal(array_lower, array_7_tf).astype(np.int32))
precision = tp / (tp + fp + 0.00001)
recall = tp / (tp + fn + 0.00001)
return tp, fp, tn, fn, precision, recall
def save_graph(x, y, filename, epoch):
plt.plot(x, y)
plt.title('ROC curve ' + filename + ' epoch:' + str(epoch))
# x axis label
plt.xlabel("FP / (FP + TN)")
# y axis label
plt.ylabel("TP / (TP + FN)")
# save
plt.savefig(filename + '_ROC_curve_epoch' + str(epoch) +'.png')
plt.close()
def make_ROC_graph(score_A_np, filename, epoch):
argsort = np.argsort(score_A_np, axis=0)[:, 0]
score_A_np_sort = score_A_np[argsort][::-1]
value_1_0 = (np.where(score_A_np_sort[:, 1] == 7., 1., 0.)).astype(np.float32)
# score_A_np_sort_0_1 = np.concatenate((score_A_np_sort, value_1_0), axis=1)
sum_1 = np.sum(value_1_0)
len_s = len(score_A_np)
sum_0 = len_s - sum_1
tp = np.cumsum(value_1_0).astype(np.float32)
index = np.arange(1, len_s + 1, 1).astype(np.float32)
fp = index - tp
fn = sum_1 - tp
tn = sum_0 - fp
tp_ratio = tp / (tp + fn + 0.00001)
fp_ratio = fp / (fp + tn + 0.00001)
save_graph(fp_ratio, tp_ratio, filename, epoch)
auc = sm.auc(fp_ratio, tp_ratio)
return auc
def unnorm_img(img_np):
img_np_255 = (img_np + 1.0) * 127.5
img_np_255_mod1 = np.maximum(img_np_255, 0)
img_np_255_mod1 = np.minimum(img_np_255_mod1, 255)
img_np_uint8 = img_np_255_mod1.astype(np.uint8)
return img_np_uint8
def convert_np2pil(images_255):
list_images_PIL = []
for num, images_255_1 in enumerate(images_255):
# img_255_tile = np.tile(images_255_1, (1, 1, 3))
image_1_PIL = Image.fromarray(images_255_1)
list_images_PIL.append(image_1_PIL)
return list_images_PIL
def make_output_img(img_batch_list, epoch, log_file_name, out_img_dir):
(data_num, img1_h, img1_w, _) = img_batch_list[0].shape
wide_image_np = np.ones(((img1_h + 1) * data_num - 1, (img1_w + 1) * len(img_batch_list) -1, 3), dtype=np.uint8) * 255
wide_image_PIL = Image.fromarray(wide_image_np)
for num_b, img_batch in enumerate(img_batch_list):
img_batch_unn = np.tile(unnorm_img(img_batch), (1, 1, 3))
img_batch_PIL = convert_np2pil(img_batch_unn)
for num, img_1 in enumerate(img_batch_PIL):
wide_image_PIL.paste(img_1, (num_b * (img1_w + 1), num * (img1_h + 1)))
wide_image_PIL.save(out_img_dir + "/resultImage_"+ log_file_name + '_' + str(epoch) + ".png")
def save_list_to_csv(rows, filename):
    # use a context manager and avoid shadowing the built-in ``list``
    with open(filename, 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerows(rows)
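
# A hedged usage sketch: ``compute_precision_recall`` expects an (N, 2) array
# whose first column holds scores and whose second column holds the class
# labels 5.0 or 7.0. The synthetic scores below are made up for illustration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    scores_5 = np.stack([rng.normal(0.3, 0.1, 50), np.full(50, 5.0)], axis=1)
    scores_7 = np.stack([rng.normal(0.7, 0.1, 50), np.full(50, 7.0)], axis=1)
    score_A_np = np.concatenate([scores_5, scores_7], axis=0)
    tp, fp, tn, fn, precision, recall = compute_precision_recall(score_A_np)
    print("precision=%.3f recall=%.3f" % (precision, recall))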
|
"""
Description: pytests for transform.py
"""
from pathlib import Path
import fiona
import geopandas as gpd
import pytest
from geotrans import transform
from geotrans.transform import (
FILEGEODATABASE_DRIVER,
GEOPACKAGE_DRIVER,
SHAPEFILE_DRIVER,
)
from tests import Helpers
def test_check_file():
    """
    Test check_file.
    """
    assert transform.check_file(Helpers.test_multilayer_file_path) is None
    # the invalid inputs should raise; pytest.raises fails the test if they do not
    with pytest.raises(IsADirectoryError):
        transform.check_file(Helpers.test_dir_path)
    with pytest.raises(FileNotFoundError):
        transform.check_file(Helpers.test_invalid_path)
@pytest.mark.parametrize(
"file_path,assumed_filetype", Helpers.test_determine_filetype_params
)
def test_determine_filetype(file_path: Path, assumed_filetype: str):
"""
Test determine_filetype.
"""
assert transform.determine_filetype(file_path) == assumed_filetype
@pytest.mark.parametrize("file_path", Helpers.test_load_multilayer_params)
def test_load_multilayer(file_path: Path):
"""
Test load_multilayer.
"""
geodataframes, layer_names = transform.load_multilayer(filepath=file_path)
assert isinstance(geodataframes, list)
assert isinstance(layer_names, list)
assert len(geodataframes) != 0
assert len(geodataframes) == len(layer_names)
for gdf, name in zip(geodataframes, layer_names):
assert isinstance(gdf, gpd.GeoDataFrame)
assert isinstance(name, str)
def test_load_singlelayer():
"""
Test load_singlelayer.
"""
geodataframes, layer_names = transform.load_singlelayer(
Helpers.test_singlelayer_file_path,
transform.determine_filetype(Helpers.test_singlelayer_file_path),
)
assert isinstance(geodataframes, list)
assert isinstance(layer_names, list)
assert len(geodataframes) + len(layer_names) == 2
for gdf, name in zip(geodataframes, layer_names):
assert isinstance(gdf, gpd.GeoDataFrame)
assert isinstance(name, str)
def test_load_geojson():
"""
Test load_geojson.
"""
geodataframes, layer_names = transform.load_singlelayer(
Helpers.test_geojson_file_save_path,
transform.determine_filetype(Helpers.test_geojson_file_save_path),
)
assert isinstance(geodataframes, list)
assert isinstance(layer_names, list)
assert len(geodataframes) + len(layer_names) == 2
for gdf, name in zip(geodataframes, layer_names):
assert isinstance(gdf, gpd.GeoDataFrame)
assert isinstance(name, str)
def test_single_save_file_geojson(tmp_path):
"""
Test single_save_file_geojson.
"""
# tmp_path is a temporary Path directory
geodataframes, layer_names = transform.load_singlelayer(
Helpers.test_singlelayer_file_path,
transform.determine_filetype(Helpers.test_singlelayer_file_path),
)
filenames = [tmp_path / Helpers.test_single_file_save_path_geojson]
try:
transform.save_files(
geodataframes,
layer_names,
transform_to_type=transform.GEOJSON,
filenames=filenames,
)
except fiona.errors.SchemaError:
print([gdf.columns for gdf in geodataframes])
raise
def test_single_save_file(tmp_path):
"""
Test single_save_file.
"""
# tmp_path is a temporary Path directory
geodataframes, layer_names = transform.load_singlelayer(
Helpers.test_singlelayer_file_path,
transform.determine_filetype(Helpers.test_singlelayer_file_path),
)
filenames = [tmp_path / Helpers.test_single_file_save_path]
try:
transform.save_files(
geodataframes,
layer_names,
filenames=filenames,
transform_to_type=transform.SHAPEFILE,
)
except fiona.errors.SchemaError:
print([gdf.columns for gdf in geodataframes])
raise
def test_multi_layer_save(tmp_path):
"""
Test multi_layer_save.
"""
geodataframes, layer_names = transform.load_multilayer(
Helpers.test_multilayer_file_path
)
assert len(geodataframes) == len(layer_names) == 2
filenames = []
for layer_name in layer_names:
filenames.append(tmp_path / f"{layer_name}_test_multi_layer.shp")
transform.save_files(geodataframes, layer_names, filenames, transform.SHAPEFILE)
def test_driver_strings():
"""
Test driver_strings.
"""
for driver in [GEOPACKAGE_DRIVER, SHAPEFILE_DRIVER, FILEGEODATABASE_DRIVER]:
assert driver in fiona.supported_drivers
def test_load_filegeodatabase(tmp_path):
"""
    Test loading a filegeodatabase.
"""
geodataframes, layer_names = transform.load_multilayer(
Helpers.test_filegeodatabase_file_path
)
assert len(geodataframes) == len(layer_names)
filenames = []
for layer_name in layer_names:
filenames.append(tmp_path / f"{layer_name}_test_filegeodatabase.shp")
# Save to multiple shapefiles
transform.save_files(geodataframes, layer_names, filenames, transform.SHAPEFILE)
# Save same files to a single geopackage
filenames = [tmp_path / "saving_filegeodatabase.gpkg"]
transform.save_files(geodataframes, layer_names, filenames, transform.GEOPACKAGE)
|
# Generated by Django 3.2.5 on 2021-07-10 08:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('budget', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='budget',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='An owner of the budget'),
),
]
|
from user.common import constants as const
class UserException(Exception):
user_default_status_code = const.DEFAULT_STATUS_CODE
def __init__(self, message, status_code=None):
Exception.__init__(self)
if status_code is None:
status_code = self.user_default_status_code
self.message = message
self.status_code = status_code
def to_dict(self):
response = dict()
response['message'] = self.message
response['status_code'] = self.status_code
return response
class UserDbException(UserException):
db_default_status_code = const.DB_DEFAULT_STATUS_CODE
def __init__(self, message, status_code=None):
if status_code is None:
status_code = self.db_default_status_code
UserException.__init__(self, message, status_code)
class UserNotFoundException(UserException):
user_not_found_status_code = const.NOT_FOUNT_STATUS_CODE
def __init__(self, message, status_code=None):
if status_code is None:
status_code = self.user_not_found_status_code
UserException.__init__(self, message, status_code)
class UnauthorizedException(UserException):
unauthorized_status_code = const.UNAUTHORIZED_STATUS_CODE
def __init__(self, message, status_code=None):
if status_code is None:
status_code = self.unauthorized_status_code
UserException.__init__(self, message, status_code)
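
# Hedged usage sketch: the concrete status codes come from
# ``user.common.constants``, so only the structure of the serialised error is
# shown here; the message text is purely illustrative.
if __name__ == '__main__':
    try:
        raise UserNotFoundException("user 42 not found")
    except UserException as exc:
        # e.g. {'message': 'user 42 not found', 'status_code': const.NOT_FOUNT_STATUS_CODE}
        print(exc.to_dict())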
|
#!/usr/bin/env python
# Group C
class Zviratko:
def __init__(self, jmeno, vek, vaha):
self.jmeno = jmeno
self.vek = vek
self.vaha = vaha
self.cisloChlivku = None
def __str__(self):
return "Jsem {}, mám {} roky a vážím {} kg.".format(self.jmeno, self.vek, self.vaha)
class Dvorecek:
def __init__(self, zviratko1, zviratko2):
self.zviratko1 = zviratko1
self.zviratko2 = zviratko2
def ubytujZviratka(self):
if self.zviratko1.vaha > self.zviratko2.vaha:
self.zviratko1.cisloChlivku = 2
self.zviratko2.cisloChlivku = 1
elif self.zviratko1.vaha < self.zviratko2.vaha:
self.zviratko1.cisloChlivku = 1
self.zviratko2.cisloChlivku = 2
elif self.zviratko1.vaha == self.zviratko2.vaha:
if self.zviratko1.vek < self.zviratko2.vek:
self.zviratko1.cisloChlivku = 1
self.zviratko2.cisloChlivku = 2
elif self.zviratko1.vek > self.zviratko2.vek:
self.zviratko2.cisloChlivku = 1
self.zviratko1.cisloChlivku = 2
return "{} Bydlím v chlívku číslo {}.\n{} Bydlím v chlívku číslo {}.".format(self.zviratko1, self.zviratko1.cisloChlivku, self.zviratko2, self.zviratko2.cisloChlivku)
def __str__(self):
return self.ubytujZviratka()
prasatko = Zviratko("prasátko", 2, 60)
telatko = Zviratko("telátko", 1, 70)
dvorecek = Dvorecek(prasatko, telatko)
print(dvorecek)
|
import re
def replace_colons(text: str, strip: bool=False) -> str:
"""Parses a string with colon encoded emoji and renders found emoji.
Unknown emoji are left as is unless `strip` is set to `True`
:param text: String of text to parse and replace
:param strip: Whether to strip unknown codes or to leave them as `:unknown:`
>>> emoji_data_python.replace_colons('Hello world ! :wave::skin-tone-3: :earth_africa: :exclamation:')
'Hello world ! 👋🏼 🌍 ❗'
"""
from emoji_data_python import emoji_short_names
def emoji_repl(matchobj) -> str:
match = matchobj.group(0)
codes = match.split(':')
res = ''
for code in codes:
if len(code) > 0:
try:
res += emoji_short_names.get(code.replace('-', '_')).char
except AttributeError:
if not strip:
res += f':{code}:'
return res
return re.sub(r'\:[a-zA-Z0-9-_+]+\:(\:skin-tone-[2-6]\:)?', emoji_repl, text)
def get_emoji_regex():
"""Returns a regex to match any emoji
>>> emoji_data_python.get_emoji_regex().findall('Hello world ! 👋🏼 🌍 ❗')
['👋', '🏼', '🌍', '❗']
"""
from emoji_data_python import emoji_data
    # Sort emojis by length to make sure multi-character emojis are
    # matched first
emojis = sorted([emoji.char for emoji in emoji_data], key=len, reverse=True)
pattern = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'
return re.compile(pattern)
|
class CalculatedDay:
"""Base class for calculated days
@actualDate - date of the day
@weekday - name of the day
@availableHours - number of hours available for occupation
@occupiedHours - number of hours occupied
@availableLecturers - dictionary of lecturers available this day with possible "prefer" flag
@availableSubjects - list of subjects on this day
@dayId - ID of the day
"""
def __init__(self, actualDate, weekday, availableHours, occupiedHours, availableLecturers, availableSpecializations, possibleSubjects, dayId):
self.actualDate = actualDate
self.weekday = weekday
self.availableHours = availableHours
self.occupiedHours = occupiedHours
self.availableLecturers = availableLecturers
self.availableSpecializations = availableSpecializations
self.possibleSubjects = possibleSubjects
self.dayId = dayId
def print(self):
print(
"Date:", self.actualDate, "\n",
"Day:", self.weekday, "\n",
"Available hours:", self.availableHours, "\n",
"Occupied hours:", self.occupiedHours, "\n",
"Available lecturers:", self.availableLecturers, "\n",
"Available specializations:", self.availableSpecializations, "\n",
"Possible subjects:", self.possibleSubjects, "\n",
"ID:", self.dayId, "\n")
|
#!/usr/bin/env python
import os
import re
import subprocess
import sys
EDIT_FILE = '/tmp/todo.txt'
# EDITOR = os.environ.get('EDITOR','vim')
EDITOR = 'vim'
query = " | ".join(sys.argv[1:])
# execute taskmaster task
def exec_task(ids, *cmd):
args = ["tm", "tk"] + list(cmd) + ids
print(args)
sp = subprocess.run(args)
if sp.returncode != 0:
print("subprocess error: ", sp.returncode)
print(sp.stderr)
exit(sp.returncode)
# execute notepad
def open_note(ids):
p = subprocess.Popen(["tm", "tk", "taskname", "-f", *ids], stdout=subprocess.PIPE)
tasknames = p.stdout.readlines()
p.wait()
for taskname in tasknames:
taskname = taskname.strip().decode()
subprocess.call(["lnch", "/home/martin/bin/poznKUkolu.sh", taskname]) # TODO Lebeda - configure
def copy_taskname(ids):
"""Copy taskname to clipboard"""
p = subprocess.Popen(["tm", "tk", "taskname", *ids], stdout=subprocess.PIPE)
tasknames = p.stdout.readlines()
p.wait()
tasknames_str = "".join(map(bytes.decode, tasknames))
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(input=tasknames_str.encode())
def open_url(names):
'''open urls from task'''
for name in names:
urls = re.findall('https*://\S+', name)
for url in urls:
subprocess.run(["vivaldi-stable", url]) # TODO Lebeda - configure
def open_jira(names):
'''Open task in jira - proprietary MC function'''
for name in names:
jira_tasks = re.findall('MCV-\d+', name)
for jira_task in jira_tasks:
subprocess.run(["vivaldi-stable", "http://mcv.marbes.cz/browse/" + jira_task])
# only for debug functions
# open_url(['https://www.databazeknih.cz/knihy/milenium-muz-ktery-hledal-svuj-stin-342724', 'http://github.com/tmux-plugins/tmux-open'])
# exit(0)
def edit_tasks(EDIT_FILE):
subprocess.call([EDITOR, EDIT_FILE])
subprocess.call(["tm", "tk", "import", EDIT_FILE])
showMaybe = False
while True:
    # Main loop
showMaybeParam = ""
if showMaybe:
print("maybe enabled")
showMaybeParam = "-m"
p1 = subprocess.Popen(["tm", "tk", "-C", "ls", showMaybeParam], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["fzf", '--multi', '--no-sort', '--border', "--ansi",
'--reverse', '--print-query', '--query=' + query,
'--expect=ctrl-w,ctrl-p,ctrl-u,ctrl-j,ctrl-alt-a,ctrl-alt-b,ctrl-alt-c,ctrl-alt-d,ctrl-c,'
+ 'f5,f8,alt-a,alt-b,alt-c,alt-d,alt-e,alt-l,'
+ 'f2,f4,ctrl-alt-r,ctrl-alt-m,alt-m,alt-n'],
stdin=p1.stdout, stdout=subprocess.PIPE)
p1.wait()
lines = p2.stdout.readlines()
exit_code = p2.wait()
if exit_code == 130:
print("Exit task with", exit_code)
exit(0)
query = lines[0].strip().decode()
lines.remove(lines[0])
print("query ", query)
key = lines[0].strip().decode()
lines.remove(lines[0])
print("key ", key)
if key == 'ctrl-alt-a':
query = '\'(A)'
continue # only refresh with new query
if key == 'ctrl-alt-b':
query = '\'(A) | \'(B)'
continue # only refresh with new query
if key == 'ctrl-alt-c':
query = '\'(A) | \'(B) | \'(C)'
continue # only refresh with new query
if key == 'ctrl-alt-d':
query = '\'@defered'
continue # only refresh with new query
if key == 'f5':
continue # only refresh
if key == 'ctrl-alt-m':
showMaybe = not showMaybe
if showMaybe:
print("show maybe enabled")
else:
print("show maybe disabled")
continue
if key == 'f2':
if os.path.exists(EDIT_FILE):
os.remove(EDIT_FILE)
edit_tasks(EDIT_FILE)
ids = []
names = []
for line in lines:
taskId = re.sub(r' .*', "", line.strip().decode())
taskName = re.sub(r'^\d+ *', "", line.strip().decode())
# taskNameStriped = re.sub(r'^\([A-Z]\) *', "", taskName).strip()
# taskNameStriped = re.sub(r' [+@][^ ]+', "", taskNameStriped).strip()
ids.append(taskId)
names.append(taskName)
if key == '':
# print("mark done ", taskId)
exec_task(ids, 'done')
elif key == 'f8':
exec_task(ids, "defer", "--context", "@defered") # TODO Lebeda - konstantu za param
elif key == 'f4':
exec_task(ids, "export", EDIT_FILE)
edit_tasks(EDIT_FILE)
elif key == 'ctrl-alt-r':
exec_task(ids, "delete")
elif key == 'ctrl-w':
exec_task(ids, "work", "-w")
elif key == 'alt-a':
exec_task(ids, "prio", "A")
elif key == 'alt-b':
exec_task(ids, "prio", "B")
elif key == 'alt-c':
exec_task(ids, "prio", "C")
    elif key == 'alt-d':
        # fzf reports --expect keys in lowercase, so match 'alt-d'/'alt-e' here
        exec_task(ids, "prio", "D")
    elif key == 'alt-e':
        exec_task(ids, "prio", "E")
elif key == 'alt-l':
exec_task(ids, "prio", "-c")
elif key == 'alt-n':
exec_task(ids, "normal")
elif key == 'alt-m':
exec_task(ids, "maybe")
elif key == 'ctrl-p':
open_note(ids)
elif key == 'ctrl-c':
copy_taskname(ids)
elif key == 'ctrl-j':
open_jira(names)
elif key == 'ctrl-u':
open_url(names)
# exit(0) # only for debug
|
from functools import wraps
from typing import Any, Callable
import torch
import torch.nn as nn
from torch import Tensor
from .quant_functions import Round_STE, IntervalQuantizeIntO, IntervalQuantize
round_STE = Round_STE.apply
interval_quantize_int_o = IntervalQuantizeIntO.apply
interval_quantize = IntervalQuantize.apply
minimal_num = 1e-12
__all__ = ['Quantizer']
from .quant_functions import SymmSignedQuantize
symmsignedquantize = SymmSignedQuantize.apply
def _quant_unimplemented(self, *input: Any) -> None:
r"""Defines the computation performed at every call.
Should be overridden by all subclasses.
.. note::
Although the recipe for forward pass needs to be defined within
this function, one should call the :class:`Module` instance afterwards
instead of this since the former takes care of running the
registered hooks while the latter silently ignores them.
"""
raise NotImplementedError
def _get_interval_unimplemented(self, *input: Any) -> None:
r"""Defines the computation performed at every call.
Should be overridden by all subclasses.
.. note::
Although the recipe for forward pass needs to be defined within
this function, one should call the :class:`Module` instance afterwards
instead of this since the former takes care of running the
registered hooks while the latter silently ignores them.
"""
raise NotImplementedError
class Quantizer(nn.Module):
def __init__(self):
super().__init__()
self.calibration = False
self.calibrated = None
# def quantize(self, input, interval, max_int, min_int, *args, **kwargs):
# return input
quant: Callable[..., Any] = _quant_unimplemented
get_interval: Callable[..., Any] = _get_interval_unimplemented
def reset(self):
self.calibrated = None
class IntervalQuantizer(Quantizer):
def __init__(self, bitwidth=8):
super().__init__()
# self.bitwidth = bitwidth
self.max_value = 2 ** (bitwidth - 1) - 1
def forward(self, tensor, *args, **kwargs):
# print('Executing wrap()')
interval = self.get_interval(tensor)
# print('Decorator Parameters:', self.sum, self.counter)
# print('Execute ' + self.quant.__name__ + '()')
qa = self.quant(tensor, interval, self.max_value, -self.max_value, *args, **kwargs)
# print(self.quant.__name__ + '() finished')
return qa
class AverageInterval(IntervalQuantizer):
def __init__(self, bitwidth=8):
super().__init__(bitwidth)
self.register_buffer('sum', torch.zeros(1,))
self.register_buffer('counter', torch.zeros(1,))
def get_interval(self, tensor):
with torch.no_grad():
# print((self.counter))
if self.training or (self.calibration and not self.calibrated):
interval = tensor.abs().max() / (self.max_value) + minimal_num
self.counter.data += 1
self.sum.data += interval
else:
interval = self.sum / self.counter
return interval
def reset(self):
super().reset()
self.register_buffer('sum', torch.zeros(1,))
self.register_buffer('counter', torch.zeros(1,))
def extra_repr(self) -> str:
return super().extra_repr() + "id:{}".format(id(self))
class Maxinterval(IntervalQuantizer):
def get_interval(self, x) -> Tensor:
interval = x.abs().max() / self.max_value + minimal_num
return interval
class AverageLinearSignSymmIntervalQuantizer(AverageInterval):
quant = interval_quantize
class AverageLinearSignSymmIntervalQuantizerIntO(AverageInterval):
quant = interval_quantize_int_o
class UniformQ(Maxinterval):
quant = interval_quantize
if __name__=="__main__":
print('Prepare to use decorated function')
quantizer1 = AverageLinearSignSymmIntervalQuantizer(8)
quantizer2 = AverageLinearSignSymmIntervalQuantizer(8)
for i in range(15):
a = torch.rand(100)
qa = quantizer1(a)
qa = quantizer2(a)
# print(id(quantizer))
print(id(quantizer1.sum))
print('Test finished')
print((a-qa).abs().max())
|
magos = ["Dynamo", "Enmascarado", "Chris Angel", "Mago De Oz"]
def show_magicians(lista):
for mago in lista:
print(mago)
print("\n")
show_magicians(magos)
def make_great(lista):
    # build and return the list of "great" names so exercise 8-11 can print it
    grandes = []
    for mago in lista:
        presentacion = "The Great " + mago.title()
        print(presentacion)
        grandes.append(presentacion)
    print("\n")
    return grandes
make_great(magos)
show_magicians(magos)
#8-11
magosGreat = make_great(magos[:])
print(magosGreat)
|
#!/usr/bin/env python
import rospy
import roslib
from fiducial_msgs.msg import FiducialTransformArray
import tf
from tf import transformations as t
import numpy as np
import numpy.matlib as npm
import tf2_ros
import geometry_msgs.msg
# https://answers.ros.org/question/322317/combine-two-parent-child-transformations-for-common-link/
if __name__ == '__main__':
rospy.init_node('map_broadcaster')
camera_name = rospy.get_param('~camera_name')
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
rate = rospy.Rate(5.0)
while not rospy.is_shutdown():
rospy.wait_for_service('/{}/extrinsic_calibration'.format(camera_name))
try:
transformstamped = tfBuffer.lookup_transform(
'{}_camera_fid_0'.format(camera_name),
'{}_camera_base'.format(camera_name), rospy.Time())
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rate.sleep()
continue
br = tf2_ros.StaticTransformBroadcaster()
static_tf = geometry_msgs.msg.TransformStamped()
static_tf.header.stamp = rospy.Time.now()
static_tf.header.frame_id = 'azure1_camera_fid_0'
static_tf.child_frame_id = '{}_camera_base'.format(camera_name)
static_tf.transform = transformstamped.transform
br.sendTransform(static_tf)
rospy.loginfo_once("published static tf: azure1_camera_fid_0 -> {}_camera_base".format(camera_name))
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, '.')
import argparse
from PIL import Image
import matplotlib.pyplot as plt
# utils
from utils.inference import inference
def main(args):
i2t_results, t2i_results = inference(args.cfg_file, args.checkpoint_dir)
if args.query_img:
query_img_id = int(args.query_img[-10:-4])
        assert query_img_id in i2t_results, \
            "Please make sure query img is in data/minitest_images/"
        print('Query image:', args.query_img)
        print('Retrieval results:')
query_result = i2t_results[query_img_id]
for i in range(5):
print(f'Top{i+1}: {query_result[i]}')
if args.query_txt:
        assert args.query_txt in t2i_results, \
            "Please make sure query txt is in data/minitest_captions.txt"
        print('Query text:')
        print(args.query_txt)
        print('Retrieval results:')
query_result = t2i_results[args.query_txt]
for i in range(5):
print('Top[%d]: data/minitest_images/COCO_val2014_000000%06d.jpg' % (i+1, query_result[i]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg_file', type=str,
default='configs/inference.yaml',
help='Path to the config file for a specific experiment.')
parser.add_argument('--query_img', type=str,
help='Which img to be queried.')
parser.add_argument('--query_txt', type=str,
help='Which txt to be queried.')
parser.add_argument('--checkpoint_dir', type=str,
default='exp/finetune_retrieval_22Y_02M_13D_23H/checkpoint-30',
help='Path to the pretrained weights.')
args = parser.parse_args()
# Make sure query img or txt exists
assert args.query_img or args.query_txt, \
"Please specify query img or query txt."
# Make sure checkpoint dir exists
assert os.path.isdir(args.checkpoint_dir), \
"Please make sure the specified checkpoint dir and eval epoch exist."
# Call main
main(args)
|
'''
https://leetcode.com/problems/longest-line-of-consecutive-one-in-matrix/
562. Longest Line of Consecutive One in Matrix
Given an m x n binary matrix mat, return the length of the longest line of consecutive one in the matrix.
The line could be horizontal, vertical, diagonal, or anti-diagonal.
'''
'''
Accepted
'''
class Solution:
def longestLine(self, mat: [[int]]) -> int:
# maps each point (i,j) to the length of longest consecutive 1s in each direction
# goes right
horizontal = {}
# goes down
vertical = {}
# goes right down
diagonal = {}
# goes left down
anti_diagonal = {}
# we know that 1 <= m,n so below is safe to do
m = len(mat)
n = len(mat[0])
# length of longest sequence of consecutive ones
max_length = 0
def getLongestLineAtPoint(i, j):
nonlocal max_length, mat, horizontal, vertical, diagonal, m, n
# we check horizontally to the right
# by default, since current (i,j) == 1 then horizontal[(i,j)] = 1
horizontal[(i, j)] = 1
if j + 1 < n and mat[i][j + 1] == 1:
# check the longest `horizontal` for (i, j+1)
# and do +1 for it to get the longest horizontal for (i,j)
horizontal[(i, j)] = horizontal[(i, j + 1)] + 1
max_length = max(max_length, horizontal[(i, j)])
# we check vertically down
# by default, since current (i,j) == 1 then vertical[(i,j)] = 1
vertical[(i, j)] = 1
if i + 1 < m and mat[i + 1][j] == 1:
# check the longest `vertical` for (i + 1, j)
# and do +1 for it to get the longest vertical for (i,j)
vertical[(i, j)] = vertical[(i + 1, j)] + 1
max_length = max(max_length, vertical[(i, j)])
# we check diagonally right down
# by default, since current (i,j) == 1 then diagonal[(i,j)] = 1
diagonal[(i, j)] = 1
if (i + 1 < m and j + 1 < n) and mat[i + 1][j + 1] == 1:
diagonal[(i, j)] = diagonal[(i + 1, j + 1)] + 1
max_length = max(max_length, diagonal[(i, j)])
# we check diagonally left down
anti_diagonal[(i, j)] = 1
if (i + 1 < m and j - 1 >= 0) and mat[i + 1][j - 1] == 1:
anti_diagonal[(i, j)] = anti_diagonal[(i + 1, j - 1)] + 1
max_length = max(max_length, anti_diagonal[(i, j)])
# since we check to the right of each point and under each point
# it's better if we start building our memos from the bottom right
# corner of the matrix. That way, we'd have our results ready for us
# as we go through the matrix
for i in range(m - 1, -1, -1):
for j in range(n - 1, -1, -1):
# we don't care about points that are 0
if mat[i][j] == 1:
getLongestLineAtPoint(i, j)
return max_length
# print(Solution().longestLine(mat=[[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 1]]))
# print(Solution().longestLine(mat = [[1,1,1,1],[0,1,1,0],[0,0,0,1]]))
# print(Solution().longestLine(mat = [[1]]))
# print(Solution().longestLine(mat = [[0]]))
print(Solution().longestLine(
[[1, 1, 0, 0, 1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 0, 0, 1], [1, 1, 1, 1, 1, 0, 0, 1, 1, 1], [0, 1, 0, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 0, 1, 0, 1, 1, 1, 1]]))
|
import de_core_news_sm
#import de_core_news_md
import networkx as nx
import matplotlib.pyplot as plt
text1 = u'''Wie gehe ich mit einem Demenzkranken bei Tod der Mutter um?'''
text2 = u'''Ausnutzung der Demenz zur finanziellen Bereicherung'''
text3 = u'''Wie könnte ich anders auf die immer gleiche Frage reagieren?'''
text4 = u'''Wie kann man Fremde auf Demenz hinweisen?'''
text5 = u'''Meine Mutter verweigert Hilfe bei der Pflege'''
# load language model
nlp = de_core_news_sm.load()
def __plot_graph(graph):
pos = nx.spring_layout(graph) # positions for all nodes
nx.draw_networkx_nodes(graph, pos, node_size=200) # nodes
nx.draw_networkx_edges(graph, pos, width=1) # edges
nx.draw_networkx_labels(graph, pos, font_size=12, font_family='sans-serif') # labels
plt.axis('off') # disable axis plot
plt.show()
def __build_undirected_graph(sentence, plot=False):
doc = nlp(sentence)
edges = []
for token in doc:
for child in token.children:
            # TODO indicate direction of the relationship - maybe with the help of the child token's dependency label
source = token.lower_
sink = child.lower_
edges.append((f'{source}',
f'{sink}'))
graph = nx.Graph(edges)
if plot:
__plot_graph(graph)
return graph
__build_undirected_graph(text1, plot=True)
# NER
#for ent in doc.ents:
# print(ent.text, ent.label_)
# NOUN chunks
# relation extraction using dependency path
# word vector representations
# Answer type detection
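# --- Hedged sketch (added; not part of the original script) ---
# One way the "relation extraction using dependency path" idea above could be
# prototyped: reuse the undirected dependency graph and ask networkx for the
# shortest path between two lowercased tokens. The example tokens below are
# illustrative assumptions.
def __shortest_dependency_path(sentence, source, sink):
    graph = __build_undirected_graph(sentence)
    if source in graph and sink in graph:
        return nx.shortest_path(graph, source=source, target=sink)
    return None

# e.g. dependency path between two content words of text1
print(__shortest_dependency_path(text1, 'mutter', 'demenzkranken'))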
|
import argparse
import ROOT
from simplot.rootplot import drawtools, rootio, drawoptions, style
###############################################################################
_out = rootio.CanvasWriter("plots")
###############################################################################
# def loadgenie():
# gROOT = ROOT.gROOT
# gSystem = ROOT.gSystem
# script_dir = gSystem.Getenv("GENIE")
# script_dir += "/src/scripts/gcint/"
# curr_dir = gSystem.pwd()
# gSystem.cd(script_dir)
# gROOT.ProcessLine(".x loadincs.C")
# gROOT.ProcessLine(".x loadlibs.C")
# gSystem.cd(curr_dir);
# gROOT.ProcessLine(".x analgenie.C")
# return
###############################################################################
def parsecml():
ap = argparse.ArgumentParser()
ap.add_argument("input_file", type=str, help="select input file")
return ap.parse_args()
###############################################################################
def gettree(fname):
tfile = ROOT.TFile(fname, "READ")
tree = tfile.Get("nRooTracker")
tree._tfile = tfile # trick to keep file open while tree exists
return tree
###############################################################################
def run(opt):
tree = gettree(opt.input_file)
tree.SetWeight(1.0)
paint = drawtools.TreePainter(tree=tree)
x = "EvtVtx[0]"
y = "EvtVtx[1]"
z = "EvtVtx[2]"
e = "StdHepP4[0][3]"
nu_px = "StdHepP4[0][0]"
nu_py = "StdHepP4[0][1]"
nu_pz = "StdHepP4[0][2]"
pdg = "StdHepPdg[0]"
pdgsplit = drawoptions.SplitDataset.from_integer_map(pdg, {"#nu_{e}" : 12,
"#bar{#nu_{e}}" : -12,
"#nu_{#mu}" : 14,
"#bar{#nu_{#mu}}" : -14,
})
weight = drawoptions.EventWeight("1.0")
canv = paint.paint("enu", "enu", "{e}".format(e=e), pdgsplit, drawoptions.UniformBinning(100, 0.0, 15.0))
_out.save(canv)
canv = paint.paint("nu_px", "nu_px", "{nu_px}".format(nu_px=nu_px))
_out.save(canv)
canv = paint.paint("nu_py", "nu_py", "{nu_py}".format(nu_py=nu_py))
_out.save(canv)
canv = paint.paint("nu_pz", "nu_pz", "{nu_pz}".format(nu_pz=nu_pz))
_out.save(canv)
canv = paint.paint("nupdg", "nupdg", pdg, pdgsplit, drawoptions.UniformBinning(31, -15.5, 15.5))
_out.save(canv)
canv = paint.paint("rnu", "rnu", "sqrt(pow({x},2)+pow({y},2))".format(x=x, y=y), weight)
_out.save(canv)
xbinning = drawoptions.UniformBinning(100, -6.0, 6.0)
zbinning = drawoptions.UniformBinning(100, -15.0, 15.0)
canv = paint.paint("xy", "xy", "{x}:{y}".format(x=x, y=y), xbinning, drawoptions.NDimensions(2), drawoptions.YBinning(xbinning), weight)
_out.save(canv)
for name, cmd in [("x", x),
("y", y),
("z", z),
]:
canv = paint.paint(name, name, cmd, zbinning, weight)
_out.save(canv)
return
###############################################################################
def main():
ROOT.gROOT.SetBatch()
style.setDefaultStyle()
opt = parsecml()
run(opt)
###############################################################################
if __name__ == "__main__":
main()
|
datalake_attribute_descriptions = {
"total": {
"description": "{metric}__!{source}",
"notes": "{notes}"
},
"by_post": {
"description": "{metric}__{source}__!{post_id}",
"notes": "{notes} post id: {post_id}"
}
}
datalake_attributes = {
"fan_count": {
"dimensions": {
"metric": "fan_count",
"source": "facebook_open"
},
"notes": "The number of users who like the Page. "
"For Global Pages this is the count for all Pages across the brand.",
"type_id": "TIME_DISCR"
},
"fan_delta": {
"dimensions": {
"metric": "fan_delta",
"source": "facebook_open"
},
"notes": "One day difference of the number of users who like the Page. "
"For Global Pages this is the count for all Pages across the brand.",
"type_id": "TIME_DISCR"
},
"talking_about_count": {
"dimensions": {
"metric": "talking_about_count",
"source": "facebook_open"
},
"notes": "The number of people talking about the Page",
"type_id": "TIME_DISCR"
},
"talking_about_delta": {
"dimensions": {
"metric": "talking_about_delta",
"source": "facebook_open"
},
"notes": "One day difference of the number of people talking about the Page",
"type_id": "TIME_DISCR"
},
"comments_count": {
"dimensions": {
"metric": "comments_count",
"source": "facebook_open"
},
"notes": "Total count of comments of the page",
"type_id": "TIME_DISCR"
},
"comments_delta": {
"dimensions": {
"metric": "comments_delta",
"source": "facebook_open"
},
"notes": "One day difference of total count of comments of the page",
"type_id": "TIME_DISCR"
},
"haha_count": {
"dimensions": {
"metric": "haha_count",
"source": "facebook_open"
},
"notes": "Total count of haha reactions of the page",
"type_id": "TIME_DISCR"
},
"haha_delta": {
"dimensions": {
"metric": "haha_delta",
"source": "facebook_open"
},
"notes": "One day difference of total count of haha reactions of the page",
"type_id": "TIME_DISCR"
},
"like_count": {
"dimensions": {
"metric": "like_count",
"source": "facebook_open"
},
"notes": "Total count of likes of the page",
"type_id": "TIME_DISCR"
},
"like_delta": {
"dimensions": {
"metric": "like_delta",
"source": "facebook_open"
},
"notes": "One day difference of total count of likes of the page",
"type_id": "TIME_DISCR"
},
"love_count": {
"dimensions": {
"metric": "love_count",
"source": "facebook_open"
},
"notes": "Total count of love reactions of the page",
"type_id": "TIME_DISCR"
},
"love_delta": {
"dimensions": {
"metric": "love_delta",
"source": "facebook_open"
},
"notes": "One day difference of total count of love reactions of the page",
"type_id": "TIME_DISCR"
},
"sad_count": {
"dimensions": {
"metric": "sad_count",
"source": "facebook_open"
},
"notes": "Total count of sad reactions of the page",
"type_id": "TIME_DISCR"
},
"sad_delta": {
"dimensions": {
"metric": "sad_delta",
"source": "facebook_open"
},
"notes": "One day difference of total count of sad reactions of the page",
"type_id": "TIME_DISCR"
},
"shares_count": {
"dimensions": {
"metric": "shares_count",
"source": "facebook_open"
},
"notes": "Total count of shares of the page",
"type_id": "TIME_DISCR"
},
"shares_delta": {
"dimensions": {
"metric": "shares_delta",
"source": "facebook_open"
},
"notes": "One day difference of total count of shares of the page",
"type_id": "TIME_DISCR"
},
"angry_count": {
"dimensions": {
"metric": "angry_count",
"source": "facebook_open"
},
"notes": "Total count of angry reactions of the page",
"type_id": "TIME_DISCR"
},
"angry_delta": {
"dimensions": {
"metric": "angry_delta",
"source": "facebook_open"
},
"notes": "One day difference of total count of angry reactions of the page",
"type_id": "TIME_DISCR"
},
"wow_count": {
"dimensions": {
"metric": "wow_count",
"source": "facebook_open"
},
"notes": "Total count of wow reactions of the page",
"type_id": "TIME_DISCR"
},
"wow_delta": {
"dimensions": {
"metric": "wow_delta",
"source": "facebook_open"
},
"notes": "One day difference of total count of wow reactions of the page",
"type_id": "TIME_DISCR"
}
}
datalake_modeling_unit_descriptions = {
"company": "!{artist}"
}
datalake_modeling_units = {
"fernando & sorocaba": {"dimensions": {"artist": "fernando_e_sorocaba"}},
"antony & gabriel": {"dimensions": {"artist": "antony_e_gabriel"}},
"bruninho & davi": {"dimensions": {"artist": "bruninho_e_davi"}},
"bruno & barretto": {"dimensions": {"artist": "bruno_e_barretto"}},
"bruno & caio cesar": {"dimensions": {"artist": "bruno_e_caio_cesar"}},
"bruno & marrone": {"dimensions": {"artist": "bruno_e_marrone"}},
"carlos & jader": {"dimensions": {"artist": "carlos_e_jader"}},
"carreiro & capataz": {"dimensions": {"artist": "carreiro_e_capataz"}},
"césar menotti & fabiano": {"dimensions": {"artist": "césar_menotti_e_fabiano"}},
"chitãozinho & xororó": {"dimensions": {"artist": "chitãozinho_e_xororó"}},
"chrystian & ralf": {"dimensions": {"artist": "chrystian_e_ralf"}},
"cleber & cauan": {"dimensions": {"artist": "cleber_e_cauan"}},
"daniel": {"dimensions": {"artist": "daniel"}},
"day & lara": {"dimensions": {"artist": "day_e_lara"}},
"diego & arnaldo": {"dimensions": {"artist": "diego_e_arnaldo"}},
"diego & marcel": {"dimensions": {"artist": "diego_e_marcel"}},
"diego & victor hugo": {"dimensions": {"artist": "diego_e_victor_hugo"}},
"dorgival dantas": {"dimensions": {"artist": "dorgival_dantas"}},
"edson & hudson": {"dimensions": {"artist": "edson_e_hudson"}},
"eduardo costa": {"dimensions": {"artist": "eduardo_costa"}},
"felipe araújo": {"dimensions": {"artist": "felipe_araújo"}},
"fernanda costa": {"dimensions": {"artist": "fernanda_costa"}},
"fiduma & jeca": {"dimensions": {"artist": "fiduma_e_jeca"}},
"fred & gustavo": {"dimensions": {"artist": "fred_e_gustavo"}},
"gabriel diniz": {"dimensions": {"artist": "gabriel_diniz"}},
"george henrique & rodrigo": {"dimensions": {"artist": "george_henrique_e_rodrigo"}},
"gino & geno": {"dimensions": {"artist": "gino_e_geno"}},
"guilherme & santiago": {"dimensions": {"artist": "guilherme_e_santiago"}},
"gustavo mioto": {"dimensions": {"artist": "gustavo_mioto"}},
"gusttavo lima": {"dimensions": {"artist": "gusttavo_lima"}},
"henrique & diego": {"dimensions": {"artist": "henrique_e_diego"}},
"henrique & juliano": {"dimensions": {"artist": "henrique_e_juliano"}},
"higor rocha": {"dimensions": {"artist": "higor_rocha"}},
"hugo & guilherme": {"dimensions": {"artist": "hugo_e_guilherme"}},
"hugo & tiago": {"dimensions": {"artist": "hugo_e_tiago"}},
"hugo del vecchio": {"dimensions": {"artist": "hugo_del_vecchio"}},
"hugo henrique": {"dimensions": {"artist": "hugo_henrique"}},
"hugo pena & gabriel": {"dimensions": {"artist": "hugo_pena_e_gabriel"}},
"humberto & ronaldo": {"dimensions": {"artist": "humberto_e_ronaldo"}},
"israel & rodolffo": {"dimensions": {"artist": "israel_e_rodolffo"}},
"israel novaes": {"dimensions": {"artist": "israel_novaes"}},
"jads & jadson": {"dimensions": {"artist": "jads_e_jadson"}},
"jefferson moraes": {"dimensions": {"artist": "jefferson_moraes"}},
"joão bosco & vinícius": {"dimensions": {"artist": "joão_bosco_e_vinícius"}},
"joão carreiro & capataz": {"dimensions": {"artist": "joão_carreiro_e_capataz"}},
"joão marcio & fabiano": {"dimensions": {"artist": "joão_marcio_e_fabiano"}},
"joão mineiro & marciano": {"dimensions": {"artist": "joão_mineiro_e_marciano"}},
"joão neto & frederico": {"dimensions": {"artist": "joão_neto_e_frederico"}},
"jorge & mateus": {"dimensions": {"artist": "jorge_e_mateus"}},
"julia & rafaela": {"dimensions": {"artist": "julia_e_rafaela"}},
"kléo dibah & rafael": {"dimensions": {"artist": "kléo_dibah_e_rafael"}},
"léo & raphael": {"dimensions": {"artist": "léo_e_raphael"}},
"léo magalhães": {"dimensions": {"artist": "léo_magalhães"}},
"leonardo": {"dimensions": {"artist": "leonardo"}},
"loubet": {"dimensions": {"artist": "loubet"}},
"luan santana": {"dimensions": {"artist": "luan_santana"}},
"lucas lucco": {"dimensions": {"artist": "lucas_lucco"}},
"luiz henrique & léo": {"dimensions": {"artist": "luiz_henrique_e_léo"}},
"luiza & maurílio": {"dimensions": {"artist": "luiza_e_maurílio"}},
"maiara & maraisa": {"dimensions": {"artist": "maiara_e_maraisa"}},
"mano walter": {"dimensions": {"artist": "mano_walter"}},
"marcos & belutti": {"dimensions": {"artist": "marcos_e_belutti"}},
"marcos & fernando": {"dimensions": {"artist": "marcos_e_fernando"}},
"maria cecília & rodolfo": {"dimensions": {"artist": "maria_cecília_e_rodolfo"}},
"marília mendonça": {"dimensions": {"artist": "marília_mendonça"}},
"matheus & kauan": {"dimensions": {"artist": "matheus_e_kauan"}},
"matogrosso & mathias": {"dimensions": {"artist": "matogrosso_e_mathias"}},
"michel teló": {"dimensions": {"artist": "michel_teló"}},
"milionário & josé rico": {"dimensions": {"artist": "milionário_e_josé_rico"}},
"munhoz & mariano": {"dimensions": {"artist": "munhoz_e_mariano"}},
"naiara azevedo": {"dimensions": {"artist": "naiara_azevedo"}},
"paula fernandes": {"dimensions": {"artist": "paula_fernandes"}},
"paula mattos": {"dimensions": {"artist": "paula_mattos"}},
"pedro & benício": {"dimensions": {"artist": "pedro_e_benício"}},
"pedro paulo & alex": {"dimensions": {"artist": "pedro_paulo_e_alex"}},
"rick & rangel": {"dimensions": {"artist": "rick_e_rangel"}},
"rick & renner": {"dimensions": {"artist": "rick_e_renner"}},
"rionegro & solimões": {"dimensions": {"artist": "rionegro_e_solimões"}},
"roberta miranda": {"dimensions": {"artist": "roberta_miranda"}},
"simone & simaria": {"dimensions": {"artist": "simone_e_simaria"}},
"solange almeida": {"dimensions": {"artist": "solange_almeida"}},
"teodoro & sampaio": {"dimensions": {"artist": "teodoro_e_sampaio"}},
"thaeme & thiago": {"dimensions": {"artist": "thaeme_e_thiago"}},
"thiago brava": {"dimensions": {"artist": "thiago_brava"}},
"victor & léo": {"dimensions": {"artist": "victor_e_léo"}},
"villa baggage": {"dimensions": {"artist": "villa_baggage"}},
"wesley safadão": {"dimensions": {"artist": "wesley_safadão"}},
"zé felipe": {"dimensions": {"artist": "zé_felipe"}},
"zé henrique & gabriel": {"dimensions": {"artist": "zé_henrique_e_gabriel"}},
"zé neto & cristiano": {"dimensions": {"artist": "zé_neto_e_cristiano"}},
"zezé di camargo & luciano": {"dimensions": {"artist": "zezé_di_camargo_e_luciano"}},
"anitta": {"dimensions": {"artist": "anitta"}}
}
datalake_geoarea_descriptions = {
"country": "!{country}"
}
datalake_geoareas = {
"BR": {
"dimensions": {
"country": "brazil"
}
}
}
|
#Practical 31: Patterns in python
def triangle(_depth, _type):
k = 2 * _depth - 2
for i in range(0, _depth):
for j in range(0, k):
print(end=" ")
k = k - 1
for j in range(0, i+1):
print(_type, end=" ")
print("\r")
def rightTriangle(_depth, _type):
for i in range(0, _depth):
for j in range(0, i+1):
print(_type, end=" ")
print("\r")
def leftTriangle(_depth, _type):
k = 2 * _depth - 2
for i in range(0, _depth):
for j in range(0, k):
print(end=" ")
k = k - 2
for j in range(0, i+1):
print(_type, end=" ")
print("\r")
_choice = int(input(
"1. Triangle \n2.Right-angle triangle - right \n3.Right-angle triangle - left\n"))
_depth = int(input("Enter depth: "))
_type = input("pattern character: ")
if _choice == 1:
triangle(_depth, _type)
elif _choice == 2:
rightTriangle(_depth, _type)
else:
leftTriangle(_depth, _type)
|
import sbol3
# ----------------------------------------------------------------------
# COMBINE 2020 SBOL 3 Tutorial
# October, 2020
#
# This tutorial code goes with the slides at:
#
# https://github.com/SynBioDex/Community-Media/blob/master/2020/COMBINE20/SBOL3-COMBINE-2020.pptx
# ----------------------------------------------------------------------
# Define constants that are not defined in pySBOL3
SO_ENGINEERED_REGION = sbol3.SO_NS + '0000804'
SO_ASSEMBLY_SCAR = sbol3.SO_NS + '0001953'
# Set the default namespace for new objects and create a document
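# (Hedged sketch, added to the skeleton) A minimal version of this step;
# the namespace URI is an illustrative assumption, not taken from the slides.
sbol3.set_namespace('https://synbiohub.org/public/igem')
doc = sbol3.Document()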
# --------------------------------------------------
# Slide 26: GFP expression cassette
# --------------------------------------------------
# Component
# identity: iGEM#I13504
# name: "iGEM 2016 interlab reporter"
# description: "GFP expression cassette used for 2016 iGEM interlab"
# type: SBO:0000251 (DNA)
# role: SO:0000804 (Engineered Region)
# Add the GFP expression cassette to the document
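# (Hedged sketch, added to the skeleton) One possible realization of the
# component described above, using `doc` from the sketch earlier in this file.
i13504 = sbol3.Component('I13504', sbol3.SBO_DNA)
i13504.name = 'iGEM 2016 interlab reporter'
i13504.description = 'GFP expression cassette used for 2016 iGEM interlab'
i13504.roles.append(SO_ENGINEERED_REGION)
doc.add(i13504)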
# --------------------------------------------------
# Slide 28: expression cassette parts
# --------------------------------------------------
# Add the RBS subcomponent
# Add the GFP subcomponent
# Add the terminator
# --------------------------------------------------
# Slide 30: Location of a SubComponent
# --------------------------------------------------
# BBa_I13504_sequence (875 bp)
# See https://synbiohub.org/public/igem/BBa_I13504_sequence/1
# BBa_B0015_sequence (129 bp)
# From https://synbiohub.org/public/igem/BBa_B0015_sequence/1
# Add the location on to the B0015 SubComponent
# pySBOL3 does not yet have an easy way to locate features based on
# arbitrary criteria so we have to loop over the list to find the
# SubComponent we are looking for
# --------------------------------------------------
# Slide 32: GFP production from expression cassette
# --------------------------------------------------
# Make a SubComponent referencing i13504
# pySBOL3 does not yet have an easy way to locate features based on
# arbitrary criteria so we have to loop over the list to find the
# SubComponent we are looking for
# Make a component reference for the GFP in i13504
# GFP Protein
# Make the template participation
# Make the product participation
# Make the interaction
# --------------------------------------------------
# Slide 34: Example: concatenating & reusing components
# --------------------------------------------------
# Left hand side of slide: interlab16device1
# Right hand side of slide: interlab16device2
# --------------------------------------------------
# Finally, write the data out to a file
# --------------------------------------------------
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2022- John Muradeli
#
# Distributed under the terms of the MIT License
# (see wavespin/__init__.py for details)
# -----------------------------------------------------------------------------
"""Convenience utilities."""
import numpy as np
import scipy.signal
import warnings
from scipy.fft import fft, ifft
from itertools import zip_longest, chain
from copy import deepcopy
def drop_batch_dim_jtfs(Scx, sample_idx=0):
"""Index into dim0 with `sample_idx` for every JTFS coefficient, and
drop that dimension.
Doesn't modify input:
- dict/list: new list/dict (with copied meta if applicable)
- array: new object but shared storage with original array (so original
variable reference points to unindexed array).
"""
fn = lambda x: x[sample_idx]
return _iterate_apply(Scx, fn)
def jtfs_to_numpy(Scx):
"""Convert PyTorch/TensorFlow tensors to numpy arrays, with meta copied,
and without affecting original data structures.
"""
B = ExtendedUnifiedBackend(Scx)
return _iterate_apply(Scx, B.numpy)
def _iterate_apply(Scx, fn):
def get_meta(s):
return {k: v for k, v in s.items() if not hasattr(v, 'ndim')}
if isinstance(Scx, dict):
out = {} # don't modify source dict
for pair in Scx:
if isinstance(Scx[pair], list):
out[pair] = []
for i, s in enumerate(Scx[pair]):
out[pair].append(get_meta(s))
out[pair][i]['coef'] = fn(s['coef'])
else:
out[pair] = fn(Scx[pair])
elif isinstance(Scx, list):
out = [] # don't modify source list
for s in Scx:
o = get_meta(s)
o['coef'] = fn(s['coef'])
out.append(o)
elif isinstance(Scx, tuple): # out_type=='array' && out_3D==True
out = (fn(Scx[0]), fn(Scx[1]))
elif hasattr(Scx, 'ndim'):
out = fn(Scx)
else:
raise ValueError(("unrecognized input type: {}; must be as returned by "
"`jtfs(x)`.").format(type(Scx)))
return out
def normalize(X, mean_axis=(1, 2), std_axis=(1, 2), C=None, mu=1, C_mult=None):
"""Log-normalize + (optionally) standardize coefficients for learning
algorithm suitability.
Is a modification of Eq. 10 of https://arxiv.org/pdf/2007.10926.pdf
For exact match (minus temporal global averaging), set
`mean_axis=std_axis=(0, 2)`.
Parameters
----------
X : tensor
Nonnegative tensor with dimensions `(samples, features, spatial)`.
If there's more than one `features` or `spatial` dimensions, flatten
before passing.
(Obtain tensor via e.g. `pack_coeffs_jtfs(Scx)`, or `out_type='array'`.)
std_axis : tuple[int] / int / None
If not None, will unit-variance after `rscaling` along specified axes.
mean_axis : tuple[int] / int / None
If not None, will zero-mean before `rscaling` along specified axes.
C : float / None
`log(1 + X * C / median)`.
Greater will bring more disparate values closer. Too great will equalize
too much, too low will have minimal effect.
Defaults to `5 / sparse_mean(abs(X / mu))`, which should yield moderate
contraction for a variety of signals. This was computed on a mixture
of random processes, with outliers, and may not generalize to all signals.
- `sparse_mean` takes mean over non-negligible points, aiding
consistency between representations. A scalogram with an extra
octave, for example, may capture nothing in the new octave,
while a simple mean would lower the output, attenuating existing
values.
mu : float / None
In case precomputed; See "Online computation".
`mu=None` will compute `mu` for per-channel normalization, while
`mu=1` essentially disables `mu` and preserves channels' relative scaling;
see "Relative scaling".
C_mult : float / None
Multiplies `C`. Useful if the default `C` compute scheme is appropriate
but needs adjusting. Defaults to `5` if `C` is None, else to `1`.
Returns
-------
Xnorm : tensor
Normalized `X`.
Relative scaling
----------------
    Scaling `features` independently changes the relative norms between them.
- If a signal rarely has high frequencies and low are dominant, for example,
then post-normalization this nuance is lost and highs and lows are brought
to a common norm - which may be undesired.
- SNR is lowered, as low signal contents that are dominated by noise
or float inaccuracies are amplified.
- Convolutions over `features` dims are invalidated (as it's akin to
standardizing individual time steps in 1D convolution); e.g. if
normalizing on per-`n1` basis, then we can no longer do 2D convs
over the joint `(n1, time)` pairs.
- To keep convs valid, all spatial dims that are convolved over must be
standardized by the same factor - i.e. same `mean` and `std`. `rscaling`
also accounts for rescaling due to log.
Regardless, this "channel normalization" has been used with success in
    various settings; above are but points worth noting.
To preserve relative scaling, set `mu=1`.
Online computation
------------------
Any computation with `axis` that includes `0` requires simultaneous access
to all samples. This poses a problem in two settings:
1. Insufficient RAM. The solution is to write an *equivalent* computation
that aggregates statistics one sample at a time. E.g. for `mu`:
Xsum = []
for x in dataset:
Xsum.append(B.sum(x, axis=-1, keepdims=True))
mu = B.median(B.vstack(Xsum), axis=0, keepdims=True)
2. Streaming / new samples. In this case we must reuse parameters computed
over e.g. entire train set.
Computations over all axes *except* `0` are done on per-sample basis, which
means not having to rely on other samples - but also an inability to do so
(i.e. to precompute and reuse params).
"""
# validate args & set defaults ###########################################
if X.ndim != 3:
raise ValueError("input must be 3D, `(samples, features, spatial)` - "
"got %s" % str(X.shape))
B = ExtendedUnifiedBackend(X)
if B.backend_name == 'tensorflow' and mu is None:
raise ValueError("mu=None with TensorFlow backend isn't supported, as "
"TF's `median` doesn't support axis args")
# check input values
if B.min(X) < 0:
warnings.warn("`X` must be non-negative; will take modulus.")
X = B.abs(X)
# convert axes to positive
axes = [mean_axis, std_axis]
for i, ax in enumerate(axes):
if ax is None:
continue
ax = ax if isinstance(ax, (list, tuple)) else [ax]
ax = list(ax)
for j, a in enumerate(ax):
if a < 0:
ax[j] = X.ndim + a
axes[i] = tuple(ax)
mean_axis, std_axis = axes
# check input dims
dim_ones = tuple(d for d in range(X.ndim) if X.shape[d] == 1)
if dim_ones != ():
def check_dims(g, name):
g = g if isinstance(g, (tuple, list)) else (g,)
if all(dim in dim_ones for dim in g):
raise ValueError("input dims cannot be `1` along same dims as "
"`{}` (gives NaNs); got X.shape == {}, "
"{} = {}".format(name, X.shape, name, mean_axis))
check_dims(mean_axis, 'mean_axis')
check_dims(std_axis, 'std_axis')
# check mu
if mu is None and 0 in dim_ones and 2 in dim_ones:
raise ValueError("input dims cannot be `1` along dims 0 and 2 "
"if `mu` is None (gives NaNs); "
"got X.shape == {}".format(X.shape))
# main transform #########################################################
if mu is None:
# spatial sum (integral)
Xsum = B.sum(X, axis=-1, keepdims=True)
# sample median
mu = B.median(Xsum, axis=0, keepdims=True)
def sparse_mean(x, div=100, iters=4):
"""Mean of non-negligible points"""
m = B.mean(x)
for _ in range(iters - 1):
m = B.mean(x[x > m / div])
return m
# rescale
Xnorm = X / mu
# contraction factor
if C_mult is None:
C_mult = 5 if C is None else 1
if C is None:
C = 1 / sparse_mean(B.abs(Xnorm), iters=4)
C *= C_mult
# log
Xnorm = B.log(1 + Xnorm * C)
# standardization ########################################################
if mean_axis is not None:
Xnorm -= B.mean(Xnorm, axis=mean_axis, keepdims=True)
if std_axis is not None:
Xnorm /= B.std(Xnorm, axis=std_axis, keepdims=True)
return Xnorm
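# --- Hedged usage sketch (added; not part of the library) ---
# `normalize` only expects a nonnegative 3D array `(samples, features, spatial)`;
# the shapes below are illustrative assumptions:
#
#     X = np.abs(np.random.randn(8, 16, 64))
#     Xn = normalize(X, mean_axis=(1, 2), std_axis=(1, 2), mu=1)
#     # Xn has the same shape; each sample is log-scaled then standardized
#     # over the (features, spatial) axes, with relative channel scaling kept
#     # since mu=1.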
def pack_coeffs_jtfs(Scx, meta, structure=1, sample_idx=None,
separate_lowpass=None, sampling_psi_fr=None, out_3D=None,
reverse_n1=False, debug=False, recursive=False):
"""Packs efficiently JTFS coefficients into one of valid 4D structures.
Parameters
----------
Scx : tensor/list/dict
JTFS output. Must have `out_type` 'dict:array' or 'dict:list',
and `average=True`.
meta : dict
JTFS meta.
structure : int / None
        Structure to pack `Scx` into (see "Structures" below), integer 1 to 5.
Will pack into a structure even if not suitable for convolution (as
determined by JTFS parameters); see "Structures" if convs are relevant.
- If can pack into one structure, can pack into any other (1 to 5).
- 6 to 9 aren't implemented since they're what's already returned
as output.
- This method is only needed for 3D or 4D convolutions, for which
only structure=5 with `out_3D=True` and `aligned=True` is fully valid
(see below); 1D convolutions can be done on any JTFS with
`average=True`, and 2D on any `out_3D=True`.
sample_idx : int / None
Index of sample in batched input to pack. If None (default), will
pack all samples.
Returns 5D if `not None` *and* there's more than one sample.
separate_lowpass : None / bool
If True, will pack spinned (`psi_t * psi_f_up`, `psi_t * psi_f_dn`)
and lowpass (`phi_t * phi_f`, `phi_t * psi_f`, `psi_t * phi_f`) pairs
        separately. Recommended for convolutions (see Structures & Uniformity).
Defaults to False if `structure != 5`. `structure = 5` requires True.
sampling_psi_fr : str / None
Used for sanity check for padding along `n1_fr`.
Must match what was passed to `TimeFrequencyScattering1D`.
If None, will assume library default.
out_3D : bool / None
Used for sanity check for padding along `n1`
(enforces same number of `n1`s per `n2`).
reverse_n1 : bool (default False)
If True, will reverse ordering of `n1`. By default, low n1 <=> high freq
(as directly output by `timefrequency_scattering1d`).
    debug : bool (default False)
If True, coefficient values will be replaced by meta `n` values for
debugging purposes, where the last dim is size 4 and contains
`(n1_fr, n2, n1, time)` assuming `structure == 1`.
recursive : bool (default False)
Internal argument for handling batch_size > 1, do not use.
Returns
-------
out: tensor / tuple[tensor]
Packed `Scx`, depending on `structure` and `separate_lowpass`:
- 1: `out` if False else
`(out, out_phi_f, out_phi_t)`
- 2: same as 1
- 3: `(out_up, out_dn, out_phi_f)` if False else
`(out_up, out_dn, out_phi_f, out_phi_t)`
- 4: `(out_up, out_dn)` if False else
`(out_up, out_dn, out_phi_t)`
- 5: `(out_up, out_dn, out_phi_f, out_phi_t, out_phi)`
`out_phi_t` is `phi_t * psi_f` and `phi_t * phi_f` concatenated.
`out_phi_f` is `psi_t * phi_f` for all configs except
`3, True`, where it is concatenated with `phi_t * phi_f`.
For further info, see "Structures", "Parameter effects", and "Notes".
Structures
----------
Assuming `aligned=True`, then for `average, average_fr`, the following form
valid convolution structures:
1. `True, True*`: 3D/4D*, `(n1_fr, n2, n1, time)`
2. `True, True*`: 2D/4D*, `(n2, n1_fr, n1, time)`
3. `True, True*`: 4D, `(n2, n1_fr//2, n1, time)`*2,
`(n2, 1, n1, time)`
4. `True, True*`: 2D/4D*, `(n2, n1_fr//2 + 1, n1, time)`*2
5. `True, True*`: 4D, `(n2, n1_fr//2, n1, time)`*2,
`(n2, 1, n1, time)`,
`(1, n1_fr, n1, time)`,
`(1, 1, n1, time)`
6. `True, True*`: 2D/3D*, `(n2 * n1_fr, n1, time)`
7. `True, False`: 1D/2D*, `(n2 * n1_fr * n1, time)`
8. `False, True`: list of variable length 1D tensors
9. `False, False`: list of variable length 1D tensors
**Indexing/units**:
- n1: frequency [Hz], first-order temporal variation
- n2: frequency [Hz], second-order temporal variation
(frequency of amplitude modulation)
- n1_fr: quefrency [cycles/octave], first-order frequential variation
(frequency of frequency modulation bands, roughly. More precisely,
correlates with frequential bands (independent components/modes) of
varying widths, decay factors, and recurrences, per temporal slice)
- time: time [sec]
- The actual units are discrete, "Hz" and "sec" are an example.
To convert, multiply by sampling rate `fs`.
- The `n`s are indexings of the output array, also indexings of wavelets
once accounting for stride and order reversal (n1_reverse).
- E.g. `n1=2` may index `psi1_f[2*log2_F]` - or, generally,
`psi1_f[2*total_conv_stride_over_U1_realized]` (see `core`).
- With `aligned=False`, `n1` striding varies on per-`n2` basis.
`n1` is the only "uncertain" index in this regard, and only `n1` and
`t` are subject to stride; `n2` always means `psi2_f[n2]`, and
`n1_fr` always means `psi1_f_fr_up[n1_fr]` (or down).
- Hence, the frequency in "n2: frequency [Hz]" is obtained via
`psi2_f[n2]['xi']`.
- Higher n <=> higher center frequency. That is, coeffs are packed in
order of decreasing frequency, just as in computation.
Exceptions: 1) structure `1` or `2`, where spin down's `n1_fr` axis
is reversed, and 2) if `n1_reverse=True`.
**Convolution-validity**:
- Structure 3 is 3D/4D-valid only if one deems valid the disjoint
representation with separate convs over spinned and lowpassed
(thus convs over lowpassed-only coeffs are deemed valid) - or if one
opts to exclude the lowpassed pairs.
- Structure 4 is 3D/4D-valid only if one deems valid convolving over both
lowpassed and spinned coefficients.
- Structure 5 is completely valid.
- For convolutions, first dim is assumed to be channels (unless doing
4D convs).
- `True*` indicates a "soft requirement"; as long as `aligned=True`,
`False` can be fully compensated with padding.
Since 5 isn't implemented with `False`, it can be obtained from `False`
by reshaping one of 1-4.
- `2D/4D*` means 3D/4D convolutions aren't strictly valid for convolving
over trailing (last) dimensions (see below), but 1D/2D are.
`3D` means 1D, 2D, 3D are all valid.
Structure interpretations for convolution
-----------------------------------------
Interpretations for convolution (and equivalently, spatial coherence)
are as follows:
1. The true JTFS structure. `(n2, n1, time)` are uniform and thus
valid dimensions for 3D convolution (if all `phi` pairs are excluded,
which isn't default behavior; see "Uniformity").
2. It's a dim-permuted 1, but last three dimensions are no longer uniform
and don't necessarily form a valid convolution pair.
This is the preferred structure for conceptualizing or debugging as
it's how the computation graph unfolds (and so does information
density, as `N_fr` varies along `n2`).
3. It's 2, but split into uniform pairs - `out_up, out_dn, out_phi`
suited for convolving over last three dims. These still include
`phi_t * psi_f` and `phi_t * phi_f` pairs, so for strict uniformity
these slices should drop (e.g. `out_up[1:]`).
4. It's 3, but only `out_up, out_dn`, and each includes `psi_t * phi_f`.
If this "soft uniformity" is acceptable then `phi_t * psi_f` pairs
should be kept.
5. Completely valid convolutional structure.
Every pair is packed separately. The only role of `pack_coeffs_jtfs`
here is to reshape the pairs into 4D tensors, and pad.
6. `n2` and `n1_fr` are flattened into one dimension. The resulting
3D structure is suitable for 2D convolutions along `(n1, time)`.
7. `n2`, `n1_fr`, and `n1` are flattened into one dimension. The resulting
2D structure is suitable for 1D convolutions along `time`.
    8. `time` is variable; structure not suitable for convolution.
9. `time` and `n1` are variable; structure not suitable for convolution.
Structures not suited for convolutions may be suited for other transforms,
e.g. Dense or Graph Neural Networks (or graph convolutions).
Helpful visuals: # TODO relink
https://github.com/kymatio/kymatio/discussions/708#discussioncomment-1624521
Uniformity
----------
Coefficients are "uniform" if their generating wavelets are spaced uniformly
(that is, equally incremented/spaced apart) in log space. The lowpass filter
is equivalently an infinite scale wavelet, thus it breaks uniformity
(it'd take infinite number of wavelets to be one increment away from lowpass).
Opposite spins require stepping over the lowpass and are hence disqualified.
Above is strictly true in continuous time. In a discrete setting, however,
the largest possible non-dc scale is far from infinite. A 2D lowpass wavelet
is somewhat interpretable as a subsequent scaling and rotation of the
largest scale bandpass, as the bandpass itself is such a scaling and rotation
of its preceding bandpass (emphasis on "somewhat", as this is wrong in
important ways).
Nonetheless, a lowpass is an averaging rather than modulation extracting
filter: its physical units differ, and it has zero FDTS sensitivity - and
this is a stronger objection for convolution. Further, when convolving over
modulus of wavelet transform (as frequential scattering does), the dc bin
is most often dominant, and by a lot - thus without proper renormalization
it will drown out the bandpass coefficients in concatenation.
The safest configuration for convolution thus excludes all lowpass pairs:
`phi_t * phi_f`, `phi_t * psi_f`, and `psi_t * phi_f`; these can be convolved
over separately. The bandpass and lowpass concatenations aren't recommended
as anything but experimental.
Parameter effects
-----------------
`average` and `average_fr` are described in "Structures". Additionally:
- aligned:
- True: enables the true JTFS structure (every structure in 1-7 is
as described).
- False: yields variable stride along `n1`, disqualifying it from
3D convs along `(n2, n1, time)`. However, assuming semi-uniformity
is acceptable, then each `n2` slice in `(n2, n1_fr, n1, time)`, i.e.
`(n1_fr, n1, time)`, has the same stride, and forms valid conv pair
(so use 3 or 4). Other structures require similar accounting.
Rules out structure 1 for 3D/4D convs.
- out_3D:
- True: enforces same freq conv stride on *per-`n2`* basis, enabling
3D convs even if `aligned=False`.
- sampling_psi_fr:
- 'resample': enables the true JTFS structure.
- 'exclude': enables the true JTFS structure (it's simply a subset of
'resample'). However, this involves large amounts of zero-padding to
fill the missing convolutions and enable 4D concatenation.
- 'recalibrate': breaks the true JTFS structure. `n1_fr` frequencies
and widths now vary with `n2`, which isn't spatially coherent in 4D.
It also renders `aligned=True` a pseudo-alignment.
Like with `aligned=False`, alignment and coherence is preserved on
per-`n2` basis, retaining the true structure in a piecewise manner.
Rules out structure 1 for 3D/4D convs.
- average:
- It's possible to support `False` the same way `average_fr=False` is
supported, but this isn't implemented.
Notes
-----
1. Method requires `out_exclude=None` if `not separate_lowpass` - else,
the following are allowed to be excluded: 'phi_t * psi_f',
'phi_t * phi_f', and if `structure != 4`, 'psi_t * phi_f'.
2. The built-in energy renormalization includes doubling the energy
of `phi_t * psi_f` pairs to compensate for computing only once (for
just one spin since it's identical to other spin), while here it may
be packed twice (structure=`1` or `2`, or structure=`3` or `4` and
`not separate_lowpass`); to compensate, its energy is halved before
packing.
3. Energy duplication isn't avoided for all configs:
- `3, separate_lowpass`: packs the `phi_t * phi_f` pair twice -
with `phi_t * psi_f`, and with `psi_t * phi_f`.
`out_phi_f` always concats with `phi_t * phi_f` for `3` since
`phi_f` is never concat with spinned, so it can't concat with
`phi_t` pairs as usual.
- `4, not separate_lowpass`: packs `phi_t * phi_f` and `psi_t * phi_f`
pairs twice, once for each spin.
- `4, separate_lowpass`: packs `psi_t * phi_f` pairs twice, once for
each spin.
- Note both `3` and `4` pack `phi_t * psi_f` pairs twice if
`not separate_lowpass`, but the energy is halved anyway and hence
not duped.
This is intentional, as the idea is to treat each packing as an
independent unit.
"""
B = ExtendedUnifiedBackend(Scx)
def combined_to_tensor(combined_all, recursive):
def process_dims(o):
if recursive:
assert o.ndim == 5, o.shape
else:
assert o.ndim == 4, o.shape
o = o[None]
return o
def not_none(x):
return (x is not None if not recursive else
all(_x is not None for _x in x))
# fetch combined params
if structure in (1, 2):
combined, combined_phi_t, combined_phi_f, combined_phi = combined_all
else:
(combined_up, combined_dn, combined_phi_t, combined_phi_f,
combined_phi) = combined_all
# compute pad params
cbs = [(cb[0] if recursive else cb) for cb in combined_all
if not_none(cb)]
n_n1s_max = max(len(cb[n2_idx][n1_fr_idx])
for cb in cbs
for n2_idx in range(len(cb))
for n1_fr_idx in range(len(cb[n2_idx])))
pad_value = 0 if not debug else -2
# left pad along `n1` if `reverse_n1`
left_pad_axis = (-2 if reverse_n1 else None)
general = False # use routine optimized for JTFS
kw = dict(pad_value=pad_value, left_pad_axis=left_pad_axis,
general=general)
# `phi`s #############################################################
out_phi_t, out_phi_f, out_phi = None, None, None
# ensure `phi`s and spinned pad to the same number of `n1`s
ref_shape = ((None, None, n_n1s_max, None) if not recursive else
(None, None, None, n_n1s_max, None))
# this will pad along `n1`
if not_none(combined_phi_t):
out_phi_t = tensor_padded(combined_phi_t, ref_shape=ref_shape, **kw)
out_phi_t = process_dims(out_phi_t)
if not_none(combined_phi_f):
out_phi_f = tensor_padded(combined_phi_f, ref_shape=ref_shape, **kw)
out_phi_f = process_dims(out_phi_f)
if not_none(combined_phi):
out_phi = tensor_padded(combined_phi, ref_shape=ref_shape, **kw)
out_phi = process_dims(out_phi)
# spinned ############################################################
# don't need `ref_shape` here since by implementation max `n1`s
# should be found in spinned (`phi`s are trimmed to ensure this)
if structure in (1, 2):
out = tensor_padded(combined, **kw)
out = process_dims(out)
if structure == 1:
tp_shape = (0, 2, 1, 3, 4)
out = B.transpose(out, tp_shape)
if separate_lowpass:
if out_phi_t is not None:
out_phi_t = B.transpose(out_phi_t, tp_shape)
if out_phi_f is not None:
out_phi_f = B.transpose(out_phi_f, tp_shape)
out = (out if not separate_lowpass else
(out, out_phi_f, out_phi_t))
        elif structure in (3, 4, 5):
            out_up = tensor_padded(combined_up, **kw)
            out_dn = tensor_padded(combined_dn, **kw)
            out_up = process_dims(out_up)
            out_dn = process_dims(out_dn)
            if structure == 3:
                out = ((out_up, out_dn, out_phi_f) if not separate_lowpass else
                       (out_up, out_dn, out_phi_f, out_phi_t))
            elif structure == 4:
                out = ((out_up, out_dn) if not separate_lowpass else
                       (out_up, out_dn, out_phi_t))
            else:  # structure == 5
                out = (out_up, out_dn, out_phi_f, out_phi_t, out_phi)
# sanity checks ##########################################################
phis = dict(out_phi_t=out_phi_t, out_phi_f=out_phi_f, out_phi=out_phi)
ref = out[0] if isinstance(out, tuple) else out
for name, op in phis.items():
if op is not None:
errmsg = (name, op.shape, ref.shape)
# `t`s must match
assert op.shape[-1] == ref.shape[-1], errmsg
# number of `n1`s must match
assert op.shape[-2] == ref.shape[-2], errmsg
# number of samples must match
assert op.shape[0] == ref.shape[0], errmsg
# due to transpose
fr_dim = -3 if structure != 1 else -4
if name in ('out_phi_f', 'out_phi'):
assert op.shape[fr_dim] == 1, op.shape
if name == 'out_phi':
# only for structure=5, which has `n2` at `shape[-4]`
assert op.shape[-4] == 1, op.shape
continue
# phi_t only #################################################
# compute `ref_fr_len`
if structure in (1, 2, 5):
ref_fr_len = ref.shape[fr_dim]
elif structure == 3:
# separate spins have half of total `n1_fr`s, but
# we also pack `phi_t` only once
ref_fr_len = ref.shape[fr_dim] * 1
elif structure == 4:
# above + having `psi_t * phi_f`
# (i.e. fr_len_4 = fr_len_3 + 1)
ref_fr_len = (ref.shape[fr_dim] - 1) * 1
# due to `phi_t * phi_f` being present only in `out_phi_t`
ref_fr_len = (ref_fr_len if not separate_lowpass else
ref_fr_len + 1)
# assert
assert op.shape[fr_dim] == ref_fr_len, (
"{} != {} | {} | {}, {}".format(op.shape[fr_dim], ref_fr_len,
name, op.shape, ref.shape))
if structure in (3, 4, 5):
assert out_up.shape == out_dn.shape, (out_up.shape, out_dn.shape)
if not recursive:
# drop batch dim; `None` in case of `out_exclude`
if isinstance(out, tuple):
out = tuple((o[0] if o is not None else o) for o in out)
else:
out = out[0]
return out
# pack full batch recursively ############################################
if not isinstance(Scx, dict):
raise ValueError("must use `out_type` 'dict:array' or 'dict:list' "
"for `pack_coeffs_jtfs` (got `type(Scx) = %s`)" % (
type(Scx)))
# infer batch size
ref_pair = list(Scx)[0]
if isinstance(Scx[ref_pair], list):
n_samples = Scx[ref_pair][0]['coef'].shape[0]
else: # tensor
n_samples = Scx[ref_pair].shape[0]
n_samples = int(n_samples)
# handle recursion, if applicable
if n_samples > 1 and sample_idx is None:
combined_phi_t_s, combined_phi_f_s, combined_phi_s = [], [], []
if structure in (1, 2):
combined_s = []
elif structure in (3, 4):
combined_up_s, combined_dn_s = [], []
for sample_idx in range(n_samples):
            combined_all = pack_coeffs_jtfs(Scx, meta, structure, sample_idx,
                                            separate_lowpass, sampling_psi_fr,
                                            out_3D, reverse_n1, debug,
                                            recursive=True)
combined_phi_t_s.append(combined_all[-3])
combined_phi_f_s.append(combined_all[-2])
combined_phi_s.append(combined_all[-1])
if structure in (1, 2):
combined_s.append(combined_all[0])
elif structure in (3, 4):
combined_up_s.append(combined_all[0])
combined_dn_s.append(combined_all[1])
phis = (combined_phi_t_s, combined_phi_f_s, combined_phi_s)
if structure in (1, 2):
combined_all_s = (combined_s, *phis)
elif structure in (3, 4):
combined_all_s = (combined_up_s, combined_dn_s, *phis)
out = combined_to_tensor(combined_all_s, recursive=True)
return out
##########################################################################
# validate `structure` / set default
    structures_available = (1, 2, 3, 4, 5)
if structure is None:
structure = structures_available[0]
elif structure not in structures_available:
raise ValueError(
"invalid `structure={}`; Available are: {}".format(
structure, ','.join(map(str, structures_available))))
if separate_lowpass is None:
separate_lowpass = False if structure != 5 else True
    elif separate_lowpass is False and structure == 5:
raise ValueError("`structure=5` requires `separate_lowpass=True`.")
# unpack coeffs for further processing
Scx_unpacked = {}
list_coeffs = isinstance(list(Scx.values())[0], list)
if sample_idx is None and not recursive and n_samples == 1:
sample_idx = 0
Scx = drop_batch_dim_jtfs(Scx, sample_idx)
t_ref = None
for pair in Scx:
is_joint = bool(pair not in ('S0', 'S1'))
if not is_joint:
continue
Scx_unpacked[pair] = []
for coef in Scx[pair]:
if list_coeffs and (isinstance(coef, dict) and 'coef' in coef):
coef = coef['coef']
if t_ref is None:
t_ref = coef.shape[-1]
assert coef.shape[-1] == t_ref, (coef.shape, t_ref,
"(if using average=False, set "
"oversampling=99)")
if coef.ndim == 2:
Scx_unpacked[pair].extend(coef)
elif coef.ndim == 1:
Scx_unpacked[pair].append(coef)
else:
raise ValueError("expected `coef.ndim` of 1 or 2, got "
"shape = %s" % str(coef.shape))
# check that all necessary pairs are present
pairs = ('psi_t * psi_f_up', 'psi_t * psi_f_dn', 'psi_t * phi_f',
'phi_t * psi_f', 'phi_t * phi_f')
# structure 4 requires `psi_t * phi_f`
okay_to_exclude_if_sep_lp = (pairs[-3:] if structure != 4 else
pairs[-2:])
Scx_pairs = list(Scx)
for p in pairs:
if p not in Scx_pairs:
if (not separate_lowpass or
(separate_lowpass and p not in okay_to_exclude_if_sep_lp)):
raise ValueError(("configuration requires pair '%s', which is "
"missing") % p)
# for later; controls phi_t pair energy norm
phi_t_packed_twice = bool((structure in (1, 2)) or
(structure in (3, 4) and not separate_lowpass))
# pack into dictionary indexed by `n1_fr`, `n2` ##########################
packed = {}
ns = meta['n']
n_n1_frs_max = 0
for pair in pairs:
if pair not in Scx_pairs:
continue
packed[pair] = []
nsp = ns[pair].astype(int).reshape(-1, 3)
idx = 0
n2s_all = nsp[:, 0]
n2s = np.unique(n2s_all)
for n2 in n2s:
n1_frs_all = nsp[n2s_all == n2, 1]
packed[pair].append([])
n1_frs = np.unique(n1_frs_all)
n_n1_frs_max = max(n_n1_frs_max, len(n1_frs))
for n1_fr in n1_frs:
packed[pair][-1].append([])
n1s_done = 0
if out_3D:
# same number of `n1`s for all frequential slices *per-`n2`*
n_n1s = len(n1_frs_all)
n_n1s_in_n1_fr = n_n1s // len(n1_frs)
assert (n_n1s / len(n1_frs)
).is_integer(), (n_n1s, len(n1_frs))
else:
n_n1s_in_n1_fr = len(nsp[n2s_all == n2, 2
][n1_frs_all == n1_fr])
if debug:
# pack meta instead of coeffs
n1s = nsp[n2s_all == n2, 2][n1_frs_all == n1_fr]
coef = [[n2, n1_fr, n1, 0] for n1 in n1s]
# ensure coef.shape[-1] == t
while Scx_unpacked[pair][0].shape[-1] > len(coef[0]):
for i in range(len(coef)):
coef[i].append(0)
coef = np.array(coef)
packed[pair][-1][-1].extend(coef)
assert len(coef) == n_n1s_in_n1_fr
idx += len(coef)
n1s_done += len(coef)
else:
while idx < len(nsp) and n1s_done < n_n1s_in_n1_fr:
try:
coef = Scx_unpacked[pair][idx]
except Exception as e:
print(pair, idx)
raise e
if pair == 'phi_t * psi_f' and phi_t_packed_twice:
# see "Notes" in docs
coef = coef / B.sqrt(2., dtype=coef.dtype)
packed[pair][-1][-1].append(coef)
idx += 1
n1s_done += 1
# pad along `n1_fr`
if sampling_psi_fr is None:
sampling_psi_fr = 'exclude'
pad_value = 0 if not debug else -2
for pair in packed:
if 'psi_f' not in pair:
continue
for n2_idx in range(len(packed[pair])):
if len(packed[pair][n2_idx]) < n_n1_frs_max:
assert sampling_psi_fr == 'exclude' # should not occur otherwise
else:
continue
# make a copy to avoid modifying `packed`
ref = list(tensor_padded(packed[pair][n2_idx][0]))
# assumes last dim is same (`average=False`)
# and is 2D, `(n1, t)` (should always be true)
for i in range(len(ref)):
if debug:
# n2 will be same, everything else variable
ref[i][1:] = ref[i][1:] * 0 + pad_value
else:
ref[i] = ref[i] * 0
while len(packed[pair][n2_idx]) < n_n1_frs_max:
packed[pair][n2_idx].append(list(ref))
# pack into list ready to convert to 4D tensor ###########################
# current indexing: `(n2, n1_fr, n1, time)`
# c = combined
c_up = packed['psi_t * psi_f_up']
c_dn = packed['psi_t * psi_f_dn']
c_phi_t = packed['phi_t * psi_f'] if 'phi_t * psi_f' in Scx_pairs else None
c_phi_f = packed['psi_t * phi_f'] if 'psi_t * phi_f' in Scx_pairs else None
c_phi = packed['phi_t * phi_f'] if 'phi_t * phi_f' in Scx_pairs else None
can_make_c_phi_t = bool(c_phi_t is not None and c_phi is not None)
# `deepcopy` below is to ensure same structure packed repeatedly in different
# places isn't modified in both places when it's modified in one place.
# `None` set to variables means they won't be tensored and returned.
if structure in (1, 2):
# structure=2 is just structure=1 transposed, so pack them same
# and transpose later.
# instantiate total combined
combined = c_up
c_up = None
# append phi_f ####
if not separate_lowpass:
for n2 in range(len(c_phi_f)):
for n1_fr in range(len(c_phi_f[n2])):
c = c_phi_f[n2][n1_fr]
combined[n2].append(c)
c_phi_f = None
# assert that appending phi_f only increased dim1 by 1
l0, l1 = len(combined[0]), len(c_dn[0])
assert l0 == l1 + 1, (l0, l1)
# append down ####
# assert that so far dim0 hasn't changed
assert len(combined) == len(c_dn), (len(combined), len(c_dn))
# dn: reverse `psi_f` ordering
for n2 in range(len(c_dn)):
c_dn[n2] = c_dn[n2][::-1]
for n2 in range(len(combined)):
combined[n2].extend(c_dn[n2])
c_dn = None
# pack phi_t ####
if not separate_lowpass or can_make_c_phi_t:
c_phi_t = deepcopy(c_phi_t)
c_phi_t[0].append(c_phi[0][0])
# phi_t: reverse `psi_f` ordering
c_phi_t[0].extend(packed['phi_t * psi_f'][0][::-1])
c_phi = None
# append phi_t ####
if not separate_lowpass:
combined.append(c_phi_t[0])
c_phi_t = None
elif structure == 3:
# pack spinned ####
if not separate_lowpass:
c_up.append(c_phi_t[0])
c_dn.append(deepcopy(c_phi_t[0]))
c_phi_t = None
# pack phi_t ####
if separate_lowpass and can_make_c_phi_t:
c_phi_t[0].append(deepcopy(c_phi[0][0]))
# pack phi_f ####
# structure=3 won't pack `phi_f` with `psi_f`, so can't pack
# `phi_t * phi_f` along `phi_t * psi_f` (unless `separate_lowpass=True`
# where `phi_t` isn't packed with `psi_f`), must pack with `psi_t * phi_f`
# instead
if not separate_lowpass or (c_phi_f is not None and c_phi is not None):
c_phi_f.append(c_phi[0])
c_phi = None
elif structure == 4:
# pack phi_f ####
for n2 in range(len(c_phi_f)):
# structure=4 joins `psi_t * phi_f` with spinned
c = c_phi_f[n2][0]
c_up[n2].append(c)
c_dn[n2].append(c)
c_phi_f = None
# assert up == dn along dim1
l0, l1 = len(c_up[0]), len(c_dn[0])
assert l0 == l1, (l0, l1)
# pack phi_t ####
if separate_lowpass and can_make_c_phi_t:
# pack `phi_t * phi_f` with `phi_t * psi_f`
c_phi_t[0].append(c_phi[0][0])
elif not separate_lowpass:
# pack `phi_t * phi_f` with `phi_t * psi_f`, packed with each spin
# phi_t, append `n1_fr` slices via `n2`
c_up.append(deepcopy(c_phi_t[0]))
c_dn.append(c_phi_t[0])
# phi, append one `n2, n1_fr` slice
c_up[-1].append(deepcopy(c_phi[0][0]))
c_dn[-1].append(c_phi[0][0])
c_phi_t, c_phi = None, None
# assert up == dn along dim0, and dim1
assert len(c_up) == len(c_dn), (len(c_up), len(c_dn))
l0, l1 = len(c_up[0]), len(c_dn[0])
assert l0 == l1, (l0, l1)
elif structure == 5:
pass # all packed
# reverse ordering of `n1` ###############################################
if reverse_n1:
# pack all into `cbs`
if c_up is not None:
cbs = [c_up, c_dn]
else:
cbs = [combined]
if c_phi_t is not None:
cbs.append(c_phi_t)
if c_phi_f is not None:
cbs.append(c_phi_f)
if c_phi is not None:
cbs.append(c_phi)
# reverse `n1`
cbs_new = []
for i, cb in enumerate(cbs):
cbs_new.append([])
for n2_idx in range(len(cb)):
cbs_new[i].append([])
for n1_fr_idx in range(len(cb[n2_idx])):
cbs_new[i][n2_idx].append(cb[n2_idx][n1_fr_idx][::-1])
# unpack all from `cbs`
if c_up is not None:
c_up = cbs_new.pop(0)
c_dn = cbs_new.pop(0)
else:
combined = cbs_new.pop(0)
if c_phi_t is not None:
c_phi_t = cbs_new.pop(0)
if c_phi_f is not None:
c_phi_f = cbs_new.pop(0)
if c_phi is not None:
c_phi = cbs_new.pop(0)
assert len(cbs_new) == 0, len(cbs_new)
# finalize ###############################################################
phis = (c_phi_t, c_phi_f, c_phi)
combined_all = ((combined, *phis) if c_up is None else
(c_up, c_dn, *phis))
if recursive:
return combined_all
return combined_to_tensor(combined_all, recursive=False)
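# --- Hedged usage sketch (added; `jtfs` and `x` are assumed names) ---
# Assuming a JTFS object built with `out_type='dict:array'` and
# `average_fr=True`, packing into the fully convolution-valid structure 5
# might look like:
#
#     Scx = jtfs(x)
#     out_up, out_dn, out_phi_f, out_phi_t, out_phi = pack_coeffs_jtfs(
#         Scx, jtfs.meta(), structure=5, separate_lowpass=True)
#     # each output is a 4D `(n2, n1_fr, n1, time)`-like tensor, zero-padded
#     # along `n1` where slices have fewer first-order rows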
def coeff_energy(Scx, meta, pair=None, aggregate=True, correction=False,
kind='l2'):
"""Computes energy of JTFS coefficients.
Current implementation permits computing energy directly via
`sum(abs(coef)**2)`, hence this method isn't necessary.
Parameters
----------
Scx: dict[list] / dict[np.ndarray]
`jtfs(x)`.
meta: dict[dict[np.ndarray]]
`jtfs.meta()`.
pair: str / list/tuple[str] / None
Name(s) of coefficient pairs whose energy to compute.
If None, will compute for all.
aggregate: bool (default True)
True: return one value per `pair`, the sum of all its coeffs' energies
False: return `(E_flat, E_slices)`, where:
- E_flat = energy of every coefficient, flattened into a list
(but still organized pair-wise)
- E_slices = energy of every joint slice (if not `'S0', 'S1'`),
in a pair. That is, sum of coeff energies on per-`(n2, n1_fr)`
basis.
correction : bool (default False)
Whether to apply stride and filterbank norm correction factors:
- stride: if subsampled by 2, energy will reduce by 2
- filterbank: since input is assumed real, we convolve only over
positive frequencies, getting half the energy
Current JTFS implementation accounts for both so default is `False`
(in fact `True` isn't implemented with any configuration due to
forced LP sum renormalization - though it's possible to implement).
Filterbank energy correction is as follows:
- S0 -> 1
- U1 -> 2 (because psi_t is only analytic)
- phi_t * phi_f -> 2 (because U1)
- psi_t * phi_f -> 4 (because U1 and another psi_t that's
only analytic)
- phi_t * psi_f -> 4 (because U1 and psi_f is only for one spin)
- psi_t * psi_f -> 4 (because U1 and another psi_t that's
only analytic)
For coefficient correction (e.g. distance computation) we instead
scale the coefficients by square root of these values.
kind: str['l1', 'l2']
Kind of energy to compute. L1==`sum(abs(x))`, L2==`sum(abs(x)**2)`
(so actually L2^2).
Returns
-------
E: float / tuple[list]
Depends on `pair`, `aggregate`.
Rationale
---------
Simply `sum(abs(coef)**2)` won't work because we must account for subsampling.
- Subsampling by `k` will reduce energy (both L1 and L2) by `k`
(assuming no aliasing).
- Must account for variable subsampling factors, both in time
(if `average=False` for S0, S1) and frequency.
This includes if only seeking ratio (e.g. up vs down), since
`(a*x0 + b*x1) / (a*y0 + b*y1) != (x0 + x1) / (y0 + y1)`.
"""
if pair is None or isinstance(pair, (tuple, list)):
# compute for all (or multiple) pairs
pairs = pair
E_flat, E_slices = {}, {}
for pair in Scx:
if pairs is not None and pair not in pairs:
continue
            E_flat[pair], E_slices[pair] = coeff_energy(
                Scx, meta, pair, aggregate=False, correction=correction,
                kind=kind)
if aggregate:
E = {}
for pair in E_flat:
E[pair] = np.sum(E_flat[pair])
return E
return E_flat, E_slices
elif not isinstance(pair, str):
raise ValueError("`pair` must be string, list/tuple of strings, or None "
"(got %s)" % pair)
# compute compensation factor (see `correction` docs)
factor = _get_pair_factor(pair, correction)
fn = lambda c: energy(c, kind=kind)
norm_fn = lambda total_joint_stride: (2**total_joint_stride
if correction else 1)
E_flat, E_slices = _iterate_coeffs(Scx, meta, pair, fn, norm_fn, factor)
Es = []
for s in E_slices:
Es.append(np.sum(s))
E_slices = Es
if aggregate:
return np.sum(E_flat)
return E_flat, E_slices
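# Example (illustrative sketch, not part of the library's API). Shows the
# intended call pattern for `coeff_energy`; `jtfs` and `x` are assumed to be a
# JTFS instance with a dict output type and a 1D input, neither defined here.
def _example_coeff_energy(jtfs, x):
    Scx = jtfs(x)
    meta = jtfs.meta()
    # one scalar per coefficient pair
    E = coeff_energy(Scx, meta)
    # per-coefficient and per-`(n2, n1_fr)`-slice breakdown for a single pair
    E_flat, E_slices = coeff_energy(Scx, meta, pair='psi_t * psi_f_up',
                                    aggregate=False)
    return E, E_flat, E_slices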
def coeff_distance(Scx0, Scx1, meta0, meta1=None, pair=None, correction=False,
kind='l2'):
"""Computes L2 or L1 relative distance between `Scx0` and `Scx1`.
Current implementation permits computing distance directly between
coefficients, as `toolkit.rel_l2(coef0, coef1)`.
Parameters
----------
Scx0, Scx1: dict[list] / dict[np.ndarray]
`jtfs(x)`.
meta0, meta1: dict[dict[np.ndarray]]
`jtfs.meta()`. If `meta1` is None, will set equal to `meta0`.
Note that scattering objects responsible for `Scx0` and `Scx1` cannot
differ in any way that alters coefficient shapes.
pair: str / list/tuple[str] / None
Name(s) of coefficient pairs whose distances to compute.
If None, will compute for all.
kind: str['l1', 'l2']
Kind of distance to compute. L1==`sum(abs(x))`,
L2==`sqrt(sum(abs(x)**2))`. L1 is not implemented for `correction=False`.
correction: bool (default False)
See `help(wavespin.toolkit.coeff_energy)`.
Returns
-------
reldist_flat : list[float]
Relative distances between individual frequency rows, i.e. per-`n1`.
reldist_slices : list[float]
Relative distances between joint slices, i.e. per-`(n2, n1_fr)`.
"""
if not correction and kind == 'l1':
raise NotImplementedError
if meta1 is None:
meta1 = meta0
# compute compensation factor (see `correction` docs)
factor = _get_pair_factor(pair, correction)
fn = lambda c: c
def norm_fn(total_joint_stride):
if not correction:
return 1
return (2**(total_joint_stride / 2) if kind == 'l2' else
2**total_joint_stride)
c_flat0, c_slices0 = _iterate_coeffs(Scx0, meta0, pair, fn, norm_fn, factor)
c_flat1, c_slices1 = _iterate_coeffs(Scx1, meta1, pair, fn, norm_fn, factor)
# make into array and assert shapes are as expected
c_flat0, c_flat1 = np.asarray(c_flat0), np.asarray(c_flat1)
c_slices0 = [np.asarray(c) for c in c_slices0]
c_slices1 = [np.asarray(c) for c in c_slices1]
assert c_flat0.ndim == c_flat1.ndim == 2, (c_flat0.shape, c_flat1.shape)
is_joint = bool(pair not in ('S0', 'S1'))
if is_joint:
shapes = [np.array(c).shape for cs in (c_slices0, c_slices1) for c in cs]
# effectively 3D
assert all(len(s) == 2 for s in shapes), shapes
d_fn = lambda x: l2(x) if kind == 'l2' else l1(x)
ref0, ref1 = d_fn(c_flat0), d_fn(c_flat1)
eps = _eps(ref0, ref1)
ref = (ref0 + ref1) / 2 + eps
def D(x0, x1, axis):
if isinstance(x0, list):
return [D(_x0, _x1, axis) for (_x0, _x1) in zip(x0, x1)]
if kind == 'l2':
return np.sqrt(np.sum(np.abs(x0 - x1)**2, axis=axis)) / ref
return np.sum(np.abs(x0 - x1), axis=axis) / ref
# do not collapse `freq` dimension
reldist_flat = D(c_flat0, c_flat1, axis=-1)
reldist_slices = D(c_slices0, c_slices1, axis=(-1, -2) if is_joint else -1)
    # return both flat (per-`n1`) and per-slice relative distances
return reldist_flat, reldist_slices
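# Example (illustrative sketch, not part of the library's API). Relative
# distances between two outputs of the same scattering configuration; `jtfs`,
# `x0`, and `x1` are assumed and not defined in this module.
def _example_coeff_distance(jtfs, x0, x1):
    Scx0, Scx1 = jtfs(x0), jtfs(x1)
    meta = jtfs.meta()
    # per-`n1` and per-`(n2, n1_fr)` relative L2 distances for one pair
    reldist_flat, reldist_slices = coeff_distance(
        Scx0, Scx1, meta, pair='psi_t * psi_f_dn', kind='l2')
    return reldist_flat, reldist_slices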
def coeff_energy_ratios(Scx, meta, down_to_up=True, max_to_eps_ratio=10000):
"""Compute ratios of coefficient slice energies, spin down vs up.
Statistically robust alternative measure to ratio of total energies.
Parameters
----------
Scx : dict[list] / dict[tensor]
`jtfs(x)`.
meta : dict[dict[np.ndarray]]
`jtfs.meta()`.
down_to_up : bool (default True)
Whether to take `E_dn / E_up` (True) or `E_up / E_dn` (False).
Note, the actual similarities are opposite, as "down" means convolution
with down, which is cross-correlation with up.
max_to_eps_ratio : int
`eps = max(E_pair0, E_pair1) / max_to_eps_ratio`. Epsilon term
to use in ratio: `E_pair0 / (E_pair1 + eps)`.
Returns
-------
Ratios of coefficient energies.
"""
# handle args
assert isinstance(Scx, dict), ("`Scx` must be dict (got %s); " % type(Scx)
+ "set `out_type='dict:array'` or 'dict:list'")
# compute ratios
l2s = {}
pairs = ('psi_t * psi_f_dn', 'psi_t * psi_f_up')
if not down_to_up:
pairs = pairs[::-1]
for pair in pairs:
_, E_slc0 = coeff_energy(Scx, meta, pair=pair, aggregate=False, kind='l2')
l2s[pair] = np.asarray(E_slc0)
a, b = l2s.values()
mx = np.vstack([a, b]).max(axis=0) / max_to_eps_ratio
eps = np.clip(mx, mx.max() / (max_to_eps_ratio * 1000), None)
r = a / (b + eps)
return r
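# Example (illustrative sketch, not part of the library's API). Per-slice
# ratio of spin-down to spin-up coefficient energies, a robust asymmetry
# measure (see the docstring's note on spin orientation). `jtfs` and `x` are
# assumed; `out_type` must be 'dict:list' or 'dict:array'.
def _example_coeff_energy_ratios(jtfs, x):
    Scx = jtfs(x)
    meta = jtfs.meta()
    r = coeff_energy_ratios(Scx, meta)  # E_dn / E_up per `(n2, n1_fr)` slice
    return r.mean(), r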
def _get_pair_factor(pair, correction):
if pair == 'S0' or not correction:
factor = 1
elif 'psi' in pair:
factor = 4
else:
factor = 2
return factor
def _iterate_coeffs(Scx, meta, pair, fn=None, norm_fn=None, factor=None):
coeffs = drop_batch_dim_jtfs(Scx)[pair]
out_list = bool(isinstance(coeffs, list))
# infer out_3D
if out_list:
out_3D = bool(coeffs[0]['coef'].ndim == 3)
else:
out_3D = bool(coeffs.ndim == 3)
# fetch backend
B = ExtendedUnifiedBackend(coeffs)
# completely flatten into (*, time)
if out_list:
coeffs_flat = []
for coef in coeffs:
c = coef['coef']
coeffs_flat.extend(c)
else:
if out_3D:
coeffs = B.reshape(coeffs, (-1, coeffs.shape[-1]))
coeffs_flat = coeffs
# prepare for iterating
meta = deepcopy(meta) # don't change external dict
if out_3D:
meta['stride'][pair] = meta['stride'][pair].reshape(-1, 2)
meta['n'][pair] = meta['n'][pair].reshape(-1, 3)
assert (len(coeffs_flat) == len(meta['stride'][pair])), (
"{} != {} | {}".format(len(coeffs_flat), len(meta['stride'][pair]), pair))
# define helpers #########################################################
def get_total_joint_stride(meta_idx):
n_freqs = 1
m_start, m_end = meta_idx[0], meta_idx[0] + n_freqs
stride = meta['stride'][pair][m_start:m_end]
assert len(stride) != 0, pair
stride[np.isnan(stride)] = 0
total_joint_stride = stride.sum()
meta_idx[0] = m_end # update idx
return total_joint_stride
def n_current():
i = meta_idx[0]
m = meta['n'][pair]
return (m[i] if i <= len(m) - 1 else
np.array([-3, -3])) # reached end; ensure equality fails
def n_is_equal(n0, n1):
n0, n1 = n0[:2], n1[:2] # discard U1
n0[np.isnan(n0)], n1[np.isnan(n1)] = -2, -2 # NaN -> -2
return bool(np.all(n0 == n1))
# append energies one by one #############################################
fn = fn or (lambda c: c)
norm_fn = norm_fn or (lambda total_joint_stride: 2**total_joint_stride)
factor = factor or 1
is_joint = bool(pair not in ('S0', 'S1'))
E_flat = []
E_slices = [] if not is_joint else [[]]
meta_idx = [0] # make mutable to avoid passing around
for c in coeffs_flat:
if hasattr(c, 'numpy'):
if hasattr(c, 'cpu') and 'torch' in str(type(c)):
c = c.cpu()
c = c.numpy() # TF/torch
n_prev = n_current()
assert c.ndim == 1, (c.shape, pair)
total_joint_stride = get_total_joint_stride(meta_idx)
E = norm_fn(total_joint_stride) * fn(c) * factor
E_flat.append(E)
if not is_joint:
E_slices.append(E) # append to list of coeffs
elif n_is_equal(n_current(), n_prev):
E_slices[-1].append(E) # append to slice
else:
E_slices[-1].append(E) # append to slice
E_slices.append([])
# in case loop terminates early
if isinstance(E_slices[-1], list) and len(E_slices[-1]) == 0:
E_slices.pop()
# ensure they sum to same
Es_sum = np.sum([np.sum(s) for s in E_slices])
adiff = abs(np.sum(E_flat) - Es_sum)
assert np.allclose(np.sum(E_flat), Es_sum), "MAE=%.3f" % adiff
return E_flat, E_slices
def est_energy_conservation(x, sc=None, T=None, F=None, J=None, J_fr=None,
Q=None, Q_fr=None, max_pad_factor=None,
max_pad_factor_fr=None, pad_mode=None,
pad_mode_fr=None, average=None, average_fr=None,
sampling_filters_fr=None, r_psi=None, analytic=None,
out_3D=None, aligned=None, jtfs=False, backend=None,
verbose=True, get_out=False):
"""Estimate energy conservation given scattering configurations, especially
scale of averaging. With default settings, passing only `T`/`F`, computes the
upper bound.
Limitations:
- For time scattering (`jtfs=False`) and non-dyadic length `x`, the
estimation will be inaccurate per not accounting for energy loss due to
unpadding.
      - With `jtfs=True`, energies are underestimated due to lacking support
        for `out_3D and not average_fr`. That is, convolutions over zero-padded
        regions aren't included with `out_3D=False`; those are regions with
        assumed negligible energy that are nonetheless part of the actual
        frequential input. See `out_3D` docs.
Parameters
----------
x : tensor
1D input.
sc : `Scattering1D` / `TimeFrequencyScattering1D` / None
Scattering object to use. If None, will create per parameters.
T, F, J, J_fr, Q, Q_fr, max_pad_factor, max_pad_factor_fr, pad_mode,
pad_mode_fr, average, average_fr, sampling_filters_fr, r_psi, analytic,
out_3D, aligned:
Scattering parameters.
jtfs : bool (default False)
Whether to estimate per JTFS; if False, does time scattering.
        Must also be passed alongside `sc` to indicate which object it is.
        If `sc` is passed, the unaveraged variant won't be used where
        appropriate, so the result won't be an upper bound on energy if
        `sc` was built with `average_fr=True`.
backend : None / str
Backend to use (defaults to torch w/ GPU if available).
verbose : bool (default True)
Whether to print results to console.
get_out : bool (default False)
Whether to return computed coefficients and scattering objects alongside
energy ratios.
Returns
-------
ESr : dict[float]
Energy ratios.
Scx : tensor / dict[tensor]
Scattering output (if `get_out==True`).
sc : `Scattering1D` / `TimeFrequencyScattering1D`
Scattering object (if `get_out==True`).
"""
# warn if passing params alongside `sc`
_kw = dict(T=T, F=F, J=J, J_fr=J_fr, Q=Q, Q_fr=Q_fr,
max_pad_factor=max_pad_factor, max_pad_factor_fr=max_pad_factor_fr,
pad_mode=pad_mode, pad_mode_fr=pad_mode_fr,
average=average, average_fr=average_fr,
sampling_filters_fr=sampling_filters_fr,
out_3D=out_3D, aligned=aligned)
tm_params = ('T', 'J', 'Q', 'max_pad_factor', 'pad_mode', 'average')
fr_params = ('F', 'J_fr', 'Q_fr', 'max_pad_factor_fr', 'pad_mode_fr',
'average_fr', 'sampling_filters_fr', 'out_3D', 'aligned')
all_params = (*tm_params, *fr_params)
if sc is not None and any(_kw[arg] is not None for arg in all_params):
warnings.warn("`sc` object provided - parametric arguments ignored.")
elif not jtfs and any(_kw[arg] is not None for arg in fr_params):
warnings.warn("passed JTFS parameters with `jtfs=False` -- ignored.")
# create scattering object, if not provided
if sc is not None:
if jtfs:
sc_u = sc_a = sc
else:
if jtfs:
from wavespin import TimeFrequencyScattering1D as SC
else:
from wavespin import Scattering1D as SC
# handle args & pack parameters
N = x.shape[-1]
if Q is None:
Q = (8, 3)
if pad_mode is None:
pad_mode = 'reflect'
if r_psi is None:
r_psi = (.9, .9)
r_psi_fr = .9 if jtfs else None
if backend is None:
try:
import torch
backend = 'torch'
        except ImportError:
backend = 'numpy'
elif backend == 'torch':
import torch
kw = dict(shape=N, J=int(np.log2(N)), T=T, max_pad_factor=max_pad_factor,
pad_mode=pad_mode, Q=Q, frontend=backend, r_psi=r_psi)
if not jtfs:
if average is None:
average = True
if analytic is None:
analytic = False # library default
kw.update(**dict(average=average, analytic=analytic, out_type='list'))
else:
# handle `J_fr` & `F`
if J_fr is None:
if F is None:
sc_temp = SC(**kw)
n_psi1 = len(sc_temp.psi1_f)
J_fr = int(np.log2(n_psi1)) - 1
F = 2**J_fr
else:
J_fr = int(np.log2(F))
elif F is None:
F = 2**J_fr
# handle other args
if pad_mode_fr is None:
pad_mode_fr = 'conj-reflect-zero'
if average_fr is None:
average_fr = False
if analytic is None:
analytic = True # library default
if aligned is None:
aligned = True
if out_3D is None:
out_3D = False
if sampling_filters_fr is None:
sampling_filters_fr = 'resample'
if Q_fr is None:
Q_fr = 4
# pack JTFS args
kw.update(**dict(max_pad_factor_fr=max_pad_factor_fr, F=F,
pad_mode_fr=pad_mode_fr, average_fr=average_fr,
analytic=analytic, Q_fr=Q_fr, out_type='dict:list',
sampling_filters_fr=sampling_filters_fr,
out_3D=out_3D, aligned=aligned, r_psi_fr=r_psi_fr))
if average is None:
kw_u = dict(**kw, average=False)
kw_a = dict(**kw, average=True)
else:
kw_u = kw_a = dict(**kw, average=average)
# create scattering object
if backend == 'torch':
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if not jtfs:
sc = SC(**kw)
if backend == 'torch':
sc = sc.to(device)
meta = sc.meta()
else:
sc_u, sc_a = SC(**kw_u), SC(**kw_a)
if backend == 'torch':
sc_u, sc_a = sc_u.to(device), sc_a.to(device)
# scatter
if not jtfs:
Scx = sc(x)
Scx = jtfs_to_numpy(Scx)
else:
Scx_u = sc_u(x)
Scx_a = sc_a(x)
Scx_u, Scx_a = jtfs_to_numpy(Scx_u), jtfs_to_numpy(Scx_a)
# compute energies
# input energy
Ex = energy(x)
if not jtfs and average:
Ex /= 2**sc.log2_T
# scattering energy & ratios
ES = {}
if not jtfs:
for o in (0, 1, 2):
ES[f'S{o}'] = np.sum([energy(Scx[int(i)]['coef']) for i in
np.where(meta['order'] == o)[0]])
else:
for pair in Scx_u:
Scx = Scx_u if pair not in ('S0', 'S1') else Scx_a
ES[pair] = np.sum([energy(c['coef']) for c in Scx[pair]])
ESr = {k: v/Ex for k, v in ES.items()}
if not jtfs:
ESr['total'] = np.sum(list(ES.values())) / Ex
else:
E_common = sum(ES[pair] for pair in ('S0', 'psi_t * phi_f',
'psi_t * psi_f_up',
'psi_t * psi_f_dn'))
E_v1 = E_common + ES['phi_t * phi_f'] + ES['phi_t * psi_f']
E_v2 = E_common + ES['S1']
ESr['total_v1'], ESr['total_v2'] = E_v1 / Ex, E_v2 / Ex
# print energies
if T is None:
T = (sc_a if jtfs else sc).T
_txt = f", F={F}" if jtfs else ""
print(f"E(Sx)/E(x) | T={T}{_txt}")
for k, v in ESr.items():
print("{:.4f} -- {}".format(v, k))
if jtfs:
sc = (sc_u, sc_a)
return (ESr, Scx, sc) if get_out else ESr
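# Example (illustrative sketch, not part of the library's API). Quick energy
# conservation check on a test signal; `echirp` is defined later in this
# module, and the parameter values are arbitrary.
def _example_est_energy_conservation(N=2048):
    x = echirp(N)
    # time scattering: prints E(Sx)/E(x) per order and in total
    ESr_tm = est_energy_conservation(x, T=N//4, jtfs=False, backend='numpy')
    # JTFS: prints per-pair ratios plus two notions of the total
    ESr_jtfs = est_energy_conservation(x, T=N//4, F=8, jtfs=True,
                                       backend='numpy')
    return ESr_tm, ESr_jtfs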
def compute_lp_sum(psi_fs, phi_f=None, J=None, log2_T=None,
fold_antianalytic=False):
    """Computes the Littlewood-Paley sum of the filterbank, `sum(abs(psi_f)**2)`,
    plus `abs(phi_f)**2` if `phi_f` is given and `log2_T >= J`. If
    `fold_antianalytic`, folds the anti-analytic part onto the analytic part
    (see `fold_lp_sum`).
    """
    lp_sum = 0
for psi_f in psi_fs:
lp_sum += np.abs(psi_f)**2
if phi_f is not None and (
# else lowest frequency bandpasses are too attenuated
log2_T is not None and J is not None and log2_T >= J):
lp_sum += np.abs(phi_f)**2
if fold_antianalytic:
lp_sum = fold_lp_sum(lp_sum, analytic_part=True)
return lp_sum
def fold_lp_sum(lp_sum, analytic_part=True):
    """Folds the energy of one spectral half onto the other, for energy
    accounting with imperfect analyticity. `analytic_part=True` reflects the
    anti-analytic part onto the analytic part; `False` does the reverse.
    """
    if analytic_part:
# reflect anti-analytic part onto analytic;
# goal is energy conservation - if this is ignored and we
# normalize analytic part perfectly to 2, the non-zero negative
# freqs will make the filterbank energy-expansive
# sum onto positives, excluding DC and Nyquist,
# from negatives, excluding Nyquist
lp_sum[1:len(lp_sum)//2] += lp_sum[len(lp_sum)//2 + 1:][::-1]
# zero what we just carried over to not duplicate later by accident
lp_sum[len(lp_sum)//2 + 1:] = 0
# with `analytic=True`, this has no effect (all negatives == 0)
# (note, "analytic" in "analytic_only" includes pseudo-analytic)
else:
# above, but in reverse
lp_sum[len(lp_sum)//2 + 1:] += lp_sum[1:len(lp_sum)//2][::-1]
lp_sum[1:len(lp_sum)//2] = 0
return lp_sum
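# Example (illustrative sketch, not part of the library's API). For a tight
# analytic filterbank intended for real inputs, the folded LP sum should stay
# close to 2 over positive frequencies (see `validate_filterbank`). `psi1_f`
# is assumed to be a list of 1D frequency-domain bandpasses and `phi_f` the
# matching lowpass.
def _example_lp_sum(psi1_f, phi_f, J, log2_T):
    lp = compute_lp_sum(psi1_f, phi_f=phi_f, J=J, log2_T=log2_T,
                        fold_antianalytic=True)
    N = len(lp)
    # peak and worst-case dip over positive frequencies (DC excluded for min)
    return lp[:N//2 + 1].max(), lp[1:N//2 + 1].min()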
def make_jtfs_pair(N, pair='up', xi0=4, sigma0=1.35):
"""Creates a 2D JTFS wavelet. Used in `wavespin.visuals`."""
from .scattering1d.filter_bank import morlet_1d, gauss_1d
from scipy.fft import ifftshift, ifft
morl = morlet_1d(N, xi=xi0/N, sigma=sigma0/N).squeeze()
gaus = gauss_1d(N, sigma=sigma0/N).squeeze()
if pair in ('up', 'dn'):
i0, i1 = 0, 0
elif pair == 'phi_f':
i0, i1 = 1, 0
elif pair in ('phi_t', 'phi_t_dn'):
i0, i1 = 0, 1
elif pair == 'phi':
i0, i1 = 1, 1
else:
supported = {'up', 'dn', 'phi_f', 'phi_t', 'phi', 'phi_t_dn'}
raise ValueError("unknown pair %s; supported are %s" % (
pair, '\n'.join(supported)))
pf_f = (morl, gaus)[i0]
pt_f = (morl, gaus)[i1]
pf_f, pt_f = pf_f.copy(), pt_f.copy()
if pair in ('dn', 'phi_t_dn'):
# time reversal
pf_f[1:] = pf_f[1:][::-1]
pf, pt = [ifftshift(ifft(p)) for p in (pf_f, pt_f)]
Psi = pf[:, None] * pt[None]
return Psi
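# Example (illustrative sketch, not part of the library's API). Builds 2D JTFS
# wavelets for inspection; plotting (e.g. `plt.imshow(Psi_up.real)`) is only
# indicative and assumes matplotlib.
def _example_make_jtfs_pair(N=64):
    Psi_up = make_jtfs_pair(N, pair='up')
    Psi_phi = make_jtfs_pair(N, pair='phi')
    assert Psi_up.shape == (N, N) and Psi_phi.shape == (N, N)
    return Psi_up, Psi_phi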
#### Validating 1D filterbank ################################################
def validate_filterbank_tm(sc=None, psi1_f=None, psi2_f=None, phi_f=None,
criterion_amplitude=1e-3, verbose=True):
"""Runs `validate_filterbank()` on temporal filters; supports `Scattering1D`
and `TimeFrequencyScattering1D`.
Parameters
----------
sc : `Scattering1D` / `TimeFrequencyScattering1D` / None
If None, then `psi1_f_fr_up`, `psi1_f_fr_dn`, and `phi_f_fr` must
be not None.
psi1_f : list[tensor] / None
First-order bandpasses in frequency domain.
Overridden if `sc` is not None.
psi2_f : list[tensor] / None
Second-order bandpasses in frequency domain.
Overridden if `sc` is not None.
phi_f : tensor / None
Lowpass filter in frequency domain.
Overridden if `sc` is not None.
criterion_amplitude : float
Used for various thresholding in `validate_filterbank()`.
verbose : bool (default True)
Whether to print the report.
Returns
-------
data1, data2 : dict, dict
Returns from `validate_filterbank()` for `psi1_f` and `psi2_f`.
"""
if sc is None:
assert not any(arg is None for arg in (psi1_f, psi2_f, phi_f))
else:
psi1_f, psi2_f, phi_f = [getattr(sc, k) for k in
('psi1_f', 'psi2_f', 'phi_f')]
psi1_f, psi2_f = [[p[0] for p in ps] for ps in (psi1_f, psi2_f)]
phi_f = phi_f[0][0] if isinstance(phi_f[0], list) else phi_f[0]
if verbose:
print("\n// FIRST-ORDER")
data1 = validate_filterbank(psi1_f, phi_f, criterion_amplitude,
verbose=verbose,
for_real_inputs=True, unimodal=True)
if verbose:
print("\n\n// SECOND-ORDER")
data2 = validate_filterbank(psi2_f, phi_f, criterion_amplitude,
verbose=verbose,
for_real_inputs=True, unimodal=True)
return data1, data2
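# Example (illustrative sketch, not part of the library's API). Validates a
# scattering object's temporal filterbank; `sc` is assumed to be a built
# `Scattering1D` or `TimeFrequencyScattering1D` instance.
def _example_validate_filterbank_tm(sc):
    data1, data2 = validate_filterbank_tm(sc, verbose=True)
    # e.g. inspect filters flagged for incomplete time-domain decay
    return data1['decay'], data2['decay']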
def validate_filterbank_fr(sc=None, psi1_f_fr_up=None, psi1_f_fr_dn=None,
phi_f_fr=None, psi_id=0, criterion_amplitude=1e-3,
verbose=True):
"""Runs `validate_filterbank()` on frequential filters of JTFS.
Parameters
----------
sc : `TimeFrequencyScattering1D` / None
JTFS instance. If None, then `psi1_f_fr_up`, `psi1_f_fr_dn`, and
`phi_f_fr` must be not None.
psi1_f_fr_up : list[tensor] / None
Spin up bandpasses in frequency domain.
Overridden if `sc` is not None.
psi1_f_fr_dn : list[tensor] / None
Spin down bandpasses in frequency domain.
Overridden if `sc` is not None.
phi_f_fr : tensor / None
Lowpass filter in frequency domain.
Overridden if `sc` is not None.
psi_id : int
See `psi_id` in `filter_bank_jtfs.psi_fr_factory`.
criterion_amplitude : float
Used for various thresholding in `validate_filterbank()`.
verbose : bool (default True)
Whether to print the report.
Returns
-------
data_up, data_dn : dict, dict
Returns from `validate_filterbank()` for `psi1_f_fr_up` and
`psi1_f_fr_dn`.
"""
if sc is None:
assert not any(arg is None for arg in
(psi1_f_fr_up, psi1_f_fr_dn, phi_f_fr))
else:
psi1_f_fr_up, psi1_f_fr_dn, phi_f_fr = [
getattr(sc, k) for k in
('psi1_f_fr_up', 'psi1_f_fr_dn', 'phi_f_fr')]
psi1_f_fr_up, psi1_f_fr_dn = psi1_f_fr_up[psi_id], psi1_f_fr_dn[psi_id]
phi_f_fr = phi_f_fr[0][0][0]
if verbose:
print("\n// SPIN UP")
data_up = validate_filterbank(psi1_f_fr_up, phi_f_fr, criterion_amplitude,
verbose=verbose,
for_real_inputs=False, unimodal=True)
if verbose:
print("\n\n// SPIN DOWN")
data_dn = validate_filterbank(psi1_f_fr_dn, phi_f_fr, criterion_amplitude,
verbose=verbose,
for_real_inputs=False, unimodal=True)
return data_up, data_dn
def validate_filterbank(psi_fs, phi_f=None, criterion_amplitude=1e-3,
for_real_inputs=True, unimodal=True, is_time_domain=False,
verbose=True):
"""Checks whether the wavelet filterbank is well-behaved against several
criterion:
1. Analyticity:
- A: Whether analytic *and* anti-analytic filters are present
(input should contain only one)
- B: Extent of (anti-)analyticity - whether there's components
on other side of Nyquist
- C: Whether the Nyquist bin is halved
2. Aliasing:
- A. Whether peaks are sorted (left to right or right to left).
If not, it's possible aliasing (or sloppy user input).
- B. Whether peaks are distributed exponentially or linearly.
              If neither, it's possible aliasing. (Detection isn't foolproof.)
3. Zero-mean: whether filters are zero-mean (in time domain)
4. Zero-phase: whether filters are zero-phase
5. Frequency coverage: whether filters capture every frequency,
and whether they do so excessively or insufficiently.
- Measured with Littlewood-Paley sum (sum of energies),
the "energy transfer function".
- Also measured with sum of LP sum, in case of imperfect
analyticity not being accounted for (must fold leaked frequencies,
see `help(toolkit.compute_lp_sum)`, `fold_antianalytic`).
6. Frequency-bandwidth tiling: whether upper quarters of frequencies
follow CQT (fixed `xi/sigma = (center freq) / bandwidth`), and
whether all wavelet peak frequencies are distributed either
exponentially or linearly.
             Only the upper quarters (i.e. not `0` to `N//8`) are checked for
             CQT because the non-CQT portion could be in the majority, but it's
             unlikely to ever span the upper half.
7. Redundancy: whether filters overlap excessively (this isn't
necessarily bad).
- Measured as ratio of product of energies to sum of energies
of adjacent filters
            - Also measured as peak duplication in frequency domain. Note, it's
              possible to exceed the redundancy threshold without duplicating
              peaks, and vice versa (but the latter is more difficult).
8. Decay:
- A: Whether any filter is a pure sine (occurs if we try to sample
a wavelet at too low of a center frequency)
- B: Whether filters decay sufficiently in time domain to avoid
boundary effects
- C: Whether filters decay sufficiently in frequency domain
(bandwidth isn't the entire signal), and whether they decay
permanently (don't rise again after decaying)
            B may fail for the same reason as 8A (see above).
9. Temporal peaks:
- A: Whether peak is at t==0
- B: Whether there is only one peak
- C: Whether decay is smooth (else will incur inflection points)
A and B may fail to hold for lowest xi due to Morlet's corrective
term; this is proper behavior.
See https://www.desmos.com/calculator/ivd7t3mjn8
Parameters
----------
psi_fs : list[tensor]
        Wavelet filterbank, by default in frequency domain (if in time domain,
        set `is_time_domain=True`).
Analytic or pseudo-analytic, or anti- of either; does not support
real-valued wavelets (in time domain).
If `psi_fs` aren't all same length, will pad in time domain and
center about `n=0` (DFT-symmetrically), with original length's center
placed at index 0.
Note, if `psi_fs` are provided in time domain or aren't all same length,
they're padded such that FFT convolution matches
        `np.convolve(, mode='full')`. If wavelets are properly centered for FFT
        convolution - that is, either at `n=0` or centered per `ifftshift` -
        then for even lengths, `np.convolve` *will not* produce correct
        results - which is what happens with `scipy.cwt`.
phi_f : tensor
Lowpass filter in frequency domain, of same length as `psi_fs`.
criterion_amplitude : float
Used for various thresholding.
for_real_inputs : bool (default True)
Whether the filterbank is intended for real-only inputs.
E.g. `False` for spinned bandpasses in JTFS.
unimodal : bool (default True)
Whether the wavelets have a single peak in frequency domain.
If `False`, some checks are omitted, and others might be inaccurate.
Always `True` for Morlet wavelets.
    is_time_domain : bool (default False)
        Whether `psi_fs` are in time domain. See notes in `psi_fs`.
verbose : bool (default True)
Whether to print the report.
Returns
-------
data : dict
Aggregated testing info, along with the report. For keys, see
`print(list(data))`. Note, for entries that describe individual filters,
the indexing corresponds to `psi_fs` sorted in order of decreasing
peak frequency.
"""
def pop_if_no_header(report, did_atleast_one_header):
"""`did_atleast_one_header` sets to `False` after every `title()` call,
whereas `did_header` before every subsection, i.e. a possible
`if not did_header: report += []`. Former is to pop titles, latter
is to avoid repeatedly appending subsection text.
"""
if not did_atleast_one_header:
report.pop(-1)
# handle `psi_fs` domain and length ######################################
# squeeze all for convenience
psi_fs = [p.squeeze() for p in psi_fs]
# fetch max length
max_len = max(len(p) for p in psi_fs)
# take to freq or pad to max length
_psi_fs = [] # store processed filters
# also handle lowpass
if phi_f is not None:
psi_fs.append(phi_f)
for p in psi_fs:
if len(p) != max_len:
if not is_time_domain:
p = ifft(p)
# right-pad
orig_len = len(p)
p = np.pad(p, [0, max_len - orig_len])
# odd case: circularly-center about n=0; equivalent to `ifftshift`
# even case: center such that first output index of FFT convolution
# corresponds to `sum(x, p[::-1][-len(p)//2:])`, where `p` is in
# time domain. This is what `np.convolve` does, and it's *not*
# equivalent to FFT convolution after `ifftshift`
center_idx = orig_len // 2
p = np.roll(p, -(center_idx - 1))
# take to freq-domain
p = fft(p)
elif is_time_domain:
center_idx = len(p) // 2
p = np.roll(p, -(center_idx - 1))
p = fft(p)
_psi_fs.append(p)
psi_fs = _psi_fs
# recover & detach phi_f
if phi_f is not None:
phi_f = psi_fs.pop(-1)
##########################################################################
# set reference filter
psi_f_0 = psi_fs[0]
# fetch basic metadata
N = len(psi_f_0)
# assert all inputs are same length
# note, above already guarantees this, but we keep the code logic in case
# something changes in the future
for n, p in enumerate(psi_fs):
assert len(p) == N, (len(p), N)
if phi_f is not None:
assert len(phi_f) == N, (len(phi_f), N)
# initialize report
report = []
data = {k: {} for k in ('analytic_a_ratio', 'nonzero_mean', 'sine', 'decay',
'imag_mean', 'time_peak_idx', 'n_inflections',
'redundancy', 'peak_duplicates')}
data['opposite_analytic'] = []
def title(txt):
return ("\n== {} " + "=" * (80 - len(txt)) + "\n").format(txt)
# for later
w_pos = np.linspace(0, N//2, N//2 + 1, endpoint=True).astype(int)
w_neg = - w_pos[1:-1][::-1]
w = np.hstack([w_pos, w_neg])
eps = np.finfo(psi_f_0.dtype).eps
peak_idxs = np.array([np.argmax(np.abs(p)) for p in psi_fs])
peak_idxs_sorted = np.sort(peak_idxs)
if unimodal and not (np.all(peak_idxs == peak_idxs_sorted) or
np.all(peak_idxs == peak_idxs_sorted[::-1])):
warnings.warn("`psi_fs` peak locations are not sorted; a possible reason "
"is aliasing. Will sort, breaking mapping with input's.")
data['not_sorted'] = True
peak_idxs = peak_idxs_sorted
# Analyticity ############################################################
# check if there are both analytic and anti-analytic bandpasses ##########
report += [title("ANALYTICITY")]
did_header = did_atleast_one_header = False
    peak_idx_0 = np.argmax(np.abs(psi_f_0))
    if peak_idx_0 == N // 2:  # ambiguous case; check next filter
        peak_idx_0 = np.argmax(np.abs(psi_fs[1]))
analytic_0 = bool(peak_idx_0 < N//2)
# assume entire filterbank is per psi_0
analyticity = "analytic" if analytic_0 else "anti-analytic"
# check whether all is analytic or anti-analytic
found_counteranalytic = False
for n, p in enumerate(psi_fs[1:]):
peak_idx_n = np.argmax(np.abs(p))
analytic_n = bool(peak_idx_n < N//2)
if not (analytic_0 is analytic_n):
if not did_header:
report += [("Found analytic AND anti-analytic filters in same "
"filterbank! psi_fs[0] is {}, but the following "
"aren't:\n").format(analyticity)]
did_header = did_atleast_one_header = True
report += [f"psi_fs[{n}]\n"]
data['opposite_analytic'].append(n)
found_counteranalytic = True
# set `is_analytic` based on which there are more of
if not found_counteranalytic:
is_analytic = analytic_0
else:
n_analytic = sum(np.argmax(np.abs(p)) <= N//2 for p in psi_fs)
n_antianalytic = sum(np.argmax(np.abs(p)) >= N//2 for p in psi_fs)
        is_analytic = bool(n_analytic >= n_antianalytic)
report += [("\nIn total, there are {} analytic and {} anti-analytic "
"filters\n").format(n_analytic, n_antianalytic)]
# determine whether the filterbank is strictly analytic/anti-analytic
if is_analytic:
negatives_all_zero = False
for p in psi_fs:
# exclude Nyquist as it's both in analytic and anti-analytic
if not np.allclose(p[len(p)//2 + 1:], 0.):
break
else:
negatives_all_zero = True
strict_analyticity = negatives_all_zero
else:
positives_all_zero = False
for p in psi_fs:
# exclude DC, one problem at a time; exclude Nyquist
if not np.allclose(p[1:len(p)//2], 0.):
break
else:
positives_all_zero = True
strict_analyticity = positives_all_zero
# determine whether the Nyquist bin is halved
if strict_analyticity:
did_header = False
pf = psi_fs[0]
if is_analytic:
nyquist_halved = bool(pf[N//2 - 1] / pf[N//2] > 2)
else:
nyquist_halved = bool(pf[N//2 + 1] / pf[N//2] > 2)
if not nyquist_halved:
report += [("Nyquist bin isn't halved for strictly analytic wavelet; "
"yields improper analyticity with bad time decay.\n")]
did_header = did_atleast_one_header = True
# check if any bandpass isn't strictly analytic/anti- ####################
did_header = False
th_ratio = (1 / criterion_amplitude)
for n, p in enumerate(psi_fs):
ap = np.abs(p)
# assume entire filterbank is per psi_0
if is_analytic:
# Nyquist is *at* N//2, so to include in sum, index up to N//2 + 1
a_ratio = (ap[:N//2 + 1].sum() / (ap[N//2 + 1:].sum() + eps))
else:
a_ratio = (ap[N//2:].sum() / (ap[:N//2].sum() + eps))
if a_ratio < th_ratio:
if not did_header:
report += [("\nFound not strictly {} filter(s); threshold for "
"ratio of `spectral sum` to `spectral sum past "
"Nyquist` is {} - got (less is worse):\n"
).format(analyticity, th_ratio)]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}]: {:.1f}\n".format(n, a_ratio)]
data['analytic_a_ratio'][n] = a_ratio
# check if any bandpass isn't zero-mean ##################################
pop_if_no_header(report, did_atleast_one_header)
report += [title("ZERO-MEAN")]
did_header = did_atleast_one_header = False
for n, p in enumerate(psi_fs):
if p[0] != 0:
if not did_header:
report += ["Found non-zero mean filter(s)!:\n"]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}][0] == {:.2e}\n".format(n, p[0])]
data['nonzero_mean'][n] = p[0]
# Littlewood-Paley sum ###################################################
def report_lp_sum(report, phi):
with_phi = not isinstance(phi, int)
s = "with" if with_phi else "without"
report += [title("LP-SUM (%s phi)" % s)]
did_header = did_atleast_one_header = False
# compute parameters #################################################
# finish computing lp sum
lp_sum = lp_sum_psi + np.abs(phi)**2
lp_sum = (lp_sum[:N//2 + 1] if is_analytic else
lp_sum[N//2:])
if with_phi:
data['lp'] = lp_sum
else:
data['lp_no_phi'] = lp_sum
if not with_phi and is_analytic:
lp_sum = lp_sum[1:] # exclude dc
# excess / underflow
diff_over = lp_sum - th_lp_sum_over
diff_under = th_lp_sum_under - lp_sum
diff_over_max, diff_under_max = diff_over.max(), diff_under.max()
excess_over = np.where(diff_over > th_sum_excess)[0]
excess_under = np.where(diff_under > th_sum_excess)[0]
if not is_analytic:
excess_over += N//2
excess_under += N//2
elif is_analytic and not with_phi:
excess_over += 1
excess_under += 1 # dc
# lp sum sum
lp_sum_sum = lp_sum.sum()
# `1` per bin, minus
# - DC bin, since no phi
# - half of Nyquist bin, since `analytic=True` cannot ever get a full
# Nyquist (Nyquist bin is halved, so even in best case of the peak
# placed at Nyquist, we get 0.5). Unclear if any correction is due
# on this.
# negligible adjustments if `N` is large (JTFS N_frs can be small enough)
expected_sum = N
if not with_phi:
expected_sum -= 1
if strict_analyticity:
expected_sum -= .5
# scale according to tolerance.
# tolerances determined empirically from the most conservative case;
# see `tests.test_jtfs.test_lp_sum`
th_sum_above = .01
th_sum_below = .15
expected_above = expected_sum * (1 + th_sum_above)
expected_below = expected_sum * (1 - th_sum_below)
# append report entries ##############################################
input_kind = "real" if for_real_inputs else "complex"
if len(excess_over) > 0:
# show at most 30 values
stride = max(int(round(len(excess_over) / 30)), 1)
s = f", shown skipping every {stride-1} values" if stride != 1 else ""
report += [("LP sum exceeds threshold of {} (for {} inputs) by "
"at most {:.3f} (more is worse) at following frequency "
"bin indices (0 to {}{}):\n"
).format(th_lp_sum_over, input_kind, diff_over_max,
N//2, s)]
report += ["{}\n\n".format(w[excess_over][::stride])]
did_header = did_atleast_one_header = True
if with_phi:
data['lp_excess_over'] = excess_over
data['lp_excess_over_max'] = diff_over_max
else:
data['lp_no_phi_excess_over'] = excess_over
data['lp_no_phi_excess_over_max'] = diff_over_max
if len(excess_under) > 0:
# show at most 30 values
stride = max(int(round(len(excess_under) / 30)), 1)
s = f", shown skipping every {stride-1} values" if stride != 1 else ""
report += [("LP sum falls below threshold of {} (for {} inputs) by "
"at most {:.3f} (more is worse; ~{} implies ~zero "
"capturing of the frequency!) at following frequency "
"bin indices (0 to {}{}):\n"
).format(th_lp_sum_under, input_kind, diff_under_max,
th_lp_sum_under, N//2, s)]
# w_show = np.round(w[excess_under][::stride], 3)
report += ["{}\n\n".format(w[excess_under][::stride])]
did_header = did_atleast_one_header = True
if with_phi:
data['lp_excess_under'] = excess_under
data['lp_excess_under_max'] = diff_under_max
else:
data['lp_no_phi_excess_under'] = excess_under
data['lp_no_phi_excess_under_max'] = diff_under_max
if lp_sum_sum > expected_above:
report += [("LP sum sum exceeds expected: {} > {}. If LP sum "
"otherwise has no excess, then there may be leakage due "
"to imperfect analyticity, corrected by folding; see "
"help(toolkit.fold_lp_sum)\n").format(lp_sum_sum,
expected_above)]
did_header = did_atleast_one_header = True
diff = lp_sum_sum - expected_above
if with_phi:
data['lp_sum_sum_excess_over'] = diff
else:
data['lp_sum_sum_no_phi_excess_over'] = diff
if lp_sum_sum < expected_below:
report += [("LP sum sum falls short of expected: {} < {}. If LP sum "
"otherwise doesn't fall short, then there may be leakage "
"due to imperfect analyticity, corrected by folding; see "
"help(toolkit.fold_lp_sum)\n").format(lp_sum_sum,
expected_below)]
did_header = did_atleast_one_header = True
diff = expected_below - lp_sum_sum
if with_phi:
data['lp_sum_sum_excess_under'] = diff
else:
data['lp_sum_sum_no_phi_excess_under'] = diff
if did_header:
stdev = np.abs(lp_sum[lp_sum >= th_lp_sum_under] -
th_lp_sum_under).std()
report += [("Mean absolute deviation from tight frame: {:.2f}\n"
"Standard deviation from tight frame: {:.2f} "
"(excluded LP sum values below {})\n").format(
np.abs(diff_over).mean(), stdev, th_lp_sum_under)]
pop_if_no_header(report, did_atleast_one_header)
pop_if_no_header(report, did_atleast_one_header)
th_lp_sum_over = 2 if for_real_inputs else 1
th_lp_sum_under = th_lp_sum_over / 2
th_sum_excess = (1 + criterion_amplitude)**2 - 1
lp_sum_psi = np.sum([np.abs(p)**2 for p in psi_fs], axis=0)
# fold opposite frequencies to ensure leaks are accounted for
lp_sum_psi = fold_lp_sum(lp_sum_psi, analytic_part=is_analytic)
# do both cases
if phi_f is not None:
report_lp_sum(report, phi=phi_f)
report_lp_sum(report, phi=0)
# Redundancy #############################################################
from .scattering1d.filter_bank import compute_filter_redundancy
report += [title("REDUNDANCY")]
did_header = did_atleast_one_header = False
max_to_print = 20
# overlap ####
th_r = .4 if for_real_inputs else .2
printed = 0
for n in range(len(psi_fs) - 1):
r = compute_filter_redundancy(psi_fs[n], psi_fs[n + 1])
data['redundancy'][(n, n + 1)] = r
if r > th_r:
if not did_header:
report += [("Found filters with redundancy exceeding {} (energy "
"overlap relative to sum of individual energies) "
"-- This isn't necessarily bad. Showing up to {} "
"filters:\n").format(th_r, max_to_print)]
did_header = did_atleast_one_header = True
if printed < max_to_print:
report += ["psi_fs[{}] & psi_fs[{}]: {:.3f}\n".format(
n, n + 1, r)]
printed += 1
# peak duplication ####
did_header = False
printed = 0
for n, peak_idx in enumerate(peak_idxs):
if np.sum(peak_idx == peak_idxs) > 1:
data['peak_duplicates'][n] = peak_idx
if not did_header:
spc = "\n" if did_atleast_one_header else ""
report += [("{}Found filters with duplicate peak frequencies! "
"Showing up to {} filters:\n").format(spc,
max_to_print)]
did_header = did_atleast_one_header = True
if printed < max_to_print:
report += ["psi_fs[{}], peak_idx={}\n".format(n, peak_idx)]
printed += 1
# Decay: check if any bandpass is a pure sine ############################
pop_if_no_header(report, did_atleast_one_header)
report += [title("DECAY (check for pure sines)")]
did_header = did_atleast_one_header = False
th_ratio_max_to_next_max = (1 / criterion_amplitude)
for n, p in enumerate(psi_fs):
psort = np.sort(np.abs(p)) # least to greatest
ratio = psort[-1] / (psort[-2] + eps)
if ratio > th_ratio_max_to_next_max:
if not did_header:
report += [("Found filter(s) that are pure sines! Threshold for "
"ratio of Fourier peak to next-highest value is {} "
"- got (more is worse):\n"
).format(th_ratio_max_to_next_max)]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}]: {:.2e}\n".format(n, ratio)]
data['sine'][n] = ratio
# Decay: frequency #######################################################
from .scattering1d.filter_bank import compute_bandwidth
pop_if_no_header(report, did_atleast_one_header)
report += [title("DECAY (frequency)")]
did_header = did_atleast_one_header = False
# compute bandwidths
bandwidths = [compute_bandwidth(pf, criterion_amplitude)
for pf in psi_fs]
excess_bw = N//2 if strict_analyticity else N
for n, bw in enumerate(bandwidths):
if bw == excess_bw:
if not did_header:
report += [("Found filter(s) that never sufficiently decay "
"in frequency:\n")]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}], bandwidth={}\n".format(n, bw)]
# handle case where a filter first decays and then rises again
if unimodal:
def decayed_then_rose(epf):
criterion_energy = criterion_amplitude**2
decay_idxs = np.where(epf < criterion_energy)[0]
if len(decay_idxs) == 0:
# never decayed
return False
first_decay_idx = decay_idxs[0]
bound = len(epf)//2 # exclude opposite half
rise_idxs = np.where(epf[first_decay_idx + 1:bound + 1] >
criterion_energy)[0]
return bool(len(rise_idxs) > 0)
did_header = False
for n, pf in enumerate(psi_fs):
# center about n=0 to handle left & right separately
pf = np.roll(pf, -np.argmax(np.abs(pf)))
epf = np.abs(pf)**2
dtr_right = decayed_then_rose(epf)
# frequency-reverse
epf[1:] = epf[1:][::-1]
dtr_left = decayed_then_rose(epf)
# both apply regardless of `strict_analyticity`
# (since one of them should be impossible if it's `True`)
if dtr_left or dtr_right:
if not did_header:
report += [("Found filter(s) that decay then rise again in "
"frequency:\n")]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}]\n".format(n)]
# Decay: boundary effects ################################################
pop_if_no_header(report, did_atleast_one_header)
report += [title("DECAY (boundary effects)")]
did_header = did_atleast_one_header = False
th_ratio_max_to_min = (1 / criterion_amplitude)
psis = [np.fft.ifft(p) for p in psi_fs]
apsis = [np.abs(p) for p in psis]
for n, ap in enumerate(apsis):
ratio = ap.max() / (ap.min() + eps)
if ratio < th_ratio_max_to_min:
if not did_header:
report += [("Found filter(s) with incomplete decay (will incur "
"boundary effects), with following ratios of "
"amplitude max to edge (less is worse; threshold "
"is {}):\n").format(1 / criterion_amplitude)]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}]: {:.1f}\n".format(n, ratio)]
data['decay'][n] = ratio
# check lowpass
if phi_f is not None:
aphi = np.abs(np.fft.ifft(phi_f))
ratio = aphi.max() / (aphi.min() + eps)
if ratio < th_ratio_max_to_min:
nl = "\n" if did_header else ""
report += [("{}Lowpass filter has incomplete decay (will incur "
"boundary effects), with following ratio of amplitude "
"max to edge: {:.1f} > {}\n").format(nl, ratio,
th_ratio_max_to_min)]
did_header = did_atleast_one_header = True
data['decay'][-1] = ratio
# Phase ##################################################################
pop_if_no_header(report, did_atleast_one_header)
report += [title("PHASE")]
did_header = did_atleast_one_header = False
th_imag_mean = eps
for n, p in enumerate(psi_fs):
imag_mean = np.abs(p.imag).mean()
if imag_mean > th_imag_mean:
if not did_header:
report += [("Found filters with non-zero phase, with following "
"absolute mean imaginary values:\n")]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}]: {:.1e}\n".format(n, imag_mean)]
data['imag_mean'][n] = imag_mean
# Aliasing ###############################################################
def diff_extend(diff, th, cond='gt', order=1):
# the idea is to take `diff` without losing samples, if the goal is
# `where(diff == 0)`; `diff` is forward difference, and two samples
# participated in producing the zero, where later one's index is dropped
# E.g. detecting duplicate peak indices:
# [0, 1, 3, 3, 5] -> diff gives [2], so take [2, 3]
# but instead of adding an index, replace next sample with zero such that
# its `where == 0` produces that index
        if order > 1:
            # apply the first-order extension `order` times in total
            diff_e = diff_extend(diff, th, cond)
            for _ in range(order - 1):
                diff_e = diff_extend(diff_e, th, cond)
            return diff_e
diff_e = []
d_extend = 2*th if cond == 'gt' else th
prev_true = False
for d in diff:
if prev_true:
diff_e.append(d_extend)
prev_true = False
else:
diff_e.append(d)
if (cond == 'gt' and np.abs(d) > th or
cond == 'eq' and np.abs(d) == th):
prev_true = True
if prev_true:
# last sample was zero; extend
diff_e.append(d_extend)
return np.array(diff_e)
if unimodal:
pop_if_no_header(report, did_atleast_one_header)
report += [title("ALIASING")]
did_header = did_atleast_one_header = False
eps_big = eps * 100 # ease threshold for "zero"
if len(peak_idxs) < 6:
warnings.warn("Alias detector requires at least 6 wavelets to "
"work properly, per repeated `np.diff`")
# check whether peak locations follow a linear or exponential
# distribution, progressively dropping those that do to see if any remain
        # x[n] = A^n + C;  x[n] - x[n-1] = A^(n-1)*(A - 1) = C2*A^n
        # log(C2*A^n) = K + n*log(A) is linear in n, so diff(log(diff(x)), 2) == 0
# `abs` for anti-analytic case with descending idxs
logdiffs = np.diff(np.log(np.abs(np.diff(peak_idxs))), 2)
# In general it's impossible to determine whether a rounded sequence
# samples an exponential, since if the exponential rate (A in A^n) is
# sufficiently small, its rounded values will be linear over some portion.
# However, it cannot be anything else, and we are okay with linear
# (unless constant, i.e. duplicate, captured elsewhere) - thus the net
# case of `exp + lin` is still captured. The only uncertainty is in
# the threshold; assuming deviation by at most 1 sample, we set it to 1.
# A test is:
# `for b in linspace(1.2, 6.5, 500): x = round(b**arange(10) + 50)`
# with `if any(abs(diff, o).min() == 0 for o in (1, 2, 3)): continue`,
# Another with: `linspace(.2, 1, 500)` and `round(256*b**arange(10) + 50)`
# to exclude `x` with repeated or linear values
# However, while this has no false positives (never misses an exp/lin),
# it can also count some non-exp/lin as exp/lin, but this is rare.
# To be safe, per above test, we use the empirical value of 0.9
logdiffs_extended = diff_extend(logdiffs, .9)
if len(logdiffs_extended) > len(logdiffs) + 2:
# this could be `assert` but not worth erroring over this
warnings.warn("`len(logdiffs_extended) > len(logdiffs) + 2`; will "
"use more conservative estimate on peaks distribution")
logdiffs_extended = logdiffs
keep = np.where(np.abs(logdiffs_extended) > .9)
# note due to three `diff`s we artificially exclude 3 samples
peak_idxs_remainder = peak_idxs[keep]
# now constant (diff_order==1) and linear (diff_order==2)
for diff_order in (1, 2):
idxs_diff2 = np.diff(peak_idxs_remainder, diff_order)
keep = np.where(np.abs(idxs_diff2) > eps_big)
peak_idxs_remainder = peak_idxs_remainder[keep]
# if anything remains, it's neither
if len(peak_idxs_remainder) > 0:
report += [("Found Fourier peaks that are spaced neither "
"exponentially nor linearly, suggesting possible "
"aliasing.\npsi_fs[n], n={}\n"
).format(peak_idxs_remainder)]
data['alias_peak_idxs'] = peak_idxs_remainder
did_header = did_atleast_one_header = True
# Frequency-bandwidth tiling; CQT ########################################
# note, we checked for linear/exponential spacing in "Aliasing" section
if unimodal:
pop_if_no_header(report, did_atleast_one_header)
report += [title("FREQUENCY-BANDWIDTH TILING")]
did_header = did_atleast_one_header = False
def isnt_lower_quarter(pidx):
return ((is_analytic and pidx > N//8) or
(not is_analytic and pidx < (N - N//8)))
got_peaks_above_first_quarter = any(isnt_lower_quarter(peak_idx)
for peak_idx in peak_idxs)
if got_peaks_above_first_quarter:
# idxs must reflect distance from DC
if is_analytic:
peak_idxs_dist = peak_idxs
else:
peak_idxs_dist = [N - peak_idx for peak_idx in peak_idxs]
# compute bandwidths, accounting for strict analyticity;
# can infer full intended bandwidth from just one half
if strict_analyticity:
if is_analytic:
# right is trimmed
bandwidths = [compute_bandwidth(pf, criterion_amplitude,
left_only=True)
for pf in psi_fs]
else:
# left is trimmed
bandwidths = [compute_bandwidth(pf, criterion_amplitude,
right_only=True)
for pf in psi_fs]
else:
bandwidths = [compute_bandwidth(pf, criterion_amplitude)
for pf in psi_fs]
Qs_upper_quarters = {n: peak_idx_dist / bw
for n, (peak_idx_dist, bw)
in enumerate(zip(peak_idxs_dist, bandwidths))
# must still use original peak idxs here
if isnt_lower_quarter(peak_idxs[n])}
Qs_values = list(Qs_upper_quarters.values())
tolerance = .01 # abs relative difference tolerance 1%
# pick most favorable reference
Qs_diffs = np.abs(np.diff(Qs_values))
Q_ref = Qs_values[np.argmin(Qs_diffs) + 1]
non_cqts = []
for n, Q in Qs_upper_quarters.items():
if abs(Q - Q_ref) / Q_ref > tolerance:
non_cqts.append((n, Q))
if len(non_cqts) > 0:
                non_cqt_strs = ["psi_fs[{}], Q={}".format(n, Q)
                                for n, Q in non_cqts]
report += [("Found non-CQT wavelets in upper quarters of "
"frequencies - i.e., `(center freq) / bandwidth` "
"isn't constant: \n{}\n"
).format("\n".join(non_cqt_strs))]
data['non_cqts'] = non_cqts
did_header = did_atleast_one_header = True
# Temporal peak ##########################################################
if unimodal:
# check that temporal peak is at t==0 ################################
pop_if_no_header(report, did_atleast_one_header)
report += [title("TEMPORAL PEAK")]
did_header = did_atleast_one_header = False
for n, ap in enumerate(apsis):
peak_idx = np.argmax(ap)
if peak_idx != 0:
if not did_header:
report += [("Found filters with temporal peak not at t=0!, "
"with following peak locations:\n")]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}]: {}\n".format(n, peak_idx)]
data['time_peak_idx'][n] = peak_idx
# check that there is only one temporal peak #########################
did_header = False
for n, ap in enumerate(apsis):
# count number of inflection points (where sign of derivative changes)
# exclude very small values
# center for proper `diff`
ap = np.fft.ifftshift(ap)
inflections = np.diff(np.sign(np.diff(ap[ap > 10*eps])))
n_inflections = sum(np.abs(inflections) > eps)
if n_inflections > 1:
if not did_header:
report += [("\nFound filters with multiple temporal peaks "
"(or incomplete/non-smooth decay)! "
"(more precisely, >1 inflection points) with "
"following number of inflection points:\n")]
did_header = did_atleast_one_header = True
report += ["psi_fs[{}]: {}\n".format(n, n_inflections)]
                data['n_inflections'][n] = n_inflections
else:
pop_if_no_header(report, did_atleast_one_header)
# Print report ###########################################################
report = ''.join(report)
data['report'] = report
if verbose:
if len(report) == 0:
print("Perfect filterbank!")
else:
print(report)
return data
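# Example (illustrative sketch, not part of the library's API). Direct use on
# a handmade filterbank; `morlet_1d` and `gauss_1d` are the library's filter
# constructors (as also used in `make_jtfs_pair`), and the parameter choices
# below are arbitrary.
def _example_validate_filterbank(N=1024):
    from .scattering1d.filter_bank import morlet_1d, gauss_1d
    xis = [0.35 / 2**j for j in range(6)]
    psi_fs = [morlet_1d(N, xi=xi, sigma=xi / 5).squeeze() for xi in xis]
    phi_f = gauss_1d(N, sigma=xis[-1] / 5).squeeze()
    data = validate_filterbank(psi_fs, phi_f, verbose=True)
    return data['report']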
#### energy & distance #######################################################
def energy(x, axis=None, kind='l2'):
"""Compute energy. L1==`sum(abs(x))`, L2==`sum(abs(x)**2)` (so actually L2^2).
"""
x = x['coef'] if isinstance(x, dict) else x
B = ExtendedUnifiedBackend(x)
out = (B.norm(x, ord=1, axis=axis) if kind == 'l1' else
B.norm(x, ord=2, axis=axis)**2)
if np.prod(out.shape) == 1:
out = float(out)
return out
def l2(x, axis=None, keepdims=True):
"""`sqrt(sum(abs(x)**2))`."""
B = ExtendedUnifiedBackend(x)
return B.norm(x, ord=2, axis=axis, keepdims=keepdims)
def rel_l2(x0, x1, axis=None, adj=False):
"""Coeff distance measure; Eq 2.24 in
https://www.di.ens.fr/~mallat/papiers/ScatCPAM.pdf
"""
ref = l2(x0, axis) if not adj else (l2(x0, axis) + l2(x1, axis)) / 2
return l2(x1 - x0, axis) / ref
def l1(x, axis=None, keepdims=True):
"""`sum(abs(x))`."""
B = ExtendedUnifiedBackend(x)
return B.norm(x, ord=1, axis=axis, keepdims=keepdims)
def rel_l1(x0, x1, adj=False, axis=None):
ref = l1(x0, axis) if not adj else (l1(x0, axis) + l1(x1, axis)) / 2
return l1(x1 - x0, axis) / ref
def rel_ae(x0, x1, eps=None, ref_both=True):
"""Relative absolute error."""
B = ExtendedUnifiedBackend(x0)
if ref_both:
if eps is None:
eps = _eps(x0, x1)
ref = (x0 + x1)/2 + eps
else:
if eps is None:
eps = _eps(x0)
ref = x0 + eps
return B.abs(x0 - x1) / ref
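# Example (illustrative sketch, not part of the library's API). The norm
# helpers accept arrays or backend tensors; `x0` and `x1` are assumed
# same-shaped arrays.
def _example_distances(x0, x1):
    e = energy(x0)         # sum(abs(x0)**2)
    d_l2 = rel_l2(x0, x1)  # l2(x1 - x0) / l2(x0)
    d_l1 = rel_l1(x0, x1)  # l1(x1 - x0) / l1(x0)
    ae = rel_ae(x0, x1)    # elementwise relative absolute error
    return e, d_l2, d_l1, ae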
def _eps(x0, x1=None):
B = ExtendedUnifiedBackend(x0)
if x1 is None:
eps = B.abs(x0).max() / 1000
else:
eps = (B.abs(x0).max() + B.abs(x1).max()) / 2000
eps = max(eps, 10 * np.finfo(B.numpy(x0).dtype).eps)
return eps
#### test signals ###########################################################
def echirp(N, fmin=1, fmax=None, tmin=0, tmax=1):
"""https://overlordgolddragon.github.io/test-signals/ (bottom)"""
fmax = fmax or N // 2
t = np.linspace(tmin, tmax, N)
phi = _echirp_fn(fmin, fmax, tmin, tmax)(t)
return np.cos(phi)
def _echirp_fn(fmin, fmax, tmin=0, tmax=1):
a = (fmin**tmax / fmax**tmin) ** (1/(tmax - tmin))
b = fmax**(1/tmax) * (1/a)**(1/tmax)
phi = lambda t: 2*np.pi * (a/np.log(b)) * (b**t - b**tmin)
return phi
def fdts(N, n_partials=2, total_shift=None, f0=None, seg_len=None,
partials_f_sep=1.6, global_shift=0, brick_spectrum=False,
endpoint=False):
"""Generate windowed tones with Frequency-dependent Time Shifts (FDTS)."""
def brick(g):
gf = np.fft.rfft(g)
# center at dc
ct = np.argmax(np.abs(gf))
gf_ct = np.roll(gf, -ct)
agf_ct = np.abs(gf_ct)
# brickwall width = ~support width
# decays slower so pick smaller criterion_amplitude
width = np.where(agf_ct < agf_ct.max() / 10000)[0][0]
brick_f = np.zeros(len(g)//2 + 1)
brick_f[:width] = 1
brick_f[-width:] = 1
gf_ct *= brick_f
gf_bricked = np.roll(gf_ct, ct)
g_bricked = np.fft.irfft(gf_bricked)
return g_bricked
total_shift = total_shift or N//16
f0 = f0 or N//12
seg_len = seg_len or N//8
t = np.linspace(0, 1, N, endpoint=endpoint)
    window = scipy.signal.windows.tukey(seg_len, alpha=0.5)
pad_right = (N - len(window)) // 2
pad_left = N - len(window) - pad_right
window = np.pad(window, [pad_left, pad_right])
x = np.zeros(N)
xs = x.copy()
for p in range(0, n_partials):
f_shift = partials_f_sep**p
x_partial = np.sin(2*np.pi * f0 * f_shift * t) * window
if brick_spectrum:
x_partial = brick(x_partial)
partial_shift = int(total_shift * np.log2(f_shift) / np.log2(n_partials))
xs_partial = np.roll(x_partial, partial_shift)
x += x_partial
xs += xs_partial
if global_shift:
x = np.roll(x, global_shift)
xs = np.roll(xs, global_shift)
return x, xs
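# Example (illustrative sketch, not part of the library's API). Builds the two
# test signals: an exponential chirp, and an FDTS pair (unshifted vs.
# frequency-dependently time-shifted partials); parameter values are arbitrary.
def _example_test_signals(N=2048):
    x_chirp = echirp(N, fmin=32, fmax=N//4)
    x, xs = fdts(N, n_partials=4, total_shift=N//16)
    return x_chirp, x, xs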
#### misc ###################################################################
def tensor_padded(seq, pad_value=0, init_fn=None, cast_fn=None, ref_shape=None,
left_pad_axis=None, general=True):
"""Make tensor from variable-length `seq` (e.g. nested list) padded with
`fill_value`.
Parameters
----------
seq : list[tensor]
Nested list of tensors.
pad_value : float
Value to pad with.
init_fn : function / None
Instantiates packing tensor, e.g. `lambda shape: torch.zeros(shape)`.
Defaults to `backend.full`.
cast_fn : function / None
Function to cast tensor values before packing, e.g.
`lambda x: torch.tensor(x)`.
ref_shape : tuple[int] / None
Tensor output shape to pack into, instead of what's inferred for `seq`,
as long as >= that shape. Shape inferred from `seq` is the minimal size
determined from longest list at each nest level, i.e. we can't go lower
without losing elements.
Tuple can contain `None`: `(None, 3)` will pad dim0 per `seq` and dim1
to 3, unless `seq`'s dim1 is 4, then will pad to 4.
Recommended to pass this argument if applying `tensor_padded` multiple
times, as time to infer shape is significant, especially relative to
GPU computation.
left_pad_axis : int / tuple[int] / None
Along these axes, will pad from left instead of the default right.
Not implemented for dim0 (`0 in left_pad_axis`).
general : bool (default True)
If `False`, will use a much faster routine that's for JTFS.
        Not implemented for TensorFlow: will convert to a numpy array, then
        revert to a TF tensor.
Code borrows from: https://stackoverflow.com/a/27890978/10133797
"""
iter_axis = [0]
prev_axis = [iter_axis[0]]
def fill_tensor(arr, seq, fill_value=0):
if iter_axis[0] != prev_axis[0]:
prev_axis[0] = iter_axis[0]
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
if len_ == 0:
pass
elif len(shape) not in left_pad_axis: # right pad
arr[:len_] = cast_fn(seq)
else: # left pad
arr[-len_:] = cast_fn(seq)
else:
iter_axis[0] += 1
left_pad = bool(iter_axis[0] in left_pad_axis)
if left_pad:
seq = IterWithDelay(seq, len(arr) - len(seq), fillvalue=())
for subarr, subseq in zip_longest(arr, seq, fillvalue=()):
fill_tensor(subarr, subseq, fill_value)
if subarr.ndim != 1:
iter_axis[0] -= 1
# infer `init_fn` and `cast_fn` from `seq`, if not provided ##############
backend, backend_name = _infer_backend(seq, get_name=True)
is_tf = bool(backend_name == 'tensorflow')
if is_tf:
tf = backend
backend = np
backend_name = 'numpy'
# infer dtype & whether on GPU
sq = seq
while isinstance(sq, list):
sq = sq[0]
dtype = sq.dtype if hasattr(sq, 'dtype') else type(sq)
if backend_name == 'torch':
device = sq.device
else:
device = None
if init_fn is None:
if backend_name == 'numpy':
if is_tf:
dtype = dtype.name
init_fn = lambda s: np.full(s, pad_value, dtype=dtype)
elif backend_name == 'torch':
init_fn = lambda s: backend.full(s, pad_value, dtype=dtype,
device=device)
if cast_fn is None:
if is_tf:
cast_fn = lambda x: x.numpy()
elif backend_name == 'numpy':
cast_fn = lambda x: x
elif backend_name == 'torch':
cast_fn = lambda x: (backend.tensor(x, device=device)
if not isinstance(x, backend.Tensor) else x)
##########################################################################
# infer shape if not provided
if ref_shape is None or any(s is None for s in ref_shape):
shape = list(find_shape(seq, fast=not general))
# override shape with `ref_shape` where provided
if ref_shape is not None:
for i, s in enumerate(ref_shape):
if s is not None and s >= shape[i]:
shape[i] = s
else:
shape = ref_shape
shape = tuple(shape)
# handle `left_pad_axis`
if left_pad_axis is None:
left_pad_axis = ()
elif isinstance(left_pad_axis, int):
left_pad_axis = [left_pad_axis]
elif isinstance(left_pad_axis, tuple):
left_pad_axis = list(left_pad_axis)
# negatives -> positives
for i, a in enumerate(left_pad_axis):
if a < 0:
# +1 since counting index depth which goes `1, 2, ...`
left_pad_axis[i] = (len(shape) + 1) + a
if 0 in left_pad_axis:
raise NotImplementedError("`0` in `left_pad_axis`")
# fill
arr = init_fn(shape)
fill_tensor(arr, seq, fill_value=pad_value)
# revert if needed
if is_tf:
arr = tf.convert_to_tensor(arr)
return arr
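# Example (illustrative sketch, not part of the library's API). Pads a ragged
# nested list into a dense array; with plain Python numbers the numpy backend
# is inferred.
def _example_tensor_padded():
    seq = [[[1, 2, 3], [4, 5]],
           [[6]]]
    out = tensor_padded(seq, pad_value=0)       # shape (2, 2, 3)
    out_left = tensor_padded(seq, pad_value=0,  # pad the last dim from left
                             left_pad_axis=-1)
    return out, out_left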
def find_shape(seq, fast=False):
"""Finds shape to pad a variably nested list to.
`fast=True` uses an implementation optimized for JTFS.
"""
if fast:
"""Assumes 4D/5D and only variable length lists are in dim3."""
flat = chain.from_iterable
try:
dim4 = len(seq[0][0][0][0]) # 5D
dims = (len(seq), len(seq[0]), len(seq[0][0]),
max(map(len, flat(flat(seq)))), dim4)
        except Exception:  # not 5D; fall back to the 4D layout
dims = (len(seq), len(seq[0]),
max(map(len, flat(seq))), len(seq[0][0][0]))
else:
dims = _find_shape_gen(seq)
return dims
def _find_shape_gen(seq):
"""Code borrows from https://stackoverflow.com/a/27890978/10133797"""
try:
len_ = len(seq)
except TypeError:
return ()
shapes = [_find_shape_gen(subseq) for subseq in seq]
return (len_,) + tuple(max(sizes) for sizes in
zip_longest(*shapes, fillvalue=1))
class IterWithDelay():
"""Allows implementing left padding by delaying iteration of a sequence."""
def __init__(self, x, delay, fillvalue=()):
self.x = x
self.delay = delay
self.fillvalue = fillvalue
def __iter__(self):
self.idx = 0
return self
def __next__(self):
if self.idx > self.delay - 1:
idx = self.idx - self.delay
if idx < len(self.x):
out = self.x[idx]
else:
raise StopIteration
else:
out = self.fillvalue
self.idx += 1
return out
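# Example of the delayed iteration used for left padding (the default
# `fillvalue=()` is what `fill_tensor` skips over):
#
#     list(IterWithDelay([1, 2, 3], delay=2))  # -> [(), (), 1, 2, 3]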
def fill_default_args(cfg, defaults, copy_original=True,
check_against_defaults=False):
"""If a key is present in `defaults` but not in `cfg`, then copies
the key-value pair from `defaults` onto `cfg`. Also applies to nests.
    `check_against_defaults` will raise an exception if there are keys in `cfg`
    that aren't in `defaults`.
"""
if cfg is None or cfg == {}:
return defaults
elif not isinstance(cfg, dict):
raise ValueError("`cfg` must be dict or None, got %s" % type(cfg))
if copy_original:
cfg = deepcopy(cfg) # don't affect external
for k, v in defaults.items():
if k not in cfg:
cfg[k] = v
else:
if isinstance(v, dict):
cfg[k] = fill_default_args(cfg[k], v)
if check_against_defaults:
for k in cfg:
if k not in defaults:
raise ValueError("unknown kwarg: '{}', supported are:\n{}".format(
k, '\n'.join(list(cfg))))
return cfg
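# Example (nested defaults are filled recursively):
#
#     defaults = {'a': 1, 'b': {'c': 2, 'd': 3}}
#     fill_default_args({'b': {'c': 20}}, defaults)
#     # -> {'b': {'c': 20, 'd': 3}, 'a': 1}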
def get_phi_for_psi_id(jtfs, psi_id):
"""Returns `phi_f_fr` at appropriate length, but always of scale `log2_F`."""
scale_diff = list(jtfs.psi_ids.values()).index(psi_id)
pad_diff = jtfs.J_pad_frs_max_init - jtfs.J_pad_frs[scale_diff]
return jtfs.phi_f_fr[0][pad_diff][0]
# decimate object ############################################################
class Decimate():
def __init__(self, backend='numpy', gpu=None, dtype=None,
sign_correction='abs', cutoff_mult=1.):
"""Windowed-sinc decimation.
Parameters
----------
backend : str['numpy', 'torch', 'tensorflow'] / module
Name of module, or module object, to use as backend.
- 'torch' defaults to using GPU and single precision.
- 'tensorflow' is not supported.
gpu : bool / None
Whether to use GPU (torch/tensorflow backends only). For 'torch'
backend, defaults to True.
dtype : str['float32', 'float64'] / None
Whether to compute and store filters in single or double precision.
sign_correction: str / None
None: no correction
'abs': `abs(out)`.
            An explored alternative was `out -= out.min()`, but it's not
            favored because
              - it shifts the entire output (a DC bias), while the negatives
                don't result from such a shift
              - the negatives are a minority and vary with "noisy" factors
                such as boundary effects and signal regularity, making
                the correction itself noisy and sensitive to outliers
"""
# input checks
assert sign_correction in (None, 'abs'), sign_correction
if not isinstance(dtype, (str, type(None))):
dtype = str(dtype).split('.')[-1] # e.g. 'torch.float32'
assert dtype in (None, 'float32', 'float64'), dtype
self.dtype = dtype
self.sign_correction = sign_correction
self.cutoff_mult = cutoff_mult
# handle `backend`
if isinstance(backend, str):
self.backend_name = backend
import importlib
backend = importlib.import_module('wavespin.scattering1d.backend.'
+ self.backend_name + "_backend",
'backend').backend
else:
self.backend_name = backend.__module__.split('.')[-1].rstrip(
'_backend')
self.Bk = backend
# complete module of backend
if self.backend_name == 'torch':
import torch
self.B = torch
elif self.backend_name == 'tensorflow':
raise NotImplementedError("currently only 'numpy' and 'torch' "
"backends are supported.")
# import tensorflow as tf
# self.B = tf
else:
self.B = np
# handle `gpu`
if gpu is None:
gpu = bool(self.backend_name != 'numpy')
elif gpu and self.backend_name == 'numpy':
self._err_backend()
self.gpu = gpu
# instantiate reusables
self.filters = {}
self.unpads = {}
self.pads = {}
def __call__(self, x, factor, axis=-1, x_is_fourier=False):
"""Decimate input (anti-alias filter + subsampling).
Parameters
----------
x : tensor
n-dim tensor.
factor : int
Subsampling factor, must be power of 2.
axis : int
Axis along which to decimate. Negative supported.
x_is_fourier : bool (default False)
Whether `x` is already in frequency domain.
            If possible, it's more performant to pass `x` in the time domain,
            since it's converted to the time domain anyway before padding
            (unless no padding is required, which is possible).
Returns
-------
o : tensor
`x` decimated along `axis` axis by `factor` factor.
"""
assert np.log2(factor).is_integer()
key = (factor, x.shape[axis])
if key not in self.filters:
self.make_filter(key)
return self.decimate(x, key, axis, x_is_fourier)
def decimate(self, x, key, axis=-1, x_is_fourier=False):
xf, filtf, factor, ind_start, ind_end = self._handle_input(
x, key, axis, x_is_fourier)
# convolve, subsample, unpad
of = xf * filtf
of = self.Bk.subsample_fourier(of, factor, axis=axis)
o = self.Bk.irfft(of, axis=axis)
o = self.Bk.unpad(o, ind_start, ind_end, axis=axis)
# sign correction
if self.sign_correction == 'abs':
o = self.B.abs(o)
return o
def _handle_input(self, x, key, axis, x_is_fourier):
# from `key` get filter & related info
factor, N = key
filtf = self.filters[key]
ind_start, ind_end = self.unpads[key]
pad_left, pad_right = self.pads[key]
# pad `x` if necessary
if pad_left != 0 or pad_right != 0:
if x_is_fourier:
xf = x
x = self.Bk.ifft(xf, axis=axis)
xp = self.Bk.pad(x, pad_left, pad_right, pad_mode='zero', axis=axis)
xf = self.Bk.fft(xp, axis=axis)
elif not x_is_fourier:
xf = self.Bk.fft(x, axis=axis)
else:
xf = x
# broadcast filter to input's shape
broadcast = [None] * x.ndim
broadcast[axis] = slice(None)
filtf = filtf[tuple(broadcast)]
return xf, filtf, factor, ind_start, ind_end
def make_filter(self, key):
"""Create windowed sinc, centered at n=0 and padded to a power of 2,
and compute pad and unpad parameters.
        The filters are keyed by `key = (factor, N)`; new keys encountered
        across successive calls to `Decimate` trigger dynamic creation and
        storage of filters.
"""
q, N = key
half_len = 10 * q
n = int(2 * half_len)
cutoff = (1. / q) * self.cutoff_mult
filtf, unpads, pads = self._make_decimate_filter(n + 1, cutoff, q, N)
self.filters[key] = filtf
self.unpads[key] = unpads
self.pads[key] = pads
# helpers ################################################################
def _make_decimate_filter(self, numtaps, cutoff, q, N):
h = self._windowed_sinc(numtaps, cutoff)
# for FFT conv
((pad_left_x, pad_right_x), (pad_left_filt, pad_right_filt)
) = self._compute_pad_amount(N, h)
h = np.pad(h, [pad_left_filt, pad_right_filt])
# time-center filter about 0 (in DFT sense, n=0)
h = np.roll(h, -np.argmax(h))
# take to fourier
hf = np.fft.fft(h)
# assert zero phase (imag part zero)
assert hf.imag.mean() < 1e-15, hf.imag.mean()
# keep only real part
hf = hf.real
# backend, device, dtype
hf = self._handle_backend_device_dtype(hf)
# account for additional padding
ind_start = int(np.ceil(pad_left_x / q))
ind_end = int(np.ceil((N + pad_left_x) / q))
return hf, (ind_start, ind_end), (pad_left_x, pad_right_x)
def _compute_pad_amount(self, N, h):
        # don't check whether it decays to zero sooner; assume the worst case
support = len(h)
# since we zero-pad, can halve (else we'd pad by `support` on each side)
to_pad = support
# pow2 for fast FFT conv
padded_pow2 = int(2**np.ceil(np.log2(N + to_pad)))
# compute padding for input
pad_right_x = padded_pow2 - N
pad_left_x = 0
# compute padding for filter
pad_right_filt = padded_pow2 - len(h)
pad_left_filt = 0
return (pad_left_x, pad_right_x), (pad_left_filt, pad_right_filt)
def _windowed_sinc(self, numtaps, cutoff):
"""Sample & normalize windowed sinc, in time domain"""
win = scipy.signal.get_window("hamming", numtaps, fftbins=False)
# sample, window, & norm sinc
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = win * cutoff * np.sinc(cutoff * m)
h /= h.sum() # L1 norm
return h
def _handle_backend_device_dtype(self, hf):
if self.backend_name == 'numpy':
if self.dtype == 'float32':
hf = hf.astype('float32')
if self.gpu:
self._err_backend()
elif self.backend_name == 'torch':
hf = self.B.from_numpy(hf)
if self.dtype == 'float32':
hf = hf.float()
if self.gpu:
hf = hf.cuda()
elif self.backend_name == 'tensorflow':
raise NotImplementedError
return hf
def _err_backend(self):
raise ValueError("`gpu=True` requires `backend` that's 'torch' "
"or 'tensorflow' (got %s)" % str(self.backend_name))
# backend ####################################################################
class ExtendedUnifiedBackend():
"""Extends existing WaveSpin backend with functionality."""
def __init__(self, x_or_backend_name):
if isinstance(x_or_backend_name, str):
backend_name = x_or_backend_name
else:
backend_name = _infer_backend(x_or_backend_name, get_name=True)[1]
self.backend_name = backend_name
if backend_name == 'torch':
import torch
self.B = torch
elif backend_name == 'tensorflow':
import tensorflow as tf
self.B = tf
else:
self.B = np
self.Bk = get_wavespin_backend(backend_name)
def __getattr__(self, name):
# fetch from wavespin.backend if possible
if hasattr(self.Bk, name):
return getattr(self.Bk, name)
raise AttributeError(f"'{self.Bk.__name__}' object has no "
f"attribute '{name}'")
def abs(self, x):
return self.B.abs(x)
def log(self, x):
if self.backend_name == 'numpy':
out = np.log(x)
elif self.backend_name == 'torch':
out = self.B.log(x)
else:
out = self.B.math.log(x)
return out
def sum(self, x, axis=None, keepdims=False):
if self.backend_name == 'numpy':
out = np.sum(x, axis=axis, keepdims=keepdims)
elif self.backend_name == 'torch':
out = self.B.sum(x, dim=axis, keepdim=keepdims)
else:
out = self.B.reduce_sum(x, axis=axis, keepdims=keepdims)
return out
def norm(self, x, ord=2, axis=None, keepdims=True):
if self.backend_name == 'numpy':
if ord == 1:
out = np.sum(np.abs(x), axis=axis, keepdims=keepdims)
elif ord == 2:
out = np.linalg.norm(x, ord=None, axis=axis, keepdims=keepdims)
else:
out = np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
elif self.backend_name == 'torch':
out = self.B.norm(x, p=ord, dim=axis, keepdim=keepdims)
else:
out = self.B.norm(x, ord=ord, axis=axis, keepdims=keepdims)
return out
def median(self, x, axis=None, keepdims=None):
if keepdims is None and self.backend_name != 'tensorflow':
keepdims = True
if self.backend_name == 'numpy':
out = np.median(x, axis=axis, keepdims=keepdims)
        elif self.backend_name == 'torch':
            if axis is None:
                # torch.median doesn't accept `dim=None`
                out = self.B.median(x)
            else:
                out = self.B.median(x, dim=axis, keepdim=keepdims)
                # torch returns a (values, indices) namedtuple when `dim` is given
                if hasattr(out, 'values'):
                    out = out.values
else:
if axis is not None or keepdims is not None:
raise ValueError("`axis` and `keepdims` for `median` in "
"TensorFlow backend are not implemented.")
v = self.B.reshape(x, [-1])
m = v.get_shape()[0]//2
out = self.B.reduce_min(self.B.nn.top_k(v, m, sorted=False).values)
return out
def std(self, x, axis=None, keepdims=True):
if self.backend_name == 'numpy':
out = np.std(x, axis=axis, keepdims=keepdims)
elif self.backend_name == 'torch':
out = self.B.std(x, dim=axis, keepdim=keepdims)
else:
out = self.B.math.reduce_std(x, axis=axis, keepdims=keepdims)
return out
def min(self, x, axis=None, keepdims=False):
if self.backend_name == 'numpy':
out = np.min(x, axis=axis, keepdims=keepdims)
        elif self.backend_name == 'torch':
            kw = {'dim': axis} if axis is not None else {}
            if keepdims:
                kw['keepdim'] = True
            out = self.B.min(x, **kw)
            # torch returns a (values, indices) namedtuple when `dim` is given
            if hasattr(out, 'values'):
                out = out.values
else:
out = self.B.math.reduce_min(x, axis=axis, keepdims=keepdims)
return out
def numpy(self, x):
if self.backend_name == 'numpy':
out = x
else:
if hasattr(x, 'to') and 'cpu' not in x.device.type:
x = x.cpu()
if getattr(x, 'requires_grad', False):
x = x.detach()
out = x.numpy()
return out
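# Example of the unified dispatch (the same call works for numpy, torch, or
# tensorflow inputs; shown here with numpy):
#
#     B = ExtendedUnifiedBackend('numpy')
#     B.norm(np.array([3., 4.]), ord=2, keepdims=False)  # -> 5.0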
def _infer_backend(x, get_name=False):
while isinstance(x, (dict, list)):
if isinstance(x, dict):
if 'coef' in x:
x = x['coef']
else:
x = list(x.values())[0]
else:
x = x[0]
module = type(x).__module__.split('.')[0]
if module == 'numpy':
backend = np
elif module == 'torch':
import torch
backend = torch
elif module == 'tensorflow':
import tensorflow
backend = tensorflow
elif isinstance(x, (int, float)):
# list of lists, fallback to numpy
module = 'numpy'
backend = np
else:
raise ValueError("could not infer backend from %s" % type(x))
return (backend if not get_name else
(backend, module))
def get_wavespin_backend(backend_name):
if backend_name == 'numpy':
from .backend.numpy_backend import NumpyBackend as B
elif backend_name == 'torch':
from .backend.torch_backend import TorchBackend as B
elif backend_name == 'tensorflow':
from .backend.tensorflow_backend import TensorFlowBackend as B
return B
|
"""
Neuronal Network of C. Elegans implemented in Python with the LIF-Model
(by JW)
New Circuit Edition (SIM-CE)
Version: unknown - still a work in progress!
"""
# Some dependencies
import numpy as np
import matplotlib.pyplot as plt
import gym
from lif import I_syn_calc, I_gap_calc, U_neuron_calc
"""
Parameters for the neural Network
Motor Neurons: FWD, REV
Sensory Neurons: PVD, PLM, AVM, ALM
Inter Neurons: AVA, AVD, PVC, DVA, AVB
"""
# Threshold
v = -20 # [mV]
# Variables
E_ex = 0 #mV
E_in = -70 #mV
Default_U_leak = -70 #mV
# Time Constants:
t0 = t = 0
T = 100
delta_t = 1
# Making Contact with Neurons through Synapses and Gap-Junctions----------------------------
# A = Connections between Neurons with excitatory Nature (E = 0mV)
A_in = np.matrix('0 0 0 1 1 0 1 0; 0 0 0 1 1 0 0 0; 0 0 0 1 0 0 0 1; 0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0; 0 0 0 1 1 0 1 0; 0 0 0 1 1 1 0 1; 0 0 0 0 0 0 1 0')
A_ex = np.matrix('0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0; 0 0 0 0 1 1 1 1; 0 0 0 1 0 1 0 0; 0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0')
A_gap = np.matrix('0 0 0 1 0 1 0 0; 0 0 1 0 0 0 0 0; 0 0 0 0 1 0 0 0; 1 0 1 0 0 0 1 0; 0 0 0 0 0 0 0 0; 1 0 0 0 0 0 1 0; 0 0 0 0 0 1 0 0; 0 0 0 0 0 0 0 0')
#-------------------------------------------------------------------------------------------
# Parameter Matrix--------------------------------------------------------------------------
# Weights (or Number of same Synapses) per Synapse (n) - has to be normalized!
w_in = np.matrix('0 0 0 25 62.5 0 50 0; 0 0 0 75 12 0 0 0; 0 0 0 100 0 0 0 125; 0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0; 0 0 0 37.5 125 0 250 0; 0 0 0 25 50 62 0 25; 0 0 0 0 37.5 0 75 0')
w_ex = np.matrix('0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0; 0 0 0 0 325 100 425 725; 0 0 0 25 0 0 1400 0; 0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0; 0 0 0 0 0 0 0 0')
w_gap = np.matrix('0 0 0 120 0 120 0 0; 0 0 120 0 0 0 0 0; 0 0 0 0 120 0 0 0; 120 0 120 0 0 0 600 0; 0 0 0 0 0 0 0 0; 120 0 0 0 0 0 60 0; 0 0 0 600 0 60 0 0; 0 0 0 0 0 0 0 0')
# For Synapses and Gap-Junctions
G_syn = np.multiply(np.ones((8,8)), 2.3) # G_syn [mS] Parameter for Neurons - Sweep from 0.1 to 1 mS/cm^2
G_gap = np.multiply(np.ones((8,8)), 1.02) # G_gap [mS] Parameter for Neurons - Sweep from 0.1 to 1 mS/cm^2
V_range = np.multiply(np.ones((8,8)), 3) # V_range [mV] Parameter for Neurons - Sweep from 3 to 6 mV
V_shift = np.multiply(np.ones((8,8)), -30) # V_shift [mV] Parameter for Neurons - Sweep from -10 to -40mV
# For Neurons
C_m = np.matrix('0.1111 0.1111 0.2 0.075 0.1111 0.0714 0.06667 0.0714')
G_leak = np.multiply(np.ones((1,8)), 0.0525) # G_leak [mS] Parameter for Neurons - Sweep from 0.04 to 1 mS/cm^2
V_leak = np.multiply(np.ones((1,8)), -70) # V_leak [mV] Parameter for Neurons - Sweep from -90 to 0 mV
#-------------------------------------------------------------------------------------------
# Current Matrix----------------------------------------------------------------------------
# Current Matrix for Symapses between Neurons
I_syn_ex = np.zeros((8,8))
I_syn_in = np.zeros((8,8))
# Current Matrix for Gap-Junctions between Neurons
I_gap = np.zeros((8,8))
#-------------------------------------------------------------------------------------------
# Voltage Matrix----------------------------------------------------------------------------
x = [0, 0, 0, 0, 0, 0, 0, 0] #PLM, ALM, AVM, PVC, AVD, LUA, AVA, AVB
#-------------------------------------------------------------------------------------------
# State Matrix------------------------------------------------------------------------------
fire = [0, 0, 0, 0, 0, 0, 0, 0] #PLM, ALM, AVM, PVC, AVD, LUA, AVA, AVB
#-------------------------------------------------------------------------------------------
# Initialization----------------------------------------------------------------------------
def initialize(Default_U_leak):
# Initializing Neurons and Sensors------------------------------------------------------
for i in range(0,8):
x[i] = Default_U_leak
global PLM, ALM, AVM, PVC, AVD, LUA, AVA, AVB, AVA_spike, AVB_spike
PLM = np.array([Default_U_leak])
ALM = np.array([Default_U_leak])
AVM = np.array([Default_U_leak])
PVC = np.array([Default_U_leak])
AVD = np.array([Default_U_leak])
LUA = np.array([Default_U_leak])
AVA = np.array([Default_U_leak])
AVB = np.array([Default_U_leak])
AVA_spike = np.array([0])
AVB_spike = np.array([0])
#---------------------------------------------------------------------------------------
# Initializing OpenAI Environments------------------------------------------------------
#env = gym.make('CartPole-v0')
#env.reset()
#---------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Compute Function--------------------------------------------------------------------------
def compute():
# Compute all Synapse Currents in this network------------------------------------------
    for i in range(0, 8):      # iterate over all 8 neurons
        for j in range(0, 8):
# Synapse Currents between Interneurons
if A_ex[i, j] == 1:
# Excitatory Synapse
I_syn_ex[i, j] = I_syn_calc(x[i], x[j], E_ex, w_ex[i, j], G_syn[i, j], V_shift[i, j], V_range[i, j])
else:
I_syn_ex[i, j] = 0
if A_in[i, j] == 1:
# Inhibitory Synapse
                I_syn_in[i, j] = I_syn_calc(x[i], x[j], E_in, w_in[i, j], G_syn[i, j], V_shift[i, j], V_range[i, j])
else:
I_syn_in[i, j] = 0
# Gap-Junction Currents between Interneurons
if A_gap[i, j] == 1:
# There is a Gap-Junctions
I_gap[i, j] = I_gap_calc(x[i], x[j], w_gap[i, j], G_gap[i, j])
else:
I_gap[i, j] = 0
#---------------------------------------------------------------------------------------
# Now compute inter Neurons Voltages----------------------------------------------------
    for i in range(0, 8):
        I_syn = np.add(I_syn_in, I_syn_ex) # Addition of inhibitory and excitatory Synapse-Currents
I_s = I_syn.sum(axis = 0) # Creates a 1x8 Array with the Sum of all Columns
I_g = I_gap.sum(axis = 0) # Creates a 1x8 Array with the Sum of all Columns
x[i], fire[i] = U_neuron_calc(x[i], I_s[i], I_g[i], C_m[0, i], G_leak[0, i], V_leak[0, i], v, delta_t)
#---------------------------------------------------------------------------------------
return x, fire
#-------------------------------------------------------------------------------------------
# Append Function---------------------------------------------------------------------------
def arr(x, fire):
global PLM, ALM, AVM, PVC, AVD, LUA, AVA, AVB, AVA_spike, AVB_spike
PLM = np.append(PLM, x[0])
ALM = np.append(ALM, x[1])
AVM = np.append(AVM, x[2])
PVC = np.append(PVC, x[3])
AVD = np.append(AVD, x[4])
LUA = np.append(LUA, x[5])
AVA = np.append(AVA, x[6])
AVB = np.append(AVB, x[7])
    AVA_spike = np.append(AVA_spike, fire[6]) # Reverse locomotion
    AVB_spike = np.append(AVB_spike, fire[7]) # Forward locomotion
#-------------------------------------------------------------------------------------------
# Plot Function-----------------------------------------------------------------------------
def plot():
plt.suptitle('Leaky-Integrate-and-Fire Neuronal Network', fontsize=16)
plt.subplot(221)
plt.title('Sensory Neurons', fontsize=10)
plt.plot(PLM, '-b', label='PLM', linewidth=1)
plt.plot(ALM, '-y', label='ALM', linewidth=1)
plt.plot(AVM, '-g', label='AVM', linewidth=1)
plt.xlabel('t (in s)')
plt.ylabel('u(t) in [mV]')
plt.legend(loc='upper left')
plt.subplot(222)
plt.title('Inter Neurons', fontsize=10)
plt.plot(PVC, '-b', label='PVC', linewidth=1)
plt.plot(AVD, '-y', label='AVD', linewidth=1)
plt.plot(LUA, '-g', label='LUA', linewidth=1)
plt.xlabel('t (in s)')
plt.ylabel('u(t) in [mV]')
plt.legend(loc='upper left')
plt.subplot(223)
plt.title('Fire Neurons', fontsize=10)
plt.plot(AVA, '-b', label='AVA', linewidth=1)
plt.plot(AVB, '-y', label='AVB', linewidth=1)
plt.xlabel('t (in s)')
plt.ylabel('u(t) in [mV]')
plt.legend(loc='upper left')
plt.show()
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Main Function-----------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
def main():
initialize(Default_U_leak) # Initializing all Interneurons with the desired leakage voltage
for t in np.arange(t0,T,delta_t):
x, fire = compute() # Compute the next Interneuron Voltages along with a possible "fire" Event
arr(x, fire) # Storing Information for graphical analysis
    plot() # Plotting everything using matplotlib
#-------------------------------------------------------------------------------------------
if __name__=="__main__":
main()
|
from allauth.account.app_settings import EMAIL_CONFIRMATION_EXPIRE_DAYS
from allauth.account.models import EmailAddress
from django.core import signing
from django.conf import settings
from django.shortcuts import redirect
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from knox.models import AuthToken
from protocol_crm.users.models import User
redirecturl = settings.AFTER_CONFIRMATION_REDIRECT_URL
# from allauth.account.models from email
# from allauth.account.decorators from app_settings
# Create your views here.
class UserCreateView(APIView):
def post(self, request, format='json'):
try:
data = request.data
data['is_superuser'] = False
data['is_staff'] = True
data['is_active'] = True
data['is_agent'] = True
print(data)
emaildata = EmailAddress.objects.get(email=data['email'])
print(emaildata.user.is_superuser)
print(emaildata.user.is_agent)
print(emaildata.user.agency_agent)
if emaildata.user.agency_agent is None and emaildata.user.is_agent:
data['agency_agent'] = emaildata.user
if emaildata:
if emaildata.user.is_active:
emaildata = EmailAddress.objects.get(email=data['email'])
print(emaildata.verified)
if emaildata.verified:
if data['confirm_password'] == data['password']:
del data['confirm_password']
if not (User.objects.filter(username=data['username']).exists() or User.objects.filter(email=data['email']).exists()):
userdata = User.objects.create(**data)
userdata.set_password(userdata.password)
userdata.save()
if userdata:
token = AuthToken.objects.create(userdata)
print(token)
return Response({'status': 'success', "message": "User Created Successfully", "user": {"username": userdata.username, "email": userdata.email, "password": userdata.password}, "token": token[1]}, status=status.HTTP_201_CREATED)
else:
return Response({'status': 'Error', "message": "User not created"}, status=status.HTTP_411_LENGTH_REQUIRED)
else:
userdata = User.objects.get(email=data['email'])
print(data)
print(userdata.id, userdata.email)
print(data['agency_agent'] is not None)
print(userdata.password == "")
print(userdata.username == "")
print(userdata.first_name is not None)
print(userdata.last_name is not None)
if data['agency_agent'] is not None and userdata.password == "" and userdata.username == "" and userdata.first_name is not None and userdata.last_name is not None:
userdata.username = data['username']
userdata.is_active = data['is_active']
userdata.is_superuser = data['is_superuser']
userdata.is_staff = data['is_staff']
userdata.is_agent = data['is_agent']
userdata.agency_agent = data['agency_agent']
userdata.set_password(data['password'])
userdata.save()
if userdata:
token = AuthToken.objects.create(userdata)
print(token)
return Response({'status': 'success', "message": "User Created Successfully", "user": {"username": userdata.username, "email": userdata.email, "password": userdata.password}, "token": token[1]}, status=status.HTTP_201_CREATED)
else:
return Response({'status': 'Error', "message": "Username or Email Allready Exist"}, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'status': 'Error', "message": "Password and Confirm Password not match"}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
else:
return Response({'status': 'Error', "message": "Email Not Verified"}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
else:
return Response({'status': 'Error', "message": "Agency is not active"}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
else:
return Response({'status': 'Error', "message": "Email not found please contact to Admin"}, status=status.HTTP_404_NOT_FOUND)
except EmailAddress.DoesNotExist:
return Response({'status': 'Error', "message": "Email not found please contact to Admin"}, status=status.HTTP_404_NOT_FOUND)
except Exception as e:
print("ERRRR", e)
return Response({'status': 'Error', "message": "Internal Server Error"}, status=status.HTTP_400_BAD_REQUEST)
@api_view()
def confirmUrl(request, **kwargs):
try:
key = kwargs['key']
max_age = EMAIL_CONFIRMATION_EXPIRE_DAYS
print("max_age", max_age)
fromkey = signing.loads(key, max_age=max_age, salt='account')
emaildata = EmailAddress.objects.get(pk=fromkey)
if(emaildata.verified):
detail = {"message": "Already Verifyed", "already_Verify": True, "status_code": 200}
return redirect('{}?{}={}'.format(redirecturl, "email", emaildata.email))
emaildata.verified = True
emaildata.save()
detail = {"message": "Verified Succefuly", "is_verify": True, "status_code": 200}
return redirect('{}?{}={}'.format(redirecturl, "email", emaildata.email))
except (signing.SignatureExpired, signing.BadSignature, EmailAddress.DoesNotExist):
detail = {"message": "Token Expired", "error": True, "status_code": 400}
return Response(detail)
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import common_pb2 as common__pb2
import functional_api_pb2 as functional__api__pb2
class FunctionalServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.OpenPassWindow = channel.unary_unary(
'/base.FunctionalService/OpenPassWindow',
request_serializer=common__pb2.ClientId.SerializeToString,
response_deserializer=common__pb2.Empty.FromString,
)
self.ClosePassWindow = channel.unary_unary(
'/base.FunctionalService/ClosePassWindow',
request_serializer=common__pb2.ClientId.SerializeToString,
response_deserializer=common__pb2.Empty.FromString,
)
self.SetFanSpeed = channel.unary_unary(
'/base.FunctionalService/SetFanSpeed',
request_serializer=functional__api__pb2.SenderInfo.SerializeToString,
response_deserializer=common__pb2.Empty.FromString,
)
self.SubscribeToFanSpeed = channel.unary_stream(
'/base.FunctionalService/SubscribeToFanSpeed',
request_serializer=functional__api__pb2.SubscriberRequest.SerializeToString,
response_deserializer=functional__api__pb2.Value.FromString,
)
class FunctionalServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def OpenPassWindow(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ClosePassWindow(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetFanSpeed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeToFanSpeed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FunctionalServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'OpenPassWindow': grpc.unary_unary_rpc_method_handler(
servicer.OpenPassWindow,
request_deserializer=common__pb2.ClientId.FromString,
response_serializer=common__pb2.Empty.SerializeToString,
),
'ClosePassWindow': grpc.unary_unary_rpc_method_handler(
servicer.ClosePassWindow,
request_deserializer=common__pb2.ClientId.FromString,
response_serializer=common__pb2.Empty.SerializeToString,
),
'SetFanSpeed': grpc.unary_unary_rpc_method_handler(
servicer.SetFanSpeed,
request_deserializer=functional__api__pb2.SenderInfo.FromString,
response_serializer=common__pb2.Empty.SerializeToString,
),
'SubscribeToFanSpeed': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeToFanSpeed,
request_deserializer=functional__api__pb2.SubscriberRequest.FromString,
response_serializer=functional__api__pb2.Value.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'base.FunctionalService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
from datetime import datetime
from typing import List
import requests
from .constants import GAME_TYPES
from .exceptions import (
APIException,
ClientException,
GameNotFoundException,
LoginError,
PaymentRequiredException,
PlayerNotFoundException,
TooManyRequestsException,
TeamNotFoundException,
ParkNotFoundException,
)
class ZBaseballDataClient(object):
def __init__(
self, username=None, password=None, api_url="https://www.zbaseballdata.com"
):
self._username = username
self._password = password
self._api_url = api_url
self._session = requests.Session()
self._session.headers.update({"Accept": "application/json"})
self._login()
def _get(self, *args, **kwargs):
"""Get wrapper to catch and retry all HTTP 401s (token may be stale)"""
response = self._session.get(*args, **kwargs)
if response.status_code == 401:
self._login()
response = self._session.get(*args, **kwargs)
elif response.status_code == 429:
msg = "API query rate exceeded"
raise TooManyRequestsException(msg)
elif response.status_code == 402:
msg = response.json()["detail"]
raise PaymentRequiredException(msg)
return response
def _login(self):
"""Use credentials to grab a new api token"""
self._session.headers.pop("Authorization", None)
login_endpoint = self._api_url + "/api/auth/login/"
response = self._session.post(
url=login_endpoint,
data={"username": self._username, "password": self._password},
)
if response.status_code == 200:
token = response.json()["token"]
self._session.headers.update({"Authorization": "Token {}".format(token)})
else:
msg = response.json()["msg"]
raise LoginError(msg)
def _logout(self):
"""Indicate to the API we are done with our current token"""
login_endpoint = self._api_url + "/api/auth/logout/"
self._session.post(url=login_endpoint)
del self._session.headers["Authorization"]
def get_game(self, game_id):
"""Retrieve data for a specific game
Args:
game_id: str, the unique identifier for a particular game. E.g. "NYA192104130"
Returns:
A dict with details about that particular game. Fields including but not limited
to: time, attendance, umpires, winning pitcher, losing pitcher, game site,
weather, wind dir, temperature, game duration, date and a few more.
"""
game_endpoint = self._api_url + "/api/v1/games/{}/".format(game_id)
response = self._get(url=game_endpoint)
if response.status_code == 404:
raise GameNotFoundException(response.json()["detail"])
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching game_id={}".format(
response.status_code, game_id
)
raise APIException(msg)
game = response.json()
game["date"] = datetime.strptime(game["date"], "%Y-%m-%d").date()
return game
def get_game_events(self, game_id):
"""Get a list of play-by-play events for a specific game"""
game_endpoint = self._api_url + "/api/v1/games/{}/events/".format(game_id)
response = self._get(url=game_endpoint)
if response.status_code == 404:
raise GameNotFoundException(response.json()["detail"])
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching events for game_id={}".format(
response.status_code, game_id
)
raise APIException(msg)
return response.json()
def get_games(
self,
year: int = None,
team_id: str = None,
start_date: str = None,
end_date: str = None,
game_type: str = None,
):
"""Get games & allow some filters
Args:
year: int or None, you may filter games by year (or season if you prefer).
The API will return all regular season games as well as post season games.
The API does not distinguish between regular season games and post-season.
team_id: str or None, filter games by a teams 3 character "team-id". E.g. "NYA"
                NB! 3-character team-ids are NOT necessarily unique! Specifically, for
MIL and HOU, there are 2 "teams" with each of those ID's. Generally, this
happens when a particular team switches leagues from AL to NL or vice versa.
start_date: str, e.g. 2019-01-01 only return games after this date
end_date: str, only return games before this date
game_type: str, filter based on regular season games, postseason, allstar etc.
SEE constants.py for the full list of options. Use POST for only postseason
games, REG for only regular season games, None for all games.
Returns:
a generator of dicts, such that each dict has some simple facts about each game.
E.g.
{
"game_id": "NYA192104140",
"date": "1921-04-14",
"start_time": null,
"home_team": 9,
"away_team": 98
}
"home_team" and "away_team" are the UNIQUE team identifiers. Details about
a team can be found using the teams API or the "get_teams" client method.
"""
filters = []
if year:
filters.append("year={}".format(year))
if team_id:
filters.append("team-id={}".format(team_id))
if start_date:
filters.append("start-date={}".format(start_date))
if end_date:
filters.append("end-date={}".format(end_date))
if game_type:
if game_type != "POST" and game_type not in GAME_TYPES:
msg = "game_type must be 'POST' or one of {}".format(GAME_TYPES)
raise ClientException(msg)
filters.append("game-type={}".format(game_type))
games_endpoint = self._api_url + "/api/v1/games/"
if len(filters) > 0:
games_endpoint += "?" + "&".join(filters)
response = self._get(url=games_endpoint)
if response.status_code == 400:
msg = response.json()["detail"]
raise APIException(msg)
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching games".format(
response.status_code
)
raise APIException(msg)
data = response.json()
while len(data["results"]) > 0:
for game in data["results"]:
yield game
next_url = data["next"]
if next_url is None:
break
response = self._get(url=next_url)
data = response.json()
def get_player(self, retro_id):
"""Get some basic details about a player"""
player_endpoint = self._api_url + "/api/v1/players/{}/".format(retro_id)
response = self._get(url=player_endpoint)
if response.status_code == 404:
msg = "Player with retro-id={} not found.".format(retro_id)
raise PlayerNotFoundException(msg)
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching player w/ retro-id={}".format(
response.status_code, retro_id
)
raise APIException(msg)
player_data = response.json()
player_data["debut"] = datetime.strptime(
player_data["debut"], "%Y-%m-%d"
).date()
return player_data
def get_players(self, search=None):
"""Get players, with some searching capacity
Args:
            search: str | None, an optional parameter to search players on.
                The search term will return players whose first names, last
                names or retro_ids are "LIKE" (read: startswith) the search term.
Returns:
a generator of player-dict/objects, where each dict has first-name, last-name
unique "retro_id" and the player's MLB debut.
"""
player_endpoint = self._api_url + "/api/v1/players/"
if search:
            search = search.replace(" ", "%20")
player_endpoint += "?search={}".format(search)
response = self._get(url=player_endpoint)
if response.status_code != 200:
msg = "Received HTTP status {} when fetching players.".format(
response.status_code
)
raise APIException(msg)
data = response.json()
while len(data["results"]) > 0:
for player in data["results"]:
player["debut"] = datetime.strptime(player["debut"], "%Y-%m-%d").date()
yield player
next_url = data["next"]
if next_url is None:
break
response = self._get(url=next_url)
data = response.json()
def get_parks(self, city=None, state=None, league=None):
"""Get gen of ballparks known to the retrosheet universe"""
query_params = []
if city:
query_params.append("city={}".format(city))
if state:
query_params.append("state={}".format(state))
if league:
query_params.append("league={}".format(city))
if len(query_params) > 0:
query_string = "?" + "&".join(query_params)
else:
query_string = ""
parks_endpoint = self._api_url + "/api/v1/parks/" + query_string
response = self._get(parks_endpoint)
if response.status_code != 200:
msg = "Received HTTP status {} when fetching parks".format(
response.status_code
)
raise APIException(msg)
data = response.json()
while len(data["results"]) > 0:
for park in data["results"]:
park["start_date"] = datetime.strptime(
park["start_date"], "%Y-%m-%d"
).date()
if park["end_date"] is not None:
park["end_date"] = datetime.strptime(
park["end_date"], "%Y-%m-%d"
).date()
yield park
next_url = data["next"]
if next_url is None:
break
response = self._get(url=next_url)
data = response.json()
def get_park(self, park_id):
"""Get a specific park object"""
park_endpoint = self._api_url + "/api/v1/parks/{}".format(park_id)
response = self._get(url=park_endpoint)
if response.status_code == 404:
msg = "Park with park-id={} not found.".format(park_id)
raise ParkNotFoundException(msg)
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching park w/ park-id={}".format(
response.status_code, park_id
)
raise APIException(msg)
park_data = response.json()
park_data["start_date"] = datetime.strptime(
park_data["start_date"], "%Y-%m-%d"
).date()
if park_data["end_date"] is not None:
park_data["end_date"] = datetime.strptime(
park_data["end_date"], "%Y-%m-%d"
).date()
return park_data
def get_teams(self, search: str = None, only_active: bool = False):
"""Get a generator of teams
Args:
            search: str, search parameter which returns teams based on their
                "nickname", city or string team-id (e.g. NYA). Matches exactly
                on city and team-id, or partially on nickname.
            only_active: bool, only return teams that still exist. Defaults to False.
Returns:
generator of team-object/dicts that match search criteria.
"""
if only_active:
params = "?only-active=1"
else:
params = "?only-active=0"
if search is not None:
params += "&search={}".format(search)
team_endpoint = self._api_url + "/api/v1/teams/" + params
response = self._get(team_endpoint)
if response.status_code != 200:
msg = "Received HTTP status {} when fetching teams".format(
response.status_code
)
raise APIException(msg)
data = response.json()
while len(data["results"]) > 0:
for team in data["results"]:
yield team
next_url = data["next"]
if next_url is None:
break
response = self._get(url=next_url)
data = response.json()
def get_team(self, int_team_id: int):
"""Get details about a team"""
team_endpoint = self._api_url + "/api/v1/teams/{}/".format(int_team_id)
response = self._get(team_endpoint)
if response.status_code == 404:
msg = "Team with ID: {} not found".format(int_team_id)
raise TeamNotFoundException(msg)
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching team with id: {}".format(
response.status_code, int_team_id
)
raise APIException(msg)
return response.json()
def get_player_events(
self, retro_id: str, start_date: str = None, end_date: str = None
):
"""Get paginated events for a player
The API exposes an endpoint to filter play-by-play events by player. All events are
returned for a specific player, regardless of whether the player was the hitter or the pitcher.
Therefore, the user should be careful to understand this point!
A user may also filter based on a date window, i.e. return all events within this
range of dates, or if only a start_date or end_date is supplied, the events will be
bounded by those respective dates.
Args:
retro_id: str, unique retrosheet ID of the player events should be returned for.
start_date: str, YYYY-MM-DD string to return events after
end_date: str, YYYY-MM-DD string to return events before
Returns:
            a generator of dicts, which have the form:
{
"game_id": "HOU201805010",
"date": "2018-05-01",
"hitter_retro_id": "judga001",
"pitcher_retro_id": "verlj001",
"pitch_sequence": "F1*BBCS",
"event_description": "K",
"count_on_play": "22",
"inning": 1,
"event_count": 1
}
"""
filters = []
if start_date:
filters.append("start-date=" + start_date)
if end_date:
filters.append("end-date=" + end_date)
player_events_endpoint = self._api_url + "/api/v1/players/{}/events/".format(
retro_id
)
if filters:
player_events_endpoint += "?" + "&".join(filters)
response = self._get(url=player_events_endpoint)
if response.status_code != 200:
msg = "Received HTTP status {} when fetching events for player: {}".format(
response.status_code, retro_id
)
raise APIException(msg)
data = response.json()
while len(data["results"]) > 0:
for event in data["results"]:
event["date"] = datetime.strptime(event["date"], "%Y-%m-%d").date()
yield event
next_url = data["next"]
if next_url is None:
break
response = self._get(url=next_url)
data = response.json()
def get_batting_stat_split(
self,
retro_id: str,
stats: List[str],
agg_by: str,
vs_pitcher: str = None,
game_type: str = None,
pitcher_throw: str = None,
start_date: str = None,
end_date: str = None,
year: int = None,
day_or_night: str = None,
park_id: str = None,
vs_team: str = None,
):
"""Get batting statistics
Args:
retro_id: str, for whom we want to get statistics, e.g. judga001
stats: List[str], one or more of H, AB, PA, etc.... see full list
in constants.py BATTING_STATS
agg_by: str, D (day), M (month), DOW(day of week) etc... for full list see
constants.py AGGREGATE_OPTIONS
vs_pitcher: str, a retro_id of a player. This will tell the server to return
and aggregate data for when this hitter was facing this pitcher.
game_type: str, if None, regular and postseason stats are returned. Options
are REG, POST, ALCS, ALDS, ALWC, WS... etc...
pitcher_throw: str, None or "L" or "R"
start_date: str, None or YYYY-MM-DD, return after this date.
end_date: str, None or YYYY-MM-DD, return data before this date.
year: int, None or some year. Only return data for this year.
Returns:
            a dictionary of the form Dict[stat, Dict[aggregate, value]].
            For example, with stats=["HR", "PA"] and agg_by="DOW" (day of week)
            for some player (the values will change if a user supplies any of
            the splits, i.e. the optional parameters):
{
"HR": {
"fri": 10,
"mon": 5,
"sat": 13,
"sun": 7,
"thu": 4,
"tue": 7,
"wed": 8
},
"PA": {
"fri": 147,
"mon": 108,
"sat": 162,
"sun": 146,
"thu": 106,
"tue": 143,
"wed": 133
}
}
"""
stat_query_string = "&" + "&".join(["stat={}".format(s) for s in stats])
query_string = (
"?hitter_retro_id={retro_id}&agg_by={agg_by}".format(
retro_id=retro_id, agg_by=agg_by
)
+ stat_query_string
)
# Add splits if they're provided
if vs_pitcher:
query_string += "&vs_pitcher={}".format(vs_pitcher)
if game_type:
query_string += "&game_type={}".format(game_type)
if pitcher_throw:
query_string += "&pitcher_throw={}".format(pitcher_throw)
if start_date:
query_string += "&start_date={}".format(start_date)
if end_date:
query_string += "&end_date={}".format(end_date)
if year:
query_string += "&year={}".format(year)
if day_or_night:
query_string += "&day_or_night={}".format(day_or_night)
if park_id:
query_string += "&park_id={}".format(park_id)
if vs_team:
query_string += "&vs_team={}".format(vs_team)
stat_split_endpoint = self._api_url + "/api/v1/stats/batting/" + query_string
response = self._get(url=stat_split_endpoint)
if response.status_code == 400:
raise APIException(response.json()["detail"])
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching stat split for: {}".format(
response.status_code, retro_id
)
raise APIException(msg)
return response.json()
def get_lineup(self, game_id):
"""Get lineup list given a game_id"""
lineup_route = self._api_url + "/api/v1/games/{}/lineup/".format(game_id)
response = self._get(lineup_route)
if response.status_code == 404:
msg = "Game with ID: {} not found".format(game_id)
raise TeamNotFoundException(msg)
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching lineup for game: {}".format(
response.status_code, game_id
)
raise APIException(msg)
return response.json()
def get_pitching_stat_split(
self,
retro_id: str,
stats: List[str],
agg_by: str = "C",
vs_hitter: str = None,
game_type: str = None,
batter_hand: str = None,
year: int = None,
start_date: str = None,
end_date: str = None,
):
"""Get pitching stats
        This client method is the fraternal twin of "get_batting_stat_split". It's
        pretty much the same and follows the same rules, except it hits the pitching API.
        For pitching, however, there is another API for what we call "game level" things,
        i.e. Wins, Starts, Games, Saves, Losses for pitchers. Naturally, these can't exactly
        be broken down by inning or situations with runners on base, so that data comes
        from a second, but very similar, API endpoint. At the time of writing, no client method
        has been implemented for that, but this will change.
This method serves "event level data", i.e. things that can be computed from play
by play data.
"""
stat_query_string = "&" + "&".join(["stat={}".format(s) for s in stats])
query_string = (
"?pitcher_retro_id={retro_id}&agg_by={agg_by}".format(
retro_id=retro_id, agg_by=agg_by
)
+ stat_query_string
)
# Add query filters if they're provided
if vs_hitter:
query_string += "&vs_hitter={}".format(vs_hitter)
if game_type:
query_string += "&game_type={}".format(game_type)
if batter_hand:
query_string += "&batter_hand={}".format(batter_hand)
if start_date:
query_string += "&start_date={}".format(start_date)
if end_date:
query_string += "&end_date={}".format(end_date)
if year:
query_string += "&year={}".format(year)
stat_split_endpoint = self._api_url + "/api/v1/stats/pitching/" + query_string
response = self._get(url=stat_split_endpoint)
if response.status_code == 400:
raise APIException(response.json()["detail"])
elif response.status_code != 200:
msg = "Received HTTP status {} when fetching stat split for: {}".format(
response.status_code, retro_id
)
raise APIException(msg)
return response.json()
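# Usage sketch (hypothetical credentials; illustrates the paginated generators
# and the stat-split call documented above):
#
#     client = ZBaseballDataClient(username="user@example.com", password="hunter2")
#     for game in client.get_games(year=1921, team_id="NYA"):
#         print(game["game_id"], game["date"])
#     splits = client.get_batting_stat_split(
#         retro_id="judga001", stats=["HR", "PA"], agg_by="DOW", year=2018)
#     print(splits["HR"])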
|
import os.path as osp
import numpy as np
import mmcv
from viseval import eval_det
from .custom import CustomDataset
from .builder import DATASETS
@DATASETS.register_module()
class VisDroneDataset(CustomDataset):
CLASSES = ('pedestrian', 'person', 'bicycle', 'car', 'van',
'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor', 'others')
def __init__(self,
ann_file,
pipeline,
classes=None,
data_root=None,
img_prefix='',
seg_prefix=None,
proposal_file=None,
test_mode=False,
filter_empty_gt=True,
label_prefix=None):
self.label_prefix = label_prefix
self.labels = []
super(VisDroneDataset, self).__init__(
ann_file,
pipeline,
classes,
data_root,
img_prefix,
seg_prefix,
proposal_file,
test_mode,
filter_empty_gt)
@staticmethod
def open_label_file(path):
label = np.loadtxt(path, delimiter=',', dtype=np.int64,
ndmin=2, usecols=range(8))
if not len(label):
label = label.reshape(0, 8)
return label
def load_annotations(self, ann_file):
filenames = open(ann_file).readlines()
data_infos = []
for filename in filenames:
filename_raw = filename.strip()
img_path = osp.join(self.img_prefix, filename_raw + '.jpg')
img = mmcv.imread(img_path)
height, width = img.shape[:2]
info = dict(filename=filename_raw + '.jpg', width=width, height=height)
data_infos.append(info)
if self.label_prefix is not None:
label = self.open_label_file(
osp.join(self.label_prefix, filename_raw + '.txt'))
self.labels.append(label)
return data_infos
def get_ann_info(self, idx):
return self._parse_ann_info(idx)
def get_cat_ids(self, idx):
raise NotImplementedError
# return self._parse_ann_info(idx)['labels'].tolist()
def _parse_ann_info(self, idx):
label = self.labels[idx]
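        # Each label row follows the VisDrone annotation layout (assumed here):
        # [bbox_left, bbox_top, bbox_width, bbox_height, score/validity flag,
        #  object_category, truncation, occlusion]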
if label.shape[0]:
x1y1 = label[:, 0:2]
wh = label[:, 2:4]
x2y2 = x1y1 + wh
_gt_bboxes = np.concatenate((x1y1, x2y2), axis=1).astype(np.float32)
_gt_labels = label[:, 5]
_gt_truncation = label[:, 6]
_gt_occlusion = label[:, 7]
gt_mask = np.logical_and(label[:, 4], label[:, 5])
ignore_mask = np.logical_not(gt_mask)
size_mask = np.min(wh > 0, axis=1)
class_mask = _gt_labels < 11 # remove "other" category
gt_mask = np.logical_and(gt_mask, size_mask)
gt_mask = np.logical_and(gt_mask, class_mask)
ignore_mask = np.logical_and(ignore_mask, size_mask)
gt_bboxes = _gt_bboxes[gt_mask]
gt_labels = _gt_labels[gt_mask]
gt_truncation = _gt_truncation[gt_mask]
gt_occlusion = _gt_occlusion[gt_mask]
gt_bboxes_ignore = _gt_bboxes[ignore_mask]
else:
gt_bboxes = np.empty((0, 4), dtype=np.float32)
gt_labels = np.empty(0, dtype=np.int64)
gt_bboxes_ignore = np.empty((0, 4), dtype=np.float32)
gt_truncation = np.empty(0, dtype=np.int32)
gt_occlusion = np.empty(0, dtype=np.int32)
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
truncation=gt_truncation,
occlusion=gt_occlusion)
return ann
def evaluate(self,
results,
metric=None,
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
results = self.format_results(results)
heights = [info['height'] for info in self.data_infos]
widths = [info['width'] for info in self.data_infos]
ap_all, ap_50, ap_75, ar_1, ar_10, ar_100, ar_500 = eval_det(
self.labels, results, heights, widths)
eval_res = dict(
ap_all=float(ap_all),
ap_50=float(ap_50),
ap_75=float(ap_75),
ar_1=float(ar_1),
ar_10=float(ar_10),
ar_100=float(ar_100),
ar_500=float(ar_500)
)
return eval_res
def format_results(self, results, **kwargs):
results_out = []
for result in results:
category = [i
for i, det_per_class in enumerate(result)
for _ in det_per_class]
category = np.array(category).reshape(-1, 1)
result_out = np.concatenate(result, axis=0)
result_out = np.concatenate((result_out, category), axis=1)
            # sort by score in descending order
result_out = result_out[result_out[:, 4].argsort()[::-1]]
# x1y1x2y2 to x1y1wh
result_out[:, 2:4] -= result_out[:, :2]
results_out.append(result_out)
return results_out
|
TP = True
FP = False
class Records:
"""Save prediction records during update.
Attributes:
iou_threshold (float): iou threshold
pred_infos (list): save the results (TP/FP)
Args:
iou_threshold (float): iou threshold (default: 0.5)
"""
def __init__(self, iou_threshold=0.5):
self.iou_threshold = iou_threshold
self.pred_infos = []
def reset(self):
self.pred_infos = []
def add_records(self, gt_bboxes, pred_bboxes):
"""Add ground truth and prediction records.
Args:
gt_bboxes: ground truth bboxes in the current image
            pred_bboxes: sorted prediction bboxes in the current image
"""
gt_seen = [False] * len(gt_bboxes)
for pred_bbox in pred_bboxes:
max_iou = -1
max_idx = -1
for i, gt_bbox in enumerate(gt_bboxes):
if gt_bbox.label != pred_bbox.label:
continue
iou = gt_bbox.iou(pred_bbox)
if iou > max_iou:
max_iou = iou
max_idx = i
if max_iou >= self.iou_threshold:
if not gt_seen[max_idx]:
gt_seen[max_idx] = True
self.pred_infos.append((pred_bbox.score, TP))
else:
self.pred_infos.append((pred_bbox.score, FP))
else:
self.pred_infos.append((pred_bbox.score, FP))
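# Minimal sketch (not part of the class above; `precision_recall` and `num_gt`
# are hypothetical names) of turning `pred_infos` into a precision/recall
# curve: sort by confidence, then accumulate TP/FP counts.
def precision_recall(pred_infos, num_gt):
    """Return (precisions, recalls) lists from (score, TP/FP) records.
    `num_gt` is the total number of ground-truth boxes, counted externally."""
    infos = sorted(pred_infos, key=lambda info: info[0], reverse=True)
    tp = fp = 0
    precisions, recalls = [], []
    for _, is_tp in infos:
        tp += bool(is_tp)
        fp += not is_tp
        precisions.append(tp / (tp + fp))
        recalls.append(tp / num_gt if num_gt else 0.0)
    return precisions, recalls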
|
"""
Exceptions for the DB API 2.0 implementation.
"""
__all__ = [
"Warning",
"Error",
"InterfaceError",
"DatabaseError",
"DataError",
"OperationalError",
"IntegrityError",
"InternalError",
"ProgrammingError",
"NotSupportedError",
]
class Warning(Exception): # pylint: disable=redefined-builtin
"""
Important warnings like data truncations while inserting.
Exception raised for important warnings like data truncations
while inserting, etc. It must be a subclass of the Python
StandardError (defined in the module exceptions).
"""
class Error(Exception):
"""
Base class of all other error exceptions.
Exception that is the base class of all other error exceptions.
You can use this to catch all errors with one single except
statement. Warnings are not considered errors and thus should
not use this class as base. It must be a subclass of the Python
StandardError (defined in the module exceptions).
"""
class InterfaceError(Error):
"""
Errors that are related to the database interface.
Exception raised for errors that are related to the database
interface rather than the database itself. It must be a subclass
of Error.
"""
class DatabaseError(Error):
"""
Errors that are related to the database.
Exception raised for errors that are related to the database.
It must be a subclass of Error.
"""
class DataError(DatabaseError):
"""
Errors that are due to problems with the processed data.
Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc. It must be a subclass of DatabaseError.
"""
class OperationalError(DatabaseError):
"""
Errors that are related to the database's operation.
Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc. It must be a subclass of
DatabaseError.
"""
class IntegrityError(DatabaseError):
"""
Raised when the relational integrity of the database is affected.
Exception raised when the relational integrity of the database is
affected, e.g. a foreign key check fails. It must be a subclass of
DatabaseError.
"""
class InternalError(DatabaseError):
"""
Raised when the database encounters an internal error.
Exception raised when the database encounters an internal error,
e.g. the cursor is not valid anymore, the transaction is out of
sync, etc. It must be a subclass of DatabaseError.
"""
class ProgrammingError(DatabaseError):
"""
Raised for programming errors.
Exception raised for programming errors, e.g. table not found or
already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc. It must be a subclass of DatabaseError.
"""
class NotSupportedError(DatabaseError):
"""
Raised in case a method or database API is not supported.
Exception raised in case a method or database API was used which is
not supported by the database, e.g. requesting a .rollback() on a
connection that does not support transaction or has transactions
turned off. It must be a subclass of DatabaseError.
"""
|
from pysubs2 import SSAStyle
from pysubs2.substation import parse_tags
def test_no_tags():
text = "Hello, world!"
assert parse_tags(text) == [(text, SSAStyle())]
def test_i_tag():
text = "Hello, {\\i1}world{\\i0}!"
assert parse_tags(text) == [("Hello, ", SSAStyle()),
("world", SSAStyle(italic=True)),
("!", SSAStyle())]
def test_r_tag():
text = "{\\i1}Hello, {\\r}world!"
assert parse_tags(text) == [("", SSAStyle()),
("Hello, ", SSAStyle(italic=True)),
("world!", SSAStyle())]
def test_r_named_tag():
styles = {"other style": SSAStyle(bold=True)}
text = "Hello, {\\rother style\\i1}world!"
assert parse_tags(text, styles=styles) == \
[("Hello, ", SSAStyle()),
("world!", SSAStyle(italic=True, bold=True))]
def test_drawing_tag():
text = r"{\p1}m 0 0 l 100 0 100 100 0 100{\p0}test"
fragments = parse_tags(text)
assert len(fragments) == 3
drawing_text, drawing_style = fragments[0]
assert drawing_text == ""
assert drawing_style.drawing is False
drawing_text, drawing_style = fragments[1]
assert drawing_text == "m 0 0 l 100 0 100 100 0 100"
assert drawing_style.drawing is True
drawing_text, drawing_style = fragments[2]
assert drawing_text == "test"
assert drawing_style.drawing is False
def test_no_drawing_tag():
text = r"test{\paws}test"
fragments = parse_tags(text)
assert len(fragments) == 2
for fragment_text, fragment_style in fragments:
assert fragment_text == "test"
assert fragment_style.drawing is False
|
import json
from flask import request
from functools import wraps
from jose import jwt
from urllib.request import urlopen
AUTH0_DOMAIN = "mothership-v2.us.auth0.com"
ALGORITHMS = ["RS256"]
class AuthError(Exception):
"""AuthError Exception
A standardized way to communicate auth failure modes
"""
def __init__(self, description, code):
self.description = description
self.code = code
def get_token_auth_header():
"""Retrieve jwt from the request header"""
if "Authorization" not in request.headers:
raise AuthError(
"Missing mandatory headers.", 401,
)
auth_header = request.headers["Authorization"]
header_parts = auth_header.split(" ")
if len(header_parts) != 2:
raise AuthError(
"Missing authorization elements.", 401,
)
elif header_parts[0].lower() != "bearer":
raise AuthError(
"Unable to find appropiate keywords.", 401,
)
return header_parts[1]
def check_permissions(permission, payload):
"""Validate claims"""
if "permissions" not in payload:
raise AuthError("Missing mandatory key.", 401)
if permission not in payload["permissions"]:
raise AuthError(
"User don't have access to resource.", 401,
)
return True
def verify_decode_jwt(token, audience): # noqa: C901
"""Checks if the jwt has been tampered with"""
# Get auth0 public key
jsonurl = urlopen(f"https://{AUTH0_DOMAIN}/.well-known/jwks.json")
jwks = json.loads(jsonurl.read())
# Get data from header
try:
unverified_header = jwt.get_unverified_header(token)
except Exception:
raise AuthError(
"Malformed header value.", 401,
)
# Choose our key
rsa_key = {}
if "kid" not in unverified_header:
raise AuthError("Authorization malformed.", 401)
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"],
}
# Verify
if rsa_key:
try:
# Validate jwt
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=audience,
issuer="https://" + AUTH0_DOMAIN + "/",
)
return payload
except jwt.ExpiredSignatureError:
raise AuthError("Token expired.", 401)
except jwt.JWTClaimsError:
raise AuthError(
"Incorrect claims. Please, check the audience and issuer.", 401,
)
except Exception:
raise AuthError(
"Unable to parse authentication token.", 400,
)
raise AuthError(
"Unable to find the appropriate key.", 400,
)
def requires_auth(permission="", audience=""):
"""Auth decorator for routes"""
def requires_auth_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
                token = get_token_auth_header()
                payload = verify_decode_jwt(token, audience)
except AuthError as err:
raise AuthError(
err.description, err.code,
)
check_permissions(permission, payload)
return f(payload, *args, **kwargs)
return wrapper
return requires_auth_decorator
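# Hedged usage sketch (added for illustration, not part of the original
# module): wiring the decorator onto a Flask route. The route name,
# permission string, and audience value are hypothetical placeholders.
if __name__ == "__main__":  # pragma: no cover
    from flask import Flask, jsonify

    demo_app = Flask(__name__)

    @demo_app.route("/drinks")
    @requires_auth(permission="get:drinks", audience="my-api-audience")
    def get_drinks(payload):
        # `payload` is the decoded JWT forwarded by the decorator
        return jsonify({"success": True, "permissions": payload.get("permissions", [])})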
|
import unittest
from application.calculators.tcid50.tcid50_calculator import TCID50Calculator
from application.models.tcid50.dilution import Dilution
from application.models.tcid50.tcid50_input_data_set import TCID50InputDataSet
from application.models.tcid50.tcid50_calculated_data_set import TCID50CalculatedDataSet
class Given_A_TCID50Calculator_And_A_TCID50InputDataset_It_Should_Calculate_A_TCID50CalculatedDataSet(unittest.TestCase):
def test(self):
input_dilutions = [
Dilution(0.00000001, 0, 8),
Dilution(0.000001, 5, 3),
Dilution(0.00001, 8, 0),
Dilution(0.0000001, 1, 7)
]
output = TCID50Calculator().calculate(TCID50InputDataSet(input_dilutions))
self.assertEqual(output.dilutions[0].dilution_amount, 0.00001)
self.assertEqual(output.dilutions[0].infected_total, 8)
self.assertEqual(output.dilutions[0].uninfected_total, 0)
self.assertEqual(output.dilutions[1].dilution_amount, 0.000001)
self.assertEqual(output.dilutions[1].infected_total, 5)
self.assertEqual(output.dilutions[1].uninfected_total, 3)
self.assertEqual(output.dilutions[2].dilution_amount, 0.0000001)
self.assertEqual(output.dilutions[2].infected_total, 1)
self.assertEqual(output.dilutions[2].uninfected_total, 7)
self.assertEqual(output.dilutions[3].dilution_amount, 0.00000001)
self.assertEqual(output.dilutions[3].infected_total, 0)
self.assertEqual(output.dilutions[3].uninfected_total, 8)
self.assertEqual(output.dilution_to_cumulative_infected[output.dilutions[0]], 14)
self.assertEqual(output.dilution_to_cumulative_infected[output.dilutions[1]], 6)
self.assertEqual(output.dilution_to_cumulative_infected[output.dilutions[2]], 1)
self.assertEqual(output.dilution_to_cumulative_infected[output.dilutions[3]], 0)
self.assertEqual(output.dilution_to_cumulative_uninfected[output.dilutions[0]], 0)
self.assertEqual(output.dilution_to_cumulative_uninfected[output.dilutions[1]], 3)
self.assertEqual(output.dilution_to_cumulative_uninfected[output.dilutions[2]], 10)
self.assertEqual(output.dilution_to_cumulative_uninfected[output.dilutions[3]], 18)
self.assertEqual(output.dilution_to_percent_infected[output.dilutions[0]], 100)
self.assertEqual(round(output.dilution_to_percent_infected[output.dilutions[1]], 1), 66.7)
self.assertEqual(round(output.dilution_to_percent_infected[output.dilutions[2]], 1), 9.1)
self.assertEqual(output.dilution_to_percent_infected[output.dilutions[3]], 0)
self.assertEqual(round(output.pd, 2), 0.29)
self.assertEqual(round(output.tcid50, 9), 0.000000513)
self.assertEqual(round(output.tcid50_per_milliliter, 0), 194748304.0)
self.assertEqual(round(output.pfu_per_milliliter, 0), 134376330.0)
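# Hedged sketch of the arithmetic the test above appears consistent with
# (the Reed-Muench method): infected counts accumulate toward lower
# dilutions, uninfected counts toward higher ones, and the proportionate
# distance interpolates between the two dilutions bracketing 50% infectivity.
# Whether TCID50Calculator uses exactly these formulas is an assumption, and
# the inoculum-volume factor implied by tcid50_per_milliliter is not shown.
def _reed_muench_pd(percent_above_50, percent_below_50):
    """Proportionate distance between the dilutions bracketing 50%."""
    return (percent_above_50 - 50.0) / (percent_above_50 - percent_below_50)

if __name__ == "__main__":
    # Values taken from the test data above: ~66.7% infected at 1e-6,
    # ~9.1% infected at 1e-7.
    pd = _reed_muench_pd(100 * 6 / 9, 100 * 1 / 11)
    print(round(pd, 2))               # ~0.29, matching output.pd
    print(round(10 ** (-6 - pd), 9))  # ~5.13e-07, matching output.tcid50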
|
from miRNASNP3 import app as application
|
# https://leetcode.com/problems/range-sum-query-2d-immutable
from typing import List
from itertools import accumulate
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
self.R = len(matrix)
self.C = len(matrix[0])
self.sum_matrix = []
for r in range(self.R):
self.sum_matrix.append(list(accumulate(matrix[r])))
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
res = 0
for r in range(row1, row2 + 1):
if col1 == 0:
res += self.sum_matrix[r][col2]
else:
res += self.sum_matrix[r][col2] - self.sum_matrix[r][col1 - 1]
return res
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
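# Illustrative check (added; not part of the LeetCode template): each row
# stores its own running prefix sums, so sumRegion adds one O(1) row segment
# per row in the query rectangle.
if __name__ == "__main__":
    grid = [
        [3, 0, 1],
        [5, 6, 3],
        [1, 2, 0],
    ]
    nm = NumMatrix(grid)
    print(nm.sumRegion(1, 1, 2, 2))  # 6 + 3 + 2 + 0 = 11
    print(nm.sumRegion(0, 0, 1, 1))  # 3 + 0 + 5 + 6 = 14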
|
from __future__ import absolute_import
from .mf_main import mf_main
from ._version import __version__
|
# Copyright 2017 Mabry Cervin and all contributers listed in AUTHORS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
from lxml import etree
import requests
import tempfile
# TODO: Look at using an iterable parse to save memory
def load_xml_file(filename):
    with open(filename, 'rb') as handle:
        return etree.parse(handle)
def fetch_gz_file(url, filename):
    file_request = requests.get(url, stream=True)  # stream so iter_content downloads in chunks
with tempfile.NamedTemporaryFile() as tf:
for chunk in file_request.iter_content(chunk_size=1024):
if chunk:
tf.write(chunk)
tf.flush()
with gzip.open(tf.name, 'rb') as gf:
unzipped_content = gf.read()
with open(filename, 'wb') as of:
of.write(unzipped_content)
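# Hedged usage sketch (added for illustration): fetching a gzipped XML dump
# and parsing it. The URL and filenames are hypothetical placeholders.
if __name__ == "__main__":
    example_url = "https://example.org/data/dump.xml.gz"  # placeholder
    fetch_gz_file(example_url, "dump.xml")
    tree = load_xml_file("dump.xml")
    print(tree.getroot().tag)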
|
import argparse
import os
from tqdm import tqdm
import numpy as np
import torch
from scipy.io.wavfile import read
from training.tacotron2_model.stft import TacotronSTFT
max_wav_value = 32768.0
sampling_rate = 22050
filter_length = 1024
hop_length = 256
win_length = 1024
n_mel_channels = 80
mel_fmin = 0.0
mel_fmax = 8000.0
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def wav_to_mel(stft, path, output_path):
audio, sampling_rate = load_wav_to_torch(path)
if sampling_rate != stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(sampling_rate, stft.sampling_rate))
audio_norm = audio / max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
np.save(output_path, melspec, allow_pickle=False)
if __name__ == "__main__":
"""Script to generate MELs from wavs"""
    parser = argparse.ArgumentParser(description="Convert WAVs to MEL spectrograms")
    parser.add_argument("-w", "--wavs", help="Path to a directory of WAV files", type=str, required=True)
parser.add_argument("-o", "--output", help="Output path", type=str, required=True)
args = parser.parse_args()
os.makedirs(args.output, exist_ok=True)
stft = TacotronSTFT(filter_length, hop_length, win_length, n_mel_channels, sampling_rate, mel_fmin, mel_fmax)
for f in tqdm(os.listdir(args.wavs)):
wav_path = os.path.join(args.wavs, f)
output_path = os.path.join(args.output, f.replace(".wav", ".npy"))
wav_to_mel(stft, wav_path, output_path)
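# Example invocation (illustrative; the script and directory names below are
# placeholders):
#   python wav_to_mel.py --wavs ./wavs --output ./mels
# Each <name>.wav in --wavs is written out as <name>.npy containing the
# 80-channel mel spectrogram produced by TacotronSTFT.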
|
from pathlib import Path
from setuptools import find_packages, setup
project_root = Path(__file__).parent
install_requires = (project_root / 'requirements.txt').read_text().splitlines()
extras_require = {'test': ['pytest']}
extras_require['dev'] = extras_require['test']
setup(
name='speechcolab',
version='0.0.1-alpha',
python_requires='>=3.6.0',
description='A library of speech gadgets.',
author='The SpeechColab Development Team',
long_description=(project_root / 'README.md').read_text(),
long_description_content_type="text/markdown",
license='Apache-2.0 License',
packages=find_packages(),
install_requires=install_requires,
extras_require=extras_require,
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Intended Audience :: Science/Research",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"License :: OSI Approved :: Apache Software License",
"Topic :: Multimedia :: Sound/Audio :: Speech",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed"
],
)
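# Illustrative install commands (not part of setup.py itself):
#   pip install .            # runtime dependencies from requirements.txt
#   pip install ".[dev]"     # additionally pulls in pytest via extras_require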
|
resposta = ''
vezes = 0
while resposta != 'M' and resposta != 'F':
if vezes > 0:
        print('\n\033[31mInvalid answer. Please try again.\033[m\n')
    resposta = str(input('Answer:\n\nM - For male\nF - For female\n\n').upper())
    vezes = 1
print('\033[32mThank you! Have a nice day.')
|
from config_handler import handle_config
from datetime import datetime,timezone
import glob
import logging
import multiprocessing
from os import system, path, chmod, rename
import pysolar.solar as ps
import socket
import ssl
import sys
from threading import Event, Thread
import time
import traceback
import urllib.request as req
# Note: os has no "mkdirs"; os.makedirs is available on all supported Python 3 versions.
from os import makedirs as mkdirs
socket.setdefaulttimeout(2)
# util function to call a routine at a specified interval
def call_repeatedly(intv, func, *args):
stopped = Event()
def loop():
i = 0
while not stopped.wait(intv):
func(*args)
i += 1
Thread(target=loop).start()
return stopped.set
# move files from cache to output directory
def flush_files(cams):
for camera in cams:
fns = glob.glob(cachepath+camera+'/*jpg')
for fn in fns:
doy = fn[-18:-10]
dest = "{}{}/{}".format( imagepath, camera, doy )
if not path.isdir(dest):
if SAFE_MODE:
print("mkdirs " + dest)
else:
mkdirs(dest)
chmod(dest, 0o755)
if SAFE_MODE:
print("rename {} to {}/{}_{}".format(fn,dest,camera,fn[-18:]))
else:
rename( fn, "{}/{}_{}".format( dest, camera, fn[-18:] ) )
# download images from cameras to "cache"
# and also make a copy to "latest" directory
# the "latest" directory enables the web dashboard to show real time images
def makeRequest(cam):
starttime = datetime.utcnow()
timestamp = starttime.strftime( "%Y%m%d%H%M%S" )
# for improper ssl certificates, try this to ignore CERTs
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
proxy = req.ProxyHandler({})
opener = req.build_opener(proxy,req.HTTPSHandler(context=context))
req.install_opener(opener)
fn = cachepath + cam + "/{}_{}.jpg".format( cam, timestamp )
fn_latest = latest + cam + '_latest.jpg'
if SAFE_MODE:
print( "Would retrieve {} to {}".format( urls[cam]+url_suffix, fn ) )
print( "Would copy {} to {}".format( fn, fn_latest ) )
else:
req.urlretrieve( urls[cam] + url_suffix, fn )
chmod(fn,0o755) # set the permission
system( "cp {} {}".format( fn, fn_latest ) )
chmod( fn_latest, 0o755 ) # set the permission
if __name__ == "__main__":
cp = handle_config(
metadata={"invoking_script":"image_downloader"}, header="downloader"
)
site = cp["site_id"]
config = cp['downloader']
SAFE_MODE = config['safe_mode'] # run without consequences?
if SAFE_MODE:
print( "Initializing image_downloader in safe_mode" )
url_suffix = config['network']['url_suffix']
flush_interval = config["flush_interval"]
interval_day = config['interval']['interval_day']
interval_night = config['interval']['interval_night']
site_config = config[site] # parameters that vary between sites
lat = site_config['geolocation']['lat']
lon = site_config['geolocation']['lon']
site_paths = cp['paths'][site]
cachepath = site_paths['cache_path']
latest = site_paths['latest_path']
imagepath = site_paths['img_path']
logpath = site_paths['logging_path']
# create the directories used if they do not already exist
for dest in [cachepath,latest,imagepath]:
if not path.isdir(dest) and not SAFE_MODE:
mkdirs(dest)
chmod(dest,0o755)
urls = {}
for cameraID, url in cp['cameras'][site]['urls'].items():
cameraID = cameraID.upper()
urls[cameraID] = url
dest = cachepath + cameraID
if not path.isdir(dest) and not SAFE_MODE:
mkdirs(dest)
chmod(dest,0o755)
        desti = imagepath + cameraID
        if not path.isdir(desti) and not SAFE_MODE:
            mkdirs(desti)
            chmod(desti,0o755)
# initialize the logger
logging.basicConfig(format='%(asctime)s [%(funcName)s] [%(process)d %(thread)d] %(levelname)s: %(message)s',\
level=logging.INFO,filename=path.join(logpath,'image_downloader.log'),filemode='w')
logger=logging.getLogger(__name__)
### Start loops
# invoke flush_files every flush_interval seconds
flush_event = call_repeatedly( flush_interval, flush_files, urls )
p = multiprocessing.Pool( len(urls) )
while (True):
try:
day_flag = ps.get_altitude(lat, lon, datetime.now(timezone.utc)) > 5
# invoke makeRequest once per camera every intv seconds
intv = interval_day if day_flag else interval_night
saveimage_event = call_repeatedly(intv, p.map_async, makeRequest, urls)
# check periodically if the sun has set or risen
if day_flag:
while ps.get_altitude( lat, lon, datetime.now(timezone.utc) ) > 5:
time.sleep(180)
else:
while ps.get_altitude( lat, lon, datetime.now(timezone.utc) ) <= 5:
time.sleep(600)
except Exception as e:
            msg = traceback.format_exc()
logger.error( msg )
finally:
# end the save_image loop so we can restart it with the new intv
try:
saveimage_event()
except:
                msg = traceback.format_exc()
logger.error( msg )
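# Hedged sketch of the configuration shape this script expects, inferred from
# the keys read above. The file format handled by handle_config() and every
# concrete value below are assumptions / placeholders.
#
#   site_id: demo_site
#   downloader:
#     safe_mode: true
#     flush_interval: 60
#     network: {url_suffix: "/snapshot.jpg"}
#     interval: {interval_day: 30, interval_night: 300}
#     demo_site:
#       geolocation: {lat: 40.7, lon: -73.0}
#   paths:
#     demo_site: {cache_path: /tmp/cache/, latest_path: /tmp/latest/,
#                 img_path: /tmp/images/, logging_path: /tmp/logs/}
#   cameras:
#     demo_site:
#       urls: {cam01: "https://camera.example/"}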
|
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import json
from janome.tokenizer import Tokenizer
from requests_oauthlib import OAuth1Session
from summpy.lexrank import summarize
# twitterAPI
oath_key_dict = {
"consumer_key": "2qimKikZwCOJXG0wxJ0lzkcM6",
"consumer_secret": "MHAjJsYvGCF0mVkgs9w0tJh0fJf0ZpBMKqUMiqTUzQmqYoIFA2",
"access_token": "157729228-r5JXs6Mi79rEgPAd1AyS9w5l7BaUADzrmLpc9JiR",
"access_token_secret": "Dm0C0ZPCBCDcNARnAaJvUDxEk88o1pbTtWuZgvILzFG2u"
}
research_ids = ["get2ch_soccer", "BaseballNEXT", "gorin"]
pattern = r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+\$,%#]+)"
rss_news = [r"https://headlines.yahoo.co.jp/rss/jsportsv-c_spo.xml",
r"https://headlines.yahoo.co.jp/rss/soccerk-c_spo.xml",
r"https://headlines.yahoo.co.jp/rss/bfj-c_spo.xml",
r"https://headlines.yahoo.co.jp/rss/nallabout-c_spo.xml",
r"https://headlines.yahoo.co.jp/rss/asahik-c_spo.xml",
r"https://headlines.yahoo.co.jp/rss/baseballk-c_spo.xml"]
news_dict = {}
def create_oath_session(oath_key_dict):
oath = OAuth1Session(
oath_key_dict["consumer_key"],
oath_key_dict["consumer_secret"],
oath_key_dict["access_token"],
oath_key_dict["access_token_secret"]
)
return oath
class SportsLive:
def __init__(self, parent=None):
pass
    '''
    Morphological analysis
    '''
@staticmethod
def morphological_analysis(text):
txt = text
t = Tokenizer()
word_dic = {}
lines = txt.split("\r\n")
for line in lines:
blog_txt = t.tokenize(line)
for w in blog_txt:
word = w.surface
ps = w.part_of_speech
                if ps.find('名詞') < 0:  # keep only nouns ('名詞')
continue
if word not in word_dic:
word_dic[word] = 0
word_dic[word] += 1
keys = sorted(word_dic.items(), key=lambda x: x[1], reverse=True)
keyword = ''
for word, cnt in keys[:4]:
print("{0} ".format(word))
keyword += "{0} ".format(word)
return keyword
def score_check(self, keyword):
data = []
try:
target_url = 'https://sports.yahoo.co.jp/search/text?query=' + keyword
resp = requests.get(target_url)
soup = BeautifulSoup(resp.text, "html.parser")
tables = soup.find_all("p", class_="siteUrl")
for table in tables:
                geturl = table.text
                # rstrip() strips a character set rather than a suffix, so
                # remove the trailing " - キャッシュ" ("cache") label explicitly.
                if geturl.endswith(' - キャッシュ'):
                    geturl = geturl[:-len(' - キャッシュ')]
                data.append(geturl)
except:
pass
score = ''
try:
for url in data:
if 'game' in url:
score = self.get_score(url)
break
else:
continue
except:
pass
return score
def twitter_check(self, keyword, debug=False):
keyword_list = keyword.split(' ')
        outtext = ''
        outtext2 = ''
for keyword in keyword_list:
if keyword == "":
break
for research_id in research_ids:
tweets = self.tweet_search(keyword, oath_key_dict, research_id)
for tweet in tweets["statuses"]:
text = tweet['text']
text = self.tweet_analysis(text)
if not text[0] in outtext:
outtext += text[0] + '<br>'
outtext2 += outtext[:600]
outtext = ''
outtext2 = outtext2.replace(keyword, '<font color="red">' + keyword + '</font>')
return outtext2
def news_check(self, keyword, debug=False):
keyword = keyword.split(' ')
output_text = ""
json_dict = {}
for rss in rss_news:
resp = requests.get(rss)
soup = BeautifulSoup(resp.text, "html.parser")
titles = soup.find_all("title")
links = soup.find_all("link")
for title, link in zip(titles, links):
news_dict.update({title.next: str(link.next).replace('\n', '').replace(' ', '')})
for key in keyword:
if key == "":
break
news_key_list = [l for l in news_dict.keys() if key in l]
print(news_key_list)
for list_key in news_key_list:
text = ""
resp = requests.get(news_dict[list_key])
soup = BeautifulSoup(resp.text, "html.parser")
for s in soup.find_all("p", class_="ynDetailText"):
text += s.get_text()
analysis_text = self.tweet_analysis(text)
if debug:
                    # Title: {link, full text, summary}
json_dict.update({list_key:
{
'link':news_dict[list_key],
'text':text,
'a_text':analysis_text,
}}
)
output_text += '<br>'.join(analysis_text)
json_dict.update({"result_text":output_text})
encode_json_data = json.dumps(json_dict)
return encode_json_data
@staticmethod
def tweet_search(search_word, oath_key_dict, account):
url = "https://api.twitter.com/1.1/search/tweets.json?"
params = {
"q": search_word,
"from":account,
"lang": "ja",
"result_type": "recent",
"count": "100"
}
oath = create_oath_session(oath_key_dict)
        response = oath.get(url, params=params)
        if response.status_code != 200:
            print("Error code: %d" % (response.status_code))
            return None
        tweets = json.loads(response.text)
return tweets
@staticmethod
def get_score(url):
target_url = url
resp = requests.get(target_url)
        soup = BeautifulSoup(resp.text, "html.parser")
if 'baseball' in url:
score_table = soup.find('table', {'width': "100%", 'cellpadding': "0", 'cellspacing': "0", 'border': "0"})
rows = score_table.findAll("tr")
score = []
text = '最新の試合の結果は' + '\n'
try:
for row in rows:
csvRow = []
for cell in row.findAll(['td', 'th']):
csvRow.append(cell.get_text())
score.append(csvRow)
text += '\t|'.join(csvRow) + '\n'
finally:
return text
elif 'soccer' in url:
hometeam = soup.find_all('div', class_="homeTeam team")
hometotal = soup.find_all("td", class_="home goal")
home1st = soup.find_all("td", class_="home first")
home2nd = soup.find_all("td", class_="home second")
awayteam = soup.find_all('div', class_="awayTeam team")
awaytotal = soup.find_all("td", class_="away goal")
away1st = soup.find_all("td", class_="away first")
away2nd = soup.find_all("td", class_="away second")
for homename, awayname, homegoal, awaygoal in zip(hometeam, awayteam, hometotal, awaytotal):
text = '最新の試合の結果は' + '\n' + str(homename.text.replace('\n', '')) + \
'-' + str(awayname.text.replace('\n', '')) + '\n'
                if len(home1st[0].text) > 0:
                    text += home1st[0].text + '前半' + away1st[0].text + '\n'
                if len(home2nd[0].text) > 0:
                    text += home2nd[0].text + '後半' + away2nd[0].text + '\n'
                if len(homegoal.text) > 0:
                    text += homegoal.text + ' - ' + awaygoal.text
return text
@staticmethod
def tweet_analysis(text):
sentences, debug_info = summarize(
text, sent_limit=5, continuous=True, debug=True
)
return sentences
def main():
SL = SportsLive()
print(SL.news_check(SL.morphological_analysis('羽生のオリンピック')))
print(SL.news_check(SL.morphological_analysis('宇野昌磨の記録'), debug=True))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file is part of the complex_terrain algorithm
M. Lamare, M. Dumont, G. Picard (IGE, CEN).
"""
import numpy as np
import Py6S as ps
class rtmodel(object):
"""
"""
def __init__(self):
self.outputs = None
def run(self, sza, saa, vza, vaa, wvl, alt, aero,
aod=0.1, refl=0.99, water=0.05, ozone=0.30, atmo=None, atcor=False,
):
""" Run PY6S
date = datetime object specifying the day and month of acquisition
lat = latitude of point considered (in degrees)
sza, saa, vza, vaa = viewing angles input in radians
wvl = wavelength in namometers
alt_km =
profile has to be one of the following:
"""
# Convert nanometer input into um for the model
wvl_um = wvl / 1000
alt_km = alt / 1000
# Build aero profiles
aero_profiles = {
"Continental": ps.AeroProfile.Continental,
"BiomassBurning": ps.AeroProfile.BiomassBurning,
"Desert": ps.AeroProfile.Desert,
"Maritime": ps.AeroProfile.Maritime,
"Stratospheric": ps.AeroProfile.Stratospheric,
"Urban": ps.AeroProfile.Urban,
"None": ps.AeroProfile.NoAerosols,
}
# Generate a 6S class
s = ps.SixS()
# Add geometry
s.geometry = ps.Geometry.User()
# Convert angles in radians to degrees
s.geometry.solar_z = np.rad2deg(sza)
s.geometry.view_z = np.rad2deg(vza)
s.geometry.solar_a = np.rad2deg(saa)
s.geometry.view_a = np.rad2deg(vaa)
# s.geometry.day = date.day
# s.geometry.month = date.month
# Set altitudes
s.altitudes = ps.Altitudes()
s.altitudes.set_target_custom_altitude(alt_km)
s.altitudes.set_sensor_satellite_level()
s.wavelength = ps.Wavelength(wvl_um) # Set wavelength in um
# Atmosphere parameters
if atmo is None:
# If no standard atmospheric profile is specified, use water and
# ozone.
s.atmos_profile = ps.AtmosProfile.UserWaterAndOzone(water, ozone)
else:
            # Build atmo dictionary
atmo_profiles = {
"Mid_lat_summer": ps.AtmosProfile.MidlatitudeSummer,
"Mid_lat_winter": ps.AtmosProfile.MidlatitudeWinter,
"Sub_arctic_summer": ps.AtmosProfile.SubarcticSummer,
"Sub_arctic_winter": ps.AtmosProfile.SubarcticWinter,
"Tropical": ps.AtmosProfile.Tropical,
"None": ps.AtmosProfile.NoGaseousAbsorption,
}
# Run a standard atmospheric profile
s.atmos_profile = ps.AtmosProfile.PredefinedType(
atmo_profiles[atmo])
# Aerosol parameters
s.aero_profile = ps.AeroProfile.PredefinedType(aero_profiles[aero])
s.aot550 = aod
# According to switch, perform atmospheric correction or not
if atcor:
s.atmos_corr = ps.AtmosCorr.AtmosCorrLambertianFromReflectance(
refl)
else:
s.ground_reflectance = ps.GroundReflectance.HomogeneousLambertian(
refl
)
s.run() # Run Py6S
self.outputs = s.outputs
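# Hedged usage sketch (added for illustration): running the wrapper for a
# single wavelength. The angle, altitude, and profile values are hypothetical,
# and a working Py6S/6S installation is required for s.run() to succeed.
if __name__ == "__main__":
    model = rtmodel()
    model.run(
        sza=np.deg2rad(30), saa=np.deg2rad(120),
        vza=np.deg2rad(10), vaa=np.deg2rad(50),
        wvl=550,           # nanometers
        alt=2000,          # meters above sea level
        aero="Continental",
        atmo="Mid_lat_summer",
    )
    print(model.outputs)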
|
"""
Everything to do with cleaning up experimental data
"""
from copy import deepcopy
import numpy as np
from tracklib import Trajectory, TaggedSet
def split_trajectory_at_big_steps(traj, threshold):
"""
Removes suspected mislinkages.
    Exception: if two consecutive big steps lead to an overall displacement
    smaller than the threshold, this is taken to be a single misconnected
    point in an otherwise fine trajectory, and that point is simply removed.
Parameters
----------
traj : Trajectory
the trajectory to investigate
threshold : float
the maximum allowed frame to frame displacement
Returns
-------
set of tracklib.Trajectory
See also
--------
split_dataset_at_big_steps
Notes
-----
As of now, this only works on trajectories with ``N=1``.
This really just checks for frame-to-frame connections exceeding the
threshold. So if there are missing frames in a trajectory, the step across
those missing data will not be considered.
"""
if traj.N != 1: # pragma: no cover
raise ValueError("Cannot detect mislinkages in trajectories with N > 1")
old_npwarns = np.seterr(invalid='ignore') # yes, we'll do np.nan <= threshold. Gives False.
difftraj = traj.diff().abs()
step_isBig = np.where(difftraj[:][:, 0] <= threshold, 0, 1.)
step_isBig[np.where(np.isnan(difftraj[:][:, 0]))[0]] = np.nan
np.seterr(**old_npwarns)
step_isBig = np.pad(step_isBig, 1, constant_values=np.nan) # Now step_isBig[i] describes traj[i] - traj[i-1]
inds_bigsteps = np.where(step_isBig == 1)[0]
# Check for single misconnections
for ind in inds_bigsteps:
if ((step_isBig[(ind-1):(ind+2)] == 1).tolist() == [False, True, True]
and step_isBig[ind+2] != 1):
traj.data[:, ind, :] = np.nan
step_isBig[ind:(ind+2)] = np.nan # Now the steps don't exist anymore
# If now everything's fine, that's cool
if not np.any(step_isBig == 1):
return {traj}
# Split at remaining big steps
inds_bigsteps = np.where(step_isBig == 1)[0]
new_trajs = set()
old_ind = 0
for ind in inds_bigsteps:
new_trajs.add(Trajectory.fromArray(traj.data[:, old_ind:ind, :]))
old_ind = ind
new_trajs.add(Trajectory.fromArray(traj.data[:, old_ind:, :]))
del traj
return {traj for traj in new_trajs if len(traj) > 1}
def split_dataset_at_big_steps(data, threshold):
"""
Apply `split_trajectory_at_big_steps` to a whole data set
Parameters
----------
data : TaggedSet
the dataset
threshold : float
the maximum allowed step size
Returns
-------
TaggedSet
a new data set with the split trajectories
See also
--------
split_trajectory_at_big_steps
"""
def gen():
for traj, tags in data(giveTags=True):
for part_traj in split_trajectory_at_big_steps(traj, threshold):
yield (deepcopy(part_traj), deepcopy(tags))
return TaggedSet(gen())
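# Hedged usage sketch (added for illustration): splitting one trajectory with
# a single suspiciously large jump. The coordinates are made up, and it is
# assumed that Trajectory.fromArray() accepts a (frames x coordinates) array.
if __name__ == "__main__":
    jumpy = Trajectory.fromArray(np.array([
        [0.0, 0.0], [0.1, 0.0], [0.2, 0.1],   # smooth segment
        [50.0, 50.0], [50.1, 50.0],           # big jump, then smooth again
    ]))
    parts = split_trajectory_at_big_steps(jumpy, threshold=1.0)
    print(len(parts), "fragment(s) after splitting")  # likely 2 under these assumptions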
|
def matrix_multiply(A,B):
    # Dimensions must be compatible: the number of columns of A has to
    # equal the number of rows of B.
    if len(A[0]) != len(B):
        return -1
    # The result has len(A) rows and len(B[0]) columns.
    result = [[0] * len(B[0]) for _ in range(len(A))]
    for i in range(len(A)):
        for j in range(len(B[0])):
            for k in range(len(A[0])):
                result[i][j] = result[i][j] + A[i][k] * B[k][j]
    return result
if __name__ == '__main__':
A = [
[9, 2, 3],
[1, 2, 3],
]
B = [
[1, 2],
[1, 2],
[1, 2],
]
print(matrix_multiply(A, B))
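    # Optional cross-check against NumPy (illustrative only; NumPy is not
    # otherwise required by this snippet).
    try:
        import numpy as np
        print(np.matmul(A, B).tolist())  # expected: [[14, 28], [6, 12]]
    except ImportError:
        pass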
|
from tradssat.genotype.vars_._cropgro import cropgro_cul_vars, cropgro_eco_vars
cul_vars_PPGRO = cropgro_cul_vars('VAR-NAME')
eco_vars_PPGRO = cropgro_eco_vars()
|