content stringlengths 5 1.05M |
|---|
# *-* encoding: utf-8 *-*
from django_tables2 import tables, Column
from vmtory.models import VM
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
class Top10Table(tables.Table):
    """Ranking table: one row per user with their VM count."""
    # Bound to the ``assignee__username`` accessor of the queryset rows.
    assignee__username = Column(_('User'))
    # Per-user VM count — presumably an aggregation annotation added by the
    # view's queryset; verify against the caller.
    total = Column(_('Quantity'))
class GenericTable(tables.Table):
    """Base table for VM inventory listings.

    Provides the quick-access action column (details / power toggle /
    favourite / delete), the annotations column and the shared render and
    order helpers reused by the concrete table subclasses below.
    """
    # Inline template for the per-row action buttons. Power on/off buttons are
    # swapped depending on ``record.state`` and disabled while a power-on
    # request is pending or the VM is flagged as deleted.
    QUICK_ACCESS_TEMPLATE = """{% load i18n %}<div class="ui tiny icon center aligned buttons">
    {% url \'vm_details\' record.id as the_url%} <a href="{{the_url}}" class="ui icon button" data-tooltip="{% blocktrans %}VM details and adavanced options{% endblocktrans %}"><i class="blue info icon"></i></a>
    {% if record.pending_poweron %}
    <span class="ui icon button" data-tooltip="{% blocktrans %}There\'s a poweron request pending{% endblocktrans %}"><i class="refresh disabled icon loading"></i></span>
    {% else %}
    {% if record.state %}
    {% url \'vm_poweroff\' record.id as the_url%} <a href="{{the_url}}" class="ui icon button {% if record.deleted %}disabled{% endif %}" data-tooltip="{% blocktrans %}Request power off{% endblocktrans %}"><i class="red stop circle icon"></i></a>
    {% else %}
    {% url \'vm_poweron\' record.id as the_url%} <a href="{{the_url}}" class="ui icon {% if record.deleted %}disabled{% endif %} button" data-tooltip="{% blocktrans %}Request power on{% endblocktrans %}"><i class="green play circle icon"></i></a>
    {% endif %}
    {% endif %}
    {% url \'favorites_toggle\' record.id as the_url%}<a href="#" onclick="toggleFavorite(\'{{the_url}}\', \'.fav{{record.id}}\', \'.anchor{{record.id}}\')" class="ui icon button anchor{{record.id}}" data-toggle=tooltip data-tooltip="{%if user in record.favorites.all%}{% blocktrans %}Remove from{% endblocktrans %}{%else%}{% blocktrans %}Add to{% endblocktrans %}{%endif%} {%blocktrans%}favourites{%endblocktrans%}"><i class="star {%if user in record.favorites.all%}yellow {%else%} black{%endif%} icon fav{{record.id}}"></i></a>
    {% url \'vm_delete\' record.id as the_url%} <a href="{{the_url}}" class="ui icon button {% if record.deleted %}disabled{% endif %}" data-toggle=tooltip data-tooltip="{% blocktrans %}Request deletion{% endblocktrans %}"><i class="orange trash icon"></i></a>
    </div>"""
    quick_access = tables.columns.TemplateColumn(QUICK_ACCESS_TEMPLATE, orderable=False, verbose_name=_('Quick Access'), attrs={'th': {'class': 'collapsing'}})
    # Free-text annotation, rendered with line breaks; em-dash when empty.
    annotation_p = tables.columns.TemplateColumn('{% if record.annotation %}{{record.annotation|linebreaksbr}}{%else%}—{%endif%}', orderable=False, verbose_name=_('Annotations'))

    class Meta:
        model = VM
        template = 'semantic_table.html'
        exclude = ['uuid', 'public', 'path', 'vmid', 'annotation', 'iuuid', 'deleted', 'pending_poweron', 'uptime', 'notes', 'last_poweron', 'networking']
        attrs = {"class": "ui table"}
        sequence = (
            'id',
            'state',
            'hypervisor',
            'environment',
            'name',
            'cpus',
            'ram',
            'hdds',
            'guest',
            'ip_address',
            'assignee',
            'group',
            'annotation_p',
            'last_update',
            'quick_access',
        )

    def render_environment(self, value, record):
        # Show the environment as a small coloured label; blank when unset.
        if record.environment:
            display = ' <span class="ui horizontal mini %s basic label">%s</span>' % (record.environment.color, record.environment.name)
        else:
            display = ''
        return mark_safe(display)

    def render_ram(self, value, record):
        # Human-readable RAM figure provided by the model.
        return record.ramh()

    def render_hypervisor(self, value):
        hv = value
        if hv.tooltip:
            return mark_safe('<span data-tooltip="' + hv.tooltip + '">' + hv.name + '</span>')
        else:
            return mark_safe('<span>' + hv.name + '</span>')

    def render_state(self, value, record):
        # Icon + tooltip describing the VM power state.
        template = '<div data-tooltip="%s" class="ui center aligned icon"><i class="%s %s icon"></i></div>'
        if record.deleted:
            # Fixed: message was the Spanish 'Eliminada', inconsistent with
            # AdvancedSearchVMTable.render_state which uses _('Deleted').
            return mark_safe(template % (_('Deleted'), 'trash', 'brown'))
        known_state, unkown_message = record.known_state()
        if known_state:
            if value:
                ut = record.uptimef()
                if ut:
                    powered_on = _('Powered on since %s') % record.uptimef()
                else:
                    powered_on = _('Powered on')
                return mark_safe(template % (powered_on, 'play circle', 'green'))
            else:
                return mark_safe(template % (_('Powered off'), 'stop circle', 'red'))
        else:
            return mark_safe(template % (unkown_message, 'question circle', 'grey'))

    def order_assignee(self, queryset, is_descending):
        # Sort by username instead of the FK id.
        queryset = queryset.order_by(('-' if is_descending else '') + 'assignee__username')
        return (queryset, True)

    def order_group(self, queryset, is_descending):
        # Sort by group name instead of the FK id.
        queryset = queryset.order_by(('-' if is_descending else '') + 'group__name')
        return (queryset, True)
class VMTable(GenericTable):
    """Personal VM listing (striped style); hides owner and audit columns."""
    class Meta:
        attrs = {"class": "ui striped table"}
        # NOTE(review): 'assignee', 'guest' and 'last_update' are excluded yet
        # still listed in ``sequence`` below — presumably tolerated by
        # django-tables2, but the entries look dead; confirm before cleanup.
        exclude = ['assignee', 'guest', 'last_update']
        sequence = (
            'id',
            'state',
            'hypervisor',
            'environment',
            'name',
            'cpus',
            'ram',
            'hdds',
            'guest',
            'ip_address',
            'assignee',
            'group',
            'annotation_p',
            'last_update',
            'quick_access',
        )
class GroupVMTable(GenericTable):
    """VM listing scoped to a group; keeps the assignee column visible."""
    class Meta:
        exclude = ['guest', 'last_update']
        sequence = (
            'id',
            'state',
            'hypervisor',
            'environment',
            'name',
            'cpus',
            'ram',
            'hdds',
            'guest',
            'ip_address',
            'assignee',
            'group',
            'annotation_p',
            'last_update',
            'quick_access',
        )
    # order_assignee() / order_group() were verbatim copies of the
    # GenericTable implementations; the inherited versions are used instead.
class DeletedVMTable(GenericTable):
    """Listing of VMs flagged as deleted; no state icon or action buttons."""
    class Meta:
        exclude = ['state', 'quick_access', 'guest']
        sequence = (
            'id',
            'hypervisor',
            'environment',
            'name',
            'cpus',
            'ram',
            'hdds',
            'ip_address',
            'assignee',
            'group',
            'last_update',
        )
class AllVMTable(GenericTable):
    """Full inventory listing with the id rendered as a details link."""
    # Replaces the plain ``id`` column (excluded below); stays sortable on the
    # underlying id via ``order_by='id'``.
    id_with_link = tables.columns.TemplateColumn('{% url \'vm_details\' record.id as the_url%} <a href="{{the_url}}">{{record.id}}</a>', verbose_name='ID', order_by='id')

    class Meta:
        exclude = ['id', 'guest']
        sequence = (
            'id_with_link',
            'state',
            'hypervisor',
            'environment',
            'name',
            'cpus',
            'ram',
            'hdds',
            'ip_address',
            'assignee',
            'group',
            'annotation_p',
            'last_update',
        )
class AdvancedSearchVMTable(tables.Table):
    """Search-result table; standalone (not derived from GenericTable).

    Adds a mailto "Query" button per row and a linked id column, and
    re-implements the render helpers used elsewhere in this module.
    """
    # Mailto link pre-filled with the VM's id, name and IP address.
    # NOTE(review): 'ayourdomain' looks like a deployment placeholder — confirm.
    options_template = '<a class="ui mini compact button" data-tooltip="{% load i18n %}{% blocktrans %}Opens an email with the VM data and the owner as receipent.{% endblocktrans %}" href="mailto:{{record.assignee}}@ayourdomain?Subject={% blocktrans %}Query%20about%20VM{% endblocktrans %}%20%22{{record.name}}%22&Body={% blocktrans %}Query%20about%20VM%20id:%20{% endblocktrans %}{{record.id}}%20%0D%0A%20VM%20Name:%20{{record.name}}%20%0D%0A%20IP:%20{{record.ip_address}}"><i class="envelope icon"></i> {% blocktrans %}Query{% endblocktrans %}</a>'
    options = tables.columns.TemplateColumn(options_template, verbose_name=_("Options"), orderable=False)
    # Linked id column replacing the excluded plain ``id`` field.
    id_with_link = tables.columns.TemplateColumn('{% url \'vm_details\' record.id as the_url%} <a href="{{the_url}}">{{record.id}}</a>', verbose_name='ID', order_by='id')

    class Meta:
        model = VM
        exclude = ['id', 'uuid', 'public', 'path', 'vmid', 'networking', 'annotation', 'iuuid', 'deleted', 'pending_poweron', 'guest', 'uptime', 'last_update', 'notes', 'last_poweron']
        sequence = (
            'id_with_link',
            'state',
            'hypervisor',
            'environment',
            'name',
            'cpus',
            'ram',
            'hdds',
            'ip_address',
            'assignee',
            'group',
            'options',
        )

    def render_ram(self, value, record):
        # Human-readable RAM figure provided by the model.
        return record.ramh()

    def render_hypervisor(self, value):
        # Unlike GenericTable.render_hypervisor, always emits the tooltip
        # attribute even when ``hv.tooltip`` is empty.
        hv = value
        return mark_safe('<span data-tooltip="%s">%s</span>' % (hv.tooltip, hv.name))

    def render_state(self, value, record):
        # Icon + tooltip describing the VM power state.
        template = '<div data-tooltip="%s" class="ui center aligned icon" data-variation="small"><i class="%s %s icon"></i></div>'
        if record.deleted:
            return mark_safe(template % (_('Deleted'), 'trash', 'brown'))
        known_state, unkown_message = record.known_state()
        if known_state:
            if value:
                ut = record.uptimef()
                if ut:
                    powered_on = _('Powered on since %s') % record.uptimef()
                else:
                    powered_on = _('Powered on')
                return mark_safe(template % (powered_on, 'play circle', 'green'))
            else:
                return mark_safe(template % (_('Powered off'), 'stop circle', 'red'))
        else:
            return mark_safe(template % (unkown_message, 'question circle', 'grey'))

    def render_environment(self, value, record):
        # Coloured environment label; blank when the VM has no environment.
        if record.environment:
            display = ' <span class="ui horizontal mini %s basic label">%s</span>' % (record.environment.color, record.environment.name)
        else:
            display = ''
        return mark_safe(display)
|
import datetime
import uuid
from auth.domain.activation import Activation
from auth.domain.user import User
from auth.settings import ACTIVATION_EXPIRE_TIME
def test_user_id():
    """An activation must expose the id of the user it belongs to."""
    user = User(
        id=uuid.uuid4(),
        full_name='Foo Bar',
        email='foo.bar@email.com',
        password='a-secret',
    )
    assert Activation(user=user).user_id == user.id
def test_expire_date():
    """expire_date is creation time plus the configured activation window."""
    created_at = datetime.datetime.now()
    user = User(
        id=uuid.uuid4(),
        full_name='Foo Bar',
        email='foo.bar@email.com',
        password='a-secret',
    )
    activation = Activation(user=user, created_at=created_at)
    assert activation.expire_date == created_at + datetime.timedelta(seconds=ACTIVATION_EXPIRE_TIME)
def test_is_expired_false():
    """A freshly created activation is not expired."""
    user = User(
        id=uuid.uuid4(),
        full_name='Foo Bar',
        email='foo.bar@email.com',
        password='a-secret',
    )
    activation = Activation(user=user, created_at=datetime.datetime.now())
    assert not activation.is_expired
def test_is_expired_true():
    """An activation created far in the past is expired.

    Renamed from ``test_is_expired_false``: the original duplicate name
    shadowed the previous test, so pytest never ran the "not expired" case,
    and this function asserts the *expired* branch anyway.
    """
    created_at = datetime.datetime(2019, 1, 1)
    user = User(id=uuid.uuid4(), full_name='Foo Bar', email='foo.bar@email.com', password='a-secret')
    activation = Activation(user=user, created_at=created_at)
    assert activation.is_expired
|
from utils import startTestMsg
from tests.test_FA_Determinization import tFA_Determinization
from tests.test_FA_Minimization import tFA_Minimization
from tests.test_FA_Recognition import tFA_Recognition
from tests.test_FA_Operations import tFA_Renaming_States, tFA_Union
from tests.test_CFG_disk import testCFGReading, testCFGWriting
from tests.test_RegularConversions import *
def test_FA():
    """Run the full finite-automata test battery.

    Fixed: the recognition step called ``tFA_recognition()`` (lowercase *r*),
    but the imported name is ``tFA_Recognition`` — the original raised a
    NameError as soon as it reached that step.
    """
    print('\tTesting Determinization\n')
    tFA_Determinization()
    print('\tTesting Minimization\n')
    tFA_Minimization()
    print('\tTesting Words Recognition\n')
    tFA_Recognition()
    print('\tTesting Renaming States\n')
    tFA_Renaming_States()
    print('\tTesting Union\n')
    tFA_Union()
def testCFG():
    """Exercise the CFG disk round-trip: reading then writing."""
    for banner, check in (
        ("TEST CFG READING", testCFGReading),
        ("TEST CFG WRITING", testCFGWriting),
    ):
        startTestMsg(banner)
        check()
def testRegularConversions():
    """Run both regular-grammar/finite-automata conversion tests."""
    for banner, check in (
        ("TEST REGULAR GRAMMAR -> FINITE AUTOMATA CONVERSION",
         testRGtoFiniteAutomataConversion),
        ("TEST FINITE AUTOMATA -> REGULAR GRAMMAR CONVERSION",
         testFAtoRegularGrammarConversion),
    ):
        startTestMsg(banner)
        check()
if __name__ == '__main__':
    # Manual test driver. The full FA suite is currently disabled; only the
    # union test runs — presumably a temporary debugging choice, confirm
    # before relying on this as the complete test entry point.
    # test_FA()
    tFA_Union()
    print('\n')
    testCFG()
    print('\n')
    testRegularConversions()
|
# v3 API endpoints of HackerEarth Code Checker API
COMPILE_API_ENDPOINT = "https://api.hackerearth.com/v3/code/compile/"
RUN_API_ENDPOINT = "https://api.hackerearth.com/v3/code/run/"
# Max run time of a program in seconds
RUN_TIME_UPPER_LIMIT = 5
# Max memory consumption allowed for a program
MEMORY_UPPER_LIMIT = 1024 * 256
|
__author__ = "Tomasz Rybotycki"
from copy import deepcopy
from typing import List, Iterable
from numpy import ndarray
from scipy import special
from ..boson_sampling_utilities.boson_sampling_utilities import generate_possible_outputs
from ..distribution_calculators.bs_distribution_calculator_interface import \
BosonSamplingExperimentConfiguration
from ..distribution_calculators.bs_distribution_calculator_with_fixed_losses import \
BSDistributionCalculatorWithFixedLosses, BSPermanentCalculatorInterface
from multiprocessing import cpu_count, Pool
class BSDistributionCalculatorWithUniformLosses(BSDistributionCalculatorWithFixedLosses):
    """Boson-sampling distribution calculator for uniform (binomially
    distributed) particle losses.

    Each outcome's probability is the fixed-losses probability for the number
    of surviving particles, weighted by the binomial probability of that many
    particles surviving under the configured uniform transmissivity.
    """

    def __init__(self, configuration: BosonSamplingExperimentConfiguration,
                 permanent_calculator: BSPermanentCalculatorInterface) -> None:
        super().__init__(configuration, permanent_calculator)
        # weights[l] = probability that exactly l of the initial particles survive.
        self.weights = self._initialize_weights()
        self.weightless = False

    def set_weightless(self, weightless: bool) -> None:
        # Toggle between binomial weighting and unit weights (all 1).
        if not weightless:
            self.weights = self._initialize_weights()
        else:
            self.weights = [1 for _ in self.weights]
        self.weightless = weightless

    def _initialize_weights(self) -> List[float]:
        # Binomial survival probability: C(n, l) * eta^l * (1 - eta)^(n - l).
        weight = \
            lambda n, l, eta: pow(eta, l) * special.binom(n, l) * pow(1.0 - eta, n - l)
        weights = []
        for number_of_particles_left \
                in range(self.configuration.initial_number_of_particles + 1):
            weights.append(
                weight(self.configuration.initial_number_of_particles,
                       number_of_particles_left,
                       self.configuration.uniform_transmissivity)
            )
        return weights

    def calculate_probabilities_of_outcomes(self,
                                            outcomes: Iterable[Iterable[int]]) -> \
            List[float]:
        # Outcomes are independent, so fan out across all available cores.
        with Pool(processes=cpu_count()) as pool:
            outcomes_probabilities = pool.map(self._calculate_probability_of_outcome,
                                              outcomes)
        return outcomes_probabilities

    def _calculate_probability_of_outcome(self, outcome: ndarray) -> float:
        # Number of surviving particles determines which weight applies.
        number_of_particles_left = int(sum(outcome))
        l = number_of_particles_left
        if l == 0:
            # All particles lost: probability is just the weight of losing all.
            return self.weights[0]
        n = self.configuration.initial_number_of_particles
        # Delegate to the fixed-losses calculator for exactly (n - l) losses.
        subconfiguration = deepcopy(self.configuration)
        subconfiguration.number_of_particles_left = number_of_particles_left
        subconfiguration.number_of_particles_lost = n - l
        subdistribution_calculator = \
            BSDistributionCalculatorWithFixedLosses(subconfiguration,
                                                    self._permanent_calculator)
        probability_of_outcome = subdistribution_calculator.calculate_probabilities_of_outcomes([outcome])[0]
        return probability_of_outcome * self.weights[l]

    def get_outcomes_in_proper_order(self) -> List[ndarray]:
        # Ordering matches calculate_probabilities_of_outcomes; lossy outputs included.
        return generate_possible_outputs(self.configuration.initial_number_of_particles,
                                         self.configuration.number_of_modes, consider_loses=True)
|
from setuptools import setup
# Packaging metadata for the batch-file-renamer CLI tool; installs the
# BatchFileRenamer/batch-file-renamer script onto the user's PATH.
setup(
    name="batch-file-renamer",
    scripts = [
        'BatchFileRenamer/batch-file-renamer'
    ],
    version="1.0",
    description="Simple batch file renaming application",
    author="JJ Style",
    author_email="style.jj@pm.me",
    license="MIT",
    url="https://github.com/jj-style/BatchFileRenamer",
    python_requires=">=3.6"
)
|
# -*- coding: utf-8 -*-
import os
import pickle
from autobahn.twisted.util import sleep
from mdstudio.deferred.chainable import chainable
from mdstudio.component.session import ComponentSession
from mdstudio.runner import main
from mdstudio_workflow import Workflow
class LIEPredictionWorkflow(ComponentSession):
    """
    This workflow will perform a binding affinity prediction for CYP 1A2 with
    applicability domain analysis using the Linear Interaction Energy (LIE)
    method as described in:
    Capoferri L, Verkade-Vreeker MCA, Buitenhuis D, Commandeur JNM, Pastor M,
    Vermeulen NPE, et al. (2015) "Linear Interaction Energy Based Prediction
    of Cytochrome P450 1A2 Binding Affinities with Reliability Estimation."
    PLoS ONE 10(11): e0142232. https://doi.org/10.1371/journal.pone.0142232
    The workflow uses data from the pre-calibrated CYP1A2 model created using
    the eTOX ALLIES Linear Interaction Energy pipeline (liemodel parameter).
    Pre-calculated molecular dynamics trajectory LIE energy values are
    available for bound and unbound ligand cases (bound_trajectory,
    unbound_trajectory respectively)
    """

    def authorize_request(self, uri, claims):
        """
        Microservice specific authorization method.
        Will always be called when the service first tries to register with the
        broker. It returns True (= authorized) by default.
        """
        return True

    @chainable
    def on_run(self):
        # Ligand to make prediction for
        ligand = 'O1[C@@H](CCC1=O)CCC'
        ligand_format = 'smi'
        liemodel = os.path.join(os.getcwd(), '1A2_model')

        # CYP1A2 pre-calibrated model.
        # Fixed: pickle files must be opened in binary mode; the original
        # passed a text-mode handle to pickle.load() and leaked it.
        modelpicklefile = os.path.join(liemodel, 'params.pkl')
        with open(modelpicklefile, 'rb') as pkl_file:
            modelfile = pickle.load(pkl_file)

        # Pre-calculated MD LIE energy trajectories for the bound/unbound cases.
        unbound_trajectory = os.path.join(os.getcwd(), "unbound_trajectory.ene")
        bound_trajectory = [os.path.join(os.getcwd(), "bound_trajectory.ene")]
        decompose_files = [os.path.join(os.getcwd(), "decompose_dataframe.ene")]

        # Build Workflow
        wf = Workflow(project_dir='./lie_prediction')
        wf.task_runner = self

        # STAGE 5. PYLIE FILTERING, AD ANALYSIS AND BINDING-AFFINITY PREDICTION
        # Collect Gromacs bound and unbound MD energy trajectories in a dataframe
        t18 = wf.add_task('Create mdframe',
                          task_type='WampTask',
                          uri='mdgroup.lie_pylie.endpoint.collect_energy_trajectories')
        t18.set_input(unbound_trajectory=unbound_trajectory,
                      bound_trajectory=bound_trajectory,
                      lie_vdw_header="Ligand-Ligenv-vdw",
                      lie_ele_header="Ligand-Ligenv-ele")

        # Determine stable regions in MDFrame and filter
        t19 = wf.add_task('Detect stable regions',
                          task_type='WampTask',
                          uri='mdgroup.lie_pylie.endpoint.filter_stable_trajectory')
        t19.set_input(do_plot=True,
                      minlength=45,
                      workdir='/tmp/mdstudio/lie_pylie')
        wf.connect_task(t18.nid, t19.nid, 'mdframe')

        # Extract average LIE energy values from the trajectory
        t20 = wf.add_task('LIE averages',
                          task_type='WampTask',
                          uri='mdgroup.lie_pylie.endpoint.calculate_lie_average')
        wf.connect_task(t19.nid, t20.nid, filtered_mdframe='mdframe')

        # Calculate dG using pre-calibrated model parameters
        t21 = wf.add_task('Calc dG',
                          task_type='WampTask',
                          uri='mdgroup.lie_pylie.endpoint.liedeltag')
        t21.set_input(alpha_beta_gamma=modelfile['LIE']['params'])
        wf.connect_task(t20.nid, t21.nid, 'averaged', averaged='dataframe')

        # Applicability domain: 1. Tanimoto similarity with training set
        t22 = wf.add_task('AD1 tanimoto simmilarity',
                          task_type='WampTask',
                          uri='mdgroup.mdstudio_structures.endpoint.chemical_similarity')
        t22.set_input(test_set=[ligand], mol_format=ligand_format, reference_set=modelfile['AD']['Tanimoto']['smi'],
                      ci_cutoff=modelfile['AD']['Tanimoto']['Furthest'])
        wf.connect_task(t18.nid, t22.nid)

        # Applicability domain: 2. residue decomposition
        t23 = wf.add_task('AD2 residue decomposition',
                          task_type='WampTask',
                          uri='mdgroup.lie_pylie.endpoint.adan_residue_decomp',
                          inline_files=False)
        t23.set_input(model_pkl=modelpicklefile, decompose_files=decompose_files)
        wf.connect_task(t18.nid, t23.nid)

        # Applicability domain: 3. deltaG energy range
        t24 = wf.add_task('AD3 dene yrange',
                          task_type='WampTask',
                          uri='mdgroup.lie_pylie.endpoint.adan_dene_yrange')
        t24.set_input(ymin=modelfile['AD']['Yrange']['min'],
                      ymax=modelfile['AD']['Yrange']['max'])
        wf.connect_task(t21.nid, t24.nid, 'liedeltag_file', liedeltag_file='dataframe')

        # Applicability domain: 4. deltaG energy distribution
        t25 = wf.add_task('AD4 dene distribution',
                          task_type='WampTask',
                          uri='mdgroup.lie_pylie.endpoint.adan_dene')
        t25.set_input(model_pkl=modelpicklefile,
                      center=list(modelfile['AD']['Dene']['Xmean']),
                      ci_cutoff=modelfile['AD']['Dene']['Maxdist'])
        wf.connect_task(t21.nid, t25.nid, 'liedeltag_file', liedeltag_file='dataframe')

        # Run the workflow and poll until it finishes.
        wf.run()
        while wf.is_running:
            yield sleep(1)
|
# https://leetcode.com/problems/minimum-remove-to-make-valid-parentheses/
class Solution:
    def minRemoveToMakeValid(self, s: str) -> str:
        """Remove the minimum number of parentheses so *s* becomes valid.

        Single pass with a stack of the positions of currently-unmatched
        '(' characters; a ')' with nothing to match is marked for removal,
        and any '(' left on the stack at the end is removed too.
        """
        to_remove = set()
        open_stack = []
        for pos, ch in enumerate(s):
            if ch == '(':
                open_stack.append(pos)
            elif ch == ')':
                if open_stack:
                    open_stack.pop()
                else:
                    to_remove.add(pos)
        to_remove.update(open_stack)
        return ''.join(ch for pos, ch in enumerate(s) if pos not in to_remove)
|
# nxt.motcont module -- Interface to Linus Atorf's MotorControl NXC
# Copyright (C) 2011 Marcus Wanner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import nxt
import nxt.error
import time
from threading import Lock
class MotorConError(nxt.error.ProtocolError):
    """Raised when MotorControl replies with inconsistent/unexpected data."""
    pass
def _power(power):
pw = abs(power)
psign = int(power >= 0) * 2 - 1
if psign == -1:
pw += 100
pw = str(pw)
pw = '0'*(3-len(pw))+pw #pad front with 0s to make 3 chars
return pw
def _tacho(tacholimit):
tacho = str(tacholimit)
tacho = '0'*(6-len(tacho))+tacho #pad front with 0s to make 6 chars
return tacho
def interval(delay, lastrun):
    """Block until at least *delay* seconds have elapsed since *lastrun*.

    Fixed: the original slept ``0.010 - diff`` with a hard-coded 10 ms window
    regardless of the requested *delay*, which under-waited for the 15 ms
    spacing required between successive commands to the same port.
    """
    remaining = (lastrun + delay) - time.time()
    if remaining > 0:
        time.sleep(remaining)
class MotCont():
    '''
    This class provides an interface to Linus Atorf's MotorControl NXC
    program. It is a wrapper which follows the documentation at
    http://www.mindstorms.rwth-aachen.de/trac/wiki/MotorControl
    and provides command strings and timing intervals as dictated there. To
    use this module, you will need to put MotorControl22.rxe on your NXT
    brick. This file and its corresponding source can be found at
    http://www.mindstorms.rwth-aachen.de/trac/browser/trunk/tools/MotorControl
    You can use nxt_push or any other nxt file manager to put the file on
    the NXT. Before using any of the functions here, use MotCont.start() to
    start the program. You can also start it manually by using the menu on
    the brick. When your script exits, it would be a good idea to do
    b.stop_program().
    '''
    def __init__(self, brick):
        self.brick = brick
        # Serializes ISMOTORREADY exchanges (message_write + message_read pair).
        self.is_ready_lock = Lock()
        self.last_is_ready = time.time() - 1
        # Per-port timestamp of the last command, for protocol pacing.
        self.last_cmd = {}

    def cmd(self, port, power, tacholimit, speedreg=1, smoothstart=0, brake=0):
        '''
        Sends a "CONTROLLED_MOTORCMD" to MotorControl. port is
        nxt.motor.PORT_[A-C], power is -100-100, tacholimit is 0-999999,
        speedreg is whether to try to maintain speeds under load, and brake is
        whether to enable active braking after the motor is in the specified
        place (DIFFERENT from the nxt.motor.turn() function's brake arg).'''
        # Protocol pacing: 10 ms after an ISMOTORREADY, 15 ms between
        # commands to the same port.
        interval(0.010, self.last_is_ready)
        if port in self.last_cmd:
            interval(0.015, self.last_cmd[port])
        # Mode bits: 0x01 brake, 0x02 speed regulation, 0x04 smooth start.
        mode = str(
            0x01 * int(brake) +
            0x02 * int(speedreg) +
            0x04 * int(smoothstart)
        )
        command = '1' + str(port) + _power(power) + _tacho(tacholimit) + mode
        self.brick.message_write(1, command)
        self.last_cmd[port] = time.time()

    def move_to(self, port, power, tachocount, speedreg=1, smoothstart=0, brake=0):
        '''
        Same as cmd(), except that the tachocount is subtracted from the motor's
        current position and that value is used to turn the motor. Power is
        -100-100, but the sign is rewritten as needed.'''
        tacho = nxt.Motor(self.brick, port).get_tacho().block_tacho_count
        tacho = tachocount - tacho
        tsign = int(tacho >= 0) * 2 - 1
        tacho = abs(tacho)
        # Drive in the direction of the remaining distance.
        power = abs(power) * tsign
        self.cmd(port, power, tacho, speedreg, smoothstart, brake)

    def reset_tacho(self, port):
        '''
        Sends a "RESET_ERROR_CORRECTION" to MotorControl, which causes it to
        reset the current tacho count for that motor.'''
        interval(0.010, self.last_is_ready)
        self.brick.message_write(1, '2' + str(port))
        self.last_cmd[port] = time.time()

    def is_ready(self, port):
        '''
        Sends an "ISMOTORREADY" to MotorControl and returns the reply.'''
        interval(0.010, self.last_is_ready)
        with self.is_ready_lock:
            self.brick.message_write(1, '3' + str(port))
            time.sleep(0.015)  # 10ms pause from the docs seems to not be adequate
            reply = self.brick.message_read(0, 1, 1)[1]
            if reply[0] != str(port):
                # Fixed: the original used the Python-2-only
                # ``raise Exc, msg`` statement, which is a SyntaxError on
                # Python 3; the call form is valid on both.
                raise MotorConError('Wrong port returned from ISMOTORREADY')
        self.last_is_ready = time.time()
        return bool(int(reply[1]))

    def set_output_state(self, port, power, tacholimit, speedreg=1):
        '''
        Sends a "CLASSIC_MOTORCMD" to MotorControl. Brick is a brick object,
        port is nxt.motor.PORT_[A-C], power is -100-100, tacholimit is 0-999999,
        speedreg is whether to try to maintain speeds under load, and brake is
        whether to enable active braking after the motor is in the specified
        place (DIFFERENT from the nxt.motor.turn() function's brake arg).'''
        interval(0.010, self.last_is_ready)
        if port in self.last_cmd:
            interval(0.015, self.last_cmd[port])
        command = '4' + str(port) + _power(power) + _tacho(tacholimit) + str(speedreg)
        self.brick.message_write(1, command)
        self.last_cmd[port] = time.time()

    def start(self, version=22):
        '''
        Starts the MotorControl program on the brick. It needs to already be
        present on the brick's flash and named MotorControlXX.rxc, where XX is
        the version number passed as the version arg (default is whatever is
        bundled with this version of nxt-python).'''
        try:
            # Stop whatever program is currently running (if any).
            self.brick.stop_program()
        except nxt.error.DirProtError:
            pass
        self.brick.start_program('MotorControl%d.rxe' % version)
        time.sleep(0.1)

    def stop(self):
        '''
        Used to stop the MotorControl program. All this actually does is stop
        the currently running rxe.'''
        self.brick.stop_program()
|
import torch as th
import torch.nn as nn
import torch.nn.functional as functional
import torch.nn.init as INIT
class TransEScore(nn.Module):
    """TransE scoring: ``gamma - || head + relation - tail ||_1``."""

    def __init__(self, gamma):
        super(TransEScore, self).__init__()
        self.gamma = gamma

    def edge_func(self, edges):
        # Translate the head by the relation and measure the L1 distance
        # to the tail; larger score = more plausible triple.
        translated = edges.src['emb'] + edges.data['emb'] - edges.dst['emb']
        return {'score': self.gamma - th.norm(translated, p=1, dim=-1)}

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass

    def load(self, path, name):
        pass

    def create_neg(self, neg_head):
        # Capture gamma so the returned closure does not hold `self`.
        gamma = self.gamma
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                dim = heads.shape[1]
                negatives = heads.reshape(num_chunks, neg_sample_size, dim)
                positives = (tails - relations).reshape(num_chunks, chunk_size, dim)
                return gamma - th.cdist(positives, negatives, p=1)
            return fn

        def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
            dim = heads.shape[1]
            positives = (heads + relations).reshape(num_chunks, chunk_size, dim)
            negatives = tails.reshape(num_chunks, neg_sample_size, dim)
            return gamma - th.cdist(positives, negatives, p=1)
        return fn
class DistMultScore(nn.Module):
    """DistMult scoring: ``sum(head * relation * tail)`` (trilinear dot)."""

    def __init__(self):
        super(DistMultScore, self).__init__()

    def edge_func(self, edges):
        head = edges.src['emb']
        tail = edges.dst['emb']
        rel = edges.data['emb']
        # Element-wise trilinear product, summed over the embedding dim.
        score = head * rel * tail
        # TODO: check if there exists minus sign and if gamma should be used here(jin)
        return {'score': th.sum(score, dim=-1)}

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass

    def load(self, path, name):
        pass

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))

    def create_neg(self, neg_head):
        # Returns a batched scorer for negative samples; scoring against the
        # negatives reduces to a batched matrix multiply.
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                hidden_dim = heads.shape[1]
                heads = heads.reshape(num_chunks, neg_sample_size, hidden_dim)
                heads = th.transpose(heads, 1, 2)
                tmp = (tails * relations).reshape(num_chunks, chunk_size, hidden_dim)
                return th.bmm(tmp, heads)
            return fn
        else:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                hidden_dim = tails.shape[1]
                tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
                tails = th.transpose(tails, 1, 2)
                tmp = (heads * relations).reshape(num_chunks, chunk_size, hidden_dim)
                return th.bmm(tmp, tails)
            return fn
class ComplExScore(nn.Module):
    """ComplEx scoring: ``Re(<head, relation, conj(tail)>)`` with embeddings
    stored as [real half | imaginary half] along the last dimension."""

    def __init__(self):
        super(ComplExScore, self).__init__()

    def edge_func(self, edges):
        # Split each embedding into its real and imaginary halves.
        real_head, img_head = th.chunk(edges.src['emb'], 2, dim=-1)
        real_tail, img_tail = th.chunk(edges.dst['emb'], 2, dim=-1)
        real_rel, img_rel = th.chunk(edges.data['emb'], 2, dim=-1)
        # Real part of the complex trilinear product h * r * conj(t).
        score = real_head * real_tail * real_rel \
                + img_head * img_tail * real_rel \
                + real_head * img_tail * img_rel \
                - img_head * real_tail * img_rel
        # TODO: check if there exists minus sign and if gamma should be used here(jin)
        return {'score': th.sum(score, -1)}

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass

    def load(self, path, name):
        pass

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))

    def create_neg(self, neg_head):
        # Fold the relation into the positive side (with conjugation as
        # appropriate), then score against negatives via a batched matmul.
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                hidden_dim = heads.shape[1]
                emb_real = tails[..., :hidden_dim // 2]
                emb_imag = tails[..., hidden_dim // 2:]
                rel_real = relations[..., :hidden_dim // 2]
                rel_imag = relations[..., hidden_dim // 2:]
                # tail * conj(relation), expressed on real/imag halves.
                real = emb_real * rel_real + emb_imag * rel_imag
                imag = -emb_real * rel_imag + emb_imag * rel_real
                emb_complex = th.cat((real, imag), dim=-1)
                tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
                heads = heads.reshape(num_chunks, neg_sample_size, hidden_dim)
                heads = th.transpose(heads, 1, 2)
                return th.bmm(tmp, heads)
            return fn
        else:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                hidden_dim = heads.shape[1]
                emb_real = heads[..., :hidden_dim // 2]
                emb_imag = heads[..., hidden_dim // 2:]
                rel_real = relations[..., :hidden_dim // 2]
                rel_imag = relations[..., hidden_dim // 2:]
                # head * relation, expressed on real/imag halves.
                real = emb_real * rel_real - emb_imag * rel_imag
                imag = emb_real * rel_imag + emb_imag * rel_real
                emb_complex = th.cat((real, imag), dim=-1)
                tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
                tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
                tails = th.transpose(tails, 1, 2)
                return th.bmm(tmp, tails)
            return fn
class RESCALScore(nn.Module):
    """RESCAL scoring: ``head^T * M_rel * tail`` where each relation embedding
    is reshaped into a (relation_dim x entity_dim) matrix."""

    def __init__(self, relation_dim, entity_dim):
        super(RESCALScore, self).__init__()
        self.relation_dim = relation_dim
        self.entity_dim = entity_dim

    def edge_func(self, edges):
        head = edges.src['emb']
        tail = edges.dst['emb'].unsqueeze(-1)
        rel = edges.data['emb']
        # Interpret the flat relation embedding as a bilinear-form matrix.
        rel = rel.view(-1, self.relation_dim, self.entity_dim)
        score = head * th.matmul(rel, tail).squeeze(-1)
        # TODO: check if use self.gamma
        return {'score': th.sum(score, dim=-1)}
        # return {'score': self.gamma - th.norm(score, p=1, dim=-1)}

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass

    def load(self, path, name):
        pass

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))
|
__title__ = 'pycimvp'
__description__ = 'A Python library project for TDD + CI.'
__url__ = 'https://pycimvp.readthedocs.io/'
__version__ = '1.0.3'
# __build__ = 0x022501
__author__ = 'Lee Shiueh'
__author_email__ = 'lee.shiueh@gmail.com'
__license__ = 'Apache 2.0'
# __copyright__ = 'Copyright 2021'
|
import os
print("""
/*
Uses Testcases from https://github.com/nst/JSONTestSuite/
MIT License
Copyright (c) 2016 Nicolas Seriot
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
""")
print("use yalrjson::Parser;")

# Emit one Rust test function per JSON test-case file from the
# nst/JSONTestSuite corpus: y_ cases must parse, n_ cases must fail,
# i_ (implementation-defined) cases just must not panic.
for file in os.listdir("test_parsing"):
    if not file[0] in ['y', 'n', 'i']:
        continue
    data = ""
    try:
        with open("test_parsing/" + file, 'r') as myfile:
            data=myfile.read().replace('\n', '')
    except UnicodeDecodeError:
        # Ignore unicode decode errors as those are out of scope for our project
        continue
    # Sanitize the filename into a valid Rust identifier.
    # NOTE(review): file[:-1] drops only the last character of ".json" before
    # the replacements, and the y-branch strips another 5 chars below — the
    # resulting test names look accidental; confirm the intended naming.
    file = file[:-1] \
        .replace("-", "dash") \
        .replace("+", "plus") \
        .replace(".", "dot") \
        .replace("#", "hashtag")
    if file.startswith("y"):
        print("""#[test]
fn {}() {{
assert!(Parser::parse_str(r#"{}"#).is_ok());
}}
""".format(file[:-5], data))
    elif file.startswith("n"):
        print("""#[test]
fn {}() {{
assert!(Parser::parse_str(r#"{}"#).is_err());
}}
""".format(file, data))
    elif file.startswith("i"):
        print("""#[test]
fn {}() {{
Parser::parse_str(r#"{}"#);
}}
""".format(file, data))
|
from typing import List, Optional
import torch
import torch.distributed as dist
from colossalai.zero.shard_utils import BaseShardStrategy
from colossalai.zero.sharded_model._zero3_utils import get_shard
from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor
from colossalai.utils import get_current_device
class TensorShardStrategy(BaseShardStrategy):
    """Shard strategy that keeps only this rank's slice of each tensor's
    payload, and reassembles the full payload via all_gather on demand."""

    def __init__(self, process_group: Optional[dist.ProcessGroup] = None) -> None:
        super().__init__(process_group)

    def shard(self, tensor_list: List[ShardedTensor]):
        """Replace each tensor's payload with this rank's shard, in place."""
        for t in tensor_list:
            self._shard_tensor(t)

    def gather(self, tensor_list: List[ShardedTensor]):
        """Restore each tensor's full payload from all ranks' shards."""
        for t in tensor_list:
            self._gather_tensor(t)

    def _shard_tensor(self, t: ShardedTensor):
        # Idempotent: already-sharded tensors are left untouched.
        if t.is_sharded:
            return
        sharded_payload, _ = get_shard(t.payload, self.local_rank, self.world_size)
        t.reset_payload(sharded_payload)
        t.is_sharded = True

    def _gather_tensor(self, t: ShardedTensor):
        if not t.is_sharded:
            return
        target_device = t.device  # remember original device to restore later
        buffer_list = []
        payload_numel = t.payload.numel()
        # One CUDA buffer per rank; our own slot holds the local shard.
        # NOTE(review): assumes every rank's shard has exactly payload_numel
        # elements (all_gather requires equal sizes) -- presumably get_shard
        # pads the last shard; confirm.
        for i in range(self.world_size):
            if i == self.local_rank:
                buffer_list.append(t.payload.cuda(get_current_device()))
            else:
                buffer_list.append(torch.zeros(payload_numel, dtype=t.dtype, device=get_current_device()))
        torch.distributed.all_gather(buffer_list,
                                     buffer_list[self.local_rank],
                                     group=self.process_group,
                                     async_op=False)
        # Drop any padding, restore original shape, then move back home.
        gathered_payload = torch.narrow(torch.cat(buffer_list), 0, 0, t.origin_numel).reshape(t.origin_shape)
        t.reset_payload(gathered_payload)
        t.to(target_device)
        t.is_sharded = False
|
# Generated by Django 2.1.7 on 2019-03-02 22:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a soft-delete flag to TorrentInfo.

    preserve_default=False: the default (False) is used only to backfill
    existing rows during this migration and is not kept on the model field.
    """

    dependencies = [
        ('torrents', '0014_auto_20190302_2121'),
    ]

    operations = [
        migrations.AddField(
            model_name='torrentinfo',
            name='is_deleted',
            field=models.BooleanField(default=False),
            preserve_default=False,
        ),
    ]
|
# Package version and author metadata.
__version__ = "0.16.5"
__author__ = "Tulir Asokan <tulir@maunium.net>"
# Submodules (plus the type-checking flag) exported via `import *`.
__all__ = [
    "api",
    "appservice",
    "bridge",
    "client",
    "crypto",
    "errors",
    "util",
    "types",
    "__optional_imports__",
]
from typing import TYPE_CHECKING
# True only while a static type checker analyses the code; used elsewhere to
# guard imports of optional dependencies so runtime never requires them.
__optional_imports__ = TYPE_CHECKING
|
import numpy as np
from satellipy.analysis import audio as Audio
from satellipy.analysis import emotions as Emotions
def analyse_and_get_personality_features(
        username,
        emotions_collection,
        audio_features_collection):
    """Run the audio and emotion analyses for *username* and bundle the
    results into one dict with keys 'audio', 'emotions', 'emotions_stats'."""
    audio_result = Audio.analyse_audio_for_user(
        username, audio_features_collection)
    _, emotion_result, emotion_stats = Emotions.analyse_emotions_of_user(
        username, emotions_collection)
    return {
        'audio': audio_result,
        'emotions': emotion_result,
        'emotions_stats': emotion_stats,
    }
def flatten_features(features):
    """Flatten an analysis dict into a fixed-order 9-element feature vector.

    Order: audio valence, danceability, energy, then the relative frequency
    of each emotion (joy, sadness, anger, disgust, fear, surprise).

    Parameters
    ----------
    features : dict with 'audio', 'emotions' and 'emotions_stats' keys, as
        produced by analyse_and_get_personality_features.

    Returns
    -------
    list of 9 floats.
    """
    total = features['emotions_stats']['count']
    # Avoid division by zero when no emotion observations exist.
    divisor = 1 if total == 0 else total
    averages = features['audio']['averages']
    flat = [
        averages['valence'],
        averages['danceability'],
        averages['energy'],
    ]
    # Single loop instead of six copy-pasted lookups (also drops a stray ';').
    for emotion in ('joy', 'sadness', 'anger', 'disgust', 'fear', 'surprise'):
        flat.append(features['emotions'][emotion]['count'] / divisor)
    return flat
def get_features_for_user(
        username, emotions_collection, audio_features_collection):
    """Convenience wrapper: analyse *username* and return the flattened
    feature vector directly."""
    analysis = analyse_and_get_personality_features(
        username, emotions_collection, audio_features_collection)
    return flatten_features(analysis)
|
# coding: utf-8
import re
import numpy as np
from scipy.ndimage import imread
from scipy.misc import toimage
class ImageProcessor(object):
    """Binarizes an image: each pixel becomes BLACK or WHITE depending on
    its brightness relative to the image maximum.

    NOTE(review): scipy.ndimage.imread and scipy.misc.toimage were removed
    in modern SciPy (>= 1.2); this class requires an old SciPy with PIL.
    """
    BLACK = 0
    WHITE = 254
    LIMIT = 0.45  # threshold on (pixel / max) separating black from white

    @classmethod
    def _maximizer(cls, x, maximum):
        # Map one pixel value to BLACK when x/maximum < LIMIT, else WHITE.
        val = x / maximum
        ret_val = cls.WHITE
        if val < cls.LIMIT:
            ret_val = cls.BLACK
        return ret_val

    @classmethod
    def _normalize_image(cls, image_file):
        # Load as grayscale, then threshold element-wise via np.vectorize.
        image = imread(image_file, flatten=True)
        max_value = np.max(image)
        maximizer_vectorized = np.vectorize(cls._maximizer)
        # NOTE(review): the maximizer is applied TWICE with the original
        # image's max; the second pass re-thresholds already-binarized
        # values (WHITE/max vs LIMIT). Verify this double pass is intended.
        return maximizer_vectorized(
            maximizer_vectorized(image, max_value),
            max_value
        )

    @classmethod
    def normalize_image(cls, image):
        """Accept a filename or an open binary file; return a PIL image."""
        if isinstance(image, str):
            with open(image, 'rb') as imageFile:
                norm_image = cls._normalize_image(imageFile)
        else:
            norm_image = cls._normalize_image(image)
        return toimage(norm_image)
class WorkaroundFixer(object):
    """Reassembles OCR word boxes (regions -> lines -> words) into text lines
    bucketed by approximate Y coordinate, then extracts numeric values that
    follow '=' or '..' markers.

    NOTE(review): the expected input shape (words carrying 'boundingBox' and
    'text') looks like a Microsoft OCR API response -- confirm with callers.
    """

    def __init__(self):
        self.lineList = list()      # flat list of OCR word dicts
        self.linesByY = dict()      # snapped Y coordinate -> list of words
        self.yCoordinates = set()   # never populated in this class
        self.similiarLines = set()  # never populated (typo of "similarLines")

    @staticmethod
    def approximate_y(y):
        # Snap Y to a 25-pixel grid so words of the same printed line, whose
        # boxes differ by a few pixels, land in the same bucket.
        ACCURACY = 25
        approximate = float(y) // ACCURACY
        return int(ACCURACY * approximate)

    @staticmethod
    def normalize_num(bad_string):
        # Replace common OCR look-alike misreads (Latin/Cyrillic letters for
        # digits) and convert to int; raises ValueError if still not numeric.
        bad_letter = {
            'O': '0',
            'o': '0',
            'Б': '6',
            'б': '6',
            'В': '8',
            'в': '8'
        }
        good_letters = list()
        for letter in bad_string:
            if letter in bad_letter:
                good_letters.append(bad_letter.get(letter))
            else:
                good_letters.append(letter)
        return int(''.join(good_letters))

    @staticmethod
    def validate_data(data):
        # NOTE(review): returns False for empty data but None for missing
        # 'regions' -- both falsy, yet inconsistent for callers that care.
        if not data:
            return False
        if not data.get('regions'):
            return None
        return data['regions']

    def _make_lines(self, data):
        # Flatten regions -> lines -> words into self.lineList.
        for region in data:
            for line in region.get('lines', list()):
                self.lineList.extend(
                    line.get('words', list())
                )

    def _sort_by_y(self):
        # Bucket every word by its snapped Y (2nd field of 'boundingBox').
        for line in self.lineList:
            _y = line.get('boundingBox', '').split(',')[1]
            y = self.approximate_y(_y)
            if y not in self.linesByY:
                self.linesByY[y] = list()
            self.linesByY[y].append(line)

    def _make_new_line(self):
        # Rebuild text lines top-to-bottom by joining word texts per bucket.
        res = list()
        if not (self.linesByY and isinstance(self.linesByY, dict)):
            print('Error: No lines found!')
        else:
            for y, lines in sorted(self.linesByY.items()):
                tmp = [line.get('text') for line in lines]
                res.append(' '.join(tmp))
        return res

    def _make_result(self, res):
        # Extract values written as "=123" or "..123" and attach each to the
        # line two positions earlier (assumed to hold the matching label).
        result = dict()
        first_type = re.compile(r'=(\w+)')
        second_type = re.compile(r'\.{2}(\w+)')
        for num, line in enumerate(res):
            r = first_type.search(line) or second_type.search(line)
            if r:
                try:
                    r = self.normalize_num(r.group(1))
                except ValueError:
                    r = None
            if r:
                # num-2 wraps to the end of the list for num < 2 -- assumed
                # not to occur in practice (values follow their labels).
                result[res[num-2]] = r
        return result

    def fix_it(self, data):
        """Entry point: validate raw OCR data, return {label: value}.

        Returns False when the input is empty or has no regions.
        """
        valid_data = self.validate_data(data)
        if not valid_data:
            return False
        self._make_lines(valid_data)
        self._sort_by_y()
        res = self._make_new_line()
        result = self._make_result(res)
        print(res)
        return result
|
import sys
import os
import copy
import traceback
import simplejson as json
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import numpy as np
from typing import List
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.dummy import DummyClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from syscalls import syscalls
from utils import loadDataCached
from plot import plot, plotCDF
from prog import Prog
def __parseCall(call):
    """Split a syscall trace line into (lowercased name, argument text).

    Handles both "name(args)" and "ret = name(args)" forms. The argument
    text is whatever follows the first occurrence of the name, including
    the parentheses.
    """
    head, _, _ = call.partition('(')
    if '=' in head:
        head = head.split('=')[1].strip()
    remainder = call.split(head)[1]
    return head.lower(), remainder
def __processTest(test):
"""
Things to collect:
Times triaging failed programs
signalRun: Number. Remaining sig for TP, FP.
Minimization: Number. Time spent on failed programs. Success TP, FP. Fail TN, FN.
Minimization success stats
"""
ret = []
fn = 'result_' + test
if not os.path.isfile(fn):
return ret;
f = open(fn);
prev_pc = 0;
executeCount = 0;
idx = 0;
ts_cur = 0;
ts_bgn = 0;
status_cur = {
"triagingTotal": 0,
"triagingFail": 0,
"minimizeTotal": 0,
"minimizeFail": 0,
"minimizeNew": 0,
"minimizeTP": 0,
"minimizeFP": 0,
"minimizeTN": 0,
"minimizeFN": 0,
}
minimizeSz = [{}, {}]
sigInit = 0;
corpusProg = False
# Minimize
progStatus = None
inProg = False
curCalls = []
curProg = None
minimizeProgFrom = None
minimizeProgTo = None
minimizeAttempts = []
minimizeSuccess = False
minimizeExec = 0
# Coverage
coverageTotal = set()
coveragePrev = 0
for line in f:
line = line.strip('\n').strip();
if len(line) == 0:
continue;
if line[:3] == '<<<' and line[-3:] == '>>>':
line = line.strip('<<<').strip('>>>')
ts_cur = int(line)
if ts_bgn == 0:
ts_bgn = ts_cur
elif (line == ">" or line[:3] == ">>>") and not inProg:
inProg = True
curCalls = []
elif line == "<" or line == "<<<":
inProg = False
curProg = Prog.newProg(calls=curCalls, ts=ts_cur, signal=0, origin=None)
if progStatus == "MinimizeFrom":
minimizeProgFrom = curProg
elif progStatus == "MinimizeAttempt":
minimizeProgTo = curProg
progStatus = None
elif inProg:
if line[:2] == "> ":
line = line.strip("> ")
if len(line) == 0:
continue
curCalls.append(' '.join(__parseCall(line)))
elif line[:2] == '= ':
tmp = line.split();
try:
pc = int(tmp[1], 16)
except:
continue
if (pc & 0xffff000000000000) == 0xffff000000000000:
coverageTotal.add(pc);
elif (pc & 0xffffffff00000000) == 0:
coverageTotal.add(pc);
elif line[:2] == "- " and "executeRaw" in line:
executeCount += 1;
status = copy.deepcopy(status_cur);
status["executeCount"] = executeCount;
status["ts"] = (ts_cur - ts_bgn) / 1000000000;
ret.append(status)
coveragePrev = len(coverageTotal)
elif "# signalRun 0: " in line:
tmp = line.split("# signalRun 0: ")[1].split('+');
sigInit = int(tmp[0])
elif line[:8] == "# Result":
tmp = line.strip("# Result: ").split(',')
if int(tmp[2]) > 0:
corpusProg = True
else:
corpusProg = False
status_cur["triagingFail"] += 1;
# print(tmp[2], status_cur["triagingFail"])
status_cur["triagingTotal"] += 1;
elif line[-8:] == "Minimize":
progStatus = "MinimizeFrom"
elif "# Minimize Attempt" in line:
progStatus = "MinimizeAttempt"
elif "# Minimize Fail" in line or "# Minimize Success" in line:
minimizeProgFrom.childrenMinimize.append(minimizeProgTo)
entry = {
"from": minimizeProgFrom,
"to": minimizeProgTo,
"success": "Success" in line
}
minimizeAttempts.append(entry)
elif "# Minimize" in line and "->" in line:
tmp = line.split(': ')[1].replace('->',' ').replace('+', ' ').replace(',', ' ').split();
if tmp[3] == tmp[5]:
minimizeSuccess = True
minimizeExec += 1
if len(coverageTotal) > coveragePrev:
status_cur["minimizeNew"] += 1
f.close();
return ret, minimizeAttempts;
def _tally_confusion(preds, labels, counts):
    """Accumulate confusion-matrix counts in place.

    counts is a 4-slot list laid out as [TP, TN, FP, FN] (positive class is
    label 1), matching the column order CrossValidation prints.
    """
    for p, t in zip(preds, labels):
        if p == 1 and t == 1:
            counts[0] += 1
        elif p == 0 and t == 0:
            counts[1] += 1
        elif p == 1 and t == 0:
            counts[2] += 1
        elif p == 0 and t == 1:
            counts[3] += 1


def CrossValidation(data, y, vocabulary=syscalls, train_size=0.2, batch=10000, mode=None, test_name=""):
    """Evaluate six classifiers batch-by-batch on (data, y) and print one
    tab-separated result line.

    Parameters
    ----------
    data : feature rows, or raw strings when mode is "TF-IDF"/"Count"
    y : array-like of 0/1 labels
    vocabulary : vocabulary for the incremental-NB vectorizer
    train_size : float fraction of `batch`, or int number of training rows
    batch : rows per evaluation window
    mode : None (numeric features), "TF-IDF" or "Count" (text features)
    test_name : label prefix for the printed result line

    Output columns: test_name, #rows, then per model TP/TN/FP/FN (each
    normalized by #rows) and mean accuracy (-1 if never evaluated).
    """
    idx_bgn = 0
    idx_end = batch
    models = ["Dummy", "NB", "NB-I", "SVM", "KNN", "NN"]
    scores = {m: [] for m in models}
    number_total = {m: [0, 0, 0, 0] for m in models}
    # The incremental NB model persists across batches (partial_fit).
    clf_NBI = MultinomialNB()
    vectorizer_NBI = CountVectorizer(vocabulary=vocabulary)
    while idx_bgn < len(data):
        __data = data[idx_bgn:idx_end]
        __y = y[idx_bgn:idx_end]
        scores_local = {m: [] for m in models}
        number_local = {m: [0, 0, 0, 0] for m in models}
        try:
            # Split the batch into a training prefix and a test suffix.
            if type(train_size) == float:
                split_point = int(batch * train_size)
                if split_point > len(__data):
                    # FIX: advance before skipping a short trailing batch --
                    # the old bare `continue` never advanced idx_bgn and
                    # therefore looped forever.
                    idx_bgn += batch
                    idx_end += batch
                    continue
            elif type(train_size) == int and train_size < len(__data):
                split_point = train_size
            else:
                idx_bgn += batch  # FIX: same infinite-loop hazard as above
                idx_end += batch
                continue
            d_train = __data[:split_point]
            d_test = __data[split_point:]
            y_train = __y[:split_point]
            y_test = __y[split_point:]
            if mode == "TF-IDF":
                vectorizer = TfidfVectorizer()
                X_train = vectorizer.fit_transform(d_train).toarray()
                X_test = vectorizer.transform(d_test).toarray()
            elif mode == "Count":
                vectorizer = CountVectorizer()
                X_train = vectorizer.fit_transform(d_train).toarray()
                X_test = vectorizer.transform(d_test).toarray()
            else:
                X_train = np.array(d_train)
                X_test = np.array(d_test)
                X_train_NBI = X_train
                X_test_NBI = X_test
            # Dummy baseline
            sys.stderr.write("Dummy ")
            clf_dummy = DummyClassifier(strategy='uniform')
            clf_dummy.fit(X_train, y_train)
            _tally_confusion(clf_dummy.predict(X_test), y_test, number_local["Dummy"])
            scores_local["Dummy"].append(clf_dummy.score(X_test, y_test))
            # NB
            sys.stderr.write("NB ")
            clf_NB = MultinomialNB()
            clf_NB.fit(X_train, y_train)
            _tally_confusion(clf_NB.predict(X_test), y_test, number_local["NB"])
            scores_local["NB"].append(clf_NB.score(X_test, y_test))
            # NB-I: incremental NB, trained cumulatively across batches.
            # Only meaningful for raw numeric features.
            sys.stderr.write("NB-I ")
            if (mode != "TF-IDF" and mode != "Count"):
                clf_NBI.partial_fit(X_train_NBI, y_train, classes=[0, 1])
                for i in range(len(X_test)):
                    pred = clf_NBI.predict([X_test_NBI[i]])[0]
                    _tally_confusion([pred], [y_test[i]], number_local["NB-I"])
                scores_local["NB-I"].append(
                    (number_local["NB-I"][0] + number_local["NB-I"][1]) / len(X_test))
            # SVM -- skipped for text features (too slow)
            sys.stderr.write("SVM ")
            if (mode != "TF-IDF" and mode != "Count"):
                clf_SVN = SVC(gamma='auto')
                clf_SVN.fit(X_train, y_train)
                _tally_confusion(clf_SVN.predict(X_test), y_test, number_local["SVM"])
                scores_local["SVM"].append(clf_SVN.score(X_test, y_test))
            # KNN -- skipped for text features (too slow)
            sys.stderr.write("KNN ")
            if (mode != "TF-IDF" and mode != "Count"):
                clf_KNN = KNeighborsClassifier(n_neighbors=1)
                clf_KNN.fit(X_train, y_train)
                _tally_confusion(clf_KNN.predict(X_test), y_test, number_local["KNN"])
                scores_local["KNN"].append(clf_KNN.score(X_test, y_test))
            # NN -- skipped for text features (too slow)
            sys.stderr.write("NN ")  # FIX: progress tag wrongly said "KNN "
            if (mode != "TF-IDF" and mode != "Count"):
                clf_NN = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
                clf_NN.fit(X_train, y_train)
                _tally_confusion(clf_NN.predict(X_test), y_test, number_local["NN"])
                scores_local["NN"].append(clf_NN.score(X_test, y_test))
            sys.stderr.write("DONE\n")
        except Exception:  # was a bare except; keep best-effort semantics
            traceback.print_exc()
            idx_bgn += batch
            idx_end += batch
            continue
        idx_bgn += batch
        idx_end += batch
        # Merge this batch's results into the global accumulators.
        for model in models:
            scores[model] += scores_local[model]
        for model in number_total:
            for i in range(4):
                number_total[model][i] += number_local[model][i]
    for model in scores:
        scores[model] = np.mean(scores[model]) if len(scores[model]) > 0 else -1
    denom = len(data) or 1  # FIX: avoid ZeroDivisionError on empty input
    for model in number_total:
        for i in range(4):
            number_total[model][i] /= denom
    out = "%s\t%d" % (test_name, len(data))
    for m in models:
        out += "\t%f\t%f\t%f\t%f\t%f" % (
            number_total[m][0], number_total[m][1], number_total[m][2], number_total[m][3], scores[m]
        )
    print(out)
    sys.stdout.flush()
def MLMinimize(attempts):
    """Benchmark feature sets and classifiers on predicting whether a
    minimization attempt will succeed. No-op for fewer than 100 attempts.

    attempts: list of {"from": Prog, "to": Prog, "success": bool}.
    """
    if len(attempts) < 100:
        return
    # Label: 1 iff the attempt succeeded.
    data_success = [(1 if d["success"] is True else 0) for d in attempts]
    y = np.array(data_success)
    # Feature sets of increasing richness: program lengths, plus argument
    # counts, plus argument sizes of the source/target programs.
    data_2 = [
        [
            len(d["from"]), len(d["to"])
        ] for d in attempts]
    data_4 = [
        [
            len(d["from"]), len(d["to"]),
            d["from"].argCount, d["to"].argCount,
        ] for d in attempts]
    data_6 = [
        [
            len(d["from"]), len(d["to"]),
            d["from"].argCount, d["to"].argCount,
            d["from"].argSize, d["to"].argSize
        ] for d in attempts]
    # CrossValidation(data, y)
    # TF-IDF
    # Disabled experiment: syscall-name text features (kept for reference).
    '''
    data_from = []
    data_to = []
    data_all = []
    data_diff = []
    for d in attempts:
        tmp_from = []
        for call in d["from"].calls:
            tmp_from.append(call.split()[0])
        data_from.append(" ".join(tmp_from))
        tmp_to = []
        for call in d["to"].calls:
            tmp_to.append(call.split()[0])
        data_to.append(" ".join(tmp_to))
        tmp_all = []
        tmp_diff = []
        for call in tmp_from:
            tmp_all.append("FROM_" + call)
            if not call in tmp_to:
                tmp_diff.append(call)
        for call in tmp_to:
            tmp_all.append("TO_" + call)
        data_all.append(" ".join(tmp_all))
        data_diff.append(" ".join(tmp_diff))
    voc_all = []
    for call in syscalls:
        voc_all.append("FROM_" + call)
        voc_all.append("TO_" + call)
    print(tmp_from)
    print(tmp_to)
    print(tmp_all)
    print(tmp_diff)
    '''
    # Pass 1: single huge batch, varying absolute training-set size.
    batch = 1000000000
    for train_size in [1000, 2500, 10000, 20000]:
        print("Batch: Inf, Train: %d" % train_size)
        CrossValidation(data_2, y, batch=batch, train_size=train_size, test_name="2-feats")
        CrossValidation(data_4, y, batch=batch, train_size=train_size, test_name="4-feats")
        CrossValidation(data_6, y, batch=batch, train_size=train_size, test_name="6-feats")
        #CrossValidation(data_from, y, batch=batch, train_size=train_size, mode="Count", test_name="TC-From")
        #CrossValidation(data_to, y, batch=batch, train_size=train_size, mode="Count", test_name="TC-To")
        #CrossValidation(data_all, y, batch=batch, train_size=train_size, mode="Count", test_name="TC-All")
        #CrossValidation(data_diff, y, batch=batch, train_size=train_size, mode="Count", test_name="TC-Diff")
    # Pass 2: finite batches with fractional training sizes.
    for batch in [10000, 20000]:
        for train_size in [0.05, 0.1, 0.2]:
            print("Batch: %d, Train: %f" % (batch, train_size))
            CrossValidation(data_2, y, batch=batch, train_size=train_size, test_name="2-feats")
            CrossValidation(data_4, y, batch=batch, train_size=train_size, test_name="4-feats")
            CrossValidation(data_6, y, batch=batch, train_size=train_size, test_name="6-feats")
            #CrossValidation(data_from, y, batch=batch, train_size=train_size, mode="Count", test_name="TC-From")
            #CrossValidation(data_to, y, batch=batch, train_size=train_size, mode="Count", test_name="TC-To")
            #CrossValidation(data_all, y, batch=batch, train_size=train_size, mode="Count", test_name="TC-All", vocabulary=voc_all)
            #CrossValidation(data_diff, y, batch=batch, train_size=train_size, mode="Count", test_name="TC-Diff")
def analyzeMinimize(test_name, attempts):
    """Plot the CDF of per-source-program minimization success rates.

    Groups attempts by their source ("from") and target ("to") programs,
    computes the fraction of successful attempts per source program, and
    writes the CDF to minimize_success_<test_name>.png.
    """
    by_from = {}
    by_to = {}
    for attempt in attempts:
        by_from.setdefault(attempt["from"], []).append(attempt["success"])
        by_to.setdefault(attempt["to"], []).append(attempt["success"])
    # print(by_from)
    success_rate_from = []
    for outcomes in by_from.values():
        hits = sum(1.0 for flag in outcomes if flag)
        success_rate_from.append(hits / len(outcomes))
    print(success_rate_from)
    plotCDF({"Success Rate": success_rate_from}, xlabel="Success Rate", ylabel="CDF", outfile="minimize_success_%s.png" % test_name)
def plotTriage(tests=["RAMINDEX", "KCOV"]):
    """For every test, parse its result log and plot triaging counters.

    The default list argument is read-only here, so the shared-mutable-
    default pitfall does not apply.
    """
    data = {}
    for test in tests:
        #__data, minimizeAttempts = loadDataCached('triage_%s.cache', test, __processTest);
        snapshots, attempts = __processTest(test)
        print(len(snapshots), snapshots[-1] if len(snapshots) > 0 else -1)
        # Triaging counters as (executions, seconds, value) triples.
        data = {
            "Total": [(s["executeCount"], s["ts"], s["triagingTotal"]) for s in snapshots],
            "Wasted": [(s["executeCount"], s["ts"], s["triagingFail"]) for s in snapshots],
        }
        plot(data, 0, 2, xlabel="Programs executed", ylabel="Number", title="", outfile="triage_total_%s.png" % test)
        # Minimization total
        if "Default" in test:
            print(test)
            # MLMinimize(attempts)
            # analyzeMinimize(test, attempts)
            exit()
        #plot(data, 0, 2, xlabel="Programs executed", ylabel="Number", title="", outfile="minimize_accuracy_%s.png" % test);
|
from textblob import TextBlob
import sys
import tweepy
from dotenv import load_dotenv
import os
# Load Twitter API credentials from a .env file into the environment.
load_dotenv()
api_key = os.getenv('PUBLIC_API')
api_key_secret = os.getenv('PRIVATE_API')
access_token = os.getenv('ACCESS_TOKEN')
access_token_secret = os.getenv('ACCESS_TOKEN_SECRET')
auth_handler = tweepy.OAuthHandler(consumer_key=api_key, consumer_secret=api_key_secret)
auth_handler.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth_handler=auth_handler)
search_term = 'stocks'
tweet_amount = 200
# NOTE(review): API.search was renamed to API.search_tweets in Tweepy v4;
# this script requires Tweepy < 4 as written.
tweets = tweepy.Cursor(api.search, q=search_term, lang='en').items(tweet_amount)
# Sentiment tally over the fetched tweets.
polarity = 0
positive = 0
neutral = 0
negative = 0
for tweet in tweets:
    # preprocessing: drop the retweet marker and leading "@user:" prefixes
    final_text = tweet.text.replace('RT', '')
    if final_text.startswith(' @'):
        position = final_text.index(':')
        final_text = final_text[position+2:]
    if final_text.startswith(' @'):
        position = final_text.index(' ')
        final_text = final_text[position+2:]
    # print(final_text)
    # analysis: TextBlob polarity lies in [-1, 1]; its sign picks the bucket
    analysis = TextBlob(final_text)
    tweet_polarity = analysis.polarity
    if tweet_polarity > 0.00 :
        positive += 1
    elif tweet_polarity < 0.00 :
        negative += 1
    elif tweet_polarity == 0.00:
        neutral += 1
    polarity += analysis.polarity
# Aggregate polarity followed by the per-bucket counts.
print(polarity)
print(f'Amount of positive tweets : {positive}')
print(f'Amount of negative tweets : {negative}')
print(f'Amount of neutral tweets : {neutral}')
import numpy as np
import popsynth
from dask.distributed import Client, LocalCluster
from popsynth.aux_samplers.lognormal_aux_sampler import Log10NormalAuxSampler
from popsynth.aux_samplers.normal_aux_sampler import NormalAuxSampler
from popsynth.aux_samplers.trunc_normal_aux_sampler import \
TruncatedNormalAuxSampler
from cosmogrb.instruments.gbm import GBMGRB_CPL, GBM_CPL_Universe
from cosmogrb.utils.logging import update_logging_level
# Verbose logging so the generation run is fully traceable.
update_logging_level("DEBUG")
# this is a script that is used to generate the test data for the
# pytest. it is meant to be run from the top of the package
class TDecaySampler(popsynth.AuxiliarySampler):
    """Derives the pulse decay constant from the secondary t90 and trise
    samplers (latent quantity, not directly observed)."""

    _auxiliary_sampler_name = "TDecaySampler"

    def __init__(self):
        super().__init__(name="tdecay", observed=False)

    def true_sampler(self, size):
        t90 = self._secondary_samplers["t90"].true_values
        trise = self._secondary_samplers["trise"].true_values
        # Same arithmetic as before, just with named intermediates.
        scale = 1.0 / 50.0
        root_term = np.sqrt(trise) * np.sqrt(20 * t90 + trise)
        self._true_values = scale * (10 * t90 + trise + root_term)
class DurationSampler(popsynth.AuxiliarySampler):
    """Sets the burst duration to 1.5 * t90 from the secondary t90 sampler."""

    _auxiliary_sampler_name = "DurationSampler"

    def __init__(self):
        super().__init__(name="duration", observed=False)

    def true_sampler(self, size):
        self._true_values = 1.5 * self._secondary_samplers["t90"].true_values
# ---- True parameter values for the synthetic GRB population ----
# (td_true and sigma_true are defined but not used below.)
a_true =1.
r0_true = 0.13
rise_true = 0.1
decay_true = 4.0
peak_true = 1.5
td_true = 3.0
sigma_true = 1.0
Lmin_true = 1e50
alpha_true = 1.5
r_max = 5.0
# Pareto luminosity function with an SFR-like redshift evolution.
pop_gen = popsynth.populations.ParetoSFRPopulation(
    r0=r0_true,
    a=a_true,
    rise=rise_true,
    decay=decay_true,
    peak=peak_true,
    Lmin=Lmin_true,
    alpha=alpha_true,
    r_max=r_max,
)
# Spectral peak energy: log10-normal.
ep = Log10NormalAuxSampler(name="ep", observed=False)
ep.mu = 2
ep.tau = 0.5
# Spectral index: truncated normal in [-1.5, 0].
alpha = TruncatedNormalAuxSampler(name="alpha", observed=False)
alpha.lower = -1.5
alpha.upper = 0.0
alpha.mu = -1.0
alpha.tau = 0.25
tau = TruncatedNormalAuxSampler(name="tau", observed=False)
tau.lower = 1.5
tau.upper = 2.5
tau.mu = 2.0
tau.tau = 0.25
# Light-curve shape: rise time and t90 feed the derived samplers below.
trise = TruncatedNormalAuxSampler(name="trise", observed=False)
trise.lower = 0.01
trise.upper = 5.0
trise.mu = 1.0
trise.tau = 1.0
t90 = Log10NormalAuxSampler(name="t90", observed=False)
t90.mu = 1
t90.tau = 0.25
# tdecay derives from t90 + trise; duration derives from t90 alone.
tdecay = TDecaySampler()
duration = DurationSampler()
tdecay.set_secondary_sampler(t90)
tdecay.set_secondary_sampler(trise)
duration.set_secondary_sampler(t90)
pop_gen.add_observed_quantity(ep)
pop_gen.add_observed_quantity(tau)
pop_gen.add_observed_quantity(alpha)
pop_gen.add_observed_quantity(tdecay)
pop_gen.add_observed_quantity(duration)
# Draw the survey and persist it for the test suite.
pop = pop_gen.draw_survey()
pop.writeto("test_grb_pop.h5")
|
import os
from setuptools import setup
VERSION = "0.4"
def get_long_description():
    """Return the text of the README.md that sits next to this setup.py."""
    readme_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "README.md"
    )
    with open(readme_path, encoding="utf8") as fp:
        return fp.read()
setup(
name="stepmania-to-sqlite",
description="Save data about your Stepmania library to a SQLite database",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Tobias Kunze",
author_email="r@rixx.de",
url="https://github.com/rixx/stepmania-to-sqlite",
project_urls={
"Source": "https://github.com/rixx/stepmania-to-sqlite",
"Issues": "https://github.com/rixx/stepmania-to-sqlite/issues",
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: OSI Approved",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Database",
],
keywords="stepmania dance sqlite export dogsheep",
license="Apache License, Version 2.0",
version=VERSION,
packages=["stepmania_to_sqlite"],
entry_points="""
[console_scripts]
stepmania-to-sqlite=stepmania_to_sqlite.cli:update
""",
install_requires=[
"beautifulsoup4~=4.8",
"click",
"python-dateutil",
"requests",
"sqlite-utils~=2.4.4",
"tqdm~=4.36",
],
)
|
"""Test the skip_celltags option."""
import os
from typing import TYPE_CHECKING
from nbqa.__main__ import main
if TYPE_CHECKING:
from _pytest.capture import CaptureFixture
def test_skip_celltags_cli(capsys: "CaptureFixture") -> None:
    """
    Check that cells tagged via --nbqa-skip-celltags are excluded from the
    flake8 run. Shouldn't alter the notebook content.

    Parameters
    ----------
    capsys
        Pytest fixture to capture stdout and stderr.
    """
    # check passing both absolute and relative paths
    notebook = os.path.join("tests", "data", "notebook_for_testing.ipynb")
    main(["flake8", notebook, "--nbqa-skip-celltags=skip-flake8,flake8-skip"])
    captured = capsys.readouterr()
    expected = f"{notebook}:cell_4:1:1: F401 'random.randint' imported but unused\n"
    assert captured.out == expected
    assert captured.err == ""
def test_skip_celltags_pyprojecttoml(capsys: "CaptureFixture") -> None:
    """
    Check that cells tagged via pyproject.toml's skip_celltags section are
    excluded from the flake8 run. Shouldn't alter the notebook content.

    Parameters
    ----------
    capsys
        Pytest fixture to capture stdout and stderr.
    """
    # check passing both absolute and relative paths
    config = "[tool.nbqa.skip_celltags]\n" 'flake8 = ["skip-flake8", "flake8-skip"]\n'
    with open("pyproject.toml", "w") as handle:
        handle.write(config)
    notebook = os.path.join("tests", "data", "notebook_for_testing.ipynb")
    main(["flake8", notebook])
    captured = capsys.readouterr()
    expected = f"{notebook}:cell_4:1:1: F401 'random.randint' imported but unused\n"
    assert captured.out == expected
    assert captured.err == ""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re

import numpy as np
import pandas as pd
from scipy import interpolate
class Function:
    """Wraps a scatter of ordered data points as an evaluable function.

    Provides an ``eval`` method that evaluates the function at any number
    inside the data's domain, using linear (default) or cubic-spline
    interpolation.

    Initialize with a tuple of X and Y data lists, e.g.::

        test_function = Function((xPoints, yPoints))
    """
    def __init__(self, plotData, interpolator="linear"):
        self.interpolator = interpolator
        self.xDataPoints = []
        self.yDataPoints = []
        self.xDataPoints, self.yDataPoints = plotData[0], plotData[1]
        # Precompute the cubic-spline representation when requested
        if interpolator == "spline":
            self.splineRepresentation = interpolate.splrep(self.xDataPoints, self.yDataPoints)
    def eval(self, xValue):
        """Evaluate at xValue; prints and returns False outside the domain."""
        # Domain check
        if not min(self.xDataPoints) < xValue < max(self.xDataPoints):
            print("ERROR: trying to evaluate outside domain")
            return False
        # Exact hit on a known data point
        index = 0
        while index < len(self.xDataPoints):
            if xValue == self.xDataPoints[index]:
                return self.yDataPoints[index]
            index += 1
        # Otherwise interpolate
        if self.interpolator == "linear":
            return self.linear_interpol(xValue)
        elif self.interpolator == "spline":
            return self.spline_interpol(xValue)
        else:
            print("ERROR: Unknown interpolator")
            return False
    def linear_interpol(self, xValue):
        """Linear interpolation; interpolators take a single X value and
        produce a single Y value.

        NOTE(review): assumes xDataPoints is sorted ascending -- confirm.
        """
        # Find the bracketing points x0 < xValue <= x1 and their images
        index = 1
        while index < len(self.xDataPoints):
            if self.xDataPoints[index] > xValue:
                x0 = self.xDataPoints[index - 1]
                y0 = self.yDataPoints[index - 1]
                x1 = self.xDataPoints[index]
                y1 = self.yDataPoints[index]
                break
            else:
                index += 1
                continue
        return y0 + (xValue - x0) * (y1 - y0) / (x1 - x0)
    def spline_interpol(self, xValue):
        # Evaluate the spline representation built in __init__
        return interpolate.splev(xValue, self.splineRepresentation)
    def show(self, xLabel=None, yLabel=None):
        """Plot the wrapped function.

        NOTE(review): uses ``plt`` but matplotlib.pyplot is never imported
        in this module -- as written this raises NameError.
        """
        fig, ax = plt.subplots()
        ax.plot(self.xDataPoints, self.yDataPoints)
        ax.set(xlabel=xLabel, ylabel=yLabel)
        ax.grid()
        plt.show()
def clean_gromacs_garbage(path=None):
    """Delete the '#name#' backup files Gromacs leaves behind.

    Parameters
    ----------
    path : str, optional
        Directory to clean. Defaults to the current working directory at
        *call* time. (The old default ``path=os.getcwd()`` was evaluated
        once at import time, so later ``os.chdir`` calls were ignored.)
    """
    if path is None:
        path = os.getcwd()
    garbagePattern = re.compile(r"#([\w\d.]+)#")
    for file in os.listdir(path):
        if garbagePattern.match(file):
            os.remove(os.path.join(path, file))
            print(os.path.join(path, file), "removed")
def get_overlap(function1, function2):
    """Return [lower, upper] bounds of the X-range where the domains of two
    Function objects overlap. Useful to build a third function from two.

    Only reads the ``xDataPoints`` attribute of each argument. The original
    four-branch if/else is exactly max-of-mins / min-of-maxes.
    """
    xMin = max(min(function1.xDataPoints), min(function2.xDataPoints))
    xMax = min(max(function1.xDataPoints), max(function2.xDataPoints))
    return [xMin, xMax]
def calculate_enthalpy_plot(lowTempFunc, highTempFunc, deltaTemp, nPoints=200):
    """Build an enthalpy Function from two free-energy Functions evaluated
    at temperatures deltaTemp apart (finite difference in temperature)."""
    xLowLimit, xHighLimit = get_overlap(lowTempFunc, highTempFunc)
    step = (xHighLimit - xLowLimit) / nPoints
    xValues = []
    enthalpyValues = []
    # Note: x advances BEFORE sampling, so the first point is xLowLimit+step
    # (mirrors the original stepping exactly).
    x = xLowLimit
    while x <= xHighLimit:
        x += step
        xValues.append(x)
        delta_g = highTempFunc.eval(x) - lowTempFunc.eval(x)
        enthalpyValues.append(-delta_g / deltaTemp)
    return Function([xValues, enthalpyValues])
def show_umbrella_plot(profileFilename, histogramFilename):
    """Show the PMF profile and the per-window histograms in one figure.
    Useful to spot missing umbrella-sampling windows.

    NOTE(review): depends on ``plt`` (matplotlib.pyplot) and ``parseXVG``,
    neither of which is imported/defined in this module as written.
    """
    figure = plt.figure()
    histogramsData = parseXVG(histogramFilename)
    histoPlot = figure.add_subplot(111)
    # One shaded area per umbrella window; column 0 is the shared X axis
    for histogramNum in range(1, len(histogramsData)):
        histoPlot.fill_between(
            histogramsData[0], 0, histogramsData[histogramNum], color="grey", alpha=0.35
        )
    histoPlot.set_xlabel("Distance from bilayer center [nm]")
    histoPlot.set_ylabel("Population")
    profileData = parseXVG(profileFilename)
    # Overlay the PMF on a twin (right-hand) Y axis sharing X
    profilePlot = figure.add_subplot(111, sharex=histoPlot, frameon=False)
    profilePlot.plot(profileData[0], profileData[1])
    profilePlot.yaxis.tick_right()
    profilePlot.yaxis.set_label_position("right")
    profilePlot.set_ylabel("Mean force potential [kj/mol]")
    profilePlot.grid()
    plt.show()
def generate_tpr_list_file(path, tprListFile="tpr_files.dat"):
    """Write the list of TPR files: one 'umbrellaX.tpr' line for every
    finished 'umbrellaX.gro' window found in *path*.

    Fixes vs the original: paths are joined with os.path.join (the old
    ``path + tprListFile`` concatenation silently wrote elsewhere when
    *path* lacked a trailing separator), the output file is closed via a
    context manager, and the bare except is narrowed to OSError.
    """
    windowsList = []
    pattern = re.compile(r"umbrella([\w.]+).gro")
    for file in os.listdir(path):
        if pattern.match(file):
            windowsList.append(pattern.findall(file)[0])
    listPath = os.path.join(path, tprListFile)
    try:
        os.remove(listPath)
    except OSError:
        print("No previous tpr file found")
    with open(listPath, "w+") as outputFile:
        for window in windowsList:
            print("umbrella" + window + ".tpr", file=outputFile)
def generate_pullf_list_file(path, pullfListFile="pullf_files.dat"):
    """Write the list of pull-force files: one 'pullf_umbrellaX.xvg' line
    for every finished 'umbrellaX.gro' window found in *path*.

    Fixes vs the original (mirroring generate_tpr_list_file): os.path.join
    instead of string concatenation, context-managed output file, OSError
    instead of a bare except, and the "provious" typo in the message.
    """
    windowsList = []
    pattern = re.compile(r"umbrella([\w.]+).gro")
    for file in os.listdir(path):
        if pattern.match(file):
            windowsList.append(pattern.findall(file)[0])
    listPath = os.path.join(path, pullfListFile)
    try:
        os.remove(listPath)
    except OSError:
        print("No previous pullf list found")
    with open(listPath, "w+") as outputFile:
        for window in windowsList:
            print("pullf_umbrella" + window + ".xvg", file=outputFile)
def list_finished_runs(path=None):
    """Return the window identifiers of finished umbrella runs.

    A run is considered finished when its ``umbrella<window>.gro`` output
    exists in `path`.

    Parameters
    ----------
    path : str, optional
        Directory to scan. Defaults to the current working directory
        evaluated at call time (the original ``path=os.getcwd()`` default
        froze the directory at import time).

    Returns
    -------
    list of str
        The ``<window>`` captures, in ``os.listdir`` order.
    """
    if path is None:
        path = os.getcwd()
    pattern = re.compile(r"umbrella([\w.]+)\.gro")
    windowsList = []
    for fileName in os.listdir(path):
        match = pattern.match(fileName)
        if match:
            windowsList.append(match[1])
    return windowsList
def xvg_to_dataframe(xvgFilename):
    """Return a pandas DataFrame built from a Grace XVG file.

    The numeric block is loaded with the first column as the index. Column
    names are recovered from the ``@`` metadata header: the plot title when
    there is a single data column, otherwise the per-series legend entries.
    When the metadata cannot be parsed, columns fall back to "1", "2", ...

    Parameters
    ----------
    xvgFilename : str
        Path of the XVG file to read.

    Returns
    -------
    pandas.DataFrame
    """
    xvgArray = np.loadtxt(xvgFilename, comments=["#", "@"])
    xvgDataFrame = pd.DataFrame(xvgArray).set_index(0)
    # Pick the metadata pattern that matches the number of data columns.
    if len(xvgDataFrame.columns) == 1:
        columnNamePattern = re.compile(r"@[\s]+title\s\"([\w]+)")
    else:
        columnNamePattern = re.compile(r"@\ss\d\slegend\s\"([\w\s]+)")
    columnNames = []
    with open(xvgFilename, "r") as xvgFileData:
        while len(columnNames) < len(xvgDataFrame.columns):
            line = xvgFileData.readline()
            if line.startswith("#"):
                continue
            elif line.startswith("@"):
                if columnNamePattern.match(line):
                    columnNames.append(columnNamePattern.findall(line)[0])
                else:
                    # Unexpected metadata line: give up and use generic names.
                    columnNames = [str(i + 1) for i in range(len(xvgDataFrame.columns))]
                    break
            else:
                # Data line or EOF reached before enough names were found:
                # fall back to generic names. (The original had no fallback
                # here and spun forever because readline() keeps returning
                # "" at EOF.)
                columnNames = [str(i + 1) for i in range(len(xvgDataFrame.columns))]
                break
    xvgDataFrame.columns = columnNames
    return xvgDataFrame
|
from django.apps import AppConfig
class App2CvVideosConfig(AppConfig):
    """Django application configuration for the App_2_CV_Videos app."""

    # Full Python path Django uses to locate the application package.
    name = 'App_2_CV_Videos'
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test ILL borrowing requests."""
import json
from flask import url_for
from invenio_search import current_search
from tests.helpers import user_login
# Status codes accepted as success for the REST calls exercised below.
_HTTP_OK = [200, 201, 204]
# PID of an existing borrowing request (presumably created by the `testdata`
# fixture — verify against tests/helpers).
BRWREQ_PID = "illbid-1"
# Invenio records-REST endpoint names for the ILL borrowing request views.
ITEM_ENDPOINT = "invenio_records_rest.illbid_item"
LIST_ENDPOINT = "invenio_records_rest.illbid_list"
def test_ill_brwreqs_list_permissions(client, testdata, json_headers, users):
    """Test borrowing requests list permissions.

    Creates one borrowing request per patron (as admin), then checks that
    each logged-in user only sees the requests the permission policy allows,
    and that anonymous users are rejected with 401.
    """
    patron1_brwreq = dict(
        status="PENDING",
        document_pid="docid-1",
        patron_pid="1",
        library_pid="illlid-1",
        type="PHYSICAL_COPY",
    )
    patron2_brwreq = dict(
        status="PENDING",
        document_pid="docid-1",
        patron_pid="2",
        library_pid="illlid-1",
        type="PHYSICAL_COPY",
    )

    def _test_list(expected_status, pids):
        """Test get list for given pids."""
        # Query for all the given pids; permission filtering decides what
        # actually comes back in the hits.
        q = " OR ".join(["pid:{}".format(pid) for pid in pids])
        list_url = url_for(LIST_ENDPOINT, q=q)
        res = client.get(list_url, headers=json_headers)
        assert res.status_code in expected_status
        return res.get_json()

    # create records (as admin, who is always allowed to create)
    list_url = url_for(LIST_ENDPOINT)
    user_login(client, "admin", users)
    res = client.post(
        list_url, headers=json_headers, data=json.dumps(patron1_brwreq)
    )
    patron1_brwreq_pid = res.get_json()["metadata"]["pid"]
    res = client.post(
        list_url, headers=json_headers, data=json.dumps(patron2_brwreq)
    )
    patron2_brwreq_pid = res.get_json()["metadata"]["pid"]
    all_pids = [patron1_brwreq_pid, patron2_brwreq_pid]

    # wait for ES to index the newly created records before searching
    current_search.flush_and_refresh(index="ill_borrowing_requests")

    # test results: (username, expected HTTP statuses, pids the user may see)
    tests = [
        ("admin", _HTTP_OK, all_pids),
        ("librarian", _HTTP_OK, all_pids),
        ("patron1", _HTTP_OK, [patron1_brwreq_pid]),
        ("patron2", _HTTP_OK, [patron2_brwreq_pid]),
    ]
    for username, expected_status, expected_pids in tests:
        user_login(client, username, users)
        # Always query for *all* pids; only the permitted subset must return.
        results = _test_list(expected_status, all_pids)
        assert results["hits"]["total"] == len(expected_pids)
        found_pids = [
            hit["metadata"]["pid"] for hit in results["hits"]["hits"]
        ]
        assert set(expected_pids) == set(found_pids)

    # anonymous users are rejected outright
    user_login(client, "anonymous", users)
    _test_list([401], [])
def test_ill_brwreq_details_permissions(client, testdata, json_headers, users):
    """Test borrowing requests details permissions.

    Exercises create, update, read and delete on a borrowing request for
    each user role, checking both the HTTP status and the
    `created_by`/`updated_by` bookkeeping fields.
    """
    dummy_borrowing_request = dict(
        status="PENDING",
        document_pid="docid-1",
        patron_pid="1",
        library_pid="illlid-1",
        type="PHYSICAL_COPY",
    )

    def _test_create(expected_status, data, user):
        """Test record creation."""
        url = url_for(LIST_ENDPOINT)
        res = client.post(url, headers=json_headers, data=json.dumps(data))
        assert res.status_code in expected_status
        if res.status_code < 400:
            brw_req = res.get_json()["metadata"]
            # New requests always start as PENDING and record their creator.
            assert brw_req["status"] == "PENDING"
            expected_created_by = dict(type="user_id", value=str(user.id))
            assert brw_req["created_by"] == expected_created_by
            assert not brw_req.get("updated_by")
            return brw_req["pid"]

    def _test_update(expected_status, data, pid, user):
        """Test record update."""
        # Fall back to the fixture record when creation was rejected.
        pid_value = pid or BRWREQ_PID
        url = url_for(ITEM_ENDPOINT, pid_value=pid_value)
        res = client.put(url, headers=json_headers, data=json.dumps(data))
        assert res.status_code in expected_status
        if res.status_code < 400:
            expected_changed_by = dict(type="user_id", value=str(user.id))
            brw_req = res.get_json()["metadata"]
            # The same user created and updated the record in this flow.
            assert brw_req["created_by"] == expected_changed_by
            assert brw_req["updated_by"] == expected_changed_by

    def _test_read(expected_status, pid):
        """Test record read."""
        pid_value = pid or BRWREQ_PID
        url = url_for(ITEM_ENDPOINT, pid_value=pid_value)
        res = client.get(url, headers=json_headers)
        assert res.status_code in expected_status

    def _test_delete(expected_status, pid):
        """Test record delete."""
        url = url_for(ITEM_ENDPOINT, pid_value=pid)
        res = client.delete(url, headers=json_headers)
        assert res.status_code in expected_status

    # create/update: (username, expected HTTP statuses, payload)
    tests = [
        ("anonymous", [401], dummy_borrowing_request),
        ("patron2", [403], dummy_borrowing_request),
        ("patron1", [403], dummy_borrowing_request),
        ("librarian", _HTTP_OK, dummy_borrowing_request),
        ("admin", _HTTP_OK, dummy_borrowing_request),
    ]
    for username, expected_status, data in tests:
        user = user_login(client, username, users)
        pid = _test_create(expected_status, data, user)
        _test_update(expected_status, data, pid, user)

    # get: patrons may read their own request, staff may read everything
    tests = [
        ("anonymous", [401]),
        ("patron2", [403]),
        ("patron1", _HTTP_OK),
        ("librarian", _HTTP_OK),
        ("admin", _HTTP_OK),
    ]
    for username, expected_status in tests:
        user_login(client, username, users)
        _test_read(expected_status, BRWREQ_PID)

    # delete: only admin is allowed
    tests = [
        ("anonymous", [401]),
        ("patron2", [403]),
        ("patron1", [403]),
        ("librarian", [403]),
        ("admin", _HTTP_OK),
    ]
    for username, expected_status in tests:
        user_login(client, username, users)
        _test_delete(expected_status, BRWREQ_PID)
|
# - * - encoding : utf - 8 - * -
"""
:copyright: 2017-2018 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
#import abc
import os
import numpy as np
import pandas as pd
from daal.data_management import (AOSNumericTable, FileDataSource,
DataSource, HomogenNumericTable)
class IInput:
    """Abstract interface for generic input data fed to the daal library.

    Concrete subclasses wrap a particular input format and expose it as a
    daal NumericTable via `getNumericTable`.
    """

    def getNumericTable(self, **kwargs):
        """Return a daal NumericTable view of the wrapped input data."""
        raise NotImplementedError()
class HomogenousDaalData(IInput):
    """Convert numpy arrays, pandas DataFrames or CSV files to daal
    homogeneous NumericTables.

    Examples of accepted input:
        np.array([[1, 2, 3], [4, 5, 6]])
        pd.DataFrame(values)
        'example.csv'
    """

    def __init__(self, indata=None):
        # `indata` may be omitted here and supplied later via __call__.
        self.indata = indata
        if self.indata is not None:
            self._categorize(indata)

    def __call__(self, indata):
        # Re-categorize only when a genuinely new input object is provided.
        if indata is not None and indata is not self.indata:
            self._categorize(indata)

    def _categorize(self, indata):
        """Record `indata` and detect its format ('numpy', 'pandas' or 'csv').

        Raises
        ------
        ValueError
            If the input is neither a numpy array, a DataFrame nor the path
            of an existing file.
        """
        if isinstance(indata, np.ndarray):
            self.informat = 'numpy'
        elif isinstance(indata, pd.DataFrame):
            self.informat = 'pandas'
        elif isinstance(indata, str):
            # BUG FIX: the original tested os.path.isfile(input) — the
            # builtin `input` function — instead of the argument `indata`,
            # so every string input raised TypeError.
            if os.path.isfile(indata):
                self.informat = 'csv'
            else:
                raise ValueError("DaalData error in initialization, "
                                 "no valid format given.")
        else:
            raise ValueError("DaalData error in initialization, "
                             "no valid format given.")
        self.indata = indata

    def getNumericTable(self, **kwargs):
        """Return a daal HomogenNumericTable built from the wrapped input.

        Raises
        ------
        ValueError
            If the input format was never successfully categorized.
        """
        if self.informat == 'numpy':
            return HomogenNumericTable(self.indata)
        if self.informat == 'pandas':
            # .values works across pandas versions (DataFrame.as_matrix was
            # deprecated in 0.23 and removed in 1.0).
            array = self.indata.values
            return HomogenNumericTable(array)
        if self.informat == 'csv':
            # BUG FIX: was the misspelled `doDictionaryFormContext`
            # (cf. the HeterogenousDaalData sibling class below).
            dataSource = \
                FileDataSource(self.indata,
                               DataSource.doAllocateNumericTable,
                               DataSource.doDictionaryFromContext)
            dataSource.loadDataBlock()
            return dataSource.getNumericTable()
        raise ValueError("Cannot identify input type.")
class HeterogenousDaalData(HomogenousDaalData):
    """Convert structured numpy arrays, pandas DataFrames or CSV files to
    daal AOS (array-of-structures) NumericTables.

    Example of accepted numpy input:
        np.array([(1, 2.3), (2, -0.9)],
                 dtype=[('x', np.float32), ('y', np.float64)])
    """

    def __init__(self, indata=None):
        # BUG FIX: the original called HomogenousDaalData.__init__(indata),
        # i.e. with `indata` bound to `self`, which never initialized this
        # instance (and crashed on most input types).
        HomogenousDaalData.__init__(self, indata)

    def __call__(self, indata):
        HomogenousDaalData.__call__(self, indata)

    def _getStructureArray(self, dataframe, dtypes):
        """Return a structured numpy array with one record per DataFrame row.

        Parameters
        ----------
        dataframe : pd.DataFrame
            Source data. Assumes a default RangeIndex (rows are fetched with
            .loc[i] for i in 0..len-1) — TODO confirm for all callers.
        dtypes : sequence
            One numpy dtype per column.
        """
        dataList = [tuple(dataframe.loc[i]) for i in range(dataframe.shape[0])]
        decDtype = list(zip(dataframe.columns.tolist(), dtypes))
        return np.array(dataList, dtype=decDtype)

    def getNumericTable(self, **kwargs):
        """Return a daal AOSNumericTable built from the wrapped input.

        For CSV input, both the `nRows` and `dtype` keyword arguments are
        required to pre-allocate the destination table.
        """
        if self.informat == 'numpy':
            return AOSNumericTable(self.indata)
        if self.informat == 'pandas':
            array = self._getStructureArray(
                self.indata,
                dtypes=self.indata.dtypes)
            return AOSNumericTable(array)
        if self.informat == 'csv':
            dataSource = FileDataSource(
                self.indata,
                DataSource.notAllocateNumericTable,
                DataSource.doDictionaryFromContext)
            # BUG FIX: the original used `and`, so the error was only raised
            # when *both* kwargs were missing even though both are required.
            if 'nRows' not in kwargs or 'dtype' not in kwargs:
                raise ValueError("HeterogenousDaalData, for csv file, "
                                 "'nRows' and 'dtype' must be specified.")
            nRows = kwargs['nRows']
            dtype = kwargs['dtype']
            array = np.empty([nRows, ], dtype=dtype)
            nT = AOSNumericTable(array)
            return dataSource.loadDataBlock(nRows, nT)
        return None
|
""" Plotting class for FVCOM results. """
from __future__ import print_function
import copy
from datetime import datetime
from pathlib import Path
from warnings import warn
import collections
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.widgets
import mpl_toolkits.axes_grid1
import numpy as np
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from descartes import PolygonPatch
from matplotlib import pyplot as plt
from matplotlib import rcParams
from matplotlib import cm as mplcm
from matplotlib.animation import FuncAnimation
from matplotlib.dates import DateFormatter, date2num
from mpl_toolkits.axes_grid1 import make_axes_locatable
from shapely.geometry import Polygon, Point, LineString
from PyFVCOM.coordinate import lonlat_from_utm, utm_from_lonlat
from PyFVCOM.current import vector2scalar
from PyFVCOM.grid import get_boundary_polygons
from PyFVCOM.grid import getcrossectiontriangles, unstructured_grid_depths, Domain, nodes2elems, mp_interp_func
from PyFVCOM.ocean import depth2pressure, dens_jackett
from PyFVCOM.read import FileReader
from PyFVCOM.utilities.general import PassiveStore, warn
from cmocean import cm
# Basemap is an optional dependency: if mpl_toolkits.basemap is missing we
# warn once, expose Basemap as None and flag it via `have_basemap` so the
# plotting classes can degrade gracefully instead of failing at import time.
have_basemap = True
try:
    from mpl_toolkits.basemap import Basemap
except ImportError:
    warn('No mpl_toolkits found in this python installation. Some functions will be disabled.')
    Basemap = None
    have_basemap = False

rcParams['mathtext.default'] = 'regular'  # use non-LaTeX fonts
class Depth(object):
    """ Create depth-resolved plots based on output from FVCOM.

    Provides
    --------
    plot_slice

    Author(s)
    ---------
    Pierre Cazenave (Plymouth Marine Laboratory)

    """

    def __init__(self, dataset, figure=None, figsize=(20, 8), axes=None, cmap='viridis', title=None, legend=False,
                 fs=10, date_format=None, cb_label=None, hold=False):
        """
        Parameters
        ----------
        dataset : Dataset, PyFVCOM.read.FileReader
            netCDF4 Dataset or PyFVCOM.read.FileReader object.
        figure : Figure, optional
            Matplotlib Figure object. A figure object is created if not
            provided.
        figsize : tuple(float), optional
            Figure size in cm. This is only used if a new Figure object is
            created.
        axes : Axes, optional
            Matplotlib axes object. An Axes object is created if not
            provided.
        cmap : None, Colormap
            Provide a colourmap to use when plotting vectors or 2D plots (anything with a magnitude). Defaults to
            'viridis'.
        title : str, optional
            Title to use when creating the plot.
        fs : int, optional
            Font size to use when rendering plot text.
        legend : bool, optional
            Set to True to add a legend. Defaults to False.
        date_format : str
            Date format to use.
        cb_label : str
            Label to apply to the colour bar. Defaults to no label.
        hold : bool, optional
            Set to True to keep existing plots when adding to an existing figure. Defaults to False.

        """
        self.ds = dataset
        self.figure = figure
        self.axes = axes
        self.fs = fs
        self.title = title
        self.figsize = figsize
        self.hold = hold
        self.add_legend = legend
        self.cmap = cmap
        self.date_format = date_format
        self.cb_label = cb_label
        # Plot instances (initialise to None for truthiness test later)
        self.slice_plot = None
        # Are we working with a FileReader object or a bog-standard netCDF4 Dataset?
        self._FileReader = False
        if isinstance(dataset, (FileReader, Domain)):
            self._FileReader = True
        # Initialise the figure
        self.__init_figure()

    def __init_figure(self):
        # Initialise the figure. cm2inch is a module-level helper (defined
        # elsewhere in this module) converting centimetres to inches, since
        # self.figsize is given in cm but matplotlib expects inches.
        if self.figure is None:
            figsize = (cm2inch(self.figsize[0]), cm2inch(self.figsize[1]))
            self.figure = plt.figure(figsize=figsize)
        # Create plot axes
        if not self.axes:
            self.axes = self.figure.add_subplot(1, 1, 1)
        if self.title:
            self.axes.set_title(self.title)

    def plot_slice(self, horizontal, depth, variable, fill_seabed=False, *args, **kwargs):
        """
        Plot a depth-resolved vertical slice with an attached colour bar.

        Parameters
        ----------
        horizontal : np.ndarray
            The horizontal array (x-axis). This can be distance along the slice or a coordinate.
        depth : np.ndarray
            The vertical depth array (positive-down).
        variable : np.ndarray
            The variable to plot in the vertical. Its shape must be compatible with `horizontal' and `depth'.
        fill_seabed : bool, optional
            Set to True to fill the seabed from the maximum water depth to the edge of the plot with gray.

        Remaining args and kwargs are passed to self.axes.pcolormesh.

        """
        # I'm not much of a fan of all this transposing. It feels like it's going to be a pain to debug when it
        # inevitably does something you don't expect.
        try:
            # Depth is positive-down, so negate it for a sensible y-axis.
            self.slice_plot = self.axes.pcolormesh(horizontal, -depth, variable,
                                                   cmap=self.cmap, *args, **kwargs)
        except TypeError:
            # Try flipping the data array, that might make it work.
            self.slice_plot = self.axes.pcolormesh(horizontal, -depth, variable.T,
                                                   cmap=self.cmap, *args, **kwargs)
        if fill_seabed:
            # Shade from the deepest water down to the bottom of the axis.
            self.axes.fill_between(horizontal, self.axes.get_ylim()[0], -np.max(depth, axis=0), color='0.6')
        # Attach the colour bar in its own axis to the right of the plot.
        divider = make_axes_locatable(self.axes)
        cax = divider.append_axes("right", size="3%", pad=0.1)
        self.colorbar = self.figure.colorbar(self.slice_plot, cax=cax)
        self.colorbar.ax.tick_params(labelsize=self.fs)
        if self.cb_label:
            self.colorbar.set_label(self.cb_label)
class Time(object):
    """ Create time series plots based on output from FVCOM.

    Provides
    --------
    plot_line
    plot_scatter
    plot_quiver
    plot_surface

    Author(s)
    ---------
    Pierre Cazenave (Plymouth Marine Laboratory)

    """

    def __init__(self, dataset, figure=None, figsize=(20, 8), axes=None, cmap='viridis', title=None, legend=False,
                 fs=10, date_format=None, cb_label=None, hold=False, extend='neither'):
        """
        Parameters
        ----------
        dataset : Dataset, PyFVCOM.read.FileReader
            netCDF4 Dataset or PyFVCOM.read.FileReader object.
        figure : Figure, optional
            Matplotlib Figure object. A figure object is created if not
            provided.
        figsize : tuple(float), optional
            Figure size in cm. This is only used if a new Figure object is
            created.
        axes : Axes, optional
            Matplotlib axes object. An Axes object is created if not
            provided.
        cmap : None, Colormap
            Provide a colourmap to use when plotting vectors or 2D plots (anything with a magnitude). Defaults to
            'viridis'.
        title : str, optional
            Title to use when creating the plot.
        fs : int, optional
            Font size to use when rendering plot text.
        legend : bool, optional
            Set to True to add a legend. Defaults to False.
        date_format : str
            Date format to use.
        cb_label : str
            Label to apply to the colour bar. Defaults to no label.
        hold : bool, optional
            Set to True to keep existing plots when adding to an existing figure. Defaults to False.
        extend : str, optional
            Set the colour bar extension ('neither', 'both', 'min', 'max').
            Defaults to 'neither'.

        """
        self.ds = dataset
        self.figure = figure
        self.axes = axes
        self.fs = fs
        self.title = title
        self.figsize = figsize
        self.hold = hold
        self.add_legend = legend
        self.cmap = cmap
        self.date_format = date_format
        self.cb_label = cb_label
        self.extend = extend
        # Plot instances (initialise to None for truthiness test later)
        self.line_plot = None
        self.scatter_plot = None
        self.quiver_plot = None  # for vectors with time (e.g. currents at a point)
        self.surface_plot = None  # for depth-resolved time, for example.
        self.legend = None
        self.colorbar = None
        self.quiver_key = None
        # Are we working with a FileReader object or a bog-standard netCDF4 Dataset?
        self._FileReader = False
        if isinstance(dataset, (FileReader, Domain)):
            self._FileReader = True
        # Initialise the figure
        self.__init_figure()

    def __init_figure(self):
        # Read in required time variables
        if self._FileReader:
            self.time = self.ds.time.datetime
        else:
            # Try a couple of time formats. BUG FIX: the original called
            # datetime.strftime(format, i), which always raises TypeError
            # (uncaught by `except ValueError`); parsing strings requires
            # datetime.strptime(i, format).
            # NOTE(review): assumes each entry of `Times` is a str — confirm
            # against the netCDF files in use (FVCOM often stores char arrays).
            try:
                self.time = np.asarray([datetime.strptime(i, '%Y-%m-%dT%H:%M:%S.%f') for i in self.ds.variables['Times']])
            except ValueError:
                self.time = np.asarray([datetime.strptime(i, '%Y/%m/%d %H:%M:%S.%f') for i in self.ds.variables['Times']])
        self.n_times = len(self.time)
        # Initialise the figure. cm2inch is a module-level helper converting
        # centimetres to inches.
        if self.figure is None:
            figsize = (cm2inch(self.figsize[0]), cm2inch(self.figsize[1]))
            self.figure = plt.figure(figsize=figsize)
        # Create plot axes
        if not self.axes:
            self.axes = self.figure.add_subplot(1, 1, 1)
        if self.title:
            self.axes.set_title(self.title)

    def plot_line(self, time_series, *args, **kwargs):
        """
        Plot a time series as a line.

        Parameters
        ----------
        time_series : list-like, np.ndarray
            Time series data to plot.

        Additional kwargs are passed to `matplotlib.pyplot.plot'.

        """
        if self.line_plot and not self.hold:
            # Update the current line in place. BUG FIX: the original
            # *assigned* to the set_ydata/set_xdata attributes instead of
            # calling them, silently leaving the plot unchanged.
            self.line_plot.set_ydata(time_series)
            self.line_plot.set_xdata(self.time)
            return
        self.line_plot, = self.axes.plot(self.time, time_series,
                                         *args, **kwargs)
        if self.add_legend:
            self.legend = self.axes.legend(frameon=False)

    def plot_scatter(self, time_series, **kwargs):
        """
        Plot a time series as a set of scatter points.

        Parameters
        ----------
        time_series : list-like, np.ndarray
            Time series data to plot.

        Additional kwargs are passed to `matplotlib.pyplot.scatter'.

        """
        if self.scatter_plot and not self.hold:
            # Update the current scatter. I can't see how to replace both the x, y and colour data (I think set_array
            # does the latter), so just clear the axis and start again.
            self.axes.cla()
        self.scatter_plot = self.axes.scatter(self.time, time_series,
                                              **kwargs)
        if self.add_legend:
            self.legend = self.axes.legend(frameon=False)

    def plot_quiver(self, u, v, field=None, scale=1, **kwargs):
        """
        Plot a time series of vectors.

        Parameters
        ----------
        u, v : list-like, np.ndarray
            Arrays of time-varying vector components.
        field : list-like, np.ndarray, str, optional
            Field by which to colour the vectors. If set to 'magnitude', use the magnitude of the velocity vectors.
            Defaults to colouring by `color'.
        scale : float, optional
            Scale to pass to the quiver. See `matplotlib.pyplot.quiver' for information.

        Additional kwargs are passed to `matplotlib.pyplot.quiver'.

        Notes
        -----
        The `hold' option to PyFVCOM.plot.Time has no effect here: an existing plot is cleared before adding new data.

        """
        # To plot time along the x-axis with quiver, we need to use numerical representations of time. So,
        # convert from datetimes to numbers and then format the x-axis labels after the fact.
        quiver_time = date2num(self.time)
        if field == 'magnitude':
            field = np.hypot(u, v)
        if self.quiver_plot:
            if np.any(field):
                self.quiver_plot.set_UVC(u, v, field)
            else:
                self.quiver_plot.set_UVC(u, v)
            return
        if np.any(field):
            self.quiver_plot = self.axes.quiver(quiver_time, np.zeros(u.shape), u, v, field,
                                                cmap=self.cmap,
                                                units='inches',
                                                scale_units='inches',
                                                scale=scale,
                                                **kwargs)
            divider = make_axes_locatable(self.axes)
            cax = divider.append_axes("right", size="3%", pad=0.1)
            self.colorbar = self.figure.colorbar(self.quiver_plot, cax=cax)
            self.colorbar.ax.tick_params(labelsize=self.fs)
            if self.cb_label:
                self.colorbar.set_label(self.cb_label)
        else:
            self.quiver_plot = self.axes.quiver(quiver_time, np.zeros(u.shape), u, v,
                                                units='inches',
                                                scale_units='inches',
                                                scale=scale,
                                                **kwargs)
        # Something approaching dynamic labelling of dates.
        if not self.date_format:
            x_range = self.quiver_plot.axes.get_xlim()
            x_delta = x_range[1] - x_range[0]
            if x_delta > int(1.5 * 365):
                date_format = DateFormatter('%Y-%m-%d')
            elif x_delta > 2:
                date_format = DateFormatter('%Y-%m-%d %H:%M')
            elif x_delta < 2:
                date_format = DateFormatter('%H:%M:%S')
            else:
                # Only reached when x_delta is exactly 2 days.
                date_format = DateFormatter('%H:%M')
            self.axes.xaxis.set_major_formatter(date_format)
        else:
            self.axes.xaxis.set_major_formatter(self.date_format)
        if self.add_legend:
            # BUG FIX: the original f-string contained {-1}, which
            # interpolated the literal -1 and rendered "ms^-1" instead of
            # the intended superscript "ms^{-1}".
            label = f'{scale} ' + r'$\mathrm{ms^{-1}}$'
            self.quiver_key = plt.quiverkey(self.quiver_plot, 0.9, 0.9, scale, label, coordinates='axes')
        # Turn off the y-axis labels as they don't correspond to the vector lengths.
        self.axes.get_yaxis().set_visible(False)

    def plot_surface(self, depth, time_series, fill_seabed=False, **kwargs):
        """
        Plot a depth-resolved time series as a filled surface.

        Parameters
        ----------
        depth : np.ndarray
            Depth-varying array of depth. See `PyFVCOM.tide.make_water_column' for more information.
        time_series : np.ndarray
            Depth-varying array of data to plot.
        fill_seabed : bool, optional
            Set to True to fill the seabed from the maximum water depth to the edge of the plot with gray.

        Remaining kwargs are passed to self.axes.pcolormesh.

        """
        # Squeeze out singleton dimensions first.
        depth = np.squeeze(depth)
        time_series = np.squeeze(time_series)
        if not self.surface_plot:
            # Repeat the time axis for each depth level so x, y and the data
            # all share the same shape.
            self.surface_plot = self.axes.pcolormesh(np.tile(self.time, [depth.shape[-1], 1]).T,
                                                     depth,
                                                     time_series,
                                                     cmap=self.cmap,
                                                     **kwargs)
            if fill_seabed:
                self.axes.fill_between(self.time, np.min(depth, axis=1), self.axes.get_ylim()[0], color='0.6')
            divider = make_axes_locatable(self.axes)
            cax = divider.append_axes("right", size="3%", pad=0.1)
            self.colorbar = self.figure.colorbar(self.surface_plot, cax=cax, extend=self.extend)
            self.colorbar.ax.tick_params(labelsize=self.fs)
            if self.cb_label:
                self.colorbar.set_label(self.cb_label)
        else:
            # Update the existing plot with the new data (currently untested!)
            self.surface_plot.set_array(time_series)
class Plotter(object):
""" Create plot objects based on output from the FVCOM.
Class to assist in the creation of plots and animations based on output
from the FVCOM.
Methods
-------
plot_field
plot_quiver
plot_lines
plot_scatter
plot_streamlines
add_scale
set_title
replot
close
Author(s)
---------
James Clark (Plymouth Marine Laboratory)
Pierre Cazenave (Plymouth Marine Laboratory)
Mike Bedington (Plymouth Marine Laboratory)
"""
def __init__(self, dataset, figure=None, axes=None, stations=None, extents=None, vmin=None, vmax=None, mask=None,
res='c', fs=10, title=None, cmap='viridis', figsize=(10., 10.), axis_position=None, tick_inc=None, bg_color='gray',
cb_label=None, extend='neither', norm=None, m=None, cartesian=False, axis_labels = True,
line_width=None, mapper='basemap', coast=True, **bmargs):
"""
Parameters
----------
dataset : Dataset, PyFVCOM.read.FileReader
netCDF4 Dataset or PyFVCOM.read.FileReader object.
stations : 2D array, optional
List of station coordinates to be plotted ([[lons], [lats]])
extents : 1D array, optional
Four element numpy array giving lon/lat limits as west, east, south, north (e.g. [-4.56, -3.76, 49.96,
50.44])
vmin : float, optional
Lower bound to be used on colour bar (plot_field only).
vmax : float, optional
Upper bound to be used colour bar (plot_field only).
mask : float, optional
Mask out values < mask (plot_field only).
res : string, optional
Resolution to use when drawing Basemap object. If None, no coastline is plotted.
fs : int, optional
Font size to use when rendering plot text
title : str, optional
Title to use when creating the plot
cmap : string, optional
Colormap to use when shading field data (plot_field only).
figure : Figure, optional
Matplotlib figure object. A figure object is created if not
provided.
figsize : tuple(float), optional
Figure size in cm. This is only used if a new Figure object is
created.
axes : Axes, optional
Matplotlib Axes object. An Axes object is created if not
provided.
axis_position : 1D array, optional
Array giving axis dimensions
tick_inc : list, optional
Add coordinate axes (i.e. lat/long) at the intervals specified in
the list ([lon_spacing, lat_spacing]).
cb_label : str, optional
Set the colour bar label.
extend : str, optional
Set the colour bar extension ('neither', 'both', 'min', 'max').
Defaults to 'neither').
norm : matplotlib.colors.Normalize, optional
Normalise the luminance to 0, 1. For example, use from matplotlib.colors.LogNorm to do log plots of fields.
m : mpl_toolkits.basemap.Basemap, optional
Pass a Basemap object rather than creating one on each invocation.
cartesian : bool, optional
Set to True to skip using Basemap and instead return a simple cartesian axis plot. Defaults to False
(geographical coordinates).
mapper : string, optional
Set to 'basemap' to use Basemap for plotting or 'cartopy' for cartopy.
coast : bool, optional
Set to True to plot coastline. Default to True.
bmargs : dict, optional
Additional arguments to pass to Basemap.
axis_labels : bool, optional
Whether to annotate x and y axis with labels (defaults to Latitude and Longitude)
bg_color: str, optional
sets the figure background color. Defaults to gray
line_width: float, optional
sets line width. If missing, uses default in rcParams
Author(s)
---------
James Clark (Plymouth Marine Laboratory)
Pierre Cazenave (Plymouth Marine Laboratory)
Mike Bedington (Plymouth Marine Laboratory)
Ricardo Torres (Plymouth Marine Laboratory)
"""
self._debug = False
self.ds = dataset
self.figure = figure
self.axes = axes
self.stations = stations
self.extents = extents
self.vmin = vmin
self.vmax = vmax
self.mask = mask
self.res = res
self.fs = fs
self.title = title
self.cmap = cmap
self.figsize = figsize
self.axis_position = axis_position
self.tick_inc = tick_inc
self.cb_label = cb_label
self.extend = extend
self.norm = norm
self.m = m
self.cartesian = cartesian
self.bmargs = bmargs
self.mapper = mapper
self.coast = coast
self.bg_color = bg_color
if not line_width:
self.line_width = line_width
else:
self.line_width = rcParams['lines.linewidth']
self.axis_labels = axis_labels
# Plot instances to hold the plot objects.
self.quiver_plot = None
self.quiver_key = None
self.scatter_plot = None
self.tripcolor_plot = None
self.line_plot = None
self.streamline_plot = None
self.tri = None
self.masked_tris = None
self.colorbar_axis = None
self.cbar = None
self.projection = None
self._plot_projection = {}
# For cartopy, we need to have a Plate Carree transform defined for doing the actual plotting of data since
# we're using Lambert for the "display" projection.
if self.mapper == 'cartopy':
self._plot_projection = {'transform': ccrs.PlateCarree()}
# Are we working with a FileReader object or a bog-standard netCDF4 Dataset?
self._FileReader = False
if isinstance(dataset, (FileReader, Domain)):
self._FileReader = True
# Initialise the figure
self._init_figure()
    def _add_ticks(self, ax):
        """Add labelled lat/lon gridline ticks to a cartopy axis.

        Parameters
        ----------
        ax : cartopy GeoAxes
            The axis to decorate.
        """
        # Zero-width gridlines: we only want the labels, not the lines.
        gl = ax.gridlines(linewidth=0, draw_labels=True, linestyle='--', color='k')
        gl.xlabel_style = {'fontsize': rcParams['axes.labelsize']}
        gl.ylabel_style = {'fontsize': rcParams['axes.labelsize']}
        # Only label the bottom and left edges of the plot.
        # NOTE(review): xlabels_*/ylabels_* are the pre-0.18 cartopy
        # attribute names (newer versions use top_labels etc.) — confirm
        # the supported cartopy version.
        gl.xlabels_top=False
        gl.ylabels_right=False
        gl.xlabels_bottom=True
        gl.ylabels_left=True
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        if self.axis_labels:
            # Axis annotations are placed manually with axes-relative text.
            ax.text(-0.12, 0.55, 'Latitude N (deg)', va='bottom', ha='center',
                    rotation='vertical', rotation_mode='anchor', fontsize=rcParams['axes.labelsize'],
                    transform=ax.transAxes)
            ax.text(0.5, -0.2, 'Longitude W (deg)', va='bottom', ha='center',
                    rotation='horizontal', rotation_mode='anchor', fontsize=rcParams['axes.labelsize'],
                    transform=ax.transAxes)
    def _init_figure(self):
        """Read the grid, create the Figure/Axes and set up the map projection.

        Populates node/element coordinates, builds the triangulation, derives
        plot extents from the data when none were given, and instantiates the
        Basemap or cartopy machinery (coastline, background, tick labels).
        """
        # Read in required grid variables
        if self._FileReader:
            self.n_nodes = getattr(self.ds.dims, 'node')
            self.n_elems = getattr(self.ds.dims, 'nele')
            self.lon = getattr(self.ds.grid, 'lon')
            self.lat = getattr(self.ds.grid, 'lat')
            self.lonc = getattr(self.ds.grid, 'lonc')
            self.latc = getattr(self.ds.grid, 'latc')
            self.x = getattr(self.ds.grid, 'x')
            self.y = getattr(self.ds.grid, 'y')
            self.xc = getattr(self.ds.grid, 'xc')
            self.yc = getattr(self.ds.grid, 'yc')
            self.nv = getattr(self.ds.grid, 'nv')
        else:
            self.n_nodes = len(self.ds.dimensions['node'])
            self.n_elems = len(self.ds.dimensions['nele'])
            self.lon = self.ds.variables['lon'][:]
            self.lat = self.ds.variables['lat'][:]
            self.lonc = self.ds.variables['lonc'][:]
            self.latc = self.ds.variables['latc'][:]
            self.x = self.ds.variables['x'][:]
            self.y = self.ds.variables['y'][:]
            self.xc = self.ds.variables['xc'][:]
            self.yc = self.ds.variables['yc'][:]
            self.nv = self.ds.variables['nv'][:]
        # Re-base the connectivity so it starts at 1.
        # NOTE(review): when min > 1 this subtracts the minimum, which makes
        # the smallest index 0 rather than 1 — looks like it should be
        # `self.nv -= self.nv.min() - 1`; confirm against upstream PyFVCOM.
        if self.nv.min() != 1:
            if self.nv.min() > 0:
                self.nv -= self.nv.min()
            else:
                self.nv += 1 - self.nv.min()
        # Triangles (zero-based connectivity for matplotlib)
        self.triangles = self.nv.transpose() - 1
        # Initialise the figure
        if self.figure is None:
            figsize = (cm2inch(self.figsize[0]), cm2inch(self.figsize[1]))
            self.figure = plt.figure(figsize=figsize)
            self.figure.set_facecolor('white')
        # If plot extents were not given, use min/max lat/lon values
        if self.extents is None:
            if self.cartesian:
                self.extents = np.array([self.x.min(), self.x.max(),
                                         self.y.min(), self.y.max()])
            else:
                self.extents = np.array([self.lon.min(), self.lon.max(),
                                         self.lat.min(), self.lat.max()])
        # Create mapping object if appropriate.
        if not self.cartesian:
            if self.mapper == 'basemap':
                if have_basemap:
                    if self.m is None:
                        self.m = Basemap(llcrnrlon=np.min(self.extents[:2]),
                                         llcrnrlat=np.min(self.extents[-2:]),
                                         urcrnrlon=np.max(self.extents[:2]),
                                         urcrnrlat=np.max(self.extents[-2:]),
                                         rsphere=(6378137.00, 6356752.3142),
                                         resolution=self.res,
                                         projection='merc',
                                         lat_0=np.mean(self.extents[-2:]),
                                         lon_0=np.mean(self.extents[:2]),
                                         lat_ts=np.mean(self.extents[-2:]),
                                         ax=self.axes,
                                         **self.bmargs)
                    # Make a set of coordinates.
                    self.mx, self.my = self.m(self.lon, self.lat)
                    self.mxc, self.myc = self.m(self.lonc, self.latc)
                else:
                    raise RuntimeError('mpl_toolkits is not available in this Python.')
            elif self.mapper == 'cartopy':
                self.projection = ccrs.PlateCarree() #ccrs.LambertConformal(central_longitude=np.mean(self.extents[:2]),
                #                                     central_latitude=np.mean(self.extents[2:]),
                #                                     false_easting=400000, false_northing=400000)
                # Make a coastline depending on whether we've got a GSHHS resolution or a Natural Earth one.
                if self.res in ('c', 'l', 'i', 'h', 'f'):
                    # Use the GSHHS data as in Basemap (a lot slower than the cartopy data).
                    land = cfeature.GSHHSFeature(scale=self.res, edgecolor='k', facecolor='none')
                else:
                    # Make a land object which is fairly similar to the Basemap one we use.
                    land = cfeature.NaturalEarthFeature('physical', 'land', self.res, edgecolor='k', facecolor='0.6')
                # Make a set of coordinates.
                self.mx, self.my = self.lon, self.lat
                self.mxc, self.myc = self.lonc, self.latc
            else:
                raise ValueError(f"Unrecognised mapper value '{self.mapper}'. Choose 'basemap' (default) or 'cartopy'")
        else:
            # Easy peasy, just the cartesian coordinates.
            self.mx, self.my = self.x, self.y
            self.mxc, self.myc = self.xc, self.yc
        # Create plot axes
        if not self.axes:
            self.axes = self.figure.add_subplot(1, 1, 1, projection=self.projection)
            if self.axis_position:
                self.axes.set_position(self.axis_position)
            if self.mapper == 'cartopy':
                self.axes.set_extent(self.extents, crs=ccrs.PlateCarree())
                if self.coast:
                    # shpfile = cartopy.io.shapereader.gshhs('f')
                    # shp = cartopy.io.shapereader.Reader(shpfile)
                    # self.axes.add_geometries(
                    #     shp.geometries(), ccrs.PlateCarree(), edgecolor='red', facecolor='none')
                    #
                    # NOTE(review): `land` is only assigned in the
                    # non-cartesian cartopy branch above, so cartesian=True
                    # with mapper='cartopy' and coast=True would raise
                    # NameError here — confirm intended usage.
                    self.axes.add_feature(land, zorder=1000)
                # *Must* call show and draw in order to get the axis boundary used to add ticks:
                # NOTE(review): `background_patch` was removed in cartopy
                # 0.18+ in favour of set_facecolor — confirm cartopy version.
                self.axes.background_patch.set_facecolor(self.bg_color)
            else:
                self.axes.set_facecolor(self.bg_color)
            self.figure.show()
            self.figure.canvas.draw()
        elif self.mapper == 'basemap' and not self.cartesian and self.coast:
            self.m.drawmapboundary()
            #self.m.drawcoastlines(zorder=1000)
            #self.m.fillcontinents(color='0.6', zorder=1000)
        if self.title:
            self.axes.set_title(self.title)
        # Check the values of tick_inc aren't bigger than the extents.
        if self.tick_inc is not None:
            if self.tick_inc[0] > self.extents[1] - self.extents[0]:
                warn('The x-axis tick interval is larger than the plot x-axis extent.')
            if self.tick_inc[1] > self.extents[3] - self.extents[2]:
                warn('The y-axis tick interval is larger than the plot y-axis extent.')
        # Add coordinate labels to the x and y axes.
        if self.tick_inc is not None:
            meridians = np.arange(np.floor(np.min(self.extents[:2])), np.ceil(np.max(self.extents[:2])), self.tick_inc[0])
            parallels = np.arange(np.floor(np.min(self.extents[2:])), np.ceil(np.max(self.extents[2:])), self.tick_inc[1])
            if self.cartesian:
                # Cartesian
                self.axes.set_xticks(np.arange(self.extents[0], self.extents[1] + self.tick_inc[0], self.tick_inc[0]))
                self.axes.set_yticks(np.arange(self.extents[2], self.extents[3] + self.tick_inc[1], self.tick_inc[1]))
            elif self.mapper == 'basemap':
                self.m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=self.fs, linewidth=0, ax=self.axes)
                self.m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=self.fs, linewidth=0, ax=self.axes)
            elif self.mapper == 'cartopy':
                # self.axes.gridlines(xlocs=meridians, ylocs=parallels, linewidth=0)
                self._add_ticks(self.axes)
                # # Label the end-points of the gridlines using the custom tick makers.
                # self.axes.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
                # self.axes.yaxis.set_major_formatter(LATITUDE_FORMATTER)
                # self._lambert_xticks(meridians)
                # self._lambert_yticks(parallels)
# Whole bunch of hackery to get cartopy to label Lambert plots. Shamelessly copied from:
# https://nbviewer.jupyter.org/gist/ajdawson/dd536f786741e987ae4e
@staticmethod
def _find_side(ls, side):
"""
Given a shapely LineString which is assumed to be rectangular, return the
line corresponding to a given side of the rectangle.
"""
minx, miny, maxx, maxy = ls.bounds
points = {'left': [(minx, miny), (minx, maxy)],
'right': [(maxx, miny), (maxx, maxy)],
'bottom': [(minx, miny), (maxx, miny)],
'top': [(minx, maxy), (maxx, maxy)], }
return LineString(points[side])
def _lambert_xticks(self, ticks):
"""Draw ticks on the bottom x-axis of a Lambert Conformal projection."""
te = lambda xy: xy[0]
lc = lambda t, n, b: np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T
xticks, xticklabels = self._lambert_ticks(ticks, 'bottom', lc, te)
self.axes.xaxis.tick_bottom()
self.axes.set_xticks(xticks)
self.axes.set_xticklabels([self.axes.xaxis.get_major_formatter()(xtick) for xtick in xticklabels])
def _lambert_yticks(self, ticks):
"""Draw ricks on the left y-axis of a Lambert Conformal projection."""
te = lambda xy: xy[1]
lc = lambda t, n, b: np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T
yticks, yticklabels = self._lambert_ticks(ticks, 'left', lc, te)
self.axes.yaxis.tick_left()
self.axes.set_yticks(yticks)
self.axes.set_yticklabels([self.axes.yaxis.get_major_formatter()(ytick) for ytick in yticklabels])
def _lambert_ticks(self, ticks, tick_location, line_constructor, tick_extractor):
"""Get the tick locations and labels for an axis of a Lambert Conformal projection."""
outline_patch = LineString(self.axes.outline_patch.get_path().vertices.tolist())
axis = self._find_side(outline_patch, tick_location)
n_steps = 30
extent = self.axes.get_extent(ccrs.PlateCarree())
_ticks = []
for t in ticks:
xy = line_constructor(t, n_steps, extent)
proj_xyz = self.axes.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])
xyt = proj_xyz[..., :2]
ls = LineString(xyt.tolist())
locs = axis.intersection(ls)
if not self.cartesian:
meridians = np.arange(np.floor(np.min(self.extents[:2])), np.ceil(np.max(self.extents[:2])), self.tick_inc[0])
parallels = np.arange(np.floor(np.min(self.extents[2:])), np.ceil(np.max(self.extents[2:])), self.tick_inc[1])
self.m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=rcParams['axes.labelsize'], linewidth=None, ax=self.axes)
self.m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=rcParams['axes.labelsize'], linewidth=None, ax=self.axes)
if not locs:
tick = [None]
else:
tick = tick_extractor(locs.xy)
_ticks.append(tick[0])
# Remove ticks that aren't visible:
ticklabels = copy.copy(ticks).tolist()
while True:
try:
index = _ticks.index(None)
except ValueError:
break
_ticks.pop(index)
ticklabels.pop(index)
return _ticks, ticklabels
def get_colourbar_extension(self, field, clims):
"""
Find the colourbar extension for the current variable, clipping in space if necessary.
Parameters
----------
field : np.ndarray
The data being plotted.
clims : list, tuple
The colour limits of the plot.
Returns
-------
extend : str
The colourbar extension ('neither', 'min', 'max' or 'both').
"""
# We need to find the nodes/elements for the current variable to make sure our colour bar extends for
# what we're plotting (not the entire data set). We'll have to guess based on shape here.
x = self.lon
y = self.lat
if self.n_elems == field.shape[0]:
x = self.lonc
y = self.latc
mask = (x > self.extents[0]) & (x < self.extents[1]) & (y < self.extents[3]) & (y > self.extents[2])
if all(clims) is None:
clims = [field[..., mask].min(), field[..., mask].max()]
if clims[0] is None:
clims[0] = field[..., mask].min()
if clims[1] is None:
clims[1] = field[..., mask].max()
extend = colorbar_extension(clims[0], clims[1], field[..., mask].min(), field[..., mask].max())
return extend
def replot(self):
"""
Helper method to nuke and existing plot in the current self.axes and reset everything to clean.
"""
self.axes.cla()
self._init_figure()
self.tripcolor_plot = None
self.line_plot = None
self.quiver_plot = None
self.quiver_key = None
self.scatter_plot = None
self.streamline_plot = None
    def plot_domain(self, mesh=False, depth=False, **kwargs):
        """
        Add a domain plot to the given domain (as domain.domain_plot).

        Parameters
        ----------
        mesh : bool
            Set to True to overlay the model mesh. Defaults to False.
        depth : bool
            Set to True to plot water depth. Defaults to False. If enabled, a colour bar is added to the figure.

        Remaining keyword arguments are passed to PyFVCOM.plot.Plotter.

        Provides
        --------
        domain_plot : PyFVCOM.plot.Plotter
            The plot object.
        mesh_plot : matplotlib.axes, optional
            The mesh axis object, if enabled.
        """
        # Pull out the keywords destined for this method rather than the underlying matplotlib calls.
        cb_label = kwargs.pop('cb_label', None)
        linewidth = kwargs.pop('linewidth', None)
        alpha = kwargs.pop('alpha', 1.0)
        if mesh:
            # zorder=2000 keeps the mesh lines above any filled field already drawn on these axes.
            mesh_plot = self.axes.triplot(self.mx, self.my,
                                          self.triangles, 'k-',
                                          linewidth=linewidth, zorder=2000, alpha=alpha, **self._plot_projection, **kwargs)
            self.mesh_plot = mesh_plot
        if depth:
            # Make depths negative down.
            # If the bathymetry already stores negative-down values, plot as-is; otherwise negate it.
            # The colour map is reversed to match the sign convention in use.
            if np.all(self.ds.grid.h < 0):
                self.plot_field(self.ds.grid.h, cmap=colourmap('h'), cb_label=cb_label, **kwargs)
            else:
                self.plot_field(-self.ds.grid.h, cmap=colourmap('h_r'), cb_label=cb_label, **kwargs)
def plot_field(self, field, *args, **kwargs):
"""
Map the given `field'.
Parameters
----------
field : np.ndarray
Field to plot (either on elements or nodes).
Additional arguments and keyword arguments are passed to `matplotlib.pyplot.tripcolor'.
"""
# We ignore the mask given when initialising the Plotter object and instead use the one given when calling
# this function. We'll warn in case anything (understandably) gets confused.
cmap = kwargs.pop('cmap', None)
cb_label = kwargs.pop('cb_label', None)
var_field = kwargs.pop('variable_name', None)
if var_field:
cmap = colourmap('h')
if self.mask is not None:
warn("The mask given when initiliasing this object is ignored for plotting surfaces. Supply a `mask' "
"keyword to this function instead")
if self.tripcolor_plot:
# The field needs to be on the elements since that's the how it's plotted internally in tripcolor (unless
# shading is 'gouraud'). Check if we've been given element data and if not, convert accordingly. If we've
# been given a mask, things get compliated. We can't mask with a mask which varies in time (so a static
# in time mask is fine, but one that varies doesn't work with set_array. So we need to firstly find out
# if we've got a mask whose valid positions matches what we've already got, if so, easy peasy,
# just update the array with set_array. If it doesn't match, the only way to mask the data properly is to
# make a brand new plot.
if 'mask' in kwargs:
if len(self.tripcolor_plot.get_array()) == ~kwargs['mask'].sum():
if self._debug:
print('updating')
# Mask is probably the same as the previous one (based on number of positions). Mask sense needs
# to be inverted when setting the array as we're supplying valid positions, not hiding invalid
# ones. Confusing, isn't it. You can imagine the fun I had figuring this out.
if len(field) == len(self.mx):
self.tripcolor_plot.set_array(nodes2elems(field, self.triangles)[~kwargs['mask']])
else:
self.tripcolor_plot.set_array(field[~kwargs['mask']])
return
else:
# Nothing to do here except clear the plot and make a brand new plot (which is a lot slower),
self.tripcolor_plot.remove()
if self._debug:
print('replotting')
else:
if len(field) == len(self.mx):
self.tripcolor_plot.set_array(nodes2elems(field, self.triangles))
else:
self.tripcolor_plot.set_array(field)
return
self.tripcolor_plot = self.axes.tripcolor(self.mx, self.my, self.triangles, np.squeeze(field), *args,
vmin=self.vmin, vmax=self.vmax, norm=self.norm, cmap = cmap,
**self._plot_projection, **kwargs)
if self.cartesian:
self.axes.set_aspect('equal')
self.axes.set_xlim(self.mx.min(), self.mx.max())
self.axes.set_ylim(self.my.min(), self.my.max())
extend = copy.copy(self.extend)
if extend is None:
extend = self.get_colourbar_extension(field, (self.vmin, self.vmax))
if self.cbar is None:
if self.cartesian:
divider = make_axes_locatable(self.axes)
cax = divider.append_axes("right", size="3%", pad=0.1)
self.cbar = self.figure.colorbar(self.tripcolor_plot, cax=cax, extend=extend)
elif self.mapper == 'cartopy':
divider = make_axes_locatable(self.axes)
cax = divider.append_axes("right", size="3%", pad=0.05, axes_class=plt.Axes)
self.cbar = self.figure.colorbar(self.tripcolor_plot, cax=cax, extend=extend)
self.cbar.ax.tick_params(labelsize=self.fs)
else:
self.cbar = self.m.colorbar(self.tripcolor_plot, extend=extend)
self.cbar.ax.tick_params(labelsize=self.fs)
if self.cb_label:
self.cbar.set_label(self.cb_label, size=self.fs)
if cb_label: # over-ride plotter label
self.cbar.set_label(cb_label, size=self.fs)
    def plot_quiver(self, u, v, field=False, dx=None, dy=None, add_key=True, scale=1.0, label=None, mask_land=True, *args, **kwargs):
        """
        Quiver plot using velocity components.

        Parameters
        ----------
        u : np.ndarray
            u-component of the velocity field.
        v : np.ndarray
            v-component of the velocity field
        field : np.ndarray
            velocity magnitude field. Used to colour the vectors. Also adds a colour bar which uses the cb_label and
            cmap, if provided.
        add_key : bool, optional
            Add key for the quiver plot. Defaults to True.
        dx, dy : float, optional
            If given, the vectors will be plotted on a regular grid at intervals of `dx' and `dy'. If `dy' is omitted,
            it is assumed to be the same as `dx'.
        scale : float, optional
            Scaling to be provided to arrows with scale_units of inches. Defaults to 1.0.
        label : str, optional
            Give label to use for the quiver key (defaults to "`scale' ms^{-1}").
        mask_land : bool, optional
            Set to False to disable the (slow) masking of regular locations outside the model domain. Defaults to True.

        Additional `args' and `kwargs' are passed to `matplotlib.pyplot.quiver'.
        """
        # Start with a mask which keeps every element position.
        # NOTE(review): the 2D shape in the non-FileReader branch looks odd next to the 1D mask used
        # everywhere else below — confirm it is intended.
        if not self._FileReader:
            xy_mask = np.full((len(self.lonc), len(self.latc)), True)
        else:
            xy_mask = np.full((len(self.lonc)), True)
        if dx is not None:
            # Subsample onto a regular grid at (dx, dy) spacing, masking land if requested.
            if dy is None:
                dy = dx
            # NOTE(review): _make_regular_grid provides `_regular_x', not `_regular_lon', so this
            # hasattr check looks like it never caches and the grid is rebuilt every call — confirm.
            if not hasattr(self, '_regular_lon'):
                self._make_regular_grid(dx, dy, mask_land=mask_land)
            xy_mask = self._mask_for_unstructured
            if self.cartesian:
                mxc, myc = self._regular_x, self._regular_y
            else:
                mxc, myc = self.m(self._regular_x, self._regular_y)
        else:
            # No regridding requested: plot at the native element positions.
            mxc, myc = self.mxc, self.myc
        # Drop positions outside the plot domain.
        u = u[xy_mask]
        v = v[xy_mask]
        if np.any(field):
            field = field[xy_mask]
        # If a quiver plot already exists, just update the vector data in place and return.
        if self.quiver_plot:
            if np.any(field):
                self.quiver_plot.set_UVC(u, v, field)
            else:
                self.quiver_plot.set_UVC(u, v)
            return
        if not label:
            label = '{} '.format(scale) + r'$\mathrm{ms^{-1}}$'
        if np.any(field):
            # Coloured vectors: colour by `field' and add a colour bar.
            self.quiver_plot = self.axes.quiver(mxc, myc, u, v, field,
                                                cmap=self.cmap,
                                                units='inches',
                                                scale_units='inches',
                                                scale=scale,
                                                *args,
                                                **self._plot_projection,
                                                **kwargs)
            # NOTE(review): this uses the Basemap colour bar unconditionally; `self.m' may not exist
            # for cartesian/cartopy plots — confirm.
            self.cbar = self.m.colorbar(self.quiver_plot)
            self.cbar.ax.tick_params(labelsize=self.fs)
            if self.cb_label:
                self.cbar.set_label(self.cb_label)
        else:
            # Plain (uncoloured) vectors.
            self.quiver_plot = self.axes.quiver(mxc, myc, u, v, units='inches', scale_units='inches', scale=scale,
                                                **self._plot_projection)
        if add_key:
            self.quiver_key = plt.quiverkey(self.quiver_plot, 0.9, 0.9, scale, label, coordinates='axes')
        if self.cartesian:
            # Cartesian plots need a sensible aspect ratio and explicit limits.
            self.axes.set_aspect('equal')
            self.axes.set_xlim(mxc.min(), mxc.max())
            self.axes.set_ylim(myc.min(), myc.max())
def plot_lines(self, x, y, zone_number='30N', *args, **kwargs):
"""
Plot geographical lines.
Parameters:
-----------
x : np.ndarray, list
Array of x coordinates to plot (cartesian coordinates).
y : np.ndarray, list
Array of y coordinates to plot (cartesian coordinates).
zone_number : string, optional
See PyFVCOM.coordinates documentation for a full list of supported codes. Defaults to `30N'.
Additional `args' and `kwargs' are passed to `matplotlib.pyplot.plot'.
"""
if 'color' not in kwargs:
kwargs['color'] = 'r'
lon, lat = lonlat_from_utm(x, y, zone_number)
if self.cartesian:
mx, my = lon, lat
else:
mx, my = self.m(lon, lat)
self.line_plot = self.axes.plot(mx, my, *args, **self._plot_projection, **kwargs)
    def plot_scatter(self, x, y, zone_number='30N', *args, **kwargs):
        """
        Plot scatter points.

        Parameters
        ----------
        x : np.ndarray, list
            Array of x coordinates to plot (cartesian coordinates).
        y : np.ndarray, list
            Array of y coordinates to plot (cartesian coordinates).
        zone_number : string, optional
            See PyFVCOM.coordinates documentation for a full list of supported codes. Defaults to `30N'.

        Additional `args' and `kwargs' are passed to `matplotlib.pyplot.scatter'.
        """
        # Collection plotting kwargs
        if self.mapper=='cartopy':
            # NOTE(review): this branch passes `x' and `y' straight to scatter without the UTM ->
            # lon/lat conversion applied below, and positional `args' are dropped — confirm both
            # are intended.
            self.scatter_plot = self.axes.scatter(x, y, **self._plot_projection, **kwargs)
            self.axes.set_extent(self.extents, crs=self._plot_projection['transform'])
            if self.coast:
                self.axes.coastlines(resolution='10m', linewidth=self.line_width)
            if self.tick_inc:
                self._add_ticks(self.axes)
        else:
            # Convert to lon/lat, then to map coordinates if we're on a Basemap plot.
            lon, lat = lonlat_from_utm(x, y, zone_number)
            if self.cartesian:
                mx, my = lon, lat
            else:
                mx, my = self.m(lon, lat)
            self.scatter_plot = self.axes.scatter(mx, my, *args, **self._plot_projection, **kwargs)
    def plot_streamlines(self, u, v, dx=1000, dy=None, mask_land=True, **kwargs):
        """
        Plot streamlines of the given u and v data.

        The data will be interpolated to a regular grid (the streamline plotting function does not support
        unstructured grids.

        Parameters
        ----------
        u, v : np.ndarray
            Unstructured arrays of a velocity field. Single time and depth only.
        dx : float, optional
            Grid spacing for the interpolation in the x direction in metres. Defaults to 1000 metres.
        dy : float, optional
            Grid spacing for the interpolation in the y direction in metres. Defaults to `dx'.
        mask_land : bool, optional
            Set to False to disable the (slow) masking of regular locations outside the model domain. Defaults to True.

        Additional `kwargs' are passed to `matplotlib.pyplot.streamplot'.

        Notes
        -----
        - This method must interpolate the FVCOM grid onto a regular grid prior to plotting, which obviously has a
          performance penalty.
        - The `density' keyword argument for is set by default to [2.5, 5] which seems to work OK for my data. Change
          by passing a different value if performance is dire.
        - To set the colour limits for the arrows, pass a matplotlib.colors.Normalize object with the min/max values
          to PyFVCOM.plot.Plotter. Don't bother trying to do it via self.streamline_plot.arrows.set_clim(). The
          equivalent method on self.streamline_plot.lines works fine, but the arrows one doesn't.
        """
        if self.mapper != 'cartopy':
            raise ValueError("The streamplot function is subtly broken with Basemap plotting. Use cartopy instead.")
        if dx is not None and dy is None:
            dy = dx
        # In theory, changing the x and y positions as well as the colours is possible via a few self.stream_plot
        # methods (set_offsets, set_array), I've not found the correct way of doing this, however. In addition,
        # removing the lines is easy enough (self.streamline_plot.lines.remove()) but the equivalent method for
        # self.streamline_plot.arrows returns "not yet implemented". So, we'll just nuke the plot and start again.
        if self.streamline_plot is not None:
            self.replot()
        # Set a decent initial density if we haven't been given one in kwargs.
        if 'density' not in kwargs:
            kwargs['density'] = [2.5, 5]
        # A colour map supplied at initialisation takes precedence over one passed here.
        if 'cmap' in kwargs and self.cmap is not None:
            kwargs.pop('cmap', None)
            warn('Ignoring the given colour map as one has been supplied during initialisation.')
        # Build (and cache) the regular grid onto which we interpolate.
        if not hasattr(self, '_mask_for_unstructured'):
            self._make_regular_grid(dx, dy, mask_land=mask_land)
        # Remove singleton dimensions because they break the masking.
        u = np.squeeze(u)
        v = np.squeeze(v)
        # Pick the plotting coordinates and the source (unstructured) coordinates for interpolation.
        if self.cartesian:
            plot_x, plot_y = self._regular_x[0, :], self._regular_y[:, 0]
            fvcom_x, fvcom_y = self.xc[self._mask_for_unstructured], self.yc[self._mask_for_unstructured]
        else:
            if self.mapper == 'cartopy':
                plot_x, plot_y = self._regular_x, self._regular_y
            else:
                # The Basemap version needs 1D arrays only.
                plot_x, plot_y = self._regular_x[0, :], self._regular_y[:, 0]
            fvcom_x, fvcom_y = self.lonc[self._mask_for_unstructured], self.latc[self._mask_for_unstructured]
        # Interpolate whatever positions we have (spherical/cartesian).
        ua_r = mp_interp_func((fvcom_x, fvcom_y, u[self._mask_for_unstructured], self._regular_x, self._regular_y))
        va_r = mp_interp_func((fvcom_x, fvcom_y, v[self._mask_for_unstructured], self._regular_x, self._regular_y))
        # Check for a colour map in kwargs and if we have one, make a magnitude array for the plot. Check we haven't
        # been given a color array in kwargs too.
        speed_r = None
        if self.cmap is not None:
            if 'color' in kwargs:
                # Use the caller's colour data, interpolated onto the regular grid.
                speed_r = mp_interp_func((fvcom_x, fvcom_y,
                                          np.squeeze(kwargs['color'])[self._mask_for_unstructured],
                                          self._regular_x, self._regular_y))
                kwargs.pop('color', None)
            else:
                speed_r = np.hypot(ua_r, va_r)
        # Apparently, really tiny velocities fail to plot, so skip if we are in that situation. Exclude NaNs in this
        # check. I'm not a fan of this hardcoded threshold...
        # Nope, don't do this, let the calling script handle the error.
        # if np.all(np.hypot(u[np.isfinite(u)], v[np.isfinite(v)]) < 0.04):
        #     if self._debug:
        #         print('Skipping due to all tiny values in the input vector components.')
        #     return
        # Mask off arrays as appropriate.
        ua_r = np.ma.array(ua_r, mask=self._mask_for_regular)
        va_r = np.ma.array(va_r, mask=self._mask_for_regular)
        # Force the underlying data to NaN for the masked region. This is a problem which manifests itself when
        # plotting with cartopy.
        ua_r.data[self._mask_for_regular] = np.nan
        va_r.data[self._mask_for_regular] = np.nan
        if self.cmap is not None:
            speed_r = np.ma.array(speed_r, mask=self._mask_for_regular)
            speed_r.data[self._mask_for_regular] = np.nan
        # Now we have some data, do the streamline plot.
        self.streamline_plot = self.axes.streamplot(plot_x, plot_y, ua_r, va_r, color=speed_r, cmap=self.cmap,
                                                    norm=self.norm, **self._plot_projection, **kwargs)
        if self.mapper == 'cartopy' and not hasattr(self, '_mask_patch'):
            # I simply cannot get cartopy to not plot arrows outside the domain. So, the only thing I can think of
            # doing is making a polygon out of the region which is outside the model domain and plotting that on top
            # as white. It'll sit just above the arrow zorder. It's not currently possible to simply remove the
            # arrows either.
            warn("Cartopy doesn't mask the arrows on the streamlines correctly, so we're overlaying a white polygon to "
                 "hide them. Things underneath it will disappear.")
            model_boundaries = get_boundary_polygons(self.triangles)
            model_polygons = [Polygon(np.asarray((self.lon[i], self.lat[i])).T) for i in model_boundaries]
            polygon_areas = [i.area for i in model_polygons]
            main_polygon_index = polygon_areas.index(max(polygon_areas))
            model_domain = model_polygons[main_polygon_index]
            # Make a polygon of the regular grid extents and then subtract the model domain from that to yield a
            # masking polyon. Plot that afterwards.
            regular_domain = Polygon(((self._regular_x.min() - 1, self._regular_y.min() - 1),  # lower left
                                      (self._regular_x.min() - 1, self._regular_y.max() + 1),  # upper left
                                      (self._regular_x.max() + 1, self._regular_y.max() + 1),  # upper right
                                      (self._regular_x.max() + 1, self._regular_y.min() - 1)))  # lower right
            mask_domain = regular_domain.difference(model_domain)
            self._mask_patch = PolygonPatch(mask_domain, facecolor='w', edgecolor='none',
                                            **self._plot_projection)
            patch = self.axes.add_patch(self._mask_patch)
            # Sit the mask just above the arrows so only they are hidden.
            patch.set_zorder(self.streamline_plot.arrows.get_zorder() + 1)
        if self.cmap is not None:
            # Add (or reuse) a colour bar for the vector magnitudes.
            extend = copy.copy(self.extend)
            if extend is None:
                extend = self.get_colourbar_extension(speed_r, (self.vmin, self.vmax))
            if self.cbar is None:
                if self.cartesian:
                    divider = make_axes_locatable(self.axes)
                    cax = divider.append_axes("right", size="3%", pad=0.1)
                    self.cbar = self.figure.colorbar(self.streamline_plot.lines, cax=cax, extend=extend)
                elif self.mapper == 'cartopy':
                    self.cbar = self.figure.colorbar(self.streamline_plot.lines, extend=extend)
                else:
                    self.cbar = self.m.colorbar(self.streamline_plot.lines, extend=extend)
                self.cbar.ax.tick_params(labelsize=self.fs)
                if self.cb_label:
                    self.cbar.set_label(self.cb_label)
        if self.cartesian:
            self.axes.set_aspect('equal')
            self.axes.set_xlim(plot_x.min(), plot_x.max())
            self.axes.set_ylim(plot_y.min(), plot_y.max())
def _make_regular_grid(self, dx, dy, mask_land=True):
"""
Make a regular grid at intervals of `dx', `dy' for the current plot domain. Supports both spherical and
cartesian grids.
Locations which are either outside the model domain (defined as the largest polygon by area) or on islands
are stored in the self._mask_for_regular array.
Locations in the FVCOM grid which are outside the plotting extent are masked in the
self._mask_for_unstructured array.
Parameters
----------
dx : float
Grid spacing in the x-direction in metres.
dy :
Grid spacing in the y-direction in metres.
mask_land : bool, optional
Set to False to disable the (slow) masking of regular locations outside the model domain. Defaults to True.
Provides
--------
self._regular_x : np.ma.ndarray
The regularly gridded x positions as a masked array.
self._regular_y : np.ma.ndarray
The regularly gridded y positions as a masked array.
self._mask_for_regular : np.ndarray
The mask for the regular grid positions.
self._mask_for_unstructured : np.ndarray
The mask for the unstructured positions within the current plot domain.
"""
# To speed things up, extract only the positions actually within the mapping domain.
if self.cartesian:
x = self.xc
y = self.yc
if self.extents is not None:
west, east, south, north = self.extents
else:
west, east, south, north = self.x.min(), self.x.max(), self.y.min(), self.y.max()
else:
x = self.lonc
y = self.latc
# Should we use self.extents here?
if self.mapper == 'basemap':
west, east, south, north = self.m.llcrnrlon, self.m.urcrnrlon, self.m.llcrnrlat, self.m.urcrnrlat
else:
west, east, south, north = self.lon.min(), self.lon.max(), self.lat.min(), self.lat.max()
self._mask_for_unstructured = (x >= west) & (x <= east) & (y >= south) * (y <= north)
x = x[self._mask_for_unstructured]
y = y[self._mask_for_unstructured]
if self.cartesian:
# Easy peasy, just return the relevant set of numbers with the given increments.
reg_x = np.arange(x.min(), x.max() + dx, dx)
reg_y = np.arange(y.min(), y.max() + dy, dy)
else:
# Convert dx and dy into spherical distances so we can do a regular grid on the lonc/latc arrays. This is a
# pretty hacky way of going about this.
xref, yref = self.xc[self._mask_for_unstructured].mean(), self.yc[self._mask_for_unstructured].mean()
# Get the zone we're in for the mean position.
_, _, zone = utm_from_lonlat(x.mean(), y.mean())
start_x, start_y = lonlat_from_utm(xref, yref, zone=zone[0])
_, end_y = lonlat_from_utm(xref, yref + dy, zone=zone[0])
end_x, _ = lonlat_from_utm(xref + dx, yref, zone=zone[0])
dx_spherical = end_x - start_x
dy_spherical = end_y - start_y
reg_x = np.arange(x.min(), x.max() + dx_spherical, dx_spherical)
reg_y = np.arange(y.min(), y.max() + dy_spherical, dy_spherical)
self._regular_x, self._regular_y = np.meshgrid(reg_x, reg_y)
self._mask_for_regular = np.full(self._regular_x.shape, False)
if mask_land:
# Make a mask for the regular grid. This uses the model domain to identify points which are outside the
# grid. Those are set to False whereas those in the domain are True. We assume the longest polygon is the
# model boundary and all other polygons are islands within it.
model_boundaries = get_boundary_polygons(self.triangles)
model_polygons = [Polygon(np.asarray((self.lon[i], self.lat[i])).T) for i in model_boundaries]
polygon_areas = [i.area for i in model_polygons]
main_polygon_index = polygon_areas.index(max(polygon_areas))
# Find locations outside the main model domain.
ocean_indices, land_indices = [], []
for index, sample in enumerate(zip(np.array((self._regular_x.ravel(), self._regular_y.ravel())).T)):
point = Point(sample[0])
if self._debug:
print(f'Checking outside domain point {index} of {len(self._regular_x.ravel())}', flush=True)
if point.intersects(model_polygons[main_polygon_index]):
ocean_indices.append(index)
else:
land_indices.append(index)
# Mask off indices outside the main model domain.
ocean_row, ocean_column = np.unravel_index(ocean_indices, self._regular_x.shape)
land_row, land_column = np.unravel_index(land_indices, self._regular_x.shape)
self._mask_for_regular[land_row, land_column] = True
# To remove the sampling stations on islands, identify points which intersect the remaining polygons,
# and then remove them from the sampling site list.
land_indices = []
# Exclude the main polygon from the list of polygons.
# TODO: This is ripe for parallelisation, especially as it's pretty slow in serial.
for pi, polygon in enumerate([i for count, i in enumerate(model_polygons) if count != main_polygon_index]):
for oi, (row, column, index) in enumerate(zip(ocean_row, ocean_column, ocean_indices)):
point = Point((self._regular_x[row, column], self._regular_y[row, column]))
if self._debug:
print(f'Polygon {pi + 1} of {len(model_polygons) - 1}: '
f'ocean point {oi} of {len(ocean_indices)}', flush=True)
if point.intersects(polygon):
land_indices.append(index)
# Mask off island indices.
land_row, land_column = np.unravel_index(land_indices, self._regular_x.shape)
self._mask_for_regular[land_row, land_column] = True
self._regular_x = np.ma.masked_array(self._regular_x, mask=self._mask_for_regular)
self._regular_y = np.ma.masked_array(self._regular_y, mask=self._mask_for_regular)
def set_title(self, title):
""" Set the title for the current axis. """
self.axes.set_title(title, fontsize=self.fs)
def add_scale(self, x, y, ref_lon, ref_lat, length, **kwargs):
"""
Add a Basemap scale to the plot.
Parameters
----------
x, y : float
The position (in map units).
ref_lon, ref_lat : float
The reference longitude and latitude for the scale length.
length : float
The length of the scale (in kilometres).
Additional keyword arguments are passed to self.m.drawmapscale.
"""
self.m.drawmapscale(x, y, ref_lon, ref_lat, length, ax=self.axes, **kwargs)
def close(self):
""" Close the current figure. """
plt.close(self.figure)
class CrossPlotter(Plotter):
""" Create cross-section plots based on output from the FVCOM.
Class to assist in the creation of cross section plots of FVCOM data
Provides
--------
cross_section_init(cross_section_points, dist_res) -
Initialises the cross section working out the time varying y coordinates and wetting and drying.
cross_section_points - list of 2x2 arrays defining the cross section (piecewise lines)
dist_res - resolution to sample the cross section at
plot_pcolor_field(var, timestep) -
Plot pcolor of variable at given timestep index
var - string of variable name
timestep - integer timestep index
Example
-------
>>> import numpy as np
>>> import PyFVCOM as pf
>>> import matplotlib.pyplot as plt
>>> filestr = '/data/euryale2/scratch/mbe/Models_2/FVCOM/tamar/output/depth_tweak2_phys_only/2006/03/tamar_v2_0001.nc'
>>> filereader = pf.read.FileReader(filestr)
>>> cross_points = [np.asarray([[413889.37304891, 5589079.54545454], [415101.00156087, 5589616.47727273]])]
>>> c_plot = pf.plot.CrossPlotter(filereader, cmap='bwr', vmin=5, vmax=10)
>>> c_plot.cross_section_init(cross_points, dist_res=5)
>>> c_plot.plot_pcolor_field('temp', 150)
>>> plt.show()
Notes
-----
Only works with FileReader data. No plans to change this.
"""
# TODO
# - Currently only works for scalar variables, want to get it working for vectors to do u/v/w plots
# - Sort colour bars
# - Sort left hand channel justification for multiple channels.
# - Error handling for no wet/dry, no land
# - Plus a lot of other stuff. And tidy it up.
def _init_figure(self):
"""
Initialise a cross-sectional plot object.
"""
self.cross_plot_x = None
self.cross_plot_y = None
self.cross_plot_x_pcolor = None
self.cross_plot_y_pcolor = None
self.sub_samp = None
self.sample_points = None
self.sample_points_ind = None
self.sample_points_ind_pcolor = None
self.wet_points_data = None
self.chan_x = None
self.chan_y = None
self.sub_samp = None
self.sel_points = None
self.xlim_vals = None
self.ylim_vals = None
if self._FileReader:
self.nv = self.ds.grid.nv
self.x = self.ds.grid.x
self.y = self.ds.grid.y
else:
print('Only implemented for file reader input')
raise NotImplementedError
if self.nv.min() != 1:
self.nv -= self.nv.min()
self.triangles = self.nv.transpose() - 1
if self.figure is None:
figsize = (cm2inch(self.figsize[0]), cm2inch(self.figsize[1]))
self.figure = plt.figure(figsize=figsize)
self.figure.set_facecolor('white')
if not self.axes:
self.axes = self.figure.add_subplot(1, 1, 1)
if self.axis_position:
self.axes.set_position(self.axis_position)
if self.title:
self.axes.set_title(self.title)
    def cross_section_init(self, cross_section_points, dist_res=50, variable_at_cells=False, wetting_and_drying=True):
        """
        Sample the cross section.

        TODO: Finish this docstring!

        Parameters
        ----------
        cross_section_points : list
            List of 2x2 np.ndarrays, each defining one piecewise line segment of the cross section.
        dist_res : float, optional
            Resolution at which to sample along the cross section. Defaults to 50.
        variable_at_cells : bool, optional
            Set to True when the variable to be plotted lives on cell centres rather than nodes.
            Defaults to False.
        wetting_and_drying : bool, optional
            Set to True to load wet/dry information for masking. Defaults to True.
        """
        # Sample the first segment, then append the samples from any further segments.
        [sub_samp, sample_cells, sample_nodes] = getcrossectiontriangles(cross_section_points[0],
                                                                         self.triangles, self.x, self.y, dist_res)
        if len(cross_section_points) > 1:
            for this_cross_section in cross_section_points[1:]:
                [this_sub_samp, this_sample_cells, this_sample_nodes] = getcrossectiontriangles(this_cross_section,
                                                                                               self.triangles,
                                                                                               self.x, self.y,
                                                                                               dist_res)
                sub_samp = np.vstack([sub_samp, this_sub_samp])
                sample_cells = np.append(sample_cells, this_sample_cells)
                sample_nodes = np.append(sample_nodes, this_sample_nodes)
        # Choose cell- or node-based sample indices depending on where the variable lives.
        if variable_at_cells:
            self.sample_points = sample_cells
        else:
            self.sample_points = sample_nodes
        self.sub_samp = sub_samp
        # Unique grid indices crossed by the section (-1 flags samples outside the grid).
        self.sel_points = np.asarray(np.unique(self.sample_points[self.sample_points != -1]), dtype=int)
        # Map each sample along the section to its position in self.sel_points; out-of-grid samples
        # get the sentinel value len(self.sel_points).
        sample_points_ind = np.zeros(len(self.sample_points))
        for this_ind, this_point in enumerate(self.sel_points):
            sample_points_ind[self.sample_points == this_point] = this_ind
        sample_points_ind[self.sample_points == -1] = len(self.sel_points)
        self.sample_points_ind = np.asarray(sample_points_ind, dtype=int)
        # Surface elevation is needed to compute time-varying depths.
        if not hasattr(self.ds.data, 'zeta'):
            self.ds.load_data(['zeta'])
        if variable_at_cells:
            siglay = self.ds.grid.siglay_center[:, self.sel_points]
            siglev = self.ds.grid.siglev_center[:, self.sel_points]
            h = self.ds.grid.h_center[self.sel_points]
            # Cell-centre zeta is approximated as the mean of the three surrounding node values.
            zeta = np.mean(self.ds.data.zeta[:, self.ds.grid.nv - 1], axis=1)[:, self.sel_points]
        else:
            siglay = self.ds.grid.siglay[:, self.sel_points]
            siglev = self.ds.grid.siglev[:, self.sel_points]
            h = self.ds.grid.h[self.sel_points]
            zeta = self.ds.data.zeta[:, self.sel_points]
        # Depths at layer centres (for the values) and layer interfaces (for the pcolor cell edges).
        depth_sel = -unstructured_grid_depths(h, zeta, siglay, nan_invalid=True)
        depth_sel_pcolor = -unstructured_grid_depths(h, zeta, siglev, nan_invalid=True)
        depth_sel = self._nan_extend(depth_sel)
        depth_sel_pcolor = self._nan_extend(depth_sel_pcolor)
        # set up the x and y for the plots
        self.cross_plot_x = np.tile(np.arange(0, len(self.sample_points)),
                                    [depth_sel.shape[1], 1]) * dist_res + dist_res * 1/2
        self.cross_plot_x_pcolor = np.tile(np.arange(0, len(self.sample_points) + 1),
                                           [depth_sel_pcolor.shape[1], 1]) * dist_res
        self.cross_plot_y = -depth_sel[:, :, self.sample_points_ind]
        # Duplicate the first in-grid column so the pcolor edges line up with the sample columns.
        insert_ind = np.min(np.where(self.sample_points_ind != np.max(self.sample_points_ind))[0])
        self.sample_points_ind_pcolor = np.insert(self.sample_points_ind, insert_ind, self.sample_points_ind[insert_ind])
        self.cross_plot_y_pcolor = -depth_sel_pcolor[:, :, self.sample_points_ind_pcolor]
        # pre process the channel variables
        chan_y_raw = np.nanmin(self.cross_plot_y_pcolor, axis=1)[-1, :]
        chan_x_raw = self.cross_plot_x_pcolor[-1, :]
        max_zeta = np.ceil(np.max(zeta))
        if np.any(np.isnan(chan_y_raw)):
            chan_y_raw[np.min(np.where(~np.isnan(chan_y_raw)))] = max_zeta  # bodge to get left bank adjacent
            chan_y_raw[np.isnan(chan_y_raw)] = max_zeta
        self.chan_x, self.chan_y = self._chan_corners(chan_x_raw, chan_y_raw)
        # sort out wetting and drying nodes if requested
        if wetting_and_drying:
            if variable_at_cells:
                self.ds.load_data(['wet_cells'])
                self.wet_points_data = np.asarray(self.ds.data.wet_cells[:, self.sel_points], dtype=bool)
            else:
                self.ds.load_data(['wet_nodes'])
                self.wet_points_data = np.asarray(self.ds.data.wet_nodes[:, self.sel_points], dtype=bool)
        else:
            # No wetting and drying: treat every position as wet at all times.
            self.wet_points_data = np.asarray(np.ones((self.ds.dims.time, len(self.sel_points))), dtype=bool)
        self.ylim_vals = [np.floor(np.nanmin(self.cross_plot_y_pcolor)), np.ceil(np.nanmax(self.cross_plot_y_pcolor)) + 1]
        self.xlim_vals = [np.nanmin(self.cross_plot_x_pcolor), np.nanmax(self.cross_plot_x_pcolor)]
def _save_existing_cross_section(self):
required_param = ['sample_points', 'sub_samp', 'sel_points', 'sample_points_ind', 'cross_plot_x', 'cross_plot_x_pcolor', 'cross_plot_y', 'cross_plot_y_pcolor',
'chan_x', 'chan_y','wet_points_data','ylim_vals', 'xlim_vals']
cross_sect_dict = {}
for this_param in required_param:
try:
cross_sect_dict[this_param] = getattr(self, this_param)
except:
print('Missing {}'.format(this_param))
return cross_sect_dict
def _apply_existing_cross_section(self,cross_sect_dict):
"""
For some multiproccesing applications its useful to precalculate the cross section (as in cross_section_init) then be able to apply it from a presaved dictionary.
Parameters
----------
cross_sect_dict : dict
Must have the parameters of the
"""
required_param = ['sample_points', 'sub_samp', 'sel_points', 'sample_points_ind', 'cross_plot_x', 'cross_plot_x_pcolor', 'cross_plot_y', 'cross_plot_y_pcolor',
'chan_x', 'chan_y','wet_points_data','ylim_vals', 'xlim_vals']
for this_param in required_param:
try:
setattr(self, this_param, cross_sect_dict[this_param])
except:
print('Missing {}'.format(this_param))
    def plot_pcolor_field(self, var, timestep, plot_cbar=True):
        """
        Plot a pcolormesh of `var' along the cross-section for a single time step.

        Parameters
        ----------
        var : str or np.ndarray
            Either a variable name (loaded and extracted via `_var_prep') or an already-prepared 2D array of
            values along the cross-section.
        timestep : int
            Time index to plot.
        plot_cbar : bool, optional
            Add a colour bar to the figure. Defaults to True.

        """
        if isinstance(var, str):
            plot_z = self._var_prep(var, timestep).T
        else:
            # Caller has already prepared the data array.
            plot_z = var
        plot_x = self.cross_plot_x_pcolor.T
        plot_y = self.cross_plot_y_pcolor[timestep, :, :].T
        # Cache the colour limits from the first plotted field so subsequent frames share them.
        if self.vmin is None:
            self.vmin = np.nanmin(plot_z)
        if self.vmax is None:
            self.vmax = np.nanmax(plot_z)
        # Draw one pcolormesh per selected node so each vertical column gets a uniform y grid.
        for this_node in self.sel_points:
            # choose_horiz = np.asarray(self.sample_points == this_node, dtype=bool)
            choose_horiz = np.asarray(np.where(self.sample_points == this_node)[0], dtype=int)
            # Extend by one column: pcolormesh needs cell edges, one more than the cell centres.
            choose_horiz_extend = np.asarray(np.append(choose_horiz, np.max(choose_horiz) + 1), dtype=int)
            # Use the median depth profile across the node's columns so the cells line up vertically.
            y_uniform = np.tile(np.median(plot_y[choose_horiz_extend, :], axis=0), [len(choose_horiz_extend), 1])
            pc = self.axes.pcolormesh(plot_x[choose_horiz_extend, :],
                                      y_uniform,
                                      plot_z[choose_horiz, :],
                                      cmap=self.cmap,
                                      vmin=self.vmin,
                                      vmax=self.vmax,
                                      **self._plot_projection)
        # Overlay the channel bed outline.
        self.axes.plot(self.chan_x, self.chan_y, linewidth=2, color='black')
        if plot_cbar:
            # NOTE(review): the colour bar is attached to the last pcolormesh only; all share vmin/vmax so the
            # scale is consistent.
            self.figure.colorbar(pc)
        self.axes.set_ylim(self.ylim_vals)
        self.axes.set_xlim(self.xlim_vals)
    def plot_quiver(self, timestep, u_str='u', v_str='v', w_str='ww', w_factor=1):
        """
        Plot cross-section velocity arrows (along-transect and vertical components) over a pcolormesh of the
        across-transect component for a single time step.

        Parameters
        ----------
        timestep : int
            Time index to plot.
        u_str : str, optional
            Name of the eastward velocity variable. Defaults to 'u'.
        v_str : str, optional
            Name of the northward velocity variable. Defaults to 'v'.
        w_str : str, optional
            Name of the vertical velocity variable. Defaults to 'ww'.
        w_factor : float, optional
            Scale factor applied to the vertical component to make it visible against the horizontal flow.
            Defaults to 1.

        """
        raw_cross_u = self._var_prep(u_str, timestep)
        raw_cross_v = self._var_prep(v_str, timestep)
        raw_cross_w = self._var_prep(w_str, timestep)
        # _uvw_rectify returns (along-transect, vertical, across-transect) components: here cross_u is flow
        # along the section, cross_v is the vertical velocity and cross_io is the normal (in/out) component.
        cross_u, cross_v, cross_io = self._uvw_rectify(raw_cross_u, raw_cross_v, raw_cross_w)
        plot_x = np.ma.masked_invalid(self.cross_plot_x).T
        plot_y = np.ma.masked_invalid(self.cross_plot_y[timestep, :, :]).T
        # Background: the across-transect component as a filled field.
        self.plot_pcolor_field(cross_io.T, timestep)
        self.axes.quiver(plot_x, plot_y, cross_u.T, cross_v.T*w_factor, **self._plot_projection)
def _var_prep(self, var, timestep):
"""
Finish me.
TODO: docstring!
Parameters
----------
var :
timestep :
"""
if not hasattr(self.ds.data, var):
self.ds.load_data([var], dims={'time': [timestep]})
var_sel = np.squeeze(getattr(self.ds.data, var))[..., self.sel_points]
else:
time_sel = np.squeeze(getattr(self.ds.data, var)[timestep,...])
var_sel = np.squeeze(time_sel)[..., self.sel_points]
this_step_wet_points = np.asarray(self.wet_points_data[timestep, :], dtype=bool)
var_sel[:, ~this_step_wet_points] = np.NaN
self.var_sel = var_sel
var_sel_ext = self._nan_extend(var_sel)
cross_plot_z = var_sel_ext[:, self.sample_points_ind]
return np.ma.masked_invalid(cross_plot_z)
    def _uvw_rectify(self, u_field, v_field, w_field):
        """
        Rotate horizontal velocities into the cross-section's reference frame.

        Parameters
        ----------
        u_field, v_field : np.ndarray
            Eastward and northward velocity along the cross-section (depth by sample point).
        w_field : np.ndarray
            Vertical velocity along the cross-section (passed through unchanged).

        Returns
        -------
        cross_lr : np.ma.MaskedArray
            Component parallel to the transect (left-right), invalid values masked.
        cross_ud : np.ndarray
            The vertical component (`w_field' unchanged).
        cross_io : np.ma.MaskedArray
            Component normal to the transect (in-out), invalid values masked.

        """
        cross_lr = np.empty(u_field.shape)
        cross_io = np.empty(v_field.shape)
        cross_ud = w_field
        pll_vec = np.empty([len(self.sub_samp), 2])
        # Unit vectors parallel to the transect via centred differences (point i+1 uses points i and i+2).
        for this_ind, (point_1, point_2) in enumerate(zip(self.sub_samp[0:-2], self.sub_samp[2:])):
            # work out pll vectors
            this_pll_vec = np.asarray([point_2[0] - point_1[0], point_2[1] - point_1[1]])
            pll_vec[this_ind + 1, :] = this_pll_vec / np.sqrt(this_pll_vec[0]**2 + this_pll_vec[1]**2)
        # The centred difference leaves both ends unset; copy the nearest interior vector into each.
        pll_vec[0] = pll_vec[1]
        pll_vec[-1] = pll_vec[-2]
        # Project each depth level's (u, v) pairs onto the transect direction.
        for this_ind, this_samp in enumerate(zip(u_field, v_field)):
            # dot product for parallel component
            cross_lr[this_ind, :] = np.asarray([np.dot(this_uv, this_pll) for this_uv, this_pll in zip(np.asarray(this_samp).T, pll_vec)])
            # cross product for normal component
            cross_io[this_ind, :] = np.asarray([np.cross(this_uv, this_pll) for this_uv, this_pll in zip(np.asarray(this_samp).T, pll_vec)])
        return np.ma.masked_invalid(cross_lr), cross_ud, np.ma.masked_invalid(cross_io)
@staticmethod
def _nan_extend(in_array):
if np.ndim(in_array) == 3:
nan_ext = np.empty([in_array.shape[0], in_array.shape[1], 1])
elif np.ndim(in_array) == 2:
nan_ext = np.empty([in_array.shape[0], 1])
else:
raise ValueError('Unsupported number of dimensions.')
nan_ext[:] = np.NaN
return np.append(in_array, nan_ext, axis=len(in_array.shape) - 1)
@staticmethod
def _chan_corners(chan_x, chan_y):
new_chan_x = [chan_x[0]]
new_chan_y = [chan_y[0]]
for this_ind, this_y in enumerate(chan_y[1:]):
if this_y != chan_y[this_ind] and not np.isnan(this_y) and not np.isnan(chan_y[this_ind]):
new_chan_x.append(chan_x[this_ind])
new_chan_y.append(this_y)
new_chan_x.append(chan_x[this_ind + 1])
new_chan_y.append(this_y)
return np.asarray(new_chan_x), np.asarray(new_chan_y)
class MPIWorker(object):
    """ Worker class for parallel plotting. """

    def __init__(self, comm=None, root=0, verbose=False):
        """
        Create a plotting worker object. MPIWorker.plot_* load and plot a subset in time of the results.

        Parameters
        ----------
        comm : mpi4py.MPI.Intracomm, optional
            The MPI intracommunicator object. Omit if not running in parallel.
        root : int, optional
            Specify a given rank to act as the root process. This is only for outputting verbose messages (if enabled
            with `verbose').
        verbose : bool, optional
            Set to True to enabled some verbose output messages. Defaults to False (no messages).

        """
        self.dims = None
        self.have_mpi = True
        try:
            from mpi4py import MPI
            self.MPI = MPI
        except ImportError:
            warn('No mpi4py found in this python installation. Some functions will be disabled.')
            self.have_mpi = False
        self.comm = comm
        if self.have_mpi:
            self.rank = self.comm.Get_rank()
        else:
            self.rank = 0
        self.root = root
        self._noisy = verbose
        # Set by _figure_prep / plot_* at plot time.
        self.field = None
        self.label = None
        self.clims = None

    def __loader(self, fvcom_file, variable):
        """
        Function to load and make meta-variables, if appropriate, which can then be plotted by `plot_*'.

        Parameters
        ----------
        fvcom_file : str, pathlib.Path
            The file to load.
        variable : str
            The variable name to load from `fvcom_file'. This can be a meta-variable name. Currently configured are:
                - 'speed'
                - 'depth_averaged_speed'
                - 'speed_anomaly'
                - 'depth_averaged_speed_anomaly'
                - 'direction'
                - 'depth_averaged_direction'

        Provides
        --------
        self.fvcom : PyFVCOM.read.FileReader
            The FVCOM data ready for plotting.

        """
        load_verbose = False
        if self._noisy and self.rank == self.root:
            load_verbose = True
            print(f'Loading {variable} data from netCDF...', end=' ', flush=True)
        # Meta-variables are derived from the raw velocity fields, so load those instead.
        load_vars = [variable]
        if variable in ('speed', 'direction', 'speed_anomaly'):
            load_vars = ['u', 'v']
        elif variable in ('depth_averaged_speed', 'depth_averaged_direction', 'depth_averaged_speed_anomaly'):
            load_vars = ['ua', 'va']
        elif variable == 'tauc':
            load_vars = [variable, 'temp', 'salinity']
        self.fvcom = FileReader(fvcom_file, variables=load_vars, dims=self.dims, verbose=load_verbose)
        try:
            self.fvcom.load_data(['wet_cells'])
        except NameError:
            # NOTE(review): confirm load_data raises NameError for a missing variable rather than
            # something more specific.
            print('No wetting and drying in model')
        # Make the meta-variable data.
        if variable in ('speed', 'direction'):
            self.fvcom.data.direction, self.fvcom.data.speed = vector2scalar(self.fvcom.data.u, self.fvcom.data.v)
            # Add the attributes for labelling.
            self.fvcom.atts.speed = PassiveStore()
            self.fvcom.atts.speed.long_name = 'speed'
            self.fvcom.atts.speed.units = 'ms^{-1}'
            self.fvcom.atts.direction = PassiveStore()
            self.fvcom.atts.direction.long_name = 'direction'
            self.fvcom.atts.direction.units = r'\degree'
            self.fvcom.variable_dimension_names[variable] = self.fvcom.variable_dimension_names['u']
        elif variable in ('depth_averaged_speed', 'depth_averaged_direction'):
            da_dir, da_speed = vector2scalar(self.fvcom.data.ua, self.fvcom.data.va)
            self.fvcom.data.depth_averaged_direction, self.fvcom.data.depth_averaged_speed = da_dir, da_speed
            # Add the attributes for labelling.
            self.fvcom.atts.depth_averaged_speed = PassiveStore()
            self.fvcom.atts.depth_averaged_speed.long_name = 'depth-averaged speed'
            self.fvcom.atts.depth_averaged_speed.units = 'ms^{-1}'
            self.fvcom.atts.depth_averaged_direction = PassiveStore()
            self.fvcom.atts.depth_averaged_direction.long_name = 'depth-averaged direction'
            self.fvcom.atts.depth_averaged_direction.units = r'\degree'
            self.fvcom.variable_dimension_names[variable] = self.fvcom.variable_dimension_names['ua']
        if variable == 'speed_anomaly':
            # Bug fix: only 'u' and 'v' are loaded for this meta-variable, so self.fvcom.data.speed never
            # existed here; derive the speed first.
            _, speed = vector2scalar(self.fvcom.data.u, self.fvcom.data.v)
            self.fvcom.data.speed_anomaly = speed.mean(axis=0) - speed
            # Bug fix: the labelling attributes were attached to 'speed' rather than 'speed_anomaly', so
            # _figure_prep could never find them for this variable.
            self.fvcom.atts.speed_anomaly = PassiveStore()
            self.fvcom.atts.speed_anomaly.long_name = 'speed anomaly'
            self.fvcom.atts.speed_anomaly.units = 'ms^{-1}'
            self.fvcom.variable_dimension_names[variable] = self.fvcom.variable_dimension_names['u']
        elif variable == 'depth_averaged_speed_anomaly':
            # Bug fix: self.fvcom.data.uava does not exist (only 'ua' and 'va' are loaded); compute the
            # depth-averaged speed first.
            _, da_speed = vector2scalar(self.fvcom.data.ua, self.fvcom.data.va)
            self.fvcom.data.depth_averaged_speed_anomaly = da_speed.mean(axis=0) - da_speed
            self.fvcom.atts.depth_averaged_speed_anomaly = PassiveStore()
            self.fvcom.atts.depth_averaged_speed_anomaly.long_name = 'depth-averaged speed anomaly'
            self.fvcom.atts.depth_averaged_speed_anomaly.units = 'ms^{-1}'
            self.fvcom.variable_dimension_names[variable] = self.fvcom.variable_dimension_names['ua']
        elif variable == 'tauc':
            # NOTE(review): depth2pressure is given self.fvcom.data.y as latitude -- confirm this is
            # intended rather than the grid latitude.
            pressure = nodes2elems(depth2pressure(self.fvcom.data.h, self.fvcom.data.y),
                                   self.fvcom.grid.triangles)
            tempc = nodes2elems(self.fvcom.data.temp, self.fvcom.grid.triangles)
            # Bug fix: salinity at elements was computed from the temperature field (copy-paste error).
            salinityc = nodes2elems(self.fvcom.data.salinity, self.fvcom.grid.triangles)
            rho = dens_jackett(tempc, salinityc, pressure[np.newaxis, :])
            # Convert the model's kinematic stress to a dynamic stress.
            self.fvcom.data.tauc *= rho
            self.fvcom.atts.tauc.units = 'Nm^{-2}'
            self.fvcom.variable_dimension_names[variable] = self.fvcom.variable_dimension_names['tauc']
        if self._noisy and self.rank == self.root:
            print('done.', flush=True)

    def _figure_prep(self, fvcom_file, variable, dimensions, time_indices, clims, label, **kwargs):
        """ Initialise a bunch of things which can be shared across different plot types. """
        # Should this loading stuff be outside this function?
        self.dims = dimensions
        if self.dims is None:
            self.dims = {}
        self.dims.update({'time': time_indices})
        self.__loader(fvcom_file, variable)
        self.field = np.squeeze(getattr(self.fvcom.data, variable))
        # Bug fix: honour a caller-supplied label (plot_streamlines passes one but previously it was ignored).
        if label is not None:
            self.label = label
        # Bug fix: honour a caller-supplied clims; previously a clims argument was ignored in serial runs and
        # in MPI runs the raw argument (often None) was broadcast over the freshly computed limits.
        if clims is not None:
            self.clims = list(clims)
        # Find out what the range of data is so we can set the colour limits automatically, if necessary.
        if self.clims is None:
            if self.have_mpi:
                global_min = self.comm.reduce(np.nanmin(self.field), op=self.MPI.MIN)
                global_max = self.comm.reduce(np.nanmax(self.field), op=self.MPI.MAX)
            else:
                # Fall back to local extremes.
                global_min = np.nanmin(self.field)
                global_max = np.nanmax(self.field)
            self.clims = [global_min, global_max]
            if self.have_mpi:
                # reduce only fills in the root rank; share the global limits with every rank.
                self.clims = self.comm.bcast(self.clims, root=0)
        if self.label is None:
            try:
                self.label = f'{getattr(self.fvcom.atts, variable).long_name.title()} ' \
                             f'(${getattr(self.fvcom.atts, variable).units}$)'
            except AttributeError:
                # No attributes for this variable; leave the label unset.
                pass
        grid_mask = np.ones(self.field[0].shape[0], dtype=bool)
        if 'extents' in kwargs:
            # We need to find the nodes/elements for the current variable to make sure our colour bar extends for
            # what we're plotting (not the entire data set).
            if 'node' in self.fvcom.variable_dimension_names[variable]:
                x = self.fvcom.grid.lon
                y = self.fvcom.grid.lat
            elif 'nele' in self.fvcom.variable_dimension_names[variable]:
                x = self.fvcom.grid.lonc
                y = self.fvcom.grid.latc
            extents = kwargs['extents']
            grid_mask = (x > extents[0]) & (x < extents[1]) & (y < extents[3]) & (y > extents[2])
        # Bug fix: use self.clims here -- the clims argument may legitimately be None.
        self.extend = colorbar_extension(self.clims[0], self.clims[1],
                                         self.field[..., grid_mask].min(), self.field[..., grid_mask].max())

    def plot_field(self, fvcom_file, time_indices, variable, figures_directory, label=None, set_title=False,
                   dimensions=None, clims=None, norm=None, mask=False, figure_index=None, figure_stem=None,
                   *args, **kwargs):
        """
        Plot a given horizontal surface for `variable' for the time indices in `time_indices'.

        Parameters
        ----------
        fvcom_file : str, pathlib.Path
            The file to load.
        time_indices : list-like
            The time indices to load from the `fvcom_file'.
        variable : str
            The variable name to load from `fvcom_file'.
        figures_directory : str, pathlib.Path
            Where to save the figures. Figure files are named f'{variable}_{time_index + 1}.png'.
        label : str, optional
            What label to use for the colour bar. If omitted, uses the variable's `long_name' and `units'.
        set_title : bool, optional
            Add a title comprised of each time (formatted as '%Y-%m-%d %H:%M:%S').
        dimensions : str, optional
            What additional dimensions to load (time is handled by the `time_indices' argument).
        clims : tuple, list, optional
            Limit the colour range to these values.
        norm : matplotlib.colors.Normalize, optional
            Apply the normalisation given to the colours in the plot.
        mask : bool
            Set to True to enable masking with the FVCOM wet/dry data.
        figure_index : int
            Give a starting index for the figure names. This is useful if you're calling this function in a loop over
            multiple files.
        figure_stem : str
            Give a file name prefix for the saved figures. Defaults to f'{variable}_streamline'.

        Additional args and kwargs are passed to PyFVCOM.plot.Plotter.

        """
        self.label = label
        self._figure_prep(fvcom_file, variable, dimensions, time_indices, clims, label, **kwargs)
        if self._noisy and self.rank == self.root:
            print(f'Creating Plotter object with kwargs. {kwargs}', flush=True)
        local_plot = Plotter(self.fvcom, cb_label=self.label, *args, **kwargs)
        if norm is not None:
            # Check for zero and negative values if we're LogNorm'ing the data and replace with the colour limit
            # minimum.
            invalid = self.field <= 0
            if np.any(invalid):
                if self.clims is None or self.clims[0] <= 0:
                    raise ValueError("For log-scaling data with zero or negative values, we need a floor with which "
                                     "to replace those values. This is provided through the `clims' argument, "
                                     "which hasn't been supplied, or which has a zero (or below) minimum.")
                self.field[invalid] = self.clims[0]
        if figure_index is None:
            figure_index = 0
        # Loop-invariant: pick the file name stem once rather than every iteration.
        if figure_stem is None:
            figure_stem = f'{variable}_streamline'
        for local_time, global_time in enumerate(time_indices):
            if mask:
                local_mask = getattr(self.fvcom.data, 'wet_cells')[local_time] == 0
            else:
                local_mask = np.zeros(self.fvcom.dims.nele, dtype=bool)
            local_plot.plot_field(self.field[local_time], mask=local_mask, variable_name=variable)
            # Bug fix: use self.clims -- the `clims' argument may be None (limits are computed in _figure_prep).
            local_plot.tripcolor_plot.set_clim(*self.clims)
            if set_title:
                title_string = self.fvcom.time.datetime[local_time].strftime('%Y-%m-%d %H:%M:%S')
                local_plot.set_title(title_string)
            local_plot.figure.savefig(str(Path(figures_directory, f'{figure_stem}_{figure_index + global_time + 1:04d}.png')),
                                      bbox_inches='tight',
                                      pad_inches=0.2,
                                      dpi=120)

    def plot_streamlines(self, fvcom_file, time_indices, variable, figures_directory, dx=None, dy=None, label=None,
                         set_title=False, dimensions=None, clims=None, mask=False, figure_index=None, figure_stem=None,
                         stkwargs=None, mask_land=True, *args, **kwargs):
        """
        Plot a given horizontal surface for `variable' for the time indices in `time_indices'.

        Parameters
        ----------
        fvcom_file : str, pathlib.Path
            The file to load.
        time_indices : list-like
            The time indices to load from the `fvcom_file'.
        variable : str
            The variable name to load from `fvcom_file'.
        figures_directory : str, pathlib.Path
            Where to save the figures. Figure files are named f'{variable}_streamlines_{time_index + 1}.png'.
        dx, dy : float, optional
            If given, the streamlines will be plotted on a regular grid at intervals of `dx' and `dy'. If `dy' is
            omitted, it is assumed to be the same as `dx'.
        label : str, optional
            What label to use for the colour bar. If omitted, uses the variable's `long_name' and `units'.
        set_title : bool, optional
            Add a title comprised of each time (formatted as '%Y-%m-%d %H:%M:%S').
        dimensions : str, optional
            What additional dimensions to load (time is handled by the `time_indices' argument).
        clims : tuple, list, optional
            Limit the colour range to these values.
        mask : bool, optional
            Set to True to enable masking with the FVCOM wet/dry data.
        figure_index : int, optional
            Give a starting index for the figure names. This is useful if you're calling this function in a loop over
            multiple files.
        figure_stem : str, optional
            Give a file name prefix for the saved figures. Defaults to f'{variable}_streamline'.
        mask_land : bool, optional
            Set to False to disable the (slow) masking of regular locations outside the model domain. Defaults to True.
        stkwargs : dict, optional
            Additional streamplot keyword arguments to pass.

        Additional args and kwargs are passed to PyFVCOM.plot.Plotter.

        """
        if stkwargs is None:
            stkwargs = {}
        if dx is not None and dy is None:
            dy = dx
        self._figure_prep(fvcom_file, variable, dimensions, time_indices, clims, label, **kwargs)
        local_plot = Plotter(self.fvcom, cb_label=self.label, *args, **kwargs)
        # Get the vector field of interest based on the variable name.
        if 'depth_averaged' in variable:
            u, v = self.fvcom.data.ua, self.fvcom.data.va
        else:
            u, v = np.squeeze(self.fvcom.data.u), np.squeeze(self.fvcom.data.v)
        if figure_index is None:
            figure_index = 0
        # Loop-invariant: pick the file name stem once rather than every iteration.
        if figure_stem is None:
            figure_stem = f'{variable}_streamline'
        for local_time, global_time in enumerate(time_indices):
            if mask:
                local_mask = getattr(self.fvcom.data, 'wet_cells')[local_time] == 0
            else:
                local_mask = np.full(self.fvcom.dims.nele, False)
            u_local = np.ma.masked_array(u[local_time], mask=local_mask)
            v_local = np.ma.masked_array(v[local_time], mask=local_mask)
            magnitude = np.ma.masked_array(self.field[local_time], mask=local_mask)
            try:
                local_plot.plot_streamlines(u_local, v_local, color=magnitude, dx=dx, dy=dy, mask_land=mask_land,
                                            **stkwargs)
            except ValueError:
                # The plot failed (sometimes due to teeny tiny velocities. Save what we've got anyway.
                pass
            # If we got all zeros for the streamline plot, the associated object will be none, so check that here and
            # only update colours if we definitely plotted something.
            if local_plot.streamline_plot is not None:
                # The lines are a LineCollection and we can update the colour limits in one shot. The arrows need
                # iterating.
                # Bug fix: use self.clims -- the `clims' argument may be None (limits come from _figure_prep).
                local_plot.streamline_plot.lines.set_clim(*self.clims)
            if set_title:
                title_string = self.fvcom.time.datetime[local_time].strftime('%Y-%m-%d %H:%M:%S')
                local_plot.set_title(title_string)
            local_plot.figure.savefig(str(Path(figures_directory, f'{figure_stem}_{figure_index + global_time + 1:04d}.png')),
                                      bbox_inches='tight',
                                      pad_inches=0.2,
                                      dpi=120)
class Player(FuncAnimation):
    """ Animation class for FVCOM outputs. Shamelessly lifted from https://stackoverflow.com/a/46327978 """
    def __init__(self, fig, func, init_func=None, fargs=None, save_count=None, mini=0, maxi=100, pos=(0.125, 0.92),
                 **kwargs):
        """
        Initialise an animation window.

        Parameters
        ----------
        fig : matplotlib.figure.Figure
            The figure into which we should animate.
        func : function
            The function describing the animation.
        init_func : function, optional
            An initial function for the first frame.
        fargs : tuple or None, optional
            Additional arguments to pass to each call to ``func``
        save_count : int, optional
            The number of values from `frames` to cache.
        mini : int, optional
            The start index for the animation (defaults to zero).
        maxi : int, optional
            The maximum index for the animation (defaults to 100).
        pos : tuple
            The (x, y) position of the player controls. Defaults to near the top of the figure.

        Additional kwargs are passed to `matplotlib.animation.FuncAnimation'.

        """
        # Current frame index and playback state (runs/forwards are toggled by the widget callbacks below).
        self.i = 0
        self.min = mini
        self.max = maxi
        self.runs = True
        self.forwards = True
        self.fig = fig
        self.func = func
        # Build the player widgets before handing control to FuncAnimation.
        self.setup(pos)
        super().__init__(self.fig, self.func, frames=self.play(), init_func=init_func, fargs=fargs,
                         save_count=save_count, **kwargs)
    def play(self, *dummy):
        """ What to do when we play the animation. """
        # Generator of frame indices; FuncAnimation pulls from this while the animation runs.
        while self.runs:
            # self.forwards is a bool, so this moves i by +1 (forwards) or -1 (backwards).
            self.i = self.i + self.forwards - (not self.forwards)
            if self.min < self.i < self.max:
                yield self.i
            else:
                # Hit either end of the range: pause playback but still yield the boundary frame.
                self.stop()
                yield self.i
    def start(self, *dummy):
        """ Start the animation. """
        self.runs = True
        self.event_source.start()
    def stop(self, *dummy):
        """ Stop the animation. """
        self.runs = False
        self.event_source.stop()
    def forward(self, *dummy):
        """ Play forwards. """
        self.forwards = True
        self.start()
    def backward(self, *dummy):
        """ Play backwards. """
        self.forwards = False
        self.start()
    def oneforward(self, *dummy):
        """ Skip one forwards. """
        self.forwards = True
        self.onestep()
    def onebackward(self, *dummy):
        """ Skip one backwards. """
        self.forwards = False
        self.onestep()
    def onestep(self, *dummy):
        """ Skip through one frame at a time. """
        # Step within the open interval; at the boundaries only step back into the range.
        if self.min < self.i < self.max:
            self.i = self.i + self.forwards - (not self.forwards)
        elif self.i == self.min and self.forwards:
            self.i += 1
        elif self.i == self.max and not self.forwards:
            self.i -= 1
        # Redraw the single frame and keep the slider in sync.
        self.func(self.i)
        self.slider.set_val(self.i)
        self.fig.canvas.draw_idle()
    def setup(self, pos):
        """ Set up the animation. """
        # One row of transport-style buttons plus a frame slider, laid out left to right.
        playerax = self.fig.add_axes([pos[0], pos[1], 0.64, 0.04])
        divider = mpl_toolkits.axes_grid1.make_axes_locatable(playerax)
        bax = divider.append_axes("right", size="80%", pad=0.05)
        sax = divider.append_axes("right", size="80%", pad=0.05)
        fax = divider.append_axes("right", size="80%", pad=0.05)
        ofax = divider.append_axes("right", size="100%", pad=0.05)
        sliderax = divider.append_axes("right", size="500%", pad=0.07)
        # Unicode glyphs: step-back, play-backwards, stop, play, step-forward.
        self.button_oneback = matplotlib.widgets.Button(playerax, label='$\u29CF$')
        self.button_back = matplotlib.widgets.Button(bax, label='$\u25C0$')
        self.button_stop = matplotlib.widgets.Button(sax, label='$\u25A0$')
        self.button_forward = matplotlib.widgets.Button(fax, label='$\u25B6$')
        self.button_oneforward = matplotlib.widgets.Button(ofax, label='$\u29D0$')
        self.button_oneback.on_clicked(self.onebackward)
        self.button_back.on_clicked(self.backward)
        self.button_stop.on_clicked(self.stop)
        self.button_forward.on_clicked(self.forward)
        self.button_oneforward.on_clicked(self.oneforward)
        self.slider = matplotlib.widgets.Slider(sliderax, '', self.min, self.max, valinit=self.i)
        self.slider.on_changed(self.set_pos)
    def set_pos(self, i):
        """ Set the slider position. """
        self.i = int(self.slider.val)
        self.func(self.i)
    def update(self, i):
        """ Update the slider to the given position. """
        self.slider.set_val(i)
def plot_domain(domain, mesh=False, depth=False, **kwargs):
    """
    Add a domain plot to the given domain (as domain.domain_plot).

    Parameters
    ----------
    domain : PyFVCOM model domain object
        The object on which the plot is created; must provide `grid.triangles' and `grid.h'.
    mesh : bool
        Set to True to overlay the model mesh. Defaults to False.
    depth : bool
        Set to True to plot water depth. Defaults to False. If enabled, a colour bar is added to the figure.

    Remaining keyword arguments are passed to PyFVCOM.plot.Plotter.

    Provides
    --------
    domain_plot : PyFVCOM.plot.Plotter
        The plot object.
    mesh_plot : matplotlib.axes, optional
        The mesh axis object, if enabled.

    """
    domain.domain_plot = Plotter(domain, **kwargs)
    if mesh:
        mesh_plot = domain.domain_plot.axes.triplot(domain.domain_plot.mx, domain.domain_plot.my,
                                                    domain.grid.triangles, 'k-',
                                                    linewidth=1, zorder=2000, **domain.domain_plot._plot_projection)
        domain.domain_plot.mesh_plot = mesh_plot
    if depth:
        # Make depths negative down.
        if np.all(domain.grid.h < 0):
            domain.domain_plot.plot_field(domain.grid.h)
        else:
            domain.domain_plot.plot_field(-domain.grid.h)
def colorbar_extension(colour_min, colour_max, data_min, data_max):
    """
    For the range specified by `colour_min' to `colour_max', return whether the data range specified by `data_min'
    and `data_max' is inside, outside or partially overlapping. This allows you to automatically set the `extend'
    keyword on a `matplotlib.pyplot.colorbar' call.

    Parameters
    ----------
    colour_min, colour_max : float
        Minimum and maximum value of the current colour bar limits.
    data_min, data_max : float
        Minimum and maximum value of the data limits.

    Returns
    -------
    extension : str
        'both' when the data exceed the colour limits on both sides, 'min' or 'max' when they exceed only the
        lower or upper limit respectively, and 'neither' when the data fit inside (or exactly match) the limits.

    """
    exceeds_lower = data_min < colour_min
    exceeds_upper = data_max > colour_max
    if exceeds_lower and exceeds_upper:
        return 'both'
    if exceeds_lower:
        return 'min'
    if exceeds_upper:
        return 'max'
    return 'neither'
def cm2inch(value):
    """
    Convert centimetres to inches.

    Parameters
    ----------
    value : float
        Length in centimetres.

    Returns
    -------
    float
        The same length in inches.

    """
    centimetres_per_inch = 2.54
    return value / centimetres_per_inch
def colourmap(variable):
    """ Use a predefined colour map for a given variable.

    Leverages the cmocean package for perceptually uniform colour maps.

    Parameters
    ----------
    variable : str, iterable
        For the given variable name(s), return the appropriate colour palette from the cmocean/matplotlib colour maps.
        If the variable is not in the pre-defined variables here, the returned values will be `viridis`.

    Returns
    -------
    colourmaps : matplotlib.colours.cmap, dict
        The colour map(s) for the variable(s) given.

    """
    # Bug fix: `collections.Iterable' was removed in Python 3.10; the abstract base classes live in
    # collections.abc. Imported locally because the module's import block is not editable from here.
    from collections.abc import Iterable

    default_cmap = mplcm.get_cmap('viridis')
    # NOTE(review): both 'todal_ssc' (apparent typo) and 'total_ssc' are kept so existing callers keep working.
    cmaps = {'q2': cm.dense,
             'l': cm.dense,
             'q2l': cm.dense,
             'tke': cm.dense,
             'viscofh': cm.dense,
             'kh': cm.dense,
             'nuh': cm.dense,
             'teps': cm.dense,
             'tauc': cm.dense,
             'temp': cm.thermal,
             'sst': cm.thermal,
             'salinity': cm.haline,
             'zeta': cm.balance,
             'ww': cm.balance,
             'omega': cm.balance,
             'uv': cm.speed,
             'uava': cm.speed,
             'speed': cm.speed,
             'u': cm.delta,
             'v': cm.delta,
             'ua': cm.delta,
             'va': cm.delta,
             'uvanomaly': cm.delta,
             'direction': cm.phase,
             'uvdir': cm.phase,
             'h_morpho': cm.deep,
             'h': cm.deep,
             'h_r': cm.deep_r,
             'bathymetry': cm.deep,
             'bathymetry_r': cm.deep_r,
             'taub_total': cm.thermal,
             'mud_1': cm.turbid,
             'mud_2': cm.turbid,
             'sand_1': cm.turbid,
             'sand_2': cm.turbid,
             'todal_ssc': cm.turbid,
             'total_ssc': cm.turbid,
             'mud_1_bedfrac': cm.dense,
             'mud_2_bedfrac': cm.dense,
             'sand_1_bedfrac': cm.dense,
             'sand_2_bedfrac': cm.dense,
             'mud_1_bedload': cm.dense,
             'mud_2_bedload': cm.dense,
             'sand_1_bedload': cm.dense,
             'sand_2_bedload': cm.dense,
             'bed_thick': cm.deep,
             'bed_age': cm.tempo,
             'bed_por': cm.turbid,
             'bed_diff': cm.haline,
             'bed_btcr': cm.thermal,
             'bot_sd50': cm.turbid,
             'bot_dens': cm.thermal,
             'bot_wsed': cm.turbid,
             'bot_nthck': cm.matter,
             'bot_lthck': cm.matter,
             'bot_dthck': cm.matter,
             'bot_morph': cm.deep,
             'bot_tauc': cm.thermal,
             'bot_rlen': cm.dense,
             'bot_rhgt': cm.dense,
             'bot_bwav': cm.turbid,
             'bot_zdef': cm.dense,
             'bot_zapp': cm.dense,
             'bot_zNik': cm.dense,
             'bot_zbio': cm.dense,
             'bot_zbfm': cm.dense,
             'bot_zbld': cm.dense,
             'bot_zwbl': cm.dense,
             'bot_actv': cm.deep,
             'bot_shgt': cm.deep_r,
             'bot_maxD': cm.deep,
             'bot_dnet': cm.matter,
             'bot_doff': cm.thermal,
             'bot_dslp': cm.amp,
             'bot_dtim': cm.haline,
             'bot_dbmx': cm.dense,
             'bot_dbmm': cm.dense,
             'bot_dbzs': cm.dense,
             'bot_dbzm': cm.dense,
             'bot_dbzp': cm.dense,
             'wet_nodes': cm.amp,
             'tracer1_c': cm.dense,
             'DYE': cm.dense}
    if isinstance(variable, Iterable) and not isinstance(variable, str):
        colourmaps = [cmaps.get(var, default_cmap) for var in variable]
        # If we got a list of a single value, return the value rather than a list.
        if len(colourmaps) == 1:
            colourmaps = colourmaps[0]
    else:
        colourmaps = cmaps.get(variable, default_cmap)
    return colourmaps
|
# from lib.core.mscgnet import *
# score 0.547, no TTA
import datetime
import logging
import os
import numpy as np
import torch
from PIL import Image
# from core.net import *
from core.net import get_model
from utils.data.augmentation import img_load
from utils.data.preprocess import IDS, GT, IMG
from utils import PROJECT_ROOT
#####################################
# Setup Logging
# TODO: need to eventually cut after refacotirzation
#####################################
from utils.gpu import get_available_gpus
model_name = "ensemble"
output_path = PROJECT_ROOT / "submission" / "results_checkpoint1_checkpoint2_tta"

logging.basicConfig(level=logging.DEBUG)
logFormatter = logging.Formatter(
    "%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"
)
rootLogger = logging.getLogger()

# NOTE(review): the ':' characters in the timestamp make this file name invalid on Windows --
# confirm the target platforms before shipping.
log_path = PROJECT_ROOT / "logs/{0}/{1}.log".format(
    f"/{model_name}", f"{model_name}-{datetime.datetime.now():%d-%b-%y-%H:%M:%S}"
)
log_dir = PROJECT_ROOT / f"logs/{model_name}"
if os.path.isdir(log_dir):
    print("Saving log files to:", log_dir)
else:
    print("Creating log directory:", log_dir)
# makedirs with exist_ok avoids the check-then-create race and, unlike os.mkdir, also creates a
# missing parent 'logs' directory.
os.makedirs(log_dir, exist_ok=True)

# Log to both the per-model file and the console.
fileHandler = logging.FileHandler(log_path)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
# Model meta-data used by get_net() below: architecture name, dataset, input bands, class count,
# graph node size and the snapshot (.pth) path relative to the project root.
checkpoint1 = {
    "core": "MSCG-Rx50",
    "data": "Agriculture",
    "bands": ["NIR", "RGB"],
    "num_classes": 10,
    "nodes": (32, 32),
    "snapshot": "checkpoints/adam/MSCG-Rx50/Agriculture_NIR-RGB_kf-0-0-reproduce_ACW_loss2_adax/MSCG-Rx50-epoch_6_loss_1.09903_acc_0.77739_acc-cls_0.53071_mean-iu_0.39789_fwavacc_0.64861_f1_0.53678_lr_0.0000845121.pth",
}
# score 0.550 , no TTA
checkpoint2 = {
    "core": "MSCG-Rx101",
    "data": "Agriculture",
    "bands": ["NIR", "RGB"],
    "num_classes": 10,
    "nodes": (32, 32),
    "snapshot": "checkpoints/adam/MSCG-Rx101/Agriculture_NIR-RGB_kf-0-0-reproduce/MSCG-Rx101-epoch_4_loss_1.26896_acc_0.77713_acc-cls_0.54260_mean-iu_0.40996_fwavacc_0.64399_f1_0.55334_lr_0.0001245001.pth",
}
checkpoint3 = {
    "core": "MSCG-Rx101",
    "data": "Agriculture",
    "bands": ["NIR", "RGB"],
    "num_classes": 10,
    "nodes": (32, 32),
    # Implicit string concatenation: the snapshot file name is split over two lines.
    "snapshot": "checkpoints/epoch_15_loss_0.88412_acc_0.88690_acc-cls_0.78581_"
    "mean-iu_0.68205_fwavacc_0.80197_f1_0.80401_lr_0.0001075701.pth",
}
# checkpoint1 + checkpoint2, test score 0.599,
# checkpoint1 + checkpoint2 + checkpoint3, test score 0.608
def get_net(checkpoint_path: str, checkpoint=checkpoint1, use_gpu: bool = True):
    """Load an MSCG-Net model from the .pth snapshot referenced by `checkpoint`.

    :param checkpoint_path: Unused -- the path is derived from checkpoint['snapshot'] and this
        argument is overwritten. Kept for backward compatibility with existing callers.
    :param checkpoint: Dictionary containing model meta-data ('core', 'num_classes', 'nodes',
        'snapshot') for loading from a .pth.
    :param use_gpu: Move the model to the GPU when True, otherwise keep it on the CPU.
    :return: The model with loaded weights, switched to eval() mode.
    """
    net = get_model(
        name=checkpoint["core"],
        classes=checkpoint["num_classes"],
        node_size=checkpoint["nodes"],
    )
    print(checkpoint["num_classes"], checkpoint["nodes"])
    # NOTE(review): the incoming checkpoint_path argument is deliberately discarded here -- confirm
    # no caller relies on it being honoured.
    checkpoint_path = PROJECT_ROOT / (checkpoint["snapshot"].replace("../", ""))
    logging.debug("Loading from {}".format(str(checkpoint_path)))
    # Query the GPUs once rather than twice (the previous duplicate call was discarded).
    available_gpus = get_available_gpus(6, "gb")
    print(available_gpus)
    # Weights are always loaded onto the CPU first; .cuda() below moves them if requested.
    net.load_state_dict(torch.load(str(checkpoint_path), map_location=torch.device("cpu")))
    if use_gpu:
        net.cuda()
    else:
        net.cpu()
    net.eval()
    return net
def load_test_img(test_files):
    """Generator yielding each test image as a numpy array.

    NIR bands (files living in a directory whose name ends in 'nir') are read as single-channel
    planes; all loaded bands for an id are stacked along the channel axis when there is more than
    one file template.

    :param test_files: Mapping with an IDS entry ({group: [id, ...]}) and an IMG entry (list of
        file-path templates with a '{}' placeholder for the id).
    :return: Yields one image array per id.
    """
    id_dict = test_files[IDS]
    image_templates = test_files[IMG]

    def _read_band(filename):
        # Single-channel NIR plane vs. regular RGB load.
        directory, _ = os.path.split(filename)
        if directory[-3:] == "nir":
            band = np.asarray(Image.open(filename), dtype="uint8")
            return np.expand_dims(band, 2)
        return img_load(filename)

    for key in id_dict:
        for image_id in id_dict[key]:
            bands = [_read_band(template.format(image_id)) for template in image_templates]
            if len(bands) > 1:
                yield np.concatenate(bands, 2)
            else:
                yield bands[0]
def load_ids(test_files):
    """Generator yielding every test-image id, group by group.

    :param test_files: Mapping with an IDS entry of the form {group: [id, ...]}.
    :return: Yields each id in turn.
    """
    for id_group in test_files[IDS].values():
        yield from id_group
def load_gt(test_files):
    """Generator yielding the ground-truth mask for every test id.

    :param test_files: dict with ``IDS`` -> {split: [ids]} and ``GT`` -> a
        filename template containing a ``{}`` placeholder for the id.
    :return: generator of uint8 numpy label arrays.
    """
    mask_template = test_files[GT]
    for split_ids in test_files[IDS].values():
        for sample_id in split_ids:
            mask = Image.open(mask_template.format(sample_id))
            yield np.asarray(mask, dtype="uint8")
|
import discord
from discord.ext import commands
import asyncio
from asyncio import sleep
class Embeds(commands.Cog):
    """Cog providing embed-based informational commands.

    Change: removed a dead commented-out command (a no-op triple-quoted
    string) that contained offensive content and duplicated the `test`
    command's reaction logic.
    """

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def id(self, ctx, member: discord.Member):
        """Show a member's ID in a small embed."""
        embed = discord.Embed(title=member.name, description=member.mention, color=discord.Color.green())
        embed.add_field(name='ID', value=member.id, inline=True)
        embed.set_thumbnail(url=member.avatar_url)
        embed.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested by {ctx.author.name}")
        await ctx.send(embed=embed)

    @commands.command()
    async def info(self, ctx, member: discord.Member = None):
        """Show detailed info about a member; defaults to the command caller."""
        member = ctx.author if not member else member
        roles = [role for role in member.roles]
        embed = discord.Embed(colour=member.color, timestamp=ctx.message.created_at)
        embed.set_author(name=f'User Info - {member}')
        embed.set_thumbnail(url=member.avatar_url)
        embed.set_footer(text=f'Requested by {ctx.author}', icon_url=ctx.author.avatar_url)
        embed.add_field(name='ID', value=member.id)
        embed.add_field(name="Nickname:", value=member.display_name)
        # NOTE(review): "%#d" is a Windows-only strftime flag — confirm the
        # bot only runs on Windows ("%-d" is the POSIX equivalent).
        embed.add_field(name='Creation Date:', value=member.created_at.strftime("%a, %#d %B %Y, %I : %M %p UTC"))
        embed.add_field(name='Joined server:', value=member.joined_at.strftime("%a, %#d %B %Y, %I : %M %p UTC"))
        embed.add_field(name=f"Roles ({len(roles)})", value=" ".join([role.mention for role in roles]))
        embed.add_field(name="Top role:", value=member.top_role.mention)
        embed.add_field(name='Bot?', value=member.bot)
        await ctx.send(embed=embed)

    @commands.command()
    async def test(self, ctx):
        """Send an embed, then swap it for a second one when the author reacts ❌."""
        message1 = discord.Embed(title='OOGA')
        msg = await ctx.send(embed=message1)
        message2 = discord.Embed(title='BOOGA')
        await msg.add_reaction("❌")

        def check(reaction, user):
            # Only accept the ❌ reaction from the original command author.
            return user == ctx.author and str(reaction.emoji) == '❌'

        try:
            reaction, user = await self.client.wait_for('reaction_add', timeout=60.0, check=check)
        except asyncio.TimeoutError:
            await ctx.send("Fail")
        else:
            await msg.edit(embed=message2)
def setup(client):
    # Entry point used by discord.py's extension loader (bot.load_extension).
    client.add_cog(Embeds(client))
from rest_framework import serializers
from .models import CoronaCaseRaw
class CoronaCaseRawSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing the descriptive and geographic fields of a
    CoronaCaseRaw record."""
    class Meta:
        model = CoronaCaseRaw
        # Only these model fields are serialized; no url/id field is included
        # despite the hyperlinked base class.
        fields = [
            'case_type',
            'name',
            'description',
            'latitude',
            'longitude',
        ]
import numpy as np
import math
import matplotlib.pyplot as plt
import time
from control_utilities.chrono_utilities import calcAngle
class MatplotlibWrapper:
    """Lightweight matplotlib visualiser for a vehicle simulation.

    Draws the ego vehicle (and optional opponents/obstacles) as 2D line
    outlines, re-rendering one frame every ``render_step_size`` seconds of
    simulation time.
    """

    def __init__(self, step_size, vehicle, opponents=None, obstacles=None, render_step_size=1.0/60):
        self.step_size = step_size
        self.time = 0
        # Time interval between two render frames (default 60 FPS).
        self.render_step_size = render_step_size
        # Number of simulation steps between two rendered frames.
        self.render_steps = int(math.ceil(self.render_step_size / self.step_size))
        self.step_number = 0
        self.vehicle = vehicle
        # Maps vehicle -> its matplotlib line artists; 0 is the
        # "not yet drawn" sentinel checked in plotVehicle().
        self.vehicles = dict()
        self.vehicles[self.vehicle] = 0
        self.opponents = opponents
        if self.opponents is not None:
            for opponent in self.opponents:
                self.vehicles[opponent.vehicle] = 0
        self.obstacles = obstacles
        if self.obstacles is not None:
            # Maps obstacle.num -> its line artist; 0 = not yet drawn.
            self.obstacle_outlines = dict()
            for _, obstacle in self.obstacles.items():
                self.obstacle_outlines[obstacle.num] = 0

    def close(self):
        """Close the current matplotlib figure."""
        plt.close()

    def plotTrack(self, track):
        """Draw the left/right track boundaries (centre line deliberately off)."""
        # track.center.plot(color='-r', show=False)
        track.left.plot(color='-k', show=False)
        track.right.plot(color='-k', show=False)

    def plotOpponents(self):
        """Draw every opponent vehicle with a blue cab and black wheels."""
        for opponent in self.opponents:
            self.plotVehicle(opponent.vehicle, cabcolor="-b", wheelcolor="-k")

    def Advance(self, step, save=False):
        """Advance visualisation time by ``step``; redraw every render_steps calls.

        :param step: simulation time increment in seconds.
        :param save: when True, save the rendered frame to disk.
        :return: False once every figure window has been closed, else True.
        """
        if self.step_number % self.render_steps == 0:
            self.plotVehicle(self.vehicle)
            self.plotText()
            if self.opponents is not None:
                self.plotOpponents()
            if self.obstacles is not None:
                self.plotObstacles()
            plt.pause(1e-9)
            if save:
                file_name = "fig{}.png".format(int(self.step_number / 5))
                print("Saving to {}".format(file_name))
                # Fixed: the previous call forced format="jpg" into a .png
                # file name and passed JPEG-only kwargs (quality/optimize/
                # progressive) that were removed from savefig in
                # Matplotlib 3.5. Save a PNG matching the extension instead.
                plt.savefig(file_name, dpi=300)
            if len(plt.get_fignums()) == 0:
                # The user closed the window; signal the caller to stop.
                return False
        self.step_number += 1
        self.time += step
        return True

    def plotText(self):
        """Render (or update in place) the HUD box with time and driver inputs."""
        # Renamed from `str`, which shadowed the builtin.
        hud_text = 'Time :: {0:0.1f}\nThrottle :: {1:0.2f}\nSteering :: {2:0.2f}\nBraking :: {3:0.2f}\nSpeed :: {4:0.2f}'.format(
            self.time, self.vehicle.driver.GetThrottle(), self.vehicle.driver.GetSteering(), self.vehicle.driver.GetBraking(), self.vehicle.vehicle.GetVehicleSpeed())
        if not hasattr(self, 'annotation'):
            bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
            self.annotation = plt.annotate(hud_text, xy=(.97, .7), xytext=(0, 10), xycoords=('axes fraction', 'figure fraction'), textcoords='offset points', size=10, ha='right', va='bottom', bbox=bbox_props)
        else:
            self.annotation.set_text(hud_text)

    def plotVehicle(self, vehicle, cabcolor="-k", wheelcolor="-k"):  # pragma: no cover
        """Draw (or update) one vehicle: cab outline plus four wheels.

        Geometry is built in the vehicle frame, the front wheels are steered
        by Rot2, everything is rotated into the world frame by Rot1, then
        offset to the vehicle's position.
        """
        state = vehicle.GetState()

        # Cab rectangle and a front-right wheel rectangle in the vehicle frame.
        outline = np.array([[-vehicle.backtowheel, (vehicle.length - vehicle.backtowheel), (vehicle.length - vehicle.backtowheel), -vehicle.backtowheel, -vehicle.backtowheel],
                            [vehicle.width / 2, vehicle.width / 2, - vehicle.width / 2, -vehicle.width / 2, vehicle.width / 2]])
        fr_wheel = np.array([[vehicle.wheel_len, -vehicle.wheel_len, -vehicle.wheel_len, vehicle.wheel_len, vehicle.wheel_len],
                             [-vehicle.wheel_width - vehicle.tread, -vehicle.wheel_width - vehicle.tread, vehicle.wheel_width - vehicle.tread, vehicle.wheel_width - vehicle.tread, -vehicle.wheel_width - vehicle.tread]])
        rr_wheel = np.copy(fr_wheel)
        fl_wheel = np.copy(fr_wheel)
        fl_wheel[1, :] *= -1  # mirror to the left side
        rl_wheel = np.copy(rr_wheel)
        rl_wheel[1, :] *= -1

        # World-frame yaw rotation and steering rotation for the front wheels.
        Rot1 = np.array([[math.cos(state.yaw), math.sin(state.yaw)],
                         [-math.sin(state.yaw), math.cos(state.yaw)]])
        Rot2 = np.array([[math.cos(vehicle.driver.GetSteering()), math.sin(vehicle.driver.GetSteering())],
                         [-math.sin(vehicle.driver.GetSteering()), math.cos(vehicle.driver.GetSteering())]])

        # Steer the front wheels, translate them to the wheelbase, then
        # rotate every part into the world frame.
        fr_wheel = (fr_wheel.T.dot(Rot2)).T
        fl_wheel = (fl_wheel.T.dot(Rot2)).T
        fr_wheel[0, :] += vehicle.wb
        fl_wheel[0, :] += vehicle.wb
        fr_wheel = (fr_wheel.T.dot(Rot1)).T
        fl_wheel = (fl_wheel.T.dot(Rot1)).T
        outline = (outline.T.dot(Rot1)).T
        rr_wheel = (rr_wheel.T.dot(Rot1)).T
        rl_wheel = (rl_wheel.T.dot(Rot1)).T

        # Shift everything to the vehicle's world position (plus model offset).
        offset = np.array(vehicle.offset)
        offset = (offset.T.dot(Rot1)).T
        for part in (outline, fr_wheel, rr_wheel, fl_wheel, rl_wheel):
            part[0, :] += offset[0] + state.x
            part[1, :] += offset[1] + state.y

        if self.vehicles[vehicle] == 0:
            # First draw: create the line artists and remember them.
            cab, = plt.plot(np.array(outline[0, :]).flatten(), np.array(outline[1, :]).flatten(), cabcolor)
            fr, = plt.plot(np.array(fr_wheel[0, :]).flatten(), np.array(fr_wheel[1, :]).flatten(), wheelcolor)
            rr, = plt.plot(np.array(rr_wheel[0, :]).flatten(), np.array(rr_wheel[1, :]).flatten(), wheelcolor)
            fl, = plt.plot(np.array(fl_wheel[0, :]).flatten(), np.array(fl_wheel[1, :]).flatten(), wheelcolor)
            rl, = plt.plot(np.array(rl_wheel[0, :]).flatten(), np.array(rl_wheel[1, :]).flatten(), wheelcolor)
            self.vehicles[vehicle] = (cab, fr, rr, fl, rl)
        else:
            # Subsequent draws: update the existing artists in place.
            (cab, fr, rr, fl, rl) = self.vehicles[vehicle]
            for line, part in zip((cab, fr, rr, fl, rl),
                                  (outline, fr_wheel, rr_wheel, fl_wheel, rl_wheel)):
                line.set_xdata(np.array(part[0, :]).flatten())
                line.set_ydata(np.array(part[1, :]).flatten())

    def plotObstacles(self, color="-k"):
        """Draw (or update) every obstacle as a rectangle rotated to its p1->p2 axis."""
        for i in self.obstacles.keys():
            o = self.obstacles[i]
            outline = np.array([[-o.length, o.length, o.length, -o.length, -o.length], [o.width / 2, o.width / 2, - o.width / 2, -o.width / 2, o.width / 2]])
            ang = calcAngle(o.p1, o.p2)
            Rot1 = np.array([[math.cos(ang), math.sin(ang)], [-math.sin(ang), math.cos(ang)]])
            outline = (outline.T.dot(Rot1)).T
            outline[0, :] += o.p1.x
            outline[1, :] += o.p1.y
            if self.obstacle_outlines[o.num] == 0:
                # First draw: create and cache the line artist (previously the
                # artist reused the `outline` array's name).
                line, = plt.plot(np.array(outline[0, :]).flatten(), np.array(outline[1, :]).flatten(), color)
                self.obstacle_outlines[o.num] = line
            else:
                border = self.obstacle_outlines[o.num]
                border.set_ydata(np.array(outline[1, :]).flatten())
                border.set_xdata(np.array(outline[0, :]).flatten())
|
#!/bin/python3
"""dbreversepivot takes an input file with time/value columns, and
pivots the table into a narrow table with one line per old column.
For example, if the input was this:
#fsdb -F s time foo bar
1 10 0
2 30 20
3 0 40
It would convert this to:
#fsdb -F s time key value
1 foo 10
2 bar 20
2 foo 30
3 bar 40
This is the inverse operation of dbfullpivot.
"""
import sys
import argparse
import pyfsdb
def parse_args():
    """Build the argument parser for dbreversepivot and parse sys.argv."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=__doc__,
    )

    parser.add_argument("-k", "--key-column", default="key", type=str,
                        help="The key column to use in the output for column names to store in")

    parser.add_argument("-c", "--columns", nargs="+", type=str,
                        help="The columns to pivot into keys")

    parser.add_argument("-v", "--value-column", default="value", type=str,
                        help="What output column to store the value for what was found in the columns")

    parser.add_argument("-o", "--other-columns", default=[], type=str, nargs="*",
                        help="Other columns to copy to every row")

    # Positional input/output default to stdin/stdout so the tool pipes well.
    parser.add_argument("input_file", type=argparse.FileType('r'),
                        nargs='?', default=sys.stdin,
                        help="The input FSDB file to read")

    parser.add_argument("output_file", type=argparse.FileType('w'),
                        nargs='?', default=sys.stdout,
                        help="The output FSDB file to write to")

    return parser.parse_args()
def main():
    """Pivot each input row's selected columns into one key/value output row each.

    Reads an FSDB stream, and for every input row emits one output row per
    pivoted column: [column_name, value, *other_columns].
    """
    args = parse_args()

    # Removed: dead `storage`/`columns` dicts that were never read (the
    # `columns` dict was immediately overwritten by args.columns).
    key_column = args.key_column
    value_column = args.value_column
    other_columns = args.other_columns
    columns = args.columns

    # Open the input/output FSDB streams (renamed from `input`, which
    # shadowed the builtin).
    input_fsdb = pyfsdb.Fsdb(file_handle=args.input_file,
                             return_type=pyfsdb.RETURN_AS_DICTIONARY)
    output_fsdb = pyfsdb.Fsdb(out_file_handle=args.output_file)
    output_fsdb.out_column_names = [key_column, value_column] + other_columns

    # For each row, emit one narrow row per pivoted column.
    for row in input_fsdb:
        for column in columns:
            out_row = [column, row[column]]
            for other in other_columns:
                out_row.append(row[other])
            output_fsdb.append(out_row)

    output_fsdb.close()
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
class Card:
    """A single playing card, identified by its rank and suit."""

    def __init__(self, rank, suit):
        """Store the given rank and suit on the new card."""
        self.rank = rank
        self.suit = suit

    def __str__(self):
        """Return a readable description, e.g. 'The ace of spades'."""
        return f"The {self.rank} of {self.suit}"

    def get_suit(self):
        """Return the card's suit."""
        return self.suit

    def get_rank(self):
        """Return the card's rank."""
        return self.rank
# -----------------------------------------------------
class Deck:
    """A standard 52-card deck built from Card instances."""

    def __init__(self):
        """Build the full deck: every rank paired with every suit."""
        ranks = ["two", "three", "four", "five", "six", "seven", "eight",
                 "nine", "ten", "jack", "queen", "king", "ace"]
        suits = ["clubs", "diamonds", "hearts", "spades"]
        # Same ordering as nested loops: rank-major, suit-minor.
        self.cards = [Card(rank, suit) for rank in ranks for suit in suits]

    def print_deck(self):
        """Print every card in the deck, numbered from 1."""
        print("Deck of Cards")
        print("-------------")
        for number, card in enumerate(self.cards, start=1):
            print(number, card)
        print()
# -----------------------------------------------------
# Demo: build a deck and print it. Runs at import time (no __main__ guard).
cards = Deck()
cards.print_deck()
# cards.shuffle()
# NOTE(review): shuffle() is commented out (and not defined on Deck), so the
# message below is misleading — the deck is printed unshuffled.
print("After shuffling...\n")
# cards.print_deck()
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required, permission_required
from . import views
from django.conf import settings
from django.conf.urls.static import static
# Room-management routes. List/add/delete are permission-gated; users
# lacking the permission are redirected to 'account_login'.
urlpatterns = [
    url(r'^$', permission_required('sale.view_table', login_url='account_login')
        (views.list), name='room-list'),
    # NOTE(review): guarding room-add with 'sale.add_paymentoption' (and the
    # list view with 'sale.view_table') looks copy-pasted from another app —
    # confirm these shouldn't be room-specific permissions.
    url(r'^add/$', permission_required('sale.add_paymentoption', login_url='account_login')
        (views.add), name='room-add'),
    url(r'^delete/(?P<pk>[0-9]+)/$', permission_required('sale.delete_room', login_url='account_login')
        (views.delete), name='room-delete'),
    url(r'^detail/(?P<pk>[0-9]+)/$', views.detail, name='room-detail'),
    url(r'^update/(?P<pk>[0-9]+)/$', views.edit, name='update-room'),
    url(r'^search/$', views.searchs, name='room-search'),
    url(r'^paginate/', views.paginate, name='room_paginate'),
]

if settings.DEBUG:
    # Serve static and media files directly when running with DEBUG=True.
    # urlpatterns += [ url(r'^static/(?P<path>.*)$', serve)] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import cPickle
def convert_to_csv():
'''
Convert ques_id, user_id to user int id, ques int id
stores a mapping of id to int id
'''
mahout_file = open("../bytecup2016data/invited_info_train_mahout.csv","w")
mahout_test_file = open("../bytecup2016data/test_nolabel_mahout.csv","w")
# variable to construct user and ques features
max_user = 0
user_to_idx = {}
max_ques = 0
ques_to_idx = {}
with open("../bytecup2016data/invited_info_train_PROC.txt","r") as f:
training_data = f.readline().strip().split("\t")
while training_data and len(training_data) == 3 :
question_id = training_data[0]
user_id = training_data[1]
label = training_data[2]
if user_id not in user_to_idx:
user_to_idx[user_id] = max_user
max_user+=1
if question_id not in ques_to_idx:
ques_to_idx[question_id] = max_ques
max_ques+=1
mahout_file.write( "{0},{1},{2}\n".format(user_to_idx[user_id], ques_to_idx[question_id], label) )
# if label == "1":
# mahout_file.write( "{0},{1}\n".format(user_to_idx[user_id], ques_to_idx[question_id], label) )
training_data = f.readline().strip().split("\t")
f.close()
mahout_file.close()
#invited_info_train_PROC_test
with open("../bytecup2016data/validate_nolabel.txt","r") as f:
training_data = f.readline().strip().split(",")
training_data = f.readline().strip().split(",")
while training_data and len(training_data) >= 2 :
question_id = training_data[0]
user_id = training_data[1]
if user_id not in user_to_idx:
user_to_idx[user_id] = max_user
max_user+=1
if question_id not in ques_to_idx:
ques_to_idx[question_id] = max_ques
max_ques+=1
mahout_test_file.write( "{0},{1}\n".format(user_to_idx[user_id], ques_to_idx[question_id], label) )
training_data = f.readline().strip().split(",")
f.close()
mahout_test_file.close()
print max_user, max_ques
cPickle.dump(user_to_idx, open("../bytecup2016data/user_to_idx.p","wb"), protocol=2)
cPickle.dump(ques_to_idx, open("../bytecup2016data/ques_to_idx.p","wb"), protocol=2)
# Script entry point (Python 2: note the print statement).
if __name__ == '__main__':
    convert_to_csv()
    print "Finished writing mahout file"
|
# -*- coding: utf-8 -*-
'''
This is a PyTorch implementation of CURL: Neural Curve Layers for Global Image Enhancement
https://arxiv.org/pdf/1911.13175.pdf
Please cite paper if you use this code.
Tested with Pytorch 1.7.1, Python 3.7.9
Authors: Sean Moran (sean.j.moran@gmail.com), 2020
Instructions:
To get this code working on your system / problem please see the README.
*** BATCH SIZE: Note this code is designed for a batch size of 1. The code needs to be re-engineered to support higher batch sizes. Using higher batch sizes is not supported currently and could lead to artefacts. To replicate our reported results,
please use a batch size of 1 only ***
'''
from data import Adobe5kDataLoader, Dataset
import time
import torch
import torchvision.transforms as transforms
from torch.autograd import Variable
import logging
import argparse
import torch.optim as optim
import numpy as np
import datetime
import os.path
import os
import metric
import model
import sys
from torch.utils.tensorboard import SummaryWriter
np.set_printoptions(threshold=sys.maxsize)
def main():
    """Train CURL on image pairs, or run inference with a saved checkpoint.

    Modes, selected by CLI flags:
      * --checkpoint_filepath AND --inference_img_dirpath: inference only.
      * otherwise: training (optionally resuming from --checkpoint_filepath),
        evaluating on the validation/test splits every --valid_every epochs
        and checkpointing whenever validation PSNR improves.

    Fixes: the pre-PyTorch-0.5 idiom ``loss.data[0]`` (which raises
    IndexError for 0-dim tensors on the PyTorch 1.7.1 this file targets) is
    replaced with ``loss.item()``; dead commented-out code removed.
    """
    print("*** Before running this code ensure you keep the default batch size of 1. The code has not been engineered to support higher batch sizes. See README for more detail. Remove the exit() statement to use code. ***")
    # Deliberate guard left by the authors: remove to actually run (see README).
    exit()

    writer = SummaryWriter()

    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    log_dirpath = "./log_" + timestamp
    os.mkdir(log_dirpath)

    handlers = [logging.FileHandler(
        log_dirpath + "/curl.log"), logging.StreamHandler()]
    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', handlers=handlers)

    parser = argparse.ArgumentParser(
        description="Train the CURL neural network on image pairs")
    parser.add_argument(
        "--num_epoch", type=int, required=False, help="Number of epoches (default 5000)", default=100000)
    parser.add_argument(
        "--valid_every", type=int, required=False, help="Number of epoches after which to compute validation accuracy",
        default=10)
    parser.add_argument(
        "--checkpoint_filepath", required=False, help="Location of checkpoint file", default=None)
    parser.add_argument(
        "--inference_img_dirpath", required=False,
        help="Directory containing images to run through a saved CURL model instance", default=None)
    parser.add_argument(
        "--training_img_dirpath", required=False,
        help="Directory containing images to train a DeepLPF model instance", default="/home/sjm213/adobe5k/adobe5k/")

    args = parser.parse_args()
    num_epoch = args.num_epoch
    valid_every = args.valid_every
    checkpoint_filepath = args.checkpoint_filepath
    inference_img_dirpath = args.inference_img_dirpath
    training_img_dirpath = args.training_img_dirpath

    logging.info('######### Parameters #########')
    logging.info('Number of epochs: ' + str(num_epoch))
    logging.info('Logging directory: ' + str(log_dirpath))
    logging.info('Dump validation accuracy every: ' + str(valid_every))
    logging.info('Training image directory: ' + str(training_img_dirpath))
    logging.info('##############################')

    BATCH_SIZE = 1  # *** WARNING: batch size of > 1 not supported in current version of code ***

    if (checkpoint_filepath is not None) and (inference_img_dirpath is not None):
        # inference_img_dirpath: the actual filepath should have "input" in
        # the name, and in the level above where the images for inference are
        # located there should be a file "images_inference.txt" with each
        # image filename as one line, i.e.
        #   a1000.tif
        #   a1242.tif
        #   etc
        assert (BATCH_SIZE == 1)

        inference_data_loader = Adobe5kDataLoader(data_dirpath=inference_img_dirpath,
                                                  img_ids_filepath=inference_img_dirpath + "/images_inference.txt")
        inference_data_dict = inference_data_loader.load_data()
        inference_dataset = Dataset(data_dict=inference_data_dict,
                                    transform=transforms.Compose([transforms.ToTensor()]), normaliser=1,
                                    is_inference=True)

        inference_data_loader = torch.utils.data.DataLoader(inference_dataset, batch_size=BATCH_SIZE, shuffle=False,
                                                            num_workers=10)

        # Performs inference on all the images in inference_img_dirpath.
        logging.info(
            "Performing inference with images in directory: " + inference_img_dirpath)

        net = model.CURLNet()
        checkpoint = torch.load(checkpoint_filepath, map_location='cuda')
        net.load_state_dict(checkpoint['model_state_dict'])
        net.eval()

        criterion = model.CURLLoss()

        inference_evaluator = metric.Evaluator(
            criterion, inference_data_loader, "test", log_dirpath)

        inference_evaluator.evaluate(net, epoch=0)
    else:
        assert (BATCH_SIZE == 1)

        # Build the train/valid/test datasets from the id list files.
        training_data_loader = Adobe5kDataLoader(data_dirpath=training_img_dirpath,
                                                 img_ids_filepath=training_img_dirpath + "/images_train.txt")
        training_data_dict = training_data_loader.load_data()
        training_dataset = Dataset(data_dict=training_data_dict, normaliser=1, is_valid=False)

        validation_data_loader = Adobe5kDataLoader(data_dirpath=training_img_dirpath,
                                                   img_ids_filepath=training_img_dirpath + "/images_valid.txt")
        validation_data_dict = validation_data_loader.load_data()
        validation_dataset = Dataset(data_dict=validation_data_dict, normaliser=1, is_valid=True)

        testing_data_loader = Adobe5kDataLoader(data_dirpath=training_img_dirpath,
                                                img_ids_filepath=training_img_dirpath + "/images_test.txt")
        testing_data_dict = testing_data_loader.load_data()
        testing_dataset = Dataset(data_dict=testing_data_dict, normaliser=1, is_valid=True)

        training_data_loader = torch.utils.data.DataLoader(training_dataset, batch_size=BATCH_SIZE, shuffle=True,
                                                           num_workers=6)
        testing_data_loader = torch.utils.data.DataLoader(testing_dataset, batch_size=BATCH_SIZE, shuffle=False,
                                                          num_workers=6)
        validation_data_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=BATCH_SIZE,
                                                             shuffle=False,
                                                             num_workers=6)

        net = model.CURLNet()
        net.cuda()

        logging.info('######### Network created #########')
        logging.info('Architecture:\n' + str(net))

        for name, param in net.named_parameters():
            if param.requires_grad:
                print(name)

        criterion = model.CURLLoss(ssim_window_size=5)

        # Evaluators for the validation and test splits of the dataset.
        validation_evaluator = metric.Evaluator(
            criterion, validation_data_loader, "valid", log_dirpath)
        testing_evaluator = metric.Evaluator(
            criterion, testing_data_loader, "test", log_dirpath)

        start_epoch = 0
        if (checkpoint_filepath is not None) and (inference_img_dirpath is None):
            # Resume training from a saved checkpoint, with a lowered LR.
            logging.info('######### Loading Checkpoint #########')
            checkpoint = torch.load(checkpoint_filepath, map_location='cuda')
            net.load_state_dict(checkpoint['model_state_dict'])
            optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                          net.parameters()), lr=1e-4, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-10)
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            for g in optimizer.param_groups:
                g['lr'] = 1e-5
            start_epoch = checkpoint['epoch']
            loss = checkpoint['loss']
            net.cuda()
        else:
            optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                          net.parameters()), lr=1e-4, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-10)

        best_valid_psnr = 0.0

        optimizer.zero_grad()
        net.train()

        running_loss = 0.0
        examples = 0
        total_examples = 0

        for epoch in range(start_epoch, num_epoch):
            # Training pass over the whole dataset.
            examples = 0.0
            running_loss = 0.0

            for batch_num, data in enumerate(training_data_loader, 0):
                input_img_batch, gt_img_batch, category = Variable(data['input_img'],
                                                                   requires_grad=False).cuda(), Variable(data['output_img'],
                                                                                                         requires_grad=False).cuda(), data['name']

                start_time = time.time()
                net_img_batch, gradient_regulariser = net(input_img_batch)
                net_img_batch = torch.clamp(net_img_batch, 0.0, 1.0)
                elapsed_time = time.time() - start_time

                loss = criterion(net_img_batch, gt_img_batch, gradient_regulariser)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # loss.item() replaces loss.data[0], which raises IndexError
                # for 0-dim tensors on PyTorch >= 0.5.
                running_loss += loss.item()
                examples += BATCH_SIZE
                total_examples += BATCH_SIZE

                writer.add_scalar('Loss/train', loss.item(), total_examples)

            logging.info('[%d] train loss: %.15f' %
                         (epoch + 1, running_loss / examples))
            writer.add_scalar('Loss/train_smooth', running_loss / examples, epoch + 1)

            if (epoch + 1) % valid_every == 0:
                logging.info("Evaluating model on validation dataset")

                valid_loss, valid_psnr, valid_ssim = validation_evaluator.evaluate(net, epoch)
                test_loss, test_psnr, test_ssim = testing_evaluator.evaluate(net, epoch)

                # Checkpoint whenever the validation PSNR improves.
                if valid_psnr > best_valid_psnr:
                    logging.info(
                        "Validation PSNR has increased. Saving the more accurate model to file: " + 'curl_validpsnr_{}_validloss_{}_testpsnr_{}_testloss_{}_epoch_{}_model.pt'.format(
                            valid_psnr, valid_loss.tolist()[0], test_psnr, test_loss.tolist()[0], epoch))

                    best_valid_psnr = valid_psnr
                    snapshot_prefix = os.path.join(log_dirpath, 'curl')
                    snapshot_path = snapshot_prefix + '_validpsnr_{}_validloss_{}_testpsnr_{}_testloss_{}_epoch_{}_model.pt'.format(
                        valid_psnr, valid_loss.tolist()[0], test_psnr, test_loss.tolist()[0], epoch + 1)

                    torch.save({
                        'epoch': epoch + 1,
                        'model_state_dict': net.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': loss,
                    }, snapshot_path)

                net.train()

        # Final evaluation on the validation/test splits and a last snapshot.
        snapshot_prefix = os.path.join(log_dirpath, 'curl')

        valid_loss, valid_psnr, valid_ssim = validation_evaluator.evaluate(net, epoch)
        test_loss, test_psnr, test_ssim = testing_evaluator.evaluate(net, epoch)

        snapshot_path = snapshot_prefix + '_validpsnr_{}_validloss_{}_testpsnr_{}_testloss_{}_epoch_{}_model.pt'.format(
            valid_psnr, valid_loss.tolist()[0], test_psnr, test_loss.tolist()[0], epoch + 1)

        torch.save({
            'epoch': epoch + 1,
            'model_state_dict': net.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': loss,
        }, snapshot_path)
# Allow running the training/inference script directly from the command line.
if __name__ == "__main__":
    main()
|
from pydantic import BaseModel
class AlertType(BaseModel):
    """Schema for an alert type record."""
    id: int
    name: str
    description: str
    photo: str

    class Config:
        # Allow construction from ORM objects (pydantic v1 setting; renamed
        # `from_attributes` in pydantic v2).
        orm_mode = True
class Animal(BaseModel):
    """Schema for an animal record."""
    id: int
    name: str
    photo: str
    # NOTE(review): presumably a foreign key to AnimalType.id — confirm.
    type: int

    class Config:
        # Allow construction from ORM objects (pydantic v1 setting).
        orm_mode = True
class AnimalType(BaseModel):
    """Schema for an animal type record."""
    id: int
    name: str
    description: str
    photo: str

    class Config:
        # Allow construction from ORM objects (pydantic v1 setting).
        orm_mode = True
class Logs(BaseModel):
    """Schema for a log entry (token, timestamp string, and user id)."""
    token: str
    date: str
    user: int

    class Config:
        # Allow construction from ORM objects (pydantic v1 setting).
        orm_mode = True
class Observation(BaseModel):
    """Schema for a field observation."""
    id: int
    type: str
    date: str
    # Coordinates are stored as strings, not floats — kept as declared.
    long: str
    lat: str
    desc: str
    photo: str

    class Config:
        # Allow construction from ORM objects (pydantic v1 setting).
        orm_mode = True
class User(BaseModel):
    """Schema for a user account.

    NOTE(review): `password` is part of this schema — if it is ever used as a
    response model it will leak credentials; confirm it is input-only.
    """
    id: int
    name: str
    surname: str
    photo: str
    fonction: str
    date: str
    email: str
    telephone: str
    city: str
    country: str
    password: str

    class Config:
        # Allow construction from ORM objects (pydantic v1 setting).
        orm_mode = True
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Register new Pipes. Requires the API to be running.
"""
from __future__ import annotations
from meerschaum.utils.typing import SuccessTuple, Any, List, Optional
def register(
        action : Optional[List[str]] = None,
        **kw : Any
    ) -> SuccessTuple:
    """
    Register new elements, dispatching to the pipes / plugins / users
    sub-action based on the first word of `action`.
    """
    from meerschaum.utils.misc import choose_subaction
    subactions = {
        'pipes': _register_pipes,
        'plugins': _register_plugins,
        'users': _register_users,
    }
    return choose_subaction(action, subactions, **kw)
def _complete_register(
        action : Optional[List[str]] = None,
        **kw : Any
    ) -> List[str]:
    """
    Override the default Meerschaum `complete_` function so plugin names
    get their own completer; everything else falls back to the default.
    """
    if action is None:
        action = []
    plugin_completers = {
        'plugin' : _complete_register_plugins,
        'plugins' : _complete_register_plugins,
    }
    if action and action[0] in plugin_completers:
        # Consume the sub-action token before delegating.
        first = action.pop(0)
        return plugin_completers[first](action=action, **kw)
    from meerschaum.actions.shell import default_action_completer
    return default_action_completer(action=(['register'] + action), **kw)
def _register_pipes(
        connector_keys : Optional[List[str]] = None,
        metric_keys : Optional[List[str]] = None,
        location_keys : Optional[List[str]] = None,
        params : Optional[Dict[str, Any]] = None,
        debug : bool = False,
        **kw : Any
    ) -> SuccessTuple:
    """
    Create and register Pipe objects.

    Required: connector_keys and metric_keys. If location_keys is empty, assume [None].
    """
    # NOTE(review): `Dict` is not in the module-level typing import (only
    # SuccessTuple, Any, List, Optional); the annotation only survives thanks
    # to `from __future__ import annotations` — consider importing Dict.
    from meerschaum import get_pipes, get_connector
    from meerschaum.utils.debug import dprint
    from meerschaum.utils.warnings import warn, info

    # Normalize the mutable-default-avoiding None sentinels.
    if connector_keys is None:
        connector_keys = []
    if metric_keys is None:
        metric_keys = []
    if location_keys is None:
        location_keys = []
    if params is None:
        params = {}

    if len(connector_keys) == 0 or len(metric_keys) == 0:
        warn(
            "You must provide connector keys (-c) and metrics (-m) to register pipes.\n\n" +
            "Run `bootstrap pipe` for an interactive guide that creates pipes.",
            stack = False
        )
        return False, "Missing connector keys or metrics"

    pipes = get_pipes(
        connector_keys = connector_keys,
        metric_keys = metric_keys,
        location_keys = location_keys,
        params = params,
        as_list = True,
        method = 'explicit',
        debug = debug,
        **kw
    )

    # Register each pipe, collecting failures instead of string-slicing a
    # trailing separator off a concatenated message.
    success = True
    failures = []
    for p in pipes:
        if debug:
            dprint(f"Registering pipe '{p}'...")
        ss, msg = p.register(debug=debug)
        if not ss:
            warn(f"{msg}", stack=False)
            success = False
            failures.append(f"{p}")

    message = "Success"
    if failures:
        message = "Failed to register pipes: " + ", ".join(failures)
    return success, message
def _register_plugins(
        action : Optional[List[str]] = None,
        repository : Optional[str] = None,
        shell : bool = False,
        debug : bool = False,
        yes: bool = False,
        force: bool = False,
        **kw : Any
    ) -> SuccessTuple:
    """
    Upload installed plugins to a Meerschaum API repository, optionally
    prompting for (or overwriting) a plugin description.
    """
    from meerschaum.utils.debug import dprint
    from meerschaum.plugins import reload_plugins, get_plugins_names
    from meerschaum.connectors.parse import parse_repo_keys
    from meerschaum.config import get_config
    from meerschaum.utils.warnings import warn, error, info
    from meerschaum._internal.Plugin import Plugin
    from meerschaum import get_connector
    from meerschaum.utils.formatting import print_tuple
    from meerschaum.utils.prompt import prompt, yes_no

    if action is None:
        action = []

    reload_plugins(debug=debug)

    repo_connector = parse_repo_keys(repository)
    if repo_connector.__dict__.get('type', None) != 'api':
        # Fixed: the message previously called repo_connector.get(...) (an
        # attribute Connector objects need not have — the check above uses
        # __dict__.get) and was missing a space between sentences.
        return False, (
            f"Can only upload plugins to the Meerschaum API. " +
            f"Connector '{repo_connector}' is of type " +
            f"'{repo_connector.__dict__.get('type', type(repo_connector))}'."
        )

    if len(action) == 0 or action == ['']:
        return False, "No plugins to register."

    # Only registered, locally-installed plugins may be uploaded.
    plugins_to_register = dict()
    plugins_names = get_plugins_names()
    for p in action:
        if p not in plugins_names:
            warn(
                f"Plugin '{p}' is not installed and cannot be registered. Ignoring...",
                stack=False
            )
        else:
            plugins_to_register[p] = Plugin(p)

    successes = dict()

    for name, plugin in plugins_to_register.items():
        desc = None
        # Fetch any existing remote attributes so we can offer to keep or
        # overwrite the current description.
        plugin.attributes = repo_connector.get_plugin_attributes(plugin, debug=debug)
        if plugin.attributes is None:
            plugin.attributes = {}

        question = f"Would you like to add a description to plugin '{name}'?"
        if plugin.attributes.get('description', None):
            info(f"Found existing description for plugin '{plugin}':")
            print(plugin.attributes['description'])
            question = (
                "Would you like to overwrite this description?\n"
                + "To edit the existing text, visit /dash/plugins for this repository."
            )
        if yes_no(
            question,
            default='n',
            yes=yes
        ):
            info('Press (Esc + Enter) to submit the description, (CTRL + C) to cancel.')
            try:
                desc = prompt('', multiline=True, icon=False)
            except KeyboardInterrupt:
                desc = None
            if desc == '':
                desc = None
        if desc is not None:
            plugin.attributes = {'description': desc}

        info(f"Registering plugin '{plugin}' to Meerschaum API '{repo_connector}'..." + '\n')
        success, msg = repo_connector.register_plugin(plugin, debug=debug)
        print_tuple((success, msg + '\n'))
        successes[name] = (success, msg)

    # Tally the per-plugin outcomes for the summary message.
    total_success, total_fail = 0, 0
    for p, tup in successes.items():
        if tup[0]:
            total_success += 1
        else:
            total_fail += 1

    if debug:
        from meerschaum.utils.formatting import pprint
        dprint("Return values for each plugin:")
        pprint(successes)

    msg = (
        f"Finished registering {len(plugins_to_register)} plugins" + '\n' +
        f"    ({total_success} succeeded, {total_fail} failed)."
    )
    reload_plugins(debug=debug)
    return total_success > 0, msg
def _complete_register_plugins(*args, **kw):
    """Delegate shell tab-completion to the `uninstall plugins` completer
    (both actions complete over the set of installed plugins)."""
    from meerschaum.actions.uninstall import _complete_uninstall_plugins as _delegate
    return _delegate(*args, **kw)
def _register_users(
        action : Optional[List[str]] = None,
        mrsm_instance : Optional[str] = None,
        shell : bool = False,
        debug : bool = False,
        **kw : Any
    ) -> SuccessTuple:
    """
    Register new users to a Meerschaum instance.

    Skips usernames that are too short or already registered, prompts for a
    password and an (optional) email address for each remaining username,
    and registers them one at a time.

    :param action: usernames to register.
    :param mrsm_instance: keys of the target instance
        (defaults to the configured 'meerschaum:instance').
    :param debug: verbosity toggle.

    Returns a SuccessTuple: (True if at least one user was registered,
    summary message).
    """
    from meerschaum.config import get_config
    from meerschaum.config.static import _static_config
    from meerschaum import get_connector
    from meerschaum.connectors.parse import parse_instance_keys
    from meerschaum.utils.debug import dprint
    from meerschaum.utils.warnings import warn, error, info
    from meerschaum._internal.User import User
    from meerschaum.utils.formatting import print_tuple
    from meerschaum.utils.prompt import prompt, get_password, get_email
    if mrsm_instance is None:
        mrsm_instance = get_config('meerschaum', 'instance')
    instance_connector = parse_instance_keys(mrsm_instance)

    if not action:
        return False, "No users to register."

    ### Filter out invalid and existing users.
    ### NOTE: the minimum username length is loop-invariant, so look it up once.
    min_len = _static_config()['users']['min_username_length']
    nonregistered_users = []
    for username in action:
        if len(username) < min_len:
            warn(
                f"Username '{username}' is too short (less than {min_len} characters). Skipping...",
                stack = False
            )
            continue
        user = User(username=username, instance=instance_connector)
        user_id = instance_connector.get_user_id(user, debug=debug)
        if user_id is not None:
            warn(f"User '{user}' already exists. Skipping...", stack=False)
            continue
        nonregistered_users.append(user)

    ### Prompt for passwords and emails, then try to register.
    success = dict()
    successfully_registered_users = set()
    for _user in nonregistered_users:
        try:
            username = _user.username
            password = get_password(
                username,
                minimum_length = _static_config()['users']['min_password_length']
            )
            email = get_email(username, allow_omit=True)
        except Exception:
            ### The operator aborted (e.g. Ctrl+C) or a prompt failed:
            ### report which users never got registered.
            return False, (
                "Aborted registering users " +
                ', '.join(
                    [
                        str(u) for u in nonregistered_users
                        if u not in successfully_registered_users
                    ]
                )
            )
        ### get_email(allow_omit=True) may return an empty string; treat as "no email".
        if len(email) == 0:
            email = None
        user = User(username, password, email=email)
        info(f"Registering user '{user}' to Meerschaum instance '{instance_connector}'...")
        result_tuple = instance_connector.register_user(user, debug=debug)
        print_tuple(result_tuple)
        success[username] = result_tuple[0]
        if success[username]:
            successfully_registered_users.add(user)

    succeeded, failed = 0, 0
    for username, r in success.items():
        if r:
            succeeded += 1
        else:
            failed += 1
    msg = (
        f"Finished registering {succeeded + failed} users." + '\n' +
        f"    ({succeeded} succeeded, {failed} failed)"
    )
    return succeeded > 0, msg
### NOTE: This must be the final statement of the module.
### Any subactions added below these lines will not
### be added to the `help` docstring.
### (Appends the list of available subactions/choices to `register`'s docstring.)
from meerschaum.utils.misc import choices_docstring as _choices_docstring
register.__doc__ += _choices_docstring('register')
|
"""
This example will access the twitter follow button API, grab a number like
the number of followers... and display it on a screen!
if you can find something that spits out JSON data, we can display it
"""
import sys
import time
import board
from adafruit_pyportal import PyPortal
cwd = ("/"+__file__).rsplit('/', 1)[0] # the current working directory (where this file is)
sys.path.append(cwd)
import openweather_graphics # pylint: disable=wrong-import-position
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
### START AIO CODE
# Import Adafruit IO HTTP Client
from adafruit_io.adafruit_io import IO_HTTP, AdafruitIO_RequestError
# Set your Adafruit IO Username and Key in secrets.py
# (visit io.adafruit.com if you need to create an account,
# or if you need your Adafruit IO key.)
ADAFRUIT_IO_DATA_SOURCE = 'http://wifitest.adafruit.com/testwifi/index.html'
ADAFRUIT_IO_USER = secrets['aio_username']
ADAFRUIT_IO_KEY = secrets['aio_key']
ADAFRUIT_IO_DATA_LOCATION = []
### END AIO CODE
# Use cityname, country code where countrycode is ISO3166 format.
# E.g. "New York, US" or "London, GB"
LOCATION = "Richland, WA, US"
# Set up where we'll be fetching data from
DATA_SOURCE = "http://api.openweathermap.org/data/2.5/weather?q="+LOCATION
DATA_SOURCE += "&appid="+secrets['openweather_token']
# You'll need to get a token from openweather.org, looks like 'b6907d289e10d714a6e88b30761fae22'
DATA_LOCATION = []
# Initialize the pyportal object and let us know what data to fetch and where
# to display it
pyportal = PyPortal(url=ADAFRUIT_IO_DATA_SOURCE,
json_path=ADAFRUIT_IO_DATA_LOCATION,
status_neopixel=board.NEOPIXEL,
default_bg=0x000000)
pyportal.set_backlight(0.75)
gfx = openweather_graphics.OpenWeather_Graphics(pyportal.splash, am_pm=True, celsius=False)
# push some test data to AIO
t0 = time.monotonic()
print('* pushing to aio...', (time.monotonic()-t0) / 60)
pyportal.push_to_io('shoplightlevel', (time.monotonic()-t0) / 60)
pyportal.push_to_io('shoplightlevel', (time.monotonic()-t0) / 60)
print('*** pushed to aio. ', (time.monotonic()-t0) / 60)
### START AIO CODE
pyportal._url = DATA_SOURCE
pyportal._json_path = DATA_LOCATION
# Go get that data
print("Fetching text from", pyportal._url)
data = pyportal.fetch()
### END AIO CODE
localtile_refresh = None
weather_refresh = None
pyportal.play_file("storm_tracker.wav", wait_to_finish=True) # True to disable speaker after playing
t0 = time.monotonic()
while True:
# only query the online time once per hour (and on first run)
if (not localtile_refresh) or (time.monotonic() - localtile_refresh) > 3600:
try:
print("Getting time from internet!")
pyportal.get_local_time()
localtile_refresh = time.monotonic()
except (ValueError, RuntimeError) as e: # ValueError added from quote.py change
print("Some error occured, retrying! -", e)
continue
# only query the weather every 10 minutes (and on first run)
if (not weather_refresh) or (time.monotonic() - weather_refresh) > 600:
try:
value = pyportal.fetch()
print("Response is", value)
gfx.display_weather(value)
weather_refresh = time.monotonic()
print('*** pushing to aio...', (time.monotonic()-t0) / 60)
pyportal.push_to_io('shoplightlevel', (time.monotonic()-t0) / 60)
print('*** pushed to aio. ', (time.monotonic()-t0) / 60)
t0 = time.monotonic()
except (ValueError, RuntimeError) as e: # ValueError added from quote.py change
print("Some error occured, retrying! -", e)
continue
gfx.update_time()
time.sleep(30) # wait 30 seconds before updating anything again
|
import struct
import re
from err import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
# Matches the VALUES(...) clause of an INSERT statement.
# NOTE(review): compiled but unused in this chunk -- presumably intended for
# rewriting multi-row INSERTs in executemany(); confirm before removing.
insert_values = re.compile(r'\svalues\s*(\(.+\))', re.IGNORECASE)
class Cursor(object):
    '''
    This is the object you use to interact with the database.

    Implements the DB-API 2.0 (PEP 249) cursor interface.  Rows of the
    current result set are held in ``self._rows`` and served out by the
    ``fetch*`` methods; ``rownumber`` is the read position within them.
    '''

    def __init__(self, connection):
        '''
        Do not create an instance of a Cursor yourself. Call
        connections.Connection.cursor().
        '''
        from weakref import proxy
        # Weak proxy so a dangling cursor does not keep the connection alive.
        self.connection = proxy(connection)
        self.description = None
        self.rownumber = 0
        self.rowcount = -1
        self.arraysize = 1
        self._executed = None
        self.messages = []
        self.errorhandler = connection.errorhandler
        self._has_next = None
        self._rows = ()

    def __del__(self):
        '''
        When this gets GC'd close it.
        '''
        self.close()

    def close(self):
        '''
        Closing a cursor just exhausts all remaining data.
        '''
        if not self.connection:
            return
        try:
            # Drain remaining result sets so the connection is reusable.
            while self.nextset():
                pass
        except Exception:
            # Best-effort cleanup: the connection may already be gone
            # (e.g. during interpreter shutdown).  Previously a bare
            # except:, narrowed so KeyboardInterrupt/SystemExit escape.
            pass
        self.connection = None

    def _get_db(self):
        '''Return the connection, or report "cursor closed" via errorhandler.'''
        if not self.connection:
            self.errorhandler(self, ProgrammingError, "cursor closed")
        return self.connection

    def _check_executed(self):
        '''Report via errorhandler if no query has been executed yet.'''
        if not self._executed:
            self.errorhandler(self, ProgrammingError, "execute() first")

    def setinputsizes(self, *args):
        """Does nothing, required by DB API."""

    def setoutputsizes(self, *args):
        """Does nothing, required by DB API.

        NOTE(review): PEP 249 names this method ``setoutputsize``; the
        name is kept as-is for backward compatibility with callers.
        """

    def nextset(self):
        '''Advance to the next result set.

        Returns True if another set was loaded, None otherwise.
        '''
        if self._executed:
            # Exhaust the current set before moving on.
            self.fetchall()
        del self.messages[:]
        if not self._has_next:
            return None
        connection = self._get_db()
        connection.next_result()
        self._do_get_result()
        return True

    def execute(self, query, args=None):
        '''Execute a query.

        ``args`` (if given) are escaped by the connection and interpolated
        into ``query`` with the %% operator.  Errors are appended to
        ``self.messages`` and routed through the errorhandler.
        '''
        from sys import exc_info
        conn = self._get_db()
        charset = conn.charset
        del self.messages[:]
        # this ordering is good because conn.escape() returns
        # an encoded string.
        if isinstance(query, unicode):
            query = query.encode(charset)
        if args is not None:
            query = query % conn.escape(args)
        result = 0
        try:
            result = self._query(query)
        except Exception:
            exc, value, tb = exc_info()
            del tb  # drop the traceback to avoid a reference cycle
            self.messages.append((exc, value))
            self.errorhandler(self, exc, value)
        self._executed = query
        return result

    def executemany(self, query, args):
        '''Run several data against one query.

        Returns the total affected row count, or None when ``args`` is empty.
        '''
        del self.messages[:]
        conn = self._get_db()
        if not args:
            return
        charset = conn.charset
        if isinstance(query, unicode):
            query = query.encode(charset)
        self.rowcount = sum([self.execute(query, arg) for arg in args])
        return self.rowcount

    def callproc(self, procname, args=()):
        '''Call a stored procedure. Take care to ensure that procname is
        properly escaped.'''
        if not isinstance(args, tuple):
            args = (args,)
        # Build one %s placeholder per argument.
        argstr = ("%s," * len(args))[:-1]
        return self.execute("CALL `%s`(%s)" % (procname, argstr), args)

    def fetchone(self):
        '''Fetch the next row, or None when exhausted.'''
        self._check_executed()
        if self._rows is None or self.rownumber >= len(self._rows):
            return None
        result = self._rows[self.rownumber]
        self.rownumber += 1
        return result

    def fetchmany(self, size=None):
        '''Fetch up to ``size`` rows (default ``self.arraysize``).'''
        self._check_executed()
        # BUGFIX: the None check must precede the slice -- the original
        # sliced self._rows first, raising TypeError when it was None.
        if self._rows is None:
            return None
        end = self.rownumber + (size or self.arraysize)
        result = self._rows[self.rownumber:end]
        self.rownumber = min(end, len(self._rows))
        return result

    def fetchall(self):
        '''Fetch all remaining rows.'''
        self._check_executed()
        if self._rows is None:
            return None
        if self.rownumber:
            result = self._rows[self.rownumber:]
        else:
            result = self._rows
        self.rownumber = len(self._rows)
        return result

    def scroll(self, value, mode='relative'):
        '''Move the row pointer by ``value`` (relative) or to ``value`` (absolute).'''
        if mode == 'relative':
            r = self.rownumber + value
        elif mode == 'absolute':
            r = value
        else:
            self.errorhandler(self, ProgrammingError,
                              "unknown scroll mode %s" % mode)
            # BUGFIX: if the errorhandler returns instead of raising,
            # `r` would be unbound below (NameError).
            return
        if r < 0 or r >= len(self._rows):
            self.errorhandler(self, IndexError, "out of range")
            # BUGFIX: do not install an out-of-range rownumber if the
            # errorhandler chose not to raise.
            return
        self.rownumber = r

    def _query(self, q):
        '''Send ``q`` to the server and load the result set.'''
        conn = self._get_db()
        self._last_executed = q
        conn.query(q)
        self._do_get_result()
        return self.rowcount

    def _do_get_result(self):
        '''Copy result metadata/rows off the connection's pending result.'''
        conn = self._get_db()
        self.rowcount = conn._result.affected_rows
        self.rownumber = 0
        self.description = conn._result.description
        self.lastrowid = conn._result.insert_id
        self._rows = conn._result.rows
        self._has_next = conn._result.has_next
        # The result is consumed; detach it from the connection.
        conn._result = None

    def __iter__(self):
        '''Iterate over the remaining (unfetched) rows.'''
        self._check_executed()
        result = self.rownumber and self._rows[self.rownumber:] or self._rows
        return iter(result)

    # Re-export the DB-API exception classes on the cursor, per PEP 249.
    Warning = Warning
    Error = Error
    InterfaceError = InterfaceError
    DatabaseError = DatabaseError
    DataError = DataError
    OperationalError = OperationalError
    IntegrityError = IntegrityError
    InternalError = InternalError
    ProgrammingError = ProgrammingError
    NotSupportedError = NotSupportedError
|
from django.conf import settings
from datetime import datetime
from kazoo.client import KazooClient
# ZooKeeper ACL permission bits (mirror org.apache.zookeeper.ZooDefs.Perms).
PERM_READ = 1
PERM_WRITE = 2
PERM_CREATE = 4
PERM_DELETE = 8
PERM_ADMIN = 16
PERM_ALL = PERM_READ | PERM_WRITE | PERM_CREATE | PERM_DELETE | PERM_ADMIN
# Connection timeout (seconds) for the kazoo client.
TIMEOUT = 10.0
# Required Django setting: getattr() without a default raises
# AttributeError at import time if ZOOKEEPER_SERVERS is not configured.
ZOOKEEPER_SERVERS = getattr(settings, 'ZOOKEEPER_SERVERS')
def _convert_stat(stat):
    """Copy a ZnodeStat's public attributes into a plain dict,
    converting the millisecond 'ctime'/'mtime' fields to datetimes."""
    converted = {}
    for name in dir(stat):
        if name.startswith('_'):
            continue
        attr = getattr(stat, name)
        # ctime/mtime are milliseconds since the epoch.
        converted[name] = (
            datetime.fromtimestamp(attr / 1000)
            if name in ('ctime', 'mtime')
            else attr
        )
    return converted
def _convert_acls(acls):
    """Render kazoo ACL objects as dicts with human-readable permission names.

    Each entry becomes {'scheme', 'id', 'perm_list'}; a full permission mask
    collapses to ['PERM_ALL'].
    """
    flag_labels = (
        (PERM_READ, 'PERM_READ'),
        (PERM_WRITE, 'PERM_WRITE'),
        (PERM_CREATE, 'PERM_CREATE'),
        (PERM_DELETE, 'PERM_DELETE'),
        (PERM_ADMIN, 'PERM_ADMIN'),
    )
    converted = []
    for acl in acls:
        labels = [label for flag, label in flag_labels if acl.perms & flag]
        if acl.perms & PERM_ALL == PERM_ALL:
            labels = ['PERM_ALL']
        converted.append({'scheme': acl.id.scheme, 'id': acl.id.id, 'perm_list': labels})
    return converted
class ZNode(object):
    """Read-only snapshot of a single ZooKeeper node.

    On construction, connects to the configured ensemble, captures the
    node's data, stat, children and ACLs, then closes the connection.
    """
    def __init__(self, path='/'):
        self.path = path
        client = KazooClient(hosts=ZOOKEEPER_SERVERS, read_only=True, timeout=TIMEOUT)
        try:
            client.start()
            self.data, raw_stat = client.get(path)
            self.stat = _convert_stat(raw_stat)
            self.children = client.get_children(path) or []
            raw_acls = client.get_acls(path)[0]
            self.acls = _convert_acls(raw_acls)
        finally:
            # Always release the session, even if any call above failed.
            client.stop()
|
#!/usr/bin/env python3.6
x = 123456789
x = 123456
x = .1
x = 1.
x = 1E+1
x = 1E-1
x = 1.00000001
x = 123456789.123456789
x = 123456789.123456789E123456789
x = 123456789E123456789
x = 123456789J
x = 123456789.123456789J
x = 0XB1ACC
x = 0B1011
x = 0O777
x = 0.000000006
# output
#!/usr/bin/env python3.6
x = 123_456_789
x = 123456
x = 0.1
x = 1.0
x = 1e1
x = 1e-1
x = 1.000_000_01
x = 123_456_789.123_456_789
x = 123_456_789.123_456_789e123_456_789
x = 123_456_789e123_456_789
x = 123_456_789j
x = 123_456_789.123_456_789j
x = 0xb1acc
x = 0b1011
x = 0o777
x = 0.000_000_006
|
#!/usr/bin/env python3
# the TweetStreamer is a subclass of TwythonStreamer
from twython import TwythonStreamer
# errors!
import error_messenger
# the TweetStreamer class will use the streaming api to check for new tweets.
# It will be used for filtering all tweets containing the trigger word specified in setup.py
# This class could technically be used to reply to all kinds of tweets.
class TweetStreamer(TwythonStreamer):
    """Streaming-API listener that forwards matching tweets to observers.

    Observers registered via arvid220u_add_observer() are called with each
    accepted tweet's raw data dict.
    """
    # Simple label to know from where the tweet streamer was called
    arvid220u_error_title = "placeholder"
    # Normally, retweets should be excluded
    arvid220u_exclude_retweets = True
    # Class-level default kept for backward compatibility; see
    # arvid220u_add_observer() for the per-instance list.
    arvid220u_new_tweet_observers = []

    def arvid220u_add_observer(self, observer):
        # BUGFIX: appending to the class-level list shared observers across
        # every instance; give each instance its own list on first use.
        if 'arvid220u_new_tweet_observers' not in self.__dict__:
            self.arvid220u_new_tweet_observers = []
        self.arvid220u_new_tweet_observers.append(observer)

    # this function will be called when a tweet is received
    def on_success(self, data):
        # ignore stream messages that are not tweets (no "text" field)
        if "text" not in data:
            return
        if self.arvid220u_exclude_retweets:
            # filter out retweets (manual "RT ..." and native retweets)
            if data["text"].startswith("RT"):
                return
            if "retweeted_status" in data:
                return
        # send tweet to every registered observer
        for observer in self.arvid220u_new_tweet_observers:
            observer(data)

    # when an error is caught
    def on_error(self, status_code, data):
        print("STREAMING API ERROR IN TWEETSTREAMER!")
        print("Status code:")
        print(status_code)
        # BUGFIX: arvid220u_error_title was referenced as a bare name here,
        # raising NameError at runtime; it is a class attribute, so use self.
        error_messenger.send_error_message("streaming API error, with code " + str(status_code), "TweetStreamer.on_error from " + self.arvid220u_error_title)
        print("Other data:")
        print(data)
        print("END OF ERROR MESSAGE")

    # on timeout
    def on_timeout(self):
        print("STREAMING API TIMEOUT IN TWEETSTREAMER!")
        # BUGFIX: same bare-name fix as in on_error.
        error_messenger.send_error_message("streaming API timeout", "TweetStreamer.on_timeout from " + self.arvid220u_error_title)
        print("END OF ERROR MESSAGE")
|
# Copyright 2017 Zhongyi Han. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definition of 512 SpinePathNet network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
#from tensorflow.contrib import rnn
from collections import namedtuple
import tensorflow as tf
import numpy as np
slim = tf.contrib.slim
def weight_variable(shape):
    """Create (or fetch, under reuse) a 'weights' variable with Xavier init.

    NOTE(review): the initializer is built with dtype=tf.float16 while the
    variable itself defaults to float32 -- confirm this is intentional.
    """
    xavier = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float16)
    return tf.get_variable("weights", shape, initializer=xavier)
def bias_variable(shape):
    """Create (or fetch, under reuse) a 'biases' variable initialized to 0.1."""
    const_init = tf.constant_initializer(0.1)
    return tf.get_variable("biases", shape, initializer=const_init)
def conv_layer(x, w, b, name, padding='SAME'):
    """Stride-1 2-D convolution plus bias.

    `w` and `b` are *shapes* (not tensors); the variables are created in the
    current variable scope via weight_variable/bias_variable.
    """
    weights = weight_variable(w)
    biases = bias_variable(b)
    conv = tf.nn.conv2d(x, weights, strides=[1, 1, 1, 1], padding=padding, name=name)
    return conv + biases
def conv_layer_without_bias(x, w, name, padding='SAME'):
    """Stride-1 2-D convolution with no bias term; `w` is the filter shape."""
    weights = weight_variable(w)
    return tf.nn.conv2d(x, weights, strides=[1, 1, 1, 1], padding=padding, name=name)
def conv_layer_gan(x, w, b, name, padding='SAME'):
    """Stride-2 2-D convolution plus bias (downsamples by 2x; used by the GAN path)."""
    weights = weight_variable(w)
    biases = bias_variable(b)
    conv = tf.nn.conv2d(x, weights, strides=[1, 2, 2, 1], padding=padding, name=name)
    return conv + biases
def deconv_layer(x, w, output_shape, name=None):
    """Stride-2 transposed convolution (2x spatial upsampling), NHWC layout."""
    weights = weight_variable(w)
    return tf.nn.conv2d_transpose(
        x, weights, output_shape,
        strides=[1, 2, 2, 1], padding='SAME', data_format='NHWC', name=name)
def graph_layer(x, batch_size, fold):
    """Knowledge-graph reasoning layer over the feature map.

    Votes local features onto 6 graph nodes, propagates node states along
    the graph edges, then maps updated node states back to pixel locations.

    Args:
        x: feature tensor; the reshapes below assume [batch_size, 128, 128, 128].
        batch_size: static batch size (needed for reshape/tile).
        fold: cross-validation fold index selecting the .npy graph file.

    Returns:
        ReLU'd tensor of shape [batch_size, 128, 128, 128].
    """
    # np.load of a pickled dict gives a 0-d object array; .item() unwraps it.
    kg = np.load('datasets/spine_segmentation/spine_segmentation_{0}/knowledge_graph.npy'.format(fold))
    #print(kg.item()['node_edge'])
    node_edge = tf.convert_to_tensor(kg.item()['node_edge'],dtype=tf.float32) #a 6 x 6 matrix
    node_representation = tf.convert_to_tensor(kg.item()['node_representation'], dtype=tf.float32)# a 6 * 128 matrix of NV: 0, ND: 1, NNF: 2, AV: 3, AD: 4, ANF: 5
    node_edge = tf.expand_dims(node_edge, 0) #a 1 * 6 * 6 matrix
    node_representation = tf.expand_dims(node_representation, 0) #a 1 * 6 * 128 matrix
    node_edge = tf.tile(node_edge, [batch_size,1,1])#TODO #a batch_size * 6 * 6 matrix
    node_representation = tf.tile(node_representation, [batch_size,1,1])#TODO #a batch_size * 6 * 128 matrix
    """ model one, local to semantic voting:"""
    with tf.variable_scope('graph_1'):
        x1 = conv_layer_without_bias(x, [1, 1, 128, 128], 'graph_1') # [batchsize * 128 * 128 * 128]
        x1 = tf.reshape(x1, [batch_size, -1, 128]) # [batchsize * (128*128) * 128]
    with tf.variable_scope('graph_2'):
        x2 = conv_layer_without_bias(x, [1, 1, 128, 6], 'graph_2') # [batchsize * 128 * 128 * 6]
        x2 = tf.reshape(x2, [batch_size, 6, -1]) # [batchsize * 6 * (128*128)]
        x2 = tf.nn.softmax(x2, axis=-1) # [batchsize * 6 * (128*128)]
        # soft-assign each pixel's feature to the 6 nodes
        hps = tf.nn.relu(tf.matmul(x2, x1)) # [batchsize * 6 * 128]
    """ module two, graph propagation along edges:"""
    hps = tf.concat([node_representation, hps], -1)# [batchsize * 6 * (128 + 128)]
    with tf.variable_scope('graph_3'):
        hps = tf.layers.dense(hps, 128, name='graph_3') # [batchsize * 6 * 128]
        hg = tf.nn.relu(tf.matmul(node_edge, hps)) # [batchsize * 6 * 128]
    """ module three, semantic to local mapping:"""
    # expand 6 * 128 to (128*128) * 6 * 128
    hg_expand = tf.expand_dims(hg, 1)# [batch_szie * 1 * 6 * 128]
    hg_expand = tf.tile(hg_expand, [1, 128*128, 1, 1]) # [batch_szie * (128*128) * 6 * 128]
    x_expand = tf.reshape(x, [batch_size, 128 * 128, 1, 128]) # [batch_szie * (128*128) * 1 * 128]
    x_expand = tf.tile(x_expand, [1, 1, 6, 1]) # [batch_szie * (128*128) * 6 * 128]
    hg_x = tf.concat([hg_expand, x_expand], -1) # [batch_szie * (128*128) * 6 * (128+128)]
    with tf.variable_scope('graph_4'):
        hg_x = conv_layer_without_bias(hg_x, [1, 1, 256, 1], 'graph_4')
        hg_x = tf.reshape(hg_x, [batch_size, 128 * 128, 6])
        hg_x = tf.nn.softmax(hg_x, axis=-1)
    with tf.variable_scope('graph_5'):
        # NOTE(review): name='graph_3' inside scope 'graph_5' looks like a
        # copy-paste slip (cf. graph_3 above); confirm before renaming --
        # existing checkpoints depend on the current variable name.
        hsp = tf.layers.dense(hg, 128, name='graph_3')
        #hsp = conv_layer_without_bias(hg, [1, 1, 128, 128], 'graph_5')
    output = tf.nn.relu(tf.reshape(tf.matmul(hg_x, hsp), [batch_size, 128, 128, 128]))
    #output = tf.reshape(tf.matmul(hg_x, hsp), [batch_size, 128, 128, 128])
    return output
def batch_normalization(inputs, scope, is_training=True):
    """Batch normalization (tf.contrib.layers.batch_norm) followed by ReLU.

    `is_training` toggles batch statistics vs. moving averages; the moving
    average update ops are collected in tf.GraphKeys.UPDATE_OPS and must be
    run by the training loop.
    """
    bn = tf.contrib.layers.batch_norm(inputs,
                            decay=0.999,
                            center=True,
                            scale=False,
                            epsilon=0.001,
                            activation_fn=None,
                            param_initializers=None,
                            param_regularizers=None,
                            updates_collections=tf.GraphKeys.UPDATE_OPS,
                            is_training=is_training,
                            reuse=None,
                            variables_collections=None,
                            outputs_collections=None,
                            trainable=True,
                            batch_weights=None,
                            fused=False,
                            data_format='NHWC',
                            zero_debias_moving_mean=False,
                            scope=scope,
                            renorm=False,
                            renorm_clipping=None,
                            renorm_decay=0.99)
    return tf.nn.relu(bn, name='relu')
def maxpooling_2x2(x, name):
    """2x2 max-pool with stride 2 (halves the spatial dimensions)."""
    return tf.nn.max_pool(
        x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def avgpooling_2x2(x, name):
    """2x2 average-pool with stride 2 (halves the spatial dimensions)."""
    return tf.nn.avg_pool(
        x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def maxpooling_4x4(x,name):
    # NOTE(review): despite the name, this uses a 2x2 window with stride 4,
    # so half of each 4x4 region is never examined -- confirm this is
    # intentional before "fixing" the ksize.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 4, 4, 1], padding='SAME',name = name)
def avgpooling_4x4(x):
    """4x4 average-pool with stride 4 (quarters the spatial dimensions)."""
    return tf.nn.avg_pool(
        x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
def avgpooling_8x8(x):
    """8x8 average-pool with stride 8 (spatial dimensions reduced 8x)."""
    return tf.nn.avg_pool(
        x, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
def atrous_conv_layer(x, w, b, rate, name, padding='SAME'):
    """Dilated (atrous) convolution plus bias; tf.nn.atrous_conv2d has no
    stride parameter, so the spatial resolution is preserved."""
    weights = weight_variable(w)
    biases = bias_variable(b)
    return tf.nn.atrous_conv2d(x, weights, rate, padding, name=name) + biases
def biLSTM(logits, class_num, batch_size):
    """Run a forward-only static LSTM over per-pixel logits.

    Despite the name, only the forward direction is used (the bidirectional
    variant is left commented out below).

    Args:
        logits: tensor reshaped to [batch_size, 512*512, class_num].
        class_num: number of classes (also used as the LSTM hidden size).
        batch_size: static batch size.

    Returns:
        List of per-step LSTM output tensors.
    """
    n_classes = class_num
    n_steps = 512*512
    n_hidden = n_classes
    logits = tf.reshape(logits, [batch_size,-1, n_classes])
    x = tf.unstack(logits, n_steps, 1)
    # BUGFIX: `from tensorflow.contrib import rnn` is commented out at the
    # top of this module, so the bare name `rnn` resolved to the module-level
    # rnn() helper below and `rnn.LSTMCell` raised AttributeError at call
    # time.  Use the fully qualified tf.contrib.rnn as the rest of the file does.
    # Forward direction cell
    lstm_fw_cell = tf.contrib.rnn.LSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    #lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    #outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
    #                                      dtype=tf.float32)
    outputs, _, = tf.contrib.rnn.static_rnn(lstm_fw_cell, x,
                                            dtype=tf.float32)
    return outputs
def gridLSTM1x1(feature, feature_size, feature_dimension, batch_size):
    """Run a forward-only static BasicLSTM over a flattened feature map.

    Args:
        feature: feature tensor, reshaped to [batch_size, -1, feature_size].
        feature_size: spatial size (also the per-step input width here --
            NOTE(review): the reshape uses feature_size, not
            feature_dimension, as the last axis; confirm intended).
        feature_dimension: LSTM hidden size and output channel count.
        batch_size: static batch size.

    Returns:
        Tensor of shape [batch_size, feature_size, feature_size, feature_dimension].
    """
    n_steps = feature_size*feature_size
    n_hidden = feature_dimension
    feature = tf.reshape(feature, [batch_size, -1, feature_size])
    x = tf.unstack(feature, n_steps, 1)
    # BUGFIX: the bare name `rnn` was broken here (the contrib import is
    # commented out and the name is shadowed by the module-level rnn()
    # helper); use tf.contrib.rnn like the rest of this file.
    lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    #outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
    #                                      dtype=tf.float32)
    outputs, _, = tf.contrib.rnn.static_rnn(lstm_fw_cell, x, dtype=tf.float32)
    outputs = tf.reshape(outputs,[batch_size, feature_size,feature_size,feature_dimension])
    print (outputs.shape)
    return outputs
def LSTM2x2(net, superpixel, feature_size, feature_dimension, batch_size):
    """Superpixel-pooled LSTM with residual unpooling.

    Mean-pools features within each superpixel, runs an LSTM across the
    superpixel sequence, then scatters each superpixel's LSTM state back to
    its pixels and adds it to `net` as a residual.

    NOTE(review): uses Python-2 `xrange` while the rest of the file uses
    py3-style print() -- this raises NameError on Python 3.  The inner
    unpooling loop also hard-codes 4 batches and 128 channels instead of
    using `batch_size`/`feature_dimension`; confirm before generalizing.
    """
    #n_steps is the number of superpixels.
    n_steps = 2000
    n_hidden = feature_dimension
    indexs_batch = []
    one_node_features = []
    #pooling: mean feature per (image, superpixel) pair
    for j in xrange(batch_size):
        indexs_image = []
        for i in xrange(n_steps):
            indexs = tf.where(tf.equal(superpixel[j,:,:],i))
            indexs_image.append(indexs)
            one_node_feature = tf.gather_nd(net[j,:,:,:],indexs)
            one_node_feature = tf.reduce_mean(one_node_feature,axis=0, keep_dims=False)
            one_node_features.append(one_node_feature)
        indexs_batch.append(indexs_image)
    one_node_features = tf.reshape(one_node_features, [batch_size, -1, 128])
    #print (one_node_features.shape) #shape is [batch size, 2000, 128].
    features = tf.unstack(one_node_features, n_steps, 1)
    lstm_cell = tf.contrib.rnn.LSTMCell(n_hidden, forget_bias=1.0)
    #lstm is a 2000 length list, the shape of per element is (4,128).ok.
    lstm, _ = tf.contrib.rnn.static_rnn(lstm_cell, features, dtype=tf.float32)
    """
    unpooling, restore results of net.
    indexs length is 8000. the shape of per element is (?,2).
    ? is the unkown nomber of pixels belonging to one superpixel.
    2 is the coordate (x,y) of one batch.
    """
    nets = []
    for j in xrange(4):
        net_one_image = net[j,:,:,:]
        for i in xrange(n_steps): #n_steps
            indices = indexs_batch[j][i]# The j-th batch and i-th superpixel.
            updates = tf.expand_dims(lstm[i][j,:],0)
            #update = updates
            # Replicate the superpixel's LSTM state once per member pixel.
            updates = tf.tile(updates,[tf.div(tf.size(indices),2),1])
            print (updates.shape)
            #x = 0
            #while tf.less_equal(x, tf.div(tf.size(indices),2) ) is True:
            #for z in xrange(300):
             #   update = tf.concat([update,updates],0)
              #  x = x + 1
            #scatter =
            net_one_image = net_one_image + tf.scatter_nd(indices, updates, tf.constant([512,512,128], dtype=tf.int64))
        nets.append(net_one_image)
    return net + tf.to_float(tf.reshape(nets, [batch_size, 512, 512, 128]))
def LSTM_pool_4x4(net, feature_size, feature_dimension, batch_size, is_training):
    """Forward LSTM over a 4x-downsampled feature map, then 2 deconvs back up.

    Pools `net` 4x4, runs a forward static LSTM over the flattened positions
    (the def-line comment about "reverse direction" belonged to the
    _reverse variant), and restores 128x128 resolution with two stride-2
    transposed convolutions.  The reshape assumes a 128x128x128 input,
    giving 32x32 after pooling -- TODO confirm.
    """
    net = avgpooling_4x4(net)
    n_hidden = feature_dimension
    net = tf.reshape(net, [batch_size, -1, feature_dimension])#(4,1024,128)
    n_steps = net.shape[1]
    net = tf.unstack(net, n_steps, 1)
    fw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    #bw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    net,_= tf.contrib.rnn.static_rnn(fw_cell, net, dtype=tf.float32)
    net = tf.reshape(net, [batch_size, 32, 32, feature_dimension])
    with tf.variable_scope('deconv_3'):
        net = deconv_layer(net,[3,3,128,128],[batch_size,64,64,128], name = 'g_deconv_3')
        net = batch_normalization(net,'g_bn_11', is_training=is_training)
    with tf.variable_scope('deconv_4'):
        net = deconv_layer(net,[3,3,128,128],[batch_size,128,128,128], name = 'g_deconv_4')
        net = batch_normalization(net,'g_bn_12', is_training=is_training)
    return net
def BiLSTM_pool_4x4(net, feature_size, feature_dimension, batch_size, is_training):
    """Bidirectional LSTM over a 4x-downsampled feature map, then 2 deconvs.

    Like LSTM_pool_4x4 but bidirectional: static_bidirectional_rnn
    concatenates forward and backward states, hence the 256-channel reshape
    and the [3,3,128,256] filter of the first deconv.
    """
    net = avgpooling_4x4(net)
    n_hidden = feature_dimension
    net = tf.reshape(net, [batch_size, -1, feature_dimension])#(4,1024,128)
    n_steps = net.shape[1]
    net = tf.unstack(net, n_steps, 1)
    fw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    bw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    net,_,_= tf.contrib.rnn.static_bidirectional_rnn(fw_cell, bw_cell, net, dtype=tf.float32)
    #outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
    #                                      dtype=tf.float32)
    net = tf.reshape(net, [batch_size, 32, 32, 256])
    with tf.variable_scope('deconv_3'):
        net = deconv_layer(net,[3,3,128,256],[batch_size,64,64,128], name = 'g_deconv_3')
        net = batch_normalization(net,'g_bn_11', is_training=is_training)
    with tf.variable_scope('deconv_4'):
        net = deconv_layer(net,[3,3,128,128],[batch_size,128,128,128], name = 'g_deconv_4')
        net = batch_normalization(net,'g_bn_12', is_training=is_training)
    return net
def LSTM_pool_4x4_reverse(net, feature_size, feature_dimension, batch_size, is_training):#reverse direction, from right down to left top
    """Reverse-direction variant of LSTM_pool_4x4.

    The position axis is reversed before the LSTM and reversed back after,
    so the recurrence runs from bottom-right to top-left.

    NOTE(review): the second tf.reverse is applied to the Python *list*
    returned by static_rnn; TF converts it to a stacked tensor first, so
    axis 1 there is the batch axis, not the step axis -- verify this does
    what was intended.
    """
    net = avgpooling_4x4(net)
    n_hidden = feature_dimension
    net = tf.reshape(net, [batch_size, -1, feature_dimension])#(4,1024,128)
    net = tf.reverse(net, [1])
    n_steps = net.shape[1]
    net = tf.unstack(net, n_steps, 1)
    fw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    #bw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    net,_= tf.contrib.rnn.static_rnn(fw_cell, net, dtype=tf.float32)
    net = tf.reverse(net, [1])
    net = tf.reshape(net, [batch_size, 32, 32, feature_dimension])
    with tf.variable_scope('deconv_3'):
        net = deconv_layer(net,[3,3,128,128],[batch_size,64,64,128], name = 'g_deconv_3')
        net = batch_normalization(net,'g_bn_11', is_training=is_training)
    with tf.variable_scope('deconv_4'):
        net = deconv_layer(net,[3,3,128,128],[batch_size,128,128,128], name = 'g_deconv_4')
        net = batch_normalization(net,'g_bn_12', is_training=is_training)
    return net
def LSTM_pool_8x8(net, feature_size, feature_dimension, batch_size, is_training):
    """Forward LSTM over an 8x-downsampled feature map, then 3 deconvs back up.

    Pools `net` 8x8 (16x16 given a 128x128 input -- TODO confirm), runs a
    forward static LSTM over flattened positions, and restores 128x128 with
    three stride-2 transposed convolutions.
    """
    net = avgpooling_8x8(net)
    n_hidden = feature_dimension
    net = tf.reshape(net, [batch_size, -1, feature_dimension])
    n_steps = net.shape[1]
    net = tf.unstack(net, n_steps, 1)
    fw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    #bw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    net, _, = tf.contrib.rnn.static_rnn(fw_cell, net, dtype=tf.float32)
    net = tf.reshape(net, [batch_size, 16, 16, feature_dimension])
    with tf.variable_scope('deconv_3'):
        net = deconv_layer(net,[3,3,128,128],[batch_size,32,32,128], name = 'g_deconv_3')
        net = batch_normalization(net,'g_bn_11', is_training=is_training)
    with tf.variable_scope('deconv_3_1'):
        # 'g_bn_11' reused here is safe: it nests under a different
        # enclosing variable scope ('deconv_3_1' vs 'deconv_3').
        net = deconv_layer(net,[3,3,128,128],[batch_size,64,64,128], name = 'g_deconv_3_1')
        net = batch_normalization(net,'g_bn_11', is_training=is_training)
    with tf.variable_scope('deconv_4'):
        net = deconv_layer(net,[3,3,128,128],[batch_size,128,128,128], name = 'g_deconv_4')
        net = batch_normalization(net,'g_bn_12', is_training=is_training)
    return net
def LSTM_pool_2x2(net, feature_size, feature_dimension, batch_size, is_training):
    """Forward LSTM over a 2x-downsampled feature map, then one deconv back up.

    Pools `net` 2x2 (64x64 given a 128x128 input -- TODO confirm), runs a
    forward static LSTM over flattened positions, and restores 128x128 with
    a single stride-2 transposed convolution.
    """
    net = avgpooling_2x2(net,'LSTM')
    n_hidden = feature_dimension
    net = tf.reshape(net, [batch_size, -1, feature_dimension])
    n_steps = net.shape[1]
    net = tf.unstack(net, n_steps, 1)
    fw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    #bw_cell = tf.contrib.rnn.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0)
    net, _, = tf.contrib.rnn.static_rnn(fw_cell, net, dtype=tf.float32)
    net = tf.reshape(net, [batch_size, 64, 64, feature_dimension])
#    with tf.variable_scope('deconv_3'):
#        net = deconv_layer(net,[3,3,128,128],[batch_size,64,64,128], name = 'g_deconv_3')
#        net = batch_normalization(net,'g_bn_11', is_training=is_training)
    with tf.variable_scope('deconv_4'):
        net = deconv_layer(net,[3,3,128,128],[batch_size,128,128,128], name = 'g_deconv_4')
        net = batch_normalization(net,'g_bn_12', is_training=is_training)
    return net
def rnn(net, feature_size, feature_dimension, batch_size):
    """Plain (BasicRNNCell) recurrence over 4-pixel patches of the feature map.

    NOTE(review): this module-level name shadows the `rnn` module that
    biLSTM()/gridLSTM1x1() above try to use (their contrib import is
    commented out) -- renaming this function would NOT fix them, but be
    aware of the collision.
    """
    patch_size = 4
    #n_steps = feature_size*feature_size
    n_hidden = feature_dimension*patch_size
    net = tf.reshape(net, [batch_size, -1, feature_dimension*patch_size])
    print (net.shape)
    n_steps = net.shape[1]
    net = tf.unstack(net, n_steps, 1)
    #print (tf.size(x))
    # Hidden size equals the per-step input width, so outputs reshape
    # cleanly back to the original spatial layout.
    lstm_fw_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
    net, _, = tf.contrib.rnn.static_rnn(lstm_fw_cell, net, dtype=tf.float32)
    #print (outputs.shape)
    return tf.reshape(net,[batch_size, feature_size,feature_size,feature_dimension])
# =========================================================================== #
# SpinePathNet build.
# =========================================================================== #
#Noly use for basic convolutional network.
def net(inputs, batch_size, class_num, reuse=False,
        is_training=True, scope='SpinePathNet'):
    """SpinePathNet backbone: conv + atrous-conv encoder, deconv decoder.

    Args:
        inputs: input image tensor; the first conv assumes 1 input channel
            and the decoder assumes 512x512 output -- TODO confirm shape.
        batch_size: static batch size (used in deconv output shapes).
        class_num: number of output classes.
        reuse: reuse variables inside `scope`.
        is_training: batch-norm mode flag.
        scope: variable scope name.

    Returns:
        (feature_embedding, logits): the 64-channel embedding and the
        per-class logits, both at full resolution.
    """
    # BUGFIX: the values list previously read `[input]`, passing the Python
    # builtin `input` function to tf.variable_scope instead of the `inputs`
    # tensor.
    with tf.variable_scope(scope, 'SpinePathNet', [inputs], reuse=reuse):
        with tf.variable_scope('conv_1'):
            conv_1 = conv_layer(inputs, [7,7,1,32], 32, 'conv_1')#receptive field = 7
            bn_1 = batch_normalization(conv_1,'bn_1', is_training=is_training)
        with tf.variable_scope('conv_2'):
            conv_2 = conv_layer(bn_1, [7,7,32,32], 32, 'conv_2')#receptive field = 13
            bn_2 = batch_normalization(conv_2,'bn_2', is_training=is_training)
        pool_conv2 = maxpooling_2x2(bn_2, 'pool_conv2') #receptive field = 26
        with tf.variable_scope('conv_3'):
            conv_3 = conv_layer(pool_conv2, [3,3,32,64], 64, 'conv_3')#receptive field = 28]
            bn_3 = batch_normalization(conv_3,'bn_3', is_training=is_training)
        with tf.variable_scope('conv_4'):
            conv_4 = conv_layer(bn_3, [3,3,64,64], 64, 'conv_4') #receptive field = 30
            bn_4 = batch_normalization(conv_4,'bn_4', is_training=is_training)
        pool_conv4= maxpooling_2x2(bn_4, 'pool_conv3') #receptive field = 60
        with tf.variable_scope('conv_5'):
            conv_5 = atrous_conv_layer(pool_conv4, [3,3,64,128], 128, 2, 'conv_5')#receptive field = 66
            bn_5 = batch_normalization(conv_5,'bn_5', is_training=is_training)
        with tf.variable_scope('conv_6'):
            conv_6 = atrous_conv_layer(bn_5, [3,3,128,128], 128, 4, 'conv_6')#receptive field = 76
            bn_6 = batch_normalization(conv_6,'bn_6', is_training=is_training)
        with tf.variable_scope('conv_7'):
            conv_7 = atrous_conv_layer(bn_6, [3,3,128,128], 128, 8, 'conv_7')#receptive field = 94
            bn_7 = batch_normalization(conv_7, 'bn_7', is_training=is_training)
        with tf.variable_scope('conv_8'):
            conv_8 = atrous_conv_layer(bn_7, [3,3,128,128], 128, 16, 'conv_8')#receptive field = 128
            bn_8 = batch_normalization(conv_8, 'bn_8', is_training=is_training)
        with tf.variable_scope('deconv_1'):
            deconv_1 = deconv_layer(bn_8,[3,3,128,128],[batch_size,256,256,128], name = 'deconv_1')
            bn_9 = batch_normalization(deconv_1, 'bn_9', is_training=is_training)
        with tf.variable_scope('deconv_2'):
            deconv_2 = deconv_layer(bn_9,[3,3,128,128],[batch_size,512,512,128],name='deconv_2')
            bn_10 = batch_normalization(deconv_2, 'bn_10', is_training=is_training)
        with tf.variable_scope('feature_embedding'):
            conv_9 = conv_layer(bn_10, [1,1,128,64], 64, 'feature_embedding')
            feature_embedding = batch_normalization(conv_9, 'feature_embedding', is_training=is_training)
        with tf.variable_scope('logits'):
            logits = conv_layer(feature_embedding, [1,1,64,class_num],class_num,'logits')
    return feature_embedding, logits
# Only used for the basic convolutional network and the adversarial network (no LSTM).
def g_net(inputs, batch_size, class_num, reuse=False,
          is_training=True, scope='g_SpinePathNet'):
    """Basic convolutional generator network (no LSTM branch).

    Encoder: two 7x7 conv stages with 2x2 max-pooling, then four atrous
    convolutions with growing dilation (2/4/8/16); decoder: two transposed
    convolutions back to 512x512 resolution.

    Args:
        inputs: single-channel input image batch (NHWC assumed -- the conv
            kernels use an in-channel count of 1; confirm against caller).
        batch_size: static batch size used in the deconv output shapes.
        class_num: number of classes predicted by the logits layer.
        reuse: reuse variables of an existing scope.
        is_training: batch-normalization mode flag.
        scope: variable-scope name.

    Returns:
        (deconv_1, logits): the first decoder feature map and the
        per-pixel class logits.
    """
    # Bug fix: the scope `values` list must contain the `inputs` tensor;
    # the original passed the `input` builtin by mistake.
    with tf.variable_scope(scope, 'g_SpinePathNet', [inputs], reuse=reuse):
        with tf.variable_scope('conv_1'):
            conv_1 = conv_layer(inputs, [7, 7, 1, 32], 32, 'g_conv_1')  # receptive field = 7
            bn_1 = batch_normalization(conv_1, 'g_bn_1', is_training=is_training)
        with tf.variable_scope('conv_2'):
            conv_2 = conv_layer(bn_1, [7, 7, 32, 32], 32, 'g_conv_2')  # receptive field = 13
            bn_2 = batch_normalization(conv_2, 'g_bn_2', is_training=is_training)
            pool_conv2 = maxpooling_2x2(bn_2, 'g_pool_conv2')  # receptive field = 26
        with tf.variable_scope('conv_3'):
            conv_3 = conv_layer(pool_conv2, [3, 3, 32, 64], 64, 'g_conv_3')  # receptive field = 28
            bn_3 = batch_normalization(conv_3, 'g_bn_3', is_training=is_training)
        with tf.variable_scope('conv_4'):
            conv_4 = conv_layer(bn_3, [3, 3, 64, 64], 64, 'g_conv_4')  # receptive field = 30
            bn_4 = batch_normalization(conv_4, 'g_bn_4', is_training=is_training)
            pool_conv4 = maxpooling_2x2(bn_4, 'g_pool_conv3')  # receptive field = 60
        with tf.variable_scope('conv_5'):
            conv_5 = atrous_conv_layer(pool_conv4, [3, 3, 64, 128], 128, 2, 'g_conv_5')  # receptive field = 66
            bn_5 = batch_normalization(conv_5, 'g_bn_5', is_training=is_training)
        with tf.variable_scope('conv_6'):
            conv_6 = atrous_conv_layer(bn_5, [3, 3, 128, 128], 128, 4, 'g_conv_6')  # receptive field = 76
            bn_6 = batch_normalization(conv_6, 'g_bn_6', is_training=is_training)
        with tf.variable_scope('conv_7'):
            conv_7 = atrous_conv_layer(bn_6, [3, 3, 128, 128], 128, 8, 'g_conv_7')  # receptive field = 94
            bn_7 = batch_normalization(conv_7, 'g_bn_7', is_training=is_training)
        with tf.variable_scope('conv_8'):
            conv_8 = atrous_conv_layer(bn_7, [3, 3, 128, 128], 128, 16, 'g_conv_8')  # receptive field = 128
            bn_8 = batch_normalization(conv_8, 'g_bn_8', is_training=is_training)
        with tf.variable_scope('deconv_1'):
            deconv_1 = deconv_layer(bn_8, [3, 3, 128, 128], [batch_size, 256, 256, 128], name='g_deconv_1')
            bn_9 = batch_normalization(deconv_1, 'g_bn_9', is_training=is_training)
        with tf.variable_scope('deconv_2'):
            deconv_2 = deconv_layer(bn_9, [3, 3, 128, 128], [batch_size, 512, 512, 128], name='g_deconv_2')
            bn_10 = batch_normalization(deconv_2, 'g_bn_10', is_training=is_training)
        with tf.variable_scope('feature_embedding'):
            conv_9 = conv_layer(bn_10, [1, 1, 128, 64], 64, 'g_feature_embedding')
            feature_embedding = batch_normalization(conv_9, 'g_feature', is_training=is_training)
        with tf.variable_scope('logits'):
            logits = conv_layer(feature_embedding, [1, 1, 64, class_num], class_num, 'g_logits')
    # NOTE(review): returns deconv_1 (not feature_embedding), matching the
    # original contract -- callers depend on this.
    return deconv_1, logits
# Only used for the generation network with the knowledge graph.
def g_net_graph(inputs, Fold, batch_size, class_num, reuse=True,
                is_training=True, scope='g_SpinePathNet'):
    """Generator network variant with a knowledge-graph layer.

    Same encoder/decoder as g_net, but inserts graph_layer on top of the
    last atrous block and adds its output residually before decoding.

    Args:
        inputs: single-channel input image batch.
        Fold: argument forwarded to graph_layer (graph structure/fold;
            semantics defined by graph_layer -- confirm there).
        batch_size: static batch size for the deconv output shapes.
        class_num: number of output classes.
        reuse: defaults to True -- presumably to share weights with a
            previously built g_net in the same scope; TODO confirm.
        is_training: batch-normalization mode flag.
        scope: variable-scope name.

    Returns:
        (deconv_1, logits).
    """
    # Bug fix: pass the `inputs` tensor in the values list, not the
    # `input` builtin.
    with tf.variable_scope(scope, 'g_SpinePathNet', [inputs], reuse=reuse):
        with tf.variable_scope('conv_1'):
            conv_1 = conv_layer(inputs, [7, 7, 1, 32], 32, 'g_conv_1')  # receptive field = 7
            bn_1 = batch_normalization(conv_1, 'g_bn_1', is_training=is_training)
        with tf.variable_scope('conv_2'):
            conv_2 = conv_layer(bn_1, [7, 7, 32, 32], 32, 'g_conv_2')  # receptive field = 13
            bn_2 = batch_normalization(conv_2, 'g_bn_2', is_training=is_training)
            pool_conv2 = maxpooling_2x2(bn_2, 'g_pool_conv2')  # receptive field = 26
        with tf.variable_scope('conv_3'):
            conv_3 = conv_layer(pool_conv2, [3, 3, 32, 64], 64, 'g_conv_3')  # receptive field = 28
            bn_3 = batch_normalization(conv_3, 'g_bn_3', is_training=is_training)
        with tf.variable_scope('conv_4'):
            conv_4 = conv_layer(bn_3, [3, 3, 64, 64], 64, 'g_conv_4')  # receptive field = 30
            bn_4 = batch_normalization(conv_4, 'g_bn_4', is_training=is_training)
            pool_conv4 = maxpooling_2x2(bn_4, 'g_pool_conv3')  # receptive field = 60
        with tf.variable_scope('conv_5'):
            conv_5 = atrous_conv_layer(pool_conv4, [3, 3, 64, 128], 128, 2, 'g_conv_5')  # receptive field = 66
            bn_5 = batch_normalization(conv_5, 'g_bn_5', is_training=is_training)
        with tf.variable_scope('conv_6'):
            conv_6 = atrous_conv_layer(bn_5, [3, 3, 128, 128], 128, 4, 'g_conv_6')  # receptive field = 76
            bn_6 = batch_normalization(conv_6, 'g_bn_6', is_training=is_training)
        with tf.variable_scope('conv_7'):
            conv_7 = atrous_conv_layer(bn_6, [3, 3, 128, 128], 128, 8, 'g_conv_7')  # receptive field = 94
            bn_7 = batch_normalization(conv_7, 'g_bn_7', is_training=is_training)
        with tf.variable_scope('conv_8'):
            conv_8 = atrous_conv_layer(bn_7, [3, 3, 128, 128], 128, 16, 'g_conv_8')  # receptive field = 128
            bn_8 = batch_normalization(conv_8, 'g_bn_8', is_training=is_training)
        with tf.variable_scope('graph_layer_1'):  # TODO
            gl_1 = graph_layer(bn_8, batch_size, Fold)
            # Batch normalization of gl_1 was disabled in the original.
        with tf.variable_scope('deconv_1'):
            # Residual combination of the conv trunk and the graph branch.
            deconv_1 = deconv_layer(bn_8 + gl_1, [3, 3, 128, 128], [batch_size, 256, 256, 128], name='g_deconv_1')
            bn_9 = batch_normalization(deconv_1, 'g_bn_9', is_training=is_training)
        with tf.variable_scope('deconv_2'):
            deconv_2 = deconv_layer(bn_9, [3, 3, 128, 128], [batch_size, 512, 512, 128], name='g_deconv_2')
            bn_10 = batch_normalization(deconv_2, 'g_bn_10', is_training=is_training)
        with tf.variable_scope('feature_embedding'):
            conv_9 = conv_layer(bn_10, [1, 1, 128, 64], 64, 'g_feature_embedding')
            feature_embedding = batch_normalization(conv_9, 'g_feature', is_training=is_training)
        with tf.variable_scope('logits'):
            logits = conv_layer(feature_embedding, [1, 1, 64, class_num], class_num, 'g_logits')
    return deconv_1, logits
def deeplab3_net(inputs, batch_size, class_num, reuse=False,
                 is_training=True, scope='deeplab3_net'):
    """DeepLab-v3-style network with parallel atrous branches.

    conv_5..conv_8 all take pool_conv4 as input with dilation rates
    2/4/8/16 (an ASPP-like arrangement); their outputs are concatenated,
    fused by a 1x1 conv, then upsampled by two deconvolutions.

    Args:
        inputs: single-channel input image batch.
        batch_size: static batch size for the deconv output shapes.
        class_num: number of output classes.
        reuse: reuse variables of an existing scope.
        is_training: batch-normalization mode flag.
        scope: variable-scope name.

    Returns:
        (deconv_1, logits).
    """
    # Bug fix: pass the `inputs` tensor in the values list, not the
    # `input` builtin.
    with tf.variable_scope(scope, 'deeplab3_net', [inputs], reuse=reuse):
        with tf.variable_scope('conv_1'):
            conv_1 = conv_layer(inputs, [7, 7, 1, 32], 32, 'g_conv_1')  # receptive field = 7
            bn_1 = batch_normalization(conv_1, 'g_bn_1', is_training=is_training)
        with tf.variable_scope('conv_2'):
            conv_2 = conv_layer(bn_1, [7, 7, 32, 32], 32, 'g_conv_2')  # receptive field = 13
            bn_2 = batch_normalization(conv_2, 'g_bn_2', is_training=is_training)
            pool_conv2 = maxpooling_2x2(bn_2, 'g_pool_conv2')  # receptive field = 26
        with tf.variable_scope('conv_3'):
            conv_3 = conv_layer(pool_conv2, [3, 3, 32, 64], 64, 'g_conv_3')  # receptive field = 28
            bn_3 = batch_normalization(conv_3, 'g_bn_3', is_training=is_training)
        with tf.variable_scope('conv_4'):
            conv_4 = conv_layer(bn_3, [3, 3, 64, 64], 64, 'g_conv_4')  # receptive field = 30
            bn_4 = batch_normalization(conv_4, 'g_bn_4', is_training=is_training)
            pool_conv4 = maxpooling_2x2(bn_4, 'g_pool_conv3')  # receptive field = 60
        # Parallel atrous branches, all fed from pool_conv4.
        with tf.variable_scope('conv_5'):
            conv_5 = atrous_conv_layer(pool_conv4, [3, 3, 64, 128], 128, 2, 'g_conv_5')
            bn_5 = batch_normalization(conv_5, 'g_bn_5', is_training=is_training)
        with tf.variable_scope('conv_6'):
            conv_6 = atrous_conv_layer(pool_conv4, [3, 3, 64, 128], 128, 4, 'g_conv_6')
            bn_6 = batch_normalization(conv_6, 'g_bn_6', is_training=is_training)
        with tf.variable_scope('conv_7'):
            conv_7 = atrous_conv_layer(pool_conv4, [3, 3, 64, 128], 128, 8, 'g_conv_7')
            bn_7 = batch_normalization(conv_7, 'g_bn_7', is_training=is_training)
        with tf.variable_scope('conv_8'):
            conv_8 = atrous_conv_layer(pool_conv4, [3, 3, 64, 128], 128, 16, 'g_conv_8')
            bn_8 = batch_normalization(conv_8, 'g_bn_8', is_training=is_training)
        # Fuse the four 128-channel branches (4 * 128 = 512 channels).
        concat = tf.concat([bn_5, bn_6, bn_7, bn_8], -1)
        with tf.variable_scope('conv_9'):
            conv_9 = conv_layer(concat, [1, 1, 512, 128], 128, 'g_conv_9')
            bn_9 = batch_normalization(conv_9, 'g_bn_9', is_training=is_training)
        with tf.variable_scope('deconv_1'):
            deconv_1 = deconv_layer(bn_9, [3, 3, 128, 128], [batch_size, 256, 256, 128], name='g_deconv_1')
            bn_d1 = batch_normalization(deconv_1, 'bn_d1', is_training=is_training)
        with tf.variable_scope('deconv_2'):
            deconv_2 = deconv_layer(bn_d1, [3, 3, 128, 128], [batch_size, 512, 512, 128], name='g_deconv_2')
            bn_d2 = batch_normalization(deconv_2, 'bn_d2', is_training=is_training)
        with tf.variable_scope('feature_embedding'):
            conv_10 = conv_layer(bn_d2, [1, 1, 128, 64], 64, 'g_feature_embedding')
            feature_embedding = batch_normalization(conv_10, 'g_feature', is_training=is_training)
        with tf.variable_scope('logits'):
            logits = conv_layer(feature_embedding, [1, 1, 64, class_num], class_num, 'g_logits')
    return deconv_1, logits
# Used for the basic convolutional network + adversarial network + LSTM.
def g_l_net(inputs, batch_size, class_num, reuse=False, is_training=True, scope='g_SpinePathNet'):
    """Generator network with a bidirectional-LSTM branch.

    Same conv/atrous encoder as g_net; a BiLSTM branch processes the last
    encoder feature map and is added residually before decoding.

    Args:
        inputs: single-channel input image batch.
        batch_size: static batch size for the deconv output shapes.
        class_num: number of output classes.
        reuse: reuse variables of an existing scope.
        is_training: batch-normalization mode flag.
        scope: variable-scope name.

    Returns:
        (net, net_1, net_3): logits, the last encoder feature map (bn_8),
        and the first decoder feature map.
    """
    # Bug fix: pass the `inputs` tensor in the values list, not the
    # `input` builtin.
    with tf.variable_scope(scope, 'g_SpinePathNet', [inputs], reuse=reuse):
        with tf.variable_scope('conv_1'):
            net = conv_layer(inputs, [7, 7, 1, 32], 32, 'g_conv_1')  # receptive field = 7
            net = batch_normalization(net, 'g_bn_1', is_training=is_training)
        with tf.variable_scope('conv_2'):
            net = conv_layer(net, [7, 7, 32, 32], 32, 'g_conv_2')  # receptive field = 13
            net = batch_normalization(net, 'g_bn_2', is_training=is_training)
            net = maxpooling_2x2(net, 'g_pool_conv2')  # receptive field = 26
        with tf.variable_scope('conv_3'):
            net = conv_layer(net, [3, 3, 32, 64], 64, 'g_conv_3')  # receptive field = 28
            net = batch_normalization(net, 'g_bn_3', is_training=is_training)
        with tf.variable_scope('conv_4'):
            net = conv_layer(net, [3, 3, 64, 64], 64, 'g_conv_4')  # receptive field = 30
            net = batch_normalization(net, 'g_bn_4', is_training=is_training)
            net = maxpooling_2x2(net, 'g_pool_conv3')  # receptive field = 60
        with tf.variable_scope('conv_5'):
            net = atrous_conv_layer(net, [3, 3, 64, 128], 128, 2, 'g_conv_5')  # receptive field = 66
            net = batch_normalization(net, 'g_bn_5', is_training=is_training)
        with tf.variable_scope('conv_6'):
            net = atrous_conv_layer(net, [3, 3, 128, 128], 128, 4, 'g_conv_6')  # receptive field = 76
            net = batch_normalization(net, 'g_bn_6', is_training=is_training)
        with tf.variable_scope('conv_7'):
            net = atrous_conv_layer(net, [3, 3, 128, 128], 128, 8, 'g_conv_7')  # receptive field = 94
            net = batch_normalization(net, 'g_bn_7', is_training=is_training)
        with tf.variable_scope('conv_8'):
            net = atrous_conv_layer(net, [3, 3, 128, 128], 128, 16, 'g_conv_8')  # receptive field = 128
            net_1 = batch_normalization(net, 'g_bn_8', is_training=is_training)
        with tf.variable_scope('LSTM'):
            net_2 = BiLSTM_pool_4x4(net_1, 128, 128, batch_size, is_training=is_training)
        with tf.variable_scope('deconv_1'):
            # Residual combination of the conv trunk and the LSTM branch.
            net_3 = deconv_layer(net_1 + net_2, [3, 3, 128, 128], [batch_size, 256, 256, 128], name='g_deconv_1')
            net = batch_normalization(net_3, 'g_bn_9', is_training=is_training)
        with tf.variable_scope('deconv_2'):
            net = deconv_layer(net, [3, 3, 128, 128], [batch_size, 512, 512, 128], name='g_deconv_2')
            net = batch_normalization(net, 'g_bn_10', is_training=is_training)
        with tf.variable_scope('conv_9'):
            net = conv_layer(net, [1, 1, 128, 64], 64, 'g_conv_9')
            net = batch_normalization(net, 'g_feature', is_training=is_training)
        with tf.variable_scope('logits'):
            net = conv_layer(net, [1, 1, 64, class_num], class_num, 'g_logits')
    return net, net_1, net_3
def d_net(inputs, class_num, reuse=False, is_training=True, scope='d_gan'):
    """Discriminator for the adversarial branch.

    Three conv + batch-norm + average-pool stages followed by a
    fully-connected head producing a single (unactivated) score map.

    Args:
        inputs: tensor with `class_num` channels (e.g. segmentation
            probabilities or one-hot labels -- confirm against caller).
        class_num: channel count of `inputs`.
        reuse: reuse variables of an existing scope.
        is_training: batch-normalization mode flag.
        scope: variable-scope name.

    Returns:
        Unactivated discriminator scores (one unit, no sigmoid).
    """
    # Bug fix: pass the `inputs` tensor in the values list, not the
    # `input` builtin.
    with tf.variable_scope(scope, 'd_gan', [inputs], reuse=reuse):
        with tf.variable_scope('gan_conv_1'):
            net = conv_layer_gan(inputs, [7, 7, class_num, 32], 32, 'd_conv_1')
            net = batch_normalization(net, 'd_bn_1', is_training=is_training)
            net = avgpooling_2x2(net, 'pool_conv1')
        with tf.variable_scope('gan_conv_2'):
            net = conv_layer_gan(net, [7, 7, 32, 64], 64, 'd_conv_2')
            net = batch_normalization(net, 'd_bn_2', is_training=is_training)
            net = avgpooling_2x2(net, 'pool_conv2')
        with tf.variable_scope('gan_conv_3'):
            net = conv_layer_gan(net, [7, 7, 64, 128], 128, 'd_conv_3')
            net = batch_normalization(net, 'd_bn_3', is_training=is_training)
            net = avgpooling_2x2(net, 'pool_conv3')
        with tf.variable_scope('d_fc_1'):
            net = tf.contrib.layers.fully_connected(net, 256)
            net = tf.contrib.layers.dropout(net, keep_prob=0.6)
        with tf.variable_scope('d_output'):
            net = tf.contrib.layers.fully_connected(net, 1, activation_fn=None)
    return net
# define u-net for comparison.
def unet(inputs, batch_size, class_num, reuse=False,
         is_training=True, scope='unet'):
    """U-Net implementation for comparison experiments.

    Four down-sampling stages with skip connections (net_1..net_4) and four
    up-sampling stages that concatenate the matching skip features.

    Args:
        inputs: single-channel input image batch.
        batch_size: static batch size for the deconv output shapes.
        class_num: number of output classes.
        reuse: reuse variables of an existing scope.
        is_training: unused here (no batch norm in this net) -- kept for a
            signature consistent with the other networks.
        scope: variable-scope name.

    Returns:
        Per-pixel class logits.
    """
    # Bug fix: pass the `inputs` tensor in the values list, not the
    # `input` builtin.
    with tf.variable_scope(scope, 'g_net', [inputs], reuse=reuse):
        with tf.variable_scope('conv_1'):
            net = conv_layer(inputs, [3, 3, 1, 64], 64, 'g_conv_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('conv_2'):
            net = conv_layer(net, [3, 3, 64, 64], 64, 'g_conv_2')
            net_1 = tf.nn.relu(net)  # skip connection, 512x512x64
            net = maxpooling_2x2(net_1, 'g_pool_conv2')
        with tf.variable_scope('conv_3'):
            net = conv_layer(net, [3, 3, 64, 128], 128, 'g_conv_3')
            net = tf.nn.relu(net)
        with tf.variable_scope('conv_4'):
            net = conv_layer(net, [3, 3, 128, 128], 128, 'g_conv_4')
            net_2 = tf.nn.relu(net)  # skip connection, 256x256x128
            net = maxpooling_2x2(net_2, 'g_pool_conv4')
        with tf.variable_scope('conv_5'):
            net = conv_layer(net, [3, 3, 128, 256], 256, 'g_conv_5')
            net = tf.nn.relu(net)
        with tf.variable_scope('conv_6'):
            net = conv_layer(net, [3, 3, 256, 256], 256, 'g_conv_6')
            net_3 = tf.nn.relu(net)  # skip connection, 128x128x256
            net = maxpooling_2x2(net_3, 'g_pool_conv6')
        with tf.variable_scope('conv_7'):
            net = conv_layer(net, [3, 3, 256, 512], 512, 'g_conv_7')
            net = tf.nn.relu(net)
        with tf.variable_scope('conv_8'):
            net = conv_layer(net, [3, 3, 512, 512], 512, 'g_conv_8')
            net_4 = tf.nn.relu(net)  # skip connection, 64x64x512
            net = maxpooling_2x2(net_4, 'g_pool_conv8')
        with tf.variable_scope('conv_9'):
            net = conv_layer(net, [3, 3, 512, 1024], 1024, 'g_conv_9')
            net = tf.nn.relu(net)
        with tf.variable_scope('conv_10'):
            net = conv_layer(net, [3, 3, 1024, 512], 512, 'g_conv_10')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_1'):
            net = deconv_layer(net, [2, 2, 512, 512], [batch_size, 64, 64, 512], name='g_deconv_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_1_conv_1'):
            net = conv_layer(tf.concat([net_4, net], -1), [3, 3, 1024, 512], 512, 'deconv_1_conv_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_1_conv_2'):
            net = conv_layer(net, [3, 3, 512, 256], 256, 'deconv_1_conv_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_2'):
            net = deconv_layer(net, [2, 2, 256, 256], [batch_size, 128, 128, 256], name='g_deconv_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_2_conv_1'):
            net = conv_layer(tf.concat([net_3, net], -1), [3, 3, 512, 256], 256, 'deconv_2_conv_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_2_conv_2'):
            net = conv_layer(net, [3, 3, 256, 128], 128, 'deconv_2_conv_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_3'):
            # NOTE(review): the inner layer name 'g_deconv_1' duplicates the
            # first deconv's name; it is unique thanks to the enclosing
            # scope, and is kept to preserve checkpoint variable names.
            net = deconv_layer(net, [2, 2, 128, 128], [batch_size, 256, 256, 128], name='g_deconv_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_3_conv_1'):
            net = conv_layer(tf.concat([net_2, net], -1), [3, 3, 256, 128], 128, 'deconv_3_conv_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_3_conv_2'):
            net = conv_layer(net, [3, 3, 128, 64], 64, 'deconv_3_conv_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_4'):
            net = deconv_layer(net, [2, 2, 64, 64], [batch_size, 512, 512, 64], name='g_deconv_4')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_4_conv_1'):
            net = conv_layer(tf.concat([net_1, net], -1), [3, 3, 128, 64], 64, 'deconv_4_conv_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_4_conv_2'):
            net = conv_layer(net, [3, 3, 64, 64], 64, 'deconv_4_conv_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('logits'):
            net = conv_layer(net, [1, 1, 64, class_num], class_num, 'g_logits')
    return net
# define SegNet for comparison.
def SegNet(inputs, batch_size, class_num, reuse=False,
           is_training=True, scope='SegNet'):
    """SegNet implementation for comparison experiments.

    VGG-style slim encoder (five conv/pool stages) followed by five
    deconv up-sampling stages mirroring the encoder.

    Args:
        inputs: input image batch.
        batch_size: static batch size for the deconv output shapes.
        class_num: number of output classes.
        reuse: reuse variables of an existing scope.
        is_training: unused here (no batch norm in this net) -- kept for a
            signature consistent with the other networks.
        scope: variable-scope name.

    Returns:
        Per-pixel class logits.
    """
    # Bug fix: pass the `inputs` tensor in the values list, not the
    # `input` builtin.
    with tf.variable_scope(scope, 'SegNet', [inputs], reuse=reuse):
        # Encoder (slim): 64-128-256-512-512 channels, 2x2 max-pool each.
        net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], scope='pool3')
        net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
        net = slim.max_pool2d(net, [2, 2], scope='pool4')
        net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
        net = slim.max_pool2d(net, [2, 2], scope='pool5')
        # Decoder: mirror the encoder with deconv + conv stacks.
        with tf.variable_scope('upsampling_1'):
            net = deconv_layer(net, [2, 2, 512, 512], [batch_size, 32, 32, 512], name='upsampling_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv5/conv5_3'):
            net = conv_layer(net, [3, 3, 512, 512], 512, 'deconv5/conv5_3')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv5/conv5_2'):
            net = conv_layer(net, [3, 3, 512, 512], 512, 'deconv5/conv5_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('conv5/conv5_3'):
            # NOTE(review): scope name 'conv5/conv5_3' does not match the
            # layer name 'deconv5/conv5_1'; kept as-is to preserve
            # checkpoint variable names.
            net = conv_layer(net, [3, 3, 512, 512], 512, 'deconv5/conv5_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('upsampling_2'):
            net = deconv_layer(net, [2, 2, 512, 512], [batch_size, 64, 64, 512], name='upsampling_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv4/conv4_3'):
            net = conv_layer(net, [3, 3, 512, 512], 512, 'deconv4/conv4_3')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv4/conv4_2'):
            net = conv_layer(net, [3, 3, 512, 512], 512, 'deconv4/conv4_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv4/conv4_1'):
            net = conv_layer(net, [3, 3, 512, 256], 256, 'deconv4/conv4_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('upsampling_3'):
            net = deconv_layer(net, [2, 2, 256, 256], [batch_size, 128, 128, 256], name='deconv_3')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv3/conv3_3'):
            net = conv_layer(net, [3, 3, 256, 256], 256, 'deconv3/conv3_3')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv3/conv3_2'):
            net = conv_layer(net, [3, 3, 256, 256], 256, 'deconv3/conv3_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv3/conv3_1'):
            net = conv_layer(net, [3, 3, 256, 128], 128, 'deconv3/conv3_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('upsampling_4'):
            net = deconv_layer(net, [2, 2, 128, 128], [batch_size, 256, 256, 128], name='deconv_4')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv2/conv2_2'):
            net = conv_layer(net, [3, 3, 128, 128], 128, 'deconv2/conv2_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv2/conv2_1'):
            net = conv_layer(net, [3, 3, 128, 64], 64, 'deconv2/conv2_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('upsampling_5'):
            net = deconv_layer(net, [2, 2, 64, 64], [batch_size, 512, 512, 64], name='deconv_5')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv1/conv1_2'):
            net = conv_layer(net, [3, 3, 64, 64], 64, 'deconv1/conv1_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv1/conv1_1'):
            net = conv_layer(net, [3, 3, 64, 64], 64, 'deconv1/conv1_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('logit'):
            net = conv_layer(net, [1, 1, 64, class_num], class_num, 'logit')
    return net
def vgg16(inputs, batch_size, class_num, reuse=False,
          is_training=True, scope='vgg_16'):
    """VGG-16-style fully-convolutional network for comparison experiments.

    slim VGG encoder, 1x1 conv head, then five stride-2 deconvolutions
    back to 512x512 resolution.

    Args:
        inputs: input image batch.
        batch_size: static batch size for the deconv output shapes.
        class_num: number of output classes.
        reuse: reuse variables of an existing scope.
        is_training: unused here (no batch norm in this net) -- kept for a
            signature consistent with the other networks.
        scope: variable-scope name.

    Returns:
        Per-pixel class logits (no final activation).
    """
    # Bug fix: pass the `inputs` tensor in the values list, not the
    # `input` builtin.
    with tf.variable_scope(scope, 'vgg_16', [inputs], reuse=reuse):
        net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], scope='pool3')
        net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
        net = slim.max_pool2d(net, [2, 2], scope='pool4')
        net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
        net = slim.max_pool2d(net, [2, 2], scope='pool5')
        with tf.variable_scope('conv6'):
            net = conv_layer(net, [1, 1, 512, 1028], 1028, 'conv6')
            net = tf.nn.relu(net)
        with tf.variable_scope('conv7'):
            net = conv_layer(net, [1, 1, 1028, 1028], 1028, 'conv7')
            net = tf.nn.relu(net)
        with tf.variable_scope('conv8'):
            # No ReLU here -- the original deliberately left this layer
            # linear (the activation was commented out).
            net = conv_layer(net, [1, 1, 1028, 256], 256, 'conv8')
        with tf.variable_scope('deconv_1'):
            net = deconv_layer(net, [2, 2, 256, 256], [batch_size, 32, 32, 256], name='deconv_1')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_2'):
            net = deconv_layer(net, [2, 2, 256, 256], [batch_size, 64, 64, 256], name='deconv_2')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_3'):
            net = deconv_layer(net, [2, 2, 256, 256], [batch_size, 128, 128, 256], name='deconv_3')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_4'):
            net = deconv_layer(net, [2, 2, 256, 256], [batch_size, 256, 256, 256], name='deconv_4')
            net = tf.nn.relu(net)
        with tf.variable_scope('deconv_5'):
            net = deconv_layer(net, [2, 2, 256, 256], [batch_size, 512, 512, 256], name='deconv_5')
            net = tf.nn.relu(net)
        with tf.variable_scope('logit'):
            net = conv_layer(net, [1, 1, 256, class_num], class_num, 'logit')
    return net
|
#import pyARC
from pyARC_dev import pyARC
import numpy as np
import matplotlib
import matplotlib.ticker
import matplotlib.pyplot as plt
import glob
import pdb, sys, os
"""
Module containing a routine for installing the namelist
parameters in an ATMO object.
"""
# Fiducial values for HAT-P-18:
LOGG = 2.69     # log10 surface gravity -- presumably cgs; TODO confirm units
TEQ = 850       # equilibrium temperature -- NOTE(review): not referenced by Main below
RPLANET = 0.995  # planet radius -- units not stated here; confirm against ATMO docs
RSTAR = 0.749   # stellar radius -- units not stated here; confirm against ATMO docs
AAU = 0.0559    # orbital semi-major axis in AU (per the name)
MDH = 0.0  # solar metallicity [M/H]
CORATIO = 0.56  # solar C/O ratio
def Main( ATMO ):
    """
    Routine for setting the namelist parameters.

    Installs the fiducial HAT-P-18 values (module constants above) plus the
    run-control settings on the passed-in ATMO object and returns it.
    This function only assigns attributes; nothing is executed here.
    """
    ATMO.executable = 'atmo.x'  # name of the ATMO binary to invoke
    ATMO.nice = None  # no nice-level adjustment for the subprocess
    ATMO.infile_path = 'ptchem.in' # ATMO input file path
    # PARAM: Parameters for ATMO
    ATMO.Debug = 1
    ATMO.fout = 'pt.ncdf'  # output pressure-temperature profile file
    #ATMO.fin = 'temp.ncdf'
    ATMO.fin = 'pt.ncdf'  # input PT profile (same as fout -> restart from previous run)
    # EOS: Equation of state parameters
    #ATMO.gamma = 0.
    # GRID: Grid parameters
    ATMO.pmin = 1e-6  # pressure grid bounds -- units presumably bar; TODO confirm
    ATMO.pmax = 5
    ATMO.taumin = 1e-6  # optical-depth grid bounds
    ATMO.taumax = 10 #8e3 #2e2
    ATMO.logg = LOGG
    ATMO.teff = 100.  # NOTE(review): set to 100, not TEQ -- confirm this is intentional
    ATMO.ndepth = 15 #20 # 50 # this seems to be the parameter that affects speed a lot
    ATMO.Rp = RPLANET
    ATMO.pp_Rp = 0.001
    ATMO.nfreq = 250
    ATMO.nkmix = 10 #30
    ATMO.nband = 250 #350
    ATMO.nband_std = 32
    ATMO.corr_k = True  # use correlated-k opacities
    ATMO.numax = 5e6
    # CHEMISTRY: Chemistry parameters
    ATMO.chem = 'eq'  # equilibrium chemistry
    ATMO.MdH = MDH
    ATMO.COratio = CORATIO
    ATMO.fAin = 'chem_dummy.ncdf'
    ATMO.fAeqout = 'chem_eq.ncdf'
    ATMO.fAneqout = 'chem_neq.ncdf'
    #ATMO.fcoeff = '/home/tevans/code/pacode/atmo/chem/coeff_NASA_sc.dat'
    ATMO.print_chem = False
    # CHEM_NEQ: Non-equilibrium chemistry parameters
    # (inactive while mixing and photochem are both False)
    ATMO.mixing = False
    ATMO.photochem = False
    ATMO.kzzcst = 1e9
    ATMO.nmol_eq = 107
    ATMO.tmax = 1e12
    ATMO.dtmax = 1e10
    ATMO.rate_limiter = True
    ATMO.Nmin = 1e-100
    ATMO.atol = 1e-10
    # RADTRANS: Radiative transfer parameters
    ATMO.nrays = 16
    ATMO.scatter = True
    ATMO.irrad = True  # include stellar irradiation
    ATMO.firad = 'lte048-4.5-0.0a+0.0.BT-NextGen.7.ncdf' # stellar spectrum filepath
    ATMO.rstar = RSTAR
    ATMO.rorbit = AAU
    ATMO.murad = 0.5
    ATMO.fred = 0.5
    ATMO.ftrans_spec = 'TransmissionModel.ncdf' # output file for transmission spectrum
    ATMO.fspectrum = 'EmissionModel.ncdf' # output file for the emission spectrum
    ATMO.fcfout = 'ContribFunc.ncdf' # output file for the normalised contribution function
    # OPACITY: Opacity parameters
    ATMO.nkap = 6
    ATMO.art_haze = 1
    ATMO.cloud = False  # cloud_* settings below are inactive while this is False
    ATMO.cloud_top = 1
    ATMO.cloud_bottom = 20
    ATMO.cloud_strength = 1
    ATMO.kap_smooth = True
    ATMO.kerkap_smooth = 2
    # SOLVER: ATMO solver parameters
    ATMO.solve_hydro = False
    ATMO.solve_energy = False
    ATMO.minstep = 1e-3
    ATMO.maxstep = 9e-1
    ATMO.accuracy = 1e-1
    ATMO.psurf = 1e-6
    ATMO.print_err = False
    ATMO.transmission_spectrum = True  # compute a transmission spectrum
    ATMO.surface_spectrum = False
    #ATMO.hydrostatic = True
    ATMO.calc_cf = False  # contribution function disabled
    # CONVECTION: Convection parameters
    ATMO.alpha = 0.
    return ATMO
|
import abc as ABC
from abc import ABC, abstractmethod, abstractproperty
class Analyzer(ABC):
    """Abstract base class for analyzers.

    Subclasses must implement :meth:`analyze`; instantiating this class
    directly raises ``TypeError``.
    """

    @abstractmethod
    def analyze(self):
        """Run the analysis; must be overridden by subclasses."""
        ...
|
# Run inference on batches of data
# uses default configuration from detectron2
#
# Fatemeh Saleh <fatemehsadat.saleh@anu.edu.au>
import numpy as np
import cv2
import os
from detectron2 import model_zoo
from detectron2.modeling import build_model
from detectron2.config import get_cfg
import torch
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
import glob
import json
from skimage import measure
from pycocotools import mask
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', help='name of sample such as, [0002_white_floor_05_02_2019_08_19_17_47]', required=True)
parser.add_argument('-f', help='name of furniture from [Kallax_Shelf_Drawer, Lack_Side_Table, Lack_Coffee_Table, Lack_TV_Bench]', required=True)
parser.add_argument('-root', default='/path/to/dataset/', required=True)
parser.add_argument('-batch', default=10, required=True, type=int)
parser.add_argument('-model', help='name of the model in cfg.OUTPUT_DIR such as [ResNeXt.pth] ', required=True)
args = parser.parse_args()


def _append_annotations(outputs, first_image_id, segment_id, test_json):
    """Convert one batch of model outputs into COCO-style annotation dicts.

    Appends one annotation per detected instance to test_json['annotations']
    and returns the next free segment id.
    """
    for j, output in enumerate(outputs):
        instances = output['instances']
        for s in range(len(instances)):
            bimask = instances[s].pred_masks.cpu().numpy().astype("uint8")[0]
            # Polygonal segmentation from the binary mask.
            segment = []
            for contour in measure.find_contours(bimask, 0.5):
                contour = np.flip(contour, axis=1)  # (row, col) -> (x, y)
                segment.append(contour.ravel().tolist())
            # RLE-encode the mask only to compute its pixel area.
            rle = mask.encode(np.asfortranarray(np.uint8(np.expand_dims(bimask, axis=-1))))
            area = int(mask.area(rle)[0])
            test_json['annotations'].append(
                {'image_id': int(first_image_id + j), 'id': segment_id, 'segmentation': segment,
                 'bbox': instances[s].pred_boxes.tensor.cpu().numpy()[0].tolist(),
                 'category_id': int(instances[s].pred_classes.cpu().numpy()[0]) + 1,
                 'score': float(instances[s].scores.cpu().numpy()[0]), 'area': area})
            segment_id += 1
    return segment_id


if __name__ == '__main__':
    test_dir = os.path.join(args.root, args.f, args.s, 'dev3', 'images')
    test_imgs = sorted(glob.glob(test_dir + '/*'))
    test_imgs_transform = []
    test_json = {'images': [], 'annotations': [], 'categories': []}
    # Default detectron2 config with a custom checkpoint and class count.
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set threshold for this model
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, args.model)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 7
    transform_gen = T.ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    model.train(False)  # inference mode
    # Preprocess every image into detectron2's batched-input format.
    for i, img_path in enumerate(test_imgs):
        original_image = cv2.imread(img_path)
        height, width = original_image.shape[:2]
        image = transform_gen.get_transform(original_image).apply_image(original_image)
        image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))  # HWC -> CHW
        test_imgs_transform.append({"image": image, "height": height, "width": width})
        test_json['images'].append(
            {"id": i + 1, "file_name": args.f + '/' + args.s + '/dev3/images/' + str.split(img_path, '/')[-1],
             "height": height, "width": width})
    segment_id = 1
    start = time.time()
    with torch.no_grad():
        # Bug fix: the original's full-batch test (`b + args.batch < len`)
        # skipped the final exact-fit batch, and its remainder branch could
        # re-run overlapping slices and duplicate annotations. A plain slice
        # handles full batches and the remainder uniformly, exactly once.
        for b in range(0, len(test_imgs_transform), args.batch):
            outputs = model(test_imgs_transform[b:b + args.batch])
            segment_id = _append_annotations(outputs, b + 1, segment_id, test_json)
            print("Inference Done for ", b, "Frames")
    print('Total inference time for ', len(test_imgs_transform), ' frames:', time.time() - start)
    test_json['categories'] = [{'id': 1, 'name': 'table_top'}, {'id': 2, 'name': 'leg'}, {'id': 3, 'name': 'shelf'},
                               {'id': 4, 'name': 'side_panel'}, {'id': 5, 'name': 'front_panel'}, {'id': 6, 'name': 'bottom_panel'},
                               {'id': 7, 'name': 'rear_panel'}]
    with open(os.path.join(cfg.OUTPUT_DIR, args.s + '.json'), 'w') as outfile:
        json.dump(test_json, outfile)
    print('Output json file successfully created!')
|
import os
def getRootPath():
    """Return the absolute path of the project root (two levels above this file)."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, os.pardir, os.pardir))
def getProjectAbsPath(*path):
    """Join the given path components onto the project root and return the result."""
    root = getRootPath()
    return os.path.join(root, *path)
def getCachePath(*path):
    """Return an absolute path inside the project's ".cache" directory."""
    parts = (".cache",) + path
    return getProjectAbsPath(*parts)
def getTemplatePath(*path):
    """Return an absolute path inside the project's "cli/templates" directory."""
    parts = ("cli", "templates") + path
    return getProjectAbsPath(*parts)
def getNodeBinPath(name):
    """Return the absolute path of an executable installed in node_modules/.bin."""
    parts = ("node_modules", ".bin", name)
    return getProjectAbsPath(*parts)
def getPipEnvBinPath(name):
    """Return the absolute path of an executable in the project virtualenv's bin directory."""
    parts = ("env", "bin", name)
    return getProjectAbsPath(*parts)
def getCurrentAbsPath(path="."):
    """Return *path* as an absolute, normalized path.

    Relative paths are resolved against the current working directory.
    os.path.abspath already joins relative paths with os.getcwd() and
    normalizes, so the original's recursive cwd-join was redundant.
    """
    return os.path.abspath(path)
|
def aumentar(preço, pc):
    """Return the price increased by `pc` percent."""
    return preço + (preço * pc / 100)
def diminuir(preço, pc):
    """Return the price decreased by `pc` percent."""
    return preço - (preço * pc / 100)
def dobro(preço):
    """Return double the price."""
    return preço * 2
def metade(preço):
    """Return half the price (true division, so the result is a float)."""
    return preço / 2
|
from api.db import db_users
from api.services.custom_mapper import map_json_to_object
from api.services.view_models import ErrorModel, PostModelReturn
def worker_for_post(posted_user):
    """Create a new user list from a posted JSON payload.

    Returns a dict with 'status_code' and 'body':
      403 -- the reserved user name 'frontpage' was used;
      400 -- the user/list combination already exists;
      200 -- the list was inserted (body is a PostModelReturn).
    """
    parsed = map_json_to_object(posted_user)
    user = parsed.user
    list_name = parsed.list
    if user == 'frontpage':
        body = ErrorModel(user, list_name, 'frontpage is an invalid user - reserved')
        return {'status_code': 403, 'body': body}
    if db_users.check_if_userlist_exists(user, list_name):
        body = ErrorModel(user, list_name, 'Unable to insert list already exists')
        return {'status_code': 400, 'body': body}
    insert_result = db_users.insert_new_list(parsed)
    body = PostModelReturn(user, list_name, insert_result, posted_user)
    return {'status_code': 200, 'body': body}
# NOTE: this file is not an actual test, but it can be helpful when debugging the new replayer.
import os
from pathlib import Path
from simod.readers.log_reader import LogReader
from simod.replayer_datatypes import BPMNGraph, ElementInfo
from simod.configuration import Configuration
# BPMN 2.0 model namespace URI, used when parsing BPMN XML documents.
bpmn_schema_url = 'http://www.omg.org/spec/BPMN/20100524/MODEL'
# Namespace mapping for ElementTree-style queries against BPMN files.
bpmn_element_ns = {'xmlns': bpmn_schema_url}
# Log name -> [XES event-log path, Simod BPMN model path].
# Paths are project-relative; presumably joined onto a base directory by the
# caller -- TODO confirm.
xes_simodbpmn_file_paths = {
    'purchasing_example': ['/input_files/xes_files/PurchasingExample.xes',
                           '/input_files/bpmn_simod_models/PurchasingExample.bpmn'],
    'production': ['/input_files/xes_files/production.xes',
                   '/input_files/bpmn_simod_models/Production.bpmn'],
    'insurance': ['/input_files/xes_files/insurance.xes',
                  '/input_files/bpmn_simod_models/insurance.bpmn'],
    'call_centre': ['/input_files/xes_files/callcentre.xes',
                    '/input_files/bpmn_simod_models/callcentre.bpmn'],
    'bpi_challenge_2012': ['/input_files/xes_files/BPI_Challenge_2012_W_Two_TS.xes',
                           '/input_files/bpmn_simod_models/BPI_Challenge_2012_W_Two_TS.bpmn'],
    'bpi_challenge_2017_filtered': ['/input_files/xes_files/BPI_Challenge_2017_W_Two_TS_filtered.xes',
                                    '/input_files/bpmn_simod_models/BPI_Challenge_2017_W_Two_TS_filtered.bpmn'],
    'bpi_challenge_2017': ['/input_files/xes_files/BPI_Challenge_2017_W_Two_TS.xes',
                           '/input_files/bpmn_simod_models/BPI_Challenge_2017_W_Two_TS.bpmn'],
    'consulta_data_mining': ['/input_files/xes_files/ConsultaDataMining201618.xes',
                             '/input_files/bpmn_simod_models/ConsultaDataMining201618.bpmn']
}
# Feature switches selecting which experiment modes to run.
to_execute = {'HC-STRICT': False,
              'HC-FLEX': False,
              'TS-STRICT': False,
              'NSGA-II': False,
              'METRICS': True}
# Index -> log-name lookup for selecting an experiment by number.
experiment_logs = {0: 'production',
                   1: 'purchasing_example',
                   2: 'consulta_data_mining',
                   3: 'insurance',
                   4: 'call_centre',
                   5: 'bpi_challenge_2012',
                   6: 'bpi_challenge_2017_filtered',
                   7: 'bpi_challenge_2017'}
def reply_event_log(event_log, bpmn_graph, log_path=None):
    """Replay every trace of `event_log` on `bpmn_graph` and print conformance stats.

    Two replay passes are run (post_p = False, then True); statistics are only
    accumulated during the first pass, while branching probabilities and arc
    frequencies are collected for both.

    NOTE(review): the name looks like a typo for "replay_event_log", but the
    caller uses this spelling, so it is kept.

    :param event_log: log object exposing get_traces(); each trace is a list of
        events with a 'task' key.
    :param bpmn_graph: BPMNGraph exposing replay_trace() and
        compute_branching_probability().
    :param log_path: accepted but unused.
    :return: the flow-arc frequency dict of the LAST pass (post_p=True).
    :raises ZeroDivisionError: if the log contains no traces (total_traces == 0).
    """
    traces = event_log.get_traces()
    flow_arcs_prob = []   # one branching-probability dict per pass
    flow_arcs_freq = []   # one arc-frequency dict per pass
    correct_traces = 0
    correct_activities = 0
    total_activities = 0
    task_fired_ratio = dict()   # task -> [times fired, times seen]
    task_missed_tokens = 0      # traces that ended with pending tokens
    missed_tokens = dict()      # flow id -> pending-token count
    total_tokens = 0
    total_traces = 0
    for post_p in [False, True]:
        flow_arcs_frequency = dict()
        for trace in traces:
            sequence = [event['task'] for event in trace]
            is_correct, fired_tasks, pending_tokens = bpmn_graph.replay_trace(sequence, flow_arcs_frequency, post_p)
            # Aggregate statistics on the first pass only, so each trace is
            # counted exactly once.
            if not post_p:
                total_traces += 1
                if len(pending_tokens) > 0:
                    task_missed_tokens += 1
                    for flow_id in pending_tokens:
                        if flow_id not in missed_tokens:
                            missed_tokens[flow_id] = 0
                        missed_tokens[flow_id] += 1
                        total_tokens += 1
                if is_correct:
                    correct_traces += 1
                for i in range(0, len(sequence)):
                    if sequence[i] not in task_fired_ratio:
                        task_fired_ratio[sequence[i]] = [0, 0]
                    if fired_tasks[i]:
                        correct_activities += 1
                        task_fired_ratio[sequence[i]][0] += 1
                    task_fired_ratio[sequence[i]][1] += 1
                total_activities += len(fired_tasks)
        flow_arcs_prob.append(bpmn_graph.compute_branching_probability(flow_arcs_frequency))
        flow_arcs_freq.append(flow_arcs_frequency)
    t_r = 100 * correct_traces / total_traces
    a_r = 100 * correct_activities / total_activities
    print("Correct Traces Ratio %.2f (Pass: %d, Fail: %d, Total: %d)" % (
        t_r, correct_traces, total_traces - correct_traces, total_traces))
    print("Correct Tasks Ratio %.2f (Fire: %d, Fail: %d, Total: %d)" % (
        a_r, correct_activities, total_activities - correct_activities, total_activities))
    print("Missed Tokens Ratio %.2f (%d tokens left)" % (100 * task_missed_tokens / total_traces, total_tokens))
    print('----------------------------------------------')
    for task_id in task_fired_ratio:
        print('%s -- %.2f (%d / %d)' % (task_id, task_fired_ratio[task_id][0] / task_fired_ratio[task_id][1],
                                        task_fired_ratio[task_id][0], task_fired_ratio[task_id][1]))
    print('----------------------------------------------')
    print_probabilities(flow_arcs_prob, flow_arcs_freq)
    # Note: this is the dict filled by the second (post_p=True) pass.
    return flow_arcs_frequency
def print_probabilities(flow_arcs_prob, f_arcs_freq):
c = 1
for g_id in flow_arcs_prob[0]:
print(g_id)
for i in [0, 1]:
print('G_%d' % c, end='')
for flow_id in flow_arcs_prob[i][g_id]:
print(', %.3f (%d)' % (flow_arcs_prob[i][g_id][flow_id], f_arcs_freq[i][flow_id]), end='')
print()
print('........................................')
c += 1
def main():
    """Replay the selected event logs against their Simod BPMN models.

    Only index 7 (bpi_challenge_2017) is currently selected via range(7, 8);
    widen the range to replay more logs.
    """
    # Repository root: two levels above this file's directory (invariant,
    # so computed once outside the loop).
    current_dir = str(Path(os.path.dirname(__file__)).parent.parent)
    for i in range(7, 8):
        log_path = current_dir + xes_simodbpmn_file_paths[experiment_logs[i]][0]
        model_path = current_dir + xes_simodbpmn_file_paths[experiment_logs[i]][1]
        settings = Configuration(model_path=Path(model_path), log_path=Path(log_path))
        settings.fill_in_derived_fields()
        event_log = LogReader(log_path, settings.read_options)
        bpmn_graph = BPMNGraph.from_bpmn_path(Path(model_path))
        print('Process: ' + experiment_logs[i])
        # Return value (arc frequencies) intentionally discarded; the function
        # prints its report. (Removed an unused binding and a dead `break`.)
        reply_event_log(event_log, bpmn_graph, log_path)
    # NOTE(review): os._exit skips interpreter cleanup (atexit, buffers);
    # kept as-is since this is a debug script, but a plain return would do.
    os._exit(0)


if __name__ == '__main__':
    main()
|
# Copyright 2021 - Guillaume Charbonnier
# Licensed under the Apache License, Version 2.0 (the "License");
# http://www.apache.org/licenses/LICENSE-2.0
from typing import Optional
from _nats.aio.client import Client as NC
from .mixins.consumers import ConsumersMixin
from .mixins.infos import AccountInfosMixin
from .mixins.streams import StreamsMixin
class Client(NC, AccountInfosMixin, ConsumersMixin, StreamsMixin):
    """Python client for JetStream NATS servers.

    The client exposes user friendly methods which in turn leverage NATS python client from `nats.py`
    and perform NATS requests according to Jetstream NATS API.

    Docs:
    * Jetstream NATS API Reference: <https://docs.nats.io/jetstream/nats_api_reference>

    Examples:
    - Create and connect a client:
    >>> from jsm.api import Client as JS
    >>> js = JS()
    >>> async def main():
    >>>     await js.connect()

    - Get account info
    >>> await js.account_info()

    - List streams
    >>> await js.stream_list()

    - Create a new stream
    >>> await js.stream_create("DEMO", subjects="demo.>")

    - Check that the stream was created
    >>> stream_names_res = await js.stream_names()
    >>> assert "DEMO" in stream_names_res.streams

    - Get info about a stream
    >>> stream_info = await js.stream_info("DEMO")

    - Create a consumer
    >>> await js.consumer_durable_create("DEMO", "app-consumer-01")

    - Publish a message and fetch it using the consumer
    >>> await js.publish("demo.foo", b"bar")
    >>> msg = await js.consumer_msg_next("DEMO", "app-consumer-01")
    >>> assert msg.data == b"bar"

    - Delete a consumer
    >>> await js.consumer_delete("DEMO", "app-consumer-01")
    """
    def __init__(
        self,
        domain: Optional[str] = None,
        default_timeout: float = 1.0,
        raise_on_error: bool = False,
    ):
        """Initialize the underlying NATS client and JetStream settings.

        Args:
            domain: optional JetStream domain; scopes requests to
                "$JS.<domain>.API" instead of the default "$JS.API".
            default_timeout: default request timeout in seconds
                (presumably consumed by the mixin methods — confirm there).
            raise_on_error: error-handling toggle stored for the mixins.
        """
        super().__init__()
        # JetStream API subject prefix used to build request subjects.
        self._prefix = f"$JS.{domain}.API" if domain else "$JS.API"
        self._timeout = default_timeout
        self._raise_on_error = raise_on_error
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class MultiLayerPerceptron(object):
    """Minimal fully-connected classifier for 3 classes.

    One hidden ReLU layer followed by a 3-unit softmax output, compiled with
    Adam and sparse categorical cross-entropy.
    """

    def __init__(self, hidden_neurons, feature_dim):
        """Build and compile the model.

        :param hidden_neurons: number of units in the hidden layer.
        :param feature_dim: number of input features per sample.
        """
        self.model = keras.Sequential([
            Dense(hidden_neurons, activation=tf.nn.relu, input_shape=[feature_dim]),
            Dense(3, activation='softmax'),
        ])
        self.model.compile(
            optimizer=tf.optimizers.Adam(),
            loss=tf.losses.SparseCategoricalCrossentropy(),
            metrics=['accuracy'],
        )

    def train(self, train_set, train_label, plot):
        """Fit with early stopping (patience 50 on val_loss, 10% validation
        split) and report the final validation accuracy.

        :param train_set: training features.
        :param train_label: integer class labels.
        :param plot: if truthy, plot train/validation accuracy curves.
        """
        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
        history = self.model.fit(train_set, train_label, epochs=1000, verbose=0,
                                 validation_split=0.1, callbacks=[early_stop])
        hist = pd.DataFrame(history.history)
        print(hist)
        # Bug fix: float(Series) is deprecated (removed in pandas 2.x);
        # take the last validation-accuracy scalar explicitly instead of
        # float(hist['val_accuracy'].tail(1)).
        acc_final = float(hist['val_accuracy'].iloc[-1])
        print()
        print('Final Accuracy on validation set: {}'.format(round(acc_final, 3)))
        if plot:
            plt.figure()
            plt.xlabel('Epoch')
            plt.ylabel('Accuracy')
            plt.plot(hist['accuracy'], label='Train Error')
            plt.plot(hist['val_accuracy'], label='Validation Error')
            plt.legend()
            plt.ylim([0, 1])
            plt.show()

    def test(self, test_set, test_label):
        """Evaluate on the test split and print one sample prediction.

        :param test_set: test features (indexable as a 2-D numpy array).
        :param test_label: integer class labels.
        """
        loss, accuracy = self.model.evaluate(test_set, test_label)
        print('Accuracy on test set: {}'.format(round(accuracy, 3)))
        print('Initial Prediction:')
        initial = np.zeros((1, len(test_set[0, :])))
        initial[0] = test_set[0, :]
        print(initial)
        print(self.model.predict(initial))
        print(test_label[0])
|
from .method import Method
from .scheme import Scheme
from .status_code import StatusCode
|
import asyncio
async def foo(data):
    """Sleep briefly, then return ``data`` incremented by one.

    (Fixes the original quadruple-quote typo that put a stray '"' at the
    start of the docstring.)
    """
    print('foo', data)
    await asyncio.sleep(0.1)
    print('done foo')
    return data + 1
async def bar():
    """Sleep briefly and return the string 'done bar'.

    (Fixes the original quadruple-quote typo in the docstring; the old
    comment said "return nothing" but the function does return a value.)
    """
    print('bar')
    await asyncio.sleep(0.2)
    return 'done bar'
async def quit_it(loop_):
    """Sleep for 2 seconds, then stop the given event loop.

    Acts as a watchdog: by the time it fires, foo/bar (0.1 s / 0.2 s sleeps)
    have finished, so stopping the loop ends run_forever().

    :param loop_: the asyncio event loop to stop.
    :return: None.
    """
    print('quit handler')
    await asyncio.sleep(2)
    print('quit fired')
    loop_.stop()
    return None
# get the event loop (aka reactor)
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# since Python 3.10 — asyncio.new_event_loop() (or asyncio.run) is the modern
# form; kept as-is here.
loop = asyncio.get_event_loop()
# instantiate the coroutines
foo_coro = foo(10)
bar_coro = bar()
quit_coro = quit_it(loop)
# schedule them (they don't run yet)
# use gather to create a single future from a list of coro's
tasks = asyncio.gather(*[foo_coro, bar_coro, quit_coro])
if 1:
    # run the event loop, try and play nice and ensure that you clean up at the end, even on Keyboard Interrupt
    try:
        # quit_it() stops the loop after 2 s, which makes run_forever() return.
        loop.run_forever()
    except KeyboardInterrupt:
        loop.stop()
    finally:
        # NOTE(review): `tasks` is never awaited/retrieved before close();
        # if any coroutine were still pending this would warn — confirm
        # acceptable for a demo script.
        loop.close()
        print('loop closed')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
def resource_path(relative):
    '''
    Resolve a data-file path, honouring PyInstaller one-file bundles.

    When running from a frozen executable, PyInstaller unpacks resources
    into the directory named by sys._MEIPASS; otherwise the relative path
    is returned unchanged (normalised through os.path.join).
    '''
    bundle_dir = getattr(sys, "_MEIPASS", None)
    if bundle_dir is not None:
        # noinspection PyProtectedMember
        return os.path.join(bundle_dir, relative)
    return os.path.join(relative)
class Saveload(object):
    '''Saveload implementation

    Very simple save & load implementation: draws Save/Load buttons and
    persists three values to src/save.dat as one space-separated line.

    NOTE(review): this class is Python-2-only as written — `long`, indexing
    the result of `map()`, and writing `str` objects to a file opened in
    'wb' mode all break under Python 3. Confirm the target interpreter
    before porting.
    '''
    def __init__(self, ft):
        # Pygame-style font object used to render the button captions.
        self.font = ft
    def blit(self, screen, img, pos):
        # Draw the two button images, then overlay the 'Save'/'Load' captions
        # slightly inset (+5, +15) from each button's top-left corner.
        screen.blit(img[0], pos[0])
        screen.blit(img[1], pos[1])
        text_surface = self.font.render(u'Save', True, (0, 0, 0))
        screen.blit(text_surface, (pos[0][0] + 5, pos[0][1] + 15))
        text_surface = self.font.render(u'Load', True, (0, 0, 0))
        screen.blit(text_surface, (pos[1][0] + 5, pos[1][1] + 15))
    def save(self, dpo, cpo, san):
        # Persist the two UTF-8-encoded strings and the counter as a single
        # space-separated record (fields therefore must not contain spaces).
        sa = open(resource_path('src/save.dat'), 'wb')
        sa.write(str(dpo.encode('utf-8')) + ' ' + str(cpo.encode('utf-8')) + ' ' + str(san))
        sa.close()
    def load(self):
        # Read the last line of the save file and split it back into fields.
        sa = open(resource_path('src/save.dat'), 'rb')
        for x in sa:
            res = map(str, x.strip().split(' '))
        sa.close()
        dpo = res[0].decode('utf-8')
        cpo = res[1].decode('utf-8')
        san = long(res[2])
        return (dpo, cpo, san)
|
"""Parse a grammar written in ECMArkup."""
from __future__ import annotations
# mypy: no-implicit-optional
import os
import collections
from typing import Dict, Iterable, Optional, Tuple
from jsparagus import parse_pgen, gen, grammar, extension, types
from jsparagus.lexer import LexicalGrammar
from jsparagus.ordered import OrderedSet, OrderedFrozenSet
# Lexer for the ECMArkup grammar notation itself.
ESGrammarLexer = LexicalGrammar(
    # the operators and keywords:
    "[ ] { } , ~ + ? <! = == != => ( ) @ < > ' ; "
    "but empty here lookahead no not of one or returns through Some None impl for let",

    NL="\n",

    # any number of colons together
    EQ=r':+',

    # terminals of the ES grammar, quoted with backticks
    T=r'`[^` \n]+`|```',

    # also terminals, denoting control characters
    # Bug fix: the hex class was `[0-9A-f]`, whose A-f range also matches
    # the punctuation [ \ ] ^ _ ` — those would later crash int(..., 16)
    # in ESGrammarBuilder.chr. Restricted to real hex digits.
    CHR=r'<[A-Z ]+>|U\+[0-9A-Fa-f]{4}',

    # nonterminals/types that will be followed by parameters
    NTCALL=r'[A-Za-z]\w*(?=[\[<])',

    # nonterminals (also, boolean parameters and type names)
    NT=r'[A-Za-z]\w*',

    # nonterminals wrapped in vertical bars for no apparent reason
    NTALT=r'\|[A-Z]\w+\|',

    # the spec also gives a few productions names
    PRODID=r'#[A-Za-z]\w*',

    # prose not wrapped in square brackets
    # To avoid conflict with the `>` token, this is recognized only after a space.
    PROSE=r'(?<= )>[^\n]*',

    # prose wrapped in square brackets
    WPROSE=r'\[>[^]]*\]',

    # expression denoting a matched terminal or nonterminal
    MATCH_REF=r'\$(?:0|[1-9][0-9]*)',

    # Rust-style line comments (used by grammar extensions)
    RUSTCOMMENT=r'//.*\n',
)
# Parser for the ECMArkup notation, generated from esgrammar.pgen.
ESGrammarParser = gen.compile(
    parse_pgen.load_grammar(
        os.path.join(os.path.dirname(__file__), "esgrammar.pgen")))

# Sigils marking a boolean grammar parameter as absent/present.
SIGIL_FALSE = '~'
SIGIL_TRUE = '+'

# Abbreviations for single-character terminals, used in the lexical grammar.
ECMASCRIPT_CODE_POINTS = {
    # From <https://tc39.es/ecma262/#table-31>
    '<ZWNJ>': grammar.Literal('\u200c'),
    '<ZWJ>': grammar.Literal('\u200d'),
    '<ZWNBSP>': grammar.Literal('\ufeff'),

    # From <https://tc39.es/ecma262/#table-32>
    '<TAB>': grammar.Literal('\t'),
    '<VT>': grammar.Literal('\u000b'),
    '<FF>': grammar.Literal('\u000c'),
    '<SP>': grammar.Literal(' '),
    '<NBSP>': grammar.Literal('\u00a0'),
    # <ZWNBSP> already defined above
    '<USP>': grammar.UnicodeCategory('Zs'),

    # From <https://tc39.es/ecma262/#table-33>
    '<LF>': grammar.Literal('\u000a'),
    '<CR>': grammar.Literal('\u000d'),
    '<LS>': grammar.Literal('\u2028'),
    # Bug fix: PARAGRAPH SEPARATOR is U+2029; it was previously mapped to
    # U+2028, duplicating <LS> (LINE SEPARATOR).
    '<PS>': grammar.Literal('\u2029'),
}
class ESGrammarBuilder:
    """Reduce-action callbacks for ESGrammarParser.

    Each method corresponds to a production of esgrammar.pgen and builds the
    in-memory representation (jsparagus `grammar`/`types`/`extension` objects)
    for that production.
    """
    def __init__(self, terminal_names):
        # Names of terminals that are written as nonterminals in the grammar.
        # For example, "BooleanLiteral" is a terminal name when parsing the
        # syntactic grammar.
        if terminal_names is None:
            terminal_names = frozenset()
        self.terminal_names = frozenset(terminal_names)
        self.reset()

    def reset(self):
        """Reset per-parse state; called again before each grammar extension."""
        self.lexer = None
        # This is how full-parsing and lazy-parsing are implemented, using
        # different traits.
        #
        # This field contains the Rust's trait used for calling the method.
        # When a CallMethod is generated, it is assumed to be a function of
        # this trait. The trait is used by the Rust backend to generate
        # multiple backends which are implementing different set of traits.
        # Having the trait on the function call is useful as a way to filter
        # functions calls at code-generation time.
        #
        # This field is updated by the `rust_param_impl`, which is used in
        # grammar extensions, and visited before producing any CallMethod.
        self.method_trait = "AstBuilder"

    def rust_edsl(self, impl, grammar):
        """Wrap a parsed Rust-EDSL extension (note: `grammar` parameter
        shadows the `grammar` module inside this method)."""
        return extension.GrammarExtension(impl, grammar, self.lexer.filename)

    def rust_param_impl(self, trait, for_type, param):
        """Record `trait` as the current method trait and build an ImplFor."""
        self.method_trait = trait
        return extension.ImplFor(param, trait, for_type)

    def rust_impl(self, trait, impl_type):
        """Non-parameterized `impl Trait for Type`."""
        return self.rust_param_impl(trait, impl_type, [])

    def rust_nt_def(self, lhs, rhs_line):
        """Wrap a single-production nonterminal definition as a patch."""
        # Right now, only focus on the syntactic grammar, and assume that all
        # rules are patching existing grammar production by adding code.
        return extension.ExtPatch(self.nt_def(None, lhs, ':', [rhs_line]))

    def rust_rhs_line(self, symbols):
        """RHS line of a Rust-EDSL rule (no ifdef/reducer/prodid)."""
        return self.rhs_line(None, symbols, None, None)

    def rust_expr(self, expr):
        """Expression in a Rust-EDSL rule; must already be a CallMethod."""
        assert isinstance(expr, grammar.CallMethod)
        return expr

    # --- generic list-building helpers used by many productions ---
    def empty(self):
        return []

    def single(self, x):
        return [x]

    def append(self, x, y):
        return x + [y]

    def concat(self, x, y):
        return x + y

    def blank_line(self):
        return []

    def nt_def_to_list(self, nt_def):
        return [nt_def]

    def to_production(self, lhs, i, rhs, is_sole_production):
        """Wrap a list of grammar symbols `rhs` in a Production object."""
        body, reducer, condition = rhs
        if reducer is None:
            reducer = self.default_reducer(lhs, i, body, is_sole_production)
        return grammar.Production(body, reducer, condition=condition)

    def default_reducer(self, lhs, i, body, is_sole_production):
        """Synthesize a reducer call named after the nonterminal (plus the
        production index when there are several productions)."""
        assert isinstance(lhs, grammar.Nt)
        nt_name = lhs.name
        nargs = sum(1 for e in body if grammar.is_concrete_element(e))
        if is_sole_production:
            method_name = nt_name
        else:
            method_name = '{} {}'.format(nt_name, i)
        return self.expr_call(method_name, tuple(range(nargs)), None)

    def needs_asi(self, lhs, p):
        """True if p is a production in which ASI can happen."""
        # The purpose of the fake ForLexicalDeclaration production is to have a
        # copy of LexicalDeclaration that does not trigger ASI.
        #
        # Two productions have body == [";"] -- one for EmptyStatement and one
        # for ClassMember. Neither should trigger ASI.
        #
        # The only other semicolons that should not trigger ASI are the ones in
        # `for` statement productions, which happen to be exactly those
        # semicolons that are not at the end of a production.
        return (not (isinstance(lhs, grammar.Nt)
                     and lhs.name == 'ForLexicalDeclaration')
                and len(p.body) > 1
                and p.body[-1] == ';')

    def apply_asi(self, p, reducer_was_autogenerated):
        """Return two rules based on p, so that ASI can be applied."""
        assert isinstance(p.reducer, grammar.CallMethod)

        if reducer_was_autogenerated:
            # Don't pass the semicolon to the method.
            reducer = self.expr_call(p.reducer.method,
                                     p.reducer.args[:-1],
                                     None)
        else:
            reducer = p.reducer

        # Except for do-while loops, check at runtime that ASI occurs only at
        # the end of a line.
        if (len(p.body) == 7
                and p.body[0] == 'do'
                and p.body[2] == 'while'
                and p.body[3] == '('
                and p.body[5] == ')'
                and p.body[6] == ';'):
            code = "do_while_asi"
        else:
            code = "asi"

        return [
            # The preferred production, with the semicolon in.
            p.copy_with(body=p.body[:],
                        reducer=reducer),
            # The fallback production, performing ASI.
            p.copy_with(body=p.body[:-1] + [grammar.ErrorSymbol(code)],
                        reducer=reducer),
        ]

    def expand_lexical_rhs(self, rhs):
        """Split multi-character terminal strings into per-character Literals
        (the lexical grammar's terminals are single characters)."""
        body, reducer, condition = rhs
        out = []
        for e in body:
            if isinstance(e, str):
                # The terminal symbols of the lexical grammar are characters, so
                # add each character of this string as a separate element.
                out += [grammar.Literal(ch) for ch in e]
            else:
                out.append(e)
        return [out, reducer, condition]

    def nt_def(self, nt_type, lhs, eq, rhs_list):
        """Build a (name, eq, NtDef) triple; `eq` is ':' for the syntactic
        grammar and '::' for the lexical grammar."""
        has_sole_production = (len(rhs_list) == 1)
        production_list = []
        for i, rhs in enumerate(rhs_list):
            if eq == ':':
                # Syntactic grammar. A hack is needed for ASI.
                reducer_was_autogenerated = rhs[1] is None
                p = self.to_production(lhs, i, rhs, has_sole_production)
                if self.needs_asi(lhs, p):
                    production_list += self.apply_asi(p, reducer_was_autogenerated)
                else:
                    production_list.append(p)
            elif eq == '::':
                # Lexical grammar. A hack is needed to replace multicharacter
                # terminals like `!==` into sequences of character terminals.
                rhs = self.expand_lexical_rhs(rhs)
                p = self.to_production(lhs, i, rhs, has_sole_production)
                production_list.append(p)
        return (lhs.name, eq, grammar.NtDef(lhs.args, production_list, nt_type))

    def nt_def_one_of(self, nt_type, nt_lhs, eq, terminals):
        """`one of` shorthand: one single-terminal production per terminal."""
        return self.nt_def(nt_type, nt_lhs, eq, [([t], None, None) for t in terminals])

    def nt_lhs_no_params(self, name):
        return grammar.Nt(name, ())

    def nt_lhs_with_params(self, name, params):
        return grammar.Nt(name, tuple(params))

    # --- type expressions ---
    def simple_type(self, name):
        return types.Type(name)

    def lifetime_type(self, name):
        return types.Lifetime(name)

    def parameterized_type(self, name, args):
        return types.Type(name, tuple(args))

    def t_list_line(self, terminals):
        return terminals

    def terminal(self, t):
        """Strip the backticks from a quoted terminal token."""
        assert t[0] == "`"
        assert t[-1] == "`"
        return t[1:-1]

    def terminal_chr(self, chr):
        # Control-character terminals are not expected in this position;
        # reaching this is treated as a parse error.
        raise ValueError("FAILED: %r" % chr)

    def rhs_line(self, ifdef, rhs, reducer, _prodid):
        return (rhs, reducer, ifdef)

    def rhs_line_prose(self, prose):
        return ([prose], None, None)

    def empty_rhs(self):
        return []

    def expr_match_ref(self, token):
        """`$0`, `$1`, ... references to matched symbols -> int index."""
        assert token.startswith('$')
        return int(token[1:])

    def expr_call(self, method, args, fallible):
        """Build a CallMethod on the current method trait."""
        # NOTE: Currently "AstBuilder" functions are made fallible using the
        # fallible_methods taken from some Rust code which extract this
        # information to produce a JSON file.
        if self.method_trait == "AstBuilder":
            fallible = None
        return grammar.CallMethod(method, args or (), types.Type(self.method_trait),
                                  fallible is not None)

    def expr_some(self, expr):
        return grammar.Some(expr)

    def expr_none(self):
        return None

    def ifdef(self, value, nt):
        return nt, value

    def optional(self, nt):
        return grammar.Optional(nt)

    def but_not(self, nt, exclusion):
        _, exclusion = exclusion
        return grammar.Exclude(nt, [exclusion])
        # return ('-', nt, exclusion)

    def but_not_one_of(self, nt, exclusion_list):
        exclusion_list = [exclusion for _, exclusion in exclusion_list]
        return grammar.Exclude(nt, exclusion_list)
        # return ('-', nt, exclusion_list)

    def no_line_terminator_here(self, lt):
        """[no LineTerminator here] restriction."""
        if lt not in ('LineTerminator', '|LineTerminator|'):
            raise ValueError("unrecognized directive " + repr("[no " + lt + " here]"))
        return grammar.NoLineTerminatorHere

    def nonterminal(self, name):
        # Names registered as terminal_names are really terminals spelled
        # like nonterminals (e.g. "BooleanLiteral" in the syntactic grammar).
        if name in self.terminal_names:
            return name
        return grammar.Nt(name, ())

    def nonterminal_apply(self, name, args):
        if name in self.terminal_names:
            raise ValueError("parameters applied to terminal {!r}".format(name))
        if len(set(k for k, expr in args)) != len(args):
            raise ValueError("parameter passed multiple times")
        return grammar.Nt(name, tuple(args))

    def arg_expr(self, sigil, argname):
        # '?' forwards the enclosing production's parameter of the same name;
        # otherwise the sigil is the literal True/False value.
        if sigil == '?':
            return (argname, grammar.Var(argname))
        else:
            return (argname, sigil)

    def sigil_false(self):
        return False

    def sigil_true(self):
        return True

    # --- `but not` exclusion operands, tagged by kind ---
    def exclusion_terminal(self, t):
        return ("t", t)

    def exclusion_nonterminal(self, nt):
        return ("nt", nt)

    def exclusion_chr_range(self, c1, c2):
        return ("range", c1, c2)

    # --- lookahead restrictions ---
    def la_eq(self, t):
        return grammar.LookaheadRule(OrderedFrozenSet([t]), True)

    def la_ne(self, t):
        return grammar.LookaheadRule(OrderedFrozenSet([t]), False)

    def la_not_in_nonterminal(self, nt):
        return grammar.LookaheadRule(OrderedFrozenSet([nt]), False)

    def la_not_in_set(self, lookahead_exclusions):
        if all(len(excl) == 1 for excl in lookahead_exclusions):
            return grammar.LookaheadRule(
                OrderedFrozenSet(excl[0] for excl in lookahead_exclusions),
                False)
        raise ValueError("unsupported: lookahead > 1 token, {!r}"
                         .format(lookahead_exclusions))

    def chr(self, t):
        """Map a CHR token (`<NAME>` or `U+XXXX`) to a Literal.

        Inside this method, `chr(...)` resolves to the builtin — the class
        attribute of the same name is not in the lexical scope chain.
        """
        assert t[0] == "<" or t[0] == 'U'
        if t[0] == "<":
            assert t[-1] == ">"
            if t not in ECMASCRIPT_CODE_POINTS:
                raise ValueError("unrecognized character abbreviation {!r}".format(t))
            return ECMASCRIPT_CODE_POINTS[t]
        else:
            assert t[1] == "+"
            return grammar.Literal(chr(int(t[2:], base=16)))
def finish_grammar(nt_defs, goals, variable_terminals, synthetic_terminals,
                   single_grammar=True, extensions=()):
    """Assemble parsed nonterminal definitions into a grammar.Grammar.

    :param nt_defs: iterable of (name, eq, NtDef-or-production-list) triples,
        where eq is ':' (syntactic grammar) or '::' (lexical grammar).
    :param goals: goal nonterminal names; must be non-empty.
    :param variable_terminals: terminals written as nonterminal names.
    :param synthetic_terminals: mapping of synthetic terminal definitions.
    :param single_grammar: if True, require all goals to belong to the same
        grammar (same eq) and keep only that grammar's definitions.
    :param extensions: extension patches applied to the result.
        (Bug fix: the default was a mutable `[]`; it is only iterated, so an
        empty tuple is a safe, backward-compatible replacement.)
    :raises ValueError: on duplicate/multiple definitions, missing goals,
        goals spanning grammars, or terminal/nonterminal name clashes.
    """
    nt_grammars = {}
    for nt_name, eq, _ in nt_defs:
        if nt_name in nt_grammars:
            raise ValueError(
                "duplicate definitions for nonterminal {!r}"
                .format(nt_name))
        nt_grammars[nt_name] = eq

    # Figure out which grammar we were trying to get (":" for syntactic,
    # "::" for lexical) based on the goal symbols.
    goals = list(goals)
    if len(goals) == 0:
        raise ValueError("no goal nonterminals specified")
    if single_grammar:
        selected_grammars = set(nt_grammars[goal] for goal in goals)
        assert len(selected_grammars) != 0
        if len(selected_grammars) > 1:
            raise ValueError(
                "all goal nonterminals must be part of the same grammar; "
                "got {!r} (matching these grammars: {!r})"
                .format(set(goals), set(selected_grammars)))
        [selected_grammar] = selected_grammars

    terminal_set = set()

    def hack_production(p):
        # Unquote backtick-quoted terminals in place and record them.
        for i, e in enumerate(p.body):
            if isinstance(e, str) and e[:1] == "`":
                if len(e) < 3 or e[-1:] != "`":
                    raise ValueError(
                        "Unrecognized grammar symbol: {!r} (in {!r})"
                        .format(e, p))
                # NOTE(review): indexing the Production itself (`p[i]`) looks
                # odd — it relies on grammar.Production supporting item
                # assignment into its body; confirm this isn't meant to be
                # `p.body[i]`.
                p[i] = token = e[1:-1]
                terminal_set.add(token)

    nonterminals = {}
    for nt_name, eq, rhs_list_or_lambda in nt_defs:
        if single_grammar and eq != selected_grammar:
            continue

        if isinstance(rhs_list_or_lambda, grammar.NtDef):
            nonterminals[nt_name] = rhs_list_or_lambda
        else:
            rhs_list = rhs_list_or_lambda
            for p in rhs_list:
                if not isinstance(p, grammar.Production):
                    raise ValueError(
                        "invalid grammar: ifdef in non-function-call context")
                hack_production(p)
            if nt_name in nonterminals:
                raise ValueError(
                    "unsupported: multiple definitions for nt " + nt_name)
            nonterminals[nt_name] = rhs_list

    for t in terminal_set:
        if t in nonterminals:
            raise ValueError(
                "grammar contains both a terminal `{}` and nonterminal {}"
                .format(t, t))

    # Add execution modes to generate the various functions needed to handle
    # syntax parsing and full parsing execution modes.
    exec_modes = collections.defaultdict(OrderedSet)
    noop_parser = types.Type("ParserTrait", (types.Lifetime("alloc"), types.UnitType))
    token_parser = types.Type("ParserTrait", (
        types.Lifetime("alloc"), types.Type("StackValue", (types.Lifetime("alloc"),))))
    ast_builder = types.Type("AstBuilderDelegate", (types.Lifetime("alloc"),))

    # Full parsing takes token as input and build an AST.
    exec_modes["full_actions"].extend([token_parser, ast_builder])

    # Syntax parsing takes token as input but skip building the AST.
    # TODO: The syntax parser is commented out for now, as we need something to
    # be produced when we cannot call the AstBuilder for producing the values.

    # No-op parsing is used for the simulator, which is so far used for
    # querying whether we can end the incremental input and lookup if a state
    # can accept some kind of tokens.
    exec_modes["noop_actions"].add(noop_parser)

    # Extensions are using an equivalent of Rust types to define the kind of
    # parsers to be used, this map is used to convert these type names to the
    # various execution modes.
    full_parser = types.Type("FullParser")
    syntax_parser = types.Type("SyntaxParser")
    noop_parser = types.Type("NoopParser")
    type_to_modes = {
        noop_parser: ["noop_actions", "full_actions"],
        syntax_parser: ["full_actions"],
        full_parser: ["full_actions"],
    }

    result = grammar.Grammar(
        nonterminals,
        goal_nts=goals,
        variable_terminals=variable_terminals,
        synthetic_terminals=synthetic_terminals,
        exec_modes=exec_modes,
        type_to_modes=type_to_modes)
    result.patch(extensions)
    return result
def parse_esgrammar(
        text: str,
        *,
        filename: Optional[str] = None,
        extensions: Iterable[Tuple[os.PathLike, int, str]] = (),
        goals: Optional[Iterable[str]] = None,
        terminal_names: Iterable[str] = (),
        synthetic_terminals: Optional[Dict[str, OrderedSet[str]]] = None,
        single_grammar: bool = True
) -> grammar.Grammar:
    """Parse ECMArkup grammar `text` (plus optional Rust-EDSL `extensions`,
    given as (filename, start line, content) triples) into a Grammar."""
    if not text.endswith("\n\n"):
        # Horrible hack: add a blank line at the end of the document so that
        # the esgrammar grammar can use newlines as delimiters. :-P
        # NOTE(review): if `text` does not even end with a single "\n", one
        # appended "\n" still leaves no blank line — presumably all callers
        # pass newline-terminated files; confirm.
        text += "\n"

    terminal_names = frozenset(terminal_names)
    if synthetic_terminals is None:
        synthetic_terminals = {}

    # Main grammar document.
    builder = ESGrammarBuilder(terminal_names)
    parser = ESGrammarParser(builder=builder, goal="grammar")
    lexer = ESGrammarLexer(parser, filename=filename)
    lexer.write(text)
    nt_defs = lexer.close()

    # Each extension is parsed with a fresh parser/lexer but the same builder
    # (reset between runs).
    grammar_extensions = []
    for ext_filename, start_lineno, content in extensions:
        builder.reset()
        parser = ESGrammarParser(builder=builder, goal="rust_edsl")
        lexer = ESGrammarLexer(parser, filename=ext_filename)
        builder.lexer = lexer
        lexer.start_lineno = start_lineno
        lexer.write(content)
        result = lexer.close()
        grammar_extensions.append(result)

    if goals is None:
        # Default to the first nonterminal in the input.
        goals = [nt_defs[0][0]]

    return finish_grammar(
        nt_defs,
        goals=goals,
        variable_terminals=terminal_names - frozenset(synthetic_terminals),
        synthetic_terminals=synthetic_terminals,
        single_grammar=single_grammar,
        extensions=grammar_extensions)
|
# Read n words, order them by their reversed spelling, and print each word
# (in its original orientation) in that order.
n = int(input())
s = [input()[::-1] for _ in range(n)]
s.sort()
for word in s:
    print(word[::-1])
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_colors:
#
# Color names
# ===========
#
# ProPlot registers several new color names and includes tools for defining
# your own color names. These features are described below.
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_colors_included:
#
# Included colors
# ---------------
#
# ProPlot adds new color names from the `XKCD color survey
# <https://blog.xkcd.com/2010/05/03/color-survey-results/>`__ and
# the `Open Color <https://github.com/yeun/open-color>`__ UI design color
# palettes. You can use `~proplot.demos.show_colors` to generate a table of these
# colors. Note that matplotlib's native `X11/CSS4 named colors
# <https://matplotlib.org/examples/color/named_colors.html>`__ are still
# registered, but some of these color names may be overwritten by the XKCD names,
# and we encourage choosing colors from the below tables instead. XKCD colors
# are normally `available in matplotlib
# <https://matplotlib.org/stable/tutorials/colors/colors.html>`__ under the
# ``xkcd:`` prefix, but ProPlot doesn't require this prefix because the XKCD
# selection is larger and the names are more likely to match your intuition
# for what a color "should" look like.
#
# To reduce the number of registered color names to a more manageable size,
# ProPlot filters the available XKCD colors so that they are *sufficiently
# distinct* in the :ref:`perceptually uniform colorspace <ug_perceptual>`.
# This makes it a bit easier to pick out colors from the table generated with
# `~proplot.demos.show_colors`. Similar names were also cleaned up -- for
# example, ``'reddish'`` and ``'reddy'`` are changed to ``'red'``.
# %%
# Display the table of all color names registered by ProPlot.
import proplot as pplt
fig, axs = pplt.show_colors()
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_colors_change:
#
# Modifying colors
# ----------------
#
# You can quickly modify colors using the `~proplot.utils.set_alpha`,
# `~proplot.utils.set_hue`, `~proplot.utils.set_saturation`,
# `~proplot.utils.set_luminance`, `~proplot.utils.shift_hue`,
# `~proplot.utils.scale_saturation` and `~proplot.utils.scale_luminance`
# functions. The ``set`` functions change individual hue, saturation, or
# luminance values in the :ref:`perceptually uniform colorspace <ug_perceptual>`
# specified by the `space` keyword. The ``shift`` and ``scale`` functions shift or
# scale the hue, saturation, or luminance by the input value -- for example,
# ``scale_luminance('color', 1.2)`` makes ``'color'`` 20% brighter. These
# are useful for creating color gradations outside of `~proplot.colors.Cycle` or
# if you simply spot a color you like and want to make it a bit brighter,
# less vibrant, etc.
# %%
import proplot as pplt
import numpy as np

# Figure
# Fixed seed so the scatter layout is reproducible across doc builds.
state = np.random.RandomState(51423)
fig, axs = pplt.subplots(ncols=3, axwidth=2)
axs.format(
    suptitle='Modifying colors',
    toplabels=('Shifted hue', 'Scaled luminance', 'Scaled saturation'),
    toplabelweight='normal',
    xformatter='none', yformatter='none',
)

# Shifted hue
# handlelength=0 hides legend handles' line segments so only labels show.
with pplt.rc.context({'legend.handlelength': 0}):
    N = 50
    marker = 'o'
    for shift in (0, -60, 60):
        x, y = state.rand(2, N)
        color = pplt.shift_hue('grass', shift)
        axs[0].scatter(x, y, marker=marker, c=color, legend='b', label=shift)

    # Scaled luminance
    for scale in (0.2, 1, 2):
        x, y = state.rand(2, N)
        color = pplt.scale_luminance('bright red', scale)
        axs[1].scatter(x, y, marker=marker, c=color, legend='b', label=scale)

    # Scaled saturation
    for scale in (0, 1, 3):
        x, y = state.rand(2, N)
        color = pplt.scale_saturation('ocean blue', scale)
        axs[2].scatter(x, y, marker=marker, c=color, legend='b', label=scale)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_colors_cmaps:
#
# Colors from colormaps
# ---------------------
#
# If you want to draw an individual color from a colormap or a color cycle,
# use ``color=(cmap, coord)`` or ``color=(cycle, index)`` with any command
# that accepts the `color` keyword. The ``coord`` should be between ``0`` and
# ``1``, while the ``index`` is the index on the list of cycle colors. This
# feature is powered by the `~proplot.colors.ColorDatabase` class. This is
# useful if you spot a nice color in one of the available colormaps or color
# cycles and want to use it for some arbitrary plot element. Use the
# `~proplot.utils.to_rgb` or `~proplot.utils.to_rgba` functions to retrieve
# the RGB or RGBA channel values.
# %%
import proplot as pplt
import numpy as np

# Figure
fig, axs = pplt.subplots(ncols=2, share=0, refwidth=2.2)
axs.format(
    xformatter='null', yformatter='null', abc=True, abcstyle='A.', titleloc='l',
    suptitle='On-the-fly color selections'
)

# Drawing from colormaps
# color=(cmap_name, coord) picks the color at `coord` in [0, 1] of that map.
ax = axs[0]
name = 'Deep'
idxs = pplt.arange(0, 1, 0.2)
state = np.random.RandomState(51423)
state.shuffle(idxs)
for idx in idxs:
    data = (state.rand(20) - 0.4).cumsum()
    h = ax.plot(
        data, lw=5, color=(name, idx),
        label=f'idx {idx:.1f}', legend='l', legend_kw={'ncols': 1}
    )
ax.colorbar(pplt.Colormap(name), loc='l', locator='none')
ax.format(title=f'Drawing from colormap {name!r}', grid=True)

# Drawing from color cycles
# color=(cycle_name, index) picks the color at integer `index` of the cycle.
ax = axs[1]
name = 'Qual1'
idxs = np.arange(6)
state.shuffle(idxs)
for idx in idxs:
    data = (state.rand(20) - 0.4).cumsum()
    h = ax.plot(
        data, lw=5, color=(name, idx),
        label=f'idx {idx:.0f}', legend='r', legend_kw={'ncols': 1}
    )
ax.colorbar(pplt.Colormap(name), loc='r', locator='none')
ax.format(title=f'Drawing from color cycle {name!r}')
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_colors_user:
#
# Using your own colors
# ---------------------
#
# You can register your own colors by adding ``.txt`` files to the
# ``~/.proplot/colors`` directory and calling
# `~proplot.config.register_colors`. This command is also called on import.
# Each file should contain lines that look like ``color: #xxyyzz`` where
# ``color`` is the registered color name and ``#xxyyzz`` is the HEX color
# value. Lines beginning with ``#`` are ignored as comments.
|
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
# Routes of the identity-providers panel exercised by these tests; the
# update/detail routes are bound to the fixture provider id 'idp_1'.
IDPS_INDEX_URL = reverse('horizon:identity:identity_providers:index')
IDPS_REGISTER_URL = reverse('horizon:identity:identity_providers:register')
IDPS_UPDATE_URL = reverse('horizon:identity:identity_providers:update',
                          args=['idp_1'])
IDPS_DETAIL_URL = reverse('horizon:identity:identity_providers:detail',
                          args=['idp_1'])
class IdPsViewTests(test.BaseAdminViewTests):
    @test.create_mocks({api.keystone: ('identity_provider_list',)})
    def test_index(self):
        """Index view renders the data table with all identity providers."""
        self.mock_identity_provider_list.return_value = \
            self.identity_providers.list()
        res = self.client.get(IDPS_INDEX_URL)
        self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
        # assertItemsEqual is provided by horizon's test helpers (the
        # unittest method of that name only exists on Python 2).
        self.assertItemsEqual(res.context['table'].data,
                              self.identity_providers.list())
        self.mock_identity_provider_list.assert_called_once_with(
            test.IsHttpRequest())
@test.create_mocks({api.keystone: ('identity_provider_create', )})
def test_create(self):
idp = self.identity_providers.first()
self.mock_identity_provider_create.return_value = idp
formData = {'method': 'RegisterIdPForm',
'id': idp.id,
'description': idp.description,
'enabled': idp.enabled,
'remote_ids': ', '.join(idp.remote_ids)}
res = self.client.post(IDPS_REGISTER_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
self.mock_identity_provider_create.assert_called_once_with(
test.IsHttpRequest(),
idp.id,
description=idp.description,
enabled=idp.enabled,
remote_ids=idp.remote_ids)
@test.create_mocks({api.keystone: ('identity_provider_get',
'identity_provider_update')})
def test_update(self):
idp = self.identity_providers.first()
new_description = 'new_idp_desc'
self.mock_identity_provider_get.return_value = idp
self.mock_identity_provider_update.return_value = None
formData = {'method': 'UpdateIdPForm',
'id': idp.id,
'description': new_description,
'enabled': idp.enabled,
'remote_ids': ', '.join(idp.remote_ids)}
res = self.client.post(IDPS_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
self.mock_identity_provider_get.assert_called_once_with(
test.IsHttpRequest(), idp.id)
self.mock_identity_provider_update.assert_called_once_with(
test.IsHttpRequest(),
idp.id,
description=new_description,
enabled=idp.enabled,
remote_ids=idp.remote_ids)
@test.create_mocks({api.keystone: ('identity_provider_list',
'identity_provider_delete')})
def test_delete(self):
idp = self.identity_providers.first()
self.mock_identity_provider_list.return_value = \
self.identity_providers.list()
self.mock_identity_provider_delete.return_value = None
formData = {'action': 'identity_providers__delete__%s' % idp.id}
res = self.client.post(IDPS_INDEX_URL, formData)
self.assertNoFormErrors(res)
self.mock_identity_provider_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_identity_provider_delete.assert_called_once_with(
test.IsHttpRequest(), idp.id)
@test.create_mocks({api.keystone: ('identity_provider_get',
'protocol_list')})
def test_detail(self):
idp = self.identity_providers.first()
self.mock_identity_provider_get.return_value = idp
self.mock_protocol_list.return_value = self.idp_protocols.list()
res = self.client.get(IDPS_DETAIL_URL)
self.assertTemplateUsed(
res, 'identity/identity_providers/_detail_overview.html')
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.mock_identity_provider_get.assert_called_once_with(
test.IsHttpRequest(), idp.id)
self.mock_protocol_list.assert_called_once_with(
test.IsHttpRequest(), idp.id)
@test.create_mocks({api.keystone: ('identity_provider_get',
'protocol_list')})
def test_detail_protocols(self):
idp = self.identity_providers.first()
self.mock_identity_provider_get.return_value = idp
self.mock_protocol_list.return_value = self.idp_protocols.list()
res = self.client.get(IDPS_DETAIL_URL + '?tab=idp_details__protocols')
self.assertTemplateUsed(
res, 'identity/identity_providers/_detail_overview.html')
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertItemsEqual(res.context['idp_protocols_table'].data,
self.idp_protocols.list())
self.mock_identity_provider_get.assert_called_once_with(
test.IsHttpRequest(), idp.id)
self.mock_protocol_list.assert_called_once_with(
test.IsHttpRequest(), idp.id)
|
import re
def fun(email):
    """Validate an email address against a fixed pattern.

    Accepted form: a local part starting with a letter followed by word
    characters or hyphens, an alphanumeric domain, and a 1-3 letter TLD.

    :param email: (str) candidate address
    :returns: a re.Match object (truthy) when valid, None otherwise
    """
    # Raw string: the original plain literal produced invalid escape
    # sequences for \w and \. (SyntaxWarning on recent Pythons).
    pattern = r'^[a-zA-Z][\w-]*@[a-zA-Z0-9]+\.[a-zA-Z]{1,3}$'
    return re.match(pattern, email)
|
# -*- coding: utf-8 -*-
from django.db import models
from .mixins import SeasonsMixin
from .time_base import TimeMixin
from . import fields
class PartManager(models.Manager):
    """Manager enabling natural-key (de)serialization of Part instances."""
    def get_by_natural_key(self, name):
        # `name` is unique on Part, so the lookup is unambiguous.
        return self.get(name=name)
class Part(SeasonsMixin, TimeMixin, models.Model):
    """A named, orderable section ("Abschnitt") with season/timestamp mixins."""
    objects = PartManager()
    # Unique display name; doubles as the natural key (see natural_key()).
    name = fields.NameField(
        'Bezeichnung',
        unique=True,
        help_text="Bezeichnung des Abschnitts",
    )
    # Optional free-text description.
    description = fields.DescriptionField(
        'Beschreibung',
        help_text="Beschreibung des Abschnitts",
        blank=True, default=''
    )
    order = fields.OrderField()
    def natural_key(self):
        # Trailing comma: Django expects natural keys to be tuples.
        return self.name,
    natural_key.dependencies = ['server.season']
    def __str__(self):
        return self.name
    class Meta:
        # 'updated' presumably provided by TimeMixin -- confirm.
        get_latest_by = "updated"
        verbose_name = "Abschnitt"
        verbose_name_plural = "Abschnitte"
        ordering = ('order', 'name')
|
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.annotations.tokenize.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tokenization module for the multilingual text normalization system.
"""
import re
from sppas.src.utils.makeunicode import sppasUnicode
# ---------------------------------------------------------------------------
class sppasTokenSegmenter(object):
    """Create words from tokens on the basis of a lexicon.
    :author: Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact: develop@sppas.org
    :license: GPL, v3
    :copyright: Copyright (C) 2011-2018 Brigitte Bigi
    This is a totally language independent method, based on a longest
    matching algorithm to aggregate tokens into words. Words of a lexicon
    are found and:
    1/ unbind or not if they contain a separator character:
        - rock'n'roll -> rock'n'roll
        - I'm -> I 'm
        - it's -> it 's
    2/ bind using a character separator like for example, with '_':
        - parce que -> parce_que
        - rock'n roll -> rock'n_roll
    """
    # Default character inserted between aggregated tokens by bind().
    SEPARATOR = "_"
    # Default maximum number of tokens aggregated into one word.
    STICK_MAX = 7
    # -------------------------------------------------------------------------
    def __init__(self, vocab=None):
        """Create a new sppasTokenSegmenter instance.
        :param vocab: (Vocabulary) known words; None disables lexicon matching
        """
        self.__vocab = vocab
        self.__separator = sppasTokenSegmenter.SEPARATOR
        self.__aggregate_max = sppasTokenSegmenter.STICK_MAX
    # -------------------------------------------------------------------------
    def set_aggregate_max(self, value=STICK_MAX):
        """Fix the maximum number of words to stick.
        This is a language dependant value. For French, it's 5 with the word:
        "au fur et à mesure". But it can be more to stick phrases instead of
        words for example.
        :param value: (int) Maximum number of tokens to aggregate/stick.
        :raises ValueError: if value is outside [1, 100]
        """
        value = int(value)
        if value < 1:
            raise ValueError('set_aggregate_max: value should be > 0.')
        if value > 100:
            raise ValueError('set_aggregate_max: value should be < 100.')
        self.__aggregate_max = value
    # -------------------------------------------------------------------------
    def set_separator(self, char=SEPARATOR):
        """Fix the character to separate tokens.
        :param char: (char) Separator character. Can be an empty string.
        """
        # Only the first character is kept; an empty string stays empty.
        char = str(char)
        if len(char) > 0:
            char = char[0]
        self.__separator = char
    # -------------------------------------------------------------------------
    def __stick_longest_lr(self, phrase, separator):
        """Return the longest first word of a phrase.
        A longest matching algorithm is applied from left to right.
        :param phrase: (str) space-separated candidate tokens
        :param separator: (str) joiner used when testing aggregated tokens
        :returns: tuple of (index of the first longest token, the longest token)
        """
        tab_toks = phrase.split(" ")
        token = tab_toks[0]
        i = len(tab_toks)
        if self.__vocab is None:
            # NOTE(review): callers advance by (returned index + 1). Returning
            # 1 here therefore skips a token when no vocabulary is set, while
            # the loop below returns 0 after consuming a single token --
            # confirm the no-vocab path is actually exercised.
            return 1, token
        while i > 0:
            # try to aggregate all tokens
            token = separator.join(tab_toks)
            # next round will try without the last token
            tab_toks.pop()
            i -= 1
            # find if this is a word in the vocabulary
            if self.__vocab.is_unk(token) is False:
                break
        # the first real token is the first given token
        return i, sppasUnicode(token).to_strip()
    # -----------------------------------------------------------------------
    def bind(self, utt):
        """Bind tokens of an utterance using a specific character.
        :param utt: (list) List of tokens of an utterance (a transcription, a sentence, ...)
        :returns: A list of strings
        """
        new_utt = list()
        idx_start = 0
        while idx_start < len(utt):
            # use a longest matching to aggregate the current token with the next ones
            # window is at most aggregate_max+1 tokens wide
            idx_end = min(len(utt), idx_start+self.__aggregate_max+1)
            phrase = " ".join(utt[idx_start:idx_end])
            # idx_end is re-bound: it now holds the match index, so we
            # advance past the (idx_end + 1) tokens consumed by this word.
            idx_end, word = self.__stick_longest_lr(sppasUnicode(phrase).to_strip(), self.__separator)
            new_utt.append(word)
            idx_start += idx_end + 1
        return new_utt
    # -----------------------------------------------------------------------
    def unbind(self, utt):
        """Unbind tokens containing - or ' or . depending on rules.
        :param utt: (list) List of tokens of an utterance (a transcription, a sentence, ...)
        :returns: A list of strings
        """
        # NOTE(review): unlike __stick_longest_lr, this method does not guard
        # against self.__vocab being None (is_unk would fail) -- confirm
        # callers always supply a vocabulary before unbinding.
        new_utt = list()
        for tok in utt:
            is_unknown = self.__vocab.is_unk(tok.lower().strip())
            is_sampa = tok.startswith('/') and tok.endswith('/')
            is_trunc = tok.endswith('-')
            # a missing compound word?
            #   --> an unknown token
            #   --> containing a special character
            #   --> that is not a truncated word
            #   --> not in a sampa sequence!
            if is_unknown is True \
                    and ("-" in tok or "'" in tok or "." in tok) \
                    and is_sampa is False\
                    and is_trunc is False:
                # KEEP special chars in the array! (capturing re.split)
                tab_split = re.split("([-'.])", tok)
                tab_tok = list(entry for entry in tab_split if len(entry) > 0)
                idx_start = 0
                while idx_start < len(tab_tok):
                    # use a longest matching to aggregate the current token with the next ones
                    # fixed 5-wide window here (not aggregate_max), joined with ""
                    idx_end = min(len(tab_tok), idx_start + 5)
                    phrase = " ".join(tab_tok[idx_start:idx_end])
                    idx_end, word = self.__stick_longest_lr(sppasUnicode(phrase).to_strip(), "")
                    new_utt.append(word)
                    idx_start += idx_end + 1
            else:
                new_utt.append(sppasUnicode(tok).to_strip())
        return new_utt
|
from delphi.translators.for2py.format import *
from delphi.translators.for2py.arrays import *
def main():
    """Print a 5x5 table of A(i, j) = i + j for i in [-3, 1], j in [-4, 0].

    for2py translation of a Fortran array test program; each row is written
    with the Fortran format '5(I5,X)'.
    """
    # The original used sys.stdout without importing sys, relying on the
    # star imports above leaking it; import it explicitly instead.
    import sys
    A = Array([(-3, 1), (-4, 0)])
    for i in range(-3, 1 + 1):
        for j in range(-4, 0 + 1):
            A.set_((i, j), i + j)
    fmt_obj = Format(['5(I5,X)'])
    for i in range(-3, 1 + 1):
        # One formatted row per i, columns j = -4 .. 0.
        sys.stdout.write(fmt_obj.write_line([A.get_((i, j))
                                             for j in range(-4, 0 + 1)]))
main()
|
"""The data models and enums returned and accepted by the api."""
__all__ = ["cards", "embeds", "events", "fundings", "merchants", "transactions"]
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# (c) 2020 Mike Lewis
import setuptools
import foursquare
version = str(foursquare.__version__)

# Read the long description with a context manager; the original
# open("./README.txt").read() left the file handle for the GC to close
# (a ResourceWarning under CPython).
with open("./README.txt", "r") as readme_file:
    long_description = readme_file.read()

setuptools.setup(
    name="foursquare",
    version=version,
    author="Mike Lewis",
    author_email="mlewis.mail@gmail.com",
    url="http://github.com/mLewisLogic/foursquare",
    description="easy-as-pie foursquare API client",
    long_description=long_description,
    download_url="http://github.com/mLewisLogic/foursquare/tarball/master",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "License :: OSI Approved :: MIT License",
    ],
    packages=setuptools.find_packages(),
    install_requires=["requests>=2.1", "six"],
    license="MIT License",
    keywords="foursquare api",
    include_package_data=True,
    zip_safe=True,
)
|
from subprocess import call
class Telescope:
    """Driver for a serial-connected telescope mount.

    Movement commands convert a requested speed into a pulse duration
    (DURATION_CONST / speed, clamped to [MIN_DURATION, MAX_DURATION]) and
    write single-character direction commands to the device.
    """
    MAX_SPEED = 10000.0
    MIN_DURATION = 20.0
    MAX_DURATION = 20000.0
    DURATION_CONST = MAX_SPEED * MIN_DURATION

    def __init__(self, device='/dev/ttyACM0'):
        """Configure the serial line (raw-ish mode, 9600 baud) and open it for writing."""
        call(("stty -F %s -imaxbel -opost -isig -icanon -echo -echoe -ixoff -ixon 9600" % device).split(" "))
        self.file = open(device, "w")

    def setAlt(self, speed):
        """Move in altitude; the sign of *speed* selects the direction."""
        command = 'u' if speed < 0 else 'j'
        duration = self.calcDuration(speed)
        # print() call form runs on both Python 2 and 3; the original used
        # Python-2-only print statements (a SyntaxError under Python 3).
        print("n%dx" % duration)
        self.file.write("n%dx" % duration)
        # The direction command is written twice, as in the original --
        # presumably required by the firmware protocol; confirm before changing.
        self.file.write(command)
        self.file.write(command)
        self.file.flush()

    def setAzimuth(self, speed):
        """Move in azimuth; the sign of *speed* selects the direction."""
        command = 'k' if speed < 0 else 'h'
        duration = self.calcDuration(speed)
        print("m%dx" % duration)
        self.file.write("m%dx" % duration)
        # Sent twice, matching the original protocol.
        self.file.write(command)
        self.file.write(command)
        self.file.flush()

    def setSteps(self, num):
        """Write the step value to the device (*num* must already be a str)."""
        self.file.write(num)
        self.file.flush()

    def up(self):
        self.file.write("s")
        self.file.flush()

    def down(self):
        self.file.write("w")
        self.file.flush()

    def left(self):
        self.file.write("d")
        self.file.flush()

    def right(self):
        self.file.write("a")
        self.file.flush()

    def calcDuration(self, speed):
        """Return the pulse duration for *speed*, clamped to the allowed range.

        Zero is treated as speed 1 to avoid division by zero; the sign of
        the speed is ignored (direction is handled by the callers).
        """
        if speed == 0:
            speed = 1
        duration = int(abs(Telescope.DURATION_CONST / speed))
        if duration > Telescope.MAX_DURATION:
            duration = Telescope.MAX_DURATION
        if duration <= Telescope.MIN_DURATION:
            duration = Telescope.MIN_DURATION
        return duration

    def start(self):
        self.file.write("1")
        self.file.flush()

    def stop(self):
        self.file.write("0")
        self.file.flush()
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
This script check that only whitelisted headers are included (transitivly) by
including chemfiles.h or chemfiles.hpp.
"""
import os
import sys
import re
# Repository root, resolved relative to this script's location.
ROOT = os.path.join(os.path.dirname(__file__), "..", "..")
# Headers produced by the build; absent from the source tree, so the
# recursive scan must not try to open them.
GENERATED_HEADERS = ["chemfiles/config.hpp", "chemfiles/exports.hpp"]
# Global failure counter, incremented by error() and checked at exit.
ERRORS = 0
# Headers allowed to be (transitively) reachable from the public umbrella
# headers chemfiles.h / chemfiles.hpp.
WHITELIST = [
    # standard C99 headers
    "stdbool.h", "stdint.h",
    # standard C++11 headers
    "iterator", "functional", "cstdint", "array", "utility", "cassert",
    "string", "memory", "exception", "limits", "algorithm", "stdexcept",
    "vector", "cmath", "type_traits", "unordered_map",
    # chemfiles helper headers
    "chemfiles/span.hpp",
    "chemfiles/optional.hpp",
    "chemfiles/exports.hpp",
    "chemfiles/config.hpp",
    "chemfiles/sorted_set.hpp",
    "chemfiles/unreachable.hpp",
    # chemfiles main headers
    "chemfiles/generic.hpp",
    "chemfiles/types.hpp",
    "chemfiles/Atom.hpp",
    "chemfiles/Frame.hpp",
    "chemfiles/Error.hpp",
    "chemfiles/Residue.hpp",
    "chemfiles/Property.hpp",
    "chemfiles/Topology.hpp",
    "chemfiles/UnitCell.hpp",
    "chemfiles/Trajectory.hpp",
    "chemfiles/Selections.hpp",
    "chemfiles/Connectivity.hpp",
    # chemfiles capi headers
    "chemfiles/capi/atom.h",
    "chemfiles/capi/selection.h",
    "chemfiles/capi/trajectory.h",
    "chemfiles/capi/residue.h",
    "chemfiles/capi/property.h",
    "chemfiles/capi/cell.h",
    "chemfiles/capi/frame.h",
    "chemfiles/capi/types.h",
    "chemfiles/capi/topology.h",
    "chemfiles/capi/misc.h",
]
def error(message):
    """Report a check failure and bump the global error counter."""
    global ERRORS
    ERRORS = ERRORS + 1
    print(message)
def included_headers(path):
    """Recursively collect every header name #include'd from *path*.

    chemfiles headers (other than the generated ones) are opened from the
    include tree and scanned recursively; all other headers are only
    recorded. Malformed include lines are reported via error() and skipped.

    :returns: set of header names as written in the include directives
    """
    includes = set()
    with open(path) as fd:
        for line in fd:
            if "#include" in line:
                # Raw string: '\s' in a plain literal is an invalid escape.
                matched = re.match(r"#include\s*[\"<](.*)[\">]", line)
                if not matched:
                    error("bad include in {}: {}".format(path, line))
                    # The original fell through and crashed on
                    # matched.groups() here; skip the bad line instead.
                    continue
                header = matched.groups()[0]
                includes.add(header)
                if header.startswith("chemfiles"):
                    if header not in GENERATED_HEADERS:
                        # Separate name: the original rebound `path`,
                        # corrupting later error messages for this file.
                        child = os.path.join(ROOT, "include", header)
                        includes.update(included_headers(child))
    return includes
def check_allowded(headers):
    """Report every reachable header that is not on the public WHITELIST."""
    # (Name kept as-is, typo included: it is called elsewhere in this file.)
    leaked = [h for h in headers if h not in WHITELIST]
    for header in leaked:
        error("private header {} is publicly reachable".format(header))
if __name__ == '__main__':
    # Scan both public umbrella headers (C and C++ APIs) and exit non-zero
    # if any non-whitelisted header leaked into the public include graph.
    headers = included_headers(os.path.join(ROOT, "include", "chemfiles.h"))
    check_allowded(headers)
    headers = included_headers(os.path.join(ROOT, "include", "chemfiles.hpp"))
    check_allowded(headers)
    if ERRORS != 0:
        sys.exit(1)
|
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
import math
import numpy as np
import os, argparse
import time
import shutil
import subprocess
from s3dg import S3D
from dataset import VideoClipDataset, RandomSequenceSampler
from preprocessing import Preprocessing
# Command line interface; unknown arguments are tolerated (parse_known_args).
parser = argparse.ArgumentParser(description='HowTo100M clip-level video feature extractor')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--id2path', type=str, help='csv file with an map from id to video path')
parser.add_argument('--ann_file', type=str, help='caption.json in HowTo100M')
parser.add_argument('--data_root', type=str, default='./', help='data root of all the relative path')
parser.add_argument('--ITP', action='store_true', default=False, help='using ITP')
args, leftovers = parser.parse_known_args()
# python main.py --batch_size 32 --id2path test_id2path.csv --ann_file ../annotation/caption.json --data_root ./
# For ITP
if args.ITP:
    # Pin this process to one GPU using the OpenMPI local-rank env var and
    # select the per-rank shard of the id2path csv (suffix = rank).
    os.environ['CUDA_VISIBLE_DEVICES'] = os.environ['OMPI_COMM_WORLD_LOCAL_RANK']
    args.id2path = args.id2path + os.environ['CUDA_VISIBLE_DEVICES'] + ".csv"
    print('{} {} {} {}'.format(os.environ['CUDA_VISIBLE_DEVICES'], torch.cuda.current_device(), torch.cuda.device_count(), args.id2path))
# Instantiate the model
net = S3D(f'{args.data_root}/s3d_dict.npy', 512) # text module
net = net.cuda()
net.load_state_dict(torch.load(f'{args.data_root}/s3d_howto100m.pth')) # S3D
# Video input should be of size Batch x 3 x T x H x W and normalized to [0, 1]
dataset = VideoClipDataset(
    args.id2path,
    args.ann_file,
    args.data_root,
    framerate=16,
    size=224,
    centercrop=True, # TODO: use ?*224 or ?*224 + centercrop or 224*224
)
n_dataset = len(dataset)
# Randomized-sequence sampling only pays off beyond 10 items; otherwise
# fall back to the default sequential sampler.
sampler = RandomSequenceSampler(n_dataset, 10)
loader = DataLoader(
    dataset,
    batch_size=1,
    shuffle=False,
    num_workers=2,
    sampler=sampler if n_dataset > 10 else None,
)
preprocess = Preprocessing(framenum=16)
# Used only for log prefixes and the output file name below.
device_id = os.environ['CUDA_VISIBLE_DEVICES']
# Evaluation mode
net = net.eval()
# video_id -> list of per-clip feature arrays, dumped once at the end.
feature_file = {}
with torch.no_grad():
    time_s = time.time()
    for k, data in enumerate(loader):
        if data['video_id'][0] == 'NoFile':
            print('{} Computing features of video {}/{}: {}, But File Load Failed or Caption Not Exists'.format(device_id, k + 1, n_dataset, data['video_id'][0]))
            continue
        video_id = data['video_id'][0]
        # Load
        clip_lengths = []
        output_files = []
        video = []
        for clip_meta in data['input']:
            output_file = clip_meta['output_path'][0]
            clip = clip_meta['data'].squeeze(0)
            # clip in fact. with Frames * 3 * H * W
            if len(clip.shape) == 4:
                # Clip_Len * 3 * T * H * W // Clip_Len depends on how many Frames/framenum in this clip
                # TODO: ZeroPad to framenum in process
                _tmp = preprocess(clip)
                video.append(_tmp)
                clip_lengths.append(len(_tmp))
                output_files.append(output_file)
        # Inference
        # 'Video_Len' * 3 * T * H * W
        # NOTE(review): torch.cat raises if every clip was skipped above
        # (empty `video`) -- confirm the dataset guarantees a valid clip.
        video = torch.cat(video, dim=0)
        n_chunk = len(video)
        features = torch.cuda.FloatTensor(n_chunk, 1024).fill_(0)
        n_iter = int(math.ceil(n_chunk / float(args.batch_size)))
        for i in range(n_iter):
            min_ind = i * args.batch_size
            max_ind = (i + 1) * args.batch_size
            video_batch = video[min_ind:max_ind].cuda()
            # batch_size * 1024
            batch_features = net(video_batch)['mixed_5c']
            if False:
                # Disabled: optional L2 normalization of features.
                batch_features = F.normalize(batch_features, dim=1)
            features[min_ind:max_ind] = batch_features
        features = features.cpu().numpy()
        if False:
            # Disabled: optional half-precision storage.
            features = features.astype('float16')
        # Save: split the flat feature matrix back into per-clip slices.
        clip_end = 0
        clip_feature = []
        for clip_idx, output_file in enumerate(output_files):
            clip_begin = clip_end
            clip_end = clip_begin + clip_lengths[clip_idx]
            clip_feature.append(features[ clip_begin : clip_end ])
            # np.save(output_file, features[ clip_begin : clip_end ])
        # Save Way 2
        feature_file[video_id] = clip_feature
        print('{} Computing features of video {}/{}: {}, estimation: {}'.format(device_id, k + 1, n_dataset, video_id, (time.time() - time_s) * (n_dataset-k-1) / (k+1) / 3600))
        # Zip & remove.
        # output_dir = data['output_path'][0]
        # cmd = f'zip -0 -q {output_dir}.zip {output_dir}/*'
        # subprocess.check_output(cmd, shell=True, stderr=subprocess.DEVNULL)
        # shutil.rmtree(output_dir)
# Dump the feature_file
# NOTE(review): `data` here is whatever the LAST loop iteration left behind
# (NameError if the loader was empty) -- the output dir of the final batch.
torch.save(feature_file, os.path.join(data['output_path'][0], f"{device_id}.pth"))
|
from io import BytesIO
from pathlib import Path
from typing import (
Any,
List,
Mapping,
Optional,
Union,
)
import numpy as np
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.string_description import StringDescription
from sklearn.externals import joblib
from microcosm_sagemaker.testing.bytes_extractor import ExtractorMatcherPair
class IsObjectEqual(BaseMatcher):
    """
    Matcher to compare the object A with another object B.
    Sometimes there is some randomness within these generated files, so the user can optionally
    provide a `ignore_properties` array which will ignore mismatches of those given object
    properties. For instance, if we have:
        MyObject(foo=bar, extras=Test)
        MyObject(foo=bar, extras=Test2)
    If ignore_properties is not specified, these will be unequal. If ignore_properties is
    set to ["extras"], they will be equal.
    """
    def __init__(self, obj: Any, ignore_properties: Optional[List] = None) -> None:
        # None instead of a mutable [] default: a default list would be
        # shared (aliased) across every matcher instance.
        self.ignore_properties = ignore_properties if ignore_properties is not None else []
        self.compare_object = obj
    def _matches(self, item: Any) -> bool:
        """Return True when every non-ignored attribute matches between the objects."""
        compare_a = self._get_dict_structure(self.compare_object)
        compare_b = self._get_dict_structure(item)
        total_attributes = (
            attr
            for attr in set(compare_a.keys()) | set(compare_b.keys())
            if attr not in self.ignore_properties
        )
        for attr in total_attributes:
            # Attribute present on one side only -> mismatch.
            if attr not in compare_a or attr not in compare_b:
                return False
            if type(compare_a[attr]) != type(compare_b[attr]):  # noqa: 721
                return False
            if isinstance(compare_a[attr], np.ndarray):
                # ndarray `!=` is elementwise; reduce with .any().
                if (compare_a[attr] != compare_b[attr]).any():
                    return False
            elif compare_a[attr] != compare_b[attr]:
                return False
        return True
    def describe_to(self, description: StringDescription) -> None:
        description.append_text(str(self.compare_object))
    def _get_dict_structure(self, obj: Any) -> Mapping[str, Any]:
        """Normalize *obj* into a mapping of attribute name -> value."""
        if isinstance(obj, dict):
            return obj
        # the matcher expects a dictionary in the _matches method
        # -> we convert the set into a dictionary with a dummy key
        elif isinstance(obj, set):
            return dict(dummy_attrb=obj)
        return vars(obj)
def pickle_extractor(binary: bytes) -> Any:
    """Deserialize an object from raw pickle bytes via joblib."""
    buffer = BytesIO(binary)
    return joblib.load(buffer)
def matches_object(
    obj: Union[object, dict],
    ignore_properties: Optional[List[str]] = None,
) -> IsObjectEqual:
    """Build an IsObjectEqual matcher for *obj*, defaulting to no ignored properties."""
    props = [] if ignore_properties is None else ignore_properties
    return IsObjectEqual(obj, props)
def matches_with_object(obj: Any) -> IsObjectEqual:
    """Shorthand: matcher for *obj* with nothing ignored."""
    matcher = matches_object(obj)
    return matcher
def _get_dir_pickles(dir: Path, pickle_suffixes: List[str]) -> List[Path]:
return [
subpath.relative_to(dir)
for subpath in dir.glob("**/*")
if subpath.suffix in pickle_suffixes
]
def _create_matchers(dir_pickles: List[Path]) -> Mapping[Path, ExtractorMatcherPair]:
    """Pair each pickle path with the (extractor, matcher-factory) used to verify it."""
    matchers = {}
    for pickle_path in dir_pickles:
        matchers[pickle_path] = ExtractorMatcherPair(
            pickle_extractor,
            matches_with_object,
        )
    return matchers
def create_pickle_matchers_for_dir(dir, pickle_suffixes=(".pk", ".pickle", ".pkl")):
    """Build an extractor/matcher pair for every pickle file under *dir*.

    :param dir: (Path) directory to scan recursively
    :param pickle_suffixes: file suffixes treated as pickles (immutable
        tuple default replaces the original mutable list default)
    :returns: mapping of relative pickle path -> ExtractorMatcherPair
    """
    dir_pickles = _get_dir_pickles(dir, pickle_suffixes)
    matchers = _create_matchers(dir_pickles)
    return matchers
|
import argparse
import logging
import sys
from requests.exceptions import RequestException
from dothttp.log_utils import setup_logging
from .exceptions import DotHttpException
from .request_base import CurlCompiler, RequestCompiler, HttpFileFormatter, Config, eprint
logger = logging.getLogger('dothttp')
def apply(args: Config):
    """Select the compiler implied by *args*, run it, and map failures to stderr."""
    setup_logging(logging.DEBUG if args.debug else logging.CRITICAL)
    logger.info(f'command line arguments are {args}')
    if args.format:
        # The formatter is gated behind the experimental flag.
        if not args.experimental:
            eprint("http formatter is still in experimental phase. enable experimental flag to use it (--experimental)")
            sys.exit(1)
        compiler_cls = HttpFileFormatter
    elif args.curl:
        compiler_cls = CurlCompiler
    else:
        compiler_cls = RequestCompiler
    try:
        compiler_cls(args).run()
    except DotHttpException as dothttp_exc:
        # Known, user-facing failure: show only its message.
        logger.error(f'dothttp exception happened {dothttp_exc}', exc_info=True)
        eprint(dothttp_exc.message)
    except RequestException as exc:
        logger.error(f'exception from requests {exc}', exc_info=True)
        eprint(exc)
    except Exception as exc:
        logger.error(f'unknown error happened {exc}', exc_info=True)
        eprint(f'unknown exception occurred with message {exc}')
def main():
    """Parse and validate the command line, build a Config, and run apply()."""
    parser = argparse.ArgumentParser(
        description='http requests for humans', prog="dothttp")
    general_group = parser.add_argument_group('general')
    general_group.add_argument('--curl', help='generates curl script',
                               action='store_const', const=True)
    property_group = parser.add_argument_group('property')
    property_group.add_argument('--property-file', '-p', help='property file')
    general_group.add_argument('--no-cookie', '-nc', help='cookie storage is disabled', action='store_const',
                               const=True)
    property_group.add_argument('--env', '-e',
                                help='environment to select in property file. properties will be enabled on FIFO',
                                nargs='+', default=['*'])
    general_group.add_argument(
        '--debug', '-d', help='debug will enable logs and exceptions', action='store_const', const=True)
    general_group.add_argument(
        '--info', '-i', help='more information', action='store_const', const=True)
    fmt_group = parser.add_argument_group('format')
    fmt_group.add_argument(
        '--format', '-fmt', help='format http file', action='store_const', const=True)
    # NOTE(review): '--b' (double dash, one letter) looks like a typo for
    # '-b', and this flag sits in the property group although it gates the
    # formatter -- confirm before changing either.
    property_group.add_argument(
        '--experimental', '--b', help='enable experimental', action='store_const', const=True)
    fmt_group.add_argument(
        '--stdout', help='print to commandline', action='store_const', const=True)
    property_group.add_argument(
        '--property', help='list of property\'s', nargs='+', default=[])
    general_group.add_argument('file', help='http file')
    general_group.add_argument('--target', '-t', help='targets a particular http definition', type=str)
    args = parser.parse_args()
    # --debug and --info are mutually exclusive by convention here.
    if args.debug and args.info:
        eprint("info and debug are conflicting options, use debug for more information")
        sys.exit(1)
    # Command line properties must be key=value pairs.
    for one_prop in args.property:
        if '=' not in one_prop:
            # FUTURE,
            # this can be done better by adding validation in add_argument.
            eprint(f"command line property: `{one_prop}` is invalid, expected prop=val")
            sys.exit(1)
    config = Config(curl=args.curl, property_file=args.property_file, env=args.env, debug=args.debug, file=args.file,
                    info=args.info, properties=args.property, no_cookie=args.no_cookie,
                    target=args.target,
                    format=args.format, stdout=args.stdout, experimental=args.experimental)
    apply(config)
if __name__ == "__main__":
    main()
|
# coding:utf-8
import sys
import csv
import json
import os
import FuelSDK
import et_objects
from getpass import getpass
# Per-user FuelSDK configuration file; written by configure() below and read
# by ET_Client when no explicit credentials are given.
CONFIG_PATH = '~/.fuelsdk/config.python'
class Commands(object):
def authenticate(self, client_id=None, client_secret=None, debug=False):
if client_id is None or client_secret is None:
self.client = FuelSDK.ET_Client(debug=debug)
else:
self.client = FuelSDK.ET_Client(
params={
'clientid': client_id,
'clientsecret': client_secret
}, debug=debug)
    def configure(self, args):
        """Interactively write the FuelSDK config file (prompts for the
        client id/secret; asks before overwriting an existing file).

        NOTE(review): raw_input is the Python-2 builtin (input() in
        Python 3) -- this module appears to target Python 2 only.
        """
        fuelsdk_config = os.path.expanduser(CONFIG_PATH)
        if (
            os.path.isfile(fuelsdk_config) and
            raw_input('Do you want to overwrite {} ?(y/n)'.format(CONFIG_PATH)) != 'y'
        ):
            return
        client_id = raw_input('Input Your ExactTarget Client ID: ')
        # getpass: secret is not echoed to the terminal.
        client_secret = getpass('Input Your ExactTarget Client Secret: ')
        fuelsdk_dir = os.path.expanduser('~/.fuelsdk')
        if not os.path.isdir(fuelsdk_dir):
            os.mkdir(fuelsdk_dir)
        f = open(fuelsdk_config, 'w')
        f.write("""[Web Services]
appsignature: none
clientid: {0}
clientsecret: {1}
defaultwsdl: https://webservice.exacttarget.com/etframework.wsdl
authenticationurl: https://auth.exacttargetapis.com/v1/requestToken?legacy=1""".format(client_id, client_secret))
def describe_de_command(self, args):
fields = self.describe_de(args)
print(json.dumps(fields))
def describe_de(self, args):
"""
describe data extension with customer key.
:param string customer_key: data extension's customer key
:return: data extension's name array.
"""
de_target_fields = [
"Name",
"CustomerKey",
"DefaultValue",
"FieldType",
"Scale",
"MaxLength",
"IsPrimaryKey",
"IsRequired",
]
deColumn = FuelSDK.ET_DataExtension_Column()
deColumn.auth_stub = self.client
deColumn.props = de_target_fields
deColumn.search_filter = {
'Property': 'DataExtension.CustomerKey',
'SimpleOperator': 'equals',
'Value': args.customer_key
}
response = deColumn.get()
return [
self.convert_field_to_dict(result, de_target_fields)
for result in response.results
]
def convert_field_to_dict(self, field, target_fields):
converted_dict = {}
for field_name in target_fields:
if hasattr(field, field_name):
converted_dict[field_name] = getattr(field, field_name)
return converted_dict
def retrieve_de(self, args):
"""
retrieve all rows from data extension.
:param string customer_key: data extension's customer key
:return: data extension's name array.
"""
fields = self.describe_de(args)
row = FuelSDK.ET_DataExtension_Row()
row.auth_stub = self.client
row.CustomerKey = args.customer_key
row.props = [field['Name'] for field in fields]
response = row.get()
writer = csv.writer(
sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
writer.writerow(row.props)
for result in response.results:
row = []
for prop in result.Properties[0]:
if prop.Value is None:
row.append("")
else:
row.append(prop.Value.encode("utf-8"))
writer.writerow(row)
def describe_all_de(self, args):
"""
describe all data extension.
:param string customer_key: data extension's customer key
:return: data extension's name array.
"""
de = FuelSDK.ET_DataExtension()
de.auth_stub = self.client
de.props = ["Name", "CustomerKey", "ObjectID"]
response = de.get()
writer = csv.writer(
sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
writer.writerow(de.props)
for result in response.results:
writer.writerow([
result.Name.encode("utf-8"),
result.CustomerKey.encode("utf-8"),
result.ObjectID.encode("utf-8")
])
    def retrieve_subs(self, args):
        """
        Retrieve all subscribers and write them as quoted CSV to stdout:
        SubscriberID, EmailAddress, SubscriberKey, plus one column per
        attribute of the FIRST subscriber.
        """
        getSub = FuelSDK.ET_Subscriber()
        getSub.auth_stub = self.client
        response = getSub.get()
        attributes = []
        # Attribute columns come from the first result only.
        # NOTE(review): if a later subscriber is missing one of these
        # attributes, field_map[attribute] below raises KeyError -- confirm
        # attributes are uniform across subscribers.
        if (hasattr(response.results[0], 'Attributes')):
            attributes = [
                attr.Name.encode("utf-8")
                for attr in response.results[0].Attributes
            ]
        writer = csv.writer(
            sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
        header = ["SubscriberID", "EmailAddress", "SubscriberKey"]
        header.extend(attributes)
        writer.writerow(header)
        for result in response.results:
            field_map = {}
            if (hasattr(result, 'Attributes')):
                for field in result.Attributes:
                    field_map[field.Name] = field.Value
            fields = [result.ID, result.EmailAddress, result.SubscriberKey]
            for attribute in attributes:
                val = field_map[attribute]
                # Empty cell for NULL attribute values.
                if val is None:
                    fields.append("")
                else:
                    fields.append(val.encode("utf-8"))
            writer.writerow(fields)
def retrieve_triggeredsend(self, args):
"""
retrive a triggered send with customer key.
:param string customer_key: data extension's customer key
:return: data extension's name array.
"""
getTS = FuelSDK.ET_TriggeredSend()
getTS.auth_stub = self.client
getTS.props = [
"CustomerKey",
"Name",
"TriggeredSendStatus",
"ObjectID"
]
getTS.search_filter = {
'Property': 'CustomerKey',
'SimpleOperator': 'equals',
'Value': args.customer_key
}
getResponse = getTS.get()
for result in getResponse.results:
return result.ObjectID
return ""
    def retrieve_sentevent(self, args):
        """
        Retrieve every sent event of the triggered send identified by
        args.customer_key and write them as quoted CSV to stdout, following
        pagination until exhausted.
        """
        triggeredSendDefinitionObjectID = self.retrieve_triggeredsend(args)
        getSentEvent = FuelSDK.ET_SentEvent()
        getSentEvent.auth_stub = self.client
        getSentEvent.props = [
            "SendID",
            "SubscriberKey",
            "EventDate",
            "Client.ID",
            "EventType",
            "BatchID",
            "TriggeredSendDefinitionObjectID",
            "ListID",
            "PartnerKey",
            "SubscriberID"
        ]
        getSentEvent.search_filter = {
            'Property': 'TriggeredSendDefinitionObjectID',
            'SimpleOperator': 'equals',
            'Value': triggeredSendDefinitionObjectID
        }
        getResponse = getSentEvent.get()
        writer = csv.writer(
            sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
        # NOTE(review): header says "SubscriberID" but the rows below write
        # result.SubscriberKey -- confirm which one is intended.
        writer.writerow(["EventDate", "SubscriberID"])
        for result in getResponse.results:
            writer.writerow([result.EventDate, result.SubscriberKey])
        # Follow pagination: getMoreResults() until the API reports no more.
        while getResponse.more_results:
            getResponse = getSentEvent.getMoreResults()
            for result in getResponse.results:
                writer.writerow([result.EventDate, result.SubscriberKey])
def retrieve_openevent(self, args):
    """
    Retrieve all OpenEvent rows for a triggered send and stream them to
    stdout as CSV.

    :param args: parsed CLI args; ``args.customer_key`` identifies the
        triggered send definition.
    :return: None; rows are written to stdout.
    """
    # Resolve the TriggeredSendDefinition ObjectID from the customer key.
    triggeredSendDefinitionObjectID = self.retrieve_triggeredsend(args)
    getOpenEvent = FuelSDK.ET_OpenEvent()
    getOpenEvent.auth_stub = self.client
    getOpenEvent.props = [
        "SendID",
        "SubscriberKey",
        "EventDate",
        "Client.ID",
        "EventType",
        "BatchID",
        "TriggeredSendDefinitionObjectID",
        "ListID",
        "PartnerKey",
        "SubscriberID"
    ]
    getOpenEvent.search_filter = {
        'Property': 'TriggeredSendDefinitionObjectID',
        'SimpleOperator': 'equals',
        'Value': triggeredSendDefinitionObjectID
    }
    getResponse = getOpenEvent.get()
    writer = csv.writer(
        sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
    # NOTE(review): the header says "SubscriberID" but each row carries
    # result.SubscriberKey -- confirm which field consumers expect.
    writer.writerow(["EventDate", "SubscriberID"])
    for result in getResponse.results:
        writer.writerow([result.EventDate, result.SubscriberKey])
    # Page through any remaining result batches.
    while getResponse.more_results:
        getResponse = getOpenEvent.getMoreResults()
        for result in getResponse.results:
            writer.writerow([result.EventDate, result.SubscriberKey])
def retrieve_bounceevent(self, args):
    """
    Retrieve all BounceEvent rows for a triggered send and stream them to
    stdout as CSV.

    :param args: parsed CLI args; ``args.customer_key`` identifies the
        triggered send definition.
    :return: None; rows are written to stdout.
    """
    # Resolve the TriggeredSendDefinition ObjectID from the customer key.
    triggeredSendDefinitionObjectID = self.retrieve_triggeredsend(args)
    getBounceEvent = FuelSDK.ET_BounceEvent()
    getBounceEvent.auth_stub = self.client
    getBounceEvent.props = [
        "SendID",
        "SubscriberKey",
        "EventDate",
        "Client.ID",
        "EventType",
        "BatchID",
        "TriggeredSendDefinitionObjectID",
        "ListID",
        "PartnerKey",
        "SubscriberID"
    ]
    getBounceEvent.search_filter = {
        'Property': 'TriggeredSendDefinitionObjectID',
        'SimpleOperator': 'equals',
        'Value': triggeredSendDefinitionObjectID
    }
    getResponse = getBounceEvent.get()
    writer = csv.writer(
        sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
    # NOTE(review): the header says "SubscriberID" but each row carries
    # result.SubscriberKey -- confirm which field consumers expect.
    writer.writerow(["EventDate", "SubscriberID"])
    for result in getResponse.results:
        writer.writerow([result.EventDate, result.SubscriberKey])
    # Page through any remaining result batches.
    while getResponse.more_results:
        getResponse = getBounceEvent.getMoreResults()
        for result in getResponse.results:
            writer.writerow([result.EventDate, result.SubscriberKey])
def create_de_row(self, args):
    """
    Create a data extension row and print the API result as JSON.

    :param args: parsed CLI args with ``customer_key`` (the data
        extension's customer key) and ``attributes_json`` (a JSON object
        mapping column names to values).
    """
    deRow = FuelSDK.ET_DataExtension_Row()
    deRow.CustomerKey = args.customer_key
    deRow.auth_stub = self.client
    # Fix: the original also did
    #     args.attributes = json.loads(args.attribute_file.read())
    # and discarded the result, crashing when no attribute file was
    # supplied. The row values come from attributes_json only.
    deRow.props = json.loads(args.attributes_json)
    deRowResponse = deRow.post()
    print(json.dumps(deRowResponse.results))
def triggered_send(self, args):
    """Fire a triggered send for one subscriber and print the results as JSON.

    Optional personalisation attributes are read as a JSON object from
    ``args.attribute_file`` when supplied.
    """
    trigger = FuelSDK.ET_TriggeredSend()
    trigger.auth_stub = self.client
    trigger.props = {"CustomerKey": args.customer_key}
    if args.attribute_file is None:
        attributes = {}
    else:
        attributes = json.loads(args.attribute_file.read())
    trigger.subscribers = [{
        "EmailAddress": args.email,
        "SubscriberKey": args.subscriber_key,
    }]
    trigger.attributes = [{"Name": name, "Value": value}
                          for name, value in attributes.items()]
    response = trigger.send()
    summary = []
    for result in response.results:
        summary.append({
            "StatusCode": result.StatusCode,
            "StatusMessage": result.StatusMessage,
            "OrdinalID": result.OrdinalID,
            "NewID": result.NewID,
            # ErrorCode is only present on failures.
            "ErrorCode": getattr(result, "ErrorCode", None),
        })
    print(json.dumps(summary))
def push_message(self, args):
    """Send a push message to the given subscribers/devices and print the result.

    Extra message parameters come from ``args.additional_params`` or, when
    that is None, are read as a JSON document from stdin.
    """
    pushMessageContact = et_objects.ET_PushMessageContact()
    pushMessageContact.auth_stub = self.client
    pushMessageContact.props = {
        "messageId": args.message_id,
        "SubscriberKeys": args.subscriber_keys,
        "DeviceTokens": args.device_tokens
    }
    if args.is_override:
        pushMessageContact.props['Override'] = True
    # Fall back to stdin so extra parameters can be piped in.
    input_data = args.additional_params if args.additional_params is not None else sys.stdin.read()
    pushMessageContact.props.update(json.loads(input_data))
    pushMessageContactResponse = pushMessageContact.post()
    print(json.dumps(pushMessageContactResponse.results))
def fire_event(self, args):
    """Post an interaction event for a contact and print the API results.

    The event payload is read as a JSON document from ``args.data_file``.
    """
    event = et_objects.ET_InteractionEvents()
    event.auth_stub = self.client
    event.props = {
        "ContactKey": args.subscriber_key,
        "EventDefinitionKey": args.event_definition_key,
        "Data": json.loads(args.data_file.read())
    }
    response = event.post()
    print(json.dumps(response.results))
|
import os
import argparse
import json
import numpy as np
from PIL import Image
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models, transforms
def main():
    """CLI entry point: load a checkpoint and print top-k class predictions."""
    args = get_arguments()
    model = load_checkpoint(args.checkpoint)
    # Invert class_to_idx so model outputs can be mapped back to class labels.
    model.idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    if args.gpu_av:
        model = model.cuda()
    with open(args.cat_file, 'r') as f:
        cat_to_name = json.load(f)
    probs, indices = predict(args.input, model, args.gpu_av, topk=int(args.top_k))
    labels = [model.idx_to_class[i] for i in indices]
    print(probs)
    print(labels)
    print([cat_to_name.get(label, 'NotFound') for label in labels])
def get_arguments():
    """Parse command-line arguments for prediction.

    Fix: the original registered "--category_names " with a trailing
    space, which only matched via argparse's prefix (abbreviation)
    matching and breaks under allow_abbrev=False.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--category_names", action="store", dest="cat_file",
                        default="cat_to_name.json", help="Categories to names")
    parser.add_argument("--top_k", action="store", dest="top_k",
                        default=5, help="Set number of results to return")
    parser.add_argument("--gpu", action="store_true", dest="gpu_av",
                        default=False, help="Wanna use GPU?")
    parser.add_argument('input', action="store")
    parser.add_argument('checkpoint', action="store")
    return parser.parse_args()
def load_checkpoint(filepath):
    """Rebuild a trained classifier from a saved checkpoint.

    :param filepath: path to a torch checkpoint containing 'arch',
        'classifier', 'state_dict' and 'class_to_idx'.
    :raises ValueError: for unsupported architectures (the original fell
        through with an unbound ``model`` and raised a confusing NameError).
    """
    checkpoint = torch.load(filepath)
    if checkpoint['arch'] == "vgg19":
        model = models.vgg19(pretrained=True)
    elif checkpoint['arch'] == "densenet121":
        model = models.densenet121(pretrained=True)
    else:
        raise ValueError("Unsupported architecture: %s" % checkpoint['arch'])
    # Freeze the pretrained feature extractor.
    for param in model.parameters():
        param.requires_grad = False
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    # Force conv/linear weights to CPU float32 -- presumably the checkpoint
    # may hold CUDA or half tensors (TODO confirm against training script).
    for x in model.features:
        if isinstance(x, nn.Conv2d):
            x.weight.data = x.weight.data.type(torch.FloatTensor)
            if x.bias is not None:
                x.bias.data = x.bias.data.type(torch.FloatTensor)
    for x in model.classifier:
        if isinstance(x, nn.Linear):
            x.weight.data = x.weight.data.type(torch.FloatTensor)
            if x.bias is not None:
                x.bias.data = x.bias.data.type(torch.FloatTensor)
    return model
def process_image(img):
    """Convert a PIL image into a normalized CHW float tensor.

    Resizes the width to 256 keeping aspect ratio, center-crops 224x224,
    scales pixels to [0, 1], applies ImageNet mean/std normalization and
    reorders HWC -> CHW.
    """
    target_w = 256
    target_h = int(img.size[1] / img.size[0] * target_w)
    resized = img.resize((target_w, target_h))
    center_x = resized.size[0] / 2
    center_y = resized.size[1] / 2
    # 224x224 center crop (112 pixels either side of the midpoint).
    cropped = resized.crop((
        center_x - 112,
        center_y - 112,
        center_x + 112,
        center_y + 112,
    ))
    pixels = np.array(cropped) / 255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    normalized = (pixels - mean) / std
    return torch.from_numpy(normalized.transpose((2, 0, 1)))
def predict(image_path, model, gpu_av, topk=5):
    """Return (probabilities, class indices) for the top-k predictions.

    :param image_path: path to the input image.
    :param model: trained classifier whose forward() returns log-probabilities.
    :param gpu_av: when True, run inference on CUDA.
    :param topk: number of top predictions to return.
    """
    with Image.open(image_path) as im:
        img = process_image(im)
    # Fix: cast on CPU first; the original cast to torch.cuda.FloatTensor
    # unconditionally, crashing on CPU-only machines even with gpu_av=False.
    img = img.type(torch.FloatTensor)
    with torch.no_grad():
        img = img.unsqueeze(0)
        if gpu_av:
            img = img.cuda()
        output = model.forward(img)
    # Model outputs log-probabilities; exponentiate to get probabilities.
    ps = torch.exp(output)
    probs, indices = ps.topk(topk)
    return probs.data[0].cpu().numpy(), indices.data[0].cpu().numpy()
# Only run when executed as a script, not when imported.
if __name__ == "__main__":
    main()
# encoding: utf-8
"""
capability/__init__.py
Created by Thomas Mangin on 2012-07-17.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message.open.capability.capability import Capability
from exabgp.bgp.message.open.capability.addpath import AddPath
from exabgp.bgp.message.open.capability.asn4 import ASN4
from exabgp.bgp.message.open.capability.graceful import Graceful
from exabgp.bgp.message.open.capability.mp import MultiProtocol
from exabgp.bgp.message.open.capability.ms import MultiSession
from exabgp.bgp.message.open.capability.operational import Operational
from exabgp.bgp.message.open.capability.refresh import RouteRefresh
from exabgp.bgp.message.open.capability.refresh import EnhancedRouteRefresh
from exabgp.bgp.message.open.capability.unknown import UnknownCapability
# Must be imported and registered for the register API to work.
# Each call binds a capability class to its wire code; a class registered
# under two codes (MultiSession, RouteRefresh) answers for both the
# standard and the pre-standard Cisco code points.
Capability.register_capability(AddPath)
Capability.register_capability(ASN4)
Capability.register_capability(Graceful)
Capability.register_capability(MultiProtocol)
Capability.register_capability(MultiSession,Capability.CODE.MULTISESSION_CISCO)
Capability.register_capability(MultiSession,Capability.CODE.MULTISESSION)
Capability.register_capability(Operational)
Capability.register_capability(RouteRefresh,Capability.CODE.ROUTE_REFRESH)
Capability.register_capability(RouteRefresh,Capability.CODE.ROUTE_REFRESH_CISCO)
Capability.register_capability(EnhancedRouteRefresh)
# Unrecognised capability codes fall back to UnknownCapability instead of failing.
Capability.fallback_capability(UnknownCapability)
# End registration
class REFRESH (object):
    # Route-refresh negotiation states. Values are distinct powers of two,
    # presumably so they can be combined as flags -- TODO confirm usage.
    ABSENT = 0x01    # peer did not advertise route-refresh
    NORMAL = 0x02    # plain route-refresh negotiated
    ENHANCED = 0x04  # enhanced route-refresh negotiated
|
from . import requests
from .stats import box_plots, date_bins, date_histograms, counts
async def visualizations(startDate,
                         endDate,
                         requestTypes=None,
                         ncList=None):
    """Build the visualization payload (frequency, time-to-close, counts)
    for 311 requests in the given date range.

    :param startDate: inclusive start of the reporting window.
    :param endDate: inclusive end of the reporting window.
    :param requestTypes: request types to include (default: all given types).
    :param ncList: neighborhood councils to include.
    :return: dict with 'frequency', 'timeToClose' and 'counts' sections.
    """
    # Fix: mutable default arguments ([]) are shared across calls; use
    # None sentinels instead. Behavior for existing callers is unchanged.
    requestTypes = [] if requestTypes is None else requestTypes
    ncList = [] if ncList is None else ncList
    # date_bins may widen the window to whole bins; query with the widened
    # range, then narrow back down for the inner statistics.
    bins, start, end = date_bins(startDate, endDate)
    fields = [
        'requesttype',
        'createddate',
        '_daystoclose',
        'requestsource']
    filters = {
        'startDate': start,
        'endDate': end,
        'requestTypes': requestTypes,
        'ncList': ncList}
    df = requests.standard_query(fields, filters, table='vis')
    # Restrict to the caller's exact window for box plots and counts.
    inner_df = df.loc[
        (df['createddate'] >= startDate) &
        (df['createddate'] <= endDate)]
    return {
        'frequency': {
            'bins': list(bins.astype(str)),
            'counts': date_histograms(
                df,
                dateField='createddate',
                bins=bins,
                groupField='requesttype',
                groupFieldItems=requestTypes)},
        'timeToClose': box_plots(
            inner_df,
            plotField='_daystoclose',
            groupField='requesttype',
            groupFieldItems=requestTypes),
        'counts': {
            'type': counts(inner_df, groupField='requesttype'),
            'source': counts(inner_df, groupField='requestsource')}
    }
|
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.shortcuts import render, redirect
from .forms import ContactForm, ReplyForm
import requests
from django.contrib.auth.decorators import login_required
from django.contrib import messages
# for contact form queries
from contactform.models import ContactForm_queries, ReplyForm_queries
from django.views.decorators.cache import cache_control
def emailView(request):
    """Contact-form view: verify reCAPTCHA, persist the query and email it.

    GET renders an empty form; POST validates the form, checks the
    reCAPTCHA token server-side, stores the query and sends a
    notification mail before redirecting to the success page.
    """
    if request.method == 'GET':
        form = ContactForm()
    else:
        form = ContactForm(request.POST)
        if form.is_valid():
            ''' code for recaptcha verification '''
            recaptcha_response = request.POST.get('g-recaptcha-response')
            data = {
                'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
                'response': recaptcha_response
            }
            # Server-side verification against Google's siteverify endpoint.
            r = requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)
            result = r.json()
            ''' End reCAPTCHA validation '''
            if result['success']:
                name = form.cleaned_data['name']
                subject = form.cleaned_data['subject']
                email = form.cleaned_data['email']
                message = form.cleaned_data['message']
                try:
                    # get_or_create avoids duplicate rows on re-submission.
                    contactformqueries = ContactForm_queries.objects.get_or_create(name=name,email=email,subject=subject,message=message)
                    send_mail('New Enquiry : '+subject,'From : '+name+'\n'+message, email, ['poojariv53@gmail.com'])
                except BadHeaderError:
                    return HttpResponse('Invalid header found.')
                return redirect('success')
            else:
                form.add_error(None, "invalid captcha")
    # Fallthrough: GET, invalid form, or failed captcha re-renders the form.
    return render(request, "contactform.html", {'form': form})
def successView(request):
    """Render the thank-you page shown after a successful contact submission."""
    return render(request,'thanks_for_message.html')
# @login_required
# def queriesList(request):
# queries_list = ContactForm_queries.objects.order_by('email')
# my_queries={'queries_list':queries_list}
# return render(request,'queries_list.html',context=my_queries)
@cache_control(no_cache=True, must_revalidate=True, no_store=True)
@login_required
def queriesList(request):
    """Staff view: list contact queries and send an email reply to one.

    POST with a valid ReplyForm stores the reply and mails it to the
    original requester; in all cases the full query list is re-rendered.
    """
    if request.method == 'GET':
        form = ReplyForm()
    else :
        form = ReplyForm(request.POST)
        if form.is_valid():
            query = ContactForm_queries.objects.get(id = form.cleaned_data['pid'])
            replymessage = form.cleaned_data['reply_message']
            try:
                # get_or_create avoids duplicate replies on re-submission.
                replyformqueries = ReplyForm_queries.objects.get_or_create(query_user = query,reply_message = replymessage)
                send_mail('Reg : '+query.subject,'Dear Sir/Madam,\nThanks for your query.\n'+replymessage, 'poojariv53@gmail.com', [query.email])
                messages.success(request,'Message sent to user')
            except BadHeaderError:
                return HttpResponse('Invalid header found.')
    # NOTE(review): form errors are not passed back to the template, so an
    # invalid reply fails silently -- confirm whether that is intended.
    queries_list=ContactForm_queries.objects.all()
    my_queries={'queries_list':queries_list}
    return render(request,'queries_list.html',context=my_queries)
|
#!/usr/bin/env python2
import re
#Author: Stefan Toman
if __name__ == '__main__':
    # NOTE: Python 2 script (raw_input / print statement); do not run under Python 3.
    # read input: first line is the number of HTML lines that follow
    n = int(raw_input())
    s = ""
    # Concatenate all lines so anchors spanning multiple lines still match.
    for _ in range(n):
        s += raw_input()
    # print output: "href,link-text" for every <a> tag; inner tags are stripped.
    for r in re.finditer("<a ([^<>]*)href=['\"](\S*)['\"]([^<>]*)>(.*?)</a>", s):
        print "%s,%s" % (r.groups()[1].strip(), re.sub("<(.*?)>", "", r.groups()[3]).strip())
|
import os
import time
from functools import wraps
class LazyProperty:
    """Descriptor that computes a value on first access and caches it.

    After the first read, the computed value is stored on the instance
    under the function's name, shadowing this (non-data) descriptor so
    subsequent reads are plain attribute lookups.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, cls):
        if instance is None:
            # Accessed on the class itself: expose the descriptor.
            return self
        result = self.func(instance)
        setattr(instance, self.func.__name__, result)
        return result
def time_this(func):
    """Decorator: append *func*'s wall-clock runtime to a log file.

    The log path is stored on the decorator itself
    (``time_this.TIMETHIS_PATH``) so callers can inspect or override it.
    """
    time_this.TIMETHIS_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
                                           "logs/run/runtime.log")
    # Fix: create the log directory if missing; the original raised
    # FileNotFoundError on a fresh checkout without logs/run/.
    os.makedirs(os.path.dirname(time_this.TIMETHIS_PATH), exist_ok=True)

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        end = time.perf_counter()
        with open(time_this.TIMETHIS_PATH, 'a+') as f:
            print(func.__name__, round(end - start, 2), 's', file=f)
        return result
    return wrapper
def singleton(cls, *args, **kw):
    """Class decorator: make *cls* produce a single shared instance.

    Fix: the original wrapper took no arguments, so any call like
    ``MyClass(5)`` after decoration raised TypeError. The wrapper now
    accepts constructor arguments; they are used on the first call only
    (falling back to the decoration-time *args*/*kw*), and later calls
    return the cached instance regardless of arguments.
    """
    instances = {}

    def _singleton(*call_args, **call_kw):
        if cls not in instances:
            instances[cls] = cls(*(call_args or args), **(call_kw or kw))
        return instances[cls]
    return _singleton
|
import tensorflow as tf
if False:
from typing import List
def isolate_rectangle(tensor,  # type: tf.Tensor
                      corner_1,  # type: List[int]
                      corner_2  # type: List[int]
                      ):
    """Zero out everything outside the axis-aligned box [corner_1, corner_2).

    The first and last dimensions (presumably batch and channels in an
    NHWC layout -- TODO confirm with callers) are left untouched; the
    corners index the middle dimensions.
    """
    # Total amount to remove per middle dim: the leading corner plus what
    # lies beyond corner_2.
    shape_to_subtract = [a + (tensor.shape[b] - corner_2[b]) for a, b in zip(corner_1, range(len(corner_2)))]
    center_shape = tensor.shape - tf.constant([0] + shape_to_subtract + [0])
    # Per-dim [before, after] padding that restores the original shape.
    # NOTE(review): tensor.shape[b] indexes from dim 0 while the mask below
    # offsets the corner dims by one ([[0, 0]] prefix) -- this looks off by
    # one dimension; confirm against actual usage.
    shape_to_pad = [[a, tensor.shape[b] - corner_2[b]] for a, b in zip(corner_1, range(len(corner_2)))]
    # Mask of ones inside the box, zeros outside.
    center_box = tf.pad(tf.ones(center_shape), [[0, 0]] + shape_to_pad + [[0, 0]])
    output_tensor = tensor * center_box
    return output_tensor
def pad_inwards(tensor, paddings):
    """Zero out a border of *tensor*: keep the interior, mask the padded rim.

    *paddings* follows tf.pad's [dims, 2] convention of [before, after]
    amounts per dimension.
    """
    # Interior shape after removing before+after padding on each dim.
    center_shape = tensor.shape - tf.reduce_sum(paddings, -1)
    # Ones in the interior, zeros in the border.
    center_box = tf.pad(tf.ones(center_shape), paddings)
    output_tensor = center_box * tensor
    return output_tensor
|
from typing import Optional, Sequence
from git import Repo
from flexlate.branch_update import get_flexlate_branch_name_for_feature_branch
from flexlate.constants import DEFAULT_TEMPLATE_BRANCH_NAME, DEFAULT_MERGED_BRANCH_NAME
from flexlate.ext_git import branch_exists, push_to_remote
from flexlate.styles import print_styled, ALERT_STYLE, INFO_STYLE, SUCCESS_STYLE
class Pusher:
    """Pushes flexlate-managed branches to a git remote."""

    def push_main_flexlate_branches(
        self,
        repo: Repo,
        remote: str = "origin",
        merged_branch_name: str = DEFAULT_MERGED_BRANCH_NAME,
        template_branch_name: str = DEFAULT_TEMPLATE_BRANCH_NAME,
    ):
        """Push the repo-wide template and merged branches to *remote*."""
        branches = [template_branch_name, merged_branch_name]
        _push_branches_to_remote(repo, branches, remote=remote)

    def push_feature_flexlate_branches(
        self,
        repo: Repo,
        feature_branch: Optional[str] = None,
        remote: str = "origin",
        merged_branch_name: str = DEFAULT_MERGED_BRANCH_NAME,
        template_branch_name: str = DEFAULT_TEMPLATE_BRANCH_NAME,
    ):
        """Push the per-feature template and merged branches to *remote*.

        Defaults to the currently checked-out branch when *feature_branch*
        is not given.
        """
        base_branch = feature_branch or repo.active_branch.name
        branches = [
            get_flexlate_branch_name_for_feature_branch(
                base_branch, template_branch_name
            ),
            get_flexlate_branch_name_for_feature_branch(
                base_branch, merged_branch_name
            ),
        ]
        _push_branches_to_remote(repo, branches, remote=remote)
def _push_branches_to_remote(
    repo: Repo, branch_names: Sequence[str], remote: str = "origin"
):
    """Push each branch to *remote*, aborting if any branch is missing locally."""
    missing = [name for name in branch_names if not branch_exists(repo, name)]
    if missing:
        # Report only the first missing branch, matching prior behavior.
        print_styled(
            f"Could not push branch {missing[0]} as it does not exist", ALERT_STYLE
        )
        return
    and_branches = " and ".join(branch_names)
    print_styled(
        f"Pushing {and_branches} to remote {remote}",
        INFO_STYLE,
    )
    for name in branch_names:
        push_to_remote(repo, name, remote_name=remote)
    print_styled("Successfully pushed branches to remote", SUCCESS_STYLE)
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from msgvis.apps.base import views
# NOTE(review): patterns() and string view references were removed in
# Django 1.10 -- this module only works on Django <= 1.9.
urlpatterns = patterns('',
    url(r'^$', views.HomeView.as_view(), name='home'),
    # url(r'^explorer/$', views.ExplorerView.as_view(), name='explorer'),
    # Plain tuple route using the old string-based view path.
    (r'^accounts/logout/$', 'django.contrib.auth.views.logout',{'next_page': '/lariat'}),
    url(r'^accounts/', include('django.contrib.auth.urls')),
    # dataset_pk is optional in the explorer/lariat routes.
    url(r'^explorer(?:/(?P<dataset_pk>\d+))?/$', views.ExplorerView.as_view(), name='explorer'),
    #url(r'^grouper(?:/(?P<dataset_pk>\d+))?/$', views.GrouperView.as_view(), name='grouper'),
    url(r'^lariat(?:/(?P<dataset_pk>\d+))?/$', views.GrouperView.as_view(), name='lariat'),
)
|
from urllib import urlencode
import json
import os
from geomancer.app_config import MANCER_KEYS
from geomancer.helpers import encoded_dict
from geomancer.mancers.geotype import State, StateFIPS
from geomancer.mancers.base import BaseMancer, MancerError
from string import punctuation
import re
from urlparse import urlparse
import us
import requests
import pandas as pd
class BureauLaborStatistics(BaseMancer):
    """
    Subclassing the main BaseMancer class
    """
    name = 'Bureau of Labor Statistics'
    machine_name = 'bureau_labor_statistics'
    base_url = 'http://api.bls.gov/publicAPI/v2/timeseries/data'
    info_url = 'http://www.bls.gov/'
    description = """
    Data from the Bureau of Labor Statistics
    """
    api_key_required = True
    # store the data for each column
    # b/c bls api has low limit & it doesn't take long to grab all states
    # NOTE: class-level cache, shared across instances.
    oes_column_data = {}
    # a mapping of bls oes series id data codes to geomancer column names
    oes_column_lookup = { '13': '2014 Annual Wages - Median',
        '12': '2014 Annual Wages - 25th Percentile',
        '14': '2014 Annual Wages - 75th Percentile'}
    # a mapping of QCEW CSV column names to human-readable geomancer names
    qcew_column_lookup = {
        'annual_avg_estabs_count':'2013 Annual Average of 4 Quarterly Establishment Counts',
        'annual_avg_emplvl': '2013 Annual Average of Monthly Employment Levels',
        'total_annual_wages': '2013 Total Annual Wages (Sum of 4 quarterly total wage levels)',
        'taxable_annual_wages':'2013 Taxable Annual Wages (Sum of the 4 quarterly taxable wage totals)',
        'annual_contributions':'2013 Annual Contributions (Sum of the 4 quarterly contribution totals)',
        'annual_avg_wkly_wage':'2013 Average Weekly Wage (based on the 12-monthly employment levels and total annual wage levels)',
        'avg_annual_pay':'2013 Average Annual Pay (based on employment and wage levels)'
    }
def __init__(self, api_key=None):
    """Initialise the mancer.

    Fix: the *api_key* parameter was accepted but ignored; it now takes
    precedence, falling back to the configured MANCER_KEYS entry exactly
    as before when no key is passed.
    """
    self.api_key = api_key or MANCER_KEYS[self.machine_name]
    BaseMancer.__init__(self)
def get_metadata(self):
    """Describe the OES and QCEW datasets this mancer exposes."""
    oes_columns = list(self.oes_column_lookup.values())
    qcew_columns = list(self.qcew_column_lookup.values())
    return [
        {
            'table_id': 'oes',
            'human_name': 'Occupational Employment Statistics',
            'description': 'Occupational Employment Statistics',
            'source_name': self.name,
            'source_url': 'http://www.bls.gov/oes/',
            'geo_types': [State(), StateFIPS()],
            'columns': oes_columns,
            'count': 3
        },
        {
            'table_id': 'qcew',
            'human_name': 'Quarterly Census of Employment & Wages',
            'description': 'Quarterly Census of Employment & Wages',
            'source_name': self.name,
            'source_url': 'http://www.bls.gov/cew/home.htm',
            'geo_types': [State(), StateFIPS()],
            'columns': qcew_columns,
            'count': 7
        },
    ]
def search(self, geo_ids=None, columns=None):
    """Fetch the requested table columns for the given geographies.

    :param geo_ids: iterable of (geo_type, geo_id) pairs; only 'state'
        and 'state_fips' types are populated.
    :param columns: list of table_ids from get_metadata ('oes', 'qcew').
    :return: dict with a 'header' list plus one row list per geo_id.
    """
    results = {'header':[]}
    # All 50 state + DC FIPS codes; OES data is fetched for every state at
    # once because the BLS API has a low request limit.
    all_state_fips = ['01', '02', '04', '05', '06', '08', '09', '10',
        '12', '13', '15', '16', '17', '18', '19', '20', '21', '22',
        '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33',
        '34', '35', '36', '37', '38', '39', '40', '41', '42', '44',
        '45', '46', '47', '48', '49', '50', '51', '53', '54', '55', '56']
    for table_id in columns:
        if table_id == 'oes':
            # only grab data when oes_column_data is not populated
            # (class-level cache shared across calls).
            if len(self.oes_column_data)==0:
                for col in self.oes_column_lookup:
                    self.oes_column_data[col] = {}
                self.grab_oes_data(all_state_fips)
            # looping through columns in OES data
            for col in self.oes_column_lookup:
                results['header'].append(self.oes_column_lookup[col])
                # compiling matched geo data for results
                for geo_type, geo_id in geo_ids:
                    if not results.get(geo_id):
                        results[geo_id] = []
                    if geo_type == 'state' or geo_type =='state_fips':
                        if geo_id in self.oes_column_data[col]:
                            results[geo_id].append(self.oes_column_data[col][geo_id])
                        else:
                            # No data for this state: keep row lengths aligned.
                            results[geo_id].append("")
        elif table_id == 'qcew':
            for col in self.qcew_column_lookup:
                results['header'].append(self.qcew_column_lookup[col])
            for geo_type, geo_id in geo_ids:
                if not results.get(geo_id):
                    results[geo_id] = []
                if geo_type == 'state' or geo_type == 'state_fips':
                    # One CSV download per state; all QCEW columns come
                    # from the same summary row.
                    summary_df = self.qcewGetSummaryData(geo_id)
                    for col in self.qcew_column_lookup:
                        results[geo_id].append(summary_df[col][0])
    return results
def geo_lookup(self, search_term, geo_type=None):
    """Strip punctuation from *search_term* and, for state geo types,
    resolve it to a FIPS code."""
    cleaned = re.sub('[%s]' % re.escape(punctuation), '', search_term)
    if geo_type in ('state', 'state_fips'):
        return {'term': cleaned, 'geoid': self.lookup_state_name(cleaned)}
    return {'term': cleaned, 'geoid': cleaned}
# given a search term, returns state fips code
def lookup_state_name(self, term):
    """Resolve a state name/abbreviation to its FIPS code.

    Falls back to the AP-style abbreviation, then returns *term*
    unchanged when nothing matches.

    Fixes two bugs in the original: the fallback produced a *list* and
    then accessed ``.fips`` on it, and the final return referenced an
    undefined name ``search_term`` (NameError).
    """
    st = us.states.lookup(term)
    if not st:
        matches = [s for s in us.STATES if getattr(s, 'ap_abbr', None) == term]
        st = matches[0] if matches else None
    if st:
        return st.fips
    return term
def bls_oes_series_id(self, geo_id, stat_id):
    """Build a BLS OES series id for a statewide, all-industry,
    all-occupation series.

    Format documented at http://www.bls.gov/help/hlpforma.htm#OE.
    :param geo_id: state FIPS code as a string.
    :param stat_id: OES datatype code (e.g. '13' for median annual wage).
    """
    parts = (
        'OEU',              # survey prefix (OES, unadjusted)
        'S',                # area type: state
        geo_id + '00000',   # state FIPS padded out to the area code
        '000000',           # industry: all industries
        '000000',           # occupation: all occupations
        stat_id,            # datatype code
    )
    return ''.join(parts)
def grab_oes_data(self, geo_ids=None):
    """Fetch 2014 OES wage statistics for the given states and cache them.

    Populates the class-level ``oes_column_data[col][state_fips]`` cache,
    one BLS API request per column (each request batches all states).

    :param geo_ids: list of state FIPS code strings.
    """
    for col in self.oes_column_lookup:
        series_ids = []
        for geo_id in geo_ids:
            series_id = self.bls_oes_series_id(geo_id, col)
            series_ids.append(series_id)
        # make the request
        headers = {'Content-type': 'application/json'}
        data = json.dumps({"seriesid": series_ids,"startyear":"2014", "endyear":"2014", "registrationKey":self.api_key})
        p = requests.post('http://api.bls.gov/publicAPI/v2/timeseries/data/', data=data, headers=headers)
        json_data = json.loads(p.text)
        self.oes_column_data[col] = {}
        # loop through the json data and add it to oes_column_data[col][geo_id]
        for result in json_data['Results']['series']:
            # grab state id from results series id
            # (characters 4-6 of the series id are the state FIPS code)
            this_geo_id = result['seriesID'][4:6]
            this_val = result['data'][0]['value']
            self.oes_column_data[col][this_geo_id] = this_val
def qcewGetSummaryData(self, state_fips):
    """Download the 2013 annual QCEW summary CSV for one state and return
    the all-industry, all-ownership summary row(s) as a DataFrame."""
    url = "http://www.bls.gov/cew/data/api/2013/a/area/%s000.csv" % state_fips
    frame = pd.read_csv(url)
    # industry_code '10' == all industries; own_code 0 == all ownerships
    mask = (frame['industry_code'] == '10') & (frame['own_code'] == 0)
    return frame[mask]
|
from adafruit_si7021 import SI7021
from ._i2c_utils import get_busio_i2c
class Si7021(SI7021):
    """Si7021 temperature/humidity sensor wired to the default I2C bus."""

    address = 0x40  # fixed I2C address of the Si7021

    def __init__(self):
        super().__init__(get_busio_i2c())

    @property
    def data(self):
        """Current readings keyed by measurement name."""
        return {
            "temperature": self.temperature,
            "humidity": self.relative_humidity,
        }

    @property
    def units(self):
        """Measurement units, keyed like :pyattr:`data`."""
        return {
            "temperature": "celsius",
            "humidity": "percent",
        }

    def __str__(self):
        lines = [
            "Temperature: " + str(self.temperature) + " C",
            "Humidity: " + str(self.relative_humidity) + " %",
        ]
        return "\n".join(lines)
if __name__ == "__main__":
    # Manual smoke test: read the sensor once and print the readings.
    TEMP_RH = Si7021()
    print(str(TEMP_RH))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BusinessHoursDesc(object):
    """Alipay API model describing business hours: the weekdays a shop is
    open (``days_in_week``) and the opening ``hours``."""

    def __init__(self):
        self._days_in_week = None
        self._hours = None

    @property
    def days_in_week(self):
        return self._days_in_week

    @days_in_week.setter
    def days_in_week(self, value):
        # Only list values are accepted; anything else is silently ignored,
        # matching the generated-SDK convention.
        if isinstance(value, list):
            self._days_in_week = [item for item in value]

    @property
    def hours(self):
        return self._hours

    @hours.setter
    def hours(self, value):
        self._hours = value

    def to_alipay_dict(self):
        """Serialise to a plain dict, recursing into nested API models."""
        params = dict()
        if self.days_in_week:
            if isinstance(self.days_in_week, list):
                # Convert nested models in place, element by element.
                for index, element in enumerate(self.days_in_week):
                    if hasattr(element, 'to_alipay_dict'):
                        self.days_in_week[index] = element.to_alipay_dict()
            if hasattr(self.days_in_week, 'to_alipay_dict'):
                params['days_in_week'] = self.days_in_week.to_alipay_dict()
            else:
                params['days_in_week'] = self.days_in_week
        if self.hours:
            if hasattr(self.hours, 'to_alipay_dict'):
                params['hours'] = self.hours.to_alipay_dict()
            else:
                params['hours'] = self.hours
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; None/empty input yields None."""
        if not d:
            return None
        o = BusinessHoursDesc()
        if 'days_in_week' in d:
            o.days_in_week = d['days_in_week']
        if 'hours' in d:
            o.hours = d['hours']
        return o
|
import falcon, json, rethinkdb as r
'''
Set HTTP access control (CORS) headers
https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
https://github.com/falconry/falcon/issues/303
'''
def corsMiddleware(request, response, params):
    """Attach permissive CORS headers to every response (GET-only API)."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type',
        'Access-Control-Allow-Methods': 'GET',
    }
    for header_name, header_value in cors_headers.items():
        response.set_header(header_name, header_value)
class RecentProjects:
    """Falcon resource serving recently launched/updated projects as JSON."""

    def __init__(self, connection):
        self.connection = connection

    def on_get(self, req, resp):
        """GET /recentProjects?filter=<status>&limit=<n>.

        Fix: the local variable was named ``filter``, shadowing the
        builtin; renamed for clarity (no behavior change).
        """
        # 'launched' (default) etc. select the projectsRecently<Status> table.
        status_filter = (req.get_param('filter') or 'launched').capitalize()
        limit = req.get_param_as_int('limit') or 1
        projects = r.table('projectsRecently%s' % status_filter) \
            .order_by(r.desc('launched_at')) \
            .limit(limit) \
            .run(self.connection)
        resp.body = json.dumps(projects)
# NOTE(review): hardcoded database host/port; consider moving to config
# or environment variables. The single connection is shared by all requests.
connection = r.connect('52.28.17.23', 28015, db='kickstarter')
api = falcon.API(before=[corsMiddleware])
api.add_route('/recentProjects', RecentProjects(connection))
|
import random
import string
from model.user import User
import jsonpickle
import os.path
import getopt
import sys
# default params
n = 1  # quantity iterations
f = "data/users.json"
# get option from cmd
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of user", "file"])
except getopt.GetoptError as err:
    # Fix: the original called getopt.usage(), which does not exist and
    # raised AttributeError instead of reporting the real problem.
    print(err)
    sys.exit(2)
# parsing opts value
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
# path to file -- computed AFTER parsing so that -f actually takes effect
# (the original resolved the path before reading the options, making -f a no-op).
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
# generate random string
def random_string(prefix, maxlen):
    """Return *prefix* followed by up to maxlen-1 random characters.

    Characters are letters, digits and spaces; spaces are weighted x10 so
    generated test data frequently contains them.
    """
    alphabet = string.ascii_letters + string.digits + " " * 10
    length = random.randrange(maxlen)
    return prefix + "".join([random.choice(alphabet) for _ in range(length)])
# generate User from random data
# One all-empty User (edge case) plus n randomly populated Users.
test_data = [
    User(firstname="", middlename="", lastname="", address="", home="", mobile="", work="", email="",
         email2="", email3="", phone2="")
] + [
    User(firstname=random_string("firstname", 10), middlename=random_string("lastname", 10),
         lastname=random_string("lastname", 10), address=random_string("address", 10), home=random_string("home", 10),
         mobile=random_string("mobile", 10), work=random_string("work", 10), email=random_string("email", 10),
         email2=random_string("email2", 10), email3=random_string("email3", 10), phone2=random_string("phone2", 10))
    for name in range(n)
]
# write generate data (User object) to json file
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)  # format params
    out.write(jsonpickle.encode(test_data))
|
import io
import os
import sys
from decorator import decorator
def wrap_cli_test(output, save_output=False):
    """
    This decorator captures stdout and stderr and compares them with the
    contents of the specified files.
    Instead of save_output you can set the env variable BRG_TOOLS_TESTS_SAVE_OUTPUT
    Arguments:
        output (string): Path to the output. stdout and stderr prefixes will be added automatically
        save_output (bool): Whether to save the output or not. Useful when creating the tests
    """
    @decorator
    def run_test(func, *args, **kwargs):
        stdout = io.StringIO()
        stderr = io.StringIO()
        backup_stdout = sys.stdout
        backup_stderr = sys.stderr
        sys.stdout = stdout
        sys.stderr = stderr
        # Fix: restore the real streams even when func raises; the
        # original left sys.stdout/sys.stderr redirected on failure,
        # silencing every subsequent test's output.
        try:
            func(*args, **kwargs)
        finally:
            sys.stdout = backup_stdout
            sys.stderr = backup_stderr
        if (
            save_output
            or os.getenv("BRG_TOOLS_TESTS_SAVE_OUTPUT")
        ):
            with open(f"{output}.stdout", "w+") as f:
                f.write(stdout.getvalue())
            with open(f"{output}.stderr", "w+") as f:
                f.write(stderr.getvalue())
        with open(f"{output}.stdout", "r") as f:
            expected = f.read()
        assert stdout.getvalue() == expected
        with open(f"{output}.stderr", "r") as f:
            expected = f.read()
        assert stderr.getvalue() == expected
    return run_test
|
from fastapi import status
from fastapi.exceptions import HTTPException
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import NoResultFound
from app.db.queries import memos
from app.models import models
def check_owner_is_collect(
    memo_id: int, db: Session, owner: models.User
) -> bool:
    """Return True when *owner* owns the memo identified by *memo_id*.

    Raises the 404 HTTPException from get_memo_by_id when the memo does
    not exist. NOTE(review): the name looks like a typo for "correct";
    kept as-is for API compatibility.
    """
    return get_memo_by_id(db=db, id=memo_id).owner == owner
def get_memo_by_id(db: Session, id: int) -> models.Memo:
    """Fetch a memo by primary key, translating a miss into an HTTP 404."""
    try:
        return memos.get_memo(db=db, memo_id=id)
    except NoResultFound:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Your requested id does not exist."
        )
|
# written by David Sommer (david.sommer at inf.ethz.ch) in 2019
import numpy as np
import scipy.stats
from core.probabilitybuckets_light import ProbabilityBuckets
from matplotlib import pyplot as plt
####
# Privacy Buckets is based on the following publication
####
# [*] S. Meiser, E. Mohammadi,
# [*] "Tight on Budget? Tight Bounds for r-Fold Approximate Differential Privacy",
# [*] Proceedings of the 25th ACM Conference on Computer and Communications Security (CCS), 2018
# In this example, we implement custom bucketing for binomial distribution (k, n, p=0.5), shifted by one draw:
# k_vector = np.arange(n+1)
# distribution_A = np.zeros(n+2)
# distribution_B = np.zeros(n+2)
# distribution_A[:-1] = scipy.stats.binom.pmf(k_vector,n,p=0.5)
# distribution_B[1:] = scipy.stats.binom.pmf(k_vector,n,p=0.5)
#
# We compute the optimal privacy loss for an event k as
# L_A/B(k) = log [ (n over k) * 0.5**k * 0.5**(n-k) ] / [ (n over k-1) * 0.5**(k-1) * 0.5**(n-(k-1)) ]
# = log [ (n - k + 1) / k ]
#
# The largest privacy loss is L_A/B(n) = log(n) [ and L_A/B(0) = -log(n) ]. Plug this in the condition for the factor
#
# L_A/B(n) = ln(n) < log(factor) * number_of_buckets / 2
#
# gives
# factor > exp( log(n) * 2 / number_of_buckets )
class ProbabilityBuckets_Binom(ProbabilityBuckets):
    """Probability buckets specialised for Binom(n, 0.5) vs. its one-draw shift.

    Buckets are filled analytically from the closed-form privacy loss
    L_A/B(k) = log((n - k + 1) / k) derived in the module comment above,
    instead of the generic numerical bucketing of the parent class.
    """
    def __init__(self, n, **kwargs):
        self.n = np.int32(n)
        # we want to fit all possible loss values in our bucket_distribution.
        # Using the derivation above, we overapproximate the factor with n -> n+1 to avoid numerical issues
        # ( L_A/B(n) might fall beyond the last bucket)
        kwargs['factor'] = np.exp(np.log(n+1) * 2 / kwargs['number_of_buckets'] )
        # Tell the parent __init__() method that we create our own bucket_distribution.
        kwargs['skip_bucketing'] = True
        # Our custom create_bucket_distribution() method does not set up the error correction
        kwargs['error_correction'] = False
        super(ProbabilityBuckets_Binom, self).__init__(**kwargs)
        self.create_bucket_distribution()
        # Caching setup needs to be called after the buckets have been filled as the caching utilized a hash over the bucket distribution
        self.caching_setup()

    def create_bucket_distribution(self):
        """Fill self.bucket_distribution analytically for the binomial pair."""
        self.bucket_distribution = np.zeros(self.number_of_buckets, dtype=np.float64)
        self.error_correction = False
        # k = 0 gives distringuishing event
        k = np.arange(1, self.n + 1)
        privacy_loss_AB = np.log( (self.n - k + 1) / k )
        # Map each loss value to a bucket index; the zero-loss bucket sits
        # at the centre (number_of_buckets // 2).
        indices = privacy_loss_AB / self.log_factor + self.number_of_buckets // 2
        indices = np.ceil(indices).astype(int)
        distr1 = scipy.stats.binom.pmf(k, self.n, p=0.5)
        # fill buckets
        for i, a, in zip(indices, distr1 ):
            # i = int(np.ceil(i))
            # By the factor choice above, every loss fits inside the range;
            # the overflow/underflow branches are unreachable guards.
            if i >= self.number_of_buckets:
                assert False # should not happen in this implementation
                self.infty_bucket += a
                continue
            if i < 0:
                assert False # should not happen in this implementation
                self.bucket_distribution[0] += a
                continue
            self.bucket_distribution[i] += a
        # the infinity-bucket is zero by design
        self.infty_bucket = np.float64(0.0)
        # k = 0 gives distringuishing event, Pr[k = 0] = 0.5**(n-0)*0.5**0 = 0.5**n
        self.distinguishing_events = np.float64(0.5)**self.n
        self.one_index = int(self.number_of_buckets // 2) # this is a shortcut to the 0-bucket where L_A/B ~ 1
        self.u = np.int64(1) # for error correction. Actually not needd
n = 5000  # the number of draws in a binomial distribution

# Initialize privacy buckets.
privacybuckets = ProbabilityBuckets_Binom(
    n = n,  # The n of the binomial distribution
    number_of_buckets=100000,  # number of buckets. The more the better as the resolution gets more fine-grained. But you pay with performance
    factor = None,  # We compute the optimal one in our custom implementation
    caching_directory = "./pb-cache",  # caching makes re-evaluations faster. Can be turned off for some cases.
    free_infty_budget=10**(-20),  # how much we can put in the infty bucket before first squaring
    error_correction=None,  # error correction. We set it to False in our custom implementation internally
    )
privacybuckets.print_state()

#
# analytical privacy loss distribution, see 'Sommer et al. "Privacy loss classes: The central limit theorem in differential privacy." Proceedings on Privacy Enhancing Technologies. 2019'
#

# the k's for which we do not hit distinguishing events, i.e., where distribution_B != 0
k_vec = np.arange(1, n + 1)
privacy_loss_AB = np.log((n - k_vec + 1) / k_vec)
distribution_A = scipy.stats.binom.pmf(k_vec, n, p=0.5)

# Overlay the closed-form loss distribution with the bucketed (numeric) one;
# the two curves should coincide if the bucketing is correct.
plt.plot(privacy_loss_AB, distribution_A, label='analytic solution')
plt.plot( ( np.arange(privacybuckets.number_of_buckets) - privacybuckets.one_index) * privacybuckets.log_factor, privacybuckets.bucket_distribution, label='numeric solution')
plt.xlabel("privacy loss")
plt.ylabel("probability mass")
plt.legend()
print("distinguishing events (containing only the probability mass from k=0): {} (which should be 0.5**{} but might be 0 due to numerical precision".format(str(scipy.stats.binom.pmf(0,n,p=0.5)), n))
plt.show()

#
# Composition
#

# Now we evaluate how the distribution looks after 2**k independent compositions
k = 5
# input can be arbitrary positive integer, but exponents of 2 are numerically the most stable
privacybuckets_composed = privacybuckets.compose(2**k)

# Print status summary
privacybuckets_composed.print_state()

# Now we build the delta(eps) graphs from the computed distribution.
eps_vector = np.linspace(0,3,100)
upper_bound = [privacybuckets_composed.delta_ADP(eps) for eps in eps_vector]

plt.plot(eps_vector, upper_bound, label="upper_bound")
plt.legend()
plt.title("Binomial(n={},p=0.5) distribution after {:d} compositions".format(n, 2**k))
plt.xlabel("eps")
plt.ylabel("delta")
plt.ticklabel_format(useOffset=False)  # Hotfix for the behaviour of my current matplotlib version
plt.show()

# abusing internals, we can look at the bucket distribution
plt.plot(privacybuckets_composed.bucket_distribution)
plt.title("bucket distribution")
plt.xlabel("bucket number")
plt.ylabel("mass")
plt.show()
|
"""
A script used to display the cross-correlation matrix
and the the whole time seriesxX
"""
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from signals.time_series_class import MixAr, AR
from signals.aux_functions import sidekick
from visualization import distance
import os
# Plot toggles for the optional diagnostic figures below.
plot2 = False  # NOTE(review): defined but never used in this script
plot3 = False

# Time parameters
dt = 0.1
Tmax = 100

# Let's get the auxiliary (sidekick) signal: a mix of two frequencies
w1 = 1
w2 = 5
amplitude = 1
beta = sidekick(w1, w2, dt, Tmax, amplitude)

# First we need the phi's vector (AR coefficients)
phi0 = 0.0
phi1 = -0.8
phi2 = 0.3
phi = np.array((phi0, phi1, phi2))

# Now we need the initial conditions
x0 = 1
x1 = 1
x2 = 0
initial_conditions = np.array((x0, x1, x2))

# First we construct the series without the sidekick
B = AR(phi, dt=dt, Tmax=Tmax)
B.initial_conditions(initial_conditions)
normal_series = B.construct_series()

# Second we construct the series with the mix
A = MixAr(phi, dt=dt, Tmax=Tmax, beta=beta)
A.initial_conditions(initial_conditions)
mix_series = A.construct_series()

time = A.time

#########
# Here we will calculate correlations
#########
nlags = 100
unbiased = False
# NOTE(review): the 'unbiased=' keyword was renamed 'adjusted=' in
# statsmodels >= 0.13 -- confirm the pinned statsmodels version.
x_auto = sm.tsa.acf(mix_series, unbiased=unbiased, nlags=nlags)
y_auto = sm.tsa.acf(beta, unbiased=unbiased, nlags=nlags)
xy_cross = sm.tsa.ccf(mix_series, beta, unbiased=unbiased)[0:nlags + 1]

# Now the distance matrix: a (nlags+1, 2, 2) stack of symmetric
# auto/cross-correlation matrices, one per lag.
d = np.zeros((nlags + 1, 2, 2))
d[:, 0, 0] = x_auto
d[:, 1, 1] = y_auto
d[:, 1, 0] = xy_cross
d[:, 0, 1] = d[:, 1, 0]

##############
# Now plot the things
##############
# fig = distance.linear(d, cmap='coolwarm', inter='none', origin='upper',
#                       fontsize=16, aspect='auto')
fig = distance.matrix(d, cmap='coolwarm', inter='none', origin='upper',
                      fontsize=16, aspect='auto')

# Save the figure
name = 'cross_correlation_transformed'
directory = './results/'
extension = '.pdf'
filename = directory + name + extension
plt.savefig(filename)
# Trim the PDF margins in place (requires pdfcrop on PATH).
os.system("pdfcrop %s %s" % (filename, filename))
# NOTE(review): plt.show() takes no figure argument in modern matplotlib;
# 'fig' is interpreted as the 'block' flag here -- confirm intent.
plt.show(fig)

if plot3:
    # Six-panel diagnostic figure: correlations (top) and raw series (bottom).
    plt.subplot(3, 2, 1)
    plt.plot(x_auto)
    plt.ylim([-1, 1])
    plt.title('Autocorrelation of mix_series')

    plt.subplot(3, 2, 2)
    plt.plot(y_auto)
    plt.ylim([-1, 1])
    plt.title('Autocorrelation of sidekick')

    plt.subplot(3, 2, 3)
    plt.plot(xy_cross)
    plt.ylim([-1, 1])
    plt.title('Cross correlation')

    plt.subplot(3, 2, 4)
    plt.plot(time, beta)
    plt.title('Sidekick')

    plt.subplot(3, 2, 5)
    plt.plot(time, normal_series)
    plt.title('Normal series')

    plt.subplot(3, 2, 6)
    plt.plot(time, mix_series)
    plt.title('Mix series')

    plt.show()
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
# Copyright 2018 Xu Chen All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for capsule layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.layers import variables
def _squash(in_tensor):
  """Squash non-linearity for capsule activations.

  Rescales each capsule vector v (along axis 2) to
  (|v|^2 / (1 + |v|^2)) * v / |v|, so short vectors shrink towards zero and
  long vectors approach unit length.

  Args:
    in_tensor: tensor,
      shape [batch, num_cap_types, num_atoms] for a fc capsule layer or
      shape [batch, num_cap_types, num_atoms, h, w] for a convolutional
      capsule layer.

  Returns:
    A tensor with the same shape as `in_tensor`.
  """
  with tf.name_scope('norm_non_linearity'):
    vec_len = tf.norm(in_tensor, axis=2, keepdims=True)
    vec_len_sq = vec_len * vec_len
    unit_vec = in_tensor / vec_len
    return unit_vec * (vec_len_sq / (1 + vec_len_sq))
def _leaky_routing(logits, out_dim):
  """Adds an extra dimension to routing logits.

  This enables active capsules to be routed to the extra dim if they are not a
  good fit for any of the capsules in the layer above.

  Args:
    logits: the original logits. shape (in_dim, out_dim) if fully connected.
      Otherwise, it has two more dimensions.
    out_dim: scalar, number of output capsule types; the leak column added
      below is stripped again so the result keeps this size on axis 2.

  Returns:
    Routing probabilities for each pair of capsules. Same shape as logits.
  """
  # Append a zero "leak" logit before the softmax so mass can escape to it,
  # then drop the leak column so the returned probabilities sum to <= 1.
  leak = tf.zeros_like(logits, optimize=True)
  leak = tf.reduce_sum(leak, axis=2, keepdims=True)
  leaky_logits = tf.concat([leak, logits], axis=2)
  leaky_routing = tf.nn.softmax(leaky_logits, axis=2)
  return tf.split(leaky_routing, [1, out_dim], 2)[1]
def _update_routing(tower_idx, votes, biases, logit_shape, num_ranks, in_dim, out_dim, reassemble,
                    leaky, num_routing):
  """Sums over scaled votes and applies squash to compute the activations.

  Iteratively updates routing logits (scales) based on the similarity between
  the activation of this layer and the votes of the layer below.

  Args:
    tower_idx: the index number for this tower. Each tower is named
      as tower_{tower_idx} and resides on gpu:{tower_idx}.
    votes: tensor, the transformed outputs of the layer below.
    biases: tensor, bias variable.
    logit_shape: tensor, shape of the logit to be initialized.
    num_ranks: scalar, rank of the votes tensor. For fully connected capsule it
      is 4, for convolutional capsule it is 6.
    in_dim: scalar, number of capsule types of input.
    out_dim: scalar, number of capsule types of output.
    reassemble: boolean, whether to use the reassemble (boost) method.
    leaky: boolean, whether to use leaky routing.
    num_routing: scalar, number of routing iterations.

  Returns:
    The activation tensor of the output layer after `num_routing` iterations.
  """
  # Permutation that moves the atoms axis (3) to the front for broadcasting
  # with `route`, and its inverse; extra spatial axes (rank > 4) stay in place.
  votes_t_shape = [3, 0, 1, 2]
  r_t_shape = [1, 2, 3, 0]
  for i in range(num_ranks - 4):
    votes_t_shape += [i + 4]
    r_t_shape += [i + 4]
  votes_trans = tf.transpose(votes, votes_t_shape)

  def _body(i, logits, activations):
    """Routing while loop."""
    # route: [batch, in_dim, out_dim, ...]
    if leaky:
      route = _leaky_routing(logits, out_dim)
    else:
      route = tf.nn.softmax(logits, axis=2)
    # Weighted sum of votes per output capsule, then squash.
    preact_unrolled = route * votes_trans
    preact_trans = tf.transpose(preact_unrolled, r_t_shape)
    preactivate = tf.reduce_sum(preact_trans, axis=1) + biases
    activation = _squash(preactivate)
    activations = activations.write(i, activation)
    # Agreement (dot product) between each vote and the new activation
    # drives the logit update for the next routing iteration.
    act_3d = tf.expand_dims(activation, 1)
    tile_shape = np.ones(num_ranks, dtype=np.int32).tolist()
    tile_shape[1] = in_dim
    act_replicated = tf.tile(act_3d, tile_shape)
    distances = tf.reduce_sum(votes * act_replicated, axis=3)
    # logits = logits.write(i+1, logit + distances)
    logits += distances
    return (i + 1, logits, activations)

  activations = tf.TensorArray(
      dtype=tf.float32, size=num_routing, clear_after_read=False)
  logits = tf.fill(logit_shape, 0.0)
  i = tf.constant(0, dtype=tf.int32)
  # Run the first num_routing - 1 iterations inside the while loop; the last
  # iteration is unrolled manually below so its intermediates are available
  # for the reassemble branch.
  _, logits, activations = tf.while_loop(
      lambda i, logits, activations: i < num_routing - 1,
      _body,
      loop_vars=[i, logits, activations],
      swap_memory=True)

  # do it manually (final routing iteration, unrolled)
  if leaky:
    route = _leaky_routing(logits, out_dim)
  else:
    route = tf.nn.softmax(logits, axis=2)  # (?, 512, 10)
  """Normal route section"""
  preact_unrolled = route * votes_trans
  preact_trans = tf.transpose(preact_unrolled, r_t_shape)
  preactivate = tf.reduce_sum(preact_trans, axis=1) + biases
  activation = _squash(preactivate)
  activations = activations.write(num_routing - 1, activation)
  act_3d = tf.expand_dims(activation, 1)
  tile_shape = np.ones(num_ranks, dtype=np.int32).tolist()
  tile_shape[1] = in_dim
  act_replicated = tf.tile(act_3d, tile_shape)
  distances = tf.reduce_sum(votes * act_replicated, axis=3)
  logits += distances
  # Squash scaling factor of the full (unmasked) preactivation:
  # |v| / (1 + |v|^2) == (|v|^2 / (1 + |v|^2)) / |v|. Reused below so the
  # masked preactivations are rescaled consistently with `activation`.
  full_norm = tf.norm(preactivate, axis=2, keepdims=True)
  full_norm_squared = full_norm * full_norm
  scale = full_norm / (1 + full_norm_squared)

  if reassemble:
    """Boost section"""
    # transpose route to make compare easier
    route_trans = tf.transpose(route, [0, 2, 1])
    # NOTE(review): assumes exactly 10 output capsule types (comments above
    # suggest shape (?, 512, 10), i.e. 10 classes) -- confirm with callers.
    ten_splits = tf.split(route_trans, num_or_size_splits=10, axis=1)
    # Per-batch threshold published elsewhere in the graph via a collection.
    threshold = tf.get_collection('tower_%d_batched_threshold' % tower_idx)[0]
    for split in ten_splits:
      split_shape = tf.shape(split)  # (?, 1, 512)
      # Keep only input capsules whose routing weight is <= threshold.
      valid_cap_indices = tf.less_equal(
          split,
          tf.fill(split_shape, threshold))  # threshold here
      valid_cap_indices_sq = tf.squeeze(valid_cap_indices)
      valid_cap_multiplier = tf.cast(valid_cap_indices_sq, tf.float32)  # (?, 512) 1.0 or 0.0
      valid_cap_multiplier_tiled = tf.tile(
          tf.expand_dims(valid_cap_multiplier, -1),
          [1, 1, 10])
      # Recompute the preactivation with the masked routes.
      preact_unrolled = valid_cap_multiplier_tiled * route * votes_trans
      preact_trans = tf.transpose(preact_unrolled, r_t_shape)
      preactivate = tf.reduce_sum(preact_trans, axis=1) + biases
      # activation = _squash(preactivate)
      # manual squash: reuse the unmasked scale instead of recomputing it
      with tf.name_scope('manual_norm_non_linearity'):
        activation = preactivate * scale
      act_norm = tf.norm(activation, axis=-1, name='act_norm')
      tf.add_to_collection('tower_%d_ensemble_acts' % tower_idx, act_norm)  # total 10

  """visual"""
  # Expose every routing iteration's activation for visualization.
  for i in range(num_routing):
    tf.add_to_collection('tower_%d_visual' % tower_idx, activations.read(i))
  return activations.read(num_routing - 1)
def _depthwise_conv3d(tower_idx, in_tensor, in_dim, in_atoms,
                      out_dim, out_atoms,
                      kernel, stride=2, padding='SAME'):
  """Perform 2D convolution given a 5D input tensor.

  This layer is given an input tensor of shape (batch, in_dim, in_atoms, in_h,
  in_w). We squeeze the first two dimensions to get a 4R tensor as the input
  of tf.nn.conv2d. Then we split the first dimension and the last dimension
  and return the 6R convolution output.

  Args:
    tower_idx: the index number for this tower. Each tower is named
      as tower_{tower_idx} and resides on gpu:{tower_idx}.
    in_tensor: 5R tensor, last two dimensions representing height and width.
    in_dim: scalar, number of capsule types of input.
    in_atoms: scalar, number of units of each input capsule.
    out_dim: scalar, number of capsule types of output.
    out_atoms: scalar, number of units of each output capsule.
    kernel: tensor, convolutional kernel variable.
    stride: scalar, stride of the convolutional kernel.
    padding: 'SAME' or 'VALID', padding mechanism for convolutional kernels.

  Returns:
    6R tensor output of a 2D convolution with shape (batch, in_dim, out_dim,
    out_atoms, out_h, out_w), the convolution output shape and the input shape.
  """
  with tf.name_scope('conv'):
    in_shape = tf.shape(in_tensor)  # dynamic shape op
    _, _, _, in_height, in_width = in_tensor.get_shape()  # (batch, in_dim, in_atoms, in_h, in_w)
    # Reshape in_tensor to 4R by merging the first two dimensions so each
    # input capsule type is convolved independently with the shared kernel.
    in_tensor_reshaped = tf.reshape(in_tensor, [
        in_shape[0]*in_dim, in_atoms, in_shape[3], in_shape[4]
    ])
    in_tensor_reshaped.set_shape((None, in_atoms, in_height.value, in_width.value))
    # do convolution
    conv = tf.nn.conv2d(
        in_tensor_reshaped,
        kernel, [1, 1, stride, stride],
        padding=padding,
        data_format='NCHW')
    conv_shape = tf.shape(conv)  # shape (batch*in_dim, out_dim*out_atoms, H, W)
    _, _, conv_height, conv_width = conv.get_shape()
    # Reshape back to 6R by splitting the first dimension into batch and
    # in_dim, and the second dimension into out_dim and out_atoms.
    conv_reshaped = tf.reshape(conv, [
        in_shape[0], in_dim, out_dim, out_atoms, conv_shape[2], conv_shape[3]
    ], name='votes')
    conv_reshaped.set_shape((None, in_dim, out_dim, out_atoms,
                             conv_height.value, conv_width.value))
    """visual"""
    # Expose the votes tensor for visualization.
    tf.add_to_collection('tower_%d_visual' % tower_idx, conv_reshaped)
    return conv_reshaped, conv_shape, in_shape
def conv_slim_capsule(tower_idx, in_tensor, in_dim, in_atoms,
                      out_dim, out_atoms, layer_name,
                      kernel_size=5, stride=2, padding='SAME',
                      reassemble=False,
                      **routing_args):
  """Builds a slim convolutional capsule layer.

  This layer performs 2D convolution given 5R input tensor of shape
  (batch, in_dim, in_atoms, in_h, in_w). Then refines the votes with
  routing and applies Squash nonlinearity for each capsule.

  Each capsule in this layer is a convolutional unit and shares its kernel
  over its positional grid (e.g. 9x9) and different capsules below. Therefore,
  number of trainable variables in this layer is:

    kernel: (kernel_size, kernel_size, in_atoms, out_dim * out_atoms)
    bias: (out_dim, out_atoms)

  Output of a conv2d layer is a single capsule with channel number of atoms.
  Therefore conv_slim_capsule is suitable to be added on top of a conv2d layer
  with num_routing=1, in_dim=1 and in_atoms = conv_channels.

  Args:
    tower_idx: the index number for this tower. Each tower is named
      as tower_{tower_idx} and resides on gpu:{tower_idx}.
    in_tensor: 5R tensor, last two dimensions representing height and width.
    in_dim: scalar, number of capsule types of input.
    in_atoms: scalar, number of units of each input capsule.
    out_dim: scalar, number of capsule types of output.
    out_atoms: scalar, number of units of each output capsule.
    layer_name: string, name of this layer.
    kernel_size: scalar, convolutional kernel size (kernel_size, kernel_size)
    stride: scalar, stride of the convolutional kernel.
    padding: 'SAME' or 'VALID', padding mechanism for convolutional kernels.
    reassemble: boolean, whether to use the reassemble (boost) method in
      routing.
    **routing_args: dictionary {leaky, num_routing}, args to be passed to the
      routing procedure.

  Returns:
    Tensor of activations for this layer of shape
    (batch, out_dim, out_atoms, out_h, out_w).
  """
  with tf.variable_scope(layer_name):
    # Shared convolutional kernel and per-output-capsule bias.
    kernel = variables.weight_variable(
        shape=[kernel_size, kernel_size, in_atoms, out_dim * out_atoms])
    biases = variables.bias_variable(
        shape=[out_dim, out_atoms, 1, 1])
    votes, votes_shape, in_shape = _depthwise_conv3d(
        tower_idx, in_tensor, in_dim, in_atoms, out_dim, out_atoms, kernel, stride, padding)

    with tf.name_scope('routing'):
      # Routing logits span all (input type, output type, spatial) pairs.
      logit_shape = tf.stack([
          in_shape[0], in_dim, out_dim, votes_shape[2], votes_shape[3]
      ])
      # Broadcast the bias over the spatial grid of the conv output.
      biases_replicated = tf.tile(biases, [1, 1, votes_shape[2], votes_shape[3]])
      activations = _update_routing(
          tower_idx,
          votes=votes,
          biases=biases_replicated,
          logit_shape=logit_shape,
          num_ranks=6,
          in_dim=in_dim,
          out_dim=out_dim,
          reassemble=reassemble,
          **routing_args)
    return activations
def capsule(tower_idx, in_tensor, in_dim, in_atoms,
            out_dim, out_atoms, layer_name,
            reassemble,
            **routing_args):
  """Builds a fully connected capsule layer.

  Given an input tensor of shape (batch, in_dim, in_atoms), this op
  performs the following:

    1. For each input capsule, multiplies it with the weight variables
       to get votes of shape (batch, in_dim, out_dim, out_atoms);
    2. Scales the votes for each output capsule by routing;
    3. Squashes the output of each capsule to have norm less than one.

  Each capsule of this layer has one weight tensor for each capsule of
  layer below. Therefore, this layer has the following number of
  trainable variables:

    kernel: (in_dim, in_atoms, out_dim * out_atoms)
    biases: (out_dim, out_atoms)

  Args:
    tower_idx: the index number for this tower. Each tower is named
      as tower_{tower_idx} and resides on gpu:{tower_idx}.
    in_tensor: tensor, activation output of the layer below.
    in_dim: scalar, number of capsule types in the layer below.
    in_atoms: scalar, number of units of input capsule.
    out_dim: scalar, number of capsule types in the output layer.
    out_atoms: scalar, number of units of output capsule.
    layer_name: string, name of this layer.
    reassemble: boolean, whether to use the reassemble (boost) method in
      routing.
    **routing_args: dictionary {leaky, num_routing}, args for routing.

  Returns:
    Tensor of activations for this layer of shape (batch, out_dim, out_atoms).
  """
  with tf.variable_scope(layer_name):
    weights = variables.weight_variable(
        [in_dim, in_atoms, out_dim * out_atoms])
    biases = variables.bias_variable([out_dim, out_atoms])
    with tf.name_scope('Wx_plus_b'):
      # Depthwise matmul: [b, d, c] @ [d, c, o_c] = [b, d, o_c]
      # to do this: tile input, do element-wise multiplication and reduce
      # sum over the in_atoms dimension.
      in_tiled = tf.tile(
          tf.expand_dims(in_tensor, -1),
          [1, 1, 1, out_dim * out_atoms])
      votes = tf.reduce_sum(in_tiled * weights, axis=2)
      votes_reshaped = tf.reshape(votes,
                                  [-1, in_dim, out_dim, out_atoms])

    with tf.name_scope('routing'):
      in_shape = tf.shape(in_tensor)
      # Routing logits: one scalar per (input type, output type) pair.
      logit_shape = tf.stack([in_shape[0], in_dim, out_dim])
      activations = _update_routing(
          tower_idx,
          votes=votes_reshaped,
          biases=biases,
          logit_shape=logit_shape,
          num_ranks=4,
          in_dim=in_dim,
          out_dim=out_dim,
          reassemble=reassemble,
          **routing_args)
    return activations
def reconstruction(capsule_mask, num_atoms, capsule_embedding, layer_sizes,
                   num_pixels, reuse, image, balance_factor):
  """Adds the reconstruction loss and calculates the reconstructed image.

  Given the last capsule output layer as input of shape (batch, 10, num_atoms),
  adds 3 fully connected layers on top of it.

  Feeds the masked output of the model to the reconstruction sub-network.
  Adds the difference with the reconstruction image as reconstruction loss to
  the loss collection.

  Args:
    capsule_mask: tensor, for each data in the batch it has the one hot
      encoding of the target id.
    num_atoms: scalar, number of atoms in the given capsule_embedding.
    capsule_embedding: tensor, output of the last capsule layer.
    layer_sizes: (scalar, scalar), size of the first and second layer.
    num_pixels: scalar, number of pixels in the target image.
    reuse: if set reuse variables.
    image: the reconstruction target image.
    balance_factor: scalar, downweight the loss to be in valid range.

  Returns:
    The reconstruction images of shape (batch_size, num_pixels).
  """
  first_layer_size, second_layer_size = layer_sizes
  # Zero out every capsule except the target one, then flatten.
  capsule_mask_3d = tf.expand_dims(tf.cast(capsule_mask, tf.float32), -1)
  atom_mask = tf.tile(capsule_mask_3d, [1, 1, num_atoms])
  filtered_embedding = capsule_embedding * atom_mask
  filtered_embedding_2d = tf.contrib.layers.flatten(filtered_embedding)
  # 3-layer decoder: relu -> relu -> sigmoid (pixel intensities in [0, 1]).
  reconstruction_2d = tf.contrib.layers.stack(
      inputs=filtered_embedding_2d,
      layer=tf.contrib.layers.fully_connected,
      stack_args=[(first_layer_size, tf.nn.relu),
                  (second_layer_size, tf.nn.relu),
                  (num_pixels, tf.sigmoid)],
      reuse=reuse,
      scope='recons',
      weights_initializer=tf.truncated_normal_initializer(
          stddev=0.1, dtype=tf.float32),
      biases_initializer=tf.constant_initializer(0.1))

  with tf.name_scope('loss'):
    # Sum-of-squares pixel error, averaged over the batch and downweighted
    # so it does not dominate the classification loss.
    image_2d = tf.contrib.layers.flatten(image)
    distance = tf.pow(reconstruction_2d - image_2d, 2)
    loss = tf.reduce_sum(distance, axis=-1)
    batch_loss = tf.reduce_mean(loss)
    balanced_loss = balance_factor * batch_loss
    tf.add_to_collection('losses', balanced_loss)
    tf.summary.scalar('reconstruction_error', balanced_loss)
  return reconstruction_2d
|
import cv2 as cv
import numpy as np
from numpy.linalg import norm
from math import sqrt
class CameraSensor:
    """Captures Rubik's cube faces with a webcam and classifies sticker colors.

    A fixed square overlay (the "large box") frames the cube face in the
    camera image; nine smaller boxes (the "cubies") frame the individual
    stickers. The average BGR pixel inside each cubie box is compared
    against the reference colors in ``coreColors`` via cosine similarity
    to decide the sticker color.
    """

    # Order in which the cube faces are captured / indexed.
    SIDE_ORDER = ["L", "R", "B", "U", "D", "F"]

    # Color letter -> (attribute holding its running reference pixel,
    # human-readable label). Used by updateColors().
    _COLOR_ATTRS = {
        "r": ("reds", "Red"),
        "o": ("oranges", "Orange"),
        "y": ("yellows", "Yellow"),
        "g": ("greens", "Green"),
        "b": ("blues", "Blue"),
        "w": ("whites", "White"),
    }

    def __init__(self):
        # Grab a single frame only to learn the camera resolution.
        capture = cv.VideoCapture(0)
        ret, frame = capture.read()
        self.camHeight, self.camWidth, self.camColors = frame.shape
        capture.release()
        # Side length (pixels) of the square overlay framing the cube face.
        self.cubeDim = 294
        self.startPoint = None
        self.endPoint = None
        self.initPoints()
        self.cubies = []
        self.initCubies()
        self.coreColors = []
        self.initCoreColors()
        # Color letter -> numeric value; may be remapped later by
        # updateColorOrientation().
        self.colorKey = {"r": 0, "o": 1, "y": 2, "g": 3, "b": 4, "w": 5}

    def initPoints(self):
        """Compute the top-left / bottom-right corners of the cube overlay."""
        # Empirical offsets to center the overlay on the physical rig.
        xShift = -24
        yShift = 14
        startX = (self.camWidth // 2) - (self.cubeDim // 2) + xShift
        startY = (self.camHeight // 2) - (self.cubeDim // 2) + yShift
        endX = startX + self.cubeDim
        endY = startY + self.cubeDim
        self.startPoint = (startX, startY)
        self.endPoint = (endX, endY)

    def initCubies(self):
        """Compute the nine per-sticker sample boxes inside the overlay."""
        cubieDim = self.cubeDim // 3
        # Empirical insets; grown after each row to follow the camera's
        # perspective skew.
        sxShift = 8
        syShift = 8
        exShift = -12
        eyShift = -12
        for i in range(3):
            for j in range(3):
                sX = self.startPoint[0] + j*cubieDim + sxShift
                sY = self.startPoint[1] + i*cubieDim + syShift
                eX = self.startPoint[0] + (j+1)*cubieDim + exShift
                eY = self.startPoint[1] + (i+1)*cubieDim + eyShift
                self.cubies.append([(sX, sY), (eX, eY)])
            sxShift += 4
            syShift += 2
            exShift += 4
            eyShift += 2

    def initCoreColors(self, colors=None):
        """Seed the reference BGR pixel for each of the six sticker colors.

        The values are hand-calibrated for the rig's lighting. The `colors`
        parameter is currently unused (kept for interface compatibility).
        """
        RED = np.array([65, 85, 230])
        ORANGE = np.array([80, 170, 250])
        YELLOW = np.array([95, 215, 210])
        GREEN = np.array([125, 225, 85])
        BLUE = np.array([210, 115, 50])
        WHITE = np.array([205, 205, 190])
        self.coreColors = [RED, ORANGE, YELLOW, GREEN, BLUE, WHITE]
        # Running reference samples, refined by updateColors() as stickers
        # are classified.
        self.reds = np.array(RED)
        self.oranges = np.array(ORANGE)
        self.yellows = np.array(YELLOW)
        self.greens = np.array(GREEN)
        self.blues = np.array(BLUE)
        self.whites = np.array(WHITE)

    def updateColorOrientation(self, coreColors: dict, order: list):
        """Re-key colors from the observed face-center colors.

        Args:
            coreColors: side letter (from SIDE_ORDER) -> center pixel.
            order: color letter of each face center, in capture order.
        """
        # TODO(review): original author noted this method needs testing.
        colors = ["r", "o", "y", "g", "b", "w"]
        self.colorArrays = list()
        for c in colors:
            index = order.index(c)
            key = self.SIDE_ORDER[index]
            pixelVal = coreColors[key] / norm(coreColors[key])
            self.colorArrays.append(pixelVal)
        for i, o in enumerate(order):
            self.colorKey[o] = i

    def updateOranges(self):
        """Refresh the normalized orange reference from collected samples.

        NOTE(review): appears unused on the current path (callers of it in
        getColor are commented out) and assumes self.oranges is a 2-D sample
        stack -- confirm before relying on it.
        """
        avgOrange = np.average(self.oranges, axis=0)
        normalizedOrange = avgOrange / 255
        normalizedOrange /= norm(normalizedOrange)
        self.coreColors[1] = normalizedOrange

    def streamWebcamVideo(self):
        """Show a live preview window until the user presses 'q'."""
        videoCaptureObject = cv.VideoCapture(1)
        while True:
            ret, frame = videoCaptureObject.read()
            cv.imshow('Capturing Video', frame)
            # Bug fix: the original tested `waitKey(1) & 0xFF == ord('q')`,
            # which parses as `waitKey(1) & (0xFF == ord('q'))` and is never
            # true, and it kept looping after releasing the capture. Mask the
            # key code first and leave the loop once 'q' is seen.
            if (cv.waitKey(1) & 0xFF) == ord('q'):
                videoCaptureObject.release()
                cv.destroyAllWindows()
                break

    def printWebcamProps(self, capture=None):
        """Dump the camera's reported properties for calibration/debugging."""
        if capture is None:  # bug fix: identity check, not == on a capture
            capture = cv.VideoCapture(1)
        print("CV_CAP_PROP_FRAME_WIDTH : '{}'".format(
            capture.get(cv.CAP_PROP_FRAME_WIDTH)))
        print("CV_CAP_PROP_FRAME_HEIGHT : '{}'".format(
            capture.get(cv.CAP_PROP_FRAME_HEIGHT)))
        print("CAP_PROP_BRIGHTNESS : '{}'".format(
            capture.get(cv.CAP_PROP_BRIGHTNESS)))
        print("CAP_PROP_CONTRAST : '{}'".format(
            capture.get(cv.CAP_PROP_CONTRAST)))
        print("CAP_PROP_SATURATION : '{}'".format(
            capture.get(cv.CAP_PROP_SATURATION)))
        print("CAP_PROP_EXPOSURE : '{}'".format(
            capture.get(cv.CAP_PROP_EXPOSURE)))
        print("CAP_PROP_HUE : '{}'".format(capture.get(cv.CAP_PROP_HUE)))
        print("CAP_PROP_SHARPNESS : '{}'".format(
            capture.get(cv.CAP_PROP_SHARPNESS)))
        print("CAP_PROP_AUTO_EXPOSURE : '{}'".format(
            capture.get(cv.CAP_PROP_AUTO_EXPOSURE)))
        print("CAP_PROP_TEMPERATURE : '{}'".format(
            capture.get(cv.CAP_PROP_TEMPERATURE)))
        print("CAP_PROP_ZOOM : '{}'".format(capture.get(cv.CAP_PROP_ZOOM)))
        print("CAP_PROP_FOCUS : '{}'".format(capture.get(cv.CAP_PROP_FOCUS)))
        print("CAP_PROP_AUTOFOCUS : '{}'".format(
            capture.get(cv.CAP_PROP_AUTOFOCUS)))
        print("CAP_PROP_ZOOM : '{}'".format(capture.get(cv.CAP_PROP_ZOOM)))

    def enhancePicture(self, img):
        """Scale HSV channels of a BGR image (currently boosts hue by 1.25)."""
        hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)  # convert image to HSV color space
        hsv = np.array(hsv, dtype=np.float64)
        hsv[:, :, 0] = hsv[:, :, 0] * 1.25  # scale pixel values up for channel 0
        hsv[:, :, 0][hsv[:, :, 0] > 255] = 255
        hsv[:, :, 1] = hsv[:, :, 1] * 1  # scale pixel values up for channel 1
        hsv[:, :, 1][hsv[:, :, 1] > 255] = 255
        hsv[:, :, 2] = hsv[:, :, 2] * 1  # scale pixel values up for channel 2
        hsv[:, :, 2][hsv[:, :, 2] > 255] = 255
        hsv = np.array(hsv, dtype=np.uint8)
        # converting back to BGR used by OpenCV
        img = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
        return img

    def convertToHSV(self, img):
        """Return the image as a float64 HSV array."""
        hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)  # convert image to HSV color space
        hsv = np.array(hsv, dtype=np.float64)
        return hsv

    def referencePicture(self):
        """Recalibrate red/orange references from fixed screen regions."""
        capture = cv.VideoCapture(0)
        ret, img = capture.read()
        # Left reference patch -> red.
        area = [(5, 150), (95, 210)]
        leftArea = img[area[0][1]:area[1][1]+1, area[0][0]:area[1][0]+1]
        redAverage = np.average(np.average(leftArea, axis=1), axis=0)
        # Right reference patch -> orange.
        area = [(505, 150), (595, 210)]
        rightArea = img[area[0][1]:area[1][1]+1, area[0][0]:area[1][0]+1]
        orangeAverage = np.average(np.average(rightArea, axis=1), axis=0)
        self.coreColors[0] = redAverage
        self.coreColors[1] = orangeAverage
        capture.release()

    def takePicture(self, name):
        """Capture one frame, save it to `name`, and save an overlay copy."""
        capture = cv.VideoCapture(0)
        ret, frame = capture.read()
        cv.imwrite(name, frame)
        self.largeBox(name)
        capture.release()

    def largeBox(self, name):
        """Save a copy of image `name` with the cube overlay drawn on it."""
        color = (255, 50, 50)
        edit = cv.imread(name, 1)
        edit = cv.rectangle(edit, self.startPoint, self.endPoint, color, 2)
        cv.imwrite(name[:-4] + "Edit" + name[-4:], edit)

    def smallBoxes(self, name, edit):
        """Save image `name` with all nine cubie boxes drawn, as `edit`."""
        orig = cv.imread(name, 1)
        color = (50, 255, 50)
        for c in self.cubies:
            orig = cv.rectangle(orig, c[0], c[1], color, 1)
        cv.imwrite(edit, orig)

    def drawBoxes(self):
        """Capture a debug picture with both overlays and print the boxes."""
        original = "./webcam/ToEdit.jpg"
        self.takePicture(original)
        self.largeBox(original)
        self.smallBoxes(original, "./webcam/smallBoxes.jpg")
        print(self.cubies)

    def cosine_similarity(self, a, b):
        """Cosine of the angle between pixel vectors a and b (1.0 = same hue)."""
        return np.dot(a, b) / (norm(a) * norm(b))

    def euclidean_similarity(self, a, b):
        """Euclidean distance between a and b (smaller = more similar)."""
        return sqrt(sum(pow(x - y, 2) for x, y in zip(a, b)))

    def dot_product_similarity(self, a, b):
        """Raw dot product of a and b."""
        return np.dot(a, b)

    def averages(self, file: str):
        """Return the average BGR pixel of each cubie box in image `file`."""
        img = cv.imread(file, 1)
        averages = []
        for area in self.cubies:
            subImg = img[area[0][1]:area[1][1]+1, area[0][0]:area[1][0]+1]
            avgPixel = np.average(np.average(subImg, axis=1), axis=0)
            averages.append(avgPixel)
        return averages

    def updateColors(self, color, pixel):
        """Fold `pixel` into the running reference sample for `color`.

        Prints the before/after values for calibration visibility. Unknown
        color letters are ignored.
        """
        if color not in self._COLOR_ATTRS:
            return
        attr, label = self._COLOR_ATTRS[color]
        current = getattr(self, attr)
        print("{} Updated:".format(label))
        print(current)
        # NOTE(review): averaging the running value with each new sample
        # weights recent samples exponentially (not a true mean) -- confirm
        # this is intended.
        updated = np.average(np.append([current], [pixel], axis=0), axis=0)
        setattr(self, attr, updated)
        print(updated, end="\n\n")

    def printColorAverages(self):
        """Print the current running reference sample for every color."""
        print("R: {}".format(self.reds))
        print("O: {}".format(self.oranges))
        print("Y: {}".format(self.yellows))
        print("G: {}".format(self.greens))
        print("B: {}".format(self.blues))
        print("W: {}".format(self.whites))

    def getColor(self, pixel):
        """Classify `pixel` (BGR) as one of 'roygbw' by cosine similarity.

        Also feeds the pixel back into the running reference average for the
        chosen color via updateColors().
        """
        colors = ["r", "o", "y", "g", "b", "w"]
        best_sim = 0  # renamed from `max` to avoid shadowing the builtin
        index = 0
        similarities = []
        for i, c in enumerate(self.coreColors):
            similarity = self.cosine_similarity(pixel, c)
            similarities.append(similarity)
            if similarity > best_sim:
                index = i
                best_sim = similarity
        returnColor = colors[index]
        self.updateColors(returnColor, pixel)
        return returnColor

    def reorderVals(self, vals):
        """Map 54 row-major sticker values to the solver's 48-facelet order.

        The six face centers (order value -1) are dropped; everything else is
        placed at its solver index.
        """
        final = [0 for i in range(48)]
        order = [4, 5, 6, 3, -1, 7, 2, 1, 0,
                 14, 15, 8, 13, -1, 9, 12, 11, 10,
                 20, 21, 22, 19, -1, 23, 18, 17, 16,
                 28, 29, 30, 27, -1, 31, 26, 25, 24,
                 32, 33, 34, 39, -1, 35, 38, 37, 36,
                 40, 41, 42, 47, -1, 43, 46, 45, 44]
        for i, val in enumerate(vals):
            if order[i] != -1:
                final[order[i]] = val
        return final

    def getValues(self, files):
        """Classify every sticker in the six face images.

        Args:
            files: image paths, one per face, in SIDE_ORDER capture order.

        Returns:
            (coreColors, values): the detected center color of each face and
            the 48 sticker values in solver order.
        """
        faceVals = []
        corePixels = {}
        coreColors = []
        for file in files:
            averages = self.averages(file)
            faceVals += averages
            # NOTE(review): assumes a path like './webcam/X....jpg' where the
            # character at index 9 is the side letter -- confirm with callers.
            corePixels[file[9]] = averages[4]
            color = self.getColor(averages[4])
            coreColors.append(color)
        print(corePixels)
        print(coreColors)
        # Re-key the color map from the observed face centers.
        self.updateColorOrientation(corePixels, coreColors)
        for i, val in enumerate(faceVals):
            key = self.getColor(val)
            val = self.colorKey[key]
            faceVals[i] = val  # JEFF: switch to KEY for debugging
        print(corePixels)
        print(faceVals)
        returnVals = self.reorderVals(faceVals)
        return coreColors, returnVals
|
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic.base import RedirectView
from .views import signup
# Project URL routing: auth flows come from django.contrib.auth, app routes
# are delegated via include(); static files are served from STATIC_ROOT.
urlpatterns = [
# Landing page: redirect to the researcher view.
path('', RedirectView.as_view(pattern_name='researcher:view'), name='home'),
path('admin/', admin.site.urls),
path('accounts/register', signup, name='register'),
# Django's default post-login target — forward it to the researcher view.
path('accounts/profile/', RedirectView.as_view(pattern_name='researcher:view')),
path('accounts/', include('django.contrib.auth.urls')),
path('r/', include('readinglist.urls')),
path('p/', include('project.urls')),
path('u/', include('researcher.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
# The MIT License (MIT)
#
# Copyright (c) 2015, Nicolas Sebrecht & contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from imapfw import runtime
from imapfw.concurrency.concurrency import *
class TestConcurrency(unittest.TestCase):
    """Smoke-tests for the concurrency backend exposed on `runtime`."""

    def setUp(self):
        # Worker payloads: one returns at once, one spins until killed.
        def _finish_immediately():
            pass

        def _spin_forever():
            while True:
                pass

        self.noop = _finish_immediately
        self.blocking = _spin_forever

    def test_00_concurrency_interface(self):
        self.assertIsInstance(runtime.concurrency, ConcurrencyInterface)

    def test_01_queue_interface(self):
        queue = runtime.concurrency.createQueue()
        self.assertIsInstance(queue, QueueInterface)

    def test_02_lock_interface(self):
        lock = runtime.concurrency.createLock()
        self.assertIsInstance(lock, LockBase)

    def test_03_worker_interface(self):
        worker = runtime.concurrency.createWorker('noop', self.noop, ())
        self.assertIsInstance(worker, WorkerInterface)

    def test_04_worker_start_join(self):
        worker = runtime.concurrency.createWorker('noop', self.noop, ())
        worker.start()
        self.assertEqual(worker.getName(), 'noop')
        worker.join()

    def test_05_worker_start_kill(self):
        worker = runtime.concurrency.createWorker('blocking', self.blocking, ())
        worker.start()
        self.assertEqual(worker.getName(), 'blocking')
        worker.kill()
# Allow running this test module directly; verbosity=2 lists each test case.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
from .cspdarknet import * # noqa F401
|
#!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""Lab1-a: generate layout on physical grid
Instructions
1. For BAG export, copy this file to BAG working directory and uncomment BAG portions
2. For GDS export, make sure laygo is visible in the working folder and prepare the layermap file of given
technology (usually be found in the technology lib folder)
3. modify metal and mpin list for given technology
"""
__author__ = "Jaeduk Han"
__maintainer__ = "Jaeduk Han"
__email__ = "jdhan@eecs.berkeley.edu"
__status__ = "Prototype"
import laygo
import numpy as np
import yaml
#import logging;logging.basicConfig(level=logging.DEBUG)
import os.path
# Load the technology configuration if present; otherwise fall back to the
# freePDK45 defaults so the lab runs out of the box.
if os.path.isfile("laygo_config.yaml"):
    with open("laygo_config.yaml", 'r') as stream:
        # BUG FIX: yaml.load() without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and can execute arbitrary objects; safe_load is
        # the correct call for a plain config file.
        techdict = yaml.safe_load(stream)
    tech = techdict['tech_lib']
    metal = techdict['metal_layers']
    pin = techdict['pin_layers']
    text = techdict['text_layer']
    prbnd = techdict['prboundary_layer']
    res = techdict['physical_resolution']
else:
    print("no config file exists. loading default settings")
    tech = "freePDK45"
    metal = [['metal0', 'donotuse'],
             ['metal1', 'drawing'],
             ['metal2', 'drawing'],
             ['metal3', 'drawing'],
             ['metal4', 'drawing'],
             ['metal5', 'drawing']]
    pin = [['text', 'drawing'],
           ['metal1', 'pin'],
           ['metal2', 'pin'],
           ['metal3', 'pin'],
           ['metal4', 'pin'],
           ['metal5', 'pin']]
    text = ['text', 'drawing']
    prbnd = ['prBoundary', 'drawing']
    res = 0.0025
#working library name
workinglib = 'laygo_working'
laygen = laygo.BaseLayoutGenerator(res=res) #res should be your minimum grid resolution
laygen.add_library(workinglib)
laygen.sel_library(workinglib)
#generation layout elements
# First example cell: one rect on metal1, and a single add_rect call on
# metal2 whose 3-D coordinate array draws two rects at once.
mycell = '_generate_example_1'
laygen.add_cell(mycell)
laygen.sel_cell(mycell)
laygen.add_rect(None, np.array([[0, 0], [1.0, 0.1]]), metal[1])
laygen.add_rect(None, np.array([[[0, 0], [0.1, 1.0]], [[1.0, 0], [1.1, 1]]]), metal[2])
# Second example cell: instantiate the first cell once plainly and once as a
# 2x3 mosaic with explicit spacing, then add a text label.
mycell2 = '_generate_example_2'
laygen.add_cell(mycell2)
laygen.sel_cell(mycell2)
laygen.add_inst(None, workinglib, mycell, xy=np.array([2, 0]), shape=np.array([1, 1]),
                spacing=np.array([1, 1]), transform='R0')
laygen.add_inst(None, workinglib, mycell, xy=np.array([0, 2]), shape=np.array([2, 3]),
                spacing=np.array([1, 2]), transform='R0')
laygen.add_text(None, 'text0', np.array([1, 1]), text)
# Dump a summary of the generated layout database.
laygen.display()
#bag export, if bag does not exist, gds export
# FIX: the deprecated `imp` module (removed in Python 3.12) was used only to
# probe for `bag` before importing it — the plain EAFP import below does the
# same job.  try/else keeps the export calls outside the try so an
# ImportError raised *inside* export_BAG can no longer silently fall through
# to the GDS branch after a partial BAG export.
try:
    import bag
except ImportError:
    #gds export
    print("export to GDS")
    laygen.sel_cell(mycell) #cell selection
    laygen.export_GDS('lab1_generated.gds', layermapfile=tech+".layermap") #change layermapfile
    laygen.sel_cell(mycell2)
    laygen.export_GDS('lab1_generated2.gds', cellname=[mycell, mycell2], layermapfile=tech+".layermap")
else:
    #bag export
    print("export to BAG")
    prj = bag.BagProject()
    laygen.sel_cell(mycell)
    laygen.export_BAG(prj, array_delimiter=['[', ']'])
    laygen.sel_cell(mycell2)
    laygen.export_BAG(prj, array_delimiter=['[', ']'])
import torch.nn as nn
import torch
import torch.nn.functional as F
class SelfAttention(nn.Module):
def __init__(self, input_size):
super().__init__()
self.input_linear = nn.Linear(input_size, 1, bias=False)
self.dot_scale = nn.Parameter(torch.Tensor(input_size).uniform_(1.0 / (input_size ** 0.5)))
def forward(self, input, memory, mask):
input_dot = self.input_linear(input)
cross_dot = torch.bmm(input * self.dot_scale, memory.permute(0, 2, 1).contiguous())
att = input_dot + cross_dot
att = att - 1e30 * (1 - mask[:, None])
weight_one = F.softmax(att, dim=-1)
output_one = torch.bmm(weight_one, memory)
return torch.cat([input, output_one], dim=-1)
|
import os
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image
def norm(img):
    """Scale an image by its Euclidean (L2) norm, yielding a 'unit vector'.

    The all-zero image is returned unchanged to avoid division by zero.
    """
    magnitude = np.linalg.norm(img)
    return img if magnitude == 0 else img.astype(np.float32) / magnitude
def rgb2gray(img):
    """Collapse an RGB image to grayscale using ITU-R 709 luma weights."""
    luma_weights = [0.2125, 0.7154, 0.0721]
    return np.dot(img, luma_weights).astype(np.uint8)
def gray2rgb(img):
    """Expand a 2-D grayscale image to 3 identical RGB channels.

    Images that already have a channel axis are passed through untouched.
    """
    if img.ndim == 2:
        return np.repeat(img[..., np.newaxis], 3, axis=2)
    return img
def rgb2hsv(img):
    """Use matplotlib to convert rgb to hsv (TA allowed), keeping uint8 range."""
    unit = img.astype(np.float32) / 255.
    return (rgb_to_hsv(unit) * 255).astype(np.uint8)
def hsv2rgb(img):
    """Use matplotlib to convert hsv to rgb (TA allowed), keeping uint8 range."""
    unit = img.astype(np.float32) / 255.
    return (hsv_to_rgb(unit) * 255).astype(np.uint8)
# from http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
def histogram_equalization(img, bins=128, clip_limit=None):
    """ Use histogram equalization to balance contrast over the entire image """
    # Histogram over every pixel/channel flattened together.
    hist, hist_bins = np.histogram(img.flatten(), bins)
    if clip_limit is not None: # Clip and redistribute (simplified)
        clip_mask = (hist < clip_limit)
        # NOTE(review): this "excess" sum also subtracts clip_limit for every
        # below-limit bin, so distr can go negative — a simplification of true
        # CLAHE redistribution; confirm this matches the intended behaviour.
        distr = np.sum(hist * (1 - clip_mask) - clip_limit) / np.sum(clip_mask)
        hist = np.clip(hist + distr * clip_mask, 0, clip_limit)
    # Cumulative distribution, scaled so the output spans 0..255.
    cdf = hist.cumsum()
    cdf = 255 * cdf / cdf[-1]
    # Map each pixel through the CDF (linear interpolation between bin edges).
    equalized = np.interp(img.flatten(), hist_bins[:-1], cdf)
    return equalized.reshape(img.shape)
# NOTE: tile-wise equalization with no interpolation between tiles, so the
# result is blocky compared to real CLAHE; kept as an approximation.
def CLAHE(img, clip_limit=2.0, tile_size=(8,8), bins=128):
    """ Balance contrast locally over an image using tiling approximation """
    n, m = img.shape[:2]
    u, v = tile_size
    output = np.zeros(img.shape)
    for r in range(max(1, (n-1) // u + 1)):      # ceil(n/u) tile rows
        for c in range(max(1, (m-1) // v + 1)):  # ceil(m/v) tile columns
            end_r = min(n, (r+1)*u)
            end_c = min(m, (c+1)*v)
            # BUG FIX: the column start must use the column tile size v
            # (was c*u, which mis-sliced the input whenever u != v).
            output[r*u:end_r, c*v:end_c] = histogram_equalization(
                img[r*u:end_r, c*v:end_c], bins=bins, clip_limit=clip_limit)
    return output
def binary_dilation(img, iterations=1):
    """Dilate a boolean mask with a square structuring element.

    Implemented as `iterations` one-pixel shifts OR-ed in from each of the
    four directions in turn (left, right, up, down).
    """
    n, m = img.shape[:2]
    grown = np.copy(img)
    shifts = (
        ((slice(None), slice(None, m - 1)), (slice(None), slice(1, None))),  # left
        ((slice(None), slice(1, None)), (slice(None), slice(None, m - 1))),  # right
        ((slice(None, n - 1),), (slice(1, None),)),                          # up
        ((slice(1, None),), (slice(None, n - 1),)),                          # down
    )
    for dst, src in shifts:
        for _ in range(iterations):
            grown[dst] |= grown[src]
    return grown
def binary_erosion(img, iterations=1):
    """Erode a boolean mask with a square structuring element.

    Implemented as `iterations` one-pixel shifts AND-ed in from each of the
    four directions in turn (left, right, up, down).
    """
    n, m = img.shape[:2]
    shrunk = np.copy(img)
    shifts = (
        ((slice(None), slice(None, m - 1)), (slice(None), slice(1, None))),  # left
        ((slice(None), slice(1, None)), (slice(None), slice(None, m - 1))),  # right
        ((slice(None, n - 1),), (slice(1, None),)),                          # up
        ((slice(1, None),), (slice(None, n - 1),)),                          # down
    )
    for dst, src in shifts:
        for _ in range(iterations):
            shrunk[dst] &= shrunk[src]
    return shrunk
def gray_dilation(img, iterations=1):
    """Dilate a grayscale image with a square structuring element.

    Color input is first flattened to gray by a per-pixel channel max; the
    result is always returned as a 3-channel (still gray) image.
    """
    out = np.max(img, axis=2) if img.ndim == 3 else np.copy(img)
    n, m = img.shape[:2]
    shifts = (
        ((slice(None), slice(None, m - 1)), (slice(None), slice(1, None))),  # left
        ((slice(None), slice(1, None)), (slice(None), slice(None, m - 1))),  # right
        ((slice(None, n - 1),), (slice(1, None),)),                          # up
        ((slice(1, None),), (slice(None, n - 1),)),                          # down
    )
    for dst, src in shifts:
        for _ in range(iterations):
            # In-place elementwise max against the shifted copy.
            np.maximum(out[dst], out[src], out[dst])
    return gray2rgb(out)
def gray_erosion(img, iterations=1):
    """Erode a grayscale image with a square structuring element.

    Color input is first flattened to gray by a per-pixel channel max; the
    result is always returned as a 3-channel (still gray) image.
    """
    out = np.max(img, axis=2) if img.ndim == 3 else np.copy(img)
    n, m = img.shape[:2]
    shifts = (
        ((slice(None), slice(None, m - 1)), (slice(None), slice(1, None))),  # left
        ((slice(None), slice(1, None)), (slice(None), slice(None, m - 1))),  # right
        ((slice(None, n - 1),), (slice(1, None),)),                          # up
        ((slice(1, None),), (slice(None, n - 1),)),                          # down
    )
    for dst, src in shifts:
        for _ in range(iterations):
            # In-place elementwise min against the shifted copy.
            np.minimum(out[dst], out[src], out[dst])
    return gray2rgb(out)
def gray_opening(img, size=1):
    """Morphological opening (erosion followed by dilation) of a gray image."""
    eroded = gray_erosion(img, iterations=size)
    return gray_dilation(eroded, iterations=size)
def gray_closing(img, size=1):
    """Morphological closing (dilation followed by erosion) of a gray image."""
    dilated = gray_dilation(img, iterations=size)
    return gray_erosion(dilated, iterations=size)
def white_tophat(img, size=1):
    """White-tophat transform: the input minus its morphological opening.

    NOTE(review): subtraction on uint8 inputs wraps around; confirm callers
    rely on img >= opening or tolerate the wrap.
    """
    opened = gray_opening(img, size=size)
    return img - opened
def black_tophat(img, size=1):
    """Black-tophat transform: the morphological closing minus the input.

    NOTE(review): subtraction on uint8 inputs wraps around; confirm callers
    rely on closing >= img or tolerate the wrap.
    """
    closed = gray_closing(img, size=size)
    return closed - img
def correlate(A, B, step=1):
    """ Correlates image B over image A. Assumes B is normalized """
    u, v = B.shape[:2]
    n, m = A.shape[:2]
    # Zero-pad so the output keeps A's spatial size.
    pad_r, pad_c = u // 2, v // 2
    padded = np.pad(A, [(pad_r, pad_r), (pad_c, pad_c), (0, 0)], mode='constant')
    scores = np.zeros((n, m))
    for r in range(0, n, step):
        for c in range(0, m, step):
            # Normalize each window so the dot product acts as a cosine score;
            # with step > 1 each score fills a step x step block.
            window = norm(padded[r:r + u, c:c + v])
            scores[r:r + step, c:c + step] = np.vdot(window, B)
    return scores
def conv2d(img, filter):
    """Slide a filter over a grayscale image (no kernel flip, i.e. this is
    cross-correlation), zero-padding so the output matches the input size.

    Raises ValueError unless both filter dimensions are odd.
    """
    k1, k2 = filter.shape[:2]
    n, m = img.shape[:2]
    if k1 % 2 == 0 or k2 % 2 == 0:
        raise ValueError("Filter should have odd dimensions")
    pad_r, pad_c = k1 // 2, k2 // 2
    padded = np.pad(img, [(pad_r, pad_r), (pad_c, pad_c)], mode='constant')
    out = np.zeros(img.shape)
    for r in range(n):
        for c in range(m):
            out[r, c] = np.sum(filter * padded[r:r + k1, c:c + k2], axis=(0, 1))
    return out
def get_boxes(mask, radius=3, mode="square"):
    """ Extracts bounding boxes from a binary mask

    'mode' can be either "square" or "circular" for how neighbors are selected.
    The effect of this parameter is more apparent at the edges.
    Returns a list of [top, left, bottom, right] boxes, one per blob of set
    pixels (pixels within `radius` of each other count as connected).
    """
    # Generate the neighbor offsets used by the flood fill.
    neighbors = []
    if mode == "square":
        neighbors = [(r, c) for r in range(-radius+1, radius)
                     for c in range(-radius+1, radius)]
    elif mode == "circular":
        # BUG FIX: the distance test used r*r + c*r, which is neither a circle
        # nor symmetric; the squared Euclidean distance needs c*c.
        neighbors = [(r, c) for r in range(-radius+1, radius)
                     for c in range(-radius+1, radius)
                     if r*r + c*c <= radius*radius]
    else:
        raise ValueError("Unrecognized neighbor mode")
    neighbors = np.array(neighbors, dtype=np.int16)
    num_neighbors = neighbors.shape[0]
    # Iterative DFS (LIFO queue) to find all objects.
    n, m = mask.shape[:2]
    # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is correct.
    not_visited = mask.astype(bool)
    # NOTE(review): int16 coords overflow past 32767 px, and a large dense
    # blob can outgrow this fixed-size queue (the slice assignment below then
    # raises ValueError) — acceptable for the small masks this project scans?
    queue = np.zeros((mask.size, 2), dtype=np.int16)
    i = 0  # Stack pointer: one past the last queued element
    boxes = []
    while not_visited.any():
        # Seed the fill from the first remaining set pixel.
        queue[0] = np.argwhere(not_visited)[0]
        i = 1
        y1 = n; x1 = m; y2 = 0; x2 = 0  # Initialize bounding box
        while i > 0:
            i -= 1
            y, x = queue[i]
            in_bounds = (0 <= x < m) and (0 <= y < n)
            # This pixel is set, so propagate
            if in_bounds and not_visited[y, x]:
                y1 = min(y1, y); x1 = min(x1, x)
                y2 = max(y2, y); x2 = max(x2, x)
                not_visited[y, x] = False  # Stop future propagation
                # Push all neighbors of the current pixel.
                queue[i:i+num_neighbors] = queue[i] + neighbors
                i += num_neighbors
        # Save bounding box of this object
        boxes.append([int(y1), int(x1), int(y2), int(x2)])
    return boxes
def draw_boxes(img, bounding_boxes):
    """Draw blue rectangles for the given boxes on a copy of the image.

    Boxes are (top, left, bottom, right); the original image is untouched.
    """
    canvas = np.copy(img)
    for top, left, bottom, right in bounding_boxes:
        walls = (
            (slice(top, bottom), left),    # left wall
            (slice(top, bottom), right),   # right wall
            (top, slice(left, right)),     # top wall
            (bottom, slice(left, right)),  # bottom wall
        )
        for rows, cols in walls:
            canvas[rows, cols, 0:2] = 90   # dim red/green (adds brightness)
            canvas[rows, cols, 2] = 255    # paint blue
    return canvas
def load_filters(lights_path):
    """Load every template image under lights_path, equalize and normalize it.

    Returns (filters, compound_filter): the normalized templates, plus one
    tall image stacking the equalized originals (left-padded to the widest
    template, 10 blank rows between entries) for visualization.
    """
    filters = []
    originals = []
    for f_name in os.listdir(lights_path):
        raw = np.asarray(Image.open(os.path.join(lights_path, f_name)))
        equalized = histogram_equalization(raw, clip_limit=2)
        originals.append(equalized.astype(np.uint8))
        filters.append(norm(equalized))
    # Build the compound visualization image.
    max_width = max(x.shape[1] for x in originals)
    compound_filter = np.concatenate(
        [np.pad(x, [(0, 10), (max_width - x.shape[1], 0), (0, 0)], mode='constant')
         for x in originals], 0)
    return filters, compound_filter
# https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
    """
    fraction = iteration / float(total)
    percent = ("{0:." + str(decimals) + "f}").format(100 * fraction)
    done = int(length * iteration // total)
    bar = fill * done + '-' * (length - done)
    # \r rewinds to the line start so successive calls redraw in place.
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
    if iteration == total:
        print()  # move to a fresh line once the bar is complete
class LazyLoadProxy(object):
    # Taken from http://code.activestate.com/recipes/496741-object-proxying/
    """Transparent lazy proxy: `fn` is a zero-argument callable returning the
    real target, and attribute/item/special-method access is forwarded to
    `fn()`'s result on every use (the target is re-fetched each time).

    __new__ always instantiates a generated subclass (see _create_class_proxy),
    so instances in practice have a __dict__ despite __slots__ here.
    """
    __slots__ = ["_obj_fn", "__weakref__", "__proxy_storage"]
    def __init__(self, fn, storage=None):
        # object.__setattr__ bypasses our own __setattr__, which would
        # otherwise forward these writes to the proxied object.
        object.__setattr__(self, "_obj_fn", fn)
        object.__setattr__(self, "__proxy_storage", storage)
    def __getattribute__(self, name):
        # Forward every attribute read to the freshly fetched target.
        return getattr(object.__getattribute__(self, "_obj_fn")(), name)
    def __delattr__(self, name):
        delattr(object.__getattribute__(self, "_obj_fn")(), name)
    def __setattr__(self, name, value):
        setattr(object.__getattribute__(self, "_obj_fn")(), name, value)
    def __getitem__(self, index):
        return object.__getattribute__(self, "_obj_fn")().__getitem__(index)
    def __nonzero__(self):
        # Python 2 truth protocol — NOTE(review): Python 3 uses __bool__, so
        # on Python 3 truthiness falls back to __len__/default; confirm intent.
        return bool(object.__getattribute__(self, "_obj_fn")())
    def __str__(self):
        return str(object.__getattribute__(self, "_obj_fn")())
    def __repr__(self):
        return repr(object.__getattribute__(self, "_obj_fn")())
    def __len__(self):
        return len(object.__getattribute__(self, "_obj_fn")())
    # Special methods are looked up on the type, not the instance, so they
    # must be defined on the generated proxy class for forwarding to work.
    # NOTE(review): '__rfloorfiv__' looks like a typo for '__rfloordiv__'.
    _special_names = [
        '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
        '__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
        '__eq__', '__float__', '__floordiv__', '__ge__', #'__getitem__',
        '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
        '__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
        '__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
        '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', #'__len__',
        '__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
        '__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
        '__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__',
        '__repr__', '__reversed__', '__rfloorfiv__', '__rlshift__', '__rmod__',
        '__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
        '__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
        '__truediv__', '__xor__', 'next',
    ]
    @classmethod
    def _create_class_proxy(cls, theclass):
        """creates a proxy for the given class"""
        def make_method(name):
            # Each generated method re-fetches the target and delegates.
            def method(self, *args, **kw):
                return getattr(object.__getattribute__(self, "_obj_fn")(), name)(
                    *args, **kw)
            return method
        namespace = {}
        # Only forward special methods the target class actually defines.
        for name in cls._special_names:
            if hasattr(theclass, name):
                namespace[name] = make_method(name)
        return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
    def __new__(cls, obj, *args, **kwargs):
        """
        creates an proxy instance referencing `obj`. (obj, *args, **kwargs) are
        passed to this class' __init__, so deriving classes can define an
        __init__ method of their own.
        note: _class_proxy_cache is unique per deriving class (each deriving
        class must hold its own cache)
        """
        try:
            cache = cls.__dict__["_class_proxy_cache"]
        except KeyError:
            cls._class_proxy_cache = cache = {}
        try:
            theclass = cache[obj.__class__]
        except KeyError:
            # Build (and memoize) one proxy subclass per proxied class.
            cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
        ins = object.__new__(theclass)
        theclass.__init__(ins, obj, *args, **kwargs)
        return ins
class Proxy(LazyLoadProxy):
    # Taken from http://code.activestate.com/recipes/496741-object-proxying/
    # Eager variant of LazyLoadProxy: wraps an already-constructed object by
    # closing over it, so every access forwards to that fixed instance.
    def __init__(self, obj):
        super(Proxy, self).__init__(lambda: obj)
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def tools_list(request):
    """Render the static list-of-tools page."""
    template_name = 'tools/tools_list.html'
    return render(request, template_name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.