content stringlengths 5 1.05M |
|---|
from abc import ABCMeta, abstractmethod
from scipy.stats import f_oneway
from scipy.stats import kruskal
from scipy.stats import levene
from scipy.stats import fligner
from scipy.stats import bartlett
from oeda.log import error
from oeda.analysis import Analysis
class NSampleTest(Analysis):
    """Base class for analyses that operate on two or more samples."""

    def run(self, data, knobs):
        # These tests are only meaningful with at least two groups.
        if len(data) < 2:
            error("Cannot run " + self.name + " on less than two samples.")
            return False
        # Materialize one list of observations per configured stage.
        self.y = [list(data[i]) for i in range(len(self.stage_ids))]
        return True
class DifferentDistributionsTest(NSampleTest):
    """Abstract n-sample test for whether samples come from different
    distributions, at significance level ``alpha``.
    """

    __metaclass__ = ABCMeta

    def __init__(self, stage_ids, y_key, alpha=0.05):
        super(DifferentDistributionsTest, self).__init__(stage_ids, y_key)
        self.alpha = alpha

    def run(self, data, knobs):
        if not super(DifferentDistributionsTest, self).run(data, knobs):
            error("Aborting analysis.")
            return
        statistic, pvalue = self.get_statistic_and_pvalue(self.y)
        # Reject the null hypothesis when p <= alpha.
        return {
            "statistic": statistic,
            "pvalue": pvalue,
            "alpha": self.alpha,
            "different_distributions": bool(pvalue <= self.alpha),
        }

    @abstractmethod
    def get_statistic_and_pvalue(self, args):
        """Specific to each different-distribution test."""
        pass
class OneWayAnova(DifferentDistributionsTest):
    """Test the null hypothesis that two or more groups share the same
    population mean.

    ANOVA assumes: (1) independent samples, (2) normally distributed
    populations, (3) equal population standard deviations
    (homoscedasticity). When these do not hold, Kruskal-Wallis may still
    be usable, with some loss of power.
    """

    name = "one-way-anova"

    def get_statistic_and_pvalue(self, args):
        # Delegate to scipy's one-way ANOVA over the per-stage samples.
        return f_oneway(*args)
class KruskalWallis(DifferentDistributionsTest):
    """Test the null hypothesis that all groups share the same population
    median — the non-parametric counterpart of one-way ANOVA.
    """

    name = "kruskal-wallis"

    def get_statistic_and_pvalue(self, args):
        # Delegate to scipy's Kruskal-Wallis H-test.
        return kruskal(*args)
class EqualVarianceTest(NSampleTest):
    """Abstract n-sample test for equality of variances, at significance
    level ``alpha``.
    """

    __metaclass__ = ABCMeta

    def __init__(self, stage_ids, y_key, alpha=0.05):
        super(EqualVarianceTest, self).__init__(stage_ids, y_key)
        self.alpha = alpha

    def run(self, data, knobs):
        if not super(EqualVarianceTest, self).run(data, knobs):
            error("Aborting analysis.")
            return
        statistic, pvalue = self.get_statistic_and_pvalue(self.y)
        # Reject the equal-variance null hypothesis when p <= alpha.
        return {
            "statistic": statistic,
            "pvalue": pvalue,
            "alpha": self.alpha,
            "not_equal_variance": bool(pvalue <= self.alpha),
        }

    @abstractmethod
    def get_statistic_and_pvalue(self, args):
        """Specific to each equal-variance test."""
        pass
class Levene(EqualVarianceTest):
    """Test the null hypothesis that all input samples come from
    populations with equal variances. Parametric, but robust to
    deviations from normality.
    """

    name = "levene"

    def get_statistic_and_pvalue(self, y):
        # Mean-centered variant of Levene's test (scipy default is median).
        return levene(*y, center="mean")
class Bartlett(EqualVarianceTest):
    """Test the null hypothesis that all input samples come from
    populations with equal variances. Parametric; appropriate for normal
    populations. For significantly non-normal populations, Levene's test
    is more robust.
    """

    name = "bartlett"

    def get_statistic_and_pvalue(self, y):
        # Delegate to scipy's Bartlett test.
        return bartlett(*y)
class FlignerKilleen(EqualVarianceTest):
    """Test the null hypothesis that all input samples come from
    populations with equal variances. Non-parametric; distribution-free
    when the populations are identical.
    """

    name = "fligner-killeen"

    def get_statistic_and_pvalue(self, y):
        # Mean-centered variant (scipy default is median).
        return fligner(*y, center="mean")
|
from django.urls import path, re_path
from dashboard import views
# NOTE(review): 'spin' is imported but not referenced in this module —
# confirm it is needed (e.g. for side effects) before removing.
from daru_wheel.temp_views import spin
# from daruwheel import views as spinview

# URL namespace for reversing, e.g. reverse('dashboard:index').
app_name = "dashboard"

urlpatterns = [
    # Catch-all for any *.html path; must stay first so it wins over the
    # more specific routes below.
    re_path(r"^.*\.html", views.pages, name="pages"),
    # The home page
    path("", views.index, name="index"),
    # path('', views.index, name='index'),
    # NOTE(review): "deposit_withraw" looks like a typo for
    # "deposit_withdraw", but the route, view name and URL name all use
    # this spelling consistently — renaming would break existing links.
    path("deposit_withraw", views.deposit_withraw, name="deposit_withraw"),
    path("affiliate", views.affiliate, name="affiliate"),
    # path("maps", views.maps, name="maps"),
    # path("topo", views.topo, name="topo"),
    # path("support", views.support, name="support"),
]
|
# -*- coding: utf-8 -*-
# file: BERT_ASPECT.py
# author: xiangpan <xiangpan.cs@gmail.com>
# Copyright (C) 2019. All Rights Reserved.
import torch
import torch.nn as nn
# TD-BERT
# TD-BERT
class BERT_ASPECT(nn.Module):
    """Aspect-level classifier: runs BERT, gathers the embeddings of the
    aspect tokens for each example, and maps them through dropout and a
    linear head to ``opt.polarities_dim`` logits.
    """

    def __init__(self, bert, opt):
        super(BERT_ASPECT, self).__init__()
        self.bert = bert  # pretrained BERT encoder
        self.opt = opt
        # NOTE(review): MaxPool1d with kernel size 1 is an identity over
        # the pooled dimension — presumably a larger kernel (or pooling
        # across the aspect tokens) was intended; confirm against the
        # original TD-BERT formulation.
        self.max_pool = nn.MaxPool1d(1)
        self.dropout = nn.Dropout(opt.dropout)
        self.dense = nn.Linear(opt.bert_dim, opt.polarities_dim)

    def forward(self, inputs):
        # inputs[0..3]: token indices, segment ids, aspect position in the
        # text, and aspect length, one entry per batch element.
        bert_aspect_indices, bert_aspect_segments_ids, aspect_in_text, aspect_len = inputs[0], inputs[1], inputs[2], inputs[3]
        # word_output: per-token embeddings; pooled_output from BERT is
        # discarded and rebuilt from the aspect tokens below.
        word_output, pooled_output = self.bert(bert_aspect_indices, bert_aspect_segments_ids)
        all_polled_eb = []
        for i in range(len(word_output)):
            # Select the embeddings of the aspect's tokens; the +1 offset
            # presumably skips the [CLS] token — TODO confirm.
            aspect_eb_i = word_output[i].index_select(0, torch.LongTensor(range(aspect_in_text[i][0].item() + 1, aspect_in_text[i][0].item() + 1 + aspect_len[i])).to(self.opt.device))
            aspect_eb_i = aspect_eb_i.unsqueeze(0)
            max_pooled = self.max_pool(aspect_eb_i)
            max_pooled = max_pooled.squeeze(0)
            # NOTE(review): with kernel size 1 above, this keeps the first
            # aspect-token embedding rather than a max over tokens.
            all_polled_eb.append(max_pooled[0])
        pooled_output = torch.stack(all_polled_eb, 0)
        pooled_output = self.dropout(pooled_output)
        logits = self.dense(pooled_output)
        return logits
|
"""AR-specific Form helpers."""
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.translation import gettext_lazy as _
from stdnum.ar import cbu
from stdnum.exceptions import InvalidLength, InvalidChecksum, ValidationError as StdnumValidationError
from .ar_provinces import PROVINCE_CHOICES
class ARProvinceSelect(Select):
    """A ``Select`` widget pre-populated with the Argentinean provinces
    and autonomous cities as its choices.
    """

    def __init__(self, attrs=None):
        # Choices are fixed; only the HTML attributes are configurable.
        super().__init__(attrs, choices=PROVINCE_CHOICES)
class ARPostalCodeField(RegexField):
    """
    A field that accepts a 'classic' NNNN Postal Code or a CPA.

    See:

    * http://www.correoargentino.com.ar/cpa/que_es
    * http://www.correoargentino.com.ar/cpa/como_escribirlo
    """

    default_error_messages = {
        'invalid': _("Enter a postal code in the format NNNN or ANNNNAAA."),
    }

    def __init__(self, max_length=8, min_length=4, **kwargs):
        # Either exactly four digits, or a CPA: letter (I and O excluded),
        # four digits, three non-digits.
        super().__init__(
            r'^\d{4}$|^[A-HJ-NP-Za-hj-np-z]\d{4}\D{3}$',
            max_length=max_length, min_length=min_length, **kwargs
        )

    def clean(self, value):
        """Normalize a CPA to upper case; pass classic codes through."""
        value = super().clean(value)
        if value in self.empty_values:
            return self.empty_value
        if len(value) == 4:
            # Classic NNNN code needs no normalization.
            return value
        if len(value) != 8:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        # CPA: upper-case the leading letter and the trailing three letters.
        return '%s%s%s' % (value[0].upper(), value[1:5], value[5:].upper())
class ARDNIField(CharField):
    """A field that validates 'Documento Nacional de Identidad' (DNI) numbers."""

    default_error_messages = {
        'invalid': _("This field requires only numbers."),
        'max_digits': _("This field requires 7 or 8 digits."),
    }

    def __init__(self, max_length=10, min_length=7, **kwargs):
        super().__init__(max_length=max_length, min_length=min_length, **kwargs)

    def clean(self, value):
        """Value can be a string either in the [X]X.XXX.XXX or [X]XXXXXXX formats."""
        value = super().clean(value)
        if value in self.empty_values:
            return self.empty_value
        # Strip thousands separators from the dotted format, then insist
        # on a pure digit string.
        if not value.isdigit():
            value = value.replace('.', '')
        if not value.isdigit():
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        if len(value) not in (7, 8):
            raise ValidationError(self.error_messages['max_digits'], code='max_digits')
        return value
class ARCUITField(RegexField):
    """
    This field validates a CUIT (Código Único de Identificación Tributaria).

    A CUIT is of the form XX-XXXXXXXX-V. The last digit is a check digit.

    More info:
    http://es.wikipedia.org/wiki/Clave_%C3%9Anica_de_Identificaci%C3%B3n_Tributaria
    Info in English:
    http://www.justlanded.com/english/Argentina/Argentina-Guide/Visas-Permits/Other-Legal-Documents

    .. versionchanged:: 2.1
        ``ARCUITField`` now also accepts CUIT with prefix 34.
    """

    default_error_messages = {
        'invalid': _('Enter a valid CUIT in XX-XXXXXXXX-X or XXXXXXXXXXXX format.'),
        'checksum': _("Invalid CUIT."),
        'legal_type': _('Invalid legal type. Type must be 27, 20, 30, 23, 24, 33 or 34.'),
    }

    def __init__(self, **kwargs):
        # Dashes between the three groups are optional.
        super().__init__(r'^\d{2}-?\d{8}-?\d$', **kwargs)

    def clean(self, value):
        """Value can be either a string in the format XX-XXXXXXXX-X or an 11-digit number."""
        value = super().clean(value)
        if value in self.empty_values:
            return self.empty_value
        value, cd = self._canon(value)
        # The two-digit prefix encodes the legal type of the holder.
        if value[:2] not in ('27', '20', '30', '23', '24', '33', '34'):
            raise ValidationError(self.error_messages['legal_type'], code='legal_type')
        if self._calc_cd(value) != cd:
            raise ValidationError(self.error_messages['checksum'], code='checksum')
        return self._format(value, cd)

    def _canon(self, cuit):
        """Strip dashes and split into (body, check digit)."""
        cuit = cuit.replace('-', '')
        return cuit[:-1], cuit[-1]

    def _calc_cd(self, cuit):
        """Compute the check digit for the 10-digit CUIT body.

        Calculation based on:
        http://es.wikipedia.org/wiki/C%C3%B3digo_%C3%9Anico_de_Identificaci%C3%B3n_Tributaria
        """
        mults = (5, 4, 3, 2, 7, 6, 5, 4, 3, 2)
        total = sum(m * int(d) for m, d in zip(mults, cuit))
        result = 11 - (total % 11)
        # 11 and 10 are mapped to single digits per the official scheme.
        if result == 11:
            result = 0
        elif result == 10:
            result = 9
        return str(result)

    def _format(self, cuit, check_digit=None):
        """Render as the canonical dashed XX-XXXXXXXX-X form."""
        if check_digit is None:
            check_digit = cuit[-1]
            cuit = cuit[:-1]
        return '%s-%s-%s' % (cuit[:2], cuit[2:], check_digit)
class ARCBUField(CharField):
    """
    This field validates a CBU (Clave Bancaria Uniforme).

    A CBU is a 22-digits long number. The first 8 digits denote bank and
    branch number, plus a verifying digit. The remaining 14 digits denote
    an account number, plus a verifying digit.

    More info:
    https://es.wikipedia.org/wiki/Clave_Bancaria_Uniforme

    .. versionadded:: 1.3
    .. versionchanged:: 3.0
    """

    default_error_messages = {
        'invalid': _('Enter a valid CBU in XXXXXXXXXXXXXXXXXXXXXX format.'),
        'max_length': _('CBU must be exactly 22 digits long.'),
        'min_length': _('CBU must be exactly 22 digits long.'),
        'checksum': _('Invalid CBU.'),
    }

    def clean(self, value):
        """Value must be a 22 digits long number."""
        value = super().clean(value)
        if value in self.empty_values:
            return self.empty_value
        # Delegate validation to python-stdnum, translating its specific
        # exceptions into this field's error messages. The subclass
        # exceptions must be caught before the generic one.
        try:
            return cbu.validate(value)
        except InvalidLength:
            raise ValidationError(self.error_messages['max_length'], code='max_length')
        except InvalidChecksum:
            raise ValidationError(self.error_messages['checksum'], code='checksum')
        except StdnumValidationError:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
|
"""Plot train/val loss curves parsed from Keras-style training logs."""
import matplotlib.pyplot as plt
import re

# Truncate every run to the same number of epochs so curves are comparable.
NUM_EPOCHS = 48


def read_losses(path, limit=NUM_EPOCHS):
    """Return (train_losses, val_losses) parsed from the log file at *path*.

    NOTE(review): the pattern only matches losses below 1.0 (``0.xxx``),
    same as the original — confirm losses never exceed 1 in these logs.
    """
    with open(path, 'r') as f:
        data = f.read()
    train = [float(x) for x in re.findall(r" loss: (0\.\d+)", data)][:limit]
    val = [float(x) for x in re.findall(r" val_loss: (0\.\d+)", data)][:limit]
    return train, val


train_loss, val_loss = read_losses('logs_baseline.txt')
train_loss_vgg, val_loss_vgg = read_losses('logs_vgg.txt')

# The original passed the "r--" format string together with a color kwarg;
# the kwarg overrides the format's color, so only the dashed linestyle
# mattered. Spell that out explicitly.
plt.plot(train_loss, linestyle="--", color="r", label='Train Loss')
plt.plot(val_loss, linestyle="--", color="b", label='Val Loss')
plt.plot(train_loss_vgg, color="g", label='Train Loss pretrained vgg')
plt.plot(val_loss_vgg, color="m", label='Val Loss pretrained vgg')
plt.title("Log Loss")
plt.legend()
plt.show()
# Re-export the Mask R-CNN building blocks under this package's namespace.
# import icevision.models.rcnn.backbones as backbones
from icevision.models.rcnn import backbones
from icevision.models.rcnn.loss_fn import *
from icevision.models.rcnn.mask_rcnn.dataloaders import *
from icevision.models.rcnn.mask_rcnn.model import *
from icevision.models.rcnn.mask_rcnn.prediction import *
from icevision.models.rcnn.mask_rcnn.show_results import *

# Soft dependencies: the fastai / pytorch-lightning adapters are only
# imported when the corresponding optional package is installed.
from icevision.soft_dependencies import SoftDependencies

if SoftDependencies.fastai:
    import icevision.models.rcnn.mask_rcnn.fastai
if SoftDependencies.pytorch_lightning:
    import icevision.models.rcnn.mask_rcnn.lightning
|
# MQTT-to-Kinesis Bridge
from __future__ import print_function
import json
import argparse
import boto
import paho.mqtt.client as paho
from argparse import RawTextHelpFormatter
from boto.kinesis.exceptions import ResourceNotFoundException
# To preclude inclusion of aws keys into this code, you may temporarily add
# your AWS credentials to the file:
# ~/.boto
# as follows:
# [Credentials]
# aws_access_key_id = <your access key>
# aws_secret_access_key = <your secret key>
def get_stream(stream_name):
    """Describe the given Kinesis stream and return the description.

    Prints the stream description as indented JSON on success. Returns
    ``None`` (after printing a diagnostic) when the stream is not found.

    Args:
        stream_name: name of the Kinesis stream to describe.
    """
    stream = None
    try:
        stream = kinesis.describe_stream(stream_name)
        print(json.dumps(stream, sort_keys=True, indent=2,
                         separators=(',', ': ')))
    except ResourceNotFoundException as rnfe:
        # BUG FIX: ``rnfe.message`` does not exist on Python 3 (and was
        # deprecated on Python 2.6+); formatting the exception itself
        # uses str() and works on both.
        print('Could not find ACTIVE stream:{0} error:{1}'.format(
            stream_name, rnfe))
    return stream
def sum_posts(kinesis_actors):
    """Sum all posts across an array of KinesisPosters.

    Args:
        kinesis_actors: iterable of objects exposing a numeric
            ``total_records`` attribute.

    Returns:
        The total number of records posted across all actors.
    """
    # Builtin sum() over a generator replaces the manual accumulator loop.
    return sum(actor.total_records for actor in kinesis_actors)
class MQTTKinesisBridge(object):
    """A Bridge that subscribes to a topic and repeatedly posts messages
    as records to shards in the given Kinesis stream. Each record will post
    to the Kinesis stream with the topic_name as the stream partition key.
    """

    def __init__(self, mqtt_host, kinesis_stream, mqtt_port=1883,
                 mqtt_keepalive=60, mqtt_bind_address='', mqtt_topic_name='#',
                 quiet=False):
        """Configure the MQTT client and Kinesis posting state.

        Args:
            mqtt_host: MQTT broker hostname.
            kinesis_stream: name of the target Kinesis stream.
            mqtt_port: broker port (default 1883).
            mqtt_keepalive: MQTT keepalive interval in seconds.
            mqtt_bind_address: local address to bind the client socket to.
            mqtt_topic_name: topic filter to subscribe to ('#' = all).
            quiet: when True, suppress per-record sequence-number output.
        """
        self.mqtt_host = mqtt_host
        self.mqtt_port = mqtt_port
        self.mqtt_keepalive = mqtt_keepalive
        self.mqtt_bind_address = mqtt_bind_address
        self.mqtt_topic_name = mqtt_topic_name
        # Wire this bridge's handlers into the paho MQTT client.
        self.client = paho.Client()
        self.client.on_message = self.on_message
        self.client.on_connect = self.on_connect
        self._pending_records = []
        self.stream_name = kinesis_stream
        self.quiet = quiet
        self.sleep_interval = 5
        self.total_records = 0

    def add_records(self, records):
        """Add given records to the Poster's pending records list."""
        self._pending_records.extend(records)

    def put_all_records(self, partition_key='mqttkb'):
        """Put all pending records in the Kinesis stream.

        Returns the number of records sent.
        """
        # Swap out the pending list first so new arrivals are not lost.
        precs = self._pending_records
        self._pending_records = []
        self.put_records(precs, partition_key)
        self.total_records += len(precs)
        return len(precs)

    def put_records(self, records, partition_key):
        """Put the given records in the Kinesis stream."""
        for record in records:
            response = kinesis.put_record(
                stream_name=self.stream_name,
                data=record, partition_key=partition_key)
            if self.quiet is False:
                print("-= put seqNum:", response['SequenceNumber'])

    def connect(self):
        """Start an asynchronous connection to the MQTT broker.

        The connection completes once the client's network loop runs
        (e.g. ``client.loop_forever()``).
        """
        print("Starting MQTT-to-Kinesis bridge")
        self.client.connect_async(host=self.mqtt_host,
                                  port=self.mqtt_port,
                                  keepalive=self.mqtt_keepalive,
                                  bind_address=self.mqtt_bind_address)

    def on_message(self, mqttc, userdata, msg):
        """MQTT callback: queue the payload and flush it to Kinesis,
        using the message topic as the partition key."""
        print('on_message topic: "{0}" msg.payload: "{1}"'.format(
            msg.topic,
            msg.payload)
        )
        self.add_records(records=[msg.payload])
        self.put_all_records(partition_key=msg.topic)

    def on_connect(self, mqttc, userdata, flags, msg):
        """MQTT callback: subscribe to the configured topic on connect."""
        rc = mqttc.subscribe(self.mqtt_topic_name, 0)
        # BUG FIX: the original "'Connection Msg: '.format(msg)" had no
        # placeholder, so the connect result was never printed.
        print('Connection Msg: {0}'.format(msg))
        print('Subscribe topic: {0} RC: {1}'.format(self.mqtt_topic_name, rc))
if __name__ == '__main__':
    import platform

    # Command-line interface: the stream name is required; broker host,
    # topic filter and AWS region all have defaults.
    parser = argparse.ArgumentParser(
        description='''Bridge a MQTT Broker to a Kinesis stream. All messages
on a particular topic will be sent downstream as records.''',
        formatter_class=RawTextHelpFormatter)
    parser.add_argument('stream_name',
                        help='''the name of the Kinesis stream to connect''')
    parser.add_argument('--host_name', default='localhost',
                        help='''the name of the MQTT host to connect [default: 'localhost']''')
    parser.add_argument('--topic_name', default='mqttkb/+',
                        help='''the name of the MQTT topic to connect [default: 'mqttkb/+']''')
    parser.add_argument('--region', default='us-east-1',
                        help='''the region of your Kinesis Stream [default: 'us-east-1']''')
    args = parser.parse_args()

    # Module-level connection used by get_stream() and put_records().
    kinesis = boto.kinesis.connect_to_region(args.region)
    # add specific user agent for request tracking
    boto.UserAgent = 'mqttkb Boto/{0} Python/{1} {2}/{3}'.format(
        boto.__version__,
        platform.python_version(),
        platform.system(),
        platform.release()
    )
    # Verify the stream exists (prints its description) before bridging.
    kinesis_stream = get_stream(args.stream_name)
    bridge = MQTTKinesisBridge(
        mqtt_host=args.host_name,
        mqtt_topic_name=args.topic_name,
        kinesis_stream=args.stream_name
    )
    bridge.connect()
    print('Bridge Connected, looping...')
    # Blocks forever, dispatching MQTT callbacks as messages arrive.
    bridge.client.loop_forever()
|
# -*- coding: utf-8 -*-
# Disable while we have Python 2.x compatability
# pylint: disable=useless-object-inheritance
"""Access to the Music Library.
The Music Library is the collection of music stored on your local network.
For access to third party music streaming services, see the
`music_service` module."""
from __future__ import unicode_literals
import logging
import xmltodict
from . import discovery
from .data_structures import SearchResult, DidlResource, DidlObject, DidlMusicAlbum
from .data_structures_entry import from_didl_string
from .exceptions import SoCoUPnPException
from .utils import url_escape_path, really_unicode, camel_to_underscore
from .compat import quote_url
_LOG = logging.getLogger(__name__)
class MusicLibrary(object):
    """The Music Library."""

    # Key words used when performing searches: maps the friendly search
    # type names used throughout this class to the ContentDirectory
    # ObjectID prefixes understood by the device.
    SEARCH_TRANSLATION = {
        "artists": "A:ARTIST",
        "album_artists": "A:ALBUMARTIST",
        "albums": "A:ALBUM",
        "genres": "A:GENRE",
        "composers": "A:COMPOSER",
        "tracks": "A:TRACKS",
        "playlists": "A:PLAYLISTS",
        "share": "S:",
        "sonos_playlists": "SQ:",
        "categories": "A:",
        "sonos_favorites": "FV:2",
        "radio_stations": "R:0/0",
        "radio_shows": "R:0/1",
    }

    # pylint: disable=invalid-name, protected-access
    def __init__(self, soco=None):
        """
        Args:
            soco (`SoCo`, optional): A `SoCo` instance to query for music
                library information. If `None`, or not supplied, a random
                `SoCo` instance will be used.
        """
        # Fall back to any discoverable speaker when none was supplied.
        self.soco = soco if soco is not None else discovery.any_soco()
        # ContentDirectory UPnP service used for all browsing/searching.
        self.contentDirectory = self.soco.contentDirectory
def build_album_art_full_uri(self, url):
"""Ensure an Album Art URI is an absolute URI.
Args:
url (str): the album art URI.
Returns:
str: An absolute URI.
"""
# Add on the full album art link, as the URI version
# does not include the ipaddress
if not url.startswith(("http:", "https:")):
url = "http://" + self.soco.ip_address + ":1400" + url
return url
def _update_album_art_to_full_uri(self, item):
"""Update an item's Album Art URI to be an absolute URI.
Args:
item: The item to update the URI for
"""
if getattr(item, "album_art_uri", False):
item.album_art_uri = self.build_album_art_full_uri(item.album_art_uri)
def get_artists(self, *args, **kwargs):
"""Convenience method for `get_music_library_formation`
with ``search_type='artists'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["artists"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_album_artists(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='album_artists'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["album_artists"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_albums(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='albums'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["albums"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_genres(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='genres'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["genres"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_composers(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='composers'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["composers"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_tracks(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='tracks'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["tracks"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_playlists(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='playlists'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
Note:
The playlists that are referred to here are the playlists imported
from the music library, they are not the Sonos playlists.
"""
args = tuple(["playlists"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_sonos_favorites(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='sonos_favorites'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["sonos_favorites"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_favorite_radio_stations(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='radio_stations'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["radio_stations"] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_favorite_radio_shows(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='radio_stations'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(["radio_shows"] + list(args))
return self.get_music_library_information(*args, **kwargs)
    # pylint: disable=too-many-locals, too-many-arguments, too-many-branches
    def get_music_library_information(
        self,
        search_type,
        start=0,
        max_items=100,
        full_album_art_uri=False,
        search_term=None,
        subcategories=None,
        complete_result=False,
    ):
        """Retrieve music information objects from the music library.

        This method is the main method to get music information items, like
        e.g. tracks, albums etc., from the music library with. It can be used
        in a few different ways:

        The ``search_term`` argument performs a fuzzy search on that string in
        the results, so e.g calling::

            get_music_library_information('artists', search_term='Metallica')

        will perform a fuzzy search for the term 'Metallica' among all the
        artists.

        Using the ``subcategories`` argument, will jump directly into that
        subcategory of the search and return results from there. So. e.g
        knowing that among the artist is one called 'Metallica', calling::

            get_music_library_information('artists',
                                          subcategories=['Metallica'])

        will jump directly into the 'Metallica' sub category and return the
        albums associated with Metallica and::

            get_music_library_information('artists',
                                          subcategories=['Metallica', 'Black'])

        will return the tracks of the album 'Black' by the artist 'Metallica'.
        The order of sub category types is: Genres->Artists->Albums->Tracks.
        It is also possible to combine the two, to perform a fuzzy search in a
        sub category.

        The ``start``, ``max_items`` and ``complete_result`` arguments all
        have to do with paging of the results. By default the searches are
        always paged, because there is a limit to how many items we can get at
        a time. This paging is exposed to the user with the ``start`` and
        ``max_items`` arguments. So calling::

            get_music_library_information('artists', start=0, max_items=100)
            get_music_library_information('artists', start=100, max_items=100)

        will get the first and next 100 items, respectively. It is also
        possible to ask for all the elements at once::

            get_music_library_information('artists', complete_result=True)

        This will perform the paging internally and simply return all the
        items.

        Args:
            search_type (str):
                The kind of information to retrieve. Can be one of:
                ``'artists'``, ``'album_artists'``, ``'albums'``,
                ``'genres'``, ``'composers'``, ``'tracks'``, ``'share'``,
                ``'sonos_playlists'``, or ``'playlists'``, where playlists
                are the imported playlists from the music library.
            start (int, optional): starting number of returned matches
                (zero based). Default 0.
            max_items (int, optional): Maximum number of returned matches.
                Default 100.
            full_album_art_uri (bool):
                whether the album art URI should be absolute (i.e. including
                the IP address). Default `False`.
            search_term (str, optional):
                a string that will be used to perform a fuzzy search among the
                search results. If used in combination with subcategories,
                the fuzzy search will be performed in the subcategory.
            subcategories (str, optional):
                A list of strings that indicate one or more subcategories to
                dive into.
            complete_result (bool): if `True`, will disable
                paging (ignore ``start`` and ``max_items``) and return all
                results for the search.

                Warning:
                    Getting e.g. all the tracks in a large collection might
                    take some time.

        Returns:
            `SearchResult`: an instance of `SearchResult`.

        Note:
            * The maximum numer of results may be restricted by the unit,
              presumably due to transfer size consideration, so check the
              returned number against that requested.
            * The playlists that are returned with the ``'playlists'`` search,
              are the playlists imported from the music library, they
              are not the Sonos playlists.

        Raises:
            `SoCoException` upon errors.
        """
        # Translate the friendly search type into a device ObjectID prefix.
        search = self.SEARCH_TRANSLATION[search_type]
        # Add sub categories.
        # Sub categories are not allowed when searching shares.
        if subcategories is not None and search_type != "share":
            for category in subcategories:
                search += "/" + url_escape_path(really_unicode(category))
        # Add fuzzy search
        if search_term is not None:
            if search_type == "share":
                # Don't insert ":" and don't escape "/" (so can't use url_escape_path)
                search += quote_url(really_unicode(search_term).encode("utf-8"))
            else:
                search += ":" + url_escape_path(really_unicode(search_term))
        item_list = []
        # Sentinel total so the loop body runs at least once; the real
        # total is filled in from the first device response.
        metadata = {"total_matches": 100000}
        while len(item_list) < metadata["total_matches"]:
            # Change start and max for complete searches
            if complete_result:
                start, max_items = len(item_list), 100000
            # Try and get this batch of results
            try:
                response, metadata = self._music_lib_search(search, start, max_items)
            except SoCoUPnPException as exception:
                # 'No such object' UPnP errors map to an empty result set.
                if exception.error_code == "701":
                    return SearchResult([], search_type, 0, 0, None)
                else:
                    raise exception
            # Parse the results
            items = from_didl_string(response["Result"])
            for item in items:
                # Check if the album art URI should be fully qualified
                if full_album_art_uri:
                    self._update_album_art_to_full_uri(item)
                # Append the item to the list
                item_list.append(item)
            # If we are not after the complete results, then stop after one
            # iteration
            if not complete_result:
                break
        metadata["search_type"] = search_type
        if complete_result:
            metadata["number_returned"] = len(item_list)
        # pylint: disable=star-args
        return SearchResult(item_list, **metadata)
    def browse(
        self,
        ml_item=None,
        start=0,
        max_items=100,
        full_album_art_uri=False,
        search_term=None,
        subcategories=None,
    ):
        """Browse (get sub-elements from) a music library item.

        Args:
            ml_item (`DidlItem`): the item to browse, if left out or
                `None`, items at the root level will be searched.
            start (int): the starting index of the results.
            max_items (int): the maximum number of items to return.
            full_album_art_uri (bool): whether the album art URI should be
                fully qualified with the relevant IP address.
            search_term (str): A string that will be used to perform a fuzzy
                search among the search results. If used in combination with
                subcategories, the fuzzy search will be performed on the
                subcategory. Note: Searching will not work if ``ml_item`` is
                `None`.
            subcategories (list): A list of strings that indicate one or more
                subcategories to descend into. Note: Providing sub categories
                will not work if ``ml_item`` is `None`.

        Returns:
            A `SearchResult` instance.

        Raises:
            AttributeError: if ``ml_item`` has no ``item_id`` attribute.
            SoCoUPnPException: with ``error_code='701'`` if the item cannot be
                browsed.
        """
        # "A:" is the root of the browsable hierarchy.
        if ml_item is None:
            search = "A:"
        else:
            search = ml_item.item_id
        # Add sub categories
        if subcategories is not None:
            for category in subcategories:
                search += "/" + url_escape_path(really_unicode(category))
        # Add fuzzy search
        if search_term is not None:
            search += ":" + url_escape_path(really_unicode(search_term))
        try:
            response, metadata = self._music_lib_search(search, start, max_items)
        except SoCoUPnPException as exception:
            # 'No such object' UPnP errors map to an empty result set.
            if exception.error_code == "701":
                return SearchResult([], "browse", 0, 0, None)
            else:
                raise exception
        metadata["search_type"] = "browse"
        # Parse the results
        containers = from_didl_string(response["Result"])
        item_list = []
        for container in containers:
            # Check if the album art URI should be fully qualified
            if full_album_art_uri:
                self._update_album_art_to_full_uri(container)
            item_list.append(container)
        # pylint: disable=star-args
        return SearchResult(item_list, **metadata)
    def browse_by_idstring(
        self, search_type, idstring, start=0, max_items=100, full_album_art_uri=False
    ):
        """Browse (get sub-elements from) a given music library item,
        specified by a string.

        Args:
            search_type (str): The kind of information to retrieve. Can be
                one of: ``'artists'``, ``'album_artists'``, ``'albums'``,
                ``'genres'``, ``'composers'``, ``'tracks'``, ``'share'``,
                ``'sonos_playlists'``, and ``'playlists'``, where
                playlists are the imported file based playlists from the
                music library.
            idstring (str): a term to search for.
            start (int): starting number of returned matches. Default 0.
            max_items (int): Maximum number of returned matches. Default 100.
            full_album_art_uri (bool): whether the album art URI should be
                absolute (i.e. including the IP address). Default `False`.

        Returns:
            `SearchResult`: a `SearchResult` instance.

        Note:
            The maximum numer of results may be restricted by the unit,
            presumably due to transfer size consideration, so check the
            returned number against that requested.
        """
        search = self.SEARCH_TRANSLATION[search_type]
        # Check if the string ID already has the type, if so we do not want to
        # add one also. Imported playlists have a full path to them, so they do
        # not require the A:PLAYLISTS part first.
        if idstring.startswith(search) or (search_type == "playlists"):
            search = ""
        search_item_id = search + idstring
        search_uri = "#" + search_item_id
        # Not sure about the res protocol. But this seems to work
        res = [DidlResource(uri=search_uri, protocol_info="x-rincon-playlist:*:*:*")]
        # Wrap the raw ID string in a minimal DIDL object so the generic
        # browse() entry point can be reused.
        search_item = DidlObject(
            resources=res, title="", parent_id="", item_id=search_item_id
        )
        # Call the base version
        return self.browse(search_item, start, max_items, full_album_art_uri)
def _music_lib_search(self, search, start, max_items):
"""Perform a music library search and extract search numbers.
You can get an overview of all the relevant search prefixes (like
'A:') and their meaning with the request:
.. code ::
response = device.contentDirectory.Browse([
('ObjectID', '0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 100),
('SortCriteria', '')
])
Args:
search (str): The ID to search.
start (int): The index of the forst item to return.
max_items (int): The maximum number of items to return.
Returns:
tuple: (response, metadata) where response is the returned metadata
and metadata is a dict with the 'number_returned',
'total_matches' and 'update_id' integers
"""
response = self.contentDirectory.Browse(
[
("ObjectID", search),
("BrowseFlag", "BrowseDirectChildren"),
("Filter", "*"),
("StartingIndex", start),
("RequestedCount", max_items),
("SortCriteria", ""),
]
)
# Get result information
metadata = {}
for tag in ["NumberReturned", "TotalMatches", "UpdateID"]:
metadata[camel_to_underscore(tag)] = int(response[tag])
return response, metadata
@property
def library_updating(self):
"""bool: whether the music library is in the process of being updated.
"""
result = self.contentDirectory.GetShareIndexInProgress()
return result["IsIndexing"] != "0"
def start_library_update(self, album_artist_display_option=""):
"""Start an update of the music library.
Args:
album_artist_display_option (str): a value for the album artist
compilation setting (see `album_artist_display_option`).
"""
return self.contentDirectory.RefreshShareIndex(
[("AlbumArtistDisplayOption", album_artist_display_option),]
)
def search_track(self, artist, album=None, track=None, full_album_art_uri=False):
"""Search for an artist, an artist's albums, or specific track.
Args:
artist (str): an artist's name.
album (str, optional): an album name. Default `None`.
track (str, optional): a track name. Default `None`.
full_album_art_uri (bool): whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance.
"""
subcategories = [artist]
subcategories.append(album or "")
# Perform the search
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
search_term=track,
complete_result=True,
)
result._metadata["search_type"] = "search_track"
return result
def get_albums_for_artist(self, artist, full_album_art_uri=False):
"""Get an artist's albums.
Args:
artist (str): an artist's name.
full_album_art_uri: whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance.
"""
subcategories = [artist]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True,
)
reduced = [item for item in result if item.__class__ == DidlMusicAlbum]
# It is necessary to update the list of items in two places, due to
# a bug in SearchResult
result[:] = reduced
result._metadata.update(
{
"item_list": reduced,
"search_type": "albums_for_artist",
"number_returned": len(reduced),
"total_matches": len(reduced),
}
)
return result
def get_tracks_for_album(self, artist, album, full_album_art_uri=False):
"""Get the tracks of an artist's album.
Args:
artist (str): an artist's name.
album (str): an album name.
full_album_art_uri: whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance.
"""
subcategories = [artist, album]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True,
)
result._metadata["search_type"] = "tracks_for_album"
return result
@property
def album_artist_display_option(self):
"""str: The current value of the album artist compilation setting.
Possible values are:
* ``'WMP'`` - use Album Artists
* ``'ITUNES'`` - use iTunes® Compilations
* ``'NONE'`` - do not group compilations
See Also:
The Sonos `FAQ <https://sonos.custhelp.com
/app/answers/detail/a_id/3056/kw/artist%20compilation>`_ on
compilation albums.
To change the current setting, call `start_library_update` and
pass the new setting.
"""
result = self.contentDirectory.GetAlbumArtistDisplayOption()
return result["AlbumArtistDisplayOption"]
def list_library_shares(self):
"""Return a list of the music library shares.
Returns:
list: The music library shares, which are strings of the form
``'//hostname_or_IP/share_path'``.
"""
response = self.contentDirectory.Browse(
[
("ObjectID", "S:"),
("BrowseFlag", "BrowseDirectChildren"),
("Filter", "*"),
("StartingIndex", "0"),
("RequestedCount", "100"),
("SortCriteria", ""),
]
)
shares = []
matches = response["TotalMatches"]
# Zero matches
if matches == "0":
return shares
xml_dict = xmltodict.parse(response["Result"])
unpacked = xml_dict["DIDL-Lite"]["container"]
# One match
if matches == "1":
shares.append(unpacked["dc:title"])
return shares
# Otherwise it's multiple matches
for share in unpacked:
shares.append(share["dc:title"])
return shares
def delete_library_share(self, share_name):
"""Delete a music library share.
Args:
share_name (str): the name of the share to be deleted, which
should be of the form ``'//hostname_or_IP/share_path'``.
:raises: `SoCoUPnPException`
"""
# share_name must be prefixed with 'S:'
self.contentDirectory.DestroyObject([("ObjectID", "S:" + share_name)])
|
# coding: utf-8

"""Packaging script for the GraphSense API server (openapi_server)."""

import sys
from setuptools import setup, find_packages

NAME = "openapi_server"
VERSION = "1.0.0"

# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
    "connexion>=2.0.2",
    "swagger-ui-bundle>=0.0.2",
    "python_dateutil>=2.6.0"
]

# Bug fix: read the README explicitly as UTF-8 — the platform default
# locale encoding can fail on non-ASCII characters in the description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name=NAME,
    version=VERSION,
    description="GraphSense API",
    author_email="",
    keywords=["OpenAPI", "GraphSense API"],
    python_requires='>=3.6',
    install_requires=REQUIRES,
    packages=find_packages(),
    package_data={'': ['openapi/openapi.yaml']},
    include_package_data=True,
    entry_points={
        'console_scripts': ['openapi_server=openapi_server.__main__:main']},
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/graphsense/graphsense-REST/',
    zip_safe=False,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.conf import settings
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
from django.views import View
from mails.models import SentMail, UserExtended
import random
import string
from accounts.forms import (
LoginForm,
UserRegistrationForm,
ResetPassword,
VolunteerUserRegistrationForm,
UserEditProfileForm
)
def dashboard(request):
    """Show the ten most recently sent mails to a logged-in user."""
    if not request.user.is_authenticated:
        return redirect('login')
    is_admin = request.user.groups.filter(name='admin').exists()
    recent_mails = SentMail.objects.order_by('-time')[:10].values()
    return render(request, 'accounts/dashboard.html', {'mails': recent_mails, 'is_admin': is_admin, "community": settings.COMMUNITY})
def request_reset_password(request):
    # Redirect a logged-in user straight to the password-reset view for
    # their own account, passing a base64-encoded pk and a one-time token.
    if request.user.is_authenticated:
        user = request.user
        # NOTE(review): str(..., 'utf-8') assumes urlsafe_base64_encode
        # returns bytes (Django < 2.2); newer Django versions return str,
        # which would make this raise TypeError — confirm pinned version.
        return redirect('reset',
                        str(urlsafe_base64_encode(
                            force_bytes(request.user.pk)), 'utf-8'),
                        account_activation_token.make_token(user)
                        )
    else:
        return redirect('login')
class EditProfile(View):
    '''
    If a user decides to edit some information of his/her account,
    the control is transferred to this view.
    '''

    def get(self, request):
        """Render the edit-profile form, or bounce anonymous users to login."""
        if request.user.is_authenticated:
            is_admin = request.user.groups.filter(name='admin').exists()
            form = UserEditProfileForm(user=request.user)
            return render(request, 'accounts/forms.html', {
                'form_btn_name': 'Edit Information',
                'form': form,
                'is_admin': is_admin,
                "form_page_name": 'Edit Profile',
                "community": settings.COMMUNITY})
        else:
            return redirect('login')

    def post(self, request):
        """Validate the submitted form and persist the profile changes."""
        if request.user.is_authenticated:
            is_admin = request.user.groups.filter(name='admin').exists()
            form = UserEditProfileForm(request.POST, user=request.user)
            if form.is_valid():
                cd = form.cleaned_data
                # NOTE(review): queryset .update() bypasses model.save() and
                # signals — assumed intentional; confirm if handlers exist.
                User.objects.filter(pk=request.user.pk).update(
                    username=cd['username'], email=cd['email'], first_name=cd['first_name'], last_name=cd['last_name'])
                # Bug fix: removed a dead `form = UserEditProfileForm(...)`
                # assignment that was immediately discarded by this redirect.
                return redirect('dashboard')
            # Invalid form: re-render with the bound form so errors show.
            return render(request, 'accounts/forms.html', {
                'form_btn_name': 'Edit Profile',
                'form': form,
                'is_admin': is_admin,
                "form_page_name": 'Edit Profile',
                "community": settings.COMMUNITY})
        else:
            return redirect('login')
def user_login(request):
    """Log a user in from the login form.

    GET renders an empty form; POST validates credentials and reports the
    outcome as a plain `HttpResponse`.
    """
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            user = authenticate(
                username=cd['username'], password=cd['password'])
            if user is None:
                return HttpResponse('invalid login')
            if not user.is_active:
                return HttpResponse('Disabled account')
            login(request, user)
            return HttpResponse('Authenticated successfully')
    else:
        form = LoginForm()
    # GET, or POST with an invalid form (re-rendered with bound errors).
    return render(request, 'accounts/login.html', {
        'form': form,
        "form_page_name": 'Login',
        "community": settings.COMMUNITY})
class AccountActivation(View):
    '''
    AccountActivation class used for activating a new user's account.
    Once a user clicks on the link from their email, it gives them an
    opportunity to set their password.
    '''

    def get(self, request, uidb64, token):
        """Show the set-password form when the activation link is valid."""
        try:
            uid = force_text(urlsafe_base64_decode(uidb64))
            user = User.objects.get(pk=uid)
        except(TypeError, ValueError, OverflowError, User.DoesNotExist):
            user = None
        if user is not None and account_activation_token.check_token(user, token):
            form = ResetPassword()
            return render(request, 'accounts/forms.html', {
                'form_btn_name': 'Activate Account',
                'form': form,
                "form_page_name": 'Set Password',
                "community": settings.COMMUNITY})
        else:
            return render(request, 'accounts/messages.html', {"msg_page_name": "Failed", 'message': 'Link is invalid!', "community": settings.COMMUNITY})

    def post(self, request, uidb64, token):
        """Validate link and form, then activate the account and log in."""
        form = ResetPassword(request.POST)
        if form.is_valid():
            # Bug fix: removed a pointless `authenticate(password=...)` call
            # whose result was immediately overwritten below.
            try:
                uid = force_text(urlsafe_base64_decode(uidb64))
                user = User.objects.get(pk=uid)
            except(TypeError, ValueError, OverflowError, User.DoesNotExist):
                user = None
            if user is not None and account_activation_token.check_token(user, token):
                user.is_active = True
                user.set_password(form.cleaned_data['password'])
                user.save()
                login(request, user)
                return redirect('dashboard')
            else:
                return render(request, 'accounts/messages.html', {"msg_page_name": "Failed", 'message': 'Link is invalid!', "community": settings.COMMUNITY})
        else:
            return render(request, 'accounts/forms.html', {
                'form_btn_name': 'Activate Account',
                'form': form,
                "form_page_name": 'Set Password',
                "community": settings.COMMUNITY})
class AccountRegistration(View):
    '''
    AccountRegistration class used for registration of new users.
    On valid form data it creates an active user with the chosen password,
    stores a random unsubscribe key, and logs the new user in directly.
    Note: unlike AccountVolunteerRegister, this view sends no email.
    '''
    def get(self, request):
        # Render an empty sign-up form.
        form = UserRegistrationForm()
        return render(request, 'accounts/forms.html', {
            'form_btn_name': 'Register',
            'form': form,
            "form_page_name": 'Sign-Up',
            "community": settings.COMMUNITY})
    def post(self, request):
        # Create the user, attach an unsubscribe key, log them in.
        form = UserRegistrationForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            user.set_password(form.cleaned_data['password'])
            user.is_active = True
            user.save()
            # 25-character random key used in unsubscribe links.
            # NOTE(review): `random` is not cryptographically secure;
            # the `secrets` module would be preferable for this key.
            userKey = ''.join(random.choice(string.ascii_uppercase +
                                            string.ascii_lowercase + string.digits) for _ in range(25))
            UserExtendedSave = UserExtended(
                user=user, userKey=userKey)
            UserExtendedSave.save()
            login(request, user)
            return redirect('dashboard')
        else:
            # Invalid form: re-render with bound errors.
            return render(request, 'accounts/forms.html', {
                'form_btn_name': 'Create Account',
                'form': form,
                "form_page_name": 'Sign Up',
                "community": settings.COMMUNITY})
class AccountVolunteerRegister(View):
    '''
    AccountVolunteerRegister class used for registration of new users by a
    volunteer. It sends an email containing set-password instructions to the
    user if the form data is valid in the post request.
    '''

    def get(self, request):
        """Render an empty volunteer sign-up form."""
        form = VolunteerUserRegistrationForm()
        # Bug fix: the context dict contained 'form_btn_name' twice
        # ('Create Account' then 'Edit Information'); the second key
        # silently won. Keep 'Create Account', matching the post() branch.
        return render(request, 'accounts/forms.html', {
            'form_btn_name': 'Create Account',
            'form': form,
            "form_page_name": 'Sign-Up',
            "community": settings.COMMUNITY})

    def post(self, request):
        """Create an inactive user and email them an activation link."""
        form = VolunteerUserRegistrationForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # Placeholder password; the user sets their own via the link,
            # and the account stays inactive until then.
            user.set_password(User.objects.make_random_password())
            user.is_active = False
            user.save()
            # 25-character random key used in unsubscribe links.
            # NOTE(review): `random` is not cryptographically secure;
            # the `secrets` module would be preferable for this key.
            userKey = ''.join(random.choice(string.ascii_uppercase +
                                            string.ascii_lowercase + string.digits) for _ in range(25))
            UserExtendedSave = UserExtended(
                user=user, userKey=userKey)
            UserExtendedSave.save()
            current_site = get_current_site(request)
            mail_subject = '[PyDelhi] Please activate your account.'
            message = render_to_string('accounts/activate.html', {
                'protocol': request.scheme,
                'user': user,
                'domain': current_site.domain,
                'uid': str(urlsafe_base64_encode(force_bytes(user.pk)), 'utf-8'),
                'token': account_activation_token.make_token(user),
                "community": settings.COMMUNITY,
                "unsubscribe": userKey,
            })
            to_email = form.cleaned_data.get('email')
            email = EmailMessage(mail_subject, message, to=[to_email])
            email.send()
            form = VolunteerUserRegistrationForm()
            return render(request, 'accounts/forms.html', {
                'form_btn_name': 'Create Account',
                'form': form,
                "form_page_name": 'Sign-Up',
                "community": settings.COMMUNITY})
        else:
            return render(request, 'accounts/forms.html', {
                'form_btn_name': 'Create Account',
                'form': form,
                "form_page_name": 'Sign Up',
                "community": settings.COMMUNITY})
def unsubscribe(request, username, unsubscribe):
    # Remove a user from the mailing list when the supplied key matches.
    # `unsubscribe` is the secret per-user key included in mailed links.
    try:
        user = UserExtended.objects.get(user__username=username)
    except UserExtended.DoesNotExist:
        # No extended record — treat as already unsubscribed.
        return render(request, 'accounts/messages.html', {"msg_page_name": "Failed", 'message': 'You are already unsubscribed, If you think this is an error, please contact the community volunteers.', "community": settings.COMMUNITY})
    if unsubscribe == user.userKey:
        # NOTE(review): assumes the FK from UserExtended cascades on delete
        # of the auth User — confirm on_delete in the model.
        User.objects.filter(pk=user.user.pk).delete()
        return render(request, 'accounts/messages.html', {"msg_page_name": "Success", 'message': 'You have unsubscribed, if you wish to subscribe again, please sign-up again.', "community": settings.COMMUNITY})
    else:
        return render(request, 'accounts/messages.html', {"msg_page_name": "Failed", 'message': 'Incorrect unsubscribe Key!', "community": settings.COMMUNITY})
def home(request):
    """Landing page: dashboard for authenticated users, index otherwise."""
    if not request.user.is_authenticated:
        return render(request, 'index.html', {"community": settings.COMMUNITY})
    return redirect('dashboard')
|
"""
Class for holding Light Curve Data
"""
from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
from future.moves.itertools import zip_longest
import abc
from collections import Sequence
import numpy as np
import pandas as pd
from astropy.table import Table
import sncosmo
from .aliases import aliasDictionary
__all__ = ['BaseLightCurve', 'LightCurve']
class BaseLightCurve(with_metaclass(abc.ABCMeta, object)):
    """
    Abstract Base Class for Light Curve Data showing methods that need to be
    implemented.
    """
    # Arbitrary metadata/properties associated with the light curve.
    @abc.abstractproperty
    def props(self):
        pass
    @abc.abstractmethod
    def __init__(self):
        pass
    @abc.abstractproperty
    def lightCurve(self):
        """
        `pd.DataFrame` holding the lightCurve information. There can be more
        columns, but the following columns are mandatory:
        ['mjd', 'band', 'flux', 'fluxerr', 'zp', 'zpsys']
        """
        pass
    # Return the light curve in a form consumable by sncosmo.
    @abc.abstractmethod
    def snCosmoLC(self, coaddTimes=None):
        pass
    # Return the light curve coadded in time bins.
    @abc.abstractmethod
    def coaddedLC(self, coaddTimes=None, timeOffset=0., timeStep=1.0, *args, **kwargs):
        pass
    # NOTE(review): signature has no `self`/`cls`; the concrete subclass
    # implements this as a staticmethod — confirm that is the intent.
    @abc.abstractmethod
    def remap_filters(names, bandNameDict, ignore_case):
        pass
    @abc.abstractmethod
    def missingColumns(self, lcdf):
        # Default implementation: mandatory columns not present in `lcdf`.
        notFound = self.mandatoryColumns - set(lcdf.columns)
        return notFound
    @staticmethod
    def requiredColumns():
        """
        Return the set of column names every light curve must provide.
        """
        reqd = set(['mjd', 'band', 'flux', 'fluxerr', 'zp', 'zpsys'])
        return reqd
    @property
    def mandatoryColumns(self):
        """
        A list of mandatory columns in the light curve dataFrame with
        possible aliases in `self.mandatoryColumnAliases`.
        mjd : time
        band : string
        flux : model flux
        """
        return self.requiredColumns()
    @property
    def columnAliases(self):
        """
        dictionary that maps standard names as keys to a possible set of
        aliases
        """
        aliases = {}
        aliases['zp'] = ['zp']
        aliases['mjd'] = ['time', 'expmjd', 'date']
        aliases['zpsys'] = ['magsys']
        aliases['band'] = ['filter', 'filtername', 'bandname', 'bands', 'flt']
        aliases['flux'] = ['fluxcal']
        aliases['fluxerr'] = ['flux_err', 'flux_errs', 'fluxerror', 'fluxcalerr']
        return aliases
class LightCurve(BaseLightCurve):
"""
A Class to represent light curve data. Light curve data is often available
with different kinds of column names. This class homogenizes them to a set
of standard names, and allows simple calculations to be based on the same
variable names 'mjd', 'band', 'flux', 'fluxerr', 'zp', 'zpsys' which denote
the time of observation, bandpass of observation, the flux and flux
uncertainty of the observation.
zp represents the zero point to convert the flux value to the phsyical flux
using the zero point system zpsys.
"""
    def __init__(self, lcdf, bandNameDict=None, ignore_case=True, propDict=None,
                 cleanNans=True):
        """
        Instantiate Light Curve class

        Parameters
        ----------
        lcdf : `pd.DataFrame`, mandatory
            light curve information, must contain columns `mjd`, `band`, `flux`,
            `flux_err`, `zp`, `zpsys`
        bandNameDict : dictionary, optional, default to None
            dictionary of the values in the 'band' column or its alias, and
            values that it should be mapped to.
        ignore_case : bool, optional, defaults to True
            ignore the case of the characters in the strings representing
            bandpasses
        propDict : Dictionary, optional, defaults to None
            a dictionary of properties associated with the light curve
        cleanNans : Bool, defaults to True
            if True, ensures that at the time of returning `snCosmoLC()` objects
            which are used in fits, any row that has a `NAN` in it will be
            dropped

        Example
        -------
        >>> from analyzeSN import LightCurve
        >>> ex_data = sncosmo.load_example_data()
        >>> lc = LightCurve(ex_data.to_pandas())
        """
        # Map any recognized alias column names to the standard names.
        aliases = self.columnAliases
        standardNamingDict = aliasDictionary(lcdf.columns, aliases)
        if len(standardNamingDict) > 0:
            # NOTE(review): rename(inplace=True) mutates the caller's
            # dataframe — confirm callers do not depend on original names.
            lcdf.rename(columns=standardNamingDict, inplace=True)
        # Fail early if mandatory columns are still missing after renaming.
        missingColumns = self.missingColumns(lcdf)
        if len(missingColumns) > 0:
            raise ValueError('light curve data has missing columns',
                             missingColumns)
        self.bandNameDict = bandNameDict
        self._lightCurve = lcdf
        self.ignore_case = ignore_case
        self._propDict = propDict
        self.cleanNans = cleanNans
    @property
    def props(self):
        """Dictionary of properties supplied at construction (may be None)."""
        return self._propDict
@classmethod
def fromSALTFormat(cls, fname):
_lc = sncosmo.read_lc(fname, format='salt2')
lc = _lc.to_pandas()
lc.MagSys = 'ab'
def filtername(x):
if 'megacam' in x.lower():
return 'megacam'
else:
return x[:-3].lower()
banddict = dict((key.lower(), filtername(key) + key[-1])
for key in lc.Filter.unique())
return cls(lc,
bandNameDict=banddict,
ignore_case=True,
propDict=_lc.meta)
def missingColumns(self, lcdf):
"""
return a set of columns in the light curve dataframe that are missing
from the mandatory set of columns
Parameters
----------
lcdf : `pd.dataFrame`
a light curve represented as a pandas dataframe
"""
notFound = self.mandatoryColumns - set(lcdf.columns)
return notFound
@staticmethod
def remap_filters(name, nameDicts, ignore_case=True):
"""
"""
try:
if ignore_case:
_nameDicts = dict((key.lower(), value)
for (key, value) in nameDicts.items())
return _nameDicts[name.lower()]
else:
return nameDicts[name]
except:
raise NotImplementedError('values for old filter {} not implemented',
name)
@property
def lightCurve(self):
"""
The lightcurve in native format
"""
# light curve
_lc = self._lightCurve.copy()
# return the light curve
_lc.band = _lc.band.apply(lambda x: x.decode())
_lc.band = _lc.band.apply(lambda x: x.strip())
if self.bandNameDict is not None:
_lc.band = _lc.band.apply(lambda x:
self.remap_filters(x, self.bandNameDict,
self.ignore_case))
return _lc
    def snCosmoLC(self, coaddTimes=None, mjdBefore=0., minmjd=None):
        """
        Return the (optionally coadded) light curve as an
        `astropy.table.Table` with the 'mjd' column renamed to 'time'.

        Parameters
        ----------
        coaddTimes : float, optional, defaults to None
            coadd bin length in days; None means no coaddition
        mjdBefore : float, optional, defaults to 0.
            offset before the first observation anchoring the coadd bins
        minmjd : float, optional, defaults to None
            explicit origin for the coadd bins; overrides `mjdBefore`
        """
        lc = self.coaddedLC(coaddTimes=coaddTimes, mjdBefore=mjdBefore,
                            minmjd=minmjd).rename(columns=dict(mjd='time'))
        # Rows containing NaNs break downstream fits; drop when requested.
        if self.cleanNans:
            lc.dropna(inplace=True)
        return Table.from_pandas(lc)
@staticmethod
def sanitize_nan(lcs):
"""
.. note:: These methods are meant to be applied to photometric tables
as well
"""
lcs = lcs.copy()
# Stop gap measure to deal with nans
avg_error = lcs.fluxerr.mean(skipna=True)
lcs.fillna(dict(flux=0., fluxerr=avg_error), inplace=True)
return lcs
@staticmethod
def discretize_time(lcs, timeOffset=0., timeStep=1.0):
"""
.. note:: These methods are meant to be applied to photometric tables
as well
"""
lcs['night'] = (lcs.mjd - timeOffset) // timeStep
lcs.night = lcs.night.astype(np.int)
return lcs
    @staticmethod
    def add_weightedColumns(lcs, avg_cols=('mjd', 'flux', 'fluxerr', 'zp'),
                            additional_cols=None,
                            copy=False):
        """
        Add a 'weighted_<col>' column (value times inverse-variance weight)
        for each column in `avg_cols` except 'fluxerr', creating a 'weights'
        column from 'fluxerr' when absent.

        Parameters
        ----------
        lcs : `pd.DataFrame`
            light curve / photometry table
        avg_cols : sequence of str
            columns for which weighted versions are added
        additional_cols : sequence of str, optional, defaults to None
            extra columns to weight as well
        copy : bool, defaults to False
            if True operate on and return a copy; otherwise mutate `lcs`
        """
        avg_cols = list(tuple(avg_cols))
        if additional_cols is not None:
            avg_cols += list(additional_cols)
        if copy:
            lcs = lcs.copy()
        # weights = 1/sigma^2 (inverse variance) derived from fluxerr.
        if 'weights' not in lcs.columns:
            if 'fluxerr' not in lcs.columns:
                raise ValueError("Either fluxerr or weights must be a column in the dataFrame")
            lcs['weights'] = 1.0 / lcs['fluxerr']**2
        for col in avg_cols:
            # 'fluxerr' is skipped: the coadded error is computed from the
            # summed weights instead (see `coaddpreprocessed`).
            if col != 'fluxerr':
                lcs['weighted_' + col] = lcs[col] *lcs['weights']
        return lcs
@staticmethod
def coaddpreprocessed(preProcessedlcs, include_snid=True,
cols=('mjd', 'flux', 'fluxerr', 'zp', 'zpsys'),
additionalAvgCols=None,
additionalColsKept=None,
additionalAggFuncs='first',
keepAll=False,
keepCounts=True):
"""
Parameters
----------
preProcessedlcs :
include_snid :
cols :
additionalAvgCols : list of strings
.. note:: These methods are meant to be applied to photometric tables
as well
"""
grouping = ['band', 'night']
if include_snid:
grouping = ['snid'] + grouping
default_avg_cols = ['mjd', 'flux', 'zp']
avg_cols = default_avg_cols
if additionalAvgCols is not None:
avg_cols += additionalAvgCols
default_add_cols = ['zpsys']
lcs = preProcessedlcs
#lcs = _preprocess(lcs, cols=cols, timeStep=timeStep, timeOffset=timeOffset)
grouped = lcs.groupby(grouping)
aggdict = dict(('weighted_' + col, np.sum) for col in avg_cols)
aggdict['weights'] = np.sum
if keepCounts:
lcs['numExpinCoadd'] = lcs.mjd.copy()
aggdict['numExpinCoadd'] = 'count'
aggdict['zpsys'] = 'first'
# The columns we will finally keep
keptcols = grouping + ['zpsys']
# interpret type of additionalAggFuncs
# Assuming types are
# 1. tuple of aggregate functions (sequence)
# 2. string aggregate functions common to all eg. 'first'
# 3. method aggregate functions common to all eg. np.sum
if additionalColsKept is not None:
aggFuncScalar = True
if isinstance(additionalAggFuncs, basestring):
aggFuncScalar = True
elif isinstance(additionalAggFuncs, Sequence):
aggFuncScalar = False
if len(additionalAggFuncs) != len(additionalColsKept):
raise ValueError('if sequence, length of aggfuncs and additionalColsKept should match')
else:
aggFuncScalar = True
if aggFuncScalar:
newaggs = zip_longest(additionalColsKept, (additionalAggFuncs,),
fillvalue=additionalAggFuncs)
else:
newaggs = zip(additionalColsKept, additionalAggFuncs)
for (col, val) in newaggs:
aggdict[col]=val
# Add these columns to the list keptcols
keptcols += list(additionalColsKept)
x = grouped.agg(aggdict)
weighted_cols = list(col for col in x.reset_index().columns
if (col.startswith('weighted') and col != 'weighted_fluxerr') )
yy = x.reset_index()[weighted_cols].apply(lambda y: y/x.weights.values, axis=0)
yy['weighted_fluxerr_coadded'] = 1.0 / np.sqrt(x.reset_index()['weights'])#/x.reset_index()['weighted_fluxerr']**2)#.apply(lambda y: y/x.weights_squared.values)#/x.weights_squared.values)
yy.rename(columns=dict((col, col.split('_')[1]) for col in yy.columns), inplace=True)
if keepCounts:
keptcols += ['numExpinCoadd']
return x.reset_index()[keptcols].join(yy)
@staticmethod
def summarize(lcdf,
vals=('SNR', 'mjd', 'zp'),
aggfuncs=(max, [max, min], 'count'),
SNRmin=-10000.,
paramsdf=None,
grouping=('snid', 'band'),
summary_prefix='',
prefix_interpret='',
useSNR=True):
"""
summarize a light curve of set of light curves using the functions
`aggfunctions` to aggregate over the values in `vals` over groups
defined by grouping
Parameters
----------
lcdf : `pd.DataFrame`
light curve(s) to aggregate over
vals : tuple of strings
column names to aggregate over
aggfunctions : tuple of functions
tuple of functions to use to aggregate the values in `vals`. Must
have the same length as vals.
paramsdf : `pd.DataFrame`, defaults to None
dataframe with one or more rows of truth parameters indexed by the
snid.
.. note ::
"""
lcdf = lcdf.copy()
noSNID = False
if 'snid' not in lcdf.columns:
lcdf['snid'] = 0
# This hard coding is used later in this function
noSNID = True
raise Warning('SNID not supplied, assuming that all records for a single SN')
# necessary only if we use coadded columns, and recombine with previous
# columns in summary
fluxcol = prefix_interpret + 'flux'
fluxerrcol = prefix_interpret + 'fluxerr'
if 'SNR' not in lcdf.columns and useSNR:
# If useSNR is not False, we need to calculate this quantity
if not (fluxcol in lcdf.columns and fluxerrcol in lcdf.columns):
raise ValueError('The flux and flux error columns cannot be found to calculate SNR', fluxcol, fluxerrcol)
lcdf['SNR'] = lcdf[fluxcol] / lcdf[fluxerrcol]
lcdf = lcdf.query('SNR > @SNRmin')
# single band light curves
grouped = lcdf.groupby(list(grouping))
mapdict = dict(tuple(zip(vals, aggfuncs)))
summary = grouped.agg(mapdict)
# Check for variables to unstack
unstackvars = set(grouping) - set(('snid',))
if len(unstackvars) > 0 :
summary = summary.unstack()
# Build sensible column names
columns = list('_'.join(col).strip() for col in summary.columns.values)
summary.columns = columns
namedicts = dict((col, 'NOBS' + col.split('count')[-1])
for col in columns if 'count' in col)
summary.rename(columns=namedicts, inplace=True)
summary.rename(columns=dict((col, summary_prefix + col)
for col in summary.columns),
inplace=True)
# Join with paramsdf
if paramsdf is None:
# well don't join
return summary
# if the lightcurve/photometry table did not have `snid` values
elif noSNID :
# assume single SN
if len(paramsdf) > 1 :
raise ValueError('Cannot crossmatch SN with paramsdf without SNID')
# Expected situation for light curves
# single SN in both lightcurve and `paramdf` tables,
# but `noSNID` => light curve did not have true SNID
elif 'snid' == paramsdf.index.name:
# paramsdf has a meaningful SNID, so take that value
summary.index = paramsdf.index.values
return summary.join(paramsdf)
# If paramsdf does not have the `snid` value
else :
# Then take the index value from the light curve (which is 0 as
# constructed above
paramsdf.index = 0
return summary.join(paramsdf)
# Expected situation for photometry tables
else :
# If lcdf has SN not in paramsdf
if len(set(lcdf.snid.unique()) - set(paramsdf.index.values)) > 0:
raise ValueError('There are SN in lcdf not in paramsdf')
return summary.join(paramsdf)
    def coaddedLC(self,
                  coaddTimes=None,
                  minmjd=None,
                  coaddedValues=['mjd', 'flux', 'fluxerr', 'zp'],
                  additionalValues=['zpsys'],
                  mjdBefore=None,
                  sanitize=True):
        """
        Return the light curve coadded in time bins of length `coaddTimes`
        days, or the plain light curve when `coaddTimes` is None.

        Parameters
        ----------
        coaddTimes : float, optional, defaults to None
            coadd bin length in days; None means no coaddition
        minmjd : float, optional, defaults to None
            origin of the time bins; computed from `mjdBefore` when None
        coaddedValues : list of str
            columns averaged (inverse-variance weighted) within each bin
        additionalValues : list of str
            extra columns (currently not forwarded to the coadd step)
        mjdBefore : float, optional, defaults to None
            offset before the first observation used to derive `minmjd`
        sanitize : bool, defaults to True
            must be True; NaNs are replaced before coaddition

        .. note:: NOTE(review): the list defaults are mutable and shared
            across calls; they are not mutated here, but treat with care.
        """
        # How should we coadd? group observation in steps of coaddTimes and
        # offsets described by minmjd
        if minmjd is None:
            if mjdBefore is None:
                minmjd = 0.
            else:
                minmjd = self.lightCurve.mjd.min() - mjdBefore
        # Does the light curve have `snid`
        include_snid = 'snid' in self.lightCurve.columns
        # preprocess the light curve for coaddition
        if not sanitize:
            raise NotImplementedError('nan sanitization must be used for coadds\n')
        lc = self.sanitize_nan(self.lightCurve)
        if coaddTimes is None:
            # No coadd requested: return the (NaN-dropped) light curve.
            if self.cleanNans:
                lc = self.lightCurve.dropna(inplace=False)
            return lc
        # Pipeline: discretize times -> add weighted columns -> coadd.
        lc = self.discretize_time(lc, timeOffset=minmjd, timeStep=coaddTimes)
        lc = self.add_weightedColumns(lc,
                                      avg_cols=coaddedValues,
                                      additional_cols=None,
                                      copy=True)
        lc = self.coaddpreprocessed(lc,
                                    include_snid=include_snid,
                                    cols=coaddedValues,
                                    additionalAvgCols=None,
                                    additionalColsKept=None,
                                    keepAll=False,
                                    keepCounts=True)
        return lc
    def _coaddedLC(self, coaddTimes=None, mjdBefore=None, minmjd=None):
        """
        return a coadded light curve

        .. note:: NOTE(review): this looks like an older/simpler coadd
            implementation kept alongside `coaddedLC`; it appears unused
            within this class — confirm before relying on it.
        """
        if coaddTimes is None:
            return self.lightCurve
        # otherwise perform coadd
        # minmjd provides an offset for calculating discrete times
        if minmjd is None:
            if mjdBefore is None:
                mjdBefore = 0.
            minmjd = self.lightCurve.mjd.min() - mjdBefore
        lc = self.lightCurve.copy()
        lc['discreteTime'] = (lc['mjd'] - minmjd) // coaddTimes
        lc['discreteTime'] = lc.discreteTime.astype(int)
        # Per (time-bin, band) group: mean times/fluxes/zero points, error
        # combined in quadrature and divided by N, exposure count kept.
        aggregations = {'mjd': np.mean,
                        'flux': np.mean,
                        'fluxerr': lambda x: np.sqrt(np.sum(x**2))/len(x),
                        'discreteTime': 'count',
                        'zp': np.mean,
                        'zpsys': 'first'}
        groupedbynightlyfilters = lc.groupby(['discreteTime','band'])
        glc = groupedbynightlyfilters.agg(aggregations)
        glc.reset_index('band', inplace=True)
        glc.rename(columns=dict(discreteTime='numCoadded'), inplace=True)
        glc['CoaddedSNR'] = glc['flux'] / glc['fluxerr']
        return glc
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/inference.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Default symbol database in which the generated file and messages are registered.
_sym_db = _symbol_database.Default()
from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2
from tensorflow_serving.apis import input_pb2 as tensorflow__serving_dot_apis_dot_input__pb2
from tensorflow_serving.apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2
from tensorflow_serving.apis import regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2
# File descriptor for tensorflow_serving/apis/inference.proto; serialized_pb is
# the compiled FileDescriptorProto emitted by protoc (do not edit by hand).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow_serving/apis/inference.proto',
  package='tensorflow.serving',
  syntax='proto3',
  serialized_pb=_b('\n\'tensorflow_serving/apis/inference.proto\x12\x12tensorflow.serving\x1a,tensorflow_serving/apis/classification.proto\x1a#tensorflow_serving/apis/input.proto\x1a#tensorflow_serving/apis/model.proto\x1a(tensorflow_serving/apis/regression.proto\"W\n\rInferenceTask\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12\x13\n\x0bmethod_name\x18\x02 \x01(\t\"\xdc\x01\n\x0fInferenceResult\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12I\n\x15\x63lassification_result\x18\x02 \x01(\x0b\x32(.tensorflow.serving.ClassificationResultH\x00\x12\x41\n\x11regression_result\x18\x03 \x01(\x0b\x32$.tensorflow.serving.RegressionResultH\x00\x42\x08\n\x06result\"s\n\x15MultiInferenceRequest\x12\x30\n\x05tasks\x18\x01 \x03(\x0b\x32!.tensorflow.serving.InferenceTask\x12(\n\x05input\x18\x02 \x01(\x0b\x32\x19.tensorflow.serving.Input\"N\n\x16MultiInferenceResponse\x12\x34\n\x07results\x18\x01 \x03(\x0b\x32#.tensorflow.serving.InferenceResultB\x03\xf8\x01\x01\x62\x06proto3')
  ,
  dependencies=[tensorflow__serving_dot_apis_dot_classification__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_input__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_regression__pb2.DESCRIPTOR,])
# Descriptor for message InferenceTask (model_spec + method_name).
_INFERENCETASK = _descriptor.Descriptor(
  name='InferenceTask',
  full_name='tensorflow.serving.InferenceTask',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='model_spec', full_name='tensorflow.serving.InferenceTask.model_spec', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='method_name', full_name='tensorflow.serving.InferenceTask.method_name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=225,
  serialized_end=312,
)
# Descriptor for message InferenceResult; classification_result and
# regression_result are alternatives inside the 'result' oneof.
_INFERENCERESULT = _descriptor.Descriptor(
  name='InferenceResult',
  full_name='tensorflow.serving.InferenceResult',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='model_spec', full_name='tensorflow.serving.InferenceResult.model_spec', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='classification_result', full_name='tensorflow.serving.InferenceResult.classification_result', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='regression_result', full_name='tensorflow.serving.InferenceResult.regression_result', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='result', full_name='tensorflow.serving.InferenceResult.result',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=315,
  serialized_end=535,
)
# Descriptor for message MultiInferenceRequest (repeated tasks + shared input).
_MULTIINFERENCEREQUEST = _descriptor.Descriptor(
  name='MultiInferenceRequest',
  full_name='tensorflow.serving.MultiInferenceRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='tasks', full_name='tensorflow.serving.MultiInferenceRequest.tasks', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='input', full_name='tensorflow.serving.MultiInferenceRequest.input', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=537,
  serialized_end=652,
)
# Descriptor for message MultiInferenceResponse (repeated results).
_MULTIINFERENCERESPONSE = _descriptor.Descriptor(
  name='MultiInferenceResponse',
  full_name='tensorflow.serving.MultiInferenceResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='results', full_name='tensorflow.serving.MultiInferenceResponse.results', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=654,
  serialized_end=732,
)
# Resolve cross-file message references and wire the oneof members.
_INFERENCETASK.fields_by_name['model_spec'].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC
_INFERENCERESULT.fields_by_name['model_spec'].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC
_INFERENCERESULT.fields_by_name['classification_result'].message_type = tensorflow__serving_dot_apis_dot_classification__pb2._CLASSIFICATIONRESULT
_INFERENCERESULT.fields_by_name['regression_result'].message_type = tensorflow__serving_dot_apis_dot_regression__pb2._REGRESSIONRESULT
_INFERENCERESULT.oneofs_by_name['result'].fields.append(
  _INFERENCERESULT.fields_by_name['classification_result'])
_INFERENCERESULT.fields_by_name['classification_result'].containing_oneof = _INFERENCERESULT.oneofs_by_name['result']
_INFERENCERESULT.oneofs_by_name['result'].fields.append(
  _INFERENCERESULT.fields_by_name['regression_result'])
_INFERENCERESULT.fields_by_name['regression_result'].containing_oneof = _INFERENCERESULT.oneofs_by_name['result']
_MULTIINFERENCEREQUEST.fields_by_name['tasks'].message_type = _INFERENCETASK
_MULTIINFERENCEREQUEST.fields_by_name['input'].message_type = tensorflow__serving_dot_apis_dot_input__pb2._INPUT
_MULTIINFERENCERESPONSE.fields_by_name['results'].message_type = _INFERENCERESULT
DESCRIPTOR.message_types_by_name['InferenceTask'] = _INFERENCETASK
DESCRIPTOR.message_types_by_name['InferenceResult'] = _INFERENCERESULT
DESCRIPTOR.message_types_by_name['MultiInferenceRequest'] = _MULTIINFERENCEREQUEST
DESCRIPTOR.message_types_by_name['MultiInferenceResponse'] = _MULTIINFERENCERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes generated from the descriptors above.
InferenceTask = _reflection.GeneratedProtocolMessageType('InferenceTask', (_message.Message,), dict(
  DESCRIPTOR = _INFERENCETASK,
  __module__ = 'tensorflow_serving.apis.inference_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.serving.InferenceTask)
  ))
_sym_db.RegisterMessage(InferenceTask)
InferenceResult = _reflection.GeneratedProtocolMessageType('InferenceResult', (_message.Message,), dict(
  DESCRIPTOR = _INFERENCERESULT,
  __module__ = 'tensorflow_serving.apis.inference_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.serving.InferenceResult)
  ))
_sym_db.RegisterMessage(InferenceResult)
MultiInferenceRequest = _reflection.GeneratedProtocolMessageType('MultiInferenceRequest', (_message.Message,), dict(
  DESCRIPTOR = _MULTIINFERENCEREQUEST,
  __module__ = 'tensorflow_serving.apis.inference_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.serving.MultiInferenceRequest)
  ))
_sym_db.RegisterMessage(MultiInferenceRequest)
MultiInferenceResponse = _reflection.GeneratedProtocolMessageType('MultiInferenceResponse', (_message.Message,), dict(
  DESCRIPTOR = _MULTIINFERENCERESPONSE,
  __module__ = 'tensorflow_serving.apis.inference_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.serving.MultiInferenceResponse)
  ))
_sym_db.RegisterMessage(MultiInferenceResponse)
# File-level options (cc_enable_arenas) parsed from their serialized form.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
from flask import Blueprint

# Blueprint collecting all v1 API endpoints; registered on the app elsewhere.
api_v1_routes = Blueprint("api_v1", __name__)

# Imported at the bottom (after the blueprint exists) so the view modules can
# import api_v1_routes without triggering a circular import.
from . import users, errors  # isort:skip
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .function_java_script_udf import *
from .get_job import *
from .job import *
from .output_blob import *
from .output_event_hub import *
from .output_mssql import *
from .output_service_bus_queue import *
from .output_servicebus_topic import *
from .reference_input_blob import *
from .stream_input_blob import *
from .stream_input_event_hub import *
from .stream_input_iot_hub import *
from ._inputs import *
from . import outputs
|
# coding:utf-8
import os
import pickle
import getpass
import keyring
from halo import Halo
class UserInfoManager(object):
    """Caches login info locally; the password is kept in the OS keyring,
    everything else in a pickle file under USER_INFO_PATH."""

    # Keys used in the user-info dict.
    CORP_ID = "CORP_ID"
    USER_ID = "USER_ID"
    USER_PASS = "USER_PASS"
    # Service name under which the password is stored in the keyring.
    MF_SERVICE = "MF_SERVICE"
    # expanduser("~") also works when $HOME is unset (the original
    # os.environ["HOME"] raised KeyError in that case, e.g. on Windows).
    USER_INFO_PATH = os.path.expanduser("~") + "/.local/share/dakoker"

    def get(self) -> dict:
        """Return cached login info, prompting interactively on a cache miss.

        Note: the original annotated the return type as `hash` (the builtin
        function); the method actually returns a dict.
        """
        user_info = self.get_cached()
        if not user_info:
            user_info = {}
            print("Please enter your login info.")
            user_info[self.CORP_ID] = input("company ID: ")
            user_info[self.USER_ID] = input("user ID or email address: ")
            user_info[self.USER_PASS] = getpass.getpass("password: ")
        return user_info

    def get_cached(self):
        """Load the pickled user info and re-attach the keyring password.

        Returns None when no cache file exists.
        """
        if os.path.isfile(self.USER_INFO_PATH + "/user_info.pkl"):
            with open(self.USER_INFO_PATH + "/user_info.pkl", "rb") as f:
                info = pickle.load(f)
                passwd = keyring.get_password(self.MF_SERVICE, info[self.USER_ID])
                if passwd:
                    info[self.USER_PASS] = passwd
                return info
        return None

    def save(self, user_info):
        """Persist user info: password goes to the keyring, the rest to a pickle.

        Mutates user_info by removing USER_PASS so it is never written to disk.
        """
        if not os.path.isdir(self.USER_INFO_PATH):
            os.makedirs(self.USER_INFO_PATH)
        keyring.set_password(
            self.MF_SERVICE, user_info[self.USER_ID], user_info[self.USER_PASS]
        )
        del user_info[self.USER_PASS]
        # Close the file deterministically (the original leaked the handle to GC).
        with open(self.USER_INFO_PATH + "/user_info.pkl", "wb") as f:
            pickle.dump(user_info, f)

    @classmethod
    def remove(cls) -> bool:
        """Delete the cache file; return True if anything was removed."""
        if os.path.isfile(cls.USER_INFO_PATH + "/user_info.pkl"):
            os.remove(cls.USER_INFO_PATH + "/user_info.pkl")
            return True
        return False

    @classmethod
    def remove_with_message(cls):
        """remove() wrapped in a Halo spinner with a success/warning message."""
        spinner = Halo(text="Remove your local data...", spinner="dots")
        # cls.remove() for consistency with classmethod style (was a hard-coded
        # UserInfoManager.remove(), which breaks subclassing).
        if cls.remove():
            spinner.succeed("Data Successfully deleted.")
        else:
            spinner.warn("Data not found.")
|
from ACI_functions import *
from pprint import pprint
import xml.etree.ElementTree as et
import xmltodict, json
import smtplib
from email.message import EmailMessage
# Accumulates the email body as each offending port is processed below.
data = ''
# How many errors do you want it to ignore before it tells you about it.
# This is referenced as: if errors > threshold, don't ignore it.
threshold = 100
def send_email(subject, content):
    """Send a plain-text email with the given subject and body.

    Sender, recipients and SMTP server are hard-coded placeholders.
    Using SMTP as a context manager guarantees QUIT/close even if
    send_message raises (the original leaked the connection on error).
    """
    sender = 'sender_email@gmail.com'
    recipient = [
        'dhimes@gmail.com',
    ]
    server = 'email_server.fake_company.com'
    msg = EmailMessage()
    msg.set_content(content)
    msg['Subject'] = subject
    msg['From'] = sender
    # EmailMessage accepts a list of addresses for the To header.
    msg['To'] = recipient
    with smtplib.SMTP(server) as s:
        s.send_message(msg)
#Don't think I ended up using this, but not going to delete just in case
def etree_to_dict(t):
d = {t.tag : map(etree_to_dict, t.iterchildren())}
d.update(('@' + k, v) for k, v in t.attrib.iteritems())
d['text'] = t.text
return d
def find_port_from_dn_data(dn_data):
    """Extract the interface name (e.g. 'eth1/33' or 'po5') from an ACI dn.

    Splits on the '[' / ']' brackets that wrap interface names in a dn and
    returns the first token containing 'eth' or 'po'; tokens containing
    'pod' are skipped so 'pod-1' never masquerades as a port-channel.
    Returns None implicitly when no interface token is present.
    """
    for bracket_chunk in dn_data.split('['):
        for token in bracket_chunk.split(']'):
            if 'eth' in token:
                return token
            if 'pod' in token:
                continue
            if 'po' in token:
                return token
def find_node_from_dn_data(dn_data):
    """Return the 'node-<id>' segment of a slash-separated ACI dn.

    Returns None implicitly when no segment contains 'node'.
    """
    for segment in dn_data.split('/'):
        if 'node' in segment:
            return segment
all_data = []
#See ACI_functions for the list of ACIs
for ACI_site in ACI_sites:
    #Data center name, used for human readability
    dc = ACI_site[0]
    #URL to address the ACI
    base_url = ACI_site[1]
    #Login to the ACI
    cookies = get_token(username,password,base_url)
    #Grab the CRC errors
    ports_data = pull_crc_errors(cookies, base_url)
    #Pull the LLDP data
    all_lldp_data = pull_lldp_data(cookies, base_url)
    #Turn the LLDP data into a dict, makes searching for data way easier IMO
    all_lldp_data = build_lldp_dict(all_lldp_data)
    #Grab the serial numbers for all the switches
    # NOTE(review): this passes ACI_sites (every site) and runs once per site -
    # confirm whether it should take ACI_site, or be hoisted above the loop.
    sn_data = pull_all_switch_SNs(ACI_sites,username,password)
    #pprint (all_lldp_data)
    for port_data in ports_data:
        dn_data = port_data['dn']
        all_data.append(dn_data)
        #find port
        port = find_port_from_dn_data( dn_data)
        #find node
        node = find_node_from_dn_data( dn_data)
        #Find the CRC errors
        errors = port_data['cRCAlignErrors']
        #If the # of errors isn't higher than the threshold ignore this port
        if int(errors) > threshold:
            #Sometimes the node is referenced as "node-#" and sometimes as just "#"
            tmp_local_node = node.split('-')[1]
            local_sn = sn_data[tmp_local_node]
            #This string is used if an LLDP neighbor isn't found; if a neighbor is found it is overwritten
            tmp_string = '''
LLDP Neighbor not found for this entry
Device with errors:
Data_Center: {}
Node: {}
SN: {} Port:{}
Error Count:{}
------------------------------
'''.format (dc,node,local_sn, port, errors)
            #Try and see if it can find an LLDP neighbor for this node/port, if it can build a new string to go into the email
            try:
                lldp_data = all_lldp_data[node][port]
                for lldp_neighbor in lldp_data:
                    neighbor_node = lldp_neighbor['node']
                    tmp_neighbor_node = neighbor_node.split('-')[1]
                    neighbor_port = lldp_neighbor['port']
                    neighbor_name = lldp_neighbor['sysName']
                    neighbor_sn = sn_data[tmp_neighbor_node]
                    neighbor_desc = lldp_neighbor['sysDesc']
                    tmp_string = """
Data_Center: {}
Device with errors:
Node: {}
SN: {}
Port:{}
Error Count:{}
Neighbor Device:
Name: {}
Desc: {}
Node: {}
SN: {}
Port: {}
------------------------------
""".format (dc,node,local_sn, port, errors,neighbor_name,neighbor_desc,neighbor_node,neighbor_sn,neighbor_port)
                    data = data + tmp_string
                    #If all that was found add the new string to the email
                    tmp_string = ''
            #Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
            #propagate; missing keys (unknown node/port/SN) fall back to the
            #"neighbor not found" string built above.
            except Exception:
                #If it wasn't all found add the old string
                data = data+ tmp_string
print(data)
#print(data)
#Send an email, subject, and body
send_email("DC Errors Test", data)
|
from PySide.QtGui import QUndoCommand
class EditCommand(QUndoCommand):
    """Undoable edit of a single table cell.

    The new value is supplied after construction via newVal(); redo()/undo()
    swap the cell between the new and previous values, remembering whichever
    one they overwrite.
    """

    def __init__(self, model, index):
        """Remember the target model and cell index.

        :param model: QTableModel
        :param index: QModelIndex
        """
        super().__init__()
        self.__model = model
        self.__index = index
        self.__oldValue = None
        self.__newValue = None

    def redo(self):
        """Write the new value into the cell, saving the current one for undo()."""
        self.__oldValue = self.__model.data(self.__index)
        self.__model.setData(self.__index, self.__newValue)

    def undo(self):
        """Restore the previous value, saving the current one for redo()."""
        self.__newValue = self.__model.data(self.__index)
        self.__model.setData(self.__index, self.__oldValue)

    def setText(self, *args, **kwargs):
        """Pure pass-through to QUndoCommand.setText (kept for API compatibility)."""
        super().setText(*args, **kwargs)

    def newVal(self, newVal):
        """Set the value that the next redo() will write into the cell."""
        self.__newValue = newVal
class DuplicateRowCommand(QUndoCommand):
    """Undoable duplication of a single row: redo() duplicates the row at the
    stored index, undo() removes the one row that was inserted."""

    def __init__(self, model, index):
        super().__init__()
        self.__model = model
        self.__index = index

    def redo(self):
        """Duplicate the row at the stored index."""
        self.__model.duplicate_row(self.__index)

    def undo(self):
        """Remove the duplicated row again."""
        self.__model.removeRows(self.__index, 1)
class InsertRowsCommand(QUndoCommand):
    """Undoable insertion of `amount` rows at `index`."""

    def __init__(self, model, index, amount):
        super().__init__()
        self.__model = model
        self.__index = index
        self.__amount = amount

    def redo(self):
        """Insert the requested number of rows."""
        self.__model.insertRows(self.__index, self.__amount)

    def undo(self):
        """Remove the rows inserted by redo()."""
        self.__model.removeRows(self.__index, self.__amount)
class RemoveRowsCommand(QUndoCommand):
    """Undoable removal of `amount` rows at `index`.

    undo() restores a full snapshot of the model's data and header taken
    just before the removal, rather than re-inserting individual rows.
    """

    def __init__(self, model, index, amount):
        super().__init__()
        self.__model = model
        self.__index = index
        self.__amount = amount
        self.__oldList = None
        self.__oldHeader = None

    def redo(self):
        """Snapshot the model, then remove the rows."""
        self.__oldHeader = list(self.__model.get_header())
        self.__oldList = list(self.__model.get_data())
        self.__model.removeRows(self.__index, self.__amount)

    def undo(self):
        """Restore the snapshot taken by redo()."""
        self.__model.set_data(self.__oldList, self.__oldHeader)
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import numpy as np
from app import app
from scripts.read_data import get_language
# Top navigation bar: a language drop-down, an "About" link, and the modal the
# link opens. The modal's title/body/close-label are filled in by callbacks.
layout = dbc.Nav([
    dbc.DropdownMenu(
        [dbc.DropdownMenuItem(["English ", html.I(className='fa fa-language')], className="drop-items", id="english"),
         # dbc.DropdownMenuItem(["Spanish ", html.I(className='fa fa-language')], className="drop-items", id="spanish"),
         dbc.DropdownMenuItem(["French ", html.I(className='fa fa-language')], className="drop-items", id="french")],
        label="Language", id='language', nav=True, className="ml-2", disabled=False,
    ),
    dbc.NavItem(dbc.NavLink("About", id='about'), className="ml-2"),
    dbc.Modal(
        [
            dbc.ModalHeader(id='about-title'),
            dbc.ModalBody(id='about-body'),
            dbc.ModalFooter(
                dbc.Button("Close", id="about-close", className="ml-auto")
            ),
        ],
        size="lg",
        id="about-modal",
    )
])
@app.callback(
    Output("about-modal", "is_open"),
    [Input("about", "n_clicks"),
     Input("about-close", "n_clicks")],
    [State("about-modal", "is_open")],
)
def toggle_popover(n_1, n_2, is_open):
    """Flip the About modal whenever either button has been clicked;
    leave it untouched before any click has happened."""
    if not (n_1 or n_2):
        return is_open
    return not is_open
@app.callback(
    Output('current-language', 'data'),
    [Input(language, 'n_clicks_timestamp') for language in ['english', 'french']],
    [State('current-language', 'data')],
    prevent_initial_call=True
)
def current_language(n1, n2, language):
    """Store the most recently clicked language ('english' or 'french').

    Timestamps that are still None count as 0; when the two timestamps are
    equal and a language is already stored, the stored value is kept.
    """
    options = ['english', 'french']
    timestamps = [0 if ts is None else ts for ts in (n1, n2)]
    if n1 == n2 and language:
        # No fresh click to disambiguate - keep what we had.
        return language
    # The larger (newer) timestamp wins; a tie picks the first option, which
    # matches numpy.argmax's first-occurrence behavior in the original.
    return options[timestamps.index(max(timestamps))]
@app.callback(
    [Output('about', 'children'),
     Output('about-title', 'children'),
     Output('about-body', 'children'),
     Output('about-close', 'children'),
     Output('language', 'label')],
    [Input('current-language', 'modified_timestamp')],
    [State('current-language', 'data')],
)
def update_language(ts, language):
    """Re-render all translated UI strings whenever the stored language changes.

    Aborts (PreventUpdate) while no language has been stored yet. The body of
    the About modal is the four translated paragraphs followed by a row of
    partner logos/links.
    """
    if not language:
        raise PreventUpdate
    # language_dic maps UI element names to strings in the selected language.
    language_dic = get_language(language)
    about_content = [html.P(language_dic['about']['body'][0]), html.P(language_dic['about']['body'][1]),
                     html.P(language_dic['about']['body'][2]), html.P(language_dic['about']['body'][3]),
                     dbc.Row([dbc.Col(html.A(html.Img(src='../assets/kth.png', style={'height': '130px'}),
                                             href='https://www.energy.kth.se/energy-systems/about-the-division-of-energy-systems-1.937036'),
                                      width=3),
                              dbc.Col(html.A(html.Img(src='../assets/sei.png', style={'height': '130px'}),
                                             href='https://www.sei.org/'), width=4),
                              dbc.Col(html.A(html.Img(src='../assets/fao.png', style={'height': '130px'}),
                                             href='http://www.fao.org/home/en/'), width=2)], justify="center")
                     ]
    return language_dic['about']['header'], \
        language_dic['about']['title'], \
        about_content, language_dic['about']['close'], \
        language_dic['language']
|
# Choice tuples of (stored value, display label) pairs, presumably for
# model/form fields - TODO confirm against the models that use them.

# Supported languages; the empty value is the "not chosen" placeholder.
LANGUAGE = (
    ('', 'select'),
    ('sw', 'Swahili'),
    ('en', 'English'),
)
# Moderation workflow states.
STATUS = (
    ('Pending','Pending'),
    ('Approved','Approved'),
    ('Discarded', 'Discarded'),
)
# Membership tiers; the empty value is the "not chosen" placeholder.
MEMBERSHIP = (
    ('','select'),
    ('Free', 'Free'),
    ('Premium', 'Premium'),
)
from functools import cache
from logging import Logger
from typing import Any, Dict, List, Set, Tuple
import pandas as pd
from the_census._api.interface import ICensusApiFetchService
from the_census._data_transformation.interface import ICensusDataTransformer
from the_census._exceptions import EmptyRepositoryException
from the_census._geographies.interface import IGeographyRepository
from the_census._geographies.models import GeoDomain
from the_census._stats.interface import ICensusStatisticsService
from the_census._utils.log.factory import ILoggerFactory
from the_census._utils.timer import timer
from the_census._utils.unique import get_unique
from the_census._variables.models import VariableCode
from the_census._variables.repository.interface import IVariableRepository
class CensusStatisticsService(ICensusStatisticsService[pd.DataFrame]):
    """Fetches census statistics through the API and shapes them into a DataFrame."""

    _api: ICensusApiFetchService
    _transformer: ICensusDataTransformer[pd.DataFrame]
    _variable_repo: IVariableRepository[pd.DataFrame]
    _geo_repo: IGeographyRepository[pd.DataFrame]
    _logger: Logger

    def __init__(
        self,
        api: ICensusApiFetchService,
        transformer: ICensusDataTransformer[pd.DataFrame],
        variableRepo: IVariableRepository[pd.DataFrame],
        geoRepo: IGeographyRepository[pd.DataFrame],
        loggerFactory: ILoggerFactory,
    ) -> None:
        self._api = api
        self._transformer = transformer
        self._variable_repo = variableRepo
        self._geo_repo = geoRepo
        self._logger = loggerFactory.getLogger(__name__)

    @timer
    def get_stats(
        self,
        variables_to_query: List[VariableCode],
        for_domain: GeoDomain,
        *in_domains: GeoDomain,
    ) -> pd.DataFrame:
        """Fetch stats for the given variables and geographic domains.

        Arguments are deduplicated and converted to tuples so the cached
        private implementation receives hashable arguments.
        """
        return self.__get_stats(
            variables_to_query=tuple(get_unique(variables_to_query)),
            for_domain=for_domain,
            in_domains=tuple(get_unique(in_domains)),
        )

    # NOTE(review): functools.cache on an instance method keys on `self` and
    # keeps every instance alive for the life of the process (ruff B019).
    # Kept as-is to preserve the existing caching behavior.
    @cache
    def __get_stats(
        self,
        variables_to_query: Tuple[VariableCode],
        for_domain: GeoDomain,
        in_domains: Tuple[GeoDomain],
    ) -> pd.DataFrame:
        """Pull raw stats from the API and transform them into a DataFrame."""
        # Materialize the API response directly (the original bound a lambda
        # and copied its result with a pointless identity list comprehension).
        api_results: List[List[List[str]]] = list(
            self._api.stats(list(variables_to_query), for_domain, list(in_domains))
        )
        (
            column_headers,
            type_conversions,
        ) = self._get_variable_names_and_type_conversions(set(variables_to_query))
        geo_domains_queried = [for_domain] + list(in_domains)
        supported_geos = self._geo_repo.get_supported_geographies()
        return self._transformer.stats(
            api_results,
            type_conversions,
            geo_domains_queried,
            column_headers,
            supported_geos,
        )

    def _get_variable_names_and_type_conversions(
        self, variables_to_query: Set[VariableCode]
    ) -> Tuple[Dict[VariableCode, str], Dict[str, Any]]:
        """Map variable codes to output column names and dtype conversions.

        Raises EmptyRepositoryException when a queried variable is missing
        from the variable repository.
        """
        relevant_variables = {
            variable.code: variable
            for variable in self._variable_repo.variables.values()
            if variable.code in variables_to_query
        }
        if len(relevant_variables) != len(variables_to_query):
            msg = f"Queried {len(variables_to_query)} variables, but found only {len(relevant_variables)} in repository"
            self._logger.exception(msg)
            raise EmptyRepositoryException(msg)
        # When two variables share a cleaned name, suffix each with its group
        # code so output columns stay unambiguous.
        has_duplicate_names = len(
            {v.cleaned_name for v in relevant_variables.values()}
        ) < len(variables_to_query)
        type_conversions: Dict[str, Any] = {}
        column_headers: Dict[VariableCode, str] = {}
        for code, variable in relevant_variables.items():
            if variable.predicate_type in ["int", "float"]:
                type_conversions[code] = float
            cleaned_name = variable.cleaned_name
            if has_duplicate_names:
                cleaned_name += f"_{variable.group_code}"
            column_headers[code] = cleaned_name
        return column_headers, type_conversions
|
class ExecutionPolicy:
    """Enumerates when a step should run relative to the previous step's outcome."""
    RUN_ALWAYS = 0                # run unconditionally
    RUN_IF_PREVIOUS_SUCCEED = 1   # run only after a successful previous step
    RUN_IF_PREVIOUS_FAILED = 2    # run only after a failed previous step
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics
# Import applications
import KratosMultiphysics.MeshMovingApplication as KratosMeshMoving
import KratosMultiphysics.TrilinosApplication as TrilinosApplication
# Other imports
import KratosMultiphysics.mpi as KratosMPI
# Import baseclass
from KratosMultiphysics.MeshMovingApplication.mesh_solver_base import MeshSolverBase
class TrilinosMeshSolverBase(MeshSolverBase):
    """MPI-parallel (Trilinos) base class for mesh-motion solvers.

    Compared to the serial MeshSolverBase it defaults the linear solver to
    Amesos_Klu, adds PARTITION_INDEX as a nodal variable, and imports and
    prepares the ModelPart in a distributed fashion. Derived classes must
    implement _create_mesh_motion_solving_strategy().
    """
    def __init__(self, mesh_model_part, custom_settings):
        if not custom_settings.Has("mesh_motion_linear_solver_settings"): # Override defaults in the base class.
            linear_solver_settings = KratosMultiphysics.Parameters("""{
                "solver_type" : "amesos",
                "amesos_solver_type" : "Amesos_Klu"
            }""")
            custom_settings.AddValue("mesh_motion_linear_solver_settings", linear_solver_settings)
        super(TrilinosMeshSolverBase, self).__init__(mesh_model_part, custom_settings)
        self.print_on_rank_zero("::[TrilinosMeshSolverBase]:: Construction finished")

    #### Public user interface functions ####

    def AddVariables(self):
        """Add nodal variables; PARTITION_INDEX is required for MPI runs."""
        super(TrilinosMeshSolverBase, self).AddVariables()
        self.mesh_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PARTITION_INDEX)
        self.print_on_rank_zero("::[TrilinosMeshSolverBase]:: Variables ADDED.")

    def ImportModelPart(self):
        """Read the model part and distribute it across MPI ranks."""
        self.print_on_rank_zero("::[TrilinosMeshSolverBase]:: ", "Importing model part.")
        from trilinos_import_model_part_utility import TrilinosImportModelPartUtility
        self.trilinos_model_part_importer = TrilinosImportModelPartUtility(self.mesh_model_part, self.settings)
        self.trilinos_model_part_importer.ImportModelPart()
        self.print_on_rank_zero("::[TrilinosMeshSolverBase]:: ", "Finished importing model part.")

    def PrepareModelPart(self):
        """Prepare the model part and then build the MPI communicators."""
        super(TrilinosMeshSolverBase, self).PrepareModelPart()
        # Construct the mpi-communicator
        self.trilinos_model_part_importer.CreateCommunicators()
        self.print_on_rank_zero("::[TrilinosMeshSolverBase]::", "ModelPart prepared for Solver.")

    #### Specific internal functions ####

    def get_communicator(self):
        """Lazily create and cache the Trilinos communicator."""
        if not hasattr(self, '_communicator'):
            self._communicator = TrilinosApplication.CreateCommunicator()
        return self._communicator

    def print_on_rank_zero(self, *args):
        """Print the joined args only on MPI rank 0 (barrier keeps output ordered)."""
        KratosMPI.mpi.world.barrier()
        if KratosMPI.mpi.rank == 0:
            print(" ".join(map(str,args)))

    #### Private functions ####

    def _create_linear_solver(self):
        """Construct the Trilinos linear solver from the configured settings."""
        import trilinos_linear_solver_factory
        linear_solver = trilinos_linear_solver_factory.ConstructSolver(self.settings["mesh_motion_linear_solver_settings"])
        return linear_solver

    def _create_mesh_motion_solving_strategy(self):
        # Abstract hook: concrete solvers must supply the solving strategy.
        raise Exception("Mesh motion solver must be created by the derived class.")
#!/usr/bin/env python3
import sys
from collections import Counter
from pprint import pprint
from tqdm import tqdm
from tools.lib.route import Route
from tools.lib.logreader import LogReader
if __name__ == "__main__":
r = Route(sys.argv[1])
cnt_valid: Counter = Counter()
cnt_events: Counter = Counter()
for q in tqdm(r.qlog_paths()):
if q is None:
continue
lr = list(LogReader(q))
for msg in lr:
if msg.which() == 'carEvents':
for e in msg.carEvents:
cnt_events[e.name] += 1
if not msg.valid:
cnt_valid[msg.which()] += 1
print("Events")
pprint(cnt_events)
print("\n\n")
print("Not valid")
pprint(cnt_valid)
|
import numpy
import cartopy
class topo:
    """
    A class to aid in playing with the design of idealized domains
    """
    def __init__(self, nj, ni, dlon=1, dlat=1, lon0=0, lat0=0, D=1):
        """
        Create a topo object with a mesh of nj*ni cells with depth -D
        on a mesh ranging from lon0..lon0+dlon and lat0..lat0+dlat.
        By default D=1, dlon=1, dlat=1, lon0=0 and lat0=0.
        """
        # Depth field (negative downward); starts as a flat bottom at -D.
        self.z = -D * numpy.ones((nj,ni))
        self.D0 = D # Nominal deepest depth
        # Coordinates of grid nodes (0..1)
        self.xg = numpy.arange(ni+1)/ni * dlon + lon0
        self.yg = numpy.arange(nj+1)/nj * dlat + lat0
        # Coordinates of cell centers (0..1)
        self.xc = (numpy.arange(ni)+0.5)/ni * dlon + lon0
        self.yc = (numpy.arange(nj)+0.5)/nj * dlat + lat0
        # Store 2D arrays of coordinates
        self.XG, self.YG = numpy.meshgrid(self.xg, self.yg)
        self.XC, self.YC = numpy.meshgrid(self.xc, self.yc)

    # Some 1D functions to generate simple shapes
    # NOTE: the shape helpers below take no `self` and are invoked through the
    # class (e.g. topo.heaviside(...)), so they act like static methods.
    def heaviside(x, x0):
        """Returns 0 for x < x0, 1 for x >= x0"""
        b = 0*x
        b[x>=x0] = 1
        return b
    def box(x, x0, x1):
        """Returns 0 for x < x0, 1 for x0 <= x <= x1, 0 for x > x1"""
        return topo.heaviside(x, x0) * topo.heaviside(-x, -x1)
    def cone(x, x0, dx):
        """Returns 0 for |x-x0| > dx, straight lines peaking at x = x0"""
        return numpy.maximum(0, 1. - numpy.abs(x-x0)/dx)
    def clipped_cone(x, x0, dx, clip):
        """Returns a cone clipped at height 'clip'"""
        return numpy.minimum(clip, topo.cone(x, x0, dx))
    def scurve(x, x0, dx):
        """Returns 0 for x<x0, 1 for x>x0+dx, and a smooth cubic in between."""
        s = numpy.minimum(1, numpy.maximum(0, (x-x0)/dx))
        return (3 - 2*s)*( s*s )
    # Actual coastal profile
    def coastal_sprofile(x, x0, dx, shelf, lf=.125, bf=.125, sf=.5):
        """A 'coastal profile' with coastal shelf and slope.
        Of profile width dx:
        - lf is the land fraction (value 0)
        - bf is the fraction that is the beach slope.
        - sf is the fraction that is the shelf slope.
        The remaining fraction is the shelf.
        """
        s = ( x - x0 )/dx
        sbs = s - lf
        ssd = s - (1-sf)
        return shelf * topo.scurve(sbs,0,bf) + ( 1 - shelf ) * topo.scurve(ssd,0,sf)

    # More complicate structures built from the above simple shapes
    def add_NS_ridge(self, lon, lat0, lat1, dlon, dH, clip=0, p=1):
        """Raise a north-south ridge along longitude lon between lat0 and lat1."""
        r_fn = topo.cone(topo.dist_from_line(self.XC, lon, self.YC, lat0, lat1), 0, dlon)**p
        self.z = numpy.maximum(self.z, numpy.minimum(clip, (self.D0 - dH) * ( r_fn - 1 ) - dH))
    def add_NS_coast(self, lon, lat0, lat1, dlon, shelf):
        """Add a north-south coastline (shelf profile) along longitude lon."""
        r = topo.dist_from_line(self.XC, lon, self.YC, lat0, lat1)
        self.z = numpy.maximum(self.z, - self.D0 * topo.coastal_sprofile(r, 0, dlon, shelf/self.D0) )
    def add_EW_ridge(self, lon0, lon1, lat, dlat, dH, clip=0, p=1):
        """Raise an east-west ridge along latitude lat between lon0 and lon1."""
        r_fn = topo.cone(topo.dist_from_line(self.YC, lat, self.XC, lon0, lon1), 0, dlat)**p
        self.z = numpy.maximum(self.z, numpy.minimum(clip, dH * ( r_fn - 1) - self.D0))
    def add_EW_coast(self, lon0, lon1, lat, dlat, shelf):
        """Add an east-west coastline (shelf profile) along latitude lat."""
        r = topo.dist_from_line(self.YC, lat, self.XC, lon0, lon1)
        self.z = numpy.maximum(self.z, -self.D0 * topo.coastal_sprofile(r, 0, dlat, shelf/self.D0) )
    def add_angled_coast(self, lon_eq, lat_mer, dr, shelf):
        """Add a straight coastline through (lon_eq, 0) and (0, lat_mer)."""
        A, B, C = lat_mer, lon_eq, -lon_eq * lat_mer
        # Signed distance from the line A*x + B*y + C = 0, normalized.
        r = 1. / numpy.sqrt( A*A + B*B )
        r = r * ( A * self.XC + B * self.YC + C )
        r_fn = topo.coastal_sprofile(r, 0, dr, shelf/self.D0)
        self.z = numpy.maximum(self.z, -self.D0 * r_fn )
    def add_circular_ridge(self, lon0, lat0, radius, dr, dH, clip=0):
        """Raise a ring-shaped ridge of radius `radius` centered on (lon0, lat0)."""
        r = numpy.sqrt( (self.XC - lon0)**2 + (self.YC - lat0)**2 )
        r = numpy.abs( r - radius)
        r_fn = topo.clipped_cone(r, 0, dr, 1 - dH/self.D0)
        self.z = numpy.maximum(self.z, numpy.minimum(clip, self.D0 * ( r_fn - 1 ) ) )
    def dist_from_line(X,x0,Y,y0,y1):
        """Returns distance from line x=x0 between y=y0 and y=y1"""
        dx = X - x0
        # Clamp Y to the segment's latitude range so distances beyond the
        # endpoints are measured to the nearest endpoint.
        yr = numpy.minimum( numpy.maximum(Y, y0), y1)
        dy = Y - yr
        return numpy.sqrt( dx*dx + dy*dy)
    def test1d(ax):
        """Displays the library of 1D simple profiles"""
        x = numpy.linspace(-.1,1.2,100)
        ax.plot(x, topo.box(x,.25,.5), label='box(x,0.25,.5)')
        ax.plot(x, topo.cone(x,0.5,.25), label='cone(x,0,.5)')
        ax.plot(x, topo.clipped_cone(x,0.5,.2,.8), label='clippedcone(x,0.5,.2,.8)')
        ax.plot(x, topo.scurve(x,0,1), label='scurve(x,0,1)')
        ax.plot(x, topo.coastal_sprofile(x,0,1,.2), label='coastal_sprofile(x,0,1,.2)')
        ax.legend()
    def plot(self, fig, Atlantic_lon_offset=None):
        """Draw a four-panel summary of the topography into `fig`:
        contours, N-S profiles, E-W profiles, and a pcolormesh of depth."""
        ax = fig.add_subplot(2,2,1)
        im = ax.contour(self.xc, self.yc, self.z, levels=numpy.arange(-self.D0,1,500))
        fig.colorbar(im, ax=ax); ax.set_title('Depth (plan view)')
        # Draw coastlines in NeverWorld2 space (i.e. offset in longitude)
        if Atlantic_lon_offset is not None:
            for geo in cartopy.feature.COASTLINE.geometries():
                x,y=geo.xy
                ax.plot(numpy.array(x)-Atlantic_lon_offset,y, 'k:')
            ax.set_xlim(self.xg.min(), self.xg.max())
            ax.set_ylim(self.yg.min(), self.yg.max())
        ax.set_aspect('equal')
        ax = fig.add_subplot(2,2,2)
        ax.plot( self.yc, self.z.max(axis=1), 'k:')
        ax.plot( self.yc, self.z[:,::10]); ax.set_title('Profiles at various longitudes');
        ax = fig.add_subplot(2,2,3)
        ax.plot( self.xc, self.z[::10,:].T); ax.set_title('Profiles at various latitudes');
        ax = fig.add_subplot(2,2,4)
        im = ax.pcolormesh(self.xg, self.yg, self.z); fig.colorbar(im, ax=ax);
        ax.set_aspect('equal')
|
from .logger import Logger
from .utility import *
from .service import *
from .youtube import *
from .nlp import *
from .main import * |
import importlib
import os
import sys
import urllib.request as urllib2
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
import inspect
class Trompi(object):
    """Fetches remote .py files over HTTP, imports them into the *caller's*
    module namespace, and registers them with the active Spark context so
    executors can import them too.

    NOTE(review): _url has no http:// scheme by default, and building the
    uri with f"{url}/{base}/{path}" produces a double slash when a trailing
    slash is registered (as in the __main__ example) -- urllib appears to
    tolerate it, but confirm against the file service.
    """
    # Local cache directory for downloaded modules; added to sys.path.
    _PYTHON_DIR_PATH = "./.dynamic_python_file_directory_from_file_service"
    _url = '127.0.0.1:8019'
    _base_directory_path = "."
    # Created at class-definition time (module import side effect).
    _spark = SparkSession.builder.getOrCreate()
    _sc = SparkContext.getOrCreate()
    @classmethod
    def register_file_service_(cls, url):
        # Base URL of the HTTP file service serving the .py sources.
        cls._url = url
    @classmethod
    def register_base_directory_path(cls, path):
        # Directory prefix on the file service under which modules live.
        cls._base_directory_path = path
    @classmethod
    def import_(cls, remote_file_path, import_member=False):
        """Download remote_file_path (a single-suffix *.py name), import or
        reload it in the caller's globals, and ship it to Spark executors.

        import_member=True additionally copies the module's public names
        into the caller's globals (star-import style).
        """
        file_name = remote_file_path.split("/")[-1]
        _s = file_name.split(".")
        # Only plain "name.py" files are supported (no dotted names).
        assert _s.__len__() == 2
        assert _s[-1] == 'py'
        module_name = _s[0]
        local_file_path = f"{cls._PYTHON_DIR_PATH}/{file_name}"
        uri = f"{cls._url}/{cls._base_directory_path}/{remote_file_path}"
        if not os.path.exists(cls._PYTHON_DIR_PATH):
            os.mkdir(cls._PYTHON_DIR_PATH)
        if cls._PYTHON_DIR_PATH not in sys.path:
            sys.path.insert(0, cls._PYTHON_DIR_PATH)
        # Download the source and cache it locally before importing.
        with open(local_file_path, 'w') as fw:
            with urllib2.urlopen(uri) as f:
                fw.write(f.read().decode())
        # Use the globals dictionary of the caller's frame so the import
        # lands in the module that invoked import_, not in this one.
        _g = inspect.stack()[1][0].f_globals
        if module_name in _g:
            module = _g[module_name]
            importlib.reload(module)
        else:
            module = __import__(module_name)
            _g[module_name] = module
        # Not recommended: star-import the module's public names into the caller.
        if import_member:
            for key, val in module.__dict__.items():
                if not key.startswith('_'):
                    _g[key] = val
        cls._sc.addPyFile(local_file_path)
# Smoke test: point at a local file service and import one remote module.
if __name__ == '__main__':
    Trompi.register_file_service_("http://127.0.0.1:8019/")
    Trompi.register_base_directory_path("trompi/test_remote_package")
    Trompi.import_("util.py")
|
import pickle
import warnings
from pathlib import Path
import sklearn_crfsuite
from .features import transform, tokens_to_instance, load_data
from .utils import tokenize
class AddressParser:
    """CRF-based address parser: loads a pickled pretrained model or trains
    a fresh one from labelled data."""

    def __init__(self, use_pretrained=True):
        if use_pretrained:
            model_path = Path(__file__).parent.absolute() / "model"
            with open(model_path, "rb") as fh:
                self.crf = pickle.load(fh)
        else:
            self.crf = None
            warnings.warn("Run train() to learn from some existing labelled data")

    def parse(self, address):
        """Return [(token, label), ...] for one address string."""
        if not self.crf:
            raise RuntimeError("Model is not loaded")
        tokens = tokenize(address)
        predicted = self.crf.predict([transform(address)])[0]
        return list(zip(tokens, predicted))

    def train(self, data, c1=0.1, c2=0.1):
        """Fit an L-BFGS CRF on `data` and keep it as the active model."""
        X, y = load_data(data)
        model = sklearn_crfsuite.CRF(
            algorithm="lbfgs",
            max_iterations=100,
            all_possible_transitions=True,
            c1=c1,
            c2=c2,
        )
        model.fit(X, y)
        self.crf = model
|
#!/usr/bin/env python3
from subprocess import check_output
import argparse
# Process name to look for on the remote host.
proc = 'sim_server'
# NOTE(review): the '|' and 'grep' are passed as separate ssh arguments;
# ssh joins its remote-command arguments with spaces, so the pipe is
# interpreted by the remote shell -- confirm this is intentional.
command = ['ps', 'x', '-o', 'pid,%cpu,%mem,command', '|', 'grep']
host = ["fisher"]
parser = argparse.ArgumentParser(description='Status of server.')
parser.add_argument("host", nargs=argparse.REMAINDER, help='host, default is fisher')
args = parser.parse_args()
# Idiomatic truthiness check (was: `if not args.host == []`).
if args.host:
    host = args.host
output = check_output(['ssh'] + host + command + [proc])
print("\n" + host[0] + ':')
for line in output.splitlines():
    # Drop the grep process itself and the status-server helper from the listing.
    if b'statusServer' not in line and b'grep' not in line:
        print(" " + line.decode("utf-8"))
print()
import numpy as np
import torch
from sklearn.decomposition import PCA
from tqdm import tqdm
def fit_pca(X_train, X_test, X_val, n_components):
    """
    Fits Principal Component Analysis on training data (first parameter),
    and transforms training, validation and test data accordingly.
    Parameters
    ----------
    X_train : np.ndarray
        PCA will be fit on this array.
    X_test : np.ndarray
        Test data that will be transformed.
    X_val : np.ndarray
        Validation data that will be transformed.
    n_components : int
        The number of principal components PCA should produce
    Returns
    -------
    pca : sklearn.PCA
        Fitted PCA algorithm.
    X_train: np.ndarray
        Transformed training data.
    X_test: np.ndarray
        Transformed testing data.
    X_val: np.ndarray
        Transformed validation data.
    """
    n_rows_train = X_train.shape[0]
    n_rows_test = X_test.shape[0]
    n_rows_val = X_val.shape[0]
    # Flatten any trailing dimensions to 2-D (n_samples, n_features) as PCA requires.
    X_train_flatten = X_train.reshape(n_rows_train, -1)
    X_val_flatten = X_val.reshape(n_rows_val, -1)
    X_test_flatten = X_test.reshape(n_rows_test, -1)
    # Fit on training data only; whiten=True scales components to unit variance.
    pca = PCA(n_components=n_components, whiten=True).fit(X_train_flatten)
    # Apply transformation
    X_train = pca.transform(X_train_flatten)
    X_test = pca.transform(X_test_flatten)
    X_val = pca.transform(X_val_flatten)
    return pca, X_train, X_test, X_val
def extract_cnn_features(model, loader, batch_size):
    """
    Generate embeddings from last layer of the model.
    Parameters
    ----------
    model : torch.model
        Model which will be used for generating embeddings
    loader : DataLoader
        DataLoader where data is stored, and embeddings are created for.
    batch_size : int
        Batch size for the DataLoader.
    Returns
    -------
    embeddings : List[np.ndarray] of shape (n_batches, batch_size, n_features_flattened)
        Returns the feature representations as flattened arrays. For example EfficientNet-b0 returns 1280x7x7 embeddings,
        so the resulting n_features_flattened
    labels : List[np.ndarray]
        Per-batch label arrays, aligned with `embeddings`.
    """
    embeddings = []
    labels = []
    # NOTE(review): inputs are moved to this device but the model is not --
    # on a CUDA machine this mismatches unless the caller moved the model;
    # confirm at the call site.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    with torch.no_grad():
        for X, y in tqdm(loader):
            X, y = X.to(device), y
            # Incomplete final batches are skipped entirely (silently).
            if y.shape[0] == batch_size:
                features = model.extract_features(X)
                # Flatten features
                feat_flat = features.detach().flatten(1).cpu().numpy()
                labels.append(y.numpy())
                embeddings.append(feat_flat)
    return embeddings, labels
def load_cnn_embedding(mode: str):
    """
    Loads CNN embedding from the local storage.
    NOTE: Before this the local folders must be populated with embedding data.
    This can be done with the script `initialize_project.sh`.
    Parameters
    ----------
    mode : str, in ['train', 'test', 'val']
        Which CNN embedding should be loaded.
    Returns
    -------
    X : np.ndarray of shape (n_samples, n_embeddings)
        Embeddings of the predictor variables
    y : np.ndarray of shape (n_samples, )
        The target values, stored just for consistency purposes.
    """
    valid = ('train', 'test', 'val')
    assert mode in valid, "Mode must be either 'train', 'test', or 'val'"
    # Memory-map both arrays so large embeddings are not read eagerly.
    X = np.load(f'../data/processed/X_{mode}_embeddings.npy', mmap_mode='r')
    y = np.load(f'../data/processed/y_{mode}_labels.npy', mmap_mode='r').reshape(-1)
    return X, y
|
from .fromkey import FromKey
from .tokey import ToKey
from .tensorflowscorer import TensorFlowScorer
from .datasettransformer import DatasetTransformer
from .onnxrunner import OnnxRunner
from .datetimesplitter import DateTimeSplitter
from .tokeyimputer import ToKeyImputer
from .tostring import ToString
# Public API of this package module: the transformer classes re-exported above.
__all__ = [
    'DateTimeSplitter',
    'FromKey',
    'ToKey',
    'ToKeyImputer',
    'ToString',
    'TensorFlowScorer',
    'DatasetTransformer',
    'OnnxRunner'
]
|
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
import mock
import pytest
import qitest.conf
# Fixture payloads shared by the tests below: one gtest entry with a timeout...
test_gtest_one = {
    "name": "gtest_one",
    "cmd": ["/path/to/test_one", "--gtest_output", "foo.xml"],
    "timeout": 2,
}
# ...and one perf-flagged entry.
test_perf_one = {
    "name": "perf_one",
    "cmd": ["/path/to/perf_one"],
    "perf": True
}
def test_can_add_tests(tmpdir):
    """Adding two tests then parsing the file yields them in insertion order."""
    json_path = tmpdir.join("qitest.json").strpath
    for entry in (test_gtest_one, test_perf_one):
        qitest.conf.add_test(json_path, **entry)
    assert qitest.conf.parse_tests(json_path) == [test_gtest_one, test_perf_one]
def test_errors(tmpdir):
    """add_test rejects entries missing a cmd, missing a name, or duplicating a name."""
    qitest_json_path = tmpdir.join("qitest.json").strpath
    # Bug fix: Exception.message is a Python 2 relic and raises AttributeError
    # on Python 3; str(e.value) is the portable way to inspect the message.
    # pylint: disable-msg=E1101
    with pytest.raises(Exception) as e:
        qitest.conf.add_test(qitest_json_path, name="foo")
    assert "Should provide a test cmd" in str(e.value)
    # pylint: disable-msg=E1101
    with pytest.raises(Exception) as e:
        qitest.conf.add_test(qitest_json_path, cmd="foo")
    assert "Should provide a test name" in str(e.value)
    qitest.conf.add_test(qitest_json_path, name="foo", cmd=["/path/to/foo"])
    # pylint: disable-msg=E1101
    with pytest.raises(Exception) as e:
        qitest.conf.add_test(qitest_json_path, name="foo", cmd=["/path/to/bar"])
    assert "A test named 'foo' already exists" in str(e.value)
def test_relocate():
    """relocate_tests rewrites cmd paths under the SDK as relative; other paths are untouched."""
    proj = mock.Mock()
    proj.sdk_directory = "/path/to/sdk"
    tests = [
        {"name": "test_one",
         "cmd": ["/path/to/sdk/bin/test_one", "/path/to/sdk/share/foo/one.txt"]},
        {"name": "test_two",
         "cmd": ["/path/to/sdk/bin/test_two", "/some/other/path"]},
    ]
    qitest.conf.relocate_tests(proj, tests)
    expected = [
        {"name": "test_one",
         "cmd": ["bin/test_one", "share/foo/one.txt"]},
        {"name": "test_two",
         "cmd": ["bin/test_two", "/some/other/path"]},
    ]
    assert tests == expected
|
from math import *
from cmath import rect as from_polar, exp as cexp
from frostsynth import *
from frostsynth.filters.base import *
def decay(source, factor=0.01, gain=1.0, duration=1.0, normalized=True, srate=None):
    """Exponential decay by 'factor' in time 'duration' when fed with a simple impulse."""
    srate = get_srate(srate)
    pole = -factor ** (srate / duration)
    feed = gain * (1 + pole) if normalized else gain
    return onepole(source, 1.0, pole, feed)
def attenuate(source, factor=0.01, duration=1.0, srate=None):
    """Exponential attenuation towards target value within 'factor' in time 'duration' for constant signals."""
    # Use the get_srate(srate) convention of the sibling filters (decay, lpf...)
    # instead of an explicit None check.
    srate = get_srate(srate)
    # Hoist the pole magnitude: it was computed twice in the return expression.
    d = factor ** (srate / duration)
    return onepole(source, 1.0, -d, 1.0 - d)
def dc_block(source, pole=0.995): #TODO: factor srate in
    """Removes the DC (zero frequency) component from the signal while trying to preserve other frequencies intact."""
    gain = (1.0 + pole) * 0.5
    return polezero(source, 1.0, -pole, gain, -gain)
def allpass(source, g):
    """First order Shroeder all pass filter. y[n] + g y[n-1] = g.conjugate() x[n] + x[n-1]."""
    g_conj = g.conjugate()
    return polezero(source, 1.0, g_conj, g, 1.0)
def ping_filter(source, frequency, decay, srate=None):
    """This filter responds to a unit impulse by producing a sinusoid "ping".
    The functional form of the response is: sin(2 * pi * frequency * t) * exp(-decay * t).
    """
    # Consistency: use get_srate(srate) like decay()/lpf() rather than an
    # explicit None check.
    srate = get_srate(srate)
    d = exp(-decay / srate)
    w = 2 * pi * frequency / srate
    return twopole(source, 1.0, -2.0 * d * cos(w), d * d, sin(w) * d)
def pong_filter(source, frequency, decay, srate=None):
    """This filter responds to a unit impulse by producing a hard sinusoid "ping".
    The functional form of the response is: cos(2*pi*frequency*t)*exp(-decay*t).
    """
    # Consistency: use get_srate(srate) like decay()/lpf() rather than an
    # explicit None check.
    srate = get_srate(srate)
    d = exp(-decay / srate)
    w = 2 * pi * frequency / srate
    return biquad(source, 1.0, -2.0 * d * cos(w), d * d, 1.0, -cos(w) * d, 0.0)
def onepole_lpf(source, d):
    """Single-pole smoothing low pass (coefficients a0=1, a1=-d, b0=1-d)."""
    return onepole(source, 1, -d, 1 - d)
#Filters from "Cookbook formulae for audio EQ biquad filter coefficients" by Robert Bristow-Johnson
#http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
# 1/sqrt(2): the default Q giving a maximally flat (Butterworth) response.
i_sqrt_two = 1.0 / sqrt(2.0)
def _lpf_coefs(frequency, Q, dw):
w0 = dw * frequency
cosw0 = cos(w0)
alpha = sin(w0) / (2.0 * Q)
cosw0_h = 0.5 * (1.0 - cosw0)
return (1.0 + alpha, -2.0 * cosw0, 1.0 - alpha, cosw0_h, cosw0_h + cosw0_h, cosw0_h)
def lpf(source, frequency, Q=i_sqrt_two, srate=None):
    """Second order low pass filter (RBJ cookbook)."""
    rad_per_unit = two_pi / get_srate(srate)
    return biquad(source, *_lpf_coefs(frequency, Q, rad_per_unit))
def dynamic_lpf(source, frequency, Q, srate=None):
    """Low pass filter with per-sample frequency and Q iterables."""
    rad_per_unit = two_pi / get_srate(srate)
    coef_stream = map(_lpf_coefs, frequency, Q, repeat(rad_per_unit))
    return dynamic_biquad(source, coef_stream)
def _hpf_coefs(frequency, Q, dw):
w0 = dw * frequency
cosw0 = cos(w0)
alpha = sin(w0) / (2.0 * Q)
cosw0_h = 0.5 * (1.0 + cosw0)
return (1.0 + alpha, -2.0 * cosw0, 1.0 - alpha, cosw0_h, -cosw0_h - cosw0_h, cosw0_h)
def hpf(source, frequency, Q=i_sqrt_two, srate=None):
    """Second order high pass filter (RBJ cookbook)."""
    rad_per_unit = two_pi / get_srate(srate)
    return biquad(source, *_hpf_coefs(frequency, Q, rad_per_unit))
def dynamic_hpf(source, frequency, Q, srate=None):
    """High pass filter with per-sample frequency and Q iterables."""
    rad_per_unit = two_pi / get_srate(srate)
    coef_stream = map(_hpf_coefs, frequency, Q, repeat(rad_per_unit))
    return dynamic_biquad(source, coef_stream)
#Spam the rest using an exec macro:
# The remaining cookbook biquads (constant-skirt bandpass, constant-peak
# bandpass, notch, allpass) differ only in their coefficient formulas, so the
# _<name>_coefs / <name> / dynamic_<name> function triples are generated below.
_filter_names=["bpfQ", "bpf0", "notch", "apf"]
# Each string is the b0..a2 assignment block spliced into _<name>_coefs
# (formulas straight from the RBJ Audio EQ Cookbook).
_filter_formulas=["""
b0 = Q*alpha
b1 = 0
b2 = -Q*alpha
a0 = 1 + alpha
a1 = -2*cosw0
a2 = 1 - alpha""","""
b0 = alpha
b1 = 0
b2 = -alpha
a0 = 1 + alpha
a1 = -2*cosw0
a2 = 1 - alpha""","""
b0 = 1
b1 = -2*cosw0
b2 = 1
a0 = 1 + alpha
a1 = -2*cosw0
a2 = 1 - alpha""","""
b0 = 1 - alpha
b1 = -2*cosw0
b2 = 1 + alpha
a0 = 1 + alpha
a1 = -2*cosw0
a2 = 1 - alpha"""]
# exec defines, for each name: _<name>_coefs(frequency, Q, srate=None),
# <name>(source, frequency, Q=i_sqrt_two, srate=None) and
# dynamic_<name>(source, frequency, Q, srate=None).
for name, formula in zip(_filter_names, _filter_formulas):
    exec("def _" + name + """_coefs(frequency, Q, srate=None):
    if srate is None:
        srate = get_srate()
    w0 = two_pi * frequency / srate
    cosw0 = cos(w0)
    alpha = sin(w0) / (2.0 * Q)""" + formula + """
    return (a0, a1, a2, b0, b1, b2)
def """ + name + """(source, frequency, Q=i_sqrt_two, srate=None):
    return biquad(source, *_""" + name + """_coefs(frequency, Q, srate))
def dynamic_""" + name + """(source, frequency, Q, srate=None):
    return dynamic_biquad(source, map(_""" + name + """_coefs, frequency, Q, repeat(srate)))""")
# Disabled: the peaking/shelving EQ variants need an extra gain argument A,
# so this generator block is kept for reference but never executed.
# NOTE(review): the generated code references imap and _dynamic_biquad,
# neither of which appears elsewhere in this module -- both would need
# fixing before this block could be enabled.
if False:
    _filter_names=["peakingEQ", "lowshelf", "highshelf"]
    _filter_formulas=["""
b0 = 1 + alpha*A
b1 = -2*cosw0
b2 = 1 - alpha*A
a0 = 1 + alpha/A
a1 = -2*cosw0
a2 = 1 - alpha/A""","""
b0 = A*( (A+1) - (A-1)*cosw0 + 2*sqrtA*alpha )
b1 = 2*A*( (A-1) - (A+1)*cosw0 )
b2 = A*( (A+1) - (A-1)*cosw0 - 2*sqrtA*alpha )
a0 = (A+1) + (A-1)*cosw0 + 2*sqrtA*alpha
a1 = -2*( (A-1) + (A+1)*cosw0 )
a2 = (A+1) + (A-1)*cosw0 - 2*sqrtA*alpha""","""
b0 = A*( (A+1) + (A-1)*cosw0 + 2*sqrtA*alpha )
b1 = -2*A*( (A-1) + (A+1)*cosw0 )
b2 = A*( (A+1) + (A-1)*cosw0 - 2*sqrtA*alpha )
a0 = (A+1) - (A-1)*cosw0 + 2*sqrtA*alpha
a1 = 2*( (A-1) - (A+1)*cosw0 )
a2 = (A+1) - (A-1)*cosw0 - 2*sqrtA*alpha"""]
    for name, formula in zip(_filter_names, _filter_formulas):
        exec("""def _"""+name+"""_coefs(frequency, Q, A, srate=None):
    if srate is None:
        srate = get_srate()
    sqrtA = sqrt(A)
    w0 = two_pi * frequency / srate
    cosw0 = cos(w0)
    alpha = sin(w0) / (2.0 * Q)"""+formula+"""
    return (a0, a1, a2, b0, b1, b2)
def """+name+"""(source, frequency, Q, A, srate=None):
    return biquad(source,*_"""+name+"""_coefs(frequency, Q, A, srate))
def dynamic_"""+name+"""(source, frequency, Q, A, srate=None):
    return _dynamic_biquad(source, imap(_"""+name+"""_coefs, frequency, Q, A, repeat(srate)))""")
def dynamic_critical_lpf(source, time_constant, srate=None):
    """Low pass filter with impulse response proportional to t*exp(-t*time_constant).
    Normalized so that dynamic_critical_lpf(repeat(1), repeat(T)) approaches 1 for all values of T.

    Generator: yields one output sample per input sample; time_constant is a
    per-sample iterable.
    """
    if srate is None:
        srate = get_srate()
    dt = 1.0/srate
    #Actually this is a dynamic TwoPole with a double pole at exp(-dt*T).
    source = iter(source)
    # Per-sample pole magnitude stream derived from the time constants.
    c = (exp(-dt*T) for T in time_constant)
    # First two samples are special-cased (filter state still filling up).
    d = next(c)
    x0 = next(source)
    y1 = x0 - (x0 + x0 - x0*d)*d
    yield y1
    d = next(c)
    x0 = next(source)
    y0 = x0 + (y1 + y1 - x0 - x0 + x0*d)*d
    yield y0
    # Steady state, unrolled two iterations per loop so the y0/y1/y2 history
    # rotates without explicit variable swaps.
    while True:
        d = next(c)
        y2 = y0
        x0 = next(source)
        y0 = x0 + (y0 + y0 - x0 - x0 + (x0 - y1)*d)*d
        yield y0
        d = next(c)
        y1 = y0
        x0 = next(source)
        y0 = x0 + (y0 + y0 - x0 - x0 + (x0 - y2)*d)*d
        yield y0
def resonator(source, b1, frequency, decay, srate=None):
    """
    Delayed resonant filter.
    Peak amplitude normalized.
    """
    srate = get_srate(srate)
    dt = 1 / srate
    # Unit-modulus rotation at the resonant frequency; the pole a1 sits
    # inside the unit circle, pulled in by the decay factor.
    z = from_polar(1, -two_pi * frequency * dt)
    a1 = exp(-decay * frequency * dt) * z
    # Rescale the feed coefficient so the response peak is normalized.
    # NOTE(review): normalization expression assumed correct from the paired
    # 1/(1 - a1*z) terms -- mirrors dynamic_resonator below; verify.
    b1 *= 2j / abs(1j / (1 - a1 * z) - 1j / (1 - a1.conjugate() * z))
    y0 = 0.0j
    # Complex one-pole recurrence; the real part is the output sample.
    for sample in source:
        y0 = b1 * sample + y0 * a1
        yield y0.real
def dynamic_resonator(source, b1, frequency, decay, srate=None):
    """
    Delayed dynamic resonant filter that doesn't suffer from transients.
    Peak amplitude normalized.

    b1, frequency and decay may be scalars or per-sample iterables.
    """
    # Bug fix: the original did `Q = to_iterable(Q)`, but Q is not a
    # parameter of this function, so the first call raised NameError.
    # The per-sample zip below consumes b1, frequency and decay, so those
    # are the arguments that must be promoted to iterables (matching how
    # dynamic_lowpass promotes its per-sample parameters).
    b1 = to_iterable(b1)
    frequency = to_iterable(frequency)
    decay = to_iterable(decay)
    srate = get_srate(srate)
    dt = 1 / srate
    y0 = 0.0j
    for sample, b, f, d in zip(source, b1, frequency, decay):
        # Pole position recomputed each sample from frequency and decay.
        z = from_polar(1, -two_pi * f * dt)
        a1 = exp(-d * f * dt) * z
        # Per-sample peak normalization factor.
        i_norm_j = 2j / abs(1j / (1 - a1 * z) - 1j / (1 - a1.conjugate() * z))
        y0 = i_norm_j * b * sample + y0 * a1
        yield y0.real
def _nyquist_twozero(source):
source = iter(source)
x2 = next(source)
yield x2
x1 = next(source)
yield x1 + x2 + x2
while True:
x0 = next(source)
yield x0 + x1 + x1 + x2
x2 = next(source)
yield x2 + x0 + x0 + x1
x1 = next(source)
yield x1 + x2 + x2 + x0
def dynamic_lowpass(source, frequency, Q=i_sqrt_two, srate=None):
    """
    Dynamic low pass filter that doesn't suffer from transients.
    Normalized at DC.

    frequency and Q may be scalars or per-sample iterables.
    """
    frequency = to_iterable(frequency)
    Q = to_iterable(Q)
    srate = get_srate(srate)
    dw = two_pi / srate
    y0 = 0j
    # The biquad's double zero is applied up front by _nyquist_twozero; the
    # pole pair is realized as a single complex one-pole recurrence below.
    for sample, f, q in zip(_nyquist_twozero(source), frequency, Q):
        w0 = dw * f
        cosw0 = cos(w0)
        alpha = sin(w0) / (2 * q)
        # NOTE(review): raises ValueError (math domain error) when
        # alpha^2 + cos^2(w0) > 1, i.e. for low Q / extreme frequencies.
        sqrt_discriminant = sqrt(1 - alpha * alpha - cosw0 * cosw0)
        a1 = (cosw0 + 1j * sqrt_discriminant) / (1 + alpha)
        b1 = 0.5 * (1.0 - cosw0) / sqrt_discriminant
        y0 = 1j * b1 * sample + a1 * y0
        yield y0.real
def _dc_twozero(source):
source = iter(source)
x2 = next(source)
yield x2
x1 = next(source)
yield x1 - x2 - x2
while True:
x0 = next(source)
yield x0 - x1 - x1 + x2
x2 = next(source)
yield x2 - x0 - x0 + x1
x1 = next(source)
yield x1 - x2 - x2 + x0
def dynamic_highpass(source, frequency, Q=i_sqrt_two, srate=None):
    """
    Dynamic high pass filter that doesn't suffer from transients.
    Normalized at nyquist.

    frequency and Q may be scalars or per-sample iterables.
    """
    frequency = to_iterable(frequency)
    Q = to_iterable(Q)
    srate = get_srate(srate)
    dw = two_pi / srate
    y0 = 0j
    # Double zero at DC applied up front; pole pair as a complex one-pole.
    for sample, f, q in zip(_dc_twozero(source), frequency, Q):
        w0 = dw * f
        cosw0 = cos(w0)
        alpha = sin(w0) / (2 * q)
        # NOTE(review): math domain error when alpha^2 + cos^2(w0) > 1.
        sqrt_discriminant = sqrt(1 - alpha * alpha - cosw0 * cosw0)
        a1 = (cosw0 + 1j * sqrt_discriminant) / (1 + alpha)
        b1 = 0.5 * (1.0 + cosw0) / sqrt_discriminant
        y0 = 1j * b1 * sample + a1 * y0
        yield y0.real
def _dc_nyquist_twozero(source):
source = iter(source)
x2 = next(source)
yield x2
x1 = next(source)
yield x1
while True:
x0 = next(source)
yield x0 - x2
x2 = next(source)
yield x2 - x1
x1 = next(source)
yield x1 - x0
def dynamic_bandpass(source, frequency, Q=i_sqrt_two, srate=None):
    """
    Dynamic band pass filter that doesn't suffer from transients.
    Peak amplitude normalized.

    frequency and Q may be scalars or per-sample iterables.
    """
    frequency = to_iterable(frequency)
    Q = to_iterable(Q)
    srate = get_srate(srate)
    dw = two_pi / srate
    y0 = 0j
    # Zeros at DC and Nyquist applied up front; pole pair as a complex one-pole.
    for sample, f, q in zip(_dc_nyquist_twozero(source), frequency, Q):
        w0 = dw * f
        cosw0 = cos(w0)
        alpha = sin(w0) / (2 * q)
        # NOTE(review): math domain error when alpha^2 + cos^2(w0) > 1.
        sqrt_discriminant = sqrt(1 - alpha * alpha - cosw0 * cosw0)
        a1 = (cosw0 + 1j * sqrt_discriminant) / (1 + alpha)
        b1 = alpha / sqrt_discriminant
        y0 = 1j * b1 * sample + a1 * y0
        yield y0.real
def dynamic_allpass(source, frequency, Q=i_sqrt_two, srate=None):
    """
    Dynamic all pass filter that doesn't suffer from transients.

    frequency and Q may be scalars or per-sample iterables.
    """
    frequency = to_iterable(frequency)
    Q = to_iterable(Q)
    srate = get_srate(srate)
    dw = two_pi / srate
    y0 = 0j
    # FIR history for the numerator is kept inline (x1, x2) here because the
    # allpass zeros depend on the per-sample coefficients.
    x1 = 0.0
    x2 = 0.0
    for sample, f, q in zip(source, frequency, Q):
        w0 = dw * f
        cosw0 = cos(w0)
        alpha = sin(w0) / (2 * q)
        # NOTE(review): math domain error when alpha^2 + cos^2(w0) > 1.
        sqrt_discriminant = sqrt(1 - alpha * alpha - cosw0 * cosw0)
        a1 = (cosw0 + 1j * sqrt_discriminant) / (1 + alpha)
        i_norm_j = 1j / sqrt_discriminant
        b1 = 1 - alpha
        b2 = -2 * cosw0
        b3 = 1 + alpha
        y0 = i_norm_j * (b1 * sample + b2 * x1 + b3 * x2) + a1 * y0
        yield y0.real
        x2 = x1
        x1 = sample
def dynamic_bandreject(source, frequency, Q=i_sqrt_two, srate=None):
    """
    Dynamic band reject filter that doesn't suffer from transients.
    Normalized at DC and nyquist.

    frequency and Q may be scalars or per-sample iterables.
    """
    frequency = to_iterable(frequency)
    Q = to_iterable(Q)
    srate = get_srate(srate)
    dw = two_pi / srate
    y0 = 0j
    # Inline FIR history (x1, x2) -- the notch zeros track the per-sample
    # coefficients, so they cannot be pre-applied by a fixed prefilter.
    x1 = 0.0
    x2 = 0.0
    for sample, f, q in zip(source, frequency, Q):
        w0 = dw * f
        cosw0 = cos(w0)
        alpha = sin(w0) / (2 * q)
        # NOTE(review): math domain error when alpha^2 + cos^2(w0) > 1.
        sqrt_discriminant = sqrt(1 - alpha * alpha - cosw0 * cosw0)
        a1 = (cosw0 + 1j * sqrt_discriminant) / (1 + alpha)
        i_norm_j = 1j / sqrt_discriminant
        b2 = -2 * cosw0
        y0 = i_norm_j * (sample + b2 * x1 + x2) + a1 * y0
        yield y0.real
        x2 = x1
        x1 = sample
|
from PIL import Image
from os import listdir, system
from sys import argv
from time import sleep
# Render a hard drop shadow for a sprite: project every opaque pixel toward
# a ground line at row `feet` using the fixed x/y projection ratios.
system('cls')
path = './beta/img/' + argv[1]
feet = int(argv[2])
# Shadow offset per unit of height above the ground line (x, y components).
ratio = 0.82, 0.57
print(path)
base = Image.open(path)
result = Image.new('RGBA', base.size, (0, 0, 0, 0))
W, H = base.size
print(W, H)
for y in range(H):
    # Height of this row above the ground line.
    z = feet - y
    offx = ratio[0] * z
    offy = ratio[1] * z
    if (y % 10 == 0):
        # Progress line, overwritten in place via the trailing \r.
        print(int(y / H * 100), '%', int(z), int(offx), int(offy), end=' \r')
    # sleep(.1)
    for x in range(W):
        # Only fully transparent pixels are skipped.
        if base.getpixel((x, y)) != (0, 0, 0, 0):
            sx = int(x - offx)
            # NOTE(review): sy uses `feet - offy`, not `y`; presumably the
            # shadow row depends only on the pixel's height -- confirm, and
            # note the bounds test excludes row/column 0 (strict <).
            sy = int(feet - offy)
            if (0 < sx < W and 0 < sy < H):
                color = (0, 0, 0, 255)
                result.putpixel((sx, sy), color)
result.save(path[:-4] + '_shadow.png')
from torch import nn, optim, as_tensor
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch.optim import lr_scheduler
from torch.nn.init import *
from torchvision import transforms, utils, datasets, models
from models.inception_resnet_v1 import InceptionResnetV1
import cv2
from PIL import Image
from pdb import set_trace
import time
import copy
from pathlib import Path
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage import io, transform
from tqdm import trange, tqdm
import csv
import glob
import dlib
import pandas as pd
import numpy as np
from IPython.display import Video
# Notebook cell residue: preview the clip, then dump every frame to PNGs.
Video("data/IMG_2411.MOV", width=200, height=350)
vidcap = cv2.VideoCapture('IMG_2411.MOV')
success,image = vidcap.read()
# NOTE(review): `success = True` overwrites the result of the first read, so
# a failed open still enters the loop once; also the f-string expression
# split across lines below is only valid syntax on Python 3.12+ (PEP 701).
count = 0
success = True
while success:
    cv2.imwrite(f"./Michael_Chaykowsky/Michael_Chaykowsky_{
    format(count, '04d') }.png", image)
    success,image = vidcap.read()
    print('Read a new frame: ', success)
    count += 1
%%!
for szFile in ./Michael_Chaykowsky/*.png
do
magick mogrify -rotate 90 ./Michael_Chaykowsky/"$(basename "$szFile")" ;
done |
import pytest
import methylize
from pathlib import Path
import pandas as pd
import logging
logging.basicConfig(level=logging.DEBUG)
def test_diff_meth_regions_default():
    """Run diff_meth_pos + diff_meth_regions on the bundled 450k sample and
    assert every reported output file actually exists on disk."""
    test_folder = '450k_test'
    def run_once():
        # One-off fixture builder (disabled below): reads a machine-specific
        # volume, subsets chromosomes 16/21, and pickles betas + phenotype.
        import methylcheck
        g69,meta = methylcheck.load_both('/Volumes/LEGX/SCS/GSE69238/')
        meta = meta[ meta.Sample_ID.isin(g69.columns) ] # meta was larger than beta data
        import methylprep
        man_df = methylprep.Manifest('450k').data_frame
        pheno = [1 if x == 'Male' else 0 for x in meta.gender]
        chrom = man_df[man_df.CHR.isin(['16','21'])].index
        sample = g69[ g69.index.isin( chrom ) ]
        sample.to_pickle(Path('tests','test_sample_betas_450k.pkl'))
        import pickle
        with open(Path('docs','example_data','test_sample_betas_450k_phenotype.pkl'),'wb') as f:
            pickle.dump(pheno, f)
        print(f"{g69.shape} --> {sample.shape} | pheno: {len(pheno)}")
        return pheno, sample
    #pheno, sample = run_once()
    # Load the pre-built fixtures instead of regenerating them.
    sample = pd.read_pickle(Path('docs','example_data','test_sample_betas_450k.pkl'))
    pheno = pd.read_pickle(Path('docs','example_data','test_sample_betas_450k_phenotype.pkl'))
    stats = methylize.diff_meth_pos(sample, pheno, verbose=False, regression_method='logistic')
    manifest_or_array_type = '450k'
    if not Path('docs','example_data', test_folder).exists():
        Path('docs','example_data', test_folder).mkdir()
    files_created = methylize.diff_meth_regions(stats, manifest_or_array_type, prefix='docs/example_data/450k_test/g69')
    print(files_created)
    #test_final_results = methylize.fetch_genes('tests/blah_test/blah_blah_stats.csv', save=True)
    #print(test_final_results)
    # None entries are legitimate (outputs that were skipped); only real paths
    # are required to exist.
    failures = []
    for _file in files_created:
        if isinstance(_file, type(None)):
            continue
        if not Path(_file).exists():
            failures.append(_file)
    if failures != []:
        raise FileNotFoundError(f"These output files were not found / path missing: {failures}")
def test_diff_meth_positions_no_regions_found():
    """Covers passing a Manifest object, step=None (auto), verbose=True paths,
    and genome_control=True.  This sample yields no clusters, so no regions
    file is produced.  Cannot cover: omitting the prefix, or running inside a
    notebook environment.
    """
    out_dir = Path('docs', 'example_data', 'epic_plus')
    if not out_dir.exists():
        out_dir.mkdir()
    # THIS example doesn't return a regions file, because no clusters are found.
    sample = pd.read_pickle(Path('docs','example_data','test_sample_betas_epicplus_30k.pkl'))
    pheno = [1, 1, 0, 0, 1, 1, 0, 0]
    stats = methylize.diff_meth_pos(sample, pheno, verbose=False)
    import methylprep
    manifest = methylprep.Manifest(methylprep.ArrayType('epic+'))
    files_created = methylize.diff_meth_regions(
        stats,
        manifest_or_array_type=manifest,
        prefix='docs/example_data/epic_plus/epic_plus',
        genome_build='OLD',
        genome_control=True)
    print(files_created)
    # The regions file gets deleted mid-processing when no regions are found,
    # so None entries are expected; only real paths must exist.
    failures = [f for f in files_created
                if f is not None and not Path(f).exists()]
    if failures:
        raise FileNotFoundError(f"These output files were not found / path missing: {failures}")
"""
import pytest
import methylize
from pathlib import Path
import pandas as pd
sample = pd.read_pickle(Path('docs','example_data','test_sample_betas_450k.pkl'))
pheno = pd.read_pickle(Path('docs','example_data','test_sample_betas_450k_phenotype.pkl'))
stats = methylize.diff_meth_pos(sample, pheno, verbose=False)
manifest_or_array_type = '450k'
files_created = methylize.diff_meth_regions(stats, manifest_or_array_type, prefix='docs/example_data/450k_test/g69', tissue='all')
"""
|
"""
Syntax
myTuple = (element1, ...., elementN)
myTupleTwo = tuple( listObject )
"""
newCars = ('Tesla','Abracada','Sopapos')
cars = tuple( ['GT86', 'Impreza','Civic','Gol','Astra','Fusca'] ) |
# Grade lookup demo: report one student's mark, then list all students.
math_Value = {'joni': 5,
              'edward': 8,
              'edi': 7,
              'hendrik': 9}
name = input("Enter the student's name: ")
if name in math_Value:
    print("math value", name, "is",
          math_Value[name])
else:
    print("student data not found.")
print("the following is the name of the student:")
# Iterate the dict directly; .keys() was redundant.
for student in math_Value:
    print(student)
class Solution:
    def XXX(self, n: int) -> str:
        """Return the n-th term of the count-and-say sequence (n >= 1)."""

        def describe(term):
            # Run-length encode `term` as "<count><digit>" pairs; a sentinel
            # character flushes the final run.
            padded = term + '#'
            pieces = []
            run = 1
            for idx in range(1, len(padded)):
                if padded[idx] == padded[idx - 1]:
                    run += 1
                else:
                    pieces.append(str(run))
                    pieces.append(padded[idx - 1])
                    run = 1
            return ''.join(pieces)

        term = '1'
        for _ in range(n - 1):
            term = describe(term)
        return term
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 16:20:14 2020
@author: SethHarden
Nodes in a Subtree
You are given a tree that contains N nodes, each containing an integer u which corresponds to a lowercase character c in the string s using 1-based indexing.
You are required to answer Q queries of type [u, c], where u is an integer and c is a lowercase letter. The query result is the number of nodes in the subtree of node u containing c.
Signature
int[] countOfNodes(Node root, ArrayList<Query> queries, String s)
Input
A pointer to the root node, an array list containing Q queries of type [u, c], and a string s
Constraints
N and Q are the integers between 1 and 1,000,000
u is a unique integer between 1 and N
s is of the length of N, containing only lowercase letters
c is a lowercase letter contained in string s
Node 1 is the root of the tree
Output
An integer array containing the response to each query
Example
1(a)
/ \
2(b) 3(a)
s = "aba"
RootNode = 1
query = [[1, 'a']]
Note: Node 1 corresponds to first letter 'a', Node 2 corresponds to second letter of the string 'b', Node 3 corresponds to third letter of the string 'a'.
output = [2]
Both Node 1 and Node 3 contain 'a', so the number of nodes within the subtree of Node 1 containing 'a' is 2.
"""
import math
# Add any extra import statements you may need here
class Node:
    """N-ary tree node: an integer payload plus a list of child nodes."""

    def __init__(self, data):
        self.val = data
        self.children = []
# Add any helper functions you may need here
def count_of_nodes(root, queries, s):
    """Answer each (u, c) query with the number of nodes in u's subtree whose
    1-based value maps to character c in s.

    `path` holds the values of the current node and all its ancestors, so
    "u in path" is exactly "the current node lies inside u's subtree".
    """
    # Bug fix: the original inner helper declared a parameter named `array`
    # but read and wrote an undefined name `result`, raising NameError on
    # the first recursive call.  One consistent name is used here.
    result = [0] * len(queries)

    def visit(node, path):
        if node is None:
            return
        path.append(node.val)
        for child in node.children:
            visit(child, path)
        for i in range(len(queries)):
            u, c = queries[i][0], queries[i][1]
            if u in path and c == s[node.val - 1]:
                result[i] += 1
        path.pop()

    visit(root, [])
    return result
# These are the tests we use to determine if the solution is correct.
# You can add your own at the bottom, but they are otherwise not editable!
def printIntegerList(array):
    """Print `array` as a bracketed, comma-separated list without a newline."""
    body = ', '.join(str(item) for item in array)
    print('[' + body + ']', end='')
test_case_number = 1  # global counter shared across check() calls

def check(expected, output):
    """Print a pass/fail line for one test case and bump the global counter."""
    global test_case_number
    matches = len(expected) == len(output)
    # zip stops at the shorter list, mirroring the original min-length loop.
    for want, got in zip(expected, output):
        matches &= (got == want)
    if matches:
        print('\u2713', 'Test #', test_case_number, sep='')
    else:
        print('\u2717', 'Test #', test_case_number, ': Expected ', sep='', end='')
        printIntegerList(expected)
        print(' Your output: ', end='')
        printIntegerList(output)
        print()
    test_case_number += 1
if __name__ == "__main__":
# Testcase 1
n_1 ,q_1 = 3, 1
s_1 = "aba"
root_1 = Node(1)
root_1.children.append(Node(2))
root_1.children.append(Node(3))
queries_1 = [(1, 'a')]
output_1 = count_of_nodes(root_1, queries_1, s_1)
expected_1 = [2]
check(expected_1, output_1)
# Testcase 2
n_2 ,q_2 = 7, 3
s_2 = "abaacab"
root_2 = Node(1)
root_2.children.append(Node(2))
root_2.children.append(Node(3))
root_2.children.append(Node(7))
root_2.children[0].children.append(Node(4))
root_2.children[0].children.append(Node(5))
root_2.children[1].children.append(Node(6))
queries_2 = [(1, 'a'),(2, 'b'),(3, 'a')]
output_2 = count_of_nodes(root_2, queries_2, s_2)
expected_2 = [4, 1, 2]
check(expected_2, output_2)
# Add your own test cases here
|
#pylint: disable=too-many-function-args
#pylint: disable=no-member
""" Kacper Stysinski """
from copy import deepcopy
import pygame
import sys
# RGB palette used by the board renderer and solver animation.
GREEN = (0, 255, 133)
GRAY = (123, 123, 123)
LIGHT_GRAY = (100, 100, 100)
BLUE = (20, 20, 123)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
def check_pos(grid, row, col, val):
    """ check whether a value can be inserted into given cell """
    # Row and column constraints, scanned in a single pass.
    for k in range(9):
        if grid[k][col] == val or grid[row][k] == val:
            return False
    # Top-left corner of the 3x3 box containing (row, col).
    box_row = 3 * (row // 3)
    box_col = 3 * (col // 3)
    for r in range(box_row, box_row + 3):
        for c in range(box_col, box_col + 3):
            if grid[r][c] == val:
                return False
    return True
def solve(grid, screen):
    """ main solver function """
    # Collect empty cells column-by-column (j outer, i inner) so the solver
    # fills the board in the same order as the original implementation.
    blanks = [[i, j] for j in range(9) for i in range(9) if grid[i][j] == 0]
    return helper(grid, blanks, screen)
def helper(grid, empty, screen):
    """ recursive solver: tries digits 1-9 in the first empty cell, animating
    each attempt, and backtracks on failure.  Returns the solved grid or False. """
    # Base case: nothing left to fill -- the grid is a solution.
    if not empty:
        return grid
    i, j = empty[0][0], empty[0][1]
    for val in range(1, 10):
        # Show the candidate digit in black while it is being tested.
        cell = create_cell(val, BLACK)
        cell_x = (70 - cell.get_rect().width) // 2
        cell_y = (70 - cell.get_rect().height) // 2
        screen.blit(cell, (i * 71 + 1 + cell_x, j * 71 + 3 + cell_y))
        pygame.display.update()
        # Keep the window responsive during the (slow) animated search.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit()
        if check_pos(grid, i, j, val) is True:
            # Valid candidate: redraw in green and recurse on the remaining cells.
            cell = create_cell(val, GREEN)
            cell_x = (70 - cell.get_rect().width) // 2
            cell_y = (70 - cell.get_rect().height) // 2
            screen.blit(cell, (i * 71 + 1 + cell_x, j * 71 + 3 + cell_y))
            pygame.display.update()
            # NOTE(review): a full deepcopy per attempt; mutating grid and
            # undoing on backtrack would avoid the copy -- left as-is.
            temp_grid = deepcopy(grid)
            temp_grid[i][j] = val
            result = helper(temp_grid, empty[1:], screen)
            if result is not False:
                return result
        # Erase the tried digit before the next candidate (or before backtracking).
        cell = pygame.Rect(i * 71 + 1, j * 71 + 1, 70, 70)
        pygame.draw.rect(screen, GRAY, cell)
    return False
def create_cell(number, color):
    """ create surface of a cell with a number """
    digit_font = pygame.font.Font(None, 65)
    return digit_font.render(str(number), True, color)
def map_click(num):
    """ like p5 map """
    # Scale a pixel coordinate on the 640px board down to a 0-8 cell index.
    scaled = (num / 640) * 9
    return int(scaled)
def highlight_cell(pos, screen, color=LIGHT_GRAY):
    """ highlight selected cell """
    rect = pygame.Rect(pos[0] * 71 + 1, pos[1] * 71 + 1, 70, 70)
    pygame.draw.rect(screen, color, rect)
    pygame.display.update()
def input_number(pos, number, grid, screen, no_draw=False):
    """ print known numbers """
    # no_draw suppresses all rendering (used when loading a board from file).
    if not no_draw:
        background = pygame.Rect(pos[0] * 71 + 1, pos[1] * 71 + 1, 70, 70)
        pygame.draw.rect(screen, GRAY, background)
    if number == -1:
        # -1 clears the cell.
        grid[pos[0]][pos[1]] = 0
        return
    grid[pos[0]][pos[1]] = number
    digit = create_cell(number, BLUE)
    off_x = (70 - digit.get_rect().width) // 2
    off_y = (70 - digit.get_rect().height) // 2
    screen.blit(digit, (pos[0] * 71 + 1 + off_x, pos[1] * 71 + 3 + off_y))
    pygame.display.update()
def main():
    """Entry point: set up the Sudoku window, optionally pre-fill the board
    from a file given on the command line, then run the edit/solve loop."""
    pygame.init()
    width, height = 640, 640
    # main window setup
    screen = pygame.display.set_mode((width, height))
    pygame.display.set_caption('Sudoku solver')
    icon = pygame.image.load('logo.png')
    pygame.display.set_icon(icon)
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    background.fill(WHITE)
    screen.blit(background, (0, 0))
    # drawing the grid
    for j in range(1, 640, 71):
        for i in range(1, 640, 71):
            cell = pygame.Rect(i, j, 70, 70)
            pygame.draw.rect(screen, GRAY, cell)
    selected_cell = [0, 0]
    selected_cell_used = True
    solved = False
    grid = [[0] * 9 for _ in range(9)]
    # optional: read from provided file
    if len(sys.argv) > 1:
        # BUG FIX: the error path referenced bare `argv` (NameError) and the
        # `is None` check was dead (open() raises instead); the handle also
        # leaked.  Use try/except + `with`.
        try:
            file_input = open(sys.argv[1], 'r')
        except OSError:
            raise Exception('Specified file {} could not be opened'.format(sys.argv[1]))
        with file_input:
            for j in range(9):
                line = file_input.readline()
                for i in range(9):
                    # file format: one digit every other character, 0 = empty
                    if line[i * 2] != '0':
                        input_number([i, j], int(line[i * 2]), grid, screen, True)
        print(grid)
        solve(grid, screen)
        solved = True
    # digit keys write their number into the cell; ESC clears it
    key_to_number = {
        pygame.K_1: 1, pygame.K_2: 2, pygame.K_3: 3,
        pygame.K_4: 4, pygame.K_5: 5, pygame.K_6: 6,
        pygame.K_7: 7, pygame.K_8: 8, pygame.K_9: 9,
        pygame.K_ESCAPE: -1,
    }
    # main loop
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN and not solved:
                # un-highlight the previous selection before moving it
                if selected_cell_used is False:
                    highlight_cell(selected_cell, screen, GRAY)
                selected_cell = list(map(map_click, event.pos))
                highlight_cell(selected_cell, screen)
                selected_cell_used = False
            elif (event.type == pygame.KEYDOWN and not solved
                  and selected_cell_used is False
                  and event.key in key_to_number):
                input_number(selected_cell, key_to_number[event.key], grid, screen)
                selected_cell_used = True
            if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
                if selected_cell_used is False:
                    highlight_cell(selected_cell, screen, GRAY)
                solve(grid, screen)
                solved = True
        pygame.display.update()
# script entry point
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/GuidanceResponse) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .annotation import Annotation
from .codeableconcept import CodeableConcept
from .datarequirement import DataRequirement
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .identifier import Identifier
@dataclass
class GuidanceResponse(DomainResource):
    """ The formal response to a guidance request.
    A guidance response is the formal response to a guidance request, including
    any output parameters returned by the evaluation, as well as the
    description of any proposed actions to be taken.
    """
    resource_type: ClassVar[str] = "GuidanceResponse"
    # identifier of the guidance request this response answers
    requestIdentifier: Optional[Identifier] = None
    # business identifier(s) for this response
    identifier: Optional[List[Identifier]] = None
    # module[x] choice type: the metadata marks these three as mutually
    # exclusive representations of the knowledge module that was evaluated
    moduleUri: str = field(default=None, metadata=dict(one_of_many='module',))
    moduleCanonical: str = field(default=None, metadata=dict(one_of_many='module',))
    moduleCodeableConcept: CodeableConcept = field(default=None, metadata=dict(one_of_many='module',))
    # required in FHIR but defaulted to None by this code generator
    status: str = None
    subject: Optional[FHIRReference] = None
    encounter: Optional[FHIRReference] = None
    occurrenceDateTime: Optional[FHIRDate] = None
    performer: Optional[FHIRReference] = None
    reasonCode: Optional[List[CodeableConcept]] = None
    reasonReference: Optional[List[FHIRReference]] = None
    # messages produced during evaluation (e.g. warnings)
    note: Optional[List[Annotation]] = None
    evaluationMessage: Optional[List[FHIRReference]] = None
    outputParameters: Optional[FHIRReference] = None
    result: Optional[FHIRReference] = None
    dataRequirement: Optional[List[DataRequirement]] = None
#CLIENT
import socket
from tictactoe import *
class Client:
    """UDP tic-tac-toe client.

    Prompts the user for the server address, relays moves over UDP and
    renders board updates received from the server.  The message protocol
    (codes '000'/'101'/'201'/'999' and the turn strings) is defined by the
    server side; the exact statement order in iniciar() mirrors it.
    """
    def __init__(self):
        # ask the user for the server address interactively
        self.ip = input("Digite o IP do servidor\n")
        self.porta = input("Digite uma porta\n")
        self.endereco = (self.ip, int(self.porta))
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # give up if the server stays silent for a minute
        self.sock.settimeout(60)
    def fazer_jogada(self):
        # read a move from the user and send it to the server
        data = input("Faca a sua jogada: ")
        self.sock.sendto(data.encode(), (self.endereco))
    def receber(self):
        # blocking receive; returns the raw bytes (sender address discarded)
        data, addr = self.sock.recvfrom(1024)
        return (data)
    def construir_tabuleiro(self, tabstr):
        '''
        Convert a board received as a comma-separated string into a list
        and draw it on screen; anything else is printed verbatim.
        '''
        # a serialized board is exactly 28 characters; `velha` comes from
        # `from tictactoe import *`
        if len(tabstr) == 28:
            lista = tabstr.split(",")
            velha.desenhar(lista)
        else:
            print (tabstr)
    def iniciar(self):
        # handshake: send our name, then print the player list and the board
        nome = input("Nome do usuario: ")
        self.sock.sendto(nome.encode(), self.endereco)
        print ("Conexao com o servidor em andamento...")
        jogadores = self.receber()
        print (jogadores.decode())
        data = self.receber()
        self.construir_tabuleiro(data.decode())
        while True:
            ordem = self.receber()
            if ordem:
                # terminal codes: 000 draw, 101 X wins, 201 O wins, 999 lost peer
                if ordem.decode() == '000':
                    print ("Empate!")
                    break
                elif ordem.decode() == '101':
                    print ("Jogador X venceu!")
                    break
                elif ordem.decode() == '201':
                    print ("Jogador O venceu!")
                    break
                elif ordem.decode() == '999':
                    print ("Conexao perdida com um dos jogadores")
                    break
                self.construir_tabuleiro(ordem.decode())
                if str(ordem.decode()) == "Turno do oponente":
                    # wait for the opponent's move, then redraw
                    tab = self.receber()
                    self.construir_tabuleiro(tab.decode())
                if str(ordem.decode()) == "Seu turno":
                    # our turn: play, then receive and redraw the board
                    self.fazer_jogada()
                    tab = self.receber()
                    self.construir_tabuleiro(tab.decode())
if __name__ == '__main__':
    # run the client; the 60 s socket timeout aborts a stalled game
    try:
        cliente = Client()
        cliente.iniciar()
    except socket.timeout:
        print ("Time out - socket encerrado")
|
'''
This module creates a basic logistic system.
'''
import random
class Location:
    '''
    A delivery destination: a city name plus a post-office number.
    '''
    def __init__(self, city:str, postoffice:int):
        # store the destination exactly as given
        self.city = city
        self.postoffice = postoffice
class Item:
    '''
    A purchasable item: a name and its price.
    '''
    def __init__(self, name:str, price:float):
        self.name = name
        self.price = price
    def __str__(self):
        # human-readable one-line summary
        return f"{self.name}, price: {self.price}"
class Vehicle:
    '''
    A delivery vehicle, identified by its number; starts out available.
    '''
    def __init__(self, vehicleNo:int):
        self.vehicleNo = vehicleNo
        # flipped to False by LogisticSystem when an order is assigned
        self.isAvailable = True
class Order:
    '''
    A customer's order: a random six-digit ID, the buyer's name,
    the ordered items and the delivery location.
    '''
    def __init__(self, user_name:str, city, postoffice, items:list):
        # BUG FIX: randint's upper bound is inclusive, so (100000, 1000000)
        # could produce the seven-digit ID 1000000; 999999 keeps IDs six-digit.
        self.orderId = random.randint(100000, 999999)
        self.user_name = user_name
        self.items = items
        self.location = Location(city, postoffice)
    def __str__(self):
        return f"Your order number is {self.orderId}"
    def calculateAmount(self):
        '''
        Returns a total sum for all items.
        '''
        return sum(item.price for item in self.items)
    def assignVehicle(self, vehicle: Vehicle):
        '''
        Checks if vehicle is available, returns True or False.
        (Does not mark the vehicle busy; LogisticSystem does that.)
        '''
        return vehicle.isAvailable
class LogisticSystem:
    '''
    Class for logistic system: keeps a fleet of vehicles and placed orders.
    '''
    def __init__(self, vehicles):
        # vehicles: objects exposing a boolean `isAvailable` attribute
        self.vehicles = vehicles
        self.orders = []
    def placeOrder(self, order: Order):
        '''
        Adds order to the order list if there is an available vehicle,
        marking that vehicle busy; returns a status message either way.
        '''
        for vehicle in self.vehicles:
            if vehicle.isAvailable:
                self.orders.append(order)
                vehicle.isAvailable = False
                return "Your order was placed successfully!"
        return "There is no available vehicle to deliver an order."
    def trackOrder(self, orderId: int):
        '''
        Returns order info by order ID, or 'No such order.' if unknown.
        '''
        for order in self.orders:
            if orderId == order.orderId:
                # BUG FIX: the original backslash line continuation embedded
                # the next line's leading indentation inside the message;
                # build the message from adjacent literals instead.
                return (f"Your order #{orderId} is sent to {order.location.city}. "
                        f"Total price: {order.calculateAmount()} UAH.")
        return 'No such order.'
|
from infobip.clients import get_number_context_logs
from __init__ import configuration
# Build a number-context-logs client from the shared configuration.
# NOTE(review): despite its name, this variable holds the number-context-logs
# client, not a delivery-reports client — consider renaming.
get_delivery_reports_client = get_number_context_logs(configuration)
# Fetch a single log entry.
response = get_delivery_reports_client.execute({"limit": 1})
# NOTE(review): `unicode` exists only on Python 2; on Python 3 this raises
# NameError — confirm the intended runtime.
print(unicode(response))
|
# Interactive registration form: collect all fields first, then validate each
# one in turn, re-prompting until the value is acceptable.
# (input() already returns str, so the redundant str() wrappers were dropped.)
nome = input("Informe o seu nome: ")
idade = int(input("Informe sua idade: "))
salario = float(input("Informe o seu salário: "))
print("-"*40)
print("M para masculino")
print("F para feminino")
print("-"*40)
sexo = input("Informe o seu sexo: ")
print("-"*40)
print("solteiro")
print("casado")
print("viúvo")
print("divorciado")
print("junto")
print("-"*40)
estado = input("Informe o seu Estado Civil: ")
while len(nome) <= 3:
    print("Seu nome deve ter mais do que 3 caracteres.")
    nome = input("Informe o seu nome novamente: ")
while idade < 0 or idade > 150:
    print("Sua idade deve estar entre 0 e 150 anos.")
    idade = int(input("Informe sua idade novamente: "))
while salario < 0:
    print("Seu salário deve ser igual a zero ou maior que zero. Informe-o novamente.")
    salario = float(input("Informe o seu salário novamente: "))
while sexo not in ("F", "M"):
    print("Sexo inválido. Informe novamente!")
    sexo = input("Informe o seu sexo novamente: ")
# BUG FIX: the menu offers "viúvo" (accented) but the check only accepted
# "viuvo", so the advertised spelling was always rejected; accept both.
while estado not in ("solteiro", "casado", "viúvo", "viuvo", "divorciado", "junto"):
    print("Estado cívil inválido. Tente novamente.")
    estado = input("Informe o seu estado civil novamente: ")
print("-"*40)
print("Olá, {} " .format(nome))
print("Você tem {} anos" .format(idade))
print("Seu sálario é: R${} ".format(salario))
print("Do sexo: {}" .format(sexo))
print("Está atualmente: {}" .format(estado))
print("-"*40)
|
from django.contrib import admin
from vozila_specials.models import ProjectPost, CommentP, DisLikeP, LikeP
class LikeInline(admin.TabularInline):
    # show LikeP rows inline on the ProjectPost admin page
    model = LikeP
class DisLikeInline(admin.TabularInline):
    # show DisLikeP rows inline on the ProjectPost admin page
    model = DisLikeP
class SiteAdmin(admin.ModelAdmin):
    """Admin for ProjectPost: summary columns, filters, and inline votes."""
    list_display = ('id', 'owner', 'title', 'date_created')
    list_filter = ('title', 'id', 'date_created')
    inlines = (
        LikeInline,
        DisLikeInline
    )
# register the models; ProjectPost gets the customized admin above
admin.site.register(ProjectPost, SiteAdmin)
admin.site.register(CommentP)
admin.site.register(LikeP)
admin.site.register(DisLikeP)
|
import matplotlib.pyplot as plt
import numpy as np
import mpl_toolkits.axisartist.angle_helper as angle_helper
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist import SubplotHost, ParasiteAxesAuxTrans
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
import astropy.units as u
def display_array_pointing_in_sky(array):
    """Placeholder: plot the pointing FoVs of a telescope array on the sky."""
    #TODO: take an array class and plot the pointings FoV in the sky
    # need to handle projection
    raise NotImplementedError("TODO")
def sky_fov(telescope, ax=None):
    """
    Display the telescope FoV in the sky
    Parameters
    ----------
    telescope: `Telescope`
    ax: `matplotlib.pyplot.axes`
    Returns
    -------
    ax: `matplotlib.pyplot.axes`
    """
    # still a stub: no implementation yet
    raise NotImplementedError("TODO")
def polar_stuff(fig, telescope):
    """Plot the telescope pointing as a circle on a curvilinear
    azimuth/zenith grid and return the figure.

    Parameters
    ----------
    fig: `matplotlib.figure.Figure`
    telescope: object exposing `az` and `alt` angle quantities (radians)
    """
    # PolarAxes.PolarTransform takes radian. However, we want our coordinate
    # system in degree
    tr = Affine2D().scale(np.pi/180., 1.).translate(+np.pi/2.,0) + PolarAxes.PolarTransform()
    # polar projection, which involves cycle, and also has limits in
    # its coordinates, needs a special method to find the extremes
    # (min, max of the coordinate within the view).
    n = 1
    extreme_finder = angle_helper.ExtremeFinderCycle(n, n,
                                                     lon_cycle=360,
                                                     lat_cycle=None,
                                                     lon_minmax=None,
                                                     lat_minmax=(-90, 90),
                                                     )
    grid_locator1 = angle_helper.LocatorDMS(12)
    # Find a grid values appropriate for the coordinate (degree,
    # minute, second).
    tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter. Note that,the
    # acceptable Locator and Formatter class is a bit different than
    # that of mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but may be possible in the future).
    grid_helper = GridHelperCurveLinear(tr,
                                        extreme_finder=extreme_finder,
                                        grid_locator1=grid_locator1,
                                        tick_formatter1=tick_formatter1
                                        )
    ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
    # make ticklabels of right and top axis visible.
    ax1.axis["right"].major_ticklabels.set_visible(True)
    ax1.axis["top"].major_ticklabels.set_visible(True)
    # let right axis shows ticklabels for 1st coordinate (angle)
    ax1.axis["right"].get_helper().nth_coord_ticks = 0
    # let bottom axis shows ticklabels for 2nd coordinate (radius)
    ax1.axis["bottom"].get_helper().nth_coord_ticks = 1
    fig.add_subplot(ax1)
    # A parasite axes with given transform
    ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
    # note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
    ax1.parasites.append(ax2)
    x = np.rad2deg(telescope.az.value) * np.cos(telescope.alt.value)
    y = np.rad2deg(telescope.alt.value)
    # BUG FIX: a first Circle built from a different (az - pi) projection was
    # created here and immediately overwritten without ever being added to the
    # axes; the dead computation has been removed.
    circle = plt.Circle((x, y),
                        radius=7.7 / 2,
                        color="red",
                        alpha=0.2,
                        )
    ax1.add_artist(circle)
    ax2.annotate(1, (x, y), fontsize=15, xytext=(4, 4), textcoords='offset pixels')
    ax1.set_xlim(-180, 180)
    ax1.set_ylim(0, 90)
    ax1.set_aspect(1.)
    ax1.grid(True, zorder=0)
    ax1.set_xlabel("Azimuth in degrees", fontsize=20)
    ax1.set_ylabel("Zenith in degrees", fontsize=20)
    plt.show()
    return fig
|
from genetic_optimizer import *
import pdb
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import itertools
import time
# Load the Titanic training data, keep a feature subset and one-hot encode
# the categorical columns.
columns = ['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']
dataset = pd.read_csv("data/titanic/train.csv")[columns]
dataset = pd.get_dummies(dataset, columns=['Sex', 'Embarked'])
columns = dataset.columns
# first column is the label (Survived); the rest are features
X_original = dataset[columns[1:]]
Y = dataset[columns[:1]]
from sklearn import preprocessing
# scale every feature into [0, 1]
mm_scaler = preprocessing.MinMaxScaler()
X = mm_scaler.fit_transform(X_original)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=0)
# experiment grid comes from config_train (generations_list, populations_list, ...)
from config_train import *
OUTPUT_PATH = "outputs/titanic_train/"
comb = [generations_list, populations_list, elitism_list, mutables_list]
experiments = list(itertools.product(*comb))
def generate_model():
    """Build and compile the small feed-forward net evolved by the GA."""
    net = Sequential()
    net.add(Dense(10, input_shape=(9,), activation='sigmoid'))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(optimizer=SGD(lr=0.1),
                loss='binary_crossentropy',
                metrics=['accuracy'])
    return net
# Run every (generations, population, elitism, mutables) combination and log
# per-iteration and averaged metrics to results.csv plus one plot per combo.
f = open('{}results.csv'.format(OUTPUT_PATH), 'w')
f.write("generations,population,elitism,mutables,time,train_loss,test_loss,train_acc,test_acc\n")
for e in experiments:
    # per-experiment accumulators, averaged after `iterations` repeats
    exp_train_loss = []
    exp_test_loss = []
    exp_train_acc = []
    exp_test_acc = []
    exp_time = []
    plt.figure()
    for i in range(iterations):
        model = generate_model()
        exp_generation = e[0]
        exp_population = e[1]
        # elitism is configured as a fraction of the population
        exp_elite = int(e[2] * exp_population)
        exp_mutables = e[3]
        print("Starting experiment \n\tg{}\n\tp{}\n\te{}\n\tm{}".format(exp_generation,
                                                                        exp_population,
                                                                        exp_elite,
                                                                        exp_mutables))
        ga = GeneticNeuralOptimizer(model,
                                    mutation_prob=0.9,
                                    iterations=exp_generation,
                                    mutables=exp_mutables,
                                    elite=exp_elite,
                                    )
        pop = ga.generate_population(exp_population)
        start = time.time()
        # fitness during evolution is evaluated on a 100-sample test subset
        best, best_value, history = ga.fit(pop, x_train, y_train, x_test[:100], y_test[:100])
        end = time.time()
        print("Best weights found: {}".format(best))
        print("Best value found: {}".format(best_value))
        test_loss, test_acc = ga.model.evaluate(x_test, y_test)
        train_loss, train_acc = ga.model.evaluate(x_train, y_train)
        print('Test accuracy: ' + str(test_acc))
        print('Train accuracy: ' + str(train_acc))
        f.write("{},{},{},{},{},{},{},{},{}\n".format(exp_generation,
                                                      exp_population,
                                                      exp_elite,
                                                      exp_mutables,
                                                      round(end - start, 2),
                                                      round(train_loss, 2),
                                                      round(test_loss, 2),
                                                      round(train_acc, 2),
                                                      round(test_acc, 2)
                                                      ))
        exp_train_loss.append(train_loss)
        exp_test_loss.append(test_loss)
        exp_train_acc.append(train_acc)
        exp_test_acc.append(test_acc)
        exp_time.append(end-start)
        # overlay each iteration's fitness history on the experiment figure
        plt.plot(history)
    plt.title("Evolution g{}_p{}_e{}_m{}".format(exp_generation, exp_population, exp_elite, exp_mutables))
    plt.xlabel("Generation")
    plt.ylabel("Fitness")
    plt.savefig("{}g{}_p{}_e{}_m{}_result.png".format(OUTPUT_PATH, exp_generation, exp_population, exp_elite,
                                                      exp_mutables))
    # blank leading columns: this row holds the averages for the experiment
    f.write(",,,,{},{},{},{},{}\n".format(round(np.mean(exp_time), 2),
                                          round(np.mean(exp_train_loss), 2),
                                          round(np.mean(exp_test_loss), 2),
                                          round(np.mean(exp_train_acc), 2),
                                          round(np.mean(exp_test_acc), 2),
                                          ))
f.close()
|
import matplotlib.pyplot as plt
import numpy as np
from environment.corridor_gridworld import ShortCorridor
class Agent:
    """Fixed-stochastic-policy agent for the short-corridor gridworld."""

    def __init__(self, env):
        self.env = env

    def play(self, number_of_episodes, prob_to_right=0):
        """Run episodes choosing the 'right' action with probability
        `prob_to_right`; return the average cumulative reward per episode."""
        reward_cumulate = 0
        for _ in range(number_of_episodes):
            self.env.reset()
            while True:
                # BUG FIX: the original read the module-level global `env`
                # here instead of self.env, so the agent only worked when a
                # global named `env` happened to exist.
                action = np.random.choice(self.env.action_space.n, 1,
                                          p=[1. - prob_to_right, prob_to_right])
                action = action[0]
                new_state, reward, is_done, _ = self.env.step(action)
                reward_cumulate += reward
                if is_done:
                    break
        return float(reward_cumulate / number_of_episodes)
if __name__ == '__main__':
    env = ShortCorridor()
    agent = Agent(env)
    steps = []
    x_axis = []
    # sweep the right-move probability from 0.3 to 0.7 and record the
    # average reward over 500 episodes for each setting
    for i in range(150, 350):
        print('probability: ', i / 500.)
        x_axis.append(i / 500)
        steps.append(agent.play(500, i / 500.))
    plt.plot(x_axis, steps, alpha=0.7)
    plt.show()
|
from PyQt4 import Qt, QtCore, QtGui
from taurus.qt.qtgui.resource import getThemeIcon
import panic, fandango
from widgets import iLDAPValidatedWidget
class dacWidget(QtGui.QWidget):
    """Container widget embedding a devattrchangeForm pointed at a device.

    (Python 2 / PyQt4 code.)
    """
    def __init__(self,parent=None,container=None,device=None):
        QtGui.QWidget.__init__(self,parent)
        # build the inner form and lay it out inside this widget
        self._dacwi = devattrchangeForm()
        self._dacwi.devattrchangeSetupUi(self)
        self._kontainer = container
        self.setDevCombo(device)
    def setDevCombo(self,device=None):
        # delegate device selection to the inner form
        #self._dacwi.setComboBox(True,self._dacwi.api.devices)
        self._dacwi.setDevCombo(device)
    def show(self):
        QtGui.QWidget.show(self)
class devattrchangeForm(iLDAPValidatedWidget,object):
    """Editable table of a PyAlarm device's configuration properties.

    Shows a device selector, a 2-column (name, value) table and a refresh
    button; edits are validated via LDAP and written back with put_property.
    (Python 2 / PyQt4 code.)
    """
    # panic API handle shared by all instances (set on first construction)
    api=None
    def __init__(self,api=None):
        print 'creating devattrchangeForm ...'
        # store the API on the class so later instances reuse it
        type(self).api = api or self.api or panic.current()
        object.__init__(self)
    def devattrchangeSetupUi(self, Form):
        # build the combo + table + button layout inside `Form`
        self.Form = Form
        Form.setObjectName("Form")
        self.GridLayout = QtGui.QGridLayout(Form)
        self.GridLayout.setObjectName("GridLayout")
        self.deviceCombo = QtGui.QComboBox(Form)
        self.deviceCombo.setObjectName("deviceCombo")
        self.GridLayout.addWidget(self.deviceCombo, 0, 0, 1, 1)
        self.tableWidget = QtGui.QTableWidget(Form)
        self.tableWidget.setObjectName("tableWidget")
        self.GridLayout.addWidget(self.tableWidget, 1, 0, 1, 1)
        self.refreshButton = QtGui.QPushButton(Form)
        self.refreshButton.setObjectName("refreshButton")
        self.GridLayout.addWidget(self.refreshButton, 2, 0, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # set captions/icons and wire the old-style signal connections
        Form.setWindowTitle(QtGui.QApplication.translate("Form", "PyAlarm Device Configuration", None, QtGui.QApplication.UnicodeUTF8))
        self.refreshButton.setText(QtGui.QApplication.translate("Form", "Refresh", None, QtGui.QApplication.UnicodeUTF8))
        self.refreshButton.setIcon(getThemeIcon("view-refresh"))
        self.refreshButton.setToolTip("Refresh list")
        QtCore.QObject.connect(self.tableWidget, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.onEdit)
        QtCore.QObject.connect(self.deviceCombo, QtCore.SIGNAL("currentIndexChanged(QString)"), self.buildList)
        QtCore.QObject.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self.buildList)
        Form.resize(430, 600)
    def setDevCombo(self,device=None):
        # repopulate the device combo and preselect `device` when known
        self.deviceCombo.clear()
        devList=self.api.devices
        [self.deviceCombo.addItem(QtCore.QString(d)) for d in self.api.devices]
        self.deviceCombo.model().sort(0, Qt.Qt.AscendingOrder)
        print 'setDevCombo(%s)'%device
        if device in self.api.devices:
            i = self.deviceCombo.findText(device)
            print '\t%s at %s'%(device,i)
            self.deviceCombo.setCurrentIndex(i)
        else:
            print '\t%s not in AlarmsAPI!'%device
    def buildList(self,device=None):
        # fill the table with the device's config; signals are blocked so
        # programmatic item changes do not re-trigger onEdit
        self.tableWidget.blockSignals(True)
        index = -1 if device is None else self.deviceCombo.findText(device)
        if index<0:
            device = str(self.deviceCombo.currentText())
        else:
            self.deviceCombo.setCurrentIndex(index)
            device = str(device)
        data=self.api.devices[device].get_config(True) #get_config() already manages extraction and default values replacement
        print '%s properties: %s' % (device,data)
        rows=len(data)
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setRowCount(rows)
        self.tableWidget.setHorizontalHeaderLabels(["Attribute Name", "Attribute Value"])
        for row,prop in enumerate(sorted(panic.ALARM_CONFIG)):
            for col in (0,1):
                if not col:
                    # name column is read-only
                    item=QtGui.QTableWidgetItem("%s" % prop)
                    item.setFlags(QtCore.Qt.ItemIsEnabled)
                else:
                    item=QtGui.QTableWidgetItem("%s" % data[prop])
                if row%2==0:
                    # zebra striping for readability
                    item.setBackgroundColor(QtGui.QColor(225,225,225))
                self.tableWidget.setItem(row, col, item)
        self.tableWidget.resizeColumnsToContents()
        self.tableWidget.blockSignals(False)
    def onEdit(self):
        # validate, write the edited property back to the device and re-init it
        try:
            row=self.tableWidget.currentRow()
            dev=self.api.devices[str(self.deviceCombo.currentText())]
            if not self.validate('onEditDeviceProperties(%s)'%dev):
                return
            prop=str(self.tableWidget.item(row,0).text())
            value=str(self.tableWidget.item(row,1).text())
            print 'DeviceAttributeChanger.onEdit(%s,%s = %s)'%(dev,prop,value)
            ptype=fandango.device.cast_tango_type(panic.PyAlarmDefaultProperties[prop][0]).__name__
            if(value):
                dev.put_property(prop, value)
                dev.init()
            else:
                raise Exception('%s must have a value!'%prop)
        except Exception,e:
            Qt.QMessageBox.warning(self.Form,"Warning",'Exception: %s'%e)
        finally:
            # always refresh so the table reflects the device's actual state
            self.buildList()
if __name__ == "__main__":
    # standalone demo: open the form against the default panic API
    import sys
    app=QtGui.QApplication(sys.argv)
    Form=QtGui.QWidget()
    ui=devattrchangeForm()
    ui.devattrchangeSetupUi(Form)
    Form.show()
    ui.setDevCombo()
    sys.exit(app.exec_())
from django.conf.urls import url
from .views import UserAPIView, Login, Logout, Signup
# user-account endpoints: listing, signup, detail CRUD, and session login/logout
urlpatterns = [
    url(r'^list/$', UserAPIView.as_view(), name="User"),
    url(r'^signup/$', Signup.as_view(), name='Signup'),
    url(r'^retrieve_update_destroy/(?P<pk>[\d]+)/$', UserAPIView.as_view(), name="User APi"),
    url(r'^login/$', Login.as_view(), name="Login"),
    url(r'^logout/$', Logout.as_view(), name="Logout")
]
|
# Generated by Django 2.2.2 on 2019-09-04 18:10
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames Service.image_name to Service.image."""
    dependencies = [
        ('pages', '0006_auto_20190904_2314'),
    ]
    operations = [
        migrations.RenameField(
            model_name='service',
            old_name='image_name',
            new_name='image',
        ),
    ]
|
#-*- coding: utf-8 -*-
""" Сейчас порог (VOLTAGE_THRESHOLD) не рассчитывает
"""
# core
# utils
import json
# Other
import uasio.os_io.io_wrapper as iow
from jarvis.py_dbg_toolkit.doColoredConsole import co
import convertors_simple_data_types.xintyy_type_convertors as tc
import convertors_simple_data_types.float32_convertors as f32c
# App
import _sensors_uni as app_reuse_code
# short aliases for the colored-console printers (normal / warning / error)
nprint = co.printN
wprint = co.printW
eprint = co.printE
def eprintValue(name, value):
    # print "name : value" through the error-level colored printer
    eprint(name+' : '+str(value)+'\n')
def wprintValue(name, value):
    # print "name : value" through the warning-level colored printer
    wprint(name+' : '+str(value)+'\n')
def nprintValue(name, value):
    # print "name : value" through the normal-level colored printer
    nprint(name+' : '+str(value)+'\n')
class SensorChannalVoltage(app_reuse_code.SensorChannalHall):
    """Voltage sensor channel: a Hall channel whose transfer coefficient is
    computed from a two-stage resistor divider (R1/R2 then R3/R4)."""
    def __init__(self, *args):
        # BUG FIX: the original called the bare name `SensorChannalHall`,
        # which is not defined in this module (it lives in _sensors_uni,
        # imported as app_reuse_code) -> NameError on construction.
        app_reuse_code.SensorChannalHall.__init__(self, *args)
    def getSplitter(self):
        """Return the divider transfer ratio U_out/U_in."""
        R1 = self._splitter_params['R1']
        R2 = self._splitter_params['R2']
        R3 = self._splitter_params['R3']
        R4 = self._splitter_params['R4']
        # transfer coefficient of the cascaded dividers (unit input voltage)
        U0 = 1.0
        U2 = U0*R2/(R1+R2)
        U4 = U2*R4/(R4+R3)
        splitter = U4/U0
        return splitter
SensorChannal = app_reuse_code.SensorChannalHall # channel class used by the meter
value2voltage = app_reuse_code.value_to_voltage_hall # conversion curve
# read the sensor configuration for the 'U' (voltage) sensor
sensor_sets = app_reuse_code.get_sensor_cfg('U')
# settings loaded; build the metrology channel and both threshold channels
metroChannal = app_reuse_code.SensorChannalHall(
    sensor_sets,
    'adc_metro',
    'splitter_metro_parems',
    value2voltage)
thresholdChannal_max = app_reuse_code.SensorChannalHall(
    sensor_sets,
    'dac_threshes',
    'splitter_threshold_parems_max',
    value2voltage)
thresholdChannal_min = app_reuse_code.SensorChannalHall(
    sensor_sets,
    'dac_threshes',
    'splitter_threshold_parems_min',
    value2voltage)
def main(v_nom, merto_list):
    """Generate voltage_header.h: zero offset, per-name calibration constants,
    min/max voltage thresholds and the code-to-volts conversion factor.

    v_nom: nominal voltage; merto_list: mapping of constant name -> voltage.
    (Python 2 code.)
    """
    result_list = list('')
    sets = { 'name': 'voltage_header.h', 'howOpen': 'w', 'coding': 'cp1251'}
    # zero offset used during reverse processing
    U = 0
    Udig_zero, capacity = app_reuse_code.calc_coeff_transform(U, metroChannal)
    # write the template into the output list
    result_list = list('')
    result_list.append('#define kZeroVoltageCorrect '+Udig_zero+" ; "+str(U)+" V; bits - "+capacity)
    for name in merto_list:
        code, capacity = app_reuse_code.calc_coeff_transform(merto_list[name], metroChannal)
        result_list.append(
            'constant k'+name+' = '+code+" ; "+str(merto_list[name])+" V / "+capacity+" bits")
    # thresholds: -15% and +13% around the nominal voltage
    U_nom = v_nom
    U_min = U_nom-U_nom/100.0*15
    U_max = U_nom+U_nom/100.0*13
    print U_min, U_max
    U_min_d, capacity = app_reuse_code.calc_coeff_transform(U_min, thresholdChannal_min)
    U_max_d, capacity = app_reuse_code.calc_coeff_transform(U_max, thresholdChannal_max)
    print U_min_d, U_max_d
    result_list.append('#define VOLTAGE_THR_MIN '+U_min_d+" ; -15% V bits - "+capacity)
    result_list.append('#define VOLTAGE_THR_MAX '+U_max_d+" ; +13% V bits - "+capacity+'\n')
    # find the conversion coefficient (digital code -> volts)
    U = U_nom
    Udig_value, capacity = app_reuse_code.calc_coeff_transform(U, thresholdChannal_min)
    print Udig_value
    U = 42.0
    Udig_value, capacity = app_reuse_code.calc_coeff_transform(U, metroChannal)
    #result_list.append('#define TEST_MOCK_VOLTAGE '+Udig_value+"\t;"+str(U)+" V; bits - "+capacity)
    realCodeVoltage = tc.hex_word_to_uint(Udig_value)-tc.hex_word_to_uint(Udig_zero)
    k = U/realCodeVoltage
    wprintValue('K code to V :', k)
    result_list.append(';const double kTAOneVoltagePSFactor_ = '+str(k)+';')
    # pack k*10 as an IEEE-754 float for the microcontroller
    k *= 10
    ieee, mchip = f32c.pack_f32_into_i32(k, None)
    mchip = ', 0x'.join(mchip.split(' '))
    mchip = '0x'+mchip[:-4]
    result_list.append('; mchip: '+mchip+' ; K*10 = '+str(k))
    # close out by writing the collected lines to the header file
    iow.list2file(sets=sets, lst=result_list)
# Run
if __name__ == '__main__':
    # BUG FIX: the original referenced undefined names `a` and `b`
    # (NameError at launch). The intended values are not recoverable from
    # this module, so fail with an actionable message instead of crashing.
    raise SystemExit(
        'voltage header generator: call main(v_nom, merto_list) with a '
        'nominal voltage and a {name: voltage} dict')
|
N = int(input())
# can N be paid with packs of 4 (up to 24 of them) and packs of 7 (up to 13)?
result = any(4 * a + 7 * b == N
             for a in range(25)
             for b in range(14))
print("Yes" if result else "No")
|
"""run diffusion equation to determine selectivity of fluophore,
i.e current and release time series for specific molecule."""
from nanopores.tools.fields import cache
import nanopores
import dolfin
import matplotlib.pyplot as plt
from nanopores.physics.convdiff import ConvectionDiffusion
import forcefields
from eikonal import boundary_force
# user-overridable simulation parameters (CLI/kwargs via nanopores)
p = nanopores.user_params(
overwrite = False,
bforce = True,
levels = 12,
t = 1e-8,
steps = 20,
Qmol = -1,
rMolecule = 0.5,
implicit = False,
R = 100.,
h = 4.,
Nmax = 1e5,
dnaqsdamp = 1.,
)
# force field parameters
f_params = dict(
Qmol = p.Qmol,
rMolecule = p.rMolecule,
implicit = p.implicit,
Ry = p.R,
Rx = p.R,
Nmax = p.Nmax,
h = p.h,
dnaqsdamp = p.dnaqsdamp,
)
# parameters for selectivity calculation
sel_params = dict(
bforce = p.bforce,
fluocon = 100., # initial concentration [mM] in upper reservoir
# parameters regarding timestepping
levels = p.levels, # levels > 1 --> logarithmic time
t = p.t, # total time of first level
steps = p.steps, # timesteps per level
)
# combined parameter dict; also the cache key for selectivity() below
default = dict(sel_params, **f_params)
def calculate_selectivity(F, geo, phys, fluocon=1, t=1e0, steps=100, levels=1):
    "core functionality of the module"
    # concentration in 1/nm**3 (1 M = 0.6 /nm**3)
    c0 = fluocon*(phys.mol*phys.nm**3)
    # initial condition: concentration c0 only in the upper reservoir
    u0 = geo.pwconst("c0", dict(bulkfluidtop = c0, default=0.))
    # total concentration (axisymmetric integral, hence the 2*pi*r weight)
    ctot = dolfin.assemble(u0*dolfin.Expression("2*pi*x[0]")*geo.dx())
    phys.ctot = ctot
    print "Total concentration:", ctot, "molecules."
    # convect: time-step the convection-diffusion PDE under force field F
    phys.F = F
    frac = 1./steps
    dt = t/steps
    bc = {} #dict(upperb=dolfin.Constant(0.), lowerb=dolfin.Constant(0.))
    pde = ConvectionDiffusion(geo, phys, dt=dt, F=F, u0=u0, bc=bc, cyl=True)
    pde.add_functionals([current, concentration])
    # logarithmic time stepping across `levels` levels
    pde.timerange = nanopores.logtimerange(t,
        levels=levels, frac=frac, change_dt=pde.change_dt)
    for t_ in pde.timesteps(t=t):
        pde.record_functionals()
        pde.visualize()
    # obtain current, release
    return dict(
        time = pde.time,
        release = pde.functionals["cbottom"].values,
        current = pde.functionals["J"].values)
# functionals
def current(U, geo):
    """Functional: particle current through the pore, returned in 1/ms."""
    u, = U
    # axisymmetric volume weight
    r2pi = dolfin.Expression("2*pi*x[0]")
    phys = geo.physics
    grad = phys.grad
    D = geo.pwconst("Dtarget")
    kT = dolfin.Constant(phys.kT)
    F = phys.F
    # current density [1/nm**3]*[nm/ns] = [1/(ns*nm**2)]
    j = -D*grad(u) + D/kT*F*u
    #lscale = Constant(phys.lscale)
    L = dolfin.Constant(9.) # pore length
    # current in 1/ns
    J = -j[1]/L *r2pi*geo.dx("pore")
    # current in 1/ms
    J = 1e6*J
    return dict(J=J)
def concentration(U, geo):
    """Functionals: relative concentration (% of total) overall and per region."""
    u, = U
    ctot = geo.physics.ctot
    # axisymmetric volume weight
    r2pi = dolfin.Expression("2*pi*x[0]")
    # urel = % of total concentration
    urel = u/dolfin.Constant(ctot/100.)
    c = urel *r2pi*geo.dx()
    ctop = urel *r2pi*geo.dx("bulkfluidtop")
    cbottom = urel *r2pi*geo.dx("bulkfluidbottom")
    cpore = urel *r2pi*geo.dx("pore")
    return dict(c=c, ctop=ctop, cbottom=cbottom, cpore=cpore)
def _diff(dic, keys):
dic = dic.copy()
return {k : dic.pop(k) for k in keys}, dic
# user interface
# results are cached on disk under "selectivity", keyed by params
@cache("selectivity", default, overwrite=p.overwrite)
def selectivity(params):
    """Compute current/release time series for the given parameter dict."""
    # filter out selectivity params
    sparams, fparams = _diff(params, sel_params.keys())
    bforce = sparams.pop("bforce")
    # get PNPS force
    #F, geo, phys = forcefields.F_geo_phys(p.overwrite, **fparams)
    F, geo, phys = forcefields.F_geo_phys(**fparams)
    # get additional boundary force
    if bforce:
        Fb, _ = boundary_force(mesh=geo.mesh, **fparams)
        F = F + Fb
    result = calculate_selectivity(F, geo, phys, **sparams)
    result["params"] = params
    return result
if __name__ == "__main__":
    # plot release curve and pore current for the default parameter set
    import numpy
    results = nanopores.Params(selectivity(**default))
    t = results.time
    J = results.current
    rel = results.release
    params = results.params
    plt.figure(0)
    plt.semilogx(t, rel, "x-")
    plt.xlabel("time [s]")
    plt.ylabel("% release")
    plt.title("reservoir size: %.0f nm" % (params["Ry"],))
    plt.ylim(ymin=0.)
    def avg(J):
        """Average the quasi-steady 20%-50% window of the current trace."""
        n = len(J)
        # BUG FIX: slice indices must be integers; the original float
        # indices (n*0.2, n*0.5) are rejected by modern NumPy.
        J0 = list(numpy.array(J)[int(n*0.2):int(n*0.5)])
        return sum(J0)/len(J0)
    plt.figure(1)
    plt.semilogx(t, J, "x-")
    plt.xlabel("time [s]")
    plt.ylabel("current through pore [1/ms]")
    J0 = avg(J)
    plt.plot(t, [J0]*len(t), "k--")
    plt.title("quasi-equilibrium current: %.1f" % J0)
    plt.ylim(ymin=0.)
    plt.show()
|
import csv
import json
import requests
import operator
import trade_history_downloader
class Order:
    """A single buy or sell order parsed from orders.csv."""

    def __init__(self, side, symbol, shares, price, date, state):
        self.side = side            # 'buy' or 'sell'
        self.symbol = symbol
        self.shares = float(shares)
        self.price = float(price)
        self.date = date
        self.state = state

    def pl(self):
        """Signed cash flow of the order: negative for buys, positive for sells.

        BUG FIX: the original truncated shares with int(), silently dropping
        fractional shares even though __init__ stores them as float.
        """
        sign = -1.0 if self.side == 'buy' else 1.0
        return sign * self.shares * self.price
class Stock:
    """Aggregates all orders for one ticker symbol."""

    def __init__(self, symbol):
        self.symbol = symbol
        self.orders = []      # Order objects in file order
        self.net_shares = 0   # shares still held (buys minus sells)
        self.net_pl = 0       # accumulated profit/loss
def generate_csv(username=None, password=None):
    """Download the order history to orders.csv via trade_history_downloader."""
    trade_history_downloader.orders_to_csv(username, password)
def itemize_stocks():
    """Read orders.csv and group the orders into Stock objects keyed by ticker."""
    stocks = {}
    with open('orders.csv', 'r') as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            symbol, price = row[1], row[3]
            # skip the header row and rows without a fill price
            if symbol == 'symbol' or price == '':
                continue
            # first order for this ticker creates the Stock entry
            if symbol not in stocks:
                stocks[symbol] = Stock(symbol)
            stocks[symbol].orders.append(
                Order(row[0], row[1], row[2], row[3], row[4], row[5]))
    return stocks
def calculate_itemized_pl(stocks):
    """Compute net shares and net P/L for every Stock in *stocks*.

    Realized P/L is the sum of order cash flows; shares still held are
    marked to market with a quote fetched from Google Finance.
    NOTE(review): the finance.google.com JSON endpoint has been retired,
    so the quote lookups are likely to fail and need a new data source.
    """
    for stock in stocks.values():
        for order in stock.orders:
            if order.side == 'buy':
                stock.net_shares += order.shares
            else:
                stock.net_shares -= order.shares
            # order.pl() is positive for selling and negative for buying
            stock.net_pl += order.pl()
        # Handle outstanding shares - should be current positions
        if stock.net_shares > 0:
            rsp = requests.get('https://finance.google.com/finance?q=' +
                               stock.symbol + '&output=json')
            if rsp.status_code == 200:
                # Google prefixes the JSON payload with junk, so rsp.json()
                # doesn't work - strip it before parsing. The except clause
                # is narrowed to ValueError (covers JSONDecodeError and
                # UnicodeDecodeError) instead of the original bare except.
                try:
                    fin_data = json.loads(rsp.content[6:-2].decode('unicode_escape'))
                except ValueError:
                    # Some tickers need an explicit exchange prefix (e.g. NYSE:SNAP)
                    rsp = requests.get('https://finance.google.com/finance?q=' +
                                       'NYSE:' + stock.symbol + '&output=json')
                    fin_data = json.loads(rsp.content[6:-2].decode('unicode_escape'))
                # Doesn't include pre/post-market
                last_price = float(fin_data['l'].replace(',', ''))
                # Add currently held shares to net_pl as if selling now
                stock.net_pl += stock.net_shares * last_price
            else:
                print('BAD REQUEST: https://finance.google.com/finance?q=' +
                      stock.symbol + '&output=json')
                print('Perhaps try prepending exchange name to stock symbol')
                print('Example: If SNAP fails, try NYSE:SNAP')
        # Should handle free gift stocks
        elif stock.net_shares < 0:
            stock.symbol += ' (Free Gift)'
if __name__ == '__main__':
    generate_csv()
    stocks = itemize_stocks()
    calculate_itemized_pl(stocks)
    with open('stockwise_pl.csv', 'w') as outfile:
        writer = csv.writer(outfile, delimiter=',')
        writer.writerow(['SYMBOL', 'NET_P/L', '# BUYS/SELLS'])
        # Highest net P/L first
        sorted_pl = sorted(stocks.values(), key=operator.attrgetter('net_pl'), reverse=True)
        total_pl = 0
        total_trades = 0
        for stock in sorted_pl:
            num_trades = len(stock.orders)
            # Reuse num_trades rather than recomputing len(stock.orders)
            writer.writerow([stock.symbol, '{0:.2f}'.format(stock.net_pl), num_trades])
            total_pl += stock.net_pl
            total_trades += num_trades
        # Format the total like the per-stock rows (avoids a long float repr
        # from accumulated floating-point error)
        writer.writerow(['Totals', '{0:.2f}'.format(total_pl), total_trades])
    print('Created', outfile.name, 'in this directory.')
|
import os
from libsunnet import *   # FIX: original read "from libsunnetimport*" - a SyntaxError
import numpy as np
import imageio
import random
import ctypes
import datetime

# Build a U-Net style segmentation network: two pooling (down) stages with
# cropped/resized skip connections (Crop1/Rsz1, Crop2/Rsz2), a bottleneck
# (C5/C6), and two deconvolution (up) stages that concatenate with the skips.
net = snNet.Net()
net.addNode("In", snOperator.Input(), "C1") \
    .addNode("C1", snOperator.Convolution(10, (3, 3), -1), "C2") \
    .addNode("C2", snOperator.Convolution(10, (3, 3), 0), "P1 Crop1") \
    .addNode("Crop1", snOperator.Crop(snType.rect(0, 0, 487, 487)), "Rsz1") \
    .addNode("Rsz1", snOperator.Resize(snType.diap(0, 10), snType.diap(0, 10)), "Conc1") \
    .addNode("P1", snOperator.Pooling(), "C3") \
    \
    .addNode("C3", snOperator.Convolution(10, (3, 3), -1), "C4") \
    .addNode("C4", snOperator.Convolution(10, (3, 3), 0), "P2 Crop2") \
    .addNode("Crop2", snOperator.Crop(snType.rect(0, 0, 247, 247)), "Rsz2") \
    .addNode("Rsz2", snOperator.Resize(snType.diap(0, 10), snType.diap(0, 10)), "Conc2") \
    .addNode("P2", snOperator.Pooling(), "C5") \
    \
    .addNode("C5", snOperator.Convolution(10, (3, 3), 0), "C6") \
    .addNode("C6", snOperator.Convolution(10, (3, 3), 0), "DC1") \
    .addNode("DC1", snOperator.Deconvolution(10), "Rsz3") \
    .addNode("Rsz3", snOperator.Resize(snType.diap(0, 10), snType.diap(10, 20)), "Conc2") \
    \
    .addNode("Conc2", snOperator.Concat("Rsz2 Rsz3"), "C7") \
    \
    .addNode("C7", snOperator.Convolution(10, (3, 3), 0), "C8") \
    .addNode("C8", snOperator.Convolution(10, (3, 3), 0), "DC2") \
    .addNode("DC2", snOperator.Deconvolution(10), "Rsz4") \
    .addNode("Rsz4", snOperator.Resize(snType.diap(0, 10), snType.diap(10, 20)), "Conc1") \
    \
    .addNode("Conc1", snOperator.Concat("Rsz1 Rsz4"), "C9") \
    \
    .addNode("C9", snOperator.Convolution(10, (3, 3), 0), "C10")

# Output layer: single-channel mask with sigmoid activation
convOut = snOperator.Convolution(1, (3, 3), 0)
convOut.act = snType.active.sigmoid   # FIX: dropped stray trailing semicolon
net.addNode("C10", convOut, "LS") \
    .addNode('LS', snOperator.LossFunction(snType.lossType.binaryCrossEntropy), 'Output')

# Load the image and label file lists
pathImg = 'c:/cpp/other/sunnet/example/unet/images/'
imgList = os.listdir(pathImg)
pathLabel = 'c:/cpp/other/sunnet/example/unet/labels/'
labelsList = os.listdir(pathLabel)

bsz = 5        # batch size
lr = 0.001     # learning rate
accuratSumm = 0.

inLayer = np.zeros((bsz, 1, 512, 512), ctypes.c_float)
outLayer = np.zeros((bsz, 1, 483, 483), ctypes.c_float)
targLayer = np.zeros((bsz, 1, 483, 483), ctypes.c_float)

# Training loop: each iteration trains on a randomly drawn batch of
# image/label pairs and reports the running average accuracy
for n in range(1000):
    targLayer[...] = 0
    for i in range(bsz):
        nimg = random.randint(0, len(imgList) - 1)
        inLayer[i] = imageio.imread(pathImg + imgList[nimg])
        # Labels are scaled to [0, 1] and reshaped to the network output size
        targLayer[i] = np.resize(imageio.imread(pathLabel + labelsList[nimg]), (1, 483, 483)) / 255.

    acc = [0]  # do not use default accurate
    net.training(lr, inLayer, outLayer, targLayer, acc)

    accuratSumm += acc[0]
    print(datetime.datetime.now().strftime('%H:%M:%S'), n, "accurate", accuratSumm / (n + 1))
# -------------------------------------------------------------------------
# This module contains all of the parameters, functions and classes that
# are used across more than one signal type
# -------------------------------------------------------------------------
from . import common
from . import dcc_control
from . import mqtt_interface
from . import signals_colour_lights
from . import signals_semaphores
from . import signals_ground_position
from . import signals_ground_disc
from tkinter import *
from typing import Union
import logging
import enum
# -------------------------------------------------------------------------
# Global Classes to be used externally when creating/updating signals or
# processing button change events - Will apply to more than one signal type
# -------------------------------------------------------------------------
# Define the routes that a signal can support. Applies to colour light signals
# with feather route indicators and semaphores (where the "routes" are represented
# by subsidary "arms" on brackets either side of the main signal arm
class route_type(enum.Enum):
    """Routes a signal can support.

    Applies to colour light signals with feather route indicators and to
    semaphores (where the "routes" are represented by subsidiary arms on
    brackets either side of the main signal arm).
    """
    NONE = 0 # internal use - to "inhibit" route indications when signal is at DANGER
    MAIN = 1 # Main route
    LH1 = 2 # immediate left
    LH2 = 3 # far left
    RH1 = 4 # immediate right
    RH2 = 5 # far right
# Define the different callbacks types for the signal
# Used for identifying the event that has triggered the callback
class sig_callback_type(enum.Enum):
    """Identifies which event has triggered an external signal callback."""
    sig_switched = 1 # The signal has been switched by the user
    sub_switched = 2 # The subsidary signal has been switched by the user
    sig_passed = 3 # The "signal passed" has been activated by the user
    sig_updated = 4 # The signal aspect has been changed/updated via an override
    sig_released = 5 # The signal has been "released" on the approach of a train
# -------------------------------------------------------------------------
# Global Classes used internally when creating/updating signals or
# processing button change events - Will apply to more than one signal type
# -------------------------------------------------------------------------
# The superset of Possible states (displayed aspects) for a signal
# CAUTION_APPROACH_CONTROL represents approach control set with "Release On Yellow"
class signal_state_type(enum.Enum):
    """The superset of possible states (displayed aspects) for a signal.

    CAUTION_APP_CNTL represents approach control set with "Release on Yellow".
    """
    DANGER = 1
    PROCEED = 2
    CAUTION = 3
    CAUTION_APP_CNTL = 4
    PRELIM_CAUTION = 5
    FLASH_CAUTION = 6
    FLASH_PRELIM_CAUTION = 7
# Define the main signal types that can be created
class sig_type(enum.Enum):
    """The main signal types that can be created."""
    remote_signal = 0  # presumably a signal on another node (compound IDs) - TODO confirm
    colour_light = 1
    ground_position = 2
    semaphore = 3
    ground_disc = 4
# -------------------------------------------------------------------------
# Signals are to be added to a global dictionary when created
# -------------------------------------------------------------------------
# Define an empty dictionary
signals:dict = {}  # keyed by str(sig_id); each value is a dict of the signal's state and drawing objects
# -------------------------------------------------------------------------
# Common Function to check if a Signal exists in the dictionary of Signals
# Used by most externally-called functions to validate the Sig_ID. We allow
# a string or an int to be passed in to cope with compound signal identifiers
# This is to support identifiers containing the node and ID of a remote signal
# -------------------------------------------------------------------------
def sig_exists(sig_id:Union[int,str]):
    """Return True if *sig_id* (int, or compound string for remote signals)
    is present in the global signals dictionary."""
    return str(sig_id) in signals
# -------------------------------------------------------------------------
# Define a null callback function for internal use
# -------------------------------------------------------------------------
def null_callback (sig_id:int,callback_type):
    """Default external callback - does nothing beyond echoing its arguments."""
    return (sig_id, callback_type)
# -------------------------------------------------------------------------
# Callbacks for processing button pushes - Will also make an external
# callback if one was specified when the signal was created. If not,
# then the null_callback function will be called to "do nothing"
# -------------------------------------------------------------------------
def signal_button_event (sig_id:int):
    """Button callback for the main signal: toggle the signal, then make
    the external callback with a 'sig_switched' event."""
    global logging
    logging.info("Signal "+str(sig_id)+": Signal Change Button Event ***************************************")
    toggle_signal(sig_id)
    signals[str(sig_id)]['extcallback'] (sig_id,sig_callback_type.sig_switched)
    return ()
def subsidary_button_event (sig_id:int):
    """Button callback for the subsidary signal: toggle the subsidary, then
    make the external callback with a 'sub_switched' event."""
    global logging
    logging.info("Signal "+str(sig_id)+": Subsidary Change Button Event ************************************")
    toggle_subsidary(sig_id)
    signals[str(sig_id)]['extcallback'] (sig_id,sig_callback_type.sub_switched)
    return ()
def sig_passed_button_event (sig_id:int):
    """Button callback for "signal passed": pulse the button red for 1s,
    publish the event over MQTT (if configured) and fire the external
    callback with a 'sig_passed' event."""
    global logging
    logging.info("Signal "+str(sig_id)+": Signal Passed Event **********************************************")
    # Pulse the signal passed button to provide a visual indication
    signals[str(sig_id)]["passedbutton"].config(bg="red")
    common.root_window.after(1000,lambda:signals[str(sig_id)]["passedbutton"].config(bg=common.bgraised))
    # Publish the signal passed event via the mqtt interface. Note that the event will only be published if the
    # mqtt interface has been successfully configured and the signal has been set to publish passed events
    mqtt_interface.publish_signal_passed_event(sig_id)
    signals[str(sig_id)]['extcallback'] (sig_id,sig_callback_type.sig_passed)
    return ()
def approach_release_button_event (sig_id:int):
    """Button callback for approach release: pulse the button red for 1s,
    clear any approach control set on the signal and fire the external
    callback with a 'sig_released' event."""
    global logging
    logging.info("Signal "+str(sig_id)+": Approach Release Event *******************************************")
    # Pulse the approach release button to provide a visual indication
    signals[str(sig_id)]["releasebutton"].config(bg="red")
    common.root_window.after(1000,lambda:signals[str(sig_id)]["releasebutton"].config(bg=common.bgraised))
    clear_approach_control(sig_id)
    signals[str(sig_id)]['extcallback'] (sig_id,sig_callback_type.sig_released)
    return ()
# -------------------------------------------------------------------------
# Common function to flip the internal state of a signal and the state of
# the Signal button - Called on a Signal "Button Press" event
# -------------------------------------------------------------------------
def toggle_signal (sig_id:int):
    """Flip the internal ON/OFF state of a signal and its button, then trigger
    the type-specific redraw (deferred for colour light / semaphore signals
    that are not configured to refresh immediately)."""
    global logging
    global signals
    signal = signals[str(sig_id)]
    # The Signal Clear boolean value will always be either True or False
    if signal["sigclear"]:
        logging.info("Signal "+str(sig_id)+": Toggling signal to ON")
        signal["sigclear"] = False
        if not signal["automatic"]:
            signal["sigbutton"].config(bg=common.bgraised)
            signal["sigbutton"].config(relief="raised")
    else:
        logging.info("Signal "+str(sig_id)+": Toggling signal to OFF")
        signal["sigclear"] = True
        if not signal["automatic"]:
            signal["sigbutton"].config(relief="sunken")
            signal["sigbutton"].config(bg=common.bgsunken)
    # Redraw via the signal type-specific update function (colour light and
    # semaphore signals are only updated immediately if set to "refresh")
    sigtype = signal["sigtype"]
    if sigtype == sig_type.colour_light:
        if signal["refresh"]: signals_colour_lights.update_colour_light_signal(sig_id)
    elif sigtype == sig_type.ground_position:
        signals_ground_position.update_ground_position_signal(sig_id)
    elif sigtype == sig_type.semaphore:
        if signal["refresh"]: signals_semaphores.update_semaphore_signal(sig_id)
    elif sigtype == sig_type.ground_disc:
        signals_ground_disc.update_ground_disc_signal(sig_id)
    return ()
# -------------------------------------------------------------------------
# Common function to flip the internal state of a subsidary signal
# (associated with a main signal) and the state of the Signal button
# Called on a Subsidary Signal "Button Press" event
# -------------------------------------------------------------------------
def toggle_subsidary (sig_id:int):
    """Flip the internal state of a subsidary signal (associated with a main
    signal) and its button, then trigger the type-specific redraw."""
    global logging
    global signals
    signal = signals[str(sig_id)]
    # The subsidary clear boolean value will always be either True or False
    if signal["subclear"]:
        logging.info("Signal "+str(sig_id)+": Toggling subsidary to ON")
        signal["subclear"] = False
        signal["subbutton"].config(relief="raised",bg=common.bgraised)
    else:
        logging.info("Signal "+str(sig_id)+": Toggling subsidary to OFF")
        signal["subclear"] = True
        signal["subbutton"].config(relief="sunken",bg=common.bgsunken)
    # Redraw - only colour light and semaphore signals support subsidaries
    if signal["sigtype"] == sig_type.colour_light:
        signals_colour_lights.update_colour_light_subsidary(sig_id)
    elif signal["sigtype"] == sig_type.semaphore:
        signals_semaphores.update_semaphore_subsidary_arms(sig_id)
    return ()
# -------------------------------------------------------------------------
# Shared function to Clear the approach control setting for a signal
# -------------------------------------------------------------------------
def clear_approach_control (sig_id:int):
    """Clear any approach control ("release on red"/"release on yellow") set
    for a signal, restore the button font, and redraw as required."""
    global logging
    global signals
    signal = signals[str(sig_id)]
    # Nothing to do unless approach control is currently active
    if signal["releaseonred"] or signal["releaseonyel"]:
        logging.info("Signal "+str(sig_id)+": Clearing approach control")
        signal["releaseonyel"] = False
        signal["releaseonred"] = False
        signal["sigbutton"].config(font=('Courier',common.fontsize,"normal"))
        # Redraw via the type-specific update function (colour light and
        # semaphore signals only update immediately if set to "refresh")
        if signal["sigtype"] == sig_type.colour_light:
            if signal["refresh"]: signals_colour_lights.update_colour_light_signal(sig_id)
        elif signal["sigtype"] == sig_type.semaphore:
            if signal["refresh"]: signals_semaphores.update_semaphore_signal(sig_id)
    return()
# -------------------------------------------------------------------------
# Common Functions to set and clear release control for a signal
# -------------------------------------------------------------------------
def set_approach_control (sig_id:int, release_on_yellow:bool = False):
    """Set "release on yellow" (or, by default, "release on red") approach
    control for a signal - a no-op if that mode is already set - underline
    the signal button as an indication, and redraw as required."""
    global logging
    global signals
    signal = signals[str(sig_id)]
    already_set = signal["releaseonyel"] if release_on_yellow else signal["releaseonred"]
    if not already_set:
        # Underlined button text gives an indication approach control is active
        signal["sigbutton"].config(font=('Courier',common.fontsize,"underline"))
        if release_on_yellow:
            logging.info("Signal "+str(sig_id)+": Setting approach control (release on yellow)")
        else:
            logging.info("Signal "+str(sig_id)+": Setting approach control (release on red)")
        # Exactly one of the two release modes is active at a time
        signal["releaseonyel"] = release_on_yellow
        signal["releaseonred"] = not release_on_yellow
        # Redraw via the type-specific update function (colour light and
        # semaphore signals only update immediately if set to "refresh")
        if signal["sigtype"] == sig_type.colour_light:
            if signal["refresh"]: signals_colour_lights.update_colour_light_signal(sig_id)
        elif signal["sigtype"] == sig_type.semaphore:
            if signal["refresh"]: signals_semaphores.update_semaphore_signal(sig_id)
    return()
# -------------------------------------------------------------------------
# Common Function to generate all the mandatory signal elements that will apply
# to all signal types (even if they are not used by the particular signal type)
# -------------------------------------------------------------------------
def create_common_signal_elements (canvas,
                                   sig_id: int,
                                   x:int, y:int,
                                   signal_type:sig_type,
                                   ext_callback,
                                   orientation:int,
                                   subsidary:bool=False,
                                   sig_passed_button:bool=False,
                                   automatic:bool=False,
                                   distant_button_offset:int=0):
    """Create the mandatory drawing objects and dictionary entry for a signal.

    Creates the main signal, subsidary and "signal passed" buttons on the
    canvas (hiding those the signal doesn't use) and initialises the signal's
    entry in the global 'signals' dictionary with all mandatory elements.

    canvas - the tkinter canvas to draw on
    sig_id - the signal identifier (key into the signals dictionary)
    x, y - canvas position of the signal
    signal_type - sig_type enum value for the signal being created
    ext_callback - external callback for signal events (None => null_callback)
    orientation - signal orientation (passed to common.rotate_point)
    subsidary - whether the signal has an associated subsidary signal
    sig_passed_button - whether to show the "signal passed" button
    automatic - True if the signal is fully automatic (button disabled)
    distant_button_offset - non-zero only for a semaphore distant created on
        the same post as a semaphore home signal (button labelled "D" and
        offset to deconflict with the home signal's buttons)
    """
    global signals
    # Find and store the root window (when the first signal is created)
    if common.root_window is None: common.find_root_window(canvas)
    # If no callback has been specified, use the null callback to do nothing
    if ext_callback is None: ext_callback = null_callback
    # Assign the button labels. if a distant_button_offset has been defined then this represents the
    # special case of a semaphore distant signal being created on the same "post" as a semaphore
    # home signal. On this case we label the button as "D" to differentiate it from the main
    # home signal button and then apply the offset to deconflict with the home signal buttons
    if distant_button_offset !=0 : main_button_text = "D"
    elif sig_id < 10: main_button_text = "0" + str(sig_id)  # zero-pad single digits for alignment
    else: main_button_text = str(sig_id)
    # Create the Signal and Subsidary Button objects and their callbacks
    sig_button = Button (canvas, text=main_button_text, padx=common.xpadding, pady=common.ypadding,
                state="normal", relief="raised", font=('Courier',common.fontsize,"normal"),
                bg=common.bgraised, command=lambda:signal_button_event(sig_id))
    sub_button = Button (canvas, text="S", padx=common.xpadding, pady=common.ypadding,
                state="normal", relief="raised", font=('Courier',common.fontsize,"normal"),
                bg=common.bgraised, command=lambda:subsidary_button_event(sig_id))
    # Signal Passed Button - We only want a small button - hence a small font size
    passed_button = Button (canvas,text="O",padx=1,pady=1,font=('Courier',2,"normal"),
                command=lambda:sig_passed_button_event(sig_id))
    # Create the 'windows' in which the buttons are displayed. The Subsidary Button is "hidden"
    # if the signal doesn't have an associated subsidary. The Button positions are adjusted
    # accordingly so they always remain in the "right" position relative to the signal
    # Note that we have to cater for the special case of a semaphore distant signal being
    # created on the same post as a semaphore home signal. In this case (signified by a
    # distant_button_offset), we apply the offset to deconflict with the home signal buttons
    if distant_button_offset != 0:
        button_position = common.rotate_point (x,y,distant_button_offset,-25,orientation)
        if not automatic: canvas.create_window(button_position,window=sig_button)
        else:canvas.create_window(button_position,window=sig_button,state='hidden')
        canvas.create_window(button_position,window=sub_button,state='hidden')
    elif subsidary:
        if orientation == 0: button_position = common.rotate_point (x,y,-25,-25,orientation)
        else: button_position = common.rotate_point (x,y,-35,-25,orientation)
        canvas.create_window(button_position,anchor=E,window=sig_button)
        canvas.create_window(button_position,anchor=W,window=sub_button)
    else:
        button_position = common.rotate_point (x,y,-20,-25,orientation)
        canvas.create_window(button_position,window=sig_button)
        canvas.create_window(button_position,window=sub_button,state='hidden')
    # Signal passed button is created on the track at the base of the signal
    if sig_passed_button:
        canvas.create_window(x,y,window=passed_button)
    else:
        canvas.create_window(x,y,window=passed_button,state='hidden')
    # Disable the main signal button if the signal is fully automatic
    if automatic: sig_button.config(state="disabled",relief="sunken",bg=common.bgraised,bd=0)
    # Create an initial dictionary entry for the signal and add all the mandatory signal elements
    signals[str(sig_id)] = {}
    signals[str(sig_id)]["canvas"] = canvas                   # MANDATORY - canvas object
    signals[str(sig_id)]["sigtype"] = signal_type             # MANDATORY - Type of the signal
    signals[str(sig_id)]["automatic"] = automatic             # MANDATORY - True = signal is fully automatic
    signals[str(sig_id)]["extcallback"] = ext_callback        # MANDATORY - The External Callback to use for the signal
    signals[str(sig_id)]["routeset"] = route_type.MAIN        # MANDATORY - Route setting for signal (MAIN at creation)
    signals[str(sig_id)]["sigclear"] = False                  # MANDATORY - State of the main signal control (ON/OFF)
    signals[str(sig_id)]["override"] = False                  # MANDATORY - Signal is "Overridden" (overrides main signal control)
    signals[str(sig_id)]["sigstate"] = None                   # MANDATORY - Displayed 'aspect' of the signal (None on creation)
    signals[str(sig_id)]["hassubsidary"] = subsidary          # MANDATORY - Whether the signal has a subsidary
    signals[str(sig_id)]["subclear"] = False                  # MANDATORY - State of the subsidary signal control (ON/OFF)
    signals[str(sig_id)]["siglocked"] = False                 # MANDATORY - State of signal interlocking
    signals[str(sig_id)]["sublocked"] = False                 # MANDATORY - State of subsidary interlocking
    signals[str(sig_id)]["sigbutton"] = sig_button            # MANDATORY - Button drawing object (main signal)
    signals[str(sig_id)]["subbutton"] = sub_button            # MANDATORY - Button drawing object (subsidary signal)
    signals[str(sig_id)]["passedbutton"] = passed_button      # MANDATORY - Button drawing object (signal passed)
    return()
# -------------------------------------------------------------------------
# Common Function to generate all the signal elements for Approach Control
# (shared by Colour Light and semaphore signal types)
# -------------------------------------------------------------------------
def create_approach_control_elements (canvas,sig_id:int,
                                      x:int,y:int,
                                      orientation:int,
                                      approach_button:bool):
    """Create the approach-release button for a signal and register the
    approach-control state in the signal's dictionary entry (shared by
    colour light and semaphore signal types)."""
    global signals
    # Small font keeps the release button compact
    release_button = Button(canvas,text="O",padx=1,pady=1,font=('Courier',2,"normal"),
                            command=lambda:approach_release_button_event (sig_id))
    # The button window is always created; it is simply hidden when the
    # signal has no approach-release button
    button_position = common.rotate_point(x,y,-50,0,orientation)
    visibility = "normal" if approach_button else "hidden"
    canvas.create_window(button_position,window=release_button,state=visibility)
    # Add the approach control elements to the dictionary of signal objects
    signals[str(sig_id)]["releaseonred"] = False            # SHARED - "release on red" approach control active
    signals[str(sig_id)]["releaseonyel"] = False            # SHARED - "release on yellow" approach control active
    signals[str(sig_id)]["releasebutton"] = release_button  # SHARED - Button drawing object
    return()
# -------------------------------------------------------------------------
# Common Function to generate all the signal elements for a theatre route
# display (shared by Colour Light and semaphore signal types)
# -------------------------------------------------------------------------
def create_theatre_route_elements (canvas,sig_id:int,
                                   x:int,y:int,
                                   xoff:int,yoff:int,
                                   orientation:int,
                                   has_theatre:bool):
    """Create the theatre route indicator (black box plus text object) for a
    signal and register the theatre state in the signal's dictionary entry
    (shared by colour light and semaphore signal types)."""
    global signals
    # The text object is always created but left hidden when the signal has
    # no theatre indicator; the surrounding black box is only drawn if needed
    text_coordinates = common.rotate_point(x,y,xoff,yoff,orientation)
    if has_theatre:
        canvas.create_rectangle(common.rotate_line(x,y,xoff-10,yoff+8,xoff+10,yoff-8,orientation),fill="black")
    text_state = 'normal' if has_theatre else 'hidden'
    theatreobject = canvas.create_text(text_coordinates,fill="white",text="",angle=orientation-90,state=text_state)
    # Add the theatre elements to the dictionary of signal objects
    signals[str(sig_id)]["theatretext"] = "NONE"           # SHARED - Initial theatre text to display (none)
    signals[str(sig_id)]["hastheatre"] = has_theatre       # SHARED - Whether the signal has a theatre display
    signals[str(sig_id)]["theatreobject"] = theatreobject  # SHARED - Text drawing object
    signals[str(sig_id)]["theatreenabled"] = None          # SHARED - State of the theatre display (None at creation)
    return()
# -------------------------------------------------------------------------
# Common Function to update a theatre route indicator either on signal
# update or route change (shared by Colour Light and semaphore signal types)
# -------------------------------------------------------------------------
def update_theatre_route_indication (sig_id,theatre_text:str=None):
    """Update a signal's theatre route indicator on signal update or route change.

    Two concerns are handled for signals that have a theatre display:
      1. Inhibit (hide) the indication while the signal is at DANGER and
         re-enable it otherwise, mirroring each transition to the DCC layer.
      2. Apply a new route text if *theatre_text* is given and differs from
         the currently stored text.
    The "!= False" / "!= True" comparisons deliberately treat the initial
    None state (set at creation) as requiring an update in either direction.
    """
    global logging
    global signals
    # Only update the Theatre route indication if one exists for the signal
    if signals[str(sig_id)]["hastheatre"]:
        # First deal with the theatre route inhibit/enable cases (i.e. signal at DANGER or not at DANGER)
        # We test for Not True and Not False to support the initial state when the signal is created (state = None)
        if signals[str(sig_id)]["sigstate"] == signal_state_type.DANGER and signals[str(sig_id)]["theatreenabled"] != False:
            logging.info ("Signal "+str(sig_id)+": Disabling theatre route display (signal is at DANGER)")
            signals[str(sig_id)]["canvas"].itemconfig (signals[str(sig_id)]["theatreobject"],state="hidden")
            signals[str(sig_id)]["theatreenabled"] = False
            # This is where we send the special character to inhibit the theatre route indication
            dcc_control.update_dcc_signal_theatre(sig_id,"#",signal_change=True,sig_at_danger=True)
        elif signals[str(sig_id)]["sigstate"] != signal_state_type.DANGER and signals[str(sig_id)]["theatreenabled"] != True:
            logging.info ("Signal "+str(sig_id)+": Enabling theatre route display of \'"+signals[str(sig_id)]["theatretext"]+"\'")
            signals[str(sig_id)]["canvas"].itemconfig (signals[str(sig_id)]["theatreobject"],state="normal")
            signals[str(sig_id)]["theatreenabled"] = True
            dcc_control.update_dcc_signal_theatre(sig_id,signals[str(sig_id)]["theatretext"],signal_change=True,sig_at_danger=False)
        # Deal with route changes (if a new route has been passed in) - but only if the theatre text has changed
        if theatre_text != None and theatre_text != signals[str(sig_id)]["theatretext"]:
            signals[str(sig_id)]["canvas"].itemconfig(signals[str(sig_id)]["theatreobject"],text=theatre_text)
            signals[str(sig_id)]["theatretext"] = theatre_text
            if signals[str(sig_id)]["theatreenabled"] == True:
                logging.info ("Signal "+str(sig_id)+": Changing theatre route display to \'" + theatre_text + "\'")
                dcc_control.update_dcc_signal_theatre(sig_id,signals[str(sig_id)]["theatretext"],signal_change=False,sig_at_danger=False)
            else:
                logging.info ("Signal "+str(sig_id)+": Setting theatre route to \'" + theatre_text + "\'")
                # We always call the function to update the DCC route indication on a change in route even if the signal
                # is at Danger to cater for DCC signal types that automatically enable/disable the route indication
                dcc_control.update_dcc_signal_theatre(sig_id,signals[str(sig_id)]["theatretext"],signal_change=False,sig_at_danger=True)
    return()
################################################################################################# |
from flask import Flask, render_template, redirect, request
import os
import sqlite3 as sql
port = 5000  # local development port
app = Flask(__name__, template_folder="contests")  # templates live under ./contests
@app.route('/')
def index():
    """Contest listing page, most recently updated first."""
    conn = sql.connect("sqlite.db")
    try:
        conn.row_factory = sql.Row
        cur = conn.cursor()
        cur.execute("SELECT contest_id, contest_name FROM contests ORDER BY update_time DESC")
        rows = cur.fetchall()
    finally:
        conn.close()  # fixed: the connection was never closed
    return render_template('contest_index.html', contests=rows)
@app.route('/<contest_id>')
def contest_route(contest_id):
    """List the problems for one contest, or the 404 page if unknown."""
    conn = sql.connect("sqlite.db")
    try:
        conn.row_factory = sql.Row
        cur = conn.cursor()
        # Parameterized queries: contest_id comes straight from the URL and
        # must never be spliced into the SQL text (injection risk)
        cur.execute("SELECT contest_name FROM contests WHERE contest_id=?", (str(contest_id),))
        rows = cur.fetchall()
        if not rows:
            return render_template('404.html')
        contest_name = rows[0][0]
        cur.execute("SELECT problem_id, problem_name FROM problems WHERE contest_id=?", (str(contest_id),))
        rows = cur.fetchall()
    finally:
        conn.close()  # fixed: the connection was never closed
    if len(rows)==0:
        return render_template('404.html')
    return render_template('problem_index.html', contest_name=contest_name, contest_id=contest_id, problems=rows)
@app.route('/<contest_id>/<task_id>')
def task_route(contest_id, task_id):
    """Render a single problem statement page (404 if the id pair is unknown)."""
    conn = sql.connect("sqlite.db")
    try:
        conn.row_factory = sql.Row
        cur = conn.cursor()
        # Parameterized query - URL values must not be spliced into SQL text
        cur.execute("SELECT * FROM problems WHERE contest_id=? AND problem_id=?",
                    (str(contest_id), str(task_id)))
        rows = cur.fetchall()
    finally:
        conn.close()  # fixed: the connection was never closed
    if len(rows)==0:
        return render_template('404.html')
    # The template path is only built after validating the pair against the DB
    return render_template('cf_'+contest_id+'/ps/'+task_id+'.html')
@app.route('/<contest_id>/<task_id>/submit')
def submit(contest_id, task_id):
    """Run the grader script for a task and return its stdout."""
    import subprocess  # local import keeps this fix self-contained
    # Argument-list invocation with shell=False: the original os.popen call
    # built a shell command from URL parts - a command-injection hole
    result = subprocess.run(["python3", "static/submit.py", contest_id, task_id],
                            capture_output=True, text=True)
    return result.stdout
@app.errorhandler(404)
def page_not_found(e):
    """Custom 404 page - also return status 404 (the original returned 200)."""
    return render_template('404.html'), 404
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger - never expose
    # this beyond localhost
    app.run(host='127.0.0.1',port=port,debug=True)
import os
import unittest
from datetime import datetime
from unittest.mock import patch
import duolingo
# Test credentials come from the environment; usernames fall back to defaults
USERNAME = os.environ.get('DUOLINGO_USER', 'ferguslongley')
PASSWORD = os.environ.get('DUOLINGO_PASSWORD')  # no default - login tests require it
USERNAME2 = os.environ.get("DUOLINGO_USER_2", "Spaniard")
def _example_word(lang):
"""
Returns an example word for a given language
:param lang: str Language abbreviation
:return: A word. Should be one early in the vocab for that language
"""
return {
"de": "mann",
"es": "hombre"
}.get(lang)
class DuolingoTest(unittest.TestCase):
    """Constructor behaviour tests - network calls are mocked out."""

    @patch("duolingo.Duolingo._get_data")
    def test_password_jwt_or_file_needed(self, mock_data):
        # Without a password, jwt or session file the constructor must fail fast
        with self.assertRaises(duolingo.DuolingoException):
            duolingo.Duolingo(USERNAME)
        mock_data.assert_not_called()

    @patch("duolingo.Duolingo._login")
    @patch("duolingo.Duolingo._get_data")
    def test_password_only_calls_login(self, mock_data, mock_login):
        # NOTE: stacked @patch decorators inject mocks bottom-up, so the
        # innermost patch (_get_data) arrives first - the original parameter
        # names were swapped, mislabelling which mock was which
        duolingo.Duolingo(USERNAME, PASSWORD)
        mock_login.assert_called_once_with()
        mock_data.assert_called_once_with()

    @patch("duolingo.Duolingo._login")
    @patch("duolingo.Duolingo._get_data")
    def test_jwt_only_calls_login(self, mock_data, mock_login):
        duolingo.Duolingo(USERNAME, jwt="jwt-example")
        mock_login.assert_called_once_with()
        mock_data.assert_called_once_with()

    @patch("duolingo.Duolingo._login")
    @patch("duolingo.Duolingo._get_data")
    def test_file_only_calls_login(self, mock_data, mock_login):
        duolingo.Duolingo(USERNAME, session_file="temp/filename.json")
        mock_login.assert_called_once_with()
        mock_data.assert_called_once_with()
class DuolingoLoginTest(unittest.TestCase):
lingo = None
    @classmethod
    def setUpClass(cls):
        """Log in once for the whole class (live network call to Duolingo)."""
        cls.lingo = duolingo.Duolingo(USERNAME, PASSWORD)
        # Learning-language abbreviation reused by the individual tests below
        cls.lang = cls.lingo.user_data.learning_language
    @classmethod
    def tearDownClass(cls):
        """Close the shared HTTP session (only if login succeeded)."""
        if cls.lingo:
            cls.lingo.session.close()
def test_get_user_info(self):
response = self.lingo.get_user_info()
assert isinstance(response, dict)
assert "avatar" in response
assert "id" in response
assert "location" in response
assert "learning_language_string" in response
def test_get_settings(self):
response = self.lingo.get_settings()
assert isinstance(response, dict)
assert "deactivated" in response
def test_get_languages(self):
response1 = self.lingo.get_languages(abbreviations=False)
assert isinstance(response1, list)
for lang in response1:
assert isinstance(lang, str)
response2 = self.lingo.get_languages(abbreviations=True)
assert isinstance(response2, list)
for lang in response2:
assert isinstance(lang, str)
assert len(response1) == len(response2)
def test_get_friends(self):
response = self.lingo.get_friends()
assert isinstance(response, list)
for friend in response:
assert "username" in friend
assert "points" in friend
assert isinstance(friend['points'], int)
assert "languages" in friend
assert isinstance(friend['languages'], list)
for lang in friend['languages']:
assert isinstance(lang, str)
def test_get_calendar(self):
response1 = self.lingo.get_calendar()
response2 = self.lingo.get_calendar(self.lang)
for response in [response1, response2]:
assert isinstance(response, list)
for item in response:
assert "skill_id" in item
assert "improvement" in item
assert "event_type" in item
assert "datetime" in item
assert isinstance(item['datetime'], int)
def test_get_streak_info(self):
response = self.lingo.get_streak_info()
assert isinstance(response, dict)
assert "site_streak" in response
assert "daily_goal" in response
assert "streak_extended_today" in response
def test_get_leaderboard(self):
response1 = self.lingo.get_leaderboard('week', datetime.now())
response2 = self.lingo.get_leaderboard('month', datetime.now())
for response in [response1, response2]:
assert isinstance(response, list)
for item in response:
assert "points" in item
assert "unit" in item
assert "id" in item
assert "username" in item
def test_get_language_details(self):
language = self.lingo.get_language_from_abbr(self.lang)
response = self.lingo.get_language_details(language)
assert isinstance(response, dict)
assert "current_learning" in response
assert "language" in response
assert "language_string" in response
assert "learning" in response
assert "level" in response
assert "points" in response
assert "streak" in response
def test_get_language_progress(self):
response = self.lingo.get_language_progress(self.lang)
assert isinstance(response, dict)
assert "language" in response
assert "language_string" in response
assert "level_left" in response
assert "level_percent" in response
assert "level_points" in response
assert "level_progress" in response
assert "next_level" in response
assert "num_skills_learned" in response
assert "points" in response
assert "points_rank" in response
assert "streak" in response
def test_get_known_topics(self):
response = self.lingo.get_known_topics(self.lang)
assert isinstance(response, list)
for topic in response:
assert isinstance(topic, str)
def test_get_unknown_topics(self):
response = self.lingo.get_unknown_topics(self.lang)
assert isinstance(response, list)
for topic in response:
assert isinstance(topic, str)
def test_get_golden_topics(self):
response = self.lingo.get_golden_topics(self.lang)
assert isinstance(response, list)
for topic in response:
assert isinstance(topic, str)
def test_get_reviewable_topics(self):
response = self.lingo.get_reviewable_topics(self.lang)
assert isinstance(response, list)
for topic in response:
assert isinstance(topic, str)
def test_get_known_words(self):
response = self.lingo.get_known_words(self.lang)
assert isinstance(response, list)
for word in response:
assert isinstance(word, str)
def test_get_related_words(self):
# Setup
word = _example_word(self.lang)
# Get value
response = self.lingo.get_related_words(word)
# Check
assert isinstance(response, list)
def test_get_learned_skills(self):
response = self.lingo.get_learned_skills(self.lang)
assert isinstance(response, list)
for skill in response:
assert "language_string" in skill
assert "id" in skill
assert "title" in skill
assert "explanation" in skill
assert "progress_percent" in skill
assert "words" in skill
assert "name" in skill
def test_get_language_from_abbr(self):
response = self.lingo.get_language_from_abbr(self.lang)
assert isinstance(response, str)
def test_get_abbreviation_of(self):
response = self.lingo.get_abbreviation_of('french')
assert isinstance(response, str)
def test_get_translations(self):
response1 = self.lingo.get_translations('e')
response2 = self.lingo.get_translations('e', self.lang)
response3 = self.lingo.get_translations('e', self.lang, 'fr')
for response in [response1, response2, response3]:
assert isinstance(response, dict)
assert "e" in response
assert isinstance(response['e'], list)
response = self.lingo.get_translations(['e', 'a'])
assert isinstance(response, dict)
assert "e" in response
assert isinstance(response['e'], list)
assert "a" in response
assert isinstance(response['a'], list)
def test_segment_translation_word_list(self):
# Nothing should happen to short list
short_list = ["a", "e", "i", "o", "u"]
result = self.lingo._segment_translations_list(short_list)
assert result == [short_list]
# Just under count limit
just_under_count = ["a"] * 1999
result = self.lingo._segment_translations_list(just_under_count)
assert result == [just_under_count]
# Just over count limit
just_over_count = ["a"] * 2000
result = self.lingo._segment_translations_list(just_over_count)
assert result != [just_over_count]
assert result == [["a"] * 1999, ["a"]]
# Just under json length limit
just_under_length = ["aaaaaaaa"] * 1066
result = self.lingo._segment_translations_list(just_under_length)
assert result == [just_under_length]
# Just over json length limit
just_over_length = ["aaaaaaaa"] * 1067
result = self.lingo._segment_translations_list(just_over_length)
assert result != [just_over_length]
assert result == [["aaaaaaaa"] * 1066, ["aaaaaaaa"]]
def test_get_vocabulary(self):
response1 = self.lingo.get_vocabulary()
response2 = self.lingo.get_vocabulary(self.lang)
for response in [response1, response2]:
assert isinstance(response, dict)
assert response['language_string']
assert "language_string" in response
assert "learning_language" in response
assert response["learning_language"] == self.lang
assert "from_language" in response
assert "language_information" in response
assert "vocab_overview" in response
assert isinstance(response["vocab_overview"], list)
def test_get_audio_url(self):
# Setup
word = _example_word(self.lang)
# Test
response = self.lingo.get_audio_url(word)
assert isinstance(response, str)
response = self.lingo.get_audio_url(word, self.lang)
assert isinstance(response, str)
response = self.lingo.get_audio_url("zz")
assert response is None
def test_get_word_definition_by_id(self):
response = self.lingo.get_word_definition_by_id("52383869a8feb3e5cf83dbf7fab9a018")
assert isinstance(response, dict)
keys = ["alternative_forms", "translations", "learning_language_name", "from_language_name", "word"]
for key in keys:
assert key in response
def test_get_daily_xp_progress(self):
response = self.lingo.get_daily_xp_progress()
assert isinstance(response['xp_goal'], int)
assert isinstance(response['xp_today'], int)
assert isinstance(response['lessons_today'], list)
class DuolingoOtherUsernameTest(DuolingoLoginTest):
    """
    Re-runs the whole login test suite after switching the client to a
    second username, overriding the tests whose endpoints are only valid
    for the logged-in user (they must now raise).
    """

    @classmethod
    def setUpClass(cls):
        cls.lingo = duolingo.Duolingo(USERNAME, PASSWORD)
        # Switch to viewing another user's data with the original session.
        cls.lingo.set_username(USERNAME2)
        cls.lang = cls.lingo.user_data.learning_language

    def test_get_daily_xp_progress(self):
        # Daily XP is private: reading it for another user must fail.
        try:
            self.lingo.get_daily_xp_progress()
            assert False, "Should have failed to read daily XP progress."
        except duolingo.DuolingoException as e:
            assert USERNAME2 in str(e)
            assert "Could not get daily XP progress for user" in str(e)

    def test_get_vocabulary(self):
        # Vocabulary is only available for the authenticated user.
        try:
            self.lingo.get_vocabulary()
            assert False, "Should have failed to get vocabulary."
        except duolingo.OtherUserException as e:
            assert "Vocab cannot be listed when the user has been switched" in str(e)

    def test_get_related_words(self):
        # Related words depend on vocabulary, so the same restriction applies.
        try:
            word = _example_word(self.lang)
            self.lingo.get_related_words(word)
            assert False, "Should have failed to get related words."
        except duolingo.OtherUserException as e:
            assert "Vocab cannot be listed when the user has been switched" in str(e)
if __name__ == '__main__':
    # Run the full test suite when executed directly.
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Jiyuan Zhou
Enable an agent to follow a hard coded trajectory in the form of
a square with rounded corners using trained straight and circle models.
"""
import argparse
import cProfile
import pstats
import sys
import time
import math
import yaml
import joblib
import matplotlib.pyplot as plt
import numpy as np
from rllab.misc import tensor_utils
from aa_simulation.envs.renderer import _Renderer
from aa_simulation.envs.straight_env import StraightEnv
def render(renderer, state, action):
    """
    Push the current (state, action) pair to the rendering backend so the
    on-screen visualisation reflects this simulation step.
    """
    renderer.update(state, action)
def modify_state_curve(state, move_param):
    """
    Convert a world-frame state [x, y, yaw, x_dot, y_dot, yaw_dot] into the
    circle-tracking frame [dx, theta, ddx, dtheta] for the circle centred at
    (x_0, y_0) with radius r.

    Args:
        state: sequence of 6 floats, world-frame pose and velocities.
        move_param: (x_0, y_0, r) describing the reference circle.

    Returns:
        np.ndarray [dx, theta, ddx, dtheta] where dx is the signed radial
        error, theta the heading error, and ddx/dtheta their rates.

    Improvement: the squared distance and its square root were each computed
    several times in the original; they are hoisted and reused here.
    """
    x_0, y_0, r = move_param
    x, y, yaw, x_dot, y_dot, yaw_dot = state
    # Work in coordinates relative to the circle centre.
    x -= x_0
    y -= y_0
    dist_sq = x ** 2 + y ** 2       # squared distance to the centre
    dist = np.sqrt(dist_sq)         # reused for both dx and ddx
    dx = dist - r
    theta = _normalize_angle(np.arctan2(-x, y) + np.pi - yaw)
    # Radial and angular error rates (chain rule on the polar transform).
    ddx = (x * x_dot + y * y_dot) / dist
    dtheta = (x * x_dot - y * y_dot) / dist_sq - yaw_dot
    return np.array([dx, theta, ddx, dtheta])
def _normalize_angle(angle):
"""
Normalize angle to [-pi, pi).
"""
angle = angle % (2*np.pi)
if (angle >= np.pi):
angle -= 2*np.pi
return angle
def _normalize_angle2(angle):
"""
Normalize angle to [0, 2 * pi).
"""
angle = angle % (2*np.pi)
return angle
def modify_state_straight(state, move_param):
    """
    Build the NN observation for line following: project ``state`` onto the
    target line through (x_0, y_0) with heading ``target_dir`` and drop the
    leading along-track component.
    """
    x_0, y_0, target_dir = move_param
    projected = StraightEnv.project_line(state, x_0, y_0, target_dir)
    return projected[1:]
def _cal_distance(x, y, move_param):
    """
    Return (0, lateral_offset): the signed perpendicular distance of point
    (x, y) from the line through (init_x, init_y) with heading target_dir.
    The along-track component is always reported as 0.
    """
    init_x, init_y, target_dir = move_param
    dx = x - init_x
    dy = y - init_y
    position_dir = np.arctan2(dy, dx)
    projection_dir = _normalize_angle(position_dir - target_dir)
    dist = np.sqrt(np.square(dx) + np.square(dy))
    return (0, dist * np.sin(projection_dir))
def _check_point(state, way_point):
    """
    Return True while the agent is still on the approach side of the
    way-point: i.e. the bearing from the way-point to the agent is within
    90 degrees of the way-point's crossing direction.
    """
    x, y = state[0], state[1]
    check_point_x, check_point_y, direction = way_point
    bearing = np.arctan2(y - check_point_y, x - check_point_x)
    crossing_angle = _normalize_angle(bearing - direction)
    return np.absolute(crossing_angle) <= math.pi / 2
def rollout(env, agent, way_point=None, animated=False, speedup=1,
            always_return_paths=False, renderer=None, state=None,
            isCurve=False, move_param=None):
    """
    Step ``agent`` through ``env`` starting at ``state`` until the way-point
    plane defined by ``way_point`` is crossed (or the environment reports
    done), and return the final environment state.

    Args:
        env: rllab-style wrapped environment; its internal state is assigned
            directly before stepping.
        agent: policy exposing ``get_action``; the deterministic mean action
            is used.
        way_point: (x, y, direction) tuple defining the crossing plane.
        animated: if True, render each step and sleep briefly.
        speedup: divisor applied to the per-step animation delay.
        always_return_paths: unused; kept for interface compatibility.
        renderer: renderer handed to ``render`` when animating.
        state: initial [x, y, yaw, x_dot, y_dot, yaw_dot]; defaults to the
            origin at rest.
        isCurve: True selects the circle-frame observation transform,
            False the straight-line transform.
        move_param: parameters for the selected transform.

    Fixes: the original used mutable default arguments (two lists and a
    shared ``np.zeros(6)`` array created once at definition time); they are
    replaced with None sentinels. Per-step bookkeeping lists that were
    collected but never returned have been removed.
    """
    if way_point is None:
        way_point = []
    if move_param is None:
        move_param = []
    if state is None:
        state = np.zeros(6)
    env._wrapped_env._state = state
    timestep = 0.0001  # hoisted loop-invariant animation delay
    while _check_point(state, way_point):
        # Transform the raw state into the observation the policy expects.
        if isCurve:
            o = modify_state_curve(state, move_param)
        else:
            o = modify_state_straight(state, move_param)
        _, agent_info = agent.get_action(o)
        a = agent_info['mean']  # deterministic control: use the policy mean
        _, _, d, _ = env.step(a)
        if d:
            break
        state = env._wrapped_env._state
        if animated:
            render(renderer, state, a)
            time.sleep(timestep / speedup)
    return state
def parse_arguments():
    """
    Parse command-line options for the demo: ``--speedup`` (float) and the
    ``--render`` / ``--no-render`` toggle (rendering defaults to on).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--speedup', type=float, default=100000,
                        help='Speedup')
    parser.add_argument('--render', dest='render', action='store_true',
                        help='Rendering')
    parser.add_argument('--no-render', dest='render', action='store_false',
                        help='Rendering')
    parser.set_defaults(render=True)
    return parser.parse_args()
def move(env, policy, args, way_point, renderer,
         state, isCurve, move_param):
    """
    Run one trajectory segment (a rollout toward ``way_point``) and return
    the state the agent ends in.
    """
    return rollout(
        env,
        policy,
        way_point=way_point,
        animated=args.render,
        speedup=args.speedup,
        always_return_paths=True,
        renderer=renderer,
        state=state,
        isCurve=isCurve,
        move_param=move_param,
    )
def init_render():
    """
    Load the vehicle model parameters and construct the renderer.

    Fixes: the original leaked the open file handle and called ``yaml.load``
    without a Loader, which is deprecated and unsafe on untrusted input;
    use a context manager and ``yaml.safe_load`` instead.
    """
    with open('aa_simulation/envs/model_params.yaml', 'r') as stream:
        params = yaml.safe_load(stream)
    return _Renderer(params, None)
def main():
    """
    Drive the agent around a square with rounded corners: alternate the
    trained circle policy (corners) and straight policy (edges) through a
    fixed list of way-points, profiling the run.
    """
    args = parse_arguments()
    profiler = cProfile.Profile()
    # Trained policies: one for circular arcs, one for straight segments.
    data_curve = joblib.load("data/roundedsquare_demo/circle.pkl")
    policy_curve = data_curve['policy']
    env_curve = data_curve['env']
    data_straight = joblib.load("data/roundedsquare_demo/straight.pkl")
    policy_straight = data_straight['policy']
    env_straight = data_straight['env']
    plt.ion()
    # Set fixed random seed
    np.random.seed(9)
    # Sample one rollout
    profiler.enable()
    # Define initial state
    renderer = init_render()
    state = [0, 0, 0, 0, 0, 0]
    render(renderer, state, None)
    # Eight way-points (x, y, crossing direction): two per side of the square.
    way_points = [
        [0, 0, np.pi], [1, 0, np.pi],
        [2, 1, -np.pi/2], [2, 2, -np.pi/2],
        [1, 3, 0], [0, 3, 0], [-1, 2, np.pi/2], [-1, 1, np.pi/2]]
    # Corner arcs: (centre_x, centre_y, radius) for each of the four corners.
    curve_params = [
        [0, 1, 1], [1, 1, 1], [1, 2, 1], [0, 2, 1]]
    # Edges: (x, y, heading) of the reference line for each of the four sides.
    straight_params = [
        [0, 0, 0], [2, 1, np.pi/2],
        [1, 3, -np.pi], [-1, 2, -np.pi/2]]
    point = 0
    for i in range(400):
        # i cycles 0..3 over the four corner/edge parameter sets;
        # point cycles 0..7 over the eight way-points.
        i %= 4
        # Turn left for 90 degrees
        point %= 8
        state = move(env_curve, policy_curve, args,
                     way_points[point], renderer, state,
                     True, curve_params[i])
        point += 1
        # Move straightly for length 2
        point %= 8
        state = move(env_straight, policy_straight, args,
                     way_points[point], renderer, state,
                     False, straight_params[i])
        point += 1
    profiler.disable()
    # Block until key is pressed
    sys.stdout.write("Press <enter> to continue: ")
    input()
if __name__ == '__main__':
    # Entry point: run the rounded-square trajectory demo.
    main()
import searchInsertPositions
import unittest
class SearchInsertPositionsCase(unittest.TestCase):
    """Unit tests for ``searchInsertPositions.Solution.searchInsert``."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_searchInsertPositions(self):
        solution = searchInsertPositions.Solution()
        # (nums, target, expected insert position)
        cases = [
            ([1, 3, 5, 6], 7, 4),
            ([1, 3, 5, 6], 0, 0),
            ([1, 3, 5, 6, 9], 6, 3),
            ([1, 3, 5, 6, 10], 5, 2),
        ]
        for nums, target, expected in cases:
            self.assertEqual(solution.searchInsert(nums, target), expected)
|
from abc import ABC, abstractmethod
import math
import lru
import numpy as np
import torch
from textattack.goal_function_results.goal_function_result import (
GoalFunctionResultStatus,
)
from textattack.shared import utils, validators
from textattack.shared.utils import batch_model_predict, default_class_repr
class GoalFunction(ABC):
    """
    Evaluates how well a perturbed attacked_text object is achieving a
    specified goal.

    Args:
        model: The model used for evaluation.
        maximizable: Whether the goal function is maximizable, as opposed to
            a boolean result of success or failure.
        tokenizer: Tokenizer exposing an ``encode()`` method; taken from
            ``model.tokenizer`` when omitted.
        use_cache (bool): Whether to memoize model outputs per attacked text.
        query_budget (float): The maximum number of model queries allowed.
        model_batch_size (int): The batch size for making calls to the model.
        model_cache_size (int): The maximum number of items to keep in the
            model results cache at once.
    """

    def __init__(
        self,
        model,
        maximizable=False,
        tokenizer=None,
        use_cache=True,
        query_budget=float("inf"),
        model_batch_size=32,
        model_cache_size=2 ** 18,
    ):
        validators.validate_model_goal_function_compatibility(
            self.__class__, model.__class__
        )
        self.model = model
        self.maximizable = maximizable
        self.tokenizer = tokenizer
        if not self.tokenizer:
            if hasattr(self.model, "tokenizer"):
                self.tokenizer = self.model.tokenizer
            else:
                raise NameError("Cannot instantiate goal function without tokenizer")
        if not hasattr(self.tokenizer, "encode"):
            raise TypeError("Tokenizer must contain `encode()` method")
        self.use_cache = use_cache
        self.query_budget = query_budget
        self.model_batch_size = model_batch_size
        if self.use_cache:
            self._call_model_cache = lru.LRU(model_cache_size)
        else:
            self._call_model_cache = None

    def init_attack_example(self, attacked_text, ground_truth_output):
        """
        Called before attacking ``attacked_text`` to 'reset' the goal
        function and set properties for this example.

        Returns:
            (result, search_over) for the initial, unperturbed text.
        """
        self.initial_attacked_text = attacked_text
        self.ground_truth_output = ground_truth_output
        self.num_queries = 0
        return self.get_result(attacked_text, check_skip=True)

    def get_output(self, attacked_text):
        """
        Returns output for display based on the result of calling the model.
        """
        return self._get_displayed_output(self._call_model([attacked_text])[0])

    def get_result(self, attacked_text, **kwargs):
        """
        A helper method that queries ``self.get_results`` with a single
        ``AttackedText`` object.
        """
        results, search_over = self.get_results([attacked_text], **kwargs)
        result = results[0] if len(results) else None
        return result, search_over

    def get_results(self, attacked_text_list, check_skip=False):
        """
        For each attacked_text object in attacked_text_list, returns a result
        consisting of whether or not the goal has been achieved, the output
        for display purposes, and a score. Additionally returns whether the
        search is over due to the query budget.
        """
        results = []
        if self.query_budget < float("inf"):
            # Fix: the budget may be a non-integral float, and slicing needs
            # an int; also clamp at 0 so an overshot budget cannot produce a
            # negative slice (which would drop items from the END of the list).
            queries_left = max(int(self.query_budget - self.num_queries), 0)
            attacked_text_list = attacked_text_list[:queries_left]
        self.num_queries += len(attacked_text_list)
        model_outputs = self._call_model(attacked_text_list)
        for attacked_text, raw_output in zip(attacked_text_list, model_outputs):
            displayed_output = self._get_displayed_output(raw_output)
            goal_status = self._get_goal_status(
                raw_output, attacked_text, check_skip=check_skip
            )
            goal_function_score = self._get_score(raw_output, attacked_text)
            results.append(
                self._goal_function_result_type()(
                    attacked_text,
                    raw_output,
                    displayed_output,
                    goal_status,
                    goal_function_score,
                    self.num_queries,
                    self.ground_truth_output,
                )
            )
        # Fix: use >= rather than == so a budget that was overshot (or is
        # non-integral) still reports the search as over.
        return results, self.num_queries >= self.query_budget

    def _get_goal_status(self, model_output, attacked_text, check_skip=False):
        """Map a raw model output to a GoalFunctionResultStatus value."""
        should_skip = check_skip and self._should_skip(model_output, attacked_text)
        if should_skip:
            return GoalFunctionResultStatus.SKIPPED
        if self.maximizable:
            return GoalFunctionResultStatus.MAXIMIZING
        if self._is_goal_complete(model_output, attacked_text):
            return GoalFunctionResultStatus.SUCCEEDED
        return GoalFunctionResultStatus.SEARCHING

    @abstractmethod
    def _is_goal_complete(self, model_output, attacked_text):
        raise NotImplementedError()

    def _should_skip(self, model_output, attacked_text):
        # Skip examples the model already gets "wrong" before perturbation.
        return self._is_goal_complete(model_output, attacked_text)

    @abstractmethod
    def _get_score(self, model_output, attacked_text):
        raise NotImplementedError()

    def _get_displayed_output(self, raw_output):
        return raw_output

    @abstractmethod
    def _goal_function_result_type(self):
        """
        Returns the class of this goal function's results.
        """
        raise NotImplementedError()

    @abstractmethod
    def _process_model_outputs(self, inputs, outputs):
        """
        Processes and validates a list of model outputs.

        This is a task-dependent operation. For example, classification
        outputs need to make sure they have a softmax applied.
        """
        raise NotImplementedError()

    def _call_model_uncached(self, attacked_text_list):
        """
        Queries model and returns outputs for a list of AttackedText
        objects.
        """
        if not len(attacked_text_list):
            return []
        ids = utils.batch_tokenize(self.tokenizer, attacked_text_list)
        with torch.no_grad():
            outputs = batch_model_predict(
                self.model, ids, batch_size=self.model_batch_size
            )
        return self._process_model_outputs(attacked_text_list, outputs)

    def _call_model(self, attacked_text_list):
        """
        Gets predictions for a list of ``AttackedText`` objects.

        Gets prediction from cache if possible. If prediction is not in the
        cache, queries model and stores prediction in cache.
        """
        if not self.use_cache:
            return self._call_model_uncached(attacked_text_list)
        # Fix: the original built ``uncached_list`` in this loop and then
        # immediately rebuilt it with an equivalent comprehension; the
        # redundant second pass is removed.
        uncached_list = []
        for text in attacked_text_list:
            if text in self._call_model_cache:
                # Re-write value in cache. This moves the key to the top of
                # the LRU cache and prevents the unlikely event that the text
                # is overwritten when we store the inputs from
                # `uncached_list`.
                self._call_model_cache[text] = self._call_model_cache[text]
            else:
                uncached_list.append(text)
        outputs = self._call_model_uncached(uncached_list)
        for text, output in zip(uncached_list, outputs):
            self._call_model_cache[text] = output
        all_outputs = [self._call_model_cache[text] for text in attacked_text_list]
        return all_outputs

    def extra_repr_keys(self):
        # Only advertise the budget in repr() when it is actually finite.
        if self.query_budget < float("inf"):
            return ["query_budget"]
        return []

    __repr__ = __str__ = default_class_repr
|
from setuptools import setup
# Minimal packaging metadata for the zpylib distribution.
setup(
    name='zpylib',
    version='0.1dev',
    packages=['zpylib'],
    license='See LICENSE.txt',
)
|
import ctypes
import os.path
class DeviceUnderTest:
    """
    ctypes wrapper around the simulation shared library: loads
    ``simulation.so`` from the build tree and exposes its SCPI
    request/response entry points.
    """

    def __init__(self):
        dll_name = "simulation.so"
        # Fix: the original string concatenation was missing a separator
        # before "build", producing ".../Simulationbuild/simulation.so";
        # it also applied os.path.abspath twice. Use os.path.join instead.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        dllabspath = os.path.join(
            base_dir, "..", "..", "Software", "Simulation", "build", dll_name
        )
        self.dut = ctypes.CDLL(dllabspath)
        # The library itself also needs the build directory (relative path,
        # as before) to locate its own resources.
        pathToSharedLibraryOfDUT = "../../Software/Simulation/build"
        self.dut.Lib_Simulation_Init(pathToSharedLibraryOfDUT.encode())

    def sendSCPI(self, command):
        """Send a SCPI command string; returns the library's status code."""
        self.dut.Lib_Simulation_SendSCPIRequest.argtypes = \
            [ctypes.POINTER(ctypes.c_char)]
        return self.dut.Lib_Simulation_SendSCPIRequest(command.encode())

    def receiveSCPI(self):
        """Fetch the pending SCPI response and decode it to ``str``."""
        self.dut.Lib_Simulation_ReceiveSCPIResponse.restype = ctypes.c_char_p
        return self.dut.Lib_Simulation_ReceiveSCPIResponse().decode()
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Written by Hu Di
import pytest
import os
import subprocess
def pytest_sessionstart(session):
    """
    Build the native prerequisites (via ``make``) once, before any test in
    the session runs.

    Fixes: ``subprocess.run(['make'], shell=True)`` mixed list arguments
    with ``shell=True`` (only the first element reaches the shell); run the
    command directly instead. ``check=True`` makes a broken build fail the
    session immediately rather than silently. Also fixes the "dic" typo.
    """
    faster_rcnn_working_dir = os.path.join(os.path.dirname(__file__), '../')
    subprocess.run(['make'], cwd=faster_rcnn_working_dir, check=True)
|
import lib.Settings as settings
from lib.Locale import _
from lib.Locale import locCurrency
from lib.Locale import locDate
import os
import math
import threading
import platform
from tkinter import *
from tkinter import messagebox
from lib.Widgets import Frame_
from lib.Widgets import LabelButton_
from lib.Widgets import Label_
from lib.Img import loadIcon
from lib.Browser import openItemInBrowser
from lib.Browser import openVGCInBrowser
from lib.Download import downloadCover
from gui.GUI_Popups import Pop_CoverViewer
import lib.Var as VAR
######################
# GUI_ItemInfo
# --------------------
class GUI_ItemInfo(Frame_):
    """
    Side panel showing the currently selected collection item: a small
    toolbar (website link, bookmark, finished), the basic fields (title,
    dates, price), and the front/back/cart cover images with hover actions.
    """

    def __init__(self, master, width=0, height=0):
        super().__init__(master=master, width=width, height=height, style=VAR.FRAME_STYLE_SECONDARY)
        self.setDefaultLabelStyle(VAR.LABEL_STYLE_SECONDARY)
        # Icons
        # ------------------
        self.item_bookmark_ico = loadIcon("bookmark-outline", 15, 15)
        self.item_finished_ico = loadIcon("checkmark-circle-outline", 15, 15)
        self.item_link_ico = loadIcon("link-outline", 15, 15)
        self.item_refresh_ico = loadIcon("refresh-outline", 15, 15)
        self.item_view_ico = loadIcon("eye-outline", 15, 15)
        # Accessors and callbacks borrowed from the parent widget.
        self.activeItemIndex = master.activeItemIndex
        self.activeItem = master.activeItem
        self.toggleBookmark = master.toggleBookmark
        self.toggleFinished = master.toggleFinished
        self.getOnlineCollectionListPage = master.getOnlineCollectionListPage
        self.pop_coverViewer = Pop_CoverViewer(self)
        self.init()

    def init(self):
        """Create and grid all child widgets of the panel."""
        # Item info
        # ------------------
        self.item_spacer = Label_(self, width=2)
        # Linux fonts render wider, so use a narrower label there.
        if platform.system() == "Linux":
            labelWidth = 16
        else:
            labelWidth = 22
        self.item_title_txt = Label_(self, text=_("Title"), anchor="nw")
        self.item_title = Label_(self, anchor="nw", width=labelWidth, wraplength=135)
        self.item_date_txt = Label_(self, text=_("Date (purchased)"), anchor="nw")
        self.item_date = Label_(self, anchor="nw", width=labelWidth)
        self.item_dateAdded_txt = Label_(self, text=_("Date (added)"), anchor="nw")
        self.item_dateAdded = Label_(self, anchor="nw", width=labelWidth)
        self.item_price_txt = Label_(self, text=_("Purchase price"), anchor="nw")
        self.item_price = Label_(self, anchor="nw", width=labelWidth)
        # Front cover widgets
        self.item_front_txt = Label_(self, text=_("Front cover"), anchor="w")
        self.item_front = Label_(self, anchor="w", _imgdef=VAR.IMG_COVER_NONE, _imgwidth=VAR.COVER_WIDTH)
        self.item_front.bind("<Enter>", lambda x:self.onCoverEnter(self.item_front, VAR.COVER_TYPE_FRONT))
        self.item_front.bind("<Leave>", lambda x:self.onCoverLeave(self.item_front))
        # Back cover widgets
        self.item_back_txt = Label_(self, text=_("Back cover"), anchor="w")
        self.item_back = Label_(self, anchor="w", _imgdef=VAR.IMG_COVER_NONE, _imgwidth=VAR.COVER_WIDTH)
        self.item_back.bind("<Enter>", lambda x:self.onCoverEnter(self.item_back, VAR.COVER_TYPE_BACK))
        self.item_back.bind("<Leave>", lambda x:self.onCoverLeave(self.item_back))
        # Cart cover widgets
        self.item_cart_txt = Label_(self, text=_("Cart cover"), anchor="w")
        self.item_cart = Label_(self, anchor="w", _imgdef=VAR.IMG_COVER_NONE, _imgwidth=VAR.COVER_WIDTH)
        self.item_cart.bind("<Enter>", lambda x:self.onCoverEnter(self.item_cart, VAR.COVER_TYPE_CART))
        self.item_cart.bind("<Leave>", lambda x:self.onCoverLeave(self.item_cart))
        # Grid layout: label rows alternate with value rows.
        self.item_title_txt.grid(row=1, column=0, columnspan=2, sticky="nwe", pady=(5,0))
        self.item_spacer.grid(row=2, column=0, sticky="nwe")
        self.item_title.grid(row=2, column=1, sticky="nwe", padx=(0,10))
        self.item_date_txt.grid(row=3, column=0, columnspan=2, sticky="nwe", pady=(5,0))
        self.item_date.grid(row=4, column=1, sticky="nwe")
        self.item_dateAdded_txt.grid(row=5, column=0, columnspan=2, sticky="nwe", pady=(5,0))
        self.item_dateAdded.grid(row=6, column=1, sticky="nwe")
        self.item_price_txt.grid(row=7, column=0, columnspan=2, sticky="nwe", pady=(5,0))
        self.item_price.grid(row=8, column=1, sticky="nwe")
        self.item_front_txt.grid(row=9, column=0, columnspan=2, sticky="nwe", pady=(5,0))
        self.item_front.grid(row=10, column=1, sticky="nwe")
        self.item_back_txt.grid(row=11, column=0, columnspan=2, sticky="nwe", pady=(5,0))
        self.item_back.grid(row=12, column=1, sticky="nwe")
        self.item_cart_txt.grid(row=13, column=0, columnspan=2, sticky="nwe", pady=(5,0))
        self.item_cart.grid(row=14, column=1, sticky="nwe")
        # Frame for item toolbar
        self.item_tool_frame = Frame_(self , width=200 , height=10, style=VAR.FRAME_STYLE_SECONDARY)
        self.item_tool_frame.grid(row=0, column=0, sticky="nwe", columnspan=2, pady=0 , padx=(0,10))
        # Item Toolbar
        self.item_open_website = LabelButton_(self.item_tool_frame, image=self.item_link_ico, command=self.openOnVGCollect)
        self.item_bookmark = LabelButton_(self.item_tool_frame, image=self.item_bookmark_ico, command=self.toggleBookmark)
        self.item_finished = LabelButton_(self.item_tool_frame, image=self.item_finished_ico, command=self.toggleFinished)
        self.item_id = Label_(self.item_tool_frame)
        self.item_spacer = Label_(self.item_tool_frame)
        self.item_tool_frame.columnconfigure(0, weight=1)
        self.item_open_website.grid(row=0, column=1, sticky="ne", padx=3, pady=5)
        self.item_bookmark.grid(row=0, column=2, sticky="ne", padx=(3,0), pady=5)
        self.item_finished.grid(row=0, column=3, sticky="ne", padx=(3,0), pady=5)
        self.item_spacer.grid(row=1, column=0)
        self.item_id.grid(row=1, column=1, columnspan=4, sticky="e", padx=(3,0))

    ######################
    # update
    # --------------------
    def update(self, refresh = False):
        """Refresh all displayed fields and covers from the active item.

        NOTE(review): this overrides tkinter's ``update()`` with a different
        meaning -- presumably intentional; confirm no caller relies on the
        tkinter semantics.
        """
        # Show basic item data
        self.item_title.set(self.activeItem().name)
        self.item_date.set(locDate(self.activeItem().date, showDay=True))
        self.item_dateAdded.set(locDate(self.activeItem().dateAdded, showDay=True))
        self.item_price.set(locCurrency(self.activeItem().price))
        self.item_id.set("VGC ID: " + str(self.activeItem().VGC_id))
        # Update front cover
        self.updateCover(self.activeItem(), VAR.COVER_TYPE_FRONT, self.item_front, refresh)
        # Update back cover
        self.updateCover(self.activeItem(), VAR.COVER_TYPE_BACK, self.item_back, refresh)
        # Update cart cover
        self.updateCover(self.activeItem(), VAR.COVER_TYPE_CART, self.item_cart, refresh)

    ######################
    # updateCover
    # --------------------
    def updateCover(self, item, coverType, widget, refresh = False):
        """Refresh one cover image asynchronously (download if needed)."""
        # Run cover update thread
        thread = threading.Thread(target=self.coverUpdateThread, args=(item, coverType, widget, refresh))
        thread.start()

    ######################
    # coverUpdateThread
    # --------------------
    def coverUpdateThread(self, item, coverType, widget, refresh):
        """Worker: show the cached cover, download a fresh one if required."""
        coverCached = False
        # Check if cover already cached
        if os.path.exists(VAR.getCoverPath(item, coverType)) or item.getLocalData("missingCover" + coverType):
            coverCached = True
        # Show currently known cover
        if coverCached or settings.get("display", "hideCoverLoadingAnimation", False):
            self.showCover(item, coverType, widget)
        # When the cover is not cached or shall be refreshed
        if coverCached == False or refresh:
            # Start loading animation
            if not settings.get("display", "hideCoverLoadingAnimation", False):
                widget.startAnimation(VAR.IMG_COVER_LOADING_120, 12, 100)
            # Download cover
            downloadCover(item, coverType, refresh)
            # Stop loading animation
            if not settings.get("display", "hideCoverLoadingAnimation", False):
                widget.stopAnimation()
            # Finally show the updated cover
            # but only if the user has not switched to another
            # entry since the download was initiated
            if item.VGC_id == self.activeItem().VGC_id:
                self.showCover(item, coverType, widget)

    ######################
    # onCoverEnter
    # --------------------
    def onCoverEnter(self, label, type):
        """Overlay view/refresh buttons on a cover while hovered."""
        self.coverButton_coverViewer = LabelButton_(label, image=self.item_view_ico,
            command=lambda:self.pop_coverViewer.show(type, self.activeItem()))
        self.coverButton_coverViewer.place(height=35, width=35, x=39, y=2)
        self.coverButton_coverUpdate = LabelButton_(label, image=self.item_refresh_ico,
            command=lambda:self.updateCover(self.activeItem(), type, label, True))
        self.coverButton_coverUpdate.place(height=35, width=35, x=2, y=2)

    ######################
    # onCoverLeave
    # --------------------
    def onCoverLeave(self, label):
        """Remove the hover overlay buttons again."""
        self.coverButton_coverViewer.destroy()
        self.coverButton_coverUpdate.destroy()

    ######################
    # openOnVGCollect
    # --------------------
    def openOnVGCollect(self):
        """Open the active item's VGCollect page in the browser."""
        if self.activeItem().VGC_id > 0:
            openItemInBrowser(str(self.activeItem().VGC_id))

    ######################
    # openVGCollectCollectionList
    # --------------------
    def openVGCollectCollectionList(self):
        """Open the online collection list at the active item's position."""
        username = settings.get("vgc", "username", "")
        if self.activeItem().VGC_id > 0:
            if len(username) == 0:
                messagebox.showinfo("VGC Analyze",
                    _("A VGCollect.com username is needed to access the online collection list.\n\n") +
                    _("Please provide a username at File > Settings > VGC"))
            else:
                # The online list paginates 25 items per page.
                page = math.ceil((self.activeItemIndex()+1) / 25)
                pageData = self.getOnlineCollectionListPage(username, str(page))
                # Find the HTML anchor id of the item so the browser can
                # jump straight to it. (Note: `id` shadows the builtin.)
                idStart = pageData.find("item_" + str(self.activeItem().VGC_id) + "_")
                idEnd = pageData.find("\"", idStart)
                id = pageData[idStart:idEnd]
                openVGCInBrowser(username + "/" + str(page) + "#" + id)

    ######################
    # showCover
    # --------------------
    def showCover(self, item, coverType, widget):
        """Display the locally cached cover image in ``widget``."""
        widget.setImage(VAR.getCoverPath(item, coverType))
|
import setuptools
# Read the long description once; the explicit encoding fixes installs on
# platforms whose default locale encoding is not UTF-8.
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name='m3u8',
    version='0.0.1',
    author='WaizungTaam',
    author_email='waizungtaam@gmail.com',
    license='MIT',
    description='Python m3u8 parser',
    long_description=long_description,
    packages=['m3u8'],
    include_package_data=True,
    install_requires=[
        'python-dateutil',
        'requests',
    ],
    extras_require={},
    python_requires='>=3.6',
)
|
#!/usr/bin/env python
# file: osversion.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2018-04-06 22:34:00 +0200
# Last modified: 2018-08-19T14:18:16+0200
"""Print the __FreeBSD_version. This is also called OSVERSION in scripts."""
from ctypes import CDLL
import sys
# Refuse to run on anything but FreeBSD.
if "freebsd" not in sys.platform:
    print("This script only works on FreeBSD!")
    sys.exit(1)

# Compile-time version: the first "#define" in osreldate.h carries it.
with open("/usr/include/osreldate.h") as h:
    header_lines = h.readlines()
define_lines = [ln for ln in header_lines if ln.startswith("#define")]
print("Compilation environment version:", define_lines[0].split()[-1])

# Run-time version: ask libc directly.
libc = CDLL("/lib/libc.so.7")
print("Execution environment version:", libc.getosreldate())
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Create Flask application, load configuration, and create
# the SQLAlchemy object
app = Flask(__name__)
# SQLite database file created in the working directory.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///network.db'
db = SQLAlchemy(app)
# This is the database model object
class Device(db.Model):
    """ORM row for a network device: hostname plus hardware vendor."""
    __tablename__ = 'devices'

    # Columns
    id = db.Column(db.Integer, primary_key=True)
    hostname = db.Column(db.String(120), index=True)
    vendor = db.Column(db.String(40))

    def __init__(self, hostname, vendor):
        self.hostname = hostname
        self.vendor = vendor

    def __repr__(self):
        return f'<Device {self.hostname!r}>'
if __name__ == '__main__':
    # Create the schema and seed two sample devices when run directly.
    db.create_all()
    r1 = Device('lax-dc1-core1', 'Juniper')
    r2 = Device('sfo-dc1-core1', 'Cisco')
    db.session.add(r1)
    db.session.add(r2)
    db.session.commit()
|
import cv2
class DisplayDraw(object):
def __init__(self, parent=None):
    """Set up the default OpenCV text style and the last-drawn text state."""
    # Default style for on-screen indicator messages.
    self.indicator_text_setting = {
        'fontFace': cv2.FONT_HERSHEY_SIMPLEX,
        'fontScale': 1.0,
        'color': (0, 255, 0),
        'thickness': 1,
        'lineType': cv2.LINE_AA
    }
    self.last_text = "Hello, I'm Miris"
    # NOTE(review): this aliases indicator_text_setting (no copy), so the
    # colour mutations done by the draw*Text helpers also change the
    # remembered setting -- confirm the sharing is intentional.
    self.last_text_setting = self.indicator_text_setting
def drawText(self, frame, text, text_setting, tech_zone=0):
"""
"Look at camera" text and face bounding box
"""
ts = text_setting
screen_height, screen_width, _ = frame.shape
text_color = ts['color']
# if text == "Verification failed":
# text_color = (0, 0, 255) # red
text_size = cv2.getTextSize(text, ts['fontFace'], ts['fontScale'],
ts['thickness'])
# print(text_size, screen_width)
if tech_zone == 0:
text_position = (
int((screen_width / 2) - (text_size[0][0] / 2)),
int(screen_height - 25)
)
elif tech_zone == 1:
text_position = (
int((screen_width / 2) - (text_size[0][0] / 2)),
int(25)
)
# print(text_position)
# print(ts)
cv2.putText(
frame, text, text_position,
ts['fontFace'], ts['fontScale'],
text_color,
thickness=ts['thickness'],
lineType=ts['lineType']
)
self.last_text = text
self.last_text_setting = text_setting
return frame
def drawFailedText(self, frame):
text = "Verification failed"
ts = self.indicator_text_setting
ts['color'] = (0, 0, 255) # red
frame = self.drawText(frame, text, ts)
return frame
def drawDefaultText(self, frame):
text = "Hello, I'm Miris"
ts = self.indicator_text_setting
ts['color'] = (255, 255, 255) # white
frame = self.drawText(frame, text, ts)
return frame
def drawLACText(self, frame):
text = "Please look at the camera"
ts = self.indicator_text_setting
ts['color'] = (255, 255, 255) # white
frame = self.drawText(frame, text, ts)
return frame
def drawSuccessText(self, frame, name):
text = "Verification success, hello {}!".format(name)
ts = self.indicator_text_setting
ts['color'] = (0, 255, 0) # green
frame = self.drawText(frame, text, ts)
return frame
def drawMCText(self, frame):
text = "Please move closer"
ts = self.indicator_text_setting
ts['color'] = (255, 255, 255) # white
frame = self.drawText(frame, text, ts)
return frame
def drawLastText(self, frame):
return self.drawText(frame, self.last_text, self.last_text_setting)
def drawFaceAtts(self, frame, emotion, age, gender):
ts = self.indicator_text_setting
ts['color'] = (255, 255, 255) # white
text = f"{gender}, {age}, Cam xuc: {emotion}"
return self.drawText(frame, text, ts, tech_zone=1)
|
"""Constants for the DLNA DMR component."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Final
from async_upnp_client.profiles.dlna import PlayMode as _PlayMode
from homeassistant.components.media_player import const as _mp_const
LOGGER = logging.getLogger(__package__)
DOMAIN: Final = "dlna_dmr"
# Config-entry option keys.
CONF_LISTEN_PORT: Final = "listen_port"
CONF_CALLBACK_URL_OVERRIDE: Final = "callback_url_override"
CONF_POLL_AVAILABILITY: Final = "poll_availability"
CONF_BROWSE_UNFILTERED: Final = "browse_unfiltered"
DEFAULT_NAME: Final = "DLNA Digital Media Renderer"
CONNECT_TIMEOUT: Final = 10  # seconds
# DLNA transfer-protocol identifiers; "*" matches any protocol.
PROTOCOL_HTTP: Final = "http-get"
PROTOCOL_RTSP: Final = "rtsp-rtp-udp"
PROTOCOL_ANY: Final = "*"
STREAMABLE_PROTOCOLS: Final = [PROTOCOL_HTTP, PROTOCOL_RTSP, PROTOCOL_ANY]
# Map UPnP class to media_player media_content_type
# (keys are UPnP/DIDL-Lite class hierarchy strings, most-generic first).
MEDIA_TYPE_MAP: Mapping[str, str] = {
    "object": _mp_const.MEDIA_TYPE_URL,
    "object.item": _mp_const.MEDIA_TYPE_URL,
    "object.item.imageItem": _mp_const.MEDIA_TYPE_IMAGE,
    "object.item.imageItem.photo": _mp_const.MEDIA_TYPE_IMAGE,
    "object.item.audioItem": _mp_const.MEDIA_TYPE_MUSIC,
    "object.item.audioItem.musicTrack": _mp_const.MEDIA_TYPE_MUSIC,
    "object.item.audioItem.audioBroadcast": _mp_const.MEDIA_TYPE_MUSIC,
    "object.item.audioItem.audioBook": _mp_const.MEDIA_TYPE_PODCAST,
    "object.item.videoItem": _mp_const.MEDIA_TYPE_VIDEO,
    "object.item.videoItem.movie": _mp_const.MEDIA_TYPE_MOVIE,
    "object.item.videoItem.videoBroadcast": _mp_const.MEDIA_TYPE_TVSHOW,
    "object.item.videoItem.musicVideoClip": _mp_const.MEDIA_TYPE_VIDEO,
    "object.item.playlistItem": _mp_const.MEDIA_TYPE_PLAYLIST,
    "object.item.textItem": _mp_const.MEDIA_TYPE_URL,
    "object.item.bookmarkItem": _mp_const.MEDIA_TYPE_URL,
    "object.item.epgItem": _mp_const.MEDIA_TYPE_EPISODE,
    "object.item.epgItem.audioProgram": _mp_const.MEDIA_TYPE_EPISODE,
    "object.item.epgItem.videoProgram": _mp_const.MEDIA_TYPE_EPISODE,
    "object.container": _mp_const.MEDIA_TYPE_PLAYLIST,
    "object.container.person": _mp_const.MEDIA_TYPE_ARTIST,
    "object.container.person.musicArtist": _mp_const.MEDIA_TYPE_ARTIST,
    "object.container.playlistContainer": _mp_const.MEDIA_TYPE_PLAYLIST,
    "object.container.album": _mp_const.MEDIA_TYPE_ALBUM,
    "object.container.album.musicAlbum": _mp_const.MEDIA_TYPE_ALBUM,
    "object.container.album.photoAlbum": _mp_const.MEDIA_TYPE_ALBUM,
    "object.container.genre": _mp_const.MEDIA_TYPE_GENRE,
    "object.container.genre.musicGenre": _mp_const.MEDIA_TYPE_GENRE,
    "object.container.genre.movieGenre": _mp_const.MEDIA_TYPE_GENRE,
    "object.container.channelGroup": _mp_const.MEDIA_TYPE_CHANNELS,
    "object.container.channelGroup.audioChannelGroup": _mp_const.MEDIA_TYPE_CHANNELS,
    "object.container.channelGroup.videoChannelGroup": _mp_const.MEDIA_TYPE_CHANNELS,
    "object.container.epgContainer": _mp_const.MEDIA_TYPE_TVSHOW,
    "object.container.storageSystem": _mp_const.MEDIA_TYPE_PLAYLIST,
    "object.container.storageVolume": _mp_const.MEDIA_TYPE_PLAYLIST,
    "object.container.storageFolder": _mp_const.MEDIA_TYPE_PLAYLIST,
    "object.container.bookmarkFolder": _mp_const.MEDIA_TYPE_PLAYLIST,
}
# Map media_player media_content_type to UPnP class. Not everything will map
# directly, in which case it's not specified and other defaults will be used.
# (Reverse direction of MEDIA_TYPE_MAP; several HA types share a UPnP class.)
MEDIA_UPNP_CLASS_MAP: Mapping[str, str] = {
    _mp_const.MEDIA_TYPE_ALBUM: "object.container.album.musicAlbum",
    _mp_const.MEDIA_TYPE_ARTIST: "object.container.person.musicArtist",
    _mp_const.MEDIA_TYPE_CHANNEL: "object.item.videoItem.videoBroadcast",
    _mp_const.MEDIA_TYPE_CHANNELS: "object.container.channelGroup",
    _mp_const.MEDIA_TYPE_COMPOSER: "object.container.person.musicArtist",
    _mp_const.MEDIA_TYPE_CONTRIBUTING_ARTIST: "object.container.person.musicArtist",
    _mp_const.MEDIA_TYPE_EPISODE: "object.item.epgItem.videoProgram",
    _mp_const.MEDIA_TYPE_GENRE: "object.container.genre",
    _mp_const.MEDIA_TYPE_IMAGE: "object.item.imageItem",
    _mp_const.MEDIA_TYPE_MOVIE: "object.item.videoItem.movie",
    _mp_const.MEDIA_TYPE_MUSIC: "object.item.audioItem.musicTrack",
    _mp_const.MEDIA_TYPE_PLAYLIST: "object.item.playlistItem",
    _mp_const.MEDIA_TYPE_PODCAST: "object.item.audioItem.audioBook",
    _mp_const.MEDIA_TYPE_SEASON: "object.item.epgItem.videoProgram",
    _mp_const.MEDIA_TYPE_TRACK: "object.item.audioItem.musicTrack",
    _mp_const.MEDIA_TYPE_TVSHOW: "object.item.videoItem.videoBroadcast",
    _mp_const.MEDIA_TYPE_URL: "object.item.bookmarkItem",
    _mp_const.MEDIA_TYPE_VIDEO: "object.item.videoItem",
}
# Translation of MediaMetadata keys to DIDL-Lite keys.
# See https://developers.google.com/cast/docs/reference/messages#MediaData via
# https://www.home-assistant.io/integrations/media_player/ for HA keys.
# See http://www.upnp.org/specs/av/UPnP-av-ContentDirectory-v4-Service.pdf for
# DIDL-Lite keys.
MEDIA_METADATA_DIDL: Mapping[str, str] = {
    "subtitle": "longDescription",
    "releaseDate": "date",
    "studio": "publisher",
    "season": "episodeSeason",
    "episode": "episodeNumber",
    "albumName": "album",
    "trackNumber": "originalTrackNumber",
}
# For (un)setting repeat mode, map a combination of shuffle & repeat to a list
# of play modes in order of suitability. Fall back to _PlayMode.NORMAL in any
# case. NOTE: This list is slightly different to that in SHUFFLE_PLAY_MODES,
# due to fallback behaviour when turning on repeat modes.
# Key: (shuffle_enabled, repeat_mode) -> candidate modes, best first.
REPEAT_PLAY_MODES: Mapping[tuple[bool, str], list[_PlayMode]] = {
    (False, _mp_const.REPEAT_MODE_OFF): [
        _PlayMode.NORMAL,
    ],
    (False, _mp_const.REPEAT_MODE_ONE): [
        _PlayMode.REPEAT_ONE,
        _PlayMode.REPEAT_ALL,
        _PlayMode.NORMAL,
    ],
    (False, _mp_const.REPEAT_MODE_ALL): [
        _PlayMode.REPEAT_ALL,
        _PlayMode.REPEAT_ONE,
        _PlayMode.NORMAL,
    ],
    (True, _mp_const.REPEAT_MODE_OFF): [
        _PlayMode.SHUFFLE,
        _PlayMode.RANDOM,
        _PlayMode.NORMAL,
    ],
    (True, _mp_const.REPEAT_MODE_ONE): [
        _PlayMode.REPEAT_ONE,
        _PlayMode.RANDOM,
        _PlayMode.SHUFFLE,
        _PlayMode.NORMAL,
    ],
    (True, _mp_const.REPEAT_MODE_ALL): [
        _PlayMode.RANDOM,
        _PlayMode.REPEAT_ALL,
        _PlayMode.SHUFFLE,
        _PlayMode.NORMAL,
    ],
}
# For (un)setting shuffle mode, map a combination of shuffle & repeat to a list
# of play modes in order of suitability. Fall back to _PlayMode.NORMAL in any
# case.
SHUFFLE_PLAY_MODES: Mapping[tuple[bool, str], list[_PlayMode]] = {
    (False, _mp_const.REPEAT_MODE_OFF): [
        _PlayMode.NORMAL,
    ],
    (False, _mp_const.REPEAT_MODE_ONE): [
        _PlayMode.REPEAT_ONE,
        _PlayMode.REPEAT_ALL,
        _PlayMode.NORMAL,
    ],
    (False, _mp_const.REPEAT_MODE_ALL): [
        _PlayMode.REPEAT_ALL,
        _PlayMode.REPEAT_ONE,
        _PlayMode.NORMAL,
    ],
    (True, _mp_const.REPEAT_MODE_OFF): [
        _PlayMode.SHUFFLE,
        _PlayMode.RANDOM,
        _PlayMode.NORMAL,
    ],
    (True, _mp_const.REPEAT_MODE_ONE): [
        _PlayMode.RANDOM,
        _PlayMode.SHUFFLE,
        _PlayMode.REPEAT_ONE,
        _PlayMode.NORMAL,
    ],
    (True, _mp_const.REPEAT_MODE_ALL): [
        _PlayMode.RANDOM,
        _PlayMode.SHUFFLE,
        _PlayMode.REPEAT_ALL,
        _PlayMode.NORMAL,
    ],
}
|
from setuptools import setup, find_packages
import pathlib
# Package metadata for the `quantities` distribution.
HERE = pathlib.Path(__file__).parent
# Read the README explicitly as UTF-8: read_text() otherwise uses the
# locale's preferred encoding and can fail on non-ASCII content.
README = (HERE / "README.md").read_text(encoding="utf-8")
setup(
    # NOTE(review): PyPI project names conventionally contain no spaces —
    # confirm the intended distribution name before publishing.
    name = "Physical Quantity calculating",
    version = '0.1.0',
    description=
    '''
    In arithmetic operations, the corresponding physical quantity is automatically generated
    and the corresponding coherent unit is selected
    ''',
    long_description = README,
    long_description_content_type="text/markdown",
    author='chenmich',
    author_email='403189920@qq.com',
    url='',
    packages=['quantities'],
)
import os
import re
import torch
import numpy as np
import ConfigSpace
from functools import partial, wraps
from pathlib import Path
from torch.autograd import Variable
from ConfigSpace.read_and_write import json as cs_json
from nes.darts.baselearner_train.genotypes import Genotype, PRIMITIVES
def only_numeric_fn(x):
    """Return the integer formed by the digits of *x* (all non-digits stripped)."""
    return int(re.sub("[^0-9]", "", x))


# Sort strings by their embedded numeric value, e.g. "edge_10" after "edge_2".
custom_sorted = partial(sorted, key=only_numeric_fn)
def drop_path(x, drop_prob):
    # Stochastic depth: with probability `drop_prob` zero out each *example*
    # in the batch (mask shape [batch, 1, 1, 1]) and rescale survivors by
    # 1/keep_prob so the expected activation stays unchanged. Mutates x
    # in place and returns it.
    # NOTE(review): torch.cuda.FloatTensor requires a CUDA device, and
    # autograd.Variable is deprecated (plain tensors suffice) — confirm
    # before modernizing.
    if drop_prob > 0.:
        keep_prob = 1.-drop_prob
        mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
        x.div_(keep_prob)
        x.mul_(mask)
    return x
class AvgrageMeter(object):
    """Tracks a running weighted average (e.g. of a per-batch metric)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Fold in value *val*, weighted as *n* observations."""
        self.sum, self.cnt = self.sum + val * n, self.cnt + n
        self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
def _data_transforms_cifar10(args):
    """Build (train_transform, valid_transform) pipelines for CIFAR-10.

    Training adds random crop/flip augmentation (plus Cutout when
    args.cutout is set); both pipelines normalize with the CIFAR-10
    per-channel mean/std.
    """
    # NOTE(review): `transforms` (presumably torchvision.transforms) and
    # `Cutout` are not imported in this module — verify they are provided
    # elsewhere, otherwise this raises NameError at call time.
    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform
def count_parameters_in_MB(model):
    """Return the parameter count of *model* in millions, skipping any
    parameter whose name contains "auxiliary" (auxiliary-head weights).

    Uses the builtin sum: np.sum over a generator is deprecated and falls
    back to object-dtype accumulation instead of summing element-wise.
    """
    return sum(
        np.prod(v.size())
        for name, v in model.named_parameters()
        if "auxiliary" not in name
    ) / 1e6
def save_checkpoint(state, is_best, save):
    """Serialize *state* to <save>/checkpoint.pth.tar; when *is_best*, also
    copy it to <save>/model_best.pth.tar."""
    # Local import: shutil was referenced but never imported at module level,
    # so the is_best branch raised NameError.
    import shutil
    filename = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, filename)
    if is_best:
        best_filename = os.path.join(save, 'model_best.pth.tar')
        shutil.copyfile(filename, best_filename)
def save(model, model_path):
    """Write *model*'s parameters (its state dict) to *model_path*."""
    state = model.state_dict()
    torch.save(state, model_path)
def load(model, model_path):
    """Restore *model*'s parameters from the state dict stored at *model_path*."""
    state = torch.load(model_path)
    model.load_state_dict(state)
def sample_random_genotype(steps, multiplier):
    """Function to sample a random genotype (architecture).
    Args:
        steps (int): number of intermediate nodes in the DARTS cell
        multiplier (int): number of nodes to concatenate in the output cell
    Returns:
        nes.optimizers.baselearner_train.genotypes.Genotype:
            the randomly sampled genotype
    """
    def _parse():
        # Build one cell: for each intermediate node i, pick two distinct
        # predecessor nodes, then a random operation per edge, resampling
        # until the op is not 'none'.
        gene = []
        n = 2
        start = 0
        for i in range(steps):
            end = start + n
            # Node i may connect to the 2 cell inputs and nodes 0..i-1.
            edges = np.random.choice(range(i + 2), 2, False).tolist()
            for j in edges:
                # NOTE(review): 8 is presumably len(PRIMITIVES) — confirm.
                k_best = np.random.choice(list(range(8)))
                while k_best == PRIMITIVES.index('none'):
                    k_best = np.random.choice(list(range(8)))
                gene.append((PRIMITIVES[k_best], j))
            start = end
            n += 1
        return gene
    gene_normal, gene_reduce = _parse(), _parse()
    # Concatenate the last `multiplier` intermediate nodes as the cell output.
    concat = range(2+steps-multiplier, steps+2)
    genotype = Genotype(
        normal=gene_normal, normal_concat=concat,
        reduce=gene_reduce, reduce_concat=concat
    )
    return genotype
def create_genotype(func):
    """Decorator: build a complete Genotype by calling *func* once per cell
    type ('normal' first, then 'reduce'), with output nodes 2..5."""
    @wraps(func)
    def genotype_wrapper(*args, **kwargs):
        normal_cell = func(*args, cell_type='normal', **kwargs)
        reduce_cell = func(*args, cell_type='reduce', **kwargs)
        output_nodes = list(range(2, 6))
        return Genotype(normal_cell, output_nodes, reduce_cell, output_nodes)
    return genotype_wrapper
@create_genotype
def parse_config(config, config_space, cell_type):
    """Function that converts a ConfigSpace representation of the architecture
    to a Genotype.
    """
    cell = []
    config = ConfigSpace.Configuration(config_space, config)
    # Active edge hyperparameters for this cell type, ordered by their
    # embedded numeric id so they pair up with the nodes below.
    edges = custom_sorted(
        list(
            filter(
                re.compile('.*edge_{}*.'.format(cell_type)).match,
                config_space.get_active_hyperparameters(config)
            )
        )
    ).__iter__()
    # Active "inputs_node" hyperparameters, same ordering scheme.
    nodes = custom_sorted(
        list(
            filter(
                re.compile('.*inputs_node_{}*.'.format(cell_type)).match,
                config_space.get_active_hyperparameters(config)
            )
        )
    ).__iter__()
    # The first two edges always connect to cell inputs 0 and 1.
    op_1 = config[next(edges)]
    op_2 = config[next(edges)]
    cell.extend([(op_1, 0), (op_2, 1)])
    # Each later node consumes two ops plus an "x_y" string naming its inputs.
    for node in nodes:
        op_1 = config[next(edges)]
        op_2 = config[next(edges)]
        input_1, input_2 = map(int, config[node].split('_'))
        cell.extend([(op_1, input_1), (op_2, input_2)])
    return cell
|
from iso3166 import countries
# Alert message types.
TYPE = ('alert', 'update', 'test', 'cancel')
# ISO 3166 numeric code -> country name, ordered by numeric code.
COUNTRY = {y[0]:y[1] for y in sorted(((x.numeric, x.name) for x in countries), key=lambda x: int(x[0]))}
# Alert providers (placeholder).
PROVIDER = ('none', )
# Alert categories: (group code, (event names...)) pairs.
# NOTE(review): some entries look misplaced relative to their group name
# (e.g. 'missing person' and 'air pollution' under 'fire') — presumably the
# grouping follows an external protocol table; confirm before reordering.
CATEGORY = (
    ('geo1', (
        'earthquake',
        'tsunami',
        'sinkhole',
        'avalanche',
        'satellite debris',
        'pyroclastic flow',
        'lava flow',
        'volcanic mud flow',
        'glacial ice avalanche',
        'tidal wave',
        'landslide'
        )
    ),
    ('geo2', (
        'debris flow',
        'ash fall',
        'volcanic eruption'
        )
    ),
    ('met1', (
        'typhoon',
        'tornado',
        'storm',
        'hail',
        'dust storm',
        'storm surge',
        'heavy rain',
        'black ice',
        'high uv radiation',
        'plague of insects',
        'pest infestation',
        'epizootic',
        'contaminated drinking water'
        )
    ),
    ('met2', (
        'heavy snow',
        'flood',
        'lightning',
        'extreme heat',
        'frost',
        'derecho',
        'fog',
        'snow drifts'
        ),
    ),
    ('safety', (
        'chemical hazard',
        'biological hazard',
        'radiological hazard',
        'nuclear hazard',
        'explosive hazard',
        'unidentified animal',
        'chemical accident',
        'hazardous material accident',
        'demonstration',
        'odour nuisance',
        'major event',
        'risk of infection',
        'noise pollution',
        'food safety alert',
        'safety warning'
        )
    ),
    ('security', (
        'shooting',
        'ballistic missile attack',
        'guerrilla attack',
        'large-scale terrorism',
        'air strike',
        'hijack',
        'chemical attack',
        'explosive attack',
        'nuclear weapon attack',
        'life threatening situation',
        'health hazard',
        'first/second world war bomb',
        'bomb discovery',
        'it system outage'
        )
    ),
    ('transport', (
        'maritime disaster',
        'train accident',
        'bridge collapse',
        'aircraft crash',
        'oil spill',
        'road traffic accident',
        'traffic alert',
        'gas supply outage',
        'emergency number outage',
        'telephone line outage',
        'power outage'
        )
    ),
    ('fire', (
        'forest fire',
        'structure fire',
        'solar storm',
        'missing person',
        'air pollution',
        'building collapse',
        'dam failure',
        'dike failure',
        'fire gases',
        'risk of fire',
        'gas leak',
        'nuclear power station accident',
        'raw sewage',
        'siren test',
        'warning',
        'acid rain'
        )
    )
)
# NOTE(review): "SERVERITY" looks like a typo for "SEVERITY"; kept as-is
# because other modules may import this name.
SERVERITY = ('extreme', 'severe', 'moderate', 'minor')
# Human-readable duration buckets, in increasing order.
DURATION = ('no duration',
            'duration < 0.25 hour',
            '0.25 <= duration < 0.5 hour',
            '0.5 <= duration < 0.75 hour',
            '0.75 <= duration < 1 hour',
            '1 <= duration < 1.5 hour ',
            '1.5 <= duration < 2 hour',
            '2 <= duration < 3 hour',
            '3 <= duration < 4 hour',
            '4 <= duration < 6 hour',
            '6 <= duration < 8 hour',
            '8 <= duration < 12 hour',
            '12 <= duration < 18 hour',
            '18 <= duration < 24 hour',
            '24 <= duration < 48 hour',
            '45 <= duration hour'  # NOTE(review): '45' looks like a typo for '48' — confirm before changing (text may be matched elsewhere)
            )
GUIDANCE_TO_REACT = ('international guidance library',
                     'national guidance library',
                     'regional guidance library',
                     'new guidance library under validation'
                     )
# Bucket boundaries that roughly double each step — presumably radii/extents
# in meters for the affected area; confirm against the protocol spec.
AXIS = (
    316,
    635,
    1277,
    2565,
    5154,
    10355,
    20806,
    41803,
    83993,
    168761,
    339081,
    681292,
    1368875,
    2750388,
    5526170,
    11103363
    )
RESPONSE = ('none', )
INSTRUCTION = ('none', )
PARAM_TYPE = ('none', )
# Quantization step for angles encoded in 16 bits over a 180-degree range.
DEG_INTERVAL = 180/((2**16)-1)
# Quantization step for azimuths encoded in 5 bits over a 180-degree range.
AZIMUTH_INTERVAL = 180/((2**5)-1)
# Field widths in bits of the encoded message, in order — TODO confirm.
POSITION = (2, 10, 4, 3, 4, 2, 5, 5, 6, 4, 2, 4, 4, 16, 17, 4, 4, 5, 21)
|
#-*- coding: UTF-8 -*-
import sys, os, click
from . import browser
# Shared scraper/session instance used by every CLI command below.
learn = browser.Learn()
@click.command(help = 'Download all course files')
def download():
    """Fetch every file group and the homework for each enrolled lesson."""
    learn.init()
    for lesson in learn.get_lessons():
        # lesson[0] is the lesson id, lesson[1] its display name.
        click.echo("Check " + lesson[1])
        for group in learn.get_files_id(lesson[0]):
            learn.download_files(lesson[0], lesson[1], group)
        learn.download_homework(lesson[0], lesson[1])
@click.command(help = 'Reset configurations.')
def reset():
    """Re-prompt for the user's credentials and the download path."""
    learn.set_user()
    learn.set_path()
@click.command(help = 'Show configurations.')
def config():
    """Print the stored username and download path."""
    username, _ = learn.get_user()
    saved_path = learn.get_path()
    print(f'Username: {username}')
    print(f'Path: {saved_path}')
@click.command(help = 'Clear records of all downloaded files.')
def clear():
    """Forget which files have already been downloaded."""
    learn.set_local()
@click.command(help = 'Submit homework.')
@click.argument('name', default = '')
@click.option('-m', default = '', help = 'The message to submit')
def submit(name, m):
    """Upload homework file *name* with message *m*.

    The homework id is read from a ``.xszyid`` file in the current
    directory (left there by the download step)."""
    learn.init()
    id_path = '.' + os.sep + ".xszyid"
    if not os.path.exists(id_path):
        print("Homework Id Not Found!")  # fixed typo: was "Homwork"
        return
    if name != '' and not os.path.exists('.' + os.sep + name):
        print("Upload File Not Found!")
        return
    # `with` closes the file automatically; the explicit close() inside the
    # with-block was redundant.
    with open(id_path, 'r') as f:
        xszyid = f.read().strip()
    learn.upload(xszyid, '.' + os.sep + name, m)
def align(string, length=0):
    """Pad *string* with spaces to *length* display columns.

    Multi-byte (e.g. CJK) characters are counted as two columns wide,
    estimated from the UTF-8 byte length. Returns the string unchanged
    when it is already at least *length* columns."""
    char_count = len(string)
    byte_count = len(string.encode('utf-8'))
    display_width = char_count + (byte_count - char_count) // 2
    padding = length - display_width
    return string + ' ' * padding
@click.command(help = 'Show homework deadlines.')
def ddl():
    """List every pending homework deadline in aligned columns."""
    learn.init()
    deadlines = learn.get_ddl()
    print('Total %d ddl(s)' % len(deadlines))
    for entry in deadlines:
        course = align(entry[0][0:8], 25)
        title_and_date = align(entry[1][0:20], 30) + align(entry[3][0:20], 30)
        print(course, title_and_date, entry[4])
@click.group()
def main():
    # Root command group; the subcommands are attached below.
    pass
main.add_command(download)
main.add_command(reset)
main.add_command(clear)
main.add_command(submit)
main.add_command(ddl)
main.add_command(config)
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
import math
import random
'''
The AutoSortedArray class encapsulates a list and arranges such that
if we add items one by one (using the .append() method), we do not have
to remember to sort the list after populating it-- items are inserted
in sorted order.
A binary search is done to determine the correct index to insert to
maintain the sort order. Thus, adding n items to a previously empty
collection is O(n*log(n)), just as adding n items and then sorting the list
would be.
'''
class AutoSortedArray:
    """A list wrapper that keeps its elements in ascending sorted order,
    with no duplicates, as items are appended one by one.

    append() binary-searches for the insertion point, so populating an
    empty collection with n items costs O(n*log n) comparisons — the same
    as appending everything and sorting once — without having to remember
    to sort afterwards.
    """

    def __init__(self):
        self._array = []

    def __str__(self):
        return str(self._array)

    def append(self, item):
        """Insert *item* at its sorted position.

        Raises:
            Exception: if an equivalent item is already present.
        """
        i = self.index_of_successor(item)
        # list.insert shifts elements in place; the previous slicing
        # (`self._array[:i] + [item] + self._array[i:]`) built two
        # throwaway copies of the whole list on every append.
        self._array.insert(i, item)

    def __contains__(self, item):
        return self.index_of(item) is not None

    def index_of_successor(self, item):
        """Return the index at which *item* must be inserted to keep the
        array sorted: the smallest index whose element is larger than
        *item*, or len(self._array) if no element is larger.

        Preconditions: the array is sorted ascending with no duplicates;
        *item* is comparable to the elements and not already present.
        """
        if 0 == len(self._array):
            return 0
        result = self.index_of_successor_in_range(item, 0, len(self._array)-1)
        if result is None:
            # No element is larger: the successor position is the end.
            return len(self._array)
        return result

    def index_of_successor_in_range(self, item, low_end, high_end):
        """Recursive binary-search helper for index_of_successor().

        Returns the smallest index in the inclusive range
        [low_end, high_end] whose element is larger than *item*, or None
        if every element in the range is smaller. Raises if an element
        equivalent to *item* is found (duplicates are forbidden).
        """
        # Every element in this range is smaller than item.
        if item > self._array[high_end]:
            return None
        if low_end == high_end:
            if item < self._array[low_end]:
                return low_end
            # item > element was ruled out above, so this is a duplicate.
            raise Exception("Attempt to add duplicate item")
        mid = (low_end + high_end) // 2
        if item <= self._array[mid]:
            # The successor (or a duplicate) lies in the first half.
            return self.index_of_successor_in_range(item, low_end, mid)
        # Otherwise it lies strictly after mid.
        return self.index_of_successor_in_range(item, mid + 1, high_end)

    def index_of(self, item):
        """Return the index of *item* in the array, or None if absent."""
        return self.index_of_in_range(item, 0, len(self._array)-1)

    def index_of_in_range(self, item, low_end, high_end):
        """Recursive binary search for *item* within the inclusive index
        range [low_end, high_end]; returns its index or None."""
        # Empty search space.
        if low_end > high_end:
            return None
        if self._array[low_end] == item:
            return low_end
        # One element left, and it is not item (checked just above).
        if low_end == high_end:
            return None
        mid = (low_end + high_end) // 2
        if self._array[mid] >= item:
            # low_end has already been ruled out, so search (low_end, mid].
            return self.index_of_in_range(item, low_end + 1, mid)
        # item is strictly larger than array[mid]: search the second half.
        return self.index_of_in_range(item, mid + 1, high_end)

    def __len__(self):
        return len(self._array)

    def __getitem__(self, key):
        return self._array[key]
def run_unit_tests():
    """Smoke-test AutoSortedArray: sorted order, membership, and duplicate
    rejection, for both ints and strings."""
    a = AutoSortedArray()
    a.append(3)
    try:
        a.append(3)
        exception_raised = False
    except Exception:  # was a bare `except:`, which also swallows SystemExit
        exception_raised = True
    if not exception_raised:
        raise Exception("Should've raised exception on duplicate 3")
    if 3 not in a:
        raise Exception("Couldn't find 3 in a")
    if 5 in a:
        raise Exception("Hey, I didn't add 5 to a")
    # Fuzz: insert ~250 distinct random ints and verify ascending order.
    for _ in range(250):
        r = random.randint(-250, 250)
        if r not in a:
            a.append(r)
    print(a)
    for i in range(1, len(a)):
        if a[i-1] > a[i]:
            raise Exception("Yo, didn't work!")
    if 27 not in a:
        a.append(27)
    try:
        a.append(27)
        exception_raised = False
    except Exception:  # narrowed from bare `except:`
        exception_raised = True
    if not exception_raised:
        raise Exception("Should've raised exception on duplicate 27")
    # Strings sort too, via the ordinary comparison operators.
    a = AutoSortedArray()
    a.append("banana")
    a.append("date")
    a.append("cherry")
    a.append("apple")
    print(a)
    for i in range(1, len(a)):
        if a[i-1] > a[i]:
            raise Exception("Yo, didn't work!")
if __name__ == "__main__":
run_unit_tests()
|
"""BackgroundFrame for the App."""
from __future__ import annotations
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import (
QFrame,
QGraphicsDropShadowEffect,
QHBoxLayout,
QWidget,
)
class BackgroundFrame(QFrame):
    """BackgroundFrame for the App."""
    def __init__(self, parent: QWidget | None = None) -> None:
        """Create a new BackgroundFrame."""
        super().__init__(parent=parent)
        # Object name is the selector used by set_stylesheet() below.
        self.setObjectName("app_background")
        layout = QHBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(2)
        # Soft, centered drop shadow (zero offset) around the frame.
        shadow = QGraphicsDropShadowEffect()
        shadow.setBlurRadius(20)
        shadow.setXOffset(0)
        shadow.setYOffset(0)
        shadow.setColor(QColor(0, 0, 0, 160))
        self.setGraphicsEffect(shadow)
    def set_stylesheet(self, border_radius: int, border_size: int) -> None:
        """Set the stylesheet with custom border radius and size."""
        # NOTE(review): `border-radius: {border_radius};` carries no px unit
        # and `border: {border_size}px;` gives only a width (no style/color)
        # — confirm Qt applies both as intended.
        self.setStyleSheet(
            f"""
            #app_background {{
                border-radius: {border_radius};
                border: {border_size}px;
            }}
            """
        )
|
from __future__ import annotations
from collections import defaultdict
from typing import Dict, List, Tuple
from .solution import Solution
# Type alias: bag colors are plain strings like "shiny gold".
BagColor = str
SHINY_GOLD = "shiny gold"  # the bag color both puzzle tasks ask about
class BagsRules:
    """Directed containment rules between bag colors.

    Stored as an adjacency list: container color -> list of
    (contained color, count) pairs.
    """
    __slots__ = "_graph"

    def __init__(self):
        self._graph: Dict[BagColor,
                          List[Tuple[BagColor, int]]] = defaultdict(list)

    def _add_edge(self, u: BagColor, v: BagColor, c: int) -> None:
        """Record that one *u* bag holds *c* bags of color *v*."""
        self._graph[u].append((v, c))

    def _reverse(self) -> BagsRules:
        """Return a new BagsRules with every edge direction flipped."""
        flipped = BagsRules()
        for container, edges in self._graph.items():
            for contained, count in edges:
                flipped._add_edge(contained, container, count)
        return flipped

    def _count_nodes_reachable_from(self, bag: BagColor) -> int:
        """Depth-first count of nodes reachable from *bag* (inclusive)."""
        seen = {bag}
        pending = [bag]
        reachable = 0
        while pending:
            node = pending.pop()
            reachable += 1
            for neighbour, _count in self._graph[node]:
                if neighbour not in seen:
                    seen.add(neighbour)
                    pending.append(neighbour)
        return reachable

    def count_bag_colors_containing(self, bag: BagColor) -> int:
        """How many distinct colors can (transitively) contain *bag*."""
        # Walk the reversed graph; subtract 1 so *bag* itself is excluded.
        return self._reverse()._count_nodes_reachable_from(bag) - 1

    def count_bags_inside(self, bag: BagColor) -> int:
        """Total number of bags nested (recursively) inside one *bag*."""
        return sum(
            count * (self.count_bags_inside(inner) + 1)
            for inner, count in self._graph[bag]
        )

    @classmethod
    def from_rules_text(cls, rules_text: str) -> BagsRules:
        """Parse one rule per line into a BagsRules graph."""
        rules = cls()
        for line in rules_text.splitlines():
            container, contents = cls._parse_bag_rule(line)
            for inner, count in contents:
                rules._add_edge(container, inner, count)
        return rules

    @staticmethod
    def _parse_bag_rule(bag_rule: str) -> Tuple[str, List[Tuple[str, int]]]:
        """Split 'X Y bags contain N A B bags, ...' into its parts."""
        tokens = bag_rule.split()
        container = " ".join(tokens[:2])
        contents: List[Tuple[str, int]] = []
        if tokens[4] == "no":  # "... contain no other bags."
            return container, contents
        # Groups of 4 tokens: count, two color words, trailing "bag(s)[,.]".
        for i in range(4, len(tokens), 4):
            count_token, *color_words = tokens[i:i+3]
            contents.append((" ".join(color_words), int(count_token, base=10)))
        return container, contents
class Day07(Solution):
    """Advent-of-Code day 7: bag containment rules."""

    def first_task(self, bags_text: str) -> str:
        """Count the colors that can transitively hold a shiny gold bag."""
        rules = BagsRules.from_rules_text(bags_text)
        return str(rules.count_bag_colors_containing(SHINY_GOLD))

    def second_task(self, bags_text: str) -> str:
        """Count the bags packed inside one shiny gold bag."""
        rules = BagsRules.from_rules_text(bags_text)
        return str(rules.count_bags_inside(SHINY_GOLD))
|
# Philip Brady
# This is the Weekly Task 6.
# The program takes a positive floating-point
# number as input and outputs an approximation
# of its square root. The function called sqrt
# does this.
# Imported the math module.
import math
# Created a function that takes the value x.
def sqrt(x):
    """Return the square root of *x*, rounded to one decimal place."""
    return round(math.sqrt(x), 1)
# Created the variable number which would be
# assigned the inputted floating point number.
# NOTE(review): a negative input makes math.sqrt raise a ValueError
# (math domain error) — there is no validation here.
number = float(
    input("Please enter a positive number: "))
# Called the sqrt function from the print function.
print("The square root of {} is approx. {}."
      .format(number, sqrt(number)))
# References:
# [1] “A Whirlwind Tour of Python by Jake VanderPlas (O’Reilly).
# Copyright 2016 O’Reilly Media, Inc., 978-1-491-96465-1.”
# [2] https://docs.python.org/3/library/math.html |
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
from python_toolbox.nifty_collections import WeakKeyDefaultDict
from python_toolbox import gc_tools
class WeakreffableObject:
    """Trivial object type that supports weak references."""

    def __lt__(self, other):
        # Order by memory address: arbitrary but total, which is all the
        # sorting in the tests needs.
        return id(self) < id(other)
def test():
    '''Test the basic workings of `WeakKeyDefaultDict`.'''
    wkd_dict = WeakKeyDefaultDict(default_factory=lambda: 7)
    weakreffable_object_0 = WeakreffableObject()
    weakreffable_object_1 = WeakreffableObject()
    weakreffable_object_2 = WeakreffableObject()
    weakreffable_object_3 = WeakreffableObject()
    # Explicit assignment wins over the default factory.
    wkd_dict[weakreffable_object_0] = 2
    assert wkd_dict[weakreffable_object_0] == 2
    # Looking up a missing key materializes (and stores) the default, 7.
    assert wkd_dict[weakreffable_object_1] == 7
    assert wkd_dict[weakreffable_object_2] == 7
    assert weakreffable_object_0 in wkd_dict
    assert weakreffable_object_1 in wkd_dict
    assert weakreffable_object_2 in wkd_dict
    assert 'meow' not in wkd_dict
    assert sorted(wkd_dict.items()) == sorted(wkd_dict.items()) == sorted(
        ((weakreffable_object_0, 2),
         (weakreffable_object_1, 7),
         (weakreffable_object_2, 7), )
    )
    assert set(wkd_dict.iterkeys()) == set(wkd_dict.keys()) == \
        {ref() for ref in wkd_dict.iterkeyrefs()} == \
        {ref() for ref in wkd_dict.keyrefs()} == \
        {weakreffable_object_0, weakreffable_object_1, weakreffable_object_2}
    # Dropping the only strong reference must evict the entry after a GC.
    weakreffable_object_3 = WeakreffableObject()
    wkd_dict[weakreffable_object_3] = 123
    assert len(list(wkd_dict.keys())) == 4
    del weakreffable_object_3
    gc_tools.collect()
    assert len(list(wkd_dict.keys())) == 3
    assert wkd_dict.pop(weakreffable_object_2) == 7
    assert len(wkd_dict) == 2
    popped_key, popped_value = wkd_dict.popitem()
    assert popped_key in (weakreffable_object_0, weakreffable_object_1)
    assert popped_value in (2, 7)
    weakreffable_object_4 = WeakreffableObject()
    weakreffable_object_5 = WeakreffableObject()
    weakreffable_object_6 = WeakreffableObject()  # NOTE(review): unused — intentional?
    assert weakreffable_object_4 not in wkd_dict
    wkd_dict.setdefault(weakreffable_object_4, 222)
    assert wkd_dict[weakreffable_object_4] == 222
    wkd_dict.update({weakreffable_object_5: 444,})
    assert wkd_dict[weakreffable_object_5] == 444
from inspect import signature, Signature, Parameter
from typing import Any, _TypedDictMeta, T_co, Union, _GenericAlias
from i2.errors import InputError
# Placeholder for mappings of non-JSON-native types (empty so far).
COMPLEX_TYPE_MAPPING = {}
# Python types that map directly onto JSON schema types.
JSON_TYPES = [list, str, int, float, dict, bool]
def mk_sub_dict_schema_from_typed_dict(typed_dict):
    """Build an openAPI-style (properties, required_properties) pair
    describing the fields of a TypedDict.

    *properties* maps field name -> schema fragment; *required_properties*
    lists field names that are mandatory (only populated when the
    TypedDict is total and the field is not Optional).
    """
    total = getattr(typed_dict, '__total__', False)
    required_properties = []
    def set_property(key, value):
        # Unwrap Optional[X] (i.e. Union[X, None]) into X, noting optionality.
        optional = False
        if getattr(value, '__origin__', None) == Union:
            optional = type(None) in value.__args__
            value = [x for x in value.__args__ if type(None) != x][0]
        if total and not optional:
            required_properties.append(key)
        if value in JSON_TYPES:
            properties[key]['type'] = value
        elif getattr(value, '_name', None) == 'Iterable':
            # Iterable[...] becomes a JSON array with a nested item schema.
            properties[key]['type'] = list
            properties[key]['items'] = mk_sub_list_schema_from_iterable(value)
        elif isinstance(value, _TypedDictMeta):
            # Nested TypedDict -> nested object schema (mutual recursion
            # with this function via mk_sub_list_schema_from_iterable too).
            properties[key]['type'] = dict
            (
                sub_dict_props,
                sub_dict_required_props,
            ) = mk_sub_dict_schema_from_typed_dict(value)
            properties[key]['properties'] = sub_dict_props
            if sub_dict_required_props:
                properties[key]['required'] = sub_dict_required_props
    # `properties` is closed over by set_property; it is assigned before
    # the first call, so the closure resolves correctly.
    properties = {}
    for key, value in typed_dict.__annotations__.items():
        properties[key] = {}
        # Default to Any; set_property narrows this where possible.
        properties[key]['type'] = Any
        set_property(key, value)
    return properties, required_properties
def mk_sub_list_schema_from_iterable(iterable_type):
result = {}
items_type = iterable_type.__args__[0]
if type(items_type) == _TypedDictMeta:
result['type'] = dict
sub_dict_props, sub_dict_required = mk_sub_dict_schema_from_typed_dict(
items_type
)
result['properties'] = sub_dict_props
if sub_dict_required:
result['required'] = sub_dict_required
elif getattr(items_type, '_name', None) == 'Iterable':
result['type'] = list
result['items'] = mk_sub_list_schema_from_iterable(items_type)
elif items_type in JSON_TYPES:
result['type'] = items_type
else:
result['type'] = Any
return result
# changes: simplified from sig.parameters[key] to looping over items of parameters
# changes: added include_func_params and handling
# changes: added docs and doctests
def mk_input_schema_from_func(func, exclude_keys=None, include_func_params=False):
    """Make the openAPI input schema for a function.

    :param func: A callable
    :param exclude_keys: keys to exclude in the schema
    :param include_func_params: Boolean indicating whether the python Parameter
        objects should also be included (under the field `x-py-param`)
    :return: An openAPI input schema dict

    >>> from py2http.schema_tools import mk_input_schema_from_func
    >>> import typing
    >>>
    >>> def add(a, b: float = 0.0) -> float:
    ...     '''Adds numbers'''
    ...     return a + b
    ...
    >>> got = mk_input_schema_from_func(add)
    >>> expected = {
    ...     'type': dict,
    ...     'properties': {
    ...         'a': {'type': typing.Any},
    ...         'b': {'default': 0.0, 'type': float}},
    ...     'required': ['a']}
    >>> assert got == expected, f"\\n expected {expected}\\n got {got}"
    >>>
    >>>
    >>> # TODO: Look into this one: it results in a x default=None (there should be no default)
    >>> # and a type for y (there should be no type, unless by convention)
    >>> def mult(x: float, y=1, z: int=1):
    ...     return (x * y) ** z
    ...
    >>> got = mk_input_schema_from_func(mult)
    >>> expected = {
    ...     'type': dict,
    ...     'properties': {
    ...         'x': {'type': float},
    ...         'y': {'default': 1, 'type': int},
    ...         'z': {'type': int, 'default': 1}},
    ...     'required': ['x']}
    >>> assert got == expected, f"\\n expected {expected}\\n got {got}"
    """
    if not exclude_keys:
        exclude_keys = set()  # was {}: a set is the right container for membership tests
    input_properties = {}
    required_properties = []
    input_schema = {'type': dict, 'properties': input_properties}
    params = signature(func).parameters
    for key, param in params.items():
        if key in exclude_keys:
            continue
        default_type = Any
        p = {}
        # Compare against the sentinel with `is not`: `!=` would invoke the
        # default value's __ne__, which can misbehave or raise for rich
        # objects (e.g. numpy arrays return an elementwise array).
        if param.default is not Parameter.empty:
            default = param.default
            if type(default) in JSON_TYPES:
                default_type = type(default)
            p['default'] = default
        elif (
            param.kind == Parameter.VAR_POSITIONAL
        ):  # TODO: See how to handle a tuple instead of a list (not JSON compatible)
            p['default'] = []
            default_type = list
        elif param.kind == Parameter.VAR_KEYWORD:
            p['default'] = {}
            default_type = dict
        else:
            # No default and not *args/**kwargs: the parameter is mandatory.
            required_properties.append(key)
        arg_type = default_type  # fallback used when the annotation is absent or not JSON-mappable
        if param.annotation is not Signature.empty:
            arg_type = param.annotation
            if isinstance(arg_type, _TypedDictMeta):
                (
                    sub_dict_props,
                    sub_dict_required,
                ) = mk_sub_dict_schema_from_typed_dict(arg_type)
                p['properties'] = sub_dict_props
                if sub_dict_required:
                    p['required'] = sub_dict_required
                arg_type = dict
            elif getattr(arg_type, '_name', None) == 'Iterable':
                p['items'] = mk_sub_list_schema_from_iterable(arg_type)
                arg_type = list
            elif arg_type not in JSON_TYPES and not COMPLEX_TYPE_MAPPING.get(arg_type):
                arg_type = default_type
        p['type'] = arg_type
        if include_func_params:
            p['x-py-param'] = param
        # map key to this p info
        input_properties[key] = p
    if required_properties:
        input_schema['required'] = required_properties
    return input_schema
def mk_output_schema_from_func(func):
    """Make the openAPI output schema for ``func`` from its return annotation.

    Returns an empty dict when the annotation is missing, is ``Any``, or
    cannot be mapped to a JSON-compatible type.
    """
    return_annotation = signature(func).return_annotation
    if return_annotation in [Signature.empty, Any]:
        return {}
    schema = {}
    if isinstance(return_annotation, _TypedDictMeta):
        schema['type'] = dict
        schema['properties'] = mk_sub_dict_schema_from_typed_dict(return_annotation)[0]
    elif getattr(return_annotation, '_name', None) == 'Iterable':
        schema['type'] = list
        schema['items'] = mk_sub_list_schema_from_iterable(return_annotation)
    elif return_annotation not in JSON_TYPES and not COMPLEX_TYPE_MAPPING.get(return_annotation):
        # Unknown, unmapped type: no output schema can be produced.
        return {}
    else:
        schema['type'] = return_annotation
    return schema
def validate_input(raw_input: Any, schema: dict):
    """Validate ``raw_input`` against an openAPI-style ``schema``.

    Collects every problem found and then raises ``InputError`` with all the
    messages joined together; returns ``None`` when the input is valid.
    """
    problems = []

    def _check_dict(input_value: dict, properties: dict, root_path: str):
        # Check each declared parameter of a dict-typed value.
        for param_name, spec in properties.items():
            param_path = f'{root_path}.{param_name}' if root_path else param_name
            if not isinstance(spec, dict):
                raise TypeError(
                    'Bad schema for input validation. Must contain dictionaries only.'
                )
            if param_name in input_value:
                _check_value(input_value[param_name], spec, param_path)
            elif spec.get('required', False) and 'default' not in spec:
                problems.append(f'Parameter "{param_path}" is missing.')

    def _check_value(param, spec, param_path):
        invalid_input_msg = (
            f'Invalid parameter "{param_path}"' if param_path else 'Invalid input'
        )
        expected_type = spec.get('type', Any)
        if expected_type != Any and not isinstance(param, expected_type):
            problems.append(
                f'{invalid_input_msg}. Must be of type "{expected_type.__name__}".'
            )
        elif expected_type == list and 'items' in spec:
            # Recurse into each list element with an indexed path.
            for i, element in enumerate(param):
                _check_value(element, spec['items'], f'{param_path}[{i}]')
        elif expected_type == dict and 'properties' in spec:
            _check_dict(param, spec['properties'], param_path)

    _check_value(raw_input, schema, '')
    if len(problems) > 0:
        raise InputError(' '.join(problems))
# TODO write this function to take the output from
# mk_input_schema_from_func and create a validator function
# that takes an input_kwargs dict and makes sure the type of each value
# matches the schema
# def mk_input_validator_from_schema(schema):
# def input_validator(input_kwargs):
# print('Your arguments are fallacious.')
# return False
# TODO write this function to take a dict like the following and create an input mapper
# (assume deserialization has already been taken care of)
#
# example_transform = {
# 'outer_to_outer': {
# 'output_key': 'outer1',
# 'type': str, # Python type is str, JSON type is string
# },
# 'outer_to_inner': {
# 'output_key': 'inner1.value2',
# 'type': int, # Python type is int, JSON type is number
# },
# 'container': {
# 'inner_to_inner': {
# 'output_key': 'inner1.value1',
# 'type': Iterable[Iterable[int]] # Complex type, will be mapped to a nested list in JSON schema for OpenAPI
# },
# 'inner_to_outer': {
# 'output_key': 'outer2',
# 'type': np.array, # Python type is array, JSON type is list; requires custom handling
# }
# }
# }
def mk_input_mapper(transform):
    """Make an input mapper from a ``transform`` spec dict.

    TODO: not implemented yet (always returns None).  A sketch of the
    intended implementation is kept in the comments below; the expected
    shape of ``transform`` is shown in the comment block above.
    """
    pass
# def input_mapper(req_body):
#     def map_value(output, key, transform_value):
#         output_key = transform_value.get('output_key', None)
#         if output_key:
#             output[output_key] = get_nested_prop(req_body, output_key)
#     result = {}
#     for k, v in transform.items():
#         map_value(result, k, v)
#     return result
# return handle_json(input_mapper)
|
"""Support for a Emonitor channel sensor."""
from __future__ import annotations
from aioemonitor.monitor import EmonitorChannel, EmonitorStatus
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import POWER_WATT
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from . import name_short_mac
from .const import DOMAIN
SENSORS = (
SensorEntityDescription(key="inst_power"),
SensorEntityDescription(
key="avg_power", name="Average", entity_registry_enabled_default=False
),
SensorEntityDescription(
key="max_power", name="Max", entity_registry_enabled_default=False
),
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    sensors: list[EmonitorPowerSensor] = []
    handled: set = set()
    for number, channel in coordinator.data.channels.items():
        handled.add(number)
        # Skip inactive channels, and the second half of a channel pair we
        # have already emitted sensors for.
        if not channel.active or channel.paired_with_channel in handled:
            continue
        sensors.extend(
            EmonitorPowerSensor(coordinator, description, number)
            for description in SENSORS
        )
    async_add_entities(sensors)
class EmonitorPowerSensor(CoordinatorEntity, SensorEntity):
    """Representation of an Emonitor power sensor entity."""

    _attr_device_class = SensorDeviceClass.POWER
    _attr_native_unit_of_measurement = POWER_WATT
    _attr_state_class = SensorStateClass.MEASUREMENT

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        description: SensorEntityDescription,
        channel_number: int,
    ) -> None:
        """Initialize the channel sensor."""
        self.entity_description = description
        self.channel_number = channel_number
        # The coordinator must be attached before self.emonitor_status
        # (which reads self.coordinator.data) can be used below.
        super().__init__(coordinator)
        mac = self.emonitor_status.network.mac_address
        short_name = name_short_mac(mac[-6:])
        label = self.channel_data.label or f"{short_name} {channel_number}"
        if description.name:
            self._attr_name = f"{label} {description.name}"
            self._attr_unique_id = f"{mac}_{channel_number}_{description.key}"
        else:
            self._attr_name = label
            self._attr_unique_id = f"{mac}_{channel_number}"
        self._attr_device_info = DeviceInfo(
            connections={(dr.CONNECTION_NETWORK_MAC, mac)},
            manufacturer="Powerhouse Dynamics, Inc.",
            name=short_name,
            sw_version=self.emonitor_status.hardware.firmware_version,
        )

    @property
    def emonitor_status(self) -> EmonitorStatus:
        """Return the EmonitorStatus."""
        return self.coordinator.data

    @property
    def channels(self) -> dict[int, EmonitorChannel]:
        """Return the channels dict."""
        all_channels: dict[int, EmonitorChannel] = self.emonitor_status.channels
        return all_channels

    @property
    def channel_data(self) -> EmonitorChannel:
        """Return the channel data."""
        return self.channels[self.channel_number]

    def _paired_attr(self, attr_name: str) -> float:
        """Cumulative attributes for channel and paired channel."""
        data = self.channels[self.channel_number]
        total = getattr(data, attr_name)
        if paired := data.paired_with_channel:
            total += getattr(self.channels[paired], attr_name)
        return total

    @property
    def native_value(self) -> StateType:
        """State of the sensor."""
        return self._paired_attr(self.entity_description.key)

    @property
    def extra_state_attributes(self) -> dict:
        """Return the device specific state attributes."""
        return {"channel": self.channel_number}
|
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
from .qio import (
ParallelTempering,
SimulatedAnnealing,
Tabu,
QuantumMonteCarlo,
PopulationAnnealing,
SubstochasticMonteCarlo,
)
|
from django.db.models.query import RawQuerySet
from django.db.models import sql
from django.core.paginator import Paginator
from rest_framework.pagination import PageNumberPagination
class Pagination(PageNumberPagination):
    """
    DRF pagination_class, you use it by saying:
    class MyView(GenericAPIView):
    pagination_class = Pagination
    """
    # Let API clients override the page size with ?page_size=<n>.
    page_size_query_param = 'page_size'
class PaginatedRawQuerySet(RawQuerySet):
    """
    Replacement for a RawQuerySet that handles pagination, stolen from:
    https://stackoverflow.com/questions/32191853/best-way-to-paginate-a-raw-sql-query-in-a-django-rest-listapi-view/43921793#43921793
    https://gist.github.com/eltongo/d3e6bdef17b0b14384ba38edc76f25f6
    """
    def __init__(self, raw_query, **kwargs):
        super(PaginatedRawQuerySet, self).__init__(raw_query, **kwargs)
        # Keep the SQL without LIMIT/OFFSET so set_limits() can re-derive the
        # clauses instead of stacking them on repeated calls.
        self.original_raw_query = raw_query
        self._result_cache = None

    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice, int,)):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
        if self._result_cache is not None:
            return self._result_cache[k]
        if isinstance(k, slice):
            # Return a clone restricted to the slice window (lazy: the SQL
            # only runs when the clone is iterated).
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.set_limits(start, stop)
            return qs
        qs = self._clone()
        qs.set_limits(k, k + 1)
        return list(qs)[0]

    def __iter__(self):
        self._fetch_all()
        return iter(self._result_cache)

    def count(self):
        """Number of rows; uses the cache when available."""
        if self._result_cache is not None:
            return len(self._result_cache)
        return self.query.get_count(using=self.db)  # Originally was: return self.model.objects.count()

    def set_limits(self, start, stop):
        """Append LIMIT/OFFSET clauses selecting the [start, stop) window."""
        limit_offset = ''
        new_params = tuple()
        if start is None:
            start = 0
        elif start > 0:
            new_params += (start,)
            limit_offset = ' OFFSET %s'
        if stop is not None:
            new_params = (stop - start,) + new_params
            # BUGFIX: the clause needs a leading space (like ' OFFSET %s'
            # above); without it the SQL fuses with the last token of the
            # original query, e.g. "SELECT ... FROM tableLIMIT 5".
            limit_offset = ' LIMIT %s' + limit_offset
        self.params = self.params + new_params
        self.raw_query = self.original_raw_query + limit_offset
        self.query = sql.RawQuery(sql=self.raw_query, using=self.db, params=self.params)

    def _fetch_all(self):
        # Materialize and cache the full result set exactly once.
        if self._result_cache is None:
            self._result_cache = list(super(PaginatedRawQuerySet, self).__iter__())

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.model.__name__)

    def __len__(self):
        self._fetch_all()
        return len(self._result_cache)

    def _clone(self):
        clone = self.__class__(raw_query=self.raw_query, model=self.model, using=self._db, hints=self._hints,
                               query=self.query, params=self.params, translations=self.translations)
        return clone
class RawQuerysetPaginator(Paginator):
    """
    This is a Django paginator, meant to adapt RawQueryset
    to DRF pagination classes.
    Stolen from:
    https://stackoverflow.com/questions/2532475/django-paginator-raw-sql-query
    Stopped using it in favor of PaginatedRawQueryset, but will keep it just in case.
    """
    def __init__(self, object_list, per_page, count=1, **kwargs):
        # `count` must be supplied by the caller: a raw queryset cannot
        # cheaply count itself, hence the `count` override below.
        super(RawQuerysetPaginator, self).__init__(object_list, per_page, **kwargs)
        self._raw_count = count

    @property
    def count(self):
        # Overrides Paginator.count with the caller-provided total.
        return self._raw_count

    def page(self, number):
        number = self.validate_number(number)
        return self._get_page(self.object_list, number, self)
class RawQuerysetPagination(Pagination):
    """
    DRF pagination_class for raw querysets.
    Stopped using it in favor of PaginatedRawQueryset, but will keep it just in case.
    """
    # Swap in the raw-queryset-aware paginator instead of Django's default.
    django_paginator_class = RawQuerysetPaginator
|
# -*- coding: utf8 -*-
from __future__ import print_function, unicode_literals
import json
import logging
from . import BaseParser
logger = logging.getLogger("config")
class JsonParser(BaseParser):
    """
    Parse config from json.
    """
    def __init__(self, name):
        """
        :param name: parser name
        """
        self.name = name

    def parse(self, data):
        """
        parse json text into python dict
        :param data: text in json format
        :return: the parsed object, or an empty dict when parsing fails
        """
        try:
            parsed = json.loads(data)
        except Exception as err:
            # Best-effort: log and fall back to an empty config.
            logger.error("config %s: fail to parse json config, the error is %s", self.name, err)
            return {}
        return parsed

    def dump(self, config, **kwargs):
        """
        dump dict to json format
        :param config: config as dict
        :return: the json text, or an empty string when dumping fails
        """
        try:
            dumped = json.dumps(config, **kwargs)
        except Exception as err:
            # Best-effort: log and fall back to an empty string.
            logger.error("config %s: fail to dump config to json, the error is %s", self.name, err)
            return ""
        return dumped
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Dependencies that linter rules depend on."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def lowrisc_misc_linters_dependencies():
    """Declares workspaces linting rules depend on.
    Make sure to call this in your WORKSPACE file."""
    # rules_python pinned to 0.5.0; the sha256 guards against a tampered
    # or moved release archive.
    http_archive(
        name = "rules_python",
        url = "https://github.com/bazelbuild/rules_python/releases/download/0.5.0/rules_python-0.5.0.tar.gz",
        sha256 = "cd6730ed53a002c56ce4e2f396ba3b3be262fd7cb68339f0377a45e8227fe332",
    )
|
"""
"""
# IMPORT modules. Must have unittest, and probably coast.
import coast
from coast import general_utils
import unittest
import numpy as np
import pytz
import datetime
import unit_test_files as files
class test_general_utils(unittest.TestCase):
    """Unit tests for coast.general_utils and basic Gridded behaviour."""

    def test_copy_coast_object(self):
        gridded = coast.Gridded(files.fn_nemo_dat, files.fn_nemo_dom, config=files.fn_config_t_grid)
        duplicate = gridded.copy()
        self.assertTrue(duplicate.dataset == gridded.dataset, msg="check1")

    def test_getitem(self):
        gridded = coast.Gridded(files.fn_nemo_dat, files.fn_nemo_dom, config=files.fn_config_t_grid)
        self.assertTrue(gridded.dataset["ssh"].equals(gridded["ssh"]), msg="check1")

    def test_coast_variable_renaming(self):
        gridded = coast.Gridded(files.fn_nemo_dat, files.fn_nemo_dom, config=files.fn_config_t_grid)
        duplicate = gridded.copy()
        duplicate.rename({"ssh": "renamed"})
        # Renaming the copy must not affect the original's data.
        self.assertTrue(gridded["ssh"].equals(duplicate["renamed"]), "check1")

    def test_day_of_week(self):
        self.assertTrue(general_utils.day_of_week(np.datetime64("2020-10-16")) == "Fri", msg="check1")

    def test_bst_to_gmt(self):
        time_str = "11/10/2020 12:00"
        naive = datetime.datetime.strptime(time_str, "%d/%m/%Y %H:%M")
        london = pytz.timezone("Europe/London")
        converted = np.datetime64(london.localize(naive).astimezone(pytz.utc))
        self.assertTrue(converted == np.datetime64("2020-10-11T11:00:00"), msg="check1")
|
from allauth.account.forms import SignupForm
from django import forms
from bims.models import Profile
class CustomSignupForm(SignupForm):
    """Signup form adding name, organization and role to allauth's defaults."""
    first_name = forms.CharField(
        max_length=150,
        label='First Name',
        required=True)
    last_name = forms.CharField(
        max_length=150,
        label='Last Name',
        required=True
    )
    organization = forms.CharField(
        max_length=100,
        label='Organization/Institution',
        required=True
    )
    role = forms.ChoiceField(
        choices=Profile.ROLE_CHOICES,
        initial='citizen',
        required=True
    )
    def custom_signup(self, request, user):
        """Persist the extra fields onto the user and their bims Profile.

        NOTE(review): `user.organization` assumes the user model has an
        `organization` attribute -- confirm against the user model in use.
        """
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        user.organization = self.cleaned_data['organization']
        user.save()
        # The Profile may already exist (e.g. created by a signal).
        bims_profile, created = Profile.objects.get_or_create(
            user=user
        )
        bims_profile.role = self.cleaned_data['role']
        bims_profile.save()
        return user
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from ldshell.helpers import create_socket_address, parse_socket_address
from nubia import context
from nubia.internal.cmdbase import Command
from nubia.internal.io.eventbus import Message
from termcolor import cprint
class Connect(Command):
    """Built-in command that points ldshell at a cluster admin server."""

    cmds = {"connect": "connect to a given cluster"}

    def __init__(self):
        super(Connect, self).__init__()
        self._built_in = True

    def run_interactive(self, cmd, arg_str, raw):
        if len(arg_str) < 1:
            cprint("Cluster admin server address required", "red")
            return -1
        ctx = context.get_context()
        # Updating the context with the new socket for the admin server
        ctx._set_admin_server_socket_address(parse_socket_address(arg_str))
        return self._run()

    def run_cli(self, args):
        return self._run()

    def _run(self):
        # Tell all registered commands that a (new) connection is available.
        self._command_registry.dispatch_message(Message.CONNECTED)
        return 0

    def get_command_names(self):
        return self.cmds.keys()

    def add_arguments(self, parser):
        parser.add_parser("connect")

    def get_help(self, cmd, *args):
        return self.cmds[cmd]
|
from lms_code.analysis.run_bem import bemify, boundary_conditions,\
assemble, constrain, solve, evaluate_surface_disp, set_params,\
create_fault_mesh, create_surface_mesh, pin_ends_constraint,\
apply_jump_constraint
import lms_code.lib.rep2 as rep2
from codim1.core.tools import plot_mesh
import sys
import matplotlib.pyplot as plt
sys.setrecursionlimit(50000)
def set_params(d):
    """Populate ``d`` in place with the BEM model parameters.

    Overrides the ``set_params`` imported above, and precomputes the
    ``far_ray_lengths`` geometric progression of far-field element sizes.
    """
    d.update({
        'intersection_pt': (461386, 1590),
        'degree': 3,
        'skip_vertices': 1,
        'fault_refine': 1,
        'quad_max': 20,
        'quad_logr': 20,
        'quad_oneoverr': 20,
        'surface_points': 10,
        'shear_modulus': 30e9,
        'poisson_ratio': 0.25,
        'slip_magnitude': 1.0,
        'far_per_step': 5,
        'far_steps': 14,
        'far_mult': 5000.0,
    })
    # Element lengths grow geometrically: far_per_step copies of
    # far_mult * 2^step for each step after the unit-length first group.
    rays = [1.0] * d['far_per_step']
    for step in range(1, d['far_steps']):
        rays.extend([d['far_mult'] * (2.0 ** float(step))] * d['far_per_step'])
    d['far_ray_lengths'] = rays
if __name__ == "__main__":
    # Compute the BEM solution for each fault segment.
    joint_x = 4.20012e5 + 1.6
    # Predicates selecting fault elements on either side of the joint,
    # with a 10 m exclusion band around the intersection itself.
    beichuan = lambda x: x > joint_x + 10
    detachment = lambda x: x < joint_x - 10
    faults = dict()
    faults['beichuan'] = beichuan
    faults['detachment'] = detachment
    solns = dict()
    for name in faults.keys():
        # BUGFIX: `print name` is Python-2-only syntax; the parenthesized
        # form below behaves identically on both Python 2 and 3.
        print(name)
        geom = rep2.load('lms_geometry')
        d = dict()
        set_params(d)
        create_fault_mesh(d, geom, faults[name])
        create_surface_mesh(d, geom)
        bemify(d)
        # plot_mesh(d['combined_mesh'])
        # plt.show()
        boundary_conditions(d)
        assemble(d)
        # Pin both ends of the surface mesh to zero displacement.
        pin_ends_constraint(d['matrix'], d['rhs'], d['surf_mesh'], [0, 0], [0, 0])
        if name == 'beichuan':
            apply_jump_constraint(d['fault_mesh'], d['matrix'], d['rhs'])
        solve(d)
        evaluate_surface_disp(d)
        solns[name] = d
        # plt.plot(d['x'][0, :], d['u_soln'][0, :])
        # plt.show()
    rep2.save("bem_all_details_inverse1", solns)
|
# -*- coding: utf-8 -*-
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from devilry.devilry_deadlinemanagement.cradmin_app import ExaminerDeadlineManagementApp
from devilry.devilry_deadlinemanagement.views import deadline_listview
from devilry.devilry_deadlinemanagement.views import manage_deadline_view
class ExaminerDeadlineListView(deadline_listview.DeadlineListView):
    """Deadline list view for the examiner role."""

    def get_backlink_url(self):
        # Link back to the examiner's group list for the current assignment.
        return reverse_cradmin_url(
            instanceid='devilry_examiner_assignment',
            appname='grouplist',
            roleid=self.request.cradmin_role.id,
            viewname=crapp.INDEXVIEW_NAME
        )
class ExaminerManageDeadlineFromPreviousView(manage_deadline_view.ManageDeadlineFromPreviousView):
    """
    Examiner-role specialization of ManageDeadlineFromPreviousView.
    No overrides are needed beyond the subclass itself.
    """
class App(ExaminerDeadlineManagementApp):
    """Cradmin app wiring the examiner deadline-management views."""

    @classmethod
    def get_index_view_class(cls):
        # Landing page: the deadline list.
        return ExaminerDeadlineListView

    @classmethod
    def get_manage_deadline_from_previous_view_class(cls):
        return ExaminerManageDeadlineFromPreviousView
|
# encoding: utf-8
from django.conf.urls import url
from uploader.views import FileListView
from uploader.views import FileCreateView
urlpatterns = [
    url(r'^$', FileListView.as_view(), name='list'),
    # NOTE(review): FileCreateView is imported above but has no route here --
    # confirm whether the upload URL is intentionally disabled.
]
|
from flask_wtf import FlaskForm
from wtforms import SubmitField,StringField, IntegerField, RadioField, SelectField
class SearchForm(FlaskForm):
    """Search/filter form for animal records."""
    animal_name = StringField('Animal Name')
    species = StringField('Species')
    # gender = RadioField("Gender",choices=[('M',"Male"),('F',"Female")])
    # NOTE(review): "Reptiles" and "Amphibians" both submit the value
    # "hothouse", so the two options are indistinguishable server-side --
    # confirm this is intended (both housed in the hothouse?).
    category = SelectField("Category",choices=[("total","All"),("compound","Mammals"),("aviary","Birds"),("hothouse","Reptiles"),("hothouse","Amphibians"),("aquarium","Fishes")])
    fromDate = IntegerField('From')
    toDate = IntegerField('To')
    submit = SubmitField("Search")
|
import Crypto.Util.Counter
from Crypto.Cipher import AES
from math import ceil
from math import log
def to_int(b):
    """Interpret the byte sequence ``b`` as a little-endian unsigned integer."""
    num = 0
    for i in range(len(b)):
        num |= b[i] << (i * 8)
    return num


class AESCTR(object):
    """Deterministic random source backed by AES in CTR mode.

    The AES-CTR keystream (encryptions of zero bytes) is the entropy
    source, so output is reproducible for a given key/iv pair.
    """

    def __init__(self, key, iv):
        """
        :param key: AES key (16, 24 or 32 bytes)
        :param iv: 16-byte initial counter value, little-endian
        """
        self.__aes = AES.new(
            key,
            AES.MODE_CTR,
            counter = Crypto.Util.Counter.new(
                128,
                initial_value = to_int(iv),
                allow_wraparound = True
            )
        )

    def randint(self, min, max):
        """Return a uniform random integer in the inclusive range [min, max].

        Uses rejection sampling on the smallest bit mask covering the span.

        :raises Exception: if ``max < min``
        """
        if max < min:
            raise Exception('max < min')
        elif min == max:
            return min
        # BUGFIX: the previous ceil(log(max - min, 2)) under-counts bits when
        # the span is an exact power of two (e.g. span 8 -> 3 bits, mask 7,
        # span 1 -> 0 bits, mask 0), so `max` itself could never be drawn.
        # int.bit_length() is exact and avoids float-log precision issues.
        bit_length = (max - min).bit_length()
        byte_length = ceil(bit_length / 8)
        mask = (1 << bit_length) - 1
        # Rejection sampling: redraw until the masked value fits the span.
        while True:
            rand = self.__aes.encrypt(b'\x00' * byte_length)
            num = to_int(rand) & mask
            if num <= max - min:
                return min + num

    def bytes(self):
        """Yield keystream bytes one at a time (infinite generator)."""
        while True:
            yield self.__aes.encrypt(b'\x00')[0]
|
import spellchecker
from nltk.stem import PorterStemmer, WordNetLemmatizer
from sacremoses import MosesDetokenizer
from lti_app.caching import caching
from lti_app.core.api import LanguageToolClient
from lti_app.core.text_processing.parser import Parser
from lti_app.helpers import Singleton
class Tools(metaclass=Singleton):
    """Shared NLP tool instances (singleton so heavy resources load once)."""
    def __init__(self):
        # Lemmatization / stemming back-ends.
        self.lemmatizer = WordNetLemmatizer()
        self.stemmer = PorterStemmer()
        self.parser = Parser()
        self.languagetool = LanguageToolClient()
        self.spell = spellchecker.SpellChecker()
        # Common contractions the default dictionary would flag as typos.
        self.spell.word_frequency.load_words(["we're", "you're", "won't"])
        self.word_detokenizer = MosesDetokenizer()
|
import numpy as np
def get_data(file):
    """Read ``file`` and return a list of its lines, whitespace-stripped.

    :param file: path to a text file (one sequence per line)
    :return: list of stripped lines (blank lines are kept as '')
    """
    # BUGFIX: the file handle was never closed; `with` guarantees closure
    # even if iteration raises.
    with open(file, 'r') as f:
        return [line.strip() for line in f]
# L = sequence length, P = motif length
def init_EM(L, P):
    """Randomly initialize the EM parameters.

    :param L: number of possible motif start positions per sequence
    :param P: motif length
    :return: dict with 'lmbda' (L,), 'psi_0' (4, P), 'psi_1' (4, P);
        lmbda sums to 1 overall, each psi column sums to 1.
    """
    lmbda = np.random.uniform(0, 1, size=(L,))
    lmbda /= lmbda.sum()  # normalize to a probability distribution
    psi_0 = np.random.uniform(0, 1, size=(4, P))
    psi_0 /= psi_0.sum(axis=0)  # each position's base probabilities sum to 1
    psi_1 = np.random.uniform(0, 1, size=(4, P))
    psi_1 /= psi_1.sum(axis=0)
    return {'lmbda': lmbda, 'psi_0': psi_0, 'psi_1': psi_1}
def E_step(data, theta, P):
    """E step: posterior probability of the motif starting at each position.

    :param data: list of equal-length sequence strings over ACGT
    :param theta: dict with 'lmbda' (position prior), 'psi_1' (motif model)
        and 'psi_0' (background model), as produced by init_EM / M_step
    :param P: motif length
    :return: C where C[i][j] = P(motif of sequence i starts at j);
        each row is normalized to sum to 1
    """
    base_index = {'A': 0, 'C': 1, 'G': 2, 'T': 3}  # renamed: was shadowing builtin `dict`
    C = []
    for i in range(len(data)):
        C_i = []
        for j in range(len(data[0])-P+1):  # 0 to 38-6+1
            # Work in log space to avoid numeric underflow.
            C_ij = np.log(theta['lmbda'][j])
            # Iterate through all positions of the motif.
            # BUGFIX: these positions were scored with psi_0 (the background
            # model), which made every window score identically up to lmbda
            # and is inconsistent with M_step/LLH, where psi_1 is the motif
            # model.  Score them with psi_1.
            for p in range(P):
                base = data[i][j+p]
                k = base_index[base]
                C_ij += np.log(theta['psi_1'][k][p])
            # Iterate through all positions of the non-motif
            for jpr in range(len(data[0])-P+1):  # j' is the start position of a non-motif sequence
                if jpr == j:  # if j:j+p includes a base that is non motif, score it as background
                    continue
                for p in range(P):
                    base = data[i][jpr+p]
                    k = base_index[base]
                    C_ij += np.log(theta['psi_0'][k][p])
            C_i.append(np.exp(C_ij))  # move cij back to probability space
        sm = sum(C_i)  # denominator
        C_i = [item/sm for item in C_i]  # normalization
        C.append(C_i)
    return C
def M_step(data, C, P):
    """M step: re-estimate parameters from the posteriors C.

    :param data: list of equal-length sequence strings over ACGT
    :param C: posteriors from E_step; C[i][j] = P(motif of seq i starts at j)
    :param P: motif length
    :return: dict with updated 'lmbda', 'psi_1' (motif), 'psi_0' (background)
    """
    base_index = {'A': 0, 'C': 1, 'G': 2, 'T': 3}  # renamed: was shadowing builtin `dict`
    lmbda = np.array(C).sum(axis=0)  # sum by column of matrix
    # BUGFIX: normalize by the actual number of sequences N = len(data)
    # instead of the hard-coded 357 (the size of one particular dataset).
    lmbda = lmbda / len(data)
    # Initialize new psi matrices
    psi_1 = np.zeros((4, P))
    psi_0 = np.zeros((4, P))
    for p in range(P):
        for i in range(len(data)):
            for j in range(len(data[0])-P+1):
                base = data[i][j+p]
                k = base_index[base]
                # Weight by the posterior that this window is the motif.
                psi_1[k, p] += C[i][j]
                psi_0[k, p] += 1 - (C[i][j])
    psi_1 /= len(data)  # normalization
    psi_0 /= len(data)*(len(data[0])-P)  # normalization
    return {'lmbda': lmbda, 'psi_1': psi_1, 'psi_0': psi_0}
def LLH(data, theta, C, P):
    """Expected complete-data log likelihood under posteriors C."""
    base_index = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    # Contribution from the motif-position prior lmbda.
    prior_term = 0
    for i in range(len(data)):
        for j in range(len(C[i])):
            prior_term += C[i][j] * np.log(theta['lmbda'][j])
    # Contribution from the emission models (motif psi_1 vs background psi_0),
    # weighted by the posterior of each window being the motif.
    emission_term = 0
    for i in range(len(data)):
        for j in range(len(data[0]) - P + 1):
            for p in range(P):
                k = base_index[data[i][j + p]]
                emission_term += C[i][j] * np.log(theta['psi_1'][k, p]) \
                    + (1 - C[i][j]) * np.log(theta['psi_0'][k, p])
    return prior_term + emission_term
# X is matrix of inputs, conv is convergence criterion
def EM(P, seed, conv=0.001, iter=5):
    """Run EM motif discovery on "sequence.padded.txt".

    :param P: motif length
    :param seed: numpy random seed, for reproducible initialization
    :param conv: convergence tolerance -- NOTE(review): currently unused;
        the loop always runs exactly `iter` iterations. Confirm intent.
    :param iter: number of EM iterations (name shadows the builtin `iter`)
    :return: list of log-likelihoods, one per iteration, starting with the
        -200000 sentinel used as the initial "previous" value
    """
    data = get_data("sequence.padded.txt")
    # Initialization
    LLH_prev = -200000
    LLHs = []
    LLHs.append(LLH_prev)
    LLH_curr = 0
    np.random.seed(seed)
    theta = init_EM(len(data[0]), P)
    # 1st E step and M step
    posteriors = E_step(data, theta, P)
    theta = M_step(data, posteriors, P)
    # Main loop
    counter = 0
    while counter < iter:
        LLH_prev = LLH_curr  # kept for a future convergence check; unused now
        print("Iteration", counter, "prev", LLH_prev, "curr", LLH_curr)
        posteriors = E_step(data, theta, P)
        theta = M_step(data, posteriors, P)
        # Recalculate LLH with theta from the M step
        LLH_curr = LLH(data, theta, posteriors, P)
        LLHs.append(LLH_curr)
        counter += 1
    return LLHs
# test code
#data = get_data("small_seqs.txt")
#theta = init_EM(len(data[0]), 6)
#print(len(theta['lmbda']))
#posteriors = E_step(data, theta, 6)
#theta = M_step(data, posteriors, 6)
#loglh = LLH(data, theta, posteriors, 6)
# NOTE(review): runs at import time (no __main__ guard). Also, EM() returns
# the list of log-likelihoods, so `final_theta` is a misnomer -- confirm.
final_theta = EM(18, 1)
print(final_theta)
"""
elbo = evidence lower bound
# LLH = ELBO + entropy
#entropy
qlogq = posteriors * np.log(posteriors);
qlogq[np.where(posteriors == 0)] = 0 #0log0 = 0
return (expected_complete_LL - np.sum(np.sum(qlogq,axis=1) * ohe_matrix)) #log likelihood = ELBO + entropy, when q=p
sum_i sum_j q(c_i)logq(ci)
"""
|
#!/usr/bin/env python
"""
Contains modules for platform-specific methods.
"""
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from python_pachyderm.client.pps import pps_pb2 as client_dot_pps_dot_pps__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class APIStub(object):
  # missing associated documentation comment in .proto file
  # NOTE(review): generated by the gRPC Python plugin -- do not hand-edit;
  # regenerate from pps.proto instead.
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Each attribute is a callable for one RPC of the pps.API service:
    # unary_unary returns a single response, unary_stream an iterator.
    self.CreateJob = channel.unary_unary(
        '/pps.API/CreateJob',
        request_serializer=client_dot_pps_dot_pps__pb2.CreateJobRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.Job.FromString,
        )
    self.InspectJob = channel.unary_unary(
        '/pps.API/InspectJob',
        request_serializer=client_dot_pps_dot_pps__pb2.InspectJobRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.JobInfo.FromString,
        )
    self.ListJob = channel.unary_unary(
        '/pps.API/ListJob',
        request_serializer=client_dot_pps_dot_pps__pb2.ListJobRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.JobInfos.FromString,
        )
    self.ListJobStream = channel.unary_stream(
        '/pps.API/ListJobStream',
        request_serializer=client_dot_pps_dot_pps__pb2.ListJobRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.JobInfo.FromString,
        )
    self.FlushJob = channel.unary_stream(
        '/pps.API/FlushJob',
        request_serializer=client_dot_pps_dot_pps__pb2.FlushJobRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.JobInfo.FromString,
        )
    self.DeleteJob = channel.unary_unary(
        '/pps.API/DeleteJob',
        request_serializer=client_dot_pps_dot_pps__pb2.DeleteJobRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.StopJob = channel.unary_unary(
        '/pps.API/StopJob',
        request_serializer=client_dot_pps_dot_pps__pb2.StopJobRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.InspectDatum = channel.unary_unary(
        '/pps.API/InspectDatum',
        request_serializer=client_dot_pps_dot_pps__pb2.InspectDatumRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.DatumInfo.FromString,
        )
    self.ListDatum = channel.unary_unary(
        '/pps.API/ListDatum',
        request_serializer=client_dot_pps_dot_pps__pb2.ListDatumRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.ListDatumResponse.FromString,
        )
    self.ListDatumStream = channel.unary_stream(
        '/pps.API/ListDatumStream',
        request_serializer=client_dot_pps_dot_pps__pb2.ListDatumRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.ListDatumStreamResponse.FromString,
        )
    self.RestartDatum = channel.unary_unary(
        '/pps.API/RestartDatum',
        request_serializer=client_dot_pps_dot_pps__pb2.RestartDatumRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.CreatePipeline = channel.unary_unary(
        '/pps.API/CreatePipeline',
        request_serializer=client_dot_pps_dot_pps__pb2.CreatePipelineRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.InspectPipeline = channel.unary_unary(
        '/pps.API/InspectPipeline',
        request_serializer=client_dot_pps_dot_pps__pb2.InspectPipelineRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.PipelineInfo.FromString,
        )
    self.ListPipeline = channel.unary_unary(
        '/pps.API/ListPipeline',
        request_serializer=client_dot_pps_dot_pps__pb2.ListPipelineRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.PipelineInfos.FromString,
        )
    self.DeletePipeline = channel.unary_unary(
        '/pps.API/DeletePipeline',
        request_serializer=client_dot_pps_dot_pps__pb2.DeletePipelineRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.StartPipeline = channel.unary_unary(
        '/pps.API/StartPipeline',
        request_serializer=client_dot_pps_dot_pps__pb2.StartPipelineRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.StopPipeline = channel.unary_unary(
        '/pps.API/StopPipeline',
        request_serializer=client_dot_pps_dot_pps__pb2.StopPipelineRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.RerunPipeline = channel.unary_unary(
        '/pps.API/RerunPipeline',
        request_serializer=client_dot_pps_dot_pps__pb2.RerunPipelineRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.DeleteAll = channel.unary_unary(
        '/pps.API/DeleteAll',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.GetLogs = channel.unary_stream(
        '/pps.API/GetLogs',
        request_serializer=client_dot_pps_dot_pps__pb2.GetLogsRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.LogMessage.FromString,
        )
    self.GarbageCollect = channel.unary_unary(
        '/pps.API/GarbageCollect',
        request_serializer=client_dot_pps_dot_pps__pb2.GarbageCollectRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.GarbageCollectResponse.FromString,
        )
    self.ActivateAuth = channel.unary_unary(
        '/pps.API/ActivateAuth',
        request_serializer=client_dot_pps_dot_pps__pb2.ActivateAuthRequest.SerializeToString,
        response_deserializer=client_dot_pps_dot_pps__pb2.ActivateAuthResponse.FromString,
        )
class APIServicer(object):
    """Server-side servicer for the ``pps.API`` gRPC service.

    Every handler below is a stub: it marks the RPC as UNIMPLEMENTED on the
    gRPC context and raises ``NotImplementedError``. Subclass and override
    the methods you want to serve, then register the instance with
    ``add_APIServicer_to_server``.
    """
    # NOTE: the generated placeholder comments ("missing associated
    # documentation comment in .proto file") and the dead `pass` statements
    # that preceded each body have been folded into docstrings; runtime
    # behavior is unchanged.

    def CreateJob(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def InspectJob(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListJob(self, request, context):
        """ListJob returns information about current and past Pachyderm jobs. This is
        deprecated in favor of ListJobStream
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListJobStream(self, request, context):
        """ListJobStream returns information about current and past Pachyderm jobs.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def FlushJob(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteJob(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StopJob(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def InspectDatum(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListDatum(self, request, context):
        """ListDatum returns information about each datum fed to a Pachyderm job. This
        is deprecated in favor of ListDatumStream
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListDatumStream(self, request, context):
        """ListDatumStream returns information about each datum fed to a Pachyderm job
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RestartDatum(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreatePipeline(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def InspectPipeline(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListPipeline(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeletePipeline(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StartPipeline(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StopPipeline(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RerunPipeline(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteAll(self, request, context):
        """DeleteAll deletes everything
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetLogs(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GarbageCollect(self, request, context):
        """Garbage collection
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ActivateAuth(self, request, context):
        """An internal call that causes PPS to put itself into an auth-enabled state
        (all pipeline have tokens, correct permissions, etcd)
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_APIServicer_to_server(servicer, server):
    """Register all ``pps.API`` RPC handlers from *servicer* on *server*.

    The handler table is built from a declarative spec: one row per RPC
    giving (method name, handler factory, request message, response message).
    The resulting dict is identical to the hand-written generated form.
    """
    _pps = client_dot_pps_dot_pps__pb2
    _empty = google_dot_protobuf_dot_empty__pb2.Empty
    _unary = grpc.unary_unary_rpc_method_handler
    _stream = grpc.unary_stream_rpc_method_handler

    # (rpc name, handler factory, request type, response type)
    _method_specs = (
        ('CreateJob', _unary, _pps.CreateJobRequest, _pps.Job),
        ('InspectJob', _unary, _pps.InspectJobRequest, _pps.JobInfo),
        ('ListJob', _unary, _pps.ListJobRequest, _pps.JobInfos),
        ('ListJobStream', _stream, _pps.ListJobRequest, _pps.JobInfo),
        ('FlushJob', _stream, _pps.FlushJobRequest, _pps.JobInfo),
        ('DeleteJob', _unary, _pps.DeleteJobRequest, _empty),
        ('StopJob', _unary, _pps.StopJobRequest, _empty),
        ('InspectDatum', _unary, _pps.InspectDatumRequest, _pps.DatumInfo),
        ('ListDatum', _unary, _pps.ListDatumRequest, _pps.ListDatumResponse),
        ('ListDatumStream', _stream, _pps.ListDatumRequest, _pps.ListDatumStreamResponse),
        ('RestartDatum', _unary, _pps.RestartDatumRequest, _empty),
        ('CreatePipeline', _unary, _pps.CreatePipelineRequest, _empty),
        ('InspectPipeline', _unary, _pps.InspectPipelineRequest, _pps.PipelineInfo),
        ('ListPipeline', _unary, _pps.ListPipelineRequest, _pps.PipelineInfos),
        ('DeletePipeline', _unary, _pps.DeletePipelineRequest, _empty),
        ('StartPipeline', _unary, _pps.StartPipelineRequest, _empty),
        ('StopPipeline', _unary, _pps.StopPipelineRequest, _empty),
        ('RerunPipeline', _unary, _pps.RerunPipelineRequest, _empty),
        ('DeleteAll', _unary, _empty, _empty),
        ('GetLogs', _stream, _pps.GetLogsRequest, _pps.LogMessage),
        ('GarbageCollect', _unary, _pps.GarbageCollectRequest, _pps.GarbageCollectResponse),
        ('ActivateAuth', _unary, _pps.ActivateAuthRequest, _pps.ActivateAuthResponse),
    )

    rpc_method_handlers = {
        name: factory(
            getattr(servicer, name),
            request_deserializer=request_type.FromString,
            response_serializer=response_type.SerializeToString,
        )
        for name, factory, request_type, response_type in _method_specs
    }

    generic_handler = grpc.method_handlers_generic_handler(
        'pps.API', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# The `@fb-only:` markers below are build-tooling directives (activated in
# Facebook-internal builds); do not edit or remove them.
# @fb-only: from . import fb, fbnet_v2  # noqa
# Explicitly expose all registry-based modules
# NOTE(review): __all__ lists "fbnet_v2" but no open-source import of it is
# visible in this fragment — presumably supplied by the @fb-only import or
# elsewhere in the file; confirm before relying on `from package import *`.
__all__ = [
    "fbnet_v2",
    # @fb-only: "fb",
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.