text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
"""
FormatLF.py: Takes output from SALF.py and MonteCarlo.py,
processes it and presents it in a clean
format.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from scipy.interpolate import UnivariateSpline
from scipy.integrate import simps
from lib.Iso import Isochrone
from lib.Helpers import (gauss, setup_plot, colours,
RC_sigma, pnts_from_samples,
MC_Points, SALF_Points)
from lib.Constants import *
import matplotlib
matplotlib.rcParams.update({'font.size': 13})
"""
This script is to be run after samples have been generated
with MonteCarlo.py and SALF.py
It smooths and presents the data as well as doing some nice
analysis on it
"""
# SECTION THREE: The Reconstruction (Very Important)
# Plots a given branch from its points (does smoothing)
def reconstruct_LF(bin_centres, counts, step, color, t, method):
    """Smooth one luminosity-function branch and plot it.

    Parameters
    ----------
    bin_centres, counts : histogram points of the branch.
    step : histogram bin width; the Gaussian smoothing width is sigma/step bins.
    color : matplotlib colour used for this branch's curve.
    t : branch type index. For t == 1 an approximate background is split off
        and background/residual are plotted separately; for t == 2 RC_sigma
        is additionally called on the reconstructed curve.
    method : unused in this function (kept for signature compatibility with
        the pnts_fn callers in plot_Method).

    Returns
    -------
    (spl, smoothed) : the interpolating spline over bin_centres and the
    Gaussian-smoothed counts array.

    Notes
    -----
    `sigma`, `xmin` and `xmax` are module-level names, presumably supplied by
    the `from lib.Constants import *` star import — TODO confirm.
    """
    # Convolving with gaussian (width expressed in units of histogram bins)
    smoothed = gaussian_filter(counts, sigma/step)
    x = np.linspace(xmin, xmax, 10000)
    spl = UnivariateSpline(bin_centres, smoothed, k=1, s=0)  # k=1: linear spline (not cubic)
    if t == 1:
        # Just snapping off the exponential background
        # very roughly: fit the background only on points outside the window
        pnts = np.column_stack((bin_centres, smoothed))
        # keep points outside [-1.89, -0.63] (magnitude window of the feature)
        pnts = pnts[(pnts[:,0] < -1.89) | (pnts[:,0] > -0.63)]
        spl1 = UnivariateSpline(pnts[:,0], pnts[:,1], s=0)  # background spline
        spl2 = UnivariateSpline(
            bin_centres, smoothed-spl1(bin_centres), s=0)   # residual (feature) spline
        y1 = spl1(x)
        y2 = spl2(x)
        plt.plot(x, y1, color=colours[4])
        plt.plot(x, y2, color=color)
    else:
        # Otherwise we just have the one spline to plot
        y = spl(x)
        plt.plot(x, y, color=color)
    # Fit parameters to RC (Red Clump, judging by the helper name — TODO confirm)
    if t==2:
        RC_sigma(x, spl(x))
    return spl, smoothed
def plot_Method(pnts_fn):
    """Plot every branch produced by `pnts_fn`, then their sum.

    Parameters
    ----------
    pnts_fn : callable(t) -> (bin_centres, counts, step, NORM, method),
        e.g. MC_Points or SALF_Points from lib.Helpers.

    Returns
    -------
    (x, y) : common abscissa grid and the summed (total) curve.

    Notes
    -----
    `typs`, `xmin`, `xmax` and `colours` are module-level names, presumably
    from the star imports — TODO confirm. `scipy.integrate.simps` is the
    legacy name (removed in modern SciPy in favour of `simpson`).
    """
    plt.figure()
    setup_plot()
    spls = []
    for t in typs:
        # Calls the above functions: fetch points for branch t and reconstruct it
        bcs, counts, step, NORM, method = pnts_fn(t)
        counts = NORM*counts
        spl, smoothed = reconstruct_LF(bcs, counts, step, colours[t], t, method)
        spls.append(spl)
    x = np.linspace(xmin, xmax, 10000)
    y = x*0
    # sum all branch splines into the total luminosity function
    for spl in spls:
        y = y+spl(x)
    plt.plot(x,y, linestyle="-.", color='black')
    # sanity check: total area under the summed LF
    area = simps(y,x)
    print("Area:", area)
    plt.legend([
        "Red Giant Branch",
        "Red Giant Branch Bump",
        "Red Clump",
        "Asymptotic Giant Branch",
        "Total"
    ])
    return x,y
def plot_comparison(x, y1, y2):
    """Overlay two luminosity functions and show their absolute difference.

    Top panel: both curves (Monte Carlo vs Semi-Analytic); bottom panel:
    100 * |y2 - y1| in red. Uses module-level xmin/xmax for the x-limits.
    """
    fig, (ax_main, ax_diff) = plt.subplots(
        2, 1, gridspec_kw={'height_ratios': [4, 1]}, sharex=True)
    # main panel: the two curves on a shared x-range
    ax_main.set_xlim(xmin, xmax)
    ax_main.set_ylabel("Luminosity Function (Arbitrary Units)")
    ax_main.plot(x, y1)
    ax_main.plot(x, y2)
    ax_main.legend([
        "Monte Carlo",
        "Semi-Analytic",
    ])
    # difference panel, scaled by 100 for readability
    ax_diff.set_xlabel("$M_{K_s}$")
    ax_diff.set_ylabel("Difference $\\times 10^2$")
    ax_diff.plot(x, 100*np.abs(y2-y1), color="red")
    # ax_main.set_ylim(0.0, 1.3)  # adjust as necessary
if __name__ == "__main__":
    # Build and plot the Monte Carlo luminosity function
    print("Monte Carlo")
    x, y1 = plot_Method(MC_Points)
    plt.title("Monte Carlo")
    print()
    # Build and plot the semi-analytic (SALF) luminosity function
    print("SALF")
    x, y2 = plot_Method(SALF_Points)
    plt.title("SALF")
    # Overlay the two totals and plot their difference
    plot_comparison(x, y1, y2)
    # iso = Isochrone()
    # iso.plot()
    plt.show()
|
# *_*coding:utf-8 *_*
import requests,json
class PublicDingDing(object):
    """Minimal client for a DingTalk (DingDing) custom-robot webhook."""

    def __init__(self):
        pass

    def get_message(self, token, content):
        """Send a text message through a DingTalk custom-robot webhook.

        Note: despite its name (kept for backward compatibility), this method
        POSTs a message; it does not fetch anything.

        Parameters
        ----------
        token : str
            The robot's access token (appended to the webhook URL).
        content : str
            Plain-text message body.

        Returns
        -------
        requests.Response
            The webhook response, so callers can inspect errcode/errmsg.
        """
        self.url = 'https://oapi.dingtalk.com/robot/send?access_token=%s' % token
        # Bug fix: per the DingTalk custom-robot API, the @-all switch must be
        # nested inside the "at" object; a top-level "isAtAll" key is ignored.
        self.pagrem = {
            "msgtype": "text",
            "text": {
                "content": content
            },
            "at": {
                "isAtAll": True
            }
        }
        self.headers = {
            'Content-Type': 'application/json'
        }
        # Return the response instead of dropping it on the floor.
        return requests.post(url=self.url, data=json.dumps(self.pagrem), headers=self.headers)
|
from cPickle import Unpickler
from importlib import import_module
import os
from .._utils import ui # , logger
def map_paths(module_name, class_name):
    """Resolve a pickled (module, class) pair, rewriting legacy eelbrain paths.

    Pre-refactor module paths ('eelbrain.vessels.data', 'eelbrain.data.*')
    are translated to their current locations before importing; any other
    path is imported as-is. Returns the class object.
    """
    if module_name == 'eelbrain.vessels.data':
        # logger.debug("Legacy pickle: %r / %r" % (module_name, class_name))
        # oldest layout: lowercase class names in eelbrain.vessels.data
        module_name = 'eelbrain._data_obj'
        legacy_classes = {'var': 'Var', 'factor': 'Factor', 'ndvar': 'NDVar',
                          'datalist': 'Datalist', 'dataset': 'Dataset'}
        class_name = legacy_classes[class_name]
    elif module_name.startswith('eelbrain.data.'):
        # intermediate layout: eelbrain.data.* submodules
        rewrites = (('eelbrain.data.load', '.data.load', '.load'),
                    ('eelbrain.data.stats', '.data.stats', '._stats'),
                    ('eelbrain.data.data_obj', '.data.data_obj', '._data_obj'))
        for prefix, old_part, new_part in rewrites:
            if module_name.startswith(prefix):
                rev = module_name.replace(old_part, new_part)
                break
        else:
            raise NotImplementedError("%r / %r" % (module_name, class_name))
        # logger.debug("Legacy pickle %r: %r -> %r" % (class_name, module_name, rev))
        module_name = rev
    module = import_module(module_name)
    return getattr(module, class_name)
def unpickle(file_path=None):
    """Load pickled Python objects from a file.
    Almost like ``cPickle.load(open(file_path))``, but also loads object saved
    with older versions of Eelbrain, and allows using a system file dialog to
    select a file.

    Parameters
    ----------
    file_path : None | str
        Path to a pickled file. If None (default), a system file dialog will be
        shown. If the user cancels the file dialog, a RuntimeError is raised.

    Returns
    -------
    The unpickled object.

    Notes
    -----
    Python 2 only: relies on ``cPickle``, the ``print`` statement and the
    ``Unpickler.find_global`` hook (renamed ``find_class`` in Python 3).
    """
    if file_path is None:
        # interactive path: ask the user for a file via the ui module
        filetypes = [("Pickles (*.pickled)", '*.pickled'), ("All files", '*')]
        file_path = ui.ask_file("Select File to Unpickle", "Select a pickled "
                                "file to unpickle", filetypes)
        if file_path is False:
            raise RuntimeError("User canceled")
        else:
            # echo the chosen path (Python 2 print statement)
            print repr(file_path)
    else:
        file_path = os.path.expanduser(file_path)
        # fall back to "<path>.pickled" if the bare path does not exist
        if not os.path.exists(file_path):
            new_path = os.extsep.join((file_path, 'pickled'))
            if os.path.exists(new_path):
                file_path = new_path
    # 'r' (text mode) is the Python 2 convention for protocol-0 pickles
    with open(file_path, 'r') as fid:
        unpickler = Unpickler(fid)
        # redirect legacy module paths to their current locations
        unpickler.find_global = map_paths
        obj = unpickler.load()
    return obj
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-31 17:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django 1.10 migration (2016-10-31).

    Deletes the UserMedHistories model (removing its fields first), renames
    UserProfiles.image to profilePicture, and adds many-to-many 'profile'
    relations to AllEvents (through UserEvents) and AllMedHistories.
    """

    dependencies = [
        ('mainApi', '0003_auto_20161030_2351'),
    ]

    operations = [
        # Remove UserMedHistories' fields before deleting the model below
        migrations.RemoveField(
            model_name='usermedhistories',
            name='allMedHistories',
        ),
        migrations.RemoveField(
            model_name='usermedhistories',
            name='userProfiles',
        ),
        migrations.RenameField(
            model_name='userprofiles',
            old_name='image',
            new_name='profilePicture',
        ),
        # Link events to profiles through the UserEvents join model
        migrations.AddField(
            model_name='allevents',
            name='profile',
            field=models.ManyToManyField(through='mainApi.UserEvents', to='mainApi.UserProfiles'),
        ),
        migrations.AddField(
            model_name='allmedhistories',
            name='profile',
            field=models.ManyToManyField(to='mainApi.UserProfiles'),
        ),
        migrations.DeleteModel(
            name='UserMedHistories',
        ),
    ]
|
import numpy as np

# Demo script: NumPy indexing, slicing, reshaping and np.empty.
npArray = np.arange(1, 27)   # integers 1..26
indexNum = npArray[19]       # 20th element (zero-based index 19)
print("---Indexing---")
print("npArray:", npArray, " Type Of npArray:", type(npArray))
print("Index(19):", indexNum, " Type Of Index:", type(indexNum))
indexNumAdd = indexNum + 6
print("Index Addition:", indexNumAdd, " Type:", type(indexNumAdd))
indAdd = indexNum + npArray[5]
print("Another Index Add:", indAdd, "Type:", type(indAdd))
# npArray[-1] is the last element; npArray[-26] wraps around to the first
print("Negative Indexing:", npArray[-1], npArray[25], npArray[-26], npArray[0])
# print(npArray.slice(1, 12, 4))  # ndarray has no .slice; use npArray[1:12:4]
print("\n---Slicing---")
arr_slice = slice(2, 26, 3)
slice_array = npArray[arr_slice]  # starts at index 2, stops before 26, step 3
print("Slicing:", slice_array)
print("Arranging into 2 dimensional:\n", slice_array.reshape(4, 2))
arra3d = slice_array.reshape(2, 2, 2)
print("Arranging into 3 dimensional:\n", arra3d)
# NOTE: itemsize is bytes per element, not an element count
print("Shape:", arra3d.shape, "Dimension:", arra3d.ndim, "Items:", arra3d.itemsize)
print("Between Slicing:", npArray[3:20])
print("Before Slicing:", npArray[:20])
print("After Slicing:", npArray[5:])
print("\n---Slicing Multi Dimensional Array--")
print("Row x Column")
arrMd = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print("0:2 x 0:2:\n", arrMd[0:2, 0:2])
print("0:5 x 1:2:\n", arrMd[0:5, 1:2])
print(":2 x 1: :\n", arrMd[:2, 1:])
print("\n---Printing random Array---")
# np.empty returns uninitialized memory: the printed values are arbitrary
print("Int array:", np.empty([3,4], dtype=int))
print("Float Array:", np.empty([3, 2], dtype=float))
print("Byte Array:", np.empty([3, 2], dtype=bytes))
"""
Created by: Gabriele Pompa (gabriele.pompa@gmail.com)
File: options.py
Created on Tue Jul 14 2020 - Version: 1.0
Description:
This file contains definitions for EuropeanOption abstract base-class as well
as PlainVanillaOption and DigitalOption derived classes.
"""
# ----------------------- standard imports ---------------------------------- #
# for statistical functions
from scipy import stats
# for optimization routines
import scipy.optimize as sc_opt
# for some mathematical functions
import math
# for warning messages
import warnings
# ----------------------- sub-modules imports ------------------------------- #
from ..utils.utils import *
# -----------------------------------------------------------------------------#
class EuropeanOption:
"""
EuropeanOption abstract class: an interface setting the template for any option with european-style exercise.
It uses a MarketEnvironment object to define the current market conditions under which the option is modeled.
This class is not meant to be instantiated.
Attributes:
-----------
mkt_env (MarketEnvironment): Instance of MarketEnvironment class
type (str): Optional. Type of the option. Can be either 'call' or 'put';
S_t (float): 'S' attribute of mkt_env.
K (float): Optional. Strike price;
t (str; dt.datetime): 't' attribute of mkt_env.
T (str; dt.datetime): Optional. Expiration date. Can be either "dd-mm-YYYY" String or dt.datetime object
tau (float): time to maturity in years, computed as tau=T-t by time_to_maturity() method
r (float): 'r' attribute of mkt_env.
sigma (float): 'sigma' attribute of mkt_env.
Public Methods:
--------
time_to_maturity: float
Computes the time-to-maturity of the option.
process_pricing_parameters: float
Parses underlying, strike-price, time, volatility and short-rate parameters,
discriminating between time-to-maturity and valuation date
time parameter and coordinating pricing parameters together.
d1_and_d2: float, float
Computes the d1 and d2 terms of Black-Scholes pricing formula
payoff: float
Computes the payoff of the option.
price: float
Computes the Black-Scholes price of the option.
PnL: float
Computes the P&L of the option.
implied_volatility: float
Computes the Black-Scholes implied-volatility of the option.
delta: float
Computes the Black-Scholes delta of the option.
theta: float
Computes the Black-Scholes theta of the option.
gamma: float
Computes the Black-Scholes gamma of the option.
vega: float
Computes the Black-Scholes vega of the option.
rho: float
Computes the Black-Scholes rho of the option.
Template Methods:
--------
getters for all common private attributes
setters for common private attributes, not belonging to mkt_env
price_upper_limit: float
Template method for upper limit. Raises NotImplementedError if called.
price_lower_limit: float
Template method for lower limit. Raises NotImplementedError if called.
Usage examples:
--------
- example_options.py
- example_options_other_params.py
- options_IV.py
- options_numeric_analytic_greeks_comparison.py
"""
    def __init__(self, mkt_env, option_type='call', K=100.0, T="31-12-2020"):
        """Initialize the shared European-option state.

        Parameters
        ----------
        mkt_env : MarketEnvironment
            Provides S (underlying), t (valuation date), r (short rate)
            and sigma (volatility) via its getters.
        option_type : str
            'call' or 'put'; anything else raises NotImplementedError.
        K : float
            Strike price.
        T : str
            Expiration date, parsed by date_string_to_datetime_obj
            ("dd-mm-YYYY" per the class docstring).
        """
        print("Initializing the EuropeanOption!")
        # option type check
        if option_type not in ['call', 'put']:
            raise NotImplementedError("Option Type: '{}' does not exist!".format(option_type))
        self.__type = option_type
        self.__S = mkt_env.get_S()
        self.__K = K
        self.__t = mkt_env.get_t()
        self.__T = date_string_to_datetime_obj(T)
        self.__tau = self.time_to_maturity()  # time-to-maturity in years
        self.__r = mkt_env.get_r()
        self.__sigma = mkt_env.get_sigma()
        # empty initial price of the option (set later; used by PnL)
        self.__initial_price = None
        # empty informations dictionary (presumably filled by subclasses)
        self.__docstring_dict = {}
# string representation method template
    def __repr__(self):
        # Template: concrete subclasses must provide their own representation.
        raise NotImplementedError()
#
# getters
#
    def get_type(self):
        # option type: 'call' or 'put'
        return self.__type

    def get_S(self):
        # underlying level captured from the market environment at construction
        return self.__S

    def get_K(self):
        # strike price
        return self.__K

    def get_t(self):
        # valuation date
        return self.__t

    def get_T(self):
        # expiration date (dt.datetime)
        return self.__T

    def get_tau(self):
        # time-to-maturity in years (T - t)
        return self.__tau

    def get_r(self):
        # short rate
        return self.__r

    def get_sigma(self):
        # underlying volatility
        return self.__sigma
def get_initial_price(self):
return NotImplementedError()
# doctring getter template
    def get_docstring(self, label):
        # Template: subclasses return documentation registered under `label`
        # (presumably from self.__docstring_dict — TODO confirm in subclasses).
        raise NotImplementedError()
#
# setters
#
def set_type(self, option_type):
self.__type = option_type
# option type check
if option_type not in ['call', 'put']:
raise NotImplementedError("Option Type: '{}' does not exist!".format(option_type))
    def set_K(self, K):
        # strike update: no dependent state to refresh
        self.__K = K

    def set_T(self, T):
        self.__T = date_string_to_datetime_obj(T)
        # update time to maturity, given changed T, to keep internal consistency
        self.__update_tau()

    def set_tau(self, tau):
        self.__tau = tau
        # update expiration date, given changed tau, to keep internal consistency
        self.__update_T()
#
# update methods (private)
#
    def __update_tau(self):
        # recompute tau from the current t and T attributes
        self.__tau = self.time_to_maturity()

    def __update_T(self):
        # rebuild T as t plus tau years, rounded up to whole days
        # (`dt` presumably comes from the utils star import — TODO confirm)
        self.__T = self.__t + dt.timedelta(days=math.ceil(self.__tau * 365))
#
# utility methods
#
def time_to_maturity(self, *args, **kwargs):
"""
Utility method to compute time-to-maturity
"""
# parsing optional parameters
t = args[0] if len(args) > 0 else kwargs['t'] if 't' in kwargs else self.get_t()
T = args[1] if len(args) > 1 else kwargs['T'] if 'T' in kwargs else self.get_T()
# convert to dt.datetime objects, if needed
t = date_string_to_datetime_obj(t)
T = date_string_to_datetime_obj(T)
# compute and return time to maturity (in years)
return homogenize((T - t).days / 365.0, sort=False)
    def process_pricing_parameters(self, *args, **kwargs):
        """
        Utility method to parse underlying, strike-price, time, volatility and
        short-rate parameters.

        Accepted inputs (all optional; instance attributes are the defaults):
        - S (1st positional or keyword): underlying value(s);
        - K (keyword): strike(s) — at most one of S/K may be iterable;
        - tau / t (2nd positional or keyword): time-to-maturity value(s) or
          valuation date(s);
        - sigma, r (keyword): volatility / short rate, scalar or vector;
        - sigma_axis, r_axis (keyword, bool): if True, that vector spans the
          x-axis instead of being distributed along the other dimensions;
        - np_output (keyword, bool): np.ndarray output if True (default),
          pd.DataFrame otherwise.

        Returns a dict of mutually coordinated parameters with keys
        "S", "K", "tau", "sigma", "r" plus the "np_output" flag.

        Helpers is_iterable, is_iterable_not_string, is_numeric, is_date,
        homogenize and coordinate come from the utils star import.
        """
        #
        # Parsing input parameters
        #
        # underlying value
        S = args[0] if len(args) > 0 else kwargs['S'] if 'S' in kwargs else self.get_S()
        # strike price
        K = kwargs['K'] if 'K' in kwargs else self.get_K()
        # time parameter: tau takes precedence over t if both are given
        time_param = args[1] if len(args) > 1 \
            else kwargs['tau'] if 'tau' in kwargs \
            else (kwargs['t'] if 't' in kwargs else None)
        # underlying volatility
        sigma = kwargs['sigma'] if 'sigma' in kwargs else self.get_sigma()
        # span the x-axis with volatility values if True, otherwise distribute its values
        sigma_axis = kwargs['sigma_axis'] if 'sigma_axis' in kwargs else False
        # short rate
        r = kwargs['r'] if 'r' in kwargs else self.get_r()
        # span the x-axis with short-rate values if True, otherwise distribute its values
        r_axis = kwargs['r_axis'] if 'r_axis' in kwargs else False
        # squeeze output flag
        np_output = kwargs['np_output'] if 'np_output' in kwargs else True
        #
        # Iterable parameters check
        #
        # counter for iterable parameters in input
        iterable_parameters = 0
        iterable_S = False
        iterable_K = False
        iterable_tau = False
        iterable_sigma = False
        iterable_r = False
        if is_iterable(S):
            iterable_S = True
            iterable_parameters += 1
        if is_iterable(K):
            iterable_K = True
            iterable_parameters += 1
        if is_iterable_not_string(time_param):
            iterable_tau = True
            iterable_parameters += 1
        if is_iterable(sigma):
            iterable_sigma = True
            iterable_parameters += 1
        if is_iterable(r):
            iterable_r = True
            iterable_parameters += 1
        #
        # Checking that only one of S or K is iterable
        #
        if iterable_S and iterable_K:
            raise NotImplementedError("Just one between 'S' and 'K' parameters allowed to be iterable."
                                      " Both iterable given in input:\nS={}\nK={}".format(S, K))
        # flag for iterability of S or K
        iterable_S_or_K = iterable_S or iterable_K
        #
        # Checking consistency between iterable_sigma and sigma_axis
        #
        if not iterable_sigma and sigma_axis:
            raise ValueError("Non-iterable sigma cannot span the x-axis.")
        #
        # Checking consistency between iterable_r and r_axis
        #
        if not iterable_r and r_axis:
            raise ValueError("Non-iterable r cannot span the x-axis.")
        #
        # Checking that sigma_axis and r_axis are not simultaneously True
        #
        if sigma_axis and r_axis:
            raise NotImplementedError("x-axis cannot be spanned simultaneously by sigma and r")
        #
        # Checking that if S/K are iterables and sigma is vector, then sigma_axis == False
        #
        if iterable_S_or_K and iterable_sigma:
            if sigma_axis:
                raise NotImplementedError("x-axis already spanned by S/K, cannot be spanned by sigma.")
        #
        # Checking that if S/K are iterables and r is vector, then r_axis == False
        #
        if iterable_S_or_K and iterable_r:
            if r_axis:
                raise NotImplementedError("x-axis already spanned by S/K, cannot be spanned by r.")
        #
        # Homogenizing and checking each parameters
        #
        #
        # 1) Underlying value
        #
        # homogenize underlying in input
        S = homogenize(S)
        # checking whether any value in S is smaller than zero. Works if S is scalar too.
        if np.any(S < 0):
            warnings.warn("Warning: S = {} < 0 value encountered".format(S))
        #
        # 2) Strike price
        #
        # homogenize strike in input
        K = homogenize(K)
        # checking whether any value in K is smaller than zero. Works if K is scalar too.
        if np.any(K <= 0):
            warnings.warn("Warning: K = {} <= 0 value encountered".format(K))
        #
        # 3) Time parameter
        #
        time_name = "tau"
        # time parameter interpretation (and homogenization) according to its type
        # case 1: no time-parameter in input
        if time_param is None:
            tau = time_param = self.get_tau()
        # case 2: valid time-to-maturity in input
        elif is_numeric(time_param):
            time_param = homogenize(time_param, reverse_order=True)
            tau = time_param
        # case 3: valuation date in input, to be converted into time-to-maturity
        elif is_date(time_param):
            time_name = "t"
            time_param = homogenize(time_param, sort_func=date_string_to_datetime_obj)
            tau = self.time_to_maturity(t=time_param)
        # error case: the time parameter in input has a data-type that is not recognized
        else:
            raise TypeError("Type {} of input time parameter not recognized".format(type(time_param)))
        # checking whether any value in tau is smaller than zero. Works if tau is scalar too.
        if np.any(tau < 0):
            warnings.warn("Warning: tau = {} < 0 value encountered".format(tau))
        #
        # 4) Underlying volatility
        #
        # homogenize underlying volatility in input
        sigma = homogenize(sigma, sort=False)
        # We allow for deterministic dynamics (sigma==0), but we raise a warning anyway
        # if any value of sigma is smaller-or-equal than zero. Works if sigma is scalar too.
        if np.any(sigma <= 0):
            warnings.warn("Warning: sigma = {} <= 0 value encountered".format(sigma))
        #
        # 5) Short-rate
        #
        # homogenize short-rate in input
        r = homogenize(r, sort=False)
        # We allow for negative short rate, but we raise a warning anyway
        # if any value in r is smaller than zero. Works if r is scalar too.
        if np.any(r < 0):
            warnings.warn("Warning: r = {} < 0 value encountered".format(r))
        #
        # Coordinate parameters
        #
        # Case 0: all scalar parameters
        #
        # make the 4 parameters coordinated together as 1-dim np.ndarray
        # or pd.DataFrame
        if iterable_parameters == 0:
            coord_params = coordinate(x=S, y=tau,
                                      x_name="S", y_name=time_name,
                                      others_scalar={"K": K, "sigma": sigma, "r": r},
                                      np_output=np_output,
                                      col_labels=S, ind_labels=time_param)
        # Case 1: S (or K) and/or tau iterable parameters
        #
        # Make x-axis spanned by S, K, sigma or r, creating a (x-axis,time)
        # grid if both are iterable. Parameters sigma and r are either
        # distributed along the other(s) axes (shape-match is required) or can
        # span be used to span the x-axis too (sigma_axis or r_axis flags must
        # be set to True)
        elif iterable_S_or_K or iterable_tau:
            scalar_params = {}
            vector_params = {}
            # x-axis default setup: spanned by S
            x = S
            x_name = "S"
            x_col = S
            scalar_params["K"] = K
            if iterable_sigma:
                if sigma_axis:
                    # sigma takes over the x-axis; S becomes a scalar parameter
                    x = sigma
                    x_name = "sigma"
                    x_col = sigma
                    scalar_params["S"] = S
                else:
                    vector_params["sigma"] = sigma
            else:
                scalar_params["sigma"] = sigma
            if iterable_r:
                if r_axis:
                    # r takes over the x-axis; S becomes a scalar parameter
                    x = r
                    x_name = "r"
                    x_col = r
                    scalar_params["S"] = S
                else:
                    vector_params["r"] = r
            else:
                scalar_params["r"] = r
            if iterable_K:
                # K spans the x-axis instead of S
                x = K
                x_name = "K"
                x_col = K
                del scalar_params["K"]
                scalar_params["S"] = S
            coord_params = coordinate(x=x, y=tau,
                                      x_name=x_name, y_name=time_name,
                                      others_scalar=scalar_params,
                                      others_vector=vector_params,
                                      np_output=np_output,
                                      col_labels=x_col, ind_labels=time_param)
        # Case 2: sigma and/or r are iterable 1-dim vectors
        # and S, K and tau are both scalar
        elif iterable_sigma or iterable_r:
            # case 2.1: sigma and r are iterable 1-dim vectors
            #
            # make sigma and r coordinated np.ndarray or pd.DataFrames
            # creating a (sigma, r) grid and S, K and tau coordinated accordingly
            if iterable_sigma and iterable_r:
                coord_params = coordinate(x=sigma, y=r,
                                          x_name="sigma", y_name="r",
                                          others_scalar={"S": S, "K": K, time_name: tau},
                                          np_output=np_output,
                                          col_labels=sigma, ind_labels=r)
            # case 2.2: sigma is a 1-dim vector and r is scalar
            #
            # make sigma and tau coordinated np.ndarray or pd.DataFrames
            # and S, K and r coordinated accordingly
            elif iterable_sigma:
                coord_params = coordinate(x=sigma, y=tau,
                                          x_name="sigma", y_name=time_name,
                                          others_scalar={"S": S, "K": K, "r": r},
                                          np_output=np_output,
                                          col_labels=sigma, ind_labels=time_param)
            # case 2.3: r is a 1-dim vector and sigma is scalar
            #
            # make r and tau coordinated np.ndarray or pd.DataFrames
            # and S, K and sigma coordinated accordingly
            elif iterable_r:
                coord_params = coordinate(x=r, y=tau,
                                          x_name="r", y_name=time_name,
                                          others_scalar={"S": S, "K": K, "sigma": sigma},
                                          np_output=np_output,
                                          col_labels=r, ind_labels=time_param)
        # return coordinated parameters
        return {"S": coord_params["S"],
                "K": coord_params["K"],
                "tau": coord_params[time_name],
                "sigma": coord_params["sigma"],
                "r": coord_params["r"],
                "np_output": np_output}
def d1_and_d2(self, *args, **kwargs):
"""
Utility method to compute d1 and d2 terms of Black-Scholes pricing formula
"""
# parsing optional parameters
S = args[0] if len(args) > 0 else kwargs['S'] if 'S' in kwargs else self.get_S()
tau = args[1] if len(args) > 1 else kwargs['tau'] if 'tau' in kwargs else self.get_tau()
K = args[2] if len(args) > 2 else kwargs['K'] if 'K' in kwargs else self.get_K()
r = args[3] if len(args) > 3 else kwargs['r'] if 'r' in kwargs else self.get_r()
sigma = args[4] if len(args) > 4 else kwargs['sigma'] if 'sigma' in kwargs else self.get_sigma()
# compute d1 and d2
d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * tau) / (sigma * np.sqrt(tau))
d2 = d1 - sigma * np.sqrt(tau)
return d1, d2
#
# Template methods
#
# upper price limit template
    def price_upper_limit(self):
        # Template: concrete subclasses define their own upper price bound.
        raise NotImplementedError()

    # lower price limit template
    def price_lower_limit(self):
        # Template: concrete subclasses define their own lower price bound.
        raise NotImplementedError()
#
# Public methods
#
def payoff(self, *args, **kwargs):
"""
Calculates and returns the payoff of the option.
Usage example:
- example_options.py
- example_options_other_params.py
Can be called using (underlying,
strike-price),
signature, where:
- underlying can be specified either as the 1st positional argument or as keyboard argument 'S'.
It's value can be:
- Empty: .get_S() is used,
- A number (e.g. S=100),
- A List of numbers (allowed only if parameter 'K' is a scalar)
- strike-price can be specified as keyboard argument 'K'.
It's value can be:
- Empty: .get_K() is used,
- A number (e.g. K=100),
- A List of numbers (allowed only if parameter 'S' is a scalar)
"""
# process input parameters
param_dict = self.process_pricing_parameters(*args, **kwargs)
# underlying value and strike price
S = param_dict["S"]
K = param_dict["K"]
# call case
if self.get_type() == 'call':
return self.call_payoff(S=S, K=K)
# put case
else:
return self.put_payoff(S=S, K=K)
    def price(self, *args, **kwargs):
        """
        Calculates and returns the price of the option.
        Usage examples:
        - example_options.py
        - example_options_other_params.py
        Can be called using (underlying,
                             strike-price,
                             time-parameter,
                             sigma,
                             short-rate)
        signature, where:
        - underlying can be specified either as the 1st positional argument or as keyboard argument 'S'.
          It's value can be:
          - Empty: .get_S() is used,
          - A number (e.g. S=100),
          - A List of numbers (allowed only if parameter 'K' is a scalar)
        - strike-price can be specified as keyboard argument 'K'.
          It's value can be:
          - Empty: .get_K() is used,
          - A number (e.g. K=100),
          - A List of numbers (allowed only if parameter 'S' is a scalar)
        - time-parameter can be specified either as the 2nd positional argument or as keyboard argument 't' or 'tau'.
          If tau==0, the payoff of the option is returned, its price otherwise.
          It's value can be:
          - Empty: .get_tau() is used,
          - A single (e.g. t='15-05-2020') / Iterable (e.g. pd.date_range) valuation date(s):
            accepted types are either a 'dd-mm-YYYY' String or a dt.datetime object
          - A single (e.g. tau=0.5) / Iterable time-to-maturity value(s)
        - sigma can be specified as keyboard argument 'sigma'.
          It's value can be:
          - Empty: .get_sigma() is used,
          - A volatility value (e.g. 0.2 for 20% per year)
          - An iterable:
            - if sigma_axis == False (default): an iterable of the same
              shape of x-axis variable (S or K) x tau (if both the x-axis
              and tau are iterable variables) or of the same shape of x-axis
              (tau) if the x-axis (tau) is iterable but tau (x-axis) is scalar.
              In this case, volatility parameter is distributed along the
              other(s) vectorial dimension(s).
            - if sigma_axis == True: an iterable of arbitrary length.
              In this case, the x-axis dimension is spanned by sigma parameter.
              This setup is mutually exclusive w.r.t. to the r_axis == True
              setup.
        - short-rate can be specified as keyboard argument 'r'.
          It's value can be:
          - Empty: .get_r() is used,
          - A short-rate value (e.g. 0.05 for 5% per year)
          - An iterable:
            - if r_axis == False (default): an iterable of the same
              shape of x-axis variable (S or K) x tau (if both the x-axis
              and tau are iterable variables) or of the same shape of x-axis
              (tau) if the x-axis (tau) is iterable but tau (x-axis) is scalar.
              In this case, short-rate parameter is distributed along the
              other(s) vectorial dimension(s).
            - if r_axis == True: an iterable of arbitrary length.
              In this case, the x-axis dimension is spanned by the r parameter.
              This setup is mutually exclusive w.r.t. to the sigma_axis == True
              setup.
        """
        # process input parameters
        param_dict = self.process_pricing_parameters(*args, **kwargs)
        # underlying value, strike-price, time-to-maturity, volatility and short-rate
        S = param_dict["S"]
        K = param_dict["K"]
        tau = param_dict["tau"]
        sigma = param_dict["sigma"]
        r = param_dict["r"]
        np_output = param_dict["np_output"]
        #
        # for tau==0 output the payoff, otherwise price
        #
        if np_output:
            # initialize an empty structure to hold prices
            price = np.empty_like(S, dtype=float)
            # filter positive times-to-maturity (boolean mask over the grid)
            tau_pos = tau > 0
        else:
            # initialize an empty structure to hold prices
            price = pd.DataFrame(index=S.index, columns=S.columns)
            # filter positive times-to-maturity (row mask from the first column)
            tau_pos = tau.iloc[:, 0] > 0
        # call case
        if self.get_type() == 'call':
            # tau > 0 case: Black-Scholes price from the subclass
            price[tau_pos] = self.call_price(S=S[tau_pos], K=K[tau_pos], tau=tau[tau_pos], sigma=sigma[tau_pos],
                                             r=r[tau_pos])
            # tau == 0 case: price degenerates to the payoff
            price[~tau_pos] = self.call_payoff(S=S[~tau_pos], K=K[~tau_pos])
        # put case
        else:
            # tau > 0 case: Black-Scholes price from the subclass
            price[tau_pos] = self.put_price(S=S[tau_pos], K=K[tau_pos], tau=tau[tau_pos], sigma=sigma[tau_pos],
                                            r=r[tau_pos])
            # tau == 0 case: price degenerates to the payoff
            price[~tau_pos] = self.put_payoff(S=S[~tau_pos], K=K[~tau_pos])
        return price
def PnL(self, *args, **kwargs):
"""
Calculates and returns the P&L generated owning an option.
Usage example:
- example_options.py
- example_options_other_params.py
Can be called with the same signature of the .price() public method.
We distinguish two cases:
1) if tau==0, this is the P&L at option's expiration.
That is, the PnL if the option is kept until maturity.
It is computed as:
P&L = payoff - initial price
2) if tau > 0, this is the P&L as if the option position is closed before maturity,
when the time-to-maturity is tau. It is computed as:
P&L = current price - initial price
The choice between returning the payoff and current price is delegated
to .price() method.
"""
return self.price(*args, **kwargs) - scalarize(self.get_initial_price())
    def implied_volatility(self, *args, iv_estimated=0.25, epsilon=1e-8,
                           minimization_method="Newton", max_iter=100, **kwargs):
        """
        Calculates and returns the Black-Scholes Implied Volatility of the option.
        Usage example:
        - example_options.py
        - example_options_other_params.py
        - options_IV.py
        Implements two minimization routines:
        - Newton (unconstrained) method;
        - Least-Squares constrained method.
        Can be called with the same signature of the .price() public method
        with additional optional parameters:
        - iv_estimated: an initial guess for implied volatility;
        - target_price: target price to use for implied volatility calculation;
        - epsilon: minimization stopping threshold;
        - minimization_method: minimization method to use;
        - max_iter: maximum number of iterations (Newton only).
        """
        # preliminary consistency check: x-axis spanned by sigma makes IV ill-posed here
        if ('sigma_axis' in kwargs) and (kwargs['sigma_axis'] is True):
            raise NotImplementedError(
                ".implied_volatility() method not implemented for x-axis spanned by 'sigma' parameter.")
        # target price: defaults to the model price under current parameters
        target_price = kwargs["target_price"] if "target_price" in kwargs else self.price(*args, **kwargs)
        # shape of output IV
        output_shape = target_price.shape
        # delete "np_output" from kwargs if it exists, to do calculations
        # with np.ndarray (returns True if not in kwargs)
        np_output = kwargs.pop("np_output", True)
        # casting output as pd.DataFrame, if necessary
        # (target_price is a pd.DataFrame when np_output is False)
        if not np_output:
            ind_output = target_price.index
            col_output = target_price.columns
            target_price = target_price.values.squeeze()
        # delete "sigma" from kwargs if it exists, so sigma=iv can be injected below
        kwargs.pop("sigma", None)
        if minimization_method == "Newton":
            # initial guess for implied volatility: iv_{n} and iv_{n+1}
            # iv_{n+1} will be iteratively updated
            iv_np1 = coordinate_y_with_x(x=target_price, y=iv_estimated, np_output=True)
            # stopping criterion:
            #
            # - SRSR > epsilon threshold or
            # - maximum iterations exceeded
            #
            # where: SRSR is the Sum of Relative Squared Residuals between
            # n-th and (n+1)-th iteration solutions, defined as:
            #
            # SRSR = \Sum_{i} ((x_{n+1} - x_{n})/x_{n})**2 (NaN excluded)
            # SRSR is initialized at value greater than epsilon by construction
            SRSR = epsilon + 1
            # iterations counter initialized at 1
            iter_num = 1
            while (SRSR > epsilon) and (iter_num <= max_iter):
                # update last solution found
                iv_n = iv_np1
                # function to minimize at last solution found
                f_iv_n = self.price(*args, sigma=iv_n, **kwargs) - target_price
                # derivative w.r.t. to sigma (that is, Vega) at last solution found
                df_div_n = self.vega(*args, sigma=iv_n, factor=1.0, **kwargs)
                # new solution found (Newton step)
                iv_np1 = iv_n - f_iv_n / df_div_n
                # calculation of new value for stopping metrics
                SRSR = np.nansum(((iv_np1 - iv_n) / iv_n) ** 2)
                # iteration counter update
                iter_num += 1
            print("\nTermination value for Sum of Relative Squared Residuals \
                  \nbetween n-th and (n+1)-th iteration solutions metric \
                  \n(NaN excluded): {:.1E} (eps = {:.1E}). Iterations: {} \n"
                  .format(SRSR, epsilon, iter_num))
        elif minimization_method == "Least-Squares":
            #
            # See documentation for scipy.optimize.least_squares function
            #
            # at: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy
            # .optimize.least_squares
            #
            # minimization function (function of implied volatility only)
            f = lambda iv: (self.price(*args, sigma=iv, **kwargs) - target_price).flatten()
            # initial implied volatility guess (one per target-price element)
            iv0 = np.repeat(iv_estimated, repeats=target_price.size)
            # positivity bounds: iv > 0
            iv_bounds = (0.0, np.inf)
            # minimization method: Trust-Region-Reflective algorithm
            min_method = 'trf'
            # tolerance for termination by the change of the cost function
            cost_tolerance = kwargs["cost_tolerance"] if "cost_tolerance" in kwargs else 1e-12
            # tolerance for termination by the change of the solution found
            sol_tolerance = kwargs["sol_tolerance"] if "sol_tolerance" in kwargs else 1e-12
            # optimization
            res = sc_opt.least_squares(fun=f, x0=iv0, bounds=iv_bounds, method=min_method,
                                       ftol=cost_tolerance, xtol=sol_tolerance)
            # output message
            print("\nTermination message: " + res.message + " Success? {}".format(res.success))
            # optimal iv found
            iv_np1 = res.x
        # output reshape and cast as pd.DataFrame, if needed
        iv_np1 = iv_np1.reshape(output_shape)
        if not np_output:
            iv_np1 = pd.DataFrame(data=iv_np1, index=ind_output, columns=col_output)
        return iv_np1
    def delta(self, *args, **kwargs):
        """
        Calculates and returns the Delta of the option.
        (Docstring fix: the original said "Gamma"; the body dispatches to
        call_delta / put_delta.)
        Usage example:
        - example_options.py
        - example_options_other_params.py
        - options_numeric_analytic_greeks_comparison.py
        Can be called with the same signature of the .price() public method.
        """
        # process input parameters
        param_dict = self.process_pricing_parameters(*args, **kwargs)
        # underlying value, strike-price, time-to-maturity, volatility and short-rate
        S = param_dict["S"]
        K = param_dict["K"]
        tau = param_dict["tau"]
        sigma = param_dict["sigma"]
        r = param_dict["r"]
        # call case
        if self.get_type() == 'call':
            return self.call_delta(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # put case
        else:
            return self.put_delta(S=S, K=K, tau=tau, sigma=sigma, r=r)
def theta(self, *args, **kwargs):
    """
    Calculates and returns the Theta of the option.
    Usage example:
        - example_options.py
        - example_options_other_params.py
        - options_numeric_analytic_greeks_comparison.py
    Can be called with the same signature of the .price() public method.
    Optionally, the theta can be rescaled using the "factor" keyword parameter.
    By default it is scaled to consider variation of +1 calendar day of t (not +1 year).
    """
    # process input parameters
    param_dict = self.process_pricing_parameters(*args, **kwargs)
    # underlying value, strike-price, time-to-maturity, volatility and short-rate
    S = param_dict["S"]
    K = param_dict["K"]
    tau = param_dict["tau"]
    sigma = param_dict["sigma"]
    r = param_dict["r"]
    # rescaling factor (defaults to per-calendar-day theta)
    rescaling_factor = kwargs["factor"] if "factor" in kwargs else 1.0 / 365.0
    # call case
    if self.get_type() == 'call':
        return self.call_theta(S=S, K=K, tau=tau, sigma=sigma, r=r) * rescaling_factor
    # put case
    else:
        return self.put_theta(S=S, K=K, tau=tau, sigma=sigma, r=r) * rescaling_factor
def gamma(self, *args, **kwargs):
    """
    Calculates and returns the Gamma of the option.
    Usage example:
        - example_options.py
        - example_options_other_params.py
        - options_numeric_analytic_greeks_comparison.py
    Can be called with the same signature of the .price() public method.
    """
    # Normalize the caller's inputs into the canonical pricing parameters.
    params = self.process_pricing_parameters(*args, **kwargs)
    greek_inputs = {key: params[key] for key in ("S", "K", "tau", "sigma", "r")}
    # Dispatch on the option type; both branches take identical arguments.
    compute = self.call_gamma if self.get_type() == 'call' else self.put_gamma
    return compute(**greek_inputs)
def vega(self, *args, **kwargs):
    """
    Calculates and returns the Vega of the option.
    Usage example:
        - example_options.py
        - example_options_other_params.py
        - options_numeric_analytic_greeks_comparison.py
    Can be called with the same signature of the .price() public method.
    Optionally, the vega can be rescaled using the "factor" keyword parameter.
    By default it is scaled to consider variation of +1% of sigma (not +100%).
    """
    # process input parameters
    param_dict = self.process_pricing_parameters(*args, **kwargs)
    # underlying value, strike-price, time-to-maturity, volatility and short-rate
    S = param_dict["S"]
    K = param_dict["K"]
    tau = param_dict["tau"]
    sigma = param_dict["sigma"]
    r = param_dict["r"]
    # rescaling factor (defaults to vega per +1% volatility move)
    rescaling_factor = kwargs["factor"] if "factor" in kwargs else 0.01
    # call case
    if self.get_type() == 'call':
        return self.call_vega(S=S, K=K, tau=tau, sigma=sigma, r=r) * rescaling_factor
    # put case
    else:
        return self.put_vega(S=S, K=K, tau=tau, sigma=sigma, r=r) * rescaling_factor
def rho(self, *args, **kwargs):
    """
    Calculates and returns the Rho of the option.
    Usage example:
        - example_options.py
        - example_options_other_params.py
        - options_numeric_analytic_greeks_comparison.py
    Can be called with the same signature of the .price() public method.
    Optionally, the rho can be rescaled using the "factor" keyword parameter.
    By default it is scaled to consider variation of +1% of r (not +100%).
    """
    # Normalize the caller's inputs into the canonical pricing parameters.
    params = self.process_pricing_parameters(*args, **kwargs)
    greek_inputs = {key: params[key] for key in ("S", "K", "tau", "sigma", "r")}
    # Default scaling expresses rho per +1% move of the short-rate.
    scale = kwargs.get("factor", 0.01)
    # Dispatch on the option type; both branches take identical arguments.
    compute = self.call_rho if self.get_type() == 'call' else self.put_rho
    return compute(**greek_inputs) * scale
# -----------------------------------------------------------------------------#
class PlainVanillaOption(EuropeanOption):
    """
    PlainVanillaOption class implementing payoff and pricing of plain-vanilla call and put options.
    Inherits from EuropeanOption base-class. Put price is calculated using put-call parity.

    Attributes:
    -----------
        mkt_env (MarketEnvironment): Instance of MarketEnvironment class
        type (str): From 'type' attribute of EuropeanOption base class.
        S_t (float): 'S' attribute of mkt_env.
        K (float): From 'K' attribute of EuropeanOption base class.
        t (str; dt.datetime): 't' attribute of mkt_env.
        T (str; dt.datetime): From 'T' attribute of EuropeanOption base class.
        tau (float): time to maturity in years, computed as tau=T-t by time_to_maturity() method
        r (float): 'r' attribute of mkt_env.
        sigma (float): 'sigma' attribute of mkt_env.

    Public Methods:
    --------
        public methods inherited from EuropeanOption class
        price_upper_limit: float
            Overridden method. Returns the upper limit for a vanilla option price.
        price_lower_limit: float
            Overridden method. Returns the lower limit for a vanilla option price.

    Usage examples:
    --------
        - example_options.py
        - example_options_other_params.py
        - options_IV.py
        - options_numeric_analytic_greeks_comparison.py

    Instantiation
    --------
        - default: PlainVanillaOption(mkt_env) is equivalent to
            PlainVanillaOption(mkt_env, option_type='call', K=100.0, T="31-12-2020")
        - general: PlainVanillaOption(mkt_env, option_type='call' or 'put' String, K=Float, T="DD-MM-YYYY" String)
    where: mkt_env is a MarketEnvironment object.
    """

    # initializer with optional *args and **kwargs parameters
    def __init__(self, *args, **kwargs):
        # calling the EuropeanOption initializer
        super(PlainVanillaOption, self).__init__(*args, **kwargs)

        # info strings
        self.__info = r"Plain Vanilla {} [K={:.1f}, T={} (tau={:.2f}y)]".format(
            self.get_type(), self.get_K(),
            datetime_obj_to_date_string(self.get_T()), self.get_tau())
        self.__mkt_info = r"[S_t={:.1f}, r={:.1f}%, sigma={:.1f}%, t={}]".format(
            self.get_S(), self.get_r() * 100,
            self.get_sigma() * 100,
            datetime_obj_to_date_string(self.get_t()))

        # initial price of the option (as scalar value)
        self.__initial_price = self.price()

        # informations dictionary
        self.__docstring_dict = {
            'call': {
                'price_upper_limit': r"Upper limit: $S_t$",
                'payoff': r"Payoff: $max(S-K, 0)$",
                'price_lower_limit': r"Lower limit: $max(S_t - K e^{-r \tau}, 0)$"
            },
            'put': {
                'price_upper_limit': r"Upper limit: $K e^{-r \tau}$",
                'payoff': r"Payoff: $max(K-S, 0)$",
                'price_lower_limit': r"Lower limit: $max(K e^{-r \tau} - S_t, 0)$"}
        }

    def __repr__(self):
        return r"PlainVanillaOption('{}', S_t={:.1f}, K={:.1f}, t={}, T={}, tau={:.2f}y, r={:.1f}%, sigma={:.1f}%)". \
            format(self.get_type(), self.get_S(), self.get_K(), self.get_t().strftime("%d-%m-%Y"),
                   self.get_T().strftime("%d-%m-%Y"), self.get_tau(), self.get_r() * 100, self.get_sigma() * 100)

    #
    # getters
    #
    def get_info(self):
        return self.__info

    def get_mkt_info(self):
        return self.__mkt_info

    def get_initial_price(self):
        return self.__initial_price

    def get_docstring(self, label):
        return self.__docstring_dict[self.get_type()][label]

    #
    # Public methods
    #
    def call_payoff(self, S, K):
        """Plain-Vanilla call option payoff"""
        # Function np.maximum(arr, x) returns the array of the maximum
        # between each element of arr and x
        return np.maximum(S - K, 0.0)

    def put_payoff(self, S, K):
        """Plain-Vanilla put option payoff"""
        return np.maximum(K - S, 0.0)

    def price_upper_limit(self, *args, **kwargs):
        """
        Calculates and returns the upper limit of the Plain-Vanilla option price.
        Usage example:
            - example_options.py
            - example_options_other_params.py
        Can be called with the same signature of the .price() public method.
        """
        # process input parameters
        param_dict = self.process_pricing_parameters(*args, **kwargs)
        # underlying value, strike-price, time-to-maturity and short-rate
        S = param_dict["S"]
        K = param_dict["K"]
        tau = param_dict["tau"]
        r = param_dict["r"]
        if self.get_type() == 'call':
            # call case
            return self.call_price_upper_limit(S=S)
        else:
            # put case
            return self.put_price_upper_limit(K=K, tau=tau, r=r)

    def call_price_upper_limit(self, S):
        """Plain-Vanilla call option price upper limit"""
        return S

    def put_price_upper_limit(self, K, tau, r):
        """Plain-Vanilla put option price upper limit"""
        return K * np.exp(-r * tau)

    def price_lower_limit(self, *args, **kwargs):
        """
        Calculates and returns the lower limit of the Plain-Vanilla option price.
        Usage example:
            - example_options.py
            - example_options_other_params.py
        Can be called with the same signature of the .price() public method.
        """
        # process input parameters
        param_dict = self.process_pricing_parameters(*args, **kwargs)
        # underlying value, strike-price, time-to-maturity and short-rate
        S = param_dict["S"]
        K = param_dict["K"]
        tau = param_dict["tau"]
        r = param_dict["r"]
        # call case
        if self.get_type() == 'call':
            return self.call_price_lower_limit(S=S, K=K, tau=tau, r=r)
        # put case
        else:
            return self.put_price_lower_limit(S=S, K=K, tau=tau, r=r)

    def call_price_lower_limit(self, S, K, tau, r):
        """Plain-Vanilla call option price lower limit"""
        # Function np.maximum(arr, x) returns the array of the maximum
        # between each element of arr and x
        return np.maximum(S - K * np.exp(-r * tau), 0.0)

    def put_price_lower_limit(self, S, K, tau, r):
        """Plain-Vanilla put option price lower limit"""
        return np.maximum(K * np.exp(-r * tau) - S, 0.0)

    def call_price(self, S, K, tau, sigma, r):
        """Plain-Vanilla call option price"""
        # get d1 and d2 terms
        d1, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute price (Black-Scholes formula)
        price = S * stats.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * tau) * stats.norm.cdf(d2, 0.0, 1.0)
        return price

    def put_price(self, S, K, tau, sigma, r):
        """Plain-Vanilla put option price from Put-Call parity relation: Call + Ke^{-r*tau} = Put + S"""
        return self.call_price(S=S, K=K, tau=tau, sigma=sigma, r=r) + K * np.exp(-r * tau) - S

    def call_delta(self, S, K, tau, sigma, r):
        """Plain-Vanilla call option Delta"""
        # get d1 term
        d1, _ = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute delta
        delta = stats.norm.cdf(d1, 0.0, 1.0)
        # stats.norm.cdf down-cast pd.DataFrames to np.ndarray
        if isinstance(S, pd.DataFrame):
            delta = pd.DataFrame(data=delta, index=S.index, columns=S.columns)
        return delta

    def put_delta(self, S, K, tau, sigma, r):
        """Plain-Vanilla put option Delta"""
        return self.call_delta(S=S, K=K, tau=tau, sigma=sigma, r=r) - 1.0

    def call_theta(self, S, K, tau, sigma, r):
        """Plain-Vanilla call option Theta"""
        # get d1 and d2 terms
        d1, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute theta
        theta = - (S * sigma * stats.norm.pdf(d1, 0.0, 1.0) / (2.0 * np.sqrt(tau))) - r * K * np.exp(
            -r * tau) * stats.norm.cdf(d2, 0.0, 1.0)
        return theta

    def put_theta(self, S, K, tau, sigma, r):
        """Plain-Vanilla put option Theta"""
        # get d1 and d2 terms
        d1, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute theta
        theta = - (S * sigma * stats.norm.pdf(d1, 0.0, 1.0) / (2.0 * np.sqrt(tau))) + r * K * np.exp(
            -r * tau) * stats.norm.cdf(-d2, 0.0, 1.0)
        return theta

    def call_gamma(self, S, K, tau, sigma, r):
        """Plain-Vanilla call option Gamma"""
        # get d1 term
        d1, _ = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute gamma
        gamma = stats.norm.pdf(d1, 0.0, 1.0) / (S * sigma * np.sqrt(tau))
        return gamma

    def put_gamma(self, S, K, tau, sigma, r):
        """Plain-Vanilla put option Gamma"""
        # same as the call Gamma
        return self.call_gamma(S=S, K=K, tau=tau, sigma=sigma, r=r)

    def call_vega(self, S, K, tau, sigma, r):
        """Plain-Vanilla call option Vega"""
        # get d1 term
        d1, _ = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute vega
        vega = S * np.sqrt(tau) * stats.norm.pdf(d1, 0.0, 1.0)
        return vega

    def put_vega(self, S, K, tau, sigma, r):
        """Plain-Vanilla put option Vega"""
        # same as the call Vega
        return self.call_vega(S=S, K=K, tau=tau, sigma=sigma, r=r)

    def call_rho(self, S, K, tau, sigma, r):
        """Plain-Vanilla call option Rho"""
        # get d2 term
        _, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute rho
        rho = tau * K * np.exp(-r * tau) * stats.norm.cdf(d2, 0.0, 1.0)
        return rho

    def put_rho(self, S, K, tau, sigma, r):
        """Plain-Vanilla put option Rho"""
        # get d2 term
        _, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute rho
        rho = - tau * K * np.exp(-r * tau) * stats.norm.cdf(-d2, 0.0, 1.0)
        return rho
# -----------------------------------------------------------------------------#
class DigitalOption(EuropeanOption):
    """
    DigitalOption class implementing payoff and pricing of digital (cash-or-nothing, CON)
    call and put options.
    Inherits from EuropeanOption base-class. Put price is calculated using put-call parity.

    Attributes:
    -----------
        mkt_env (MarketEnvironment): Instance of MarketEnvironment class
        Q (float): cash amount paid when the option expires in the money
        type (str): From 'type' attribute of EuropeanOption base class.
        S_t (float): 'S' attribute of mkt_env.
        K (float): From 'K' attribute of EuropeanOption base class.
        t (str; dt.datetime): 't' attribute of mkt_env.
        T (str; dt.datetime): From 'T' attribute of EuropeanOption base class.
        tau (float): time to maturity in years, computed as tau=T-t by time_to_maturity() method
        r (float): 'r' attribute of mkt_env.
        sigma (float): 'sigma' attribute of mkt_env.

    Public Methods:
    --------
        public methods inherited from EuropeanOption class
        price_upper_limit: float
            Overridden method. Returns the upper limit for a CON option price.
        price_lower_limit: float
            Overridden method. Returns the lower limit for a CON option price.

    Usage examples:
    --------
        - example_options.py
        - example_options_other_params.py
        - options_IV.py
        - options_numeric_analytic_greeks_comparison.py

    Instantiation
    --------
        - default: DigitalOption(mkt_env) is equivalent to
            DigitalOption(mkt_env, cash_amount=1.0, option_type='call', K=100.0, T="31-12-2020")
        - general: DigitalOption(mkt_env, cash_amount=Float, option_type='call' or 'put' String, K=Float, T="DD-MM-YYYY"
            String)
    where: mkt_env is a MarketEnvironment object.
    """

    # initializer with optional *args and **kwargs parameters and default cash_amount
    # default keyword arguments (like cash_amount here) must go after args list argument in function def
    def __init__(self, *args, cash_amount=1.0, **kwargs):
        # calling the EuropeanOption initializer
        super(DigitalOption, self).__init__(*args, **kwargs)

        # amount of cash in case of payment
        self.__Q = cash_amount

        # info strings
        self.__info = r"CON {} [K={:.1f}, T={} (tau={:.2f}y), Q={:.1f}]".format(
            self.get_type(), self.get_K(),
            datetime_obj_to_date_string(self.get_T()), self.get_tau(),
            self.get_Q())
        self.__mkt_info = r"[S_t={:.1f}, r={:.1f}%, sigma={:.1f}%, t={}]".format(
            self.get_S(), self.get_r() * 100,
            self.get_sigma() * 100,
            datetime_obj_to_date_string(self.get_t()))

        # initial price of the option
        self.__initial_price = self.price()

        # informations dictionary
        self.__docstring_dict = {
            'call': {
                'price_upper_limit': r"Upper limit: $Q e^{-r \tau}$",
                'payoff': r"Payoff: $Q$ $I(S > K)$",
                'price_lower_limit': r"Lower limit: $0$"
            },
            'put': {
                'price_upper_limit': r"Upper limit: $Q e^{-r \tau}$",
                'payoff': r"Payoff: $Q$ $I(S \leq K)$",
                'price_lower_limit': r"Lower limit: $0$"}
        }

    def __repr__(self):
        return r"DigitalOption('{}', cash={:.1f}, S_t={:.1f}, K={:.1f}, t={}, T={}, tau={:.2f}y, r={:.1f}%, " \
               r"sigma={:.1f}%)".format(self.get_type(), self.get_Q(), self.get_S(), self.get_K(),
                                        self.get_t().strftime("%d-%m-%Y"), self.get_T().strftime("%d-%m-%Y"),
                                        self.get_tau(), self.get_r() * 100, self.get_sigma() * 100)

    #
    # getters
    #
    def get_info(self):
        return self.__info

    def get_mkt_info(self):
        return self.__mkt_info

    def get_Q(self):
        return self.__Q

    def get_initial_price(self):
        return self.__initial_price

    def get_docstring(self, label):
        return self.__docstring_dict[self.get_type()][label]

    #
    # setters
    #
    def set_Q(self, cash_amount):
        self.__Q = cash_amount

    #
    # Public methods
    #
    def call_payoff(self, S, K):
        """ CON call option payoff"""
        # Function np.heaviside(arr, x) returns:
        #
        # 0 if arr < 0
        # x if arr == 0
        # 1 if arr > 0
        return np.heaviside(S - K, 0.0)

    def put_payoff(self, S, K):
        """ CON put option payoff"""
        # second argument 1.0 makes the payoff 1 at S == K, i.e. I(S <= K)
        return np.heaviside(K - S, 1.0)

    def price_upper_limit(self, *args, **kwargs):
        """
        Calculates and returns the upper limit of the CON option price.
        Usage example:
            - example_options.py
            - example_options_other_params.py
        Can be called with the same signature of the .price() public method.
        """
        # process input parameters
        param_dict = self.process_pricing_parameters(*args, **kwargs)
        # time-to-maturity and short-rate
        tau = param_dict["tau"]
        r = param_dict["r"]
        # the same for call and put
        return self.get_Q() * np.exp(-r * tau)

    def price_lower_limit(self, *args, **kwargs):
        """
        Calculates and returns the lower limit of the CON option price.
        Usage example:
            - example_options.py
            - example_options_other_params.py
        Can be called with the same signature of the .price() public method.
        """
        # process input parameters
        param_dict = self.process_pricing_parameters(*args, **kwargs)
        # underlying value
        S = param_dict["S"]
        # the same for call and put: zero, shaped like S
        return 0.0 * S

    def call_price(self, S, K, tau, sigma, r):
        """ CON call option Black-Scholes price"""
        Q = self.get_Q()
        # get d2 term
        _, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute price
        price = Q * np.exp(-r * tau) * stats.norm.cdf(d2, 0.0, 1.0)
        return price

    def put_price(self, S, K, tau, sigma, r):
        """ CON put option price from Put-Call parity relation: CON_Call + CON_Put = Qe^{-r*tau}"""
        return self.get_Q() * np.exp(- r * tau) - self.call_price(S=S, K=K, tau=tau, sigma=sigma, r=r)

    def call_delta(self, S, K, tau, sigma, r):
        """ CON call option Black-Scholes Delta"""
        Q = self.get_Q()
        # get d2 term
        _, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute delta
        delta = Q * np.exp(-r * tau) * stats.norm.pdf(d2, 0.0, 1.0) / (S * sigma * np.sqrt(tau))
        return delta

    def put_delta(self, S, K, tau, sigma, r):
        """ CON put option Black-Scholes Delta"""
        return - self.call_delta(S=S, K=K, tau=tau, sigma=sigma, r=r)

    def call_theta(self, S, K, tau, sigma, r):
        """ CON call option Black-Scholes Theta"""
        Q = self.get_Q()
        # get d1 and d2 terms
        d1, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute theta
        theta = Q * np.exp(- r * tau) * (
                ((d1 * sigma * np.sqrt(tau) - 2.0 * r * tau) / (2.0 * sigma * tau * np.sqrt(tau))) *
                stats.norm.pdf(d2, 0.0, 1.0) + r * stats.norm.cdf(d2, 0.0, 1.0))
        return theta

    def put_theta(self, S, K, tau, sigma, r):
        """ CON put option Black-Scholes Theta"""
        Q = self.get_Q()
        return - self.call_theta(S=S, K=K, tau=tau, sigma=sigma, r=r) + r * Q * np.exp(- r * tau)

    def call_gamma(self, S, K, tau, sigma, r):
        """ CON call option Black-Scholes Gamma"""
        Q = self.get_Q()
        # get d1 and d2 terms
        d1, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute gamma
        gamma = - (d1 * Q * np.exp(- r * tau) * stats.norm.pdf(d2, 0.0, 1.0)) / (S * S * sigma * sigma * tau)
        return gamma

    def put_gamma(self, S, K, tau, sigma, r):
        """ CON put option Black-Scholes Gamma"""
        return - self.call_gamma(S=S, K=K, tau=tau, sigma=sigma, r=r)

    def call_vega(self, S, K, tau, sigma, r):
        """ CON call option Black-Scholes Vega"""
        Q = self.get_Q()
        # get d1 and d2 terms
        d1, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute vega
        vega = - (d1 * Q * np.exp(- r * tau) * stats.norm.pdf(d2, 0.0, 1.0)) / sigma
        return vega

    def put_vega(self, S, K, tau, sigma, r):
        """ CON put option Black-Scholes Vega"""
        return - self.call_vega(S=S, K=K, tau=tau, sigma=sigma, r=r)

    def call_rho(self, S, K, tau, sigma, r):
        """CON call option Rho """
        Q = self.get_Q()
        # get d2 term
        _, d2 = self.d1_and_d2(S=S, K=K, tau=tau, sigma=sigma, r=r)
        # compute rho
        rho = Q * np.exp(- r * tau) * (
                ((np.sqrt(tau) * stats.norm.pdf(d2, 0.0, 1.0)) / sigma) - tau * stats.norm.cdf(d2, 0.0, 1.0))
        return rho

    def put_rho(self, S, K, tau, sigma, r):
        """CON put option Rho """
        Q = self.get_Q()
        return - self.call_rho(S=S, K=K, tau=tau, sigma=sigma, r=r) - tau * Q * np.exp(- r * tau)
|
# pylint: disable=missing-docstring,function-redefined
import json
from behave import given, then, when
from features.steps import s4s, utils
from testsuite import systems
ERROR_ENTRY_COUNT = "Found {count} entries."
ERROR_FIELD_MISSING = "Resource field '{field_name}' is missing."
ERROR_FIELD_UNEXPECTED_VALUE = """
Resource field '{field_name}' does not match expected '{expected}', got '{actual}'.
"""
ERROR_NO_ACCESS = 'Could not fetch Patient demographics'
ERROR_NO_NEXT_LINK = 'Link with relation "next" not found.'
ERROR_UNRESOLVED_REFERENCE = "Reference '{reference}' failed to resolve."
def assert_field_exists(context, resource, field_name):
    """Assert that *field_name* is present (non-None) on *resource*."""
    value = resource.get(field_name)
    assert value is not None, \
        utils.bad_response_assert(context.response,
                                  ERROR_FIELD_MISSING,
                                  field_name=field_name)
def assert_field_value_matches(context, resource, field_name, expected_value):
    """Assert that resource[field_name] equals *expected_value*."""
    actual = resource[field_name]
    assert actual == expected_value, \
        utils.bad_response_assert(context.response,
                                  ERROR_FIELD_UNEXPECTED_VALUE,
                                  field_name=field_name,
                                  expected=expected_value,
                                  actual=actual)
@given(u'I have access to Patient demographics')
def step_impl(context):
    """Fetch demographics for the configured patient and require HTTP 200."""
    patient_id = context.vendor_config['versioned_api'].get('patient')
    query = s4s.MU_CCDS_MAPPINGS['Patient demographics'].format(patientId=patient_id)
    response = utils.get_resource(context, query)
    assert response.status_code == 200, \
        utils.bad_response_assert(response,
                                  ERROR_NO_ACCESS)
@when('I follow the "next" link')
def step_impl(context):
    """Follow the Bundle's "next" paging link, skipping the scenario when absent.

    Bug fix: the original used ``len(urls) is not 1`` — an identity comparison
    on an int, which is implementation-defined (and a SyntaxWarning on
    Python 3.8+). Value inequality is the correct test.
    """
    resource = context.response.json()
    links = resource.get('link', [])
    urls = [link['url'] for link in links
            if link['relation'] == 'next']
    if len(urls) != 1:
        context.scenario.skip(reason=ERROR_NO_NEXT_LINK)
        return
    context.response = utils.get_resource(context, urls[0])
@then('all the codes will be valid')
def step_impl(context):
    """Validate every coding in the response against the known code systems.

    Records each coding on context.scenario.systems and fails listing the
    codings that are empty or invalid.  Codings whose system is not
    recognized cannot be invalidated and so are counted as valid, but are
    flagged recognized=False.
    """
    resource = context.response.json()
    if resource['resourceType'] == 'Bundle':
        # Only consider entries returned as search matches.
        entries = [entry['resource'] for entry in resource.get('entry', [])
                   if entry.get('search', {}).get('mode', 'match') == 'match']
    else:
        entries = [resource]
    context.scenario.systems = []
    bad_codings = []
    for entry in entries:
        found = utils.find_named_key(entry, 'coding')
        for codings in found:
            for coding in codings:
                if not coding:
                    # Empty/falsy coding objects are reported as bad.
                    bad_codings.append(coding)
                    continue
                try:
                    valid = systems.validate_coding(coding)
                    recognized = True
                except systems.SystemNotRecognized:
                    # Unknown system: treat as valid but unrecognized.
                    valid = True
                    recognized = False
                context.scenario.systems.append({
                    'system': coding.get('system'),
                    'code': coding.get('code'),
                    'valid': valid,
                    'recognized': recognized,
                })
                if not valid:
                    bad_codings.append(coding)
    assert not bad_codings, \
        utils.bad_response_assert(context.response,
                                  'Bad codings: {codings}',
                                  codings=json.dumps(bad_codings, indent=2))
@then('the {field_name} field will be the queried ID')
def step_impl(context, field_name):
    """Assert the given field equals the patient ID used in the query."""
    body = context.response.json()
    expected_id = context.vendor_config['versioned_api'].get('patient')
    assert_field_exists(context, body, field_name)
    assert_field_value_matches(context, body, field_name, expected_id)
@then('the {field_name} field will be {value}')
def step_impl(context, field_name, value):
    """Assert the given field is present and equals the literal value."""
    body = context.response.json()
    assert_field_exists(context, body, field_name)
    assert_field_value_matches(context, body, field_name, value)
@then('the {field_name} field will exist')
def step_impl(context, field_name):
    """Assert the given field is present on the response resource."""
    assert_field_exists(context, context.response.json(), field_name)
@then('all references will resolve')
def step_impl(context):
    """Resolve every reference found in the response's matched resources."""
    if context.vendor_skip:
        return
    body = context.response.json()
    if body['resourceType'] != 'Bundle':
        matched = [body]
    else:
        # Only consider entries returned as search matches.
        matched = [item['resource'] for item in body.get('entry', [])
                   if item.get('search', {}).get('mode', 'match') == 'match']
    for resource in matched:
        for reference in utils.find_named_key(resource, 'reference'):
            check_reference(reference, resource, context)
@then('there should be at least 1 {resource_type} entry')
def step_impl(context, resource_type):
    """Fail unless the Bundle holds at least one matched entry of the type."""
    body = context.response.json()
    matched = [item['resource'] for item in body.get('entry', [])
               if item.get('search', {}).get('mode', 'match') == 'match' and
               item.get('resource', {}).get('resourceType') == resource_type]
    assert matched, \
        utils.bad_response_assert(context.response,
                                  ERROR_ENTRY_COUNT,
                                  count=len(matched))
@given('there is at least 1 {resource_type} entry')
def step_impl(context, resource_type):
    """Skip the scenario when the Bundle has no matched entry of the type."""
    body = context.response.json()
    matched = [item['resource'] for item in body.get('entry', [])
               if item.get('search', {}).get('mode', 'match') == 'match' and
               item.get('resource', {}).get('resourceType') == resource_type]
    if not matched:
        context.scenario.skip(reason=ERROR_ENTRY_COUNT.format(count=0))
@then('all resources will have a {field_name} field')
def step_impl(context, field_name):
    """Assert the field is present on every matched resource in the response."""
    body = context.response.json()
    if body['resourceType'] == 'Bundle':
        # Only consider entries returned as search matches.
        matched = [item['resource'] for item in body.get('entry', [])
                   if item.get('search', {}).get('mode', 'match') == 'match']
    else:
        matched = [body]
    for resource in matched:
        assert_field_exists(context, resource, field_name)
def check_reference(reference, orig, context):
    """ Follow references and make sure they exist.
    Args:
        reference (str): A reference in the format:
            * Resource/id
            * http://example.com/base/Resource/id
            * #id
        orig (dict): The original resource, used when checking contained references.
        context: The behave context
    """
    if reference.startswith('#'):
        # Contained reference: must match exactly one resource in
        # orig['contained'] by its id (reference minus the leading '#').
        matches = [contained for contained in orig.get('contained', [])
                   if contained['id'] == reference[1:]]
        assert len(matches) == 1, \
            utils.bad_response_assert(context.response,
                                      ERROR_UNRESOLVED_REFERENCE,
                                      reference=reference)
    else:
        # Relative or absolute reference: fetch it and require HTTP 200.
        response = utils.get_resource(context, reference)
        assert int(response.status_code) == 200, \
            utils.bad_response_assert(context.response,
                                      ERROR_UNRESOLVED_REFERENCE,
                                      reference=reference)
|
from threading import Thread
from threading import Timer
#This problem was asked by Apple.
#Implement a job scheduler which takes in a function f and an integer n, and calls f after n milliseconds.
def cons(a, b):
    """Return a Church-encoded pair holding a and b.

    The pair is a closure that feeds both stored values to whatever
    selector function it is later called with.
    """
    def apply_selector(selector):
        return selector(a, b)
    return apply_selector
def car(pair):
    """Return a zero-argument function that prints the first element of *pair*.

    Note: unlike the classic Lisp car this does not return the value; the
    printing is deferred so the result can be handed to the scheduler below.

    Bug fix: the original used the Python 2 ``print`` statement, a syntax
    error under Python 3; the ``print(...)`` call form works on both.
    """
    def inner_pair():
        print(pair(lambda a, b: a))
    return inner_pair
def print_something():
    """Print a fixed greeting.

    Bug fix: the original used the Python 2 ``print`` statement, a syntax
    error under Python 3; the ``print(...)`` call form works on both.
    """
    print("mates")
def shedule(f, ms):
    """Call f once after ms milliseconds without blocking the caller.

    Bug fixes:
    - true division: under Python 2, ``ms /= 1000`` on an int floors to 0
      for any ms < 1000, firing the timer immediately instead of after the
      requested delay.
    - threading.Timer is itself a Thread, so the original's extra wrapper
      Thread (started just to start the Timer) was redundant and is removed.
    """
    seconds = ms / 1000.0
    Timer(seconds, f).start()
# Demo: schedule printing the pair's first element after 1000 ms,
# and printing "mates" after 1 ms.
shedule(car(cons(1,2)), 1000)
shedule(print_something, 1)
|
#!/usr/bin/python3.6
import pymysql
import yaml
import multiprocessing
class execMysql:
    """Runs a fixed ALTER TABLE statement against every configured project DB."""

    def __init__(self):
        # Context manager guarantees the config file handle is closed, and
        # safe_load avoids executing arbitrary YAML tags (yaml.load without a
        # Loader is unsafe and raises TypeError on PyYAML >= 6).
        with open('./mysql.yaml', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

    def executeOne(self, config, project):
        """Apply the schema change to a single project database.

        project is a sequence: [name, host, user, password, ...]; note the
        positional connect() call reuses project[2] as the database name —
        TODO confirm the user name doubling as schema name is intended.
        """
        db = pymysql.connect(project[1], project[2], project[3], project[2], charset='utf8')
        try:
            cursor = db.cursor(pymysql.cursors.DictCursor)
            sql = """ALTER TABLE `t_loan` ADD COLUMN `close_time` datetime(0) DEFAULT NULL COMMENT 'loan close time' """
            res = cursor.execute(sql)
            result = cursor.fetchall()
            db.commit()
            cursor.close()
        finally:
            # Always release the connection, even when the DDL fails.
            db.close()
        print("<<< %s 影响行数: %s " %(project[0], res) )
        print(result)
        print("\n")

    def execute(self):
        """Run executeOne for each configured project, one process at a time."""
        for project in self.config['projects']:
            my_process = multiprocessing.Process(target=self.executeOne, args=(self.config, project,))
            my_process.start()
            # join() right after start() serializes the runs: processes are
            # used here for isolation, not parallelism.
            my_process.join()
        print("\n************* all cmd finished ***************")
if __name__ == '__main__':
    # Renamed from `exec`: do not shadow the built-in exec() function.
    runner = execMysql()
    runner.execute()
|
from util import mean
class Eye:
    """Servo-driven eye position smoothed with a short moving-average filter."""

    def __init__(self):
        # Current (smoothed) servo angles.
        self.x = 90
        self.y = 90
        # Servo travel limits, in degrees.
        self.xMin = 0
        self.xMax = 180
        self.yMin = 0
        self.yMax = 180
        # Moving-average window length.
        self._m = 3
        # Recent raw samples awaiting averaging.
        self._x_list = []
        self._y_list = []

    def SetBounds(self, xMin, xMax, yMin, yMax):
        """Configure the servo travel limits."""
        self.xMin, self.xMax = xMin, xMax
        self.yMin, self.yMax = yMin, yMax

    def SetPosFromRange(self, tx, ty):
        """Map normalized coordinates (0..1) into the bounds and smooth them."""
        raw_x = (self.xMax - self.xMin) * tx + self.xMin
        raw_y = (self.yMax - self.yMin) * ty + self.yMin
        self._x_list.append(raw_x)
        self._y_list.append(raw_y)
        # Only publish once the window is full; then slide it forward.
        if len(self._x_list) > self._m:
            self.x = mean(self._x_list)
            self.y = mean(self._y_list)
            self._x_list.pop(0)
            self._y_list.pop(0)
class Mouth:
    """Servo-driven mouth position smoothed with a moving-average filter."""

    def __init__(self):
        # Current (smoothed) servo angle.
        self.pos = 90
        # Servo travel limits, in degrees.
        self.minPos = 0
        self.maxPos = 180
        # Moving-average window length.
        self._m = 1
        # Recent raw samples awaiting averaging.
        self._pos_list = []

    def SetBounds(self, minPos, maxPos):
        """Configure the servo travel limits."""
        self.minPos, self.maxPos = minPos, maxPos

    def SetPosFromRange(self, t):
        """Map a normalized value t (0..1, inverted) into the bounds and smooth it."""
        raw = (self.maxPos - self.minPos) * (1 - t) + self.minPos
        self._pos_list.append(raw)
        # Only publish once the window is full; then slide it forward.
        if len(self._pos_list) > self._m:
            self.pos = mean(self._pos_list)
            self._pos_list.pop(0)
class Neck:
    """Servo-driven neck (roll + yaw) smoothed with a moving-average filter."""

    def __init__(self):
        # Current (smoothed) servo angles.
        self.roll = 90
        self.yaw = 90
        # Servo travel limits, in degrees.
        self.minRoll = 0
        self.maxRoll = 180
        self.minYaw = 0
        self.maxYaw = 90
        # Moving-average window length.
        self._m = 6
        # Recent raw samples awaiting averaging.
        self._roll_list = []
        self._yaw_list = []

    def SetBounds(self, minRoll, maxRoll, minYaw, maxYaw):
        """Configure the travel limits for both axes."""
        self.minRoll, self.maxRoll = minRoll, maxRoll
        self.minYaw, self.maxYaw = minYaw, maxYaw

    def SetPosFromRange(self, troll, tyaw):
        """Map normalized roll/yaw (0..1; roll inverted) into bounds and smooth."""
        raw_roll = (self.maxRoll - self.minRoll) * (1 - troll) + self.minRoll
        raw_yaw = (self.maxYaw - self.minYaw) * tyaw + self.minYaw
        self._roll_list.append(raw_roll)
        self._yaw_list.append(raw_yaw)
        # Only publish once the window is full; then slide it forward.
        if len(self._roll_list) > self._m:
            self.roll = mean(self._roll_list)
            self.yaw = mean(self._yaw_list)
            self._roll_list.pop(0)
            self._yaw_list.pop(0)
class RoboFace:
    """Aggregates the face actuators and serializes their state for the robot."""

    def __init__(self):
        self.rightEye = Eye()
        self.leftEye = Eye()
        self.mouth = Mouth()
        self.neck = Neck()

    def Serialize(self):
        """Render the actuator state as the newline-terminated line protocol."""
        lines = (
            f"RE {self.rightEye.x} {self.rightEye.y}\n"
            f"LE {self.leftEye.x} {self.leftEye.y}\n"
            f"M {self.mouth.pos}\n"
            f"N {self.neck.roll} {self.neck.yaw}\n"
        )
        return lines
|
from random import randint
class Die():
    """A fair die with a configurable number of sides."""

    def __init__(self, sides=6):
        # Number of faces; defaults to a standard six-sided die.
        self.sides = sides

    def roll_die(self):
        """Return a uniformly random face value between 1 and self.sides."""
        return randint(1, self.sides)
# The original repeated the same roll-and-print sequence three times with
# only the side count changing; one loop produces identical output.
for sides in (6, 10, 20):
    print("{} sides Die follows(10 times):\n".format(sides))
    die = Die(sides)
    for _ in range(10):
        print(die.roll_die())
|
def projectPartners(n):
    """ project_partners == PEP8 (forced mixedCase by CodeWars)

    Return the number of unordered pairs among n people: C(n, 2).
    Integer division keeps the (always integral) count an int instead of
    the float that true division `/ 2` produced.
    """
    return n * (n - 1) // 2
|
"""byte-pysqlite - executor transaction module."""
from __future__ import absolute_import, division, print_function
from byte.executors.core.models.database import DatabaseTransaction
from byte.executors.pysqlite.models.cursor import PySqliteCursor
class PySqliteTransaction(DatabaseTransaction, PySqliteCursor):
    """PySQLite transaction class.

    Combines the generic DatabaseTransaction behaviour with the PySqliteCursor
    mixin: `self.instance` holds the cursor (closed by close()), while
    `self.connection.instance` is the underlying sqlite connection used for
    commit/rollback.
    """

    def begin(self):
        """Begin transaction."""
        # Explicit BEGIN so a transaction is opened immediately instead of
        # relying on the driver's implicit transaction handling.
        self.instance.execute('BEGIN;')

    def commit(self):
        """Commit transaction."""
        self.connection.instance.commit()

    def rollback(self):
        """Rollback transaction."""
        self.connection.instance.rollback()

    def close(self):
        """Close transaction."""
        # Close cursor and drop the reference so it cannot be reused.
        self.instance.close()
        self.instance = None

        # Close transaction
        super(PySqliteTransaction, self).close()
|
import os.path
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError
from ... import models
class Command(BaseCommand):
    # Bug fix: the original help text said "Adds the given filesystem path as
    # a backup root", but handle() deletes the root entry ("Path removed from
    # backup set"), so the description now matches the behavior.
    help = "Removes the given filesystem path from the backup roots"

    def add_arguments(self, parser):
        # Positional filesystem path of the backup root to remove.
        parser.add_argument("root", type=str)

    def handle(self, *args, **kwargs):
        """Remove the backup-root FSEntry matching the given path.

        Raises CommandError when the path is not being backed up, or is not
        a root (i.e. it has a parent entry).
        """
        root_path = os.path.abspath(kwargs.pop("root"))
        try:
            entry = models.FSEntry.objects.get(
                path=root_path,
            )
        except models.FSEntry.DoesNotExist:
            raise CommandError("Path not being backed up: {}".format(root_path))

        if entry.parent_id is not None:
            raise CommandError("Path not a backup root: {}".format(root_path))

        entry.delete()
        self.stdout.write("Path removed from backup set: {}".format(root_path))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Projeto e Analise de Algoritmos (PAA)
# 2o sem/2018 - IFMG - Campus Formiga
# Pedro Henrique Oliveira Veloso (0002346)
# Saulo Ricardo Dias Fernandes (0021581)
from player import *;
from piece import *;
from move import *;
from board import *;
from dawgMin import *;
from playerIA import *;
class Game():
def __init__(self, boardFile, dawgFile):
    """Set up the word dictionary, board, tile bag and (unset) players."""
    # DAWG word dictionary used to validate plays.
    self.dict = load(dawgFile).root
    self.board = Board(boardFile, self.dict)
    # Tile bag: 120 tiles total; '#' is the blank tile.
    self.nPieces = 120
    self.pieces = {'#': 3, 'a': 14, 'e': 11, 'i': 10, 'o': 10,
                   's': 8, 'u': 7, 'm': 6, 'r': 6, 't': 5,
                   'd': 5, 'l': 5, 'c': 4, 'p': 4, 'n': 4,
                   'b': 3, 'ç': 2, 'f': 2, 'g': 2, 'h': 2,
                   'v': 2, 'j': 2, 'q': 1, 'x': 1, 'z': 1}
    # Players are assigned later by setupPlayers(), called from start().
    self.player1 = None
    self.player2 = None
def start(self):
# Definir jogador Humano/Computador:
opcao = 0;
while(opcao < 1) or (opcao > 3):
try:
opcao = int(input("\nSCRABBLE\n1 - Jogador vs Jogador.\n2 - Jogador vs Com\n3 - Com vs Com\n\nOpcao: "));
except ValueError:
opcao = 0;
print("");
self.setupPlayers(opcao);
# Loop de jogadas:
self.run();
def run(self):
""" Loop de jogo """
firstPlay = True;
while(True):
# Mostra o estado atual do jogo:
self.showBoard();
# Faz a jogada do jogador atual:
(pecasTrocar, jogada) = self.turn.play(firstPlay);
# Mostra a jogada feita:
self.showMove(jogada);
# Determina se nao e mais a primeira jogada:
if(jogada is not None) and (firstPlay):
firstPlay = False;
# Se o jogador passar o turno
# troca as pecas da mao que ele pedir para trocar:
self.changePieces(pecasTrocar, self.turn);
# Mantem a mao do jogador com 7 pecas:
self.fillHand(self.turn);
# Passa o turno do jogador atual:
self.changeTurn();
# Checa se o jogo chegou ao fim (ambos os jogadores passaram 2x):
gameover = self.isGameOver();
if(gameover):
self.showBoard();
print("\n\tFim do jogo.\n");
print("Jogador " + str(self.player1) + ". Rack: " + str(self.player1.showHand()));
print("Jogador " + str(self.player2) + ". Rack: " + str(self.player2.showHand()) + "\n");
return;
def fillHand(self, player):
""" Sorteia uma mao inicial e a retorna como dicionario de Pieces."""
res = player.hand;
i = player.handSize();
while(i < 7):
# Checa se existem pecas no saquinho:
if(self.nPieces == 0):
return;
# Sorteia uma peca aleatoria:
(piece, qtd) = random.choice(list(self.pieces.items()));
if(qtd > 0):
# Remove a peca do saquinho:
self.pieces[piece] -= 1;
self.nPieces -= 1;
i += 1;
# Adiciona a peca a mao:
res[piece].quantity += 1;
def changePieces(self, pieces, player):
""" Troca as pecas selecionadas pelo jogador.
No final da troca passa o turno.
Se nao for informada nenhuma peca, apenas passa o turno.
"""
# Checa se existem pecas suficiente para fazer a troca:
qtdPedida = 0;
qtdSaquinho = 0;
for piece, quantity in self.pieces.items():
if quantity > 0:
qtdSaquinho += quantity;
for piece, quantity in pieces.items():
if quantity > 0:
qtdPedida += quantity;
# Foram pedidas mais pecas do que existe no saquinho:
if(qtdPedida > qtdSaquinho):
print("Nao existem pecas suficientes para fazer a troca.");
return;
for piece, quantity in pieces.items():
# Sorteia uma nova peca aleatoria:
(new, qtd) = random.choice(list(self.pieces.items()));
# Garante que foi sorteada uma peca que esta no saquinho:
while(qtd == 0):
(new, qtd) = random.choice(list(self.pieces.items()));
# Adiciona a nova peca a mao do jogador:
player.hand[new].quantity += 1;
# Adiciona a peca antiga ao saquinho do jogo:
self.pieces[piece] += 1;
return;
def showBoard(self):
""" Mostra o estado atual do tabuleiro e os dados dos jogadores. """
self.board.show(self.player1, self.player2);
print(self.player1.__str__() + "\t" + self.turn.showHand() + "\t" + self.player2.__str__());
def changeTurn(self):
""" Troca o turno do jogador atual.
Reseta as informacoes utilizadas para criar a jogada.
"""
if(self.turn == self.player1):
self.turn = self.player2;
self.player1.reset();
else:
self.turn = self.player1;
self.player2.reset();
def setupPlayers(self, opcao):
""" Define os jogadores humano/computador e da a mao inicial. """
if(opcao == 1):
self.player1 = Player("JOGADOR 1", self.board, self.dict);
self.player2 = Player("JOGADOR 2", self.board, self.dict);
elif(opcao == 2):
self.player1 = Player("JOGADOR", self.board, self.dict);
self.player2 = PlayerIA("COMPUTADOR", self.board, self.dict);
else:
self.player1 = PlayerIA("COM1", self.board, self.dict);
self.player2 = PlayerIA("COM2", self.board, self.dict);
self.fillHand(self.player1);
self.fillHand(self.player2);
# Sorteia um jogador para comecar:
if(random.choice([True, False])):
self.turn = self.player1;
else:
self.turn = self.player2;
def showMove(self, move):
if(move is not None):
print("\n# O jogador " + self.turn.name + " colocou '" + move.getWords() + "' por " + str(move.value) + " pontos.\n");
else:
print("\n# O jogador " + self.turn.name + " passou o turno.\n");
def isGameOver(self):
""" Determina quando o jogo acaba. """
# Se nao houve mais pedras no saquinho:
if(self.nPieces == 0):
# Um jogador ficar sem letras, ou os dois jogadores passarem duas vezes:
if((self.player1.handSize() == 0) or (self.player2.handSize() == 0) or
((self.player1.nPass >= 2) and (self.player2.nPass >= 2))):
return True;
return False;
if __name__ == "__main__":
    # Entry point: build a game from the board layout and DAWG dictionary.
    game = Game("board.txt", "dict.dawg")
    game.start()
"""
class Node:
def __init__(self, freq,data):
self.freq= freq
self.data=data
self.left = None
self.right = None
"""
# Enter your code here. Read input from STDIN. Print output to STDOUT
def decodeHuff(root, s):
    """Decode the bit string *s* by walking the Huffman tree from *root*.

    Internal nodes carry data '\\0'; leaves carry the decoded character.
    Prints the decoded string (the HackerRank contract) and also returns it
    so callers can use the value directly.
    """
    result = ''
    bits = list(s)
    n = root
    while bits:
        if n.data != '\0':
            # Reached a leaf: emit its symbol and restart from the root
            # without consuming a bit.
            result += n.data
            n = root
        elif bits.pop(0) == '1':
            n = n.right
        else:
            n = n.left
    # The last symbol is reached after the final bit is consumed.
    if n.data != '\0':
        result += n.data
    # BUG FIX: `print result` is Python 2 syntax and a SyntaxError on
    # Python 3; the call form works on both.
    print(result)
    return result
|
import graphene
import logging
import dotenv
import iso8601
from dateutil.parser import parse
from django.forms.models import model_to_dict
from django.core.mail import EmailMessage
from django.core.exceptions import ObjectDoesNotExist
from django.template.loader import get_template
from django.utils import timezone
from django_filters import FilterSet, CharFilter
from graphene import relay, ObjectType
from graphql_relay import from_global_id, to_global_id
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from graphql import GraphQLError
from graphql_schemas.utils.helpers import (is_not_admin,
update_instance,
send_calendar_invites,
validate_event_dates,
raise_calendar_error,
not_valid_timezone,
send_bulk_update_message,
add_event_to_calendar,
remove_event_from_all_calendars)
from graphql_schemas.scalars import NonEmptyString
from graphql_schemas.utils.hasher import Hasher
from api.models import (Event, Category, AndelaUserProfile,
Interest, Attend, RecurrenceEvent)
from api.slack import (get_slack_id,
notify_user,
new_event_message,
get_slack_channels_list, notify_channel)
from api.utils.backgroundTaskWorker import BackgroundTaskWorker
from api.constants import SLACK_CHANNEL_DATA
from ..attend.schema import AttendNode
# Route all log output (DEBUG and above) to warning.log with timestamps.
logging.basicConfig(
    filename='warning.log',
    level=logging.DEBUG,
    format='%(asctime)s %(message)s',
    datefmt='%m/%d/%Y %I:%M:%S %p')
class EventFilter(FilterSet):
    """
    Handles the filtering of events
    """
    # Custom filter: ?creator=<google_id> is resolved via user_profile().
    creator = CharFilter(method='user_profile')

    def user_profile(self, queryset, name, value):
        """
        Gets the events created by a user
        Params:
            queryset(QuerySet): the queryset to filter
            name(str): the name of the filter field ('creator'; unused here)
            value(str): the google id of the user
        Returns:
            the queryset filtered to events created by that user
        Raises:
            GraphQLError: if no profile matches the given google id
        """
        try:
            user_profile = AndelaUserProfile.objects.get(
                google_id=value
            )
        except AndelaUserProfile.DoesNotExist:
            raise GraphQLError(
                "AndelaUserProfile does not exist")
        return queryset.filter(creator=user_profile)

    class Meta:
        model = Event
        # Declarative lookups available in addition to the custom filter above.
        fields = {'start_date': ['exact', 'istartswith'],
                  'social_event': ['exact'], 'venue': ['exact'],
                  'title': ['exact', 'istartswith'], 'creator': ['exact'],
                  'location': ['icontains'], }
class EventNode(DjangoObjectType):
    """Relay node type for Event, exposing only "attending" attendances."""
    # NOTE(review): declaring attendSet as a bare AttendNode() instance is
    # unusual for a to-many relation — presumably this should be a List or
    # connection field; confirm against the graphene-django version in use.
    attendSet = AttendNode()

    def resolve_attendSet(self, info, **kwargs):
        # Hide attendances that are not in the "attending" status.
        return self.attendSet.filter(status="attending")

    class Meta:
        model = Event
        filter_fields = {'start_date': ['exact', 'istartswith'],
                         'social_event': ['exact'], 'venue': ['exact'],
                         'title': ['exact', 'istartswith'], 'creator': ['exact'],
                         'location': ['icontains'], }
        interfaces = (relay.Node,)
class Frequency(graphene.Enum):
    """Recurrence frequency choices for recurring events."""
    Daily = "DAILY"
    Weekly = "WEEKLY"
    Monthly = "MONTHLY"
class CreateEvent(relay.ClientIDMutation):
    """
    Handles the creation of events
    """
    class Input:
        title = NonEmptyString(required=True)
        description = NonEmptyString(required=True)
        venue = NonEmptyString(required=True)
        start_date = graphene.DateTime(required=True)
        end_date = graphene.DateTime(required=True)
        featured_image = graphene.String(required=True)
        category_id = graphene.ID(required=True)
        timezone = graphene.String(required=False)
        slack_channel = graphene.String(required=False)
        frequency = Frequency()
        recurring = graphene.Boolean(required=False)
        recurrence_end_date = graphene.DateTime(required=False)
        add_to_calendar = graphene.Boolean(required=False)
        location = graphene.String(required=False)

    new_event = graphene.Field(EventNode)
    slack_token = graphene.Boolean()

    @staticmethod
    def create_event(category, user_profile, recurrence_event, **input):
        """
        create an event
        Params:
            category: the Category of the event
            user_profile: the AndelaUserProfile of the creator
            recurrence_event: the RecurrenceEvent if the event recurs, else None
        Returns:
            the newly created Event
        Raises:
            GraphQLError: if the event dates or the timezone are invalid
        """
        is_date_valid = validate_event_dates(input, 'event_date')
        if not is_date_valid.get('status'):
            raise GraphQLError(is_date_valid.get('message'))
        if not input.get('timezone'):
            # Default to the creator's timezone when none is supplied.
            input['timezone'] = user_profile.timezone
        if not_valid_timezone(input.get('timezone')):
            # BUG FIX: this GraphQLError was previously *returned* instead of
            # raised, so the error object was silently treated as the event.
            raise GraphQLError("Timezone is invalid")
        # Strip inputs that are not Event model fields.
        input.pop('recurring', None)
        input.pop('frequency', None)
        input.pop('add_to_calendar', None)
        # objects.create() already persists the row; the extra save() call
        # the old code made was redundant.
        new_event = Event.objects.create(
            **input,
            creator=user_profile,
            social_event=category,
            recurrence=recurrence_event
        )
        return new_event

    @classmethod
    def mutate_and_get_payload(cls, root, info, **input):
        """
        calls the create event mutations
        Params:
            root(dict): root query field data
            info(dict): authentication and user information
            input(dict): the request input sent by the user
        Returns:
            returns the event created
        Raises:
            GraphQLError: if creation fails
        """
        category_id = from_global_id(input.pop('category_id'))[1]
        recurrence_event = None
        add_to_calendar = input.get('add_to_calendar')
        try:
            category = Category.objects.get(
                pk=category_id)
            user_profile = AndelaUserProfile.objects.get(
                user=info.context.user
            )
            if input.get('recurring'):
                recurrence_event = cls.create_recurrent_event(**input)
            input.pop('recurrence_end_date', None)
            new_event = CreateEvent.create_event(
                category, user_profile, recurrence_event, **input)
            args_dict = {
                "new_event": new_event,
                "recurrence_event": recurrence_event,
                "user_profile": user_profile
            }
            # Calendar sync defaults to on when the flag is omitted.
            if add_to_calendar is None:
                add_to_calendar = True
            if add_to_calendar:
                cls.start_background_task(**args_dict)
            # BUG FIX: this notification used to be sent a second time after
            # the try block, sending every follower a duplicate message.
            CreateEvent.notify_event_in_slack(category, input, new_event)
        except ValueError as e:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning(e)
            raise GraphQLError("An Error occurred. Please try again")
        # Tell the client whether the creator has linked a Slack account.
        slack_token = bool(user_profile.slack_token)
        return cls(
            slack_token=slack_token,
            new_event=new_event
        )

    @classmethod
    def start_background_task(cls, new_event, recurrence_event, user_profile):
        """
        start background task to add event to calendar
        Params:
            new_event: the newly created event
            recurrence_event: the RecurrenceEvent if the event recurs, else None
            user_profile: the creator's AndelaUserProfile
        Returns:
            runs add_event_to_calendar as a background task
        """
        if recurrence_event:
            # The extra True flag marks the calendar entry as recurring.
            BackgroundTaskWorker.start_work(add_event_to_calendar,
                                            (user_profile, new_event, True))
        else:
            BackgroundTaskWorker.start_work(add_event_to_calendar,
                                            (user_profile, new_event))

    @staticmethod
    def create_recurrent_event(**input):
        """
        creates the recurrent event
        Params:
            input(dict): the user input (frequency, start/end dates)
        Returns:
            the created RecurrenceEvent
        Raises:
            GraphQLError: if the recurrence dates are invalid
        """
        frequency = input.get('frequency')
        start_date = input.get('start_date')
        end_date = input.get('recurrence_end_date')
        is_date_valid = validate_event_dates(input, 'recurrent_date')
        if not is_date_valid.get('status'):
            raise GraphQLError(is_date_valid.get('message'))
        recurrence_event = RecurrenceEvent.objects.create(
            frequency=frequency,
            start_date=start_date,
            end_date=end_date
        )
        return recurrence_event

    @staticmethod
    def notify_event_in_slack(category, input, new_event):
        """
        notify user on new event
        Params:
            category: the Category of the event
            input(dict): the user input
            new_event: the newly created Event
        Returns:
            notifies the category followers about the new event (best effort:
            failures are logged and never abort event creation)
        """
        try:
            category_followers = Interest.objects.filter(
                follower_category_id=category.id)
            event_id = to_global_id(EventNode._meta.name, new_event.id)
            event_url = f"{dotenv.get('FRONTEND_BASE_URL')}/{event_id}"
            message = (f"*A new event has been created in `{category.name}` group*\n"
                       f"> *Title:* {input.get('title')}\n"
                       f"> *Description:* {input.get('description')}\n"
                       f"> *Venue:* {input.get('venue')}\n"
                       f"> *Date:* {input.get('start_date').date()}\n"
                       f"> *Time:* {input.get('start_date').time()}")
            blocks = new_event_message(
                message, event_url, str(new_event.id), input.get('featured_image'))
            slack_id_not_in_db = []
            all_users_attendance = []
            for instance in category_followers:
                # Every follower is registered as attending the new event.
                new_attendance = Attend(
                    user=instance.follower, event=new_event)
                all_users_attendance.append(new_attendance)
                if instance.follower.slack_id:
                    text = "New upcoming event from Andela socials"
                    BackgroundTaskWorker.start_work(
                        notify_user, (
                            blocks,
                            instance.follower.slack_id,
                            text
                        )
                    )
                else:
                    # Slack id missing locally; resolve it below.
                    slack_id_not_in_db.append(instance)
            Attend.objects.bulk_create(all_users_attendance)
            if slack_id_not_in_db:
                for instance in slack_id_not_in_db:
                    retrieved_slack_id = get_slack_id(
                        model_to_dict(instance.follower.user))
                    if retrieved_slack_id != '':
                        # Cache the resolved slack id for next time.
                        instance.follower.slack_id = retrieved_slack_id
                        instance.follower.save()
                        text = "New upcoming event from Andela socials"
                        BackgroundTaskWorker.start_work(
                            notify_user, (blocks, retrieved_slack_id, text))
                    else:
                        continue
        except Exception as e:
            # BUG FIX: was `except BaseException`, which also swallows
            # KeyboardInterrupt/SystemExit; Exception is broad enough for
            # this best-effort notification path.
            logging.warning(e)
class UpdateEvent(relay.ClientIDMutation):
    """
    Handle updating events
    """
    class Input:
        """
        inputs sent by the user
        """
        title = graphene.String()
        description = graphene.String()
        venue = graphene.String()
        start_date = graphene.DateTime()
        end_date = graphene.DateTime()
        featured_image = graphene.String()
        timezone = graphene.String()
        category_id = graphene.ID()
        slack_channel = graphene.String(required=False)
        event_id = graphene.ID(required=True)

    updated_event = graphene.Field(EventNode)
    action_message = graphene.String()

    @classmethod
    def mutate_and_get_payload(cls, root, info, **input):
        """
        Update an event
        Params:
            root(dict): root query field data
            info(dict): authentication and user information
            input(dict): the request input sent by the user
        Returns:
            returns the updated event
        Raises:
            GraphQLError: if the requester is neither the creator nor an
                admin, or if an unexpected error occurs during the update
        """
        try:
            user = AndelaUserProfile.objects.get(user=info.context.user)
            event_instance = Event.objects.get(
                pk=from_global_id(input.get('event_id'))[1]
            )
            old_venue = event_instance.venue
            # NOTE(review): iso8601.parse_date expects a string; if
            # start_date/end_date are already datetime objects this raises —
            # confirm the field types on the Event model.
            old_start_date = iso8601.parse_date(event_instance.start_date)
            old_end_date = iso8601.parse_date(event_instance.end_date)
            if event_instance.creator != user \
                    and not info.context.user.is_superuser:
                raise GraphQLError(
                    "You are not authorized to edit this event.")
            if input.get("category_id"):
                input["social_event"] = Category.objects.get(
                    pk=from_global_id(input.get('category_id'))[1]
                )
            if event_instance:
                updated_event = update_instance(
                    event_instance,
                    input,
                    exceptions=["category_id", "event_id"]
                )
                new_venue = updated_event.venue
                new_start_date = updated_event.start_date
                new_end_date = updated_event.end_date
                message_content = ''
                if old_venue != new_venue:
                    message_content += (f"> *Former Venue:* {old_venue}\n"
                                        f"> *New Venue:* {new_venue}\n\n")
                if old_start_date != new_start_date or old_end_date != new_end_date:
                    message_content += (f"> *Former Date:* {old_start_date.date()} {old_start_date.time()}\n"
                                        f"> *New Date:* {new_start_date.date()} {new_start_date.time()}")
                if message_content:
                    message = f"The following details about the *{event_instance.title}* event has been changed\n"
                    message += message_content
                    # Notify attendees of the change in the background.
                    BackgroundTaskWorker.start_work(
                        send_bulk_update_message, (event_instance, message, "An event you are attending was updated"))
                return cls(
                    action_message="Event Update is successful.",
                    updated_event=updated_event
                )
        except GraphQLError:
            # BUG FIX: specific errors (e.g. the authorization failure above)
            # were being swallowed by the generic handler below and replaced
            # with "An Error occurred" — re-raise them so the client sees the
            # real message.
            raise
        except Exception as e:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning(e)
            raise GraphQLError("An Error occurred. Please try again")
class DeactivateEvent(relay.ClientIDMutation):
    """Deactivates (cancels) an event and notifies its attendees."""

    action_message = graphene.String()

    class Input:
        event_id = graphene.ID(required=True)

    @classmethod
    def mutate_and_get_payload(cls, root, info, **input):
        """
        Deactivates an event
        Params:
            root(dict): root query field data
            info(dict): authentication and user information
            input(dict): the request input sent by the user
        Returns:
            returns deactivation message
        Raises:
            GraphQLError: if the event does not exist or the requester is
                neither the creator nor an admin
        """
        user = info.context.user
        event_id = input.get('event_id')
        db_event_id = from_global_id(event_id)[1]
        # BUG FIX: Event.objects.get raises DoesNotExist rather than
        # returning a falsy value, so the old `if not event` check was
        # dead code and missing events surfaced as an unhandled exception.
        try:
            event = Event.objects.get(id=db_event_id)
        except Event.DoesNotExist:
            raise GraphQLError('Invalid event')
        # Only the creator or an admin may deactivate the event.
        if user.id != event.creator.user_id and is_not_admin(user):
            raise GraphQLError("You aren't authorised to deactivate the event")
        Event.objects.filter(id=db_event_id).update(active=False)
        message = f"The *{event.title}* event has been cancelled\n"
        andela_user = AndelaUserProfile.objects.get(
            user_id=user.id)
        # Remove the event from calendars and notify attendees in the
        # background so the mutation returns promptly.
        BackgroundTaskWorker.start_work(
            remove_event_from_all_calendars, (andela_user, event))
        BackgroundTaskWorker.start_work(
            send_bulk_update_message,
            (event, message, "An event you are attending has been cancelled"))
        return cls(action_message="Event deactivated")
class SendEventInvite(relay.ClientIDMutation):
    """Emails an event invite (with a hashed invite URL) to another user."""

    message = graphene.String()

    class Input:
        event_id = graphene.ID(required=True)
        receiver_email = graphene.String(required=True)

    @classmethod
    def mutate_and_get_payload(cls, root, info, **input):
        """
        sends event invites to user
        Params:
            root(dict): root query field data
            info(dict): authentication and user information
            input(dict): the request input sent by the user
        Returns:
            a delivery confirmation message
        Raises:
            GraphQLError: if the recipient or event does not exist, if the
                sender invites themselves, or if the email fails to send
        """
        event_id = input.get('event_id')
        sender = AndelaUserProfile.objects.get(
            user_id=info.context.user.id)
        receiver_email = input.get('receiver_email')
        try:
            receiver = AndelaUserProfile.objects.get(
                user__email=receiver_email)
            event = Event.objects.get(id=from_global_id(event_id)[1])
            # Self-invites are rejected via the AssertionError branch below.
            assert sender.user.id != receiver.user.id
        except AndelaUserProfile.DoesNotExist:
            raise GraphQLError(
                "Recipient User does not exist")
        except Event.DoesNotExist:
            raise GraphQLError(
                "Event does not exist")
        except AssertionError:
            raise GraphQLError(
                "User cannot invite self")
        # The invite URL encodes (event, receiver, sender) in a reversible
        # hash; ValidateEventInvite decodes and checks it.
        invite_hash = Hasher.gen_hash([
            event.id, receiver.user.id, sender.user.id])
        invite_url = info.context.build_absolute_uri(
            f"/invite/{invite_hash}")
        data_values = {
            'title': event.title,
            'imgUrl': event.featured_image,
            'venue': event.venue,
            'startDate': event.start_date,
            'url': invite_url
        }
        message = get_template('event_invite.html').render(data_values)
        msg = EmailMessage(
            f"You have been invited to an event by {sender.user.username}",
            message,
            to=[receiver_email]
        )
        msg.content_subtype = 'html'
        # send() returns the number of messages delivered (0 on failure).
        sent = msg.send()
        if sent:
            return cls(message="Event invite delivered")
        else:
            raise GraphQLError("Event invite not delivered")
class ValidateEventInvite(relay.ClientIDMutation):
    """
    validate the event invites
    """
    isValid = graphene.Boolean()
    event = graphene.Field(EventNode)
    message = graphene.String()

    class Input:
        hash_string = graphene.String(required=True)

    @classmethod
    def mutate_and_get_payload(cls, root, info, **input):
        """
        checks if event is valid
        Params:
            root(dict): root query field data
            info(dict): authentication and user information
            input(dict): the request input sent by the user
        Returns:
            isValid plus a status message; errors are reported via the
            payload rather than raised to the client
        """
        hash_string = input.get('hash_string')
        user_id = info.context.user.id
        try:
            # The hash must decode to exactly (event_id, receiver_id,
            # sender_id) as produced by SendEventInvite.
            data = Hasher.reverse_hash(hash_string)
            if not data or len(data) != 3:
                raise GraphQLError("Bad Request: Invalid invite URL")
            event_id, receiver_id, sender_id = data
            # Only the invited user may redeem the invite.
            assert user_id == receiver_id
            event = Event.objects.get(id=event_id)
            # NOTE(review): dateutil's parse expects a string; if end_date is
            # already a datetime this raises — confirm the Event field type.
            if timezone.now() > parse(event.end_date):
                raise GraphQLError("Expired Invite: Event has ended")
            # Ensure the sender still exists (raises ObjectDoesNotExist).
            AndelaUserProfile.objects.get(user_id=sender_id)
            return cls(
                isValid=True, event=event,
                message="OK: Event invite is valid")
        except AssertionError:
            return cls(
                isValid=False,
                message="Forbidden: Unauthorized access"
            )
        except ObjectDoesNotExist:
            return cls(
                isValid=False,
                message="Not Found: Invalid event/user in invite"
            )
        except GraphQLError as err:
            return cls(
                isValid=False,
                message=str(err)
            )
class ChannelList(graphene.ObjectType):
    """
    Slack group channel list data — one entry per channel, presumably
    mirroring the fields of the Slack channel-list API response (see
    resolve_slack_channels_list, which builds these from get_slack_channels_list()).
    """
    id = graphene.ID()
    name = graphene.String()
    is_channel = graphene.String()
    created = graphene.Int()
    creator = graphene.String()
    is_archived = graphene.Boolean()
    is_general = graphene.Boolean()
    name_normalized = graphene.String()
    is_shared = graphene.Boolean()
    is_org_shared = graphene.Boolean()
    is_member = graphene.Boolean()
    is_private = graphene.Boolean()
    is_group = graphene.Boolean()
    members = graphene.List(graphene.String)
class ResponseMetadata(graphene.ObjectType):
    """
    Pagination metadata for the Slack channel list response.
    """
    next_cursor = graphene.String()
class SlackChannelsList(graphene.ObjectType):
    """
    handles slack channel list: the ok flag, the channels themselves and
    the pagination cursor metadata
    """
    ok = graphene.Boolean()
    channels = graphene.List(ChannelList)
    response_metadata = graphene.Field(ResponseMetadata)

    class Meta:
        interfaces = (relay.Node,)
class ShareEvent(relay.ClientIDMutation):
    """
    Handles event sharing on the channel
    """
    class Input:
        event_id = graphene.ID()
        channel_id = graphene.String()

    event = graphene.Field(EventNode)

    @classmethod
    def mutate_and_get_payload(cls, root, info, **input):
        """
        share event on the channel
        Params:
            root(dict): root query field data
            info(dict): authentication and user information
            input(dict): the request input sent by the user
        Returns:
            post event on the necessary channel
        Raises:
            GraphQLError: if posting the event to the channel fails
        """
        event_id = from_global_id(input.get('event_id'))[1]
        channel_id = input.get('channel_id')
        event_url = f"{dotenv.get('FRONTEND_BASE_URL')}/{event_id}"
        event = Event.objects.get(pk=event_id)
        try:
            # NOTE(review): dateutil's parse expects a string; confirm
            # event.start_date is not already a datetime here.
            start_date = parse(event.start_date)
            message = (f"*A new event has been created by <@{event.creator.slack_id}>.*\n"
                       f"> *Title:* {event.title}\n"
                       f"> *Description:* {event.description}\n"
                       f"> *Venue:* {event.venue}\n"
                       f"> *Date:* {start_date.strftime('%d %B %Y, %A')} \n"
                       f"> *Time:* {start_date.strftime('%H:%M')}")
            blocks = new_event_message(
                message, event_url, event_id, event.featured_image)
            notify_channel(
                blocks, "New upcoming event from Andela socials", channel_id)
        except ValueError as e:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning(e)
            raise GraphQLError("An Error occurred. Please try again")
        return ShareEvent(event=event)
class EventQuery(object):
    """Root query fields for events and the Slack channel list."""
    event = relay.Node.Field(EventNode)
    events_list = DjangoFilterConnectionField(EventNode, filterset_class=EventFilter)
    slack_channels_list = graphene.Field(SlackChannelsList)

    def resolve_event(self, info, **kwargs):
        """
        resolve event and return event that is gotten with the event id
        Params:
            info(dict): authentication and user information
            kwargs(dict): the request input sent by the user
        Returns:
            returns the event, or None if no id was given or the event is
            deactivated
        """
        id = kwargs.get('id')
        if id is not None:
            event = Event.objects.get(pk=id)
            # Deactivated events are hidden from clients.
            if not event.active:
                return None
            return event
        return None

    def resolve_events_list(self, info, **kwargs):
        """
        resolve all event and return all event
        Params:
            info(dict): authentication and user information
            kwargs(dict): the request input sent by the user
        Returns:
            returns all active events
        """
        return Event.objects.exclude(active=False)

    def resolve_slack_channels_list(self, info, **kwargs):
        """
        resolve slack channel
        Params:
            info(dict): authentication and user information
            kwargs(dict): the request input sent by the user
        Returns:
            returns all channels
        """
        channels = []
        slack_list = get_slack_channels_list()
        responseMetadata = ResponseMetadata(**slack_list.get('response_metadata'))
        for items in slack_list.get('channels'):
            # Keep only the keys declared in SLACK_CHANNEL_DATA so the
            # ChannelList constructor receives no unexpected fields.
            selection = SLACK_CHANNEL_DATA
            filtered_channel = dict(filter(lambda x: x[0] in selection, items.items()))
            channel = ChannelList(**filtered_channel)
            channels.append(channel)
        return SlackChannelsList(
            ok=slack_list.get('ok'), channels=channels, response_metadata=responseMetadata)
class EventMutation(ObjectType):
    """
    Handles event mutations — registers every event mutation on the schema.
    """
    create_event = CreateEvent.Field()
    deactivate_event = DeactivateEvent.Field()
    send_event_invite = SendEventInvite.Field()
    update_event = UpdateEvent.Field()
    validate_event_invite = ValidateEventInvite.Field()
    share_event = ShareEvent.Field()
|
# adapted from https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html#create-raster-from-array
import gdal, ogr, os, osr
import numpy as np
import hdr_writer as hdr
or_x = -90. # westernmost longitude of the grid
or_y = 22.50 # northernmost latitude of the grid
origin = (or_x, or_y) #(upper left corner)
def read_bin_ob(filename,x=120,y=160,pd=32):
    """Read a single-band binary grid of shape (y, x) via hdr_writer.

    NOTE(review): *pd* (bits per pixel?) is accepted but unused here —
    confirm whether it should be forwarded to hdr.catch_data.
    """
    array = hdr.catch_data(filename,1,nx=x,ny=y)
    return array
def read_bin_mb(filename,x=120,y=160,pd=32):
    """Read a multi-band binary grid via hdr_writer.

    The number of bands is derived from the file by hdr.catch_nt using the
    grid dimensions and *pd* (bits per pixel — confirm).
    """
    nbands = hdr.catch_nt(filename,x,y,pd)
    array = hdr.catch_data(filename,nbands,nx=x,ny=y)
    return array
def _write_band(outband, data):
    """Write one 2-D array to a raster band, setting nodata and statistics."""
    outband.SetNoDataValue(-9999.0)
    outband.WriteArray(data)
    # Statistics are computed ignoring the -9999.0 nodata cells.
    mask = data == -9999.0
    masked_data = np.ma.masked_array(data, mask)
    min_, max_ = outband.ComputeRasterMinMax(0)
    mean_, std_ = np.mean(masked_data), np.std(masked_data)
    outband.SetStatistics(min_, max_, mean_, std_)
    outband.FlushCache()


def array2raster(newRasterfn, rasterOrigin, pixelWidth, pixelHeight, nbands, array):
    """Write a NumPy array to an HFA (Erdas Imagine) raster in EPSG:4326.

    Params:
        newRasterfn: output file name.
        rasterOrigin: (x, y) of the upper-left corner.
        pixelWidth, pixelHeight: pixel size in georeferenced units
            (pixelHeight is typically negative for north-up rasters —
            confirm with the caller).
        nbands: number of bands; if 1, *array* is (rows, cols), otherwise
            it is bands-first (nbands, rows, cols).
        array: the data to write; -9999.0 marks nodata cells.
    """
    if nbands == 1:
        rows, cols = array.shape
    else:
        rows, cols = array.shape[1], array.shape[2]
    originX, originY = rasterOrigin
    driver = gdal.GetDriverByName('HFA')
    outRaster = driver.Create(newRasterfn, cols, rows, nbands, gdal.GDT_Float32)
    outRaster.SetGeoTransform((originX, pixelWidth, 0.0, originY, 0.0, pixelHeight))
    # The per-band writing logic was duplicated between the single- and
    # multi-band branches; it is factored into _write_band above.
    if nbands == 1:
        _write_band(outRaster.GetRasterBand(1), array)
    else:
        for i in range(nbands):
            _write_band(outRaster.GetRasterBand(i + 1), array[i])
    outRasterSRS = osr.SpatialReference()
    outRasterSRS.ImportFromEPSG(4326)
    outRaster.SetProjection(outRasterSRS.ExportToWkt())
    # Flush the dataset so the file is fully written even before the
    # handle is garbage collected.
    outRaster.FlushCache()
|
class Solution:
    # Path counter used only by the brute-force recursive variant.
    road_num = 0

    def uniquePath(self, m, n):
        """Return the number of unique paths on an m x n grid,
        moving only right or down (delegates to the DP solution)."""
        return self.uniquePath_iter(m, n)

    # Brute-force recursion (exponential time; times out on large grids).
    def uniquePath_recu(self, m, n):
        """Count paths by exploring both moves; accumulates into road_num."""
        if (m, n) in ((1, 1), (1, 2), (2, 1)):
            # At the goal, or exactly one step away: a single path remains.
            self.road_num += 1
            return
        if m >= 1 and n >= 1:
            # Move right.
            self.uniquePath_recu(m - 1, n)
            # Move down.
            self.uniquePath_recu(m, n - 1)

    # Dynamic-programming solution.
    def uniquePath_iter(self, m, n):
        """paths[i][j] = paths[i-1][j] + paths[i][j-1]; first row/column are 1."""
        paths = [[1] * n for _ in range(m)]
        for row in range(1, m):
            for col in range(1, n):
                paths[row][col] = paths[row - 1][col] + paths[row][col - 1]
        return paths[-1][-1]
if __name__ == '__main__':
    # Quick manual check: a 3 x 7 grid has C(8, 2) = 28 unique paths.
    solver = Solution()
    print(solver.uniquePath(3, 7))
import cv2

# Load an image using 'imread', specifying the path to the image.
image = cv2.imread('./images/input.jpg')
# If the image is not loading properly, use the full path, e.g.
# C:\\Users\\userName\\Master OpenCV\\images\\digits.png

# Display the image using imshow('Title', imageVariable).
cv2.imshow('FirstImage', image)

# waitKey blocks until a key is pressed while an image window is open.
# Leaving it blank (or 0) waits for any key before continuing; a positive
# number is a delay in milliseconds, e.g. cv2.waitKey(2).
# BUG FIX: the function is cv2.waitKey — the original 'cv2.waitkey'
# raises AttributeError at runtime.
cv2.waitKey()

# Closes all open windows; if not used, the program hangs.
cv2.destroyAllWindows()

# A closer look at how images are stored.
import numpy as np

print(image.shape)  # image dimensions: (height, width, no. of colour channels)
print('Height of a image:', image.shape[0], 'pixels')
print('Width of a image:', image.shape[1], 'pixels')
# Alternative to the above TWO lines of code:
# (height, width) = image.shape[:2]

# Save/write images: imwrite('Title', imageVariable).
cv2.imwrite('output_image.jpg', image)
cv2.imwrite('Output.png', image)

# Converting to grayscale images.
# Why images are converted to GRAY: contour-based segmentation
# (an image-segmentation technique) processes only grayscale images.
# img = cv2.imread('input.jpg')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Alternative: the flag 0 loads the image directly as grayscale.
# NOTE(review): this path differs from './images/input.jpg' used above —
# confirm which location is intended.
img = cv2.imread('input.jpg', 0)
cv2.imshow('GreyImage', img)
cv2.waitKey()
cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-:
import os
import time
from multiprocessing import Pool
import numpy as np
import datetime
from tqdm import tqdm
# Simulated flows for testing.
def get_sim_flows():
    """Return a small simulated flow data set: (coordinates, attribute values)."""
    # Two spatial clusters of flows: one near (1,1)->(10,10), one near (50,50)->(41,59).
    coords = np.array([[1, 1, 10, 10], [1, 1, 8, 12], [0, 3, 11, 11], [2, 0, 10, 10], [2, 2, 12, 9],
                       [51, 50, 40, 60], [50, 51, 41, 59], [49, 51, 42, 60], [52, 50, 42, 58]])
    # High values cluster with high, low with low (positive autocorrelation case).
    values = np.array([30, 29, 31, 30, 32, 2, 1, 3, 2])
    # High/low interleaved (negative autocorrelation case):
    # values = np.array([30, 2, 31, 3, 32, 2, 51, 31, 3])
    return coords, values
# Compute the distance between flows (the `method` parameter selects the distance measure; more methods to be added).
def get_distance(v1, v2, method=0):
    """Euclidean distance between flow vectors *v1* and *v2*.

    The *method* parameter is reserved for selecting alternative distance
    measures and is currently unused.
    """
    diff = v1 - v2
    return np.sqrt((diff ** 2).sum())
def task(vectors, i, j, n):
    """Worker: compute rows [i, j) of the inverse-distance weight matrix.

    Params:
        vectors: (n, 4) array of flow coordinates.
        i, j: half-open row range assigned to this worker.
        n: total number of flows (number of columns).
    Returns:
        (j - i, n) array w with w[r, c] = 1 / distance(flow i+r, flow c)
        and the self-weight w[r, i + r] set to 0.
    """
    pid = os.getpid()
    print('start process', pid)
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter is the
    # recommended replacement for interval timing.
    start_time = time.perf_counter()
    # BUG FIX: was `l = j - i + 1`, one row too many per chunk — the last
    # worker then indexed vectors[n] (IndexError) and adjacent chunks
    # overlapped by one row.
    l = j - i
    w = np.zeros((l, n), dtype=float)
    for r in range(l):
        if r % 2000 == 0:
            print('process', pid, ':', r, '/', l)
        # Self-distance is 0, producing an inf weight that is overwritten
        # with 0 below; suppress the expected divide-by-zero warning.
        with np.errstate(divide='ignore'):
            w[r] = 1 / np.sqrt(np.sum((vectors - vectors[r + i]) ** 2, axis=1))
        w[r, i + r] = 0
    print('process', pid, 'completed: ', '%.3f' % (time.perf_counter() - start_time), 'secs.')
    return w
# Compute the weight matrix (the `standardization` parameter indicates whether to standardize it).
def get_weight_matrix(vectors, num_of_process, standardization=False):
    """Compute the full inverse-distance weight matrix in parallel.

    Params:
        vectors: (n, 4) array of flow coordinates.
        num_of_process: number of worker processes.
        standardization: reserved flag for row-standardising the matrix;
            currently unused.
    Returns:
        (n, n) weight matrix assembled from the per-worker chunks.
    """
    print('calculate weight matrix...')
    n = len(vectors)
    pool = Pool(processes=num_of_process)
    results = []
    # Split the rows [0, n) into num_of_process contiguous chunks.
    task_allo = [i / num_of_process for i in range(num_of_process + 1)]
    for idx in range(num_of_process):
        i = int(task_allo[idx] * n)
        j = int(task_allo[idx + 1] * n)
        print('pid', idx, ':', 'start from', i, 'end in', j)
        results.append(pool.apply_async(task, args=(vectors, i, j, n,)))
    pool.close()
    pool.join()
    # BUG FIX: np.vstack needs a sequence of arrays; passing a generator
    # expression fails on modern NumPy. Materialise the results in a list.
    return np.vstack([res.get() for res in results])
# Compute the spatial autocorrelation index of the flows.
def flow_autocorrelation(flows_co, flows_z, num_of_process, standardization=False):
    """Compute a Moran's-I-style spatial autocorrelation index for flows.

    Params:
        flows_co: (n, 4) array of flow coordinates (ox, oy, dx, dy).
        flows_z: length-n array of flow attribute values.
        num_of_process: number of worker processes for the weight matrix.
        standardization: reserved flag; currently unused.
    Returns:
        n * sum_ij(w_ij * z_i * z_j) / sum(w) / sum(z^2), where z are the
        deviations of flows_z from their mean.
    """
    n = len(flows_z)
    # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter.
    start_time = time.perf_counter()
    w = get_weight_matrix(flows_co, num_of_process)
    print('compute the weighted matrix: ', '%.3f' % (time.perf_counter() - start_time), 'secs.')
    start_time = time.perf_counter()
    # Deviations of the attribute values from their mean.
    dif_z = flows_z - np.average(flows_z)
    sum1 = 0
    for i in tqdm(range(n)):
        # Cross-product term for row i, weighted by w[i].
        sum1 += np.sum(dif_z[i] * dif_z * w[i])
    print('compute cross product: ', '%.3f' % (time.perf_counter() - start_time), 'secs.')
    print('step c')
    sum2 = np.sum(dif_z**2)
    print('step d')
    s = np.sum(w)
    return n * sum1 / s / sum2
def get_flows_from_file(filename, column_num, minSpeed=2, maxSpeed=150):
    """Read OD flows from a trajectory CSV and aggregate them per OD pair.

    The file has a header line followed by record pairs (origin line, then
    destination line). An origin record is kept when its second field is '1'
    and its second-to-last field (speed) lies strictly between minSpeed and
    maxSpeed. Grid ids (the last field) are converted to (x, y) cells using
    *column_num* columns.

    Returns:
        (flows_co, flows_z): an (m, 4) array of [ox, oy, dx, dy] coordinates
        and a length-m array of flow counts.
    """
    counts = {}
    with open(filename, 'r') as f:
        f.readline()  # skip the header line
        while True:
            first = f.readline().strip()
            if not first:
                break
            origin_rec = first.split(',')
            dest_rec = f.readline().strip().split(',')
            if origin_rec[1] == '1' and minSpeed < float(origin_rec[-2]) < maxSpeed:
                od = (int(origin_rec[-1]), int(dest_rec[-1]))
                counts[od] = counts.get(od, 0) + 1
    flows_co = []
    flows_z = []
    for (ogid, dgid), freq in counts.items():
        # Row-major grid ids -> (row, column) cells.
        oy, ox = divmod(ogid, column_num)
        dy, dx = divmod(dgid, column_num)
        flows_co.append(np.array([ox, oy, dx, dy]))
        flows_z.append(freq)
    return np.array(flows_co), np.array(flows_z)
if __name__ == '__main__':
    print('starting time: \n', datetime.datetime.now().strftime("%Y.%m.%d-%H:%M:%S"))
    # Swap in get_sim_flows() for a small synthetic sanity check:
    # flows_co, flows_z = get_sim_flows()
    flows_co, flows_z = get_flows_from_file('./data/sj_051316_1km.csv', 30)
    moran_i = flow_autocorrelation(flows_co, flows_z, num_of_process=5)
    print('moran\'s I: ', moran_i)
|
#!/usr/bin/python
# Iterative k-means over student GPAs, driven from Jython via the Pig
# embedding API. Each iteration runs one Pig job that assigns every GPA to
# its nearest centroid and averages per cluster; convergence is declared
# when the mean centroid movement drops below `tolerance`.
import sys
from math import fabs
from org.apache.pig.scripting import Pig

k = 4
tolerance = 0.01
MAX_SCORE = 4
MIN_SCORE = 0
MAX_ITERATION = 5

# Initial centroids: equally divide the score space [MIN_SCORE, MAX_SCORE).
last_centroids = [MIN_SCORE + float(i) / k * (MAX_SCORE - MIN_SCORE) for i in range(k)]


def centroids_to_param(centroids):
    # Pig parameter format: colon-separated values, e.g. "0.0:1.0:2.0:3.0".
    return ":".join(str(c) for c in centroids)


initial_centroids = centroids_to_param(last_centroids)

P = Pig.compile("""register udf.jar
DEFINE find_centroid FindCentroid('$centroids');
raw = load '/user/hdfs/data/data1/student.txt' as (name:chararray, age:int, gpa:double);
centroided = foreach raw generate gpa, find_centroid(gpa) as centroid;
grouped = group centroided by centroid;
result = foreach grouped generate group, AVG(centroided.gpa);
store result into 'output';
""")

converged = False
iter_num = 0
while iter_num < MAX_ITERATION:
    Q = P.bind({'centroids': initial_centroids})
    results = Q.runSingle()
    # BUG FIX: isSuccessful() returns a boolean, so the original comparison
    # `== "FAILED"` was never true and job failures went undetected. String
    # exceptions (`raise "..."`) are also invalid; raise a real Exception.
    if not results.isSuccessful():
        raise Exception("Pig job failed")
    rows = results.result("result").iterator()
    centroids = [None] * k
    distance_move = 0
    # Get the new centroid of this iteration and accumulate the moving
    # distance relative to the previous iteration.
    for i in range(k):
        row = rows.next()
        centroids[i] = float(str(row.get(1)))
        distance_move = distance_move + fabs(last_centroids[i] - centroids[i])
    distance_move = distance_move / k
    Pig.fs("rmr output")
    print("iteration " + str(iter_num))
    print("average distance moved: " + str(distance_move))
    if distance_move < tolerance:
        sys.stdout.write("k-means converged at centroids: [")
        sys.stdout.write(",".join(str(v) for v in centroids))
        sys.stdout.write("]\n")
        converged = True
        break
    last_centroids = centroids[:]
    initial_centroids = centroids_to_param(last_centroids)
    iter_num += 1

if not converged:
    print("not converge after " + str(iter_num) + " iterations")
    sys.stdout.write("last centroids: [")
    sys.stdout.write(",".join(str(v) for v in last_centroids))
    sys.stdout.write("]\n")
|
# https://stackoverflow.com/questions/5904969/how-to-print-a-dictionarys-key
# Lists the files that have a segmentation present in the json.
import json
import os

with open('/home/pam/Desktop/streamlit_tcc/Mask_RCNN-Multi-Class-Detection/Leaf_OLD/train/via_region_data.json') as json_file:
    data = json.load(json_file)

# BUG FIX: the original nested an outer loop over data.items() around this
# block and reopened dataset.txt in "w" mode each pass, truncating the file
# repeatedly and never closing it. Open once, write each key, and let the
# context manager close the handle. "\n" is used instead of os.linesep:
# text mode already translates newlines, so os.linesep would double-translate
# on Windows.
with open("dataset.txt", "w") as out:
    for key in data:
        print(key)
        out.write(str(key) + "\n")
import pytest
@pytest.fixture
def one_word_tag_str():
    """Tag list containing a single one-word tag."""
    return ['toyota']


@pytest.fixture
def two_words_tag_str():
    """Tag list containing a single two-word tag."""
    return ['toyota corolla']


@pytest.fixture
def three_words_tag_str():
    """Tag list containing a single three-word tag."""
    return ['toyota corolla 2007']


@pytest.fixture
def four_words_tag_str():
    """Tag list containing a single four-word tag (mixed casing on purpose)."""
    return ['camera And navigation system']
|
from django.shortcuts import render
from datetime import date
from mealplanner.models import Week, Day
def home_page(request):
    """Render the meal plan for the current ISO week.

    Ensures a Week row and its seven Day rows exist, then (on POST) writes
    each submitted '<day>_<meal>' field onto the matching Day.
    """
    week_number = date.today().isocalendar()[1]
    # BUG FIX: the original used a bare `except:` around Week.objects.get(),
    # which silently swallowed every error (DB failures, MultipleObjectsReturned)
    # and created duplicate weeks. get_or_create handles the missing-row case
    # explicitly and atomically.
    week, _ = Week.objects.get_or_create(week_number=week_number)
    for day_name in Day.DAY_NAME_CHOICES:
        # Create each weekday row exactly once; exists() avoids fetching rows
        # just to test for presence.
        if not Day.objects.filter(week=week, name=day_name[0]).exists():
            Day.objects.create(week=week, name=day_name[0])
    if request.method == 'POST':
        for key, val in request.POST.items():
            try:
                # Form field names look like 'mandag_dinner'.
                daystr, meal = key.split('_')
            except ValueError:
                # Not a day/meal field (e.g. csrf token) — skip it.
                continue
            daystr = Day.NORWEGIAN_TO_ENGLISH_DICT[daystr.capitalize()]
            day = Day.objects.get(week=week, name=daystr)
            setattr(day, meal, val)
            day.save()
    return render(request, 'home.html', {'week': week})
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pywt
# Cache of SWT results, keyed by the input array's id() plus the transform
# parameters. id()-based caching is best-effort: an id can be reused after
# the original array is garbage collected.
_CACHED_VALUES = dict()


def _get_swt(scalar_field, pad_method, wavelet, separation_scale):
    """Memoized wrapper around compute_swt.

    Avoids recomputing the transform when the same array is queried again
    with the same parameters.
    """
    # BUG FIX: the old cache key was id(scalar_field) alone, so calling with
    # the same array but a different wavelet/padding/separation scale
    # returned stale results. Include all parameters in the key.
    key = (id(scalar_field), pad_method, wavelet, separation_scale)
    if key in _CACHED_VALUES:
        return _CACHED_VALUES[key]
    swt = compute_swt(scalar_field, pad_method, wavelet, separation_scale)
    _CACHED_VALUES[key] = swt
    return swt
def _debug_plot(scalar_field, k, specs):
    """Show the input field next to its direction-wise wavelet energy spectra."""
    direction_names = ("Horizontal", "Vertical", "Diagonal")
    fig, (ax_field, ax_spec) = plt.subplots(ncols=2, figsize=(8, 4))
    # Left panel: the raw scalar field.
    ax_field.imshow(scalar_field, "gist_ncar")
    ax_field.set_xticks([])
    ax_field.set_yticks([])
    ax_field.set_title("CWP")
    # Right panel: one energy curve per direction, log-scaled in k.
    for column, label in enumerate(direction_names):
        ax_spec.plot(k[1:], specs[:, column], label=label)
    ax_spec.set_xscale("log")
    ax_spec.set_xlabel(r"Scale number $k$")
    ax_spec.set_ylabel("Energy")
    ax_spec.set_title("Wavelet energy spectrum")
    ax_spec.legend()
    plt.tight_layout()
    plt.show()
def compute_swt(scalar_field, pad_method, wavelet, separation_scale, debug=False):
    """
    Computes the stationary/undecimated Direct Wavelet Transform
    (SWT, https://pywavelets.readthedocs.io/en/latest/ref/swt-stationary-wavelet-transform.html#multilevel-2d-swt2)
    of a scalar field. See the documentation of woi1 for additional details.
    Parameters
    ----------
    scalar_field : numpy array of shape (npx,npx) - npx is number of pixels
        (Cloud) scalar input.
    pad_method : string
        Which type of padding to use, in case the field shape is not a
        power of 2.
    wavelet : string
        Which wavelet to use.
    separation_scale : int
        Which power of 2 to use as a cutoff scale that separates 'small' scales
        from 'large' scales.
    debug : bool, optional
        If True, plots the averaged wavelet spectra in horizontal, vertical and
        diagonal directions. The default is False.
    Returns
    -------
    Ebar : float
        Direction-averaged, squared coefficients of the SWT
    Elbar : float
        Direction-averaged, squared coefficients of the SWT, over scales larger than
        `separation_scale` (inclusive)
    Esbar : float
        Direction-averaged, squared coefficients of the SWT, over scales smaller than
        `separation_scale` (exclusive)
    Eld : numpy array of shape (3,)
        Sum of squared coefficients of the SWT, over scales larger than
        `separation_scale` (inclusive), in the horizontal, vertical and diagonal
        direction
    Esd : float
        Sum of squared coefficients of the SWT, over scales smaller than
        `separation_scale` (exclusive), in the horizontal, vertical and diagonal
        direction
    """
    # Pad each axis symmetrically up to the next power of 2, if necessary.
    pad_sequence = []
    scale_i = []
    for shi in scalar_field.shape:
        pow2 = np.log2(shi)
        pow2 = int(pow2 + 1) if pow2 % 1 > 0 else int(pow2)
        pad = (2**pow2 - shi) // 2
        pad_sequence.append((pad, pad))
        scale_i.append(pow2)
    scalar_field = pywt.pad(scalar_field, pad_sequence, pad_method)
    # Compute wavelet coefficients
    scale_max = np.max(scale_i)  # FIXME won't work for non-square scenes
    coeffs = pywt.swt2(scalar_field, wavelet, scale_max, norm=True, trim_approx=True)
    # Structure of coeffs:
    # - coeffs -> list with n_scales indices. Each scale is a 2-power of
    #             the image resolution. For 512x512 images we have
    #             512 = 2^9 -> 10 scales
    # - coeffs[i] -> Contains three directions:
    #                [0] - Horizontal
    #                [1] - Vertical
    #                [2] - Diagonal
    specs = np.zeros((len(coeffs), 3))  # Shape (n_scales,3)
    k = np.arange(0, len(specs))
    for i in range(len(coeffs)):
        if i == 0:
            # First entry is the (single) approximation array, not a triple.
            ec = coeffs[i] ** 2
            specs[i, 0] = np.mean(ec)
        else:
            for j in range(len(coeffs[i])):
                ec = coeffs[i][j] ** 2  # Energy -> squared wavelet coeffs
                specs[i, j] = np.mean(ec)  # Domain-averaging at each scale
    # Decompose into ''large scale'' energy and ''small scale'' energy
    # Large scales are defined as 0 < k < separation_scale
    specs = specs[
        1:
    ]  # Remove first (mean) component, as it always distributes over horizontal dimension
    specL = specs[:separation_scale, :]
    specS = specs[separation_scale:, :]
    # Average over scales
    Ebar = np.sum(np.mean(specs, axis=1))
    Elbar = np.sum(np.mean(specL, axis=1))
    Esbar = np.sum(np.mean(specS, axis=1))
    # Sum over large/small scales
    Eld = np.sum(specL, axis=0)
    Esd = np.sum(specS, axis=0)
    if debug:
        _debug_plot(scalar_field, k, specs)
    return Ebar, Elbar, Esbar, Eld, Esd
def woi1(scalar_field, pad_method="periodic", wavelet="haar", separation_scale=5):
    """
    Computes the first Wavelet Organisation Index WOI1 proposed by
    Brune et al. (2018) https://doi.org/10.1002/qj.3409 from the stationary/undecimated
    Direct Wavelet Transform (https://pywavelets.readthedocs.io/en/latest/ref/swt-stationary-wavelet-transform.html#multilevel-2d-swt2)
    of a scalar field. Based off https://rdrr.io/cran/calcWOI/, but does not
    mirror, taper or blow the `scalar_field` up. Instead, preprocessing the
    `scalar_field` input is limited to padding fields that do not have
    dimensions that are a power of 2 (all padding methods in pywt are available).
    Parameters
    ----------
    scalar_field : numpy array of shape (npx,npx) - npx is number of pixels
        (Cloud) scalar input. Can be any field of choice (Brune et al. (2018) use
        rain rates; Janssens et al. (2021) use liquid water path).
    pad_method : string, optional
        Which type of padding to use, in case the field shape is not a power of 2.
        The default is 'periodic'. Other options can be found here:
        https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html#ref-modes
    wavelet : string, optional
        Which wavelet to use. The default is 'haar'. Other options can be found here:
        https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html
    separation_scale : int, optional
        Which power of 2 to use as a cutoff scale that separates 'small' scales
        from 'large' scales. The default is 5; i.e. energy contained in scales
        larger than 2^5=32 pixles is considered 'large-scale energy'.
    Returns
    -------
    woi1 : float
        First wavelet organisation index: the fraction of total wavelet
        energy contained in the large scales.
    """
    Ebar, Elbar, Esbar, Eld, Esd = _get_swt(
        scalar_field, pad_method, wavelet, separation_scale
    )
    return Elbar / Ebar
def woi2(scalar_field, pad_method="periodic", wavelet="haar", separation_scale=5):
    """
    Computes the second Wavelet Organisation Index WOI2 proposed by
    Brune et al. (2018) https://doi.org/10.1002/qj.3409 (see :func:`cloudmetrics.metrics.woi1`
    for more details)
    Parameters
    ----------
    scalar_field : numpy array of shape (npx,npx) - npx is number of pixels
        (Cloud) scalar input.
    pad_method : string, optional
        Which type of padding to use, in case the field shape is not a power of 2.
    wavelet : string, optional
        Which wavelet to use. The default is 'haar'.
    separation_scale : int, optional
        Which power of 2 to use as a cutoff scale that separates 'small' scales
        from 'large' scales.
    Returns
    -------
    woi2 : float
        Second wavelet organisation index: total wavelet energy normalized
        by the number of strictly-positive pixels in `scalar_field`.
    """
    Ebar, Elbar, Esbar, Eld, Esd = _get_swt(
        scalar_field, pad_method, wavelet, separation_scale
    )
    return (Elbar + Esbar) / scalar_field[scalar_field > 0].size
def woi3(scalar_field, pad_method="periodic", wavelet="haar", separation_scale=5):
    """
    Computes the third Wavelet Organisation Index WOI3 proposed by
    Brune et al. (2018) https://doi.org/10.1002/qj.3409 (see :func:`cloudmetrics.metrics.woi1`
    for more details)
    Parameters
    ----------
    scalar_field : numpy array of shape (npx,npx) - npx is number of pixels
        (Cloud) scalar input.
    pad_method : string, optional
        Which type of padding to use, in case the field shape is not a power of 2.
    wavelet : string, optional
        Which wavelet to use. The default is 'haar'.
    separation_scale : int, optional
        Which power of 2 to use as a cutoff scale that separates 'small' scales
        from 'large' scales.
    Returns
    -------
    woi3 : float
        Third wavelet organisation index: directional anisotropy of the
        wavelet energy, relative to the direction-averaged energy.
    """
    Ebar, Elbar, Esbar, Eld, Esd = _get_swt(
        scalar_field, pad_method, wavelet, separation_scale
    )
    # Degenerate cases: if one of the scale bands carries no energy, only
    # the other band contributes to the anisotropy measure.
    if Elbar == 0:
        woi3 = 1.0 / 3 * np.sum((Esd - Esbar) / Esbar)
    elif Esbar == 0:
        woi3 = 1.0 / 3 * np.sum((Eld - Elbar) / Elbar)
    else:
        woi3 = (
            1.0
            / 3
            * np.sqrt(
                np.sum(((Esd - Esbar) / Esbar) ** 2 + ((Eld - Elbar) / Elbar) ** 2)
            )
        )
    return woi3
|
import torch
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
def get_dataloaders_with_index(path="../../data", batch_size=64, num_labeled=250,
        lbl_idxs=None, unlbl_idxs=None, valid_idxs=None, which_dataset='cifar10', validation=True):
    """
    Returns data loaders for Semi-Supervised Learning
    Split between train_labeled, train_unlabeled, validation and test

    :param path: dataset root directory (downloaded here if absent)
    :param batch_size: batch size for all loaders
    :param num_labeled: total number of labeled samples; divided by 10 to get
        the per-class count (assumes 10 balanced classes)
    :param lbl_idxs/unlbl_idxs/valid_idxs: pre-computed index lists; when
        lbl_idxs is given, all three are used as-is and no new split is made
    :param which_dataset: 'cifar10' or 'svhn'
    :param validation: if False, no validation split is made and the test
        loader doubles as the validation loader
    :return: (labeled loader, unlabeled loader, val loader, test loader,
        labeled idxs, unlabeled idxs, val idxs)
    """
    # Define transform to normalize data (CIFAR-10 channel statistics).
    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    if which_dataset == 'cifar10':
        # CustomCIFAR10 yields (data, target, index) triplets.
        train_set = CustomCIFAR10(root=path, train=True, transform=transform)
        test_set = CustomCIFAR10(root=path, train=False, transform=transform)
    elif which_dataset == 'svhn':
        train_set = datasets.SVHN(root=path, split='train', download=True, transform=transform)
        test_set = datasets.SVHN(root=path, split='test', download=True, transform=transform)
    else:
        raise Exception('Not supported yet')
    # Split indexes between labeled, unlabeled and validation.
    # SVHN stores class labels under .labels, torchvision CIFAR under .targets.
    if which_dataset == 'cifar10':
        training_labels = train_set.targets
    elif which_dataset == 'svhn':
        training_labels = train_set.labels
    else :
        training_labels = train_set.targets
    if validation:
        train_labeled_idxs, train_unlabeled_idxs, val_idxs = labeled_unlabeled_val_split(training_labels, int(num_labeled / 10))
    else:
        train_labeled_idxs, train_unlabeled_idxs = labeled_unlabeled_split(training_labels, int(num_labeled / 10))
        val_idxs = []
    # If indexes are provided, use them (overrides the split computed above).
    if lbl_idxs is not None:
        train_labeled_idxs = lbl_idxs
        train_unlabeled_idxs = unlbl_idxs
        val_idxs = valid_idxs
    # Define samplers using indexes
    train_labeled_sampler = SubsetRandomSampler(train_labeled_idxs)
    train_unlabeled_sampler = SubsetRandomSampler(train_unlabeled_idxs)
    val_sampler = SubsetRandomSampler(val_idxs)
    # Create data loaders
    train_labeled_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_labeled_sampler, num_workers=0)
    train_unlabeled_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_unlabeled_sampler, num_workers=0)
    val_loader = DataLoader(train_set, batch_size=batch_size, sampler=val_sampler, num_workers=0)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=0)
    if not validation:
        # No held-out split requested: evaluate on the test set instead.
        val_loader = test_loader
    return train_labeled_loader, train_unlabeled_loader, val_loader, test_loader, train_labeled_idxs, train_unlabeled_idxs, val_idxs
def labeled_unlabeled_val_split(labels, n_labeled_per_class, n_val_per_class=500):
    """Split sample indices per class into labeled / unlabeled / validation.

    For each of the 10 classes, the class's indices are shuffled and the
    first `n_labeled_per_class` go to the labeled pool, the last
    `n_val_per_class` to the validation pool, and everything in between to
    the unlabeled pool. The hard-coded 500-per-class validation size is now
    a parameter (default preserves the original behavior: 5000 total).

    :param labels: per-sample class labels (list or array), classes 0..9
    :param n_labeled_per_class: labeled samples to draw from each class
    :param n_val_per_class: validation samples to hold out from each class
    :return: (labeled idxs, unlabeled idxs, val idxs), each shuffled
    """
    labels = np.array(labels)
    train_labeled_idxs = []
    train_unlabeled_idxs = []
    val_idxs = []
    for c in range(10):
        idxs = np.where(labels == c)[0]
        np.random.shuffle(idxs)
        # Use an explicit end index rather than a negative slice so that
        # n_val_per_class=0 also works ([-0:] would take the whole array).
        val_start = len(idxs) - n_val_per_class
        train_labeled_idxs.extend(idxs[:n_labeled_per_class])
        train_unlabeled_idxs.extend(idxs[n_labeled_per_class:val_start])
        val_idxs.extend(idxs[val_start:])
    np.random.shuffle(train_labeled_idxs)
    np.random.shuffle(train_unlabeled_idxs)
    np.random.shuffle(val_idxs)
    return train_labeled_idxs, train_unlabeled_idxs, val_idxs
def labeled_unlabeled_split(labels, n_labeled_per_class):
    """Split sample indices per class into labeled / unlabeled pools.

    Same scheme as labeled_unlabeled_val_split but without a validation
    hold-out. (The original kept a dead `val_idxs` list — removed.)

    :param labels: per-sample class labels (list or array), classes 0..9
    :param n_labeled_per_class: labeled samples to draw from each class
    :return: (labeled idxs, unlabeled idxs), each shuffled
    """
    labels = np.array(labels)
    train_labeled_idxs = []
    train_unlabeled_idxs = []
    for c in range(10):
        idxs = np.where(labels == c)[0]
        np.random.shuffle(idxs)
        train_labeled_idxs.extend(idxs[:n_labeled_per_class])
        train_unlabeled_idxs.extend(idxs[n_labeled_per_class:])
    np.random.shuffle(train_labeled_idxs)
    np.random.shuffle(train_unlabeled_idxs)
    return train_labeled_idxs, train_unlabeled_idxs
class CustomCIFAR10(Dataset):
    """
    Returns triplet (data, target, index) in __getitem__()

    Thin wrapper around torchvision's CIFAR10 that also yields each sample's
    dataset index, so downstream code can track which samples it saw.
    """
    def __init__(self, root, train, transform):
        # Downloads the dataset into `root` on first use (network I/O).
        self.cifar10 = datasets.CIFAR10(root=root,
                                        download=True,
                                        train=train,
                                        transform=transform)
        # Mirror the underlying targets so callers can read labels directly.
        self.targets = self.cifar10.targets

    def __getitem__(self, index):
        data, target = self.cifar10[index]
        # Same as CIFAR10 but with the index appended.
        return data, target, index

    def __len__(self):
        return len(self.cifar10)
if __name__ == "__main__":
    # Smoke test: sample a fixed handful of CIFAR-10 indices and iterate them.
    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    samples = np.array([1, 2, 3, 4, 5, 6])
    sampler = SubsetRandomSampler(samples)
    # BUG FIX: CustomCIFAR10's signature is (root, train, transform); the
    # original call passed the transform as the positional `train` argument
    # and omitted the transform entirely. Use keywords to be explicit.
    dataset = CustomCIFAR10("../../data", train=True, transform=transform)
    loader = DataLoader(dataset, batch_size=2, sampler=sampler, num_workers=0)
    for batch_idx, (data, target, idx) in enumerate(loader):
        print('Batch idx {}, dataset index {}'.format(
            batch_idx, idx))
|
import sys
from pyspark import SparkConf, SparkContext
from collections import defaultdict
import time
import random
start = time.time()
sc = SparkContext()
data_file = sys.argv[1]
out_file = sys.argv[2]
index = -1
index1 = -1
index2 = -1
row_per_band = 2
# # ======================================== FUNCTION : GENERATE COEFFICIENTS FOR HASH FUNCTION ========================================
# list.
def find_next_prime(a):
    """Return the smallest prime >= max(a, 2).

    BUG FIX: the original trial-division loop returned 1 for a=1 (1 is not
    prime), skipped 2 for a=2, and contained an unreachable `break` after
    `return`. It also divided by every i < p; checking odd divisors up to
    sqrt(p) is sufficient. By Bertrand's postulate a prime always exists
    below 2*a, so the search terminates.
    """
    def _is_prime(p):
        # Deterministic trial division up to sqrt(p).
        if p < 2:
            return False
        if p < 4:  # 2 and 3
            return True
        if p % 2 == 0:
            return False
        i = 3
        while i * i <= p:
            if p % i == 0:
                return False
            i += 2
        return True

    p = max(a, 2)
    while not _is_prime(p):
        p += 1
    return p
def generate_random_coeffs(k, nrows):
    """Return a list of k distinct random integers in [0, nrows] (inclusive).

    The original drew randint values in a rejection loop to enforce
    uniqueness; random.sample over range(nrows + 1) gives the same contract
    (k unique values, same inclusive range) in one call without the
    quadratic membership checks.
    """
    return random.sample(range(nrows + 1), k)
def minhashing(x):
    """Compute the min-hash signature of one business.

    Reads module globals `a` (hash coefficients) and `m` (modulus, number of
    users). Input is (business_id, [user row indices]); output is
    (business_id, signature list) with one minimum per hash function.
    """
    global a
    # global b
    global m
    business_id, user_rows = x[0], x[1]
    signature = [min((coeff * row + 1) % m for row in user_rows) for coeff in a]
    return (business_id, signature)
def get_business_signatures(x):
    """Pair a signature with a business id by position.

    NOTE(review): relies on a mutable global counter `index` and on the
    businesses list — correctness depends on this map being applied in the
    exact order of `businesses` and in a single process; fragile under
    Spark's parallel execution — verify against the calling pipeline.
    """
    global index
    index = index + 1
    business_id = businesses[index]
    return tuple((business_id, x))
def intermediate_step1(x):
    """Split one business's signature into LSH band keys.

    Input is (business_id, signature list). For each band b, emits
    ((b, tuple(rows in band)), [business_id]) so that businesses sharing any
    band hash to the same reduce key. Reads the global `row_per_band`;
    increments the global `index1` (kept for compatibility, otherwise unused).

    Cleanup: the original copied rows element-by-element and called a dead
    `row.clear()` after the tuple had already been materialized; plain
    slicing produces the same bands.
    """
    global index1
    global row_per_band
    index1 = index1 + 1
    business_id = x[0]
    signatures_list = x[1]
    n_bands = len(signatures_list) // row_per_band
    bands_list = []
    for b in range(n_bands):
        rows = tuple(signatures_list[b * row_per_band:(b + 1) * row_per_band])
        bands_list.append(((b, rows), [business_id]))
    return bands_list
def get_candidates(x):
    """Emit all candidate pairs from one LSH bucket.

    Input is (band key, [business ids]); the id list is sorted in place
    (as before) and every unordered pair is emitted as ((b1, b2), 1) with
    b1 < b2. The original's inner `if (j > i)` was always true inside
    `range(i + 1, ...)`; itertools.combinations expresses the same pairs
    directly.
    """
    from itertools import combinations
    bucket = x[1]
    bucket.sort()
    return [(pair, 1) for pair in combinations(bucket, 2)]
def find_jaccard_similarity(x):
    """Jaccard similarity of two businesses' user sets.

    Reads the module-level `businesswise_users` map (business -> user list).
    Input is ((business1, business2), _); output is ((business1, business2),
    |intersection| / |union|).
    """
    first, second = x[0][0], x[0][1]
    users_a = set(businesswise_users[first])
    users_b = set(businesswise_users[second])
    similarity = float(len(users_a & users_b) / len(users_a | users_b))
    return ((first, second), similarity)
def index_businesses(x):
    """Assign the next sequential index to a business (global counter index2)."""
    global index2
    index2 += 1
    return (x[0], index2)
# ======================================================= DRIVER PROGRAM START ======================================================
# Pipeline: read (user, business) ratings -> characteristic matrix ->
# min-hash signatures -> LSH banding -> candidate pairs -> Jaccard filter.
read_data = sc.textFile(data_file)
rdd_data = read_data.map(lambda x: x.split(','))
# Drop the CSV header row; keep the RDD cached since it is reused below.
rdd = rdd_data.filter(lambda x: x[0] != "user_id").persist()
# ----------------------------------------------- Creating Characteristic (0/1) Matrix -----------------------------------------------
users_rdd = rdd.map(lambda a: a[0]).distinct()
businesses_rdd = rdd.map(lambda a: a[1]).distinct()
# business -> list of user ids who rated it (used later for exact Jaccard).
businesswise_users = rdd.map(lambda x: (x[1], [x[0]])).reduceByKey(lambda x, y: x + y).collectAsMap()
# businesswise_indexes= businesswise_users.map(lambda x: index_businesses(x)).collectAsMap()
users = users_rdd.collect()
businesses = businesses_rdd.collect()
nrows = len(users)
ncols = len(businesses)
# Map each user / business id to a dense integer index.
users_dict = {}
for u in range(0, nrows):
    users_dict[users[u]] = u
businesses_dict = {}
for b in range(0, ncols):
    businesses_dict[businesses[b]] = b
# Sparse characteristic matrix: business -> list of user row indices.
characteristic_matrix = rdd\
    .map(lambda x: (x[1], [users_dict[x[0]]]))\
    .reduceByKey(lambda x, y: x + y)
print(characteristic_matrix.take(5))
# # ------------------------------------------------- Generating Minhash Signatures -------------------------------------------------
# Fixed hash coefficients read as globals by minhashing(); m is the modulus.
a = [1, 3, 9, 11, 13, 17, 19, 27, 29, 31, 33, 37, 39, 41, 43, 47, 51, 53, 57, 59]
m = nrows
num_of_hash_functions = 20
signature_matrix = characteristic_matrix.map(lambda x: minhashing(x))
# print(signature_matrix.take(5))
# --------------------------------------------------- Locality Sensitive Hashing --------------------------------------------------
# Divide signature matrix into bands of rows
sig = signature_matrix.flatMap(lambda x: intermediate_step1(x))
# Get candidates by filtering out the business pairs that have at least one band in common
candidate_gen = sig.reduceByKey(lambda x, y: x + y).filter(lambda x: len(x[1]) > 1)
# Locate candidates in pairs
candidates = candidate_gen.flatMap(lambda x: get_candidates(x)).distinct()
# Get all candidates with jaccard similarity greater than 0.5
jaccard_similarity_rdd = candidates.map(lambda x: find_jaccard_similarity(x)).filter(lambda x: x[1] >= 0.5)
# Sort candidates (by second id, then by first id).
sorted_js_rdd = jaccard_similarity_rdd.map(lambda x: (x[0][1], (x[0][0], x[1]))).sortByKey().map(
    lambda x: (x[1][0], (x[0], x[1][1]))).sortByKey()
# Get ground truth
# NOTE(review): hardcoded absolute user path — breaks on any other machine;
# should be a CLI argument like data_file/out_file.
ground = sc.textFile("/Users/anupriyachakraborty/Documents/USC-Semester-Work/Sem-4/Data-Mining/Homework/HW3/pure_jaccard_similarity.csv")\
    .map(lambda x: x.split(",")).map(lambda x: (x[0], x[1]))
rrr = jaccard_similarity_rdd.map(lambda x: (x[0][0], x[0][1]))
rrrrr = list(rrr.collect())
ggggg = list(ground.collect())
# True positives: predicted pairs that are also in the ground truth.
tp = rrr.intersection(ground)
ttttt= list(tp.collect())
# Get precision and recall
precision = len(ttttt)/len(rrrrr)
recall = len(ttttt)/len(ggggg)
print("precision:")
print(precision)
print("recall:")
print(recall)
# #====================================================== WRITING TO OUTPUT FILE ======================================================
f = open(out_file, 'w')
f.write("business_id_1, business_id_2, similarity")
for i in sorted_js_rdd.collect():
    f.write("\n")
    f.write(i[0] + "," + i[1][0] + "," + str(i[1][1]))
f.close()
end = time.time()
# print(str(jaccard_similarity_rdd.count()))
# print(str(len(ggggg)))
print("Duration: " + str(end - start))
#!/usr/bin/env python3
#
#Suin Kim
#CS270-002
#Lab 9
import sys
import unittest
from gInt import gInt
class gIntTest(unittest.TestCase):
    '''Tests for gInt class

    Each test verifies both the arithmetic result and that the operands
    were not mutated (checked against untouched copies made in setUp).
    '''
    def setUp(self):
        # One Gaussian integer with negative parts, one with positive parts,
        # plus identical copies used to detect accidental mutation.
        self.n1 = gInt(-4,-9)
        self.n1copy = gInt(-4,-9)
        self.p1 = gInt(6,2)
        self.p1copy = gInt(6,2)

    def tearDown(self):
        pass

    def test_add(self):
        '''(-4-9i) + (6+2i) == 2-7i, componentwise and via gInt equality.'''
        r = self.n1.real + self.p1.real
        self.assertEqual( self.n1.real, self.n1copy.real, 'Negative real part changed after addition.' )
        self.assertEqual( self.p1.real, self.p1copy.real, 'Positive real part changed after addition.' )
        self.assertEqual( r, 2, 'Addition of real parts failed.' )
        i = self.n1.imag + self.p1.imag
        self.assertEqual( self.n1.imag, self.n1copy.imag, 'Negative imaginary part changed after addition.' )
        self.assertEqual( self.p1.imag, self.p1copy.imag, 'Positive imaginary part changed after addition.' )
        self.assertEqual( i, -7, 'Addition of imaginary parts failed.' )
        g_int = gInt( r, i )
        self.assertEqual( g_int, gInt( 2, -7 ), 'Addition of Gaussian Integers failed.')

    def test_mul(self):
        '''(-4-9i) * (6+2i) == -6-62i via (ac-bd) + (ad+bc)i.'''
        r1 = self.n1.real * self.p1.real
        self.assertEqual( self.n1.real, self.n1copy.real, 'Negative real part changed after multiplication.' )
        self.assertEqual( self.p1.real, self.p1copy.real, 'Positive real part changed after multiplication.' )
        self.assertEqual( r1, -24, 'Multiplication of real parts failed.' )
        r2 = self.n1.imag * self.p1.imag
        self.assertEqual( self.n1.imag, self.n1copy.imag, 'Negative imaginary part changed after multiplication.' )
        self.assertEqual( self.p1.imag, self.p1copy.imag, 'Positive imaginary part changed after multiplication.' )
        self.assertEqual( r2, -18, 'Multiplication of imaginary parts failed.' )
        i1 = self.n1.real * self.p1.imag
        self.assertEqual( self.n1.real, self.n1copy.real, 'Negative real part changed after multiplication.' )
        self.assertEqual( self.p1.imag, self.p1copy.imag, 'Positive imaginary part changed after multiplication.' )
        self.assertEqual( i1, -8, 'Multiplication of real and imaginary parts failed.' )
        i2 = self.n1.imag * self.p1.real
        self.assertEqual( self.n1.imag, self.n1copy.imag, 'Negative imaginary part changed after multiplication.' )
        self.assertEqual( self.p1.real, self.p1copy.real, 'Positive real part changed after multiplication.' )
        self.assertEqual( i2, -54, 'Multiplication of real and imaginary parts failed.' )
        # Combine cross terms: real = ac - bd, imag = ad + bc.
        r = r1 - r2
        i = i1 + i2
        self.assertEqual( r, -6, 'Combination of real parts failed.' )
        self.assertEqual( i, -62, 'Combination of imaginary parts failed.' )
        g_int = gInt( r, i )
        self.assertEqual( g_int, gInt( -6, -62 ), 'Multiplication of Gaussian Integers failed.' )

    def test_norm(self):
        '''Norm a^2 + b^2: |(-4-9i)| == 97, |(6+2i)| == 40.'''
        n1_r_sq = self.n1.real * self.n1.real
        self.assertEqual( self.n1.real, self.n1copy.real, 'Negative real part changed after squaring.' )
        self.assertEqual( n1_r_sq, 16, 'Squaring of negative real part failed.' )
        n1_i_sq = self.n1.imag * self.n1.imag
        self.assertEqual( self.n1.imag, self.n1copy.imag, 'Negative imaginary part changed after squaring.' )
        self.assertEqual( n1_i_sq, 81, 'Squaring of negative imaginary part failed.' )
        p1_r_sq = self.p1.real * self.p1.real
        self.assertEqual( self.p1.real, self.p1copy.real, 'Positive real part changed after squaring.' )
        self.assertEqual( p1_r_sq, 36, 'Squaring of positive real part failed.' )
        p1_i_sq = self.p1.imag * self.p1.imag
        self.assertEqual( self.p1.imag, self.p1copy.imag, 'Positive imaginary part changed after squaring.' )
        self.assertEqual( p1_i_sq, 4, 'Squaring of positive imaginary part failed.' )
        norm_n = n1_r_sq + n1_i_sq
        norm_p = p1_r_sq + p1_i_sq
        self.assertEqual( norm_n, 97, 'Failed to get norm of a negative Gaussian Integer.' )
        self.assertEqual( norm_p, 40, 'Failed to get norm of a positive Gaussian Integer.' )


if __name__ == '__main__':
    # Force verbose unittest output without needing to pass -v on the CLI.
    sys.argv.append( '-v' )
    unittest.main()
|
'''
Fashion images
8. Predict the product category from a fashion image (TensorFlow, Keras).
'''
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
import warnings
warnings.filterwarnings('ignore')

# BUG FIX: the original imported tensorflow.examples.tutorials.mnist, which
# was removed from TensorFlow 2.x — the import crashed the script and nothing
# used it (Fashion-MNIST is loaded through tf.keras below).

# Load Fashion-MNIST: 60k 28x28 grayscale training images, 10k test images.
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# Human-readable names for the integer class ids 0-9.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print(train_images.shape)
print(train_labels.shape)
print(test_images.shape)
print(test_labels.shape)
|
import numpy as np
def get_std(data, mean):
    """Population standard deviation of `data` around the supplied `mean`.

    Equals np.std(data) when `mean` is the sample mean; vectorized with
    NumPy instead of the original element-by-element Python loop (same
    value, computed as sqrt(mean((x - mean)^2))).
    """
    values = np.asarray(data, dtype=float)
    return np.sqrt(np.mean((values - mean) ** 2))
# Evaluate predicted CDR values against the fixed ground truth 0.3.
# BUG FIX: the input file handle was opened without ever being closed;
# a `with` block closes it deterministically.
with open('./temp/joint_images/real_cdr_0_3.txt', 'r') as f:
    data_list = [float(line.strip()) for line in f]

cdr = 0.3
# Absolute error of each prediction against the ground-truth CDR.
abs_errors = np.array([abs(x - cdr) for x in data_list])
mean = np.mean(abs_errors)
error = get_std(abs_errors, mean)  # hand-rolled population std (cross-check)
np_error = np.std(abs_errors)      # NumPy's population std, for comparison
print('-------------')
print('set cdr=0.3')
print('-------------')
print('abs error mean:', mean)
print('abs error std:', error)
print('np std:', np_error)
|
import os
import logging
from flask import current_app
import boto3
from botocore.exceptions import ClientError
from werkzeug.utils import secure_filename
# S3 credentials are read at import time; raises KeyError immediately if
# either variable is unset (fail-fast rather than failing on first upload).
AWS_ACCESS_KEY = os.environ["AWS_ACCESS_KEY"]
AWS_SECRET_KEY = os.environ["AWS_SECRET_KEY"]

# Single module-level S3 client shared by all FileManager operations.
s3_client = boto3.client(
    "s3", aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY
)

# create directory to store uploaded files
INPUT_FILE_DIR = "./input_files"
os.makedirs(INPUT_FILE_DIR, exist_ok=True)

# Root logger; level/handlers are expected to be configured by the host app.
logger = logging.getLogger()
class FileManager:
    """Manages saving files, and s3 storage.

    BUG FIX: the methods were declared without `self` and without
    @staticmethod, so calling them on a FileManager *instance* raised
    TypeError (the instance was bound to the first data parameter). Marking
    them @staticmethod keeps existing FileManager.save_file(...) call sites
    working and makes instance calls valid too.
    """

    @staticmethod
    def save_file(input_file):
        """Saves file to tmp directory.

        :param input_file: uploaded file object exposing .filename and .save()
        :return: (filepath on disk, sanitized filename)
        """
        # secure_filename strips path separators and unsafe characters.
        safe_filename = secure_filename(input_file.filename)
        filepath = os.path.join(INPUT_FILE_DIR, safe_filename)
        logger.info(f"saving file to {filepath}")
        input_file.save(filepath)
        return filepath, safe_filename

    @staticmethod
    def upload_to_s3(file_name, object_name=None, bucket_name="drum-separator"):
        """Upload a file to an S3 bucket, return the object name.

        :param file_name: File to upload
        :param object_name: S3 object name. If not specified then file_name is used
        :param bucket_name: Bucket to upload to
        :return: s3 object name, or None on failure
        """
        # If S3 object_name was not specified, use file_name
        if object_name is None:
            object_name = file_name
        # Upload the file
        try:
            logger.info(f"uploading file: {file_name}")
            s3_client.upload_file(file_name, bucket_name, object_name)
            return object_name
        except ClientError as e:
            # Consistency: use the module logger (the original mixed
            # logging.error and logger.info; both hit the root logger).
            logger.error(e)
            return None

    @staticmethod
    def get_presigned_url(bucket_name, object_name, expiration=3600):
        """Generate a presigned URL to share an S3 object.

        :param bucket_name: string
        :param object_name: string
        :param expiration: Time in seconds for the presigned URL to remain valid
        :return: Presigned URL as string. If error, returns None.
        """
        # Generate a presigned URL for the S3 object
        try:
            response = s3_client.generate_presigned_url(
                "get_object",
                Params={"Bucket": bucket_name, "Key": object_name},
                ExpiresIn=expiration,
            )
        except ClientError as e:
            logger.error(e)
            return None
        # The response contains the presigned URL
        return response
|
import datetime as dt
from datetime import datetime, timedelta
from django.db import models
from django.utils import timezone
# class UserData(models.Model):
# user = models.CharField(max_length=200)
# tasks_todo = models.IntegerField("tasks todo")
# tasks_overdue = models.IntegerField("tasks overdue")
# tasks_done = models.IntegerField("tasks done")
# tasks_done_recent = models.IntegerField("tasks done recent")
#
# def find_tasks_todo(self):
# return len(list(Task.objects.filter_by(associated_user=self, done=False)))
#
# def find_tasks_overdue(self):
# return len(
# list(Task.objects.filter_by(associated_user=self, done=False, overdue=True))
# )
#
# def find_tasks_done(self):
# return len(
# list(
# Task.objects.filter_by(associated_user=self, done=True).order_by(
# completion_time
# )[:5]
# )
# )
#
# def find_tasks_done_recent(self):
# return len(list(Task.objects.filter_by(associated_user=self, done=True)))
#
# def __str__(self):
# return f"User data for {self.user}"
#
# def __repr__(self):
# return f"User data for {self.user}"
def _default_due_date():
    """Callable default: evaluated per task creation, not at import time."""
    return timezone.now().date()


def _default_due_time():
    """Callable default: evaluated per task creation, not at import time."""
    return timezone.now().time()


class Task(models.Model):
    """A to-do item with scheduling info and completion statistics."""

    # Define the choices to be used in the priority field
    LOW = 1
    MED = 2
    HIGH = 3
    PRIORITY_LIST = [
        (LOW, "Low"),
        (MED, "Normal"),
        (HIGH, "High"),
    ]
    # Define the attributes that tasks will have
    title = models.CharField(max_length=200)
    description = models.CharField(max_length=1000)
    # BUG FIX: the defaults were `timezone.now().date()` / `.time()` —
    # those calls run once when the module (or migration) is loaded, so
    # every task created afterwards got the same stale date/time. Passing
    # callables defers evaluation to each row's creation.
    due_date = models.DateField("due date", default=_default_due_date)
    due_time = models.TimeField("due time", default=_default_due_time)
    time_estimate = models.DurationField("time estimate", default=timedelta(minutes=0))
    priority = models.IntegerField("priority", choices=PRIORITY_LIST, default=2)
    done = models.BooleanField(default=False)
    # Fields added later for the purpose of statistics tracking
    completion_time = models.DateTimeField("completion time", null=True, blank=True)
    completed_on_time = models.BooleanField(default=False)
    completed_in_time = models.BooleanField(default=False)
    time_spent = models.DurationField(
        "time spent", default=timedelta(hours=0, minutes=0)
    )
    completion_delta = models.DurationField("completion delta", null=True, blank=True)
    estimate_accuracy = models.DecimalField(
        "estimate accuracy", max_digits=4, decimal_places=1, null=True, blank=True
    )

    def __str__(self):
        return self.title

    # Check whether the task is overdue (due moment is at or before now)
    def is_overdue(self):
        due_datetime = datetime.combine(self.due_date, self.due_time)
        return due_datetime <= datetime.now()

    # Mark the task as done
    def mark_done(self):
        self.done = True
        # Record whether the task was completed before its due date,
        # and within the user's time estimate
        self.completion_time = timezone.now()
        self.completed_on_time = not self.is_overdue()
        self.completed_in_time = self.time_spent <= self.time_estimate

    # Unmark the task as done
    def mark_todo(self):
        self.done = False
        # These values need to be reset
        self.completed_on_time = False
        self.completed_in_time = False
        self.completion_time = None

    # Alter the time spent on the task
    def alter_time_spent(self, delta):
        # Time spent can't be negative, so need to check
        if (self.time_spent + delta).total_seconds() >= 0:
            self.time_spent += delta
        # If the value would be negative, just set it to 0 minutes
        else:
            self.time_spent = timedelta(minutes=0)

    def get_absolute_url(self):
        return f"/task/{self.id}/"
class Event(models.Model):
    """A one-off calendar event occupying a time range on a single date."""

    title = models.CharField(max_length=200)
    date = models.DateField("date")
    start_time = models.TimeField("start time")
    end_time = models.TimeField("end time")
    # When True this event takes precedence over any routine block.
    override_routine = models.BooleanField(default=False)

    def __str__(self):
        return self.title

    def get_date(self):
        """Return the date the event occurs on."""
        return self.date

    def get_start(self):
        """Return the event's start time."""
        return self.start_time

    def get_end(self):
        """Return the event's end time."""
        return self.end_time

    def get_name(self):
        """Return the event's title."""
        return self.title

    def does_clash(self, other):
        """Return True if this event overlaps *other*.

        For events A and B to clash they must share a date, and A must
        start before B ends and end after B starts.  (Idiom fix: return
        the condition directly instead of if/else with literal booleans.)
        """
        return (
            self.date == other.date
            and self.start_time < other.end_time
            and self.end_time > other.start_time
        )

    def get_absolute_url(self):
        """Canonical URL of this event's detail page."""
        return f"/event/{self.id}/"
class Routine(models.Model):
    """A weekly recurring time block on a fixed weekday."""
    # Weekday constants (Monday = 0 ... Sunday = 6) used by the day field.
    MON = 0
    TUE = 1
    WED = 2
    THU = 3
    FRI = 4
    SAT = 5
    SUN = 6
    DAY_CHOICES = [
        (MON, "Monday"),
        (TUE, "Tuesday"),
        (WED, "Wednesday"),
        (THU, "Thursday"),
        (FRI, "Friday"),
        (SAT, "Saturday"),
        (SUN, "Sunday"),
    ]
    title = models.CharField(max_length=200, null=True)
    day = models.IntegerField("day", choices=DAY_CHOICES)
    start_time = models.TimeField("start time")
    end_time = models.TimeField("end time")
    def get_day(self):
        """Return the weekday index (see DAY_CHOICES)."""
        return self.day
    def get_start(self):
        """Return the start time of the routine block."""
        return self.start_time
    def get_end(self):
        """Return the end time of the routine block."""
        return self.end_time
    def get_absolute_url(self):
        """Canonical URL of this routine's detail page."""
        return f"/routine/{self.id}/"
class TimeSlot(models.Model):
    """A concrete scheduled slot on a date, backed by a Task, Event or Routine."""
    # Define the options to be used in the associated type field
    TYPE_CHOICES = [
        ("T", "task"),
        ("E", "event"),
        ("R", "routine"),
    ]
    # Define the attributes
    date = models.DateField("date")
    start_time = models.TimeField("start time")
    end_time = models.TimeField("end time")
    # Facilitate tracking of the associated object: associated_type says which
    # of the three foreign keys below is the live one; the other two stay NULL.
    associated_type = models.CharField("type", max_length=200, choices=TYPE_CHOICES)
    associated_task = models.ForeignKey(Task, on_delete=models.CASCADE, null=True)
    associated_event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True)
    associated_routine = models.ForeignKey(Routine, on_delete=models.CASCADE, null=True)
    def get_date(self):
        """Return the date the slot is scheduled on."""
        return self.date
    def get_start(self):
        """Return the slot's start time."""
        return self.start_time
    def get_end(self):
        """Return the slot's end time."""
        return self.end_time
    def __str__(self):
        return f"TimeSlot type {self.associated_type}"
    def __repr__(self):
        return f"TimeSlot type {self.associated_type}"
|
import numpy as np
import scipy.io as sio
import scipy.misc
from keras.preprocessing import image
from skimage.transform import rotate
from time import time
from utils import pro_process, BW_img
import cv2
import os
import Model_MNet as MNetModel
def load_model(pre_model_MNetSeg, CDRSeg_size=256):
    """Build the M-Net CDR segmentation network and load pretrained weights.

    pre_model_MNetSeg -- path to the pretrained weight file
    CDRSeg_size       -- input size the network was built for
    """
    model = MNetModel.DeepModel(size_set=CDRSeg_size)
    model.load_weights(pre_model_MNetSeg, by_name=True)
    return model
'''
Return the polar (linear-polar, rotated) image of the input together with
its corresponding polar segmentation image.
'''
def mnet_segment(img_path, CDRSeg_model, DiscROI_size=256, CDRSeg_size = 256):
    """Segment optic disc and cup in the image at img_path with the M-Net model.

    Returns (Disc_flat, segment_flat): the polar-transformed input image and
    the matching polar segmentation (disc mask in channel 0, cup in channel 1).
    NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 -- this code
    requires an old pinned SciPy (or a shim); confirm the environment.
    """
    # temp_txt = [elt.strip() for elt in file_test_list[lineIdx].split(',')]
    disc_region = np.asarray(image.load_img(img_path))
    disc_region = scipy.misc.imresize(disc_region, (DiscROI_size, DiscROI_size, 3))
    # Polar-unwrap around the image centre, then rotate by -90 degrees so the
    # radial axis lies the way the network expects.
    Disc_flat = rotate(cv2.linearPolar(disc_region, (DiscROI_size/2, DiscROI_size/2),
                       DiscROI_size/2, cv2.WARP_FILL_OUTLIERS), -90)
    temp_img = pro_process(Disc_flat, CDRSeg_size)
    # Add the batch dimension expected by the Keras model.
    temp_img = np.reshape(temp_img, (1,) + temp_img.shape)
    # The model emits five side outputs; only the last one is used.
    [prob_6, prob_7, prob_8, prob_9, prob_10] = CDRSeg_model.predict(temp_img)
    #[400,400,2]
    prob_map = np.reshape(prob_10, (prob_10.shape[1], prob_10.shape[2], prob_10.shape[3]))
    # Channel 0 = disc probability, channel 1 = cup probability.
    disc_map = scipy.misc.imresize(prob_map[:, :, 0], (DiscROI_size, DiscROI_size))
    cup_map = scipy.misc.imresize(prob_map[:, :, 1], (DiscROI_size, DiscROI_size))
    #denoising: zero the bottom rows of the polar maps (largest radii)
    disc_map[-round(DiscROI_size / 3):, :] = 0
    cup_map[-round(DiscROI_size / 2):, :] = 0
    #converts the grayscale image I to binary image BW
    disc_map = BW_img(disc_map, 0.5)
    cup_map = BW_img(cup_map, 0.5)
    # Inverse polar transform: map the binary masks back to Cartesian space.
    De_disc_map = cv2.linearPolar(rotate(disc_map, 90), (DiscROI_size/2, DiscROI_size/2),
                                  DiscROI_size/2, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)
    De_cup_map = cv2.linearPolar(rotate(cup_map, 90), (DiscROI_size/2, DiscROI_size/2),
                                 DiscROI_size/2, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)
    # Pack disc (ch 0) and cup (ch 1) masks into one 3-channel array.
    rgbArray = np.zeros((DiscROI_size, DiscROI_size, 3))
    rgbArray[..., 0] = BW_img(De_disc_map, 0.5)
    rgbArray[..., 1] = BW_img(De_cup_map, 0.5)
    # Re-unwrap the combined mask so it lines up with Disc_flat.
    segment_flat = rotate(cv2.linearPolar(rgbArray, (DiscROI_size / 2, DiscROI_size / 2),
                          DiscROI_size / 2, cv2.WARP_FILL_OUTLIERS), -90)
    return Disc_flat, segment_flat
#save polar image
# scipy.misc.imsave(data_save_path + temp_txt[0][:-4] + '_flat.png', Disc_flat)
# scipy.misc.imsave(data_save_path + temp_txt[0][:-4] + '_seg.png', segment_flat)
def get_image_cdr(disc_region, CDRSeg_model, DiscROI_size=256, CDRSeg_size = 256):
    """Compute the cup-to-disc ratio for an already-loaded disc region.

    Same pipeline as mnet_segment, but takes an in-memory image array and
    returns only the scalar CDR from calculate_cdr.
    """
    # temp_txt = [elt.strip() for elt in file_test_list[lineIdx].split(',')]
    # disc_region = np.asarray(image.load_img(img_path))
    disc_region = scipy.misc.imresize(disc_region, (DiscROI_size, DiscROI_size, 3))
    # Polar-unwrap around the centre and rotate for the network.
    Disc_flat = rotate(cv2.linearPolar(disc_region, (DiscROI_size / 2, DiscROI_size / 2),
                       DiscROI_size / 2, cv2.WARP_FILL_OUTLIERS), -90)
    temp_img = pro_process(Disc_flat, CDRSeg_size)
    # Add the batch dimension expected by the Keras model.
    temp_img = np.reshape(temp_img, (1,) + temp_img.shape)
    [prob_6, prob_7, prob_8, prob_9, prob_10] = CDRSeg_model.predict(temp_img)
    # [400,400,2]
    prob_map = np.reshape(prob_10, (prob_10.shape[1], prob_10.shape[2], prob_10.shape[3]))
    # Channel 0 = disc probability, channel 1 = cup probability.
    disc_map = scipy.misc.imresize(prob_map[:, :, 0], (DiscROI_size, DiscROI_size))
    cup_map = scipy.misc.imresize(prob_map[:, :, 1], (DiscROI_size, DiscROI_size))
    # denoising: zero the bottom rows of the polar maps (largest radii)
    disc_map[-round(DiscROI_size / 3):, :] = 0
    cup_map[-round(DiscROI_size / 2):, :] = 0
    # converts the grayscale image I to binary image BW
    disc_map = BW_img(disc_map, 0.5)
    cup_map = BW_img(cup_map, 0.5)
    # Inverse polar transform back to Cartesian space before measuring.
    De_disc_map = cv2.linearPolar(rotate(disc_map, 90), (DiscROI_size / 2, DiscROI_size / 2),
                                  DiscROI_size / 2, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)
    De_cup_map = cv2.linearPolar(rotate(cup_map, 90), (DiscROI_size / 2, DiscROI_size / 2),
                                 DiscROI_size / 2, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)
    cdr = calculate_cdr(De_cup_map, De_disc_map)
    return cdr
def calculate_cdr(cup_map, disc_map):
    """Return the vertical cup-to-disc ratio of two binary masks.

    The diameter of a mask is the row-extent (max row - min row) of its
    positive pixels; an empty mask falls back to a diameter of 1 so the
    division is always defined.
    """
    def _vertical_diameter(mask):
        # Row indices of every positive pixel.
        rows = np.where(mask > 0)[0]
        if rows.size:
            return rows.max() - rows.min()
        return 1

    return _vertical_diameter(cup_map) / _vertical_diameter(disc_map)
|
import cv2
import numpy as np
import math
# Capture thresholded hand images from the webcam and save each frame into a
# per-digit folder when the corresponding number key ('0'..'6') is pressed.
cap = cv2.VideoCapture(0)
# One counter per digit class; change the starting values if you want to add
# more images, otherwise this will overwrite the existing data.
# BUG FIX: the original list had only 6 entries, so pressing '6' (key 54)
# indexed counter[6] and raised IndexError.
counter = [0, 0, 0, 0, 0, 0, 0]
while cap.isOpened():
    ret, img = cap.read()
    # Green 200x200 capture region in the top-left corner.
    cv2.rectangle(img, (0, 0), (200, 200), (0, 255, 0), 0)
    crop_img = img[0:200, 0:200]
    grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
    value = (35, 35)
    blurred = cv2.GaussianBlur(grey, value, 0)
    _, thresh1 = cv2.threshold(blurred, 127, 255,
                               cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    cv2.imshow('img', img)
    cv2.imshow('Thresholded', thresh1)
    k = cv2.waitKey(10)
    if k == 27:  # ESC quits
        break
    if 48 <= k <= 54:  # keys '0'..'6' save into the matching class folder
        digit = k - 48
        cv2.imwrite("./data/" + str(digit) + "/" + str(counter[digit]) + ".jpg", thresh1)
        counter[digit] = counter[digit] + 1
# Release the camera and close the preview windows (missing in the original).
cap.release()
cv2.destroyAllWindows()
|
"""
perceptron.py
The Perceptron is another simple algorithm suitable for large scale learning.
By default:
- It does not require a learning rate.
- It is not regularized (penalized).
- It updates its model only on mistakes.
The last characteristic implies that the Perceptron is slightly faster to
train than SGD with the hinge loss and that the resulting models are sparser.
https://en.wikipedia.org/wiki/Perceptron
o(x1, x2, ..., xN) = ((w0 + w1x1 + w2x2 + ... + wNxN) > 0) ? (1) : (-1)
Results:
Number of correct matches: 1273
Total number of data points: 1348
Ratio of correct predictions: 0.944362017804
Classification report
precision recall f1-score support
0 1.00 0.97 0.99 139
1 0.94 0.95 0.94 137
2 0.98 0.98 0.98 129
3 0.96 0.94 0.95 141
4 0.97 0.95 0.96 127
5 0.88 0.96 0.92 147
6 0.95 0.97 0.96 135
7 0.92 1.00 0.96 126
8 0.93 0.84 0.88 134
9 0.93 0.89 0.91 133
avg / total 0.95 0.94 0.94 1348
Confusion matrix
[[135 0 0 0 1 3 0 0 0 0]
[ 0 130 1 0 0 3 1 1 0 1]
[ 0 0 126 0 0 0 0 2 1 0]
[ 0 0 0 132 0 3 0 1 3 2]
[ 0 2 0 0 121 0 0 2 0 2]
[ 0 1 0 0 1 141 1 1 0 2]
[ 0 0 0 0 0 2 131 0 2 0]
[ 0 0 0 0 0 0 0 126 0 0]
[ 0 4 1 4 1 4 5 1 112 2]
[ 0 2 0 2 1 4 0 3 2 119]]
"""
import matplotlib.pyplot as plt
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split
from sklearn import metrics
# handwritten digits data set
from sklearn.datasets import load_digits
digits = load_digits()
# Display the data that will be used, represented as multiple subplots in a
# grid format. Each subplot will contain the string representation of the
# image's number.
def plot_data(n = 64):
    """Display the first *n* digit images in an 8x8 grid of subplots, each
    labelled (at x=0, y=7 -- origin is the top-left corner) with the string
    representation of its target value."""
    # Size of the figure: width x height in inches.
    fig = plt.figure(figsize=(6, 6))
    # Leave a small gap to the right and above each image.
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
    for idx in range(n):
        # Arguments: nrows, ncols, index (ticks suppressed).
        axes = fig.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
        # Grayscale rendering with no interpolation blur between pixels.
        axes.imshow(digits.images[idx], cmap=plt.cm.binary, interpolation='nearest')
        axes.text(0, 7, str(digits.target[idx]))
def perceptron():
    """Train a Perceptron classifier on the digits data set, display its
    predictions on the test images, and print accuracy metrics.

    Hyper-parameters below were tuned empirically (test-set accuracy on a
    75% test / 25% train split, random_state=0):
      - penalty: None (93.2%) beat 'l1' (93.2%) and 'l2' (90.6%)
      - shuffle=True (94.0%) beat shuffle=False (90.9%)
      - fit_intercept=True (94.0%) beat False (92.0%)
      - tol=None (94.0%) beat tol=1.0 (91.5%)
      - class_weight=None (94.0%) beat "balanced" (93.6%)
    Perceptron() is equivalent to
    SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
                  penalty=None).
    """
    # Split the data into random train and test subsets.
    X_train, X_test, y_train, y_test = train_test_split(digits.data,
                                                        digits.target,
                                                        test_size=0.75,
                                                        train_size=0.25,
                                                        random_state=0)
    # Create the model with the tuned hyper-parameters (see docstring).
    perceptron = Perceptron(penalty=None, alpha=0.0001, fit_intercept=True,
                            max_iter=56, tol=None, shuffle=True, verbose=0,
                            eta0=1.0, n_jobs=1, random_state=0,
                            class_weight=None, warm_start=False)
    # Train the model (fit linear model with Stochastic Gradient Descent).
    perceptron.fit(X_train, y_train)
    est = perceptron.predict(X_test)  # estimated labels
    act = y_test                      # actual labels

    # Display the predictions on the first 64 test images in an 8x8 grid;
    # correct predictions are labelled green, errors red.
    fig = plt.figure(figsize=(6, 6))
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
    for i in range(64):
        ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
        ax.imshow(X_test.reshape(-1, 8, 8)[i], cmap=plt.cm.binary,
                  interpolation='nearest')
        colour = 'green' if est[i] == act[i] else 'red'
        ax.text(0, 7, str(est[i]), color=colour)

    # Quantify the performance.  BUG FIX: the original used Python-2 print
    # statements, which are a SyntaxError under Python 3.
    matches = (est == act)
    print("\nNumber of correct matches:", matches.sum())
    print("Total number of data points:", len(matches))
    print("Ratio of correct predictions:", matches.sum() / float(len(matches)))
    print("\nClassification report\n", metrics.classification_report(act, est))
    print("\tConfusion matrix\n", metrics.confusion_matrix(act, est))
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    plot_data()
    perceptron()
|
# Only ~400 calls are made, so a simple difference map suffices: record the
# +1/-1 deltas at interval endpoints and rescan the sorted endpoints to find
# the running maximum on every booking.
class MyCalendarThree:
    """Tracks the maximum number of concurrently overlapping bookings."""

    def __init__(self):
        # endpoint -> net change in the number of active bookings there
        self.dic = {}

    def book(self, start: int, end: int) -> int:
        """Record booking [start, end) and return the current max overlap."""
        self.dic[start] = self.dic.get(start, 0) + 1
        self.dic[end] = self.dic.get(end, 0) - 1
        active = 0
        best = 0
        # Sweep the endpoints in increasing order, accumulating deltas.
        for _, delta in sorted(self.dic.items()):
            active += delta
            best = max(best, active)
        return best
|
from collections import namedtuple
def loadDataWithTeacher(line):
    """Parse one "<input> <output>" line into a ConfigData record.

    BUG FIX: the original assigned class attributes on the namedtuple TYPE
    and returned the type rather than an instance, and it stripped the last
    character unconditionally (line[number:-1]), which corrupted the value
    when the line had no trailing newline.  float() handles surrounding
    whitespace, so no manual trimming is needed.
    """
    ConfigData = namedtuple("ConfigData", "input output")
    input_part, _, output_part = line.partition(" ")
    return ConfigData(input=float(input_part), output=float(output_part))
from _typeshed import Incomplete
# Type stubs (typeshed style): signatures only, bodies are intentionally `...`.
# They describe read-only graph view constructors; implementations live in the
# real package, not in this stub file.
def generic_graph_view(G, create_using: Incomplete | None = None): ...
def subgraph_view(G, filter_node=..., filter_edge=...): ...
def reverse_view(G): ...
|
import requests
import json
import re
import time
import random
from random import sample
Google_API = 'https://trends.google.com.tw/trends/api/dailytrends?hl=zh-TW&tz=-480&geo=TW&ns=15'
# Fetch today's Google Trends (Taiwan) daily searches, format each keyword
# with up to 3 article links and up to 4 related queries, and print one at
# random.
if __name__ == '__main__':
    count = 0  # index of the current entry in the Google Trends feed
    output = []
    # [6:] drops the anti-hijack prefix Google prepends before the JSON body.
    a = requests.get(Google_API).text[6:]
    b = json.loads(a)
    for item in b['default']['trendingSearchesDays'][0]['trendingSearches']:  # today's trending searches
        hot = item['title']['query']  # the trending keyword
        #print('第'+str(count+1)+'個關鍵字 : '+hot)
        result = hot+'\n'
        # Append up to three related article titles and links.
        for news in range(0,3):
            try:
                tit = b['default']['trendingSearchesDays'][0]['trendingSearches'][count]['articles'][news]['title']
                tit = re.sub(r'</?\w+[^>]*>','',tit)  # strip HTML tags from the title
                href = b['default']['trendingSearchesDays'][0]['trendingSearches'][count]['articles'][news]['url']  # article link
                result = result +tit+href+'\n'
            except IndexError:
                # Fewer than 3 articles for this keyword -- skip the rest.
                pass
                #content = b['default']['trendingSearchesDays'][0]['trendingSearches'][count]['articles'][news]['snippet'] #內容
                #content = re.sub(r'</?\w+[^>]*>','',content) #去除標籤
                #content = content[0:-9]+'...' #去除&nbsp
                #print(content)
        #============== related queries =================
        for keys in range(0,4):
            try:
                related = item['relatedQueries'][keys]['query']  # related search query
                result = result +'\n相關搜尋 : '+related
            except IndexError:
                # Fewer than 4 related queries -- skip the rest.
                pass
        output.append(result)
        count += 1
    print(random.choice(output))
|
import pygame as pg
vec = pg.math.Vector2
from settings import *
class SpriteSheet:
    """Utility for loading a spritesheet and cutting sprites out of it."""

    def __init__(self, filename):
        self.spritesheet = pg.image.load(filename).convert()

    def get_image(self, x, y, width, height):
        """Cut the (x, y, width, height) region out of this sheet,
        doubled in size, with WHITE as the transparent colour."""
        return self.loadImage(self.spritesheet, x, y, width, height)

    def loadImage(self, inimage, x, y, width, height):
        """Cut the (x, y, width, height) region out of an arbitrary surface,
        doubled in size, with WHITE as the transparent colour."""
        surface = pg.Surface((width, height))
        surface.blit(inimage, (0, 0), (x, y, width, height))
        surface = pg.transform.scale(surface, (width * 2, height * 2))
        surface.set_colorkey(WHITE)
        return surface
class Player(pg.sprite.Sprite):
    """The controllable player sprite: keyboard movement, 4-direction walk
    animation, debug teleport, attack clean-up and wall collision."""
    def __init__(self, game, x, y):
        self.game = game
        self.groups = game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game  # NOTE(review): duplicate of the assignment above
        self.image = game.player_img
        self.rect = self.image.get_rect()
        self.vx, self.vy = 0, 0
        # World position in pixels; map coordinates arrive in 16px units.
        self.x = x * 16
        self.y = y * 16
        self.last_update = pg.time.get_ticks()
        self.frame = 0  # current walk-animation frame index
        self.frame_rate = ANIMATIONSPEED  # ms between animation frames
        self.direction = 'down'
        self.attack = False
        self.centerrect = 0
    def get_keys(self):
        """Poll the keyboard each frame: set velocity, facing direction and
        advance the walk animation.  Movement is suppressed while the
        game-level attack flag is set."""
        self.vx, self.vy = 0, 0
        if self.game.attack == False:
            keys = pg.key.get_pressed()
            if keys[pg.K_LEFT] or keys[pg.K_a]:
                self.game.walk_sound.play(-1)
                now = pg.time.get_ticks()
                self.vx = -200
                self.direction = 'left'
                # Advance the walk cycle; IndexError wraps back to frame 0.
                if now - self.last_update > self.frame_rate:
                    self.last_update = now
                    try:
                        self.game.player_img = self.game.walkleft[self.frame]
                        self.frame += 1
                    except IndexError:
                        self.frame = 0
                self.swing = False
            elif keys[pg.K_RIGHT] or keys[pg.K_d]:
                self.game.walk_sound.play(-1)
                now = pg.time.get_ticks()
                self.vx = 200
                self.direction = 'right'
                if now - self.last_update > self.frame_rate:
                    self.last_update = now
                    try:
                        self.game.player_img = self.game.walkright[self.frame]
                        self.frame += 1
                    except IndexError:
                        self.frame = 0
                self.swing = False
            elif keys[pg.K_UP] or keys[pg.K_w]:
                self.game.walk_sound.play(-1)
                now = pg.time.get_ticks()
                self.vy = -200
                self.direction = 'up'
                if now - self.last_update > self.frame_rate:
                    self.last_update = now
                    try:
                        self.game.player_img = self.game.walkup[self.frame]
                        self.frame += 1
                    except IndexError:
                        self.frame = 0
                self.swing = False
            elif keys[pg.K_DOWN] or keys[pg.K_s]:
                self.game.walk_sound.play(-1)
                now = pg.time.get_ticks()
                self.direction = 'down'
                self.vy = 200
                if now - self.last_update > self.frame_rate:
                    self.last_update = now
                    try:
                        self.game.player_img = self.game.walkdown[self.frame]
                        self.frame += 1
                    except IndexError:
                        self.frame = 0
                self.swing = False
            elif self.vx != 0 and self.vy != 0:
                # Diagonal normalisation (1/sqrt(2)).
                # NOTE(review): unreachable -- vx/vy are only made non-zero in
                # earlier branches of this elif chain, so diagonal movement is
                # never actually normalised.
                self.vx *= 0.7071
                self.vy *= 0.7071
                self.swing = False
            elif keys[pg.K_t]:
                # Debug teleport back to tile (1, 1).
                self.x = 1 * TILESIZE
                self.y = 1 * TILESIZE
                self.swing = False
            elif self.attack and not keys[pg.K_SPACE]:
                # Space released: remove the sword sprite and end the attack.
                self.playersword.kill()
                self.attack = False
        else:
            # No movement allowed: stop footsteps and show the idle frame
            # matching the last facing direction.
            self.game.walk_sound.stop()
            if self.direction == 'down':
                self.game.player_img = self.game.walkdown1
            if self.direction == 'up':
                self.game.player_img = self.game.walkup3
            if self.direction == 'right':
                self.game.player_img = self.game.walkright1
            if self.direction == 'left':
                self.game.player_img = self.game.walkleft1
            self.centerrect = self.rect.right
            self.swing = False
    def collide_with_walls(self, dir):
        """Resolve collisions along one axis ('x' or 'y'): snap the player to
        the wall edge and zero the velocity on that axis."""
        if dir == 'x':
            hits = pg.sprite.spritecollide(self, self.game.walls, False)
            if hits:
                if self.vx > 0:
                    self.x = hits[0].rect.left - self.rect.width
                if self.vx < 0:
                    self.x = hits[0].rect.right
                self.vx = 0
                self.rect.x = self.x
        if dir == 'y':
            hits = pg.sprite.spritecollide(self, self.game.walls, False)
            if hits:
                if self.vy > 0:
                    self.y = hits[0].rect.top - self.rect.height
                if self.vy < 0:
                    self.y = hits[0].rect.bottom
                self.vy = 0
                self.rect.y = self.y
    def update(self):
        """Per-frame update: input, image, movement (axis-by-axis so each
        collision can be resolved independently)."""
        self.get_keys()
        self.image = self.game.player_img
        self.image.set_colorkey(BLACK)
        self.x += self.vx * self.game.dt
        self.y += self.vy * self.game.dt
        self.rect.x = self.x
        self.collide_with_walls('x')
        self.rect.y = self.y
        self.collide_with_walls('y')
class MapTile(pg.sprite.Sprite):
    """A single ground tile placed on the TILESIZE grid."""

    def __init__(self, game, x, y, img):
        self.groups = game.all_sprites, game.ground
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        # Scale the source image to one tile and make black transparent.
        scaled = pg.transform.scale(img, (TILESIZE, TILESIZE))
        scaled.set_colorkey(BLACK)
        self.image = scaled
        self.rect = self.image.get_rect()
        self.x, self.y = x, y
        self.rect.x = x * TILESIZE
        self.rect.y = y * TILESIZE
class Sword(pg.sprite.Sprite):
    """The player's sword sprite, rotated by *rot* degrees at placement."""

    def __init__(self, game, x, y, entity, rot):
        self.groups = game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = self.game.sword
        self.image.set_colorkey(WHITE)
        # Fixed 16x7 hitbox anchored at (x, y); `entity` is accepted but
        # not used by this constructor.
        self.rect = pg.Rect(x, y, 16, 7)
        self.x, self.y = x, y
        self.rect.x, self.rect.y = x, y
        self.image = pg.transform.rotate(self.image, rot)
class Obstacle(pg.sprite.Sprite):
    """Invisible collision rectangle, converted from map pixels to screen
    pixels via the TILESIZE/TILEPIXEL ratio."""

    def __init__(self, game, x, y, w, h):
        scale = TILESIZE / TILEPIXEL  # map-pixel -> screen-pixel factor
        self.x = x * scale
        self.y = y * scale
        self.w = w * scale
        self.h = h * scale
        self.groups = game.walls
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.rect = pg.Rect(self.x, self.y, self.w, self.h)
        self.rect.x = self.x
        self.rect.y = self.y
'''
class Sword(pg.sprite.Sprite):
def __init__(self, game, x, y):
self.groups = game.all_sprites, game.swords
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = self.game.sword
self.image.set_colorkey(WHITE)
self.image = pg.transform.scale(self.image, (TILESIZE+50, TILESIZE-0))
self.rect = self.image.get_rect()
self.x = x
self.y = y
self.rect.x = x * TILESIZE
self.rect.y = y * TILESIZE
''' |
class Solution(object):
    def searchSubTree(self, node, p, q):
        """Count how many of {p, q} occur in the subtree rooted at *node*,
        short-circuiting as soon as both are found."""
        if node is None:
            return 0
        found = 0
        if node == p:
            found += 1
        if node == q:
            found += 1
        if found == 2:
            return 2
        # Recursing into a None child contributes 0, so no branching on
        # child presence is needed.
        found += self.searchSubTree(node.left, p, q)
        found += self.searchSubTree(node.right, p, q)
        return found

    def lowestCommonAncestor(self, root, p, q):
        """
        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode

        If root is one of the targets it is the LCA; otherwise count how
        many targets sit in the left subtree: one means they straddle root
        (root is the LCA), two means both are left, zero means both are
        right.
        """
        if root is None:
            return None
        if root == p or root == q:
            return root
        left_count = self.searchSubTree(root.left, p, q)
        if left_count == 1:
            return root
        if left_count == 2:
            return self.lowestCommonAncestor(root.left, p, q)
        return self.lowestCommonAncestor(root.right, p, q)
|
# -*- encoding: utf-8 -*-
##############################################################################
# Copyright (c) 2011 OpenERP Venezuela (http://openerp.com.ve)
# All Rights Reserved.
# Programmed by: Israel Fermín Montilla <israel@openerp.com.ve>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
###############################################################################
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class InheritPurchase(osv.Model):
    """Extension of purchase.order with importation (IMEX) cost fields and
    per-line computation of national/special import taxes."""
    _inherit = 'purchase.order'
    _columns = {
        'flete': fields.float('Flete',
            digits_compute=dp.get_precision('Cost Imex'),
            help='Price to be paid by renting a boat, plane or truck, or\
            cargo carried'),
        'percent_apply': fields.many2many('national.special.tax',
            'special_national_tax_rel', 'purchase_id', 'special_tax_id',
            'Percents to Apply', help='Percent to compute'),
        'percent_special': fields.float('Other Percent',
                                        digits_compute=dp.get_precision(
                                            'Cost Imex'),
                                        help='Percent to special compute'),
        'import_purchase': fields.boolean('Importation Purchase',
            help='Indicate if purchase is a importation '),
        # NOTE(review): the 'Percen' labels below look like truncated
        # 'Percent' -- they are user-visible strings, so left untouched here.
        'percent_imex_ids': fields.one2many('percent.imex', 'purchase_id',
            'Percen', domain=[('percent_lines', '!=', False)]),
        'percent_imex_s_ids': fields.one2many('percent.imex', 'purchase_id',
            'Percen', domain=[('percent_lines', '=', False)]),
    }
    def compute_percent(self, cr, uid, ids, imex_line, order_line, base,
                        context=None):
        """Return a percent.imex values dict: imex_line's percentage applied
        to *base* (order_line is accepted but unused here)."""
        if context is None:
            context = {}
        amount = (base * (imex_line.percent / 100))
        imex_lines = {
            'percent': imex_line.percent,
            'date': imex_line.date,
            'amount': amount,
        }
        return imex_lines
    def compute_import_taxes(self, cr, uid, ids, context=None):
        """For each purchase with both flete and percent_special set, compute
        per-line freight-inclusive totals, national tax lines and unit costs,
        then write them to percent_imex_ids / percent_imex_s_ids."""
        if context is None:
            context = {}
        lines_s = []
        lines = []
        rate = False
        for purchase in self.browse(cr, uid, ids, context=context):
            lines_s = []
            lines = []
            print 'purchase.flete', purchase.flete
            print 'purchase.percent_special', purchase.percent_special
            if purchase.flete and purchase.percent_special:
                for line in purchase.order_line:
                    # Freight is applied as a fraction of the line subtotal.
                    total_with_flete = (
                        purchase.flete * line.price_subtotal
                    ) + line.price_subtotal
                    # NOTE(review): currency_id.id is compared against
                    # purchase.company_id (a record, not an id) -- this looks
                    # like it should be purchase.company_id.currency_id.id;
                    # confirm intent.
                    if purchase.pricelist_id and\
                            purchase.pricelist_id.currency_id and \
                            purchase.pricelist_id.currency_id.id !=\
                            purchase.company_id and \
                            purchase.company_id.currency_id and \
                            purchase.company_id.currency_id.id:
                        # Inverse rates (rounded to 4 places) for all currency
                        # rates dated on or before the order date; the loop
                        # variable deliberately shadows the outer `rate`.
                        rate = [round((
                            1 / rate.rate), 4)
                            for rate
                            in purchase.pricelist_id.currency_id.rate_ids
                            if rate.name <= purchase.date_order]
                    print 'rate', rate
                    # Fall back to a rate of 1 when no conversion applies.
                    tax_base = total_with_flete * (rate and rate[0] or 1)
                    price_unit_bf_flete = total_with_flete / line.product_qty
                    # One percent.imex line per configured national tax.
                    percent_lines = [(0, 0, self.compute_percent(
                        cr, uid, ids, i, line,
                        tax_base, context=context))
                        for i in purchase.percent_apply]
                    total_national_expense = sum([i[2].get(
                        'amount') for i in percent_lines])
                    cost_unit = (
                        total_national_expense + tax_base) / line.product_qty
                    cost_unit_total = (
                        price_unit_bf_flete + (total_national_expense / (
                            rate and rate[0] or 1)) / line.product_qty) * (purchase.percent_special)
                    print '(price_unit_bf_flete + (total_national_expense /(rate and rate[0] or 1))/ line.product_qty)', (price_unit_bf_flete + (total_national_expense / (rate and rate[0] or 1)) / line.product_qty)
                    # cost_unit_total = ((total_with_flete *
                    # purchase.percent_special) + total_national_expense
                    # )/line.product_qty
                    cost_qty = cost_unit_total * line.product_qty
                    lines.append((0, 0, {
                        'line_purchase_id': line.id,
                        'total_with_flete': total_with_flete,
                        'price_unit_bf_flete': price_unit_bf_flete,
                        'tax_base': tax_base,
                        'percent_lines': percent_lines,
                        'total_national_expense': total_national_expense,
                        'cost_unit': cost_unit,
                    }
                    ))
                    lines_s.append((0, 0, {
                        'line_purchase_id': line.id,
                        'cost_unit_total': cost_unit_total,
                        'cost_qty': cost_qty,
                    }))
            # print 'line',lines
            # print 'line_s',lines_s
            self.write(cr, uid, [purchase.id], {'percent_imex_ids': lines,
                'percent_imex_s_ids': lines_s}, context=context)
        return True
|
import numpy as np
class Trajectories:
    """Minimum-jerk (quintic) trajectory generation for x, y, z and yaw."""

    def getJerkMatrix(self, a, b, T):
        """Solve for the quintic coefficients [a5, a4, a3, a2, a1, a0] of a
        minimum-jerk segment from position *a* (t=0) to *b* (t=T) with zero
        boundary velocity and acceleration.

        Rows of B impose, in order: pos(0), pos(T), vel(0), vel(T),
        acc(0), acc(T).
        """
        A = np.array([a, b, 0, 0, 0, 0])
        B = np.array([
            [0, 0, 0, 0, 0, 1],
            [T**5, T**4, T**3, T**2, T, 1],
            [0, 0, 0, 0, 1, 0],
            [5*T**4, 4*T**3, 3*T**2, 2*T, 1, 0],
            [0, 0, 0, 2, 0, 0],
            [20*T**3, 12*T**2, 6*T, 2, 0, 0]
        ])
        # np.linalg.solve is numerically better (and cheaper) than forming
        # the explicit inverse and multiplying, as the original did.
        return np.linalg.solve(B, A).tolist()

    def setCoeff_MinJerkTraj(self, X_0, X_T, T):
        """Compute and store quintic coefficients for each tracked axis.

        X_0, X_T -- start/end state vectors; indices 0..2 are x, y, z and
                    index 8 is yaw (psi).
        T        -- segment duration.
        """
        self.X_0 = X_0
        self.X_T = X_T
        self.x_coeffs = self.getJerkMatrix(X_0[0], X_T[0], T)
        self.y_coeffs = self.getJerkMatrix(X_0[1], X_T[1], T)
        self.z_coeffs = self.getJerkMatrix(X_0[2], X_T[2], T)
        self.psi_coeffs = self.getJerkMatrix(X_0[8], X_T[8], T)

    def getPos(self, coeffs, t):
        """Evaluate the quintic position polynomial at time t."""
        [a5, a4, a3, a2, a1, a0] = coeffs
        return a5*t**5 + a4*t**4 + a3*t**3 + a2*t**2 + a1*t + a0

    def getVel(self, coeffs, t):
        """Evaluate the first derivative (velocity) at time t."""
        [a5, a4, a3, a2, a1, a0] = coeffs
        return 5*a5*t**4 + 4*a4*t**3 + 3*a3*t**2 + 2*a2*t + a1

    def getAcc(self, coeffs, t):
        """Evaluate the second derivative (acceleration) at time t."""
        [a5, a4, a3, a2, a1, a0] = coeffs
        return 20*a5*t**3 + 12*a4*t**2 + 6*a3*t + 2*a2

    def getReferences(self, t):
        """Return [x, x', x'', y, y', y'', z, z', z'', psi, psi', psi'']
        evaluated at time t from the stored coefficients."""
        ref = []
        for coeffs in (self.x_coeffs, self.y_coeffs,
                       self.z_coeffs, self.psi_coeffs):
            ref.extend([self.getPos(coeffs, t),
                        self.getVel(coeffs, t),
                        self.getAcc(coeffs, t)])
        return ref
|
"""
This module demonstrates simple ways to:
-- READ FROM and
-- WRITE to
TEXT files.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
and their colleagues. October 2015.
"""
import math
# ----------------------------------------------------------------------
# Students: Read and run this program. Before you leave this example,
# make sure you know WHAT these examples show, namely:
# -- How to READ a file (see the reading_in_one_chunk example)
# -- How to WRITE to a file (see the writing example)
# -- How to refer to files in OTHER folders
# (see the files_in_other_folders example)
# ----------------------------------------------------------------------
def main():
    """ Calls the other functions in this module to demonstrate them. """
    # NOTE: reading_line_by_line, writing and files_in_other_folders are
    # defined elsewhere in this module.
    input_until_a_SENTINEL_value()
    reading_line_by_line()
    writing()
    files_in_other_folders()
def input_until_a_SENTINEL_value():
    """
    Reads numbers from the console until the sentinel value 0 arrives,
    then prints the sum of their square roots.

    Bad input (anything float() or math.sqrt() rejects) is reported and
    the user is simply re-prompted.
    """
    print()
    print('--------------------------------------------------')
    print('Demonstrating CONSOLE INPUT of integers')
    print(' that STOPS when 0 is entered as a SENTINEL value.')
    print()
    print('Uses TRY ... EXCEPT to guard against bad user input.')
    print('--------------------------------------------------')
    print('I will ADD UP the SQUARE ROOTS of all the numbers you give me.')
    running_total = 0
    while True:
        reply = input('Enter a positive number (0 to stop): ')
        try:
            value = float(reply)
            if value == 0:
                break
            running_total = running_total + math.sqrt(value)
        except ValueError as exception:
            # float() failed on non-numeric text, or math.sqrt() rejected
            # a negative number ("math domain error").
            print(exception)
            print('You entered:', reply)
            print('That is NOT a number.')
            print('Try again, entering something like: 17.4')
        except Exception as exception:
            print(exception)
    f_string = 'The total of the square roots of your numbers is: {:0.2f}'
    print(f_string.format(running_total))
def is_prime(n):
    """ Returns True if n is prime, else returns False. """
    # Bug fix: for n < 2 the trial-division loop below never runs, so the
    # original returned True for 0, 1 and negative numbers, which are not
    # prime by definition.
    if n < 2:
        return False
    # Trial division by every candidate up to n // 2 is sufficient
    # (any factor pair has one member <= n // 2).
    for k in range(2, (n // 2) + 1):
        if n % k == 0:
            return False
    return True
def reading_in_one_chunk():
    """ Example: how to read an ENTIRE text file into a single STRING. """
    # ------------------------------------------------------------------
    # A context manager (WITH statement) opens the file and guarantees it
    # is closed even if an exception occurs while reading. The original
    # open() / read() / close() triple leaked the handle on error.
    # ------------------------------------------------------------------
    with open('AliceInWonderland.txt', 'r') as f:
        s = f.read()
    # ------------------------------------------------------------------
    # From here, you would use s however you want, for example:
    #   -- use any of the powerful STRING methods
    #   -- loop through the STRING with a FOR loop.
    # For example:
    # ------------------------------------------------------------------
    print('The file contains', s.count('!'), 'exclamation marks.')
    print('The first character in the file is:', s[0])
    print('The first 10 characters of the file are:')
    for k in range(10):
        print(s[k])
    lines = s.split('\n')
    # NOTE: if the file ends with a newline, the final split element is
    # the empty string, and indexing its first character would raise.
    print('The last line in the file is', lines[len(lines) - 1])
    print('The first character of the last line in the file is',
          lines[len(lines) - 1][0])
def reading_line_by_line():
    """ Example: how to read a text file LINE BY LINE. """
    # ------------------------------------------------------------------
    # Iterating the file object yields one line at a time (each line
    # still carries its trailing newline, except possibly the last), so
    # arbitrarily large files can be processed without holding them all
    # in memory. The WITH statement closes the file even if the body
    # raises — the original open() / close() pair leaked on error.
    # ------------------------------------------------------------------
    with open('AliceInWonderland.txt', 'r') as f:
        for line in f:
            # Do what you want with the STRING called line, for example:
            if 'garden' in line:
                print('Line with garden:', line)
def writing():
    """ Example: how to WRITE to a text file. """
    # ------------------------------------------------------------------
    # The WITH statement closes (and flushes) the file even if one of
    # the writes raises; the original explicit close() leaked on error.
    # ------------------------------------------------------------------
    with open('my_new_file.txt', 'w') as f:
        f.write('This is\n')
        f.write('how you')
        f.write('write to a file.\n')
    # ------------------------------------------------------------------
    # *** BE CAREFUL: ***  Opening a file for writing OVERWRITES
    #                      any existing file with that name!!!
    # ------------------------------------------------------------------
def files_in_other_folders():
    """ Example: how to refer to files NOT in the current folder. """
    # ------------------------------------------------------------------
    # Example: using a pathname RELATIVE to the current folder.
    #   .. means to go UP one folder from the current folder.
    # ------------------------------------------------------------------
    with open('../foo/blah.txt', 'w') as f:
        f.write('Are you listening?')
    # ------------------------------------------------------------------
    # Example: using an ABSOLUTE pathname.
    # ------------------------------------------------------------------
    workspace_folder = 'C:/EclipseWorkspaces/csse120/'
    session_folder = 'Session01_IntroductionToPython/src/'
    first_module = 'm1e_comments_strings_print.py'
    filename = workspace_folder + session_folder + first_module
    # Bug fix: the file opened for reading was never closed; the WITH
    # statement releases both handles deterministically.
    with open(filename, 'r') as f:
        print(f.read())
# ----------------------------------------------------------------------
# Calls  main  to start the ball rolling.
# ----------------------------------------------------------------------
# NOTE(review): consider guarding this with `if __name__ == '__main__':`
# so the module can be imported without running the demos.
main()
|
from flask import Flask
# Bug fix: the extension class is spelled SQLAlchemy; the original
# `SQLALchemy` raises ImportError at startup.
from flask_sqlalchemy import SQLAlchemy
from app import config

app = Flask(__name__)
app.config.from_object(config)  # load configuration variables
db = SQLAlchemy(app)  # object representing the database

# routes
@app.route('/')
def inicio():
    """Landing page."""
    return 'Hello, World!'  # TODO: replace with render_template("inicio.html")

# errors
@app.errorhandler(404)
def page_not_found(error):
    """Fallback handler for unknown URLs."""
    return 'Error 404'  # TODO: replace with render_template("error.html", error="Página no encontrada"), 404

if __name__ == '__main__':
    app.run()
|
from sqlalchemy import create_engine
class Storage:
    """Thin wrapper around a SQLAlchemy engine that persists N-queens
    solutions into a PostgreSQL `solution` table."""

    # Constructor
    def __init__(self, readYaml):
        # NOTE(review): SQLAlchemy >= 1.4 requires the "postgresql://" scheme;
        # "postgres://" only works on older versions — confirm target version.
        self.__dbString = ("postgres://" + readYaml.user + ":" + readYaml.password
                           + "@" + readYaml.host + ":" + readYaml.port
                           + "/" + readYaml.dbName)
        self.__db = create_engine(self.__dbString)
        self.__createTable()

    # if table does not exist, we create it
    def __createTable(self):
        self.__db.execute("CREATE TABLE IF NOT EXISTS solution (queens integer, responses text, xvalues text,yvalues text,runedat timestamp with time zone DEFAULT now())")

    # insert the values on postgres
    def insert(self, queens, responses, xvalues, yvalues):
        """Insert one solution row.

        Bug fix: values are now bound as DBAPI parameters instead of being
        concatenated into the SQL string, which was vulnerable to SQL
        injection and broke whenever a value contained a single quote.
        """
        self.__db.execute(
            "INSERT INTO solution (queens, responses, xvalues, yvalues) "
            "VALUES (%s, %s, %s, %s)",
            (queens, str(responses), str(xvalues), str(yvalues)))
# -*- coding: utf-8 -*-
import cssutils
import logging
import re
import requests
from pycolorname.color_system import ColorSystem
class LogoDesignTeam(ColorSystem):
    def __init__(self, *args, **kwargs):
        """Initialise the colour system and load the Pantone data."""
        ColorSystem.__init__(self, *args, **kwargs)
        # Silence cssutils' noisy parse warnings before touching the stylesheet.
        cssutils.log.setLevel(logging.CRITICAL)
        self.load()

    def refresh(self):
        """Scrape the LogoDesignTeam Pantone chart and return a
        {name: (r, g, b)} mapping built from the site's CSS classes."""
        page = self.request(
            'GET',
            'http://www.logodesignteam.com/logo-design-pantone-color-chart'
            '.html')
        stylesheet = requests.request(
            'GET',
            'http://www.logodesignteam.com/css/style.css')
        # Build a ".colorNNN" class-name -> background-colour lookup from
        # the stylesheet; only style rules with a background colour count.
        parsed_css = cssutils.parseString(stylesheet.text, validate=False)
        class_to_color = {}
        for rule in parsed_css.cssRules:
            if not isinstance(rule, cssutils.css.CSSStyleRule):
                continue
            bgcolor = rule.style.backgroundColor
            if not bgcolor:
                continue
            for selector in rule.selectorList:
                if selector.selectorText.startswith(".color"):
                    class_to_color[selector.selectorText.replace(".", "")] = bgcolor
        # Pair each name list with its colour list and resolve the colour
        # of every swatch through the CSS lookup built above.
        name_lists = page.find_all('ul', {'class': "color_text"})
        color_lists = page.find_all('ul', {'class': "colors"})
        data = {}
        for names_ul, colors_ul in zip(name_lists, color_lists):
            for name_li, color_li in zip(names_ul.find_all('li'),
                                         colors_ul.find_all('li')):
                for css_class in color_li['class']:
                    rgb = class_to_color.get(css_class, None)
                    # Color not found or invalid color class
                    if rgb is None or not css_class.startswith("color"):
                        continue
                    rgb = self.hex_to_rgb(rgb)
                    label = name_li.text.strip()
                    if 'Pantone' not in label:
                        label = 'Pantone ' + label
                    label = re.sub(r'Pantone (?P<ID>\d+)', r'PMS \g<ID>', label)
                    data[label] = rgb
                    # Only the first valid class of each swatch is used.
                    break
        return data
|
# NOTE(review): Python 2 script (print statements). Relays events between a
# robot on a USB-serial link and a Socket.IO server.
import serial
import time
from socketIO_client import SocketIO
import logging
logging.basicConfig(level=logging.DEBUG)
# Open the bot's serial link and read an initial 3-byte message.
ser = serial.Serial('/dev/tty.usbserial-AH001BS6')
x = ser.read(3)
print x
"""
In each iteration:
-> read button press from bot
-> process video to find positions
-> read directions from remote
-> pass directions to firebird
-> send position and button press info to remote
"""
# to be done by pranav
def process_video():
    # Placeholder: should eventually return bot positions from the video feed.
    return "pos"
# to be implemented
def read_remote():
    return "dummy"
# to be implemented
def write_remote(str):
    # NOTE(review): the parameter shadows the builtin `str`.
    return
def on_directions(*args):
    # Callback fired when the server emits a 'dir' event.
    print "Hello host"
    print 'on_directions', args
print "rblah"
SERVER_IP = 'https://10.129.26.35'
# Two client sockets: one acting as the "host" (h0), one as the "bot" (b0).
socketH = SocketIO(SERVER_IP, 8080, verify=False)
socketB = SocketIO(SERVER_IP, 8080, verify=False)
print "rt"
socketH.emit('create or join', 'h0')
socketB.emit('create or join', 'b0')
# socketH.on('dir_response', on_directions)
socketH.on('dir', on_directions)
# while True:
#     pass
while True:
    # Poll one byte from the bot; the character "1" signals a button press.
    button_pressed = ser.read(1)
    # print button_pressed
    if (button_pressed == "1"):
        print "Button pressed\n"
        # socketIO.emit('
        socketB.emit('message', {'type': 'ping', 'bot': 'b0'})
    ## positions = process_video()
    ## direction = read_remote()
    ## ser.write(direction)
    ## write_remote(button_pressed)
    ## write_remote(positions)
    ##
# NOTE(review): unreachable — the loop above never breaks, so the serial
# port is never closed here.
ser.close()
|
# This code will create from a mp4 videofile some metadata (vtts files and js etc) and a html5 object in a zip-directory
# Author: Benjamin Fuchs
import argparse
# get scripts
from CaptionParser import CaptionParser
from ChapterParser import ChapterParser
from QuestionParser import QuestionParser
from OCRQRforVTT import OCRQRforVTT
from createHtml import createHtml
from createZip import createZip
import os
import re
import sys

# Command-line interface: one positional mp4 path plus optional settings.
parser = argparse.ArgumentParser()
parser.add_argument("mp4", help="must be a .mp4 file")
parser.add_argument("-o", "--output", type=str, default='raw.vtt', help="choose if output is on terminal or in output-file")
parser.add_argument("-t", "--title", type=str, default="-", help="choose title for webpage")
parser.add_argument("-l", "--language", type=str, default="deu", choices=["eng", "deu"], help="choose language for OCR and for html5-page")
#parser.add_argument("-len", "--length", type=int, default=3, help="choose length of strings which will be deleted")
parser.add_argument("-z", "--toZip", default=False, action='store_true', help="choose if result is zipped or not")
args = parser.parse_args()

language = args.language
output = args.output
title = args.title
toZip = args.toZip
filename = args.mp4

# check if given mp4 file exists
# Bug fix: the original only printed a warning and carried on, crashing later.
if not os.path.exists(filename):
    print("Given file doesn't exist!")
    sys.exit(1)

# check if mp4 file is in same directory as script or not, then extract title and path of it
if "/" in filename:
    filepath = os.path.abspath(filename)
    filename = filepath.split("/")[-1]  # just need simple filename
    filepath = filepath.replace(filename, "")  # path w/o the file name
else:
    filepath = ""

# if no title is given, use the name of the mp4-file
if title == "-":
    title = filename.split(".")[0]  # filename mustn't have more than one dot

print("title = " + title)
print("filepath = " + filepath)
print("filename = " + filename)
print("toZip = " + str(toZip))

# step 1: parse video for raw.vtt
# Bug fix: was OCRQRforVTT(sys.argv[1], ...), which passes the wrong value
# whenever options precede the positional argument; args.mp4 is always the
# original video path.
OCRQRforVTT(args.mp4, output, language)
# step 2: create captions.vtt / chapters.vtt / questions.vtt from raw.vtt
CaptionParser(output, "captions.vtt")
ChapterParser(output, "chapters.vtt")
QuestionParser(output, "questions.vtt")
# step 3: create html
createHtml(title, filename, language)
# step 4: create zip
createZip(title, filepath, filename, toZip)
import copy
import json
from collections import namedtuple
import requests
from flask import Flask, request
app = Flask(__name__)
# Load config.json into nested namedtuples so fields are attribute-accessible
# (config.relays, relay.path, relay.hooks, hook.team/url/channel).
with open('config.json') as config_file:
    config = json.load(config_file, object_hook=lambda d: namedtuple('Config', d.keys())(*d.values()))
@app.route('/<string:path>', methods=['POST'])
def home(path):
    """
    Relay an incoming webhook POST to every configured hook whose relay
    path matches the request path (and whose team matches the optional
    ?team= query parameter). Returns the last downstream response, or
    400 if nothing matched.
    :return:
    """
    print('path: %s' % path)
    team = request.args.get('team')
    print('team: %s' % team)
    # Incoming body is form-encoded with a JSON string under 'payload'.
    payload = json.loads(request.form.to_dict()['payload'])
    print('payload: %s' % payload)
    response = None
    for r in config.relays:
        print(r)
        if r.path == '/%s' % path:
            for h in r.hooks:
                # Team filter is case-insensitive; no team param matches all.
                if team is not None and team.casefold() != h.team.casefold():
                    continue
                # copy for next changes
                new_payload = copy.deepcopy(payload)
                # A hook-level channel only fills in when the payload has none.
                if hasattr(h, 'channel') and not 'channel' in payload:
                    new_payload['channel'] = h.channel
                # abort if no channel exists in new_payload
                if 'channel' not in new_payload:
                    continue
                print('send: %s' % new_payload)
                # NOTE(review): 'x-www-form-urlencoded' is not a valid media
                # type (should be 'application/x-www-form-urlencoded') and it
                # contradicts the json= body — confirm what the receiving
                # webhook actually expects.
                response = requests.post(h.url, json=new_payload, headers={'content-type': 'x-www-form-urlencoded'})
    # Only the LAST matching hook's response is reported to the caller.
    if response is not None:
        return response.text, response.status_code, response.headers.items()
    else:
        return "Bad Request", 400
if __name__ == '__main__':
    app.run(host="0.0.0.0")
|
import pygame, point
class GameObject(pygame.sprite.Sprite):
    """A circular sprite with a centre location, radius and velocity."""

    def __init__(self, location, radius):
        # Bug fix: the Sprite base class was never initialised, which breaks
        # pygame sprite-group membership.
        pygame.sprite.Sprite.__init__(self)
        self.loc = location
        self.radius = radius
        self.size = radius * 2 + 1
        # Bug fix: `Point` alone is a NameError — the module is imported as
        # `point`, so the class must be qualified.
        self.vel = point.Point(0, 0)
        # NOTE(review): `loc.y + self.radius` as the rect's top looks like it
        # should be `loc.y - self.radius` for a centre-based location —
        # confirm the coordinate convention before changing.
        self.rect = pygame.Rect(self.loc.x - self.radius, self.loc.y + self.radius, self.size, self.size)
|
'''
Josh Rudolph
BIMM185
creates graphs for operon project: shows number of occurrences of distances between genes in and out of operons
'''
import sys
from scipy import stats
import matplotlib.pyplot as mpl
import numpy as np
# open file get data= genes and their locations
with open(sys.argv[1], 'r') as rfile:
    data = []
    next(rfile)  # skip the header line
    # gene_id exon locus_tag name name length left_position right_position start end strand
    for line in rfile:
        data.append(line.rstrip('\n').split('\t'))
print("length: ",len(data))
# initialize some values to use later
prevOp = ""
prevRight = 0
prevLeft = 0
right = 0
left = 0
prevDirection = "reverse"
# h1: distances between adjacent genes in the SAME operon (positive control)
# h0: distances between adjacent genes in DIFFERENT operons (negative control)
h0 = []
h1 = []
# sort the data
# NOTE(review): assumes column 0 (gene_id) is numeric and ordering by it
# matches genomic order — confirm against the input format.
sorted_data = sorted( data, key=lambda x: int(x[0]) )
# go through each gene and get negative and positive control values
for x in sorted_data:
    print(x)
    # linedata = line.split('\t')
    left = int(x[6])
    right = int(x[7])
    # filter for multi-gene operons get positive control
    # (only pairs on the same strand count, tracked via prevDirection)
    if x[4] == prevOp and prevDirection==x[-1]:
        distGene = left - prevRight + 1
        h1.append(distGene)
    # if the operon is different from the previous one, calculate the distance and append to h0 (negative control)
    if x[4] != prevOp and prevDirection==x[-1] :
        distOp = left - prevRight + 1
        h0.append(distOp)
    # take data to save as previous line data
    prevOp = x[4]
    prevRight = int(x[7])
    prevLeft = int(x[6])
    prevDirection = x[-1]
# keep only negative-control distances below 1 kb
h0_processed = []
print(h1)
for i in range(len(h0)):
    if h0[i] < 1000:
        h0_processed.append(h0[i])
print(h0)
print(h0_processed)
xs = np.linspace(-100,3000,200)
# kernel density estimate of the same-operon distances, with a fixed
# covariance factor of 1.0 (overrides scipy's automatic bandwidth)
kernel_h1 = stats.gaussian_kde(h1)
kernel_h1.covariance_factor = lambda:1.0
kernel_h1._compute_covariance()
# kernel_h0 = stats.gaussian_kde(h1)
# kernel_h0.covariance_factor = lambda:1.0
# kernel_h0._compute_covariance()
mpl.plot(xs, kernel_h1(xs))
# mpl.plot(xs, kernel_h0(xs))
mpl.show()
|
import sys
'''
Given the current state probability distribution, return the probabilities for the
different emissions after the next transition.
'''
def hmm1():
    """Read A, B and pi from stdin (each line: rows cols values...),
    compute pi * A * B and return it as a 'rows cols values...' string
    with elements rounded to 6 decimals."""
    transition_line = sys.stdin.readline().split()
    emission_line = sys.stdin.readline().split()
    pi_line = sys.stdin.readline().split()
    # First two tokens of each line are the matrix dimensions.
    A = reshape(transition_line[2:], int(transition_line[0]), int(transition_line[1]))
    B = reshape(emission_line[2:], int(emission_line[0]), int(emission_line[1]))
    pi = reshape(pi_line[2:], int(pi_line[0]), int(pi_line[1]))
    # State distribution after one transition, then the emission distribution.
    next_state = multiply_matrices(pi, A)
    emission_dist = multiply_matrices(next_state, B)
    header = str(len(emission_dist)) + " " + str(len(emission_dist[0])) + " "  # rows and cols
    flattened = [str(round(value, 6)) for row in emission_dist for value in row]
    return header + ' '.join(flattened)
def reshape(elems, rows, cols):
    """
    Reshape a 1-dimensional list of numeric strings into a nested
    2-dimensional list of floats, row by row.

    Parameters
    ----------
    elems : list
        Flat list of values convertible to float.
    rows : int
        Number of rows in the reshaped matrix (informational only; the
        actual row count follows from len(elems) and cols).
    cols : int
        Number of columns in the reshaped matrix.
    """
    values = list(map(float, elems))
    return [values[start:start + cols] for start in range(0, len(values), cols)]
def multiply_matrices(A, B):
    """
    Return the matrix product A x B for nested-list matrices.
    """
    product = []
    columns = list(zip(*B))  # transpose B once so columns are reusable
    for row in A:
        product.append([sum(x * y for x, y in zip(row, col)) for col in columns])
    return product
print(hmm1()) |
import os
import numpy as np
import matplotlib.pyplot as plt
from .utils.plotting import plot_trajectory_time_evolution, plot_mean_trajectories
from .utils.stats_utils import empirical_distribution
class SamplingResults(object):
    """
    An abstraction on the results obtained from a sampling run.

    Holds all simulated trajectories, the accepted (successful) ones, and
    the posterior particles/weights derived from them, plus summary and
    plotting helpers.
    """
    # Subclasses whose weights are importance-sampling ratios flip this.
    _importance_sampled = False

    def __init__(self, sampler_name, true_trajectory=None, histbin_range=None):
        self.sampler_name = sampler_name
        self.true_trajectory = true_trajectory
        self._all_trajectories = None
        self._trajectories = None
        self._posterior_particles = None
        self._posterior_weights = None
        self._histbin_range = histbin_range

    @classmethod
    def from_information(ResultClass,
                         sampler_name,
                         all_trajectories,
                         trajectories,
                         particles='auto',
                         weights='auto'):
        """
        Quick constructor class for Results
        :param sampler_name: name of sampler
        :param all_trajectories: all trajectories
        :param trajectories: trajectories that were successful
        :param particles: particles in the posterior ('auto' derives them)
        :param weights: weights of each particle ('auto' derives them)
        :raises KeyError: if only one of weights/particles is 'auto'
        :return: a populated ResultClass instance
        """
        result = ResultClass(sampler_name)
        result.all_trajectories(all_trajectories)
        result.trajectories(trajectories)
        if weights == 'auto' and particles == 'auto':
            result.create_posterior()
        elif weights == 'auto' or particles == 'auto':
            raise KeyError('Both weights and particles must be auto.')
        else:
            result.posterior_particles(particles)
            result.posterior_weights(weights)
        return result

    def all_trajectories(self, trajectories=None):
        """
        Used to set or retrieve all trajectories (getter/setter in one).
        :param trajectories: when None, acts as a getter
        :return: the stored trajectories when called as a getter
        """
        if trajectories is None:
            return self._all_trajectories
        else:
            self._all_trajectories = trajectories

    def trajectories(self, trajectories=None):
        """
        Used to set or retrieve accepted trajectories (getter/setter in one).
        :param trajectories: when None, acts as a getter
        :return: the stored trajectories when called as a getter
        """
        if trajectories is None:
            return self._trajectories
        else:
            self._trajectories = trajectories

    def posterior_particles(self, posterior_particles=None):
        return self.posterior(posterior_particles)

    def posterior(self, posterior_particles=None):
        """
        Used to set or retrieve the values in the posterior.
        May only be set once (asserted).
        :param posterior_particles: when None, acts as a getter
        :return: the stored particles
        """
        if posterior_particles is not None:
            assert self._posterior_particles is None
            self._posterior_particles = np.array(posterior_particles)
            # Backward compat: 1-D problems are stored as a flat vector.
            if len(self._all_trajectories[0]) == 1:
                self._posterior_particles = self._posterior_particles.reshape(-1)
            return self._posterior_particles
        else:
            return self._posterior_particles

    def posterior_weights(self, posterior_weights=None):
        """
        Used to set or retrieve the weights of the posterior particles.
        May only be set once (asserted).
        :param posterior_weights: when None, acts as a getter
        :return: the stored weights (flattened)
        """
        if posterior_weights is not None:
            assert self._posterior_weights is None
            self._posterior_weights = np.array(posterior_weights).reshape(-1)
            return self._posterior_weights
        else:
            return self._posterior_weights

    def create_posterior(self):
        """
        Automatically creates a posterior if only trajectories() has been set:
        each accepted trajectory contributes its starting point with weight 1.
        :return:
        """
        assert self._trajectories is not None, 'No trajectories to create posterior from'
        assert self._posterior_particles is None and self._posterior_weights is None, 'Posterior already initialized'
        self._posterior_particles = []
        self._posterior_weights = []
        for trajectory in self._trajectories:
            self._posterior_particles.append(trajectory[0])
            self._posterior_weights.append(1)
        self._posterior_weights = np.array(self._posterior_weights).reshape(-1)
        self._posterior_particles = np.array(self._posterior_particles).reshape(-1)

    def expectation(self, weighted=False):
        """
        The expected value of the posterior.
        :param weighted: when True, normalise by the sum of weights
            (self-normalised importance sampling); otherwise by the count.
        :return: the estimate
        """
        posterior_particles = self.posterior_particles()
        posterior_weights = self.posterior_weights()
        numerator = np.sum(posterior_particles * posterior_weights)
        if weighted:
            estimate = numerator / np.sum(posterior_weights)
        else:
            estimate = numerator / len(self._posterior_particles)
        return estimate

    def variance(self, weighted=False):
        """
        The variance of the posterior.
        # from https://statweb.stanford.edu/~owen/mc/Ch-var-is.pdf
        :param weighted: use the self-normalised importance-sampling form
        :return: the variance estimate
        """
        posterior_particles = self.posterior_particles()
        posterior_weights = self.posterior_weights()
        expected_value = self.expectation(weighted)
        if weighted:
            modified_weights = (posterior_weights / posterior_weights.sum())**2
            terms = (posterior_particles - expected_value)**2
            return np.sum(modified_weights*terms)
        else:
            return np.mean((posterior_weights*posterior_particles - expected_value)**2)

    def plot_distribution(self, histbin_range=None, ax=None, **kwargs):
        """
        Plots the distribution of the posterior.
        :param histbin_range: overrides the range given at construction
        :param ax: matplotlib axes (defaults to current axes)
        :param kwargs: forwarded to ax.hist
        :return: the axes
        """
        if ax is None:
            ax = plt.gca()
        _histbin_range = self._histbin_range if not histbin_range else histbin_range
        # Bug fix: `normed` was removed from matplotlib (3.1+); `density`
        # is the equivalent normalisation flag.
        ax.hist(self.posterior_particles(),
                density=True,
                bins=np.arange(-_histbin_range-2, _histbin_range+2)+0.5,
                weights=self.posterior_weights(),
                **kwargs)
        ax.set_xlabel('x_0')
        ax.set_ylabel('Frequency')
        ax.set_title('Histogram of trajectory starting positions')
        return ax

    def plot_trajectory_evolution(self, dimension=0, step=5, ax=None):
        """
        Plots the evolution of accepted trajectories over time.
        :param dimension: a dimension index or a list of them
        :param step:
        :param ax:
        :return: the axes (single-dimension case only)
        """
        if type(dimension) is list:
            for dim in dimension:
                plot_trajectory_time_evolution(self.trajectories(), dim, step=step, ax=ax)
        else:
            return plot_trajectory_time_evolution(self.trajectories(), dimension, step=step, ax=ax)

    def plot_all_trajectory_evolution(self, dimension=0, step=20, ax=None):
        """
        Plots the evolution of all trajectories over time.
        :param dimension: a dimension index or a list of them
        :param step:
        :param ax:
        :return: the axes (single-dimension case only)
        """
        if type(dimension) is list:
            for dim in dimension:
                plot_trajectory_time_evolution(self.all_trajectories(), dim, step=step, ax=ax)
        else:
            return plot_trajectory_time_evolution(self.all_trajectories(), dimension, step=step, ax=ax)

    def plot_mean_trajectory(self, label=None, ax=None):
        """
        Plots the mean successful trajectory.
        :param label: defaults to the sampler name
        :param ax:
        :return:
        """
        trajectories = self.trajectories()
        ts = np.arange(len(trajectories[0]))
        if label is None:
            label = self.sampler_name
        # NOTE(review): `label` is computed but not forwarded — confirm
        # whether plot_mean_trajectories accepts a label argument.
        return plot_mean_trajectories(trajectories, ts, self.true_trajectory, ax=ax)

    def plot_mean_all_trajectory(self, label=None, ax=None):
        """
        Plots the mean of all trajectories.
        :param label: defaults to the sampler name
        :param ax:
        :return:
        """
        trajectories = self.all_trajectories()
        ts = np.arange(len(trajectories[0]))
        if label is None:
            label = self.sampler_name
        # NOTE(review): `label` is computed but not forwarded — see above.
        return plot_mean_trajectories(trajectories, ts, self.true_trajectory, ax=ax)

    def save_results(self, path):
        """
        Saves results into a json file named after the sampler.
        :param path: directory to write into
        :return:
        """
        prepared_posterior_particles = None
        # 1-D particles serialise as floats, higher-dimensional as lists.
        if len(self._all_trajectories[0]) == 1:
            if self._posterior_particles is not None:
                prepared_posterior_particles = [ float(p) for p in self._posterior_particles]
        elif len(self._all_trajectories[0]) > 1:
            if self._posterior_particles is not None:
                prepared_posterior_particles = [ p.tolist() for p in self._posterior_particles]
        results_dict = dict(
            sampler_name=self.sampler_name,
            true_trajectory=self.true_trajectory.tolist(),
            # NOTE(review): key typo ('all_trejctories') kept for
            # compatibility with existing readers of saved files.
            all_trejctories=[traj.tolist() for traj in self._all_trajectories],
            trajectories=[traj.tolist() for traj in self._trajectories],
            posterior_particles=prepared_posterior_particles,
            posterior_weights= [ float(w) for w in self._posterior_weights] if self._posterior_weights is not None else None)
        import json
        with open(os.path.join(path, 'trajectory_results_{}'.format(self.sampler_name)), 'w') as f:
            json.dump(results_dict, f)

    def prop_success(self):
        """
        Calculates the proportion of successful trajectories.
        :return: accepted / total
        """
        return len(self.trajectories())/len(self.all_trajectories())

    def summary_statistics(self):
        """
        Returns all summary statistics that are calculatable.
        :return: [expectation, variance, proportion of successes]
        """
        return [self.expectation(), self.variance(), self.prop_success()]

    def summary_builder(self):
        """
        Returns a human readable summary of the statistics calculated.
        :return:
        """
        return 'Start Estimate: {:3g}, Variance: {:3g}, Prop Success: {:3g}'.format(self.expectation(), self.variance(), self.prop_success())

    def summary(self, extra=''):
        """
        Pretty print of the human readable summary.
        :param extra: optional extra line
        :return: the formatted summary string
        """
        template_string = '\n'
        template_string += '*' * 45
        template_string += '\nSampler: {}\n'.format(self.sampler_name)
        template_string += str(self.summary_builder()) +'\n'
        if extra != '': template_string += '{}\n'.format(extra)
        template_string += '*'*45
        template_string += '\n'
        return template_string

    def summary_title(self):
        """
        Summary for titling plots.
        :return:
        """
        return '{} Mean: {:3g} Var:{:3g}\nProp: {:3g}'.format(self.sampler_name, *self.summary_statistics())

    def empirical_distribution(self, histbin_range=None):
        """
        Returns an empirical distribution of the posterior.
        :param histbin_range: overrides the range given at construction
        :return:
        """
        _histbin_range = self._histbin_range if not histbin_range else histbin_range
        return empirical_distribution(self.posterior_particles(), self.posterior_weights(), histbin_range=_histbin_range)
class ImportanceSamplingResults(SamplingResults):
    # Posterior weights are importance-sampling likelihood ratios.
    _importance_sampled = True

    def effective_sample_size(self):
        """
        A diagnostic for the quality of the importance sampling scheme.
        The variance in the estimate of the expectation is equal to
        that if we had done `effective_sample_size` number of monte carlo
        estimates.
        :return:
        """
        w = np.array(self._posterior_weights).reshape(-1)
        return np.sum(w) ** 2 / np.sum(w ** 2)

    def variance(self, weighted=True):
        """
        Variance of the posterior, weighted by the importance-sampled
        weights by default.
        :param weighted:
        :return:
        """
        return super().variance(weighted)

    def expectation(self, weighted=True):
        """
        Expected value of the posterior, weighted by the importance-sampled
        weights by default.
        :param weighted:
        :return:
        """
        return super().expectation(weighted)

    def summary_builder(self):
        base = super().summary_builder()
        return base + ' ESS: {:3g}'.format(self.effective_sample_size())

    def summary_statistics(self):
        stats = super().summary_statistics()
        stats.append(self.effective_sample_size())
        return stats

    def summary_title(self):
        return '{} Mean: {:3g} Var:{:3g}\nProp: {:3g} ESS: {:3g}'.format(self.sampler_name, *self.summary_statistics())

    def plot_posterior_weight_histogram(self, ax=None, **kwargs):
        """
        Plots the histogram of importance sampled weights.
        :param ax: created on a fresh figure when omitted
        :param kwargs: forwarded to ax.hist
        :return: the axes
        """
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        w = np.array(self._posterior_weights).reshape(-1)
        ax.hist(w, bins=np.linspace(0, 1, 500), **kwargs)
        ax.set_ylabel('Count')
        ax.set_xlabel('Weight')
        ax.set_title('Min: {:3g}, Max: {:3g}, Mean: {:3g}'.format(np.min(w), np.max(w), np.mean(w)))
        return ax
class RLSamplingResults(ImportanceSamplingResults):
    # Results of an RL-driven sampler; adds training-curve plots.
    # NOTE(review): rewards_per_episode / loss_per_episode are never set in
    # this module — presumably attached by the sampler; confirm.
    def plot_reward_curves(self, ax):
        # Per-episode reward over the course of training.
        ax.plot(self.rewards_per_episode)
        ax.set_xlabel('Episode')
        ax.set_ylabel('Reward For Episode')
    def plot_loss_curves(self, ax):
        # Per-episode training loss.
        ax.plot(self.loss_per_episode)
        ax.set_xlabel('Episode')
        ax.set_ylabel('Loss')
# =========================================================
# =========================================================
# Utilities for laying out:
# 1) processing configuration ( e.g. which
# subset of documents will be processed )
# 2) tools/taggers that will be evaluated;
# =========================================================
# =========================================================
import os, os.path, re, sys
from collections import defaultdict
from random import sample
from psycopg2.sql import SQL, Identifier
from estnltk.text import Text
from estnltk.taggers import NerTagger
# =================================================
# =================================================
# Choosing a random subset for processing
# =================================================
# =================================================
def fetch_document_indexes( storage, schema, collection, logger ):
    """ Fetches and returns all document ids of the collection from the PostgreSQL storage.
    """
    # Construct the query; schema and table names are quoted via psycopg2's
    # Identifier, which guards against SQL injection.
    sql_str = 'SELECT id FROM {}.{} ORDER BY id'
    doc_ids = []
    with storage.conn as conn:
        # Named cursors: http://initd.org/psycopg/docs/usage.html#server-side-cursors
        # A server-side cursor streams ids instead of loading the whole
        # result set at once; withhold=True keeps it usable after commit.
        with conn.cursor('read_collection_doc_ids', withhold=True) as read_cursor:
            try:
                read_cursor.execute(SQL(sql_str).format(Identifier(schema),
                                                        Identifier(collection)))
            except Exception as e:
                logger.error(e)
                raise
            finally:
                # Log the final query text whether or not it succeeded.
                logger.debug( read_cursor.query.decode() )
            for items in read_cursor:
                doc_ids.append ( items[0] )
    return doc_ids
def pick_random_doc_ids( k, storage, schema, collection, logger, sort=True ):
    ''' Picks a random sample of k document ids from the given collection;
        if the collection has at most k documents, all ids are returned. '''
    available = fetch_document_indexes( storage, schema, collection, logger )
    if k < len(available):
        chosen = sample(available, k)
    else:
        chosen = available
    if sort:
        return sorted(chosen)
    return chosen
def load_in_doc_ids_from_file( fnm, storage, schema, collection, logger, sort=True ):
    '''Loads processable document ids from a text file.
       In the text file, each document id should be on a separate line.
       Ids missing from the collection are skipped with a warning; the
       process exits if the file is missing or yields no valid ids.
       Returns a list with document ids.
    '''
    if not os.path.isfile( fnm ):
        # Bug fix: was `log.error(...)`, but no `log` exists in this scope
        # (NameError); the logger parameter is the intended target.
        logger.error('Error at loading document index: invalid index file {!r}. '.format( fnm ))
        exit(1)
    all_doc_ids = set(fetch_document_indexes(storage, schema, collection, logger))
    ids = []
    with open( fnm, 'r', encoding='utf-8' ) as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            # NOTE(review): int(line) raises ValueError on non-numeric lines
            # — confirm whether such lines can occur in index files.
            if int(line) not in all_doc_ids and line not in all_doc_ids:
                logger.warning(f'Document id {line} is missing from {collection} indexes. Skipping id.')
                continue
            ids.append( int(line) )
    if len(ids) == 0:
        # Bug fix: second occurrence of the same `log` NameError.
        logger.error('No valid document ids were found from the index file {!r}.'.format( fnm ))
        exit(1)
    if sort:
        ids = sorted( ids )
    return ids
# =================================================
# =================================================
# Finding & fixing dependency layers
# =================================================
# =================================================
def find_ner_dependency_layers( ner_input_layers, ner_layer, collection, log,
                                incl_prefix='', incl_suffix='' ):
    ''' Finds a mapping from ner_input_layers to layers available in the
        collection, assuming each NER input layer name is a substring of
        its corresponding collection layer name.
        Non-empty incl_prefix / incl_suffix restrict which collection
        layers are considered (startswith / endswith filters).
        Exits the process if any input layer matches zero or more than
        one collection layer.
    '''
    if ner_input_layers is None:
        ner_input_layers = ['morph_analysis', 'words', 'sentences']
    # 1) Match every NER input layer to exactly one collection layer
    matches = defaultdict(list)
    for needed_layer in ner_input_layers:
        for candidate in collection.layers:
            # Apply the prefix/suffix filters before substring matching.
            if not candidate.startswith(incl_prefix):
                continue
            if not candidate.endswith(incl_suffix):
                continue
            if needed_layer in candidate:
                matches[needed_layer].append( candidate )
        if len( matches[needed_layer] ) > 1:
            log.error(("(!) NER input layer {!r} has more than 1 "+\
                       "possible matches in the collection {!r}: {!r}").format(needed_layer,
                                                                               collection.name,
                                                                               matches[needed_layer]))
            log.error(("Please use arguments in_prefix and/or in_suffix to specify, "+
                       "which layers are relevant dependencies of the {!r} layer.").format(ner_layer))
            exit(1)
        if len( matches[needed_layer] ) == 0:
            log.error(("(!) NER input layer {!r} could not be found from "+\
                       "layers of the collection {!r}. Collection's layers are: {!r}").format(needed_layer,
                                                                                              collection.name,
                                                                                              collection.layers))
            exit(1)
    # 2) Unwrap the single-element lists into plain strings
    for key in matches.keys():
        found = matches[key]
        assert isinstance(found, list) and len(found) == 1
        matches[key] = found[0]
    return matches
def flip_ner_input_layer_names( text_obj, ner_input_layers_mapping ):
    ''' Renames NER input layers of text_obj from their collection-specific
        names (ner_input_layers_mapping.values()) back to the default NER
        input layer names (ner_input_layers_mapping.keys()).
        Renaming requires detaching each layer (pop_layer), rewriting its
        name / parent / enveloping references, and re-attaching the layers
        in dependency order (words before sentences before morph_analysis).
    '''
    # Reverse mapping: collection layer name -> default NER input layer name
    reverse_map = { v:k for (k, v) in ner_input_layers_mapping.items() }
    new_layers = []
    # Remove layers in the order of dependencies
    # (dependents first, so detaching never breaks a remaining layer)
    for in_layer in ['morph_analysis', 'sentences', 'words']:
        if in_layer in ner_input_layers_mapping:
            collection_layer_name = ner_input_layers_mapping[in_layer]
            if collection_layer_name == in_layer:
                # No need to rename: move along!
                continue
            # Remove collection layer from text object
            layer_pointer = text_obj[collection_layer_name]
            collection_layer = text_obj.pop_layer( collection_layer_name )
            if len(layer_pointer) == 0 and collection_layer is None:
                # Hack for getting around of pop_layer()'s bug
                # (returns None on empty layer)
                collection_layer = layer_pointer
            # Rename layer
            collection_layer.name = in_layer
            # Rename layer's dependencies so they point at the default names
            if collection_layer.parent != None and \
               collection_layer.parent in reverse_map:
                collection_layer.parent = reverse_map[collection_layer.parent]
            if collection_layer.enveloping != None and \
               collection_layer.enveloping in reverse_map:
                collection_layer.enveloping = reverse_map[collection_layer.enveloping]
            new_layers.append( collection_layer )
    if new_layers:
        # Add layers in the reversed order of dependencies
        # (prerequisites first, so add_layer's dependency checks succeed)
        for in_layer in ['words', 'sentences', 'morph_analysis']:
            for layer in new_layers:
                if layer.name == in_layer:
                    text_obj.add_layer(layer)
# =================================================
# =================================================
# Creating named entity taggers
# =================================================
# =================================================
def create_ner_tagger( old_ner_layer, collection, log, new_ner_layer,
                       incl_prefix='', incl_suffix='' ):
    ''' Creates a NerTagger for analysing the given collection.

        The tagger's required input layers are matched against the layers
        actually available in the collection (optionally filtered via
        incl_prefix / incl_suffix); the resulting mapping is returned
        alongside the tagger.
    '''
    # Default input layers expected by NerTagger
    required_layers = ['morph_analysis', 'words', 'sentences']
    # Map each required layer to its counterpart in the collection
    layers_mapping = find_ner_dependency_layers(
        required_layers, old_ner_layer, collection, log,
        incl_prefix=incl_prefix, incl_suffix=incl_suffix )
    #
    # TODO: NerTagger's current interface does not allow to properly use
    # customized layer names (names of the default layers are hard-coded),
    # so the collection's layer names cannot be given at initialization;
    # the mapping is returned instead, so that callers can use it when
    # querying the collection.
    #
    ner_tagger = NerTagger( output_layer=new_ner_layer )
    log.info(' Initialized {!r} for evaluation. '.format( ner_tagger ) )
    return ner_tagger, layers_mapping
def create_ner_tagger_from_model( ner_layer, model_location, collection, log,
                                  incl_prefix='', incl_suffix='' ):
    ''' Creates a NerTagger that loads a specific model, with the input
        layers matched against the given collection.

        See create_ner_tagger for details of the layer matching; the
        resulting mapping is returned alongside the tagger.
    '''
    # Default input layers expected by NerTagger
    required_layers = ['morph_analysis', 'words', 'sentences']
    # Map each required layer to its counterpart in the collection
    layers_mapping = find_ner_dependency_layers(
        required_layers, ner_layer, collection, log,
        incl_prefix=incl_prefix, incl_suffix=incl_suffix )
    #
    # TODO: NerTagger's current interface does not allow to properly use
    # customized layer names (names of the default layers are hard-coded),
    # so the collection's layer names cannot be given at initialization;
    # the mapping is returned instead, so that callers can use it when
    # querying the collection.
    #
    # The model directory must exist before the tagger can be created
    assert os.path.isdir(model_location), \
        '(!) Invalid model_dir for NerTagger: {}'.format(model_location)
    ner_tagger = NerTagger( output_layer=ner_layer, model_dir = model_location )
    log.info(' Initialized {!r} for evaluation. '.format( ner_tagger ) )
    return ner_tagger, layers_mapping
|
#!/usr/bin/env python3
import queue
class Queue:
    """Thread-safe FIFO wrapper around :class:`queue.Queue`.

    Fixes over the original: the ``type`` argument of :meth:`put` (accepted
    but never used, and shadowing the builtin) is now optional, so existing
    two-argument callers keep working while new callers can omit it; the
    stray ``pass`` after ``__init__``'s body was removed.
    """

    def __init__(self):
        # Underlying synchronized FIFO; double underscore keeps it private.
        self.__queue = queue.Queue()

    def put(self, item, type=None):
        """Enqueue *item*. ``type`` is accepted for compatibility but unused."""
        self.__queue.put(item)

    def get(self):
        """Block until an item is available and return it."""
        return self.__queue.get()

    def empty(self):
        """Return True if the queue is currently empty (best-effort snapshot)."""
        return self.__queue.empty()
class Dispatcher:
    # Shared message queues: one feeding the speaker, one feeding the logger.
    # NOTE(review): these are class-level attributes, so every Dispatcher
    # user shares the same two queues — presumably intentional (acting as a
    # process-wide message bus); confirm.
    speaker = Queue()
    logger = Queue()
|
import os
import re
import sublime
import sublime_plugin
version = sublime.version()
from unittest import TextTestRunner
if version >= '3000':
from .unittesting import TestLoader
from .unittesting import DeferringTextTestRunner
from .utils import settings as plugin_settings
from .utils import Jfile
else:
from unittesting import TestLoader
from utils import settings as plugin_settings
from utils import Jfile
# st3 has append command, it is needed for st2.
class OutputPanelInsert(sublime_plugin.TextCommand):
    """Append *characters* to the end of a (possibly read-only) view.

    Replicates ST3's built-in ``append`` command for ST2.
    """

    def run(self, edit, characters):
        view = self.view
        # Temporarily lift the read-only flag so the insert succeeds.
        view.set_read_only(False)
        view.insert(edit, view.size(), characters)
        view.set_read_only(True)
        # Scroll so the freshly appended text is visible.
        view.show(view.size())
class OutputPanel:
    """A Sublime Text output panel that is file-like enough (``write`` /
    ``flush`` / ``closed``) to serve as a unittest result stream."""

    def __init__(
        self, name, file_regex='', line_regex='', base_dir=None,
        word_wrap=False, line_numbers=False, gutter=False,
        scroll_past_end=False, syntax='Packages/Text/Plain text.tmLanguage'
    ):
        self.name = name
        self.window = sublime.active_window()
        self.output_view = self.window.get_output_panel(name)
        # Default to the directory of the currently open file.
        active_view = self.window.active_view()
        if not base_dir and active_view and active_view.file_name():
            base_dir = os.path.dirname(active_view.file_name())
        settings = self.output_view.settings()
        for key, value in (
            ("result_file_regex", file_regex),
            ("result_line_regex", line_regex),
            ("result_base_dir", base_dir),
            ("word_wrap", word_wrap),
            ("line_numbers", line_numbers),
            ("gutter", gutter),
            ("scroll_past_end", scroll_past_end),
            ("syntax", syntax),
        ):
            settings.set(key, value)
        self.closed = False

    def write(self, s):
        # Route through the TextCommand so the read-only flag is handled.
        self.output_view.run_command('output_panel_insert', {'characters': s})

    def flush(self):
        # File-protocol stub; the view updates immediately on write().
        pass

    def show(self):
        self.window.run_command("show_panel", {"panel": "output." + self.name})

    def close(self):
        # Only marks the stream closed; the panel itself stays visible.
        self.closed = True
def input_parser(package):
    """Split a ``"package:pattern"`` spec into its two parts.

    When no usable ``name:pattern`` split exists (no colon, or an empty
    side), the whole string is the package and the default test file
    pattern ``"test*.py"`` is used.
    """
    head, _sep, tail = package.partition(':')
    if head and tail:
        return (head, tail)
    return (package, "test*.py")
class UnitTestingCommand(sublime_plugin.ApplicationCommand):
    """Run a package's unittest suite inside Sublime Text.

    Results go either to an output panel or to a file under
    ``User/UnitTesting/tests_output``.

    Fix: the local variable ``async`` was renamed to ``run_async`` —
    ``async`` became a reserved keyword in Python 3.7, which made this
    module a SyntaxError on modern interpreters.  The ``"async"`` key in
    ``unittesting.json`` is unchanged.
    """

    @property
    def project_name(self):
        """Return the name of the current project.

        Uses the project file name when one exists, otherwise falls back
        to the basename of the first open folder.
        """
        project_name = sublime.active_window().project_file_name()
        if project_name is None:
            folders = sublime.active_window().folders()
            if len(folders) > 0:
                project_name = folders[0].rsplit(os.sep, 1)[1]
        else:
            project_name = project_name.rsplit(os.sep, 1)[1].split('.')[0]
        return project_name

    def run(self, package=None, output=None):
        """Entry point.

        With *package* given, load its ``unittesting.json`` settings and
        run the tests; otherwise prompt the user for a package name and
        re-invoke this command.
        """
        if package:
            if package == "<current>":
                package = self.project_name
            plugin_settings.set("recent-package", package)
            package, pattern = input_parser(package)
            jfile = os.path.join(sublime.packages_path(), package, "unittesting.json")
            if os.path.exists(jfile):
                ss = Jfile(jfile).load()
                tests_dir = ss.get("tests_dir", "tests")
                # "async" is a reserved word in Python 3.7+, so the config
                # value is bound to a differently named local variable.
                run_async = ss.get("async", False)
                deferred = ss.get("deferred", False)
                verbosity = ss.get("verbosity", 2)
            else:
                tests_dir, run_async, deferred, verbosity = "tests", False, False, 2
            if version < '3000':
                # ST2 supports neither deferred nor async runs.
                deferred = False
                run_async = False
            if output == "panel":
                output_panel = OutputPanel(
                    'unittests', file_regex=r'File "([^"]*)", line (\d+)')
                output_panel.show()
                stream = output_panel
            else:
                if output:
                    outfile = output
                else:
                    outputdir = os.path.join(
                        sublime.packages_path(),
                        'User', 'UnitTesting', "tests_output"
                    )
                    if not os.path.isdir(outputdir):
                        os.makedirs(outputdir)
                    outfile = os.path.join(outputdir, package)
                if os.path.exists(outfile):
                    os.remove(outfile)
                stream = open(outfile, "w")
            if run_async:
                # Deferred mode is not supported together with async runs.
                sublime.set_timeout_async(
                    lambda: self.testing(
                        package, tests_dir, pattern, stream, False, verbosity
                    ), 100)
            else:
                self.testing(package, tests_dir, pattern, stream, deferred, verbosity)
        else:
            # bootstrap run() with package input
            view = sublime.active_window().show_input_panel(
                'Package:', plugin_settings.get("recent-package", "Package Name"),
                lambda x: sublime.run_command(
                    "unit_testing", {
                        "package": x,
                        "output": output
                    }), None, None
            )
            view.run_command("select_all")

    def testing(self, package, tests_dir, pattern, stream, deferred=False, verbosity=2):
        """Discover and run the package's tests, writing results to *stream*."""
        try:
            # use custom loader which supports ST2 and reloading modules
            loader = TestLoader(deferred)
            test = loader.discover(os.path.join(
                sublime.packages_path(), package, tests_dir), pattern
            )
            # use deferred test runner or default test runner
            if deferred:
                testRunner = DeferringTextTestRunner(stream, verbosity=verbosity)
            else:
                testRunner = TextTestRunner(stream, verbosity=verbosity)
            testRunner.run(test)
        except Exception as e:
            if not stream.closed:
                stream.write("ERROR: %s\n" % e)
            # The deferred runner closes the stream itself when it finishes.
            if not deferred:
                stream.close()
|
from sys import maxsize as infinity
from utils import Node, PriorityQueue
"""
Информирано пребарување во рамки на граф
"""
def memoize(fn, slot=None):
    """Return a caching wrapper around *fn*.

    With *slot* given, the result of the first call is stored as an
    attribute of that name on the first argument and reused afterwards.
    Without *slot*, results are kept in a dictionary keyed by the
    positional arguments.

    :param fn: the function to wrap
    :type fn: function
    :param slot: attribute name used to cache the result on the first argument
    :type slot: str
    :return: the caching version of *fn*
    :rtype: function
    """
    if slot:
        def memoized_fn(obj, *args):
            # Compute once, then serve the stored attribute on every call.
            if not hasattr(obj, slot):
                setattr(obj, slot, fn(obj, *args))
            return getattr(obj, slot)
    else:
        def memoized_fn(*args):
            try:
                return memoized_fn.cache[args]
            except KeyError:
                result = fn(*args)
                memoized_fn.cache[args] = result
                return result
        memoized_fn.cache = {}
    return memoized_fn
def best_first_graph_search(problem, f):
    """Search the successors of the given problem to find a goal. Uses an
    evaluation function to decide which neighbour looks most promising and
    should be expanded next. If two paths reach the same state, the better
    one is kept.

    :param problem: the problem to solve
    :type problem: Problem
    :param f: evaluation (estimate) function over nodes
    :type f: function
    :return: Node or None
    :rtype: Node
    """
    # Cache f per node by storing it on the node itself (attribute 'f').
    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        return node
    # Min-priority queue ordered by f.
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                # A cheaper path to an already-queued state replaces it.
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    # Frontier exhausted without reaching a goal.
    return None
def greedy_best_first_graph_search(problem, h=None):
    """Greedy best-first search: best-first search with f(n) = h(n).

    :param problem: the problem to solve
    :type problem: Problem
    :param h: heuristic function (defaults to problem.h)
    :type h: function
    :return: Node or None
    """
    # Cache heuristic values on the nodes themselves (attribute 'h').
    heuristic = memoize(h or problem.h, 'h')
    return best_first_graph_search(problem, heuristic)
def astar_search(problem, h=None):
    """A* search: best-first graph search with f(n) = g(n) + h(n).

    :param problem: the problem to solve
    :type problem: Problem
    :param h: heuristic function (defaults to problem.h)
    :type h: function
    :return: Node or None
    """
    heuristic = memoize(h or problem.h, 'h')

    def evaluation(node):
        # g(n) is the accumulated path cost; h(n) the cached heuristic.
        return node.path_cost + heuristic(node)

    return best_first_graph_search(problem, evaluation)
def recursive_best_first_search(problem, h=None):
    """Recursive best-first search — limits recursion by keeping track of
    the f-value of the best alternative path from any ancestor node
    (one step of look-ahead).

    :param problem: the problem to solve
    :type problem: Problem
    :param h: heuristic function (defaults to problem.h)
    :type h: function
    :return: Node or None
    """
    h = memoize(h or problem.h, 'h')

    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0   # (the second value is irrelevant here)
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            # Never let a child's f drop below its parent's (monotonicity).
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            # Order by lowest f value
            successors.sort(key=lambda x: x.f)
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    node = Node(problem.initial)
    node.f = h(node)
    result, bestf = RBFS(problem, node, infinity)
    # BUG FIX: the original ended with a bare `return`, so the function
    # always returned None even when RBFS had found a goal node.
    return result
"""I've tracked my spending via ad hoc text files and shell scripts. I
thought I should try entering the world of spreadsheets and databases,
and wrote this to convert the files.
started 18:07, basically done 19:22
played around, then added sorting and date, 20:35
file names e.g. Jan2014, Apr2010, June2014
older files JAN2000, different format, skipped
"""
header= '"Date","Year","Month","Day","Amount","Category","Desc"\n'
import os
from os import listdir
import csv
import datetime
"""
grep F$ $1 | asum
grep R$ $1 | asum
grep RP$ $1 | asum
grep toilet $1 | asum
grep transport $1 | asum
grep laundry $1 | asum
grep medic $1 | asum
grep enter $1 | asum
grep phone $1 | asum
grep power $1 | asum
grep rent $1 | asum
grep fees $1 | asum
grep book $1 | asum
grep comic $1 | asum
grep video $1 | asum
grep photo $1 | asum
grep gift $1 | asum
grep text $1 | asum
grep IU $1 | asum
grep big $1 | asum
grep misc $1 | asum
"""
def categorize(s):  # stripped string input, returns string
    """ str -> str
    Given an entry line, return its spending category.

    BUG FIX: the original ignored its parameter and read the global loop
    variable ``l``; it now uses ``s`` (the caller passes the same value, so
    behaviour is unchanged for the existing call site).

    Rules are checked in a fixed order and the LAST match wins, as before;
    any additional match after the first flags the line as ambiguous.
    Unmatched or ambiguous lines are printed so input problems can be
    cleaned up.
    """
    # (predicate, category) pairs, in the original checking order.
    # Suffix checks use slices so an empty line cannot raise IndexError.
    rules = (
        (lambda t: t[-1:] == 'F', "grocery"),
        (lambda t: t[-1:] == 'R', "eat out"),
        (lambda t: t[-2:] == 'RP', "eat out"),
        (lambda t: "toilet" in t, "toilet"),
        (lambda t: "transport" in t, "transport"),
        (lambda t: "laundry" in t, "laundry"),
        (lambda t: "medic" in t, "medical"),
        (lambda t: "enter" in t, "entertainment"),
        (lambda t: "phone" in t, "phone"),
        (lambda t: "power" in t, "power"),
        (lambda t: "rent" in t, "rent"),
        (lambda t: "fees" in t, "fees"),
        (lambda t: "book" in t, "book"),
        (lambda t: "comic" in t, "comic"),
        (lambda t: "video" in t, "video"),
        (lambda t: "photo" in t, "photo"),
        (lambda t: "gift" in t, "gift"),
        (lambda t: "text" in t, "textbook"),
        (lambda t: "IU" in t, "IU"),
        (lambda t: "big" in t, "bigmisc"),
        (lambda t: "misc" in t, "misc"),
    )
    category = ""
    categorized = False
    ambiguous = False
    for test, cat in rules:
        if test(s):
            if categorized:
                ambiguous = True
            category = cat
            categorized = True
    # Unneeded in the final run, but useful when cleaning up the input.
    if category == "":
        print("uncategorized", s)
    if ambiguous:
        print("ambiguous", s)
    return category
def transmonth(m):
    """ str -> int
    Translate a month name (as spelled in my file names, e.g. 'June')
    to its 1-based month number."""
    names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'June',
             'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    # Unknown names raise KeyError, same as the original dict lookup.
    return {name: number for number, name in enumerate(names, start=1)}[m]
# main: walk the monthly expense files in the current directory and emit a
# single date-sorted CSV ("money.csv") with one row per spending entry.
with open("money.csv","w") as outfile:
    outfile.write(header)
    outlines=[]
    csvwriter = csv.writer(outfile,dialect='unix')
    for fn in os.listdir():
        if "csv" in fn: continue #avoid reading our own output file
        if fn.upper() == fn: continue #skipping older all-caps files in a different format
        # File names look like "Jan2014": last 4 chars are the year,
        # the rest is the month name.
        year = int(fn[-4:])
        month = fn[:-4]
        print("FILE",year,month,fn) #progress tracker
        with open(fn,"r") as f:
            for l in f:
                l=l.strip()
                if l=="": continue
                cat = categorize(l)
                # Entry format: "<amount> <description words...>"
                fields = l.split()
                amount=float(fields[0])
                desc = " ".join(fields[1:])
                # The day of month was never recorded, so pin every entry
                # to the 1st of its month.
                date = datetime.date(int(year),transmonth(month),1)
                out=[date,year,month,1,amount,cat,desc]
                #print(out)
                outlines.append(out)
    #done with files, sort by date
    outlines.sort(key=lambda k:k[0])
    for l in outlines:
        l[0]=str(l[0]) #csv needs everything to be strings
        csvwriter.writerow(l)
|
from flask import Flask, jsonify, abort, request, make_response, url_for
from app.models import *
from app import app, db
from app.database import *
@app.route('/api/v1/franchises/<string:franchise_id>')
def get_franchise(franchise_id):
    """Return the franchise whose franchID matches, as JSON."""
    record = session.query(TeamsFranchises).filter_by(franchID=franchise_id).first()
    return jsonify(record)
@app.route('/api/v1/franchises/<string:franchise_id>/teams')
def get_franchise_teams(franchise_id):
    """Return a team belonging to the franchise, as JSON.

    NOTE(review): despite the plural "/teams" route, this returns only the
    first matching team (same as the original) — confirm whether .all()
    was intended.
    """
    record = session.query(Teams).filter_by(franchID=franchise_id).first()
    return jsonify(record)
@app.route('/api/v1/teams/<string:team_id>')
def get_team(team_id):
    """Return the team whose teamID matches, as JSON."""
    record = session.query(Teams).filter_by(teamID=team_id).first()
    return jsonify(record)
|
import pytest
import datetime
import numpy as np
import scipy.io
import os
from shutil import copyfile
import pelops.utils as utils
from pelops.datasets.compcar import CompcarDataset
@pytest.fixture
def compcar(tmpdir):
    """ Set up some test files and an instance of CompcarDataset().

    Creates the on-disk layout CompcarDataset expects (surveillance split
    lists, an image/ directory and the two .mat metadata files) inside a
    temporary directory, then returns the dataset plus the expected chip
    tuples for the assertions in the tests below.
    """
    # Write a file to read back
    FILE_NAMES = (
        # filepath, car_id, cam_id, time, misc
        ("1/asdasf1123123.jpg", 1, None, None, {"color": "blue", "make": "BMW", "model": "X5", "model_id": 105}),
        ("1/qjfas123189798.jpg", 1, None, None, {"color": "black", "make": "BMW", "model": "X5", "model_id": 105}),
        ("2/345sdjkhjlsh33.jpg", 2, None, None, {"color": "red", "make": "Zotye", "model": "Z300", "model_id": 1035}),
        ("3/werfsdbfuw3223.jpg", 3, None, None, {"color": "yellow", "make": "Hyundai", "model": "Santafe", "model_id": 961}),
        ("3/asdfj21348wesd.jpg", 3, None, None, {"color": "champagne", "make": "Hyundai", "model": "Santafe", "model_id": 961}),
        ("4/kjdfgjhlsdg322.jpg", 4, None, None, {"color": "champagne", "make": "Toyota", "model": "Crown", "model_id": 1322}),
    )
    # The contents of the files do not matter, the name is enough
    name_test = tmpdir.join("test_surveillance.txt")
    name_train = tmpdir.join("train_surveillance.txt")
    # Train split only needs to exist; its contents are never read here.
    name_train.write("TEST")
    tmpdir.mkdir("image")
    # Make/model lookup table keyed by web_id.
    # NOTE(review): "BWM"/"Zoyte" spellings here differ from the expected
    # make values above — presumably the dataset resolves models via the
    # numeric web_id rather than these strings; confirm.
    model_mat = tmpdir.join("sv_make_model_name.mat")
    model_matrix = np.array([
        # make, model, web_id
        [["BWM"], ["BWM X5"], 105],
        [["Zoyte"], ["Zotye Z300"], 1035],
        [["Hyundai"], ["Santafe"], 961],
        [["Toyota"], ["Crown"], 1322],
    ])
    scipy.io.savemat(os.path.join(model_mat.dirname, "sv_make_model_name.mat"), mdict={"sv_make_model_name": model_matrix})
    # Per-image colour index table (numeric codes -> colour names).
    color_mat = tmpdir.join("color_list.mat")
    color_matrix = np.array([
        # filepath, color_num
        [["1/asdasf1123123.jpg"], 4],
        [["1/qjfas123189798.jpg"], 0],
        [["2/345sdjkhjlsh33.jpg"], 2],
        [["3/werfsdbfuw3223.jpg"], 3],
        [["3/asdfj21348wesd.jpg"], 8],
        [["4/kjdfgjhlsdg322.jpg"], 8],
    ])
    scipy.io.savemat(os.path.join(color_mat.dirname, "color_list.mat"), mdict={"color_list": color_matrix})
    # Test split file lists one relative image path per line.
    names = ""
    for name, _, _, _, _ in FILE_NAMES:
        names += name + "\n"
    name_test.write(names)
    # Setup the class
    instantiated_class = CompcarDataset(name_test.dirname, utils.SetType.TEST)
    # Rename filepath: the dataset stores absolute paths under image/,
    # so rebuild the expected tuples accordingly.
    FILE_NAMES = (
        # filepath, car_id, cam_id, time, misc
        (os.path.join(name_test.dirname, "image", "1/asdasf1123123.jpg"), 1, None, None, {"color": "blue", "make": "BMW", "model": "X5", "model_id": 105}),
        (os.path.join(name_test.dirname, "image", "1/qjfas123189798.jpg"), 1, None, None, {"color": "black", "make": "BMW", "model": "X5", "model_id": 105}),
        (os.path.join(name_test.dirname, "image", "2/345sdjkhjlsh33.jpg"), 2, None, None, {"color": "red", "make": "Zotye", "model": "Z300", "model_id": 1035}),
        (os.path.join(name_test.dirname, "image", "3/werfsdbfuw3223.jpg"), 3, None, None, {"color": "yellow", "make": "Hyundai", "model": "Santafe", "model_id": 961}),
        (os.path.join(name_test.dirname, "image", "3/asdfj21348wesd.jpg"), 3, None, None, {"color": "champagne", "make": "Hyundai", "model": "Santafe", "model_id": 961}),
        (os.path.join(name_test.dirname, "image", "4/kjdfgjhlsdg322.jpg"), 4, None, None, {"color": "champagne", "make": "Toyota", "model": "Crown", "model_id": 1322}),
    )
    return (instantiated_class, FILE_NAMES)
def test_compcar_chips_len(compcar):
""" Test that CompcarDataset.chips is the correct length """
instantiated_class = compcar[0]
FILE_NAMES = compcar[1]
# check that self.chips has been created, is not empty, and has the right
# number of entries
assert len(FILE_NAMES)
assert len(FILE_NAMES) == len(instantiated_class.chips)
def test_compcar_chips_vals(compcar):
""" Test that CompcarDatset chips have the correct values. """
instantiated_class = compcar[0]
FILE_NAMES = compcar[1]
# Check that the correct chips exist
for filepath, car_id, cam_id, time, misc in FILE_NAMES:
chip = instantiated_class.chips[filepath]
assert car_id == chip.car_id
assert cam_id is None
assert time is None
assert misc["color"] == chip.misc["color"]
assert misc["make"] == chip.misc["make"]
assert misc["model"] == chip.misc["model"]
assert misc["model_id"] == chip.misc["model_id"]
# Filepath should be filled
assert chip.filepath
def test_get_all_chips_by_car_id(compcar):
    """ Test CompcarDataset.get_all_chips_by_car_id() """
    dataset, expected_files = compcar
    seen_ids = set()
    for _, car_id, _, _, _ in expected_files:
        # Build the expected chip list by hand once per distinct id
        if car_id in seen_ids:
            continue
        seen_ids.add(car_id)
        expected_chips = sorted(
            chip for chip in dataset.chips.values() if chip.car_id == car_id
        )
        actual_chips = sorted(dataset.get_all_chips_by_car_id(car_id))
        assert expected_chips == actual_chips
def test_get_all_chips_by_cam_id(compcar):
    """ Test CompcarDataset.get_all_chips_by_cam_id() """
    dataset, expected_files = compcar
    seen_ids = set()
    for _, _, cam_id, _, _ in expected_files:
        # Build the expected chip list by hand once per distinct id
        if cam_id in seen_ids:
            continue
        seen_ids.add(cam_id)
        expected_chips = sorted(
            chip for chip in dataset.chips.values() if chip.cam_id == cam_id
        )
        actual_chips = sorted(dataset.get_all_chips_by_cam_id(cam_id))
        assert expected_chips == actual_chips
def test_get_distinct_cams_by_car_id(compcar):
    """ Test CompcarDatset.get_distinct_cams_by_car_id() and get_distinct_cams_per_car """
    instantiated_class = compcar[0]
    CAR_ID = 1
    # NOTE(review): TEST_CAMS is empty, so zip() makes the loop below a
    # no-op and this test asserts nothing — expected cam ids were
    # presumably meant to be filled in; confirm and populate.
    TEST_CAMS = []
    for test_cam, cam in zip(TEST_CAMS, sorted(instantiated_class.get_distinct_cams_by_car_id(CAR_ID))):
        assert test_cam == cam
def test_get_all_cam_ids(compcar):
    """ Test CompcarDatset.get_all_cam_ids() """
    instantiated_class = compcar[0]
    # NOTE(review): TEST_CAMS is empty, so zip() makes the loop below a
    # no-op and this test asserts nothing — expected cam ids were
    # presumably meant to be filled in; confirm and populate.
    TEST_CAMS = []
    for test_cam, cam in zip(TEST_CAMS, sorted(instantiated_class.get_all_cam_ids())):
        assert test_cam == cam
def test_get_all_car_ids(compcar):
""" Test CompcarDatset.get_distinct_cams_by_car_id() """
instantiated_class = compcar[0]
TEST_CARS = [1, 2, 3, 4]
for test_car, car in zip (TEST_CARS, sorted(instantiated_class.get_all_car_ids())):
assert test_car == car
def test_compcar_iter(compcar):
""" Test CompcarDatset.__iter__() """
instantiated_class = compcar[0]
FILE_NAMES = compcar[1]
chip_ids = tuple(i for i, _, _, _, _ in FILE_NAMES)
for chip in instantiated_class:
assert chip.filepath in chip_ids
|
from HorseRace.common import get_pages, get_tables
from HorseRace import common as c
import pandas as pd
from pathlib import Path
from typing import Dict
class Jockey(object):
    """Loads and analyses the race results of a single jockey.

    Data is read from ``<drive>\\horserace\\data\\jockey\\<name>.csv``;
    the analysis (grouped by venue / course type / distance) is written
    back under the ``ana`` sub-directory.
    """

    def __init__(self, name=None):
        self.name = name
        # True only once a DataFrame has actually been loaded.
        self.data_exist = False
        if self.name is not None:
            self.data = self.load_jockey_data()
            if isinstance(self.data, pd.DataFrame):
                self.data_exist = True

    def load_jockey_data(self):
        """Read this jockey's CSV.

        Returns a DataFrame, or None when the data drive / file is not
        available (a message is printed in that case).
        """
        p = Path(r"D:\horserace\data\jockey")
        if not p.exists():
            # Fall back to the secondary drive letter.
            p = Path(r"E:\horserace\data\jockey")
        dpath = p / (self.name + ".csv")
        if not dpath.exists():
            print('ドライブがアクティブではありません')
            return None
        return pd.read_csv(dpath)

    def analize_jockey_data(self):
        """Clean the raw results, aggregate by (venue, type, distance) and
        save mean popularity / finishing position plus an expectation score.
        """
        if not self.data_exist:
            print('dataがありません')
            return None
        # Strip digits from the venue column.
        self.data['開催'] = self.data['開催'].replace('[0-9]', '', regex=True)
        self.data['グレード'] = self.data['レース名'].apply(c.categorize_races)
        self.data = self.data.apply(c.split_distance, axis=1)
        # Coerce finish position / popularity to numbers; bad rows become NaN
        # and are dropped below.
        self.data["着順"] = pd.to_numeric(self.data["着順"], errors='coerce')
        self.data["人気"] = pd.to_numeric(self.data["人気"], errors='coerce')
        self.data = self.data[['日付', '開催', '天気', 'R', 'レース名', '頭数', '枠番', '馬番', '単勝', '人気', '着順', '馬名', '斤量', '距離', '馬場', 'タイム', '着差', '通過', 'ペース', '上り', '馬体重', '勝ち馬', 'グレード', 'タイプ']]
        self.data = self.data.dropna(how='any')
        self.gdata = self.data.groupby(['開催', 'タイプ', '距離'])
        # FIX: select the columns with a list — indexing a GroupBy with a
        # bare tuple (gdata['人気','着順']) is deprecated and removed in
        # modern pandas.
        self.gdata2 = self.gdata[['人気', '着順']].mean()
        self.gdata2['期待値'] = self.gdata2['人気'] - self.gdata2['着順']
        spath = Path(r"D:\horserace\data\jockey\ana") / (self.name + '.csv')
        self.gdata2.to_csv(spath)
|
#Beautiful Soup - To parse the html
from bs4 import BeautifulSoup
#urllib.request - to make http request
from urllib.request import Request, urlopen
#To remove any language special characters
import unicodedata
# EMAIL library
import smtplib
# URL parser
from urllib.parse import urlparse
from urllib.parse import parse_qs
# mysql connector
import mysql.connector
from mysql.connector import Error
# ENV file
from dotenv import load_dotenv
# env
from os import environ
# time
import time
# mail driver
import maildriver
def get_employment_type(endpoint):
    """Fetch a job posting page and return its employment-type text.

    Returns an empty string when the page has no employment-type field.
    """
    if endpoint[0] == '/' and endpoint[1] == '/':
        # Protocol-relative URL: force https
        endpoint = "https:" + endpoint
    request = Request(endpoint, headers={'User-Agent': 'Mozilla/5.0'})
    html = urlopen(request).read()
    page = BeautifulSoup(html, 'lxml')
    info = page.find("li", class_="posInfo posInfo--employmentType")
    if info is None:
        return ""
    return info.find('div', class_="posInfo__Value").text.strip()
def get_jobs(soup):
    """Extract job postings from a parsed BambooHR listing page.

    Returns a list of dicts with title, url, id, location and
    employment type for every posting found.
    """
    postings = []
    for item in soup.find_all("li", class_='BambooHR-ATS-Jobs-Item'):
        link = item.find('a')
        title = link.text.strip()
        url_str = link.attrs['href'].strip()
        # Each posting page is fetched once for its employment type
        employment_type = get_employment_type(url_str)
        # The numeric job id lives in the URL's query string
        parsed = urlparse(url_str)
        job_id = parse_qs(parsed.query)['id'][0]
        location = item.find('span', class_="BambooHR-ATS-Location").text.strip()
        postings.append({
            'job_title' : title,
            'job_url' : url_str,
            'job_id' : job_id,
            'job_location' : location,
            'job_type' : employment_type
        })
        print(job_id + " has been parsed!")
    return postings
def get_jobids_from_jobs(jobs):
    """Return the job_id of every job dict, preserving order."""
    return [job['job_id'] for job in jobs]
def save_jobs_to_mysql(jobs):
    """Synchronize the `jobs` table with the scraped postings.

    Rows whose ID is no longer listed are deleted, then every scraped job
    is upserted.  Alert mails are sent on database errors and after every
    completed run.

    Fixes over the original:
      * `connection`/`cursor` are initialized before the try block, so the
        `finally` clause no longer raises NameError when connect() fails;
      * the DELETE uses parameterized placeholders instead of string
        concatenation (the ids come from parsed external URLs), which also
        repairs the `NOT IN ()` syntax error when the job list is empty.
    """
    connection = None
    cursor = None
    try:
        db_host = environ.get("DB_HOST")
        db_user = environ.get("DB_USER")
        db_password = environ.get("DB_PASSWORD")
        db_name = environ.get("DB_NAME")
        connection = mysql.connector.connect(host=db_host,
                                             database=db_name,
                                             user=db_user,
                                             password=db_password)
        if connection.is_connected():
            jids_array = get_jobids_from_jobs(jobs)
            cursor = connection.cursor()
            # Delete jobs which are no longer on the job list
            if jids_array:
                placeholders = ','.join(['%s'] * len(jids_array))
                mySql_delete_query = "DELETE from `jobs` WHERE ID NOT IN (" + placeholders + ")"
                cursor.execute(mySql_delete_query, tuple(jids_array))
            else:
                # No current postings: every stored job is stale
                cursor.execute("DELETE from `jobs`")
            print("Table Data Removed!")
            for job in jobs:
                try:
                    mySql_insert_query = """INSERT INTO jobs (ID, job_title, url, location, employment_type)
                                            VALUES
                                            (%s, %s, %s, %s, %s)
                                            ON DUPLICATE KEY UPDATE
                                            job_title=%s,url=%s,location=%s,employment_type=%s
                                         """
                    cursor.execute(mySql_insert_query, (job['job_id'], job['job_title'], job['job_url'], job['job_location'], job['job_type'], job['job_title'], job['job_url'], job['job_location'], job['job_type']))
                    connection.commit()
                    print(job['job_id'] + " has been updated!")
                except Error as e:
                    print("Error while updating the table!", e)
                    maildriver.send_table_update_error()
    except Error as e:
        print("Error while connecting to MySQL", e)
        maildriver.send_database_connection_error()
    finally:
        # `connection` stays None when connect() itself failed
        if connection is not None and connection.is_connected():
            maildriver.send_job_run_alert()
            if cursor is not None:
                cursor.close()
            connection.close()
            print("MySQL connection is closed")
def run():
    """Scrape the BambooHR job board, persist the results to MySQL and
    return the parsed job list."""
    started = time.time()
    endpoint = "https://sohodragon.bamboohr.com/jobs/embed2.php"
    request = Request(endpoint, headers={'User-Agent': 'Mozilla/5.0'})
    page = urlopen(request).read()
    soup = BeautifulSoup(page, 'lxml')
    parsed_jobs = get_jobs(soup)
    save_jobs_to_mysql(parsed_jobs)
    elapsed = time.time() - started
    print("Elapsed Time: ", elapsed)
    return parsed_jobs
if __name__ == '__main__':
    # Load DB credentials from the .env file before scraping.
    load_dotenv()
    run()
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
    """[BETA] [DEPRECATED] Use ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`` instead.
    Convert a PIL Image or ndarray to tensor and scale the values accordingly.
    .. v2betastatus:: ToTensor transform
    .. warning::
        :class:`v2.ToTensor` is deprecated and will be removed in a future release.
        Please use instead ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])``.
    This transform does not support torchscript.
    Converts a PIL Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
    if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
    or if the numpy.ndarray has dtype = np.uint8
    In the other cases, tensors are returned without scaling.
    .. note::
        Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
        transforming target image masks. See the `references`_ for implementing the transforms for image masks.
    .. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
    """
    # Only PIL images and numpy arrays are converted; other inputs pass through.
    _transformed_types = (PIL.Image.Image, np.ndarray)
    def __init__(self) -> None:
        # Emit the deprecation warning on every instantiation.
        warnings.warn(
            "The transform `ToTensor()` is deprecated and will be removed in a future release. "
            "Instead, please use `v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`."
        )
        super().__init__()
    def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
        # Delegate to the v1 functional implementation, which performs the
        # dtype conversion and the [0, 255] -> [0.0, 1.0] scaling.
        return _F.to_tensor(inpt)
|
import socket
import sys

# Minimal TCP responder on the plain-text IRC port: tells clients to
# reconnect on 6697 with SSL.  Modernized from Python 2 (print statement,
# indexable socket.error, str payload) to Python 3.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    sock.bind(('', 6667))
except socket.error as msg:
    # Python 3: socket.error is OSError — use its attributes, not indexing.
    print('Bind failed. Error code: ' + str(msg.errno) + ', message: ' + str(msg.strerror))
    sys.exit()
sock.listen(10)
while 1:
    conn, addr = sock.accept()
    print('Connected with ' + addr[0] + ':' + str(addr[1]))
    # send() requires bytes on Python 3.
    conn.send(b':irc.hellface.com NOTICE * :*** Please reconnect on port 6697 with SSL enabled.\n')
    conn.close()
sock.close()
"""This contains all of the forms for the Shepherd application."""
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Row, Column
from datetime import date
from .models import (Domain, History, DomainNote, DomainServerConnection,
DomainStatus)
from .models import (StaticServer, TransientServer, ServerHistory,
ServerNote, ServerStatus, AuxServerAddress)
from ghostwriter.rolodex.models import Project
class DateInput(forms.DateInput):
    # Render as the HTML5 <input type="date"> picker instead of plain text.
    input_type = 'date'
class CheckoutForm(forms.ModelForm):
    """Form used for domain checkout. Updates the domain (status) and creates
    a history entry.
    """
    class Meta:
        """Modify the attributes of the form."""
        model = History
        fields = ('__all__')
        widgets = {
            'operator': forms.HiddenInput(),
            'domain': forms.HiddenInput()
        }

    def __init__(self, *args, **kwargs):
        """Override the `init()` function to set some attributes."""
        super(CheckoutForm, self).__init__(*args, **kwargs)
        self.fields['client'].empty_label = '-- Select a Client --'
        self.fields['client'].label = ''
        self.fields['activity_type'].empty_label = '-- Select Activity --'
        self.fields['activity_type'].label = ''
        self.fields['project'].empty_label = '-- Select a Client First --'
        self.fields['project'].label = ''
        self.fields['project'].queryset = Project.objects.none()
        self.fields['start_date'].widget.attrs['placeholder'] = 'mm/dd/yyyy'
        self.fields['start_date'].widget.attrs['autocomplete'] = 'off'
        self.fields['end_date'].widget.attrs['placeholder'] = 'mm/dd/yyyy'
        self.fields['end_date'].widget.attrs['autocomplete'] = 'off'
        self.fields['note'].widget.attrs[
            'placeholder'] = 'This domain will be used for...'
        self.fields['note'].label = ''
        self.helper = FormHelper()
        self.helper.form_class = 'form-inline'
        self.helper.form_method = 'post'
        self.helper.field_class = \
            'h-100 justify-content-center align-items-center'
        # Prevent "not one of the valid options" errors from AJAX project
        # filtering
        if 'client' in self.data:
            try:
                client_id = int(self.data.get('client'))
                self.fields['project'].queryset = Project.objects.\
                    filter(client_id=client_id).order_by('codename')
            except (ValueError, TypeError):
                pass
        elif self.instance.pk:
            self.fields['project'].queryset = \
                self.instance.client.project_set.order_by('codename')

    def clean_end_date(self):
        """Ensure the checkout period does not end before it starts."""
        end_date = self.cleaned_data['end_date']
        # Use .get(): start_date is absent from cleaned_data when its own
        # validation failed, and indexing raised an unhandled KeyError.
        start_date = self.cleaned_data.get('start_date')
        # Check if end_date comes before the start_date
        if start_date and end_date < start_date:
            raise ValidationError(_('Invalid date: The provided end date '
                                    'comes before the start date.'))
        # Return the cleaned data
        return end_date

    def clean_domain(self):
        """Block checkout of an expired or already checked-out domain."""
        # `is None` (identity) is the idiomatic unsaved-instance check;
        # `== None` relies on __eq__ and was flagged by linters.
        insert = self.instance.pk is None
        domain = self.cleaned_data['domain']
        if insert:
            # NOTE(review): assumes a DomainStatus row named 'Unavailable'
            # always exists (raises DoesNotExist otherwise) -- confirm.
            unavailable = DomainStatus.objects.get(domain_status='Unavailable')
            expired = domain.expiration < date.today()
            if expired:
                raise ValidationError("This domain's registration has expired!")
            if domain.domain_status == unavailable:
                raise ValidationError('Someone beat you to it. This domain has '
                                      'already been checked out!')
        # Return the cleaned data
        return domain
class ServerCheckoutForm(forms.ModelForm):
    """Form used for server checkout. Updates the server (status) and creates
    a history entry.
    """
    class Meta:
        """Modify the attributes of the form."""
        model = ServerHistory
        fields = ('__all__')
        widgets = {
            'operator': forms.HiddenInput(),
            'server': forms.HiddenInput()
        }

    def __init__(self, *args, **kwargs):
        """Override the `init()` function to set some attributes."""
        super(ServerCheckoutForm, self).__init__(*args, **kwargs)
        self.fields['client'].empty_label = '-- Select a Client --'
        self.fields['client'].label = ''
        self.fields['activity_type'].empty_label = '-- Select Activity --'
        self.fields['activity_type'].label = ''
        self.fields['server_role'].empty_label = '-- Select Role --'
        self.fields['server_role'].label = ''
        self.fields['project'].empty_label = '-- Select a Client First --'
        self.fields['project'].label = ''
        self.fields['project'].queryset = Project.objects.none()
        self.fields['start_date'].widget.attrs['placeholder'] = 'mm/dd/yyyy'
        self.fields['start_date'].widget.attrs['autocomplete'] = 'off'
        self.fields['end_date'].widget.attrs['placeholder'] = 'mm/dd/yyyy'
        self.fields['end_date'].widget.attrs['autocomplete'] = 'off'
        self.fields['note'].widget.attrs[
            'placeholder'] = 'This server will be used for C2 with ...'
        self.fields['note'].label = ''
        self.helper = FormHelper()
        self.helper.form_class = 'form-inline'
        self.helper.form_method = 'post'
        self.helper.field_class = \
            'h-100 justify-content-center align-items-center'
        # Prevent "not one of the valid options" errors from AJAX project
        # filtering
        if 'client' in self.data:
            try:
                client_id = int(self.data.get('client'))
                self.fields['project'].queryset = Project.objects.\
                    filter(client_id=client_id).order_by('codename')
            except (ValueError, TypeError):
                pass
        elif self.instance.pk:
            self.fields['project'].queryset = \
                self.instance.client.project_set.order_by('codename')

    def clean_end_date(self):
        """Ensure the checkout period does not end before it starts."""
        end_date = self.cleaned_data['end_date']
        # Use .get(): start_date is absent from cleaned_data when its own
        # validation failed, and indexing raised an unhandled KeyError.
        start_date = self.cleaned_data.get('start_date')
        # Check if end_date comes before the start_date
        if start_date and end_date < start_date:
            raise ValidationError(_('Invalid date: The provided end date '
                                    'comes before the start date.'))
        # Return the cleaned data
        return end_date

    def clean_server(self):
        """Block checkout of a server that is already checked out."""
        # `is None` is the idiomatic identity test for an unsaved instance.
        insert = self.instance.pk is None
        server = self.cleaned_data['server']
        if insert:
            # NOTE(review): assumes a ServerStatus row named 'Unavailable'
            # always exists -- confirm.
            unavailable = ServerStatus.objects.get(server_status='Unavailable')
            if server.server_status == unavailable:
                raise ValidationError('Someone beat you to it. This server has '
                                      'already been checked out!')
        # Return the cleaned data
        return server
class DomainCreateForm(forms.ModelForm):
    """Form used with the DomainCreate CreateView in views.py."""
    class Meta:
        """Modify the attributes of the form."""
        model = Domain
        exclude = ('last_used_by', 'burned_explanation', 'all_cat',
                   'dns_record', 'health_dns', 'expired')

    def __init__(self, *args, **kwargs):
        """Override the `init()` function to set some attributes."""
        super(DomainCreateForm, self).__init__(*args, **kwargs)
        self.fields['name'].widget.attrs['placeholder'] = 'specterops.io'
        self.fields['name'].label = ''
        self.fields['registrar'].widget.attrs['placeholder'] = 'Namecheap'
        self.fields['registrar'].label = ''
        self.fields['creation'].widget.attrs['placeholder'] = 'mm/dd/yyyy'
        self.fields['domain_status'].empty_label = '-- Select Status --'
        self.fields['domain_status'].label = ''
        self.fields['whois_status'].empty_label = '-- Select Status --'
        self.fields['whois_status'].label = ''
        self.fields['health_status'].empty_label = '-- Select Status --'
        self.fields['health_status'].label = ''
        self.fields['creation'].widget.attrs['autocomplete'] = 'off'
        self.fields['expiration'].widget.attrs['placeholder'] = 'mm/dd/yyyy'
        self.fields['expiration'].widget.attrs['autocomplete'] = 'off'
        self.fields['bluecoat_cat'].widget.attrs[
            'placeholder'] = 'Category A, Category B, ...'
        self.fields['fortiguard_cat'].widget.attrs[
            'placeholder'] = 'Category A, Category B, ...'
        self.fields['ibm_xforce_cat'].widget.attrs[
            'placeholder'] = 'Category A, Category B, ...'
        self.fields['opendns_cat'].widget.attrs[
            'placeholder'] = 'Category A, Category B, ...'
        self.fields['talos_cat'].widget.attrs[
            'placeholder'] = 'Category A, Category B, ...'
        self.fields['trendmicro_cat'].widget.attrs[
            'placeholder'] = 'Category A, Category B, ...'
        self.fields['mx_toolbox_status'].widget.attrs[
            'placeholder'] = 'Spamhaus Blacklist ...'
        self.fields['note'].widget.attrs['placeholder'] = \
            'This domain is an effective lookalike of populardomain.tld ...'
        self.fields['note'].label = ''
        self.helper = FormHelper()
        self.helper.form_class = 'form-inline'
        self.helper.form_method = 'post'
        self.helper.field_class = \
            'h-100 justify-content-center align-items-center'

    def clean_expiration(self):
        """Ensure the expiration date does not precede the purchase date."""
        expiration = self.cleaned_data['expiration']
        # Use .get(): creation is absent from cleaned_data when its own
        # validation failed, and indexing raised an unhandled KeyError.
        creation = self.cleaned_data.get('creation')
        # Check if expiration comes before the creation date
        if creation and expiration < creation:
            raise ValidationError(_('Invalid date: The provided expiration '
                                    'date comes before the purchase date.'))
        # Return the cleaned data
        return expiration
class ServerCreateForm(forms.ModelForm):
    """Form used with the ServerCreate CreateView in views.py."""

    class Meta:
        """Modify the attributes of the form."""
        model = StaticServer
        exclude = ('last_used_by',)

    def __init__(self, *args, **kwargs):
        """Attach placeholders, select prompts, and the crispy-forms helper."""
        super().__init__(*args, **kwargs)
        # Placeholder text for the free-form fields.
        for name, placeholder in (
            ('ip_address', '172.10.10.236'),
            ('name', 'hostname'),
            ('note', 'The server lives in the data center...'),
        ):
            self.fields[name].widget.attrs['placeholder'] = placeholder
        # Prompts for the select widgets.
        for name, empty_label in (
            ('server_status', '-- Select Status --'),
            ('server_provider', '-- Select Provider --'),
        ):
            self.fields[name].empty_label = empty_label
        helper = FormHelper()
        helper.form_class = 'form-inline'
        helper.form_method = 'post'
        helper.field_class = 'h-100 justify-content-center align-items-center'
        helper.form_show_labels = False
        self.helper = helper
class TransientServerCreateForm(forms.ModelForm):
    """Form used with the TransientServer CreateView in views.py."""

    class Meta:
        """Modify the attributes of the form."""
        model = TransientServer
        fields = ('__all__')
        widgets = {
            'operator': forms.HiddenInput(),
            'project': forms.HiddenInput()
        }

    def __init__(self, *args, **kwargs):
        """Attach placeholders, select prompts, and the crispy-forms helper."""
        super().__init__(*args, **kwargs)
        # Placeholder text for the free-form fields.
        for name, placeholder in (
            ('ip_address', '172.10.10.236'),
            ('name', 'hostname'),
        ):
            self.fields[name].widget.attrs['placeholder'] = placeholder
        # Prompts for the select widgets.
        for name, empty_label in (
            ('activity_type', '-- Select Activity --'),
            ('server_role', '-- Select Role --'),
            ('server_provider', '-- Select Provider --'),
        ):
            self.fields[name].empty_label = empty_label
        helper = FormHelper()
        helper.form_class = 'form-inline'
        helper.form_method = 'post'
        helper.field_class = 'h-100 justify-content-center align-items-center'
        helper.form_show_labels = False
        self.helper = helper
class DomainLinkForm(forms.ModelForm):
    """Form used with the DomainServerConnection views."""
    class Meta:
        """Modify the attributes of the form."""
        model = DomainServerConnection
        fields = ('__all__')
        widgets = {
            'project': forms.HiddenInput(),
        }

    def __init__(self, project=None, *args, **kwargs):
        """Restrict the choices to the provided project's checkouts."""
        super(DomainLinkForm, self).__init__(*args, **kwargs)
        if project:
            self.fields['domain'].queryset = History.objects.\
                filter(project=project)
            self.fields['domain'].empty_label = \
                '-- Select a Domain [Required] --'
            self.fields['static_server'].queryset = ServerHistory.objects.\
                filter(project=project)
            self.fields['static_server'].empty_label = \
                '-- Select Static Server --'
            self.fields['transient_server'].queryset = TransientServer.\
                objects.filter(project=project)
            self.fields['transient_server'].empty_label = '-- Select VPS --'
        self.helper = FormHelper()
        self.helper.form_class = 'form-inline'
        self.helper.form_method = 'post'
        self.helper.field_class = \
            'h-100 justify-content-center align-items-center'

    def clean(self):
        """Require exactly one server (static or transient) to be linked."""
        # Call super().clean() so ModelForm's own cross-field validation
        # (e.g. uniqueness checks) still runs; the original skipped it.
        cleaned_data = super().clean()
        # Use .get(): either key may be missing when that field failed its
        # own validation; indexing raised an unhandled KeyError.
        static_server = cleaned_data.get('static_server')
        transient_server = cleaned_data.get('transient_server')
        if static_server and transient_server:
            raise ValidationError(_('Invalid Server Selection: Select only '
                                    'one server'))
        if not static_server and not transient_server:
            raise ValidationError(_('Invalid Server Selection: You must '
                                    'select one server'))
        return cleaned_data
class DomainNoteCreateForm(forms.ModelForm):
    """Form used with the DomainNote CreateView in views..py."""

    class Meta:
        """Modify the attributes of the form."""
        model = DomainNote
        fields = ('__all__')
        widgets = {
            'timestamp': forms.HiddenInput(),
            'operator': forms.HiddenInput(),
            'domain': forms.HiddenInput(),
        }

    def __init__(self, *args, **kwargs):
        """Configure the crispy-forms helper for inline rendering."""
        super().__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_class = 'form-inline'
        helper.form_method = 'post'
        helper.field_class = 'h-100 justify-content-center align-items-center'
        helper.form_show_labels = False
        self.helper = helper
class ServerNoteCreateForm(forms.ModelForm):
    """Form used with the ServerNote CreateView in views.py."""

    class Meta:
        """Modify the attributes of the form."""
        model = ServerNote
        fields = ('__all__')
        widgets = {
            'timestamp': forms.HiddenInput(),
            'operator': forms.HiddenInput(),
            'server': forms.HiddenInput(),
        }

    def __init__(self, *args, **kwargs):
        """Configure the crispy-forms helper for inline rendering."""
        super().__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_class = 'form-inline'
        helper.form_method = 'post'
        helper.field_class = 'h-100 justify-content-center align-items-center'
        helper.form_show_labels = False
        self.helper = helper
class BurnForm(forms.ModelForm):
    """Form used with the `burn` in views.py."""

    class Meta:
        """Modify the attributes of the form."""
        model = Domain
        fields = ('burned_explanation',)

    def __init__(self, *args, **kwargs):
        """Set the placeholder text and configure crispy-forms rendering."""
        super().__init__(*args, **kwargs)
        explanation = self.fields['burned_explanation']
        explanation.widget.attrs['placeholder'] = \
            'This domain was flagged for spam after being used for phishing...'
        helper = FormHelper()
        helper.form_class = 'form-inline'
        helper.form_method = 'post'
        helper.field_class = 'h-100 justify-content-center align-items-center'
        helper.form_show_labels = False
        self.helper = helper
class AuxServerAddressCreateForm(forms.ModelForm):
    """Form used with the AuxAddress CreateView in views.py."""

    class Meta:
        """Modify the attributes of the form."""
        model = AuxServerAddress
        fields = ('__all__')
        widgets = {
            'static_server': forms.HiddenInput(),
        }

    def __init__(self, *args, **kwargs):
        """Configure crispy-forms rendering and the visible field labels."""
        super().__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_class = 'form-inline'
        helper.form_method = 'post'
        helper.field_class = 'h-100 justify-content-center align-items-center'
        self.helper = helper
        self.fields['primary'].label = 'Make Primary Address'
        self.fields['ip_address'].label = ''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Purpose: word segmentation (分词) of a text file with jieba.
import jieba

FILE_NAME = './source.txt'

def fenci(filename):
    """Segment each line of *filename* with jieba and append the
    space-separated tokens to ./tmp1.txt (UTF-8).

    Fixes vs the original Python 2 version: the removed file() builtin
    and print statements, plus handles left open when an error occurred.
    """
    with open(filename, 'r', encoding='utf8') as fp, \
         open('./tmp1.txt', 'a+', encoding='utf8') as fp2:
        for comment in fp:
            for token in jieba.cut(comment):  # segmentation
                print(token)
                # Python 3 text-mode files take str; no manual .encode()
                fp2.write(token)
                fp2.write(' ')
            print('-----------')

if __name__ == "__main__":
    fenci(FILE_NAME)
|
import sys
sys.path.append('../linked_lists');
from node import *
from collections import deque
#make a tree class
class binary_tree:
    """Binary search tree.

    Built on the `node` class imported from ../linked_lists; as used here a
    node's `prev` attribute is its LEFT child, `next` is its RIGHT child and
    `parent` points back up -- TODO confirm against node's definition.
    Duplicates go into the left subtree (all comparisons use `<=`).
    """
    def __init__(self, input_array=[]):
        """Create a tree, optionally populated from *input_array*.

        NOTE(review): mutable default argument; harmless here because the
        list is only read, but `input_array=None` would be safer.
        """
        #initialize tree
        self.root = None;
        self.count = 0;      # number of nodes currently in the tree
        self.ref_list = [];  # flat list of every node; used by delete_value
        #set tree attributes
        self.extend_tree(input_array);
    #searches for a value in the tree, returns the node
    def search_tree(self, input_value):
        """Return the node holding *input_value*, or None if absent."""
        #handling special case
        if (not self.root):
            return None;
        #searching for the value
        curr_ref, prev_ref = self.root, self.root;
        while(curr_ref and curr_ref.value != input_value):
            prev_ref = curr_ref;
            curr_ref = (curr_ref.prev if input_value <= curr_ref.value else curr_ref.next);
        if (not curr_ref):
            return None;
        else:
            return curr_ref;
    def give_depth(self, node):
        """Return the depth (edges from root) of *node*'s value, or inf if absent."""
        #handle special case
        if ( (not self.root) or (not node) ):
            return float('inf');
        #try finding node in tree
        curr, prev, counter = self.root, self.root, 0;
        while(curr and curr.value!=node.value):
            prev = curr;
            curr = (curr.prev if (node.value <= curr.value) else curr.next);
            counter += 1;
        if (not curr):
            return float('inf');
        else:
            return counter;
    def extend_tree(self, input_array):
        """Insert every element of *input_array* (any iterable) into the tree."""
        if (input_array):
            #convert received input to a list
            if (isinstance(input_array, str) or isinstance(input_array, tuple) or isinstance(input_array, set) or isinstance(input_array, dict)):
                input_array = list(input_array);
            #method to add each element to the tree
            def add_element(self, element):
                """Wrap *element* in a node and attach it at a leaf position."""
                #increment counter
                self.count += 1;
                element = node(element);
                self.ref_list.append(element);
                #add the node to the tree
                if (not self.root):
                    self.root = element;
                else:
                    # walk down to the leaf slot for this value
                    prev_ref, curr_ref = None, self.root;
                    while(curr_ref):
                        prev_ref = curr_ref;
                        if (element.value <= curr_ref.value):
                            curr_ref = curr_ref.prev;
                        else:
                            curr_ref = curr_ref.next;
                    #check where the new node fits
                    if (element.value <= prev_ref.value):
                        prev_ref.prev = element;
                    else:
                        prev_ref.next = element;
                    element.parent = prev_ref;
            #add each element to the tree
            for element in input_array:
                add_element(self, element);
    def give_height(self, input_node=None):
        """Return the height of the subtree at *input_node* (root by default);
        an empty subtree has height -1."""
        #handling special case
        if (not input_node):
            input_node = self.root;
        #write a recursive function
        def height(input_node):
            if (not input_node):
                return (-1);
            else:
                return 1+max(height(input_node.prev), height(input_node.next));
        #return the output
        return height(input_node);
    def inorder_traversal(self):
        """Return all nodes in ascending (in-order) order."""
        #repeatedly calling 'successors' to do an in-order walk
        output = [];
        curr_ref = self.give_minimum();
        while(curr_ref):
            output.append(curr_ref);
            curr_ref = self.give_successor(curr_ref);
        #return of list of all the encountered nodes during the in-order walk
        return output;
    def give_minimum(self, input_node=None):
        """Return the smallest node under *input_node* (root by default)."""
        #take care of the special case
        if (not input_node):
            input_node = self.root;
        #traverse as left as possible
        curr_ref = input_node;
        prev_ref = input_node;
        while(curr_ref):
            prev_ref = curr_ref;
            curr_ref = curr_ref.prev;
        #return the minimum node
        return prev_ref;
    def give_maximum(self, input_node=None):
        """Return the largest node under *input_node* (root by default)."""
        #special case
        if (not input_node):
            input_node = self.root;
        #traverse as right as possible
        prev_ref, curr_ref = input_node, input_node;
        while(curr_ref):
            prev_ref = curr_ref;
            curr_ref = curr_ref.next;
        #return the largest node
        return prev_ref;
    def give_successor(self, input_node):
        """Return the in-order successor of *input_node*, or None."""
        #basic error checking
        if (not input_node):
            return None;
        #find the successor
        if (input_node.next):
            #if a right-subtree exists, we return the left-most node in it
            prev_ref = input_node.next;
            next_node = prev_ref.prev;
            while(next_node):
                prev_ref = next_node;
                next_node = next_node.prev;
            return prev_ref;
        else:
            #if no right-subtree exists, we go to the parent...
            #we either return the parent, or the successor of the parent's parent;
            if (input_node.parent):
                if (input_node.parent.prev == input_node):
                    return input_node.parent;
                else:
                    # climb while we are still a right child
                    current_ref = input_node.parent;
                    prev_ref = input_node;
                    while(current_ref and current_ref.next == prev_ref):
                        prev_ref = current_ref;
                        current_ref = current_ref.parent;
                    if (not current_ref):
                        return None;
                    else:
                        return current_ref;
            else:
                return None;
    #prints tree level-by-level
    def print_tree(self):
        """Print the node values one tree level per line (BFS with a
        None sentinel marking the end of each level)."""
        print_list = [];
        queue = deque([self.root, None]);
        counter = self.give_height()+1;
        while(queue and counter):
            curr_ref = queue.popleft();
            #check if we have encountered another level
            if (not curr_ref):
                counter -= 1;
                print(print_list);
                print_list = [];
                queue.append(None);
                continue;
            new_elements = [curr_ref.prev, curr_ref.next];
            new_elements = [element for element in new_elements if element];
            queue.extend(new_elements);
            print_list.append(curr_ref.value);
    #this function returns 'False' if the value couldn't be removed, 'True' otherwise
    def delete_value(self, input_value):
        """Remove one node holding *input_value*; return True on success.

        NOTE(review): the re-attached subtrees' `parent` pointers are not
        updated here, so give_successor/give_predecessor may misbehave
        after a deletion -- verify before relying on them.
        """
        #handle the special case
        if (not self.root):
            return False;
        #find the element to be deleted
        curr, prev = self.root, self.root;
        while(curr and curr.value!=input_value):
            prev = curr;
            if (input_value <= curr.value):
                curr = curr.prev;
            else:
                curr = curr.next;
        #check the cause of termination
        if (not curr):
            return False;
        else:
            #check if the found node is the root, add a pseduo-node to simplify work
            # (assumes numeric values, since it builds a value of root-1)
            is_root = False;
            if (curr == prev):
                is_root = True;
                prev = node(self.root.value-1);
                prev.next = self.root;
            #change the 'operating' node based on whether 'curr' <= 'prev' or not
            connect_node = None;
            if (prev.prev == curr):
                connect_node = -1;
            else:
                connect_node = 1;
            #re-attach the sub-trees of the removed node
            replacement_node = None;
            if (curr.next):
                # right subtree exists: its minimum adopts the left subtree
                min_node = self.give_minimum(curr.next);
                min_node.prev = curr.prev;
                replacement_node = curr.next;
            elif (curr.prev):
                max_node = self.give_maximum(curr.prev);
                max_node.next = curr.next;
                replacement_node = curr.prev;
            if (connect_node == -1):
                prev.prev = replacement_node;
            else:
                prev.next = replacement_node;
            #remove pseudo-node if the removed node was the roor
            if (is_root):
                self.root = prev.next;
            #remove the deleted node from the 'ref_list'
            #NOTE: this makes the algorithm O(n)
            idx = self.ref_list.index(curr);
            self.ref_list.pop(idx);
            self.count -= 1;
            return True;
    def give_predecessor(self, input_node):
        """Return the in-order predecessor of *input_node*, or None."""
        if (not input_node):
            return None;
        #if we have a left-subtree, we return the maximum of it
        if (input_node.prev):
            return self.give_maximum(input_node.prev);
        else:
            #if we have no left-subtree, we check if we are the left-child of our parent
            if (input_node.parent and input_node.parent.next == input_node):
                return input_node.parent;
            else:
                #if we don't have a parent whose right child is us, we find the first ancestor whose right sub-tree contains us
                if (not input_node.parent):
                    return None;
                else:
                    curr_ref, prev_ref = input_node.parent, input_node;
                    while(curr_ref and curr_ref.prev == prev_ref):
                        prev_ref = curr_ref;
                        curr_ref = curr_ref.parent;
                    if (not curr_ref):
                        return None;
                    else:
                        return curr_ref;
#tree = binary_tree();
#tree.extend_tree([0, -1, 1, -3, -3, 3, 2, 5, 7]);
#tree.print_tree();
|
"""Classes to define graphics."""
import datetime
import json
import inspect
from itertools import chain
import os
from django.conf import settings
from django.utils.encoding import smart_bytes, smart_str
from django.utils.translation import gettext as _, gettext_lazy
from modoboa.admin import models as admin_models
from modoboa.lib import exceptions
from modoboa.lib.sysutils import exec_cmd
from modoboa.parameters import tools as param_tools
class Curve:
    """Graphic curve.

    Simple way to represent a graphic curve.
    """

    def __init__(self, dsname, color, legend, cfunc="AVERAGE"):
        """Store the curve definition.

        :param str dsname: RRD data-source name
        :param str color: display colour
        :param legend: label shown for this curve
        :param str cfunc: RRD consolidation function
        """
        self.dsname = dsname
        self.color = color
        self.legend = legend
        self.cfunc = cfunc

    def to_rrd_command_args(self, rrdfile):
        """Convert this curve to the approriate RRDtool command.

        :param str rrdfile: RRD file name
        :return: a list
        """
        full_path = os.path.join(
            param_tools.get_global_parameter("rrd_rootdir"), "%s.rrd" % rrdfile
        )
        # DEF declares the data source, CDEF converts it to a per-minute
        # rate (60 *), and XPORT exposes it under the curve legend.
        definition = 'DEF:%s=%s:%s:%s' % (
            self.dsname, full_path, self.dsname, self.cfunc)
        per_minute = 'CDEF:%(ds)spm=%(ds)s,UN,0,%(ds)s,IF,60,*' % {
            "ds": self.dsname}
        export = 'XPORT:%spm:"%s"' % (self.dsname, self.legend)
        return [definition, per_minute, export]
class Graphic:
    """Graphic.

    Gathers the Curve members defined on the (sub)class and exports
    their data through rrdtool.
    """

    def __init__(self):
        """Collect this graphic's curves.

        If the subclass defines an ``order`` list of attribute names the
        curves are collected in that order; otherwise every Curve member
        found by introspection is used.
        """
        self._curves = []
        # getattr with a default replaces the original
        # try/except-AttributeError wrapper around getattr(self, "order").
        order = getattr(self, "order", None)
        if order is None:
            for member in inspect.getmembers(self):
                if isinstance(member[1], Curve):
                    self._curves += [member[1]]
        else:
            for name in order:
                try:
                    curve = getattr(self, name)
                except AttributeError:
                    continue
                if not isinstance(curve, Curve):
                    continue
                self._curves += [curve]

    @property
    def display_name(self):
        """Lower-cased class name identifying this graphic."""
        return self.__class__.__name__.lower()

    @property
    def rrdtool_binary(self):
        """Return path to rrdtool binary.

        :raises exceptions.InternalError: when rrdtool cannot be located
        """
        dpath = None
        code, output = exec_cmd("which rrdtool")
        if not code:
            dpath = output.strip()
        else:
            known_paths = getattr(
                settings, "RRDTOOL_LOOKUP_PATH",
                ("/usr/bin/rrdtool", "/usr/local/bin/rrdtool")
            )
            # No break: the LAST existing candidate wins; kept as-is to
            # preserve the historical behaviour.
            for fpath in known_paths:
                if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
                    dpath = fpath
        if dpath is None:
            raise exceptions.InternalError(
                _("Failed to locate rrdtool binary."))
        return smart_str(dpath)

    def export(self, rrdfile, start, end):
        """Export data to JSON using rrdtool.

        :param str rrdfile: RRD base file name
        :param start: start timestamp
        :param end: end timestamp
        :return: a list of per-curve series dicts (empty on rrdtool failure)
        """
        result = []
        cmdargs = []
        for curve in self._curves:
            result += [{
                "name": str(curve.legend),
                "backgroundColor": curve.color, "data": []
            }]
            cmdargs += curve.to_rrd_command_args(rrdfile)
        cmd = "{} xport --json -t --start {} --end {} ".format(
            self.rrdtool_binary, str(start), str(end))
        cmd += " ".join(cmdargs)
        code, output = exec_cmd(smart_bytes(cmd))
        if code:
            # rrdtool failed; return no series rather than raising.
            return []
        xport = json.loads(output)
        # Each row is [timestamp, value-per-curve...]; fan the values out
        # into the matching per-curve series.
        for row in xport['data']:
            timestamp = int(row[0])
            date = datetime.datetime.fromtimestamp(timestamp).isoformat(sep=' ')
            for (vindex, value) in enumerate(row[1:]):
                result[vindex]['data'].append(
                    {'x': date, 'y': value, 'timestamp': timestamp}
                )
        return result
class GraphicSet(object):
    """A set of graphics."""

    domain_selector = False
    title = None
    _graphics = []

    def __init__(self, instances=None):
        """Optionally seed the set with pre-built graphic instances."""
        self.__ginstances = [] if instances is None else instances

    @property
    def html_id(self):
        """Lower-cased class name, used as the DOM id."""
        return type(self).__name__.lower()

    @property
    def graphics(self):
        """Lazily instantiate the classes listed in ``_graphics``."""
        if not self.__ginstances:
            self.__ginstances = [cls() for cls in self._graphics]
        return self.__ginstances

    def get_graphic_names(self):
        """Return the display names of the graphic classes."""
        return [graphic.display_name for graphic in self._graphics]

    def get_file_name(self, request, searchq):
        """Return database file name."""
        return self.file_name

    def export(self, rrdfile, start, end, graphic=None):
        """Export every graphic (or only *graphic*) as a name-keyed dict."""
        result = {}
        for graph in self.graphics:
            if graphic is not None and graphic != graph.display_name:
                continue
            result[graph.display_name] = {
                "title": str(graph.title),
                "series": graph.export(rrdfile, start, end)
            }
        return result
class AverageTraffic(Graphic):
    """Average traffic."""
    title = gettext_lazy('Average traffic (msgs/min)')
    # Curve definitions
    sent = Curve("sent", "lawngreen", gettext_lazy("sent messages"))
    recv = Curve("recv", "steelblue", gettext_lazy("received messages"))
    bounced = Curve("bounced", "yellow", gettext_lazy("bounced messages"))
    reject = Curve("reject", "tomato", gettext_lazy("rejected messages"))
    virus = Curve("virus", "orange", gettext_lazy("virus messages"))
    spam = Curve("spam", "silver", gettext_lazy("spam messages"))
    # Display order consumed by Graphic.__init__ when collecting curves.
    order = ["reject", "bounced", "recv", "sent", "virus", "spam"]
    def __init__(self, greylist=False):
        """Optionally add the greylist curve before the base class
        collects the curves listed in ``order``."""
        if greylist:
            self.greylist = Curve(
                "greylist", "dimgrey", gettext_lazy("greylisted messages"))
            # Instance-level order shadows the class attribute above.
            self.order = [
                "reject", "greylist", "bounced", "recv", "sent", "virus",
                "spam"
            ]
        super().__init__()
class AverageTrafficSize(Graphic):
    """Average traffic size."""
    title = gettext_lazy('Average normal traffic size (bytes/min)')
    # Curve definitions (no explicit `order`, so Graphic.__init__ collects
    # these by introspection).
    size_recv = Curve("size_recv", "orange", gettext_lazy("received size"))
    size_sent = Curve(
        "size_sent", "mediumturquoise", gettext_lazy("sent size")
    )
class MailTraffic(GraphicSet):
    """Mail traffic graphic set."""
    domain_selector = True
    title = gettext_lazy("Mail traffic")
    _graphics = [AverageTraffic, AverageTrafficSize]
    def __init__(self, greylist=False):
        """Pre-build instances so the greylist flag reaches AverageTraffic."""
        instances = [AverageTraffic(greylist), AverageTrafficSize()]
        super().__init__(instances)
    def _check_domain_access(self, user, pattern):
        """Check if an administrator can access a domain.
        If a non super administrator asks for the global view, we give him
        a view on the first domain he manage instead.
        :return: a domain name (str) or None.
        """
        if pattern in [None, "global"]:
            if not user.is_superuser:
                domains = admin_models.Domain.objects.get_for_admin(user)
                if not domains.exists():
                    return None
                return domains.first().name
            return "global"
        # Match both domains and domain aliases on the name prefix.
        results = list(
            chain(
                admin_models.Domain.objects.filter(name__startswith=pattern),
                admin_models.DomainAlias.objects.filter(
                    name__startswith=pattern)
            )
        )
        # Ambiguous (or empty) matches are rejected silently.
        if len(results) != 1:
            return None
        if not user.can_access(results[0]):
            raise exceptions.PermDeniedException
        return results[0].name
    def get_file_name(self, user, searchq):
        """Retrieve file name according to user and args."""
        return self._check_domain_access(user, searchq)
class AccountCreationGraphic(Graphic):
    """Account creation over time."""
    title = gettext_lazy("Average account creation (account/hour)")
    # Single curve: hourly rate of new accounts.
    accounts = Curve(
        "new_accounts", "steelblue", gettext_lazy("New accounts"))
class AccountGraphicSet(GraphicSet):
    """A graphic set for accounts."""
    # RRD database base name used by GraphicSet.get_file_name().
    file_name = "new_accounts"
    title = gettext_lazy("Accounts")
    _graphics = [AccountCreationGraphic]
|
import numpy as np
class Arena(object):
    """Square grid world tracking humans and targets for a simple game.

    Grid cells use integer codes; values 1 and 2 mark agents (cleared by
    remove_agents) -- the meaning of other codes is defined by callers,
    TODO confirm.
    """

    def __init__(self, n, h, t):
        """Create an ``n x n`` arena with *h* humans and *t* targets."""
        # np.zeros accepts a dtype directly; the original wrapped it in an
        # extra np.array() call, which made a redundant copy.
        self.arena = np.zeros((n, n), dtype=int)
        self.size = n
        self.humans = h
        self.targets = t
        self.action_space = 3
        self.time = 0
        self.state = 's'  # 's' in progress, 'w' win, 'l' loss

    def game_state(self):
        """Update self.state: loss when no humans remain, else win when no
        targets remain, else still in progress."""
        if self.humans == 0:
            self.state = 'l'  # loss
        elif self.targets == 0:
            self.state = 'w'  # win
        else:
            self.state = 's'  # in progress

    def print_arena(self):
        """Print the raw grid to stdout."""
        print(self.arena)
        return

    def remove_agents(self):
        """Reset every agent cell (codes 1 and 2) back to 0."""
        # One boolean mask instead of two full passes over the grid.
        self.arena[(self.arena == 1) | (self.arena == 2)] = 0
        return
|
import data_utilities
from config import config

# Load the raw dataset, normalise the sensor columns (0-7) and keep the
# coordinate columns (8-9) as-is.
data = data_utilities.get_data(config['data.file'])
sm = data_utilities.normalise_data(data[:, 0:8])
coordm = data[:, 8:10]
# Python 3 print function (the original Python 2 print statement is a
# syntax error on Python 3); comma-separated args keep the same output.
print("Count: sm=", len(sm), "x", len(sm[0]), ", coordm=", len(coordm), "x", len(coordm[0]))
|
import math as m
import random as r
import sys

# Monte-Carlo estimate of pi: for random integers, P(gcd(a, b) == 1) is
# 6 / pi**2, so pi is approximately sqrt(6 / observed probability).
# NOTE: renamed the original `max`/`iter` variables, which shadowed builtins.
upper, trials = int(sys.argv[1]), int(sys.argv[2])

coprime_count = 0
for _ in range(trials):
    # count occurences of gcd == 1
    if m.gcd(r.randrange(1, upper), r.randrange(1, upper)) == 1:
        coprime_count = coprime_count + 1

# Sqrt(6 / probability(gcd == 1))
# will give you an estimation of the value of Pi
print(str(m.sqrt(6 / (coprime_count / trials))))
# A straightforward case analysis on the direction of each step is enough
class Solution:
    def maxTurbulenceSize(self, arr: List[int]) -> int:
        """Length of the longest turbulent subarray of *arr*.

        A subarray is turbulent when consecutive comparisons strictly
        alternate between '<' and '>'. Tracks the current run length and
        the direction of the last comparison in a single pass.
        """
        best = 1
        run = 1
        # Direction of the previous comparison:
        # 1 = rising, 0 = falling, -1 = no comparison yet.
        last_cmp = -1
        for idx in range(1, len(arr)):
            prev_val, cur_val = arr[idx - 1], arr[idx]
            if cur_val > prev_val and last_cmp != 1:
                # Rising step extends an alternating run.
                run += 1
                last_cmp = 1
            elif cur_val < prev_val and last_cmp != 0:
                # Falling step extends an alternating run.
                run += 1
                last_cmp = 0
            elif cur_val == prev_val:
                # Equal values break any run completely.
                run = 1
                last_cmp = -1
            else:
                # Same direction twice: restart with the last two elements.
                run = 2
                last_cmp = 1 if prev_val < cur_val else 0
            best = max(best, run)
        return best
|
def rot_energies(B, Jmin, Jmax):
    """Rigid-rotor energies E = B*J*(J+1) for J in [Jmin, Jmax].

    Returns an empty list when the range is inverted (Jmin > Jmax) or the
    rotational constant is non-positive (B <= 0), matching the physical
    constraints described below.
    """
    if Jmin > Jmax or B <= 0:
        return []
    return [B * J * (J + 1) for J in range(Jmin, Jmax + 1)]
'''
Quantum mechanics tells us that a molecule is only allowed to have specific,
discrete amounts of internal energy. The 'rigid rotor model', a model for
describing rotations, tells us that the amount of rotational energy a molecule can have is given by:
E = B * J * (J + 1),
where J is the state the molecule is in, and B is the 'rotational constant' (specific to the molecular species).
Write a function that returns an array of allowed energies for levels between Jmin and Jmax.
Notes:
return empty array if Jmin is greater than Jmax (as it makes no sense).
Jmin, Jmax are integers.
physically B must be positive, so return empty array if B <= 0
'''
|
# @Author: Xiangxin Kong
# @Date: 2020.5.30
from downloader import *
import tkinter as tk
from tkinter import *
class mainWindow(tk.Tk):
    """Main window: asks for a manga URL and a destination folder."""
    def __init__(self):
        # initialize the widow
        super().__init__()
        super().title('Manhuagui Downloader')
        super().geometry('400x160')
        baseY = 30
        # initialize the labels
        tk.Label(self, text='Url:', font=('Arial', 16,)).place(x=10, y=baseY)
        tk.Label(self, text='To:', font=('Arial', 16,)).place(x=10, y=baseY + 40)
        # entry variables, pre-filled with working defaults
        self.var_address = tk.StringVar()
        self.var_url = tk.StringVar()
        self.var_address.set('manga/')
        self.var_url.set('https://www.manhuagui.com/comic/24973/')
        tk.Entry(self, textvariable=self.var_url, font=('Arial', 14), width=28).place(x=60, y=baseY)  # url field
        tk.Entry(self, textvariable=self.var_address, font=('Arial', 14), width=28).place(x=60,
                                                                                          y=baseY + 40)  # address field
        # initialize the button
        tk.Button(self, text='Download', font=('Arial', 12), command=self.download).place(x=290, y=baseY + 80)
        self.mainloop()
    def download(self):
        """Create a downloader for the entered URL and open the chapter panel."""
        try:
            s = MangaDownloader(self.var_url.get(), self.var_address.get())
        except Exception:
            # Catch only real errors: the original bare `except:` also
            # swallowed SystemExit and KeyboardInterrupt.
            print("Manga not Found")
            self.var_url.set("")
            return
        downloadPanel(s)
class downloadPanel(Toplevel):
    """Chapter-selection window for a MangaDownloader instance."""
    # begin to download
    def __init__(self, s):
        """Build the panel for downloader *s*: manga info, one checkbox per
        chapter, a select-all toggle, and the download button."""
        super().__init__()
        super().title('Manhuagui Downloader')
        super().geometry('900x160')
        # grow the window to fit one checkbox row per 6 chapters
        super().geometry('900x' + str(40 * (s.length // 6) + 260))
        # initialize labels
        self.place_label(s)
        # initialize select buttons
        self.place_buttons(s)
        # initialize check all functions
        var = IntVar()
        def checkAll():
            # Select every chapter, or deselect all still-selectable ones
            # (already-downloaded chapters stay checked and disabled).
            for i in self.buttons:
                if var.get() == 1:
                    i.select()
                elif i.cget("state") == 'normal':
                    i.deselect()
        # check all button
        tk.Checkbutton(self, text='Select All', font=('Arial', 18), variable=var,
                       command=checkAll).place(x=0, y=self.baseY + 80)
        # download button
        tk.Button(self, text='Download', font=('Arial', 16),
                  command=lambda: self.downloadChapters(s)).place(x=450, y=self.baseY + 80)
        self.mainloop()
    def place_label(self, s):
        """Show the manga title plus author/year/region/genre metadata."""
        tk.Label(self, text=s.title, font=('Arial', 33,)).place(x=10, y=10)
        tk.Label(self, text="作者: " + s.author, font=('Arial', 12,)).place(x=10, y=70)
        tk.Label(self, text="年代: " + s.year, font=('Arial', 12,)).place(x=160, y=70)
        tk.Label(self, text="地区: " + s.region, font=('Arial', 12,)).place(x=280, y=70)
        tk.Label(self, text="类型: " + s.plot, font=('Arial', 12,)).place(x=400, y=70)
        self.baseY = 120
    def place_buttons(self, s):
        """Place a checkbox per chapter, 6 per row; chapters that already
        exist on disk are pre-checked and disabled."""
        self.buttons = []
        for i in range(len(s.chapters)):
            # chapters[i] = [name, id, selection-IntVar] -- TODO confirm
            # against MangaDownloader.
            s.chapters[i][2] = IntVar()
            cha = tk.Checkbutton(self, text=s.chapters[i][0], font=('Arial', 14), variable=s.chapters[i][2])
            cha.place(x=(i % 6) * 150, y=self.baseY + (i // 6) * 40)
            if s.chapters[i][0] in s.existedChapters():
                cha.select()
                cha.config(state='disabled')
            self.buttons.append(cha)
        self.baseY += (s.length // 6) * 40
    def downloadChapters(self, s):
        """Download every chapter that is selected and still selectable."""
        for i in range(s.length):
            if self.buttons[i].cget("state") == 'normal' and s.chapters[i][2].get():
                s.downloadChapter(s.chapters[i][1])
if __name__ == '__main__':
mainWindow()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################################
# #
# run_otg_proc.py: run otg process #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Sep 16, 2021 #
# #
#############################################################################################
import os
import sys
import re
import string
import random
import operator
import math
import numpy
import time
import unittest
#
#--- house_keeping/dir_list maps directory variable names (bin_dir, mta_dir,
#--- main_dir, work_dir, arc_dir, ...) to their paths, one "<path> : <name>"
#--- entry per line
#
path = '/data/mta/Script/Dumps/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec() on file contents is acceptable only while
    # dir_list remains a locally controlled, trusted file
    exec("%s = %s" %(var, line))
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
#
#--- set several data lists
#
# limit-switch msids for the high (H) and low (L) energy gratings
h_list = ['4HILSA','4HRLSA','4HILSB', '4HRLSB']
l_list = ['4LILSA','4LRLSA','4LILSBD','4LRLSBD']
l_dict = {'H':h_list, 'L':l_list}
# potentiometer angle msids
ph_list = ['4HPOSARO', '4HPOSBRO']
pl_list = ['4LPOSARO', '4LPOSBRO']
p_dict = {'H':ph_list, 'L':pl_list}
# VCDU counter msids (see analyze_move: ovc for asvt data, cvc otherwise)
ovc = "OFLVCDCT"
cvc = "CCSDSVCD"
#
#--- temp writing file name
#
rtail = int(time.time()*random.random())
zspace = '/tmp/zspace' + str(rtail)
#-----------------------------------------------------------------------------------------------
#-- run_otg_proc: run OTG process --
#-----------------------------------------------------------------------------------------------
def run_otg_proc():
    """
    run OTG process
    input:  none but read from <main_dir>/PRIMARYOTG_*.tl files
    output: <arc_dir>/OTG_summary.rdb (written via gratstat())
    """
#
#--- set temp file
#
    tmp_file = work_dir + 'gratstat.in.tl'
    fmt4 = 0                     # 1 while FMT4 data are being accumulated
#
#--- if gratstat.in.tl already exists, append. otherwise create it.
#
    if os.path.isfile(tmp_file):
        fmt4 = 1
#
#--- open each data file and read data
#
    cmd = 'ls ' + main_dir + '*OTG*tl > ' + zspace
    os.system(cmd)
    tlist = mcf.read_data_file(zspace, remove=1)
    if len(tlist) == 0:
        exit(1)
    for dfile in tlist:
        schk = 0
        tdata = mcf.read_data_file(dfile)
        header = tdata[0]        # column-name row of the tracelog
        seline = tdata[1]        # separator row
        for ent in tdata[2:]:
#
#--- use only FMT4 data
#
            mc = re.search('FMT4', ent)
            if mc is not None:
                schk = 1
                atemp = re.split('FMT4', ent)
                atime = atemp[0]
                # normalize the leading time stamp to yyyyddd.hhmmss form
                ntime = convert_time_format(atime)
                ent = ent.replace(atime.strip(), ntime)
                if fmt4 > 0:
                    with open(tmp_file, 'a') as fo:
                        fo.write(ent + '\n')
                else:
                    # first FMT4 record: create the file with header rows.
                    # NOTE(review): the current data line `ent` is not
                    # written in this branch — confirm it is meant to be
                    # dropped.
                    with open(tmp_file, 'w') as fo:
                        fo.write(header + '\n')
                        fo.write(seline + '\n')
                    fmt4 = 1
            else:
#
#--- a non-FMT4 record: the move finished; create a summary file
#
                if fmt4 > 0:
                    # keep the header line, then de-duplicate the data rows
                    # on column 29 before analyzing
                    cmd = 'head -1 ' + tmp_file + '>' + tmp_file + '.tmp'
                    os.system(cmd)
                    cmd = 'tail -n +2 ' + tmp_file + ' | sort -k 29,29 -u >> '
                    cmd = cmd + tmp_file + '.tmp'
                    os.system(cmd)
                    cmd = 'mv -f ' + tmp_file + '.tmp ' + tmp_file
                    os.system(cmd)
                    gratstat()
                    cmd = 'rm -f ' + tmp_file
                    os.system(cmd)
                    fmt4 = 0
    # final cleanup of any leftover temp file
    cmd = 'rm -f ' + tmp_file
    os.system(cmd)
#-----------------------------------------------------------------------------------------------
#-- convert_time_format: convert tl file time format to yyyddd.hhmmss --
#-----------------------------------------------------------------------------------------------
def convert_time_format(itime):
    """
    convert tl file time format to yyyyddd.hhmmss
    The input is fixed-column "YYYY DDD HH:MM:SS..." text; the output has
    a trailing '00' appended after the seconds.
    input:  itime --- time in tl time format
    output: time in yyyyddd.hhmmss00
    """
    itime = itime.strip()
    # fixed-column slices of the tl time stamp
    year  = itime[0:4]
    ydate = mcf.add_leading_zero(itime[5:8], 3)
    hh    = mcf.add_leading_zero(itime[9:11])
    mm    = mcf.add_leading_zero(itime[12:14])
    ss    = mcf.add_leading_zero(itime[15:17])
    return year + ydate + '.' + hh + mm + ss + '00'
#-----------------------------------------------------------------------------------------------
#-- gratstat: analyze OTG moves --
#-----------------------------------------------------------------------------------------------
def gratstat():
    """
    analyze OTG moves
    input:  <work_dir>/gratstat.in.tl
    output: <arc_dir>/OTG_summary.rdb and one report per move under
            <arc_dir>/Sub_html/
    """
    # separator lines for the text report (70 '*' / 60 '=')
    sline = ''.join([char*70 for char in '*'])
    eline = ''.join([char*60 for char in '='])
#
#--- define variable back-emf thresholds for long moves and nudges
#
    thresh = {'N':3.9, 'L':4.5, 'H':4.5}
    # NOTE(review): this rebinds the local ovc to cvc ('CCSDSVCD'),
    # discarding the module-level 'OFLVCDCT' value — confirm intentional.
    ovc = cvc
#
#--- input file
#
    tmp_file = work_dir + 'gratstat.in.tl'
    if os.path.isfile(tmp_file):
        d_dict = read_gratsat_in(tmp_file)
    else:
        exit(1)
#
#--- initial identification of moves, using back-emf: a record is 'H'/'L'
#--- when both 28 V monitors exceed the previous state's threshold, both
#--- 5 V monitors are above 4.5, and the matching ENAB flag is set
#
    st_list = []
    pst = 'N'
    for k in range(0, len(d_dict['TIME'])):
        st = 'N'
        if (d_dict['4MP28AV'][k] >thresh[pst]) and (d_dict['4MP28BV'][k] > thresh[pst])\
                and (d_dict['4MP5AV'][k] > 4.5) and (d_dict['4MP5BV'][k] > 4.5):
            if d_dict['4HENLBX'][k] == 'ENAB':
                st = 'H'
            if d_dict['4LENLBX'][k] == 'ENAB':
                st = 'L'
        st_list.append(st)
        pst = st
#
#--- open stat summary page
#
    bline = sline + "\nProcessing /data/mta/Script/Dumps/Scripts/gratstat.in.tl\n"
#
#--- find separate move
#
    out = find_moves(d_dict, st_list)
    if out[0] == 'NA':
        print(out[1])
        exit(1)
    else:
        [st_list, m_dict, aline] = out
        bline = bline + aline
#
#--- analyze the moves; keys of m_dict are the start records of long moves
#
    rex = sorted(m_dict.keys())
    rex.append(len(st_list) + 1)      # sentinel so the last move gets an end
    for k in range(0, len(rex) -1):
        ibeg = rex[k]
        iend = rex[k+1] -1
        #iend = rex[k+1]
        otg = m_dict[ibeg]
        oline = bline + eline + '\n'
        oline = oline + '\n\n' + otg + 'ETG move, between records '
        oline = oline + str(ibeg) + ' and ' + str(iend) + '\n'
        [aline, t0] = analyze_move(otg, ibeg, iend, st_list, d_dict, ovc, cvc)
        oline = oline + aline + '\n' + sline + '\n'
        # one report file per move, named by the move start time
        ofile = arc_dir + 'Sub_html/' + str(t0)
        with open(ofile, 'w') as fo:
            fo.write(oline)
#-----------------------------------------------------------------------------------------------
#--read_gratsat_in: read an ACORN tracelog file into a hash of arrays --
#-----------------------------------------------------------------------------------------------
def read_gratsat_in(tmp_file):
    """
    read an ACORN tracelog file into a hash of arrays
    input:  tmp_file --- path to the tracelog file
    output: d_dict   --- data saved in a dictionary form, keyed by
                         upper-cased msid (column name)
    """
    data = mcf.read_data_file(tmp_file)
#
#--- first row of the data keeps column names
#
    header = re.split('\s+', data[0])
    clen = len(header)
    save = []
#
#--- create <clen> numbers of empty lists, one per column
#
    for k in range(0, clen):
        save.append([])
    for ent in data[1:]:
        atemp = re.split('\s+', ent)
        # skip malformed rows that don't match the column count
        if len(atemp) != clen:
            continue
        for k in range(0, clen):
#
#--- convert numerical value into float; column 0 (the time stamp) and
#--- any non-numeric value are kept as stripped text
#
            try:
                if k == 0:
                    save[k].append(atemp[k].strip())
                else:
                    val = float(atemp[k])
                    save[k].append(val)
            except:
                save[k].append(atemp[k].strip())
#
#--- save the data in dict form
#
    d_dict = {}
    for k in range(0, clen):
#
#--- shift the arrays of MSIDs 4HENLBX, 4HEXRBX, 4LENLBX, 4LEXRBX
#--- by 1 element later to align with the back-emf telemetry
#--- (the last sample is wrapped around to the front)
#
        tmsid = header[k].upper()
        if tmsid in ['4HENLBX', '4HEXRBX', '4LENLBX', '4LEXRBX']:
            val = save[k].pop()
            save[k] = [val] + save[k]
        d_dict[tmsid] = save[k]
    return d_dict
#-----------------------------------------------------------------------------------------------
#-- find_moves: find separate OTG moves --
#-----------------------------------------------------------------------------------------------
def find_moves(d_dict, st_list):
    """
    find separate OTG moves
    input:  d_dict  --- dictionary of telemetry lists keyed by msid
            st_list --- per-record grating state: 'H', 'L' or 'N'
    output: [st_list, m_dict, line] on success, where
                st_list --- revised state list
                m_dict  --- {start record: grating} for each long move
                line    --- report text
            ['NA', <message>] on failure
    """
    st_len = len(st_list)
#
#--- check any OTG moves exist
#
    chk = 0
    if 'L' in st_list:
        chk = 1
    elif 'H' in st_list:
        chk = 1
#
#--- if no move return ['NA', <message>]. bug fix: this used to return the
#--- bare string 'NA', which the caller (and the other error paths here)
#--- expect to be a two-element list
#
    if chk == 0:
        return ['NA', "No OTG moves found. Stopping!\n"]
#
#--- there are some movements, analyze farther
#
    m0_list = []        # move start indices
    m1_list = []        # move stop indices
    mg_list = []        # grating ('H' or 'L') of each move
    pst = 'N'
    k0 = 0
    for k in range(0, st_len):
        if st_list[k] == pst:
            continue
        if (pst != 'N') and (st_list[k] != 'N'):
            line = 'Bad state transition at record ' + str(k) + ': '
            line = line + pst + ' to ' + st_list[k] + '. Stopping!\n'
            return ['NA',line]
        if pst == 'N':
            k0 = k
        else:
            m0_list.append(k0)
            m1_list.append(k)
            mg_list.append(pst)
        pst = st_list[k]
    if len(m0_list) == 0:
        line = "No OTG moves found. Stopping!\n"
        return ['NA', line]
#
#--- revise start and stop of long moves, based on bi-level telemetry
#
    line = ''
    for k in range(0, len(m0_list)):
        ml = m1_list[k] - m0_list[k] -1
        if ml < 10:
            continue
        otg = mg_list[k]
        otg2 = otg + 'ETG'
        ben = '4' + otg + 'ENLBX'
        bex = '4' + otg + 'EXRBX'
        # look two records past each end, clipped to the data range
        j0 = m0_list[k] - 2
        if j0 < 0:
            j0 = 0
        j1 = m1_list[k] + 2
        if j1 > st_len:
            j1 = st_len
        nl = 0
        line = line + '\nRevising long ' + str(otg2) + ' move between records ' + str(j0)
        line = line + ' and ' + str(j1) + '\n'
        for m in range(j0, j1):
            st_list[m] = 'N'
            if (d_dict[ben][m] == 'ENAB') and (d_dict[bex][m] == 'ENAB'):
                st_list[m] = otg
                nl += 1
        if abs(ml - nl) > 3:
#
#--- bug fix: this referenced the undefined name "m0" (NameError);
#--- the intended list is m0_list
#
            line = line + '\n>>WARNING! Large revision in move length at record ' + str(m0_list[k]) + ': '
            line = line + str(ml) + ' to ' + str(nl) + '\n'
#
#--- report revised long moves (> 100 records)
#
    pst = 'N'
    m_dict = {}
    k0 = 0
    for k in range(0, st_len):
        if st_list[k] == pst:
            continue
        if (pst != 'N') and (st_list[k] != 'N'):
#
#--- bug fix: the message used str(k) for the new state; report the state
#--- itself, matching the first pass above
#
            line = line + 'Bad state transition at record ' + str(k) + ': '
            line = line + str(pst) + ' to ' + str(st_list[k]) + '. Stopping!\n'
            return ['NA', line]
        if pst == 'N':
            k0 = k
        else:
            if k -k0 > 100:
                m_dict[k0] = pst
        pst = st_list[k]
    if len(m_dict) == 0:
        line = line + "No long OTG moves found. Stopping!\n"
        return ['NA', line]
    return [st_list, m_dict, line]
#-----------------------------------------------------------------------------------------------
#-- analyze_move: report data on OTG move --
#-----------------------------------------------------------------------------------------------
def analyze_move(otg, ibeg, iend, st_list, d_dict, ovc, cvc):
    """
    report data on OTG move
    input:  otg     --- OTG: H, L or N
            ibeg    --- beginning of ETG move
            iend    --- ending of ETG move
            st_list --- a list of position H, L, or N
            d_dict  --- a dictionary of data; keys are msids and give a list of data
            ovc     --- either 'OFLVCDCT' or 'CCSDSVCD'; former if asvt data, otherwise latter
            cvc     --- 'CCSDSVCD'
    output: <arc_dir>/OTG_summary.rdb (one appended record)
            [aline, t0] --- the report text and the move start time
    """
    i0 = []              # record indices where individual movements start
    i1 = []              # record indices where individual movements stop
    dt = []              # per-movement durations (seconds)
    emf_l = []           # back-emf samples collected during long moves
    emf_s = []           # back-emf samples collected during short moves
    l_list = l_dict[otg] #--- see top area for definition
    p_list = p_dict[otg] #--- see top area for definition
    line = otg + 'ETG'
    arc = [line]         # summary record accumulated for the rdb file
    aline = ''
#
#--- find moves: a start is a N->otg transition, a stop is otg->N
#
    pst = 'N'
    for k in range(ibeg, iend):
        if (pst == 'N') and (st_list[k] == otg):
            i0.append(k)
        if (pst == otg) and (st_list[k] == 'N'):
            i1.append(k)
        pst = st_list[k]
    if len(i0) != len(i1):
#
#--- NOTE(review): this error path returns a bare string although the
#--- normal path returns [aline, t0]; the caller unpacks two values, so
#--- this would raise. The "+ 1" on both counts also looks wrong — confirm.
#
        line = 'Oops! Found ' + str(len(i0) + 1) + ' move starts and '
        line = line + str(len(i1) + 1) + 'move stops.\n'
        return line
    if i0[0] < 5:
        aline = aline + '\n >>WARNING! Move starts at data record ' + str(i0[0]) + '\n'
    if (len(st_list) - i1[-1] < 5):
        aline = aline + '\n >>WARNING! Move ends at data record ' + str(i1[-1]) + '\n'
#
#--- move times
#
    t0 = d_dict['TIME'][i0[0]]
    t1 = d_dict['TIME'][i1[-1]]
    aline = aline + '\nMove started at ' + str(t0) + '; VCDU = ' + str(d_dict[ovc][i0[0]]) + '\n'
    aline = aline + 'Move stopped at ' + str(t1) + '; VCDU = ' + str(d_dict[ovc][i1[-1]]) + '\n'
    aline = aline + '\n Number of movements = ' + str(len(i0)) + '\n'
    vcdu = '%10d' % d_dict[ovc][i0[0]]
    arc.append(t0)
    arc.append(vcdu)
    vcdu = '%10d' % d_dict[ovc][i1[-1]]
    arc.append(t1)
    arc.append(vcdu)
    nl = 0               # number of long moves (> 100 s)
    tl = 0               # total duration of long moves
    ns = 0               # number of short moves (< 2 s)
    aline = aline + " Move durations (seconds):" + '\n'
    for k in range(0, len(i0)):
        # duration from the VCDU counter difference (0.25625 s per count)
        dv = (d_dict[cvc][i1[k]] - d_dict[cvc][i0[k]]) * 0.25625
        dt.append(dv)
        if dv > 100:
            for m in range(i0[k], i1[k]-1):
                emf_l.append(d_dict["4MP28AV"][m])
            nl += 1
            tl += dv
        if dv < 2:
            for m in range(i0[k], i1[k]-1):
                emf_s.append(d_dict["4MP28AV"][m])
            ns += 1
        aline = aline + "%6d: %8.3f" % (k+1, dt[k]) + '\n'
#
#--- N_MOVES N_LONG T_LONG N_SHORT
#
    arc.append(len(i0))
    arc.append(nl)
    try:
        vchk = float(tl) / float(nl)     # average long-move duration
    except:
        vchk = 0.0                       # no long moves: avoid divide-by-zero
    vchk = '%3.3f' % vchk
    arc.append(vchk)
#
#--- make sure that ns matches with a definition!
#
    if ns != len(i0) -1:
        ns = len(i0) - 1
    arc.append(ns)
#
#--- Limit switch data
#
    aline = aline + "\n Limit Switch states: " + '\n'
#
#--- pre move: step backwards (up to 5 records) while both 5 V monitors
#--- are high, then report a record shortly before the move
#
    j = 0
    while (d_dict["4MP5AV"][i0[0] -j] > 4.5) and (d_dict["4MP5BV"][i0[0]-j] > 4.5) and (j < 5):
        j += 1
    j -= 1
    if j > 2:
        k = i0[0] -2
    elif j == 2:
        k = i0[0] -1
    elif j < 2:
        k = i0[0]
    aline = aline + " Pre-move: (Time = " + str(d_dict["TIME"][k]) + ')' + '\n'
    for ent in l_list:
        aline = aline + '%-7s = %s' % (ent, d_dict[ent][k]) + '\n'
        arc.append(d_dict[ent][k])
#
#--- post move: same scan, forward from the last stop
#
    j = 0
    while (d_dict["4MP5AV"][i1[-1]+j] > 4.5) and (d_dict["4MP5BV"][i1[-1]+j] > 4.5) and (j < 5):
        j += 1
    j -= 1
    if j > 2:
        k = i1[-1] +2
    elif j == 2:
        k = i1[-1] +1
    elif j < 2:
        k = i1[-1]
    aline = aline + "Post-move: (Time = " + str(d_dict["TIME"][k]) + ')' + '\n'
    for ent in l_list:
        aline = aline + '%-7s = %s' % (ent, d_dict[ent][k]) + '\n'
        arc.append(d_dict[ent][k])
#
#--- potentiometer data
#
    aline = aline + "\nPotentiometer Angles (degrees):\n"
    if i0[0] -2 < 0:
        k0 = 0
    else:
        k0 = i0[0] - 2
    err0 = d_dict['COERRCN'][k0]         # OBC error count before the move
    aline = aline + "Pre-move: (Time = " + str(d_dict['TIME'][k0]) + ')' + '\n'
    for ent in p_list:
        aline = aline + str(ent) + ' = ' + str(d_dict[ent][k0]) + '\n'
        arc.append(d_dict[ent][k0])
    p0_list = arc[-2:]                   # the two pre-move potentiometer angles
#
#--- NOTE(review): when i1[-1] + 2 > len(st_list), k1 is set to
#--- len(st_list), one past the last valid index — confirm unreachable.
#
    if i1[-1] + 2 > len(st_list):
        k1 = len(st_list)
    else:
        k1 = i1[-1] + 2
    err1 = d_dict['COERRCN'][k1]         # OBC error count after the move
    # NOTE(review): unlike the pre-move line, the closing ')' is missing here
    aline = aline + "Post-move: (Time = " + str(d_dict["TIME"][k1]) + '\n'
    for ent in p_list:
        aline = aline + str(ent) + ' = ' + str(d_dict[ent][k1]) + '\n'
        arc.append(d_dict[ent][k1])
    p1_list = arc[-2:]                   # the two post-move potentiometer angles
#
#--- move direction: both pots decreasing = insert, both increasing = retract
#
    dirct = "UNDF"
    if(p0_list[0] > p1_list[0]) and (p0_list[1] > p1_list[1]):
        dirct = "INSR"
    elif(p0_list[0] < p1_list[0]) and (p0_list[1] < p1_list[1]):
        dirct = "RETR"
    aline = aline + "\nMove Direction = " + str(dirct) + '\n'
    arc = [dirct] + arc
#
#--- back-emf data
#
    aline = aline + "\nBack-emf statistics:" + '\n'
    if len(emf_l) > 0:
        aline = aline + "Long moves: " + '\n'
        [emf_min, emf_avg, emf_max] = emf_stats(emf_l)
        aline = aline + "Min. back-emf (V) = " + str(emf_min) + '\n'
        aline = aline + "Avg. back-emf (V) = " + str(emf_avg) + '\n'
        aline = aline + "Max. back-emf (V) = " + str(emf_max) + '\n'
        arc.append(emf_min)
        arc.append(emf_avg)
        arc.append(emf_max)
    else:
        arc.append(0.0)
        arc.append(0.0)
        arc.append(0.0)
    if len(emf_s) > 0:
        aline = aline + "Short moves: " + '\n'
        [emf_min, emf_avg, emf_max] = emf_stats(emf_s)
        aline = aline + "Min. back-emf (V) = " + str(emf_min) + '\n'
        aline = aline + "Avg. back-emf (V) = " + str(emf_avg) + '\n'
        aline = aline + "Max. back-emf (V) = " + str(emf_max) + '\n'
        arc.append(emf_min)
        arc.append(emf_avg)
        arc.append(emf_max)
    else:
        arc.append(0.0)
        arc.append(0.0)
        arc.append(0.0)
#
#--- OBC error count
#
    ediff = err1 - err0
    aline = aline + "\n OBC Error Count increment = " + str(ediff) + '\n'
    arc.append(ediff)
#
#--- print summary record to archive
#
    sumfile = arc_dir + 'OTG_summary.rdb'
    hchk = 0
    if os.path.isfile(sumfile):
        hchk = 1
    fo = open(sumfile, 'a')
#
#--- printing the header (only when the archive file is new)
#
    if hchk == 0:
        hout = prep_file(l_list, p_list, sumfile)
        fo.write(hout)
    for m in range(0, 18):
        fo.write(str(arc[m]) + '\t')
    for m in range(18, 28):
        fo.write('%.2f\t' % arc[m])
    fo.write(str(arc[-1]) + '\n')
    fo.close()
    aline = aline + "\nMove data record appended to " + sumfile + '\n'
    return [aline, t0]
#-----------------------------------------------------------------------------------------------
#-- prep_file: creating header --
#-----------------------------------------------------------------------------------------------
def prep_file(l_list, p_list, sumfile):
    """
    creating header
    input:  l_list  --- a list of limit-switch msids
            p_list  --- a list of potentiometer msids
            sumfile --- output file name (used for the log message only)
    output: line    --- two tab-separated header rows: column names, then
                        column types ('S' string / 'N' numeric)
    """
    hdr1 = ['DIRN', 'GRATING', 'START_TIME', 'START_VCDU', 'STOP_TIME', 'STOP_VCDU',\
            'N_MOVES', 'N_LONG', 'T_LONG', 'N_SHORT']
    hdr2 = ['EMF_MIN_LONG', 'EMF_AVG_LONG', 'EMF_MAX_LONG', 'EMF_MIN_SHORT',\
            'EMF_AVG_SHORT', 'EMF_MAX_SHORT', 'OBC_ERRS']
    htyp = ['S', 'S', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'S', 'S', 'S', 'S', 'S',\
            'S', 'S', 'S', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N']
    # per-msid column variants: 'i<msid>' = pre-move, 'f<msid>' = post-move
    pre_l_list  = ['i' + ent for ent in l_list]
    post_l_list = ['f' + ent for ent in l_list]
    pre_p_list  = ['i' + ent for ent in p_list]
    post_p_list = ['f' + ent for ent in p_list]
    thdr = hdr1 + pre_l_list + post_l_list + pre_p_list + post_p_list + hdr2
    line = '\t'.join(thdr) + '\n' + '\t'.join(htyp) + '\n'
    print("\nRDB header records output to " + sumfile)
    return line
#-----------------------------------------------------------------------------------------------
#-- emf_stats: calculate back-emf statistics --
#-----------------------------------------------------------------------------------------------
def emf_stats(edata):
    """
    calculate back-emf statistics
    Returns [min, mean rounded to 2 dp, max]; [0, 0, 0] for empty input.
    """
    if not edata:
        return [0, 0, 0]
    try:
        mval = numpy.mean(edata)
    except:
        mval = 0.0
    return [min(edata), float('%.2f' % mval), max(edata)]
#-----------------------------------------------------------------------------------------------
# run the full OTG processing chain when invoked as a script
if __name__ == "__main__":
    run_otg_proc()
|
import json
import boto3
def get_launch_configs(region):
    """
    Return the names of all launch configurations in a region.

    Args:
        region: AWS region name (e.g. 'ap-south-1')
    Returns:
        list of launch configuration name strings
    """
    # connect to the autoscaling service in the given region
    client = boto3.client('autoscaling', region_name=region)
    response = client.describe_launch_configurations()
    # pull out just the name of each launch configuration record
    return [cfg['LaunchConfigurationName']
            for cfg in response['LaunchConfigurations']]
def get_auto_scale_groups(region):
    """
    Return the names of all auto-scaling groups in a region.

    Args:
        region: AWS region name (e.g. 'ap-south-1')
    Returns:
        list of auto-scaling group name strings
    """
    client = boto3.client('autoscaling', region_name=region)
    response = client.describe_auto_scaling_groups()
    # pull out just the name of each auto-scaling group record
    return [grp['AutoScalingGroupName']
            for grp in response['AutoScalingGroups']]
def auto_scale_info(region, launch_req, as_req):
    """
    Print a launch configuration and an auto-scaling group in detail.

    Args:
        region: AWS region name
        launch_req: launch configuration name to describe
        as_req: auto-scaling group name to describe

    Bug fix: the previous version collected values in dict-iteration order
    but printed them against a fixed key list, so labels and values could
    be misaligned. Keys are now iterated in the fixed order directly.
    """
    client = boto3.client('autoscaling', region_name=region)
    # describe the requested launch configuration
    response = client.describe_launch_configurations(
        LaunchConfigurationNames=[launch_req]
    )
    response = response['LaunchConfigurations'][0]
    # only these keys of the response are reported
    req_keys = ['LaunchConfigurationName', 'LaunchConfigurationARN', 'ImageId', 'KeyName',
                'SecurityGroups', 'InstanceType', 'BlockDeviceMappings', 'CreatedTime']
    print("Launch Configuration")
    print()
    for key in req_keys:
        if key not in response:
            continue
        if key == 'BlockDeviceMappings':
            # unpack the first block device's EBS settings
            ebs = response[key][0]['Ebs']
            print("VolumeSize : {}".format(ebs['VolumeSize']))
            print('VolumeType : {}'.format(ebs['VolumeType']))
        else:
            print("{0} : {1}".format(key, response[key]))
    print()
    # describe the requested auto-scaling group
    response = client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[as_req]
    )
    response = response['AutoScalingGroups'][0]
    req_keys = ['AutoScalingGroupName', 'AutoScalingGroupARN', 'LaunchConfigurationName', 'MinSize', 'MaxSize',
                'AvailabilityZones', 'HealthCheckType', 'HealthCheckGracePeriod', 'Instances', 'VPCZoneIdentifier']
    for key in req_keys:
        if key not in response:
            continue
        if key == 'Instances':
            # unpack the first instance's identity
            inst = response[key][0]
            print("Instance ID: {}".format(inst['InstanceId']))
            print('AvailabilityZone : {}'.format((inst['AvailabilityZone'])))
        else:
            print("{0} : {1}".format(key, response[key]))
def all_launch_info():
    """Print, as JSON, every launch configuration and auto-scaling group
    across all AWS regions.

    NOTE(review): req_info is filled in dict-iteration order, then zipped
    against fixed key lists (req_keys_conf / req_keys_auto) below; if the
    API response order differs or a key is missing, labels and values will
    be misaligned. `del req_info[j]` also mutates the list while it is
    being indexed by j — confirm both against live responses.
    """
    # Connecting to aws
    conn = boto3.client('ec2')
    # List all regions
    regions = [region['RegionName'] for region in conn.describe_regions()['Regions']]
    launch_info = []
    auto_scale = []
    for region in regions:
        conn = boto3.client('autoscaling', region_name=region)
        response = conn.describe_launch_configurations()
        response = response['LaunchConfigurations']
        for i in response:
            # selecting only the required keys in the dict
            req_keys = ['LaunchConfigurationName', 'LaunchConfigurationARN', 'ImageId', 'KeyName',
                        'SecurityGroups', 'InstanceType', 'BlockDeviceMappings', 'CreatedTime']
            req_info = []
            for k, v in i.items():
                for j in range(len(req_keys)):
                    if k == req_keys[j]:
                        # append only req key info
                        req_info.append(i[k])
            # launch_info.append(req_info)
            for j in range(len(req_info)):
                # replace the BlockDeviceMappings entry by the first EBS
                # volume's size and type, appended at the end
                if req_keys[j] == 'BlockDeviceMappings':
                    temp = req_info[j]
                    temp = temp[0]
                    temp = temp['Ebs']
                    del req_info[j]
                    req_info.append(temp['VolumeSize'])
                    req_info.append(temp['VolumeType'])
            launch_info.append(req_info)
        # Selecting the req as grp
        response = conn.describe_auto_scaling_groups()
        response = response['AutoScalingGroups']
        for i in response:
            # Filter the required keys
            req_keys = ['AutoScalingGroupName', 'AutoScalingGroupARN', 'LaunchConfigurationName', 'MinSize', 'MaxSize',
                        'AvailabilityZones', 'HealthCheckType', 'HealthCheckGracePeriod', 'Instances',
                        'VPCZoneIdentifier']
            req_info = []
            for k, v in i.items():
                for j in range(len(req_keys)):
                    # appending only req info
                    if k == req_keys[j]:
                        req_info.append(i[k])
            for j in range(len(req_info)):
                # replace the Instances entry by the first instance's id
                # and availability zone, appended at the end
                if req_keys[j] == 'Instances':
                    temp = req_info[j]
                    temp = temp[0]
                    del req_info[j]
                    req_info.append(temp['InstanceId'])
                    req_info.append(temp['AvailabilityZone'])
            auto_scale.append(req_info)
    list_conf = []
    list_auto_scale = []
    # key lists matching the flattened value order assembled above
    req_keys_conf = ['LaunchConfigurationName', 'LaunchConfigurationARN', 'ImageId', 'KeyName',
                     'SecurityGroups', 'InstanceType', 'CreatedTime', 'Volume Size', 'Volume Type']
    req_keys_auto = ['AutoScalingGroupName', 'AutoScalingGroupARN', 'LaunchConfigurationName', 'MinSize', 'MaxSize',
                     'AvailabilityZones', 'HealthCheckType', 'HealthCheckGracePeriod',
                     'VPCZoneIdentifier', 'InstanceId', 'AvailabilityZone']
    for i in launch_info:
        dictionary_conf = dict(zip(req_keys_conf, i))
        list_conf.append(dictionary_conf)
    for i in auto_scale:
        dictionary_auto = dict(zip(req_keys_auto, i))
        list_auto_scale.append(dictionary_auto)
    final_dict_conf = {"LaunchConfiguration": list_conf}
    final_dict_auto = {"AutoScale Groups": list_auto_scale}
    # default=str stringifies non-JSON types such as datetime CreatedTime
    json_conf = json.dumps(final_dict_conf, indent=4, default=str)
    json_auto = json.dumps(final_dict_auto, indent=4, default=str)
    print("Launch Configs")
    print(json_conf)
    print()
    print("json_auto")
    print(json_auto)
# example single-region usage, kept for reference:
# launch_value = get_launch_configs('ap-south-1')
# as_grp_value = get_auto_scale_groups('ap-south-1')
# auto_scale_info('ap-south-1', launch_value[0], as_grp_value[0])
all_launch_info()
|
#!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
    """Unittest class for max_integer"""

    def test_max_integer(self):
        """Max appears in the middle of an unsorted list."""
        self.assertEqual(max_integer([9, 5, 1, 7, 5, 3, 8, 5, 2, 4, 5, 6]), 9)

    def test_one(self):
        """A single-element list returns that element."""
        self.assertEqual(max_integer([1]), 1)

    def test_empty(self):
        """An empty list returns None."""
        self.assertEqual(max_integer([]), None)

    def test_sorted(self):
        """An ascending list returns the last element."""
        self.assertEqual(max_integer([1, 2, 3, 4]), 4)

    def test_floats(self):
        """Floats are compared numerically."""
        self.assertEqual(max_integer([1.11, 2.22, 3.33, 4.44]), 4.44)

    def test_string(self):
        """A string is a sequence: the max character is returned."""
        self.assertEqual(max_integer("qwertyuiop"), "y")

    def test_string_nums(self):
        """Digit characters are compared lexicographically."""
        self.assertEqual(max_integer("36902580147"), "9")


if __name__ == '__main__':
    unittest.main()
|
import numpy as np
import operator
import matplotlib.pyplot as plt
x_axis_values = ['QR', 'R1A', 'R1B', 'R1C', 'R2', 'R3', 'FR']
allYears = ["09", "10", "11", "12", "13", "14", "15", "16"]
five_langs = ['C', 'Java', 'Python', 'C#', 'C++']
def make_plot(lang, my_data):
    """Plot the per-round submission percentage for one language.

    my_data is a numpy structured array with fields 'lang' and 'values'
    (a 7-wide int vector, one entry per contest round — see the dtype in
    deal_with_year). Relies on, and MUTATES, the module-level lists
    `other_langs` and `total_percentages` by subtracting this language's
    share, and reads the module-level `plot_values` x positions.
    """
    rows = np.where(my_data['lang'] == lang)
    # per-round totals over all languages (NaN-safe sum down the rows)
    sum_by_columns = np.nansum(my_data["values"], axis=0)
    # the 'values' vector of the first (presumably only) matching row
    values_per_round = my_data[rows][0][1]
    array_percentages = []
    for index, val in enumerate(values_per_round):
        percen = (val / sum_by_columns[index]) * 100
        array_percentages.append(percen)
        # remove this language's share from the "other languages" remainder
        other_langs[index] -= percen
        total_percentages[index] -= percen
    # if lang != 'C++':
    plt.scatter(plot_values, array_percentages)
    plt.plot(plot_values, array_percentages, label=lang, linewidth=2)
def deal_with_year(year):
    """Create and save the per-round language-percentage plot for one year.

    Reads langs_year_<year>.csv, plots each of the five tracked languages
    (via make_plot) plus an aggregated "Other" series, then writes the png
    and closes the figure. Depends on the module-level plot_values /
    other_langs set up by the loop below.
    """
    file_name = '/home/tiaghoul/PycharmProjects/iic-GoogleJamStudy/langsPerYear/langs_year_' + year + '.csv'
    # structured array: one row per language, with a 7-wide per-round count vector
    data = np.genfromtxt(file_name,
                         comments="?", dtype=[('lang', np.str_, 50), ('values', np.int32, (7,))], skip_header=1,
                         delimiter=',')
    number_langs = len(data)
    for lan in five_langs:
        make_plot(lan, data)
    # whatever share remains after the five languages is plotted as "Other"
    plt.scatter(plot_values, other_langs)
    plt.plot(plot_values, other_langs, label="Other " + str(number_langs), linewidth=2)
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), shadow=True, ncol=6, title="Year 20" + year,
               fancybox=True)
    plt.savefig('/home/tiaghoul/PycharmProjects/iic-GoogleJamStudy/images/best_five_evo_per_round_withC++/best_five_evo_per_round_withC++' + year + '.png')
    plt.close()
# one plot per contest year; the percentage accumulators are reset each year
for ano in allYears:
    plot_values = list(range(1, 8))                # x positions, one per round
    plt.xticks(plot_values, x_axis_values)
    other_langs = [100] * 7                        # share not yet attributed to the top five
    total_percentages = [100] * 7
    deal_with_year(ano)
|
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from NeuralGraph.dataset import AllData
from NeuralGraph.model import GraphConvAutoEncoder, QSAR
import pandas as pd
import numpy as np
from NeuralGraph.processing import data_parser
from NeuralGraph.util import Timer
def main():
    """Train the QSAR graph-convolution model on the parsed dataset,
    timing each stage (parse, tensorize, data load, train)."""
    with Timer() as t1:
        data_path = '/home/ubuntu/wangzhongxu/gcnn2/NGFP/dataset'
        pd_filename = 'pd_test.txt'
        pd_lst = data_parser(data_path, pd_filename)
        # dummy label list: train_test_split needs a y of matching length
        tmp_lst = [0 for _ in pd_lst]
    print('data parse:')
    with Timer() as t2:
        # 80/20 split with a fixed seed; the y halves are discarded
        train_set, valid_set, _, _ = train_test_split(pd_lst, tmp_lst, test_size=0.2, random_state=0)
        train_set, valid_set = AllData(train_set, data_path), AllData(valid_set, data_path)
    print('tensorize:')
    with Timer() as t3:
        print(len(train_set), len(valid_set))
        train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
        valid_loader = DataLoader(valid_set, batch_size=BATCH_SIZE)
    print('data load:')
    with Timer() as t4:
        # net = GraphConvAutoEncoder(hid_dim_m=128, hid_dim_p=128, n_class=2)
        # net = net.fit(train_loader, epochs=100)
        net = QSAR(hid_dim_m=216, hid_dim_p=512, n_class=2)
        net = net.fit(train_loader, valid_loader, epochs=N_EPOCH, path='output/gcnn')
    print('model:')


# hyper-parameters used by main(); set only when run as a script
if __name__ == '__main__':
    BATCH_SIZE = 4
    N_EPOCH = 1000
    main()
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import os
import pdb
import shutil
if __name__ == '__main__':
    # collect every html file in the current directory
    files = [f for f in os.listdir('.') if f.endswith('.html')]
    # files.remove('alluvial.html')
    # (input html, uncropped pdf, cropped pdf) per file
    files = [
        (f, f.rsplit('.', 1)[0] + '_full.pdf', f.rsplit('.', 1)[0] + '.pdf')
        for f in files
    ]
    # files = files[:1]
    print('converting to pdf...')
    for f in files:
        # raw string fixes the original mix of '\\b' with a bare '\w',
        # which only worked because '\w' is not a recognized escape
        # (and raises a warning on modern Python)
        cmd = r'"C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe" ' +\
            f[0] + ' ' + f[1]
        os.system(cmd)
    print('cropping pdf...')
    for f in files:
        cmd = 'pdfcrop --margins 15 ' + f[1] + ' ' + f[2]
        os.system(cmd)
    print('copying pdfs...')
    for f in files:
        shutil.copy(
            f[2],
            r'D:\Dropbox\Papers\paper_recnet\2016-07-XX (Revision)\figures'
        )
    print('done')
|
from django.urls import path
from .views import SavedBooksView
urlpatterns = [
    # GET /saves/<slug> -> the user's saved books view
    # (PEP 8: no spaces around '=' for keyword arguments)
    path('saves/<slug>', SavedBooksView.as_view(), name='saves'),
]
|
# _ * _ coding: utf-8 _ * _ #
# @Time :2020/7/23 17:22
# @FileName :project_information_widget.py
# @Author :LiuYang
import dayu_widgets as dy
from PySide2 import QtWidgets
from collections import OrderedDict
Theme = dy.MTheme("dark")
class ProjectInfoWidget(QtWidgets.QWidget):
    """Read-only panel showing a project's basic and delivery information."""

    def __init__(self):
        super(ProjectInfoWidget, self).__init__()
        # vertical stack: "basic info" header + rows, then "delivery info"
        self.MainLayout = QtWidgets.QVBoxLayout(self)
        self.BasicInformationLabel = dy.MLabel(u"基础信息").h3()
        self.BasicInformationLayout = QtWidgets.QVBoxLayout()
        self.DeliveryInformationLabel = dy.MLabel(u"交付信息").h3()
        self.DeliveryInformationLayout = QtWidgets.QVBoxLayout()
        self.setup_ui()
        self.set_style_sheet()

    def setup_ui(self):
        """Assemble the section headers and their (initially empty) layouts."""
        self.MainLayout.addWidget(self.BasicInformationLabel)
        self.MainLayout.addLayout(self.BasicInformationLayout)
        self.MainLayout.addWidget(self.DeliveryInformationLabel)
        self.MainLayout.addLayout(self.DeliveryInformationLayout)

    def set_project_data(self, project_data):
        """Populate both sections with one InfoLabel row per field.

        project_data is expected to expose: name, project_status, duration,
        delivery_time, delivery_platform, episodes, rate, resolution.
        """
        basic_info = OrderedDict()
        basic_info[u"项目名称:"] = project_data.name
        basic_info[u"项目状态:"] = project_data.project_status
        # NOTE(review): label means "description" but the value is
        # project_data.duration — confirm which is intended.
        basic_info[u"描述:"] = project_data.duration
        for info_name, info_value in basic_info.items():
            basic_info_label = InfoLabel(info_name, info_value)
            self.BasicInformationLayout.addWidget(basic_info_label)
        delivery_info = OrderedDict()
        delivery_info[u"交付时间"] = project_data.delivery_time
        delivery_info[u"交付平台"] = project_data.delivery_platform
        delivery_info[u"集数"] = project_data.episodes
        delivery_info[u"帧率"] = project_data.rate
        delivery_info[u"分辨率"] = project_data.resolution
        for info_name, info_value in delivery_info.items():
            delivery_info_label = InfoLabel(info_name, info_value)
            self.DeliveryInformationLayout.addWidget(delivery_info_label)

    def set_style_sheet(self):
        """Apply the shared dark theme."""
        Theme.apply(self)
class InfoLabel(QtWidgets.QWidget):
    """A single "name ... value" row used by ProjectInfoWidget."""

    def __init__(self, info_name, info_value):
        super(InfoLabel, self).__init__()
        self.MainLayout = QtWidgets.QHBoxLayout(self)
        self.infoNameLabel = QtWidgets.QLabel(info_name)
        self.infoValueLabel = QtWidgets.QLabel(info_value)
        self.setup_ui()
        self.set_style_sheet()

    def setup_ui(self):
        """Name on the left, value pushed to the right edge."""
        self.MainLayout.addWidget(self.infoNameLabel)
        self.MainLayout.addStretch()
        self.MainLayout.addWidget(self.infoValueLabel)

    def set_style_sheet(self):
        self.setStyleSheet("color: #cccccc; font-weight:normal;\nfont-size: 14px '微软雅黑'")


# manual test: show the project-info widget standalone
if __name__ == '__main__':
    app = QtWidgets.QApplication([])
    Alert_Example = ProjectInfoWidget()
    Alert_Example.show()
    app.exec_()
|
from postprocessing import unique_features, get_indexes , get_new_predictions, writeoutput
from preprocessing import read, split
# Pipeline: read training data, derive the set of unique features, map them
# onto the quiz set, and rewrite the predictions accordingly.
data = read('data.csv')
# fix: name the result `features` so the imported unique_features()
# function is not shadowed by its own return value
features = unique_features(data)
quiz = read('quiz.csv')
index_dict = get_indexes(features, quiz)
output = read('output.csv')
print('getting new predictions')
modified_output = get_new_predictions(index_dict, output)
writeoutput('modified_output.csv', modified_output['Prediction'])
|
import csv
import json
from argparse import ArgumentParser
from tqdm.auto import tqdm
from pathlib import Path
from utils.ipa_encoder import IPAEncoder
from utils.logger import get_logger
from utils.ipa_utils import IPAError
logger = get_logger('asr.train')
def collect_data(directory, langs_list=None, subset='train', max_num_files=None):
    """Traverses directory collecting input and target files.

    Args:
        directory: base path to extracted audio and transcripts.
        langs_list: optional list of languages to be processed. All languages are processed by default.
        subset: 'train', 'test' or 'dev'
        max_num_files: optional cap; collection stops once reached.
    Returns:
        list of (media_filepath, label, language) tuples
    """
    collected = list()
    lang_dirs = [entry for entry in Path(directory).iterdir() if entry.is_dir()]
    if langs_list is not None:
        lang_dirs = [entry for entry in lang_dirs if entry.name in langs_list]
    for lang_dir in lang_dirs:
        # language code is the first two characters of the directory name
        lang = lang_dir.name[:2]
        logger.info(f'Parsing language {lang}')
        transcript_path = lang_dir / f'{subset}.tsv'
        with open(transcript_path, 'r') as transcript_file:
            rows = csv.reader(transcript_file, dialect='excel-tab')
            next(rows)  # skip header
            for row in rows:
                _, media_name, label = row[:3]
                if '.mp3' not in media_name:
                    media_name += '.mp3'
                media_path = lang_dir / 'clips' / media_name
                # drop missing or zero-byte clips
                if not media_path.exists() or media_path.stat().st_size == 0:
                    logger.warning(f'File {str(media_path)} not found or corrupted.')
                    continue
                collected.append((str(media_path), label, lang))
                if max_num_files is not None and len(collected) >= max_num_files:
                    return collected
    return collected
def encode_data(data_files, encoder, skip_lang_tags=False, **ipa_kwargs):
    """Encode transcript labels into ID sequences.

    Args:
        data_files: result of `collect_data` call
        encoder: instance of IPAEncoder
        skip_lang_tags: passed through to the encoder
        ipa_kwargs: arguments for ipa processing

    Returns:
        list of (media_filepath, encoded_ipa)
    """
    logger.info('Encoding data')
    encoded = []
    skipped = 0
    for media_path, label, lang in tqdm(data_files):
        try:
            ids = encoder.encode(label, lang,
                                 skip_lang_tags=skip_lang_tags, **ipa_kwargs)
        except IPAError:
            # Unencodable transcript: drop the sample, keep a tally.
            skipped += 1
        else:
            encoded.append((media_path, ids))
    if skipped > 0:
        logger.warning(f'{skipped} files skipped due to IPA errors.')
    return encoded
def serialize_encoded_data(encoded_data, output_path, subset='train'):
    """Write (media_filepath, encoded_ipa) rows to `<output_path>/<subset>.tsv`.

    Args:
        encoded_data: result of `encode_data` call.
        output_path: directory in which the TSV file is created.
        subset: 'train', 'test' or 'dev'; used as the output file name.
    """
    filename = str(Path(output_path) / f'{subset}.tsv')
    # BUG FIX: the log f-string had no placeholder and always printed the
    # literal text "(unknown)"; log the actual destination path instead.
    logger.info(f'Serializing data to {filename}')
    with open(filename, 'w') as fid:
        writer = csv.writer(fid, dialect='excel-tab')
        for row in encoded_data:
            writer.writerow(row)
def main():
    """CLI entry point: collect Common Voice clips, encode their transcripts
    into IPA (or plain-text) targets, and serialize everything as a TSV."""
    parser = ArgumentParser()
    parser.add_argument('--dataset_dir', type=str, required=True,
                        help='Path to Common Voice directory')
    parser.add_argument('--output_dir', type=str, required=True,
                        help='Path to directory where to store preprocessed dataset')
    parser.add_argument('--langs', nargs='*', type=str, default=None,
                        help='A list of languages to prepare. ISO-639 names should be used. All languages are processed by default')
    parser.add_argument('--subset', choices=['train', 'test', 'dev'],
                        help='Data subset to prepare')
    # NOTE: the `--not_*` flags use `store_false` with an inverted `dest`, so
    # the corresponding behaviors are *enabled* by default.
    parser.add_argument('--not_remove_semi_stress', action='store_false', dest='remove_semi_stress',
                        help='Disable removing semistress symbol.')
    parser.add_argument('--not_split_diphthongs', action='store_false', dest='split_all_diphthongs',
                        help='Disable splitting diphthongs.')
    parser.add_argument('--not_split_stress_gemination', action='store_false', dest='split_stress_gemination',
                        help='Keep stress and gemination symbols sticked to a phone symbol.')
    parser.add_argument('--not_remove_lang_markers', action='store_false', dest='remove_lang_markers',
                        help='Keep language markers returned by eSpeak-ng.')
    parser.add_argument('--remove_all_stress', action='store_true',
                        help='Remove all stress marks')
    parser.add_argument('--skip_lang_tags', action='store_true',
                        help='Skip language tags.')
    parser.add_argument('--max_samples_count', type=int, default=None,
                        help='Maximal number of audio files to use. Use all by default.')
    parser.add_argument('--plain_text', action='store_true',
                        help='Use characteres as targets instead of IPA.')
    args = parser.parse_args()
    encoder = IPAEncoder(args.output_dir, logger)
    # Pipeline: collect clip/label pairs -> encode labels -> serialize TSV.
    data_files = collect_data(args.dataset_dir, args.langs, args.subset, args.max_samples_count)
    encoded_data = encode_data(data_files, encoder,
                               skip_lang_tags=args.skip_lang_tags,
                               plain_text=args.plain_text,
                               remove_semi_stress=args.remove_semi_stress,
                               split_all_diphthongs=args.split_all_diphthongs,
                               split_stress_gemination=args.split_stress_gemination,
                               remove_lang_markers=args.remove_lang_markers,
                               remove_all_stress=args.remove_all_stress)
    serialize_encoded_data(encoded_data, args.output_dir, args.subset)
    # Persist the vocabulary and the exact arguments used, for reproducibility.
    encoder.save_vocab()
    with open(Path(args.output_dir) / 'args.json', 'w') as fid:
        json.dump(vars(args), fid)
if __name__ == '__main__':
    main()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
import os
import re
import shlex
import textwrap
from dataclasses import dataclass
from enum import Enum
from typing import ClassVar, Iterable, Mapping
from pants.core.util_rules.environments import EnvironmentTarget
from pants.core.util_rules.system_binaries import BashBinary
from pants.engine.fs import CreateDigest, Digest, FileContent, FileDigest, MergeDigests
from pants.engine.internals.selectors import Get
from pants.engine.process import FallibleProcessResult, Process, ProcessCacheScope
from pants.engine.rules import collect_rules, rule
from pants.engine.target import CoarsenedTarget
from pants.jvm.compile import ClasspathEntry
from pants.jvm.resolve.common import Coordinate, Coordinates
from pants.jvm.resolve.coursier_fetch import CoursierLockfileEntry
from pants.jvm.resolve.coursier_setup import Coursier
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import JvmJdkField
from pants.option.global_options import GlobalOptions
from pants.util.docutil import bin_name
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.meta import classproperty
from pants.util.strutil import fmt_memory_size, softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class Nailgun:
    """The fetched nailgun-server jar, wrapped as a classpath entry."""
    classpath_entry: ClasspathEntry
class DefaultJdk(Enum):
    """Sentinel JDK choices used when no explicit version is requested."""
    SYSTEM = "system"
    SOURCE_DEFAULT = "source_default"
@dataclass(frozen=True)
class JdkRequest:
    """Request for a JDK with a specific major version, or a default (`--jvm-jdk` or System)."""
    # Either a concrete version string, or one of the DefaultJdk sentinels.
    version: str | DefaultJdk
    @classproperty
    def SYSTEM(cls) -> JdkRequest:
        # Request whatever JDK the host system provides.
        return JdkRequest(DefaultJdk.SYSTEM)
    @classproperty
    def SOURCE_DEFAULT(cls) -> JdkRequest:
        # Defer to the `[jvm].jdk` option at resolution time.
        return JdkRequest(DefaultJdk.SOURCE_DEFAULT)
    @staticmethod
    def from_field(field: JvmJdkField) -> JdkRequest:
        """Build a request from a target's `jdk=` field value."""
        version = field.value
        if version == "system":
            return JdkRequest.SYSTEM
        return JdkRequest(version) if version is not None else JdkRequest.SOURCE_DEFAULT
    @staticmethod
    def from_target(target: CoarsenedTarget) -> JdkRequest:
        """Build a request from a coarsened target; all members must agree."""
        fields = [t[JvmJdkField] for t in target.members if t.has_field(JvmJdkField)]
        if not fields:
            raise ValueError(
                f"Cannot construct a JDK request for {target}, since none of its "
                f"members have a `{JvmJdkField.alias}=` field:\n{target.bullet_list()}"
            )
        field = fields[0]
        # Every member of the coarsened cycle must request the same JDK.
        if not all(f.value == field.value for f in fields):
            values = {f.value for f in fields}
            raise ValueError(
                f"The members of {target} had mismatched values of the "
                f"`{JvmJdkField.alias}=` field ({values}):\n{target.bullet_list()}"
            )
        return JdkRequest.from_field(field)
@dataclass(frozen=True)
class JdkEnvironment:
    """A resolved JDK: preparation-script digest, nailgun jar, and Coursier state."""
    # Digest containing the jdk preparation script (and the nailgun jar).
    _digest: Digest
    nailgun_jar: str
    coursier: Coursier
    jre_major_version: int
    global_jvm_options: tuple[str, ...]
    # Shell snippet that prints the resolved JAVA_HOME when evaluated.
    java_home_command: str
    # Sandbox-relative locations used by the preparation script.
    bin_dir: ClassVar[str] = "__jdk"
    jdk_preparation_script: ClassVar[str] = f"{bin_dir}/jdk.sh"
    java_home: ClassVar[str] = "__java_home"
    def args(
        self, bash: BashBinary, classpath_entries: Iterable[str], chroot: str | None = None
    ) -> tuple[str, ...]:
        """Return the argv prefix that launches `java` via the preparation script."""
        def in_chroot(path: str) -> str:
            if not chroot:
                return path
            return os.path.join(chroot, path)
        return (
            bash.path,
            in_chroot(self.jdk_preparation_script),
            f"{self.java_home}/bin/java",
            "-cp",
            # The nailgun jar always leads the classpath.
            ":".join([in_chroot(self.nailgun_jar), *classpath_entries]),
        )
    @property
    def env(self) -> dict[str, str]:
        return self.coursier.env
    @property
    def append_only_caches(self) -> dict[str, str]:
        return self.coursier.append_only_caches
    @property
    def immutable_input_digests(self) -> dict[str, Digest]:
        # Coursier's digests plus this JDK's own preparation-script digest.
        return {**self.coursier.immutable_input_digests, self.bin_dir: self._digest}
@dataclass(frozen=True)
class InternalJdk(JdkEnvironment):
    """The JDK configured for internal Pants usage, rather than for matching source compatibility.
    The InternalJdk should only be used in situations where no classfiles are required for a user's
    firstparty or thirdparty code (such as for codegen, or analysis of source files).
    """
    @classmethod
    def from_jdk_environment(cls, env: JdkEnvironment) -> InternalJdk:
        """Re-wrap an existing `JdkEnvironment` without re-resolving anything."""
        return cls(
            env._digest,
            env.nailgun_jar,
            env.coursier,
            env.jre_major_version,
            env.global_jvm_options,
            env.java_home_command,
        )
# Matches the quoted version string in `java -version` banners.
VERSION_REGEX = re.compile(r"version \"(.+?)\"")


def parse_jre_major_version(version_lines: str) -> int | None:
    """Return the major version parsed from `java -version` output, or None.

    The first line containing a `version "X[.Y...]"` token wins; the leading
    component before the first dot is returned as an int.
    """
    matches = (VERSION_REGEX.search(line) for line in version_lines.splitlines())
    first_hit = next((m for m in matches if m), None)
    if first_hit is None:
        return None
    return int(first_hit[1].partition(".")[0])
@rule
async def fetch_nailgun() -> Nailgun:
    """Fetch the pinned nailgun-server jar via the Coursier lockfile machinery."""
    nailgun = await Get(
        ClasspathEntry,
        CoursierLockfileEntry(
            coord=Coordinate.from_coord_str("com.martiansoftware:nailgun-server:0.9.1"),
            file_name="com.martiansoftware_nailgun-server_0.9.1.jar",
            direct_dependencies=Coordinates(),
            dependencies=Coordinates(),
            # Pinned content digest of the jar; keeps the fetch reproducible.
            file_digest=FileDigest(
                fingerprint="4518faa6bf4bd26fccdc4d85e1625dc679381a08d56872d8ad12151dda9cef25",
                serialized_bytes_length=32927,
            ),
        ),
    )
    return Nailgun(nailgun)
@rule
async def internal_jdk(jvm: JvmSubsystem) -> InternalJdk:
    """Creates a `JdkEnvironment` object based on the JVM subsystem options.
    This is used for providing a predictable JDK version for Pants' internal usage rather than for
    matching compatibility with source files (e.g. compilation/testing).
    """
    # Prefer the explicitly configured tool JDK; otherwise use the system JVM.
    request = JdkRequest(jvm.tool_jdk) if jvm.tool_jdk is not None else JdkRequest.SYSTEM
    env = await Get(JdkEnvironment, JdkRequest, request)
    return InternalJdk.from_jdk_environment(env)
@rule
async def prepare_jdk_environment(
    jvm: JvmSubsystem,
    jvm_env_aware: JvmSubsystem.EnvironmentAware,
    coursier: Coursier,
    nailgun_: Nailgun,
    bash: BashBinary,
    request: JdkRequest,
    env_target: EnvironmentTarget,
) -> JdkEnvironment:
    """Resolve a `JdkRequest` into a usable `JdkEnvironment`.

    Locates the requested JDK via Coursier (or the system JVM), verifies it by
    running `java -version`, and packages a preparation script plus the
    nailgun jar into a digest for later JVM processes.
    """
    nailgun = nailgun_.classpath_entry
    version = request.version
    if version == DefaultJdk.SOURCE_DEFAULT:
        version = jvm.jdk
    # TODO: add support for system JDKs with specific version
    if version is DefaultJdk.SYSTEM:
        coursier_jdk_option = "--system-jvm"
    else:
        coursier_jdk_option = shlex.quote(f"--jvm={version}")
    # TODO(#16104) This argument re-writing code should use the native {chroot} support.
    # See also `run` for other argument re-writing code.
    def prefixed(arg: str) -> str:
        # Paths starting with `__` are sandbox-relative and need the absolute prefix.
        if arg.startswith("__"):
            return f"${{PANTS_INTERNAL_ABSOLUTE_PREFIX}}{arg}"
        else:
            return arg
    optionally_prefixed_coursier_args = [
        prefixed(arg) for arg in coursier.args(["java-home", coursier_jdk_option])
    ]
    # NB: We `set +e` in the subshell to ensure that it exits as well.
    # see https://unix.stackexchange.com/a/23099
    java_home_command = " ".join(("set +e;", *optionally_prefixed_coursier_args))
    env = {
        "PANTS_INTERNAL_ABSOLUTE_PREFIX": "",
        **coursier.env,
    }
    # Probe the JDK: `java -version` both triggers the download and captures
    # the version banner (read from stderr below).
    java_version_result = await Get(
        FallibleProcessResult,
        Process(
            argv=(
                bash.path,
                "-c",
                f"$({java_home_command})/bin/java -version",
            ),
            append_only_caches=coursier.append_only_caches,
            immutable_input_digests=coursier.immutable_input_digests,
            env=env,
            description=f"Ensure download of JDK {coursier_jdk_option}.",
            cache_scope=env_target.executable_search_path_cache_scope(),
            level=LogLevel.DEBUG,
        ),
    )
    if java_version_result.exit_code != 0:
        raise ValueError(
            f"Failed to locate Java for JDK `{version}`:\n"
            f"{java_version_result.stderr.decode('utf-8')}"
        )
    java_version = java_version_result.stderr.decode("utf-8").strip()
    jre_major_version = parse_jre_major_version(java_version)
    if not jre_major_version:
        raise ValueError(
            "Pants was unable to parse the output of `java -version` for JDK "
            f"`{request.version}`. Please open an issue at "
            "https://github.com/pantsbuild/pants/issues/new/choose with the following output:\n\n"
            f"{java_version}"
        )
    # TODO: Locate `ln`.
    # Embed the version banner as comments in the preparation script so that a
    # different JDK yields a different script digest.
    version_comment = "\n".join(f"# {line}" for line in java_version.splitlines())
    jdk_preparation_script = textwrap.dedent(  # noqa: PNT20
        f"""\
        # pants javac script using Coursier {coursier_jdk_option}. `java -version`:"
        {version_comment}
        set -eu
        /bin/ln -s "$({java_home_command})" "${{PANTS_INTERNAL_ABSOLUTE_PREFIX}}{JdkEnvironment.java_home}"
        exec "$@"
        """
    )
    jdk_preparation_script_digest = await Get(
        Digest,
        CreateDigest(
            [
                FileContent(
                    os.path.basename(JdkEnvironment.jdk_preparation_script),
                    jdk_preparation_script.encode("utf-8"),
                    is_executable=True,
                ),
            ]
        ),
    )
    return JdkEnvironment(
        _digest=await Get(
            Digest,
            MergeDigests(
                [
                    jdk_preparation_script_digest,
                    nailgun.digest,
                ]
            ),
        ),
        global_jvm_options=jvm_env_aware.global_options,
        nailgun_jar=os.path.join(JdkEnvironment.bin_dir, nailgun.filenames[0]),
        coursier=coursier,
        jre_major_version=jre_major_version,
        java_home_command=java_home_command,
    )
@dataclass(frozen=True)
class JvmProcess:
    """A request to run a JVM command line under a prepared `JdkEnvironment`.

    Converted into a generic engine `Process` by the `jvm_process` rule below.
    """
    jdk: JdkEnvironment
    argv: tuple[str, ...]
    classpath_entries: tuple[str, ...]
    input_digest: Digest
    description: str = dataclasses.field(compare=False)
    level: LogLevel
    extra_jvm_options: tuple[str, ...]
    extra_nailgun_keys: tuple[str, ...]
    output_files: tuple[str, ...]
    output_directories: tuple[str, ...]
    timeout_seconds: int | float | None
    extra_immutable_input_digests: FrozenDict[str, Digest]
    extra_env: FrozenDict[str, str]
    cache_scope: ProcessCacheScope | None
    use_nailgun: bool
    remote_cache_speculation_delay: int | None
    def __init__(
        self,
        jdk: JdkEnvironment,
        argv: Iterable[str],
        classpath_entries: Iterable[str],
        input_digest: Digest,
        description: str,
        level: LogLevel = LogLevel.INFO,
        extra_jvm_options: Iterable[str] | None = None,
        extra_nailgun_keys: Iterable[str] | None = None,
        output_files: Iterable[str] | None = None,
        output_directories: Iterable[str] | None = None,
        extra_immutable_input_digests: Mapping[str, Digest] | None = None,
        extra_env: Mapping[str, str] | None = None,
        timeout_seconds: int | float | None = None,
        cache_scope: ProcessCacheScope | None = None,
        use_nailgun: bool = True,
        remote_cache_speculation_delay: int | None = None,
    ):
        # The dataclass is frozen, so normalize the iterable/optional inputs
        # into immutable values via object.__setattr__.
        object.__setattr__(self, "jdk", jdk)
        object.__setattr__(self, "argv", tuple(argv))
        object.__setattr__(self, "classpath_entries", tuple(classpath_entries))
        object.__setattr__(self, "input_digest", input_digest)
        object.__setattr__(self, "description", description)
        object.__setattr__(self, "level", level)
        object.__setattr__(self, "extra_jvm_options", tuple(extra_jvm_options or ()))
        object.__setattr__(self, "extra_nailgun_keys", tuple(extra_nailgun_keys or ()))
        object.__setattr__(self, "output_files", tuple(output_files or ()))
        object.__setattr__(self, "output_directories", tuple(output_directories or ()))
        object.__setattr__(self, "timeout_seconds", timeout_seconds)
        object.__setattr__(self, "cache_scope", cache_scope)
        object.__setattr__(
            self, "extra_immutable_input_digests", FrozenDict(extra_immutable_input_digests or {})
        )
        object.__setattr__(self, "extra_env", FrozenDict(extra_env or {}))
        object.__setattr__(self, "use_nailgun", use_nailgun)
        object.__setattr__(self, "remote_cache_speculation_delay", remote_cache_speculation_delay)
        # Invoked manually because the generated __init__ (which would call
        # it automatically) is overridden above.
        self.__post_init__()
    def __post_init__(self):
        # Validate the nailgun configuration: extra keys only make sense when
        # nailgun is actually in use.
        if not self.use_nailgun and self.extra_nailgun_keys:
            raise AssertionError(
                "`JvmProcess` specified nailgun keys, but has `use_nailgun=False`. Either "
                "specify `extra_nailgun_keys=None` or `use_nailgun=True`."
            )
# Legal unit suffixes for the -Xmx heap size (bytes, kibi, mebi, gibi).
_JVM_HEAP_SIZE_UNITS = ["", "k", "m", "g"]
@rule
async def jvm_process(
    bash: BashBinary, request: JvmProcess, jvm: JvmSubsystem, global_options: GlobalOptions
) -> Process:
    """Translate a `JvmProcess` into a generic engine `Process`."""
    jdk = request.jdk
    immutable_input_digests = {
        **jdk.immutable_input_digests,
        **request.extra_immutable_input_digests,
    }
    env = {
        "PANTS_INTERNAL_ABSOLUTE_PREFIX": "",
        **jdk.env,
        **request.extra_env,
    }
    def valid_jvm_opt(opt: str) -> str:
        # Reject user-supplied -Xmx: the heap size is owned by the global
        # `process_per_child_memory_usage` option (applied below).
        if opt.startswith("-Xmx"):
            raise ValueError(
                softwrap(
                    f"""
                    Invalid value for JVM options: {opt}.
                    For setting a maximum heap size for the JVM child processes, use
                    `[GLOBAL].process_per_child_memory_usage` option instead.
                    Run `{bin_name()} help-advanced global` for more information.
                    """
                )
            )
        return opt
    max_heap_size = fmt_memory_size(
        global_options.process_per_child_memory_usage, units=_JVM_HEAP_SIZE_UNITS
    )
    jvm_user_options = [*jdk.global_jvm_options, *request.extra_jvm_options]
    jvm_options = [
        f"-Xmx{max_heap_size}",
        *[valid_jvm_opt(opt) for opt in jvm_user_options],
    ]
    use_nailgun = []
    if request.use_nailgun:
        # Nailgun keys: the JDK's immutable inputs plus any extra declared keys.
        use_nailgun = [*jdk.immutable_input_digests, *request.extra_nailgun_keys]
    remote_cache_speculation_delay_millis = 0
    if request.remote_cache_speculation_delay is not None:
        remote_cache_speculation_delay_millis = request.remote_cache_speculation_delay
    elif request.use_nailgun:
        remote_cache_speculation_delay_millis = jvm.nailgun_remote_cache_speculation_delay
    return Process(
        [*jdk.args(bash, request.classpath_entries), *jvm_options, *request.argv],
        input_digest=request.input_digest,
        immutable_input_digests=immutable_input_digests,
        use_nailgun=use_nailgun,
        description=request.description,
        level=request.level,
        output_directories=request.output_directories,
        env=env,
        timeout_seconds=request.timeout_seconds,
        append_only_caches=jdk.append_only_caches,
        output_files=request.output_files,
        cache_scope=request.cache_scope or ProcessCacheScope.SUCCESSFUL,
        remote_cache_speculation_delay_millis=remote_cache_speculation_delay_millis,
    )
def rules():
    """Expose this module's @rule definitions to the Pants engine."""
    return collect_rules()
|
def sum(p):
    """Return 1 + 2 + ... + p (0 when p < 1).

    NOTE: the name shadows the built-in `sum`; it is kept unchanged because
    the script below calls it by this name.
    """
    if p < 1:
        return 0
    # Closed-form arithmetic series replaces the original O(p) loop.
    return p * (p + 1) // 2
# Read the upper bound n from stdin and print 1 + 2 + ... + n.
end = int(input("n: "))
print(sum(end))
# File: exercise0804.py
# Author: Kaiching Chang
# Date: July, 2014
|
# Author:ambiguoustexture
# Date: 2020-02-24
from chunk_analysis import chunk_analysis
file_parsed = './neko.txt.cabocha'
file_result = './verbs_case_frame.txt'
# Extract verb case frames: for every chunk containing a verb, write one TSV
# line holding the verb's base form, the case particles of its dependent
# chunks, and the dependent chunks' surface strings.
with open(file_parsed, 'r') as text_parsed, open(file_result, 'w') as text_result:
    for sentence in chunk_analysis(text_parsed):
        for chunk in sentence:
            verbs = chunk.get_morphs_by_pos('動詞')
            if not verbs:
                continue
            # Dependent chunks that carry a case particle.
            dependents = [sentence[src] for src in chunk.srcs
                          if len(sentence[src].get_case_particle()) > 0]
            if not dependents:
                continue
            # Order dependents by their case particle.
            dependents.sort(key=lambda c: c.get_case_particle())
            particles = ' '.join(c.get_case_particle() for c in dependents)
            chunk_strings = ' '.join(c.get_chunk_string() for c in dependents)
            text_result.write('{}\t{}\t{}\n'.format(verbs[0].base, particles, chunk_strings))
|
def length_of_line(array):
    """Return the Euclidean length of a two-point segment as a '%.2f' string."""
    (x1, y1), (x2, y2) = array
    dx = x2 - x1
    dy = y2 - y1
    return '{:.2f}'.format((dx * dx + dy * dy) ** 0.5)
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import re
from enum import Enum
from typing import Pattern
from pants.base.deprecated import warn_or_error
from pants.engine.addresses import Addresses
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import collect_rules, goal_rule
from pants.engine.target import RegisteredTargetTypes, Tags, Target, UnrecognizedTargetTypeException
from pants.option.option_types import EnumOption, StrListOption
from pants.util.docutil import bin_name
from pants.util.enums import match
from pants.util.filtering import TargetFilter, and_filters, create_filters
from pants.util.memo import memoized
from pants.util.strutil import help_text, softwrap
class TargetGranularity(Enum):
    """Which level of target (BUILD-declared, file-level, or both) to keep."""
    all_targets = "all"
    file_targets = "file"
    build_targets = "BUILD"
class FilterSubsystem(LineOriented, GoalSubsystem):
    """Options for filtering targets by type, address regex, tag regex, and granularity."""
    name = "filter"
    help = help_text(
        """
        Filter the input targets based on various criteria.
        Most of the filtering options below are comma-separated lists of filtering criteria, with
        an implied logical OR between them, so that a target passes the filter if it matches any of
        the criteria in the list. A '-' prefix inverts the sense of the entire comma-separated list,
        so that a target passes the filter only if it matches none of the criteria in the list.
        Each of the filtering options may be specified multiple times, with an implied logical AND
        between them.
        """
    )
    target_type = StrListOption(
        metavar="[+-]type1,type2,...",
        help="Filter on these target types, e.g. `resources` or `python_sources`.",
    )
    granularity = EnumOption(
        default=TargetGranularity.all_targets,
        help=softwrap(
            """
            Filter to rendering only targets declared in BUILD files, only file-level
            targets, or all targets.
            """
        ),
    )
    address_regex = StrListOption(
        metavar="[+-]regex1,regex2,...",
        help="Filter on target addresses matching these regexes.",
    )
    tag_regex = StrListOption(
        metavar="[+-]regex1,regex2,...",
        help="Filter on targets with tags matching these regexes.",
    )
    def target_type_filters(
        self, registered_target_types: RegisteredTargetTypes
    ) -> list[TargetFilter]:
        """Build filters from `--target-type`, validating each alias."""
        def outer_filter(target_alias: str) -> TargetFilter:
            if target_alias not in registered_target_types.aliases:
                raise UnrecognizedTargetTypeException(target_alias, registered_target_types)
            target_type = registered_target_types.aliases_to_types[target_alias]
            # Warn (once, via memoization) when filtering by a deprecated alias.
            if target_type.deprecated_alias and target_alias == target_type.deprecated_alias:
                warn_deprecated_target_type(target_type)
            def inner_filter(tgt: Target) -> bool:
                # Match either the current alias or the target's deprecated alias.
                return tgt.alias == target_alias or bool(
                    tgt.deprecated_alias and tgt.deprecated_alias == target_alias
                )
            return inner_filter
        return create_filters(self.target_type, outer_filter)
    def address_regex_filters(self) -> list[TargetFilter]:
        """Build filters from `--address-regex`."""
        def outer_filter(address_regex: str) -> TargetFilter:
            regex = compile_regex(address_regex)
            return lambda tgt: bool(regex.search(tgt.address.spec))
        return create_filters(self.address_regex, outer_filter)
    def tag_regex_filters(self) -> list[TargetFilter]:
        """Build filters from `--tag-regex`; a target passes if any of its tags matches."""
        def outer_filter(tag_regex: str) -> TargetFilter:
            regex = compile_regex(tag_regex)
            return lambda tgt: any(bool(regex.search(tag)) for tag in tgt.get(Tags).value or ())
        return create_filters(self.tag_regex, outer_filter)
    def granularity_filter(self) -> TargetFilter:
        """Return the single filter implied by `--granularity`."""
        return match(
            self.granularity,
            {
                TargetGranularity.all_targets: lambda _: True,
                TargetGranularity.file_targets: lambda tgt: tgt.address.is_file_target,
                TargetGranularity.build_targets: lambda tgt: not tgt.address.is_file_target,
            },
        )
    def all_filters(self, registered_target_types: RegisteredTargetTypes) -> TargetFilter:
        """AND together every configured filter."""
        return and_filters(
            [
                *self.target_type_filters(registered_target_types),
                *self.address_regex_filters(),
                *self.tag_regex_filters(),
                self.granularity_filter(),
            ]
        )
    def is_specified(self) -> bool:
        """Return true if any of the options are set."""
        return bool(self.target_type or self.address_regex or self.tag_regex or self.granularity)
def compile_regex(regex: str) -> Pattern:
    """Compile `regex`, re-raising with a clearer message on invalid patterns."""
    try:
        return re.compile(regex)
    except re.error as e:
        # BUG FIX: chain the original error (`from e`) so the root cause stays
        # visible in the traceback instead of appearing as an unrelated error.
        raise re.error(f"Invalid regular expression {repr(regex)}: {e}") from e
# Memoized so the deprecation doesn't happen repeatedly.
@memoized
def warn_deprecated_target_type(tgt_type: type[Target]) -> None:
    """Emit the standard deprecation warning for a deprecated target-type alias."""
    assert tgt_type.deprecated_alias_removal_version is not None
    warn_or_error(
        removal_version=tgt_type.deprecated_alias_removal_version,
        entity=f"using `--filter-target-type={tgt_type.deprecated_alias}`",
        hint=f"Use `--filter-target-type={tgt_type.alias}` instead.",
    )
class FilterGoal(Goal):
    """Goal shell for the (deprecated) standalone `filter` goal."""
    subsystem_cls = FilterSubsystem
    environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
@goal_rule
def filter_targets(
    addresses: Addresses, filter_subsystem: FilterSubsystem, console: Console
) -> FilterGoal:
    """Print the (already-filtered) addresses, warning that `filter` is deprecated."""
    # When removing, also remove the special casing in `help_info_extractor.py` to reclassify the
    # subsystem as not a goal with `pants_help`.
    warn_or_error(
        "3.0.0.dev0",
        "using `filter` as a goal",
        softwrap(
            f"""
            You can now specify `filter` arguments with any goal, e.g. `{bin_name()}
            --filter-target-type=python_test test ::`.
            This means that the `filter` goal is now identical to `list`. For example, rather than
            `{bin_name()} filter --target-type=python_test ::`, use
            `{bin_name()} --filter-target-type=python_test list ::`.
            Often, the `filter` goal was combined with `xargs` to build pipelines of commands. You
            can often now simplify those to a single command. Rather than `{bin_name()} filter
            --target-type=python_test filter :: | xargs {bin_name()} test`, simply use
            `{bin_name()} --filter-target-type=python_test test ::`.
            """
        ),
    )
    # `SpecsFilter` will have already filtered for us.
    with filter_subsystem.line_oriented(console) as print_stdout:
        for address in sorted(addresses):
            print_stdout(address.spec)
    return FilterGoal(exit_code=0)
def rules():
    """Expose this module's @rule definitions to the Pants engine."""
    return collect_rules()
|
#!/usr/bin/env python3
# Hack to load parent module
from sys import path
path.append('..')
# Import the Template Orchestrator
from base_orchestrator.base_orchestrator import base_orchestrator, ctl_base
# Import the System and Name methods from the OS module
from os import system, name
# Import signal
import signal
from time import time
from netaddr import IPAddress, IPNetwork
# Import SONAr services
from services.ndb import ndb
from services.path_engine import PathEngine
# Import SONAr modules
from sonar.scoe import scoe
from sonar.she import she
from sonar.nad import nad
def cls():
    """Clear the terminal screen: `cls` on Windows ('nt'), `clear` elsewhere."""
    command = 'cls' if name == 'nt' else 'clear'
    system(command)
class tn_orchestrator(base_orchestrator):
def post_init(self, **kwargs):
# OVS Controller Handler
self.ovs_ctl = ctl_base(
name="OVS",
host_key="ovs_host",
port_key="ovs_port",
default_host="20.1.0.1",
default_port="3200",
request_key="ovs_req",
reply_key="ovs_rep",
create_msg='ovc_crs',
request_msg='ovc_rrs',
update_msg='ovc_urs',
delete_msg='ovc_drs',
topology_msg='ovc_trs')
# setting link speeds manually
# TODO: to create a service to fetch these values automatically from ovsdb or ofconfig
catalog = ndb()
catalog.set_link_capacity('s01','s02', 1000)
catalog.set_link_capacity('s01','s04', 1000)
catalog.set_link_capacity('s02','s03', 1000)
catalog.set_link_capacity('s02','s01', 1000)
catalog.set_link_capacity('s03','s04', 1000)
catalog.set_link_capacity('s03','s02', 1000)
catalog.set_link_capacity('s04','s01', 1000)
catalog.set_link_capacity('s04','s03', 1000)
'''
Setting known hosts and networks manually.
It could be automatic if we develop LLDP and ARP functions in the ovs controller...
... but it is out of scope.
'''
# catalog.add_network('30.0.1.0/24', 's01', 4)
# catalog.add_network('10.0.4.0/24', 's01', 4)
catalog.add_network('30.0.5.0/24', 's01', 3)
catalog.add_network('30.0.6.0/24', 's01', 4)
catalog.add_network('30.0.7.0/24', 's01', 5)
catalog.add_network('10.0.0.0/24', 's03', 3)
# catalog.add_network('10.20.0.0/24', 's05', 3)
# catalog.add_network('10.30.0.0/24', 's05', 3)
def network_info(self, **kwargs):
# Access resource catalogue
catalog = ndb()
# Get network topology
(topo_success, topo_msg) = self.ovs_ctl.get_topology()
if not topo_success:
# Send error message
msg = '[ERROR]: Could not retrieve the network topology from ovs controller'
print('failed', (time()-st)*1000, 'ms')
# Inform the user about the creation
return False, msg
topology = topo_msg.get('topology')
catalog.set_topology(topology)
# Return information
return True, {"tn": {
"topology": catalog.get_topology(),
"capacity": catalog.get_capacity(),
"routes": catalog.get_routes(),
# "networks": catalog.get_networks(),
"usage": catalog.get_usage(),
"flows": catalog.get_flows(),
"virtual_ifaces": catalog.get_virtual_ifaces()
}}
def create_slice(self, **kwargs):
catalog = ndb()
st = time()
# Extract parameters from keyword arguments
s_id = kwargs.get('s_id', None)
source, destination = self.get_address_params(kwargs)
requirements = kwargs.get('requirements', None)
# Append it to the list of service IDs
self.s_ids[s_id] = requirements
# Get network topology
(topo_success, topo_msg) = self.ovs_ctl.get_topology()
if not topo_success:
# Send error message
msg = '[ERROR]: Could not retrieve the network topology from ovs controller'
print('failed', (time()-st)*1000, 'ms')
# Inform the user about the creation
return False, msg
topology = topo_msg.get('topology')
catalog.set_topology(topology)
# Define the route which can support the required QoS
route = self.build_route(topology, source, destination, requirements)
if route is None:
# Send error message
msg = '[WARN]: There is no available path for source '+ str(source) + ' and destination ' + str(destination) + ' supporting the follow QoS: ' + str(requirements)
print('failed', (time()-st)*1000, 'ms')
# Inform the user about the creation
return False, msg
# Send message to OVS SDN controller
self._log('Delegating it to the OVS Controller')
# Send the message to create a slice
success, msg = self.ovs_ctl.create_slice(
**{'s_id': s_id,
'route': route
})
print('success', (time()-st)*1000, 'ms')
if success:
catalog.add_route(s_id, route)
# Inform the user about the creation
return success, msg
def request_slice(self, **kwargs):
s_id = kwargs.get('s_id', None)
catalog = ndb()
if s_id is not None:
route = catalog.get_route(s_id)
if route is not None:
msg = {}
msg[s_id] = route
else:
msg = catalog.get_routes()
return (False, "Service not found.") \
if (s_id and not msg) else (True, msg)
def update_slice(self, **kwargs):
pass
def delete_slice(self, **kwargs):
# Extract parameters from keyword arguments
s_id = kwargs.get('s_id', None)
route = kwargs.get('route', None)
complete_remove = False
catalog = ndb()
if route is None:
# Retrieve the route previously applied
complete_remove = True
route = catalog.get_route(s_id)
if route is None:
return False, 'Route not found for s_id ' + s_id
# Send message to remove slice
success, msg = self.ovs_ctl.delete_slice(**{'s_id': s_id,
'route': route})
if success:
path = route['path']
for p in range(0, len(path) - 1):
catalog.add_flow_count(path[p], path[p + 1], -1)
if route['throughput'] is not None:
catalog.add_link_usage(path[p], path[p + 1], -route['throughput'])
if complete_remove:
catalog.remove_route(s_id)
# Inform the user about the removal
return success, msg
# TODO: initial version is using create_slice service. Change it to have its own services.
def reconfigure_slice(self, **kwargs):
s_id = kwargs.get('s_id', None)
catalog = ndb()
old_route = catalog.get_route(s_id)
source = old_route.get('src')
destination = old_route.get('dst')
latency = old_route.get('latency')
throughput = old_route.get('throughput')
slice_args = {'s_id': s_id,
'source': source,
'destination': destination,
'requirements': {'throughput': throughput,
'latency': latency}
}
print('slice args ', slice_args)
(success, msg) = self.create_slice(**slice_args)
print('create success ', success)
if success:
switches = []
new_route = catalog.get_route(s_id)
print('new_route', new_route)
if old_route.get('path_string') != new_route.get('path_string'):
for old in old_route.get('switches'):
current = self.get_in_switches(old, new_route.get('switches'))
if len(current) > 0:
#if old.get('in_port') != current[0].get('in_port'):
# switches.append(old)
if old.get('in_port') != current[0].get('in_port'):
if old.get('out_port') != current[0].get('out_port'):
old['direction'] = 'full'
switches.append(old)
else:
old['direction'] = 'half-fw'
switches.append(old)
elif old.get('out_port') != current[0].get('out_port'):
old['direction'] = 'half-rv'
switches.append(old)
else:
old['direction'] = 'full'
switches.append(old)
route_to_delete = self.generate_route_to_delete(old_route, switches)
success, msg = self.delete_slice(**{'s_id': s_id,
'route': route_to_delete})
return success, msg
def get_in_switches(self, switch, switches):
return [
i for i in switches
if i.get('node') == switch.get('node')
]
def generate_route_to_delete(self, old_route, switches):
route = old_route
route['switches'] = switches
return route
def find_border_switch(self, address):
catalog = ndb()
resp = None
networks = catalog.get_networks()
for network in networks:
if IPAddress(address) in IPNetwork(network):
resp = networks[network]
break
return resp
def build_route(self, topology, src, dst, requirements):
catalog = ndb()
engine = PathEngine()
# Fetch switches which can arrive to the src and dst networks
src_network = self.find_border_switch(src)
dst_network = self.find_border_switch(dst)
if src_network is None or dst_network is None:
print('\t', 'Impossible to arrive from ', src, 'to ', dst)
return None
# Define the path to apply
path = engine.get_path(topology, src_network.get('switch'), dst_network.get('switch'), requirements)
print('path ', path)
if path is None:
return None
print('\t', 'Path to be applied: ', path)
(ipv4_src, ipv4_src_netmask) = self.convert_cidr_to_netmask(src)
(ipv4_dst, ipv4_dst_netmask) = self.convert_cidr_to_netmask(dst)
(min_rate, max_rate, priority) = self.define_queue_parameters(requirements)
first_port = src_network.get('port')
last_port = dst_network.get('port')
switches = engine.generate_match_switches(topology, path, first_port, last_port)
path_string = '-'.join(map(str, path))
route = {
'src': src,
'dst': dst,
'ipv4_src': ipv4_src,
'ipv4_src_netmask': ipv4_src_netmask,
'ipv4_dst': ipv4_dst,
'ipv4_dst_netmask': ipv4_dst_netmask,
'min_rate': min_rate,
'max_rate': max_rate,
'priority': priority,
'switches': switches,
'path_string': path_string,
'path': path,
'latency': requirements.get('latency'),
'throughput': requirements.get('throughput')
}
return route
def get_address_params(self, kwargs):
    """Pull 'source' and 'destination' out of *kwargs* as bare IPs (mask dropped)."""
    # convert_cidr_to_netmask returns (ip, netmask); keep only the ip part.
    source_ip, _ = self.convert_cidr_to_netmask(kwargs.get('source', None))
    destination_ip, _ = self.convert_cidr_to_netmask(kwargs.get('destination', None))
    return source_ip, destination_ip
def convert_cidr_to_netmask(self, address):
    """Split an 'a.b.c.d/len' string into (ip, dotted-quad netmask).

    A bare address without '/' is treated as a /32 host address.
    Returns a 2-tuple of strings, e.g. ('192.168.1.0', '255.255.255.0').
    """
    if "/" not in address:
        address = address + "/32"
    (ipv4, original_cidr) = address.split('/')
    cidr = int(original_cidr)
    # Build the dotted-quad mask by setting the top `cidr` bits.
    # (The original version also computed the network address into an
    # unused local `net`; that dead code has been removed.)
    mask = [0, 0, 0, 0]
    for i in range(cidr):
        mask[i // 8] |= 1 << (7 - i % 8)
    ipv4_netmask = ".".join(map(str, mask))
    return (ipv4, ipv4_netmask)
def define_queue_parameters(self, requirements):
    """Map SLA requirements onto queue settings (min_rate, max_rate, priority).

    A throughput requirement reserves a guaranteed rate at priority 10;
    a latency requirement overrides the priority to 1 (highest).
    Fields with no matching requirement stay None.
    """
    min_rate, max_rate, priority = None, None, None
    throughput = requirements.get('throughput')
    if throughput is not None:
        min_rate = self.to_byte(throughput)
        priority = 10
    if requirements.get('latency') is not None:
        priority = 1
    return min_rate, max_rate, priority
def to_byte(self, value):
    """Scale *value* by 2**20 (Mi) and truncate to an int.

    Presumably converts a rate given in Mb to base units -- confirm with callers.
    """
    return int(value * (1 << 20))
if __name__ == "__main__":
    # clear screen
    cls()
    # Handle keyboard interrupt (SIGINT)
    try:
        # Instantiate the Transport Network Orchestrator thread; the
        # *_msg/*_header keyword arguments configure its protocol keys.
        tn_orchestrator_thread = tn_orchestrator(
            name='TN',
            req_header='tn_req',
            rep_header='tn_rep',
            error_msg='msg_err',
            create_msg='tn_cc',
            info_msg='ns_tn',
            request_msg='tn_rc',
            update_msg='tn_uc',
            delete_msg='tn_dc',
            topology_msg='tn_tc',
            host='0.0.0.0',
            port=2200
        )
        # Start the Transport Network Orchestrator
        tn_orchestrator_thread.start()
        # Start SONAr modules
        scoe_thread = scoe(tn_orchestrator_thread, "0.0.0.0", 5500)
        scoe_thread.start()
        she_thread = she(tn_orchestrator_thread)
        she_thread.start()
        api_thread = nad()
        api_thread.start()
        # Pause the main thread until a signal (e.g. Ctrl+C) arrives
        signal.pause()
    except KeyboardInterrupt:
        # Terminate the TN Orchestrator Server, then signal each module's
        # shutdown flag and wait for its thread to finish before exiting.
        tn_orchestrator_thread.safe_shutdown()
        scoe_thread.shutdown_flag.set()
        scoe_thread.join()
        she_thread.shutdown_flag.set()
        she_thread.join()
        api_thread.shutdown_flag.set()
        api_thread.join()
|
from django.shortcuts import render, redirect
from .models import Users
def index( request ):
    """Render the landing page, clearing any leftover success flash message."""
    # pop with a default removes the key only if present (no KeyError),
    # matching the original membership-check-then-pop behaviour.
    request.session.pop( 'success_username', None )
    return render( request, "main_app/index.html" )
def add_user( request ):
    """Validate the posted username and create a Users row on success.

    Rules: the username must be 8-16 characters long and not already used
    as the local part of an existing email.  On failure an error message is
    stored in the session and the user is redirected back to the index; on
    success the username is flashed via the session and the user is sent
    to /success.
    """
    request.session['username'] = request.POST['username']
    username = request.session['username']
    if len( username ) < 8:
        request.session['error_msg'] = "Username is NOT valid -- too short"
        return redirect( "/" )
    elif len( username ) > 16:
        request.session['error_msg'] = "Username is NOT valid -- too long"
        return redirect( "/" )
    elif Users.objects.filter( email__startswith = username + "@" ).exists():
        # .exists() issues an EXISTS query instead of fetching every
        # matching row just to count it (len(queryset) did the latter).
        request.session['error_msg'] = "Username is NOT valid -- already used"
        return redirect( "/" )
    else:
        Users.objects.create(
            email = username + "@codingdojo.com"
        )
        request.session['success_username'] = username
        if 'error_msg' in request.session:
            request.session.pop( 'error_msg' )
        request.session.pop( 'username' )
        return redirect( "/success" )
def delete_user( request, id ):
    """Delete the user with primary key *id*, then return to the user list.

    Users.objects.get raises DoesNotExist for an unknown id, same as before.
    """
    doomed = Users.objects.get( id = id )
    doomed.delete()
    return redirect( "/success" )
def success( request ):
    """Show all users plus the username that was just registered.

    Assumes 'success_username' was put in the session by add_user; a direct
    visit without it raises KeyError (unchanged behaviour).
    """
    return render( request, "main_app/success.html", {
        'users': Users.objects.all(),
        'username': request.session['success_username'],
    } )
|
import discord
from discord import ui
from redbot.core import commands
class Test(commands.Cog):
    """Skeleton Red-DiscordBot cog; currently only stores the bot reference."""

    def __init__(self, bot):
        # Keep a handle on the bot for use by future commands/listeners.
        self.bot = bot
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Verifies whether any encodings have changed compared to what's in the
# database.
# Intended for use after upgrading software.
#
import argparse
import sys
import mpeg_settings
import encoder
import optimizer
import pick_codec
import score_tools
def VerifyOneTarget(codecs, rate, videofile, criterion):
  """Re-verify the stored best encoding of one (rate, file) target.

  For each codec, re-encodes/checks the best known encoding and counts how
  many produced a different result than the database remembers.  Returns
  that count.  (Python 2 source: print statements kept as-is.)
  """
  change_count = 0
  for codec_name in codecs:
    codec = pick_codec.PickCodec(codec_name)
    my_optimizer = optimizer.Optimizer(codec,
        score_function=score_tools.PickScorer(criterion))
    bestsofar = my_optimizer.BestEncoding(rate, videofile)
    if not bestsofar.Result():
      # No stored score at all: report it but do not count it as a change.
      print '%s rate %s file %s has no score' % (
        codec_name, rate, videofile.basename)
    else:
      if not bestsofar.VerifyEncode():
        print '%s rate %s file %s has changed encoding' % (
          codec_name, rate, videofile.basename)
        change_count += 1
  return change_count
def VerifyResults(codecs, criterion):
  """Verify every (rate, file) pair in the MPEG set; return total changes."""
  total_changes = 0
  for rate, filename in mpeg_settings.MpegFiles().AllFilesAndRates():
    total_changes += VerifyOneTarget(codecs, rate,
                                     encoder.Videofile(filename), criterion)
  return total_changes
def main():
  """Entry point: parse args, run verification, return a shell exit code."""
  parser = argparse.ArgumentParser()
  # Positional codec names; default is every codec the tool knows about.
  parser.add_argument('codecs', nargs='*',
                      default=pick_codec.AllCodecNames())
  parser.add_argument('--criterion', default='psnr')
  args = parser.parse_args()
  change_count = VerifyResults(args.codecs, args.criterion)
  print 'Number of changes: %d' % change_count
  # Non-zero exit status signals that at least one encoding changed.
  if change_count > 0:
    return 1
  return 0
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit code.
  sys.exit(main())
|
from fasterpay.gateway import Gateway
from random import randint
if __name__ == "__main__":
    # Placeholder credentials must be replaced with real API keys.
    # The third positional argument (True) presumably selects the
    # sandbox/test environment -- confirm against the FasterPay SDK docs.
    gateway = Gateway("<Your Private key>", "<Your Public key>", True)
    # Cancel the recurring subscription attached to order 7923.
    response = gateway.subscription().cancel(order_id=7923)
    print (response)
|
import socket
import logging
from config import configuration
# Pull connection settings from the config object.
i = configuration()
server = i.server
port = i.port
channel = i.channel
user = i.user
passw = i.passw
readbuffer = i.readbuffer  # NOTE(review): never used below -- confirm it is needed
# Start logging
logging.basicConfig(filename="WheeleBot.log", level=logging.DEBUG)
# Connect to server
s = socket.socket()
s.connect((server, port))
# Send required information to authenticate with the server (IRC handshake)
s.send("PASS " + passw + "\r\n")
s.send("NICK " + user + "\r\n")
s.send("USER " + user + "\r\n")
s.send("JOIN " + channel + "\r\n")
# Maintain connection
# NOTE(review): Python 2 source (print statement, str sends). There is no
# PING/PONG handling here, so the server may drop the connection after its
# ping timeout -- confirm whether that is handled elsewhere.
while True:
    data = s.recv(4096)
    print data
    # Main help command
    if data.find('!help') != -1:
        from lib.text_to_send import help_command
        fetch = help_command()
        s.send("PRIVMSG " + channel + " : " + fetch.help_msg)
    # Help with social commands
    elif data.find('!socialCmds') != -1:
        from lib.text_to_send import help_command_social
        fetch = help_command_social()
        s.send("PRIVMSG " + channel + " : " + fetch.help_social)
    # Help with Misc commands
    elif data.find('!miscCmds') != -1:
        from lib.text_to_send import help_command_misc
        fetch = help_command_misc()
        s.send("PRIVMSG " + channel + " : " + fetch.help_misc)
    # More help info
    elif data.find('!infoCmds') != -1:
        from lib.text_to_send import help_command_info
        fetch = help_command_info()
        s.send("PRIVMSG " + channel + " : " + fetch.help_info)
|
# 4x4 sample mine: mine[row][col] holds the amount of gold in that cell.
mine = [
    [ 1, 3, 1, 5 ],
    [ 2, 2, 4, 1 ],
    [ 5, 0, 2, 3 ],
    [ 0, 6, 1, 2 ]
]
def get_max(line, row, n):
    """Return the max of line[row] and its in-bounds vertical neighbours."""
    candidates = [line[row]]
    if row > 0:
        candidates.append(line[row - 1])
    if row < n - 1:
        candidates.append(line[row + 1])
    return max(candidates)
def gold_gain(mine, n):
    """Maximum gold collectable moving left-to-right through an n x n mine.

    From column c a miner may step to column c+1 in the same row or an
    adjacent row.  Dynamic programming runs right-to-left: best[r] is the
    best haul obtainable starting at (r, current column).
    """
    best = [mine[r][-1] for r in range(n)]
    for col in range(n - 2, -1, -1):
        nxt = []
        for row in range(n):
            # Best continuation among the (clamped) neighbour rows;
            # the old module-level get_max helper is inlined here.
            lo = max(row - 1, 0)
            hi = min(row + 2, n)
            nxt.append(mine[row][col] + max(best[lo:hi]))
        best = nxt
    return max(best)
print gold_gain(mine, len(mine)) |
import io
import logging
import os
import tarfile
import tempfile
from typing import Optional
from art.config import ArtConfig
from art.manifest import Manifest
log = logging.getLogger(__name__)
def create_wrapfile(config: "ArtConfig", manifest: "Manifest") -> Optional[str]:
    """Bundle the manifest and every manifest-listed file into a tar "wrapfile".

    :param config: configuration; `wrap` toggles the feature, `work_dir` is
                   the base directory for the files named in the manifest.
    :param manifest: manifest object; `as_json_bytes()` supplies the payload
                     for the embedded `.manifest.json`, `manifest["files"]`
                     maps archive names to {"path": ...} entries.
    :return: path of the created tar file, or None when wrapping is disabled.
             The caller owns (and should eventually delete) the temp file.
    """
    if not config.wrap:
        return None
    # mkstemp replaces the deprecated, race-prone tempfile.mktemp.
    fd, wrap_temp = tempfile.mkstemp(prefix="art-wrap-", suffix=".tar")
    os.close(fd)
    # Context manager guarantees the archive is finalised even on error.
    with tarfile.open(wrap_temp, "w") as wrap_tar:
        # Bug fix: TarInfo.size defaults to 0, so the original code wrote an
        # EMPTY .manifest.json entry.  Set the real payload size first.
        manifest_blob = manifest.as_json_bytes()
        manifest_info = tarfile.TarInfo(name=".manifest.json")
        manifest_info.size = len(manifest_blob)
        wrap_tar.addfile(tarinfo=manifest_info, fileobj=io.BytesIO(manifest_blob))
        for dest_filename, fileinfo in manifest["files"].items():
            local_path = os.path.join(config.work_dir, fileinfo["path"])
            wrap_tar.add(local_path, dest_filename)
    logging.getLogger(__name__).info(
        "Created wrapfile: %d bytes", os.stat(wrap_temp).st_size)
    return wrap_temp
|
""" Script to check conversion from nPE to MeV of neutrons/protons/positrons, respectively, which were simulated
with tut_detsim.py of JUNO offline version J18v1r1-pre1.
Results of this script are used to get the visible energy of a particle depending on the number of PE AND
depending on the position of the energy deposition in the detector.
DIFFERENCE to check_conversion_npe_mev.py:
also the position of the particle inside the detector is used for the conversion from nPE to MeV
(see script check_DMsignal_simulation.py).
With this conversion the cut on the energy of a possible prompt signal can be made in the PE-regime and efficiency
of this cut can be calculated.
"""
import datetime
import ROOT
import NC_background_functions
import numpy as np
from matplotlib import pyplot as plt
from decimal import Decimal
from matplotlib.colors import LogNorm
# get the date and time, when the script was run:
date = datetime.datetime.now()
# formatted as e.g. "2024-01-31 12:05"; usable to time-stamp output plots/files:
now = date.strftime("%Y-%m-%d %H:%M")
def get_npe_redep_qedep_from_file(input_path, filename, first_file, last_file, e_min, e_max):
    """
    function to get total number of PE, distance to the detector center in mm and quenched deposited energy
    (visible energy) in MeV of each event of each file

    :param input_path: path, where the user-root files are saved
    :param filename: name of the files that are read (e.g. user_neutron_500_MeV_)
    :param first_file: number of the first file to be read
    :param last_file: number of the last file to be read
    :param e_min: minimum visible energy in MeV, that should be analyzed
    :param e_max: maximum visible energy in MeV, that should be analyzed
    :return: (array_total_pe, array_r_edep, array_qedep, array_edep) -- four parallel lists with one entry
             per event that passed the [e_min, e_max] window: total number of p.e., distance of the energy
             deposit to the detector center in mm, quenched deposited energy in MeV, deposited energy in MeV
    """
    # preallocate array, where total number of PE is stored:
    array_total_pe = []
    # preallocate array, where distance to detector center of position, where energy is deposited, is stored (in mm):
    array_r_edep = []
    # preallocate array, where quenched deposited energy is stored (in MeV):
    array_qedep = []
    # preallocate array, where deposited energy is stored (in MeV):
    array_edep = []
    # loop over user-root files:
    for filenumber in range(first_file, last_file+1, 1):
        # input user-root file:
        input_file = input_path + filename + "{0:d}.root".format(filenumber)
        # load the ROOT file:
        rfile = ROOT.TFile(input_file)
        # get the "evt"-TTree from the TFile:
        rtree_evt = rfile.Get("evt")
        # get 'prmtrkdep' tree from TFile:
        rtree_prmtrkdep = rfile.Get("prmtrkdep")
        # get number of events of the file:
        number_events_file = rtree_evt.GetEntries()
        # loop over each event:
        for event in range(number_events_file):
            # get the current event in prmtrkdep tree:
            rtree_prmtrkdep.GetEntry(event)
            # get number of initial particles:
            n_init_particles = int(rtree_prmtrkdep.GetBranch("nInitParticles").GetLeaf("nInitParticles").GetValue())
            # preallocate total quenched deposited energy of event in MeV:
            qedep = 0.0
            # loop over number of initial particles, summing their visible energies:
            for index in range(n_init_particles):
                # get quenched deposited energy (visible energy) in MeV:
                qedep_value = float(rtree_prmtrkdep.GetBranch("Qedep").GetLeaf("Qedep").GetValue(index))
                # add it to qedep:
                qedep += qedep_value
            # check if qedep is in the correct energy window (skip event otherwise):
            if qedep > e_max or qedep < e_min:
                continue
            # append qedep to array:
            array_qedep.append(qedep)
            # get the current event in the evt tree:
            rtree_evt.GetEntry(event)
            # get total number of PE:
            total_pe = int(rtree_evt.GetBranch("totalPE").GetLeaf("totalPE").GetValue())
            # append total_pe to array:
            array_total_pe.append(total_pe)
            # get deposited energy of the event in MeV:
            edep = float(rtree_evt.GetBranch("edep").GetLeaf("edep").GetValue())
            # append edep to array:
            array_edep.append(edep)
            # get x, y, z position, where energy is deposited, in mm:
            edepx = float(rtree_evt.GetBranch("edepX").GetLeaf("edepX").GetValue())
            edepy = float(rtree_evt.GetBranch("edepY").GetLeaf("edepY").GetValue())
            edepz = float(rtree_evt.GetBranch("edepZ").GetLeaf("edepZ").GetValue())
            # calculate distance to center in mm:
            r_edep = np.sqrt(edepx**2 + edepy**2 + edepz**2)
            # append r_edep to array:
            array_r_edep.append(r_edep)
    return array_total_pe, array_r_edep, array_qedep, array_edep
# set energy window, that should be analyzed in MeV:
E_min_window = 0.0
E_max_window = 120.0
# preallocate array, where total PE of all events are stored:
# NOTE(review): the three aggregate arrays below are never filled anywhere
# in the active (uncommented) code -- confirm whether they are still needed.
array_totalPE = []
# preallocate array, where distance to center of all events are stored in mm:
array_Redep = []
# preallocate array, where Qedep of all events are stored in MeV:
array_Qedep = []
""" read neutron files: """
print("read neutron events...")
path_neutron = "/local/scratch1/pipc51/astro/blum/conversion_nPE_MeV/neutron_output/"
first_file_neutron = 0
# neutron samples use files numbered 0..99 (100 files per energy):
last_file_neutron = 99
# 10 MeV neutrons:
# filename_neutron_10MeV = "user_neutron_10_MeV_"
# nPE_neutron_10MeV, Redep_neutron_10MeV, Qedep_neutron_10MeV, Edep_neutron_10MeV = \
# get_npe_redep_qedep_from_file(path_neutron, filename_neutron_10MeV, first_file_neutron, last_file_neutron,
# E_min_window, E_max_window)
# 100 MeV neutrons:
# filename_neutron_100MeV = "user_neutron_100_MeV_"
# nPE_neutron_100MeV, Redep_neutron_100MeV, Qedep_neutron_100MeV, Edep_neutron_100MeV = \
# get_npe_redep_qedep_from_file(path_neutron, filename_neutron_100MeV, first_file_neutron, last_file_neutron,
# E_min_window, E_max_window)
# 300 MeV neutrons:
# filename_neutron_300MeV = "user_neutron_300_MeV_"
# nPE_neutron_300MeV, Redep_neutron_300MeV, Qedep_neutron_300MeV, Edep_neutron_300MeV = \
# get_npe_redep_qedep_from_file(path_neutron, filename_neutron_300MeV, first_file_neutron, last_file_neutron,
# E_min_window, E_max_window)
# 500 MeV neutron:
last_file_neutron_500MeV = 699
# filename_neutron_500MeV = "user_neutron_500_MeV_"
# nPE_neutron_500MeV, Redep_neutron_500MeV, Qedep_neutron_500MeV, Edep_neutron_500MeV = \
# get_npe_redep_qedep_from_file(path_neutron, filename_neutron_500MeV, first_file_neutron, last_file_neutron_500MeV,
# E_min_window, E_max_window)
# 1000 MeV neutron:
# filename_neutron_1000MeV = "user_neutron_1000_MeV_"
# nPE_neutron_1000MeV, Redep_neutron_1000MeV, Qedep_neutron_1000MeV, Edep_neutron_1000MeV = \
# get_npe_redep_qedep_from_file(path_neutron, filename_neutron_1000MeV, first_file_neutron, last_file_neutron,
# E_min_window, E_max_window)
# # total PE of all neutron events:
# totalPE_neutron = nPE_neutron_10MeV + nPE_neutron_100MeV + nPE_neutron_300MeV + nPE_neutron_500MeV + nPE_neutron_1000MeV
# # distance to center of all neutron events:
# Redep_neutron = (Redep_neutron_10MeV + Redep_neutron_100MeV + Redep_neutron_300MeV + Redep_neutron_500MeV +
# Redep_neutron_1000MeV)
# # Qedep of all neutron events:
# Qedep_neutron = (Qedep_neutron_10MeV + Qedep_neutron_100MeV + Qedep_neutron_300MeV + Qedep_neutron_500MeV +
# Qedep_neutron_1000MeV)
# Edep of all neutron events:
# Edep_neutron = (Edep_neutron_10MeV + Edep_neutron_100MeV + Edep_neutron_300MeV + Edep_neutron_500MeV +
# Edep_neutron_1000MeV)
""" read proton files: """
print("read proton events...")
path_proton = "/local/scratch1/pipc51/astro/blum/conversion_nPE_MeV/proton_output/"
first_file_proton = 0
last_file_proton = 99
# 10 MeV protons:
# filename_proton_10MeV = "user_proton_10_MeV_"
# nPE_proton_10MeV, Redep_proton_10MeV, Qedep_proton_10MeV, Edep_proton_10MeV = \
# get_npe_redep_qedep_from_file(path_proton, filename_proton_10MeV, first_file_proton, last_file_proton,
# E_min_window, E_max_window)
# 100 MeV proton:
# filename_proton_100MeV = "user_proton_100_MeV_"
# nPE_proton_100MeV, Redep_proton_100MeV, Qedep_proton_100MeV, Edep_proton_100MeV = \
# get_npe_redep_qedep_from_file(path_proton, filename_proton_100MeV, first_file_proton, last_file_proton,
# E_min_window, E_max_window)
# 1000 MeV proton:
# filename_proton_1000MeV = "user_proton_1000_MeV_"
# nPE_proton_1000MeV, Redep_proton_1000MeV, Qedep_proton_1000MeV, Edep_proton_1000MeV = \
# get_npe_redep_qedep_from_file(path_proton, filename_proton_1000MeV, first_file_proton, last_file_proton,
# E_min_window, E_max_window)
# # total PE of all proton events:
# totalPE_proton = nPE_proton_10MeV + nPE_proton_100MeV + nPE_proton_1000MeV
# # distance to center of all proton events:
# Redep_proton = Redep_proton_10MeV + Redep_proton_100MeV + Redep_proton_1000MeV
# # Qedep of all proton events:
# Qedep_proton = Qedep_proton_10MeV + Qedep_proton_100MeV + Qedep_proton_1000MeV
# # Edep of all proton events:
# Edep_proton = Edep_proton_10MeV + Edep_proton_100MeV + Edep_proton_1000MeV
""" read positron files: """
print("read positron events...")
path_positron = "/local/scratch1/pipc51/astro/blum/positron_output/"
first_file_positron = 0
last_file_positron = 99
# positrons from 10 MeV to 100 MeV:
# filename_positron = "user_positron_"
# totalPE_positron, Redep_positron, Qedep_positron, Edep_positron = \
# get_npe_redep_qedep_from_file(path_positron, filename_positron, first_file_positron, last_file_positron,
# E_min_window, E_max_window)
# positrons of 10 MeV:
filename_positron_10MeV = "user_positron_10_MeV_"
totalPE_positron_10MeV, Redep_positron_10MeV, Qedep_positron_10MeV, Edep_positron_10MeV = \
get_npe_redep_qedep_from_file(path_positron, filename_positron_10MeV, first_file_positron, last_file_positron,
E_min_window, E_max_window)
# positrons of 100 MeV:
filename_positron_100MeV = "user_positron_100_MeV_"
totalPE_positron_100MeV, Redep_positron_100MeV, Qedep_positron_100MeV, Edep_positron_100MeV = \
get_npe_redep_qedep_from_file(path_positron, filename_positron_100MeV, first_file_positron, last_file_positron,
E_min_window, E_max_window)
# positrons of 50 MeV in detector center:
path_positron_CDcenter = "/local/scratch1/pipc51/astro/blum/positron_output_CDcenter/"
filename_positron_50MeV = "user_positron_50MeV_"
totalPE_positron_50MeV, Redep_positron_50MeV, Qedep_positron_50MeV, Edep_positron_50MeV = \
get_npe_redep_qedep_from_file(path_positron_CDcenter, filename_positron_50MeV, first_file_positron,
last_file_positron, E_min_window, E_max_window)
# convert list Qedep_positron_50MeV to numpy array:
Qedep_positron_50MeV = np.asarray(Qedep_positron_50MeV)
# smear Qedep_positron_50MeV with the energy resolution of JUNO:
sigma = NC_background_functions.energy_resolution(Qedep_positron_50MeV)
Qedep_positron_50MeV_smeared = np.random.normal(Qedep_positron_50MeV, sigma)
""" read IBD files: """
print("read IBD events...")
path_IBD = "/local/scratch1/pipc51/astro/blum/IBD_hepevt/"
first_file_IBD = 0
last_file_IBD = 199
# IBD (positron and neutron) events from 10 MeV to 100 MeV:
# filename_IBD = "user_IBD_hepevt_"
# totalPE_IBD, Redep_IBD, Qedep_IBD, Edep_IBD = \
# get_npe_redep_qedep_from_file(path_IBD, filename_IBD, first_file_IBD, last_file_IBD, E_min_window, E_max_window)
""" plot Qedep vs. nPE and Qedep vs. Redep: """
# h1 = plt.figure(1, figsize=(15, 8))
# plt.plot(totalPE_neutron, Qedep_neutron, "bx", label="neutron events")
# plt.plot(totalPE_proton, Qedep_proton, "rx", label="proton events")
# plt.plot(totalPE_positron, Qedep_positron, "gx", label="positron events")
# plt.plot(totalPE_IBD, Qedep_IBD, "mx", label="IBD events")
# plt.xlabel("number of p.e.")
# plt.ylabel("visible energy in MeV")
# plt.title("Visible energy vs. number of p.e.")
# plt.grid()
# plt.legend()
#
# h2 = plt.figure(2, figsize=(15, 8))
# plt.plot(Redep_neutron, Qedep_neutron, "bx", label="neutron events")
# plt.plot(Redep_proton, Qedep_proton, "rx", label="proton events")
# plt.plot(Redep_positron, Qedep_positron, "gx", label="positron events")
# plt.plot(Redep_IBD, Qedep_IBD, "mx", label="IBD events")
# plt.xlabel("distance to detector center in mm")
# plt.ylabel("visible energy in MeV")
# plt.title("Visible energy vs. distance to center")
# plt.grid()
# plt.legend()
# Fig. 3: deposited-energy (Edep) spectra of the three positron samples.
h3 = plt.figure(3, figsize=(15, 8))
Bins = np.arange(E_min_window, E_max_window, 0.1)
plt.hist(Edep_positron_10MeV, bins=Bins, label="10 MeV positron")
plt.hist(Edep_positron_100MeV, bins=Bins, label="100 MeV positron")
plt.hist(Edep_positron_50MeV, bins=Bins, label="50 MeV positron")
plt.xlabel("Edep in MeV")
plt.ylabel("entries per bin")
plt.title("Deposited energy of 10 MeV, 50 MeV and 100 MeV positrons")
plt.grid()
plt.legend()
# Fig. 4: Edep vs. radius. NOTE(review): only the 10 MeV sample is drawn
# (the 100 MeV call is commented out) although the title mentions both.
h4 = plt.figure(4, figsize=(15, 8))
plt.hist2d(Edep_positron_10MeV, Redep_positron_10MeV, bins=[Bins, np.arange(0.0, 17700+400, 1000)], cmap="Reds")
# plt.hist2d(Edep_positron_100MeV, Redep_positron_100MeV, bins=[Bins, np.arange(0.0, 17700+400, 1000)], cmap="Reds")
plt.xlabel("Edep in MeV")
plt.ylabel("distance to detector center in mm")
plt.title("10 MeV and 100 MeV positrons")
plt.colorbar()
plt.grid()
# Fig. 5: total p.e. vs. radius (again only the 10 MeV sample is active).
h5 = plt.figure(5, figsize=(15, 8))
Bins_PE = np.arange(min(totalPE_positron_10MeV), max(totalPE_positron_100MeV), 150)
plt.hist2d(totalPE_positron_10MeV, Redep_positron_10MeV, bins=[Bins_PE, np.arange(0.0, 17700+400, 1000)], cmap="Reds")
# plt.hist2d(totalPE_positron_100MeV, Redep_positron_100MeV, bins=[Bins_PE, np.arange(0.0, 17700+400, 1000)], cmap="Reds")
plt.xlabel("total number of p.e.")
plt.ylabel("distance to detector center in mm")
plt.title("10 MeV and 100 MeV positrons")
plt.colorbar()
plt.grid()
# Fig. 6: total-p.e. spectra of all three positron samples.
h6 = plt.figure(6, figsize=(15, 8))
plt.hist(totalPE_positron_10MeV, bins=Bins_PE, label="10 MeV positron")
plt.hist(totalPE_positron_100MeV, bins=Bins_PE, label="100 MeV positron")
plt.hist(totalPE_positron_50MeV, bins=Bins_PE, label="50 MeV positron in CD center")
plt.xlabel("total number of p.e.")
plt.ylabel("entries per bin")
plt.title("total number of p.e. of 10 MeV, 50 MeV and 100 MeV positrons")
plt.grid()
plt.legend()
# Fig. 7: quenched (visible) energy spectra.
h7 = plt.figure(7, figsize=(15, 8))
plt.hist(Qedep_positron_10MeV, bins=Bins, label="10 MeV positron")
plt.hist(Qedep_positron_100MeV, bins=Bins, label="100 MeV positron")
plt.hist(Qedep_positron_50MeV, bins=Bins, label="50 MeV positron in CD center")
plt.xlabel("Qedep in MeV")
plt.ylabel("entries per bin")
plt.title("Quenched deposited energy of 10 MeV, 50 MeV and 100 MeV positrons")
plt.grid()
plt.legend()
# Fig. 8: 50 MeV sample after smearing with the JUNO energy resolution.
h8 = plt.figure(8, figsize=(15, 8))
plt.hist(Qedep_positron_50MeV_smeared, bins=Bins, label="50 MeV positron in CD center\n"
                                                        "(smeared with energy resolution)")
plt.xlabel("Qedep in MeV")
plt.ylabel("entries per bin")
plt.title("Quenched deposited energy of 50 MeV positrons in the detector center")
plt.grid()
plt.legend()
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.