text stringlengths 8 6.05M |
|---|
from tqdm import tqdm
from src.model_neurosat_v2 import NeuroSAT2
from utils.config import Config
import PyMiniSolvers.minisolvers as minisolvers
import random
import numpy as np
import os
from utils.create_database_random import DataGenerator, ProblemsLoader
from src.model_neurosat import *
from torch.utils.tensorboard import SummaryWriter
from src.trainer import train_model, solve_pb
import json
import argparse
import datetime
from utils.utils import str2bool, dir_path, two_args_str_int
import time
# Evaluation entry point: parse CLI options (defaults taken from Config),
# fix all RNG seeds for reproducibility, load a trained NeuroSAT checkpoint
# and run the solver over the test problem set, logging to TensorBoard.
config = Config()
# initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument("--seed", default=config.general.seed, type=two_args_str_int, choices=[0, 1, 2])
parser.add_argument("--task_name", default=config.general.task_name)
parser.add_argument("--device", default=config.general.device, type=two_args_str_int, choices=[0, 1, 2, 3])
parser.add_argument("--nbre_plot", default=config.general.nbre_plot, type=two_args_str_int)
parser.add_argument("--do_it", default=config.generate_data.do_it, type=str2bool, nargs='?', const=False)
parser.add_argument("--n_pairs", default=config.generate_data.n_pairs, type=two_args_str_int)
parser.add_argument("--min_n", default=config.generate_data.min_n, type=two_args_str_int)
parser.add_argument("--max_n", default=config.generate_data.max_n, type=two_args_str_int)
parser.add_argument("--max_nodes_per_batch", default=config.generate_data.max_nodes_per_batch, type=two_args_str_int)
parser.add_argument("--one", default=config.generate_data.one, type=two_args_str_int)
parser.add_argument("--p_k_2", default=config.generate_data.p_k_2, type=two_args_str_int)
parser.add_argument("--p_geo", default=config.generate_data.p_geo, type=two_args_str_int)
parser.add_argument("--train_dir", default=config.path.train_dir)
parser.add_argument("--val_dir", default=config.path.val_dir)
parser.add_argument("--test_dir", default=config.path.test_dir)
parser.add_argument("--logs_tensorboard", default=config.path.logs_tensorboard)
parser.add_argument("--model1", default=config.path.model1, type=dir_path)
parser.add_argument("--model2", default=config.path.model2)
parser.add_argument("--n_epochs", default=config.training.n_epochs, type=two_args_str_int)
parser.add_argument("--embbeding_dim", default=config.training.embbeding_dim, type=two_args_str_int)
parser.add_argument("--weight_decay", default=config.training.weight_decay, type=two_args_str_int)
parser.add_argument("--lr", default=config.training.lr, type=two_args_str_int)
parser.add_argument("--T", default=config.training.T, type=two_args_str_int)
parser.add_argument("--sparse", default=config.training.sparse, type=str2bool, nargs='?', const=False)
parser.add_argument("--l1weight", default=config.training.l1weight, type=two_args_str_int)
parser.add_argument("--sparseKL", default=config.training.sparseKL, type=str2bool, nargs='?', const=False)
parser.add_argument("--KL_distribval", default=config.training.KL_distribval, type=two_args_str_int)
parser.add_argument("--initialisation", default=config.training.initialisation, choices=['random', 'predict_model'])
parser.add_argument("--initialisation_eval", default=config.eval.initialisation, choices=['random', 'predict_model'])
args = parser.parse_args()
# Pick the requested GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:"+str(args.device) if torch.cuda.is_available() else "cpu")
# Timestamp with all separators replaced by underscores, used as run name.
date = str(datetime.datetime.now()).replace(" ", "_").replace("-", "_").replace(":", "_").replace(".", "_")
model_test = args.task_name + "/"
# SummaryWriter creates the run directory; args are dumped next to the logs.
writer = SummaryWriter(args.logs_tensorboard + model_test + date)
path_save_model = args.logs_tensorboard + model_test + date + "/"
with open(path_save_model+'commandline_args.txt', 'w') as f:
    json.dump(args.__dict__, f, indent=2)
print("Use Hardware : ", device)
# Reproductibilites: seed every RNG source and force deterministic cuDNN.
seed = args.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# One file per test problem batch inside test_dir.
test_problems_loader = ProblemsLoader([args.test_dir + "/" + f for f in os.listdir(args.test_dir)])
dataloaders = {'test': test_problems_loader}
model = NeuroSAT(args, device)
# Load the checkpoint saved under the 'state_dict' key and switch to eval mode.
net = torch.load(args.model2, map_location=torch.device(device))
model.load_state_dict(net['state_dict'])
model.eval()
problems_test, train_filename = dataloaders["test"].get_next()
solve_pb(problems_test, model, path_save_model)
|
# /usr/local/lib/python2.7/dist-packages/scrapy/core/scheduler.py |
import os
def clearscreen():
    """Clear the terminal screen, printing progress messages before and after."""
    print("start clear")
    os.system('clear')
    print("clear complete")

def fun1():
    """Backward-compatible alias of clearscreen().

    The original body was an exact copy of clearscreen(); delegating
    removes the duplication while keeping identical behavior.
    """
    clearscreen()
|
# Read integers from stdin (one per line) until a non-numeric entry or EOF,
# then print the multiples of 3 among them.
print('input your elements(one by one in a column) then just enter:')
my_list = []
try:
    while True:
        my_list.append(int(input()))
except (ValueError, EOFError):
    # Bug fix: the original bare `except:` swallowed everything, including
    # KeyboardInterrupt and real programming errors.  Only the two expected
    # terminators (non-numeric entry / end of input) end the read loop now.
    pass
print('your list: ', my_list, end='\n\n')
for i in my_list:
    if i % 3 == 0:
        print(i)
input('press enter to exit')
|
import pylab as P
def plotAB(dataA, dataB):
    """Draw two data series against their own indices on twin y-axes.

    dataA goes on the left axis (blue, solid line); dataB on the right
    axis (green, dashed line).  Both axes share the x-axis.
    """
    # Clear whatever figure is current before opening a fresh one.
    P.clf()
    f = P.figure(figsize=(8, 5))
    ax1 = f.add_subplot(111)
    ax2 = ax1.twinx()
    xA = range(len(dataA))
    lA = ax1.plot(xA, dataA, color="blue", ls="-")
    xB = range(len(dataB))
    # Bug fix: the original rebound `lA` here, silently discarding the
    # handle of the first line.
    lB = ax2.plot(xB, dataB, color="green", ls="--")
    # Cosmetic clean-up: ticks only at the bottom/outside, no top spine.
    ax1.get_xaxis().tick_bottom()
    ax1.get_yaxis().set_tick_params(direction='out')
    ax2.get_yaxis().set_tick_params(direction='out')
    ax1.get_xaxis().set_tick_params(direction='out')
    ax1.spines['top'].set_visible(False)
    ax2.spines['top'].set_visible(False)
def testAB():
    """Smoke-test plotAB with two random series of different lengths."""
    series_a = 20 * P.random(50)
    series_b = 30 + 10 * P.random(52)
    plotAB(series_a, series_b)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 15 15:08:08 2019
@author: Vall
"""
import iv_save_module as ivs
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.widgets as wid
import os
from tkinter import Tk, messagebox
#%%
def interactiveLegend(ax, labels=False, show_default=True,
                      loc='best', **kwargs):
    """Adds an interactive legend (check buttons) to a given axes.

    Parameters
    ----------
    ax : plt.Axes
        The axes to which the interactive legend should be added.
    labels=False : bool, list
        If not false, the list of string names for the different lines that
        are plotted.
    show_default=True : bool, list
        If not bool, the list of boolean values that say whether to show at
        first or not the different lines that are plotted.
    loc='best' : str
        A string that indicates where to add the legend on the plot area.
        Can be 'best', 'upper right', 'upper left', 'lower right',
        'lower left'.

    Returns
    -------
    buttons : wid.Button
        Interactive legend instance.
    """
    # First, get the lines that are currently plotted
    lines = ax.lines
    if labels is False:
        labels = [l.get_label() for l in lines]
    # Now, if needed, correct labels and default show parameters
    try:
        N = len(labels)
    except:
        N = 1
    if N == 1:
        # NOTE(review): list(labels) splits a plain string into characters
        # and raises on a non-iterable single label; presumably [labels]
        # was intended here -- confirm before changing.
        labels = list(labels)
    try:
        M = len(show_default)
    except:
        M = 1
    if M != N and M == 1:
        # Broadcast a single show/hide flag to every label.
        show_default = [show_default for l in labels]
    # Choose legend location: the button box grows with the label count...
    number = len(labels)
    height = .05 * number
    extra_y = .05 * (number - 1)
    try:
        fsize = kwargs['fontsize']
    except:
        fsize = 10
    # ...and its width scales with the font size relative to the 10pt default.
    if fsize == 10:
        width = .23
        extra_x = 0
    else:
        width = .23 * fsize / 10
        extra_x = .23 * (fsize/10 - 1)
    # x0/y0 are (left, right) and (bottom, top) anchor pairs in figure
    # coordinates; popped so they are not forwarded to leg.set() below.
    try:
        x0 = kwargs.pop('x0')
    except:
        x0 = (.14, .65)
    try:
        y0 = kwargs.pop('y0')
    except:
        y0 = (.03, .81)
    if loc=='best':
        # Put the legend in the corner with the most empty space, judged by
        # how far the data extremes sit from the axes limits.
        xmin = min([min(l.get_data()[0]) for l in lines])
        xmax = max([max(l.get_data()[0]) for l in lines])
        ymin = min([min(l.get_data()[1]) for l in lines])
        ymax = max([max(l.get_data()[1]) for l in lines])
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        if abs(ymin-ylim[0]) > abs(ymax-ylim[1]):
            loc = 'lower '
        else:
            loc = 'upper '
        if abs(xmin-xlim[0]) > abs(xmax-xlim[1]):
            loc = loc + 'left'
        else:
            loc = loc + 'right'
    if loc=='upper right':
        position = [x0[1] - extra_x, y0[1] - extra_y, width, height]
    elif loc=='upper left':
        position = [x0[0] + extra_x, y0[1] - extra_y, width, height]
    elif loc=='lower right':
        position = [x0[1] - extra_x, y0[0] + extra_y, width, height]
    elif loc=='lower left':
        position = [x0[0] + extra_x, y0[0] + extra_y, width, height]
    else:
        raise ValueError("Unvalid legend location")
    # Create legend buttons
    ax_buttons = plt.axes(position)
    buttons = wid.CheckButtons(ax_buttons, labels, show_default)
    legends = buttons.labels
    # Color each legend entry like its line and forward remaining kwargs
    # (e.g. fontsize) to the text objects.
    for l, leg in zip(lines, legends):
        leg.set_color(l.get_color())
        leg.set(**kwargs)
    for l, sd in zip(lines, show_default):
        l.set_visible(sd)
    # Add callback function and run
    def buttons_callback(label):
        # Toggle visibility of the line whose label was clicked.
        for l, leg in zip(lines, legends):
            if label == leg.get_text():
                l.set_visible(not l.get_visible())
        plt.draw()
        return
    buttons.on_clicked(buttons_callback)
    plt.show()
    # The caller must keep this reference alive or the callbacks are GC'd.
    return buttons
#%%
def interactiveSaveButton(filename, **kwargs):
    """Adds an interactive save button to a given figure.

    Parameters
    ----------
    filename : str
        A model filename, which must include full path.

    Other parameters
    ----------------
    overwrite=False : bool
        Says whether to overwrite files or not.
    sufix='' : str
        A sufix to be always added to the given filename.
    newformat='{}_v{}' : str
        A formatter that allows to make new filenames in order to avoid
        overwriting. If 'F:\Hola.png' does already exist, new file is saved as
        'F:\Hola_v2.png'.

    Returns
    -------
    save_button : wid.Button
        Interactive save button instance.
    """
    # Since I can, I would also like an interactive 'Save' button,
    # placed near the bottom-right corner of the figure.
    ax_save = plt.axes([0.8, 0.01, 0.1, 0.04])
    save_button = wid.Button(ax_save, 'Guardar')
    # For that, I'll need another callback function
    def check_save_callback(event):
        # Hide the Tk root window that tkinter would otherwise pop up.
        Tk().withdraw()
        # tk.newfilename = askopenfilename()
        # Hide the button itself so it is not captured in the saved image.
        ax_save.set_visible(False)
        ivs.saveFig(filename, **kwargs)
        ax_save.set_visible(True)
        messagebox.showinfo('¡Listo!', 'Imagen guardada')
    save_button.on_clicked(check_save_callback)
    plt.show()
    # The caller must keep this reference alive or the callback is GC'd.
    return save_button
#%%
def interactiveValueSelector(ax, x_value=True, y_value=True):
    """Allows to choose values from the axes of a plot.

    Parameters
    ----------
    ax : plt.Axes
        The axes instance of the plot from where you want to choose.
    x_value=True : bool
        Whether to return the x value.
    y_value=True : bool
        Whether to return the y value.

    Returns
    -------
    value : float
        If only one value is required. This is the x value if 'x_value=True'
        and 'y_value=False'. Otherwise, it is the y value.
    values : tuple
        If both values are required. Then it returns (x value, y value).

    See also
    --------
    plt.Axes
    wid.Cursor
    """
    # Freeze autoscaling so the clicked coordinates stay meaningful.
    ax.autoscale(False)
    cursor = wid.Cursor(ax, useblit=True,
                        linestyle='--', color='red', linewidth=2)
    # Only show the cursor line(s) for the value(s) being selected.
    if not y_value:
        cursor.horizOn = False
    if not x_value:
        cursor.vertOn = False
    plt.show()
    # Block until the user clicks once; take the first (only) click.
    values = plt.ginput()[0]
    # Mark the chosen value(s) with permanent dashed lines.
    if x_value:
        plt.vlines(values[0], ax.get_ylim()[0], ax.get_ylim()[1],
                   linestyle='--', linewidth=2, color='red')
    if y_value:
        plt.hlines(values[1], ax.get_xlim()[0], ax.get_xlim()[1],
                   linestyle='--', linewidth=2, color='red')
    # Disable the cursor once the choice has been made.
    cursor.visible = False
    cursor.active = False
    if x_value and y_value:
        return values
    elif x_value:
        return values[0]
    else:
        return values[1]
#%%
def interactiveIntegerSelector(ax, min_value=0, max_value=5):
    """Adds an integer selector bar to a single-plot figure.

    Allows to choose an integer value looking at a plot.

    Parameters
    ----------
    ax : plt.Axes
        The axis instance from the single-plot figure.
    min_value=0 : int
        Minimum integer value that can be chosen.
    max_value=5 : int
        Maximum integer value that can be chosen.

    Returns
    -------
    integer : int
        Selected integer value.

    See also
    --------
    ivp.IntFillingCursor
    plt.Axes
    """
    # Shrink the main axes upwards to make room for the selector bar below
    # (a bit more room when there is an x-axis label to accommodate).
    position = ax.get_position()
    if ax.xaxis.label.get_text() == '':
        ax.set_position([position.x0,
                         position.y0 + position.height*0.16,
                         position.width,
                         position.height*0.84])
    else:
        ax.set_position([position.x0,
                         position.y0 + position.height*0.18,
                         position.width,
                         position.height*0.82])
    ax_selector = plt.axes([0.18, 0.1, 0.65, 0.03])
    ax_selector.yaxis.set_visible(False)
    # +1 so the maximum value is reachable after int() truncation below.
    ax_selector.set_xlim(min_value, max_value+1)
    # IntFillingCursor is defined further down in this module.
    selector = IntFillingCursor(ax_selector, color='r', linewidth=2)
    selector.horizOn = False
    plt.show()
    plt.annotate("¿Cantidad?", (0.01, 1.3), xycoords='axes fraction');
    plt.annotate(
        "Elija un número desde {:.0f} hasta {:.0f}.".format(
            min_value,
            max_value),
        (0.45, 1.3), xycoords='axes fraction');
    # Block until the user clicks once; truncate its x coordinate.
    integer = int(plt.ginput()[0][0])
    ax_selector.autoscale(False)
    # Paint the chosen range permanently on the selector bar.
    plt.fill([ax_selector.get_xlim()[0], integer,
              integer, ax_selector.get_xlim()[0]],
             [ax_selector.get_ylim()[0], ax_selector.get_ylim()[0],
              ax_selector.get_ylim()[1], ax_selector.get_ylim()[1]],
             'r')
    selector.visible = False
    return integer
#%%
def interactiveTimeSelector(filename, autoclose=True):
    """Let the user pick a time instant on a Pump Probe file's plot.

    Parameters
    ----------
    filename : str
        Filename, which must include full path and extension.
    autoclose=True : bool
        Whether to automatically close the figure afterwards.

    Returns
    -------
    ti : float
        The selected time, snapped to the nearest sampled instant.

    See also
    --------
    ivs.loadNicePumpProbe
    """
    t, V, details = ivs.loadNicePumpProbe(filename)
    fig = plotPumpProbe(filename, autosave=False)[0]
    ax = fig.axes[0]
    # Only the x (time) coordinate is of interest here.
    chosen = interactiveValueSelector(ax, y_value=False)
    # Snap the click to the closest sampled time value.
    ti = t[np.argmin(abs(t - chosen))]
    if autoclose:
        plt.close(fig)
    return ti
#%%
class FillingCursor(wid.Cursor):
    """Cursor subclass that also shades the region between the axes'
    lower bounds and the current mouse position."""

    def __init__(self, ax, horizOn=True, vertOn=True, **lineprops):
        # Start with a degenerate polygon collapsed onto the axes' lower-left
        # corner; onmove() repositions its four vertices.  (Bug fix: the
        # original built the y coordinates from get_xbound() as well.)
        self.fill, = ax.fill([ax.get_xbound()[0], ax.get_xbound()[0],
                              ax.get_xbound()[0], ax.get_xbound()[0]],
                             [ax.get_ybound()[0], ax.get_ybound()[0],
                              ax.get_ybound()[0], ax.get_ybound()[0]],
                             **lineprops)
        # self.fill.set_visible(False)
        # Keep our own axes reference; wid.Cursor stores self.ax too.
        self.myax = ax
        super().__init__(ax, horizOn=horizOn, vertOn=vertOn,
                         useblit=False, **lineprops)

    def clear(self, event):
        """Internal event handler to clear the cursor."""
        self.fill.set_visible(False)
        super().clear(event)

    def onmove(self, event):
        """Internal event handler to draw the cursor when the mouse moves."""
        if self.ignore(event):
            return
        if not self.canvas.widgetlock.available(self):
            return
        if event.inaxes != self.ax:
            # Mouse left the axes: hide everything and redraw once.
            self.linev.set_visible(False)
            self.lineh.set_visible(False)
            self.fill.set_visible(False)
            if self.needclear:
                self.canvas.draw()
                self.needclear = False
            return
        self.needclear = True
        if not self.visible:
            return
        self.linev.set_xdata((event.xdata, event.xdata))
        self.lineh.set_ydata((event.ydata, event.ydata))
        if self.vertOn and self.horizOn:
            # Rectangle from the lower-left corner up to the mouse position.
            # (Bug fix: the original's 4th y vertex used get_xbound().)
            self.fill.set_xy(np.array([[self.myax.get_xbound()[0],
                                        self.myax.get_xbound()[0],
                                        event.xdata,
                                        event.xdata],
                                       [self.myax.get_ybound()[0],
                                        event.ydata,
                                        event.ydata,
                                        self.myax.get_ybound()[0]]]).T)
        elif self.horizOn:
            # Full-width horizontal band from the bottom up to the mouse y.
            self.fill.set_xy(np.array([[self.myax.get_xbound()[0],
                                        self.myax.get_xbound()[0],
                                        self.myax.get_xbound()[1],
                                        self.myax.get_xbound()[1]],
                                       [self.myax.get_ybound()[0],
                                        event.ydata,
                                        event.ydata,
                                        self.myax.get_ybound()[0]]]).T)
        else:
            # Full-height vertical band from the left edge to the mouse x.
            self.fill.set_xy(np.array([[self.myax.get_xbound()[0],
                                        event.xdata,
                                        event.xdata,
                                        self.myax.get_xbound()[0]],
                                       [self.myax.get_ybound()[0],
                                        self.myax.get_ybound()[0],
                                        self.myax.get_ybound()[1],
                                        self.myax.get_ybound()[1]]]).T)
        self.linev.set_visible(self.visible and self.vertOn)
        self.lineh.set_visible(self.visible and self.horizOn)
        self.fill.set_visible(self.visible and (self.horizOn or self.vertOn))
        self._update()
#%%
class IntFillingCursor(FillingCursor):
    """FillingCursor that snaps the cursor lines and the fill to integer
    coordinates (values are truncated with int())."""

    def __init__(self, ax, horizOn=True, vertOn=True,
                 **lineprops):
        # No extra state; kept explicit for symmetry with the parent class.
        super().__init__(ax, horizOn=horizOn, vertOn=vertOn, **lineprops)

    def onmove(self, event):
        """Internal event handler to draw the cursor when the mouse moves."""
        if self.ignore(event):
            return
        if not self.canvas.widgetlock.available(self):
            return
        if event.inaxes != self.ax:
            # Mouse left the axes: hide everything and redraw once.
            self.linev.set_visible(False)
            self.lineh.set_visible(False)
            self.fill.set_visible(False)
            if self.needclear:
                self.canvas.draw()
                self.needclear = False
            return
        self.needclear = True
        if not self.visible:
            return
        # Truncate the mouse position so the cursor jumps between integers.
        self.linev.set_xdata((int(event.xdata), int(event.xdata)))
        self.lineh.set_ydata((int(event.ydata), int(event.ydata)))
        if self.vertOn and self.horizOn:
            # Rectangle from the lower-left corner up to the snapped position.
            # (Bug fix: the original's 4th y vertex used get_xbound().)
            self.fill.set_xy(np.array([[self.myax.get_xbound()[0],
                                        self.myax.get_xbound()[0],
                                        int(event.xdata),
                                        int(event.xdata)],
                                       [self.myax.get_ybound()[0],
                                        int(event.ydata),
                                        int(event.ydata),
                                        self.myax.get_ybound()[0]]]).T)
        elif self.horizOn:
            # Full-width horizontal band up to the snapped mouse y.
            self.fill.set_xy(np.array([[self.myax.get_xbound()[0],
                                        self.myax.get_xbound()[0],
                                        self.myax.get_xbound()[1],
                                        self.myax.get_xbound()[1]],
                                       [self.myax.get_ybound()[0],
                                        int(event.ydata),
                                        int(event.ydata),
                                        self.myax.get_ybound()[0]]]).T)
        else:
            # Full-height vertical band up to the snapped mouse x.
            self.fill.set_xy(np.array([[self.myax.get_xbound()[0],
                                        int(event.xdata),
                                        int(event.xdata),
                                        self.myax.get_xbound()[0]],
                                       [self.myax.get_ybound()[0],
                                        self.myax.get_ybound()[0],
                                        self.myax.get_ybound()[1],
                                        self.myax.get_ybound()[1]]]).T)
        self.linev.set_visible(self.visible and self.vertOn)
        self.lineh.set_visible(self.visible and self.horizOn)
        self.fill.set_visible(self.visible and (self.horizOn or self.vertOn))
        self._update()
#%%
def plotPumpProbe(filename, extension='.png', interactive=False, autosave=True,
                  overwrite=False, **kwargs):
    """Plots a PumpProbe experiment from a file and its mean.

    Can also make an interactive plot, which holds a save button and allows to
    choose only certain experiments to be shown from the legend.
    By default, it also saves a picture on the file's path.

    Parameters
    ----------
    filename : str
        File's root (must include directory and termination).
    extension='.png' : str
        Image file's format.
    interactive=False : bool
        Says whether to make an interactive plot or not.
    autosave=True : bool
        Says whether to automatically save or not.
    overwrite=False : bool
        Says whether to allow overwriting or not.

    Returns
    -------
    fig : plt.Figure instance
        Figure containing the desired plot.
    legend_buttons : wid.CheckButtons or None
        Interactive legend. None when not interactive.
    save_button : wid.Button or None
        Interactive save button. None when not interactive.

    Raises
    ------
    pngfile : .png file
        PNG image file. Only raised if 'autosave=True'.

    See also
    --------
    ivs.loadPumpProbe
    ivp.interactiveLegend
    ivp.interactiveSaveButton
    """
    t, V, details = ivs.loadNicePumpProbe(filename)
    # Mean over the repetition axis, one value per time instant.
    meanV = np.mean(V, axis=1)
    Nrepetitions = details['nrepetitions']
    fig = plt.figure(figsize=[6.4, 4.4])
    ax = plt.subplot()
    plt.plot(t, V, linewidth=0.8, zorder=0)
    plt.plot(t, meanV, linewidth=1.5, zorder=2)
    labels = ['Experimento {:.0f}'.format(i+1) for i in range(Nrepetitions)]
    labels.append('Promedio')
    plt.ylabel(r'Voltaje ($\mu$V)', fontsize=14)
    plt.xlabel(r'Tiempo (ps)', fontsize=14)
    ax.tick_params(labelsize=12)
    ax.minorticks_on()
    ax.tick_params(axis='y', which='minor', left=False)
    ax.tick_params(length=5)
    ax.grid(axis='x', which='both')
    # Nudge the axes to make room for the tick labels.
    ax = fig.axes[0]
    position = ax.get_position()
    ax.set_position([position.x0*1.2, position.y0*1.3,
                     position.width, position.height])
    if interactive:
        show_default = [True for lab in labels]
        legend_buttons = interactiveLegend(ax, labels, show_default,
                                           fontsize=12,
                                           x0=(.17, .68), y0=(.06, .84), **kwargs)
        save_button = interactiveSaveButton(filename, extension=extension,
                                            overwrite=overwrite,
                                            folder='Figuras',
                                            sufix='_fig')
    else:
        plt.legend(labels, fontsize=12, framealpha=1, **kwargs)
    if autosave:
        if interactive:
            # Hide the save button so it isn't captured in the saved image.
            save_button.ax.set_visible(False)
        save_kwargs = dict()
        if 'newformat' in kwargs.keys():
            # Bug fix: the original called save_kwargs.add(...), but dict has
            # no 'add' method (AttributeError whenever 'newformat' was given),
            # and it never forwarded the collected kwargs to saveFig.
            save_kwargs['newformat'] = kwargs['newformat']
        ivs.saveFig(filename, extension=extension, overwrite=overwrite,
                    folder='Figuras', sufix='_fig', **save_kwargs)
        if interactive:
            save_button.ax.set_visible(True)
    if interactive:
        return fig, legend_buttons, save_button
    else:
        return fig, None, None
#%%
def plotAllPumpProbe(path, extension='.png', autosave=True, autoclose=False,
                     **kwargs):
    """Plot every PumpProbe experiment found in a folder.

    Only '.txt' files whose names begin with 'M' are considered data files.

    Parameters
    ----------
    path : str
        Files' folder (must include directory).
    extension='.png' : str
        Image file's format.
    autosave=True : bool
        Says whether to save or not.
    autoclose=False : bool
        Says whether to close the figures or not.

    Returns
    -------
    figures : list
        plt.Figure instances, one per data file — only returned when
        autoclose is deactivated.

    Raises
    ------
    pngfiles : .png files
        PNG image files. Only raised if 'autosave=True'.

    See also
    --------
    ivp.plotPumpProbe
    """
    data_files = [os.path.join(path, name) for name in os.listdir(path)
                  if name.startswith("M") and name.endswith(".txt")]
    figures = []
    for data_file in data_files:
        fig = plotPumpProbe(data_file, extension=extension, interactive=False,
                            autosave=autosave, **kwargs)[0]
        if autoclose:
            plt.close(fig)
        else:
            figures.append(fig)
    if not autoclose:
        return figures
#%%
def linearPredictionPlot(filename, plot_results, extension='.png',
                         folder='Figuras', autosave=True, overwrite=False,
                         showgrid=False):
    """Plots the results of a linear prediction fit.

    Parameters
    ----------
    filename : str
        File's root (must include directory and extension).
    plot_results : ivu.InstancesDict
        Fit results that allow to plot. Must include...
        ...numpy array 'fit', that holds time, data, fit and fit terms
        ...numpy.array 'raman', that holds frequencies, fit spectrum and fit
        terms' spectrum.
    extension='.png' : str
        Image file's format.
    folder='Figuras' : str
        Folder to include in figure's filename.
    autosave=True : bool
        Says whether to save or not.
    overwrite=False : bool
        Says whether to allow overwriting or not.
    showgrid=False : bool
        Says whether to show or not the vertical grid on the time space plot.

    Returns
    -------
    fig : plt.Figure instance
        Figure containing the desired plot.
    legend_buttons : wid.CheckButtons
        Interactive legend.
    save_button : wid.Button
        Interactive save button.

    Raises
    ------
    Image file. Only raised if 'autosave=True'.

    See also
    --------
    iva.linearPrediction
    """
    # First I deglose data
    fit = plot_results.fit
    raman = plot_results.raman
    # Columns of 'fit' are: time, data, full fit, then one column per term.
    Nfit_terms = fit.shape[1] - 3
    # In order to save, if needed, I will need...
    filename = os.path.splitext(filename)[0] + extension
    # Then, to plot, I first start a figure
    fig = plt.figure()
    grid = plt.GridSpec(3, 5, hspace=0.1)
    # In the upper subplot, I put the Raman-like spectrum
    ax_spectrum = plt.subplot(grid[0,:4])
    plt.plot(raman[:,0], raman[:,1], linewidth=2)
    lspectrum_terms = plt.plot(raman[:,0], raman[:,2:],
                               linewidth=2)
    # Individual term spectra start hidden; toggled from the legend below.
    for l in lspectrum_terms: l.set_visible(False)
    plt.xlabel("Frecuencia (GHz)")
    plt.ylabel("Amplitud (u.a.)")
    ax_spectrum.xaxis.tick_top()
    ax_spectrum.xaxis.set_label_position('top')
    # In the lower subplot, I put the data and fit
    ax_data = plt.subplot(grid[1:,:])
    ldata, = plt.plot(fit[:,0], fit[:,1], 'k', linewidth=0.4)
    # ax_data.autoscale(False)
    lfit, = plt.plot(fit[:,0], fit[:,2], linewidth=2)
    lfit_terms = plt.plot(fit[:,0], fit[:,3:], linewidth=1)
    # Individual fit terms start hidden too.
    for l in lfit_terms: l.set_visible(False)
    plt.xlabel("Tiempo (ps)")
    plt.ylabel(r"Voltaje ($\mu$V)")
    ax_data.tick_params(labelsize=12)
    if showgrid:
        ax_data.minorticks_on()
        ax_data.tick_params(axis='y', which='minor', left=False)
        ax_data.tick_params(length=5)
        ax_data.grid(axis='x', which='both')
        # Slightly heavier lines so they stay visible over the grid.
        ldata.set_linewidth(0.6)
        lfit.set_linewidth(2.3)
    # Because it's pretty, I make an interactive legend
    ax_legend = plt.axes([0.75, 0.642, 0.155, 0.24])
    legend_buttons = wid.CheckButtons(ax_legend, ('Data',
                                                  'Ajuste',
                                                  *['Término {:.0f}'.format(i+1)
                                                    for i in range(Nfit_terms)]),
                                      (True, True, *[False for i in range(Nfit_terms)]))
    # Color each legend entry like the line it controls.
    legend_buttons.labels[1].set_color(lfit.get_color())
    for leg, l in zip(legend_buttons.labels[2:], lfit_terms):
        leg.set_color(l.get_color())
    # For that, I'll need a callback function
    def legend_callback(label):
        if label == 'Data':
            ldata.set_visible(not ldata.get_visible())
        elif label == 'Ajuste':
            lfit.set_visible(not lfit.get_visible())
        else:
            # Toggle the matching term in both time space and spectrum.
            for i in range(Nfit_terms):
                if label == 'Término {:.0f}'.format(i+1):
                    lfit_terms[i].set_visible(not lfit_terms[i].get_visible())
                    lspectrum_terms[i].set_visible(
                        not lspectrum_terms[i].get_visible())
        plt.draw()
    legend_buttons.on_clicked(legend_callback)
    # Since I can, I would also like an interactive 'Save' button
    save_button = interactiveSaveButton(filename, overwrite=overwrite,
                                        folder=folder, sufix='_fit')
    # Once I have all that, I'll show the plot
    plt.show()
    # Like it is shown for the first time, autosave if configured that way
    if autosave:
        # Hide the button so it isn't captured in the saved image.
        save_button.ax.set_visible(False)
        ivs.saveFig(filename, overwrite=overwrite, folder=folder,
                    sufix='_fit')
        save_button.ax.set_visible(True)
    return fig, legend_buttons, save_button
class Plane:
    """Minimal mutable record of a single aircraft's flight state."""

    def __init__(self, name, airspeed = 0, altitude = 0, direction = 0, vspeed = 0, fuel = 0):
        # The name is always coerced to str; all numeric state defaults to 0.
        self.name = str(name)
        state = {"airspeed": airspeed, "altitude": altitude,
                 "direction": direction, "vspeed": vspeed, "fuel": fuel}
        for attribute, value in state.items():
            setattr(self, attribute, value)

    def engines(self, throttle):
        """Apply a throttle setting; airspeed tracks it directly."""
        self.airspeed = throttle

    def elevator(self, elv):
        """Apply an elevator setting; vertical speed tracks it directly."""
        self.vspeed = elv
def status():
    """Print name, airspeed and altitude of every plane in the global
    'flight' list.

    Relies on the module-level 'flight' list built below.
    """
    # Idiom fix: iterate the planes directly instead of indexing through
    # range(len(names)) ('flight' is built one-to-one from 'names').
    for plane in flight:
        print(plane.name)
        print("Airspeed = " + str(plane.airspeed) + " Altitude = " + str(plane.altitude))
# Demo: build one Plane per model name, show the fleet status, change the
# first plane's throttle and show the status again.
names = ["B777", "A380", "B787"]
flight = [Plane(i) for i in names]
status()
flight[0].engines(100)  # throttle up the first plane (B777)
status()
import os
# Absolute path of the directory containing this configuration module.
basedir = os.path.abspath(os.path.dirname(__file__))

class Config(object):
    """Flask-style application configuration flags."""
    DEBUG = False
    TESTING = False
    CSRF_ENABLED = True
    # SECURITY NOTE(review): the secret key and the database credentials are
    # hard-coded in source control; they should be moved to environment
    # variables or a secrets store and these values rotated.
    SECRET_KEY = 'shjbxhud9280h1gx9eub9sugue'
    SQLALCHEMY_DATABASE_URI = "mysql://itcom:72167964c1f4740fe8@10.16.45.109:3306/it_company"

class Static(object):
    """External-site constants."""
    # Base URL of the Lagou job-listing site.
    LAGOU_WEBSITE = "http://www.lagou.com"
######################################################################################
#__author__ = "Gaurav Sharma" #
#__copyright__ = "Copyright 2014, School of Public Health, University of Maryland" #
#__department__ = "Telecommunications" #
#__credits__ = "Gaurav Sharma" #
#__email__ = "gaurav10@umd.edu", "sgauravsharma059@gmail.com" #
#__status__ = "Implemented" #
#__Purpose__ = "To remove old files from dropbox based on a pattern" #
#__Platform__ = "Python" #
#__File.Name__ = "File_Remover.py" #
######################################################################################
import sys
import os
import datetime
import time
# Python 2 script: walk a Dropbox folder and delete .mp4/.jpg files whose
# embedded yymmdd date is older than today.
# Today's date as a two-digit "yymmdd" string, split into components.
date_now= datetime.datetime.now().strftime("%y%m%d")
month=date_now[2:4]
day=date_now[4:]
year=date_now[0:2]
print (day, month, year)
## Set path to that of the desired dropbox folder
for path, subdirs, files in os.walk('/Users/administrator/Dropbox/Concourse_TV/'):
    for filename in files:
        a = filename
        ## Removes .mp4, .MP4, .jpg, JPG file types
        if a[len(a)-4:].lower() == ".mp4" or a[len(a)-4:].lower() == ".jpg":
            print filename
            # Split the name on '.' and '_' (spaces first collapsed to 'a'
            # so they do not create extra tokens).
            date_file= a.replace(' ','a').replace('.','*').replace('_','*').split('*')
            print date_file
            print len(date_file)
            if len(date_file)>2:
                print date_file
                # Assumes the second token is the yymmdd date -- TODO confirm
                # against actual filenames.
                date_file=date_file[1]
                # NOTE(review): this nested comparison removes a file whenever
                # ANY single component compares lower (year; else month; else
                # day) -- e.g. a file dated a LATER year but an earlier month
                # than today's would still be deleted.  Confirm the intended
                # retention rule before relying on this.
                if int(date_file[0:2]) < int(year):
                    os.remove("/Users/administrator/Dropbox/Concourse_TV/"+filename)
                else:
                    if int(date_file[2:4]) < int(month):
                        os.remove("/Users/administrator/Dropbox/Concourse_TV/"+filename)
                    else:
                        if int(date_file[4:]) < int(day):
                            os.remove("/Users/administrator/Dropbox/Concourse_TV/"+filename)
# Exit with success once the walk completes.
sys.exit (0)
|
from MakeMyTrip.Pages.WebPage import *
from MakeMyTrip.Resources.Locators import Locators
import time
class FindHotel(WebPage):
    """Page object for the hotel-search page.

    Wraps the search form: city auto-complete, date pickers, rooms/guests
    selection and the associated validation messages.
    """

    # XPath locators and attribute/class-name constants, aliased from the
    # central Locators resource so the page object can reference them as
    # class attributes.
    popup_xpath = Locators.popup_xpath
    login_menu_xpath = Locators.login_menu_xpath
    city_label_xpath = Locators.city_label_xpath
    city_textbox_xpath = Locators.city_textbox_xpath
    city_value_xpath = Locators.city_value_xpath
    suggestion_li_xpath = Locators.suggestion_li_xpath
    search_button_xpath = Locators.search_button_xpath
    rooms_guests_xpath = Locators.rooms_guests_xpath
    guests_number_xpath = Locators.guests_number_xpath
    apply_button_xpath = Locators.apply_button_xpath
    travel_for_xpath = Locators.travel_for_xpath
    travel_reason_option_xpath = Locators.travel_reason_option_xpath
    autosearch_options_div_xpath = Locators.autosearch_options_div_xpath
    autosearch_options_xpath = Locators.autosearch_options_xpath
    autosearch_noopt_xpath = Locators.autosearch_noopt_xpath
    children_number_xpath = Locators.children_number_xpath
    guest_number_error_xpath = Locators.guest_number_error_xpath
    child_age_error_xpath = Locators.child_age_error_xpath
    selected_travel_reason = Locators.selected_travel_reason
    date_month_body_weeks_xpath = Locators.date_month_body_weeks_xpath
    date_month_body_xpath = Locators.date_month_body_xpath
    today_xpath = Locators.today_xpath
    day_xpath = Locators.day_xpath
    label_checkin = Locators.label_checkin
    past_days_xpath = Locators.past_days_xpath
    selected_start_day_xpath = Locators.selected_start_day_xpath
    # selected_end_day_xpath = Locators.selected_end_day_xpath
    future_days_xpath = Locators.future_days_xpath
    selected_end_day_xpath = Locators.selected_end_day_xpath
    checkout_header_xpath = Locators.checkout_header_xpath
    child_age_select_xpath = Locators.child_age_select_xpath
    prev_month_arrow_xpath = Locators.prev_month_arrow_xpath
    next_month_arrow_xpath = Locators.next_month_arrow_xpath
    current_month_xpath = Locators.current_month_xpath
    future_month_xpath = Locators.future_month_xpath
    elem_class_attrib = Locators.elem_class_attrib
    elem_disable_attrib = Locators.elem_disable_attrib
    elem_class_outside = Locators.elem_class_outside
    elem_class_disable = Locators.elem_class_disable
    elem_class_future_day = Locators.elem_class_future_day
    elem_class_today = Locators.elem_class_today
    elem_class_day_start = Locators.elem_class_day_start
    elem_class_day_end = Locators.elem_class_day_end

    def __init__(self, driver):
        # Reuse the generic WebPage setup (driver, waits, helper methods).
        WebPage.__init__(self, driver)
        # Keep a handle on <body> so ESCAPE can be sent to close overlays.
        self.elem_body = self.driver.find_element_by_tag_name('body')
def close_login_popup(self):
try:
self.element_visible_loc(By.XPATH, FindHotel.popup_xpath)
except TimeoutException:
# print('Login pop up did not appear')
pass
else:
login_dialog = self.element_clickable(By.XPATH, FindHotel.login_menu_xpath)
login_dialog.click()
return True
def check_elem_visible_city_div(self):
try:
self.element_visible_loc(By.XPATH, FindHotel.city_label_xpath)
return True
except TimeoutException as e:
print(e, ' Failed to find component for City/Hotel')
return False
def select_textbox(self):
try:
self.element_clickable(By.XPATH, FindHotel.city_label_xpath).click()
return True
except TimeoutException as e:
print(e, 'Failed to access component for City/Hotel')
return False
def check_visibility_city_textbox(self):
try:
self.element_visible_loc(By.XPATH, FindHotel.city_textbox_xpath)
return True
except TimeoutException as e:
print(e, 'Failed to find component textbox provided to enter city')
return False
def enter_city(self, city):
char = ''
try:
for c in city:
char = char + c
self.element_clickable(By.XPATH, FindHotel.city_textbox_xpath).clear()
self.element_clickable(By.XPATH, FindHotel.city_textbox_xpath).send_keys(char)
return True
except TimeoutException as e:
print(e, 'Failed to access textbox provided for entering city')
return False
# try:
# elem_city_text = self.element_clickable(By.XPATH, FindHotel.city_textbox_xpath)
# except Exception as e:
# print(e, 'Failed to find text field for city')
# return False
# for c in city:
# try:
# elem_city_text.clear()
# except TimeoutException as e:
# print(e, 'Failed to clear text field')
# pass
# else:
# char = char + c
# elem_city_text.send_keys(char)
# return True
def check_autosearch_option(self):
try:
self.element_visible_loc(By.XPATH, FindHotel.autosearch_options_div_xpath)
return True
except TimeoutException as e:
print(e, 'Failed to find auto-search options')
return False
finally:
self.elem_body.send_keys(Keys.ESCAPE)
    def check_expected_option_available(self, city):
        """Type *city* and click the first auto-search suggestion that
        starts with it.

        Returns True when the suggestion list appeared (clicking the
        match when found), False when the list never showed up.  Always
        sends ESCAPE afterwards to close the overlay.
        """
        self.select_textbox()
        self.enter_city(city)
        try:
            if self.wait.until(ec.presence_of_element_located((By.XPATH, FindHotel.autosearch_options_div_xpath))):
                expected_options = self.driver.find_elements_by_xpath(FindHotel.autosearch_options_div_xpath)
        except TimeoutException as e:
            print(e, 'Expected option not found in auto-searched list')
            return False
        else:
            # NOTE(review): if wait.until() ever returned a falsy value,
            # 'expected_options' would be unbound here and raise NameError;
            # presumably until() always returns the element or raises --
            # confirm.
            for exp_opt in expected_options:
                if exp_opt.text.startswith(city):
                    exp_opt.click()
                    break
            return True
        finally:
            self.elem_body.send_keys(Keys.ESCAPE)
def check_invalid_city_name(self, city):
self.select_textbox()
self.enter_city(city)
try:
self.element_visible_loc(By.XPATH, FindHotel.autosearch_noopt_xpath)
return True
except TimeoutException as e:
print(e, 'Expected option (empty list) not displayed')
return False
finally:
self.elem_body.send_keys(Keys.ESCAPE)
def enter_room_guests_info(self):
    """Open the rooms/guests widget, choose 4 guests and apply.

    Returns True on success, False when any step failed.
    """
    try:
        self.element_clickable(By.XPATH, FindHotel.rooms_guests_xpath).click()
        for option in self.driver.find_elements_by_xpath(FindHotel.guests_number_xpath):
            if option.text == '4':
                option.click()
                break
        # self.element_clickable(By.XPATH, FindHotel.guests_number_xpath).click()
        self.element_clickable(By.XPATH, FindHotel.apply_button_xpath).click()
    except Exception as err:
        print('Failed to select room/guests options', err)
        return False
    return True
def check_guest_selection_negative(self):
    """Select maximum guests plus maximum children and expect a
    'too many guests' validation error.

    Returns:
        bool: True when the error element appears, False on timeout.
    """
    self.element_clickable(By.XPATH, FindHotel.rooms_guests_xpath).click()
    guests_number = self.driver.find_elements_by_xpath(FindHotel.guests_number_xpath)
    # Last entry is the highest selectable guest count.
    guests_number[-1].click()
    try:
        children_number = self.driver.find_elements_by_xpath(FindHotel.children_number_xpath)
        children_number[-1].click()
        self.element_visible_loc(By.XPATH, FindHotel.guest_number_error_xpath)
        return True
    except TimeoutException as e:
        print(e, 'Failed to display error message')
        return False
    finally:
        # Close the rooms/guests widget whatever happened.
        self.elem_body.send_keys(Keys.ESCAPE)
def check_guest_selection_child_age_selection_negative(self):
    """Add a child without selecting its age, apply, and expect the
    'child age not selected' validation error.

    Returns:
        bool: True when the error element appears, False on timeout.
    """
    self.element_clickable(By.XPATH, FindHotel.rooms_guests_xpath).click()
    children_number = self.driver.find_elements_by_xpath(FindHotel.children_number_xpath)
    # Index 1 presumably selects "1 child" — TODO confirm against the page.
    children_number[1].click()
    try:
        self.element_clickable(By.XPATH, FindHotel.apply_button_xpath).click()
        self.element_visible_loc(By.XPATH, FindHotel.child_age_error_xpath)
        return True
    except TimeoutException as e:
        print(e, 'Failed to display error message for children age not selected')
        return False
    finally:
        # Close the rooms/guests widget whatever happened.
        self.elem_body.send_keys(Keys.ESCAPE)
def check_child_age_dropdown_visible(self):
    """Add a child and verify the child-age dropdown becomes visible.

    Returns:
        bool: True when the dropdown shows up, False on timeout.
    """
    self.element_clickable(By.XPATH, FindHotel.rooms_guests_xpath).click()
    children_number = self.driver.find_elements_by_xpath(FindHotel.children_number_xpath)
    children_number[1].click()
    try:
        self.wait.until(ec.visibility_of_element_located((By.XPATH, FindHotel.child_age_select_xpath)))
        return True
    except TimeoutException as e:
        print(e, 'Dropdown for child age selection not visible')
        return False
    finally:
        # BUGFIX: ESC was previously sent only on the success path,
        # leaving the rooms/guests widget open after a timeout.
        self.elem_body.send_keys(Keys.ESCAPE)
def select_travel_for_reason(self):
    """Open the 'travelling for' selector and pick 'Leisure'.

    Returns:
        bool: True when both expected options ('Work', 'Leisure') were
        present and 'Leisure' was clicked; False otherwise.
    """
    try:
        self.element_clickable(By.XPATH, FindHotel.travel_for_xpath).click()
        travel_reasons = self.driver.find_elements_by_xpath(FindHotel.travel_reason_option_xpath)
        if travel_reasons[0].text == 'Work' and travel_reasons[1].text == 'Leisure':
            travel_reasons[1].click()
            return True
        # BUGFIX: previously fell off the end and implicitly returned
        # None when the option texts did not match.
        return False
    except Exception as e:
        print('Failed to select reason of traveling', e)
        return False
def cancel_travel_reason_change(self):
    """Open the travel-reason selector, dismiss it with ESC and verify
    the previously selected reason is unchanged.

    Returns:
        bool: True when the selection survived the cancelled edit,
        False when it changed.
    """
    prev_selected_reason = self.driver.find_element_by_xpath(FindHotel.selected_travel_reason).text
    self.element_clickable(By.XPATH, FindHotel.travel_for_xpath).click()
    self.elem_body.send_keys(Keys.ESCAPE)
    cur_selected_reason = self.driver.find_element_by_xpath(FindHotel.selected_travel_reason).text
    if prev_selected_reason == cur_selected_reason:
        return True
    # BUGFIX: the original caught ValueError (which find_element never
    # raises) and implicitly returned None on a mismatch; report the
    # failure explicitly instead.
    print('Selected reason for travel changed')
    return False
def check_past_day_click(self):
    """Open the check-in calendar and verify every day cell is in an
    allowed state (outside the month, disabled past day, enabled future
    day, today, or a selected start/end day).

    Returns:
        bool: True when no day cell is in an unexpected state.
    """
    result = True
    self.wait.until(ec.element_to_be_clickable((By.XPATH, FindHotel.label_checkin))).click()
    month = self.driver.find_element_by_xpath(FindHotel.date_month_body_xpath)
    days_of_month = month.find_elements_by_xpath(FindHotel.day_xpath)
    for day in days_of_month:
        elem_class = str(day.get_attribute(FindHotel.elem_class_attrib))
        elem_aria_disabled = str(day.get_attribute(FindHotel.elem_disable_attrib))
        # Cells belonging to the previous/next month are skipped entirely.
        outside_found = elem_class.find(FindHotel.elem_class_outside)
        if outside_found > 0:
            continue
        # Past days must be marked disabled both via class and aria attribute.
        disabled_found = elem_class.find(FindHotel.elem_class_disable)
        if disabled_found > 0 and elem_aria_disabled == 'true':
            continue
        elif (elem_class == FindHotel.elem_class_future_day) and elem_aria_disabled == 'false':
            continue
        elif (elem_class.find(FindHotel.elem_class_today) > 0) and elem_aria_disabled == 'false':
            continue
        elif (elem_class.find(FindHotel.elem_class_day_start) > 0) and elem_aria_disabled == 'false':
            continue
        elif (elem_class.find(FindHotel.elem_class_day_end) > 0) and elem_aria_disabled == 'false':
            continue
        else:
            # Any other combination means a past day is clickable (or an
            # enabled day is mis-marked) — fail the check.
            result = False
            break
    # NOTE(review): .find(...) > 0 misses a match at position 0 — the
    # marker substrings presumably never start the class string; confirm.
    self.elem_body.send_keys(Keys.ESCAPE)
    return result
def checkin_date_later_than_checkout_date(self):
    """Click the day after the currently selected checkout day and verify
    the widget asks for a checkout date again.

    Returns:
        bool: True when the 'Select Checkout Date' heading appears.
    """
    result = True
    self.wait.until(ec.element_to_be_clickable((By.XPATH, FindHotel.label_checkin))).click()
    days_of_month = self.driver.find_elements_by_xpath(FindHotel.day_xpath)
    for i in range(len(days_of_month)):
        elem_class = days_of_month[i].get_attribute(FindHotel.elem_class_attrib)
        # if elem_class == "DayPicker-Day DayPicker-Day--end DayPicker-Day--selected":
        if FindHotel.elem_class_day_end in elem_class:
            # NOTE(review): i+1 raises IndexError if the end day is the
            # very last cell of the picker — confirm that cannot happen.
            days_of_month[i+1].click()
            break
    checkout_heading = self.wait.until(ec.element_to_be_clickable((By.XPATH, FindHotel.checkout_header_xpath))).text
    if 'Select Checkout Date' != checkout_heading:
        result = False
    self.elem_body.send_keys(Keys.ESCAPE)
    return result
def check_calendar_span(self):
    """Verify the calendar spans exactly from the current month to the
    same month one year ahead: no 'previous month' arrow at the start,
    and paging forward ends one year later.

    Returns:
        bool: True when the span is exactly one year.
    """
    result = True
    self.wait.until(ec.element_to_be_clickable((By.XPATH, FindHotel.label_checkin))).click()
    # The previous-month arrow must NOT be visible at the first month.
    try:
        self.wait.until(ec.visibility_of_element_located((By.XPATH, FindHotel.prev_month_arrow_xpath)))
    except TimeoutException:
        pass
    else:
        print('Allowed to access past months')
        result = False
        return result
    # Remember the starting month/year, e.g. 'August2021' -> ('August', 2021).
    current_date = str(self.driver.find_element_by_xpath(FindHotel.current_month_xpath).text)
    current_month = current_date[:-4]
    current_year = int(current_date[-4:])
    # Page forward until the next-month arrow disappears (end of span).
    try:
        while self.wait.until(ec.visibility_of_element_located((By.XPATH, FindHotel.next_month_arrow_xpath))):
            self.wait.until(ec.element_to_be_clickable((By.XPATH, FindHotel.next_month_arrow_xpath))).click()
    except TimeoutException:
        pass
    else:
        # wait.until never returns falsy, so reaching here is unexpected.
        result = False
        return result
    future_dates = self.driver.find_elements_by_xpath(FindHotel.future_month_xpath)
    future_month = future_dates[1].text[:-4]
    future_year = int(future_dates[1].text[-4:])
    # The last reachable month must be the same month, one year ahead.
    if future_year != (current_year + 1) or future_month != current_month:
        result = False
    self.elem_body.send_keys(Keys.ESCAPE)
    return result
def check_element_active(self, menu_name):
    """Return True when the menu item named *menu_name* is visible."""
    locator = Locators.mmt_menu[menu_name]
    try:
        self.wait.until(ec.visibility_of_element_located((By.XPATH, locator)))
        return True
    except TimeoutException as err:
        print(err, 'Menu item {} not visible'.format(menu_name))
        return False
def search_hotel_in_amboli_for_3_adults_for_next_weekend(self, next_weekend, city):
    """Select *city*, then pick the given check-in day plus a two-day stay.

    Args:
        next_weekend: string whose first space-separated token is the
            check-in day of the month (e.g. '14 Aug').
        city: city name typed into the search box.

    Returns:
        bool: True when both check-in and check-out days were clicked.
    """
    checkin_day = str(int((next_weekend.split(' '))[0]))
    self.check_expected_option_available(city)
    self.wait.until(ec.element_to_be_clickable((By.XPATH, FindHotel.label_checkin))).click()
    days_of_month = self.driver.find_elements_by_xpath(FindHotel.future_days_xpath)
    for i, day in enumerate(days_of_month):
        if day.text == checkin_day:
            try:
                day.click()
                # Check out two days after check-in (the weekend).
                days_of_month[i + 2].click()
            except (TimeoutException, IndexError):
                # BUGFIX: also guard i+2 running past the day list.
                return False
            return True
    # BUGFIX: the original returned True even when the check-in day was
    # never found in the calendar.
    return False
def click_search_button(self):
    """Click the Search button; True on success, False otherwise."""
    try:
        button = self.element_clickable(By.XPATH, FindHotel.search_button_xpath)
        button.click()
    except Exception as err:
        print(err, 'Failed to click Search button')
        return False
    return True
|
import time
import pygame
from bullet import Bullet
from flagzombie import Flagzombie
from peashooter import Peashooter
from sun import Sun
from sunflower import Sunflower
from wallnut import Wallnut
from zombie import Zombie

pygame.init()
# Game window size (width, height) and title.
backgdsize = (1000, 600)
screen = pygame.display.set_mode(backgdsize)
pygame.display.set_caption("植物大战僵尸")
# Initialise the music module
pygame.mixer.init()
# Load the background music
pygame.mixer.music.load("resources/music/18 - Crazy Dave IN-GAME.mp3")
# Images drawn at the mouse position while a plant card is selected
sunflowerImg = pygame.image.load('resources/images/sunflower/SunFlower_00.png').convert_alpha()
peashooterImg = pygame.image.load('resources/images/peashooter/Peashooter_00.png').convert_alpha()
wallnutImg = pygame.image.load('resources/images/wall_nut/WallNut_00.png').convert_alpha()
# Seed cards shown in the plant slot (seed bank)
flowerSeed = pygame.image.load('resources/images/cards/card_sunflower.png').convert_alpha()
wallnutSeed = pygame.image.load('resources/images/cards/card_wallnut.png').convert_alpha()
peashooterSeed = pygame.image.load('resources/images/cards/card_peashooter.png').convert_alpha()
bg_img = pygame.image.load('resources/images/screen/background.jpg').convert_alpha()
seedbank_img = pygame.image.load('resources/images/screen/SeedBank.png').convert_alpha()
# Current sun balance and its pre-rendered text surface.
text = 900
sun_font = pygame.font.SysFont('arial', 25)
sun_num_surface = sun_font.render(str(text), True, (0, 0, 0))
# One sprite group per entity type.
sunFlowerGroup = pygame.sprite.Group()
peashooterGroup = pygame.sprite.Group()
bulletGroup = pygame.sprite.Group()
zombieGroup = pygame.sprite.Group()
wallnutGroup = pygame.sprite.Group()
sunGroup = pygame.sprite.Group()
# Custom timer events: sun production, pea shots, zombie spawns.
GEN_SUN_EVENT = pygame.USEREVENT + 1
pygame.time.set_timer(GEN_SUN_EVENT, 1000)
GEN_BULLET_EVENT = pygame.USEREVENT + 2
pygame.time.set_timer(GEN_BULLET_EVENT, 1000)
GEN_ZOMBIE_EVENT = pygame.USEREVENT + 3
pygame.time.set_timer(GEN_ZOMBIE_EVENT, 3000)
GEN_FLAGZOMBIE_EVENT = pygame.USEREVENT + 4
pygame.time.set_timer(GEN_FLAGZOMBIE_EVENT, 3000)
# Which plant card is currently picked up (0 = none, 1..3 = card index).
choose = 0
clock = pygame.time.Clock()
def main():
    """Game loop: music, collision checks, drawing, timers and mouse input."""
    global sun_num_surface, choose
    global text
    # Frame counter passed to every sprite's update() for animation.
    index = 0
    while True:
        clock.tick(20)
        # Loop the background music.
        if not pygame.mixer.music.get_busy():
            pygame.mixer.music.play()
        index += 1
        # Collision detection
        for bullet in bulletGroup:
            for zombie in zombieGroup:
                if pygame.sprite.collide_mask(bullet, zombie):
                    zombie.energy -= 1
                    bulletGroup.remove(bullet)
        for wallNut in wallnutGroup:
            for zombie in zombieGroup:
                if pygame.sprite.collide_mask(wallNut, zombie):
                    zombie.ismeetwallnut = True
                    wallNut.zombies.add(zombie)
        for peashooter in peashooterGroup:
            for zombie in zombieGroup:
                if pygame.sprite.collide_mask(peashooter, zombie):
                    zombie.ismeetwallnut = True
                    peashooter.zombies.add(zombie)
        for sunflower in sunFlowerGroup:
            for zombie in zombieGroup:
                if pygame.sprite.collide_mask(sunflower, zombie):
                    zombie.ismeetwallnut = True
                    sunflower.zombies.add(zombie)
        # Draw the background, seed bank and the current sun balance.
        screen.blit(bg_img, (0, 0))
        screen.blit(seedbank_img, (250, 0))
        screen.blit(sun_num_surface, (270, 60))
        screen.blit(flowerSeed, (320, 0))
        screen.blit(peashooterSeed, (382, 0))
        screen.blit(wallnutSeed, (446, 0))
        # Update and draw every sprite group.
        sunFlowerGroup.update(index)
        sunFlowerGroup.draw(screen)
        peashooterGroup.update(index)
        peashooterGroup.draw(screen)
        bulletGroup.update(index)
        bulletGroup.draw(screen)
        zombieGroup.update(index)
        zombieGroup.draw(screen)
        wallnutGroup.update(index)
        wallnutGroup.draw(screen)
        sunGroup.update(index)
        sunGroup.draw(screen)
        # While a card is picked, draw its plant image under the cursor.
        (x, y) = pygame.mouse.get_pos()
        if choose == 1:
            screen.blit(sunflowerImg, (x, y))
        elif choose == 2:
            screen.blit(peashooterImg, (x, y))
        elif choose == 3:
            screen.blit(wallnutImg, (x, y))
        for event in pygame.event.get():
            if event.type == GEN_SUN_EVENT:
                # Each sunflower produces a sun at most every 5 seconds.
                for sprite in sunFlowerGroup:
                    now = time.time()
                    if now - sprite.lasttime >= 5:
                        sun = Sun(sprite.rect)
                        sunGroup.add(sun)
                        sprite.lasttime = now
            if event.type == GEN_BULLET_EVENT:
                for sprite in peashooterGroup:
                    bullet = Bullet(sprite.rect, backgdsize)
                    bulletGroup.add(bullet)
            if event.type == GEN_ZOMBIE_EVENT:
                zombie = Zombie()
                zombieGroup.add(zombie)
            if event.type == GEN_FLAGZOMBIE_EVENT:
                flagzombie = Flagzombie()
                zombieGroup.add(flagzombie)
            if event.type == pygame.QUIT:
                exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                pressed_key = pygame.mouse.get_pressed()
                if pressed_key[0] == 1:
                    x, y = pygame.mouse.get_pos()
                    print(x, y)
                    # Card hit-boxes: only selectable when enough sun.
                    if 320 <= x <= 382 and 0 <= y <= 89 and text >= 50:
                        # Clicked the sunflower card
                        choose = 1
                    elif 383 <= x < 446 and 0 <= y <= 89 and text >= 100:
                        # Clicked the peashooter card
                        choose = 2
                    elif 447 <= x < 511 and 0 <= y <= 89 and text >= 50:
                        # Clicked the wall-nut card
                        choose = 3
                    elif 250 < x < 1200 and 90 < y < 600:
                        # Clicked the lawn: plant the selected card, pay its
                        # sun cost and re-render the balance.
                        if choose == 1:
                            current_time = time.time()
                            sunflower = Sunflower(current_time)
                            sunflower.rect.x = x
                            sunflower.rect.y = y
                            sunFlowerGroup.add(sunflower)
                            choose = 0
                            text -= 50
                            sun_font = pygame.font.SysFont('arial', 25)
                            sun_num_surface = sun_font.render(str(text), True, (0, 0, 0))
                        elif choose == 2:
                            peashooter = Peashooter()
                            peashooter.rect.y = y
                            peashooter.rect.x = x
                            peashooterGroup.add(peashooter)
                            choose = 0
                            text -= 100
                            sun_font = pygame.font.SysFont('arial', 25)
                            sun_num_surface = sun_font.render(str(text), True, (0, 0, 0))
                        elif choose == 3:
                            wallnut = Wallnut()
                            wallnut.rect.y = y
                            wallnut.rect.x = x
                            wallnutGroup.add(wallnut)
                            choose = 0
                            text -= 50
                            sun_font = pygame.font.SysFont('arial', 25)
                            sun_num_surface = sun_font.render(str(text), True, (0, 0, 0))
                    # Clicking a dropped sun collects it for 50 sun points.
                    for sun in sunGroup:
                        if sun.rect.collidepoint(x, y):
                            sunGroup.remove(sun)
                            text += 50
                            sun_font = pygame.font.SysFont('arial', 25)
                            sun_num_surface = sun_font.render(str(text), True, (0, 0, 0))
        pygame.display.update()


if __name__ == '__main__':
    main()
|
"""Reports presence information into Zookeeper.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import logging
import sys
import kazoo
from treadmill import exc
from treadmill import sysinfo
from treadmill import zkutils
from treadmill import zknamespace as z
from treadmill.appcfg import abort as app_abort
_LOGGER = logging.getLogger(__name__)

# ACL granting the 'servers' role read/write/create/delete/admin.
_SERVERS_ACL = zkutils.make_role_acl('servers', 'rwcda')

# Sentinel identity used when the manifest carries none.
_INVALID_IDENTITY = sys.maxsize

# Time to wait when registering endpoints in case previous ephemeral
# endpoint is still present.
_EPHEMERAL_RETRY_INTERVAL = 5
_EPHEMERAL_RETRY_COUNT = 13
def _create_ephemeral_with_retry(zkclient, path, data):
    """Create ephemeral node with retry.

    Retries up to _EPHEMERAL_RETRY_COUNT times (sleeping
    _EPHEMERAL_RETRY_INTERVAL seconds between attempts) while a previous,
    still-lingering ephemeral node occupies *path*.

    Raises:
        exc.ContainerSetupError: when the node never clears; the error
            carries the path and the stale node's data.
    """
    prev_data = None
    for _ in range(0, _EPHEMERAL_RETRY_COUNT):
        try:
            return zkutils.create(zkclient, path, data, acl=[_SERVERS_ACL],
                                  ephemeral=True)
        except kazoo.client.NodeExistsError:
            # Node from a previous incarnation has not expired yet.
            prev_data = zkutils.get_default(zkclient, path)
            _LOGGER.warning(
                'Node exists, will retry: %s, data: %r',
                path, prev_data
            )
            time.sleep(_EPHEMERAL_RETRY_INTERVAL)
    raise exc.ContainerSetupError('%s:%s' % (path, prev_data),
                                  app_abort.AbortedReason.PRESENCE)
class EndpointPresence(object):
    """Manages application endpoint registration in Zookeeper."""

    def __init__(self, zkclient, manifest, hostname=None, appname=None):
        self.zkclient = zkclient
        self.manifest = manifest
        # Default to the local hostname / the manifest's app name.
        self.hostname = hostname if hostname else sysinfo.hostname()
        if appname:
            self.appname = appname
        else:
            self.appname = self.manifest.get('name')

    def register(self):
        """Register container in Zookeeper (identity, running, endpoints)."""
        self.register_identity()
        self.register_running()
        self.register_endpoints()

    def register_running(self):
        """Register container as running."""
        _LOGGER.info('registering container as running: %s', self.appname)
        _create_ephemeral_with_retry(self.zkclient,
                                     z.path.running(self.appname),
                                     self.hostname)

    def unregister_running(self):
        """Safely deletes the "running" node for the container."""
        _LOGGER.info('un-registering container as running: %s', self.appname)
        path = z.path.running(self.appname)
        try:
            data, _metadata = self.zkclient.get(path)
            # Only delete the node if this host still owns it.
            if data == self.hostname:
                self.zkclient.delete(path)
        except kazoo.client.NoNodeError:
            _LOGGER.info('running node does not exist.')

    def register_endpoints(self):
        """Registers an ephemeral host:port node per manifest endpoint."""
        _LOGGER.info('registering endpoints: %s', self.appname)
        endpoints = self.manifest.get('endpoints', [])
        for endpoint in endpoints:
            internal_port = endpoint['port']
            # Endpoints without an explicit name are keyed by port.
            ep_name = endpoint.get('name', str(internal_port))
            ep_port = endpoint['real_port']
            ep_proto = endpoint.get('proto', 'tcp')
            hostport = self.hostname + ':' + str(ep_port)
            path = z.path.endpoint(self.appname, ep_proto, ep_name)
            _LOGGER.info('register endpoint: %s %s', path, hostport)
            # Endpoint node is created with default acl. It is ephemeral
            # and not supposed to be modified by anyone.
            _create_ephemeral_with_retry(self.zkclient, path, hostport)

    def unregister_endpoints(self):
        """Unregisters service endpoints owned by this host."""
        # BUGFIX: the log message said 'registering' while un-registering.
        _LOGGER.info('un-registering endpoints: %s', self.appname)
        endpoints = self.manifest.get('endpoints', [])
        for endpoint in endpoints:
            port = endpoint.get('port', '')
            ep_name = endpoint.get('name', str(port))
            ep_proto = endpoint.get('proto', 'tcp')
            if not ep_name:
                _LOGGER.critical('Logic error, no endpoint info: %s',
                                 self.manifest)
                return
            path = z.path.endpoint(self.appname, ep_proto, ep_name)
            _LOGGER.info('un-register endpoint: %s', path)
            try:
                data, _metadata = self.zkclient.get(path)
                # Delete only when the stored host:port belongs to us.
                if data.split(':')[0] == self.hostname:
                    self.zkclient.delete(path)
            except kazoo.client.NoNodeError:
                _LOGGER.info('endpoint node does not exist.')

    def register_identity(self):
        """Register app identity."""
        identity_group = self.manifest.get('identity_group')
        # If identity_group is not set or set to None, nothing to register.
        if not identity_group:
            return
        identity = self.manifest.get('identity', _INVALID_IDENTITY)
        _LOGGER.info('Register identity: %s, %s', identity_group, identity)
        _create_ephemeral_with_retry(
            self.zkclient,
            z.path.identity_group(identity_group, str(identity)),
            {'host': self.hostname, 'app': self.appname},
        )

    def unregister_identity(self):
        """Unregister app identity (only when this host owns it)."""
        identity_group = self.manifest.get('identity_group')
        # If identity_group is not set or set to None, nothing to register.
        if not identity_group:
            return
        identity = self.manifest.get('identity', _INVALID_IDENTITY)
        _LOGGER.info('Unregister identity: %s, %s', identity_group, identity)
        path = z.path.identity_group(identity_group, str(identity))
        try:
            data = zkutils.get(self.zkclient, path)
            if data['host'] == self.hostname:
                zkutils.ensure_deleted(self.zkclient, path)
        except kazoo.client.NoNodeError:
            _LOGGER.info('identity node %s does not exist.', path)
def server_node(hostname, presence_id):
    """Return server.presence node for given hostname and presence_id.

    Legacy nodes (presence_id == '-1') carry no '#<id>' suffix.
    """
    suffix = '' if presence_id == '-1' else '#{}'.format(presence_id)
    return hostname + suffix
def parse_server(node):
    """Return (hostname, presence_id) for given server.presence node.

    Nodes without a '#' are legacy and map to presence id '-1'.
    """
    if '#' not in node:
        return node, '-1'
    # BUGFIX: split on the first '#' only — an extra '#' in the id part
    # used to raise ValueError from the unbounded-split unpacking.
    hostname, presence_id = node.split('#', 1)
    return hostname, presence_id
def server_hostname(node):
    """Return hostname for given server.presence node."""
    hostname, _presence_id = parse_server(node)
    return hostname
def find_server(zkclient, hostname):
    """Return the path of the newest server.presence node for *hostname*.

    Returns None when the host has no presence node.
    """
    nodes = zkclient.get_children(z.SERVER_PRESENCE)
    # Reverse-sorted so the highest sequence number (newest) wins.
    for node in sorted(nodes, reverse=True):
        if server_hostname(node) != hostname:
            continue
        return z.path.server_presence(node)
def register_server(zkclient, hostname, node_info):
    """Register server.

    Merges *node_info* into the server's data node, then creates an
    ephemeral, sequenced server.presence node owned by the host.

    Returns:
        The path of the created presence node.
    """
    server_path = z.path.server(hostname)
    server_data = zkutils.get(zkclient, server_path)
    server_data.update(node_info)
    _LOGGER.info('Registering server %s: %r', hostname, server_data)
    zkutils.update(zkclient, server_path, server_data)
    host_acl = zkutils.make_host_acl(hostname, 'rwcda')
    # Ephemeral + sequence: Zookeeper appends the sequence number after
    # the trailing '#'.
    return zkutils.put(
        zkclient, z.path.server_presence(hostname + '#'), {'seen': False},
        acl=[host_acl], ephemeral=True, sequence=True
    )
def unregister_server(zkclient, hostname):
    """Delete the server.presence node for *hostname*, if one exists."""
    _LOGGER.info('Unregistering server %s', hostname)
    presence_path = find_server(zkclient, hostname)
    if not presence_path:
        return
    zkutils.ensure_deleted(zkclient, presence_path)
def kill_node(zkclient, node):
    """Kills app, endpoints, and server node.

    For every app placed on *node*, removes its running/endpoint presence,
    then removes the server's own presence node.
    """
    _LOGGER.info('killing node: %s', node)
    try:
        zkutils.get(zkclient, z.path.server(node))
    except kazoo.client.NoNodeError:
        # Unknown server: nothing to clean up.
        _LOGGER.info('node does not exist.')
        return
    apps = zkclient.get_children(z.path.placement(node))
    for app in apps:
        _LOGGER.info('removing app presence: %s', app)
        try:
            manifest = zkutils.get(zkclient, z.path.scheduled(app))
            app_presence = EndpointPresence(zkclient,
                                            manifest,
                                            hostname=node,
                                            appname=app)
            app_presence.unregister_running()
            app_presence.unregister_endpoints()
        except kazoo.client.NoNodeError:
            # App was unscheduled while we were iterating.
            _LOGGER.info('app %s no longer scheduled.', app)
    _LOGGER.info('removing server presence: %s', node)
    unregister_server(zkclient, node)
|
import torch
import numpy as np
import pandas as pd
from MyDataLoader import *
from CNN import *
from FC import *
from AlexNet import *
def is_right(prediction, label):
    """Return True when the argmax of *prediction* equals *label*.

    Args:
        prediction: tensor of shape (1, num_classes) with class scores.
        label: expected class index (int).

    BUGFIX: the original scanned for the max with a sentinel of -1 and a
    shadowed builtin `max`; if every score were <= -1 it would report
    index -1 instead of the true argmax. np.argmax has no such sentinel.
    """
    scores = prediction.data.numpy()[0]
    index = int(np.argmax(scores))
    print(index, label)
    return index == label
if __name__ == "__main__":
filename = "./train_data/train_label.csv"
image_dir = "./train_data/binary_image"
test_csv = "./test_data/test_label.csv"
test_dir = "./test_data/binary_image"
image_shape = (28, 28)
image_label_list = []
with open(test_csv, 'r') as f:
lines = f.readlines()
for line in lines:
content = line.rstrip().split(',')
# print(content)
name = test_dir + "/" + content[0]
label = content[1]
image_label_list.append((name, label))
image_label_list_test = image_label_list
my_dataset = train_set(filename, image_dir, image_shape)
exist = False
epoch_num = 5 # 总样本循环次数
batch_size = 10 # 训练时的一组数据的大小
train_data_nums = 10
# max_iterate = int((train_data_nums + batch_size - 1) / batch_size * epoch_num) # 总迭代次数
net = simpleNet()
if exist:
net.load_state_dict(torch.load("./model/LeNet/LeNet_model0.9908.pth"))
optim = torch.optim.SGD(net.parameters(), lr=0.001, weight_decay=0.0001)
criterion = torch.nn.MSELoss()
train_data = train_set(filename=filename, image_dir=image_dir, image_shape=image_shape, repeat=1)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=False)
max_score = 0
for epoch in range(epoch_num):
print("===============training===============")
index = 0
for batch_image, batch_label in train_loader:
loss = 0
batch_image = batch_image.float()
batch_label = batch_label.float()
for i in range(batch_image.shape[0]):
prediction = net.forward(batch_image[i].view(1, 784))
loss = loss + criterion(prediction, batch_label[i])
index = index + 1
optim.zero_grad()
loss.backward()
optim.step()
print("train:epoch:{}, data_index:{}/42000, loss:{:.4f}".format(epoch, index, loss))
print("================test================")
right_count = 0
for i in range(len(image_label_list_test)):
image_data_path = image_label_list_test[i][0]
image_label = int(image_label_list_test[i][1])
image_data = np.array(Image.open(image_data_path))
image_data = torch.tensor(image_data).float()
image_data = image_data.view(1, 784)
prediction = net.forward(image_data)
# print(prediction)
if is_right(prediction, image_label):
right_count = right_count + 1
print("test:index:{}/18000".format(i))
print("score:{:.4f}".format(right_count/len(image_label_list_test)))
if right_count/len(image_label_list_test) > max_score:
max_score = right_count/len(image_label_list_test)
torch.save(net.state_dict(), "./model/FC_model/FC_model{:.4f}.pth".format(max_score))
|
import sys  # NOTE(review): unused below — presumably leftover; confirm before removing
# starting number in sequence
value1 = '21'
# calculate value2 digits of sequence
# (NOTE(review): fib() actually counts emitted values, not digits)
value2 = '40'
# write your solution here
def fib(x, y):
    """Return *y* comma-separated Fibonacci values, starting with the
    successor of the first Fibonacci number equal to *x*.

    BUGFIX: the original appended the second value without a separating
    comma when x == 1 (1 appears twice in the sequence), producing
    malformed output like '12,3,5,...'. Collecting values in a list and
    joining fixes that.

    NOTE: if *x* is not a Fibonacci number this loops forever (same as
    the original behaviour).
    """
    values = []
    started = False
    a, b = 0, 1
    while len(values) < y:
        if a == x:
            started = True
        if started:
            values.append(str(b))
        a, b = b, a + b
    return ','.join(values)
print(fib(int(value1), int(value2))) |
import cv2
import numpy as np
import pyautogui as pag
import keyboard

# open the settings file containing basic configs
with open("settings.txt") as f:
    lines = f.readlines()
scaling_factor = int(lines[0].split("=")[-1].strip())  # ratio of object movement to mouse pointer movement
click_threshold = int(lines[1].split("=")[-1].strip())  # threshold of object movement which triggers left mouse click
window = int(lines[2].split("=")[-1].strip())  # whether to show camera or not
mask_tog = int(lines[3].split("=")[-1].strip())  # whether to show mask or not
"""
function description: movement(pos, scale, orientation)
pos - length of movement
scale - scaling factor
orientation - 0 if movement in x direction, 1 if movement in y direction
function to return value of mouse movement based on object movement
Also tells whether to move the mouse pointer or to click
return type: (bool a, integer b):
bool a tells whether it's a click or not
integer b tells how much to move the mouse pointer
"""
def movement(pos, scale, orientation):
    """Map a raw object displacement to a pointer action.

    Returns (is_click, delta): is_click is 1 when a vertical move beyond
    click_threshold should trigger a left click, else 0; delta is how
    far to move the pointer.
    """
    if orientation == 1 and pos > click_threshold:
        # large vertical movement -> treat as a click
        return 1, pos
    magnitude = abs(pos)
    if magnitude < 5:
        # ignore jitter: don't move on tiny displacements
        return 0, 0
    if magnitude < 50:
        # linear response for moderate movements
        return 0, pos*scale
    if magnitude < click_threshold:
        # damp faster movements
        return 0, pos*scale*0.2
    # beyond the threshold: treat as noise and stay put
    return 0, 0
pag.FAILSAFE = False # don't kill camera if object goes out of frame
colour = [-1] # stores color of object
"""
function description: def rgb_to_hsv(r, g, b)
r, g, b - RGB values of object
function to convert RGB value of oject to HSV values
return type: (integer h):
hue value for given RGB values
"""
def rgb_to_hsv(r, g, b):
    """Convert an RGB colour (0-255 channels) to its hue on OpenCV's
    0-179 half-degree scale."""
    scaled = (r / 255.0, g / 255.0, b / 255.0)
    cmax, cmin = max(scaled), min(scaled)
    diff = cmax - cmin
    if diff == 0:
        # grey: hue is defined as zero
        hue = 0
    elif cmax == scaled[0]:
        hue = (60 * ((scaled[1] - scaled[2]) / diff) + 360) % 360
    elif cmax == scaled[1]:
        hue = (60 * ((scaled[2] - scaled[0]) / diff) + 120) % 360
    else:
        hue = (60 * ((scaled[0] - scaled[1]) / diff) + 240) % 360
    # halve: OpenCV stores hue in [0, 179]
    return hue / 2
"""
function description: mouseRGB(event,x,y,flags,param)
event - captures event, should be mouse click
x - X coordinate of object's centroid
y - Y coordinate of object's centroid
flags, param - default parameters for internal usage of function
function to calculate hue value of object which will be used as mouse pointer
return type: (NULL)
"""
def mouseRGB(event, x, y, flags, param):
    """On left-button-down, sample the clicked pixel of the global
    *frame* and store its hue in colour[0]."""
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    # frame is BGR: channel 0 is blue, 1 green, 2 red.
    blue = frame[y, x, 0]
    green = frame[y, x, 1]
    red = frame[y, x, 2]
    colour[0] = rgb_to_hsv(red, green, blue)
cv2.namedWindow('mouseRGB')  # create a window named mouseRGB
cv2.setMouseCallback('mouseRGB', mouseRGB)  # take object color as input in this window
capture = cv2.VideoCapture(0)  # open camera
kernelOpen = np.ones((5, 5))  # array to remove noise from other objects of same color
kernelClose = np.ones((20, 20))  # array to join possible holes in the object
centroidOld = [0, 0]  # holds previous centroid of object
centroidNew = [0, 0]  # holds current centroid of object
Iterator = [1, 0, 0]  # (Entry, Stay, Exit) - boolean variables
take_only_once = 1  # bool to decide whether input has been taken or not
bool_hold = 0  # bool to check if mouse is down or up
while True:
    ret, frame = capture.read()  # read frame by frame from camera
    # this ladder decides the range of colours which are acceptable,
    # which can be considered part of the object,
    # a tolerance calculation (hue window clamped to OpenCV's [0, 179]):
    if colour[0] <= 15:
        lower_limit = colour[0]*0
        upper_limit = colour[0] + 15
    elif colour[0] >= 165:
        lower_limit = colour[0] - 15
        upper_limit = colour[0]*0 + 179
    else:
        lower_limit = colour[0] - 15
        upper_limit = colour[0] + 15
    lowerBound = np.array([lower_limit, 100, 100])
    upperBound = np.array([upper_limit, 255, 255])
    if colour[0] == -1:  # has the input been taken yet, if not then take the input
        cv2.imshow('mouseRGB', frame)
        font = cv2.FONT_HERSHEY_SIMPLEX
        org = (50, 50)
        fontScale = 1
        color = (255, 0, 0)
        thickness = 2
        frame = cv2.putText(frame, 'Select the object to be used as mouse pointer.', org, font, fontScale, color, thickness, cv2.LINE_AA)
    # if the input has been taken, close the window that takes input
    if colour[0] != -1 and take_only_once == 1:
        take_only_once = 0
        cv2.destroyWindow('mouseRGB')
    if cv2.waitKey(1) == ord('q'):
        break
    # if input has been taken, start depicting the object as mouse pointer
    if colour[0] != -1:
        img = frame  # give video frame to openCV
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # convert BGR to HSV
        mask = cv2.inRange(imgHSV, lowerBound, upperBound)  # create the Mask
        # reduce noise for other same coloured objects
        maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
        # fill holes inside the object
        maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
        maskFinal = maskClose
        # find contours of the object and hierarchy
        conts, heir = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        # outline every detected contour on the camera image
        cv2.drawContours(img, conts, -1, (255, 0, 0), 3)
        x, y, w, h = 0, 0, 0, 0  # initialize the dimensions of the object
        Iterator[2] = 1
        # iterate over all objects
        for i in range(len(conts)):
            x, y, w, h = cv2.boundingRect(conts[0])  # form a rectangle around the object
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)  # depict on the screen
        # if object enters on screen for first time:
        if Iterator[0] == 1 and Iterator[1] == 0:
            Iterator[0] = 0
            Iterator[1] = 0
            Iterator[2] = 0
        # transition from object enters to object stays
        elif Iterator[0] == 0 and Iterator[1] == 0:
            Iterator[0] = 0
            Iterator[1] = 1
            Iterator[2] = 0
        # object is still on screen
        elif Iterator[1] == 1:
            Iterator[2] = 0
        # calculate the position where mouse pointer has to be moved next
        # NOTE(review): y-centroid uses w (width), not h — looks like a
        # typo in the original; confirm intent before changing.
        centroidNew = [int(x)+int(w)/2, int(y)+int(w)/2]
        if Iterator[2] == 1:  # if object goes out of screen, don't move the mouse pointer
            centroidNew = [0, 0]
            centroidOld = [0, 0]
        # calculate movement using function defined
        bool_click, movementX = movement((centroidNew[0] - centroidOld[0]), scaling_factor, 0)
        bool_click, movementY = movement((centroidNew[1] - centroidOld[1]), scaling_factor, 1)
        # Object Enters
        if Iterator[0] == 0 and Iterator[1] == 0 and Iterator[2] == 0:
            pag.moveTo(centroidNew[0] - centroidOld[0], centroidNew[1] - centroidOld[1])
        # Object stays and no click triggered
        if Iterator[1] == 1 and Iterator[2] == 0 and bool_click == 0:
            # x is mirrored so moving the object left moves the pointer left
            pag.move((-1)*(movementX), movementY)
        # Object stays and left mouse click released
        if Iterator[1] == 1 and Iterator[2] == 0 and bool_click == 1 and bool_hold == 1:
            pag.mouseUp()
            bool_hold = 0
        # Object stays and left mouse click held
        elif Iterator[1] == 1 and Iterator[2] == 0 and bool_click == 1 and bool_hold == 0:
            pag.mouseDown()
            bool_hold = 1
        centroidOld = centroidNew  # assign new centroid to old centroid to calculate distance moved
        if window == 1:  # enable user video
            cv2.imshow("cam", img)
        if mask_tog == 1:
            cv2.imshow("Mask_final", maskFinal)
            cv2.imshow("Mask", mask)
    if (keyboard.is_pressed('esc')):  # exit condition
        print("exiting loop")
        break
capture.release()  # stop video
cv2.destroyAllWindows()  # destroy all windows
|
import matplotlib.pyplot as plt
import numpy as np
def get_data(filename):
    """Load whitespace-delimited points from dataSets/<filename>.txt."""
    path = "dataSets/{}.txt".format(filename)
    return np.loadtxt(path)
def plot_data(filename):
    """Scatter-plot every (x, y) point of the named data set in red."""
    points = get_data(filename)
    for row in points:
        px, py = row[0], row[1]
        plt.plot(px, py, 'ro')
    plt.title(filename)
    plt.show()
print(get_data("gmm"))
plot_data("gmm")
|
import unittest
from botsrc import Bot
class BotTestCase(unittest.TestCase):
    """Smoke test for the Bot wrapper's API connectivity."""

    def setUp(self):
        # NOTE(review): a bot id and token are hard-coded here — this is a
        # committed credential and should be loaded from the environment
        # (and the token rotated).
        self.Bot = Bot(473559457, ":AAH5NFuZppQP0PrypaussjDoo_d0FpJUDxg")

    def test_bot_connect(self):
        # getMe() is expected to report True when the API handshake works.
        self.assertEqual(self.Bot.getMe(), True, 'cannot connect')
# Run the test case when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Work With Python3
import os
import stat
from shutil import rmtree
from subprocess import check_call
def resolve_path(rel_path):
    """Absolute path of *rel_path*, taken relative to this script's directory."""
    base = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(base, rel_path))
def rmtree_silent(root):
    """shutil.rmtree that retries read-only entries after making them writable."""
    def _on_error(fn, path, excinfo):
        # Invoked by rmtree when a delete fails — typically a read-only
        # file/dir on Windows. Clear the read-only bit and retry once.
        if fn is os.rmdir and os.path.isdir(path):
            os.chmod(path, stat.S_IWRITE)
            os.rmdir(path)
        elif fn is os.remove and os.path.isfile(path):
            os.chmod(path, stat.S_IWRITE)
            os.remove(path)
    rmtree(root, onerror=_on_error)
def makedirs_silent(root):
    """Create *root* and any missing parents; a directory that already
    exists is not an error.

    BUGFIX: the original swallowed EVERY OSError (including permission
    errors); exist_ok=True mutes only the already-exists case.
    """
    os.makedirs(root, exist_ok=True)
def test_pre():
    """Copy the reference wav into build/ when it is not already there.

    BUGFIX: the original leaked both file handles (open() without
    close()); context managers guarantee they are closed.
    """
    if not os.path.exists('build/vaiueo2d.wav'):
        with open('test/vaiueo2d.wav', 'rb') as src, \
                open('build/vaiueo2d.wav', 'wb') as dst:
            dst.write(src.read())
if __name__ == "__main__":
build_dir = resolve_path("build")
rmtree_silent(build_dir + '/CMakeFiles/')
makedirs_silent(build_dir)
test_pre()
os.chdir(build_dir)
check_call([
"cmake",
os.path.expandvars(
"-DCMAKE_TOOLCHAIN_FILE=C:\\ProgramData\\emsdk\\upstream\\emscripten\\cmake\\Modules\\Platform\\Emscripten.cmake"),
"-DCMAKE_BUILD_TYPE=Release",
"-DCMAKE_MAKE_PROGRAM=mingw32-make",
"-G",
"Unix Makefiles",
".."
])
try:
check_call(["mingw32-make"])
except:
print('Build Fail')
|
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import WebDriverException
class WebPage:
    """
    This is a parent class for all web-page classes created in this project.
    This class provides the basic common functionality required for web-page.
    """

    def __init__(self, driver):
        self.driver = driver
        # 10-second explicit wait shared by all helper methods below.
        self.wait = WebDriverWait(self.driver, 10)

    def check_page_load_success(self):
        """Return False (and log) when the browser's 'site can't be
        reached' error page is showing; True otherwise."""
        if 'This site can’t be reached' in self.driver.page_source:
            print('Page not found')
            return False
        return True

    def check_element_accessible(self, element, expected_text):
        """Poll (up to 6 wait cycles) for *expected_text* to appear in the
        element located by XPath *element*.

        Returns the truthy wait result on success, False after the retries
        are exhausted.
        """
        iter_count = 0
        elem_found = False
        while not elem_found:
            try:
                elem_found = self.wait.until(ec.text_to_be_present_in_element((By.XPATH, element), expected_text))
            except Exception as e:
                print(e)
            iter_count = iter_count + 1
            if iter_count > 5:
                break
        return elem_found

    def element_tobe_clickable(self, by, element_id):
        """Wait until the element is clickable; returns the element, or
        False on timeout (never raises)."""
        try:
            found_element = self.wait.until(ec.element_to_be_clickable((by, element_id)))
            return found_element
        except TimeoutException as e:
            print(e)
            return False

    def visibility_of_element_located(self, by, element_id):
        """Wait until the element is visible; returns the element, or
        None on timeout.

        NOTE(review): the log message is login-specific although this
        helper is generic — confirm before reusing elsewhere.
        """
        try:
            found_element = self.wait.until(ec.visibility_of_element_located((by, element_id)))
            return found_element
        except TimeoutException as e:
            print('Login pop up did not appear.', e)

    def presence_of_required_element(self, by, element_id):
        """Wait until the element is present in the DOM; returns the
        element, or None on timeout."""
        try:
            found_element = self.wait.until(ec.presence_of_element_located((by, element_id)))
            return found_element
        except TimeoutException as e:
            print(e)

    def element_clickable(self, by, element_id):
        """Wait until the element is clickable and return it.

        Raises TimeoutException when it never becomes clickable (the
        explicit raise below is defensive; wait.until itself raises on
        timeout).
        """
        element_found = self.wait.until(ec.element_to_be_clickable((by, element_id)))
        if not element_found:
            raise TimeoutException
        else:
            return element_found

    def element_visible_loc(self, by, element_id):
        """Wait until the element is visible and return it (False if the
        wait somehow yields a falsy value)."""
        element_found = self.wait.until(ec.visibility_of_element_located((by, element_id)))
        if not element_found:
            return False
        else:
            return element_found
|
"""Custom authentication for DRF."""
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django_otp.models import Device
from drf_spectacular.contrib.rest_framework_simplejwt import SimpleJWTScheme
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework_simplejwt.authentication import JWTAuthentication
from . import constants
class NotVerified(APIException):
    """Raised when a 2FA-enabled user presents a token lacking 2FA proof."""

    default_code = 'not_verified'
    default_detail = _('Missing 2FA verification.')
    status_code = status.HTTP_418_IM_A_TEAPOT
class JWTAuthenticationWith2FA(JWTAuthentication):
    """JWT authentication that additionally enforces two-factor verification."""

    def verify_user(self, request, user):
        """Raise NotVerified unless the token proves a completed 2FA check.

        The code-verification endpoint itself is exempt, since it is the
        URL a user must call to obtain a verified token in the first place.
        """
        raw_token = self.get_raw_token(self.get_header(request))
        validated_token = self.get_validated_token(raw_token)
        exempt_paths = (
            reverse("v2:account-tfa-verify-code"),
        )
        if request.path in exempt_paths:
            return
        if constants.TFA_DEVICE_TOKEN_KEY in validated_token:
            device = Device.from_persistent_id(
                validated_token[constants.TFA_DEVICE_TOKEN_KEY])
            # Token must reference a device that belongs to this user.
            if device is not None and device.user_id == user.pk:
                return
        raise NotVerified

    def authenticate(self, request):
        """Authenticate via JWT, then enforce 2FA for users that enabled it."""
        result = super().authenticate(request)
        if result is None:
            return None
        user, token = result
        if user.tfa_enabled:
            self.verify_user(request, user)
        return user, token
class SimpleJWTWith2FAScheme(SimpleJWTScheme):
    # drf-spectacular extension: documents JWTAuthenticationWith2FA in the
    # generated OpenAPI schema under the security-scheme name below.
    target_class = 'modoboa.core.drf_authentication.JWTAuthenticationWith2FA'
    name = 'jwt2FAAuth'
|
import os
import logging
from aws import SNS
def sender_stdout(report, **kwargs):
    """Print *report* to stdout and report success."""
    print(report)
    return True
def sender_sns(report, **kwargs):
    """Publish *report* to the SNS topic named by the SNS_TOPIC env var.

    ``kwargs`` must contain 'region'.  Returns the SNS publish response.
    """
    topic_arn = os.environ['SNS_TOPIC']
    logging.info('sending report over SNS %s', topic_arn)
    return SNS(kwargs['region']).publish(
        TopicArn=topic_arn,
        Message=report,
        Subject='ASG Subnet Audit Report',
    )
class SendReport:
    """Holds a report and dispatches it through a pluggable sender callable."""

    def __init__(self, report):
        self._report = report

    def send(self, sender=sender_stdout, **kwargs):
        """Send the stored report via *sender* (defaults to stdout printing)."""
        return sender(self._report, **kwargs)
|
"""
zum etwas austesten und zwischenspeichern gedacht
"""
import random
# my_list = ['Holz', 'Wasser', 'Mücke']
#
# print(my_list[my_list.index('Holz')+1])
# print(my_list.index('Holz')+1)
#
# my_dict = {'item': {
# 'potion': {
# 'manapotion': 3,
# 'healingpotion': 0
# },
# 'ingredient': {
# 'wood': 0,
# 'wood_max': 10,
#
# },
# },
# }
#
# # self.ingredient[self.ingredient.index(resource)]
# my_list = my_dict['item']['ingredient']
#
# print(my_list['wood'+'_max'])
# from MyFactory import MyFactory
# from data import character_props as props
# import random
#
# class_list =[['Wizard', 'suny'], ['Witch', 'ursel'], ['Warrior', 'kurak']]
#
# random.shuffle(class_list)
# class_name = class_list[0][0]
#
# instance = MyFactory.instanciate(class_name, props[class_name.lower()][class_list[0][1]])
#
# print(instance.name)
###################################################################################################
# char_list = [f" {chr(882)} ", f" {chr(882)} ", f" {chr(127801)} ", f" {chr(127799)} ", f" {chr(127799)} ",
# f" {chr(127801)} ", f" {chr(9962)} ", f" {chr(882)} ", f" {chr(127799)} "]
# char_list = [f"{chr(882)}", f"{chr(882)}", f"{chr(127801)}", f"{chr(127799)}", f"{chr(127799)}",
# f"{chr(127801)}", f"{chr(9962)}", f"{chr(882)}", f"{chr(127799)}"]
#
# random.shuffle(char_list)
#
# myboard = [char_list[i % len(char_list)] for i in range(25)]
#
#
#
# tmp_str = ''
# for i in range(0, 25, 5):
# tmp_str = ''
# for j in range(5):
# tmp_str += str(myboard[(i+j)%len(myboard)] + '\t')
# # tmp_str += format(str(myboard[(i + j) % len(myboard)]), '^6')
# print(tmp_str)
# stringmy = 'Hallo du da {:s} du brauchst'
# print(stringmy.format(str(5)))
#####################################################################################
# dic = {
# '\\' : b'\xe2\x95\x9a',
# '-' : b'\xe2\x95\x90',
# '/' : b'\xe2\x95\x9d',
# '|' : b'\xe2\x95\x91',
# '+' : b'\xe2\x95\x94',
# '%' : b'\xe2\x95\x97',
# }
#
# def decode(x):
# return (''.join(dic.get(i, i.encode('utf-8')).decode('utf-8') for i in x))
#
# print(decode('+-------------------------------------%'))
# print(decode('| Willkommen bei uns im Zauberwald |'))
# print(decode('\\-------------------------------------/'))
# Decode the UTF-8 byte sequence for the box-drawing corner and print it.
corner = b'\xe2\x95\x94'.decode('utf-8')
print(corner)
#######################################################################################
# if i % 5:
# tmp_str += "\n"
# print(tmp_str)
# class Tile:
#
# def __init__(self):
# self.neighbors = []
# self.sign = [char_list]
# class Boardy:
#
# def __init__(self, size):
# self.size = size
# self.tiles = []
#
# def build_board(self):
# for i in range(self.size**2):
# x = i % self.size
# y = i // self.size
# self.tiles.append(Tile())
# return board
#
#
# mychar = Tile()
#
# mychar.sign[0]
# print('hallo', mychar)
# format(my_feld[(i + 1 + j) % len(my_feld)], '<1s')
#
# board = Boardy(10)
#
# my_board = board.build_board()
#
# print(my_board)
# the_board = [my_board.tiles[i*10:i*10+10] for i in range(10)]
#
# width = 10
# i = 0
# j = 0
#
# for tile in the_board:
# tmp_str = ''
# i += 1
#
# tmp_str += str(tile.sign[i % len(tile.sign)])
# j += 1
# print(' '.join(tmp_str))
#####################################################################
# class Tile:
#
# def __init__(self):
# self.neighbors = []
# self.sign = '_'
#
# def add_neighbor(self, neighbor):
# self.neighbors.append(neighbor)
#
# def __str__(self):
# return self.sign
#
#
# class Boardy:
#
# def __init__(self, size):
# self.size = size
# self.tiles = []
#
# def build_board(self):
# for i in range(self.size**2):
# x = i % self.size
# y = i // self.size
# self.tiles.append(Tile())
# return board
#
#
#
# board = Boardy(10)
#
# my_board = board.build_board()
# the_board = [my_board.tiles[i*10:i*10+10] for i in range(10)]
#
# for row in the_board:
# tmp_str = ''
# for tile in row:
# tmp_str += tile.sign + ' '
# print(tmp_str)
# y * w + x
####################################################################################################################
# Alte Funktion safen
# move Player
# def self_move_b(self, my_pos, game_field): # OK
# """ Bescheibung:
#
# Vergleicht die X und Y Position des Spielers, speichert die Richtung, die der Spieler gehn kann,
# in X und Y und gibt diese zurück
#
# :param my_pos:
# :param game_field:
# :return: y, x, go gibt die Koordinaten X und Y zurück sowie die Richtung(z.b. Norden)
#
# Details:
# X Achse
# Y|0|_|_|
# A|_|_|_|
# c|_|_|_|
# Spieler steht auf X 0 und Y 0 so kann er nur in 3 Richtungen ziehen
# Bei X 1 und Y 1 wären es 8 Richtungen
# Bei falschen oder keinen Eingaben wird die Funktion erneut aufgerufen(rekursiv)
# """
# y = my_pos[0]
# x = my_pos[1]
# max_xy = len(game_field) - 1
#
# if y == 0 and x == 0:
# richtung = input('(d) für E, (c) für SE und (x) für S\n')
# wertung = ['d', 'c', 'x']
#
# elif y == 0 and x == max_xy:
# richtung = input('(x) für S, (y) für SE und (a) für W\n')
# wertung = ['x', 'y', 'a']
#
# elif y == 0 and 0 < x < max_xy:
# richtung = input('(d) für E, (c) für SE, (x) für S, (y) für SW und (a) für W\n')
# wertung = ['d', 'c', 'x', 'y', 'a']
#
# elif x == 0 and 0 < y < max_xy:
# richtung = input('(w) für N, (e) für NE, (d) für E, (c) für SE und (x) für S\n')
# wertung = ['w', 'e', 'd', 'c', 'x']
#
# elif x == 0 and y == max_xy:
# richtung = input('(w) für N, (e) für NE, (d) für E\n')
# wertung = ['w', 'e', 'd']
#
# elif 0 < x < max_xy and 0 < y < max_xy:
# richtung = input('(w) für N, (e) für NE, (d) für E, (c) für SE\n'
# '(x) für S, (y) für SW, (a) für W und (q) für NW\n')
# wertung = ['w', 'e', 'd', 'c', 'x', 'y', 'a', 'q']
#
# elif 0 < x < max_xy and y == max_xy:
# richtung = input('(w) für N, (e) für NE, (d) für E, (a) für W und (q) für NW\n')
# wertung = ['w', 'e', 'd', 'a', 'q']
#
# elif 0 == max_xy and y == max_xy:
# richtung = input('(w) für N, (a) für W und (q) für NW\n')
# wertung = ['w', 'a', 'q']
#
# else:
# richtung = ""
# wertung = "error"
# print(f"self_move keine positions eingabe") # TEST
#
# if richtung not in wertung:
# message_output(features['message']['wrong_entry'])
# return self.self_move(my_pos, game_field)
#
# if richtung:
# if richtung == 'w':
# y -= 1
# go = 'Norden'
#
# elif richtung == 'e':
# y -= 1
# x += 1
# go = 'Nordosten'
#
# elif richtung == 'd':
# x += 1
# go = 'Osten'
#
# elif richtung == 'c':
# y += 1
# x += 1
# go = 'Südosten'
#
# elif richtung == 'x':
# y += 1
# go = 'Süden'
#
# elif richtung == 'y':
# y += 1
# x -= 1
# go = 'Südwest'
# elif richtung == 'a':
# x -= 1
# go = 'Westen'
# elif richtung == 'q':
# y -= 1
# x -= 1
# go = 'Nordwesten'
# else:
# go = 'Da ging erwas schief' # TEST
# else:
# go = 'Richtung leer' # TEST
#
# return y, x, go
|
'''
Created on 2017年2月4日
@author: admin
import userinfo # 导入函数
# 获取字典数据
info = userinfo.zidian()
# 通过 items() 循环读取元组(键/值对)
for us, pw in info.items():
print(us)
print(pw)
'''
import csv # 导入 csv 包
from _csv import Dialect
# Read the local CSV file of user records.
my_file = 'F:\\workspace\\hola world\\selenium\\userinfo.csv'
# The Python-2-only builtin file() does not exist on Python 3, and the csv
# module expects text mode with newline='' rather than 'rb'.
with open(my_file, newline='') as fh:
    data = csv.reader(fh)
    print(data)
    # Print the first four columns of every row.
    for user in data:
        print(user[0])
        print(user[1])
        print(user[2])
        print(user[3])
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The above encoding declaration is required and the file must be saved as UTF-8
"""
Якщо одного слова для змістовної назви недостатньо, слова в імені змінної розділюються підкресленням.
Int - цілі числа: 1, 2, 0, -10, 9999 і т.д.
Відображаються просто як числа.
Для перетворення будь-якого значення на ціле число використовується функція int():
"""
# int() truncates the fractional part when converting a float to an integer.
x_float = 1.0
x_int = int(x_float)  # x_int == 1
|
import os
import sys
import urllib2
import subprocess
from copy import deepcopy
from distutils import version
# Interpreter versions for which a numpy-dev virtualenv should be built.
PYTHON_VERSIONS = ['2.6', '2.7', '3.1', '3.2', '3.3', '3.4']
# Shell template; {pv} = python version, {full} = virtualenv directory name.
SCRIPT = """
# Create virtual environment
/opt/local/bin/virtualenv-{pv} -v {full}
# Install Cython
{full}/bin/pip install Cython
"""
def setup_virtualenv(versions):
    """Create the virtualenv described by *versions* and install Cython.

    *versions* maps 'pv' -> python version string and 'full' -> env name.
    All shell output is captured in '<full>.log'.  Returns the shell's
    exit status.

    Fixes: the Python-2 ``print`` statement (a syntax error on Python 3)
    and the log file handle not being closed when call() raised.
    """
    print(versions['full'])
    with open('{full}.log'.format(**versions), 'w') as log_file:
        code = subprocess.call(SCRIPT.format(**versions), shell=True,
                               stdout=log_file, stderr=log_file)
    return code
# Build one parameter dict per interpreter version for setup_virtualenv().
all_versions = []
versions = {}
for python_version in PYTHON_VERSIONS:
    versions['pv'] = python_version
    versions['full'] = 'python{pv}-numpy-dev'.format(**versions)
    # deepcopy so each entry keeps its own values rather than sharing one dict.
    all_versions.append(deepcopy(versions))

from multiprocessing import Pool

if __name__ == '__main__':
    # The __main__ guard is required for multiprocessing on spawn-based
    # platforms (Windows/macOS), where worker processes re-import this
    # module; without it the Pool would be created recursively.
    p = Pool()
    p.map(setup_virtualenv, all_versions)
|
# Demo dictionary mixing string and integer keys.
diccionario = {
    "redes_socioales": ["Twitter", "Facebook", "LidenIn"],
    3: "Tres",
    "hola": "Mundo"
}
# Python 3: print is a function and dict.has_key() was removed — use `in`.
# The view objects from items()/keys()/values() are wrapped in list() so the
# printed output matches the Python-2 original.
print("ver diccionario: \n", diccionario)
print("existe hola :", "hola" in diccionario)
print("ver en forma de lista :\n", list(diccionario.items()))
print("ver lista de key :\n", list(diccionario.keys()))
print("ver lista de los valores :\n", list(diccionario.values()))
diccionario2 = diccionario.copy()
print("copiar ha diccionario2: \n", diccionario2)
diccionario["hola2"] = "adios"
print("agregar 1 elemento al diccionario: \n", diccionario2)
print("sacar la key 3 : ", diccionario.pop(3))
print("sacar la key 3 otra vez : ", diccionario.pop(3, "no existe 3"))
print("borrar key hola")
del diccionario["hola"]
print("ver diccionario: \n", diccionario)
print("vaciar el diccionario\n")
diccionario.clear()
print("ver diccionario: \n", diccionario)
|
# Libraries
import json
import os
from flask import Flask, render_template, request, redirect, jsonify, \
abort, url_for, session, _request_ctx_stack, flash
from flask_cors import CORS
from six.moves.urllib.parse import urlencode
import sys
import datetime
from sqlalchemy import func, desc, join
# Constants for Auth0 from constants.py, secret keys stores as config variables
import auth.constants as constants
from auth.auth import AuthError, requires_auth, requires_auth_rbac, auther
# Database model
from database.models import setup_db, db, Month, User, UserHistory, Secret
# My features
from features.input_classifier import check, loc_class
from features.link_maker import links
from features.weather_widget_maker import weather_widget
from features.covid_widget_maker import covid_widget
from features.info_widget_maker import info_widget
from features.holiday_widget_maker import holiday
def create_app(test_config=None):
    """Application factory: configure Flask, CORS, Auth0 and register all routes.

    Fix applied: the 401/403/404/405 error handlers were all named
    ``not_found`` and 422/500 were both ``unprocessable``; the later
    definitions silently rebound the earlier function names (F811).
    Each handler now has a unique name; registration behavior is unchanged
    because Flask binds handlers via the decorator, not the name.
    """
    # Init app functions
    app = Flask(__name__)
    app.debug = True  # NOTE(review): debug mode should be off in production
    app.secret_key = os.environ['SECRET_KEY']
    setup_db(app)
    CORS(app)

    # CORS Headers
    @app.after_request
    def after_request(response):
        response.headers.add('Access-Control-Allow-Headers',
                             'Content-Type,Authorization,true')
        response.headers.add('Access-Control-Allow-Methods',
                             'GET,PUT,POST,DELETE,OPTIONS')
        return response

    # Auth0 initalizing from auth.py
    auth_dict = auther(app)
    auth0 = auth_dict["auth0"]
    AUTH0_CALLBACK_URL = auth_dict["url"]
    AUTH0_AUDIENCE = auth_dict["audi"]
    AUTH0_CLIENT_ID = auth_dict['id']

    """Auth0 login / logout"""
    # Start side to guide user to login/register
    @app.route('/')
    def index():
        # Get current month for go warm on
        current_month = datetime.datetime.now().month
        month_de = Month.query.filter(Month.number == current_month).one()
        month_de_str = month_de.name_de
        if len(month_de_str) == 0:
            abort(404)
        go_warm = "https://www.reise-klima.de/urlaub/" + month_de_str
        return render_template("index.html", go_warm=go_warm)

    @app.route('/login')
    def login():
        return auth0.authorize_redirect(redirect_uri=AUTH0_CALLBACK_URL,
                                        audience=AUTH0_AUDIENCE)

    @app.route('/callback')
    def callback_handling():
        # Get authorization token
        token = auth0.authorize_access_token()
        access_token = token['access_token']
        # Store access token in Flask session
        session[constants.ACCESS_TOKEN] = access_token
        resp = auth0.get('userinfo')
        userinfo = resp.json()
        session[constants.JWT_PAYLOAD] = userinfo
        session[os.environ['PROFILE_KEY']] = {
            'user_id': userinfo['sub'],
            'name': userinfo['name'],
            'picture': userinfo['picture']
        }
        # Store user email in session
        session[constants.USER_EMAIL] = userinfo["email"]
        email = session['user_email']
        # If user is new, add to users table
        res = User.query.filter(User.email == email).one_or_none()
        # Location information not available for manual registered account
        try:
            loc = userinfo['locale']
        except Exception:
            loc = None
        if res is None:
            user = User(email=session['user_email'], name=userinfo['name'],
                        location_iso2=loc)
            user.insert()
            res = User.query.filter(User.email == email).one()
        user_id = res.id
        session[constants.USER_ID] = user_id
        return redirect("/home")

    @app.route('/logout')
    def logout():
        session.clear()
        params = {'returnTo': url_for('index', _external=True),
                  'client_id': AUTH0_CLIENT_ID}
        return redirect(auth0.api_base_url + '/v2/logout?' + urlencode(params))

    """API"""
    """APP"""
    # View Travel Cockpit's vision / motivation
    @app.route("/vision")
    def get_vision():
        return render_template("vision.html")

    # View contact page
    @app.route("/contact")
    def get_contact():
        return render_template("contact.html")

    # Get destination search and view result in dashboard view
    @app.route('/home', methods=['GET', 'POST'])
    @requires_auth
    def get_post_home(jwt):
        # Get user permission, empty if user not actively got permissions
        session[constants.PERMISSION] = jwt['permissions']
        permi = jwt['permissions']
        # Check if user with or without RBAC -> Render different navi layout
        # -> Director = delete:master, Manager = delete:own
        if 'delete:own' in permi:
            session[constants.ROLE] = 'Manager'
        if 'delete:master' in permi:
            session[constants.ROLE] = 'Director'
        if request.method == "GET":
            # Get current month for go warm on
            current_month = datetime.datetime.now().month
            month_de = Month.query.filter(Month.number == current_month).one()
            month_de_str = month_de.name_de
            if len(month_de_str) == 0:
                abort(404)
            go_warm = "https://www.reise-klima.de/urlaub/" + month_de_str
            return render_template("home.html", go_warm=go_warm)
        # POST
        else:
            # Get current user_id
            # NOTE(review): local name `id` shadows the builtin — consider
            # renaming in a follow-up touching all its uses at once.
            try:
                id = session['user_id']
            # For testing with travel_cockpit_test database
            except Exception:
                id = 46
            # User input check, must be text
            # Formatting and classification with check function
            # Input via user input or blog link button
            destination = request.form.get("destination")
            req = request.args.get('dest', None, type=str)
            if destination is None:
                destination = req
            dest = check(destination)
            if not dest:
                return render_template(
                    "home.html", number=1,
                    message="Please provide TRAVEL DESTINATION")
            # Get language switch value (English or German)
            switch = request.form.get("language")
            # Get location classified dictionary
            loc_classes = loc_class(dest)
            # Post default language to dropdwon on my dashboard
            if loc_classes['language'] == 'german':
                options = ["German", "English"]
            else:
                options = ["English", "German"]
            # Button links dictionary
            links_dic = links(dest, loc_classes, switch)
            # Weather widget
            weather = weather_widget(loc_classes, switch)
            # Covid19 widget
            covid = covid_widget(loc_classes, switch)
            # Info box widget
            info = info_widget(loc_classes, switch, weather)
            print('info', info)
            # National holidays widget
            holidays = holiday(loc_classes, switch)
            print('holidays', holidays)
            # Current time
            time = datetime.datetime.now()
            # Destination for search history
            loc = loc_classes["loc_type"]
            if loc == "country":
                history = loc_classes["country_en"]
            elif loc == "big_city":
                history = loc_classes["city"]
            else:
                history = loc_classes["location"]
            # Store user search in user_history
            user_history = UserHistory(
                destination=history,
                timestamp=time,
                user_id=id)
            user_history.insert()
            return render_template("my_dashboard.html", switch=switch,
                                   loc_classes=loc_classes, links_dic=links_dic,
                                   info=info, options=options, weather=weather,
                                   covid=covid, holidays=holidays)

    # View user own history
    @app.route("/history")
    @requires_auth
    def get_history(jwt):
        # Show user's search history
        # Get current user_id
        try:
            id = session['user_id']
        # For testing with travel_cockpit_test database
        except Exception:
            id = 46
        history = UserHistory.query.filter(UserHistory.user_id == id) \
            .with_entities(UserHistory.destination,
                           func.count(UserHistory.destination)) \
            .group_by(UserHistory.destination) \
            .order_by(func.count(UserHistory.destination).desc()).all()
        return render_template("history.html", rows=history)

    # Master view of all users, only for Manager and Director RBAC roles
    @app.route("/history-all")
    @requires_auth_rbac('get:history-all')
    def get_history_all(jwt):
        hist_all = UserHistory.query \
            .with_entities(UserHistory.destination,
                           func.count(UserHistory.destination)) \
            .group_by(UserHistory.destination) \
            .order_by(func.count(UserHistory.destination).desc()).all()
        # Get unique user list of listed destinations
        data = []
        for hist in hist_all:
            users = UserHistory.query \
                .filter(UserHistory.destination == hist[0]).all()
            names = []
            for user in users:
                name = user.users.name
                if name not in names:
                    names.append(name)
            data.append({
                "destination": hist[0],
                "amount": hist[1],
                "names": names
            })
        return render_template("history_all.html", data=data)

    """TRAVEL SECRETS BLOG"""
    # View blog posts USER VIEW
    @app.route("/blog/user")
    @requires_auth
    def get_blog_user(jwt):
        blogs = Secret.query.order_by(desc(Secret.id)).all()
        try:
            userinfo = session[os.environ['PROFILE_KEY']]
        except Exception:
            userinfo = None
        return render_template(
            "blog_user.html", blogs=blogs,
            userinfo=userinfo
        )

    # View blog posts Director & Manager
    @app.route("/blog")
    @requires_auth_rbac('get:blog')
    def get_blog(jwt):
        blogs = Secret.query.select_from(join(Secret, User)) \
            .order_by(desc(Secret.id)).all()
        # Userinfo to great by name
        try:
            userinfo = session[os.environ['PROFILE_KEY']]
        except Exception:
            userinfo = None
        # Permission to steer edit & delete link buttons
        try:
            permi = jwt['permissions']
        except Exception:
            permi = None
        # User id to show only relevant edit/delete function to Manager
        try:
            id = session['user_id']
        # For testing with travel_cockpit_test database
        except Exception:
            id = 47
        return render_template("blog.html", blogs=blogs, userinfo=userinfo,
                               permi=permi, id=id)

    # Create new travel secrets
    # First get template, then post
    @app.route("/blog/create")
    @requires_auth_rbac('post:blog')
    def post_blog(jwt):
        return render_template("blog_create.html")

    @app.route("/blog/create", methods=['POST'])
    @requires_auth_rbac('post:blog')
    def post_blog_submission(jwt):
        try:
            try:
                user_id = session['user_id']
            # For testing with travel_cockpit_test database
            except Exception:
                user_id = 47
            # Get user form input and insert in database
            secret = Secret(
                title=request.form.get('title'),
                why1=request.form.get('why1'),
                why2=request.form.get('why2'),
                why3=request.form.get('why3'),
                text=request.form.get('text'),
                link=request.form.get('link'),
                user_id=user_id
            )
            secret.insert()
            flash("Blog was successfully added!")
            return redirect("/blog")
        except Exception:
            abort(405)

    # Edit travel blog post MASTER (Director)
    # First get blog then patch
    @app.route("/blog/<int:id>/edit")
    @requires_auth_rbac('patch:master')
    def patch_blog(jwt, id):
        blog = Secret.query.filter(Secret.id == id).one_or_none()
        if blog is None:
            abort(404)
        return render_template("blog_edit.html", blog=blog)

    @app.route("/blog/<int:id>/edit/submission", methods=['PATCH'])
    @requires_auth_rbac('patch:master')
    def patch_blog_submission(jwt, id):
        try:
            # Get HTML json body response
            body = request.get_json()
            secret = Secret.query.filter(Secret.id == id).one_or_none()
            # Get user edit and update database
            secret.title = body.get('title', None)
            secret.why1 = body.get('why1', None)
            secret.why2 = body.get('why2', None)
            secret.why3 = body.get('why3', None)
            secret.text = body.get('text', None)
            secret.link = body.get('link', None)
            secret.update()
            flash("Blog was successfully updated!")
            return jsonify({'success': True})
        except Exception:
            abort(405)

    # Edit travel blog post OWN (Manager)
    # First get blog then patch
    @app.route("/blog/<int:id>/edit-own")
    @requires_auth_rbac('patch:own')
    def patch_own_blog(jwt, id):
        blog = Secret.query.filter(Secret.id == id).one_or_none()
        if blog is None:
            abort(404)
        # Double check if blog was created by user
        try:
            user_id = session["user_id"]
        # For testing with travel_cockpit_test database
        except Exception:
            user_id = 46
        if user_id != blog.user_id:
            abort(403)
        return render_template("blog_edit_own.html", blog=blog)

    @app.route("/blog/<int:id>/edit-own/submission", methods=['PATCH'])
    @requires_auth_rbac('patch:own')
    def patch_own_blog_submission(jwt, id):
        try:
            # Get HTML json body response
            body = request.get_json()
            secret = Secret.query.filter(Secret.id == id).one_or_none()
            # Double check if blog was created by user
            try:
                user_id = session["user_id"]
            # For testing with travel_cockpit_test database
            except Exception:
                user_id = 46
            if user_id != secret.user_id:
                abort(403)
            # Get user edit and update database
            secret.title = body.get('title', None)
            secret.why1 = body.get('why1', None)
            secret.why2 = body.get('why2', None)
            secret.why3 = body.get('why3', None)
            secret.text = body.get('text', None)
            secret.link = body.get('link', None)
            # Update database
            secret.update()
            flash("Blog was successfully updated!")
            return jsonify({'success': True})
        except Exception:
            abort(405)

    # Delete blog MASTER (Director)
    @app.route("/blog/<int:id>/delete", methods=['DELETE'])
    @requires_auth_rbac('delete:master')
    def delete_blog_master(jwt, id):
        try:
            secret = Secret.query.filter(Secret.id == id).one_or_none()
            if secret is None:
                abort(404)
            secret.delete()
            flash("Blog was DELETED!")
            return jsonify({'success': True})
        except Exception:
            abort(422)

    # Delete blog OWN (Manager)
    @app.route("/blog/<int:id>/delete-own", methods=['DELETE'])
    @requires_auth_rbac('delete:own')
    def delete_blog_own(jwt, id):
        try:
            secret = Secret.query.filter(Secret.id == id).one_or_none()
            if secret is None:
                abort(404)
            # Double check if blog was created by user
            try:
                user_id = session["user_id"]
            # For testing with travel_cockpit_test database
            except Exception:
                user_id = 46
            if user_id != secret.user_id:
                abort(403)
            # Delete in database
            secret.delete()
            flash("Blog was DELETED!")
            return jsonify({'success': True})
        except Exception:
            abort(422)

    """Error handler"""
    # Each handler has a unique name; duplicated names previously rebound
    # earlier handler functions inside this scope.
    @app.errorhandler(400)
    def bad_request(error):
        return jsonify({
            "success": False,
            "error": 400,
            "message": "Bad request"
        }), 400

    @app.errorhandler(401)
    def unauthorized(error):
        return jsonify({
            "success": False,
            "error": 401,
            "message": "Unauthorized"
        }), 401

    @app.errorhandler(403)
    def forbidden(error):
        return jsonify({
            "success": False,
            "error": 403,
            "message": "Forbidden access"
        }), 403

    @app.errorhandler(404)
    def not_found(error):
        return jsonify({
            "success": False,
            "error": 404,
            "message": "Resource NOT found"
        }), 404

    @app.errorhandler(405)
    def method_not_allowed(error):
        return jsonify({
            "success": False,
            "error": 405,
            "message": "Method NOT allowed"
        }), 405

    @app.errorhandler(422)
    def unprocessable(error):
        return jsonify({
            "success": False,
            "error": 422,
            "message": "Unprocessable"
        }), 422

    @app.errorhandler(500)
    def internal_server_error(error):
        return jsonify({
            "success": False,
            "error": 500,
            "message": "Internal database error"
        }), 500

    return app
# Create the WSGI application at import time (used by WSGI servers).
app = create_app()
if __name__ == '__main__':
    # Local development entry point.
    app.run()
|
def b2a_hex(val: bytes) -> bytes:
    """Stub: hex-encode the binary data *val* (mirrors binascii.b2a_hex).

    The parameter was annotated with the builtin function ``any``, which is
    not a type; binary input is annotated as ``bytes``.
    """
    ...
# Stub: decode a hexadecimal string back into bytes (cf. binascii.a2b_hex).
def a2b_hex(val: str) -> bytes: ...
|
import utils_tasks as utils
import hydra
import os
import logging
# Module-level logger named after this module (standard logging pattern).
log = logging.getLogger(__name__)
def run_task(task):
    """Run one configured task as a shell command; return its exit status as str.

    The command pins CUDA devices and the hydra config path, then invokes the
    task's python entry point with its configured arguments.
    """
    log.info(f"Task name: {task.name}")
    args = task.args if "args" in task else ""
    # NOTE(review): this escaping appears meant to protect '$' sequences from
    # the shell — confirm against how task.args is authored.
    args = args.replace("$\\", "\\$")
    command = (
        f"CUDA_VISIBLE_DEVICES={utils.WORKER_CUDA_DEVICE} "
        f"HYDRA_CONFIG_PATH={task.config_path} {task.environ} "
        f"python {task.command} repeat={task.repeat} {args}"
    )
    log.info(f"Command: {command}")
    status = str(os.system(command))
    log.info(f'Task "{task.name}" finished with return code: {status}.')
    return status
# hydra reads its config location from the environment at import time.
@hydra.main(config_path=os.environ["HYDRA_CONFIG_PATH"])
def main(configs):
    """Entry point: restore the original cwd, then run all configured tasks."""
    # NOTE(review): auto_generated_dir (hydra's run dir) is captured but never
    # used afterwards — confirm whether it was meant to be logged.
    auto_generated_dir = os.getcwd()
    # hydra chdirs into an auto-generated run dir; go back so relative paths
    # in the task configs resolve against the project root.
    os.chdir(hydra.utils.get_original_cwd())
    utils.run_tasks(configs, run_task)
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the CreateProfile and Musician tables.
    # Field order is preserved exactly — it determines column order.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CreateProfile',
            # Only the implicit auto-increment primary key for now.
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
        ),
        migrations.CreateModel(
            name='Musician',
            fields=[
                # user_name doubles as the primary key, so it must be unique.
                ('user_name', models.CharField(max_length=30, serialize=False, primary_key=True)),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('current_rating', models.IntegerField()),
                ('rating_text', models.TextField()),
                ('bio', models.TextField()),
                ('zip_code', models.IntegerField()),
                ('website', models.URLField()),
            ],
        ),
    ]
|
"""
* Copyright 2020, Departamento de sistemas y Computación,
* Universidad de Los Andes
*
*
* Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along withthis program. If not, see <http://www.gnu.org/licenses/>.
*
* Contribuciones:
*
* Dario Correal - Version inicial
"""
import config as cf
import csv
from DISClib.ADT import list as lt
from DISClib.ADT import map as mp
from DISClib.DataStructures import mapentry as me
from DISClib.DataStructures import rbt
from DISClib.Algorithms.Sorting import shellsort as sa
from DISClib.DataStructures import arraylistiterator as al_it
from DISClib.DataStructures import linkedlistiterator as ll_it
assert cf
"""
Se define la estructura de un catálogo de videos. El catálogo tendrá dos listas, una para los videos, otra para las categorias de
los mismos.
"""
# Construccion de modelos
def max_rep(ans_list):
    """Return the (name, count) pair with the highest count in *ans_list*."""
    best = ('', 0)
    it = ll_it.newIterator(ans_list)
    while ll_it.hasNext(it):
        candidate = ll_it.next(it)
        if candidate[1] > best[1]:
            best = candidate
    return best
def setup_genres():
    """Build the default map of genre name -> (low, high) BPM range."""
    tempo_ranges = {
        "Reggae": (60, 90),
        "Down-tempo": (70, 100),
        "Chill-out": (90, 120),
        "Hip-Hop": (85, 115),
        "Jazz and Funk": (120, 125),
        "Pop": (100, 130),
        "R&B": (60, 80),
        "Rock": (110, 140),
        "Metal": (100, 160),
    }
    genre_map = mp.newMap()
    # dicts preserve insertion order, so entries are added in the same
    # sequence as the original put() calls.
    for genre, bpm_range in tempo_ranges.items():
        mp.put(genre_map, genre, bpm_range)
    return genre_map
def req_1_cmpfunc(characteristic: int, list1, list2):
    """Three-way compare two rows by column *characteristic*, tie-break on col 19 (id)."""
    key_a = (lt.getElement(list1, characteristic), lt.getElement(list1, 19))
    key_b = (lt.getElement(list2, characteristic), lt.getElement(list2, 19))
    if key_a < key_b:
        return -1
    if key_a > key_b:
        return 1
    return 0
def req_2_cmpfunc(list1, list2):
    """Three-way compare rows by col 9 (en_* — presumably energy; confirm),
    tie-break on col 14 (id)."""
    key_a = (lt.getElement(list1, 9), lt.getElement(list1, 14))
    key_b = (lt.getElement(list2, 9), lt.getElement(list2, 14))
    if key_a < key_b:
        return -1
    if key_a > key_b:
        return 1
    return 0
def req_3_cmpfunc(list1, list2):
    """Three-way compare rows by col 1 (inst_* — presumably instrumentalness;
    confirm), tie-break on col 14 (id)."""
    key_a = (lt.getElement(list1, 1), lt.getElement(list1, 14))
    key_b = (lt.getElement(list2, 1), lt.getElement(list2, 14))
    if key_a < key_b:
        return -1
    if key_a > key_b:
        return 1
    return 0
def req_4_cmpfunc(list1, list2):
    """Three-way compare rows by col 7 (temp_* — presumably tempo; confirm),
    tie-break on col 14 (id)."""
    key_a = (lt.getElement(list1, 7), lt.getElement(list1, 14))
    key_b = (lt.getElement(list2, 7), lt.getElement(list2, 14))
    if key_a < key_b:
        return -1
    if key_a > key_b:
        return 1
    return 0
def count_artists(rango, reproducciones) -> int:
    """Count distinct artist ids in *rango* using a map as a set."""
    seen = mp.newMap(numelements=reproducciones)
    it = ll_it.newIterator(rango)
    while ll_it.hasNext(it):
        # Value is irrelevant; the map only deduplicates keys.
        mp.put(seen, ll_it.next(it), 1)
    return mp.size(seen)
def count_artists_2(rango, reproducciones) -> int:
    """Count distinct artist ids in *rango* and sample up to ten of them.

    Returns (count, list_of_up_to_10_ids).
    """
    seen = mp.newMap(numelements=reproducciones)
    it = ll_it.newIterator(rango)
    while ll_it.hasNext(it):
        mp.put(seen, ll_it.next(it), 1)
    sample = lt.newList()
    keys_it = ll_it.newIterator(mp.keySet(seen))
    taken = 0
    while taken < 10 and ll_it.hasNext(keys_it):
        lt.addLast(sample, ll_it.next(keys_it))
        taken += 1
    return mp.size(seen), sample
def type_var(var):
    """Classify the string *var* as 'i' (int), 'f' (float) or 's' (string).

    The original isnumeric()-based test mislabelled strings such as
    '1.2.3' as floats (and unicode numerics like '²' as ints) even though
    int()/float() fail on them later during row conversion; attempting the
    conversions directly keeps classification consistent with how the
    values are actually parsed, and also recognises signed numbers.
    """
    try:
        int(var)
        return 'i'
    except ValueError:
        pass
    try:
        float(var)
        return 'f'
    except ValueError:
        return 's'
class file_proc:
    """Pre-scans a CSV file to infer the type of each column.

    After construction, ``index_types`` is an ARRAY_LIST of one-letter
    codes ('i'/'f'/'s') describing the columns of the first data row.
    """

    def __init__(self, filepath: str):
        self.filepath = filepath
        self.indexes()

    def indexes(self):
        """Read the first data row (skipping the header) and record column types."""
        with open(self.filepath, 'r') as source:
            source.readline()  # skip header row
            sample_row = source.readline()
        columns = sample_row.replace('"', '').split(',')
        detected = lt.newList(datastructure='ARRAY_LIST')
        for value in columns:
            lt.addLast(detected, type_var(value))
        self.index_types = detected
class catalog:
    """Music catalog backed by three ARRAY_LIST matrices (basic,
    characteristics and sentiments CSV files).  RBT indexes built for the
    query requirements are cached in ``self.req_1_rbt``, keyed by the
    characteristic column number (or a reserved key 20/21/22 for reqs 2-4).
    """
    def array_line(self, line, index_types):
        """Convert one csv.DictReader row into a typed ARRAY_LIST.

        ``index_types`` holds a per-column type code ('i' int, 'f' float,
        anything else kept as string).  Empty numeric cells default to
        0 / 0.0.  Returns None when ``line`` is empty.
        """
        if line:
            array = lt.newList(datastructure='ARRAY_LIST')
            i = 1
            for element in line.items():
                element_obj = element[1].replace('"', '')
                if lt.getElement(index_types, i) == 'f':
                    if element_obj:
                        lt.addLast(array, float(element_obj))
                    else:
                        lt.addLast(array, 0.0)  # default for empty float cell
                elif lt.getElement(index_types, i) == 'i':
                    if element_obj:
                        lt.addLast(array, int(element_obj))
                    else:
                        lt.addLast(array, 0)  # default for empty int cell
                else:
                    lt.addLast(array, element_obj)
                i += 1
            return array
        else:
            return None
    def create_matrix(self, file: file_proc):
        """Load the CSV behind ``file`` into an ARRAY_LIST of typed rows."""
        filepath = file.filepath
        index_types = file.index_types
        if filepath is not None:
            input_file = csv.DictReader(open(filepath, encoding="utf-8"), delimiter=',')
            matrix = lt.newList(datastructure='ARRAY_LIST')
            for line in input_file:
                array = self.array_line(line, index_types)
                if array:
                    lt.addLast(matrix, array)
            return matrix
    def __init__(self, file_basic: file_proc, file_characteristics: file_proc, file_sentiments: file_proc):
        self.basic_catalog = self.create_matrix(file_basic)
        self.characteristics_catalog = self.create_matrix(file_characteristics)
        self.sentiments_catalog = self.create_matrix(file_sentiments)
        self.req_1_rbt = mp.newMap()  # cache of lazily-built RBT indexes
        self.genres = setup_genres()
    def req_1(self, characteristic: int, minimo, maximo):
        """Return (reproductions, distinct artists) for songs whose
        ``characteristic`` column value lies in [minimo, maximo]."""
        char_rbt = None
        if mp.get(self.req_1_rbt, characteristic):
            # Bug fix: mp.get returns a map *entry*; the cached RBT lives
            # under its 'value' key (consistent with req_2/req_3/req_4_aux
            # and req_5_aux below, which all unwrap ['value']).
            char_rbt = mp.get(self.req_1_rbt, characteristic)['value']
        else:
            cmpfunc = lambda a, b: req_1_cmpfunc(characteristic, a, b)
            char_rbt = rbt.newMap(cmpfunc)
            for music in catalog_iterator(self, 1):
                # value stored is column 12 (presumably the artist id --
                # TODO confirm column layout against the CSV header)
                rbt.put(char_rbt, music, lt.getElement(music, 12))
            mp.put(self.req_1_rbt, characteristic, char_rbt)
        rango = rbt.values(char_rbt, minimo, maximo)
        reproducciones = lt.size(rango)
        artistas = count_artists(rango, reproducciones)
        return (reproducciones, artistas)
    def req_2(self, en_min, en_max, dan_min: float, dan_max: float):
        """Count songs with energy in [en_min, en_max] and danceability in
        [dan_min, dan_max]; returns (count, first five matches)."""
        char_rbt = None
        if mp.get(self.req_1_rbt, 20):
            char_rbt = mp.get(self.req_1_rbt, 20)['value']
        else:
            char_rbt = rbt.newMap(req_2_cmpfunc)
            for music in catalog_iterator(self, 1):
                # value = [track id, energy, danceability] (columns 14/9/4
                # -- presumably; confirm against the CSV header)
                rbt.put(char_rbt, music, [lt.getElement(music, 14), lt.getElement(music, 9), lt.getElement(music, 4)])
            mp.put(self.req_1_rbt, 20, char_rbt)
        rango = rbt.values(char_rbt, en_min, en_max)
        itera = ll_it.newIterator(rango)
        count = 0
        res_list = lt.newList()
        while ll_it.hasNext(itera):
            danceability = ll_it.next(itera)
            if danceability[2] >= dan_min and danceability[2] <= dan_max:
                count += 1
                if count <= 5:
                    lt.addLast(res_list, danceability)
        return count, res_list
    def req_3(self, inst_min, inst_max, temp_min: float, temp_max: float):
        """Count songs with instrumentalness in [inst_min, inst_max] and
        tempo in [temp_min, temp_max]; returns (count, first five matches)."""
        char_rbt = None
        if mp.get(self.req_1_rbt, 21):
            char_rbt = mp.get(self.req_1_rbt, 21)['value']
        else:
            char_rbt = rbt.newMap(req_3_cmpfunc)
            for music in catalog_iterator(self, 1):
                # value = [track id, instrumentalness, tempo] (columns 14/1/7)
                rbt.put(char_rbt, music, [lt.getElement(music, 14), lt.getElement(music, 1), lt.getElement(music, 7)])
            mp.put(self.req_1_rbt, 21, char_rbt)
        rango = rbt.values(char_rbt, inst_min, inst_max)
        itera = ll_it.newIterator(rango)
        count = 0
        res_list = lt.newList()
        while ll_it.hasNext(itera):
            tempo = ll_it.next(itera)
            if tempo[2] >= temp_min and tempo[2] <= temp_max:
                count += 1
                if count <= 5:
                    lt.addLast(res_list, tempo)
        return count, res_list
    def req_4_aux(self, min_temp, max_temp):
        """Return (reproductions, artist count, sample artists) for songs
        whose tempo lies in [min_temp, max_temp]."""
        char_rbt = None
        if mp.get(self.req_1_rbt, 22):
            char_rbt = mp.get(self.req_1_rbt, 22)['value']
        else:
            char_rbt = rbt.newMap(req_4_cmpfunc)
            for music in catalog_iterator(self, 1):
                rbt.put(char_rbt, music, lt.getElement(music, 12))
            mp.put(self.req_1_rbt, 22, char_rbt)
        rango = rbt.values(char_rbt, min_temp, max_temp)
        reproducciones = lt.size(rango)
        artistas = count_artists_2(rango, reproducciones)
        return (reproducciones, *artistas)
    def req_5_aux(self, temp_min, temp_max):
        """Reproduction count for songs whose tempo (column 7) lies in
        [temp_min, temp_max]."""
        char_rbt = None
        if mp.get(self.req_1_rbt, 7):
            char_rbt = mp.get(self.req_1_rbt, 7)['value']
        else:
            cmpfunc = lambda a, b: req_1_cmpfunc(7, a, b)
            char_rbt = rbt.newMap(cmpfunc)
            for music in catalog_iterator(self, 1):
                rbt.put(char_rbt, music, lt.getElement(music, 12))
            mp.put(self.req_1_rbt, 7, char_rbt)
        rango = rbt.values(char_rbt, temp_min, temp_max)
        reproducciones = lt.size(rango)
        return reproducciones
    def req_5_aux_2(self, temp_min, temp_max):
        """Build sentinel min/max rows with only the tempo column (7) set,
        to be used as RBT range bounds."""
        real_minimo = lt.newList(datastructure='ARRAY_LIST')
        real_maximo = lt.newList(datastructure='ARRAY_LIST')
        for i in range(1, 20):
            if i == 7:
                lt.addLast(real_minimo, temp_min)
                lt.addLast(real_maximo, temp_max)
            else:
                lt.addLast(real_minimo, '')
                lt.addLast(real_maximo, '')
        return real_minimo, real_maximo
    def req_4(self, genre_list):
        """Per-genre (reproductions, artist count, sample artists) tuples
        for the genres named in ``genre_list``."""
        genre_it = ll_it.newIterator(genre_list)
        ans_list = lt.newList()
        while ll_it.hasNext(genre_it):
            genre = ll_it.next(genre_it)
            temp_min, temp_max = mp.get(self.genres, genre)['value']
            temp_min, temp_max = self.req_5_aux_2(temp_min, temp_max)
            lt.addLast(ans_list, (genre, *self.req_4_aux(temp_min, temp_max)))
        return ans_list
    def req_5(self):
        """Reproduction counts for every known genre plus the most-played
        genre (per max_rep)."""
        genre_list = mp.keySet(self.genres)
        genre_it = ll_it.newIterator(genre_list)
        ans_list = lt.newList()
        while ll_it.hasNext(genre_it):
            genre = ll_it.next(genre_it)
            temp_min, temp_max = mp.get(self.genres, genre)['value']
            temp_min, temp_max = self.req_5_aux_2(temp_min, temp_max)
            reproducciones = self.req_5_aux(temp_min, temp_max)
            lt.addLast(ans_list, (genre, reproducciones))
        maximo = max_rep(ans_list)
        return ans_list, maximo
class catalog_iterator:
    """Iterate the rows of one of the three catalog matrices:
    0 = basic, 1 = characteristics, 2 = sentiments."""
    def __init__(self, music_catalog: catalog, index: int):
        matrices = {
            0: music_catalog.basic_catalog,
            1: music_catalog.characteristics_catalog,
            2: music_catalog.sentiments_catalog,
        }
        # Unknown index falls through to None, matching the original.
        self.m_it = al_it.newIterator(matrices.get(index))
    def __iter__(self):
        return self
    def __next__(self):
        if not al_it.hasNext(self.m_it):
            raise StopIteration
        return al_it.next(self.m_it)
# Functions to add information to the catalog
# Functions for data creation
# Query functions
# Functions used to compare elements within a list
# Sorting functions
|
from build_manifest import build_manifest
from diff_maker import diff_maker
if __name__ == "__main__":
    # Generate diff files between 'src' and target org 'CCC', then build
    # the corresponding package.xml manifest.
    # Bug fix: dropped a stray trailing '|' token that broke the script.
    diff_maker.build_diff_files('src', 'CCC')
    build_manifest.custom_package_xml_generator('CCC')
# Read three ages (one per line) and print the youngest.
jack_age, alex_age, lana_age = (int(input()) for _ in range(3))
print(min(alex_age, jack_age, lana_age))
|
# Importing Modules
import os
from fpdf import *
from docx2pdf import convert
from tkinter import *
from tkinter import filedialog, messagebox, simpledialog
from emoji import emojize
# Making global variables and dictionaries
select = 0  # selected conversion mode: 1 = docx->pdf, 2 = txt->pdf
values = {"Docx to PDF": "1", "Txt to PDF": "2"}  # radio-button label -> value
file_path = ''  # last path chosen in the browse dialog
path = ''  # output pdf name entered by the user
# Making Functions
def change():
    """Dispatch to the converter matching the selected radio option."""
    global select
    if select == 1:
        do_to_pdf()
    elif select == 2:
        text_to_pdf()
def selection():
    """Store the radio-button choice in the module-level ``select``."""
    global select
    choice = v.get()
    select = choice
def browse():
    """Open a file picker and show the chosen path in the entry widget."""
    global file_entry, select, file_path
    file_entry.delete("0", END)
    # Bug fix: the option name is ``filetypes`` (plural); the misspelled
    # ``filetype`` keyword is rejected or ignored depending on Tk version.
    file_path = filedialog.askopenfilename(
        filetypes=[("All Files", "*.*"), ("Docx Files", "*.docx"), ("Text Files", "*.txt")]
    )
    if not file_path:
        # User cancelled the dialog; leave the entry empty.
        return None
    file_entry.insert('1', file_path)
def do_to_pdf():
    """Convert the .docx selected in ``file_entry`` to a PDF on the desktop.

    The user is prompted for the output name; an empty answer falls back to
    the input file's base name.
    """
    global file_entry, file_path, path
    desktop = 'C:/Users/Aman Agrawal/Desktop/'  # TODO: hard-coded user path
    path = simpledialog.askstring("My_PDF_converter", "Enter name of pdf")
    # Bug fix: askstring returns None when the dialog is cancelled; the
    # original then crashed on ``'.pdf' in None``.  Treat it like ''.
    if not path:
        base_name = os.path.basename(file_entry.get())
        pdf_name = os.path.splitext(base_name)[0] + '.pdf'
    elif '.pdf' in path:
        pdf_name = path
    else:
        pdf_name = str(path) + '.pdf'
    convert(file_entry.get(), desktop + pdf_name)
    messagebox.showinfo("My_PDF_converter", pdf_name + " Created Successfully")
def text_to_pdf():
    """Render the text file selected in ``file_entry`` into a PDF on the
    desktop, prompting for the output name."""
    global file_entry, path, file_path
    # save FPDF() class into a variable pdf
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=10)
    # Bug fix: use a context manager so the input file is always closed
    # (the original leaked the handle).
    with open(file_entry.get(), "r", encoding='utf-8') as f:
        for x in f:
            # FPDF's core fonts only support latin-1; replace unmappable chars.
            text2 = x.encode('latin-1', 'replace').decode('latin-1')
            pdf.cell(200, 5, txt=text2, ln=1, align='L')
    desktop = 'C:/Users/Aman Agrawal/Desktop/'  # TODO: hard-coded user path
    path = simpledialog.askstring('My_PDF_convertor', 'Enter name of your PDF')
    # Bug fix: askstring returns None on cancel; treat it like ''.
    if not path:
        base_name = os.path.basename(file_entry.get())
        pdf_name = os.path.splitext(base_name)[0] + '.pdf'
    elif '.pdf' in path:
        pdf_name = path
    else:
        pdf_name = str(path) + '.pdf'
    pdf.output(desktop + pdf_name)
    # Bug fix: the empty-name branch previously reported "<name>.pdf.pdf";
    # also unified the dialog title spelling ("converter").
    messagebox.showinfo("My_PDF_converter", pdf_name + " Created Successfully")
# Making Window
win = Tk()
win.title("My_PDF_Converter")
win.geometry("490x225+400+100")
win.config(bg="white")
win.resizable(0, 0)
v = IntVar()  # holds the selected conversion mode for the radio buttons
# Making Title frame
title_frame = Frame(master=win, bg="yellow", height=30)
title_frame.pack(fill=X, side=TOP)
title_label = Label(master=title_frame, text="PDF Converter -- Convert .docx and .txt to PDF",
                    font=("Verdana", 12, "italic"), bg="khaki")
title_label.pack()
# Making file_name frame
file_frame = Frame(master=win, bg="medium spring green", height=100)
file_frame.pack(side=TOP, fill=X)
file_label = Label(master=file_frame, text="Enter file name : ", font=("Verdana", 11),
                   anchor=W, width=75, bg='medium spring green',
                   pady=5)
file_label.pack()
file_entry = Entry(master=file_frame, bg="white", width=60)
file_entry.pack()
# Making button frame
btn_frame = Frame(master=win, bg="medium spring green", pady=10, padx=10)
btn_frame.pack(side=TOP, fill=X)
browse_btn = Button(master=btn_frame, text="Browse", height=1, width=10, bg="lime", command=browse)
browse_btn.pack(side=LEFT, padx=10)
convert_btn = Button(master=btn_frame, text="Convert !" + emojize(':thumbs_up:'), height=1,
                     width=15, bg="lime", command=change)
convert_btn.pack(side=LEFT, padx=10)
# Bug fix: the loop variable was previously named ``values``, shadowing and
# rebinding the module-level ``values`` dict while iterating it.
for option_text, option_value in values.items():
    Radiobutton(master=btn_frame, text=option_text, value=option_value, font=("Verdana", 11, "italic"), variable=v,
                bg="medium spring green", justify="left",
                command=selection).pack(side="top", padx=5, pady=5, anchor=W)
# Making output frame
out_frame = Frame(master=win, bg="forest green", pady=10, padx=10)
out_frame.pack(side="top")
out_label = Label(master=out_frame, text="", font=("Verdana", 11),
                  anchor=W, width=75, bg='forest green',
                  pady=5)
out_label.pack(side="top")
# Making mainloop
win.mainloop()
|
import os
import smtplib
import hashlib
import logging
import traceback
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from ckanpackager.lib.utils import BadRequestError
from ckanpackager.lib.resource_file import ResourceFile
from ckanpackager.lib.statistics import statistics
from raven import Client
class PackageTask(object):
    """Base class for DatastorePackageTask and UrlPackageTask
    Note that all methods may be called from the web service or the task
    consumer.
    Derived classes must implement:
    - schema(): Return a dictionary of all possible request parameters to tuples defining (required,
    process function). Note that classes may define additional entries for their own use.
    'email' and 'resource_id' parameters are always required, so both are added to the schema
    as (True, None) if not defined;
    - host(): Return the hostname for the current request;
    - create_zip(ResourceFile): Create the ZIP file associated with the given resource file;
    In addition, derived class should implement:
    - speed(): Return 'slow' or 'fast' depending on the expected duration of the
    task. If not implemented, this always returns 'slow'.
    """
    def __init__(self, params, config):
        # Validate ``params`` against the subclass schema and keep the
        # processed values in ``self.request_params``.
        self.config = config
        self.sentry = Client(self.config.get('SENTRY_DSN'))
        # Creation timestamp; folded into the task hash in __str__ so two
        # otherwise-identical requests get distinct identifiers.
        self.time = str(datetime.now())
        self.request_params = {}
        self.log = logging.getLogger(__name__)
        schema = self.schema()
        # 'email' and 'resource_id' are always required (see class docstring).
        if 'email' not in schema:
            schema['email'] = (True, None)
        if 'resource_id' not in schema:
            schema['resource_id'] = (True, None)
        for field, definition in schema.items():
            # definition is (required, process_function-or-None)
            if definition[0] and field not in params:
                raise BadRequestError("Parameter {} is required".format(field))
            if field in params:
                if definition[1] is not None:
                    # Run the schema's process function on the raw value.
                    self.request_params[field] = definition[1](params.get(field, None))
                else:
                    self.request_params[field] = params.get(field, None)
    def schema(self):
        """Return {param: (required, process_fn)}; see class docstring."""
        raise NotImplementedError
    def create_zip(self, resource):
        """Create the ZIP file for ``resource``; see class docstring."""
        raise NotImplementedError
    def host(self):
        """Return the hostname for the current request; see class docstring."""
        raise NotImplementedError
    def speed(self):
        """ Return the task estimated time as either 'fast' or 'slow'.
        If the file exists in the cache, then this returns 'fast'. It returns
        'slow' otherwise.
        """
        resource = ResourceFile(
            self.request_params,
            self.config['STORE_DIRECTORY'],
            self.config['TEMP_DIRECTORY'],
            self.config['CACHE_TIME']
        )
        if resource.zip_file_exists():
            return 'fast'
        else:
            return 'slow'
    def run(self, logger=None):
        """Run the task."""
        # create a stats object for database access
        stats = statistics(self.config['STATS_DB'], self.config.get(u'ANONYMIZE_EMAILS'))
        try:
            if logger is not None:
                self.log = logger
            else:
                self.log = logging.getLogger(__name__)
            self._run()
            # Record the successful request in the stats database.
            stats.log_request(
                self.request_params['resource_id'],
                self.request_params['email'],
                self.request_params.get('limit', None)
            )
        except Exception as e:
            # Record the failure and report to Sentry, then re-raise so the
            # caller (web service / consumer) still sees the error.
            stats.log_error(
                self.request_params['resource_id'],
                self.request_params['email'],
                traceback.format_exc()
            )
            self.sentry.captureException()
            raise e
    def _run(self):
        """Run the task"""
        self.log.info("Task parameters: {}".format(str(self.request_params)))
        # Get/create the file
        resource = ResourceFile(
            self.request_params,
            self.config['STORE_DIRECTORY'],
            self.config['TEMP_DIRECTORY'],
            self.config['CACHE_TIME']
        )
        if not resource.zip_file_exists():
            self.create_zip(resource)
        else:
            self.log.info("Found file in cache")
        zip_file_name = resource.get_zip_file_name()
        self.log.info("Got ZIP file {}. Emailing link.".format(zip_file_name))
        # Email the link
        place_holders = {
            'resource_id': self.request_params['resource_id'],
            'zip_file_name': os.path.basename(zip_file_name),
            'ckan_host': self.host(),
            # retrieve a doi from the request params, if there is one, otherwise default to the empty string
            'doi': self.request_params.get('doi', ''),
            # default the doi_body to the empty string, we'll fill it in below if necessary
            'doi_body': '',
            'doi_body_html': '',
        }
        if place_holders['doi']:
            # Only include the DOI paragraph when a DOI was supplied AND a
            # template for it exists in the config.
            if 'DOI_BODY' in self.config:
                place_holders['doi_body'] = self.config['DOI_BODY'].format(**place_holders)
            if 'DOI_BODY_HTML' in self.config:
                place_holders['doi_body_html'] = \
                    self.config['DOI_BODY_HTML'].format(**place_holders)
        from_addr = self.config['EMAIL_FROM'].format(**place_holders)
        msg = MIMEMultipart('alternative')
        # add the basics
        msg['Subject'] = self.config['EMAIL_SUBJECT'].format(**place_holders)
        msg['From'] = from_addr
        msg['To'] = self.request_params['email']
        # add the body as html and text
        text = MIMEText(self.config['EMAIL_BODY'].format(**place_holders), 'plain')
        html = MIMEText(self.config['EMAIL_BODY_HTML'].format(**place_holders), 'html')
        msg.attach(text)
        msg.attach(html)
        # send the email
        server = smtplib.SMTP(self.config['SMTP_HOST'], self.config['SMTP_PORT'])
        try:
            # Credentials are optional; only authenticate when configured.
            if 'SMTP_LOGIN' in self.config:
                server.login(self.config['SMTP_LOGIN'], self.config['SMTP_PASSWORD'])
            server.sendmail(from_addr, self.request_params['email'], msg.as_string())
        finally:
            server.quit()
    def __str__(self):
        """Return a unique representation of this task"""
        # NOTE(review): hashlib.md5().update() requires bytes on Python 3;
        # passing str here only works on Python 2 -- confirm target runtime.
        md5 = hashlib.md5()
        md5.update(str(self.request_params))
        md5.update(self.time)
        return md5.hexdigest()
|
class Borg:
    """Borg pattern: every instance shares one attribute dictionary, so
    state set on any instance is visible on all of them."""
    _shared_data = {}  # the single attribute dictionary shared by all instances
    def __init__(self):
        # Point this instance's attribute dict at the class-level one.
        self.__dict__ = self._shared_data
class Singleton(Borg):
    """Borg subclass whose constructor merges keyword arguments into the
    shared state -- effectively an object-oriented global dictionary."""
    def __init__(self, **kwargs):
        super().__init__()
        # Fold the new key/value pairs into the shared attribute dict.
        self._shared_data.update(kwargs)
    def __str__(self):
        # Render the shared attribute dictionary.
        return str(self._shared_data)
# Let's create a singleton object and add our first acronym
x = Singleton(HTTP="Hyper Text Transfer Protocol")
print(x)
# Each new instance extends (and prints) the same shared dictionary.
y = Singleton(SNMP="Simple Network Management Protocol")
print(y)
z = Singleton(SMTP="Simple Mail Transfer Protocol")
print(z)
|
from . import szh
# Example route
# Local test URL: http://localhost:5000/interGroup
@szh.route('/interGroup')
def add_fri():
    # Returns a success message (Chinese: "joined the group successfully").
    return '进入群成功'
|
# pieces.py --- includes all class declarations for the various chess pieces
from moves import getPawnMoves, getRookMoves, getBishopMoves, getKnightMoves, getKingMoves, checkAllMoves, removeKingTake
class Piece:
    """Base chess piece: tracks board position, colour and moves made."""
    def __init__(self, pos, colour):
        self.pos, self.colour = pos, colour
        self.moveNo = 0     # number of moves this piece has made
        self.name = "xx"    # placeholder; child classes overwrite it
    def getName(self):
        """Return the two-character piece code (e.g. 'wp')."""
        return self.name
    def getMoves(self, board):
        """Base implementation -- child classes override this."""
        print("Why is this horrific text showing!!!")
        return []
class Pawn(Piece):
    """Pawn: named '<colour initial>p'; move generation in getPawnMoves."""
    def __init__(self, pos, colour):
        super().__init__(pos, colour)
        self.name = colour[0].lower() + "p"
    def getMoves(self, board):
        # Pawn moves depend on the move count; drop moves that would leave
        # our own king capturable.
        candidates = getPawnMoves(self.pos, board, self.colour, self.moveNo)
        return removeKingTake(board, self.colour, candidates)
class Rook(Piece):
    """Rook: named '<colour initial>r'; move generation in getRookMoves."""
    def __init__(self, pos, colour):
        super().__init__(pos, colour)
        self.name = colour[0].lower() + "r"
    def getMoves(self, board):
        candidates = getRookMoves(self.pos, board, self.colour)
        return removeKingTake(board, self.colour, candidates)
class Bishop(Piece):
    """Bishop: named '<colour initial>b'; move generation in getBishopMoves."""
    def __init__(self, pos, colour):
        super().__init__(pos, colour)
        self.name = colour[0].lower() + "b"
    def getMoves(self, board):
        candidates = getBishopMoves(self.pos, board, self.colour)
        return removeKingTake(board, self.colour, candidates)
class Knight(Piece):
    """Knight: named '<colour initial>n'; move generation in getKnightMoves."""
    def __init__(self, pos, colour):
        super().__init__(pos, colour)
        self.name = colour[0].lower() + "n"
    def getMoves(self, board):
        candidates = getKnightMoves(self.pos, board, self.colour)
        return removeKingTake(board, self.colour, candidates)
class Queen(Piece):
    """Queen: named '<colour initial>q'; moves are the union of rook and
    bishop moves from the same square."""
    def __init__(self, pos, colour):
        super().__init__(pos, colour)
        self.name = colour[0].lower() + "q"
    def getMoves(self, board):
        straight = getRookMoves(self.pos, board, self.colour)
        diagonal = getBishopMoves(self.pos, board, self.colour)
        return removeKingTake(board, self.colour, straight + diagonal)
class King(Piece):
    """King: named '<colour initial>k'; castling handled via moveNo in
    getKingMoves."""
    def __init__(self, pos, colour):
        super().__init__(pos, colour)
        self.name = colour[0].lower() + "k"
    def getMoves(self, board):
        possibleMoves = getKingMoves(self.pos, board, self.colour, self.moveNo)
        possibleMoves = removeKingTake(board, self.colour, possibleMoves)
        # Squares the opponent can reach; the king may not move into them.
        allMoves = checkAllMoves(board, self.colour)
        # Bug fix: the original removed items from possibleMoves while
        # iterating it, which skips the element following each removal.
        # Also dropped a leftover debug print.
        possibleMoves = [move for move in possibleMoves if move not in allMoves]
        return possibleMoves
# Import DQoc HTML from lp:ubuntu-ui-toolkit
import os, sys, re
import zlib
import simplejson
from django.core.files import File
from django.core.files.storage import get_storage_class
from ..models import *
from . import Importer
__all__ = (
    'SphinxImporter',
)
# Maps namespace/page names to section names; populated in run() from the
# user-supplied sections definition module.
SECTIONS = dict()
class SphinxImporter(Importer):
SOURCE_FORMAT = "sphinx"
def __init__(self, *args, **kwargs):
super(SphinxImporter, self).__init__(*args, **kwargs)
self.source = self.options.get('dir')
self.DOC_ROOT = self.source
self.sections_file = self.options.get('sections')
self.pages_sections = dict()
self.page_data_map = dict()
self.module_order = []
def parse_line(self, line, source_file, element_fullname):
line = line.replace(u'\u00b6', u'')
return super(SphinxImporter, self).parse_line(line, source_file, element_fullname)
def parse_pagename(self, pagename):
if pagename.endswith('.html'):
pagename = pagename[:-5]
return pagename.replace('/', '-').replace(' ', '_')
def parse_namespace(self, namespace):
if self.options.get('strip_namespace', None) and namespace:
strip_prefix = self.options.get('strip_namespace')
if namespace.startswith(strip_prefix):
namespace = namespace[len(strip_prefix):]
elif strip_prefix.startswith(namespace):
namespace = ''
if namespace.startswith('.'):
namespace = namespace[1:]
if self.options.get('namespace', None) and not namespace:
return self.options.get('namespace')
return namespace
def lookup_from_url(self, url, anchor, element_fullname):
if anchor is None:
anchor = ''
if anchor != '' and anchor[1:] in self.class_map:
return anchor[1:]
rel_url = os.path.relpath(os.path.join(element_fullname, url))
if rel_url in self.class_map or rel_url in self.page_map:
return rel_url
url_part = url.replace('../', '')
if url_part.endswith('/'):
url_part = url_part[:-1]
if url_part in self.class_map or url_part in self.page_map:
return url_part
anchor_part = anchor[1:anchor.rfind('.')]
if anchor_part in self.class_map or anchor_part in self.page_map:
return anchor_part
anchor_with_ns = element_fullname[:element_fullname.rfind('.')] + '.'+anchor[1:]
if anchor_with_ns in self.class_map or anchor_with_ns in self.page_map:
return anchor_with_ns
anchor_without_function = anchor_with_ns[:anchor_with_ns.rfind('.')]
if anchor_without_function in self.class_map or anchor_without_function in self.page_map:
return anchor_without_function
return url
def get_section(self, namespace, fullname):
if fullname is not None and fullname in SECTIONS:
return SECTIONS[fullname]
elif namespace is not None and namespace in SECTIONS:
return SECTIONS[namespace]
elif fullname is not None and '/' in fullname and fullname.split('/')[0]+'/' in SECTIONS:
return SECTIONS[fullname.split('/')[0]+'/']
else:
return SECTIONS["*"]
def read_inv_file(self, filepath):
inv_file = open(filepath)
inv_file_data = inv_file.readlines()
inv_compressed_data = ''.join(inv_file_data[4:])
try:
inv_data = zlib.decompress(inv_compressed_data)
return inv_data.split('\n')
except Exception, e:
print "Error reading inv:\n%s" % filepath
raise e
def read_json_file(self, filepath):
js_file = open(filepath)
js_data = js_file.read()
try:
json_object = simplejson.loads(js_data)
return json_object
except Exception, e:
print "Error parsing JSON:\n%s" % js_data
raise e
def extract_classes(self, module_html):
classes = []
current_class = None
current_class_start = 0
extra_end = len(module_html)
i = 0
if isinstance(module_html, (str,unicode)) and '\n' in module_html:
module_html = module_html.split('\n')
html_len = len(module_html)
if self.verbosity >= 2:
print "Looking for classes in %s lines" % html_len
while i < html_len:
line = module_html[i]
if line == "<dl class=\"class\">":
if i <= extra_end:
extra_end = i-0
if current_class:
classes.append((current_class, module_html[current_class_start:i]))
if self.verbosity >= 1:
print "Found class: %s" % current_class
current_class_start = i
# <dt id="autopilot.process.ProcessManager">
current_class = module_html[i+1][8:-2]
i += 1
if current_class:
classes.append((current_class, module_html[current_class_start:-1]))
if self.verbosity >= 1:
print "Found class: %s" % current_class
return classes, module_html[1:extra_end]
def clean_content(self, unclean_data, doc_file, element_fullname):
if unclean_data is None:
return None
try:
# Change the content of the docs
cleaned_data = ''
for line in unclean_data:
if "<span class=\"viewcode-link\">[source]</span>" in line:
line = line.replace("<span class=\"viewcode-link\">[source]</span>", "")
if '<div class="section" id="' in line:
start_div = line.find('<div class="section"')
end_div = line.find('>', start_div)
line = line[:start_div] + line[end_div+1:]
if '<h1><tt class="docutils literal"><span class="pre">' in line:
start_div = line.find('<h1><tt class="docutils literal"><span class="pre">')
end_div = line.find('</h1>', start_div)
line = line[:start_div] + line[end_div+5:]
line = self.parse_line(line, doc_file, element_fullname)
cleaned_data += line + '\n'
return cleaned_data
except Exception, e:
print "Parsing content failed: "
import pdb; pdb.set_trace()
print e
return unclean_data
def run(self):
self.source = self.options.get('inv')
if not os.path.exists(self.source):
print "Source directory not found"
exit(1)
self.sections_file = self.options.get('sections')
if not self.sections_file:
print "You must define a sections definition file to import Sphinx API docs"
exit(2)
elif not os.path.exists(self.sections_file):
print "Sections definition file not found"
exit(1)
else:
sections_file_dir = os.path.dirname(self.sections_file)
if sections_file_dir:
if self.verbosity >= 2:
print "Adding to PYTHONPATH: %s" % sections_file_dir
sys.path.append(sections_file_dir)
sections_file_module = os.path.basename(self.sections_file)
if sections_file_module.endswith('.py'):
sections_file_module = sections_file_module[:-3]
if self.verbosity >= 2:
print "Importing module: %s" % sections_file_module
sections_data = __import__(sections_file_module)
if hasattr(sections_data, 'SECTIONS') and isinstance(sections_data.SECTIONS, dict):
SECTIONS.update(sections_data.SECTIONS)
else:
print "Sections file does not contain a SECTIONS dictionary"
exit(3)
objects = self.read_inv_file(self.source)
self.DOC_ROOT = os.path.dirname(self.source)
self.PRIMARY_NAMESPACE = None
module_order_index = 0
DOC_MODULE = '0'
DOC_API_PART = '1'
DOC_PAGE = '-1'
self.inventory = {
'namespaces': [],
'classes': [],
'pages': [],
}
# Import class documentation
for obj_item in objects:
if not obj_item:
continue
#autopilot.display py:module 0 api/autopilot.display/#module-$ -
if self.verbosity >= 3:
print "Object: %s" % obj_item
obj_data = obj_item.split(' ')
try:
fullname, doc_type, doc_enum, href = obj_data[0:4]
except ValueError:
print "Not enough values: %s" % obj_item
exit(1)
if doc_enum == DOC_MODULE:
page_path, page_anchor = href.split('#')
if page_path.endswith('/'):
page_path = page_path[:-1]
ns_name = fullname
self.module_order.append(fullname)
self.namespace_map[fullname] = page_path
elif doc_enum == DOC_API_PART:
ns_name = '.'.join(fullname.split('.')[:2])
if doc_type == 'py:class':
self.class_map[fullname] = fullname
elif doc_type == 'py:method':
self.class_map[fullname] = '.'.join(fullname.split('.')[:-1])
elif doc_enum == DOC_PAGE:
if self.verbosity >= 2:
print "Found Page: %s" % fullname
ns_name = ''
page_path, page_anchor = href.split('#')
if page_path.endswith('/'):
page_path = page_path[:-1]
if page_anchor == '#$':
page_anchor = '#'+fullname
if self.verbosity >= 2:
print "Adding Page: %s" % page_path
if len(obj_data) > 4:
page_title = ' '.join(obj_data[4:])
else:
page_title = page_anchor
if page_path not in self.pages_sections:
self.pages_sections[page_path] = dict()
self.pages_sections[page_path][page_anchor] = fullname
if not page_path in self.page_order:
self.page_map[page_path] = self.parse_pagename(page_path)
self.page_data_map[page_path] = (ns_name, fullname, fullname, page_title)
self.page_order.append(page_path)
else:
ns_name = ''
continue
for module in self.module_order:
doc_file = os.path.join(self.DOC_ROOT, self.namespace_map[module]+'.fjson')
module_data = self.read_json_file(doc_file)
classes, extra = self.extract_classes(module_data['body'])
if len(classes) > 0:
for fullname, doc_data in classes:
if '.' in fullname:
ns_name = fullname[:fullname.rindex('.')]
classname = fullname[fullname.rindex('.')+1:]
else:
classname = fullname
ns_name = None
cleaned_ns_name = self.parse_namespace(ns_name)
section, created = Section.objects.get_or_create(name=self.get_section(ns_name, None), topic_version=self.version)
if created:
print "Created section: %s" % section.name
if self.verbosity >= 1:
print 'Namespace: ' + ns_name
print 'Section: ' + section.name
if cleaned_ns_name is not None and cleaned_ns_name != '':
namespace, created = Namespace.objects.get_or_create(name=ns_name, display_name=cleaned_ns_name, platform_section=section)
if created:
print "Created Namespace: %s" % ns_name
namespace.data = self.clean_content(extra, doc_file, ns_name)
namespace.source_file = os.path.basename(doc_file)
namespace.source_format = "sphinx"
namespace.save()
else:
namespace = None
if self.verbosity >= 1:
print 'Element: ' + fullname
element, created = Element.objects.get_or_create(name=classname, fullname=fullname, section=section, namespace=namespace)
try:
for line in doc_data:
if line.startswith('<dd><p>'):
desc_line = self.parse_line(line[7:-4], doc_file, fullname)
link_replacer = re.compile('<a [^>]*>([^<]+)</a>')
while link_replacer.search(desc_line):
desc_line = link_replacer.sub('\g<1>', desc_line, count=1)
if len(desc_line) >= 256:
desc_line = desc_line[:252]+'...'
element.description = desc_line
break
except ValueError:
pass
element.data = self.clean_content(doc_data, doc_file, fullname)
element.source_file = os.path.basename(doc_file)
element.source_format = "sphinx"
element.save()
#exit(0)
if not self.options.get('no_pages', False):
page_order_index = 0
#self.page_order.extend(self.module_order)
for pagefile in self.pages_sections:
ns_name, pagename, pagefullname, pagetitle = self.page_data_map[pagefile]
try:
self.import_page(pagefile, ns_name, page_order_index)
page_order_index += 1
except Exception as e:
print "Failed to import page '%s': %s'" % (pagefile, e)
def import_page(self, pagehref, ns_name, page_order_index):
doc_file = os.path.join(self.DOC_ROOT, pagehref+'.fjson')
doc_data = self.read_json_file(doc_file)
if not 'body' in doc_data:
return
doc_data = doc_data['body'].split('\n')
cleaned_ns_name = self.parse_namespace(ns_name)
section, section_created = Section.objects.get_or_create(name=self.get_section(ns_name, pagehref), topic_version=self.version)
if section_created:
print "Created section: %s" % section.name
if cleaned_ns_name is not None and cleaned_ns_name != '':
namespace, created = Namespace.objects.get_or_create(name=ns_name, display_name=cleaned_ns_name, platform_section=section)
else:
namespace = None
pagename = self.parse_pagename(pagehref)
page, created = Page.objects.get_or_create(slug=pagename, fullname=pagename, section=section, namespace=namespace)
if not page.title:
page.title = pagename
page.save()
doc_start = 2
doc_end = len(doc_data)
for i, line in enumerate(doc_data):
if '<h1>' in line:
page.title = self.just_text(line[line.find('<h1>')+4:line.find('</h1>', 4)])
if len(page.title) >= 64:
page.title = page.title[:60]+'...'
if self.verbosity >= 2:
print "Setting title of %s to: %s" % (pagename, page.title)
page.save()
try:
# Change the content of the docs
cleaned_data = ''
for line in doc_data[doc_start:doc_end]:
line = self.parse_line(line, pagehref, pagehref)
if isinstance(line, unicode):
line = line.encode('ascii', 'replace')
cleaned_data += line + '\n'
page.data = cleaned_data
except Exception, e:
print "Parsing content failed:"
print e
#continue
#import pdb; pdb.set_trace()
page.source_file = os.path.basename(doc_file)
page.source_format = "sphinx"
page.order_index = page_order_index
page.save()
def import_namespace(self, nshref, nsname, nstitle, nsfullname, parent_ns_name, ns_order_index):
    """Import one namespace HTML page into the Namespace model.

    nshref -- file name of the namespace page under self.DOC_ROOT
    nsname / nstitle / nsfullname -- namespace identifiers from the index
    parent_ns_name -- unused here; kept for the caller's signature
    ns_order_index -- ordering position assigned by the caller
    """
    if nsname.endswith('.html'):
        nsname = nsname[:-5]
    section = Section.objects.get(name=self.get_section(nsname, None), topic_version=self.version)
    # Truncate to fit what is presumably a 64-char title column -- TODO confirm.
    if len(nstitle) >= 64:
        nstitle = nstitle[:60]+'...'
    ns, created = Namespace.objects.get_or_create(name=nsfullname, display_name=nsfullname, platform_section=section)
    if self.verbosity >= 1:
        print 'ns[%s]: %s' % (ns_order_index, ns.name)
    doc_file = os.path.join(self.DOC_ROOT, nshref)
    doc_handle = open(doc_file)
    doc_data = doc_handle.readlines()
    doc_handle.close()
    # Locate the useful slice of the HTML: after the contents div, before footer markers.
    doc_start = 2
    doc_end = len(doc_data)
    for i, line in enumerate(doc_data):
        if '<div class="contents">' in line:
            doc_start = i+1
        if '</div><!-- doc-content -->' in line and doc_end > i:
            doc_end = i-1
        if '<!-- start footer part -->' in line and doc_end > i:
            doc_end = i-2
    if self.verbosity >= 2:
        print "Doc range: %s:%s" % (doc_start, doc_end)
    try:
        # Change the content of the docs
        cleaned_data = ''
        for line in doc_data[doc_start:doc_end]:
            # The page's own <h1> title is dropped; the model stores it separately.
            if '<h1 class="title">' in line:
                continue
            line = self.parse_line(line, nshref, nsfullname)
            # Force ASCII; non-ASCII characters become '?'.
            if isinstance(line, unicode):
                line = line.encode('ascii', 'replace')
            cleaned_data += line + '\n'
        ns.data = cleaned_data
    except Exception, e:
        # Best-effort: a parse failure leaves ns.data unset but still saves metadata.
        print "Parsing content failed:"
        print e
        #continue
        #import pdb; pdb.set_trace()
    ns.source_file = os.path.basename(doc_file)
    ns.source_format = "sphinx"
    ns.order_index = ns_order_index
    ns.save()
|
# ******************************************
# * File: IfTest.py
# * A test program for if statement
# ******************************************
# Bug fix: time.clock() was deprecated in Python 3.3 and removed in 3.8;
# use time.time() as the wall-clock seed instead.
random.seed(time.time())
x = random.randint(0, 100)
y = random.randint(0, 100)
print("X = ", x, " Y = ", y)
if x == y:
    print('X is equal to Y')
else:
    print('X is not equal to Y')
|
# Generated by Django 2.2.2 on 2019-08-01 15:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `listing` CharField to the `inbox` model (auto-generated)."""
    dependencies = [
        ('contacts', '0016_auto_20190801_2023'),
    ]
    operations = [
        migrations.AddField(
            model_name='inbox',
            name='listing',
            # Existing rows are backfilled with the literal default 'Junaid'.
            field=models.CharField(default='Junaid', max_length=200),
        ),
    ]
|
"""
This file is part of pysofar: A client for interfacing with Sofar Ocean's Spotter API
Contents: Classes for representing devices and data grabbed from the API
Copyright 2019-2022
Sofar Ocean Technologies
Authors: Mike Sosa et al.
"""
from pysofar.sofar import SofarApi, WaveDataQuery
# --------------------- Devices ----------------------------------------------#
class Spotter:
    """
    Class to represent a Spotter object.

    Caches the most recent readings (position, battery, humidity, ...) as
    private attributes that are populated by update()/latest_data().
    """
    def __init__(self, spotter_id: str, name: str, session: SofarApi=None):
        """
        :param spotter_id: The Spotter id as a string
        :param name: The name of the Spotter
        :param session: Optional SofarApi session to reuse; a new one is
                        created when omitted
        """
        self.id = spotter_id
        self.name = name
        # cached Spotter data
        self._data = None
        # Spotter parameters (populated by update())
        self._mode = None
        self._latitude = None
        self._longitude = None
        self._battery_power = None
        self._battery_voltage = None
        self._solar_voltage = None
        self._humidity = None
        self._timestamp = None
        if session is None:
            session = SofarApi()
        self._session = session
    # -------------------------- Properties -------------------------------------- #
    @property
    def mode(self):
        """
        The tracking type of the Spotter.
        3 Modes are possible:
            - waves_standard
            - waves_spectrum (Includes spectrum data)
            - tracking
        :return: The current mode of the Spotter
        """
        return self._mode
    @mode.setter
    def mode(self, value):
        """
        Sets the mode of the Spotter
        :param value: Either 'full', 'waves', or 'track' else throws exception
        """
        if value == 'full':
            self._mode = 'waves_spectrum'
        elif value == 'waves':
            self._mode = 'waves_standard'
        elif value == 'track':
            self._mode = 'tracking'
        else:
            raise Exception('Invalid Mode')
    @property
    def lat(self):
        """
        :return: The most recent latitude value (since updating)
        """
        return self._latitude
    @lat.setter
    def lat(self, value): self._latitude = value
    @property
    def lon(self):
        """
        :return: The most recent longitude value (since updating)
        """
        return self._longitude
    @lon.setter
    def lon(self, value): self._longitude = value
    @property
    def battery_voltage(self):
        """
        :return: Battery voltage of the Spotter
        """
        return self._battery_voltage
    @battery_voltage.setter
    def battery_voltage(self, value): self._battery_voltage = value
    @property
    def battery_power(self):
        """
        :return: The most recent battery_power value (since updating)
        """
        return self._battery_power
    @battery_power.setter
    def battery_power(self, value): self._battery_power = value
    @property
    def solar_voltage(self):
        """
        :return: The most recent solar voltage level (since updating)
        """
        return self._solar_voltage
    @solar_voltage.setter
    def solar_voltage(self, value): self._solar_voltage = value
    @property
    def humidity(self):
        """
        :return: The most recent humidity value (since updating)
        """
        return self._humidity
    @humidity.setter
    def humidity(self, value): self._humidity = value
    @property
    def timestamp(self):
        """
        The time value at which the current Spotter last recorded data
        :return: ISO8601 formatted string
        """
        return self._timestamp
    @timestamp.setter
    def timestamp(self, value): self._timestamp = value
    @property
    def data(self):
        """
        :return: Cached data from the latest update
        """
        return self._data
    @data.setter
    def data(self, value): self._data = value
    # -------------------------- API METHODS -------------------------------------- #
    def change_name(self, new_name: str):
        """
        Updates the Spotter's name in the Sofar database
        :param new_name: The new desired Spotter name
        """
        self.name = self._session.update_spotter_name(self.id, new_name)
    def download_datafile(self, start_date, end_date):
        """
        Download a datafile containing this Spotter's data from start_date to end_date
        :param start_date: Start date string
        :param end_date: End date String
        """
        from pysofar.tools import parse_date
        self._session.grab_datafile(self.id, parse_date(start_date), parse_date(end_date))
    def update(self):
        """
        Updates this Spotter's attribute values.
        :return: The data last recorded by the current Spotter
        """
        # TODO: also add the latest data for this (Since it does return it)
        # TODO: disambiguate & de-duplicate update() vs latest_data()
        _data = self._session.get_latest_data(self.id)
        self.name = _data['spotterName']
        self._mode = _data['payloadType']
        self._battery_power = _data['batteryPower']
        self._battery_voltage = _data['batteryVoltage']
        self._solar_voltage = _data['solarVoltage']
        self._humidity = _data['humidity']
        wave_data = _data['waves']
        track_data = _data['track']
        freq_data = _data['frequencyData']
        # Position/timestamp come from the newest track point, when any exist.
        if len(track_data):
            self._latitude = _data['track'][-1]['latitude']
            self._longitude = _data['track'][-1]['longitude']
            self._timestamp = _data['track'][-1]['timestamp']
        else:
            self._latitude = None
            self._longitude = None
            self._timestamp = None
        # Keep only the most recent sample of each stream (None when absent).
        results = {
            'wave': wave_data[-1] if len(wave_data) > 0 else None,
            'tracking': track_data[-1] if len(track_data) > 0 else None,
            'frequency': freq_data[-1] if len(freq_data) > 0 else None
        }
        self._data = results
    def latest_data(self,
                    include_wind: bool = False,
                    include_directional_moments: bool = False,
                    include_barometer_data: bool = False,
                    include_partition_data: bool = False,
                    include_surface_temp_data: bool = False):
        """
        Updates and returns the latest data for this Spotter.
        :param include_wind: Defaults to False. Set to True if you want the latest data to include wind data
        :param include_directional_moments: Defaults to False. Only applies if the Spotter is in 'full_waves' mode.
                                            Set to True if you want the latest data to include directional moments
        :param include_barometer_data: Defaults to False. Only applies to barometer-equipped Spotters.
        :param include_partition_data: Defaults to False. Only applies to Spotters in Waves:Partition mode.
        :param include_surface_temp_data: Defaults to False. Only applies to SST sensor-equipped Spotters.
        :return: The latest data values based on the given parameters from this Spotter
        """
        _data = self._session.get_latest_data(self.id,
                                              include_wind_data=include_wind,
                                              include_directional_moments=include_directional_moments,
                                              include_barometer_data=include_barometer_data,
                                              include_partition_data=include_partition_data,
                                              include_surface_temp_data=include_surface_temp_data)
        wave_data = _data['waves']
        track_data = _data['track']
        freq_data = _data['frequencyData']
        # the following fields are not included when not requested, so default to empty list
        wind_data = _data.get('wind', [])
        baro_data = _data.get('barometerData', [])
        partition_data = _data.get('partitionData', [])
        sst_data = _data.get('surfaceTemp', [])
        # Most recent sample per stream, or None when the stream is empty.
        results = {
            'wave': wave_data[-1] if len(wave_data) > 0 else None,
            'tracking': track_data[-1] if len(track_data) > 0 else None,
            'frequency': freq_data[-1] if len(freq_data) > 0 else None,
            'wind': wind_data[-1] if len(wind_data) > 0 else None,
            'barometer': baro_data[-1] if len(baro_data) > 0 else None,
            'partition': partition_data[-1] if len(partition_data) > 0 else None,
            'surfaceTemp': sst_data[-1] if len(sst_data) > 0 else None
        }
        return results
    def grab_data(self, limit: int = 20,
                  start_date: str = None, end_date: str = None,
                  include_waves: bool = True, include_wind: bool = False,
                  include_track: bool = False, include_frequency_data: bool = False,
                  include_directional_moments: bool = False,
                  include_surface_temp_data: bool = False,
                  include_spikes: bool = False,
                  include_barometer_data = False,
                  include_microphone_data = False,
                  smooth_wave_data: bool = False,
                  smooth_sg_window: int = 135,
                  smooth_sg_order: int = 4,
                  interpolate_utc: bool = False,
                  interpolate_period_seconds: int = 3600):
        """
        Grabs the requested data for this Spotter based on the given keyword arguments
        :param limit: The limit for data to grab. Defaults to 20, For frequency data max of 100 samples at a time,
                      else, 500 samples. If you send values over the limit, it will automatically limit for you
        :param start_date: ISO 8601 formatted date string. If not included defaults to beginning of Spotter's history
        :param end_date: ISO 8601 formatted date string. If not included defaults to end of Spotter history
        :param include_waves: Defaults to True. Set to False if you do not want the wave data in the returned response
        :param include_wind: Defaults to False. Set to True if you want wind data in the returned response
        :param include_track: Defaults to False. Set to True if you want tracking data in the returned response
        :param include_frequency_data: Defaults to False. Only applies if the Spotter is in 'Full Waves mode' Set to
                                       True if you want frequency data in the returned response
        :param include_directional_moments: Defaults to False. Only applies if the Spotter is in 'Full Waves mode' and
                                            'include_frequency_data' is True. Set True if you want the frequency data
                                            returned to also include directional moments
        :param include_surface_temp_data: Defaults to False. Set to True if your device is a v2 model or newer with the
                                          SST sensor installed
        :param include_barometer_data: Defaults to False. Set to True if your device is a v3 model or newer with the
                                       barometer installed
        :param include_spikes: Defaults to False. Set to True if you wish to include data points that our system has
                               identified as a potentially unwanted spike.
        :return: Data as a json based on the given query parameters
        """
        # Build the query object, toggle each flag, then execute it.
        _query = WaveDataQuery(self.id, limit, start_date, end_date)
        _query.waves(include_waves)
        _query.wind(include_wind)
        _query.track(include_track)
        _query.frequency(include_frequency_data)
        _query.directional_moments(include_directional_moments)
        _query.surface_temp(include_surface_temp_data)
        _query.spikes(include_spikes)
        _query.barometer(include_barometer_data)
        _query.microphone(include_microphone_data)
        _query.smooth_wave_data(smooth_wave_data)
        _query.smooth_sg_window(smooth_sg_window)
        _query.smooth_sg_order(smooth_sg_order)
        _query.interpolate_utc(interpolate_utc)
        _query.interpolate_period_seconds(interpolate_period_seconds)
        _data = _query.execute()
        return _data
|
def longest_increasing_subsequence(values):
    """Return (length, subsequence) for the longest strictly increasing
    subsequence of `values`, using the O(n^2) DP; (0, []) for empty input.
    """
    if not values:
        return 0, []
    count = len(values)
    best_len = [1] * count             # best_len[i]: LIS length ending at index i
    best_seq = [[v] for v in values]   # best_seq[i]: one LIS ending at index i
    for i in range(count):
        for j in range(i):
            if values[i] > values[j] and best_len[j] + 1 > best_len[i]:
                best_len[i] = best_len[j] + 1
                best_seq[i] = best_seq[j] + [values[i]]
    top = max(range(count), key=best_len.__getitem__)
    return best_len[top], best_seq[top]


# The original script reassigned `data` repeatedly (only the last value was
# used) and crashed on the final empty list: with data == [] it indexed
# sample[maxIndex] with maxIndex == -1 on an empty list.  It also assigned a
# typo variable `maxIndexv`.  The earlier candidates are kept for experiments.
data = [8, 1, 7, 9, 6, 5, 10]
data = [7, 8, 9, 1, 2, 3, 4, -100, -99, 6, 7]
data = [9]
data = []
length, subsequence = longest_increasing_subsequence(data)
print(data)
print(length)
print(subsequence)
|
import prompt
from typing import Callable
from brain_games.games.game_types import Game
ATTEMPTS = 3
def run(rules: str,
        create_game: Callable[[], Game],
        name: str) -> None:
    """Play ATTEMPTS rounds of a game; stop early on the first wrong answer.

    :param rules: rules text printed once before the first round
    :param create_game: factory returning a (question, correct_answer) pair
    :param name: player's name used in the feedback messages
    """
    print(rules)
    rounds_left = ATTEMPTS
    while rounds_left > 0:
        question, correct_answer = create_game()
        print(f"Question: {question}")
        answer = prompt.string("Your answer: ")
        if answer != correct_answer:
            print(f"'{answer}' is wrong answer ;(.",
                  f"Correct answer was '{correct_answer}'.")
            print(f"Let's try again, {name}!")
            return
        print("Correct!")
        rounds_left -= 1
    print(f"Congratulations, {name}!")
|
from django import forms
from .models import Contact
from captcha.fields import CaptchaField
class ContactForm(forms.ModelForm):
    """Contact form backed by the Contact model, with an added CAPTCHA challenge."""
    captcha = CaptchaField()
    class Meta:
        # Expose only these fields (the model's plus the extra captcha) on the form.
        model = Contact
        fields = ('name', 'email', 'text', 'captcha')
def isPalindrome(string):
    """Return True when `string` reads the same forwards and backwards."""
    reversed_form = "".join(reversed(string))
    return string == reversed_form
# Read one line and print 1 when it is a palindrome, 0 otherwise.
A = input()
print(1 if isPalindrome(A) else 0)
# Done
|
import esp
import machine
import network
import dht
from bme280 import BME280
import ujson as json
from utime import sleep
from umqtt.simple import MQTTClient
MQTT_SERVER = "10.254.0.1"  # MQTT broker address
SENSOR_NAME = "afra-t1"     # MQTT client id for this board
DHT_TOPIC = "afra-t1"       # "name" field of DHT22 payloads
BME_TOPIC = "afra-t2"       # "name" field of BME280 payloads
GPIO_DHT = 26               # data pin of the DHT22
GPIO_SCL = 22               # I2C clock pin for the BME280
GPIO_SDA = 21               # I2C data pin for the BME280
# how often it should be measured. Every x seconds
MEASURE_TIME = 5 * 60
def connect_mqtt(server, client_id):
    """Open and return a connected MQTT client for `server`."""
    mqtt_client = MQTTClient(client_id, server)
    mqtt_client.connect()
    print("Connected to mqtt")
    return mqtt_client
def connect_wifi(timeout):
    """ timeout in sec """
    interface = network.WLAN(network.STA_IF)
    interface.active(True)
    interface.connect('discord', 'baraustrinken')
    # Poll once per second until connected or the timeout elapses.
    remaining = timeout
    while remaining > 0:
        if interface.isconnected():
            print("Connected to wifi")
            break
        sleep(1)
        remaining -= 1
    return interface
def read_dht():
    """Sample the DHT22 once and return the reading as a JSON string."""
    dht22 = dht.DHT22(machine.Pin(GPIO_DHT))
    dht22.measure()
    payload = {
        "name": DHT_TOPIC,
        "data": {
            "temp": dht22.temperature(),
            "humidity": dht22.humidity(),
        },
        "measuretime": MEASURE_TIME,
    }
    return json.dumps(payload)
def format_bme280(bme):
    """Convert one BME280 compensated reading into the sensor JSON string."""
    raw_temp, raw_pressure, _hum = bme.read_compensated_data()
    payload = {
        "name": BME_TOPIC,
        "data": {
            # raw units per the driver: temp in centi-degrees, pressure in Pa*256
            "temp": raw_temp / 100,
            "pressure": (raw_pressure // 256) / 100
        },
        "measuretime": 60 * 5,
    }
    return json.dumps(payload)
def deepsleep():
    """Wait 10 s (leaves time to break in over the REPL), then deep-sleep
    for the rest of the measurement interval (machine.deepsleep takes ms).
    """
    print("Going to deepsleep in 10 sec")  # fixed typo: was "Goint"
    sleep(10)
    machine.deepsleep((MEASURE_TIME - 10) * 1000)
def main():
    """One measurement cycle: wifi -> MQTT -> read sensors -> publish -> deep sleep.

    Every path ends in deepsleep(), which resets the board, so the while
    loop never actually iterates twice; it only guards against fall-through.
    """
    try:
        while True:
            print("Connecting to Wifi")
            wifi = connect_wifi(timeout=10)
            if not wifi.isconnected():
                print("Can't connect to wifi")
                deepsleep()
            print("Connecting to MQTT")
            mqtt = connect_mqtt(MQTT_SERVER, SENSOR_NAME)
            dhtjson = read_dht()
            print("Read dht %s" % dhtjson)
            # The BME280 sits on software I2C on the configured pins.
            i2c = machine.SoftI2C(scl=machine.Pin(GPIO_SCL), sda=machine.Pin(GPIO_SDA))
            bme = BME280(i2c=i2c)
            bmejson = format_bme280(bme)
            print("Read bme %s" % bmejson)
            print("Publishing to mqtt")
            mqtt.publish("afra.sensors", bytes(dhtjson, 'utf-8'))
            mqtt.publish("afra.sensors", bytes(bmejson, 'utf-8'))
            mqtt.disconnect()
            wifi.disconnect()
            print("Successful cycle, going into deepsleep")
            deepsleep()
    except Exception as exp:
        # Any failure: log it and deep-sleep so the cycle retries after reset.
        print("Exception occured %s" % exp)
        deepsleep()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 19 15:04:08 2018
@author: ravila
"""
from Bio import SeqIO
import glob
import pandas as pd
from multiprocessing import Pool
import numpy as np
import sys
def get_reading_frame(seq, n):
    """Take a sequence string and an integer shift;
    return the list of complete triplets starting at offset n
    (a trailing partial triplet is dropped)."""
    triplets = (seq[i:i + 3] for i in range(n, len(seq), 3))
    return [t for t in triplets if len(t) == 3]
def vectorize(r):
    """Embed the fasta sequence in file `r` as a 100-d ProtVec vector.

    Sums the pre-trained 3-gram vectors over all three reading frames and
    returns [name, v0, ..., v99] where name is the file's base name without
    its ".txt" suffix.  Relies on the module-level `model` DataFrame.
    """
    fasta = SeqIO.read(r, 'fasta')
    sequence = str(fasta.seq)
    # Get reading frames
    rf1 = get_reading_frame(sequence, 0)
    rf2 = get_reading_frame(sequence, 1)
    rf3 = get_reading_frame(sequence, 2)
    # Get the sum of vectors for all the triplets.
    vec = np.zeros(100)
    for frame in (rf1, rf2, rf3):
        for triplet in frame:
            component = np.array(model.loc[model[0] == triplet])[0][1:]
            vec = np.add(vec, component)
    # Bug fix: rstrip(".txt") strips any trailing '.', 't' or 'x' characters
    # (e.g. "receptt.txt" -> "recep"), not the suffix; remove exactly ".txt".
    name = r.split("/")[-1]
    if name.endswith(".txt"):
        name = name[:-len(".txt")]
    return [name] + [i for i in vec]
if __name__ == "__main__":
    # Import pre-trained model
    model = pd.read_csv("protVec_100d_3grams.csv", header=None)
    # List to store results
    vecs = []
    # Read receptor data
    dir_name = sys.argv[1].rstrip("/")
    receptors = glob.glob(dir_name + "/*.txt")
    # Start process pool
    # NOTE(review): the pool is never closed/joined; consider `with Pool(4) as pool:`.
    pool = Pool(processes=4)
    # pool.map returns the full result list, so vecs is a one-element list
    # of lists that is unwrapped on the next line.
    vecs.append(pool.map(vectorize, receptors))
    vecs = vecs[0]
    vecs_df = pd.DataFrame(vecs)
    vecs_df.to_csv(dir_name + "_vec.csv", index=False)
|
import pymysql.cursors
# Bug fix: the original redacted credentials as `host=#'hostname'` etc., which
# comments out the values and leaves a SyntaxError.  Quoted placeholder strings
# restore valid syntax; fill in real credentials before running.
connection = pymysql.connect(host='hostname',
                             user='username',
                             password='password',
                             db='dbname',
                             charset='utf8',
                             cursorclass=pymysql.cursors.DictCursor)
try:
    with connection.cursor() as cursor:
        # One hard-coded row per card; executed in order, committed together.
        statements = (
            "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness,"
            "redirect_name,redirect_id,loyalty) "
            "values('Angelic Guardian','[\\'white\\']','[]','天使(Angel)','飛行\n"
            "あなたがコントロールしているクリーチャーが1体以上攻撃するたび、ターン終了時までそれらは破壊不能を得る。',"
            "'144','creature','6','5','5','','0','');",
            "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness,"
            "redirect_name,redirect_id,loyalty) "
            "values('Angler Turtle','[\\'blue\\']','[]','海亀(Turtle)','呪禁\n"
            "各戦闘で、対戦相手がコントロールするクリーチャーは可能なら攻撃する。','800','creature',"
            "'7','5','7','','0','');",
            "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness,"
            "redirect_name,redirect_id,loyalty) "
            "values('Vengeant Vampire','[\\'black\\']','[]','吸血鬼(Vampire)','絆魂\n"
            "Vengeant Vampireが死亡したとき、対戦相手のコントロールするクリーチャー1体を対象とする。それを破壊し、あなたは4点のライフを得る。',"
            "'784','creature','6','4','4','','0','');",
            "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness,"
            "redirect_name,redirect_id,loyalty) "
            "values('Immortal Phoenix','[\\'red\\']','[]','フェニックス(Phoenix)','飛行\n"
            "Immortal Phoenixが死亡したとき、これをオーナーの手札に戻す。','1936','creature','6','5','3','','0','');",
            "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness,"
            "redirect_name,redirect_id,loyalty) "
            "values('Rampaging Brontodon','[\\'green\\']','[]','恐竜(Dinosaur)',"
            "'トランプル\nRampaging Brontodonが攻撃するたび、ターン終了時までこれはあなたがコントロールする土地1つにつき+1/+1の修整を受ける。',"
            "'5408','creature','7','7','7',' ','0',' ');",
        )
        for sql in statements:
            cursor.execute(sql)
    connection.commit()
finally:
    connection.close()
from kts_linguistics.corpora.corpora import Corpora
from kts_linguistics.spellcheck.spellfix import spellfix_word
from kts_linguistics.string_transforms.abstract_transform import AbstractByWordTransform
class SpellfixTransform(AbstractByWordTransform):
    """Word-level transform that spell-fixes each word against a corpora,
    optionally memoizing corrections that actually changed the word."""
    def __init__(self,
                 corpora: Corpora,
                 do_cache: bool = False,
                 cache: dict = None,
                 fix_threshold: float = 1.0):
        self.corpora = corpora
        # Share the caller-supplied dict when given; otherwise start empty.
        self.cache = dict() if cache is None else cache
        self.do_cache = do_cache
        self.fix_threshold = fix_threshold
    def transform_word(self, word: str) -> str:
        # Cache is always consulted, even when do_cache is off (pre-seeded dicts).
        if word in self.cache:
            return self.cache[word]
        fixed = spellfix_word(word, corpora=self.corpora, fix_threshold=self.fix_threshold)
        # Only remember corrections that changed the word.
        if self.do_cache and fixed != word:
            self.cache[word] = fixed
        return fixed
|
from transformers import ElectraForSequenceClassification
from utils_electra import ElectraClassificationHeadCustom
def get_last_dropout(model):
    """Return the model's final dropout layer (mirrors set_last_dropout)."""
    if not isinstance(model, ElectraForSequenceClassification):
        return model.dropout
    head = model.classifier
    if isinstance(head, ElectraClassificationHeadCustom):
        return head.dropout2
    return head.dropout
def set_last_dropout(model, dropout):
    """Replace the model's final dropout layer with `dropout`.

    Mirrors get_last_dropout: Electra models with the custom head use
    classifier.dropout2, plain Electra heads use classifier.dropout, and
    other models keep dropout directly on the model object.
    """
    if isinstance(model, ElectraForSequenceClassification):
        if isinstance(model.classifier, ElectraClassificationHeadCustom):
            model.classifier.dropout2 = dropout
        else:
            # Bug fix: the original line read `model.classifier.dropout`
            # without assigning, so this branch silently did nothing.
            model.classifier.dropout = dropout
    else:
        model.dropout = dropout
|
__all__ = ["BaseModel","CardModel","HeroModel"] |
from flask import Flask, redirect, url_for, render_template
app = Flask(__name__)
@app.route('/<name>')
def home(name):
    """Render the landing page for the visitor named in the URL."""
    context = {
        "content": name,
        "radiation": 999,
        "gang": ["joe", "mama", "kek"],
    }
    return render_template("index.html", **context)
@app.route("/admin")
def admin():
return redirect(url_for("home"))
# Run the development server only when executed directly, not on import.
if __name__ == '__main__':
    app.run()
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
import pytest
from pants.option.option_value_container import OptionValueContainerBuilder
from pants.option.ranked_value import Rank, RankedValue
class OptionValueContainerTest(unittest.TestCase):
    """Behavioral tests for OptionValueContainerBuilder and the containers it builds."""

    def test_unknown_values(self) -> None:
        builder = OptionValueContainerBuilder()
        builder.foo = RankedValue(Rank.HARDCODED, 1)
        container = builder.build()
        assert container.foo == 1
        # Unset options raise instead of returning a default.
        with pytest.raises(AttributeError):
            container.bar

    def test_value_ranking(self) -> None:
        builder = OptionValueContainerBuilder()
        builder.foo = RankedValue(Rank.CONFIG, 11)
        container = builder.build()
        assert container.foo == 11
        assert container.get_rank("foo") == Rank.CONFIG
        # A lower rank must not displace the stored CONFIG value.
        builder.foo = RankedValue(Rank.HARDCODED, 22)
        container = builder.build()
        assert container.foo == 11
        assert container.get_rank("foo") == Rank.CONFIG
        # Higher ranks do displace it.
        builder.foo = RankedValue(Rank.ENVIRONMENT, 33)
        container = builder.build()
        assert container.foo == 33
        assert container.get_rank("foo") == Rank.ENVIRONMENT
        builder.foo = RankedValue(Rank.FLAG, 44)
        container = builder.build()
        assert container.foo == 44
        assert container.get_rank("foo") == Rank.FLAG

    def test_is_flagged(self) -> None:
        builder = OptionValueContainerBuilder()
        # Only Rank.FLAG counts as "flagged".
        for rank in (Rank.NONE, Rank.CONFIG, Rank.ENVIRONMENT):
            builder.foo = RankedValue(rank, 11)
            assert not builder.build().is_flagged("foo")
        builder.foo = RankedValue(Rank.FLAG, 11)
        assert builder.build().is_flagged("foo")

    def test_indexing(self) -> None:
        builder = OptionValueContainerBuilder()
        builder.foo = RankedValue(Rank.CONFIG, 1)
        container = builder.build()
        assert container["foo"] == 1
        assert container.get("foo") == 1
        assert container.get("foo", 2) == 1
        assert container.get("unknown") is None
        assert container.get("unknown", 2) == 2
        with pytest.raises(AttributeError):
            container["bar"]

    def test_iterator(self) -> None:
        builder = OptionValueContainerBuilder()
        builder.a = RankedValue(Rank.FLAG, 3)
        builder.b = RankedValue(Rank.FLAG, 2)
        builder.c = RankedValue(Rank.FLAG, 1)
        assert list(iter(builder.build())) == ["a", "b", "c"]

    def test_copy(self) -> None:
        # copy semantics can get hairy when overriding __setattr__/__getattr__, so we test them.
        builder = OptionValueContainerBuilder()
        builder.foo = RankedValue(Rank.FLAG, 1)
        builder.bar = RankedValue(Rank.FLAG, {"a": 111})
        first = builder.build()
        second = builder.build()
        # Verify that the result is in fact a copy.
        assert first.foo == 1  # Has original attribute.
        builder.baz = RankedValue(Rank.FLAG, 42)
        assert not hasattr(first, "baz")  # Does not have attribute added after the copy.
        # Verify that it's a shallow copy by modifying a referent in one and reading it in the other.
        first.bar["b"] = 222
        assert second.bar == {"a": 111, "b": 222}
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import sys
# Make the local DREAM python package importable from this example directory.
sys.path.append('../../py')
from DREAM.DREAMOutput import DREAMOutput
# Plot the cold-electron temperature at a few time indices (-1 = last frame).
do = DREAMOutput('output.h5')
do.eqsys.T_cold.plot(t=[0,5,10,15,-1], show=False)
plt.show()
|
# Read n lines of "<number> <D|E>" and count how many D-numbers can be
# paired with an equal E-number, consuming each E entry at most once.
n = int(input())
d_values = []
e_values = []
c = 0
for _ in range(n):
    parts = input().split()
    if parts[1] == "D":
        d_values.append(int(parts[0]))
    else:
        e_values.append(int(parts[0]))
for value in d_values:
    if value in e_values:
        c += 1
        e_values.remove(value)
print(c)
|
"""Update user tables
Revision ID: 43cda5e14cf0
Revises: 3473402c38bc
Create Date: 2012-11-14 23:11:34.817678
"""
revision = '43cda5e14cf0'
down_revision = '3473402c38bc'
from alembic import op
import sqlalchemy as db
from datetime import datetime
def upgrade():
    """Create the users and user_openids tables (users first: FK target)."""
    op.create_table('users',
                    db.Column('id', db.Integer, primary_key=True),
                    db.Column('email', db.String(50), unique=True, nullable=False),
                    db.Column('email_status', db.Integer, nullable=True, default=0),
                    db.Column('nickname', db.String(50), unique=True, nullable=False),
                    # Nullable: OpenID-only accounts have no local password.
                    db.Column('password', db.String(50), nullable=True),
                    db.Column('is_email_verified', db.Boolean, nullable=False, default=True),
                    db.Column('slug', db.String(50), nullable=True),
                    db.Column('created_time', db.DateTime, nullable=False, default=datetime.now),
                    db.Column('modified_time', db.DateTime, nullable=False, default=datetime.now),
                    db.Column('last_login_time', db.DateTime),
                    db.Column('privilege', db.Integer, default=3),
                    db.Column('user_info_id', db.Integer, db.ForeignKey('user_info.id'), nullable=False))
    # One row per external identity (provider + openid) linked to a user.
    op.create_table('user_openids',
                    db.Column('id', db.Integer, primary_key=True),
                    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), nullable=False),
                    db.Column('openid', db.String(255), nullable=False, unique=True),
                    db.Column('provider', db.String(50), nullable=False))
def downgrade():
    """Drop the tables created in upgrade(), child table first (FK order)."""
    op.drop_table('user_openids')
    op.drop_table('users')
|
# NameError: name 'pirnt' is not defined
# pirnt("hello")
# IndentationError: unexpected indent
# print("hello")
# SyntaxError: invalid syntax
# print("hello ") print("world")
|
import media
import fresh_tomatoes
import csv
# Define a function that read myMovie.csv
# and create Movie instance from the csv file
# Define a function that read myMovie.csv
# and create Movie instance from the csv file
def get_movie_list(file_name):
    """Read a CSV of movies and return a list of media.Movie instances."""
    # Initialize a list for storing movie data
    movie_list = []
    # Read file.csv and create media.Movie Instances
    with open(file_name, 'rt') as movie_csv:
        # Read csv as python dictionary
        reader = csv.DictReader(movie_csv)
        for row in reader:
            # Create media.Movie instances for each row.
            # Arguments passed: title, poster image URL, introduction,
            # year, type, trailer URL (matching the columns read below).
            movie_obj = media.Movie(
                row['title'],
                row['image'],
                row['introduction'],
                row['year'],
                row['type'],
                row['trailer_url']
            )
            # Store object in the list
            movie_list.append(movie_obj)
    return movie_list
# create a movie array
myMovies = get_movie_list('myMovies.csv')
# open the movie website page (generates and opens an HTML file)
fresh_tomatoes.open_movies_page(myMovies)
|
from .ResNet import *
from .googlenet import *
from .lenet import *
from .mobilenet import *
from .shufflenet import *
from .vgg import *
from .dpn import *
from .preact_resnet import *
from .senet import * |
inputFile = open("Day3\inputFile.txt","r")
inputTestFile = open("Day3\inputTestFile.txt","r")
Lines = inputFile.readlines()
def problem1():
    """
    You start on the open square (.) in the top-left corner and need to reach the bottom (below the bottom-most row on your map).
    The toboggan can only follow a few specific slopes (you opted for a cheaper model that prefers rational numbers); start by counting all the trees you would encounter for the slope right 3, down 1:
    From your starting position at the top-left, check the position that is right 3 and down 1.
    Then, check the position that is right 3 and down 1 from there, and so on until you go past the bottom of the map.
    """
    countTrees = 0
    XCoordinate = 0
    for line in Lines:
        currentInput = line.strip()
        # The map repeats horizontally: wrap by the actual row width
        # (was a hard-coded 31, which only worked for one input file).
        XCoordinate = XCoordinate % len(currentInput)
        if currentInput[XCoordinate] == "#":
            countTrees = countTrees + 1
        XCoordinate = XCoordinate + 3
    print(countTrees)
def problem2Helper(XCoordinateStep, YCoordinateStep):
    """Count trees ('#') hit on a slope of (right XCoordinateStep,
    down YCoordinateStep) across the repeating map in Lines."""
    countTrees = 0
    YCoordinate = 0
    XCoordinate = 0
    for line in Lines:
        # Only rows that are a multiple of the vertical step are visited.
        if (YCoordinate % YCoordinateStep == 0):
            currentInput = line.strip()
            # Wrap by the actual row width (was a hard-coded 31).
            XCoordinate = XCoordinate % len(currentInput)
            if currentInput[XCoordinate] == "#":
                countTrees = countTrees + 1
            XCoordinate = XCoordinate + XCoordinateStep
        YCoordinate = YCoordinate + 1
    return countTrees
def problem2():
    """
    Determine the number of trees you would encounter if, for each of the following slopes, you start at the top-left corner and traverse the map all the way to the bottom:
    Right 1, down 1.
    Right 3, down 1. (Problem 1)
    Right 5, down 1.
    Right 7, down 1.
    Right 1, down 2.
    What do you get if you multiply together the number of trees encountered on each of the listed slopes?
    """
    slopes = ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2))
    result = 1
    for right, down in slopes:
        result *= problem2Helper(right, down)
    print(result)
problem2()
import copy
will = ["Will", 28, ["Python", "C#", "JavaScript"]]
# wilber = copy.copy(will)
wilber = copy.deepcopy(will)
# Show that the copy is a distinct object with (initially) equal contents.
print(id(will))
print(will)
print([id(ele) for ele in will])
print(id(wilber))
print(wilber)
print([id(ele) for ele in wilber])
print('--------------------------------------')
# Mutate the original: a deep copy's nested list must NOT pick up "CSS".
will[0] = "Wilber"
will[2].append("CSS")
print(id(will))
print(will)
print([id(ele) for ele in will])
print(id(wilber))
print(wilber)
print([id(ele) for ele in wilber])
# For non-container ("atomic") types such as numbers and strings there is no
# real copying: for those, "obj is copy.copy(obj)" and "obj is copy.deepcopy(obj)".
# If a tuple contains only atomic objects, deepcopy returns the same object --
# see the example below.
print('--------------------------')
will = ("Python", "C#", "JavaScript")
wills = copy.deepcopy(will)
print("1",will)
print("2",wills)
print(will is wills)
# Rebinding `will` to a different tuple: `wills` still refers to the old one.
will = ("Python", "C#", "JavaScript", [])
print("3", will)
print("4", wills)
print(will is wills)
nomeCompleto = input('Digite o seu nome e o sobrenome: ')
# Bug fix: `nome, sobrenome = nomeCompleto.split(' ')` raised ValueError for
# one word, three or more words, or repeated spaces.  Take the first word as
# the given name and the last as the surname (assumes at least one word typed).
partes = nomeCompleto.split()
nome = partes[0]
sobrenome = partes[-1]
print(f'seu nome e {nome} {sobrenome}')
|
class Datum:
    """A measured value with a symmetric uncertainty."""
    def __init__(self, val, err):
        self.val = val
        self.err = err
    def __add__(self, other):
        from math import sqrt
        # Uncorrelated errors add in quadrature.
        combined_err = sqrt(self.err ** 2 + other.err ** 2)
        return Datum(self.val + other.val, combined_err)
    def __str__(self):
        return "%f +/- %f" % (self.val, self.err)
class Person:
    """Minimal person record with an optional name."""
    def __init__(self, n=None):
        self.name = n
    def __str__(self):
        return "[Class Person] name: %s" % self.name
    def display(self):
        # Print the name directly (None when unset).
        print(self.name)
if __name__ == "__main__":
x = Datum(1.1, 0.2)
y = Datum(-0.5, 0.2)
z = x + y
print(z)
john = Person()
print(john)
susan = Person("Susy")
print(susan)
|
# Generated by Django 3.1 on 2020-09-03 13:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the MostRecent model (auto-generated)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MostRecent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Uploaded images land in 'pictures/'; missing ones show default.jpg.
                ('Images', models.ImageField(default='default.jpg', upload_to='pictures')),
                ('foodname', models.CharField(max_length=200)),
                ('prize', models.CharField(max_length=200)),
            ],
        ),
    ]
|
try:
    import amath.ext as _b
except ModuleNotFoundError:
    # NOTE(review): the message says "_basic" but the module is amath.ext;
    # also _b stays undefined on failure, so later _b.* calls raise NameError
    # (which the functions below absorb via their broad except fallbacks).
    print("_basic failed to import")
# Coefficients for the pure-Python gamma() fallback -- presumably a
# Lanczos-style approximation with g = 10.900511, n = 10 (TODO confirm source).
GammaN = 10
GammaR = 10.900511
GammaDk = [2.48574089138753565546e-5, 1.05142378581721974210, -3.45687097222016235469, 4.51227709466894823700,
           -2.98285225323576655721, 1.05639711577126713077, -1.95428773191645869583e-1, 1.70970543404441224307e-2,
           -5.71926117404305781283e-4, 4.63399473359905636708e-6, -2.71994908488607703910e-9]
def sqrt(x):
    # type: (object) -> float
    """Returns square root of X
    :type x: object
    :param x:
    :return: float
    >>> sqrt(16)
    4.0
    >>> sqrt(25)
    5.0
    >>> sqrt(2)
    1.4142135623730951
    If X is negative, Returns a complex number
    >>> sqrt(-4)
    2j
    >>> sqrt(-16)
    4j
    Can accept Fractions and floats
    >>> sqrt(5.5)
    2.345207879911715
    >>> from amath.DataTypes.Fraction import Fraction
    >>> sqrt(Fraction(25,4))
    5/2
    """
    try:
        return _b.sqrt(x)  # call the c-api
    except TypeError:
        # x is not a plain float: retry after float, then complex coercion.
        try:
            return _b.sqrt(float(x))
        except ValueError:
            try:
                return _b.sqrt(complex(x))
            except ValueError:
                raise TypeError("{0} is not a number".format(str(x)))  # if it failed, x is not a number
    except ValueError:
        return sqrt(abs(x)) * 1j  # x is negative
    except:  # in case of _basic failure
        # Pure-Python fallback when the C extension is unavailable.
        if isinstance(x, complex):
            return x ** 0.5
        if x < 0:
            return sqrt(abs(x)) * 1j
        return x ** 0.5
# noinspection PyShadowingBuiltins
def abs(x):
    """
    Returns the absolute value of a float
    :param x: float, int, complex
    :return: absolute value of x
    >>> abs(5)
    5
    >>> abs(-5)
    5
    >>> abs(-5.2)
    5.2
    >>> abs(5.2)
    5.2
    Complex is different
    >>> abs(1j)
    1.0
    >>> abs(-532j)
    532.0
    """
    # NOTE(review): this shadows the builtin abs() for the whole module.
    try:
        return _b.abs(x)  # call c-api
    except:
        # Retry after float, then complex coercion, then the object's own __abs__.
        try:
            return _b.abs(float(x))
        except ValueError:
            try:
                return _b.abs(complex(x))
            except ValueError:
                try:
                    return x.__abs__()  # if c-api fails, run __abs__ function
                except AttributeError:
                    raise TypeError("{0} is not a number".format(str(x)))  # x is then not a valid number
# TODO-Look at gamma
def fac(x):
    """Compute the factorial of *x* via the gamma function.

    Non-integer and infinite arguments are supported through ``gamma``:

    >>> fac(0)
    1.0
    >>> fac(5)
    120
    >>> fac(0.5)
    0.886226925452758
    >>> fac(float("inf"))
    inf
    """
    if x == 0:
        return 1.0
    if x < 0:
        # Negative arguments follow the package convention and yield
        # complex infinity.
        from amath.constants import Cinf
        return Cinf
    return gamma(x + 1)
# TODO-Allow gamma to accept complex numbers
# TODO-Allow gamma to take larger numbers
def gamma(x):
    # type: (float) -> float
    """Return the gamma function of *x*.

    Uses the C extension when available; for large x (or if the extension
    failed) it falls back to a series approximation built from ``GammaDk``.
    """
    # t records whether the C-extension call failed.
    t = False
    y = 0.0
    try:
        y = _b.gamma(x)  # call c-api
    except:
        t = True
    from amath.testing.types import isinf, isnan, intQ
    if x >= 170 or t:  # to not overflow float or if in _basic failure
        try:
            from amath.constants import e, pi
            # Series: s = d0 + sum_i d_i / (x + i - 1), then scaled by the
            # Lanczos-style prefactor below (GammaR is the shift parameter).
            s = GammaDk[0]
            for i in range(1, GammaN + 1):
                s += GammaDk[i] / (x + i - 1.0)
            return s * 2 * sqrt(e / pi) * pow((x - 0.5 + GammaR) / e, x - 0.5)
        except OverflowError:
            if intQ(x):  # x must be an int
                from amath.stats.stats import product
                # Exact big-int product for integral x.
                # NOTE(review): the trailing `// x` appears to compensate the
                # product's bounds — confirm against amath.stats.product.
                return product(lambda k: k, 1, x) // x
            elif isinf(x):
                if x > 0:
                    from amath.DataTypes import Infinity
                    return Infinity(True)
                else:
                    # gamma(-inf) is undefined.
                    from amath.Errors import Indeterminate
                    raise Indeterminate()
            else:
                raise
    else:
        if isinf(y) or isnan(y):
            from amath.DataTypes import Infinity
            return Infinity(None)
        elif isinstance(x, int) or int(x) == x:
            # Integral argument: return the exact integer value.
            return int(y)
        else:
            return y
# @lru_cache(1024)
def fib(n):
    """Return the n-th Fibonacci number.

    Tries the C extension first; on failure (or an infinite result) falls
    back to a closed-form estimate using the golden ratio ``gr``.
    """
    try:
        y = _b.fib(n)
    except:
        # C extension missing or it rejected n: force the fallback path.
        y = float("inf")
    if y == float("inf"):
        from amath.constants import gr, pi
        from .trig import cos
        # Binet-style formula; cos(n*pi) supplies the alternating sign of
        # the (1/gr)**n correction term.
        n = float(n)
        return int(int(gr ** n - cos(n * pi) / gr ** n) / sqrt(5))
    else:
        return y
def rising_factorial(x, n):
    """Return the rising factorial x * (x+1) * ... * (x+n-1); 1 when n == 0."""
    total = 1
    term = x
    for _ in range(n):
        total *= term
        term += 1
    return total
def falling_factorial(x, n):
    """Return the falling factorial x * (x-1) * ... * (x-n+1); 1 when n == 0."""
    total = 1
    term = x
    for _ in range(n):
        total *= term
        term -= 1
    return total
def N():
    """Placeholder: this operation is intentionally not implemented."""
    return NotImplemented
|
"""
剑指 Offer 58 - II. 左旋转字符串
字符串的左旋转操作是把字符串前面的若干个字符转移到字符串的尾部。请定义一个函数实现字符串左旋转操作的功能。比如,输入字符串"abcdefg"和数字2,该函数
将返回左旋转两位得到的结果"cdefgab"。
"""
"""
其实还是栈堆的知识,就不赘述了。
"""
def reverseLeftWords(s: str, n: int) -> str:
    """Rotate *s* left by *n* positions: the first n characters move to the tail."""
    head, tail = s[:n], s[n:]
    return tail + head
if __name__ == '__main__':
    # Quick manual check: "abcde" rotated left by 1 is "bcdea".
    res = reverseLeftWords("abcde",1)
    print(res)
from django.test import TestCase
from apps.jogo.models import Jogo
from apps.jogo.mixins.tabuleiro import Tabuleiro
class JogoTestCase(TestCase):
    """Integration tests for the Jogo (game) model and the Tabuleiro (board) mixin."""

    def setUp(self):
        # Run 300 matches and persist the aggregated history so each test can
        # query the most recent Jogo record.
        self.jogo = Jogo()
        self.tabuleiro = Tabuleiro()
        self.jogo.iniciar_jogo()
        for n in range(1, 301):
            self.jogo.iniciar_partida(n)
        self.jogo.save_historico()

    def test_partidas_salvas(self):
        '''Verify that the matches are being saved.'''
        jogo = Jogo.objects.latest()
        self.assertTrue(jogo.partidas)

    def test_media_turno(self):
        '''Verify that the average number of turns is being saved.'''
        jogo = Jogo.objects.latest()
        self.assertTrue(jogo.media_turnos)

    def test_total_turnos(self):
        '''Verify that the total number of turns is being saved.'''
        jogo = Jogo.objects.latest()
        self.assertTrue(jogo.total_turnos)

    def test_vencedor(self):
        '''Verify that a winner is recorded.'''
        jogo = Jogo.objects.latest()
        self.assertTrue(jogo.vencedor)

    def test_timeout(self):
        '''Verify that matches which hit the timeout are recorded.'''
        jogo = Jogo.objects.latest()
        self.assertTrue(jogo.timeout)

    def test_media_vencedor(self):
        '''Verify that wins are recorded for each player behaviour.'''
        jogo = Jogo.objects.latest()
        self.assertTrue(jogo.aleatorio)
        self.assertTrue(jogo.cauteloso)
        self.assertTrue(jogo.exigente)
        self.assertTrue(jogo.impulsivo)

    def test_verificar_jogador(self):
        '''Verify that all four players were instantiated.'''
        self.tabuleiro.organizando_tabuleiro()
        jogadores = self.tabuleiro.jogadores
        self.assertEqual(len(jogadores), 4)

    def test_inativar_jogador(self):
        '''Verify that a player is deactivated once their balance reaches zero.'''
        self.tabuleiro.organizando_tabuleiro()
        jogador = self.tabuleiro.jogadores[0]
        # Drain the player's full balance, then check they drop out of the
        # active-players list.
        jogador.subtrair_valor(jogador.valor_total)
        jogadores_ativos = self.tabuleiro.jogadores_ativo()
        self.assertFalse(jogador in jogadores_ativos)

    def test_jogador_andando(self):
        '''Verify that the move method advances the player on the board.'''
        self.tabuleiro.organizando_tabuleiro()
        jogador = self.tabuleiro.jogadores_ativo()[0]
        posicao_ini = jogador.posicao_tab
        jogador.andar()
        posicao_fin = jogador.posicao_tab
        self.assertTrue(posicao_ini < posicao_fin)

    def test_propriedades(self):
        '''Verify that all 20 properties were added to the board.'''
        self.tabuleiro.organizando_tabuleiro()
        propriedades = self.tabuleiro.propriedades[0].keys()
        self.assertEqual(len(propriedades), 20)
|
from django import forms
import django_tables2 as tables
from orderedtable.models import Project
CHOICE = (
('distance', 'distance'),
('rate', 'rate'),
('project_size', 'project_size'),
('completion_date', 'completion_date'),
)
class ImportJson(forms.Form):
    """Form accepting a single required JSON file upload."""
    json = forms.FileField(required=True)
class ProjectTable(tables.Table):
    """django-tables2 table auto-generated from the Project model's fields."""
    class Meta:
        model = Project
class Choice(forms.Form):
    """Form with four required sort-criterion selectors drawn from CHOICE."""
    # NOTE(review): four identical fields suggest a ranking of criteria —
    # confirm whether duplicate selections across ch1..ch4 should be rejected.
    ch1 = forms.ChoiceField(choices=CHOICE,required=True)
    ch2 = forms.ChoiceField(choices=CHOICE, required=True)
    ch3 = forms.ChoiceField(choices=CHOICE, required=True)
    ch4 = forms.ChoiceField(choices=CHOICE, required=True)
"""
Robotritons testing version of gps navigation.
Purpose: Use reliable GPS data to control vehicle speed and calculate waypoint heading.
Requirements: A vehicle with at least one speed controller and one servo, and one Ublox NEO-M8N Standard Precision GNSS Module. The python modules sys, time, spidev, math, navio.util, VehicleGPSModule, and VehiclePWMModule
Use: Set a waypoint. Instantiate esc, servo, and ublox objects then use their included methods as well as those defined here in order to wait until usable GPS data is secured.
The vehicle's wheels will move, so help the vehicle approach the waypoint. The wheels will stop once the vehicle's latitude and longitude are both within 0.001 of the waypoint.
Instantiate objects for an esc using vehiclePWM("esc"), a servo using vehiclePWM("servo"), and a ublox using U_blox()
Updates:
- September 9, 2016. Merged the testGPSModule into better named GPSBasicNav.py. The old testGPSModule and the new GPSBasicNav.py now calculate desired heading to a waypoint.
- May 26, 2016. I was finally able to reproduce the error of GPSfetch not returning a message. It has nothing to do with rapidly setting and changing CFG-Messages,
(although 1 second must pass after a message is enabled/disabled) but instead is caused when accel() changes direction inside the GPSfetch loop. When two back-to-back
accel() statements have different signed speeds then the internal time.sleep() is executed for a total of 1 second. In other words each GPSfetch attempt will occur
with a period of 1 second. But GPSfetch requires many, many attempts to successfully find a NAVposllh message, about 1130 attempts by my count, so it would take
~18 minutes for GPSfix to work running at one execution/second.
Maybe a solution is to immediately poll for that type of message? I made the fetchSpecial method to poll for the desired message and expedite the message receival.
But no luck: it takes just as long to get a response as not polling. Looks like it will take more effort than it is worth.
I guess threading or omitting time.sleep() is the only solution.
- May 25, 2016. Found accel() execution of time.sleep() as the SOURCE OF ERROR causing GPSfetch() to never get a valid message. The pauses in
the accel commands cannot be run with the nav message updates because it will take ~1000 tries to get a message. Also, noticed a POTENTIAL ERROR
that accel statements move the vehicle only as long as the pwm generator maintains the same state. Aka, changing accel states too quickly
means that output won't transfer to vehicle motion. Either structure your accel statements well, or they need to be made into a threaded process.
- May 24, 2016. The VehicleGPSModule methods NavStatusMessage and Parse_ubx naturally return values in addition to text. I made GPSfetch to capitalize
on this behavior. GPSfetch packages the repetitive set of commands to buffer and check a message for data from the Ublox. Also I fixed accel from
entering an infinite loop; now calling accel() multiple times and passing different signed arguments works.
Resources:
https://docs.emlid.com/navio/Navio-dev/read-gps-data/
https://shahriar.svbtle.com/importing-star-in-python
Resources GPS:
http://www.movable-type.co.uk/scripts/latlong.html
"""
import sys
import spidev
import time
import math
import navio.util
import VehiclePWMModule
from VehicleGPSModule import *
# Shared driver object for the Ublox GPS module.
ubl = U_blox()
#Define Ublox reset messages (raw UBX CFG-MSG frames: sync bytes, class/id,
#payload, checksum)
CFGmsg8_NAVposllh_no = [0xb5,0x62,0x06,0x01,0x08,0x00,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0xb9]#Disable Ublox from publishing a NAVposllh
CFGmsg8_NAVstatus_no = [0xb5,0x62,0x06,0x01,0x08,0x00,0x01,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x13,0xc0]#Disable Ublox from publishing a NAVstatus
CFGmsg8_NAVstatus_yes = [0xb5,0x62,0x06,0x01,0x08,0x00,0x01,0x03,0x00,0x00,0x00,0x00,0x01,0x00,0x14,0xc2]#Enable Ublox to publish a NAVstatus
CFGmsg8_NAVposllh_yes = [0xb5,0x62,0x06,0x01,0x08,0x00,0x01,0x02,0x00,0x00,0x00,0x00,0x01,0x00,0x13,0xbb]#Enable Ublox to publish a NAVposllh
#Define servo and esc (PWM-driven steering and speed controller)
vehicle_servo = VehiclePWMModule.vehiclePWM("servo")
vehicle_esc = VehiclePWMModule.vehiclePWM("esc")
#Define secondary waypoint for bearing approximation (degrees)
lat2 = 32.871894
lon2 = -117.2450076
# Same waypoint converted to radians for the trigonometry below.
phi2 = lat2*(math.pi/180)
lam2 = lon2*(math.pi/180)
#Define earth's mean volumetric radius (kilometres)
rE = 6371.008
def commUblox(msg):
    """Transmit *msg* to the Ublox over SPI ten times (repeats for reliability)."""
    for _ in range(10):
        ubl.bus.xfer2(msg)
def GPSNavInit():
    """Reset Ublox messaging, block until a confirmed GPS fix, then wiggle
    the steering servo to signal that initialization is complete."""
    #reset the Ublox messages
    commUblox(CFGmsg8_NAVposllh_no)
    commUblox(CFGmsg8_NAVstatus_no)
    print 'all NAV stopped \n'
    #Enable NAVstatus messages
    commUblox(CFGmsg8_NAVstatus_yes)
    print 'NAVstatus started \n'
    #Wait until we have a confirmed GPS fix
    goodGPSfix = False
    while not (goodGPSfix):
        GPSfix = ubl.GPSfetch()
        #print GPSfix
        if (GPSfix):
            # Accept fix statuses 2, 3 or 4 only.
            # NOTE(review): presumed to map to 2D/3D/GPS+dead-reckoning —
            # confirm against the VehicleGPSModule status encoding.
            if((GPSfix['fStatus'] == 2) or (GPSfix['fStatus'] == 3) or (GPSfix['fStatus'] == 4)):
                goodGPSfix = True
                print 'goodFix \n'
    #After confirmed fix, disable Navstatus messages
    commUblox(CFGmsg8_NAVstatus_no)
    #print 'NAVstatus stopped \n'
    #Wiggle wheels to indicate init is done
    vehicle_servo.steer(-35)
    time.sleep(0.5)
    vehicle_servo.steer(35)
    time.sleep(0.5)
    vehicle_servo.center()
    #Know next location
    #calculate next waypoint heading
    #Set course to move towards next waypoint
# ----- Initialization -----
#Start with vehicle at rest
vehicle_esc.stop()
vehicle_esc.rest()
#vehicle_servo.center()
#Initialize using the method above (blocks until a good GPS fix)
GPSNavInit()
#Start the NAVposllh messages; wait 1s so the module settles after the
#previous CFG changes before re-enabling position output.
time.sleep(1)
commUblox(CFGmsg8_NAVposllh_yes)
print "Started NAVposllh"
#backupMsg = [0xb5, 0x62, 0x06, 0x01, 0x03, 0x00, 0x01, 0x02, 0x01, 0x0e, 0x47]
#commUblox(backupMsg)
# ----- Initialization End -----
# Main navigation loop: fetch a position, compute the bearing to the
# waypoint, and drive until the waypoint box is reached. Ctrl-C stops
# the vehicle cleanly.
while(True):
    try:
        pos = ubl.GPSfetch()
        if (pos != None):
            print pos
            if (pos['hAcc'] <= 2000000):#change to 10 for actual testing
                #Prepare coordinate variables in order to calculate bearing
                lat = pos['lat']
                lon = pos['lon']
                phi = lat*(math.pi/180)
                lam = lon*(math.pi/180)
                #Equirectangular distance Approximation
                #The original formula from online calculates clockwise so 0-180 is east and 0--180 is west
                #x = (lam2-lam)*math.cos((phi+phi2)/2)
                #y = (phi2-phi)
                #My own formula calculates counterclockwise so 0-180 is west and 0--180 is east
                x = (lam-lam2)*math.cos((phi+phi2)/2)
                y = (phi2-phi)
                d = rE*math.sqrt((x*x)+(y*y)) #Only use is for debugging
                #print 'distance', d
                #Forward Bearing from Equirectagular Approximation
                bearWPsign = math.atan2(x,y)*(180/math.pi)
                bearWP = bearWPsign%360 #removes the sign so counter clockwise 0-360
                print 'bearing wp', bearWP
                # Stop inside a ~0.01-degree box around the waypoint,
                # otherwise keep driving forward.
                if ( (abs(pos['lat']-lat2) <= 0.01) and (abs(pos['lon']-lon2) <= 0.01) ):
                    print 'waypoint!'
                    vehicle_esc.stop()
                else:
                    vehicle_esc.accel(1)
            else: #If we don't have good accuracy
                print 'Bad accuracy!'
                #Move forward and back slowly until established valuable horizontal accuraccy
                #time.sleep(0.1)
    except KeyboardInterrupt:
        # Clean shutdown on Ctrl-C: stop the motor, relax the servo, exit.
        vehicle_esc.stop()
        vehicle_servo.rest()
        sys.exit()
|
import sys
from PySide2.QtWidgets import QWidget, QMessageBox, QApplication
import pandas as pd
from PySide2.QtGui import QTextCursor
from Funkcje.SzukanieMetodDlaMiar.searchingBestMethodCelinskiHarabasz import szukanieCH
from Funkcje.SzukanieMetodDlaMiar.searchingBestMethodDaviesBoudlin import szukanieDaviesBoudlin
from Funkcje.SzukanieMetodDlaMiar.searchingBestMethodSilhouette import szukanieSilhouette
from Klasy.PandasModel import PandasModel
from forms.edycjaBazyWiedzy import Ui_edycjaBazyWiedzy
import os
class MyStdout:
    """stdout/stderr replacement that mirrors writes into a Qt text widget.

    Progress messages (containing 'Test ilosci klastrow' or 'Liczenie eps')
    overwrite the widget's last line so the display updates in place;
    everything else is appended as a new line.
    """
    def __init__(self, buffer):
        # buffer: a QTextEdit-like widget (moveCursor/append/insertPlainText).
        self.buffer = buffer

    def write(self, string):
        if string:
            if 'Test ilosci klastrow' in string or 'Liczenie eps' in string:
                # Select the last line of the widget...
                self.buffer.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
                self.buffer.moveCursor(QTextCursor.StartOfLine, QTextCursor.MoveAnchor)
                self.buffer.moveCursor(QTextCursor.End, QTextCursor.KeepAnchor)
                storeCursorPos = self.buffer.textCursor()
                # ...delete it (including its trailing newline) and rewrite
                # the fresh progress text in place.
                self.buffer.textCursor().removeSelectedText()
                self.buffer.textCursor().deletePreviousChar()
                self.buffer.setTextCursor(storeCursorPos)
                self.buffer.insertPlainText(string)
                self.buffer.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
            else:
                self.buffer.append(string)
                self.buffer.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
            # Keep the GUI responsive while long computations print progress.
            QApplication.processEvents()
class EdycjaBazyWiedzy(QWidget):
    """Window for editing the knowledge base of clustering results.

    Shows the stored brute-force results for the active quality measure
    (Silhouette, Davies-Bouldin or Calinski-Harabasz), and lets the user
    delete rows, rerun the brute-force search and append the new entry.
    """
    # NOTE(review): these are *class* attributes; the mutable ones (dane,
    # cechy, wynikDoDodania, wynikBF) would be shared between instances if
    # more than one window were ever opened — confirm single-instance use.
    dane = pd.DataFrame()          # loaded data set
    cechy = {}                     # feature/meta description of the data set
    czySilhouette = False          # which quality measure is active
    czyDaviesBouldin = False
    czyCalinskiHarabasz = False
    rezultatyBF = None             # DataFrame of stored brute-force results
    wynikDoDodania = {}            # entry prepared for insertion into the base
    wynikBF = {}                   # result of the brute-force search

    def __init__(self, dane, cechy, czySilhouette, czyDaviesBouldin, czyCalinskiHarabasz, wynikBF):
        super(EdycjaBazyWiedzy, self).__init__()
        self.ui = Ui_edycjaBazyWiedzy()
        self.ui.setupUi(self)
        self.show()
        self.dane = dane
        self.cechy = cechy
        self.czySilhouette = czySilhouette
        self.czyCalinskiHarabasz = czyCalinskiHarabasz
        self.czyDaviesBouldin = czyDaviesBouldin
        self.wynikBF = wynikBF
        self.wynikDoDodania.update(cechy)
        if len(self.wynikBF) > 0:
            # A brute-force result was handed over from the previous window:
            # merge it into the pending entry and show it immediately.
            self.wynikDoDodania.update(self.wynikBF)
            self.ui.textWynikiMonteCarlo.append("Test był przeprowadzony w poprzednim oknie")
            self.ui.textWynikiMonteCarlo.append(
                'Gotowy wpis do dodania:')
            self.ui.textWynikiMonteCarlo.append(str(self.wynikDoDodania))
        self.ui.tableViewTabelaNauki.setModel(self.pokazTabele())
        self.ui.tableViewTabelaNauki.horizontalHeader().setCascadingSectionResizes(True)
        self.ui.tableViewTabelaNauki.verticalHeader().setVisible(True)
        self.ui.tableViewTabelaNauki.verticalHeader().setCascadingSectionResizes(True)
        self.ui.tableViewTabelaNauki.resizeColumnsToContents()
        # Wire up the buttons.
        self.ui.btnZapiszPlik.clicked.connect(lambda: self.zapiszTabele())
        self.ui.btnUsunWiersz.clicked.connect(lambda: self.usunWiersz())
        self.ui.btnTestuj.clicked.connect(lambda: self.szukanieBruteForce())
        self.ui.btnDodajDoBazy.clicked.connect(lambda: self.dodajDoBazy())
        # Redirect stdout/stderr into the log widget (see MyStdout).
        sys.stdout = MyStdout(self.ui.textWynikiMonteCarlo)
        sys.stderr = MyStdout(self.ui.textWynikiMonteCarlo)

    def pokazTabele(self):
        """Load the stored results CSV for the active measure and return a
        table model over it (also caches it in self.rezultatyBF)."""
        try:
            rezultatyBF = pd.DataFrame()
            if self.czySilhouette:
                rezultatyBF = pd.read_csv("TabeleTreningowe/Rezultaty Silhouette.csv", header=0, index_col=None)
            elif self.czyDaviesBouldin:
                rezultatyBF = pd.read_csv("TabeleTreningowe/Rezultaty DaviesBoudlin.csv", header=0, index_col=None)
            elif self.czyCalinskiHarabasz:
                rezultatyBF = pd.read_csv("TabeleTreningowe/Rezultaty CelinskiHarabasz.csv", header=0, index_col=None)
            rezultatyBF = rezultatyBF.round(4)
            self.rezultatyBF = rezultatyBF
            model = PandasModel(self.rezultatyBF)
            return model
        except Exception as inst:
            self.showDialog(inst)

    def showDialog(self, informacja):
        """Show *informacja* (usually an exception) in a modal message box."""
        msgBox = QMessageBox()
        msgBox.setIcon(QMessageBox.Information)
        msgBox.setText(str(informacja))
        msgBox.setWindowTitle("Błąd")
        msgBox.setStandardButtons(QMessageBox.Ok)
        msgBox.exec()

    def usunWiersz(self):
        """Drop the row selected in the spin box from the results table."""
        try:
            # NOTE(review): guards on self.dane but mutates self.rezultatyBF —
            # confirm whether the emptiness check targets the right frame.
            if self.dane.empty:
                self.showDialog('Dane nie zostały wczytane')
                return
            self.rezultatyBF = self.rezultatyBF.drop(self.ui.spinBoxUsuwanieIndeksu.value())
            model = PandasModel(self.rezultatyBF)
            self.ui.tableViewTabelaNauki.setModel(model)
        except Exception as inst:
            self.showDialog(inst)

    def zapiszTabele(self):
        """Write the current results table back to the measure's CSV file."""
        if self.czySilhouette:
            self.rezultatyBF.to_csv("TabeleTreningowe/Rezultaty Silhouette.csv", index=False, header=True)
        elif self.czyDaviesBouldin:
            self.rezultatyBF.to_csv("TabeleTreningowe/Rezultaty DaviesBoudlin.csv", index=False, header=True)
        elif self.czyCalinskiHarabasz:
            self.rezultatyBF.to_csv("TabeleTreningowe/Rezultaty CelinskiHarabasz.csv", index=False, header=True)

    def szukanieBruteForce(self):
        """Run the brute-force method search for the active measure and
        assemble the resulting knowledge-base entry (skips if a result was
        already passed in from the previous window)."""
        if len(self.wynikBF) == 0:
            try:
                if self.dane.empty:
                    self.showDialog('Dane nie zostały wczytane')
                    return
                self.ui.textWynikiMonteCarlo.append(
                    'Rozpoczecie testu brute force dla zbioru.')
                QApplication.processEvents()
                if self.czySilhouette:
                    wynik = szukanieSilhouette(self.dane, self.dane, self.ui.spinBoxMaxIterMC.value(), self.ui.spinBoxIleKlastrowMC.value())
                elif self.czyDaviesBouldin:
                    wynik = szukanieDaviesBoudlin(self.dane, self.dane, self.ui.spinBoxMaxIterMC.value(), self.ui.spinBoxIleKlastrowMC.value())
                elif self.czyCalinskiHarabasz:
                    wynik = szukanieCH(self.dane, self.dane, self.ui.spinBoxMaxIterMC.value(), self.ui.spinBoxIleKlastrowMC.value())
                metoda = wynik.get('Metoda')
                metryka = wynik.get('Metryka')
                parametr = wynik.get('eps/k')
                # Round the score with the precision used for each measure.
                if self.czySilhouette:
                    score = wynik.get('Wynik Silhouette')
                    score = round(score, 4)
                elif self.czyDaviesBouldin:
                    score = wynik.get('Wynik DaviesBoudlin')
                    score = round(score, 4)
                elif self.czyCalinskiHarabasz:
                    score = wynik.get('Wynik CelinskiHarabasz')
                    score = round(score, 2)
                self.ui.textWynikiMonteCarlo.append(' ')
                self.ui.textWynikiMonteCarlo.append(
                    'Znaleziona konfiguracja: ' + str(metoda) + " " + str(metryka) + " z parametrem " + str(parametr))
                self.ui.textWynikiMonteCarlo.append(
                    'Wynik miary: ' + str(score))
                QApplication.processEvents()
                self.wynikBF = {"Metoda_metryka": metoda + ' ' + metryka,
                                "Parametr": parametr,
                                "Wynik miary": score}
                self.wynikDoDodania.update(self.wynikBF)
                self.ui.textWynikiMonteCarlo.append(' ')
                self.ui.textWynikiMonteCarlo.append(
                    'Gotowy wpis do dodania:')
                self.ui.textWynikiMonteCarlo.append(str(self.wynikDoDodania))
                self.ui.textWynikiMonteCarlo.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
                # Optionally power the machine down after a long test run.
                if self.ui.checkBoxWylaczPoTescie.isChecked():
                    self.wylaczenieKomputera()
            except Exception as inst:
                self.showDialog(inst)

    def dodajDoBazy(self):
        """Append the pending entry to the results table and refresh the view."""
        # NOTE(review): DataFrame.append is deprecated in modern pandas;
        # pd.concat is the replacement.
        self.rezultatyBF = self.rezultatyBF.append(self.wynikDoDodania, ignore_index=True)
        self.rezultatyBF = self.rezultatyBF.round(4)
        model = PandasModel(self.rezultatyBF)
        self.ui.tableViewTabelaNauki.setModel(model)

    def wylaczenieKomputera(self):
        """Persist the results, then shut the computer down (Windows)."""
        self.dodajDoBazy()
        self.zapiszTabele()
        os.system("shutdown /s /t 5")
        sys.exit()
|
import re
import unittest
import datetime
import time
import validator.utils
#----------------------------------------------------------------------------------------------
class lhcbTest(unittest.TestCase):
    """GLUE2 information-validator test case for LHCb.

    Each instance wraps one LDAP entry and the attribute value under test;
    the concrete check to run is selected by *test_name*.
    """
    def __init__(self, test_name, entry, value, test_class):
        unittest.TestCase.__init__(self, test_name)
        self.entry = entry
        # Keep the entry's distinguished name (if present) for messages.
        if 'dn' in entry:
            self.dn = entry['dn'][0]
        else:
            self.dn = None
        # Dynamically import <test_class>.types; __import__ returns the top
        # package, so .types fetches the submodule.
        self.types = __import__('%s.types' %(test_class,)).types
        self.value = value
    #----------------------------------------------------------------------------------------------
    def test_GLUE2ComputingShareMaxCPUTime_OK (self):
        """Flag the value 999999999 of GLUE2ComputingShareMaxCPUTime
        (presumably a placeholder for 'unset' — confirm against the profile)."""
        message = validator.utils.message_generator("INFO","I029",self.dn,"GLUE2ComputingShareMaxCPUTime",self.value[0])
        self.assertTrue ( int(self.value[0]) != 999999999, message )
|
import numpy as np
import warnings
from sklearn.model_selection import KFold, RepeatedKFold
class DoubleMLResampling:
    """Sample-splitting scheme for double/debiased machine learning.

    Draws ``n_rep`` repetitions of an ``n_folds``-fold split of ``n_obs``
    observations. For ``n_folds == 1`` (no split at all) a degenerate
    resampling is used and cross-fitting is disabled.
    """

    def __init__(self,
                 n_folds,
                 n_rep,
                 n_obs,
                 apply_cross_fitting):
        self.n_folds = n_folds
        self.n_rep = n_rep
        self.n_obs = n_obs
        self.apply_cross_fitting = apply_cross_fitting
        # FIX: use the boolean operator `and` instead of bitwise `&` — the
        # result is identical for plain bools, but `and` is the idiomatic,
        # short-circuiting form for a logical condition.
        if (self.n_folds == 1) and self.apply_cross_fitting:
            warnings.warn('apply_cross_fitting is set to False. Cross-fitting is not supported for n_folds = 1.')
            self.apply_cross_fitting = False
        if not apply_cross_fitting:
            # Without cross-fitting only the first split is used, so at most
            # two folds make sense.
            assert n_folds <= 2
        self.resampling = RepeatedKFold(n_splits=n_folds,
                                        n_repeats=n_rep)
        if n_folds == 1:
            # Degenerate case: a single "split" with train == test == all.
            assert n_rep == 1
            self.resampling = ResampleNoSplit()

    def split_samples(self):
        """Return a list of length ``n_rep``; each element is the list of
        (train_indices, test_indices) pairs for that repetition."""
        all_smpls = [(train, test) for train, test in self.resampling.split(np.zeros(self.n_obs))]
        smpls = [all_smpls[(i_repeat * self.n_folds):((i_repeat + 1) * self.n_folds)]
                 for i_repeat in range(self.n_rep)]
        if not self.apply_cross_fitting:
            # in the no cross-fitting case in each repetition we only use the first sample split
            smpls = [[xx[0]] for xx in smpls]
        return smpls
# A helper class to run double without cross-fitting
class ResampleNoSplit():
    """Degenerate resampling scheme: a single "split" in which the training
    and test sets are both the full sample (no cross-fitting)."""

    def __init__(self):
        # Mimic the sklearn splitter API.
        self.n_splits = 1

    def get_n_splits(self, X=None, y=None, groups=None):
        """Return the number of splits (always 1)."""
        return self.n_splits

    def split(self, X, y=None, groups=None):
        """Yield a single (train, test) pair covering every row of X."""
        full_index = np.arange(X.shape[0])
        yield full_index, full_index
class DoubleMLClusterResampling:
    """K-fold resampling at the *cluster* level for double ML.

    Each of ``n_cluster_vars`` cluster variables is split into ``n_folds``
    folds of clusters; observation-level folds are then built from the
    cartesian product of the per-variable cluster folds.
    """
    def __init__(self,
                 n_folds,
                 n_rep,
                 n_obs,
                 apply_cross_fitting,
                 n_cluster_vars,
                 cluster_vars):
        if (n_folds == 1) | (not apply_cross_fitting):
            raise NotImplementedError('No cross-fitting (`apply_cross_fitting = False`) '
                                      'is not yet implemented with clustering.')
        self.n_folds = n_folds
        self.n_rep = n_rep
        self.n_obs = n_obs
        self.apply_cross_fitting = apply_cross_fitting
        # cluster_vars: array of shape (n_obs, n_cluster_vars).
        assert cluster_vars.shape[0] == n_obs
        assert cluster_vars.shape[1] == n_cluster_vars
        self.n_cluster_vars = n_cluster_vars
        self.cluster_vars = cluster_vars
        self.resampling = KFold(n_splits=n_folds, shuffle=True)

    def split_samples(self):
        """Return ``(all_smpls, all_smpls_cluster)``: per repetition, the
        observation-level and cluster-level (train, test) pairs."""
        all_smpls = []
        all_smpls_cluster = []
        for _ in range(self.n_rep):
            smpls_cluster_vars = []
            for i_var in range(self.n_cluster_vars):
                # Split the unique values of this cluster variable into folds.
                this_cluster_var = self.cluster_vars[:, i_var]
                clusters = np.unique(this_cluster_var)
                n_clusters = len(clusters)
                smpls_cluster_vars.append([(clusters[train], clusters[test])
                                           for train, test in self.resampling.split(np.zeros(n_clusters))])
            smpls = []
            smpls_cluster = []
            # build the cartesian product
            cart = np.array(np.meshgrid(*[np.arange(self.n_folds)
                                          for i in range(self.n_cluster_vars)])).T.reshape(-1, self.n_cluster_vars)
            for i_smpl in range(cart.shape[0]):
                # An observation belongs to a fold's train/test set only if it
                # falls into the matching cluster fold for *every* variable.
                ind_train = np.full(self.n_obs, True)
                ind_test = np.full(self.n_obs, True)
                this_cluster_smpl_train = []
                this_cluster_smpl_test = []
                for i_var in range(self.n_cluster_vars):
                    i_fold = cart[i_smpl, i_var]
                    train_clusters = smpls_cluster_vars[i_var][i_fold][0]
                    test_clusters = smpls_cluster_vars[i_var][i_fold][1]
                    this_cluster_smpl_train.append(train_clusters)
                    this_cluster_smpl_test.append(test_clusters)
                    ind_train = ind_train & np.in1d(self.cluster_vars[:, i_var], train_clusters)
                    ind_test = ind_test & np.in1d(self.cluster_vars[:, i_var], test_clusters)
                train_set = np.arange(self.n_obs)[ind_train]
                test_set = np.arange(self.n_obs)[ind_test]
                smpls.append((train_set, test_set))
                smpls_cluster.append((this_cluster_smpl_train, this_cluster_smpl_test))
            all_smpls.append(smpls)
            all_smpls_cluster.append(smpls_cluster)
        return all_smpls, all_smpls_cluster
|
# -*- coding: utf-8 -*-
class Environment:
    """A chained variable scope: lookups fall back to the parent scope."""

    def __init__(self, parent=None):
        self.parent = parent
        self.bindings = {}

    def __getitem__(self, key):
        # Local binding wins; otherwise delegate to the enclosing scope.
        if key in self.bindings:
            return self.bindings[key]
        return self.parent[key]

    def __setitem__(self, key, value):
        self.bindings[key] = value
class Solution:
    """Evaluator for a tiny Lisp dialect supporting add, mult and let."""

    def evaluate(self, expression):
        """Parse *expression* and evaluate it in a fresh top-level scope."""
        return self.eval(self.parse(expression), Environment())

    def eval(self, ast, env):
        """Recursively evaluate one parsed AST node within scope *env*."""
        if isinstance(ast, int):
            return ast
        if isinstance(ast, str):
            return env[ast]  # variable reference
        op = ast[0]
        if op == "add":
            return self.eval(ast[1], env) + self.eval(ast[2], env)
        if op == "mult":
            return self.eval(ast[1], env) * self.eval(ast[2], env)
        if op == "let":
            # Bind each (name, expr) pair in order in a child scope, so
            # later bindings may refer to earlier ones, then evaluate the
            # final expression in that scope.
            scope = Environment(parent=env)
            pairs = ast[1:-1]
            for idx in range(0, len(pairs), 2):
                scope[pairs[idx]] = self.eval(pairs[idx + 1], scope)
            return self.eval(ast[-1], scope)

    def parse(self, expression):
        """Tokenize *expression* and build its nested-list AST."""
        return self.to_ast(self.to_tokens(expression))

    def to_ast(self, tokens):
        """Consume tokens from the front and return one AST node."""
        token = tokens.pop(0)
        if token != "(":
            # Atom: an integer literal (possibly negative) or a symbol.
            return int(token) if token.lstrip("-").isdigit() else token
        node = []
        while tokens[0] != ")":
            node.append(self.to_ast(tokens))
        tokens.pop(0)  # discard the closing ")"
        return node

    def to_tokens(self, expression):
        """Split on whitespace after padding the parentheses with spaces."""
        padded = expression.replace("(", " ( ").replace(")", " ) ")
        return padded.split()
if __name__ == "__main__":
    # Smoke tests: arithmetic, nesting, sequential let bindings and shadowing.
    solution = Solution()
    assert 3 == solution.evaluate("(add 1 2)")
    assert 15 == solution.evaluate("(mult 3 (add 2 3))")
    assert 10 == solution.evaluate("(let x 2 (mult x 5))")
    assert 14 == solution.evaluate("(let x 2 (mult x (let x 3 y 4 (add x y))))")
    assert 2 == solution.evaluate("(let x 3 x 2 x)")
    assert 5 == solution.evaluate("(let x 1 y 2 x (add x y) (add x y))")
    assert 6 == solution.evaluate("(let x 2 (add (let x 3 (let x 4 x)) x))")
    assert 4 == solution.evaluate("(let a1 3 b2 (add a1 1) b2)")
    assert -12 == solution.evaluate("(let x 7 -12)")
|
"""PSIT2017"""
import pygal #เรียกใช้ Pygal
def main():
    """Render a line chart of Thai employment/unemployment figures, 2550-2559 B.E."""
    # Chart title (displayed at the top of the chart).
    chart = pygal.Line(title='กราฟเส้นแสดงตัวเลขผู้ว่างงานและผู้มีงานทำของคนไทยทั่วประเทศ ระหว่างปี 2550-2559')
    # X-axis labels: the Buddhist-era years covered.
    chart.x_labels = ('2550', '2551', '2552', '2553', '2554', '2555', '2556', '2557', '2558', '2559')
    # Read which data series to plot: 1 = unemployed, 2 = employed.
    num = int(input())
    if num == 1:
        # Series: number of unemployed people per year.
        chart.add('จำนวนผู้ว่างงาน', [508475, 521980, 572336, 402181, 264339, 259094, 283520, 322675, 340561, 377466])
    elif num == 2:
        # Series: number of employed people per year.
        chart.add('จำนวนผู้มีงานทำ', [36249454, 37016612, 37706321, 38037342, 38464667, 38939130, 38906889, 38077429, 38016169, 37692651])
    # Open the rendered chart in the default web browser.
    chart.render_in_browser()
main()
|
import boto3

# Target region for the EC2 client.
region = 'us-west-2'
ec2client = boto3.client('ec2', region_name=region)

# Create a gp2 volume from an existing snapshot and tag it for the
# Windchill development environment.
# BUG FIX: the original referenced an undefined name `client` (the client is
# bound to `ec2client`) and never closed the TagSpecifications list or the
# create_volume(...) call, so the script could not even be parsed.
response = ec2client.create_volume(
    # NOTE(review): 'region' is the literal string here, not the `region`
    # variable above — and create_volume expects an Availability Zone such
    # as 'us-west-2a'. TODO: confirm the intended AZ before running.
    AvailabilityZone='region',
    # Encrypted=True|False,
    # Iops=123,
    # KmsKeyId='string',
    # OutpostArn='string',
    # Size=123,
    SnapshotId='snap-09adcc2b712086452',
    VolumeType='gp2',
    TagSpecifications=[
        {
            'ResourceType': 'volume',
            'Tags': [
                {"Key": "Application", "Value": "Windchill"},
                {"Key": "ApplicationTier", "Value": "Application"},
                {"Key": "ApplicationTierLevel", "Value": "No Tier"},
                {"Key": "ContactPreference", "Value": "Email"},
                {"Key": "CorpInfoMSP:TakeNightlySnapshot", "Value": "Yes"},
                {"Key": "CostCenter", "Value": "1001596013"},
                {"Key": "Environment", "Value": "Development"},
                {"Key": "Managed", "Value": "Yes"},
                {"Key": "MonitoredServices", "Value": "No"},
                {"Key": "Name", "Value": 'AWOR-DVPDMLAS10'},
                {"Key": "PatchGroup", "Value": "ProductionManualReboot"},
                {"Key": "Purpose", "Value": "CAD PLM"},
                {"Key": "RequestNumber", "Value": "RITM0122480"},
                {"Key": "ReviewDate", "Value": "8/1/2021"},
                {"Key": "Schedule", "Value": "AlwaysOn"},
                {"Key": "ServiceLocation", "Value": "Irvine"},
                {"Key": "ServiceOwner", "Value": "Amir Memaran"},
                {"Key": "TechnicalOwner", "Value": "Mike Lockwood"},
                {"Key": "Validated", "Value": "No"},
            ],
        },
    ],
)
|
#!/usr/bin/env python3
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
import os
from amaranth import Elaboratable, Module, Cat
from usb_protocol.emitters import DeviceDescriptorCollection
from luna import top_level_cli
from luna.usb2 import USBDevice, USBStreamOutEndpoint
from luna.gateware.platform import NullPin
class USBStreamOutDeviceExample(Elaboratable):
    """ Simple device that demonstrates use of a bulk-OUT endpoint.
    Captures streaming data, and outputs it over the User I/O.
    """
    # Endpoint/packet parameters for the bulk-OUT stream.
    BULK_ENDPOINT_NUMBER = 1
    MAX_BULK_PACKET_SIZE = 512

    def create_descriptors(self):
        """ Create the descriptors we want to use for our device. """
        descriptors = DeviceDescriptorCollection()
        #
        # We'll add the major components of the descriptors we we want.
        # The collection we build here will be necessary to create a standard endpoint.
        #
        # We'll need a device descriptor...
        with descriptors.DeviceDescriptor() as d:
            d.idVendor           = 0x16d0
            d.idProduct          = 0xf3b
            d.iManufacturer      = "LUNA"
            d.iProduct           = "User IO streamer"
            d.iSerialNumber      = "no serial"
            d.bNumConfigurations = 1
        # ... and a description of the USB configuration we'll provide.
        with descriptors.ConfigurationDescriptor() as c:
            with c.InterfaceDescriptor() as i:
                i.bInterfaceNumber = 0
                with i.EndpointDescriptor() as e:
                    e.bEndpointAddress = self.BULK_ENDPOINT_NUMBER
                    e.wMaxPacketSize   = self.MAX_BULK_PACKET_SIZE
        return descriptors

    def elaborate(self, platform):
        """Build the gateware: a USB device whose bulk-OUT payload is mirrored
        onto the board's LEDs and user I/O pins."""
        m = Module()
        # Generate our domain clocks/resets.
        m.submodules.car = platform.clock_domain_generator()
        # Create our USB device interface...
        ulpi = platform.request(platform.default_usb_connection)
        m.submodules.usb = usb = USBDevice(bus=ulpi)
        # Add our standard control endpoint to the device.
        descriptors = self.create_descriptors()
        usb.add_standard_control_endpoint(descriptors)
        # Add a stream endpoint to our device.
        stream_ep = USBStreamOutEndpoint(
            endpoint_number=self.BULK_ENDPOINT_NUMBER,
            max_packet_size=self.MAX_BULK_PACKET_SIZE
        )
        usb.add_endpoint(stream_ep)
        # Missing pins fall back to NullPin so the design builds on any board.
        leds    = Cat(platform.request_optional("led", i, default=NullPin()) for i in range(6))
        user_io = Cat(platform.request_optional("user_io", i, default=NullPin()) for i in range(4))
        # Always stream our USB data directly onto our User I/O and LEDS.
        with m.If(stream_ep.stream.valid):
            m.d.usb += [
                leds     .eq(stream_ep.stream.payload),
                user_io  .eq(stream_ep.stream.payload),
            ]
        # Always accept data as it comes in.
        m.d.comb += stream_ep.stream.ready.eq(1)
        # Connect our device as a high speed device by default.
        m.d.comb += [
            usb.connect          .eq(1),
            usb.full_speed_only  .eq(1 if os.getenv('LUNA_FULL_ONLY') else 0),
        ]
        return m
if __name__ == "__main__":
    # Build/flash the example via LUNA's standard command-line interface.
    top_level_cli(USBStreamOutDeviceExample)
|
import pygame
import pygame.camera
from pygame.locals import *
from datetime import datetime
from BluetoothCom import BluetoothComm
# Steering angle must be from -180 to 180
# used DroidCam to use phone camera for video streaming to the laptop
DEVICE = '/dev/video0'        # V4L2 capture device exposed by DroidCam
SIZE = (640, 480)             # capture/display resolution
# folder where images will be saved
FOLDER = 'GroundFloor6/'
# You have to pair and authenticate the device before this.
# rfcomm would be helpful
serverMACAddress = '00:15:83:35:99:09' # Mac Address of HC-05 (The Bluetooth)
bluetoothServer = BluetoothComm(serverMACAddress, False)
def camstream():
    """Main capture loop: show the camera feed, read the joystick, send
    steering/speed commands over Bluetooth, and optionally record labelled
    frames for training data."""
    # initialising pygame display
    pygame.init()
    display = pygame.display.set_mode(SIZE, 0)
    # initialising camera
    pygame.camera.init()
    camera = pygame.camera.Camera(DEVICE, SIZE)
    camera.start()
    # initialising joystick
    pygame.joystick.init()
    joystick = pygame.joystick.Joystick(0)
    joystick.init()
    joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
    print(joysticks)
    # Used to manage how fast the screen updates
    clock = pygame.time.Clock()
    screen = pygame.surface.Surface(SIZE, 0, display)
    capture = True
    recording = False
    x = 0
    y = 0
    while capture:
        # Grab and display the latest camera frame.
        screen = camera.get_image(screen)
        display.blit(screen, (0, 0))
        pygame.display.flip()
        for event in pygame.event.get():
            # Steering: axis 0 (negated), mapped from [-1, 1] to [0, 180].
            lf = -joystick.get_axis(0)
            leftright = int(translate(lf, -1, 1, 0, 180))
            angleInfo = 'a' + formatAngle(leftright)
            # speedInfo = 's' + formatSpeed(updown)
            # Speed from buttons: A = forward ('s1'), B = back ('s2'),
            # neither = stop ('s3').
            a = joystick.get_button(0)
            b = joystick.get_button(2)
            x = ''
            if (a == 1):
                x = 's1'
            elif (b == 1):
                x = 's2'
            else:
                x = 's3'
            # Save the current frame labelled with steering + speed, then
            # push both commands to the car over Bluetooth.
            saveImage(screen, lf, x, recording)
            bluetoothServer.send(angleInfo)
            bluetoothServer.send(x)
            if event.type == QUIT:
                capture = False
            """
            joystick.get_button(i)
            i:0 -> 1
            i:1 -> 2
            i:2 -> 3
            i:3 -> 4
            i:4 -> L1
            i:5 -> R1
            i:6 -> L2
            i:7 -> R2
            """
            if event.type == pygame.JOYBUTTONDOWN:
                if joystick.get_button(5) == 1:
                    # R1
                    recording = True
                    print("recording status : " + str(recording))
                if joystick.get_button(7) == 1:
                    # R2
                    recording = False
                    print("recording status : " + str(recording))
                if joystick.get_button(4) == 1 or joystick.get_button(6) == 1:
                    # L1 or L2: emergency stop — halt the car and recording.
                    recording = False
                    bluetoothServer.send("s3")
                    print ("Stoping car and recording")
        # Cap the loop at 60 frames per second.
        clock.tick(60)
    camera.stop()
    pygame.quit()
    return
"""
TODO: Create a tread to save image so that it doesn't interfere with normal code execution
"""
def saveImage(img, event_angle, updown, recording):
    """Write *img* to disk when *recording* is on.

    The filename encodes the capture timestamp, the raw steering-axis
    value and the speed command, so saved frames are self-labelling.
    """
    if not recording:
        return
    filename = FOLDER + str(datetime.now()) + "--" + str(event_angle) + "--" + str(updown) + ".jpg"
    pygame.image.save(img, filename)
def formatAngle(x):
    """Return *x* as a zero-padded, three-character string.

    The serial protocol uses a fixed-width angle field ('a000'-'a180'),
    so 5 -> '005', 45 -> '045', 180 -> '180'. Inputs whose string form is
    already three or more characters are returned unchanged.
    """
    # str.zfill replaces the manual length checks; the dead, commented-out
    # rounding code that used to sit here was removed.
    return str(x).zfill(3)
def formatSpeed(x):
    """Map a signed speed-axis value to a protocol digit string.

    Positive -> '1' (forward), negative -> '2' (back), zero -> '3' (stop).
    """
    if x > 0:
        code = 1  # forward
    elif x < 0:
        code = 2  # back
    else:
        code = 3  # stop
    return str(code)
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """Linearly map *value* from [leftMin, leftMax] onto [rightMin, rightMax].

    Always returns a float; values outside the source range extrapolate.
    """
    fraction = float(value - leftMin) / float(leftMax - leftMin)
    return rightMin + fraction * (rightMax - rightMin)
if __name__ == '__main__':
    # Run the capture/drive loop only when executed as a script.
    camstream()
|
def warcaby(matrix, gracz):
    """Report every available diagonal capture on an 8x8 checkers board.

    ``matrix`` encodes white pieces as 1, black as 2, empty squares as 0.
    ``gracz`` is accepted for interface compatibility but not used:
    captures for both colours are always listed. Returns one line per
    capture, e.g. "Biale maja bicie z A1 na B2".
    """
    rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
    # Diagonal directions, in the same order the original branch chain used.
    directions = ((1, 1), (1, -1), (-1, -1), (-1, 1))
    # piece code -> (enemy code, message prefix)
    sides = {1: (2, "Biale"), 2: (1, "Czarne")}
    report = ""
    for i in range(8):
        for j in range(8):
            piece = matrix[i][j]
            if piece not in sides:
                continue
            enemy, label = sides[piece]
            for di, dj in directions:
                # The landing square (i + 2*di, j + 2*dj) must stay on board.
                if not (0 <= i + 2 * di <= 7 and 0 <= j + 2 * dj <= 7):
                    continue
                if matrix[i + di][j + dj] == enemy and matrix[i + 2 * di][j + 2 * dj] == 0:
                    report += (label + " maja bicie z " + rows[i] + str(j + 1)
                               + " na " + rows[i + di] + str(j + dj + 1) + "\n")
    return report
# Demo: one white piece at A1 and one black piece at B2.
szachownica = [[0] * 8 for _ in range(8)]
szachownica[0][0] = 1  # white
szachownica[1][1] = 2  # black
# szachownica[2][2] = 1  # (disabled) blocker on the landing square
print(warcaby(szachownica, 1))
import csv
import face_recognition
import pickle
"""
csv_reader = csv.reader(open('../data/train.csv', encoding='utf-8'))
train_imgs = []
train_img_codings = []
train_labels = []
count = 0
try:
    for item in csv_reader:
        img_path = '%s%s' % ('../data/train/', item[0])
        img = face_recognition.load_image_file(img_path)
        face_locations = face_recognition.face_locations(img, model="cnn")
        if len(face_locations) > 0 :
            img_code = face_recognition.face_encodings(img,face_locations)[0]
            train_imgs.append(img)
            train_img_codings.append(img_code)
            train_labels.append(item[1])
            print('training', item[1])
except IndexError:
    print("I wasn't able to locate any faces in at least one of the images. Check the image files. Aborting...")
# 序列化
with open('../data/train_codings.pkl', 'wb') as f:
    pickle.dump(train_img_codings, f)
with open('../data/train_labels.pkl', 'wb') as f:
    pickle.dump(train_labels, f)
"""
# Load the test gallery: for every listed image, encode the first detected
# face and remember its label. Rows are (filename, label) pairs.
test_gallery_imgs = []
test_img_codings = []
test_gallery_labels = []
csv_reader = csv.reader(open('../data/test_a_gallery.csv', encoding='utf-8'))
try:
    for item in csv_reader:
        img_path = '%s%s' % ('../data/test_a/gallery/', item[0])
        img = face_recognition.load_image_file(img_path)
        face_locations = face_recognition.face_locations(img, model="cnn")
        if len(face_locations) > 0:
            test_gallery_imgs.append(img)
            test_img_codings.append(face_recognition.face_encodings(img, face_locations)[0])
            test_gallery_labels.append(item[1])
            print('testing', item[1])
except IndexError:
    print("I wasn't able to locate any faces in at least one of the images. Check the image files. Aborting...")
# Serialize encodings and labels to disk for later matching.
with open('../data/test_codings.pkl', 'wb') as f:
    pickle.dump(test_img_codings, f)
with open('../data/test_labels.pkl', 'wb') as f:
    pickle.dump(test_gallery_labels, f)
|
#------------------------------------------------------------------------------#
# Imports
#------------------------------------------------------------------------------#
from flask import Flask, render_template, jsonify, abort, request
# Bug fix: the ``flask.ext`` namespace was deprecated and then removed in
# Flask 1.0; import the extension package directly.
from flask_sqlalchemy import SQLAlchemy
import string
import random
import datetime
import pprint
#------------------------------------------------------------------------------#
# App Config.
#------------------------------------------------------------------------------#
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
#------------------------------------------------------------------------------#
# Snippet Model
#------------------------------------------------------------------------------#
class Snippet(db.Model):
    """A pasted text/code snippet, addressable by a random 10-letter key."""
    #__tablename__ = 'Snippets'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120))
    content = db.Column(db.String(100000))
    key = db.Column(db.String(10))
    lang = db.Column(db.String(120))
    expires = db.Column(db.DateTime())

    def __init__(self, title, content, lang):
        # NOTE(review): ``random`` is predictable; if the key must be
        # unguessable, ``secrets.choice`` would be safer — confirm threat model.
        key = ''.join(random.choice(string.ascii_lowercase) for i in range(10))
        # NOTE(review): expires is set to the creation instant with no offset
        # added — looks like a missing timedelta; confirm the expiry policy.
        expires = datetime.datetime.now()
        self.title = title
        self.content = content
        self.lang = lang
        self.key = key
        self.expires = expires

    def __repr__(self):
        return '<Snippet %r>' % self.title
#------------------------------------------------------------------------------#
# Routes
#------------------------------------------------------------------------------#
@app.route('/')
def home():
    """Landing page: the snippet-creation form."""
    return render_template('pages/create.html')

@app.route('/terms')
def terms():
    """Static terms-of-service placeholder."""
    return 'Terms page'

@app.route('/about')
def about():
    """Static about-page placeholder."""
    return 'About page'
@app.route('/snippets/<int:snippet_id>', methods=['GET'])
def show_snippet(snippet_id):
    """Render a snippet looked up by primary key; 404 when absent.

    Bug fix: removed a stray debug ``pprint`` call whose filter compared
    the *builtin* ``id`` function with the integer id (a constant-truth
    expression that never filtered anything).
    """
    snippet = Snippet.query.get(snippet_id)
    if snippet is None:
        abort(404)
    return render_template('pages/view-snippet.html', snippet=snippet)
@app.route('/<string:key>', methods=['GET'])
def show_snippet_for_key(key):
    """Render a snippet looked up by its random share key; 404 when unknown."""
    snippet = Snippet.query.filter_by(key=key).first()
    if snippet is None:
        abort(404)
    return render_template('pages/view.html', snippet=snippet)
@app.route('/api/v1.0/snippets/<int:snippet_id>', methods=['GET'])
def get_snippet(snippet_id):
    """JSON API: return one snippet by id, or 404.

    Bug fix: the previous body iterated an undefined module-level
    ``snippets`` list (tutorial leftover) and raised NameError at request
    time; query the database instead.
    """
    snippet = Snippet.query.get(snippet_id)
    if snippet is None:
        abort(404)
    return jsonify({'snippet': {
        'id': snippet.id,
        'title': snippet.title,
        'content': snippet.content,
        'key': snippet.key,
        'lang': snippet.lang,
    }})
@app.route('/api/v1.0/snippets', methods=['POST'])
def create_snippet():
    """JSON API: create a snippet from ``{title, content?, lang?}``.

    Responds 400 when the body is not JSON or lacks a title; otherwise
    persists the snippet and returns its share key and language with 201.
    """
    if not request.json or not 'title' in request.json:
        abort(400)
    snippet = Snippet(request.json['title'], request.json.get('content', ""), request.json.get('lang', ""))
    db.session.add(snippet)
    db.session.commit()
    return jsonify({'key': snippet.key, 'lang': snippet.lang}), 201
#----------------------------------------------------------------------------#
# App Start
#----------------------------------------------------------------------------#
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger/reloader —
    # development only; never deploy with this enabled.
    app.debug = True
    app.run()
import uuid
def create_a_not_really_a_secret():
    """Generate and return a random version-4 UUID.

    Deliberately not treated as sensitive data (hence the lgtm suppression).
    """
    token = uuid.uuid4()  # lgtm [py/not-sensitive-data]
    return token
def main():
    """Print one freshly generated UUID4 to stdout."""
    print(create_a_not_really_a_secret())

# NOTE(review): runs at import time too — an ``if __name__ == '__main__':``
# guard would be safer; confirm this module is script-only.
main()
|
# Re-export dropdown_header as this package's sole public name.
from rubicon_ml.viz.common.dropdown_header import dropdown_header
__all__ = ["dropdown_header"]
|
# Score n rounds of two numbers each; Dario takes the point when the
# difference b - a is 1, 2, -4 or -3, Xerxes otherwise. Ties go to Xerxes.
n = int(input())
dario_points = 0
xerxes_points = 0
for _ in range(n):
    pair = [int(token) for token in input().split()]
    if pair[1] - pair[0] in (1, 2, -4, -3):
        dario_points += 1
    else:
        xerxes_points += 1
print("dario" if dario_points > xerxes_points else "xerxes")
i=input("Input a String:") #Taking an input string from the user
temp=i
if temp[::-1]==i: #Checking string is palindrome or not
print("The string is a palindrome")
else:
print("The string is not a palindrome")
|
import os
import json
class en_US():
    """Loads the English (US) string table from assest/lang/en_US.json."""
    # NOTE(review): hard-coded backslashes make these paths Windows-only;
    # os.path.join would be portable — confirm the target OS. ("assest"
    # matches the on-disk directory name, so it must not be "corrected".)
    def __init__(self):
        with open(file=os.path.dirname(__file__)+"\\assest\\lang\\en_US.json",mode="r",encoding="utf-8") as f:
            self.__langDict__=json.load(f)

class zh_TW():
    """Loads the Traditional Chinese string table from assest/lang/zh_TW.json."""
    def __init__(self):
        with open(file=os.path.dirname(__file__)+"\\assest\\lang\\zh_TW.json",mode="r",encoding="utf-8") as f:
            self.__langDict__=json.load(f)

class ja_JP():
    """Loads the Japanese string table from assest/lang/ja_JP.json."""
    def __init__(self):
        with open(file=os.path.dirname(__file__)+"\\assest\\lang\\ja_JP.json",mode="r",encoding="utf-8") as f:
            self.__langDict__=json.load(f)
|
import cv2
import imutils
import time
# OpenVINO IR files for the emotions-recognition network; the .xml holds the
# topology, the .bin the weights (the variable name pbtxt_path is a misnomer).
model_path = "emotions-recognition-retail-0003.xml"
pbtxt_path = "emotions-recognition-retail-0003.bin"
net = cv2.dnn.readNet(model_path, pbtxt_path)
# Haar cascade used for face detection, plus its tuning parameters.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cascade_scale = 1.2
cascade_neighbors = 6
minFaceSize = (30,30)
# Specify target device
# NOTE(review): MYRIAD targets an Intel Neural Compute Stick via the OpenVINO
# backend — confirm that hardware is present at deployment.
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
def getFaces(img):
    """Detect faces in a BGR frame using the module-level Haar cascade.

    Returns a list of (x, y, w, h) bounding boxes strictly larger than
    minFaceSize in both dimensions.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(
        gray,
        scaleFactor=cascade_scale,
        minNeighbors=cascade_neighbors,
        minSize=minFaceSize,
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    return [
        (x, y, w, h)
        for (x, y, w, h) in detections
        if w > minFaceSize[0] and h > minFaceSize[1]
    ]
# Main capture loop: detect faces, run the emotion network per face, and
# overlay the five emotion percentages on the displayed frame.
camera = cv2.VideoCapture(0)
frameID = 0
grabbed = True
start_time = time.time()
while grabbed:
    (grabbed, img) = camera.read()
    # Bug fix: VideoCapture.read() returns (False, None) when no frame is
    # available; resizing None raised an exception. Exit the loop cleanly.
    if not grabbed:
        break
    img = cv2.resize(img, (550,400))
    out = []
    frame = img.copy()
    faces = getFaces(frame)
    x, y, w, h = 0, 0, 0, 0
    i = 0
    for (x,y,w,h) in faces:
        cv2.rectangle( frame,(x,y),(x+w,y+h),(255,255,255),1)
        if(w>0 and h>0):
            facearea = frame[y:y+h, x:x+w]
            # Prepare input blob and perform an inference
            blob = cv2.dnn.blobFromImage(facearea, size=(64, 64), ddepth=cv2.CV_8U)
            net.setInput(blob)
            out = net.forward()
            # Network output channels: neutral, happy, sad, surprise, anger;
            # scale each score to an integer percentage.
            neutral = int(out[0][0][0][0] * 100)
            happy = int(out[0][1][0][0] * 100)
            sad = int(out[0][2][0][0] * 100)
            surprise = int(out[0][3][0][0] * 100)
            anger = int(out[0][4][0][0] * 100)
            yy = y
            line2 = "{}%\n{}%\n{}%\n{}%\n{}%".format(neutral,happy,sad,surprise,anger)
            y0, dy = yy, 35
            for ii, txt in enumerate(line2.split('\n')):
                y = y0 + ii*dy
                cv2.putText(frame, txt, (x, y ), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
            line1 = "Neutral:\nHappy:\nSad:\nSurprise:\nAnger:"
            y0, dy = yy, 35
            for ii, txt in enumerate(line1.split('\n')):
                y = y0 + ii*dy
                cv2.putText(frame, txt, (x+55, y ), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        i += 1
    # Display the annotated frame and report the running FPS.
    cv2.imshow("FRAME", frame)
    frameID += 1
    fps = frameID / (time.time() - start_time)
    print("FPS:", fps)
    cv2.waitKey(1)
# Resource-leak fix: release the capture device before tearing down the UI.
camera.release()
cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlalchemy.types import Integer, String, Text, DateTime
from ..extensions import db
class Star(db.Model):
    """A user's star (bookmark) on a repository."""
    __tablename__ = 'stars'

    id = Column(Integer, primary_key = True)
    repo_id = Column(Integer)
    user_id = Column(Integer)
    # NOTE(review): never assigned in __init__ — presumably filled in by the
    # caller or a DB default; confirm.
    create_time = Column(DateTime)

    def __init__(self, repo_id, user_id):
        self.repo_id = repo_id
        self.user_id = user_id

    def __repr__(self):
        # Bug fix: the label previously read '<Watch ...>', a leftover from a
        # copied Watch model; report the actual class.
        return '<Star %s/%d>' % (self.id, self.repo_id)
|
# -*- encoding: utf-8 -*-
from openerp import models
import re
class res_partner(models.Model):
    """Extend partner search/display with the ``x_identificacion`` field.

    ``name_search`` also matches the identification number and strips a
    leading "[ident] " prefix from the query; ``name_get`` appends
    " [ident] " to each display name. Written against the old OpenERP
    (pre-v8) cr/uid API; the use of ``long`` implies Python 2.
    """
    _inherit = 'res.partner'

    def name_search(self, cr, user, name, args=None, operator='ilike',
                    context=None, limit=100):
        # Search order: plain name -> x_identificacion -> identification
        # embedded in the name as "[...]".
        if not args:
            args = []
        if context is None:
            context = {}
        ids = []
        if name:
            # Strip a "[ident] " prefix (as produced by name_get) before searching.
            ptrn_name = re.compile('(\[(.*?)\])')
            res_name = ptrn_name.search(name)
            if res_name:
                name = name.replace('[' + res_name.group(2) + '] ', '')
            partner_search = super(res_partner, self).name_search(cr, user,
                name, args, operator, context, limit)
            ids = [partner[0] for partner in partner_search]
            if not ids:
                ids = self.search(cr, user, [('x_identificacion', operator, name)] + args,
                    limit=limit, context=context)
            if not ids:
                # Last resort: match the bracketed identification on its own.
                ptrn = re.compile('(\[(.*?)\])')
                res = ptrn.search(name)
                if res:
                    ids = self.search(cr, user,
                        [('x_identificacion', operator, res.group(2))] + args, limit=limit,
                        context=context)
        else:
            return super(res_partner, self).name_search(cr, user,
                name, args, operator, context, limit)
        return self.name_get(cr, user, ids, context=context)

    def name_get(self, cr, uid, ids, context=None):
        # Append " [x_identificacion] " to each partner's display name.
        if isinstance(ids, (list, tuple)) and not len(ids):
            return []
        if isinstance(ids, (long, int)):
            ids = [ids]
        res_name = super(res_partner, self).name_get(cr, uid, ids, context)
        res = []
        for record in res_name:
            partner = self.browse(cr, uid, record[0], context=context)
            name = record[1]
            if partner.x_identificacion:
                name = name + ' [' + partner.x_identificacion + '] '
            res.append((record[0], name))
        return res
|
# Solidity-Compatible EIP20/ERC20 Token
# Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md
# Author: Phil Daian
# The use of the uint256 datatype as in this token is not
# recommended, as it can pose security risks.
# This token is intended as a proof of concept towards
# language interoperability and not for production use.
# Events issued by the contract
# ERC20 events.
Transfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})
Approval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})
# Demo event emitted by transferFrom to log the unlimited-allowance check.
Truthiness: event({_truthy: bool})
# Contract storage: balances, per-owner spender allowances, total supply.
balances: uint256[address]
allowances: (uint256[address])[address]
num_issued: uint256
max_uint_256: public(uint256)
name: public(bytes32)
decimals: public(uint256)
symbol: public(bytes32)

@public
def __init__(_initial_amount: uint256, _token_name: bytes32, _decimals: uint256, _token_symbol: bytes32):
    # The deployer receives the entire initial supply.
    self.num_issued = _initial_amount
    self.balances[msg.sender] = _initial_amount
    self.name = _token_name
    self.decimals = _decimals
    self.symbol = _token_symbol
    # self.max_uint_256 = 2**256-1 # this line would overflow before subtraction, next is equivalent
    self.max_uint_256 = 2*(2**255-1)+1

@public
@constant
def totalSupply() -> uint256:
    return self.num_issued

@public
@constant
def balanceOf(_owner : address) -> uint256:
    return self.balances[_owner]

@public
def transfer(_to : address, _value : uint256) -> bool:
    _sender: address = msg.sender
    # Make sure sufficient funds are present implicitly through overflow protection
    self.balances[_sender] = self.balances[_sender] - _value
    self.balances[_to] = self.balances[_to] + _value
    # Fire transfer event
    log.Transfer(_sender, _to, _value)
    return True

@public
def transferFrom(_from : address, _to : address, _value : uint256) -> bool:
    _sender: address = msg.sender
    allowance: uint256 = self.allowances[_from][_sender]
    # Make sure sufficient funds/allowance are present implicitly through overflow protection
    self.balances[_from] = self.balances[_from] - _value
    self.balances[_to] = self.balances[_to] + _value
    log.Truthiness(allowance != self.max_uint_256)
    # An allowance of max_uint_256 is treated as unlimited and not decremented.
    if allowance != self.max_uint_256:
        self.allowances[_from][_sender] = allowance - _value
    # Fire transfer event
    log.Transfer(_from, _to, _value)
    return True

@public
def approve(_spender : address, _value : uint256) -> bool:
    _sender: address = msg.sender
    self.allowances[_sender][_spender] = _value
    # Fire approval event
    log.Approval(_sender, _spender, _value)
    return True

@public
@constant
def allowance(_owner : address, _spender : address) -> uint256:
    return self.allowances[_owner][_spender]
|
class Persist:
    """Decorator class wrapping a callable with persistence diagnostics.

    Applying ``@Persist`` to a function makes each call run the function,
    print the wrapped callable and a persistence message, and return the
    function's result.
    """

    def __init__(self, function):
        self.function = function

    def __call__(self, *args, **kwargs):
        # We can add some code before the function call here.
        # Bug fix: the wrapped function's return value was discarded, so every
        # decorated function effectively returned None; capture and return it.
        result = self.function(*args, **kwargs)
        print(self.function)
        print("DATABASE PERSISTANCE")
        # We can also add some code after the function call here.
        return result
|
# FUNCOES UTEIS
def rl_quotes(value):
    """Wrap *value* in single quotes on both sides."""
    return "'{}'".format(value)
def r_quotes(value):
    """Append a single quote to the right of *value*."""
    return "{}'".format(value)
def l_quotes(value):
    """Prepend a single quote to the left of *value*."""
    return "'{}".format(value)
def param_parse(param, sql):
    """Substitute each placeholder key of *param* into *sql*.

    str values are wrapped in single quotes, int values inserted as-is;
    placeholders whose value has any other type are left untouched.
    """
    for key, value in param.items():
        if type(value) is str:
            # Inline single-quoting (same result as rl_quotes).
            sql = sql.replace(key, "'" + value + "'")
        elif type(value) is int:
            sql = sql.replace(key, str(value))
    return sql
def concat_nl(*params):
    """Append params[0] to the module-level ``sql`` buffer, newline-terminated.

    An optional second argument gives the number of leading tab characters.
    """
    global sql
    value = params[0]
    tabulations = params[1] if len(params) > 1 else 0
    sql += '\t' * tabulations + str(value) + '\n'
def concat(*params):
    """Append params[0] to the module-level ``sql`` buffer without a newline.

    An optional second argument gives the number of leading tab characters.
    """
    global sql
    value = params[0]
    tabulations = params[1] if len(params) > 1 else 0
    sql += '\t' * tabulations + str(value)
# PROGRAMA
# Generates numbered .sql output files (ARQUIVO__1.sql, ARQUIVO__2.sql, ...)
# from the debit ids listed in entrada.txt.
numero_arquivo = 1
sql = ''
contador = 0
# Maximum number of rendered blocks per output file before rotating.
quantidade_registros_arquivo = 10000
nome_arquivo_saida = 'ARQUIVO__'
extensao_arquivo_saida = '.sql'
saida = open(nome_arquivo_saida + str(numero_arquivo) + extensao_arquivo_saida, 'w')
# Fixed query parameters; :id_debito is overwritten for every input line.
parametros_query = {':id_usuario': 4821,
                    ':nome_usuario_sgu': 'Xxxxxx Xxxxxxxx Xxxxxxxxx',
                    ':cpf_usuario_sgu': '11122233344',
                    ':email_usuario_sgu': 'xxxxxxx@xx.xx.xx',
                    ':id_funcionalidade':13,
                    ':justificativa': 'Xxxxxxxxxxxxxxx',
                    ':id_situacao':3,
                    ':id_debito':0
                    }
def build_sql():
    """Append one anonymous PL/SQL block to the global ``sql`` buffer.

    The block updates a record's situation only when its current situation
    is 1; the :id_debito / :id_situacao placeholders are substituted later
    by param_parse().
    """
    concat_nl('DECLARE')
    concat_nl('V_ID_SITUACAO NUMBER := 0;',1)
    concat_nl('BEGIN')
    concat_nl('SELECT ID_SITUACAO INTO V_ID_SITUACAO FROM SDA.XXXXXXXXX WHERE ID = :id_debito;',1)
    concat_nl('IF V_ID_SITUACAO = 1 THEN',1)
    concat_nl('UPDATE SDA.XXXXXXXXX SET ID_SITUACAO = :id_situacao WHERE ID = :id_debito;', 2)
    concat_nl('COMMIT;', 2)
    concat_nl('END IF;',1)
    concat_nl('END;')
    concat_nl('/')
# Read one debit id per line, render the SQL template for it, and append the
# result to the current output file, rotating files every
# quantidade_registros_arquivo records.
with open('entrada.txt') as entrada:
    for linha in entrada:
        id_debito = str(linha).rstrip()
        parametros_query[":id_debito"] = int(id_debito)
        build_sql()
        sql = param_parse(parametros_query, sql)
        saida.write(sql)
        contador += 1
        sql = ''
        if contador >= quantidade_registros_arquivo:
            saida.close()
            numero_arquivo += 1
            contador = 0
            saida = open(nome_arquivo_saida + str(numero_arquivo) + extensao_arquivo_saida, 'w')
saida.close()
|
from data import DataLoader
from data import DcardDataset
from data import customed_collate_fn
from data import cut_validation
import model
from args import get_args
import torch
from utils import save_training_args
from utils import check_save_path
from utils import set_random_seed
import os
import sys
import time
def train(
        total_data,
        train_x,
        train_y,
        sentence_length,
        prefix,
        validation,
        batch_size,
        collate_fn,
        model_name,
        vocabulary_size,
        embed_dim,
        hidden_size,
        rnn_layers,
        dropout_rate,
        bidirectional,
        learning_rate,
        epoches,
        save_intervals,
        use_cuda=True):
    """Train a binary sentence classifier named ``model_name`` from model.py.

    Optionally splits off a validation set, trains with Adam + BCELoss for
    ``epoches`` epochs, logs per-epoch loss/accuracy to a CSV log file, and
    saves model checkpoints every ``save_intervals`` epochs.
    NOTE(review): predictions are thresholded at 0.5, so the model's forward
    pass presumably ends in a sigmoid — confirm in model.py.
    """
    print('Training preprocessing...')
    # processing saving path
    log_save_path, model_path, save_args_path = \
        check_save_path(prefix, validation)
    # processing validation data
    if validation:
        train_data, valid_data = cut_validation(
            total_data,
            [train_x, train_y, sentence_length],
            shuffle=True)
        total_train, train_x, train_y, train_length = train_data
        total_valid, valid_x, valid_y, valid_length = valid_data
    else:
        total_train = total_data
    # make dataset
    dcard_train_dataset = DcardDataset(
        total_train, train_x, train_y, sentence_length)
    train_loader = torch.utils.data.DataLoader(
        dataset=dcard_train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_fn
    )
    if validation:
        dcard_valid_dataset = DcardDataset(
            total_valid, valid_x, valid_y, sentence_length)
        valid_loader = torch.utils.data.DataLoader(
            dataset=dcard_valid_dataset,
            batch_size=batch_size,
            shuffle=True,
            collate_fn=collate_fn
        )
    # Initialize model: look the class up by name in model.py.
    try:
        model_class_object = getattr(model, model_name)
    except AttributeError:
        raise Exception('Model %s not found in model.py' % model_name)
    # training arguments to send
    # load_model_filename: the filename of init parmeters
    # word_dict_len: the length of word dictionary
    # embed_dim: embedding dimension
    # hidden_size: hidden size of RNN
    # dropout: dropout rate of RNN
    # bidirectional: RNN is bidirectional or not
    training_args = {
        'load_model_filename': None,
        'vocabulary_size': vocabulary_size,
        'embed_dim': embed_dim,
        'hidden_size': hidden_size,
        'rnn_layers': rnn_layers,
        'dropout': dropout_rate,
        'bidirectional': bidirectional}
    save_training_args(training_args, save_args_path)
    my_model = model_class_object(training_args, train=True)
    my_model = my_model.cuda() if use_cuda else my_model
    optimizer = torch.optim.Adam(my_model.parameters(), lr=learning_rate)
    #loss_func = torch.nn.CrossEntropyLoss()
    loss_func = torch.nn.BCELoss()
    print('Start training...')
    for epoch in range(epoches):
        start_time = time.time()
        total_loss, total_steps, total_accu = 0.0, 0.0, 0.0
        for step, (x, y, length) in enumerate(train_loader):
            duration = time.time() - start_time
            sys.stdout.write('\rduration: %06.1f, step: %04d ' \
                % (duration, step))
            sys.stdout.flush()
            if use_cuda:
                x, y, length = x.cuda(), y.cuda(), length.cuda()
            optimizer.zero_grad()
            pred_y = my_model.forward(x, length).squeeze()
            loss = loss_func(pred_y, y)
            loss.backward()
            optimizer.step()
            total_loss += float(loss.cpu())
            # Threshold outputs at 0.5 to count correct binary predictions.
            pred_y[pred_y >= 0.5] = 1.0
            pred_y[pred_y < 0.5] = 0.0
            total_accu += float(torch.sum(pred_y == y).cpu())
            total_steps += 1
        total_loss /= total_steps
        total_accu /= total_train
        if validation:
            # Evaluate on the held-out split with gradients disabled.
            with torch.no_grad():
                my_model.eval()
                total_valid_loss, total_valid_accu, total_valid_step = 0, 0, 0
                for step, (x, y, length) in enumerate(valid_loader):
                    if use_cuda:
                        x, y, length = x.cuda(), y.cuda(), length.cuda()
                    pred_valid_y = my_model.forward(x, length).squeeze()
                    total_valid_loss += float(loss_func(pred_valid_y, y).cpu())
                    pred_valid_y[pred_valid_y >= 0.5] = 1.0
                    pred_valid_y[pred_valid_y < 0.5] = 0.0
                    total_valid_accu += \
                        float(torch.sum(pred_valid_y == y).cpu())
                    total_valid_step += 1
                total_valid_loss /= total_valid_step
                total_valid_accu /= total_valid
                my_model.train()
            progress_msg = 'epoch:%3d, loss:%.3f, accuracy:%.3f, valid:%.3f, accuracy:%.3f'\
                % (epoch, total_loss, total_accu, \
                total_valid_loss, total_valid_accu)
            log_msg = '%d,%.4f,%.3f,%.4f,%.3f\n' % \
                (epoch, total_loss, total_accu, \
                total_valid_loss, total_valid_accu)
        else:
            progress_msg = 'epoch:%3d, loss:%.3f, accuracy:%3f'\
                % (epoch, total_loss, total_accu)
            log_msg = '%d,%.4f,%.3f\n' % (epoch, total_loss, total_accu)
        print(progress_msg)
        # Append this epoch's metrics to the CSV log.
        with open(log_save_path, 'a') as f_log:
            f_log.write(log_msg)
        # Periodic checkpointing.
        if (epoch + 1) % save_intervals == 0:
            model_save_path = os.path.join(model_path, 'models_e%d.pt' % (epoch+1))
            my_model.save(model_save_path)
def main():
    """Entry point: build/load the word dictionary, load data, and train."""
    args = get_args(train=True)
    set_random_seed(args.seed)
    # Either reuse a saved word dictionary or build one from train+test text.
    if args.load_word_dict:
        dl = DataLoader(
            create_word_dict=False,
            word_dict_filename=args.word_dict_filename)
    else:
        dl = DataLoader(
            create_word_dict=True,
            filenames=[args.train_x_filename, args.test_x_filename],
            save_word_dict=True,
            word_dict_filename=args.word_dict_filename)
    train_x = dl.load_data_x(args.train_x_filename)
    train_y = dl.load_data_y(args.train_y_filename)
    sentence_length = dl.get_sentence_length()
    #limit = 1000
    #train_x, train_y, sentence_length = train_x[:limit], train_y[:limit], sentence_length[:limit]
    word_dict_len = dl.get_word_dict_len()
    train(
        total_data=len(train_x),
        train_x=train_x,
        train_y=train_y,
        sentence_length=sentence_length,
        prefix=args.prefix,
        validation=args.validation,
        batch_size=args.batch_size,
        collate_fn=customed_collate_fn,
        model_name=args.model,
        vocabulary_size=word_dict_len,
        embed_dim=args.embed_dim,
        hidden_size=args.hidden_size,
        rnn_layers=args.rnn_layers,
        dropout_rate=args.dropout_rate,
        # NOTE(review): args named no_bidirectional/no_cuda are passed as the
        # positive flags — presumably argparse store_false options; confirm
        # in args.py.
        bidirectional=args.no_bidirectional,
        learning_rate=args.learning_rate,
        epoches=args.epoches,
        save_intervals=args.save_intervals,
        use_cuda=args.no_cuda)

if __name__ == '__main__':
    main()
|
# Print the first 10 terms of an arithmetic progression read from stdin.
primeiro = int(input('Primeiro termo da progressão: '))
razao = int(input('Razão: '))
limite = primeiro + 10 * razao
for termo in range(primeiro, limite, razao):
    print(termo, end=' . ')
import json
import requests
from spotibot.core.objects import Time
from spotibot.core.objects.General import Image, ExternalUrl, ExternalId
from spotibot.mongo.utils.Handlers import object_handler, get_serializable
class Album:
    """Simplified Spotify album object.

    Wraps the raw ``album`` dict returned by the Spotify Web API
    (see https://developer.spotify.com/documentation/web-api/reference/object-model),
    exposing each field as an attribute; nested structures are converted to
    their SpotiBot wrapper types (Artist, Image, ExternalUrl). Supports
    dict-style access (``album["name"]``, ``album.get(...)``), attribute-wise
    equality, and mongo-friendly serialization via ``to_dict``/``json``.
    """

    def __init__(self, album):
        self.album_type: str = object_handler(album, "album_type")
        self.artists: list = [Artist(entry) for entry in object_handler(album, "artists")]
        self.available_markets: list = object_handler(album, "available_markets")
        self.external_urls: ExternalUrl = ExternalUrl(object_handler(album, "external_urls"))
        self.href: str = object_handler(album, "href")
        self.id: str = object_handler(album, "id")
        self.images: list = [Image(entry) for entry in object_handler(album, "images")]
        self.name: str = object_handler(album, "name")
        self.release_date: str = object_handler(album, "release_date")
        self.release_date_precision: str = object_handler(album, "release_date_precision")
        self.restrictions: dict = object_handler(album, "restrictions")
        self.type: str = object_handler(album, "type")
        self.uri: str = object_handler(album, "uri")

    def __eq__(self, other) -> bool:
        """True when *other* has exactly the same attributes and values."""
        return vars(self) == vars(other)

    def __getitem__(self, item: str):
        """Subscript access: ``album["name"]`` returns the attribute value."""
        return getattr(self, item)

    def get(self, item: str, default=None):
        """dict-style get: attribute value, or *default* when absent."""
        return vars(self).get(item, default)

    def to_dict(self) -> dict:
        """Serialize every attribute through ``get_serializable``."""
        return {key: get_serializable(val) for key, val in vars(self).items()}

    @property
    def json(self) -> str:
        """JSON string form of :meth:`to_dict` for mongo storage."""
        return json.dumps(self.to_dict())
class Artist:
    """Simplified Spotify artist object.

    Wraps the raw ``artist`` dict returned by the Spotify Web API
    (see https://developer.spotify.com/documentation/web-api/reference/object-model),
    exposing href, id, name, type and uri as attributes. Supports dict-style
    access, attribute-wise equality, and mongo-friendly serialization via
    ``to_dict``/``json``.
    """

    def __init__(self, artist):
        self.href: str = object_handler(artist, "href")
        self.id: str = object_handler(artist, "id")
        self.name: str = object_handler(artist, "name")
        self.type: str = object_handler(artist, "type")
        self.uri: str = object_handler(artist, "uri")

    def __eq__(self, other) -> bool:
        """True when *other* has exactly the same attributes and values."""
        return vars(self) == vars(other)

    def __getitem__(self, item: str):
        """Subscript access: ``artist["name"]`` returns the attribute value."""
        return getattr(self, item)

    def get(self, item: str, default=None):
        """dict-style get: attribute value, or *default* when absent."""
        return vars(self).get(item, default)

    def to_dict(self) -> dict:
        """Serialize every attribute through ``get_serializable``."""
        return {key: get_serializable(val) for key, val in vars(self).items()}

    @property
    def json(self) -> str:
        """JSON string form of :meth:`to_dict` for mongo storage."""
        return json.dumps(self.to_dict())
class Track:
"""Auto-generated attribute instantiation docstring for track
object (full)
Note: Parameter description in below docstring is populated based
on the descriptions at the following link:
https://developer.spotify.com/documentation/web-
api/reference/object-model
Please consult their official documentation for more in-depth
information & full-linking across pages.
Attributes:
album (a simplified album object): The album on which the track
appears. The album object includes a link in ``href`` to full
information about the album.
artists (an array of simplified artist objects): The artists who
performed the track. Each artist object includes a link in
``href`` to more detailed information about the artist.
available_markets (array of strings): A list of the countries in
which the track can be played, identified by their ISO 3166-1
alpha-2 code.
disc_number (int): The disc number (usually ``1`` unless the
album consists of more than one disc).
duration (int): The track length in milliseconds.
explicit (Boolean): Whether or not the track has explicit lyrics
( ``true`` = yes it does; ``false`` = no it does not OR
unknown).
external_ids (an external ID object): Known external IDs for the
track.
external_urls (an external URL object): Known external URLs for
this track.
href (str): A link to the Web API endpoint providing full
details of the track.
id (str): The Spotify ID for the track.
is_playable (bool): Part of the response when Track Relinking is
applied. If ``true`` , the track is playable in the given
market. Otherwise ``false``.
linked_from (a linked track object): Part of the response when
Track Relinking is applied, and the requested track has been
replaced with different track. The track in the
``linked_from`` object contains information about the
originally requested track.
restrictions (a restrictions object): Part of the response when
Track Relinking is applied, the original track is not available
in the given market, and Spotify did not have any tracks to
relink it with. The track response will still contain metadata
for the original track, and a restrictions object containing
the reason why the track is not available: ``"restrictions" :
{"reason" : "market"}``
name (str): The name of the track.
popularity (int): The popularity of the track. The value will
be between 0 and 100, with 100 being the most popular.The
popularity of a track is a value between 0 and 100, with 100
being the most popular. The popularity is calculated by
algorithm and is based, in the most part, on the total number
of plays the track has had and how recent those plays
are.Generally speaking, songs that are being played a lot now
will have a higher popularity than songs that were played a lot
in the past. Duplicate tracks (e.g. the same track from a
single and an album) are rated independently. Artist and album
popularity is derived mathematically from track popularity.
Note that the popularity value may lag actual popularity by a
few days: the value is not updated in real time.
preview_url (str): A link to a 30 second preview (MP3 format) of
the track. Can be ``null``
track_number (int): The number of the track. If an album has
several discs, the track number is the number on the specified
disc.
type (str): The object type: track.
uri (str): The Spotify URI for the track.
is_local (bool): Whether or not the track is from a local file.
"""
def __init__(self, track: dict):
    """Build a Track from a raw Spotify Web API track payload.

    Args:
        track: Deserialized track object returned by the Spotify API.
    """
    # Small closure so each scalar attribute pull reads as field("<key>").
    def field(key):
        return object_handler(track, key)

    # Composite fields are wrapped in their dedicated model classes;
    # assignment order is kept alphabetical to preserve vars() ordering.
    self.album: Album = Album(field("album"))
    self.artists: list = [Artist(raw) for raw in field("artists")]
    self.available_markets: list = field("available_markets")
    self.disc_number: int = field("disc_number")
    # Duration arrives in milliseconds and is wrapped in a Timestamp.
    self.duration: Time.Timestamp = Time.Timestamp(
        track.get("duration_ms"), base="milliseconds"
    )
    self.explicit: bool = field("explicit")
    self.external_ids: ExternalId = ExternalId(field("external_ids"))
    self.external_urls: ExternalUrl = ExternalUrl(field("external_urls"))
    self.href: str = field("href")
    self.id: str = field("id")
    self.is_local: bool = field("is_local")
    self.name: str = field("name")
    self.popularity: int = field("popularity")
    self.preview_url: str = field("preview_url")
    self.track_number: int = field("track_number")
    self.type: str = field("type")
    self.uri: str = field("uri")
def get_duration(self) -> Time.Timestamp:
return self.duration
def __eq__(self, other) -> bool:
    """Compare two objects by their instance attribute dictionaries.

    Args:
        other: Comparison object

    Returns:
        True when both objects carry identical attribute names and values.
    """
    own_attrs = vars(self)
    other_attrs = vars(other)
    return own_attrs == other_attrs
def __getitem__(self, item: str):
    """Allow subscript access (``obj["name"]``) over attributes.

    Args:
        item: Attribute name to look up.

    Returns:
        The attribute's value (raises AttributeError if absent,
        exactly like plain attribute access).
    """
    return getattr(self, item)
def get(self, item: str, default=None):
    """Fetch an attribute value without raising on a missing name.

    Args:
        item: Attribute name to look up.
        default: Value returned when the attribute does not exist.

    Returns:
        The attribute's value, or ``default`` when absent.
    """
    # Only instance attributes count, mirroring dict.get() semantics.
    attrs = vars(self)
    return attrs.get(item, default)
def to_dict(self) -> dict:
    """Serialize every attribute through the shared serialization helper.

    Returns:
        Dict mapping attribute names to mongo-serializable values.
    """
    serialized = {}
    for attr_name, attr_value in vars(self).items():
        serialized[attr_name] = get_serializable(attr_value)
    return serialized
@property
def json(self) -> str:
    """Jsonified/string form of the object for mongo serialization.

    Returns:
        JSON string built from :meth:`to_dict`.
    """
    as_dict = self.to_dict()
    return json.dumps(as_dict)
def add_to_playlist(self, playlist_href: str, headers: dict):
    """POST this track's URI to a playlist endpoint.

    Args:
        playlist_href: Playlist "add tracks" endpoint URL.
        headers: HTTP headers (auth etc.) for the request.

    Returns:
        The ``requests.Response`` from the API call.
    """
    payload = json.dumps({"uris": [self.uri]})
    return requests.post(playlist_href, data=payload, headers=headers)
|
#!/usr/bin/env python3
"""Advent of Code 2017, day 9 (part 1): score the group stream.

Each ``{`` opens a group worth its nesting depth; ``<...>`` is garbage
in which ``!`` cancels the next character.
"""


def score_groups(stream: str) -> int:
    """Return the total score of all groups in *stream*.

    Args:
        stream: The puzzle input, e.g. ``'{{},{}}'``.

    Returns:
        Sum over all groups of their nesting depth.
    """
    total = 0
    depth = 0
    in_garbage = False
    skip_next = False
    for ch in stream:
        if in_garbage:
            if skip_next:
                skip_next = False        # character cancelled by '!'
            elif ch == '!':
                skip_next = True         # cancel the next character
            elif ch == '>':
                in_garbage = False       # garbage ends; '<' inside is inert
        elif ch == '{':
            depth += 1
            total += depth               # a group scores its nesting depth
        elif ch == '}':
            depth -= 1
        elif ch == '<':
            in_garbage = True
    return total


# Guard the file I/O so importing this module has no side effects.
if __name__ == '__main__':
    with open('input.txt', 'r') as f:
        data = f.readline().strip()
    print(score_groups(data))
|
def self_numbers(limit: int = 10000) -> list:
    """Return the self numbers in ``[1, limit]`` in ascending order.

    A self number is one that cannot be written as ``i + digitsum(i)``
    for any positive integer ``i`` (Kaprekar's self numbers).

    Args:
        limit: Inclusive upper bound of the search range.

    Returns:
        Sorted list of self numbers not exceeding ``limit``.
    """
    candidates = set(range(1, limit + 1))
    for i in range(1, limit + 1):
        generated = i + sum(int(digit) for digit in str(i))
        # discard() ignores generated values beyond the limit.
        candidates.discard(generated)
    # sorted() guarantees deterministic ascending output.
    return sorted(candidates)


if __name__ == '__main__':
    for n in self_numbers():
        print(n)
|
# This file is part of beets.
# Copyright 2016, Pedro Silva.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List duplicate tracks or albums.
"""
import collections
import os
import shlex

from beets.library import Item, Album
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess, \
    bytestring_path, MoveOperation
PLUGIN = 'duplicates'
class DuplicatesPlugin(BeetsPlugin):
    """List duplicate tracks or albums.
    """
    def __init__(self):
        super().__init__()
        # Defaults for every option; CLI flags override them via set_args().
        self.config.add({
            'album': False,
            'checksum': '',
            'copy': '',
            'count': False,
            'delete': False,
            'format': '',
            'full': False,
            'keys': [],
            'merge': False,
            'move': '',
            'path': False,
            'tiebreak': {},
            'strict': False,
            'tag': '',
        })
        self._command = Subcommand('duplicates',
                                   help=__doc__,
                                   aliases=['dup'])
        self._command.parser.add_option(
            '-c', '--count', dest='count',
            action='store_true',
            help='show duplicate counts',
        )
        self._command.parser.add_option(
            '-C', '--checksum', dest='checksum',
            action='store', metavar='PROG',
            help='report duplicates based on arbitrary command',
        )
        self._command.parser.add_option(
            '-d', '--delete', dest='delete',
            action='store_true',
            help='delete items from library and disk',
        )
        self._command.parser.add_option(
            '-F', '--full', dest='full',
            action='store_true',
            help='show all versions of duplicate tracks or albums',
        )
        self._command.parser.add_option(
            '-s', '--strict', dest='strict',
            action='store_true',
            help='report duplicates only if all attributes are set',
        )
        self._command.parser.add_option(
            '-k', '--key', dest='keys',
            action='append', metavar='KEY',
            help='report duplicates based on keys (use multiple times)',
        )
        self._command.parser.add_option(
            '-M', '--merge', dest='merge',
            action='store_true',
            help='merge duplicate items',
        )
        self._command.parser.add_option(
            '-m', '--move', dest='move',
            action='store', metavar='DEST',
            help='move items to dest',
        )
        self._command.parser.add_option(
            '-o', '--copy', dest='copy',
            action='store', metavar='DEST',
            help='copy items to dest',
        )
        self._command.parser.add_option(
            '-t', '--tag', dest='tag',
            action='store',
            help='tag matched items with \'k=v\' attribute',
        )
        self._command.parser.add_all_common_options()

    def commands(self):
        """Return the ``duplicates`` subcommand wired to its handler."""
        def _dup(lib, opts, args):
            self.config.set_args(opts)
            album = self.config['album'].get(bool)
            checksum = self.config['checksum'].get(str)
            copy = bytestring_path(self.config['copy'].as_str())
            count = self.config['count'].get(bool)
            delete = self.config['delete'].get(bool)
            fmt = self.config['format'].get(str)
            full = self.config['full'].get(bool)
            keys = self.config['keys'].as_str_seq()
            merge = self.config['merge'].get(bool)
            move = bytestring_path(self.config['move'].as_str())
            path = self.config['path'].get(bool)
            tiebreak = self.config['tiebreak'].get(dict)
            strict = self.config['strict'].get(bool)
            tag = self.config['tag'].get(str)

            if album:
                if not keys:
                    keys = ['mb_albumid']
                items = lib.albums(decargs(args))
            else:
                if not keys:
                    keys = ['mb_trackid', 'mb_albumid']
                items = lib.items(decargs(args))

            # If there's nothing to do, return early. The code below assumes
            # `items` to be non-empty.
            if not items:
                return

            if path:
                fmt = '$path'

            # Default format string for count mode.
            if count and not fmt:
                if album:
                    fmt = '$albumartist - $album'
                else:
                    fmt = '$albumartist - $album - $title'
                fmt += ': {0}'

            if checksum:
                # Cache the checksum on each item (as a flexattr) and group
                # by the checksum key instead of the metadata keys.
                for i in items:
                    k, _ = self._checksum(i, checksum)
                keys = [k]

            for obj_id, obj_count, objs in self._duplicates(items,
                                                            keys=keys,
                                                            full=full,
                                                            strict=strict,
                                                            tiebreak=tiebreak,
                                                            merge=merge):
                if obj_id:  # Skip empty IDs.
                    for o in objs:
                        self._process_item(o,
                                           copy=copy,
                                           move=move,
                                           delete=delete,
                                           tag=tag,
                                           fmt=fmt.format(obj_count))

        self._command.func = _dup
        return [self._command]

    def _process_item(self, item, copy=False, move=False, delete=False,
                      tag=False, fmt=''):
        """Process Item `item`: print it, then apply the requested
        copy/move/delete/tag actions in that order.
        """
        print_(format(item, fmt))
        if copy:
            item.move(basedir=copy, operation=MoveOperation.COPY)
            item.store()
        if move:
            item.move(basedir=move)
            item.store()
        if delete:
            item.remove(delete=True)
        if tag:
            try:
                k, v = tag.split('=')
            except Exception:
                raise UserError(
                    f"{PLUGIN}: can't parse k=v tag: {tag}"
                )
            setattr(item, k, v)
            item.store()

    def _checksum(self, item, prog):
        """Run external `prog` on file path associated with `item`, cache
        output as flexattr on a key that is the name of the program, and
        return the key, checksum tuple.
        """
        args = [p.format(file=os.fsdecode(item.path))
                for p in shlex.split(prog)]
        # The flexattr key is the program name (first token of the command).
        key = args[0]
        checksum = getattr(item, key, False)
        if not checksum:
            # Fixed message: adjacent literals previously joined without a
            # space ("cached:computing").
            self._log.debug('key {0} on item {1} not cached: '
                            'computing checksum',
                            key, displayable_path(item.path))
            try:
                checksum = command_output(args).stdout
                setattr(item, key, checksum)
                item.store()
                self._log.debug('computed checksum for {0} using {1}',
                                item.title, key)
            except subprocess.CalledProcessError as e:
                self._log.debug('failed to checksum {0}: {1}',
                                displayable_path(item.path), e)
        else:
            self._log.debug('key {0} on item {1} cached: '
                            'not computing checksum',
                            key, displayable_path(item.path))
        return key, checksum

    def _group_by(self, objs, keys, strict):
        """Return a dictionary with keys arbitrary concatenations of attributes
        and values lists of objects (Albums or Items) with those keys.

        If strict, all attributes must be defined for a duplicate match.
        """
        # `collections` is imported at module level.
        counts = collections.defaultdict(list)
        for obj in objs:
            values = [getattr(obj, k, None) for k in keys]
            values = [v for v in values if v not in (None, '')]
            if strict and len(values) < len(keys):
                self._log.debug('some keys {0} on item {1} are null or empty:'
                                ' skipping',
                                keys, displayable_path(obj.path))
            elif (not strict and not len(values)):
                self._log.debug('all keys {0} on item {1} are null or empty:'
                                ' skipping',
                                keys, displayable_path(obj.path))
            else:
                key = tuple(values)
                counts[key].append(obj)
        return counts

    def _order(self, objs, tiebreak=None):
        """Return the objects (Items or Albums) sorted by descending
        order of priority.

        If provided, the `tiebreak` dict indicates the field to use to
        prioritize the objects. Otherwise, Items are placed in order of
        "completeness" (objects with more non-null fields come first)
        and Albums are ordered by their track count.
        """
        kind = 'items' if all(isinstance(o, Item) for o in objs) else 'albums'

        if tiebreak and kind in tiebreak.keys():
            key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind])
        else:
            if kind == 'items':
                def truthy(v):
                    # Avoid a Unicode warning by avoiding comparison
                    # between a bytes object and the empty Unicode
                    # string ''.
                    return v is not None and \
                        (v != '' if isinstance(v, str) else True)
                fields = Item.all_keys()
                key = lambda x: sum(1 for f in fields if truthy(getattr(x, f)))
            else:
                key = lambda x: len(x.items())

        return sorted(objs, key=key, reverse=True)

    def _merge_items(self, objs):
        """Merge Item objs by copying missing fields from items in the tail to
        the head item.

        Return same number of items, with the head item modified.
        """
        fields = Item.all_keys()
        for f in fields:
            for o in objs[1:]:
                if getattr(objs[0], f, None) in (None, ''):
                    value = getattr(o, f, None)
                    if value:
                        self._log.debug('key {0} on item {1} is null '
                                        'or empty: setting from item {2}',
                                        f, displayable_path(objs[0].path),
                                        displayable_path(o.path))
                        setattr(objs[0], f, value)
                        objs[0].store()
                        break
        return objs

    def _merge_albums(self, objs):
        """Merge Album objs by copying missing items from albums in the tail
        to the head album.

        Return same number of albums, with the head album modified."""
        ids = [i.mb_trackid for i in objs[0].items()]
        for o in objs[1:]:
            for i in o.items():
                if i.mb_trackid not in ids:
                    missing = Item.from_path(i.path)
                    missing.album_id = objs[0].id
                    missing.add(i._db)
                    self._log.debug('item {0} missing from album {1}:'
                                    ' merging from {2} into {3}',
                                    missing,
                                    objs[0],
                                    displayable_path(o.path),
                                    displayable_path(missing.destination()))
                    missing.move(operation=MoveOperation.COPY)
        return objs

    def _merge(self, objs):
        """Merge duplicate items. See ``_merge_items`` and ``_merge_albums``
        for the relevant strategies.
        """
        kind = Item if all(isinstance(o, Item) for o in objs) else Album
        if kind is Item:
            objs = self._merge_items(objs)
        else:
            objs = self._merge_albums(objs)
        return objs

    def _duplicates(self, objs, keys, full, strict, tiebreak, merge):
        """Generate triples of keys, duplicate counts, and constituent objects.
        """
        # With `full`, all versions are yielded; otherwise the highest-priority
        # version (head after ordering) is kept out of the result.
        offset = 0 if full else 1
        for k, objs in self._group_by(objs, keys, strict).items():
            if len(objs) > 1:
                objs = self._order(objs, tiebreak)
                if merge:
                    objs = self._merge(objs)
                yield (k, len(objs) - offset, objs[offset:])
|
# -*- coding: utf-8 -*-
from flask import current_app
from ..extensions import db
from .models import *
|
from io import BytesIO
# noinspection PyPackageRequirements
import zopfli
from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
from django.contrib.staticfiles.utils import matches_patterns
from django.core.files.base import File
class GzipMixin:
    """
    Mixin that adds gzip generation to a static files storage backend,
    using Zopfli for compression.
    """

    gzip_patterns = ("*.css", "*.js", "*.svg", "*.ttf")

    def _compress(self, original_file):
        """Return *original_file*'s content gzipped with Zopfli, as a File."""
        compressor = zopfli.ZopfliCompressor(zopfli.ZOPFLI_FORMAT_GZIP)
        payload = compressor.compress(original_file.read()) + compressor.flush()
        return File(BytesIO(payload))

    def post_process(self, paths, dry_run=False, **options):
        """Run the parent storage's post-processing, then emit a ``.gz``
        sibling for every asset matching ``gzip_patterns``.
        """
        parent = super()
        if hasattr(parent, "post_process"):
            for name, hashed_name, processed in parent.post_process(
                paths.copy(), dry_run, **options
            ):
                if hashed_name != name:
                    # Also compress the hashed variant produced upstream.
                    paths[hashed_name] = (self, hashed_name)
                yield name, hashed_name, processed

        if dry_run:
            return

        for path in paths:
            if not path:
                continue
            if not matches_patterns(path, self.gzip_patterns):
                continue
            source = self.open(path, mode="rb")
            target_path = "{0}.gz".format(path)
            # Replace any stale compressed copy before saving the new one.
            if self.exists(target_path):
                self.delete(target_path)
            saved_path = self.save(target_path, self._compress(source))
            yield saved_path, saved_path, True
class GzipManifestStaticFilesStorage(GzipMixin, ManifestStaticFilesStorage):
    """
    A drop-in replacement for ManifestStaticFilesStorage that additionally
    writes a .gz companion file for every matching text asset.
    """
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.