text stringlengths 8 6.05M |
|---|
from django.shortcuts import render, get_object_or_404
from rest_framework import generics
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .models import Note, Episode
from .serializers import NoteSerializer, EpisodeSerializer
# Create your views here.
class NoteListCreateApiView(generics.ListCreateAPIView):
    """List notes for one episode, or create a new note on it.

    The episode is identified by the ``episode`` URL kwarg, which is
    matched against ``Episode.show_id`` (not the primary key).
    Removed leftover debug artifacts (commented-out queryset and
    ``pdb.set_trace`` lines).
    """
    serializer_class = NoteSerializer
    permission_classes = [IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        # Only notes belonging to the episode named in the URL.
        return Note.objects.filter(episode__show_id=self.kwargs['episode'])

    def perform_create(self, serializer):
        # Resolve the episode once; 404 if the show_id is unknown.
        episode = get_object_or_404(Episode, show_id=self.kwargs['episode'])
        serializer.save(episode=episode)
class NoteDetailApiView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single note by primary key."""
    queryset = Note.objects.all()
    serializer_class = NoteSerializer
    permission_classes = [IsAuthenticatedOrReadOnly]
class EpisodeListCreateApiView(generics.ListCreateAPIView):
    """List all episodes, or create a new episode."""
    queryset = Episode.objects.all()
    serializer_class = EpisodeSerializer
    permission_classes = [IsAuthenticatedOrReadOnly]
|
import pandas as pd

# Container for all loaded tables, keyed by table name.
Data = {}
Data_IndexCol = {"employees": ['emp_no']}
Data_Names = {"employees": ['emp_no', 'birth_date', 'first_name', 'last_name',
                            'gender', 'hire_date', 'ids']}

# Read the raw CSV (the file has no header row).  ``ids`` is kept as a
# string so leading zeros survive.
# Fix: the original parsed dates with ``date_parser=lambda x:
# pd.datetime.strptime(x, '%Y-%m-%d')`` -- ``pd.datetime`` was removed in
# pandas 2.0 and ``date_parser`` is deprecated; parse explicitly instead.
employees = pd.read_csv(
    "./files/employees.csv",
    names=Data_Names['employees'],
    index_col=Data_IndexCol['employees'],
    header=None,
    dtype={"ids": str},
)
employees['hire_date'] = pd.to_datetime(employees['hire_date'], format='%Y-%m-%d')
# Vectorized .dt accessor replaces the per-row apply/strftime lambda.
employees['hire_date_year'] = employees['hire_date'].dt.strftime('%Y')
Data["employees"] = employees
__author__ = "Narwhale"
import socket

# One-shot server: accept a single client, read one message, reply with
# the uppercased bytes, then shut down.
server = socket.socket()
server.bind(('localhost', 6565))
server.listen()                  # listen for connections
conn, addr = server.accept()     # block until a client connects
try:
    data = conn.recv(1024)
    print('recv:', data)
    conn.send(data.upper())
finally:
    # Fix: the original never closed the client socket; release both.
    conn.close()
    server.close()
|
"""Tools specific to template rendering."""


def domain_renderer(domain):
    """Template helper to add the IDNA (punycode) form beside a Unicode domain.

    Returns *domain* unchanged when its IDNA encoding is identical
    (pure-ASCII domains); otherwise returns ``'<domain> (<idna>)'``.
    """
    # Fix: str.encode('idna') returns *bytes*; the original compared those
    # bytes to the str argument, which is always False in Python 3, so even
    # plain ASCII domains got a ``(b'...')`` suffix.  Decode first.
    idna_domain = domain.encode('idna').decode('ascii')
    if idna_domain == domain:
        return domain
    return '{} ({})'.format(domain, idna_domain)
|
# Generation classifier:
#   Baby Boomer : born up to 1964
#   Generation X: 1965 - 1981
#   Generation Y: 1982 - 1996
#   Generation Z: after 1996
ano_nasc = int(input('ano de nascimento: '))

# Fixes: dropped the unused ``geracao = str('Qual a sua geraçao?: ')``
# variable, and corrected the boundary years -- in the original, 1981 and
# 1996 fell through every range check and were misclassified as Z.
if ano_nasc <= 1964:
    print('Vocé é da geracao BBoomer')
elif ano_nasc <= 1981:
    print('Vocé é da geracao X')
elif ano_nasc <= 1996:
    print('Vocé é da geracao Y')
else:
    print('Vocé é da geracao Z')
|
"""Plot uniform time-series of one variable."""
# author: Christian Brodbeck
from __future__ import division
from itertools import izip
import operator
from warnings import warn
import matplotlib as mpl
import numpy as np
from .._data_obj import ascategorial, asndvar, assub, cellname, Celltable
from .._stats import stats
from . import _base
from ._base import _EelFigure, LegendMixin
from ._colors import colors_for_oneway, find_cell_colors
class UTSStat(_EelFigure, LegendMixin):
    """
    Plot statistics for a one-dimensional NDVar

    Parameters
    ----------
    Y : 1d-NDVar
        Dependent variable (one-dimensional NDVar).
    X : categorial or None
        Model: specification of conditions which should be plotted separately.
    Xax : None | categorial
        Make separate axes for each category in this categorial model.
    match : Factor
        Identifier for repeated measures data.
    sub : None | index array
        Only use a subset of the data provided.
    ds : None | Dataset
        If a Dataset is specified, all data-objects can be specified as
        names of Dataset variables.
    main : func | None
        Measure for the central tendency (function that takes an ``axis``
        argument). The default is numpy.mean.
    error : None | str
        Measure of variability to plot (default: 1 SEM). Examples:
        'ci': 95% confidence interval;
        '99%ci': 99% confidence interval;
        '2sem': 2 standard error of the mean;
        'all': plot all traces.
    pool_error : bool
        Pool the errors for the estimate of variability (default is True
        for related measures designs, False otherwise). See Loftus & Masson
        (1994).
    legend : str | int | 'fig' | None
        Matplotlib figure legend location argument or 'fig' to plot the
        legend in a separate figure.
    axtitle : str | None
        Axes title, '{name}' is formatted to the category name. When plotting
        only one axes, use the `title` argument.
    xlabel : str | None
        X-axis labels. By default the label is inferred from the data.
    ylabel : str | None
        Y-axis labels. By default the label is inferred from the data.
    xticklabels : bool
        Add tick-labels to the x-axis (default True).
    invy : bool
        Invert the y axis (if ``bottom`` and/or ``top`` are specified explicitly
        they take precedence; an inverted y-axis can also be produced by
        specifying ``bottom > top``).
    bottom, top : None | scalar
        Set an absolute range for the plot's y axis.
    hline : None | scalar | (value, kwarg-dict) tuple
        Add a horizontal line to each plot. If provided as a tuple, the second
        element can include any keyword arguments that should be submitted to
        the call to matplotlib axhline call.
    xdim : str
        dimension for the x-axis (default is 'time')
    xlim : None | (scalar, scalar)
        Tuple of xmin and xmax to set the initial x-axis limits.
    color : matplotlib color
        Color if just a single category of data is plotted.
    colors : str | list | dict
        Colors for the plots if multiple categories of data are plotted.
        **str**: A colormap name; Cells of X are mapped onto the colormap in
        regular intervals.
        **list**: A list of colors in the same sequence as X.cells.
        **dict**: A dictionary mapping each cell in X to a color.
        Colors are specified as `matplotlib compatible color arguments
        <http://matplotlib.org/api/colors_api.html>`_.
    clusters : None | Dataset
        Clusters to add to the plots. The clusters should be provided as
        Dataset, as stored in test results' :py:attr:`.clusters`.
    pmax : scalar
        Maximum p-value of clusters to plot as solid.
    ptrend : scalar
        Maximum p-value of clusters to plot as trend.
    tight : bool
        Use matplotlib's tight_layout to expand all axes to fill the figure
        (default True)
    title : str | None
        Figure title.
    frame : bool | 't'
        How to frame the plots.
        ``True`` (default): normal matplotlib frame;
        ``False``: omit top and right lines;
        ``'t'``: draw spines at x=0 and y=0, common for ERPs.
    """
    def __init__(self, Y='Y', X=None, Xax=None, match=None, sub=None, ds=None,
                 main=np.mean, error='sem', pool_error=None, legend='upper right',
                 axtitle='{name}', xlabel=True, ylabel=True, xticklabels=True,
                 invy=False, bottom=None, top=None, hline=None, xdim='time',
                 xlim=None, color='b', colors=None, clusters=None, pmax=0.05,
                 ptrend=0.1, *args, **kwargs):
        # backwards compatibility: 'dev' was renamed to 'error'
        if 'dev' in kwargs:
            error = kwargs.pop('dev')
            warn("The 'dev' keyword is deprecated, use 'error'",
                 DeprecationWarning)

        # coerce input variables
        sub = assub(sub, ds)
        Y = asndvar(Y, sub, ds)
        if X is not None:
            X = ascategorial(X, sub, ds)
        if Xax is not None:
            Xax = ascategorial(Xax, sub, ds)
        if match is not None:
            match = ascategorial(match, sub, ds)

        # pooled error estimate (Loftus & Masson 1994); defaults to on for
        # repeated-measures designs (i.e. when ``match`` is given)
        if pool_error or (pool_error is None and match is not None):
            all_x = [i for i in (Xax, X) if i is not None]
            if len(all_x) > 0:
                # NOTE(review): ``reduce`` is the Python 2 builtin here (the
                # module also uses izip/xrange); Python 3 would need
                # ``from functools import reduce``.
                full_x = reduce(operator.mod, all_x)
                ct = Celltable(Y, full_x, match)
                dev_data = stats.variability(ct.Y.x, ct.X, ct.match, error, True)
                error = 'data'
            else:
                dev_data = None
        else:
            dev_data = None

        # partition data into cells; one axes per Xax cell (or a single axes)
        if Xax is None:
            nax = 1
            ct = Celltable(Y, X, match)
            if X is None:
                color_x = None
            else:
                color_x = ct.X
        else:
            ct = Celltable(Y, Xax)
            if X is None:
                color_x = None
                X_ = None
            else:
                Xct = Celltable(X, Xax)
                color_x = Xct.Y
            if match is not None:
                matchct = Celltable(match, Xax)
            nax = len(ct.cells)

        # assemble colors
        if color_x is None:
            colors = {None: color}
        else:
            colors = find_cell_colors(color_x, colors)

        frame_title = _base.frame_title("UTSStat", Y, X, Xax)
        _EelFigure.__init__(self, frame_title, nax, 4, 2, *args, **kwargs)

        # create plots
        self._plots = []
        legend_handles = {}
        if Xax is None:
            p = _ax_uts_stat(self._axes[0], ct, colors, main, error, dev_data,
                             None, xdim, xlim, invy, bottom, top, hline,
                             clusters, pmax, ptrend)
            self._plots.append(p)
            legend_handles.update(p.legend_handles)
            # a one-cell plot needs no legend
            if len(ct) < 2:
                legend = False
        else:
            for i, ax, cell in zip(xrange(nax), self._axes, ct.cells):
                if X is not None:
                    X_ = Xct.data[cell]
                if match is not None:
                    # NOTE(review): rebinds ``match`` inside the loop, so
                    # later iterations index the previous slice -- looks
                    # suspicious; confirm against upstream before changing.
                    match = matchct.data[cell]
                ct_ = Celltable(ct.data[cell], X_, match=match, coercion=asndvar)
                title_ = axtitle.format(name=cellname(cell))
                p = _ax_uts_stat(ax, ct_, colors, main, error, dev_data, title_,
                                 xdim, xlim, invy, bottom, top, hline, clusters,
                                 pmax, ptrend)
                self._plots.append(p)
                legend_handles.update(p.legend_handles)

        self._configure_yaxis(ct.Y, ylabel)
        self._configure_xaxis_dim(xdim, xlabel, xticklabels)
        LegendMixin.__init__(self, legend, legend_handles)
        self._update_ui_cluster_button()
        self._show()

    def _fill_toolbar(self, tb):
        # add a "Clusters" info button to the wx toolbar (wx imported lazily)
        import wx
        btn = self._cluster_btn = wx.Button(tb, wx.ID_ABOUT, "Clusters")
        btn.Enable(False)
        tb.AddControl(btn)
        btn.Bind(wx.EVT_BUTTON, self._OnShowClusterInfo)
        LegendMixin._fill_toolbar(self, tb)

    def _OnShowClusterInfo(self, event):
        # show cluster info for all plots in a text dialog
        from .._wxutils import show_text_dialog
        if len(self._plots) == 1:
            clusters = self._plots[0].cluster_plt.clusters
            all_plots_same = True
        else:
            # NOTE(review): this builds a list of *booleans* (``is None``)
            # but the code below treats the entries as cluster Datasets --
            # probably meant ``[p.cluster_plt.clusters for p in
            # self._plots]``; confirm before fixing.
            all_clusters = [p.cluster_plt.clusters is None for p in self._plots]
            clusters = all_clusters[0]
            if all(c is clusters for c in all_clusters[1:]):
                all_plots_same = True
            else:
                all_plots_same = False

        if all_plots_same:
            info = str(clusters)
        else:
            info = []
            for i, clusters in enumerate(all_clusters):
                if clusters is None:
                    continue
                title = "Axes %i" % i
                info.append(title)
                info.append('\n')
                info.append('-' * len(title))
                info.append(str(clusters))
            info = '\n'.join(info)

        show_text_dialog(self._frame, info, "Clusters")

    def _update_ui_cluster_button(self):
        # enable the toolbar button only when at least one axes has clusters
        if hasattr(self, '_cluster_btn'):
            enable = not all(p.cluster_plt.clusters is None for p in self._plots)
            self._cluster_btn.Enable(enable)

    def add_vspan(self, xmin, xmax, axes=None, *args, **kwargs):
        """Draw a vertical bar on all axes

        Parameters
        ----------
        xmin : scalar
            Start value on the x-axis.
        xmax : scalar
            Last value on the x-axis.
        axes : int | list of int
            Which axes to mark (default is all axes).
        ...

        Notes
        -----
        See Matplotlib's :meth:`matplotlib.axes.Axes.axvspan` for more
        arguments.
        """
        if axes is None:
            axes = self._axes
        elif isinstance(axes, int):
            axes = (self._axes[axes],)
        else:
            axes = [self._axes[i] for i in axes]

        for ax in axes:
            ax.axvspan(xmin, xmax, *args, **kwargs)

    def set_clusters(self, clusters, pmax=0.05, ptrend=0.1, color='.7', ax=None):
        """Add clusters from a cluster test to the plot (as shaded area).

        Parameters
        ----------
        clusters : None | Dataset
            The clusters, as stored in test results' :py:attr:`.clusters`.
            Use ``None`` to remove the clusters plotted on a given axis.
        pmax : scalar
            Maximum p-value of clusters to plot as solid.
        ptrend : scalar
            Maximum p-value of clusters to plot as trend.
        color : matplotlib color
            Color for the clusters.
        ax : None | int
            Index of the axes to which the clusters are to be added. If None,
            add the clusters to all axes.
        """
        nax = len(self._axes)
        if ax is None:
            axes = xrange(nax)
        else:
            axes = [ax]

        # update plots (defer redraw until all properties are set)
        for ax in axes:
            p = self._plots[ax].cluster_plt
            p.set_clusters(clusters, False)
            p.set_color(color, False)
            p.set_pmax(pmax, ptrend)
        self.draw()

        self._update_ui_cluster_button()

    def set_xlim(self, xmin, xmax):
        "Adjust the x-axis limits on all axes"
        for ax in self._axes:
            ax.set_xlim(xmin, xmax)
        self.draw()

    def set_ylim(self, bottom=None, top=None):
        "Adjust the y-axis limits on all axes"
        for ax in self._axes:
            ax.set_ylim(bottom, top)
        self.draw()
class UTS(_EelFigure):
    """Value by time plot for UTS data

    Parameters
    ----------
    epochs : epochs
        Uts data epochs to plot.
    Xax : None | categorial
        Make separate axes for each category in this categorial model.
    axtitle : None | str
        Axes title. '{name}' is formatted to the category name.
    ds : None | Dataset
        If a Dataset is specified, all data-objects can be specified as
        names of Dataset variables.
    xlabel, ylabel : str | None
        X- and y axis labels. By default the labels will be inferred from
        the data.
    tight : bool
        Use matplotlib's tight_layout to expand all axes to fill the figure
        (default True)
    title : None | str
        Figure title.
    """
    def __init__(self, epochs, Xax=None, axtitle='{name}', ds=None,
                 xlabel=True, ylabel=True, *args, **kwargs):
        # normalize the epochs argument (may be given through ds / Xax)
        epochs, _ = _base.unpack_epochs_arg(epochs, 1, Xax, ds)
        _EelFigure.__init__(self, "UTS", len(epochs), 2, 1.5, *args, **kwargs)
        self._configure_yaxis(epochs[0][0], ylabel)
        # draw one axes per epoch
        for target_ax, layers in izip(self._axes, epochs):
            _ax_uts(target_ax, layers, axtitle, xlabel)
        self.epochs = epochs
        self._show()
class _ax_uts_stat(object):
    # One axes of a UTSStat figure: a central-tendency line (plus error
    # band) per cell, optional hline, optional cluster shading.
    def __init__(self, ax, ct, colors, main, error, dev_data, title, xdim, xlim,
                 invy, bottom, top, hline, clusters, pmax, ptrend):
        # stat plots: one _plt_uts_stat per cell
        self.stat_plots = []
        self.legend_handles = {}

        x = ct.Y.get_dim(xdim)
        for cell in ct.cells:
            ndvar = ct.data[cell]
            y = ndvar.get_data(('case', xdim))
            plt = _plt_uts_stat(ax, x, y, main, error, dev_data, colors[cell],
                                cellname(cell))
            self.stat_plots.append(plt)
            if plt.main is not None:
                # first line artist represents this cell in the legend
                self.legend_handles[cell] = plt.main[0]

        # hline: either a scalar or a (value, kwargs) tuple
        if hline is not None:
            if isinstance(hline, tuple):
                if len(hline) != 2:
                    raise ValueError("hline must be None, scalar or length 2 tuple")
                hline, hline_kw = hline
                hline_kw = dict(hline_kw)
            else:
                hline_kw = {'color': 'k'}
            hline = float(hline)
            ax.axhline(hline, **hline_kw)

        # cluster plot (shading managed separately so it can be updated)
        self.cluster_plt = _plt_uts_clusters(ax, clusters, pmax, ptrend)

        # title
        if title:
            if title is True:
                title = ct.Y.name
            ax.set_title(title)

        # format x axis: full data range unless explicit limits were given
        if xlim is None:
            ax.set_xlim(x[0], x[-1])
        else:
            xmin, xmax = xlim
            ax.set_xlim(xmin, xmax)

        # invy swaps the auto limits; explicit bottom/top take precedence
        if invy:
            y0, y1 = ax.get_ylim()
            if bottom is None:
                bottom = y1
            if top is None:
                top = y0
        if (bottom is not None) or (top is not None):
            ax.set_ylim(bottom, top)

        # store attributes
        self.ax = ax
        self.title = title
class UTSClusters(_EelFigure):
    """Plot permutation cluster test results

    Parameters
    ----------
    res : testnd.anova
        ANOVA with permutation cluster test result object.
    pmax : scalar
        Maximum p-value of clusters to plot as solid.
    ptrend : scalar
        Maximum p-value of clusters to plot as trend.
    axtitle : None | str
        Axes title pattern. '{name}' is formatted to the effect name.
    cm : str
        Colormap to use for coloring different effects.
    overlay : bool
        Plot epochs (time course for different effects) on top of each
        other (as opposed to on separate axes).
    xticklabels : bool
        Add tick-labels to the x-axis (default True).
    tight : bool
        Use matplotlib's tight_layout to expand all axes to fill the figure
        (default True)
    title : str
        Figure title.
    """
    def __init__(self, res, pmax=0.05, ptrend=0.1, axtitle='{name}', cm='jet',
                 overlay=False, xticklabels=True, *args, **kwargs):
        clusters_ = res.clusters

        epochs, (xdim,) = _base.unpack_epochs_arg(res, 1)

        # create figure: one axes per effect, or a single shared axes
        n = len(epochs)
        nax = 1 if overlay else n
        _EelFigure.__init__(self, "UTSClusters", nax, 4, 2, *args, **kwargs)

        colors = colors_for_oneway(range(n), cm)
        self._caxes = []
        if overlay:
            ax = self._axes[0]
            # per-axes titles make no sense when everything shares one axes
            axtitle = None

        for i, layers in enumerate(epochs):
            stat = layers[0]
            if not overlay:
                ax = self._axes[i]

            # restrict the clusters to this effect when the Dataset
            # distinguishes effects
            if clusters_:
                if 'effect' in clusters_:
                    cs = clusters_.sub('effect == %r' % stat.name)
                else:
                    cs = clusters_
            else:
                cs = None

            cax = _ax_uts_clusters(ax, stat, cs, colors[i], pmax, ptrend, xdim,
                                   axtitle)
            self._caxes.append(cax)

        self._configure_yaxis(epochs[0][0], True)
        self._configure_xaxis_dim(xdim, True, xticklabels)
        self.clusters = clusters_
        self._show()

    def _fill_toolbar(self, tb):
        # "Clusters" info button on the wx toolbar (wx imported lazily)
        import wx
        btn = wx.Button(tb, wx.ID_ABOUT, "Clusters")
        tb.AddControl(btn)
        btn.Bind(wx.EVT_BUTTON, self._OnShowClusterInfo)

    def _OnShowClusterInfo(self, event):
        # show the cluster Dataset as text in a dialog
        from .._wxutils import show_text_dialog
        info = str(self.clusters)
        show_text_dialog(self._frame, info, "Clusters")

    def set_pmax(self, pmax=0.05, ptrend=0.1):
        "set the threshold p-value for clusters to be displayed"
        for cax in self._caxes:
            cax.set_pmax(pmax, ptrend)
        self.draw()
def _ax_uts(ax, layers, title, bottom=None, top=None, invy=False, color=None,
            xdim='time'):
    # Plot all layers of one epoch into ``ax``; the first layer determines
    # the x-axis range and the title name.
    overlay = False
    for l in layers:
        args = _base.find_uts_args(l, overlay, color)
        overlay = True
        if args is None:
            continue

        _plt_uts(ax, l, xdim=xdim, **args)
        contours = l.info.get('contours', None)
        if contours:
            for v, color in contours.iteritems():
                # NOTE(review): ``v`` is a key of ``contours``, so this test
                # is always True and the axhline below is never drawn --
                # looks like a bug; confirm intent before changing.
                if v in contours:
                    continue
                contours[v] = ax.axhline(v, color=color)

    l0 = layers[0]
    x = l0.get_dim(xdim)
    ax.set_xlim(x[0], x[-1])

    if title:
        if 'name' in title:
            title = title.format(name=l0.name)
        ax.set_title(title)

    # invy swaps the auto limits; explicit bottom/top take precedence
    if invy:
        y0, y1 = ax.get_ylim()
        bottom = bottom if (bottom is not None) else y1
        top = top if (top is not None) else y0
    if (bottom is not None) or (top is not None):
        ax.set_ylim(bottom, top)
def _plt_uts(ax, ndvar, color=None, xdim='time', kwargs=None):
    """Plot a single NDVar as a line, plus any hlines from its info dict.

    Fix: ``kwargs`` previously defaulted to a shared mutable ``{}`` that the
    function mutated (``kwargs['color'] = color``), so the color of one call
    leaked into every later call relying on the default.  A fresh copy is
    used instead; passing a dict explicitly still works unchanged.
    """
    kwargs = dict(kwargs) if kwargs else {}
    y = ndvar.get_data((xdim,))
    x = ndvar.get_dim(xdim).x
    if color is not None:
        kwargs['color'] = color
    ax.plot(x, y, **kwargs)

    # horizontal marker lines declared in the NDVar's info dict
    for y, kwa in _base.find_uts_hlines(ndvar):
        if color is not None:
            kwa['color'] = color
        ax.axhline(y, **kwa)
class _ax_uts_clusters:
    # One axes of a UTSClusters figure: the stat line plus cluster shading.
    def __init__(self, ax, Y, clusters, color=None, pmax=0.05, ptrend=0.1,
                 xdim='time', title=None):
        uts_args = _base.find_uts_args(Y, False, color)
        self._bottom, self._top = _base.find_vlim_args(Y)
        if title:
            if '{name}' in title:
                title = title.format(name=Y.name)
            ax.set_title(title)

        _plt_uts(ax, Y, xdim=xdim, **uts_args)
        # draw a zero line when the data crosses zero
        if np.any(Y.x < 0) and np.any(Y.x > 0):
            ax.axhline(0, color='k')

        # pmap: shading for the clusters, updatable via set_pmax/set_clusters
        self.cluster_plt = _plt_uts_clusters(ax, clusters, pmax, ptrend, color)

        # save ax attr
        self.ax = ax
        x = Y.get_dim(xdim).x
        self.xlim = (x[0], x[-1])

        ax.set_xlim(*self.xlim)
        ax.set_ylim(bottom=self._bottom, top=self._top)

    def set_clusters(self, clusters):
        # delegate to the cluster sub-plot (triggers a redraw)
        self.cluster_plt.set_clusters(clusters)

    def set_pmax(self, pmax=0.05, ptrend=0.1):
        # delegate to the cluster sub-plot (triggers a redraw)
        self.cluster_plt.set_pmax(pmax, ptrend)
class _plt_uts_clusters:
    """Manage cluster shading (axvspan patches) on a single axes.

    clusters : Dataset
        Dataset with entries for 'tstart', 'tstop' and 'p'.
    """
    def __init__(self, ax, clusters, pmax, ptrend, color=None, hatch='/'):
        self.pmax = pmax
        self.ptrend = ptrend
        self.h = []  # currently drawn patches
        self.ax = ax
        self.clusters = clusters
        self.color = color
        self.hatch = hatch
        self.update()

    def set_clusters(self, clusters, update=True):
        self.clusters = clusters
        if update:
            self.update()

    def set_color(self, color, update=True):
        self.color = color
        if update:
            self.update()

    def set_pmax(self, pmax, ptrend, update=True):
        self.pmax = pmax
        self.ptrend = ptrend
        if update:
            self.update()

    def update(self):
        """Remove existing patches and redraw from the current settings."""
        while self.h:
            self.h.pop().remove()

        if self.clusters is None:
            return

        # clusters with a p-value above this threshold are skipped entirely
        p_include = self.ptrend or self.pmax
        for case in self.clusters.itercases():
            if 'p' in case:
                p_value = case['p']
                if p_value > p_include:
                    continue
                # solid for significant clusters, faint for trends
                opacity = 0.5 if p_value < self.pmax else 0.2
            else:
                opacity = 0.5
            patch = self.ax.axvspan(case['tstart'], case['tstop'],
                                    color=self.color,  # , hatch=self.hatch,
                                    fill=True, alpha=opacity, zorder=-1)
            self.h.append(patch)
class _plt_uts_stat(object):
    # One cell's central-tendency line plus its error band (or all traces).
    def __init__(self, ax, x, y, main, error, dev_data, color, label):
        # plot main: ``main`` must be a callable reducer unless error=='all'
        if hasattr(main, '__call__'):
            y_main = main(y, axis=0)
            lw = mpl.rcParams['lines.linewidth']
            if error == 'all':
                # thicker main line so it stands out over individual traces
                lw *= 2
            self.main = ax.plot(x, y_main, color=color, label=label, lw=lw,
                                zorder=5)
        elif error == 'all':
            self.main = None
        else:
            raise ValueError("Invalid argument: main=%r" % main)

        # plot error
        if error == 'all':
            # every individual trace, translucent
            self.error = ax.plot(x, y.T, color=color, alpha=0.3)
        elif error:
            if error == 'data':
                # dev_data was pre-computed by the caller (pooled error)
                pass
            elif hasattr(error, '__call__'):
                dev_data = error(y, axis=0)
            else:
                dev_data = stats.variability(y, None, None, error, False)
            lower = y_main - dev_data
            upper = y_main + dev_data
            self.error = ax.fill_between(x, lower, upper, color=color, alpha=0.3,
                                         linewidth=0, zorder=0)
        else:
            self.error = None
|
import json, sys, codecs, copy
# 2017-07-24
# A script that takes a json file and outputs a modified json file
# (changes the structure and replaces null values with proper strings)
# sys.argv[0] -- name of the python script
# sys.argv[1] -- arg1
# sys.argv[2] -- arg2
inputFile = sys.argv[1]
outputFile = sys.argv[2]
# read json file
with codecs.open(inputFile, 'r', encoding='utf8') as infile:
data=infile.read()
# load json into a dict
dump = json.loads(data)
voucherDict = dump["vourcherList"] # Note: vou_R_cher (typo)
# create a modified json string
outJsonString = '{"vouchers":['
for voucher in voucherDict:
lineList = voucher["voucherLines"]
for transactionDict in lineList:
newVoucher = copy.deepcopy( {**transactionDict, **voucher} )
del newVoucher["voucherLines"]
json_string = json.dumps(newVoucher)
outJsonString += json_string
outJsonString += ","
outJsonString += " {}]}"
outJsonString = outJsonString.replace(' null',' "thenullval"') #replace null values
# create a new dict
outJsonDict = json.loads(outJsonString)
# write json file
with codecs.open(outputFile, 'w', encoding='utf8') as outfile:
data = json.dumps(outJsonDict, ensure_ascii=False)
outfile.write(data)
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter
import warnings
warnings.simplefilter('ignore', np.RankWarning)
def vis_accuracy(X, Y, title1='', title2='', xlab='', ylab=''):
    '''
    Scatter X-values against Y-values, with marginal best-fit accuracy
    curves along each axis.

    Arg
        X = a list of tuple where a tuple = (X_values, accuracy)
        Y = a list of tuple where a tuple = (y_values, accuracy)
    '''
    # data for the scatter plot (values only; accuracy unused here)
    x = list(map(lambda x: x[0], X))
    y = list(map(lambda x: x[0], Y))
    # data for the best fit plot (sorted by value so the fit grid is ordered)
    x2, acc_x = list(zip(*sorted(X, key=lambda x: x[0])))
    y2, acc_y = list(zip(*sorted(Y, key=lambda x: x[0])))

    nullfmt = NullFormatter()  # no labels

    # definitions for the axes (figure-fraction coordinates)
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02

    rect_scatter = [left, bottom, width, height]
    rect_linex = [left, bottom_h, width, 0.2]
    rect_liney = [left_h, bottom, 0.2, height]

    # start with a rectangular Figure
    plt.figure(1, figsize=(8, 8))

    axScatter = plt.axes(rect_scatter)
    axLinex = plt.axes(rect_linex)  # , sharey=axScatter)
    axLiney = plt.axes(rect_liney)  # , sharex=axScatter)

    # hide tick labels on the marginal plots' shared edges
    axLinex.xaxis.set_major_formatter(nullfmt)
    axLiney.yaxis.set_major_formatter(nullfmt)

    # the scatter plot:
    # NOTE(review): ``grid(b=...)`` was removed in matplotlib 3.6 (now
    # ``visible=``) -- confirm the pinned matplotlib version.
    axScatter.scatter(x, y)
    axScatter.grid(b=True, which='major', color='gray', linestyle='--')
    axScatter.set_xlabel(xlab)
    axScatter.set_ylabel(ylab)

    # best-fit accuracy curves: cubic polynomial fit evaluated on an
    # evenly spaced grid spanning the data range
    num_pts = 100; order = 3
    coeffs_x = np.polyfit(x2, acc_x, order)
    x3 = np.arange(num_pts+1)*(np.max(x2)-np.min(x2))/num_pts + np.min(x2)
    fit_x = np.polyval(coeffs_x, x3)
    coeffs_y = np.polyfit(y2, acc_y, order)
    y3 = np.arange(num_pts+1)*(np.max(y2)-np.min(y2))/num_pts + np.min(y2)
    fit_y = np.polyval(coeffs_y, y3)

    # plot the curve and place dots on the curve
    axLinex.plot(x3, fit_x)
    axLinex.scatter(x2, acc_x)
    axLinex.grid(b=True, which='major', color='gray', linestyle='--')
    axLinex.set_title(title1)
    axLiney.plot(fit_y, y3)
    axLiney.scatter(acc_y, y2)
    axLiney.grid(b=True, which='major', color='gray', linestyle='--')
    axLiney.set_title(title2)

    plt.show()
"""
Vault as in fuzzy vault
:var self.vault_original_minutiae: list of representation of minutiae without chaff points
:var self.vault_chaff_points: list of representation of chaff points
"""
import random
from Polynomial_Generator import PolynomialGenerator
from Geometric_Hashing_Transformer import GHTransformer
from Constants import CHECK_CHAFF_POINT_MAPPING
class VaultElement:
    """A single (x, y) pair stored in a (fuzzy) vault.

    ``x_rep`` is the first element of the tuple (e.g. a minutia
    representation); ``y_rep`` the second (e.g. the polynomial evaluated
    at the minutia representation).
    """

    def __init__(self, x_rep, y_rep):
        self.x_rep = x_rep
        self.y_rep = y_rep

    def __str__(self):
        return '({}, {})\n'.format(self.x_rep, self.y_rep)

    def __repr__(self):
        return '{}({}, {})'.format(type(self).__name__, self.x_rep, self.y_rep)
class Vault:
    """A fuzzy vault: genuine minutia representations hidden among chaff points."""

    def __init__(self):
        # list of vault elements (tuples)
        self.vault_final_elements_pairs = []
        # used in encoding and decoding. while decoding original_minutiae are candidate_minutiae
        self.vault_original_minutiae_rep = []
        # only used in encoding
        self.vault_chaff_points_rep = []
        # used for decoding vault. all points that correspond to original_minutiae_rep are stored here
        self.vault_function_points_rep = []
        # table for geometric hashing
        self.geom_table = []
        self.clear_vault()

    def add_minutia_rep(self, minutia_rep):
        """ Add minutia to vault
        :param minutia_rep is a uint representation of a minutia """
        self.vault_original_minutiae_rep.append(minutia_rep)

    def add_chaff_point_rep(self, chaff_point_rep):
        """ Add chaff point to vault
        :param chaff_point_rep is a uint representation of a generated minutia """
        self.vault_chaff_points_rep.append(chaff_point_rep)

    def add_vault_element(self, vault_element: VaultElement):
        """ Append a finished (x, y) pair to the final vault. """
        self.vault_final_elements_pairs.append(vault_element)

    def add_function_point_rep(self, function_point_rep):
        """ Add function point that corresponds to a original_minutiae_rep to vault_function_points for decoding
        :param function_point_rep is a uint representation of a polynomial mapping minutia """
        self.vault_function_points_rep.append(function_point_rep)

    def clear_vault(self):
        """ Clear all lists in vault """
        self.vault_final_elements_pairs.clear()
        self.vault_original_minutiae_rep.clear()
        self.vault_chaff_points_rep.clear()
        self.vault_function_points_rep.clear()
        self.geom_table.clear()

    def evaluate_polynomial_on_minutiae(self, poly_generator: PolynomialGenerator, echo=False):
        """ Evaluate polynomial on original minutiae in vault_minutiae and save to vault_elements_pairs
        :param poly_generator: generator containing polynomial
        :param echo: print progress dots when True """
        for minutia_rep in self.vault_original_minutiae_rep:
            vault_element = VaultElement(minutia_rep, poly_generator.evaluate_polynomial_gf_2(minutia_rep))
            self.add_vault_element(vault_element)
            if echo:
                print("...", end="")
        if echo:
            print("\nFinish evaluating polynomial of vault elements")

    def evaluate_random_on_chaff_points(self, poly_generator: PolynomialGenerator, m):
        """ Generate random evaluation of chaff points for second element of VaultElement (X,Y)
        Random points Y do not lie on polynomial(X) = Y
        :param poly_generator: generator containing polynomial
        :param m describes largest number exponential 2**m """
        # gets vault elements from original minutiae to generate similar values
        # (mimic the digit-length distribution of the genuine y-values so
        # chaff points are harder to distinguish)
        min_digits = 9
        max_digits = 10
        max_number = 2 ** m
        if self.vault_final_elements_pairs:
            digits_list = []
            for element in self.vault_final_elements_pairs:
                digits_list.append(len(str(element.y_rep)))
            min_digits = int(min(digits_list))
            max_digits = int(max(digits_list))
        # generate random Y and check if Y = polynomial(X) where X = chaff_point
        for chaff_point in self.vault_chaff_points_rep:
            y_candidate = 0
            # check for on_polynomial normally omitted due to performance reasons
            if CHECK_CHAFF_POINT_MAPPING:
                y_real = poly_generator.evaluate_polynomial_gf_2(chaff_point)
                on_polynomial = True
            else:
                y_real = 0
                on_polynomial = False
            # retry until the candidate is non-zero, within range, and (when
            # checking is enabled) does not lie on the secret polynomial
            while on_polynomial or y_candidate > max_number or y_candidate == 0:
                y_candidate = self.random_int_digits(min_digits, max_digits)
                if y_real != y_candidate:
                    on_polynomial = False
            self.add_vault_element(VaultElement(chaff_point, y_candidate))

    def finalize_vault(self):
        """ Delete vault_original_minutiae and vault_chaff_points, scramble vault_final_elements_pairs"""
        self.vault_original_minutiae_rep.clear()
        self.vault_chaff_points_rep.clear()
        random.shuffle(self.vault_final_elements_pairs)

    def random_int_digits(self, min_digits, max_digits):
        """ Helper function to generate random integer between min_digits and max_digits
        Example: :param min_digits = 3, max_digits = 5: random integer between 100 and 99999 """
        range_start = 10 ** (min_digits - 1)
        range_end = (10 ** max_digits) - 1
        return random.randint(range_start, range_end)

    def get_smallest_original_minutia(self):
        """ Get smallest original minutia for better chaff points creation
        :returns smallest original minutia in uint """
        return min(self.vault_original_minutiae_rep)

    def create_geom_table(self):
        """ Build the geometric-hashing enrollment table from the final element pairs. """
        self.geom_table = GHTransformer.generate_enrollment_table(self.vault_final_elements_pairs)
|
import textdistance

# Hamming distance between two equal-length strings ('test' vs 'text' -> 1).
distance = textdistance.hamming('test', 'text')
print(distance)
|
import pytest
from django.contrib.auth.models import User
@pytest.fixture()
def create_user(db):
    # Minimal user fixture; the ``db`` fixture grants test-database access.
    # No password is set, so this user cannot authenticate.
    user = User.objects.create_user(
        username='test',
        email='test@email.com'
    )
    print('Creating User')
    return user
@pytest.fixture()
def new_user_factory(db):
    """Factory fixture returning a function that creates a ``User``.

    Fix: the inner function previously used ``User.objects.create``, which
    stores the password as plain text (``authenticate``/login would fail).
    ``User.objects.create_user`` hashes it properly; all other fields pass
    through unchanged.
    """
    # inner function
    def create_app_user(
        username: str,
        password: str = None,
        first_name: str = "firstname",
        last_name: str = "lastname",
        email: str = "user@email.com",
        is_staff: bool = False,
        is_superuser: bool = False,
        is_active: bool = True
    ):
        user = User.objects.create_user(
            username=username,
            password=password,
            first_name=first_name,
            last_name=last_name,
            email=email,
            is_staff=is_staff,
            is_superuser=is_superuser,
            is_active=is_active
        )
        return user
    return create_app_user
# include db as we need to access database
@pytest.fixture
def create_new_user(db, new_user_factory):
    # Convenience fixture: a staff user built through the factory above
    # (positional args: username, password, first_name).
    return new_user_factory(
        'Test User',
        'password',
        'user_firstname',
        is_staff=True
    )
|
import asyncio
class AsyncFile:
    """Async wrapper around a text file.

    Blocking file operations are delegated to a worker thread via
    ``asyncio.to_thread``.  Supports ``async with`` (open/close) and
    ``async for`` (line-by-line iteration).
    """

    def __init__(self, filename):
        self.filename = filename

    async def __aenter__(self):
        # open in a worker thread so the event loop is never blocked
        self.file = await asyncio.to_thread(open, self.filename, encoding="utf8")
        return self

    async def __aexit__(self, ext, exc, tb):
        await asyncio.to_thread(self.file.close)

    async def read(self):
        return await asyncio.to_thread(self.file.read)

    async def __aiter__(self):
        # readline returns '' at EOF, which ends the loop
        while line := await asyncio.to_thread(self.file.readline):
            yield line
async def main():
    # Demonstrate AsyncFile by reading a sibling script and printing it.
    cm = AsyncFile("hello_async.py")  ## __init__
    async with cm as f:  ## __aenter__ / __aexit__
        content = await f.read()
        print(content)

if __name__ == "__main__":
    asyncio.run(main())
# Generated by Django 2.0.7 on 2020-08-13 12:09
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: sets the human-readable (Chinese) verbose
    # names for the drugdetail model and pins its database table name.

    dependencies = [
        ('aid', '0004_auto_20200813_1146'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='drugdetail',
            options={'verbose_name': '药品详情', 'verbose_name_plural': '药品详情'},
        ),
        migrations.AlterModelTable(
            name='drugdetail',
            table='drugdetail',
        ),
    ]
|
import datetime, time
import os,sys,shlex
import re
import socket, select, fcntl
import json, testlink
from robot.api import logger
# Indices into SocketInfo.socket_desc: one command and one status socket
# per server (SGL, STL, RCS, VMS).
TSP_SGL_COMMAND_INDEX = 0
TSP_SGL_STATUS_INDEX = 1
TSP_STL_COMMAND_INDEX = 2
TSP_STL_STATUS_INDEX = 3
TSP_RCS_COMMAND_INDEX = 4
TSP_RCS_STATUS_INDEX = 5
TSP_VMS_COMMAND_INDEX = 6
TSP_VMS_STATUS_INDEX = 7
# select() timeout (seconds) and receive buffer limits (bytes).
TSP_SELECT_TIME_OUT = 5
TSP_BUFFER_SIZE = 1024
TSP_MAX_BUFFER_SIZE = 1024000
# per-driver state -- presumably keyed by driver name; TODO confirm
driver_data ={}
class SocketInfo(object):
    # Holder for open sockets.  NOTE(review): these class-level attributes
    # are shared by all instances, and the module below binds the *class*
    # itself (``g_socket_info = SocketInfo``), so in practice the class
    # attributes -- not the __init__ copies -- are what gets used.
    socket_id = []
    socket_desc = [None] * 10

    def __init__( self):
        # per-instance copies (only effective if the class is instantiated)
        self.socket_id = []
        self.socket_desc = [None] * 10
# NOTE(review): binds the class itself, not an instance (no parentheses),
# so all access goes through SocketInfo's shared class attributes --
# confirm this is intended before "fixing" it to ``SocketInfo()``.
g_socket_info = SocketInfo
g_json_directory = {}
# presumably session-user names keyed by connection -- TODO confirm
g_user_name= {}
class TSP(object):
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'#'GLOBAL'#
def __init__( self):
self.start_tc = None
self.end_tc = None
self.vod_val_list = []
self.sgl_status_json = None
self.stl_status_json = None
self.is_sgl_successful = None
self.is_rcs_successful = None
self.is_stl_successful = None
self.is_vms_successful = None
self.is_test_case_passed = None
self.m_test_project = None
self.m_test_build = None
self.m_test_plan = None
self.m_test_case_id = None
self.m_test_suite_id = None
@staticmethod
def Terminate_Sessions():
logger.console("Stopping SGL RCS SPS and VMS Servers", newline=True)
logger.info("Stopping SGL RCS SPS and VMS Servers", html=True)
try:
g_socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send('{ "STOP": "STOP" }')
except (AttributeError, socket.error):
logger.console("Sending STOP command failed to SGL", newline=True)
logger.info("Sending STOP command failed to SGL", html=True)
try:
g_socket_info.socket_desc[TSP_STL_COMMAND_INDEX].send('{ "STOP": "STOP" }')
except (AttributeError, socket.error):
logger.console("Sending STOP command failed to STL", newline=True)
logger.info("Sending STOP command failed to STL", html=True)
try:
g_socket_info.socket_desc[TSP_RCS_COMMAND_INDEX].send('{ "STOP": "STOP" }')
except (AttributeError, socket.error):
logger.console("Sending STOP command failed to RCS", newline=True)
logger.info("Sending STOP command failed to RCS", html=True)
try:
g_socket_info.socket_desc[TSP_VMS_COMMAND_INDEX].send('{ "STOP": "STOP" }')
except (AttributeError, socket.error):
logger.console("Sending STOP command failed to VMS", newline=True)
logger.info("Sending STOP command failed to VMS", html=True)
logger.console("SGL RCS SPS and VMS Servers stopped", newline=True)
logger.info("SGL RCS SPS and VMS Servers stopped", html=True)
time.sleep (2)
logger.console("Terminating Session with SGL RCS SPS and VMS Servers.", newline=True)
logger.info("Terminating Session with SGL RCS SPS and VMS Servers.", html=True)
sending_string = "{ \"TERMINATE_SESSION\": \""
sending_string += "TERMINATE_SESSION"
sending_string += '\" }'
try:
g_socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send(sending_string)
except (AttributeError, socket.error):
logger.console("Sending STOP command failed to SGL", newline=True)
logger.info("Sending STOP command failed to SGL", html=True)
try:
g_socket_info.socket_desc[TSP_STL_COMMAND_INDEX].send(sending_string)
except (AttributeError, socket.error):
logger.console("Sending STOP command failed to STL", newline=True)
logger.info("Sending STOP command failed to STL", html=True)
try:
g_socket_info.socket_desc[TSP_RCS_COMMAND_INDEX].send(sending_string)
except (AttributeError, socket.error):
logger.console("Sending STOP command failed to RCS", newline=True)
logger.info("Sending STOP command failed to RCS", html=True)
try:
g_socket_info.socket_desc[TSP_VMS_COMMAND_INDEX].send(sending_string)
except (AttributeError, socket.error):
logger.console("Sending STOP command failed to VMS", newline=True)
logger.info("Sending STOP command failed to VMS", html=True)
logger.console("SGL RCS SPS and VMS Servers are terminated.", newline=True)
logger.info("SGL RCS SPS and VMS Servers are terminated.", html=True)
@staticmethod
def Establish_Connection(sgl_cmd, sgl_status, stl_cmd, stl_status, rcs_cmd, rcs_status, vms_cmd, vms_status):
port_seg_cmd = int(sgl_cmd)
port_seg_status = int(sgl_status)
port_stl_cmd = int(stl_cmd)
port_stl_status = int(stl_status)
port_rcs_cmd = int(rcs_cmd)
port_rcs_status = int(rcs_status)
port_vms_cmd = int(vms_cmd)
port_vms_status = int(vms_status)
logger.console("Establish Connection with SGL RCS SPS and VMS Servers", newline=True)
host = ''
max_wait_queue = 10 # Number of clients on wait.
count_servers = 0
max_count = 8
try:
for item in port_seg_cmd, port_seg_status, port_stl_cmd, port_stl_status, port_rcs_cmd, port_rcs_status, \
port_vms_cmd, port_vms_status:
g_socket_info.socket_id.append( socket.socket(socket.AF_INET, socket.SOCK_STREAM))
g_socket_info.socket_id[-1].setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 5000000)
g_socket_info.socket_id[-1].setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 5000000)
g_socket_info.socket_id[-1].setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
g_socket_info.socket_id[-1].setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
g_socket_info.socket_id[-1].bind((host, item))
g_socket_info.socket_id[-1].listen(max_wait_queue)
except socket.error, (value, message):
if g_socket_info.socket_id[-1]:
g_socket_info.socket_id[-1].close()
g_socket_info.socket_id = g_socket_info.socket_id[:-1]
logger.console(message, newline=True)
logger.console('Could not open socket: ',newline=True)
sys.exit(1)
while True:
read, write, error = select.select(g_socket_info.socket_id,[],[], 10)
for r in read:
for item in g_socket_info.socket_id:
if r == item:
accepted_socket, adress = item.accept()
g_socket_info.socket_id.append(accepted_socket)
fcntl.fcntl(accepted_socket, fcntl.F_SETFL, os.O_NONBLOCK)
logger.console("Accepted connection from %s" % str(accepted_socket.getsockname()), newline=True)
if item == g_socket_info.socket_id[TSP_SGL_COMMAND_INDEX]:
g_socket_info.socket_desc[TSP_SGL_COMMAND_INDEX] = accepted_socket
if item == g_socket_info.socket_id[TSP_SGL_STATUS_INDEX]:
g_socket_info.socket_desc[TSP_SGL_STATUS_INDEX] = accepted_socket
if item == g_socket_info.socket_id[TSP_STL_COMMAND_INDEX]:
g_socket_info.socket_desc[TSP_STL_COMMAND_INDEX] = accepted_socket
if item == g_socket_info.socket_id[TSP_STL_STATUS_INDEX]:
g_socket_info.socket_desc[TSP_STL_STATUS_INDEX] = accepted_socket
if item == g_socket_info.socket_id[TSP_RCS_COMMAND_INDEX]:
g_socket_info.socket_desc[TSP_RCS_COMMAND_INDEX] = accepted_socket
if item == g_socket_info.socket_id[TSP_RCS_STATUS_INDEX]:
g_socket_info.socket_desc[TSP_RCS_STATUS_INDEX] = accepted_socket
if item == g_socket_info.socket_id[TSP_VMS_COMMAND_INDEX]:
g_socket_info.socket_desc[TSP_VMS_COMMAND_INDEX] = accepted_socket
if item == g_socket_info.socket_id[TSP_VMS_STATUS_INDEX]:
g_socket_info.socket_desc[TSP_VMS_STATUS_INDEX] = accepted_socket
count_servers = count_servers + 1
if max_count == count_servers:
break
logger.console("Connections Established Successfully", newline=True)
logger.info("Connections Established Successfully", html=True)
@staticmethod
def Close_Sockets():
g_socket_info.socket_id[TSP_SGL_COMMAND_INDEX].close()
g_socket_info.socket_id[TSP_SGL_STATUS_INDEX].close()
g_socket_info.socket_id[TSP_STL_COMMAND_INDEX].close()
g_socket_info.socket_id[TSP_STL_STATUS_INDEX].close()
g_socket_info.socket_id[TSP_RCS_COMMAND_INDEX].close()
g_socket_info.socket_id[TSP_RCS_STATUS_INDEX].close()
g_socket_info.socket_id[TSP_VMS_COMMAND_INDEX].close()
g_socket_info.socket_id[TSP_VMS_STATUS_INDEX].close()
logger.console("Socket Connections closed Successfully", newline=True)
logger.info("Socket Connections closed Successfully", html=True)
    @staticmethod
    def Mount_Segments(socket_info, mount_path):
        """Send a mount command to VMS and wait for its MOUNT status.

        Raises AssertionError on connection loss or MOUNT failure.
        NOTE(review): only a single select() pass is made (both trailing
        breaks), so a timeout with no data returns silently -- confirm
        this is intended.
        """
        socket_info.socket_desc[TSP_VMS_COMMAND_INDEX].send(mount_path)
        logger.console("Mount Command Sent to Validation Matrix Server", newline=True)
        logger.info("Mount Command Sent to Validation Matrix Server", html=True)
        while True:
            read, write, error = select.select(socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                # Only react to data arriving on the VMS status channel.
                if recv_sock.getsockname() == socket_info.socket_desc[TSP_VMS_STATUS_INDEX].getsockname():
                    vms_status = TSP.recvall(recv_sock)
                    if not vms_status:
                        # Empty read == peer closed the connection.
                        logger.info("Connection lost with VMS", html=True)
                        logger.console("Connection lost with VMS", newline=True)
                        raise AssertionError
                    logger.console(vms_status,newline=True)
                    json_vms_status = json.loads(vms_status)
                    if 'MOUNT' in json_vms_status:
                        if json_vms_status["MOUNT"] == "SUCCESS":
                            logger.console("Segments Mounted Successfully", newline=True)
                            logger.info("Segments Mounted Successfully", html=True)
                            return
                        else:
                            logger.console("Mounting Segments Failed", newline=True)
                            logger.info("Mounting Segments Failed", html=True)
                            raise AssertionError
                    break
            break
    @staticmethod
    def UnMount_Segments(socket_info, unMountPath):
        """Send an unmount command to VMS and wait for its UNMOUNT status.

        Best-effort: send failures and unmount failures log and return
        instead of raising (only a lost connection on a non-status socket
        still raises, per the else branch below).
        """
        try:
            socket_info.socket_desc[TSP_VMS_COMMAND_INDEX].send(unMountPath)
        except (AttributeError, socket.error):
            # AttributeError covers a never-connected slot (desc is None).
            logger.console("Sending STOP command failed to VMS", newline=True)
            logger.info("Sending STOP command failed to VMS", html=True)
            return
        logger.console("Un-Mount Command Sent to Validation Matrix Server",newline=True)
        logger.info("Un-Mount Command Sent to Validation Matrix Server", html=True)
        while True:
            read, write, error = select.select(socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                if recv_sock.getsockname() == socket_info.socket_desc[TSP_VMS_STATUS_INDEX].getsockname():
                    vms_status = TSP.recvall(recv_sock)
                    if not vms_status:
                        logger.info("Connection lost with VMS", html=True)
                        logger.console("Connection lost with VMS", newline=True)
                        return
                    logger.console(vms_status,newline=True)
                    json_vms_status = json.loads(vms_status)
                    if 'UNMOUNT' in json_vms_status:
                        if json_vms_status["UNMOUNT"] == "SUCCESS":
                            logger.console("Segments Un-Mounted Successfully", newline=True)
                            logger.info("Segments Un-Mounted Successfully", html=True)
                            return
                        else:
                            logger.console("Un-Mounting Segments Failed", newline=True)
                            logger.info("Un-Mounting Segments Failed", html=True)
                            #raise AssertionError
                            return
                    break
                else:
                    # Data from some other channel: drain and log it so the
                    # select loop does not spin on it.
                    vms_status = TSP.recvall(recv_sock)
                    if not vms_status:
                        logger.info("Connection lost with VMS", html=True)
                        logger.console("Connection lost with VMS", newline=True)
                        raise AssertionError
                    logger.console(vms_status,newline=True)
            break
    @staticmethod
    def Mount_Streams(socket_info, mount_path):
        """Send a mount command to the Segmenter (SGL) and wait for its
        MOUNT status.

        Best-effort: send failures and mount failures log and return.
        NOTE(review): only a single select() pass is made (trailing break).
        """
        try:
            socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send(mount_path)
        except (AttributeError, socket.error):
            logger.console("Sending mount command failed to Segmenter", newline=True)
            logger.info("Sending mount command failed to Segmenter", html=True)
            return
        logger.console("Mount Command Sent to Segmenter", newline=True)
        logger.info("Mount Command Sent to Segmenter", html=True)
        while True:
            read, write, error = select.select(socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                if recv_sock.getsockname() == socket_info.socket_desc[TSP_SGL_STATUS_INDEX].getsockname():
                    seg_status = TSP.recvall(recv_sock)
                    if not seg_status:
                        logger.info("Connection lost with SGL", html=True)
                        logger.console("Connection lost with SGL", newline=True)
                        raise AssertionError
                    logger.console(seg_status,newline=True)
                    json_sgl_status = json.loads(seg_status)
                    # SGL wraps its payload under "Status Message".
                    if 'MOUNT' in json_sgl_status["Status Message"]:
                        mount_json = json_sgl_status["Status Message"]
                        if mount_json["MOUNT"] == "SUCCESS":
                            logger.console("Streams Mounted Successfully", newline=True)
                            logger.info("Streams Mounted Successfully", html=True)
                            return
                        else:
                            logger.console("Mounting Streams Failed", newline=True)
                            logger.info("Mounting Streams Failed", html=True)
                            #raise AssertionError
                            return
                    break
            break
    @staticmethod
    def UnMount_Streams(socket_info, unMountPath):
        """Send an unmount command to the Segmenter (SGL) and wait for its
        UNMOUNT status.

        NOTE(review): unlike Mount_Streams/UnMount_Segments, the send here
        is not wrapped in try/except, so a dead socket raises -- confirm
        whether that asymmetry is intentional.
        """
        socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send(unMountPath)
        logger.console("Un-Mount Command Sent to Segmenter",newline=True)
        logger.info("Un-Mount Command Sent to Segmenter", html=True)
        while True:
            read, write, error = select.select(socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                if recv_sock.getsockname() == socket_info.socket_desc[TSP_SGL_STATUS_INDEX].getsockname():
                    seg_status = TSP.recvall(recv_sock)
                    if not seg_status:
                        logger.info("Connection lost with SGL", html=True)
                        logger.console("Connection lost with SGL", newline=True)
                        return
                    logger.console(seg_status,newline=True)
                    json_sgl_status = json.loads(seg_status)
                    if 'UNMOUNT' in json_sgl_status["Status Message"]:
                        mount_json = json_sgl_status["Status Message"]
                        if mount_json["UNMOUNT"] == "SUCCESS":
                            logger.console("Streams Un-Mounted Successfully", newline=True)
                            logger.info("Streams Un-Mounted Successfully", html=True)
                            return
                        else:
                            logger.console("Un-Mounting Streams Failed", newline=True)
                            logger.info("Un-Mounting Streams Failed", html=True)
                            #raise AssertionError
                            return
                    break
            break
# Helper function to recv n bytes or return None if EOF is hit
@staticmethod
def recvall(recv_sock):
data = ''
while len(data) < TSP_MAX_BUFFER_SIZE:
try:
packet = recv_sock.recv(TSP_MAX_BUFFER_SIZE - len(data))
except socket.error, (value, message):
break
if not packet:
break
data += packet
return data
def Send_SegmenterCommand(self, sgl_cmd):
self.start_tc = time.time()
try:
json_object = json.loads(sgl_cmd)
except ValueError, e:
logger.console("Segmenter command is invalid json object, EXITING with out sending.", newline=True)
logger.console(sgl_cmd, newline=True)
sys.exit(1)
g_socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send(sgl_cmd)
logger.console("Segmenter Command is Sent to Segment launcher Successfully", newline=True)
logger.info("Segmenter Command is Sent to Segment launcher Successfully", html=True)
is_status_fm_sgl = False
while True:
read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
for recv_sock in read:
if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_SGL_STATUS_INDEX].getsockname():
sgl_status = self.recvall(recv_sock)
if not sgl_status:
logger.info("Connection lost with SGL", html=True)
logger.console("Connection lost with SGL", newline=True)
raise AssertionError
logger.console(sgl_status, newline=True)
sgl_json = json.loads(sgl_status)
if sgl_json["START_SEGMENTER"] == "STARTING_SEGMENTER_FAILED":
logger.console("Segment launcher failed to start Segmenter", newline=True)
logger.info("Segment launcher failed to start Segmenter", html=True)
self.sgl_status_json = sgl_json
raise AssertionError
is_status_fm_sgl = True
break
if is_status_fm_sgl:
break
logger.console("Segmenter Command is processed by Segment launcher Successfully", newline=True)
logger.info("Segmenter Command is processed by Segment launcher Successfully", html=True)
def Send_StreamPumpCommand(self, stl_cmd):
try:
json_object = json.loads(stl_cmd)
except ValueError, e:
logger.console("Stream pump command is invalid json object, EXITING with out sending.", newline=True)
logger.console(stl_cmd, newline=True)
sys.exit(1)
g_socket_info.socket_desc[TSP_STL_COMMAND_INDEX].send(stl_cmd)
logger.console("Stream Pump Command is Sent to Stream Pump Server Successfully", newline=True)
logger.info("Stream Pump Command is Sent to Stream Pump Server Successfully", html=True)
is_status_fm_stl = False
while True:
read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
for recv_sock in read:
if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_STL_STATUS_INDEX].getsockname():
stl_status = self.recvall(recv_sock)
if not stl_status:
logger.info("Connection lost with STL", html=True)
logger.console("Connection lost with STL", newline=True)
raise AssertionError
logger.console(stl_status, newline=True)
is_status_fm_stl = True
break
if is_status_fm_stl:
break
logger.console("Stream Pump Command is processed Successfully", newline=True)
logger.info("Stream Pump Command is processed Successfully", html=True)
def Send_RunTimeCommand(self, rcs_cmd):
try:
json_object = json.loads(rcs_cmd)
except ValueError, e:
logger.console("Run-Time server's command is invalid json object, EXITING with out sending.", newline=True)
logger.console(rcs_cmd, newline=True)
sys.exit(1)
is_status_fm_rcs = False
#g_socket_info.socket_desc[TSP_RCS_COMMAND_INDEX].send(rcs_cmd)
len_send_msg = len(rcs_cmd)
index = 0
while index < len_send_msg:
if ( index + (128*2) ) < len_send_msg:
slice_msg = rcs_cmd[index:index+(128*2)]
index = index + (128*2)
else:
slice_msg = rcs_cmd[index:len_send_msg]
index = len_send_msg
#logger.console(slice_msg, newline=True)
g_socket_info.socket_desc[TSP_RCS_COMMAND_INDEX].send(slice_msg)
logger.console("Run-Time Command is Sent to Run-Time Command Server Successfully", newline=True)
logger.info("Run-Time Command is Sent to Run-Time Command Server Successfully", html=True)
while True:
read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
for recv_sock in read:
if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_RCS_STATUS_INDEX].getsockname():
rcs_status = self.recvall(recv_sock)
if not rcs_status:
logger.info("Connection lost with RCS", html=True)
logger.console("Connection lost with RCS", newline=True)
raise AssertionError
logger.console(rcs_status, newline=True)
is_status_fm_rcs = True
break
if is_status_fm_rcs:
break
logger.console("Run-Time Command processed Successfully", newline=True)
logger.info("Run-Time Command processed Successfully", html=True)
def Send_ValidationCommand(self, vms_cmd):
try:
json_object = json.loads(vms_cmd)
except ValueError, e:
logger.console("Sending Invalid json command to VMS, EXITING.", newline=True)
logger.console(vms_cmd, newline=True)
sys.exit(1)
g_socket_info.socket_desc[TSP_VMS_COMMAND_INDEX].send(vms_cmd)
logger.console("Validation Command is Sent to Validation Matrix Server Successfully", newline=True)
logger.info("Validation Command is Sent to Validation Matrix Server Successfully", html=True)
is_status_fm_vms = False
while True:
read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
for recv_sock in read:
if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_VMS_STATUS_INDEX].getsockname():
vms_status = self.recvall(recv_sock)
if not vms_status:
logger.info("Connection lost with VMS", html=True)
logger.console("Connection lost with VMS", newline=True)
raise AssertionError
logger.console(vms_status, newline=True)
is_status_fm_vms = True
break
if is_status_fm_vms:
break
logger.console("Validation Command processed Successfully", newline=True)
logger.info("Validation Command processed Successfully", html=True)
    def Stop_Vms(self):
        """Send a STOP command to VMS and block until it acknowledges.

        Raises AssertionError if the VMS connection drops.
        """
        vms_cmd = '{ "STOP": "STOP" }'
        g_socket_info.socket_desc[TSP_VMS_COMMAND_INDEX].send(vms_cmd)
        logger.console("STOP Command is Sent to Validation Matrix Server Successfully", newline=True)
        logger.info("STOP Command is Sent to Validation Matrix Server Successfully", html=True)
        is_status_fm_vms = False
        while True:
            read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                # Only the VMS status channel counts as an acknowledgement.
                if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_VMS_STATUS_INDEX].getsockname():
                    vms_status = self.recvall(recv_sock)
                    if not vms_status:
                        logger.info("Connection lost with VMS", html=True)
                        logger.console("Connection lost with VMS", newline=True)
                        raise AssertionError
                    logger.console(vms_status, newline=True)
                    is_status_fm_vms = True
                    break
            if is_status_fm_vms:
                break
        logger.console("STOP Command processed Successfully", newline=True)
        logger.info("STOP Command processed Successfully", html=True)
    def Filebased_Validation_Process(self):
        """Wait for both SGL and VMS to report, then grade the test case.

        Polls both status channels; when one side finishes first, a STOP is
        pushed to the laggard (once).  Afterwards the SGL exit status and the
        VMS validation verdict are decoded, failures are itemised into the
        log, and AssertionError is raised when either side failed.
        NOTE(review): json_sgl_status / vms_status are only bound inside the
        loop -- the code relies on both arriving before the loop exits.
        """
        is_status_fm_sgl = False
        is_status_fm_vms = False
        is_sgl_stop_sent = False
        is_vms_stop_sent = False
        self.is_test_case_passed = None
        while True:
            read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_SGL_STATUS_INDEX].getsockname():
                    seg_status = TSP.recvall(recv_sock)
                    if not seg_status:
                        logger.info("Connection lost with SGL", html=True)
                        logger.console("Connection lost with SGL", newline=True)
                        raise AssertionError
                    logger.console(seg_status, newline=True)
                    json_sgl_status = json.loads(seg_status)
                    seg_status = json_sgl_status["Status Message"]
                    # Any START_SEGMENTER value means SGL has finished.
                    if seg_status["START_SEGMENTER"]:
                        is_status_fm_sgl = True
                if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_VMS_STATUS_INDEX].getsockname():
                    vms_status = TSP.recvall(recv_sock)
                    if not vms_status:
                        logger.info("Connection lost with VMS", html=True)
                        logger.console("Connection lost with VMS", newline=True)
                        raise AssertionError
                    logger.console(vms_status, newline=True)
                    is_status_fm_vms = True
            if is_status_fm_sgl is True and is_status_fm_vms is True:
                break
            elif is_status_fm_sgl is True or is_status_fm_vms is True:
                # One side is done: stop the other (at most once) so the
                # loop can converge.
                if is_status_fm_sgl is True and is_status_fm_vms is False:
                    if is_vms_stop_sent is False:
                        logger.console("Sending STOP to VMS",newline=True)
                        g_socket_info.socket_desc[TSP_VMS_COMMAND_INDEX].send('{ "STOP": "STOP" }')
                        #vms_cmd = '{ "STOP": "STOP" }'
                        #self.Send_ValidationCommand(vms_cmd)
                        #self.Stop_Vms()
                        is_vms_stop_sent = True
                if is_status_fm_vms is True and is_status_fm_sgl is False:
                    if is_sgl_stop_sent is False:
                        logger.console("Sending STOP to SGL",newline=True)
                        #sgl_cmd = '{ "STOP": "STOP" }'
                        #self.Send_SegmenterCommand(sgl_cmd)
                        g_socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send('{ "STOP": "STOP" }')
                        is_sgl_stop_sent = True
        # Record raw statuses for later reporting.
        self.sgl_status_json = json_sgl_status
        json_vms_status = json.loads(vms_status)
        self.stl_status_json = None
        self.vod_val_list.append(json_vms_status)
        seg_status = json_sgl_status["Status Message"]
        if seg_status["START_SEGMENTER"] == "EXITED_GRACEFULLY" :
            logger.info("Segmenter Exited Gracefully", html=True)
            self.is_sgl_successful = True
        else:
            logger.info("Segmenter Failed", html=True)
            self.is_sgl_successful = False
        #logger.console(json_vms_status, newline=True)
        if json_vms_status["OVER_ALL_STATUS"] == "VALIDATION_SUCCESS":
            self.is_vms_successful = True
            logger.info("Validation Successful", html=True)
        else:
            self.is_vms_successful = False
            logger.info("Validation Failed", html=True)
            # Itemise each failed validation entry into the log.
            json_vms_list = json_vms_status["VALIDATION_LIST"]
            for vms_obj in json_vms_list:
                json_diff_val = vms_obj["DIFF_VALIDATIONS"]
                for diff_val in json_diff_val:
                    if diff_val["OVER_ALL_STATUS"] == "VALIDATION_FAILED":
                        if diff_val["DIFFERED_SEGMENT"] != "":
                            logger.info("%s differed in %s" % (
                                str(diff_val["DIFFERED_SEGMENT"]), str(diff_val["VAL_ID"])), html=True)
                            #logger.console("%s differed in %s" % (
                            #str(diff_val["DIFFERED_SEGMENT"])), str(diff_val["VAL_ID"]))), newline=True)
                        elif diff_val["MISSING_FILES"] != "NO":
                            logger.info("Missing Files in %s" % str(diff_val["VAL_ID"]), html=True)
                            #logger.console("Missing Files in %s" % str(diff_val["VAL_ID"])), newline=True)
                    elif diff_val["OVER_ALL_STATUS"] == "MEDIA_STREAM_VALIDATION_FAILED":
                        logger.info("Media stream validation failed for \'%s\'" % (
                            str(str(diff_val["VAL_ID"]))), html=True)
                    elif diff_val["OVER_ALL_STATUS"] == "MPD_VALIDATOR_FAILED":
                        logger.info("MPD validator failed for \'%s\'" % (
                            str(str(diff_val["VAL_ID"]))), html=True)
                    #elif diff_val["OVER_ALL_STATUS"] == "VALIDATION_NOT_STARTED":
                        #pass
        # The test passes only if both the segmenter and validation passed.
        if self.is_sgl_successful is True and self.is_vms_successful is True:
            self.is_test_case_passed = True
        else:
            self.is_test_case_passed = False
            raise AssertionError
    def Portbased_Validation_Process(self):
        """Wait for SGL, STL and VMS to report, then grade the test case.

        Like Filebased_Validation_Process but with the stream pump (STL) in
        the loop.  When some servers finish early, STOP is pushed (once) to
        each laggard.  Afterwards all three statuses are decoded, failures
        itemised, and AssertionError raised unless all three succeeded.
        NOTE(review): sgl_status/stl_status/vms_status are only bound inside
        the loop -- the code relies on all three arriving before exit.
        """
        is_status_fm_sgl = False
        is_status_fm_vms = False
        is_status_fm_stl = False
        is_sgl_stop_sent = False
        is_stl_stop_sent = False
        is_vms_stop_sent = False
        while True:
            read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_SGL_STATUS_INDEX].getsockname():
                    sgl_status = TSP.recvall(recv_sock)
                    if not sgl_status:
                        logger.info("Connection lost with SGL", html=True)
                        logger.console("Connection lost with SGL", newline=True)
                        raise AssertionError
                    logger.console(sgl_status, newline=True)
                    json_sgl_status = json.loads(sgl_status)
                    seg_status = json_sgl_status["Status Message"]
                    if seg_status["START_SEGMENTER"]:
                        is_status_fm_sgl = True
                if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_STL_STATUS_INDEX].getsockname():
                    stl_status = TSP.recvall(recv_sock)
                    if not stl_status:
                        logger.info("Connection lost with STL", html=True)
                        logger.console("Connection lost with STL", newline=True)
                        raise AssertionError
                    logger.console(stl_status, newline=True)
                    is_status_fm_stl = True
                if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_VMS_STATUS_INDEX].getsockname():
                    vms_status = TSP.recvall(recv_sock)
                    if not vms_status:
                        logger.info("Connection lost with VMS", html=True)
                        logger.console("Connection lost with VMS", newline=True)
                        raise AssertionError
                    logger.console(vms_status, newline=True)
                    json_vms_status = json.loads(vms_status)
                    # Interim VMS chatter lacks OVER_ALL_STATUS; only the
                    # final verdict counts.
                    if 'OVER_ALL_STATUS' in json_vms_status:
                        is_status_fm_vms = True
            if is_status_fm_sgl is True and is_status_fm_stl is True and is_status_fm_vms is True:
                break
            elif is_status_fm_sgl is True or is_status_fm_stl is True or is_status_fm_vms is True:
                # At least one server finished: push STOP (once) to laggards.
                if is_status_fm_stl is True and is_status_fm_sgl is False:
                    if is_sgl_stop_sent is False:
                        logger.console("Sending STOP to SGL", newline=True)
                        g_socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send('{ "STOP": "STOP" }')
                        is_sgl_stop_sent = True
                if is_status_fm_sgl is True and is_status_fm_vms is False:
                    if is_vms_stop_sent is False:
                        logger.console("Sending STOP to VMS", newline=True)
                        #self.Stop_Vms()
                        g_socket_info.socket_desc[TSP_VMS_COMMAND_INDEX].send('{ "STOP": "STOP" }')
                        #vms_cmd = '{ "STOP": "STOP" }'
                        #self.Send_ValidationCommand(vms_cmd)
                        is_vms_stop_sent = True
                if is_status_fm_vms is True:
                    if is_status_fm_sgl is False:
                        if is_sgl_stop_sent is False:
                            logger.console("Sending STOP to SGL", newline=True)
                            g_socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send('{ "STOP": "STOP" }')
                            is_sgl_stop_sent = True
                    if is_status_fm_stl is False:
                        if is_stl_stop_sent is False:
                            logger.console(" Sending STOP to STL", newline=True)
                            g_socket_info.socket_desc[TSP_STL_COMMAND_INDEX].send('{ "STOP": "STOP" }')
                            is_stl_stop_sent = True
        # Decode and record all three final statuses.
        seg_status = json_sgl_status["Status Message"]
        json_stl_status = json.loads(stl_status)
        json_vms_status = json.loads(vms_status)
        self.sgl_status_json = json_sgl_status
        self.stl_status_json = json_stl_status
        self.vod_val_list.append(json_vms_status)
        if seg_status["START_SEGMENTER"] == "EXITED_GRACEFULLY" :
            logger.info("Segmenter Exited Gracefully", html=True)
            self.is_sgl_successful = True
        else:
            logger.info("Segmenter Failed", html=True)
            self.is_sgl_successful = False
        if json_stl_status["OVER_ALL_STATUS"] == "STREAM_PUMP_SUCCESS":
            logger.info("Stream Pumping Successful", html=True)
            self.is_stl_successful = True
        else:
            logger.info("Stream Pumping Failed", html=True)
            self.is_stl_successful = False
            # Name each stream whose pumping failed.
            json_stream_obj = json_vms_status["STREAM_ID"]
            for stream_obj in json_stream_obj:
                if stream_obj["Status Message"] != "STREAM_PUMP_SUCCESS":
                    logger.info("Pumping %s Stream Failed" % str(stream_obj["STREAM_ID"]), html=True)
                    logger.console("Pumping %s Stream Failed" % str(stream_obj["STREAM_ID"]), newline=True)
        if json_vms_status["OVER_ALL_STATUS"] == "VALIDATION_SUCCESS":
            self.is_vms_successful = True
            logger.info("Validation Successful", html=True)
        else:
            self.is_vms_successful = False
            logger.info("Validation Failed", html=True)
            # Itemise each failed validation entry into the log.
            json_vms_list = json_vms_status["VALIDATION_LIST"]
            for vms_obj in json_vms_list:
                json_diff_val = vms_obj["DIFF_VALIDATIONS"]
                for diff_val in json_diff_val:
                    if diff_val["OVER_ALL_STATUS"] == "VALIDATION_FAILED":
                        if diff_val["DIFFERED_SEGMENT"] != "":
                            logger.info("%s differed in %s" % (
                                str(diff_val["DIFFERED_SEGMENT"]), str(diff_val["VAL_ID"])), html=True)
                            logger.console("%s differed in %s" % (
                                str(diff_val["DIFFERED_SEGMENT"]), str(diff_val["VAL_ID"])), newline=True)
                        elif diff_val["MISSING_FILES"] != "NO":
                            logger.info("Missing Files in %s" % str(diff_val["VAL_ID"]), html=True)
                            #logger.console("Missing Files in %s" % str(diff_val["VAL_ID"])), newline=True)
                    elif diff_val["OVER_ALL_STATUS"] == "MEDIA_STREAM_VALIDATION_FAILED":
                        logger.info("Media stream validation failed for \'%s\'" % (
                            str(str(diff_val["VAL_ID"]))), html=True)
        # All three servers must succeed for the test case to pass.
        if self.is_sgl_successful is True and self.is_stl_successful is True and self.is_vms_successful is True:
            self.is_test_case_passed = True
        else:
            self.is_test_case_passed = False
            raise AssertionError
    def VOD_Manifest_Validation_Process(self):
        """Wait for the VMS manifest-validation verdict and grade it.

        Collects every VMS status JSON into vod_val_list; the loop exits on
        the first message containing OVER_ALL_STATUS.  Raises AssertionError
        when validation failed.
        NOTE(review): the final pass/fail also requires is_test_case_passed
        to already be True -- presumably this keyword is chained after an
        earlier *_Validation_Process call; confirm.
        """
        is_status_fm_vms = False
        while True:
            read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_VMS_STATUS_INDEX].getsockname():
                    vms_status = TSP.recvall(recv_sock)
                    if not vms_status:
                        logger.info("Connection lost with VMS", html=True)
                        logger.console("Connection lost with VMS", newline=True)
                        raise AssertionError
                    logger.console(vms_status, newline=True)
                    json_vms_status = json.loads(vms_status)
                    #logger.console(json_vms_status, newline=True)
                    self.vod_val_list.append(json_vms_status)
                    # Only the final message carries OVER_ALL_STATUS.
                    if 'OVER_ALL_STATUS' in json_vms_status:
                        is_status_fm_vms = True
            if is_status_fm_vms is True:
                break
        if json_vms_status["OVER_ALL_STATUS"] == "VALIDATION_SUCCESS":
            logger.info("Manifest Validation Successful", html=True)
            self.is_vms_successful = True
        else:
            logger.info("Manifest Validation Failed", html=True)
            self.is_vms_successful = False
            # Itemise each failed validation entry into the log.
            json_vms_list = json_vms_status["VALIDATION_LIST"]
            for vms_obj in json_vms_list:
                json_diff_val = vms_obj["DIFF_VALIDATIONS"]
                for diff_val in json_diff_val:
                    if diff_val["OVER_ALL_STATUS"] == "VALIDATION_FAILED":
                        if diff_val["DIFFERED_SEGMENT"] != "":
                            logger.info("%s differed in %s" % (
                                str((diff_val["DIFFERED_SEGMENT"])), str(diff_val["VAL_ID"])), html=True)
                            logger.console("%s differed in %s" % (
                                str((diff_val["DIFFERED_SEGMENT"])), str(diff_val["VAL_ID"])), newline=True)
                        elif diff_val["MISSING_FILES"] != "NO":
                            logger.info("Missing Files in %s" % str(diff_val["VAL_ID"]), html=True)
                            logger.console("Missing Files in %s" % str(diff_val["VAL_ID"]), newline=True)
                    elif diff_val["OVER_ALL_STATUS"] == "MEDIA_STREAM_VALIDATION_FAILED":
                        logger.info("Media stream validation failed for \'%s\'" % (
                            str(str(diff_val["VAL_ID"]))), html=True)
                        self.print_message_to_console(diff_val)
        if self.is_test_case_passed is True and self.is_vms_successful is True:
            self.is_test_case_passed = True
        else:
            self.is_test_case_passed = False
            raise AssertionError
    def print_message_to_console(self,diff_val):
        """Log the validator's must-fix / should-fix messages per variant.

        *diff_val* is one DIFF_VALIDATIONS entry; its ISSUES_PRESENT.variants
        list carries the per-bitrate message lists.
        """
        issues_present = diff_val["ISSUES_PRESENT"]
        variant_list = issues_present["variants"]
        # Blank line to visually separate the report.
        logger.info("", html=True)
        logger.console("", newline=True)
        for variant in variant_list:
            bitrate = variant["playlistMaxBitrate"]
            logger.info("variant: %s" % bitrate, html=True)
            logger.console("variant: %s" % bitrate, newline=True)
            mustfix_messages_list = variant["mustfix_messages"]
            logger.info(" Must Fix Messages", html=True)
            logger.console(" Must Fix Messages", newline=True)
            for mustfix_message in mustfix_messages_list:
                logger.info(" %s" % mustfix_message, html=True)
                logger.console(" %s" % mustfix_message, newline=True)
            shouldfix_messages_list = variant["shouldfix_messages"]
            logger.info(" Should Fix Messages", html=True)
            logger.console(" Should Fix Messages", newline=True)
            for shouldfix_message in shouldfix_messages_list:
                logger.info(" %s" % shouldfix_message, html=True)
                logger.console(" %s" % shouldfix_message, newline=True)
        logger.info("", html=True)
        logger.console("", newline=True)
    def Create_Filebased_Segments(self):
        """Wait for the SGL start status alone (no validation) and record
        pass/fail from the segmenter's exit status.
        """
        is_status_fm_sgl = False
        while True:
            read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
            for recv_sock in read:
                if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_SGL_STATUS_INDEX].getsockname():
                    seg_status = TSP.recvall(recv_sock)
                    if not seg_status:
                        logger.info("Connection lost with SGL", html=True)
                        logger.console("Connection lost with SGL", newline=True)
                        raise AssertionError
                    logger.console(seg_status, newline=True)
                    json_sgl_status = json.loads(seg_status)
                    seg_status = json_sgl_status["Status Message"]
                    # Any START_SEGMENTER value means SGL has finished.
                    if seg_status["START_SEGMENTER"]:
                        is_status_fm_sgl = True
                    break
            if is_status_fm_sgl is True:
                break
        seg_status = json_sgl_status["Status Message"]
        if seg_status["START_SEGMENTER"] == "EXITED_GRACEFULLY" :
            logger.info("Segmenter Exited Gracefully", html=True)
            self.is_sgl_successful = True
            self.is_test_case_passed = True
        else:
            logger.info("Segmenter Failed", html=True)
            self.is_sgl_successful = False
            self.is_test_case_passed = False
def Create_Portbased_Segments(self):
    """Drive a port-based (streamed) segmentation run and collect its results.

    Listens on the SGL (segment launcher) and STL (stream pumper) status
    sockets until both components have reported. When only one side has
    finished, a STOP command is sent once to the other side so the run
    winds down. Finally the SGL/STL status JSON is evaluated and success
    flags are recorded on the instance.

    Sets:
        self.is_sgl_successful / self.is_test_case_passed: from the SGL
            "START_SEGMENTER" status ("EXITED_GRACEFULLY" means success).
        self.is_stl_successful: from the STL "OVER_ALL_STATUS".

    Raises:
        AssertionError: if the SGL or STL connection is lost.
    """
    is_status_fm_sgl = False   # True once SGL has reported its final status
    is_status_fm_stl = False   # True once STL has reported its final status
    is_sgl_stop_sent = False   # guards against sending STOP to SGL twice
    is_stl_stop_sent = False   # guards against sending STOP to STL twice
    while True:
        read, write, error = select.select(g_socket_info.socket_id, [], [], TSP_SELECT_TIME_OUT)
        for recv_sock in read:
            # Status from the segment launcher (SGL).
            if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_SGL_STATUS_INDEX].getsockname():
                sgl_status = TSP.recvall(recv_sock)
                if not sgl_status:
                    logger.info("Connection lost with SGL", html=True)
                    logger.console("Connection lost with SGL", newline=True)
                    raise AssertionError
                logger.console(sgl_status, newline=True)
                json_sgl_status = json.loads(sgl_status)
                seg_status = json_sgl_status["Status Message"]
                if seg_status["START_SEGMENTER"]:
                    is_status_fm_sgl = True
            # Status from the stream pumper (STL).
            if recv_sock.getsockname() == g_socket_info.socket_desc[TSP_STL_STATUS_INDEX].getsockname():
                stl_status = TSP.recvall(recv_sock)
                if not stl_status:
                    logger.info("Connection lost with STL", html=True)
                    logger.console("Connection lost with STL", newline=True)
                    raise AssertionError
                logger.console(stl_status, newline=True)
                is_status_fm_stl = True
        if is_status_fm_sgl is True and is_status_fm_stl is True:
            break
        elif is_status_fm_sgl is True or is_status_fm_stl is True:
            # Exactly one side finished: tell the other side to STOP (once).
            if is_status_fm_stl is True and is_status_fm_sgl is False:
                if is_sgl_stop_sent is False:
                    logger.console("Sending STOP to SGL", newline=True)
                    g_socket_info.socket_desc[TSP_SGL_COMMAND_INDEX].send('{ "STOP": "STOP" }')
                    is_sgl_stop_sent = True
            if is_status_fm_sgl is True and is_status_fm_stl is False:
                if is_stl_stop_sent is False:
                    logger.console(" Sending STOP to STL", newline=True)
                    g_socket_info.socket_desc[TSP_STL_COMMAND_INDEX].send('{ "STOP": "STOP" }')
                    is_stl_stop_sent = True
    # NOTE(review): json_sgl_status and stl_status are only bound after both
    # sockets reported; the loop exit condition above guarantees that.
    seg_status = json_sgl_status["Status Message"]
    json_stl_status = json.loads(stl_status)
    if seg_status["START_SEGMENTER"] == "EXITED_GRACEFULLY" :
        logger.info("Segmenter Exited Gracefully", html=True)
        self.is_sgl_successful = True
        self.is_test_case_passed = True
    else:
        logger.info("Segmenter Failed", html=True)
        self.is_sgl_successful = False
        self.is_test_case_passed = False
    if json_stl_status["OVER_ALL_STATUS"] == "STREAM_PUMP_SUCCESS":
        logger.info("Stream Pumping Successful", html=True)
        self.is_stl_successful = True
    else:
        logger.info("Stream Pumping Failed", html=True)
        self.is_stl_successful = False
    # Log every individual stream that failed to pump.
    json_stream_obj = json_stl_status["STREAM_ID"]
    for stream_obj in json_stream_obj:
        if stream_obj["Status Message"] != "STREAM_PUMP_SUCCESS":
            logger.info("Pumping %s Stream Failed" % str(stream_obj["STREAM_ID"]), html=True)
            logger.console("Pumping %s Stream Failed" % str(stream_obj["STREAM_ID"]), newline=True)
def _Create_Json(self, sgl_json=None, stl_json=None, vms_json=None):
self.end_tc = time.time()
start_tc_time = datetime.datetime.fromtimestamp(self.start_tc).strftime('%Y-%m-%d %H:%M:%S')
end_tc_time = datetime.datetime.fromtimestamp(self.end_tc).strftime('%Y-%m-%d %H:%M:%S')
if sgl_json != None and stl_json != None and vms_json != None:
overall_json = {"Start_time": start_tc_time, "End_time": end_tc_time, "SEGMENT_LAUNCHER": sgl_json,
"STREAM_PUMPER": stl_json, "VALIDATION_LAUNCHER": vms_json}
return overall_json
if sgl_json != None and stl_json != None and vms_json == None:
overall_json = {"Start_time": start_tc_time, "End_time": end_tc_time, "SEGMENT_LAUNCHER": sgl_json,
"STREAM_PUMPER": stl_json}
return overall_json
if sgl_json != None and stl_json == None and vms_json != None:
overall_json = {"Start_time": start_tc_time, "End_time": end_tc_time, "SEGMENT_LAUNCHER": sgl_json,
"VALIDATION_LAUNCHER": vms_json}
return overall_json
if sgl_json != None and stl_json == None and vms_json == None:
overall_json = {"Start_time": start_tc_time, "End_time": end_tc_time, "SEGMENT_LAUNCHER": sgl_json}
return overall_json
if sgl_json == None and stl_json != None and vms_json != None:
overall_json = {"Start_time": start_tc_time, "End_time": end_tc_time, "STREAM_PUMPER": stl_json,
"VALIDATION_LAUNCHER": vms_json}
return overall_json
if sgl_json == None and stl_json != None and vms_json == None:
overall_json = {"Start_time": start_tc_time, "End_time": end_tc_time, "STREAM_PUMPER": stl_json}
return overall_json
if sgl_json == None and stl_json == None and vms_json != None:
overall_json = {"Start_time": start_tc_time, "End_time": end_tc_time, "VALIDATION_LAUNCHER": vms_json}
return overall_json
if sgl_json == None and stl_json == None and vms_json == None:
overall_json = {}
return overall_json
'''
def Update_Testlink(self):
logger.console("Connecting to Testlink", newline=True)
tls = testlink.TestLinkHelper().connect(testlink.TestlinkAPIClient)
logger.console("Connected to Testlink", newline=True)
test_plan_id = tls.getTestPlanByName(self.m_test_project, self.m_test_plan)[0]["id"]
if self.is_test_case_passed is True:
result = tls.reportTCResult(int(self.m_test_case_id), int(test_plan_id), self.m_test_build, "p", "testing")
else:
result = tls.reportTCResult(int(self.m_test_case_id), int(test_plan_id), self.m_test_build, "f","testing")
logger.console(result, newline=True)
logger.console(self.m_test_case_id, newline=True)
logger.console(test_plan_id, newline=True)
'''
def Update_Testlink(self, test_project, test_build, test_plan, test_case_id):
    """Report the current test-case verdict to TestLink.

    Connects to the TestLink API, resolves the test-plan id from the
    project/plan names and reports the case as passed ("p") or failed
    ("f") depending on self.is_test_case_passed.

    Args:
        test_project: TestLink project name.
        test_build: build name the result is reported against.
        test_plan: test-plan name within the project.
        test_case_id: numeric (or numeric-string) TestLink test-case id.
    """
    logger.console("Connecting to Testlink", newline=True)
    tls = testlink.TestLinkHelper().connect(testlink.TestlinkAPIClient)
    logger.console("Connected to Testlink", newline=True)
    # getTestPlanByName returns a list of plan dicts; the first match is used.
    test_plan_id = tls.getTestPlanByName(test_project, test_plan)[0]["id"]
    if self.is_test_case_passed is True:
        result = tls.reportTCResult(int(test_case_id), int(test_plan_id), test_build, "p", "testing")
    else:
        result = tls.reportTCResult(int(test_case_id), int(test_plan_id), test_build, "f","testing")
    logger.console(result, newline=True)
    logger.console(test_case_id, newline=True)
    logger.console(test_plan_id, newline=True)
'''
def Create_Json(self):
json_obj = {}
json_obj = self._Create_Json( self.sgl_status_json, self.stl_status_json, self.vod_val_list )
logger.console(json_obj, newline=True)
ts = time.time()
json_timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S')
if self.is_test_case_passed is True:
with open(os.path.join(g_json_directory, "+" + driver_data['produce_code'][0] + "_" + driver_data[
"user_name"] + "_" + json_timestamp + "_" + str(self.m_test_suite_id) + "_" + str(self.m_test_case_id) + ".json"),
'w') as output:
json.dump(json_obj, output)
else:
with open(os.path.join(g_json_directory, "-" + driver_data['produce_code'][0] + "_" + driver_data[
"user_name"] + "_" + json_timestamp + "_" + str(self.m_test_suite_id) + "_" + str(self.m_test_case_id) + ".json"),
'w') as output:
json.dump(json_obj, output)
logger.console(json_obj, newline=True)
self.start_tc = None
self.end_tc = None
self.vod_val_list = []
self.is_sgl_successful = None
self.is_rcs_successful = None
self.is_stl_successful = None
self.is_vms_successful = None
if self.is_test_case_passed is False:
self.is_test_case_passed = None
raise AssertionError
'''
def Create_Json(self, test_suite_id, test_case_id):
    """Dump the accumulated test-case results to a JSON file and reset state.

    Builds the overall result dict via _Create_Json() and writes it under
    g_json_directory. The file name is prefixed "+" for a passed case and
    "-" for a failed one, followed by product code, user name, timestamp,
    suite id and case id. All per-test-case state on the instance is then
    reset for the next run.

    Args:
        test_suite_id: test-suite id embedded in the file name.
        test_case_id: test-case id embedded in the file name.

    Raises:
        AssertionError: when the test case failed, so the calling framework
            marks it failed (raised only after the JSON has been written).
    """
    json_obj = {}
    json_obj = self._Create_Json( self.sgl_status_json, self.stl_status_json, self.vod_val_list )
    logger.console(json_obj, newline=True)
    ts = time.time()
    json_timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S')
    # "+"/"-" prefix encodes pass/fail in the file name itself.
    if self.is_test_case_passed is True:
        with open(os.path.join(g_json_directory, "+" + driver_data['produce_code'][0] + "_" + driver_data[
            "user_name"] + "_" + json_timestamp + "_" + str(test_suite_id) + "_" + str(test_case_id) + ".json"),
            'w') as output:
            json.dump(json_obj, output)
    else:
        with open(os.path.join(g_json_directory, "-" + driver_data['produce_code'][0] + "_" + driver_data[
            "user_name"] + "_" + json_timestamp + "_" + str(test_suite_id) + "_" + str(test_case_id) + ".json"),
            'w') as output:
            json.dump(json_obj, output)
    logger.console(json_obj, newline=True)
    # Reset per-test-case state for the next run.
    self.start_tc = None
    self.end_tc = None
    self.vod_val_list = []
    self.is_sgl_successful = None
    self.is_rcs_successful = None
    self.is_stl_successful = None
    self.is_vms_successful = None
    if self.is_test_case_passed is False:
        self.is_test_case_passed = None
        # Fail the keyword only after results have been persisted.
        raise AssertionError
'''
def Init_test_link(self, test_project, test_plan, test_build, test_case_id, test_suite_id):
self.m_test_project = test_project
self.m_test_build = test_build
self.m_test_plan = test_plan
self.m_test_case_id = test_case_id
self.m_test_suite_id = test_suite_id
logger.console("Setup", newline=True)
logger.console(self.m_test_project, newline=True)
logger.console(self.m_test_build, newline=True)
logger.console(self.m_test_plan, newline=True)
logger.console(self.m_test_case_id, newline=True)
def Tear_down(self):
logger.console("TearDown", newline=True)
logger.console("Update TestLink", newline=True)
self.Update_Testlink()
logger.console("Create Json", newline=True)
self.Create_Json()
'''
def Cleanup(self, user_name, sgl_ip, sps_ip, rcs_ip, vms_ip):
    """Force-kill leftover test-harness processes on the remote machines.

    Connects as root over ssh (password auto-answered via pexpect) to each
    host and kills the processes belonging to *user_name*:
      - segment launcher and helpers on sgl_ip
      - stream pump server and its tsudpsend children on sps_ip
      - runtime command server on rcs_ip
      - diffTool and validation matrix server on vms_ip

    NOTE(review): the root password is hard-coded in the pexpect events —
    key-based ssh would be safer.
    """
    machine_user_name="root"
    # Kill the segment-launcher chain on the SGL host.
    cmd='pkill -9 live_index_ref_; pkill azuki; ps -ef | grep driver | grep ' + user_name + ' | grep segment_launcher | grep -v "sh -c" | tail -n 1 | awk \'{print $2}\' | xargs kill -9'
    (command_output1, exitstatus) = \
        pexpect.run("ssh " + machine_user_name + "@" + sgl_ip + " '" + cmd + "'", \
        events={'Password':'rebaca\n'}, \
        timeout=660, withexitstatus=1)
    # Kill the tsudpsend children of the stream pump server.
    cmd='ps -ef | grep ' + user_name + ' | grep stream_pump_server | grep -v "sh -c" | tail -n 3 | awk \'{a=("ps -ef | grep "$2""); system(a)}\' | grep -v defunct | grep -v "grep" | grep tsudpsend | awk \'{cmd=("pgrep -P "$2""); system(cmd)}\' | xargs kill'
    (command_output1, exitstatus) = \
        pexpect.run("ssh " + machine_user_name + "@" + sps_ip + " '" + cmd + "'", \
        events={'Password':'rebaca\n'}, \
        timeout=660, withexitstatus=1)
    # Kill the stream pump server itself.
    cmd='ps -ef | grep ' + user_name + ' | grep stream_pump_server | grep -v "sh -c" | awk \'{print $2}\' | xargs kill -9'
    (command_output1, exitstatus) = \
        pexpect.run("ssh " + machine_user_name + "@" + sps_ip + " '" + cmd + "'", \
        events={'Password':'rebaca\n'}, \
        timeout=660, withexitstatus=1)
    # Kill the runtime command server.
    cmd='ps -ef | grep ' + user_name + ' | grep runtime_command_server | grep -v "sh -c" | awk \'{print $2}\' | xargs kill -9'
    (command_output1, exitstatus) = \
        pexpect.run("ssh " + machine_user_name + "@" + rcs_ip + " '" + cmd + "'", \
        events={'Password':'rebaca\n'}, \
        timeout=660, withexitstatus=1)
    # Kill any diffTool instances on the validation host.
    cmd='ps -ef | grep ' + user_name + ' | grep diffTool | grep -v "sh -c" | grep ' + user_name + ' | awk \'{cmd=("kill -9 "$2""); system(cmd)}\''
    (command_output1, exitstatus) = \
        pexpect.run("ssh " + machine_user_name + "@" + vms_ip + " '" + cmd + "'", \
        events={'Password':'rebaca\n'}, \
        timeout=660, withexitstatus=1)
    # Kill the validation matrix server.
    cmd='ps -ef | grep ' + user_name + ' | grep validation_matrix_server | awk \'{print $2}\' | xargs kill -9'
    (command_output1, exitstatus) = \
        pexpect.run("ssh " + machine_user_name + "@" + vms_ip + " '" + cmd + "'", \
        events={'Password':'rebaca\n'}, \
        timeout=660, withexitstatus=1)
'''
ps -ef | grep Rakesh | grep segment_launcher | tail -n 1 | awk '{print $2}' | xargs kill
ps -ef | grep Rakesh | grep stream_pump_server | grep -v "sh -c" | awk '{print $2}' | xargs kill
ps -ef | grep Rakesh | grep runtime_command_server | grep -v "sh -c" | awk '{print $2}' | xargs kill
ps -ef | grep Rakesh | grep validation_matrix_server | awk '{print $2}' | xargs kill
ps -ef | grep Rakesh | grep diffTool | grep -v "sh -c" | awk '{print $2}' | xargs kill
ps -ef | grep Rakesh | grep validation_matrix_server | grep -v "sh -c" | tail -n 3 | awk '{cmd=("pgrep -P "$2""); system(cmd)}' | awk '{cmd=("pgrep -P "$1""); system(cmd)}' | rebaca
(command_output1, exitstatus) = \
pexpect.run("ssh " + machine_user_name + "@" + sgl_ip + " '" + cmd + "'", \
events={'Password':'rebaca\n'}, \
timeout=660, withexitstatus=1)
'''
|
import sys
import numpy as np
def data_split(Data):
    """Split raw input lines into per-sentence token lists.

    Each entry of *Data* is stripped of surrounding whitespace; its first
    (and normally only) line is then split on single spaces into
    "word_TAG" tokens.

    Returns:
        list of token lists, one per input entry.
    """
    tokenised = []
    for raw in Data:
        lines = raw.strip().split('\n')
        tokenised.append(lines[0].split(' '))
    return tokenised
def train_data(words_dict, tags_dict, Data):
    """Encode tokenised sentences of "word_TAG" pairs as index records.

    Args:
        words_dict: word -> integer index mapping.
        tags_dict: tag -> integer index mapping.
        Data: list of sentences, each a list of "word_TAG" tokens.

    Returns:
        list of sentences, each a list of {'word': idx, 'tag': idx} dicts.
    """
    encoded = []
    for sentence in Data:
        record_line = []
        for token in sentence:
            parts = token.split('_')
            record_line.append({'word': words_dict[parts[0]],
                                'tag': tags_dict[parts[1]]})
        encoded.append(record_line)
    return encoded
def init_prob(list_input, tags_dict):
    """Estimate initial-state (first-tag) probabilities with add-one smoothing.

    Counts the tag of the first token of every sentence, starting each
    count at 1, and normalises by (num_sentences + num_tags).

    Returns:
        1-D numpy array of length len(tags_dict).
    """
    Pi = np.ones(len(tags_dict))
    for sentence in list_input:
        Pi[sentence[0]['tag']] += 1
    return Pi / (len(list_input) + len(tags_dict))
def transit_prob(list_input, tags_dict):
    """Estimate the tag-transition matrix A with add-one smoothing.

    A[i][j] is the smoothed probability of tag j following tag i; each
    row is normalised to sum to 1.

    Returns:
        (n_tags, n_tags) row-stochastic numpy array.
    """
    n_tags = len(tags_dict)
    A = np.ones((n_tags, n_tags))
    for sentence in list_input:
        # Count consecutive (current, next) tag pairs.
        for current, following in zip(sentence, sentence[1:]):
            A[current['tag']][following['tag']] += 1
    # Row-normalise in one vectorised step.
    A /= A.sum(axis=1, keepdims=True)
    return A
def emit_prob(words_dict, tags_dict, list_input=None):
    """Estimate the emission matrix B with add-one smoothing.

    B[t][w] is the smoothed probability of tag t emitting word w; each
    row is normalised to sum to 1.

    BUG FIX: the original silently read the module-level global
    ``list_input``; the training data can now be passed explicitly.
    Omitting the argument falls back to the global, so the existing
    two-argument call site keeps working.

    Args:
        words_dict: word -> integer index mapping.
        tags_dict: tag -> integer index mapping.
        list_input: encoded sentences as produced by train_data(), or None
            to use the module-level global of the same name.

    Returns:
        (n_tags, n_words) row-stochastic numpy array.
    """
    if list_input is None:
        # Backward compatibility with the implicit-global behaviour.
        list_input = globals()['list_input']
    B = np.ones((len(tags_dict), len(words_dict)))
    for sentence in list_input:
        for record in sentence:
            B[record['tag']][record['word']] += 1
    # Row-normalise in one vectorised step.
    B /= B.sum(axis=1, keepdims=True)
    return B
if __name__ == "__main__":
    # Usage: train_input index_to_word index_to_tag prior_out emission_out transition_out
    train_input = sys.argv[1]
    word_txt = sys.argv[2]
    tag_txt = sys.argv[3]
    prior_out = sys.argv[4]
    emission_out = sys.argv[5]
    transition_out = sys.argv[6]
    ############################################################################################################################################################
    # Build word -> index mapping (one word per line; line number = index).
    # FIX: files are now closed deterministically via 'with' blocks — the
    # original leaked all three file handles.
    with open(word_txt, 'r') as word_file:
        word_list = [line.strip() for line in word_file]
    words_dict = {word: index for index, word in enumerate(word_list)}
    ############################################################################################################################################################
    # Build tag -> index mapping the same way.
    with open(tag_txt, 'r') as tag_file:
        tag_list = [line.strip() for line in tag_file]
    tags_dict = {tag: index for index, tag in enumerate(tag_list)}
    ############################################################################################################################################################
    # Read the training sentences and estimate the HMM parameters.
    with open(train_input, 'r') as train_file:
        train_read = train_file.readlines()
    sentences_use = data_split(train_read)
    list_input = train_data(words_dict, tags_dict, sentences_use)
    Pi = init_prob(list_input, tags_dict)
    A = transit_prob(list_input, tags_dict)
    B = emit_prob(words_dict, tags_dict)
    np.savetxt(prior_out, Pi)
    np.savetxt(emission_out, B)
    np.savetxt(transition_out, A)
|
from pydantic import BaseModel
class Album(BaseModel):
    """Pydantic schema for an album payload.

    Attributes:
        title: the album's title.
        artist_id: database id of the artist the album belongs to.
    """
    title: str
    artist_id: int
|
# Schedule a WhatsApp message to be sent automatically at a given time.
# Points to remember:
# • You must be logged in to WhatsApp Web with the number you want to send from.
# • You need an internet connection for the whole process.
# • pywhatkit takes about 130 seconds to open WhatsApp Web and about 20 seconds
#   to send the message, so plan the scheduled time accordingly.
# • The time is given in 24-hour format (hour, minute).
# Steps:
# 1. In a terminal run: pip install pywhatkit
# 2. Open a Python shell or VS Code and copy this code.
# 3. Run the code.
import pywhatkit
# sendwhatmsg(phone_number, message, hour, minute) — here scheduled for 19:11.
pywhatkit.sendwhatmsg("+91897905XXXX","Hey, This is the Message sent to you.",19,11)
|
# This program loads a file of numbers and sorts them using bubble sort.
from data import main

try:
    # FIX: 'with' guarantees the file handle is closed — the original
    # opened the file and never closed it.
    with open('number', 'r') as file:
        # Whitespace-separated tokens become the array to sort.
        split_array = file.read().split()
    main.buble_sort(split_array)  # delegate to the project's bubble sort
except FileNotFoundError:
    print("FILE NOT FOUND")
"""
Fishing - TO BE TESTED
Fish in a suitable room.
"""
import time
from django.conf import settings
from evennia.utils import utils, evmenu
from evennia import CmdSet
from evennia.utils.create import create_object
from typeclasses.objects import Object
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
# ------------------------------------------------------------------------------
# Camera Commands - Calls the camera.use_object() function
# ------------------------------------------------------------------------------
class CmdUse(COMMAND_DEFAULT_CLASS):
    """
    Use an object with the at_use() hook.
    Usage:
      use <obj> [on/with <target>, <target>] =['say message][:pose message][:'say message]
    The command simply sanity checks arguments before calling the objects
    at_use() function.
    """

    key = "use"
    aliases = ["read"]
    locks = "cmd:all()"
    help_category = "General"

    def func(self):
        """Use an object, optionally on one or more comma-separated targets."""
        caller = self.caller
        # BUG FIX: when no delimiter was present the original kept `args`
        # as the raw string, so args[0] was a single character; wrap the
        # unsplit input in a one-element list instead.
        args = [self.lhs]
        for splitter in (" on ", " with "):  # try each delimiter
            if splitter in self.lhs:  # first successful split wins
                args = self.lhs.split(splitter)
                break
        obj = args[0].strip() if len(args) >= 1 else None
        # BUG FIX: the original evaluated args[1](",") — calling a string,
        # a TypeError — where the intent was to split the targets on commas.
        targets = [arg.strip() for arg in args[1].split(",")] if len(args) > 1 else None
        # No target
        if not obj:
            caller.msg("Use what?")
            return
        # Can't find target
        obj = caller.search(obj)
        if not obj:
            return
        # Unsuitable target
        if not getattr(obj, "at_use", None):
            caller.msg("You cannot use this object.")
            return
        # If targets given: find targets. BUG FIX: subjectlist is now always
        # bound so the at_use() call below cannot raise NameError when no
        # targets were supplied.
        subjectlist = []
        if targets:
            for target in targets:
                subject = self.caller.search(target)
                if not subject:
                    caller.msg("'{}' could not be located.".format(target))
                    return
                subjectlist.append(subject)
        # # Handle roleplay
        # if self.rhs:
        #     _roleplay(self, caller, self.rhs.split(":"))
        # Call use_object hook on object.
        obj.at_use(caller, subjectlist)
# ------------------------------------------------------------------------------
# Camera Object - Creates photographs when used.
# ------------------------------------------------------------------------------
class Rod(Object):
    """A fishing rod, usable via the generic `use` command."""

    def at_use(self, bait):
        """Hook invoked when the rod is used with *bait*.

        Intended to call the current room's fishing routine with
        (rod, bait); not implemented yet.
        """
        # Call rooms fishing Function (rod, bait)
        # BUG FIX: the original method body contained only a comment, which
        # is a SyntaxError in Python; `pass` makes the stub valid.
        pass
# ------------------------------------------------------------------------------
# Photograph Object - Uses menus to mimic location when photograph taken.
# ------------------------------------------------------------------------------
class FishingRoom():
    """
    Mixin intended to add fishing support to a room typeclass.
    """
    def at_fish(self, detailkey):
        """
        Resolve a fishing attempt in this room. Unimplemented stub —
        currently returns None.

        NOTE(review): the original docstring here was copy-pasted from a
        "look at detail" helper (it described an "obj_details" Attribute)
        and did not match this method.

        Args:
            detailkey (str): presumably identifies the rod/bait being
                used — confirm against the intended caller (Rod.at_use).
        """
        # Planned algorithm, from the original outline:
        # Accumulate value of bait
        # Run through odds and get winnings.
        # Match result against table
        # Return random result from table
|
from .models import *
from aristo.models import *
from payment.models import *
import time
from datetime import datetime, timezone,timedelta
now = datetime.now(timezone.utc)
def get_linked_accounts(user):
    """Return display data for every Instagram account linked to *user*.

    For each linked account, the first matching analysis row supplies the
    follower/following/media counts.

    Returns:
        list[dict]: one dict per account with username, counts,
        profile_pic_url and is_current_account.
    """
    instagram_accounts=Instagram_Accounts.objects.filter(main_user=user)
    instagram_accounts_list=[]
    for i in instagram_accounts:
        # NOTE(review): [0] raises IndexError when an account has no
        # analysis row yet — presumably one is always created alongside
        # the account; verify.
        Analyse=Instagram_Accounts_Analyse.objects.filter(instagram_account=i)[0]
        datas = {'username':i.username,'follower_count':Analyse.follower_count,'following_count':Analyse.following_count,
        'profile_pic_url':i.profile_pic_url,'media_count':Analyse.media_count,'is_current_account':i.is_current_account}
        instagram_accounts_list.append(datas)
    return instagram_accounts_list
def get_assistants_details(user):
    """Build dashboard rows for the active account's assistants.

    Looks up the user's currently active Instagram account and, for each
    assistant type present (0=follow, 1=like, 2=comment, 3=unfollow),
    takes the most recently updated assistant and produces a display
    tuple:

        (number_of_actions, progress_percent, assistant_type, source_type,
         "@account", target_label, activity_status, css_key,
         turkish_label, assistant_id, is_there_enough_data)

    source_type encodes the target kind: 0=username, 1=hashtag,
    2=location.

    Returns:
        list of those tuples, or False when the user has no active
        Instagram account.
    """
    active_ig_account = Instagram_Accounts.objects.filter(main_user = user,is_current_account = 1)
    if len(active_ig_account) == 0:
        return False
    else:
        active_ig_account = active_ig_account[0]
    latest_follow_assistant = Assistants.objects.filter(instagram_account__username=active_ig_account.username,assistant_type=0)
    latest_like_assistant = Assistants.objects.filter(instagram_account__username=active_ig_account.username,assistant_type=1)
    latest_comment_assistant = Assistants.objects.filter(instagram_account__username=active_ig_account.username,assistant_type=2)
    latest_unfollow_assistant = Assistants.objects.filter(instagram_account__username=active_ig_account.username,assistant_type=3)
    assistants_list=[]
    #check_latest_assistants
    # Follow assistant: progress = completed follow actions / planned actions.
    if latest_follow_assistant:
        i = latest_follow_assistant.latest("update_time")
        percentage_of_process=len(Follow_Actions.objects.filter(status=1,assistant = i))/i.number_of_actions*100
        percentage_of_process=round(percentage_of_process)
        if i.source_type==0:
            target_username="@"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_username,status,"x","Takip_Kullanıcı",i.id,i.is_there_enough_data))
        elif i.source_type==1:
            target_hashtag="#"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_hashtag,status,"x","Takip_Hashtag",i.id,i.is_there_enough_data))
        elif i.source_type==2:
            target_location="📍"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_location,status,"x","Takip_Lokasyon",i.id,i.is_there_enough_data))
    #like assistant
    # Like assistant: progress from completed like actions. NOTE(review):
    # source_type is compared via int(...) here but directly above —
    # presumably it is stored as a string for some assistant types.
    if latest_like_assistant:
        i = latest_like_assistant.latest("update_time")
        percentage_of_process=len(Like_Actions.objects.filter(status=1,assistant = i))/i.number_of_actions*100
        percentage_of_process=round(percentage_of_process)
        if int(i.source_type)==0:
            target_username="@"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_username,status,"y","Beğeni_Kullanıcı",i.id,i.is_there_enough_data))
        elif int(i.source_type)==1:
            target_hashtag="#"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_hashtag,status,"y","Beğeni_Hashtag",i.id,i.is_there_enough_data))
        elif int(i.source_type)==2:
            target_location="📍"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_location,status,"y","Beğeni_Lokasyon",i.id,i.is_there_enough_data))
    #comment assistant
    # Comment assistant: progress from completed comment actions.
    if latest_comment_assistant:
        i = latest_comment_assistant.latest("update_time")
        percentage_of_process=len(Comment_Actions.objects.filter(status=1,assistant = i))/i.number_of_actions*100
        percentage_of_process=round(percentage_of_process)
        if int(i.source_type)==0:
            target_username="@"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_username,status,"z","Yorum_Kullanıcı",i.id,i.is_there_enough_data))
        elif int(i.source_type)==1:
            target_hashtag="#"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_hashtag,status,"z","Yorum_Hashtag",i.id,i.is_there_enough_data))
        elif int(i.source_type)==2:
            target_location="📍"+i.source
            ig_username="@"+i.instagram_account.username
            status=i.activity_status
            assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_location,status,"z","Yorum_Lokasyon",i.id,i.is_there_enough_data))
    # Unfollow assistant: its target is always the account itself; guards
    # against a zero-division when no actions were planned.
    if latest_unfollow_assistant:
        i = latest_unfollow_assistant.latest("update_time")
        if i.number_of_actions != 0:
            percentage_of_process=len(Unfollow_Actions.objects.filter(status=1,assistant = i))/i.number_of_actions*100
        else:
            percentage_of_process = 100
        percentage_of_process=round(percentage_of_process)
        target_username="@"+i.instagram_account.username
        ig_username="@"+i.instagram_account.username
        status=i.activity_status
        assistants_list.append((i.number_of_actions,percentage_of_process,i.assistant_type,i.source_type,ig_username,target_username,status,"ogg","Takipten Çık",i.id,i.is_there_enough_data))
    return assistants_list
def new_actions(user):
    """Aggregate new followers / likes / comments per time bucket for *user*.

    Rows are bucketed by the age of their update timestamp (<1 day,
    <7 days, <30 days) and the buckets are then accumulated so weekly
    totals include daily ones, monthly include weekly, and "total"
    includes everything.

    Returns:
        dict with daily/weekly/monthly follower counts and
        daily/weekly/monthly/total like and comment counts.
    """
    analyse_ff = Analyse_FF.objects.filter(instagram_account__main_user = user)
    daily_new_followers = 0
    weekly_new_followers = 0
    monthly_new_followers = 0
    for i in analyse_ff:
        if i.is_follower == 1:
            if (datetime.now(timezone.utc)-i.follower_update_time).days<1:
                daily_new_followers += 1
            elif (datetime.now(timezone.utc)-i.follower_update_time).days<7:
                weekly_new_followers += 1
            else:
                # NOTE(review): anything older than 7 days lands in the
                # monthly bucket, even if it is older than 30 days.
                monthly_new_followers += 1
        else:
            pass
    # Make the buckets cumulative.
    weekly_new_followers += daily_new_followers
    monthly_new_followers += weekly_new_followers
    ig_account_analyse = Instagram_Accounts_Analyse.objects.filter(instagram_account__main_user = user)
    daily_new_likes = 0
    weekly_new_likes = 0
    monthly_new_likes = 0
    total_new_likes = 0
    daily_new_comments = 0
    weekly_new_comments = 0
    monthly_new_comments = 0
    total_new_comments = 0
    for i in ig_account_analyse:
        # Rows without a like_count carry no engagement data; skip them.
        if i.like_count == None:
            pass
        else:
            if (datetime.now(timezone.utc)-i.update_time).days<1:
                daily_new_likes += i.like_count
                daily_new_comments += i.comment_count
            elif (datetime.now(timezone.utc)-i.update_time).days<7:
                weekly_new_likes += i.like_count
                weekly_new_comments += i.comment_count
            elif (datetime.now(timezone.utc)-i.update_time).days<30:
                monthly_new_likes += i.like_count
                monthly_new_comments += i.comment_count
            else:
                # Older than 30 days: counted only in the running totals.
                total_new_comments += i.comment_count
                total_new_likes += i.like_count
    # Make the buckets cumulative, then fold everything into the totals.
    weekly_new_likes += daily_new_likes
    monthly_new_likes += weekly_new_likes
    total_new_likes += monthly_new_likes
    weekly_new_comments += daily_new_comments
    monthly_new_comments += weekly_new_comments
    total_new_comments += monthly_new_comments
    all_new_ffs = {'daily_new_followers':daily_new_followers,'weekly_new_followers':weekly_new_followers,'monthly_new_followers':monthly_new_followers,'daily_new_likes':daily_new_likes,'weekly_new_likes':weekly_new_likes,
    'monthly_new_likes':monthly_new_likes,'daily_new_comments':daily_new_comments,'weekly_new_comments':weekly_new_comments,'monthly_new_comments':monthly_new_comments,'total_new_likes':total_new_likes,'total_new_comments':total_new_comments}
    return all_new_ffs
def linked_assistants(user):
    """Return exactly four dashboard entries, one per assistant type.

    Fetches real assistant rows via get_assistants_details() and fills
    each missing type (0=follow, 1=like, 2=comment, 3=unfollow) with a
    placeholder tuple (0, <turkish label>) so the template always receives
    four entries in type order.
    """
    x=get_assistants_details(user)
    assistants_list=[(0,"Takip"),(0,"Beğeni"),(0,"Yorum"),(0,"Takip bırak")]
    if x == False:
        # No active account: return all placeholders.
        return assistants_list
    assistants_list=[]
    assistant_type_list=["Takip","Beğeni","Yorum","Takip bırak"]
    for b in x:
        # Keep only entries of the four known assistant types (b[2] is the
        # assistant_type element of the display tuple).
        if b[2]==0 or b[2]==1 or b[2]==2 or b[2]==3:
            assistants_list.append(b)
    a=0
    for i in range(4):
        try:
            # If slot a does not hold an assistant of type a, insert a
            # placeholder so list indices align with assistant types.
            if assistants_list[a][2] != a:
                assistants_list.insert(a,(0,assistant_type_list[a]))
        except:
            # Fewer entries than types: pad the tail with a placeholder.
            assistants_list.insert(a,(0,assistant_type_list[a]))
        a+=1
    return assistants_list
def check_linked_assistans(user):
    """Return one 0/1 flag per assistant type (0, 1, 2) telling whether the
    user has at least one assistant of that type configured.

    (Function name kept as-is for compatibility with existing callers.)
    """
    linked_types = [assistant.assistant_type
                    for assistant in Assistants.objects.filter(instagram_account__main_user= user)]
    # Membership test replaces the original index()/except dance.
    return [1 if assistant_type in linked_types else 0 for assistant_type in range(3)]
def license_data(user):
    """Return the user's licence package name and remaining days.

    Side effects: while a paid licence is still valid, assistants that
    were suspended for licensing (activity_status=4) are re-enabled; once
    the offered days have run out, the licence row is marked expired
    (status=2) and saved.

    Returns:
        dict: {'package_name': str, 'remaining_time': int days, 0, or '∞'}.
    """
    license_object = License.objects.filter(main_user = user)[0]
    created_date = license_object.created_date
    package_name = license_object.package.name.upper()
    status = license_object.status
    if status == 2:
        # Licence already marked as expired.
        package_name = package_name + " lisans sonlandı"
        remaining_time = 0
    else:
        if not package_name =='TEMEL':
            # Paid package: days left = offered days minus licence age.
            remaining_time = license_object.package.offered_days-(datetime.now(timezone.utc)-created_date).days
            # Re-activate assistants that were paused for licensing reasons.
            Assistants.objects.filter(instagram_account__main_user = user,activity_status = 4).update(activity_status = 1)
            if remaining_time < 0:
                remaining_time = 0
                license_object.status = 2
                license_object.save()
        else:
            # The free TEMEL package never expires.
            remaining_time = '∞'
    license_data = {'package_name':package_name,'remaining_time':remaining_time}
    return license_data
|
# _*_ coding:UTF-8 _*_
#! /usr/bin/env python
import time
from pymouse import PyMouse,PyMouseEvent
from pykeyboard import PyKeyboard
m = PyMouse()
k = PyKeyboard()
poslist = []
def exliststr(plist, x, y):
    # Append the string forms of x and y to plist, in place.
    plist += [str(x), str(y)]
class mmouse(PyMouseEvent):
    """Mouse listener that captures the position of a right-button press.

    After run() returns, the clicked coordinates are available as
    self.x / self.y.
    """
    def __init__(self):
        PyMouseEvent.__init__(self)
    def click(self, x, y, button, press):
        # button == 2 is the right mouse button in PyMouseEvent.
        if button == 2:
            if press:
                # Remember where the right press happened, then stop
                # listening so the caller can read self.x / self.y.
                self.x, self.y = x,y
                self.stop()
# -- Interactive calibration: capture screen coordinates of UI controls. --
print "是否更新控件坐标: Y or N "
updateflag = raw_input("Enter your choice: ")
# NOTE(review): this condition is always True — "'y'" is a truthy literal,
# so any answer re-runs calibration. The intent was most likely
# `updateflag in ('Y', 'y')`.
if updateflag == 'Y'or 'y':
    # Each block below waits for a right-click and records its position.
    mm = mmouse()
    print "鼠标放到+price输入框点击右键"
    mm.run()
    add_x, add_y = mm.x, mm.y
    exliststr(poslist,add_x,add_y)
    print add_x,add_y
    mm = mmouse()
    print "鼠标放到+price确认图标点击右键"
    mm.run()
    add_b_x, add_b_y = mm.x, mm.y
    exliststr(poslist,add_b_x,add_b_y)
    print add_b_x, add_b_y
    mm = mmouse()
    print "鼠标放到价格输入框点击右键"
    mm.run()
    input_price_x, input_price_y = mm.x, mm.y
    exliststr(poslist,input_price_x,input_price_y)
    print input_price_x, input_price_y
    mm = mmouse()
    print "鼠标放到自选价格确认图标点击右键"
    mm.run()
    submit_x, submit_y = mm.x, mm.y
    exliststr(poslist,submit_x,submit_y)
    print submit_x, submit_y
    mm = mmouse()
    print "鼠标放到+300图标点击右键"
    mm.run()
    add300_x, add300_y = mm.x, mm.y
    exliststr(poslist,add300_x,add300_y)
    print add300_x, add300_y
    mm = mmouse()
    print "鼠标放到验证码确认按键点击右键"
    mm.run()
    vcode_con_x, vcode_con_y = mm.x, mm.y
    exliststr(poslist,vcode_con_x,vcode_con_y)
    print vcode_con_x, vcode_con_y
    file_abs = "d:\\pos.txt"
    # Persist the captured coordinates, one value per line.
    with open(file_abs,"w") as f:
        for i in poslist:
            f.write(i+'\n')
# Read the coordinates back as integers.
# NOTE(review): this appends ints to a list that may still hold the string
# values captured above, leaving duplicates — presumably only meant to run
# when the user skipped re-calibration; verify intent.
with open(file_abs,"r") as f:
    for line in f.readlines():
        poslist.append(int(line.strip('\n')))
for i in poslist:
    print type(i)
    print i
|
#!/usr/bin/env python
import flickrquery
import argparse
import os.path, os
import subprocess
def _try_download(url, filename):
    """Fetch *url* into *filename* with wget and verify it with jhead.

    Returns True only when wget exits cleanly AND jhead reports a positive
    'Resolution' line (i.e. the file is a readable image), False otherwise.
    """
    cmd = 'wget -t 3 -T 5 --quiet --max-redirect 0 %s -O %s' % (url, filename)
    if os.system(cmd) != 0:
        return False
    try:
        jhead_output = subprocess.check_output(['jhead', filename])
    except (OSError, subprocess.CalledProcessError):
        # jhead missing or the file is not a valid image.
        return False
    for line in jhead_output.splitlines():
        tokens = line.split()
        if len(tokens) == 5 and tokens[0] == 'Resolution' and int(tokens[2]) > 0 and int(tokens[4]) > 0:
            return True
    return False

def download_image(data, filename):
    """Download the best available size of the Flickr photo described by *data*.

    Tries the original ('o') size first and falls back to the large ('b')
    size.  *data* is a Flickr API photo record (farm/server/id/secret keys).
    Returns True on success, False when neither size could be fetched.
    """
    if data['originalsecret'] and data['originalsecret'] != 'null':
        url_original = 'http://farm%s.staticflickr.com/%s/%s_%s_o.%s' % (data['farm'], data['server'], data['id'], data['originalsecret'], data['originalformat'])
    else:
        url_original = 'http://farm%s.staticflickr.com/%s/%s_%s_o.jpg' % (data['farm'], data['server'], data['id'], data['secret'])
    url_large = 'http://farm%s.staticflickr.com/%s/%s_%s_b.jpg' % (data['farm'], data['server'], data['id'], data['secret'])
    if _try_download(url_original, filename):
        return True
    # Original unavailable (or not a valid image) -- try the large size.
    return _try_download(url_large, filename)
# Command-line driver: query Flickr and download the matching images.
parser = argparse.ArgumentParser()
# NOTE(review): a positional name containing a space yields an attribute that
# `args.input_file` below cannot access -- this should be "input_file".
parser.add_argument("input file", help="input file with a query and an output directory separated by ';' in each line")
parser.add_argument("num_images", type=int, help="number of images to be downloaded")
parser.add_argument("storage_dir", help="directory where all images will be stored, the output dirs will have simlinks to this directory.")
parser.add_argument("-start_date", help="start date", default="1/1/2005")
parser.add_argument("-end_date", help="end date", default="1/1/2014")
parser.add_argument("--split_dirs", help="split downloaded images in directories", dest="split_dirs",action="store_true")
parser.set_defaults(split_dirs=False)
args = parser.parse_args()
inputf = open(args.input_file, 'r')
# NOTE(review): `args.output_dir` and `args.query` are never defined by the
# parser above -- this script appears to be mid-refactor from per-query
# arguments to an input-file format; confirm intended interface before fixing.
if not os.path.exists(args.output_dir):
    os.mkdir(args.output_dir)
if not os.path.exists(args.output_dir):
    print 'Cannot create output directory, exiting.'
    exit()
queries = args.query.split(';')
all_results = {}
for q in queries:
    results = flickrquery.run_flickr_query(q, args.num_images, startDate=args.start_date, endDate=args.end_date)
    print 'Found %d images for query: %s' % (len(results), q)
    # Merge per-query results; duplicate photo ids collapse to one entry.
    for photo_id, data in results.items():
        all_results[photo_id] = data;
MAX_IMAGES_DIRECTORY = 1000
directory_number = 1
num_images_in_directory = 0
num_images = 0
print 'Downloading %d images.' % len(all_results.keys())
for photo_id,data in all_results.items():
    if args.split_dirs:
        # Images are bucketed into numbered sub-directories of 1000 each.
        current_directory = os.path.join(args.output_dir, '%04d' % directory_number)
        if not os.path.exists(current_directory):
            os.mkdir(current_directory)
    else:
        current_directory = args.output_dir
    if download_image(data, os.path.join(current_directory, '%s.jpg' % photo_id)):
        num_images_in_directory = num_images_in_directory+1
        num_images = num_images + 1
        # print 'Successfully downloaded image: %s' % photo_id
    # Change directory if max number of images per directory is reached.
    if args.split_dirs and num_images_in_directory >= MAX_IMAGES_DIRECTORY:
        directory_number = directory_number+1
        current_directory = os.path.join(args.output_dir, '%04d' % directory_number)
        if not os.path.exists(current_directory):
            os.mkdir(current_directory)
        num_images_in_directory = 0
    if num_images % 100 == 0:
        # NOTE(review): len(results) is the size of the LAST query's results,
        # not the overall total -- probably meant len(all_results).
        print 'Processed %d / %d images.' % (num_images, len(results))
    if num_images >= args.num_images:
        break
|
import pandas as pd
from data_operation import *
# DS-GA 1007 Assignment 10
# Author: Junchao Zheng
def main():
    """Load the NYC restaurant inspection CSV, compute grade-consistency
    scores overall and per borough, and save grade-distribution plots.

    Relies on test_restaurant_grades() and plot_grade() from data_operation.
    """
    try:
        data_raw = pd.read_csv('DOHMH_New_York_City_Restaurant_Inspection_Results.csv', low_memory = False)
        data = data_raw.dropna() # Drop the rows where data has NaN values.
        data = data[data['GRADE'].isin(['A', 'B', 'C'])] # Drop the rows where the grades are 'Not Yet Graded' or 'P' or 'Z'.
        data.rename(columns={'GRADE DATE':'DATE'}, inplace=True) # Rename the column of 'GRADE DATE' making it efficient for following functions.
        data_short = data[['CAMIS','BORO', 'GRADE', 'DATE']] # Choose data including certain columns.
        data_clean = data_short.drop_duplicates(['DATE','CAMIS']) # Delete the duplicate grade for a certain grade date.
        print 'Data Loaded...'
        # Compute the sum of the function over all restaurants in the dataset.
        print 'Now for Question4.1: compute the sum of the function over all restaurants in the dataset.'
        print 'Computing...\nIt may take several minutes.'
        score_total = 0
        for i in range(len(data_clean['CAMIS'].unique())):
            score_total = score_total + test_restaurant_grades(data_clean, data_clean['CAMIS'].unique()[i])
        print 'The sum of the function is: ' + str(score_total)
        # Compute the sum of the function over all restaurants for each of the five Boroughs.
        print 'Now for Question4.2: compute the sum of the function over all restaurants for each of the five Boroughs.'
        print 'Computing...\nIt may take several minutes.'
        borough_dictionary = {}
        # NOTE(review): unique()[:-1] drops the last BORO value -- presumably
        # a 'Missing' placeholder; confirm against the dataset.
        for borough in data_clean['BORO'].unique()[:-1]:
            score_boro = 0
            data_given_borough = data_clean[data_clean['BORO'] == borough]
            for camis in data_given_borough['CAMIS'].unique():
                score_boro = score_boro + test_restaurant_grades(data_given_borough, camis)
            borough_dictionary[borough] = score_boro
        print 'Here list the sum of the function to each boroughs:'
        for key, val in borough_dictionary.items():
            print str(key) + ': ' + str(val)
        # Plot the distribution of grade over all restaurants.
        print 'Now generate the plots for Question5.a.'
        plot_grade(data_clean, 'nyc')
        print 'Plots saved to pdf files.'
        # Plot the distribution of grade over all restaurants in boroughs respectively.
        print 'Now generate the plots for Question5.b.'
        boro_list = data_clean['BORO'].unique().tolist()[:-1]
        for boro in boro_list:
            plot_grade(data_clean[data_clean['BORO'] == boro], boro)
        print 'Plots saved to pdf files.'
        print 'Program finish now. Bye!'
    except KeyboardInterrupt:
        # Allow Ctrl-C to exit the (long) computation with a message.
        print 'Terminate abnormally'
if __name__ == '__main__':
    try:
        main()
    except EOFError:
        pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
    """Build a pygmsh geometry: a unit ball with four spherical holes."""
    geom = pg.Geometry()
    # Hole centres and their radii.
    centres = np.array([
        [0.0, 0.0, 0.0],
        [0.5, 0.3, 0.1],
        [-0.5, 0.3, 0.1],
        [0.5, -0.3, 0.1]
        ])
    radii = np.array([0.1, 0.2, 0.1, 0.14])
    # Keep only the surface loop of each hole ball (no volume); mesh size
    # scales with the hole radius.
    holes = [
        geom.add_ball(centre, radius, with_volume=False, lcar=0.2*radius)[1]
        for centre, radius in zip(centres, radii)
        ]
    # Enclosing unit ball with the holes carved out.
    geom.add_ball(
        [0, 0, 0], 1.0,
        lcar=0.2,
        holes=holes
        )
    return geom
if __name__ == '__main__':
    # Mesh the geometry and write it out as a VTU file for visualisation.
    import meshio
    points, cells = pg.generate_mesh(generate())
    meshio.write('swiss_cheese.vtu', points, cells)
|
import pygame
import sys
import random
pygame.init()
# Display
# NOTE(review): the names are swapped relative to pygame's convention --
# set_mode() below receives (screen_height, screen_width) = (800, 600),
# so the window is 800 wide and 600 tall; `screen_height` acts as the width.
screen_height = 800
screen_width = 600
background_color = (169, 169, 169)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
white = (255, 255, 255)
# Player
player_color = (81, 156, 213)
player_size = 50
# Start centred horizontally, two player-heights above the bottom edge.
player_pos = [(screen_height / 2), screen_width - 2 * player_size]
# Obstacles
enemy_color = red
enemy_size = 50
enemy_speed = 5
# First enemy at a random x along the top edge.
enemy_pos = [random.randint(0, screen_width - enemy_size), 0]
enemy_list = [enemy_pos]
screen = pygame.display.set_mode((screen_height, screen_width))
pygame.display.set_caption("Avoid the blocks!")
game_over = False
score = 0
clock = pygame.time.Clock()
myfont = pygame.font.SysFont("consolas", 35)
def set_level(score, enemy_speed):
    """Return the enemy falling speed for the current *score*.

    Difficulty steps up at 20, 40 and 60 points.  The *enemy_speed*
    parameter is kept for interface compatibility with existing callers;
    the result depends only on *score*.
    """
    # BUG FIX: the original assigned to a misspelled local (`emeny_speed`)
    # in the first branch, so scores below 20 returned the speed unchanged.
    if score < 20:
        enemy_speed = 5
    elif score < 40:
        enemy_speed = 8
    elif score < 60:
        enemy_speed = 12
    else:
        enemy_speed = 15
    return enemy_speed
def detect_collision(player_pos, enemy_pos):
    """Return True when the player and enemy squares overlap.

    Both positions are [x, y] top-left corners; edge lengths come from the
    module-level ``player_size`` and ``enemy_size`` constants.
    """
    player_x, player_y = player_pos[0], player_pos[1]
    enemy_x, enemy_y = enemy_pos[0], enemy_pos[1]
    # Axis-aligned overlap test, one axis at a time.
    x_overlap = (player_x <= enemy_x < player_x + player_size) or \
                (enemy_x <= player_x < enemy_x + enemy_size)
    y_overlap = (player_y <= enemy_y < player_y + player_size) or \
                (enemy_y <= player_y < enemy_y + enemy_size)
    # BUG FIX: the original fell off the end and returned None (not False)
    # when the x ranges did not overlap; always return a real boolean.
    return x_overlap and y_overlap
def spawn_enemies(enemy_list):
    """Maybe append one new enemy at a random x along the top edge.

    A new enemy appears with probability 0.1 per call, capped at 10
    simultaneous enemies in *enemy_list* (mutated in place).
    """
    chance = random.random()
    if len(enemy_list) < 10 and chance < 0.1:
        spawn_x = random.randint(0, screen_width - enemy_size)
        enemy_list.append([spawn_x, 0])
def draw_enemies(enemy_list):
    """Draw every enemy square onto the global ``screen`` surface."""
    for pos in enemy_list:
        rect = (pos[0], pos[1], enemy_size, enemy_size)
        pygame.draw.rect(screen, enemy_color, rect)
def update_enemy_pos(enemy_list, score):
    """Advance every enemy downward; drop off-screen ones and score them.

    Mutates *enemy_list* in place and returns the updated *score* (one point
    per enemy that left the screen).  Uses the module-level ``enemy_speed``
    and ``screen_height`` values.
    """
    # BUG FIX: the original popped from enemy_list while enumerating it,
    # which skips the element following each removal.  Build the surviving
    # list separately, then write it back in place.
    survivors = []
    for enemy_pos in enemy_list:
        if enemy_pos[1] >= 0 and enemy_pos[1] < screen_height:
            enemy_pos[1] += enemy_speed
            survivors.append(enemy_pos)
        else:
            score += 1
    enemy_list[:] = survivors
    return score
def collision_check(enemy_list, player_pos):
    """Return True if the player collides with any enemy in *enemy_list*."""
    return any(detect_collision(player_pos, enemy) for enemy in enemy_list)
# Main game loop: handle input, advance enemies, detect collisions, redraw.
while not game_over:
    for event in pygame.event.get():
        if event.type == pygame.QUIT: # Quits the game
            sys.exit()
        if event.type == pygame.KEYDOWN:
            # Move one player-width per key press.
            # NOTE(review): no clamping -- the player can move off-screen.
            x = player_pos[0]
            y = player_pos[1]
            if event.key == pygame.K_RIGHT:
                x += player_size
            elif event.key == pygame.K_LEFT:
                x -= player_size
            player_pos = [x, y]
    screen.fill(background_color)
    spawn_enemies(enemy_list)
    score = update_enemy_pos(enemy_list, score)
    enemy_speed = set_level(score, enemy_speed)
    text = "Score:" + str(score)
    text_surface = myfont.render(text, False, blue)
    # NOTE(review): x = screen_width - 5 = 595; the window is 800 wide here
    # (see set_mode above), so the score renders toward the right side.
    screen.blit(text_surface, (screen_width - 5, 550))
    if collision_check(enemy_list, player_pos):
        game_over = True
    draw_enemies(enemy_list)
    pygame.draw.rect(screen, player_color, (player_pos[0], player_pos[1], player_size, player_size), 0) # Draws the player
    clock.tick(30)  # cap at 30 FPS
    pygame.display.update()
|
"""Status Services Classes."""
import logging
from .taskstatuses import TaskStatuses
# NOTE(review): this debug call runs at import time as a side effect.
logging.debug("In the status_services __init__.py file.")
# Public API of this package.
__all__ = ["TaskStatuses"]
|
import urllib
import urllib.request as url
import re
class plugin:
    """IRC bot plugin: search DuckDuckGo and report the first result."""
    handle = "ddg"
    method = "args"
    do_init = False
    cron_time = False
    join_hook = False
    help_str = "Usage: " + handle + " [search]. Search duckduckgo for whatever."

    def run(self, pman, server, nick, host, channel, args):
        # Reply in the channel when addressed there, otherwise by PM.
        reply_to = channel if channel[0] == "#" else nick
        msg = False
        if len(args) <= 1:
            # No search terms given -- show usage.
            msg = self.help_str
        if len(args) > 1:
            # Build the HTML-endpoint query URL; spaces become '+'.
            search = "http://www.duckduckgo.com/html/?q=" + " ".join(args[1:])
            search = search.replace(" ", "+")
            print(search)
            response = url.urlopen(search)
            data = str(response.read())
            # Collect the result anchors from the raw HTML.
            links = [chunk for chunk in data.split("\\n")
                     if "<a rel=\"nofollow\" class" in chunk]
            if len(links) < 1:
                return
            # Quoted-attribute split: field 5 is the href, field 6 the title.
            metadata = links[0].split("\"")
            link = metadata[5]
            title = re.sub("<.*?>", "", metadata[6][1:])
            msg = title + ": " + link
        if msg:
            server.send_message(reply_to, msg)


hooks = {}
|
"""
Prints out all the melons in our inventory
"""
from melons import melon_info
# def print_melon():
# for melon, value in melon_info.items():
# print melon
# for attribute, value in attributes.items():
# print "{}: {}".format(attribute, value)
# print
# print_melon()
def print_melon(): #define function print_melon
for melon, value in melon_info.items(): #for each 'melon, value' tuple in dictionary melon_info
print melon #print to console melon
for value, value2 in value.items(): #and then for each "key, value" within each melon dictionary
print "{}: {}".format(value, value2) #print key, value
print #add a new line?
print_melon()
# key and value can be named anything
|
from .base import Badge
class BadgeCache(object):
    """
    Stores every badge that has been registered and provides the public
    API for awarding badges.

    Instantiate this class only once; use the module-level ``badges``
    instance defined below.
    """
    def __init__(self):
        self._event_registry = {}
        self._registry = {}

    def register(self, badge):
        # Sanity check: callers must pass the Badge subclass itself, not an
        # instance (duck-typing would also work, but this catches mistakes).
        assert issubclass(badge, Badge)
        instance = badge()
        self._registry[instance.slug] = instance
        # Index the badge under every event it subscribes to.
        for event in instance.events:
            self._event_registry.setdefault(event, []).append(instance)

    def possibly_award_badge(self, event, **state):
        # Only badges subscribed to this event are consulted.
        for candidate in self._event_registry.get(event, []):
            candidate.possibly_award(**state)


badges = BadgeCache()
|
# I pledge my honor I have abided by the Stevens Honor System
# I understand that I may access the course textbook and course lecture notes
# but I am not to access any other resource.
# I also pledge that I worked alone on this exam.
# Jacob Aylmer
# Quiz 2 Part 2
def main():
    """Interactive menu: simple arithmetic or string operations.

    All menu choices are read as floats (so '1' and '1.0' both work);
    non-numeric input raises an uncaught ValueError.
    """
    math_or_string = float(input("Enter 1 for mathematical function or 2 for string functions: "))
    if math_or_string == 1:
        print("These are the math functions")
        math_operations = float(input("Enter 1 for for addition, 2 for subtraction, 3 for multiplication or 4 for division: "))
        if math_operations == 1:
            print("Add two numbers")
            add1 = float(input("Enter first number to add: "))
            add2 = float(input("Enter second number to add: "))
            sum1 = add1 + add2
            print(sum1)
        elif math_operations == 2:
            print("Subtract two numbers")
            sub1 = float(input("Enter first number to subtract: "))
            sub2 = float(input("Enter second number to subtract: "))
            difference1 = sub1 - sub2
            print(difference1)
        elif math_operations == 3:
            print("Multiply two numbers")
            mult1 = float(input("Enter first number to multiply: "))
            mult2 = float(input("Enter second number to multiply: "))
            product1 = mult1 * mult2
            print(product1)
        elif math_operations == 4:
            print("Divide two numbers")
            div1 = float(input("Enter first number to divide: "))
            div2 = float(input("Enter second number to divide: "))
            # NOTE(review): div2 == 0 raises an uncaught ZeroDivisionError.
            quotient1 = div1 / div2
            print(quotient1)
        else:
            print("Invalid input")
    elif math_or_string == 2:
        print("These are the string functions")
        string_operations = float(input("Enter 1 to count the number of vowels in a string or enter 2 to encrypt a string: "))
        if string_operations == 1:
            print("This will count the number of vowels in a string")
            str1 = input("Enter a string: ")
            # Count each vowel in both cases; y/Y is counted as a vowel here.
            a1 = str1.count("a") + str1.count("A")
            e1 = str1.count("e") + str1.count("E")
            i1 = str1.count("i") + str1.count("I")
            o1 = str1.count("o") + str1.count("O")
            u1 = str1.count("u") + str1.count("U")
            y1 = str1.count("y") + str1.count("Y")
            vowels1 = a1 + e1 + i1 + o1 + u1 + y1
            print(vowels1)
        elif string_operations == 2:
            print("This will encrypt a string")
            str2 = input("Enter a string: ")
            # "Encrypt" by printing each character's code point shifted by 5.
            for i in str2:
                x = ord(i)
                print("", x + 5, end = "")
            print()
        else:
            print("Invalid input")
    else:
        print("Invalid input")
main()
|
'''
Created on 15 nov. 2012
@author: David
inspired by Telmo Menezes's work : telmomenezes.com
'''
"""
this class inherits from pyevovle.DBAdapters.DBBaseAdaoter. It computes and stores statistics during genetic algorithm processing.
"""
import pydot
import pyevolve as py
import pyevolve.DBAdapters as db
import logging
from collections import defaultdict
import operator
import numpy as np
import matplotlib.pyplot as plt
from lxml import etree as xml
class StatisticsInTxt(db.DBBaseAdapter):
    ''' This class inherits from DBAdpater in pyevolve, it will be called at each generation of the genetic algorithm
    and print stats in a txt file and print it on screen
    '''
    def __init__(self, filename=None, identify=None,
                 frequency=py.Consts.CDefCSVFileStatsGenFreq, reset=True):
        """ The creator of StatisticsInTxt Class

        filename  -- path of the text file the statistics are dumped to
        identify  -- run identifier forwarded to DBBaseAdapter
        frequency -- dump frequency in generations (pyevolve default)
        reset     -- when True, truncate the file on open(); else append
        """
        db.DBBaseAdapter.__init__(self, frequency, identify)
        self.filename = filename
        self.file = None  # file handle, managed by open()/insert()/close()
        self.reset = reset
    def __repr__(self):
        """ The string representation of adapter """
        ret = "StatisticsInTxt DB Adapter [File='%s', identify='%s']" % (self.filename, self.getIdentify())
        return ret
    def open(self, ga_engine):
        """ Open the Txt file or creates a new file, write the run parameters
        (both to the file and to stdout), then close it again; insert()
        reopens the file in append mode each generation.
        """
        logging.debug("Opening the txt file to dump statistics [%s]", self.filename)
        if self.reset:
            open_mode = "w"
        else:
            open_mode = "a"
        self.file = open(self.filename, open_mode)
        self.file.write("name = %s \n" % ga_engine.getPopulation().oneSelfGenome.getParam("name"))
        self.file.write("number of generations = %s \n" % ga_engine.getGenerations())
        self.file.write(
            "evaluation_method = %s \n" % ga_engine.getPopulation().oneSelfGenome.getParam("evaluation_method"))
        self.file.write("network_type = %s \n" % ga_engine.getPopulation().oneSelfGenome.getParam("network_type"))
        self.file.write("tree_type = %s \n" % ga_engine.getPopulation().oneSelfGenome.getParam("tree_type"))
        self.file.write("selector = %s \n" % ga_engine.selector)
        self.file.write("multiprocessing = %s \n" % ga_engine.getPopulation().multiProcessing[0])
        # Echo the same parameters on screen.
        print "name = %s" % ga_engine.getPopulation().oneSelfGenome.getParam("name")
        print "number of generations = %s" % ga_engine.getGenerations()
        print "evaluation_method = %s" % ga_engine.getPopulation().oneSelfGenome.getParam("evaluation_method")
        print "network_type = %s" % ga_engine.getPopulation().oneSelfGenome.getParam("network_type")
        print "tree_type = %s" % ga_engine.getPopulation().oneSelfGenome.getParam("tree_type")
        print "selector = %s" % ga_engine.selector
        print "multiprocessing = %s" % ga_engine.getPopulation().multiProcessing[0]
        self.file.close()
    def close(self):
        """ Closes the Txt file """
        logging.debug("Closing the txt file [%s]", self.filename)
        self.file.close()
    def commitAndClose(self):
        """ Commits and closes """
        self.close()
    def insert(self, ga_engine):
        """ Writes population statistics and the 25 best individuals of the
        current generation to the file (and echoes them to stdout). """
        self.file = open(self.filename, 'a')
        self.file.write(
            "##### Generation {numero} ###########\n".format(numero=ga_engine.getCurrentGeneration()))
        print "##### Generation {numero} ###########".format(numero=ga_engine.getCurrentGeneration())
        print ga_engine.getStatistics()
        self.file.write(ga_engine.getStatistics().__repr__())
        pop = ga_engine.getPopulation()
        # bestFitness(i) returns the i-th best individual of the population.
        for i in xrange(25):
            self.file.write("######### Arbre num {numero} ###########\n".format(numero=i))
            print "######### Arbre num {numero} ###########".format(numero=i)
            tree = pop.bestFitness(i)
            self.file.write(str(tree.getRawScore()))
            print tree.getRawScore()
            self.file.write(getTreeString(tree))
            print getTreeString(tree)
        self.file.close()
class StatisticsInDot(db.DBBaseAdapter):
    ''' This class inherits from DBAdpater in pyevolve, it will be called at each generation of the genetic algorithm
    and print best individuals in dot format
    '''
    def __init__(self, filename=None, identify=None,
                 frequency=py.Consts.CDefCSVFileStatsGenFreq, dot_path=None, reset=True):
        """ The creator of StatisticsInDot Class

        filename  -- path of the text file the statistics are dumped to
        identify  -- run identifier forwarded to DBBaseAdapter
        frequency -- dump frequency in generations (pyevolve default)
        dot_path  -- output path for the per-generation pydot rendering
        reset     -- when True, truncate the file on open(); else append
        """
        db.DBBaseAdapter.__init__(self, frequency, identify)
        self.dot_path = dot_path
        self.filename = filename
        self.file = None  # file handle, managed by open()/insert()/close()
        self.reset = reset
    def __repr__(self):
        """ The string representation of adapter """
        ret = "StatisticsInTxt DB Adapter [File='%s', identify='%s']" % (self.filename, self.getIdentify())
        return ret
    def open(self, ga_engine):
        """ Open the Txt file or creates a new file, write the run parameters
        (both to the file and to stdout), then close it again; insert()
        reopens the file in append mode each generation.
        """
        logging.debug("Opening the txt file to dump statistics [%s]", self.filename)
        if self.reset:
            open_mode = "w"
        else:
            open_mode = "a"
        self.file = open(self.filename, open_mode)
        self.file.write("name = %s \n" % ga_engine.getPopulation().oneSelfGenome.getParam("name"))
        self.file.write("number of generations = %s \n" % ga_engine.getGenerations())
        self.file.write(
            "evaluation_method = %s \n" % ga_engine.getPopulation().oneSelfGenome.getParam("evaluation_method"))
        self.file.write("network_type = %s \n" % ga_engine.getPopulation().oneSelfGenome.getParam("network_type"))
        self.file.write("tree_type = %s \n" % ga_engine.getPopulation().oneSelfGenome.getParam("tree_type"))
        self.file.write("selector = %s \n" % ga_engine.selector)
        self.file.write("multiprocessing = %s \n" % ga_engine.getPopulation().multiProcessing[0])
        # Echo the same parameters on screen.
        print "name = %s" % ga_engine.getPopulation().oneSelfGenome.getParam("name")
        print "number of generations = %s" % ga_engine.getGenerations()
        print "evaluation_method = %s" % ga_engine.getPopulation().oneSelfGenome.getParam("evaluation_method")
        print "network_type = %s" % ga_engine.getPopulation().oneSelfGenome.getParam("network_type")
        print "tree_type = %s" % ga_engine.getPopulation().oneSelfGenome.getParam("tree_type")
        print "selector = %s" % ga_engine.selector
        print "multiprocessing = %s" % ga_engine.getPopulation().multiProcessing[0]
        self.file.close()
    def close(self):
        """ Closes the Txt file """
        logging.debug("Closing the txt file [%s]", self.filename)
        self.file.close()
    def commitAndClose(self):
        """ Commits and closes """
        self.close()
    def insert(self, ga_engine):
        """ Writes population statistics, renders the 25 best individuals to
        a dot/jpeg file, and dumps the 5 best trees with their per-measure
        scores to the text file (echoed to stdout). """
        self.file = open(self.filename, "a")
        self.file.write(
            "##### Generation {numero} ###########\n".format(numero=ga_engine.getCurrentGeneration()))
        print "##### Generation {numero} ###########".format(numero=ga_engine.getCurrentGeneration())
        print ga_engine.getStatistics()
        self.file.write(ga_engine.getStatistics().__repr__())
        print ga_engine.getPopulation()
        writePopulationDot(ga_engine, self.dot_path, "jpeg", 0, 25)
        pop = ga_engine.getPopulation()
        for i in xrange(5):
            self.file.write("######### Arbre num {numero} ###########\n".format(numero=i))
            print "######### Arbre num {numero} ###########".format(numero=i)
            tree = pop.bestFitness(i)
            # Per-measure scores rounded to 2 decimals for display.
            scores = {k: str(round(v,2)) for k, v in tree.scoref.score.items()}
            score = str(round(tree.getRawScore(), 2))
            # NOTE(review): degrees/indegrees/outdegrees use .get() with a
            # default, but distances/clustering/etc. use direct indexing and
            # would raise KeyError if absent -- confirm all keys are present.
            score_str = score+" D:"+scores.get('degrees',"None")+" ID:"+scores.get('indegrees',"None")+" OD:"+scores.get('outdegrees',"None")+\
                " Di:"+scores['distances']+" C:"+scores['clustering']+" I:"+scores['importance']+" Co:"+scores['communities']+"\n"
            self.file.write(score_str+"\n")
            print(score_str)
            self.file.write(getTreeString(tree))
            print getTreeString(tree)
        self.file.close()
def getTreeString(tree, start_node=None, spc=0):
    """ Returns a tree-formated string of the tree. This
    method is used by the __repr__ method of the tree.

    tree       -- the tree whose subtree is rendered
    start_node -- node to start from (defaults to the root)
    spc        -- current indentation width in spaces

    :rtype: a string representing the tree
    """
    if start_node is None:
        start_node = tree.getRoot()
    pieces = []
    if start_node.isLeaf():
        # Clone the leaf so its numeric payload can be rounded for display
        # without mutating the real tree.
        shown = start_node.clone()
        number, variable = shown.getData()
        shown.setData([round(number, 2), variable])
        pieces.append("%s\n" % shown)
    else:
        pieces.append("%s\n" % start_node)
    indent = spc + 2
    if start_node.getData() in ["exp", "log", "abs", "inv", "opp", "H", "T", "N"]:
        # Unary operators render only their single child.
        children = [start_node.getChild(0)]
    else:
        children = start_node.getChilds()
    for child in children:
        pieces.append("%s%s\n" % (" " * indent, child))
        pieces.append(getTreeString(tree, child, indent))
    return "".join(pieces)
def writePopulationDot(ga_engine, filename, format="jpeg", start=0, end=0):
    """ Writes to a graphical file using pydot, the population of trees

    :param ga_engine: the GA Engine
    :param filename: the filename, ie. population.jpg
    :param format: output image format passed to pydot (default "jpeg")
    :param start: the start index of individuals
    :param end: the end index of individuals (0 means "whole population")
    """
    pop = ga_engine.getPopulation()
    graph = pydot.Dot(graph_type="digraph")
    n = 0  # running node id, shared across all individuals' subgraphs
    end_index = len(pop) if end == 0 else end
    for i in xrange(start, end_index):
        ind = pop[i]
        # One labelled cluster per individual, showing its raw/fitness score.
        subg = pydot.Cluster("cluster_%d" % i, label="\"Ind. #%d - Score Raw/Fit.: %.4f/%.4f\"" % (
            i, ind.getRawScore(), ind.getFitnessScore()))
        n = writeDotGraph(ind, subg, n)
        graph.add_subgraph(subg)
    # Render the image plus a raw .dot file next to it.
    graph.write(filename, prog='dot', format=format)
    graph.write(filename.replace(".jpeg",".dot"), prog='dot', format="raw")
def writeDotGraph(tree, graph, startNode=0):
    """ Write a graph to the pydot Graph instance

    :param tree: the tree individual to render
    :param graph: the pydot Graph instance
    :param startNode: used to plot more than one individual
    :rtype: the next free node id (so callers can chain individuals)
    """
    count = startNode
    node_stack = []
    nodes_dict = {}  # maps tree nodes to their pydot nodes (for edges)
    tmp = None
    def add_node(node,count):
        # Create one filled pydot node labelled with the tree node's data.
        newnode = pydot.Node(str(count), style="filled")
        newnode.set_color("goldenrod2")
        data = '\n'.join(map(str, node.getData()))
        newnode.set_label(data)
        nodes_dict.update({node: newnode})
        graph.add_node(newnode)
        return count +1
    # Depth-first traversal; a parent is always visited (and therefore added
    # to nodes_dict) before its children, so the edge lookup below is safe.
    node_stack.append(tree.getRoot())
    while len(node_stack) > 0:
        tmp = node_stack.pop()
        count =add_node(tmp,count)
        parent = tmp.getParent()
        if parent is not None:
            parent_node = nodes_dict[parent]
            child_node = nodes_dict[tmp]
            newedge = pydot.Edge(parent_node, child_node)
            graph.add_edge(newedge)
        # Unary operators push their single child ...
        if tmp.getData() in ["exp", "log", "abs", "inv", "opp", "H", "T", "N"]:
            node_stack.append(tmp.getChild(0))
        # ... binary operators push child 1 first so child 0 is popped first.
        if tmp.getData() in ["+", "-", "*", "/", "min", "max", ">", "<", "="]:
            node_stack.extend([tmp.getChild(1),tmp.getChild(0)])
    return count
'''
Function that plots and stores statistics from the best individual
'''
def store_best_network(chromosome) :
    """Persist the best individual's tree and measure distributions.

    For every measure in the chromosome's evaluation method, saves the
    model's distribution into the results XML and writes a bar-chart
    comparing the model against the real network.
    """
    eval_methods = chromosome.getParam("evaluation_method")
    results_path = chromosome.getParam("results_path")
    dynamic_network = xml.parse(results_path).getroot()
    graph_xml = dynamic_network.find("mesures")
    def plot(mesure) :
        # Side-by-side bars: model distribution vs. the real (goal) one.
        value_test = chromosome.scoref.distributions[mesure]
        # NOTE(review): eval() on XML attribute content -- safe only if the
        # results file is trusted; confirm it is always self-generated.
        value_goal = eval(graph_xml.find(mesure).get('value'))
        fig, ax = plt.subplots()
        model = ax.bar(2 * np.arange(len(value_test)), value_test, 0.8, color='#ccffcc')
        real = ax.bar(2 * np.arange(len(value_goal)) + 0.8, value_goal, 0.8, color='#ff9999')
        ax.set_xticks(0.8 + 2 * np.arange(max(len(value_goal), len(value_test))))
        ax.set_xticklabels(1 + np.arange(max(len(value_goal), len(value_test))))
        ax.set_title("Distribution of " + mesure)
        ax.legend((real, model), ('Real', 'Model'))
        plt.savefig(results_path.replace("results.xml", "") + mesure + ".jpg")
        plt.clf()
    def save(mesure):
        # Replace any previous element for this measure under <model>.
        parser = xml.XMLParser(remove_blank_text=True)
        tree = xml.parse(results_path, parser)
        results = tree.getroot()
        static_network = results.find("model")
        try:
            results.remove(results.find(mesure))
        except TypeError:
            # find() returned None: no previous element to remove.
            pass
        xml.SubElement(static_network, mesure, value=str(chromosome.scoref.distributions[mesure]))
        f = open(results_path, 'w')
        tree.write(f, pretty_print=True)
        f.close()
    def save_tree():
        # Rewrite the <model> element with the chromosome's tree string.
        parser = xml.XMLParser(remove_blank_text=True)
        tree = xml.parse(results_path, parser)
        results = tree.getroot()
        try:
            results.remove(results.find("model"))
        except TypeError:
            pass
        static_network = xml.SubElement(results, "model")
        xml.SubElement(static_network, "tree", value=getTreeString(chromosome))
        f = open(results_path, 'w')
        tree.write(f, pretty_print=True)
        f.close()
    save_tree()
    # Measures are encoded as an underscore-separated list, e.g. "degrees_distances".
    for mesure in eval_methods.split('_'):
        save(mesure)
        plot(mesure)
# Global accumulators shared by StatisticsQualityInTxt across generations:
# per-variable appearance counts and the summed raw score of the trees in
# which each variable appeared.
list_of_functions_number = defaultdict(int)
list_of_functions_sum = defaultdict(int)
# Counts close() calls; the accumulated statistics are dumped and reset on
# every 10th close -- NOTE(review): confirm close() is invoked once per run.
count = 0
class StatisticsQualityInTxt(py.DBAdapters.DBBaseAdapter):
    ''' This class inherits from DBAdpater in pyevolve, it will be called at each generation of the genetic algorithm
    and print stats in a txt file and print it on screen

    It accumulates, in the module-level dictionaries above, how often each
    leaf variable appears across the population and the scores of the trees
    it appears in; every 10th close() it ranks the variables and dumps the
    ranking to the file and to stdout.
    '''
    def __init__(self, current_variable, filename=None, identify=None,
                 frequency=py.Consts.CDefCSVFileStatsGenFreq, reset=True):
        """ The creator of StatisticsQualityInTxt Class

        current_variable -- NOTE(review): accepted but never used here.
        """
        # NOTE(review): `global count` is declared but count is not touched
        # in __init__.
        global count
        py.DBAdapters.DBBaseAdapter.__init__(self, frequency, identify)
        self.filename = filename
        self.file = None  # file handle, opened in open() and closed in close()
        self.reset = reset
    def __repr__(self):
        """ The string representation of adapter """
        ret = "StatisticsQualityInTxt DB Adapter [File='%s', identify='%s']" % (self.filename, self.getIdentify())
        return ret
    def open(self, ga_engine):
        """ Open the Txt file or creates a new file
        """
        logging.debug("Opening the txt file to dump statistics [%s]", self.filename)
        if self.reset:
            open_mode = "w"
        else:
            open_mode = "a"
        self.file = open(self.filename, open_mode)
    def close(self):
        """ Closes the Txt file.

        On every 10th call, ranks the accumulated variables three ways
        (quality, quantity*quality, rate), prints and writes the top 8 of
        each ranking, then resets the accumulators.
        """
        logging.debug("Closing the txt file [%s]", self.filename)
        global list_of_functions_number
        global list_of_functions_sum
        global count
        count += 1
        if count == 10:
            count = 0
            list_of_functions_quality = {}
            list_of_functions_product = {}
            list_of_functions_rapport = {}
            # Mean score per appearance of each variable.
            # NOTE(review): Python 2 integer division here -- confirm scores
            # are floats, otherwise the mean is truncated.
            for key in list_of_functions_number:
                list_of_functions_quality[key] = list_of_functions_sum[key] / list_of_functions_number[key]
            # Invert so that lower mean score (better) yields higher quality.
            maximum = max(list_of_functions_quality.values())
            for key in list_of_functions_quality:
                list_of_functions_quality[key] = maximum - list_of_functions_quality[key]
                list_of_functions_product[key] = list_of_functions_quality[key] * list_of_functions_number[key]
                list_of_functions_rapport[key] = list_of_functions_number[key] * list_of_functions_number[key] / \
                    list_of_functions_sum[key]
            # Keep the top 8 of each ranking (ascending sort, take the tail).
            sorted_quality = sorted(list_of_functions_quality.iteritems(), key=operator.itemgetter(1))[-8:]
            sorted_product = sorted(list_of_functions_product.iteritems(), key=operator.itemgetter(1))[-8:]
            sorted_rapport = sorted(list_of_functions_rapport.iteritems(), key=operator.itemgetter(1))[-8:]
            print "\n### Sorted by Quality ####"
            for (key, _) in sorted_quality:
                print " ".join(
                    ["variable :", key, "number of apparitions =", str(list_of_functions_number[key]), "quality =",
                     str(list_of_functions_quality[key])])
            print "\n#####Sorted by Quantity*Quality#########"
            for (key, _) in sorted_product:
                print " ".join(
                    ["variable :", key, "number of apparitions =", str(list_of_functions_number[key]), "quality =",
                     str(list_of_functions_quality[key])])
            print "\n######Sorted by Rate######"
            for (key, _) in sorted_rapport:
                print " ".join(
                    ["variable :", key, "number of apparitions =", str(list_of_functions_number[key]), "quality =",
                     str(list_of_functions_quality[key])])
            self.file.write("\n### Sorted by Quality ####\n")
            for (key, _) in sorted_quality:
                self.file.write(" ".join(
                    ["variable :", key, "number of apparitions =", str(list_of_functions_number[key]), "quality =",
                     str(list_of_functions_quality[key]), "\n"]))
            self.file.write("\n#####Sorted by Quantity*Quality#########\n")
            for (key, _) in sorted_product:
                self.file.write(" ".join(
                    ["variable :", key, "number of apparitions =", str(list_of_functions_number[key]), "quality =",
                     str(list_of_functions_quality[key]), "\n"]))
            self.file.write("\n######Sorted by Rate######\n")
            for (key, _) in sorted_rapport:
                self.file.write(" ".join(
                    ["variable :", key, "number of apparitions =", str(list_of_functions_number[key]), "quality =",
                     str(list_of_functions_quality[key]), "\n"]))
            # Reset the accumulators for the next batch of generations.
            list_of_functions_number.clear()
            list_of_functions_sum.clear()
        self.file.close()
    def commitAndClose(self):
        """ Commits and closes """
        self.close()
    def insert(self, ga_engine):
        """ Accumulate, for every leaf variable of every individual in the
        population, its appearance count and the raw score of its tree. """
        global list_of_functions_number
        global list_of_functions_sum
        pop = ga_engine.getPopulation()
        for element in pop:
            score = element.getRawScore()
            for node in element.getAllNodes():
                if node.isLeaf():
                    # Leaf data is [number, variable]; index 1 is the name.
                    variable = node.getData()[1]
                    list_of_functions_number[variable] += 1
                    list_of_functions_sum[variable] += score
                # else :
                # variable = node.getData()
                # list_of_functions_number[variable]+=1
                # list_of_functions_sum[variable]+=score
|
__author__ = "Narwhale"
# urllib is a package; its submodules must be imported explicitly:
#   urllib.request  -- issuing requests
#   urllib.error    -- error types
#   urllib.response -- response objects
#   urllib.parse    -- URL parsing / encoding
import urllib.parse

# BUG FIX: the function is urlencode (not "urencode"), and bytes() needs an
# explicit encoding when converting from str.
data = bytes(urllib.parse.urlencode({'hello': 'world'}), encoding='utf8')
|
import sys
from data import Data
from flights import *
from statistics import *
def main():
    """Load the airports dataset and print the answers to questions 1 and 2."""
    # path = argv[1]
    # features = argv[2]
    dataset_path = 'Airports.csv'  # TODO: take from argv before handing in
    wanted_features = ['Origin_airport', 'Destination_airport', 'Flights', 'Distance', 'Seats', 'Passengers']
    q1_stat_features = ['Distance', 'Flights', 'Passengers', 'Seats']
    q1_airport_letters = ['D', 'A', 'T', 'S', 'C', 'I', 'E', 'N']
    stat_funcs = [mean, median]

    # Load and narrow the raw data to the columns we care about.
    raw = Data(dataset_path)
    raw.select_features(wanted_features)
    flights = Flights(raw.data)

    # Question 1: statistics for flights whose origin starts with the letters.
    flights.filter_by_airport_names('Origin_airport', q1_airport_letters)
    print("Question 1:")
    flights.print_details(q1_stat_features, stat_funcs)

    # Question 2: empty seats and "bad" long flights.
    print("\nQuestion 2:")
    flights.compute_empty_seats()
    flights.count_bad_flights(3000)


if __name__ == "__main__":
    main()
|
a = [1, 3, 5, 7]
b = [2, 4, 6, 8]
c = [11, 12, 13]
d = [11, 21, 31]
# e = {[2, 3]: 2, [4, 5]: 3}  # invalid: lists are unhashable, cannot be dict keys
e = {2: 3, 3: 4}
f = {4: 5, 5: 6}
# g = {e: 10, f: 20}  # invalid: dicts are unhashable, cannot be dict keys


def is_leap(year):
    """Return True if *year* is a leap year in the Gregorian calendar."""
    # BUG FIX: the original ignored the century exception -- years divisible
    # by 400 (e.g. 2000) ARE leap years.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


if __name__ == '__main__':
    a = int(input('please input a year '))
    if is_leap(a):
        print(a, "is 闰年")
    else:
        print(a, "is not 闰年")
|
import random
class Dice:
    """A polyhedral die with a fixed, validated number of faces."""

    def __init__(self, dice_type):
        self.dice_type = self.__validate_dice_type(dice_type)

    def __validate_dice_type(self, new_dice_type):
        # Accept only the face counts of real polyhedral dice.
        if new_dice_type in (3, 4, 6, 8, 10, 12, 20, 100):
            return new_dice_type
        raise ValueError("Wrong dice type")

    def roll(self):
        """Simulate one roll: a uniform random integer in 1..dice_type."""
        return random.randint(1, self.dice_type)
# Demo: roll a d100 twice.
e = Dice(100)
print(e.roll())
print(e.roll())
# NOTE(review): 7 is not an allowed face count, so this line deliberately
# demonstrates the ValueError raised by the validator (and stops the script).
e1 = Dice(7)
# Write a Dice class with a dice_type property holding the number of faces. A die may be 3, 4, 6, 8, 10, 12, 20 or 100-sided. Remember to check whether the parameter value is valid; if not, raise a ValueError.
# Write a roll() method that draws a number in the range 1..dice_type, i.e. simulates a dice roll.
class Constants:
    """Filesystem locations and flags for the ssl-ecg experiments."""
    # local cache for intermediate artifacts
    cache_base_path = "/Users/joergsimon/Documents/phd/HELENA/ssl-ecg/cache/"
    # dataset root; alternate machines kept below, commented out
    data_base_path = "/Volumes/knownew/600 Datasets/human-telemetry/other_datasets_joerg/"
    # data_base_path = "/Users/joergsimon/Documents/work/datasets_cache/
    # data_base_path = "/home/jsimon/Desktop/knownew/600 Datasets/human-telemetry/other_datasets_joerg/"
    # where trained model checkpoints/data are written
    model_base_path = "/Users/joergsimon/Documents/phd/HELENA/ssl-ecg/model_data/"
    # presumably toggles ray-based parallelism elsewhere in the project -- TODO confirm
    use_ray = True
import numpy as np
import tensorflow as tf
from constants import overlap_thresh, max_boxes, anchor_size as s, feature_size, real_image_height, real_image_width
def non_max_suppression_fast(boxes, probs):
    """Greedy non-maximum suppression over axis-aligned boxes.

    boxes: array of [x1, y1, x2, y2] rows; probs: matching per-box scores.
    Repeatedly keeps the highest-scoring remaining box and discards every
    remaining box whose IoU with it exceeds `overlap_thresh` (module
    constant), stopping after `max_boxes` picks. Returns the picked boxes
    (cast to int) and their probabilities.
    """
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = (x2 - x1) * (y2 - y1)
    # sanity check: every box must have positive width and height
    np.testing.assert_array_less(x1, x2)
    np.testing.assert_array_less(y1, y2)
    picked_box_ids = []
    ids_sorted = np.argsort(probs)  # ascending, so the best candidate is last
    while (len(ids_sorted) > 0) & (len(picked_box_ids) < max_boxes):
        current_id = ids_sorted[-1]
        ids_sorted = ids_sorted[:-1]
        picked_box_ids.append(current_id)
        # intersection rectangle of the picked box with every remaining box
        xx1_intersection = np.maximum(x1[current_id], x1[ids_sorted])
        yy1_intersection = np.maximum(y1[current_id], y1[ids_sorted])
        xx2_intersection = np.minimum(x2[current_id], x2[ids_sorted])
        yy2_intersection = np.minimum(y2[current_id], y2[ids_sorted])
        ww_intersection = np.maximum(0, xx2_intersection - xx1_intersection)
        hh_intersection = np.maximum(0, yy2_intersection - yy1_intersection)
        area_intersection = ww_intersection * hh_intersection
        area_union = area[current_id] + area[ids_sorted] - area_intersection
        overlap = area_intersection / area_union  # division of ints resulting in a float
        # drop every remaining box that overlaps the picked one too much
        ids_to_delete = np.where(overlap > overlap_thresh)[0]
        ids_sorted = np.delete(ids_sorted, ids_to_delete)
    picked_boxes = boxes[picked_box_ids].astype("int")
    picked_probs = probs[picked_box_ids]
    return picked_boxes, picked_probs
def get_boxes(rpn_input):
    """Turn raw RPN logits into NMS-filtered anchor boxes plus probabilities.

    rpn_input: a batch of exactly one feature map of per-cell objectness
    logits. One square anchor of size `s` (module constant) is laid on every
    feature-map cell, clipped to the map; sigmoid turns logits into
    probabilities, then NMS filters the candidates.
    Returns (boxes, probs) as produced by non_max_suppression_fast.
    """
    # In this case, we only need one anchor and one anchor size. But faster-rcnn can use many
    anchor_sizes = [s]
    anchor_ratios = [[1, 1]]
    assert rpn_input.shape[0] == 1  # single-image batches only
    rpn_layer = tf.map_fn(lambda x: tf.math.sigmoid(x), rpn_input).numpy()
    (rows, cols) = rpn_layer.shape[1:3]
    (rows, cols) = (int(rows), int(cols))
    curr_layer = 0
    # A[:, r, c, k] holds [x1, y1, x2, y2] of anchor k centred on cell (r, c)
    A = np.zeros((4, rpn_layer.shape[1], rpn_layer.shape[2], rpn_layer.shape[3]))
    for anchor_size in anchor_sizes:
        for anchor_ratio in anchor_ratios:
            anchor_x = (anchor_size * anchor_ratio[0])
            anchor_y = (anchor_size * anchor_ratio[1])
            X, Y = np.meshgrid(np.arange(cols), np.arange(rows))
            A[0, :, :, curr_layer] = np.maximum(X - anchor_x//2, 0)
            A[1, :, :, curr_layer] = np.maximum(Y - anchor_y//2, 0)
            A[2, :, :, curr_layer] = np.minimum(X + anchor_x//2, cols) # The upper border can be equal to cols/row,
            A[3, :, :, curr_layer] = np.minimum(Y + anchor_y//2, rows) # in which case the box reaches the border of the mapping
            curr_layer += 1
    # flatten to (n_boxes, 4) boxes and the matching (n_boxes,) probabilities
    all_boxes = np.reshape(A.transpose((0, 3, 1, 2)), (4, -1)).transpose((1, 0))
    all_probs = np.reshape(rpn_layer.transpose((0, 3, 1, 2)), (-1))
    boxes, probs = non_max_suppression_fast(all_boxes, all_probs)
    return boxes, probs
def get_labels_boxes(boxes, target):
    """Label every box with the most frequent non-zero class inside it.

    target is a 2-D class map; a box covering only zeros gets label 0.
    Returns one label per box, in order.
    """
    labels_boxes = []
    for x1, y1, x2, y2 in boxes:
        region = np.reshape(target[y1:y2, x1:x2], (-1))
        region = region[region != 0]          # background (0) never wins a vote
        if len(region) == 0:
            labels_boxes.append(0)
            continue
        classes, occurences = np.unique(region, return_counts=True)
        labels_boxes.append(classes[np.argmax(occurences)])
    return labels_boxes
def get_final_box(b, regr, limit_border=True):
    """Apply regression offsets to box *b* and return rounded int corners.

    regr = (dx, dy, dw, dh): a centre shift plus an additive width/height
    change (additive rather than the log-scale correction of canonical
    faster-rcnn -- better suited to this task).
    """
    cx = (b[2] + b[0]) / 2 + regr[0]
    cy = (b[3] + b[1]) / 2 + regr[1]
    w = (b[2] - b[0]) + regr[2]
    h = (b[3] - b[1]) + regr[3]
    x1 = int(round(cx - w / 2))
    x2 = int(round(cx + w / 2))
    y1 = int(round(cy - h / 2))
    y2 = int(round(cy + h / 2))
    # Evalutation is based on the center of the box, in which case we do not want to limit the border
    if limit_border:
        x1 = max(x1, 0)
        y1 = max(y1, 0)
        x2 = min(x2, feature_size)
        y2 = min(y2, feature_size)
    return x1, y1, x2, y2
def get_boxes_precision(boxes, regression_values, target):
    """For every regressed box, count [non-zero pixels, total pixels] in target.

    Returns a list of [non_zero_area, total_area] pairs, one per box, from
    which a precision ratio can be computed by the caller.
    """
    precision = []
    for box, regr in zip(boxes, regression_values):
        x1, y1, x2, y2 = get_final_box(box, regr)
        flat = np.reshape(target[y1:y2, x1:x2], (-1))
        total_area = len(flat)
        non_zero_area = len(np.delete(flat, np.where(flat == 0)))
        precision.append([non_zero_area, total_area])
    return precision
def get_box_true_mask(boxes, whole_mask):
    """For each feature-space box, build a +1/-1 mask over the image region.

    Each box is scaled from feature-map coordinates to real-image
    coordinates. Pixels that share the instance value found at the box
    centre become +1, every other pixel -1.
    """
    masks = []
    for b in boxes:
        # scale feature-map coordinates up to image coordinates
        x1 = b[0] * real_image_width // feature_size
        y1 = b[1] * real_image_height // feature_size
        x2 = b[2] * real_image_width // feature_size
        y2 = b[3] * real_image_height // feature_size
        mask = whole_mask[y1:y2, x1:x2]
        center_value = whole_mask[(y1 + y2) // 2, (x1 + x2) // 2]
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        mask = np.array(mask == center_value, dtype=int)
        mask = (2 * mask) - 1  # map {0, 1} -> {-1, +1}
        masks.append(mask)
    return masks
|
import unittest
import json
from elasticsearch import helpers, Elasticsearch, TransportError
from flask import current_app as app
from app.main import db
from app.test.base import BaseTestCase
class TestGlossaryBlueprint(BaseTestCase):
    """Integration tests for the glossary Elasticsearch index and the
    /text/glossary/ HTTP endpoints. Requires a live Elasticsearch at
    app.config['ELASTICSEARCH_URL']."""
    maxDiff = None  # show full diffs on mapping mismatches
    def setUp(self):
        """Recreate the glossary index from scratch with the mapping file."""
        super().setUp()
        es = Elasticsearch(app.config['ELASTICSEARCH_URL'])
        # ignore 404 (index absent) and 400 on delete so setUp is idempotent
        es.indices.delete(index=app.config['ELASTICSEARCH_GLOSSARY'], ignore=[400, 404])
        es.indices.create(index=app.config['ELASTICSEARCH_GLOSSARY'])
        es.indices.put_mapping(
            body=json.load(open('./elasticsearch/alegre_glossary.json')),
            index=app.config['ELASTICSEARCH_GLOSSARY']
        )
    def test_glossary_mapping(self):
        """The live index mapping must equal the mapping file on disk."""
        es = Elasticsearch(app.config['ELASTICSEARCH_URL'])
        mapping = es.indices.get_mapping(
            index=app.config['ELASTICSEARCH_GLOSSARY']
        )
        self.assertDictEqual(
            json.load(open('./elasticsearch/alegre_glossary.json')),
            mapping[app.config['ELASTICSEARCH_GLOSSARY']]['mappings']
        )
    def test_glossary_queries(self):
        """Bulk-load the fixture terms and exercise direct ES queries."""
        es = Elasticsearch(app.config['ELASTICSEARCH_URL'])
        success, _ = helpers.bulk(es,
            json.load(open('./app/test/data/glossary.json')),
            index=app.config['ELASTICSEARCH_GLOSSARY']
        )
        self.assertTrue(success)
        # refresh so the just-indexed documents are searchable immediately
        es.indices.refresh(index=app.config['ELASTICSEARCH_GLOSSARY'])
        result = es.search(
            index=app.config['ELASTICSEARCH_GLOSSARY'],
            body={
                "query": {
                    "simple_query_string": {
                        "fields": [ "en" ],
                        "query": "talking"
                    }
                }
            }
        )
        self.assertEqual("Por que minha mãe conversa com a TV?", result['hits']['hits'][0]['_source']['pt'])
        # phrase match + nested context filter + existence filter on "pt"
        result = es.search(
            index=app.config['ELASTICSEARCH_GLOSSARY'],
            body={
                "_source": ["pt"],
                "query": {
                    "bool": {
                        "must": [
                            {
                                "match_phrase": { "en": "mothers talking" }
                            },
                            {
                                "nested": {
                                    "path": "context",
                                    "query": {
                                        "bool": {
                                            "must": [
                                                {
                                                    "match": {
                                                        "context.user": "ccx"
                                                    }
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        ],
                        "filter": [
                            { "exists": { "field": "pt" } }
                        ]
                    }
                }
            }
        )
        self.assertEqual("Por que minha mãe conversa com a TV?", result['hits']['hits'][0]['_source']['pt'])
    def test_glossary_api(self):
        """Index the fixture terms through the HTTP API, then query them."""
        with self.client:
            for term in json.load(open('./app/test/data/glossary.json')):
                response = self.client.post('/text/glossary/', data=json.dumps(term), content_type='application/json')
                result = json.loads(response.data.decode())
                self.assertEqual('created', result['result']['result'])
            response = self.client.get(
                '/text/glossary/',
                data=json.dumps({
                    "query": {
                        "simple_query_string": {
                            "fields": [ "en" ],
                            "query": "talking"
                        }
                    }
                }),
                content_type='application/json'
            )
            result = json.loads(response.data.decode())
            self.assertEqual("Por que minha mãe conversa com a TV?", result['result']['hits']['hits'][0]['_source']['pt'])
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
|
#!/usr/bin/python3
import sys, os
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QDialog, QApplication
from sessiondialog import SessionDialog
def main():
    """Regenerate the weight plot, then run the Qt session dialog."""
    # refresh the plot data before the GUI reads it
    os.system("./generate_weight_plot")
    application = QApplication(sys.argv)
    dialog = SessionDialog()
    dialog.show()
    # propagate the Qt event-loop exit code to the shell
    sys.exit(application.exec_())


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from typing import Tuple, Any
import aiomysql
import asyncio
import json
class Database:
    """
    Wrapper around the bot's shared aiomysql connection pool.

    Every method is a classmethod operating on the single class-level pool,
    so the class is used without instantiation.
    """
    __slots__ = []
    pool = None

    @classmethod
    async def _make(cls, **credentials):
        """
        Create the connection pool from the given MySQL credentials.

        :param credentials: MySQL database credentials.
        """
        cls.pool = await aiomysql.create_pool(**credentials, autocommit=True)

    @classmethod
    async def fetchone(cls, query, *data):
        """
        Run *query* as a prepared statement with *data* and fetch one row.

        Returns None when nothing matched, the lone column value for
        single-column rows, and the whole row tuple otherwise.

        :param query: SQL query to execute
        :param data: Arguments for prepared statements in query
        :return: Fetched row from database
        """
        async with cls.pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(query, data)
                row = await cur.fetchone()
        if row is not None and len(row) == 1:
            return row[0]
        return row

    @classmethod
    async def fetchall(cls, query, *data):
        """
        Run *query* as a prepared statement with *data* and fetch every row.

        :param query: SQL query to execute
        :param data: Arguments for prepared statements in query
        :return: Fetched rows from database
        """
        async with cls.pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(query, data)
                rows = await cur.fetchall()
        return rows

    @classmethod
    async def execute(cls, query, *data) -> int:
        """
        Execute *query* once with *data*; return the affected row count.

        :param query: SQL query to execute
        :param data: Arguments for prepared statements in query
        """
        async with cls.pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(query, data)
                return cur.rowcount

    @classmethod
    async def executemany(cls, query, *data_groups: Tuple[Any, ...]) -> int:
        """
        Execute *query* once per tuple in *data_groups*; return the row count.

        :param query: SQL query to execute
        :param data_groups: List of tuples of arguments to use in prepared
        statements
        """
        async with cls.pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.executemany(query, data_groups)
                return cur.rowcount
with open("config.json") as f:
    # need to load the config file again to get credentials
    config = json.load(f)
# asyncio.run creates and closes a fresh event loop for this one-shot
# startup call; asyncio.get_event_loop() outside a running loop has been
# deprecated since Python 3.10.
asyncio.run(
    Database._make(**config["db_credentials"])
)
|
import cv2
import uuid
from model import face_model as fm
from controller.face_detector import FaceDetector
from controller.worker import Worker
from queue import Queue, Empty
class FrameController:
    """Per-frame pipeline: detect faces, correlate them with the faces seen
    in previous frames, and label them with recognition results produced
    asynchronously by worker threads."""
    def __init__(self, sim_threshold=0.54, num_workers=2, max_queue_size=10, frame_proc_freq=3):
        # detection runs only on every `frame_proc_freq`-th frame
        self.frame_counter = 0
        self.frame_proc_freq = frame_proc_freq
        # minimum similarity for two detections to count as the same person
        self.sim_threshold = sim_threshold
        # face_id -> face object currently visible in the stream
        self.present_faces = dict()
        self.face_det = FaceDetector()
        # unrecognised faces go out on request_queue; workers answer on response_queue
        self.request_queue = Queue(maxsize=max_queue_size)
        self.response_queue = Queue(maxsize=max_queue_size)
        self.workers = list()
        for i in range(num_workers):
            worker = Worker(self.request_queue, self.response_queue)
            worker.start()
            self.workers.append(worker)
    def stop(self):
        """Shut the workers down: one None sentinel per worker, then join."""
        for i in range(len(self.workers)):
            self.request_queue.put(None)
        for worker in self.workers:
            worker.join()
    def clear_face_list(self):
        """Forget every currently tracked face."""
        self.present_faces.clear()
    def process_frame(self, frame):
        """Run one frame through the pipeline and return it RGB-converted,
        with rectangles and labels drawn on every tracked face."""
        # only detect on every frame_proc_freq-th frame; tracking state is
        # reused in between
        if self.frame_counter % self.frame_proc_freq == 0:
            faces = self.face_det.detect_faces(frame)
            self.face_correlation(faces)
            self.frame_counter = 0
        self.frame_counter += 1
        self.get_results()
        self.label_faces(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return frame
    def face_correlation(self, new_faces):
        """Match freshly detected faces against the tracked ones; genuinely
        new faces get a fresh id and are queued for recognition."""
        new_faces_dict = dict()
        for new_face in new_faces:
            # find the best-matching face we are already tracking
            max_similarity, joint_index = 0, ''
            for key, present_face in self.present_faces.items():
                similarity = self.face_det.compare_faces(new_face, present_face)
                if similarity > max_similarity:
                    max_similarity = similarity
                    joint_index = key
            if max_similarity < self.sim_threshold:
                # unseen face: assign an id and hand it to the workers
                new_face.face_id = uuid.uuid4().hex
                self.request_queue.put(new_face)
            else:
                # same person as before: carry over id, identity and state
                joint_face = self.present_faces[joint_index]
                new_face.face_id = joint_face.face_id
                new_face.identity = joint_face.identity
                new_face.state = joint_face.state
            new_faces_dict[new_face.face_id] = new_face
        self.present_faces = new_faces_dict
    def label_faces(self, frame):
        """Draw a colour-coded rectangle and caption for every tracked face:
        red = unknown, green = recognised, blue = still processing."""
        for face in self.present_faces.values():
            (x, y, w, h) = face.coordinates
            if face.state == fm.UNKNOWN:
                color = (0, 0, 255)
                label = 'Unknown person'
            elif face.state == fm.RECOGNIZED:
                color = (0, 255, 0)
                label = '{} {}'.format(face.identity.name, face.identity.surname)
            else:
                color = (255, 0, 0)
                label = 'Processing...'
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 1)
            cv2.putText(frame, label, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 1)
    def get_results(self):
        """Drain the response queue without blocking and merge recognition
        results into the faces that are still being tracked."""
        while True:
            try:
                face = self.response_queue.get_nowait()
                if face.face_id in self.present_faces:
                    joint_face = self.present_faces[face.face_id]
                    joint_face.identity = face.identity
                    joint_face.state = fm.RECOGNIZED if face.identity is not None else fm.UNKNOWN
            except Empty:
                break
|
from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.models import User
from .models import Messages, MessageFeatures
from django.db.models import Q, Max, Count
from django.http import Http404, JsonResponse, HttpResponse
from django.views.decorators.http import require_safe, require_http_methods
from django.contrib.auth.decorators import login_required
from .crypto import encrypt
from .forms import ImageMessageForm
from PIL import Image
@require_safe
@login_required
def view_message_logs(request):
    """Render the chat-log sidebar: one entry per conversation partner,
    most-recent activity first, each with its unseen-message count.

    NOTE(review): AJAX-only -- a non-AJAX request falls through and the view
    returns None, which Django reports as an error; confirm that is intended.
    """
    if request.is_ajax():
        # every conversation container involving the current user that
        # actually holds messages
        chat_logs_users = Messages.objects.filter( Q(sender=request.user)|
                                            Q(receiver=request.user)
                                        ).exclude(
                                            messages=None
                                        )
        # one row per (sender, receiver) pair, ordered by newest message id
        chat_logs = chat_logs_users.values('sender','receiver').annotate(
                        messages__id__max=Max('messages__id')
                    ).order_by('-messages__id__max')
        chat_logs_final = chat_logs
        # each conversation appears twice (A->B and B->A); drop the older copy
        for i in range(len(chat_logs)):
            sender = chat_logs[i]['sender']
            receiver = chat_logs[i]['receiver']
            for j in range(i+1,len(chat_logs)):
                if chat_logs[j]['sender'] == receiver and chat_logs[j]['receiver'] == sender:
                    chat_logs_final = chat_logs_final.exclude( sender=receiver, receiver=sender)
                    break
        # attach the number of unseen incoming messages per partner
        for i in chat_logs_final:
            sender = User.objects.get(id=i['sender'])
            if sender != request.user:
                unseen_count = Messages.objects.filter( sender=sender,
                                    receiver=request.user
                                ).first().messages.filter(seen=False).count()
            else:
                unseen_count = 0
            i['unseen_count'] = unseen_count
        return render(request, 'messaging/message_logs.html', {'chat_logs' : chat_logs_final })
@require_safe
@login_required
def handle_unseen_messages(request):
    """Mark every message from the sender given by ?id= to the current user as seen."""
    if request.is_ajax():
        sender_id = request.GET.get('id')
        sender = User.objects.get(id=sender_id)
        thread = Messages.objects.filter(sender=sender, receiver=request.user).first()
        for message in thread.messages.all():
            message.seen = True
            message.save()
        return JsonResponse({ 'message' : 'success' })
@require_safe
@login_required
def handle_message_liking(request):
    """Toggle the `liked` flag on the message given by ?id=."""
    if request.is_ajax():
        message = MessageFeatures.objects.get(id=request.GET.get('id'))
        message.liked = not message.liked
        message.save()
        return JsonResponse({ 'message' : 'success' })
@require_safe
@login_required
def view_messages(request):
    """Render the full message history between the current user and the user
    given by ?id=, ordered by timestamp.

    Messages live in two containers (user->other and other->user); whichever
    exist are merged with a queryset union. AJAX-only.
    """
    if request.is_ajax():
        id = request.GET.get('id')
        other = User.objects.get(id=id)
        requested_chat1 = Messages.objects.filter( sender=request.user,
                                receiver=other
                            ).first()
        requested_chat2 = Messages.objects.filter( sender=other,
                                receiver=request.user
                            ).first()
        if requested_chat1 and requested_chat2:
            requested_chat1 = requested_chat1.messages.all().order_by('time_stamp')
            requested_chat2 = requested_chat2.messages.all().order_by('time_stamp')
            # union keeps messages from both directions of the conversation
            requested_chat = requested_chat1|requested_chat2
        elif requested_chat1:
            requested_chat = requested_chat1.messages.all().order_by('time_stamp')
        elif requested_chat2:
            requested_chat = requested_chat2.messages.all().order_by('time_stamp')
        else:
            # no conversation yet: render an empty queryset
            requested_chat = MessageFeatures.objects.none()
        return render(request, 'messaging/chats_requested.html', { 'other' : other,
                                'messages' : requested_chat
                            })
@require_safe
@login_required
def view_typing_box(request):
    """Render the chat typing box (with image-upload form) for the user given by ?id=."""
    if request.is_ajax():
        context = {
            'id' : request.GET.get('id'),
            'image_form' : ImageMessageForm(),
        }
        return render(request, 'messaging/typing_box.html', context)
@require_http_methods(['POST'])
@login_required
def send_message(request, id):
    """Encrypt the posted text and store it as a message to user `id`.

    The cipher only handles 63 characters at a time, so longer texts are
    split into 63-char chunks, encrypted chunk-wise and re-joined.
    Responds with {'response': 'Sent'} or {'response': 'Failed'}.
    """
    if request.is_ajax():
        text = request.POST.get('text')
        if(len(text) > 63):
            inp = [text[i: i+63] for i in range(0, len(text), 63)]
            enc = ''.join([encrypt(s) for s in inp])
        else:
            enc = encrypt(text)
        person = User.objects.get(id=id)
        try:
            Messages.add_message(request.user, person, text=enc)
            response = 'Sent'
        # BUG FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the original best-effort intent.
        except Exception:
            response = 'Failed'
        return JsonResponse({'response' : response })
@require_http_methods(['POST'])
@login_required
def handle_image_messages(request, id):
    """Validate the uploaded image form and store it as an image message to user `id`."""
    if request.is_ajax():
        form = ImageMessageForm(request.POST, request.FILES)
        if not form.is_valid():
            response = 'Failed'
        else:
            recipient = User.objects.get(id=id)
            response = 'Sent'
            Messages.add_message(
                request.user,
                recipient,
                message_type='i',
                image=form.cleaned_data['image']
            )
        return JsonResponse({'response' : response })
|
import datetime
import fileinput
import logging
import os
import socket
import ssl
import time
from ssl_expiry import *
from sendOutlookMail import *
# initialize log file
#logging.basicConfig(filename='CertificateChecker.log', format='%(asctime)s %(message)s: %(levelname)s', filemode='w', level=logging.DEBUG)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', filemode='w', level=logging.DEBUG)
logging.info("Start Program")
# Read in the Certificate target file
# each line looks like "host[:port];warn_days;mail_recipient" (see the loop below)
filename = "input.txt"
if not os.path.isfile(filename):
    logging.error("{} not found. Please make sure it is in the same directory".format(filename))
    exit()
# utf-8-sig strips a BOM that Windows editors tend to prepend
with open(filename, encoding='utf-8-sig') as f:
    lines = (line.strip() for line in f)
    # ignore blank lines with this generator expression
    lines = (line for line in lines if line)
    # ignore comments (inline comments might be a problem!)
    lines = (line for line in lines if not line.startswith("#"))
    # remove the https from the urls, maybe add one for http
    lines = (line.replace("https://","") for line in lines)
    # create list element out of the generator
    lines = list(lines)
# call ssl checker and get remaining days
for line in lines:
    # each entry: "host[:port];days_before_expiry_to_warn;mail_recipient"
    arg = line.split(';')
    logging.info("Start to check Certificate for {}".format(arg[0]))
    # get the port number if necessary
    try:
        hostname,port = arg[0].split(':')
        port = int(port)
    except ValueError:
        # BUG FIX: this line used the undefined name `logger`, raising a
        # NameError for every host without an explicit port.
        logging.debug("No explicit port found")
        hostname = arg[0]
        port = 443
    mailRecipient = arg[2]
    daysFile = int(arg[1])
    # reset flag
    flag = False
    # now get remaining days
    try:
        date = ssl_expiry_datetime(hostname, port)
        rem_days = int((date - datetime.datetime.utcnow()).days)
        flag = True
    except Exception as e:
        # connection/handshake failure: mail the error instead of a warning
        textbody = "Error for url '{}': \n \t {}".format(hostname, e)
        logging.error(textbody)
        check_and_send_mail(mailRecipient, textbody)
        #?check_and_send_mail('manuel.kramer01@sap.com', textbody)
    # compare days and send email if they disagree
    # according to mail smaller than 60, check if
    if flag:
        if rem_days < daysFile:
            textbody = "Warning for url '{}': \n \t Certificate will expire in {} days. \n \t Date of expiration: {}\n \t Warning for expiration check is set to {} days.".format(hostname, rem_days, date, daysFile)
            logging.warning(textbody)
            check_and_send_mail(mailRecipient, textbody)
        else:
            logging.debug("No mail sent because certificate won't expire within {} days. Certificate will be valid until {}".format(daysFile, date))
logging.info("End Program")
|
# session management
import random
import pickle
import settings
r = settings.r
_sidChars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_defaultTimeout=30*60
_defaultCookieName='gsid'
class Session(dict):
    """Dict-backed server-side session stored in redis, keyed by a random
    session id that is round-tripped through a cookie."""
    def __init__(self,request,response,name=_defaultCookieName,timeout=_defaultTimeout):
        """
        request response --- parent's handler
        name --- cookie name
        timeout --- session's keep time (seconds)
        """
        self.request = request
        self.response = response
        self._timeout = timeout
        self._name = name
        self._new = True
        self._invalid = False
        dict.__init__(self)
        cookie_sid = request.COOKIES.get(self._name,None)
        if cookie_sid:
            self._sid = cookie_sid
            data = r.get(self._sid)
            if data:
                self.update(pickle.loads(data))
                r.set(self._sid,data)
                # BUG FIX: the TTL was set on self._name (the cookie *name*,
                # not a redis key), so sessions never actually expired.
                r.expire(self._sid,self._timeout)
                self._new = False
                return
        # create a new session ID
        # NOTE(review): 8 chars from `random` is guessable; consider
        # secrets.token_urlsafe() for production use.
        self._sid = ''.join(random.choice(_sidChars) for _ in range(8))
        self.response.set_cookie(self._name,self._sid,path='/')
    def save(self):
        """Persist the session dict to redis and refresh its TTL."""
        if not self._invalid:
            r.set(self._sid,pickle.dumps(self.copy()))
            # BUG FIX: expire the session key, not the cookie name
            r.expire(self._sid,self._timeout)
    def is_new(self):
        """True when this request created the session (no prior redis data)."""
        return self._new
    def invalidate(self):
        """Delete the session server-side and expire the client cookie."""
        self.response.set_cookie(self._name,'',expires=-100)
        r.delete(self._sid)
        self.clear()
        self._invalid=True
|
#!/usr/bin/env python
from ironicclient import client
import os
import subprocess
import sys
# Auth token and ironic endpoint come from the environment when available,
# otherwise fall back to the defaults rendered below.
if os.environ.get('OS_AUTH_TOKEN'):
    OS_AUTH_TOKEN = os.environ['OS_AUTH_TOKEN']
else:
    OS_AUTH_TOKEN = 'fake-token'
if os.environ.get('IRONIC_URL'):
    IRONIC_URL = os.environ['IRONIC_URL']
else:
    IRONIC_URL = 'http://{{ ironic_server }}:6385'
# NOTE: the {{ ... }} placeholders are jinja2 template variables filled in
# when this script is rendered by the ansible playbook.
FILENAME = "{{ lookup('env','HOME') }}/ironic_scripts/nodes.txt"
IRONIC_DRIVERS = ["pxe_ipmitool", "pxe_drac", "pxe_ilo"]
DEPLOY_KERNEL = "file://{{ tftp_path }}/ironic_images/{{ deploy_kernel }}"
DEPLOY_RAMDISK = "file://{{ tftp_path }}/ironic_images/{{ deploy_ramdisk }}"
USER_KERNEL = "file://{{ tftp_path }}/ironic_images/{{ user_kernel }}"
USER_RAMDISK = "file://{{ tftp_path }}/ironic_images/{{ user_ramdisk }}"
USER_IMAGE = "file://{{ tftp_path }}/ironic_images/{{ user_image }}"
USER_IMAGE_CHECKSUM = "{{ user_image_checksum }}"
def generate_nodes_dict_from_file():
    """Parse FILENAME into a list of node dicts, one per whitespace-separated line.

    Exits the process with a message when the file is missing or malformed.
    """
    if not os.path.isfile(FILENAME):
        print('%s does not exists.' % FILENAME)
        sys.exit()
    nodes_dict = []
    with open(FILENAME, 'r') as f:
        try:
            for line in f.readlines():
                fields = line.strip().split()
                nodes_dict.append({
                    'name': fields[0],
                    'driver': fields[1],
                    'driver_address': fields[2],
                    'driver_username': fields[3],
                    'driver_password': fields[4],
                    'mac_address': fields[5],
                })
            return nodes_dict
        except IndexError:
            # a line had fewer than 6 fields (or the file was garbage)
            print('No contents or wrong keys in %s.' % FILENAME)
            sys.exit()
        except Exception:
            print('There are some problems parsing node text file.')
            sys.exit()
def do_create_node(ironic, node):
    """Register *node* with ironic and return the new node's uuid.

    Only the drivers listed in IRONIC_DRIVERS are supported. Their
    driver_info payloads are identical except for the credential key
    prefix, so the three copy-pasted branches are collapsed into one
    prefix lookup.
    """
    if node['driver'] not in IRONIC_DRIVERS:
        print('%s is not supported in tacoplay BM playbook now.' % node['driver'])
        sys.exit()
    # pxe_ipmitool -> ipmi_*, pxe_drac -> drac_*, pxe_ilo -> ilo_*
    prefix = {'pxe_ipmitool': 'ipmi', 'pxe_drac': 'drac', 'pxe_ilo': 'ilo'}[node['driver']]
    kwargs = {
        'name': node['name'],
        'driver': node['driver'],
        'driver_info': {
            prefix + '_address': node['driver_address'],
            prefix + '_username': node['driver_username'],
            prefix + '_password': node['driver_password'],
            'deploy_kernel': DEPLOY_KERNEL,
            'deploy_ramdisk': DEPLOY_RAMDISK,
        },
    }
    result = ironic.node.create(**kwargs)
    return result.uuid
def do_create_port(ironic, node, uuid):
    """Attach the node's MAC address as an ironic port on node *uuid*."""
    ironic.port.create(address=node['mac_address'], node_uuid=uuid)
def do_update_instance_info(ironic, uuid):
    """Patch the user image/kernel/ramdisk details onto node *uuid*."""
    additions = {
        'kernel': USER_KERNEL,
        'ramdisk': USER_RAMDISK,
        'image_source': USER_IMAGE,
        'image_checksum': USER_IMAGE_CHECKSUM,
        'root_gb': 10,
    }
    # build one JSON-patch "add" entry per instance_info field
    patch = [{"path": "/instance_info/%s" % key, "value": value, "op": "add"}
             for key, value in additions.items()]
    ironic.node.update(uuid, patch)
def do_update_node_state(ironic, uuid, state):
    """Drive node *uuid* through the given provision-state verb (e.g. 'manage', 'provide')."""
    ironic.node.set_provision_state(uuid, state)
def main():
    """Create and provision every node from FILENAME that ironic doesn't know yet."""
    creds = {'os_auth_token': OS_AUTH_TOKEN, 'ironic_url': IRONIC_URL}
    ironic = client.get_client(1, **creds)
    wanted_nodes = generate_nodes_dict_from_file()
    existing_names = [n.name for n in ironic.node.list()]
    for node in wanted_nodes:
        if node['name'] in existing_names:
            continue  # already registered -- leave it alone
        uuid = do_create_node(ironic, node)
        do_create_port(ironic, node, uuid)
        do_update_instance_info(ironic, uuid)
        # enroll -> manageable -> available
        do_update_node_state(ironic, uuid, 'manage')
        do_update_node_state(ironic, uuid, 'provide')


if __name__ == '__main__':
    main()
|
import random
HANGMANPIC='''
---------
I I
I
I
I
I
I
I
--------------''','''
---------
I I
O I
I
I
I
I
I
--------------''','''
---------
I I
O I
/ I
I
I
I
I
--------------''','''
---------
I I
O I
/l I
I
I
I
--------------''','''
---------
I I
O I
/l\ I
I
I
I
--------------''','''
---------
I I
O I
/l\ I
A I
I
I
--------------'''
#==============================
# word bank: category name -> list of guessable words
a={'first':'secretword dog fish cat happy '.split(),'second':'one kkbox three two four'.split()}
#=============================
def pick(a):
    """Draw a random word: choose a random category key of *a*, then a random word from it."""
    category = random.choice(list(a.keys()))
    words = a[category]
    return words[random.randint(0, len(words) - 1)]
#================================
def p(v,HANGMANPIC):
    """Play one round of hangman for the secret word *v*.

    *HANGMANPIC* is the tuple of gallows drawings; every wrong guess
    advances one picture, so the player gets len(HANGMANPIC) wrong
    guesses before losing.
    """
    n=0                    # number of wrong guesses so far
    blanks='_'*len(v)      # revealed letters; '_' marks an unknown position
    QQ=' '*6               # wrong letters guessed (fixed-width, 6 slots)
    while n<len(HANGMANPIC) :
        # show current progress, wrong guesses and the gallows
        print('正確的單字:',end='')
        for i in blanks:
            print(i+' ',end='')
        print()
        print('猜過的字:'+QQ,end='')
        print()
        print(HANGMANPIC[n])
        print('猜一個字阿~')
        c=input()
        c=c.lower()
        # input validation: repeated guess / length / non-letter
        if c in blanks:
            print('這個字你猜過了喔,再猜一次吧~')
        elif c in QQ:
            print('這個字你猜過了喔,再猜一次吧~')
        elif len(c)!=1:
            print('只能打一個字喔')
        elif c not in 'qwertyuiopasdfghjklzxcvbnm':
            print('要打英文字母喔')
        else:
            # reveal every position where the guess matches the secret word
            for i in range(len(v)):
                if v[i] in c:
                    blanks=blanks[:i]+v[i]+blanks[i+1:]
            # wrong guess: record the letter and advance the gallows
            if c not in v :
                QQ=QQ[:n]+c[0]+QQ[n+1:]
                n=n+1
        if n==len(HANGMANPIC):
            # out of guesses: show the final state and the loss message
            print('正確的單字:',end='')
            for i in blanks:
                print(i+' ',end='')
            print()
            print('猜過的字:'+QQ,end='')
            print()
            print('好可惜喔,沒能猜到。')
        if blanks==v :
            # every letter revealed: the player wins
            print('正確的單字:',end='')
            for i in blanks:
                print(i+' ',end='')
            print()
            print('猜過的字:'+QQ,end='')
            print()
            print('對阿就是 '+v+' 好強喔,答對了~')
            break
#=========== main program starts here ============
# Keep starting new rounds until the player enters 'n'.
play='y'
while play!='n' :
    print('HANGMAN')
    v=pick(a)
    p(v,HANGMANPIC)
    print()
    # typo fix: the prompt read "qiut"
    print('press n to quit')
    play=input().lower()
|
def max_subarray_len(arr, s):
    """Return the length of the longest contiguous subarray of *arr* summing to *s*.

    Prefix sums with a first-occurrence map give O(n) instead of the
    original O(n^2) double loop, and still handle negative numbers.
    Returns 0 when no subarray sums to s.
    """
    first_seen = {0: -1}   # prefix value -> earliest index it occurred at
    prefix = 0
    best = 0
    for j, x in enumerate(arr):
        prefix += x
        # a subarray (i, j] sums to s iff prefix[j] - prefix[i] == s
        if prefix - s in first_seen:
            best = max(best, j - first_seen[prefix - s])
        if prefix not in first_seen:   # keep the earliest index for max length
            first_seen[prefix] = j
    return best


def main():
    """Per test case: print n minus the longest subarray with sum s, or -1 if sum(arr) < s."""
    t = int(input())
    while t > 0:
        n, s = map(int, input().split())
        arr = list(map(int, input().strip().split()))[:n]
        res = max_subarray_len(arr, s)
        if sum(arr) < s:
            print(-1)
        else:
            print(n - res)
        t = t - 1


if __name__ == "__main__":
    main()
|
from pynput import mouse, keyboard
import logging
from time import sleep
# Module logger writing DEBUG-and-up records to mousemove.log.
logger = logging.getLogger('MouseMoveApp')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('mousemove.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
class MouseMove:
    """Keeps the workstation awake by periodically jiggling the mouse
    pointer and tapping the shift keys."""
    __slots__ = ('mouse_controller', 'keyboard_controller', 'interval')
    def __init__(self, interval_in_minutes=3):
        self.mouse_controller = mouse.Controller()
        self.keyboard_controller = keyboard.Controller()
        # minutes to sleep between activity bursts
        self.interval = interval_in_minutes
    def movemouse_event(self, pos):
        """Teleport the pointer to the (x, y) tuple *pos*."""
        self.mouse_controller.position = pos
    def shiftkey_event(self, flag=True):
        """Press, hold 2s and release a shift key: right shift when *flag*, else left."""
        if flag:
            self.keyboard_controller.press(keyboard.Key.shift_r)
            sleep(2)
            self.keyboard_controller.release(keyboard.Key.shift_r)
        else:
            self.keyboard_controller.press(keyboard.Key.shift)
            sleep(2)
            self.keyboard_controller.release(keyboard.Key.shift)
    def run(self):
        """Loop forever: every `interval` minutes wiggle the pointer and tap
        both shift keys. Runs until interrupted (e.g. Ctrl-C)."""
        try:
            self.movemouse_event((2, 2))
            logger.info('pointer moved to 2, 2')
            while True:
                sleep(60 * self.interval)
                logger.info('sleeping')
                for _ in range(0, 10):
                    self.movemouse_event((0, _*4))
                    sleep(1)
                logger.info('movements made 10 times')
                self.movemouse_event((2, 2))
                logger.info('pointer moved to 2, 2')
                self.shiftkey_event()
                logger.info('right shift pressed and released')
                self.shiftkey_event(False)
                logger.info('left shift pressed and released')
                self.shiftkey_event()
                logger.info('right shift pressed and released')
        except:
            # bare except is deliberate here: it also catches KeyboardInterrupt,
            # so both shift keys are always released before exit
            logger.info('app exit invoked')
            logger.info("Good bye Headshot")
            self.keyboard_controller.release(keyboard.Key.shift)
            self.keyboard_controller.release(keyboard.Key.shift_r)
            print("Good bye Headshot")
# Run with the default 3-minute interval; blocks until interrupted (Ctrl-C).
task = MouseMove()
task.run()
# *_*coding:utf-8 *_*
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from public.publicExcel import PublicExcel
class InsuranceOpera(object):
    """
    Read every test-case field from the excel workbook (sheet 0).

    Each getter returns one fixed column of the given row. All fifteen
    getters previously repeated the same self.excel.get_value(0, row, col)
    call; they now share the _get_cell helper.
    """
    def __init__(self,path):
        self.excel = PublicExcel(path)

    def _get_cell(self, row, col):
        # all test-case data lives on sheet 0 of the workbook
        return self.excel.get_value(0, row, col)

    # product name
    def get_productName(self,row):
        return self._get_cell(row, 1)

    # product code
    def get_productCode(self,row):
        return self._get_cell(row, 2)

    # coverage period
    def get_productValue(self,row):
        return self._get_cell(row, 3)

    # coverage type
    def get_productType(self,row):
        return self._get_cell(row, 4)

    # amount
    def get_productPrice(self,row):
        return self._get_cell(row, 5)

    # insured amount / plan name
    def get_productPlanName(self,row):
        return self._get_cell(row, 6)

    # health flag
    def get_productHealth(self,row):
        return self._get_cell(row, 7)

    # whether this case should run
    def get_productRun(self,row):
        return self._get_cell(row, 8)

    # request url
    def get_productUrl(self,row):
        return self._get_cell(row, 9)

    # request type
    def get_productRequest(self,row):
        return self._get_cell(row, 10)

    # expected result
    def get_productExpect(self,row):
        return self._get_cell(row, 11)

    # actual result
    def get_productActual(self,row):
        return self._get_cell(row, 12)

    # response time
    def get_productTime(self,row):
        return self._get_cell(row, 13)

    # written verdict (fail or pass)
    def get_productResult(self,row):
        return self._get_cell(row, 14)
if __name__ == '__main__':
    # smoke test: dump the coverage-period column of the first 40 rows.
    # NOTE: Python 2 print statement -- this module targets Python 2
    # (see the reload(sys)/setdefaultencoding header above).
    path = '../config/case.xls'
    opera = InsuranceOpera(path)
    for i in range(40):
        print opera.get_productValue(i)
from django import forms
from django.forms import (
ModelForm, Textarea
)
from .models import Review
class CustomerLoginForm(forms.Form):
    """Simple email/password login form with placeholder hints on both inputs."""
    email = forms.EmailField(
        label='Email',
        required=True,
        error_messages={'required': 'Please enter your Email'},
    )
    password = forms.CharField(
        label='Password',
        widget=forms.PasswordInput,
        required=True,
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder text shown inside the empty inputs
        self.fields['email'].widget.attrs['placeholder'] = "example@domain.com"
        self.fields['password'].widget.attrs['placeholder'] = "password"
class ReviewForm(ModelForm):
    """Model form for a product review; only the text body is user-editable."""
    class Meta:
        model = Review
        fields = ['review_content']
        widgets = {
            'review_content': Textarea(attrs={'cols': 80, 'rows': 5}),
        }
        # NOTE(review): `fields` already whitelists review_content, so this
        # exclude list looks redundant -- verify before removing.
        exclude = ('user', 'previous_review', 'product', 'update_date')
        help_texts = {
            'review_content':'Required.',
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder text shown inside the empty textarea
        self.fields[ 'review_content' ].widget.attrs[ 'placeholder' ]="Please enter your review here."
|
from django.template.loader import render_to_string
from django.conf import settings
from sendgrid.helpers.mail import Mail
from sendgrid import SendGridAPIClient
def send_init_pwd(user, password):
    """Email *user* their freshly generated initial password via SendGrid."""
    html = render_to_string(
        'send_init_pwd.html',
        context={'user': user, 'site_name': settings.APP_NAME, 'password': password},
    )
    mail = Mail(
        from_email=settings.DEFAULT_FROM_EMAIL,
        to_emails=user.email,
        subject='Init password',
        html_content=html,
    )
    SendGridAPIClient(settings.SENDGRID_API_KEY).send(message=mail)
def send_reset_pwd_token(user):
    """E-mail the user's password-reset token via SendGrid."""
    body = render_to_string(
        'send_reset_pwd_token.html',
        context={'user': user, 'site_name': settings.APP_NAME,
                 'token': user.reset_password_token})
    mail = Mail(
        from_email=settings.DEFAULT_FROM_EMAIL,
        to_emails=user.email,
        subject='Email token',
        html_content=body)
    SendGridAPIClient(settings.SENDGRID_API_KEY).send(message=mail)
|
import os
import re
import stat
import sys
import time
import urllib
import urllib.request
import zipfile
from selenium.webdriver import Chrome
# User's Reddit credentials
USERNAME = 'username'
PASSWORD = 'password'
def get_reddit_api_keys():
    """Drive a Chrome browser through Reddit's prefs/apps page to create a
    throw-away 'script' app and scrape its OAuth2 credentials.

    Returns:
        (access_token, secret): the personal-use-script id and secret
        scraped from the developed-apps panel.

    NOTE(review): this depends on the exact DOM of reddit.com at the time of
    writing (ids, xpaths, the fixed 4-second wait) -- any site change breaks
    it.  The find_element_by_* calls are the pre-Selenium-4 API.
    """
    pass_chrome_binary()
    os.environ['webdriver.chrome.driver'] = './chromedriver'
    driver = Chrome('./chromedriver')

    # Log in with the module-level credentials.
    driver.get('https://www.reddit.com/prefs/apps')
    driver.find_element_by_id("loginUsername").send_keys(USERNAME)
    driver.find_element_by_id("loginPassword").send_keys(PASSWORD)
    driver.find_element_by_xpath("//button[@class='AnimatedForm__submitButton'][@type='submit']").submit()

    # Create dummy app (fixed sleep gives the login redirect time to land).
    time.sleep(4)
    driver.refresh()
    driver.find_element_by_id("create-app-button").click()
    driver.find_element_by_xpath("//*[@id='create-app']/table/tbody/tr[1]/td/input").send_keys('test')
    driver.find_element_by_xpath("//*[@id='app_type_script']").click()
    driver.find_element_by_xpath("//*[@id='create-app']/table/tbody/tr[5]/td/textarea").send_keys('for DankMemeBot')
    driver.find_element_by_xpath("//*[@id='create-app']/table/tbody/tr[7]/td/input").send_keys('http://localhost:8080')
    driver.find_element_by_xpath("//*[@id='create-app']/button").submit()

    # Regex the OAuth2 access token and secret key out of the newest app
    # entry; [-1] picks the app just created.
    app_details = driver.find_elements_by_id("developed-apps")[-1].text
    access_token = re.findall('personal use script\\n(.+)\\n', app_details)[-1]
    secret = re.findall('\\nsecret(.+)\\n', app_details)[-1]
    return access_token, secret
def pass_chrome_binary():
    """Ensure an executable ./chromedriver binary exists.

    When the binary is missing, downloads the platform-specific ChromeDriver
    74.0.3729.6 archive from Google's storage bucket, unpacks it into the
    current directory and removes the archive.  Exits the process when no
    driver build exists for the host platform.  Always (re)marks the driver
    as executable for the current user.
    """
    # BUG FIX: the original tested ``os._exists('./chromedriver')``.
    # os._exists is a private os-module helper that checks whether a *name*
    # exists in os.__dict__, so it always returned False here and the driver
    # was re-downloaded on every single run.  os.path.exists is the real
    # filesystem check.
    if not os.path.exists('./chromedriver'):
        platform = sys.platform
        if platform == 'linux':
            system = 'linux64.zip'
        elif platform == 'darwin':
            system = 'mac64.zip'
        elif platform == 'win32':
            system = 'win32.zip'
        else:
            sys.exit('Error: no chromedriver available for your system')
        # BUG FIX: ``urllib.request`` requires importing the submodule
        # explicitly (added to the top-level imports); a bare ``import
        # urllib`` does not make urllib.request available.
        url = 'https://chromedriver.storage.googleapis.com/74.0.3729.6/chromedriver_' + system
        urllib.request.urlretrieve(url, './chromedriver.zip')
        with zipfile.ZipFile('./chromedriver.zip', 'r') as zip_ref:
            zip_ref.extractall('.')
        try:
            os.remove('./chromedriver.zip')
        except FileNotFoundError:
            print('Chrome driver zip file not found.')
    # Make sure the driver is executable whether freshly downloaded or not.
    os.chmod('./chromedriver', stat.S_IRWXU)
|
import prometheus_client
from prometheus_client import Counter
from prometheus_client import Gauge
from prometheus_client.core import CollectorRegistry
import psutil
import time
import datetime
import requests
import socket
import threading
from flask import Response, Flask
app = Flask(__name__)
class GetNetRate:
    """Samples per-NIC network throughput with psutil and pushes it to a
    Prometheus pushgateway every ``interval_time`` seconds from a
    background thread."""

    def __init__(self, interval_time):
        self.interval = interval_time  # sampling period in seconds
        self.hostname = socket.gethostname()  # host name, used as the 'instance' label
        self.registry = CollectorRegistry(auto_describe=False)  # Prometheus registry
        self.key_info = dict()  # NIC names seen at the previous sample
        self.old_recv = dict()  # cumulative bytes received per NIC at the previous sample
        self.old_sent = dict()  # cumulative bytes sent per NIC at the previous sample

    @staticmethod
    def get_key():
        """Return a (nic_names, recv_bytes, sent_bytes) snapshot from psutil.

        recv/sent map NIC name -> cumulative byte counters.
        """
        key_info = psutil.net_io_counters(pernic=True).keys()
        recv = dict()
        sent = dict()
        for k in key_info:
            recv.setdefault(k, psutil.net_io_counters(pernic=True).get(k).bytes_recv)
            sent.setdefault(k, psutil.net_io_counters(pernic=True).get(k).bytes_sent)
        return key_info, recv, sent

    def get_rate(self):
        """Generator yielding (nic_names, kbyte_in, kbyte_out) once per interval.

        Rates are the byte-counter deltas divided by the interval, in
        KByte/s rounded to two decimals.
        """
        while True:
            if not self.key_info:
                # First pass: seed the counters so the first yield has a delta.
                self.key_info, self.old_recv, self.old_sent = self.get_key()
            t = time.time()
            # Sleep to the next interval boundary, aligned to the wall clock.
            time.sleep(self.interval - (t % self.interval))
            print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
            self.key_info, now_recv, now_sent = self.get_key()
            net_in = dict()
            net_out = dict()
            for k in self.key_info:
                net_in.setdefault(k, float('%.2f' % ((now_recv.get(k) - self.old_recv.get(k))/(1024 * self.interval))))
                net_out.setdefault(k, float('%.2f' % ((now_sent.get(k) - self.old_sent.get(k))/(1024 * self.interval))))
            self.old_recv = now_recv
            self.old_sent = now_sent
            yield self.key_info, net_in, net_out

    @staticmethod
    def net_address():
        """Map adapter name -> IPv4 address.

        Adapters without an IPv4 address are collected in the 'no_ip' list;
        the loopback adapter 'lo' is skipped.  The Chinese sentinel strings
        ("无 ... 地址" = "no ... address") are runtime values and must stay
        as-is: the startswith("无") test below relies on them.
        """
        dic = psutil.net_if_addrs()
        net_dic = dict()
        net_dic['no_ip'] = []  # adapters that have no IPv4 address
        for adapter in dic:
            snicList = dic[adapter]
            mac = '无 mac 地址'
            ipv4 = '无 ipv4 地址'
            ipv6 = '无 ipv6 地址'
            for snic in snicList:
                if snic.family.name in {'AF_LINK', 'AF_PACKET'}:
                    mac = snic.address
                elif snic.family.name == 'AF_INET':
                    ipv4 = snic.address
                elif snic.family.name == 'AF_INET6':
                    ipv6 = snic.address
            # Keep the adapter when it is not loopback and not seen yet.
            if adapter not in net_dic and adapter != 'lo':
                if not ipv4.startswith("无"):  # a real IPv4 address was found
                    net_dic[adapter] = ipv4
                else:
                    net_dic['no_ip'].append(adapter)
        return net_dic

    def insert_registry(self):
        """Consume get_rate() forever, updating the gauges and pushing the
        registry to the pushgateway after every sample."""
        # Inbound traffic gauge (KByte/s).
        net_input = Gauge("network_traffic_input", self.hostname, ['adapter_name', 'unit', 'ip', 'instance'],
                          registry=self.registry)
        # Outbound traffic gauge (KByte/s).
        net_output = Gauge("network_traffic_output", self.hostname, ['adapter_name', 'unit', 'ip', 'instance'],
                           registry=self.registry)
        for key_info, net_in, net_out in self.get_rate():
            for key in key_info:
                net_addr = self.net_address()
                # Skip the loopback adapter and adapters without an IPv4.
                if 'lo' not in key and key not in net_addr['no_ip'] and key in net_addr:
                    net_input.labels(ip=net_addr[key], adapter_name=key, unit="KByte/s", instance=self.hostname).\
                        set(net_in.get(key))
                    net_output.labels(ip=net_addr[key], adapter_name=key, unit="KByte/s", instance=self.hostname).\
                        set(net_out.get(key))
            try:
                # Push to the in-cluster pushgateway; the printed message is a
                # runtime string ("sent one batch of NIC traffic data").
                requests.post("http://prometheus-gateway.kube-ops.svc.cluster.local:9091/metrics/job/network_traffic",
                              data=prometheus_client.generate_latest(self.registry))
                print("发送了一次网卡流量数据")
            except Exception as e:
                print(e)

    def run(self):
        """Start the sampling/push loop in a background thread (non-daemon)."""
        t = threading.Thread(target=self.insert_registry)
        t.start()
# Module-level side effect: sampling starts as soon as this module is
# imported, pushing NIC throughput every 5 seconds from a background thread.
interval = 5
o = GetNetRate(interval)
o.run()
@app.route("/metrics")
def api_response():
return Response(prometheus_client.generate_latest(o.registry), mimetype="text/plain")
@app.route('/')
def index():
    """Trivial liveness endpoint."""
    greeting = "Hello World"
    return greeting
if __name__ == '__main__':
    # Serve /metrics and / on all interfaces (Flask's default port, 5000).
    app.run(host="0.0.0.0")
|
def decimal_to_binary(numero):
    """Return the binary representation of a non-negative integer as a string.

    Builds the digit string least-significant-bit first by repeated division
    by 2, prepending each remainder.

    :param numero: non-negative integer to convert.
    :raises ValueError: if *numero* is negative.  (BUG FIX: the original
        loop never terminated for negative input, because ``-1 // 2 == -1``
        in Python's floor division.)
    """
    if numero < 0:
        raise ValueError("numero must be non-negative")
    binario = ''
    while True:
        binario = ('1' if numero % 2 else '0') + binario
        numero //= 2
        if numero == 0:
            break
    return binario


if __name__ == '__main__':
    # Guarded so importing this module does not block on input().  The
    # per-iteration debug print of the shrinking number was removed.
    numero = int(input("Escribe un numero decimal: "))
    print(decimal_to_binary(numero))
|
''' PyTorch backend '''
import json
import os
class ModelFactory:  # pylint: disable=too-few-public-methods
    ''' PyTorch backend model factory '''
    def open(self, model):
        '''Wrap the raw TorchScript graph in the JSON-export adapter.'''
        wrapped = _Model(model)
        return wrapped
class _Model:  # pylint: disable=too-few-public-methods
    def __init__(self, model):
        self.graph = _Graph(model)

    def to_json(self):
        ''' Serialize model to JSON message '''
        # Load the ONNX operator metadata shipped next to this module,
        # keyed as 'onnx::<op name>'.
        # NOTE(review): the loaded metadata is currently not referenced by
        # the returned message; loading is preserved for behavioural parity.
        metadata_file = os.path.join(os.path.dirname(__file__), 'onnx-metadata.json')
        with open(metadata_file, 'r', encoding='utf-8') as file:
            metadata = {'onnx::' + item['name']: item for item in json.load(file)}
        return {
            'signature': 'netron:pytorch',
            'format': 'TorchScript',
            'graphs': [ self.graph.to_json() ]
        }
class _Graph: # pylint: disable=too-few-public-methods
    """Adapter turning a TorchScript graph into Netron's JSON graph schema."""

    def __init__(self, graph):
        self.value = graph

    def to_json(self):
        """Serialize the wrapped torch graph to a JSON-compatible dict with
        'arguments', 'nodes', 'inputs' and 'outputs' lists."""
        import torch # pylint: disable=import-outside-toplevel
        graph = self.value
        json_graph = {
            'arguments': [],
            'nodes': [],
            'inputs': [],
            'outputs': []
        }
        # torch dtype -> schema type-name mapping.
        data_type_map = dict([
            [ torch.float16, 'float16'], # pylint: disable=no-member
            [ torch.float32, 'float32'], # pylint: disable=no-member
            [ torch.float64, 'float64'], # pylint: disable=no-member
            [ torch.int32, 'int32'], # pylint: disable=no-member
            [ torch.int64, 'int64'], # pylint: disable=no-member
        ])
        # Interning table: torch.Value -> index into json_graph['arguments'].
        arguments_map = {}
        def argument(value):
            # Return the argument index for *value*, creating (and appending)
            # its JSON record on first sight.
            if not value in arguments_map:
                json_argument = {}
                # Name is '<unique id>><producing node kind>'.
                json_argument['name'] = str(value.unique()) + '>' + str(value.node().kind())
                if value.isCompleteTensor():
                    # Shape/dtype are only known for complete tensors.
                    json_tensor_shape = {
                        'dimensions': value.type().sizes()
                    }
                    json_argument['type'] = {
                        'dataType': data_type_map[value.type().dtype()],
                        'shape': json_tensor_shape
                    }
                if value.node().kind() == "prim::Param":
                    # Graph parameters are marked as (empty) initializers.
                    json_argument['initializer'] = {}
                arguments = json_graph['arguments']
                arguments_map[value] = len(arguments)
                arguments.append(json_argument)
            return arguments_map[value]
        for _ in graph.inputs():
            json_graph['inputs'].append({
                'name': _.debugName(),
                'arguments': [ argument(_) ]
            })
        for _ in graph.outputs():
            json_graph['outputs'].append({
                'name': _.debugName(),
                'arguments': [ argument(_) ]
            })
        for node in graph.nodes():
            json_node = {
                'type': { 'name': node.kind() },
                'inputs': [],
                'outputs': [],
                'attributes': []
            }
            json_graph['nodes'].append(json_node)
            for name in node.attributeNames():
                value = node[name]
                json_attribute = {
                    'name': name,
                    'value': value
                }
                if torch.is_tensor(value):
                    # Tensor attributes become (empty) inputs, not attributes.
                    json_node['inputs'].append({
                        'name': name,
                        'arguments': []
                    })
                else:
                    json_node['attributes'].append(json_attribute)
            for input_value in node.inputs():
                json_parameter = {
                    'name': 'x',
                    'arguments': [ argument(input_value) ]
                }
                json_node['inputs'].append(json_parameter)
            for output_value in node.outputs():
                json_node['outputs'].append({
                    'name': 'x',
                    'arguments': [ argument(output_value) ]
                })
        return json_graph
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db.models import Sum
from django.contrib.contenttypes.fields import GenericRelation
class LikeDislikeManager(models.Manager):
    """Manager exposing vote-specific filters for LikeDislike rows."""

    use_for_related_fields = True

    def likes(self):
        """Rows with a positive vote (likes)."""
        return self.get_queryset().filter(vote__gt=0)

    def dislikes(self):
        """Rows with a negative vote (dislikes)."""
        return self.get_queryset().filter(vote__lt=0)

    def sum_rating(self):
        """Net rating: the sum of all votes, or 0 when there are none."""
        total = self.get_queryset().aggregate(Sum('vote')).get('vote__sum')
        return total or 0

    def post(self):
        """Votes attached to Post objects, newest post first."""
        qs = self.get_queryset()
        return qs.filter(content_type__model='post').order_by('-post__pub_date')

    def comments(self):
        """Votes attached to Comment objects, newest comment first."""
        qs = self.get_queryset()
        return qs.filter(content_type__model='comment').order_by('-comment__pub_date')
class LikeDislike(models.Model):
    """A single +1/-1 vote by a user on any model instance (generic FK)."""
    LIKE = 1
    DISLIKE = -1

    VOTES = (
        (DISLIKE, 'Dislike'),
        (LIKE, 'Like')
    )

    # +1 for a like, -1 for a dislike (see VOTES choices).
    vote = models.SmallIntegerField(choices=VOTES)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Generic relation target: (content_type, object_id) -> content_object.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()

    objects = LikeDislikeManager()
class Post(models.Model):
    """A user post with generic like/dislike votes and an optional picture."""
    votes = GenericRelation(LikeDislike, related_query_name='post')
    post = models.CharField(max_length=1000)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    picture = models.ImageField(upload_to='profile_image', blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.post

    @property
    def cnt(self):
        # Count of legacy Like rows (see the Like model below), not of the
        # generic LikeDislike votes.
        return self.like_set.all().count()

    @property
    def total_comment(self):
        # Number of comments attached to this post.
        return self.comment_set.all().count()
class Comment(models.Model):
    """A comment on a Post, itself votable via the generic relation."""
    votes = GenericRelation(LikeDislike, related_query_name='comment')
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    comment = models.CharField(max_length=1000)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Shows the parent post's text, not the comment body.
        return self.post.post
class Like(models.Model):
    """Legacy simple like (pre-dating LikeDislike); counted by Post.cnt."""
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f'{self.user.username} likes \'{self.post.post}\''
|
import itertools
import pytest
import tigger.cluda as cluda
from tigger.helpers import min_blocks, product
import tigger.cluda.dtypes as dtypes
from helpers import *
from pytest_contextgen import parametrize_context_tuple, create_context_in_tuple
pytest_funcarg__ctx_with_gs_limits = create_context_in_tuple
def set_context_gs_limits(metafunc, cc):
    """
    Parametrize contexts with small grid limits for testing purposes.

    For each candidate grid-size limit the device can support, build a
    context-creator callable that overrides ``max_num_groups`` with that
    limit.  Returns ``(creators, param_tuples, ids)`` in the shape expected
    by ``parametrize_context_tuple``.
    """
    new_ccs = []
    rem_ids = []
    for gl in [[31, 31], [31, 31, 31]]:
        # If the context will not support these limits, skip
        ctx = cc()
        mgs = ctx.device_params.max_num_groups
        ctx.release()
        if len(gl) > len(mgs) or (len(mgs) > 2 and len(gl) > 2 and mgs[2] < gl[2]):
            continue

        # BUG FIX: bind the current ``gl`` as a default argument.  The
        # original closure captured the loop variable late, so every
        # creator silently used the *last* limit of the loop.
        def new_cc(gl=gl):
            ctx = cc()
            ctx.override_device_params(max_num_groups=gl)
            return ctx

        rem_ids.append(str(gl))
        new_ccs.append(new_cc)

    return new_ccs, [tuple()] * len(new_ccs), rem_ids
def pytest_generate_tests(metafunc):
    """Hook wiring the fixtures above into pytest's parametrization."""
    args = metafunc.funcargnames

    if 'ctx_with_gs_limits' in args:
        parametrize_context_tuple(metafunc, 'ctx_with_gs_limits', set_context_gs_limits)

    if 'gs_is_multiple' in args:
        metafunc.parametrize(
            'gs_is_multiple', [True, False],
            ids=["gs_is_multiple", "gs_is_not_multiple"])

    if 'gl_size' in args:
        # Valid (grid, local) pairs; only matching dimensionalities are kept.
        grid_sizes = [
            (13,), (35,), (31*31*4,),
            (13, 15), (35, 13),
            (13, 15, 17), (75, 33, 5)]
        local_sizes = [(4,), (4, 4), (4, 4, 4)]
        pairs = [(g, l) for g, l in itertools.product(grid_sizes, local_sizes)
                 if len(g) == len(l)]
        metafunc.parametrize('gl_size', pairs, ids=[str(p) for p in pairs])

    if 'incorrect_gl_size' in args:
        # Pairs deliberately exceeding the lowered 31^x virtual-grid limits.
        grid_sizes = [
            (31**3+1,),
            (31**2, 32), (31*20, 31*20),
            (31, 31, 32), (150, 150, 150)]
        local_sizes = [(4,), (4, 4), (4, 4, 4)]
        pairs = [(g, l) for g, l in itertools.product(grid_sizes, local_sizes)
                 if len(g) == len(l)]
        metafunc.parametrize('incorrect_gl_size', pairs, ids=[str(p) for p in pairs])
class ReferenceIds:
    """Pure-numpy oracle for the virtual thread-ID kernels tested below.

    Arrays use numpy's C order, so the *last* CLUDA dimension is the *first*
    numpy axis; ``np_global_size`` / ``np_local_size`` hold the reversed
    shapes used on the numpy side.
    """

    def __init__(self, grid_size, local_size, gs_is_multiple=True):
        global_size = [g * l for g, l in zip(grid_size, local_size)]
        if not gs_is_multiple:
            # Shrink each dimension by one so the global size is deliberately
            # NOT a multiple of the local size.
            global_size = [g - 1 for g in global_size]
        self.global_size = tuple(global_size)
        self.local_size = local_size
        self.np_global_size = list(reversed(global_size))
        self.np_local_size = list(reversed(local_size))

    def predict_global_flat_ids(self):
        """Expected flat global IDs: 0..N-1 in C order."""
        return numpy.arange(product(self.np_global_size)).astype(numpy.int32)

    def predict_local_ids(self, dim):
        """Expected local (within-group) IDs along CLUDA dimension *dim*."""
        if dim > len(self.global_size) - 1:
            # Dimensions beyond the grid's rank are always zero.
            return numpy.zeros(self.np_global_size, dtype=numpy.int32)
        np_dim = len(self.global_size) - dim - 1
        global_len = self.np_global_size[np_dim]
        local_len = self.np_local_size[np_dim]
        repetitions = min_blocks(global_len, local_len)
        # 0..local_len-1 repeated per block, truncated to the global extent...
        pattern = numpy.tile(numpy.arange(local_len), repetitions)[:global_len]
        pattern_shape = [x if i == np_dim else 1 for i, x in enumerate(self.np_global_size)]
        pattern = pattern.reshape(*pattern_shape)
        # ...then broadcast over every other axis.
        tile_shape = [x if i != np_dim else 1 for i, x in enumerate(self.np_global_size)]
        pattern = numpy.tile(pattern, tile_shape)
        return pattern.astype(numpy.int32)

    def predict_group_ids(self, dim):
        """Expected group (block) IDs along CLUDA dimension *dim*."""
        if dim > len(self.global_size) - 1:
            return numpy.zeros(self.np_global_size, dtype=numpy.int32)
        np_dim = len(self.global_size) - dim - 1
        global_len = self.np_global_size[np_dim]
        local_len = self.np_local_size[np_dim]
        repetitions = min_blocks(global_len, local_len)
        # Group index repeated local_len times, truncated to the extent...
        pattern = numpy.repeat(numpy.arange(repetitions), local_len)[:global_len]
        pattern_shape = [x if i == np_dim else 1 for i, x in enumerate(self.np_global_size)]
        pattern = pattern.reshape(*pattern_shape)
        # ...then broadcast over every other axis.
        tile_shape = [x if i != np_dim else 1 for i, x in enumerate(self.np_global_size)]
        pattern = numpy.tile(pattern, tile_shape)
        return pattern.astype(numpy.int32)

    def predict_global_ids(self, dim):
        """Expected global IDs: local id + group id * local size."""
        lids = self.predict_local_ids(dim)
        gids = self.predict_group_ids(dim)
        return lids + gids * (self.local_size[dim] if dim < len(self.local_size) else 0)
def test_ids(ctx_with_gs_limits, gl_size, gs_is_multiple):
    """
    Test that virtual IDs are correct for each thread.

    The kernel records, per thread: the flat global id, the three local ids,
    the three group ids and the three global ids; each buffer is compared
    against the ReferenceIds numpy oracle.
    """
    ctx = ctx_with_gs_limits
    grid_size, local_size = gl_size
    ref = ReferenceIds(grid_size, local_size, gs_is_multiple)

    get_ids = ctx.compile_static("""
    KERNEL void get_ids(GLOBAL_MEM int *fid,
        GLOBAL_MEM int *lx, GLOBAL_MEM int *ly, GLOBAL_MEM int *lz,
        GLOBAL_MEM int *gx, GLOBAL_MEM int *gy, GLOBAL_MEM int *gz,
        GLOBAL_MEM int *glx, GLOBAL_MEM int *gly, GLOBAL_MEM int *glz)
    {
        VIRTUAL_SKIP_THREADS;
        const int i = virtual_global_flat_id();
        fid[i] = i;
        lx[i] = virtual_local_id(0);
        ly[i] = virtual_local_id(1);
        lz[i] = virtual_local_id(2);
        gx[i] = virtual_group_id(0);
        gy[i] = virtual_group_id(1);
        gz[i] = virtual_group_id(2);
        glx[i] = virtual_global_id(0);
        gly[i] = virtual_global_id(1);
        glz[i] = virtual_global_id(2);
    }
    """, 'get_ids', ref.global_size, local_size=ref.local_size)

    # One device buffer per recorded quantity (flat ids are 1-D).
    fid = ctx.allocate(product(ref.np_global_size), numpy.int32)
    lx = ctx.allocate(ref.np_global_size, numpy.int32)
    ly = ctx.allocate(ref.np_global_size, numpy.int32)
    lz = ctx.allocate(ref.np_global_size, numpy.int32)
    gx = ctx.allocate(ref.np_global_size, numpy.int32)
    gy = ctx.allocate(ref.np_global_size, numpy.int32)
    gz = ctx.allocate(ref.np_global_size, numpy.int32)
    glx = ctx.allocate(ref.np_global_size, numpy.int32)
    gly = ctx.allocate(ref.np_global_size, numpy.int32)
    glz = ctx.allocate(ref.np_global_size, numpy.int32)

    get_ids(fid, lx, ly, lz, gx, gy, gz, glx, gly, glz)

    assert diff_is_negligible(fid.get(), ref.predict_global_flat_ids())
    assert diff_is_negligible(lx.get(), ref.predict_local_ids(0))
    assert diff_is_negligible(ly.get(), ref.predict_local_ids(1))
    assert diff_is_negligible(lz.get(), ref.predict_local_ids(2))
    assert diff_is_negligible(gx.get(), ref.predict_group_ids(0))
    assert diff_is_negligible(gy.get(), ref.predict_group_ids(1))
    assert diff_is_negligible(gz.get(), ref.predict_group_ids(2))
    assert diff_is_negligible(glx.get(), ref.predict_global_ids(0))
    assert diff_is_negligible(gly.get(), ref.predict_global_ids(1))
    assert diff_is_negligible(glz.get(), ref.predict_global_ids(2))
def test_sizes(ctx_with_gs_limits, gl_size, gs_is_multiple):
    """
    Test that virtual sizes are correct.

    A single thread writes the 3 local sizes, 3 group counts, 3 global
    sizes and the flat global size into a 10-element buffer, which is
    compared against values computed on the host.
    """
    ctx = ctx_with_gs_limits
    grid_size, local_size = gl_size
    ref = ReferenceIds(grid_size, local_size, gs_is_multiple)

    get_sizes = ctx.compile_static("""
    KERNEL void get_sizes(GLOBAL_MEM int *sizes)
    {
        if (virtual_global_flat_id() > 0) return;

        for (int i = 0; i < 3; i++)
        {
            sizes[i] = virtual_local_size(i);
            sizes[i + 3] = virtual_num_groups(i);
            sizes[i + 6] = virtual_global_size(i);
        }
        sizes[9] = virtual_global_flat_size();
    }
    """, 'get_sizes', ref.global_size, local_size=ref.local_size)

    sizes = ctx.allocate(10, numpy.int32)
    get_sizes(sizes)

    # Pad the reference sizes to 3 dimensions with 1s, mirroring the kernel.
    gls = list(ref.global_size) + [1] * (3 - len(ref.global_size))
    ls = list(ref.local_size) + [1] * (3 - len(ref.local_size))
    gs = [min_blocks(g, l) for g, l in zip(gls, ls)]
    ref_sizes = numpy.array(ls + gs + gls + [product(gls)]).astype(numpy.int32)
    assert diff_is_negligible(sizes.get(), ref_sizes)
def test_incorrect_sizes(ctx_with_gs_limits, incorrect_gl_size):
    """
    Compiling a static kernel whose virtual grid exceeds the context's
    (artificially lowered) limits must raise ValueError.
    """
    ctx = ctx_with_gs_limits
    grid_size, local_size = incorrect_gl_size
    ref = ReferenceIds(grid_size, local_size)

    with pytest.raises(ValueError):
        ctx.compile_static("""
        KERNEL void test(GLOBAL_MEM int *temp)
        {
            temp[0] = 1;
        }
        """, 'test', ref.global_size, local_size=ref.local_size)
|
from .dataset import Dataset
from .in_memory_dataset import InMemoryDataset
from .planetoid import Planetoid
from .npz_dataset import NPZDataset
from .ppi import PPI
from .reddit import Reddit
from .tu_dataset import TUDataset
from .karateclub import KarateClub
from .musae import MUSAE
|
#!/usr/bin/env python
# test has been developed by Robert Harakaly and changed for SAM by Victor Galaktionov
# get the replica entries associated with a list of GUIDs (lfc_getreplicas)
# meta: proxy=true
# meta: preconfig=../../LFC-config
import os, lfc, sys, errno
from testClass import _test, _ntest, _testRunner, SAM_Run, LFC_VO, TEST_HOME
class test_OK(_test):
    """Happy path: two files with one replica each; lfc_getreplicas must
    return both replicas' SFNs.  (Python 2 source -- note the 0664 octal.)"""
    def info(self):
        return "Test OK: "
    def prepare(self):
        # Create two LFNs with fresh GUIDs and attach one replica to each.
        self.sfn1="sfn://test-se.cern.ch" + LFC_VO + "/hary/lfc_getreplica_test"
        self.sfn2="sfn://test-se.in2p3.fr" + LFC_VO + "/hary/lfc_getreplica_test"
        self.guid1=self.get_guid()
        self.name1= LFC_VO + "/lfc_getreplica_test1"
        self.guid2=self.get_guid()
        self.name2= LFC_VO + "/lfc_getreplica_test2"
        lfc.lfc_creatg(self.name1, self.guid1, 0664)
        lfc.lfc_creatg(self.name2, self.guid2, 0664)
        lfc.lfc_addreplica(self.guid1, None, "test-se.cern.ch", self.sfn1, '-', 'D', "", "")
        lfc.lfc_addreplica(self.guid2, None, "test-se.in2p3.fr", self.sfn2, '-', 'D', "", "")
    def clean(self):
        # Undo prepare(): drop replicas, then the catalogue entries.
        lfc.lfc_delreplica(self.guid1, None, self.sfn1)
        lfc.lfc_delreplica(self.guid2, None, self.sfn2)
        lfc.lfc_unlink(self.name1)
        lfc.lfc_unlink(self.name2)
    def test(self):
        # Empty string = no SE filter.  NOTE: 'list' shadows the builtin.
        ret, list = lfc.lfc_getreplicas([self.guid1, self.guid2], "")
        return (list,ret)
    def ret(self):
        # Expected value: two lfc_filereplica records carrying our SFNs.
        retval=[]
        retval.append(lfc.lfc_filereplica())
        retval.append(lfc.lfc_filereplica())
        retval[0].sfn = self.sfn1
        retval[1].sfn = self.sfn2
        return retval
    def compare(self, testVal, retVal):
        # Pass iff the status matches and both SFNs come back in order.
        (ret, retRetVal) = retVal
        (test, testRetVal) = testVal
        retval = True
        if ((retRetVal == testRetVal) & ( len(test) == 2 )):
            retval = retval & ( test[0].sfn == ret[0].sfn )
            retval = retval & ( test[1].sfn == ret[1].sfn )
        else:
            retval = False
        return retval
class test_OK_se(_test):
    """SE filter: with five replicas across two SEs, filtering on
    test-se.cern.ch must return only the three CERN replicas."""
    def info(self):
        return "Test SE filter: "
    def prepare(self):
        # file1 gets replicas at CERN, IN2P3 and CERN; file2 at CERN, IN2P3.
        self.sfn11="sfn://test-se.cern.ch" + LFC_VO + "/hary/lfc_getreplica_test"
        self.sfn12="sfn://test-se.in2p3.fr" + LFC_VO + "/hary/lfc_getreplica_test"
        self.sfn13="sfn://test-se.cern.ch" + LFC_VO + "/lfc_getreplica_test"
        self.sfn21="sfn://test-se.cern.ch" + LFC_VO + "/hary/lfc_getreplica_test1"
        self.sfn22="sfn://test-se.in2p3.fr" + LFC_VO + "/hary/lfc_getreplica_test1"
        self.guid1=self.get_guid()
        self.name1= LFC_VO + "/lfc_getreplica_test1"
        self.guid2=self.get_guid()
        self.name2= LFC_VO + "/lfc_getreplica_test2"
        lfc.lfc_creatg(self.name1, self.guid1, 0664)
        lfc.lfc_creatg(self.name2, self.guid2, 0664)
        lfc.lfc_addreplica(self.guid1, None, "test-se.cern.ch", self.sfn11, '-', 'D', "", "")
        lfc.lfc_addreplica(self.guid1, None, "test-se.in2p3.fr", self.sfn12, '-', 'D', "", "")
        lfc.lfc_addreplica(self.guid1, None, "test-se.cern.ch", self.sfn13, '-', 'D', "", "")
        lfc.lfc_addreplica(self.guid2, None, "test-se.cern.ch", self.sfn21, '-', 'D', "", "")
        lfc.lfc_addreplica(self.guid2, None, "test-se.in2p3.fr", self.sfn22, '-', 'D', "", "")
    def clean(self):
        # Undo prepare(): drop all five replicas, then the entries.
        lfc.lfc_delreplica(self.guid1, None, self.sfn11)
        lfc.lfc_delreplica(self.guid1, None, self.sfn12)
        lfc.lfc_delreplica(self.guid1, None, self.sfn13)
        lfc.lfc_unlink(self.name1)
        lfc.lfc_delreplica(self.guid2, None, self.sfn22)
        lfc.lfc_delreplica(self.guid2, None, self.sfn21)
        lfc.lfc_unlink(self.name2)
    def test(self):
        # Restrict the lookup to the CERN SE.  NOTE: 'list' shadows builtin.
        ret, list = lfc.lfc_getreplicas([self.guid1,self.guid2], "test-se.cern.ch")
        return (list,ret)
    def ret(self):
        # Expected: the three CERN-hosted SFNs, in registration order.
        retval=[]
        retval.append(lfc.lfc_filereplica())
        retval.append(lfc.lfc_filereplica())
        retval.append(lfc.lfc_filereplica())
        retval[0].sfn = self.sfn11
        retval[1].sfn = self.sfn13
        retval[2].sfn = self.sfn21
        return retval
    def compare(self, testVal, retVal):
        # Pass iff the status matches and all three SFNs come back in order.
        (ret, retRetVal) = retVal
        (test, testRetVal) = testVal
        retval = True
        if ((retRetVal == testRetVal) & ( len(test) == 3 )):
            retval = retval & ( test[0].sfn == ret[0].sfn )
            retval = retval & ( test[1].sfn == ret[1].sfn )
            retval = retval & ( test[2].sfn == ret[2].sfn )
        else:
            retval = False
        return retval
class test_EINVAL1(_ntest):
    """Negative test: an oversized GUID must yield per-entry EINVAL."""
    def info(self):
        return "Test guid length exeeds CA_MAXGUIDLEN (EINVAL):"
    def test(self):
        # Build a GUID longer than the catalogue allows.
        guid = "a" * (lfc.CA_MAXGUIDLEN+2)
        ret, list = lfc.lfc_getreplicas([guid], "")
        return (list,lfc.cvar.serrno,ret)
    def ret(self):
        return (None, errno.EINVAL)
    def compare(self, testVal, retVal):
        # The call itself succeeds (serrno == 0) but every returned entry
        # must carry errcode == EINVAL.
        ((ret, reterr), retRetVal) = retVal
        (list, serrno, testRetVal) = testVal
        if serrno != 0:
            return False
        for item in list:
            if item.errcode != reterr:
                return False
        return True
class test_EINVAL2(_ntest):
    """Negative test: an oversized SE name must make the call fail with
    EINVAL.  NOTE(review): unlike the other tests, self.name here has no
    '/' after LFC_VO -- confirm whether that is intentional."""
    def info(self):
        return "Test SE name length exeeds CA_MAXNAMELEN (EINVAL): "
    def prepare(self):
        # One file with three replicas across two SEs.
        self.sfn1="sfn://test-se.cern.ch" + LFC_VO + "/hary/lfc_getreplica_test"
        self.sfn2="sfn://test-se.in2p3.fr" + LFC_VO + "/hary/lfc_getreplica_test"
        self.sfn3="sfn://test-se.cern.ch" + LFC_VO + "/lfc_getreplica_test"
        self.guid=self.get_guid()
        self.name= LFC_VO + "lfc_getreplica_test"
        lfc.lfc_creatg(self.name, self.guid, 0664)
        lfc.lfc_addreplica(self.guid, None, "test-se.cern.ch", self.sfn1, '-', 'D', "", "")
        lfc.lfc_addreplica(self.guid, None, "test-se.in2p3.fr", self.sfn2, '-', 'D', "", "")
        lfc.lfc_addreplica(self.guid, None, "test-se.cern.ch", self.sfn3, '-', 'D', "", "")
    def clean(self):
        # Undo prepare().
        lfc.lfc_delreplica(self.guid, None, self.sfn1)
        lfc.lfc_delreplica(self.guid, None, self.sfn2)
        lfc.lfc_delreplica(self.guid, None, self.sfn3)
        lfc.lfc_unlink(self.name)
    def test(self):
        # SE filter string longer than the catalogue allows.
        se = "a" * (lfc.CA_MAXNAMELEN + 2)
        ret, list = lfc.lfc_getreplicas([self.guid], se)
        return (list,lfc.cvar.serrno,ret)
    def ret(self):
        return (None, errno.EINVAL)
    def compare(self, testVal, retVal):
        # Pass iff both the status code and the error number match.
        ((ret, reterr), retRetVal) = retVal
        (test, testerr, testRetVal) = testVal
        if ((retRetVal == testRetVal) & (reterr == testerr)):
            retval = True
        else:
            retval = False
        return retval
class test_ENOENT(_ntest):
    """Negative test: a valid but unregistered GUID must yield per-entry
    ENOENT (call succeeds, each entry carries the error code)."""
    def info(self):
        return "Test existing replica nonexisting file (ENOENT): "
    def prepare(self):
        # A fresh GUID that was never registered in the catalogue.
        self.guid = self.get_guid()
    def test(self):
        ret, list = lfc.lfc_getreplicas([self.guid], "")
        return (list,lfc.cvar.serrno,ret)
    def ret(self):
        return (None, errno.ENOENT)
    def compare(self, testVal, retVal):
        # serrno must stay 0; every entry's errcode must be ENOENT.
        ((ret, reterr), retRetVal) = retVal
        (list, serrno, testRetVal) = testVal
        if serrno != 0:
            return False
        for item in list:
            if item.errcode != reterr:
                return False
        return True
class lfc_getreplicas_test(_testRunner):
    """Test-suite descriptor consumed by the SAM harness: suite name plus
    the ordered list of test classes to run."""
    def __init__(self):
        self.name = "lfc_getreplicas_test"
        self.tests=[test_OK, test_OK_se, test_EINVAL1, test_EINVAL2, test_ENOENT]
#************* Interface for SAM and Python tests ***************
# Module-level side effect: importing/running this file executes the whole
# suite through the SAM harness.
SAM_Run(lfc_getreplicas_test)
|
"""
Defines the Maze data type, which can store and draw a maze of arbitrary size.
"""
class Maze:
    """A rectangular maze of a given size.

    Only construction is implemented so far; cell storage and drawing are
    still TODO per the module docstring.
    """

    def __init__(self, size=(8, 8)):
        """
        Creates a blank maze.

        :param size: (columns, rows) dimensions of the maze grid.
        :raises ValueError: if either dimension is not positive.
        """
        # BUG FIX: the original constructor body was ``pass`` and silently
        # discarded ``size``; record the dimensions so the maze knows them.
        cols, rows = size
        if cols <= 0 or rows <= 0:
            raise ValueError("maze dimensions must be positive")
        self.size = (cols, rows)
import setuptools
from os.path import join, dirname
def _read_requirements():
    """Read install requirements as a list of lines.

    BUG FIX: the original passed ``open(...).readlines()`` straight into
    setup(), leaking the file handle; the context manager closes it.
    """
    with open(join(dirname(__file__), 'requirements.txt')) as req_file:
        return req_file.readlines()


setuptools.setup(
    name="django_bulb_switcher",
    version='0.1',
    packages=["django_bulb_switcher"],
    install_requires=_read_requirements(),
    author="Bernardo Fontes",
    author_email="bernardoxhc@gmail.com",
    url="https://github.com/berinhard/django-bulb-switcher",
    license="Apache 2.0",
    description="DB-Independent feature toggle for Django",
    keywords="django switcher feature toggle",
    classifiers=[
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
)
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
import json
from django.http import HttpResponse
from django.utils.safestring import mark_safe
from django.shortcuts import get_object_or_404
from .models import Chat,Message
from accounts.models import User
from courses.models import Course,Image
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
# from django.shortcuts import get_object_or_404
def get_curent_chat(chatId):
    """Return the Chat with primary key *chatId*, or raise Http404.

    NOTE(review): the name keeps the original 'curent' typo because external
    callers reference it.
    """
    chat = get_object_or_404(Chat, id=chatId)
    return chat
def get_last_10_comments(postId):
    # NOTE(review): ``Post`` is not imported anywhere in this module, so
    # calling this raises NameError at runtime -- confirm which app's Post
    # model is meant before adding the import.
    # NOTE(review): the function name says 10 but the slice takes 30 --
    # confirm the intended limit before changing either.
    post = get_object_or_404(Post,id=postId)
    return post.comments.order_by('-timestamp')[:30]
|
# http://www.practicepython.org/exercise/2014/07/05/18-cows-and-bulls.html
from random import randint
if __name__=="__main__":
cifra0 = str(randint(0,9))
cifra1 = str(randint(0,9))
cifra2 = str(randint(0,9))
cifra3 = str(randint(0,9))
numero = cifra0 + cifra1 + cifra2 + cifra3
num = 0
while True:
num += 1
while True:
try:
intento = input("Introduce cuatro cifras del 0 al 9 (p.ej: 0123): ")
acert = 0
descolocado = 0
if ((len(intento) == 4 and int(intento)) or intento=="0000"):
break
except ValueError:
pass
for i in range(4):
if intento[i] in numero:
if intento[i] == numero[i]:
acert += 1
else:
descolocado += 1
if (acert == 4):
break
else:
if (acert == 1):
s = ""
else:
s = "s"
print ("Hay {} cifra{} en su sitio y {} fuera de su sitio".format(acert, s, descolocado))
#
if (num == 1):
s = ""
else:
s = "s"
print ("Has acertado! El numero era el {} y lo has acertado en {} intento{}".format(numero, num, s))
|
# Generated by Django 3.1 on 2020-08-29 07:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1): creates the order_choice_log
    table recording which add-order / nudge-order algorithm was chosen and
    when.  The Chinese verbose_name values are runtime strings and are kept
    unchanged; auto-generated migrations should not be restructured."""

    dependencies = [
        ('manager', '0003_order_detail_start_time'),
    ]

    operations = [
        migrations.CreateModel(
            name='order_choice_log',
            fields=[
                ('choice_id', models.AutoField(primary_key=True, serialize=False)),
                ('add_order_type', models.IntegerField(default=0, verbose_name='加单算法选择')),
                ('nudge_order_type', models.IntegerField(default=0, verbose_name='催单算法选择')),
                ('create_time', models.DateTimeField(auto_now=True, verbose_name='使用算法的时间')),
            ],
        ),
    ]
|
from attractor import *
from parity_game import ParityGame
from game_solver import GameSolver
class QPZSolver(GameSolver):
    """Parity-game solver over BDD-encoded games.

    NOTE(review): the recursion shape (repeatedly halving one player's
    precision budget) looks like a quasi-polynomial Zielonka variant --
    confirm against the accompanying paper before citing a specific
    algorithm.  BDD conventions used below: ``game.V`` is the vertex set
    and ``x & ~y`` is set difference.
    """

    def __init__(self, game: ParityGame):
        super().__init__()
        self.game = game

    def solve(self):
        """Return (W0, W1): the winning regions of players 0 and 1."""
        # i is the player favoured by the parity of the top priority d.
        i = self.game.d % 2
        wi = self.qpz(self.game, self.game.d, self.game.parsed_pg.size, self.game.parsed_pg.size)
        wj = self.game.V & ~wi
        w0 = wi if i == 0 else wj
        w1 = wj if i == 0 else wi
        return (w0, w1)

    def qpz(self, game: ParityGame, d: int, p0: int, p1: int):
        """Recursive core: the region won by player (d % 2) in *game*,
        computed with precision budgets p0/p1 for the two players."""
        if game.V == game.bdd.false:
            # Empty subgame: nothing to win.
            return game.bdd.false
        i = d % 2        # player favoured by priority d
        j = 1 - i        # the opponent
        pi = p0 if i == 0 else p1
        pj = p1 if i == 0 else p0
        if pj == 0 or d == 0:
            # Opponent's budget exhausted or no priorities left:
            # the whole (sub)game is conceded to player i.
            return game.V
        # First recursive call with the opponent's budget halved.
        pip = pi
        pjp = pj // 2
        p0p = pip if i == 0 else pjp
        p1p = pjp if i == 0 else pip
        wj = game.V & ~ self.qpz(game, d, p0p, p1p)
        # Remove j's attractor to j's region, then strip i's attractor to
        # the top-priority vertices and recurse one priority down.
        a = attr_i(j, game, wj)
        game_p = game.subgame(a)
        game_pp = game_p.subgame(attr_i(i, game_p, game_p.get_parity(d)))
        wjpp = self.qpz(game_pp, d - 1, p0, p1)
        ap = attr_i(j, game_p, wjpp)
        game_ppp = game_p.subgame(ap)
        # Final half-budget call on what remains after both removals.
        wjppp = (game.V & ~ (a | ap)) & ~ self.qpz(game_ppp, d, p0p, p1p)
        return game.V & ~(a | ap | wjppp)
|
print("욥욥욥욥욥욥")
print("얍얍얍얍얍얍얍") |
"""todolist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from todo import views
# URL table for the todolist project; registration order is preserved
# because Django resolves patterns first-match-wins.
urlpatterns = [
    path('admin/', admin.site.urls),

    # Authentication
    path('signup/', views.signupuser, name="signupuser"),
    path('login/', views.loginuser, name="loginuser"),
    path('logout/', views.logoutuser, name="logoutuser"),

    # Todo listing views
    path('', views.home, name="home"),
    path('create/', views.createtodo, name="createtodo"),
    path('current/', views.currenttodos, name="currenttodos"),
    path('completed/', views.completedtodos, name="completedtodos"),
    path('archived/', views.archivedtodos, name="archivedtodos"),

    # Per-todo actions
    path('todo/<int:todo_pk>', views.viewtodo, name="viewtodo"),
    path('todo/<int:todo_pk>/complete', views.completetodo, name="completetodo"),
    path('todo/<int:todo_pk>/archive', views.archivetodo, name="archivetodo"),
    path('todo/<int:todo_pk>/shred', views.shredtodo, name="shredtodo"),
    path('todo/<int:todo_pk>/unarchive', views.unarchivetodo, name="unarchivetodo"),
]
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class HolderTentacleAI(DistributedObjectAI):
    """AI-side distributed object; currently no behaviour beyond the base
    class."""
    notify = DirectNotifyGlobal.directNotify.newCategory('HolderTentacleAI')

    def __init__(self, air):
        # air: the AI repository this distributed object belongs to.
        DistributedObjectAI.__init__(self, air)
#coding:gb2312
# Tutorial script demonstrating the three ways of passing arguments.
# The printed Chinese strings are runtime output and are left unchanged
# ("位置实参" = positional arguments, "被调查者的名字是" = "the respondent's
# name is").

# 1) Positional arguments.
print("位置实参:")
def favorite_numbers(name,number):
    print("被调查者的名字是:"+name.title())
    print(name.title()+"'s favorite number is "+number+"."+"\n")
favorite_numbers('lyl','1')
favorite_numbers('hl','9') # a function may be called any number of times

# 2) Keyword arguments ("关键字实参").
print("\n\n\n关键字实参:")
def favorite_numbers(name,number):
    print("被调查者的名字是:"+name.title())
    print(name.title()+"'s favorite number is "+number+"."+"\n")
favorite_numbers(name='lyl',number='1')
favorite_numbers(number='9',name='hl') # with name=value pairs, argument order no longer matters

# 3) Default values ("默认值").
print("\n\n\n默认值:")
def favorite_numbers(name,number = '1'): # number defaults to '1'; callers may omit it
    print("被调查者的名字是:"+name.title())
    print(name.title()+"'s favorite number is "+number+"."+"\n")
favorite_numbers(name='lf')   # number falls back to the default '1'
favorite_numbers(name='lyl')  # keyword call: no need to mind positional order
favorite_numbers(name='hl',number='9') # an explicit number overrides the default
# When defaults are used, parameters without defaults must be listed first
# so Python can still match positional arguments correctly.
|
"""
Biblioteca criada com todas as funções criadas em Programação 2
"""
"""
Um n-grama é uma sequência de caracteres de tamanho n, por exemplo:
"goiaba" --> 1-grama: g, o, i, a, b, a
2-grama: go, oi, ia, ab, ba
3-grama: goi, oia, iab, aba
...
Construa a função ngrama(<texto>, <tam>) que retorna uma lista contendo os n-gramas de tamanho <tam> de <texto>
"""
def nGrama(pTexto, tam):
    """Return every n-gram (substring of length *tam*) of *pTexto*, in order."""
    limite = len(pTexto) - tam
    # only start positions that leave room for a full n-gram are kept
    return [pTexto[i:i + tam] for i in range(len(pTexto)) if i <= limite]
#
def insereEspacos(pTexto):
    """Return *pTexto* with every separator character padded by one space
    on each side (non-separator characters are copied unchanged)."""
    strSeparadores = " .,:;!?(){}[]/\\"
    # str.join avoids the quadratic += concatenation of the original, and the
    # old `elem != ""` guard was dead code: iterating a string never yields
    # an empty character.
    return "".join(
        " " + ch + " " if ch in strSeparadores else ch
        for ch in pTexto
    )
#
def tokenizador(txt):
    """Split *txt* into tokens and their start offsets.

    Returns (tokens, positions): words plus non-whitespace separator
    characters, each paired with its starting index in *txt*.  Space and tab
    separators are dropped from the token list.
    """
    lstTokens = []
    strbuffer = ''
    lstposicoes = []
    separador = ' ,!?.:;/-_\\()[]{}'
    for pos in range(len(txt)):
        if txt[pos] not in separador:
            strbuffer += txt[pos]
        else:
            if strbuffer != '':
                # flush the pending word; it started len(strbuffer) back
                lstTokens.append(strbuffer)
                lstposicoes.append(pos - len(strbuffer))
                strbuffer = ''
            if txt[pos] not in [' ', '\t']:
                # keep punctuation separators as their own tokens
                lstTokens.append(txt[pos])
                lstposicoes.append(pos)
    if strbuffer != '':
        lstTokens.append(strbuffer)
        # BUG FIX: the trailing word starts at len(txt) - len(strbuffer); the
        # old code used pos - len(strbuffer) (pos = last index), which was off
        # by one — e.g. tokenizador("ab") reported position -1 instead of 0.
        lstposicoes.append(len(txt) - len(strbuffer))
    return lstTokens, lstposicoes
#
def separaPal(pTexto):
    """Split *pTexto* into words, discarding the separator characters."""
    strSeparadores = ' ,!?.:;/-_\\()[]{}'
    palavras = []
    atual = ""
    for ch in pTexto:
        if ch in strSeparadores:
            # a separator closes the word being accumulated, if any
            if atual != "":
                palavras.append(atual)
                atual = ""
        else:
            atual += ch
    if atual != "":
        palavras.append(atual)
    return palavras
#
def separaPal2(pTexto):
    """Split *pTexto* into tokens and keep only the purely alphabetic ones."""
    textoAux = insereEspacos(pTexto)
    lstPals = textoAux.split()
    # BUG FIX: the old loop called .isalpha() on the integer loop index
    # (AttributeError) and del'd the index variable while iterating; the
    # intent was clearly to drop non-alphabetic tokens, done here safely.
    return [pal for pal in lstPals if pal.isalpha()]
#
def codifica(pTexto):
    """Encode a sequence of tokens into a word-class string.

    Each token maps to one code: 'p' preposition, 'a' article,
    'c' conjunction, 'M' capitalized word, 'N' number, 'm' lowercase word;
    any other token is copied verbatim.
    Assumes tokens are non-empty strings (elem[0] would raise otherwise).
    """
    preposicoes = ['a', 'ante', 'até', 'após', 'com', 'contra','de', 'desde','em','entre','para','per',
                   'perante','por','sem','sob','sobre','trás']
    conjuncoes = ['e', 'nem', 'mas também', 'como também', 'bem como', 'mas ainda','mas', 'porém', 'todavia', 'contudo', 'antes']
    artigos = ['o', 'a', 'os', 'as', 'um', 'uma', 'uns', 'umas']
    strCodificada = ""
    for elem in pTexto:
        if elem.lower() in preposicoes:
            strCodificada += 'p'
        elif elem.lower() in artigos:
            strCodificada += 'a'
        elif elem.lower() in conjuncoes:
            strCodificada += 'c'
        elif elem[0].isupper():
            strCodificada += 'M'
        elif elem.isdigit():
            strCodificada += 'N'
        # BUG FIX: was `elif elem.islower:` — a bound method object, which is
        # always truthy, so the final else-branch was unreachable and every
        # non-matching token was wrongly encoded as 'm'.
        elif elem.islower():
            strCodificada += 'm'
        else:
            strCodificada += elem
    return strCodificada
#
|
import math

# Circumference of a circle from its diameter: L = pi * d.
d = input("Write diameter: ")
# math.pi replaces the coarse 3.14 approximation.
L = math.pi * float(d)
print(L)
#!/usr/bin/env python3
"""
__author__ = "Axelle Apvrille"
__license__ = "MIT License"
"""
import argparse
import os
import subprocess
import droidutil # that's my own utilities
import droidsample
import droidreport
import sys
property_dump_file = 'details.md'
report_file = 'report.md'
json_file = 'report.json'
__version__ = "3.4.1"
def get_arguments():
    """Parse the command line and return the populated argparse namespace.

    Side effects: creates the output directory and, when --movein is given,
    the move-in directory.
    """
    parser = argparse.ArgumentParser(description='''DroidLysis3 is a Python
script which processes Android samples. \n
1/ It extracts properties from the samples (e.g connects to Internet, roots the phone...). The extracted properties are displayed.\n
2/ It helps the analyst begin its reverse engineering of the sample, by performing a first automatic analysis, disassembling, decompiling and a description draft.''', prog='DroidLysis', epilog='Version '+__version__+' - Greetz from Axelle Apvrille')
    # Input/output selection and console verbosity.
    parser.add_argument('-i', '--input', help='input directories or files to process', nargs='+', action='store', default='.')
    parser.add_argument('-o', '--output', help='analysis of input files is written into subdirectories of this directory', action='store', default='.')
    parser.add_argument('-c', '--clearoutput', help='erase the output directory at the end. Indicates you want something quick.', action='store_true')
    parser.add_argument('-s', '--silent', help='do not display output on console', action='store_true')
    parser.add_argument('-m', '--movein', help='after it has been processed, each input file is moved to this directory', action='store')
    parser.add_argument('-v', '--verbose', help='get more detailed messages', action='store_true')
    parser.add_argument('-V', '--version', help='displays version number', action='version', version="%(prog)s "+__version__)
    # Analysis tuning flags.
    parser.add_argument('--no-kit-exception', help='by default, ad/dev/stats kits are ruled out for searches. Use this option to treat them as regular namespaces', action='store_true')
    parser.add_argument('--enable-procyon', help='enable procyon decompilation', action='store_true')
    parser.add_argument('--disable-report', help='do not generate automatic report', action='store_true')
    parser.add_argument('--enable-sql', help='write analysis to SQL database', action='store_true')
    parser.add_argument('--disable-json', help='do not dump analysis to JSON', action='store_true')
    args = parser.parse_args()
    # create output dir if necessary
    droidutil.mkdir_if_necessary(args.output)
    # create movein dir if necessary (mkdir_if_necessary presumably tolerates
    # a None argument when --movein was not given — TODO confirm)
    if args.verbose and args.movein:
        print("Creating %s if necessary" % (args.movein))
    droidutil.mkdir_if_necessary(args.movein)
    return args
def process_input(args):
    """Process every input file / directory from the parsed CLI arguments.

    Directories are scanned one level deep (not recursively) and every entry
    is analyzed; plain files are analyzed directly.  When --movein is set,
    each processed file is moved into that directory afterwards.
    """
    # BUG FIX: the original positional calls to process_file() dropped
    # args.silent in the directory branch, shifting no_kit_exception,
    # enable_sql and disable_json into the wrong parameters, and the
    # file branch never forwarded enable_sql / disable_json at all.
    # Keyword arguments make the long signature safe to call.
    flags = dict(verbose=args.verbose,
                 clear=args.clearoutput,
                 enable_procyon=args.enable_procyon,
                 disable_report=args.disable_report,
                 silent=args.silent,
                 no_kit_exception=args.no_kit_exception,
                 enable_sql=args.enable_sql,
                 disable_json=args.disable_json)
    for element in args.input:
        if os.path.isdir(element):
            for file in os.listdir(element):
                process_file(os.path.join(element, file), args.output, **flags)
                if args.movein:
                    if args.verbose:
                        print("Moving %s to %s" % (os.path.join('.', element), os.path.join(args.movein, element)))
                    # TODO: inner directories are not handled recursively.
                    try:
                        os.rename(os.path.join(element, file), os.path.join(args.movein, file))
                    except OSError as e:
                        if args.verbose:
                            print("%s no longer present?: %s\n" % (file, str(e)))
        if os.path.isfile(element):
            process_file(os.path.join('.', element), args.output, **flags)
            if args.movein:
                if args.verbose:
                    print("Moving %s to %s" % (os.path.join('.', element), os.path.join(args.movein, os.path.basename(element))))
                os.rename(os.path.join('.', element), os.path.join(args.movein, os.path.basename(element)))
def process_file(infile, outdir='/tmp/analysis', verbose=False, clear=False, enable_procyon=False, disable_report=False, silent=False, no_kit_exception=False, enable_sql=False, disable_json=False):
    """Run the full static-analysis pipeline on a single sample file."""
    if not os.access(infile, os.R_OK):
        return
    if not silent:
        print("Processing: " + infile + " ...")
    sample = droidsample.droidsample(infile, outdir, verbose, clear, enable_procyon, disable_report, silent, no_kit_exception)
    # Extraction pipeline — order matters: unzip/disassemble feed the
    # property extractors that follow.
    sample.unzip()
    sample.disassemble()
    sample.extract_file_properties()
    sample.extract_meta_properties()
    sample.extract_manifest_properties()
    sample.extract_dex_properties()
    listofkits = sample.extract_kit_properties()
    if no_kit_exception:
        # kits are still extracted (for their side effects) but not excluded
        listofkits = []
    sample.extract_smali_properties(listofkits)
    sample.extract_wide_properties(listofkits)
    if enable_sql:
        sample.properties.write()
    if not disable_json:
        sample.properties.dump_json(os.path.join(sample.outdir, json_file))
    write_report = not (clear or disable_report)
    show_console = not silent
    if show_console or write_report:
        reporter = droidreport.droidreport(sample, console=True, report_to_file=write_report)
        reporter.write(os.path.join(sample.outdir, report_file), verbose)
    if clear:
        # quick mode: throw away the working directory entirely
        if not silent:
            print("Removing directory %s ..." % (sample.outdir))
        cleanup = subprocess.Popen(['rm', '-rf', sample.outdir])
        cleanup.communicate()
    else:
        details = open(os.path.join(sample.outdir, property_dump_file), 'a')
        details.write(str(sample.properties))
        details.close()
    sample.close()
def check_python_version():
    """Abort with an error message when not running under Python 3."""
    if sys.version_info.major < 3:
        # sys.exit is always available and sets a failure status; quit() is a
        # site.py convenience that may be absent (e.g. under `python -S`).
        print("ERROR: Please run DroidLysis with Python 3", file=sys.stderr)
        sys.exit(1)
if __name__ == "__main__":
    # Entry point: refuse Python 2, parse the CLI, then analyze all inputs.
    check_python_version()
    args = get_arguments()
    process_input(args)
    print("END")
|
import pandas as pd
import plotly.express as px

# Public API returning the current ISS position as JSON, shaped like
# {"iss_position": {"latitude": ..., "longitude": ...}, "message": ..., ...}
# — TODO confirm the exact schema against the service.
url = 'http://api.open-notify.org/iss-now.json'
# Get position of ISS; read_json indexes the rows by the nested keys
# ('latitude'/'longitude'), so .loc below picks scalar values.
df = pd.read_json(url)
#print(df)
# Promote the nested iss_position values to their own columns.
df['latitude'] = df.loc['latitude','iss_position']
df['longitude'] = df.loc['longitude','iss_position']
df.reset_index(inplace=True)
df = df.drop(['index','message'],axis=1)
# Plot the position on a world map.
fig = px.scatter_geo(df,lat='latitude',lon='longitude')
fig.show()
|
from sklearn.svm import SVC
from DataSet.iris import learn_iris
from DataSet.xor import learn_xor

# Experiments with SVMs.
# Kernel trick:
#   a kernel is, roughly, a similarity measure between two samples
#   (x_i, x_j), ranging from 0 to 1.
# Gaussian (RBF) kernel:
#   k(x_i, x_j) = exp(-|x_i - x_j|^2 / (2*sigma^2))
#               = exp(-gamma * |x_i - x_j|^2)   where gamma = 1 / (2*sigma^2)
svm = SVC(kernel='rbf', random_state=0, gamma=0.1, C=10.0)
learn_xor(classifier=svm)
learn_iris(classifier=svm, title='SVM')

# --------------------
# With a large gamma the decision boundary becomes very complex and the
# model overfits the training data.
svm = SVC(kernel='rbf', random_state=0, gamma=100, C=10.0)
learn_iris(classifier=svm, title='large Gamma')
|
from pico2d import *
import random
import time
import game_world
import config
from ball import Ball
# Boy State
# IDLE, RUN, SLEEP = range(3)

# Boy event identifiers used by the state machine below.
RIGHT_DOWN, LEFT_DOWN, RIGHT_UP, LEFT_UP, TIME_OUT, SPACE_DOWN, ENTER_DOWN = range(7)

# Maps an (SDL event type, key code) pair to the internal event identifier.
key_event_table = {
    (SDL_KEYDOWN, SDLK_RIGHT): RIGHT_DOWN,
    (SDL_KEYDOWN, SDLK_LEFT): LEFT_DOWN,
    (SDL_KEYUP, SDLK_RIGHT): RIGHT_UP,
    (SDL_KEYUP, SDLK_LEFT): LEFT_UP,
    (SDL_KEYDOWN, SDLK_SPACE): SPACE_DOWN,
    (SDL_KEYDOWN, SDLK_RETURN): ENTER_DOWN,
}
class IdleState:
    """Boy stands still; after two idle seconds he falls asleep."""

    @staticmethod
    def enter(boy):
        # remember when idling started, for the sleep timeout
        boy.time = time.time()

    @staticmethod
    def exit(boy):
        pass

    @staticmethod
    def update(boy):
        boy.frame = (boy.frame + 1) % 8
        if time.time() - boy.time > 2.0:
            boy.set_state(SleepState)

    @staticmethod
    def draw(boy):
        if boy.dir == 0:
            row = 200
        else:
            row = 300
        Boy.image.clip_draw(boy.frame * 100, row, 100, 100, boy.x, boy.y)
class RunState:
    """Boy runs horizontally; his speed doubles after two seconds."""

    @staticmethod
    def enter(boy):
        boy.time = time.time()

    @staticmethod
    def exit(boy):
        pass

    @staticmethod
    def update(boy):
        running_for = time.time() - boy.time
        mag = 2 if running_for > 2.0 else 1
        boy.frame = (boy.frame + 1) % 8
        # move by mag * dx, clamped to the playfield [25, 775]
        new_x = boy.x + mag * boy.dx
        boy.x = max(25, min(new_x, 775))

    @staticmethod
    def draw(boy):
        row = 0 if boy.dir == 0 else 100
        Boy.image.clip_draw(boy.frame * 100, row, 100, 100, boy.x, boy.y)
class SleepState:
    """Boy lies down and snoozes until a key press wakes him."""

    @staticmethod
    def enter(boy):
        boy.time = time.time()

    @staticmethod
    def exit(boy):
        pass

    @staticmethod
    def update(boy):
        # keep cycling the 8-frame animation while asleep
        boy.frame = (boy.frame + 1) % 8

    @staticmethod
    def draw(boy):
        # rotate the sprite 90 degrees to lay the boy down; the x offset keeps
        # him roughly in place while rotated
        if boy.dir == 1:
            row, shift, angle = 300, -25, 3.141592 / 2
        else:
            row, shift, angle = 200, +25, -3.141592 / 2
        Boy.image.clip_composite_draw(boy.frame * 100, row, 100, 100,
                                      angle, '', boy.x + shift, boy.y - 25,
                                      100, 100)
# State-transition table: next_state_table[current_state][event] -> next state.
# NOTE(review): Boy.handle_event() below chooses states directly and does not
# appear to consult this table — presumably leftover; confirm before removing.
next_state_table = {
    IdleState: { RIGHT_UP: RunState, LEFT_UP: RunState, RIGHT_DOWN: RunState, LEFT_DOWN: RunState, TIME_OUT: SleepState},
    RunState: { RIGHT_UP: IdleState, LEFT_UP: IdleState, RIGHT_DOWN: IdleState, LEFT_DOWN: IdleState },
    SleepState: { LEFT_DOWN: RunState, RIGHT_DOWN: RunState }
}
class Boy:
    """Player character: idles, runs and sleeps, and can throw balls."""
    # Shared sprite sheet and line image, loaded lazily by the first instance.
    image = None
    Line = None

    def __init__(self):
        print("Creating..")
        self.x = random.randint(0, 200)
        # self.y = random.randint(90, 550)
        self.y = 90
        self.speed = random.uniform(3.0, 5.0)
        self.frame = random.randint(0, 7)
        self.state = None
        self.set_state(IdleState)
        self.dir = 1   # facing: 1 = right, 0 = left
        self.dx = 0    # current horizontal velocity
        if Boy.image == None:
            Boy.image = load_image('../res/animation_sheet.png')
        if Boy.Line == None:
            Boy.Line = load_image('../res/line.png')

    def get_bb(self):
        """Return the bounding box (left, bottom, right, top) for the pose."""
        if self.state == IdleState:
            return self.x - 15, self.y - 40, self.x + 15, self.y + 40
        if self.state == RunState:
            return self.x - 20, self.y - 40, self.x + 20, self.y + 40
        if self.state == SleepState:
            # lying down: the box extends behind the facing direction
            if self.dir == 1:
                return self.x - 70, self.y - 40, self.x + 10, self.y
            else:
                return self.x - 10, self.y - 40, self.x + 70, self.y

    def draw(self):
        """Delegate drawing to the current state; optionally show the bbox."""
        self.state.draw(self)
        if config.draws_bounding_box:
            draw_rectangle(*self.get_bb())

    def update(self):
        """Delegate the per-frame update to the current state."""
        self.state.update(self)

    def handle_event(self, e,x,y):
        """Translate an SDL event into state changes; (x, y) aims fired balls."""
        if (e.type, e.key) in key_event_table:
            key_event = key_event_table[(e.type, e.key)]
            if key_event == SPACE_DOWN or key_event == ENTER_DOWN:
                # enter throws a big ball, space a small one
                self.fire_ball(key_event == ENTER_DOWN,x,y)
                if self.state == SleepState:
                    self.set_state(IdleState)
                return
            # arrow keys adjust dx symmetrically: press adds, release removes
            if key_event == RIGHT_DOWN:
                self.dx += self.speed
                if self.dx > 0: self.dir = 1
            elif key_event == LEFT_DOWN:
                self.dx -= self.speed
                if self.dx < 0: self.dir = 0
            elif key_event == RIGHT_UP:
                self.dx -= self.speed
                if self.dx < 0: self.dir = 0
            elif key_event == LEFT_UP:
                self.dx += self.speed
                if self.dx > 0: self.dir = 1
            self.set_state(IdleState if self.dx == 0 else RunState)
            # print(self.dx, self.dir)

    def set_state(self, state):
        """Switch to *state*, running exit/enter hooks; no-op if unchanged."""
        if self.state == state: return
        if self.state and self.state.exit:
            self.state.exit(self)
        self.state = state
        if self.state.enter:
            self.state.enter(self)

    def fire_ball(self, big,ex,ey):
        """Launch a Ball toward screen point (ex, ey) from above the boy."""
        ballSpeed = 0.1 * (ex - self.x)/2 + self.dx
        ySpeed = 0.1 * (ey - self.y)/2
        if big: ySpeed *= 0.75   # big balls fly on a flatter arc
        ball = Ball(big, self.x, self.y + 70, ballSpeed, ySpeed)
        game_world.add_object(ball, game_world.layer_obstacle)
|
import matplotlib.pyplot as plt

# With a single sequence, plot() treats it as the y-values and uses the
# implicit x-values 0..3.
plt.plot([1, 2.5, 3, 4.5])
plt.ylabel('some numbers')
plt.show()
|
import sys
import os
import fam
sys.path.insert(0, 'tools/trees')
import cut_long_branches
import ete3

# Input alignment and gene tree for one family; output directory.
ali = "../BenoitDatasets/families/pdb_plants23/families/Phy003MBZY_CUCME/alignment.msa"
tree= "../BenoitDatasets/families/pdb_plants23/families/Phy003MBZY_CUCME/gene_trees/raxml-ng.bestAA.geneTree.newick"
out = "plop"

# Split the gene tree wherever a branch is longer than 2.0.
subtrees = cut_long_branches.cut_long_branches(tree, 2.0)

# PERF FIX: the alignment is loop-invariant — load it once instead of
# re-parsing the FASTA file for every subtree.
seqs = ete3.SeqGroup(ali, format="fasta")

idx = 1
for subtree in subtrees:
    leaves = set(subtree.get_leaf_names())
    if (len(leaves) < 4):
        # too few taxa to build a meaningful tree; skip (idx not consumed)
        continue
    newtree = os.path.join(out, "tree." + str(idx) + ".newick")
    newali = os.path.join(out, "ali." + str(idx) + ".fasta")
    idx = idx + 1
    subtree.write(outfile = newtree)
    # restrict the alignment to the sequences present in this subtree
    newseqs = ete3.SeqGroup()
    for seq in seqs.get_entries():
        if (seq[0] in leaves):
            newseqs.set_seq(seq[0], seq[1])
    newseqs.write(outfile = newali, format = "fasta")
|
from rest_framework import viewsets
from .models import Message
from rest_framework.permissions import AllowAny
from .serializers import MessageSerializer
class MessageViewSet(viewsets.ModelViewSet):
    """Open REST endpoint exposing all Message rows."""
    serializer_class = MessageSerializer
    queryset = Message.objects.all()
    # Restrict the endpoint to list/retrieve and create (no update/delete);
    # this is a tuple literal written without parentheses.
    http_method_names = 'get', 'post'
    permission_classes = [AllowAny]
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.forms.models import model_to_dict
from django.dispatch import receiver
from datetime import datetime
import urllib.request
from django import template
from tagging.fields import TagField
# NOTE(review): a template Library in a models module is unusual and nothing
# visible registers tags on it — presumably leftover from a moved template
# tag; confirm before removing.
register = template.Library()
# Create your models here.
class Memo(models.Model):
    """A shareable bookmark/memo: a keyword, URLs, free text and tags."""
    user_id = models.IntegerField()
    owner = models.CharField(max_length=20, default="???")
    directory = models.CharField(max_length=20, default="recently")
    shared = models.BooleanField(default=False)
    download = models.IntegerField(default=0)
    keyword = models.CharField(max_length=30)
    urls = models.TextField(default=None)
    memo = models.TextField(default="")
    # BUG FIX: the default must be the callable, not its result —
    # datetime.now() was evaluated once at import time, stamping every new
    # Memo with the server-start date. (Requires a Django migration.)
    pub_date = models.DateTimeField('date_published', default=datetime.now)
    tag = TagField()

    def updateMemo(self, u_id, u_owner, u_directory, u_shared, u_download, u_keyword, u_urls, u_memo, u_tag):
        """Overwrite every editable field and persist the row."""
        self.user_id = u_id
        self.owner = u_owner
        self.directory = u_directory
        self.shared = u_shared
        self.download = u_download
        self.keyword = u_keyword
        self.urls = u_urls
        self.memo = u_memo
        self.tag = u_tag
        self.save()

    def __str__(self):
        return self.keyword

    @staticmethod
    def split(urls):
        """Split a newline-separated URL string into a list.

        Declared as a staticmethod: the original had no ``self`` parameter,
        so calling it on an instance bound the instance to ``urls`` and
        crashed; ``Memo.split(urls)`` keeps working unchanged.
        """
        return urls.split('\n')

    def increaseDL(self):
        """Increment the download counter and persist it."""
        self.download += 1
        self.save()
class Profile(models.Model):
    """Per-user settings: directory names, counters and UI selection state."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    numofDir = models.IntegerField(default=0)
    selectedMemo = models.IntegerField(default=0)
    currentdir = models.CharField(max_length=20, default='recently')
    # Up to ten user-defined directory names.
    dir1 = models.CharField(max_length=30, blank=True)
    dir2 = models.CharField(max_length=30, blank=True)
    dir3 = models.CharField(max_length=30, blank=True)
    dir4 = models.CharField(max_length=30, blank=True)
    dir5 = models.CharField(max_length=30, blank=True)
    dir6 = models.CharField(max_length=30, blank=True)
    dir7 = models.CharField(max_length=30, blank=True)
    dir8 = models.CharField(max_length=30, blank=True)
    dir9 = models.CharField(max_length=30, blank=True)
    dir10 = models.CharField(max_length=30, blank=True)

    def setSelectedMemo(self, id):
        """Remember which memo is selected and persist it.

        BUG FIX: ``self`` was missing from the signature while the body used
        ``self``, so every call raised (TypeError or NameError).
        """
        self.selectedMemo = id
        self.save()

    def increase(self):
        """Increment the directory counter and persist it."""
        self.numofDir += 1
        self.save()

    def decrease(self):
        """Decrement the directory counter and persist it."""
        self.numofDir -= 1
        self.save()
def get_fields_name(model):
    """Collect the non-empty string field values of *model*, minus the first.

    Mirrors the original behavior exactly, including raising IndexError when
    no string values are found.
    """
    names = [value for value in model_to_dict(model).values()
             if type(value) == str and value != '']
    names.pop(0)  # the first collected value is always discarded
    return names
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Signal handler: create a Profile whenever a new User is created."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Signal handler: keep the related Profile saved alongside the User."""
    instance.profile.save()
import datetime
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from django.db.models import Q
from tssite.models import TalmudStudy, Class
class RSSAllFeed(Feed):
    """RSS feed aggregating the latest published classes of every division
    plus the Talmud study series."""
    title = "Tanach Study Daily Updates"
    link = "/feeds/rss/all"
    description = "Description of Tanach Study Daily Updates"

    # Feed order preserved from the original hand-written loops:
    # Torah (1), Parasha (7), Neviim Rishonim (2), Neviim Aharonim (3),
    # Tere Assar (4), Ketuvim (5), Mishna (6).
    _DIVISION_SEQUENCES = (1, 7, 2, 3, 4, 5, 6)

    def items(self):
        """Return (division_tag, object) tuples for every published item."""
        items = []
        now = datetime.datetime.now()
        for sequence in self._DIVISION_SEQUENCES:
            # One query per division; the seven near-identical copy-pasted
            # loops of the original are collapsed into this single loop.
            published = Class.objects.exclude(date__isnull=True).filter(
                division_sequence=sequence, date__lte=now)[:500]
            for cls in published:
                items.append((cls.division, cls))
        for study in TalmudStudy.objects.all().order_by('-date', '-teacher')[:500]:
            items.append(("talmud", study))
        return items

    def item_title(self, item):
        """Feed entry title: the class object's string form."""
        return str(item[1])

    def item_pubdate(self, item):
        """Feed entry publication date."""
        return item[1].date

    def item_author_name(self, item):
        """Feed entry author: the teacher's display name."""
        return item[1].teacher.__str__()

    def item_description(self, tup):
        """Build the HTML description: sponsors plus an embedded audio player."""
        description = ''
        item = tup[1]
        title = str(item)
        teacher = str(item.teacher)
        class_title = ''
        if tup[0] == 'talmud':
            seder = item.seder.title()
            masechet = item.masechet.title()
            link = item.get_location()
            seder_sponsor = '' if not item.seder_sponsor else item.seder_sponsor
            masechet_sponsor = '' if not item.masechet_sponsor else item.masechet_sponsor
            daf_sponsor = '' if not item.daf_sponsor else item.daf_sponsor
            description = f'Seder {seder}'
            if seder_sponsor:
                description = f'{description}<br />{seder_sponsor}'
            description = f'{description}<br />Masechet {masechet}'
            if masechet_sponsor:
                description = f'{description}<br />{masechet_sponsor}'
            description = f'{description}<br />Daf {item.daf}'
            if daf_sponsor:
                description = f'{description}<br />{daf_sponsor}'
        elif tup[0] == 'parasha':
            description = f'Sefer {item.section_title}'
            if item.section_sponsor:
                description = f'{description}<br />{item.section_sponsor}'
            description = f'{description}<br />Parashat {item.unit_title}'
            if item.unit_sponsor:
                description = f'{description}<br />{item.unit_sponsor}'
        elif tup[0] == 'torah':
            class_title = item.part_title
            description = f'Sefer {item.section_title}'
            if item.section_sponsor:
                description = f'{description}<br />{item.section_sponsor}'
            description = f'{description}<br />Parashat {item.unit_title}'
            if item.unit_sponsor:
                description = f'{description}<br />{item.unit_sponsor}'
        elif tup[0] == 'neviim_rishonim' or tup[0] == 'neviim_aharonim' or tup[0] == 'tere_assar' or tup[0] == 'ketuvim':
            class_title = item.unit_title
            description = f'Sefer {item.section_title}'
            if item.section_sponsor:
                description = f'{description}<br />{item.section_sponsor}'
            description = f'{description}<br />Perek {item.unit}'
            if item.unit_sponsor:
                description = f'{description}<br />{item.unit_sponsor}'
        elif tup[0] == 'mishna':
            class_title = item.part_title
            description = f'In Loving Memory of Mr. Ovadia Buddy Sutton A"H<br />'
            description = f'{description}<br />Seder {item.segment_title}'
            if item.segment_sponsor:
                description = f'{description}<br />{item.segment_sponsor}'
            description = f'{description}<br />Masechet {item.section_title}'
            if item.section_sponsor:
                description = f'{description}<br />{item.section_sponsor}'
            description = f'{description}<br />Perek {item.unit}'
            if item.unit_sponsor:
                description = f'{description}<br />{item.unit_sponsor}'
        else:
            raise Exception(f'unsupported division {tup[0]}')
        if class_title != '':
            return f'<b>{class_title}</b><br />{description}<br /><audio controls=""><source src="https://cdn.tanachstudy.com/{item.audio}"></audio>'
        return f'{description}<br /><audio controls=""><source src="https://cdn.tanachstudy.com/{item.audio}"></audio>'

    def item_link(self, tup):
        """Absolute URL of the class page on the main site."""
        host = 'https://tanachstudy.com'
        return f'{host}{tup[1].get_location()}'

    def item_enclosure_url(self, item):
        """URL of the audio file served from the CDN."""
        return f'https://cdn.tanachstudy.com/{item[1].audio}'

    item_enclosure_mime_type = 'audio/mpeg'
class AtomAllFeed(RSSAllFeed):
    """Atom flavour of the all-divisions feed; reuses RSSAllFeed's items."""
    feed_type = Atom1Feed
    subtitle = RSSAllFeed.description
    link = "/feeds/atom/all"
|
import os
import cv2
from scipy.fftpack import dct
from tools import detect_face
import image_preprocessing_functions as ipf
import investigation_functions as inv_func
def make_db(path_to_dataset, is_detection=False):
    """Load an ORL-style face dataset into {person_name: [grayscale images]}.

    Only subdirectories whose name starts with "s" are scanned.  When
    *is_detection* is true, each image is cropped to the detected face and
    images without a detected face are skipped.
    """
    people = {}
    for entry in os.listdir(path_to_dataset):
        if not entry.startswith("s"):
            continue
        person_dir = os.path.join(path_to_dataset, entry)
        faces = []
        for file_name in os.listdir(person_dir):
            loaded = cv2.imread(os.path.join(person_dir, file_name))
            gray = cv2.cvtColor(loaded, cv2.COLOR_BGR2GRAY)
            if is_detection:
                face, _rect = detect_face(gray)
                if face is not None:
                    faces.append(face)
            else:
                faces.append(gray)
        people[f'person_{entry.replace("s", "")}'] = faces
    return people
if __name__ == "__main__":
    # Dataset location and output paths for each experiment's artifacts.
    path_to_db = "orl_faces"
    size_invest_path = os.path.join("graphs", "dataset_size_investigation")
    param_graphs_path = os.path.join("graphs", "parameters_investigation_graphs")
    voter_inv_path = os.path.join("graphs", "voter_graphs")
    params_inv_config_path = os.path.join("configs", "best_params.yml")
    dataset_inv_config_path = os.path.join("configs", "dataset_size_investigation.yml")
    voter_inv_config_path = os.path.join("configs", "voter_investigation.yml")
    # Experiment switches: parameter search, voter evaluation, or demo run.
    investigation_mode = False
    voter_investigation_mode = True
    feature_extraction_methods = [ipf.hist, ipf.dft, dct,
                                  ipf.scaler, ipf.sliding_window]
    db = make_db(path_to_db)
    # Train/test split ratios explored during the investigations.
    proportions = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    # Hyper-parameter grid per feature extractor; 'const_args' are fixed
    # extra arguments forwarded unchanged to the method.
    hist_args = {
        'histSize': [[x] for x in range(10, 257, 20)],
        "const_args": {
            'channels': [0],
            'mask': None,
            'ranges': [0, 256]
        }
    }
    dct_args = {
        'n': range(10, 210, 10),
        "const_args": None
    }
    window_args = {
        'window_size': range(1, 21, 1),
        "const_args": None
    }
    scaler_args = {
        'area': range(1, 21, 1),
        "const_args": None
    }
    dft_args = {
        'p': range(50, 200, 10),
        "const_args": None
    }
    # NOTE: order matches feature_extraction_methods above.
    args = [hist_args, dft_args, dct_args, scaler_args, window_args]
    if investigation_mode:
        proportion = 0.6
        X_train, y_train, X_test, y_test = inv_func.train_and_test_split(db, proportion)
        best_params = inv_func.investigate_params(X_train, y_train, X_test, y_test,
                                                  feature_extraction_methods, args,
                                                  param_graphs_path, params_inv_config_path)
        inv_func.investigate_dataset_size(db, feature_extraction_methods, args, proportions,
                                          best_params, dataset_inv_config_path, size_invest_path)
        print(best_params)
    else:
        # Best values found by a previous parameter investigation.
        best_hist_args = {
            'histSize': [30],
            'channels': [0],
            'mask': None,
            'ranges': [0, 256]
        }
        best_dft_args = {'p': 120}
        best_dct_args = {'n': 90}
        best_scaler_args = {'area': 7}
        best_window_args = {'window_size': 2}
        best_args = [best_hist_args, best_dft_args, best_dct_args, best_scaler_args, best_window_args]
        if voter_investigation_mode:
            # Per-method weights used by the voting classifier.
            accuracies = [1, 0.45, 0.45, 0.4, 0.8]
            best_params = [[30], 120, 90, 7, 2]
            inv_func.voter_investigation(db, feature_extraction_methods, args, best_args, proportions,
                                         best_params, voter_inv_config_path, voter_inv_path, accuracies)
        else:
            proportion = 0.8
            images_path = "predictions"
            inv_func.person_recognition_example(db, proportion, feature_extraction_methods, best_args, images_path)
|
# Set your API key here.
# SECURITY NOTE(review): never commit a real key to version control — prefer
# reading it from an environment variable or a git-ignored config file.
api_key = ""
import tkinter as tk
import sys
import os.path
from os import path
import time
from tkinter import *
import datetime
import os
from tkinter import messagebox
import smtplib
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import random
from email.message import EmailMessage
import functions as funct
# Fixed window dimensions for the CanvasAlerts UI.
HEIGHT = 467
WIDTH = 830

# Main application window.
sroot = Tk()
sroot.resizable(False, False)
sroot.minsize(height=HEIGHT, width=WIDTH)
sroot.title("CanvasAlerts")
sroot.configure()
Frame(sroot)
# Full-window background label; each page swaps its image.
BG = Label(sroot)
BG.place(x=-5, y=-5)
import sys, os  # NOTE: redundant (already imported above); kept unchanged
# Button images used across the different pages.
sVerifImg = tk.PhotoImage(file = os.path.join(sys.path[0],"imageAssets/sendVerification.png"))
runImg = tk.PhotoImage(file = os.path.join(sys.path[0],"imageAssets/run.png"))
submitImg = tk.PhotoImage(file = os.path.join(sys.path[0],"imageAssets/submit.png"))
infoImg = tk.PhotoImage(file = os.path.join(sys.path[0],"imageAssets/infoImg.png"))
logoutImg = tk.PhotoImage(file = os.path.join(sys.path[0],"imageAssets/logout.png"))
# Widgets of the currently shown page; destroyElems() tears them down.
elems = []
PATH = os.path.join(sys.path[0], funct.getCorrectDriver())
#PATH = os.path.join(sys.path[0], "chromedriver")
options = webdriver.ChromeOptions()
options.add_argument('--lang=en_US')
options.headless = True
# NOTE: 'global' at module level is a no-op; kept to document intent.
global driver
global njitEmail
global appPassword
# Credentials and phone gateway collected by the setup pages.
email = ""
# BUG FIX: this was misspelled 'passord', which left the module-level
# 'password' (read by verifyVerificationCode) without an initial value.
password = ""
smsGateway = ""
verificationCode = ""
def destroyElems():
    """Tear down every widget of the currently shown page.

    The module-level ``elems`` list holds the widgets created by the page
    builders; each widget is destroyed (the list itself is left untouched,
    as the next page builder reassigns it).
    """
    global elems
    for widget in elems:
        widget.destroy()
def verifyUPAP(EMAIL, PASSWORD):
    """Validate the NJIT e-mail/password pair and the alert mail account.

    On success the credentials are kept in the module globals and the phone
    setup page is shown; otherwise an error label is displayed in place.
    """
    global email
    global password
    global appPassword
    global elems
    global njitEmail
    njitEmail="canvasalertsnjit@gmail.com"
    appPassword="eeytczejwrwbmruh"
    driver = webdriver.Chrome(PATH, options=options)
    # Created hidden; only placed (with real text) when an error occurs.
    errorLabel = tk.Label(sroot, text ="hello", font=("Myriad", 12), fg='#b33030')
    elems.append(errorLabel)
    if "@njit.edu" not in EMAIL:
        errorLabel.config(text='Invalid Email - include \'@njit.edu\'')
        errorLabel.place(relx=0.345, rely=0.73,relwidth=0.305, relheight=0.04)
    else:
        if(not (funct.authenticate(EMAIL.replace("@njit.edu",""),PASSWORD, driver))):
            driver.close()
            errorLabel.config(text='Invalid Email or Password')
            errorLabel.place(relx=0.345, rely=0.73,relwidth=0.305, relheight=0.04)
        else:
            try:
                # Smoke-test the SMTP credentials used for sending alerts.
                server = smtplib.SMTP("smtp.gmail.com", 587)
                server.starttls()
                server.login(njitEmail, appPassword)
                server.quit()
                driver.close()
                phoneSetUpPage()
                email = EMAIL
                password = PASSWORD
            # BUG FIX: was a bare 'except:' that also swallowed SystemExit
            # and KeyboardInterrupt; only real failures should be caught.
            except Exception:
                driver.close()
                errorLabel.config(text='Invalid Email or App Password')
                errorLabel.place(relx=0.345, rely=0.8,relwidth=0.305, relheight=0.04)
def upapPage():
    """Show the e-mail/password login page (first screen of the app)."""
    global BG
    global elems
    destroyElems()
    # Swap in the login background; keep a reference on the label so the
    # PhotoImage is not garbage-collected.
    upapBG = tk.PhotoImage(file =os.path.join(sys.path[0],"imageAssets/testCABGV2.png"))
    BG.configure(image = upapBG)
    BG.image = upapBG
    BG.place(x=-5,y=-5)
    emailEntry = tk.Entry(sroot, font=40, borderwidth = 0,highlightthickness = 0,bg = '#fafafa', fg = '#636363')
    emailEntry.place(relx=0.345, rely=0.435,relwidth=0.305, relheight=0.04)
    # show="•" masks the typed password.
    passwordEntry = tk.Entry(sroot, font=40, borderwidth = 0,highlightthickness = 0,bg = '#fafafa', fg = '#636363', show = "•")
    passwordEntry.place(relx=0.345, rely=0.535,relwidth=0.305, relheight=0.04)
    submitButton = tk.Button(sroot, command =lambda: verifyUPAP(emailEntry.get(), passwordEntry.get()), image = submitImg)
    submitButton.place(relx=0.345, rely=0.63,relwidth=0.305, relheight=0.04)
    elems = [emailEntry, passwordEntry,submitButton]
def carrierInfo():
    """Pop up a dialog listing the supported mobile carriers."""
    # Typo fixed in the user-facing text: "differnt" -> "different".
    messagebox.showinfo("Carrier Options", "The following are supported carriers:\n Verizon\n T-Mobile\n Sprint\n AT&T\n Metro PCS\n Boost\n\n If you use a different carrier, enter the SMS Gateway extension here")
def createSMSGateway(number, carrier):
    """Return ``<number><gateway>`` for the given carrier.

    Known carrier names map to their e-mail-to-SMS gateway domain; any other
    value is treated as a raw gateway suffix supplied by the user.  The
    result is also stored in the module-level ``smsGateway``.
    """
    global smsGateway
    # Dispatch table replaces the long if/elif chain; leftover debug
    # print() calls of the raw inputs were removed.
    gateways = {
        "Verizon": "@vtext.com",
        "AT&T": "@txt.att.net",
        "Sprint": "@messaging.sprintpcs.com",
        "T-Mobile": "@tmomail.net",
        "Metro PCS": "@mymetropcs.com",
        "Boost": "@myboostmobile.com",
    }
    smsGateway = number + gateways.get(carrier, carrier)
    return smsGateway
def sendVerificationCode(number, carrier):
    """Text a fresh 4-digit verification code to the user's phone.

    The code is sent as an e-mail to the carrier's SMS gateway and stored in
    the module-level ``verificationCode`` for later comparison.
    """
    global smsGateway
    global email
    global appPassword
    global verificationCode
    global elems
    import secrets  # local import: cryptographically strong randomness
    # SECURITY FIX: random.random() is not suitable for security tokens;
    # use the secrets module to draw each digit.
    verificationCode = ''.join(str(secrets.randbelow(10)) for _ in range(4))
    # Strip formatting characters so only the bare digits remain.
    cleanNumber = number.replace(" ", "").replace("-","").replace("(","").replace(")","")
    msg = EmailMessage()
    msg.set_content(verificationCode)
    msg['subject'] = "CanvasAlerts Verification Code"
    msg['to'] = createSMSGateway(cleanNumber, carrier)
    msg['from'] = njitEmail
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.starttls()
    server.login(njitEmail, appPassword)
    server.send_message(msg)
    server.quit()
    sentLabel = tk.Label(sroot, text ="Verification Code was sent to " + cleanNumber, font=("Myriad", 12), fg='#3e95ef')
    sentLabel.place(relx=0.345, rely=0.9,relwidth=0.305, relheight=0.04)
    elems.append(sentLabel)
def verifyVerificationCode(code):
    """Compare the entered code against the one that was texted.

    On success, persist the collected credentials to creds.txt and move on to
    the run page; on failure, show an error label.
    """
    global elems
    if(code == verificationCode):
        # NOTE(review): credentials are stored in plain text — consider the
        # OS keyring or at least restrictive file permissions.
        # Context manager ensures the file handle is closed even on error.
        with open("creds.txt", "w") as creds:
            creds.write(email + "\n")
            creds.write(password + "\n")
            creds.write(appPassword + "\n")
            creds.write(smsGateway)
        runPage()
    else:
        errorLabel = tk.Label(sroot, text ="Incorrect Verification Code", font=("Myriad", 12), fg='#b33030')
        errorLabel.place(relx=0.345, rely=0.9,relwidth=0.305, relheight=0.04)
        elems.append(errorLabel)
def phoneSetUpPage():
    """Build the phone verification screen: entries for phone number,
    carrier, and verification code, plus send/submit/info buttons."""
    global elems
    global BG
    destroyElems()
    phoneBG = tk.PhotoImage(file =os.path.join(sys.path[0],"imageAssets/phoneBG.png"))
    BG.configure(image = phoneBG)
    # Keep a reference on the widget so Tk does not garbage-collect the image.
    BG.image = phoneBG
    BG.place(x=-5,y=-5)
    numberEntry = tk.Entry(sroot, font=40, borderwidth = 0,highlightthickness = 0,bg = '#fafafa', fg = '#636363')
    numberEntry.place(relx=0.345, rely=0.435,relwidth=0.305, relheight=0.04)
    verificationCodeEntry = tk.Entry(sroot, font=40, borderwidth = 0,highlightthickness = 0,bg = '#fafafa', fg = '#636363')
    verificationCodeEntry.place(relx=0.345, rely=0.734,relwidth=0.305, relheight=0.04)
    # carrier dropdown (disabled; free-text entry is used instead so custom
    # gateway suffixes can be entered)
    # carrier = StringVar(sroot)
    # carrier.set("Select Carrier") # default value
    # carrierDropdown = OptionMenu(sroot, carrier, "Verizon","T-Mobile", "AT&T", "Sprint", "Metro PCS", "Boost")
    # carrierDropdown.place(relx=0.333, rely=0.5, relwidth=0.328, relheight=0.1)
    carrierEntry = tk.Entry(sroot, font=40, borderwidth = 0,highlightthickness = 0,bg = '#fafafa', fg = '#636363')
    carrierEntry.place(relx=0.345, rely=0.535,relwidth=0.305, relheight=0.04)
    sVerifButton = tk.Button(sroot, command =lambda: sendVerificationCode(numberEntry.get(), carrierEntry.get()), image = sVerifImg)
    sVerifButton.place(relx=0.345, rely=0.615,relwidth=0.305, relheight=0.04)
    submitButton = tk.Button(sroot, command =lambda: verifyVerificationCode(verificationCodeEntry.get()), image = submitImg)
    submitButton.place(relx=0.345, rely=0.82,relwidth=0.305, relheight=0.04)
    infoButton = tk.Button(sroot, command =lambda: carrierInfo(), image = infoImg)
    infoButton.place(relx=0.661, rely=0.51,relwidth=0.04, relheight=0.065)
    # Track all created widgets so destroyElems() can clear this page.
    elems = [numberEntry,verificationCodeEntry,carrierEntry,sVerifButton,submitButton, infoButton]
#doesnt load correctly
#def showRunLabelAndRun():
# global sroot
# runningLabel = tk.Label(sroot, text ="Canvas Alerts is running.\nPlease allow up to 2 minutes.", font=("Myriad", 12), fg='#3e95ef')
# runningLabel.place(relx=0.345, rely=0.61,relwidth=0.305, relheight=0.06)
# sroot.update_idletasks()
# elems.append(runningLabel)
# runScript()
def runScript(delta):
    """Persist the look-ahead window and run one full alert pass.

    delta: number of days before an assignment's due date to alert on;
        arrives as a string from the Tk entry widget.
    """
    global elems
    # Remember the chosen window for the next launch.
    file = open("delta.txt", 'w')
    file.write(str(delta))
    file.close()
    funct.setDelta(delta)
    print("SCRIPT IS RUNNING")
    funct.setDriver()
    funct.setCreds()
    # Scrape assignment links, build the assignment list, send due alerts.
    listA = funct.assignmentList(funct.assignmentLinks())
    funct.sendAlertIfDue(listA)
    #destroyElems()
    doneLabel = tk.Label(sroot, text ="Canvas Alerts has finished running.", font=("Myriad", 12), fg='#3e95ef')
    doneLabel.place(relx=0.345, rely=0.6,relwidth=0.305, relheight=0.04)
    elems.append(doneLabel)
def confirmLogout():
    """Ask for confirmation; on yes, delete saved credentials and return
    to the login/setup page."""
    wants_logout = messagebox.askyesno("Confirm Logout", "Are you sure you want to logout?")
    if wants_logout:
        os.remove("creds.txt")
        upapPage()
def runPage():
    """Main screen: choose the alert window (days before due date), run
    the alert pass, or log out."""
    global BG
    global elems
    # Default look-ahead window in days.
    delta = 7
    if(path.exists("delta.txt")):
        # Restore the previously saved window.
        # NOTE(review): file.read() yields a str here, not an int -- the
        # entry widget below stringifies it anyway, but confirm downstream
        # funct.setDelta() tolerates a string.
        file = open("delta.txt", 'r')
        delta = file.read()
        file.close()
    destroyElems()
    runBG = tk.PhotoImage(file =os.path.join(sys.path[0],"imageAssets/runBG.png"))
    BG.configure(image = runBG)
    # Keep a reference so Tk does not garbage-collect the image.
    BG.image = runBG
    BG.place(x=-5,y=-5)
    daysBefore = tk.Entry(sroot, font='Helvetica 14 bold', borderwidth = 0,highlightthickness = 0,bg = '#ffffff', fg = '#a94848') #636363
    daysBefore.insert(0, str(delta))
    daysBefore.place(relx=0.44, rely=0.735,relwidth=0.025, relheight=0.06)
    runButton = tk.Button(sroot, command =lambda: runScript(daysBefore.get()), image = runImg)
    runButton.place(relx=0.345, rely=0.535,relwidth=0.305, relheight=0.04)
    logoutButton = tk.Button(sroot, command =lambda: confirmLogout(), image = logoutImg)
    logoutButton.place(relx=0.01, rely=0.01,relwidth=0.04, relheight=0.07)
    # Track widgets so destroyElems() can clear the page later.
    elems.append(runButton)
    elems.append(logoutButton)
    elems.append(daysBefore)
# Entry point: jump straight to the run page when credentials were saved by
# a previous session, otherwise start at the login/setup flow.
if(path.exists("creds.txt")):
    runPage()
else:
    upapPage()
# Hand control to the Tk event loop.
mainloop()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 18:13:19 2015
@author: eejvt
Code developed by Jesus Vergara Temprado
Contact email eejvt@leeds.ac.uk
University of Leeds 2015
"""
import numpy as np
import sys
#sys.path.append('C:\opencv\build\x64\vc12\bin')
import cv2
from glob import glob
import os
# Experiment data layout: <folder>/<day>/<run subfolder>/...
# NOTE(review): Python 2 byte string -- under Python 3 the '\U' escape in
# this literal would be a syntax error; use a raw string when porting.
folder='C:\Users\eejvt\Mace head 2015\Experiments\ul-assay\\'
day='150827'
os.chdir(folder+day)
# One entry per run subdirectory of the selected day's folder.
a=glob('*\\')
def getSec(s):
    """Convert an 'HH:MM:SS' time string into a count of seconds."""
    parts = s.split(':')
    seconds = int(parts[2])
    seconds += 60 * int(parts[1])
    seconds += 3600 * int(parts[0])
    return seconds
def run_video(ini_speed=1,name='Cold Plate',delay=0,temp_frame=0,low_info=0):
    """Interactively replay run.avi so the operator can mark freezing events.

    ini_speed: initial cv2.waitKey delay between frames, in ms.
    name: caption drawn onto each frame.
    delay: frame offset subtracted from each recorded event frame number.
    temp_frame: per-frame temperature array; the int default 0 means
        "no temperature data available".
    low_info: start with the compact overlay instead of the full help text.

    Returns the list of recorded event frame indices (delay-corrected).
    Python 2 code (print statements, integer division on ``speed``).
    """
    cap = cv2.VideoCapture('run.avi')
    print cap.isOpened()
    iframe=1
    events=[]
    speed=ini_speed#ms
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Throwaway first read; the loop seeks explicitly every iteration so the
    # navigation keys (b/1/2/3/4) can jump to arbitrary frames.
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    while(cap.isOpened()):
        cap.set(cv2.CAP_PROP_POS_FRAMES,iframe)
        ret, frame = cap.read()
        if not ret:
            break
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #print
        '''
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if iframe>save_frames:
            for j in range(save_frames):
                if j==0:
                    olds[:,:,j]=frame
                else:
                    olds[:,:,j]=olds[:,:,j-1]
        '''
        # Overlay colour (BGR) and stringified event list for on-screen info.
        color=(255,50,0)
        st_events=str(events).strip('[]')
        if not low_info:
            # Full overlay: caption, temperature (when supplied), key help,
            # and the current frame/speed/event counters.
            cv2.putText(frame,name,(10,120), font, 1,color,2,cv2.LINE_AA)
            if not isinstance(temp_frame,int):
                cv2.putText(frame,'T= %1.2f C'%temp_frame[iframe],(900,200), font, 2,color,2,cv2.LINE_AA)
            cv2.putText(frame,'Pause: p - Back: b - Forward: n - Event: spacebar - Delete: d - Faster/play: h,f - Slower: s - 200ms speed: j',(10,25), font, 0.6,color,2,cv2.LINE_AA)
            cv2.putText(frame,'50 frames back: 1 - 10 frames back: 2 - 10 frames forward: 3 - 50 frames forward: 4 - Low info: l',(10,75), font, 0.6,color,2,cv2.LINE_AA)
            cv2.putText(frame,'Frame %i'%iframe,(10,200), font, 2,color,2,cv2.LINE_AA)
            cv2.putText(frame,'Speed %i ms'%speed,(10,300), font, 1,color,2,cv2.LINE_AA)
            cv2.putText(frame,'Events %i'%len(events),(10,400), font, 1,color,2,cv2.LINE_AA)
        else:
            # Compact overlay: abbreviated counters only.
            cv2.putText(frame,'Fr %i'%iframe,(10,200), font, 1.5,color,2,cv2.LINE_AA)
            cv2.putText(frame,'Sp %i'%speed,(10,300), font, 0.8,color,2,cv2.LINE_AA)
            cv2.putText(frame,'Ev %i'%len(events),(10,400), font, 0.8,color,2,cv2.LINE_AA)
        # Event list wraps onto a second line when it grows long.
        if len(st_events)<100:
            cv2.putText(frame,'%s'%st_events,(10,700), font, 0.5,color,2,cv2.LINE_AA)
        else:
            cv2.putText(frame,'%s'%st_events[:100],(10,700), font, 0.5,color,2,cv2.LINE_AA)
            cv2.putText(frame,'%s'%st_events[100:],(10,750), font, 0.5,color,2,cv2.LINE_AA)
        cv2.imshow('Droplet freezing',frame)
        #cv2.waitKey(speed)
        #print iframe
        # Key dispatch: waitKey also provides the inter-frame delay.
        k = cv2.waitKey(speed)
        if k == 27:  # ESC: close the window and return the events collected
            cv2.destroyAllWindows()
            break
        elif k == ord(' '):  # spacebar: record a freezing event at this frame
            events.append(iframe-delay)
            continue
        #    cv2.waitKey(speed)
        #if cv2.waitKey(1) & 0xFF == ord('q'):
        #    break
        elif k == ord('l'):  # toggle between full and compact overlays
            low_info=int(np.logical_not(low_info))
            continue
        elif k == ord('s'):  # slower: double the delay
            speed=speed*2
            cv2.waitKey(speed)
        elif k == ord('f'):  # faster: halve the delay (py2 integer division)
            speed=speed/2
            if speed==0:
                speed=1
            cv2.waitKey(speed)
        elif k == ord('h'):  # same behaviour as 'f' (duplicate binding)
            speed=speed/2
            if speed==0:
                speed=1
            cv2.waitKey(speed)
        elif k == ord('j'):  # jump to the standard 200 ms review speed
            speed=200
        elif k == ord('d'):  # delete the most recent event, if any
            if len(events)!=0:
                events.pop()
            continue
            #cv2.waitKey(speed)
        elif k == ord('p'):  # pause until any key is pressed
            cv2.waitKey(0)
        elif k == ord('1'):  # jump 50 frames back
            iframe=iframe-50
            continue
        elif k == ord('2'):  # jump 10 frames back
            iframe=iframe-10
            continue
        elif k == ord('3'):  # jump 10 frames forward
            iframe=iframe+10
            continue
        elif k == ord('4'):  # jump 50 frames forward
            iframe=iframe+50
            continue
            cv2.waitKey(0)  # unreachable: follows the continue above
        elif k == ord('b'):  # step one frame back and freeze playback
            iframe=iframe-1
            speed=0
            continue
        '''
        elif k == ord('r'):
            iframe=iframe-save_frames
            for iold in range(save_frames):
                cv2.putText(olds[:,:,save_frames-iold-1],name,(10,100), font, 1,color,2,cv2.LINE_AA)
                cv2.putText(olds[:,:,save_frames-iold-1],'Frame %i'%iframe,(10,200), font, 1,color,2,cv2.LINE_AA)
                cv2.putText(olds[:,:,save_frames-iold-1],'Speed %i ms'%speed,(10,300), font, 1,color,2,cv2.LINE_AA)
                cv2.putText(olds[:,:,save_frames-iold-1],'Events %i'%len(events),(10,400), font, 0.5,color,2,cv2.LINE_AA)
                if len(st_events)<10:
                    cv2.putText(olds[:,:,save_frames-iold-1],'%s'%st_events,(10,500), font, 0.5,color,2,cv2.LINE_AA)
                else:
                    cv2.putText(olds[:,:,save_frames-iold-1],'%s'%st_events[:10],(10,500), font, 0.5,color,2,cv2.LINE_AA)
                    cv2.putText(olds[:,:,save_frames-iold-1],'%s'%st_events[10:],(10,600), font, 0.5,color,2,cv2.LINE_AA)
                cv2.imshow('Droplet freezing',olds[:,:,save_frames-iold-1])
                k = cv2.waitKey(500)
                if k == 27:         # wait for ESC key to exit
                    cv2.destroyAllWindows()
                    break
                elif k == ord(' '): # wait for 's' key to save and exit
                    events.append(iframe-delay)
            if first_time:
                speed=200
                first_time=0
            cv2.waitKey(speed)
            iframe=iframe+1
        '''
        #if cv2.waitKey(1) & 0xFF == ord('q'):
        #    break
        # Default: advance one frame for the next iteration.
        iframe=iframe+1
    print
    cap.release()
    cv2.destroyAllWindows()
    return events
#%%
#fig=plt.figure()
#ax=plt.subplot(211)
#bx=plt.subplot()
#%%
# Interactive per-run processing: for every run subfolder, map frame
# timestamps to logged temperatures, collect freezing events (from the
# video UI or a previously saved CSV), and write temps.csv / ff.csv.
# Python 2 script (print statements, raw_input).
for ifile in range (len(a)):
    os.chdir(folder+day+'\\'+a[ifile])
    if not a[ifile]== 'blanks\\':
        # Show which result files already exist and ask whether to process.
        print a[ifile][7:][:-1]
        print 'Read? 1:Yes 0:No'
        if os.path.isfile('events_frame.csv'):
            print '"events_frame.csv" file existing'
        if os.path.isfile('temps.csv'):
            print '"temps.csv" file existing'
        if os.path.isfile('ff.csv'):
            print '"ff.csv" file existing'
        awnser= int(raw_input())
        if not awnser:
            continue
    else:
        # Skip the blanks folder entirely.
        continue
    # Temperature log: column 1 holds timestamps, column 2 temperatures.
    data=np.genfromtxt('log.csv',delimiter=',',dtype=None)#converters = {1: getSec})
    headers=data[0,:]
    data=data[1:,:]
    # Per-frame timestamps for the recorded video.
    run_times=np.genfromtxt('run',skip_header=1,dtype=None)
    temp_frame=np.linspace(0,len(run_times),len(run_times))
    i=0
    #run_times=getSec(run_times)
    # Match each frame timestamp to a logged temperature; when there is no
    # exact match, nudge the final digit of the timestamp by one second and
    # retry, falling back to the 999 "missing" sentinel.
    for i in range(len(run_times)):
        if run_times[i] in data[:,1]:
            temp_frame[i]=data[data[:,1].tolist().index(run_times[i]),2]
        else:
            if int(run_times[i][-1])>0:
                run_times[i]=run_times[i][:(len(run_times[i])-1)]+str(int(run_times[i][-1])-1)
            else :
                run_times[i]=run_times[i][:(len(run_times[i])-1)]+str(int(run_times[i][-1])+1)
            if run_times[i] in data[:,1]:
                temp_frame[i]=data[data[:,1].tolist().index(run_times[i]),2]
            else:
                temp_frame[i]=999
    print 'Events input: \n 1: video analisys \n 2: .csv:'
    awnser= int(raw_input())
    if awnser==1:
        # Interactive marking of freezing events in the video.
        events=run_video(name=a[ifile][12:][:-1],temp_frame=temp_frame)
        np.savetxt('events_frame.csv',events,delimiter=',')
        print '\'events_frame.csv\' saved/overwritted \n \n'
        #print 'REPEAT VIDEO? 0=NO 1=YES'
        #nb = int(raw_input())
        #if nb:
        #    events=run_video()
    else:
        # Reuse events recorded on a previous pass.
        events=np.genfromtxt('events_frame.csv',delimiter=',',dtype=None)
        np.savetxt('events_frame.csv',events,delimiter=',')
    # Cumulative frozen fraction: event index / total droplet count.
    particles=len(events)
    frezz_events=np.linspace(0,len(events),len(events))
    ff=frezz_events/float(particles)
    temps=np.zeros(len(events))
    i=0
    #run_times=getSec(run_times)
    # Same timestamp-matching scheme as above, but only for event frames.
    for i in range(len(events)):
        if run_times[events[i]] in data[:,1]:
            temps[i]=data[data[:,1].tolist().index(run_times[events[i]]),2]
        else:
            if int(run_times[events[i]][-1])>0:
                run_times[events[i]]=run_times[events[i]][:(len(run_times[events[i]])-1)]+str(int(run_times[events[i]][-1])-1)
            else :
                run_times[events[i]]=run_times[events[i]][:(len(run_times[events[i]])-1)]+str(int(run_times[events[i]][-1])+1)
            temps[i]=data[data[:,1].tolist().index(run_times[events[i]]),2]
    '''
    for itime in range(len(data[:,1])):
        if run_times[pos]==data[itime,1]:
            print run_times[pos]
            print data[itime,1]
            print pos
            print '---------------'
            temps[i]=data[itime,2].astype(float)
            print temps[i]
    if temps[i]==999:
        temps[i]=data[:,2].astype(float).min()
        print 'cambiado',temps[i]
    i=i+1
    if temps[i-1]==0:
        print pos, run_times[pos],data[-1,1],data[0,1]
    '''
    np.savetxt('temps.csv',temps,delimiter=',')
    np.savetxt('ff.csv',ff,delimiter=',')
    # NOTE(review): ``imp`` is computed but never used afterwards.
    imp=a[ifile][7:11]
    #np.save(')
    #plt.plot(temps,ff,'o',label=a[ifile][12:][:-3])
    #plt.legend()
    #plt.xlabel('Temperature')
    #plt.ylabel('Fraction frozen')
|
import cv2
# Haar cascade models for full-body and frontal-face detection
# (absolute paths into a local OpenCV checkout).
body_cascade = cv2.CascadeClassifier('/Users/jeremy.meyer/opencv/data/haarcascades/haarcascade_fullbody.xml')
face_cascade = cv2.CascadeClassifier('/Users/jeremy.meyer/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
#ped_cascade = cv2.CascadeClassifier('/Users/jeremy.meyer/opencv/data/hogcascades/hogcascade_pedestrians.xml')
def testCascade(casc, imgpath, pr = True):
    """Run a Haar cascade over a still image and display the detections.

    casc: a cv2.CascadeClassifier instance.
    imgpath: path of the image to scan.
    pr: when True, print the number of detections to stdout.
    """
    image = cv2.imread(imgpath)
    detections = casc.detectMultiScale(image)
    if pr:
        print(len(detections))
    # Draw a blue 2px rectangle around every detection.
    for (left, top, width, height) in detections:
        cv2.rectangle(image, (left, top), (left + width, top + height), (255, 0, 0), 2)
    cv2.imshow('img', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Smoke-test the full-body cascade on a static image.
testCascade(body_cascade, 'test2.png')
def testCascadeVid(casc, vidpath, pr=True):
    """Run a cascade detector over every frame of a video and display it.

    casc: a cv2.CascadeClassifier instance.
    vidpath: path of the video file to scan.
    pr: when True, print the per-frame detection count to stdout.
    """
    cap = cv2.VideoCapture(vidpath)
    while 1:
        ret, img = cap.read()
        if not ret:
            # Bug fix: end of stream (or an unreadable file) previously fell
            # through to cv2.cvtColor(None, ...) and crashed.
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = casc.detectMultiScale(gray)  # Mess with these parameters later
        if pr:
            print(len(faces))
        # Draws bboxes on the faces in blue
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Shows the frame. Hit ESC to close out.
        cv2.imshow('img', img)
        k = cv2.waitKey(15) & 0xff
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
# Run the face cascade against a sample clip.
testCascadeVid(face_cascade, 'jeremy-vids/positive/test1.mp4')
# Static image test doesn't work well either.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-08 18:45:15
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
#闭包
#在函数中提出的概念
#当函数定义内部函数,且返回值时内部函数名,就叫闭包
#1.闭包必须是外部函数中定义了内部函数
#2.外部函数是有返回值的,且该返回值就是内部函数名,不能加括号
#3.内部函数引用外部函数的变量值
'''
闭包格式:
def 外部函数():
...
def 内部函数():
...
return 内部函数
'''
def func():
    """Closure demo: return an inner function that captures a local.

    The returned ``inner_func`` keeps a reference to this function's
    local variable even after ``func`` has finished executing.
    """
    captured = 100

    def inner_func():
        # ``captured`` comes from the enclosing scope; this one is local.
        local_val = 99
        print(captured, local_val)

    # Show that the inner function is a plain object until it is called.
    print(inner_func)
    return inner_func
# Calling func() returns its inner function; binding that return value to
# ``x`` lets us invoke the closure later via ``x()``.
x = func()
x()
|
'''Author: Akash Shah (ass502)
This module contains the Grades class, along with its member functions.
An instance of the grades class consists of the restaurant grades that has been pre-processed
and a dataframe containing the scores for each of the restaurants, indexed by borough and camis id'''
import pandas as pd
import matplotlib.pyplot as plt
import sys
from calculate import *
class Grades(object):
    """Restaurant-inspection grade analysis.

    Holds the cleaned DOHMH inspection DataFrame (``data``) and a Series of
    per-restaurant scores (``scores``) indexed by (borough, camis id).
    Python 2 / legacy-pandas code (print statements, DataFrame.sort).
    """

    def __init__(self):
        '''creates an instance of our Grades object'''
        self.data,self.scores = self.load_and_clean_data()

    def load_and_clean_data(self):
        '''helper function that loads and cleans the data set'''
        #load data
        try:
            data = pd.read_csv("../DOHMH_New_York_City_Restaurant_Inspection_Results.csv",low_memory=False)
            #only select rows with valid grades, valid being A, B, or C
            # (lexicographic string comparison keeps 'A'..'C' only)
            data = data[data['GRADE'] <='C']
            #remove data with missing grade dates
            data['GRADE DATE'].fillna(0,inplace=True)
            data = data[data['GRADE DATE']!=0]
            #remove data with missing borough
            data = data[data['BORO'] != 'Missing']
            #convert grade date column to valid date format
            data['GRADE DATE'] = pd.to_datetime(data['GRADE DATE'])
            #compute grade score for each restaurant, using the helper function test_grades located in calculate.py
            # NOTE(review): DataFrame.sort was removed in pandas 0.20 --
            # switch to sort_values('GRADE DATE') when upgrading pandas.
            scores = data.sort('GRADE DATE').groupby(['BORO','CAMIS'])['GRADE'].apply(lambda x: x.tolist()).apply(test_grades)
            return [data,scores]
        except IOError: #catch exception if the file cannot be located or read properly
            print "Could not locate/read file"
            sys.exit()

    def test_restaurant_grades(self,camis_id):
        '''computes the score of a restaurant with the camis_id as input, by using the pre-computed scores dataframe'''
        #get the borough of the restaurant
        borough = pd.unique(self.data[self.data['CAMIS']==camis_id]['BORO'])[0]
        #return the score that we already computed
        return self.scores[borough][camis_id]

    def compute_borough_and_city_sums(self):
        '''computes the sum of the scores for all restaurants within each borough, and across New York City'''
        print "Grade score sums for each region:"
        #get sums of scores for the boroughs using our existing scores dataframe
        borough_sum = self.scores.sum(level='BORO')
        #keep track of the total sum across NYC as we go through each borough
        nyc_sum=0
        for borough in pd.unique(self.data['BORO'].values.ravel()):
            print borough.title() + ": " + str(borough_sum[borough])
            nyc_sum+=borough_sum[borough]
        print "NYC: " + str(nyc_sum)

    def plot_grade_improvement_boroughs(self):
        '''creates a plot showing restaurant grades over time for each of the boroughs'''
        #plot for each borough
        for borough in pd.unique(self.data['BORO'].values.ravel()):
            #filter data for current borough
            borough_data = self.data[self.data['BORO']==borough]
            #get grade counts for each grade date
            grade_counts = borough_data.groupby(['GRADE DATE','GRADE']).size()
            #convert to a dataframe by unstacking, sort by grade date
            # NOTE(review): .sort() on a DataFrame is long-deprecated;
            # sort_index() is the modern equivalent here.
            grade_counts_df = grade_counts.unstack(level=-1).sort()
            #now we plot
            plt.clf()
            grade_counts_df.plot(kind='line')
            plt.ylabel('Number of Grades')
            plt.title('Distribution of Restaurant Grades in '+borough.title())
            #when saving figure, want lower case letters and only the first word of the borough (staten, not staten island)
            plt.savefig("grade_improvement_"+str(borough).lower().partition(' ')[0]+".pdf",format='pdf')

    def plot_grade_improvement_nyc(self):
        '''creates a plot showing restaurant grades over time for all of NYC'''
        #get grade counts for each grade date
        grade_counts = self.data.groupby(['GRADE DATE','GRADE']).size()
        #convert to a dataframe by unstacking, sort by grade date
        # NOTE(review): see sort() deprecation note above.
        grade_counts_df = grade_counts.unstack(level=-1).sort()
        #plot
        plt.clf()
        grade_counts_df.plot(kind='line')
        plt.ylabel('Number of Grades')
        plt.title('Distribution of Restaurant Grades across NYC')
        plt.savefig("grade_improvement_nyc.pdf",format='pdf')
|
import subprocess

# Batch driver: run run_experiment.py once per *uncommented* experiment
# below, then aggregate the cross-validation results with run_cv.py.
# Commented entries document previously completed configurations.
experiments = [
    'experiment_folders\paper\\cv1\\' + s for s in [
        ## Experiments to evaluate modalities on PET/CT/MRI dataset (36 patients):
        #'ADC_adc_basic_f1_adam',
        #'CT_ct_windowing_c32_w220_basic_f1_adam',
        #'Perf_perf_basic_f1_adam',
        #'PETCT_petct_windowing_c32_w220_basic_f1_adam',
        #'PET_pet_basic_f1_adam',
        #'DPCT_dpct_windowing_c70_w300_basic_f1_adam',
        #'PETDPCT_petdpct_windowing_c70_w300_basic_f1_adam',
        #'T2W_t2w_basic_f1_adam',
        #'T2WADC_t2wadc_basic_f1_adam',
        #'T2WADCPerf_t2wadcperf_basic_f1_adam'
        ## Augmentation experiments:
        #'T2W_t2w_basic_f1_adam_aug',
        #'DPCT_dpct_windowing_c70_w300_basic_f1_adam_aug',
        #'PETCT_petct_windowing_c32_w220_basic_f1_adam_aug',
        #'PETDPCT_petdpct_windowing_c70_w300_basic_f1_adam_aug',
        #'CT_ct_windowing_c32_w220_basic_f1_adam_aug',
        ## Experiments on PET/CT dataset (85 patients):
        #'PET_pet_basic_f1_adam_85',
        #'CT_ct_windowing_c32_w220_basic_f1_adam_85',
        #'DPCT_dpct_windowing_c70_w300_basic_f1_adam_85',
        #'PETCT_petct_windowing_c32_w220_basic_f1_adam_85',
        #'PETDPCT_petdpct_windowing_c70_w300_basic_f1_adam_85',
        ## Experiments on PET/CT dataset (85 patients) with augmentation:
        #'PET_pet_basic_f1_adam_85_aug',
        #'CT_ct_windowing_c32_w220_basic_f1_adam_85_aug',
        #'DPCT_dpct_windowing_c70_w300_basic_f1_adam_85_aug'
        #'PETCT_petct_windowing_c32_w220_basic_f1_adam_85_aug',
        #'PETDPCT_petdpct_windowing_c70_w300_basic_f1_adam_85_aug'
        # Additional experiments for paper:
        #'PETDPCTT2W_petdpctt2w_windowing_c70_w300_basic_f1_adam',
        #'PETCTT2W_petctt2w_windowing_c32_w220_basic_f1_adam',
        'CTT2W_ctt2w_windowing_c32_w220_basic_f1_adam',
        #'PETT2W_pett2w_basic_f1_adam'
    ]
]

from pathlib import Path
# NOTE(review): base_path / all_experiments are computed but never used below.
base_path = Path('.\experiment_folders\paper\\cv1\\')
all_experiments = [str(i) for i in base_path.glob('*')]

for experiment in experiments:
    # 5000 iterations, evaluated with the dice metric.
    subprocess.run(['python', 'run_experiment.py', experiment, '5000', '--eval', 'dice'])
subprocess.run(['python', 'run_cv.py'])
|
# 生产者,消费者模型
# 爬虫的时候
# 分布式操作 :celery
# 本质:就是让生产数据和处理数据的效率达到平衡并且最大化效率
from multiprocessing import Queue,Process
import random
import time
def consumer(q,name): # consumer: typically post-processes the data it receives
    """Consume items from ``q`` until a falsy sentinel (None) arrives.

    q: queue shared with the producers.
    name: display name used in the printed message.
    """
    while True:  # keep consuming promptly; stop when producers signal done
        food = q.get()
        if food:
            # Bug fix: the original printed q.get() a second time here, which
            # consumed (and silently discarded) every other item and could
            # block forever waiting for an item that never arrives.
            print('%s吃了%s'%(name,food))
        else:
            # Falsy item (the None sentinel) means production is finished.
            break
def producer(q,name,food): # producer: typically fetches/creates data before offering it
    """Put ten numbered items of ``food`` onto ``q``.

    Sleeps a random sub-second interval between items to simulate uneven
    production speed.

    q: queue shared with the consumers.
    name: producer display name used in the printed message.
    food: base item name; an index 0-9 is appended to each item.
    """
    for i in range(10):
        foodi = '%s%s'%(food,i)
        print('%s生产了%s'%(name,foodi))
        # Random delay < 1s so production and consumption interleave.
        time.sleep(random.random())
        q.put(foodi)
if __name__ == "__main__":
    # Two producers feed one shared queue; two consumers drain it.
    q = Queue()
    c1 = Process(target=consumer,args=(q,'alex'))
    c2 = Process(target=consumer,args=(q,'wusir'))
    p1 = Process(target=producer,args=(q,'大壮','泔水'))
    p2 = Process(target=producer,args=(q,'b哥','香蕉'))
    c1.start()
    c2.start()
    p1.start()
    p2.start()
    # Wait for both producers to finish putting all their items...
    p1.join()
    p2.join()
    # ...then push one falsy sentinel per consumer so each exits its loop.
    q.put(None)
    q.put(None)
def fibonacci(n):
    """Return the n-th Fibonacci number, 1-indexed with F(1)=0, F(2)=1.

    All n <= 1 map to 0, matching the original recursive base case.
    Rewritten iteratively: the naive double recursion was exponential in n,
    while this runs in O(n) time and O(1) space with identical results.
    """
    if (n <= 1):
        return 0
    # prev, curr track F(k-1), F(k); start at F(1)=0, F(2)=1.
    prev, curr = 0, 1
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
    return curr
# Print the first n values of the sequence, one per line.
n = int(input("Digite a quantidade de numeros da sequencia: "))
for i in range(n):
    # NOTE(review): i starts at 0, so the first two printed values are both
    # 0 (fibonacci(0) and fibonacci(1)); range(1, n + 1) may be intended.
    print(str(fibonacci(i)) + " ")
|
from PyInstaller.utils.hooks import logger
def pre_safe_import_module(psim_api):
    """PyInstaller pre-safe-import hook for PyMca5.PyMcaGui.

    Registers every directory on the package's ``__path__`` with the
    analysis API so its submodules can be discovered.
    """
    import PyMca5.PyMcaGui as gui_package
    for package_dir in gui_package.__path__:
        psim_api.append_package_path(package_dir)
|
#!/usr/bin/env python
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=python fileencoding=utf-8
'''
Copyright © 2013
Eric van der Vlist <vdv@dyomedea.com>
Jens Neuhalfen <http://www.neuhalfen.name/>
See license information at the bottom of this file
'''
import re
import os
import ConfigParser
import ast
from datetime import datetime
import xmlrpclib
import trac2down
"""
What
=====
This script migrates issues from trac to gitlab.
License
========
License: http://www.wtfpl.net/
Requirements
==============
* Python 2, xmlrpclib, requests
* Trac with xmlrpc plugin enabled
* Peewee (direct method)
* GitLab
"""
# Fallback values applied when migrate.cfg omits an option.
default_config = {
    'ssl_verify': 'no',
    'migrate': 'true',
    'overwrite': 'true',
    'exclude_authors': 'trac',
    'uploads': ''
}

config = ConfigParser.ConfigParser(default_config)
config.read('migrate.cfg')

trac_url = config.get('source', 'url')
dest_project_name = config.get('target', 'project_name')
uploads_path = config.get('target', 'uploads')
# 'api' talks to the GitLab REST API; 'direct' writes to GitLab's DB.
method = config.get('target', 'method')
if method == 'api':
    from gitlab_api import Connection, Issues, Notes, Milestones
    print("importing api")
    gitlab_url = config.get('target', 'url')
    gitlab_access_token = config.get('target', 'access_token')
    dest_ssl_verify = config.getboolean('target', 'ssl_verify')
    # Overwriting issue iids is only possible with direct DB access.
    overwrite = False
elif method == 'direct':
    print("importing direct")
    from gitlab_direct import Connection, Issues, Notes, Milestones
    db_name = config.get('target', 'db-name')
    db_password = config.get('target', 'db-password')
    db_user = config.get('target', 'db-user')
    db_path = config.get('target', 'db-path')
    overwrite = config.getboolean('target', 'overwrite')
# Trac username -> GitLab username mapping (a Python dict literal in the cfg).
users_map = ast.literal_eval(config.get('target', 'usernames'))
default_user = config.get('target', 'default_user')

must_convert_issues = config.getboolean('issues', 'migrate')
only_issues = None
if config.has_option('issues', 'only_issues'):
    only_issues = ast.literal_eval(config.get('issues', 'only_issues'))

must_convert_wiki = config.getboolean('wiki', 'migrate')

# Quoted Trac changeset blocks: [changeset:"rev"]: {{{ ... }}}
pattern_changeset = r'(?sm)In \[changeset:"([^"/]+?)(?:/[^"]+)?"\]:\n\{\{\{(\n#![^\n]+)?\n(.*?)\n\}\}\}'
matcher_changeset = re.compile(pattern_changeset)

# Bare inline changeset references: [changeset:abc123]
pattern_changeset2 = r'\[changeset:([a-zA-Z0-9]+)\]'
matcher_changeset2 = re.compile(pattern_changeset2)
def convert_xmlrpc_datetime(dt):
    """Convert an xmlrpclib DateTime value into a ``datetime`` object."""
    text = str(dt)
    return datetime.strptime(text, "%Y%m%dT%H:%M:%S")
def format_changeset_comment(m):
    """Render a changeset regex match as a quoted GitLab-style comment.

    m: match with group(1) = changeset id and group(3) = changeset body.
    """
    quoted_body = m.group(3).replace('\n', '\n> ')
    return 'In changeset ' + m.group(1) + ':\n> ' + quoted_body
def fix_wiki_syntax(markup):
    """Rewrite Trac changeset references in ``markup`` for GitLab.

    Quoted '[changeset:"..."]' blocks become 'In changeset ...' quote
    comments, and bare '[changeset:abc123]' references are unwrapped to
    the bare revision id.
    """
    markup = matcher_changeset.sub(format_changeset_comment, markup)
    markup = matcher_changeset2.sub(r'\1', markup)
    return markup
def get_dest_project_id(dest, dest_project_name):
    """Resolve a GitLab project name to its numeric id.

    Raises ValueError when the project does not exist on ``dest``.
    """
    project = dest.project_by_name(dest_project_name)
    if not project:
        raise ValueError("Project '%s' not found" % dest_project_name)
    return project["id"]
def get_dest_milestone_id(dest, dest_project_id, milestone_name):
    """Resolve a milestone name within a project to its numeric id.

    Raises ValueError when no such milestone exists in the project.
    """
    dest_milestone_id = dest.milestone_by_name(dest_project_id, milestone_name)
    if not dest_milestone_id:
        # Bug fix: the message previously interpolated the module-level
        # ``dest_project_name`` global instead of the project this function
        # was actually asked about (a NameError in isolation, and the wrong
        # project name otherwise).
        raise ValueError("Milestone '%s' of project id '%s' not found"
                         % (milestone_name, dest_project_id))
    return dest_milestone_id["id"]
def convert_issues(source, dest, dest_project_id, only_issues=None):
    """Migrate Trac milestones and tickets into GitLab issues.

    source: xmlrpclib proxy to the Trac XML-RPC endpoint.
    dest: gitlab_api or gitlab_direct Connection.
    dest_project_id: target GitLab project id.
    only_issues: optional whitelist of Trac ticket ids to migrate.
    """
    if overwrite and method == 'direct':
        dest.clear_issues(dest_project_id)
    # Milestones first, so tickets can reference their new ids.
    milestone_map_id = {}
    for milestone_name in source.ticket.milestone.getAll():
        milestone = source.ticket.milestone.get(milestone_name)
        print(milestone)
        new_milestone = Milestones(
            description=trac2down.convert(fix_wiki_syntax(milestone['description']), '/milestones/', False),
            title=milestone['name'],
            state='active' if str(milestone['completed']) == '0' else 'closed'
        )
        if method == 'direct':
            new_milestone.project = dest_project_id
            if milestone['due']:
                new_milestone.due_date = convert_xmlrpc_datetime(milestone['due'])
        new_milestone = dest.create_milestone(dest_project_id, new_milestone)
        milestone_map_id[milestone_name] = new_milestone.id
    # Batch all ticket fetches into one multicall round-trip.
    get_all_tickets = xmlrpclib.MultiCall(source)
    for ticket in source.ticket.query("max=0&order=id"):
        get_all_tickets.ticket.get(ticket)
    # Each src_ticket is (id, time_created, time_changed, attributes).
    for src_ticket in get_all_tickets():
        src_ticket_id = src_ticket[0]
        if only_issues and src_ticket_id not in only_issues:
            print("SKIP unwanted ticket #%s" % src_ticket_id)
            continue
        src_ticket_data = src_ticket[3]
        src_ticket_priority = src_ticket_data['priority']
        src_ticket_resolution = src_ticket_data['resolution']
        # src_ticket_severity = src_ticket_data['severity']
        src_ticket_status = src_ticket_data['status']
        src_ticket_component = src_ticket_data.get('component', '')
        src_ticket_version = src_ticket_data['version']
        # Map Trac priority/resolution/version/components onto GitLab labels.
        new_labels = []
        if src_ticket_priority == 'high':
            new_labels.append('high priority')
        elif src_ticket_priority == 'medium':
            pass
        elif src_ticket_priority == 'low':
            new_labels.append('low priority')
        if src_ticket_resolution == '':
            # active ticket
            pass
        elif src_ticket_resolution == 'fixed':
            pass
        elif src_ticket_resolution == 'invalid':
            new_labels.append('invalid')
        elif src_ticket_resolution == 'wontfix':
            new_labels.append("won't fix")
        elif src_ticket_resolution == 'duplicate':
            new_labels.append('duplicate')
        elif src_ticket_resolution == 'worksforme':
            new_labels.append('works for me')
        if src_ticket_version:
            if src_ticket_version == 'trunk' or src_ticket_version == 'dev':
                pass
            else:
                new_labels.append('release-%s' % src_ticket_version)
        # if src_ticket_severity == 'high':
        #     new_labels.append('critical')
        # elif src_ticket_severity == 'medium':
        #     pass
        # elif src_ticket_severity == 'low':
        #     new_labels.append("minor")
        # Current ticket types are: enhancement, defect, compilation, performance, style, scientific, task, requirement
        # new_labels.append(src_ticket_type)
        if src_ticket_component != '':
            for component in src_ticket_component.split(','):
                new_labels.append(component.strip())
        # Map Trac status onto GitLab issue state (or an extra label).
        new_state = ''
        if src_ticket_status == 'new':
            new_state = 'opened'
        elif src_ticket_status == 'assigned':
            new_state = 'opened'
        elif src_ticket_status == 'reopened':
            new_state = 'reopened'
        elif src_ticket_status == 'closed':
            new_state = 'closed'
        elif src_ticket_status == 'accepted':
            new_labels.append(src_ticket_status)
        elif src_ticket_status == 'reviewing' or src_ticket_status == 'testing':
            new_labels.append(src_ticket_status)
        else:
            print("!!! unknown ticket status: %s" % src_ticket_status)
        print("migrated ticket %s with labels %s" % (src_ticket_id, new_labels))
        # Minimal parameters
        new_issue = Issues(
            title=src_ticket_data['summary'],
            description=trac2down.convert(fix_wiki_syntax(src_ticket_data['description']), '/issues/', False),
            state=new_state,
            labels=",".join(new_labels)
        )
        if src_ticket_data['owner'] != '':
            # Unknown Trac users fall back to the configured default user.
            try:
                new_issue.assignee = dest.get_user_id(users_map[src_ticket_data['owner']])
            except KeyError:
                new_issue.assignee = dest.get_user_id(default_user)
        # Additional parameters for direct access
        if method == 'direct':
            new_issue.created_at = convert_xmlrpc_datetime(src_ticket[1])
            new_issue.updated_at = convert_xmlrpc_datetime(src_ticket[2])
            new_issue.project = dest_project_id
            new_issue.state = new_state
            try:
                new_issue.author = dest.get_user_id(users_map[src_ticket_data['reporter']])
            except KeyError:
                new_issue.author = dest.get_user_id(default_user)
            # Keep the Trac ticket number as the issue iid when overwriting.
            if overwrite:
                new_issue.iid = src_ticket_id
            else:
                new_issue.iid = dest.get_issues_iid(dest_project_id)
        if 'milestone' in src_ticket_data:
            milestone = src_ticket_data['milestone']
            if milestone and milestone_map_id[milestone]:
                new_issue.milestone = milestone_map_id[milestone]
        new_ticket = dest.create_issue(dest_project_id, new_issue)
        # new_ticket_id = new_ticket.id
        # Replay the Trac changelog as issue comments (with attachments).
        changelog = source.ticket.changeLog(src_ticket_id)
        is_attachment = False
        for change in changelog:
            change_type = change[2]
            if change_type == "attachment":
                # The attachment will be described in the next change!
                is_attachment = True
                attachment = change
            if change_type == "comment" and (change[4] != '' or is_attachment):
                note = Notes(
                    note=trac2down.convert(fix_wiki_syntax(change[4]), '/issues/', False)
                )
                binary_attachment = None
                if method == 'direct':
                    note.created_at = convert_xmlrpc_datetime(change[0])
                    note.updated_at = convert_xmlrpc_datetime(change[0])
                    try:
                        note.author = dest.get_user_id(users_map[change[1]])
                    except KeyError:
                        note.author = dest.get_user_id(default_user)
                    if is_attachment:
                        note.attachment = attachment[4]
                        print("migrating attachment for ticket id %s: %s" % (src_ticket_id, note.attachment))
                        binary_attachment = source.ticket.getAttachment(src_ticket_id,
                                                                        attachment[4].encode('utf8')).data
                        attachment = None
                dest.comment_issue(dest_project_id, new_ticket, note, binary_attachment)
                is_attachment = False
def convert_wiki(source, dest):
    """Export every Trac wiki page to GitLab-flavoured markdown files.

    Pages last edited by a configured excluded author are skipped; in
    'direct' mode page attachments are migrated as well.
    """
    exclude_authors = [a.strip() for a in config.get('wiki', 'exclude_authors').split(',')]
    target_directory = config.get('wiki', 'target-directory')
    # NOTE(review): ``server`` is created but never used in this function.
    server = xmlrpclib.MultiCall(source)
    for name in source.wiki.getAllPages():
        info = source.wiki.getPageInfo(name)
        if info['author'] not in exclude_authors:
            page = source.wiki.getPage(name)
            print("Page %s:%s" % (name, info))
            # Trac's front page becomes the GitLab wiki home page.
            if name == 'WikiStart':
                name = 'home'
            converted = trac2down.convert(page, os.path.dirname('/wikis/%s' % name))
            if method == 'direct':
                files_not_linked_to = []
                for attachment_filename in source.wiki.listAttachments(name):
                    print(attachment_filename)
                    binary_attachment = source.wiki.getAttachment(attachment_filename).data
                    attachment_name = attachment_filename.split('/')[-1]
                    dest.save_wiki_attachment(attachment_name, binary_attachment)
                    # Rewrite attachment links to the flat migrated filename.
                    converted = converted.replace(r'migrated/%s)' % attachment_filename,
                                                  r'migrated/%s)' % attachment_name)
                    if '%s)' % attachment_name not in converted:
                        files_not_linked_to.append(attachment_name)
                # Append an index of attachments the page never linked to.
                if len(files_not_linked_to) > 0:
                    converted += '\n\n'
                    converted += '##### Attached files:\n'
                    for f in files_not_linked_to:
                        converted += '- [%s](/uploads/migrated/%s)\n' % (f, f)
            trac2down.save_file(converted, name, info['version'], info['lastModified'], info['author'], target_directory)
if __name__ == "__main__":
    # Build the destination connection matching the configured method.
    if method == 'api':
        dest = Connection(gitlab_url, gitlab_access_token, dest_ssl_verify)
    elif method == 'direct':
        dest = Connection(db_name, db_user, db_password, db_path, uploads_path, dest_project_name)
    source = xmlrpclib.ServerProxy(trac_url)
    dest_project_id = get_dest_project_id(dest, dest_project_name)
    if must_convert_issues:
        convert_issues(source, dest, dest_project_id, only_issues=only_issues)
    if must_convert_wiki:
        convert_wiki(source, dest)
'''
This file is part of <https://gitlab.dyomedea.com/vdv/trac-to-gitlab>.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library. If not, see <http://www.gnu.org/licenses/>.
'''
|
from src.util.Logging import warn
from src.util.Exceptions import PathValidationException, NotSupportedException
from src.plan.GraphAssembler import MergeAssembler
from src.plan.Region import RegionLoop, RegionPair
from src.layout.PlannedGraph import Vertex, Edge
from src.layout.EdgeStyles import EdgeStyle, AUTO_BEND, AUTO_LOOP, LEFT_BEND, RIGHT_BEND
from .PathFacts import PathFacts
def raise_path_not_continuous(path):
    """Fail validation: ``path``'s edges do not chain end-to-start."""
    message = "The declared path '" + str(path) + "' is not a valid path, as it is not continuous"
    raise PathValidationException(message)
def raise_paths_endpoints(pathA, pathB):
    """Fail validation: parallel paths must share their start and end nodes."""
    message = "The declared paths '" + str(pathA) + "' and '" + str(pathB) + "' do not share a start/end node"
    raise PathValidationException(message)
def raise_path_not_a_loop(path):
    """Fail validation: an identity path must start and end at the same node."""
    message = "The declared path '" + str(path) + "' does not loop, so cannot be an identity"
    raise PathValidationException(message)
class ParseValidator:
    """Incrementally builds the diagram graph (nodes, edges, labels,
    regions) while the source is parsed, validating declared paths as it
    goes, and finally hands everything to a MergeAssembler.
    """
    def __init__(self):
        self.nodes = {}          # node id -> Vertex
        self.edges = {}          # edge id -> Edge
        self.set_labels = set()  # ids that have already received a label
        self.dimensions = None   # (width, height) once declared
        self.compositions = {}
        self.regions = []        # RegionPair / RegionLoop instances

    def set_dimensions(self, dimensions):
        """Record the declared (width, height) of the diagram."""
        self.dimensions = dimensions

    def addEdge(self, edgeId, nodeAId, nodeBId):
        """Register an edge, creating its endpoint vertices on demand."""
        if nodeAId not in self.nodes:
            self.nodes[nodeAId] = Vertex(nodeAId)
        if nodeBId not in self.nodes:
            self.nodes[nodeBId] = Vertex(nodeBId)
        self.edges[edgeId] = Edge(edgeId, self.nodes[nodeAId], self.nodes[nodeBId])

    def set_label(self, object_id, label):
        """Attach a label to the node or edge with the given id.

        Duplicate labellings are warned about, but the label is still
        (re)applied, matching the original lenient behaviour.
        """
        if object_id in self.set_labels:
            warn(object_id + ' label has already been defined')
        else:
            self.set_labels.add(object_id)
        if object_id in self.nodes:
            self.nodes[object_id].set_label(label)
        if object_id in self.edges:
            self.edges[object_id].set_label(label)

    def add_styles(self, edgeId, styles):
        """Replace the styles of an edge with EdgeStyle wrappers."""
        # Comprehension instead of list(map(lambda ...)) -- same result.
        self.edges[edgeId].styles = [EdgeStyle(s) for s in styles]

    def validate_path_continuity(self, path):
        """Return True iff each edge in *path* ends where the next starts."""
        for i in range(len(path) - 1):
            edge1 = self.edges[path[i]]
            edge2 = self.edges[path[i + 1]]
            if edge1.end.node_name != edge2.start.node_name:
                return False
        return True

    def validate_path_ends(self, pathA, pathB):
        """Return True iff both paths share a start node and an end node."""
        startNodeA = self.edges[pathA.edge_ids[0]].start.node_name
        endNodeA = self.edges[pathA.edge_ids[-1]].end.node_name
        startNodeB = self.edges[pathB.edge_ids[0]].start.node_name
        endNodeB = self.edges[pathB.edge_ids[-1]].end.node_name
        return startNodeA == startNodeB and endNodeA == endNodeB

    def validate_loop_ends(self, path):
        """Return True iff *path* starts and ends at the same node."""
        start = self.edges[path.edge_ids[0]].start.node_name
        end = self.edges[path.edge_ids[-1]].end.node_name
        return start == end

    def validate_paths(self, pathA, pathB):
        """Raise PathValidationException unless both paths are continuous
        and share their endpoints."""
        if not self.validate_path_continuity(pathA.edge_ids):
            raise_path_not_continuous(pathA)
        if not self.validate_path_continuity(pathB.edge_ids):
            raise_path_not_continuous(pathB)
        if not self.validate_path_ends(pathA, pathB):
            raise_paths_endpoints(pathA, pathB)

    def validate_path_loop(self, path):
        """Raise PathValidationException unless *path* is a continuous loop."""
        if not self.validate_path_continuity(path.edge_ids):
            raise_path_not_continuous(path)
        if not self.validate_loop_ends(path):
            raise_path_not_a_loop(path)

    def add_compositions(self, pathA, pathB, label):
        """Declare that pathA and pathB compose to the same morphism.

        For a pair of single edges, bend them apart so the enclosed
        region is visible.
        """
        self.validate_paths(pathA, pathB)
        if len(pathA) == 1 and len(pathB) == 1:
            self.edges[pathA.edge_ids[0]].add_auto_style(LEFT_BEND)
            self.edges[pathB.edge_ids[0]].add_auto_style(RIGHT_BEND)
        region_id = len(self.regions)
        region = RegionPair(region_id, (pathA, pathB), label = label)
        self.regions.append(region)

    def add_identity_path(self, path, label):
        """Declare *path* to be an identity (a validated loop)."""
        self.validate_path_loop(path)
        if len(path) == 1:
            # A one-edge identity is drawn as a self-loop; it cannot
            # carry a region label.
            self.edges[path.edge_ids[0]].add_auto_style(AUTO_LOOP.copy())
            if label is not None:
                raise NotSupportedException('region label for identity of length 1')
        elif len(path) == 2:
            self.edges[path.edge_ids[0]].add_auto_style(LEFT_BEND)
            self.edges[path.edge_ids[1]].add_auto_style(LEFT_BEND)
        region_id = len(self.regions)
        region = RegionLoop(region_id, path, label = label)
        self.regions.append(region)

    def get_graph_assembler(self):
        """Build the assembler; fall back to a default size if none was set."""
        if self.dimensions is None:
            warn("diagram size has not been declared, using default")
            self.dimensions = (8.0, 8.0)
        return MergeAssembler(self.nodes, self.edges, self.regions, self.dimensions)
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import csv
import matplotlib.pyplot as plt
import datetime
# In the following, change 'Country/Region' to your desired choice. This
# script was written for the Country/Region Lebanon.
# Change the paths below to the location of your CSSE data checkout.

# Extract the Lebanon row from each of the three CSSE time-series files.
with open('covid-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv', mode='r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        if row['Country/Region']=='Lebanon':
            Lebanon_confirmed = row
with open('covid-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv', mode='r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        if row['Country/Region']=='Lebanon':
            Lebanon_deaths = row
with open('covid-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv', mode='r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        if row['Country/Region']=='Lebanon':
            Lebanon_recovered = row

# Drop the location metadata, keeping only the per-date cumulative counts.
# FIX: the original passed a stray 'Lat' default argument to
# pop('Country/Region','Lat'); all four keys are simply removed here.
for series in (Lebanon_confirmed, Lebanon_deaths, Lebanon_recovered):
    series.pop('Province/State')
    series.pop('Country/Region')
    series.pop('Lat')
    series.pop('Long')

# The remaining dict values are date -> cumulative-count strings; cast to int.
confirmed = np.array(np.array(list(Lebanon_confirmed.items()))[:,1].reshape(-1,1), dtype = np.int32)
deaths = np.array(np.array(list(Lebanon_deaths.items()))[:,1].reshape(-1,1), dtype = np.int32)
recovered = np.array(np.array(list(Lebanon_recovered.items()))[:,1].reshape(-1,1), dtype = np.int32)
Lebanon_cdr = np.concatenate([confirmed,deaths,recovered], axis = 1)

# CSSE time series start on 2020-01-22.
dates = pd.date_range('20200122', periods=Lebanon_cdr.shape[0])
Lebanon_cdr_data = pd.DataFrame(Lebanon_cdr, index=dates, columns=['confirmed', 'deaths','recovered'])
plt.figure()
Lebanon_cdr_data  # NOTE(review): notebook residue -- no effect in a script

# Daily changes and per-month summary statistics.
data_daily_changes = Lebanon_cdr_data.diff()
data_daily_changes.plot()
march_description = data_daily_changes.loc['2020-03-01':'2020-03-31'].describe()
april_description = data_daily_changes.loc['2020-04-01':'2020-04-30'].describe()
may_description = data_daily_changes.loc['2020-05-01':'2020-05-31'].describe()
june_description = data_daily_changes.loc['2020-06-01':'2020-06-30'].describe()
# The following bare expressions only display output in a notebook.
march_description.loc[['mean','std']]
april_description.loc[['mean','std']]
may_description.loc[['mean','std']]
june_description.loc[['mean','std']]
import torch
import torch.nn as nn
import torch.tensor as tensor
from torch.nn import functional as F
import pdb
from torchvision.models.resnet import resnet18
class SpatialTransformBlock(nn.Module):
    """Per-class spatial transformer.

    For each of ``num_classes`` attribute classes, a small localisation
    head regresses 4 parameters (scale x/y + translation x/y) of an
    affine crop, which is then applied to the input feature map.
    ``forward`` returns ``(pred_list, grid_list)``: the transformed
    features and the sampling grids, one entry per class.
    """
    def __init__(self, num_classes, pooling_size, channels):
        super(SpatialTransformBlock, self).__init__()
        self.num_classes = num_classes
        self.spatial = pooling_size
        # One tiny localisation head (channels -> 4 params) per class.
        self.stn_list = nn.ModuleList()
        for i in range(self.num_classes):
            self.stn_list.append(nn.Linear(channels, 4))
        # ImageNet-pretrained ResNet-18 trunk (classifier head dropped).
        resnet = resnet18(pretrained=True)
        self.base = nn.Sequential(
            resnet.conv1,
            resnet.bn1,
            resnet.relu,
            resnet.maxpool,
            resnet.layer1,
            resnet.layer2,
            resnet.layer3,
            resnet.layer4,
        )

    def stn(self, x, theta):
        """Sample ``x`` through the affine grid defined by ``theta``."""
        # NOTE(review): align_corners is left at the library default, as
        # in the original -- confirm against the training setup.
        grid = F.affine_grid(theta, x.size())
        x = F.grid_sample(x, grid, padding_mode='zeros')
        # FIX: keep the result on its current device instead of forcing
        # .cuda(); identical on GPU inputs, and the module now also runs
        # on CPU (the original raised a device mismatch there).
        return x, grid

    def transform_theta(self, theta_i, region_idx):
        """Build a (B, 2, 3) affine matrix from 4 raw parameters.

        Scales go through sigmoid (so they lie in (0, 1)); translations
        through tanh (so they lie in (-1, 1)). ``region_idx`` is unused
        and kept only for interface compatibility.
        """
        # FIX: allocate on the input's device rather than CPU + .cuda().
        theta = torch.zeros(theta_i.size(0), 2, 3, device=theta_i.device)
        theta[:,0,0] = torch.sigmoid(theta_i[:,0])
        theta[:,1,1] = torch.sigmoid(theta_i[:,1])
        theta[:,0,2] = torch.tanh(theta_i[:,2])
        theta[:,1,2] = torch.tanh(theta_i[:,3])
        return theta

    def forward(self, features):
        pred_list = []
        grid_list = []
        bs = features.size(0)
        feature = self.base(features)
        for i in range(self.num_classes):
            # Global max-pool the trunk features, then regress 4 params.
            theta_i = self.stn_list[i](F.max_pool2d(feature, feature.size()[2:]).view(bs,-1)).view(-1,4)
            theta_i = self.transform_theta(theta_i, i)
            transformed, grid = self.stn(features, theta_i)
            pred_list.append(transformed)
            grid_list.append(grid)
        return pred_list, grid_list
import test_class0
import test_class1
#bb= B()
#cc= B()
#A.aaa= '789'
# Demonstrates dynamic lookup: fetch class B from test_class1 by name,
# instantiate it, and invoke its bbb() method -- twice, with two
# independent instances.
if 1:
    klass = getattr(test_class1, 'B', None)
    # first instance
    first = klass()
    bound = getattr(first, 'bbb', None)
    bound()

    klass = getattr(test_class1, 'B', None)
    # second, independent instance
    second = klass()
    bound2 = getattr(second, 'bbb', None)
    bound2()
import numpy as np
# Utility functions
def updateIndividual(I, O, M, prob):
    """With probability *prob*, copy each nonzero component of the
    organization code O into the individual I (mutated in place).

    Zero components of O carry no belief and are never copied.
    """
    for dim in range(M):
        if O[dim] == 0:
            continue
        # np.random.choice with weights [prob, 1-prob] yields 0 w.p. prob.
        if np.random.choice(2, p=[prob, 1 - prob]) == 0:
            I[dim] = O[dim]
def VecSimm(Vec1, Vec2):
    """Return the number of positions where Vec1 and Vec2 agree
    (compared pairwise, up to the shorter length).
    """
    # sum() over a generator instead of len(list(...)) -- no throwaway list.
    return sum(1 for a, b in zip(Vec1, Vec2) if a == b)
def VecDiff(Vec1, Vec2):
    """Return the number of positions where Vec1 and Vec2 differ
    (compared pairwise, up to the shorter length).
    """
    # sum() over a generator instead of len(list(...)) -- no throwaway list.
    return sum(1 for a, b in zip(Vec1, Vec2) if a != b)
def findDominantBelief (I, S, i):
    """Return the dominant belief (1, 0 or -1) for dimension *i* among
    the individuals whose indices are listed in S (the superior group).

    Ties are broken in the order 1, then 0, then -1, preserving the
    original max-count precedence.
    """
    count_1 = count_0 = count_M1 = 0
    # Possible beliefs: 1, 0, -1
    # BUG FIX: iterate over the member indices held in S, not over the
    # first len(S) rows of I -- the old loop used I[j][i] with
    # j in range(len(S)), which counted the wrong individuals.
    for member in S:
        belief = I[member][i]
        if belief == 1:
            count_1 += 1
        elif belief == 0:
            count_0 += 1
        else:
            count_M1 += 1
    maxCount = max(count_1, count_0, count_M1)
    if maxCount == count_1:
        return 1
    elif maxCount == count_0:
        return 0
    else:
        return -1
# Set up the simulation (exploration/exploitation organizational
# learning model; written for Python 2 -- note the print statements).
# Socialization rate: probability per dimension that an individual
# adopts the organization code's belief in an iteration.
p1 = 0.1
# Organization learning rate: probability per dimension that the org
# code adopts the superior group's dominant belief.
p2 = 0.9
# Reality, M-tuple [1, -1]
# Organisation Code, M-tuple [1, 0, -1] - Initially 0s
# Individuals, count N - [-1, 0, 1]
# Dimension, for O, I and R, original experiment 30
M = 12
# Number of Individuals, original experiment 50
N = 16
# iterations, original experiment 80
iterCount = 10
# Model Reality, R: M random values drawn from {-1, +1}.
# X = np.random.randint(0, 2, size=M)
R = 2*np.random.randint(0, 2, size=M) - 1
# Model Organization Code, initially 0's
# NOTE(review): np.int was removed in NumPy 1.24; use int (or np.int_)
# when porting this script forward.
O = np.zeros(M, dtype=np.int)
# Model Individuals: N individuals, each an M-vector over {-1, 0, 1}.
I = np.random.randint(-1, 2, size=(N, M))
print
print "Reality"
print R
print "Org Code"
print O
print "Individuals"
# Report each individual's initial agreement with the org code.
AvgIndSimmIndexStart = 0
for i in range(0, N):
    avgSimm = VecSimm(I[i], O)
    print I[i], "Similarity with Org Code:", avgSimm
    AvgIndSimmIndexStart += avgSimm
print "Proportion of Reality represented by the Individuals (on average):", round(float(AvgIndSimmIndexStart) / N, 1)
print
print "Socialzation Rate:", p1
print "Organization Learning Rate:", p2
print
print "*** Simualtion Begin ***"
# Total iterations in the simulation
for loop in range(0, iterCount):
    print "*** Iteration: ", loop, "***"
    # Adjust for socialization rate (individual learning).
    for i in range(0, N):
        # Update Individual i with probability p1 with Org Code, O
        # and I/O dimensions as M
        updateIndividual(I[i], O, M, p1)
    # Adjust for organisational learning.
    # Step 1: Make a set of Individuals who agree more with the Reality, than
    # the Organisation. Call this group "Superior Group".
    S = []
    SimmOR = VecSimm(O, R)
    print "Similarity between Org Code and Reality:", SimmOR
    for i in range(0, N):
        if (VecSimm(I[i], R) > SimmOR):
            S.append(i)
    print "Superior Group: ", S
    print "Superior Group Size: ", len(S)
    # Step 2: For every vector dimension, find the dominant belief among
    # the Superior Group and update O.
    print "Org Code before iteration:"
    print O
    if len(S) > 0:
        for i in range(0, M):
            if (np.random.choice(2, p = [p2, 1-p2]) == 0):
                dBelief = findDominantBelief (I, S, i)
                print "Component:", i, ",Dominant Belief:", dBelief
                O[i] = dBelief
    print "Org Code after iteration:"
    print O
print
print "*** Simualtion End ***"
print
print "*** Observations ***"
print "[1] Individuals"
print
# Final agreement of each individual with the (now learned) org code.
AvgIndSimmIndexEnd = 0
for i in range(0, N):
    avgSimm = VecSimm(I[i], O)
    print I[i], "Similarity with Org Code:", avgSimm
    AvgIndSimmIndexEnd += avgSimm
print "Proportion of Reality represented by the Individuals (on average):", round(float(AvgIndSimmIndexEnd) / N, 1)
print
print "New Organisation Code "
print O
print "Reality"
print R
print "Final Similarity between Org Code and Reality ", VecSimm(R, O)
print "Proportion of Reality correctly represented in Org Code:", round(100.0 * VecSimm(R, O) / M, 1), "%"
import pandas as pd
import numpy as np
import math
import re
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
from sklearn.decomposition import PCA
from scipy import interp
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
# Locations of the (gzipped) input datasets.
data_folder = '~/security_privacy2018/data/'
dataset1_file = data_folder + 'dataset1.csv.gz'
dlls_imports_file = data_folder + 'dlls.csv.gz'
dlls_invalid_file = data_folder + 'dlls_invalid.csv.gz'
imports1_file = data_folder + 'dataset1_imports.csv.gz'
# CountVectorizer token pattern: a token is any run of chars except ';'.
cv_token_pattern = u'[^;]+'
# Remove imports' extension
def token_preprocessor(s):
    """Strip short (up to 3 character) file extensions from *s*.

    Splits on a dot followed by up to three characters and re-joins the
    remaining pieces, e.g. 'kernel32.dll' -> 'kernel32'.
    """
    # FIX: raw string for the regex -- '\.' in a plain string only works
    # because Python leaves unknown escapes alone (DeprecationWarning
    # since 3.6); r'...' makes the intent explicit.
    return ''.join(re.split(r'\..{0,3}', s))
# Per-sample DLL-import features, keyed by sample link.
imports = pd.read_csv(dlls_imports_file)
imports = imports.set_index('link')
# Load dataset
dataset = pd.read_csv(dataset1_file)
dataset = dataset.set_index('link')
# Keep only samples that have import information, drop incomplete rows.
dataset = dataset.join(imports, how='inner')
dataset.dropna(inplace=True)
#dataset.drop_duplicates(subset='md5', keep='last', inplace=True)
dataset = shuffle(dataset)
# DLL names to exclude from the vocabulary (used as stop words below).
dlls_invalid = list(pd.read_csv(dlls_invalid_file)['0'].values)
# display(dlls_invalid)
cv = CountVectorizer(token_pattern=cv_token_pattern, stop_words=dlls_invalid, lowercase=False)
#preprocessor=token_preprocessor, lowercase=False)
cv.fit(dataset.dlls)
# Bag-of-DLLs logistic regression predicting the 'malware' label.
classifier = LogisticRegression(C=1, verbose=5)
classifier.fit(cv.transform(dataset.dlls), dataset.malware)
# Package vectorizer + classifier into one pipeline and persist it.
# NOTE(review): sklearn.externals.joblib was removed in modern
# scikit-learn; switch to the standalone joblib package when upgrading.
pipe = Pipeline(steps=[('cv', cv), ('logistic', classifier)])
pipe.fit(dataset.dlls, dataset.malware)
joblib.dump(pipe, 'lr_1.pk1', compress=9, protocol=2)
from tools import ReadJson,ReadRedis,ReadDB,ReadConfig
from common import FormatConversion,RunMain
import json
class DisposeEnv:
    """Set up test-environment prerequisites (redis entries, database
    rows, API-created fixtures) described by the 'Env' JSON document
    before a test case runs."""
    def __init__(self):
        # Readers for the environment / dependency ("RelyOn") JSON files
        # and for redis, the database and the framework config.
        self.readenvjsonhandle = ReadJson.ReadJson('Env','ENV')
        self.readrelyjsonhandle = ReadJson.ReadJson('RelyOn','RELYON')
        self.readredishandle = ReadRedis.ReadRedis()
        self.readdbhandle = ReadDB.ReadDB()
        self.readconfighandle = ReadConfig.ReadConfig()
        # Interface version substituted into URL templates in set_env().
        self.version = self.readconfighandle.get_data('INTERFACE','version_num')
        self.formatconversionhandle = FormatConversion.FormatConversion()
        self.runmethodhandle = RunMain.RunMethod()
    def set_env(self,data):
        """Apply the environment dependencies declared for a test case.

        *data* is a case row; the key '环境是否依赖' ("does the environment
        depend on setup?") gates the work, and '依赖数据' ("dependency
        data") names the entry in the Env JSON to apply. Redis, SQL and
        API sections of that entry are handled in order.
        """
        if data['环境是否依赖'] == '是':
            jsondata = self.readenvjsonhandle.get_json_data()[data['依赖数据']]
            if "redis" in jsondata:
                for envdata in jsondata['redis']:
                    if envdata['nodetype'] == 'Hash':
                        if envdata['nodeoperation'] == 'Add':
                            self.readredishandle.hashsetall(envdata['name'],envdata['key'],envdata['value'])
                        elif envdata['nodeoperation'] == 'Delete':
                            self.readredishandle.hashdelall(envdata['name'],envdata['key'])
                    elif envdata['nodetype'] == 'String':
                        # String / List / Set node types not implemented yet.
                        pass
                    elif envdata['nodetype'] == 'List':
                        pass
                    elif envdata['nodetype'] == 'Set':
                        pass
            if "sql" in jsondata:
                self.readdbhandle.modify_data(jsondata['sql'])
            if "api" in jsondata:
                relydata = self.readrelyjsonhandle.get_json_data()
                for envdata in jsondata['api']:
                    # NOTE(review): this rebinds the 'data' parameter,
                    # shadowing the case row for later iterations --
                    # confirm this is intentional.
                    data = self.set_dict_value(envdata,relydata)
                    if data["url"] == 'url_system':
                        url = self.readconfighandle.get_data('INTERFACE','url_system')
                    else:
                        url = self.readconfighandle.get_data('INTERFACE','url_app')
                    all_url = url + data["api"].format(version = self.version)
                    all_header = data["header"]
                    all_payload = data["payload"]
                    all_method = data["method"]
                    r = self.runmethodhandle.run_main(all_url,all_method,all_header,all_payload)
                    if r.status_code == 200:
                        if "set_rely" in data:
                            self.set_rely(data['set_rely'],r)
                    else:
                        print('接口创建测试环境失败')
    # Walk every value in the request template; any string value
    # containing "rely_" is replaced with the stored dependency value,
    # then the (mutated) dict is returned.
    def set_dict_value(self,env_dict,rely_dict):
        if type(env_dict) is dict:
            for key in env_dict.keys():  # iterate the current dict level
                data = env_dict[key]  # value for the current key
                # Recurse into nested dicts, but leave 'set_rely' intact.
                if isinstance(data, dict):
                    if key =='set_rely':
                        continue
                    self.set_dict_value(data,rely_dict)
                elif isinstance(data, list):
                    for data1 in data:
                        self.set_dict_value(data1,rely_dict)
                else:
                    if "rely_" in data:
                        env_dict[key] = self.get_rely_json(data)
        return env_dict
    # Resolve a "rely_..." placeholder against the dependency JSON.
    def get_rely_json(self,case_api_relyed):
        jsondata = self.readrelyjsonhandle.get_json_data()
        jsonrelydata = self.formatconversionhandle.FormatConversion(case_api_relyed,jsondata)
        return jsonrelydata
    def set_rely(self,data,r):
        """Persist a value from the API response into the RelyOn JSON so
        later test cases can reference it."""
        jsondata = self.readrelyjsonhandle.get_json_data()
        fromdata = data["fromdata"]
        todata = data["todata"]
        # Extract the value located at 'fromdata' from the response JSON...
        reportdatavalue = self.formatconversionhandle.FormatConversion(fromdata,r.json())
        # ...store it at 'todata' and write the rely JSON back out.
        self.formatconversionhandle.FormatSet(reportdatavalue,todata,jsondata)
        self.readrelyjsonhandle.set_json_data(jsondata)
|
# -*- coding: utf-8 -*-
# @Time : 2020-04-28 10:57
# @Author : speeding_motor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.