text
stringlengths 8
6.05M
|
|---|
import numpy as np
import pytest
import math
from sklearn.base import clone
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
import doubleml as dml
from ._utils import draw_smpls
from ._utils_irm_manual import fit_irm, boot_irm
@pytest.fixture(scope='module',
                params=[[RandomForestRegressor(max_depth=2, n_estimators=10),
                         RandomForestClassifier(max_depth=2, n_estimators=10)]])
def learner(request):
    # Pair of learners: index 0 is cloned into ml_g (regression), index 1
    # into ml_m (classification). Small forests keep the test fast.
    return request.param
@pytest.fixture(scope='module',
                params=['ATE', 'ATTE'])
def score(request):
    # Score argument passed through to DoubleMLIRM ('ATE' or 'ATTE').
    return request.param
@pytest.fixture(scope='module',
                params=[1, 2])
def n_folds(request):
    # Number of folds; with apply_cross_fitting=False only a single
    # train/test split is actually used (see the fixture below).
    return request.param
@pytest.fixture(scope='module')
def dml_irm_no_cross_fit_fixture(generate_data_irm, learner, score, n_folds):
    """Fit DoubleMLIRM without cross-fitting and replicate the fit manually.

    Returns a dict holding package and manual coefficients, standard errors,
    and bootstrap results so the tests below can compare them.
    """
    boot_methods = ['normal']
    n_rep_boot = 499
    dml_procedure = 'dml1'
    # collect data
    (x, y, d) = generate_data_irm
    # Set machine learning methods for m & g
    ml_g = clone(learner[0])
    ml_m = clone(learner[1])
    np.random.seed(3141)
    obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d)
    dml_irm_obj = dml.DoubleMLIRM(obj_dml_data,
                                  ml_g, ml_m,
                                  n_folds,
                                  score=score,
                                  dml_procedure=dml_procedure,
                                  apply_cross_fitting=False)
    dml_irm_obj.fit()
    # Re-seed so the manual sample draw matches the package's internal draw.
    np.random.seed(3141)
    if n_folds == 1:
        # No splitting at all: train and evaluate on the full sample.
        smpls = [(np.arange(len(y)), np.arange(len(y)))]
    else:
        # Draw folds but keep only the first train/test split
        # (that is what "no cross-fitting" means here).
        n_obs = len(y)
        all_smpls = draw_smpls(n_obs, n_folds)
        smpls = all_smpls[0]
        smpls = [smpls[0]]
    res_manual = fit_irm(y, x, d,
                         clone(learner[0]), clone(learner[1]),
                         [smpls], dml_procedure, score)
    res_dict = {'coef': dml_irm_obj.coef,
                'coef_manual': res_manual['theta'],
                'se': dml_irm_obj.se,
                'se_manual': res_manual['se'],
                'boot_methods': boot_methods}
    for bootstrap in boot_methods:
        # Same seed for the manual and the package bootstrap draws.
        np.random.seed(3141)
        boot_theta, boot_t_stat = boot_irm(y, d, res_manual['thetas'], res_manual['ses'],
                                           res_manual['all_g_hat0'], res_manual['all_g_hat1'],
                                           res_manual['all_m_hat'], res_manual['all_p_hat'],
                                           [smpls], score, bootstrap, n_rep_boot,
                                           apply_cross_fitting=False)
        np.random.seed(3141)
        dml_irm_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
        res_dict['boot_coef' + bootstrap] = dml_irm_obj.boot_coef
        res_dict['boot_t_stat' + bootstrap] = dml_irm_obj.boot_t_stat
        res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
        res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat
    return res_dict
@pytest.mark.ci
def test_dml_irm_no_cross_fit_coef(dml_irm_no_cross_fit_fixture):
    # Package coefficient must agree with the manual implementation.
    assert math.isclose(dml_irm_no_cross_fit_fixture['coef'],
                        dml_irm_no_cross_fit_fixture['coef_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_irm_no_cross_fit_se(dml_irm_no_cross_fit_fixture):
    # Package standard error must agree with the manual implementation.
    assert math.isclose(dml_irm_no_cross_fit_fixture['se'],
                        dml_irm_no_cross_fit_fixture['se_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_irm_no_cross_fit_boot(dml_irm_no_cross_fit_fixture):
    # Bootstrap draws used the same seed on both sides, so package and
    # manual results should match to numerical tolerance.
    for bootstrap in dml_irm_no_cross_fit_fixture['boot_methods']:
        assert np.allclose(dml_irm_no_cross_fit_fixture['boot_coef' + bootstrap],
                           dml_irm_no_cross_fit_fixture['boot_coef' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
        assert np.allclose(dml_irm_no_cross_fit_fixture['boot_t_stat' + bootstrap],
                           dml_irm_no_cross_fit_fixture['boot_t_stat' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
|
from django.db import models
from django.contrib.auth.models import User
class Tweet(models.Model):
    """A short user message, twitter-style."""
    # Author; deleting the user cascades to their tweets.
    user = models.ForeignKey(User, related_name='tweets', on_delete=models.CASCADE)
    # Message text, limited to the classic 140 characters.
    body = models.CharField(max_length=140)
    # Set once, automatically, when the row is first created.
    timecreated = models.DateTimeField(auto_now_add=True)
|
import os.path
import sys
import glob
import os
from kapteyn import wcs
import pyfits
def get_img_WCS_map(fits_filename):
    """Return a 2-axis WCS projection for the primary HDU of a FITS file.

    The projection is restricted to the first two axes via ``sub(nsub=2)``.
    (Removed an unreachable ``pass`` that followed the return.)
    """
    hdulist = pyfits.open(fits_filename)
    header = hdulist[0].header
    proj = wcs.Projection(header)
    # NOTE(review): hdulist is never closed; wcs.Projection appears to work
    # from the header alone, so closing here should be safe — confirm before
    # adding a close()/with-block.
    return proj.sub(nsub=2)
def get_img_WCS_posn(fits_filename, pixel_tuple):
    """Convert a pixel coordinate (x, y) in the image to a sky position.

    (Removed an unreachable ``pass`` that followed the return.)
    """
    wcs_map = get_img_WCS_map(fits_filename)
    sky_loc = wcs_map.toworld(pixel_tuple)
    return sky_loc
def get_img_pixel_posn(fits_filename, sky_tuple):
    """Convert a sky position to a pixel coordinate (x, y) in the image."""
    # Renamed the local from `map`, which shadowed the builtin.
    wcs_map = get_img_WCS_map(fits_filename)
    pix_loc = wcs_map.topixel(sky_tuple)
    return pix_loc
# Python 2 driver script: compute per-image pixel offsets (via WCS) for a set
# of coadded images relative to the first one, then build and run a single
# HSTphot command covering all of them.
output_dir = "hstphot_catalogs/"
hstphot_location="~/software/hstphot1.1/hstphot"
if len(sys.argv) != 2:
    print "Usage: hstphot_ filestem (e.g. coadd_u33)"
    sys.exit(1)
#list the coadded files
search_stem=sys.argv[1]
if(search_stem.find("coadd")==-1):
    print "should supply path to coadd files"
    sys.exit()
files= glob.glob("./"+search_stem+"*")
filename_bases=[]
for file in files:
    if (file.endswith("c0f.fits")):
        filename_bases+=[file.rsplit('.',1)[0]]
working_dir = filename_bases[0].rsplit('/',1)[0]+"/"
#look up their relative shifts
original_filenames=[]
for filename_base in filename_bases:
    original_filenames.append( filename_base.split("coadd_for_WCS_matching_",1)[-1]+".fits" )
ref_filename = original_filenames[0]
centre_pixel = (400,400)
ref_sky_loc = get_img_WCS_posn(ref_filename, centre_pixel)
ref_pix_x, ref_pix_y = centre_pixel
print "Ref file: ", ref_filename," sky loc: " , ref_sky_loc
filename_to_offset_dict={}
for filename in original_filenames:
    # Where does this image's centre pixel land in the reference image?
    this_file_centre_sky_posn = get_img_WCS_posn(filename, centre_pixel)
    this_file_centre_pixel_posn_in_ref_image = get_img_pixel_posn(ref_filename, this_file_centre_sky_posn)
    this_file_pix_x, this_file_pix_y = this_file_centre_pixel_posn_in_ref_image
    # this_file_pix_offset = (this_file_pix_x-ref_pix_x, this_file_pix_y-ref_pix_y)
    # Sign flipped relative to the commented line above — presumably the
    # shift convention HSTphot expects (TODO confirm).
    this_file_pix_offset = (-1*(this_file_pix_x-ref_pix_x), -1*(this_file_pix_y-ref_pix_y))
    # print file_pix_offset
    filename_to_offset_dict[filename]=this_file_pix_offset
# Run HSTphot
if(os.path.isdir(output_dir)==False):
    os.makedirs(output_dir)
hstphot_command=hstphot_location
hstphot_command+=" "+output_dir+"hstphot_sourcelist_refimage_"+ original_filenames[0].rsplit('.',1)[0] +".txt" #output
hstphot_command+=" 1.0 " #per image sigma threshold
#hstphot+=" "+str(2.5*n_frames)#total sigma threshold
hstphot_command+=" 1.5"#total sigma threshold
hstphot_command+=" -1 0 0 0 0 " #chip, x/y min/max --- all chips
#hstphot+=" 0 0 800 0 800 " #chip, x/y min/max --- chip 0 (PC) only.
#calculate options code
opt_code = 2 + 4 + 8 #+ 2048 #(refit sky=512),(turn off ap corr=8), turn off psf_residuals = 4 , (weight_centre=2048)
#opt_code = 2 +4 + 8 + 2048 #(local background determination=2),(turn off ap corr=8), turn off psf_residuals = 4 , (more weighting on psf centre=2048)
hstphot_command+=" "+str(opt_code)+ " " #options code
command = hstphot_command + " "
for filename in original_filenames:
    offset_x, offset_y = filename_to_offset_dict[filename]
    command+= working_dir+"coadd_for_WCS_matching_"+filename.rsplit('.',1)[0] ##strip off file extension
    command+=" "+str(offset_x)+" "+str(offset_y)+" " #dx, dy
command +="\"\"" #no reference, refer to first image listed
print command
os.system(command)
|
# Copy "yesterday" to "yesterday2", rewriting one lyric line on the way.
# Context managers guarantee both files are closed even if an error occurs
# mid-copy (the original left them open on any exception).
with open("yesterday", "r", encoding="utf-8") as src, \
        open("yesterday2", "w", encoding="utf-8") as dst:
    for line in src:
        if "肆意的快乐等我享受" in line:
            line = line.replace("肆意的快乐等我享受", "肆意的快乐等---黄世杰----享受")
        dst.write(line)
|
from django.shortcuts import render_to_response
from django.http import HttpResponse
from .models import User
# Create your views here.
def index(request):
    """Render the personal page with every user in the template context.

    NOTE(review): render_to_response is deprecated (removed in Django 3.0);
    consider django.shortcuts.render. locals() forwards both `request` and
    `user` to the template.
    """
    user = User.objects.all()
    return render_to_response('jan/personal.html', locals())
|
import cmath

# Solve a*x^2 + b*x + c = 0 over the complex numbers and print both roots,
# each rounded to three decimals in the real and imaginary parts.
a = int(input('a:'))
b = int(input('b:'))
c = int(input('c:'))
# cmath.sqrt always yields a complex value, so negative discriminants work.
root = cmath.sqrt(b * b - 4 * a * c)
denominator = 2 * a
for solution in ((-b + root) / denominator, (-b - root) / denominator):
    print(round(solution.real, 3) + round(solution.imag, 3) * 1j)
|
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView
from appointments.models import Appointment
import datetime
class HomePage(LoginRequiredMixin, ListView):
    """Login-protected list view showing only today's appointments."""
    login_url = 'login'
    redirect_field_name = 'redirect_to'
    model = Appointment
    template_name = 'home_page.html'

    def get_context_data(self, **kwargs):
        """Replace object_list with appointments scheduled for today."""
        context = super().get_context_data(**kwargs)
        # .all() before .filter() was redundant — filter() already starts
        # from the full queryset.
        context.update({
            'object_list': Appointment.objects.filter(
                appointment_date=datetime.date.today())
        })
        return context
|
"""
cis and trans eigenvector decomposition on Hi-C numpy arrays
refactored from mirnylib and hiclib
"""
def _filter_heatmap(A, transmask, perc_top, perc_bottom):
# Truncate trans blowouts
lim = np.percentile(A[transmask], perc_top)
tdata = A[transmask]
tdata[tdata > lim] = lim
A[transmask] = tdata
# Remove bins with poor coverage in trans
marg = np.sum(A, axis=0)
marg_nz = marg[np.sum(A, axis=0) > 0]
min_cutoff = np.percentile(marg_nz, perc_bottom)
dropmask = (marg > 0) & (marg < min_cutoff)
idx = np.flatnonzero(dropmask)
A[idx, :] = 0
A[:, idx] = 0
return A
def _fake_cis(A, cismask):
    """Fill the masked (cis) pixels of ``A`` in place via numutils.fakeCisImpl.

    ``cismask`` entries of 1 mark pixels to be faked; rows/columns with
    (near-)zero total coverage are set to 2 so the implementation skips them.
    """
    cismask = cismask.astype(np.int64)
    # Rows/cols whose marginal is ~0 carry no data to interpolate from.
    s = np.abs(np.sum(A, axis=0)) <= 1e-10
    cismask[:, s] = 2
    cismask[s, :] = 2
    numutils.fakeCisImpl(A, cismask)
    return A
def _orient_eigs(lam, vecs, gc):
# If GC is provided reorder and change signs of E1, E2, etc. by GC
# correlation. If not, reorder by descending eigenvalue magnitude and
# don't change signs.
if gc is not None:
corrs = [spearmanr(gc, vec, nan_policy='omit')[0] for vec in vecs]
signs = np.sign(corrs)
idx = np.argsort(-np.abs(corrs))
lam, vecs, signs = lam[idx], vecs[idx], signs[idx]
# change signs
for i in range(len(vecs)):
vecs[i] = signs[i] * vecs[i]
else:
idx = np.argsort(-np.abs(lam))
lam, vecs = lam[idx], vecs[idx]
return lam, vecs
def _eig(A, k):
    """Top-``k`` eigenpairs of ``A`` via numutils.zeroEIG.

    Each eigenvector is normalized to unit length (eigsh output should
    already be unit norm, but we make sure) and then rescaled by
    sqrt(|eigenvalue|).
    """
    vecs, lam = numutils.zeroEIG(A, numPCs=k)
    for j, eigval in enumerate(lam):
        norm = np.sqrt(np.sum(vecs[j] ** 2))
        vecs[j] = vecs[j] / norm * np.sqrt(np.abs(eigval))
    return lam, np.array(vecs)
def trans_eig(A, partition, k=3, perc_top=99.95, perc_bottom=1, gc=None):
    """
    Compute compartmentalization eigenvectors on trans contact data
    Parameters
    ----------
    A : 2D array
        balanced whole genome contact matrix
    partition : sequence of int
        bin offset of each contiguous region to treat separately (e.g.,
        chromosomes or chromosome arms)
    k : int
        number of eigenvectors to compute; default = 3
    perc_top : float (percentile)
        filter - clip trans blowout contacts above this cutoff; default = 99.95
    perc_bottom : float (percentile)
        filter - remove bins with trans coverage below this cutoff; default=1
    gc : 1D array, optional
        GC content per bin for reordering and orienting the primary compartment
        eigenvector; not performed if no array is provided
    Returns
    -------
    eigenvalues, eigenvectors
    """
    if A.shape[0] != A.shape[1]:
        raise ValueError("A is not symmetric")
    A = np.array(A)
    A[np.isnan(A)] = 0
    n_bins = A.shape[0]
    # Partition must cover [0, n_bins] with strictly increasing offsets.
    if not (partition[0] == 0 and
            partition[-1] == n_bins and
            np.all(np.diff(partition) > 0)):
        raise ValueError("Not a valid partition. Must be a monotonic sequence "
                         "from 0 to {}.".format(n_bins))
    # Delete cis data and create trans mask
    extents = zip(partition[:-1], partition[1:])
    part_ids = []
    for n, (i0, i1) in enumerate(extents):
        A[i0:i1, i0:i1] = 0
        part_ids.extend([n] * (i1 - i0))
    part_ids = np.array(part_ids)
    # True wherever the two bins belong to different regions (trans pixels).
    transmask = (part_ids[:, None] != part_ids[None, :])
    # Filter heatmap
    A = _filter_heatmap(A, transmask, perc_top, perc_bottom)
    # Fake cis and re-balance (done twice)
    A = _fake_cis(A, ~transmask)
    A = numutils.iterativeCorrection(A)[0]
    A = _fake_cis(A, ~transmask)
    A = numutils.iterativeCorrection(A)[0]
    # Compute eig on the mean-normalized deviation matrix
    Abar = A.mean()
    O = (A - Abar) / Abar
    lam, vecs = _eig(O, k)
    lam, vecs = _orient_eigs(lam, vecs, gc)
    return lam, vecs
def cis_eig(A, k=3, robust=True, gc=None, classic=False):
    """
    Compute compartment eigenvector on a cis matrix
    Parameters
    ----------
    A : 2D array
        balanced whole genome contact matrix
    k : int
        number of eigenvectors to compute; default = 3
    robust : bool
        Clip top 0.1 percentile and smooth first two diagonals
    gc : 1D array, optional
        GC content per bin for choosing and orienting the primary compartment
        eigenvector; not performed if no array is provided
    classic : bool
        Do it old-school
    Returns
    -------
    eigenvalues, eigenvectors
    """
    A = np.array(A)
    A[~np.isfinite(A)] = 0
    mask = A.sum(axis=0) > 0
    if A.shape[0] <= 5 or mask.sum() <= 5:
        # Matrix too small or too empty to decompose.
        # BUGFIX: the placeholders were returned as (eigenvectors,
        # eigenvalues) — the reverse of the documented order and of the
        # final return below.
        return (
            np.array([np.nan for i in range(k)]),
            np.array([np.ones(A.shape[0]) * np.nan for i in range(k)]),
        )
    if robust:
        A = np.clip(A, 0, np.percentile(A, 99.9))
        # Smooth the main and first off-diagonals with a value extrapolated
        # from the second diagonal.
        fill_value = np.mean(np.diag(A, 2) * 2)
        for d in [-1, 0, 1]:
            numutils.fillDiagonal(A, fill_value, d)
    A[~mask, :] = 0
    A[:, ~mask] = 0
    OE = numutils.observedOverExpected(A[mask, :][:, mask])
    if robust:
        OE = np.clip(OE, 0, np.percentile(OE, 99.9))
    if classic:
        OE = numutils.iterativeCorrection(OE)[0]
        if (~np.isfinite(OE)).sum() > 0:
            # BUGFIX: same swapped-order placeholder return as above.
            return (
                np.array([np.nan for i in range(k)]),
                np.array([np.ones(A.shape[0]) * np.nan for i in range(k)]),
            )
        # mean-centered (subtract mean)
        eigvecs_compressed, eigvals = numutils.EIG(OE, k)
    else:
        eigvecs_compressed, eigvals = numutils.EIG((OE - 1.0), k,
                                                   subtractMean=False, divideByMean=False)
    # Restore full eigs: re-expand vectors to full length, NaN where masked.
    eigvecs = []
    for i in range(k):
        v = np.ones(mask.shape[0]) * np.nan
        v[mask] = eigvecs_compressed[i]
        eigvecs.append(v)
    eigvecs = np.array(eigvecs)
    # Orient and reorder
    eigvals, eigvecs = _orient_eigs(eigvals, eigvecs, gc)
    return eigvals, eigvecs
def cooler_cis_eigs(clr, bins, gc_col='GC', **kwargs):
    """Per-chromosome cis eigendecomposition of a cooler.

    Returns a DataFrame of the first three eigenvalues per chromosome and a
    copy of the bin table with eigenvector columns E1, E2, ... appended.
    """
    bins_grouped = bins.groupby('chrom')

    def _each(chrom):
        # Balanced cis matrix for one chromosome.
        A = clr.matrix(balance=True).fetch(chrom)
        gc = None
        if gc_col in bins:
            gc = bins_grouped.get_group(chrom)[gc_col].values
        lam, vec = cis_eig(A, robust=True, gc=gc, **kwargs)
        eig = bins_grouped.get_group(chrom).copy()
        for i in range(len(vec)):
            eig['E{}'.format(i+1)] = vec[i]
        return lam, eig

    # Preserve the cooler's chromosome order, restricted to chromosomes
    # present in the bin table.
    bins_chroms = list(bins_grouped.groups.keys())
    map_chroms = [chrom for chrom in clr.chromnames if chrom in bins_chroms]
    lams, vecs = zip(*map(_each, map_chroms))
    # NOTE(review): column names assume the default k=3; confirm behavior if
    # k is overridden through kwargs.
    lams = pandas.DataFrame(
        index=map_chroms,
        data=np.vstack(lams),
        columns=['lam1', 'lam2', 'lam3'],
    )
    lams.index.name = 'chrom'
    vecs = pandas.concat(vecs, axis=0, ignore_index=True)
    return lams, vecs
def cooler_trans_eig(clr, bins, partition=None, gc_col='GC', **kwargs):
    """Genome-wide trans eigendecomposition of a cooler.

    If ``partition`` is not given, chromosome start offsets are used so each
    chromosome forms one block. Returns the eigenvalues and a copy of the
    bin table with eigenvector columns E1, E2, ... appended.
    """
    if partition is None:
        # Chromosome offsets plus the total bin count as the final edge.
        partition = np.r_[
            [clr.offset(chrom) for chrom in clr.chromnames], len(clr.bins())]
    lo = partition[0]
    hi = partition[-1]
    A = clr.matrix(balance=True)[lo:hi, lo:hi]
    gc = None
    if gc_col in bins:
        gc = bins[gc_col].values
    lam, PCs = trans_eig(A, partition, gc=gc, **kwargs)
    vecs = bins.copy()
    for i in range(len(PCs)):
        vecs['E{}'.format(i+1)] = PCs[i]
    return lam, vecs
"""
Saddle plot code.
Authors
~~~~~~~
* Anton Goloborodko
* Nezar Abdennur
"""
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.linalg import toeplitz
from cytoolz import merge
import numpy as np
import pandas as pd
pal = sns.color_palette('muted')
def digitize_track(
        bins,
        get_track,
        get_mask,
        chromosomes,
        prange=None,
        by_percentile=False):
    """Digitize per-chromosome genomic tracks.

    Parameters
    ----------
    bins : int or list or numpy.ndarray
        Explicit bin edges, or the number of bins to generate automatically
        from the genome-wide range of the track values.
    get_track : function
        Returns a genomic track given a chromosomal name/index.
    get_mask : function
        Returns a binary mask of valid genomic bins for a chromosome.
    chromosomes : list or iterator
        Names/indices of all chromosomes.
    prange : pair of floats
        Percentile range of the genome-wide track values used to generate
        edges automatically (guards against extreme values blowing up the
        bin range). Ignored when explicit edges are given.
    by_percentile : bool
        If True, auto-generated bins hold an equal number of genomic bins
        genome-wide (percentile binning); otherwise edges are equally
        spaced. Ignored when explicit edges are given.

    Returns
    -------
    digitized : dict
        Digitized track per chromosome; -1 marks masked bins, 0 and
        len(binedges) mark values below/above the edge range.
    binedges : numpy.ndarray
        The edges used for digitization.
    """
    if hasattr(bins, '__len__'):
        # Explicit edges supplied; prange/by_percentile are ignored.
        binedges = bins
    else:
        prange = (0, 100) if prange is None else prange
        genomewide = np.concatenate([
            get_track(chrom)[get_mask(chrom)]
            for chrom in chromosomes
        ])
        if by_percentile:
            # Equal-occupancy bins: edges at evenly spaced percentiles.
            binedges = np.percentile(
                genomewide, np.linspace(prange[0], prange[1], bins + 1))
        else:
            # Equal-width bins spanning the requested percentile range.
            lo, hi = (np.percentile(genomewide, p) for p in prange)
            binedges = np.linspace(lo, hi, bins + 1)
    digitized = {}
    for chrom in chromosomes:
        codes = np.digitize(get_track(chrom), binedges, right=False)
        codes[~get_mask(chrom)] = -1  # mark invalid bins
        digitized[chrom] = codes
    return digitized, binedges
def fill_diagonal(A, values, k=0, wrap=False, inplace=False):
    """
    Based on numpy.fill_diagonal, but allows for kth diagonals as well.
    Only works on 2D arrays.

    Parameters
    ----------
    A : 2D array
    values : scalar or sequence
        Value(s) written along the diagonal.
    k : int
        Diagonal offset: 0 = main, positive = above, negative = below.
    wrap : bool
        If True, the diagonal of a tall matrix wraps around columns
        (numpy.fill_diagonal semantics).
    inplace : bool
        If False (default), a copy is modified and ``A`` is left untouched.

    Returns
    -------
    The array with the kth diagonal filled.
    """
    if not inplace:
        A = np.array(A)
    else:
        A = np.asarray(A)
    # BUGFIX: a negative k was previously used directly as the flat start
    # index, which Python interprets as an offset from the END of the
    # buffer — sub-diagonals were never filled. The kth sub-diagonal
    # starts at flat index -k * n_cols.
    if k >= 0:
        start = k
    else:
        start = -k * A.shape[1]
    step = A.shape[1] + 1
    # This is needed so a tall matrix doesn't have the diagonal wrap around.
    end = None if wrap else start + A.shape[1] * A.shape[1]
    A.flat[start:end:step] = values
    return A
def make_saddle(
        get_matrix,
        get_digitized,
        chromosomes,
        contact_type,
        verbose=False):
    """
    Make a matrix of average interaction probabilities between genomic bin pairs
    as a function of a specified genomic track. The provided genomic track must
    be pre-binned (i.e. digitized).
    Parameters
    ----------
    get_matrix : function
        A function returning an matrix of interaction between two chromosomes
        given their names/indicies.
    get_digitized : function
        A function returning a track of the digitized target genomic track given
        a chromosomal name/index.
    chromosomes : list or iterator
        A list of names/indices of all chromosomes.
    contact_type : str
        If 'cis' then only cis interactions are used to build the matrix.
        If 'trans', only trans interactions are used.
    verbose : bool
        If True then reports progress.
    Returns
    -------
    interaction_sum : 2D array
        The matrix of summed interaction probability between two genomic bins
        given their values of the provided genomic track.
    interaction_count : 2D array
        The matrix of the number of genomic bin pairs that contributed to the
        corresponding pixel of ``interaction_sum``.
    """
    if contact_type not in ['cis', 'trans']:
        raise ValueError("The allowed values for the contact_type "
                         "argument are 'cis' or 'trans'.")
    # One row/column per digitized track value.
    # NOTE(review): range(n_bins) below never visits the value n_bins itself
    # (the above-range outlier code) — confirm whether that is intended.
    n_bins = max([
        get_digitized(chrom).max()
        for chrom in chromosomes
    ])
    interaction_sum = np.zeros((n_bins, n_bins))
    interaction_count = np.zeros((n_bins, n_bins))
    # Visit each chromosome pair once (chrom2 starts at chrom1).
    for k, chrom1 in enumerate(chromosomes):
        for chrom2 in chromosomes[k:]:
            if (((contact_type == 'trans') and (chrom1 == chrom2)) or
                    ((contact_type == 'cis') and (chrom1 != chrom2))):
                continue
            matrix = get_matrix(chrom1, chrom2)
            # Mask the near-diagonal, dominated by short-range signal.
            # BUGFIX: fill_diagonal defaults to inplace=False and returns a
            # modified COPY; the return value was previously discarded, so
            # the masking never took effect.
            for d in [-2, -1, 0, 1, 2]:
                matrix = fill_diagonal(matrix, np.nan, d)
            if verbose:
                print('chromosomes {} vs {}'.format(chrom1, chrom2))
            # Accumulate sums and pixel counts for every combination of
            # digitized values (i, j) present in this chromosome pair.
            for i in range(n_bins):
                row_mask = (get_digitized(chrom1) == i)
                for j in range(n_bins):
                    col_mask = (get_digitized(chrom2) == j)
                    data = matrix[row_mask, :][:, col_mask]
                    data = data[np.isfinite(data)]
                    interaction_sum[i, j] += np.sum(data)
                    interaction_count[i, j] += float(len(data))
    # Each pair was visited once; symmetrize by adding the transpose.
    interaction_sum += interaction_sum.T
    interaction_count += interaction_count.T
    return interaction_sum, interaction_count
def saddleplot(binedges,
               digitized,
               saddledata,
               color,
               cbar_label=None,
               fig_kws=None,
               heatmap_kws=None,
               margin_kws=None):
    """
    Plot saddle data and signal histograms in the margins.
    Parameters
    ----------
    binedges: 1D array
    digitized: dict of chrom to 1D array
    saddledata: 2D array
    color: str
    cbar_label: str
    fig_kws: dict, optional
        Extra keywords to pass to ``figure``.
    heatmap_kws : dict, optional
        Extra keywords to pass to ``imshow`` for saddle heatmap.
    margin_kws : dict, optional
        Extra keywords to pass to ``hist`` for left and top margins.
    Returns
    -------
    The created matplotlib figure.
    """
    n_bins = len(binedges) - 1
    lo, hi = 0, n_bins  # -0.5, n_bins - 1.5
    # Populate kwargs: user-supplied dicts override these defaults.
    fig_kws = merge(
        dict(figsize=(5, 5)),
        fig_kws if fig_kws is not None else {}
    )
    heatmap_kws = merge(
        dict(aspect='auto',
             cmap='coolwarm',
             interpolation='none',
             vmin=-1,
             vmax=1),
        heatmap_kws if heatmap_kws is not None else {},
    )
    vmin = heatmap_kws['vmin']
    vmax = heatmap_kws['vmax']
    margin_kws = merge(
        dict(bins=n_bins,
             range=(0, len(binedges)),
             histtype='stepfilled',
             edgecolor='k',
             facecolor=color,
             linewidth=1),
        margin_kws if margin_kws is not None else {},
    )
    # layout: 3x3 grid — histogram margins around the central heatmap,
    # colorbar in the right-hand column.
    gs = GridSpec(
        nrows=3,
        ncols=3,
        width_ratios=[0.2, 1, 0.1],
        height_ratios=[0.2, 1, 0.1],
        wspace=0.05,
        hspace=0.05,
    )
    fig = plt.figure(**fig_kws)
    # heatmap (center cell); log10 so the colormap spans enrichment ratios
    ax = ax1 = plt.subplot(gs[4])
    img = ax.imshow(np.log10(saddledata), **heatmap_kws)
    # Tick labels at every 5th bin midpoint.
    plt.xticks(
        np.arange(0, n_bins)[::5],
        ['{:0.4f}'.format(t)
         for t in ((binedges[1:] + binedges[:-1])/2)[::5]],
        rotation=90,
    )
    plt.yticks([])
    plt.xlim(lo, hi)
    plt.ylim(hi, lo)
    # left margin: histogram of digitized track values, drawn sideways
    plt.subplot(gs[3])
    plt.hist(np.concatenate(list(digitized.values())),
             **merge(margin_kws, {'orientation': 'horizontal'}))
    plt.xticks([])
    plt.yticks(
        np.arange(0, n_bins)[::5],
        ['{:0.4f}'.format(t)
         for t in ((binedges[1:] + binedges[:-1])/2)[::5]],
    )
    plt.xlim(plt.xlim()[1], plt.xlim()[0])  # fliplr
    plt.ylim(hi, lo)
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['bottom'].set_visible(False)
    plt.gca().spines['left'].set_visible(False)
    # top margin: the same histogram, upright
    plt.subplot(gs[1])
    plt.hist(np.concatenate(list(digitized.values())), **margin_kws)
    plt.xticks([])
    plt.yticks([])
    plt.xlim(lo, hi)
    plt.ylim(plt.ylim()[0], plt.ylim()[1])  # correct
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.gca().spines['left'].set_visible(False)
    # colorbar
    plt.subplot(gs[5])
    cb = plt.colorbar(
        img,
        fraction=0.8,
        label=cbar_label)
    if vmin is not None and vmax is not None:
        cb.set_ticks(np.arange(vmin, vmax + 0.001, 0.5))
    plt.grid(False)
    plt.axis('off')
    return fig
|
import paramiko, time
from termcolor import colored
import requests
from functions import SSH
import scp
def statusWebsite():
    """Return a colored one-line status for the media website.

    Any HTTP status other than 200 is reported as inactive.
    """
    url = "https://media.scheijvens.com"
    # A timeout prevents the whole script from hanging on a dead server
    # (requests.get has no timeout by default).
    status = requests.get(url, timeout=10)
    if status.status_code == 200:
        status_active = colored("Active", "green")
    else:
        status_active = colored("Inactive", "red")
    return f"Status Website: {status_active}"
def updateUbuntu(user, password):
    """Run apt update/upgrade on the server over SSH and print the last output line."""
    host = "172.16.0.10"
    port = 22
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, port, user, password)
    stdin, stdout, stderr = client.exec_command("sudo apt update && sudo apt upgrade -y")
    # readlines() blocks until the command finishes, so the fixed
    # time.sleep(5) that used to sit here was unnecessary.
    result = stdout.readlines()  # typo fix: was `restult`
    if result:
        # Guard against IndexError when the command produced no output.
        print(result[-1])
    client.close()
def systemJellyfin(user, password):
    """Start, stop or restart the jellyfin service over SSH (interactive)."""
    host = "172.16.0.10"
    port = 22
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, port, user, password)
    option = input("Start, stop or restart?: ").lower()
    if option not in ("start", "stop", "restart"):
        # BUGFIX: an invalid option previously still executed
        # "sudo service jellyfin " with an empty parameter; bail out instead.
        print(colored("Not a valid option.\n", "red"))
        client.close()
        return
    stdin, stdout, stderr = client.exec_command(f"sudo service jellyfin {option}")
    client.close()
def systemUbuntu(user, password):
    """Reboot or shut down the server over SSH (interactive)."""
    host = "172.16.0.10"
    port = 22
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, port, user, password)
    option = input("reboot or shutdown?: ").lower()
    if option == "reboot":
        parameter = "reboot"
    elif option == "shutdown":
        parameter = "shutdown now"
    else:
        # BUGFIX: an invalid option previously fell through and executed a
        # bare "sudo " command on the server; bail out instead.
        print(colored("Not a valid option.\n", "red"))
        client.close()
        return
    stdin, stdout, stderr = client.exec_command(f"sudo {parameter}")
    client.close()
def getStatus(user, password):
    """Return a colored one-line summary of the jellyfin service status."""
    host = "172.16.0.10"
    port = 22
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.load_system_host_keys()
    ssh.connect(host, port, user, password)
    stdin, stdout, stderr = ssh.exec_command("service jellyfin status")
    time.sleep(2)
    result = stdout.readlines()
    ssh.close()
    # Line index 4 is assumed to be the "Active: ..." line of the service
    # status output — TODO confirm against the server's systemd version.
    activity_line = result[4].strip()
    state = activity_line.split()[1]
    if state == "failed":
        status_activity = colored(activity_line, "red")
    elif state == "active":
        status_activity = colored(activity_line, "green")
    else:
        # BUGFIX: any other state previously left status_activity unbound,
        # raising NameError; report unknown states neutrally instead.
        status_activity = colored(activity_line, "yellow")
    return f"Status Server: {status_activity}"
def uploadMovie(user, password):
    """Upload a local movie file to the server's movie directory via SFTP."""
    import os  # local import: the module header does not import os

    host = "172.16.0.10"
    port = 22
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, port, user, password)
    sftp = client.open_sftp()
    path_movies_server = "/mnt/media/movies/"
    path_movie_local = input("What is the path of the directory?: ")
    # BUGFIX: SFTPClient.put() needs a full remote *file* path, not a
    # directory — uploading straight to "/mnt/media/movies/" fails.
    remote_path = path_movies_server + os.path.basename(path_movie_local)
    sftp.put(path_movie_local, remote_path)
    sftp.close()
    client.close()  # also close the SSH session, not just the SFTP channel
|
# -*- coding:utf-8 -*-
__author__ = 'lish'
# NOTE: Python 2 script (print statements, urllib2, reload(sys)).
import urllib2,re,urllib,json,time,bs4
import MySQLdb,random,os,StringIO,gzip
from multiprocessing.dummy import Pool as ThreadPool
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Directory containing this script; used as the working root for book files.
base_path=os.path.split( os.path.realpath( sys.argv[0] ) )[0]
# isExists=os.path.exists(base_path+u'/391275/charpters/412118793.txt')
# if not isExists:
# print '???'
def test():
    # List the working directory; the commented-out code below was meant to
    # detect downloaded zip archives and post-process the chapter files.
    pathlist = os.listdir(base_path)
    print pathlist
    checkHasendZIP = False
    # for filename in pathlist:
    # if filename.endswith('zip'):
    # checkHasendZIP = True
    # if checkHasendZIP:
    # # unzip the zip archive
    #
    #
    # # replace wrong or tagged characters
    # sedcommand1="sed -i 's/kuaikanxiaoshuo/imreadorg/g' "+base_path+'/'+str(bookid)+"/charpters/*.txt"
    # os.system(sedcommand1)
    #
    # sedcommand2="sed -i 's/快看小说/艾美阅读/g' "+base_path+'/'+str(bookid)+"/charpters/*.txt"
    # os.system(sedcommand2)
test()
|
# Write the number->word mapping to 7.txt, one "key, value" pair per line.
D = {
    1: "One",
    2: "Two",
    3: "Three",
    4: "Four",
    5: "Five"
}

# The with-statement closes the file automatically; the stray f.close()
# that followed the with-block was redundant and has been removed.
with open('./7.txt', 'w') as f:
    for key, word in D.items():
        f.write(str(key) + ", " + word + "\n")
|
#!usr/bin/env python3
#coding=utf8
source = [1,5,2,8,10,3,7,6,9,4]
class Solution:
    """Quicksort that uses the middle element as the pivot."""

    def sortArray(self, arry: list) -> list:
        """Return the elements of ``arry`` in ascending order."""
        n = len(arry)
        if n <= 1:
            return arry
        pivot_idx = n // 2
        pivot = arry[pivot_idx]
        smaller, larger = [], []
        # Partition everything except the pivot itself; ties go right.
        for value in arry[:pivot_idx] + arry[pivot_idx + 1:]:
            (larger if value >= pivot else smaller).append(value)
        if smaller:
            smaller = self.sortArray(smaller)
        if larger:
            larger = self.sortArray(larger)
        smaller.append(pivot)
        return smaller + larger
def main():
    # Sort the module-level sample list and show the result.
    s = Solution()
    result = s.sortArray(source)
    print(result)

if __name__ == "__main__":
    main()
|
'''
猩球崛起
'''
class Person:
    """A human fighter with a name, attack power, and remaining health."""

    def __init__(self, name, atk, left):
        self.name = name
        self.atk = atk
        self.left = left

    def attack(self, starstar):
        # Deal this person's attack power as damage to the target ape.
        starstar.left = starstar.left - self.atk

    def __str__(self):
        return '{}的攻击力是{},剩余生命力{}'.format(self.name, self.atk, self.left)
class StarSatr:
    """An ape fighter; mirrors Person with its own status-message format."""

    def __init__(self, name, atk, left):
        self.name = name
        self.atk = atk
        self.left = left

    def attack(self, person):
        # Deal this ape's attack power as damage to the target person.
        person.left = person.left - self.atk

    def __str__(self):
        return '{}的攻击力是{},剩余生命力是{}'.format(self.name, self.atk, self.left)
# Demo battle: a person and an ape trade one attack each, printing
# both fighters' stats before and after every exchange.
person = Person('诸葛亮', 10, 100)
print(person)
star1 = StarSatr('金刚', 20, 100)
print(star1)
print('----人攻击猩猩------')
person.attack(star1)
print(person)
print(star1)
print('------猩猩攻击人------')
star1.attack(person)
print(person)
print(star1)
|
"""
Calculates the mean and variance over h5 audio trainings files.
"""
__author__ = 'David Flury'
__email__ = "david@flury.email"
import os
import glob
import json
import h5py
import time
import argparse
import numpy as np
import progressbar
def calculate_sum(file):
    """Return (per-bin magnitude sums, element count of one channel).

    Sums |spectrogram| over the time axis and over the one or two channels,
    depending on the file's ``stereo`` flag.
    """
    # BUGFIX: the h5py.File handle was never closed; the with-statement
    # releases it even if reading raises.
    with h5py.File(file, 'r') as data:
        stereo = data['stereo'][()]
        if stereo:
            channels = np.array(
                [np.abs(to_complex(data['spectrogram_left'][()])),
                 np.abs(to_complex(data['spectrogram_right'][()]))])
        else:
            channels = np.array([np.abs(to_complex(data['spectrogram'][()]))])
    return np.sum(np.sum(channels, axis=2), axis=0), np.size(channels[0])
def calculate_derivation(file, mean):
    """Return per-bin sums of squared deviations from ``mean``."""
    # BUGFIX: close the h5py.File handle via a context manager.
    with h5py.File(file, 'r') as data:
        stereo = data['stereo'][()]
        if stereo:
            channels = np.array(
                [np.abs(to_complex(data['spectrogram_left'][()])),
                 np.abs(to_complex(data['spectrogram_right'][()]))])
        else:
            channels = np.array([np.abs(to_complex(data['spectrogram'][()]))])
    # Broadcast the per-bin mean across the time axis before squaring.
    deviations = np.square(
        channels - np.repeat(np.reshape(mean, mean.shape + (1,)),
                             channels.shape[-1], axis=1))
    return np.sum(np.sum(deviations, axis=2), axis=0)
def to_complex(realimag):
    """Convert a (..., 2) real/imag-stacked training array to complex values."""
    return realimag[:, :, 0] + 1j * realimag[:, :, 1]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Calculates the mean and variance over h5 audio trainings files.')
    parser.add_argument('--path', default='D:\\Data\\unmix.io\\4_training\\fft-window=1536_sample-rate=11025_channels=1-mono\\mini',
                        type=str, help='Working path.')
    parser.add_argument('--prefix', default='',
                        type=str, help='Prefix for file names to pick only vocal or instrumental songs.')
    parser.add_argument('--fft_window', default=1536,
                        type=int, help='FFT-Window size.')
    args = parser.parse_args()
    print('Arguments:', str(args))
    start = time.time()
    # One frequency bin per one-sided FFT bin.
    height = args.fft_window // 2 + 1
    print('Start calculating mean...')
    total_sum = np.zeros((height))
    total_elements = 0
    total_derivation = np.zeros((height))
    files = []
    # First pass: accumulate per-bin sums to obtain the mean.
    with progressbar.ProgressBar(max_value=progressbar.UnknownLength) as bar:
        for file in glob.iglob(os.path.join(args.path, '**', '%s*.h5' % args.prefix), recursive=True):
            s, e = calculate_sum(file)
            total_sum += s
            total_elements += e
            files.append(file)
            bar.update(len(files))
    total_mean = total_sum / total_elements
    print('Start calculating variance...')
    # Second pass: accumulate squared deviations from the mean.
    with progressbar.ProgressBar(max_value=len(files)) as bar:
        for i in range(len(files)):
            file = files[i]
            total_derivation += calculate_derivation(file, total_mean)
            bar.update(i)
    # Sample variance (Bessel's correction).
    total_variance = total_derivation / (total_elements - 1)
    result = {
        "mean": np.average(total_mean),
        "variance": np.average(total_variance),
        # BUGFIX: "bin_mean" previously serialized the variance array.
        "bin_mean": total_mean.tolist(),
        "bin_variance": total_variance.tolist(),
        "files": len(files),
        "fft_window": args.fft_window,
        "prefix": args.prefix,
        "path": args.path
    }
    path = os.path.join(args.path, '%s%smean-derivation.json' % (args.prefix, '_' if args.prefix else ''))
    with open(path, 'w') as file:
        json.dump(result, file, indent=4)
    print("Wrote results to: %s" % path)
    end = time.time()
    print('Finished processing in %d [s].' % (end - start))
|
from typing import List, Optional, Tuple, Union
class User:
    """A person with optional per-platform messenger identifiers."""

    def __init__(self, name: str, discord_id: Optional[int], telegram_id: Optional[int], vk_id: Optional[int]):
        # Tuple-assign the identity fields in pairs.
        self.name, self.discord_id = name, discord_id
        self.telegram_id, self.vk_id = telegram_id, vk_id
class Image:
    """A still-image attachment referenced by URL."""

    def __init__(self, url: str) -> None:
        self.url = url
class Gif:
    """An animated-image attachment referenced by URL."""

    def __init__(self, url: str) -> None:
        self.url = url
class Post:
    """A reposted wall post carrying its own media attachments."""

    def __init__(self, user_name: str, group_name: str, text: str, post_url: str, attachments: List[Union[Image, Gif]]) -> None:
        self.user_name, self.group_name = user_name, group_name
        self.text, self.post_url = text, post_url
        self.attachments = attachments
class Audio:
    """An audio track attachment with basic metadata."""

    def __init__(self, id: int, url: str, artist: str, title: str, duration: int) -> None:
        self.id, self.url = id, url
        self.artist, self.title = artist, title
        self.duration = duration
class Event:
    """A user action carrying attachments extracted from an event payload."""

    def __init__(self, user: User, attachments: List[Union[Image, Gif, Post, Audio]]) -> None:
        self.user = user
        self.attachments = attachments

    def try_get_media(self) -> Tuple[bool, List[Union[Image, Gif]]]:
        """Return (True, [images/gifs]) if any media is attached, else (False, None)."""
        media = [a for a in self.attachments if type(a) is Image or type(a) is Gif]
        # BUGFIX: return real tuples, matching the declared return types
        # (lists were returned before).
        if media:
            return (True, media)
        return (False, None)

    def try_get_post(self) -> Tuple[bool, Post]:
        """Return (True, first post) if a wall post is attached, else (False, None)."""
        posts = [a for a in self.attachments if type(a) is Post]
        if posts:
            return (True, posts[0])
        return (False, None)

    def try_get_audio(self) -> Tuple[bool, Audio]:
        """Return (True, first audio) if an audio is attached, else (False, None)."""
        audio = [a for a in self.attachments if type(a) is Audio]
        # BUGFIX: previously returned the whole list, contradicting the
        # declared Tuple[bool, Audio] and the try_get_post convention.
        if audio:
            return (True, audio[0])
        return (False, None)
def extract_events(events: dict):
    """Convert raw event payloads into a list of Event objects.

    Only events yielding at least one recognised attachment (photo, gif
    document, page/group wall post, or audio) produce an Event.
    """
    result: List[Event] = []
    for event in events:
        hub_payload = event['hub']
        user = User(
            hub_payload['name'],
            hub_payload['discord_id'],
            hub_payload['telegram_id'],
            hub_payload['vk_id'],
        )
        # user_id is unused below, but the lookup is kept so events missing
        # the field still fail fast, exactly as before.
        user_id = event['object']['user_id']
        user_name = event['object']['user_name']
        attachments = event['object'].get('attachments', [])
        # Annotation fixed to include Audio, which is appended below too.
        extracted_attachments: List[Union[Image, Gif, Post, Audio]] = []
        for attachment in attachments:
            if attachment['type'] == 'photo':
                photo = attachment['photo']
                url = _extract_photo_url(photo)
                extracted_attachments.append(Image(url))
            elif attachment['type'] == 'doc' and attachment['doc']['ext'] == 'gif':
                url = attachment['doc']['url']
                extracted_attachments.append(Gif(url))
            elif attachment['type'] == 'wall' and attachment['wall']['from']['type'] in ['page', 'group']:
                wall = attachment['wall']
                post_attachments = wall.get('attachments', [])
                extracted_post_attachments: List[Union[Image, Gif]] = []
                for post_attachment in post_attachments:
                    if post_attachment['type'] == 'photo':
                        photo = post_attachment['photo']
                        url = _extract_photo_url(photo)
                        extracted_post_attachments.append(Image(url))
                    # elif: the branches are mutually exclusive, so the
                    # original's second independent `if` was redundant work.
                    elif post_attachment['type'] == 'doc' and post_attachment['doc']['ext'] == 'gif':
                        url = post_attachment['doc']['url']
                        extracted_post_attachments.append(Gif(url))
                extracted_attachments.append(Post(
                    user_name,
                    wall['from']['name'],
                    wall['text'],
                    f'https://vk.com/wall{wall["from_id"]}_{wall["id"]}',
                    extracted_post_attachments
                ))
            elif attachment['type'] == 'audio':
                audio = attachment['audio']
                extracted_attachments.append(Audio(
                    audio['id'],
                    audio['url'],
                    audio['artist'],
                    audio['title'],
                    audio['duration']
                ))
        if extracted_attachments:
            result.append(Event(user, extracted_attachments))
    return result
def _extract_photo_url(photo) -> str:
keys = list(photo.keys())
resolution_keys = list(filter(lambda k: k.startswith('photo_'), keys))
largest_key = sorted(resolution_keys, key=lambda key: int(key.split('_')[1]), reverse=True)[0]
return photo[largest_key]
|
from django.contrib import admin
from .models import UserFavouriteProducts
# Register your models here.
class FavouriteProductsAdmin(admin.ModelAdmin):
    # Changelist shows the model's __str__ plus the owning user.
    list_display = ['__str__', 'owner']
    # NOTE(review): ModelAdmin does not read an inner ``Meta`` class — the
    # model is bound by the register() call below, so this block is inert.
    class Meta:
        Model = UserFavouriteProducts
admin.site.register(UserFavouriteProducts,FavouriteProductsAdmin)
|
import os
from dotenv import load_dotenv
# Pull configuration from a local .env file into the process environment.
load_dotenv()
# Bot/API credentials (each is None when the variable is unset).
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")
BASE_API_URL = os.getenv("BASE_API_URL")
PASSWORD = os.getenv("PASSWORD")
# Database connection settings.
HOST_DB = os.getenv("HOST_DB")
USER_DB = os.getenv("USER_DB")
PASSWORD_DB = os.getenv("PASSWORD_DB")
DB = os.getenv("DB")
|
from leetcode import test
def is_valid(s: str) -> bool:
    """Return True when every bracket in *s* is properly matched and nested."""
    closes = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif not stack:
            # A closer (or stray character) with nothing open.
            return False
        elif stack[-1] == closes.get(ch):
            stack.pop()
        else:
            return False
    # Valid only if nothing is left open.
    return not stack
# Exercise is_valid against the classic LeetCode #20 cases;
# each pair is (input, expected result).
test(
    is_valid,
    [
        ("", True),
        ("()", True),
        ("()[]{}", True),
        ("(]", False),
        ("([)]", False),
        ("{[()]}", True),
        ("]", False),
    ],
)
|
import math
class Vector(object):
    """A 2-D vector supporting +, -, scalar *, equality, hashing and iteration."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def __add__(self, v):
        return Vector(self.x + v.x, self.y + v.y)

    def __sub__(self, v):
        return Vector(self.x - v.x, self.y - v.y)

    def __mul__(self, coef):
        return Vector(self.x*coef, self.y*coef)

    def __eq__(self, v):
        # Bug fix: `return self.x, self.y == v.x, v.y` built a 3-tuple
        # (always truthy) instead of comparing component-wise.
        return (self.x, self.y) == (v.x, v.y)

    def __hash__(self):
        return hash((self.x, self.y))

    def __len__(self):
        # Bug fix: math.sqrt takes one argument; the original passed two.
        # NOTE(review): len() requires an int, so calling len(v) still
        # fails; the magnitude is only reachable via v.__len__() or abs(v).
        return math.sqrt(self.x**2 + self.y**2)

    def __abs__(self):
        # Backward-compatible addition: abs(v) yields the Euclidean magnitude.
        return math.sqrt(self.x**2 + self.y**2)

    def __iter__(self):
        return iter((self.x, self.y))
class AABB(object):
    """Axis-aligned bounding box: a corner position plus a size vector."""

    def __init__(self, x, y, size):
        # *size* is any 2-item iterable (width, height).
        self.position = Vector(x, y)
        self.size = Vector(*size)
|
# coding=utf-8
# @Time : 2021/9/25 15:48
# @Author : 黄鸿林
# @File : index.py
# @Software : PyCharm
import yaml
from todaySchool.utils.Utils import Utils
from todaySchool.login.NjitLogin import NjitLogin
from todaySchool.actions.AutoClock import AutoClock
def getConfig(yaml_file='config/userConfig.yml'):
    """Load the YAML user configuration and return it as a dict.

    Fix: the original left the file handle open if read() raised; a
    context manager now guarantees it is closed.
    """
    with open(yaml_file, 'r', encoding='utf8') as file:
        file_data = file.read()
    # FullLoader kept for compatibility with existing config files.
    config = yaml.load(file_data, Loader=yaml.FullLoader)
    return dict(config)
def main():
    """Run the clock-in flow for every configured user and send notices."""
    config = getConfig()
    for user in config['users']:
        if config['debug']:
            # Debug mode lets exceptions propagate.
            msg = working(user)
            print(msg)
        else:
            try:
                msg = working(user)
            except Exception as e:
                msg = str(e)
            print(msg)
        settings = user['user']
        if settings['isNoticeOpen']:
            # Success mails are opt-in via isNotice; every other outcome
            # (failure or anything else) is always mailed.
            if '打卡成功' in msg:
                if settings['isNotice']:
                    Utils.sendEmail(settings['email'], msg)
            else:
                Utils.sendEmail(settings['email'], msg)
def working(user):
    """Log in and perform the configured action for a single user.

    Retries login while the message indicates an invalid captcha or a
    failed connection, up to 3 attempts.
    """
    count = 0
    msg = '无效的验证码'
    # Bug fix: `A or B and count < 3` parsed as `A or (B and count < 3)`,
    # so a persistent invalid-captcha message looped forever.  Parenthesise
    # so the retry cap applies to both failure kinds.
    while ('无效的验证码' in msg or '连接尝试失败' in msg) and count < 3:
        try:
            njitLogin = NjitLogin(user['user'])
            msg = njitLogin.login()
        except Exception as e:
            msg = str(e)
        count += 1
        print(msg)
    if '无效的验证码' in msg or '连接尝试失败' in msg:
        return '验证码识别未通过,打卡失败,请手动打卡'
    if user['user']['type'] == 0:
        # Information-collection flow (not implemented).
        pass
    elif user['user']['type'] == 1:
        # Sign-in flow (not implemented).
        pass
    elif user['user']['type'] == 3:
        # Daily clock-in.
        # NOTE(review): njitLogin may be unbound here if every login attempt
        # raised — unchanged from the original behaviour.
        clock = AutoClock(njitLogin, user['user'])
        msg = clock.start()
    return msg
def main_handler(event, context):
    """Cloud-function entry point: run main() and report success."""
    main()
    return '执行成功!'
# Allow running the clock-in flow directly as a script.
if __name__=='__main__':
    main()
|
def pl(items):
    """Print each element of a list, one per line; return *items* unchanged.

    Fix: use the parenthesised print form so these single-argument prints
    behave identically under Python 2 and Python 3.
    """
    if type(items) == type(list()):
        for i in items:
            print(i)
    else:
        print("## pl: expected <type 'list'>, got %s" % type(items))
    return items
def pd(items):
    """Print each key | value pair of a dict; return *items* unchanged.

    Fixes: the error message said "## pl:" (copy-paste slip), and the
    prints use forms that produce the same output on Python 2 and 3.
    """
    if type(items) == type(dict()):
        for i in items.keys():
            print("%s | %s" % (i, items[i]))
    else:
        print("## pd: expected <type 'dict'>, got %s" % type(items))
    return items
|
from django.db import models
import jsonfield
from django.contrib.postgres.fields import JSONField
class Estimates(models.Model):
    """A sales quote with its line items and summary stored as JSON blobs."""
    # Two-letter status codes for the quote lifecycle.
    QUOTE_STATUS = (
        ('IQ', 'Initial Quote'),
        ('QS', 'Quote Send'),
        ('WP', 'Waiting for Payment'),
        ('QC', 'Completed'),
    )
    quote_id = models.AutoField(primary_key=True)
    quote_number = models.IntegerField(null= False)
    quote_date = models.DateField()
    customer_name = models.CharField(max_length= 255, null= False)
    # NOTE(review): "salse_person" is a typo for "sales_person", kept to
    # avoid a column rename/migration.
    salse_person = models.CharField(max_length= 255, null= False)
    # Bug fix: the previous default 'FIXED' was not a QUOTE_STATUS choice
    # and exceeded max_length=2; default to the initial state instead.
    quote_status = models.CharField(max_length= 2, choices= QUOTE_STATUS, default='IQ')
    quote_info = JSONField()
    quote_summary = JSONField()
    class Meta:
        # NOTE(review): "tbl_estiamtes" is misspelled but matches the
        # existing database table; renaming requires a migration.
        db_table = 'tbl_estiamtes'
|
"""Constants for the Toggl integration."""
DOMAIN = "toggl"
CONF_SENSOR = "sensor"
|
import pyautogui
from pyautogui import *
import ppadb
from ppadb.client import Client
from PIL import Image
import numpy
import time
from mss import mss
# Connect to the local ADB server (default host/port of `adb start-server`).
adb = Client(host='127.0.0.1', port=5037)
devices = adb.devices()
if len(devices) == 0:
    print("No device found")
    quit()
# All taps below are sent to the first attached device.
device = devices[0]
#device.shell('input tap 500 500')
class Fishing:
    """Automates a cast/reel fishing loop on an ADB-attached device.

    Screen state is detected with pyautogui template matching; taps are
    sent through the module-level `device` ADB handle.
    """

    def __init__(self, max_fishing):
        # Stop after this many catches.
        self.max_fishing = max_fishing
        # 0 = ready to cast, 1 = waiting to reel in.
        self.position = 0
        self.amount = 0

    def throw(self):
        """Tap the cast button when its on-screen template is visible."""
        throw_position = pyautogui.locateCenterOnScreen('image/cast.png', confidence=.8, region=(1000, 500, 200, 200))
        # Idiom fix: compare against None with `is not None`, not `!=`.
        if throw_position is not None:
            device.shell('input tap 1083 576')
            #pyautogui.click(throw_position)
            print("Casting bait")
            print("Waiting")
            self.position = 1

    def pull(self):
        """Tap the reel button when visible and count the catch."""
        pull_position = pyautogui.locateCenterOnScreen('image/reel.png', confidence=.9, region=(1000, 500, 200, 200))
        if pull_position is not None:
            device.shell('input tap 1060 548')
            #pyautogui.click(pull_position)
            print("Reel")
            self.amount += 1
            print("Caught ", self.amount)
            self.position = 0

    def fishing(self):
        """Alternate cast/reel until max_fishing catches are made."""
        while True:
            if self.amount >= self.max_fishing:
                break
            elif self.position == 0:
                self.throw()
            elif self.position == 1:
                self.pull()
if __name__ == "__main__":
print("Start Fishing")
max_fishing = 1
letsgo = Fishing(max_fishing)
letsgo.fishing()
|
def sortAsc(alist):
    """Sort dd/mm/yyyy date strings in place, oldest first.

    Each date is keyed by reversing its parts into yyyymmdd and comparing
    numerically.  Replaces the original O(n^2) bubble sort that re-parsed
    both dates on every comparison; list.sort is stable, so equal keys
    keep their original order exactly as before.
    """
    alist.sort(key=lambda d: int("".join(d.split("/")[::-1])))
def sortdesc(alist):
    """Sort dd/mm/yyyy date strings in place, newest first.

    Same keying as sortAsc (reverse parts to yyyymmdd, compare as int);
    the original O(n^2) bubble sort is replaced by a stable list.sort.
    """
    alist.sort(key=lambda d: int("".join(d.split("/")[::-1])), reverse=True)
alist = ["01/01/2001","01/01/2000","01/01/1995","01/01/2009","02/01/2001"]
sortAsc(alist)
print(alist)
sortdesc(alist)
print(alist)
|
#!/usr/bin/python
from __future__ import print_function
from corpkit.constants import STRINGTYPE, PYTHON_VERSION, INPUTFUNC
def structure_corpus(path_to_files, new_corpus_name='structured_corpus'):
    """
    Structure a corpus in some kind of sequence

    Copies every file from *path_to_files* into a subcorpus folder inside
    a new 'structured_<basename>' directory.

    NOTE(review): subcorpus_name is a literal placeholder ('what goes
    here?'), so every file lands in that one dummy subfolder — the
    intended grouping logic appears unfinished.
    """
    import corpkit
    import os
    import shutil
    base = os.path.basename(path_to_files)
    # The keyword argument is ignored: the name is always derived from the input.
    new_corpus_name = 'structured_' + base
    if not os.path.isdir(path_to_files):
        raise ValueError('Directory not found: %s' % path_to_files)
    if not os.path.exists(new_corpus_name):
        os.makedirs(new_corpus_name)
    files = os.listdir(path_to_files)
    for f in files:
        filepath = os.path.join(path_to_files, f)
        subcorpus_name = 'what goes here?'
        subcorpus_path = os.path.join(new_corpus_name, subcorpus_name)
        if not os.path.exists(subcorpus_path):
            os.makedirs(subcorpus_path)
        shutil.copy(filepath, subcorpus_path)
    print('Done!')
def download_large_file(proj_path, url, actually_download=True, root=False, **kwargs):
    """
    Download something to proj_path

    Returns (directory, downloaded_file_path); (None, None) when the user
    declines; None on download failure.

    For CoreNLP URLs the download is cached in ~/corenlp: an existing
    archive that passes zipfile's CRC test is reused, a corrupt one is
    deleted and re-fetched.
    """
    import corpkit
    import os
    import shutil
    import glob
    import sys
    import zipfile
    from time import localtime, strftime
    from corpkit.textprogressbar import TextProgressBar
    from corpkit.process import animator
    file_name = url.split('/')[-1]
    home = os.path.expanduser("~")
    # if it's corenlp, put it in home/corenlp
    # if that dir exists, check it for a zip file
    # if there's a zipfile and it works, move on
    # if there's a zipfile and it's broken, delete it
    if 'stanford' in url:
        downloaded_dir = os.path.join(home, 'corenlp')
        if not os.path.isdir(downloaded_dir):
            os.makedirs(downloaded_dir)
        else:
            poss_zips = glob.glob(os.path.join(downloaded_dir, 'stanford-corenlp-full*.zip'))
            if poss_zips:
                fullfile = poss_zips[-1]
                the_zip_file = zipfile.ZipFile(fullfile)
                # testzip() returns None when every member's CRC checks out.
                ret = the_zip_file.testzip()
                if ret is None:
                    return downloaded_dir, fullfile
                else:
                    os.remove(fullfile)
            #else:
            #    shutil.rmtree(downloaded_dir)
    else:
        downloaded_dir = os.path.join(proj_path, 'temp')
        try:
            os.makedirs(downloaded_dir)
        except OSError:
            pass
    fullfile = os.path.join(downloaded_dir, file_name)
    if actually_download:
        if not root:
            txt = 'CoreNLP not found. Download latest version (%s)? (y/n) ' % url
            selection = INPUTFUNC(txt)
            if 'n' in selection.lower():
                return None, None
        try:
            import requests
            # NOTE the stream=True parameter
            # NOTE(review): verify=False disables TLS certificate checks.
            r = requests.get(url, stream=True, verify=False)
            file_size = int(r.headers['content-length'])
            file_size_dl = 0
            block_sz = 8192
            showlength = file_size / block_sz
            thetime = strftime("%H:%M:%S", localtime())
            print('\n%s: Downloading ... \n' % thetime)
            par_args = {'printstatus': kwargs.get('printstatus', True),
                        'length': showlength}
            if not root:
                # Bug fix: `file_size_dl + 1 / block_sz` divided only the 1;
                # the intended value is the (1-based) block count.
                tstr = '%d/%d' % ((file_size_dl + 1) // block_sz, showlength)
                p = animator(None, None, init=True, tot_string=tstr, **par_args)
                animator(p, file_size_dl + 1, tstr)
            with open(fullfile, 'wb') as f:
                for chunk in r.iter_content(chunk_size=block_sz):
                    if chunk: # filter out keep-alive new chunks
                        f.write(chunk)
                        file_size_dl += len(chunk)
                        #print file_size_dl * 100.0 / file_size
                        if kwargs.get('note'):
                            kwargs['note'].progvar.set(file_size_dl * 100.0 / int(file_size))
                        else:
                            tstr = '%d/%d' % (file_size_dl / block_sz, showlength)
                            animator(p, file_size_dl / block_sz, tstr, **par_args)
                        if root:
                            root.update()
        except Exception as err:
            import traceback
            print(traceback.format_exc())
            thetime = strftime("%H:%M:%S", localtime())
            print('%s: Download failed' % thetime)
            # Best-effort close; `except Exception` (not bare except) so
            # KeyboardInterrupt is no longer swallowed.
            try:
                f.close()
            except Exception:
                pass
            if root:
                root.update()
            return
        if kwargs.get('note'):
            kwargs['note'].progvar.set(100)
        else:
            p.animate(int(file_size))
    thetime = strftime("%H:%M:%S", localtime())
    print('\n%s: Downloaded successully.' % thetime)
    try:
        f.close()
    except Exception:
        pass
    return downloaded_dir, fullfile
def extract_cnlp(fullfilepath, corenlppath=False, root=False):
    """
    Extract corenlp zip file
    """
    import corpkit
    import zipfile
    import os
    from time import localtime, strftime
    print('%s: Extracting CoreNLP files ...' % strftime("%H:%M:%S", localtime()))
    if root:
        root.update()
    # Default extraction target is ~/corenlp.
    if corenlppath is False:
        corenlppath = os.path.join(os.path.expanduser("~"), 'corenlp')
    with zipfile.ZipFile(fullfilepath) as zf:
        zf.extractall(corenlppath)
    print('%s: CoreNLP extracted. ' % strftime("%H:%M:%S", localtime()))
def get_corpus_filepaths(projpath=False, corpuspath=False):
    """Write a newline-separated list of every .txt file under *corpuspath*.

    Returns the path of the written filelist, or False when no .txt
    files are found.
    """
    import corpkit
    import fnmatch
    import os
    matches = []
    for root, dirnames, filenames in os.walk(corpuspath):
        matches.extend(os.path.join(root, filename)
                       for filename in fnmatch.filter(filenames, '*.txt'))
    if not matches:
        return False
    matchstring = '\n'.join(matches)
    # maybe not good:
    if projpath is False:
        projpath = os.path.dirname(os.path.abspath(corpuspath.rstrip('/')))
    corpname = os.path.basename(corpuspath)
    fp = os.path.join(projpath, 'data', corpname + '-filelist.txt')
    # Collapse an accidental data/data nesting.
    if os.path.join('data', 'data') in fp:
        fp = fp.replace(os.path.join('data', 'data'), 'data')
    with open(fp, "w") as f:
        f.write(matchstring + '\n')
    return fp
def check_jdk():
    """Check for a Java/OpenJDK"""
    import corpkit
    import subprocess
    from subprocess import PIPE, STDOUT, Popen
    # add any other version string to here
    javastrings = ['java version "1.8', 'openjdk version "1.8']
    proc = Popen(["java", "-version"], stdout=PIPE, stderr=PIPE)
    # `java -version` reports on stderr, not stdout.
    stderr_output = proc.communicate()[1]
    encoded = stderr_output.decode(encoding='utf-8').lower()
    return any(j in encoded for j in javastrings)
def parse_corpus(proj_path=False,
                 corpuspath=False,
                 filelist=False,
                 corenlppath=False,
                 operations=False,
                 only_tokenise=False,
                 root=False,
                 stdout=False,
                 nltk_data_path=False,
                 memory_mb=2000,
                 copula_head=True,
                 multiprocessing=False,
                 **kwargs
                ):
    """
    Create a CoreNLP-parsed and/or NLTK tokenised corpus

    Builds '<corpus>-parsed' (CoreNLP XML) or '<corpus>-tokenised'
    (pickled NLTK tokens) under <proj_path>/data, downloading and
    extracting CoreNLP on demand.  Returns the new corpus path, or False
    when the target folder already holds output.

    NOTE(review): this body uses Python-2-only constructs (reload(sys),
    cPickle) — it will not run unmodified on Python 3.
    """
    import corpkit
    import subprocess
    from subprocess import PIPE, STDOUT, Popen
    from corpkit.process import get_corenlp_path
    import os
    import sys
    import re
    import chardet
    from time import localtime, strftime
    import time
    fileparse = kwargs.get('fileparse', False)
    url = 'http://nlp.stanford.edu/software/stanford-corenlp-full-2015-12-09.zip'
    if not only_tokenise:
        if not check_jdk():
            print('Need latest Java.')
            return
    curdir = os.getcwd()
    note = kwargs.get('note', False)
    if nltk_data_path:
        if only_tokenise:
            import nltk
            if nltk_data_path not in nltk.data.path:
                nltk.data.path.append(nltk_data_path)
            from nltk import word_tokenize as tokenise
    if proj_path is False:
        proj_path = os.path.dirname(os.path.abspath(corpuspath.rstrip('/')))
    basecp = os.path.basename(corpuspath)
    # Decide where output goes: alongside the file (fileparse) or in data/.
    if fileparse:
        new_corpus_path = os.path.dirname(corpuspath)
    else:
        if only_tokenise:
            new_corpus_path = os.path.join(proj_path, 'data', '%s-tokenised' % basecp)
        else:
            new_corpus_path = os.path.join(proj_path, 'data', '%s-parsed' % basecp)
    # todo:
    # this is not stable
    if os.path.join('data', 'data') in new_corpus_path:
        new_corpus_path = new_corpus_path.replace(os.path.join('data', 'data'), 'data')
    # this caused errors when multiprocessing
    # it used to be isdir, but supposedly there was a file there
    # i don't see how it's possible ...
    if not os.path.exists(new_corpus_path):
        os.makedirs(new_corpus_path)
    else:
        # Bail out early if output already exists (unless multiprocessing).
        if not os.path.isfile(new_corpus_path):
            fs = os.listdir(new_corpus_path)
            if not multiprocessing:
                if not only_tokenise:
                    if any([f.endswith('.xml') for f in fs]):
                        print('Folder containing xml already exists: "%s-parsed"' % basecp)
                        return False
                else:
                    if any([f.endswith('.p') for f in fs]):
                        print('Folder containing tokens already exists: "%s-tokenised"' % basecp)
                        return False
    corenlppath = get_corenlp_path(corenlppath)
    # Download and unpack CoreNLP when no installation is found.
    if not corenlppath:
        cnlp_dir = os.path.join(os.path.expanduser("~"), 'corenlp')
        corenlppath, fpath = download_large_file(cnlp_dir, url,
                                                 root=root,
                                                 note=note,
                                                 actually_download=True,
                                                 custom_corenlp_dir=corenlppath)
        # User declined the download: clean up the half-made corpus dirs.
        if corenlppath is None and fpath is None:
            import shutil
            shutil.rmtree(new_corpus_path)
            shutil.rmtree(new_corpus_path.replace('-parsed', ''))
            os.remove(new_corpus_path.replace('-parsed', '-filelist.txt'))
            raise ValueError('CoreNLP needed to parse texts.')
        extract_cnlp(fpath)
        import glob
        globpath = os.path.join(corenlppath, 'stanford-corenlp*')
        corenlppath = [i for i in glob.glob(globpath) if os.path.isdir(i)]
        if corenlppath:
            corenlppath = corenlppath[-1]
        else:
            raise ValueError('CoreNLP installation failed for some reason. Try manual download.')
    # if not gui, don't mess with stdout
    if stdout is False:
        stdout = sys.stdout
    if not only_tokenise:
        # --- CoreNLP parsing branch ---
        os.chdir(corenlppath)
        if root:
            root.update_idletasks()
        # NOTE(review): reload(sys) is a Python 2 idiom.
        reload(sys)
        if memory_mb is False:
            memory_mb = 2024
        if operations is False:
            operations = 'tokenize,ssplit,pos,lemma,parse,ner,dcoref'
        if isinstance(operations, list):
            operations = ','.join([i.lower() for i in operations])
        with open(filelist, 'r') as fo:
            dat = fo.read()
        num_files_to_parse = len([l for l in dat.splitlines() if l])
        # get corenlp version number
        reg = re.compile(r'stanford-corenlp-([0-9].[0-9].[0-9])-javadoc.jar')
        fver = next(re.search(reg, s).group(1) for s in os.listdir('.') if re.search(reg, s))
        if fver == '3.6.0':
            extra_jar = 'slf4j-api.jar:slf4j-simple.jar:'
        else:
            extra_jar = ''
        # Build the java invocation for the CoreNLP pipeline.
        arglist = ['java', '-cp',
                   'stanford-corenlp-%s.jar:stanford-corenlp-%s-models.jar:xom.jar:joda-time.jar:%sjollyday.jar:ejml-0.23.jar' % (fver, fver, extra_jar),
                   '-Xmx%sm' % str(memory_mb),
                   'edu.stanford.nlp.pipeline.StanfordCoreNLP',
                   '-annotators',
                   operations,
                   '-filelist', filelist,
                   '-noClobber',
                   '-outputExtension', '.xml',
                   '-outputDirectory', new_corpus_path]
        if copula_head:
            arglist.append('--parse.flags')
            arglist.append(' -makeCopulaHead')
        try:
            proc = subprocess.Popen(arglist, stdout=sys.stdout)
        # maybe a problem with stdout. sacrifice it if need be
        except:
            proc = subprocess.Popen(arglist)
        #p = TextProgressBar(num_files_to_parse)
        # Poll the parser, reporting progress by counting output xml files.
        while proc.poll() is None:
            sys.stdout = stdout
            thetime = strftime("%H:%M:%S", localtime())
            if not fileparse:
                num_parsed = len([f for f in os.listdir(new_corpus_path) if f.endswith('.xml')])
                if num_parsed == 0:
                    if root:
                        print('%s: Initialising parser ... ' % (thetime))
                if num_parsed > 0 and (num_parsed + 1) <= num_files_to_parse:
                    if root:
                        print('%s: Parsing file %d/%d ... ' % \
                             (thetime, num_parsed + 1, num_files_to_parse))
                    if kwargs.get('note'):
                        kwargs['note'].progvar.set((num_parsed) * 100.0 / num_files_to_parse)
                #p.animate(num_parsed - 1, str(num_parsed) + '/' + str(num_files_to_parse))
            time.sleep(1)
            if root:
                root.update()
    else:
        # --- NLTK tokenising branch ---
        from nltk import word_tokenize as tokenise
        # tokenise each file
        # NOTE(review): cPickle is Python 2 only.
        import cPickle as pickle
        fs = open(filelist).read().splitlines()
        dirs = sorted(list(set([os.path.basename(os.path.dirname(f)) for f in fs])))
        one_big_corpus = len(dirs) == 0
        if any(os.path.isdir(os.path.join(new_corpus_path, d)) for d in dirs):
            thetime = strftime("%H:%M:%S", localtime())
            print('%s: Directory already exists. Delete it if need be.' % thetime)
            return False
        for d in dirs:
            os.makedirs(os.path.join(new_corpus_path, d))
        nfiles = len(fs)
        thetime = strftime("%H:%M:%S", localtime())
        print('%s: Tokenising ... ' % (thetime))
        for index, f in enumerate(fs):
            with open(f, 'r') as fo:
                data = fo.read()
            # Guess the encoding per file; undecodable bytes are dropped.
            enc = chardet.detect(data)
            enc_text = data.decode(enc['encoding'], errors='ignore')
            tokens = tokenise(enc_text)
            thedir = os.path.basename(os.path.dirname(f))
            newname = os.path.basename(f).replace('.txt', '-tokenised.p')
            if one_big_corpus:
                pth = os.path.join(new_corpus_path, newname)
            else:
                pth = os.path.join(new_corpus_path, thedir, newname)
            with open(pth, "wb") as fo:
                pickle.dump(tokens, fo)
            if kwargs.get('note'):
                kwargs['note'].progvar.set((index + 1) * 100.0 / nfiles)
            if root:
                root.update()
    #p.animate(num_files_to_parse)
    if kwargs.get('note'):
        kwargs['note'].progvar.set(100)
    sys.stdout = stdout
    thetime = strftime("%H:%M:%S", localtime())
    print('%s: Parsing finished. Moving parsed files into place ...' % thetime)
    os.chdir(curdir)
    return new_corpus_path
def move_parsed_files(proj_path, corpuspath, new_corpus_path):
    """
    Make parsed files follow existing corpus structure

    Recreates the subdirectory tree of *corpuspath* under
    *new_corpus_path*, then moves each flat .xml output file into the
    subdirectory holding its source .txt.
    """
    import corpkit
    import shutil
    import os
    import fnmatch
    cwd = os.getcwd()
    basecp = os.path.basename(corpuspath)
    dir_list = []
    # go through old path, make file list
    for path, dirs, files in os.walk(corpuspath):
        for bit in dirs:
            # is the last bit of the line below windows safe?
            dir_list.append(os.path.join(path, bit).replace(corpuspath, '')[1:])
    os.chdir(new_corpus_path)
    for d in dir_list:
        os.makedirs(d)
    os.chdir(cwd)
    # make list of xml filenames
    parsed_fs = [f for f in os.listdir(new_corpus_path) if f.endswith('.xml')]
    # make a dictionary of the right paths
    pathdict = {}
    for rootd, dirnames, filenames in os.walk(corpuspath):
        for filename in fnmatch.filter(filenames, '*.txt'):
            pathdict[filename] = rootd
    # move each file
    for f in parsed_fs:
        noxml = f.replace('.xml', '')
        right_dir = pathdict[noxml].replace(corpuspath, new_corpus_path)
        # get rid of the temp adding of dirname to fname
        #short_name = f.replace('-%s.txt.xml' % os.path.basename(right_dir), '.txt.xml')
        # NOTE(review): right_dir already contains new_corpus_path, so this
        # join relies on os.path.join discarding earlier components when a
        # later one is absolute — confirm on relative corpus paths.
        os.rename(os.path.join(new_corpus_path, f),
                  os.path.join(new_corpus_path, right_dir, f))
    return new_corpus_path
def corenlp_exists(corenlppath=False):
    """Return True when a usable CoreNLP install (with models jar) is found.

    Fixes: removed the unreachable trailing `return True` (both branches
    of the if/else already returned) and the dead `len(javalib) == 0`
    check, which tested the length of a path string that is never empty.
    """
    import corpkit
    import os
    # NOTE(review): kept from the original but currently unused.
    important_files = ['stanford-corenlp-3.6.0-javadoc.jar', 'stanford-corenlp-3.6.0-models.jar',
                       'stanford-corenlp-3.6.0-sources.jar', 'stanford-corenlp-3.6.0.jar']
    if corenlppath is False:
        home = os.path.expanduser("~")
        corenlppath = os.path.join(home, 'corenlp')
    if not os.path.isdir(corenlppath):
        return False
    # An install dir is any subdirectory that ships jollyday.jar.
    find_install = [d for d in os.listdir(corenlppath)
                    if os.path.isdir(os.path.join(corenlppath, d))
                    and os.path.isfile(os.path.join(corenlppath, d, 'jollyday.jar'))]
    if not find_install:
        return False
    javalib = os.path.join(corenlppath, find_install[0])
    # The models jar is the piece that is most often missing.
    if not any(f.endswith('-models.jar') for f in os.listdir(javalib)):
        return False
    return True
def get_filepaths(a_path, ext='txt'):
    """make list of txt files in a_path and remove non txt files"""
    import os
    # A single file is returned as-is, with no filtering.
    if os.path.isfile(a_path):
        return [a_path]
    files = []
    for root, dirs, fs in os.walk(a_path):
        for fname in fs:
            # Optional extension filter (pass ext='' / None to disable).
            if ext and not fname.endswith('.' + ext):
                continue
            # Skip unidentified-speaker files and hidden files.
            if 'Unidentified' in fname or 'unknown' in fname or fname.startswith('.'):
                continue
            files.append(os.path.join(root, fname))
    return files
def make_no_id_corpus(pth, newpth):
    """make version of pth without ids

    Copies the corpus at *pth* to *newpth*, rewriting every line that
    matches 'SPEAKER: text' to just 'text', and collecting the speaker
    names seen.  If no names are found at all, *newpth* is removed again.
    """
    import os
    import re
    import shutil
    from build import get_filepaths
    # define regex broadly enough to accept timestamps, locations if need be
    idregex = re.compile(r'(^.*?):\s+(.*$)')
    # copytree refuses to overwrite; wipe and retry on collision.
    try:
        shutil.copytree(pth, newpth)
    except OSError:
        shutil.rmtree(newpth)
        shutil.copytree(pth, newpth)
    files = get_filepaths(newpth)
    names = []
    for f in files:
        good_data = []
        with open(f, 'r') as fo:
            data = fo.read().splitlines()
        for datum in data:
            matched = re.search(idregex, datum)
            if matched:
                names.append(matched.group(1))
                good_data.append(matched.group(2))
            else:
                # Line without an id prefix: keep it verbatim.
                names.append('UNIDENTIFIED')
                good_data.append(datum)
        with open(f, "w") as fo:
            fo.write('\n'.join(good_data))
    from time import localtime, strftime
    thetime = strftime("%H:%M:%S", localtime())
    if len(names) == 0:
        print('%s: No speaker names found. Turn off speaker segmentation.' % thetime)
        shutil.rmtree(newpth)
    else:
        # Print the full name list only when it is short.
        if len(sorted(set(names))) < 19:
            print('%s: Speaker names found: %s' % (thetime, ', '.join(sorted(set(names)))))
        else:
            print('%s: Speaker names found: %s ... ' % (thetime, ', '.join(sorted(set(names[:20])))))
def add_ids_to_xml(corpuspath, root=False, note=False):
    """
    Add ids to the xml in corpuspath
    needs the raw files to be in the same dir as corpuspath, without
    '-parsed' in the dir name
    also needs the id files to be in the dir, with '-parsed' changed
    to -cleaned

    For each parsed xml file, every sentence's character offsets are used
    to find its source line in the stripped text; the matching line in the
    id-bearing text supplies the speaker name, appended to the sentence
    as a <speakername> element.
    """
    import os
    import re
    from time import strftime, localtime
    from lxml import etree as ET
    files = get_filepaths(corpuspath, ext='xml')
    if note:
        note.progvar.set(0)
    thetime = strftime("%H:%M:%S", localtime())
    print('%s: Processing speaker IDs ...' % thetime)
    if root:
        root.update()
    for i, f in enumerate(files):
        if note:
            note.progvar.set(i * 100.0 / len(files))
        thetime = strftime("%H:%M:%S", localtime())
        print('%s: Processing speaker IDs (%d/%d)' % (thetime, i, len(files)))
        if root:
            root.update()
        # quick check for speakernames already existing
        from itertools import islice
        with open(f, 'r') as xmlf:
            # Only sniff the first 1000 lines; enough to spot a prior run.
            head = list(islice(xmlf, 1000))
        if '<speakername>' in '\n'.join(head):
            continue
        tree = ET.parse(f)
        xmlroot = tree.getroot()
        # CoreNLP layout: root[0][0] is the <sentences> element.
        sents = xmlroot[0][0]
        # open the unparsed version of the file, read into memory
        stripped_txtfile = f.replace('.xml', '').replace('-parsed', '')
        old_txt = open(stripped_txtfile, 'r')
        stripped_txtdata = old_txt.read()
        old_txt.close()
        # open the unparsed version with speaker ids
        id_txtfile = f.replace('.xml', '').replace('-stripped-parsed', '')
        idttxt = open(id_txtfile, 'r')
        id_txtdata = idttxt.read()
        idttxt.close()
        for s in sents:
            # don't get corefs
            tokens = [x for x in s.iter('token')]
            # token children [2]/[3] hold CharacterOffsetBegin/End.
            start = int(tokens[0][2].text)
            end = int(tokens[-1][3].text)
            # extract this sentence from the unparsed version
            sent = stripped_txtdata[start:end]
            # find out line number
            # sever at start of match
            cut_old_text = stripped_txtdata[:start]
            line_index = cut_old_text.count('\n')
            # lookup this text
            with_id = id_txtdata.splitlines()[line_index]
            split_line = with_id.split(': ', 1)
            if len(split_line) > 1:
                speakerid = split_line[0]
            else:
                speakerid = 'UNIDENTIFIED'
            newtag = ET.Element('speakername')
            newtag.text = speakerid
            newtag.tail = '\n '
            s.append(newtag)
        tree.write(f, pretty_print=True)
        # make changes
        #with open(f, "wb") as fopen:
        #    try:
        #        fopen.write(bytes(html.encode('utf-8')))
        #    except UnicodeDecodeError:
        #        fopen.write(bytes(html))
    if note:
        note.progvar.set(100)
def get_speaker_names_from_xml_corpus(path):
    """
    Use regex to get speaker names from xml without parsing it
    """
    import os
    import re
    # parsing html with regular expression! :)
    speakid = re.compile(r'<speakername>[\s\n]*?([^\s\n]+)[\s\n]*?<.speakername>', re.MULTILINE)

    def names_in_file(filepath):
        """Return sorted unique speaker names found in one file (or None)."""
        with open(filepath, 'r') as fo:
            found = re.findall(speakid, fo.read())
        if found:
            return list(sorted(set(name.strip() for name in found)))

    # Collect target files: everything under a directory, or one file.
    if os.path.isdir(path):
        candidates = [os.path.join(walked_root, fname)
                      for walked_root, dirs, fnames in os.walk(path)
                      for fname in fnames]
    elif os.path.isfile(path):
        candidates = [path]
    else:
        candidates = []
    names = []
    for filepath in candidates:
        for name in names_in_file(filepath) or []:
            if name not in names:
                names.append(name)
    return list(sorted(set(names)))
def rename_all_files(dirs_to_do):
    """get rid of the inserted dirname in filenames after parsing

    Fix: the tokenised ('.p') and plain ('.txt') extensions carried a
    leading dot, unlike 'txt.xml', producing match patterns such as
    '-sub..p' that never occur in real filenames (and '..p'/'..txt'
    endswith filters inside get_filepaths).  Extensions are now dot-free.
    """
    import os
    if isinstance(dirs_to_do, STRINGTYPE):
        dirs_to_do = [dirs_to_do]
    for d in dirs_to_do:
        if d.endswith('-parsed'):
            ext = 'txt.xml'
        elif d.endswith('-tokenised'):
            ext = 'p'
        else:
            ext = 'txt'
        fs = get_filepaths(d, ext)
        for f in fs:
            fname = os.path.basename(f)
            justdir = os.path.dirname(f)
            subcorpus = os.path.basename(justdir)
            # Strip the '-<subcorpus>' infix that parsing inserted.
            newname = fname.replace('-%s.%s' % (subcorpus, ext), '.%s' % ext)
            os.rename(f, os.path.join(justdir, newname))
def flatten_treestring(tree):
    """Strip bracketing from a Penn-style parse tree, leaving plain text."""
    import re
    # Drop each opening bracket with its node label, then all closing brackets.
    without_labels = re.sub(r'\(.*? ', '', tree).replace(')', '')
    # Re-attach punctuation and clitics that tokenisation split off.
    return without_labels.replace('$ ', '$').replace('`` ', '``').replace(' ,', ',').replace(' .', '.').replace("'' ", "''").replace(" n't", "n't").replace(" 're","'re").replace(" 'm","'m").replace(" 's","'s").replace(" 'd","'d").replace(" 'll","'ll").replace(' ', ' ')
def can_folderise(folder):
    """
    Check if corpus can be put into folders
    """
    import os
    from glob import glob
    if os.path.isfile(folder):
        return False
    txt_files = glob(os.path.join(folder, '*.txt'))
    has_subdirs = any(os.path.isdir(entry) for entry in glob(os.path.join(folder, '*')))
    # Folderisable: several loose .txt files and no existing subfolders.
    return len(txt_files) > 1 and not has_subdirs
def folderise(folder):
    """
    Move each file into a folder
    """
    import os
    import shutil
    from glob import glob
    from corpkit.process import makesafe
    for path in glob(os.path.join(folder, '*.txt')):
        # Folder name = sanitised file stem.
        stem = os.path.splitext(os.path.basename(path))[0]
        target = os.path.join(folder, makesafe(stem))
        if not os.path.exists(target):
            os.makedirs(target)
        shutil.move(path, os.path.join(target))
|
# Every email consists of a local name and a domain name,
# separated by the @ sign.
#
# For example, in alice@leetcode.com, alice is the local name,
# and leetcode.com is the domain name.
#
# Besides lowercase letters, these emails may contain '.'s or '+'s.
#
# If you add periods ('.') between some characters in the local name part
# of an email address, mail sent there will be forwarded to the same
# address without dots in the local name.
# For example, "alice.z@leetcode.com" and "alicez@leetcode.com" forward to
# the same email address.
# (Note that this rule does not apply for domain names.)
#
# If you add a plus ('+') in the local name, everything
# after the first plus sign will be ignored.
# This allows certain emails to be filtered,
# for example m.y+name@email.com will be forwarded to my@email.com.
# (Again, this rule does not apply for domain names.)
#
# It is possible to use both of these rules at the same time.
#
# Given a list of emails, we send one email to each address in the list.
# How many different addresses actually receive mails?
class Solution:
    def numUniqueEmails(self, emails):
        """Count distinct delivery addresses after canonicalising each email.

        Local-name rules: dots are dropped and everything from the first
        '+' onward is ignored; the domain part is left untouched.
        """
        canonical = set()
        for email in emails:
            local, domain = email.split("@")[0], email.split("@")[1]
            local = local.split("+")[0].replace(".", "")
            canonical.add(local + "@" + domain)
        return len(canonical)
if __name__ == "__main__":
testinput = ["test.email+alex@leetcode.com",
"test.e.mail+bob.cathy@leetcode.com",
"testemail+david@lee.tcode.com"]
print(Solution.numUniqueEmails(Solution, testinput))
|
import os
from glob import glob
import numpy as np
from tqdm import tqdm
from PIL import Image
# skimage
from skimage import io, color
import skimage.transform as sktrsfm
from sklearn.metrics import precision_score, recall_score
# Pytorch
import torch
import torch.nn.functional as F
from torch.utils import data
import torchvision
from torchvision import transforms
from random import randrange
# for parsing XML image annotations
import xml.etree.ElementTree as ET
# debug
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def xml_to_numpy(anno_path):
    """Convert Pascal-VOC-style XML bounding-box annotations to .npy files.

    For each .xml under *anno_path*, reads the first object's bndbox and
    saves [xmin, ymin, xmax, ymax] as int32 next to it (same stem, .npy).
    """
    for anno_file in tqdm(sorted(glob(os.path.join(anno_path, '*.xml')))):
        box_node = ET.parse(anno_file).getroot().find('object').find('bndbox')
        coords = np.zeros(4)
        for i, tag in enumerate(('xmin', 'ymin', 'xmax', 'ymax')):
            coords[i] = int(box_node.find(tag).text)
        np.save(os.path.splitext(anno_file)[0] + '.npy', coords.astype('int32'))
    print('done!')
class CIVDS(data.Dataset): # CIVDS: Children in Vehicles Dataset
    """Dataset yielding (image, body prob, body box, face prob, face box).

    b_prob/f_prob indicate whether a body/face is present; b_box/f_box are
    the matching bounding boxes.  The optional transform is applied to the
    image only; the other fields are converted to tensors on access.
    """
    def __init__(self, img, b_prob, b_box, f_prob, f_box, trsfm=None):
        self.img = img
        self.b_prob, self.b_box = b_prob, b_box
        self.f_prob, self.f_box = f_prob, f_box
        self.transforms = trsfm

    def __len__(self):
        return len(self.img)

    def __getitem__(self, idx):
        img = self.img[idx]
        if self.transforms is not None:
            img = self.transforms(img)
        return (img,
                torch.tensor(self.b_prob[idx]),
                torch.tensor(self.b_box[idx]),
                torch.tensor(self.f_prob[idx]),
                torch.tensor(self.f_box[idx]))
class CIVDS_bnet(data.Dataset): # CIVDS: Children in Vehicles Dataset
    """Body-only variant of CIVDS: yields (image, body prob, body box).

    The optional transform is applied to the image only; prob and box are
    converted to tensors on access.
    """
    def __init__(self, img, b_prob, b_box, trsfm=None):
        self.img = img
        self.b_prob, self.b_box = b_prob, b_box
        self.transforms = trsfm

    def __len__(self):
        return len(self.img)

    def __getitem__(self, idx):
        img = self.img[idx]
        if self.transforms is not None:
            img = self.transforms(img)
        return img, torch.tensor(self.b_prob[idx]), torch.tensor(self.b_box[idx])
def bbox_intersect(a, b):
    """Return the overlap of boxes a and b as [xmin, ymin, xmax, ymax] (int32)."""
    xmin = max(a[0], b[0])
    ymin = max(a[1], b[1])
    xmax = min(a[2], b[2])
    ymax = min(a[3], b[3])
    return np.array([xmin, ymin, xmax, ymax], dtype='int32')
def valid_bbox(a):
    """A box is valid when xmin <= xmax and ymin <= ymax."""
    return a[0] <= a[2] and a[1] <= a[3]
def get_iou(a, b, eps=1e-5):
    """Intersection-over-union of two boxes; 0.0 when they do not overlap."""
    # inline intersection (same int32 truncation as bbox_intersect)
    inter = np.array([max(a[0], b[0]), max(a[1], b[1]),
                      min(a[2], b[2]), min(a[3], b[3])], dtype='int32')
    x1, y1, x2, y2 = inter
    w = x2 - x1
    h = y2 - y1
    if w < 0 or h < 0:
        return 0.0
    overlap = w * h
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    # eps guards against a zero combined area
    return overlap / (area_a + area_b - overlap + eps)
def upd_bbox(oboxa, niba, t_size):
    """Re-express box `oboxa` in the frame of square crop `niba`, scaled to t_size.

    Assumes nib is a square box; inputs are not modified.
    """
    obox, nib = oboxa.copy(), niba.copy()
    scl = t_size / (nib[2] - nib[0])
    # clip the box to the crop, then translate into crop coordinates
    obox = bbox_intersect(obox, nib)
    assert valid_bbox(obox)
    shifted = [obox[0] - nib[0], obox[1] - nib[1],
               obox[2] - nib[0], obox[3] - nib[1]]
    return np.array([int(round(c * scl)) for c in shifted], dtype='int32')
def get_images(img_path, anno_path, valid_percent=0.1, resize_shape=(12, 12),
               add_orig=True, gen_xtra_neg=True, gen_xtra_body=True, gen_xtra_face=True):
    """Load images + annotations and build augmented train/validation splits.

    img_path      : directory of 400x400 *.jpg images ("n_*" prefix = no child)
    anno_path     : directory of matching *.npy annotations; each file holds
                    [body_box, face_box]; a face box of all zeros means no face
    valid_percent : fraction of the shuffled samples held out for validation
    resize_shape  : output size of every (cropped) sample
    add_orig      : include the resized original image as a sample
    gen_xtra_neg  : add random crops overlapping neither body nor face
    gen_xtra_body : add random crops partially overlapping the body
    gen_xtra_face : add random crops containing the full face

    Returns train lists then validation lists:
    (imgs, b_probs, b_boxes, f_probs, f_boxes) x 2
    """
    scale = resize_shape[0] / 400 # assuming that original image size is 400x400
    # get image file & annotation names
    file_names = sorted(glob(os.path.join(img_path, '*.jpg')))
    anno_names = sorted(glob(os.path.join(anno_path, '*.npy')))
    anno_names.extend([''] * (len(file_names) - len(anno_names))) # match length
    imgs, b_probs, b_boxes, f_probs, f_boxes = [], [], [], [], []
    dbg_i = -1 # debugging index
    for cur_file, cur_anno in tqdm(zip(file_names, anno_names), total=len(file_names)):
        dbg_i += 1
        img = io.imread(cur_file)
        # NOTE(review): skimage usually loads grayscale images as 2-D arrays,
        # so this shape[-1] == 1 check may never fire for them — confirm.
        if img.shape[-1] == 1:
            img = color.grey2rgb(img)
        img = torchvision.transforms.ToPILImage()(img.astype(np.uint8))
        # filename prefix before the first '_' encodes the age label; 'n' = no child
        age = os.path.splitext(os.path.basename(cur_file))[0].split('_')[0]
        # add original (raw) image
        if age == 'n': # no child in image
            if add_orig:
                b_probs.append(np.array([0, 1], dtype='float32'))
                b_boxes.append(np.array([0,0,0,0], dtype='int32'))
                f_probs.append(np.array([0, 1], dtype='float32'))
                f_boxes.append(np.array([0,0,0,0], dtype='int32'))
        else:
            # annotation layout: o[0] = body box, o[1] = face box
            o = np.load(cur_anno)
            o_bb, o_fb = o[0], o[1]
            if add_orig:
                b_probs.append(np.array([1, 0], dtype='float32'))
                b_boxes.append(np.array([int(round(k*scale)) for k in o_bb], dtype='int32'))
                if (o_fb == np.array([0,0,0,0])).all(): # no face
                    f_probs.append(np.array([0, 1], dtype='float32'))
                else:
                    f_probs.append(np.array([1, 0], dtype='float32'))
                f_boxes.append(np.array([int(round(k*scale)) for k in o_fb], dtype='int32'))
        if add_orig:
            imgs.append(img.resize(resize_shape, resample=Image.LANCZOS))
        # generate additional negative samples
        # NOTE(review): o_bb/o_fb are referenced below even when age == 'n',
        # so they carry over from the previous iteration — if the FIRST file
        # is a negative, this raises NameError; confirm the file ordering.
        if gen_xtra_neg:
            # shrink the crop window geometrically (factor ~0.709) down to 12 px
            ct_size = 100
            while ct_size >= 12:
                mmax = 400 - ct_size
                for i in range(0, 1):
                    cx, cy = 0, 0
                    while True:
                        # rejection-sample a crop nearly disjoint from body & face
                        cx, cy = randrange(mmax), randrange(mmax)
                        nbox = [cx, cy, cx+ct_size, cy+ct_size]
                        if (get_iou(nbox, o_bb) < 0.10) and (get_iou(nbox, o_fb) < 0.05):
                            break
                    imgs.append(img.crop(nbox).resize(resize_shape, resample=Image.LANCZOS))
                    b_probs.append(np.array([0, 1], dtype='float32'))
                    b_boxes.append(np.array([0,0,0,0], dtype='int32'))
                    f_probs.append(np.array([0, 1], dtype='float32'))
                    f_boxes.append(np.array([0,0,0,0], dtype='int32'))
                ct_size = int(ct_size * 0.709)
        if age == 'n':
            continue
        # generate additional positive body samples
        if gen_xtra_body:
            ct_size = 300
            while ct_size >= 200:
                mmax = 400 - ct_size
                for i in range(0, 3):
                    cx, cy, nbox = 0, 0, []
                    trycnt, success = 0, False
                    while trycnt < 20:
                        cx, cy = randrange(mmax), randrange(mmax)
                        nbox = [cx, cy, cx+ct_size, cy+ct_size]
                        c_iou = get_iou(nbox, o_bb)
                        # soft body-confidence label graded by IOU with the true box
                        if 0.1 <= c_iou < 0.2:
                            b_probs.append(np.array([0.2, 0], dtype='float32'))
                            success = True
                            break
                        elif 0.2 <= c_iou < 0.4:
                            b_probs.append(np.array([0.5, 0], dtype='float32'))
                            success = True
                            break
                        elif 0.4 <= c_iou < 0.7:
                            b_probs.append(np.array([0.8, 0], dtype='float32'))
                            success = True
                            break
                        elif 0.7 <= c_iou:
                            b_probs.append(np.array([1, 0], dtype='float32'))
                            success = True
                            break
                        trycnt += 1
                    if success:
                        imgs.append(img.crop(nbox).resize(resize_shape, resample=Image.LANCZOS))
                        b_boxes.append(upd_bbox(o_bb, nbox, resize_shape[0]))
                        # add face annotation if face is present as well
                        if (o_fb == np.array([0,0,0,0])).all(): # no face
                            f_probs.append(np.array([0, 1], dtype='float32'))
                            f_boxes.append(np.array([0,0,0,0], dtype='int32'))
                        else:
                            nfb = bbox_intersect(o_fb, nbox)
                            if valid_bbox(nfb):
                                # grade face confidence by how much of the face survived the crop
                                nfb_iou = get_iou(nfb, o_fb)
                                if nfb_iou < 0.35: # low confidence partial face
                                    f_probs.append(np.array([0.5, 0], dtype='float32'))
                                elif 0.35 <= nfb_iou < 0.5: # mid confidence partial face
                                    f_probs.append(np.array([0.8, 0], dtype='float32'))
                                else: # pretty much full-face
                                    f_probs.append(np.array([0.99, 0], dtype='float32'))
                                f_boxes.append(upd_bbox(nfb, nbox, resize_shape[0]))
                            else:
                                f_probs.append(np.array([0, 1], dtype='float32'))
                                f_boxes.append(np.array([0,0,0,0], dtype='int32'))
                    else:
                        print("failed to select a body with appropriate IOU!")
                        assert False
                    """
                    # debug code
                    fig,ax = plt.subplots(1)
                    tnb = upd_bbox(o_bb, nbox, nbox[2]-nbox[0])
                    rect = patches.Rectangle(
                        (tnb[0], tnb[1]),
                        tnb[2]-tnb[0], tnb[3]-tnb[1],
                        linewidth=2, edgecolor='r', facecolor='none'
                    )
                    ax.add_patch(rect)
                    if not (o_fb == np.array([0,0,0,0])).all():
                        tnb = upd_bbox(nfb, nbox, nbox[2]-nbox[0])
                        rect = patches.Rectangle(
                            (tnb[0], tnb[1]),
                            tnb[2]-tnb[0], tnb[3]-tnb[1],
                            linewidth=2, edgecolor='g', facecolor='none'
                        )
                        ax.add_patch(rect)
                    ax.imshow(img.crop(nbox))
                    plt.show()
                    """
                ct_size = int(ct_size * 0.95)
        # generate positive face samples
        badcnt = 0
        if gen_xtra_face:
            if (o_fb == np.array([0,0,0,0])).all(): # no face
                continue
            ct_size = 250
            while ct_size >= 150:
                # the crop's top-left must lie in [lrx, o_fb[0]] x [lry, o_fb[1]]
                # for the whole face to fit inside the crop
                lrx = o_fb[2] - ct_size
                lry = o_fb[3] - ct_size
                for i in range(0, 1):
                    if lrx >= o_fb[0] or lry >= o_fb[1]:
                        """
                        print("### ERROR ###")
                        print("id: {}".format(dbg_i))
                        print(os.path.basename(cur_file))
                        print("o_fb: {}".format(o_fb))
                        plt.imshow(img)
                        plt.show()
                        """
                        # face larger than the crop window — skip this size
                        badcnt += 1
                        break
                    cx, cy = randrange(lrx, o_fb[0]), randrange(lry, o_fb[1])
                    nbox = [cx, cy, cx+ct_size, cy+ct_size]
                    f_probs.append(np.array([1, 0], dtype='float32'))
                    imgs.append(img.crop(nbox).resize(resize_shape, resample=Image.LANCZOS))
                    f_boxes.append(upd_bbox(o_fb, nbox, resize_shape[0]))
                    # add partial body annotation if applicable
                    nbb = bbox_intersect(o_bb, nbox)
                    if valid_bbox(nbb):
                        nbb_iou = get_iou(nbb, o_bb)
                        if nbb_iou < 0.35: # low confidence partial body
                            b_probs.append(np.array([0.5, 0], dtype='float32'))
                        elif 0.35 <= nbb_iou < 0.5: # mid confidence partial body
                            b_probs.append(np.array([0.8, 0], dtype='float32'))
                        else: # pretty much full-body
                            b_probs.append(np.array([0.99, 0], dtype='float32'))
                        b_boxes.append(upd_bbox(nbb, nbox, resize_shape[0]))
                    else:
                        b_probs.append(np.array([0, 1], dtype='float32'))
                        b_boxes.append(np.array([0,0,0,0], dtype='int32'))
                ct_size = int(ct_size * 0.9)
    print("total badcnt: {}".format(badcnt))
    # shuffle all five lists with the same permutation, then split off the
    # first valid_percent fraction as the validation set
    rand_idx = np.random.permutation(np.arange(len(imgs)))
    imgs = [imgs[a] for a in rand_idx]
    b_probs = [b_probs[a] for a in rand_idx]
    b_boxes = [b_boxes[a] for a in rand_idx]
    f_probs = [f_probs[a] for a in rand_idx]
    f_boxes = [f_boxes[a] for a in rand_idx]
    vn = int(np.floor(len(imgs) * valid_percent))
    return imgs[vn:], b_probs[vn:], b_boxes[vn:], f_probs[vn:], f_boxes[vn:], imgs[:vn], b_probs[:vn], b_boxes[:vn], f_probs[:vn], f_boxes[:vn]
def f1_score(truth, pred, eval_class):
    """One-vs-rest precision, recall and F1 for the class `eval_class`.

    truth, pred : array-like label sequences of equal length
    eval_class  : the label treated as the positive class

    Returns (precision, recall, f1). When precision + recall == 0 the F1 is
    defined as 0.0 (the original raised ZeroDivisionError in that case).
    """
    def binarize(l, ref):
        # 1 where the label equals ref, 0 elsewhere (keeps l's dtype)
        l_new = np.zeros_like(l)
        for cnt in range(len(l)):
            l_new[cnt] = 1 if l[cnt] == ref else 0
        return l_new
    truth = binarize(truth, eval_class)
    pred = binarize(pred, eval_class)
    precision = precision_score(truth, pred)
    recall = recall_score(truth, pred)
    denom = precision + recall
    # guard against 0/0 when the class is never predicted nor present
    f1 = 2 * (precision * recall) / denom if denom > 0 else 0.0
    return precision, recall, f1
|
import sys
import io
from pathlib import Path
import requests
import numpy as np
from astropy.table import Table
from astropy.io import fits
import astropy.units as u
import astropy.coordinates as coord
SIA_URL = 'https://irsa.ipac.caltech.edu/SIA'
# Base query parameters for the IRSA Simple Image Access service;
# 'POS' is a placeholder that is overwritten per-source in the loop below.
sia_params = {
    'COLLECTION': 'wise_allwise',
    'RESPONSEFORMAT': 'VOTABLE',
    'FORMAT': 'image/fits',
    'POS': 'circle $RA $DEC 0.0',
}
# Source catalog (Kobulnicky et al. 2016) in CDS format, read at import time.
SOURCE_DIR = Path('OB/Kobulnicky2016')
source_table = Table.read(
    str(SOURCE_DIR / 'table1.dat'),
    format='ascii.cds',
    readme=str(SOURCE_DIR / 'ReadMe')
)
# Downloaded FITS cutouts are written here.
OUTPUT_IMAGE_DIR = Path('OB/WISE')
OUTPUT_IMAGE_DIR.mkdir(exist_ok=True)
# Default cutout width/height in arcmin; doubled per size threshold passed.
BASE_IMAGE_SIZE_ARCMIN = 8.0
def skycoord_from_table_row(data):
    """Build a SkyCoord from the sexagesimal RA/Dec columns of a catalog row."""
    ra = " ".join(str(data[key]) for key in ("RAh", "RAm", "RAs"))
    sign = data['DE-']
    dec = f"{sign}{data['DEd']} {data['DEm']} {data['DEs']}"
    return coord.SkyCoord(f'{ra} {dec}', unit=(u.hourangle, u.deg))
# Optional CLI args: 1-based start row (k1) and exclusive end row (k2) of the
# slice of the source table to process. Defaults: start at the first row and
# run to the end of the table.
try:
    k1 = int(sys.argv[1])
except (IndexError, ValueError):
    # narrow except — a bare `except:` would also swallow SystemExit and
    # KeyboardInterrupt
    k1 = 1
try:
    k2 = int(sys.argv[2])
except (IndexError, ValueError):
    k2 = None
# Loop over all sources in the table
for source_data in source_table[k1-1:k2]:
    print(source_data["Seq", "Name", "Alias", "R0"])
    # Make a SkyCoord object
    c = skycoord_from_table_row(source_data)
    sia_params['POS'] = f"circle {c.to_string()} 0.0"
    # Perform a search around the specified coordinates
    r = requests.get(SIA_URL, params=sia_params)
    tab = Table.read(io.BytesIO(r.content), format='votable')
    # Expand the image size for bigger bows
    # (each R0 threshold passed doubles the cutout: x2, x4, x8)
    expand = 1.0
    for threshold in 40.0, 80.0, 160.0:
        if source_data["R0"] > threshold:
            expand *= 2
    image_size = BASE_IMAGE_SIZE_ARCMIN*expand
    image_params = {
        "center": f"{c.ra.deg:.4f},{c.dec.deg:.4f}",
        "size": f"{image_size}, {image_size} arcmin",
        "gzip": 0,
    }
    # Now fetch images in each band
    for data in tab:
        print(
            f"Fetching image ({image_size} arcmin square) from",
            data['access_url'].decode(),
        )
        r = requests.get(data['access_url'], params=image_params)
        hdulist = fits.open(io.BytesIO(r.content))
        # Get name of WISE bandpass as a unicode string
        bpname = data['energy_bandpassname'].decode()
        # one FITS file per (source, band), e.g. 0001-W1.fits
        hdulist.writeto(
            OUTPUT_IMAGE_DIR / f"{source_data['Seq']:04d}-{bpname}.fits",
            overwrite=True,
        )
|
class Mapper():
    """Abstract map step; concrete mappers override map()."""

    def __init__(self, dynamo_client):
        # DynamoDB client handle available to subclasses
        self.dynamo_client = dynamo_client

    def map(self, data):
        """Transform one unit of input data. Must be overridden."""
        raise NotImplementedError("Should have implemented this")
class Reducer():
    """Abstract reduce step; concrete reducers override reduce() and output()."""

    def __init__(self, dynamo_client):
        # DynamoDB client handle available to subclasses
        self.dynamo_client = dynamo_client

    def reduce(self, data):
        """Fold one unit of mapped data into the accumulated state."""
        raise NotImplementedError("Should have implemented this")

    def output(self):
        """Return the final reduced result."""
        raise NotImplementedError("Should have implemented this")
|
# 给定一个非空整数数组,除了某个元素只出现一次以外,其余每个元素均出现两次。找出那个只出现了一次的元素。
#
# 说明:
#
# 你的算法应该具有线性时间复杂度。 你可以不使用额外空间来实现吗?
#
# 示例 1:
#
# 输入: [2,2,1]
# 输出: 1
#
#
# 示例 2:
#
# 输入: [4,1,2,1,2]
# 输出: 4
# Related Topics 位运算 哈希表
# 👍 1584 👎 0
from typing import List
from functools import reduce
# 这个数字与0异或运算以后还是自己, 然后数字与自己运算以后就变成0, 想要
# 不占用额外的空间 就只能使用位运算了, 这个想法简直了,
# a^0 = a, a^a = 0, so a^b^a = a^a^b = 0^b = b. 简直了。
# public int singleNumber(int[] nums) {
# int single = 0;
# for (int num : nums) {
# single ^= num;
# }
# return single;
# }
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """XOR-fold the array: paired elements cancel, leaving the unique one."""
        return reduce(int.__xor__, nums)
# leetcode submit region end(Prohibit modification and deletion)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import codecs
import os.path
import subprocess
from pathlib import Path
import setuptools
def read(rel_path):
    """Return the text of rel_path, resolved relative to this file's directory."""
    here = os.path.abspath(os.path.dirname(__file__))
    full_path = os.path.join(here, rel_path)
    with codecs.open(full_path, "r") as fp:
        return fp.read()
def get_version(rel_path):
    """Extract the value of __version__ from a source file, or raise RuntimeError."""
    for line in read(rel_path).splitlines():
        if not line.startswith("__version__"):
            continue
        quote = '"' if '"' in line else "'"
        return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
def parse_dependency(filepath):
    """Parse a pip requirements file into a list of install_requires strings.

    Comment lines (starting with "#") are skipped. Git links of the form
    ``<git_link>#egg=<name>`` are rewritten to PEP 508 ``<name> @ <git_link>``.
    """
    dep_list = []
    # context manager so the file handle is closed (the original leaked it)
    with open(filepath) as fp:
        lines = fp.read().splitlines()
    for dep in lines:
        if dep.startswith("#"):
            continue
        key = "#egg="
        if key in dep:
            git_link, egg_name = dep.split(key)
            dep = f"{egg_name} @ {git_link}"
        dep_list.append(dep)
    return dep_list
base_requirements = parse_dependency("requirements/base.txt")
dev_requirements = base_requirements + parse_dependency("requirements/dev.txt")
# One extras_require entry per bundled environment package; the env name is
# obtained by asking each env's setup.py for its distribution name.
extras_require = {}
for setup_path in Path("mtenv/envs").glob("**/setup.py"):
    env_path = setup_path.parent
    env_name = (
        subprocess.run(["python", setup_path, "--name"], stdout=subprocess.PIPE)
        .stdout.decode()
        .strip()
    )
    extras_require[env_name] = base_requirements + parse_dependency(
        f"{str(env_path)}/requirements.txt"
    )
# "all" is the deduplicated union of every env's dependencies.
extras_require["all"] = list(
    set([dep for requirements in extras_require.values() for dep in requirements])
)
extras_require["dev"] = dev_requirements
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="mtenv",
    version=get_version("mtenv/__init__.py"),
    author="Shagun Sodhani, Ludovic Denoyer, Pierre-Alexandre Kamienny, Olivier Delalleau",
    author_email="sshagunsodhani@gmail.com, denoyer@fb.com, pakamienny@fb.com, odelalleau@fb.com",
    description="MTEnv: MultiTask Environments for Reinforcement Learning",
    # reuse the README already read above — the original re-opened the file
    # (leaking the handle) and read it a second time
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=base_requirements,
    url="https://github.com/facebookresearch/mtenv",  # fixed typo: "facbookresearch"
    packages=setuptools.find_packages(
        exclude=["*.tests", "*.tests.*", "tests.*", "tests", "docs", "docsrc"]
    ),
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    extras_require=extras_require,
)
|
# Read "n b" pairs from stdin and, for each, report how many recursive calls
# a naive (mod-b) Fibonacci evaluation makes; a "0 0" pair terminates.
aux = 1  # case number — BUG FIX: it was re-initialized inside the loop,
         # so every case printed "Case 1"; it must persist across cases
while True:
    lista = input().split(' ')
    n = int(lista[0])
    b = int(lista[1])
    count = 0  # recursive-call counter, reset per case

    def fib(n):
        """Naive recursive Fibonacci mod b; increments the global call count."""
        global count
        count += 1
        if n < 2:
            return n
        else:
            return (fib(n-1)+fib(n-2))%b

    if n != 0 and b != 0:
        fib(n)
        print(f'Case {aux}: {n} {b} {count}')
        aux += 1
    else:
        break
|
from .models import address
def handle_uploaded_file(file):
    """Persist every row of an uploaded CSV-like file as an address record.

    Each row is a sequence with the email at index 0 and the name at index 1.
    (Fixed: the original used C-style braces, which is a Python syntax error.)
    """
    # NOTE(review): address.objects.bulk_create would batch the inserts and
    # avoid one DB round-trip per row — consider for large uploads.
    for row in file:
        new_item = address(email=row[0], name=row[1])
        new_item.save()
|
import module
# Deliberately unguarded: this runs on import too, because there is no
# `if __name__ == "__main__":` guard — it demonstrates import side effects.
print("Adding the name variable from naming.py") # This should be run when the script is imported
# since the `if __name__ == "__main__":` guard isn't used
name = input("What's your name? ")
|
# Exercise 06: print every key-value pair that appears in both dictionaries
dict1 = {
    1: 1,
    2: 2,
    3: 'Name',
    4: 4
}
dict2 = {
    2: 1,
    1: 2,
    3: 'Name',
    4: 4
}
# compare whole (key, value) pairs, preserving dict1's insertion order
dict1 = list(dict1.items())
dict2 = list(dict2.items())
for pair in dict1:
    if pair in dict2:
        print(pair)
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from pirates.piratesbase import TODDefs
from pirates.piratesbase import TODGlobals
from direct.distributed.ClockDelta import *
class DistributedTimeOfDayManagerAI(DistributedObjectAI):
    """AI-side manager that syncs the day/night cycle and moon state to clients."""
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTimeOfDayManagerAI')

    def __init__(self, air, isPaused, isJolly):
        DistributedObjectAI.__init__(self, air)
        # pause flag, jolly-moon flag, environment substitutions, and the
        # last broadcast TOD sync tuple
        self.isPaused = isPaused
        self.isJolly = isJolly
        self.subList = []
        self.tod = [0, 0, 0, 0]

    def generate(self):
        DistributedObjectAI.generate(self)

    def announceGenerate(self):
        DistributedObjectAI.announceGenerate(self)

    def setIsPaused(self, isPaused):
        self.isPaused = isPaused

    def getIsPaused(self):
        return self.isPaused

    def d_syncTOD(self, cycleType, cycleSpeed, startingNetTime, timeOffset):
        """Broadcast a time-of-day sync and remember it for getSyncTOD."""
        self.tod = [cycleType, cycleSpeed, startingNetTime, timeOffset]
        self.sendUpdate('syncTOD',
                        [cycleType, cycleSpeed, startingNetTime, timeOffset])

    def getSyncTOD(self):
        return self.tod

    def requestSync(self):
        # TODO: cycle type, check for halloween and jolly!
        self.d_syncTOD(cycleType=TODGlobals.TOD_REGULAR_CYCLE,
                       cycleSpeed=1,
                       startingNetTime=globalClockDelta.getRealNetworkTime(bits=32),
                       timeOffset=0)

    def setEnvSubs(self, subList):
        self.subList = subList

    def getEnvSubs(self):
        return self.subList

    def setMoonPhaseChange(self, fromCurrent, startPhase, targetPhase, targetTime):
        # TODO: moon phase changes are not implemented yet
        pass

    def getMoonPhaseChange(self):
        return [0, 0, 0, 0]

    def setMoonJolly(self, isJolly):
        self.isJolly = isJolly

    def d_setMoonJolly(self, isJolly):
        self.sendUpdate('setMoonJolly', [isJolly])

    def b_setMoonJolly(self, isJolly):
        # "b_" convention: apply locally AND broadcast
        self.setMoonJolly(isJolly)
        self.d_setMoonJolly(isJolly)

    def getMoonJolly(self):
        return self.isJolly
|
"""
My implementation of the Conway's game of life in python
Ref: https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
Rules
Any live cell with fewer than two live neighbours dies, as if by underpopulation.
Any live cell with two or three live neighbours lives on to the next generation.
Any live cell with more than three live neighbours dies, as if by overpopulation.
Any dead cell with three live neighbours becomes a live cell, as if by reproduction.
The game plan for this code is as follows:
Create a copy of the inputted board
loop through all the cells on the board
determine whether they are alive or dead
count the number of neighbours that are alive
with the count apply the rules to know whether the current state stays the same or changes
reflect any changes in the new board
return the new board
"""
# representation of the board
# list of 4 lists - sets could be used as well
# random initial state
# this state will represent a generation
# variable to allow for situations when the board is larger than 4X4
cells_in_squares = 4 # board is cells_in_squares x cells_in_squares; any number works
# the state of the cell is represented by 0s and 1s
DEAD = 0
ALIVE = 1
# find cells that are alive: cells holding 1 are alive
# cycle through each cell turning it on or off depending on the rules
def return_next_generation(bdlist):
    """Return the next generation of the board; the input board is not modified.

    bdlist : square list-of-lists of DEAD/ALIVE cells
    Raises TypeError when the input is not a list.
    """
    if not isinstance(bdlist, list):
        raise TypeError("Input must be a list")
    # BUG FIX: list.copy() is shallow, so the inner rows were SHARED with
    # bdlist — writing nextboard[x][y] mutated the board mid-computation and
    # corrupted the neighbour counts of later cells. Copy each row.
    nextboard = [row.copy() for row in bdlist]
    # use cells_in_squares (consistent with return_cell_state) instead of a
    # hard-coded 4, so larger boards work too
    for x in range(cells_in_squares):
        for y in range(cells_in_squares):
            ct = neighbours_alive(bdlist, x, y)
            nextboard[x][y] = applygamerules(bdlist[x][y], ct)
    return nextboard
def neighbours_alive(bd, x, y):
    """Count ALIVE cells among the up-to-8 neighbours of (x, y).

    Scans the full 3x3 window around the cell, then subtracts the centre
    cell itself if it is alive; off-board positions are ignored.
    """
    total = 0
    for nx in range(x - 1, x + 2):
        for ny in range(y - 1, y + 2):
            # return_cell_state handles out-of-bounds coordinates
            if return_cell_state(bd, nx, ny) == ALIVE:
                total += 1
    # the window includes (x, y) itself — remove it from the count
    if return_cell_state(bd, x, y) == ALIVE:
        total -= 1
    return total
def return_cell_state(bd, x, y):
    """Return the state of cell (x, y), or -1 when it lies outside the board."""
    inside = 0 <= x < cells_in_squares and 0 <= y < cells_in_squares
    if not inside:
        return -1
    return bd[x][y]
def applygamerules(state, numberalive):
    """Conway rules: a live cell survives with 2-3 live neighbours; a dead
    cell is born with exactly 3; every other cell is DEAD next generation."""
    if state == ALIVE:
        return ALIVE if numberalive in (2, 3) else DEAD
    # dead cell: reproduction requires exactly three live neighbours
    return ALIVE if numberalive == 3 else DEAD
def list_board_status(bdlist):
    """Debug helper: print the alive/dead status of every cell on the board."""
    alivemsg = "{},{} is alive"
    deadmsg = "{},{} is dead"
    for row in range(4):
        for col in range(4):
            template = alivemsg if bdlist[row][col] == ALIVE else deadmsg
            print(template.format(row, col))
def compare_boards(bd1, bd2, rowcolsize):
    """Return True iff the two rowcolsize x rowcolsize boards are identical.

    :param bd1: list of lists representing the first board
    :param bd2: list of lists representing the second board
    :param rowcolsize: side length of both (equal-sized) boards
    :return: True or False
    """
    return all(
        bd1[r][c] == bd2[r][c]
        for r in range(rowcolsize)
        for c in range(rowcolsize)
    )
# carry out a run using the sample board
if __name__ == "__main__":
    # Demo run: print the seed board, then 51 successive generations.
    board = [[1, 0, 0, 0],
             [1, 0, 0, 1],
             [0, 1, 1, 0],
             [0, 0, 0, 1]]
    print("Original board is :", board)
    x = 0  # loop counter (kept from the original)
    for x in range(51):
        board = return_next_generation(board)
        print(board)
    print("End of simulation")
"""
At this stage instead of just printing the newly generated board output it can
be connected to a plotting engine to show the changes. The board could be designed
like a chess board sized 4 x 4. One color for the dead cells and another for the cells that are alive
So in summary the outstanding work is as follows
1. find a logical test on the board to end the simulations for example if the new board is the same as the
one that went in stop the loop and end the program
2. Plot the board to produce a visual representation
3. code a random starting board position
4. unit tests for the program logic
"""
|
"""
2010.8.3 (onetail) : modify EPL_USB constructor and add variable self.isConnected
"""
import os
import usb1
import sys
import binascii
class DeviceDescriptor(object) :
    """Identifies a USB device by vendor id, product id and interface number.

    NOTE(review): the lookup methods call ``usb.busses()`` (PyUSB 0.x API),
    but this module imports ``usb1`` — ``usb`` is undefined here; confirm
    which USB binding this class was written against.
    """
    def __init__(self, vendor_id, product_id, interface_id) :
        # ids used to match devices enumerated on the bus
        self.vendor_id = vendor_id
        self.product_id = product_id
        self.interface_id = interface_id
    def getDevice(self) :
        """
        Return the device corresponding to the device descriptor if it is
        available on a USB bus. Otherwise, return None. Note that the
        returned device has yet to be claimed or opened.
        """
        buses = usb.busses()
        for bus in buses :
            print "bus length = %d" % len(bus.devices)
            print "Devices = ",
            print bus.devices
            for device in bus.devices :
                print "Device VID = %0X" % device.idVendor
                print "Device PID = %0X" % device.idProduct
                if device.idVendor == self.vendor_id :
                    if device.idProduct == self.product_id :
                        # first VID/PID match wins
                        return device
        return None
    def getDevices(self) :
        """
        Return the device corresponding to the device descriptor if it is
        available on a USB bus. Otherwise, return None. Note that the
        returned device has yet to be claimed or opened.
        """
        # unlike getDevice, this collects ALL matching devices
        self.devices = []
        buses = usb.busses()
        for bus in buses :
            #print "bus length = %d" % len(bus.devices)
            #print "Devices = ",
            #print bus.devices
            for device in bus.devices :
                #print "Device VID = %0X" % device.idVendor
                #print "Device PID = %0X" % device.idProduct
                if device.idVendor == self.vendor_id and device.idProduct == self.product_id:
                    self.devices.append(device)
        return self.devices
class PlugUSBDevice_old(object) :
    """Legacy PyUSB 0.x wrapper for the plug dongle (superseded by PlugUSBDevice).

    NOTE(review): every reference below goes through ``PlugUSBDevice.PLUG_*``
    (the NEW class), not this class's own identically-named constants — the
    attributes defined here are effectively unused; confirm intent.
    """
    PLUG_VENDOR_ID = 0x1915
    PLUG_PRODUCT_ID = 0x5677
    PLUG_INTERFACE_ID = 0
    PLUG_BULK_IN_EP = 0x81
    PLUG_BULK_OUT_EP = 0x01
    def __init__(self) :
        self.device_descriptor = DeviceDescriptor(PlugUSBDevice.PLUG_VENDOR_ID,
                                  PlugUSBDevice.PLUG_PRODUCT_ID,
                                  PlugUSBDevice.PLUG_INTERFACE_ID)
        # all devices matching VID/PID found at construction time
        self.devices = self.device_descriptor.getDevices()
        self.handle = None
        self.device = None
    def open(self) :
        """Open and claim the first matching device whose interface can be claimed."""
        #self.device = self.device_descriptor.getDevice()
        #print self.devices
        for self.device in self.devices:
            self.handle = self.device.open()
            if os.name != 'posix' :
                self.handle.setConfiguration(1)
                #print "a"
            else :
                self.handle.reset()
                #print "b"
            try:
                self.handle.claimInterface(self.device_descriptor.interface_id)
                return
            except:
                # claim failed (e.g. interface busy) — try the next device
                pass
    def close(self) :
        #print "release usb interface"
        self.handle.releaseInterface()
    def getDataPacket(self, nbytesToGet):
        # bulk-read with a 2.5 s timeout
        return self.handle.bulkRead(PlugUSBDevice.PLUG_BULK_IN_EP,nbytesToGet,2500)
    def putDataPacket(self, dataToPut):
        # timeout -1: implementation-defined blocking write
        return self.handle.bulkWrite(PlugUSBDevice.PLUG_BULK_OUT_EP,dataToPut,-1)
class PlugUSBDevice(object):
    """libusb1-based wrapper for the plug dongle (VID 0x1915 / PID 0x5677)."""
    PLUG_VENDOR_ID = 0x1915
    PLUG_PRODUCT_ID = 0x5677
    PLUG_CONFIGURATION = 1
    PLUG_INTERFACE_ID = 0
    PLUG_BULK_IN_EP = 0x81
    PLUG_BULK_OUT_EP = 0x01
    def __init__(self):
        """Locate (but do not open) the first matching device."""
        self.device = None
        self.handle = None
        try:
            self.context = usb1.USBContext()
        except:
            print "Could not create extra USB context"
            # NOTE(review): `return None` in __init__ does not prevent the
            # instance from being created; callers must check self.device.
            return None
        self.device = self.context.getByVendorIDAndProductID(PlugUSBDevice.PLUG_VENDOR_ID, PlugUSBDevice.PLUG_PRODUCT_ID, skip_on_error=True)
    def open(self):
        """Open the located device, reset it, and claim configuration/interface."""
        if not self.device:
            print "No USB device has been choosen"
            return
        try:
            self.handle = self.device.open()
        except:
            print "open USB device failed"
            return
        try:
            self.handle.resetDevice()
            # claim configuration and interface
            self.handle.setConfiguration(PlugUSBDevice.PLUG_CONFIGURATION)
            self.handle.claimInterface(PlugUSBDevice.PLUG_INTERFACE_ID)
        except Exception as e:
            print e
            return
    def close(self):
        """Close the device handle and tear down the libusb context."""
        try:
            # self.handle.releaseInterface(PlugUSBDevice.PLUG_INTERFACE_ID)
            self.handle.close()
            self.context.exit()
        except Exception as e:
            print e
            print "USB Device close error"
    def getDataPacket(self, nbytesToGet):
        # bulk-read up to nbytesToGet bytes with a 2.5 s timeout
        return self.handle.bulkRead(PlugUSBDevice.PLUG_BULK_IN_EP, nbytesToGet, 2500)
    def putDataPacket(self, dataToPut):
        # timeout=-1: blocking write
        return self.handle.bulkWrite(PlugUSBDevice.PLUG_BULK_OUT_EP, dataToPut, timeout=-1)
class EPL_USB:
    """High-level command interface over the plug USB dongle."""
    def __init__ (self):
        self.debug = False
        self.usb = PlugUSBDevice()
        # tracks whether a dongle was found and opened
        self.isConnected = False
        if self.usb.device == None:
            print "USB Device Not Found"
            # NOTE(review): `return None` in __init__ has no effect — the
            # instance is still created, just with isConnected == False.
            return None
        else:
            self.isConnected = True
            self.usb.open()
            print "USB Device Connected"
    #close connection
    def close (self):
        if self.isConnected:
            print "USB closed"
            self.usb.close()
            self.isConnected = False
    #send cmd
    def sendCmd(self,cmd):
        # cmd is a hex string (e.g. "0A1B") converted to raw bytes before sending
        if self.isConnected:
            if self.debug:
                print "USB_LIB SEND CMD: " + cmd
            bdata = binascii.unhexlify(cmd)
            self.usb.putDataPacket(bdata)
    #send cmd
    def sendData(self,data):
        # send raw data bytes as-is (no hex decoding)
        if self.isConnected:
            self.usb.putDataPacket(data)
    #convert tuple to string
    def t2s(self,v):
        # build a string from a sequence of byte values
        z = ""
        for item in v:
            z += chr(item)
        return z
    def recvCmd(self,nbytesToGet):
        # returns None implicitly when not connected
        if self.isConnected:
            return self.usb.getDataPacket(nbytesToGet)
if __name__ == "__main__":
    # Smoke test: construct the interface (prints connection status) and close it.
    dongle = EPL_USB()
    if dongle:
        # NOTE(review): EPL_USB() always yields a truthy instance, even when no
        # device was found — this guard never skips the close; confirm intent.
        dongle.close()
|
import numpy as np
import pandas as pd
import time
import numba as nb
import scipy.sparse as sparse
from inference_model import MeanField, DynamicMessagePassing
from sir_model import frequency, indicator
from scipy.sparse import csr_matrix
import sib
@nb.njit()
def count_valid_c1(alli, allj, allt, maxS, minR):
    """
    Mark contacts (i, j, t) that occur after both endpoints' last S time
    and strictly before j's first R time.
    """
    valid = np.zeros(alli.shape[0], dtype=np.bool_)
    for idx in range(alli.shape[0]):
        i = alli[idx]
        j = allj[idx]
        t = allt[idx]
        if t > max(maxS[i], maxS[j]) and t < minR[j]:
            valid[idx] = True
    return valid
def create_mat_c2(contacts_cut2, model):
    """Build, per time step, a CSR matrix of cumulative contacts from t onward.

    contacts_cut2 : DataFrame with integer columns i, j, t
    model         : object exposing the population size model.N

    Returns dict {t: csr_matrix of shape (N, N)} where entry (i, j) counts
    the contacts between i and j occurring at any time >= t.
    """
    mat = []
    times = []
    # iterate times in DESCENDING order so each step can accumulate the later ones
    c2t = contacts_cut2.sort_values("t", ascending=False)
    for t, gr in c2t.groupby("t", sort=False):
        # np.int was removed in NumPy 1.24 — use the explicit int64 dtype
        v = sparse.coo_matrix(
            (np.ones(len(gr), np.int64), (gr.i.to_numpy(), gr.j.to_numpy())),
            shape=(model.N, model.N),
        )
        if len(mat) > 0:
            # running suffix sum: matrix at t includes all contacts at later times
            mat.append(v.tocsr() + mat[-1])
        else:
            mat.append(v.tocsr())
        times.append(t)
        print(f"Appending contacts, t={t}")
    mat_c2 = dict(zip(times, mat))
    return mat_c2
def csr_to_list(x):
    """Return an iterator of (row, col, value) triples over the stored entries."""
    coo = x.tocoo()
    return zip(coo.row, coo.col, coo.data)
def csr_to_dataframe(x, columns=("i", "j", "lamb")):
    """Convert a sparse matrix to a 3-column DataFrame of (row, col, value).

    columns : names for the row-index, column-index and value columns. The
        default is now a tuple (any 3-element sequence is still accepted)
        to avoid the shared-mutable-default-argument pitfall.
    """
    x_coo = x.tocoo()
    data = {columns[0]: x_coo.row, columns[1]: x_coo.col, columns[2]: x_coo.data}
    return pd.DataFrame(data)
def ranking_inference(t, model, observations, params):
    """Inference starting from t_start.

    Run Mean Field (or DMP) from t_start to t, starting from the chosen
    initial condition and resetting the probas according to observations.

    params["lamb"]    : transmission probability lamb
    params["mu"]      : recovery probability mu
    params["t_start"] : t_start
    params["tau"]     : tau
    params["algo"]    : "MF" (Mean Field) or "DMP" (Dynamic Message Passing)
    params["init"]    : "all_S" (all susceptible) or "freqs" (frequency at t_start)

    Returns: ranked dataframe probas[["i","rank","score","p_I","p_R","p_S"]]
    If t < t_start cannot do the inference ranking, returns a random ranking.
    """
    t_start = params["t_start"]
    tau = params["tau"]
    mu = params["mu"]
    lamb = params["lamb"]
    if (t < t_start):
        return ranking_random(t, model, observations, params)
    algo = MeanField if params["algo"] == "MF" else DynamicMessagePassing
    if params["init"] == "all_S":
        initial_probas = indicator(np.zeros(model.N))
    else:
        initial_probas = frequency(model.states[t_start])
    infer = algo(initial_probas, model.x_pos, model.y_pos)
    # shift observation times by t_start (mutates the caller's dicts in place)
    for obs in observations:
        obs["t"] = obs["t_test"] - t_start
        obs["t_I"] = obs["t"] - tau
    # set lambda and mu (general)
    rec_prob = mu*np.ones(model.N)
    transm = []
    for t0, A in enumerate(model.transmissions[t_start:t+1]):
        # BUG FIX: the original did `B.tocsr()[B.nonzero()] = lamb`, which
        # writes into a TEMPORARY CSR copy and leaves B unchanged, so lamb was
        # never applied. Convert first, then assign — same as ranking_backtrack.
        B = A.copy().tocsr()
        B[B.nonzero()] = lamb
        transm.append(B)
    infer.time_evolution(
        rec_prob, transm, observations, print_every=0)
    # marginal probabilities at the target time t (one row per individual)
    probas = pd.DataFrame(
        infer.probas[t-t_start, :, :],
        columns=["p_S", "p_I", "p_R"]
    )
    probas["i"] = range(model.N)
    # some i will have the same probas
    # -> we add a random value to shuffle the ranking
    probas["rand"] = np.random.rand(model.N)
    probas = probas.sort_values(by=["p_I", "rand"], ascending=False)
    probas.reset_index(drop=True, inplace=True)
    probas["rank"] = range(model.N)
    probas["score"] = probas["p_I"]
    return probas
def ranking_backtrack(t, model, observations, params):
    """Mean Field starting from t - delta.

    Run Mean Field from t - delta to t, starting from all susceptible and
    resetting the probas according to observations.
    params["lamb"] : lamb
    params["mu"] : mu
    params["delta"] : delta
    params["tau"] : tau
    params["algo"] : "MF" (Mean Field) or "DMP" (Dynamic Message Passing)
    params["init"] : "all_S" (all susceptible) or "freqs" (frequency at t_start)
    Returns: ranked dataframe probas[["i","rank","score","p_I","p_R","p_S"]]
    If t < delta cannot do the backtrack ranking, returns a random ranking.
    """
    delta = params["delta"]
    tau = params["tau"]
    mu = params["mu"]
    lamb = params["lamb"]
    #if (t < delta):
    #    return ranking_random(t, model, observations, params)
    # the window start is clamped at 0 instead of falling back to random
    t_start = max(t - delta, 0)
    algo = MeanField if params["algo"] == "MF" else DynamicMessagePassing
    if params["init"] == "all_S":
        initial_probas = indicator(np.zeros(model.N))
    else:
        initial_probas = frequency(model.states[t_start])
    infer = algo(initial_probas, model.x_pos, model.y_pos)
    # shift by t_start
    # NOTE(review): this mutates the caller's observation dicts in place
    for obs in observations:
        obs["t"] = obs["t_test"] - t_start
        obs["t_I"] = obs["t"] - tau
    # set lambda and mu (general)
    rec_prob = mu*np.ones(model.N)
    transm = []
    for t0, A in enumerate(model.transmissions[t_start:t+1]):
        # replace every stored contact weight with the uniform rate lamb
        B = A.copy()
        B=B.tocsr()
        B[B.nonzero()] = lamb
        transm.append(B)
    #infer.time_evolution(
    #    model.recover_probas, model.transmissions[t_start:t+1], observations,
    #    print_every=0
    #)
    infer.time_evolution(
        rec_prob, transm, observations, print_every=0)
    # marginal probabilities at the target time t (one row per individual)
    probas = pd.DataFrame(
        infer.probas[t-t_start, :, :], columns=["p_S", "p_I", "p_R"]
    )
    probas["i"] = range(model.N)
    # some i will have the same probas
    # -> we add a random value to shuffle the ranking
    probas["rand"] = np.random.rand(model.N)
    probas = probas.sort_values(by=["p_I", "rand"], ascending=False)
    probas.reset_index(drop=True, inplace=True)
    probas["rank"] = range(model.N)
    probas["score"] = probas["p_I"]
    return probas
def ranking_random(t, model, observations, params):
    """Random ranking.

    Returns: dataframe df[["i","rank","score","count"]] with the individuals
    in a random order and linearly decreasing scores/counts from 1 to 0.
    """
    order = np.random.permutation(model.N)
    scores = np.linspace(1, 0, model.N)
    return pd.DataFrame({
        "i": order,
        "rank": range(model.N),
        "score": scores,
        "count": scores,
    })
def ranking_tracing(t, model, observations, params):
    """Rank by naive contact tracing.

    Counts, for every individual, contacts during the window [t - tau, t)
    with people recently observed infected (s == 1, t - tau <= t_test < t).
    params["tau"] : size of the tracing window.
    Returns: ranked dataframe encounters[["i","rank","score","count"]].
    Falls back to a random ranking when t < tau.
    """
    tau = params["tau"]
    if t < tau:
        return ranking_random(t, model, observations, params)
    # individuals observed infected in the window [t - tau, t)
    recently_positive = {
        obs["i"] for obs in observations
        if obs["s"] == 1 and t - tau <= obs["t_test"] < t
    }
    # contacts (i, j) within the window where j was recently positive
    records = []
    for t_contact in range(t - tau, t):
        for i, j, lamb in csr_to_list(model.transmissions[t_contact]):
            if j in recently_positive and lamb:  # lamb = 0 does not count
                records.append(dict(i=i, j=j, t=t_contact))
    contacts = pd.DataFrame(records)
    encounters = pd.DataFrame({"i": range(model.N)})
    if contacts.shape[0] == 0:
        # nobody met a recently positive individual
        encounters["count"] = 0
    else:
        per_individual = contacts.groupby("i").size()
        encounters["count"] = encounters["i"].map(per_individual).fillna(0)
    # random tie-breaker so individuals with equal counts are shuffled
    encounters["rand"] = np.random.rand(model.N)
    encounters = encounters.sort_values(by=["count", "rand"], ascending=False)
    encounters.reset_index(drop=True, inplace=True)
    encounters["rank"] = range(model.N)
    encounters["score"] = encounters["count"]
    return encounters
def upd_score(k, Score, lamb, noise):
    """Add a second-neighbor contribution lamb**2 (plus tiny random
    tie-breaking noise) to individual k's score, in place."""
    Score[k] += lamb ** 2 + np.random.rand() * noise
def ranking_tracing_secnn(T, model, observations, params, noise = 1e-19):
    """
    Contact Tracing up to second nearest neighbors.
    params["tau"] = tau (contact window size)
    params["lamb"] = lamb (transmission rate used as score weight)
    Returns: ranked dataframe encounters[["i","rank","score","count"]]
    Falls back to a random ranking when T < tau.
    Authors: Sibyl-team
    """
    tau = params["tau"]
    lamb = params["lamb"]
    if (T < tau):
        return ranking_random(T, model, observations, params)
    observ = pd.DataFrame(observations)
    observ = observ[(observ["t_test"] <= T)]
    # all non-zero contacts in the window [T - tau, T]
    contacts = pd.DataFrame(
        dict(i=i, j=j, t=t_contact)
        for t_contact in range(T - tau, T+1)
        for i, j, lamb in csr_to_list(model.transmissions[t_contact])
        if lamb # lamb = 0 does not count
    )
    idx_R = observ[observ['s'] == 2]['i'].to_numpy() # observed R
    idx_I = observ[observ['s'] == 1]['i'].to_numpy() # observed I
    idx_S = observ[(observ['s'] == 0) & (observ['t_test'] == T)]['i'].to_numpy() # observed S at T -> put them at the tail of the ranking
    idx_alli = contacts['i'].unique()
    idx_allj = contacts['j'].unique()
    idx_all = np.union1d(idx_alli, idx_allj)
    idx_non_obs = np.setdiff1d(range(0,model.N), idx_all) # these have no contacts -> tail of the ranking
    idx_to_inf = np.setdiff1d(idx_all, idx_I) # rm I anytime
    idx_to_inf = np.setdiff1d(idx_to_inf, idx_S) # rm S at time T
    idx_to_inf = np.setdiff1d(idx_to_inf, idx_R) # rm R anytime
    # latest time each i was observed S, earliest time observed R
    maxS = -1 * np.ones(model.N)
    minR = T * np.ones(model.N)
    for i, s, t_test, in observ[["i", "s", "t_test"]].to_numpy():
        if s == 0 and t_test < T:
            maxS[i] = max(maxS[i], t_test)
        if s == 2:
            minR[i] = min(minR[i], t_test)
    # I can consider a contact as potentially contagious if T > minR > t_contact > maxS,
    # the maximum time at which I am observed as S (for both infector and
    # infected)
    contacts_cut = contacts[(contacts["i"].isin(idx_to_inf)) \
                  & (contacts["j"].isin(idx_I))]
    Score = dict([(i, 0) for i in range(model.N)])
    Count = dict([(i, 0) for i in range(model.N)])
    if len(contacts_cut):
        contacts_cut2 = contacts[(contacts["i"].isin(idx_to_inf)) \
                  & (contacts["j"].isin(idx_to_inf))]
        valid_idx_c1 = count_valid_c1(*[contacts_cut[k].to_numpy() for k in ("i","j","t")], maxS, minR)
        good_c1 = contacts_cut.iloc[valid_idx_c1]
        for i, j, t in good_c1[["i", "j", "t"]].to_numpy():
            # i to be estimated, j is infected
            Score[i] += lamb + np.random.rand() * noise
            Count[i] += 1.0
        mat_c1 = {}
        for t, gr in good_c1.groupby("t"):
            # BUGFIX: `np.int` is a removed alias of the builtin int
            # (deprecated in NumPy 1.20, removed in 1.24); use int directly.
            v = sparse.coo_matrix((np.ones(len(gr), int), (gr.i.to_numpy(), gr.j.to_numpy())), shape=(model.N, model.N))
            mat_c1[t] = v.tocsr()
    sec_NN = 0
    if len(contacts_cut):
        mat_c2 = create_mat_c2(contacts_cut2, model)
        sum_counts = None
        for t in sorted(mat_c1.keys()):
            ## select the rows (i) with first-neighbor contacts at time t
            idx_i_c1 = np.unique(mat_c1[t].nonzero()[0])
            res = mat_c1[t][idx_i_c1,:].sum(1).T * mat_c2[t+1][idx_i_c1,:] ##vector product
            if sum_counts is None:
                sum_counts = res
            else:
                sum_counts += res
        if sum_counts is not None:
            sec_NN = sum_counts.sum()
            sum_counts = np.array(sum_counts)[0]
            idx_nonzero_j = sum_counts.nonzero()[0]
            counts_j = sum_counts[idx_nonzero_j]
            for (k, occk) in zip(idx_nonzero_j, counts_j):
                # second-neighbor contribution, weighted lamb^2 per occurrence
                Score[k] += lamb*lamb*occk
    print(f"first NN c: {len(contacts_cut)}. second NN c: {sec_NN}")
    # Final score assignment: observed I rank top (by recency of the positive
    # test); contact-less, S-at-T and R individuals go to the tail.
    for i in range(0,model.N):
        if i in idx_non_obs:
            Score[i] = -1 + np.random.rand() * noise
        if i in idx_I and i not in idx_R:
            Score[i] = model.N * observ[(observ['i'] == i) & (observ['s'] == 1)]['t_test'].max() + np.random.rand() * noise
        elif i in idx_S: #at time T
            Score[i] = -1 + np.random.rand() * noise
        elif i in idx_R: #anytime
            Score[i] = -1 + np.random.rand() * noise
    sorted_Score = sorted(Score.items(),key=lambda item: item[1], reverse=True)
    idxrank = [item[0] for item in sorted_Score]
    scores = [item[1] for item in sorted_Score]
    count = [Count[i] for i in idxrank]
    #i rank score count
    encounters = pd.DataFrame({"i": list(idxrank), "rank": range(0,model.N), "score": list(scores), "count": count })
    return encounters
def ranking_tracing_backtrack(t, model, observations, params):
    """Contact tracing combined with backtrack inference.

    Individuals are ranked first by whether they had any past contact
    (contact tracing), then by the MF/DMP probability of being infected.
    params["delta"] : delta (backtrack window)
    params["tau"] : tau (tracing window)
    params["algo"] : "MF" (Mean Field) or "DMP" (Dynamic Message Passing)
    params["init"] : "all_S" (all susceptible) or "freqs" (frequency at t_start)
    Returns: ranked dataframe df[["i","rank","score","count","p_I","p_R","p_S"]]
    Falls back to a random ranking when t < tau or t < delta.
    """
    tau, delta = params["tau"], params["delta"]
    if t < tau or t < delta:
        return ranking_random(t, model, observations, params)
    traced = ranking_tracing(t, model, observations, params)
    traced.drop(columns=["rank", "score"], inplace=True)
    inferred = ranking_backtrack(t, model, observations, params)
    inferred.drop(columns=["rank", "score"], inplace=True)
    df = pd.merge(traced, inferred, on=["i"], how="inner")
    # score = (had any past contact) + p_I
    df["past_contact"] = (df["count"] > 0).astype(int)
    df["score"] = df["past_contact"] + df["p_I"]
    # random tie-breaker so individuals with equal scores are shuffled
    df["rand"] = np.random.rand(model.N)
    df = df.sort_values(by=["score", "rand"], ascending=False)
    df.reset_index(drop=True, inplace=True)
    df["rank"] = range(model.N)
    return df
# Registry of ranking strategies by name; all share the signature
# (t, model, observations, params). ranking_inference and
# ranking_backtrack are defined earlier in this module.
RANKINGS = {
    "tracing_backtrack": ranking_tracing_backtrack,
    "inference": ranking_inference,
    "backtrack": ranking_backtrack,
    "tracing": ranking_tracing,
    "random": ranking_random,
    "tracing2nd": ranking_tracing_secnn
}
|
import pygame
from pygame.surface import Surface
from GameLogic.Unit import *
from Helpers.EventHelpers import EventExist
from Vector2 import Vector2
class BuyUnitItem:
    """A clickable shop button for buying one unit type.

    Subclasses override _getTexture (button image per player id) and
    GetUnitType (the unit class to instantiate on purchase).
    """
    # Lazily-loaded highlight overlay shared by all buttons. Loading it once
    # avoids a disk read + rescale on every Draw call (the original reloaded
    # 'images/tiles/selected.png' each frame).
    _selected_overlay = None

    def __init__(self, offset: Vector2, id, image: Surface = None, rect=None):
        self.Offset = offset
        self.Image = image if image is not None else self._getTexture(id)
        self.Rect = rect
        self.clicked = False

    def Update(self, game):
        return self

    @classmethod
    def _getSelectedOverlay(cls):
        # NOTE(review): convert_alpha() requires an initialised display —
        # the same constraint as the original inline load.
        if BuyUnitItem._selected_overlay is None:
            BuyUnitItem._selected_overlay = pygame.transform.scale(
                pygame.image.load('images/tiles/selected.png').convert_alpha(), (40, 40))
        return BuyUnitItem._selected_overlay

    def Draw(self, game):
        # Extra screen-based properties
        margin = 38 + 20
        x = 10 + margin * self.Offset.X
        y = self.Offset.Y
        if self.IsHoverdByMouse() or self.clicked:
            game.Settings.GetScreen().blit(self._getSelectedOverlay(), (x, y))
        self.Rect = game.Settings.GetScreen().blit(self.Image, (x, y))

    def IsHoverdByMouse(self):
        # Rect is only known after the first Draw call.
        return self.Rect is not None and self.Rect.collidepoint(pygame.mouse.get_pos())

    def IsClickedByMouse(self, game):
        return self.IsHoverdByMouse() and EventExist(game.Events, pygame.MOUSEBUTTONUP)

    def _getTexture(self, id):
        # Base class has no texture; subclasses return a scaled Surface.
        return None

    def GetUnitType(self):
        return Soldier
class SoldierButton(BuyUnitItem):
    """Shop button for Soldier units; sprite colour depends on player id."""

    def _getTexture(self, id):
        paths = {
            0: 'images/units/soldierGreen.png',
            1: 'images/units/soldierBlue.png',
            2: 'images/units/soldierYellow.png',
        }
        # any other id falls back to the red sprite
        path = paths.get(id, 'images/units/soldierRed.png')
        return pygame.transform.scale(pygame.image.load(path).convert_alpha(), (38, 38))

    def GetUnitType(self):
        return Soldier
class RobotButton(BuyUnitItem):
    """Shop button for Robot units; sprite colour depends on player id."""

    def _getTexture(self, id):
        paths = {
            0: 'images/units/robotGreen.png',
            1: 'images/units/robotBlue.png',
            2: 'images/units/robotYellow.png',
        }
        # any other id falls back to the red sprite
        path = paths.get(id, 'images/units/robotRed.png')
        return pygame.transform.scale(pygame.image.load(path).convert_alpha(), (38, 38))

    def GetUnitType(self):
        return Robot
class TankButton(BuyUnitItem):
    """Shop button for Tank units; sprite colour depends on player id."""

    def _getTexture(self, id):
        paths = {
            0: 'images/units/tankGreen.png',
            1: 'images/units/tankBlue.png',
            2: 'images/units/tankYellow.png',
        }
        # any other id falls back to the red sprite
        path = paths.get(id, 'images/units/tankRed.png')
        return pygame.transform.scale(pygame.image.load(path).convert_alpha(), (38, 38))

    def GetUnitType(self):
        return Tank
class BoatButton(BuyUnitItem):
    """Shop button for Boat units; sprite colour depends on player id."""

    def _getTexture(self, id):
        paths = {
            0: 'images/units/shipGreen.png',
            1: 'images/units/shipBlue.png',
            2: 'images/units/shipYellow.png',
        }
        # any other id falls back to the red sprite
        path = paths.get(id, 'images/units/shipRed.png')
        return pygame.transform.scale(pygame.image.load(path).convert_alpha(), (38, 38))

    def GetUnitType(self):
        return Boat
|
import numpy as np
from operator import itemgetter
from collections import defaultdict
from collections import Counter as ct
# Puzzle input: one "x, y" coordinate per line.
file = "Day6/ruben.txt"
# Filled below with dicts {id, x, y}, one per input line.
coordinates=[]
def manhatton_distance(x1, y1, x2, y2):
    """Return the Manhattan (L1) distance between (x1, y1) and (x2, y2)."""
    return sum(abs(a - b) for a, b in zip((x1, y1), (x2, y2)))
#read the file: each line is "x, y"; the line number becomes the id
with open(file,'r') as f:
    for i, line in enumerate(f):
        parts=line.strip().split(", ")
        coordinates.append(dict(id=i, x=int(parts[0]), y=int(parts[1])))
# Bounding box of all coordinates and its side lengths; the grid scanned
# below covers exactly this box.
gridxmin, gridxmax = min(coordinates,key=lambda x:x['x'])['x'], max(coordinates,key=lambda x:x['x'])['x']
xlen = gridxmax - gridxmin
gridymin, gridymax = min(coordinates,key=lambda x:x['y'])['y'], max(coordinates,key=lambda x:x['y'])['y']
ylen = gridymax - gridymin
def closest(coord):
    """Return the ids of the coordinate(s) nearest to coord = [x, y].

    Uses the module-level `coordinates` list and Manhattan distance.
    More than one id is returned when several coordinates tie.
    """
    x, y = coord[0], coord[1]
    distances = [dict(id=row['id'], dist=manhatton_distance(row['x'], row['y'], x, y))
                 for row in coordinates]
    sorteddistances = sorted(distances, key=itemgetter('dist'))
    d0 = sorteddistances[0]['dist']
    nearest = [sorteddistances[0]['id']]
    sorteddistances.pop(0)
    # Collect every remaining coordinate that ties with the minimum.
    # BUGFIX: guard against exhausting the list when ALL coordinates tie —
    # the original unconditionally indexed sorteddistances[0] and raised
    # IndexError once the list was emptied.
    while sorteddistances and sorteddistances[0]['dist'] == d0:
        nearest.append(sorteddistances[0]['id'])
        sorteddistances.pop(0)
    return nearest
print(closest([0,0]))
# Grid of [x, y] points spanning the bounding box of all coordinates.
grid = [[[i,j] for j in range(gridymin,gridymax+1)] for i in range(gridxmin,gridxmax+1)]
occupants = [[[]for y in range(ylen+1)] for x in range(xlen+1)]
# Areas whose owner touches the bounding-box border extend to infinity.
is_infinite=defaultdict(lambda:False)
# fill the square: each cell gets the id of its closest coordinate
for i in range(xlen+1):
    for j in range(ylen+1):
        nearest = closest(grid[i][j])
        # BUGFIX: the last border test was `j == xlen`; the bottom edge of
        # the box is `j == ylen` (compare the y-loop bound above).
        if i == 0 or j == 0 or i == xlen or j == ylen:
            if len(nearest)==1:
                occupants[i][j]=str(nearest[0])
                is_infinite[nearest[0]]=True
            if len(nearest)>1:
                occupants[i][j]="-1"
        else:
            occupants[i][j]=str(nearest[0])
flattened_occupants=[y for x in occupants for y in x]
# Replace cells owned by infinite areas with a sentinel so they are
# excluded from the finite-area counts below.
for i in range(len(flattened_occupants)):
    if is_infinite[int(flattened_occupants[i])]:
        flattened_occupants[i]="inf"
areas=ct(flattened_occupants)
# NOTE(review): this sorts the *string keys*, not the counts — use
# areas.most_common() to rank by area size.
sortedareas = sorted(areas,reverse=True)
print(sortedareas)
print(areas)
print(is_infinite)
|
from django.db import models
# Create your models here.
class Notes(models.Model):
    """A simple user note: title, body text and creation timestamp."""
    # Short heading shown in listings (max 50 chars).
    title = models.CharField('Note title', max_length=50)
    # Note body (max 200 chars).
    text = models.CharField('Text', max_length=200)
    # Set automatically when the note is first saved.
    date = models.DateTimeField('Date', auto_now_add=True)
|
import sys
import os
import socket
from main import *
from Choice import *
from DoS import *
from Pass import *
from SQL import *
from datetime import datetime
from PyQt5.QtWidgets import QMainWindow, QApplication
import pymysql.cursors
# Connection to the local MySQL database backing the Django site.
# NOTE(review): hard-coded credentials with an empty root password —
# acceptable only for a local demo, never production.
conexao = pymysql.connect(
    host='127.0.0.1',
    user='root',
    password='',
    db='sitedjango',
    charset='utf8mb4',
    cursorclass=pymysql.cursors.DictCursor
)
# Shared cursor used by the attack windows below.
cursor = conexao.cursor()
class Attack(QMainWindow, Ui_MainWindow):
    """First window: asks for a target IP and checks it is reachable."""

    def __init__(self, parent=None):
        super().__init__(parent)
        super().setupUi(self)
        self.ipBtn.clicked.connect(self.ipChoice)
        self.pinga = None

    def ipChoice(self):
        """Ping the typed IP; advance to the attack-choice window on success."""
        self.pinga = self.ping()
        if not self.pinga:
            self.ipLabel.setText(f'Não foi possível se conectar ao IP: {self.inputIP.text()}')
        else:
            attack.close()
            choice.show()

    def ping(self):
        """Ping the IP from the input field once.

        Returns a summary string built from the ping output on success,
        False on failure.
        """
        import platform  # os already imported at module level
        # Windows uses "-n <count>", Unix-like systems use "-c <count>".
        if platform.system().lower() == "windows":
            ping_str = "-n 1"
        else:
            ping_str = "-c 1"
        # SECURITY: the IP comes straight from user input and is passed to a
        # shell — a crafted value could inject commands. Flagged as-is for
        # this demo tool; prefer subprocess.run([...], shell=False).
        resposta = os.system("ping " + ping_str + " " + self.inputIP.text())
        # BUGFIX: the summary command previously hard-coded the Windows
        # "-n 1" flag even on non-Windows systems; reuse ping_str instead.
        cmd = "ping " + ping_str + " " + self.inputIP.text()
        r = "".join(os.popen(cmd).readlines())
        r = r.encode('ascii', 'ignore').decode('ascii')
        ping_split = r.split("\n")
        if resposta == 0:
            # NOTE(review): these line indices assume the Windows ping output
            # layout — confirm on other platforms/locales.
            return f'{ping_split[4]} \n{ping_split[5]} {ping_split[6].strip()} \n Latências após ataque: {ping_split[8]}'
        else:
            return False
class Choice(QMainWindow, Ui_ChoiceWindow):
    """Window where the user picks which attack to run next."""

    def __init__(self, parent=None):
        super().__init__(parent)
        super().setupUi(self)
        self.btnChoice.clicked.connect(self.choiceAttack)
        self.btnBack.clicked.connect(self.backAttack)
        self.btnClose.clicked.connect(self.closeAttack)

    def closeAttack(self):
        choice.close()

    def backAttack(self):
        # Return to the IP-selection window.
        choice.close()
        attack.show()

    def choiceAttack(self):
        # Dispatch table: typed option -> window to open next.
        windows = {'1': dos, '2': passw, '3': sql}
        selected = windows.get(self.inputChoice.text())
        if selected is None:
            print("Escolha uma opção válida!!")
        else:
            choice.close()
            selected.show()
class Sql(QMainWindow, Ui_Sql_MainWindow):
    """SQL-attack demo window."""

    def __init__(self, parent=None):
        super().__init__(parent)
        super().setupUi(self)
        self.btnSQLVoltar.clicked.connect(self.backSql)
        self.btnChoiceSQL.clicked.connect(self.choiceSql)

    def backSql(self):
        # Return to the attack-choice window.
        sql.close()
        choice.show()

    def choiceSql(self):
        # Count rows of contatos_categoria via the module-level cursor.
        cursor.execute('SELECT * FROM contatos_categoria')
        self.resultado1 = cursor.fetchall()
        self.resultado1 = len(self.resultado1)
        # NOTE(review): both options are unimplemented placeholders.
        if int(self.inputSQL.text()) == 1:
            pass
        elif int(self.inputSQL.text()) == 2:
            pass
class Password(QMainWindow, Ui_Pass_Window):
    """Password-attack demo window (logic not implemented yet)."""

    def __init__(self, parent=None):
        super().__init__(parent)
        super().setupUi(self)
        self.btnVoltarPass.clicked.connect(self.backPass)
        self.btnChoicePass.clicked.connect(self.choicePass)

    def backPass(self):
        # Return to the attack-choice window.
        passw.close()
        choice.show()

    def choicePass(self):
        # Placeholder: the actual attack is not implemented.
        pass
class Dos(QMainWindow, Ui_Dos_MainWindow):
    """DoS-attack demo window (logic not implemented yet)."""

    def __init__(self, parent=None):
        super().__init__(parent)
        super().setupUi(self)
        self.btnDoSVoltar.clicked.connect(self.backDos)
        self.btnDoSChoice.clicked.connect(self.choiceDos)

    def backDos(self):
        # Return to the attack-choice window.
        dos.close()
        choice.show()

    def choiceDos(self):
        # Placeholder: the actual attack is not implemented.
        pass
if __name__ == '__main__':
    # Instantiate every window up front; only the IP screen is shown,
    # the others are shown/hidden as the user navigates between them.
    qt = QApplication(sys.argv)
    attack = Attack()
    choice = Choice()
    passw = Password()
    sql = Sql()
    dos = Dos()
    attack.show()
    qt.exec()
|
import json
from flask import Flask, request, jsonify
from demo import Wikipedia
from datetime import datetime
import logging
# Log everything (DEBUG and up) to app.log with timestamps.
logging.basicConfig(filename='app.log',
                    level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
app = Flask(__name__)
@app.get('/get_data')
def get_data_from_wikipedia():
    """Scrape Wikipedia via the demo.Wikipedia helper.

    Reads the optional `filter` query parameter; when present, scrapes
    matching data and returns it as JSON with status/message fields.
    """
    try:
        # renamed from `filter` to avoid shadowing the builtin
        search_filter = request.args.get("filter", None)
        result = {}
        start_time = datetime.now()
        print(f"the function of scraping started by {start_time} ")
        if search_filter is not None:
            wk = Wikipedia(search_filter)
            result = wk.get_data()
        end_time = datetime.now()
        print(f"the function of scraping started by {end_time} ")
        response = {"data" : result, "status":200, "message":"Successfull."}
        # total time taken in whole the process.
        print(f"the time take in getting data from the web app. => {end_time-start_time}")
        return jsonify(response)
    except Exception as exc:
        # BUGFIX: the original did `raise("...")`, which itself fails with
        # "exceptions must derive from BaseException". Raise a real
        # exception and keep the original cause chained.
        raise RuntimeError("Getting error in finding result inside the wikipedia") from exc
# run the app
if __name__ == '__main__':
    # Development server only — do not use debug=True in production.
    app.run(debug=True)
|
# Generated by Django 3.2.5 on 2021-08-02 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: set Russian verbose names on the Project model and
    relocate its image field's upload path to portfolio/images/."""
    dependencies = [
        ('portfolio', '0002_alter_project_title'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='project',
            options={'verbose_name': 'Проект', 'verbose_name_plural': 'Проекты'},
        ),
        migrations.AlterField(
            model_name='project',
            name='image',
            field=models.ImageField(upload_to='portfolio/images/', verbose_name='Изображение'),
        ),
    ]
|
# coding: utf-8
# packages
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, log_loss
from sklearn.metrics import confusion_matrix
# import data
data = pd.read_csv('kaggleSubsetData.csv')
print(data.head())
# create a list element for variable columns to be become dummy variable
# NOTE(review): dummy_cols is never used below — candidate for removal.
dummy_cols = []
# creating data subset
sample_set = data[['Year', 'Records Lost', 'Sector', 'Method of Leak']].copy(deep=True)
# binned variable records lost into a binary of low = 0, high = 1 for risk as expressed by the amount of records lost
sample_set['catRisk'] = pd.cut(x=data['Records Lost'], bins=2, labels=['0', '1'])
# dropped records column
sample_set.drop('Records Lost', axis=1, inplace=True)
# iterate over variables and if not col year or category of risk then convert to dummy variables
for col in list(sample_set.columns):
    if col not in ['Year', 'catRisk']:
        dummy_vars = pd.get_dummies(sample_set[col])
        # prefix dummy columns with the source column name to keep them unique
        dummy_vars.columns = [col+str(x) for x in dummy_vars.columns]
        sample_set = pd.concat([sample_set, dummy_vars], axis=1)
# dropping no longer needed cols
sample_set.drop(['Sector', 'Method of Leak'], axis=1, inplace=True)
# assigned col header for target to variable target_var and then seperated it from the sub dataset to create a feature set
target_var = 'catRisk'
features = [x for x in list(sample_set.columns) if x != target_var]
# encode class values as integers ('0'/'1' labels -> 0/1)
encoder = LabelEncoder()
encoder.fit(sample_set['catRisk'])
encoded_Y = encoder.transform(sample_set['catRisk'])
# baseline model
def create_baseline():
    """Build and compile the baseline binary classifier: a 60-30-1 MLP
    with ReLU hidden layers and a sigmoid output."""
    net = Sequential()
    for layer in (
        Dense(60, input_dim=len(features), kernel_initializer='normal', activation='relu'),
        Dense(30, activation='relu'),
        Dense(1, kernel_initializer='normal', activation='sigmoid'),
    ):
        net.add(layer)
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
# splitting data into train and test
X_train, X_test, y_train, y_test = train_test_split(sample_set[features], encoded_Y, test_size=0.1)
# evaluating model performance (accuracy) with data set train and 3 k-folds
estimator = KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=50, verbose=0)
kfold = StratifiedKFold(n_splits=3, shuffle=True, random_state=21)
results = cross_val_score(estimator, X_train, y_train, cv=kfold)
print("Results: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# evaluting model performance (accuracy) with test data and 5 k-folds
results = cross_val_score(estimator, X_test, y_test, cv=5)
print("Results: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# RANDOM FOREST MODEL UPDATE
# Display the dimensions of your Training and Testing Data
print('X Train | X Test:')
print(X_train.shape, X_test.shape)
print('-'*50)
print('Y Train | Y Test:')
print(y_train.shape, y_test.shape)
# Create the Random Forest Classifier
classifier = RandomForestClassifier(random_state=5)
print(classifier)
# Compute k-fold cross validation on training data set and see mean accuracy score
scores = cross_val_score(classifier,X_train, y_train, cv=2, scoring='accuracy')
print(f'Training Data Accuracy Scores : {scores}')
print('-'*50)
print(f'Training Data Mean Accuracy Score : {scores.mean()}')
# Fit the model to training data
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
# Display scores
print(f'Accuracy Score : {accuracy_score(y_test, prediction)}')
print('-'*50)
# NOTE(review): log_loss is computed on hard 0/1 predictions here, not on
# predict_proba() probabilities — confirm this is intended.
print(f'Log Loss : {log_loss(y_test, prediction)}')
# describe the performance of a classifier using a confusion matrix table: 'TN' 'FN', 'FP' 'TP'
print(confusion_matrix(y_test, prediction))
|
################################################################# GLOBAL CLASSES ###################################################################################
## Stats for the citizen's city
class City():
    """Holds all city state as class attributes (a simple global record)."""
    ## Names of the attributes exposed to the rest of the game.
    var_list = [
        'houses','tax','population','tax_income','barracks','troops','troop_percent','construct_force','construct_percent','goldmines',\
        'stonemines','lumbermills','total_mills','work_force',\
        'gold_income','stone_income','wood_income','goldmine_cost','stonemine_cost','lumbermill_cost'
        ]
    ## Constants:
    houses = 10
    population = 35
    ## Citizen categories:
    ## 1/5 of pop is troops to start
    troop_percent = 25
    troops = int( population/ (100/troop_percent) )
    ## 1/10 of pop is construction workers to start
    construct_percent = 12.5
    construct_force = int( population/ (100/construct_percent) )
    ## Everyone else is a worker
    work_force = population - troops - construct_force
    ## Buildings:
    barracks = 1
    goldmines = 2
    stonemines = 3
    lumbermills = 2
    total_mills = goldmines + stonemines + lumbermills
    ## Building costs:
    ## Building cost = [ gold, stone, wood ]
    house_cost = [1,2,3]
    barracks_cost = [1,2,3]
    goldmine_cost = [1,2,3]
    ## BUGFIX: the stonemine cost was only defined under the misspelled name
    ## `stomemine_cost`, while var_list above (and the commented-out code in
    ## Cities()) refer to `stonemine_cost`. Define the correct name and keep
    ## the old misspelling as an alias for backward compatibility.
    stonemine_cost = [1,2,3]
    stomemine_cost = stonemine_cost
    lumbermill_cost = [1,2,3]
    ## Various rates:
    tax = 2
    gold_priority = 1
    stone_priority = 2
    wood_priority = 3
    ## Income rates:
    tax_income = houses * tax
    ## income = priority * ( total workers / total mills (which = workers for that mine))
    gold_income = int( gold_priority * goldmines * work_force/total_mills )
    stone_income = int( stone_priority * stonemines * work_force/total_mills )
    wood_income = int( wood_priority * lumbermills * work_force/total_mills )
## Character's stats, can be modified
class Man():
    # Player combat stats, stored as class attributes.
    health = 100
    attack = 12
    strength = 2
## The orc enemy stats
class Orc():
    # Orc combat stats, stored as class attributes.
    health = 50
    attack = 6
    strength = 3
########################################################## GLOBAL FUNCTIONS #######################################################################################
## Returns the desired task to run
def Get_Action():
    """Prompt at the command post and return the validated task string."""
    print ('You are now at the Command Post. What do you wish to do?')
    Type ('Will you go to the "War", build up your "cities", view the "tutorial", or "quit"?')
    task = str( input('') )
    valid_inputs = ['War','war','cities','Cities','tutorial','Tutorial','quit','Quit']
    # Check() re-prompts until the input is one of valid_inputs (or quit).
    task = Check(task, valid_inputs)
    return task
## Dispatches the chosen command-post task
def Run_Action(task):
    """Run the task returned by Get_Action(); 'quit' clears the global
    Running flag so the main loop stops."""
    if task in ('quit', 'Quit'):
        global Running
        Running = False
    elif task in ('war', 'War'):
        print ('Under refinement')
        #War()
    elif task in ('cities', 'Cities'):
        Cities()
    elif task in ('tutorial', 'Tutorial'):
        Tutorial()
## A very common snippet that makes sure given input is valid so that no loops are broken
## More concise than a try/except because it test multiple inputs at once
def Check(test, check_list):
    """Re-prompt until `test` is in `check_list`; return the accepted input.

    Gives up after 8 invalid attempts, or immediately when the user
    types quit/Quit.
    """
    count = 0
    while test not in check_list and count != 8:
        if test == 'quit' or test == 'Quit':
            break
        count += 1
        print ('Unrecoginzed command. Enter a new command.')
        test = input ('')
    if count == 8:
        print ('Too many invalid commands. Returning you to the command tent.')
        # NOTE(review): Get_Action()'s return value is discarded here and the
        # still-invalid `test` is returned below — likely unintended.
        Get_Action()
    if test == 'quit' or test == 'Quit':
        print ('')
        # NOTE(review): same issue — the newly chosen action is dropped and
        # 'quit'/'Quit' is returned to the caller.
        Get_Action()
    return test
## Finds the ending word for Type(text)
def Find_Last_Word(text, text_chars, page_length, current_count):
    """Locate the whole word that straddles index `current_count` in
    `text_chars`.

    Returns [last_word, first_point] where `last_word` is the full word and
    `first_point` is the index of the space just before it.
    `text` and `page_length` are not used by this function.
    """
    original_count = current_count
    current_letter = text_chars[current_count]
    # on the off chance that the first letter is a 'space'
    if current_letter == ' ':
        current_letter = text_chars[current_count-1]
    current_word_list = []
    # Walk backwards until the space preceding the word.
    while current_letter != ' ':
        current_word_list.append(current_letter)
        current_count -= 1
        current_letter = text_chars[current_count]
    first_point = current_count
    # Drop the starting character (the forward walk below collects it again)
    # and restore left-to-right order.
    current_word_list.remove(current_word_list[0])
    current_word_list.reverse()
    current_count = original_count
    current_letter = text_chars[current_count]
    # Walk forwards to the end of the word.
    while current_letter != ' ':
        current_word_list.append(current_letter)
        current_count += 1
        # if the current word is the last word then is used
        try:
            current_letter = text_chars[current_count]
        except:
            break
    # Join the collected characters into the final word.
    current_word = ('')
    for x in current_word_list:
        current_word += str(x)
    last_word = current_word
    final_return = [last_word, first_point]
    return final_return
def Insert_Newline(text_chars, text_words_list, first_point, last_word):
    """Insert a newline at index `first_point` of `text_chars` when
    `last_word` occurs in `text_words_list`; mutates and returns the list."""
    if last_word in text_words_list:
        text_chars.insert(first_point, '\n')
    return text_chars
## Prints words on screen but will measure the screen to make sure no words are broken up
def Type(text):
    """Print `text`, inserting newlines so words are not split across lines.

    Handles up to roughly three wrapped lines; shorter text is printed
    unchanged.
    """
    page_length = 80 # number of chars that fit on a single line of text
    # for multi-line text
    if len(text) > page_length:
        text_words_list = text.split(' ')
        # makes a list of the text
        text_chars = []
        for x in text:
            text_chars.append(x)
        # finds valuble info
        first_start = page_length - 1
        final_return = Find_Last_Word(text, text_chars, page_length, first_start)
        last_word = final_return[0]
        first_point = final_return[1]
        # Puts the ending word on a new line
        text_chars = Insert_Newline(text_chars, text_words_list, first_point, last_word)
        new_start = first_point + page_length - 1 # the one is for the space before each new line
        try:
            final_return = Find_Last_Word(text, text_chars, page_length, new_start)
            last_word = final_return[0]
            first_point = final_return[1]
            # BUGFIX: first_point and last_word were passed in swapped order
            # here and in the 3rd-line call below (compare the first call
            # above), so the newline was never inserted past the first line.
            text_chars = Insert_Newline(text_chars, text_words_list, first_point, last_word)
            new_start += first_point
            try:
                final_return = Find_Last_Word(text, text_chars, page_length, new_start)
                last_word = final_return[0]
                first_point = final_return[1]
                text_chars = Insert_Newline(text_chars, text_words_list, first_point, last_word)
            except:
                pass
        except:
            pass
        # Makes the actual modified text
        text = ''
        for x in text_chars:
            text += str(x)
    print (text)
################################################################## TUTORIAL #######################################################################################
def Tutorial():
    """Print the gameplay tutorial (city management, then battle commands)."""
    print ('The first thing you need to understand is info about your city.')
    Type ('Each house you own can hold up to 10 people. To start, 1/5 of your population will be soldiers. Another 1/10 will be construction workers. The rest is divided up to the goldmines, stonemines, and lumbermills.')
    print ('')
    Type ('You can increase or decrease these ratios at any time by going to your city and prompting to change them, but beware, population is not based solely on the number of homes you own.')
    Type ('Your tax rates, gerenal wealthiness, supply of each material, troop percentage, the crowdidness of your town and various other things will affect immigration rates.')
    print ('')
    Type ('The next thing you need to understand is how to change these things. First, you start by getting to your command tent. Then, when prompted you type in the specific command you would like to change.')
    print ('')
    print ('Next, battle commands. To start, go to your battle front. ')
######################################################################### BATTLE #################################################################################
## Runs the battle sims
def War():
    """Battle menu: report player status and ask whether to fight."""
    print ('You currently have '+ str(Man.health) +' health remaining, and '+ str(City.troops) +' troops.)')
    print ('Would you like to battle an orc or fight a battle? (y)(n)')
    valid_responses = ['y','Y','n','N']
    fight = str( input(''))
    # BUGFIX: Check() returns the validated input; the result was discarded
    # (compare Get_Action), so a corrected entry after an invalid one was
    # ignored here.
    fight = Check(fight, valid_responses)
    if fight == 'y' or fight == 'Y':
        print ('Which would you like to fight, large scale battle or a 1v1 match?')
    elif fight == 'n' or fight == 'N':
        print ('We will return to the commmand tent then.')
########################################################################## CITY ###################################################################################
## Runs the city info and sims
def Cities():
    """Show city info and (partially implemented) building construction."""
    Type ('Currently you have '+ str(City.houses) +' houses, a population of '+ str(City.population) +' citizens, '+str(City.construct_force) +' construction workers, '+ str( City.troops ) +' troops, and '+ str( City.work_force ) +' people in your work force.')
    print ('')
    print ('Would you like a list of all information about your city? (y)(n)')
    getInfo = input('')
    valid_inputs = ['y','Y','n','N']
    # NOTE(review): Check()'s validated return value is discarded here (and
    # twice more below) — getInfo/get_action keep their raw values.
    Check(getInfo, valid_inputs)
    city_buildings = ['house','barracks','goldmines','stonemines','lumbermills']
    city_rates = ['tax','gold_priority','stone_priority','wood_priority']
    if getInfo == 'y' or getInfo == 'Y':
        print ('Your buildings include '+ str( city_buildings ) +', and your rates include '+ str( city_rates ) +'.')
        print (City.var_list)
    Type ('You can type the name of any structure and construction will begin to add a new building. You must be able to pay for each construct though.')
    Type ("In addition to this, you can change your city's rates such as your tax, gold, stone, or wood income priority, military drafting rate, percent of construction workers, and others.")
    print ('Would you like to build any new buildings? (y)(n)')
    get_action = input ('')
    valid_inputs = ['y','Y','n','N']
    Check(get_action, valid_inputs)
    if get_action == 'y' or get_action == 'Y':
        print('What would you like to build?')
        get_action = input('')
        Check(get_action, city_buildings)
        for building in city_buildings:
            if get_action == building:
                building_cost = ( ''+ building +'_cost' )
                # NOTE(review): locals() has no key 'City.<name>_cost', so this
                # raises KeyError — getattr(City, building_cost) was probably
                # intended. Also, plural names like 'goldmines' produce
                # 'goldmines_cost', which does not match City.goldmine_cost.
                building_cost = locals()['City.'+ building_cost]
    ## city_varibles = ['houses','tax','barracks','goldmines','stonemines','lumbermills','troop_percent','construct_percent']
    ## if get_action == 'houses':
    ##     gold = City.house_cost[0]
    ##     stone = City.house_cost[1]
    ##     wood = City.house_cost[2]
    ##     print ('A new house can be constructed for '+ gold +' gold, '+ stone +' stone, and '+ wood +' wood.')
    ##     print ('Are you sure you want to build a house?')
    ##     build = input('')
    ##     if build == 'y' or build == 'Y':
    ##         City.houses += 1
    ##         City.gold -= gold
    ##         City.stone -= stone
    ##         City.wood -= wood
    ##         print ('You now have ')
    ##
    ## elif get_action == 'tax':
    ##     Type ('Your current tax is '+ str( City.tax ) + ' gold per a house. What would you like to change your tax to? Beware, too high a tax will encourage people to leave.')
    ##     new_tax = input('')
    ##     City.tax = new_tax
    ## elif get_action == city_varibles[1]:
    ##     gold = City.barracks_cost[0]
    ##     stone = City.barracks_cost[1]
    ##     wood = City.barracks_cost[2]
    ##     print ('A new barracks can be contructed for ...')
    ## elif get_action == 'goldmines':
    ##     gold = City.goldmine_cost[0]
    ##     stone = City.goldmine_cost[1]
    ##     wood = City.goldmine_cost[2]
    ##     print ('A new goldmine can be constructed for ...')
    ## elif get_action == 'stonemines':
    ##     gold = City.stonemine_cost[0]
    ##     stone = City.stonemine_cost[1]
    ##     wood = City.stonemine_cost[2]
    ##     print ('A new stonemine can be constructed for ...')
    ## elif get_action == 'lumbermill':
    ##     gold = City.lumbermill_cost[0]
    ##     stone = City.lumbermill_cost[1]
    ##     wood = City.lumbermill_cost[2]
    ##     print ('A new lumbermill can be constructed for ...')
    ## elif get_action == 'troop_percent':
    ##     print ('Your current troop percent is '+ str( City.troop_percent ) +'% of your population.')
    ## elif get_action == 'construct_percent':
    ##     print ('Your current construction percent is '+ str( City.construct_person ) +'% of your population.')
    ##
    ## print ('Would you like to change anything else?')
#################################################################### MAIN #########################################################################################
## Runs the main exe
def Main():
    """Top-level game loop: greet the player, then repeatedly prompt for an
    action and execute it.

    Relies on the module-level flag ``Running`` (defined just below this
    function) and on the ``Get_Action()`` / ``Run_Action()`` helpers defined
    elsewhere in this file.
    """
    name = input("What is your name: ")
    print('Welcome to The War, ' + name + '.')
    # NOTE(review): nothing visible here ever sets Running to False, so this
    # loop only terminates via an exception or process exit.
    while Running:
        task = Get_Action()
        print('')
        Run_Action(task)
        print('')
# Module-level run flag read by Main()'s loop; never cleared in this file.
Running = True
|
from django.apps import AppConfig
class ImportExportsConfig(AppConfig):
    """Django AppConfig for the 'imports' app."""
    name = 'imports'
|
from ._title import Title
from plotly.graph_objs.layout.ternary.baxis import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
|
from spotibot.core.objects import (
Activity,
Context,
General,
Music,
Podcasts,
Time,
User,
Device,
)
# Request, \
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-11 23:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change reservation.duration to CharField(max_length=5), default '00:00'."""
    dependencies = [
        ('resres', '0003_auto_20170511_2246'),
    ]
    operations = [
        migrations.AlterField(
            model_name='reservation',
            name='duration',
            field=models.CharField(default='00:00', max_length=5),
        ),
    ]
|
from braindecode.datasets.set_loaders import BCICompetition4Set2A
from braindecode.datasets.signal_processor import SignalProcessor
from glob import glob
import h5py
import numpy as np
import logging
log = logging.getLogger(__name__)
# Test A: check if loaded sets signal is correct for train test
# and labels correct for train, labels pos correct for test
# Test B: check if loaded through bci competition set
# with wyrm set train trials are same as first 288 trials,
# test trials same as later 288 trials
def check_file(train_file, test_file, combined_file):
    """Assert that combined_file is train_file followed by test_file.

    All three arguments are open h5py files in BCI Competition IV 2a layout
    ('signal' dataset plus 'header' with Classlabel and EVENT TYP/POS).
    Raises AssertionError on any mismatch.
    """
    train_signal = train_file['signal'][:]
    test_signal = test_file['signal'][:]
    combined_signal = combined_file['signal'][:]
    # Replace nans with inf to allow "allclose" comparison
    # (np.allclose(np.nan, np.nan) => False!)
    assert not np.any(np.isinf(train_signal))
    assert not np.any(np.isinf(test_signal))
    assert not np.any(np.isinf(combined_signal))
    train_signal[np.isnan(train_signal)] = np.inf
    test_signal[np.isnan(test_signal)] = np.inf
    combined_signal[np.isnan(combined_signal)] = np.inf
    # Combined signal concatenates along axis 1 (time): train first, then test.
    assert np.allclose(train_signal, combined_signal[:, :train_signal.shape[1]])
    assert np.allclose(test_signal, combined_signal[:, train_signal.shape[1]:])
    train_labels = train_file['header']['Classlabel'][0,:]
    train_event_type = train_file['header']['EVENT']['TYP'][0,:]
    # Event types 769..772 are the four class cues; type - 768 gives class 1..4.
    train_trial_mask = np.array([ev in [769,770,771,772] for ev in train_event_type])
    assert np.array_equal(train_event_type[train_trial_mask] - 768, train_labels)
    test_labels = test_file['header']['Classlabel'][0,:]
    combined_labels = combined_file['header']['Classlabel'][0,:]
    combined_event_type = combined_file['header']['EVENT']['TYP'][0,:]
    combined_trial_mask = np.array([ev in [769,770,771,772] for ev in combined_event_type])
    assert np.array_equal(combined_event_type[combined_trial_mask] - 768, combined_labels)
    # Each 2a session contains 288 trials (asserted below).
    assert len(train_labels) == 288
    assert len(test_labels) == 288
    assert len(combined_labels) == 288 * 2
    assert np.array_equal(combined_labels[:288], train_labels)
    train_event_pos = train_file['header']['EVENT']['POS'][0,:]
    test_event_pos = test_file['header']['EVENT']['POS'][0,:]
    combined_event_pos = combined_file['header']['EVENT']['POS'][0,:]
    # Test-event positions in the combined file are offset by the train signal length.
    assert np.array_equal(train_event_pos,
                          combined_event_pos[:len(train_event_pos)])
    assert np.array_equal(test_event_pos,
                          combined_event_pos[len(train_event_pos):] - train_signal.shape[1])
    log.info("File ok")
def check_as_sets(train_file_name, test_file_name, combined_file_name):
    """Load train/test/combined through BCICompetition4Set2A + SignalProcessor
    and check the combined set's epochs match train (first 288 trials) and
    test (the rest).

    NaNs were replaced during loading, so instead of exact equality a small
    fraction (< 1%) of unequal entries is tolerated.
    """
    train_set = BCICompetition4Set2A(train_file_name)
    train_wyrm_set = SignalProcessor(train_set)
    train_wyrm_set.load()
    test_set = BCICompetition4Set2A(test_file_name)
    # Test sessions carry unlabelled cues; the sentinel marks the unknown class.
    test_wyrm_set = SignalProcessor(test_set,
                                    marker_def={'Unknown':[-2147483648]})
    test_wyrm_set.load()
    combined_set = BCICompetition4Set2A(combined_file_name)
    combined_wyrm_set = SignalProcessor(combined_set)
    combined_wyrm_set.load()
    # nans were made to be means, so ignore that some values are not equal
    train_epo = train_wyrm_set.epo.data
    test_epo = test_wyrm_set.epo.data
    combined_epo = combined_wyrm_set.epo.data
    train_part = combined_epo[:288]
    assert (np.sum(train_epo - train_part != 0) /
            float(np.prod(train_epo.shape))) < 1e-2
    test_part = combined_epo[288:]
    # Fix: count unequal entries like the train check above; the original
    # summed the raw differences (np.sum(test_epo - test_part)), which can
    # cancel out and pass even when many entries differ.
    assert (np.sum(test_epo - test_part != 0) /
            float(np.prod(test_epo.shape))) < 1e-2
    log.info("Set ok")
def test_all():
    """Named test_* so pytest also collects it; checks every matched file triple."""
    train_files = sorted(glob('data/bci-competition-iv/2a/*T.mat'))
    test_files = sorted(glob('data/bci-competition-iv/2a/*E.mat'))
    combined_files = sorted(glob('data/bci-competition-iv/2a-combined/*TE.mat'))
    for train_name, test_name, combined_name in zip(train_files, test_files,
                                                    combined_files):
        log.info("Checking {:s}".format(combined_name))
        with h5py.File(train_name, 'r') as train_file:
            with h5py.File(test_name, 'r') as test_file:
                with h5py.File(combined_name, 'r') as combined_file:
                    check_file(train_file, test_file, combined_file)
        check_as_sets(train_name, test_name, combined_name)
if __name__ == '__main__':
    # Also runnable directly as a script, outside pytest.
    test_all()
|
from collections import OrderedDict
from functools import reduce
from glob import glob
from operator import add
import matplotlib.pyplot as plt
import numpy as np
from PyCAR.PyCIT.FT import PowDens
from aer_construction import AerModel
from citvappru.SourceCAREM2_1_1 import Geometry
def main():
    """Plot AER power densities and comparison curves from INFO\\ dump files.

    Reads the model grid via AerModel, power densities via PowDens, then
    overlays the *.pow / *.nod dumps and a reactivity trace.
    """
    alf = AerModel()
    # Hexagon element ids from the column containing type-26, excluding types 5 and 1999.
    elements = [hexag.type[0] for hexag in
                list(filter(lambda column: 26 in map(lambda hexag: hexag.type[1], column), alf.grid))[0] if
                hexag.type[1] not in [5, 1999]]
    pows = PowDens('aer.cdb.powerdensities')
    plt.plot(pows[0, min(elements) - 1:max(elements) - 1, :], '-o')
    # Fix: str.strip('.pow') strips any of the characters '.', 'p', 'o', 'w'
    # from BOTH ends (mangling names like 'power.pow' -> 'er'); slice the
    # extension off instead so legend labels keep the real file stem.
    Pows = OrderedDict({fname[:-len('.pow')]: np.genfromtxt(fname) for fname in glob('INFO\\*.pow')})
    Nods = OrderedDict({fname[:-len('.nod')]: np.genfromtxt(fname, comments='*') for fname in glob('INFO\\*.nod')})
    # Reshape node dumps into (timesteps, 2210 nodes, columns).
    Nods = OrderedDict({key: value.reshape(value.shape[0] // 2210, 2210, value.shape[1])
                        for key, value in Nods.items()})
    plt.plot(*reduce(add, [(Pow[:, 0], Pow[:, 1], '-o') for Pow in Pows.values()]))
    plt.legend(Pows.keys())
    reac_file = 'INFO\\solaeki.reac'  # renamed: don't shadow the builtin `file`
    alf = np.loadtxt(reac_file)
    plt.plot(alf[:, 0], alf[:, 1], 'o')
    return
if __name__ == '__main__':
    # alf = GroupFlux('aer.cdb.flux')
    # plt.plot(range(10),alf[0,0,:,1],'-o',range(10),alf[0,0,:,0],'o-')
    Geo = Geometry('aer_geo.ci@')
    import re
    # Capture the fifth 3-character numeric field following a line ending '  3'.
    # NOTE(review): the open() handle below is never closed — consider a `with`.
    alfred = re.compile(r'(?<=\s{2}3\n)(?:[\s0-9]{3}){4}([\s0-9]{3})')
    findings = alfred.search(open('aer_geo.ci@').read())
    pass
|
# -*- coding: utf-8 -*-
import heapq
class Solution:
    def kClosest(self, points, K):
        """Return the K points closest to the origin (squared Euclidean distance)."""
        def sq_dist(pt):
            return pt[0] ** 2 + pt[1] ** 2
        return heapq.nsmallest(K, points, key=sq_dist)
if __name__ == "__main__":
solution = Solution()
assert [[-2, 2]] == solution.kClosest([[1, 3], [-2, 2]], 1)
assert [[3, 3], [-2, 4]] == solution.kClosest([[3, 3], [5, -1], [-2, 4]], 2)
|
def compare(arg_a, arg_b):
    """Print both arguments, then report which one is larger (or that they are equal)."""
    print(arg_a, arg_b)
    if arg_a > arg_b:
        print("the first argument is larger than the second one")
    elif arg_a < arg_b:
        print("the second argument is larger than the first one")
    else:
        # Fix: the original's else-branch claimed the second argument was
        # larger even when the two arguments were equal.
        print("the two arguments are equal")
# Demo calls exercising both ordering branches of compare().
compare(3, 7)
compare(5, 3)
compare(100, 102)
|
n2=('1','2','3','4','5')
# Tuples are immutable: their elements cannot be reassigned.
# (original note said "connect to the database" — leftover, nothing here uses a DB)
print(n2[1])
# n2[1]=3 would raise a TypeError
|
import torch
# Global flag: when True, torch ops on TVTensors return the subclass
# (toggled by set_return_type below).
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
    """Context manager returned by set_return_type(); restores the previous
    global return-type flag on exit."""
    def __init__(self, to_restore):
        self.to_restore = to_restore  # flag value to reinstate on __exit__
    def __enter__(self):
        return self
    def __exit__(self, *args):
        global _TORCHFUNCTION_SUBCLASS
        _TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
    """[BETA] Set the return type of torch operations on :class:`~torchvision.tv_tensors.TVTensor`.
    This only affects the behaviour of torch operations. It has no effect on
    ``torchvision`` transforms or functionals, which will always return as
    output the same type that was passed as input.
    .. warning::
        We recommend using :class:`~torchvision.transforms.v2.ToPureTensor` at
        the end of your transform pipelines if you use
        ``set_return_type("TVTensor")``. This will avoid the
        ``__torch_function__`` overhead in the models ``forward()``.
    Can be used as a global flag for the entire program:
    .. code:: python
        img = tv_tensors.Image(torch.rand(3, 5, 5))
        img + 2  # This is a pure Tensor (default behaviour)
        set_return_type("TVTensor")
        img + 2  # This is an Image
    or as a context manager to restrict the scope:
    .. code:: python
        img = tv_tensors.Image(torch.rand(3, 5, 5))
        img + 2  # This is a pure Tensor
        with set_return_type("TVTensor"):
            img + 2  # This is an Image
        img + 2  # This is a pure Tensor
    Args:
        return_type (str): Can be "TVTensor" or "Tensor" (case-insensitive).
            Default is "Tensor" (i.e. pure :class:`torch.Tensor`).
    """
    global _TORCHFUNCTION_SUBCLASS
    to_restore = _TORCHFUNCTION_SUBCLASS  # remembered so the returned context manager can restore it
    try:
        # Case-insensitive lookup; any other value is rejected with ValueError.
        _TORCHFUNCTION_SUBCLASS = {"tensor": False, "tvtensor": True}[return_type.lower()]
    except KeyError:
        raise ValueError(f"return_type must be 'TVTensor' or 'Tensor', got {return_type}") from None
    return _ReturnTypeCM(to_restore)
def _must_return_subclass():
    """Return True if torch ops should currently preserve the TVTensor subclass."""
    return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
|
import pandas as pd

# Names and base incomes as parallel Series; every income gets a flat bonus.
x = pd.Series(['Jonh','Ton','Carot','Lisa','Jackie'])
a = pd.Series([1100,2000,1000,1000,1000])
b = 50
y = a.add(b)
data = pd.DataFrame({'Name': x, 'Income': y})
print(data)
|
import optproblems.cec2005
import numpy as np
import time
from IA import *
import os
def IAalgorithm(n_parties, politicians, R, function, function_index, max_evaluations, desertion_threshold):
    """Run one Ideology Algorithm optimisation and return its result trace.

    Thin wrapper that forwards all configuration to IdeologyAlgorithm and
    invokes its ideology_algorithm() method.
    """
    IA = IdeologyAlgorithm(n_parties=n_parties, politicians=politicians, R=R, function=function,
                           function_index=function_index, max_evaluations=max_evaluations, desertion_threshold=desertion_threshold)
    return IA.ideology_algorithm()
if __name__ == "__main__":
dim = 30
repeats = 10
evaluations = 10000*dim
parties = 5
politicians = 30
r = 0.5
desertion_threshold = 10
if not os.path.exists('results'):
os.makedirs('results')
if not os.path.exists('convergence'):
os.makedirs('convergence')
np.random.seed(10)
f4 = optproblems.cec2005.F4(dim)
time1 = time.time()
results = np.array([IAalgorithm(n_parties=parties, politicians=politicians,
R=r, function=f4, function_index=4, max_evaluations=evaluations,
desertion_threshold=desertion_threshold) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/IA-resultsd-30-4.txt", "w") as file:
print("F4: Shifted Schwefel's Problem 1.2 with Noise in Fitness", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/IA-convergenced-30-4.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f9 = optproblems.cec2005.F9(dim)
time1 = time.time()
results = np.array([IAalgorithm(n_parties=parties, politicians=politicians,
R=r, function=f9, function_index=9, max_evaluations=evaluations,
desertion_threshold=desertion_threshold) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/IA-resultsd-30-9.txt", "w") as file:
print("F9: Shifted Rastrigin's Function", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/IA-convergenced-30-9.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f12 = optproblems.cec2005.F12(dim)
time1 = time.time()
results = np.array([IAalgorithm(n_parties=parties, politicians=politicians,
R=r, function=f12, function_index=12, max_evaluations=evaluations,
desertion_threshold=desertion_threshold) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/IA-resultsd-30-12.txt", "w") as file:
print("F12: Schwefel's Problem 2.13", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/IA-convergenced-30-12.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f21 = optproblems.cec2005.F21(dim)
time1 = time.time()
results = np.array([IAalgorithm(n_parties=parties, politicians=politicians,
R=r, function=f21, function_index=21, max_evaluations=evaluations,
desertion_threshold=desertion_threshold) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/IA-resultsd-30-21.txt", "w") as file:
print("F21: Rotated Hybrid Composition Function", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/IA-convergenced-30-21.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f22 = optproblems.cec2005.F22(dim)
time1 = time.time()
results = np.array([IAalgorithm(n_parties=parties, politicians=politicians,
R=r, function=f22, function_index=22, max_evaluations=evaluations,
desertion_threshold=desertion_threshold) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/IA-resultsd-30-22.txt", "w") as file:
print("F22: Rotated Hybrid Composition Function with High Condition Number Matrix", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/IA-convergenced-30-22.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
|
from flask import request, jsonify, url_for, redirect, g, send_file
from models import Drawings
from sqlalchemy.exc import IntegrityError
from index import app, db
from modelHandler import addNewModel, getModel, getAllModels
import csv
import ast
@app.route('/api/model', methods=['POST'])
def addModel():
    """Create a new model from POSTed JSON with keys 'name' and 'data'.

    Returns {'response': 'all good'} on success, or
    {'error': 'Name Already Exists'} when getModel() already finds the name.
    """
    data = request.get_json()
    alreadyExists = getModel(data['name'])
    if not alreadyExists:
        addNewModel(data['data'], data['name'])
        packet = {'response': 'all good'}
        return jsonify(packet)
    return jsonify({'error': 'Name Already Exists'})
@app.route('/api/model/<data>', methods=['GET'])
def getModelRoute(data):
    """Return the stored model named `data` as rows of parsed CSV values.

    NOTE(review): if getModel() returns a falsy value for an unknown name,
    the open() below will raise — confirm callers guarantee existence.
    """
    array = []
    fileName = getModel(data)
    with open(fileName, 'r') as file:
        read = csv.reader(file, delimiter=',', quotechar='|')
        reader = iter(read)
        # Skip the first two rows (header lines) before the data.
        next(reader)
        next(reader)
        for line in reader:
            # Row layout: three floats, one Python-literal column, one float.
            holder = [float(line[0]), float(line[1]), float(line[2]), ast.literal_eval(line[3]), float(line[4])]
            array.append(holder)
    return jsonify({'data': array})
@app.route('/api/models', methods=['GET'])
def getAllModelRoute():
    """Return all stored models as reported by getAllModels()."""
    data = getAllModels()
    return jsonify({'data': data})
if __name__ == '__main__':
    # Dev server: listen on all interfaces with threaded request handling.
    app.run(host='0.0.0.0', port=5000, threaded = True)
|
import pyaudio
import librosa
import numpy as np
import tensorflow as tf
from datetime import datetime
########## Variables ##########
RECORD_SECONDS =60  # total capture length in seconds
CHUNK = 8192  # frames per buffer read
CHANNELS = 1  # mono capture
FORMAT = pyaudio.paInt16  # 16-bit integer samples
RATE = 44100  # sample rate in Hz
# Socket Variables
ADDRESS = '192.168.123.6'  # peer address (LAN)
PORT = 21536
###############################
########## Functions ##########
def load(frames, sr=RATE):
    """Load a sequence of audio file paths and concatenate their samples.

    Args:
        frames: non-empty sequence of audio file paths readable by librosa.
        sr: target sample rate passed to librosa.load.
    Returns:
        1-D numpy array with all files' samples concatenated in order.
    """
    # Collect each file's samples and concatenate once at the end; the
    # original np.hstack inside the loop re-copied the growing array each
    # iteration (quadratic in total length).
    arrays = []
    for f in frames:
        [array, sr] = librosa.load(f, sr=sr)
        arrays.append(array)
    return np.hstack(arrays)
def file_saver(nodeNum, frames, wave, p):
    """Write raw audio frames to a timestamped WAV file and return its name.

    Args:
        nodeNum: string id embedded in the output filename.
        frames: iterable of byte chunks (joined and written as WAV data).
        wave: presumably the stdlib `wave` module, passed in by the caller.
        p: active pyaudio.PyAudio instance (used for the sample width).
    Returns:
        The path of the file written.
    """
    now = datetime.now()
    time = now.strftime('-%H:%M:%S')
    # NOTE(review): ':' is not a legal filename character on Windows — confirm
    # the target OS before reusing this naming scheme.
    fileName = './second-'+nodeNum+time+'.wav'
    wf = wave.open(fileName, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    return fileName
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 14:11:49 2019
code to investigate stars going into PSFs
@author: ppxee
"""
### Import required libraries ###
import matplotlib.pyplot as plt #for plotting
from astropy.io import fits #for handling fits
from astropy.table import Table #for handling tables
import numpy as np #for handling arrays
#import math
#from astropy.stats import median_absolute_deviation
import vari_funcs #my module to help run code neatly
plt.close('all')  # start with no open figures
def get_star_data_DR11(sdata, data):
    """Select PSF-quality stars from the DR11 star catalogue.

    Keeps stars with aperture-4 magnitude in (15, 19) and aperture colour
    (MAG_APER[4] - MAG_APER[1]) in (-0.7, -0.5), the point-like criterion.
    Returns (x, y, allx, ally, tempsdata): magnitudes/colours of the selected
    stars, the same quantities for every source in `data`, and the selected
    catalogue rows themselves.
    """
    colname = 'MAG_APER'
    mags = sdata[colname][:, 4]
    colour = mags - sdata[colname][:, 1]
    # Brightness window combined with the point-like colour criterion.
    keep = (mags > 15) & (mags < 19) & (colour > -0.7) & (colour < -0.5)
    tempsdata = sdata[keep]
    x = tempsdata[colname][:, 4]
    y = x - tempsdata[colname][:, 1]
    allx = data[colname][:, 4]
    ally = allx - data[colname][:, 1]
    return x, y, allx, ally, tempsdata
# Load the DR11 star catalogue and the full DR11 source catalogue (FITS ext 1).
sdata = fits.open('UDS_catalogues/DR11_output_stars.fits')[1].data
data = fits.open('UDS_catalogues/DR11_output.fits')[1].data
semesters = ['08B']#, '07B', '08B', '09B', '10B', '11B', '12B']  # NOTE(review): unused in this section
x, y, allx, ally, psf_star_data = get_star_data_DR11(sdata, data)
# Colour-magnitude diagram: all sources in grey crosses, selected stars in blue.
plt.figure()
plt.scatter(allx, ally, c='tab:grey',marker='+')
plt.scatter(x,y,c='b')
plt.xlabel('MAG_APER[4]')
plt.ylabel('MAG_APER[4] - MAG_APER[1]')
plt.xlim(xmax=21, xmin=10)
plt.ylim(ymax=-0.4, ymin=-1)
plt.title('DR11 K')
plt.tight_layout()
### Save star data for use in psfs ###
table_save = Table(psf_star_data)
table_save.write('UDS_catalogues/DR11_stars_for_PSFs.fits')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-07 09:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Add ordering/latest-by metadata to OrganisationGeography and enforce
    uniqueness of (organisation, end_date)."""
    dependencies = [("organisations", "0038_copy_org_data")]
    operations = [
        migrations.AlterModelOptions(
            name="organisationgeography",
            options={
                "get_latest_by": "start_date",
                "ordering": ("-start_date",),
            },
        ),
        migrations.AlterUniqueTogether(
            name="organisationgeography",
            unique_together={("organisation", "end_date")},
        ),
    ]
|
# Changed 9/7/2019
import sqlite3
import os
#class Sql3Conn(dbConnection):
class SQLite:
    """Minimal helper that dumps the contents of table 'goof1' from a database.

    Holds only a display name; the database path is supplied per call to
    DBconnect().
    """

    path = 'init'  # legacy class attribute; kept for backward compatibility

    def __init__(self, in_Name: str):
        # Human-readable name for this wrapper instance.
        self.name = in_Name

    def DBconnect(self, inPath):
        """Open the database at inPath, print every row of table 'goof1', close.

        NOTE(review): assumes a table named 'goof1' exists in the database.
        """
        # Resolve the path up front (the original computed abspath after
        # connecting and then discarded it).
        path = os.path.abspath(inPath)
        connection = sqlite3.connect(path)
        try:
            cur1 = connection.cursor()
            cur1.execute("SELECT * FROM goof1")
            for row in cur1.fetchall():
                print(row)
        finally:
            # Fix: always release the connection, even if the query raises
            # (the original leaked it on error).
            connection.close()
|
#-*-coding:utf-8-*-
# Selenium end-to-end browser tests
import re
from selenium import webdriver
import threading
import time
import unittest
from naruto import create_app,db
from naruto.models import Role,User,Post
class SeleniumTestCase(unittest.TestCase):
    """End-to-end UI tests driven through a real browser (IE) via Selenium."""
    client=None
    @classmethod
    def setUpClass(cls):
        # Launch the browser; if the webdriver is unavailable the tests skip.
        try:
            cls.client=webdriver.IE()
        except:
            pass
        if cls.client:
            # Create the application
            # NOTE(review): 'tesing' looks like a typo for 'testing' — confirm the config key.
            cls.app=create_app('tesing')
            cls.app_context=cls.app.app_context()
            cls.app_context.push()
            # Suppress werkzeug logging to keep test output clean
            import logging
            logger=logging.getLogger('werkzeug')
            logger.setLevel('ERROR')
            # Create the database and populate fake roles/users/posts
            db.create_all()
            Role.insert_roles()
            User.generate_fake(10)
            Post.generate_fake(10)
            # Add an administrator account
            admin_role=Role.query.filter_by(permission=0xff).first()
            admin=User(email='jhon@example.com',username='jhon',password='cat',role=admin_role,confirmed=True)
            db.session.add(admin)
            db.session.commit()
            # Start the Flask server in a background thread
            threading.Thread(target=cls.app.run).start()
            time.sleep(1)
    @classmethod
    def tearDownClass(cls):
        if cls.client:
            # Shut down the server and close the browser
            cls.client.get('http://localhost:5000/shutdown')
            cls.client.close()
            # Destroy the database
            db.drop_all()
            db.session.remove()
            # Pop the application context
            cls.app_context.pop()
    def setUp(self):
        if not self.client:
            self.skipTest('web brower not availible.')
    def tearDown(self):
        pass
    def test_admin_home_page(self):
        """Log in as the admin and walk home -> profile.

        NOTE(review): the login email 'john@excaple.com' does not match the
        admin created above ('jhon@example.com'), and the asserted greeting
        'john' does not match the username 'jhon' — this test likely fails as
        written; confirm the intended fixture values.
        """
        self.client.get('http://localhost:5000/')
        self.assertTrue(re.search('Hello,\s+Stranger!',self.client.page_source))
        self.client.find_element_by_link_text('Log In').click()
        self.assertTrue('<h1>Login</h1>'in self.client.page_source)
        self.client.find_element_by_name('email').send_keys('john@excaple.com')
        self.client.find_element_by_name('password').send_keys('cat')
        self.client.find_element_by_name('submit').click()
        self.assertTrue(re.search('Hello,\s+john!',self.client.page_source))
        self.client.find_element_by_link_text('Profile').click()
        self.assertTrue('<h1>john</h1>'in self.client.page_source)
|
import tensorflow as tf
import numpy as np
import os
from scipy.stats import linregress
from util import *
from time import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # suppress TensorFlow C++ log output
def build_network(n_io, savename, n_hidden, n_clusters, learn_type, nonlin, learn_vars, lr_vars, train_state, norm_type):
    """Assemble the graph, loss, optimizer, and a (possibly restored) session.

    Returns (sess, input, y_, f, c, y, loss_runs, train_step_runs, learning_rate).
    The heavy lifting is delegated to the sibling helpers below / in util.
    """
    # Construct network architecture
    input, y_, y, c, f = network_architecture(n_io, n_clusters, n_hidden[0], n_hidden[1], learn_type, nonlin)
    # Define a loss function
    loss_runs = define_loss(y_, y, c, f, learn_type, train_state, n_clusters, norm_type)
    # Choose trainable variables
    train_var_lists = choose_trainable_variables(learn_type)
    # Choose an optimization procedure
    train_step_runs, learning_rate = choose_optimization(loss_runs, train_var_lists, learn_vars, lr_vars)
    # Load memory if exists and initialize network
    sess = initialize_network(savename,learn_type, train_state, input, y, y_, f, c)
    return sess, input, y_, f, c, y, loss_runs, train_step_runs, learning_rate
def network_architecture(n_io, n_clusters,n_hidden,n_c_hidden,learn_type,nonlin):
    """Create TF1.x placeholders and the prediction graph for `learn_type`.

    Returns (input, y_, y, c, f); c and f are dummy tensors for the
    architectures that have no explicit context/function decomposition.
    The learn_type strings are mutually exclusive, so the chained ifs pick
    exactly one branch.
    """
    tf.reset_default_graph()
    # Placeholders are shaped (features, batch): feature dimension first.
    input = tf.placeholder("float32", [n_io[0], None])
    y_ = tf.placeholder("float32", [n_io[1], None])
    if learn_type == 'mlp':
        print('Building mlp network architecture...')
        c, f = (tf.ones_like(y_), y_) # dummy values
        y = single_mlp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0])
    if learn_type == 'mlsp':
        print('Building mlsp network architecture...')
        c, f = (tf.ones_like(y_), y_) # dummy values
        y = single_mlsp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], mult = 1.0)
    if learn_type == 'segment' or learn_type == 'init_f' or learn_type == 'init_c':
        print('Building segment network architecture...')
        c, f = (tf.ones_like(y_), y_) # dummy values
        y, c, f = segment_architecture(n_io, input,n_clusters,n_hidden,n_c_hidden,learn_type,nonlin)
    if learn_type == 'initialize' or learn_type == 'cluster':
        print('Building clustered network architecture...')
        y, c, f = cluster_architecture(n_io, input,n_clusters,n_hidden,n_c_hidden,learn_type,nonlin)
    if learn_type == 'sft' or learn_type == 'sft2' or learn_type == 'sft3':
        print('Building', learn_type, 'network architecture...')
        c, f = (tf.ones_like(y_), y_) # dummy values
        y, c, f = sft_architecture(n_io, input,n_clusters,n_hidden,n_c_hidden,learn_type,nonlin)
    return input, y_, y, c, f
def sft_architecture(n_io, input, n_clusters,n_hidden,n_s_hidden,learn_type,nonlin):
    """Shape/function/transition (sft) architecture.

    Per cluster: a 'shape' net (membership region) and a 'function' net
    (local predictor); one shared 'transition' net covers samples that no
    shape claims. Returns (guess, [shapes, contexts, states], functions)
    as (y, c, f).
    """
    shape_stack = [0] * n_clusters
    function_stack = [0] * n_clusters
    transition_stack = [0] * n_clusters
    for n in range(n_clusters):
        with tf.variable_scope("shape%s"% str(n)):
            shape_stack[n] = single_mlsp_network(input, [n_io[0]] + n_s_hidden + [n_io[0]], nonlin[1], nonlin_final = 'linear')
        with tf.variable_scope("function%s"% str(n)):
            function_stack[n] = single_mlsp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear')
        #with tf.variable_scope("transition%s"% str(n)):
        #    transition_stack[n] = single_mlsp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear') #shape_stack[n]
    shapes = tf.stack(shape_stack,0)
    functions = tf.concat(function_stack,0)
    #transitions = tf.concat(transition_stack,0)
    with tf.variable_scope("transition"):
        #shapes_flattened = tf.transpose(tf.contrib.layers.flatten(tf.transpose(shapes,[2,1,0])),[1,0])
        #transition_input = tf.concat((shapes_flattened,functions),0)
        #transitions = single_mlsp_network(transition_input, [4*n_clusters] + n_s_hidden + [n_io[1]], nonlin[1], nonlin_final = 'linear')
        #transition_input = tf.concat((input,functions),0)
        transition_input = input
        transitions = single_mlsp_network(transition_input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[1], nonlin_final = 'linear')
    shape_conditions, assigned_conditions, assigned_conditions_untiled = get_sft_conditions(shapes, n_clusters)
    # Samples claimed by any shape use their cluster's function output;
    # unclaimed samples fall back to the shared transition output.
    contexts = tf.where(shape_conditions,tf.ones_like(functions),tf.zeros_like(functions))
    #states = tf.where(assigned_conditions, tf.multiply(functions,contexts), tf.multiply(functions,transitions))
    states = tf.where(assigned_conditions, tf.multiply(functions,contexts), tf.tile(transitions,[n_clusters,1]))
    guess = tf.reduce_sum(states,0,keep_dims=True)
    return guess, [shapes, contexts, states], functions #y/g, c, f
def get_sft_conditions(shapes, n_clusters):
    """Return (per-cluster shape-match mask, tiled 'any cluster matched' mask,
    untiled version of the same)."""
    shape_conditions = get_shape_conditions_reduced_c3(shapes)
    assigned_conditions_untiled = tf.logical_not(tf.equal(tf.count_nonzero(shape_conditions,0,True),0))
    assigned_conditions = tf.tile(assigned_conditions_untiled,[n_clusters,1]) # True across all clusters if at least one cluster matches perfectly
    return shape_conditions, assigned_conditions, assigned_conditions_untiled
def sft_architecture_unfinished_opt1(n_io, input, n_clusters,n_hidden,n_s_hidden,learn_type,nonlin):
    """Abandoned sft variant (see the name).

    NOTE(review): `states` on the second-to-last line is never defined, so
    calling this raises NameError; kept only for reference.
    """
    shape_stack = [0] * n_clusters
    function_stack = [0] * n_clusters
    for n in range(n_clusters):
        with tf.variable_scope("shape%s"% str(n)):
            shape_stack[n] = single_mlsp_network(input, [n_io[0]] + n_s_hidden + [n_io[0]], nonlin[1], nonlin_final = 'linear')
        with tf.variable_scope("function%s"% str(n)):
            function_stack[n] = single_mlsp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear')
    shapes = tf.stack(shape_stack,0)
    functions = tf.concat(function_stack,0)
    with tf.variable_scope("transition"):
        shapes_flattened = tf.transpose(tf.contrib.layers.flatten(tf.transpose(shapes,[2,1,0])),[1,0])
        transition_input = tf.concat((shapes_flattened,functions),0)
        transitions = single_mlsp_network(transition_input, [4*n_clusters] + n_s_hidden + [n_io[1]], nonlin[1], nonlin_final = 'linear')
    guess = tf.reduce_sum(states,0,keep_dims=True)
    return guess, shapes, functions #y/g, c, f
def segment_architecture(n_io, input, n_clusters,n_hidden,n_s_hidden,learn_type,nonlin):
    """Soft segment architecture: each cluster's function output is gated by
    its shape output clipped to [0, 1]; gated outputs are summed over clusters.
    Returns (guess, shapes, functions) as (y, c, f)."""
    shape_stack = [0] * n_clusters
    function_stack = [0] * n_clusters
    for n in range(n_clusters):
        with tf.variable_scope("shape%s"% str(n)):
            shape_stack[n] = single_mlsp_network(input, [n_io[0]] + n_s_hidden + [n_io[1]], nonlin[1], nonlin_final = 'linear')
        with tf.variable_scope("function%s"% str(n)):
            function_stack[n] = single_mlsp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear')
    shapes = tf.concat(shape_stack,0)
    functions = tf.concat(function_stack,0)
    # Gate = shape clipped into [0, 1]; acts as a soft membership weight.
    states = tf.multiply(functions,tf.maximum(tf.minimum(shapes,1),0))
    guess = tf.reduce_sum(states,0,keep_dims=True)
    return guess, shapes, functions #y/g, c, f
def segment_architecture_good(n_io, input, n_clusters,n_hidden,n_s_hidden,learn_type,nonlin):
    """Winner-take-all segment variant: only the cluster(s) with the closest
    shape output (minimal L1, see get_closest_shape_conditions) contribute
    their function output. Returns (guess, shapes, functions_orig) as (y, c, f)."""
    shape_stack = [0] * n_clusters
    function_stack = [0] * n_clusters
    for n in range(n_clusters):
        with tf.variable_scope("shape%s"% str(n)):
            shape_layer = single_mlsp_network(input, [n_io[0]] + n_s_hidden + [n_io[0]], nonlin[1], nonlin_final = 'linear')
        with tf.variable_scope("function%s"% str(n)):
            function_layer = single_mlsp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear')
        function_stack[n] = function_layer
        shape_stack[n] = shape_layer #shape_layer[2:]
    shapes = tf.stack(shape_stack,0)
    functions_orig = tf.concat(function_stack,0)
    conditions = get_closest_shape_conditions(shapes, n_clusters)
    states = tf.where(conditions, functions_orig, tf.zeros_like(functions_orig))
    guess = tf.reduce_sum(states,0,keep_dims=True)
    return guess, shapes, functions_orig #y/g, c, f
def segment_architecture_tran1(n_io, input, n_clusters,n_hidden,n_s_hidden,learn_type,nonlin):
    """Segment variant with a learned transition slot: n_clusters shape nets
    but only n_clusters-1 explicit function nets; the last slot is a
    transition net fed by the other functions' outputs, used wherever no
    shape claims the sample. Returns (guess, shapes, functions) as (y, c, f)."""
    shape_stack = [0] * n_clusters
    function_stack = [0] * (n_clusters-1)
    for n in range(n_clusters):
        #with tf.variable_scope("zoom%s"% str(n)):
        #zoom_layer = single_mlp_network(input, [n_io[0]] + [n_io[0]], nonlin = 'linear', nonlin_final = 'linear')
        with tf.variable_scope("shape%s"% str(n)):
            shape_layer = single_mlsp_network(input, [n_io[0]] + n_s_hidden + [n_io[0]], nonlin[1], nonlin_final = 'linear')
        if n != n_clusters-1:
            with tf.variable_scope("function%s"% str(n)):
                function_layer = single_mlsp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear')
            function_stack[n] = function_layer
        shape_stack[n] = shape_layer #shape_layer[2:]
    shapes = tf.stack(shape_stack,0)
    shapes_c1 = tf.reduce_sum(shapes,1)
    functions = tf.concat(function_stack,0)
    with tf.variable_scope("function_transition"):
        transition_function = single_mlsp_network(functions, [n_clusters-1] + [n_clusters-1] + [n_io[1]], nonlin[1], nonlin_final = 'linear')
    functions = tf.concat((functions,transition_function),0)
    conditions = get_shape_conditions_reduced_c3(shapes)
    #conditions = get_closest_shape_conditions(shapes, n_clusters)
    states = tf.where(conditions, functions, tf.tile(transition_function,[n_clusters, 1]))
    #states = tf.multiply(tf.nn.softmax(shapes_c1,0), functions)
    guess = tf.reduce_sum(states,0,keep_dims=True)
    return guess, shapes, functions #y/g, c, f
def segment_architecture_c1(n_io, input, n_clusters,n_hidden,n_s_hidden,learn_type,nonlin):
    """Chained segment variant: each cluster's function net consumes its shape
    net's output; contributions are kept only where the shape is in range
    (see get_shape_conditions). Returns (guess, shapes, functions) as (y, c, f)."""
    shape_stack = [0] * n_clusters
    function_stack = [0] * n_clusters
    for n in range(n_clusters):
        #with tf.variable_scope("zoom%s"% str(n)):
        #zoom_layer = single_mlp_network(input, [n_io[0]] + [n_io[0]], nonlin = 'linear', nonlin_final = 'linear')
        with tf.variable_scope("shape%s"% str(n)):
            shape_layer = single_mlp_network(input, [n_io[0]] + n_s_hidden + [n_io[1]], nonlin[1], nonlin_final = 'linear')
        with tf.variable_scope("function%s"% str(n)):
            function_layer = single_mlp_network(shape_layer, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear')
        shape_stack[n] = shape_layer #shape_layer[2:]
        function_stack[n] = function_layer
    shapes = tf.concat(shape_stack,0)
    functions = tf.concat(function_stack,0)
    conditions = get_shape_conditions(shapes)
    states = tf.where(conditions, functions, tf.zeros_like(functions))
    guess = tf.reduce_sum(states,0,keep_dims=True)
    return guess, shapes, functions #y/g, c, f
def get_shape_conditions(shape, val = [1.0,1.0]):
    """Elementwise mask: True where `shape` lies strictly inside (-val[1], val[0]).

    NOTE: the mutable default `val` is only read, never mutated, so it is safe.
    """
    max1, min1 = (val[0]*tf.constant([1.0],dtype=tf.float32), val[1]*tf.constant([-1.0],dtype=tf.float32))
    condition = tf.logical_and(tf.less(shape, max1), tf.less(min1, shape))
    return condition
def get_shape_conditions_reduced_c3(shape, val = [1.0,1.0]):
    """Per-cluster mask reduced over the first three component rows: True only
    where all three components lie strictly inside (-val[1], val[0])."""
    max1, min1 = (val[0]*tf.constant([[1.0],[1.0],[1.0]],dtype=tf.float32), val[1]*tf.constant([[-1.0],[-1.0],[-1.0]],dtype=tf.float32))
    p_condition = tf.logical_and(tf.less(shape, max1), tf.less(min1, shape))
    # AND the three per-component masks together.
    p_condition_01 = tf.logical_and(p_condition[:,0], p_condition[:,1])
    condition = tf.logical_and(p_condition_01, p_condition[:,2])
    return condition
def get_shape_conditions_c3(shape, val = [1.0,1.0]):
    """Like get_shape_conditions_reduced_c3 but without the reduction: keeps
    the full per-component in-range mask."""
    max1, min1 = (val[0]*tf.constant([[1.0],[1.0],[1.0]],dtype=tf.float32), val[1]*tf.constant([[-1.0],[-1.0],[-1.0]],dtype=tf.float32))
    condition = tf.logical_and(tf.less(shape, max1), tf.less(min1, shape))
    return condition
def get_closest_shape_conditions(shapes, n_clusters):
    """Per cluster and column, True where that cluster's L1 shape distance
    equals the columnwise minimum (i.e. the cluster is nearest)."""
    l1_distance = tf.reduce_sum(tf.abs(shapes), 1)
    tiled_minimum = tf.tile(tf.reduce_min(l1_distance, 0, keep_dims = True), [n_clusters, 1])
    return tf.equal(tiled_minimum, l1_distance)
def segment_architecture_old(n_io, input, n_clusters,n_hidden,n_s_hidden,learn_type,nonlin):
    """Older segmented architecture: per cluster a linear scope layer feeds a
    shape net, which feeds separate transition and interior nets; the final
    state is built by `conditionals` (defined elsewhere in this module).

    Returns (current_state, transitions, interiors), i.e. (y, c, f).
    NOTE(review): current_state is reset to zeros each loop iteration, so only
    the last cluster's conditionals survive — confirm this is intended.
    """
    transition_stack = [0] * n_clusters
    interior_stack = [0] * n_clusters
    for n in range(n_clusters):
        with tf.variable_scope("scope%s"% str(n)):
            scope_layer = single_mlp_network(input, [n_io[0]] + [n_io[0]], nonlin = 'linear', nonlin_final = 'linear')
        with tf.variable_scope("shape%s"% str(n)):
            shape_layer = single_mlp_network(scope_layer, [n_io[0]] + n_s_hidden + [n_io[0]], nonlin[1], nonlin_final = 'linear')
        with tf.variable_scope("transition%s"% str(n)):
            transition_layer = single_mlp_network(shape_layer, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear')
        with tf.variable_scope("interior%s"% str(n)):
            interior_layer = single_mlp_network(shape_layer, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0], nonlin_final = 'linear')
        current_state = tf.zeros_like(interior_layer)
        #current_state = conditionals(current_state, shape_layer, interior_layer, 'interior')
        current_state = conditionals(current_state, shape_layer, transition_layer, 'transition')
        interior_stack[n] = interior_layer
        transition_stack[n] = transition_layer
    return current_state, tf.concat(transition_stack,0), tf.concat(interior_stack,0) #y, c, f
def cluster_architecture(n_io, input, n_clusters,n_hidden,n_c_hidden,learn_type,nonlin):
    """Mixture-of-experts style architecture: per-cluster function and context
    MLPs; contexts gate the functions and the gated sum is the prediction.

    Returns (y, c, f): the guess, stacked contexts, stacked functions.
    """
    functions = ["function%s"% str(n) for n in range(n_clusters)]
    contexts = ["context%s"% str(n) for n in range(n_clusters)]
    function_layers = [0] * n_clusters
    context_layers = [0] * n_clusters
    for c, function in enumerate(functions):
        with tf.variable_scope(function):
            function_layers[c] = single_mlp_network(input, [n_io[0]] + n_hidden + [n_io[1]], nonlin[0]) #switch
    for c, context in enumerate(contexts):
        with tf.variable_scope(context):
            context_layers[c] = single_mlp_network(input, [n_io[0]] + n_c_hidden + [n_io[1]], nonlin[1], nonlin_final = 'tanh') #switch # nonlin_final = 'tanh'
    # Gate normalization: clip to [0,1] for linear contexts, softmax otherwise.
    if nonlin[1] == 'linear':
        c = tf.clip_by_value(tf.concat(context_layers,0), 0.0, 1.0)
        #c = tf.concat(context_layers,0)
    else:
        c = tf.nn.softmax(tf.concat(context_layers,0),0)
        #c = tf.concat(context_layers,0)
    f = tf.concat(function_layers,0)
    fc = tf.multiply(c, f)
    y = tf.reduce_sum(fc,0,keep_dims=True)
    return y, c, f
def single_mlsp_network(input, n_hidden, nonlin = 'tanh', nonlin_final = 'linear', mult = 0.01):
    """Sum of progressively deeper MLPs: the k-th addend uses the first k+1
    widths of n_hidden plus the output width, tagged with simp index k so
    variable names stay distinct."""
    total = single_mlp_network(input, n_hidden[:1] + n_hidden[-1:], nonlin, nonlin_final, 0, mult)
    for depth in range(1, len(n_hidden) - 1):
        total += single_mlp_network(input, n_hidden[:depth + 1] + n_hidden[-1:], nonlin, nonlin_final, depth, mult)
    return total
def single_mlp_network(input, n_hidden, nonlin = 'tanh', nonlin_final = 'linear', simp = 0, mult = 1.0):
    """Fully-connected stack acting on column vectors (W @ h + b).

    n_hidden lists layer widths input->output; `simp` tags variable names so
    several stacks can coexist in one scope; `mult` scales the uniform init.
    The last layer uses nonlin_final, all earlier layers use nonlin.
    """
    initializer = tf.random_uniform_initializer(-1.0 * mult, 1.0 * mult)
    activations = input
    n_layers = len(n_hidden) - 1
    for layer, (fan_in, fan_out) in enumerate(zip(n_hidden[:-1], n_hidden[1:])):
        weights = tf.get_variable("weights_s%s_layer%s" % (simp, str(layer)), (fan_out, fan_in), initializer=initializer)
        biases = tf.get_variable("biases_s%s_layer%s" % (simp, str(layer)), (fan_out, 1), initializer=initializer)
        nonlinearity = nonlin_final if layer == n_layers - 1 else nonlin
        activations = apply_nonlinearity(activations, weights, biases, nonlinearity)
    return activations
def apply_nonlinearity(hidden_layer, weights, biases, nonlinearity):
    """Affine transform (W @ h + b) followed by the named activation.

    An unrecognized name leaves `hidden_layer` untouched, matching the
    original fall-through behaviour.
    """
    activations = {
        'linear': lambda z: z,
        'tanh': tf.nn.tanh,
        'relu': tf.nn.relu,
        'relu6': tf.nn.relu6,
        'leaky_relu': tf.nn.leaky_relu,
        'sigmoid': tf.nn.sigmoid,
    }
    if nonlinearity in activations:
        hidden_layer = activations[nonlinearity](tf.matmul(weights, hidden_layer) + biases)
    return hidden_layer
def choose_optimization(loss_runs, train_var_lists, learn_vars, lr_vars):
    """Build one optimizer train op per loss, under the selected LR schedule.

    learn_vars: (optimizer name, decay, momentum, _).
    lr_vars: (schedule name, start LR, decay factor / end LR, decay steps).
    Returns (train_step_runs, learning_rate list).
    """
    # Apply exponential decay to learning rate
    optim, decay, mom, _ = learn_vars
    lr_decay_type, lr_start, lr_decay, lr_decay_steps = lr_vars
    # Shared step counter, advanced by each minimize() call below.
    global_step = tf.get_variable('global_step', shape=[], initializer=tf.zeros_initializer(), trainable=False)
    if lr_decay_type == 'no decay':
        learning_rate = [tf.constant(lr_start)]*len(loss_runs)
    elif lr_decay_type == 'exp':
        learning_rate = [tf.train.exponential_decay(lr_start, global_step, lr_decay_steps, lr_decay, staircase=False)]*len(loss_runs)
    elif lr_decay_type == 'poly':
        # 'poly' reuses lr_decay as the floor (end) learning rate.
        end_learning_rate = lr_decay
        power = 0.5
        learning_rate = [tf.train.polynomial_decay(lr_start, global_step, lr_decay_steps, end_learning_rate, power)]*len(loss_runs)
    elif lr_decay_type == 'by_loss':
        # LR proportional to the current loss value, capped at 0.05.
        learning_rate = [tf.minimum(loss_run*lr_start,0.05) for loss_run in loss_runs]
    elif lr_decay_type == 'by_loss2':
        learning_rate = [tf.minimum(loss_run*loss_run*lr_start,0.05) for loss_run in loss_runs]
    # NOTE(review): an unrecognized lr_decay_type (or optim) leaves
    # learning_rate (or train_step_runs) unbound and raises
    # UnboundLocalError — confirm callers only pass the names handled here.
    # Use Adam or RMSProp
    if optim == 'RMSProp':
        train_step_runs = [tf.train.RMSPropOptimizer(lr, decay, mom).minimize(loss_run, var_list = train_var_list, global_step=global_step) for lr, loss_run, train_var_list in zip(learning_rate,loss_runs,train_var_lists)]
    if optim == 'Adam':
        train_step_runs = [tf.train.AdamOptimizer(lr).minimize(loss_run, var_list = train_var_list, global_step=global_step) for lr, loss_run, train_var_list in zip(learning_rate,loss_runs,train_var_lists)]
    return train_step_runs, learning_rate
def tf_get_norm(val, type):
    """Return a scalar norm of `val` as a TF op.

    type: 'L1' (mean absolute value), 'L2' (root mean square), or
    'L2_noroot_scaled_by5' (mean square; NOTE despite the name there is
    no *5 scaling in the implementation).
    Raises ValueError for an unknown type (previously this surfaced as an
    UnboundLocalError on `norm`).
    """
    if type == 'L1':
        norm = tf.reduce_mean(tf.abs(val))
    elif type == 'L2':
        norm = tf.sqrt(tf.reduce_mean(tf.square(val)))
    elif type == 'L2_noroot_scaled_by5':
        norm = tf.reduce_mean(tf.square(val))
    else:
        raise ValueError('unknown norm type: %s' % type)
    return norm
def define_loss(y_, y, c, f, learn_type, train_state, n_clusters, norm_type):
    """Assemble the per-phase list of training losses.

    y_: targets; y: combined network guess; f: per-cluster function outputs;
    c: contexts/shapes (a (shapes, contexts, states) tuple in the 'sft*'
    phases). Returns loss_runs — one loss tensor per optimizer run.
    """
    # define loss and training variables depending on which run it is, with mlp as the baseline
    reg_loss = define_regularization_loss(learn_type, 0.0001)
    #loss_deriv = define_loss_deriv(y_,y, np.array((train_state[4].shape)), train_loss_deriv = True)
    # Best-cluster loss: only the closest cluster is penalized per point.
    loss_min = tf.reduce_mean(tf.reduce_min(tf.abs(y_-f),0)) + reg_loss
    loss_guess = tf_get_norm(y_-y, norm_type) + reg_loss #+ loss_deriv
    loss_runs = [loss_guess]
    if learn_type == 'initialize':
        loss_runs = [loss_min]
    if learn_type == 'init_f':
        loss_runs = [loss_min]
    if learn_type == 'init_c':
        loss_ctol = get_loss_ctol(n_clusters, y_, f, c) #+ reg_loss
        loss_runs = [loss_ctol]
    if learn_type == 'segment':
        loss_ctol = get_loss_ctol(n_clusters, y_, f, c) #+ reg_loss
        loss_runs = [loss_min, loss_guess+loss_ctol*100.0]
    if learn_type == 'sft' or learn_type == 'sft2' or learn_type == 'sft3': #['transition','function','shape']
        # In sft* phases c carries three stacked tensors.
        shapes, contexts, states = c
        shape_conditions, assigned_conditions, assigned_conditions_untiled = get_sft_conditions(shapes, n_clusters)
        fun_vals = tf.abs(tf.multiply(y_,tf.reduce_sum(contexts,0,True))-tf.reduce_sum(tf.multiply(f,contexts),0,True))
        # Mean context-weighted error, normalized by its non-zero count.
        loss_fun = tf.divide(tf.reduce_mean(fun_vals),tf.count_nonzero(fun_vals,dtype = tf.float32)) + reg_loss
        #loss_runs = [loss_guess, loss_fun*100.0, loss_stol*100.0]
        if learn_type == 'sft':
            loss_stol = get_loss_stol(n_clusters, y_, f, shapes, 0.1) + reg_loss
            loss_runs = [loss_min] #[loss_min, loss_stol*100.0]
        if learn_type == 'sft2':
            loss_stol = get_loss_stol(n_clusters, y_, f, shapes, 0.1) + reg_loss
            loss_runs = [loss_stol]
        if learn_type == 'sft3':
            loss_stol = get_loss_stol(n_clusters, y_, f, shapes, 0.001) + reg_loss
            loss_runs = [loss_guess] #, loss_stol, loss_fun]
    return loss_runs
def choose_trainable_variables(learn_type):
    """Select which variable lists each optimizer run trains, by matching
    scope-name keywords. Unknown phases train everything in one run."""
    all_vars = tf.trainable_variables()

    def matching(keyword):
        return [v for v in all_vars if keyword in v.name]

    # Per-phase keyword lists; one optimizer run per keyword.
    phase_keywords = {
        'context': ['context'],
        'segment': ['function', 'shape'],  # remember to change back to function!!!
        'sft': ['function'],
        'sft2': ['shape'],
        'sft3': ['transition'],
    }
    if learn_type in phase_keywords:
        return [matching(keyword) for keyword in phase_keywords[learn_type]]
    return [all_vars]
def get_loss_stol(n_clusters, y_, f, shapes, tol):
    """Shape-tolerance loss: pull |shape| below ~1 where the cluster's
    function is within `tol` of the target, and push it above ~1 elsewhere.

    Assumes the c3 layout — 3 shape coordinates per cluster (the [1,3,1]
    tile below).
    """
    tol_conditions_untiled = marked_within_tol(y_, f, tol)
    # Broadcast the per-point tolerance mask over the 3 shape coordinates.
    tol_conditions = tf.tile(tf.reshape(tol_conditions_untiled,[n_clusters,1,tf.size(y_)]),[1,3,1])
    #assigned_conditions_untiled = tf.logical_not(tf.tile(tf.equal(tf.count_nonzero(tol_conditions_untiled,0,True),0),[n_clusters,1])) # True across all clusters if at least one cluster matches perfectly
    #assigned_conditions = tf.tile(tf.reshape(assigned_conditions_untiled,[n_clusters,1,tf.size(y_)]),[1,3,1])
    #loss_s_in_tol = tf.where(tf.logical_and(assigned_conditions,tol_conditions),tf.maximum(tf.abs(shapes),1.0)-1.0,tf.zeros_like(shapes))
    #loss_s_not_in_tol = tf.where(tf.logical_and(assigned_conditions,tf.logical_not(tol_conditions)),1.0-tf.minimum(tf.abs(shapes),1.0),tf.zeros_like(shapes))
    # The 0.999/1.001 offsets keep a small non-zero gradient at |shape| == 1.
    loss_s_in_tol = tf.where(tol_conditions,tf.maximum(tf.abs(shapes),1.0)-0.999,tf.zeros_like(shapes))
    loss_s_not_in_tol = tf.where(tf.logical_not(tol_conditions),1.001-tf.minimum(tf.abs(shapes),1.0),tf.zeros_like(shapes))
    #loss_s_in_tol = tf.where(tol_conditions,tf.abs(shapes)-1.0,tf.zeros_like(shapes))
    #loss_s_not_in_tol = tf.where(tf.logical_not(tol_conditions),1.0-tf.abs(shapes),tf.zeros_like(shapes))
    loss_stol = tf.reduce_mean(loss_s_in_tol + loss_s_not_in_tol) # * 0.01
    return loss_stol #tf.divide(tf.reduce_sum(loss_stol),tf.count_nonzero(loss_stol,dtype = tf.float32))*0.1
def marked_within_tol(y_, f, tol_val = 0.05):
    """Boolean mask: True where |y_ - f| is strictly below tol_val."""
    tolerance = tf.constant([tol_val], dtype=tf.float32)
    absolute_error = tf.abs(y_ - f) # tf.divide(tf.abs(y_-f),tf.abs(y_))
    return tf.less(absolute_error, tolerance)
def get_loss_ctol(n_clusters, y_, f, c):
    """Context-tolerance loss: where a cluster explains the target within
    tolerance, pull its context toward >= 1; elsewhere push it toward <= 0.
    Points explained by no cluster at all are excluded from the loss."""
    tol_conditions = marked_within_tol(y_, f) # True where f matches y_ perfectly, False otherwise
    assigned_conditions = tf.logical_not(tf.tile(tf.equal(tf.count_nonzero(tol_conditions,0,True),0),[n_clusters,1])) # True across all clusters if at least one cluster matches perfectly
    c_zero_condition = tf.where(tf.less(tf.constant([0.0],dtype=tf.float32),c),c,tf.zeros_like(c)) # True if c is not already less than zero
    c_one_condition = tf.where(tf.less(c,tf.constant([1.0],dtype=tf.float32)),c,tf.zeros_like(c)) # True if c is not already greater than one
    loss_in_tol = tf.where(tf.logical_and(tol_conditions,assigned_conditions), -c_one_condition, tf.zeros_like(c)) # make c bigger than 1
    loss_not_in_tol = tf.where(tf.logical_and(tf.logical_not(tol_conditions),assigned_conditions), c_zero_condition, tf.zeros_like(c)) # make c smaller than 0
    #loss_in_tol = tf.where(tol_conditions, tf.abs(c-1), tf.zeros_like(c))
    #loss_not_in_tol = tf.where(tol_conditions, tf.abs(c), tf.zeros_like(c))
    loss_ctol = loss_in_tol + loss_not_in_tol # * 0.01
    return tf.reduce_sum(loss_ctol)
def get_loss_centers(n_clusters, y_, f, c):
    """Center-assignment loss: penalize clusters that best explain a point
    (argmin of |y_ - f|) while not being the spatially nearest shape, and
    vice versa.

    NOTE(review): the tf.where value branches reduce c over axis 1 while the
    condition/else operands match f's shape — confirm these shapes broadcast
    as intended before reusing this loss.
    """
    loss_min = tf.reduce_min(tf.abs(y_-f),0)
    loss_min_tiled = tf.tile(tf.reshape(loss_min,[1,tf.size(loss_min)]) ,[n_clusters,1])
    argmin_conditions = tf.equal(loss_min_tiled, tf.abs(y_-f))
    #argmin_conditions = tf.tile(tf.reshape(argmin_conditions_untiled,[n_clusters,1,tf.size(loss_min)]),[1,3,1])
    shape_conditions = get_closest_shape_conditions(c, n_clusters)
    loss_in_shape = tf.where(tf.logical_and(argmin_conditions,tf.logical_not(shape_conditions)), tf.reduce_sum(tf.abs(c),1), tf.zeros_like(f))
    loss_not_in_shape = tf.where(tf.logical_and(tf.logical_not(argmin_conditions),shape_conditions), tf.reduce_sum(-tf.abs(c),1), tf.zeros_like(f))
    loss_shape = loss_in_shape + loss_not_in_shape # * 0.01
    return loss_shape*100.0
def get_loss_shape(n_clusters, y_, f, c):
    """Shape-placement loss (c3 layout): push each cluster's 3-D shape band to
    cover exactly the points where that cluster has minimal error, and pull
    unassigned points into the nearest cluster's band.
    """
    #tol_conditions_untiled = marked_within_tol(y_, f)
    #print(tol_conditions_untiled)
    #tol_conditions = tf.tile(tf.reshape(tol_conditions_untiled,[n_clusters+1,1,tf.size(loss_min)]),[1,3,1])[:-1]
    #loss_function = mark_function(tf.abs(y_-f), tol_conditions)
    #loss_in_shape = mark_in_shape(c, argmin_points)
    #loss_not_in_shape = mark_not_in_shape(c, tf.logical_not(argmin_points))
    #loss_shape = loss_in_shape + loss_not_in_shape
    #loss_not_in_shape = tf.where(tf.logical_and(tol_conditions,loss_min_points), 1.0/tf.abs(c), tf.zeros_like(c))
    #loss_in_shape = tf.where(tf.logical_and(argmin_points,tol_conditions), tf.abs(c), tf.zeros_like(c))
    # Which cluster attains the minimal error at each point.
    loss_min = tf.reduce_min(tf.abs(y_-f),0)
    loss_min_tiled = tf.tile(tf.reshape(loss_min,[1,tf.size(loss_min)]) ,[n_clusters,1]) #or [n_clusters+1,1]
    argmin_conditions_untiled = tf.equal(loss_min_tiled, tf.abs(y_-f))#[:-1]
    # Broadcast over the 3 shape coordinates.
    argmin_conditions = tf.tile(tf.reshape(argmin_conditions_untiled,[n_clusters,1,tf.size(loss_min)]),[1,3,1])
    shape_conditions = get_shape_conditions_c3(c)
    loss_min_but_not_in_shape = tf.where(tf.logical_and(argmin_conditions,tf.logical_not(shape_conditions)), tf.abs(c)-1.00, tf.zeros_like(c))
    loss_not_min_but_in_shape = tf.where(tf.logical_and(tf.logical_not(argmin_conditions),shape_conditions), 1.00-tf.abs(c), tf.zeros_like(c))
    #loss_in_shape = tf.where(argmin_conditions, tf.abs(c)*tf.cast(tf.logical_not(shape_conditions),tf.float32), tf.zeros_like(c))
    #loss_not_in_shape = tf.where(tf.logical_not(argmin_conditions), 1.0-tf.abs(c)*tf.cast(shape_conditions,tf.float32), tf.zeros_like(c))
    # Points covered by no cluster are attracted into the nearest shape.
    reduced_shape_conditions = get_shape_conditions_reduced_c3(c) # 5 x ?
    unassigned_conditions = tf.tile(tf.equal(tf.count_nonzero(reduced_shape_conditions,0,True),0),[n_clusters,1]) # only return true if all n_clusters are false
    nearest_cluster = get_closest_shape_conditions(c, n_clusters) # 5 x ? TFFFF x ?
    assign_nearest_conditions = tf.tile(tf.reshape(tf.logical_and(unassigned_conditions,nearest_cluster),[n_clusters,1,tf.size(loss_min)]),[1,3,1]) # logic and tile it by 1,3,1
    loss_push_unassigned_in_nearest_shape_center = tf.where(tf.logical_and(assign_nearest_conditions,tf.logical_not(shape_conditions)), tf.abs(c)-1.00, tf.zeros_like(c))
    loss_shape = loss_min_but_not_in_shape + loss_not_min_but_in_shape + loss_push_unassigned_in_nearest_shape_center # * 0.01
    return loss_shape*1000.0
def get_loss_shape_c1(n_clusters, y_, f, c):
    """Shape-placement loss for the 1-D (c1) shape layout: push |c| below 1
    where the cluster has minimal error and above 1 where it does not."""
    loss_min = tf.reduce_min(tf.abs(y_-f),0)
    loss_min_tiled = tf.tile(tf.reshape(loss_min,[1,tf.size(loss_min)]) ,[n_clusters,1])
    argmin_conditions = tf.equal(loss_min_tiled, tf.abs(y_-f))
    #argmin_conditions = tf.reshape(argmin_conditions_unshaped,[n_clusters,1,tf.size(loss_min)])
    shape_conditions = get_shape_conditions(c)
    loss_in_shape = tf.where(tf.logical_and(argmin_conditions,tf.logical_not(shape_conditions)), tf.abs(c)-1.00, tf.zeros_like(c))
    loss_not_in_shape = tf.where(tf.logical_and(tf.logical_not(argmin_conditions),shape_conditions), 1.00-tf.abs(c), tf.zeros_like(c))
    loss_shape = loss_in_shape + loss_not_in_shape # * 0.01
    return loss_shape*10000000.0
def mark_function(state, marked_points):
    """Keep `state` where `marked_points` is True; zero elsewhere."""
    zeros = tf.zeros_like(state)
    return tf.where(marked_points, state, zeros)
def mark_in_shape(p, marked_points):
    """|p| at marked points lying outside the band (-0.1, 0.1); 0 elsewhere."""
    outside_band = tf.logical_or(tf.less(tf.constant([0.1],dtype=tf.float32), p),
                                 tf.less(p, tf.constant([-0.1],dtype=tf.float32)))
    selected = tf.logical_and(marked_points, outside_band)
    return tf.where(selected, tf.abs(p), tf.zeros_like(p))
def mark_not_in_shape(p, unmarked_points):
    """1/|p| at unmarked points strictly inside the band (-0.1, 0.1); 0 elsewhere."""
    inside_band = tf.logical_and(tf.less(p, tf.constant([0.1],dtype=tf.float32)),
                                 tf.less(tf.constant([-0.1],dtype=tf.float32), p))
    selected = tf.logical_and(unmarked_points, inside_band)
    return tf.where(selected, 1.0/tf.abs(p), tf.zeros_like(p))
def define_loss_deriv(y_, y, train_shape, train_loss_deriv = True): #mu, x, t
    """L1 mismatch of first-order finite differences between prediction and
    target, after reshaping both to the (u, t, x) grid; the du term is
    weighted x10. Second-derivative terms are currently disabled, so
    loss_deriv2 stays 0 and is not returned."""
    loss_deriv, loss_deriv2 = (0, 0)
    if train_loss_deriv == True:
        reshaped_y = tf.reshape(y,train_shape)
        reshaped_y_ = tf.reshape(y_,train_shape)
        dydx, dydt, dydu = get_deriv(reshaped_y)
        dy_dx, dy_dt, dy_du = get_deriv(reshaped_y_)
        #dydx2, dydxdt, dydxdu = get_deriv(dydx)
        #dy_dx2, dy_dxdt, dy_dxdu = get_deriv(dy_dx)
        #dydtdx, dydt2, dydtdu = get_deriv(dydt)
        #dy_dtdx, dy_dt2, dy_dtdu = get_deriv(dy_dt)
        #dydudx, dydudt, dydu2 = get_deriv(dydu)
        #dy_dudx, dy_dudt, dy_du2 = get_deriv(dy_du)
        loss_deriv = tf.reduce_sum(tf.abs(dy_dx-dydx)) + tf.reduce_sum(tf.abs(dy_dt-dydt)) + tf.reduce_sum(tf.abs(dy_du-dydu))*10
        #loss_deriv2 = tf.reduce_sum(tf.abs(dy_dx2-dydx2)) + tf.reduce_sum(tf.abs(dy_dt2-dydt2)) + tf.reduce_sum(tf.abs(dy_du2-dydu2))*10
    return loss_deriv #+ loss_deriv2
def get_deriv(tf_vec):
    """First-order forward differences of a rank-3 tensor along each axis.

    Returns (d/dx, d/dt, d/du) where x, t, u index axes 2, 1, 0.
    """
    diffs = []
    for axis in (2, 1, 0):
        lead = [slice(None)] * 3
        lag = [slice(None)] * 3
        lead[axis] = slice(1, None)
        lag[axis] = slice(None, -1)
        diffs.append(tf_vec[tuple(lead)] - tf_vec[tuple(lag)])
    dfdx, dfdt, dfdu = diffs
    return dfdx, dfdt, dfdu
def tf_flat_vars(input_vars):
    """Flatten a list of TF variables into a single row tensor.

    Returns (vars, other_vars). other_vars is built from width-0 slices
    ([:, :0]) and is therefore always empty — kept for interface parity with
    the commented-out variant that dropped the first column instead.
    Uses tf.contrib.layers.flatten, a TF1.x-only API.
    Returns (0, 0) for an empty input list.
    """
    if input_vars != []:
        #vars = tf.concat([tf.contrib.layers.flatten(tf.expand_dims(v,0))[:,1:] for v in input_vars],1)
        vars = tf.concat([tf.contrib.layers.flatten(tf.expand_dims(v,0))[:,:] for v in input_vars],1)
        other_vars = tf.concat([tf.contrib.layers.flatten(tf.expand_dims(v,0))[:,:0] for v in input_vars],1)
    else:
        vars, other_vars = (0,0)
    return vars, other_vars
def define_regularization_loss(learn_type, mult = 0.01):
    """Build the per-phase weight-regularization term.

    Default: L1 over all trainable variables scaled by `mult`. Phase-specific
    branches override the variable selection and scaling.
    NOTE(review): the 'context' branch references n_c_hidden, which is not a
    parameter of this function — presumably a module-level global; confirm it
    is defined before this branch is used.
    """
    train_var_reg, train_other_var_reg = tf_flat_vars(tf.trainable_variables())
    reg_loss = tf_get_norm(train_var_reg,'L1')*mult #+ tf_get_norm(train_other_var_reg,'L1')*mult*0.01
    if learn_type == 'context':
        train_var_reg, train_other_var_reg = tf_flat_vars([v for v in tf.trainable_variables() if "weights_layer%s"%str(len(n_c_hidden)) in v.name and "context" in v.name])
        # Negative norm: encourages the final context weights to grow.
        reg_loss = -tf_get_norm(train_var_reg,'L2')*10 #only for regularization
    if learn_type == 'segment' or learn_type == 'init_f' or learn_type == 'init_c':
        train_var_reg, train_other_var_reg = tf_flat_vars([v for v in tf.trainable_variables() if "s0" not in v.name]) #only for regularization
        reg_loss = tf_get_norm(train_var_reg,'L1')*mult*0.001
    if learn_type == 'sft' or learn_type == 'sft2' or learn_type == 'sft3' or learn_type == 'mlsp':
        train_var_reg, train_other_var_reg = tf_flat_vars([v for v in tf.trainable_variables() if "s0" not in v.name]) #only for regularization
        reg_loss = tf_get_norm(train_var_reg,'L1')*mult #+ tf_get_norm(train_other_var_reg,'L1')*mult*0.01
    return reg_loss
def get_savename(learn_type, n_hidden, norm_type, lr_decay_type, lr_start, lr_decay):
    """Build the weights-file name for a training phase.

    The init_f/init_c phases share the 'segment' file prefix and sft2/sft3
    share 'sft'; any other phase uses its own name. The hidden sizes and LR
    schedule parameters are appended so runs with different hyperparameters
    get distinct files.

    Bug fix: the prefix aliases were written as separate if/if/else
    statements, so the trailing else overwrote 'segment' (and the second if
    could never pair with init_f/init_c) — now a single if/elif/else chain.
    """
    if learn_type == 'init_f' or learn_type == 'init_c':
        savename = 'segment'
    elif learn_type == 'sft2' or learn_type == 'sft3':
        savename = 'sft'
    else:
        savename = '%s'%learn_type
    for i in n_hidden:
        savename += '_'+str(i)
    savename += '_'+lr_decay_type + '_'+str(lr_start)+'_'+str(lr_decay)+'_'+norm_type+'.npy'
    return savename
def initialize_network(savename,learn_type, train_state, input, y, y_, f, c):
    """Create a TF session, initialize variables, then either restore weights
    from 'weights_<savename>' (if that file exists) or, for segment-family
    phases, run the least-squares pre-initialization heuristic.

    Returns the live session.
    """
    print("Initializing network")
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    if os.path.isfile('weights_'+savename) == True:
        # Restore in tf.trainable_variables() order — must match save order.
        saved_weights_init = np.load('weights_'+savename)
        [sess.run(var.assign(saved_weights_init[i])) for i, var in enumerate(tf.trainable_variables())]
    elif learn_type == 'init_f' or learn_type == 'init_c' or learn_type == 'segment' or learn_type == 'sft':
        print('Setting network weights according to pre-initialization heuristic')
        initialize_segment_network(sess, train_state, input, y, y_, f, c)
    return sess
def initialize_segment_network(sess, train_state, input, y, y_, f, c):
    """Heuristic weight init for the segment architecture.

    For each cluster: least-squares-fit the first function layer on a tiny
    2x2x2 data patch, and scale/offset the first shape layer so that patch
    maps inside the shape band. Cluster 0 uses the patch at the origin; each
    later cluster is placed at the current point of maximum residual error.
    """
    # First-layer ("s0") variables of the shape and function sub-networks.
    shape_weights = [v for v in tf.trainable_variables() if "weights_s0" in v.name and "shape" in v.name]
    shape_biases = [v for v in tf.trainable_variables() if "biases_s0" in v.name and "shape" in v.name]
    function_weights = [v for v in tf.trainable_variables() if "weights_s0" in v.name and "function" in v.name]
    function_biases = [v for v in tf.trainable_variables() if "biases_s0" in v.name and "function" in v.name]
    n_clusters = len(shape_weights)
    train_shape = train_state[3].shape
    batch_inputs, batch_ys = get_batch(1.0, train_state)
    subset = {input: batch_inputs, y_:batch_ys}
    stimuli, temporal_points, spatial_points, clips = train_state
    for n in range(n_clusters):
        if n == 0:
            mi0, mi1, mi2 = [0,0,0]
        #if n == 1:
        #    mi0, mi1, mi2 = [1,3,246]
        #if n == 2:
        #    mi0, mi1, mi2 = [1,3,100]
        else:
            # Place this cluster at the point of maximum remaining error
            # under the already-initialized clusters.
            error_flat = sess.run(tf.reduce_min(tf.abs(y_-f),0), feed_dict=subset)
            error = np.reshape(error_flat, train_shape)
            error_max = sorted(np.unique(np.ndarray.flatten(error[:-1,:-1,:-1])))
            max_index = np.where(np.max(error[:-1,:-1,:-1]) == error[:-1,:-1,:-1])
            mi0, mi1, mi2 = list(np.transpose(max_index)[0])
        # 2x2x2 patch of the training data at the chosen index.
        tiny_state = stimuli[:], temporal_points[mi1:mi1+2], spatial_points[mi2:mi2+2], clips[mi0:mi0+2, mi1:mi1+2, mi2:mi2+2]
        tiny_inputs, tiny_ys = get_batch(1.0, tiny_state)
        tiny_inputs_bias = np.vstack((tiny_inputs,np.ones_like(tiny_ys)))
        # Affine least-squares fit [W | b] for the function layer.
        Wbf, _, _, _ = np.linalg.lstsq(tiny_inputs_bias.T, tiny_ys.T)
        Wf, bf = Wbf[:-1].T, Wbf[-1:].T
        sess.run(function_weights[n].assign(Wf))
        sess.run(function_biases[n].assign(bf))
        ##Only uncomment if c3 is back to 3
        # Diagonal scale + offset per axis so the patch lands inside the band.
        Ws = np.zeros((3,3))
        bs = np.zeros((3,1))
        for i in range(3):
            Ws[i,i] = 0.5/(np.max(tiny_state[i]) - np.min(tiny_state[i])) # or 2.0
            bs[i] = 0.25 - 0.5*np.max(tiny_state[i])/(np.max(tiny_state[i]) - np.min(tiny_state[i])) # or 1.0, 2.0
        sess.run(shape_weights[n].assign(Ws))
        sess.run(shape_biases[n].assign(bs))
def get_random_batch(batch_ratio, batch_all_inputs, batch_all_ys):
    """Sample a random column subset (with replacement) of the full batch.

    batch_ratio: fraction of the available columns to draw.
    Returns (batch_inputs, batch_ys) with the same sampled column indices.

    Bug fix: np.random.randint's upper bound is exclusive, so the previous
    call randint(0, n_all-1, ...) could never select the last column.
    """
    n_all = batch_all_ys.shape[1]
    batch_size = int(batch_ratio*n_all)
    random_inds = np.random.randint(0, n_all, batch_size)
    batch_inputs, batch_ys = batch_all_inputs[:,random_inds], batch_all_ys[:,random_inds]
    return batch_inputs, batch_ys
def train_network(network_vars, learn_vars, train_state, batch_ratio, num_epochs):
    """Run the optimization loop, early-stopping when the loss-curve slope
    drops below `desired_slope`.

    network_vars: (sess, input, y_, f, c, y, loss_runs, train_step_runs,
    learning_rate). Returns (network_vars, total_error history, epochs run).
    """
    num_epochs_print = num_epochs
    _, _, _, desired_slope = learn_vars
    sess, input, y_, f, c, y, loss_runs, train_step_runs, learning_rate = network_vars
    total_error = np.zeros((num_epochs))
    num_variables = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
    losses = [0]
    print('Training network...')
    #sess.run([v for v in tf.global_variables() if "global_step" in v.name][0].assign(0))
    j_printout = 200
    slope = 100
    starttime = time()
    batch_all_inputs, batch_all_ys = get_batch(1.0, train_state)
    for j in range(0,num_epochs):
        # Resample a minibatch each epoch; with batch_ratio == 1.0 the same
        # full batch (sampled once at j == 0) is reused throughout.
        if batch_ratio != 1.0 or j == 0:
            batch_inputs, batch_ys = get_random_batch(batch_ratio, batch_all_inputs, batch_all_ys)
            subset = {input: batch_inputs, y_:batch_ys}
        #if j == 0 or (j % 10 == 0 and batch_ratio !=1.0):
        #    batch_inputs, batch_ys = get_batch(batch_ratio, train_state)
        #    subset = {input: batch_inputs, y_:batch_ys}
        if j % j_printout == 0:
            losses = [sess.run(loss_run, feed_dict=subset) for loss_run in loss_runs]
            lr_run = sess.run(learning_rate, feed_dict=subset)
        total_error[j] = sess.run(loss_runs[0], feed_dict=subset)
        # NOTE(review): both branches below are identical — the j < 1 split
        # looks like a leftover from a warm-up variant.
        if j < 1:
            [sess.run(train_step_run, feed_dict=subset) for train_step_run in train_step_runs]
        else:
            [sess.run(train_step_run, feed_dict=subset) for train_step_run in train_step_runs]
        if j % j_printout == 0:
            # Convergence slope over (up to) the last 250 recorded errors.
            jrange = 250 if j > 249 else j
            slope = np.abs(linregress(range(jrange), total_error[j-jrange:j]).slope) if j > 10 else 0.1
            if slope < desired_slope:
                print('Converged! Convergence slope (final) on iteration %s is %s out of %s'%(j, slope,desired_slope))
                num_epochs_print = j
                break
            # Rough remaining-time estimate from the average epoch duration.
            t_s = int((num_epochs-j)*(time()-starttime)/j) if j > 0 else 200000
            m, s = divmod(t_s, 60)
            print("Losses for epoch", j, "out of", num_epochs, "are", losses, "with lr", lr_run, 'and cs',slope,'out of',desired_slope,'with %s minutes %s seconds'% (m,s),'remaining')
    network_vars = sess, input, y_, f, c, y, loss_runs, train_step_runs, learning_rate
    return network_vars, total_error, num_epochs_print
def descriptive_clusters(n_clusters,train_state,f_run,y__run):
    """Score each cluster by how evenly it explains the stimuli.

    A point counts as explained when |y_ - f| < 0.01. Per cluster, the
    per-stimulus explained counts are sorted descending and the heuristic is
    second-best / best count; clusters scoring above 0.25 are kept.
    Returns (heuristic list, kept cluster indices).
    """
    n_stimuli = train_state[0].shape[0]
    n_points = train_state[3].shape[1] * train_state[3].shape[2]
    explained = np.abs(y__run - f_run) < 0.01
    explained = explained.reshape(n_clusters, n_stimuli, n_points)
    counts = np.sort(explained.sum(axis=2))[:, ::-1]
    heuristic = [counts[j, 1] / counts[j, 0] for j in range(n_clusters)]
    kept_clusters = [j for j, keep in enumerate(np.array(heuristic) > 0.25) if keep]
    return heuristic, kept_clusters
def descriptive_clusters_old(n_clusters,train_state,f_run,y__run):
    """Older cluster-scoring heuristic: assign each point to its argmin-error
    cluster, then score each cluster by (min * row-sum) / (max * total) of
    its per-stimulus assignment counts; keep clusters above 0.01/n_clusters.

    Returns (heuristic list, kept cluster indices).
    Bug fix: np.float was removed in NumPy 1.24 — use the builtin float.
    """
    stimuli_full_range = train_state[0].shape[0]
    argmin2 = np.argmin(np.abs(y__run-f_run),0).reshape(stimuli_full_range,train_state[3].shape[1]*train_state[3].shape[2])
    mu_exp = [[float(np.sum(argmin2[i] == j)) for i in range(stimuli_full_range)] for j in range(n_clusters)] # to determine explanatory power in each mu
    heuristic = [(np.min(mu_exp[j])*np.sum(mu_exp[j]))/(np.max(mu_exp[j])*np.sum(mu_exp)) for j in range(n_clusters)]
    return heuristic, [i for i, x in enumerate(np.array(heuristic) > 0.01/n_clusters) if x]
def determine_n_clusters(sess,n_clusters,train_state,f_run,y__run):
    """Count descriptive clusters and gather the trainable variables (their
    function and context scopes) worth saving.

    Returns (heuristic, number of kept clusters, variables to save).
    """
    heuristic, kept_clusters = descriptive_clusters(n_clusters,train_state,f_run,y__run)
    save_vars = sum([tf.get_collection('trainable_variables',"function%s"% str(n)) for n in kept_clusters],[])\
        + sum([tf.get_collection('trainable_variables',"context%s"% str(n)) for n in kept_clusters],[])
    n_clusters_network = len(kept_clusters)
    print('Training found %s clusters'%n_clusters_network)
    if n_clusters_network != n_clusters:
        print("Reducing number of clusters from %s to %s" % (n_clusters,n_clusters_network))
    return heuristic, n_clusters_network, save_vars
def determine_n_segments(sess,learn_type,n_clusters,train_state,f_run,y__run):
    """Segment-architecture variant of determine_n_clusters: keep each
    descriptive cluster's shape and function scopes, plus all transition
    scopes in the sft* phases.

    Returns (heuristic, number of kept clusters, variables to save).
    """
    heuristic, kept_clusters = descriptive_clusters(n_clusters,train_state,f_run,y__run)
    save_vars = []
    for n in kept_clusters:
        save_vars += tf.get_collection('trainable_variables',"shape%s"% str(n))\
            + tf.get_collection('trainable_variables',"function%s"% str(n))
    #if learn_type == 'sft' or learn_type == 'sft2' or learn_type == 'sft3':
    #    save_vars += tf.get_collection('trainable_variables',"transition%s"% str(n))
    if learn_type == 'sft' or learn_type == 'sft2' or learn_type == 'sft3':
        save_vars += tf.get_collection('trainable_variables',"transition")
    n_clusters_network = len(kept_clusters)
    print('Training', learn_type, 'network found %s clusters'%n_clusters_network)
    if n_clusters_network != n_clusters:
        print("Reducing number of clusters from %s to %s" % (n_clusters,n_clusters_network))
    return heuristic, n_clusters_network, save_vars
def check_convergence(savename, network_vars, n_clusters, train_state, learn_type):
    """Decide whether training has converged, advance the curriculum phase,
    and save weights for the clusters worth keeping.

    Returns (train_dict, n_clusters_network, next learn_type, converged).
    Bug fix: the second branch tested `learn_type == 'init_c'` twice; the
    first test is now 'init_f', so the init_f phase takes this path and its
    phase transition below (init_f -> init_c) is reachable.
    """
    sess, input, y_, f, c, y, loss_runs, train_step_runs, learning_rate = network_vars
    # Get training inputs for entire network for heuristic checking
    train_inputs, train_ys = get_batch(1.0, train_state)
    train_dict = {input: train_inputs, y_:train_ys}
    # Determine number of clusters to keep and save weights for only those clusters
    if learn_type == 'initialize' or learn_type == 'context':
        converged = False
        heuristic, n_clusters_network, save_vars = determine_n_clusters(sess,n_clusters,train_state,sess.run(f,train_dict),sess.run(y_,train_dict))
        # Logic for total convergence
        if learn_type == 'context':
            converged = True
        if n_clusters_network == n_clusters and learn_type == 'initialize':
            learn_type = 'context'
    elif learn_type == 'init_f' or learn_type == 'init_c' or learn_type == 'sft' or learn_type == 'sft2':
        converged = False
        heuristic, n_clusters_network, save_vars = determine_n_segments(sess,learn_type,n_clusters,train_state,sess.run(f,train_dict),sess.run(y_,train_dict))
        # Phase transitions: init_c -> segment, init_f -> init_c,
        # sft -> sft2 -> sft3 (order matters — each test runs on the value
        # before the later reassignments).
        if learn_type == 'init_c':
            learn_type = 'segment'
        if learn_type == 'init_f':
            learn_type = 'init_c'
        # NOTE(review): 'sft3' never enters this branch (the elif above
        # excludes it, so sft3 converges via the else) — confirm whether
        # 'sft3' should also be listed in the elif condition.
        if n_clusters_network == n_clusters and learn_type == 'sft3':
            converged = True
        if learn_type == 'sft2':
            learn_type = 'sft3'
        if learn_type == 'sft':
            learn_type = 'sft2'
    else:
        converged, n_clusters_network, save_vars = (True, n_clusters, tf.trainable_variables())
    np.save('weights_'+savename,sess.run([var for var in save_vars]))
    return train_dict, n_clusters_network, learn_type, converged
|
import unittest
import pygame
import main
class TestKeyboard(unittest.TestCase):
    """Checks that Game.get_events() maps pygame key events onto the game's
    movement/action flags for both the hero and the buddy."""

    def setUp(self):
        self.game = main.Game()
        self.game.start()

    def _post_keys(self, event_type, keys):
        """Post one pygame event of `event_type` for every key in `keys`."""
        for key in keys:
            pygame.event.post(pygame.event.Event(event_type, key=key, type=event_type))

    def test_catching_of_pressed_buttons(self):
        self._post_keys(pygame.KEYDOWN,
                        (pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT,
                         pygame.K_RIGHT, pygame.K_SPACE, pygame.K_m,
                         pygame.K_h, pygame.K_w, pygame.K_s,
                         pygame.K_a, pygame.K_d, pygame.K_v))
        self.game.get_events()
        self.assertTrue(self.game.up)
        self.assertTrue(self.game.down)
        self.assertTrue(self.game.left)
        self.assertTrue(self.game.right)
        self.assertTrue(self.game.space)
        self.assertTrue(self.game.boom)
        self.assertTrue(self.game.hero.buddy_exist)
        self.assertTrue(self.game.buddy_up)
        self.assertTrue(self.game.buddy_down)
        self.assertTrue(self.game.buddy_left)
        self.assertTrue(self.game.buddy_right)
        self.assertTrue(self.game.buddy_bomb)

    def test_catching_of_unpressed_buttons(self):
        # Pre-set every flag so the KEYUP handling has something to clear.
        self.game.up = self.game.down = True
        self.game.left = self.game.right = True
        self.game.space = self.game.boom = True
        self.game.buddy_up = self.game.buddy_down = True
        self.game.buddy_left = self.game.buddy_right = True
        self.game.buddy_bomb = self.game.hero.buddy_exist = True
        # NOTE(review): K_h has no KEYUP counterpart here, so buddy_exist is
        # not exercised on release — confirm that is intentional.
        self._post_keys(pygame.KEYUP,
                        (pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT,
                         pygame.K_RIGHT, pygame.K_SPACE, pygame.K_m,
                         pygame.K_w, pygame.K_s, pygame.K_a,
                         pygame.K_d, pygame.K_v))
        self.game.get_events()
        self.assertFalse(self.game.up)
        self.assertFalse(self.game.down)
        self.assertFalse(self.game.left)
        self.assertFalse(self.game.right)
        self.assertFalse(self.game.space)
        self.assertFalse(self.game.boom)
        self.assertFalse(self.game.buddy_up)
        self.assertFalse(self.game.buddy_down)
        self.assertFalse(self.game.buddy_left)
        self.assertFalse(self.game.buddy_right)
        self.assertFalse(self.game.buddy_bomb)
if __name__ == '__main__':
    # Run the keyboard test-suite when executed directly.
    unittest.main()
|
#!/usr/bin/env python
import subprocess
print "Content-type: text/html"
print "<title>Picture taking CGI</title>"
print "<p>I'm going to upload the picture!</p>"
subprocess.call(['/home/pi/smile.sh'])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from student_downloader import StudentDownloader
from student_analyzer import StudentAnalyzer
from constants import Constants
import datetime
def main():
    """Run an example for a studentIDGetter class.

    Only the index-DB creation step runs; the download/registration steps
    below are kept as commented-out usage examples.

    Returns Constants.EXIT_SUCCESS on completion.
    """
    #downloader = StudentDownloader()
    #
    # Use it if you want to create an estimated student DB automatically.
    #
    #downloader.determine_studentID()
    #
    # Use it if you want to create an estimated student DB using your hand.
    #
    #estimated_students_db_manager = downloader.get_db_manager()
    #estimated_students_db_manager.register_studentIDs_ranging("g0846002", "g0847498") #entrance_year=2008
    #estimated_students_db_manager.register_studentIDs_ranging("g0946010", "g0947622") #entrance_year=2009
    #estimated_students_db_manager.register_studentIDs_ranging("g1044011", "g1045344") #entrance_year=2010
    #estimated_students_db_manager.register_studentIDs_ranging("g1144010", "g1145505") #entrance_year=2011
    #estimated_students_db_manager.label_traced_students_ranging("g1144010", "g1145505", datetime.date(2015,07,14))
    #estimated_students_db_manager.register_studentIDs_ranging("g1244028", "g1245397") #entrance_year=2012
    #estimated_students_db_manager.register_studentIDs_ranging("g1344018", "g1349031") #entrance_year=2013
    #estimated_students_db_manager.register_studentIDs_ranging("g1444026", "g1445539") #entrance_year=2014
    #estimated_students_db_manager.register_studentIDs_ranging("g1540074", "g1547932") #entrance_year=2015
    #
    # Download all student data using an estimated student DB above.
    #
    #downloader.download_all()
    #
    # Analyze and save downloaded HTMLs into "cse_student_DB.db".
    #
    analyzer = StudentAnalyzer()
    #analyzer.analyze_HTMLs()
    #analyzer.analyze_images()
    analyzer.create_index_DB()
    return Constants.EXIT_SUCCESS
if __name__ == '__main__':
    # Propagate main()'s exit status to the shell.
    sys.exit(main())
|
hiddenimports = ['musclex.modules.QF_utilities']
|
import scrapy
from douban250.items import Douban250Item
class Douban250Spider(scrapy.Spider):
    """Spider that crawls douban.com's Top 250 movie chart."""
    name = 'douban250'
    allowed_domains = ['movie.douban.com', ]
    # Query prefix; the numeric page offset is appended when building URLs.
    # (The old value already ended in "start=0", yielding "start=00" URLs.)
    base_url = 'https://movie.douban.com/top250?start='
    offset = 0
    start_urls = [base_url + str(offset), ]

    def parse(self, response):
        """Yield one Douban250Item per movie, then follow the next page."""
        # SelectorList containing every movie entry on this page.
        movies = response.css('.article .grid_view li')
        for each in movies:
            # Movie title.
            title = each.css('.item .hd .title:nth-child(1)::text').extract_first()
            # "director / actors" line; parts are separated by three NBSPs.
            dire_actor = each.css('.item .bd p::text').extract()[0].strip()
            parts = dire_actor.split('\xa0\xa0\xa0')
            director = parts[0].strip()
            # BUG FIX: 'actor' used to stay unbound (NameError on the first
            # movie) or silently keep the previous movie's value when the
            # actor part was missing; default it to an empty string.
            actor = parts[1].strip() if len(parts) > 1 else ''
            # "year / country / genre" line.
            info = each.css('.item .bd p::text').extract()[1].strip()
            year = info.split('/')[0].strip()
            country = info.split('/')[1].strip()
            # Renamed local so it no longer shadows the builtin `type`.
            movie_type = info.split('/')[2].strip()
            # Rating score.
            rating_num = each.css('.item .bd .star .rating_num::text').extract_first()
            # Signature quote.
            quote = each.css('.item .bd .quote span::text').extract_first()
            # Poster image URL.
            image = each.css('.item .pic a img::attr(src)').extract_first()
            item = Douban250Item()
            item['title'] = title
            item['director'] = director
            item['actor'] = actor
            item['year'] = year
            item['country'] = country
            item['type'] = movie_type
            item['rating_num'] = rating_num
            item['quote'] = quote
            item['image'] = image
            yield item
        # Pagination: 25 movies per page, offsets 0..225.
        if self.offset < 225:
            self.offset += 25
            yield scrapy.Request(url=self.base_url + str(self.offset), callback=self.parse)
|
import random
from functools import reduce
from discord.ext import commands
def roll_dice(number_of_pipes: int, throws: int) -> "list[int]":
    """Roll a die with *number_of_pipes* faces *throws* times.

    Returns the individual results, each between 1 and number_of_pipes
    inclusive. (The old annotation ``[str]`` was wrong: the list holds
    ints, and a bare list literal is not a valid type.)
    """
    return [random.randint(1, number_of_pipes) for _ in range(throws)]
def get_dices_result(number_of_pipes: int, throws: int) -> str:
    """Roll the dice and format the outcome as a two-line German message.

    Line 1 lists the individual rolls, line 2 their total.
    """
    rolls = roll_dice(number_of_pipes, throws)
    rolls_str = ", ".join(str(r) for r in rolls)
    # Builtin sum() replaces reduce(lambda x, y: x + y, ...).
    # Label fixed: the original said "Summer:" (typo for German "Summe")
    # and was missing the space after the colon.
    return f"Ergebnis: {rolls_str}\nSumme: {sum(rolls)}"
class DiceCog(commands.Cog):
    """Discord cog exposing simple dice-rolling commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name='dsa', help="Wirft 3 W20")
    async def dsa(self, ctx, ):
        # DSA checks roll three twenty-sided dice.
        await ctx.send(get_dices_result(20, 3))

    @commands.command(name='roll', help="Eingabebeispiel: 2w10")
    async def dice(self, ctx, dice:str):
        # Expected input shape: "<count>w<faces>", e.g. "2w10".
        parts = dice.split("w")
        if len(parts) != 2:
            await ctx.send("Falsche Eingabe")
        else:
            await ctx.send(get_dices_result(int(parts[1]), int(parts[0])))
def setup(bot):
    # Entry point used by discord.py's extension loader (bot.load_extension).
    bot.add_cog(DiceCog(bot))
|
from typing import Dict, Any
def get_stack_data():
    """Get all the stack data.

    This is a testing utility to organize data required by the troposphere
    stack generation.

    RUNTIME (execution modes, credentials, verbosity, etc.)
    GLOBAL (extra-environment data like connection mapping, default
        environment data, environment list): an organization of
        environments and the data that connects them
    ENVIRONMENT (env): an environment is an organization of resources
        that have common data, often a closed topology like a VPC,
        customer network, air-gapped network, etc.

    :rtype: Dict[str, Any]
    """
    data = {'GLOBAL':
            {'mesh_vpc_peers': ['triangle_1', 'triangle_2', 'triangle_3'],
             'star_vpc_peers': {'middle': ['north', 'south', 'east', 'west']},
             'one_to_one_vpc_peers': {'left_vpc': 'right_vpc', 'up_vpc': 'down_vpc'}
             },
            'MANAGEMENT_VPC': {'vpc_peers': ['PRODUCTION_VPC', 'DEVELOPMENT_VPC', 'INTEGRATION_TEST_VPC'],
                               }
            }
    # BUG FIX: the draft built the dict but never returned it, so callers
    # always got None despite the documented Dict[str, Any] return type.
    return data
|
#!/usr/bin/python3
import pdb
import kivy
kivy.require('1.9.0')
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.properties import ObjectProperty
class MapTile(Widget):
    """A single map tile whose colour/glyph reflect its sight-blocking state."""
    tile = ObjectProperty(None)
    color_dark_wall = [0, 0, 100, 1]
    color_dark_ground = [50, 50, 150, 1]

    def __init__(self, blocked, block_sight=None, **kwargs):
        super(MapTile, self).__init__(**kwargs)
        self.blocked = blocked
        # Walls block sight by default; callers may override explicitly.
        self.block_sight = block_sight or blocked
        self.tile.pos = self.pos
        self.tile.color = (self.color_dark_wall if self.block_sight
                           else self.color_dark_ground)

    def _repaint(self):
        # Sync the tile's colour and glyph with the current sight state.
        if self.block_sight:
            self.tile.color = self.color_dark_wall
            self.tile.text = '#'
        else:
            self.tile.color = self.color_dark_ground
            self.tile.text = '.'

    def set_block_sight(self, value=None):
        """Update the sight-blocking flag and refresh the visuals."""
        self.block_sight = bool(value)
        self._repaint()

    def set_blocked(self, value):
        """Update the movement-blocking flag and refresh the visuals."""
        self.blocked = value
        self._repaint()
class BaseSprite(Label):
    """A text-glyph sprite rendered as a kivy Label.

    Useful constructor kwargs: text, color, x, y.
    """

    def move(self, dx, dy):
        """Shift the sprite's centre by (dx, dy).

        NOTE: When the widget moves, all its children should move too.
        """
        self.center_x = self.center_x + dx
        self.center_y = self.center_y + dy
class PyRogueGame(Widget):
    """Root game widget: owns the map, the sprites and keyboard handling."""
    keypress_label = ObjectProperty(None)
    # Movement step in pixels; doubles as the map tile size.
    move_speed = 20

    def __init__(self, **kwargs):
        super(PyRogueGame, self).__init__(**kwargs)
        # Request for keyboard and bind keypresses to `self.press()`
        self._keyboard = Window.request_keyboard(self.close, self)
        self._keyboard.bind(on_key_down=self.press)

    def close(self):
        """Release the keyboard binding when kivy closes the keyboard."""
        self._keyboard.unbind(on_key_down=self.press)
        self._keyboard = None

    def load_initial_widgets(self):
        """Create the map container and the two starting sprites."""
        # NOTE: The last item on the list becomes the widget with the highest
        # z-order.
        widgets = [
            Widget(
                id='game_map',
                width=self.width,
                height=self.height,
                pos=self.pos
            ),
            BaseSprite(
                id='npc',
                color=[255, 255, 0, 1],
                x=self.width / 2 - self.move_speed,
                y=self.height / 2
            ),
            BaseSprite(
                id='player',
                color=[255, 255, 255, 1],
                x=self.width / 2,
                y=self.height / 2
            ),
        ]
        for widget in widgets:
            self.add_widget(widget)

    def _widget(self, widget_id, restrict=True, loopback=False):
        """Return the first descendant widget with the given id, else None."""
        for obj in self.walk(restrict=restrict, loopback=loopback):
            if obj.id == widget_id:
                return obj
        return None

    def press(self, keyboard, keycode, text, modifiers):
        """ Key controls
        Preferred key controls: http://www.roguebasin.com/index.php?title=Preferred_Key_Controls
        """
        # Hoisted: the original walked the widget tree up to three times
        # per keypress via repeated self._widget('player') calls.
        player = self._widget('player')
        self.keypress_label.text = 'wxh: {}x{}\nx,y: {}\nkey: {}'.format(
            self.width,
            self.height,
            player.pos,
            keycode
        )
        step = self.move_speed
        # Arrow keys + vi-style hjkl, with yubn diagonals; each key maps to
        # a single (dx, dy) displacement (diagonals were two sequential
        # moves before — the net displacement is identical).
        deltas = {
            'left': (-step, 0), 'h': (-step, 0),
            'right': (step, 0), 'l': (step, 0),
            'up': (0, step), 'k': (0, step),
            'down': (0, -step), 'j': (0, -step),
            'y': (-step, step), 'u': (step, step),
            'b': (-step, -step), 'n': (step, -step),
        }
        if keycode[1] in deltas:
            dx, dy = deltas[keycode[1]]
            player.move(dx, dy)

    def make_map(self):
        """Fill the map container with tiles; mark a test strip as blocking."""
        game_map = self._widget('game_map')
        for y in range(0, game_map.height, self.move_speed):
            for x in range(0, game_map.width, self.move_speed):
                game_map.add_widget(MapTile(
                    id='tile:{},{}'.format(x, y),
                    blocked=False,
                    x=x,
                    y=y
                ))
        # NOTE: Set some tiles to blocking for testing
        for ctr, widget in enumerate(game_map.walk(restrict=True)):
            if 'tile' not in str(widget.id):
                continue
            widget.set_block_sight(True)
            widget.set_blocked(True)
            if ctr > (game_map.width + game_map.height) / 2:
                break
class PyRogueApp(App):
    """Kivy application wrapper that bootstraps the game widget."""

    def build(self):
        # Build the root widget, populate sprites, then lay out the map tiles.
        game = PyRogueGame()
        game.load_initial_widgets()
        game.make_map()
        return game
if __name__ == '__main__':
    # Start the kivy event loop.
    PyRogueApp().run()
|
from app.utils.constant import GCN_MODEL, SUPPORTS
from app.model import base_model
from app.layer.GC import SparseGC
import tensorflow as tf
class Model(base_model.Base_Model):
    '''Two-layer GCN built from sparse graph-convolution layers.'''

    def __init__(self, model_params, sparse_model_params, placeholder_dict):
        super(Model, self).__init__(model_params=model_params,
                                    sparse_model_params=sparse_model_params,
                                    placeholder_dict=placeholder_dict)
        self.name = GCN_MODEL
        self.supports = placeholder_dict[SUPPORTS]
        self.model_op()

    def _layers_op(self):
        '''Operator to build the layers for the model.
        This function should not be called by the variables outside the class and
        is to be implemented by all the subclasses'''
        hidden_size = self.model_params.hidden_layer1_size
        # Hidden layer: ReLU activation; may consume sparse input features.
        hidden_layer = SparseGC(input_dim=self.input_dim,
                                output_dim=hidden_size,
                                supports=self.supports,
                                dropout_rate=self.dropout_rate,
                                activation=tf.nn.relu,
                                sparse_features=self.model_params.sparse_features,
                                num_elements=self.num_elements)
        # Output layer: identity activation; always dense features.
        output_layer = SparseGC(input_dim=hidden_size,
                                output_dim=int(self.output_shape[1]),
                                supports=self.supports,
                                dropout_rate=self.dropout_rate,
                                activation=lambda x: x,
                                sparse_features=False,
                                num_elements=self.num_elements)
        self.layers.extend([hidden_layer, output_layer])
|
#!/usr/bin/env python
"""
This removes lines from pdb files which define
where HELIX, SHEET, or TURNs are located.
"""
import sys
def main():
    """Copy stdin to stdout, dropping secondary-structure records.

    Filters PDB HELIX, SHEET and TURN records (matched by their fixed-width
    record-name prefix); every other line passes through unchanged.
    """
    # startswith with a tuple replaces the three manual slice comparisons;
    # the prefixes carry the same trailing spaces as the original slices.
    secondary_structure = ("HELIX ", "SHEET ", "TURN ")
    for line in sys.stdin:
        if not line.startswith(secondary_structure):
            sys.stdout.write(line)
if __name__ == "__main__":
main()
|
import datetime
from decimal import Decimal
import xlrd
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    # User-facing description shown by `manage.py help <command>`.
    help = """
    Import a XLS file containing a list of Orders.
    Supports the format used by the Teaching Lab as of December 2018,
    with the following columns in the given order:
    - Description
    - Requester Name
    - Supplier
    - Amount (net)
    - Requisition Number
    - Req. date
    - Purchase Orders
    - Expense_Codes
    - Centro Responsabilidade
    - Project
    - Group
    - Payment Method
    - Notes
    """

    def add_arguments(self, parser):
        """Register the positional XLS filename and the header-row flag."""
        parser.add_argument("filename", type=str)
        # NOTE(review): action="store_false" makes the option default to
        # True with the flag turning it off, and the stored value is never
        # read in handle() — confirm whether header handling is missing.
        parser.add_argument(
            "--no-header", action="store_false", help="disable header row"
        )

    def handle(self, *args, **options):
        """Validate the spreadsheet structure, then import one Order per row.

        Raises CommandError on structural problems (wrong column set,
        missing mandatory cells, unknown group/cost center/project/expense
        code, payment-method mismatch). Rows whose requisition number
        already exists are reported and skipped.
        """
        errors = []
        workbook = xlrd.open_workbook(
            filename=options["filename"], encoding_override="utf-8", on_demand=True
        )
        worksheet = workbook.sheet_by_index(0)
        # start by validating the file structure
        # - number, name and order of columns
        # - empty cells
        expected_column_labels = [
            "Description",
            "Requester Name",
            "Supplier",
            "Amount (net)",
            "Requisition Number",
            "Req. date",
            "Purchase Orders",
            "Expense_Codes",
            "Centro Responsabilidade",
            "Project",
            "Group",
            "Payment Method",
            "Notes",
        ]
        expected_column_number = len(expected_column_labels)
        if worksheet.ncols != expected_column_number:
            raise CommandError(
                "Found %(found)s columns, expected exactly %(expected)s"
                % {"found": worksheet.ncols, "expected": expected_column_number}
            )
        # Header row (row 0) must match the expected labels exactly, in order.
        for col, expected in zip(range(worksheet.ncols), expected_column_labels):
            if worksheet.cell_value(0, col) != expected:
                errors.append('Column "%s" not found' % expected)
        if errors:
            raise CommandError(errors)
        for col in range(worksheet.ncols):
            if col in [0, 2, 4, 5, 6, 7, 8, 9, 11, 12]:
                # ignore empty cells in columns for not mandatory fields:
                continue
            # NOTE(review): start_rowx=2 skips rows 0 and 1 here, yet the
            # import loop below starts at row 1 — confirm which row the
            # data actually starts on.
            col_values = worksheet.col_values(col, start_rowx=2)
            empty_cells_row_indices = [
                i for i, value in enumerate(col_values) if value == ""
            ]
            for row in empty_cells_row_indices:
                cell = xlrd.formula.cellname(row, col)
                errors.append("Missing value in cell %s" % cell)
        if errors:
            raise CommandError(errors)
        # validate columns values
        # new entries must not have duplicated requisition numbers nor
        # purchase order number
        User = apps.get_model("auth", "User")
        AuthGroup = apps.get_model("auth", "Group")
        Currency = apps.get_model("finance", "Currency")
        OrderExpenseCode = apps.get_model("finance", "OrderExpenseCode")
        ExpenseCode = apps.get_model("finance", "ExpenseCode")
        FinanceProject = apps.get_model("finance", "FinanceProject")
        FinanceCostCenter = apps.get_model("finance", "FinanceCostCenter")
        Order = apps.get_model("finance", "Order")
        Supplier = apps.get_model("finance", "Supplier")
        self.stdout.write("Validating document...")
        imported_orders = []
        for row in range(1, worksheet.nrows):
            values = worksheet.row_values(row)
            # replace en-dash for single dash across all str values extracted
            values = [
                value.replace("\u2013", "-") if isinstance(value, str) else value
                for value in values
            ]
            values_raw = {
                col_name: value
                for col_name, value in zip(expected_column_labels, values)
            }
            # description --> TextField
            column_id = 0
            description = values_raw[expected_column_labels[column_id]]
            assert isinstance(description, str)
            # requester name --> CharField
            column_id = 1
            requester_name = values_raw[expected_column_labels[column_id]]
            assert "\n" not in requester_name, "Invalid requester name"
            assert isinstance(requester_name, str)
            # finance --> ForeignKey to Supplier
            column_id = 2
            supplier_name = values_raw[expected_column_labels[column_id]]
            assert "\n" not in supplier_name, "Invalid finance name"
            assert isinstance(supplier_name, str)
            # Reimbursement rows all map onto one generic supplier record.
            if "_reimbursement" in supplier_name.lower():
                supplier_name = "Geral Reembolsos"
            if supplier_name.lower() == "":
                supplier = None
            else:
                try:
                    supplier = Supplier.objects.get(supplier_name=supplier_name.strip())
                except Supplier.DoesNotExist:
                    # Ignore missing suppliers in the DB
                    # Generate the report and warn the users to fill in
                    # the missing entries
                    supplier = None
                    msg = " ".join(
                        [
                            self.style.WARNING(f"Row {row+1:3d}:"),
                            f"Verify finance name '{supplier_name}'",
                        ]
                    )
                    self.stdout.write(msg)
                else:
                    assert isinstance(supplier, Supplier)
            # ammount --> DecimalField
            column_id = 3
            ammount = str(values_raw[expected_column_labels[column_id]])
            ammount = Decimal(ammount.strip())
            assert isinstance(ammount, Decimal)
            # requisition number --> IntegerField
            column_id = 4
            reqnum = int(values_raw[expected_column_labels[column_id]])
            try:
                order = Order.objects.get(order_reqnum=reqnum)
            except Order.DoesNotExist:
                # requisition number is valid, proceed
                pass
            else:
                # NOTE(review): a row with this requisition number already
                # exists; both branches below report it and skip the row —
                # confirm the warning branch is meant to skip as well.
                if order.responsible.username in ["teresa.dias", "simone.zacarias"]:
                    msg = " ".join(
                        [
                            self.style.WARNING(f"Row {row+1:3d}:"),
                            f"Ignoring duplicated Order",
                        ]
                    )
                    self.stdout.write(msg)
                else:
                    msg = " ".join(
                        [
                            self.style.ERROR(f"Row {row+1:3d}:"),
                            f"Requisiton number {reqnum} already exists",
                        ]
                    )
                    self.stdout.write(msg)
                continue
            assert isinstance(reqnum, int)
            # requisition date --> DateField
            column_id = 5
            req_date = values_raw[expected_column_labels[column_id]]  # as float
            req_date = xlrd.xldate_as_tuple(req_date, workbook.datemode)
            req_date = datetime.datetime(*req_date).date()
            assert isinstance(req_date, datetime.date)
            # purchase order number --> CharField
            column_id = 6
            ponum = values_raw[expected_column_labels[column_id]]
            if ponum != "":
                try:
                    OrderExpenseCode.objects.get(purchase_order=ponum)
                except OrderExpenseCode.DoesNotExist:
                    # New purchase order; must look like an "EC..." code.
                    assert ponum.startswith("EC")
                    if "," in ponum or ";" in ponum:
                        msg = " ".join(
                            [
                                self.style.WARNING(f"Row {row+1:3d}:"),
                                f"Using only the first Purchase Order number '{ponum}'",
                            ]
                        )
                        self.stdout.write(msg)
                        ponum = ponum.split(",")[0].split(";")[0]
                except OrderExpenseCode.MultipleObjectsReturned:
                    msg = " ".join(
                        [
                            self.style.WARNING(f"Row {row+1:3d}:"),
                            f"Multiple duplicated Purchase Order '{ponum}'",
                        ]
                    )
                    self.stdout.write(msg)
                else:
                    msg = " ".join(
                        [
                            self.style.WARNING(f"Row {row+1:3d}:"),
                            f"Duplicated Purchase Order '{ponum}'",
                        ]
                    )
                    self.stdout.write(msg)
            assert isinstance(ponum, str)
            # expense code --> ForeignKey to OrderExpenseCode
            column_id = 7
            expense_code_type = values_raw[expected_column_labels[column_id]]
            expense_code_type = expense_code_type.replace("&", "and")  # special case
            assert isinstance(expense_code_type, str)
            # cost center --> ForeignKey to FinanceCostCenter
            column_id = 8
            cost_center_code = int(values_raw[expected_column_labels[column_id]])
            # Cost center codes are stored zero-padded to 7 digits.
            cost_center_code = f"{cost_center_code:07d}"
            assert isinstance(cost_center_code, str)
            # finance project --> ForeignKey to FinanceProject
            column_id = 9
            finance_project_code = int(values_raw[expected_column_labels[column_id]])
            # Project codes are stored zero-padded to 3 digits.
            finance_project_code = f"{finance_project_code:03d}"
            assert isinstance(finance_project_code, str)
            # group --> ForeignKey to auth.Group
            column_id = 10
            auth_group_name = values_raw[expected_column_labels[column_id]]
            assert "\n" not in auth_group_name, "Invalid group name"
            assert isinstance(auth_group_name, str)
            if auth_group_name == "INDP":
                auth_group_name += " Lab"
            # Group records follow the "GROUP: <name>" naming convention.
            auth_group_name = f"GROUP: {auth_group_name}"
            try:
                group = AuthGroup.objects.get(name=auth_group_name.strip())
            except AuthGroup.DoesNotExist:
                raise CommandError("Invalid group name %s" % auth_group_name)
            assert isinstance(group, AuthGroup)
            # payment method --> CharField
            column_id = 11
            payment_method = values_raw[expected_column_labels[column_id]]
            # Map the display name back to its stored code; the for/else
            # raises when no entry matches.
            for (
                payment_method_code,
                payment_method_name,
            ) in Order.PAYMENT_METHOD.items():
                if payment_method_name == payment_method.strip():
                    break
            else:
                raise CommandError("Payment Method mismatch: %s" % payment_method)
            payment_method = payment_method_code
            # notes --> TextField
            column_id = 12
            notes = values_raw[expected_column_labels[column_id]]
            assert isinstance(notes, str)
            # responsible --> (will be Simone by default)
            responsible = User.objects.get(username="simone.zacarias")
            # INTERLUDE: fetch the related tables required and build OrderExpenseCode
            try:
                cost_center = FinanceCostCenter.objects.get(
                    costcenter_code=cost_center_code
                )
            except FinanceCostCenter.DoesNotExist:
                raise CommandError("Invalid cost center code: %s" % cost_center_code)
            try:
                finance_project = FinanceProject.objects.get(
                    code=finance_project_code, costcenter=cost_center
                )
            except FinanceProject.DoesNotExist:
                raise CommandError("Invalid project code: %s" % finance_project_code)
            try:
                expense_code = ExpenseCode.objects.get(
                    expensecode_type=expense_code_type, project=finance_project
                )
            except ExpenseCode.DoesNotExist:
                raise CommandError("Invalid expense code: %s" % expense_code_type)
            # create Order
            order = Order(
                order_reqnum=reqnum,
                order_reqdate=req_date,
                order_desc=description,
                order_amount=ammount,
                order_req=requester_name,
                order_paymethod=payment_method,
                order_notes=notes,
                supplier=supplier,
                responsible=responsible,
                currency=Currency.objects.get(pk=1),  # EUR
                group=group,
            )
            order.save(
                expensecode_kwargs={
                    "expensecode": expense_code,
                    "purchase_order": ponum,
                }
            )
            imported_orders.append(order)
        self.stdout.write(
            self.style.SUCCESS("Imported %d orders" % len(imported_orders))
        )
|
'''
A package is really just a directory; packages exist to resolve module-name
clashes: as long as the top-level directory names differ, modules with the
same name can coexist without problems.
A package directory must contain an __init__ file, whose contents may be
left empty.
'''
|
import utils
import pandas as pd
import numpy as np
from datetime import timedelta
# PLEASE USE THE GIVEN FUNCTION NAME, DO NOT CHANGE IT
def read_csv(filepath):
    '''Load the three ETL input files found under *filepath*.

    Reads events.csv (patient_id, event_id, event_description, timestamp,
    value), mortality_events.csv (patient_id, timestamp, label) and
    event_feature_map.csv (idx, event_id).

    Returns the tuple (events, mortality, feature_map) of DataFrames.
    '''
    stems = ('events', 'mortality_events', 'event_feature_map')
    events, mortality, feature_map = (
        pd.read_csv(filepath + stem + '.csv') for stem in stems
    )
    return events, mortality, feature_map
def calculate_index_date(events, mortality, deliverables_path):
    '''Compute the index date for every patient.

    Deceased patients (those listed in *mortality*): 30 days before the
    recorded death date. Alive patients: timestamp of their last event.

    Writes the result to <deliverables_path>etl_index_dates.csv with
    columns patient_id, indx_date, and returns it as a DataFrame.

    FIX: the draft mutated the caller's *events* frame (added a 'group'
    column) and assigned into a slice of *mortality* (pandas chained-
    assignment warning); this version leaves both inputs untouched while
    producing the identical output.
    '''
    dead_ids = set(mortality['patient_id'])
    # Alive patients: index date is the timestamp of their last event.
    alive = events.loc[~events['patient_id'].isin(dead_ids),
                       ['patient_id', 'timestamp']].drop_duplicates().copy()
    alive['timestamp'] = pd.to_datetime(alive['timestamp'])
    alive_enddate = alive.groupby('patient_id').max()['timestamp'].reset_index()
    # Deceased patients: index date is 30 days before the death date.
    dead_enddate = mortality[['patient_id', 'timestamp']].copy()
    dead_enddate['timestamp'] = (pd.to_datetime(dead_enddate['timestamp'])
                                 - pd.to_timedelta(30, unit='d'))
    indx_date = pd.concat([alive_enddate, dead_enddate])
    indx_date.columns = ['patient_id', 'indx_date']
    indx_date.to_csv(deliverables_path + 'etl_index_dates.csv',
                     header=['patient_id', 'indx_date'], index=False)
    return indx_date
def filter_events(events, indx_date, deliverables_path):
    '''Keep only the events inside each patient's observation window.

    The window spans the 2000 days up to and including the index date.
    Writes (patient_id, event_id, value) rows to etl_filtered_events.csv
    under *deliverables_path* and returns them as a DataFrame.
    '''
    joined = pd.merge(indx_date, events, on='patient_id', how='inner')
    joined['timestamp'] = pd.to_datetime(joined['timestamp'])
    # Observation window: [indx_date - 2000 days, indx_date].
    window_start = joined.indx_date - timedelta(days=2000)
    in_window = (joined.timestamp >= window_start) & (joined.timestamp <= joined.indx_date)
    filtered_events = joined.loc[in_window, ['patient_id', 'event_id', 'value']]
    filtered_events.to_csv(deliverables_path + 'etl_filtered_events.csv',
                           header=['patient_id', 'event_id', 'value'],
                           index=False)
    return filtered_events
def aggregate_events(filtered_events, mortality_df,feature_map, deliverables_path):
    '''
    TODO: This function needs to be completed.
    Refer to instructions in Q3 c
    Suggested steps:
    1. Replace event_id's with index available in event_feature_map.csv
    2. Remove events with n/a values
    3. Aggregate events using sum and count to calculate feature value
    4. Normalize the values obtained above using min-max normalization(the min value will be 0 in all scenarios)
    IMPORTANT:
    Save aggregated_events to a csv file in the deliverables folder named as etl_aggregated_events.csv.
    Use the global variable deliverables_path while specifying the filepath.
    Each row is of the form patient_id, event_id, value.
    The csv file should have a header .
    For example if you are using Pandas, you could write:
        aggregated_events.to_csv(deliverables_path + 'etl_aggregated_events.csv', columns=['patient_id', 'feature_id', 'feature_value'], index=False)
    Return filtered_events
    '''
    # Map each event_id to its numeric feature index and drop NA values.
    events_to_idx = pd.merge(filtered_events, feature_map, on='event_id')
    events_to_idx = events_to_idx[['patient_id', 'idx', 'value']]
    events_to_idx = events_to_idx.dropna()
    # NOTE(review): indices < 2680 are aggregated by SUM and the rest by
    # COUNT — presumably lab-value vs diagnostic/medication events; confirm
    # the 2680 threshold against the feature map.
    events_sum = events_to_idx[events_to_idx['idx'] < 2680]
    events_count = events_to_idx[events_to_idx['idx'] >= 2680]
    events_counts = events_count.groupby(['patient_id', 'idx']).agg('count')
    # events_counts.columns = ['patient_id', 'event_id', 'value']
    events_sums = events_sum.groupby(['patient_id', 'idx']).agg('sum')
    # events_sums.columns = ['patient_id', 'event_id', 'value']
    total_events = pd.concat([events_counts, events_sums])
    total_events.columns = ['value']
    total_events = total_events.reset_index()
    ##min- max
    # Min-max normalization with the minimum assumed to be 0: value / max.
    total_events1 = total_events[['idx', 'value']]
    # min_events_value = total_events1.groupby(['idx']).min()
    max_events_value = total_events1.groupby(['idx']).max()
    max_events_value = max_events_value.reset_index()
    max_events_value.columns = ['idx', 'max_value']
    df1 = pd.merge(total_events, max_events_value, on='idx')
    # NOTE(review): the two assignments below are chained assignments on
    # slices (pandas SettingWithCopyWarning); they work on current pandas
    # but should use .copy()/.loc.
    df1_not_zero = df1[df1['max_value'] != 0]
    df1_not_zero['value'] = df1_not_zero['value'] / df1_not_zero['max_value']
    df1_zero = df1[df1['max_value'] == 0]
    # NOTE(review): features whose max is 0 are forced to 1.0 — confirm
    # this is the intended convention rather than 0.
    df1_zero['value'] = 1.0
    # df1_zero = df1_zero[['patient_id', 'idx', 'value', 'min_value', 'max-min']]
    aggregated_events = pd.concat([df1_zero, df1_not_zero])
    aggregated_events = aggregated_events[['patient_id', 'idx', 'value']]
    aggregated_events.columns = ['patient_id', 'feature_id', 'feature_value']
    aggregated_events.to_csv(deliverables_path + 'etl_aggregated_events.csv',
                             header=['patient_id', 'feature_id', 'feature_value'],
                             index=False)
    return aggregated_events
def create_features(events, mortality, feature_map):
    """Run the full ETL pipeline and build the model-input dictionaries.

    Returns (patient_features, mortality) where patient_features maps
    patient_id -> list of (feature_id, feature_value) tuples, and the
    returned mortality maps patient_id -> 0/1 label (the parameter of the
    same name is rebound).
    """
    deliverables_path = '../deliverables/'
    #Calculate index date
    indx_date = calculate_index_date(events, mortality, deliverables_path)
    #Filter events in the observation window
    filtered_events = filter_events(events, indx_date, deliverables_path)
    #Aggregate the event values for each patient
    aggregated_events = aggregate_events(filtered_events, mortality, feature_map, deliverables_path)
    '''
    TODO: Complete the code below by creating two dictionaries -
    1. patient_features : Key - patient_id and value is array of tuples(feature_id, feature_value)
    2. mortality : Key - patient_id and value is mortality label
    '''
    # Pack each row into a (feature_id, feature_value) tuple, then group per patient.
    aggregated_events['merged'] = aggregated_events.apply(lambda row: (row['feature_id'], row['feature_value']), axis=1)
    patient_features = aggregated_events.groupby('patient_id')['merged'].apply(lambda x: x.tolist()).to_dict()
    # Label every event row 1/0 by mortality membership, then collapse into
    # a per-patient dict.
    # NOTE(review): this mutates the caller's `events` frame (adds a
    # 'group' column) — confirm that is acceptable.
    events['group'] = np.where(events['patient_id'].isin(mortality['patient_id']), '1', '0')
    events = events.reset_index()
    events['group'] = events['group'].astype(int)
    mortality_df = events[['patient_id', 'group']]
    mortality = mortality_df.set_index('patient_id')['group'].to_dict()
    return patient_features, mortality
def save_svmlight(patient_features, mortality, op_file, op_deliverable):
    '''Write the training features in SVMLight format.

    Two files are produced:
    1. op_file        - "<label> <fid>:<fval> ..." per patient (SVMLight).
    2. op_deliverable - "<patient_id> <label> <fid>:<fval> ..." per patient.
    Patients and their features are emitted in ascending order.

    FIX: both file handles were opened and never closed; context managers
    now guarantee they are flushed and closed even on error.
    '''
    with open(op_file, 'wb') as deliverable1, open(op_deliverable, 'wb') as deliverable2:
        for key in sorted(patient_features):
            line1 = "%d" % (mortality[key])
            line2 = "%d %d" % (key, mortality[key])
            # Tuples sort by feature_id first, giving ascending feature order.
            for value in sorted(patient_features[key]):
                merged = "%d:%.6f" % (value[0], value[1])
                line1 = line1 + " " + merged
                line2 = line2 + " " + merged
            deliverable1.write((line1 + " " + "\n").encode())
            deliverable2.write((line2 + " " + "\n").encode())
def main():
    """Build and save the training feature files from the raw CSVs.

    NOTE(review): relies on the relative paths ../data/train/ and
    ../deliverables/ existing — run from the source directory.
    """
    train_path = '../data/train/'
    events, mortality, feature_map = read_csv(train_path)
    patient_features, mortality = create_features(events, mortality, feature_map)
    save_svmlight(patient_features, mortality, '../deliverables/features_svmlight.train', '../deliverables/features.train')

if __name__ == "__main__":
    main()
|
from __future__ import division, print_function
import time
import matplotlib.pyplot as plt
import numpy as np
from numpy import asarray
from numpy import expand_dims
from numpy import log
from numpy import mean, cov
from numpy import exp
from numpy import std
from math import floor
import os
from keras.models import Model, Sequential
from keras.layers import Activation, Dense, Flatten, BatchNormalization, Dropout, Input, Reshape, multiply
from keras.layers import Embedding, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Nadam, Adam, SGD
from keras.datasets import mnist
import tensorflow as tf
import argparse
from scipy.linalg import sqrtm
import utils
# Load MNIST at import time; X_test feeds the metric helpers below.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Parse argument
parser = argparse.ArgumentParser()
# Argument lists
# NOTE(review): several help strings below say "inception distance" for
# unrelated metrics (mmd/ms/nn) — the flag names are authoritative.
parser.add_argument('--all', action ='store_true', default=False, help="turn on for inception score")
parser.add_argument('--ins', action ='store_true', default=False, help="turn on for inception score")
parser.add_argument('--fid', action ='store_true', default=False, help="turn on for inception distance")
parser.add_argument('--mmd', action ='store_true', default=False, help="turn on for inception distance")
parser.add_argument('--ms', action ='store_true', default=False, help="turn on for inception distance")
parser.add_argument('--nn', action ='store_true', default=False, help="turn on for inception distance")
# Read the arguments
args = parser.parse_args()
all_ = args.all
ins = args.ins
fid = args.fid
mmd = args.mmd
ms = args.ms
nn = args.nn
# Image shape information
img_rows = X_train.shape[1]
img_cols = X_train.shape[2]
if len(X_train.shape) == 4:
    channels = X_train.shape[3]
else:
    channels = 1
img_shape = (img_rows, img_cols, channels)
# Conditional-GAN hyperparameters (must match the trained checkpoint).
num_classes = 10
latent_dim = 100
optimizer = Adam(0.0002, 0.5)
def generator():
    """Build the conditional generator: (noise, label) -> image tensor."""
    body = Sequential()
    # Widening Dense stack; every Dense is followed by LeakyReLU + BatchNorm.
    body.add(Dense(256, input_dim=latent_dim))
    body.add(LeakyReLU(alpha=0.2))
    body.add(BatchNormalization(momentum=0.8))
    for width in (512, 1024):
        body.add(Dense(width))
        body.add(LeakyReLU(alpha=0.2))
        body.add(BatchNormalization(momentum=0.8))
    # Project to pixel space ([-1, 1] via tanh) and fold into image shape.
    body.add(Dense(np.prod(img_shape), activation='tanh'))
    body.add(Reshape(img_shape))
    noise = Input(shape=(latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    # Condition on the class label: embed it and multiply into the noise.
    label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))
    img = body(multiply([noise, label_embedding]))
    return Model([noise, label], img)
def discriminator():
    """Build the conditional discriminator: (image, label) -> validity."""
    body = Sequential()
    body.add(Dense(512, input_dim=np.prod(img_shape)))
    body.add(LeakyReLU(alpha=0.2))
    # Two further Dense blocks with dropout for regularisation.
    for _ in range(2):
        body.add(Dense(512))
        body.add(LeakyReLU(alpha=0.2))
        body.add(Dropout(0.4))
    body.add(Dense(1, activation='sigmoid'))
    img = Input(shape=img_shape)
    label = Input(shape=(1,), dtype='int32')
    # Condition on the class label: embed it and multiply into the flat image.
    label_embedding = Flatten()(Embedding(num_classes, np.prod(img_shape))(label))
    flat_img = Flatten()(img)
    validity = body(multiply([flat_img, label_embedding]))
    return Model([img, label], validity)
# Build the generator
generator = generator()
# The generator takes noise and the target label as input
# and generates the corresponding digit of that label
generator_path = '../Q2/saved_model_weights/version1/generator_weights_99000.h5'
generator.load_weights(generator_path)
# Pre-trained classifier used as the feature extractor for the metrics.
path_save_model = 'save_weight_classifier/version_1.h5'
model = tf.keras.models.load_model(path_save_model)
# Compute only the requested metrics (or all of them with --all).
if fid or all_:
    fid = utils.calculate_inception_distance_generated_img(generator, model, latent_dim, 1000, X_test)
    print("FID score is ", fid)
if ins or all_:
    is_avg, is_std = utils.calculate_inception_score_generated_img(generator, model, latent_dim, 1000)
    print("IS score is ", is_avg, ", ", is_std)
if mmd or all_:
    mmd = utils.calculate_mmd_generated_img(generator, model, latent_dim, 100, X_test)
    print('MMD score', mmd)
if ms or all_:
    ms = utils.calculate_mode_score_generated_img(generator, model, latent_dim, 1000, X_test)
    print('Mode score', ms)
if nn or all_:
    # BUG FIX: the 1-NN result used to be assigned to `ms`, clobbering the
    # mode score variable; store it in `nn` instead.
    nn = utils.calculate_nn_score_generated_img(generator, model, latent_dim, 100, X_test)
    print('1NN score', nn)
|
import argparse
from glob import glob
from operator import itemgetter
from os.path import basename, join
from pyrosetta import *
from pyrosetta.rosetta.core.scoring import ScoreType as st
from pyrosetta.rosetta.core.select.residue_selector import \
ChainSelector, NeighborhoodResidueSelector, ResidueIndexSelector
from pyrosetta.rosetta.protocols.enzdes import ADD_NEW, AddOrRemoveMatchCsts
from statistics import stdev
def parse_args():
    """Collect the command-line options for a fasc-analysis run."""
    p = argparse.ArgumentParser()
    p.add_argument("directory", type=str,
                   help="Read fasc files from what directory?")
    p.add_argument("-z", "--zipped", action="store_true", default=False,
                   help="Add .gz suffix to decoys?")
    p.add_argument("-kf", "--keep_folder", action="store_true", default=False,
                   help="Keep the folder name of the decoy? (By default, \
                        only the basename will appear.)")
    p.add_argument("-c", "--constraints", type=str, default='htra1_protease.cst',
                   help="Pick a constraints file for the enzyme")
    p.add_argument("-n", "--name", type=str,
                   help="Pick an output file name")
    return p.parse_args()
def collect_fasc_files(folder):
    """
    For a given folder, collects a sorted list of all .fasc files.
    Excludes any combined_reports files produced by condense_fasc.py.
    """
    # Search for .fasc files
    fasc_files = glob(join(folder, "*.fasc"))
    # BUG FIX: the old pop-while-enumerating loop skipped the element after
    # each removal, so consecutive combined_reports files slipped through.
    # Filtering into a new list handles every match. (The unused `base_name`
    # local was also dropped.)
    fasc_files = [f for f in fasc_files if 'combined_reports' not in f]
    return sorted(fasc_files)
def text_to_numbers(in_list):
    """
    Convert the numeric strings in a mixed list of strings to floats,
    leaving every token that does not parse as a number untouched.
    """
    def _maybe_float(token):
        # EAFP: try the conversion, fall back to the raw string.
        try:
            return float(token)
        except ValueError:
            return token
    return [_maybe_float(token) for token in in_list]
def fasc_to_dict_list(fasc):
    """
    Parse a .fasc file into one dict per line (score term -> value),
    returned sorted by total_score. The file's first line is skipped.
    """
    with open(fasc, 'r') as read:
        records = read.readlines()[1:]  # first line is not useful
    parsed = []
    for record in records:
        # Tokens alternate "key:" / "value"; strip the trailing colon from
        # keys and convert numeric values to floats in place.
        tokens = record.split()
        keys = [t[:-1] for t in tokens[0::2]]
        values = []
        for t in tokens[1::2]:
            try:
                values.append(float(t))
            except ValueError:
                values.append(t)
        row = {}
        for key, value in zip(keys, values):
            row[key] = value
        parsed.append(row)
    # Best (lowest) total score first.
    return sorted(parsed, key=itemgetter('total_score'))
def add_unconstrained_scores(score_dict):
    """
    Add 'cst_total' (sum of all constraint terms present) and
    'unconstrained_total_score' (total_score minus constraints) to
    score_dict. Mutates the dict in place; returns None.
    """
    cst_terms = ['atom_pair_constraint', 'coordinate_constraint',
                 'angle_constraint', 'dihedral_constraint']
    # Calculate constraint score, if constraints are present.
    # BUG FIX: this used a bare `except: pass`, which would also hide real
    # errors (e.g. a non-numeric value); only the missing-key case is
    # expected, so catch KeyError specifically.
    cst_score = 0
    for ct in cst_terms:
        try:
            cst_score += score_dict[ct]
        except KeyError:
            pass
    # Calculate unconstrained total score and store both values.
    score_dict['cst_total'] = cst_score
    score_dict['unconstrained_total_score'] = score_dict['total_score'] - cst_score
    return
def remove_outliers(dict_list):
    """
    Drop score lines whose total constraint score is more than two standard
    deviations above the minimum constraint score. Mutates dict_list in
    place (callers may rely on either the mutation or the return value).
    """
    constraint_scores = [i['cst_total'] for i in dict_list]
    threshold = min(constraint_scores) + 2 * stdev(constraint_scores)
    # BUG FIX: the old version popped elements while enumerating the same
    # list; each removal shifted the list so the following element was
    # skipped, letting consecutive outliers survive. Rebuild the contents
    # with a single filtered pass instead.
    dict_list[:] = [i for i in dict_list if i['cst_total'] <= threshold]
    return dict_list
def filter_by_constraints(dict_list):
    """
    Annotate each score line with its constraint / unconstrained totals,
    drop lines with outlying constraint scores, and return the survivors
    sorted by unconstrained total score (best first).
    """
    # Annotate every line in place with the derived score terms.
    for line in dict_list:
        add_unconstrained_scores(line)
    # Knock out constraint-score outliers, then rank what is left.
    survivors = remove_outliers(dict_list)
    return sorted(survivors, key=itemgetter('unconstrained_total_score'))
def selector_to_list(pose, selector):
    """Convert a residue selector's boolean vector into the list of selected
    residue indices (Rosetta residue numbering is 1-based)."""
    selection_vector = selector.apply(pose)
    return [res for res in range(1, len(selection_vector) + 1)
            if selection_vector[res] == 1]
def fix_file(pdb):
    """Pre-process a decoy PDB so enzdes constraints can be applied.

    Blanks out LINK records and rewrites the residue name in the enzdes
    comment block (lines 3-6) from VAL to the residue encoded in the
    filename. The file is rewritten in place.
    """
    bn = basename(pdb)
    # Single-letter code of the last designed residue, taken from a fixed
    # position in the filename. #HACKY: assumes one exact naming scheme —
    # TODO confirm against the actual decoy filenames.
    last_res = bn[36] #HACKY
    aa = {'A':'ALA', 'C':'CYS','D':'ASP','E':'GLU', 'F':'PHE','G':'GLY','H':'HIS','I':'ILE','K':'LYS','L':'LEU','M':'MET','N':'ASN','P':'PRO','Q':'GLN','R':'ARG','S':'SER','T':'THR','V':'VAL','W':'TRP','Y':'TYR'}
    with open(pdb, 'r') as r:
        lines = r.readlines()
    # remove LINK lines, which interfere with putting in constraints
    # (they are blanked rather than deleted so later line indices hold)
    lines_to_remove = []
    for n, line in enumerate(lines):
        if 'LINK' in line:
            lines_to_remove.append(n)
    for l2r in lines_to_remove:
        lines[l2r] = '\n'
    # Fixing enzdes comments block
    # NOTE(review): lines.index(l) finds the FIRST occurrence of the text,
    # so duplicate lines inside lines[2:6] could be mis-targeted — verify.
    for l in lines[2:6]:
        lines[lines.index(l)]=l.replace('VAL',aa[last_res])
    with open(pdb,'w') as w:
        w.writelines(lines)
def get_pose_discrimination_scores(pdb):
    """Score one decoy and compute its discriminator value.

    Loads the (pre-fixed) PDB, applies enzdes match constraints, scores it
    with ref2015_cst, and sums per-residue totals for the peptide region
    and the protein residues near chain B, plus the pose's constraint
    terms. Returns [name, prot_score, pep_score, cst_score, discriminator]
    and prints the same values.
    """
    fix_file(pdb)
    nam = basename(pdb)
    pose = pose_from_pdb(pdb)
    # Enzdes constraints
    cstm = AddOrRemoveMatchCsts()
    cstm.set_cst_action(ADD_NEW)
    cstm.apply(pose)
    sf = create_score_function('ref2015_cst')
    sf(pose)
    #pep_res = selector_to_list(pose, ChainSelector('B'))
    # Peptide residues are selected by fixed pose numbering — TODO confirm
    # 212-218 still matches the current decoy layout.
    pep_res = selector_to_list(pose, ResidueIndexSelector('212-218'))
    # Protein residues within 8 A of chain B, excluding chain B itself.
    neigh = NeighborhoodResidueSelector()
    neigh.set_distance(8)
    neigh.set_focus_selector(ChainSelector('B'))
    neigh.set_include_focus_in_subset(False)
    prot_res = selector_to_list(pose, neigh)
    pep_score = 0
    for i in pep_res:
        pep_score += pose.energies().residue_total_energies(i)[st.total_score]
    prot_score = 0
    for i in prot_res:
        prot_score += pose.energies().residue_total_energies(i)[st.total_score]
    cst_score = 0
    for i in [st.atom_pair_constraint, st.coordinate_constraint,
              st.angle_constraint, st.dihedral_constraint]:
        cst_score += pose.energies().total_energies()[i]
    # Weighted sum; constraints weighted 3.5x to penalize broken geometry.
    discriminator = 1 * prot_score + 1 * pep_score + 3.5 * cst_score
    print(nam, prot_score, pep_score, cst_score, discriminator)
    return [nam, prot_score, pep_score, cst_score, discriminator]
def matching_seqs(seq_1, seq_2, frame_size, threshold):
    """Print every pair of same-length windows (one from each sequence) that
    agree at `threshold` or more positions.

    BUG FIX: the inner loop referenced an undefined name `tau_seq`, so any
    call raised NameError; it now windows over `seq_2` as intended. The
    window ranges also used `[:(frame_size-1)]`, which capped the start
    positions at frame_size-2 instead of covering every full window; both
    loops now visit every valid window start.
    """
    for i in range(len(seq_1) - frame_size + 1):
        aframe = seq_1[i:i + frame_size]
        for j in range(len(seq_2) - frame_size + 1):
            bframe = seq_2[j:j + frame_size]
            # Count positions where the two frames agree.
            match = sum(1 for k in range(frame_size) if aframe[k] == bframe[k])
            if match >= threshold:
                print(i, aframe, j, bframe)
def main(args):
    """Summarize the best decoy from each .fasc file.

    Writes a tab-separated score table to <name>_scores.txt and the
    per-decoy discriminator scores to <name>_discriminators.txt.
    """
    # Get list of fasc files
    fasc_files = collect_fasc_files(args.directory)
    best_decoys = []
    for f in fasc_files:
        # Convert file to dict list, clean up outliers
        f_converted = fasc_to_dict_list(f)
        f_cleaned = filter_by_constraints(f_converted)
        # Collect single best model
        best_decoys.append(f_cleaned[0])
    # BUG FIX: this loop header had been commented out, leaving the body
    # referencing undefined `n` and `i` (NameError at runtime). The header
    # row is written once, before the first decoy.
    with open(args.name + '_scores.txt', 'a') as w:
        for n, decoy in enumerate(best_decoys):
            if n == 0:
                w.write('\t'.join(list(decoy.keys())) + '\n')
            w.write('\t'.join([str(v) for v in decoy.values()]) + '\n')
    # Get residue scores
    for n, decoy in enumerate(best_decoys):
        o = get_pose_discrimination_scores(join(args.directory, basename(decoy['filename'])))
        with open(args.name + '_discriminators.txt', 'a') as w:
            if n == 0:
                w.write('\t'.join(['name', 'prot_score', 'pep_score', 'cst_score', 'discriminator'])+'\n')
            w.write('\t'.join([str(x) for x in o])+'\n')
if __name__ == '__main__':
    args = parse_args()
    # PyRosetta flags: keep PDB headers (needed for the enzdes comment
    # block) and load the user-selected enzdes constraint file.
    opts = '-cst_fa_weight 1.0 -run:preserve_header -enzdes::cstfile {} -mute all'
    init(opts.format(args.constraints))
    main(args)
|
import re
import requests
import time
import os
import logging
from lxml import etree
from logging.handlers import RotatingFileHandler
import pymysql
class LookComUa:
    """Crawler for wallpapers hosted on look.com.ua.

    Progress is tracked in a local MySQL database (`pic_material`) through
    stored procedures, so an interrupted run can resume without
    re-downloading finished categories, pages or pictures.
    """

    def __init__(self):
        self.material_url = 'http://www.look.com.ua'
        # Browser-like headers; the referer is rewritten per request.
        self.headers = {
            'referer': self.material_url,
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36',
        }

    def get_regex(self, regex, text):
        """Return the first match of `regex` in `text` (IndexError if none)."""
        return re.findall(regex, text)[0]

    def _connect(self):
        """Open a connection to the local progress database.

        NOTE(review): credentials are hard-coded; consider moving them to
        configuration.
        """
        return pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            passwd='123asd',
            db='pic_material',
        )

    def download(self, pic_id, url, referer_url, dir_path):
        """Download one resolution of picture `pic_id` into `dir_path`,
        skipping it when the database says it was already fetched."""
        regex = r'http://www.look.com.ua/download/' + pic_id + '/(.*?)/'
        pic_resolution = re.findall(regex, url)[0]
        print('查询图片:{pic_id} {pic_resolution}'.format(pic_id=pic_id, pic_resolution=pic_resolution))
        if self.select('pro_pic_detail_select', (pic_id, pic_resolution)):
            print('图片:{pic_id} {pic_resolution}已经存在,跳过爬取'.format(pic_id=pic_id, pic_resolution=pic_resolution))
            return
        self.headers['referer'] = referer_url
        # Retry the page fetch until it succeeds. BUG FIX: the retry loops
        # in this class used bare `except:`, which also swallowed
        # KeyboardInterrupt and made Ctrl-C unable to stop a retry loop;
        # they now catch Exception only.
        while True:
            try:
                r = requests.get(url, headers=self.headers, timeout=10)
                break
            except Exception:
                time.sleep(5)
                logging.error('url:{url}访问失败,准备重新访问。'.format(url=url))
                continue
        regex = (r'src=["\'](http://www\.look.com\.ua/pic/\d+/'
                 + pic_resolution
                 + '/look\.com\.ua-'
                 + pic_id
                 + '\.(.*?))["\']')
        res = re.findall(regex, r.text)[0]
        pic_url, pic_type = res
        file_name = dir_path + '/' + pic_id + '_' + pic_resolution + '.' + pic_type
        # Retry the image download until it succeeds.
        while True:
            try:
                r = requests.get(pic_url, headers=self.headers, timeout=10)
                break
            except Exception:
                time.sleep(5)
                logging.error('pic_url:{pic_url}访问失败,准备重新访问。'.format(pic_url=pic_url))
                continue
        print('开始下载图片:{pic_url}'.format(pic_url=pic_url))
        with open(file_name, 'wb') as f:
            f.write(r.content)
        self.insert('pro_pic_detail_insert', (pic_id, pic_resolution))
        logging.error('图片{pic_id} {pic_resolution} 已完成下载'.format(pic_id=pic_id, pic_resolution=pic_resolution))

    def insert(self, proc, args):
        """Run a writing stored procedure.

        BUG FIX: pymysql's autocommit is off by default, so without an
        explicit commit the procedure's writes were rolled back when the
        connection closed — the crawler's progress was never persisted.
        """
        conn = self._connect()
        try:
            cur = conn.cursor()
            cur.callproc(proc, args)
            conn.commit()
            cur.close()
        finally:
            conn.close()

    def update(self, proc, args):
        """Run an update stored procedure (same mechanics as insert)."""
        self.insert(proc, args)

    def select(self, proc, args):
        """Run a reading stored procedure and return all fetched rows."""
        conn = self._connect()
        try:
            cur = conn.cursor()
            cur.callproc(proc, args)
            data = cur.fetchall()
            cur.close()
        finally:
            conn.close()
        return data

    def get_pic(self, url, referer_url, dir_path, pic_id):
        """Fetch one picture page and download every listed resolution.

        NOTE(review): the incoming `pic_id` argument is immediately
        overwritten from the URL; the parameter is kept only for interface
        compatibility with existing callers.
        """
        pic_id = re.findall(r'(\d+)', url)[0]
        dir_path += pic_id
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        self.headers['referer'] = referer_url
        while True:
            try:
                r = requests.get(url, headers=self.headers, timeout=10)
                break
            except Exception:
                time.sleep(5)
                logging.error('url:{url} 获取图片{pic_id}失败,准备重新获取。'.format(url=url, pic_id=pic_id))
                continue
        pic_url_list = re.findall(r'"(http://www.look.com.ua/download/\d+/\d+x\d+/)"', r.text)
        index = 0
        length = len(pic_url_list)
        # Each resolution is retried until download() stops raising.
        while index < length:
            time.sleep(1)
            pic_url = pic_url_list[index]
            try:
                self.download(pic_id=pic_id, url=pic_url, referer_url=url, dir_path=dir_path)
                index += 1
            except Exception as err:
                logging.error('图片:{url} 下载失败!'.format(url=pic_url))
                logging.error('失败原因:{err}'.format(err=err))

    def get_pic_info(self, url, page, category):
        """For page==0 return the category's page count; otherwise return
        the deduplicated list of picture-page URLs found on `url`."""
        self.headers['referer'] = self.material_url
        while True:
            try:
                r = requests.get(url, headers=self.headers, timeout=10)
                break
            except Exception:
                time.sleep(5)
                logging.error('url:{url} 获取第{page}页失败,准备重新获取。'.format(url=url, page=page))
                continue
        if page == 0:
            regex = r'<span class="nav_ext">...</span> <a href="http://www.look.com.ua/' + category + '/page/(\d+)/'
            try:
                page_count = int(re.findall(regex, r.text)[0])
            except Exception:
                # Categories with a single page have no pagination block.
                page_count = 1
            return page_count
        else:
            pic_url_list = re.findall(r'(http://www.look.com.ua/\d+-.*?)"', r.text)
            pic_url_list = list(set(pic_url_list))
            return pic_url_list

    def get_category_url_list(self, page_source):
        """Extract the category URLs from the site's sidebar menu."""
        selector = etree.HTML(page_source)
        sel_categorys = selector.xpath('//div[@class="newsidebar_menu site-menu"]/a')
        return [sel_category.xpath('@href')[0] for sel_category in sel_categorys]

    def get_category_pic(self, url, referer_url, dir_path, category):
        """Walk every page of a category and download all pictures on it,
        recording per-picture progress in the database."""
        page_count = self.get_pic_info(url, page=0, category=category)
        page = 1
        while page < page_count+1:
            try:
                page_url = url + 'page/' + str(page) + '/'
                pic_url_list = self.get_pic_info(page_url, page, category=category)
                index = 0
                length = len(pic_url_list)
                while index < length:
                    pic_url = pic_url_list[index]
                    pic_id = re.findall(r'(\d+)', pic_url)[0]
                    self.insert('pro_pic_id_insert', (pic_id, ))
                    print('查询图片:{pic_id}是否完成下载'.format(pic_id=pic_id))
                    if self.select('pro_pic_id_select', (pic_id, ))[0][0]:
                        print('图片:{pic_id} 已经爬取, 跳过爬取'.format(pic_id=pic_id))
                        index += 1
                        continue
                    try:
                        self.get_pic(pic_url, referer_url=page_url, dir_path=dir_path, pic_id=pic_id)
                        self.update('pro_pic_id_update', (pic_id, ))
                        logging.error('图片:{pic_id}已爬取完成'.format(pic_id=pic_id))
                        index += 1
                    except Exception as err:
                        # Leave `index` unchanged so the same picture is retried.
                        logging.error('图片:{pic_id} 爬取过程出现异常!'.format(pic_id=pic_id))
                        logging.error('失败原因:{err}'.format(err=err))
                logging.error('图片第{page}页信息获取完成!'.format(page=str(page)))
                page += 1
            except Exception as err:
                logging.error('图片第{page}页信息获取失败!'.format(page=str(page)))
                logging.error('失败原因:{err}'.format(err=err))

    def spider(self):
        """Top-level crawl: enumerate the site's categories and process
        each one, skipping categories the database marks as finished."""
        if not os.path.exists('LookComUa1'):
            os.mkdir('LookComUa1')
        while True:
            try:
                r = requests.get(self.material_url, headers=self.headers, timeout=10)
                break
            except Exception:
                time.sleep(5)
                logging.error('获取:{url} 失败,准备重新获取.'.format(url=self.material_url))
                continue
        category_url_list = self.get_category_url_list(r.text)
        category_count = len(category_url_list)
        index = 0
        while index < category_count:
            try:
                category = category_url_list[index].split('/')[1]
                self.insert('pro_category_insert', (category, ))
                dir_path = 'LookComUa1/' + category + '/'
                if not os.path.exists(dir_path):
                    os.mkdir(dir_path)
                url = self.material_url + '/' + category + '/'
                print('查询分类:{category} 是否爬取完毕'.format(category=category))
                if self.select('pro_category_select', (category, ))[0][0]:
                    print('分类:{category} 已爬取完毕,跳过爬取。'.format(category=category))
                    index += 1
                    continue
                self.get_category_pic(url, referer_url=self.material_url, dir_path=dir_path, category=category)
                self.update('pro_category_update', (category, ))
                logging.error('分类{category}:图片全部下载完成!'.format(category=category))
                index += 1
            except Exception as err:
                # Leave `index` unchanged so the same category is retried.
                logging.error('分类{category}:爬取出现异常!'.format(category=category))
                logging.error('失败原因:{err}'.format(err=err))
def set_log():
    """Configure root logging: ERROR-level console output plus a rotating
    INFO-level file log (LookComUa.log, 10 MB x 5 backups)."""
    fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
    datefmt = '%w, %d %b %Y %H:%M:%S'
    logging.basicConfig(level=logging.ERROR, format=fmt, datefmt=datefmt)
    file_handler = RotatingFileHandler('LookComUa.log', maxBytes=10 * 1024 * 1024, backupCount=5)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter(fmt, datefmt))
    logging.getLogger('').addHandler(file_handler)
def main():
    """Entry point: configure logging and run the full-site crawl."""
    set_log()
    logging.error('开始素材网站url:{url}'.format(url='http://www.look.com.ua/'))
    look_com_ua = LookComUa()
    look_com_ua.spider()
    # Manual single-category run, kept for reference:
    # category = 'mostdownloads'
    # base_url = 'http://www.look.com.ua/'
    # url = base_url + category + '/'
    # if not os.path.exists('LookComUa/mostdownloads/'):
    #     os.mkdir('LookComUa/mostdownloads/')
    #     look_com_ua.get_category_pic(url, 'http://www.look.com.ua/', 'LookComUa/mostdownloads/', category='mostdownloads')
# Run the crawler only when invoked as a script.
if __name__ == '__main__':
    main()
|
import sys
from math import ceil
from itertools import accumulate
# Read the puzzle input: prompt on stdin when no filename argument is given
# (or when the only argument is the -v flag), otherwise take argv[1].
if len(sys.argv) == 1 or sys.argv[1] == '-v':
    print('Input filename:')
    f=str(sys.stdin.readline()).strip()
else: f = sys.argv[1]
verbose = sys.argv[-1] == '-v'  # NOTE(review): `verbose` is never used below
# The input is a single line of digits; if the file had several lines, only
# the last one would be kept.
for l in open(f):
    data = [int(x) for x in l.strip()]
def num(l):
    """Collapse a list of decimal digits into the integer they spell."""
    value = 0
    for digit in l:
        value = value * 10 + digit
    return value
def solve_digit(pos: int, digits: list):
    """Return digit `pos` (0-based) after one FFT phase over `digits`.

    The FFT pattern for position `pos` repeats each of [0, 1, 0, -1]
    pos+1 times, with the very first element dropped. The result is the
    last decimal digit of the absolute weighted sum.
    """
    base_seq = [0]*(pos+1) + [1]*(pos+1) + [0]*(pos+1) + [-1]*(pos+1)
    # BUG FIX (idiom): this computed ceil(len(digits) // len(base_seq)) —
    # ceil of an already-floored integer division is a no-op. Use true
    # division so the intent (round up) is actually expressed; zip() below
    # truncates any surplus pattern elements.
    seq = base_seq[pos+1:] + base_seq * ceil(len(digits) / len(base_seq))
    acc = 0
    for d, s in zip(digits[pos:], seq):
        acc += d * s
    return abs(acc) % 10
'''
Solution 1
'''
# 100 full FFT phases over the raw input; the answer is the first 8 digits.
d = data[:]
for _ in range(100):
    d = [ solve_digit(i, d) for i in range(len(d)) ]
print('Solution 1:', num(d[:8]))
'''
Solution 2
1st digit -------> no zeros
2nd digit -------> 1 zeros | 0 1 1 0 0 -1 -1 0 0 ...
3rd digit -------> 2 zeros | 0 0 1 | 1 1 0 0 0 -1 -1 -1 ...
4th digit -------> 3 zeros | 0 0 0 1 | 1 1 ...
5th digit -------> 4 zeros | 0 0 0 0 1 | 1 1 1 1 0 0 0 0 0 -1 -1 -1 -1 -1 ...
(3M + 1)th ------> 3M zeros
......................................................................
1. By the time we get to index [5976463] all sums before that point will be ZEROs
so we will ignore them.
2. On the other half of our sequence, coeficients are all ones.
0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
<------<-------|------>------->
3. Calculate in reverse and reverse our answer again.
'''
# The message offset is encoded in the first 7 input digits; because it
# lies in the all-ones half of the pattern, each phase is just a reversed
# running sum modulo 10.
skip = num(data[:7])
d = (data * 10000)[skip:][::-1]
for _ in range(100):
    d = accumulate(d, lambda a, b: abs(a + b) % 10)
print('Solution 2:', num(list(d)[::-1][:8]))
|
from lettuce import *
from nose.tools import assert_equal, assert_in
from webtest import TestApp
from app.code.bank.app import app, BANK
from app.code.bank.account import Account
@step(u'Given I create following account:')
def given_i_create_following_account(step):
    """Create one bank account per row of the step's hash table."""
    for row in step.hashes:
        a = Account(row['account_number'], row['balance'])
        BANK.add_account(a)
@step(u'Given I create account "([^"]*)" with balance of "([^"]*)"')
def given_i_create_account_group1_with_balance_of_group2(step, acc_nr, balance):
    """Create a single account from the two captured step parameters."""
    account = Account(acc_nr, balance)
    BANK.add_account(account)
@step(u'And I visit the homepage')
def and_i_visit_the_homepage(step):
    """Load the app root via WebTest and stash browser/response in `world`."""
    world.browser = TestApp(app)
    world.response = world.browser.get('http://localhost:5000/')
    assert_equal(world.response.status_code, 200)
    # assert_equal(world.response.text, u'Hello World!')
@step(u'When I enter the account number "([^"]*)"')
def when_i_eneter_the_account_number_group1(step, account_number):
    """Submit the account lookup form with the captured account number."""
    # NOTE(review): the function name has a typo ('eneter'); lettuce matches
    # by the regex, so renaming would be safe, but it is left unchanged here.
    form = world.response.forms['account-form']
    form['account_number'] = account_number
    world.form_response = form.submit()
    assert_equal(world.form_response.status_code, 200)
@step(u'Then I see the balance of "([^"]*)"')
def then_i_see_the_balance_of_group1(step, expected_balance):
    """Assert the rendered page shows the expected balance."""
    assert_in("Balance: {}".format(expected_balance), world.form_response.text)
|
__author__ = 'thomas'
import unittest
from MathFunctions import *
class TestHelperFunctions(unittest.TestCase):
    def test_modexp(self):
        """
        modexp(base, exponent, modulus) should agree with modular
        exponentiation by hand: 2^5 mod 7 = 4, 2^10 mod 8 = 0.
        :return:
        """
        self.assertEqual(4, MathFunctions.modexp(2, 5, 7))
        self.assertEqual(0, MathFunctions.modexp(2, 10, 8))
    def test_is_wieferich(self):
        """
        is_wieferich should reject ordinary primes and accept the only
        two known Wieferich primes, 1093 and 3511.
        :return:
        """
        cases = (
            (2, False),
            (3, False),
            (7, False),
            (1093, True),
            (3511, True),
            (1012393, False),
        )
        for candidate, expected in cases:
            self.assertEqual(MathFunctions.is_wieferich(candidate), expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import os, fnmatch
# Interactive configuration: old/new project names and the derived
# src/ folder paths for the app and ferry (GraphQL client) sub-projects.
oldName = input("Insert old app name (stackedql): ") or "stackedql"
newName = input("Insert new app name: ")
if not newName: exit(0)
apppath = "src/"+oldName+"_app"
ferrypath = "src/"+oldName+"_ferry"
newapppath = "src/"+newName+"_app"
newferrypath = "src/"+newName+"_ferry"
def checkdirs():
    """Abort (exit 0) unless both expected project folders exist,
    reporting every missing one first."""
    missing = False
    for path in (apppath, ferrypath):
        if not os.path.isdir(path):
            print(path, "doesn't exist.")
            missing = True
    if missing: exit(0)
def renameFolders():
    """Move both project folders to their new names (raises if targets exist)."""
    os.rename(apppath, newapppath)
    os.rename(ferrypath, newferrypath)
def findReplace(directory, find, replace, filePattern):
    """Replace every occurrence of `find` with `replace`, in place, in all
    files under `directory` whose names match `filePattern`."""
    for path, dirs, files in os.walk(os.path.abspath(directory)):
        for filename in fnmatch.filter(files, filePattern):
            filepath = os.path.join(path, filename)
            # Read whole file, substitute, write back.
            with open(filepath) as fh:
                contents = fh.read()
            with open(filepath, "w") as fh:
                fh.write(contents.replace(find, replace))
def renameProject(oldProjectName,projectName,path,isApp):
    """Rename one Flutter project in place: bundle id, app name, and every
    Dart/YAML reference to the old package names. Prompts for the bundle id
    and shells out to the `rename` pub tool; cwd is restored two levels up
    (assumes `path` is exactly two levels deep, e.g. src/<name>)."""
    os.chdir(path)
    bundleID= input("Insert Bundle ID for "+projectName+f" (com.example.{projectName}): ") or f"com.example.{projectName}"
    result = os.system(f'flutter pub global run rename --bundleId {bundleID}')
    result = os.system(f'flutter pub global run rename --appname "{projectName}"')
    if isApp:
        # Regenerate platform folders and drop the default test scaffold.
        result = os.system(f'flutter create .')
        result = os.system(f'rm -rf test')
    findReplace("./", f"package:{oldProjectName}", f"package:{projectName}", "*.dart")
    findReplace("./", f"{oldName}_", f"{newName}_", "*.yaml")
    os.chdir("../../")
def pubGetAndBuild(oldProjectName,projectName,path):
    """Fetch dependencies and run build_runner code generation in `path`.

    NOTE(review): oldProjectName/projectName are unused; kept for
    call-site symmetry with renameProject.
    """
    os.chdir(path)
    os.system("flutter pub get")
    os.system("flutter pub run build_runner build --delete-conflicting-outputs")
    os.chdir("../../")
def createPackageProjectWithExample(name):
    """Scaffold a new Flutter package at src/<name> with an example app inside."""
    os.chdir("./src")
    os.system(f"flutter create --template=package {name}")
    os.chdir(f"./{name}")
    os.system(f"flutter create example")
    os.chdir("../../")
# Pipeline: validate folders, move them, derive the new/old project names,
# scaffold the fresh UI package, then rename and build the ferry project.
checkdirs()
renameFolders()
appProjectName = newName+"_app"
uiProjectName = newName+"_ui"
ferryProjectName = newName+"_ferry"
oldappProjectName = oldName+"_app"
olduiProjectName = oldName+"_ui"
oldferryProjectName = oldName+"_ferry"
# os.system("flutter pub global activate rename")
createPackageProjectWithExample(f"{newName}_ui")
renameProject(oldferryProjectName,ferryProjectName,newferrypath,False)
pubGetAndBuild(oldferryProjectName,ferryProjectName,newferrypath)
def changeOtherImports(projectPath,oldOtherPackage,newOtherPackage):
    """Rewrite Dart imports inside `projectPath`: the given sibling package
    rename plus the UI package rename.

    BUG FIX: the old body ignored its `projectPath` parameter (it always
    operated on the global `newapppath`) and hard-coded "stackedql_ui"
    instead of deriving the old UI package name from `oldName`. Behavior is
    unchanged for the existing call site (projectPath == newapppath and
    oldName defaults to "stackedql"), but the function now honors its
    arguments.
    """
    findReplace(projectPath, f"package:{oldOtherPackage}", f"package:{newOtherPackage}", "*.dart")
    findReplace(projectPath, f"{oldName}_ui", f"{newName}_ui", "*.dart")
# Rename and build the main app project, fixing its imports of the ferry package.
renameProject(oldappProjectName,appProjectName,newapppath,True)
changeOtherImports(newapppath,oldferryProjectName,ferryProjectName)
pubGetAndBuild(oldappProjectName,appProjectName,newapppath)
# remember to copy graphql schema
# remember to copy queries and mutations
# delete .git - readme - python script option
# if input("Delete repo files? (y/n):")=="y":
#     os.rmdir(".git")
#     os.remove("README.md")
#     os.remove("rename.py")
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 10:23:54 2020
@author: Alex
"""
import numpy as np
import matplotlib.pyplot as plt
#Defin a routine which in 1D interpolates a lagrange polynomial through 3 samples, and/
#then calculated the minima of the interpolated parabola
def par_min(x0, x1, x2, f):
    """Find a minimum of f by successive parabolic interpolation.

    Repeatedly fits a Lagrange parabola through three samples, takes its
    vertex x3, drops the highest of the four points, and iterates until the
    vertex moves by less than 1e-8. Returns (x3, df) where df is the
    parabola's curvature at the final step.

    NOTE(review): if the very first iteration hits the equal-y break (or
    the loop body never runs), `x3`/`df` are unbound and the return raises
    NameError — confirm callers always supply distinct sample values.
    """
    y0, y1, y2 = f(x0), f(x1), f(x2)
    #convergence depends on a residue calculation, so a control bit is used
    #so that the residue isnt referenced before the variable is declared
    control = 0
    #initialise residue as something higher than the threshold convergence value
    res = 10
    while res > 0.00000001:
        y0, y1, y2 = f(x0), f(x1), f(x2)
        #Algorithm will produce zero-division if the sample y values are identical, so make sure
        #loop ends before this is computed
        if y0==y2 or y0==y1 or y1==y2:
            break
        else:
            #x3 is the minima for the interpolated lagrange poly
            x3 = 0.5*((x2**2-x1**2)*y0+(x0**2-x2**2)*y1+(x1**2-x0**2)*y2)/\
                ((x2-x1)*y0+(x0-x2)*y1+(x1-x0)*y2)
            a = np.array([y0,y1,y2,f(x3)])
            b = np.argmax(a)
            #figure out which sample point is the highest, and remove this from the sample set!
            if control == 1:
                #calculate residue to end loop when convergence complete
                res = np.sqrt((x3-xm)**2)
            c = [x0, x1, x2, x3]
            df = d2dx(c[0], c[1], c[2], c[3], f)
            del c[b]
            c = np.array(c)
            c.sort()
            x0, x1, x2 = c
            control = 1
            # remember this vertex so the next pass can measure movement
            xm = x3
    return x3, df
#Define a routine which calculated the curvature at the minimum of a lagrange polynomial interpolated
#through 3 sample points
def d2dx(x0, x1, x2, x3, f):
    """Curvature (second derivative) of the Lagrange parabola through the
    three samples (x0, f(x0)), (x1, f(x1)), (x2, f(x2)).

    `x3` is unused; it is kept only for interface compatibility with the
    call site in par_min.
    """
    # Second derivative of the Lagrange form: sum of 2*y_i over the
    # product of this node's differences to the other two nodes.
    terms = (
        (f(x0), (x0 - x1) * (x0 - x2)),
        (f(x1), (x1 - x0) * (x1 - x2)),
        (f(x2), (x2 - x0) * (x2 - x1)),
    )
    return sum(2 * y / denom for y, denom in terms)
#define two sample functions for validation of algo
def f(x):
    """First validation function: x^2 + 1, minimum 1 at x = 0."""
    return x * x + 1
def f2(x):
    """Second validation function: exp(x^2), minimum 1 at x = 0."""
    return np.exp(x * x)
#define some sample x values, non-symetric so that we dont automatically get 0
x0, x1, x2 = -0.5, 0, 1
# Both functions have their true minimum at x = 0 with curvature 2.
print("for function 1, the minimum and curvature are", par_min(x0, x1, x2, f))
print("for function 2, the minimum and curvature are", par_min(x0, x1, x2, f2))
x = np.linspace(-2, 2, 1000)
# NOTE(review): this redefines f2 identically to the version above — the
# duplicate could be removed.
def f2(x):
    return np.exp(x**2)
plt.figure(figsize=(10,6))
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.rc('figure', titlesize=45)
plt.rc('axes', titlesize=20) # fontsize of the axes title
plt.rc('axes', labelsize=20)
plt.rc('legend', fontsize=22)
plt.plot(x, f(x), 'r-')
plt.plot(x, f2(x), 'b--')
plt.ylim(0, 10)
plt.grid()
#plt.xlabel("x")
#plt.ylabel("f(x)")
plt.xlabel('x')
plt.ylabel('y')
# NOTE(review): the second legend label says e^{x^2} + 1 but f2 is
# e^{x^2} with no "+ 1" — confirm which is intended.
plt.legend([r'$f(x) = {x^2} + 1$', r'$f(x) = {e^{x^2}} + 1$'])
plt.show()
|
from django.contrib import admin
# Register your models here.
from backend.models import *
class SubcategoryAdmin(admin.ModelAdmin):
    """Admin list view for subcategories, with the parent category shown."""
    list_display = ('name', 'get_category_name')
    def get_category_name(self, obj):
        """Column: name of the subcategory's parent category."""
        return obj.category.name
class FilledSubcategoryAdmin(admin.ModelAdmin):
    """Admin list view for filled subcategories, resolving the category and
    university through the related filled_category."""
    list_display = ('name', 'get_category_name', 'get_university')
    def get_category_name(self, obj):
        """Column: name of the category behind this filled subcategory."""
        return obj.filled_category.category.name
    def get_university(self, obj):
        """Column: name of the university that filled this subcategory."""
        return obj.filled_category.university.name
class CustomUserAdmin(admin.ModelAdmin):
    """Admin list view for users, with their university shown by name."""
    list_display = ('name', 'email', 'role', 'position', 'get_university')
    def get_university(self, obj):
        """Column: name of the user's university."""
        return obj.university.name
# Model registrations; models without a custom admin use the default one.
admin.site.register(ProfileCategory)
admin.site.register(ProfileSubcategory, SubcategoryAdmin)
admin.site.register(University)
admin.site.register(UniFiles)
admin.site.register(UniSizeGroup)
admin.site.register(CustomUser, CustomUserAdmin)
admin.site.register(FilledCategory)
admin.site.register(FilledSubcategory, FilledSubcategoryAdmin)
|
# vim: set fileencoding=utf-8 :
"""
~~~~~~~
Classes
~~~~~~~
Contains :class:`DictableModel` that can be used as a base class for
:meth:`sqlalchemy.ext.declarative_base`.
"""
from __future__ import absolute_import, division
from zeelalchemy import utils
class DictableModel(object):
    """Can be used as a base class for :meth:`sqlalchemy.ext.declarative`
    Contains the methods :meth:`DictableModel.__iter__`,
    :meth:`DictableModel.asdict` and :meth:`DictableModel.fromdict`.

    NOTE(review): the ivar names below use the `dictalchemy_` prefix while
    this package imports from `zeelalchemy` — confirm which prefix
    :mod:`zeelalchemy.utils` actually reads.

    :ivar dictalchemy_exclude: List of properties that should always be \
            excluded.
    :ivar dictalchemy_exclude_underscore: If True properties starting with an \
            underscore will always be excluded.
    :ivar dictalchemy_fromdict_allow_pk: If True the primary key can be \
            updated by :meth:`DictableModel.fromdict`.
    :ivar dictalchemy_asdict_include: List of properties that should always \
            be included when calling :meth:`DictableModel.asdict`
    :ivar dictalchemy_fromdict_include: List of properties that should always \
            be included when calling :meth:`DictableModel.fromdict`
    """
    # The three protocol methods are plain delegations to the utils module.
    asdict = utils.asdict
    fromdict = utils.fromdict
    __iter__ = utils.iter
|
# Read a count, then classify each following integer as even or odd.
n = int(input())
for _ in range(n):
    value = int(input())
    print("é par" if value % 2 == 0 else "é impar")
|
import gevent
# gevent中的主要模式是Greenlet
# 以C扩展的模块形式接入到Python的轻量级协程
# 全部运行在操作系统进程的内部 但他们被协作式的调度
def Foo():
    # First greenlet: the sleep stands in for a blocking I/O call and
    # yields control to the other greenlets until it expires.
    print('running in foo')
    gevent.sleep(1)
    # mimics an I/O operation (translated from the original Chinese comment)
    print('Explicit context switch back to foo ')
def Bar():
    # Second greenlet; it sleeps the longest so it finishes last.
    print('Explicit context to bar')
    # "Explicit": the switch happens at this explicit sleep call
    # (translated from the original Chinese comment)
    gevent.sleep(2)
    print("Implicit context switch back to bar")
def Strs():
    # Third greenlet with an intermediate delay, finishing between the two.
    print('Explicit context to Strs')
    gevent.sleep(1.5)
    print("Implicit context switch back to Strs")
# Spawn the three greenlets and block until every one of them finishes.
gevent.joinall([
    gevent.spawn(Foo),  # spawn: create and schedule a greenlet (translated)
    gevent.spawn(Bar),
    gevent.spawn(Strs),
])
|
from .roleplaying import Roleplaying
def setup(bot):
    """discord.py extension entry point: attach the Roleplaying cog."""
    bot.add_cog(Roleplaying(bot))
|
#!/usr/bin/python
import sys
# Make the app package importable, then expose the Flask app under the
# name mod_wsgi looks for (`application`).
sys.path.insert(0,"/var/www/flaskapp/")
from hello import app as application
|
import unittest
from katas.kyu_6.length_of_missing_array import get_length_of_missing_array
class LengthOfMissingArrayTestCase(unittest.TestCase):
    """get_length_of_missing_array: find the sub-array length missing from
    an otherwise consecutive run of lengths."""
    def test_equal_1(self):
        data = [[1, 2], [4, 5, 1, 1], [1], [5, 6, 7, 8, 9]]
        self.assertEqual(get_length_of_missing_array(data), 3)
    def test_equal_2(self):
        data = [[5, 2, 9], [4, 5, 1, 1], [1], [5, 6, 7, 8, 9]]
        self.assertEqual(get_length_of_missing_array(data), 2)
    def test_equal_3(self):
        # Element values are irrelevant; only sub-array lengths matter.
        self.assertEqual(
            get_length_of_missing_array([[None], [None, None, None]]), 2)
    def test_equal_4(self):
        data = [['a'] * 3, ['a'] * 2, ['a'] * 4, ['a'], ['a'] * 6]
        self.assertEqual(get_length_of_missing_array(data), 5)
|
#!/usr/bin/env python3
import socket
# Simple single-client TCP echo server.
myServerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow quick restarts without waiting for TIME_WAIT to expire.
myServerSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
myIP = '172.20.1.1'
myPort = 5000
myServerInfo = (myIP, myPort)
print('Server Port:', myPort)
myServerSocket.bind(myServerInfo)
myServerSocket.listen(1)
while True:
    print(f'Waiting for a connection on {myIP}:{myPort}')
    myConnection, clientAddress = myServerSocket.accept()
    try:
        print(f'Connection established to {clientAddress}')
        while True:
            # Empty recv() means the client closed its end.
            incomingData = myConnection.recv(1024)
            print(f'Received data: {incomingData}')
            if incomingData:
                print('Sending data back to the client')
                myConnection.sendall(incomingData)
            else:
                print(f'End of client data {clientAddress}')
                # BUG FIX: the old code closed the socket here but kept
                # looping, so the next recv() ran on a closed socket and
                # raised; break out to accept the next client instead.
                break
    except socket.error as mySocketError:
        print(f'Connection state: {myPort}::{mySocketError}')
    finally:
        # Close exactly once, on both the clean and the error path.
        myConnection.close()
|
import pyglet
from pyglet.gl import *
import pymunk
from pymunk import Vec2d
import math
import levelassembler
import camera
from math import sin,cos
import particles2D
import loaders
import PiTweener
class Hint:
    def __init__(self, position, padding, image):
        """Create an in-level hint marker.

        position -- (x, y) world coordinates of the hint's anchor point
        padding  -- (left, bottom, right, top) extents of the trigger box
        image    -- sprite image identifier passed to loaders.spriteloader
        """
        self.position = position
        self.padding = padding
        padding_left = self.padding[0]
        padding_bottom = self.padding[1]
        padding_right = self.padding[2]
        padding_top = self.padding[3]
        #self.text = text
        # Axis-aligned trigger box around the position; update() fades the
        # sprite in while the player is inside it.
        self.bb = pymunk.BB(position[0] - padding_left,
                            position[1] - padding_bottom,
                            position[0] + padding_right,
                            position[1] + padding_top)
        # Corner points used for the debug outline.
        # NOTE(review): the names look shifted — e.g. `left` pairs min-x
        # with max-y (top-left corner) and `top` is max-x/max-y — confirm
        # against the vertex order expected in setup_pyglet_batch.
        self.left = (position[0] - padding_left, position[1] + padding_top)
        self.bottom = (position[0] - padding_left, position[1] - padding_bottom)
        self.right = (position[0] + padding_right, position[1] - padding_bottom)
        self.top = (position[0] + padding_right, position[1] + padding_top)
        # Debug colors: red = player outside box, green = inside, yellow unused here.
        alpha = 200
        self.color = (200,0,0,alpha)
        self.color2 = (0,200,0,alpha)
        self.color3 = (200,200,0,alpha)
        '''
        image = levelassembler.imageloader(image, 'placeholder.png', (10,10))
        tex = image.get_texture()
        glTexParameteri(tex.target, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(tex.target, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        self.sprite = pyglet.sprite.Sprite(image) # batch = level_batch, group = ordered_group)
        #self.sprite.image.width = size[0]
        #self.sprite.image.height = size[1]
        self.sprite.image.anchor_x = 4
        self.sprite.image.anchor_y = 0
        self.sprite.scale = .75
        '''
        self.sprite = loaders.spriteloader(image,
                                           anchor= (4,0),
                                           scale = .75,
                                           linear_interpolation = True)
        # Sprite starts invisible; update() drives the fade/scale animation
        # through the stage flags below.
        self.sprite.opacity = 0
        self.stage1 = True
        self.stage2 = False
        self.stage3 = False
        self.stage4 = False
        self.stage5 = False
        self.stage6 = False
        # Smoothed rotation state used by update().
        self.fangle = 0
        self.weighted_angle = 0
def setup_pyglet_batch(self, debug_batch, level_batch, ui_batch, ordered_group, screen_res):
self.outline = debug_batch.add_indexed(4, pyglet.gl.GL_LINES, ordered_group, [0,1,1,2,2,3,3,0], ('v2f'), ('c3B', (0,0,0)*4))
self.bb_outline = debug_batch.add_indexed(4, pyglet.gl.GL_LINES, ordered_group, [0,1,1,2,2,3,3,0],
('v2f', (self.left[0],self.left[1],
self.bottom[0],self.bottom[1],
self.right[0],self.right[1],
self.top[0],self.top[1])),
('c4B', (0,0,0,0)*4))
self.sprite.batch = level_batch
self.sprite.group = ordered_group
self.fangle = 0
self.sangle = 1.5
self.flipped = False
def update(self, player_pos, angle):
x = 23*cos(angle+math.radians(90)) + player_pos[0]
y = 23*sin(angle+math.radians(90)) + player_pos[1]
self.sprite.set_position(x,y)
self.weighted_angle = ((self.weighted_angle*(5-1))+angle) / 5
self.sprite.rotation = math.degrees(-self.weighted_angle)
if not self.bb.contains_vect(player_pos):
self.bb_outline.colors = (self.color*4)
if self.sprite.opacity != 0 or self.sprite.opacity >= 1:
if self.sprite.opacity > 41:
self.sprite.opacity -= 30
if self.sprite.opacity < 41:
self.sprite.opacity -= 10
if self.sprite.opacity < 5:
self.sprite.opacity = 0
if self.sprite.opacity > 10:
self.sprite.scale -= 0.02
self.sangle = 1.5
self.fangle = ((self.fangle*(20-1))+1.5) / 20
self.sprite.rotation = math.degrees(self.fangle-self.weighted_angle)
self.stage1 = True
self.stage2 = False
self.stage3 = False
self.stage4 = False
if self.bb.contains_vect(player_pos):
self.fangle = 0
self.sangle = ((self.sangle*(5-1))+0) / 5
self.sprite.rotation = math.degrees(self.sangle-self.weighted_angle)
self.bb_outline.colors = (self.color2*4)
if self.stage1:
if self.sprite.opacity > 1:
self.sprite.scale += 0.04
if self.sprite.scale > 1:
self.stage1 = False
self.stage2 = True
if self.stage2:
if self.sprite.scale > 1:
self.sprite.scale += 0.04
if self.sprite.scale > 1.25:
self.stage2 = False
self.stage3 = True
if self.stage3:
if self.sprite.scale != 1:
if self.sprite.scale > 1:
self.sprite.scale -= 0.04
if self.sprite.scale < 1:
self.sprite.scale += 0.04
if self.sprite.scale == 1 or self.sprite.scale >= 1.0001:
self.stage3 = False
self.stage4 = True
if self.stage4:
if self.sprite.scale >= .88:
self.sprite.scale -= 0.03
if self.sprite.scale < .88:
self.stage4 = False
self.stage5 = True
if self.stage5:
if self.sprite.scale != 1:
if self.sprite.scale > 1:
self.sprite.scale -= 0.02
if self.sprite.scale < 1:
self.sprite.scale += 0.02
if self.sprite.scale == 1 or self.sprite.scale >= 1.0001:
self.stage5 = False
self.stage6 = True
if self.stage6:
self.sprite.scale = 1
self.stage6 = False
#print(self.sprite.scale)
if self.sprite.opacity != 255:
if self.sprite.opacity < 234:
self.sprite.opacity += 15
if self.sprite.opacity > 250:
self.sprite.opacity = 255
class Finish:
    """The level-finish trigger.

    When the player enters the finish bounding box, fires a confetti spurt,
    plays a fanfare sound, and tweens a centered UI banner sprite in, then
    (after a short delay) back out.

    NOTE(review): depends on pyglet, pymunk, particles2D, PiTweener and the
    project's `loaders` module, none of which are visible here; comments on
    those calls are best-effort.
    """
    def __init__(self, position, padding, image):
        """Set up the trigger box, banner sprite, tweener and sound.

        position -- (x, y) world coordinates of the finish area.
        padding  -- (left, bottom, right, top) extents of the trigger box.
        image    -- image identifier handed to loaders.spriteloader.
        """
        self.position = position
        self.padding = padding
        padding_left = self.padding[0]
        padding_bottom = self.padding[1]
        padding_right = self.padding[2]
        padding_top = self.padding[3]
        #self.text = text
        # Axis-aligned bounding box used as the finish trigger.
        self.bb = pymunk.BB(position[0] - padding_left,
                            position[1] - padding_bottom,
                            position[0] + padding_right,
                            position[1] + padding_top)
        # Box corners, kept only for drawing the debug outline.
        self.left = (position[0] - padding_left, position[1] + padding_top)
        self.bottom = (position[0] - padding_left, position[1] - padding_bottom)
        self.right = (position[0] + padding_right, position[1] - padding_bottom)
        self.top = (position[0] + padding_right, position[1] + padding_top)
        # Debug outline colors: red = idle, green = triggered; color3 unused here.
        alpha = 200
        self.color = (200,0,0,alpha)
        self.color2 = (0,200,0,alpha)
        self.color3 = (200,200,0,alpha)
        self.sprite = loaders.spriteloader(image,
                                           anchor = ('center', 'center'),
                                           #anchor_offset = (0,-50)
                                           #scale = 2,
                                           #linear_interpolation = True,
                                           )
        # Banner starts invisible; tweened to full opacity on trigger.
        self.sprite.opacity = 0
        self.particle_emit = False
        self.fangle = 0
        self.weighted_angle = 0
        # Tweener animates sprite_scale / sprite_opacity attributes below.
        self.tweener = PiTweener.Tweener()
        self.finish_sound = loaders.Audio()
        self.finish_sound.load('resources/sounds/yay.ogg')
    def setup_pyglet_batch(self, debug_batch, level_batch, ui_batch, ordered_group, screen_res):
        """Register debug outlines, center the banner on screen, and build
        the confetti emitter."""
        # A bare 'v2f' format string allocates a vertex list with no initial
        # positions; presumably filled in elsewhere -- TODO confirm.
        self.outline = debug_batch.add_indexed(4, pyglet.gl.GL_LINES, ordered_group, [0,1,1,2,2,3,3,0], ('v2f'), ('c3B', (0,0,0)*4))
        # Debug rectangle tracing the trigger bounding box corners.
        self.bb_outline = debug_batch.add_indexed(4, pyglet.gl.GL_LINES, ordered_group, [0,1,1,2,2,3,3,0],
                                                  ('v2f', (self.left[0],self.left[1],
                                                           self.bottom[0],self.bottom[1],
                                                           self.right[0],self.right[1],
                                                           self.top[0],self.top[1])),
                                                  ('c4B', (0,0,0,0)*4))
        self.screen_res = screen_res
        print(self.screen_res)
        # Banner is a UI-space sprite centered on the screen.
        self.sprite.set_position(screen_res[0]//2,screen_res[1]//2)
        self.sprite.batch = ui_batch
        self.sprite.group = ordered_group
        # Tween targets; mirrored onto the sprite every frame in update().
        self.sprite_scale = .8
        self.sprite_opacity = 0
        self.added_tween = False      # ensures the trigger fires only once
        self.fade_sprite_out = False  # set by the fade_out() tween callback
        self.wait_a_sec = 30          # frame countdown before fading out
        # Confetti falls in from above the top edge at screen center.
        self.emitter = particles2D.Emitter(pos = (screen_res[0]//2,-50))
        particle_img = pyglet.resource.image('confetti.png')
        self.factory = particles2D.finish_confetti(5,
                                                   ((-2,3),(5,7)),
                                                   particle_img,
                                                   batch=ui_batch,
                                                   group=ordered_group)
        self.emitter_spurt = particles2D.Spurt(self.emitter)
    def update(self, player_pos, angle):
        """Per-frame update: detect the player reaching the finish and run
        the confetti/tween celebration.

        player_pos -- (x, y) player position.
        angle      -- player orientation; unused here but kept so Finish and
                      Hint share the same update() signature.
        """
        self.emitter_spurt.update()
        if not self.bb.contains_vect(player_pos):
            self.bb_outline.colors = (self.color*4)
        self.tweener.update()
        if self.bb.contains_vect(player_pos):
            self.particle_emit = True
            self.bb_outline.colors = (self.color2*4)
            # First frame inside the finish area: fire confetti, play the
            # fanfare, and tween the banner in (guarded to run exactly once).
            if not self.added_tween:
                self.emitter_spurt.add_factory(self.factory, .001)
                self.finish_sound.play()
                self.added_tween = True
                self.tweener.add_tween(self,
                                       sprite_scale = 1,
                                       sprite_opacity = 255,
                                       tween_time = 1,
                                       tween_type = self.tweener.OUT_CUBIC,
                                       on_complete_function = self.fade_out)
        if self.fade_sprite_out:
            # Hold the banner for ~30 frames, then tween it away.
            if self.wait_a_sec > -1:
                self.wait_a_sec -= 1
            if self.wait_a_sec == 0:
                self.tweener.add_tween(self,
                                       sprite_scale = 1.25,
                                       sprite_opacity = 0,
                                       tween_time = .5,
                                       tween_type = self.tweener.OUT_CUBIC,)
        # The tweener animates the plain attributes; mirror them onto the
        # actual sprite each frame.
        self.sprite.scale = self.sprite_scale
        self.sprite.opacity = self.sprite_opacity
    def fade_out(self):
        """Tween-completion callback: start the delayed fade-out countdown."""
        self.fade_sprite_out = True
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the queensAttack function below.
# def move_queen(n, updated_row, updated_col, r, c, obstacles):
# p = 0
# while True:
# r = updated_row(r)
# c = updated_col(c)
# key = (r - 1) * n + c
# if (c < 1 or c > n or r < 1 or r > n) or (key in obstacles):
# return p
# p += 1
# return p
#
#
# # Complete the queensAttack function below.
# def queensAttack(n, k, r_q, c_q, obs):
# obstacles = {}
# for b in obs:
# obstacles[(b[0] - 1) * n + b[1]] = None
#
# p = 0
# dr = [-1, -1, -1, 0, 0, 1, 1, 1]
# dc = [0, -1, 1, 1, -1, 0, 1, -1]
#
# for i in range(8):
# p += move_queen(n, (lambda r: r + dr[i]), (lambda c: c + dc[i]), r_q, c_q, obstacles)
#
# return p
def queensAttack(n, k, r_q, c_q, obstacles):
    """Count the squares a queen can attack on an n x n chessboard.

    The queen stands at row `r_q`, column `c_q` (1-indexed) and attacks in
    the 8 compass directions until she hits the board edge or an obstacle.

    n         -- board size.
    k         -- number of obstacles (kept for interface compatibility;
                 the obstacle list itself is authoritative).
    r_q, c_q  -- queen's row and column, both in [1, n].
    obstacles -- iterable of [row, col] pairs blocking the queen.

    Returns the total number of attackable squares.
    """
    # Convert to a set of tuples once: O(1) membership tests instead of the
    # original O(k) list scan on every step of every ray (O(n*k) overall).
    blocked = {(row, col) for row, col in obstacles}
    # Row/column deltas for the 8 queen directions.
    deltas = [(-1, 1), (0, 1), (1, 1), (1, 0),
              (1, -1), (0, -1), (-1, -1), (-1, 0)]
    attackable = 0
    for dr, dc in deltas:
        r, c = r_q + dr, c_q + dc
        # Walk outward until leaving the board or hitting an obstacle.
        while 1 <= r <= n and 1 <= c <= n and (r, c) not in blocked:
            attackable += 1
            r += dr
            c += dc
    return attackable
if __name__ == '__main__':
    # Board size n and obstacle count k on the first line.
    n, k = (int(token) for token in input().split())
    # Queen's row and column on the second line.
    r_q, c_q = (int(token) for token in input().split())
    # One "row col" obstacle pair per line for the next k lines.
    obstacles = [list(map(int, input().rstrip().split())) for _ in range(k)]
    result = queensAttack(n, k, r_q, c_q, obstacles)
    print(result)
|
import numpy as np
import matplotlib.pyplot as plt
import time

start_time = time.time()  # start time of execution of code

seed = 0.1   # seed value (starting point of every trajectory)
rnum = 5000  # number of values 'r' takes
rlist = np.linspace(0.1, 4, rnum)  # array storing all values of 'r'
plt.figure()  # generating a figure element beforehand
N = 1000  # total number of iteration steps of the map

# Iterate x_{n+1} = r * x_n * (1 - x_n) for every r simultaneously.
# Vectorizing over r replaces the original O(rnum * N) pure-Python
# append loop with N numpy operations while producing exactly the same
# trajectories; the unused step-index array was also removed.
x = np.full(rnum, seed)
xlistarr = np.empty((rnum, N + 1))  # row j holds the trajectory for rlist[j]
xlistarr[:, 0] = x
for step in range(1, N + 1):
    x = rlist * x * (1 - x)
    xlistarr[:, step] = x

print(rlist.shape)
print(xlistarr.shape)

# Plot only the tail of each trajectory (first 600 steps are transients)
# so the attractor structure of the orbit diagram is visible.
for i in range(600, N):
    plt.plot(rlist, xlistarr[:, i], 'ob', markersize=0.1)
print("--- %s seconds ---" % (time.time() - start_time))
plt.xlabel('r')
plt.ylabel('x')
plt.title('Orbit Diagram for Logistic Map')
#plt.xlim([3.5, 4])
plt.show()
|
# Demonstrations of Python string formatting.
# %-style conversion with a minimum field width of 2.
print('%2d-%2d' % (3, 1))
# %-style conversion with 2-digit float precision.
print('%.2f' % 3.1415926)
# Score improvement in percentage points, printed with a literal '%'.
last_score = 72
this_score = 85
gain = this_score - last_score
print('%.1f%%' % gain)
# str.format with positional indices and a precision spec.
print('Hello, {0}, 成绩提升了 {1:.1f}%'.format('小明', 17.125))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.