#!/usr/bin/env python3
import requests
import os
import hashlib
import io
URL_BASE = "https://cse6040.gatech.edu/datasets/"
def on_vocareum():
return os.path.exists('.voc')
def localize_file(filebase):
if on_vocareum():
local_dir = "../resource/asnlib/publicdata/"
else:
local_dir = ""
return "{}{}".format(local_dir, filebase)
def download(filebase, local_dir="", url_base=URL_BASE, checksum=None):
local_file = localize_file(filebase)
if not os.path.exists(local_file):
url = "{}{}".format(url_base, filebase)
print("Downloading: {} ...".format(url))
        r = requests.get(url)
        r.raise_for_status()  # fail loudly on HTTP errors instead of saving an error page
        with open(local_file, 'wb') as f:
            f.write(r.content)
if checksum is not None:
with io.open(local_file, 'rb') as f:
body = f.read()
body_checksum = hashlib.md5(body).hexdigest()
assert body_checksum == checksum, \
"Downloaded file '{}' has incorrect checksum: '{}' instead of '{}'".format(local_file,
body_checksum,
checksum)
print("'{}' is ready!".format(local_file))
return local_file
def download_dataset(filebases, **kwargs):
for filebase, checksum in filebases.items():
download(filebase, checksum=checksum, **kwargs)
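if __name__ == '__main__':
    # Usage sketch: the filename and MD5 checksum below are hypothetical
    # placeholders, not real course assets; download() skips the fetch when
    # the file already exists locally.
    download_dataset({'example.csv': '0123456789abcdef0123456789abcdef'})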
# eof
|
filename='programming.txt'
# open in write mode ('w') to overwrite the file
#with open(filename,'w') as file:
# open in append mode ('a') to add to the end of the file
with open(filename,'a') as file:
file.write("\nI also love finding meaning in large datasets.\n")
file.write("I love creating apps that can run in a browser.\n")
|
from collections import OrderedDict
def clean_list(list_to_clean):
    """Remove duplicates while preserving first-seen order. Values that compare
    equal (e.g. 32 and 32.0) collapse into one entry; on Python 3.7+ a plain
    dict.fromkeys() preserves order too."""
    return list(OrderedDict.fromkeys(list_to_clean))
if __name__ == '__main__':
print(clean_list([32, 32.1, 32.0, -123]))
|
import cv2
import sys
import numpy as np
import os
if len(sys.argv) < 3:
    print("Usage : " + sys.argv[0] + " <classifier> <image path>")
    sys.exit()
if not os.path.exists("output"):
os.makedirs("output")
def getAdaptiveIndices(ar, loc, mAr, mLoc):
    # Keep contours whose area lies within a fixed band around the mean area.
    # TODO: make the band adaptive, e.g. widen it until 7-8 contours survive.
    a1, a2 = 0.6, 2.0
    return np.where((ar > a1*mAr) & (ar < a2*mAr))[0]
cas = cv2.CascadeClassifier(sys.argv[1])
mva = []
for img in sys.argv[2:]:
name = img
img = cv2.imread(img, 0)
cv2.imshow("img", img); cv2.waitKey(0)
#cv2.imwrite("output/" + "input" + ".jpg", img);
roi = []
idx = 0
for (a,b,c,d) in cas.detectMultiScale(img, 1.3, 2):
if idx == 0:
roi = [a, b, c, d]
idx = idx + 1
if c > roi[2] or d > roi[3]:
roi = [a, b, c, d]
if len(roi) == 4:
roi = img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]
#cv2.imshow("roi", roi); cv2.waitKey(0);
_, otsu = cv2.threshold(roi, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow("roi", otsu); cv2.waitKey(0)
#cv2.imwrite("output/" + "otsu" + ".jpg", otsu);
otsuBkup = otsu.copy()
    # findContours returns (contours, hierarchy) on OpenCV 2.x/4.x; 3.x returns three values.
    contours, hierarchy = cv2.findContours(otsu, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = np.array(contours)
ar = []
loc = []
for cnt in contours:
ar.append(cv2.contourArea(cnt))
loc.append([np.mean(cnt[:, 0, 0]), np.mean(cnt[:, 0, 1])])
ar = np.array(ar)
loc = np.array(loc)
mAr = np.mean(ar)
mLoc = np.mean(loc)
t = getAdaptiveIndices(ar, loc, mAr, mLoc)
contoursFil = contours[t]
roi_nos = []
idx = 0
l = len(contoursFil)
for cnt in contoursFil:
x, y, w, h = cv2.boundingRect(cnt)
temp = otsuBkup[y:y+h, x:x+w]
temp = cv2.resize(temp, (20, 20))
        blkCount = len(np.where(temp == 0)[0])
        whiteCount = len(np.where(temp == 255)[0])
        # print(blkCount, whiteCount)
        # Compare number of black and white pixels (max() guards a ROI with no black pixels)
        mva.append(1.0*whiteCount/max(blkCount, 1))
temp = ~temp
cv2.imshow("roi_"+str(idx), temp);
if idx == l-1:
cv2.waitKey(0);
#cv2.imwrite("output/" + str(idx) + ".jpg", temp);
roi_nos.append(temp)
idx = idx + 1
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
# Preprocessing the dataset
df = pd.read_csv('../iris.csv')
X = df.drop(['variety'], axis=1).values
y = df['variety'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Training the dataset
mlp = MLPClassifier(solver='sgd', activation='logistic', batch_size=20,
learning_rate_init=0.01, max_iter=1000, hidden_layer_sizes=(6, 3))
mlp.fit(X_train, y_train)
# Testing
pred = mlp.predict(X_test)
print("train = " + str(mlp.score(X_train, y_train))) # on training dataset
print("test = " + str(accuracy_score(y_test, pred))) # on testing dataset
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 16:44:24 2018
@author: XuL
"""
import re
import pandas as pd
from nltk import tokenize
def find_case(x):
    # re.search() returns None when there is no match, so .span() raises AttributeError.
    try:
        st = re.search(r'Bankr\.|Bank\.', x).span()[0]
        end = re.search(r'Case No + \d\d_\d\d\d\d\d|\d_\d\d\d\d\d', x).span()[1]
        case = x[st:end]
    except AttributeError:
        try:
            rge = re.search(r'Case No +\d\d_\d\d\d\d\d|\d_\d\d\d\d\d', x).span()
            st = rge[0]
            end = rge[1]
            case = x[st:end]
        except AttributeError:
            try:
                rge = re.search(r' and +\d\d_\d\d\d\d\d|\d_\d\d\d\d\d', x).span()
                st = rge[0]
                end = rge[1]
                case = x[st:end]
            except AttributeError:
                case = ""
    x = x.replace(case, "")
    return x
def remov_case(x):
new_x = find_case(x)
while new_x != x:
x = new_x
new_x = find_case(x)
return new_x
def pre_process(text):
    # remove web address
    try:
        st = re.search(r'http', text).span()[0]
        end = re.search(r'\.+(net|com)', text).span()[1]
        s = text.replace(text[st:end], "")
    except AttributeError:  # no URL found
        s = text
    # remove dashed line in title
    search = re.search(r'--------', s)
    if search is not None:
        st = search.span()[0]
        ed = st
        while s[ed] == '-':
            ed += 1
        s = re.sub(s[(st-1):(ed)], '.', s)
    # substitute hyphens in joined words
    s = re.sub(r'--', ',', s)
    s = re.sub(r'-', '_', s)
    # remove forward slashes
    s = re.sub(r'/', '', s)
    # normalize entity suffixes: strip a comma before and a dot after
s = re.sub(r', Inc\.', ' INC', s)
s = re.sub(r' Inc\.', ' INC', s)
s = re.sub(r' INC,', ' INC', s)
s = re.sub(r', INC', ' INC', s)
s = re.sub(r'Incs', 'INC\'s', s)
s = re.sub(r', Esq\.', ' Esq', s)
s = re.sub(r' Esq\.', ' Esq', s)
s = re.sub(r' Esq,', ' Esq', s)
s = re.sub(r', Esq', ' Esq', s)
s = re.sub(r', L\.L\.C\.', ' LLC', s)
s = re.sub(r' L\.L\.C\.', ' LLC', s)
s = re.sub(r' LLC\.', ' LLC', s)
s = re.sub(r' LLC,', ' LLC', s)
s = re.sub(r', LLC', ' LLC', s)
s = re.sub(r', L\.P\.', ' LP', s)
s = re.sub(r' L\.P\.', ' LP', s)
s = re.sub(r' LP\.', ' LP', s)
s = re.sub(r' LP,', ' LP', s)
s = re.sub(r', LP', ' LP', s)
s = re.sub(r', P\.C\.',' PC', s)
s = re.sub(r' P\.C\.',' PC', s)
s = re.sub(r' PC\.',' PC', s)
s = re.sub(r' PC,',' PC', s)
s = re.sub(r', PC',' PC', s)
s = re.sub(r', P\.A\.',' PA', s)
s = re.sub(r' P\.A\.',' PA', s)
s = re.sub(r' PA\.',' PA', s)
s = re.sub(r' PA,',' PA', s)
s = re.sub(r', PA',' PA', s)
s = re.sub(r'General Partnership', 'GP', s)
s = re.sub(r', GP', ' GP', s)
s = re.sub(r' GP,', ' GP', s)
s = re.sub(r', APC', ' APC', s)
s = re.sub(r' APC,', ' APC', s)
s = re.sub(r' No\.', ' No', s)
s = re.sub(r' Nos\.', ' No', s)
s = re.sub(r' Nos', ' No', s)
    s = re.sub(r' et\. al\.', ' et al', s)
    s = re.sub(r' et al\.', ' et al', s)
s = re.sub(r' et al,', ' et al', s)
s = re.sub(r', et al', ' et al', s)
s = re.sub(r' et al', ' Et Al', s)
    # normalize the case of selected words (lowercase common nouns, uppercase abbreviations)
s = re.sub(r' Debtors', ' debtors', s)
s = re.sub(r' Debtor', ' debtor', s)
s = re.sub(r's Motion', '\'s motion', s)
s = re.sub(r' Motion', ' motion', s)
s = re.sub(r' Is ', ' is ', s)
s = re.sub(r' Not ', ' not ', s)
s = re.sub(r' Cannot ', ' can not ', s)
s = re.sub(r' Files', ' files', s)
s = re.sub(r' Filed', ' filed', s)
s = re.sub(r' File', ' file', s)
s = re.sub(r' Filing', ' filing', s)
s = re.sub(r', which filed ', ' filing ', s)
s = re.sub(r' dba ', ' DBA ', s)
s = re.sub(r' fdba ', ' FDBA ', s)
s = re.sub(r' fka ', ' FKA ', s)
    # convert abbreviations
s = re.sub(r' the U\.S\. Bankruptcy Court', ' the court', s)
s = re.sub(r' the US Bankruptcy Court', ' the court', s)
s = re.sub(r' the United States Bankruptcy Court', ' the court', s)
s = re.sub(r' the Court', ' the court', s)
s = re.sub(r' Corp\.', ' CORP', s)
s = re.sub(r' Co\. ', ' Co ', s)
s = re.sub(r' Dev\.', ' Dev', s)
s = re.sub(r' Assoc\.', ' Association', s)
s = re.sub(r'Mil\.', 'million', s)
s = re.sub(r' Hon\. ', ' Hon ', s)
s = re.sub(r' Ind\. ', ' Ind ', s)
    # expand contractions and normalize symbols
s = s.replace("′", "'").replace("’", "'").\
replace("won't", "will not").replace("cannot", "can not").\
replace("can't", "can not").replace("n't", " not").\
replace("what's", "what is").replace("'ve", " have").\
replace("I'm", "I am").replace("'re", " are").\
replace("%", " percent ").replace("$", " dollar ").\
replace("'ll", " will").replace(" it's ", " its ")
# remove bankruptcy case numbers
s = remov_case(s)
    # collapse dotted initials (e.g. "J.R." -> "JR")
s = re.sub(r'([A-Z])\.([A-Z])\.',r'\1\2',s)
# remove non ASCII characters
s = s.encode("ascii", errors='ignore')
s = str(s, 'utf-8')
# remove double commas
s = re.sub(r" , ,", ",", s)
# remove additional white spaces
s = ' '.join(s.split())
return s
def sentence_split(text):
return tokenize.sent_tokenize(text)
def process_filename(s):
    last = re.search(r"_\d\d\d\d\d\d\d\d_", s).span()[0]
    s = s[0:last]
s = s.replace("_", " ")
return s
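if __name__ == '__main__':
    # Usage sketch on a made-up docket-style string (hypothetical example);
    # sentence_split() additionally needs the NLTK 'punkt' tokenizer data.
    sample = "In re ACME Widgets, L.L.C., Bankr. Case No 17_12345. The Debtor filed a Motion."
    print(pre_process(sample))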
|
# BSD 3-Clause License.
#
# Copyright (c) 2019-2023 Robert A. Milton. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contains the calculation of a single closed Sobol index without storing it."""
from __future__ import annotations
from romcomma.base.definitions import *
from romcomma.gpr.models import GPR
from romcomma.gsa.base import Calibrator, Gaussian, diag_det
class ClosedSobol(gf.Module, Calibrator):
""" Calculates closed Sobol Indices."""
def _serialize_to_tensors(self):
raise NotImplementedError()
def _restore_from_tensors(self, restored_tensors):
raise NotImplementedError()
@classmethod
@property
def META(cls) -> Dict[str, Any]:
""" Default calculation meta.
Returns: An empty dictionary.
"""
return {}
def marginalize(self, m: TF.Slice) -> Dict[str, TF.Tensor]:
""" Calculate everything.
Args:
            m: A TF.Tensor pair of ints indicating the slice [m[0]:m[1]].
        Returns: The closed Sobol index of m.
"""
G, Phi = self.G[..., m[0]:m[1]], self.Phi[..., m[0]:m[1]]
result = {'V': self._V(G, Phi)}
result['S'] = result['V'] / self.V[2]
return result
def _V(self, G: TF.Tensor, Phi: TF.Tensor) -> TF.Tensor:
""" Calculate V.
Args:
G: marginalized
            Phi: marginalized
Returns: V[m], according to marginalization.
"""
Gamma = 1 - Phi
Psi = tf.expand_dims(tf.expand_dims(Gamma, axis=2), axis=2) + Gamma[tf.newaxis, tf.newaxis, ...] # Symmetric in L^4
Psi = Psi - tf.einsum('lLM, jJM -> lLjJM', Gamma, Gamma) # Symmetric in L^4
PsiPhi = tf.einsum('lLjJM, lLM -> lLjJM', Psi, Phi) # Symmetric in L^4
PhiG = tf.expand_dims(tf.einsum('lLM, jJnM -> lLjJnM', Phi, G), axis=2) # Symmetric in L^4 N^2
# print(sym_check(PhiG, [3, 4, 5, 0, 1, 2, 6])) note the symmetry.
PhiGauss = Gaussian(mean=G, variance=Phi, is_variance_diagonal=True, LBunch=2)
H = Gaussian(mean=PhiG, variance=PsiPhi, ordinate=G[..., tf.newaxis, tf.newaxis, tf.newaxis, :], is_variance_diagonal=True, LBunch=2)
H /= PhiGauss.expand_dims([-1, -2, -3]) # Symmetric in L^4 N^2
# print(sym_check(H, [0, 1, 2, 4, 3, 5])) note the symmetry.
V = tf.einsum('lLN, lLNjJn, jJn -> lj', self.g0KY, H.pdf, self.g0KY) # Symmetric in L^2
return V
def _calibrate(self):
""" Called by constructor to calculate all available quantities prior to marginalization.
These quantities suffice to calculate V[0], V[M].
"""
pre_factor = tf.sqrt(diag_det(self.Lambda2[1][0] * self.Lambda2[-1][1])) * self.F
self.g0 = tf.exp(Gaussian(mean=self.gp.X[tf.newaxis, tf.newaxis, ...], variance=self.Lambda2[1][1], is_variance_diagonal=True, LBunch=2).exponent)
self.g0 *= pre_factor[..., tf.newaxis] # Symmetric in L^2
self.g0KY = self.g0 * self.K_inv_Y # NOT symmetric in L^2
self.g0KY -= tf.einsum('lLN -> l', self.g0KY)[..., tf.newaxis, tf.newaxis]/tf.cast(tf.reduce_prod(self.g0KY.shape[1:]), dtype=FLOAT())
self.G = tf.einsum('lLM, NM -> lLNM', self.Lambda2[-1][1], self.gp.X) # Symmetric in L^2
self.Phi = self.Lambda2[-1][1] # Symmetric in L^2
self.V = {0: self._V(self.G, self.Phi)} # Symmetric in L^2
self.V |= {1: tf.linalg.diag_part(self.V[0])}
V = tf.sqrt(self.V[1])
self.V |= {2: tf.einsum('l, i -> li', V, V)}
self.S = self.V[0]/self.V[2]
def _Lambda2(self) -> Dict[int, Tuple[TF.Tensor]]:
""" Calculate and cache the required powers of <Lambda^2 + J>.
Returns: {1: <Lambda^2 + J>, -1: <Lambda^2 + J>^(-1)} for J in {0,1,2}.
"""
if self.is_F_diagonal:
result = tf.einsum('lM, lM -> lM', self.Lambda, self.Lambda)[:, tf.newaxis, :]
else:
result = tf.einsum('lM, LM -> lLM', self.Lambda, self.Lambda)
result = tuple(result + j for j in range(3))
return {1: result, -1: tuple(value**(-1) for value in result)}
def __init__(self, gp: GPR, **kwargs: Any):
""" Construct a ClosedSobol object. A wide range of values are collected or calculated and cached, especially via the final call to self._calibrate.
Args:
gp: The gp to analyze.
**kwargs: The calculation meta to override META.
"""
super().__init__()
self.gp = gp
self.meta = self.META | kwargs
# Unwrap data
self.L, self.M, self.N = self.gp.L, self.gp.M, self.gp.N
self.Ms = tf.constant([0, self.M], dtype=INT())
self.F = tf.constant(self.gp.kernel.data.frames.variance.tf, dtype=FLOAT())
# Cache the training data kernel
self.K_cho = tf.constant(self.gp.K_cho, dtype=FLOAT())
self.K_inv_Y = tf.constant(self.gp.K_inv_Y, dtype=FLOAT())
# Determine if F is diagonal
self.is_F_diagonal = self.meta.pop('is_F_diagonal', None)
if self.is_F_diagonal is None:
gp_options = self.gp.read_meta() if self.gp._meta_json.exists() else self.gp.META
self.is_F_diagonal = not gp_options.pop('kernel', {}).pop("covariance", False)
# Reshape according to is_F_diagonal
if self.is_F_diagonal:
self.F = self.F if self.F.shape[0] == 1 else tf.linalg.diag_part(self.F)
self.F = tf.reshape(self.F, [self.L, 1])
else:
self.K_inv_Y = tf.transpose(self.K_inv_Y, [1, 0, 2])
# Set Lambdas
self.Lambda = tf.broadcast_to(tf.constant(self.gp.kernel.data.frames.lengthscales.np, dtype=FLOAT()), [self.L, self.M])
self.Lambda2 = self._Lambda2()
# Calculate and store values for m=0 and m=M
self._calibrate()
class ClosedSobolWithError(ClosedSobol):
""" Calculates closed Sobol Indices with Errors."""
@classmethod
@property
def META(cls) -> Dict[str, Any]:
""" Default calculation meta. ``is_T_partial`` forces W[Mm] = W[MM] = 0.
Returns:
is_T_partial: If True this effectively asserts the full ['M'] model is variance free, so WmM is not calculated or returned.
"""
return {'is_T_partial': True}
class RankEquation(NamedTuple):
l: str
i: str
j: str
k: str
class RankEquations(NamedTuple):
DIAGONAL: Any
MIXED: Any
RANK_EQUATIONS: RankEquations = RankEquations(DIAGONAL=(RankEquation(l='j', i='k', j='l', k='i'), RankEquation(l='k', i='j', j='i', k='l')),
MIXED=(RankEquation(l='k', i='k', j='j', k='i'),))
def _equateRanks(self, liLNjkJM: TF.Tensor, rank_eq: RankEquation) -> TF.Tensor:
""" Equate the ranks of a tensor, according to eqRanks.
Args:
liLNjkJM: A tensor which must have ranks liLNjkJM.
rank_eq: Which ranks to equate.
Returns:
LNjkS or LNjiS.
"""
shape = liLNjkJM.shape.as_list()
eqRanks_j = 'j' if shape[4] == 1 else rank_eq.j
eqRanks_k = 'k' if shape[5] == 1 else rank_eq.k
liLNjkJM = tf.reshape(liLNjkJM, shape[:-2] + [-1]) # TensorFlow only does einsum up to rank 6!
if rank_eq in self.RANK_EQUATIONS.MIXED:
result = tf.einsum(f'iiLNjkS -> LNjiS', liLNjkJM)
else:
result = tf.einsum(f'liLN{eqRanks_j}{eqRanks_k}S -> LN{rank_eq.j}{rank_eq.k}S', liLNjkJM)
result = tf.reshape(result, result.shape[:-1].as_list() + shape[-2:]) # TensorFlow only does einsum up to rank 6!
return tf.einsum(f'LNjjJM -> LNjJM', result)[..., tf.newaxis, :, :] if rank_eq.j == 'i' else result
def _equatedRanksGaussian(self, mean: TF.Tensor, variance: TF.Tensor, ordinate: TF.Tensor, rank_eqs: Tuple[RankEquation]) -> List[Gaussian]:
""" Equate ranks and calculate Gaussian.
Args:
mean: liLNjkJn.
variance: liLjkJM.
ordinate: liLNM and jkJnM.
rank_eqs: A tuple of RankEquators to apply.
Returns: liLNjkJn.
"""
result = []
N_axis = 3
for rank_eq in rank_eqs:
eq_ranks_variance = self._equateRanks(tf.expand_dims(variance, N_axis), rank_eq)[..., tf.newaxis, :]
eq_ranks_mean = self._equateRanks(mean, rank_eq)[..., tf.newaxis, :]
shape = tf.concat([eq_ranks_mean.shape[:-2], ordinate.shape[-2:]], axis=0) if tf.rank(ordinate) > 2 else None
eq_ranks_mean = (eq_ranks_mean if shape is None else tf.broadcast_to(eq_ranks_mean, shape)) - ordinate
result += [Gaussian(mean=eq_ranks_mean, variance=eq_ranks_variance, is_variance_diagonal=True, LBunch=10000)]
return result
def _OmegaGaussian(self, mp: TF.Slice, G: TF.Tensor, Phi: TF.Tensor, Upsilon: TF.Tensor, rank_eqs: Tuple[RankEquation]) -> List[Gaussian]:
""" The Omega integral for m=mp or m=mp=[:M]. Does not apply when m=[0:0].
Args:
mp: The marginalization m_primed.
G: Un-marginalized. lLNM and jJnM.
Phi: Un-marginalized. ikM and jJM.
Upsilon: Un-marginalized. ikM.
rank_eqs: A tuple of RankEquators to apply.
Returns: liLNjkJn.
"""
Gamma = 1 - Phi
Gamma_inv = 1 / Gamma
Pi = 1 + Phi + tf.einsum('ikM, ikM, ikM -> ikM', Phi, Gamma_inv, Phi)
Pi = 1 / Pi
B = tf.einsum('jJM, jJM -> jJM', Gamma, Phi)[tf.newaxis, :, tf.newaxis, ...]
B += tf.einsum('jJM, ikM, jJM -> ijkJM', Phi, Pi, Phi)
Gamma_reshape = Gamma[:, tf.newaxis, :, tf.newaxis, :]
C = Gamma_reshape / (1 - tf.einsum('lLM, ikM -> liLkM', Phi, Upsilon))
C = tf.einsum('ikM, liLkM -> liLkM', (1 - Upsilon), C)
Omega = tf.einsum('ikM, ikM, ikM -> ikM', Pi, Phi, Gamma_inv)
Omega = tf.einsum('jJM, ikM -> ijkJM', Phi, Omega)
mean = tf.einsum('ijkJM, liLkM, lLM, lLNM -> liLNjkJM', Omega, C, Gamma_inv, G)
variance = B[tf.newaxis, :, tf.newaxis, ...] + tf.einsum('ijkJM, liLkM, ijkJM -> liLjkJM', Omega, C, Omega)
if mp is not self.Ms:
variance = variance[..., mp[0]:mp[1]]
mean = mean[..., mp[0]:mp[1]]
G = G[..., mp[0]:mp[1]]
return self._equatedRanksGaussian(mean, variance, G[:, tf.newaxis, ...], rank_eqs)
def _UpsilonGaussian(self, G: TF.Tensor, Phi: TF.Tensor, Upsilon: TF.Tensor, rank_eqs: Tuple[RankEquation]) -> List[Gaussian]:
""" The Upsilon integral.
Args:
G: lLNM.
Phi: lLM.
Upsilon: ikM.
rank_eqs: A tuple of RankEquators to apply.
Returns: liLNjkJn.
"""
Upsilon_cho = tf.sqrt(Upsilon)
mean = tf.einsum('ikM, lLNM -> liLNkM', Upsilon_cho, G)[..., tf.newaxis, :, tf.newaxis, :]
variance = 1 - tf.einsum('ikM, lLM, ikM -> liLkM', Upsilon_cho, Phi, Upsilon_cho)[..., tf.newaxis, :, tf.newaxis, :]
return self._equatedRanksGaussian(mean, variance, tf.constant(0, dtype=FLOAT()), rank_eqs)
def _mu_phi_mu(self, GGaussian: Gaussian, UpsilonGaussians: List[Gaussian], OmegaGaussians: List[Gaussian], rank_eqs: Tuple[RankEquation]) -> TF.Tensor:
""" Calculate E_m E_mp (mu[m] phi[m][mp] mu[mp]).
Args:
GGaussian: jJn.
UpsilonGaussians: liLNjk.
OmegaGaussians: liLNjkJn.
rank_eqs: A tuple of RankEquators to apply.
Returns: li.
"""
GGaussian = GGaussian.expand_dims([2])
mu_phi_mu = 0.0
for i, rank_eq in enumerate(rank_eqs):
OmegaGaussians[i] /= GGaussian
OmegaGaussians[i].exponent += UpsilonGaussians[i].exponent
if UpsilonGaussians[i].cho_diag.shape[-1] == GGaussian.cho_diag.shape[-1]:
OmegaGaussians[i].cho_diag *= UpsilonGaussians[i].cho_diag
else:
OmegaGaussians[i].cho_diag = (diag_det(OmegaGaussians[i].cho_diag) * diag_det(UpsilonGaussians[i].cho_diag))[..., tf.newaxis]
if rank_eq in self.RANK_EQUATIONS.MIXED:
result = tf.einsum('kLN, LNjkJn, jJn -> jk', self.g0KY, OmegaGaussians[i].pdf, self.g0KY)
mu_phi_mu += tf.einsum('k, jk -> jk', self.mu_phi_mu['pre-factor'], result)
mu_phi_mu = tf.linalg.set_diag(mu_phi_mu, 2 * tf.linalg.diag_part(mu_phi_mu))
elif rank_eq.l == 'k' and rank_eq.i == 'j':
result = tf.einsum('jLN, LNjkJn, jJn -> j', self.g0KY, OmegaGaussians[i].pdf, self.g0KY)
mu_phi_mu += tf.linalg.diag(tf.einsum('j, j -> j', self.mu_phi_mu['pre-factor'], result))
else:
result = tf.einsum(f'jLN, LNjkJn, jJn -> jk', self.g0KY, OmegaGaussians[i].pdf, self.g0KY)
mu_phi_mu += tf.einsum(f'k, jk -> jk', self.mu_phi_mu['pre-factor'], result)
return mu_phi_mu
def _psi_factor(self, G: TF.Tensor, Phi: TF.Tensor, GGaussian: Gaussian) -> TF.Tensor:
""" Calculate the psi_factor E_m or E_mp for E_m E_mp (mu[m] psi[m][mp] mu[mp])
Args:
G: lLNm
Phi: lLm
GGaussian: lLn
Returns: liS
"""
D = Phi[..., tf.newaxis, tf.newaxis, :] - tf.einsum('lLM, iIM, lLM -> lLiIM', Phi, Phi, Phi)
mean = tf.einsum('lLM, iInM -> lLiInM', Phi, G)
mean = mean[:, :, tf.newaxis, ...] - G[..., tf.newaxis, tf.newaxis, tf.newaxis, :]
gaussian = Gaussian(mean=mean, variance=D, is_variance_diagonal=True, LBunch=2)
gaussian /= GGaussian.expand_dims([-1, -2, -3])
factor = tf.einsum('lLN, iIn, lLNiIn -> liIn', self.g0KY, self.g0, gaussian.pdf)
if tf.rank(self.K_cho) == 2 and factor.shape[-2] == 1:
factor = tf.einsum('lNiI -> liIN', tf.linalg.diag(tf.einsum('liIN -> lNi', factor)))
factor = tf.reshape(factor, factor.shape[:-2].as_list() + [-1, 1])
factor = tf.squeeze(tf.linalg.triangular_solve(self.K_cho, factor), axis=-1)
return factor
def _mu_psi_mu(self, psi_factor: TF.Tensor, rank_eqs: Tuple[RankEquation]) -> TF.Tensor:
""" Multiply psi_factors to calculate mu_psi_mu.
Args:
psi_factor: liS.
rank_eqs: A tuple of RankEquators to apply.
Returns: li
"""
first_psi_factor = self.psi_factor if rank_eqs is self.RANK_EQUATIONS.MIXED else psi_factor
first_ein = 'liS' if rank_eqs is self.RANK_EQUATIONS.DIAGONAL else 'iiS'
result = tf.einsum(f'{first_ein}, liS -> li', first_psi_factor, psi_factor)
return tf.linalg.set_diag(result, 2 * tf.linalg.diag_part(result))
def _W(self, mu_phi_mu: TF.Tensor, mu_psi_mu: TF.Tensor) -> TF.Tensor:
""" Calculate W.
Returns: W[mm] if is_T_partial, else W{mm, Mm}
"""
W = mu_phi_mu - mu_psi_mu
W += tf.transpose(W)
return W
def _T(self, Wmm: TF.Tensor, WMm: TF.Tensor = None, Vm: TF.Tensor = None) -> TF.Tensor:
""" Calculate T
Args:
Wmm: li
WMm: li
Vm: li
Returns: The closed index uncertainty T.
"""
if self.meta['is_T_partial']:
Q = Wmm
else:
Q = Wmm - 2 * Vm * WMm / self.V[1] + Vm * Vm * self.Q
return tf.sqrt(tf.abs(Q) / self.V[4])
    def marginalize(self, m: TF.Slice) -> Dict[str, TF.Tensor]:
""" Calculate everything.
Args:
            m: A TF.Tensor pair of ints indicating the slice [m[0]:m[1]].
        Returns: The closed Sobol index of m, with errors (T and W).
"""
result = super().marginalize(m)
G, Phi, Upsilon = tuple(tensor[..., m[0]:m[1]] for tensor in (self.G, self.Phi, self.Upsilon))
GGaussian = Gaussian(G, Phi, is_variance_diagonal=True, LBunch=2)
psi_factor = self._psi_factor(G, Phi, GGaussian)
if self.meta['is_T_partial']:
UpsilonGaussians = self._UpsilonGaussian(G, Phi, Upsilon, self.RANK_EQUATIONS.DIAGONAL)
OmegaGaussians = self._OmegaGaussian(m, self.G, self.Phi, self.Upsilon, self.RANK_EQUATIONS.DIAGONAL)
Wmm = self._W(self._mu_phi_mu(GGaussian, UpsilonGaussians, OmegaGaussians, self.RANK_EQUATIONS.DIAGONAL),
self._mu_psi_mu(psi_factor, self.RANK_EQUATIONS.DIAGONAL))
result |= {'W': Wmm, 'T': self._T(Wmm)}
else:
UpsilonGaussians = self.RankEquations(*(self._UpsilonGaussian(G, Phi, Upsilon, rank_eqs) for i, rank_eqs in enumerate(self.RANK_EQUATIONS)))
OmegaGaussians = self.RankEquations(*(self._OmegaGaussian(m, self.G, self.Phi, self.Upsilon, rank_eqs)
for i, rank_eqs in enumerate(self.RANK_EQUATIONS)))
Wmm = (self._W(self._mu_phi_mu(GGaussian, UpsilonGaussians.DIAGONAL, OmegaGaussians.DIAGONAL, self.RANK_EQUATIONS.DIAGONAL),
self._mu_psi_mu(psi_factor, self.RANK_EQUATIONS.DIAGONAL)))
            WMm = self._W(self._mu_phi_mu(GGaussian, UpsilonGaussians.MIXED, OmegaGaussians.MIXED, self.RANK_EQUATIONS.MIXED),
                          self._mu_psi_mu(psi_factor, self.RANK_EQUATIONS.MIXED))
result |= {'W': Wmm, 'T': self._T(Wmm, WMm, result['V'])}
return result
def _calibrate(self):
""" Called by constructor to calculate all available quantities prior to marginalization.
These quantities suffice to calculate V[0], V[M], A[00], self.A[m0]=A[M0] and self.A[mm]=A[MM]
"""
super()._calibrate()
if not self.is_F_diagonal:
raise NotImplementedError('If the MOGP kernel covariance is not diagonal, the Sobol error calculation is unstable.')
self.Upsilon = self.Lambda2[-1][2]
self.V |= {4: tf.einsum('li, li -> li', self.V[2], self.V[2])}
self.mu_phi_mu = {'pre-factor': tf.reshape(tf.sqrt(tf.reduce_prod(self.Lambda2[1][0] * self.Lambda2[-1][2], axis=-1)) * self.F, [-1])}
self.mu_phi_mu['pre-factor'] = tf.reshape(self.mu_phi_mu['pre-factor'], [-1])
self.GGaussian = Gaussian(mean=self.G, variance=self.Phi, is_variance_diagonal=True, LBunch=2)
self.psi_factor = self._psi_factor(self.G, self.Phi, self.GGaussian)
if self.meta['is_T_partial']:
self.UpsilonGaussians = self._UpsilonGaussian(self.G, self.Phi, self.Upsilon, self.RANK_EQUATIONS.DIAGONAL)
self.OmegaGaussians = self._OmegaGaussian(self.Ms, self.G, self.Phi, self.Upsilon, self.RANK_EQUATIONS.DIAGONAL)
self.W = self._W(self._mu_phi_mu(self.GGaussian, self.UpsilonGaussians, self.OmegaGaussians, self.RANK_EQUATIONS.DIAGONAL),
self._mu_psi_mu(self.psi_factor, self.RANK_EQUATIONS.DIAGONAL))
else:
self.UpsilonGaussians = self.RankEquations(*(self._UpsilonGaussian(self.G, self.Phi, self.Upsilon, rank_eq)
for i, rank_eq in enumerate(self.RANK_EQUATIONS)))
self.OmegaGaussians = self.RankEquations(*(self._OmegaGaussian(self.Ms, self.G, self.Phi, self.Upsilon, rank_eq)
for i, rank_eq in enumerate(self.RANK_EQUATIONS)))
self.W = self.RankEquations(*(self._W(self._mu_phi_mu(self.GGaussian, self.UpsilonGaussians[i], self.OmegaGaussians[i], rank_eq),
self._mu_psi_mu(self.psi_factor, rank_eq)) for i, rank_eq in enumerate(self.RANK_EQUATIONS)))
self.Q = tf.linalg.diag_part(self.W.MIXED) / (4.0 * self.V[1] * self.V[1])
self.Q = self.Q[tf.newaxis, ...] + self.Q[..., tf.newaxis] + 2.0 * tf.linalg.diag(self.Q)
self.T = self._T(self.W.DIAGONAL, self.W.MIXED, self.V[0])
class ClosedSobolWithRotation(ClosedSobol):
""" Encapsulates the calculation of closed Sobol indices with a rotation U = Theta X."""
def _matrix_inverse(self, tensor: TF.Tensor, I: tf.Tensor = None) -> TF.Tensor:
""" Invert the inner matrix of an (L,L,M,M) or (L,L,L,L,M,M) Tensor.
Args:
tensor: A tensor whose shape matches identity.
I: Supply the (L,L,M,M) identity matrix, otherwise the (L,L,L,L,M,M) identity matrix is used.
Returns: The inner matrix inverse of tensor.
"""
if I is None:
I = tf.eye(self.M, batch_shape=[1, 1, 1, 1], dtype=FLOAT())
ein = 'IiLlmM, IiLlmJ -> IiLlMJ'
else:
ein = 'LlmM, LlmJ -> LlMJ'
result = tf.linalg.cholesky(tensor)
result = tf.linalg.triangular_solve(result, I)
        return tf.einsum(ein, result, result)
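# Usage sketch (assumptions, untested): given an already-trained romcomma GPR
# model `gp`, _calibrate() has populated the full index matrix as `.S`, and
# marginalize() returns {'V', 'S'} for an input slice [m[0]:m[1]].
# sobol = ClosedSobol(gp)
# print(sobol.S)                                                   # all-inputs index matrix
# print(sobol.marginalize(tf.constant([0, 2], dtype=INT()))['S'])  # inputs 0..1 only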
|
from wtforms_alchemy import ModelForm, ModelFieldList
import wtforms as wtf
|
from eos import SourceManager, JsonDataHandler, JsonCacheHandler
from eos.data.exception import ExistingSourceError
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_cache import Cache
db = SQLAlchemy()
migrate = Migrate()
cache = Cache()
def configure_extensions(app):
"""Registers all relevant extensions."""
db.init_app(app)
migrate.init_app(app, db)
cache.init_app(app)
    # Eos isn't a proper Flask extension at this time, but it still needs app config to be initialized.
# If we decide we want to support multiple sources or share this with other Flask applications
# we should consider turning this into an extension.
data_handler = JsonDataHandler(app.config['EOS_JSON_DATA'])
cache_handler = JsonCacheHandler(app.config['EOS_CACHE'])
    # The test suite is causing this error to be raised... let's just ignore it for now.
try:
SourceManager.add('tq', data_handler, cache_handler, make_default=True)
except ExistingSourceError:
pass
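# Usage sketch: a typical application factory would call configure_extensions()
# after loading config; the Flask app name and config object are hypothetical.
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object('config.Production')
# configure_extensions(app)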
|
from nose.tools import assert_raises, eq_
from eelbrain.plot import _base
from eelbrain.plot._base import Layout
class InfoObj:
"Dummy object to stand in for objects with an info dictionary"
def __init__(self, **info):
self.info = info
def assert_layout_ok(*args, **kwargs):
error = None
l = Layout(*args, **kwargs)
if l.nrow * l.ncol < l.nax:
error = ("%i rows * %i cols = %i < %i (nax). args=%%r, kwargs=%%r"
% (l.nrow, l.ncol, l.nrow * l.ncol, l.nax))
if error:
raise AssertionError(error % (args, kwargs))
def test_layout():
"Test the Layout class"
    for nax in range(1, 100):
assert_layout_ok(nax, 1.5, 2, True, w=5)
assert_layout_ok(nax, 1.5, 2, True, h=5)
assert_layout_ok(nax, 1.5, 2, True, axw=5)
assert_layout_ok(nax, 1.5, 2, True, axh=5)
assert_layout_ok(nax, 1.5, 2, True, axw=5, w=20)
assert_layout_ok(nax, 1.5, 2, True, axw=5, h=20)
assert_layout_ok(nax, 1.5, 2, True, axh=5, w=20)
assert_layout_ok(nax, 1.5, 2, True, axh=5, h=20)
# single axes larger than figure
assert_raises(ValueError, Layout, 2, 1.5, 2, True, h=5, axh=6)
assert_raises(ValueError, Layout, 2, 1.5, 2, True, w=5, axw=6)
def test_vlims():
"Test vlim determination"
meas = 'm'
# symmetric
sym_cmap = 'polar'
v1 = InfoObj(meas=meas, cmap=sym_cmap, vmax=2)
lims = _base.find_fig_vlims([[v1]])
eq_(lims[meas], (-2, 2))
lims = _base.find_fig_vlims([[v1]], 1)
eq_(lims[meas], (-1, 1))
lims = _base.find_fig_vlims([[v1]], 1, 0)
eq_(lims[meas], (-1, 1))
# zero-based
zero_cmap = 'sig'
v2 = InfoObj(meas=meas, cmap=zero_cmap, vmax=2)
lims = _base.find_fig_vlims([[v2]])
eq_(lims[meas], (0, 2))
lims = _base.find_fig_vlims([[v2]], 1)
eq_(lims[meas], (0, 1))
lims = _base.find_fig_vlims([[v2]], 1, -1)
eq_(lims[meas], (0, 1))
|
import unittest, platform, sys, os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from main.activity.desktop_v3.activity_register import registerActivity
from main.page.base import *
from main.page.desktop_v3.header import *
from utils.function.setup import *
from utils.function.logger import *
fullname = "Mirehehe Hahaha"
phone = "081394859148"
gender = "Female"
email = "tkpd.qc+29@gmail.com"
passwd = "12345678"
conf_passwd = "12345678"
check_tos = "yes"
class TokopediaRegister(unittest.TestCase):
    params = {
"site" : "live",
"loop" : 5,
"inc" : 800000,
"name" : "name",
"phone" : "085780548872",
"gender" : "Male",
"prefix_email" : "tkpd.qc+",
"password" : "1234asdf",
"confirm_password" : "1234asdf",
"check_tos" : "yes"
}
def setUp(self):
#self.driver = webdriver.Firefox()
#self.driver = webdriver.Chrome("C:\driver\chromedriver\chromedriver.exe")
self.driver = tsetup('chrome')
self.obj = registerActivity(self.driver)
#sys.stdout = Logger() #function to create log file
def test_loop_register(self):
print("Automation register loop!")
        self.obj.set_param(self.params)
        self.obj.loop_reg(self.params['loop'])
"""def test_fill_registration_form(self):
print('================================')
print('TEST CASE #1 : REGISTRASI NORMAL')
print('================================')
driver = self.driver
#object activity
register = registerActivity()
register.test_do_register(driver, fullname, phone, gender, email, passwd, conf_passwd, check_tos)"""
"""
def test_check_error_message_case1(self):
print('======================================================')
print('TEST CASE #2 : CHECK ERROR MESSAGE IF ALL FIELD = NULL')
print('======================================================')
driver = self.driver
register = registerActivity(driver)
register.check_validasi_input_null(driver)"""
"""def test_link_register_via_fb(self):
print('======================================')
print('TEST CASE #3 : REGISTRASI VIA FACEBOOK')
print('======================================')
driver = self.driver
register = registerActivity()
register.check_link_register_via_fb(driver)
def test_link_register_via_google(self):
print('======================================')
print('TEST CASE #4 : REGISTRASI VIA GOOGLE+')
print('======================================')
driver = self.driver
register = registerActivity()
register.check_link_register_via_google(driver)"""
def tearDown(self):
print("")
self.driver.quit()
if __name__ == "__main__":
unittest.main(warnings='ignore')
|
import redis
client = redis.Redis()
client.set('visitor:home', 1)
for i in range(0,10):
client.incr('visitor:home')
print(client.get('visitor:home').decode('utf-8'))
print("=======")
for i in range(0,10):
client.decr('visitor:home')
print(client.get('visitor:home').decode('utf-8'))
client.delete('visitor:home')
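# Sketch: redis-py pipelines batch commands into one round trip; this assumes
# the standard redis-py pipeline API (pipeline/incr/execute).
pipe = client.pipeline()
for i in range(10):
    pipe.incr('visitor:home')
print(pipe.execute()[-1])  # counter value after the batched increments
client.delete('visitor:home')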
|
#!/bin/python2
"""
openssl enc -d -a -aes-128-cbc -K 41414141414141414141414141414141 -iv 00000000000000000000000000000000 -in <(echo -e $(python2 challenge9.py))
"""
import base64
from base64 import b64decode
from Crypto.Cipher import AES
def xor(msg, key):
ret = bytearray()
for i in range(len(msg)):
ret.append((msg[i]) ^ (key[i % len(key)]))
return ret
def read_file():
return b64decode(open("7.txt", "rb").read())
def encrypt_block(msg, key):
cipher = AES.new(key, AES.MODE_ECB)
return cipher.encrypt(msg)
def decrypt_block(msg, key):
cipher = AES.new(key, AES.MODE_ECB)
return cipher.decrypt(msg)
def pad(msg, l):
    # PKCS#7-style padding: append (l - len(msg)) bytes of value (l - len(msg)).
    # Note a full block gets no padding here, unlike strict PKCS#7.
    padding = chr(l - len(msg)) * (l - len(msg))
    return bytearray(msg + padding)
def aes_cbc_encrypt(msg, key, iv=None):
    block_size = 16
    so_far = 0
    if iv is None:
        iv = "\x00" * block_size
    previous_cipher = iv
    crypted = bytearray()
    while True:
        current_block = pad(msg[so_far:so_far + block_size], 16)
        xored_block = xor(bytearray(current_block), bytearray(previous_cipher))
        cipher = encrypt_block(buffer(xored_block), key)
        previous_cipher = cipher
        crypted += cipher
        so_far += 16
        if so_far >= len(msg):
            break
    return crypted
def aes_cbc_decrypt(msg, key, iv=None):
    block_size = 16
    so_far = 0
    if iv is None:
        iv = "\x00" * block_size
    previous_cipher = iv
    decrypted = bytearray()
    while True:
        current_block = msg[so_far:so_far + block_size]
        if len(current_block) != block_size:
            raise Exception("bad decrypt: incorrect block size")
        dec = decrypt_block(buffer(current_block), key)
        xored_block = xor(bytearray(dec), bytearray(previous_cipher))
        previous_cipher = current_block
        decrypted += xored_block
        so_far += 16
        if so_far >= len(msg):
            break
    return decrypted
if __name__ == '__main__':
file_contents = base64.b64decode(open("10.txt","rb").read())
print aes_cbc_decrypt(file_contents, "YELLOW SUBMARINE", "0" * 16)
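    # Round-trip sanity check (sketch, Python 2 semantics as above): encrypt
    # then decrypt under the same key/IV and recover the original message.
    msg = "SIXTEEN BYTE MSG"
    enc = aes_cbc_encrypt(msg, "YELLOW SUBMARINE", "\x00" * 16)
    print aes_cbc_decrypt(str(enc), "YELLOW SUBMARINE", "\x00" * 16)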
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from default.models import ContentTemplates
# Create your views here.
def index(request):
template_data = ContentTemplates.objects.get(template_name='index')
return render(request, 'index.html', {'template_data':template_data})
def aboutus(request):
template_data = ContentTemplates.objects.get(template_name='aboutus')
return render(request, 'aboutus.html', {'template_data':template_data})
|
# Generated by Django 2.2.1 on 2019-05-17 23:54
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('partners', '0002_partner_partner_logo'),
]
operations = [
migrations.AddField(
model_name='partner',
name='partner_description',
field=tinymce.models.HTMLField(blank=True, verbose_name='Content'),
),
]
|
import unittest
from katas.kyu_7.remove_duplicates import unique
class UniqueTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(unique([]), [])
def test_equals_2(self):
self.assertEqual(unique([5, 2, 1, 3]), [5, 2, 1, 3])
def test_equals_3(self):
self.assertEqual(unique([1, 5, 2, 0, 2, -3, 1, 10]),
[1, 5, 2, 0, -3, 10])
|
import Mumble_pb2, socket, ssl, struct, sys, select
from datetime import datetime
from threading import Thread
class Mumbot:
ca_file = 'mumble-ca.crt'
payloads = {
0: Mumble_pb2.Version,
1: Mumble_pb2.UDPTunnel,
2: Mumble_pb2.Authenticate,
3: Mumble_pb2.Ping,
4: Mumble_pb2.Reject,
5: Mumble_pb2.ServerSync,
6: Mumble_pb2.ChannelRemove,
7: Mumble_pb2.ChannelState,
8: Mumble_pb2.UserRemove,
9: Mumble_pb2.UserState,
10: Mumble_pb2.BanList,
11: Mumble_pb2.TextMessage,
12: Mumble_pb2.PermissionDenied,
13: Mumble_pb2.ACL,
14: Mumble_pb2.QueryUsers,
15: Mumble_pb2.CryptSetup,
16: Mumble_pb2.ContextActionModify,
17: Mumble_pb2.ContextAction,
18: Mumble_pb2.UserList,
19: Mumble_pb2.VoiceTarget,
20: Mumble_pb2.PermissionQuery,
21: Mumble_pb2.CodecVersion,
22: Mumble_pb2.UserStats,
23: Mumble_pb2.RequestBlob,
24: Mumble_pb2.ServerConfig,
25: Mumble_pb2.SuggestConfig
}
def __init__(self, host, username, password):
self.host = host
self.username = username
self.password = password
def savecert(self):
sock = ssl.wrap_socket(
socket.socket(socket.AF_INET,
socket.SOCK_STREAM),
ssl_version=ssl.PROTOCOL_TLSv1)
# Connect to server without checking cert
try:
sock.connect((self.host, 64738))
except ssl.SSLError:
pass
else:
# Save cert to ca file
c = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
with open(Mumbot.ca_file, 'a') as certfile:
certfile.write(c)
sock.close()
def recv(self):
# Payload prefix
prefix = self.sock.recv(6)
if (len(prefix) <= 0):
return (-1, None)
type, length = struct.unpack('>hL', prefix)
# Receive payload
data = self.sock.recv(length)
if (len(data) <= 0):
return (-1, None)
# Return protobuf obj
obj = Mumbot.payloads[type]()
try:
obj.ParseFromString(data)
return (type, obj)
        except Exception:
            return (type, None)
def send(self, payload):
type = 0
data = payload.SerializeToString()
length = len(data)
# Find type no.
for t in Mumbot.payloads:
if isinstance(payload, Mumbot.payloads[t]):
type = t
break
self.sock.send(struct.pack('>hL', type, length) + data)
def start(self):
self.sock = ssl.wrap_socket(
socket.socket(socket.AF_INET,
socket.SOCK_STREAM),
ca_certs=Mumbot.ca_file,
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=ssl.PROTOCOL_TLSv1)
print('Connecting..')
self.sock.connect((self.host, 64738))
print('Connected.')
print('Exchanging versions')
self.recv() # Read server version
ver = Mumbot.payloads[0]()
ver.version = 66053 # 1.2.5
ver.release = '1.2.5-233-gafa6ee4'
ver.os = 'Mumbot'
ver.os_version = 'v0.1'
self.send(ver) # Send Mumbot version
print('Authenticating')
auth = Mumbot.payloads[2]()
auth.username = self.username
auth.password = self.password
self.send(auth) # Authenticate
# Wait for CryptSetup
# Holds key and nonces for connecting to voice using UDP
t, crypt = self.recv()
if t == 4: # Reject
print('Authentication rejected.')
self.sock.close()
return
else:
# Start UDP thread
print('Authenticated')
print('Syncing with server')
while True:
t, o = self.recv()
if t == 5: # ServerSync
print('Finished syncing')
print('Welcome message: ' + o.welcome_text)
break
self.recv() # Discard ServerConfig
# Main loop
ping = 10
timer = datetime.now()
while True:
ready = select.select([self.sock], [], [], ping)
if ready[0]:
t, o = self.recv()
if t in [1, 3]:
pass # Ignore TCP voip tunneling and pings
else:
print(o) # Print interesting things
if (datetime.now() - timer).seconds >= ping:
self.send(Mumbot.payloads[3]()) # Send empty Ping payload
timer = datetime.now()
def main():
if (len(sys.argv) != 4):
print('Usage: ' + sys.argv[0] + ' <host> <username> <password>')
return
_, host, username, password = sys.argv
m = Mumbot(host, username, password)
try:
m.start()
except ssl.SSLError:
print('Server certificate unknown.')
if raw_input('Wanna save it? [yN]: ').lower() == 'y':
m.savecert()
m.start()
main()
|
"""ChanLun URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from Main import views
urlpatterns = [
url(r'^main$', views.Main),
url(r'^KXian$', views.KXian),
url(r'^getOneData$', views.getOneData, name='getOneData'),
url(r'^getUserId$', views.getUserId, name='getUserId'),
url(r'^RealNewPrice$', views.getNewPrice, name='getNewPrice'),
url(r'^ZXG_Recommend$', views.getZXG_Recommend, name='ZXG_Recommend'),
url(r'^MyZXG$', views.getMyZXG, name='getMyZXG'),
url(r'^OneQuotation$', views.getOneQuotation, name='getOneQuotation'),
url(r'^getKLines', views.GetKLines, name='GetKLines')
]
|
'''server.py -- A simple tornado server
2014.May : Mendez
'''
import os
import sys
import tornado.web
import tornado.ioloop
import weather
import reddit
from datetime import datetime
HOSTNAME = 'localhost'
PORT = 5555
# TODO: create a scheduler to run updates at specific times
w = weather.Weather()
w.update()
r = reddit.Reddit()
r.load()
class MainHandler(tornado.web.RequestHandler):
'''Handles the web requests'''
def get(self):
items = ['a','b']
time = datetime.now()
self.render('home.html', items=items, time=time,
weather=w.display(),
# weather=None,
reddit=r.display())
sys.stdout.write('.')
sys.stdout.flush()
def server(hostname, port):
'''Setup the server on this port'''
print 'Starting the Server: http://{}:{}'.format(hostname,port)
try:
mainpath = os.path.dirname(__file__)
settings = dict(
template_path = os.path.join(mainpath, "templates"),
debug = True,
)
handlers = [
(r"/", MainHandler),
(r"/static/(.*)", tornado.web.StaticFileHandler,
{"path": os.path.join(mainpath,'static')}),
]
application = tornado.web.Application(handlers, **settings)
application.listen(port)
tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        print 'Bye!'
if __name__ == '__main__':
server(HOSTNAME, PORT)
|
lines = []
for _ in range(100):
try:
line = input()
lines.append(line)
except EOFError:
break
for line in lines:
    print(line)
|
# -*- coding: utf-8 -*-
"""asyncio unit tests with Django transactional support."""
# :copyright: (c) 2015 Alex Hayes and individual contributors,
# All rights reserved.
# :license: MIT License, see LICENSE for more details.
from collections import namedtuple
version_info_t = namedtuple(
'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'),
)
VERSION = version_info_t(0, 2, 2, '', '')
__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
__author__ = 'Alex Hayes'
__contact__ = 'alex@alution.com'
__homepage__ = 'http://github.com/alexhayes/django-async-test'
__docformat__ = 'restructuredtext'
# -eof meta-
# Import all of asynctest
from asynctest.case import *
from asynctest.mock import *
from asynctest.helpers import *
from asynctest.selector import *
# Now import our TestCase
from .testcase import TestCase
|
# -*- coding: utf-8 -*-
from django.contrib.auth import login
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import user_passes_test
from annoying.decorators import render_to
from django.contrib.auth.models import User
from Professor.models import Professor, Monitor
from Professor.views.utils import prof_monit_exist
from Avaliacao.models import TemplateAvaliacao, Avaliacao
from Avaliacao.Questao.models import QuestaoDeAvaliacao
from Avaliacao.Questao.forms import AlterarNotaQuestaoForm
@prof_monit_exist
@login_required
@render_to('professor/consultar.html')
def consultar(request):
prof=None
monitor=None
try:
prof = request.user.professor_set.get()
except Professor.DoesNotExist:
try:
monitor = request.user.monitor_set.get()
except Monitor.DoesNotExist:
return redirect('/')
template_id = request.GET.get('template',None)
avaliacao_id = request.GET.get('avaliacao',None)
questao_id = request.GET.get('questao',None)
template = None
if template_id != None:
template = get_object_or_404(TemplateAvaliacao,id=template_id)
elif avaliacao_id != None:
avaliacao = get_object_or_404(Avaliacao,id=avaliacao_id)
template=avaliacao.templateAvaliacao
elif questao_id != None:
questao = get_object_or_404(QuestaoDeAvaliacao,id=questao_id)
if request.POST:
form_questao = AlterarNotaQuestaoForm(request.POST, instance=questao)
form_questao.save()
else:
form_questao = AlterarNotaQuestaoForm(instance=questao)
template=questao.avaliacao.templateAvaliacao
if prof != None:
if template == None:
templates = TemplateAvaliacao.objects.filter(turma__in=prof.turmas.all(),ativa=False)
return locals()
if not template.verifica_professor(prof):
return redirect('/')
else:
if template == None:
templates = TemplateAvaliacao.objects.filter(turma__in=monitor.materia.turmas.all(),ativa=False)
return locals()
if not template.verifica_monitor(monitor):
return redirect('/')
template = None if template_id == None else template
return locals()
|
import csv
import spacy
import pandas as pd
from gensim.models import word2vec
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import pickle
TEST_X_PATH = sys.argv[1]
test_data = pd.read_csv(TEST_X_PATH)
test_x = test_data['comment'].values
nlp = spacy.load("en_core_web_sm")
useless_word = ['.@user', '@user', '..', '...', '@', '*', '#', '&', 'URL', ' ', ' ', ' ']
def loadpkl():
with open('word_to_ix.pkl', 'rb') as f:
return pickle.load(f)
word_to_ix = loadpkl()
test_data = [] # should be a list of lists!!!
for row in test_x:
doc = nlp(row)
inner_list = []
for token in doc:
if token.text in useless_word:
continue
inner_list.append(token.text)
test_data.append(inner_list)
with open('corpus.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ')
for row in test_data :
writer.writerow(row)
sentences = word2vec.LineSentence('corpus.csv')
test_data = []
for i, data in enumerate(sentences):
test_data.append(data)
cuda = True if torch.cuda.is_available() else False
# sentences = word2vec.LineSentence(seg_test_x)
# test_data = []
# for i, data in enumerate(sentences):
# test_data.append(data)
def text2index(corpus, to_ix):  # corpus should be a list of lists of tokens
    # Bag-of-words encoding: one vocabulary-length count vector per sequence.
    new_corpus = []
    for seq in corpus:
        vec = np.zeros(len(to_ix))
        for word in seq:
            if word in to_ix:  # skip out-of-vocabulary tokens
                vec[to_ix[word]] += 1
        new_corpus.append(vec)
    return np.array(new_corpus, dtype=np.float32)
test_data = text2index(test_data, word_to_ix)
test_loader = DataLoader(dataset=test_data, batch_size=32, shuffle=False, num_workers=4)
class BoWClassifier(nn.Module):
def __init__(self, num_labels, vocab_size):
super(BoWClassifier, self).__init__()
self.linear = nn.Linear(vocab_size, num_labels)
def forward(self, bow_vec):
return F.log_softmax(self.linear(bow_vec), dim=1)
model = torch.load("bow_dnn_best.pkl", map_location="cpu")
ans = []
model.eval()
with torch.no_grad():
for instance in test_loader:
if cuda:
instance = instance.cuda()
log_probs = model(instance)
_, predicted = torch.max(log_probs.data, 1)
predicted = predicted.cpu().numpy()
ans.extend(predicted)
### Write file
with open(sys.argv[2],"w") as f: # 'results/predict.csv'
w = csv.writer(f)
title = ['id','label']
w.writerow(title)
for i in range(len(ans)):
content = [i,ans[i]]
w.writerow(content)
|
data = ""
elf_counter = {}
with open("inputData.txt", "r") as infile:
"""
Process inputData.txt and put it into the data array.
"""
for line in infile:
data = int(line)
def get_factors(number):
"""
Note: Taken from Stack Overflow
http://stackoverflow.com/a/6800214
:param number: Number to factorize
:return: Set of factors for this number
"""
return set(reduce(list.__add__,
([i, number // i] for i in range(1, int(number ** 0.5) + 1) if number % i == 0)))
def get_result(number):
"""
:param number: House number
:return: The number of presents in the house of that number
"""
result = get_factors(number)
result = remove_unwanted_elves(result)
register_delivery(result)
return sum(result) * 11
def remove_unwanted_elves(elves):
"""
Removes elves that have already delivered to 50 houses
:param elves: set of elves to check
:return: new set, with disqualified elves removed
"""
new_set = elves.copy()
for elf in elves:
if elf in elf_counter and elf_counter[elf] >= 50:
new_set.discard(elf)
return new_set
def register_delivery(elves):
"""
Ticks every elf up by 1 in the elf_counter
:param elves: set of elves to tick up
:return: nothing
"""
for elf in elves:
if elf in elf_counter:
elf_counter[elf] += 1
else:
elf_counter[elf] = 1
result_so_far = 0
ticker = 0
while result_so_far < data:
ticker += 1
result_so_far = get_result(ticker)
print("Answer: House #" + str(ticker))
|
# -*- python -*-
# Insertion Sort
#
# Build an algorithm for insertion sort. Please watch the video here to understand how insertion sort works and implement the code. The following gif also shows how insertion sort is done.
#
# Again, write the pseudo-code first and test your base cases before you build your code next.
#
# Please refrain from checking other people's code. If your code does NOT work as intended make sure (1) that you're writing pseudo-code first, (2) that your pseudo-code solves your base case, and (3) that your pseudo-code solves other base cases you have specified.
#
# Sometimes, if you are stuck for too long, you need to just start all over as this can be more efficient to do than dwelling on old code with bugs that are hard to trace.
# def insertion_sort( ary ):
# for n in range( 1, len( ary ) ):
# i = n - 1
# print "Debug: insertion_sort: (before) ary={} n={} i={}".format( ary, n, i )
# while i >= 0:
# if ary[n] < ary[i]:
# i -= 1
# else:
# print "Debug: insertion_sort: (during) ary={} n={} i={}".format( ary, n, i )
# ary = ary[0:i-1] + ary[n:n+1] + ary[i:n-1]
# break
# print "Debug: insertion_sort: (after) ary={} n={} i={}".format( ary, n, i )
# return( ary )
#
# return( ary )
def insertion_sort( ary ):
for n in range( 1, len( ary ) ):
n_ins = -1
for i in range( n ):
if ary[n] < ary[i]:
n_ins = i
break
if n_ins != -1:
ary = ary[0:n_ins] + ary[n:n+1] + ary[n_ins:n] + ary[n+1:]
return( ary )
# Testing
ary0 = [1, 5, 3, 4, 2]
print "Ary:", ary0
aryR = insertion_sort( ary0 )
print "Sorted:", aryR
ary1 = [5, 4, 3, 2, 1]
print "Ary:", ary1
aryR = insertion_sort( ary1 )
print "Sorted:", aryR
ary2 = [1, 2, 3, 4, 5]
print "Ary:", ary2
aryR = insertion_sort( ary2 )
print "Sorted:", aryR
|
import os
import re
import sys
import shutil
import logging
SPARK_VERSIONS_FILE_PATTERN = "spark-(.*)-bin-(?:hadoop)?(.*)"
SPARK_VERSIONS_URL = "https://raw.githubusercontent.com/rstudio/spark-install/master/common/versions.json"
WINUTILS_URL = "https://github.com/steveloughran/winutils/archive/master.zip"
NL = os.linesep
def _verify_java():
import subprocess
try:
import re
output = subprocess.check_output(["java", "-version"], stderr=subprocess.STDOUT)
logging.debug(output)
match = re.search(b"(\d+\.\d+)", output)
if match:
logging.debug("Found a match")
if match.group() == b'1.8':
logging.info("Found Java version 8, continuing.")
return True
else:
logging.info("Did not detect Java Version 8, please install Java 8 before continuing.")
return False
else:
logging.info("Java could not be detected on this system, please install Java 8 before continuing.")
return False
except:
logging.info("Warning: Java was not found in your path. Please ensure that Java 8 is configured correctly otherwise launching the gateway will fail")
return False
def _file_age_days(jsonfile):
    from datetime import datetime
    ctime = os.stat(jsonfile).st_ctime
    # Age must be (now - ctime); the original order gave negative days,
    # so the 30-day refresh check could never trigger.
    return (datetime.now() - datetime.fromtimestamp(ctime)).days
def _combine_versions(spark_version, hadoop_version):
return spark_version + " " + hadoop_version
def _download_file(url, local_file):
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
urlretrieve(url, local_file)
def spark_can_install():
install_dir = spark_install_dir()
if not os.path.isdir(install_dir):
os.makedirs(install_dir)
def spark_versions_initialize(connecting=False):
import json
spark_can_install()
jsonfile = os.path.join(spark_install_dir(), "versions.json")
if not os.path.isfile(jsonfile) or _file_age_days(jsonfile) > 30 or connecting:
logging.info("Downloading %s to %s" % (SPARK_VERSIONS_URL, jsonfile))
_download_file(SPARK_VERSIONS_URL, jsonfile)
with open(jsonfile) as jf:
return json.load(jf)
def spark_versions(connecting=False):
versions = spark_versions_initialize(connecting)
installed = set([_combine_versions(v["spark"], v["hadoop"]) for v in spark_installed_versions()])
for v in versions: v["installed"] = _combine_versions(v["spark"], v["hadoop"]) in installed
return versions
def spark_versions_info(spark_version, hadoop_version):
versions = [v for v in spark_versions() if v["spark"] == spark_version and v["hadoop"] == hadoop_version]
if versions == []:
raise ValueError("Unable to find Spark version: %s and Hadoop version: %s" % (spark_version, hadoop_version))
package_name = versions[0]["pattern"]%(spark_version, hadoop_version)
component_name = os.path.splitext(package_name)[0]
package_remote_path = versions[0]["base"] + package_name
return {"component_name": component_name,
"package_name": package_name,
"package_remote_path": package_remote_path}
def spark_installed_versions():
base_dir = spark_install_dir()
versions = []
for candidate in os.listdir(base_dir):
match = re.match(SPARK_VERSIONS_FILE_PATTERN, candidate)
fullpath = os.path.join(base_dir, candidate)
if os.path.isdir(fullpath) and match:
versions.append({"spark": match.group(1), "hadoop": match.group(2), "dir": fullpath})
return versions
def spark_install_available(spark_version, hadoop_version):
info = spark_install_info(spark_version, hadoop_version)
return os.path.isdir(info["spark_version_dir"])
def spark_install_find(spark_version=None, hadoop_version=None, installed_only=True, connecting=False):
versions = spark_versions(connecting)
if installed_only:
versions = filter(lambda v: v["installed"], versions)
if spark_version:
versions = filter(lambda v: v["spark"] == spark_version, versions)
if hadoop_version:
versions = filter(lambda v: v["hadoop"] == hadoop_version, versions)
versions = list(versions)
if versions == []:
logging.critical("Please select an available version pair for Spark and Hadoop from the following list: ")
available_versions = spark_versions_initialize(connecting)
sep = "+" + "-"*18 + "+"
fmt = "|{:>8}| {:>8}|"
logging.critical(NL + NL.join([sep] +
[fmt.format("Spark", "Hadoop")] +
[sep] +
[fmt.format(v["spark"], v["hadoop"]) for v in available_versions] +
[sep]))
raise RuntimeError("Unrecognized combination of Spark/Hadoop versions: (%s, %s). Please select a valid pair of Spark and Hadoop versions to download."%(spark_version, hadoop_version))
candidate = sorted(versions, key=lambda rec: _combine_versions(rec["spark"], rec["hadoop"]))[-1]
return spark_install_info(candidate["spark"], candidate["hadoop"])
def spark_default_version():
if len(spark_installed_versions()) > 0:
version = spark_install_find()
else:
version = sorted(spark_versions_initialize(), key=lambda rec: _combine_versions(rec["spark"], rec["hadoop"]))[-1]
return {"spark": version["spark"], "hadoop": version["hadoop"]}
def spark_install_info(spark_version, hadoop_version):
info = spark_versions_info(spark_version, hadoop_version)
component_name = info["component_name"]
package_name = info["package_name"]
package_remote_path = info["package_remote_path"]
spark_dir = spark_install_dir()
spark_version_dir = os.path.join(spark_dir, component_name)
return {"spark_dir": spark_dir,
"package_local_path": os.path.join(spark_dir, package_name),
"package_remote_path": package_remote_path,
"spark_version_dir": spark_version_dir,
"spark_conf_dir": os.path.join(spark_version_dir, "conf"),
"spark": spark_version,
"hadoop": hadoop_version,
"installed": os.path.isdir(spark_version_dir)}
def spark_uninstall(spark_version, hadoop_version):
logging.debug("Inside uninstall routine.")
info = spark_versions_info(spark_version, hadoop_version)
spark_dir = os.path.join(spark_install_dir(), info["component_name"])
shutil.rmtree(spark_dir, ignore_errors=True)
logging.debug("File tree removed.")
def spark_install_dir():
homedir = os.getenv("LOCALAPPDATA") if sys.platform == "win32" else os.getenv("HOME")
return os.getenv("SPARK_INSTALL_DIR", os.path.join(homedir, "spark"))
def spark_conf_log4j_set_value(install_info, properties, reset):
log4jproperties_file = os.path.join(install_info["spark_conf_dir"], "log4j.properties")
if not os.path.isfile(log4jproperties_file) or reset:
template = os.path.join(install_info["spark_conf_dir"], "log4j.properties.template")
shutil.copyfile(template, log4jproperties_file)
with open(log4jproperties_file, "r") as infile:
lines = infile.readlines()
for i in range(len(lines)):
if lines[i].startswith("#") or "=" not in lines[i]:
continue
k, v = lines[i].split("=")
lines[i] = "=".join((k, properties.get(k, v)))
if k in properties:
del properties[k]
with open(log4jproperties_file, "w") as outfile:
        outfile.writelines([line.rstrip(NL) + NL for line in lines])
#Now write out values in Properties that didn't have base values in the template
for key, value in properties.items():
newline = "=".join((key, value))
outfile.writelines([newline + NL])
def spark_hive_file_set_value(hive_path, properties):
with open(hive_path, "w") as hive_file:
hive_file.write("<configuration>" + NL)
for k, v in properties.items():
hive_file.write(NL.join([" <property>",
" <name>" + k + "</name>",
" <value>" + str(v) + "</value>",
" </property>" + NL]))
hive_file.write("</configuration>" + NL)
def spark_conf_file_set_value(install_info, properties, reset):
spark_conf_file = os.path.join(install_info["spark_conf_dir"], "spark-defaults.conf")
if not os.path.isfile(spark_conf_file) or reset:
template = os.path.join(install_info["spark_conf_dir"], "spark-defaults.conf.template")
shutil.copyfile(template, spark_conf_file)
max_key_len = 35
with open(spark_conf_file, "r") as infile:
lines = infile.readlines()
for i in range(len(lines)):
if lines[i].startswith("#") or " " not in lines[i]: continue
        k, v = lines[i].rstrip(NL).split(None, 1)  # split on the first whitespace run only
        lines[i] = ' '.join((k.ljust(max_key_len), properties.get(k, v)))
with open(spark_conf_file, "w") as outfile:
        outfile.writelines([line.rstrip(NL) + NL for line in lines])
def spark_set_env_vars(spark_version_dir):
import glob
zipfiles = glob.glob(os.path.join(spark_version_dir, "python", "lib", "*.zip"))
if zipfiles != [] and zipfiles[0] not in sys.path:
        matches = [index for (index, path) in enumerate(sys.path) if
                   re.match(SPARK_VERSIONS_FILE_PATTERN, path)]
        position = matches[0] if matches else len(sys.path)  # insert before the first Spark entry, else append
        sys.path = sys.path[:position] + zipfiles + sys.path[position:]
persistent_vars = {}
path_delim = ";" if sys.platform == "win32" else ":"
path_values = os.environ.get("PYTHONPATH", "").split(path_delim)
if zipfiles != [] and zipfiles[0] not in path_values:
        matches = [index for (index, path) in enumerate(path_values) if
                   re.match(SPARK_VERSIONS_FILE_PATTERN, path)]
        position = matches[0] if matches else len(path_values)  # insert before the first Spark entry, else append
        path_values = path_values[:position] + zipfiles + path_values[position:]
os.environ["PYTHONPATH"] = path_delim.join(path_values)
persistent_vars["PYTHONPATH"] = path_delim.join(path_values)
if os.environ.get("SPARK_HOME", "") != spark_version_dir:
os.environ["SPARK_HOME"] = spark_version_dir
persistent_vars["SPARK_HOME"] = spark_version_dir
if persistent_vars == {}:
return
if sys.platform == "win32":
try:
import _winreg as winreg
except ImportError:
import winreg
logging.info("Setting the following variables in your registry under HKEY_CURRENT_USER\\Environment:")
for k, v in persistent_vars.items():
logging.info("%s = %s (REG_SZ)" % (k, v))
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Environment", 0, winreg.KEY_SET_VALUE) as hkey:
for value, value_data in persistent_vars.items():
winreg.SetValueEx(hkey, value, 0, winreg.REG_SZ, value_data)
try:
import win32gui, win32con
win32gui.SendMessageTimeout(win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, "Environment", win32con.SMTO_ABORTIFHUNG, 5000)
except ImportError:
logging.warning("Could not refresh the registry, please install the PyWin32 package")
else:
logging.info("Set the following environment variables in your initialization file such as ~/.bashrc: ")
for k, v in persistent_vars.items():
logging.info("export %s = %s" % (k, v))
def spark_remove_env_vars():
# Remove env variables since there's no other spark installed.
os.environ.pop("SPARK_HOME")
os.environ.pop("PYTHONPATH")
os.unsetenv("SPARK_HOME")
os.unsetenv("PYTHONPATH")
def spark_install_winutils(spark_dir, hadoop_version):
import glob
if not os.path.isdir(os.path.join(spark_dir, "winutils-master")):
_download_file(WINUTILS_URL, os.path.join(spark_dir, "winutils-master.zip"))
from zipfile import ZipFile
with ZipFile(os.path.join(spark_dir, "winutils-master.zip")) as zf:
zf.extractall(spark_dir)
candidates = glob.glob(os.path.join(spark_dir, "winutils-master", "hadoop-" + hadoop_version + "*"))
if candidates == []:
logging.info("No compatible WinUtils found for Hadoop version %s." % hadoop_version)
return
os.environ["HADOOP_HOME"] = candidates[-1]
def spark_install(spark_version=None, hadoop_version=None, reset=True, loglevel="INFO"):
info = spark_install_find(spark_version, hadoop_version, installed_only=False)
spark_can_install()
logging.info("Installing and configuring Spark version: %s, Hadoop version: %s" % (info["spark"], info["hadoop"]))
if not os.path.isdir(info["spark_version_dir"]):
if not os.path.isfile(info["package_local_path"]):
import urllib
logging.info("Downloading %s into %s" % (info["package_remote_path"], info["package_local_path"]))
_download_file(info["package_remote_path"], info["package_local_path"])
logging.info("Extracting %s into %s" % (info["package_local_path"], info["spark_dir"]))
import tarfile
with tarfile.open(info["package_local_path"]) as tf:
tf.extractall(info["spark_dir"])
if loglevel:
from collections import OrderedDict
configs = OrderedDict()
configs["log4j.rootCategory"] = ",".join((loglevel, "console", "localfile"))
configs["log4j.appender.localfile"] = "org.apache.log4j.DailyRollingFileAppender"
configs["log4j.appender.localfile.file"] = "log4j.spark.log"
configs["log4j.appender.localfile.layout"] = "org.apache.log4j.PatternLayout"
configs["log4j.appender.localfile.layout.ConversionPattern"] = "%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n"
spark_conf_log4j_set_value(info, configs, reset)
hive_site_path = os.path.join(info["spark_conf_dir"], "hive-site.xml")
hive_path = None
if not os.path.isfile(hive_site_path) or reset:
hive_properties = OrderedDict()
hive_properties["javax.jdo.option.ConnectionURL"] = "jdbc:derby:memory:databaseName=metastore_db;create=true",
hive_properties["javax.jdo.option.ConnectionDriverName"] = "org.apache.derby.jdbc.EmbeddedDriver"
if sys.platform == "win32":
hive_path = os.path.join(info["spark_version_dir"], "tmp", "hive")
hive_properties["hive.exec.scratchdir"] = hive_path
hive_properties["hive.exec.local.scratchdir"] = hive_path
hive_properties["hive.metastore.warehouse.dir"] = hive_path
spark_hive_file_set_value(hive_site_path, hive_properties)
if hive_path:
spark_properties = OrderedDict()
spark_properties["spark.sql.warehouse.dir"] = hive_path
spark_conf_file_set_value(info, spark_properties, reset)
spark_set_env_vars(info["spark_version_dir"])
if sys.platform == "win32":
spark_install_winutils(info["spark_dir"], info["hadoop"])
def main():
import argparse
parser = argparse.ArgumentParser(description="Spark Installation Script")
parser.add_argument("-sv", "--spark-version", help="Spark Version to be used.", required=False, dest="spark_version")
parser.add_argument("-hv", "--hadoop-version", help="Hadoop Version to be used.", required=False, dest="hadoop_version")
parser.add_argument("-u", "--uninstall", help="Uninstall Spark", action="store_true", default=False, required=False)
parser.add_argument("-i", "--information", help="Show installed versions of Spark", action="store_true", default=False, required=False)
parser.add_argument("-l", "--log-level", help="Set the log level", choices=["DEBUG", "INFO", "WARNING"], default="WARNING", required=False, dest="log_level")
args = parser.parse_args()
# Set up logging parameters
logging.basicConfig(filename="install_spark.log", format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", level=getattr(logging, args.log_level))
logging.getLogger().addHandler(logging.StreamHandler())
logging.info("Logging started")
# Debug log the values
logging.debug("Spark Version specified: %s" % args.spark_version)
logging.debug("Hadoop Version specified: %s" % args.hadoop_version)
logging.debug("Uninstall argument: %s" % args.uninstall)
logging.debug("Information argument: %s" % args.information)
# Check for uninstall or information flags and react appropriately
if args.uninstall:
if args.spark_version and args.hadoop_version:
spark_uninstall(args.spark_version, args.hadoop_version)
else:
logging.critical("Spark and Hadoop versions must be specified for uninstallation. Use -i to view installed versions.")
elif args.information:
installedversions = list(spark_installed_versions())
fmt = "{:>8}| {:>8}| {:<}"
print(fmt.format("Spark", "Hadoop", "Location"))
for elem in installedversions:
print(fmt.format(elem["spark"], elem["hadoop"], elem["dir"]))
else:
# Verify that Java 1.8 is running on the system and if it is, run the install.
if _verify_java():
logging.debug("Prerequisites checked successfully, running installation.")
logging.debug("Spark Version: %s" % args.spark_version)
logging.debug("Hadoop Version: %s" % args.hadoop_version)
spark_install(args.spark_version, args.hadoop_version, True, "INFO")
logging.debug("Completed the install")
else:
logging.critical("A prerequisite for installation has not been satisfied. Please check output log for details.")
if __name__ == "__main__":
main()
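# Example invocations (a sketch; the script filename and the version pair are
# assumptions -- valid pairs come from spark_versions()):
#   python install_spark.py -sv 2.4.5 -hv 2.7
#   python install_spark.py -i                       # list installed versions
#   python install_spark.py -u -sv 2.4.5 -hv 2.7     # uninstall a pair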
|
# Exercise 1
# from tkinter import *
#
#
# def triangle(x, y, w, h):
# canvas.create_line(x, y, x + w, y)
# canvas.create_line(x + w, y, x, y + h)
# canvas.create_line(x, y + h, x, y)
#
#
# tk = Tk()
# canvas = Canvas(tk, width=500, height=500)
# canvas.pack()
# triangle(100, 100, 200, 300)
# canvas.mainloop() # write this after the drawing code
'-------------------------------------------------'
# # Exercise 2
# from tkinter import *
#
#
# def triangle(x, y, w, h):
# canvas.create_line(x, y, x + w, y)
# canvas.create_line(x + w, y, x, y + h)
# canvas.create_line(x, y + h, x, y)
#
#
# tk = Tk()
# canvas = Canvas(tk, width=500, height=500)
# canvas.pack()
# for x in range(5):
# triangle(100 + x*5, 100 + x*5, 100 - x*15, 100 - x*15)
# canvas.mainloop() # write this after the drawing code
'-------------------------------------------------'
# # Exercise 3
# from tkinter import *
#
#
# def tile(x, y, size):
# global canvas # declare explicitly that the global canvas is used
# canvas.create_rectangle(x, y, x + size, y + size)
# canvas.create_rectangle(x, y, x + size / 2, y + size / 2, fill="black")
# canvas.create_rectangle(x + size / 2, y + size / 2, x + size, y + size, fill="black")
#
#
# tk = Tk()
# canvas = Canvas(tk, width=500, height=500)
# canvas.pack()
# tile(100, 100, 200)
# canvas.mainloop() # write this after the drawing code
'--------------------------------------------------------------'
# Exercise 4
# from tkinter import *
#
#
# def convert16(value):
# return f"{int(value * 255) :02x}" # 16進数に変換 02x=2桁
#
#
# def convertRGB(r, g, b):
# return f"#{convert16(r)}{convert16(g)}{convert16(b)}" # rgb の値をとれる
#
#
# def tile(x, y, size, lcolor, rcolor):
# global canvas # declare explicitly that the global canvas is used
# canvas.create_rectangle(x, y, x + size, y + size)
# canvas.create_rectangle(x, y, x + size / 2, y + size / 2, fill=rcolor)
# canvas.create_rectangle(x + size / 2, y + size / 2, x + size, y + size, fill=lcolor)
#
#
# tk = Tk()
# canvas = Canvas(tk, width=500, height=500)
# canvas.pack()
# tile(100, 100, 200, convertRGB(0, 0, 1), convertRGB(0, 1, 0))
# canvas.mainloop() # write this after the drawing code
'--------------------------------------------------------------------'
# Exercise 5
# from tkinter import *
#
#
# def square(x, y, size):
# global canvas # declare explicitly that the global canvas is used
# canvas.create_rectangle(x, y, x + size, y + size)
#
#
# def circle(x, y, size, start, extent): # conceptually, a circle inscribed in a square
#     canvas.create_arc(x, y, x + size, y + size, start=start, extent=extent, style=ARC) # start = starting angle, extent = degrees to sweep
#     # style: just include it for now
#
# tk = Tk()
# canvas = Canvas(tk, width=500, height=500)
# canvas.pack()
# square(100, 100, 200)
# circle(0, 100, 200, -90, 180)
# circle(200, 100, 200, 90, 180)
# canvas.mainloop() # write this after the drawing code
'--------------------------------------------------'
# Exercises 6 and 7
# from tkinter import *
#
#
# def square(x, y, size):
# global canvas # declare explicitly that the global canvas is used
# canvas.create_rectangle(x, y, x + size, y + size)
#
#
# def circle(x, y, size, start, extent): # conceptually, a circle inscribed in a square
#     canvas.create_arc(x, y, x + size, y + size, start=start, extent=extent, style=ARC) # start = starting angle, extent = degrees to sweep
#     # style: just include it for now
#
#
# def polygon(ps, fill, outline): # ps = a list of points
# global canvas
# canvas.create_polygon(ps[0][0], ps[0][1], ps[1][0], ps[1][1], ps[2][0], ps[2][1], ps[3][0], ps[3][1], ps[4][0], ps[4][1],
# ps[5][0], ps[5][1], ps[6][0], ps[6][1], ps[7][0], ps[7][1], fill=fill, outline=outline)
#
#
# tk = Tk()
# canvas = Canvas(tk, width=500, height=500)
# canvas.pack()
# # square(100, 100, 300)
# ps = [[200, 200], [250, 100], [300, 200], [400, 250], [300, 300], [250, 400], [200, 300], [100, 250]]
# polygon(ps, fill="gray", outline="gray")
# canvas.mainloop() # write this after the drawing code
'-----------------------------------------------------------------'
# Exercise 8
# from tkinter import *
#
#
# def square(x, y, size):
# global canvas # declare explicitly that the global canvas is used
# canvas.create_rectangle(x, y, x + size, y + size)
#
#
# def circle(x, y, size, start, extent): # conceptually, a circle inscribed in a square
#     canvas.create_arc(x, y, x + size, y + size, start=start, extent=extent, style=ARC) # start = starting angle, extent = degrees to sweep
#     # style: just include it for now
#
#
# def polygon(ps, fill, outline): # ps = a list of points
# global canvas
# canvas.create_polygon(ps[0][0], ps[0][1], ps[1][0], ps[1][1], ps[2][0], ps[2][1], ps[3][0], ps[3][1], fill=fill, outline=outline)
#
#
# tk = Tk()
# canvas = Canvas(tk, width=500, height=500)
# canvas.pack()
# ps = [[100, 100], [200, 100], [200, 200], [100, 200]]
# canvas.create_polygon(ps[0][0], ps[0][1], ps[1][0]-50, ps[1][1], ps[2][0]-50, ps[2][1], ps[3][0], ps[3][1], fill="", outline="black")
# canvas.create_polygon(ps[0][0], ps[0][1], ps[1][0], ps[1][1], ps[2][0], ps[2][1], ps[3][0], ps[3][1], fill="", outline="black")
# canvas.create_text(150, 120, text="left-aligned", anchor=W)
# canvas.create_text(150, 150, text="centered")
# canvas.create_text(150, 180, text="right-aligned", anchor=E)
# canvas.create_text(150, 180, text="right-aligned S", anchor=S)
# canvas.create_text(150, 180, text="right-aligned N", anchor=N)
# canvas.mainloop() # write this after the drawing code
'-----------------------------------------------------------------------'
# Assignment 1, hint
from tkinter import *
def on_click(event): # event handlers are conventionally named on_...; this form is fixed
    global counter # lets the function use the global variable (its value persists outside the function)
counter += 1
print(f"{counter}:{event.x},{event.y}")
tk = Tk()
canvas = Canvas(tk, width=500, height=500)
canvas.pack()
canvas.bind("<Button-1>", on_click) # クリック処理 # 書き方は決まり 1=左クリック,2=中央クリック,3=右クリック
counter = 0
canvas.mainloop() # write this after the setup code
|
# 3. Find the longest word in the entered sentence.
# Note that the sentence contains punctuation marks.
# Hints: my_string.split([chars]) returns a list of strings;
# len(list) is the number of elements in a list.
# Each pass of the loop keeps the longest word seen so far
# (punctuation stripped); if a word is shorter than the current
# longest, move on to the next one.
my_sent = input('Enter a sentence: ')
my_sent = my_sent.split()
final_sent = ''
for i in my_sent:
i = i.strip(',!.?:;()"')
if len(i) > len(final_sent):
final_sent = i
print(final_sent, '- the longest word in the sentence')
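# Equivalent one-liner for the same search (a sketch; uses the same
# punctuation set as the loop above):
# print(max((word.strip(',!.?:;()"') for word in my_sent), key=len))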
|
from random import randint, choice
repeticiones = 10
informantes = 20
respuestas_min = 0
respuestas_max = 100
for n in range(0, repeticiones):
for respuestas in range(respuestas_min, respuestas_max):
caso = []
for x in range(0, respuestas):
opinion = randint(1, informantes), randint(1, informantes) * choice((-1, 1))
while (opinion[0], opinion[1]) in caso or (opinion[0], -opinion[1]) in caso :
opinion = randint(1, informantes), randint(1, informantes) * choice((-1, 1))
caso.append(opinion)
print('{} {}'.format(informantes, respuestas))
for agenteX, agenteY in caso:
print('{} {}'.format(agenteX, agenteY))
print('0 0')
|
# Define a function
def say_hello():
# block belonging to the function.
print('Hello World')
# End of function
say_hello() # call the function
say_hello() # call the function again
# OUTPUT
# python functions_basics.py
# Hello World
# Hello World
|
'''
Created on Dec 3, 2015
@author: Benjamin Jakubowski (buj201)
'''
import unittest
import pandas as pd
from get_and_clean_data import *
from graph_grades_over_time import *
from test_grades import *
class Test(unittest.TestCase):
def test_clean_Grade(self):
bad_grades = pd.DataFrame.from_dict({1:{'GRADE':'A'},
2:{'GRADE':'B'},
3:{'GRADE':'C'},
4:{'GRADE':'not_a_grade'}}, orient='index')
        self.assertEqual(len(clean_GRADE(bad_grades)), 3)
def test_clean_BORO(self):
bad_boros = pd.DataFrame.from_dict({1:{'BORO':'BROOKLYN'},
2:{'BORO':'BRONX'},
3:{'BORO':'MANHATTAN'},
4:{'BORO':'QUEENS'},
5:{'BORO':'STATEN ISLAND'},
6:{'BORO':'not_a_BORO'}}, orient='index')
        self.assertEqual(len(clean_BORO(bad_boros)), 5)
def test_plot_num_restaurants_by_grade_by_year(self):
self.assertRaises(ValueError, plot_num_restaurants_by_grade_by_year, 'not_a_BORO')
def test_restaurant_grades_over_time_class(self):
self.assertIsInstance(restaurant_grades_over_time(['A', 'B', 'C', 'B', 'A', 'A', 'B']), restaurant_grades_over_time)
self.assertRaises(TypeError, restaurant_grades_over_time('a string not a list').validate_grade_list, 'a string not a list')
self.assertRaises(ValueError, restaurant_grades_over_time(['A', 'B', 'C', 'D']).validate_grade_list, ['A', 'B', 'C', 'D'])
def test_no_change(self):
self.assertEqual(restaurant_grades_over_time(['A', 'A', 'A']).test_grades(), 0)
def test_improve(self):
self.assertEqual(restaurant_grades_over_time(['C', 'B', 'A']).test_grades(), 1)
def test_decline(self):
self.assertEqual(restaurant_grades_over_time(['A', 'B', 'C']).test_grades(), -1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
#Import streamlit
import streamlit as st
#Import NumPy and Pandas for data manipulation
import pandas as pd
import numpy as np
from fbprophet import Prophet
from fbprophet.diagnostics import performance_metrics
from fbprophet.diagnostics import cross_validation
from fbprophet.plot import plot_cross_validation_metric
#for encoding binary data to printable ASCII characters and decoding it back to binary form
import base64
st.title('Time Series Forecasting Using Streamlit')
st.write("IMPORT DATA")
st.write("Import the time series CSV file. It should have two columns labelled as 'ds' and 'y'. The 'ds' column should be of DateTime format by Pandas. The 'y' column must be numeric representing the measurement to be forecasted.")
data = st.file_uploader('Upload here',type='csv')
if data is not None:
    appdata = pd.read_csv(data) #read the uploaded CSV into a dataframe
appdata['ds'] = pd.to_datetime(appdata['ds'],errors='coerce')
    st.write(appdata) #display the data
max_date = appdata['ds'].max() #compute latest date in the data
st.write("SELECT FORECAST PERIOD") #text displayed
periods_input = st.number_input('How many hours forecast do you want?',min_value = 1, max_value = 24)
    #The minimum number of hours a user can select is one, while the maximum is 24 (one day ahead)
if data is not None:
obj = Prophet() #Instantiate Prophet object
obj.fit(appdata) #fit the data
#text to be displayed
st.write("VISUALIZE FORECASTED DATA")
st.write("The following plot shows future predicted values. 'yhat' is the predicted value.")
if data is not None:
future = obj.make_future_dataframe(periods=periods_input,freq='H')
    #Prophet.make_future_dataframe() takes the Prophet model object and extends the time series dataframe for the specified period for which the user needs the forecast
fcst = obj.predict(future) #make prediction for the extended data
forecast = fcst[['ds', 'yhat']]
    #The predict() method assigns each row in the 'future' dataframe a predicted value denoted as yhat
    #Choose only the forecasted records (having date after the latest date in the original data)
forecast_filtered = forecast[forecast['ds'] > max_date]
st.write(forecast_filtered) #Display some forecasted records
st.write(" The next visual shows the actual (black dots) and predicted(blue line) values over time.")
figure1 = obj.plot(fcst) #plot the actual and predicted values
st.write(figure1) #display the plot
#Plot the trends using Prophet.plot_components()
st.write("The following plots show a high level trend of predicted values, day of week trends and yearly trends (if dataset contains multiple years’ data).Blue shaded area represents upper and lower confidence intervals.")
figure2 = obj.plot_components(fcst)
st.write(figure2)
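# To run this app locally (assuming the file is saved as app.py):
#   streamlit run app.py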
|
from fitnessCalc import FitnessCalc
from population import Population
from algorithm import Algorithm
from time import time
start = time()
FitnessCalc.set_solution("1111000000000000000000000000000000000000000000000000000000001111")
my_pop = Population(50, True)
generation_count = 0
while my_pop.fitness_of_the_fittest() != FitnessCalc.get_max_fitness():
generation_count += 1
print("Generation : %s\nFittest : %s " % (generation_count, my_pop.fitness_of_the_fittest()))
my_pop = Algorithm.evolve_population(my_pop)
print("******************************************************")
genes_the_fittest = []
for i in range(len(FitnessCalc.Solution)):
genes_the_fittest.append(my_pop.get_fittest().genes[i])
print("Solution found !\nGeneration : %s\nFittest : %s " % (generation_count + 1, my_pop.fitness_of_the_fittest()))
print("Genes of the Fittest : %s " % (genes_the_fittest))
finish = time()
print ("Time elapsed : %s " % (finish - start))
|
# -*- coding: utf-8 -*-
class Queue(object):
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def enqueue(self,item):
self.items.insert(0,item)
def dequeue(self):
return self.items.pop()
    def peek(self):
        if not self.isEmpty():
            print(self.items[-1])
        else:
            raise Exception("Queue is empty")
    def size(self):
        print("Length of the queue: {0}".format(len(self.items)))
    def elements(self):
        print("Elements of the queue")
        for element in self.items:
            print(element)
def main():
queue = Queue()
queue.enqueue(1)
queue.enqueue(2)
queue.enqueue(10)
queue.size()
queue.elements()
queue.peek()
if __name__ == '__main__':
main()
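# Note: list.insert(0, item) makes enqueue O(n). A deque-based variant
# (a sketch, not part of the original class) gets O(1) at both ends:
#
#   from collections import deque
#   q = deque()
#   q.appendleft(1)   # enqueue at the front
#   q.appendleft(2)
#   q.pop()           # dequeue from the back -> 1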
|
# Created by Elivelton S.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import argparse
import os
import sys
import time
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-p', '--password')
parser.add_argument('-t', '--tag')
args = parser.parse_args()
class instagramBot:
def __init__(self, username, password):
self.username = username
self.password = password
self.drive = webdriver.Firefox(executable_path='/home/r3tr0/Documentos/geckodriver')
def login(self):
driver = self.drive
driver.get('https://www.instagram.com/')
time.sleep(3)
username = driver.find_element_by_xpath("//input[@name='username']")
password = driver.find_element_by_xpath("//input[@name='password']")
username.clear()
username.click()
username.send_keys(self.username)
password.clear()
password.click()
password.send_keys(self.password)
password.send_keys(Keys.RETURN)
time.sleep(3)
self.likePost(args.tag)
def likePost(self, hashtag):
driver = self.drive
driver.get('https://www.instagram.com/explore/tags/'+ hashtag +'/')
time.sleep(3)
for i in range(1,3):
driver.execute_script("window.scrollBy(0,document.body.scrollHeight || document.documentElement.scrollHeight)", "")
time.sleep(3)
divs = driver.find_elements_by_xpath("//div[@class='v1Nh3 kIKUG _bz0w']")
hrefs = [elem.find_element_by_xpath(".//a[@href]") for elem in divs]
pic_hrefs = [elem.get_attribute('href') for elem in hrefs]
        pic_hrefs = [href for href in pic_hrefs if hashtag in href]
        print("#" + hashtag + " has " + str(len(pic_hrefs)) + " links")
count = 0
for pic_href in pic_hrefs:
driver.get(pic_href)
driver.execute_script("window.scrollBy(0,document.body.scrollHeight || document.documentElement.scrollHeight)", "")
try:
count += 1
driver.find_element_by_xpath("//button[@class='wpO6b ']").click()
print("Curtindo: %s de %s publicacoes" %(count, str(len(pic_hrefs))))
time.sleep(19)
except Exception as e:
print(e)
time.sleep(3)
bot = instagramBot(args.user, args.password)
bot.login()
|
class Super:
def method(self):
print('in Super.method')
class Sub(Super):
def method(self): # Override method
print('starting Sub.method') # Add actions here
Super.method(self) # Run default action
print('ending Sub.method')
x = Super()
x.method()
y = Sub()
y.method()
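# The same override is usually written with super() in Python 3
# (a sketch of the idiomatic form, not part of the original demo):
class Sub2(Super):
    def method(self):
        print('starting Sub2.method')
        super().method()                # run the default action
        print('ending Sub2.method')

Sub2().method()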
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-09 23:08:50
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
# Check whether an object is an iterator
with open('file1',encoding='utf-8',mode='w') as f1:
print(('__iter__' in dir(f1)) and ('__next__' in dir(f1)))
# An iterable can be converted into an iterator
s1 = 'jofjs'
obj = iter(s1) # s1.__iter__() works as well
print(obj)
# An iterator yields values one at a time; either form below works, and each call pops the next value
print(next(obj)) #print(obj.__next__())
print(next(obj)) #print(obj.__next__())
print(next(obj)) #print(obj.__next__())
print(next(obj)) #print(obj.__next__())
print(next(obj)) #print(obj.__next__())
# Iterating past the end raises StopIteration
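# Demonstration of the note above: one more next() on the exhausted
# iterator raises StopIteration, which a try/except can absorb.
try:
    next(obj)
except StopIteration:
    print('iterator exhausted')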
|
import csv
import os
from typing import Any, Dict, Optional
from parseridge.parser.evaluation.callbacks.base_eval_callback import EvalCallback
class EvalCSVReporter(EvalCallback):
_order = 10
def __init__(self, csv_path: Optional[str] = None):
self.csv_path = csv_path
if self.csv_path:
os.makedirs(os.path.dirname(self.csv_path), exist_ok=True)
self.file = open(csv_path, mode="w")
fieldnames = [
"epoch",
"train_las",
"train_uas",
"dev_las",
"dev_uas",
"test_las",
"test_uas",
"train_loss",
]
self.writer = csv.DictWriter(self.file, fieldnames=fieldnames)
self.writer.writeheader()
def on_shutdown(self, **kwargs: Any) -> None:
if self.csv_path:
self.file.close()
def on_eval_end(
self, scores: Dict[str, Dict[str, float]], loss: float, epoch: int, **kwargs: Any
) -> None:
if self.csv_path:
self.writer.writerow(
{
"epoch": epoch,
"train_las": scores["train"]["las"],
"train_uas": scores["train"]["uas"],
"dev_las": scores["dev"]["las"],
"dev_uas": scores["dev"]["uas"],
"test_las": scores["test"]["las"] or 0.0,
"test_uas": scores["test"]["uas"] or 0.0,
"train_loss": loss,
}
)
|
#import multiprocessing
import socket
import time
from downloader import Downloader
import asyncio
import redis
from beletag_callback import BeletagCallback
from redis_cache import RedisCache
SLEEP_TIME = 1
socket.setdefaulttimeout(60)
class Beletag:
def __init__(self):
self.cb = BeletagCallback()
self.cb.setCategoriesPages()
self.redisClient = redis.StrictRedis(host='localhost', port=6379, db=1)
self.cache = RedisCache(client=self.redisClient)
self.max_tasks = 20
def getAllPages(self):
loop = asyncio.get_event_loop()
tasks = []
D = Downloader(cache=self.cb.redisClientPages)
        async def process_queue(url):
            # each worker keeps pulling URLs until the shared queue is drained
            # (the original looped forever on a bad URL without advancing)
            while url is not None:
                if url and 'http' in url:
                    html = D(url, num_retries=5)
                    if html:
                        self.cb.pageParse(url, html)
                url = self.cb.pages_queue.pop() if len(self.cb.pages_queue) else None
        # Spawn up to max_tasks workers; each worker drains the queue on its own.
        while len(tasks) < self.max_tasks and len(self.cb.pages_queue):
            tasks.append(asyncio.ensure_future(process_queue(self.cb.pages_queue.pop())))
        if tasks:
            loop.run_until_complete(asyncio.wait(tasks))
        loop.close()
""""
def mp_threaded_crawler(*args, **kwargs):
# create a multiprocessing threaded crawler
processes = []
num_procs = multiprocessing.cpu_count()
for _ in range(num_procs):
proc = multiprocessing.Process(target=threaded_crawler_rq,
args=args, kwargs=kwargs)
proc.start()
processes.append(proc)
# wait for processes to complete
for proc in processes:
proc.join()
"""
if __name__ == '__main__':
start_time = time.time()
b = Beletag()
b.getAllPages()
print('Total time: %ss' % (time.time() - start_time))
|
"""
Given a non-empty binary tree, return the average value of the nodes on each
level in the form of an array.
Example 1:
Input:
3
/ \
9 20
/ \
15 7
Output: [3, 14.5, 11]
Explanation:
The average value of nodes on level 0 is 3, on level 1 is 14.5, and on level
2 is 11. Hence return [3, 14.5, 11].
Note:
The range of node's value is in the range of 32-bit signed integer.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
from typing import List

class Solution:
def averageOfLevels(self, root: TreeNode) -> List[float]:
if not root:
return []
averages = self.level_order_v2(root)
return averages
    def level_order_v2(self, root):
        """
        Perform BFS to traverse the tree level by level and
        compute the average value of the nodes on each level.
        """
queue = deque([root])
average_of_levels = []
curr_level = []
while len(queue) != 0:
# for each level
curr_level = []
for i in range(len(queue)):
# pop the value
node = queue.popleft()
curr_level.append(node.val)
# the we add the children
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
average = sum(curr_level) / len(curr_level)
average_of_levels.append(average)
return average_of_levels
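# Minimal usage sketch (relies on the commented-out TreeNode definition
# above, so it is left commented as well):
#   root = TreeNode(3)
#   root.left, root.right = TreeNode(9), TreeNode(20)
#   root.right.left, root.right.right = TreeNode(15), TreeNode(7)
#   print(Solution().averageOfLevels(root))   # [3.0, 14.5, 11.0]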
|
import traceback
import time
import pdb
GRID_SIZE = 15
WINNING_LENGTH = 5
X = 1
O = -1
def whoWonRow(row):
streak = 0
for value in row:
if streak >= 0 and value == 1:
streak += 1
elif streak <= 0 and value == -1:
streak -= 1
else:
streak = value
if streak >= WINNING_LENGTH:
return 1
if streak <= -WINNING_LENGTH:
return -1
return 0
class Board:
SIZE = 15
def generate_rows(self):
rows = []
for i in range(self.SIZE):
row = []
for j in range(self.SIZE):
row.append(0)
rows.append(row)
return rows
def generate_diagonals(self):
diagonals = []
delka = 1
for i in range(self.SIZE):
diagonal = []
for j in range(delka):
diagonal.append(0)
diagonals.append(diagonal)
delka += 1
        delka = self.SIZE - 1
for i in range(self.SIZE - 1):
diagonal = []
for j in range(delka):
diagonal.append(0)
diagonals.append(diagonal)
delka -= 1
return diagonals
def __init__(self):
self.rows = self.generate_rows()
self.columns = self.generate_rows()
self.diagonals_descending = self.generate_diagonals()
self.diagonals_ascending = self.generate_diagonals()
def new_turn(self, row, column, player):
self.rows[row][column] = player
self.columns[column][row] = player
ascending_diagonal_number = row + column
if (row + column < self.SIZE):
self.diagonals_ascending[ascending_diagonal_number][column] = player
else:
self.diagonals_ascending[ascending_diagonal_number][self.SIZE - 1 - row] = player
descending_diagonal_number = self.SIZE - 1 - row + column
        if descending_diagonal_number < self.SIZE:
self.diagonals_descending[descending_diagonal_number][column] = player
else:
self.diagonals_descending[descending_diagonal_number][row] = player
def get_lines(self):
return self.rows + self.columns + self.diagonals_ascending + self.diagonals_descending
def get(self, row, col):
return self.rows[row][col]
class GomokuTournament:
def __init__(self, playerX, playerO, time_limit):
self.playerX = playerX
self.playerO = playerO
self.timer = {}
self.timer[playerX] = time_limit
self.timer[playerO] = time_limit
self.board = Board()
self.history = []
def game(self):
print(f'started game X:{self.playerX.name} vs. O:{self.playerO.name}')
coordsO = None
coordsX = None
while True:
coordsX = self.player_move(self.playerX, coordsO)
coordsX = self.placeSymbol(X, coordsX)
if (self.whoWon() != 0):
break
coordsO = self.player_move(self.playerO, coordsX)
coordsO = self.placeSymbol(O, coordsO)
if (self.whoWon() != 0):
break
if (coordsX == None and coordsO == None):
            print('nobody played a valid move in this round. it is a draw.')
return 0
winner = self.whoWon()
return winner
def player_move(self, player, opponent_move):
coords = None
start_time = time.time()
print(f'{player.name} thinking...', flush=True)
try:
coords = player.play(opponent_move)
except Exception as e:
print(f'player {player.name} crashed')
print(e)
traceback.print_exc()
duration = time.time() - start_time
self.timer[player] -= duration
print(f'{player.name} played {coords}')
print(f'{player.name} has {self.timer[player]:.2f} s left')
return coords
def whoWon(self):
if self.timer[self.playerX] < 0:
print(f'{self.playerX.name} ran out of time')
return -1
if self.timer[self.playerO] < 0:
print(f'{self.playerO.name} ran out of time')
return 1
for line in self.board.get_lines():
score = whoWonRow(line)
if score != 0: return score
return 0
def save_logs(self):
with open('logs.txt', 'a') as output_file:
output_file.write(f'X: {self.playerX.name} vs. O:{self.playerO.name}\n')
for line in self.history:
output_file.write(f'{"X" if line[0] == 1 else "O"}\t{line[1]}\t{line[2]}\n')
def placeSymbol(self, player, coords):
try:
row, col = coords
if row >= 15 or row < 0 or col >= 15 or col < 0:
print(f'invalid coordinates {coords}')
return None
if self.board.get(row, col) != 0:
print(f'invalid move. {coords} is already taken')
return None
self.board.new_turn(row, col, player)
self.history.append((player, row, col))
return (row, col)
except Exception as err:
print(err)
print(f'cannot place on coordinates {coords}')
return None
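# Illustrative sanity checks for whoWonRow (not part of the tournament code):
#   assert whoWonRow([1, 1, 1, 1, 1]) == 1     # five X in a row
#   assert whoWonRow([-1] * 5) == -1           # five O in a row
#   assert whoWonRow([1, -1, 1, -1, 1]) == 0   # no winning streak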
|
# simulated_data.py
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
if __name__ == "__main__":
np.random.seed(1)
# Set the number of samples, the means and
# variances of each of the three simulated clusters
samples = 100
mu = [(7, 5), (8, 12), (1, 10)]
cov = [
[[0.5, 0], [0, 1.0]],
[[2.0, 0], [0, 3.5]],
[[3, 0], [0, 5]],
]
# Generate a list of the 2D cluster points
norm_dists = [
np.random.multivariate_normal(m, c, samples)
for m, c in zip(mu, cov)
]
X = np.array(list(itertools.chain(*norm_dists)))
# Apply the K-Means Algorithm for k=3, which is
# equal to the number of true Gaussian clusters
km3 = KMeans(n_clusters=3)
km3.fit(X)
km3_labels = km3.labels_
# Apply the K-Means Algorithm for k=4, which is
# larger than the number of true Gaussian clusters
km4 = KMeans(n_clusters=4)
km4.fit(X)
km4_labels = km4.labels_
# Create a subplot comparing k=3 and k=4
# for the K-Means Algorithm
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,6))
    ax1.scatter(X[:, 0], X[:, 1], c=km3_labels.astype(float))
ax1.set_xlabel("$x_1$")
ax1.set_ylabel("$x_2$")
ax1.set_title("K-Means with $k=3$")
    ax2.scatter(X[:, 0], X[:, 1], c=km4_labels.astype(float))
ax2.set_xlabel("$x_1$")
ax2.set_ylabel("$x_2$")
ax2.set_title("K-Means with $k=4$")
plt.show()
|
"""
4. Faça um Programa que peça as 4 notas bimestrais e mostre a média.
"""
nota1 = int(input("Digite a primeira nota do bimestre: "))
nota2 = int(input("Digite a segunda nota do bimestre: "))
nota3 = int(input("Digite a terceira nota do bimestre: "))
nota4 = int(input("Digite a quarta nota do bimestre: "))
media = (nota1 + nota2 + nota3 + nota4)/ 4
# media = soma/4
print(f"A média das notas bimestrais é igual a {media}")
|
import os
import hashlib
from torrent_parser import flatten_list, rread_dir
from constants import DEF_BLOCK_LENGTH, DEF_PIECE_LENGTH
import math
def pieces_gen(fpath, piece_length=DEF_PIECE_LENGTH):
    # Split one or more files into blocks/pieces
pieces = []
pieces_hash = []
piece = b""
if os.path.isdir(fpath):
files = os.listdir(fpath)
scanned_files = []
if len(files) > 0:
            # Handle every entry found in the directory (one or more)
            for file in files:
                nested_file_fpath = "".join([fpath, "/", file])
                if os.path.isdir(nested_file_fpath):
                    # Find all non-directory files inside this
                    # subdirectory of the root directory
                    nested_files = flatten_list(rread_dir(nested_file_fpath))
                    if len(nested_files) > 0:
                        scanned_files.append(nested_files)
                    else:
                        print(
                            f"[WARNING] Directory {nested_file_fpath} is empty"
                        )
                elif os.path.isfile(nested_file_fpath):
                    scanned_files.append(
                        {
                            "path": nested_file_fpath,
                            "length": os.path.getsize(nested_file_fpath),
                        }
                    )
flatten_flist = flatten_list(scanned_files)
for file in flatten_flist:
with open(file["path"], "rb") as open_file:
counter = 0
while counter < file["length"]:
piece = open_file.read(piece_length)
pieces.append(piece)
# print(f"Nueva pieza {piece}")
pieces_hash.append(hashlib.sha1(piece).digest())
counter += len(piece)
    # Pieces of a single file
elif os.path.isfile(fpath):
file_size = os.path.getsize(fpath)
counter = 0
with open(fpath, "rb") as open_file:
while counter < file_size:
piece = open_file.read(piece_length)
pieces.append(piece)
# print(f"Nueva pieza {piece}")
pieces_hash.append(hashlib.sha1(piece).digest())
counter += len(piece)
return pieces, pieces_hash
def gen_block(piece, start_offset, block_length=DEF_BLOCK_LENGTH):
piece_length = len(piece)
if start_offset + block_length > piece_length:
return piece[start_offset:piece_length]
else:
return piece[start_offset : start_offset + block_length]
def piece_toblocks(piece: bytes, block_length=DEF_BLOCK_LENGTH):
piece_frac, piece_whole = math.modf(len(piece) / block_length)
if piece_frac:
lpiece_size = int(piece_frac * block_length)
blocks = [
{
"block_start": x,
"block": piece[x : x + block_length],
"length": block_length,
}
for x in range(len(piece) - lpiece_size)
if x % block_length == 0
]
blocks.append(
{
"block_start": len(piece) - lpiece_size,
"block": piece[len(piece) - lpiece_size : len(piece)],
"length": lpiece_size,
}
)
print(f"Uttimo bloque de ultima pieza inicia {len(piece) - lpiece_size} tamaño:{lpiece_size}")
return blocks
else:
blocks = [
{
"block_start": x,
"block": piece[x : x + block_length],
"length": block_length,
}
for x in range(len(piece))
if x % block_length == 0
]
return blocks
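# Usage sketch (assumes DEF_BLOCK_LENGTH from constants is the block size):
#   blocks = piece_toblocks(b"\x00" * (2 * DEF_BLOCK_LENGTH + 100))
#   print(len(blocks))             # 3 blocks
#   print(blocks[-1]["length"])    # the 100-byte remainder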
|
import sys
import glob
import argparse
import re
import os
parser = argparse.ArgumentParser(description='Convert cachegrind to csv')
parser.add_argument('--cap', default="0", type=int, help="exclude function calls below this threshold (microseconds)")
parser.add_argument("--i", default=".", help="directory containing cachegrind files")
parser.add_argument("--o", default=".", help="directory to generate csv file to")
args = parser.parse_args()
if args.o == ".":
args.o = args.i
if not os.path.exists(args.o):
os.makedirs(args.o)
files = glob.glob("/".join([args.i, "cachegrind.*"]))
outname = args.o + "/all.csv"
with open(outname, 'w') as outfile:
for filename in files:
if ".csv" not in filename and ".svg" not in filename:
inname = filename
# outname = "/".join([args.o, inname.split("/").pop() + '.csv'])
key = inname.split("/").pop()
key = key.split(".")
key.pop()
key = ".".join(key)
timestamp = inname.split(".").pop().replace("_", ".")
def append (outfile, key, timestamp, fl, fn, li, tm):
if int(tm) >= args.cap: # only save the call, if it has taken more than args.cap microseconds
outfile.write(",".join([key, timestamp, fl, fn, li, tm]) + "\n")
with open(inname, 'r') as infile:
print "converting", inname, "-->", outname
fl = ""
fn = ""
li = ""
tm = ""
for line in infile:
numbers = re.match(r"([0-9]+)\ ([0-9]+)", line) # find linenumber and microseconds, like this: 26 26
if numbers:
li = numbers.group(1)
tm = numbers.group(2)
elif line.startswith("fl") or line.startswith("cfl"):
# save the old one, if it exists
if fl and fn and li and tm:
append(outfile, key, timestamp, fl, fn, li, tm)
fl = line.replace("\n", '').split('=')[1] # get the function name
elif line.startswith("fn") or line.startswith("cfn"):
fn = line.replace("\n", '').split('=')[1] # get the function name
if fl and fn and li and tm:
append(outfile, key, timestamp, fl, fn, li, tm)
|
print("{:^80}".format("Python Shop"))
print("{:<6}{:<30} ".format("NO.",": 1078718855"))
print("{:<6}{:<30} ".format("Addr.",": 서울시 종로구 종로3가"))
print("{:<6}{:<30} ".format("Name",": 김사장"))
print("{:<6}{:<30} ".format("H.P",": 070-1234-5678"))
print("{:-^80}".format("-"))
print("{:^20}{:^20}{:^20}{:^20}".format("Items","Unit","QTY","Price"))
print("{:-^80}".format("-"))
print("{:^20}{:>20}{:>20,}{:>20,}".format("blue-Tooth",85000,1,85000))
print("{:^20}{:>20,}{:>20}{:>20,}".format("usb3.0 8G",85000,1,85000))
print("{:-^80}".format("-"))
print("{:<20}{:^20}{:^20}{:>20,}".format("Total","","",93000))
print("{:-^80}".format("-"))
print("{:<20}{:^20}{:^20}{:>20,}".format("Total","","",93000))
print("{:<20}{:^20}{:^20}{:>20,}".format("Total","","",93000))
print("{:<20}{:^20}{:^20}{:>20,}".format("Total","","",93000))
print("{:-^80}".format("-"))
|
# Generated by Django 3.2.7 on 2021-09-07 03:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'ordering': ['id'], 'verbose_name': 'Categoria', 'verbose_name_plural': 'Nombres'},
),
migrations.AlterModelOptions(
name='product',
options={'ordering': ['id'], 'verbose_name': 'Productos', 'verbose_name_plural': 'Nombres'},
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=50, verbose_name='Categoria'),
),
migrations.AlterField(
model_name='product',
name='name',
field=models.CharField(max_length=50, verbose_name='Productos'),
),
]
|
suku=int(input())
print(suku*(suku-1)//2)
|
from itertools import permutations
M, N = list(map(int, input().split()))
Result = []
for i in range(1, M + 1):
Result.append(i)
for i in list(permutations(Result, N)):
for j in range(len(i)):
print(i[j], end=' ')
print()
|
import random
import socket
import string
from sys import path
path.append('liblsl-Python\\')
from pylsl import StreamOutlet, StreamInfo
tcpport = int(input('TCP Port: '))
tcpaddress = input('TCP Address (default: 127.0.0.1): ') or '127.0.0.1'
# opening socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((tcpaddress, tcpport))
# opening stream outlet
randomid = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
info = StreamInfo('TCP Marker Stream', 'Markers', 1, 0, 'string', randomid)
outlet = StreamOutlet(info)
print("waiting for input...")
while True:
    # waiting for data
    data = sock.recv(1)
    # sending marker
    if data:
        marker = data.decode('utf-8')  # recv() returns bytes on Python 3
        outlet.push_sample([marker])
        print("marker:", marker)
|
class linknode:
    def __init__(self, key=None, value=None, next=None):
        self.key = key
        self.value = value
        self.next = next
class lrucache:
def __init__(self, capacity):
self.head = linknode()
self.tail = self.head
self.capacity = capacity
self.hash = {}
def get(self, key):
if key not in self.hash:
return None
result = self.hash[key].next.value
pre = self.hash[key]
self.movenode(pre)
return result
def set(self, key, value):
if key not in self.hash:
node = linknode(key,value)
self.pushback(node)
if len(self.hash) > self.capacity:
self.popfront()
else:
self.hash[key].next.value = value
    def popfront(self):
        del self.hash[self.head.next.key]
        self.head.next = self.head.next.next
        self.hash[self.head.next.key] = self.head
def pushback(self, node):
self.tail.next = node
self.hash[node.key] = self.tail
self.tail = node
def movenode(self,pre):
        if pre.next == self.tail:
return
node = pre.next
pre.next = node.next
self.hash[node.next.key] = pre
node.next = None
self.pushback(node)
mycache = lrucache(5)
mycache.set("jessie",9)
mycache.set("cathy",36)
mycache.set("jim",40)
print(mycache.head.next.value)
print(mycache.tail.value)
print(mycache.get("cathy"))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 21:59:46 2020
@author: thomas
"""
import numpy as np
import pandas as pd
import os, sys
import time as t
import subprocess
from shutil import copyfile
import pathlib
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'
# constructs the filepath for the starting L-data CSV (Re = 2)
def filename(cwd):
return cwd+"startLData_Re2_.csv"
def GetLData(cwd):
data = pd.read_csv(filename(cwd),delimiter=' ')
data['parHy'] *= 2.0
data['parHx'] *= 2.0
data = data[data['parThetaBW'] == 112.5].copy()
data = data.sort_values(by=['parThetaBW','parHx','parHy'])
data = data.reset_index(drop=True)
return data
if __name__ == '__main__':
#READ NOTAList.txt to get all sims that did not complete
#Whether through error or through no end state
#Pull Hx, Hy, Theta parameters for each
#Change directory to Theta$Theta/Hx$Hx/Hy$Hy
#Modify 'script_restart.sh and copy to specified directory
#Copy input2D_restart into directory
#Submit with subprocess the command "sbatch script_restart.sh"
cwd_PYTHON = os.getcwd() + '/'
data = GetLData(cwd_PYTHON+'../')
#Restart simulation where it left off. Some at 40s. Some at 20s.
for idx in range(len(data['endTime'])):
parTheta = np.round(data.loc[idx,'parThetaBW'],1)
parHx = np.round(data.loc[idx,'parHx'],1)
parHy = int(np.round(data.loc[idx,'parHy'],1))
cwd_VTK = cwd_PYTHON+"../Fluid/Theta{0}/Hx{1}/Hy{2}/VTK/AVG".format(parTheta,parHx,parHy)
strSBATCH = "sbatch -J AvgVTK_T{0}_Hx{1}_Hy{2}_ -t 1-0 -n 1 -p general -o %x.out --mem-per-cpu=25000 scriptAvg.sh {3} {4} {5}".format(parTheta,parHx,parHy,parTheta,parHx,parHy)
print(strSBATCH)
os.system(strSBATCH)
|
import unittest
from katas.beta.sum_of_values_from_1_to_n_inclusive import total
class SumFromOneToNTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(total(10), 55)
def test_equal_2(self):
self.assertEqual(total(123), 7626)
def test_equal_3(self):
self.assertEqual(total(1000), 500500)
def test_equal_4(self):
self.assertEqual(total(54321), 1475412681)
def test_equal_5(self):
self.assertEqual(total(12345), 76205685)
def test_equal_6(self):
self.assertEqual(total(35765), 639585495)
def test_equal_7(self):
self.assertEqual(total(98765), 4877311995)
def test_equal_8(self):
self.assertEqual(total(56478), 1594910481)
def test_equal_9(self):
self.assertEqual(total(1111111), 617284382716)
def test_equal_10(self):
self.assertEqual(total(2222222), 2469136419753)
def test_equal_11(self):
self.assertEqual(total(3333333), 5555556111111)
def test_equal_12(self):
self.assertEqual(total(4444444), 9876543456790)
def test_equal_13(self):
self.assertEqual(total(5555555), 15432098456790)
def test_equal_14(self):
self.assertEqual(total(6666666), 22222221111111)
def test_equal_15(self):
self.assertEqual(total(7777777), 30246911419753)
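# All expected values above are instances of the closed form n*(n+1)//2,
# e.g. total(7777777) == 7777777 * 7777778 // 2 == 30246911419753.
# A one-line reference implementation (an assumption about the kata's
# `total`, which lives in the imported module):
#   def total(n): return n * (n + 1) // 2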
|
import argparse
import numpy as np
from algorithm import Surfing
parser = argparse.ArgumentParser()
parser.add_argument('dataset_path', help='Path to where the dataset is located')
parser.add_argument('-d', '--delimiter', default=',',
help='Change dataset parser delimiter definition')
parser.add_argument('-s', '--skip-header', action='store_true',
help='Determine if the dataset parser should skip a header (i.e. if the file has a header)')
if __name__ == '__main__':
args = parser.parse_args()
header = 1 if args.skip_header else 0
dataset = np.genfromtxt(args.dataset_path, delimiter=args.delimiter, skip_header=header)
unlabeled_dataset = dataset[:,:-1]
#unlabeled_dataset = np.random.uniform(0, 1, (1000, 4))
model = Surfing(k=3)
model.fit(unlabeled_dataset)
|
for x in range(1, 10):
    print(x)
for chx in 'ABC':
    print(chx)
from collections.abc import Iterable
is_iterable = isinstance('abc', Iterable)  # avoid shadowing the built-in bool
print(is_iterable)
for i, value in enumerate(['A', 'B', 'C']):
    print(i, value)
for x, y in [(1, 2), (2, 4), (3, 9)]:
    print(x, y)
d = {'a': 1, 'b': 2, 'c': 3}
for k in d.keys():
    print(k)
for v in d.values():
    print(v)
for k, v in d.items():
    print(k, v)
|
import ujson as json
from celery import shared_task
from django.dispatch import receiver
from django.db.models.signals import pre_save, post_save
from annoying.functions import get_object_or_None
from .signals import create_message
from .models import Room, Notification, Message, MessageChart
from speakifyit.users.models import ContactRequest
from .serializers import NotificationSerializer
from django.core import serializers
from django.forms.models import model_to_dict
@shared_task
def create_message_task(**kwargs):
    content = None  # ensure the name is bound for other msg_type values
    if kwargs['msg_type'] == 4:
        content = '{} joined the room'.format(kwargs['user'].username)
    elif kwargs['msg_type'] == 5:
        content = '{} left the room'.format(kwargs['user'].username)
    if not content:
        content = kwargs['content']
    room = get_object_or_None(Room, pk=kwargs['room'])  # assumes kwargs['room'] carries the pk
if room:
msg = Message.objects.create(
user = kwargs['user'], content = content, msg_type = kwargs['msg_type']
)
@shared_task
def send_notification(**kwargs):
notification = Notification.objects.create(
from_user = kwargs['from_user'],
to_user = kwargs['to_user'],
msg_type = kwargs['msg_type'],
content = kwargs['content'],
icon = kwargs['icon'],
link = kwargs['link'],
contact_request = kwargs['contact_request']
)
user = kwargs['from_user']
data = NotificationSerializer(notification).data
user.websocket_group.send(
{"text": json.dumps(data)}
)
@shared_task
def message_edit(**kwargs):
    message = get_object_or_None(Message, pk=kwargs['message'])
    if message:
        message.content = kwargs['content']
        message.is_editable = False
        message.save()
        chart = MessageChart.objects.create(message=message)
@receiver(create_message, sender=Room)
def receiver_create_message(sender, *args, **kwargs):
create_message_task.apply_async(kwargs=kwargs)
@receiver(post_save, sender=Room)
def send_created_room_notification(sender, instance, created, **kwargs):
if created:
pass
@receiver(post_save, sender=ContactRequest)
def send_requset_notification(sender, instance, created, **kwargs):
if created:
content = 'User {} wants to add you to the chat'.format(instance.request_from)
msg_type = 'create_request'
from_user = instance.request_to
to_user = instance.request_from
else:
if instance.accepted is True:
            content = 'User {} added you to the chat'.format(instance.request_to)
msg_type = 'create_request'
from_user = instance.request_from
to_user = instance.request_to
room = Room.objects.create()
room.users.add(instance.request_from, instance.request_to)
room.save()
else:
from_user = instance.request_to
to_user = instance.request_from
            content = 'User {} wants to add you to the chat'.format(instance.request_from)
msg_type = 'create_request'
send_notification.apply_async(kwargs={
'from_user': from_user,
'to_user': to_user,
'msg_type': msg_type,
'content': content,
'icon':'',
'link':'',
'contact_request': instance
})
|
#!/usr/bin/env python
# https://github.com/anishathalye/dotbot/wiki/Tips-and-Tricks#uninstall-script
import yaml
import os
import logging
import glob
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
dotfile_groups = os.getenv("DOTFILE_GROUPS")
groups = dotfile_groups.split(",") if dotfile_groups else None
if groups and len(groups) > 0:
logging.info(f"""Remove links from {", ".join(map(lambda f: f"{f}.yml", groups))}.""")
for group in groups:
stream = open(f"{group}.yml", "r")
conf = yaml.load(stream, yaml.FullLoader)
for section in conf:
if "link" in section:
for target in section["link"]:
realpath = os.path.expanduser(target)
logging.debug(f"Checking path: {realpath}")
if os.path.islink(realpath):
logging.info(f"Removing link: {realpath}")
os.unlink(realpath)
else:
logging.warning("No groups found. Set DOTFILE_GROUPS environment variable to uninstall.")
available_config_files = filter(lambda f: f if f not in ["uninstall.yml"] else None, glob.glob("*.yml"))
available_dotfile_groups = ", ".join(map(lambda f: os.path.splitext(f)[0], available_config_files))
logging.info(F"Available DOTFILE_GROUPS: {available_dotfile_groups}.")
|
import sys
have_numpy = True
try:
    import numpy
except ImportError:
    have_numpy = False
have_gdcm = True
try:
    import gdcm
except ImportError:
    have_gdcm = False
can_use_gdcm = have_gdcm and have_numpy
can_use_gdcm = have_gdcm and have_numpy
def supports_transfer_syntax(dicom_dataset):
return True
def get_pixeldata(dicom_dataset):
# read the file using GDCM
# FIXME this should just use dicom_dataset.PixelData
# instead of dicom_dataset.filename
# but it is unclear how this should be achieved using GDCM
if not can_use_gdcm:
msg = ("GDCM requires both the gdcm package and numpy "
"and one or more could not be imported")
raise ImportError(msg)
gdcm_image_reader = gdcm.ImageReader()
gdcm_image_reader.SetFileName(dicom_dataset.filename)
if not gdcm_image_reader.Read():
raise TypeError("GDCM could not read DICOM image")
gdcm_image = gdcm_image_reader.GetImage()
# determine the correct numpy datatype
gdcm_numpy_typemap = {
gdcm.PixelFormat.INT8: numpy.int8,
gdcm.PixelFormat.UINT8: numpy.uint8,
gdcm.PixelFormat.UINT16: numpy.uint16,
gdcm.PixelFormat.INT16: numpy.int16,
gdcm.PixelFormat.UINT32: numpy.uint32,
gdcm.PixelFormat.INT32: numpy.int32,
gdcm.PixelFormat.FLOAT32: numpy.float32,
gdcm.PixelFormat.FLOAT64: numpy.float64
}
gdcm_pixel_format = gdcm_image.GetPixelFormat().GetScalarType()
if gdcm_pixel_format in gdcm_numpy_typemap:
numpy_dtype = gdcm_numpy_typemap[gdcm_pixel_format]
else:
raise TypeError('{0} is not a GDCM supported '
'pixel format'.format(gdcm_pixel_format))
# GDCM returns char* as type str. Under Python 2 `str` are
# byte arrays by default. Python 3 decodes this to
# unicode strings by default.
# The SWIG docs mention that they always decode byte streams
# as utf-8 strings for Python 3, with the `surrogateescape`
# error handler configured.
# Therefore, we can encode them back to their original bytearray
# representation on Python 3 by using the same parameters.
pixel_bytearray = gdcm_image.GetBuffer()
if sys.version_info >= (3, 0):
pixel_bytearray = pixel_bytearray.encode("utf-8",
"surrogateescape")
# if GDCM indicates that a byte swap is in order, make
# sure to inform numpy as well
if gdcm_image.GetNeedByteSwap():
        numpy_dtype = numpy.dtype(numpy_dtype).newbyteorder('S')  # wrap in a dtype so newbyteorder exists
# Here we need to be careful because in some cases, GDCM reads a
# buffer that is too large, so we need to make sure we only include
# the first n_rows * n_columns * dtype_size bytes.
n_bytes = (dicom_dataset.Rows *
dicom_dataset.Columns *
numpy.dtype(numpy_dtype).itemsize)
if len(pixel_bytearray) > n_bytes:
# We make sure that all the bytes after are in fact zeros
padding = pixel_bytearray[n_bytes:]
if numpy.any(numpy.fromstring(padding, numpy.byte)):
pixel_bytearray = pixel_bytearray[:n_bytes]
else:
# We revert to the old behavior which should then result
# in a Numpy error later on.
pass
pixel_array = numpy.fromstring(pixel_bytearray, dtype=numpy_dtype)
length_of_pixel_array = pixel_array.nbytes
expected_length = dicom_dataset.Rows * dicom_dataset.Columns
try:
expected_length *= dicom_dataset.NumberOfFrames
except Exception:
pass
try:
expected_length *= dicom_dataset.SamplesPerPixel
except Exception:
pass
if dicom_dataset.BitsAllocated > 8:
expected_length *= (dicom_dataset.BitsAllocated // 8)
if length_of_pixel_array != expected_length:
raise AttributeError("Amount of pixel data %d does "
"not match the expected data %d" %
(length_of_pixel_array, expected_length))
return pixel_array
|
import matplotlib.pyplot as plt
hfont = {'fontname' : 'Karla'}
years = [1900, 1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015]
pops = [1.6, 2.5, 2.6, 3.0, 3.3, 3.6, 4.2, 4.4, 4.8, 5.3, 5.7, 6.1, 6.5, 6.9, 7.3]
plt.plot(years, pops, color=(255/255, 100/255, 100/255), linewidth=6.0)
plt.ylabel("World Population by Billions")
plt.xlabel("Population Growth by year")
plt.title("World Population Growth", pad="20", **hfont)
plt.show()
|
#!/usr/bin/env python
ROOT_DATA = "/home/conrad/Downloads/data/"
# ---- DO NOT CHANGE BELOW THIS LINE ---- #
import os
import re
from lxml import etree
legit_path = re.compile(r"(?P<course_id>\d+)/User/(?P<username>[a-zA-Z0-9]+)/\d+")
# ROOT/Forum/{{ course_id }}/User/{{ username }}/{{ readlist_file }}
def list_readlist_files(root):
result = []
    for dirpath, dirs, files in os.walk(os.path.join(root, "Forum")):
        for f in files:
            fullpath = os.path.join(dirpath, f)
research = re.search(legit_path, fullpath)
if research != None:
course_id = research.groupdict()["course_id"]
username = research.groupdict()["username"]
result.append(dict(course_id=course_id,
username=username,
filename=fullpath))
return result
def extract_file_contents(root, files):
"""
files is of the form
[{'course_id':23, 'username':"a2828", 'filename':"..."}, ...]
    result is the files dict with the updated info:
...
'readlist':[34,12,34,54,],
...
"""
for entry in files:
print "working on", entry["filename"]
f = open(entry["filename"], "r")
content = f.read()
f.close()
readlist = [int(x) for x in content.split("\n")[:-1]]
entry["readlist"] = readlist
# NOTE: open the associated /Forum/{{ course_id }}/Data/{{ file }}.xml
# file in order to extract the <entries nid="{{ nid }}"> attribute
# only with {{ nid }} is the {{ id }} unique.
# Example: username: a0848375
# course_id: 119
# file: 3.xml
#
# and
# username: a0848375
# course_id: 119
# file: 1.xml
#
# there are duplicate ids: [80, 81, 82, 83] which can only be uniquely
# determined via the associated {{ nid }}
filename = os.path.basename(entry["filename"])
xml_path = os.path.join(root,"Forum", str(entry["course_id"]), "Data", "%s.xml" % filename)
f = open(xml_path, "r")
content = f.read()
f.close()
print "associated xml:", xml_path
tree = etree.fromstring(content)
nid = tree.xpath("//entries")[0].get("nid")
entry["nid"] = int(nid)
return files
def to_csv(files, filename):
"""
[d,...]
where d keys are: ['username', 'course_id', 'nid', 'readlist']
"""
f = open(filename, "w")
f.write("service,course_id,username,nid,id\n")
for entry in files:
for entry_id in entry["readlist"]:
values = [
"Forum",
str(entry["course_id"]),
entry["username"],
str(entry["nid"]),
str(entry_id)
]
f.write(",".join(values))
f.write("\n")
f.close()
if __name__ == '__main__':
files = list_readlist_files(ROOT_DATA)
to_csv( extract_file_contents ( ROOT_DATA, files ), "forum_readlist.csv" )
|
from .budget.budget import Budget
from .budget.budget_doc import BudgetDoc
from .grant.grant import Grant
from .grant.financing_agency import FinancingAgency
from .grant.grant_domain import Grantdomain
from .costcenter.costcenter import CostCenter
from .project.project import Project
from .project.expense_code import ExpenseCode
|
"""
25-Mile Marathon
Mary wants to run a 25-mile marathon. When she
attempts to sign up for the marathon, she notices
the sign-up sheet doesn't directly state the
marathon's length. Instead, the marathon's length
is listed in small, different portions.
Help Mary find out how long the marathon actually is.
Return True if the marathon is 25 miles long, otherwise, return False.
Examples:
marathon_distance([1, 2, 3, 4]) ➞ False
marathon_distance([1, 9, 5, 8, 2]) ➞ True
marathon_distance([-6, 15, 4]) ➞ True
Notes:
Items in the list will always be integers.
Items in the list may be negative or positive,
but since negative distance isn't possible,
find a way to convert the sum of the distance
into a positive integer.
Return False if the arguments are empty or not provided.
"""
def marathon_distance(lst=None, expected_distance=25):
    if not lst:  # empty or not provided -> False, per the notes above
        return False
    return expected_distance == sum(abs(x) for x in lst)
|
from django.contrib import admin
from .models import Question, Choice
# Register your models here.
class ChoiceAdmin(admin.ModelAdmin):
    # Note: ModelAdmin does not use an inner Meta class; list_display is
    # configured directly on the admin class.
    list_display = ['__str__', 'choice', 'vote_count']

admin.site.register(Question)
admin.site.register(Choice, ChoiceAdmin)
|
from flask import Blueprint, request, jsonify, current_app as app
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
import datetime
from functools import wraps
from emailer import send
import random
import math
authentication_bp = Blueprint('authentication_bp', __name__)
from models import Users
from models import OTPS
from database import db
# ============================
# OTP GENERATOR
# ============================
def OTP_generator():
digits = [i for i in range(0, 10)]
random_str = ""
for i in range(6):
index = math.floor(random.random()*10)
random_str += str(digits[index])
return random_str
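# A shorter equivalent sketch (assuming the same 6-digit, digits-only format):
#
#     import secrets
#     def OTP_generator():
#         return "".join(secrets.choice("0123456789") for _ in range(6))
#
# secrets is preferable to random for security-sensitive tokens such as OTPs.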
# ============================
# CHECK PRESENCE OF TOKEN
# ============================
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = request.headers.get('Authentication')
if not token:
return jsonify({
'message': 'Token missing'
}), 401
try:
            # PyJWT >= 2.0 requires an explicit list of accepted algorithms
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
current_user = Users.query.filter_by(public_id=data['public_id']).first()
        except Exception:
return jsonify({
'message': 'Token invalid',
}), 401
return f(current_user, *args, **kwargs)
return decorated
# ====================
# USER SIGNUP
# ====================
@authentication_bp.route('/signup', methods=['POST'])
def user_signup():
data = request.get_json()
user = Users.query.filter_by(email=data['email']).first()
if user:
return jsonify(
{
'success': False,
'message': 'User already exists!'
}
), 400
hashed_password = generate_password_hash(data['password'], method='sha256')
new_user = Users(
public_id=str(uuid.uuid4()),
name=data['name'],
email=data['email'],
password=hashed_password,
admin=False
)
token = jwt.encode(
{
'public_id': new_user.public_id,
'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=24)
},
app.config['SECRET_KEY']
)
db.session.add(new_user)
db.session.commit()
return jsonify(
{
'success': True,
            'message': 'Signup successful',
'token': token,
'name': new_user.name,
'email': new_user.email
}
)
# ====================
# USER LOGIN
# ====================
@authentication_bp.route('/login', methods=['POST'])
def user_login():
data = request.get_json()
user = Users.query.filter_by(email=data['email']).first()
# user not found
if not user:
return jsonify(
{
'success': False,
'message': 'Authentication failed'
}
), 401
# password matched
if check_password_hash(user.password, data['password']):
token = jwt.encode(
{
'public_id': user.public_id,
'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=24)
},
app.config['SECRET_KEY']
)
return jsonify(
{
'success': True,
                'message': 'Login successful',
'token': token,
'name': user.name,
'email': user.email
}
)
# password not matched
return jsonify(
{
'success': False,
'message': 'Authentication failed'
}
), 401
# ====================
# GET ALL USERS
# ====================
@authentication_bp.route('/users', methods = ['GET'])
@token_required
def get_all_users(current_user):
if not current_user.admin:
return jsonify({
'message': 'Cannot perform that function!'
})
users = Users.query.all()
output = []
for user in users:
user_data = {}
user_data['id'] = user.id
user_data['public_id'] = user.public_id
user_data['name'] = user.name
user_data['email'] = user.email
user_data['password'] = user.password
user_data['admin'] = user.admin
output.append(user_data)
return jsonify({'users': output})
# ====================
# PASSWORD CHANGE
# ====================
@authentication_bp.route('/change-password', methods=['POST'])
@token_required
def changepassword(current_user):
public_id = current_user.public_id
user = Users.query.filter_by(public_id = public_id).first()
# user not found
# if not user:
# return jsonify(
# {
# 'success': False,
# 'message': 'Authentication failed'
# }
# ), 401
data = request.get_json()
# password matching
if check_password_hash(user.password, data['password']):
if data['newpassword'] == data['confirmpassword']:
hashed_password = generate_password_hash(data['newpassword'], method='sha256')
user.password = hashed_password
db.session.commit()
return jsonify(
{
'success': True,
'message': 'Password changed successfully!',
}
)
else:
return jsonify(
{
'success': False,
'message': 'Passwords did not match. Enter again!',
}
)
# password not matched
return jsonify(
{
            'success': False,
'message': 'Please enter correct password!'
}
    ), 401
# ====================
# FORGOT PASSWORD
# ====================
@authentication_bp.route('/forgot-password', methods=['POST'])
def forgotpassword():
data = request.get_json()
user = Users.query.filter_by(email = data['email']).first()
# user not found
if not user:
return jsonify(
{
'success': False,
'message': 'User not found!'
}
), 401
# token = jwt.encode(
# {
# 'public_id': user.public_id,
# 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=10)
# },
# app.config['SECRET_KEY']
# )
OTP = OTP_generator()
row = OTPS.query.filter_by(user_id = user.id).first()
if not row:
new_otp = OTPS(
user_id = user.id,
email = data['email'],
otp = OTP
)
db.session.add(new_otp)
db.session.commit()
else:
row.otp = OTP
db.session.commit()
# Call to send email function in emailer.py
status = send(data['email'], OTP)
# token_str = token.decode("utf-8")
lnk = 'http://127.0.0.1:5000/reset-password?token='
if status == 202:
return jsonify(
{
'success': True,
                'message': 'Email sent successfully!',
'OTP': OTP,
'Link': lnk
}
), 200
else:
return jsonify(
{
'success': False,
'message': 'Please Try Again!!',
}
), 200
# ====================
# VERIFY OTP
# ====================
@authentication_bp.route('/verify-otp', methods=['POST'])
def verifyotp():
data = request.get_json()
row = OTPS.query.filter_by(email=data['email'],otp=data['otp']).first()
if not row:
return jsonify(
{
'success': False,
'message': 'OTP is Incorrect. Enter Again!'
}
), 200
if data['otp'] == row.otp:
return jsonify(
{
'success': True,
'message': 'Verification Successful!'
}
), 200
# ====================
# RESET PASSWORD
# ====================
@authentication_bp.route('/reset-password', methods=['POST'])
def resetpassword():
passw = request.get_json()
user = Users.query.filter_by(email = passw['email']).first()
if passw['newpassword'] == passw['confirmpassword']:
hashed_password = generate_password_hash(passw['newpassword'], method='sha256')
user.password = hashed_password
db.session.commit()
return jsonify(
{
'success': True,
'message': 'Password changed successfully!',
}
), 200
else:
return jsonify(
{
'success': False,
'message': 'Password did not match. Enter again!',
}
)
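# Example requests against this blueprint (illustrative only; the payload field
# names match the handlers above, host/port are assumptions):
#   POST /signup          {"name": "Alice", "email": "a@example.com", "password": "s3cret"}
#   POST /login           {"email": "a@example.com", "password": "s3cret"}
#   POST /forgot-password {"email": "a@example.com"}
#   POST /verify-otp      {"email": "a@example.com", "otp": "123456"}
# Authenticated endpoints expect the JWT in an 'Authentication' request header.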
|
# coding: utf-8
from __future__ import unicode_literals
import os
import platform
from itertools import imap
def get_A_B(x1, y1, x2, y2):
if x1 == x2:
return 'undf', 'undf'
x1 = float(x1)
x2 = float(x2)
y1 = float(y1)
y2 = float(y2)
if not x1:
b = y1
a = (y2-b)/x2
elif not x2:
b = y2
a = (y1-b)/x1
else:
x_diff = x1/x2
b = (y1-x_diff*y2)/(1-x_diff)
a = (y1-b)/x1
return [a, b]
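# Quick sanity check of the math (illustrative): the line through (0, 1) and
# (2, 5) has slope a = (5 - 1) / 2 = 2 and intercept b = 1, so
# get_A_B(0, 1, 2, 5) should return [2.0, 1.0].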
def coord_processing(pol_id, icoords):
to_return = []
prev, next = icoords.next(), icoords.next()
while next:
y1 = prev[0]
x1 = prev[1]
cut_ind_y1 = y1.index('.')+10
cut_ind_x1 = x1.index('.')+10
cut_y1, prec_y1 = y1[:cut_ind_y1], y1[cut_ind_y1:]
cut_x1, prec_x1 = float(x1[:cut_ind_x1]), x1[cut_ind_x1:]
y2 = next[0]
x2 = next[1]
cut_ind_y2 = y2.index('.')+10
cut_ind_x2 = x2.index('.')+10
cut_y2, prec_y2 = y2[:cut_ind_y2], y2[cut_ind_y2:]
cut_x2, prec_x2 = float(x2[:cut_ind_x2]), x2[cut_ind_x2:]
if cut_x1 < cut_x2:
li = [float(x1), prec_x1, x1, y1, x2, y2, ]
elif cut_x2 < cut_x1:
li = [float(x2), prec_x2, x2, y2, x1, y1, ]
else:
if prec_x1 < prec_x2:
li = [float(x1), prec_x1, x1, y1, x2, y2, ]
elif prec_x2 < prec_x1:
li = [float(x2), prec_x2, x2, y2, x1, y1, ]
else:
print 'Warning, pol_id={0} has vertical line, what to do???'.format(pol_id)
print 'x1: ', x1, 'y1: ', y1, 'x2: ', x2, 'y2: ', y2
try:
prev = next
next = icoords.next()
except StopIteration:
next = None
continue
li.append(pol_id)
li.extend(get_A_B(x1, y1, x2, y2))
to_return.append(li)
try:
prev = next
next = icoords.next()
except StopIteration:
next = None
return to_return
if platform.system() == 'Windows':
    # fixme: fill in the correct paths here
files_dir = 'c://python27/tree/EttonProducts/offline/Files/{0}'
# dma_file = 'c://python27/tree/EttonProducts/offline/dma.data'
dma_file = 'c://python27/tree/EttonProducts/offline/dma-cut'
else:
files_dir = '/home/damir/Projects/Tree/outer_sort/Files/{0}'
dma_file = '/home/damir/Projects/Tree/outer_sort/dma.data'
with open(dma_file) as dma: # 210 polygons, 9797 coordinates (8792)
j = 0
l = []
for line in dma:
pol_id, s, coords = line.split('\t')
icoords = imap(lambda x: x.split(), coords.split(','))
l.extend(coord_processing(pol_id, icoords))
if len(l) > 1000:
print len(l), 'len is up 1000'
l.sort(key=lambda el: (el[0], el[1]))
with open(files_dir.format('f{0}'.format(j)), 'w') as f:
for li in l:
f.write(' '.join(imap(str, li))+'\n')
l = []
j += 1
# if j == 2:
# break
files_ = next(os.walk(files_dir.format('')))[2]
def get_file_next_line(filename):
"""
Returns next line of opened file or None
"""
try:
f_line = filename.next()
except StopIteration:
# print filename, 'Stop ietr'
f_line = None
return f_line
def merge(files):
    # nothing to do if all the data fit into a single file from the start
if len(files) > 1:
f1_name, f2_name = files[:2]
with open(files_dir.format(f1_name+f2_name), 'w') as f_merge:
with open(files_dir.format(f1_name)) as f1:
with open(files_dir.format(f2_name)) as f2:
f1_line = get_file_next_line(f1)
f2_line = get_file_next_line(f2)
while 1:
if f1_line and f2_line:
f1_info = f1_line.split()
f2_info = f2_line.split()
f1_x = float(f1_info[0])
f2_x = float(f2_info[0])
if f1_x < f2_x:
f_merge.write(f1_line)
f1_line = get_file_next_line(f1)
elif f1_x > f2_x:
f_merge.write(f2_line)
f2_line = get_file_next_line(f2)
else:
if f1_info[1] < f2_info[1]:
f_merge.write(f1_line)
f1_line = get_file_next_line(f1)
elif f1_info[1] > f2_info[1]:
f_merge.write(f2_line)
f2_line = get_file_next_line(f2)
else:
f_merge.write(f1_line)
f_merge.write(f2_line)
f1_line = get_file_next_line(f1)
f2_line = get_file_next_line(f2)
elif f1_line is None and f2_line is None:
break
elif f2_line is None:
while f1_line:
f_merge.write(f1_line)
f1_line = get_file_next_line(f1)
break
else: # f1_line is None
while f2_line:
f_merge.write(f2_line)
f2_line = get_file_next_line(f2)
break
os.remove(files_dir.format(f1_name))
os.remove(files_dir.format(f2_name))
new_files = files[2:] + [f1_name+f2_name, ]
merge(new_files)
    # either only the final merged (sorted) file remains at the end,
    # or there was just one file to begin with, which is already sorted
else:
pass
merge(files_)
|
#cd d:05learn/python_learn/selflearning
# print(r)
# print(1 == 2)
# print(1==2)
# print(1!=2)
# print("a" in "basic")
#import random
#r = random.randrange(1,10000)
#if r%2 == 0:
# print(r,'is even.')
#else:
# print(r,'is odd.')
#for i in range(10):
# if i%2 !=0:
# print(i)
#for n in range(2,100):
# if n == 2:
# print(n)
# continue
# for i in range(2,int(n**0.5) + 1):
# if(n%i) == 0:
# break
# else:
# print(n)
#a = abs(-3.1415926)
#print(a)
# Check whether a number is prime
#def is_prime(n):
# if n<2:
# return False
# if n == 2:
# return True
# for m in range(2,int(n**0.5) + 1):
# if(n%m) == 0:
# return False
# else:
# return True
#for i in range(80,110):
# if(is_prime(i)):
# print(i)
x=0
x+= 2
print(x)
|
# Verifying the certificates... openssl verify -CAfile rootcrt.pem servercrt.pem
from cryptography.hazmat.primitives import serialization
from ..Config import Config
from pathlib import Path
from flask import current_app
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509 import Certificate
class FileSystemProvider():
cert_installation_path = Path(Config.certInstallationPath)
def __init__(self) -> None:
pass
def getInstallationPath(self) -> Path:
return self.cert_installation_path
def check_prerequisite(self):
if not self.cert_installation_path.exists():
            current_app.logger.info("Specified installation path: " + str(self.cert_installation_path))
            current_app.logger.error("The default installation path for the certificates does not exist! You have to create it manually! :(")
@classmethod
def checkDomainExists(cls, persistence_identifier: str) -> bool:
domainDependingDirectory = cls.cert_installation_path.joinpath(persistence_identifier)
return domainDependingDirectory.exists()
@classmethod
def createNamespaceForDomainForSavingKeysAndCertificate(cls, persistence_identifier: str) -> bool:
domainNamespace = cls.cert_installation_path.joinpath(persistence_identifier)
if not cls.checkDomainExists(persistence_identifier):
domainNamespace.mkdir(mode=0o777)
return True
else:
return domainNamespace.exists()
@classmethod
def checkKeyExists(cls, persistence_identifier: str) -> bool:
keyFilename = persistence_identifier + Config.keyDefaultFiletype
return cls.cert_installation_path.joinpath(persistence_identifier, keyFilename).exists()
@classmethod
def createKeyEntry(cls, persistence_identifier: str, key: rsa.RSAPrivateKey) -> bool:
if not cls.createNamespaceForDomainForSavingKeysAndCertificate(persistence_identifier):
return False
else:
if not cls.checkKeyExists(persistence_identifier):
keyFilename = persistence_identifier + Config.keyDefaultFiletype
keyFile = cls.cert_installation_path.joinpath(persistence_identifier, keyFilename)
keyFile.touch()
keyFile.write_bytes(key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption()
))
return True
else:
return False
@classmethod
def checkCertExists(cls, persistence_identifier: str) -> bool:
certFilename = persistence_identifier + Config.certDefaultFiletype
return cls.cert_installation_path.joinpath(persistence_identifier, certFilename).exists()
@classmethod
def createCertEntry(cls, persistence_identifier: str, cert: Certificate) -> bool:
if not cls.createNamespaceForDomainForSavingKeysAndCertificate(persistence_identifier):
return False
else:
if not cls.checkCertExists(persistence_identifier):
certFilename = persistence_identifier + Config.certDefaultFiletype
certFile = cls.cert_installation_path.joinpath(persistence_identifier, certFilename)
certFile.touch()
certFile.write_bytes(cert.public_bytes(
serialization.Encoding.PEM
))
return True
else:
return False
@classmethod
def checkCSRExists(cls, persistence_identifier: str):
pass
|
"""
Project functionality.
"""
from ace import config
from ace import client
import json
import sys
def dump_project(args):
"""
Dumps the project to standard out.
"""
if not config.get_active_project(args):
raise ValueError('Must specify project.')
project_resource = client.get_resource('axilent.library','project',config.get_library_key(args),args)
project_data = project_resource.get()
sys.stdout.write(json.dumps(project_data['project-data']))
def load_project(args):
"""
Loads the project from the data file.
"""
if not (config.get_active_project(args) and args.data_file):
raise ValueError('Must specify both project and data file.')
project_resource = client.get_resource('axilent.library','project',config.get_library_key(args),args)
data = None
with open(args.data_file,'r') as data_file:
data = data_file.read()
project_resource.put(data={'project-data':data})
print 'Project data loaded.'
def ping_library(args):
"""
Pings the project library.
"""
c = client.get_client('axilent.library',config.get_library_key(args),args)
c.ping()
print 'pong'
|
import _thread
import pickle
import socket
from game import Game
server = "192.168.0.102" # Local IP address of the server
port = 5555 # Port to listen
# Instantiate server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Setup server
try:
s.bind((server, port))
except socket.error as e:
print(e)
# Start server
s.listen(2)
print("Server started, waiting for connection...")
connected = set() # Holds the IDs of connected players
games = {} # gameId: Game instance
idCount = 0 # Holds the count of connected players
def threaded_client(conn, p, gameId):
"""
A new instance of this client is created for every new client that connects.
All the instances run on their own thread.
This implements all the server side functionality for the client
:param conn: socket connection instance
:param p: [0, 1] player number
:param gameId: to which game the player belongs
:return: None
"""
global idCount
    # As soon as the player connects, their player number is sent as an encoded string
conn.send(str.encode(str(p)))
reply = ""
while True:
# Data from client is an encoded string as defined in Network
data = conn.recv(4096).decode()
try:
# if the game exists, fetch its instance
if gameId in games:
game = games[gameId]
if not data:
break
# if we have data
else:
if data == 'reset':
game.reset()
# if it is a move, 'rock', 'paper' or 'scissors'
elif data != 'get':
game.play(p, data)
# Any required operations are done and the whole game instance is sent
reply = game
conn.send(pickle.dumps(reply))
else:
break
except socket.error as e:
print(e)
break
print("Lost connection")
# If the game still exists
try:
del games[gameId]
print("Closing game: ", gameId)
except KeyError as e:
print(e)
idCount -= 1
conn.close()
while True:
conn, addr = s.accept()
print("Connected to", addr)
# Increment player count
idCount += 1
# Assume player 1
p = 0
# Game ID is int division of idCount
gameId = (idCount - 1) // 2
# If he's 1st player, create a new game with gameId
if idCount % 2 == 1:
games[gameId] = Game(gameId)
print("Creating new game")
# Else, ready the game with gameId and make that player player 2
else:
games[gameId].ready = True
p = 1
# Start the player's thread
_thread.start_new_thread(threaded_client, (conn, p, gameId))
|
# linux: curl https://raw.githubusercontent.com/CrazyVideoGamer/auto-backup/main/get_auto_backup.py | python3 <-- not yet tested
# windows cmd: curl https://raw.githubusercontent.com/CrazyVideoGamer/auto-backup/main/get_auto_backup.py | python <-- not yet tested
from pathlib import Path
import argparse
import sys, platform, subprocess, shutil
def does_git_exist():
    # shutil.which works on both Linux and Windows; the previous shell-based
    # check never captured stdout, so its result could not be inspected
    return shutil.which("git") is not None
def error_message(message: str) -> None:
# \u001b[31m makes it red, \u001b[0m makes the color reset
system = platform.system()
if system == "Windows":
subprocess.run("color", shell=True)
print("\u001b[31;1m" + message + "\u001b[0m", file=sys.stderr)
    sys.exit(1)  # non-zero exit status, since this is an error path
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Install or uninstall auto-backup")
parser.add_argument("--uninstall", action='store_true')
return parser
args = create_parser().parse_args()
system = platform.system()
if system == "Linux":
install_path = Path.home() / ".auto-backup"
if not args.uninstall:
if (install_path.exists() and (install_path / '.is-auto-backup').exists()):
error_message("Already installed auto-backup")
elif (install_path.exists() and not (install_path / '.is-auto-backup').exists()):
error_message("Could not create the ~/.auto-backup directory: directory already exists")
else: # this else statement only runs if ~/.auto-backup doesn't exist
install_path.mkdir(exist_ok=True)
(install_path / ".is-auto-backup").touch()
# add this git repository (and also leave out the .git directory and name it `backup`)
if does_git_exist():
if not (install_path / "backup").exists():
print("Installing auto-backup...")
subprocess.run("git clone --quiet --depth=1 --branch=main https://github.com/CrazyVideoGamer/auto-backup.git backup", cwd=install_path, shell=True)
subprocess.run("rm -rf ./backup/.git", cwd=install_path, shell=True)
subprocess.run("pip install -r requirements.txt -t lib2 > /dev/null 2>&1", cwd=(install_path / "backup"), shell=True)
print("Done!\n")
else:
error_message("Please install git using `sudo apt install git`")
print(f"add `export PATH={str(install_path / 'auto-backup' / 'backup' / 'bin')}:$PATH` to ~/.bashrc to complete the installation")
else:
if (not install_path.exists() or (install_path.exists() and not (install_path / '.is-auto-backup').exists())):
error_message("auto-backup is not installed")
print("Uninstalling auto-backup...")
subprocess.run(f"rm -r {str(install_path)}", shell=True)
print("Done!")
elif system == "Windows":
install_path = Path.home() / "auto-backup"
if not args.uninstall:
if (install_path.exists() and (install_path / 'is-auto-backup').exists()):
error_message("Already installed auto-backup")
elif (install_path.exists() and not (install_path / 'is-auto-backup').exists()):
error_message(f"Could not create the {str(install_path)} directory: directory already exists")
else:
install_path.mkdir(exist_ok=True)
(install_path / "is-auto-backup").touch() # note that it is "is-auto-backup", not ".is-auto-backup"
# add this git repository (and also leave out the .git directory and name it `backup`)
if does_git_exist():
if not (install_path / "backup").exists():
print("Installing auto-backup...")
subprocess.run("git clone --quiet --depth=1 --branch=main https://github.com/CrazyVideoGamer/auto-backup.git backup", cwd=install_path, shell=True)
subprocess.run("rd /s /q \"backup/.git\"", cwd=install_path, shell=True)
subprocess.run("pip install -r requirements.txt -t lib > nul 2>&1", cwd=(install_path / "backup"), shell=True)
print("Done!\n")
else:
error_message("Please install git first (go to https://git-scm.com/) ")
print(f"add {str(install_path / 'auto-backup' / 'backup' / 'bin')} to PATH to complete the installation")
else:
if (not install_path.exists() or (install_path.exists() and not (install_path / 'is-auto-backup').exists())):
error_message("auto-backup is not installed")
print("Uninstalling auto-backup...")
subprocess.run(f"rd /s /q {str(install_path)}", shell=True)
print("Done!")
else:
error_message("System {system} is not supported")
|
class Event:
def __init__(self, eventName, eventDate, eventDesc, eventWeb, eventCategory, eventFood):
self.eventName = eventName
self.eventDate = eventDate
self.eventDesc = eventDesc
self.eventWeb = eventWeb
self.eventCategory = eventCategory
self.eventFood = eventFood
|
import A
A.txt_line("D://hello.txt","D://hello2.txt")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:15:16 2018
@author: user
質數
"""
def compute(x):
i=2
while i < x:
if (x%i)==0:
return False
i+=1
return True
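# A faster variant (same result) would only test divisors up to sqrt(x),
# e.g. `while i * i <= x:` instead of `while i < x:`.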
x=int(input())
if x <= 1:
print("Not Prime")
elif x == 2:
print("Prime")
else:
    if compute(x):
        print("Prime")
    else:
        print("Not Prime")
|
#!/usr/bin/env python
import unittest
import time  # used in tearDown (may also arrive via the wildcard imports below)
from main.activity.activity_login import *
from main.activity.activity_logout import *
from main.activity.activity_myshop_editor import *
from main.lib.user_data import *
from main.function.setup import *
class Test_add_etalase(unittest.TestCase):
_site = "live"
def setUp(self):
print ('[VALIDATION TEST] "Myshop-Etalase"')
self.driver = tsetup("firefox")
def test_1_check_add_and_edit_etalase(self):
print ("TEST #1 : [Validation] Add and Edit Etalase using Unknown Character")
driver = self.driver
email = user2['email']
pwd = user2['pwd']
#Object activity
login = loginActivity()
myshop_etalase = myshopEditorActivity()
logout = logoutActivity()
#Object initiation
myshop_etalase.setObject(driver)
#Action
login.do_login(driver, email, pwd, self._site)
myshop_etalase.goto_myshop_editor(self._site)
myshop_etalase.click_tab_etalase(self.driver)
myshop_etalase.add_then_edit_etalase(self.driver)
#self.etalse.do_validation()
logout.do_logout(driver,self._site)
def test_2_check_act_n_times(self):
#test_user_login(self.driver, self.dict_user['email'], self.dict_user['password'], self._site)
print ("TEST #2 : [Validation] Act N-times ")
driver = self.driver
email = user2['email']
pwd = user2['pwd']
#Object activity
login = loginActivity()
myshop_etalase = myshopEditorActivity()
logout = logoutActivity()
#Object initiation
myshop_etalase.setObject(driver)
#Action
login.do_login(driver, email, pwd, self._site)
myshop_etalase.goto_myshop_editor(self._site)
myshop_etalase.click_tab_etalase(self.driver)
#
print ("==========")
print ("Act - Edit")
print ("==========")
myshop_etalase.act_n_times(self.driver, "edit", 15)
#self.etalse.do_validation()
logout.do_logout(driver,self._site)
def tearDown(self):
print("Testing akan selesai dalam beberapa saat..")
time.sleep(3)
self.driver.close()
# main
if __name__ == "__main__":
unittest.main(warnings='ignore')
|
#!/usr/bin/python
import numpy as np
import pandas as pd
import random
import sys
import csv
def create_data(csv_path):
    data_frame = pd.read_csv(csv_path)
    # Generates a test dataset of 250 records; the sampled records are removed
    # from the original file, leaving a training dataset of 1315 records for
    # building smaller datasets.
    sample_frame_indexes = random.sample(list(data_frame.index), 250)
    sample_frame = data_frame.loc[sample_frame_indexes]
    print(sample_frame)
    sample_frame.to_csv('test250_data_new.csv')
    new_frame = data_frame.drop(sample_frame_indexes)
    print(new_frame)
    new_frame.to_csv('train_data_cleaned_new.csv')
create_data(sys.argv[1])
#create_test_frame(sys.argv[1])
#count_frame(sys.argv[1])
#edit_data(sys.argv[1])
#create_data(sys.argv[1])
#count_results(sys.argv[1])
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'IpTracker.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(547, 451)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setStatusTip("")
self.centralwidget.setObjectName("centralwidget")
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setGeometry(QtCore.QRect(10, 10, 331, 391))
font = QtGui.QFont()
font.setPointSize(12)
self.textBrowser.setFont(font)
self.textBrowser.setObjectName("textBrowser")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(360, 20, 161, 187))
self.widget.setObjectName("widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButtonRefresh = QtWidgets.QPushButton(self.widget)
font = QtGui.QFont()
font.setPointSize(12)
self.pushButtonRefresh.setFont(font)
self.pushButtonRefresh.setObjectName("pushButtonRefresh")
self.verticalLayout.addWidget(self.pushButtonRefresh)
self.label = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(12)
self.label.setFont(font)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.labelcurrentIP = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(12)
self.labelcurrentIP.setFont(font)
self.labelcurrentIP.setObjectName("labelcurrentIP")
self.verticalLayout.addWidget(self.labelcurrentIP)
self.label_3 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(12)
self.label_3.setFont(font)
self.label_3.setText("")
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.labelPastIP = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(12)
self.labelPastIP.setFont(font)
self.labelPastIP.setText("")
self.labelPastIP.setObjectName("labelPastIP")
self.verticalLayout.addWidget(self.labelPastIP)
self.pushButtonClose = QtWidgets.QPushButton(self.widget)
font = QtGui.QFont()
font.setPointSize(12)
self.pushButtonClose.setFont(font)
self.pushButtonClose.setObjectName("pushButtonClose")
self.verticalLayout.addWidget(self.pushButtonClose)
self.labelTimeNow = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(12)
self.labelTimeNow.setFont(font)
self.labelTimeNow.setObjectName("labelTimeNow")
self.verticalLayout.addWidget(self.labelTimeNow)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 547, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Ip_Time_Loger"))
self.textBrowser.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:12pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">1</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">2</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">3</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">4</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">5</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">6</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">7</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">8</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">9</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">10</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">11</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">12</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">13</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">14</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">15</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">16</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">17</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">18</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">19</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'MS Shell Dlg 2\';\">20</span></p></body></html>"))
self.pushButtonRefresh.setText(_translate("MainWindow", "Обновить"))
self.label.setText(_translate("MainWindow", "Текущий IP"))
self.labelcurrentIP.setText(_translate("MainWindow", "127.0.0.255"))
self.pushButtonClose.setText(_translate("MainWindow", "Закрыть"))
self.labelTimeNow.setText(_translate("MainWindow", "Time"))
|
from enum import Enum, IntEnum
from typing import Dict, List
from spectree import BaseFile, ExternalDocs, SecurityScheme, SecuritySchemeData, Tag
from spectree._pydantic import BaseModel, Field, root_validator
from spectree.utils import hash_module_path
api_tag = Tag(
name="API", description="🐱", externalDocs=ExternalDocs(url="https://pypi.org")
)
class Order(IntEnum):
asce = 0
desc = 1
class Query(BaseModel):
order: Order
class FormFileUpload(BaseModel):
file: BaseFile
class Form(BaseModel):
name: str
limit: str
class JSON(BaseModel):
name: str
limit: int
class ListJSON(BaseModel):
__root__: List[JSON]
class StrDict(BaseModel):
__root__: Dict[str, str]
class Resp(BaseModel):
name: str
score: List[int]
class Language(str, Enum):
en = "en-US"
zh = "zh-CN"
class Headers(BaseModel):
lang: Language
@root_validator(pre=True)
def lower_keys(cls, values):
return {key.lower(): value for key, value in values.items()}
class Cookies(BaseModel):
pub: str
class DemoModel(BaseModel):
uid: int
limit: int
name: str = Field(..., description="user name")
class DemoQuery(BaseModel):
names1: List[str] = Field(...)
names2: List[str] = Field(..., style="matrix", explode=True, non_keyword="dummy")
def get_paths(spec):
paths = []
for path in spec["paths"]:
if spec["paths"][path]:
paths.append(path)
paths.sort()
return paths
# data from example - https://swagger.io/docs/specification/authentication/
SECURITY_SCHEMAS = [
SecurityScheme(
name="auth_apiKey",
data=SecuritySchemeData.parse_obj(
{"type": "apiKey", "name": "Authorization", "in": "header"}
),
),
SecurityScheme(
name="auth_apiKey_backup",
data=SecuritySchemeData.parse_obj(
{"type": "apiKey", "name": "Authorization", "in": "header"}
),
),
SecurityScheme(
name="auth_BasicAuth",
data=SecuritySchemeData.parse_obj({"type": "http", "scheme": "basic"}),
),
SecurityScheme(
name="auth_BearerAuth",
data=SecuritySchemeData.parse_obj({"type": "http", "scheme": "bearer"}),
),
SecurityScheme(
name="auth_openID",
data=SecuritySchemeData.parse_obj(
{
"type": "openIdConnect",
"openIdConnectUrl": "https://example.com/.well-known/openid-cfg",
}
),
),
SecurityScheme(
name="auth_oauth2",
data=SecuritySchemeData.parse_obj(
{
"type": "oauth2",
"flows": {
"authorizationCode": {
"authorizationUrl": "https://example.com/oauth/authorize",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"read": "Grants read access",
"write": "Grants write access",
"admin": "Grants access to admin operations",
},
},
},
}
),
),
]
WRONG_SECURITY_SCHEMAS_DATA = [
{
"name": "auth_apiKey_name",
"data": {"type": "apiKey", "name": "Authorization"},
},
{
"name": "auth_apiKey_in",
"data": {"type": "apiKey", "in": "header"},
},
{
"name": "auth_BasicAuth_scheme",
"data": {"type": "http"},
},
{
"name": "auth_openID_openIdConnectUrl",
"data": {"type": "openIdConnect"},
},
{
"name": "auth_oauth2_flows",
"data": {"type": "oauth2"},
},
{
"name": "empty_Data",
"data": {},
},
{"name": "wrong_Data", "data": {"x": "y"}},
]
def get_model_path_key(model_path: str) -> str:
"""
generate short hashed prefix for module path (instead of its path to avoid
code-structure leaking)
:param model_path: `str` model path in string
"""
model_path, _, model_name = model_path.rpartition(".")
if not model_path:
return model_name
return f"{model_name}.{hash_module_path(module_path=model_path)}"
|
def findNumbers(num):
    # Skip multiples of 5; otherwise print the 5 integers starting at num * 5.
    if num % 5 == 0:
        return None
    f = num * 5
    for i in range(f, f + 5):
        print(i)

num = 2  # note: unused; the call below passes 24 directly
findNumbers(24)
|
from django.http import HttpResponse
from django.shortcuts import render
from .models import Animal, AnimalPhoto, Seviye
def home(request):
    # render() already returns an HttpResponse, so no extra wrapping is needed
    return render(request, 'giris.html')
def oyun(request):
seviyes = Seviye.objects.all()
animals = Animal.objects.all()
animalPh = AnimalPhoto.objects.all()
context = {'animal': animals, 'animalPhoto': animalPh, 'seviye': seviyes}
return render(request, 'index.html', context)
|
import numpy as np
import warnings
class CorrectionWarning(Warning):
""" Warning type used when something fishy is detected in our correction steps
to use:
>>> import warnings
>>> warnings.warn('message', category=CorrectionWarning)
By default, only one identical warning will be shown originating on a particular line,
and subsequent identical warnings will be ignored.
This can be controlled using the warnings API documented at https://docs.python.org/3/library/warnings.html
"""
def warn_if_any_true(possible_warnings_series):
""" Raise a CorrectionWarning if any of the values in a boolean series are True.
Args:
possible_warnings_series: pandas Series of booleans, which if True, should raise a warning.
Returns:
None
Warns:
uses the warnings API and CorrectionWarning if any red flags are present.
"""
if possible_warnings_series.dtype != np.bool_:
raise ValueError(
f"possible warnings {possible_warnings_series} must be a boolean series"
)
# Filter only to True values (`possible_warnings_series` itself is a boolean series which can be used to filter)
raised_warnings = possible_warnings_series[possible_warnings_series]
if raised_warnings.any():
warnings.warn(
f"Diagnostic warning flags: {raised_warnings.index.values}",
category=CorrectionWarning,
stacklevel=2, # Raise on the immediate caller's level, not this level
)
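
# Minimal usage sketch (the flag names are hypothetical), runnable as a script:
if __name__ == "__main__":
    import pandas as pd

    flags = pd.Series(
        [False, True, False],
        index=["negative_price", "timestamp_out_of_range", "duplicate_row"],
    )
    # Emits CorrectionWarning: "Diagnostic warning flags: ['timestamp_out_of_range']"
    warn_if_any_true(flags)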
|
from bs4 import BeautifulSoup
import urllib2
import csv
import random
import time
import os
## setting working directory ------------------------------------------------
os.chdir("C:/Users/wooki/Documents/GitHub/pythoncourse2018/homeworks/hw2")
with open('hw2_lim.csv', 'wb') as f:
w = csv.DictWriter(f, fieldnames = ("Title", "Published date", "Issues", "Number of signatures"))
w.writeheader()
still_petitions = True
i=0
while still_petitions:
web_address='https://petitions.whitehouse.gov/?page=%i' %i
i += 1
web_page = urllib2.urlopen(web_address)
all_html = BeautifulSoup(web_page.read())
all_petition = all_html.find_all("h3")
    for p in all_petition[3:]:
petition = {} ## empty dictionary to fill in
petition["Title"] = p.get_text().encode("utf-8")
address = p.find('a')
extension = address["href"]
#date
pet_page = urllib2.urlopen('https://petitions.whitehouse.gov%s' % extension)
petition_html = BeautifulSoup(pet_page.read())
        fordate = petition_html.find('h4', {'class': "petition-attribution"})
        pre = fordate.text.strip()
        petition["Published date"] = pre.split("on")[1].encode("utf-8")
        # issue
        issue = petition_html.find('div', {'class': "field-item even"})
        petition["Issues"] = issue.text.strip().encode("utf-8")
        # number of signatures
        num_sig = petition_html.find("span", {'class': "signatures-number"})
        petition["Number of signatures"] = num_sig.text.strip().encode("utf-8")
## write observation to csv
w.writerow(petition)
try:
page= all_html.find('div', {'class':"page-load-next"}).text
except AttributeError:
still_petitions = False
|
"""
This program includes methods used to simulate basic battle simulations
for the game StarCraft II
"""
import random
from sim_units import get_Units
Units = get_Units()
def combat_sim(army_comp1, army_comp2, MAX_ROUNDS=200):
"""
Army 1 is enemy, Army 2 is test
Input two army compositions written as dictionary with unit names as
keys, unit counts as values. Simulates combat with the two army.
The first army is assumed to be the enemy army, the second army is
the army composition we are testing viability for.
If combat takes longer than 200 seconds then it is assumed to be a
stalemate and army2 is forcibly killed off
Returns remaining army composition of army2
"""
army1 = build_army(army_comp1)
army2 = build_army(army_comp2)
round1 = True
rounds = 0
while ((get_health(army1) > 0) and (get_health(army2) > 0)
and (rounds <= MAX_ROUNDS)):
if round1:
deal_ranged_dps(army1, army2)
deal_ranged_dps(army2, army1)
round1 = False
else:
healing_units(army1)
healing_units(army2)
deal_damage(army1, army2)
deal_damage(army2, army1)
army1 = remove_dead_units(army1)
army2 = remove_dead_units(army2)
build_Interceptor(army1)
build_Interceptor(army2)
track_Locust_Broodlings(army1)
track_Locust_Broodlings(army2)
rounds += 1
if rounds >= MAX_ROUNDS:
army2.clear()
return army2
class Unit:
"""
This class is used to represent each unit in an army
"""
def __init__(self, name, army):
"""
Creates a new Unit object with the stats of the given name
"""
self.name = name
if Units[name]['armor'] > 0:
self.hp = Units[name]['hp'] * Units[name]['armor'] * 1.5
else:
self.hp = Units[name]['hp']
self.max_hp = self.hp
self.dps = Units[name]['dps']
self.ranged = Units[name]['ranged']
self.attributes = Units[name]['attributes']
self.type = Units[name]['type']
self.targetable = Units[name]['targetable']
self.bonuses = Units[name]['bonuses']
self.bonus_dps = Units[name]['bonus_dps']
self.time = Units[name]['time']
self.healer = Units[name]['healer']
# self.repaired is used only for healing units so that they cannot
# repair and attack in the same round
# repaired = True if Unit has repaired that round
self.repaired = False
# Carrier/Interceptor interaction
if name == 'Carrier':
self.child = {}
for n in range(8):
self.child[n] = Unit('Interceptor', army)
army.append(self.child[n])
# child_time is used to keep track of time until able to
# build a new Interceptor child
self.child_time = 0
# SwarmHost/Locust interaction
if name == 'SwarmHost':
self.child = {}
for n in range(2):
self.child[n] = Unit('Locust', army)
army.append(self.child[n])
if name == 'Locust':
self.live_time = 18
# BroodLord/Broodling interaction
if name == 'BroodLord':
self.spawn_time = 0
if name == 'Broodling':
self.live_time = 6
def __str__(self):
return self.name
def __repr__(self):
return str(self.name) + "_HP:" + str(self.hp)
def healing_units(army):
"""
Input is a list of Units in an army
If applicable, Units with the "healer" tag restore
health to friendly Units in their army.
Medivacs are allowed to split their healing, but we are
restricting SCVs to only heal one target due to the
    variability of the SCV repair ability
"""
for unit in army:
if unit.healer:
allies = army.copy()
allies.remove(unit)
if unit.name == 'Medivac':
heal = 12.6
attribute = 'Biological'
                # Medivacs split their healing the same way other units can split dps
while ((heal > 0) and (get_healable_units(allies, attribute) is not None)):
target = get_healable_units(allies, attribute)
restore = target.max_hp - target.hp
temp_heal = heal
heal -= restore
restore -= temp_heal
target.hp = target.max_hp - restore
# prevent overhealing
if target.hp > target.max_hp:
target.hp = target.max_hp
unit.repaired = True
elif unit.name == 'SCV':
# SCVs will only be allowed to repair a single target
attribute = 'Mechanical'
if get_healable_units(allies, attribute) is not None:
target = get_healable_units(allies, attribute)
heal = target.max_hp / target.time
target.hp += heal
# prevent overhealing
if target.hp > target.max_hp:
target.hp = target.max_hp
unit.repaired = True
def get_healable_units(allies, attribute):
"""
Helper function to be used with healing_units
allies is list of friendly Units, attribute is a string
either "Biological" or "Mechanical" that determines the
type of unit that can be healed
Randomly chooses an allied Unit that can be healed
returns that chosen Unit, returns None if no unit can be healed
"""
heal_targets = []
for ally in allies:
if (attribute in ally.attributes) and (ally.hp < ally.max_hp):
heal_targets.append(ally)
if len(heal_targets) == 0:
return None
else:
index = random.randint(0,len(heal_targets)-1)
target = heal_targets[index]
return target
def get_health(army):
"""
Input is list of Units in an army
Returns the sum of the remaining units in that army
"""
health = 0
for unit in army:
health += unit.hp
return health
def get_damage(army):
"""
Input is a list of Units in an army
Returns the total dps of that army
"""
damage = 0
for unit in army:
damage += unit.dps
return damage
def build_army(army_comp):
"""
Input is a dictionary representing an army composition
Returns a list of Units matching the army composition
"""
army = []
for name in army_comp:
count = army_comp[name]
while count >= 1:
army.append(Unit(name, army))
count -= 1
return army
def get_attackable_unit(unit, enemy_army):
"""
Input is attacking Unit and list of Units in enemy army
Returns a random Unit in enemy army that attacking unit can attack
If no enemy Unit can be attacked, return None
"""
# create list of enemies unit can attack
# targeting type (ground/air) and if enemy has hp>0
enemies = []
for enemy in enemy_army:
if enemy.hp > 0:
targetable = 0
for target_type in unit.targetable:
for enemy_type in enemy.type:
targetable += int(enemy_type == target_type)
if targetable > 0:
enemies.append(enemy)
if len(enemies) == 0:
return None
else:
# randomly chooses an enemy to attack
index = random.randint(0,len(enemies)-1)
enemy = enemies[index]
return enemy
def deal_damage(army, enemy_army):
"""
Input army is a list of attacking Units,
enemy_army is list of Units being attacked.
Calculates the damage dealt to enemy_army by all attacking units
Updates the list of enemy Units with damaged health numbers
"""
for unit in army:
deal_unit_dps(unit, enemy_army, army)
def deal_unit_dps(unit, enemy_army, ally_army):
"""
Input is attacking Unit and list of Units being attacked
Calculates the damage dealt to enemy army by the single unit
Updates the list of enemy Units with damaged health numbers
"""
if not unit.repaired:
damage = unit.dps
bonus_dmg = unit.bonus_dps
while (damage > 0) and (get_attackable_unit(unit, enemy_army) is not None):
enemy = get_attackable_unit(unit, enemy_army)
            # check for bonus damage; bonus damage is kept separate from
            # normal damage
bonus = 0
# so long as there is some bonus dps to damage, check if
# there is at least one matching attribute
if bonus_dmg > 0:
for bonus_att in unit.bonuses:
for att in enemy.attributes:
bonus += int(bonus_att == att)
if bonus > 0:
bonus_dmg_tmp = bonus_dmg
bonus_dmg -= enemy.hp
enemy.hp -= bonus_dmg_tmp
dmg_temp = damage
damage -= enemy.hp
enemy.hp -= dmg_temp
# Banelings kill themselves after attacking
if unit.name == 'Baneling':
unit.hp = 0
# BroodLords spawn Broodlings on hit, every 1.79 seconds
# This will be rounded to spawn a Broodling every other round
if unit.name == 'BroodLord':
if unit.spawn_time == 0:
                ally_army.append(Unit('Broodling', ally_army))
unit.spawn_time = 1
else:
unit.spawn_time -= 1
# reset repaired status for next round
unit.repaired = False
def deal_ranged_dps(army, enemy_army):
"""
Input army is a list of attacking Units,
enemy_army is list of Units being attacked.
Calculates the damage dealt to enemy_army only by ranged units
Updates the list of enemy Units with damaged health numbers
"""
for unit in army:
if unit.ranged:
deal_unit_dps(unit, enemy_army, army)
def remove_dead_units(army):
"""
Input is a list of Units in an army
Removes all Units with hp <=0 from that list
Returns updated list
"""
new_army = []
# removes normal dead units
for unit in army:
        if unit.hp > 0:  # keep only units that are still alive (hp <= 0 is dead)
new_army.append(unit)
# if Interceptor dies, free up that child space for the Carrier
if unit.name == 'Carrier':
for n in unit.child:
if unit.child[n] is not None:
if unit.child[n].hp <= 0:
unit.child[n] = None
# removes alive Interceptors if parent Carrier is dead
for unit in army:
if (unit.name == 'Carrier') and (unit.hp <= 0):
for n in unit.child:
if unit.child[n] in new_army:
new_army.remove(unit.child[n])
return new_army
def build_Interceptor(army):
"""
Input is an army.
For every Carrier in that army, if an Interceptor
    slot is available, build a new Interceptor and
add that Interceptor back into the army
"""
    for unit in army:
        if unit.name == 'Carrier':
            # count down the rebuild timer once per round
            if unit.child_time > 0:
                unit.child_time -= 1
            for n in unit.child:
                if unit.child[n] is None:
                    if unit.child_time == 0:
                        unit.child[n] = Unit('Interceptor', army)
                        army.append(unit.child[n])
                        unit.child_time = 9
def track_Locust_Broodlings(army):
"""
Input is a list of Units in an army
Locusts can only live for 18 rounds and
Broodlings can only live for 6 rounds
Kills Locusts and Broodlings after exceeding their
respective time limits
"""
    for unit in list(army):  # iterate over a copy; units are removed from army below
if unit.name == 'Locust':
if unit.live_time <= 0:
army.remove(unit)
else:
unit.live_time -= 1
if unit.name == 'Broodling':
if unit.live_time <= 0:
army.remove(unit)
else:
unit.live_time -= 1
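
# Minimal usage sketch (the unit names below are hypothetical; they must exist
# in the dataset returned by sim_units.get_Units()):
if __name__ == "__main__":
    enemy_comp = {'Marine': 10}
    test_comp = {'Zealot': 6}
    survivors = combat_sim(enemy_comp, test_comp)
    print("Surviving units from the test army:", survivors)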
|
'''
imputationflask.views
-------------------
Define routes rendered by Flask
'''
# external imports
from flask import render_template, request, current_app, Blueprint
from werkzeug.exceptions import HTTPException
from matplotlib import cm
import json
frontend = Blueprint('frontend', __name__)
def make_graph_data(pred_description):
colormap_name = 'tab20c'
graph_data = dict()
for key, pred in pred_description.items():
if key in current_app.persistent.binaries_dict['numeric_mappers'].keys():
graph_data[key] = json.dumps([{'label': current_app.persistent.binaries_dict['recordname2description'][key],
'data': [{'x': float(x), 'y': float(y)} for x, y in zip(pred['x'], pred['y'])],
'showLine': True,
'pointRadius': 0,
'borderColor': "#00468C",
'borderWidth': 1,
'backgroundColor': "#3e95cdcc"}])
else:
N = len(pred['y'])
cmap = cm.get_cmap(colormap_name, N)
colors = [f'#{"".join(str(hex(int(255*c))[2:]) for c in color[:3] )}'
for color in cmap.colors] # TODO: This probably has a more elegant solution
graph_data[key] = json.dumps([{
'label': pred['x'][i],
'data': [round(float(pred['y'][i]), 2)],
'backgroundColor': colors[i],
'borderWidth': 1}
for i in range(N)])
return graph_data
@frontend.route('/')
@frontend.route('/web_app', methods=["GET", "POST"])
def web_app():
form = current_app.persistent.census_form.get_instance(
request_form=request.form)
# we got some data, time to make predictions
if request.method == 'POST':
pred_description = current_app.persistent.predictor(request.form)
graph_data = make_graph_data(pred_description)
else:
graph_data = None
return render_template('webapp.html',
form=form,
graph_data=graph_data,
description_dict=(current_app
.persistent
.binaries_dict['recordname2description']),
numeric_keys=list(current_app
.persistent
.binaries_dict['numeric_mappers']
.keys()))
@frontend.route('/privacy')
def privacy():
return render_template('privacy.html')
@frontend.route('/how_it_works')
def how_it_works():
return render_template('under_construction.html')
@frontend.route('/about_api')
def about_api():
return render_template('under_construction.html')
def handle_error(e):
code = 500
if isinstance(e, HTTPException):
code = e.code
return render_template('error.html', error_code=code), code
|
import sys
#http://www.scipy.org/
try:
    from numpy import dot
    from numpy.linalg import norm
except ImportError:
    print "Error: Requires numpy from http://www.scipy.org/. Have you installed scipy?"
    sys.exit(1)
def removeDuplicates(items):
    """ remove duplicates from a list """
    return set(items)
def cosine(vector1, vector2):
""" related documents j and q are in the concept space by comparing the vectors :
cosine = ( V1 * V2 ) / ||V1|| x ||V2|| """
return float(dot(vector1,vector2) / (norm(vector1) * norm(vector2)))
def jaccard(vector1, vector2):
orr = 0
andd = 0
zipped = zip(vector1,vector2)
for (i, j) in zipped:
if(i > 0 and j > 0):
andd += 1
if(i > 0 or j > 0):
orr += 1
return float(andd) / float(orr)
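# Worked example (binary presence): for vector1 = [1, 0, 2] and
# vector2 = [0, 0, 3], one position is positive in both (andd = 1) and two
# positions are positive in at least one (orr = 2), so jaccard() returns 0.5.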
|
from django.shortcuts import render
from django.core import serializers
from features.models import Feature
from tickets.models import Ticket
from features.forms import featureForm
from cart.views import add_to_cart
from django.utils import timezone
from django.http import JsonResponse
import json
# Create your views here.
def index(request):
"""A view that displays the index page"""
features = Feature.objects.filter(status=4)
productFeatures = Feature.objects.filter(status=2)
devFeatures = Feature.objects.filter(status=3)
tickets = Ticket.objects.all()
"""See functions below"""
bugData = get_bugData()
featureDevData, featureVarData = get_featureData()
    context = {
        'features': features,
        'productFeatures': productFeatures,
        'devFeatures': devFeatures,
        'tickets': tickets,
        'featureDevData': featureDevData,
        'featureVarData': featureVarData,
        'bugData': bugData,
    }
    return render(request, 'index.html', context)
def get_bugData():
"""To get the correct data regarding bugs that have been resolved and how long that has taken"""
bugs = Ticket.objects.filter(status=3)
bugs_list = []
for ticket in bugs:
deltaT = ticket.fixed_date-ticket.published_date
bugs_list.append(round(deltaT.total_seconds()/3600/24))
bugs_list_set = list(set(bugs_list))
count_list = ['Days to Resolve',]
for n in bugs_list_set:
count_list.append(bugs_list.count(n))
bugT = ['bugT']
for bug in bugs_list_set:
bugT.append(bug)
bugData = [bugT, count_list]
return bugData
def get_featureData():
"""To get data on how often new features are developed"""
features = Feature.objects.filter(status=4).order_by('feature_added_date')
flist = []
dlist = ['Timeline',]
nlist = ['Number of Features over Time',]
for feature in features:
flist.append(feature.version)
dlist.append(str(feature.feature_added_date)[0:19])
for i in range(0, len(dlist)-1):
nlist.append(i+1)
featureDevData = [dlist, nlist]
featureVarData = flist
return featureDevData, featureVarData
|
#!/usr/bin/python3
if __name__ == "__main__":
import sys
    total = 0  # avoid shadowing the built-in sum()
    count = len(sys.argv)
    for i in range(1, count):
        total += int(sys.argv[i])
    print("{}".format(total))
|
import os
from datetime import datetime
import pandas # python3 -m pip install pandas
import progressbar # python3 -m pip install progressbar2
import DiffMon
class DiffMonTest:
def __init__(self,
outputDir,
diffDataset,
diffDatasetPath,
chkptSize,
maxDiffDrop,
maxWaitTime,
waitTimeEnable,
restartAppr):
"""
Construct a difficulty monitor tester class.
Args:
outputDir : The path to the output directory.
diffDataset : The dataset that contains all historical
block difficulty values and block times,
in Pandas object.
diffDatasetPath : The path to the dataset that contains all
historical block difficulty values and block times.
(optional, ignored if `diffDataset` is not `None`)
chkptSize : Checkpoint size.
maxDiffDrop : Maximum difficulty drop allowed, in percentage.
maxWaitTime : Maximum wait time.
waitTimeEnable : Do we want to enable maximum wait time or not?
restartAppr : Approach of restart process.
Integer begin from 1.
Currently there're two different approaches.
"""
self.diffDataset = diffDataset if diffDataset is not None else pandas.read_csv(diffDatasetPath)
self.chkptSize = chkptSize
self.diffMon = DiffMon.DiffMon(chkptSize,
maxDiffDrop,
maxWaitTime,
waitTimeEnable,
restartAppr)
self.outFileName = 'chkpt_{chkpt}_maxdrop_{maxdrop}_maxwait_{maxwait}_wait_{wait}_rs_{rs}.csv'.format(
chkpt=chkptSize,
maxdrop=maxDiffDrop,
maxwait=maxWaitTime,
wait=waitTimeEnable,
rs=restartAppr)
self.testName = self.outFileName
self.outFile = open(os.path.join(outputDir, self.outFileName), 'w')
self.outFile.write('\"Block_Number\",\"Block_Time\",\"Timestamp\",\"Diff_Ref\",\"Diff_Drop\",\"BTime_Shutdown\",\"Diff_Shutdown\",\"Checkpt_Start\",\"Checkpt_End\",\"Ref_BlockNum\",\"At_Checkpt\"\n')
self.outFile.flush()
        # Number of shutdowns we found
self.shutdownCount = 0
self.btShutdownCount = 0
self.dfShutdownCount = 0
self.totalSteps = len(self.diffDataset.index)
        self.currentStep = 0
        self.maxDiffDrop = 0
def __del__(self):
self.outFile.close()
def Begin(self):
"""
Start the test
"""
try:
bnumIndex = self.diffDataset.columns.get_loc('Block_Number')
timeIndex = self.diffDataset.columns.get_loc('Block_Time')
diffIndex = self.diffDataset.columns.get_loc('Difficulty')
self.currentStep = 0
prevTime = 0
#with progressbar.ProgressBar(max_value=self.totalSteps) as bar:
for block in self.diffDataset.itertuples(index=False):
# Getting basic values
bNum = block[bnumIndex]
time = block[timeIndex]
diff = block[diffIndex]
blockTime = time - prevTime
# Send to difficulty monitor
res = self.diffMon.Update(bNum, diff, blockTime)
if res is not None:
# Count num of shutdowns
self.shutdownCount += 1
if res[2] is True:
self.btShutdownCount += 1
elif res[3] is True:
self.dfShutdownCount += 1
# check max drop
if res[1] < self.maxDiffDrop:
self.maxDiffDrop = res[1]
timeReadable = datetime.fromtimestamp(time)
self.outFile.write('{bNum},{bTime},\"{time}\",{diffRef},{diffDrop},{btDown},{dfDown},{chkpt_s},{chkpt_e},{ref_bnum},{chkpt_pos}\n'.format(
bNum = bNum,
bTime = blockTime,
time = str(timeReadable),
diffRef = res[0],
diffDrop= res[1],
btDown = res[2],
dfDown = res[3],
chkpt_s = res[4],
chkpt_e = res[5],
ref_bnum= res[6],
chkpt_pos=self.diffMon.chkptCount))
# Updates for the loop
prevTime = time
self.currentStep += 1
#bar.update(self.currentStep)
# Finished processing all blocks
self.outFile.write('\"#####TABLE_ENDS#####\"\n\n')
# output summary
self.outFile.write('\"Checkpoint Size: {chkptSize}\"\n'.format(chkptSize=self.chkptSize))
self.outFile.write('\"Num of Shutdown: {shutdownCount}\"\n'.format(shutdownCount=self.shutdownCount))
self.outFile.write('\"Max Diff. Drop : {maxDiffDrop}\"\n'.format(maxDiffDrop=self.maxDiffDrop))
self.outFile.write('\"Num of Chkpts : {chkptCount}\"\n'.format(chkptCount=self.diffMon.chkptCount))
self.outFile.write('\n')
self.outFile.write('\"Num of BlockTime Shutdown: {shutdownCount}\"\n'.format(shutdownCount=self.btShutdownCount))
self.outFile.write('\"Num of Difficulty Shutdown: {shutdownCount}\"\n'.format(shutdownCount=self.dfShutdownCount))
self.outFile.flush()
        except:
            # Mark the run as complete before re-raising, so callers that
            # track progress via currentStep do not see a stalled test.
            self.currentStep = self.totalSteps
            raise
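# A minimal usage sketch (hypothetical paths and parameter values; kept as a
# comment because DiffMon's runtime behaviour is defined elsewhere):
#
#   tester = DiffMonTest(outputDir='out',
#                        diffDataset=None,
#                        diffDatasetPath='difficulty.csv',
#                        chkptSize=100,
#                        maxDiffDrop=-50,
#                        maxWaitTime=600,
#                        waitTimeEnable=True,
#                        restartAppr=1)
#   tester.Begin()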
|
import datetime

current_year = datetime.date.today().year
name = input('The Brazilian sports confederation needs an athlete.\n'
             'To register, first type your name: ')
birth_year = int(input('Now type your year of birth: '))
name_upper = name.upper()
age = current_year - birth_year  # work out the athlete's age
if age <= 9:
    print(f'HELLO {name_upper}, YOU ARE {age} YEARS OLD AND IN THE MIRIM CATEGORY')
elif age <= 14:
    print(f'HELLO {name_upper}, YOU ARE {age} YEARS OLD AND IN THE INFANTIL CATEGORY')
elif age <= 19:
    print(f'HELLO {name_upper}, YOU ARE {age} YEARS OLD AND IN THE JUNIOR CATEGORY')
elif age == 20:
    print(f'HELLO {name_upper}, YOU ARE {age} YEARS OLD AND IN THE SÊNIOR CATEGORY')
else:
    print(f'HELLO {name_upper}, YOU ARE {age} YEARS OLD AND IN THE MASTER CATEGORY')
|
from django import forms
from .models import song, album, vocalist, hashtag, language, mood
import django_filters
class addSong(forms.ModelForm):
class Meta:
model = song
exclude = ['isDeleted']
class updateSong(forms.ModelForm):
class Meta:
model = song
fields = ['title']
class updateAlbum(forms.ModelForm):
class Meta:
model = song
fields = ['album_name']
class addAlbum(forms.ModelForm):
class Meta:
model = album
fields = ['album_name']
class updateVocalist(forms.ModelForm):
class Meta:
model = song
fields = ['vocalist_name']
class addVocalist(forms.ModelForm):
class Meta:
model = vocalist
fields = ['vocalist_name']
class updateHashtag(forms.ModelForm):
class Meta:
model = song
fields = ['hashtags']
class addHashtag(forms.ModelForm):
class Meta:
model = hashtag
fields = ['hashtags']
class updateLanguage(forms.ModelForm):
class Meta:
model = song
fields = ['languages']
class addLanguage(forms.ModelForm):
class Meta:
model = language
fields = ['languages']
class updateMood(forms.ModelForm):
class Meta:
model = song
fields = ['moods']
class addMood(forms.ModelForm):
class Meta:
model = mood
fields = ['moods']
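# A minimal usage sketch (hypothetical view, URL name, and template -- none of
# these exist in this module) showing how one of the ModelForms above might be
# wired up:
#
#   from django.shortcuts import render, redirect
#   from .forms import addSong
#
#   def create_song(request):
#       form = addSong(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()
#           return redirect('song_list')
#       return render(request, 'song_form.html', {'form': form})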
|
import os
import json
class DecryptorOptions():
def __init__(self):
configurationOptions = self.configurationOptions()
self.decryptorApplicationPath = configurationOptions['decryptorApplicationPath']
self.decryptorSlots = int(configurationOptions['decryptorSlots'])
def allKeys(self):
return ['decryptorApplicationPath', 'decryptorSlots']
def pathKeys(self):
return ['decryptorApplicationPath']
def folderKeys(self):
return []
def configurationPath(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'options.json')
def configurationOptions(self):
if not os.path.exists(self.configurationPath()):
raise Exception('No configuration file found!!')
configurationOptions = None
errors = []
with open(self.configurationPath(), 'r') as f:
configurationOptions = json.loads(f.read())
for element in self.allKeys():
if element not in configurationOptions.keys():
errors.append('Configuration Errors: No value found for %s' % (element))
for element in self.pathKeys():
if not os.path.exists(configurationOptions[element]):
errors.append('Configuration Errors: No file found at path for %s' % (element))
for element in self.folderKeys():
if not os.path.exists(os.path.dirname(configurationOptions[element])):
errors.append('Configuration Errors: No folder found at path for %s' % (element))
        if errors:
            print("\n".join(errors))
            print("\n")
            raise Exception('Configuration Errors Found!!!!!')
return configurationOptions
def decryptorOptionsWithInputAssetAndDestinationFile(self, inputAsset, destinationFile):
return [self.decryptorApplicationPath, inputAsset, destinationFile]
if __name__ == '__main__':
    print(DecryptorOptions().decryptorSlots, type(DecryptorOptions().decryptorSlots))
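# An example options.json placed next to this module (hypothetical values;
# 'decryptorApplicationPath' must point at a file that actually exists):
#
#   {
#       "decryptorApplicationPath": "/usr/local/bin/decryptor",
#       "decryptorSlots": "4"
#   }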
|
# pylint: disable=E1101
"""This module sends requests to a Borda server in order to create an election,
register candidates and voters, and issue votes on behalf of the voters."""
import argparse
import json
import sys
import logging
import requests
def resource(path):
"""Append a path to an entrypoint to form a REST resource"""
base_url = 'http://localhost:1031/'
return base_url + path
def create_new_election(args):
"""Create a new election on the server"""
try:
election = requests.post(resource('election'))
return election.status_code == requests.codes.ok
except requests.exceptions.ConnectionError as ex:
        logging.error(ex)
return False
def add_candidate(args):
"""Add a new candidate to the open election"""
candidate = {'name': args.name}
try:
candidate_request = requests.put(
resource('election'), data=candidate)
return candidate_request.status_code == requests.codes.ok
except requests.exceptions.ConnectionError as ex:
        logging.error(ex)
return False
def list_candidates(args):
"""List all candidates"""
try:
candidates_request = requests.get(resource('vote'))
js = candidates_request.json()
        candidates = json.loads(js)
        for num, candidate in enumerate(candidates):
            print("{0}: {1}".format(num + 1, candidate))
        return candidates_request.status_code == requests.codes.ok
except requests.exceptions.ConnectionError as ex:
        logging.error(ex)
return False
def add_voter(args):
"""Add a voter to the open election"""
voter = {'name': args.name}
try:
voter_request = requests.post(
resource('vote'), data=voter)
return voter_request.status_code == requests.codes.ok
except requests.exceptions.ConnectionError as ex:
        logging.error(ex)
return False
def voter_votes(args):
"""A named voter issues a sorted list of votes"""
voter = {
'name': args.name,
'votes': args.votes}
try:
voter_votes_request = requests.put(
resource('vote'), data=voter)
return voter_votes_request.status_code == requests.codes.ok
except requests.exceptions.ConnectionError as ex:
        logging.error(ex)
return False
def get_election_winner(args):
"""Get the winner of the open election"""
try:
winner = requests.get(resource('election'))
        print(winner.text)
return winner.status_code == requests.codes.ok
except requests.exceptions.ConnectionError as ex:
        logging.error(ex)
return False
def comma_separated_strings(votes_string):
"""Argparse type check for a comma separated list of strings"""
try:
return votes_string.split(',')
except ValueError:
msg = "%r is not a comma separated list of strings" % votes_string
raise argparse.ArgumentTypeError(msg)
def run():
"""Main entry point"""
parser = argparse.ArgumentParser(prog='borda')
subparsers = parser.add_subparsers(help='sub-command help')
parse_create_new_election = subparsers.add_parser('election')
parse_create_new_election.set_defaults(func=create_new_election)
parse_add_candidate = subparsers.add_parser('candidate')
parse_add_candidate.add_argument('-n', '--name', required=True)
parse_add_candidate.set_defaults(func=add_candidate)
parse_add_voter = subparsers.add_parser('voter')
parse_add_voter.add_argument('-n', '--name', required=True)
parse_add_voter.set_defaults(func=add_voter)
parse_vote = subparsers.add_parser('vote')
parse_vote.add_argument('-n', '--name', required=True)
parse_vote.add_argument('-v', '--votes', required=True,
type=comma_separated_strings)
parse_vote.set_defaults(func=voter_votes)
parse_winner = subparsers.add_parser('winner')
parse_winner.set_defaults(func=get_election_winner)
args = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
success = args.func(args)
if success:
sys.exit(0)
else:
sys.exit(1)
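# Example command lines (assuming this module is saved as borda.py and the
# Borda server is listening on localhost:1031):
#
#   $ python borda.py election
#   $ python borda.py candidate -n Alice
#   $ python borda.py voter -n Bob
#   $ python borda.py vote -n Bob -v Alice,Carol
#   $ python borda.py winner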
|
# -*- encoding: utf-8 -*-
from hello import Major, db

# Load major names (one per line) from major.txt into the database.
with open('major.txt', 'rt', encoding="utf-8") as f:
    for x in f:
        # rstrip('\n') is safer than x[:-1], which clips the last character
        # when the final line has no trailing newline
        db.session.add(Major(mname=x.rstrip('\n')))
        db.session.commit()
|
# variables: getting to know variables
import keyword

# list every reserved word in this Python version
print(keyword.kwlist)
keyWord = input("please input a string: ")
flag = keyword.iskeyword(keyWord)  # is the input a Python keyword?
if flag:
    print("true")
else:
    print("false")
|
# Using text2 from the nltk book corpora, create your own version of the
# MadLib program.
# Requirements:
# 1) Only use the first 150 tokens
# 2) Pick 5 parts of speech to prompt for, including nouns
# 3) Replace nouns 15% of the time, everything else 10%
##
# Deliverables:
# 1) Print the original text (150 tokens)
# 2) Print the new text
print("START*******")
import nltk
import random
from nltk import word_tokenize, sent_tokenize

nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')  # needed by nltk.pos_tag below
nltk.download('gutenberg')  # text2 (Sense and Sensibility) comes from here
# The original used text2 without importing it; nltk.book defines it (and may
# ask for further corpus downloads on first use).
from nltk.book import text2
debug = False
if debug:
print ("Getting information from file madlib_test.txt...\n")
firststring = text2[:150]  # the first 150 tokens
para = ' '.join(firststring)
tokens = nltk.word_tokenize(para)
print("TOKENS")
print(tokens) #printing the tokens from the nltk text2 book
taggedtokens = nltk.pos_tag(tokens)
print("TAGGED TOKENS")
print(taggedtokens) #printing the tagged tokens
if debug:
print ("First few tagged tokens are:")
for tup in taggedtokens[:5]: #sets the limit of 5 tagged tokens that I am looking for
print (tup)
tagmap = {"NN": "a noun", "NNS": "a plural noun", "NNP": "a proper noun (singular)",
          "VB": "a verb", "VBP": "a present-tense verb", "JJ": "an adjective"}
# Substitution probabilities per POS tag: nouns 15%, everything else 10%.
# (The original's "VPB" is not a Penn Treebank tag; "VBP" was presumably intended.)
substitution_probabilities = {"NN": .15, "NNS": .10, "NNP": .10,
                              "VB": .10, "VBP": .10, "JJ": .10}
def spaced(word):
if word in [",", ".", "?", "!", ":"]:
return word
else:
return " " + word
final_words = []
for (word, tag) in taggedtokens:  # iterate over the (word, tag) tuples
if tag not in substitution_probabilities or random.random() > substitution_probabilities[tag]:
final_words.append(spaced(word)) #appending the final_words if it meets the requirements above
else:
new_word = input("Please enter %s:\n" % (tagmap[tag]))
final_words.append(spaced(new_word)) #appending new_word if it meets the requirement above
print ("".join(final_words))
print("\n\nEND*******")
|
from django.urls import path
from . import views
from .engine.hier_deploy import views as hier_deploy_views
app_name = 'inventory'
urlpatterns = [
path('v1',
views.InventoryListView.as_view(),
name='inventory_list'),
path('v1/inventory-create',
hier_deploy_views.InventoryCreateView.as_view(),
name='inventory_create'),
path('v1/inventory-create/data.json',
hier_deploy_views.InventoryCreateJSONView.as_view(),
name='inventory_json_create'),
path('v1/inventory-check',
views.InventoryCheckView.as_view(),
name='inventory_check'),
path('v1/<inventory_name>',
views.InventoryDetailView.as_view(),
name='inventory_detail'),
path('v1/<inventory_name>/sync',
views.InventorySyncView.as_view(),
name='inventory_sync'),
path('v1/<inventory_name>/delete',
views.InventoryDeleteView.as_view(),
name='inventory_delete'),
path('v1/<inventory_name>/data.json',
views.InventoryDetailJSONView.as_view(),
name='inventory_json_detail'),
path('v1/<inventory_name>/create',
views.ResourceCreateView.as_view(),
name='resource_create'),
path('v1/<inventory_name>/node-create',
hier_deploy_views.NodeCreateView.as_view(),
name='node_create'),
path('v1/<inventory_name>/node/<node_name>',
hier_deploy_views.NodeDetailView.as_view(),
name='node_detail'),
path('v1/<inventory_name>/node/<node_name>/update',
hier_deploy_views.NodeUpdateView.as_view(),
name='node_update'),
path('v1/<inventory_name>/param-create',
hier_deploy_views.ParamCreateView.as_view(),
name='param_create'),
path('v1/<inventory_name>/param/<param_name>/update',
hier_deploy_views.ParamUpdateView.as_view(),
name='param_update'),
path('v1/<inventory_name>/param/<param_name>/delete',
hier_deploy_views.ParamDeleteView.as_view(),
name='param_delete'),
path('v1/<inventory_name>/classify',
views.ResourceClassifyView.as_view(),
name='resource_classify'),
path('v1/<inventory_name>/resource/<resource_name>',
views.ResourceDetailView.as_view(),
name='resource_detail'),
path('v1/<inventory_name>/resource/<resource_name>/delete',
views.ResourceDeleteView.as_view(),
name='resource_delete'),
path('v1/<inventory_name>/<resource_name>/data.json',
views.ResourceDetailJSONView.as_view(),
name='resource_json_detail'),
path('v1/<inventory_name>/model/<form_name>/simple-generate',
views.ClassGenerateView.as_view(),
name='model_generate'),
path('v1/<inventory_name>/model/<form_name>/wizard-generate',
views.ClassGenerateWizardView.as_view(),
name='model_generate_wizard'),
]
|
# tboz203
# 2015-05-12
# reyna_tests/models.py
from decimal import Decimal
from django.db import models
class Test(models.Model):
'''
A collection of questions
'''
name = models.CharField(max_length=64)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class Question(models.Model):
'''
A question on a test.
'''
# consider adding an order field?
text = models.CharField(max_length=200)
    test = models.ForeignKey(Test, on_delete=models.CASCADE)
def __str__(self):
return self.text
class Meta:
ordering = ('text',)
class Choice(models.Model):
'''
One possible answer to a question (multiple choice or true/false)
'''
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
text = models.CharField(max_length=256)
is_correct = models.BooleanField()
def __str__(self):
return self.text
class Meta:
ordering = ('text',)
class Attempt(models.Model):
'''
One test-taker's attempt to pass a test.
'''
user = models.CharField(max_length=64)
    test = models.ForeignKey(Test, on_delete=models.CASCADE)
date = models.DateTimeField()
choices = models.ManyToManyField(Choice)
score = models.DecimalField(max_digits=5, decimal_places=2,
default=Decimal('0'))
def __str__(self):
return "{} - {}".format(self.user, self.test)
class Meta:
ordering = ('user', 'test', 'date')
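# A possible grading helper (a sketch under the assumption that `score` means
# the percentage of correctly chosen answers; not part of the original app):
#
#   def grade(attempt):
#       total = attempt.test.question_set.count()
#       correct = attempt.choices.filter(is_correct=True).count()
#       attempt.score = Decimal(100 * correct) / Decimal(total)
#       attempt.save()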
|
"""
Given the triangle of consecutive odd numbers:
1
3 5
7 9 11
13 15 17 19
21 23 25 27 29
...
Calculate the row sums of this triangle from the row index (starting at index 1) e.g.:
row_sum_odd_numbers(1); # 1
row_sum_odd_numbers(2); # 3 + 5 = 8
"""
def row_sum_odd_numbers(n):
    # Row n holds the n odd numbers from n**2 - n + 1 through n**2 + n - 1.
    return sum(range(n**2 - n + 1, n**2 + n, 2))
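# Why this works: rows 1..n-1 contain 1 + 2 + ... + (n-1) = n*(n-1)/2 odd
# numbers, so row n starts at the (n*(n-1)/2 + 1)-th odd number, which is
# 2*(n*(n-1)/2 + 1) - 1 = n**2 - n + 1. The n entries average n**2, so the
# whole row sums to n**3 -- e.g. row_sum_odd_numbers(13) == 13**3 == 2197,
# matching the test expectations below.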
print(row_sum_odd_numbers(1))
print(row_sum_odd_numbers(2))
print(row_sum_odd_numbers(3))
print(row_sum_odd_numbers(4))
print(row_sum_odd_numbers(5))
print(row_sum_odd_numbers(13))
print(row_sum_odd_numbers(19))
print(row_sum_odd_numbers(41))
"""
Test.assert_equals(row_sum_odd_numbers(1), 1)
Test.assert_equals(row_sum_odd_numbers(2), 8)
Test.assert_equals(row_sum_odd_numbers(13), 2197)
Test.assert_equals(row_sum_odd_numbers(19), 6859)
Test.assert_equals(row_sum_odd_numbers(41), 68921)
"""
|
"""
Minutiae Extractor to get Minutia from files (fingerprint templates)
"""
from Minutia import MinutiaNBIS
class MinutiaeExtractor:
NBIS_FORMAT = 1
def __init__(self, extractor_format=NBIS_FORMAT):
self.extractor_type = extractor_format
def extract_minutiae_from_xyt(self, file_path):
""" Extracts minutiae from a fingerprint .xyt file
:returns a list of Minutia with descending order of quality """
minutiae_list = []
        with open(file_path, 'r') as file:
            for line in file:
                # split() tolerates repeated whitespace and the trailing newline
                x, y, theta, quality = line.split()
                minutia = MinutiaNBIS(int(x), int(y), int(theta), int(quality))
minutiae_list.append(minutia)
# sort list according to quality of minutiae
minutiae_list.sort(key=lambda m: int(m.quality), reverse=True)
return minutiae_list
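# A minimal usage sketch (hypothetical file name; each line of an .xyt file
# is a whitespace-separated "x y theta quality" record):
#
#   extractor = MinutiaeExtractor()
#   minutiae = extractor.extract_minutiae_from_xyt('finger1.xyt')
#   best = minutiae[0]  # highest-quality minutia first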
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 00:36:08 2019
@author: leiyuan
"""
#colors reference guide
#https://matplotlib.org/api/colors_api.html
'''
https://matplotlib.org/examples/color/colormaps_reference.html
Sequential:
These colormaps are approximately monochromatic colormaps varying smoothly
between two color tones---usually from low saturation (e.g. white) to high
saturation (e.g. a bright blue). Sequential colormaps are ideal for
representing most scientific data since they show a clear progression from
low-to-high values.
Diverging:
These colormaps have a median value (usually light in color) and vary
smoothly to two different color tones at high and low values. Diverging
colormaps are ideal when your data has a median value that is significant
(e.g. 0, such that positive and negative values are represented by
different colors of the colormap).
Qualitative:
These colormaps vary rapidly in color. Qualitative colormaps are useful for
choosing a set of discrete colors. For example::
color_list = plt.cm.Set3(np.linspace(0, 1, 12))
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# Have colormaps separated into categories:
# http://matplotlib.org/examples/color/colormaps_reference.html
cmaps = [('Perceptually Uniform Sequential', [
'viridis', 'plasma', 'inferno', 'magma']),
('Sequential', [
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
('Sequential (2)', [
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
'hot', 'afmhot', 'gist_heat', 'copper']),
('Diverging', [
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
('Qualitative', [
'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']),
('Miscellaneous', [
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])]
nrows = max(len(cmap_list) for cmap_category, cmap_list in cmaps)
# size every figure for the category with the most colormaps
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))# for imshow
'''
imshow(x)
X : array-like or PIL image
The image data. Supported array shapes are:
(M, N): an image with scalar data. The data is visualized using a colormap.
(M, N, 3): an image with RGB values (float or uint8).
(M, N, 4): an image with RGBA values (float or uint8), i.e. including transparency.
'''
def plot_color_gradients(cmap_category, cmap_list, nrows):
fig, axes = plt.subplots(nrows=nrows)
#fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
axes[0].set_title(cmap_category + ' colormaps', fontsize=14)
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
for cmap_category, cmap_list in cmaps:
plot_color_gradients(cmap_category, cmap_list, nrows)
plt.show()
# single-colormap demos
# no.1: 256 samples of a sequential colormap
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))# for imshow
fig, axes = plt.subplots(nrows = 4,figsize = (5,12))
axes[0].imshow(gradient, aspect='auto', cmap=plt.get_cmap('ocean'))
# no.2: 20 samples of the same sequential colormap
gradient = np.linspace(0, 1, 20)
gradient = np.vstack((gradient, gradient))# for imshow
axes[1].imshow(gradient, aspect='auto', cmap=plt.get_cmap('ocean'))
# no.3: 20 samples of Set1, a 9-color qualitative (listed) colormap
gradient = np.linspace(0, 1, 20)
gradient = np.vstack((gradient, gradient))# for imshow
axes[2].imshow(gradient, aspect='auto', cmap=plt.get_cmap('Set1'))
# no.4: build a ListedColormap with exactly as many colors as samples
color_list = plt.cm.Set1(np.linspace(0, 1, 9))
color_list = np.concatenate((color_list,color_list,color_list[:2]))
cmap = colors.ListedColormap(color_list)
gradient = np.linspace(0, 1, 20)
gradient = np.vstack((gradient, gradient))# for imshow
axes[3].imshow(gradient, aspect='auto', cmap=cmap)
plt.show()
#1 matplotlib.colors.cnames
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in newer Matplotlib
# Plot a sin curve using the x and y axes.
x = np.linspace(0, 1, 100)
y = np.sin(x * 2 * np.pi) / 2 + 0.5
ax.plot(x, y, zs=0, zdir='z', label='curve in (x,y)')
# Plot scatterplot data on the x and z axes.
color_list = plt.cm.ocean(np.linspace(0, 1, 100))
# By using zdir='y', the y value of these points is fixed to the zs value 0
# and the (x,y) points are plotted on the x and z axes.
ax.scatter(x, y, zs=0, zdir='y', c=color_list, label='points in (x,z)')
# Make legend, set axes limits and labels
ax.legend()
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# Customize the view angle so it's easier to see that the scatter points lie
# on the plane y=0
ax.view_init(elev=20., azim=-35)
plt.show()
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def f(t):
s1 = np.cos(2*np.pi*t)
e1 = np.exp(-t)
return np.multiply(s1, e1)
################
# First subplot
################
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
t3 = np.arange(0.0, 2.0, 0.01)
# Twice as tall as it is wide.
fig = plt.figure(figsize=plt.figaspect(2.))
fig.suptitle('A tale of 2 subplots')
ax = fig.add_subplot(2, 1, 1)
l = ax.plot(t1, f(t1), 'bo',
t2, f(t2), 'k--', markerfacecolor='green')
ax.grid(True)
ax.set_ylabel('Damped oscillation')
#################
# Second subplot
#################
ax = fig.add_subplot(2, 1, 2, projection='3d')
X = np.arange(-5, 5, 0.25)
xlen = len(X)
Y = np.arange(-5, 5, 0.25)
ylen = len(Y)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
linewidth=0, antialiased=False)
ax.set_zlim3d(-1, 1)
plt.show()
"""
animation example 1
"""
from matplotlib import pyplot as plt
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib import animation
fig = plt.figure()
ax = p3.Axes3D(fig)
def gen(n):
phi = 0
while phi < 2*np.pi:
yield np.array([np.cos(phi), np.sin(phi), phi])
phi += 2*np.pi/n
def update(num, data, line):
line.set_data(data[:2, :num])
line.set_3d_properties(data[2, :num])
N = 100
data = np.array(list(gen(N))).T
line, = ax.plot(data[0, 0:1], data[1, 0:1], data[2, 0:1])
# Setting the axes properties
ax.set_xlim3d([-1.0, 1.0])
ax.set_xlabel('X')
ax.set_ylim3d([-1.0, 1.0])
ax.set_ylabel('Y')
ax.set_zlim3d([0.0, 10.0])
ax.set_zlabel('Z')
ani = animation.FuncAnimation(fig, update, N, fargs=(data, line), interval=10000/N, blit=False)
ani.save('matplot001.gif', writer='pillow')
plt.show()
"""
animation example 2
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
fig, ax = plt.subplots()
xdata, ydata = [], []
ln, = ax.plot([], [], 'r-', animated=False)
def init():
ax.set_xlim(0, 2*np.pi)
ax.set_ylim(-1, 1)
return ln,
def update(frame):
xdata.append(frame)
ydata.append(np.sin(frame))
ln.set_data(xdata, ydata)
return ln,
ani = FuncAnimation(fig, update, frames=np.linspace(0, 2*np.pi, 128),
init_func=init, blit=True)
plt.show()
ani.save('matplot002.gif', writer='pillow')
"""
animation example 3
"""
fig, ax = plt.subplots()
x = np.linspace(0, 2*np.pi, 200)
y = np.sin(x)
l = ax.plot(x, y)
dot, = ax.plot([], [], 'ro')
def init():
ax.set_xlim(0, 2*np.pi)
ax.set_ylim(-1, 1)
return l
def gen_dot():
for i in np.linspace(0, 2*np.pi, 200):
newdot = [i, np.sin(i)]
yield newdot
def update_dot(newd):
    dot.set_data([newd[0]], [newd[1]])  # pass sequences; newer Matplotlib rejects scalars
    return dot,
ani = animation.FuncAnimation(fig, update_dot, frames = gen_dot(), interval = 100, init_func=init)
ani.save('matplot003.gif', writer='pillow', fps=30)
"""
animation example 4
"""
from math import sin, cos
from scipy.integrate import odeint
g = 9.8
leng = 1.0
b_const = 0.2
# no decay case:
def pendulum_equations1(w, t, l):
th, v = w
dth = v
dv = - g/l * sin(th)
return dth, dv
# the decay exist case:
def pendulum_equations2(w, t, l, b):
th, v = w
dth = v
dv = -b/l * v - g/l * sin(th)
return dth, dv
t = np.arange(0, 20, 0.1)
track = odeint(pendulum_equations1, (1.0, 0), t, args=(leng,))
#track = odeint(pendulum_equations2, (1.0, 0), t, args=(leng, b_const))
xdata = [leng*sin(track[i, 0]) for i in range(len(track))]
ydata = [-leng*cos(track[i, 0]) for i in range(len(track))]
fig, ax = plt.subplots()
ax.grid()
line, = ax.plot([], [], 'o-', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
def init():
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
time_text.set_text('')
return line, time_text
def update(i):
newx = [0, xdata[i]]
newy = [0, ydata[i]]
line.set_data(newx, newy)
time_text.set_text(time_template %(0.1*i))
return line, time_text
ani = animation.FuncAnimation(fig, update, range(1, len(xdata)), init_func=init, interval=50)
#ani.save('single_pendulum_decay.gif', writer='imagemagick', fps=100)
ani.save('matplot004.gif', writer='pillow', fps=100)
plt.show()
'''
FuncAnimation parameters:
a. fig        -- the figure the animation is drawn on
b. func       -- the user-defined update function (update above)
c. frames     -- animation length: frames per loop; at run time each value is
                 passed to update(n) as its argument
d. init_func  -- the user-defined first frame (init above), for initialization
e. interval   -- update interval, in milliseconds
f. blit       -- redraw everything, or only what changed. True is preferred,
                 but macOS users should choose False or nothing is displayed.
'''
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser('Plot Evaluation')
parser.add_argument('--patterns',
type=str,
nargs='+')
parser.add_argument('--names',
type=str,
nargs='+')
parser.add_argument('--epoch-len',
type=int,
default=1000)
parser.add_argument('--tag',
type=str,
default='ray/tune/evaluation/episode-reward-mean')
parser.add_argument('--csv-file',
type=str,
default='data.csv')
parser.add_argument('--plt-file',
type=str,
default='data.png')
parser.add_argument('--title',
type=str,
default='Evaluation')
parser.add_argument('--style',
type=str,
default='darkgrid')
args = parser.parse_args()
sns.set(style=args.style)
df = pd.DataFrame(columns=['Algorithm',
'Type',
'Timestep',
'Average Return'])
i = 0
for pattern, name in zip(args.patterns, args.names):
all_paths = []
all_path_returns = []
for t in tf.io.gfile.glob(pattern):
path = []
path_return = 0.0
for e in tf.compat.v1.train.summary_iterator(t):
for v in e.summary.value:
                    if v.tag == args.tag and e.step <= 100:  # keep only the first 100 logged epochs
path_return += v.simple_value
path.append([name,
"All",
e.step * args.epoch_len,
v.simple_value])
df.loc[i] = path[-1]
i += 1
all_paths.append(path)
all_path_returns.append(path_return)
for e in all_paths[np.argmax(all_path_returns)]:
df.loc[i] = [e[0], "Max", *e[2:]]
i += 1
for e in all_paths[np.argmin(all_path_returns)]:
df.loc[i] = [e[0], "Min", *e[2:]]
i += 1
df.to_csv(args.csv_file)
sns.lineplot(x="Timestep",
y="Average Return",
hue="Algorithm",
style="Type",
data=df)
plt.title(args.title)
plt.savefig(args.plt_file)
|