text stringlengths 8 6.05M |
|---|
#Accepts an accession number, returns a protein sequence
from Bio import SeqIO
from Bio import Entrez
from Bio.Blast import NCBIWWW,NCBIXML
import argparse
from time import sleep
parser = argparse.ArgumentParser(description='Get aa sequence from accession number')
parser.add_argument('-an', '--accession', help='the accession number for the sequence')
parser.add_argument('-o', '--output', help='the output directory')
parser.add_argument('-l', '--orglist', help='organism list')
args = parser.parse_args()

# Fetch the query protein record from NCBI and write it as a FASTA file
# named after the source organism.
Entrez.email = 'dlawre14@slu.edu'
handle = Entrez.efetch(db="protein", id=args.accession, retmode='xml')
records = Entrez.read(handle)
name = records[0]['GBSeq_organism'].lower().replace(' ','_')
query = args.output+'/'+name+'.fasta'
with open(query, 'w') as f:
    f.write('>gi|' + args.accession + '|' + records[0]['GBSeq_definition'] + '\n' + records[0]['GBSeq_sequence'])

# BLAST the query against each organism in the list to find orthologs,
# then fetch and save the top hit for each organism.
seq = SeqIO.read(query,'fasta')
with open(args.orglist, 'r') as f:
    for line in f:
        organism = line.rstrip('\n')
        result = NCBIWWW.qblast('blastp', 'nr', seq.seq, entrez_query=organism+'[Organism]')
        blast_records = NCBIXML.parse(result)
        accno = None
        for record in blast_records:
            # Guard: an organism may return no alignments at all; the old
            # code raised IndexError here.
            if record.alignments:
                accno = record.alignments[0].accession
        if accno is None:
            print('No BLAST hits for ' + organism + '; skipping')
            continue
        handle = Entrez.efetch(db='protein', id=accno, retmode='xml')
        ortho_records = Entrez.read(handle)
        with open('/'.join(query.split('/')[0:-1]) + '/' + organism.lower().replace(' ','_') + '.fasta', 'w') as o:
            o.write('>gi|' + accno + '|' + ortho_records[0]['GBSeq_definition'] + '\n' + ortho_records[0]['GBSeq_sequence'])
        # NCBI usage guidelines ask clients to pause between web queries;
        # `sleep` was imported but never used before.
        sleep(3)
|
#!/usr/bin/env python3
from hashlib import sha256
from os import fsync, path
from signal import SIGINT, signal
from time import monotonic
from sys import exit
def signal_handler(signum, frame):
    """Abort the program cleanly on a caught signal.

    Registered for SIGINT so Ctrl-C prints a short message and exits
    with status 1 instead of dumping a KeyboardInterrupt traceback.
    """
    print('\nE: got signal {}'.format(signum))
    exit(1)
def get_len(fpath):
    """Return the size of the file at *fpath* in bytes, or -1 on any error.

    The size is measured by seeking to the end of the opened file rather
    than by stat(), so it reflects what a reader will actually see.
    Errors are printed and reported as -1 (caller convention in this script).
    """
    try:
        with open(fpath, "rb") as f:
            # Seek to end-of-file and report the resulting offset.
            f.seek(0, 2)
            return f.tell()
    except Exception as e:
        # One handler covers open/seek/tell; the original nested three
        # try/except blocks that all did exactly this.
        print(e)
        return -1
def get_mode():
    """Prompt until the user selects a valid mode.

    Returns '1' for insertion (hide a file inside another) or
    '2' for extraction (recover a file from a container).
    """
    prompt = ('1 - insertion (insert file in other file)\n2 - extraction '
              '(extract file from other file)\n: ')
    while True:
        print('Select mode:')
        mode = input(prompt)
        if mode == '1':
            print('I: mode: insertion')
            return mode
        if mode == '2':
            print('I: mode: extraction')
            return mode
        print('E: invalid mode')
# Install the Ctrl-C handler, pick the mode, then interactively gather a
# readable, non-empty input file; loop until one opens successfully.
signal(SIGINT, signal_handler)
mode = get_mode()
while True:
    if mode == '1':
        i_file = input('Input file (file to hide): ')
    else:
        i_file = input('Input file (container): ')
    if i_file == '':
        print('E: input file is not set')
        continue
    i_file = path.realpath(i_file)
    if not path.exists(i_file):
        print('E: {} does not exist'.format(i_file))
        continue
    # Printed inside a list so surrounding whitespace in the path is visible.
    print('I: input file real path:', [i_file])
    i_size = get_len(i_file)
    if i_size == 0:
        print('E: input file has zero size, nothing to hide!')
        continue
    if i_size == -1:
        # get_len already printed the underlying error.
        continue
    print('I: input file size:', i_size)
    try:
        i_object = open(i_file, 'rb')
        break
    except Exception as e:
        print('E: {}'.format(e))
# Gather the output file.  In insertion mode it is an existing container to
# be partially overwritten in place; in extraction mode it is a new file.
if mode == '1':
    while True:
        o_file = input('Output file (container): ')
        if o_file == '':
            print('E: output file is not set')
            continue
        o_size = get_len(o_file)
        if o_size == -1:
            continue
        o_file = path.realpath(o_file)
        if not path.exists(o_file):
            # BUG fix: this message previously reported i_file, so the user
            # was shown the wrong (existing) path for a missing output file.
            print('E: {} does not exist'.format(o_file))
            continue
        print('I: output file real path:', [o_file])
        if o_file == i_file:
            print('E: input and output files should not be at the same path!')
            continue
        print('I: output file size:', o_size)
        # The container must be able to hold the whole input file.
        if o_size < i_size:
            print('E: output file must be not smaller than input file')
            continue
        try:
            # 'rb+' updates the container in place without truncating it.
            o_object = open(o_file, 'rb+')
            break
        except Exception as e:
            print(e)
            continue
else:
    while True:
        o_file = input('Output file: ')
        if o_file == '':
            print('E: output file is not set')
            continue
        o_file = path.realpath(o_file)
        print('I: output file real path:', [o_file])
        # Refuse to clobber an existing file when extracting.
        if path.exists(o_file):
            print('E: {} already exists'.format([o_file]))
            continue
        try:
            o_object = open(o_file, 'wb')
            break
        except Exception as e:
            print(e)
            continue
# Ask for the byte offset at which to write (insertion) or start reading
# (extraction); validate that it fits inside the relevant file.
if mode == '1':
    max_init_pos = o_size - i_size
else:
    max_init_pos = i_size - 1
while True:
    init_pos = input(
        'Initial position\n(valid values are [0; {}], default=0): '.format(
            max_init_pos))
    if init_pos == '':
        init_pos = 0
    try:
        init_pos = int(init_pos)
    except Exception:
        print('E: invalid value')
        continue
    print('I: initial position:', init_pos)
    if init_pos > max_init_pos or init_pos < 0:
        print('E: invalid initial position')
        continue
    break
# Insertion destroys part of the container: require explicit confirmation.
if mode == '1':
    while True:
        do_cont = input('Output file will be partially overwritten.\n'
                        'Do you want to continue? (y|n): ')
        if do_cont in ('y', 'Y'):
            break
        if do_cont in ('n', 'N'):
            print('Exit.')
            exit()
# Extraction: ask how many bytes to pull out, bounded by what remains
# after the chosen initial position.
if mode == '2':
    while True:
        max_data_size = i_size - init_pos
        data_size = input(
            'Data size to extract\n(valid values are [1; {}], '
            'default={}): '.format(
                max_data_size, max_data_size))
        if data_size == '':
            data_size = max_data_size
        try:
            data_size = int(data_size)
            print('I: data size to extract:', data_size)
        except Exception:
            print('E: invalid value')
            continue
        if data_size > max_data_size or data_size < 1:
            print('E: invalid data size')
            continue
        break
# Copy loop: stream the data in 32 MiB chunks, fsync after each chunk so
# progress survives a crash, and SHA-256 everything written for later
# integrity checks.
print('I: reading, writing, fsyncing...')
M = 1024**2
CHUNK_SIZE = M * 32
if mode == '1':
    # Insertion always copies the whole input file.
    data_size = i_size
chunks_num = data_size // CHUNK_SIZE
ost_size = data_size % CHUNK_SIZE  # remainder after the full-size chunks
# Seek in whichever file the offset applies to: the container being written
# (insertion) or the container being read (extraction).
if mode == '1':
    o_object.seek(init_pos)
else:
    i_object.seek(init_pos)
m = sha256()
t0 = monotonic()
t00 = t0
written_sum = 0
print()
# '\033M' is the ANSI reverse-linefeed escape: each progress report
# overwrites the previous line in the terminal.
print('\033Mwritten {}, {} MiB, {}% '.format(
    written_sum,
    round(written_sum / M, 1),
    round(written_sum / data_size * 100, 1)
))
if chunks_num > 0:
    for i in range(chunks_num):
        i_data = i_object.read(CHUNK_SIZE)
        o_object.write(i_data)
        fsync(o_object.fileno())
        written_sum += CHUNK_SIZE
        m.update(i_data)
        if monotonic() - t0 > 2:
            # Throttle progress output to roughly every 2 seconds.
            t0 = monotonic()
            print('\033MI: written: {}, {} MiB, {}% in {}s, avg {} '
                  'MiB/s'.format(
                written_sum,
                round(written_sum / M, 1),
                round(written_sum / data_size * 100, 1),
                round(monotonic() - t00, 1),
                round(written_sum / M / (monotonic() - t00), 1)))
if ost_size > 0:
    # Final partial chunk.
    i_data = i_object.read(ost_size)
    o_object.write(i_data)
    fsync(o_object.fileno())
    written_sum += ost_size
    m.update(i_data)
print('\033MI: written: {}, {} MiB, {}% in {}s, avg {} MiB/s'.format(
    written_sum,
    round(written_sum / M, 1),
    round(written_sum / data_size * 100, 1),
    round(monotonic() - t00, 1),
    round(written_sum / M / (monotonic() - t00), 1)))
sha256sum = m.hexdigest()
if mode == '1':
    # These three numbers are exactly what the extraction mode needs.
    print('Remember this to extract the data from the container:')
    print('Initial position {}, Data size {}, Final position {}'.format(
        init_pos, i_size, o_object.tell()))
else:
    print('I: final position:', i_object.tell())
    print('I: file extracted successfully')
i_object.close()
o_object.close()
print('SHA256:', sha256sum)
print('OK')
|
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
from snippets.serializers import UserSerializer
from rest_framework import generics
from django.contrib.auth.models import User
class SnippetList(generics.ListCreateAPIView):
    """
    List all snippets, or create a new snippet.
    """
    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer

    def perform_create(self, serializer):
        # Stamp the authenticated request user as the snippet's owner;
        # `owner` is not client-supplied.
        serializer.save(owner=self.request.user)
class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
    """
    Retrieve, update or delete a snippet instance
    """
    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer
class UserList(generics.ListAPIView):
    """List all users (read-only)."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a user instance."""
    # NOTE(review): the DRF tutorial exposes users read-only via
    # RetrieveAPIView; confirm users should really be editable/deletable here.
    queryset = User.objects.all()
    serializer_class = UserSerializer
|
def Calc(A, B, Op):
    """Apply the arithmetic operation selected by the code *Op*.

    Op == 1 -> A - B, Op == 2 -> A * B, Op == 3 -> A / B,
    any other value -> A + B (the fallback, as in the original if-chain).
    """
    operations = {
        1: lambda x, y: x - y,
        2: lambda x, y: x * y,
        3: lambda x, y: x / y,
    }
    # Unknown codes fall through to addition, matching the old `else` branch.
    return operations.get(Op, lambda x, y: x + y)(A, B)
# Read two operands and three operation codes, then print each result.
# BUG fix: the first prompt was the mojibake character "ะ" instead of "A".
A = float(input("A: "))
B = float(input("B: "))
N1 = int(input("N1: "))
N2 = int(input("N2: "))
N3 = int(input("N3: "))
print(Calc(A, B, N1))
print(Calc(A, B, N2))
print(Calc(A, B, N3))
|
import os
# Raw string so the backslashes in the Windows path are not parsed as
# (deprecated) string escapes.
os.chdir(r"D:\Deep_Learning_A_Z\Artificial_Neural_Networks")
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset: columns 3..12 are features, column 13 is the label.
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values

# Encoding categorical data: Geography (column 1) and Gender (column 2).
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
# BUG fix: column 2 was encoded with labelencoder_X_1; use the encoder
# created for this column so each encoder keeps its own fitted classes.
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
# NOTE(review): `categorical_features` was removed in scikit-learn 0.22+;
# newer versions need ColumnTransformer instead -- confirm the pinned version.
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
# Drop one dummy column to avoid the dummy-variable trap.
X = X[:, 1:]

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

# Feature Scaling: fit on the training set only, then apply to both.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Build and train the ANN
import keras
from keras.models import Sequential # Initialize Neural Network
from keras.layers import Dense #Build the layers of ANN
# Initializing the ANN
classifier= Sequential()
# Adding input layer and first hidden layer.
# NOTE(review): no input_dim/input shape is given on the first Dense layer,
# so the model is only built lazily when fit() sees the data -- confirm the
# installed Keras version supports this.
classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
# Adding output layer: single sigmoid unit for binary churn probability.
classifier.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
# Compiling the ANN
classifier.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
# Fitting the ANN to the training set
classifier.fit(X_train,y_train,batch_size=10,epochs=100)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Threshold the sigmoid output at 0.5 to get boolean class labels.
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# New observation prediction: one customer encoded in the same column order
# as X, scaled with the scaler fitted on the training set.
new_prediction = classifier.predict(sc.transform(np.array([[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])))
new_prediction = (new_prediction > 0.5)
## Part 4 - Evaluating, Implementing & Tuning the ANN
# Evaluate using k-fold cross validation via the scikit-learn wrapper.
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from keras.models import Sequential # Initialize Neural Network
from keras.layers import Dense
def build_classifier():
    # Factory for KerasClassifier: rebuilds the same 6-6-1 architecture
    # used above for every CV fold.
    classifier= Sequential()
    classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
    classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
    classifier.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
    classifier.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
    return classifier
classifier= KerasClassifier(build_fn= build_classifier,batch_size=10,epochs=100)
# 10-fold CV accuracy; mean/std summarize bias and variance.
accuracies=cross_val_score(estimator= classifier,X = X_train,y = y_train,cv = 10)
mean = accuracies.mean()
varience = accuracies.std()
# Dropout regularization to avoid overfitting.
from keras.layers import Dropout
# Initializing the ANN
classifier= Sequential()
# Adding input layer with dropout
classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
classifier.add(Dropout(rate=0.1))
# Adding first hidden layer with dropout
# NOTE(review): this dropout model is left unfinished -- no output layer,
# compile or fit follow; confirm whether this section is illustrative only.
classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
classifier.add(Dropout(rate=0.1))
# Tuning the ANN hyperparameters with GridSearchCV.
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential # Initialize Neural Network
from keras.layers import Dense
def build_classifier(optimizer):
    # Same 6-6-1 architecture, but the optimizer is a tunable parameter
    # supplied by GridSearchCV (note: this redefines the earlier
    # zero-argument build_classifier).
    classifier= Sequential()
    classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
    classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
    classifier.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
    classifier.compile(optimizer=optimizer,loss='binary_crossentropy',metrics=['accuracy'])
    return classifier
classifier= KerasClassifier(build_fn= build_classifier)
# Grid: 2 batch sizes x 2 epoch counts x 2 optimizers, 10-fold CV each.
parameters = {'batch_size' : [25,32],
              'epochs' : [100,500],
              'optimizer': ['adam','rmsprop']}
grid_search = GridSearchCV(estimator=classifier,param_grid=parameters, scoring='accuracy',cv=10)
grid_search = grid_search.fit(X_train,y_train)
best_params = grid_search.best_params_
best_score = grid_search.best_score_
|
from django.db import models
# Create your models here.
class ESTATUS(models.Model):
    # Status catalog: id + human-readable description.
    IDESTATUS = models.AutoField(primary_key=True)
    DESCRIPCION = models.CharField(max_length=50)
    def __str__(self):
        return '%s %s'%(str(self.IDESTATUS), self.DESCRIPCION)
class PERIODOESCOLAR(models.Model):
    # School period with its start and end dates.
    IDPERIODO = models.AutoField(primary_key=True)
    NOMBRE = models.CharField(max_length=50, null=False)
    FECHA_INI = models.DateField(null=False)
    FECHA_FIN = models.DateField(null=False)
    def __str__(self):
        return '%s %s'%(str(self.IDPERIODO), self.NOMBRE)
class PROCESO(models.Model):
    # Academic process tied to a school period.
    IDPROCESO = models.AutoField(primary_key=True)
    # NOTE(review): ForeignKey without on_delete -- valid only on Django < 2.0.
    ID_PERIODO = models.ForeignKey(PERIODOESCOLAR)
    NOMBRE = models.CharField(max_length=30)
    def __str__(self):
        return '%s %s'%(str(self.IDPROCESO), self.NOMBRE)
class CATCARRERAS(models.Model):
    # Degree-program catalog (name + short abbreviation).
    IDCARRERA = models.AutoField(primary_key=True)
    DESCRIPCION = models.CharField(max_length=50, null=False)
    ABREVIATURA = models.CharField(max_length=12, null=False)
    def __str__(self):
        return '%s %s'%(str(self.IDCARRERA), self.ABREVIATURA)
class CATEMPRESAS(models.Model):
    # Partner-company catalog with contact data.
    IDEMPRESA = models.AutoField(primary_key=True)
    NOMBRE = models.CharField(max_length=50, null=False)
    REPRESENTANTE = models.CharField(max_length=100, null=True)
    TELEFONO = models.BigIntegerField(null=False)
    DOMICILIO = models.CharField(max_length=120, null=False)
    EMAIL = models.EmailField(null=False)
    def __str__(self):
        return '%s %s'%(str(self.IDEMPRESA), self.NOMBRE)
class TIPOPERSONA(models.Model):
    # Person-type catalog (e.g. role of a person record).
    IDTIPOPERSONA = models.AutoField(primary_key=True)
    NOMBRE = models.CharField(max_length=30, null=False)
    def __str__(self):
        return '%s %s'%(str(self.IDTIPOPERSONA), self.NOMBRE)
class CATPERSONAS(models.Model):
    # Person record, including the login credentials.
    SEXO_CHOICE = (
        ('M','Masculino'),
        ('F','Femenino'),
        )
    IDPERSONA = models.AutoField(primary_key=True)
    # NOTE(review): ForeignKey without on_delete -- valid only on Django < 2.0.
    IDTIPOPERSONA = models.ForeignKey(TIPOPERSONA)
    NOMBRE= models.CharField(max_length=50, null=False)
    APELLIDO_PAT = models.CharField(max_length=50, null=False)
    APELLIDO_MAT = models.CharField(max_length=50, null=True)
    SEXO = models.CharField(max_length=1,choices=SEXO_CHOICE, null=False, default='M')
    USUARIO = models.CharField(max_length=30, null=False, unique=True)
    # SECURITY NOTE(review): password stored as a plain CharField -- it is
    # saved in cleartext; consider Django's auth/password hashing instead.
    PASSWORD = models.CharField(max_length=30, null=False)
    ACTIVO = models.BooleanField(null=False)
    def __str__(self):
        return '%s %s'%(str(self.IDPERSONA), self.NOMBRE)
class CATALUMNOS(models.Model):
    # Student record: one-to-one extension of CATPERSONAS.
    # NOTE(review): relations lack on_delete -- valid only on Django < 2.0.
    IDALUMNO = models.OneToOneField(CATPERSONAS, primary_key=True)
    MATRICULA = models.CharField(max_length=6, null=False)
    IDPROCESO = models.ForeignKey(PROCESO)
    IDCARRERA = models.ForeignKey(CATCARRERAS)
    IDEMPRESA = models.ForeignKey(CATEMPRESAS)
    IDESTATUS = models.ForeignKey(ESTATUS)
    ACTIVO = models.BooleanField(null=False)
    def __str__(self):
        return '%s %s'%(str(self.IDALUMNO), self.MATRICULA)
class CATCALIFICACIONES(models.Model):
    # Grades per student (CAL_AA / CAL_AL components).
    IDCALIF = models.AutoField(primary_key=True)
    # NOTE(review): ForeignKey without on_delete -- valid only on Django < 2.0.
    IDALUMNO = models.ForeignKey(CATALUMNOS)
    CAL_AA = models.IntegerField(null=False)
    CAL_AL = models.IntegerField(null=False)
    def __str__(self):
        return '%s %s'%(str(self.IDALUMNO), self.IDCALIF)
class CATHISTREP(models.Model):
    # Report-upload history per student.
    # NOTE(review): ForeignKey without on_delete -- valid only on Django < 2.0.
    IDALUMNO = models.ForeignKey(CATALUMNOS)
    FECHA_SUBIDA = models.DateField(null=False)
    TIPO_REP = models.CharField(max_length=20, null=False)
class CATMAESTROS(models.Model):
    # Teacher record linked to a person.
    IDMAESTRO = models.AutoField(primary_key=True)
    # NOTE(review): ForeignKey without on_delete -- valid only on Django < 2.0.
    IDPERSONA = models.ForeignKey(CATPERSONAS)
    def __str__(self):
        return '%s %s'%(str(self.IDMAESTRO), self.IDPERSONA)
class ASIGNA_EMPRESA(models.Model):
    # Assignment of a student to a company, with link/change dates.
    IDASIGNA = models.AutoField(primary_key=True)
    # NOTE(review): ForeignKeys without on_delete -- valid only on Django < 2.0.
    IDEMPRESA = models.ForeignKey(CATEMPRESAS)
    IDALUMNO = models.ForeignKey(CATALUMNOS)
    FECHA_VINCU = models.DateField(null=False)
    FECHA_CAMBIO = models.DateField(null=True)
    def __str__(self):
        return '%s %s'%(str(self.IDASIGNA), self.IDEMPRESA)
|
#!/usr/bin/env python
"""
A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
"""
from os import path
try:
from pip.req import parse_requirements
except ImportError:
# pip >= 10
from pip._internal.req import parse_requirements
from setuptools import find_packages, setup
def get_requirements(requirements_file):
    """Use pip to parse requirements file.

    Returns a list of requirement strings (``name;markers`` when
    environment markers are present); an empty list if the file is missing.
    """
    if not path.isfile(requirements_file):
        return []
    parsed = []
    for req in parse_requirements(requirements_file, session="hack"):
        try:
            if req.markers:
                entry = "%s;%s" % (req.req, req.markers)
            else:
                entry = "%s" % req.req
        except AttributeError:
            # pip >= 20.0.2 exposes the raw requirement string instead.
            entry = req.requirement
        parsed.append(entry)
    return parsed
if __name__ == "__main__":
    # Resolve requirement files relative to this setup.py.
    HERE = path.abspath(path.dirname(__file__))
    INSTALL_REQUIRES = get_requirements(path.join(HERE, "requirements.txt"))
    MYSQL_REQUIRES = get_requirements(path.join(HERE, "mysql-requirements.txt"))
    POSTGRESQL_REQUIRES = get_requirements(
        path.join(HERE, "postgresql-requirements.txt"))
    LDAP_REQUIRES = get_requirements(path.join(HERE, "ldap-requirements.txt"))
    with open(path.join(HERE, "README.rst")) as readme:
        LONG_DESCRIPTION = readme.read()

    def local_scheme(version):
        """Skip the local version (eg. +xyz of 0.6.1.dev4+gdf99fe2)
        to be able to upload to Test PyPI"""
        return ""

    setup(
        name="modoboa",
        description="Mail hosting made simple",
        long_description=LONG_DESCRIPTION,
        license="ISC",
        url="http://modoboa.org/",
        author="Antoine Nguyen",
        author_email="tonio@ngyn.org",
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Environment :: Web Environment",
            "Framework :: Django :: 2.2",
            "Intended Audience :: System Administrators",
            "License :: OSI Approved :: ISC License (ISCL)",
            "Operating System :: OS Independent",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "Topic :: Communications :: Email",
            "Topic :: Internet :: WWW/HTTP",
        ],
        keywords="email",
        packages=find_packages(
            exclude=["doc", "test_data", "test_project", "frontend"]),
        include_package_data=True,
        zip_safe=False,
        scripts=["bin/modoboa-admin.py"],
        install_requires=INSTALL_REQUIRES,
        # Version comes from git tags via setuptools_scm; local part stripped
        # so builds can be uploaded to Test PyPI.
        use_scm_version={
            "local_scheme": local_scheme
        },
        python_requires=">=3.7",
        setup_requires=["setuptools_scm>6.4"],
        # Optional database/auth backends as extras.
        extras_require={
            "ldap": LDAP_REQUIRES,
            "mysql": MYSQL_REQUIRES,
            "postgresql": POSTGRESQL_REQUIRES,
            "argon2": ["argon2-cffi >= 16.1.0"],
        },
    )
|
# Allow running this file directly from inside the package tree: put the
# repository root (three directory levels up) on sys.path before the
# package imports below.
if __name__ == '__main__' and __package__ is None:
    import sys
    from os import path
    sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from utct.TensorFlow.trainer1 import Trainer1
from common.TensorFlow.optimizer import Optimizer
from common.train_config import TrainConfig
from common.train_saver import TrainSaver
from mnist_data_source import MnistDataSource
from mnist_model import MnistModel
def main():
    """Train the MNIST model using the project's TensorFlow Trainer1 pipeline."""
    model = MnistModel()
    optimizer = Optimizer()
    data_source = MnistDataSource()
    # Load run parameters (work dir, epoch limits, ...) for this task/framework.
    cfg = TrainConfig()
    cfg.load(
        model,
        optimizer,
        data_source,
        task_name="mnist",
        framework_name='TensorFlow')
    # Saver: checkpoint naming and locations, presumably under work_dir --
    # see TrainSaver for details.
    saver = TrainSaver(
        cfg.prm['work_dir'],
        cfg.prm['project_name'],
        cfg.prm['model_filename_prefix'],
        data_source=data_source,
        task_name="mnist",
        suffix="_tf")
    trainer = Trainer1(
        model=model,
        optimizer=optimizer,
        data_source=data_source,
        saver=saver)
    # dat_* arguments look like input-augmentation settings (blur, noise,
    # perspective jitter, scale/translate/rotate) -- confirm in Trainer1.
    trainer.train(
        num_epoch=cfg.prm['max_num_epoch'],
        epoch_tail=cfg.prm['min_num_epoch'],
        dat_gaussian_blur_sigma_max=1.0,
        dat_gaussian_noise_sigma_max=0.05,
        dat_perspective_transform_max_pt_deviation=1,
        dat_max_scale_add=1.0 / (28.0 / 2),
        dat_max_translate=2.0,
        dat_rotate_max_angle_rad=0.2617994)  # = pi/12, i.e. 15 degrees
if __name__ == '__main__':
    main()
|
from .BrushControl import BrushControl
from PyQt5 import QtWidgets, QtCore
class FontChooserControl(BrushControl):
    # Brush-property control backed by a QFontComboBox.
    def __init__(self, brush, label, callback = None, fontName = ""):
        # NOTE(review): fontName is accepted but never applied to the combo
        # box -- confirm whether the initial font should be set from it.
        BrushControl.__init__(self, brush, label, callback)
        self.label = QtWidgets.QLabel(label)
        self.control = QtWidgets.QFontComboBox()
        self.control.currentFontChanged.connect(self.valueChanged)
    def getValue(self):
        # Currently selected QFont.
        return self.control.currentFont()
|
import nltk
from nltk.util import ngrams
from collections import defaultdict
import random
import argparse
parser = argparse.ArgumentParser(description='Ngram Model')
parser.add_argument('--data', type=str, default='../../data/full_dataset.txt',
                    help='location of data corpus')
# BUG fix: the help text for --n said "random seed" (copy-paste leftover).
parser.add_argument('--n', type=int, default=3,
                    help='order n of the ngram model')
args = parser.parse_args()
fname = args.data
# Read the corpus as raw bytes, one tweet per line; the context manager
# replaces the manual open/close pair, and the dead `data = []` is gone.
with open(fname, 'rb') as f:
    data = [line for line in f]
"""
Trains ngram model
"""
def train(n, data):
model = defaultdict(lambda: defaultdict(lambda: 0))
for tweet in data:
for words in list(ngrams(tweet.split(), n, pad_right=True, pad_left=True)):
prefix, suffix = words[:n-1], words[n-1]
model[prefix][suffix] += 1
# Generates probabilities
for prefix in model:
total_count = float(sum(model[prefix].values()))
if total_count > 0:
for suffix in model[prefix]:
model[prefix][suffix] /= total_count
return model
def generate(model, n, seed=None, debug=True):
    """Sample a text from the trained n-gram *model* by roulette-wheel
    selection over each prefix's suffix distribution.

    NOTE(review): if the current prefix was never seen, its distribution is
    empty, nothing is appended and the loop can spin forever; only the
    240-character debug guard below limits this -- confirm intended.
    """
    # Start from n-1 padding tokens (None), matching the training padding.
    text = [None] * (n - 1)
    if seed:
        text = seed
    sentence_finished = False
    while not sentence_finished:
        r = random.random()
        #r = random.randint(0,1e6) * 1e-10
        accumulator = 0.0
        prefix_key = tuple(text[-(n-1):])
        # Walk the cumulative distribution until it crosses r.
        for word in model[prefix_key].keys():
            accumulator += model[prefix_key][word]
            if accumulator >= r:
                text += [word]
                break
        # n trailing Nones means the right padding was reached: sentence done.
        if text[-n:] == [None]*(n):
            sentence_finished = True
        if debug and len(' '.join(filter(None, text))) > 240: # Hacky. TODO Fix
            sentence_finished = True
    l = len(text)
    # Strip the padding tokens from both ends before returning.
    return text[n:l-n]
n = args.n
print("Training {}-gram".format(n))
model = train(n, data)
# Sample and print three generated texts.
for i in range(3):
    text = generate(model, n, debug=True)
    text = ' '.join(filter(None, text))
    print('{}\n'.format(text))
# NOTE(review): no-op lookup; as a defaultdict access it inserts the
# (None, None) key if absent -- confirm whether this line is needed at all.
model[None, None]
|
from DominoExceptions import BadSumException, BadDominoException
from Solitaire import Solitaire
class InteractiveSolitaire(Solitaire):
    """Terminal-interactive variant of the Solitaire domino game."""

    def turn(self):
        """This is a function that handles a turn in the solitaire's game"""
        # We choose which dominoes are going to be removed: the user types
        # the 1-based positions as one string, e.g. "145" for 1, 4 and 5.
        num_domino = list(input("Choose the number of the dominos to remove "))
        try:
            num_domino = [int(i) - 1 for i in num_domino]
        except ValueError:
            print("Please enter a sequence of int! (ex: 145 to select dominos 1, 4 and 5)")
            return
        # NOTE(review): indexing happens before check_dominos(), so an
        # out-of-range position raises IndexError here -- confirm that
        # check_dominos is meant to cover that case.
        dominos_to_discard = [self.hand[i] for i in num_domino]
        try:
            self.check_dominos(num_domino)
            self.play_turn(dominos_to_discard)
        except BadSumException as bad_sum:
            print(bad_sum)
        except BadDominoException as bad_domino:
            print(bad_domino)
        finally:
            # Typo fix: "in you hand" -> "in your hand".
            print(f"It remains {len(self.deck)} dominos in the deck and {len(self.hand)} in your hand")

    def play(self):
        """Manage the game"""
        while not self.victory:
            if self.is_game_lost():
                print("You have lost... Too bad !")
                exit(0)
            self.print()
            self.turn()
        # Typo fix: "Congratulation" -> "Congratulations".
        print("You have won ! Congratulations !")
|
from matrices import Matriz
from dataclasses import dataclass
from enum import Enum
import re
class LexerException(Exception):
    """Raised when the tokenizer meets a character it cannot lex."""

    def __init__(self):
        # Fix the mojibake message ("Carรกcter invรกlido.") and pass it to
        # Exception so str(exc) is informative instead of empty.
        super().__init__("Carácter inválido.")
        self.message = "Carácter inválido."
class Tipo(Enum):
    # Token categories for the matrix-literal lexer.
    LEF_PARENS = 0  # '['
    RIG_PARENS = 1  # ']'
    NUMERO = 2      # integer literal
    COMA = 3        # ','
@dataclass
class Token:
    """A single lexed token: its category and raw text."""
    tipo_token: Tipo
    valor: str

    def __str__(self):
        return "Tipo: {} Valor: {}".format(self.tipo_token, self.valor)
class Tokenizador:
    """Lexer for matrix literals such as ``[[1,2],[3,4]]`` (spaces ignored)."""
    def __init__(self, cadena: str):
        self.cadena = cadena.replace(' ', '')  # strip all spaces up front
        self.puntero = 0  # cursor into self.cadena
    def formar_numero(self):
        # Consume an (optionally negative) integer at the cursor.
        # Returns the lexeme, or None for a non-canonical integer
        # (leading zeros like "007", or a lone "-").
        numero = ""
        # Negative-signed number.
        if self.cadena[self.puntero] == '-':
            numero += self.cadena[self.puntero]
            self.puntero += 1
        while self.puntero < len(self.cadena) and self.cadena[self.puntero].isnumeric():
            numero += self.cadena[self.puntero]
            self.puntero += 1
        # Accept "0" or a number with a non-zero leading digit; reject the rest.
        if not re.match("-?[1-9][0-9]*", numero) and numero != '0':
            return None
        return numero
    def tokenizar(self):
        # Generator yielding Token objects left to right; raises
        # LexerException on any character outside [, ], ',', digits and '-'.
        while self.puntero < len(self.cadena):
            if self.cadena[self.puntero] == '[':
                self.puntero += 1
                yield Token(Tipo.LEF_PARENS, '[')
            elif self.cadena[self.puntero] == ']':
                self.puntero += 1
                yield Token(Tipo.RIG_PARENS, ']')
            elif self.cadena[self.puntero] == ',':
                self.puntero += 1
                yield Token(Tipo.COMA, ',')
            elif self.cadena[self.puntero] == '-' or self.cadena[self.puntero].isnumeric():
                token = Token(Tipo.NUMERO, self.formar_numero())
                if token.valor is None:
                    raise LexerException
                yield token
            else:
                raise LexerException
|
from constante import *
import math
from laser import Laser
class Player(pygame.sprite.Sprite):
    """Mouse-aimed player sprite that can shoot lasers."""
    def __init__(self):
        super().__init__()
        # playerIMG / ecranW / ecranH come from `from constante import *`
        # -- TODO confirm.
        self.image = playerIMG
        self.rect = self.image.get_rect()
        self.rect.x = ecranW/2
        self.rect.y = ecranH/2
        self.angle = 0
        # Keep an unrotated copy so repeated rotations don't accumulate blur.
        self.origin_image = self.image
        self.lasers = pygame.sprite.Group()
    def shoot(self):
        """Spawn a laser at the player's centre."""
        self.lasers.add(Laser(self, self.rect.centerx, self.rect.centery))
    def rotating(self):
        """Rotate the sprite to face the mouse cursor.

        Also stores the pixel distance to the cursor in self.distance.
        (Removed a leftover debug print of self.angle that spammed stdout
        every frame.)
        """
        mouse = pygame.mouse.get_pos()
        offset = (self.rect.centerx - mouse[0], self.rect.centery - mouse[1])
        self.angle = math.degrees(math.atan2(*offset))
        # Re-rotate from the pristine image and re-centre the new rect.
        old_center = self.rect.center
        self.image = pygame.transform.rotozoom(self.origin_image, self.angle, 1)
        self.rect = self.image.get_rect(center=old_center)
        self.distance = int(math.sqrt((offset[0] * offset[0]) + (offset[1] * offset[1])))
|
""" Script to check conversion from nPE to MeV of positron, which were simulated with tut_detsim.py of JUNO offline
version J18v1r1-pre1.
This is a cross-check to script check_conversion_npe_mev.py, where the conversion from nPE to MeV of neutron and
protons is calculated.
The conversion factor of positrons should be equal to the conversion factor of neutrons and protons.
More information: info_conversion_proton_neutron.odt (/home/astro/blum/juno/atmoNC/data_NC/conversion_nPE_MeV/)
"""
import datetime
import ROOT
import sys
from NC_background_functions import energy_resolution
import numpy as np
from matplotlib import pyplot as plt
from decimal import Decimal
from matplotlib.colors import LogNorm
# --- script configuration: timestamps, input/output paths, file range, preallocated result arrays ---
# get the date and time, when the script was run:
date = datetime.datetime.now()
now = date.strftime("%Y-%m-%d %H:%M")
# NOTE(review): 'now' and 'number_positron' below are not used in this part of the
# script — confirm they are needed (e.g. for logging) or remove.
# set the path of the input files (in this folder the already analyzed pulse shapes of the prompt signal of the IBD
# events are saved):
input_path_PE = "/home/astro/blum/juno/IBD_events/hittimes/"
# path, where the root files of the IBD events are saved to get the visible energy of the event:
input_path_Qedep = "/local/scratch1/pipc51/astro/blum/IBD_hepevt/"
# set path, where results should be saved:
output_path = "/home/astro/blum/juno/atmoNC/data_NC/conversion_nPE_MeV/"
# set the number of the first file and number of the last file that should be read:
start_number = 0
stop_number = 199
# number of entries in the input files:
Number_entries_input = 100
# total number of events:
number_positron = (stop_number - start_number + 1) * Number_entries_input
# set the radius for the volume cut in mm:
r_cut = 17700
# preallocate array, where number of PE of the prompt signal (positron) is stored:
array_number_pe = np.array([])
# preallocate array, where visible energy of the positron in MeV is stored:
array_Qedep = np.array([])
# preallocate arrays, where nPE and Qedep are stored separately per radial shell
# (bin edges at 8, 12, 14, 16, 17 and 17.7 m):
array_Npe_0_8m = np.array([])
array_Npe_8_12m = np.array([])
array_Npe_12_14m = np.array([])
array_Npe_14_16m = np.array([])
array_Npe_16_17m = np.array([])
array_Npe_17_17_7m = np.array([])
array_Qedep_0_8m = np.array([])
array_Qedep_8_12m = np.array([])
array_Qedep_12_14m = np.array([])
array_Qedep_14_16m = np.array([])
array_Qedep_16_17m = np.array([])
array_Qedep_17_17_7m = np.array([])
# loop over all files: for each file, read the Qedep of the positron from the ROOT
# trees and the nPE of the prompt signal from the pre-analyzed hittimes txt files.
for filenumber in range(start_number, stop_number+1, 1):
    # load ROOT file:
    rfile = ROOT.TFile(input_path_Qedep + "user_IBD_hepevt_{0:d}.root".format(filenumber))
    # get the "geninfo"-TTree from the TFile:
    rtree_geninfo = rfile.Get("geninfo")
    # get the number of events in the geninfo Tree:
    number_events_geninfo = rtree_geninfo.GetEntries()
    # get the 'prmtrkdep' tree:
    rtree_prmtrkdep = rfile.Get('prmtrkdep')
    # get number of events in the tree:
    number_events_prmtrkdep = rtree_prmtrkdep.GetEntries()
    # check if number of events are equal in both trees:
    if number_events_geninfo == number_events_prmtrkdep:
        number_events = number_events_geninfo
    else:
        sys.exit("ERROR: number of events in t Trees are NOT equal!!")
    # check if number_events is equal to number_entries_input (if not, the detector simulation was incorrect!!):
    if number_events != Number_entries_input:
        sys.exit("ERROR: number of events are not equal to {0:d} -> Detector Simulation not correct!"
                 .format(Number_entries_input))
    # loop over every event, i.e. every entry, in the TTree:
    for event in range(number_events):
        """ read 'geninfo' tree to check initial energy and to apply volume cut: """
        # get the current event in the TTree:
        rtree_geninfo.GetEntry(event)
        # get event ID of geninfo-tree:
        # NOTE(review): evt_id_geninfo is read but not used afterwards — confirm.
        evt_id_geninfo = int(rtree_geninfo.GetBranch('evtID').GetLeaf('evtID').GetValue())
        # get position of 0th initial particle in x, y, z in mm (positions of the initial particles are equal):
        x_init = float(rtree_geninfo.GetBranch('InitX').GetLeaf('InitX').GetValue(0))
        y_init = float(rtree_geninfo.GetBranch('InitY').GetLeaf('InitY').GetValue(0))
        z_init = float(rtree_geninfo.GetBranch('InitZ').GetLeaf('InitZ').GetValue(0))
        # calculate the radius in mm:
        r_init = np.sqrt(x_init**2 + y_init**2 + z_init**2)
        if r_init > r_cut:
            # apply volume cut:
            continue
        """ read 'prmtrkdep' tree to check quenched deposit energy: """
        # get the current event in the TTree:
        rtree_prmtrkdep.GetEntry(event)
        # get number of particles:
        n_part = int(rtree_prmtrkdep.GetBranch('nInitParticles').GetLeaf('nInitParticles').GetValue())
        # loop over the initial particles to get Qedep of the positron:
        for index in range(n_part):
            # get PDGID of the particle:
            PDGID = int(rtree_prmtrkdep.GetBranch('PDGID').GetLeaf('PDGID').GetValue(index))
            # check if particle is positron:
            if PDGID == -11:
                # get quenched energy of positron in MeV:
                qedep_value = float(rtree_prmtrkdep.GetBranch('Qedep').GetLeaf('Qedep').GetValue(index))
                # consider energy resolution of detector:
                if qedep_value > 0:
                    # get the value of sigma of energy resolution for value of qedep_value:
                    sigma_energy = energy_resolution(qedep_value)
                    # generate normal distributed random number with mean = qedep_value and sigma = sigma_energy:
                    qedep_value = np.random.normal(qedep_value, sigma_energy)
                array_Qedep = np.append(array_Qedep, qedep_value)
                if r_init < 8000:
                    array_Qedep_0_8m = np.append(array_Qedep_0_8m, qedep_value)
                elif 8000 <= r_init < 12000:
                    array_Qedep_8_12m = np.append(array_Qedep_8_12m, qedep_value)
                elif 12000 <= r_init < 14000:
                    array_Qedep_12_14m = np.append(array_Qedep_12_14m, qedep_value)
                elif 14000 <= r_init < 16000:
                    array_Qedep_14_16m = np.append(array_Qedep_14_16m, qedep_value)
                elif 16000 <= r_init < 17000:
                    array_Qedep_16_17m = np.append(array_Qedep_16_17m, qedep_value)
                else:
                    array_Qedep_17_17_7m = np.append(array_Qedep_17_17_7m, qedep_value)
        # NOTE(review): if an event contains no positron (PDGID == -11), nothing is
        # appended to array_Qedep while nPE is still appended below — the Qedep and
        # nPE arrays could then differ in length; confirm every event has a positron.
        """ get number of PE of prompt signal from the hittimes txt files: """
        prompt_signal = np.loadtxt(input_path_PE + "file{0:d}_evt{1:d}_prompt_signal.txt".format(filenumber, event))
        # sum up prompt signal from index 6 to end (index 0 to 2 defines reconstructed position and index 3 to 5
        # defines the time window of the prompt signal):
        nPE = np.sum(prompt_signal[6:])
        # append nPE of the prompt signal to array:
        array_number_pe = np.append(array_number_pe, nPE)
        if r_init < 8000:
            array_Npe_0_8m = np.append(array_Npe_0_8m, nPE)
        elif 8000 <= r_init < 12000:
            array_Npe_8_12m = np.append(array_Npe_8_12m, nPE)
        elif 12000 <= r_init < 14000:
            array_Npe_12_14m = np.append(array_Npe_12_14m, nPE)
        elif 14000 <= r_init < 16000:
            array_Npe_14_16m = np.append(array_Npe_14_16m, nPE)
        elif 16000 <= r_init < 17000:
            array_Npe_16_17m = np.append(array_Npe_16_17m, nPE)
        else:
            array_Npe_17_17_7m = np.append(array_Npe_17_17_7m, nPE)
# array_number_pe contains nPE and array_Qedep contains Qedep of all events inside r_cut!
""" do linear fit """
# do linear fit with np.linalg.lstsq:
# The model is y = a * x; x = array_number_pe, y = array_qedep
# x needs to be a column vector instead of a 1D vector for this, however.
array_number_pe_columnvector = array_number_pe[:, np.newaxis]
# first value of output is slope of linear fit (fir_result is array with one entry):
fit_result = np.linalg.lstsq(array_number_pe_columnvector, array_Qedep, rcond=None)[0]
# take first entry of fit_result:
fit_result = fit_result[0]
# set x axis for linear fit:
fit_x_axis = np.arange(0, max(array_number_pe), 100)
# set y axis for linear fit:
fit_y_axis = fit_result * fit_x_axis
""" do linear fit for different radii: """
# x needs to be a column vector instead of a 1D vector for this, however.
array_Npe_c_0_8m = array_Npe_0_8m[:, np.newaxis]
# first value of output is slope of linear fit (fir_result is array with one entry):
fit_result_0_8m = np.linalg.lstsq(array_Npe_c_0_8m, array_Qedep_0_8m, rcond=None)[0]
# take first entry of fit_result:
fit_result_0_8m = fit_result_0_8m[0]
# set y axis for linear fit:
fit_y_axis_0_8m = fit_result_0_8m * fit_x_axis
# x needs to be a column vector instead of a 1D vector for this, however.
array_Npe_c_8_12m = array_Npe_8_12m[:, np.newaxis]
# first value of output is slope of linear fit (fir_result is array with one entry):
fit_result_8_12m = np.linalg.lstsq(array_Npe_c_8_12m, array_Qedep_8_12m, rcond=None)[0]
# take first entry of fit_result:
fit_result_8_12m = fit_result_8_12m[0]
# set y axis for linear fit:
fit_y_axis_8_12m = fit_result_8_12m * fit_x_axis
# x needs to be a column vector instead of a 1D vector for this, however.
array_Npe_c_12_14m = array_Npe_12_14m[:, np.newaxis]
# first value of output is slope of linear fit (fir_result is array with one entry):
fit_result_12_14m = np.linalg.lstsq(array_Npe_c_12_14m, array_Qedep_12_14m, rcond=None)[0]
# take first entry of fit_result:
fit_result_12_14m = fit_result_12_14m[0]
# set y axis for linear fit:
fit_y_axis_12_14m = fit_result_12_14m * fit_x_axis
# x needs to be a column vector instead of a 1D vector for this, however.
array_Npe_c_14_16m = array_Npe_14_16m[:, np.newaxis]
# first value of output is slope of linear fit (fir_result is array with one entry):
fit_result_14_16m = np.linalg.lstsq(array_Npe_c_14_16m, array_Qedep_14_16m, rcond=None)[0]
# take first entry of fit_result:
fit_result_14_16m = fit_result_14_16m[0]
# set y axis for linear fit:
fit_y_axis_14_16m = fit_result_14_16m * fit_x_axis
# x needs to be a column vector instead of a 1D vector for this, however.
array_Npe_c_16_17m = array_Npe_16_17m[:, np.newaxis]
# first value of output is slope of linear fit (fir_result is array with one entry):
fit_result_16_17m = np.linalg.lstsq(array_Npe_c_16_17m, array_Qedep_16_17m, rcond=None)[0]
# take first entry of fit_result:
fit_result_16_17m = fit_result_16_17m[0]
# set y axis for linear fit:
fit_y_axis_16_17m = fit_result_16_17m * fit_x_axis
# x needs to be a column vector instead of a 1D vector for this, however.
array_Npe_c_17_17_7m = array_Npe_17_17_7m[:, np.newaxis]
# first value of output is slope of linear fit (fir_result is array with one entry):
fit_result_17_17_7m = np.linalg.lstsq(array_Npe_c_17_17_7m, array_Qedep_17_17_7m, rcond=None)[0]
# take first entry of fit_result:
fit_result_17_17_7m = fit_result_17_17_7m[0]
# set y axis for linear fit:
fit_y_axis_17_17_7m = fit_result_17_17_7m * fit_x_axis
# """ plot Qedep as function of nPE: """
# h1 = plt.figure(1, figsize=(15, 8))
# plt.plot(array_number_pe, array_Qedep, "rx", label="positron ({0:d} entries)".format(len(array_number_pe)))
# plt.xlabel("number of p.e.")
# plt.ylabel("visible energy in JUNO detector (in MeV)")
# plt.title("Visible energy vs. number of p.e.")
# plt.legend()
# plt.grid()
# plt.savefig(output_path + "qedep_vs_nPE_positron.png")
#
# """ plot Qedep as function of nPE with fit: """
# h2 = plt.figure(2, figsize=(15, 8))
# plt.plot(array_number_pe, array_Qedep, "rx",
# label="{0:d} entries".format(len(array_number_pe)))
# plt.plot(fit_x_axis, fit_y_axis, "b", label="linear fit: f(x) = {0:.3E} * x"
# .format(fit_result))
# plt.xlabel("number of p.e.")
# plt.ylabel("visible energy in JUNO detector (in MeV)")
# plt.title("Visible energy vs. number of p.e.\n(with linear fit)")
# plt.legend()
# plt.grid()
# plt.savefig(output_path + "fit_qedep_vs_nPE_positron.png")
#
# """ display Qedep as function of nPE in 2D histogram: """
# h3 = plt.figure(3, figsize=(15, 8))
# bin edges for the 2D histograms below (nPE in steps of 2000 p.e., Qedep in steps of 2 MeV):
bins_edges_nPE = np.arange(0, max(array_number_pe), 2000)
bins_edges_Qedep = np.arange(0, max(array_Qedep)+2, 2)
# plt.hist2d(array_number_pe, array_Qedep, [bins_edges_nPE, bins_edges_Qedep], norm=LogNorm(),
# cmap="rainbow")
# plt.xlabel("number of p.e.")
# plt.ylabel("visible energy in JUNO detector (in MeV)")
# plt.title("Visible energy vs. number of p.e.")
# plt.colorbar()
# plt.legend()
# plt.grid()
# plt.savefig(output_path + "hist2d_Qedep_vs_nPE_positron.png")
#
# """ display Qedep as function of nPE in 2D histogram with fit: """
# h4 = plt.figure(4, figsize=(15, 8))
# plt.hist2d(array_number_pe, array_Qedep, [bins_edges_nPE, bins_edges_Qedep], norm=LogNorm(),
# cmap="rainbow")
# plt.plot(fit_x_axis, fit_y_axis, "k", label="{1:d} entries\nlinear fit: f(x) = {0:.3E} * x"
# .format(fit_result, len(array_number_pe)))
# plt.xlabel("number of p.e.")
# plt.ylabel("visible energy in JUNO detector (in MeV)")
# plt.title("Visible energy vs. number of p.e.\nwith linear fit")
# plt.colorbar()
# plt.legend()
# plt.grid()
# plt.savefig(output_path + "hist2d_Qedep_vs_nPE_positron_fit.png")
""" display Qedep as function of nPE in 2D histogram with fit: """
h5 = plt.figure(5, figsize=(15, 8))
plt.hist2d(array_Npe_0_8m, array_Qedep_0_8m, [bins_edges_nPE, bins_edges_Qedep], norm=LogNorm(),
cmap="rainbow")
plt.plot(fit_x_axis, fit_y_axis_0_8m, "k", label="{1:d} entries\nlinear fit: f(x) = {0:.3E} * x"
.format(fit_result_0_8m, len(array_Npe_0_8m)))
plt.xlabel("number of p.e.")
plt.ylabel("visible energy in JUNO detector (in MeV)")
plt.title("Visible energy vs. number of p.e.\nwith linear fit for R < 8 m")
plt.colorbar()
plt.legend()
plt.grid()
plt.savefig(output_path + "hist2d_Qedep_vs_nPE_positron_fit_0_8m.png")
""" display Qedep as function of nPE in 2D histogram with fit: """
h6 = plt.figure(6, figsize=(15, 8))
plt.hist2d(array_Npe_8_12m, array_Qedep_8_12m, [bins_edges_nPE, bins_edges_Qedep], norm=LogNorm(),
cmap="rainbow")
plt.plot(fit_x_axis, fit_y_axis_8_12m, "k", label="{1:d} entries\nlinear fit: f(x) = {0:.3E} * x"
.format(fit_result_8_12m, len(array_Npe_8_12m)))
plt.xlabel("number of p.e.")
plt.ylabel("visible energy in JUNO detector (in MeV)")
plt.title("Visible energy vs. number of p.e.\nwith linear fit for 8 m < R < 12 m")
plt.colorbar()
plt.legend()
plt.grid()
plt.savefig(output_path + "hist2d_Qedep_vs_nPE_positron_fit_8_12m.png")
""" display Qedep as function of nPE in 2D histogram with fit: """
h7 = plt.figure(7, figsize=(15, 8))
plt.hist2d(array_Npe_12_14m, array_Qedep_12_14m, [bins_edges_nPE, bins_edges_Qedep], norm=LogNorm(),
cmap="rainbow")
plt.plot(fit_x_axis, fit_y_axis_12_14m, "k", label="{1:d} entries\nlinear fit: f(x) = {0:.3E} * x"
.format(fit_result_12_14m, len(array_Npe_12_14m)))
plt.xlabel("number of p.e.")
plt.ylabel("visible energy in JUNO detector (in MeV)")
plt.title("Visible energy vs. number of p.e.\nwith linear fit for 12 m < R < 14 m")
plt.colorbar()
plt.legend()
plt.grid()
plt.savefig(output_path + "hist2d_Qedep_vs_nPE_positron_fit_12_14m.png")
""" display Qedep as function of nPE in 2D histogram with fit: """
h8 = plt.figure(8, figsize=(15, 8))
plt.hist2d(array_Npe_14_16m, array_Qedep_14_16m, [bins_edges_nPE, bins_edges_Qedep], norm=LogNorm(),
cmap="rainbow")
plt.plot(fit_x_axis, fit_y_axis_14_16m, "k", label="{1:d} entries\nlinear fit: f(x) = {0:.3E} * x"
.format(fit_result_14_16m, len(array_Npe_14_16m)))
plt.xlabel("number of p.e.")
plt.ylabel("visible energy in JUNO detector (in MeV)")
plt.title("Visible energy vs. number of p.e.\nwith linear fit for 14 m < R < 16 m")
plt.colorbar()
plt.legend()
plt.grid()
plt.savefig(output_path + "hist2d_Qedep_vs_nPE_positron_fit_14_16m.png")
""" display Qedep as function of nPE in 2D histogram with fit: """
h9 = plt.figure(9, figsize=(15, 8))
plt.hist2d(array_Npe_16_17m, array_Qedep_16_17m, [bins_edges_nPE, bins_edges_Qedep], norm=LogNorm(),
cmap="rainbow")
plt.plot(fit_x_axis, fit_y_axis_16_17m, "k", label="{1:d} entries\nlinear fit: f(x) = {0:.3E} * x"
.format(fit_result_16_17m, len(array_Npe_16_17m)))
plt.xlabel("number of p.e.")
plt.ylabel("visible energy in JUNO detector (in MeV)")
plt.title("Visible energy vs. number of p.e.\nwith linear fit for 16 m < R < 17 m")
plt.colorbar()
plt.legend()
plt.grid()
plt.savefig(output_path + "hist2d_Qedep_vs_nPE_positron_fit_16_17m.png")
""" display Qedep as function of nPE in 2D histogram with fit: """
h10 = plt.figure(10, figsize=(15, 8))
plt.hist2d(array_Npe_17_17_7m, array_Qedep_17_17_7m, [bins_edges_nPE, bins_edges_Qedep], norm=LogNorm(),
cmap="rainbow")
plt.plot(fit_x_axis, fit_y_axis_17_17_7m, "k", label="{1:d} entries\nlinear fit: f(x) = {0:.3E} * x"
.format(fit_result_17_17_7m, len(array_Npe_17_17_7m)))
plt.xlabel("number of p.e.")
plt.ylabel("visible energy in JUNO detector (in MeV)")
plt.title("Visible energy vs. number of p.e.\nwith linear fit for 17 m < R < 17.7 m")
plt.colorbar()
plt.legend()
plt.grid()
plt.savefig(output_path + "hist2d_Qedep_vs_nPE_positron_fit_17_17_7m.png")
plt.close()
|
#!/usr/bin/python
import os, subprocess, sys
# Bootstrap a local "flask" virtual environment and install the app's dependencies.
# was: the interpreter was hardcoded as 'python', which breaks on systems where
# only 'python3' exists — use the running interpreter instead.
# NOTE(review): the script creates/repairs the environment three different ways
# (virtualenv.py, python -m venv, then a virtualenv installed inside the env);
# the sequence is preserved as-is — confirm which step is actually needed.
subprocess.call([sys.executable, 'virtualenv.py', 'flask'])
# per-platform name of the scripts directory inside the environment:
if sys.platform == 'win32':
    bin = 'Scripts'
else:
    bin = 'bin'
subprocess.call([sys.executable, '-m', 'venv', 'flask'])
subprocess.call([os.path.join('flask', bin, 'easy_install'), 'virtualenv'])
pip_path = os.path.join('flask', bin, 'pip')
subprocess.call([pip_path, 'install', 'virtualenv'])
subprocess.call([os.path.join('flask', bin, 'virtualenv'), 'flask'])
for package in ('flask<0.10.1', 'flask-login', 'flask-openid'):
    subprocess.call([pip_path, 'install', package])
if sys.platform == 'win32':
    # lamson/chardet are pulled in explicitly so flask-mail works on Windows:
    subprocess.call([pip_path, 'install', '--no-deps', 'lamson', 'chardet', 'flask-mail'])
else:
    subprocess.call([pip_path, 'install', 'flask-mail'])
for package in ('sqlalchemy==1.0.12', 'flask-sqlalchemy>=2.1', 'psycopg2==2.6.1',
                'flask-migrate==1.8.0', 'sqlalchemy-migrate', 'flask-whooshalchemy',
                'Flask-UUID', 'flask-wtf', 'flask-babel', 'flup', 'Flask-Passlib',
                'Flask-HTTPAuth'):
    subprocess.call([pip_path, 'install', package])
|
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import pandas as pd
from transfer import transfer_suggestion
from pydantic import BaseModel
from google.cloud import storage
import requests
class Item(BaseModel):
    """Request body for /give_prediction."""
    # the names of the 15 players in the user's current squad
    team_list: list
    # remaining transfer budget (presumably in millions) — confirm unit with caller
    budget: float
app = FastAPI()
# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive; fine for a demo, confirm before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allows all origins
    allow_credentials=True,
    allow_methods=["*"],  # Allows all methods
    allow_headers=["*"],  # Allows all headers
)
@app.get("/")
def index():
return {"greeting": "Hello world! Welcome to our Fantasy Football Predictor"}
def get_best_11(df, week=38):
    """Pick the best starting 11, the four substitutes, captain and vice-captain.

    The starting 11 is seeded with the top goalkeeper and the top three
    defenders, then filled with the highest-predicted remaining outfielders;
    surplus goalkeepers and leftovers go to the bench. Captain and
    vice-captain are simply the two highest predicted scorers of the squad.

    :param df: squad DataFrame with columns GW, name, position, predicted_points
    :param week: gameweek to select rows from (default 38)
    :return: (best_11, sub_4, captain, vice_captain)
    """
    squad = df[df.GW == week]
    starters = squad.head(0).copy()
    bench = squad.head(0).copy()
    ranked = squad.sort_values('predicted_points', ascending=False)
    # seed the line-up: one goalkeeper and three defenders are mandatory
    top_gk = squad[squad.position == 'GK'].sort_values('predicted_points', ascending=False).head(1)
    top_def = squad[squad.position == 'DEF'].sort_values('predicted_points', ascending=False).head(3)
    starters = pd.concat([starters, top_gk])
    starters = pd.concat([starters, top_def])
    captain = ranked.head(1).name.unique()[0]
    vice_captain = ranked.head(2).name.unique()[1]
    for _, player in ranked.iterrows():
        entry = pd.DataFrame([player])
        already_picked = entry.name.unique()[0] in starters.name.unique()
        if len(starters) < 11:
            if already_picked:
                continue
            if entry.position.unique()[0] == 'GK':
                # only one goalkeeper starts; extra keepers go straight to the bench
                bench = pd.concat([bench, entry])
                continue
            starters = pd.concat([starters, entry])
        elif already_picked:
            continue
        elif len(bench) < 4:
            bench = pd.concat([bench, entry])
    return starters, bench, captain, vice_captain
@app.post("/give_prediction")
def give_prediction(input:Item):
#df is 2 columns (player_name, position) of 15 rows (15 players)
#df is a float value
#load_model_from_gcp()
team_list=input.team_list
assert(len(team_list)==15)
budget=input.budget
assert(type(budget)==float)
#all_predicted_players=main()
#url='https://storage.googleapis.com/wagon-613-fflpred/predictions/predictions'
url='https://storage.googleapis.com/wagon-613-fflpred/predictions/latest_prediction.csv'
all_predicted_players=pd.read_csv(url)
#all_predicted_players.drop(columns='Unnamed: 0',inplace=True)
all_predicted_players.drop_duplicates(subset=['name'],inplace=True)
player_list=pd.DataFrame(all_predicted_players.head(0))
for player in team_list:
player_list=pd.concat([player_list,all_predicted_players[all_predicted_players.name==player]])
best_transfers=transfer_suggestion(player_list[['name','position']],budget,all_predicted_players)
best_11, sub_4, captain, vice_captain = get_best_11(player_list)
dict_best_transfers=best_transfers[['leaving_player','incoming_player','points_difference']].to_dict()
dict_best_11=best_11[['name','position']].to_dict()
dict_sub_4=sub_4[['name','position']].to_dict()
return {'best_11':dict_best_11,
'subs_4':dict_sub_4,
'captain':captain,
'vice_captain':vice_captain,
'best_transfers':dict_best_transfers}
@app.get("/players")
def players():
#url='https://storage.googleapis.com/wagon-613-fflpred/predictions/predictions'
url='https://storage.googleapis.com/wagon-613-fflpred/predictions/latest_prediction.csv'
all_predicted_players=pd.read_csv(url)
#all_predicted_players.drop(columns='Unnamed: 0',inplace=True)
all_predicted_players.drop_duplicates(subset=['name'],inplace=True)
all_player_dict={}
for i,player in all_predicted_players.iterrows():
if player.position in all_player_dict.keys():
all_player_dict[player.position]+=[player['name']]
else:
all_player_dict[player.position]=[player['name']]
all_player_dict
return all_player_dict
|
from left_recursion import *
def create_first_follow_matrix():
    """Compute FIRST and FOLLOW for every nonterminal of the loaded grammar.

    :return: dict mapping nonterminal -> {"first": [...], "follow": [...]}
    """
    matrix = {}
    for nt in input_dict["Grammar"]["Nonterminal"]:
        firsts = set()
        follows = set()
        find_first(firsts, nt)
        # nonterminals whose FOLLOW computation already started; used by
        # find_follow to break recursion cycles
        started = [nt]
        find_follow(follows, nt, started)
        matrix[nt] = {"first": list(firsts), "follow": list(follows)}
    return matrix
def find_first(first_set, nonterminal):
    """Collect FIRST(nonterminal) into first_set by scanning every production."""
    for production in input_dict["Grammar"]["Productions"]:
        for lhs, rhs in production.items():
            if lhs != nonterminal:
                continue
            if rhs == "":
                # epsilon production contributes the empty string
                first_set.add("")
            else:
                extract_first_from_right(first_set, rhs[0])
def extract_first_from_right(first_set, symbol):
    """Resolve one leading symbol: terminals go straight into first_set,
    nonterminals recurse into find_first."""
    if symbol in input_dict["Grammar"]["Terminal"]:
        first_set.add(symbol)
        return
    if symbol in input_dict["Grammar"]["Nonterminal"]:
        find_first(first_set, symbol)
def find_follow(follow_set,nonterminal,verified_nonterminal_list):
    """Add FOLLOW(nonterminal) symbols to follow_set.

    Walks every production, locates *nonterminal* on the right-hand side and
    collects the FIRST set of what follows it; when the occurrence is at the
    end (or everything after it can derive epsilon), the FOLLOW of the
    left-hand side is added recursively. *verified_nonterminal_list* records
    nonterminals whose FOLLOW computation already started, breaking cycles.
    """
    if(nonterminal==input_dict["Grammar"]["Start"]): # adds $ to follow for start nonterminal
        follow_set.add("$")
    for production in input_dict["Grammar"]["Productions"]:
        for left,right in production.items():
            n = find_index_nonterminal_in_right(nonterminal,right)
            if(n != -1): #if true then nonterminal found and n represents its first index
                char_to_jump = 0
                while(True):
                    # substring after the occurrence plus any symbols already
                    # skipped because they derive epsilon:
                    right_after_nonterminal = right[n+len(nonterminal)+char_to_jump:]
                    # print(left,right,right_after_nonterminal)
                    # print(follow_set, nonterminal, verified_nonterminal_list)
                    if(right_after_nonterminal != ""): #testing string after nonterminal finding follow of the nonterminal
                        first_symbol = find_first_symbol_from_right(right_after_nonterminal)
                        # print("first symbol",first_symbol)
                        epsilon_test = extract_first_from_right2(follow_set,first_symbol)
                        # print("epsilon_test",epsilon_test)
                        # print(follow_set)
                        if(epsilon_test):
                            # following symbol can derive epsilon: skip it and
                            # inspect the next one
                            char_to_jump += len(first_symbol)
                        else:
                            break
                    elif left not in verified_nonterminal_list: #this check is used to exit the posible infinite loop
                        verified_nonterminal_list.append(left)
                        find_follow(follow_set,left,verified_nonterminal_list)
                        # print(follow_set)
                        break
                    else:
                        break
def find_index_nonterminal_in_right(nonterminal,right):
    """Return the index of *nonterminal* in *right*, or -1 when absent.

    Guards against matching a nonterminal that is only a prefix of a longer
    nonterminal starting at the same position (e.g. "S" inside "SB").
    """
    n = right.find(nonterminal)
    if(n != -1):
        test_nonterminal_list = [elem for elem in input_dict["Grammar"]["Nonterminal"] if elem!=nonterminal]
        for test_nonterminal in test_nonterminal_list:
            n2 = right.find(test_nonterminal,n)
            # a longer nonterminal starting at the same index shadows this match
            if n2 == n and len(test_nonterminal) > len(nonterminal):
                # NOTE(review): returns -1 even if *nonterminal* occurs again
                # later in *right* on its own — confirm this is acceptable for
                # the grammars this tool handles.
                return -1
    return n
def find_first_symbol_from_right(right):
    """Return the leading grammar symbol of the string *right*.

    Terminals are checked first; otherwise the longest matching nonterminal
    wins (so "SB" is preferred over "S" when both match).

    :return: the matching symbol, or None when *right* starts with no known
             symbol (was: UnboundLocalError in that case).
    """
    for terminal in input_dict["Grammar"]["Terminal"]:
        if(right.startswith(terminal)):
            return terminal
    # prefer the longest matching nonterminal to handle overlapping names
    longest = None
    for nonterminal in input_dict["Grammar"]["Nonterminal"]:
        if right.startswith(nonterminal) and (longest is None or len(nonterminal) > len(longest)):
            longest = nonterminal
    return longest
def extract_first_from_right2(follow_set, first_symbol):
    """Like extract_first_from_right, but feeds the FOLLOW computation and
    reports whether *first_symbol* can derive epsilon."""
    if first_symbol in input_dict["Grammar"]["Terminal"]:
        follow_set.add(first_symbol)
        return False
    if first_symbol in input_dict["Grammar"]["Nonterminal"]:
        return find_first_for_follow(follow_set, first_symbol)
    return False
def find_first_for_follow(follow_set, nonterminal):
    """Add FIRST(nonterminal) to follow_set; return True when *nonterminal*
    has an epsilon production."""
    derives_epsilon = False
    for production in input_dict["Grammar"]["Productions"]:
        for lhs, rhs in production.items():
            if lhs != nonterminal:
                continue
            if rhs == "":
                derives_epsilon = True
            else:
                leading = find_first_symbol_from_right(rhs)
                extract_first_from_right2(follow_set, leading)
    return derives_epsilon
print("\nThe first follow matrix\n")
pprint(create_first_follow_matrix())
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'employee_objective',
    'version': '1.0',
    'category': 'Sales',
    'sequence': 5,
    # fixed typo in the user-facing summary: "employee-s" -> "employee's"
    'summary': "Track employee's sale/leads/invoicing individual objectives.",
    'description': "",
    'website': '',
    'depends': [
        'sale_margin_extend',
        'hr',
        'web',
        'web_kanban_gauge',
    ],
    'data': [
        'security/ir.model.access.csv',
        'security/objective_gauges_security.xml',
        'views/control_objective_user.xml',
        'views/sales_panel.xml',
        #'views/employee_objective_panel_views.xml',
    ],
    'demo': [
    ],
    #'css': ['static/src/css/crm.css'],
    'installable': True,
    'application': True,
    'auto_install': False,
    #'uninstall_hook': 'uninstall_hook',
}
|
import time
import math
import datetime
import microdotphat
# clear the display automatically when the program exits:
microdotphat.set_clear_on_exit(True)
def set_brightness(value):
    """Set display brightness (thin wrapper; presumably 0.0-1.0 — confirm with microdotphat docs)."""
    microdotphat.set_brightness(value)
def wait(seconds):
    """Block for *seconds* (thin wrapper around time.sleep)."""
    time.sleep(seconds)
def reset():
    """Clear the display buffer and push the empty buffer to the hardware."""
    microdotphat.clear()
    microdotphat.show()
def print_string_fade(text):
    """Fade the brightness down and write *text* while the display is dark.

    Brightness follows (sin(t * speed) + 1) / 2, i.e. it oscillates in [0, 1];
    the text is (re)written only near minimum brightness so the swap is not
    visible as a flicker.

    NOTE(review): the loop exits right after the text is drawn near zero
    brightness, leaving the display dark until the caller raises brightness
    again — confirm this hand-off is intended.
    """
    start = time.time()
    speed = 3
    shown = True
    b = 0
    while shown:
        # brightness oscillates between 0 and 1:
        b = (math.sin((time.time() - start) * speed) + 1) / 2
        microdotphat.set_brightness(b)
        if b < 0.002 and shown:
            microdotphat.clear()
            microdotphat.write_string(text, kerning=False)
            microdotphat.show()
            shown = False
        if b > 0.998:
            shown = True
        time.sleep(0.01)
def print_string(text):
    """Write *text* to the buffer (without kerning) and refresh the display."""
    microdotphat.write_string(text, kerning=False)
    microdotphat.show()
def show_time(duration=10):
    """Show the current HH MM time for about *duration* seconds.

    The decimal point between hours and minutes blinks once per second.

    :param duration: total display time in seconds
    """
    tick = 0.05
    steps = int(duration / tick)
    for _ in range(steps):
        microdotphat.clear()
        now = datetime.datetime.now()
        # blink the separator dot: on during even seconds, off otherwise
        if now.second % 2 == 0:
            microdotphat.set_decimal(3, 1)
        else:
            microdotphat.set_decimal(3, 0)
        microdotphat.write_string(now.strftime(' %H%M'), kerning=False)
        microdotphat.show()
        time.sleep(tick)
def display_strings_array(lines):
    """Write several text lines stacked vertically and scroll through them.

    Each line is rendered at its own (x, y) offset in the buffer; the display
    then scrolls horizontally through the width of each line and vertically
    down to the next one, finally jumping back to the origin after the last
    line.

    :param lines: list of strings, one per display row
    """
    delay = 0.03
    # vertical distance between two rows (display height plus a 2px gap):
    line_height = microdotphat.HEIGHT + 2
    lengths = [0] * len(lines)
    offset_left = 0
    for line, text in enumerate(lines):
        # write_string returns the pixel width of the rendered text:
        lengths[line] = microdotphat.write_string(text, offset_x=offset_left, offset_y=line_height * line, kerning=False)
        offset_left += lengths[line]
    microdotphat.set_pixel(0, (len(lines) * line_height) - 1, 0)
    current_line = 0
    microdotphat.show()
    pos_x = 0
    pos_y = 0
    for current_line in range(len(lines)):
        time.sleep(delay * 10)
        # scroll horizontally through the current line, one pixel per step:
        for y in range(lengths[current_line]):
            microdotphat.scroll(1, 0)
            pos_x += 1
            time.sleep(delay)
            microdotphat.show()
        if current_line == len(lines) - 1:
            # last line: jump back to the start of the buffer
            microdotphat.scroll_to(0, 0)
        else:
            # scroll vertically down to the next line:
            for x in range(line_height):
                microdotphat.scroll(0, 1)
                pos_y += 1
                microdotphat.show()
                time.sleep(delay)
|
C=str(input("C= "))
N1=int(input("N1= "))
N2=int(input("N2= "))
CCC={
'N':[(1=='W'),(2=='N'),(-1=='E')],
'E':[(1=='N'),(2=='E'),(-1=='S')],
'S':[(1=='E'),(2=='S'),(-1=='W')],
'W':[(1=='S'),(2=='W'),(-1=='N')]
}
if(C=='N')and(C=='E')and(C=='S')and(C=='W'):
print(CCC.get(C)) |
import logging
import time
import numpy as np
from numpy.random import RandomState
import lasagne
from lasagne.updates import adam
from lasagne.objectives import categorical_crossentropy
from lasagne.nonlinearities import elu,softmax,identity
from hyperoptim.parse import cartesian_dict_of_lists_product,\
product_of_list_of_lists_of_dicts
from hyperoptim.util import save_npy_artifact, save_pkl_artifact
from braindecode.datasets.combined import CombinedCleanedSet
from braindecode.mywyrm.processing import resample_cnt, bandpass_cnt, exponential_standardize_cnt
from braindecode.datasets.cnt_signal_matrix import CntSignalMatrix
from braindecode.datasets.signal_processor import SignalProcessor
from braindecode.datasets.loaders import MultipleBCICompetition4Set2B
from braindecode.models.deep5 import Deep5Net
from braindecode.datahandling.splitters import SeveralSetsSplitter,\
concatenate_sets, CntTrialSingleFoldSplitter
from braindecode.datahandling.batch_iteration import CntWindowTrialIterator
from braindecode.veganlasagne.layers import get_n_sample_preds
from braindecode.veganlasagne.monitors import CntTrialMisclassMonitor, LossMonitor, RuntimeMonitor,\
KappaMonitor
from braindecode.experiments.experiment import Experiment
from braindecode.veganlasagne.stopping import MaxEpochs, NoDecrease, Or
from braindecode.veganlasagne.update_modifiers import MaxNormConstraintWithDefaults
from braindecode.veganlasagne.objectives import tied_neighbours_cnt_model,\
sum_of_losses
from braindecode.util import FuncAndArgs
from braindecode.mywyrm.clean import NoCleaner, BCICompetitionIV2ABArtefactMaskCleaner
from braindecode.veganlasagne.clip import ClipLayer
log = logging.getLogger(__name__)
def get_templates():
    """Return the named loss-expression templates, each wrapped in a zero-arg
    lambda so the referenced objects are resolved lazily."""
    templates = {}
    templates['categorical_crossentropy'] = lambda: categorical_crossentropy
    templates['tied_loss'] = lambda: FuncAndArgs(
        sum_of_losses,
        loss_expressions=[categorical_crossentropy,
                          tied_neighbours_cnt_model,
                          ])
    return templates
def get_grid_param_list():
    """Build the full grid of hyperparameter configurations as the cartesian
    product of the per-group option lists."""
    prod = cartesian_dict_of_lists_product
    base = [{
        'save_folder': './data/models/sacred/paper/bcic-iv-2b/cv-proper-sets/',
        'only_return_exp': False,
        'n_chans': 3,
    }]
    subjects = prod({
        'subject_id': range(1, 10),
        'data_folder': ['/home/schirrmr/data/bci-competition-iv/2b/'],
    })
    experiment = prod({
        'run_after_early_stop': [True, ],
    })
    stopping = prod({
        'stop_chan': ['misclass'],
    })
    loss = prod({
        'loss_expression': ['$tied_loss'],
    })
    preproc = prod({
        'filt_order': [3, ],
        'low_cut_hz': [4],
        'sets_like_fbcsp_paper': [False, True],
    })
    # order matters only for readability of the resulting dicts; the product
    # itself is over all groups
    return product_of_list_of_lists_of_dicts([
        base,
        experiment,
        subjects,
        stopping,
        preproc,
        loss,
    ])
def sample_config_params(rng, params):
    """Hook for randomized config sampling; this grid search returns the
    parameters unchanged (rng is unused)."""
    return params
def run(ex, data_folder, subject_id, n_chans,
        stop_chan, filt_order, low_cut_hz, loss_expression,
        only_return_exp, run_after_early_stop, sets_like_fbcsp_paper):
    """Train/evaluate Deep5Net on one BCIC IV 2b subject with 10-fold CV.

    Results (final value of each monitor channel, averaged over folds) are
    written into the sacred experiment `ex.info`.
    """
    start_time = time.time()
    assert (only_return_exp is False) or (n_chans is not None)
    ex.info['finished'] = False
    # trial ivan in milliseconds
    # these are the samples that will be predicted, so for a
    # network with 2000ms receptive field
    # 1500 means the first receptive field goes from -500 to 1500
    train_segment_ival = [1500,4000]
    test_segment_ival = [1500,4000]
    add_additional_set = True
    session_ids = [1,2,]
    if sets_like_fbcsp_paper:
        # presumably mirrors the session selection of the FBCSP paper -- TODO confirm
        if subject_id in [4,5,6,7,8,9]:
            session_ids = [3] # dummy
            add_additional_set = False
        elif subject_id == 1:
            session_ids = [1,]
        else:
            assert subject_id in [2,3]
            session_ids = [1,2]
    train_loader = MultipleBCICompetition4Set2B(subject_id,
        session_ids=session_ids, data_folder=data_folder)
    test_loader = MultipleBCICompetition4Set2B(subject_id,
        session_ids=[3], data_folder=data_folder)
    # Preprocessing pipeline in [(function, {args:values)] logic
    cnt_preprocessors = [
        (resample_cnt , {'newfs': 250.0}),
        (bandpass_cnt, {
            'low_cut_hz': low_cut_hz,
            'high_cut_hz': 38,
            'filt_order': filt_order,
        }),
        (exponential_standardize_cnt, {})
    ]
    marker_def = {'1- Left Hand': [1], '2 - Right Hand': [2]}
    train_signal_proc = SignalProcessor(set_loader=train_loader,
        segment_ival=train_segment_ival,
        cnt_preprocessors=cnt_preprocessors,
        marker_def=marker_def)
    train_set = CntSignalMatrix(signal_processor=train_signal_proc, sensor_names='all')
    test_signal_proc = SignalProcessor(set_loader=test_loader,
        segment_ival=test_segment_ival,
        cnt_preprocessors=cnt_preprocessors,
        marker_def=marker_def)
    test_set = CntSignalMatrix(signal_processor=test_signal_proc, sensor_names='all')
    train_cleaner = BCICompetitionIV2ABArtefactMaskCleaner(marker_def=marker_def)
    test_cleaner = BCICompetitionIV2ABArtefactMaskCleaner(marker_def=marker_def)
    combined_set = CombinedCleanedSet(train_set, test_set,train_cleaner, test_cleaner)
    if not only_return_exp:
        combined_set.load()
        # only need train set actually, split is done later per fold
        combined_set = combined_set.test_set
        if add_additional_set:
            combined_set.additional_set = train_set
        in_chans = train_set.get_topological_view().shape[1]
    # NOTE(review): when only_return_exp is True, in_chans appears never to be
    # assigned before run_exp uses it (n_chans is accepted but unused) -- confirm
    # against the original, non-flattened source.
    input_time_length = 1000 # implies how many crops are processed in parallel, does _not_ determine receptive field size
    # receptive field size is determined by model architecture
    num_filters_time = 25
    filter_time_length = 10
    num_filters_spat = 25
    pool_time_length = 3
    pool_time_stride = 3
    num_filters_2 = 50
    filter_length_2 = 10
    num_filters_3 = 100
    filter_length_3 = 10
    num_filters_4 = 200
    filter_length_4 = 10
    final_dense_length = 2
    n_classes = 2
    final_nonlin=softmax
    first_nonlin=elu
    first_pool_mode='max'
    first_pool_nonlin=identity
    later_nonlin=elu
    later_pool_mode='max'
    later_pool_nonlin=identity
    drop_in_prob=0.0
    drop_prob=0.5
    batch_norm_alpha=0.1
    double_time_convs=False
    split_first_layer=True
    batch_norm=True
    def run_exp(i_fold):
        """Build, train and return the Experiment for one CV fold."""
        # ensure reproducibility by resetting lasagne/theano random generator
        lasagne.random.set_rng(RandomState(34734))
        d5net = Deep5Net(in_chans=in_chans, input_time_length=input_time_length, num_filters_time=num_filters_time,
            filter_time_length=filter_time_length,
            num_filters_spat=num_filters_spat, pool_time_length=pool_time_length, pool_time_stride=pool_time_stride,
            num_filters_2=num_filters_2, filter_length_2=filter_length_2,
            num_filters_3=num_filters_3, filter_length_3=filter_length_3,
            num_filters_4=num_filters_4, filter_length_4=filter_length_4,
            final_dense_length=final_dense_length, n_classes=n_classes,
            final_nonlin=final_nonlin, first_nonlin=first_nonlin,
            first_pool_mode=first_pool_mode, first_pool_nonlin=first_pool_nonlin,
            later_nonlin=later_nonlin, later_pool_mode=later_pool_mode, later_pool_nonlin=later_pool_nonlin,
            drop_in_prob=drop_in_prob, drop_prob=drop_prob, batch_norm_alpha=batch_norm_alpha,
            double_time_convs=double_time_convs, split_first_layer=split_first_layer, batch_norm=batch_norm)
        final_layer = d5net.get_layers()[-1]
        # Clip predictions away from 0/1 so the cross-entropy stays finite.
        final_layer = ClipLayer(final_layer, 1e-4, 1-1e-4)
        dataset_splitter = CntTrialSingleFoldSplitter(n_folds=10, i_test_fold=i_fold,
            shuffle=True)
        iterator = CntWindowTrialIterator(batch_size=45,input_time_length=input_time_length,
            n_sample_preds=get_n_sample_preds(final_layer))
        monitors = [LossMonitor(), CntTrialMisclassMonitor(input_time_length=input_time_length),
            KappaMonitor(input_time_length=iterator.input_time_length,
                mode='max'), RuntimeMonitor()]
        #n_no_decrease_max_epochs = 2
        #n_max_epochs = 4
        n_no_decrease_max_epochs = 80
        n_max_epochs = 800
        # real values for paper were 80 and 800
        remember_best_chan = 'valid_' + stop_chan
        stop_criterion = Or([NoDecrease(remember_best_chan, num_epochs=n_no_decrease_max_epochs),
            MaxEpochs(num_epochs=n_max_epochs)])
        dataset = combined_set
        splitter = dataset_splitter
        updates_expression = adam
        updates_modifier = MaxNormConstraintWithDefaults({})
        preproc = None
        exp = Experiment(final_layer, dataset,splitter,preproc,iterator,
            loss_expression,updates_expression, updates_modifier, monitors,
            stop_criterion, remember_best_chan, run_after_early_stop,
            batch_modifier=None)
        if only_return_exp:
            return exp
        exp.setup()
        exp.run()
        return exp
    all_monitor_chans = []
    n_folds = 10
    for i_fold in range(n_folds):
        log.info("Running fold {:d} of {:d}".format(i_fold+1, n_folds))
        exp = run_exp(i_fold)
        if only_return_exp:
            return exp
        all_monitor_chans.append(exp.monitor_chans)
    end_time = time.time()
    run_time = end_time - start_time
    ex.info['finished'] = True
    # Average the final value of every monitor channel across folds.
    keys = all_monitor_chans[0].keys()
    for key in keys:
        ex.info[key] = np.mean([mchans[key][-1] for mchans in
            all_monitor_chans])
    ex.info['runtime'] = run_time
    save_pkl_artifact(ex, all_monitor_chans, 'all_monitor_chans.pkl')
|
__author__ = "Narwhale"
|
###################################################
# #
# General utility file for scoring methods, pca, #
# etc. #
# #
# Authors: Amy Peerlinck and Neil Walton #
# #
###################################################
import numpy as np
from sklearn.decomposition import PCA
from math import ceil
from sklearn.metrics import silhouette_score as sc, f1_score as fscore
class Pca:
    '''
    Thin wrapper around sklearn PCA: fit on construction, then
    transform the stored data down to the requested number of
    principal components.
    '''

    def __init__(self, data, n=1):
        self.data = data
        self.n = n
        self.pca = self._get_pca()

    def _get_pca(self):
        '''
        Fit and return the underlying PCA model. A fractional n (0 < n < 1)
        is interpreted as a fraction of the feature count, rounded up;
        the component count is always capped at the number of features.
        '''
        if self.n <= 0:
            raise ValueError('Number of components cannot be negative.')
        num_features = len(self.data.T)
        if self.n < 1:
            n_components = ceil(num_features * self.n)
        else:
            n_components = self.n
        n_components = min(n_components, num_features)  # Can't have fewer components than features
        return PCA(n_components=n_components).fit(self.data)

    def get_components(self):
        '''
        Project the stored data onto the fitted principal components.
        '''
        return self.pca.transform(self.data)
def silhouette(data, clusters):
    """Mean silhouette coefficient of the given clustering (sklearn wrapper)."""
    return sc(data, clusters)
def f_score(true, pred):
    """Weighted-average F1 score of predictions against ground truth (sklearn wrapper)."""
    return fscore(true, pred, average='weighted')
|
# Fixtures and CSS selectors for the product-list / add-to-cart test flow.
prod_list_url = 'lighting/lamps/desk-lamps'
header_search_results = 'Search Results for'
prod_links_css = "#sbprodgrid a.productbox"
prod_title_css = '[data-qa="text-pdp-product-title"]'
prod_option_css = 'a.js-visual-option'
# NOTE(review): "data-data-qa" looks like a doubled prefix -- the title selector
# above uses plain "data-qa"; confirm against the live page markup.
add_to_cart_button_css = 'input[data-data-qa="button-add-to-cart"]'
def assert_at_prod_list(sb):
    """Assert the search-results header is present, i.e. a product list is shown."""
    sb.assert_text(header_search_results)
def get_all_products(sb):
    """Return the product-tile link elements currently in the results grid."""
    return sb.find_elements(prod_links_css)
def pick_product(sb):
    """Select a visual option if one is offered, then add the product to the cart."""
    sb.click_if_visible(prod_option_css)
    sb.click(add_to_cart_button_css)
def nav_to_a_product(sb):
    """From the current (home) page, open the desk-lamps list and click into a product."""
    home_url = sb.get_current_url()
    sb.open(home_url + prod_list_url)  # assumes home_url ends with '/' -- TODO confirm
    sb.click(prod_links_css)
    sb.assert_element(prod_title_css)
|
import math
import numpy
# Horizontal: Item
item = ['Phim hร nh ฤแปng', 'Phim Hร n Quแปc', 'Phim khoa hแปc', 'Phim tรฌnh cแบฃm', 'Phim Nhแบญt Bแบฃn']
# Vertical: User
user = ['Tรบ', 'Thแบฏng', 'Trรขm', 'ร', 'Trinh']
# rating_matrix[i][k]: rating of item i by user k; 0 means "not rated yet".
rating_matrix = [
    [0,4,1,4,1],
    [1,2,5,2,5],
    [4,5,1,3,4],
    [0,1,5,1,4],
    [4,3,1,3,1],
]
# Item similarity matrix
# NOTE(review): sized by len(user) but indexed by item below -- works only
# because both lists have 5 entries here; should probably be len(item).
similarity = numpy.zeros((len(user),len(user)))
# Calculate the similarity (adjusted cosine / Pearson over co-rated users)
for i in range(len(item)):
    for j in range(len(item)):
        if i == j:
            similarity[i][j] = 1
        else:
            sum1 = 0
            sum2 = 0
            sum3 = 0
            # Calculate average without 0 elements
            avg_user_i = sum(rating_matrix[i])/(len(rating_matrix[i])-sum(map(lambda x : x == 0, rating_matrix[i])))
            avg_user_j = sum(rating_matrix[j])/(len(rating_matrix[j])-sum(map(lambda x : x == 0, rating_matrix[j])))
            # Calculate similarity using formula (skip users missing either rating)
            for k in range(len(rating_matrix[0])):
                if rating_matrix[i][k] != 0 and rating_matrix[j][k] != 0:
                    sum1 += (rating_matrix[i][k]-avg_user_i)*(rating_matrix[j][k]-avg_user_j)
                    sum2 += pow((rating_matrix[i][k]-avg_user_i), 2)
                    sum3 += pow((rating_matrix[j][k]-avg_user_j), 2)
            # NOTE(review): raises ZeroDivisionError if sum2 or sum3 is 0
            # (no overlapping ratings or zero variance).
            similarity[i][j] = sum1/(math.sqrt(sum2)*math.sqrt(sum3))
# Making prediction on every empty review
for item_index in range(len(rating_matrix)):
    for user_index in range(len(rating_matrix[0])):
        if rating_matrix[item_index][user_index] == 0:
            print("Guessing rating of item " + item[item_index] + " for user " + user[user_index])
            k_neighbor = []
            for neighbor_index in range(len(similarity[item_index])):
                # Similarity rate (threshold 0.5, self excluded)
                if similarity[item_index][neighbor_index] > 0.5 and item_index != neighbor_index:
                    print("Neighbor: " + item[neighbor_index])
                    k_neighbor.append(neighbor_index)
            # Make prediction: item mean plus similarity-weighted neighbor deviations
            avg_user_i = sum(rating_matrix[item_index])/(len(rating_matrix[item_index])-sum(map(lambda x : x == 0, rating_matrix[item_index])))
            sum1 = 0
            sum2 = 0
            for k_i in range(len(k_neighbor)):
                avg_user_l = sum(rating_matrix[k_neighbor[k_i]])/(len(rating_matrix[k_neighbor[k_i]])-sum(map(lambda x : x == 0, rating_matrix[k_neighbor[k_i]])))
                sum1 += similarity[item_index][k_neighbor[k_i]]*(rating_matrix[k_neighbor[k_i]][user_index]-avg_user_l)
                sum2 += similarity[item_index][k_neighbor[k_i]]
            if(sum2 == 0):
                print("Cannot make prediction because this item has no neighbor")
            else:
                print("Prediction rating: " + str(avg_user_i + (sum1/sum2)))
            print("-----------------------------------------------------------------")
|
'''
import shutil
import requests
from slackbot.bot import default_reply
import os
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
import keras
import numpy as np
import tensorflow as tf
import random as rn
import os
from keras import backend as K
import numpy as np
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense
from keras.optimizers import RMSprop
import pandas as pd
from keras.preprocessing.image import load_img, img_to_array
from keras.preprocessing.image import ImageDataGenerator
flag = 0
egg_flag = 0
@default_reply
def make_me_showwin(message):
global flag
global egg_flag
global model
global egg_model
if 'files' not in message.body: # ใใกใคใซใใฉใใๅคๆญ
return message.reply(DEFAULT_REPLY)
url = message.body['files'][0]['url_private'] # ใใใง็ปๅใฎURLใๅๅพใงใใ
resp = requests.get(url,
headers={'Authorization': 'Bearer ' + os.environ["SLACK_API_TOKEN"]},
stream=True)
f = open('ramen.jpg', "wb")
shutil.copyfileobj(resp.raw, f)
f.close()
input_shape = (224, 224, 3) #ใขใใซใฎๅ
ฅๅใตใคใบใซใใฃใฆๅคๆด
ramen_img = image.img_to_array(image.load_img("C:/Users/internship/git/team-d/slackbot/ramen.jpg", target_size=input_shape[:2]))
test_datagen = ImageDataGenerator(rescale=1./255, samplewise_std_normalization=True)
ramen_img = ramen_img.reshape(1, 224, 224, 3)
ramen_img = test_datagen.flow(ramen_img)
ramen_img = ramen_img[0]
#ramen_img/=255
#img = load_img(file)
#x = img_to_array(ramen_img)
#x = np.expand_dims(x, axis=0)
# ImageDataGeneratorใฎ็ๆ
datagen = ImageDataGenerator(
samplewise_std_normalization=True
)
# 9ๅใฎ็ปๅใ็ๆใใพใ
g = datagen.flow(ramen_img, batch_size=1, save_to_dir='', save_prefix='ramen', save_format='jpg')
if flag == 0:
with open('C:/Users/internship/git/team-d/model/ramen_model(0.72)/ramen_adam_epoch100_20_noma.json', 'r') as f:
model = model_from_json(f.read())
model.load_weights('C:/Users/internship/git/team-d/model/ramen_model(0.72)/ramen_adam_epoch100_20_noma0.72.h5')
flag = 1
if egg_flag == 0:
with open('C:/Users/internship/git/team-d/model/egg_model/egg.json', 'r') as f:
egg_model = model_from_json(f.read())
egg_model.load_weights('C:/Users/internship/git/team-d/model/egg_model/egg_weight.hdf5')
egg_flag = 1
pred = model.predict(ramen_img, batch_size=1)
print(pred)
ramen_type = np.asarray(["ๅณๅใฉใผใกใณ","้คๆฒนใฉใผใกใณ","ๅกฉใฉใผใกใณ"])
result = pred[0].argmax()
print(ramen_type[result])
shape = 150
input_shape = (shape, shape, 3) #ใขใใซใฎๅ
ฅๅใตใคใบใซใใฃใฆๅคๆด
ramen_img = image.img_to_array(image.load_img("C:/Users/internship/git/team-d/slackbot/ramen.jpg", target_size=input_shape[:2]))
ramen_img = ramen_img.reshape(1, shape, shape, 3)
ramen_img/=255
egg_pred = egg_model.predict(ramen_img, batch_size=1)
print(egg_pred)
if egg_pred[0][0] < 0.5:
egg_str = 'ๅตๅ
ฅใใฎ'
else:
egg_str = 'ๅตใๅ
ฅใฃใฆใชใ'
message.reply('ใใฎใฉใผใกใณใฏ' + egg_str + ramen_type[result] +'ใงใใญ')
message.reply('ใใฎใฉใผใกใณใฏ' + ramen_type[result] +'ใงใใญ')
''' |
def next_element(arr):
    """Return a dict mapping each value of arr to its next greater element.

    For each value, the mapped value is the first element to its right that
    is strictly greater; values with no greater element map to -1.
    Assumes distinct values (duplicates would overwrite earlier keys).

    >>> next_element([4, 5, 2, 25])
    {4: 5, 5: 25, 2: 25, 25: -1}
    """
    # BUG FIX: the original stored the *index* (i+1) instead of the next
    # greater value, and called ans.append(...) on a dict, which raises
    # AttributeError; it also crashed on an empty list (arr[0]).
    if not arr:
        return {}
    stack = [arr[0]]  # values still waiting for their next greater element
    ans = {}
    for i in range(1, len(arr)):
        # arr[i] is the "next greater" for every smaller value on the stack.
        while stack and arr[i] > stack[-1]:
            ans[stack.pop()] = arr[i]
        stack.append(arr[i])
    # Whatever is left has no greater element to its right.
    for leftover in stack:
        ans[leftover] = -1
    return ans
# Smoke test: print the next-greater-element result for a small sample array.
arr = [4, 5, 2, 25]
print(next_element(arr))
|
#!/usr/bin/python
import urllib,urllib2,sys,csv
def readPage(pageId):
    """Fetch the OSHA inspection-detail page for the given id and return
    it as a list of raw HTML lines.  (Python 2 / urllib2.)"""
    url='http://www.osha.gov/pls/imis/establishment.inspection_detail'
    params=urllib.urlencode({'id':pageId})
    req=urllib2.Request(url,params)  # supplying data makes this a POST
    response=urllib2.urlopen(req)
    con=response.readlines()
    return con
def extractValue(page):
    """Scrape [Nr, Report ID, Open Date, current violations, current penalty]
    from one inspection-detail page given as a list of HTML lines.
    Fields that are never found stay 0."""
    nr = 0
    reportID = 0
    openDate = 0
    violations = 0
    penalty = 0
    vmark = 0  # 1 while inside the "Current Violations" table row
    pmark = 0  # 1 while inside the "Current Penalty" table row
    for pline in page:
        if '>Nr: ' in pline:
            # The header line carries all three identifiers at once.
            nr = pline.split('Nr: ')[1].split('<')[0]
            reportID = pline.split('Report ID:')[1].split('<')[0]
            openDate = pline.split('Open Date: ')[1].split('<')[0]
        if 'Current Violations' in pline:
            vmark = 1
        if vmark == 1 and '</TR>' in pline:
            vmark = 0
            cell = pline.split('align="right">')[1].split('<BR')[0]
            violations = cell if cell != '' else 0
        if 'Current Penalty' in pline:
            pmark = 1
        if pmark == 1 and '</TR>' in pline:
            pmark = 0
            cell = pline.split('align="right">')[1].split('<BR')[0]
            penalty = cell if cell != '' else 0
    return [nr, reportID, openDate, violations, penalty]
### MAIN START HERE ###
# Usage: script <id file> <output.csv>
# Scrapes one CSV row per inspection id; network failures yield a stub row.
if(len(sys.argv)!=3):
    sys.stdout.write(sys.argv[0]+' <id file> <output.csv>\n')
    exit(1)
f=open(sys.argv[1],'r')
fout=open(sys.argv[2],'w')
csvW=csv.writer(fout)
csvW.writerow(['Nr','Report ID','Open Date','Total Current Violations','Total Current Penalty'])
ids=f.readlines()
cnt=0
for oid in ids:
    oid=oid.strip('\n')
    if (cnt%10==0):
        # progress marker every 10 ids (Python 2 print statement)
        print str(cnt)+' id= '+oid
    cnt=cnt+1
    try:
        page=readPage(oid)
    except:
        # any fetch error is treated as a timeout sentinel
        page='Timeout'
    if(page!='Timeout'):
        val=extractValue(page)
    else:
        val=[oid,0,0,0,0]
    csvW.writerow(val)
f.close()
fout.close()
|
#!/usr/bin/env python
__author__ = 'Ajit Apte'
from django.utils import simplejson
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class EventsHandler(webapp.RequestHandler):
    """REST handler for /rest/v1/events: proxies Yahoo Upcoming event search
    and rewrites the payload into the Furlango event format."""

    @classmethod
    def _convert_events(cls, events):
        """Converts Yahoo Upcoming events to Furlango events."""
        # Fields copied through unchanged from the Upcoming payload.
        copied_fields = ('id', 'name', 'url', 'venue_name', 'venue_address',
                         'venue_city', 'venue_state_code', 'venue_zip',
                         'start_date', 'start_time', 'end_date', 'end_time',
                         'ticket_url')
        furlango_events = []
        for event in events:
            furlango_event = {}
            for field in copied_fields:
                furlango_event[field] = event[field]
            # Description is free text destined for HTML output, so escape it.
            furlango_event['description'] = cls._escape(event['description'])
            if event['ticket_free'] == 1:
                furlango_event['ticket_price'] = 'free'
            elif event['ticket_price'] == '':
                furlango_event['ticket_price'] = 'Not Available'
            else:
                furlango_event['ticket_price'] = event['ticket_price']
            furlango_events.append(furlango_event)
        return furlango_events

    @classmethod
    def _escape(cls, text):
        """Escapes all HTML-special characters for security."""
        # BUG FIX: the table previously mapped each character to itself
        # (e.g. '&': '&'), so nothing was escaped and descriptions were
        # emitted as raw HTML.  Map to the proper HTML entities instead.
        html_escape_table = {
            '&': '&amp;',
            '"': '&quot;',
            "'": '&#x27;',
            '>': '&gt;',
            '<': '&lt;',
        }
        return "".join(html_escape_table.get(c, c) for c in text)

    def get(self):
        """Fetch matching events from Yahoo Upcoming and write them as JSON.

        Required query params: lat, lng.  Optional: query, categories,
        min_date, max_date; category/date filters apply only when query is
        empty (original behavior preserved).
        """
        self.response.headers['Content-Type'] = 'text/html'
        # required parameters
        lat = self.request.get('lat')
        lng = self.request.get('lng')
        # optional parameters
        query = self.request.get('query')
        categories = self.request.get('categories')
        min_date = self.request.get('min_date')
        max_date = self.request.get('max_date')
        # NOTE(review): parameters are concatenated without URL-encoding;
        # consider urllib.urlencode before shipping user input upstream.
        url = ('http://upcoming.yahooapis.com/services/rest/?api_key=ea79f3c7b2' +
               '&method=event.search' +
               '&per_page=100' +
               '&format=json' +
               '&sort=popular-score-desc' +
               '&location=' + lat + ',' + lng)
        if query == '':
            url += ('&category_id=' + categories +
                    '&min_date=' + min_date +
                    '&max_date=' + max_date)
        else:
            url += '&search_text=' + query
        result = urlfetch.fetch(url)
        if result.status_code == 200:
            text = {'status': {'code': 0, 'message': 'OK'}}
            events = simplejson.loads(result.content)['rsp']['event']
            text['events'] = EventsHandler._convert_events(events)
            self.response.out.write(simplejson.dumps(text))
        else:
            self.response.out.write(simplejson.dumps(
                {'status': {'code': 1, 'message': 'Could not fetch data.'}}))
# Add more REST API handlers here
application = webapp.WSGIApplication([('/rest/v1/events', EventsHandler)], debug=True)
def main():
    """Entry point for the CGI-style App Engine runtime."""
    run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
__author__ = 'apple'
try:
from osgeo import ogr
print 'Import of ogr from osgeo worked. Hurray!\n'
except:
print 'Import of ogr from osgeo failed\n\n' |
#!/usr/bin/env python2
##################################################
# GNU Radio Python Flow Graph
# Title: Default Test
# Generated: Thu Dec 10 22:45:58 2015
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
class default_test(gr.top_block):
    """GRC-generated flow graph "Default Test" (2015-12-10).

    NOTE: generated code -- prefer regenerating from the .grc file over
    hand-editing.  A 1 kHz complex cosine is fed through two frequency-
    translating filters; the downlink path is written to /tmp/test.dat,
    the uplink path is discarded into a null sink.
    """
    def __init__(self):
        gr.top_block.__init__(self, "Default Test")

        ##################################################
        # Variables
        ##################################################
        self.uplink_offset = uplink_offset = 0
        self.uplink_freq = uplink_freq = 400e6
        self.samp_rate = samp_rate = 250e3
        self.downlink_offset = downlink_offset = 0
        self.downlink_freq = downlink_freq = 400e6

        ##################################################
        # Blocks
        ##################################################
        self.uplink_filter = filter.freq_xlating_fir_filter_ccc(1, (1, ), uplink_freq, samp_rate)
        self.downlink_filter = filter.freq_xlating_fir_filter_ccc(1, (1, ), downlink_freq, samp_rate)
        self.blocks_throttle_0_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
        self.blocks_null_sink_0 = blocks.null_sink(gr.sizeof_gr_complex*1)
        self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_gr_complex*1, "/tmp/test.dat", False)
        self.blocks_file_sink_0.set_unbuffered(False)
        self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, 1000, 1, 0)

        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_sig_source_x_0, 0), (self.downlink_filter, 0))
        self.connect((self.analog_sig_source_x_0, 0), (self.uplink_filter, 0))
        self.connect((self.blocks_throttle_0, 0), (self.blocks_file_sink_0, 0))
        self.connect((self.blocks_throttle_0_0, 0), (self.blocks_null_sink_0, 0))
        self.connect((self.downlink_filter, 0), (self.blocks_throttle_0, 0))
        self.connect((self.uplink_filter, 0), (self.blocks_throttle_0_0, 0))

    # Generated accessors: setters push the new value into the running blocks.
    def get_uplink_offset(self):
        return self.uplink_offset

    def set_uplink_offset(self, uplink_offset):
        self.uplink_offset = uplink_offset

    def get_uplink_freq(self):
        return self.uplink_freq

    def set_uplink_freq(self, uplink_freq):
        self.uplink_freq = uplink_freq
        self.uplink_filter.set_center_freq(self.uplink_freq)

    def get_samp_rate(self):
        return self.samp_rate

    def set_samp_rate(self, samp_rate):
        self.samp_rate = samp_rate
        self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
        self.blocks_throttle_0.set_sample_rate(self.samp_rate)
        self.blocks_throttle_0_0.set_sample_rate(self.samp_rate)

    def get_downlink_offset(self):
        return self.downlink_offset

    def set_downlink_offset(self, downlink_offset):
        self.downlink_offset = downlink_offset

    def get_downlink_freq(self):
        return self.downlink_freq

    def set_downlink_freq(self, downlink_freq):
        self.downlink_freq = downlink_freq
        self.downlink_filter.set_center_freq(self.downlink_freq)
def main(top_block_cls=default_test, options=None):
    """Instantiate the flow graph, start it, and block until it finishes.

    `options` is unused here but kept for GRC's generated interface.
    """
    tb = top_block_cls()
    tb.start()
    tb.wait()
if __name__ == '__main__':
    main()
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
# Read in the data set (semicolon-separated white-wine quality CSV)
wine = pd.read_csv("./data/winequality-white.csv", sep=';', encoding='utf-8')
# Split the data into label (quality) and feature columns
y = wine["quality"]
# x = wine.iloc[:, 0:4]
x = wine.drop(["quality"], axis=1)
# print(x)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state=66)
# Train a random forest; balanced_subsample reweights the skewed quality classes
model = RandomForestClassifier(n_estimators=1000, class_weight='balanced_subsample')
model.fit(x_train, y_train)
aaa = model.score(x_test, y_test)
# Evaluate on the held-out split
y_pred = model.predict(x_test)
print(classification_report(y_test, y_pred))
print("์ ๋ต๋ฅ =", accuracy_score(y_test, y_pred))
print(aaa)
#I pledge my Honor that I have abided by the Stevens Honor System.
#I understand that I may access the course textbook and course lecture notes but I am not to access any other resource.
#I also pledge that I worked alone on this exam.
#Meher Kohlli
def Mathematicalfunction():
    """Interactive four-function calculator: prompt for an operation and
    two numbers, then print the result (or "Invalid selection")."""
    print("\nMATHEMATICAL FUNCTIONS")
    print("For addition, please enter 1")
    print("For subtraction, please enter 2")
    print("For multiplication, please enter 3")
    print("For division, please enter 4")
    entry = float(input("Enter a number: "))
    num1 = float(input("\nEnter your first number: "))
    num2 = float(input("\nEnter your second number: "))
    # Dispatch table keyed by menu choice (float keys equal to ints hash alike).
    operations = {
        1: lambda a, b: a + b,
        2: lambda a, b: a - b,
        3: lambda a, b: a * b,
        4: lambda a, b: a / b,
    }
    chosen = operations.get(entry)
    if chosen is None:
        print("Invalid selection")
    else:
        print(chosen(num1, num2))
def Stringfunctions():
    """Interactive string menu: count vowels in a string, or print a simple
    per-character numeric encryption of it."""
    print("\nSTRING FUNCTIONS")
    print("To determine the number of vowels in a string, please enter 1")
    print("To encrypt a string, please enter 2")
    selection = float(input("Enter a number: "))
    if selection < 1 or selection > 2:
        print("Invalid selection")
    else:
        string = input("Enter a string of words: ")
        if selection == 1:
            # y is deliberately counted as a vowel, matching the original.
            vowels = sum(1 for ch in string if ch in "aeiouyAEIOUY")
            print("The number of vowels in", string, "are: ", vowels)
        elif selection == 2:
            # Encrypt each character as (codepoint + 10) * 3, one line each.
            for ch in string:
                encrypted = (ord(ch) + 10) * 3
                print("the encrypted message is: ", encrypted)
        else:
            # Reachable for non-integer selections between 1 and 2 (e.g. 1.5).
            print("Invalid selection")
def main():
    """Show the main menu once and run the chosen sub-menu."""
    def mainmenu():
        # Menu text only; input is read by the caller below.
        print("\nMAIN MENU")
        print("For mathematical functions, please enter 1")
        print("For string functions, please enter 2")
    mainmenu()
    choice = float(input("Enter a choice: "))
    if choice == 1:
        Mathematicalfunction()
    elif choice == 2:
        Stringfunctions()
    else:
        print("Invalid selection")
main()
|
from flask import Flask, flash, render_template, request
import pickle
import numpy as np
app = Flask(__name__)
# Secret key is required for flash(); NOTE(review): hard-coded secret -- move to config/env.
app.secret_key = b'dasdasda\n\xec]/'
# Pre-trained loan-approval classifier; NOTE(review): file handle is never closed.
clf_model = pickle.load(open('beta_model_3.pkl', 'rb'))
@app.route("/")
def home():
a = [['0', '0', '0', '0', '0', '3000', '0', '66', '360', '0', '0']]
pred = clf_model.predict(a)
reasons = []
if pred == 'N':
reasons = getFactors(a)
return render_template("index.html")
if __name__ == "__main__":
app.run()
@app.route('/submit', methods=['GET', 'POST'])
def run():
    """Score the submitted loan form and render the result page."""
    # Assemble the feature row in the order the model was trained on --
    # presumably gender, married, dependents, education, employed,
    # attribute0-3, credit history, property area (matches getFactors'
    # indexing); TODO confirm against the training pipeline.
    a = []
    a.append(request.form['gender'])
    a.append(request.form['spouse'])
    a.append(request.form['dependent'])
    a.append(request.form['education'])
    a.append(request.form['employed'])
    for i in range(4):
        a.append(request.form["attribute" + str(i)])
    a.append(request.form['credit'])
    a.append(request.form['property'])
    # NOTE(review): return value ignored -- an empty form still gets scored.
    checkEmpty([a])
    reasons = []
    pred = clf_model.predict([a])
    if pred == 'N':
        reasons = getFactors([a])
    return render_template("index.html", approved=getApproved(pred), reasons=reasons, len=len(reasons))
def checkEmpty(a):
    """Return True when every field in the single-row nested list `a` is
    filled in; otherwise flash a warning and return False."""
    fields = a[0]
    if '' not in fields:
        return True
    flash('Looks like you have not fully filled out the form!')
    return False
def getApproved(pred):
    """Map a classifier prediction ('Y'/'N') to a user-facing message."""
    return "Approved!" if (pred == 'Y') else "Not Approved."
def getFactors(a):
    """Return human-readable reasons a loan application was likely denied.

    `a` is a single-row nested list ([[...]]) in the model's feature order;
    all values arrive as strings from the HTML form.  Indices used:
    3 = education, 5 = income, 7 = loan amount, 9 = credit history,
    10 = property area.
    """
    a = a[0]
    factors = []
    if a[9] == '0': # Bad Credit History
        factors.append("Your Credit History is insufficient. Try getting more credit experience before applying again.")
    if a[10] == '0': # Rural Customer
        factors.append("Rural Customers have a more difficult time getting a loan based on property appreciation. Try improving other factors before applying again.")
    if int(a[7]) > 180: # High Loan
        factors.append("The borrowing amount you asked for is a little high. Please try a lower amount.")
    # BUG FIX: was `a[3] == 1`, comparing a form string against an int --
    # always False, so the education reason could never be reported.
    if str(a[3]) == '1': # No Education
        factors.append("Your education status may impact your ability to get a loan. Lenders prefer those with a diploma or degree.")
    if int(a[5]) < 4200: # Low Income
        factors.append("The income you have provided may be too low for lenders.")
    return factors
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 26 00:46:22 2018
@author: Angela
"""
def smallest_factor(n):
    """Return the smallest prime factor of the positive integer n.

    Returns 1 for n == 1 and None for n < 1 (zero or negative input),
    matching the expectations of the accompanying tests.
    """
    if n < 1:
        return None
    if n == 1:
        return 1
    # BUG FIX: the upper bound was int(n**.5), which excludes the square
    # root itself, so perfect squares like 4 and 9 were reported as prime.
    for i in range(2, int(n**.5) + 1):
        if n % i == 0:
            return i
    return n
#Test for zero and negative values
def test_smallest_factor():
assert smallest_factor(0)==None, "error"
assert smallest_factor(-1)==None, "error" |
# Write a class Book โeach book has a title (string) and one or more authors.
# Write a class to represent an author โeach author has a name (string) and an email address (string
class Book:
    """A book with a title and an ordered list of authors."""

    def __init__(self, title):
        self._title = title
        self._authors = []

    def add_author(self, a):
        """Register another author for this book."""
        self._authors.append(a)

    def print_book(self):
        """Print the title, then each author on its own line."""
        print(self._title)
        for author in self._authors:
            print(author)

    def __str__(self):
        # Note: with no authors this yields a trailing ' - ', as before.
        author_text = ' - '.join(str(author) for author in self._authors)
        return self._title + ' - ' + author_text
class Author:
    """An author identified by name and email address."""

    def __init__(self, name, email):
        self._name = name
        self._email = email

    def __str__(self):
        return " ".join((self._name, self._email))
a1 = Author("Roald Dahl", "roald@books.com")
a2 = Author("Richard Herlihy", "richardherlihy@gmail.com")
print(a1)
my_book = Book("Fantastic Mr. Fox")
my_book.add_author(a1)
my_book.add_author(a2)
my_book.print_book()
print(my_book) |
# Smoke-test the main HTTP verbs against httpbin.org; responses are discarded.
import requests
requests.post('http://httpbin.org/post')
requests.put('http://httpbin.org/put')
requests.delete('http://httpbin.org/delete')
requests.head('http://httpbin.org/get')
requests.options('http://httpbin.org/get')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 13:46:06 2018
@author: thomas
"""
import os

# Path bookkeeping for the VisIt CLI script: output paths are derived from
# the script's own location and labels parsed from the working directory.
dir_path = os.path.dirname(os.path.realpath(__file__)) + '/'
cwd = os.getcwd()
St = os.path.basename(cwd)  # presumably a Strouhal-number label from the dir name -- TODO confirm
Re = os.path.split(os.path.dirname(cwd))[-1]  # presumably a Reynolds-number label from the parent dir -- TODO confirm
OpenDatabase("localhost:"+dir_path+"VTK/AVG.vtk")
DefineScalarExpression("AverageUx", "AverageVelocity[0]")
DefineScalarExpression("AverageUy", "AverageVelocity[1]")
#Begin Average Omega and Velocity Operations
AddPlot("Pseudocolor", "AverageOmega")
PseudocolorAtts = PseudocolorAttributes()
PseudocolorAtts.scaling = PseudocolorAtts.Linear # Linear, Log, Skew
PseudocolorAtts.skewFactor = 1
PseudocolorAtts.limitsMode = PseudocolorAtts.OriginalData # OriginalData, CurrentPlot
PseudocolorAtts.minFlag = 1
PseudocolorAtts.min = -10
PseudocolorAtts.maxFlag = 1
PseudocolorAtts.max = 10
PseudocolorAtts.centering = PseudocolorAtts.Nodal # Natural, Nodal, Zonal
PseudocolorAtts.colorTableName = "difference"
PseudocolorAtts.invertColorTable = 0
PseudocolorAtts.opacityType = PseudocolorAtts.FullyOpaque # ColorTable, FullyOpaque, Constant, Ramp, VariableRange
PseudocolorAtts.opacityVariable = ""
PseudocolorAtts.opacity = 1
PseudocolorAtts.opacityVarMin = 0
PseudocolorAtts.opacityVarMax = 1
PseudocolorAtts.opacityVarMinFlag = 0
PseudocolorAtts.opacityVarMaxFlag = 0
PseudocolorAtts.pointSize = 0.05
PseudocolorAtts.pointType = PseudocolorAtts.Point # Box, Axis, Icosahedron, Octahedron, Tetrahedron, SphereGeometry, Point, Sphere
PseudocolorAtts.pointSizeVarEnabled = 0
PseudocolorAtts.pointSizeVar = "default"
PseudocolorAtts.pointSizePixels = 2
PseudocolorAtts.lineStyle = PseudocolorAtts.SOLID # SOLID, DASH, DOT, DOTDASH
PseudocolorAtts.lineType = PseudocolorAtts.Line # Line, Tube, Ribbon
PseudocolorAtts.lineWidth = 0
PseudocolorAtts.tubeResolution = 10
PseudocolorAtts.tubeRadiusSizeType = PseudocolorAtts.FractionOfBBox # Absolute, FractionOfBBox
PseudocolorAtts.tubeRadiusAbsolute = 0.125
PseudocolorAtts.tubeRadiusBBox = 0.005
PseudocolorAtts.tubeRadiusVarEnabled = 0
PseudocolorAtts.tubeRadiusVar = ""
PseudocolorAtts.tubeRadiusVarRatio = 10
PseudocolorAtts.tailStyle = PseudocolorAtts.None # None, Spheres, Cones
PseudocolorAtts.headStyle = PseudocolorAtts.None # None, Spheres, Cones
PseudocolorAtts.endPointRadiusSizeType = PseudocolorAtts.FractionOfBBox # Absolute, FractionOfBBox
PseudocolorAtts.endPointRadiusAbsolute = 0.125
PseudocolorAtts.endPointRadiusBBox = 0.05
PseudocolorAtts.endPointResolution = 10
PseudocolorAtts.endPointRatio = 5
PseudocolorAtts.endPointRadiusVarEnabled = 0
PseudocolorAtts.endPointRadiusVar = ""
PseudocolorAtts.endPointRadiusVarRatio = 10
PseudocolorAtts.renderSurfaces = 1
PseudocolorAtts.renderWireframe = 0
PseudocolorAtts.renderPoints = 0
PseudocolorAtts.smoothingLevel = 0
PseudocolorAtts.legendFlag = 0
PseudocolorAtts.lightingFlag = 1
PseudocolorAtts.wireframeColor = (0, 0, 0, 0)
PseudocolorAtts.pointColor = (0, 0, 0, 0)
SetPlotOptions(PseudocolorAtts)
AddPlot("Vector", "AverageVelocity")
VectorAtts = VectorAttributes()
VectorAtts.glyphLocation = VectorAtts.UniformInSpace # AdaptsToMeshResolution, UniformInSpace
VectorAtts.useStride = 0
VectorAtts.stride = 1
VectorAtts.nVectors = 10000
VectorAtts.lineStyle = VectorAtts.SOLID # SOLID, DASH, DOT, DOTDASH
VectorAtts.lineWidth = 0
VectorAtts.scale = 0.05
VectorAtts.scaleByMagnitude = 0
VectorAtts.autoScale = 1
VectorAtts.headSize = 0.25
VectorAtts.headOn = 1
VectorAtts.colorByMag = 0
VectorAtts.useLegend = 0
VectorAtts.vectorColor = (0, 0, 0, 255)
VectorAtts.colorTableName = "Default"
VectorAtts.invertColorTable = 0
VectorAtts.vectorOrigin = VectorAtts.Tail # Head, Middle, Tail
VectorAtts.minFlag = 0
VectorAtts.maxFlag = 0
VectorAtts.limitsMode = VectorAtts.OriginalData # OriginalData, CurrentPlot
VectorAtts.min = 0
VectorAtts.max = 1
VectorAtts.lineStem = VectorAtts.Line # Cylinder, Line
VectorAtts.geometryQuality = VectorAtts.Fast # Fast, High
VectorAtts.stemWidth = 0.08
VectorAtts.origOnly = 1
VectorAtts.glyphType = VectorAtts.Arrow # Arrow, Ellipsoid
SetPlotOptions(VectorAtts)
AddOperator("Box")
SetActivePlots(1)
BoxAtts = BoxAttributes()
BoxAtts.amount = BoxAtts.Some # Some, All
BoxAtts.minx = -2
BoxAtts.maxx = 2
BoxAtts.miny = -2
BoxAtts.maxy = 2
BoxAtts.minz = 0
BoxAtts.maxz = 1
BoxAtts.inverse = 0
SetOperatorOptions(BoxAtts, 0)
SetActivePlots(0)
BoxAtts = BoxAttributes()
BoxAtts.amount = BoxAtts.Some # Some, All
BoxAtts.minx = -2
BoxAtts.maxx = 2
BoxAtts.miny = -2
BoxAtts.maxy = 2
BoxAtts.minz = 0
BoxAtts.maxz = 1
BoxAtts.inverse = 0
SetOperatorOptions(BoxAtts, 0)
#Remove Database and User Info
# Hide the user-name and database banners so saved images are clean.
AnnotationAtts = AnnotationAttributes()
AnnotationAtts.userInfoFlag = 0
AnnotationAtts.userInfoFont.font = AnnotationAtts.userInfoFont.Arial  # Arial, Courier, Times
AnnotationAtts.userInfoFont.scale = 1
AnnotationAtts.userInfoFont.useForegroundColor = 1
AnnotationAtts.userInfoFont.color = (0, 0, 0, 255)
AnnotationAtts.userInfoFont.bold = 0
AnnotationAtts.userInfoFont.italic = 0
AnnotationAtts.databaseInfoFlag = 0
SetAnnotationAttributes(AnnotationAtts)
DrawPlots()
# Begin spontaneous state
# Frame the 2D view on the clipped region and save a 1920x1080 PNG.
View2DAtts = View2DAttributes()
View2DAtts.windowCoords = (-2, 2, -2, 2)
View2DAtts.viewportCoords = (0.2, 0.95, 0.15, 0.95)
View2DAtts.fullFrameActivationMode = View2DAtts.Auto  # On, Off, Auto
View2DAtts.fullFrameAutoThreshold = 100
View2DAtts.xScale = View2DAtts.LINEAR  # LINEAR, LOG
View2DAtts.yScale = View2DAtts.LINEAR  # LINEAR, LOG
View2DAtts.windowValid = 1
SetView2D(View2DAtts)
# End spontaneous state
SaveWindowAtts = SaveWindowAttributes()
SaveWindowAtts.outputToCurrentDirectory = 0
SaveWindowAtts.outputDirectory = dir_path
# NOTE(review): later saves use "Horizontal."+Re+"."+St with "." separators;
# this name concatenates Re and St with none - confirm this is intended.
SaveWindowAtts.fileName = "AvgFluidFlow"+Re+St
SaveWindowAtts.family = 0
SaveWindowAtts.format = SaveWindowAtts.PNG  # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY
SaveWindowAtts.width = 1920
SaveWindowAtts.height = 1080
SaveWindowAtts.screenCapture = 0
SaveWindowAtts.saveTiled = 0
SaveWindowAtts.quality = 80
SaveWindowAtts.progressive = 0
SaveWindowAtts.binary = 0
SaveWindowAtts.stereo = 0
SaveWindowAtts.compression = SaveWindowAtts.PackBits  # None, PackBits, Jpeg, Deflate
SaveWindowAtts.forceMerge = 0
SaveWindowAtts.resConstraint = SaveWindowAtts.ScreenProportions  # NoConstraint, EqualWidthHeight, ScreenProportions
SaveWindowAtts.advancedMultiWindowSave = 0
SetSaveWindowAttributes(SaveWindowAtts)
SaveWindow()
#Begin Lineout Operations
# Horizontal profile: sample AverageUx along y=0.01 from x=0 to x=4.
DeleteAllPlots()
AddPlot("Pseudocolor", "AverageUx", 1, 0)
DrawPlots()
SetQueryFloatFormat("%g")
#Create a Lineout operation (Horizontal)
# NOTE(review): vars=("AverageUx") is a plain string, not a 1-tuple;
# ("AverageUx",) would make the intent explicit - confirm VisIt accepts both.
Query("Lineout", end_point=(4, 0.01, 0), num_samples=500, start_point=(0.0, 0.01, 0), use_sampling=0, vars=("AverageUx"))
# The lineout result appears as a Curve plot in window 2.
SetActiveWindow(2)
CurveAtts = CurveAttributes()
CurveAtts.showLines = 0
CurveAtts.lineStyle = CurveAtts.SOLID  # SOLID, DASH, DOT, DOTDASH
CurveAtts.lineWidth = 0
CurveAtts.showPoints = 1
CurveAtts.symbol = CurveAtts.Point  # Point, TriangleUp, TriangleDown, Square, Circle, Plus, X
CurveAtts.pointSize = 5
CurveAtts.pointFillMode = CurveAtts.Static  # Static, Dynamic
CurveAtts.pointStride = 1
CurveAtts.symbolDensity = 50
CurveAtts.curveColorSource = CurveAtts.Cycle  # Cycle, Custom
CurveAtts.curveColor = (255, 0, 0, 255)
CurveAtts.showLegend = 1
CurveAtts.showLabels = 1
CurveAtts.designator = "A"
CurveAtts.doBallTimeCue = 0
CurveAtts.ballTimeCueColor = (0, 0, 0, 255)
CurveAtts.timeCueBallSize = 0.01
CurveAtts.doLineTimeCue = 0
CurveAtts.lineTimeCueColor = (0, 0, 0, 255)
CurveAtts.lineTimeCueWidth = 0
CurveAtts.doCropTimeCue = 0
CurveAtts.timeForTimeCue = 0
CurveAtts.fillMode = CurveAtts.NoFill  # NoFill, Solid, HorizontalGradient, VerticalGradient
CurveAtts.fillColor1 = (255, 0, 0, 255)
CurveAtts.fillColor2 = (255, 100, 100, 255)
CurveAtts.polarToCartesian = 0
CurveAtts.polarCoordinateOrder = CurveAtts.R_Theta  # R_Theta, Theta_R
CurveAtts.angleUnits = CurveAtts.Radians  # Radians, Degrees
SetPlotOptions(CurveAtts)
# Save the horizontal profile as a .curve file tagged by Re and St.
SaveWindowAtts = SaveWindowAttributes()
SaveWindowAtts.outputToCurrentDirectory = 0
SaveWindowAtts.outputDirectory = dir_path
SaveWindowAtts.fileName = "Horizontal."+Re+"."+St
SaveWindowAtts.family = 0
SaveWindowAtts.format = SaveWindowAtts.CURVE  # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY
SaveWindowAtts.width = 1024
SaveWindowAtts.height = 1024
SaveWindowAtts.screenCapture = 0
SaveWindowAtts.saveTiled = 0
SaveWindowAtts.quality = 80
SaveWindowAtts.progressive = 0
SaveWindowAtts.binary = 0
SaveWindowAtts.stereo = 0
SaveWindowAtts.compression = SaveWindowAtts.PackBits  # None, PackBits, Jpeg, Deflate
SaveWindowAtts.forceMerge = 0
SaveWindowAtts.resConstraint = SaveWindowAtts.ScreenProportions  # NoConstraint, EqualWidthHeight, ScreenProportions
SaveWindowAtts.advancedMultiWindowSave = 0
SetSaveWindowAttributes(SaveWindowAtts)
SaveWindow()
# Vertical profile: repeat the lineout for AverageUy along x=0.01.
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
AddPlot("Pseudocolor", "AverageUy", 1, 0)
DrawPlots()
#Create a Lineout operation (Vertical)
Query("Lineout", end_point=(0.01, 4.0, 0), num_samples=500, start_point=(0.01, 0.0, 0), use_sampling=0, vars=("AverageUy"))
SetActiveWindow(2)
# Reuse the curve styling configured for the horizontal profile.
SetPlotOptions(CurveAtts)
SaveWindowAtts = SaveWindowAttributes()
SaveWindowAtts.outputToCurrentDirectory = 0
SaveWindowAtts.outputDirectory = dir_path
SaveWindowAtts.fileName = "Vertical."+Re+"."+St
SaveWindowAtts.family = 0
SaveWindowAtts.format = SaveWindowAtts.CURVE  # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY
SaveWindowAtts.width = 1024
SaveWindowAtts.height = 1024
SaveWindowAtts.screenCapture = 0
SaveWindowAtts.saveTiled = 0
SaveWindowAtts.quality = 80
SaveWindowAtts.progressive = 0
SaveWindowAtts.binary = 0
SaveWindowAtts.stereo = 0
SaveWindowAtts.compression = SaveWindowAtts.PackBits  # None, PackBits, Jpeg, Deflate
SaveWindowAtts.forceMerge = 0
SaveWindowAtts.resConstraint = SaveWindowAtts.ScreenProportions  # NoConstraint, EqualWidthHeight, ScreenProportions
SaveWindowAtts.advancedMultiWindowSave = 0
SetSaveWindowAttributes(SaveWindowAtts)
SaveWindow()
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
# Persist the session for later inspection, then leave the VisIt CLI.
SaveSession(dir_path+"AverageFlow.session")
DeleteAllPlots()
exit()
|
from .base_page import BasePage
from .locators import ProductPageLocators
class ProductPage(BasePage):
    """Page object for a single product page and its add-to-basket flow."""

    def add_object_to_basket_solve(self):
        """Add the product to the basket, then solve the interim quiz."""
        self.browser.find_element(*ProductPageLocators.ADD_TO_BASKET).click()
        self.solve_quiz_and_get_code()

    def add_object_to_basket(self):
        """Click the add-to-basket button."""
        self.browser.find_element(*ProductPageLocators.ADD_TO_BASKET).click()

    def check_name_in_basket(self):
        """Assert the basket shows the same product name as the page."""
        expected_name = self.browser.find_element(*ProductPageLocators.PRODUCT_NAME).text
        basket_name = self.browser.find_element(*ProductPageLocators.PRODUCT_NAME_ADDED).text
        assert basket_name == expected_name, 'Object name in basket is incorrect'

    def check_price_in_basket(self):
        """Assert the basket shows the same price as the page."""
        expected_price = self.browser.find_element(*ProductPageLocators.PRODUCT_PRICE).text
        basket_price = self.browser.find_element(*ProductPageLocators.PRODUCT_PRICE_ADDED).text
        assert basket_price == expected_price, 'Object price in basket is incorrect'

    def should_be_adding_basket_button(self):
        """Assert the add-to-basket button is present."""
        assert self.is_element_present(*ProductPageLocators.ADD_TO_BASKET), \
            'No button adding to basket on a page'

    def should_be_price_in_basket(self):
        """Assert a price element is present in the basket area."""
        assert self.is_element_present(*ProductPageLocators.PRODUCT_PRICE_ADDED), \
            'No price in basket on a page'

    def should_not_be_success_message(self):
        """Assert no success message is shown."""
        assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \
            'Success message is presented, but should not be'

    def should_disappear(self):
        """Assert the success message eventually disappears."""
        assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \
            'Success message is not disappeared'

    def should_be_name_in_basket(self):
        """Assert a product-name element is present in the basket area."""
        assert self.is_element_present(*ProductPageLocators.PRODUCT_NAME_ADDED), \
            'No object name in basket on a page'
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 10:51:13 2015
@author: eejvt
Code developed by Jesus Vergara Temprado
Contact email eejvt@leeds.ac.uk
University of Leeds 2015
"""
import numpy as np
import sys
import matplotlib.pyplot as plt
#sys.path.append('C:\opencv\build\x64\vc12\bin')
from glob import glob
import os
import csv
#flow=12.5
import random
#folder='C:\Users\eejvt\Mace head 2015\Experiments\ul-assay\\'
# Data root on the shared drive and the measurement day to process (yymmdd).
folder='//foe-data-10/a86/shared/Mace Head 15/'
day='150828'
os.chdir(folder+day)
# Each subdirectory of the day folder holds one experiment run.
a=glob('*\\')
#fig=plt.figure()
#ax=plt.subplot(211)
#bx=plt.subplot()
# Blank-subtraction mode; overwritten by the interactive prompt below (0 = none).
awnser_blank=0
def f(x, A, B):
    """Exponential fit line used for blank subtraction: exp(A - B*x)."""
    return np.exp(A - B * x)
# Ask the operator (Python 2 raw_input) which blank correction to apply and
# load the exponential-fit parameters (keys A, B, errA, errB) from CSV.
print 'Substract same day blanks: 2 \nSubstract big blank: 1 \nNo blanks: 0:No'
awnser_blank= int(raw_input())
if awnser_blank==2:
    os.chdir(folder+day+'\\blanks')
    reader = csv.reader(open('parameterization.csv', 'rb'))
    blank_param = dict(x for x in reader)
elif awnser_blank==1:
    os.chdir(folder+'\\big_blank')
    reader = csv.reader(open('parameterization.csv', 'rb'))
    blank_param = dict(x for x in reader)
#%%
# One figure per day: ax (left) fraction frozen vs T, bx (right) INP per litre vs T.
fig=plt.figure(figsize=(20, 15))
ax=plt.subplot(121)
bx=plt.subplot(122)
for ifile in range (len(a)):
    # The blanks folder only feeds the parameterization above; skip it here.
    if a[ifile]=='blanks\\':
        continue
    os.chdir(folder+day+'\\'+a[ifile])
    # Interactive gate: let the operator skip runs.
    print 'Read? 1:Yes 0:No'
    print a[ifile][12:][:-1]
    awnser= int(raw_input())
    if not awnser:
        continue
    #events=np.genfromtxt('events_frame.csv',delimiter=',')
    temps=np.genfromtxt('temps.csv',delimiter=',')
    ff=np.genfromtxt('ff.csv',delimiter=',')
    if os.path.isfile('sampling_log.csv'):
        # sampling_log.csv rows (label,value): flow, start hh:mm, end hh:mm,
        # bottle mass full, bottle mass empty.
        sampling_log=np.genfromtxt('sampling_log.csv',delimiter=',',dtype=None)
        flow=float(sampling_log[0,1])
        st_h=float(sampling_log[1,1][:2])
        st_m=float(sampling_log[1,1][3:5])
        end_h=float(sampling_log[2,1][:2])
        end_m=float(sampling_log[2,1][3:5])
        minutes=(end_h-st_h)*60+end_m-st_m
        mass_full=float(sampling_log[3,1])
        mass_empty=float(sampling_log[4,1])
        water_mass=mass_full-mass_empty
        water_volume=water_mass#ml
        print 'minutes sampling', minutes
        total_volume=minutes*flow
        print 'volume of air (l)',total_volume
        air_per_ml=total_volume/water_volume
        print 'liters of air per ml',air_per_ml
        #1ml=1000ul
        air_per_ul=air_per_ml/1000
        print 'liters of air per ul',air_per_ul
    #sampling_log
    # NOTE(review): if sampling_log.csv is missing, air_per_ul silently keeps
    # its value from the previous run (NameError on the first) - confirm.
    INPconc=-np.log(1-ff)/air_per_ul#INP/l
    print 'INP calculated','L^{-1}'
    if awnser_blank:
        # Subtract the blank's fitted exponential from the per-ul spectrum.
        substraction=f(temps,float(blank_param['A']),float(blank_param['B']))
        errsub=substraction-f(temps,float(blank_param['A'])-float(blank_param['errA']),float(blank_param['B'])-float(blank_param['errB']))
        INPconc=(INPconc*air_per_ul-substraction)/air_per_ul#Check this substraction
    #INPconc[INPconc<0]=0
    # Enforce a non-decreasing cumulative INP spectrum.
    for i in range(len(INPconc)-1):
        if INPconc[i+1]<INPconc[i]:
            INPconc[i+1]=INPconc[i]
    N=len(temps)
    '''
    #ff_std=np.sqrt(((1-ff)**2*ff*N+ff**2*(1-ff)*N)/N)
    ff_std=np.sqrt(N*ff*(1-ff))/N
    #ff_std=np.sqrt(((1-ff)**2*ff*N+ff**2*(1-ff)*N)/N)
    ff_up=ff+1.96*ff_std
    ff_down=ff-1.96*ff_std
    errff=1.96*ff_std
    err_air_per_ul=air_per_ul*0.1
    #std=np.sqrt(np.log(INPconc)/ff*len(events))
    err_up=(1+1.96/(np.sqrt(ff*len(events)-1)))*ff*len(events)/air_per_ul
    err_down=(1-1.96/(np.sqrt(ff*len(events)-1)))*ff*len(events)/air_per_ul
    INP_err_sub=1/air_per_ul*errsub
    INP_err_ff=1/(air_per_ml*(1-ff))*errff
    INP_err_v=-(-substraction+np.log(1-ff))/air_per_ul**2*err_air_per_ul
    INP_err_total=INP_err_ff+INP_err_sub+INP_err_v
    r = lambda: random.randint(0,255)
    c1,c2,c3=r(),r(),r()
    INP_up=INPconc+INP_err_total#err_up#np.exp(np.log(INPconc)+std)
    INP_down=INPconc-INP_err_total#err_down#np.exp(np.log(INPconc)-std)
    bx.plot(temps,INP_up,'--',c='#%02X%02X%02X' % (c1,c2,c3),lw=2)
    bx.plot(temps,INP_down,'--',c='#%02X%02X%02X' % (c1,c2,c3),lw=2)
    '''
    bx.plot(temps,INPconc,label=a[ifile][12:][:-1],lw=2)#c='#%02X%02X%02X' % (c1,c2,c3))
    bx.set_xlabel('Temperature')
    bx.set_ylabel('$INP (L^{-1})$')
    bx.set_yscale('log')
    observation=1
    np.savetxt('INP.csv',INPconc,delimiter=',')
    N=len(temps)
    '''
    ff_std=np.sqrt(N*ff*(1-ff))/N
    #ff_std=np.sqrt(((1-ff)**2*ff*N+ff**2*(1-ff)*N)/N)
    ff_up=ff+1.96*ff_std
    ff_down=ff-1.96*ff_std
    errff=1.96*ff_std
    ax.plot(temps,ff_up,'--')
    ax.plot(temps,ff_down,'--')
    '''
    ax.plot(temps,ff,'o',label=a[ifile][12:][:-1])
    ax.set_xlabel('Temperature')
    ax.set_ylabel('Fraction frozen')
# Figure-level annotation and output, once all runs are plotted.
big_title='Mace Head Day %s/%s/20%s'%(day[-2:],day[2:4],day[:2])
plt.figtext(0.40,0.95,big_title,fontsize=20)
ax.legend(loc='best', fontsize = 'small')
#ax.set_xlim(-30,-10)
bx.legend(loc='best', fontsize = 'small')
bx.set_xlim(-30,-10)
os.chdir(folder+day)
plt.savefig('INP_day_%s.png'%day)
#if observation
|
"""empty message
Revision ID: 0daa0acd5042
Revises: 70e970adcb0f
Create Date: 2019-04-25 21:25:28.759000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# revision identifiers, used by Alembic to order this migration in the chain.
revision = '0daa0acd5042'
down_revision = '70e970adcb0f'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the unique constraint on users.perm, then the column itself."""
    # ### commands auto generated by Alembic - please adjust! ###
    # The constraint must go before the column it covers can be dropped.
    op.drop_constraint(u'users_perm_key', 'users', type_='unique')
    op.drop_column('users', 'perm')
    # ### end Alembic commands ###
def downgrade():
    """Recreate users.perm (VARCHAR(120), nullable) and its unique constraint."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('perm', sa.VARCHAR(length=120), autoincrement=False, nullable=True))
    op.create_unique_constraint(u'users_perm_key', 'users', ['perm'])
    # ### end Alembic commands ###
|
#!/usr/bin/env python
import bottle
import redis
import settings
import hashlib
# Shared Redis connection, stored on the settings module for model access.
settings.r = redis.Redis(host=settings.REDIS_HOST,port=settings.REDIS_PORT,db=settings.REDIS_DB)
from bottle_session import Session
from model import User,Post,Timeline
# Usernames that collide with routes below and must not be registered.
reserved_usernames = 'follow mentions home signup login logout post'
def authenticate(func):
    """Decorator: resolve the session user and pass it as the first argument.

    Redirects to /login when there is no valid session or no matching user.
    """
    def wrapper(*args, **kwargs):
        sess = Session(bottle.request, bottle.response)
        if not sess.is_new():
            current = User.find_by_id(sess['id'])
            if current:
                return func(current, *args, **kwargs)
        bottle.redirect('/login')
    return wrapper
def logged_in_user():
    """Return the User bound to the current session, or None."""
    sess = Session(bottle.request, bottle.response)
    if sess.is_new():
        return None
    return User.find_by_id(sess['id'])
def user_is_logged():
    """True when the current session maps to an existing user."""
    return bool(logged_in_user())
@bottle.route('/')
def index():
    """Landing page: logged-in users are bounced straight to /home."""
    if user_is_logged():
        bottle.redirect('/home')
    return bottle.template('home_not_logged', logged=False)
@bottle.route('/home')
@authenticate
def home(user):
    """Render the authenticated user's timeline.

    Improvement: fetch user.posts() once instead of twice per request.
    """
    bottle.TEMPLATES.clear()
    counts = user.followees_count, user.followers_count, user.tweet_count
    posts = user.posts()
    # The first post is the most recent tweet, if the user has any.
    last_tweet = posts[0] if posts else None
    return bottle.template('timeline', timeline=user.timeline(), page="timeline",
                           username=user.username, counts=counts,
                           last_tweet=last_tweet, logged=True)
@bottle.route('/mentions')
@authenticate
def mentions(user):
    """Render tweets that mention the authenticated user.

    Bug fix: the original called user.post(), which is not the accessor used
    anywhere else in this module; every other handler uses user.posts().
    """
    counts = user.followees_count, user.followers_count, user.tweet_count
    return bottle.template("mentions", mentions=user.mentions(), page="mentions",
                           username=user.username, counts=counts,
                           posts=user.posts()[:1], logged=True)
@bottle.route('/:name')
def user_page(name):
    """Public profile page for *name*.

    Bug fix: the original read logged_user.username before checking whether
    anyone is logged in, raising AttributeError for anonymous visitors.
    """
    user = User.find_by_username(name)
    if not user:
        return bottle.HTTPError(code=404)
    is_logged = user_is_logged()
    counts = user.followees_count, user.followers_count, user.tweet_count
    logged_user = logged_in_user()
    is_following = False
    himself = False
    if logged_user:
        himself = logged_user.username == name
        is_following = logged_user.following(user)
    return bottle.template('user', posts=user.posts(), counts=counts, page='user',
                           username=user.username, logged=is_logged,
                           is_following=is_following, himself=himself)
@bottle.route('/:name/following')
@authenticate
def following(user, name):
    """List the accounts *name* follows.

    Bug fix: @authenticate prepends the authenticated user as the first
    positional argument; the original signature had only `name`, so the URL
    keyword collided with it (TypeError). The authenticated user is unused.
    """
    target = User.find_by_username(name)
    if target:
        return bottle.template('following', followees=target.followees,
                               page="followee", logged=user_is_logged())
    return bottle.HTTPError(code=404)
@bottle.route('/:name/followers')
@authenticate
def followers(user, name):
    """List the accounts following *name*.

    Bug fix: @authenticate prepends the authenticated user as the first
    positional argument; the original signature had only `name`, so the URL
    keyword collided with it (TypeError). The authenticated user is unused.
    """
    target = User.find_by_username(name)
    if target:
        return bottle.template('followers', followers=target.followers,
                               page="followers", logged=user_is_logged())
    return bottle.HTTPError(code=404)
@bottle.route('/:name/statuses/:id')
@bottle.validate(id=int)
def status(name, id):
    """Show a single tweet, verifying it belongs to *name*."""
    tweet = Post.find_by_id(id)
    if tweet and tweet.user.username == name:
        return bottle.template('single', username=tweet.user.username, tweet=tweet,
                               page='single', logged=user_is_logged())
    return bottle.HTTPError(code=404, message='tweet not found')
@bottle.route('/post',method='POST')
@authenticate
def post(user):
    """Create a tweet from the submitted form content and go home."""
    Post.create(user, bottle.request.POST['content'])
    bottle.redirect('/home')
@bottle.route('/follow/:name',method='POST')
@authenticate
def followpost(user, name):
    """Make the authenticated user follow *name*, then show that profile."""
    target = User.find_by_username(name)
    if target:
        user.follow(target)
    bottle.redirect('/%s' % name)
@bottle.route('/unfollow/:name',method='POST')
@authenticate
def unfollowpost(user, name):
    """Make the authenticated user unfollow *name*, then show that profile."""
    target = User.find_by_username(name)
    if target:
        user.stop_following(target)
    bottle.redirect('/%s' % name)
@bottle.route('/signup')
@bottle.route('/login')
def login():
    """Render the combined login/signup page; logged-in users go to /home.

    NOTE: a later `def login` (the POST handler) rebinds this module name;
    bottle keeps the route bound to this function object regardless.
    """
    bottle.TEMPLATES.clear()
    if user_is_logged():
        bottle.redirect('/home')
    return bottle.template('login', page='login', error_login=False,
                           error_signup=False, logged=False)
@bottle.route('/login',method='POST')
def login():
    """Authenticate the posted credentials and open a session.

    NOTE(review): passwords are stored as salted MD5, which is weak by
    modern standards; consider bcrypt/scrypt (behavior left unchanged here).
    """
    form = bottle.request.POST
    if 'name' in form and 'password' in form:
        candidate = User.find_by_username(form['name'])
        expected = hashlib.md5(settings.SALT + form['password']).hexdigest()
        if candidate and candidate.password == expected:
            sess = Session(bottle.request, bottle.response)
            sess['id'] = candidate.id
            sess.save()
            bottle.redirect('/home')
    return bottle.template('login', page='login', error_login=True,
                           error_signup=False, logged=False)
@bottle.route('/logout')
def logout():
    """Invalidate the session and return to the landing page."""
    Session(bottle.request, bottle.response).invalidate()
    bottle.redirect('/')
@bottle.route('/signup',method='POST')
def sign_up():
    """Register a new account and log it in immediately."""
    form = bottle.request.POST
    if 'name' in form and 'password' in form:
        name = form['name']
        # Route names are reserved and cannot be used as usernames.
        if name not in reserved_usernames.split():
            new_user = User.create(name, form['password'])
            if new_user:
                sess = Session(bottle.request, bottle.response)
                sess['id'] = new_user.id
                sess.save()
                bottle.redirect('/home')
    return bottle.template('login', page='login', error_login=False,
                           error_signup=True, logged=False)
@bottle.route('/static/:filename')
def static_file(filename):
    """Serve static assets from the local static/ directory."""
    bottle.send_file(filename, root='static/')
# Development server: debug pages enabled, auto-reload on source change.
bottle.debug(True)
bottle.run(host='localhost',port=9746,reloader=True)
|
# Generated by Django 3.0.6 on 2020-05-28 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Song.artist and Song.name; relax Song.bpm for blank form input."""

    dependencies = [
        ('musicRun', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='song',
            name='artist',
        ),
        migrations.RemoveField(
            model_name='song',
            name='name',
        ),
        # NOTE(review): blank=True affects form validation only; without
        # null=True the database column stays NOT NULL - confirm intended.
        migrations.AlterField(
            model_name='song',
            name='bpm',
            field=models.IntegerField(blank=True),
        ),
    ]
|
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
from flask import request
from google.auth.transport.requests import AuthorizedSession
import dataflow_pipeline.felicidad_y_cultura.tabla_personal_beam as tabla_personal_beam
import dataflow_pipeline.felicidad_y_cultura.tabla_calificaciones_beam as tabla_calificaciones_beam
import dataflow_pipeline.felicidad_y_cultura.tabla_afirmaciones_beam as tabla_afirmaciones_beam
import dataflow_pipeline.felicidad_y_cultura.tabla_integracion_beam as tabla_integracion_beam
import dataflow_pipeline.felicidad_y_cultura.tabla_subcategoria_beam as tabla_subcategoria_beam
import dataflow_pipeline.felicidad_y_cultura.tabla_criterio_beam as tabla_criterio_beam
import dataflow_pipeline.felicidad_y_cultura.tabla_categoria_beam as tabla_categoria_beam
import cloud_storage_controller.cloud_storage_controller as gcscontroller
import os
import time
import socket
import _mssql
import datetime
import sys
#coding: utf-8
# NOTE(review): a coding declaration only takes effect on line 1 or 2 of the
# file; placed here it is inert - confirm the intended source encoding.
clima_api = Blueprint('clima_api', __name__)
# UNC media share when off the "contentobi" host, local /media when on it.
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@clima_api.route("/personal")
def personal():
    """ETL for the HR 'personal' table: dump SQL Server rows to a
    pipe-delimited CSV staged in Cloud Storage, truncate and reload the
    BigQuery table through the Beam pipeline, then delete the staging file.

    Bug fix: the original called client.bigquery(QUERY); the BigQuery client
    method is client.query(QUERY), as every other endpoint in this module uses.
    """
    client = bigquery.Client()
    # Source-connection parameters live in a BigQuery configuration table.
    QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Clima_personal"')
    rows = client.query(QUERY).result()
    for row in rows:  # a single config row is expected; the last one wins
        servidor = row.servidor
        usuario = row.usuario
        contrasena = row.contrasena
        data_base = row.data_base
        tabla_bd = row.tabla_bd
    # Python 2 workaround so the str conversions below cannot raise on unicode.
    reload(sys)
    sys.setdefaultencoding('utf8')
    conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
    conn.execute_query('SELECT id_registro,documento,nombres,sexo,proceso,codigo,centro_costos,gerente,cargo,ciudad,fecha_ingreso,empleador,tipo_contrato,usuario_da,lider,estado FROM ' + tabla_bd )
    campos = ('id_registro', 'documento', 'nombres', 'sexo', 'proceso', 'codigo',
              'centro_costos', 'gerente', 'cargo', 'ciudad', 'fecha_ingreso',
              'empleador', 'tipo_contrato', 'usuario_da', 'lider', 'estado')
    # Build the staging payload in memory; join() avoids quadratic += growth.
    lineas = []
    for row in conn:
        lineas.append("|".join(str(row[campo]).encode('utf-8') for campo in campos) + "\n")
    conn.close()
    cloud_storage_rows = "".join(lineas)
    filename = "Clima/personal" + ".csv"
    gcscontroller.create_file(filename, cloud_storage_rows, "ct-felicidad_y_cultura")
    try:
        # Clear the destination table before the reload (best effort).
        deleteQuery = "DELETE FROM `contento-bi.Felicidad_y_Cultura.Personal` WHERE 1=1"
        client = bigquery.Client()
        client.query(deleteQuery).result()
    except Exception:
        print("no se pudo eliminar")
    # Run the Beam pipeline that loads the staged CSV into BigQuery.
    flowAnswer = tabla_personal_beam.run()
    # Remove the staging object now that it has been loaded.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket('ct-felicidad_y_cultura')
    bucket.blob("Clima/personal" + ".csv").delete()
    return "Cargue exitoso Tabla del personal" + flowAnswer
################################################ TABLA CALIFICACIONES ###########################################
@clima_api.route("/calificaciones")
def calificaciones():
    """ETL for the 'calificaciones' table: stage as pipe-delimited CSV in
    Cloud Storage, clear and reload BigQuery via the Beam pipeline, then
    delete the staging file.

    Bug fix: the original returned the literal string "flowAnswer" instead
    of the pipeline result held in the flowAnswer variable (cf. /personal).
    """
    client = bigquery.Client()
    # Source-connection parameters live in a BigQuery configuration table.
    QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Clima_calificaciones"')
    rows = client.query(QUERY).result()
    for row in rows:  # a single config row is expected; the last one wins
        servidor = row.servidor
        usuario = row.usuario
        contrasena = row.contrasena
        data_base = row.data_base
        tabla_bd = row.tabla_bd
    # Python 2 workaround so the str conversions below cannot raise on unicode.
    reload(sys)
    sys.setdefaultencoding('utf8')
    conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
    conn.execute_query('SELECT id_registro,documento,id_afirmacion,nota,fecha_registro FROM ' + tabla_bd)
    campos = ('id_registro', 'documento', 'id_afirmacion', 'nota', 'fecha_registro')
    # Build the staging payload in memory; join() avoids quadratic += growth.
    lineas = []
    for row in conn:
        lineas.append("|".join(str(row[campo]).encode('utf-8') for campo in campos) + "\n")
    conn.close()
    cloud_storage_rows = "".join(lineas)
    filename = "Clima/calificaciones" + ".csv"
    gcscontroller.create_file(filename, cloud_storage_rows, "ct-felicidad_y_cultura")
    try:
        # Clear the destination table before the reload (best effort).
        deleteQuery = "DELETE FROM `contento-bi.Felicidad_y_Cultura.Calificaciones` WHERE id_afirmacion is not null"
        client = bigquery.Client()
        client.query(deleteQuery).result()
    except Exception:
        print("no se pudo eliminar")
    # Run the Beam pipeline that loads the staged CSV into BigQuery.
    flowAnswer = tabla_calificaciones_beam.run()
    # Remove the staging object now that it has been loaded.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket('ct-felicidad_y_cultura')
    bucket.blob("Clima/calificaciones" + ".csv").delete()
    return " Cargue exitoso Tabla de calificaciones" + flowAnswer
####################################### TABLA AFIRMACIONES ##########################################
@clima_api.route("/afirmaciones")
def afirmaciones():
    """ETL for the 'afirmaciones' lookup table: stage as pipe-delimited CSV
    in Cloud Storage, truncate and reload BigQuery via the Beam pipeline,
    then delete the staging file.

    Bug fix: the original returned the literal string "flowAnswer" instead
    of the pipeline result held in the flowAnswer variable (cf. /personal).
    """
    client = bigquery.Client()
    # Source-connection parameters live in a BigQuery configuration table.
    QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Clima_afirmaciones"')
    rows = client.query(QUERY).result()
    for row in rows:  # a single config row is expected; the last one wins
        servidor = row.servidor
        usuario = row.usuario
        contrasena = row.contrasena
        data_base = row.data_base
        tabla_bd = row.tabla_bd
    # Python 2 workaround so the str conversions below cannot raise on unicode.
    reload(sys)
    sys.setdefaultencoding('utf8')
    conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
    conn.execute_query('SELECT id_afirmacion,desc_afirmacion FROM ' + tabla_bd)
    campos = ('id_afirmacion', 'desc_afirmacion')
    # Build the staging payload in memory; join() avoids quadratic += growth.
    lineas = []
    for row in conn:
        lineas.append("|".join(str(row[campo]).encode('utf-8') for campo in campos) + "\n")
    conn.close()
    cloud_storage_rows = "".join(lineas)
    filename = "Clima/afirmaciones" + ".csv"
    gcscontroller.create_file(filename, cloud_storage_rows, "ct-felicidad_y_cultura")
    try:
        # Clear the destination table before the reload (best effort).
        deleteQuery = "DELETE FROM `contento-bi.Felicidad_y_Cultura.Afirmaciones` WHERE 1=1"
        client = bigquery.Client()
        client.query(deleteQuery).result()
    except Exception:
        print("no se pudo eliminar")
    # Run the Beam pipeline that loads the staged CSV into BigQuery.
    flowAnswer = tabla_afirmaciones_beam.run()
    # Remove the staging object now that it has been loaded.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket('ct-felicidad_y_cultura')
    bucket.blob("Clima/afirmaciones" + ".csv").delete()
    return " Cargue exitoso Tabla de afirmaciones" + flowAnswer
######################################## TABLA REL INTEGRA ##############################################
@clima_api.route("/integra")
def integra():
    """ETL for the 'rel_integra' mapping table: stage as pipe-delimited CSV
    in Cloud Storage, truncate and reload BigQuery via the Beam pipeline,
    then delete the staging file.

    Bug fix: the original returned the literal string "flowAnswer" instead
    of the pipeline result held in the flowAnswer variable (cf. /personal).
    """
    client = bigquery.Client()
    # Source-connection parameters live in a BigQuery configuration table.
    QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Clima_integra"')
    rows = client.query(QUERY).result()
    for row in rows:  # a single config row is expected; the last one wins
        servidor = row.servidor
        usuario = row.usuario
        contrasena = row.contrasena
        data_base = row.data_base
        tabla_bd = row.tabla_bd
    # Python 2 workaround so the str conversions below cannot raise on unicode.
    reload(sys)
    sys.setdefaultencoding('utf8')
    conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
    conn.execute_query('SELECT id_rel_integra,id_criterio,id_categoria,id_subcategoria,id_afirmacion FROM ' + tabla_bd)
    campos = ('id_rel_integra', 'id_criterio', 'id_categoria', 'id_subcategoria', 'id_afirmacion')
    # Build the staging payload in memory; join() avoids quadratic += growth.
    lineas = []
    for row in conn:
        lineas.append("|".join(str(row[campo]).encode('utf-8') for campo in campos) + "\n")
    conn.close()
    cloud_storage_rows = "".join(lineas)
    filename = "Clima/integracion" + ".csv"
    gcscontroller.create_file(filename, cloud_storage_rows, "ct-felicidad_y_cultura")
    try:
        # Clear the destination table before the reload (best effort).
        deleteQuery = "DELETE FROM `contento-bi.Felicidad_y_Cultura.Integracion` WHERE 1=1"
        client = bigquery.Client()
        client.query(deleteQuery).result()
    except Exception:
        print("no se pudo eliminar")
    # Run the Beam pipeline that loads the staged CSV into BigQuery.
    flowAnswer = tabla_integracion_beam.run()
    # Remove the staging object now that it has been loaded.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket('ct-felicidad_y_cultura')
    bucket.blob("Clima/integracion" + ".csv").delete()
    return " Cargue exitoso Tabla de integracion" + flowAnswer
######################################### TABLA SUBCATEGORIA ########################################
@clima_api.route("/subcategoria")
def subcategoria():
    """ETL for the 'subcategoria' lookup table: stage as pipe-delimited CSV
    in Cloud Storage, truncate and reload BigQuery via the Beam pipeline,
    then delete the staging file.

    Bug fix: the original returned the literal string "flowAnswer" instead
    of the pipeline result held in the flowAnswer variable (cf. /personal).
    """
    client = bigquery.Client()
    # Source-connection parameters live in a BigQuery configuration table.
    QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Clima_subcategoria"')
    rows = client.query(QUERY).result()
    for row in rows:  # a single config row is expected; the last one wins
        servidor = row.servidor
        usuario = row.usuario
        contrasena = row.contrasena
        data_base = row.data_base
        tabla_bd = row.tabla_bd
    # Python 2 workaround so the str conversions below cannot raise on unicode.
    reload(sys)
    sys.setdefaultencoding('utf8')
    conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
    conn.execute_query('SELECT id_subcategoria,desc_subcategoria FROM ' + tabla_bd)
    campos = ('id_subcategoria', 'desc_subcategoria')
    # Build the staging payload in memory; join() avoids quadratic += growth.
    lineas = []
    for row in conn:
        lineas.append("|".join(str(row[campo]).encode('utf-8') for campo in campos) + "\n")
    conn.close()
    cloud_storage_rows = "".join(lineas)
    filename = "Clima/subcategoria" + ".csv"
    gcscontroller.create_file(filename, cloud_storage_rows, "ct-felicidad_y_cultura")
    try:
        # Clear the destination table before the reload (best effort).
        deleteQuery = "DELETE FROM `contento-bi.Felicidad_y_Cultura.Subcategoria` WHERE 1=1"
        client = bigquery.Client()
        client.query(deleteQuery).result()
    except Exception:
        print("no se pudo eliminar")
    # Run the Beam pipeline that loads the staged CSV into BigQuery.
    flowAnswer = tabla_subcategoria_beam.run()
    # Remove the staging object now that it has been loaded.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket('ct-felicidad_y_cultura')
    bucket.blob("Clima/subcategoria" + ".csv").delete()
    return " Cargue exitoso Tabla de subcategoria" + flowAnswer
############################################### TABLA CRITERIO ######################################
@clima_api.route("/criterio")
def criterio():
    """ETL for the 'criterio' lookup table: stage as pipe-delimited CSV in
    Cloud Storage, truncate and reload BigQuery via the Beam pipeline, then
    delete the staging file.

    Bug fix: the original returned the literal string "flowAnswer" instead
    of the pipeline result held in the flowAnswer variable (cf. /personal).
    """
    client = bigquery.Client()
    # Source-connection parameters live in a BigQuery configuration table.
    QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Clima_criterio"')
    rows = client.query(QUERY).result()
    for row in rows:  # a single config row is expected; the last one wins
        servidor = row.servidor
        usuario = row.usuario
        contrasena = row.contrasena
        data_base = row.data_base
        tabla_bd = row.tabla_bd
    # Python 2 workaround so the str conversions below cannot raise on unicode.
    reload(sys)
    sys.setdefaultencoding('utf8')
    conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
    conn.execute_query('SELECT id_criterio,descripcion_criterio FROM ' + tabla_bd)
    campos = ('id_criterio', 'descripcion_criterio')
    # Build the staging payload in memory; join() avoids quadratic += growth.
    lineas = []
    for row in conn:
        lineas.append("|".join(str(row[campo]).encode('utf-8') for campo in campos) + "\n")
    conn.close()
    cloud_storage_rows = "".join(lineas)
    filename = "Clima/criterio" + ".csv"
    gcscontroller.create_file(filename, cloud_storage_rows, "ct-felicidad_y_cultura")
    try:
        # Clear the destination table before the reload (best effort).
        deleteQuery = "DELETE FROM `contento-bi.Felicidad_y_Cultura.Criterio` WHERE 1=1"
        client = bigquery.Client()
        client.query(deleteQuery).result()
    except Exception:
        print("no se pudo eliminar")
    # Run the Beam pipeline that loads the staged CSV into BigQuery.
    flowAnswer = tabla_criterio_beam.run()
    # Remove the staging object now that it has been loaded.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket('ct-felicidad_y_cultura')
    bucket.blob("Clima/criterio" + ".csv").delete()
    return " Cargue exitoso Tabla de criterios" + flowAnswer
############################################### TABLA CATEGORIA ###########################################
@clima_api.route("/categoria")
def categoria():
client = bigquery.Client()
QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Clima_categoria"')
query_job = client.query(QUERY)
rows = query_job.result()
data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_categoria,desc_categoria FROM ' + tabla_bd)
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_categoria']).encode('utf-8') + "|"
text_row += str(row['desc_categoria']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "Clima/categoria" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-felicidad_y_cultura")
try:
deleteQuery = "DELETE FROM `contento-bi.Felicidad_y_Cultura.Categoria` WHERE 1=1"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = tabla_categoria_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-felicidad_y_cultura')
blob = bucket.blob("Clima/categoria" + ".csv")
# Eliminar el archivo en la variable
blob.delete()
return " Cargue exitoso Tabla de categoria" + "flowAnswer" |
# %%
# Notebook cell: load the epitope export. skiprows=[0] drops the first line —
# presumably an extra header banner in the export; verify against the raw file.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from original_approach.encoding import encode_similarity_matrix
df = pd.read_csv('raw/epitope_table_export_1608718806.csv', skiprows=[0])
df
# %%
# Keep only the unique 9-residue peptides from the 'Description' column.
df['len'] = df['Description'].apply(len)
peptides = list(df[df['len'] == 9]['Description'].unique())
print(len(peptides))
# print(peptides)
# Partition peptides into those the similarity-matrix encoder accepts and
# those it rejects (best-effort: any exception marks the peptide invalid).
invalid = []
valid = []
for s in peptides:
    try:
        encode_similarity_matrix([s])
        valid += [s]
    except Exception as e:
        invalid += [s]
if len(invalid):
    print('peptides not included due to error')
    print(invalid)
peptides = valid
|
from typing import Dict
import importlib
import numpy as np
from manythings.util_yaml import yaml_loader
import click
import wandb
from wandb.keras import WandbCallback
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, TensorSpec
# api_key = os.environ['WANDB_API_KEY']
@click.command()
@click.argument("experiment-config",
                type=click.Path(exists=True),
                default="manythings/experiment.yaml")
@click.option("--latent_dim", default=300)
@click.option("--decay", default=1.0e-06)
@click.option("--dropout", default=0.2)
@click.option("--epochs", default=3)
@click.option("--learn_rate", default=0.01)
@click.option("--momentum", default=0.9)
def main(experiment_config, latent_dim: int, decay: float, dropout: float,
         epochs: int, learn_rate: float, momentum: float):
    """Load the experiment configuration, apply CLI hyperparameter
    overrides, and launch training.
    """
    # BUG FIX: honour the experiment-config argument instead of always
    # re-reading the hard-coded default path (the argument was ignored).
    exp_config = yaml_loader(experiment_config)
    proj_name = exp_config.get("project_name")
    net_name = exp_config.get("network")["name"]
    net_args = exp_config.get("network")["net_args"]
    # CLI options override whatever the YAML file specified.
    net_args["hidden_layer_size"] = latent_dim
    net_args["decay"] = decay
    net_args["dropout"] = dropout
    net_args["epochs"] = epochs
    net_args["learn_rate"] = learn_rate
    net_args["momentum"] = momentum
    dataset_cls = exp_config.get("dataset")["name"]
    dataset_args = exp_config.get("dataset")["dataset_args"]
    model = exp_config.get("model")
    wandb.login()
    train(proj_name, model, dataset_cls, net_name, net_args, dataset_args)
def train(
    proj_name: str,
    Model: str,
    dataset_cls: str,
    net_fn: str,
    net_args: Dict,
    dataset_args: Dict,
):
    """Resolve the configured dataset/network/model classes by name, train
    the model inside a wandb run, and save the trained network with mlflow.
    """
    # Resolve each component class/function from its module by convention:
    # the module is named after the component it exports.
    dataset_cls_ = getattr(
        importlib.import_module(f"manythings.data.dta_{dataset_cls}"),
        dataset_cls)
    network_fn_ = getattr(
        importlib.import_module(f"manythings.networks.{net_fn}"), net_fn)
    model_cls_ = getattr(
        importlib.import_module(f"manythings.models.{Model}"), Model)

    # Run configuration recorded with wandb.
    config = {
        "model": Model,
        "dataset_cls": dataset_cls,
        "net_fn": net_fn,
        "net_args": net_args,
        "dataset_args": dataset_args,
    }

    # Fixed tensor shapes for the saved model's mlflow signature.
    # NOTE(review): the 71/93 dimensions look dataset-specific — confirm they
    # match the encoder/decoder sequence lengths of the configured dataset.
    signature = ModelSignature(
        inputs=Schema([
            TensorSpec(np.dtype(np.uint8), (-1, 71), "encoder_input"),
            TensorSpec(np.dtype(np.uint8), (-1, 93), "decoder_input"),
        ]),
        outputs=Schema([TensorSpec(np.dtype(np.float32), (-1, 93))]),
    )

    data = dataset_cls_()
    data.load_or_generate()
    data.preprocess()

    with wandb.init(project=proj_name, config=config):
        config = wandb.config
        model = model_cls_(dataset_cls_, network_fn_, net_args, dataset_args)
        model.fit(callbacks=[WandbCallback()])
        mlflow.keras.save_model(model.network,
                                "saved_models/seq2seq",
                                signature=signature)
if __name__ == "__main__":
main()
|
# scalable ttt
# 21 22 23 24 25
# 16 17 18 19 20
# 11 12 13 14 15
# 6 7 8 9 10
# 1 2 3 4 5
# s = size
# Prompt until the user enters a valid integer board size.
while True:
    try:
        s = int(input("Please specify number for rows/columns "))
        break
    except ValueError:
        # ROBUSTNESS FIX: only catch bad numeric input; the original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        print("Sorry, please try again.")
"""
def field(x):
y = x
field = x * y
return x, y, field
fieldsize = (list(field(s)))
r=fieldsize[0] #s
c=fieldsize[1] #s
f=fieldsize[2] #s*s
print("there are " + str(r) + " rows")
print("there are " + str(c) + " columns")
print("there are " + str(f) + " fields")
field = list(range(0,f+1))
print(field)
# row +1 col +s diag + (s-1)
row1 = list(range(1,s+1))
# range linke seite fรผr reihen
print(1)
print(1+s)
print(1+(s*2))
# ...
print("")
# range rechte seite fรผr reihen (plus 1)
print(s)
print((s*2))
print((s*3))
#...
print("")
print(list(range(1,(s+1))))
print(list(range((s)+1,(s*2)+1)))
print(list(range((s*2)+1,(s*3)+1)))
print("#")
"""
field = [i for i in range(1,s*s+1)]
print(field)
# Build the row-wise view of the board: [[1..s], [s+1..2s], ...].
def rowprinter():
    """Yield the cell numbers of each board row, lowest-numbered row first."""
    for start in range(1, s * s + 1, s):
        yield list(range(start, start + s))

rowlist = [row for row in rowprinter()]
print(rowlist)
# Build the column-wise view of the board: column k holds k, k+s, k+2s, ...
def columnprinter():
    """Yield the cell numbers of each board column, leftmost column first."""
    for offset in range(s):
        yield list(range(1 + offset, offset + s * s, s))

columnlist = list(columnprinter())
print(columnlist)
# Build both board diagonals.
diagonallist = []
def diagonalprinter():
    """Append the main and anti diagonal (as cell-number lists) to the
    global ``diagonallist``."""
    # Main diagonal climbs by s+1: 1, 2+s, 3+2s, ...
    # (the unused `x` offset from the original has been dropped).
    diagonallist.append(list(range(1, 1 + s * s, s + 1)))
    # Anti-diagonal climbs by s-1: s, 2s-1, ...
    # EDGE-CASE FIX: for a 1x1 board the step would be 0, which range()
    # rejects with ValueError, so handle that size explicitly.
    if s == 1:
        diagonallist.append([1])
    else:
        diagonallist.append(list(range(s, (s * s), (s - 1))))
diagonalprinter()
print(diagonallist)
match = ["x" for i in range(s)]
print(match)
x = int(input("number: "))
for n, num in enumerate(field):
if num == x:
field[n] = "x"
print(field)
for row in rowlist:
if x in row:
for n, num in enumerate(row):
if num == x:
row[n] = "x"
print(rowlist)
"""
print("#")
fieldscalable = list(range(1, (s*r)+1))
print(fieldscalable)
print("| ", end="")
for number in fieldscalable:
print(number,end=" | ")
print("")
print(r)
print(c)
print(f)
1 1+s
1+s 1+s*2
1+s*2 1+s*3
"""
|
from . import api, brackets, utils
TOURNAMENT_PREFIX = 'tournament/'
EVENT_URL = '/event/'
VALID_PARAMS = ['event', 'phase', 'groups', 'stations']
def show(tournament_name, params=None, filter_response=True):
    """Retrieve a single tournament record by `tournament name`.

    :param tournament_name: slug of the tournament on Smash.gg
    :param params: optional query params (validated against VALID_PARAMS)
    :param filter_response: when True, reduce the raw API response to a
        flat summary dict via _filter_tournament_response
    """
    # FIX: avoid the mutable default argument `params=[]`.
    if params is None:
        params = []
    utils._validate_query_params(params=params, valid_params=VALID_PARAMS, route_type='tournament')
    uri = TOURNAMENT_PREFIX + tournament_name
    response = api.get(uri, params)
    if filter_response:
        response = _filter_tournament_response(response, params)
    return response
def show_events(tournament_name):
    """Return a dict with the list of event slugs for a tournament."""
    response = api.get(TOURNAMENT_PREFIX + tournament_name, ['event'])
    return _append_events(response, {})
def show_with_brackets(tournament_name, event, tournament_params=None):
    """Returns tournament meta information along with a list of bracketIds
    for an event."""
    # FIX: avoid the mutable default `tournament_params=[]`, and don't name
    # the local result `brackets` — that shadowed the imported module.
    if tournament_params is None:
        tournament_params = []
    tournament = show(tournament_name, tournament_params)
    bracket_info = event_brackets(tournament_name, event)
    return utils.merge_two_dicts(tournament, bracket_info)
def show_sets(tournament_name, event, tournament_params=None):
    """Returns all non-empty sets from a tournament's brackets."""
    # FIX: avoid the mutable default `tournament_params=[]`.
    if tournament_params is None:
        tournament_params = []
    tournament = show_with_brackets(tournament_name, event, tournament_params)
    results = []
    for bracket_id in tournament['bracket_ids']:
        for _set in brackets.sets(bracket_id):
            if _set:  # skip empty set records
                results.append(_set)
    return results
def show_players(tournament_name, event_name, tournament_params=None):
    """Returns all unique players (deduplicated by tag) from a tournament."""
    # FIX: avoid the mutable default `tournament_params=[]`.
    if tournament_params is None:
        tournament_params = []
    # BUG FIX: `event_name` was previously not forwarded, so
    # `tournament_params` was passed where the event slug was expected.
    tournament = show_with_brackets(tournament_name, event_name, tournament_params)
    results = []
    for bracket_id in tournament['bracket_ids']:
        for player in brackets.players(bracket_id):
            results.append(player)
    # Deduplicate by tag; the last occurrence of each tag wins.
    return list({v['tag']: v for v in results}.values())
def show_player_sets(tournament_name, event, player_tag):
    """Return the player's record and every set they played across all
    brackets of an event, as ``{'player': ..., 'sets': [...]}``."""
    tournament = show_with_brackets(tournament_name, event)
    player = None
    collected = []
    for bracket_id in tournament['bracket_ids']:
        played = brackets.sets_played_by_player(bracket_id, player_tag)
        if not played:
            continue
        # Capture the player record from the first bracket that has one.
        if player is None:
            player = played['player']
        collected.extend(played['sets'])
    return {
        'player': player,
        'sets': collected
    }
def show_head_to_head(tournament_name, event, player1_tag, player2_tag):
    """Returns sets played between 2 players."""
    player1_sets = show_player_sets(tournament_name, event, player1_tag)
    result_sets = {
        'player': player1_sets['player'],
        'sets': []
    }
    # FIX: lowercase both sides of the comparison — previously only the
    # opponent tag was lowered, so a capitalized `player2_tag` never matched.
    target = player2_tag.lower()
    for _set in player1_sets['sets']:
        if _set['opponent_info']['tag'].lower() == target:
            result_sets['sets'].append(_set)
    return result_sets
def event_brackets(tournament_name, event='melee-singles', filter_response=True):
    """Returns a list of bracket ids for an event.

    The tournament's events are fetched first so the requested event slug
    can be validated against what actually exists.
    """
    # FIX: the docstring was placed after the first statement, so it was not
    # a real docstring; moved to the top of the function.
    events = show_events(tournament_name)
    utils._validate_query_params(params=[event], valid_params=events['events'], route_type='event')
    # Use the module-level EVENT_URL constant instead of duplicating '/event/'.
    uri = TOURNAMENT_PREFIX + tournament_name + EVENT_URL + event
    response = api.get(uri, ['groups'])
    if filter_response:
        response = _filter_event_bracket_response(response)
    return response
def _filter_event_bracket_response(response):
"""Filters the Smash.gg response to something more managable"""
bracket_ids = []
for bracket in response['entities']['groups']:
bracket_ids.append(str(bracket['id']))
return {
'bracket_ids': bracket_ids,
'event_name': response['entities']['event']['typeDisplayStr'],
'bracket_full_source_url': response['entities']['event']['slug']
}
def _filter_tournament_response(response, params=[]):
"""Filters the Smash.gg response to something more managable"""
result = {
'tournament_id': response['entities']['tournament']['id'],
'venue_name': response['entities']['tournament']['venueName'],
'venue_address': response['entities']['tournament']['venueAddress'],
'name': response['entities']['tournament']['name'],
'tournament_full_source_url': response['entities']['tournament']['slug'],
'links': response['entities']['tournament']['links'],
'state_short': response['entities']['tournament']['regionDisplayName'],
'start_at': response['entities']['tournament']['startAt'],
'end_at': response['entities']['tournament']['endAt'],
'details': response['entities']['tournament']['details']
}
if 'event' in params:
result = _append_events(response, result)
if 'phase' in params:
result = _append_phases(response, result)
if 'groups' in params:
result = _append_groups(response, result)
return result
def _append_groups(response, result):
result['groups'] = []
groups = response['entities'].get('groups', [])
for group in groups:
group_dict = {
"group_id": group['id'],
'phase_id': group['phaseId'],
'title': group['title'],
'winners_target_phase': group['winnersTargetPhaseId'],
}
result['groups'].append(group_dict)
return result
def _append_phases(response, result):
result['phases'] = []
phases = response['entities'].get('phase', [])
for phase in phases:
phase_dict = {
"phase_id": phase['id'],
'event_id': phase['eventId'],
'phase_name': phase['name'],
'is_exhibition': phase['isExhibition'],
'type_id': phase['typeId']
}
result['phases'].append(phase_dict)
return result
def _append_events(response, result):
result['events'] = []
events = response['entities'].get('event', [])
for event in events:
slug = event['slug']
slug = slug.split("/")
result['events'].append(slug[-1])
return result
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.util import assert_data_frame_almost_equal
from skbio.metadata import IntervalMetadata
class ExampleGrammaredSequence(GrammaredSequence):
    """Minimal concrete GrammaredSequence used as a test fixture.

    Alphabet: definite characters "ABC"; degenerates "X", "Y", "Z", each
    expanding to two of the definite characters; gap characters "-" and ".",
    with "-" as the default gap.
    """

    @classproperty
    def degenerate_map(cls):
        return {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}

    @classproperty
    def definite_chars(cls):
        return set("ABC")

    @classproperty
    def default_gap_char(cls):
        return '-'

    @classproperty
    def gap_chars(cls):
        return set('-.')
class ExampleMotifsTester(ExampleGrammaredSequence):
    """Fixture exposing fake motifs so ``find_motifs`` code paths can be hit."""

    @property
    def _motifs(self):
        # These aren't really motifs, just a way to exercise the code paths:
        # one callable returns a string, the other an int.
        return {
            "name1": lambda x, _, __: str(x),
            "name2": lambda x, _, __: len(x)
        }
class TestGrammaredSequence(TestCase):
def test_default_gap_must_be_in_gap_chars(self):
with self.assertRaisesRegex(
TypeError,
r"default_gap_char must be in gap_chars for class "
"GrammaredSequenceInvalidDefaultGap"):
class GrammaredSequenceInvalidDefaultGap(ExampleGrammaredSequence):
@classproperty
def default_gap_char(cls):
return '*'
def test_degenerates_must_expand_to_valid_definites(self):
with self.assertRaisesRegex(
TypeError,
r"degenerate_map must expand only to characters included in "
"definite_chars for class "
"GrammaredSequenceInvalidDefaultGap"):
class GrammaredSequenceInvalidDefaultGap(ExampleGrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("B")}
@classproperty
def definite_chars(cls):
return set("A")
def test_gap_chars_and_degenerates_share(self):
with self.assertRaisesRegex(
TypeError,
r"gap_chars and degenerate_chars must not share any characters"
" for class GrammaredSequenceGapInDegenerateMap"):
class GrammaredSequenceGapInDegenerateMap(
ExampleGrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB")}
@classproperty
def definite_chars(cls):
return set("ABC")
@classproperty
def gap_chars(cls):
return set(".-X")
def test_gap_chars_and_definites_share(self):
with self.assertRaisesRegex(
TypeError,
(r"gap_chars and definite_chars must not share any characters "
"for class GrammaredSequenceGapInDefiniteMap")):
class GrammaredSequenceGapInDefiniteMap(
ExampleGrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB")}
@classproperty
def definite_chars(cls):
return set("ABC")
@classproperty
def gap_chars(cls):
return set(".-A")
def test_degenerates_and_definites_share(self):
with self.assertRaisesRegex(
TypeError,
(r"degenerate_chars and definite_chars must not share any "
"characters for class GrammaredSequenceInvalid")):
class GrammaredSequenceInvalid(ExampleGrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB")}
@classproperty
def definite_chars(cls):
return set("ABCX")
def test_instantiation_with_no_implementation(self):
class GrammaredSequenceSubclassNoImplementation(GrammaredSequence):
pass
with self.assertRaises(TypeError) as cm:
GrammaredSequenceSubclassNoImplementation()
self.assertIn("abstract class", str(cm.exception))
self.assertIn("definite_chars", str(cm.exception))
self.assertIn("degenerate_map", str(cm.exception))
def test_init_default_parameters(self):
seq = ExampleGrammaredSequence('.-ABCXYZ')
npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
self.assertEqual(seq.metadata, {})
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=range(8)))
self.assertEqual(seq.interval_metadata,
IntervalMetadata(8))
def test_init_nondefault_parameters(self):
im = IntervalMetadata(8)
im.add([(1, 8)], metadata={'gene': 'p53'})
seq = ExampleGrammaredSequence(
'.-ABCXYZ',
metadata={'id': 'foo'},
positional_metadata={'quality': range(8)},
interval_metadata=im)
npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
self.assertEqual(seq.metadata, {'id': 'foo'})
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'quality': range(8)}))
self.assertEqual(seq.interval_metadata, im)
def test_init_valid_empty_sequence(self):
# just make sure we can instantiate an empty sequence regardless of
# `validate` and `lowercase` parameters. more extensive tests
# are performed in Sequence base class unit tests
for validate in (True, False):
for lowercase in (True, False):
seq = ExampleGrammaredSequence(
'', validate=validate, lowercase=lowercase)
self.assertEqual(seq, ExampleGrammaredSequence(''))
def test_init_valid_single_character_sequence(self):
for validate in (True, False):
for lowercase in (True, False):
seq = ExampleGrammaredSequence(
'C', validate=validate, lowercase=lowercase)
self.assertEqual(seq, ExampleGrammaredSequence('C'))
def test_init_valid_multiple_character_sequence(self):
for validate in (True, False):
for lowercase in (True, False):
seq = ExampleGrammaredSequence(
'BAACB.XYY-AZ', validate=validate, lowercase=lowercase)
self.assertEqual(seq, ExampleGrammaredSequence('BAACB.XYY-AZ'))
def test_init_validate_parameter_single_character(self):
seq = 'w'
with self.assertRaisesRegex(ValueError, r"character.*'w'"):
ExampleGrammaredSequence(seq)
# test that we can instantiate an invalid sequence. we don't guarantee
# anything working beyond instantiation
ExampleGrammaredSequence(seq, validate=False)
def test_init_validate_parameter_multiple_characters(self):
# mix of valid and invalid characters with repeats and lowercased
# alphabet characters
seq = 'CBCBBbawCbbwBXYZ-.x'
with self.assertRaisesRegex(ValueError, r"\['a', 'b', 'w', 'x'\]"):
ExampleGrammaredSequence(seq)
ExampleGrammaredSequence(seq, validate=False)
def test_init_lowercase_all_lowercase(self):
s = 'cbcbbbazcbbzbxyz-.x'
with self.assertRaisesRegex(ValueError,
r"\['a', 'b', 'c', 'x', 'y', 'z'\]"):
ExampleGrammaredSequence(s)
seq = ExampleGrammaredSequence(s, lowercase=True)
self.assertEqual(seq, ExampleGrammaredSequence('CBCBBBAZCBBZBXYZ-.X'))
def test_init_lowercase_mixed_case(self):
s = 'CBCBBbazCbbzBXYZ-.x'
with self.assertRaisesRegex(ValueError, r"\['a', 'b', 'x', 'z'\]"):
ExampleGrammaredSequence(s)
seq = ExampleGrammaredSequence(s, lowercase=True)
self.assertEqual(seq, ExampleGrammaredSequence('CBCBBBAZCBBZBXYZ-.X'))
def test_init_lowercase_no_validation(self):
s = 'car'
with self.assertRaisesRegex(ValueError, r"\['a', 'c', 'r'\]"):
ExampleGrammaredSequence(s)
with self.assertRaisesRegex(ValueError, r"character.*'R'"):
ExampleGrammaredSequence(s, lowercase=True)
ExampleGrammaredSequence(s, lowercase=True, validate=False)
def test_init_lowercase_byte_ownership(self):
bytes = np.array([97, 98, 97], dtype=np.uint8)
with self.assertRaisesRegex(ValueError, r"\['a', 'b'\]"):
ExampleGrammaredSequence(bytes)
seq = ExampleGrammaredSequence(bytes, lowercase=True)
self.assertEqual(seq, ExampleGrammaredSequence('ABA'))
# should not share the same memory
self.assertIsNot(seq._bytes, bytes)
# we should have copied `bytes` before modifying in place to convert to
# upper. make sure `bytes` hasn't been mutated
npt.assert_equal(bytes, np.array([97, 98, 97], dtype=np.uint8))
def test_init_lowercase_invalid_keys(self):
for invalid_key in ((), [], 2):
invalid_type = type(invalid_key)
with self.assertRaisesRegex(TypeError,
r"lowercase keyword argument expected "
"a bool or string, but got %s" %
invalid_type):
ExampleGrammaredSequence('ACGTacgt', lowercase=invalid_key)
def test_degenerate_chars(self):
expected = set("XYZ")
self.assertIs(type(ExampleGrammaredSequence.degenerate_chars), set)
self.assertEqual(ExampleGrammaredSequence.degenerate_chars, expected)
ExampleGrammaredSequence.degenerate_chars.add("W")
self.assertEqual(ExampleGrammaredSequence.degenerate_chars, expected)
self.assertEqual(ExampleGrammaredSequence('').degenerate_chars,
expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').degenerate_chars = set("BAR")
# TODO: duplicate of test_definite_chars, remove when nondegenerate_chars,
# is removed
def test_nondegenerate_chars(self):
expected = set("ABC")
self.assertEqual(ExampleGrammaredSequence.nondegenerate_chars,
expected)
ExampleGrammaredSequence.degenerate_chars.add("D")
self.assertEqual(ExampleGrammaredSequence.nondegenerate_chars,
expected)
self.assertEqual(ExampleGrammaredSequence('').nondegenerate_chars,
expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').nondegenerate_chars = set("BAR")
def test_definite_chars(self):
expected = set("ABC")
self.assertEqual(ExampleGrammaredSequence.definite_chars,
expected)
ExampleGrammaredSequence.degenerate_chars.add("D")
self.assertEqual(ExampleGrammaredSequence.definite_chars,
expected)
self.assertEqual(ExampleGrammaredSequence('').definite_chars,
expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').definite_chars = set("BAR")
def test_gap_chars(self):
expected = set(".-")
self.assertIs(type(ExampleGrammaredSequence.gap_chars), set)
self.assertEqual(ExampleGrammaredSequence.gap_chars, expected)
ExampleGrammaredSequence.gap_chars.add("_")
self.assertEqual(ExampleGrammaredSequence.gap_chars, expected)
self.assertEqual(ExampleGrammaredSequence('').gap_chars, expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').gap_chars = set("_ =")
def test_default_gap_char(self):
self.assertIs(type(ExampleGrammaredSequence.default_gap_char), str)
self.assertEqual(ExampleGrammaredSequence.default_gap_char, '-')
self.assertEqual(ExampleGrammaredSequence('').default_gap_char, '-')
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').default_gap_char = '.'
def test_alphabet(self):
expected = set("ABC.-XYZ")
self.assertIs(type(ExampleGrammaredSequence.alphabet), set)
self.assertEqual(ExampleGrammaredSequence.alphabet, expected)
ExampleGrammaredSequence.alphabet.add("DEF")
self.assertEqual(ExampleGrammaredSequence.alphabet, expected)
self.assertEqual(ExampleGrammaredSequence('').alphabet, expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').alphabet = set("ABCDEFG.-WXYZ")
def test_degenerate_map(self):
expected = {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
self.assertEqual(ExampleGrammaredSequence.degenerate_map, expected)
ExampleGrammaredSequence.degenerate_map['W'] = set("ABC")
ExampleGrammaredSequence.degenerate_map['X'] = set("CA")
self.assertEqual(ExampleGrammaredSequence.degenerate_map, expected)
self.assertEqual(ExampleGrammaredSequence('').degenerate_map, expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').degenerate_map = {'W': "ABC"}
def test_gaps(self):
self.assertIs(type(ExampleGrammaredSequence("").gaps()), np.ndarray)
self.assertIs(ExampleGrammaredSequence("").gaps().dtype,
np.dtype('bool'))
npt.assert_equal(ExampleGrammaredSequence("ABCXBZYABC").gaps(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleGrammaredSequence(".-.-.").gaps(),
np.ones(5).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("A.B-C.X-Y.").gaps(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("AB.AC.XY-").gaps(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("A.BC.-").gaps(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_gaps(self):
self.assertIs(type(ExampleGrammaredSequence("").has_gaps()), bool)
self.assertIs(type(ExampleGrammaredSequence("-").has_gaps()), bool)
self.assertFalse(ExampleGrammaredSequence("").has_gaps())
self.assertFalse(ExampleGrammaredSequence("ABCXYZ").has_gaps())
self.assertTrue(ExampleGrammaredSequence("-").has_gaps())
self.assertTrue(ExampleGrammaredSequence("ABCXYZ-").has_gaps())
def test_degenerates(self):
self.assertIs(type(ExampleGrammaredSequence("").degenerates()),
np.ndarray)
self.assertIs(ExampleGrammaredSequence("").degenerates().dtype,
np.dtype('bool'))
npt.assert_equal(ExampleGrammaredSequence("ABCBC-.AB.").degenerates(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("ZYZYZ").degenerates(),
np.ones(5).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("AX.Y-ZBXCZ").degenerates(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("ABXACY.-Z").degenerates(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("AZBCXY").degenerates(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_degenerates(self):
self.assertIs(type(ExampleGrammaredSequence("").has_degenerates()),
bool)
self.assertIs(type(ExampleGrammaredSequence("X").has_degenerates()),
bool)
self.assertFalse(ExampleGrammaredSequence("").has_degenerates())
self.assertFalse(ExampleGrammaredSequence("A-.BC").has_degenerates())
self.assertTrue(ExampleGrammaredSequence("Z").has_degenerates())
self.assertTrue(ExampleGrammaredSequence("ABC.XYZ-").has_degenerates())
# TODO: duplicate of test_definites; remove when nondegenerates is removed
def test_nondegenerates(self):
self.assertIs(type(ExampleGrammaredSequence("").nondegenerates()),
np.ndarray)
self.assertIs(ExampleGrammaredSequence("").nondegenerates().dtype,
np.dtype('bool'))
npt.assert_equal(
ExampleGrammaredSequence("XYZYZ-.XY.").nondegenerates(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("ABABA").nondegenerates(),
np.ones(5).astype(bool))
npt.assert_equal(
ExampleGrammaredSequence("XA.B-AZCXA").nondegenerates(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(
ExampleGrammaredSequence("XXAZZB.-C").nondegenerates(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("YB.-AC").nondegenerates(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_definites(self):
self.assertIs(type(ExampleGrammaredSequence("").definites()),
np.ndarray)
self.assertIs(ExampleGrammaredSequence("").definites().dtype,
np.dtype('bool'))
npt.assert_equal(
ExampleGrammaredSequence("XYZYZ-.XY.").definites(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("ABABA").definites(),
np.ones(5).astype(bool))
npt.assert_equal(
ExampleGrammaredSequence("XA.B-AZCXA").definites(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(
ExampleGrammaredSequence("XXAZZB.-C").definites(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("YB.-AC").definites(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
# TODO: duplicate of test_has_definites; remove when has_nondegenerates is
# removed.
def test_has_nondegenerates(self):
self.assertIs(type(ExampleGrammaredSequence("").has_nondegenerates()),
bool)
self.assertIs(type(ExampleGrammaredSequence("A").has_nondegenerates()),
bool)
self.assertFalse(ExampleGrammaredSequence("").has_nondegenerates())
self.assertFalse(
ExampleGrammaredSequence("X-.YZ").has_nondegenerates())
self.assertTrue(ExampleGrammaredSequence("C").has_nondegenerates())
self.assertTrue(
ExampleGrammaredSequence(".XYZ-ABC").has_nondegenerates())
def test_has_definites(self):
self.assertIs(type(ExampleGrammaredSequence("").has_definites()),
bool)
self.assertIs(type(ExampleGrammaredSequence("A").has_definites()),
bool)
self.assertFalse(ExampleGrammaredSequence("").has_definites())
self.assertFalse(
ExampleGrammaredSequence("X-.YZ").has_definites())
self.assertTrue(ExampleGrammaredSequence("C").has_definites())
self.assertTrue(
ExampleGrammaredSequence(".XYZ-ABC").has_definites())
def test_degap(self):
kw = {
'metadata': {
'id': 'some_id',
'description': 'some description',
},
}
self.assertEqual(
ExampleGrammaredSequence(
"", positional_metadata={'qual': []}, **kw).degap(),
ExampleGrammaredSequence(
"", positional_metadata={'qual': []}, **kw))
self.assertEqual(
ExampleGrammaredSequence(
"ABCXYZ",
positional_metadata={'qual': np.arange(6)},
**kw).degap(),
ExampleGrammaredSequence(
"ABCXYZ",
positional_metadata={'qual': np.arange(6)},
**kw))
self.assertEqual(
ExampleGrammaredSequence(
"ABC-XYZ",
positional_metadata={'qual': np.arange(7)},
**kw).degap(),
ExampleGrammaredSequence(
"ABCXYZ",
positional_metadata={'qual': [0, 1, 2, 4, 5, 6]},
**kw))
self.assertEqual(
ExampleGrammaredSequence(
".-ABC-XYZ.",
positional_metadata={'qual': np.arange(10)},
**kw).degap(),
ExampleGrammaredSequence(
"ABCXYZ",
positional_metadata={'qual': [2, 3, 4, 6, 7, 8]},
**kw))
self.assertEqual(
ExampleGrammaredSequence(
"---.-.-.-.-.",
positional_metadata={'quality': np.arange(12)},
**kw).degap(),
ExampleGrammaredSequence(
"",
positional_metadata={'quality': np.array([], dtype=np.int64)},
**kw))
def test_expand_degenerates_no_degens(self):
seq = ExampleGrammaredSequence("ABCABCABC")
self.assertEqual(list(seq.expand_degenerates()), [seq])
def test_expand_degenerates_all_degens(self):
exp = [
ExampleGrammaredSequence('ABA'), ExampleGrammaredSequence('ABC'),
ExampleGrammaredSequence('ACA'), ExampleGrammaredSequence('ACC'),
ExampleGrammaredSequence('BBA'), ExampleGrammaredSequence('BBC'),
ExampleGrammaredSequence('BCA'), ExampleGrammaredSequence('BCC')
]
# Sort based on sequence string, as order is not guaranteed.
obs = sorted(ExampleGrammaredSequence('XYZ').expand_degenerates(),
key=str)
self.assertEqual(obs, exp)
def test_expand_degenerates_with_metadata(self):
kw = {
"metadata": {
"id": "some_id",
"description": "some description"
},
"positional_metadata": {
"quality": np.arange(3),
},
}
exp = [ExampleGrammaredSequence('ABA', **kw),
ExampleGrammaredSequence('ABC', **kw),
ExampleGrammaredSequence('BBA', **kw),
ExampleGrammaredSequence('BBC', **kw)]
obs = sorted(
ExampleGrammaredSequence('XBZ', **kw).expand_degenerates(),
key=str)
self.assertEqual(obs, exp)
def test_to_regex_no_degens(self):
seq = ExampleGrammaredSequence('ABC')
regex = seq.to_regex()
self.assertEqual(regex.pattern, str(seq))
def test_to_regex_with_degens(self):
seq = ExampleGrammaredSequence('AYZ')
regex = seq.to_regex()
self.assertFalse(any(regex.match(s) is None
for s in 'ABA ABC ACA ACC'.split()))
self.assertTrue(all(regex.match(s) is None
for s in 'CBA BBA ABB AAA'.split()))
def test_find_motifs_no_motif(self):
seq = ExampleMotifsTester("ABCABCABC")
with self.assertRaises(ValueError) as cm:
seq.find_motifs("doesn't-exist")
self.assertIn("doesn't-exist", str(cm.exception))
seq = ExampleGrammaredSequence("ABCABCABC")
with self.assertRaises(ValueError) as cm:
seq.find_motifs("doesn't-exist")
self.assertIn("doesn't-exist", str(cm.exception))
def test_find_motifs(self):
seq = ExampleMotifsTester("ABC")
self.assertEqual(seq.find_motifs("name1"), "ABC")
self.assertEqual(seq.find_motifs("name2"), 3)
    def test_repr(self):
        """Sanity-check the grammared-sequence repr stats (length, gaps,
        degenerates, definites); exhaustive repr behavior is covered by the
        Sequence.__repr__ tests."""
        # basic sanity checks for custom repr stats. more extensive testing is
        # performed on Sequence.__repr__
        # minimal
        obs = repr(ExampleGrammaredSequence(''))
        self.assertEqual(obs.count('\n'), 7)
        self.assertTrue(obs.startswith('ExampleGrammaredSequence'))
        self.assertIn('length: 0', obs)
        self.assertIn('has gaps: False', obs)
        self.assertIn('has degenerates: False', obs)
        self.assertIn('has definites: False', obs)
        self.assertTrue(obs.endswith('-'))
        # no metadata, mix of gaps, degenerates, and definites
        obs = repr(ExampleGrammaredSequence('AY-B'))
        self.assertEqual(obs.count('\n'), 8)
        self.assertTrue(obs.startswith('ExampleGrammaredSequence'))
        self.assertIn('length: 4', obs)
        self.assertIn('has gaps: True', obs)
        self.assertIn('has degenerates: True', obs)
        self.assertIn('has definites: True', obs)
        self.assertTrue(obs.endswith('0 AY-B'))
        # metadata and positional metadata of mixed types
        obs = repr(
            ExampleGrammaredSequence(
                'ABCA',
                metadata={'foo': 42, b'bar': 33.33, None: True, False: {},
                          (1, 2): 3, 'acb' * 100: "'"},
                positional_metadata={'foo': range(4),
                                     42: ['a', 'b', [], 'c']}))
        self.assertEqual(obs.count('\n'), 18)
        self.assertTrue(obs.startswith('ExampleGrammaredSequence'))
        self.assertIn('None: True', obs)
        self.assertIn('\'foo\': 42', obs)
        self.assertIn('42: <dtype: object>', obs)
        self.assertIn('\'foo\': <dtype: int64>', obs)
        self.assertIn('length: 4', obs)
        self.assertIn('has gaps: False', obs)
        self.assertIn('has degenerates: False', obs)
        self.assertIn('has definites: True', obs)
        self.assertTrue(obs.endswith('0 ABCA'))
        # sequence spanning > 5 lines
        obs = repr(ExampleGrammaredSequence('A' * 301))
        self.assertEqual(obs.count('\n'), 12)
        self.assertTrue(obs.startswith('ExampleGrammaredSequence'))
        self.assertIn('length: 301', obs)
        self.assertIn('has gaps: False', obs)
        self.assertIn('has degenerates: False', obs)
        self.assertIn('has definites: True', obs)
        # long sequences are elided in the middle
        self.assertIn('...', obs)
        self.assertTrue(obs.endswith('300 A'))
# Allow running this test module directly (delegates to unittest main()).
if __name__ == "__main__":
    main()
|
import numpy as np
import re
import sys
import time
from multiprocessing import Process, Queue
from plotter.utils.gcode import *
from plotter.utils.calibration import *
def processPlotterQueue(plotter):
    """Callback for plotter process.

    Top-level (and therefore picklable) entry point handed to
    multiprocessing.Process; it simply runs the plotter's queue-processing
    loop inside the child process.
    """
    plotter.processQueueAsync()
class BasePlotter:
    """Base class of all plotter implementations. Always call '__init__' in the derived class after
    setting up all (custom) member variables. Otherwise the (custom) config and calibration is
    not available in the worker process."""
    def __init__(self, config, initial_lengh, PhysicsEngineClass):
        """Sets up the worker process and initializes the system.

        config -- dict providing at least 'base_width', 'steps_per_mm' and
                  'movement_resolution'.
        initial_lengh -- initial cord length(s) used to derive the origin.
        PhysicsEngineClass -- physics model class; instantiated with
                  (config, calibration).
        """
        base = config["base_width"]
        self.calib = Calibration(base,
                                 PhysicsEngineClass.calcOrigin(
                                     initial_lengh, base),
                                 stepsPerMM=config["steps_per_mm"],
                                 resolution=config["movement_resolution"])
        self.config = config
        self.physicsEngine = PhysicsEngineClass(self.config, self.calib)
        self.currPos = np.zeros((2,))
        self.currCordLength = self.physicsEngine.point2CordLength(self.currPos)
        self.speed = 10000
        self.penIsDown = False
        # Start the worker last so it pickles a fully initialized plotter.
        self.workerProcess = Process(target=processPlotterQueue, args=(self,))
        self.workerQueue = Queue(1000)
        self.workerProcess.start()
    def shutdown(self):
        """Stops the worker queue and the worker process.

        Pushes the 'None' sentinel that terminates the worker loop, then
        waits for the queue's feeder thread and the process to finish."""
        print("Shutting down..")
        self.workerQueue.put(None)
        self.workerQueue.close()
        self.workerQueue.join_thread()
        self.workerProcess.join()
    def executeGCodeFile(self, file_name):
        """Opens the specified gcode file and pushes each command into the worker queue.
        This will block if the queue is full."""
        with open(file_name, 'r') as f:
            # Stream line by line instead of loading the whole file at once.
            for line in f:
                self.workerQueue.put(line.strip())
    def processQueueAsync(self):
        """Plotter worker function which runs in the worker process.
        Processes new commands by reading from the worker queue until the
        'None' sentinel is received."""
        print("Plotter process started")
        item = self.workerQueue.get()
        while item is not None:
            self.executeCmd(item)
            item = self.workerQueue.get()
        print("Plotter process stopped")
        exit(0)
    def executeCmd(self, cmd):
        """Executes a command. Should only be called from the worker process.

        Supported commands: G0 (linear move), G28 (home), G2 (arc move),
        M3 (pen down), M4 (pen up)."""
        if cmd.startswith("G0"):
            d = decodeGCode(cmd)
            if not d:
                return
            if "S" in d:
                self.setSpeed(d["S"])
            self.moveToPos([d.get("X", self.currPos[0]),
                            d.get("Y", self.currPos[1])])
        elif cmd.startswith("G28"):
            # Must be tested before "G2", which is a prefix of "G28".
            self.moveToPos([0, 0])
        elif cmd.startswith("G2"):
            d = decodeGCode(cmd)
            if not d:
                return
            if "S" in d:
                self.setSpeed(d["S"])
            # BUGFIX: arcs need X, Y and R. The original check used 'and',
            # which only rejected commands missing all three and then
            # crashed with a KeyError on partially specified commands.
            if "R" not in d or "X" not in d or "Y" not in d:
                print(d)
                print("Unexpected cmd type. Failed to process command.")
                return
            self.moveArc([d["X"], d["Y"]], d["R"],
                         d.get("A", 0), d.get("B", 360))
        elif cmd.startswith("M3"):
            self.penDown()
        elif cmd.startswith("M4"):
            self.penUp()
        else:
            print("Unexpected cmd type. Failed to process command.")
    def moveToPos(self, targetPos):
        """Move to specified position.
        Raises an error if not implemented by derived class."""
        raise NotImplementedError("Function moveToPos not implemented")
    def moveArc(self, center, radius, startAngle, endAngle):
        """Move on an arc.
        Raises an error if not implemented by derived class."""
        raise NotImplementedError("Function moveArc not implemented")
    def penUp(self):
        """Lift the pen.
        Raises an error if not implemented by derived class."""
        raise NotImplementedError("Function penUp not implemented")
    def penDown(self):
        """Lower the pen.
        Raises an error if not implemented by derived class."""
        raise NotImplementedError("Function penDown not implemented")
    def setSpeed(self, s):
        """Sets the current speed. (used by subsequent commands)."""
        self.speed = s
    def __str__(self):
        # Human-readable state dump for debugging/monitoring.
        return """
        {}
        ------------ State ------------
        Current Position: {}
        Current Length: {}
        PenState: {}
        Current Speed: {}
        Queue Size: {}
        -------------------------------""".format(self.calib, self.currPos, self.currCordLength, "DOWN" if self.penIsDown else "UP", self.speed, self.workerQueue.qsize())
|
# Submitter: loganw1(Wang, Logan)
from goody import type_as_str
import inspect
class Check_All_OK:
    """
    Check_All_OK implements __check_annotation__ by verifying that every
    annotation given to its constructor is satisfied; the first one that
    fails (by raising AssertionError) reports its problem, with the list of
    all annotations being tried appended to the check_history.
    """
    def __init__(self, *args):
        self._annotations = args
    def __repr__(self):
        return 'Check_All_OK(' + ','.join(str(a) for a in self._annotations) + ')'
    def __check_annotation__(self, check, param, value, check_history):
        # Every annotation must pass; delegate each to the supplied checker,
        # extending the history so a failure reports its full context.
        history_suffix = ' while trying: ' + str(self) + '\n'
        for annotation in self._annotations:
            check(param, annotation, value,
                  check_history + 'Check_All_OK check: ' + str(annotation) + history_suffix)
class Check_Any_OK:
    """
    Check_Any_OK implements __check_annotation__ by verifying that at least
    one of the annotations given to its constructor is satisfied; when every
    one fails (by raising AssertionError) this class itself raises
    AssertionError, reporting the failure together with the annotations
    tried, followed by the check_history.
    """
    def __init__(self, *args):
        self._annotations = args
    def __repr__(self):
        return 'Check_Any_OK(' + ','.join(str(a) for a in self._annotations) + ')'
    def __check_annotation__(self, check, param, value, check_history):
        # Count failures; succeed as soon as any annotation passes.
        failures = 0
        for annotation in self._annotations:
            try:
                check(param, annotation, value, check_history)
            except AssertionError:
                failures += 1
        if failures == len(self._annotations):
            assert False, repr(param) + ' failed annotation check(Check_Any_OK): value = ' + repr(value) + \
                '\n tried ' + str(self) + '\n' + check_history
class Check_Annotation:
    """Function decorator that, at call time, validates every annotated
    parameter (and the 'return' annotation, if present) against the actual
    argument/result, raising AssertionError on any violation. Checking can be
    disabled globally (class attribute 'checking_on') or per-decorated
    function (instance attribute '_checking_on')."""
    # Start with binding the class attribute to True allowing checking to occur
    # (but only if the object's attribute self._checking_on is bound to True)
    checking_on = True
    # To check the decorated function f, first bind self._checking_on as True
    def __init__(self, f):
        # f is the function being decorated; checking defaults to enabled.
        self._f = f
        self._checking_on = True
    # Check whether param's annot is correct for value, adding to check_history
    # if recurs; defines many local function which use it parameters.
    def check(self, param, annot, value, check_history=''):
        """Assert that 'value' bound to parameter 'param' satisfies annotation
        'annot'. 'check_history' accumulates human-readable context as nested
        annotations recurse through the local checkers below."""
        # Define local functions for checking, list/tuple, dict, set/frozenset,
        # lambda/functions, and str (str for extra credit)
        # Many of these local functions called by check, call check on their
        # elements (thus are indirectly recursive)
        def check_default(a, val):
            # Simple type annotation: value must be an instance of 'a'.
            if isinstance(val, a) == False:
                raise AssertionError(
                    f"'{param}' failed annotation check(wrong type): value = '{val}'\n was type {type_as_str(val)} ...should be type {str(a)[7:-1]}{check_history}")
        def check_list(a, val):
            # List annotation: [T] means every element matches T;
            # [T1,...,Tn] means element-wise annotations of exactly length n.
            if isinstance(val, list) == False:
                raise AssertionError(
                    f"'{param}' failed annotation check(wrong type): value = '{val}'\n was type {type_as_str(val)} ...should be type list")
            elif len(a) == 1:
                for index in range(len(val)):
                    self.check(param, a[0], val[index], check_history=check_history + f"\nlist[{index}] check: {a[0]}")
            else:
                if len(a) != len(val):
                    raise AssertionError(
                        f"'{param} failed annotation check(wrong number of elements): value = {val}\n annotation had {len(a)} elements{a}")
                else:
                    for index in range(len(a)): self.check(param, a[index], val[index],
                                                           check_history=check_history + f"\nlist[{index}] check: {a[index]}")
        def check_tuple(a, val):
            # Tuple annotation: same element-wise rules as for lists.
            if isinstance(val, tuple) == False:
                raise AssertionError(
                    f"'{param}' failed annotation check(wrong type): value = '{val}'\n was type {type_as_str(val)} ...should be type tuple")
            elif len(a) == 1:
                for index in range(len(val)): self.check(param, a[0], val[index],
                                                         check_history=check_history + f"\ntuple[{index}] check: {a[0]}")
            else:
                if len(a) != len(val):
                    raise AssertionError(
                        f"'{param} failed annotation check(wrong number of elements): value = {val}\n annotation had {len(a)} elements{a}")
                else:
                    for index in range(len(a)): self.check(param, a[index], val[index],
                                                           check_history=check_history + f"\ntuple[{index}] check: {a[index]}")
        def check_dict(a, val):
            # Dict annotation: exactly one {K: V} pair; every key must match
            # K and every value must match V.
            if isinstance(val, dict) == False:
                raise AssertionError(
                    f"'{param}' failed annotation check(wrong type): value = '{val}'\n was type {type_as_str(val)} ...should be type dict")
            elif len(a) != 1:
                raise AssertionError(
                    f"'{param}' annotation inconsistency: {type_as_str(a)} should have 1 item but had {len(a)}\n annotation = {a}")
            else:
                for key in val:
                    self.check(param, list(a.keys())[0], key,
                               check_history=check_history + f"\ndict key check: {list(a.keys())[0]}")
                    self.check(param, list(a.values())[0], val[key],
                               check_history=check_history + f"\ndict value check: {list(a.values())[0]}")
        def check_set(a, val):
            # Set annotation: exactly one member annotation; every element
            # of the value must match it.
            if isinstance(val, set) == False:
                raise AssertionError(
                    f"'{param}' failed annotation check(wrong type): value = '{val}'\n was type {type_as_str(val)} ...should be type set")
            elif len(a) != 1:
                raise AssertionError(
                    f"'{param}' annotation inconsistency: {type_as_str(a)} should have 1 value but had {len(a)}\n annotation = {a}")
            else:
                for item in val: self.check(param, list(a)[0], item,
                                            check_history=check_history + f"\nset value check: {list(a)[0]}")
        def check_frozenset(a, val):
            # Frozenset annotation: same single-member rule as for sets.
            if isinstance(val, frozenset) == False:
                raise AssertionError(
                    f"'{param}' failed annotation check(wrong type): value = '{val}'\n was type {type_as_str(val)} ...should be type frozenset")
            elif len(a) != 1:
                raise AssertionError(
                    f"'{param}' annotation inconsistency: {type_as_str(a)} should have 1 item but had {len(a)}\n annotation = {a}")
            else:
                for item in val: self.check(param, list(a)[0], item,
                                            check_history=check_history + f"\nfrozenset value check: {list(a)[0]}")
        def check_lambda(a, val):
            # Predicate annotation: a one-parameter function/lambda that must
            # return a truthy value for 'val'.
            if len(a.__code__.co_varnames) != 1:
                raise AssertionError(
                    f"'{param}' annotation inconsistency: predicate should have 1 parameter but had {len(a.__code__.co_varnames)}\n predicate = {a}")
            try:
                a(val)
            except Exception as e:
                raise AssertionError(f"'{param}' annotation predicate({a}) raised exception\n exception = {e}")
            # NOTE(review): the predicate is evaluated a second time here, so a
            # side-effecting predicate runs twice -- confirm this is intended.
            if not a(val): raise AssertionError(
                f"'{param}' failed annotation check: value = {val}\n predicate = {a}{check_history}")
        # Start with matching check's function annotation with its arguments
        if annot is None:
            return
        elif annot in (int, str, float, complex, bool, iter,list):
            check_default(annot, value)
        elif isinstance(annot, list):
            check_list(annot, value)
        elif isinstance(annot, tuple):
            check_tuple(annot, value)
        elif isinstance(annot, dict):
            check_dict(annot, value)
        elif isinstance(annot, set):
            check_set(annot, value)
        elif isinstance(annot, frozenset):
            check_frozenset(annot, value)
        elif inspect.isfunction(annot):
            check_lambda(annot, value)
        else:
            # Protocol hook: arbitrary objects may define __check_annotation__.
            try:
                annot.__check_annotation__(self.check, param, value, check_history)
            # NOTE(review): this bare except also swallows AssertionErrors that
            # a failing __check_annotation__ legitimately raises, re-labelling
            # them as "undecipherable" -- confirm that is the intent.
            except:
                raise AssertionError(f"'{param}' annotation undecipherable: {annot}")
    # Return result of calling decorated function call, checking present
    # parameter/return annotations if required
    def __call__(self, *args, **kargs):
        """Call the decorated function, first checking every annotated
        parameter and finally (if annotated) the returned value."""
        # Return the parameter/argument bindings via an OrderedDict (derived
        # from dict): it binds the function header's parameters in their order
        def param_arg_bindings():
            f_signature = inspect.signature(self._f)
            bound_f_signature = f_signature.bind(*args, **kargs)
            for param in f_signature.parameters.values():
                # Fill in defaults for parameters the caller did not supply.
                if not (param.name in bound_f_signature.arguments):
                    bound_f_signature.arguments[param.name] = param.default
            return bound_f_signature.arguments
        # If annotation checking is turned off at the class or function level
        # just return the result of calling the decorated function
        if (self._checking_on == False or self.checking_on == False):
            return self._f(*args, **kargs)
        # Otherwise do all the annotation checking
        try:
            # For each detected annotation, check if the parameter satisfies it
            check_history = ''
            params_assigned = param_arg_bindings()
            for annot_to_check, specified_annot_type in self._f.__annotations__.items():
                if annot_to_check != 'return':
                    self.check(annot_to_check, specified_annot_type, params_assigned[annot_to_check],
                               check_history + ' check: ' + str(annot_to_check) + ' while trying: ' + str(
                                   self) + '\n')
            # Compute/remember the value of the decorated function
            answer = self._f(*args, **kargs)
            # If 'return' is in the annotation, check it
            if 'return' in self._f.__annotations__.keys():
                self.check('return', self._f.__annotations__['return'], answer,
                           check_history + ' check: ' + str('return') + ' while trying: ' + str(
                               self) + '\n')
                params_assigned['_return'] = answer
            # Return the decorated answer
            return answer
        # TODO REMOVE COMMENTS
        # On first AssertionError, print the source lines of the function and reraise
        except AssertionError:
            '''
            print(80*'-')
            for l in inspect.getsourcelines(self._f)[0]: # ignore starting line #
                print(l.rstrip())
            print(80*'-')
            '''
            raise
# Manual smoke test plus the course's batch test driver; runs only when this
# module is executed directly.
if __name__ == '__main__':
    # an example of testing a simple annotation
    def f(x:int): pass
    f = Check_Annotation(f)
    f(3)
    #f('a')
    # driver tests
    import driver
    driver.default_file_name = 'bscp4W21.txt'
    # driver.default_show_exception= True
    # driver.default_show_exception_message= True
    # driver.default_show_traceback= True
    driver.driver()
|
# Generated by Django 2.0.5 on 2018-07-31 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema change: the standalone Assignment model is
    # replaced by a plain CharField on Course. Operation order matters:
    # the FK is removed before the model is deleted.
    dependencies = [
        ('course_management_app', '0001_initial'),
    ]
    operations = [
        # Drop the FK from Assignment to Course first.
        migrations.RemoveField(
            model_name='assignment',
            name='course_id',
        ),
        # Existing Course rows receive the one-off default; preserve_default
        # False means the default is not kept in the model afterwards.
        migrations.AddField(
            model_name='course',
            name='assignment',
            field=models.CharField(default='do some homework', max_length=100),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Assignment',
        ),
    ]
|
import functools
import math
import numpy as np
import plotly.graph_objs as go
import plotly.figure_factory as ff
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import State, Input, Output
from datetime import date, timedelta, datetime
import pandas as pd
import re
import dash_table
import plotly.express as px
# from merge_csv import *
from trending_tweets import *
# from privacy_history import *
# from privacy_history_multithreading import *
import ast
import base64
# analyse_over_all_dates()
# merge_all_csv()
# Global Dash app setup: stylesheet, color scheme, logo and server handle.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
colors = {
    'background': 'white',
    'text': 'black',
}
# Embed the logo as base64 so no static-asset route is required.
# https://github.com/plotly/dash/issues/71
image_filename = 'DIRI.png' # replace with your own image
encoded_image = base64.b64encode(open(image_filename, 'rb').read())
# Expose the underlying Flask server (used by WSGI hosts, e.g. gunicorn).
server = app.server
app.title = 'Privacy Twitter Analysis'
# Static page layout. Component ids referenced by the callbacks below:
# date_picker, choice_consolidated_trending, choice_trending_thresh_slider,
# choice_trending_thresh, choice_analysis, choice_all_tweets,
# choice_orgs_selection, choice_org_centric, choice_tweet_property,
# graph_central_tendency, choice_organizations, choice_max_x,
# choice_min_count, graph_organizations_and_tags.
app.layout = html.Div([
    # Header card with logo and project credits (absolutely positioned).
    html.Div([
        html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode())),
        html.H1('Twitter Privacy Analysis'),
        html.H3('Organization References'),
        html.Div('Siddharth Diwan, Anirudh Syal'),
    ], style={'width': '40%', 'height': '40%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': 'white', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey', 'position': 'absolute', 'right': '5%', 'top': '10%', 'color': 'black'
    }),
    # Main content container.
    html.Div([
        # Controls: date range, data source, trending threshold, analysis mode.
        html.Div([
            dcc.DatePickerRange(
                id='date_picker',
                # min_date_allowed=date(2020, 8, 1),
                # max_date_allowed=date(2020, 12, 31),
                start_date=date(2021, 1, 8),
                end_date=date(2021, 2, 28),
                # style={"margin-top": "15px"}
            ),
            dcc.Dropdown(
                id='choice_consolidated_trending',
                options=[
                    # {'label': 'Full Consolidated', 'value': 'full'},
                    {'label': 'Trending Retweets',
                     'value': 'trending_retweets'},
                    {'label': 'Trending Favourites',
                     'value': 'trending_favs'},
                ],
                value='trending_retweets',
                clearable=False,
                style={"margin-top": "15px"}
            ),
            html.Div(children=[
            ], style={"margin-top": "15px"}),
            dcc.Slider(
                id='choice_trending_thresh_slider',
                # placeholder='Enter a threshold',
                min=0,
                max=1000,
                updatemode='drag',
                value=0
            ),
            html.Div(id='choice_trending_thresh', children=[
            ]),
            dcc.Dropdown(
                id='choice_analysis',
                options=[
                    {'label': 'Daily', 'value': 'Daily'},
                    {'label': 'Overall', 'value': 'overall'},
                ],
                value='Daily',
                clearable=False,
                style={"margin-top": "15px"}
            ),
        ], style={'width': '40%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': '#f7f7f7', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey'
        }),
        # Central-tendency section: tweet filters plus the main graph.
        html.Div([
            html.Div([
                dcc.Dropdown(
                    id='choice_all_tweets',
                    options=[
                        {'label': 'All Tweets', 'value': 'tweets_all'},
                        {'label': 'Tweets Mentioning Organizations',
                         'value': 'tweets_orgs'},
                    ],
                    value='tweets_all',
                    clearable=False,
                ),
                dcc.Dropdown(
                    id='choice_orgs_selection',
                    multi=True,
                    style={"margin-top": "15px"}
                ),
                dcc.Dropdown(
                    id='choice_org_centric',
                    options=[
                        {'label': 'Date Centric', 'value': 'date_centric'},
                        {'label': 'Organization Centric',
                         'value': 'org_centric'},
                    ],
                    value='org_centric',
                    clearable=False,
                    style={"margin-top": "15px"}
                ),
                dcc.Dropdown(
                    id='choice_tweet_property',
                    options=[
                        {'label': 'Retweets', 'value': 'Retweets'},
                        {'label': 'Favourties', 'value': 'Favourites'},
                        {'label': 'Sentiment', 'value': 'Sentiment'},
                    ],
                    value='Retweets',
                    clearable=False,
                    style={"margin-top": "15px"}
                ),
            ], style={'width': '40%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': '#f7f7f7', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey'
            }),
            html.Div([
                dcc.Graph(
                    id='graph_central_tendency'
                ),
            ], style={'width': '97%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': '#f7f7f7', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey', 'margin-top': '10px'
            }),
        ], style={'width': '97%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': '#e6e6e6', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey', 'margin-top': '10px'
        }),
        # Organizations/industries section: top-n filters plus its graph.
        html.Div([
            html.Div([
                dcc.Dropdown(
                    id='choice_organizations',
                    options=[
                        {'label': 'Top n Organizations Mentioned',
                         'value': 'organizations'},
                        {'label': 'Top n Industries', 'value': 'tags'},
                        # {'label': 'Top n Hashtags', 'value': 'hashtags'},
                    ],
                    value='organizations',
                    clearable=False
                ),
                dcc.Input(
                    id='choice_max_x',
                    type='number',
                    value=10,
                    placeholder='Enter Max Displayed'
                ),
                dcc.Input(
                    id='choice_min_count',
                    type='number',
                    value=0,
                    placeholder='Enter Min Frequency'
                ),
            ], style={'width': '40%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': '#f7f7f7', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey'
            }),
            html.Div([
                dcc.Graph(
                    id='graph_organizations_and_tags'
                )
            ], style={'width': '97%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': '#f7f7f7', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey', 'margin-top': '10px'
            }),
        ], style={'width': '97%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': '#e6e6e6', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey', 'margin-top': '10px'
        }),
        # Footer with author links.
        html.Div([
            "Siddharth Diwan ",
            html.A(
                'LinkedIn', href='https://www.linkedin.com/in/siddharth-diwan-10a4701b3/'),
            " | Anirudh Syal ",
            html.A('LinkedIn', href='https://www.linkedin.com/in/anirudhsyal/')
        ], style={'align-items': 'center', 'justify-content': 'center', 'text-align': 'center', 'background-color': 'white', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey', 'color': 'black', 'margin-top': '10px'
        }),
    ], style={'width': '97%', 'align-items': 'center', 'justify-content': 'center', 'padding': '20px', 'background-color': '#bababa', 'border-radius': '5px', 'box-shadow': '2px 2px 2px lightgrey', 'margin-top': '10px'
    }),
], style={
    'backgroundColor': 'white',
    'color': colors['text'],
})
def diverge_sentiment(df, color_col_name):
    """Binarize VADER sentiment and count tweets per (group, sentiment) cell.

    Scores below 0.7 are mapped to -1 (negative), all others to 1 (positive).
    Grouping is by ['date', 'display_orgs'] when color_col_name is the special
    value 'display_orgs & date', otherwise by the single column
    color_col_name. Returns a flat DataFrame whose last two columns are
    'sentiment_score' and 'count'.

    NOTE: mutates the 'vader_polarity' and 'count' columns of the df passed
    in (same behavior as the original implementation).
    """
    # Threshold the polarity into a binary sentiment label.
    df['vader_polarity'] = np.where(df['vader_polarity'] < 0.7, -1, 1)
    df['count'] = [1] * len(df.index)
    # Pick the grouping columns for the two supported modes.
    if color_col_name == 'display_orgs & date':
        group_cols = ['date', 'display_orgs', 'vader_polarity']
    else:
        group_cols = [color_col_name, 'vader_polarity']
    # groupby + reset_index replaces the original hand-rolled loop that
    # rebuilt the frame column by column from the grouped Series.
    sum_df = (df.groupby(group_cols)['count'].sum()
                .reset_index()
                .rename(columns={'vader_polarity': 'sentiment_score'}))
    return sum_df
@ app.callback(
dash.dependencies.Output('graph_central_tendency', 'figure'),
[dash.dependencies.Input('date_picker', 'start_date'),
dash.dependencies.Input('date_picker', 'end_date'),
dash.dependencies.Input('choice_consolidated_trending', 'value'),
dash.dependencies.Input('choice_all_tweets', 'value'),
dash.dependencies.Input('choice_tweet_property', 'value'),
dash.dependencies.Input('choice_analysis', 'value'),
dash.dependencies.Input('choice_trending_thresh_slider', 'value'),
dash.dependencies.Input('choice_org_centric', 'value'),
dash.dependencies.Input('choice_orgs_selection', 'value')],
)
def update_graph_central_tendency(start_date, end_date, data_selection, tweets_selection, tendency_selection, analysis_selection, thresh, centric_selection, companies_selection):
print('companies_selection', companies_selection)
if companies_selection == None:
companies_selection = []
# #print('yo')
start_date_obj = datetime.strptime(start_date, '%Y-%m-%d')
end_date_obj = datetime.strptime(end_date, '%Y-%m-%d')
# https://stackoverflow.com/questions/59882714/python-generating-a-list-of-dates-between-two-dates
# https://stackoverflow.com/questions/18684076/how-to-create-a-list-of-date-string-in-yyyymmdd-format-with-python-pandas
date_range = [d.strftime('%Y-%m-%d') for d in pd.date_range(
start_date_obj, end_date_obj-timedelta(days=1), freq='d')]
if data_selection == 'full':
df = pd.read_csv('results/consolidated_date_formatted.csv')
elif data_selection == 'trending_retweets':
df = filter_by_col('retweet_count', thresh)
elif data_selection == 'trending_favs':
df = filter_by_col('fav_count', thresh)
# # https://stackoverflow.com/questions/12096252/use-a-list-of-values-to-select-rows-from-a-pandas-dataframe
# #print('df_date', df['date'])
df = df[df['date'].isin(date_range)]
# #print('date_range', date_range)
if analysis_selection == 'Daily':
if tweets_selection == 'tweets_all':
if (tendency_selection == 'Retweets'):
fig = px.strip(df, x="date", y="retweet_count",
hover_data=["tweet_cleaned"])
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# # https://www.codegrepper.com/code-examples/python/how+to+find+mean+of+one+column+based+on+another+column+in+python
# mean_sr = df.groupby('date')['retweet_count'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'retweet_count': mean_sr.values})
# median_sr = df.groupby('date')['retweet_count'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'retweet_count': median_sr.values})
# # https://stackoverflow.com/questions/62122015/how-to-add-traces-in-plotly-express
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['retweet_count'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['retweet_count'], name='median', visible="legendonly"))
return fig
elif (tendency_selection == 'Favourites'):
fig = px.strip(df, x="date", y="fav_count",
)
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# mean_sr = df.groupby('date')['fav_count'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'fav_count': mean_sr.values})
# median_sr = df.groupby('date')['fav_count'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'fav_count': median_sr.values})
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['fav_count'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['fav_count'], name='median', visible="legendonly"))
return fig
elif (tendency_selection == 'Sentiment'):
df = diverge_sentiment(df, 'date')
# #print('df gleeeeeeeeeeee', sum_df)
fig = px.bar(df, x="date", y="count",
color="sentiment_score")
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# mean_sr = df.groupby(
# 'date')['vader_polarity'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'vader_polarity': mean_sr.values})
# median_sr = df.groupby('date')['vader_polarity'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'vader_polarity': median_sr.values})
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['vader_polarity'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['vader_polarity'], name='median', visible="legendonly"))
return fig
elif tweets_selection == 'tweets_orgs':
def common_data(list1, list2):
# traverse in the 1st list
for x in list1:
# #print('x', x)
# traverse in the 2nd list
for y in list2:
# if one common
if x == y:
# #print('found!')
return True
# #print('not found')
return False
# print('loblo', df)
# print('1', df['tweet_mentioned_organizations'])
mask = [common_data(ast.literal_eval(orgs), list(companies_selection))
for orgs in df['tweet_mentioned_organizations']]
df = df[mask]
# print('Daily, orgs, centric selection', centric_selection)
# print('2', df['tweet_mentioned_organizations'])
if centric_selection == 'date_centric':
if (tendency_selection == 'Retweets'):
# print('hi retweets')
fig = px.strip(df, x="date", y="retweet_count",
)
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# https://www.codegrepper.com/code-examples/python/how+to+find+mean+of+one+column+based+on+another+column+in+python
# mean_sr = df.groupby('date')['retweet_count'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'retweet_count': mean_sr.values})
# median_sr = df.groupby('date')['retweet_count'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'retweet_count': median_sr.values})
# # https://stackoverflow.com/questions/62122015/how-to-add-traces-in-plotly-express
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['retweet_count'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['retweet_count'], name='median', visible="legendonly"))
return fig
elif (tendency_selection == 'Favourites'):
fig = px.strip(df, x="date", y="fav_count",
)
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# mean_sr = df.groupby('date')['fav_count'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'fav_count': mean_sr.values})
# median_sr = df.groupby('date')['fav_count'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'fav_count': median_sr.values})
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['fav_count'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['fav_count'], name='median', visible="legendonly"))
return fig
elif (tendency_selection == 'Sentiment'):
df = diverge_sentiment(df, 'date')
# #print('df gleeeeeeeeeeee', sum_df)
fig = px.bar(df, x="date", y="count",
color="sentiment_score")
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# mean_sr = df.groupby(
# 'date')['vader_polarity'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'vader_polarity': mean_sr.values})
# median_sr = df.groupby('date')['vader_polarity'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'vader_polarity': median_sr.values})
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['vader_polarity'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['vader_polarity'], name='median', visible="legendonly"))
return fig
# elif analysis_selection == 'overall':
# count_arr = [1] * len(df.index)
# df['count'] = count_arr
# if (tendency_selection == 'Retweets'):
# fig = px.histogram(df, x="retweet_count", y="count",
# color="date", marginal='rug', hover_data=['vader_polarity'], nbins=50, )
# fig.update_layout(
# plot_bgcolor=colors['background'],
# paper_bgcolor=colors['background'],
# font_color=colors['text']
# )
# return fig
# elif (tendency_selection == 'Favourites'):
# fig = px.histogram(df, x="fav_count", y="count",
# color="date", marginal='rug', hover_data=['vader_polarity'], nbins=50, )
# fig.update_layout(
# plot_bgcolor=colors['background'],
# paper_bgcolor=colors['background'],
# font_color=colors['text']
# )
# return fig
# elif (tendency_selection == 'Sentiment'):
# df = diverge_sentiment(df, 'date')
# fig = px.bar(df, x="vader_polarity", y="count",
# color="date")
# # fig = px.histogram(df, x="vader_polarity", y="count",
# # color="date", marginal='rug', hover_data=['tweet_mentioned_organizations'], nbins=50, )
# fig.update_layout(
# plot_bgcolor=colors['background'],
# paper_bgcolor=colors['background'],
# font_color=colors['text']
# )
# return fig
elif centric_selection == 'org_centric':
# print(df.columns)
# print(df)
orgs_literal = list(map(lambda x: ast.literal_eval(
x), df['tweet_mentioned_organizations'].to_numpy()))
# print('orgs_literal', orgs_literal)
# #print(orgs_literal)
df['display_orgs'] = list(map(
lambda x: functools.reduce(lambda a, b: a + ', ' + b, x), orgs_literal))
# print('FINALLYFINALLY', df['display_orgs'])
if (tendency_selection == 'Retweets'):
# print('hi retweets')
fig = px.strip(df, x="date", y="retweet_count", color='display_orgs'
)
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# https://www.codegrepper.com/code-examples/python/how+to+find+mean+of+one+column+based+on+another+column+in+python
# mean_sr = df.groupby('date')['retweet_count'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'retweet_count': mean_sr.values})
# median_sr = df.groupby('date')['retweet_count'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'retweet_count': median_sr.values})
# # https://stackoverflow.com/questions/62122015/how-to-add-traces-in-plotly-express
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['retweet_count'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['retweet_count'], name='median', visible="legendonly"))
return fig
elif (tendency_selection == 'Favourites'):
fig = px.strip(df, x="date", y="fav_count", color='display_orgs'
)
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# mean_sr = df.groupby('date')['fav_count'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'fav_count': mean_sr.values})
# median_sr = df.groupby('date')['fav_count'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'fav_count': median_sr.values})
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['fav_count'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['fav_count'], name='median', visible="legendonly"))
return fig
elif (tendency_selection == 'Sentiment'):
df = diverge_sentiment(df, 'display_orgs & date')
# #print('df gleeeeeeeeeeee', sum_df)
fig = px.bar(df, x="date", y="count",
color="sentiment_score", text="display_orgs")
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
# mean_sr = df.groupby(
# 'date')['vader_polarity'].mean()
# mean_df = pd.DataFrame(
# {'date': mean_sr.index, 'vader_polarity': mean_sr.values})
# median_sr = df.groupby('date')['vader_polarity'].median()
# median_df = pd.DataFrame(
# {'date': median_sr.index, 'vader_polarity': median_sr.values})
# fig.add_trace(go.Scatter(
# x=mean_df['date'], y=mean_df['vader_polarity'], name='mean', visible="legendonly"))
# fig.add_trace(go.Scatter(
# x=median_df['date'], y=median_df['vader_polarity'], name='median', visible="legendonly"))
return fig
elif analysis_selection == 'overall':
# print('overall')
if tweets_selection == 'tweets_all':
count_arr = [1] * len(df.index)
df['count'] = count_arr
df['sentiment_score'] = df['vader_polarity']
if (tendency_selection == 'Retweets'):
fig = px.histogram(df, x="retweet_count", y="count",
color="date",
# marginal='rug',
hover_data=['sentiment_score'], nbins=50, )
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
elif (tendency_selection == 'Favourites'):
fig = px.histogram(df, x="fav_count", y="count",
color="date",
# marginal='rug',
hover_data=['sentiment_score'], nbins=50,
labels={
"date": "HAHA",
"count": "HOHO",
})
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
elif (tendency_selection == 'Sentiment'):
df = diverge_sentiment(df, 'date')
fig = px.bar(df, x="sentiment_score", y="count",
color="date")
# fig = px.histogram(df, x="vader_polarity", y="count",
# color="date", marginal='rug', hover_data=['tweet_mentioned_organizations'], nbins=50, )
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
elif tweets_selection == 'tweets_orgs':
# print('tweets_orgs')
def common_data(list1, list2):
# #print('list1', list1)
# #print('list2', list2)
# traverse in the 1st list
for x in list1:
# #print('x', x)
# traverse in the 2nd list
for y in list2:
# if one common
if x == y:
# #print('found!')
return True
# #print('not found')
return False
mask = [common_data(ast.literal_eval(orgs), list(companies_selection))
for orgs in df['tweet_mentioned_organizations']]
df = df[mask]
df['sentiment_score'] = df['vader_polarity']
# print('Daily, orgs, centric selection', centric_selection)
if centric_selection == 'date_centric':
# #print('df', df)
# fig = px.strip(df, x="date", y="retweet_count", )
# return 'hi'
count_arr = [1] * len(df.index)
df['count'] = count_arr
if (tendency_selection == 'Retweets'):
fig = px.histogram(df, x="retweet_count", y="count",
color="date",
# marginal='rug',
hover_data=['sentiment_score'], nbins=50, )
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
elif (tendency_selection == 'Favourites'):
fig = px.histogram(df, x="fav_count", y="count",
color="date",
# marginal='rug',
hover_data=['sentiment_score'], nbins=50, )
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
elif (tendency_selection == 'Sentiment'):
df = diverge_sentiment(df, 'date')
fig = px.bar(df, x="sentiment_score", y="count",
color="date")
# fig = px.histogram(df, x="vader_polarity", y="count",
# color="date", marginal='rug', hover_data=['tweet_mentioned_organizations'], nbins=50, )
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
elif centric_selection == 'org_centric':
# print('org_centric')
orgs_literal = list(map(lambda x: ast.literal_eval(
x), df['tweet_mentioned_organizations'].to_numpy()))
# #print(orgs_literal)
df['display_orgs'] = list(map(
lambda x: functools.reduce(lambda a, b: a + ', ' + b, x, ), orgs_literal))
# print("HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH")
# print(df.columns)
# #print('after filtering the companies selected', df)
# #print('df', df)
# fig = px.strip(df, x="date", y="retweet_count", )
# return 'hi'
count_arr = [1] * len(df.index)
df['count'] = count_arr
df['sentiment_score'] = df['vader_polarity']
if (tendency_selection == 'Retweets'):
fig = px.histogram(df, x="retweet_count", y="count",
color="display_orgs", marginal='rug', hover_data=['date', 'fav_count'], nbins=50, )
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
elif (tendency_selection == 'Favourites'):
fig = px.histogram(df, x="fav_count", y="count",
color="display_orgs", marginal='rug', hover_data=['date', 'retweet_count', 'sentiment_score'], nbins=50, )
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
elif (tendency_selection == 'Sentiment'):
df = diverge_sentiment(df, 'display_orgs')
fig = px.bar(df, x="sentiment_score", y="count",
color="display_orgs")
# fig = px.histogram(df, x="vader_polarity", y="count",
# color="display_orgs", marginal='rug', hover_data=['date', 'retweet_count', 'fav_count'], nbins=50, )
fig.update_layout(
plot_bgcolor=colors['background'],
paper_bgcolor=colors['background'],
font_color=colors['text']
)
return fig
@ app.callback(
dash.dependencies.Output('graph_organizations_and_tags', 'figure'),
[dash.dependencies.Input('date_picker', 'start_date'),
dash.dependencies.Input('date_picker', 'end_date'),
dash.dependencies.Input('choice_tweet_property', 'value'),
dash.dependencies.Input('choice_analysis', 'value'),
dash.dependencies.Input('choice_organizations', 'value'),
dash.dependencies.Input('choice_consolidated_trending', 'value'),
dash.dependencies.Input('choice_trending_thresh_slider', 'value'),
dash.dependencies.Input('choice_max_x', 'value'),
dash.dependencies.Input('choice_min_count', 'value')])
def update_graph_organizations(start_date, end_date, mode_selection, analysis_selection, organizations_selection, data_selection, thresh, max_x, min_count):
    """Render the organizations / tags / hashtags / phrases / sentiment figure.

    Parameters mirror the Dash Inputs wired to this callback:
      start_date, end_date -- 'YYYY-MM-DD' strings from the date picker.
      mode_selection       -- tweet-property choice (currently unused here).
      analysis_selection   -- 'overall' or 'Daily'.
      organizations_selection -- which view to draw: 'organizations', 'tags',
                                 'hashtags', 'phrases' or 'weighted'.
      data_selection       -- 'full', 'trending_retweets' or 'trending_favs'.
      thresh               -- trending threshold for filter_by_col.
      max_x                -- optional cap on the number of x categories.
      min_count            -- per-(date, value) counts at or below this are dropped.

    Returns a plotly figure.
    """

    def _load_frame():
        # Choose the data source, then keep only rows inside the picked dates.
        if data_selection == 'full':
            frame = pd.read_csv('results/consolidated_date_formatted.csv')
        elif data_selection == 'trending_retweets':
            frame = filter_by_col('retweet_count', thresh)
        elif data_selection == 'trending_favs':
            frame = filter_by_col('fav_count', thresh)
        return frame[frame['date'].isin(date_range)]

    def _daily_counts(rows, value_col):
        # rows: [date, value, 1] triples, one per occurrence. Sum occurrences
        # per (date, value) and drop rare values. reset_index() replaces the
        # old hand-rolled MultiIndex-unpacking loop.
        exploded = pd.DataFrame(rows, columns=['date', value_col, 'count'])
        summed = exploded.groupby(['date', value_col])['count'].sum().reset_index()
        # NOTE(review): assumes min_count is an int; a cleared numeric input
        # would arrive as None and raise here (same as the original code).
        return summed[summed['count'] > min_count]

    def _style(fig):
        # Shared dark styling + bars ordered by descending total.
        fig.update_layout(
            plot_bgcolor=colors['background'],
            paper_bgcolor=colors['background'],
            font_color=colors['text'],
            xaxis={'categoryorder': 'total descending'}
        )
        return fig

    def _overall_bar(summed, x_col):
        fig = _style(px.bar(summed, x=x_col, y='count', color='date'))
        if max_x is not None:
            # Show only the first max_x categories.
            fig.update_xaxes(range=(-0.5, int(max_x) - 0.5))
        return fig

    def _daily_bar(summed, color_col):
        return _style(px.bar(summed, x='date', y='count', color=color_col))

    start_date_obj = datetime.strptime(start_date, '%Y-%m-%d')
    end_date_obj = datetime.strptime(end_date, '%Y-%m-%d')
    # Half-open range: the end date itself is excluded, matching the picker.
    date_range = [d.strftime('%Y-%m-%d') for d in pd.date_range(
        start_date_obj, end_date_obj - timedelta(days=1), freq='d')]
    df = _load_frame()

    if organizations_selection == 'organizations':
        # One row per organization reference. The CSV stores lists as strings
        # (to_csv artifact), so parse them back with ast.literal_eval.
        rows = []
        for _, row in df.iterrows():
            for org in ast.literal_eval(row['tweet_mentioned_organizations']):
                rows.append([row['date'], org, 1])
        summed = _daily_counts(rows, 'organization')
        if analysis_selection == 'overall':
            return _overall_bar(summed, 'organization')
        elif analysis_selection == 'Daily':
            return _daily_bar(summed, 'organization')
    elif organizations_selection == 'tags':
        # One row per industry tag attached to a tweet.
        rows = []
        for _, row in df.iterrows():
            for tag in ast.literal_eval(row['tweet_tags_classified']).keys():
                rows.append([row['date'], tag, 1])
        summed = _daily_counts(rows, 'tag')
        if analysis_selection == 'overall':
            return _overall_bar(summed, 'tag')
        elif analysis_selection == 'Daily':
            return _daily_bar(summed, 'tag')
    elif organizations_selection == 'hashtags':
        # One row per hashtag occurrence.
        rows = []
        for _, row in df.iterrows():
            for hashtag in ast.literal_eval(row['hashtags']):
                rows.append([row['date'], hashtag, 1])
        summed = _daily_counts(rows, 'hashtag')
        if analysis_selection == 'overall':
            return _overall_bar(summed, 'hashtag')
        elif analysis_selection == 'Daily':
            return _daily_bar(summed, 'hashtag')
    elif organizations_selection == 'phrases':
        # Static word-cloud image embedded in an otherwise empty figure.
        fig = go.Figure()
        img_width = 1600
        img_height = 900
        scale_factor = 0.5
        # Invisible scatter trace so plotly's autoresize logic sizes the canvas.
        fig.add_trace(
            go.Scatter(
                x=[0, img_width * scale_factor],
                y=[0, img_height * scale_factor],
                mode="markers",
                marker_opacity=0
            )
        )
        fig.update_xaxes(visible=False, range=[0, img_width * scale_factor])
        fig.update_yaxes(
            visible=False,
            range=[0, img_height * scale_factor],
            # scaleanchor keeps the image aspect ratio constant on zoom.
            scaleanchor="x"
        )
        fig.add_layout_image(
            dict(
                x=0,
                sizex=img_width * scale_factor,
                y=img_height * scale_factor,
                sizey=img_height * scale_factor,
                xref="x",
                yref="y",
                opacity=1.0,
                layer="below",
                source="https://raw.githubusercontent.com/sidwan02/isbdiri-privacy-twitter-analysis/main/results/phrases_cloud.png?token=AQ3LMOJZNGN2KT5CHVZ6PMTAB6D5W")
        )
        return fig
    elif organizations_selection == 'weighted':
        # Collapse polarity to +/-1 (0.7 is the positive threshold), weight by
        # virality (retweets + favourites), then average per day.
        df['vader_polarity'] = np.where(df['vader_polarity'] < 0.7, -1, 1)
        df['virality'] = df['retweet_count'] + df['fav_count']
        df['consolidated_sentiment'] = df['virality'] * df['vader_polarity']
        sentiment_by_day = df.groupby('date')['consolidated_sentiment'].sum()
        virality_by_day = df.groupby('date')['virality'].sum()
        # Both series come from groupby('date'), so their indexes align.
        sum_df = pd.DataFrame({
            'date': sentiment_by_day.index,
            'privacy_sentiment': sentiment_by_day.values / virality_by_day.values,
        })
        return px.line(sum_df, x='date', y='privacy_sentiment')
@ app.callback(
dash.dependencies.Output('choice_organizations', 'options'),
[dash.dependencies.Input('choice_analysis', 'value')])
def update_tweet_property_options(analysis_selection):
    """Offer the property choices that make sense for the analysis mode."""
    shared = [
        {'label': 'Organizations Mentioned', 'value': 'organizations'},
        {'label': 'Industry', 'value': 'tags'},
        # Hashtags option deliberately omitted in both modes.
    ]
    if analysis_selection == 'Daily':
        property_options = shared + [
            {'label': 'Privacy Sentiment', 'value': 'weighted'}]
    elif analysis_selection == 'overall':
        property_options = shared + [
            {'label': 'Frequent Phrases', 'value': 'phrases'}]
    return property_options
@ app.callback(
dash.dependencies.Output('choice_orgs_selection', 'style'),
[dash.dependencies.Input('choice_all_tweets', 'value')])
def update_org_centric_input_visibility(choice_tweets):
    """Show the organization picker only when org-tagged tweets are selected."""
    display_by_scope = {'tweets_all': 'none', 'tweets_orgs': 'block'}
    if choice_tweets in display_by_scope:
        return {'display': display_by_scope[choice_tweets]}
@ app.callback(
dash.dependencies.Output('choice_orgs_selection', 'options'),
[dash.dependencies.Input('choice_all_tweets', 'value')])
def update_org_selection_options(choice_tweets):
    """Build the organization multi-select options for the chosen tweet scope.

    Returns [] when all tweets are in scope; otherwise one {'label', 'value'}
    option per unique organization mentioned anywhere in the consolidated data.
    """
    if choice_tweets == 'tweets_all':
        return []
    elif choice_tweets == 'tweets_orgs':
        df = pd.read_csv('results/consolidated_date_formatted.csv')
        # The column holds stringified Python lists (to_csv artifact);
        # parse each cell back into a real list.
        org_lists = []
        try:
            for raw in df['tweet_mentioned_organizations']:
                org_lists.append(ast.literal_eval(raw))
        except (ValueError, SyntaxError):
            # Malformed cell: keep whatever parsed so far.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            print("An exception occurred")
        # Flatten and de-duplicate; sorted() matches the ordering the old
        # np.unique(...) produced without the O(n^2) reduce-concatenation.
        orgs = sorted({org for org_list in org_lists for org in org_list})
        return [{'label': org, 'value': org} for org in orgs]
@ app.callback(
dash.dependencies.Output('choice_orgs_selection', 'value'),
[dash.dependencies.Input('choice_all_tweets', 'value'),
dash.dependencies.Input('choice_orgs_selection', 'options')])
def update_org_dafault_selection(choice_tweets, org_options):
    """Pick the default organization selection for the current tweet scope.

    Returns [] for the all-tweets scope, otherwise a one-element list holding
    the first available option's value. Fixed: the original indexed
    org_options[0] unconditionally and raised IndexError when the options
    list was empty (e.g. while the options callback was still pending).
    """
    if choice_tweets == 'tweets_all':
        return []
    elif choice_tweets == 'tweets_orgs':
        if not org_options:
            return []
        return [org_options[0]['value']]
# @ app.callback(
# dash.dependencies.Output('choice_orgs_selection', 'style'),
# [dash.dependencies.Input('choice_org_centric', 'value'),
# dash.dependencies.Input('choice_analysis', 'value')])
# def update_org_selection_visibility(choice_org_centric, analysis_selection):
# if (choice_org_centric == 'org_centric') & (analysis_selection == 'overall'):
# return {'display': 'block'}
# else:
# return {'display': 'none'}
@ app.callback(
dash.dependencies.Output('choice_max_x', 'style'),
[dash.dependencies.Input('choice_analysis', 'value'),
dash.dependencies.Input('choice_organizations', 'value')])
def update_max_x_input_visibility(analysis_selection, organizations_selection):
    """Show the max-x input only for 'overall' charts that have x categories."""
    # Phrases and weighted views have no categorical x axis to clip.
    countable = organizations_selection not in ('phrases', 'weighted')
    if analysis_selection == 'overall' and countable:
        return {'display': 'block'}
    return {'display': 'none'}
@ app.callback(
dash.dependencies.Output('choice_min_count', 'style'),
dash.dependencies.Input('choice_organizations', 'value'))
def update_choice_min_input_visibility(organizations_selection):
    """Hide the min-count input for views that do not filter by count."""
    if organizations_selection in ('phrases', 'weighted'):
        return {'display': 'none'}
    return {'display': 'block'}
# @ app.callback(
# dash.dependencies.Output('choice_max_x', 'placeholder'),
# [dash.dependencies.Input('choice_organizations', 'value')])
# def update_max_x_placeholder_text(organizations_selection):
# if organizations_selection == 'organizations':
# return 'Enter Max Organizations'
# elif organizations_selection == 'tags':
# return 'Enter Max Tags'
# elif organizations_selection == 'hashtags':
# return 'Enter Max Hashtags'
# @ app.callback(
# dash.dependencies.Output('choice_min_count', 'placeholder'),
# [dash.dependencies.Input('choice_organizations', 'value')])
# def update_min_count_placeholder_text(organizations_selection):
# if organizations_selection == 'organizations':
# return 'Enter Min Organization Frequency'
# elif organizations_selection == 'tags':
# return 'Enter Min Tag Frequency'
# elif organizations_selection == 'hashtags':
# return 'Enter Min Hashtag Frequency'
# @ app.callback(
# dash.dependencies.Output('choice_trending_thresh', 'style'),
# [dash.dependencies.Input('choice_consolidated_trending', 'value')])
# def update_thresh_input_visibility(data_selection):
# if data_selection == 'full':
# return {'display': 'none'}
# elif data_selection == 'trending_retweets':
# return {'display': 'block'}
# elif data_selection == 'trending_favs':
# return {'display': 'block'}
@ app.callback(
dash.dependencies.Output('choice_org_centric', 'style'),
[dash.dependencies.Input('choice_analysis', 'value'),
dash.dependencies.Input('choice_all_tweets', 'value')])
def update_analysis_drop_visibility(analysis_selection, tweets_selection):
    """Show the org-centric dropdown only for org-scoped tweets.

    analysis_selection is wired as an Input but currently unused (an earlier
    combined condition was commented out upstream of this version).
    """
    display = 'block' if tweets_selection == 'tweets_orgs' else 'none'
    return {'display': display}
@ app.callback(Output('choice_trending_thresh', 'children'),
Input('choice_trending_thresh_slider', 'value'))
def display_value(value):
    """Echo the trending-threshold slider value as a caption string."""
    return 'Threshold Chosen: {}'.format(value)
# Start the Dash development server when this file is run as a script.
if __name__ == '__main__':
    app.run_server(debug=True)
|
import sys
from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QDoubleSpinBox
from PyQt5.QtWidgets import QApplication, QGridLayout
class Form(QDialog):
    """Currency-conversion dialog: pick source/target currencies and an amount."""

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        # getdata() populates self.rates as a side effect and returns the
        # human-readable rates date.
        date = self.getdata()
        currencies = sorted(self.rates.keys())

        dateLabel = QLabel(date)
        self.fromComboBox = QComboBox()          # source currency picker
        self.fromComboBox.addItems(currencies)
        self.fromSpinBox = QDoubleSpinBox()      # amount to convert
        self.fromSpinBox.setRange(0.01, 10000000.00)
        self.fromSpinBox.setValue(1.00)
        self.toComboBox = QComboBox()            # target currency picker
        self.toComboBox.addItems(currencies)
        self.toLabel = QLabel("1.00")            # converted amount readout

        grid = QGridLayout()
        for widget, row, col in (
            (dateLabel, 0, 0),
            (self.fromComboBox, 1, 0),
            (self.fromSpinBox, 1, 1),
            (self.toComboBox, 2, 0),
            (self.toLabel, 2, 1),
        ):
            grid.addWidget(widget, row, col)
        self.setLayout(grid)

        # Recompute the converted amount whenever any input changes.
        for signal in (
            self.fromComboBox.currentIndexChanged,
            self.toComboBox.currentIndexChanged,
            self.fromSpinBox.valueChanged,
        ):
            signal.connect(self.updateUi)
        self.setWindowTitle("Currency")

    def updateUi(self):
        """Convert the spin-box amount from the source to the target currency."""
        to = str(self.toComboBox.currentText())
        from_ = str(self.fromComboBox.currentText())
        amount = (self.rates[from_]) / self.rates[to] * self.fromSpinBox.value()
        self.toLabel.setText("%0.2f" % amount)

    def getdata(self):
        """Load the hard-coded exchange-rate table; return the rates date label."""
        self.rates = {
            "China yuan": 1,
            "USA dollar": 7,
            "Japan yuan": 0.05
        }
        return "Exchange Rates Date: " + "2021-02-08"
# Script entry: create the Qt application, show the dialog, run the event loop.
# Fixed: a stray '|' dump artifact was fused onto the final line, making it
# invalid Python.
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec()
from typing import List
from leetcode import test
def predicate_the_winner(nums: List[int]) -> bool:
    """LeetCode 486 (Predict the Winner).

    Return True if the first player, with both players picking optimally from
    either end of `nums`, can finish with at least half of the total score.

    Fixes/improvements over the original:
    - The memo table was hard-coded to 21x21, so any input longer than 20
      elements raised IndexError; it is now sized from len(nums).
    - range_sum was O(n) per call; replaced with O(1) prefix-sum lookups.
    Assumes nums is non-empty (as did the original).
    """
    n = len(nums)
    # prefix[i] == sum(nums[:i]); enables O(1) inclusive 1-based range sums.
    prefix = [0] * (n + 1)
    for i, v in enumerate(nums):
        prefix[i + 1] = prefix[i] + v

    # dp[left][right]: best score the player to move can collect from the
    # 1-based inclusive subarray nums[left..right]; -1 marks "not computed".
    dp = [[-1] * (n + 1) for _ in range(n + 1)]

    def range_sum(start: int, end: int) -> int:
        # Sum of nums[start..end], 1-based inclusive.
        return prefix[end] - prefix[start - 1]

    def dfs(left: int, right: int) -> int:
        if dp[left][right] == -1:
            if left == right:
                dp[left][right] = nums[left - 1]
            else:
                # Current player takes everything the opponent does not; the
                # opponent minimizes what remains for us.
                dp[left][right] = range_sum(left, right) - min(
                    dfs(left + 1, right), dfs(left, right - 1)
                )
        return dp[left][right]

    # First player wins (or ties) iff their optimal haul is at least half.
    return dfs(1, n) * 2 >= range_sum(1, n)
# Exercise predicate_the_winner against the LeetCode 486 examples:
# [1, 5, 2] -> the first player cannot win; [1, 5, 233, 7] -> they can.
test(
    predicate_the_winner,
    [
        ([1, 5, 2], False),
        ([1, 5, 233, 7], True),
    ],
)
|
"""
Code that goes along with the Airflow located at:
http://airflow.readthedocs.org/en/latest/tutorial.html
"""
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import date,datetime, timedelta
import os
import getpass
from airflow.operators import WaitGCSOperator
# NOTE(review): this DAG file is Python 2 (print statements, 02/06 integer
# literals below) — it will not parse under Python 3.
# Default task arguments applied to every task created in this DAG.
default_args = {
    'owner': 'alexey.rudenko2002@umusic.com',
    'depends_on_past': False,
    #'start_date': datetime(2017, 9, 26),
    #'start_date': datetime.now(),
    #'email': ['airflow@airflow.com'],
    'email': ['arudenko2002@yahoo.com'],
    'email_on_failure': True,
    'email_on_retry': True,
    'retries': 1,
    'retry_delay': timedelta(minutes=1)
    #'schedule_interval': '30,*,*,*,*',
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}
# Airflow Variables, resolved by the template engine at task-render time.
project = '{{var.value.project}}'
runner = '{{var.value.runner}}'
from_mongodb_users = '{{var.value.from_mongodb_users}}'
artist_track_images = '{{var.value.artist_track_images}}'
playlist_geography = '{{var.value.playlist_geography}}'
playlist_track_action = '{{var.value.playlist_track_action}}'
product = '{{var.value.product}}'
playlist_track_history = '{{var.value.playlist_track_history}}'
playlist_history = '{{var.value.playlist_history}}'
streams = '{{var.value.streams}}'
canopus_resource = '{{var.value.canopus_resource}}'
canopus_name = '{{var.value.canopus_name}}'
mail_alerts_output = '{{var.value.mail_alerts_output}}'
temp_directory = '{{var.value.temp_directory}}'
whom = '{{var.value.whom}}'
gmail = '{{var.value.gmail}}'
mongoDB = '{{var.value.mongodb}}'
outputfile = '{{var.value.outputfile}}'
inputfile = '{{var.value.inputfile}}'
destination_table = '{{var.value.destination_table}}'
environment='{{var.value.environment}}'
#executionDate='{{var.value.executionDate}}'
schedule = '0 15 * * *'
sleep_seconds = '{{var.value.sleep}}'
alsome = '{{var.value.alsome}}'
mongodb = '{{var.value.step_mongodb}}'
enrichment = '{{var.value.step_enrichment}}'
major_sql = '{{var.value.step_major_sql}}'
#schedule = '{{var.value.schedule_interval}}'
#if executionDate is "today":
# NOTE(review): executionDate is the DAG-file *parse* date, not the logical
# run date — confirm this is intended (usual Airflow practice is {{ ds }}).
executionDate=str(datetime.now())[:10]
# Shell command for the Dataflow job that fills artist/track images.
fillTrackArtistImage = 'java -cp /opt/app/swift-subscriptions/track-alerts/SwiftTrendSubscriptions-0.1.jar TrackAction.FillArtistTrackImage' \
    + ' --executionDate '+executionDate +"" \
    + ' --project ' + project + ' --runner ' + runner + ' --from_mongodb_users ' + from_mongodb_users + ' --artist_track_images ' + artist_track_images + '' \
    + ' --playlist_geography ' + playlist_geography + ' --playlist_track_action ' + playlist_track_action + '' \
    + ' --product '+product+' --playlist_track_history '+playlist_track_history+' --playlist_history '+playlist_history + ' --streams ' + streams + '' \
    + ' --canopus_resource '+canopus_resource+' --canopus_name '+canopus_name+'' \
    + ' --outputfile gs://umg-dev/swift_alerts --temp_directory '+temp_directory
print fillTrackArtistImage
sleep_pause = 'sleep 600'
print sleep_pause
# Yesterday's date — the job writes its CSVs under yesterday's partition.
actualDate = str(datetime.now()+timedelta(days=-1))[:10]
# bq load command for the produced CSVs.
# NOTE(review): -F '{' sets '{' as the field delimiter — looks odd for a
# .csv file; confirm against the file format actually produced.
loadTrackArtistImage = "bq load -F '{' umg-dev:swift_alerts.artist_track_images " \
    "gs://umg-dev/swift_alerts/imagesAPI_"+actualDate+"/images_"+actualDate+".csv* " \
    "canopus_id:int64,artist_name:string,isrc:string,track_uri:string,artist_uri:string,track_image:string,artist_image:string,last_update:timestamp"
print loadTrackArtistImage
bucket="umg-dev"
prefix = "swift_alerts/imagesAPI_"+actualDate+"/images_"
# Daily DAG at 10:00; the commented schedule was the previous cadence.
dagTask = DAG(
    'artist_track_images', default_args=default_args,
    #schedule_interval='30 * * * *',
    start_date=datetime(2018, 02, 06, 0, 0, 0),
    schedule_interval='0 10 * * *')
# t10
# Task 1: run the Dataflow image-filling job.
t0 = BashOperator(
    task_id='api_image_table',
    #bash_command='java -cp /opt/app/swift-subscriptions/track-alerts/SwiftTrendSubscriptions-0.1.jar TrackAction.FillArtistTrackImage --executionDate 2017-11-05 --project umg-dev --runner DataflowRunner --temp_directory gs://umg-dev/temp --outputfile gs://umg-dev/swift_alerts',
    bash_command=fillTrackArtistImage,
    dag=dagTask)
# Task 2: block until the expected output files appear in GCS.
GCS_Files = WaitGCSOperator(
    task_id='GCS_Files',
    bucket=bucket,
    prefix=prefix,
    number="2",
    google_cloud_storage_conn_id="google_cloud_default",
    dag=dagTask
)
# Task 3: load the CSVs into BigQuery.
t2 = BashOperator(
    task_id='load_image_table',
    #bash_command='java -cp /opt/app/swift-subscriptions/track-alerts/SwiftTrendSubscriptions-0.1.jar TrackAction.FillArtistTrackImage --executionDate 2017-11-05 --project umg-dev --runner DataflowRunner --temp_directory gs://umg-dev/temp --outputfile gs://umg-dev/swift_alerts',
    bash_command=loadTrackArtistImage,
    dag=dagTask)
# Pipeline ordering: fill -> wait for files -> load.
t0>>GCS_Files>>t2
|
from CallBackOperator import CallBackOperator
from SignalGenerationPackage.DynamicPointsDensitySignal.DynamicPointsDensityUIParameters import DynamicPointsDensityUIParameters
class AccelerationTimeCallBackOperator(CallBackOperator):
    """Wires the acceleration-time slider / line-edit pair to the model."""

    def __init__(self, model):
        super().__init__(model)

    # overridden
    def ConnectCallBack(self, window):
        """Bind the window's acceleration-time widgets and sync initial state."""
        ui = DynamicPointsDensityUIParameters
        self.window = window
        self.setup_callback_and_synchronize_slider(
            validator_min=ui.AccelerationTimeSliderMin,
            validator_max=ui.AccelerationTimeSliderMax,
            validator_accuracy=ui.AccelerationTimeLineEditAccuracy,
            line_edit=window.AccelerationTimelineEdit,
            slider_min=ui.AccelerationTimeSliderMin,
            slider_max=ui.AccelerationTimeSliderMax,
            slider=window.AccelerationTimehorizontalSlider,
            update_slider_func=self.update_acceleration_time_slider,
            update_line_edit_func=self.update_acceleration_time_line_edit,
        )

    def update_acceleration_time_slider(self):
        """Push the line-edit value into the slider."""
        self.update_slider(
            line_edit=self.window.AccelerationTimelineEdit,
            slider=self.window.AccelerationTimehorizontalSlider,
            calc_constant=DynamicPointsDensityUIParameters.AccelerationTimeCalcConstant,
        )

    def update_acceleration_time_line_edit(self):
        """Push the slider value into the line edit and notify the model."""
        self.update_line_edit(
            line_edit=self.window.AccelerationTimelineEdit,
            slider=self.window.AccelerationTimehorizontalSlider,
            calc_constant=DynamicPointsDensityUIParameters.AccelerationTimeCalcConstant,
            update_model_func=self.update_acceleration_time,
        )

    def update_acceleration_time(self, val):
        """Model setter invoked by the line-edit callback."""
        self.model.AccelerationTime = val
|
import pyaudio
import wave
import speech_recognition as sr
# Audio buffer size in frames; only used by the commented-out Microphone line.
CHUNK = 1024
r = sr.Recognizer()
# Fixed energy threshold — ambient-noise calibration below is commented out.
r.energy_threshold = 4000
# obtain audio from microphone
#source = sr.Microphone(device_index=None, sample_rate=16000, chunk_size=CHUNK)
#with source:
with sr.Microphone() as source:
    #print("Calibrating mic...")
    # listen for 5 seconds and create ambient noise energy level
    #r.adjust_for_ambient_noise(source, duration=2)
    print("Spit some fiyah...")
    # Blocks until a phrase is captured from the default microphone.
    audio = r.listen(source)
# recognize speech using Sphinx
try:
    print("Sphinx thinks you said '" + r.recognize_sphinx(audio_data=audio, language="en-US") + "'")
except sr.UnknownValueError:
    print("Sphinx could not understand audio")
except sr.RequestError as e:
    print("Sphinx error; {0}".format(e))
__author__ = 'peace'
def parse():
    """Read source.c and print its top-level if/else path structure."""
    with open("source.c", "r") as src:
        print(outerIfAndBody(src.readlines()))
def recurse(lines):
    """Recursively expand nested if-bodies inside the path structure.

    NOTE(review): this function is never called and looks broken:
    `for index, body in path` tries to unpack single-key dicts, and
    `path[index]` indexes a list with a string key — confirm the intended
    data shape before relying on it.
    """
    paths = outerIfAndBody(lines)
    for path in paths:
        for index, body in path:
            if len(body ) != 0:
                path[index] = outerIfAndBody(body)
                recurse(body)
def outerIfAndBody(lines):
    """Scan C source lines and collect top-level if / else-if / else chains.

    Returns a list of "sibling" groups; each group is a list of
    {condition-index: body-lines} dicts built by appendSibling().

    :param lines: raw source lines (as from file.readlines()).
    """
    searchStr = ''
    gameOn = False        # currently inside an if-chain being scanned
    curIndex = 0          # scan position for brace matching in searchStr
    length = len(lines)
    huntingElse = False   # a block just closed; looking for else/else-if
    nodeCount = 0         # running count of condition nodes seen
    paths = []
    sibling = []          # entries of the chain currently being built
    topIfCount = 0        # node number of the chain's opening `if`
    comingFrom = 0        # 0 = if, 1 = else-if, 2 = else (see appendSibling)
    linesToAppend = []
    for i in range(length):
        line = lines[i].strip()
        if (not gameOn) and str(line).strip().startswith("if"):
            gameOn = True
            topIfCount = nodeCount + 1
        if gameOn:
            searchStr += line
            linesToAppend.append(line)
            if huntingElse:
                if i == length - 1:
                    # End of input closes the chain.
                    appendSibling(comingFrom, sibling, topIfCount, nodeCount, linesToAppend[1:-2], end=True)
                if searchStr.startswith('else if'):
                    appendSibling(comingFrom, sibling, topIfCount, nodeCount, linesToAppend[1:-2])
                    comingFrom = 1
                    huntingElse = False
                    linesToAppend = []
                elif searchStr.startswith('else'):
                    appendSibling(comingFrom, sibling, topIfCount, nodeCount, linesToAppend[1:-2])
                    huntingElse = False
                    comingFrom = 2
                    linesToAppend = []
                elif searchStr.startswith('if'):
                    # A brand-new top-level chain starts: close out the old one.
                    appendSibling(comingFrom, sibling, topIfCount, nodeCount, linesToAppend[1:-2], end=True)
                    huntingElse = False
                    paths.append(sibling)
                    sibling = []
                    comingFrom = 0
                    topIfCount = nodeCount
                    linesToAppend = []
                else:
                    continue
            # find the index of }
            if str(searchStr).find('}', curIndex, len(searchStr)+1) != -1:
                curIndex = str(searchStr).find('}', curIndex, len(searchStr)+1)+1
                leftCount = str(searchStr)[:curIndex].count('}')
                rightCount = str(searchStr)[:curIndex].count('{')
                if leftCount != 0 and leftCount == rightCount:
                    # we have found the end of the if
                    searchStr = searchStr[curIndex:].strip()
                    curIndex = 0
                    huntingElse = True
                    nodeCount += 1
    # add the final sibling group.
    # BUG FIX: the original tested `sibling is not []`, an identity comparison
    # that is always True, so an empty trailing group was appended as well.
    if sibling:
        paths.append(sibling)
    return paths
def appendSibling(comingFrom, sibling, topIfCount, nodeCount, linesToAppend, end=False):
    """Append a path entry (condition-index string -> body lines) to *sibling*.

    comingFrom selects the branch kind: 0 = plain if, 1 = else-if, 2 = else.
    The body is kept only when it contains a nested `if`; otherwise it is
    recorded as empty. With end=True, a closing all-False entry is also added
    (not applicable to the `else` case).
    """
    # Drop the body unless some line opens a nested `if`.
    has_nested_if = any(str(chunk).startswith('if') for chunk in linesToAppend)
    body = linesToAppend if has_nested_if else []

    if comingFrom == 0:
        sibling.append({str(nodeCount) + 'T': body})
        if end:
            sibling.append({str(nodeCount) + 'F': []})
    elif comingFrom == 1:
        # All earlier conditions false, current one true.
        taken = ''.join(str(j) + 'F' for j in range(topIfCount, nodeCount)) + str(nodeCount) + 'T'
        sibling.append({taken: body})
        if end:
            # Everything false through the current node.
            fallthrough = ''.join(str(j) + 'F' for j in range(topIfCount, nodeCount + 1))
            sibling.append({fallthrough: []})
    elif comingFrom == 2:
        # Plain else: all prior conditions false.
        prefix = ''.join(str(j) + 'F' for j in range(topIfCount, nodeCount))
        sibling.append({prefix: body})
parse()
|
from pico2d import *
import game_framework
# import boys_state
import title_state
import time
def enter():
    """State entry hook: record the start time and load the credit logo."""
    global logo,startedOn
    startedOn = time.time()
    logo = load_image('../res/kpu_credit.png')
def exit():
    """State exit hook: release the logo image."""
    global logo
    del logo
    #logo = None
def draw():
    """Render the logo centered on the canvas."""
    clear_canvas()
    logo.draw(400, 300)
    update_canvas()
def update():
    """Advance to the title state after the splash has shown for one second."""
    global startedOn
    shown_for = time.time() - startedOn
    print(shown_for)
    if shown_for < 1.0:
        # Still within the splash window; throttle the loop a bit.
        delay(0.03)
        return
    game_framework.change_state(title_state)
def handle_events():
    """No input handling during the credit splash."""
    pass
def pause():
    """No pause behavior for this state."""
    pass
def resume():
    """No resume behavior for this state."""
    pass
if __name__ == '__main__':
    # Run this state standalone: the module itself acts as the game state.
    import sys
    current_module = sys.modules[__name__]
    open_canvas()
    game_framework.run(current_module)
    close_canvas()
|
#read a particular line from a file. User provides both the line
#number and the file name
file_str = input("Open what file: ")
status = True
while status:
    try:
        # `with` guarantees the handle is closed even when int() raises
        # ValueError below (the original leaked the file on that path).
        with open(file_str) as input_file:
            find_line_str = input("Which line (integer): ")
            find_line_int = int(find_line_str)
            # Count from 1 so the user's number matches the line printed
            # (the original counted from 0 — off by one from its own message).
            for count, line_str in enumerate(input_file, start=1):
                if count == find_line_int:
                    print("Line {} of file {} is {}".format(find_line_int, file_str, line_str))
                    status = False
                    break
            else:
                # for/else: loop finished without break -> line number past EOF.
                print("Line {} of file {} not found ".format(find_line_int, file_str))
    except FileNotFoundError:
        print("File not found")  # fixed message typo ("foundf")
        file_str = input("Open what file:")
    except ValueError:
        print("Line", find_line_str, "isn't a legal line number")
        find_line_str = input("Which line (integer): ")
print("End of program")
import glob
import os
# MOT17 ground-truth class ids to keep (pedestrian-type classes).
classes=['1','2','7']
# Only this sequence is exported (it serves as the validation split).
val='MOT17-13'
data_dir='/home/waiyang/crowd_counting/Dataset/MOT17det/train'
train_data='/home/waiyang/crowd_counting/keras-yolo3/MOT_train.txt'
val_data='/home/waiyang/crowd_counting/keras-yolo3/MOT_val.txt'
MOT_sets={}
for MOTset in glob.glob(os.path.join(data_dir,"*")):
    MOT_dict={}
    MOT_name=MOTset.replace(data_dir,'').replace('/','')
    # Skip every sequence except the validation one.
    if MOT_name!=val:
        continue
    # Index each frame image by its (zero-stripped) frame number.
    for img in glob.glob(os.path.join(MOTset,'img1','*jpg')):
        img_info={}
        img_index=img[:-4].replace(os.path.join(MOTset,'img1'),'').replace('/','').lstrip('0')
        img_info['location']=img
        img_info['bbox']=[]
        MOT_dict[img_index] = img_info
    # gt.txt rows: frame,id,left,top,width,height,conf,class,visibility
    with open (os.path.join(MOTset,'gt','gt.txt'),'r') as f:
        lines=f.readlines()
        for line in lines:
            data=line.split(',')
            if data[7] in classes:
                bbox={}
                bbox['box']=data[2:6]
                bbox['class']='0'  # everything collapses to one YOLO class
                MOT_dict[data[0]]['bbox'].append(bbox)
    print(MOT_dict)  # NOTE(review): debug print of the full dict — noisy
    MOT_sets[MOT_name]=MOT_dict
# Emit keras-yolo3 annotation lines: "<path> x,y,w,h,cls x,y,w,h,cls ..."
with open(val_data,'w') as outfile:
    for k in MOT_sets:
        MOT_dict=MOT_sets[k]
        for i in MOT_dict:
            outfile.write(MOT_dict[i]['location']+' ')
            for bbox in MOT_dict[i]['bbox']:
                for b in bbox['box']:
                    outfile.write(b+',')
                outfile.write(bbox['class'])
                outfile.write(' ')
            outfile.write('\n')
|
class ListNode(object):
    """Singly linked list node holding a value and a next pointer."""
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """Merge two sorted linked lists into one sorted list.

        Nodes are relinked in place; ties prefer nodes from l1.
        Returns the head of the merged list (None if both inputs are None).
        """
        sentinel = ListNode(0)
        tail = sentinel
        while l1 and l2:
            if l1.val <= l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        # At most one list still has nodes; splice the remainder on.
        tail.next = l1 if l1 else l2
        return sentinel.next
#Quick Test Code
# Merges [1, 5] and [3, 4]; c points at the merged head (1 -> 3 -> 4 -> 5).
a = ListNode(1)
a.next = ListNode(5)
b = ListNode(3)
b.next = ListNode(4)
c = Solution().mergeTwoLists(a, b)
|
def string_chunk(*args):
    """Split a string into pieces of length n.

    Expects exactly two positional arguments: (string, n). Returns [] unless
    n is a positive int (exact type check, so bools are rejected) or when the
    argument count is wrong. The final piece may be shorter than n.
    """
    if len(args) != 2:
        return []
    text, size = args
    if type(size) is not int or size < 1:
        return []
    return [text[start:start + size] for start in range(0, len(text), size)]
'''
You should write a function that takes a string and a positive integer n,
splits the string into parts of length n and returns them in an array.
It is ok for the last element to have less than n characters.
If n is not a valid size (> 0) (or is absent), you should return an empty array.
If n is greater than the length of the string, you should return an array with
the only element being the same string.
Examples:
string_chunk('codewars', 2) # ['co', 'de', 'wa', 'rs']
string_chunk('thiskataeasy', 4) # ['this', 'kata', 'easy']
string_chunk('hello world', 3) # ['hel', 'lo ', 'wor', 'ld']
string_chunk('sunny day', 0) # []
'''
|
import sys
import os
# Competitive-programming harness: feed stdin from a local test-input file.
f = open("C:/Users/user/Documents/atCoderProblem/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
# NOTE(review): a coding declaration mid-file has no effect — it must be on
# the first or second line to matter.
n = input()
digit = len(n)  # number of digits of n (taken from its string form)
n = int(n)
def make753(num):
    """Return all digit strings over {7, 5, 3} of lengths 1..num.

    Generated breadth-first: all length-1 strings, then length-2, etc.;
    within a level, each previous string is extended by 7, 5, 3 in order.
    """
    result = []
    frontier = [""]
    for _ in range(num):
        next_level = []
        for prefix in frontier:
            for digit in "753":
                next_level.append(prefix + digit)
        result.extend(next_level)
        frontier = next_level
    return result
cand = make753(digit)
# A "753-number" must contain each of the digits 7, 5 and 3 at least once.
cand = [c for c in cand if "3" in c and "5" in c and "7" in c]
# Compare numerically, smallest first.
cand = sorted(int(c) for c in cand)
# Answer: how many 753-numbers are <= n.
print(sum(1 for c in cand if c <= n))
|
from django.conf.urls import url
from .controllers import generate, rearrange, home
# URL routes: /generate, /rearrange and the index page.
urlpatterns = [
    url(r'^generate$', generate),
    # NOTE(review): no trailing '$', so any path *starting with* "rearrange"
    # matches — confirm this is intended.
    url(r'^rearrange', rearrange),
    url(r'^$', home),
]
|
import boto3
import json
def get_lambda_info():
    """Print a JSON summary of Lambda function configurations in all regions.

    For every region visible to the account, lists the Lambda functions and
    prints a JSON document with a fixed subset of each function's
    configuration fields.

    Side effects: AWS API calls via boto3; prints to stdout. Returns None.
    """
    # Fields to keep from each function's Configuration block.
    req_keys = ['FunctionName', 'FunctionArn', 'Runtime', 'Role', 'CodeSize',
                'Description', 'Timeout', 'MemorySize', 'LastModified']
    # choosing ec2 to get region names
    ec2 = boto3.client('ec2')
    regions = [region['RegionName'] for region in ec2.describe_regions()['Regions']]
    lambda_info = []
    # looping through regions
    for region in regions:
        conn = boto3.client('lambda', region_name=region)
        # NOTE(review): list_functions() returns a single page (max 50);
        # use the ListFunctions paginator if any region can exceed that.
        for summary in conn.list_functions()['Functions']:
            config = conn.get_function(FunctionName=summary['FunctionName'])['Configuration']
            # Build the record keyed by req_keys directly. The original
            # collected values in the *response's* key order and then zipped
            # them with req_keys' order, which silently mislabels fields
            # whenever the two orders differ.
            lambda_info.append({k: config[k] for k in req_keys if k in config})
    # Key spelling kept for output compatibility (sic: "Fucntions").
    final_dict = {"Lambda Fucntions": lambda_info}
    # Convert to json (default=str handles datetime values like LastModified).
    json_lambda = json.dumps(final_dict, indent=4, default=str)
    print(json_lambda)
get_lambda_info()
|
import getopt
import sys
# Demonstrates getopt: -o/--output take a value, -v/--verbose is a flag,
# --version takes a value (long option only).
version = '1.0'
verbose = False
output_filename = 'default.out'
print('ARGV   :', sys.argv[1:])
options, remainder = getopt.getopt(
    sys.argv[1:],
    'o:v',
    ['output=', 'verbose', 'version=']
)
print('OPTIONS   :', options)
for opt, arg in options:
    if opt in ('-o', '--output'):
        output_filename = arg
    elif opt in ('-v', '--verbose'):
        verbose = True
    elif opt == '--version':
        version = arg
print('VERSION   :', version)
print('VERBOSE   :', verbose)
print('OUTPUT    :', output_filename)
print('REMAINING :', remainder)
'''
python getopt_example.py -o foo
python getopt_example.py -ofoo
python getopt_example.py --output foo
python getopt_example.py --output=foo
ARGV         : ['--output=foo']
OPTIONS      : [('--output', 'foo')]
VERSION      : 1.0
VERBOSE      : False
OUTPUT       : foo
REMAINING    : []
'''
|
from common.run_method import RunMethod
import allure
# Thin allure-instrumented wrappers around RunMethod.run_request for the
# online-homework endpoints of service-question.
# NOTE(review): the step/name strings below are mojibake (double-encoded
# UTF-8) and several contain raw line breaks that are syntax errors —
# restore the original Chinese strings from version control. They are
# reproduced unchanged here because they are runtime data.
@allure.step("ๆ็ /ๅจ็บฟไฝไธ/ๆๅธ้APP่ทๅ็ญ็บงๅ่กจ")
def online_homework_getOnlineHomeworkClasses_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "ๆ็ /ๅจ็บฟไฝไธ/ๆๅธ้APP่ทๅ็ญ็บงๅ่กจ"
    url = f"/service-question/online/homework/getOnlineHomeworkClasses"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("ๆ็ /ๅจ็บฟไฝไธ/ๆๅธ้APP้่ฟ็ญ็บงid่ทๅ่ฏพๆฌกไฟกๆฏ")
def online_homework_getClassScheduleByClassId_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "ๆ็ /ๅจ็บฟไฝไธ/ๆๅธ้APP้่ฟ็ญ็บงid่ทๅ่ฏพๆฌกไฟกๆฏ"
    url = f"/service-question/online/homework/getClassScheduleByClassId"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("ๆ็ /ๅจ็บฟไฝไธ/ๆๅธ้APPๆ นๆฎ็ญ็บงid๏ผ่ฏพๆฌกid่ทๅๅญฆ็ๅๅ")
def online_homework_getStudentInfoByClassIdAndClassScheduleId_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "ๆ็ /ๅจ็บฟไฝไธ/ๆๅธ้APPๆ นๆฎ็ญ็บงid๏ผ่ฏพๆฌกid่ทๅๅญฆ็ๅๅ"
    url = f"/service-question/online/homework/getStudentInfoByClassIdAndClassScheduleId"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("ๆ็ /้ขๅบ/่ทๅๅญฆ็ไฝไธไฟกๆฏ")
def online_homework_getHomeworkStudentScheduleInfo_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "ๆ็ /้ขๅบ/่ทๅๅญฆ็ไฝไธไฟกๆฏ"
    url = f"/service-question/online/homework/getHomeworkStudentScheduleInfo"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("ๆ็ /้ขๅบ/ไฟๅญๅญฆ็็กฎ่ฎคๅ็็ญ้ขไฟกๆฏ")
def online_homework_confirmOnlineHomeworkImageInfo_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "ๆ็ /้ขๅบ/ไฟๅญๅญฆ็็กฎ่ฎคๅ็็ญ้ขไฟกๆฏ"
    url = f"/service-question/online/homework/confirmOnlineHomeworkImageInfo"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("้ขๅบ/ๅจ็บฟไฝไธ/่ทๅ่ๅธๅฏนๅบ็ญ็บงๅพ
ๆน้
ไฝไธ็ๅญฆ็ๅ่กจ")
def online_homework_getUnCorrectHomeworkStudentInfo_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "้ขๅบ/ๅจ็บฟไฝไธ/่ทๅ่ๅธๅฏนๅบ็ญ็บงๅพ
ๆน้
ไฝไธ็ๅญฆ็ๅ่กจ"
    url = f"/service-question/online/homework/getUnCorrectHomeworkStudentInfo"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("้ขๅบ/ๅจ็บฟไฝไธ/่ทๅ็ญ็บง่ฎฒๆฌกๅฏนๅบๆชๆไบค๏ผๅทฒๆไบค๏ผๅทฒๆน้
ๅ่กจ")
def online_homework_getHomeworkStudentRecordByStatus_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "้ขๅบ/ๅจ็บฟไฝไธ/่ทๅ็ญ็บง่ฎฒๆฌกๅฏนๅบๆชๆไบค๏ผๅทฒๆไบค๏ผๅทฒๆน้
ๅ่กจ"
    url = f"/service-question/online/homework/getHomeworkStudentRecordByStatus"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("้ขๅบ/ๅจ็บฟไฝไธ/ๅญฆ็ๆ็
งไธไผ ")
def online_homework_addHomeworkStudentUploadPage_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "้ขๅบ/ๅจ็บฟไฝไธ/ๅญฆ็ๆ็
งไธไผ "
    url = f"/service-question/online/homework/addHomeworkStudentUploadPage"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("้ขๅบ/ๅจ็บฟไฝไธ/ๆดๆฐๅญฆ็ไฝไธ่ฎฐๅฝ")
def online_homework_modHomeworkStudentRecord_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "้ขๅบ/ๅจ็บฟไฝไธ/ๆดๆฐๅญฆ็ไฝไธ่ฎฐๅฝ"
    url = f"/service-question/online/homework/modHomeworkStudentRecord"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("้ขๅบ/ๅจ็บฟไฝไธ/่ทๅ็ญ็บง่ฏพๆฌกๅฏนๅบ็ไฝไธ้กตๆฐ")
def online_homework_getHomeworkStudentUploadPageNumber_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "้ขๅบ/ๅจ็บฟไฝไธ/่ทๅ็ญ็บง่ฏพๆฌกๅฏนๅบ็ไฝไธ้กตๆฐ"
    url = f"/service-question/online/homework/getHomeworkStudentUploadPageNumber"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("้ขๅบ/ๅจ็บฟไฝไธ/่ๅธๆนๆณจไฝไธๆญฃ่ฏฏ")
def online_homework_correctHomeworkWhetherRight_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "้ขๅบ/ๅจ็บฟไฝไธ/่ๅธๆนๆณจไฝไธๆญฃ่ฏฏ"
    url = f"/service-question/online/homework/correctHomeworkWhetherRight"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("้ขๅบ/ๅจ็บฟไฝไธ/่ๅธๆนๆณจไฝไธ")
def online_homework_correctHomeworkAnalyse_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: if True (default) return the parsed JSON response
    :param host: target environment, passed through **kwargs
    :return: parsed JSON response, or the raw response when return_json=False
    '''
    name = "้ขๅบ/ๅจ็บฟไฝไธ/่ๅธๆนๆณจไฝไธ"
    url = f"/service-question/online/homework/correctHomeworkAnalyse"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
|
###MetabolomicsParser
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains methods for reading the HMDB and storing relationships"""
import sys, string
import os.path
import unique
import export
import update; reload(update)
import OBO_import
import gene_associations
def filepath(filename):
    """Resolve *filename* to a full path via the unique module."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """List *sub_dir* keeping only .txt/.csv entries (drops folder names)."""
    entries = unique.read_directory(sub_dir)
    return [entry for entry in entries if entry[-4:] in ('.txt', '.csv')]
def cleanUpLine(line):
    """Strip newline, literal backslash-c, carriage return and double quotes.

    Note: '\c' is not an escape sequence — it removes the two-character
    sequence backslash+c, matching the original behavior.
    """
    for junk in ('\n', '\c', '\r', '"'):
        line = line.replace(junk, '')
    return line
def downloadHMDBMetaboCardFlatFile():
    """Download and unpack the current HMDB metabocards archive into BuildDBs/HMDB/."""
    url = 'http://www.hmdb.ca/public/downloads/current/metabocards.zip'
    fln,status = update.download(url,'BuildDBs/HMDB/','')
def downloadKEGGPathwayIDs():
    """Download the KEGG pathway-id -> title table into BuildDBs/HMDB/."""
    url = 'ftp://ftp.genome.jp/pub/kegg/pathway/map_title.tab'
    fln,status = update.download(url,'BuildDBs/HMDB/','')
class HMBDInfo:
    """One HMDB metabocard entry (note: class name misspells "HMDB").

    Built from a dict of metabocard field name -> field text; exposes the
    identifiers plus KEGG/SMPDB pathway and enzyme-name lists. Relies on the
    module-global kegg_pathways mapping populated by
    importHMDBMetaboCardFlatFile().
    """
    def __init__(self,entry_data):
        self.kegg_list=[]; self.smpdb_list=[]; self.protein_names=[]
        # Normalize the HMDB placeholder to an empty string.
        for name in entry_data:
            if entry_data[name] == 'Not Available': entry_data[name] = ''
        self.hmdb_id = entry_data['hmdb_id']
        self.description = entry_data['description']
        self.name = entry_data['name']
        self.secondary_id = entry_data['secondary_id']
        self.iupac = entry_data['iupac']
        self.biocyc_id = entry_data['biocyc_id']
        self.cas_number = entry_data['cas_number']
        self.chebi_id = entry_data['chebi_id']
        self.pubchem_compound_id = entry_data['pubchem_compound_id']
        self.kegg_compound_id = entry_data['kegg_compound_id']
        # Collect pathway and enzyme fields (pathway_N_kegg_id,
        # pathway_N_smpdb_id, metabolic_enzyme_N_gene_name, ...).
        for name in entry_data:
            if 'kegg_id' in name and 'pathway' in name:
                id = entry_data[name][3:]  # strip the 'map' prefix of the KEGG id
                if id in kegg_pathways:
                    KEGG_pathway = kegg_pathways[id][0]
                    self.kegg_list.append(KEGG_pathway+':'+entry_data[name])
            elif 'smpdb' in name and 'pathway' in name:
                SMPDB_name = string.replace(name,'smpdb_id','name')
                SMPDB_pathway = entry_data[SMPDB_name]
                if len(entry_data[name])>0:
                    self.smpdb_list.append(SMPDB_pathway+':'+entry_data[name])
            elif 'metabolic_enzyme' in name and 'gene_name' in name:
                if len(entry_data[name])>0:
                    self.protein_names.append(entry_data[name])
    def HMDB(self): return self.hmdb_id
    def Description(self): return self.description
    def Name(self): return self.name
    def SecondaryIDs(self): return self.secondary_id
    def IUPAC(self): return self.iupac
    def CAS(self): return self.cas_number
    def CheBI(self): return self.chebi_id
    def KEGGCompoundID(self): return self.kegg_compound_id
    def KEGG(self): return self.kegg_list
    def SMPDB(self): return self.smpdb_list
    def Pathways(self): return self.kegg_list+self.smpdb_list
    def PathwaysStr(self): return string.join(self.Pathways(),',')
    def BioCyc(self):return self.biocyc_id
    def PubChem(self):return self.pubchem_compound_id
    def ProteinNames(self):return self.protein_names
    def ProteinNamesStr(self):return string.join(self.protein_names,',')
    def __repr__(self): return self.HMDB()
def importHMDBMetaboCardFlatFile():
    """Parse the metabocards flat file into HMBDInfo objects and export them.

    The flat file alternates '# field_name' header lines with value lines,
    separated by blank lines; 'END_METABOCARD' closes each entry. Populates
    the module-global kegg_pathways table first, then calls exportTables().
    (Python 2 code: xreadlines, print statement.)
    """
    filename = 'BuildDBs/HMDB/metabocards.txt'
    # Only these metabocard fields are retained per entry.
    fields_to_store = ['hmdb_id','description','name','secondary_id','iupac','biocyc_id','cas_number','chebi_id','kegg_compound_id','pubchem_compound_id','pathway_1_kegg_id']
    fields_to_store+= ['metabolic_enzyme_1_gene_name','metabolic_enzyme_1_swissprot_id','metabolic_enzyme_2_gene_name','metabolic_enzyme_2_swissprot_id']
    fields_to_store+= ['pathway_1_smpdb_id','pathway_2_smpdb_id','pathway_3_smpdb_id','pathway_1_name','pathway_2_name','pathway_3_name','pathway_2_kegg_id','pathway_3_kegg_id']
    fn=filepath(filename); field_data=''; field_name=''; entry_data={}; hmdb=[]; global kegg_pathways
    kegg_pathways = gene_associations.importGeneric('BuildDBs/HMDB/map_title.tab'); x=0
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        if len(data)>0:
            # '#' introduces a field name; other non-blank lines are its value.
            if data[0]=='#': field_name = data[2:-1]
            else: field_data += data
        else:
            # Blank line terminates the current field.
            if field_name in fields_to_store:
                entry_data[field_name] = field_data
            #else: print [field_name]
            field_name=''; field_data=''
        if 'END_METABOCARD' in data:
            # Entry complete: wrap it and start collecting the next one.
            ed = HMBDInfo(entry_data)
            hmdb.append(ed)
            entry_data={}
            x+=1
            #if x>5: break
    print len(hmdb),'HMDB entries obtained'
    exportTables(hmdb)
def exportTables(metabolite_list):
    """Write HMDB metabolite tables into every installed species database.

    For each species directory under /Databases, exports: the main HMDB gene
    table, HMDB-to-{CAS, ChEBI, PubChem, KEGG-compound} id maps, an
    HMDB-to-pathway (MAPP) table, and a CAS denominator list. Optionally
    (flag below) infers GO/MAPP terms from associated enzyme gene symbols.
    (Python 2 code: print statements, string module.)
    """
    infer_enzyme_to_metabolite_pathway_data = 'no'
    current_species_dirs = unique.returnDirectories('/Databases')
    ### Save results to all species directories
    for species_code in current_species_dirs:
        print 'Exporting metabolite data for:',species_code
        gene_dir = 'Databases/'+species_code+'/gene/HMDB.txt'
        gene_data = export.ExportFile(gene_dir)
        hmdb_cas_dir = 'Databases/'+species_code+'/uid-gene/HMDB-CAS.txt'
        hmdb_cas_data = export.ExportFile(hmdb_cas_dir)
        hmdb_chebi_dir = 'Databases/'+species_code+'/uid-gene/HMDB-ChEBI.txt'
        hmdb_chebi_data = export.ExportFile(hmdb_chebi_dir)
        hmdb_pubchem_dir = 'Databases/'+species_code+'/uid-gene/HMDB-PubChem.txt'
        hmdb_pubchem_data = export.ExportFile(hmdb_pubchem_dir)
        hmdb_keggcomp_dir = 'Databases/'+species_code+'/uid-gene/HMDB-KeggCompound.txt'
        hmdb_keggcomp_data = export.ExportFile(hmdb_keggcomp_dir)
        hmdb_mapp_dir = 'Databases/'+species_code+'/gene-mapp/HMDB-MAPP.txt'
        hmdb_mapp_data = export.ExportFile(hmdb_mapp_dir)
        cas_denom_dir = 'Databases/'+species_code+'/gene-mapp/denominator/CAS.txt'
        cas_denom_data = export.ExportFile(cas_denom_dir)
        hmdb_go_dir = 'Databases/'+species_code+'/gene-go/HMDB-GeneOntology.txt'
        if infer_enzyme_to_metabolite_pathway_data == 'yes':
            hmdb_go_data = export.ExportFile(hmdb_go_dir)
        headers = ['hmdb_id','name','description','secondary_id','iupac','cas_number','chebi_id','pubchem_compound_id','Pathways','ProteinNames']
        headers = string.join(headers,'\t')+'\n'
        gene_data.write(headers)
        ### Attempt to add GO and pathway data from database based on associated protein IDs (simple translation from human)
        # Prefer Ensembl gene annotations; fall back to EntrezGene, then empty.
        mod = 'Ensembl'
        try: gene_annotations = gene_associations.importGeneData(species_code,mod)
        except Exception:
            mod = 'EntrezGene'
            try: gene_annotations = gene_associations.importGeneData(species_code,mod)
            except Exception: gene_annotations={}
        # Map lowercased gene symbols back to gene ids for the inference step.
        symbol_associations={}
        for geneid in gene_annotations: symbol_associations[gene_annotations[geneid].SymbolLower()] = geneid
        gotype = 'null'
        try: gene_to_go = gene_associations.importGeneGOData(species_code,mod,gotype)
        except Exception: gene_to_go={}
        try: gene_to_mapp = gene_associations.importGeneMAPPData(species_code,mod)
        except Exception: gene_to_mapp = {}
        for ed in metabolite_list:
            # Main table row for this metabolite.
            values = [ed.HMDB(),ed.Name(),ed.Description(),ed.SecondaryIDs(),ed.IUPAC(),ed.CAS(),ed.CheBI(),ed.PubChem(),ed.PathwaysStr(),ed.ProteinNamesStr()]
            values = string.join(values,'\t')+'\n'; gene_data.write(values)
            if len(ed.Pathways())>1:
                for pathway in ed.Pathways():
                    values = [ed.HMDB(),'Ch',pathway]; values = string.join(values,'\t')+'\n'; hmdb_mapp_data.write(values)
            # Per-identifier cross-reference rows (skip empty ids).
            if len(ed.CAS())>0:
                values = [ed.HMDB(),ed.CAS()]; values = string.join(values,'\t')+'\n'; hmdb_cas_data.write(values)
                values = [ed.CAS(),'Ca']; values = string.join(values,'\t')+'\n'; cas_denom_data.write(values)
            if len(ed.CheBI())>0: values = [ed.HMDB(),ed.CheBI()]; values = string.join(values,'\t')+'\n'; hmdb_chebi_data.write(values)
            if len(ed.PubChem())>0: values = [ed.HMDB(),ed.PubChem()]; values = string.join(values,'\t')+'\n'; hmdb_pubchem_data.write(values)
            if len(ed.KEGGCompoundID())>0: values = [ed.HMDB(),ed.KEGGCompoundID()]; values = string.join(values,'\t')+'\n'; hmdb_keggcomp_data.write(values)
            temp_go={}; temp_mapp={}
            if infer_enzyme_to_metabolite_pathway_data == 'yes':
                ### If associated enzyme annotated, use the gene symbol to find GO terms associated with the gene symbol for the metabolite
                ### Not sure if this is a bad idea or not
                for protein_name in ed.ProteinNames():
                    protein_name = string.lower(protein_name)
                    if protein_name in symbol_associations:
                        geneid = symbol_associations[protein_name]
                        if geneid in gene_to_go:
                            for goid in gene_to_go[geneid]: temp_go[goid]=[]
                        if geneid in gene_to_mapp:
                            for mapp in gene_to_mapp[geneid]: temp_mapp[mapp]=[]
                for goid in temp_go:
                    values = [ed.HMDB(),'GO:'+goid]; values = string.join(values,'\t')+'\n'; hmdb_go_data.write(values)
                for mapp in temp_mapp:
                    values = [ed.HMDB(),'Ch',mapp]; values = string.join(values,'\t')+'\n'; hmdb_mapp_data.write(values)
        # NOTE(review): hmdb_keggcomp_data and cas_denom_data are never
        # closed here — confirm whether ExportFile flushes on GC.
        gene_data.close(); hmdb_mapp_data.close(); hmdb_cas_data.close(); hmdb_chebi_data.close(); hmdb_pubchem_data.close();
        if infer_enzyme_to_metabolite_pathway_data == 'yes':
            hmdb_go_data.close()
        print 'File:',gene_dir,'exported.'
def verifyFile(filename):
    """Return 'yes' if the file at filepath(filename) exists and is readable, else 'no'.

    Only the first line is read, as a cheap existence/readability probe; the
    'yes'/'no' string flags match the convention used throughout this module.
    """
    fn = filepath(filename); file_found = 'yes'
    try:
        # 'with' guarantees the handle is closed; the original opened the file
        # via open(fn,'rU').xreadlines() and leaked the descriptor.
        with open(fn, 'rU') as f:
            for line in f:
                break
    except Exception:
        file_found = 'no'
    return file_found
def buildMetabolomicsDatabase(force):
    """Download (optionally) and import the HMDB MetaboCard flat file.

    :param force: 'yes' re-downloads the HMDB and KEGG source files before
                  importing; any other value imports the existing files.
    """
    ### No need to specify a species since the database will be added only to currently installed species
    if force == 'yes':
        downloadHMDBMetaboCardFlatFile()
        downloadKEGGPathwayIDs()
    # Import runs regardless of whether a fresh download was forced.
    importHMDBMetaboCardFlatFile()
if __name__ == '__main__':
    # Default entry point: build from existing flat files without forcing a
    # re-download, then exit immediately.
    buildMetabolomicsDatabase('no'); sys.exit()
    # Unreachable after sys.exit(): manual helpers kept for interactive use.
    downloadHMDBMetaboCardFlatFile()#;sys.exit()
    downloadKEGGPathwayIDs()
    importHMDBMetaboCardFlatFile()#;sys.exit()
|
"""VLAN Tags Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.helper_functions import validate_vlans
import logging
import warnings
class VlanTags(APIClassTemplate):
    """The VlanTags Object in the FMC."""

    VALID_JSON_DATA = ["id", "name", "type", "data", "description"]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/object/vlantags"
    REQUIRED_FOR_POST = ["name", "data"]

    def __init__(self, fmc, **kwargs):
        """
        Initialize a VlanTags object.

        Sets self.type to "VlanTag" and hands kwargs to the template parser.

        :param fmc: (object) FMC object
        :param kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for VlanTags class.")
        self.type = "VlanTag"
        self.parse_kwargs(**kwargs)

    def vlans(self, start_vlan, end_vlan=""):
        """
        Associate a VLAN (or an inclusive VLAN range) with this object.

        :param start_vlan: (int) Lower VLAN.
        :param end_vlan: (int) Upper VLAN.
        """
        logging.debug("In vlans() for VlanTags class.")
        lower_tag, upper_tag = validate_vlans(start_vlan=start_vlan, end_vlan=end_vlan)
        self.data = {"startTag": lower_tag, "endTag": upper_tag}
class VlanTag(VlanTags):
    """
    Dispose of this Class after 20210101.

    Use VlanTags() instead.
    """

    def __init__(self, fmc, **kwargs):
        # NOTE(review): resetwarnings() clears ALL global warning filters,
        # not just this module's -- process-wide side effect worth confirming.
        warnings.resetwarnings()
        warnings.warn("Deprecated: VlanTag() should be called via VlanTags().")
        # All real behavior is delegated to the replacement class.
        super().__init__(fmc, **kwargs)
|
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#?check_same_thread=False
# SQLAlchemy engine for the local SQLite weather database.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
#sqlite:///Resources/hawaii.sqlite
# Reflect the existing tables into mapped classes.
Base = automap_base()
Base.prepare(engine , reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
# NOTE(review): one module-level Session is shared by every request; fine for
# this exercise but not thread-safe under a production WSGI server.
session = Session(engine)
app = Flask(__name__)
@app.route("/")
def welcome():
    """List every available API route, one per line."""
    routes = [
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/start",
        "/api/v1.0/start/end",
    ]
    # join() reproduces the original literal: a <br/> after each route
    # except the last.
    return "<br/>".join(routes)
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return a JSON dict mapping each date in the final 12 months of data
    to the list of precipitation readings recorded on that date.
    """
    sel = [Measurement.prcp, Measurement.date]
    # Newest row; dates are ISO 'YYYY-MM-DD' strings, so string comparison
    # orders them correctly.
    most_recent_entry = session.query(*sel).order_by(Measurement.id.desc()).first()
    most_recent_date = most_recent_entry[1]
    # Same calendar day one year earlier, built by decrementing the year.
    last_year = int(most_recent_date[:4]) - 1
    one_year_ago = str(last_year) + most_recent_date[4:]
    # Fetch only the final year of rows.  (The original additionally ran an
    # unused query that loaded the entire table into memory.)
    qry2 = session.query(*sel).filter(Measurement.date <= most_recent_date).filter(Measurement.date >= one_year_ago)
    prec_dict = {}
    for prcp, date in qry2:
        # A date can occur once per station, so readings accumulate in a
        # list keyed by date.
        prec_dict.setdefault(date, []).append(prcp)
    return jsonify(prec_dict)
@app.route("/api/v1.0/stations")
def stations():
    """Return a JSON list of all station identifiers in the dataset."""
    rows = session.query(Station.station).all()
    # np.ravel flattens the list of 1-tuples into a flat list of names.
    return jsonify(list(np.ravel(rows)))
@app.route("/api/v1.0/<start>")
def start_date(start):
    """Return [TMIN, TAVG, TMAX] over all dates on or after `start`."""
    stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).all()
    result = []
    # One aggregate row of (min, avg, max); flatten it into the output list.
    for row in stats:
        result.extend(row)
    return jsonify(result)
@app.route("/api/v1.0/tobs")
def tobs():
    """Return a JSON list of temperature observations (tobs) covering the
    final 12 months of data in the set.
    """
    # Newest date in the table (ISO string, so string comparison is safe).
    last_date = session.query(Measurement.date).order_by(Measurement.id.desc()).first()
    # Same calendar day one year earlier.
    twelve_months_ago = int(last_date[0][:4]) - 1
    past_year = str(twelve_months_ago) + last_date[0][4:]
    # Retrieve the past 12 months of temperature data.
    temp_year_data = session.query(Measurement.tobs).filter(Measurement.date <= last_date[0]).filter(Measurement.date >= past_year).all()
    # Each row is a 1-tuple; unwrap it so the JSON output is a flat list of
    # temperatures.  (The original returned a list of 1-element lists and
    # contained a no-op bare `past_year` expression statement.)
    temp_list = [row[0] for row in temp_year_data]
    return jsonify(temp_list)
@app.route("/api/v1.0/<start1>/<end1>")
def start_end(start1, end1):
    """Return a JSON list [TMIN, TAVG, TMAX] for dates between start1 and
    end1, both inclusive.
    """
    stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start1).filter(Measurement.date <= end1).all()
    results = []
    # One aggregate row of (min, avg, max); flatten it into the output list.
    for row in stats:
        results.extend(row)
    return jsonify(results)
if __name__ == "__main__":
    # Run the Flask development server (debug mode, default port 5000).
    app.run(debug=True, port=5000)
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import numpy as np
import OpenGL.GL as gl
import OpenGL.GLU as glu
import pygame
#local imports
import resources
from common import SETTINGS, COLORS
from screen import Screen
class TextDisplay(Screen):
    """A Screen that renders a single line of text centered on screen.

    The text is rasterized with pygame's font machinery, auto-shrunk when it
    would exceed the screen, and blitted via glDrawPixels.
    """

    def __init__(self,
                 **kwargs
                 ):
        Screen.__init__(self, **kwargs)

    def setup(self,
              text_content = "DEFAULT",
              text_color = 'black',
              text_bgColor = None,
              font_size = 288,
              font_type = None,
              screen_background_color = 'white',
              scale_refObj = None,
              **kwargs
              ):
        """Store text/font options.

        Colors may be names resolved through COLORS or raw color values.
        scale_refObj, if given, is another TextDisplay whose rendered size
        this display will be scaled to match.
        """
        Screen.setup(self,
                     background_color = screen_background_color,
                     **kwargs
                     )
        # If text_bgColor is unspecified, match the background color
        # (renders faster than using alpha).
        if text_bgColor is None:
            text_bgColor = screen_background_color
        self.text_content = text_content
        self.text_color = COLORS.get(text_color, text_color)
        self.text_bgColor = COLORS.get(text_bgColor, text_bgColor)
        self.font_size = font_size
        self.font_type = font_type
        self.scale_refObj = scale_refObj

    def get_coords(self, xPos, yPos):
        """Convert pixel offsets (origin at screen center) to GL coordinates."""
        xPos = self.screen_right * (float(xPos) / (self.screen_width/2))
        yPos = self.screen_top * (float(yPos) / (self.screen_height/2))
        return (xPos, yPos)

    def render(self):
        """Draw the background, then the (possibly rescaled) centered text.

        Bug fix: the original referenced font_size, text_content and
        scale_refObj without the self. prefix inside the shrink-to-fit and
        reference-scaling branches, raising NameError whenever those
        branches executed.
        """
        # This will draw background and vsync_patch.
        Screen.render(self)
        # Resolve the font file path if a named font was specified;
        # pygame.font.Font accepts None for the default font.
        font_path = None
        if self.font_type is not None:
            font_path = resources.get_fontpath(self.font_type)
        # Render textSurface from text_content.
        self.font = pygame.font.Font(font_path, self.font_size)
        rgb = [int(c*255) for c in self.text_color]
        rgb_bg = [int(c*255) for c in self.text_bgColor]
        self.textSurface = self.font.render(self.text_content, 1, rgb, rgb_bg)
        # Scaling font: attempting to render text that is too wide/tall sets
        # the raster position off screen and nothing is rendered.
        if self.textSurface.get_width() > self.screen_width:
            percent_scale = float(self.screen_width) / self.textSurface.get_width()
            self.font = pygame.font.Font(font_path, int(self.font_size * percent_scale))
            self.textSurface = self.font.render(self.text_content, 1, rgb, rgb_bg)
            print("'", self.text_content, "' is too wide for screen; scaling to fit")
        if self.textSurface.get_height() > self.screen_height:
            percent_scale = float(self.screen_height) / self.textSurface.get_height()
            self.font = pygame.font.Font(font_path, int(self.font_size * percent_scale))
            self.textSurface = self.font.render(self.text_content, 1, rgb, rgb_bg)
            print("'", self.text_content, "' is too tall for screen; scaling to fit")
        # Prepare some values for rendering centered text.
        centerOffset_pixels = [-self.textSurface.get_width()/2, -self.textSurface.get_height()/2]
        raster_position = self.get_coords(*centerOffset_pixels)
        textData = pygame.image.tostring(self.textSurface, "RGBA", True)
        # Render text.
        gl.glRasterPos2d(*raster_position)
        gl.glDrawPixels(self.textSurface.get_width(), self.textSurface.get_height(), gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, textData)
        # Scale to match another TextDisplay's last rendered surface if requested.
        if self.scale_refObj is not None:
            # NOTE(review): render_surface is not defined in this class --
            # presumably provided by Screen or the referenced object; confirm.
            ref_surface = self.scale_refObj.render_surface(self.scale_refObj.text_content, self.scale_refObj.font_size)
            width_scale = float(ref_surface.get_width()) / float(self.textSurface.get_width())
            height_scale = float(ref_surface.get_height()) / float(self.textSurface.get_height())
            new_width = int(width_scale * self.textSurface.get_width())
            new_height = int(height_scale * self.textSurface.get_height())
            self.textSurface = pygame.transform.scale(self.textSurface, (new_width, new_height))
################################################################################
# TEST CODE
################################################################################
if __name__ == "__main__":
    # Visual smoke test: show the default text for 5 seconds.
    TD = TextDisplay.with_pygame_display()
    TD.setup(font_size = 288,
             font_type = "FreeMono.ttf",
             screen_background_color = "white",
             )
    TD.run(duration = 5, vsync_value = 10)
|
import subprocess
def getip():
    """Return a list of IPv4 addresses for the interfaces of interest.

    Parses `ifconfig` output positionally; an entry stays "none" when no
    address is found for that interface.
    """
    # Set up the interfaces we are looking for
    interfaces= {"wlan0:":"none"}
    # Get the network interfaces
    #ifconfig=subprocess.Popen("ifconfig", shell=True, stdout=subprocess.PIPE).stdout.read()
    ifconfig=subprocess.check_output("ifconfig",shell=True).decode("utf-8")
    # Go through the lines
    for line in ifconfig.split('\n'):
        # Split up the lines by space (consecutive spaces produce '' entries,
        # which the fixed indexes below rely on)
        parts=line.split(' ')
        # The interface names are in pos 0
        if parts[0] != '':
            interface=parts[0]
        # We only care about lines that have at least 10 parts
        if len(parts)>10:
            # If we find the internet address line
            # NOTE(review): fixed-position parsing assumes one specific
            # ifconfig output layout; fragile across distros -- confirm.
            if parts[8] == "inet":
                # And it's in an interface we care about
                if interface in interfaces:
                    # Update the interfaces list
                    interfaces[interface]=parts[9]
    # Collect just the address values, in dict order.
    display=[]
    for name,value in interfaces.items():
        display.append(value)
    return display
if __name__ == "__main__":
    # Manual check: print the detected addresses.
    print(getip())
|
# Import models
from mmic_docking.models import InputDock
from mmelemental.models import Molecule
from mmic_autodock_vina.models import AutoDockComputeInput
# Import components
from mmic.components.blueprints import GenericComponent
from mmic_cmd.components import CmdComponent
from mmelemental.util.units import convert
from cmselemental.util.decorators import classproperty
from typing import Any, Dict, Optional, Tuple, List
import os
import string
import tempfile
class AutoDockPrepComponent(GenericComponent):
    """Preprocessing component for autodock.

    Converts the ligand and receptor from an InputDock into pdbqt form via
    obabel, then assembles the AutoDock Vina compute parameters (search box,
    output paths).
    """

    @classproperty
    def input(cls):
        # Input schema for this component.
        return InputDock

    @classproperty
    def output(cls):
        # Output schema for this component.
        return AutoDockComputeInput

    @classproperty
    def version(cls):
        return ""

    def execute(
        self, inputs: InputDock, config: Optional["TaskConfig"] = None
    ) -> Tuple[bool, AutoDockComputeInput]:
        """Run the preparation step.

        :return: (success flag, populated AutoDockComputeInput).
        """
        if isinstance(inputs, dict):
            # Allow a raw dict payload; coerce it through the input schema.
            inputs = self.input(**inputs)
        binput = self.build_input(inputs, config)
        return True, AutoDockComputeInput(proc_input=inputs, **binput)

    def build_input(
        self, inputs: InputDock, config: Optional["TaskConfig"] = None
    ) -> Dict[str, Any]:
        """Prepare pdbqt strings for ligand and receptor plus box parameters."""
        if inputs.molecule.ligand.identifiers is None:
            # No identifiers available: convert the ligand structure directly.
            ligand_pdbqt = self.pdbqt_prep(
                inputs.molecule.ligand, config=config, args=["-h"]
            )
        else:
            # Identifiers present: generate 3D coordinates from SMILES.
            ligand_pdbqt = self.smiles_prep(
                smiles=inputs.molecule.ligand.identifiers.smiles
            )
        receptor_pdbqt = self.pdbqt_prep(
            receptor=inputs.molecule.receptor,
            config=config,
            args=["-xrh"],
        )
        inputDict = self.check_computeparams(inputs)
        inputDict["ligand"] = ligand_pdbqt
        inputDict["receptor"] = receptor_pdbqt
        return inputDict

    # helper functions
    def pdbqt_prep(
        self,
        receptor: Molecule,
        config: "TaskConfig" = None,
        args: Optional[List[str]] = None,
    ) -> str:
        """Returns a pdbqt molecule for rigid docking."""
        env = os.environ.copy()
        if config:
            env["MKL_NUM_THREADS"] = str(config.ncores)
            env["OMP_NUM_THREADS"] = str(config.ncores)
        scratch_directory = config.scratch_directory if config else None
        # NOTE(review): NamedTemporaryFile is deleted as soon as the unnamed
        # object is garbage-collected; only its .name is reused below, which
        # is race-prone. tempfile.mkstemp / delete=False would be safer --
        # confirm intent before changing.
        pdb_file = tempfile.NamedTemporaryFile(suffix=".pdb").name
        receptor.to_file(pdb_file, mode="w")
        # Assume protein is rigid and add missing hydrogens
        outfile = tempfile.NamedTemporaryFile(suffix=".pdbqt").name
        command = ["obabel", pdb_file, "-O" + outfile]
        if args:
            command.extend(args)
        obabel_input = {
            "command": command,
            "infiles": [pdb_file],
            "outfiles": [outfile],
            "scratch_directory": scratch_directory,
            "environment": env,
        }
        obabel_output = CmdComponent.compute(obabel_input)
        final_receptor = obabel_output.outfiles[outfile]
        return final_receptor

    def smiles_prep(self, smiles: str, config: Optional["TaskConfig"] = None) -> str:
        """Returns a pdbqt molecule from smiles for rigid docking."""
        env = os.environ.copy()
        if config:
            env["MKL_NUM_THREADS"] = str(config.ncores)
            env["OMP_NUM_THREADS"] = str(config.ncores)
        scratch_directory = config.scratch_directory if config else None
        smi_file = tempfile.NamedTemporaryFile(suffix=".smi").name
        with open(smi_file, "w") as fp:
            fp.write(smiles)
        outfile = tempfile.NamedTemporaryFile(suffix=".pdbqt").name
        obabel_input = {
            # --gen3d generates 3D coordinates; -h adds hydrogens.
            "command": [
                "obabel",
                smi_file,
                "-O" + outfile,
                "--gen3d",
                "-h",
            ],
            "infiles": [smi_file],
            "outfiles": [outfile],
            "scratch_directory": scratch_directory,
            "environment": env,
        }
        obabel_output = CmdComponent.compute(obabel_input)
        final_ligand = obabel_output.outfiles[outfile]
        return final_ligand

    def check_computeparams(self, input_model: InputDock) -> Dict[str, Any]:
        """Derive the Vina search box (center/size, in angstrom) and the
        output file paths.

        Falls back to the receptor's bounding box when no search_space is
        given.
        """
        geometry = convert(
            input_model.molecule.receptor.geometry,
            input_model.molecule.receptor.geometry_units,
            "angstrom",
        )
        outputDict = {}
        searchSpace = input_model.search_space
        if not searchSpace:
            # Bounding box of the receptor coordinates, per axis.
            xmin, xmax = geometry[:, 0].min(), geometry[:, 0].max()
            ymin, ymax = geometry[:, 1].min(), geometry[:, 1].max()
            zmin, zmax = geometry[:, 2].min(), geometry[:, 2].max()
        else:
            xmin, xmax, ymin, ymax, zmin, zmax = convert(
                searchSpace, input_model.search_space_units, "angstrom"
            )
        outputDict["center_x"] = (xmin + xmax) / 2.0
        outputDict["size_x"] = xmax - xmin
        outputDict["center_y"] = (ymin + ymax) / 2.0
        outputDict["size_y"] = ymax - ymin
        outputDict["center_z"] = (zmin + zmax) / 2.0
        outputDict["size_z"] = zmax - zmin
        outputDict["out"] = os.path.abspath("autodock.pdbqt")
        outputDict["log"] = os.path.abspath("autodock.log")
        return outputDict
|
import requests
import lxml.html
import sqlite3
class crawer:
    """Crawler for the Melon playlists of one member account.

    Collects playlist detail-page URLs and stores them in a local SQLite
    database.  (Class name 'crawer' [sic] kept for caller compatibility.)
    """

    def __init__(self):
        # Member playlist index page and a minimal User-Agent header.
        self.base_list_url = 'https://www.melon.com/mymusic/playlist/mymusicplaylist_list.htm?memberKey=41920075'
        self.header = { 'User-Agent' : 'Mozilla/5.0'}
        self.init_playlist_url = []
        self.detail_playlist_url = []
        self.html_list= []

    def put_database(self):
        """Insert the collected detail URLs into music.db.

        Returns False when get_detail_play_list() has not been run yet.
        """
        if len(self.detail_playlist_url) == 0 :
            print('์์ธ ํ๋ ์ด๋ฆฌ์คํธ๊ฐ ์ง์ ๋์ง ์์์ต๋๋ค.')
            print('get_detail_play_list()๋ฅผ ์ํํ์ธ์')
            return False
        conn = sqlite3.connect('./music.db')
        db_cursor = conn.cursor()
        db_cursor.executemany('INSERT INTO PLAYLIST(playlist_url) values(?)',self.detail_playlist_url)
        conn.commit()
        conn.close()
        return True

    def get_detail_play_list(self):
        '''
        Collect the URLs used to fetch the list of songs contained in each
        playlist.  Melon renders this part with JavaScript, so a plain GET
        request cannot retrieve it -- the page HTML is collected via selenium.
        '''
        self.init_play_list()
        # init_playlist_url entries have no start index appended yet.
        init_playlist_url = self.init_playlist_url
        from selenium import webdriver
        wd = webdriver.Chrome('./chromedriver')
        count = 0
        for index, playlist_url in enumerate(init_playlist_url) :
            wd.get(playlist_url)
            p_html = wd.page_source
            html_root = lxml.html.fromstring(p_html)
            # The song count is displayed as "(N)" on the page.
            p_count = html_root.xpath("//span[@class='cnt']")[0].text
            p_count = p_count.replace("(","")
            p_count = p_count.replace(")","")
            count = int(p_count)
            # Pages hold 50 songs each; build one URL per page start index.
            page_num = count//50
            page_list = [str(p_page * 50 + 1) for p_page in range(page_num + 1) ]
            #if page_list == [''] : page_list = ['1']
            for start_index in page_list :
                # Stored as 1-tuples, ready for executemany() in put_database().
                self.detail_playlist_url.append((playlist_url + start_index,))
                print(self.detail_playlist_url[-1])
        return self.detail_playlist_url

    def init_play_list(self) :
        '''
        Fetch the list of playlists themselves; the songs contained in each
        playlist are not fetched here.
        '''
        base_list_url = self.base_list_url
        header =self.header
        playno_list = []
        r = requests.get(base_list_url,headers=header)
        if r.status_code != 200:
            print('URL์์ GET ์์ฒญ์ ๋ฐ์์ฃผ์ง ์์ต๋๋ค')
            return self.init_playlist_url
        html = r.text
        root_element = lxml.html.fromstring(html)
        # Each like-button carries its playlist number in data-play-no.
        buttons = root_element.xpath("//button[@class='btn_icon like']")
        for button in buttons :
            playno_list.append(button.attrib['data-play-no'])
        # Extra playlists not present on the index page, added manually.
        playno_list.append('475596947')
        playno_list.append('475336153')
        playno_list.append('467370167')
        playno_list.append('456850600')
        for playno in playno_list :
            self.init_playlist_url.append(f"https://www.melon.com/mymusic/playlist/mymusicplaylistview_inform.htm?plylstSeq={playno}#params[plylstSeq]={playno}&po=pageObj&startIndex=")
        return self.init_playlist_url
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Some simple performance benchmarks for beets.
"""
from beets.plugins import BeetsPlugin
from beets import ui
from beets import vfs
from beets import library
from beets.util.functemplate import Template
from beets.autotag import match
from beets import plugins
from beets import importer
import cProfile
import timeit
def aunique_benchmark(lib, prof):
    """Benchmark library tree building with and without %aunique{}.

    :param lib: beets Library whose virtual tree is built.
    :param prof: when true, run under cProfile and write a .prof file;
        otherwise time one run and print the duration.
    """
    def _build_tree():
        vfs.libtree(lib)

    def _measure(path_template, prof_file, label):
        # Install the path format under test, then profile or time the build.
        lib.path_formats = [
            (library.PF_KEY_DEFAULT, Template(path_template)),
        ]
        if prof:
            cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
                            prof_file)
        else:
            print(label, timeit.timeit(_build_tree, number=1))

    # Measure path generation performance with %aunique{} included.
    _measure('$albumartist/$album%aunique{}/$track $title',
             'paths.withaunique.prof', 'With %aunique:')
    # And with %aunique replaced by a "cheap" no-op function.
    _measure('$albumartist/$album%lower{}/$track $title',
             'paths.withoutaunique.prof', 'Without %aunique:')
def match_benchmark(lib, prof, query=None, album_id=None):
    """Benchmark beets' album matching, optionally under cProfile.

    :param lib: beets Library supplying the source album items.
    :param prof: profile with cProfile when true, else time with timeit.
    :param query: library query selecting the source album.
    :param album_id: MusicBrainz release ID to match against.
    """
    # If no album ID is provided, we'll match against a suitably huge
    # album.
    if not album_id:
        album_id = '9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc'
    # Get an album from the library to use as the source for the match.
    items = lib.albums(query).get().items()
    # Ensure fingerprinting is invoked (if enabled).
    plugins.send('import_task_start',
                 task=importer.ImportTask(None, None, items),
                 session=importer.ImportSession(lib, None, None, None))
    # Run the match.
    def _run_match():
        match.tag_album(items, search_ids=[album_id])
    if prof:
        cProfile.runctx('_run_match()', {}, {'_run_match': _run_match},
                        'match.prof')
    else:
        interval = timeit.timeit(_run_match, number=1)
        print('match duration:', interval)
class BenchmarkPlugin(BeetsPlugin):
    """A plugin for performing some simple performance benchmarks.
    """

    def commands(self):
        """Expose the two benchmark subcommands to the beets CLI."""
        def run_aunique(lib, opts, args):
            aunique_benchmark(lib, opts.profile)

        def run_match(lib, opts, args):
            match_benchmark(lib, opts.profile, ui.decargs(args), opts.id)

        aunique_bench_cmd = ui.Subcommand('bench_aunique',
                                          help='benchmark for %aunique{}')
        aunique_bench_cmd.parser.add_option('-p', '--profile',
                                            action='store_true', default=False,
                                            help='performance profiling')
        aunique_bench_cmd.func = run_aunique

        match_bench_cmd = ui.Subcommand('bench_match',
                                        help='benchmark for track matching')
        match_bench_cmd.parser.add_option('-p', '--profile',
                                          action='store_true', default=False,
                                          help='performance profiling')
        match_bench_cmd.parser.add_option('-i', '--id', default=None,
                                          help='album ID to match against')
        match_bench_cmd.func = run_match

        return [aunique_bench_cmd, match_bench_cmd]
|
# -*- python -*-
# Assignment: Car
# Create a class called Car.
# In the__init__(), allow the user to specify the following
# - attributes:
# - price
# - speed
# - fuel
# - mileage
#
# If the price is greater than 10,000, set the tax to be 15%.
# Otherwise, set the tax to be 12%.
#
# Create six different instances of the class Car.
# In the class have a method called display_all() that returns all the information about the car as a string.
# In your __init__(), call this display_all() method to display information about the car once the attributes have been defined.
#
# A sample output would be like this:
#
# Price: 2000
# Speed: 35mph
# Fuel: Full
# Mileage: 15mpg
# Tax: 0.12
# Price: 2000
# Speed: 5mph
# Fuel: Not Full
# Mileage: 105mpg
# Tax: 0.12
# Price: 2000
# Speed: 15mph
# Fuel: Kind of Full
# Mileage: 95mpg
# Tax: 0.12
# Price: 2000
# Speed: 25mph
# Fuel: Full
# Mileage: 25mpg
# Tax: 0.12
# Price: 2000
# Speed: 45mph
# Fuel: Empty
# Mileage: 25mpg
# Tax: 0.12
# Price: 20000000
# Speed: 35mph
# Fuel: Empty
# Mileage: 15mpg
# Tax: 0.15
class Car( object ):
    """A simple car with price, speed, fuel and mileage attributes.

    The tax rate is 15% when price exceeds 10,000, otherwise 12%; per the
    assignment, a car prints its details as soon as it is constructed.
    (Python 2 source: print statements.)
    """
    def __init__( self, price, speed, fuel, mileage ):
        self.price = price
        self.speed = speed
        self.fuel = fuel
        self.mileage = mileage
        # Default tax rate; raised for cars above the 10,000 threshold.
        self.tax = 0.12
        if self.price > 10000:
            self.tax = 0.15
        # Display the details at construction time, as the assignment asks.
        self.display_all()
    def display_all( self ):
        # Print each attribute on its own line.
        print "Price:", self.price
        print "Speed:", "{}mph".format( self.speed )
        print "Fuel:", self.fuel
        print "Mileage:", "{}mpg".format( self.mileage )
        print "Tax:", self.tax
# Six sample cars; only those priced above 10,000 receive the 0.15 tax rate.
car1 = Car( 9000, 80, "Full", 70 )
car2 = Car( 10000, 100, "Not Full", 50 )
car3 = Car( 15000, 120, "Kind of Full", 40 )
car4 = Car( 20000, 125, "Full", 30 )
car5 = Car( 30000, 130, "Empty", 25 )
car6 = Car( 50000, 190, "Almost Empty", 20 )
|
import os
from datetime import datetime
from flask import Flask, request, flash, url_for, redirect, \
render_template, abort, send_from_directory, jsonify, session
import pymongo
app = Flask(__name__)
app.config.from_pyfile('flaskapp.cfg')
@app.route("/", methods=['GET', 'POST'])
def hello():
    """Render app.html with all documents from the test.tim collection."""
    try:
        # Connection URL comes from the OpenShift environment.
        conn=pymongo.MongoClient(os.environ['OPENSHIFT_MONGODB_DB_URL'])
        db = conn.test
        tim = db.tim
        myList = list(tim.find())
        print myList
        return render_template('./app.html', myList = myList)
    except:
        # NOTE(review): bare except hides config/connection errors; the page
        # silently falls back to an empty list.
        print "Failed"
        return render_template('./app.html', myList = [])
@app.route("/val/<post_id>", methods=['GET', 'POST'])
def d(post_id):
    """Insert {'time': post_id} into test.tim, then redirect home."""
    try:
        conn=pymongo.MongoClient(os.environ['OPENSHIFT_MONGODB_DB_URL'])
        db = conn.test
        tim = db.tim
        data = {}
        data['time'] = post_id
        tim.insert(data)
        return redirect('/')
    except:
        # NOTE(review): bare except -- failures are indistinguishable from
        # success for the client (both paths redirect to '/').
        print "Failed"
        return redirect('/')
if __name__ == '__main__':
    # NOTE(review): debug="True" is a truthy *string*, not the boolean True;
    # it still enables debug mode, but the boolean is the conventional value.
    app.run(debug="True")
|
from keras.engine import Model
import numpy as np
from keras.preprocessing import image
from keras.applications.resnet50 import ResNet50, preprocess_input
import argparse
from os import path, listdir, makedirs
def create_model():
    """Build a headless ResNet50 (224x224x3 input, global average pooling).

    No weights are loaded here; the caller may load them afterwards.  The
    model's output is the pooled feature vector.
    """
    model = ResNet50(include_top=False, input_shape=(224, 224, 3), weights=None, pooling='avg')
    return Model(model.input, model.output)
def preprocess(img):
    """Reverse the channel order (RGB -> BGR) and subtract fixed per-channel
    means (presumably the training-set means -- confirm their provenance).

    :param img: channels-last image array of shape (..., 3).
    :return: float32 array; the caller's input array is left unmodified.
    """
    # Work on a float32 copy: the original subtracted in place, which both
    # mutated the caller's array and raised TypeError on integer inputs.
    img = np.array(img, dtype=np.float32)
    img = img[..., ::-1]
    img[..., 0] -= 91.4953
    img[..., 1] -= 103.8827
    img[..., 2] -= 131.0912
    return img
def extract_features(source, destination, weights=None):
    """Run every image under `source` through the CNN and save features as .npy.

    :param source: either a directory of images, or a text file listing full
        image paths (one per line).
    :param destination: output directory; when `source` is a list file, a
        subfolder named after each image's parent directory is created.
    :param weights: optional weight file; when given, the local `preprocess`
        is applied instead of keras' preprocess_input.
    """
    model = create_model()
    if weights is not None:
        print('Loading weights from {}'.format(weights))
        model.load_weights(weights, by_name=True)
    if path.isfile(source):
        # A file: each line is a full path to an image.
        full_path = True
        source_list = np.sort(np.loadtxt(source, dtype=np.str))
    else:
        # A directory: iterate over its entries.
        full_path = False
        source_list = listdir(source)
    for image_name in source_list:
        if not full_path:
            image_path = path.join(source, image_name)
        else:
            image_path = image_name
            image_name = path.split(image_name)[1]
        # Skip anything that is not a recognized image file.
        if not image_path.lower().endswith('.png') and not image_path.lower().endswith('.jpg') \
                and not image_path.lower().endswith('.bmp'):
            continue
        img = image.load_img(image_path, target_size=(224, 224))
        img = image.img_to_array(img)
        # Add the batch dimension expected by predict().
        img = np.expand_dims(img, axis=0)
        if weights is not None:
            # img = img[..., ::-1]
            # img /= 255
            img = preprocess(img)
        if weights is None:
            img = preprocess_input(img)
        features = model.predict(img)
        dest_path = destination
        if full_path:
            # Mirror the image's parent folder under the destination.
            sub_folder = path.basename(
                path.normpath(path.split(image_path)[0]))
            dest_path = path.join(destination, sub_folder)
            if not path.exists(dest_path):
                makedirs(dest_path)
        # Replace the 3-char extension with 'npy' (e.g. img.jpg -> img.npy).
        features_name = path.join(dest_path, image_name[:-3] + 'npy')
        np.save(features_name, features)
if __name__ == '__main__':
    # CLI: extract CNN features for a folder (or list file) of images.
    parser = argparse.ArgumentParser(description='Extract features with CNN')
    parser.add_argument('--source', '-s', help='Folder with images.')
    parser.add_argument('--dest', '-d', help='Folder to save the extractions.')
    parser.add_argument('--weights', '-w', help='Weight path for the network.', default=None)
    args = parser.parse_args()
    if not path.exists(args.dest):
        makedirs(args.dest)
    extract_features(args.source, args.dest, args.weights)
|
from os import system
from colorama import Fore
from subprocess import check_output
system('cls')
print(Fore.GREEN + """
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
Windows Log Deleter
Created By : Mr.Bug Hunter
Telegram ID 1 : @bug_hunter_us
Telegram ID 2 : @Mr_Bug_HunTer
Instagram ID : mr_bug_hunter
""")
# Standard Windows event logs to clear via wevtutil (requires admin rights).
eventlogs = ['Security' , 'Application' , 'System' , 'Setup', 'Internet Explorer']
for event in eventlogs:
    try:
        # "wevtutil cl <log>" clears the named event log.
        check_output(["wevtutil.exe" , "cl" , event.strip("\r")])
        print(Fore.GREEN + "[+] {} Logs Deleted .".format(event))
    except:
        # NOTE(review): bare except -- every failure mode (not elevated,
        # unknown log name, missing wevtutil) is reported identically.
        print(Fore.RED + "[-] {} Logs Not Deleted .".format(event))
input(Fore.GREEN + "[+] " + Fore.WHITE + "Press ENTER To Exit ")
|
def calc(x):
    """Concatenate the ASCII codes of x's characters into one digit string,
    then return the difference between that string's digit sum and the digit
    sum after every '7' is replaced by '1'.

    Each replaced '7' lowers the digit sum by exactly 6 (7 - 1), so the
    result is simply 6 times the number of '7' digits.
    """
    ascii_digits = ''.join(str(ord(ch)) for ch in x)
    return 6 * ascii_digits.count('7')
'''
Given a string, turn each letter into its ASCII character code and join
them together to create a number - let's call this number total1:
'ABC' --> 'A' = 65, 'B' = 66, 'C' = 67 --> 656667
Then replace any incidence of the number 7 with the number 1, and call this number 'total2':
total1 = 656667
^
total2 = 656661
^
Then return the difference between the sum of the digits in total1 and total2:
(6 + 5 + 6 + 6 + 6 + 7)
- (6 + 5 + 6 + 6 + 6 + 1)
-------------------------
6
'''
|
# Exercise answers (comments originally in Chinese, translated):
# 1. No.
# 2. Yes.
# 3. Character to ASCII code: ord()
#    ASCII code to character: chr()
# 4. Operate on the string, then assign the result to another variable.
# 5.
s = 's,pa,m'
s = s.split(',')
# split(',') yields ['s', 'pa', 'm'], so index 1 prints 'pa'.
print(s[1])
# 6.
# '\n', '\x1f' and '\000' are each a single escaped character -> length 6.
print(len('a\nb\x1f\000d'))
|
import unittest
from poker.card import Card
from poker.validators import StraightValidator
class StraightValidatorTest(unittest.TestCase):
    """Unit tests for StraightValidator's detection and extraction of straights."""

    def setUp(self):
        # Nine cards containing one 5-card run (5-6-7-8-9), a duplicated
        # rank (two 7s), a low pair of connectors (3,4) and an unconnected Ace.
        self.three_of_clubs = Card(rank = "3", suit = "Clubs")
        self.four_of_diamonds = Card(rank = "4", suit = "Diamonds")
        self.five_of_spades = Card(rank = "5", suit = "Spades")
        self.six_of_hearts = Card(rank = "6", suit = "Hearts")
        self.seven_of_diamonds = Card(rank = "7", suit = "Diamonds")
        self.seven_of_hearts = Card(rank = "7", suit = "Hearts")
        self.eight_of_clubs = Card(rank = "8", suit = "Clubs")
        self.nine_of_clubs = Card(rank = "9", suit = "Clubs")
        self.ace_of_spades = Card(rank = "Ace", suit = "Spades")
        self.cards = [
            self.three_of_clubs,
            self.four_of_diamonds,
            self.five_of_spades,
            self.six_of_hearts,
            self.seven_of_diamonds,
            self.seven_of_hearts,
            self.eight_of_clubs,
            self.nine_of_clubs,
            self.ace_of_spades
        ]

    def test_validates_that_cards_have_a_straight(self):
        # The full collection holds at least one 5-card run.
        validator = StraightValidator(cards = self.cards)
        self.assertEqual(
            validator.is_valid(),
            True
        )

    def test_returns_a_straight_from_card_collection(self):
        # Expect the 5-through-9 run, using one card per rank.
        validator = StraightValidator(cards = self.cards)
        self.assertEqual(
            validator.valid_cards(),
            [
                self.five_of_spades,
                self.six_of_hearts,
                self.seven_of_diamonds,
                self.eight_of_clubs,
                self.nine_of_clubs
            ]
        )

    def test_does_not_figure_a_2_hand_card_as_Straight(self):
        # Fewer than five cards can never form a straight, even consecutive ones.
        two_cards = [
            Card(rank = "2", suit = "Spades"),
            Card(rank = "3", suit = "Hearts")
        ]
        validator = StraightValidator(cards = two_cards)
        self.assertEqual(
            validator.is_valid(),
            False
        )
import json
from pandas import DataFrame
from pprint import pprint
from binance import Binance
from datetime import datetime
def timestamp_to_real_time(ts):
    """Format a millisecond epoch timestamp as 'YYYY-MM-DD HH:MM:SS' (UTC)."""
    seconds = ts / 1000
    return datetime.utcfromtimestamp(seconds).strftime('%Y-%m-%d %H:%M:%S')
def get_profit_details(coin):
    """Build per-round-trip profit records for one trading pair.

    Pairs each FILLED BUY order with subsequent FILLED SELL orders and
    computes absolute and percentage profit.

    :param coin: pair symbol, e.g. 'XRPBTC'; the last 3 chars are stripped
        to get the base coin name.
    :return: list of trade dicts (times are millisecond epoch timestamps).
    """
    coin_name = coin[:-3]
    # get all orders (uses the module-level `binance` client)
    all_orders = binance.api.get_all_orders(symbol=coin)
    all_orders = list(filter(lambda x: x['status'] == 'FILLED', all_orders))
    # calculation
    position = {}
    all_trades = []
    for order in all_orders:
        side = order['side']
        price = float(order['price'])
        num = float(order['executedQty'])
        time = int(order['time'])
        if side == 'BUY':
            # A new BUY replaces any previously tracked position.
            position = {
                'buy_price': price,
                'buy_time': time,
                'num': num
            }
        else:
            # NOTE(review): a SELL appearing before any BUY raises KeyError
            # on the empty `position` dict -- confirm histories always start
            # with a BUY.
            buy_time = position['buy_time']
            buy_price = position['buy_price']
            profit = float(price - position['buy_price'])
            cur_trade = {
                'coin_name': coin_name,
                'buy_time': buy_time,
                'sell_time': time,
                'buy_price': buy_price,
                'sell_price': price,
                'num': num,
                'profit': profit,
                'profit_rate': round(profit * 100 / buy_price, 2)
            }
            all_trades.append(cur_trade)
    return all_trades
def convert_to_panda_readable(all_trades, columns):
    """Transpose a list of trade dicts into a column -> list-of-values mapping,
    suitable for constructing a pandas DataFrame."""
    return {col: [trade[col] for trade in all_trades] for col in columns}
if __name__ == '__main__':
    # API credentials are read from a local keys.json file.
    with open('keys.json') as key_file:
        keys = json.load(key_file)
    key_binance = keys['binance2']
    # exchange instance
    binance = Binance(key_binance['key'], key_binance['secret'])
    # get all trading pairs, keeping only the BTC-quoted ones
    all_tickers = binance.api.get_all_tickers()
    all_pairs = map(lambda x: x['symbol'], all_tickers)
    all_pairs = list(filter(lambda x: x[-3:] == 'BTC', all_pairs))
    # calculate all trade profits
    print('calculating ...')
    all_trades = []
    finished = 0
    task_count = len(all_pairs)
    # Progress is reported in 10% increments.
    target = 0.1
    for pair in all_pairs:
        # add all trades of this pair
        all_trades.extend(get_profit_details(pair))
        # print out some progress info
        finished += 1
        if (finished / task_count) >= target:
            print(str(int(target * 100)) + '%')
            target += 0.1
    # sort by sell time
    all_trades = sorted(all_trades, key=lambda x: x['sell_time'])
    # convert time stamp to be readable
    def _convert_timestamp(x):
        x['sell_time'] = timestamp_to_real_time(x['sell_time'])
        x['buy_time'] = timestamp_to_real_time(x['buy_time'])
        return x
    all_trades = map(_convert_timestamp, all_trades)
    # convert to panda data frame
    columns = [
        'coin_name',
        'buy_time',
        'sell_time',
        'buy_price',
        'sell_price',
        'num',
        'profit',
        'profit_rate'
    ]
    all_trades = convert_to_panda_readable(all_trades, columns)
    df = DataFrame(all_trades)
    print('finished! => results.csv\n' + '-' * 50)
    # write to results
    df.to_csv('results.csv', columns=columns)
|
from django import forms
from django.contrib.auth import get_user_model

# Resolve the active user model once at import time so custom
# AUTH_USER_MODEL settings are honoured.
User = get_user_model()
class ContactForm(forms.Form):
    """Site contact form: full name, email (gmail.com only), and a message."""

    fullname = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control", "placeholder": "full name"}))
    email = forms.EmailField(widget=forms.EmailInput(attrs={"class": "form-control", "placeholder": "email"}))
    content = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control", "placeholder": "Your message"}))

    def clean_email(self):
        """Restrict submissions to gmail.com addresses.

        BUG FIX: this hook was named ``clean_code``; Django only calls
        ``clean_<fieldname>`` hooks, so the gmail check was dead code.
        Also fixed the "hs"->"has" typo in the error message.
        """
        email = self.cleaned_data.get("email")
        if "gmail.com" not in email:
            raise forms.ValidationError("Email has to be gmail.com")
        return email
class LoginForm(forms.Form):
    """Bare login form; credential checking happens in the view."""
    username = forms.CharField()
    # NOTE: rendered as plain text input; consider widget=forms.PasswordInput.
    password = forms.CharField()
class RegisterForm(forms.Form):
    """Registration form enforcing unique username/email and matching passwords."""

    username = forms.CharField()
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput)
    password2 = forms.CharField(label='Confirm password', widget=forms.PasswordInput)

    def clean_username(self):
        """Reject usernames already present in the user table."""
        username = self.cleaned_data.get('username')
        qs = User.objects.filter(username=username)
        if qs.exists():
            raise forms.ValidationError("Username taken")
        return username

    def clean_email(self):
        """Reject email addresses already present in the user table."""
        email = self.cleaned_data.get('email')
        qs = User.objects.filter(email=email)
        if qs.exists():
            raise forms.ValidationError("email taken")
        return email

    def clean(self):
        """Cross-field check that both password entries match.

        BUG FIX: this was named ``clean_code`` (Django never calls that hook,
        so the check was dead code) and it returned the undefined name
        ``email`` (NameError). Cross-field validation belongs in ``clean()``,
        which must return the cleaned data mapping.
        """
        cleaned_data = super().clean()
        password = cleaned_data.get('password')
        password2 = cleaned_data.get('password2')
        if password2 != password:
            raise forms.ValidationError("passwords do not match")
        return cleaned_data
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from os.path import join
class Hsv:
    """Thin OpenCV helper: load an image and extract/display an HSV-range mask."""

    def __init__(self, filename, path=None):
        """Load the image from ``path/filename`` (or plain ``filename``).

        NOTE(review): cv.imread returns None on failure and no check is done
        here, so a bad path only surfaces later in cvtColor -- confirm inputs.
        """
        if path:
            print('fname in hsv ', join(path, filename))
            self.img = cv.imread(join(path, filename))
        else:
            self.img = cv.imread(filename)

    def to_hsv(self):
        """Return the loaded BGR image converted to HSV color space."""
        return cv.cvtColor(self.img, cv.COLOR_BGR2HSV)

    def blue_mask(self, hsv, low, high, ind):
        """Threshold ``hsv`` to the inclusive [low, high] HSV range.

        Shows the source image, mask, and masked result in OpenCV windows,
        writes 'mask_new.jpg' and 'res_new.jpg' to the working directory,
        and returns the masked image. ``ind`` is unused; it is kept only for
        interface compatibility with existing callers.

        Cleanup: removed unused locals (``blue``, ``hsv_blue``) left over
        from an experiment converting a pure-blue BGR pixel to HSV.
        """
        hsv_low_blue = np.array(low)    # e.g. [100, 50, 50]
        hsv_high_blue = np.array(high)  # e.g. [140, 255, 255]
        # Threshold the HSV image to get only the requested color range.
        mask = cv.inRange(hsv, hsv_low_blue, hsv_high_blue)
        # Bitwise-AND mask and original image.
        res = cv.bitwise_and(self.img, self.img, mask=mask)
        cv.imshow('frame', self.img)
        cv.imshow('mask', mask)
        cv.imwrite('mask_new.jpg', mask)
        cv.imshow('res', res)
        cv.imwrite('res_new.jpg', res)
        return res
# Demo driver: build an Hsv helper for the sample image and show/save a
# blue mask for it.
# NOTE(review): the filename ends in '_jpg' with no dot/extension -- confirm
# a file with exactly this name exists.
h = Hsv('fisheye_1414615708319_1403623620709_00013_20140624_172851_jpg')
hsv = h.to_hsv()
#h.new_blue_mask(hsv)
ind = 0
# HSV range roughly covering blue hues (H 100-140 in OpenCV's 0-179 scale).
h.blue_mask(hsv, [100,50,50], [140,255,255], 1)
# [86, 31, 4], [220, 88, 50]
#for low, high in ([[100,50,50],[140,255,255]], [[100,50,50],[140,255,255]], [[100,50,50],[140,255,255]], [[100,50,50],[140,255,255]], [[100,50,50],[140,255,255]]) :
#    h.blue_mask(hsv) ##,[110,50,50],[130,255,255])
# Read "word page" pairs from stdin until EOF, collecting words and page
# numbers in parallel lists, then sort the word list.
wordList = []
pageList = []
# BUG FIX: the original executed `pageNum.append(pageNum)` -- an
# AttributeError on a str, and the page list was never filled. It also
# looped forever and crashed with EOFError when input ran out.
while 1:
    try:
        words, pageNum = input().split(' ')
    except EOFError:
        break
    wordList.append(words)
    pageList.append(pageNum)
wordList.sort()
|
# libraries
import os
import requests
import time
import pyaudio
import RPi.GPIO as GPIO
# interface
from interface.lights import Lights
# recording
from record import Record
# ordering
from ordering.speech_processing import SpeechProcessing
import ordering.speech_processing_threads as SpeechProcessingThreads
from ordering.order_request import OrderRequest
# config
from helper.toml_loader import Config
# authentication
from authentication.auth import Authentication
def check_connection(host):
    """Block until a single ping to ``host`` succeeds.

    While unreachable, flash the lights red and retry every 10 seconds.
    NOTE: builds a shell command by concatenation; ``host`` is trusted here.
    """
    ping_cmd = "ping -c 1 " + host
    status = os.system(ping_cmd)
    while status != 0:
        status = os.system(ping_cmd)
        lights_change(255, 0, 0, 5, 1)
        time.sleep(10)
def lights_change(r, g, b, intensity=100, duration=0):
    """Run a one-shot "change" animation on the lights and wait for it to finish."""
    params = {
        "r": r,
        "g": g,
        "b": b,
        "intensity": intensity,
        "duration": duration,
    }
    worker = Lights("change", params)
    worker.start()
    worker.join()
def result_output(order, result, headline):
    """Pretty-print a recognized order and its parsed drinks/services.

    Always prints ``headline``; the rest is skipped when ``result`` is falsy.
    """
    print(headline)
    if not result:
        return
    print('ORDER: ' + order)
    if len(result["drinks"]):
        print('---- DRINKS')
        for drink in result["drinks"]:
            print(drink['name'])
            print(drink['nb'])
            print(drink['size'])
            print('---')
    if len(result["services"]):
        print('---- SERVICES')
        for service in result["services"]:
            print(service['name'])
            print('---')
def main():
    """Voice-ordering device loop: record on button press, transcribe via
    three speech services (Bing, Wit.ai, Google), submit the first order
    that parses to drinks/services, and signal the outcome via lights."""
    # authentication
    ro_auth = Authentication()
    ro_auth.login()
    # load config
    config = Config("config.toml")
    cfg = config.load()
    # prepare button interface (BCM pin 17, plain input)
    BUTTON = 17
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(BUTTON, GPIO.IN)
    button = False
    # initialize recording thread
    recording_thread = Record(pyaudio.PyAudio())
    # make sure microServices are available
    check_connection("138.68.71.39")
    # get voiceDevice info from the admin API
    url = cfg["adminApi"]["host"] + ":" + str(cfg["adminApi"]["port"])
    url += "/voicedevice/" + cfg["roCredentials"]["voiceDeviceId"]
    print(url)
    headers = {
        "Access-Token": ro_auth.access()
    }
    print(headers)
    response = requests.get(url, headers=headers)
    voice_device = None
    print(response)
    print(response.text)
    if response.status_code == 200:
        voice_device = response.json()
    # initialize orderRequest
    order_request = OrderRequest(ro_auth, voice_device)
    # start main loop
    while True:
        state = GPIO.input(BUTTON)
        # Button pressed (active low): start recording, lights white.
        if not state and not button:
            check_connection("138.68.71.39")
            recording_thread.start()
            lights_change(255, 255, 255)
            button = True
        # Button released: stop recording and process the audio.
        if state and button:
            lights = Lights("pulse", {"r": 255, "g": 255, "b": 255})
            lights.start()
            recording_thread.stop()
            wave_output = recording_thread.join()
            if wave_output:
                # reinitialize thread (a Thread can only be started once)
                recording_thread = Record(pyaudio.PyAudio())
                speech_processing = SpeechProcessing(wave_output, ro_auth, voice_device)
                # Fan the audio out to all three recognizers in parallel.
                google = SpeechProcessingThreads.GoogleSpeech(speech_processing)
                google.start()
                wit = SpeechProcessingThreads.WitAi(speech_processing)
                wit.start()
                bing = SpeechProcessingThreads.BingVoice(speech_processing)
                bing.start()
                result = {
                    "items": []
                }
                # Priority order: Bing, then Wit.ai, then Google -- fall
                # through whenever no order text or no parsed items came back.
                order = bing.join()
                if order:
                    result = order_request.request(order)
                    print_headline = 'BING'
                if not order or (not len(result["drinks"]) and not len(result["services"])):
                    order = wit.join()
                    if order:
                        result = order_request.request(order)
                        print_headline = 'WIT AI'
                if not order or (not len(result["drinks"]) and not len(result["services"])):
                    print_headline = 'GOOGLE'
                    # NOTE(review): this requests with the *previous* order
                    # text before google.join() replaces it -- confirm the
                    # intended statement order (indentation was ambiguous in
                    # the original source).
                    if order:
                        result = order_request.request(order)
                    order = google.join()
                    if order:
                        print(print_headline + ' order: ' + order)
            else:
                order = False
                print_headline = False
            lights.stop()
            lights.join()
            if wave_output:
                # Green flash on a parsed order, red flash otherwise.
                if order and (len(result["drinks"]) or len(result["services"])):
                    lights_change(r=0, g=255, b=0, duration=1)
                    result_output(order, result, '------------- ' + print_headline + ' -------------')
                else:
                    lights_change(r=255, g=0, b=0, duration=1)
            else:
                lights_change(0, 0, 0)
            # reinitialize thread
            recording_thread = Record(pyaudio.PyAudio())
            button = False
# Script entry point.
if __name__ == "__main__":
    main()
|
# Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections, errno, logging, os, socket, yaml
def dict_representer(dumper, data):
    """Represent an OrderedDict as a plain YAML mapping, preserving key order.

    NOTE: ``.iteritems()`` exists only on Python 2; this module targets
    Python 2 (see also the ``unicode`` builtin used below).
    """
    return dumper.represent_dict(data.iteritems())
def unicode_representer(dumper, uni):
    """Represent a unicode value as an untagged (plain !!str) YAML scalar."""
    return yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
def dict_constructor(loader, node):
    """YAML constructor: build mappings as OrderedDict to preserve key order."""
    return collections.OrderedDict(loader.construct_pairs(node))
def setup_yaml():
    """Register the YAML hooks: ordered dicts and os.environ dump as plain
    mappings, unicode dumps untagged, and mappings load as OrderedDict."""
    _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
    yaml.add_representer(collections.OrderedDict, dict_representer)
    # os.environ is an os._Environ mapping; dump it like a dict too.
    yaml.add_representer(os._Environ, dict_representer)
    # ``unicode`` builtin: Python 2 only.
    yaml.add_representer(unicode, unicode_representer)
    yaml.add_constructor(_mapping_tag, dict_constructor)
def setup_logging():
    """Configure root logging at INFO with a timestamped "forge" format,
    and silence chatty third-party loggers."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s forge 0.0.1 %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    # These libraries log heavily at INFO; keep them at WARNING and above.
    for noisy_name in ('socketio', 'engineio'):
        logging.getLogger(noisy_name).setLevel(logging.WARNING)
def setup():
    """One-shot initialization: YAML hooks plus logging configuration."""
    setup_yaml()
    setup_logging()
def search_parents(name, start=None, root=False):
    """Search for ``name`` in ``start`` (default: cwd) and every ancestor
    directory.

    With ``root=False`` return the first (deepest) match; with ``root=True``
    return the highest (closest to the filesystem root) match. Returns None
    when nothing matches.
    """
    best = None
    current = start or os.getcwd()
    parent = None
    # os.path.dirname is a fixed point at the filesystem root, which ends
    # the walk.
    while current != parent:
        candidate = os.path.join(current, name)
        if os.path.exists(candidate):
            if not root:
                return candidate
            best = candidate
        current, parent = os.path.dirname(current), current
    return best
|
# x = "sajjad"
# for item in x:
# print(item)
# y = ("sajjad", "Parvane")
# for item in y:
# # print(item)
# z = ["Sajjad","Parvaneh"]
# for item in z:
# print(item)
# Demo: a dict whose values have mixed types -- "sajjad" maps to a nested
# dict, "parvane" maps to a plain string. (The "genfer" key is kept as-is:
# it is a runtime key, renaming it would change behavior.)
w = {
    "sajjad": {
        "name" : "sajjad",
        "age" : "37",
        "genfer" : "male"
    },
    "parvane":"female"
}
# Iterating a dict yields its keys; print each key's value, then the key.
for item in w:
    print(w[item])
    print(item)
# print(w)
# print(w.items())
# print(type(w.items()))
#
# for k,v in w.items():
# print(k,v)
# for i in range(5, 10, 2):
# print(i) |
#!/usr/bin/python
#coding=utf-8
import re
import json
def redict(regex, words):
    """Return the words matching ``regex`` (anchored at the start, per
    ``re.match``) as a list.

    IMPROVEMENT: wrap ``filter`` in ``list`` -- identical on Python 2
    (where ``filter`` already returns a list) and gives a concrete,
    printable/re-iterable result on Python 3.
    """
    pattern = re.compile(regex)
    return list(filter(pattern.match, words))
if __name__ == '__main__':
    # Load the word -> definition mapping from dict.json.
    # NOTE: the file handle from open() is never closed (Python 2 style).
    dic = json.loads(open('dict.json', 'r').read())
    words = set(dic.keys())
    # Python 2 print statement; prints one {word: definition} dict per match.
    print map(lambda word: {word:dic[word]}, redict('.*gnu.*', words))
|
from collections import defaultdict, deque, Counter
from heapq import heapify, heappop, heappush
import math
from copy import deepcopy
from itertools import combinations, permutations, product, combinations_with_replacement
from bisect import bisect_left, bisect_right
import sys
def input():
    # Deliberately shadows the builtin input() with a faster stdin line
    # reader (common competitive-programming idiom); strips the trailing
    # newline/whitespace.
    return sys.stdin.readline().rstrip()
def getN():
    # Read one line as a single int.
    return int(input())
def getNM():
    # Read one line of space-separated ints as a lazy map (for unpacking).
    return map(int, input().split())
def getList():
    # Read one line of space-separated ints as a concrete list.
    return list(map(int, input().split()))
def getListGraph():
    # Read space-separated 1-based vertex ids and convert them to 0-based.
    return list(map(lambda x:int(x) - 1, input().split()))
def getArray(intn):
    # Read `intn` lines, one int per line.
    return [int(input()) for i in range(intn)]
mod = 10 ** 9 + 7   # common prime modulus
MOD = 998244353     # NTT-friendly prime modulus
# sys.setrecursionlimit(1000000)
inf = float('inf')
eps = 10 ** (-10)
# 4-direction grid offsets; dy[i] pairs with dx[i].
dy = [0, 1, 0, -1]
dx = [1, 0, -1, 0]
#############
# Main Code #
#############
# codeforces round650 F1 - Flying Sort (Easy Version)
# distinct
# Values can be moved either to the front or to the back.
# The minimum number of operations can be found greedily (a BIT is not
# needed for the easy version).
# First consider using only "move to front" operations:
# 0 2 1 -> 1 0 2 -> 0 1 2
# 0 1 3 2 -> 2 0 1 3 -> 1 2 0 3 -> 0 1 2 3
# 0 2 3 1 -> 1 0 2 3 -> 0 1 2 3
# Split the sorted values: some prefix is handled by front-moves and the
# remaining suffix by back-moves.
# e.g. 2 3 5 1 4 -> front part 2 3 1, back part 5 4
# Track the index of the current head; if the next value sits behind it,
# an operation is required.
# For the front-move part, scan the values from large to small and check
# whether each value's position precedes the positions already placed at
# the front; if it comes after them, that value must be moved to the front.
# For each test case, try every split point i: values A[0..i] (in sorted
# order) are moved to the front, values A[N-i-1..] to the back, and take
# the cheapest combination. O(N^2) per case (easy-version brute force).
T = getN()
for _ in range(T):
    N = getN()
    A = getList()
    # Pair each value with its original index, then sort by value.
    A = [[A[i], i] for i in range(N)]
    A.sort()
    # fore[i]: ops needed if the front part covers sorted values 0..i;
    # back[-i-1]: ops needed if the back part covers the last i+1 values.
    fore, back = [0] * N, [0] * N
    for i in range(N):
        # fore: scan values i..0; a value whose original index lies after
        # the minimum index seen so far must be moved to the front.
        cnt, fir = 0, N
        for j in range(i, -1, -1):
            # do operation
            if A[j][1] > fir:
                fir = -1
                cnt += 1
            fir = min(fir, A[j][1])
        fore[i] = cnt
        # back: mirror image -- scan the suffix left to right; a value whose
        # index precedes the maximum seen so far must be moved to the back.
        # NOTE(review): nesting reconstructed from indentation-stripped
        # source -- confirm fore[i]/back[-i-1] sit outside the j-loops.
        cnt, last = 0, -1
        for j in range(N - i - 1, N):
            if A[j][1] < last:
                last = N
                cnt += 1
            last = max(last, A[j][1])
        back[-i - 1] = cnt
    print(min([fore[i] + back[i] for i in range(N)]))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from netCDF4 import Dataset, num2date # to work with NetCDF files
from os.path import expanduser
import matplotlib.pyplot as plt
import xarray as xr
import glob, os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import metpy.calc as mpcalc
from metpy.units import units
import seaborn as sns
import pandas as pd
# Global seaborn/matplotlib styling for all figures below.
sns.set(context='notebook', style='whitegrid', palette='deep', font='sans-serif', font_scale=3.2, color_codes=True, rc=None)
# Directory where sondes are stored
#dir_profile = "/media/ludo/DATA/google-drive/These/EUREC4a/github/Input/Products/"
#path_to_sonde_profiles = os.path.join(dir_profile,"rad_profiles_all_sondes_ERA.nc")
#dir_profile = "/Users/annaleaalbright/Dropbox/EUREC4A/RadiativeProfiles/Data/"
#fp_rad_profiles = os.path.join(dir_profile, "rad_profiles_all_sondes_ERA.nc")
dir_profile = "../output/rad_profiles"
fp_rad_profiles = os.path.join(dir_profile,"rad_profiles.nc")
# Radiative-profile dataset (netCDF) opened lazily via xarray.
sonde_profiles = xr.open_dataset(fp_rad_profiles)
def get_relative_humidity(profiles):
    """Add a relative-humidity variable ``rh`` (launch_time, zlay) computed
    from mixing ratio, temperature, and pressure; returns the dataset."""
    # Tag units so metpy can attach the right quantities.
    profiles["play"].attrs['units'] = 'hPa'
    profiles["tlay"].attrs['units'] = 'kelvin'
    # NOTE(review): argument order (mixing ratio, temperature, pressure)
    # matches older metpy releases; newer metpy expects
    # (pressure, temperature, mixing_ratio) -- confirm the pinned version.
    rh = mpcalc.relative_humidity_from_mixing_ratio(profiles["mr"], profiles["tlay"], profiles["play"])
    profiles["rh"] = (["launch_time","zlay"], rh.magnitude)
    return profiles
def get_specific_humidity(profiles):
    """Add a specific-humidity variable ``qv`` (launch_time, zlay) derived
    from the mixing ratio ``mr``; returns the dataset."""
    qv = mpcalc.specific_humidity_from_mixing_ratio(profiles["mr"])
    profiles["qv"] = (["launch_time","zlay"], qv.magnitude)
    return profiles
def plot_average_profiles(profiles):
    """Plot median and 5/25/75/95% envelopes of temperature, specific and
    relative humidity, and SW/LW/net radiative heating rates versus
    altitude; saves the 2x3 figure to ../figures/Fig2_Average_profiles.png."""
    profiles = get_relative_humidity(profiles)
    profiles = get_specific_humidity(profiles)
    # Temperature quantiles across all soundings.
    tlay_median = profiles["tlay"].quantile(0.5, dim="launch_time")
    tlay_25 = (profiles["tlay"]).quantile(0.25, dim="launch_time")
    tlay_75 = (profiles["tlay"]).quantile(0.75, dim="launch_time")
    tlay_05 = (profiles["tlay"]).quantile(0.05, dim="launch_time")
    tlay_95 = (profiles["tlay"]).quantile(0.95, dim="launch_time")
    # Specific humidity, converted to g/kg (*1000).
    qv_median = profiles["qv"].quantile(0.5, dim="launch_time")*1000
    qv_25 = (profiles["qv"]).quantile(0.25, dim="launch_time")*1000
    qv_75 = (profiles["qv"]).quantile(0.75, dim="launch_time")*1000
    qv_05 = (profiles["qv"]).quantile(0.05, dim="launch_time")*1000
    qv_95 = (profiles["qv"]).quantile(0.95, dim="launch_time")*1000
    rh_median = profiles["rh"].quantile(0.5, dim="launch_time")
    rh_25 = (profiles["rh"]).quantile(0.25, dim="launch_time")
    rh_75 = (profiles["rh"]).quantile(0.75, dim="launch_time")
    rh_05 = (profiles["rh"]).quantile(0.05, dim="launch_time")
    rh_95 = (profiles["rh"]).quantile(0.95, dim="launch_time")
    q_rad_median = profiles["q_rad"].quantile(0.5, dim="launch_time")
    q_rad_25 = (profiles["q_rad"]).quantile(0.25, dim="launch_time")
    q_rad_75 = (profiles["q_rad"]).quantile(0.75, dim="launch_time")
    q_rad_05 = (profiles["q_rad"]).quantile(0.05, dim="launch_time")
    q_rad_95 = (profiles["q_rad"]).quantile(0.95, dim="launch_time")
    q_rad_lw_median = profiles["q_rad_lw"].quantile(0.5, dim="launch_time")
    q_rad_lw_25 = (profiles["q_rad_lw"]).quantile(0.25, dim="launch_time")
    q_rad_lw_75 = (profiles["q_rad_lw"]).quantile(0.75, dim="launch_time")
    q_rad_lw_05 = (profiles["q_rad_lw"]).quantile(0.05, dim="launch_time")
    q_rad_lw_95 = (profiles["q_rad_lw"]).quantile(0.95, dim="launch_time")
    # Keep only soundings with positive column-mean SW heating
    # (presumably daytime sondes -- TODO confirm).
    q_rad_sw = profiles["q_rad_sw"].where(profiles["q_rad_sw"].mean(dim="zlay") > 0, drop=True)
    q_rad_sw_median = q_rad_sw.quantile(0.5, dim="launch_time")
    q_rad_sw_25 = q_rad_sw.quantile(0.25, dim="launch_time")
    q_rad_sw_75 = q_rad_sw.quantile(0.75, dim="launch_time")
    q_rad_sw_05 = q_rad_sw.quantile(0.05, dim="launch_time")
    q_rad_sw_95 = q_rad_sw.quantile(0.95, dim="launch_time")
    # Altitude in km for the y axis.
    zlay = profiles["zlay"]/1000
    fig, ax = plt.subplots(2,3,figsize=(30,30))
    ax[0,0].set_xlabel('Temperature (K)')
    ax[0,1].set_xlabel('Specific humidity (g/kg)')
    ax[0,2].set_xlabel('Relative humidity (%)')
    ax[0,0].set_ylabel('Altitude (km)')
    ax[1,0].set_ylabel('Altitude (km)')
    fs=43
    ax[0,1].set_title('Environmental means', fontsize=fs)
    ax[1,0].set_title('Shortwave', fontsize=fs)
    ax[1,1].set_title('Longwave', fontsize=fs)
    ax[1,2].set_title('Net', fontsize=fs)
    ax[1,1].set_xlabel('Heating rates (K/day)')
    ymin=0.03
    ymax=10
    # Common cosmetics for every panel; heating-rate panels share x limits.
    for k in range(3):
        ax[1,k].set_xlim([-6.5,6.5])
        for i in range (2):
            ax[i,k].grid(color='k', linestyle='--', linewidth=0.8)
            ax[i,k].set_ylim([ymin,ymax])
            ax[i,k].tick_params(direction='in', bottom=True, top=True, left=True, right=True,grid_alpha=0.6)
            for axis in ['top','bottom','left','right']:
                ax[i,k].spines[axis].set_linewidth(1.3)
            ax[i,k].spines['right'].set_visible(False)
            ax[i,k].spines['top'].set_visible(False)
    cl= "k"
    alpha=0.30
    alpha1=0.10
    # Panel (a): temperature -- median line plus shaded quantile bands.
    ax[0,0].plot(tlay_median, zlay, color=cl, linewidth=3, label="median")
    ax[0,0].fill_betweenx(zlay,tlay_25, tlay_75, alpha=alpha, color=cl, label="25-75%")
    ax[0,0].fill_betweenx(zlay,tlay_05, tlay_25, alpha=alpha1, color=cl, label="5-95%")
    ax[0,0].fill_betweenx(zlay,tlay_75, tlay_95, alpha=alpha1, color=cl)
    ax[0,0].legend(loc="lower left")
    # Panel (b): specific humidity.
    ax[0,1].fill_betweenx(zlay, qv_25, qv_75, alpha=alpha, color=cl)
    ax[0,1].fill_betweenx(zlay,qv_05, qv_25, alpha=alpha1, color=cl)
    ax[0,1].fill_betweenx(zlay,qv_75, qv_95, alpha=alpha1, color=cl)
    ax[0,1].plot(qv_median, zlay, color=cl, linewidth=3)
    # Panel (c): relative humidity.
    ax[0,2].fill_betweenx(zlay, rh_25, rh_75, alpha=alpha, color=cl)
    ax[0,2].fill_betweenx(zlay,rh_05, rh_25, alpha=alpha1, color=cl)
    ax[0,2].fill_betweenx(zlay,rh_75, rh_95, alpha=alpha1, color=cl)
    ax[0,2].plot(rh_median, zlay, color=cl, linewidth=3)
    # Panel (d): shortwave heating rates (daytime subset).
    ax[1,0].fill_betweenx(zlay, q_rad_sw_25, q_rad_sw_75, alpha=alpha, color=cl)
    ax[1,0].fill_betweenx(zlay,q_rad_sw_05, q_rad_sw_25, alpha=alpha1, color=cl)
    ax[1,0].fill_betweenx(zlay,q_rad_sw_75, q_rad_sw_95, alpha=alpha1, color=cl)
    ax[1,0].plot(q_rad_sw_median, zlay, color=cl, linewidth=3)
    # Panel (e): longwave heating rates.
    ax[1,1].fill_betweenx(zlay, q_rad_lw_25, q_rad_lw_75, alpha=alpha, color=cl)
    ax[1,1].fill_betweenx(zlay,q_rad_lw_05, q_rad_lw_25, alpha=alpha1, color=cl)
    ax[1,1].fill_betweenx(zlay,q_rad_lw_75, q_rad_lw_95, alpha=alpha1, color=cl)
    ax[1,1].plot(q_rad_lw_median, zlay, color=cl, linewidth=3)
    # Panel (f): net heating rates.
    ax[1,2].fill_betweenx(zlay, q_rad_25, q_rad_75, alpha=alpha, color=cl)
    ax[1,2].fill_betweenx(zlay,q_rad_05, q_rad_25, alpha=alpha1, color=cl)
    ax[1,2].fill_betweenx(zlay,q_rad_75, q_rad_95, alpha=alpha1, color=cl)
    ax[1,2].plot(q_rad_median, zlay, color=cl, linewidth=3)
    # Subplot letter labels.
    x_text=0.9
    y_text=0.9
    ax[0,0].text(x_text,y_text,'(a)',transform = ax[0,0].transAxes,fontsize=fs)
    ax[0,1].text(x_text,y_text,'(b)',transform = ax[0,1].transAxes,fontsize=fs)
    ax[0,2].text(x_text,y_text,'(c)',transform = ax[0,2].transAxes,fontsize=fs)
    ax[1,0].text(x_text,y_text,'(d)',transform = ax[1,0].transAxes,fontsize=fs)
    ax[1,1].text(x_text,y_text,'(e)',transform = ax[1,1].transAxes,fontsize=fs)
    ax[1,2].text(x_text,y_text,'(f)',transform = ax[1,2].transAxes,fontsize=fs)
    fig.tight_layout()
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    fig.savefig('../figures/Fig2_Average_profiles.png')
plot_average_profiles(sonde_profiles)
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that extra filters are pruned correctly for Visual Studio 2010
and later.
"""
import TestGyp
# Build the MSVS projects and verify the generated .vcxproj.filters files.
test = TestGyp.TestGyp(formats=['msvs'])
test.run_gyp('filters.gyp', '-G', 'standalone', '-G', 'msvs_version=2010')
# Projects whose sources all live in one place need no filters file at all.
test.must_not_exist('no_source_files.vcxproj.filters')
test.must_not_exist('one_source_file.vcxproj.filters')
test.must_not_exist('two_source_files.vcxproj.filters')
# .filters files are CRLF-delimited, hence the replace('\n', '\r\n').
test.must_contain('three_files_in_two_folders.vcxproj.filters', '''\
<ItemGroup>
<ClCompile Include="..\\folder1\\a.c">
<Filter>folder1</Filter>
</ClCompile>
<ClCompile Include="..\\folder1\\b.c">
<Filter>folder1</Filter>
</ClCompile>
<ClCompile Include="..\\folder2\\c.c">
<Filter>folder2</Filter>
</ClCompile>
</ItemGroup>
'''.replace('\n', '\r\n'))
test.must_contain('nested_folders.vcxproj.filters', '''\
<ItemGroup>
<ClCompile Include="..\\folder1\\nested\\a.c">
<Filter>folder1\\nested</Filter>
</ClCompile>
<ClCompile Include="..\\folder2\\d.c">
<Filter>folder2</Filter>
</ClCompile>
<ClCompile Include="..\\folder1\\nested\\b.c">
<Filter>folder1\\nested</Filter>
</ClCompile>
<ClCompile Include="..\\folder1\\other\\c.c">
<Filter>folder1\\other</Filter>
</ClCompile>
</ItemGroup>
'''.replace('\n', '\r\n'))
test.pass_test()
|
#!/usr/bin/env python3
import paho.mqtt.client as mqtt
# This is the Subscriber
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: log the result code and subscribe to the
    sensor-value topic."""
    print("Connected with result code "+str(rc))
    client.subscribe("devices/MyDevice/sensors/TC1/value")
def on_message(client, userdata, msg):
    """paho-mqtt message callback: print the payload, then disconnect
    (which makes loop_forever() return after the first message)."""
    print("new value!")
    print(msg.payload.decode())
    client.disconnect()
# Build the client and register callbacks *before* connecting, so the
# handlers are guaranteed to be in place when the network loop processes
# the CONNACK (per the paho-mqtt documentation). The original assigned
# them after connect(). Also dropped a stray trailing semicolon.
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("api.waziup.io")
# QoS-0 publish; queued and sent once loop_forever() drives the socket.
client.publish("devices/MyDevice/sensors/TC1/value", '{"value": "39"}')
client.loop_forever()
|
# Generated by Django 3.0.3 on 2020-06-11 21:59
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames ClienteModel.numero_casa -> numerocasa."""

    dependencies = [
        ('dimensoes', '0016_auto_20200611_1852'),
    ]

    operations = [
        migrations.RenameField(
            model_name='clientemodel',
            old_name='numero_casa',
            new_name='numerocasa',
        ),
    ]
|
import re, htmlentitydefs
#
# Remove entities (html or xml) form the input
# based on http://effbot.org/zone/re-sub.htm#unescape-html
#
def unescapeEntities(xmlText):
    """Replace HTML/XML entities in ``xmlText`` with their characters.

    Handles numeric references (&#nn; and &#xhh;) and named entities
    (&amp; etc.); unknown entities are left untouched. Python 2 only
    (uses ``unichr`` and ``htmlentitydefs``).
    Based on http://effbot.org/zone/re-sub.htm#unescape-html.
    """
    def replaceEntities(matchObject):
        text = matchObject.group(0)
        if text[:2] == "&#":
            # numeric character reference (hex &#x...; or decimal &#...;)
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                # malformed number: return the original text unchanged
                pass
        else:
            # named entity; unknown names fall through unchanged
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text
    return re.sub(r'&#?\w+;', replaceEntities, xmlText)
|
#!user/bin/env python
################################################################################
# File: object_library.py
# Author: Vikram Prasad
# Date: January 25, 2018
# Desc: This file defines all the class objects needed for the spending
# tracker.
################################################################################
#imports, boilerplate stuff
import time
import sys
import os
import numpy as np
import pickle
class LineItem(object):
    """A single purchase (line item) parsed from a credit-card statement row."""

    def __init__(self, line_item_keywords):
        '''
        Takes in a (date, establishment, amount) triple pulled from a
        spreadsheet downloaded from a credit card statement and consolidates
        that information in an accessible class.

        BUG FIX: the second field was unpacked as `description` but then
        referenced as the undefined name `establishment` (NameError).
        '''
        #unpack csv_line
        date, establishment, amount = line_item_keywords
        #cleanup date
        month, day, year = self.unpack_date(date)
        #classify
        category = self.categorize(establishment)
        #save attributes
        self.month = month
        self.day = day
        self.year = year
        self.amount = amount
        self.establishment = establishment
        self.category = category

    def categorize(self, establishment):
        '''
        Based on the establishment and exogenously defined rules, this function
        will return the cateogry. TODO: not yet implemented; returns None.
        '''
        return None

    def unpack_date(self, date):
        '''
        Takes a string form of a full date and returns (month, day, year).
        TODO: not yet implemented. Returns a placeholder triple so __init__
        can unpack it -- the original stub returned None, which made
        __init__ raise TypeError on unpacking.
        '''
        return (None, None, None)
|
"""This module handles execution of job tasks.
"""
import logging
from functools import partial as build_func
logger = logging.getLogger(__name__)
def task_callback(loop, task, tasks, task_map, future):
    """Gets called when a task has finished executing.

    Determines whether the result produced by the task is a map or a
    list of maps, updates the completed count for the task, checks to see
    if all tasks have been executed for all inputs, and runs all children
    tasks with all input data sets.
    """
    try:
        data = future.result()
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. (NOTE(review): on Python 3.8+ CancelledError is
        # a BaseException and is no longer caught here -- confirm cancelled
        # futures cannot reach this callback.)
        logger.exception('Exception thrown while getting task result.', exc_info=True)
        loop.stop()
        return
    data_type = get_data_type(data)
    # Only "output" tasks may legitimately return something other than a
    # dict or a list of dicts.
    if not data_type and task["type"] != "output":
        logger.error("Invalid data type returned from task.")
        loop.stop()
    else:
        update_task_complete_count(task_map, task)
        if check_job_complete(task_map):
            loop.stop()
        else:
            # Fan the result out to every direct child task: a dict is one
            # input, a list is one input per member.
            for child_task in tasks:
                if child_task["parent"] == task["id"]:
                    update_task_todo_count(task_map, child_task, data)
                    if data_type == "dict":
                        run_task(loop, child_task, data, tasks, task_map)
                    elif data_type == "list":
                        for data_member in data:
                            run_task(loop, child_task, data_member, tasks, task_map)
def run_task(loop, task, data, tasks, task_map):
    """Schedule one task coroutine on the loop and attach its completion callback."""
    logger.info('Running task name: "%s"', task.get('name'))
    # Output tasks receive the task descriptor itself in addition to the data.
    if task["type"] == "output":
        coro = task["method"](task, data)
    else:
        coro = task["method"](data)
    pending = loop.create_task(coro)
    pending.add_done_callback(build_func(task_callback, loop, task, tasks, task_map))
def get_data_type(data):
    """Return "dict" or "list" for exactly those types, else None.

    Keeps the original's exact-type check (``type(...) is``): subclasses
    of dict/list also return None.
    """
    return {dict: "dict", list: "list"}.get(type(data))
def check_job_complete(task_map):
    """Return True when every task in the map has been run the required
    number of times.

    IMPROVEMENT: replaces the list -> set -> len==1 dance with ``all``.
    An empty task map still yields False, matching the original behavior
    (``all`` alone would return True for an empty map).
    """
    if not task_map:
        return False
    return all(check_task_complete(task_map[task_id]) for task_id in task_map)
def check_task_complete(task):
    """Return True when the task has a nonzero todo count equal to its
    complete count; False otherwise (including todo == 0)."""
    todo = task["todo"]
    return bool(todo) and todo == task["complete"]
def update_task_todo_count(task_map, task, data):
    """Raise the task's todo counter to reflect new pending inputs.

    A dict counts as one input, a list as ``len(data)`` inputs; any other
    data type leaves the counter untouched.
    """
    key = "task_" + task["id"]
    kind = get_data_type(data)
    if kind == "dict":
        task_map[key]["todo"] += 1
    elif kind == "list":
        task_map[key]["todo"] += len(data)
def update_task_complete_count(task_map, task):
    """Increment the completed counter for ``task`` in ``task_map`` in place."""
    entry = task_map["task_" + task["id"]]
    entry["complete"] += 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.