text stringlengths 8 6.05M |
|---|
import sys
import os
import os.path
import re
import dummy_wintypes
import struct_parser
import func_parser
import def_parser
# Windows type aliases mapped to their ctypes equivalents; emitted verbatim
# into the generated module by generate_type_equiv_code().
TYPE_EQUIVALENCE = [
    ('PWSTR', 'LPWSTR'),
    ('SIZE_T', 'c_ulong'),
    ('PSIZE_T', 'POINTER(SIZE_T)'),
    ('PVOID', 'c_void_p'),
    ('PPS_POST_PROCESS_INIT_ROUTINE', 'PVOID'),
    ('NTSTATUS', 'DWORD'),
    ('PULONG', 'POINTER(ULONG)'),
    ('PDWORD', 'POINTER(DWORD)'),
    ('LPDWORD', 'POINTER(DWORD)'),
    ('LPTHREAD_START_ROUTINE', 'PVOID'),
    ('PHANDLER_ROUTINE', 'PVOID'),
    ('LPBYTE', 'POINTER(BYTE)'),
    ('ULONG_PTR','PULONG'),
    ('CHAR', 'c_char'),
    ('UCHAR', 'c_char'),
    ('PUCHAR', 'POINTER(UCHAR)'),
    ('FARPROC', 'PVOID'),
    ('HGLOBAL', 'PVOID'),
    ('PSID', 'PVOID'),
    ('PVECTORED_EXCEPTION_HANDLER', 'PVOID'),
    #('HRESULT', 'c_long'), # VERY BAD : real HRESULT raise by itself -> way better
    ('ULONGLONG', 'c_ulonglong'),
    ('LONGLONG', 'c_longlong'),
    ('ULONG64', 'c_ulonglong'),
    ('DWORD64', 'ULONG64'),
    ('PULONG64', 'POINTER(ULONG64)'),
    ('PHANDLE', 'POINTER(HANDLE)'),
    ('HKEY', 'HANDLE'),
    ('PHKEY', 'POINTER(HKEY)'),
    ('ACCESS_MASK', 'DWORD'),
    ('REGSAM', 'ACCESS_MASK'),
    # Will be changed at import time
    ('LPCONTEXT', 'PVOID'),
    ('HCERTSTORE', 'PVOID'),
    ('HCRYPTMSG', 'PVOID'),
]
# For functions returning void
TYPE_EQUIVALENCE.append(('VOID', 'DWORD'))
# All type names we accept in parsed definitions (redundant list() wrap removed).
known_type = dummy_wintypes.names + [x[0] for x in TYPE_EQUIVALENCE]
FUNC_FILE = "winfunc.txt"
STRUCT_FILE = "winstruct.txt"
DEF_FILE = "windef.txt"
NTSTATUS_FILE = "ntstatus.txt"
GENERATED_STRUCT_FILE = "winstructs"
GENERATED_FUNC_FILE = "winfuncs"
GENERATED_DEF_FILE = "windef"
GENERATED_NTSTATUS_FILE = "ntstatus"
# BUG FIX: raw string -- "\w" and "\g" are invalid escape sequences and only
# worked because Python leaves unrecognised escapes untouched (deprecated).
OUT_DIRS = [r"..\windows\generated_def"]
# Optional extra output directory from the command line.
if len(sys.argv) > 1:
    OUT_DIRS.append(sys.argv[1])
def get_all_struct_name(structs, enums):
    """Return every struct/enum name together with all their typedef aliases."""
    return [alias
            for item in structs + enums
            for alias in [item.name] + list(item.typedef)]
def generate_type_equiv_code(type_equiv):
    """Emit one '<alias> = <target>' line per pair, plus a trailing blank line.

    BUG FIX: the original loop variable shadowed the *type_equiv* parameter
    (`for type_equiv in type_equiv`), which worked but was misleading.
    """
    lines = ["{0} = {1}\n".format(alias, target) for alias, target in type_equiv]
    return "".join(lines) + "\n"
def verif_funcs_type(funcs, structs, enums):
    """Check that every function return and parameter type is known.

    A type is known when it appears in the module-level `known_type` list or
    is the name/typedef of one of the parsed structs/enums.

    Raises:
        ValueError: on the first unknown return or parameter type.

    BUG FIX: removed leftover `import pdb; pdb.set_trace()` debugger traps
    that would hang any non-interactive run before the ValueError was raised.
    """
    all_struct_name = get_all_struct_name(structs, enums)
    for f in funcs:
        ret_type = f.return_type
        if ret_type not in known_type and ret_type not in all_struct_name:
            raise ValueError("UNKNOW RET TYPE {0}".format(ret_type))
        for param_type, _ in f.params:
            # Strip a single POINTER(...) wrapper so the inner type is checked.
            if param_type.startswith("POINTER(") and param_type.endswith(")"):
                param_type = param_type[len("POINTER("): -1]
            if param_type not in known_type and param_type not in all_struct_name:
                raise ValueError("UNKNOW PARAM TYPE {0}".format(param_type))
def check_in_define(name, defs):
    """Return True when *name* equals the name of one of the *defs* entries."""
    for definition in defs:
        if definition.name == name:
            return True
    return False
def validate_structs(structs, enums, defs):
    """Check every struct field's type and array-size expression.

    Field types must be in `known_type` or be a parsed struct/enum name; a
    non-integer repeat count must be a name declared in *defs*.

    Raises:
        ValueError: on an unknown field type or undefined repeat-count name.

    BUG FIX: removed a leftover `import pdb; pdb.set_trace()` debugger trap.
    """
    all_struct_name = get_all_struct_name(structs, enums)
    for struct in structs:
        for field_type, field_name, nb_rep in struct.fields:
            if field_type.name not in known_type + all_struct_name:
                raise ValueError("UNKNOW TYPE {0}".format(field_type))
            try:
                int(nb_rep)
            except ValueError:
                # Repeat count is symbolic: it must be a known #define.
                if not check_in_define(nb_rep, defs):
                    raise ValueError("UNKNOW DEFINE {0}".format(nb_rep))
common_header = "#Generated file\n"
defs_header = common_header + """
import sys
import platform
if sys.version_info.major == 3:
long = int
bits = platform.architecture()[0]
bitness = int(bits[:2])
NATIVE_WORD_MAX_VALUE = 0xffffffff if bitness == 32 else 0xffffffffffffffff
class Flag(long):
def __new__(cls, name, value):
return super(Flag, cls).__new__(cls, value)
def __init__(self, name, value):
self.name = name
def __repr__(self):
return "{0}({1})".format(self.name, hex(self))
__str__ = __repr__
"""
def generate_defs_ctypes(defs):
    """Join the defs header with the generated ctypes code of every define."""
    parts = [defs_header]
    for d in defs:
        parts.append(d.generate_ctypes())
    return "\n".join(parts)
# Preamble of the generated functions module; the leading newline of the
# triple-quoted string is dropped by the [1:] slice.
funcs_header = common_header + """
from ctypes import *
from ctypes.wintypes import *
from .{0} import *
"""[1:].format(GENERATED_STRUCT_FILE)
def generate_funcs_ctypes(funcs):
    """Build the generated functions module: header, name list, then stubs."""
    exported_names = [f.name for f in funcs]
    pieces = [funcs_header, "functions = {0}\n\n".format(str(exported_names))]
    pieces.extend(func.generate_ctypes() + "\n" for func in funcs)
    return "".join(pieces)
# Preamble of the generated structs module; [1:] drops the leading newline.
structs_header = common_header + """
from ctypes import *
from ctypes.wintypes import *
from .windef import *
"""[1:]
def generate_struct_ctypes(structs, enums):
    """Build the generated structs module.

    Layout: header, type-alias lines, exported name lists, then every enum
    followed by every struct definition.
    """
    parts = [structs_header, generate_type_equiv_code(TYPE_EQUIVALENCE)]
    parts.append("structs = {0}\n\n".format(str([s.name for s in structs])))
    parts.append("enums = {0}\n\n".format(str([e.name for e in enums])))
    for enum in enums:
        parts.append("# Enum {0} definitions\n".format(enum.name))
        parts.append(enum.generate_ctypes() + "\n")
    for struct in structs:
        parts.append("# Struct {0} definitions\n".format(struct.name))
        parts.append(struct.generate_ctypes() + "\n")
    return "".join(parts)
def write_to_out_file(name, data):
    """Write *data* to '<out_dir>/<name>.py' in every output directory.

    Improvement: use a context manager so the file handle is closed even if
    the write raises.
    """
    for out_dir in OUT_DIRS:
        with open("{0}/{1}.py".format(out_dir, name), 'w') as f:
            f.write(data)
# --- Driver: parse description files, validate, then emit the modules. -------
def_code = open(DEF_FILE, 'r').read()
funcs_code = open(FUNC_FILE, 'r').read()
structs_code = open(STRUCT_FILE, 'r').read()
defs = def_parser.WinDefParser(def_code).parse()
funcs = func_parser.WinFuncParser(funcs_code).parse()
structs, enums = struct_parser.WinStructParser(structs_code).parse()
# Fail early if any referenced type or define is unknown.
validate_structs(structs, enums, defs)
verif_funcs_type(funcs, structs, enums)
# Create Flags for ntstatus
nt_status_defs = []
for line in open(NTSTATUS_FILE):
    # Each line is "code|name|description"; the description may contain '|'.
    code, name, descr = line.split("|", 2)
    nt_status_defs.append(def_parser.WinDef(name, code))
defs = nt_status_defs + defs
defs_ctypes = generate_defs_ctypes(defs)
funcs_ctypes = generate_funcs_ctypes(funcs)
structs_ctypes = generate_struct_ctypes(structs, enums)
# Ensure every output directory exists before writing.
for out_dir in OUT_DIRS:
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
write_to_out_file(GENERATED_DEF_FILE, defs_ctypes)
write_to_out_file(GENERATED_FUNC_FILE, funcs_ctypes)
write_to_out_file(GENERATED_STRUCT_FILE, structs_ctypes)
NTSTATUS_HEAD = """
class NtStatusException(Exception):
ALL_STATUS = {}
def __init__(self , code):
try:
x = self.ALL_STATUS[code]
except KeyError:
x = (code, 'UNKNOW_ERROR', 'Error non documented in ntstatus.py')
self.code = x[0]
self.name = x[1]
self.descr = x[2]
return super(NtStatusException, self).__init__(*x)
def __str__(self):
return "{e.name}(0x{e.code:x}): {e.descr}".format(e=self)
@classmethod
def register_ntstatus(cls, code, name, descr):
if code in cls.ALL_STATUS:
return # Use the first def
cls.ALL_STATUS[code] = (code, name, descr)
"""
# Emit one register_ntstatus() call per documented NTSTATUS code.
# BUG FIX: removed the unused leftover local `b = descr`.
nt_status_exceptions = [NTSTATUS_HEAD]
for line in open(NTSTATUS_FILE):
    code, name, descr = line.split("|", 2)
    code = int(code, 0)
    # Collapse space runs and drop the trailing newline from the description;
    # double quotes would break the generated string literal, so use singles.
    descr = re.sub(" +", " ", descr[:-1])
    descr = descr.replace('"', "'")
    nt_status_exceptions.append('NtStatusException.register_ntstatus({0}, "{1}", "{2}")'.format(hex(code), name, descr))
write_to_out_file(GENERATED_NTSTATUS_FILE, "\n".join(nt_status_exceptions))
for out_dir in OUT_DIRS:
    print("Files generated in <{0}>".format(os.path.abspath(out_dir)))
|
import sys
sys.path.append('../streamlit-recommendation')
import os
import time
import gc
import argparse
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from fuzzywuzzy import fuzz
from helper import data_processing
def get_recomendation(movie_set, final_movie_df, final_rating_df, exploration):
    '''
    driver function to get recommendation based on a set of movies and user define exploration
    :return: a data frame of movie and youtube url
    '''
    # Build an item-based KNN recommender and ask for 11 neighbours per movie
    # (presumably one extra because the nearest hit is the query movie itself
    # -- confirm against _inference).
    recommender = KnnRecommender(movie_set['title'].tolist(), final_movie_df,
                                 final_rating_df, exploration)
    recommender.make_recommendations(11)
    data = recommender.return_recommendations()
    return data
class KnnRecommender():
    """
    Item-based collaborative-filtering recommender with KNN (sklearn).

    Builds a movie-user sparse rating matrix, fits NearestNeighbors with a
    cosine metric, and stores per-movie neighbour lists in
    `self.recommendations`.

    Fixes relative to the original:
    * `_idx_lookup` used a truthiness test, so a movie mapped to row index 0
      was treated as missing.
    * `_prep_data` discarded the result of `DataFrame.astype` (astype returns
      a new object; it does not convert in place).
    * `_fuzzy_matching` raised IndexError when no match was found instead of
      returning None as its docstring promises.
    """

    def __init__(self, movie_set, final_movie_df, final_rating_df, exploration):
        """
        Parameters
        ----------
        movie_set: list of movie titles to recommend for
        final_movie_df: DataFrame with at least 'movieId' and 'title' columns
        final_rating_df: DataFrame with 'userId', 'movieId', 'rating' columns
        exploration: int, rank of the recommendation surfaced per input movie
        """
        self.movie_rating_thres = 0
        self.user_rating_thres = 0
        self.model = NearestNeighbors()
        self.set_filter_params(0, 0)
        self.set_model_params(20, 'brute', 'cosine', -1)
        self.recommendations = {}
        self.movie_set = movie_set
        self.exploration = exploration
        self.final_movie_df = final_movie_df
        self.final_rating_df = final_rating_df

    def set_filter_params(self, movie_rating_thres, user_rating_thres):
        """
        set rating frequency threshold to filter less-known movies and
        less active users
        Parameters
        ----------
        movie_rating_thres: int, minimum number of ratings received by users
        user_rating_thres: int, minimum number of ratings a user gives
        """
        self.movie_rating_thres = movie_rating_thres
        self.user_rating_thres = user_rating_thres

    def set_model_params(self, n_neighbors, algorithm, metric, n_jobs=None):
        """
        set model params for sklearn.neighbors.NearestNeighbors
        Parameters
        ----------
        n_neighbors: int, optional (default = 5)
        algorithm: {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        metric: string or callable, default 'minkowski', or one of
            ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan']
        n_jobs: int or None, optional (default=None)
        """
        if n_jobs and (n_jobs > 1 or n_jobs == -1):
            # joblib spills large arrays to disk when running in parallel.
            os.environ['JOBLIB_TEMP_FOLDER'] = '/tmp'
        self.model.set_params(**{
            'n_neighbors': n_neighbors,
            'algorithm': algorithm,
            'metric': metric,
            'n_jobs': n_jobs})

    def _prep_data(self):
        """
        prepare data for recommender
        1. movie-user scipy sparse matrix
        2. hashmap of movie to row index in movie-user scipy sparse matrix
        """
        df_movies = self.final_movie_df[['movieId', 'title']]
        df_ratings = self.final_rating_df[['userId', 'movieId', 'rating']]
        # BUG FIX: astype returns a new frame; the result was discarded before.
        df_movies = df_movies.astype({'movieId': 'int32', 'title': 'str'})
        df_ratings = df_ratings.astype({'userId': 'int32', 'movieId': 'int32', 'rating': 'float32'})
        # Keep only ratings for movies we actually know about.
        df_ratings = df_ratings[df_ratings['movieId'].isin(df_movies['movieId'].tolist())]
        # Filter out rarely rated movies...
        df_movies_cnt = pd.DataFrame(
            df_ratings.groupby('movieId').size(),
            columns=['count'])
        popular_movies = list(set(df_movies_cnt.query('count >= @self.movie_rating_thres').index))  # noqa
        movies_filter = df_ratings.movieId.isin(popular_movies).values
        # ...and rarely active users.
        df_users_cnt = pd.DataFrame(
            df_ratings.groupby('userId').size(),
            columns=['count'])
        active_users = list(set(df_users_cnt.query('count >= @self.user_rating_thres').index))  # noqa
        users_filter = df_ratings.userId.isin(active_users).values
        df_ratings_filtered = df_ratings[movies_filter & users_filter]
        # pivot and create movie-user matrix
        movie_user_mat = df_ratings_filtered.pivot(
            index='movieId', columns='userId', values='rating').fillna(0)
        # create mapper from movie title to row index
        hashmap = {
            movie: i for i, movie in
            enumerate(list(df_movies.set_index('movieId').loc[movie_user_mat.index].title))  # noqa
        }
        # transform matrix to scipy sparse matrix
        movie_user_mat_sparse = csr_matrix(movie_user_mat.values)
        # clean up intermediate frames before fitting
        del df_movies, df_movies_cnt, df_users_cnt
        del df_ratings, df_ratings_filtered, movie_user_mat
        gc.collect()
        return movie_user_mat_sparse, hashmap

    def _idx_lookup(self, hashmap, fav_movie):
        """
        Return the matrix row index for *fav_movie*, or an arbitrary known
        index when the title is absent (best-effort fallback).
        """
        idx = hashmap.get(fav_movie)
        # BUG FIX: explicit None check -- row index 0 is a valid (falsy) value.
        if idx is not None:
            return idx
        # NOTE: popitem() mutates the hashmap, as the original code did.
        return hashmap.popitem()[1]

    def _fuzzy_matching(self, hashmap, fav_movie):
        """
        return the closest match via fuzzy ratio.
        If no match found, return None
        Parameters
        ----------
        hashmap: dict, map movie title name to index of the movie in data
        fav_movie: str, name of user input movie
        Return
        ------
        index of the closest match, or None
        """
        match_tuple = []
        for title, idx in hashmap.items():
            try:
                ratio = fuzz.ratio(title.lower(), fav_movie.lower())
                if ratio >= 60:
                    match_tuple.append((title, idx, ratio))
            except AttributeError:
                # Non-string titles (e.g. NaN) cannot be lowered; skip them.
                pass
        # sort best ratio first
        match_tuple = sorted(match_tuple, key=lambda x: x[2])[::-1]
        if not match_tuple:
            print('Oops! No match is found')
            # BUG FIX: previously fell through to match_tuple[0][1] -> IndexError.
            return None
        print('Found possible matches in our database: '
              '{0}\n'.format([x[0] for x in match_tuple]))
        return match_tuple[0][1]

    def _inference(self, model, data, hashmap,
                   fav_movie, n_recommendations):
        """
        return top n similar movie recommendations based on user's input movie
        Parameters
        ----------
        model: sklearn model, knn model
        data: movie-user matrix
        hashmap: dict, map movie title name to index of the movie in data
        fav_movie: str, name of user input movie
        n_recommendations: int, top n recommendations
        Return
        ------
        list of top n similar movie recommendations as (row index, distance)
        """
        model.fit(data)
        print('You have input movie:', fav_movie)
        idx = self._idx_lookup(hashmap, fav_movie)
        print('Recommendation system start to make inference')
        print('......\n')
        t0 = time.time()
        # +1 neighbour because the closest hit is the query movie itself.
        distances, indices = model.kneighbors(
            data[idx],
            n_neighbors=n_recommendations + 1)
        # Pair indices with distances, sort ascending, drop element 0 (the
        # query itself) and reverse -- farthest first, as the original did.
        raw_recommends = \
            sorted(
                list(
                    zip(
                        indices.squeeze().tolist(),
                        distances.squeeze().tolist()
                    )
                ),
                key=lambda x: x[1]
            )[:0:-1]
        print('It took my system {:.2f}s to make inference \n\
              '.format(time.time() - t0))
        return raw_recommends

    def make_recommendations(self, n_recommendations):
        """
        make top n movie recommendations for every movie in self.movie_set,
        storing (title, distance) lists in self.recommendations
        Parameters
        ----------
        n_recommendations: int, top n recommendations
        """
        movie_user_mat_sparse, hashmap = self._prep_data()
        for fav_movie in self.movie_set:
            raw_recommends = self._inference(
                self.model, movie_user_mat_sparse, hashmap,
                fav_movie, n_recommendations)
            # Re-sort nearest-first for presentation.
            raw_recommends = sorted(raw_recommends, key=lambda x: x[1])
            recommendations = []
            reverse_hashmap = {v: k for k, v in hashmap.items()}
            print('Recommendations for {}:'.format(fav_movie))
            for i, (idx, dist) in enumerate(raw_recommends):
                try:
                    print('{0}: {1}, with distance '
                          'of {2}'.format(i + 1, reverse_hashmap[idx], dist))
                    recommendations.append((reverse_hashmap[idx], dist))
                except KeyError:
                    # Unknown row index: fall back to a random title so the
                    # recommendation list keeps its expected length.
                    print('{0}: {1}, with distance '
                          'of {2}'.format(i + 1, "RANDOM", 99))
                    recommendations.append((self.final_movie_df.sample(1)['title'].values[0], dist))
            self.recommendations[fav_movie] = recommendations

    def return_recommendations(self):
        '''
        returns recommendations based on user input exploration level
        :return: data frame of matching movies, newest first
        '''
        title_list = []
        for movie in self.movie_set:
            # exploration selects how "far" a neighbour to surface.
            title = self.recommendations[movie][self.exploration][0]
            title_list.append(title)
        data = self.final_movie_df[self.final_movie_df['title'].isin(title_list)]\
            .sort_values("year", ascending=False)\
            .reset_index()
        return data
|
from django.contrib import admin
from . import models
# Expose the Publisher and Informations models in the Django admin site.
admin.site.register(models.Publisher)
admin.site.register(models.Informations)
|
#-*- encoding: utf-8 -*-
import netsvc
import pooler, tools
import math
import decimal_precision as dp
from tools.translate import _
from osv import fields, osv
def arrot(cr, uid, valore, decimali):
    # Round *valore* using the precision getter *decimali*, which is called
    # with the cursor and returns a (total_digits, decimal_digits) pair.
    decimal_digits = decimali(cr)[1]
    return round(valore, decimal_digits)
class FiscalDocHeader(osv.osv):
    # Extends the fiscal document header with CONAI exemption fields and
    # copies the partner's exemption data when the partner changes.
    _inherit = 'fiscaldoc.header'
    _columns = {
        'esenzione_conai':fields.many2one('conai.esenzioni', 'Tipo di Esenzione Conai'),
        'scad_esenzione_conai': fields.date('Scadenza Esenzione Conai', required=False, readonly=False),
    }
    def onchange_partner_id(self, cr, uid, ids, part, context):
        # Let the base class fill the standard values first.
        res = super(FiscalDocHeader, self).onchange_partner_id(cr, uid, ids, part, context)
        val = res.get('value', False)
        warning = res.get('warning', False)
        if part:
            part = self.pool.get('res.partner').browse(cr, uid, part)
            if part.esenzione:  # the partner has a CONAI exemption code
                val['esenzione_conai'] = part.esenzione.id
                val['scad_esenzione_conai'] = part.scad_esenzione
        return {'value': val, 'warning': warning}
FiscalDocHeader()
class FiscalDocRighe(osv.osv):
    # Extends fiscal document lines with CONAI code, weight, unit value and
    # a computed per-line CONAI total.
    _inherit = 'fiscaldoc.righe'
    def _tot_riga_conai(self, cr, uid, ids, field_name, arg, context=None):
        # Functional field: CONAI total = unit value * weight, reduced by the
        # header's exemption percentage when a non-expired exemption is set.
        res = {}
        if context is None:
            context = {}
        for line in self.browse(cr, uid, ids, context=context):
            if line.name.esenzione_conai and line.name.scad_esenzione_conai>=line.name.data_documento:  # a valid CONAI exemption applies
                # an exemption code exists: apply the percentage reduction
                res[line.id] = line.prezzo_conai * line.peso_conai
                res[line.id] = res[line.id] *(1-line.name.esenzione_conai.perc/100)
            else:
                res[line.id] = line.prezzo_conai * line.peso_conai
            res[line.id]= arrot(cr,uid,res[line.id],dp.get_precision('Account'))  # round to the 'Account' precision (usually 2 digits)
        return res
    _columns = {
        'cod_conai':fields.many2one('conai.cod', 'Codice Conai'),
        'peso_conai':fields.float('Peso Conai', digits=(2, 7)),
        'prezzo_conai':fields.float('Valore Unitario ', digits=(2, 7), required=False),
        'totale_conai': fields.function(_tot_riga_conai, method=True, string='Totale riga Conai', digits_compute=dp.get_precision('Account')),
    }
    def onchange_articolo(self, cr, uid, ids, product_id, listino_id, qty, partner_id, data_doc, uom, context):
        # When the product changes, copy its CONAI code, per-unit weight * qty
        # and unit value onto the line.
        res = super(FiscalDocRighe, self).onchange_articolo(cr, uid, ids, product_id, listino_id, qty, partner_id, data_doc, uom, context)
        v = res.get('value', False)
        warning = res.get('warning', False)
        domain = res.get('domain', False)
        if product_id:
            art_obj = self.pool.get("product.product").browse(cr, uid, [product_id])[0]
            if art_obj.conai.id:
                prz_conai = art_obj.conai.valore
                v['cod_conai'] = art_obj.conai.id
                v['peso_conai'] = art_obj.production_conai_peso * qty
                v['prezzo_conai'] = prz_conai
            else:
                # NOTE(review): cod_conai is a many2one; resetting it to 0.0
                # is odd (False is conventional) -- confirm before changing.
                v['cod_conai'] = 0.0
                v['peso_conai'] = 0.0
                v['prezzo_conai'] = 0.0
        return {'value': v, 'domain': domain, 'warning': warning}
    def on_change_qty(self, cr, uid, ids, product_id, listino_id, qty, partner_id, uom, data_doc, context):
        # Same CONAI propagation as onchange_articolo, triggered by quantity
        # changes (duplicated logic kept as-is).
        res = super(FiscalDocRighe, self).on_change_qty(cr, uid, ids, product_id, listino_id, qty, partner_id, uom, data_doc, context)
        v = res.get('value', False)
        warning = res.get('warning', False)
        domain = res.get('domain', False)
        if product_id:
            art_obj = self.pool.get("product.product").browse(cr, uid, [product_id])[0]
            if art_obj.conai.id:
                prz_conai = art_obj.conai.valore
                v['cod_conai'] = art_obj.conai.id
                v['peso_conai'] = art_obj.production_conai_peso * qty
                v['prezzo_conai'] = prz_conai
            else:
                v['cod_conai'] = 0.0
                v['peso_conai'] = 0.0
                v['prezzo_conai'] = 0.0
        return {'value': v, 'domain': domain, 'warning': warning}
FiscalDocRighe()
class conai_castelletto(osv.osv):
    # Per-document CONAI summary ("castelletto"): one row per
    # (CONAI code, VAT code) pair with accumulated weight and taxable total.
    _name = "conai.castelletto"
    _description = "Castelletto CONAI"
    _columns = {
        'name': fields.many2one('fiscaldoc.header', 'Numero Documento', required=True, ondelete='cascade', select=True, readonly=True),
        'imballo':fields.many2one('conai.cod', 'Codice CONAI', required=True, ondelete='cascade', select=True, readonly=True),
        'codice_iva':fields.many2one('account.tax', 'Codice Iva', readonly=False, required=True),
        'peso':fields.float('Peso', digits=(12, 7)),
        'totale_conai':fields.float('Totale Imponibile', digits_compute=dp.get_precision('Account')),
    }
    def agg_tot_conai(self, cr, uid, ids, context):
        # Rebuild the CONAI summary for the given document from its lines.
        if ids:
            lines = self.pool.get('fiscaldoc.righe').search(cr, uid, [('name', '=', ids)])  # NB: assumes a single document is being processed
            idsd = self.search(cr, uid, [('name', '=', ids)])
            if idsd:
                ok = self.unlink(cr,uid,idsd)  # delete the existing summary rows first
            riga_cast = {}
            for riga in self.pool.get('fiscaldoc.righe').browse(cr, uid, lines, context=context):
                if riga.cod_conai:  # the line carries CONAI data
                    par = [('name','=',riga.name.id),('imballo','=',riga.cod_conai.id),('codice_iva','=',riga.codice_iva.id)]
                    id_cats = self.search(cr,uid,par)
                    if id_cats:
                        for indice in id_cats:
                            # a matching row already exists: accumulate totals
                            peso = self.browse(cr,uid,indice).peso+riga.peso_conai
                            totale_conai = self.browse(cr,uid,indice).totale_conai+riga.totale_conai
                            ok = self.write(cr,uid,indice,{'peso':peso,'totale_conai':totale_conai})
                    else:
                        # first line for this (code, VAT) pair: create the row
                        cast_riga = {
                            'name':riga.name.id,
                            'imballo':riga.cod_conai.id,
                            'codice_iva':riga.codice_iva.id,
                            'peso':riga.peso_conai,
                            'totale_conai':riga.totale_conai,
                        }
                        idcast = self.create(cr,uid,cast_riga)
        return True
conai_castelletto()
class FiscalDocIva(osv.osv):
    _inherit = "fiscaldoc.iva"
    def agg_righe_iva(self, cr, uid, ids, context):
        # Rebuild the VAT summary, fold the CONAI taxable totals into the
        # matching VAT rows, then recompute each row's tax amount.
        def get_perc_iva(self, cr, uid, ids, idiva, context):
            # Read the tax rate ('amount') for the given account.tax id.
            dati = self.pool.get('account.tax').read(cr, uid, [idiva], (['amount', 'type']), context=context)
            return dati[0]['amount']
        res = super(FiscalDocIva, self).agg_righe_iva(cr, uid, ids, context)  # first rebuild the standard VAT summary
        res = self.pool.get('conai.castelletto').agg_tot_conai(cr,uid,ids,context)
        conai_ids = self.pool.get('conai.castelletto').search(cr,uid,[('name','=',ids)])
        if conai_ids:  # there are CONAI summary rows for this document
            for riga_cast in self.pool.get('conai.castelletto').browse(cr,uid,conai_ids):
                iva_id = self.pool.get("fiscaldoc.iva").search(cr,uid,[('name','=',riga_cast.name.id),('codice_iva','=',riga_cast.codice_iva.id)])
                if iva_id:  # add only the taxable base to the VAT row
                    iva={}
                    iva['imponibile']= self.pool.get("fiscaldoc.iva").browse(cr,uid,iva_id[0]).imponibile+riga_cast.totale_conai
                    ok = self.pool.get("fiscaldoc.iva").write(cr,uid,[iva_id[0]],iva)
        # now recompute the tax amount on every VAT row
        righe_iva = self.pool.get("fiscaldoc.iva").search(cr,uid,[('name','=',ids)])
        for rg_iva in self.pool.get("fiscaldoc.iva").browse(cr,uid,righe_iva):
            perc_iva = get_perc_iva(self, cr, uid, ids, rg_iva.codice_iva.id, context)
            imposta = rg_iva.imponibile * perc_iva
            imposta = arrot(cr,uid,imposta,dp.get_precision('Account'))
            ok = self.pool.get("fiscaldoc.iva").write(cr,uid,[rg_iva.id],{'imposta':imposta})
FiscalDocIva()
|
# Prompt for one line on stdin and echo it back.
print(input('a=:'))
from urllib import request
from bs4 import BeautifulSoup
import pymysql
class spider:
    # Scrape article titles from jianshu.com's front page and store
    # (title, absolute URL) rows in a local MySQL table.
    def __init__(self):
        self.url = r"http://www.jianshu.com"
        # Browser-like User-Agent; presumably the site blocks the default
        # urllib agent -- confirm.
        self.header = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'}
        # NOTE(review): credentials are hard-coded; move to config/env vars.
        self.db_config = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '111111',
            'db': 'pytest',
            'charset': 'utf8'
        }
        self.connction = pymysql.connect(**self.db_config)
    def spider_data_base(self):
        # Fetch and parse the front page.
        page = request.Request(self.url, headers=self.header)
        page_info = request.urlopen(page).read().decode("utf-8")
        soup = BeautifulSoup(page_info, 'html.parser')
        # <a class="title"> anchors hold article titles and relative hrefs.
        urls = soup.find_all('a', 'title')
        print(urls)
        try:
            with self.connction.cursor() as cursor:
                # Parameterized statement: safe against SQL injection.
                sql = 'insert into titles(title,url) value (%s,%s)'
                for u in urls:
                    cursor.execute(sql, (u.string, r'http://www.jianshu.com' + u.attrs['href']))
                self.connction.commit()
        finally:
            # Always release the connection, even when the insert fails.
            self.connction.close()
# Run the scrape immediately when the module is executed.
spider().spider_data_base()
|
#import sys
#input = sys.stdin.readline
def a_win(x, y):
    """Return True when hand *x* ties or beats hand *y*.

    Encoding: 0 = Rock, 1 = Paper, 2 = Scissors.
    """
    # A tie counts as a win for x; otherwise paper > rock,
    # rock > scissors and scissors > paper.
    return x == y or (x, y) in {(1, 0), (0, 2), (2, 1)}
def main():
    # Simulate a 2^k-round rock-paper-scissors knockout where the hand
    # sequence of length n repeats cyclically (competitive-programming style);
    # prints the winning hand.
    n, k = map(int,input().split())
    s = list(input())
    R = 0
    P = 1
    S = 2
    G = []
    # Encode the hand string as integers.
    for t in s:
        if t == "R":
            G.append(R)
        elif t == "P":
            G.append(P)
        else:
            G.append(S)
    gamari = G[-pow(2,k,n):]
    q = n
    r = len(gamari)
    # 2^k = q*?+r
    for kk in range(k,0,-1):
        if q == 1:
            # Only one distinct hand left; further rounds change nothing.
            break
        H = []
        if q%2 == 0:
            # Even period: pair adjacent hands; winners form the next round.
            for i in range(0,q,2):
                a, b = G[i], G[i+1]
                if a_win(a,b):
                    H.append(a)
                else:
                    H.append(b)
            hq = q//2
        else:
            # Odd period: pairing wraps around the cycle, so the period stays q.
            for i in range(0,q*2,2):
                a, b = G[i%q], G[(i+1)%q]
                if a_win(a,b):
                    H.append(a)
                else:
                    H.append(b)
            hq = q
        G = H
        q = hq
    # Decode the surviving hand back to a letter.
    if G[0] == 0:
        print("R")
    elif G[0] == 1:
        print("P")
    else:
        print("S")
    # (abandoned remainder-handling experiment kept for reference)
    # hr = pow(2,kk-1,hq)
    # hamari = []
    # for i in range(0,2,r):
    #     a, b = gamari[i], gamari[i+1]
    #     if a_win(a,b):
    #         hamari.append(a)
    #     else:
    #         hamari.append(b)
    # r //= 2
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
"""
test for the Interface module.
"""
import unittest
from base_test import PschedTestBase
from pscheduler.interface import interface_affinity, source_interface, LocalIPList
class TestInterface(PschedTestBase):
    """
    Interface tests.
    """
    # The following are wrappers around another library and don't need
    # testing:
    #   source_interface
    #   source_affinity
    def test_local_ip(self):
        """LocalIPList should contain the loopback address."""
        localips = LocalIPList(refresh=5)
        self.assertTrue('127.0.0.1' in localips)
    def test_source_interface(self):
        """source_interface on loopback should return the address itself
        and a non-None interface name."""
        ip = '127.0.0.1'
        address, interface = source_interface(ip)
        self.assertEqual(address, ip)
        self.assertNotEqual(interface, None)
if __name__ == '__main__':
    unittest.main()
|
import rhinoscriptsyntax as rs
import math as ma
import random as rd
def delete_all():
    """Delete every point object (type 0) in the current Rhino document."""
    rs.DeleteObjects(rs.ObjectsByType(0))
def delete_something(n):
    """Delete every Rhino object whose type filter matches *n*."""
    rs.DeleteObjects(rs.ObjectsByType(n))
# Firefly-algorithm parameters, read as module globals by Firefly.move()
# (meanings assumed from their usage there -- confirm):
a = 0.1    # weight of the random step
b = 0.01   # base attractiveness between two flies
g = 1.0    # absorption coefficient in exp(-g*r^2)
t = 0.0    # simulated time; damps randomness via exp(-t)
dt = 0.01  # time increment per step
L = 0.5    # span of the uniform jitter (rd.random()-0.5)*L
class Firefly:
    # One firefly of the swarm optimisation; brighter flies attract others.
    def __init__(self, nod, x, group, nop):
        # nod: number of dimensions; x: position vector;
        # group: the shared swarm list; nop: population size.
        self.nod = nod
        self.x = x
        self.group = group
        self.nop = nop
    def evaluate(self):
        # Brightness = objective f(x,y) = x*y*sin(2*pi*x)*cos(2*pi*y).
        func = self.x[0]*self.x[1]*ma.sin(2*ma.pi*self.x[0])*ma.cos(2*ma.pi*self.x[1])
        self.ri = func
    def move(self):
        # Move toward every brighter fly; reads module globals a, b, g, t, L.
        for target in self.group:
            if self.ri < target.ri:
                r2 =0.0
                # Squared Euclidean distance to the brighter fly.
                for i in range(0,self.nod):
                    r2 = r2 + (self.x[i]-target.x[i])**2
                for i in range(0,self.nod):
                    ex = (rd.random() - 0.5)*L
                    # Attraction term decays with distance; random term with time.
                    self.x[i] = self.x[i] + b*ma.exp(-g*r2)*(target.x[i]-self.x[i])+ a*ex*ma.exp(-t)
    def display(self):
        # Draw the fly as a small circle, scaled up for visibility.
        rs.AddCircle([self.x[0]*50, self.x[1]*50, 0], 0.5)
# Shared swarm list and the search-space dimensionality.
swarm = []
nod = 2
def look_for_firefly(nop):
for i in range(0, nop):
x =[]
for k in range(0, nod):
x.append(rd.random())
swarm.append(Firefly(nod, x, swarm, nop))
t = 0.0
dt = 0.01
for step in range(0, 500):
t = t + dt
for one in swarm:
one.evaluate()
for one in swarm:
one.move()
for one in swarm:
one.display()
print one.x[0],one.x[1]
look_for_firefly(50)
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Toy multiple linear regression (classic pizza example): each sample is
# [diameter, topping count] and the target is the price.
X = [[6, 2], [8, 1], [10, 0], [14, 2], [18, 0]]
y = [[7], [9], [13], [17.5], [18]]
model = LinearRegression()
model.fit(X,y)
# Held-out samples to evaluate the fitted model.
X_test = [[8, 2], [9, 0], [11, 2], [16, 2], [12, 0]]
y_test = [[11], [8.5], [15], [18], [11]]
predictions = model.predict(X_test)
print(predictions)
print("="*40)
# for i, prediction in enumerate(predictions):
#     print(prediction, y_test[i])
# r-squared is the proportion of the variance in the response variable that is explained by the model.
# An r-squared score of one indicates that the response variable can be predicted without any error using the model.
# An r-squared score of one half indicates that half of the variance in the response variable can be predicted using the model.
r_sq = model.score(X_test, y_test) # R^2= coefficient of determination
print('R^2:', r_sq)
print("="*40)
|
#!/usr/bin/env python
# coding: utf-8
# In[167]:
import pandas as pd
import numpy as np
import warnings
import re
import nltk
import seaborn as sns
import matplotlib.pyplot as plt
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
import warnings
warnings.filterwarnings(action = 'ignore')
import gensim
from gensim.models import Word2Vec,Phrases
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Bidirectional
from keras.layers.embeddings import Embedding
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,confusion_matrix
nltk.download('wordnet')
# In[118]:
trainset=pd.read_csv('/Users/molly1998/Desktop/python//train.csv')
testset=pd.read_csv('/Users/molly1998/Desktop/python//test.csv')
dataset=pd.concat([trainset,testset],ignore_index=True)
dataset_comm=np.concatenate([trainset['text'],testset['text']],axis=0)
dataset_tar=np.concatenate([trainset['sentiment'],testset['sentiment']],axis=0)
len(dataset_comm)
len(dataset_tar)
# In[82]:
dataset_comm[1]
# In[83]:
en_stops = set(stopwords.words('english'))
# In[104]:
def clean_review(comments: str) -> str:
    """Replace every character that is not a letter or whitespace with a
    space, then lowercase the result."""
    only_letters = re.sub(r'[^A-Za-z\s]', " ", comments)
    return only_letters.lower()
def lemmatize(tokens: list) -> list:
    """Noun-lemmatize then verb-lemmatize each token, dropping English stop
    words from the result."""
    lemmatizer = WordNetLemmatizer()
    verb_lemmas = [WordNetLemmatizer().lemmatize(lemmatizer.lemmatize(tok), "v")
                   for tok in tokens]
    return [word for word in verb_lemmas if word not in en_stops]
def preprocess(review: str, total: int, show_progress: bool = True) -> list:
    """Clean, tokenise and lemmatise one review; optionally print progress."""
    if show_progress:
        # Progress counter is a module-level global shared across calls.
        global counter
        counter += 1
        print('Processing... %6i/%6i'% (counter, total), end='\r')
    review = clean_review(review)
    tokens = word_tokenize(review)
    lemmas = lemmatize(tokens)
    return lemmas
counter=0
# Preprocess every comment (slow: runs the full NLTK pipeline per review).
all_comments=list(map(lambda x: preprocess(x,len(dataset_comm)),dataset_comm))
# In[107]:
all_comments[1]
# In[121]:
dataset_tar[dataset_tar=="neg"]=0
dataset_tar[dataset_tar=="pos"]=1
##check if data is balanced
sns.countplot(x='sentiment', data=dataset)
# In[141]:
##count words in each comment
dataset['count_words']=list(map(lambda x: len(x),all_comments))
dataset['count_words']
fig, ax = plt.subplots()
sns.distplot(dataset['count_words'], bins=dataset['count_words'].max(),
hist_kws={"alpha": 0.9, "color": "red"}, ax=ax,
kde_kws={"color": "black", 'linewidth': 3})
ax.set_xlim(left=0, right=np.percentile(dataset['count_words'], 95))
ax.set_xlabel('Words in Comments')
ymax = 0.014
plt.ylim(0, ymax)
ax.set_title('Words per comments distribution', fontsize=20)
plt.legend()
plt.show()
# data=data_senti.copy(deep=True)
# X_train=data.loc[:35000,'text']
# y_train=data.loc[:35000,'sentiment']
# X_test=data.loc[35000:,'text']
# y_test=data.loc[35000:,'sentiment']
# x_word2_train=X_train.copy()
# x_word2_test=X_test.copy()
# In[146]:
# In[146]:
# Learn common bigram and trigram phrases so multi-word expressions
# (e.g. "fell_love") become single tokens.
bigrams = Phrases(sentences=all_comments)
trigrams = Phrases(sentences=bigrams[all_comments])
# In[147]:
print(trigrams[bigrams[all_comments[1]]])
# In[148]:
# BUG FIX: the original called `word2vec.Word2Vec`, but only the Word2Vec
# class was imported (`from gensim.models import Word2Vec,Phrases`), so this
# line raised NameError.
# NOTE(review): `size=` is the gensim 3.x keyword; gensim>=4 renamed it to
# `vector_size=` -- confirm the installed version.
w2v_model = Word2Vec(sentences = trigrams[bigrams[all_comments]], min_count = 35, size = 256,
                     window = 8, workers=5, sample=1e-3)
w2v_model.init_sims(replace=True)
# In[153]:
w2v_model.wv.most_similar("love")
# In[163]:
print(list(w2v_model.wv.vocab.keys()).index("love"))
print(list(w2v_model.wv.vocab.keys()).index("fell_love"))
# In[156]:
get_ipython().run_cell_magic('time', '', "def vectorize(text,vocabulary):\n keys = list(vocabulary.keys())\n filter_unknown = lambda x: vocabulary.get(x, None) is not None\n encode = lambda x: list(map(keys.index, filter(filter_unknown, x)))\n vectorized = list(map(encode, text))\n return vectorized\npadded=pad_sequences(vectorize(trigrams[bigrams[all_comments]],w2v_model.wv.vocab),maxlen=250,padding='post')")
# In[200]:
# Hold out 15% of the padded sequences for validation.
X_train, X_test, y_train, y_test = train_test_split(padded,dataset_tar,test_size=0.15,shuffle=True,random_state=42)
# In[201]:
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `int` yields the same platform-default integer dtype.
X_train=np.asarray(X_train).astype(int)
X_test=np.asarray(X_test).astype(int)
y_test=np.asarray(y_test).astype(int)
y_train=np.asarray(y_train).astype(int)
# In[235]:
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Bidirectional,BatchNormalization,Flatten
from keras.layers.embeddings import Embedding
def build_model(embedding_matrix: np.ndarray, input_length: int):
    """Build the Keras sentiment classifier.

    Architecture: frozen pretrained embedding -> Dense(64) -> Dropout ->
    Flatten -> sigmoid output for binary classification.
    """
    model = Sequential()
    model.add(Embedding(
        input_dim = embedding_matrix.shape[0],
        output_dim = embedding_matrix.shape[1],
        input_length = input_length,
        weights = [embedding_matrix],
        trainable=False))  # keep the word2vec vectors fixed during training
    #model.add(Bidirectional(LSTM(88, recurrent_dropout=0.1)))
    #model.add(Dense(32))
    #model.add(BatchNormalization())
    model.add(Dense(64))
    model.add(Dropout(0.25))
    model.add(Flatten())
    #model.add(Dense(128))
    #model.add(Dropout(0.15))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    return model
# Assemble the model around the trained word2vec embedding matrix and compile
# it for binary sentiment classification.
model = build_model(
    embedding_matrix=w2v_model.wv.vectors,
    input_length=250)
model.compile(
    loss="binary_crossentropy",
    optimizer='adam',
    metrics=['accuracy'])
# In[236]:
# NOTE(review): despite the name, fit() returns a History object, not a model.
mlp_model = model.fit(
    x=X_train,
    y=y_train,
    validation_data=(X_test, y_test),
    batch_size=100,
    epochs=20)
# In[237]:
y_train_pred = model.predict_classes(X_train)
y_test_pred = model.predict_classes(X_test)
def plot_confusion_matrix(y_true, y_pred, ax, class_names, vmax=None,
                          normed=True, title='Confusion matrix'):
    """Draw a confusion matrix (optionally row-normalised) onto *ax*."""
    cm = confusion_matrix(y_true, y_pred)
    if normed:
        # Normalise each row so cells read as per-true-class rates.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    sns.heatmap(cm, vmax=vmax, annot=True, square=True, ax=ax,
                cmap=plt.cm.Blues_r, cbar=False, linecolor='black',
                linewidths=1, xticklabels=class_names)
    ax.set_title(title, y=1.20, fontsize=16)
    ax.set_xlabel('Predicted labels', y=1.10, fontsize=12)
    ax.set_yticklabels(class_names, rotation=0)
# Side-by-side confusion matrices: train data on the left, test on the right.
fig, (axis1, axis2) = plt.subplots(nrows=1, ncols=2)
plot_confusion_matrix(y_test, y_test_pred, ax=axis2,
                      title='Confusion matrix (test data)',
                      class_names=['Positive', 'Negative'])
plot_confusion_matrix(y_train, y_train_pred, ax=axis1,
                      title='Confusion matrix (train data)',
                      class_names=['Positive', 'Negative'])
# In[ ]:
|
# Generated by Django 2.0.4 on 2019-01-23 23:22
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``parameters`` field from ``pmgbpmodel``."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('PMGMP', '0001_initial'),
    ]

    operations = [
        # Removes the column; its data is lost when this migration is applied.
        migrations.RemoveField(
            model_name='pmgbpmodel',
            name='parameters',
        ),
    ]
|
/Users/rasmuslevinsson/anaconda3/lib/python3.6/io.py |
from django.http import HttpResponse
from django.shortcuts import render
from .models import Books, Authors
def main_title(request):
    """Render the site landing page."""
    template = "main_title.html"
    return render(request, template)
def books_title(request):
    """Render the page listing every book."""
    return render(request, "books_title.html", {"books": Books.objects.all()})
def authors_title(request):
    """Render the page listing every author."""
    return render(request, "authors_title.html", {"authors": Authors.objects.all()})
def book_title(request, id_book):
    """Render one book's detail page, including a link back to its author."""
    book = Books.objects.get(id_book=id_book)
    author = Authors.objects.get(author=book.author)
    ctx = {
        "book_name": book.book_name,
        "book_author": book.author,
        "book_description": book.description,
        "id_author": author.id_author,
    }
    return render(request, "book_title.html", ctx)
def author_title(request, id_author):
    """Render one author's detail page with all of their books."""
    author = Authors.objects.get(id_author=id_author)
    ctx = {
        'author': author.author,
        "books": Books.objects.filter(author=author),
        "author_description": author.description,
    }
    return render(request, "author_title.html", ctx)
|
import sqlite3
import json
from typing import List
class SQLiteLoader():
    """Reads movie data out of SQLite and reshapes it into a list of plain
    dicts ready for further processing by PostgresSaver.

    Parameters
    ----------
    connection : sqlite3.Connection
        An open SQLite connection.
    """

    SQL = """
    /* Используем CTE для читаемости. Здесь нет прироста
    производительности, поэтому можно поменять на subquery */
    WITH x as (
        -- Используем group_concat, чтобы собрать id и имена
        -- всех актёров в один список после join'а с таблицей actors
        -- Отметим, что порядок id и имён совпадает
        -- Не стоит забывать про many-to-many связь между
        -- таблицами фильмов и актёров
        SELECT m.id, group_concat(a.id) as actors_ids, group_concat(a.name) as actors_names
        FROM movies m
        LEFT JOIN movie_actors ma on m.id = ma.movie_id
        LEFT JOIN actors a on ma.actor_id = a.id
        GROUP BY m.id
    )
    -- Получаем список всех фильмов со сценаристами и актёрами
    SELECT m.id, genre, director, title, plot, imdb_rating, x.actors_ids, x.actors_names,
    /* Этот CASE решает проблему в дизайне таблицы:
    если сценарист всего один, то он записан простой строкой
    в столбце writer и id. В противном случае данные
    хранятся в столбце writers и записаны в виде
    списка объектов JSON.
    Для исправления ситуации применяем хак:
    приводим одиночные записи сценаристов к списку
    из одного объекта JSON и кладём всё в поле writers */
    CASE
        WHEN m.writers = '' THEN '[{"id": "' || m.writer || '"}]'
        ELSE m.writers
    END AS writers
    FROM movies m
    LEFT JOIN x ON m.id = x.id
    """

    def __init__(self, connection: sqlite3.Connection):
        self.conn = connection
        # Return rows as dicts rather than tuples for all queries.
        self.conn.row_factory = self.dict_factory

    @staticmethod
    def dict_factory(cursor: sqlite3.Cursor, row: tuple) -> dict:
        """Row factory mapping column names to values (SQLite ships none)."""
        return {column[0]: row[index] for index, column in enumerate(cursor.description)}

    def load_writers_names(self) -> dict:
        """Fetch every writer up front (they cannot be joined into the main
        query) and key them by id.

        Returns
        -------
        dict
            {"writer_id": {"id": ..., "name": ...}, ...}
        """
        # DISTINCT guards against duplicate rows in the source table.
        return {
            entry["id"]: entry
            for entry in self.conn.execute("SELECT DISTINCT id, name FROM writers")
        }

    def _transform_row(self, row: dict, writers: dict) -> dict:
        """Convert one raw SQLite row into the internal representation.

        Performed clean-ups:
        1) the comma-separated genre string becomes a list of genres;
        2) the writers JSON id list is de-duplicated and enriched with the
           names fetched by load_writers_names;
        3) actors are rebuilt from the paired actors_ids/actors_names columns;
        4) 'N/A' placeholders for rating, director and plot become None.

        Parameters
        ----------
        row : dict
            One row from the database query.
        writers : dict
            All known writers keyed by id.

        Returns
        -------
        dict
            The cleaned row.
        """
        unique_writers = []
        seen_ids = set()
        for entry in json.loads(row['writers']):
            writer_id = entry['id']
            if writers[writer_id]['name'] != 'N/A' and writer_id not in seen_ids:
                unique_writers.append(writers[writer_id])
                seen_ids.add(writer_id)

        actors_names = []
        if row['actors_ids'] is not None and row['actors_names'] is not None:
            actors_names = [name for name in row['actors_names'].split(',') if name != 'N/A']

        return {
            "id": row['id'],
            "genre": row['genre'].replace(' ', '').split(','),
            "actors": actors_names,
            "writers": [writer['name'] for writer in unique_writers],
            "imdb_rating": float(row['imdb_rating']) if row['imdb_rating'] != 'N/A' else None,
            "title": row['title'],
            "director": [
                part.strip() for part in row['director'].split(',')
            ] if row['director'] != 'N/A' else None,
            "description": row['plot'] if row['plot'] != 'N/A' else None
        }

    def load_movies(self) -> List[dict]:
        """Load and transform every movie row.

        Returns
        -------
        List[dict]
            The cleaned-up movie records.
        """
        writers = self.load_writers_names()
        return [self._transform_row(row, writers) for row in self.conn.execute(self.SQL)]
|
from rlib.objectmodel import we_are_translated
from rlib.osext import raw_input
from som.vm.globals import nilObject
from som.vm.symbols import symbol_for
class Shell(object):
    """Interactive read-eval-print loop for SOM statements."""

    def __init__(self, universe):
        self.universe = universe

    def start(self):
        """Run the REPL until the user types "quit" (or just presses enter).

        Each statement is wrapped in a freshly named class with a run: method,
        compiled via the universe, and invoked; the previous result is passed
        back in as `it`.
        """
        from som.vm.universe import std_println, error_println

        counter = 0
        it = nilObject
        std_println('SOM Shell. Type "quit" to exit.\n')

        while True:
            try:
                stmt = raw_input("---> ")
                if stmt in ("quit", ""):
                    return it
                if stmt == "\n":
                    continue

                # Wrap the statement in a throw-away class with a run: method.
                stmt = (
                    "Shell_Class_"
                    + str(counter)
                    + " = ( run: it = ( | tmp | tmp := ("
                    + stmt
                    + " ). 'it = ' print. ^tmp println ) )"
                )
                counter += 1

                # Compile and load the generated class; on success, run it
                # and keep the result for the next iteration's `it`.
                clazz = self.universe.load_shell_class(stmt)
                if clazz:
                    instance = self.universe.new_instance(clazz)
                    run_method = clazz.lookup_invokable(symbol_for("run:"))
                    it = run_method.invoke_2(instance, it)
            except Exception as ex:  # pylint: disable=broad-except
                if not we_are_translated():  # this cannot be done in rpython
                    import traceback
                    traceback.print_exc()
                error_println("Caught exception: %s" % ex)
|
#!/usr/bin/env python
import numpy as np
from math import sqrt
DEFAULT_C1 = 1e-4
EPS = 1e-7
NEWTON_C2 = 0.9
CG_C2 = 0.1 # better c2 for conjugate gradient according to Nocedal/Wright
DEFAULT_C2 = NEWTON_C2
def backtracking_linesearch(f, df, x, p, **kwargs):
    """
    Convenience wrapper for _backtracking_linesearch.

    Builds the 1-D restriction phi(alpha) = f(x + alpha * p) and its
    directional derivative, counting how often each is evaluated.

    Parameters
    ----------
    f : callable
        objective function of x
    df : callable
        gradient of f
    x : array_like
        current point at which the line search starts
    p : array_like
        search direction
    **kwargs
        forwarded to _backtracking_linesearch (c1, c2, alpha_init, ...)

    Returns
    ----------
    ((alpha, fval, dfval), func_calls, dfunc_calls) : tuple
        alpha is None when the line search fails; the two trailing counters
        report how many times phi and dphi were called (for testing/debugging).
    """
    n_f = [0]
    n_df = [0]

    def phi(step):
        n_f[0] += 1
        return f(x + step * p)

    def dphi(step):
        n_df[0] += 1
        return np.dot(df(x + step * p), p)

    return _backtracking_linesearch(phi, dphi, **kwargs), n_f[0], n_df[0]
def _backtracking_linesearch(f, df, alpha_init=1, max_iter=40, shrink_factor=0.5, grow_factor=2.1, c1=DEFAULT_C1, \
                             c2=DEFAULT_C2, min_step_size=1e-10, max_step_size=100, \
                             use_wolfe=True, use_strong_wolfe=True, decay=None, decay_iter_start=5):
    """
    Backtracking line search on the 1-D function f(alpha), with optional
    (strong) Wolfe curvature tests.

    Parameters
    ----------
    f : callable
        phi(alpha) = g(x + alpha * pk), where pk is the direction vector;
        only alpha varies inside this routine.
    df : callable
        derivative of f, already projected onto the search direction.
    alpha_init : float, optional
        starting step length.
    max_iter : int, optional
        maximum number of shrink/grow iterations.
    shrink_factor, grow_factor : float, optional
        multipliers applied to alpha when a condition fails.
    c1 : float, optional
        typically 1e-4; factor for the sufficient decrease/Armijo condition.
    c2 : float, optional
        typically 0.1 for nonlinear conjugate gradient, 0.9 for (quasi-)Newton;
        factor for the curvature condition.
    min_step_size, max_step_size : float, optional
        bounds outside which the search gives up and returns alpha=None.
    use_wolfe, use_strong_wolfe : bool, optional
        enable the plain / strong Wolfe curvature tests.
    decay : float or None, optional
        if set, damps the grow/shrink factors after decay_iter_start iterations.

    Returns
    ----------
    (alpha, fk, dfk) : tuple
        alpha is None on failure (step out of bounds, or iteration budget
        exhausted); fk/dfk are the last function/derivative evaluations.

    Conditions:
    0 < c1 < c2 < 1
    """
    # A shrink/grow pair whose product is exactly 1 could cycle forever.
    assert shrink_factor * grow_factor != 1.0
    assert c1 < 0.5
    assert c1 > 0.0
    assert c2 > c1
    assert c2 < 1.0
    f0, df0 = f(0.0), df(0.0)
    fk, dfk = f(alpha_init), df(alpha_init)
    alpha = alpha_init
    #print("k, alpha, fk, f0, c1, df0, f0 + c1 * alpha * df0")
    for k in range(max_iter):
        if decay and k > decay_iter_start:
            it = k - decay_iter_start
            # NOTE(review): the factors are compounded (*=) with a term that
            # itself contains the factor, so the damping accelerates with k —
            # confirm this matches the intended decay schedule.
            grow_factor *= max(1.0, grow_factor * 1. / (1. + (decay * it)))
            shrink_factor *= min(1.0, shrink_factor * (1. + (decay * it)/ 1.))
        fk = f(alpha)
        dfk = df(alpha)
        # pheta in nocedal wright, for changing alpha
        alpha_mult = 1.0
        # sufficient decrease condition
        #print(k, alpha, fk, f0, c1, df0, f0 + c1 * alpha * df0, "before sufficient decrease")
        if fk > (f0 + c1 * alpha * df0):
            alpha_mult = shrink_factor
        # curvature condition (plain Wolfe): slope still too negative -> grow
        elif use_wolfe and dfk < c2 * df0:
            alpha_mult = grow_factor
        # strong Wolfe: slope overshot on the positive side -> shrink
        elif use_strong_wolfe and dfk > -c2 * df0:
            alpha_mult = shrink_factor
        else:
            # converged: every enabled condition holds at alpha
            #print("converged")
            break
        alpha *= alpha_mult
        if alpha < min_step_size:
            #print("Step size got too small in backtracking line search")
            return None, fk, dfk
        elif alpha > max_step_size:
            #print("Step size got too big in backtracking line search")
            return None, fk, dfk
    else:
        # for/else: the loop exhausted max_iter without hitting `break`
        #print("line search didn't converge")
        return None, fk, dfk
    return alpha, fk, dfk
#########################################################################################################
def interpolating_line_search(f, df, x, p, **kwargs):
    """
    Wrapper around _interpolating_line_search (Nocedal and Wright).

    Only the Armijo/sufficient-decrease condition is enforced; curvature and
    strong Wolfe checks are not performed by the underlying routine.

    Parameters
    ----------
    f : callable
        objective function of x
    df : callable
        gradient of f
    x : array_like
        current point at which we are doing line search
    p : array_like
        direction vector
    **kwargs
        forwarded to _interpolating_line_search (c1, alpha_init, ...)

    Returns
    ----------
    ((alpha, fval, dfval), func_calls, dfunc_calls) : tuple
        line-search result plus evaluation counters for phi and dphi.
    """
    n_f = [0]
    n_df = [0]

    def phi(step):
        n_f[0] += 1
        return f(x + step * p)

    def dphi(step):
        n_df[0] += 1
        return np.dot(df(x + step * p), p)

    return _interpolating_line_search(phi, dphi, **kwargs), n_f[0], n_df[0]
def _interpolating_line_search(f, df, alpha_init=1, max_iter=40, c1=DEFAULT_C1, \
                               c2=DEFAULT_C2, min_step_size=1e-10, max_step_size=100, **kwargs):
    """
    Interpolating line search (Nocedal & Wright, §3.5): try the initial step,
    then a quadratic fit, then repeated cubic fits, accepting the first trial
    that satisfies the sufficient-decrease (Armijo) condition.

    Returns (alpha, fval, dfval); alpha is None if no acceptable step is found
    before the trial step shrinks below min_step_size.

    NOTE(review): max_iter, c2 and max_step_size are accepted but never used —
    the while loop is bounded only by alpha_quad shrinking.
    """
    assert c1 < 0.5
    assert c1 > 0.0
    assert c2 > c1
    assert c2 < 1.0
    f0, df0 = f(0.0), df(0.0)
    fk, dfk = f(alpha_init), df(alpha_init)
    alpha0 = alpha_init
    if fk <= (f0 + c1 * alpha0 * df0):
        # initial step already satisfies sufficient decrease
        return alpha0, fk, dfk
    # quadratic interpolation: exact minimiser of the quadratic model built
    # from f(0), f'(0) and f(alpha0)
    alpha_quad = - (df0 * (alpha0 ** 2.0)) / \
        (2.0 * (fk - f0 - (df0 * alpha0)))
    fquad = f(alpha_quad)
    dfquad = df(alpha_quad)
    if fquad <= (f0 + c1 * alpha0 * df0):
        return alpha_quad, fquad, dfquad
    while alpha_quad > min_step_size:
        # do cubic interpolation from the two most recent trial points
        denom = alpha0 ** 2.0 * alpha_quad ** 2.0 * (alpha_quad - alpha0)
        row1 = (fquad - f0 - (df0 * alpha_quad))
        row2 = (fk - f0 - df0 * alpha0)
        a = (((alpha0 ** 2.0) * row1) + \
            (-(alpha_quad ** 2.0) * row2)) / denom
        b = ((-(alpha0 ** 3.0) * row1) + \
            ((alpha_quad ** 3.0) * row2)) / denom
        # EPS keeps the division defined when the cubic coefficient a vanishes
        alpha_cubic = (-b + np.sqrt(abs(b**2 - 3 * a * df0))) / (3.0*a + EPS)
        fcubic = f(alpha_cubic)
        dfcubic = df(alpha_cubic)
        if fcubic <= f0 + c1 * alpha_cubic * df0:
            return alpha_cubic, fcubic, dfcubic
        # From Nocedal and Wright:
        # If the new alpha is too close to its predecessor or else too much smaller than
        # the predecessor, we reset alpha to be predecessor/2.0
        # This safeguard procedures ensures that we make reasonable progress on each iteration
        # and that the final alpha is not too small
        # NOTE(review): (1 - alpha_cubic/alpha_quad) < 0.96 is true whenever the
        # cubic trial keeps more than 4% of the previous step, so the bisection
        # fallback fires almost always — verify against the intended safeguard.
        if (alpha_quad - alpha_cubic) > (alpha_quad) / 2.0 or \
            (1 - (alpha_cubic / alpha_quad)) < 0.96:
            alpha_cubic = alpha_quad / 2.0
        # replace predecessor estimates with new
        # cubic interpolation works by keeping last 2 estimates updated.
        alpha0 = alpha_quad
        alpha_quad = alpha_cubic
        fk = fquad
        fquad = fcubic
        dfk = dfquad
        dfquad = dfcubic
    return None, fquad, dfquad
#########################################################################################################
def strong_wolfe_with_zoom(f, df, x, p, **kwargs):
    """
    Wrapper around _strong_wolfe_with_zoom: restricts f and df to the ray
    x + alpha * p and counts evaluations of each.

    Returns ((alpha, fval, dfval), func_calls, dfunc_calls).
    """
    n_f = [0]
    n_df = [0]

    def phi(step):
        n_f[0] += 1
        return f(x + step * p)

    def dphi(step):
        n_df[0] += 1
        return np.dot(df(x + step * p), p)

    return _strong_wolfe_with_zoom(phi, dphi, **kwargs), n_f[0], n_df[0]
def _cubicmin(a, fa, fpa, b, fb, c, fc):
"""
Finds the minimizer for a cubic polynomial that goes through the
points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
If no minimizer can be found return None
"""
# f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
C = fpa
db = b - a
dc = c - a
denom = (db * dc) ** 2 * (db - dc)
d1 = np.empty((2, 2))
d1[0, 0] = dc ** 2
d1[0, 1] = -db ** 2
d1[1, 0] = -dc ** 3
d1[1, 1] = db ** 3
[A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
fc - fa - C * dc]).flatten())
A /= denom
B /= denom
radical = B * B - 3 * A * C
xmin = a + (-B + np.sqrt(radical)) / (3 * A)
except ArithmeticError:
return None
if not np.isfinite(xmin):
return None
return xmin
def _quadmin(a, fa, fpa, b, fb):
"""
Finds the minimizer for a quadratic polynomial that goes through
the points (a,fa), (b,fb) with derivative at a of fpa,
"""
# f(x) = B*(x-a)^2 + C*(x-a) + D
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
D = fa
C = fpa
db = b - a * 1.0
B = (fb - D - C * db) / (db * db)
xmin = a - C / (2.0 * B)
except ArithmeticError:
return None
if not np.isfinite(xmin):
return None
return xmin
def _cubic_interpolate(alpha_0, alpha_1, f_0, f_1, df_0, df_1):
"""
pg. 59 from Nocedal/Wright
we do a subscript of 0 for i, and 1 for i+1
"""
d1 = df_0 + (df_1 - (3 * (f_0 - f_1) / (alpha_0 - alpha_1)))
# in the book, they have a sign of (alpha_i - alpha_i-1), but we ensure that this is always positive
d2 = sqrt((d1 ** 2.0) - (df_0 * df_1))
alpha_j = alpha_1 - (alpha_1 - alpha_0) * \
((df_1 + d2 - d1) / (df_1 - df_0 + 2 * d2))
dalpha = (alpha_1 - alpha_0)
min_allowed = alpha_0 + 0.05 * dalpha
max_allowed = alpha_0 + 0.95 * dalpha
if alpha_j < min_allowed:
return min_allowed
elif alpha_j > max_allowed:
return max_allowed
return alpha_j
def _zoom_interpolate(i, alpha_lo, alpha_hi, alpha_0, f_lo, f_hi, f_0, df_lo, df_hi, df_0, alpha_rec, f_rec):
    """Pick a trial step inside [alpha_lo, alpha_hi] for the zoom phase.

    Tries a cubic fit first (needs one previously discarded point, so only
    from the second iteration on), then a quadratic fit, and finally falls
    back to bisection whenever an interpolant is missing or lands too close
    to either end of the bracket.
    """
    delta1 = 0.2  # minimum fractional distance a cubic trial must keep from the ends
    delta2 = 0.1  # same margin for the quadratic trial
    dalpha = alpha_hi - alpha_lo
    if dalpha < 0:
        low_end, high_end = alpha_hi, alpha_lo
    else:
        low_end, high_end = alpha_lo, alpha_hi

    trial = None
    if (i > 0):
        cubic_margin = delta1 * dalpha
        trial = _cubicmin(alpha_lo, f_lo, df_lo, alpha_hi, f_hi,
                          alpha_rec, f_rec)
    if (i == 0) or (trial is None) or (trial > high_end - cubic_margin) or (trial < low_end + cubic_margin):
        quad_margin = delta2 * dalpha
        trial = _quadmin(alpha_lo, f_lo, df_lo, alpha_hi, f_hi)
        if (trial is None) or (trial > high_end - quad_margin) or (trial < low_end + quad_margin):
            trial = alpha_lo + 0.5 * dalpha
    return trial
def _zoom(f, df, alpha_lo, alpha_hi, alpha_0, f_lo, f_hi, f_0, df_lo, df_hi, df_0, c1, c2, max_zoom_iter=40):
    """
    Algorithm 3.6 ("zoom") from Nocedal/Wright.

    The order of arguments is such that each call has the form
    zoom(alpha_lo, alpha_hi), where
    (a) the interval bounded by alpha_lo and alpha_hi contains step lengths
        satisfying the strong Wolfe conditions;
    (b) alpha_lo is, among all step lengths generated so far that satisfy the
        sufficient decrease condition, the one with the smallest function
        value; and
    (c) alpha_hi is chosen so that phi'(alpha_lo) * (alpha_hi - alpha_lo) < 0.

    Each iteration interpolates a trial alpha_j between alpha_lo and alpha_hi
    and replaces one of the end points by alpha_j so that (a)-(c) keep
    holding, until the strong Wolfe curvature condition holds at alpha_j.

    Returns the accepted step length, or None if max_zoom_iter is exhausted.
    """
    # most recently discarded trial point, used by the cubic interpolant
    alpha_rec = 0
    f_rec = f_0
    for i in range(max_zoom_iter):
        # _zoom_interpolate expects its bracket ordered low-to-high
        if alpha_lo > alpha_hi:
            alpha_j = _zoom_interpolate(i, alpha_hi, alpha_lo, alpha_0, f_hi, f_lo, f_0, df_hi, df_lo, df_0, alpha_rec, f_rec)
        else:
            alpha_j = _zoom_interpolate(i, alpha_lo, alpha_hi, alpha_0, f_lo, f_hi, f_0, df_lo, df_hi, df_0, alpha_rec, f_rec)
        f_j = f(alpha_j)
        df_j = df(alpha_j)
        if f_j > (f_0 + c1 * alpha_j * df_0) or f_j >= f_lo:
            # trial violates sufficient decrease (or is no better than lo):
            # shrink the bracket from the high side
            alpha_rec = alpha_hi
            f_rec = f_hi
            alpha_hi = alpha_j
            f_hi = f_j
            df_hi = df_j
        else:
            if abs(df_j) <= -c2 * df_0:
                # strong Wolfe curvature condition holds -> accept alpha_j
                return alpha_j
            if df_j * (alpha_hi - alpha_lo) >= 0:
                # slope says the minimum lies on the lo side: hi takes lo's
                # place before lo moves to the trial point
                f_rec = f_hi
                alpha_rec = alpha_hi
                alpha_hi = alpha_lo
                f_hi = f_lo
                df_hi = df_lo
            else:
                alpha_rec = alpha_lo
                f_rec = f_lo
            # in both sub-branches, the trial becomes the new lo end
            alpha_lo = alpha_j
            f_lo = f_j
            df_lo = df_j
    print("Didn't find appropriate value in zoom")
    return None
def _strong_wolfe_with_zoom(f, df, alpha_init=0, max_iter=100, c1=DEFAULT_C1, \
                            c2=DEFAULT_C2, min_step_size=1e-10, max_step_size=1e10, **kwargs):
    """
    Line search satisfying the strong Wolfe conditions (Nocedal/Wright
    Algorithm 3.5): grow the step until a bracket is found, then refine it
    with _zoom.

    Returns (alpha, f(alpha), df(alpha)); alpha is None when the search
    fails, or max_step_size when the step grows past the cap.

    NOTE(review): alpha_init is only forwarded to _zoom, not used as the
    first trial step (that is hard-coded to 1.0 below).
    """
    f0, df0 = f(0.0), df(0.0)
    alphaprev = 0.0
    # In Nocedal, Wright, this was left unspecified.
    alphak = 1.0
    while np.isnan(f(alphak)):
        # in case we set this to be too large: halve until f is defined
        alphak /= 2.
        if alphak <= min_step_size:
            print("alphak is <= min_step_size before loop!")
            return None, f0, df0
    fprev, dfprev = f0, df0
    fk, dfk = f(alphak), df(alphak)
    for i in range(max_iter):
        # Armijo violated, or (after the first step) f stopped decreasing:
        # the minimum is bracketed by [alphaprev, alphak].
        # Note precedence: `or (fk >= fprev and i > 0)`, matching Alg. 3.5.
        if fk > (f0 + c1 * alphak * df0) or fk >= fprev and i > 0:
            alpha_zoom = _zoom(f, df, alphaprev, alphak, alpha_init, fprev, fk, f0, \
                dfprev, dfk, df0, c1, c2)
            if not alpha_zoom:
                # NOTE(review): `not` also treats a returned 0.0 as failure —
                # confirm that fallback to alphak is intended in that case.
                alpha_zoom = alphak
            return alpha_zoom, f(alpha_zoom), df(alpha_zoom)
        if np.abs(dfk) <= -c2 * df0:
            # strong Wolfe conditions already hold at alphak
            return alphak, fk, dfk
        if dfk >= 0:
            # slope turned non-negative: bracket is [alphak, alphaprev]
            alpha_zoom = _zoom(f, df, alphak, alphaprev, alpha_init, fk, fprev, f0, \
                dfk, dfprev, df0, c1, c2)
            if not alpha_zoom:
                alpha_zoom = alphak
            return alpha_zoom, f(alpha_zoom), df(alpha_zoom)
        # no bracket yet: grow the step and re-evaluate
        alphaprev = alphak
        alphak *= 1.25
        fprev = fk
        fk = f(alphak)
        dfprev = dfk
        dfk = df(alphak)
        if alphak > max_step_size:
            print("reached max step size")
            return max_step_size, fk, dfk
    else:
        # for/else: iteration budget exhausted without bracketing
        print("didn't converge")
        return None, fk, dfk
    # NOTE(review): unreachable — every path returns inside the loop or in
    # the for-else above.
    return alphak, fk, dfk
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import math
import pytest
@pytest.fixture()
def browser():
    """Provide a fresh Chrome session per test, quitting it on teardown."""
    print("\nstart browser for test..")
    driver = webdriver.Chrome()
    yield driver
    print("\nquit browser..")
    driver.quit()
@pytest.mark.parametrize('get_link',
                         ["https://stepik.org/lesson/236895/step/1", "https://stepik.org/lesson/236896/step/1",
                          "https://stepik.org/lesson/236897/step/1", "https://stepik.org/lesson/236898/step/1",
                          "https://stepik.org/lesson/236899/step/1", "https://stepik.org/lesson/236903/step/1",
                          "https://stepik.org/lesson/236904/step/1", "https://stepik.org/lesson/236905/step/1"])
class TestAnswer(object):
    def test_send_answer_for_pages(self, browser, get_link):
        """Submit log(unix-time) as the answer on each lesson page and
        expect the "Correct!" hint to appear."""
        browser.get(get_link)
        browser.implicitly_wait(5)

        answer_field = browser.find_element_by_tag_name("textarea")
        answer_field.send_keys(str(math.log(int(time.time()))))

        submit = browser.find_element_by_tag_name("button")
        submit.click()

        hint = browser.find_element_by_class_name("smart-hints__hint").text
        print("\n" + hint)
        WebDriverWait(browser, 3).until(
            EC.text_to_be_present_in_element((By.CLASS_NAME, "smart-hints__hint"), "Correct!"))
|
#from '/Users/vsubr2/spark-1.6.1-bin-hadoop2.6/bin/pyspark'
import collections

from pyspark import SparkConf, SparkContext
# Rating was referenced below but never imported.
from pyspark.mllib.recommendation import Rating

conf = SparkConf().setMaster("local").setAppName("PopularMoviesv")
sc = SparkContext(conf=conf)

data = sc.textFile("file:///Users/vsubr2/Projects/KaneSpark/ml-100k/u.data")
# u.data columns: user id, movie id, rating, timestamp.
# Fixed bugs: `int(l[l])` -> `int(l[1])` (movie id) and `float([2])` ->
# `float(l[2])` (the rating value).
ratings = data.map(lambda l: l.split()).map(
    lambda l: Rating(int(l[0]), int(l[1]), float(l[2]))).cache()

rank = 10
numIterations = 20  # was `==`, a no-op comparison instead of an assignment
|
import pickle
from data_loader import DataLoader
from classifier import model
def train(data_path, model_file):
    """Load the dataset, build the classifier (warm-started from the weights
    in *model_file* when given), fit it, and persist the trained model."""
    loader = DataLoader(data_path)
    loader()

    initial_weights = load_weights(model_file) if model_file else None
    classifier = model(loader.train_x,
                       loader.train_y,
                       loader.valid_x,
                       loader.valid_y,
                       threshold = 0.4,
                       weights = initial_weights,
                       learning_rate = 0.003)
    classifier(epoch = 10000)
    save_model(classifier)
def save_model(model):
    """Persist *model* to the file 'model' as a pickle of its pickled bytes.

    The double layer mirrors load_weights, which unpickles the outer object
    and then the inner bytes.
    """
    with open('model', 'wb') as out:
        pickle.dump(pickle.dumps(model), out)
def load_weights(path):
    """Read a model saved by save_model and return its `weights` attribute."""
    with open(path, 'rb') as fh:
        payload = pickle.load(fh)
    restored = pickle.loads(payload) if payload else None
    return restored.weights
# Script entry point: retrain on data.csv, warm-starting from the saved model.
if __name__=='__main__':
    train(data_path = 'data.csv', model_file = 'model')
|
"""
Shows basic usage of the Google Calendar API. Creates a Google Calendar API
service object and outputs a list of the next 10 events on the user's calendar.
"""
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from datetime import datetime
import calendar
from os.path import exists
from os import makedirs
from dateutil import parser
# Directory used to cache the OAuth credentials between runs.
base_path = '/home/jake/.cache/conky/auth/google'

if not exists(base_path):
    makedirs(base_path)

# Setup the Calendar API
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
store = file.Storage(base_path + '/credentials.json')
creds = store.get()
if not creds or creds.invalid:
    # First run (or revoked token): run the interactive OAuth flow.
    # Requires client_secret.json in the current working directory.
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
service = build('calendar', 'v3', http=creds.authorize(Http()))

# Call the Calendar API
now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
# NOTE(review): the module docstring says 10 events, but maxResults is 5.
events_result = service.events().list(calendarId='primary', timeMin=now,
                                      maxResults=5, singleEvents=True,
                                      orderBy='startTime').execute()
events = events_result.get('items', [])

# Write "Mon DD||summary" lines for conky to render.
# NOTE(review): assumes /tmp/conky already exists — open() fails otherwise.
with open('/tmp/conky/calendar_events', 'w') as f:
    if not events:
        f.write('No upcoming events found.')
    for event in events:
        # All-day events carry 'date' instead of 'dateTime'.
        start = event['start'].get('dateTime', event['start'].get('date'))
        f.write(datetime.strftime(parser.parse(start), '%b %d') + "||" + event['summary'] + "\n")

# (scratch experiments with the stdlib calendar module, kept for reference)
#     print()
#     cal = calendar.Calendar()
#     print(cal.yeardayscalendar(year=2018, width=4))
#     print()
#     print()
#     textcal = calendar.TextCalendar()
#     print(textcal.formatyear(2018))
# first_of_month = datetime.today().replace(day=1)
# calendar = calendar.monthcalendar(first_of_month.year, first_of_month.month)
# # for week in calendar:
# #     print(week)
# # day_of_first = first_of_month.weekday()
# # name_of_first = first_of_month.strftime("%A")
# #
# # print(day_of_first)
# # print(name_of_first)
# #
# # for i in range(1 - day_of_first, 7*5):
# #     if i < 1 print(monthrange())
# #     print(i)
## -*- coding: utf-8 -*-
"""
Created on Tue 27 Oct 2020
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
This work was funded by Joanna Leng's EPSRC funded RSE Fellowship (EP/R025819/1)
@copyright 2020
@author: j.h.pickering@leeds.ac.uk and j.leng@leeds.ac.uk
"""
# set up linting conditions
# pylint: disable = too-many-public-methods
# pylint: disable = c-extension-no-member
import os
import csv
import pathlib
import numpy as np
import PyQt5.QtWidgets as qw
import PyQt5.QtGui as qg
import PyQt5.QtCore as qc
import PyQt5.QtPrintSupport as qp
from regionselection.gui.Ui_regionselectionmainwindow import Ui_RegionSelectionMainWindow
from regionselection.gui.resultstablewidget import ResultsTableWidget
from regionselection.gui.regionselectionwidget import RegionSelectionWidget
from regionselection.gui.regionstablemodel import RegionsTableModel
from regionselection.util.drawrect import DrawRect
import regionselection.util.autosavebinary as autosave
class RegionSelectionMainWindow(qw.QMainWindow, Ui_RegionSelectionMainWindow):
    """
    The main window
    """

    ## signal to indicate the user has selected a new rectangle
    new_selection = qc.pyqtSignal(DrawRect)

    ## signal to indicate the user has read a data file
    replace_data = qc.pyqtSignal(list)

    def __init__(self, parent=None):
        """
        the object initialization function

        Args:
            parent (QObject): the parent QObject for this window

        Returns:
            None
        """
        super().__init__(parent)
        self.setupUi(self)

        ## the drawing widget
        self._drawing_widget = None

        ## the results widget
        self._results_widget = None

        ## storage for the image
        self._image = None

        ## storage for the regions
        self._regions = []

        self.setup_drawing_tab()
        self.setup_table_tab()

        ## storage for the autosave object
        self._autosave = None

        ## name of the current project (a string once set)
        self._project = None

    def make_autosave(self):
        """
        create a new autosave file
        """
        self._autosave = autosave.AutoSaveBinary(self._project)

    def setup_drawing_tab(self):
        """
        initialize the drawing widget
        """
        tab = self._tabWidget.widget(0)
        self._drawing_widget = RegionSelectionWidget(tab, self)
        layout = qw.QVBoxLayout(tab)
        layout.addWidget(self._drawing_widget)

    def setup_table_tab(self):
        """
        initialize the results table widget
        """
        tab = self._tabWidget.widget(1)
        model = RegionsTableModel(self._regions)
        self._results_widget = ResultsTableWidget(tab, model)
        layout = qw.QVBoxLayout(tab)
        layout.addWidget(self._results_widget)
        # keep the table model in sync with selections and file loads
        self.new_selection.connect(model.add_region)
        self.replace_data.connect(model.replace_data)
        model.dataChanged.connect(self.data_changed)

    @qc.pyqtSlot(qc.QModelIndex, qc.QModelIndex)
    def data_changed(self, tl_index, br_index):
        """
        callback for user editing of the data via tableview

        Args:
            tl_index (qc.QModelIndex) top left location in data
            br_index (qc.QModelIndex) bottom right location in data
        """
        self._drawing_widget.repaint()
        self.autosave()

    @qc.pyqtSlot(DrawRect)
    def new_region(self, region):
        """
        slot for signal that a new region has been selected, emit own signal

        Args:
            region (DrawRect) the region that is to be added

        Emits:
            new_selection (DrawRect) forward the message to the data model
        """
        self.new_selection.emit(region)
        self.autosave()

    @qc.pyqtSlot()
    def load_data(self):
        """
        callback for loading data from csv file
        """
        if self._image is None:
            qw.QMessageBox.information(self, "No Image", "You must have an image")
            return

        if len(self._regions) > 0:
            # fixed user-facing typo: "loose" -> "lose"
            reply = qw.QMessageBox.question(self,
                                            "Overwrite",
                                            "You will lose current data?")
            if reply == qw.QMessageBox.No:
                return

        # get a list of backups and list of project names
        projects = autosave.AutoSaveBinary.list_backups(os.getcwd())
        matches = [tmp for tmp in projects if tmp[1] == self._project]
        if len(matches) > 0:
            reply = qw.QMessageBox.question(self,
                                            "Duplicate",
                                            "A back up of the project exists. Load instead?")
            if reply == qw.QMessageBox.Yes:
                self.load_backup_file(matches[0][0])
                return

        file_name, _ = qw.QFileDialog.getOpenFileName(
            self,
            self.tr("Save File"),
            os.path.expanduser('~'),
            self.tr("CSV (*.csv)"))

        if file_name is not None and file_name != '':
            with open(file_name, 'r') as in_file:
                reader = csv.reader(in_file)
                self.read_regions_csv_file(reader)

    def read_regions_csv_file(self, reader):
        """
        read a csv file of regions

        Args:
            reader (csv.reader) a ready to go csv file reader
        """
        # The first row holds the project name. Fixed: previously the whole
        # row (a list) was stored in self._project, which broke save_data and
        # make_autosave, both of which expect a string.
        name_row = next(reader, None)
        self._project = name_row[0] if name_row else "No Name"
        self.setWindowTitle(self._project)

        # pop the headers
        next(reader, None)

        # replace the regions
        regions = []
        for row in reader:
            region = DrawRect(np.uint32(row[0]),
                              np.uint32(row[1]),
                              np.uint32(row[2]),
                              np.uint32(row[3]))
            regions.append(region)

        self.replace_data.emit(regions)
        self.make_autosave()

    def load_backup_file(self, file_name):
        """
        read and load a binary backup

        Args:
            file_name (string) the file path including name
        """
        self._project, regions = autosave.AutoSaveBinary.get_backup_project(file_name)
        self.setWindowTitle(self._project)
        self.replace_data.emit(regions)

    @qc.pyqtSlot()
    def save_data(self):
        """
        callback to save the data
        """
        if len(self._regions) < 1:
            qw.QMessageBox.information(self, "Save", "You have no data to save")
            return

        file_name, _ = qw.QFileDialog.getSaveFileName(
            self,
            self.tr("Save File"),
            os.path.expanduser('~'),
            self.tr("CSV (*.csv)"))

        if file_name is not None and file_name != '':
            data = []
            for region in self._regions:
                data.append([region.top, region.bottom, region.left, region.right])

            with open(file_name, 'w', newline='') as file:
                writer = csv.writer(file)
                header = ["top y", "bottom y", "left x", "right x"]
                # first row: project name; second row: column headers
                writer.writerow([self._project])
                writer.writerow(header)
                writer.writerows(data)

    @qc.pyqtSlot()
    def print_table(self):
        """
        callback for printing the table as pdf
        """
        file_name, _ = qw.QFileDialog.getSaveFileName(self,
                                                      self.tr("Save Pdf"),
                                                      os.path.expanduser('~'),
                                                      self.tr("PDF (*.pdf)"))
        if file_name is None or file_name == '':
            return

        printer = qp.QPrinter(qp.QPrinter.PrinterResolution)
        printer.setOutputFormat(qp.QPrinter.PdfFormat)
        printer.setPaperSize(qp.QPrinter.A4)
        printer.setOutputFileName(file_name)

        doc = qg.QTextDocument()
        html_string = self._results_widget.get_table_as_html()
        doc.setHtml(html_string)
        doc.print(printer)

    @qc.pyqtSlot()
    def save_image(self):
        """
        callback for saving the current image
        """
        file_name, _ = qw.QFileDialog.getSaveFileName(self,
                                                      self.tr("Save PNG"),
                                                      os.path.expanduser('~'),
                                                      self.tr("PNG (*.png)"))
        if file_name is None or file_name == '':
            return

        pixmap = self._drawing_widget.get_current_pixmap()
        if pixmap is not None:
            pixmap.save(file_name)

    @qc.pyqtSlot()
    def load_image(self):
        """
        callback for loading an image
        """
        file_name, _ = qw.QFileDialog.getOpenFileName(self,
                                                      "Read Results File",
                                                      os.path.expanduser('~'),
                                                      "PNG (*.png);; JPEG (*.jpg)")
        path = pathlib.Path(file_name)
        if file_name is not None and file_name != '':
            # offer the file stem as the default project name
            reply = qw.QInputDialog.getText(self,
                                            "Project Name",
                                            "Proj Name",
                                            qw.QLineEdit.Normal,
                                            path.stem)
            if not reply[1]:
                return

            if reply[0] != '':
                self._project = reply[0]
            else:
                self._project = file_name

            self._image = qg.QImage(file_name)
            self._drawing_widget.display_image(self._image)
            self.setWindowTitle(self._project)

    def get_regions(self):
        """
        getter for the regions list
        """
        return self._regions

    def autosave(self):
        """
        autosave the data, creating a new file if necessary
        """
        if self._autosave is None:
            self.make_autosave()
        self._autosave.save_data(self._regions)
|
from flask import *
from database import *
import json
import time, datetime
import thermocontrol
import switch
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Render the landing page with the current PID state and setpoint."""
    context = {
        'is_pid_on': thermocontrol.is_pid_on(),
        'setpoint': thermocontrol.setpoint(),
    }
    return render_template('index.html', **context)
@app.route('/plot')
def plot():
    """
    Return the temperature and heating history as JSON for the chart.

    Produces a two-series payload [heating, temperature]; x values are
    epoch milliseconds, y values are the event's param_3.
    """
    def epoch_ms(when):
        # mktime works on whole seconds, so sub-second precision is
        # dropped -- same effect as the old per-field datetime rebuild.
        return int(time.mktime(when.timetuple())) * 1000

    temp_objs = {
        'key': 'Temperature',
        'values': [],
        'color': '#E45B53',
    }
    h_objs = {
        'key': 'Heating',
        'values': [],
        'bar': True,
        'color': '#c9d2e2',
    }
    temps = Event.select().where(Event.event_type == Event.PID_TEMPERATURE).order_by(Event.timestamp.asc())
    for t in temps:
        temp_objs['values'].append([epoch_ms(t.timestamp), t.param_3])
    heats = Event.select().where(Event.event_type == Event.PID_HEATING).order_by(Event.timestamp.asc())
    for t in heats:
        h_objs['values'].append([epoch_ms(t.timestamp), int(t.param_3)])
    # removed the unreachable `return "test"` / `pass` that followed here
    return json.dumps([h_objs, temp_objs])
@app.route('/temp')
def temp():
    """Return the most recent measured temperature as plain text."""
    latest = (Event.select()
              .where(Event.event_type == Event.TEMPERATURE)
              .order_by(Event.timestamp.desc())
              .limit(1))
    return str(latest[0].param_3)
@app.route('/static/<path:filename>')
def static_files(filename):
    """Serve a file from the static/ directory (shadows Flask's built-in static route)."""
    return send_from_directory('static', filename)
@app.route('/turn_pid_on')
def turn_pid_on():
    """Power the controller and enable the PID loop; returns "1"."""
    thermocontrol.on()
    thermocontrol.pid_on()
    return "1"
@app.route('/turn_pid_off')
def turn_pid_off():
    """Disable the PID loop and cut power to the controller and switch; returns "1"."""
    thermocontrol.off()
    thermocontrol.pid_off()
    switch.off()
    return "1"
@app.route('/set_setpoint', methods=['POST'])
def set_setpoint():
    """
    Set the PID setpoint from the POSTed 'setpoint' form value.

    Accepts a decimal comma as well as a decimal point (e.g. "21,5").
    Responds 400 on a missing or non-numeric value instead of the
    unhandled-ValueError 500 the original produced.
    """
    try:
        setpoint = float(request.values['setpoint'].replace(',', '.'))
    except (KeyError, ValueError):
        abort(400)
    return thermocontrol.setpoint(setpoint)
if __name__ == '__main__':
    # start with the heater known-off before serving any requests
    turn_pid_off()
    # NOTE(review): debug=True exposes the Werkzeug debugger on all
    # interfaces (0.0.0.0) -- do not run like this outside a trusted LAN.
    app.run('0.0.0.0', 5000, debug=True)
# Packaging metadata for the sudoku package.
#
# BUGFIX: distutils.core.setup silently ignores install_requires, so the
# pandas/numpy dependencies were never recorded; setuptools honours it.
from setuptools import setup

# Runtime dependencies installed alongside the package.
INSTALL_REQUIRES = [
    "pandas",
    "numpy",
]

setup(
    name="sudoku",
    version="1.0",
    author="Hendrik Scherner",
    packages=["sudoku"],
    install_requires=INSTALL_REQUIRES,
)
|
import sqlite3
import yaml
import os
class Database:
    """Thin sqlite3 wrapper storing domain -> URL redirections in table ``red``."""

    def __init__(self, dbconfig):
        """
        Open (or create) the database file named by dbconfig["db_file"].

        Args:
            dbconfig: mapping-like object with a "db_file" entry; the path
                is resolved two levels above this file unless absolute.
        """
        dbfile = os.path.join(os.path.dirname(__file__), '..', '..', dbconfig.get("db_file"))
        self.db = self.create_or_open_db(dbfile)
        self.db.row_factory = self.dict_factory

    def dict_factory(self, cursor, row):
        """sqlite3 row factory returning each row as a column-name -> value dict."""
        return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}

    def create_or_open_db(self, db_file):
        """Connect to db_file, creating the schema on first use."""
        db_is_new = not os.path.exists(db_file)
        # check_same_thread=False: the connection is shared across threads
        # (e.g. by a web server); writes are serialised by sqlite itself.
        conn = sqlite3.connect(db_file, check_same_thread=False)
        if db_is_new:
            # BUGFIX: the original schema lacked the ``status`` and
            # ``message`` columns that add_redirection/get_redirections
            # use, so every query failed on a freshly created database.
            sql = '''create table if not exists red(
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                domain TEXT UNIQUE,
                url TEXT,
                status TEXT,
                message TEXT,
                date_added TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                active BOOL DEFAULT true);'''
            conn.execute(sql)
        return conn

    def get_redirections(self):
        """Return all redirections as dicts, sorted by status descending."""
        cursor = self.db.cursor()
        cursor.execute('''SELECT id, domain, url, date_added, active, status, message from red order by id desc''')
        rows = []
        for row in cursor:
            rows.append({
                "id": row["id"],
                "active": row["active"],
                "domain": row["domain"],
                # keep only the date part of "YYYY-MM-DD HH:MM:SS"
                "date_added": row["date_added"].split(" ")[0],
                "message": row["message"],
                "status": row["status"],
                "url": row["url"]
            })
        # The tuple key sorts NULL statuses last without the TypeError a
        # plain ``k['status']`` comparison raises on mixed None values.
        return sorted(rows, key=lambda k: (k['status'] is not None, k['status']), reverse=True)

    def get_redirection(self, id):
        """Return the redirection with the given id, or None."""
        cursor = self.db.cursor()
        cursor.execute('''SELECT id, domain, url, date_added, active from red where id=?''', (id, ))
        return cursor.fetchone()

    def get_redirection_by_domain(self, domain):
        """Return the redirection for the given domain, or None."""
        cursor = self.db.cursor()
        cursor.execute('''SELECT id, domain, url, date_added, active from red where domain=?''', (domain, ))
        return cursor.fetchone()

    def add_redirection(self, red):
        """Insert a new redirection dict and return the stored row."""
        cursor = self.db.cursor()
        # BUGFIX: execute on the cursor (not the connection) so that
        # cursor.lastrowid actually refers to the row just inserted; the
        # original read lastrowid from a cursor that never ran anything.
        cursor.execute('''INSERT INTO red(domain, url, status, message) VALUES(?, ?, ?, ?)''', (red['domain'], red['url'], red['status'], red['message']))
        self.db.commit()
        return self.get_redirection(cursor.lastrowid)

    def update_redirection(self, id, redirection):
        """Point the redirection with the given id at a new URL."""
        self.db.execute('''UPDATE red SET url=? WHERE id=?''', (redirection, id, ))
        self.db.commit()

    def update_status(self, id, status, message):
        """Record the latest health-check status and message for a redirection."""
        self.db.execute('''UPDATE red SET status=?, message=? WHERE id=?''', (status, message, id, ))
        self.db.commit()

    def del_redirection(self, id):
        """Delete the redirection with the given id."""
        self.db.execute('''DELETE FROM red WHERE id=?''', (id, ))
        self.db.commit()

    def del_redirection_by_domain(self, domain):
        """Delete the redirection for the given domain."""
        self.db.execute('''DELETE FROM red WHERE domain=?''', (domain, ))
        self.db.commit()
|
import numpy as np
from typing import Union
from .base import BaseWidget
from .utils import get_unit_colors
from .unit_locations import UnitLocationsWidget
from .unit_waveforms import UnitWaveformsWidget
from .unit_waveforms_density_map import UnitWaveformDensityMapWidget
from .autocorrelograms import AutoCorrelogramsWidget
from .amplitudes import AmplitudesWidget
class UnitSummaryWidget(BaseWidget):
    """
    Plot a unit summary.

    If amplitudes are already computed they are displayed.

    Parameters
    ----------
    waveform_extractor: WaveformExtractor
        The waveform extractor object
    unit_id: int or str
        The unit id to plot the summary of
    unit_colors : dict or None
        If given, a dictionary with unit ids as keys and colors as values
    sparsity : ChannelSparsity or None
        Optional ChannelSparsity to apply.
        If WaveformExtractor is already sparse, the argument is ignored
    """
    possible_backends = {}

    def __init__(self, waveform_extractor, unit_id, unit_colors=None,
                 sparsity=None, radius_um=100, backend=None, **backend_kwargs):
        # NOTE(review): radius_um is accepted but unused here -- confirm
        # whether a backend consumes it via backend_kwargs elsewhere.
        we = waveform_extractor
        if unit_colors is None:
            unit_colors = get_unit_colors(we.sorting)

        # Unit-location panel only if that extension has been computed.
        if we.is_extension('unit_locations'):
            locations_data = UnitLocationsWidget(
                we, unit_ids=[unit_id],
                unit_colors=unit_colors, plot_legend=False).plot_data
            by_unit = we.load_extension("unit_locations").get_data(outputs="by_unit")
            location = by_unit[unit_id]
        else:
            locations_data = None
            location = None

        waveforms_data = UnitWaveformsWidget(
            we, unit_ids=[unit_id], unit_colors=unit_colors,
            plot_templates=True, same_axis=True, plot_legend=False,
            sparsity=sparsity).plot_data

        density_data = UnitWaveformDensityMapWidget(
            we, unit_ids=[unit_id], unit_colors=unit_colors,
            use_max_channel=True, plot_templates=True,
            same_axis=False).plot_data

        # Optional panels: delegate to the dedicated widgets when the
        # relevant extensions exist, otherwise pass None through.
        acc_data = None
        if we.is_extension('correlograms'):
            acc_data = AutoCorrelogramsWidget(we, unit_ids=[unit_id], unit_colors=unit_colors,).plot_data

        amplitudes_data = None
        if we.is_extension('spike_amplitudes'):
            amplitudes_data = AmplitudesWidget(
                we, unit_ids=[unit_id], unit_colors=unit_colors,
                plot_legend=False, plot_histograms=True).plot_data

        plot_data = dict(
            unit_id=unit_id,
            unit_location=location,
            plot_data_unit_locations=locations_data,
            plot_data_waveforms=waveforms_data,
            plot_data_waveform_density=density_data,
            plot_data_acc=acc_data,
            plot_data_amplitudes=amplitudes_data,
        )
        BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs)
|
import tkinter as tk
from tkinter import ttk
from tkinter import *
import bcrypt
from datetime import date
from datetime import timedelta
from GestionHabitaciones import *
import sqlite3
class RegistroHuesped:
    # Guest-registration UI: check-in form, guest list and check-out windows.
    # Class-level default for the currently edited entry value.
    valorEntry = ""
def Inicio(self,ventanaMenuPrincipal):
################################################ creaciòn de la ventana principal ###############################################################
self.ventana = tk.Toplevel(ventanaMenuPrincipal)
self.ventana.title("Registro")
self.ventana.geometry("800x600")
self.ventana.configure(background = "#181818")
self.ventana.resizable(0,0)
self.center(self.ventana)
self.ventana.transient(ventanaMenuPrincipal)
################################################ Creación de los botones ###############################################################
self.botonRegistro = tk.Button(self.ventana, text = "Registro de Huespedes", command=lambda: self.RegistroFront(self.ventana), background="#5FBD94", activebackground="#6BD8A9")
self.botonRegistro.place(x=300, y=50, width=200, height=100)
self.botonRegistro = tk.Button(self.ventana, text = "Lista de Huespedes", background="#5FBD94", activebackground="#6BD8A9", command=lambda: self.ListaHuespedes(self.ventana))
self.botonRegistro.place(x=300, y=175, width=200, height=100)
self.botonRegistro = tk.Button(self.ventana, text = "Check-Out", background="#5FBD94", activebackground="#6BD8A9", command=lambda: self.CheckOut(self.ventana))
self.botonRegistro.place(x=300, y=300, width=200, height=100)
self.botonRegistro = tk.Button(self.ventana, text = "Salir", command=lambda: self.Salir(ventanaMenuPrincipal), background="#D76458", activebackground="#FF7A6C")
self.botonRegistro.place(x=300, y=425, width=200, height=100)
self.ventana.mainloop()
def Salir(self, ventanaMenuPrincipal):
#ventanaMenuPrincipal.deiconify()
self.ventana.destroy()
################################################# creaciòn de la ventana de registro ###############################################################
def RegistroFront(self, ventana):
self.ventana2 = tk.Toplevel(ventana)
self.ventana2.title("Check-In")
self.ventana2.geometry("600x550")
self.ventana2.configure(background = "#181818")
self.ventana2.resizable(0,0)
self.center(self.ventana2)
self.ventana2.transient(ventana)
################################################## Nombre ##############################################################################################
self.labelNombre = tk.Label(self.ventana2, text = "Nombre: ")
self.labelNombre.grid(column = 0, row = 0, padx = 4, pady = 6)
self.labelNombre.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (Nombre)
self.nombreIngresado = tk.StringVar()
self.inputNombre = tk.Entry(self.ventana2, width = 20, textvariable = self.nombreIngresado)
self.inputNombre.grid(column = 1, row = 0)
################################################## Apellido #######################################################################################
self.labelApellido = tk.Label(self.ventana2, text = "Apellido: ")
self.labelApellido.grid(column = 0, row = 1, padx = 4, pady = 6)
self.labelApellido.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos(Apellido)
self.apellidoIngresado = tk.StringVar()
self.inputApellido = tk.Entry(self.ventana2, width = 20, textvariable = self.apellidoIngresado)
self.inputApellido.grid(column = 1, row = 1)
################################################### DNI ##############################################################################################
self.labelDni = tk.Label(self.ventana2, text = "DNI: ")
self.labelDni.grid(column = 0, row = 2, padx = 4, pady = 6)
self.labelDni.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (DNI)
self.dniIngresado = tk.StringVar()
self.inputDNI = tk.Entry(self.ventana2, width = 20, textvariable = self.dniIngresado)
self.inputDNI.grid(column = 1, row = 2)
################################################### Telefono ##############################################################################################
self.labelTel = tk.Label(self.ventana2, text = "Tel/Cel: ")
self.labelTel.grid(column = 0, row = 3, padx = 4, pady = 6)
self.labelTel.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos(Telefono)
self.telIngresado = tk.StringVar()
self.inputTel = tk.Entry(self.ventana2, width = 20, textvariable = self.telIngresado)
self.inputTel.grid(column = 1, row = 3)
##################################################### Email #####################################################################################
self.labelCorreo = tk.Label(self.ventana2, text = "Email: ")
self.labelCorreo.grid(column = 0, row = 4, padx = 4, pady = 6)
self.labelCorreo.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (Email)
self.correoIngresado = tk.StringVar()
self.inputCorreo = tk.Entry(self.ventana2, width = 20, textvariable = self.correoIngresado)
self.inputCorreo.grid(column = 1, row = 4)
##################################################### Domicilio #####################################################################################
self.labelDir = tk.Label(self.ventana2, text = "Dirección: ")
self.labelDir.grid(column = 0, row = 5, padx = 4, pady = 6)
self.labelDir.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (dirección)
self.dirIngresada = tk.StringVar()
self.inputDir = tk.Entry(self.ventana2, width = 20, textvariable = self.dirIngresada)
self.inputDir.grid(column = 1, row = 5)
###################################################### Fecha de Nacimiento ########################################################################
self.labelFecha = tk.Label(self.ventana2, text = "Fecha de Nacimiento: ")
self.labelFecha.grid(column = 0, row = 6, padx = 4, pady = 6)
self.labelFecha.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (fecha de nacimiento)
self.fechaIngresada = tk.StringVar()
self.inputFecha = tk.Entry(self.ventana2, width = 20, textvariable = self.fechaIngresada)
#self.inputFecha.insert(0, "dd/mm/aaaa")
#self.inputFecha.delete(0, tk.END)
self.inputFecha.grid(column = 1, row = 6)
###################################################### Nacionalidad ###############################################################################
self.labelNacion = tk.Label(self.ventana2, text = "Nacionalidad: ")
self.labelNacion.grid(column = 0, row = 7, padx = 4, pady = 6)
self.labelNacion.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (Nacionalidad)
self.inputNacion = ttk.Combobox(self.ventana2, width = 19, text = "Nacionalidad", state="readonly")
self.inputNacion["values"] = ["Afganistán", "Albania", "Alemania", "Andorra", "Angola", "Antigua y Barbuda", "Arabia Saudita", "Argelia", "Argentina",
"Armenia", "Australia", "Austria", "Azerbaiyán", "Bahamas", "Bangladés", "Barbados", "Baréin", "Bélgica", "Belice", "Benín",
"Bielorrusia", "Birmania", "Bolivia", "Bosnia y Herzegovina", "Botsuana", "Brasil", "Brunéi", "Bulgaria", "Burkina Faso", "Burundi",
"Bután", "Cabo Verde", "Camboya", "Camerún", "Canadá", "Catar", "Chad", "Chile", "China", "Chipre", "Ciudad del Vaticano", "Colombia",
"Comoras", "Corea del Norte", "Corea del Sur", "Costa de Marfil", "Costa Rica", "Croacia", "Cuba", "Dinamarca", "Dominica", "Ecuador",
"Egipto", "El Salvador", "Emiratos Árabes Unidos", "Eritrea", "Eslovaquia", "Eslovenia", "España", "Estados Unidos", "Estonia", "Etiopía",
"Filipinas", "Finlandia", "Fiyi", "Francia", "Gabón", "Gambia", "Georgia", "Ghana", "Granada", "Grecia", "Guatemala", "Guyana", "Guinea",
"Guinea ecuatorial", "Guinea-Bisáu", "Haití", "Honduras", "Hungría", "India", "Indonesia", "Irak", "Irán", "Irlanda", "Islandia", "Islas Marshall",
"Islas Salomón", "Israel", "Italia", "Jamaica", "Japón", "Jordania", "Kazajistán", "Kenia", "Kirguistán", "Kiribati", "Kuwait", "Laos", "Lesoto"
"Letonia", "Líbano", "Liberia", "Libia", "Liechtenstein", "Lituania", "Luxemburgo", "Macedonia del Norte", "Madagascar", "Malasia", "Malaui",
"Maldivas", "Malí", "Malta", "Marruecos", "Mauricio", "Mauritania", "México", "Micronesia", "Moldavia", "Mónaco", "Mongolia", "Montenegro", "Mozambique",
"Namibia", "Nauru", "Nepal", "Nicaragua", "Níger", "Nigeria", "Noruega", "Nueva Zelanda", "Omán", "Países Bajos", "Pakistán", "Palaos", "Panamá"
"Papúa Nueva Guinea", "Paraguay", "Perú", "Polonia", "Portugal", "Reino Unido", "República Centroafricana", "República Checa", "República del Congo"
"República Democrática del Congo", "República Dominicana", "Ruanda", "Rumanía", "Rusia", "Samoa", "San Cristóbal y Nieves", "San Marino", "San Vicente y las Granadinas",
"Santa Lucía", "Santo Tomé y Príncipe", "Senegal", "Serbia", "Seychelles", "Sierra Leona", "Singapur", "Siria", "Somalia", "Sri Lanka", "Suazilandia"
"Sudáfrica", "Sudán", "Sudán del Sur", "Suecia", "Suiza", "Surinam", "Tailandia", "Tanzania", "Tayikistán", "Timor Oriental", "Togo", "Tonga", "Trinidad y Tobago",
"Túnez", "Turkmenistán", "Turquía", "Tuvalu", "Ucrania", "Uganda", "Uruguay", "Uzbekistán", "Vanuatu", "Venezuela", "Vietnam", "Yemen", "Yibuti"
"Zambia", "Zimbabue", ]
self.inputNacion.grid(column = 1, row = 7)
self.inputNacion.set("Nacionalidad")
#print(str(self.nacionIngresada))
####################################################### Forma de Pago ########################################################################################
self.labelPago = tk.Label(self.ventana2, text = "Forma de Pago: ")
self.labelPago.grid(column = 0, row = 8, padx = 4, pady = 6)
self.labelPago.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (Forma de pago)
self.inputFDP = ttk.Combobox(self.ventana2, width = 19, text = "Forma De Pago", state = "readonly")
self.inputFDP["values"] = ["Efectivo","Débito","Crédito","Depósito","Transferencia","Pago Online"]
#Si se elige otro, se tiene que cambiar el state del combobox para poder escribir la forma de pago
#o se tiene que habilitar un textfield para poder ingresar la forma de pago manualmente
self.inputFDP.grid(column = 1, row = 8)
self.inputFDP.set("Forma de Pago")
####################################################### Estadía ##############################################################################################
self.labelEstadia = tk.Label(self.ventana2, text = "Estadía (cantidad noches): ")
self.labelEstadia.grid(column = 0, row = 9, padx = 4, pady = 6)
self.labelEstadia.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (Estadía)
self.estadiaIngresada = tk.StringVar()
self.estadiaIngresada.set(0)
self.inputEstadia = tk.Entry(self.ventana2, width = 20, textvariable = self.estadiaIngresada)
self.inputEstadia.grid(column = 1, row = 9)
##################################################### Patente ##############################################################################################
self.labelPatente = tk.Label(self.ventana2, text = "Patente: ")
self.labelPatente.grid(column = 0, row = 10, padx = 4, pady = 6)
self.labelPatente.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (Patente)
self.patenteIngresada = tk.StringVar()
self.inputPatente = tk.Entry(self.ventana2, width = 20, textvariable = self.patenteIngresada)
self.inputPatente.grid(column = 1, row = 10)
###################################################### Check-In ##############################################################################################
self.labelChkIn = tk.Label(self.ventana2, text = "Check-In")
self.labelChkIn.grid(column = 0, row = 11, padx = 4, pady = 6)
self.labelChkIn.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (Check-in)
self.fechaDeIngreso = tk.StringVar()
self.inputFIngreso = tk.Entry(self.ventana2, width = 20, textvariable = self.fechaDeIngreso, state = "readonly")
self.inputFIngreso.grid(column = 1, row = 11)
self.ahora = date.today()
self.fechaDeIngreso.set(self.ahora)
###################################################### Check-Out ##############################################################################################
self.labelChkOut = tk.Label(self.ventana2, text = "Check-Out")
self.labelChkOut.grid(column = 0, row = 12, padx = 4, pady = 6)
self.labelChkOut.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos (Check-Out)
self.fechaDeSalida = tk.StringVar()
self.inputFDeSalida = tk.Entry(self.ventana2, width = 20, textvariable = self.fechaDeSalida, state = "readonly")
self.inputFDeSalida.grid(column = 1, row = 12)
###################################################### HABITACIÓN ##############################################################################################
self.labelHab = tk.Label(self.ventana2, text = "Habitación asignada")
self.labelHab.grid(column = 0, row = 13, padx = 4, pady = 6)
self.labelHab.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
#Ingreso de datos
self.habEntry = tk.StringVar()
self.habEntry.set(0)
self.inputHab = tk.Entry(self.ventana2, width = 20, textvariable = self.habEntry, state = "readonly")
self.inputHab.grid(column = 1, row = 13)
###################################################### BOTONES VENTANA 2 ######################################################################################
#Botones
self.botonValidar = tk.Button(self.ventana2, text = "Check-In", command=lambda:self.logicaRegistro(), background="#5FBD94", activebackground="#6BD8A9")
self.botonValidar.place(x = 475, y = 495, width = 80, height = 45)
self.botonCerrar = tk.Button(self.ventana2, text = "Volver", command=lambda:self.Volver(self.ventana2), background="#D76458", activebackground="#FF7A6C")
self.botonCerrar.place(x = 370, y = 495, width = 80, height = 45)
self.botonVerificar = tk.Button(self.ventana2, text = "Verificar Hab.", command=lambda:self.VerifHabitacion(), background="#C1C1C1", activebackground="#DADADA")
self.botonVerificar.place(x = 198, y = 495, width = 120, height = 45)
################################################## ETIQUETAS DE LOS ERRORES ####################################################################
#ERROR EN EL NOMBRE
self.errorLabelNombre = tk.Label(self.ventana2, text=" ")
self.errorLabelNombre.grid(column=2, row=0)
self.errorLabelNombre.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN EL APELLIDO
self.errorLabelApellido = tk.Label(self.ventana2, text = " ")
self.errorLabelApellido.grid(column = 2, row = 1)
self.errorLabelApellido.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN EL DNI
self.errorLabelDNI = tk.Label(self.ventana2, text = " ")
self.errorLabelDNI.grid(column = 2, row = 2)
self.errorLabelDNI.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN EL TELÉFONO
self.errorLabelTel = tk.Label(self.ventana2, text = " ")
self.errorLabelTel.grid(column = 2, row = 3)
self.errorLabelTel.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN EL EMAIL
self.errorLabelEmail = tk.Label(self.ventana2, text = " ")
self.errorLabelEmail.grid(column = 2, row = 4)
self.errorLabelEmail.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN EL DOMICILIO
self.errorLabelDomicilio = tk.Label(self.ventana2, text = " ")
self.errorLabelDomicilio.grid(column = 2, row = 5)
self.errorLabelDomicilio.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN LA FECHA DE NACIMIENTO
self.errorLabelFechaNac = tk.Label(self.ventana2, text = " ")
self.errorLabelFechaNac.grid(column = 2, row = 6)
self.errorLabelFechaNac.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN LA NACIONALIDAD
self.errorLabelNacionalidad = tk.Label(self.ventana2, text = " ")
self.errorLabelNacionalidad.grid(column = 2, row = 7)
self.errorLabelNacionalidad.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN LA FORMA DE PAGO
self.errorLabelFDP = tk.Label(self.ventana2, text = " ")
self.errorLabelFDP.grid(column = 2, row = 8)
self.errorLabelFDP.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN LA ESTADIA
self.errorLabelEstadia = tk.Label(self.ventana2, text = " ")
self.errorLabelEstadia.grid(column = 2, row = 9)
self.errorLabelEstadia.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN LA PATENTE
self.errorLabelPatente = tk.Label(self.ventana2, text = " ")
self.errorLabelPatente.grid(column = 2, row = 10)
self.errorLabelPatente.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN LA FECHA DEL CHECK-IN
self.errorLabelIngreso = tk.Label(self.ventana2, text = " ")
self.errorLabelIngreso.grid(column = 2, row = 11)
self.errorLabelIngreso.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN LA FECHA DEL CHECK-OUT
self.errorLabelSalida = tk.Label(self.ventana2, text = " ")
self.errorLabelSalida.grid(column = 2, row = 12)
self.errorLabelSalida.configure(foreground="red", background = "#181818", font=('times 11 italic'))
#ERROR EN LA HABITACIÓN SELECCIONADA
self.errorLabelHab = tk.Label(self.ventana2, text = " ")
self.errorLabelHab.grid(column = 2, row = 13)
self.errorLabelHab.configure(foreground="red", background = "#181818", font=('times 11 italic'))
################################################### LÓGICA DE LA VENTANA REGISTRO ###############################################################################
def logicaRegistro(self):
#Variables para realizar las validaciones
self.condicion = 0
self.habitacion = int(self.habEntry.get())
self.nombre = self.nombreIngresado.get()
self.apellido = self.apellidoIngresado.get()
self.dni = self.dniIngresado.get()
#self.largo = len(self.dni)
self.tel = self.telIngresado.get()
#self.cantDigitos = len(self.tel)
self.email = self.correoIngresado.get()
self.direccion = self.dirIngresada.get()
self.fecha = self.fechaIngresada.get()
self.nacionIngresada = self.inputNacion.get()
self.fdpIngresada = self.inputFDP.get()
self.estadia = self.estadiaIngresada.get()
self.patente = self.patenteIngresada.get()
self.ingreso = self.fechaDeIngreso.get()
self.salida = self.fechaDeSalida.get()
#################################################### verificar nombre ##########################################################################
self.nombreValido = True
if(len(self.nombre) <= 0):
self.nombreValido = False
else:
for i in self.nombre:
if(i.isalpha() or i==" "):
self.nombreValido = True
else:
self.nombreValido = False
break
pass
if(self.nombreValido == False):
self.condicion = self.condicion - 1
self.errorLabelNombre['text'] = "Nombre ingresado no válido"
else:
self.condicion = self.condicion + 1
self.errorLabelNombre['text'] = " "
#################################################### Verificar Apellido ##########################################################################
self.apellidoValido = True
if(len(self.apellido) <= 0):
self.apellidoValido = False
else:
for i in self.apellido:
if(i.isalpha()):
self.apellidoValido = True
else:
self.apellidoValido = False
break
pass
if(self.apellidoValido == False):
self.condicion = self.condicion - 1
self.errorLabelApellido['text'] = "Apellido ingresado no válido"
else:
self.condicion = self.condicion + 1
self.errorLabelApellido['text'] = " "
#################################################### Verificar DNI ##########################################################################
self.dniValido = True
#Primero valido el largo sin importar de qué tipo sean los caracteres
if(len(self.dni) > 8 or len(self.dni) < 7):
self.dniValido = False
else:
#Si el largo es correcto me fijo que sean todos números
for i in self.dni:
if(i.isdigit()):
self.dniValido = True
else:
self.dniValido = False
break
pass
if(self.dniValido == False):
self.condicion = self.condicion - 1
self.errorLabelDNI['text'] = "DNI ingresado no válido"
else:
self.condicion = self.condicion + 1
self.errorLabelDNI['text'] = " "
#################################################### Verificar teléfono ##########################################################################
self.telefonoValido = True
if(len(self.tel) != 10 or len(self.tel) <= 0):
self.telefonoValido = False
else:
for i in self.tel:
if(i.isdigit()):
self.telefonoValido = True
else:
self.telefonoValido = False
break
pass
if(self.telefonoValido == False):
self.condicion = self.condicion - 1
self.errorLabelTel['text'] = "Teléfono ingresado no válido"
else:
self.condicion = self.condicion + 1
self.errorLabelTel['text'] = " "
##################################################### Verificar Correo ##########################################################################
emailValido = True
largoEmail = len(self.email)
if(largoEmail == 0):
emailValido = False
else:
for i in self.email:
if(i.isalpha() and i.islower()):
emailValido = True
elif(i.isdigit()):
emailValido = True
elif(i.isalpha() == False or i.isdigit() == False):
if(i != "@" and i != "." and i != "-" and i != "_"):
emailValido = False
break
pass
if(emailValido == False):
self.condicion = self.condicion - 1
self.errorLabelEmail['text'] = "Correo ingresado no válido"
else:
self.condicion = self.condicion + 1
self.errorLabelEmail['text'] = " "
###################################################### Verificar Domicilio ##########################################################################
esValido = False
for i in self.direccion:
if(i.isalpha()):
esValido = True
elif(i.isdigit):
esValido = True
elif(i.isalpha()== False or i.isdigit() == False):
esValido = False
break
else:
esValido = False
break
pass
if(esValido == False):
self.condicion = self.condicion - 1
self.errorLabelDomicilio['text'] = "Domicilio ingresado no válido"
else:
self.condicion = self.condicion + 1
self.errorLabelDomicilio['text'] = " "
##################################################### Verificar Fecha Nacimiento #####################################################################
self.dia = 0
self.mes = 0
self.anio = 0
fechaValida = True
for i in self.fecha:
if(i.isdigit() == False):
if(i != "/"):
fechaValida = False
break
else:
self.dia = self.fecha[0:2]
self.mes = self.fecha[3:5]
self.anio = self.fecha[6:10]
print(self.dia)
print(self.mes)
print(self.anio)
self.enteroDia = int(self.dia)
self.enteroMes = int(self.mes)
self.enteroAnio = int(self.anio)
self.fechaActual = date.today()
self.fechaActualTupla = self.fechaActual.timetuple()
self.anioActual = self.fechaActualTupla.tm_year
#print(self.fechaActualTupla.tm_year)
if(self.enteroAnio < (self.anioActual - 100) or self.enteroAnio > self.anioActual):
fechaValida = False
elif(self.enteroMes < 1 or self.enteroMes > 12):
fechaValida = False
elif(self.enteroDia < 1 or self.enteroDia > 31):
fechaValida = False
if((self.enteroMes == 1 or self.enteroMes == 3 or self.enteroMes == 5 or self.enteroMes == 7 or self.enteroMes == 8
or self.enteroMes == 10 or self.enteroMes == 12) and (self.enteroDia > 31)):
print("El mes ingresado no contiene esa cantidad de días 31+")
fechaValida = False
elif((self.enteroMes == 4 or self.enteroMes == 6 or self.enteroMes == 9 or self.enteroMes == 11) and (self.enteroDia > 30)):
print("El mes ingresado no contiene esa cantidad de días 30+")
fechaValida = False
#si el mes ingresado es febrero
elif(self.enteroMes == 2):
#me fijo si el año es bisiesto
if((self.enteroAnio % 4 == 0) or ((self.enteroAnio % 100 != 0) and (self.enteroAnio % 400 == 0))):
#si es bisiesto febrero no puede tener más de 29 días
if(self.enteroDia > 29):
print("Febrero tiene 29 dìas bisiesto")
fechaValida = False
#Si el año no es bisiesto
else:
#febrero no puede tener más de 28 dias
if(self.enteroDia > 28):
print("Febrero tiene 28 dìas No bisiesto")
fechaValida = False
if(fechaValida == False):
self.condicion = self.condicion - 1
self.errorLabelFechaNac['text'] = "Fecha ingresada no válida"
else:
self.condicion = self.condicion + 1
self.errorLabelFechaNac['text'] = " "
###################################################### Verificar Nacionalidad ##########################################################################
if(self.nacionIngresada == "Nacionalidad"):
self.condicion = self.condicion - 1
self.errorLabelNacionalidad['text'] = "Seleccione una opción"
else:
self.condicion = self.condicion + 1
self.errorLabelNacionalidad['text'] = " "
###################################################### Verificar Forma de Pago ##########################################################################
if(self.fdpIngresada == "Forma de Pago"):
self.condicion = self.condicion - 1
self.errorLabelFDP['text'] = "Seleccione una opción"
else:
self.condicion = self.condicion + 1
self.errorLabelFDP['text'] = " "
##################################################### Verificar Estadía(cant noches) ##########################################################################
estadiaValida = False
for i in self.estadia:
if(i.isdigit()== True):
estadiaValida = True
else:
estadiaValida = False
pass
if(int(self.estadia) == 0):
estadiaValida = False
if(estadiaValida == False):
self.condicion = self.condicion - 1
self.errorLabelEstadia['text'] = "Estadía ingresada no válido"
else:
self.condicion = self.condicion + 1
self.errorLabelEstadia['text'] = " "
####################################################### Verificar Patente ##########################################################################
patenteValida = False
if(len(self.patente) <= 0):
patenteValida = False
else:
for i in self.patente:
if(i.isalpha() == True):
patenteValida = True
elif(i.isdigit() == True):
patenteValida = True
else:
patenteValida = False
pass
if(patenteValida == False):
self.condicion = self.condicion - 1
self.errorLabelPatente['text'] = "Patente ingresada no válida"
else:
self.condicion = self.condicion + 1
self.errorLabelPatente['text'] = " "
######################################################## Verificar Fecha Ingreso ##########################################################################
self.condicion = self.condicion + 1
######################################################## Verificar Fecha Salida ##########################################################################
if(len(self.salida) <= 0):
self.errorLabelSalida['text'] = "La fecha ingresada no es válida"
else:
self.errorLabelSalida['text'] = " "
self.condicion = self.condicion + 1
#print("Condicion final: ")
#print(self.condicion)
######################################################## Verificar Disponibilidad ##########################################################################
self.habitacionValida = True
if(self.habitacion <= 0 or self.habitacion > 18):
self.habitacionValida = False
else:
for i in str(self.habitacion):
if(i.isdigit()):
self.habitacionValida = True
else:
self.habitacionValida = False
break
pass
if(self.habitacionValida):
self.condicion = self.condicion + 1
self.errorLabelHab['text'] = "Se ha ingresado con éxito"
self.IngresoCliente()
else:
self.condicion = self.condicion - 1
self.errorLabelHab['text'] = "Habitación seleccionada no válida"
################################################## MÉTODOS ##########################################################################################
def VerifHabitacion(self):
self.dias = timedelta(days = int(self.estadiaIngresada.get()))
self.calcularSalida = self.ahora + self.dias
self.salida = self.calcularSalida
self.fechaDeSalida.set(self.calcularSalida)
gestion2 = GestionHabitaciones()
gestion2.FrontHome(self.ventana2, self.habEntry)
    def Volver(self, ventana2):
        """Close the registration window.

        NOTE(review): the ``ventana2`` parameter is ignored; the method
        always destroys ``self.ventana2`` — confirm callers expect that.
        """
        self.ventana2.destroy()
    def IngresoCliente(self):
        """Persist the new guest if every field validation passed.

        ``self.condicion`` is incremented by each passing check and
        decremented by each failing one; 14 presumably means "all checks
        passed" — TODO confirm against the validation code.  On success the
        client row is inserted, the chosen room is flagged occupied and the
        registration window is closed.
        """
        print(self.condicion)
        if(self.condicion==14):
            self.conexion = sqlite3.connect("empleadosDB.db")
            self.cursor = self.conexion.cursor()
            # Foreign-key enforcement is switched off around the writes and
            # re-enabled afterwards — NOTE(review): confirm why this is needed.
            self.cursor.execute("PRAGMA foreign_keys = 0")
            self.conexion.commit()
            self.cursor.execute("INSERT INTO clientes(id_habitacion,nombre,apellido,dni,telefono,email,domicilio,fechaNacimiento,nacionalidad,formaDePago,estadia,patente,checkIn,checkOut) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (self.habitacion, self.nombre, self.apellido, self.dni, self.tel, self.email, self.direccion, self.fecha, self.nacionIngresada,self.fdpIngresada, self.estadia, self.patente, self.ingreso,self.salida))
            self.conexion.commit()
            # disponibilidad=1 marks the room as occupied.
            self.cursor.execute("UPDATE habitaciones SET disponibilidad=1 WHERE id=?",(self.habitacion,))
            self.conexion.commit()
            self.cursor.execute("PRAGMA foreign_keys = 1")
            self.conexion.commit()
            self.conexion.close()
            self.ventana2.destroy()
################################################# CREACION DE LA VENTANA LISTA DE HUESPEDES ###############################################################
    def ListaHuespedes(self, ventana):
        """Build the guest-list window: a DNI search box plus read-only
        fields that BusquedaCliente fills with the matching guest's data."""
        self.ventana3 = tk.Toplevel(ventana)
        self.ventana3.title("Huéspedes")
        self.ventana3.geometry("500x450")
        self.ventana3.configure(background = "#181818")
        self.center(self.ventana3)
        self.ventana3.resizable(0,0)
        """ I NEED TO ESTABLISH A SEARCH CRITERION TO QUERY THE DATABASE AND BRING BACK THE CLIENT'S DATA.
            THE PLAN IS A SEARCH BOX THAT LOOKS UP THE DNI IN THE DATABASE AND FETCHES ALL THE DATA FOR
            THAT CLIENT, THOUGH AN ID COULD ALSO BE ASSIGNED TO THE CLIENT WHEN THE ROOM IS ASSIGNED"""
        self.labelBusqueda = tk.Label(self.ventana3, text = "DNI: ")
        self.labelBusqueda.grid(column = 0, row = 0, padx = 4, pady = 6)
        self.labelBusqueda.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        # Search-criterion entry; its value drives the database query.
        self.criterioBusqueda = tk.StringVar()
        self.inputBusqueda = tk.Entry(self.ventana3, width = 30, textvariable = self.criterioBusqueda)
        self.inputBusqueda.grid(column = 1, row = 0)
        self.labelTituloMuestra = tk.Label(self.ventana3, text = "Resultados de la búsqueda")
        self.labelTituloMuestra.grid(column = 1, row = 3, padx = 4, pady = 6)
        #self.labelTituloMuestra.place(x = 175, y = 50)
        self.labelTituloMuestra.config(bg="#181818" ,fg = "White", font = ("Chilanka",16))
        ################################################## Name ##############################################################################################
        self.labelNombreMuestra = tk.Label(self.ventana3, text = "Nombre: ")
        self.labelNombreMuestra.grid(column = 0, row = 4, padx = 50, pady = 6)
        self.labelNombreMuestra.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        # Read-only display field, filled by BusquedaCliente after the query.
        self.nombreObtenido = tk.StringVar()
        self.muestraNombre = tk.Entry(self.ventana3, width = 30, textvariable = self.nombreObtenido, state = "readonly")
        self.muestraNombre.grid(column = 1, row = 4)
        ################################################## Surname ##############################################################################################
        self.labelApellidoMuestra = tk.Label(self.ventana3, text = "Apellidos: ")
        self.labelApellidoMuestra.grid(column = 0, row = 5, padx = 50, pady = 6)
        self.labelApellidoMuestra.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.apellidoObtenido = tk.StringVar()
        self.muestraApellido = tk.Entry(self.ventana3, width = 30, textvariable = self.apellidoObtenido, state = "readonly")
        self.muestraApellido.grid(column = 1, row = 5)
        ################################################## DNI ##############################################################################################
        self.labelDNIMuestra = tk.Label(self.ventana3, text = "DNI: ")
        self.labelDNIMuestra.grid(column = 0, row = 6, padx = 50, pady = 6)
        self.labelDNIMuestra.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.dniObtenido = tk.StringVar()
        self.muestraDNI = tk.Entry(self.ventana3, width = 30, textvariable = self.dniObtenido, state = "readonly")
        self.muestraDNI.grid(column = 1, row = 6)
        ################################################## Phone ##############################################################################################
        self.labelTelMuestra = tk.Label(self.ventana3, text = "Teléfono: ")
        self.labelTelMuestra.grid(column = 0, row = 7, padx = 50, pady = 6)
        self.labelTelMuestra.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.telObtenido = tk.StringVar()
        self.muestraTel = tk.Entry(self.ventana3, width = 30, textvariable = self.telObtenido, state = "readonly")
        self.muestraTel.grid(column = 1, row = 7)
        ################################################## Nationality ##############################################################################################
        self.labelNacMuestra = tk.Label(self.ventana3, text = "Nacionalidad: ")
        self.labelNacMuestra.grid(column = 0, row = 8, padx = 50, pady = 6)
        self.labelNacMuestra.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.nacionalidadObtenida = tk.StringVar()
        self.muestraNacion = tk.Entry(self.ventana3, width = 30, textvariable = self.nacionalidadObtenida, state = "readonly")
        self.muestraNacion.grid(column = 1, row = 8)
        ################################################## Stay ##############################################################################################
        self.labelMuestraEstadia = tk.Label(self.ventana3, text = "Estadía: ")
        self.labelMuestraEstadia.grid(column = 0, row = 9, padx = 50, pady = 6)
        self.labelMuestraEstadia.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.estadiaObtenida = tk.StringVar()
        self.muestraEstadia = tk.Entry(self.ventana3, width = 30, textvariable = self.estadiaObtenida, state = "readonly")
        self.muestraEstadia.grid(column = 1, row = 9)
        ################################################## Check-in ##############################################################################################
        self.labelMuestraIngreso = tk.Label(self.ventana3, text = "Check-in: ")
        self.labelMuestraIngreso.grid(column = 0, row = 10, padx = 50, pady = 6)
        self.labelMuestraIngreso.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.ingresoObtenido = tk.StringVar()
        self.muestraIngreso = tk.Entry(self.ventana3, width = 30, textvariable = self.ingresoObtenido, state = "readonly")
        self.muestraIngreso.grid(column = 1, row = 10)
        ################################################## Check-out ##############################################################################################
        self.labelMuestraSalida = tk.Label(self.ventana3, text = "Check-out: ")
        self.labelMuestraSalida.grid(column = 0, row = 11, padx = 50, pady = 6)
        self.labelMuestraSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.salidaObtenida = tk.StringVar()
        self.muestraSalida = tk.Entry(self.ventana3, width = 30, textvariable = self.salidaObtenida, state = "readonly")
        self.muestraSalida.grid(column = 1, row = 11)
        ###################################################### WINDOW 3 BUTTONS ######################################################################################
        self.botonCerrar = tk.Button(self.ventana3, text = "Cerrar", command=lambda:self.CerrarLista(self.ventana3), background="#D76458", activebackground="#FF7A6C")
        self.botonCerrar.place(x = 220, y = 400, width = 80, height = 45)
        self.botonBuscar = tk.Button(self.ventana3, text = "Buscar", command=lambda:self.BusquedaCliente(), background="#D8D8D8", activebackground="#EAEDEC")
        self.botonBuscar.grid(column = 1, row = 2, padx = 4, pady = 6)
################################################### LÓGICA DE LA VENTANA LISTA HUESPEDES ###############################################################################
def BusquedaCliente(self):
self.dni=self.criterioBusqueda.get()
self.conexion = sqlite3.connect("empleadosDB.db")
self.cursor = self.conexion.cursor()
self.cursor.execute("SELECT * FROM clientes WHERE dni=?",(self.dni,))
self.datosCliente=self.cursor.fetchone()
self.conexion.commit()
self.conexion.close()
self.nombreObtenido.set(self.datosCliente[2])
self.apellidoObtenido.set(self.datosCliente[3])
self.dniObtenido.set(self.datosCliente[4])
self.telObtenido.set(self.datosCliente[5])
self.nacionalidadObtenida.set(self.datosCliente[9])
self.estadiaObtenida.set(self.datosCliente[11])
self.ingresoObtenido.set(self.datosCliente[13])
self.salidaObtenida.set(self.datosCliente[14])
    def CerrarLista(self, ventana3):
        """Close the guest-list window.

        NOTE(review): the ``ventana3`` parameter is ignored; the method
        always destroys ``self.ventana3``.
        """
        self.ventana3.destroy()
###################################################### CREACIÓN DE LA VENTANA DE CHECK OUT ######################################################################################
    def CheckOut(self, ventana):
        """Build the check-out window: a DNI search box plus read-only
        fields showing the full guest record (filled by Busqueda), and a
        Confirm button that removes the guest (EliminarCliente)."""
        self.ventana4 = tk.Toplevel(ventana)
        self.ventana4.title("Check-Out")
        self.ventana4.geometry("500x600")
        self.ventana4.configure(background = "#181818")
        self.center(self.ventana4)
        self.ventana4.resizable(0,0)
        self.lblBusquedaOut = tk.Label(self.ventana4, text = "Búsqueda: ")
        self.lblBusquedaOut.grid(column = 0, row = 0, padx = 4, pady = 6)
        self.lblBusquedaOut.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        # Search-criterion entry; its value drives the database query.
        self.entradaBusqueda = tk.StringVar()
        self.inputBusquedaEntrada = tk.Entry(self.ventana4, width = 30, textvariable = self.entradaBusqueda)
        self.inputBusquedaEntrada.grid(column = 1, row = 0)
        self.labelTituloMuestra = tk.Label(self.ventana4, text = "Resultados de la búsqueda")
        self.labelTituloMuestra.grid(column = 1, row = 3)
        #self.labelTituloMuestra.place(x = 175, y = 50)
        self.labelTituloMuestra.config(bg= "#181818", fg = "White", font = ("Chilanka",16))
        ################################################## Name ##############################################################################################
        self.lblNombreSalida = tk.Label(self.ventana4, text = "Nombre: ")
        self.lblNombreSalida.grid(column = 0, row = 4, padx = 25, pady = 6)
        self.lblNombreSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        # Read-only display field, filled by Busqueda after the query.
        self.nombreSalida = tk.StringVar()
        self.muestraNombreSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.nombreSalida, state = "readonly")
        self.muestraNombreSalida.grid(column = 1, row = 4)
        ################################################## Surname ##############################################################################################
        self.lblApellidoSalida = tk.Label(self.ventana4, text = "Apellido")
        self.lblApellidoSalida.grid(column = 0, row = 5, padx = 25, pady = 6)
        self.lblApellidoSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.apellidoSalida = tk.StringVar()
        self.muestraApellidoSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.apellidoSalida, state = "readonly")
        self.muestraApellidoSalida.grid(column = 1, row = 5)
        ################################################## DNI ##############################################################################################
        self.lblDniSalida = tk.Label(self.ventana4, text = "DNI: ")
        self.lblDniSalida.grid(column = 0, row = 6, padx = 25, pady = 6)
        self.lblDniSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.dniSalida = tk.StringVar()
        self.muestraDNISalida = tk.Entry(self.ventana4, width = 30, textvariable = self.dniSalida, state = "readonly")
        self.muestraDNISalida.grid(column = 1, row = 6)
        ################################################## Phone ##############################################################################################
        self.lblTelSalida = tk.Label(self.ventana4, text = "Teléfono: ")
        self.lblTelSalida.grid(column = 0, row = 7, padx = 25, pady = 6)
        self.lblTelSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.telSalida = tk.StringVar()
        self.muestraTelSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.telSalida, state = "readonly")
        self.muestraTelSalida.grid(column = 1, row = 7)
        ################################################## Email ##############################################################################################
        self.lblEmailSalida = tk.Label(self.ventana4, text = "Email: ")
        self.lblEmailSalida.grid(column = 0, row = 8, padx = 25, pady = 6)
        self.lblEmailSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.emailSalida = tk.StringVar()
        self.muestraEmailSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.emailSalida, state = "readonly")
        self.muestraEmailSalida.grid(column = 1, row = 8)
        ################################################## Address ##############################################################################################
        self.lblDomicilioSalida = tk.Label(self.ventana4, text = "Domicilio: ")
        self.lblDomicilioSalida.grid(column = 0, row = 9, padx = 25, pady = 6)
        self.lblDomicilioSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.dirSalida = tk.StringVar()
        self.muestraDirSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.dirSalida, state = "readonly")
        self.muestraDirSalida.grid(column = 1, row = 9)
        ################################################## Birth date ##############################################################################################
        self.lblFechaNacSalida = tk.Label(self.ventana4, text = "Fecha de Nacimiento: ")
        self.lblFechaNacSalida.grid(column = 0, row = 10, padx = 25, pady = 6)
        self.lblFechaNacSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.fechaNacSalida = tk.StringVar()
        self.muestraFechaNacSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.fechaNacSalida, state = "readonly")
        self.muestraFechaNacSalida.grid(column = 1, row = 10)
        ################################################## Nationality ##############################################################################################
        self.lblNacionalidadSalida = tk.Label(self.ventana4, text = "Nacionalidad: ")
        self.lblNacionalidadSalida.grid(column = 0, row = 11, padx = 25, pady = 6)
        self.lblNacionalidadSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.nacionSalida = tk.StringVar()
        self.muestraNacionSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.nacionSalida, state = "readonly")
        self.muestraNacionSalida.grid(column = 1, row = 11)
        ################################################## Payment method ##############################################################################################
        self.lblFDPSalida = tk.Label(self.ventana4, text = "Forma de Pago: ")
        self.lblFDPSalida.grid(column = 0, row = 12, padx = 25, pady = 6)
        self.lblFDPSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.formaSalida = tk.StringVar()
        self.muestraFormaSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.formaSalida, state = "readonly")
        self.muestraFormaSalida.grid(column = 1, row = 12)
        ################################################## Stay ##############################################################################################
        self.lblEstadiaSalida = tk.Label(self.ventana4, text = "Estadía: ")
        self.lblEstadiaSalida.grid(column = 0, row = 13, padx = 25, pady = 6)
        self.lblEstadiaSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.estSalida = tk.StringVar()
        self.muestraEstSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.estSalida, state = "readonly")
        self.muestraEstSalida.grid(column = 1, row = 13)
        ################################################## License plate ##############################################################################################
        self.lblPatenteSalida = tk.Label(self.ventana4, text = "Patente: ")
        self.lblPatenteSalida.grid(column = 0, row = 14, padx = 25, pady = 6)
        self.lblPatenteSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.patSalida = tk.StringVar()
        self.muestraPatSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.patSalida, state = "readonly")
        self.muestraPatSalida.grid(column = 1, row = 14)
        ################################################## Check-in ##############################################################################################
        self.lblChinSalida = tk.Label(self.ventana4, text = "Check-in: ")
        self.lblChinSalida.grid(column = 0, row = 15, padx = 25, pady = 6)
        self.lblChinSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.chinSalida = tk.StringVar()
        self.muestraChinSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.chinSalida, state = "readonly")
        self.muestraChinSalida.grid(column = 1, row = 15)
        ################################################## Check-out ##############################################################################################
        self.lblCoutSalida = tk.Label(self.ventana4, text = "Check-out: ")
        self.lblCoutSalida.grid(column = 0, row = 16, padx = 25, pady = 6)
        self.lblCoutSalida.configure(foreground = "White", background = "#181818", font=('times 11 italic'))
        #
        self.coutSalida = tk.StringVar()
        self.muestraCoutSalida = tk.Entry(self.ventana4, width = 30, textvariable = self.coutSalida, state = "readonly")
        self.muestraCoutSalida.grid(column = 1, row = 16)
        ################################################## WINDOW 4 CHK-OUT BUTTONS ##############################################################################################
        self.botonValidar = tk.Button(self.ventana4, text = "Confirmar", command=lambda: self.EliminarCliente(), background="#5FBD94", activebackground="#6BD8A9")
        self.botonValidar.place(x = 275, y = 550, width = 80, height = 45)
        self.botonCerrar = tk.Button(self.ventana4, text = "Cerrar", command=lambda:self.CerrarCheckOut(self.ventana4), background="#D76458", activebackground="#FF7A6C")
        self.botonCerrar.place(x = 175, y = 550, width = 80, height = 45)
        self.botonBusqueda = tk.Button(self.ventana4, text = "Buscar", command=lambda:self.Busqueda(), background="#D8D8D8", activebackground="#EAEDEC")
        self.botonBusqueda.grid(column = 1, row = 2, padx = 4, pady = 6)
################################################### LÓGICA DE LA VENTANA CHECK-OUT ###############################################################################
def EliminarCliente(self):
self.dni=self.entradaBusqueda.get()
self.conexion = sqlite3.connect("empleadosDB.db")
self.cursor = self.conexion.cursor()
self.cursor.execute("PRAGMA foreign_keys = 0")
self.conexion.commit()
self.cursor.execute("DELETE FROM clientes WHERE dni=?",(self.dni,))
self.conexion.commit()
self.cursor.execute("UPDATE habitaciones SET disponibilidad=0 WHERE id=?",(self.datosCliente[1],))
self.conexion.commit()
self.cursor.execute("UPDATE estacionamientos SET id_Cliente=null, ocupado=0 WHERE id_cliente=?",(self.datosCliente[0],))
self.conexion.commit()
self.cursor.execute("PRAGMA foreign_keys = 1")
self.conexion.commit()
self.conexion.close()
self.ventana4.destroy()
def Busqueda(self):
self.dni=self.entradaBusqueda.get()
self.conexion = sqlite3.connect("empleadosDB.db")
self.cursor = self.conexion.cursor()
self.cursor.execute("SELECT * FROM clientes WHERE dni=?",(self.dni,))
self.datosCliente=self.cursor.fetchone()
self.conexion.commit()
self.conexion.close()
print(self.datosCliente)
self.nombreSalida.set(self.datosCliente[2])
self.apellidoSalida.set(self.datosCliente[3])
self.dniSalida.set(self.datosCliente[4])
self.telSalida.set(self.datosCliente[5])
self.emailSalida.set(self.datosCliente[6])
self.dirSalida.set(self.datosCliente[7])
self.fechaNacSalida.set(self.datosCliente[8])
self.nacionSalida.set(self.datosCliente[9])
self.formaSalida.set(self.datosCliente[10])
self.estSalida.set(self.datosCliente[11])
self.patSalida.set(self.datosCliente[12])
self.chinSalida.set(self.datosCliente[13])
self.coutSalida.set(self.datosCliente[14])
    def CerrarCheckOut(self, ventana4):
        """Close the check-out window.

        NOTE(review): the ``ventana4`` parameter is ignored; the method
        always destroys ``self.ventana4``.
        """
        self.ventana4.destroy()
def center(self,win):
win.update_idletasks()
width = win.winfo_width()
height = win.winfo_height()
x = (win.winfo_screenwidth() // 2) - (width // 2)
y = (win.winfo_screenheight() // 2) - (height // 2)
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
import subprocess
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
from moviepy.editor import *
from PIL import Image
# Measure the source video's length to know how many segments to process.
cap = cv2.VideoCapture("ball3.mp4")
fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
duration = frameCount/fps
# Two segments per second of video; assumes segments3/<i>.mp4 were pre-split
# at that rate — TODO confirm against the splitting step.
seg_count=2*int(duration)
# Feature matrix: one row per segment = [time offset, 256-bin grayscale histogram].
Z = []
for i in range (0,seg_count):
    cap = cv2.VideoCapture('segments3/'+str(i)+'.mp4')
    # Property id 2 positions the capture by ratio; 0 rewinds to the start.
    cap.set(2, 0)
    res, frame = cap.read()
    #cv2.imshow('lol',frame)
    # NOTE(review): `res` is not checked — a missing/corrupt segment leaves
    # `frame` as None and calcHist below will fail.
    # Large 100000*i offset prepended as a pseudo-feature, presumably so
    # k-means also groups segments that are close in time — verify intent.
    dumy=np.array(100000*i)
    histo = cv2.calcHist([frame],[0],None,[256],[0,256])
    pushh=np.append(dumy,histo)
    Z.append(pushh)
# convert to np.float32, as required by cv2.kmeans
Z = np.float32(Z)
klusters=5
criteria = (cv2.TERM_CRITERIA_MAX_ITER, 10, 1.000)
ret,label,center=cv2.kmeans(Z,klusters,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
# labelmap[i] collects the segment indices assigned to cluster i.
labelmap=[]
for i in range (0,klusters):
    tempo=[]
    for j in range (0,seg_count):
        if label[j]==i:
            tempo.append(j)
    labelmap.append(tempo)
# Sort clusters by their first segment index so output numbering is stable.
labelmap.sort()
# Concatenate each cluster's segments into CLUSTERS3/cluster<i>.mp4.
for i in range (0,klusters):
    clips=[]
    for j in range (0,len(labelmap[i])):
        name='segments3/'+str(labelmap[i][j])+'.mp4'
        clips.append(VideoFileClip(name))
    final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile('CLUSTERS3/cluster'+str(i)+'.mp4')
|
from django.shortcuts import render
from .models import *
from rest_framework import viewsets,permissions
from .serializers import *
from rest_framework.pagination import LimitOffsetPagination,PageNumberPagination
from .pagination import PostPageNumberPagination
from rest_framework.filters import SearchFilter,OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
# list all products, filterable by category
class PodushkiViewSet(viewsets.ModelViewSet):
    """
    CRUD API for pillow (Podushki) products.

    Supports exact-match filtering on the listed fields, ordering and
    free-text search, with custom page-number pagination.
    """
    permission_classes = [permissions.AllowAny, ]
    queryset = Podushki.objects.all()
    serializer_class = PodushkiSerializer
    filter_backends = [DjangoFilterBackend,OrderingFilter,SearchFilter]
    # NOTE(review): `filter_fields` is the legacy django-filter name; newer
    # versions expect `filterset_fields` — confirm the installed version.
    filter_fields = ['slug','brend','tkan','size','type','filler','top']
    pagination_class = PostPageNumberPagination#PageNumberPagination #LimitOffsetPagination
class SearchAPIView(generics.ListCreateAPIView):
    """List/create endpoint with free-text search (?search=...) over the
    brand name, fabric name and product name."""
    permission_classes = [permissions.AllowAny, ]
    search_fields = ['brend__name','tkan__name','name']
    filter_backends = (SearchFilter,)
    queryset = Podushki.objects.all()
    serializer_class = PodushkiSerializer
class PodushkiTcanViewSet(viewsets.ModelViewSet):
    """CRUD API for fabrics (Tkan)."""
    permission_classes = [permissions.AllowAny, ]
    queryset = Tkan.objects.all()
    serializer_class = TkanSerializer
    # filter_fields = ('slug',)
class PodushkiBrendViewSet(viewsets.ModelViewSet):
    """CRUD API for brands (Brend)."""
    permission_classes = [permissions.AllowAny, ]
    queryset = Brend.objects.all()
    serializer_class = BrendSerializer
class PodushkiSizeViewSet(viewsets.ModelViewSet):
    """CRUD API for sizes (Size)."""
    permission_classes = [permissions.AllowAny, ]
    queryset = Size.objects.all()
    serializer_class = SizeSerializer
class PodushkiTypeViewSet(viewsets.ModelViewSet):
    """CRUD API for product types (Type).

    NOTE(review): serializer_class is SizeSerializer, which looks like a
    copy-paste slip — a TypeSerializer probably exists in .serializers;
    confirm before changing.
    """
    permission_classes = [permissions.AllowAny, ]
    queryset = Type.objects.all()
    serializer_class = SizeSerializer
class PodushkiFillerViewSet(viewsets.ModelViewSet):
    """CRUD API for fillers (Filler).

    NOTE(review): serializer_class is SizeSerializer, which looks like a
    copy-paste slip — a FillerSerializer probably exists in .serializers;
    confirm before changing.
    """
    permission_classes = [permissions.AllowAny, ]
    queryset = Filler.objects.all()
    serializer_class = SizeSerializer
class GetPodushkiImageViewSet(viewsets.ModelViewSet):
    """CRUD API for product images, filterable with ?product=<id>."""
    permission_classes = [permissions.AllowAny, ]
    queryset = PodushkiImage.objects.all()
    serializer_class = PodushkiImageSerializer
    filter_fields = ('product',)
|
from flask import Blueprint
# Blueprint for the /locations section: routes registered on it are served
# under the /locations URL prefix, with templates/ and static/ folders
# resolved relative to this package.
blueprint = Blueprint(
    "locations_blueprint",
    __name__,
    url_prefix="/locations",
    template_folder="templates",
    static_folder="static",
)
|
#__author: "Jing Xu"
#date: 2018/1/25
import os,sys
from core import db_handler
from conf import settings
from core import logger
import json
import time
import random
def v_code():
    """Return a 5-character verification code.

    Each position is drawn from a pool of three independently generated
    candidates: a decimal digit, an uppercase ASCII letter and a lowercase
    ASCII letter.
    """
    return ''.join(
        random.choice([
            str(random.randrange(10)),
            chr(random.randrange(65, 91)),
            chr(random.randrange(97, 123)),
        ])
        for _ in range(5)
    )
def acc_auth(account, password):
    '''
    Authenticate a credit account against its JSON record on disk.

    :param account: credit account number
    :param password: credit card password
    :return: the account data dict when authentication succeeds, otherwise
        None (unknown account, wrong password, or expired card)
    '''
    db_path = db_handler.db_handler(settings.DATABASE)
    account_file = "%s/%s.json" % (db_path, account)
    print(account_file)
    # Guard clauses: fail fast on missing file, bad password, expired card.
    if not os.path.isfile(account_file):
        print("\033[31;1mAccount [%s] does not exist!\033[0m" % account)
        return None
    with open(account_file, 'r') as f:
        account_data = json.load(f)
    if account_data['password'] != password:
        print("\033[31;1mAccount ID or password is incorrect!\033[0m")
        return None
    exp_time_stamp = time.mktime(time.strptime(account_data['expire_date'], "%Y-%m-%d"))
    if time.time() > exp_time_stamp:
        print("\033[31;1mAccount [%s] has expired,please contact the back to get a new card!\033[0m" % account)
        return None
    return account_data
def acc_login(user_data,log_obj):
    '''
    account login func

    :param user_data: session-state dict held only in memory; mutated in
        place ('is_authenticated', 'account_id')
    :param log_obj: logger used to record lockouts
    :return: the authenticated account data dict; exits the process after
        3 failed attempts
    '''
    retry_count = 0
    # while/else: the else branch runs only when the loop condition turns
    # false (3 failed attempts) — a successful `return auth` skips it.
    while user_data['is_authenticated'] is not True and retry_count < 3 :
        account = input("\033[32;1maccount:\033[0m").strip()
        password = input("\033[32;1mpassword:\033[0m").strip()
        vcode_flag = True
        # Keep showing fresh captchas until the user types anything other
        # than R/r.
        while vcode_flag:
            verification_code = v_code()
            print("\033[32;1m [%s] \033[0m" % verification_code)
            icode = input("\033[31;1mverification_code(Enter [R] to refresh):\033[0m")
            if icode != 'R' and icode != 'r':
                vcode_flag = False
                if icode == verification_code:
                    auth = acc_auth(account, password)
                    if auth: #not None means passed the authentication
                        user_data['is_authenticated'] = True
                        user_data['account_id'] = account
                        print("welcome")
                        return auth
        # A wrong captcha or a failed authentication both consume one attempt.
        retry_count +=1
    else:
        log_obj.error("account [%s] too many login attempts" % account)
        exit()
def acc_logout(user_data, acc_data):
    """Interactively log the current account out.

    Returns *acc_data* unchanged when the user backs out with [b]; clears
    the session and terminates the process on [y].  When nobody is logged
    in, the loop never runs and the function returns None.
    """
    while user_data['is_authenticated'] is True:
        key = input("\033[32;1mPress [y] to logout,[b] to back:\033[0m")
        if key in ("b", "B"):
            return acc_data
        if key in ("y", "Y"):
            user_data['is_authenticated'] = False
            user_data['account_id'] = None
            acc_data = None
            sys.exit()
|
#!env python3
# -*- coding: utf-8 -*-
from flask import Flask, render_template_string, request
# Single-module Flask demo showing three ways to pass data to a page:
# query string, a GET form, and URL path parameters.
app = Flask(__name__)
# Landing page: links exercising the query-string and path-based routes,
# plus a small GET form submitting to /output.
index_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body>
<h1>query</h1>
<ul>
<li><a href="/result?fruit_no=1">1番</a></li>
<li><a href="/result?fruit_no=2">2番</a></li>
<li><a href="/result?fruit_no=3">3番</a></li>
</ul>
<h1>form</h1>
<form method="get" action="/output">
<p>名前:<input type="text" name="name" size="40"></p>
<button type="submit">送信</button>
</form>
<h1>url_for</h1>
<ul>
<li><a href="/result/1/">1番</a></li>
<li><a href="/result/2/">2番</a></li>
<li><a href="/result/3/">3番</a></li>
</ul>
</body>
</html>
'''
# Template for the /result routes: shows the selected fruit.
result_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body>
<p><strong> {{ fruit }} </strong>が当たりました。</p>
</body>
</html>
'''
# Template for /output: echoes the submitted name back.
output_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
</head>
<body>
<h1>入力された名前は「{{ name }}」です</h1>
</body>
</html>
'''
@app.route("/")
def index():
return render_template_string(index_html)
@app.route("/result")
def result():
fruits = {'1': 'もも', '2': 'りんご', '3': 'みかん'}
fruit_no = request.args.get('fruit_no', '')
return render_template_string(result_html, fruit=fruits[fruit_no])
@app.route("/output")
def output():
your_name = request.args.get('name', '')
return render_template_string(output_html, name=your_name)
@app.route("/result/<fruit_no>/")
def result_with_url_for(fruit_no):
fruits = {'1': 'もも', '2': 'りんご', '3': 'みかん'}
return render_template_string(result_html,
fruit=fruits[fruit_no])
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 10:56:47 2018
@author: withheart
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the Kaggle Titanic datasets (expects an ``input/`` directory).
train_data = pd.read_csv('input/train.csv')
test_data = pd.read_csv('input/test.csv')
print(train_data.head())
print(train_data.info())
# Survival-rate pie chart (visible only with an interactive matplotlib backend).
train_data['Survived'].value_counts().plot.pie(autopct = '%1.2f%%')
def fill_embark_with_mean(data):
    """Fill missing ``Embarked`` values with the most common port.

    Despite the historical name this uses the mode (the mean of a
    categorical column is undefined).  The original chained masked
    assignment (``data.Embarked[mask] = ...``) may silently operate on
    a copy in modern pandas; assigning a ``fillna`` result back to the
    column is the reliable form.

    Returns the (mutated) DataFrame for chaining.
    """
    mode_value = data['Embarked'].dropna().mode()[0]
    data['Embarked'] = data['Embarked'].fillna(mode_value)
    return data
def fill_cabin_with_unknow(data):
    """Replace missing ``Cabin`` entries with the placeholder 'U0'.

    Returns the (mutated) DataFrame for chaining.
    """
    data['Cabin'] = data['Cabin'].fillna('U0')
    return data
def fill_age_with_rfr(data):
    """Impute missing ``Age`` values with a random-forest regression.

    Trains on rows where Age is known, using Survived/Fare/Parch/SibSp/
    Pclass as features, then predicts Age for the remaining rows and
    writes the predictions back in place.  Returns the DataFrame.
    """
    from sklearn.ensemble import RandomForestRegressor
    features = data[['Age', 'Survived', 'Fare', 'Parch', 'SibSp', 'Pclass']]
    known = features.loc[(data['Age'].notnull())]
    unknown = features.loc[(data['Age'].isnull())]
    x_train = known.values[:, 1:]
    y_train = known.values[:, 0]
    regressor = RandomForestRegressor(n_estimators=1000, n_jobs=-1)
    regressor.fit(x_train, y_train)
    predicted = regressor.predict(unknown.values[:, 1:])
    data.loc[data['Age'].isnull(), ['Age']] = predicted
    return data
# Apply the imputation pipeline, then print post-imputation sanity
# checks: dataset summary plus survival breakdowns by sex and class.
train_data = fill_embark_with_mean(train_data)
train_data = fill_cabin_with_unknow(train_data)
train_data = fill_age_with_rfr(train_data)
print("--------------------------")
print(train_data.info())
print(train_data.groupby(['Sex','Survived'])['Survived'].count())
#train_data[['Sex','Survived']].groupby(['Sex']).mean().plot.bar()
#train_data[['Pclass','Survived']].groupby(['Pclass']).mean().plot.bar()
print(train_data.groupby(['Pclass','Survived'])['Survived'].count())
# The original final line carried a stray '|' artifact (a syntax
# error); it has been removed.
print(train_data['Age'].describe())
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QAction
import re
import socket
import threading
import json
import sys
class Ui_MainWindow(object):
    """Qt-Designer-style UI definition for the chat client.

    Builds two stacked pages: a connection page (IP/username fields and
    a connect button) and a chat page (message list, input box, and a
    members side panel with a disconnect button).
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets on ``MainWindow``."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1091, 588)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_3.setSpacing(0)
        self.gridLayout_3.setObjectName("gridLayout_3")
        # The stacked widget switches between the connection page (index
        # 0) and the chat page (index 1).
        self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
        self.stackedWidget.setObjectName("stackedWidget")
        # --- connection (main) page ---
        self.mainPage = QtWidgets.QWidget()
        self.mainPage.setObjectName("mainPage")
        self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.mainPage)
        self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.mainPageContainer = QtWidgets.QWidget(self.mainPage)
        self.mainPageContainer.setObjectName("mainPageContainer")
        self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.mainPageContainer)
        self.verticalLayout_8.setObjectName("verticalLayout_8")
        self.mainPageTitle = QtWidgets.QLabel(self.mainPageContainer)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(24)
        self.mainPageTitle.setFont(font)
        self.mainPageTitle.setAlignment(QtCore.Qt.AlignCenter)
        self.mainPageTitle.setObjectName("mainPageTitle")
        self.verticalLayout_8.addWidget(self.mainPageTitle)
        self.mainPageVerticalContainer = QtWidgets.QWidget(self.mainPageContainer)
        self.mainPageVerticalContainer.setObjectName("mainPageVerticalContainer")
        self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.mainPageVerticalContainer)
        self.verticalLayout_9.setObjectName("verticalLayout_9")
        # Spacers above/below the fields keep the form vertically centered.
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_9.addItem(spacerItem)
        self.ipAddressContainer = QtWidgets.QHBoxLayout()
        self.ipAddressContainer.setObjectName("ipAddressContainer")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.ipAddressContainer.addItem(spacerItem1)
        self.ipAddressLbl = QtWidgets.QLabel(self.mainPageVerticalContainer)
        self.ipAddressLbl.setObjectName("ipAddressLbl")
        self.ipAddressContainer.addWidget(self.ipAddressLbl)
        self.ipAddressField = QtWidgets.QLineEdit(self.mainPageVerticalContainer)
        self.ipAddressField.setObjectName("ipAddressField")
        self.ipAddressContainer.addWidget(self.ipAddressField)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.ipAddressContainer.addItem(spacerItem2)
        self.verticalLayout_9.addLayout(self.ipAddressContainer)
        self.usernameContainer = QtWidgets.QHBoxLayout()
        self.usernameContainer.setObjectName("usernameContainer")
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.usernameContainer.addItem(spacerItem3)
        self.usernameLbl = QtWidgets.QLabel(self.mainPageVerticalContainer)
        self.usernameLbl.setObjectName("usernameLbl")
        self.usernameContainer.addWidget(self.usernameLbl)
        self.usernameField = QtWidgets.QLineEdit(self.mainPageVerticalContainer)
        self.usernameField.setObjectName("usernameField")
        self.usernameContainer.addWidget(self.usernameField)
        spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.usernameContainer.addItem(spacerItem4)
        self.verticalLayout_9.addLayout(self.usernameContainer)
        spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_9.addItem(spacerItem5)
        self.connectBtnContainer = QtWidgets.QHBoxLayout()
        self.connectBtnContainer.setObjectName("connectBtnContainer")
        self.connectBtn = QtWidgets.QPushButton(self.mainPageVerticalContainer)
        self.connectBtn.setMaximumSize(QtCore.QSize(450, 16777215))
        self.connectBtn.setObjectName("connectBtn")
        self.connectBtnContainer.addWidget(self.connectBtn)
        self.verticalLayout_9.addLayout(self.connectBtnContainer)
        spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_9.addItem(spacerItem6)
        self.verticalLayout_8.addWidget(self.mainPageVerticalContainer)
        self.verticalLayout_8.setStretch(0, 1)
        self.verticalLayout_8.setStretch(1, 2)
        self.verticalLayout_6.addWidget(self.mainPageContainer)
        self.stackedWidget.addWidget(self.mainPage)
        # --- chat page ---
        self.chatPage = QtWidgets.QWidget()
        self.chatPage.setObjectName("chatPage")
        self.hboxlayout = QtWidgets.QHBoxLayout(self.chatPage)
        self.hboxlayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.hboxlayout.setContentsMargins(0, 0, 0, 0)
        self.hboxlayout.setSpacing(0)
        self.hboxlayout.setObjectName("hboxlayout")
        self.chatWidget = QtWidgets.QWidget(self.chatPage)
        self.chatWidget.setStyleSheet("background-color:white;")
        self.chatWidget.setObjectName("chatWidget")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.chatWidget)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.chatVerticalContainer = QtWidgets.QVBoxLayout()
        self.chatVerticalContainer.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
        self.chatVerticalContainer.setSpacing(0)
        self.chatVerticalContainer.setObjectName("chatVerticalContainer")
        # Scroll area that holds the incoming message labels.
        self.messagesScrollArea = QtWidgets.QScrollArea(self.chatWidget)
        self.messagesScrollArea.setStyleSheet("background:white;\n"
"border-color:white;\n"
"border:solid;")
        self.messagesScrollArea.setWidgetResizable(True)
        self.messagesScrollArea.setObjectName("messagesScrollArea")
        self.messagesScrollAreaContent = QtWidgets.QWidget()
        self.messagesScrollAreaContent.setGeometry(QtCore.QRect(0, 0, 871, 509))
        self.messagesScrollAreaContent.setObjectName("messagesScrollAreaContent")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.messagesScrollAreaContent)
        self.verticalLayout.setObjectName("verticalLayout")
        self.messagesScrollArea.setWidget(self.messagesScrollAreaContent)
        self.chatVerticalContainer.addWidget(self.messagesScrollArea)
        self.chatFieldContainer = QtWidgets.QHBoxLayout()
        self.chatFieldContainer.setSpacing(0)
        self.chatFieldContainer.setObjectName("chatFieldContainer")
        self.messageTextEdit = QtWidgets.QTextEdit(self.chatWidget)
        self.messageTextEdit.setMaximumSize(QtCore.QSize(16777215, 75))
        self.messageTextEdit.setStyleSheet("background-color:#eeeeee;\n"
"border: solid;\n"
"border-color: #eeeeee;\n"
"border-radius:5px;\n"
"border-width:3px;\n"
"margin-bottom:10px;\n"
"margin-right:20px;\n"
"margin-left:20px;\n"
"margin-top:20px;")
        self.messageTextEdit.setObjectName("messageTextEdit")
        self.chatFieldContainer.addWidget(self.messageTextEdit)
        self.sendBtn = QtWidgets.QPushButton(self.chatWidget)
        self.sendBtn.setMinimumSize(QtCore.QSize(70, 20))
        self.sendBtn.setMaximumSize(QtCore.QSize(16000000, 55))
        self.sendBtn.setStyleSheet("border:none;\n"
"background:#eeeeee;\n"
"margin-top:9px;")
        self.sendBtn.setObjectName("sendBtn")
        self.chatFieldContainer.addWidget(self.sendBtn)
        spacerItem7 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.chatFieldContainer.addItem(spacerItem7)
        self.chatVerticalContainer.addLayout(self.chatFieldContainer)
        self.chatVerticalContainer.setStretch(0, 9)
        self.chatVerticalContainer.setStretch(1, 1)
        self.verticalLayout_3.addLayout(self.chatVerticalContainer)
        self.hboxlayout.addWidget(self.chatWidget)
        # --- members side panel ---
        self.membersWidget = QtWidgets.QWidget(self.chatPage)
        self.membersWidget.setStyleSheet("background-color:#00A5FF;")
        self.membersWidget.setObjectName("membersWidget")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.membersWidget)
        self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.membersContainer = QtWidgets.QVBoxLayout()
        self.membersContainer.setObjectName("membersContainer")
        self.membersLbl = QtWidgets.QLabel(self.membersWidget)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(16)
        self.membersLbl.setFont(font)
        self.membersLbl.setStyleSheet("color:white;")
        self.membersLbl.setAlignment(QtCore.Qt.AlignCenter)
        self.membersLbl.setObjectName("membersLbl")
        self.membersContainer.addWidget(self.membersLbl)
        self.membersListWidget = QtWidgets.QListWidget(self.membersWidget)
        self.membersListWidget.setStyleSheet("border-color:#00A5FF;\n"
"border:solid;\n"
"color:white;")
        self.membersListWidget.setObjectName("membersListWidget")
        self.membersContainer.addWidget(self.membersListWidget)
        spacerItem8 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.membersContainer.addItem(spacerItem8)
        self.disconnectBtn = QtWidgets.QPushButton(self.membersWidget)
        self.disconnectBtn.setMinimumSize(QtCore.QSize(0, 30))
        self.disconnectBtn.setStyleSheet("border:none;\n"
"color:white;\n"
"background:#dd0000;")
        self.disconnectBtn.setObjectName("disconnectBtn")
        self.membersContainer.addWidget(self.disconnectBtn)
        self.verticalLayout_5.addLayout(self.membersContainer)
        self.hboxlayout.addWidget(self.membersWidget)
        self.hboxlayout.setStretch(0, 4)
        self.hboxlayout.setStretch(1, 1)
        self.stackedWidget.addWidget(self.chatPage)
        self.gridLayout_3.addWidget(self.stackedWidget, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply the user-visible (translatable) strings to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Chat App"))
        self.mainPageTitle.setText(_translate("MainWindow", "Chat App"))
        self.ipAddressLbl.setText(_translate("MainWindow", "IP Address:"))
        self.usernameLbl.setText(_translate("MainWindow", "Username:"))
        self.connectBtn.setText(_translate("MainWindow", "Connect!"))
        self.sendBtn.setText(_translate("MainWindow", "Send!"))
        self.membersLbl.setText(_translate("MainWindow", "Members (3):"))
        self.disconnectBtn.setText(_translate("MainWindow", "Disconnect"))
class mainAppWindow(QMainWindow, Ui_MainWindow):
    """Main chat client window.

    Connects to a chat server, receives length-prefixed JSON payloads on
    a background thread and renders users/messages in the UI.
    """
    # Emitted from the background receive thread so the label creation
    # happens on the Qt main thread.
    newMessage = QtCore.pyqtSignal(dict)

    def __init__(self, parent=None):
        super(mainAppWindow, self).__init__(parent)
        self.setupUi(self)
        self.sendBtn.clicked.connect(self.send_message)
        self.disconnectBtn.clicked.connect(self.disconnect)
        self.connectBtn.clicked.connect(self.connect)
        self.newMessage.connect(self.new_msg)
        self.client = socket.socket()

    def new_msg(self, msg):
        """Append a received message (dict with 'text' and 'sender') to the chat."""
        msg_text = msg["text"]
        msg_sender = msg["sender"]
        new_msg_lbl = QtWidgets.QLabel("msgLabel")
        new_msg_lbl.setText(f" {msg_sender}:\n {msg_text}")
        #TODO: try to find a better formula to calculate the size of the message box/label.
        new_msg_lbl.setMinimumSize(len(msg_sender) * 10, 45)
        new_msg_lbl.setMaximumSize(len(f" {msg_text}") * 5, len(f" {msg_text}") * 5)
        new_msg_lbl.setStyleSheet("background:#eeeeee;")
        self.verticalLayout.addWidget(new_msg_lbl)

    def _client_connection(self, host, port, username):
        """Receive loop, run in a background thread.

        Protocol: the payload byte length arrives first as a decimal
        string, then the JSON payload itself (with a 'data_type' field
        of either 'user_data' or 'message_data').
        """
        self.client.send(str.encode(username))
        while True:
            try:
                msg_size = int(self.client.recv(2048).decode("utf-8"))
                msg = self.client.recv(msg_size)
                msg_data = json.loads(msg.decode("utf-8"))
                if (msg_type := msg_data["data_type"]) == "user_data":
                    users = msg_data["value"]
                    self.membersLbl.setText(f"Members ({len(users)}):")
                    self.membersListWidget.clear()
                    for user in users:
                        self.membersListWidget.addItem(user)
                elif msg_type == "message_data":
                    sender = msg_data["sender"]
                    text = msg_data["value"]
                    #All of the spaces are just for a slight margin on the ui...
                    self.newMessage.emit({"text": text, "sender": sender})
            except (OSError, ValueError, KeyError):
                # Socket failure, malformed length/JSON or a payload
                # missing an expected key ends the loop.  (The original
                # bare ``except`` also swallowed SystemExit and
                # KeyboardInterrupt, hiding real errors.)
                break

    def connect(self):
        """Validate the ``ip:port`` address and username, then connect."""
        #Address format ip:port
        regex = r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?::[0-9]{1,4})?\b"
        address = self.ipAddressField.text()
        username = self.usernameField.text()
        if address == "":
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("The address field is empty!")
            msg.setWindowTitle("Error!")
            msg.setStandardButtons(QMessageBox.Ok)
            msg.exec_()
            return
        elif username == "":
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("The username field is empty!")
            msg.setWindowTitle("Error!")
            msg.setStandardButtons(QMessageBox.Ok)
            msg.exec_()
            return
        # The port part of the regex is optional, so also require a ':'
        # explicitly -- the original crashed with IndexError when the
        # user typed a bare IP without a port.
        if re.match(regex, address) and ":" in address:
            addressSplit = address.split(":")
            host = addressSplit[0]
            port = int(addressSplit[1])
            # Fresh socket per attempt; the previous one may be closed.
            self.client = socket.socket()
            try:
                self.client.connect((host, port))
                #Load the chat screen.
                self.stackedWidget.setCurrentIndex(1)
                c_thread = threading.Thread(target=self._client_connection, args=(host, port, username,))
                c_thread.start()
            except Exception as e:
                print(e)
                msg = QMessageBox()
                msg.setIcon(QMessageBox.Critical)
                msg.setText("Failed to connect to this server!")
                msg.setWindowTitle("Error!")
                msg.setStandardButtons(QMessageBox.Ok)
                msg.exec_()
                return
        else:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("This address is invalid, the format should be: ip:port.")
            msg.setWindowTitle("Error!")
            msg.setStandardButtons(QMessageBox.Ok)
            msg.exec_()

    def _disconnect_client(self):
        """Send the length-prefixed '!disconnect' sentinel to the server."""
        disconnect_msg = "!disconnect"
        d_msg = disconnect_msg.encode("utf-8")
        d_msg_len = str(len(d_msg)).encode("utf-8")
        try:
            self.client.send(d_msg_len)
            self.client.send(d_msg)
        except ConnectionResetError:
            #Meaning the server already closed the connection.
            #Do nothing.
            pass

    def disconnect(self):
        """Confirm with the user, then leave the server and show the home page.

        NOTE(review): this overrides ``QObject.disconnect`` -- confirm no
        caller relies on the Qt base-class behaviour.
        """
        disconenct_msg_box = QMessageBox()
        disconenct_msg_box.setText("Are you sure you want to disconnect from this server?")
        disconenct_msg_box.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)
        disconenct_msg_box = disconenct_msg_box.exec_()
        if disconenct_msg_box == QMessageBox.Yes:
            #Disconnect
            self._disconnect_client()
            #Return to the home page.
            self.stackedWidget.setCurrentIndex(0)

    def send_message(self):
        """Send the text from the input box as a length-prefixed message."""
        message = self.messageTextEdit.toPlainText()
        if message != "":
            msg = message.encode("utf-8")
            msg_len = str(len(msg)).encode("utf-8")
            #Send the amount of data first
            try:
                self.client.send(msg_len)
                #Then send the actual data
                self.client.send(msg)
            except ConnectionResetError:
                connectionWarning = QMessageBox()
                connectionWarning.setText("Lost connection to the server.")
                connectionWarning.setIcon(QMessageBox.Critical)
                connectionWarning.setStandardButtons(QMessageBox.Ok)
                #Waiting for the user to click ok.
                connectionWarning = connectionWarning.exec_()
                self.stackedWidget.setCurrentIndex(0)
            self.messageTextEdit.clear()

    def closeEvent(self, event):
        """Ask for confirmation before closing; notify the server if connected."""
        close = QMessageBox()
        close.setText("Are you sure you want to exit?")
        close.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)
        close = close.exec_()
        if close == QMessageBox.Yes:
            try:
                self._disconnect_client()
                event.accept()
            except OSError:
                #Meaning the client is not connected.
                pass
        else:
            event.ignore()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
MainWindow = mainAppWindow()
MainWindow.show()
sys.exit(app.exec_()) |
from sklearn import tree, svm
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.externals import joblib
# Compare a decision tree, a LinearSVC and a grid-searched RBF SVC on
# the car write-off dataset below.
treeClassifier = tree.DecisionTreeClassifier()
linearSVC = LinearSVC()
# (The original also created a plain ``svm.SVC()`` here, but it was
# overwritten by the GridSearchCV below before ever being used.)
titles = ['front_bumber','rear_bumper','left_side','right_side','fl_lights','fr_lights','rl_lights','rr_lights','front_windshield','rear_windshield','roof','fire']
#Spaced every 10 for counting purposes
#Damage: 0 = no , 1 = medium, 2 = high
X = [
    [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
    [2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
    [2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0],
    [2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 2, 0, 0, 2, 0, 0, 0, 2, 2, 2, 2],
    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
    [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
    [1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
    [1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
    [1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
    [1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1],
    [1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 0, 0],
    [0, 0, 0, 0, 2, 0, 2, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0],
    [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0],
    [2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 1, 0],
    [1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0],
    [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
    [1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
    [1, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
    [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0],
    [1, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0],
    [1, 0, 0, 0, 2, 2, 0, 0, 1, 1, 0, 0],
    [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
    [0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
    [2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0],
    ]
#Spaced every 5 for counting purposes~
Y = [
    'write-off', 'write-off', 'write-off', 'write-off', 'write-off',
    'write-off', 'write-off', 'write-off', 'write-off', 'write-off',
    'write-off', 'write-off', 'write-off', 'write-off', 'write-off',
    'write-off', 'write-off', 'write-off', 'write-off', 'write-off',
    'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off',
    'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off',
    'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off',
    'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off',
    'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off',
    'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off', 'not-write-off',
    ]
X_train, X_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.4, random_state=1)
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
              'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
svc = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
# This is the Server-side code to decide which classifier is best
# Use the linearSVC_model as it is the best. Tested for different values of C and Gamma, and changing training set size.
treeClassifier_model = treeClassifier.fit(X_train, y_train)
svc_model = svc.fit(X_train, y_train)
linearSVC_model = linearSVC.fit(X_train, y_train)
# NOTE: the original used Python 2 ``print`` statements here (syntax
# errors on Python 3); converted to single-argument print() calls,
# which behave identically on both versions.
print('TreeClassifier:')
print(treeClassifier_model.score(X_test, y_test))
print('SVC:')
print(svc_model.score(X_test, y_test))
print(svc_model.best_params_)
print('-------------')
print('LinearSVC')
print(linearSVC_model.score(X_test, y_test))
#reconstruction
y_pred = svc.predict(X_test)
print(classification_report(y_test, y_pred, target_names=['not-write-off','write-off']))
# API has only 1 get method which uses the LinearSVC classifier variable to call treeClassifier_model.predict(input from website)
print(treeClassifier_model.predict([[0,0,0,0,0,0,0,0,0,1,0,0]]))
#Save model to file
joblib.dump(treeClassifier_model, 'treeClassifier_model.pkl')
#Load and predict.  The original called an undefined ``predict()``
#(a NameError); round-trip the persisted model instead.
loaded_model = joblib.load('treeClassifier_model.pkl')
print(loaded_model.predict([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]))
|
#!/usr/bin/env python3
""" Manipulation of CNF formulae and satisfying assignments """
import os.path
import subprocess
def sign(x):
    """Return -1, 0 or 1 according to the sign of ``x``.

    Implemented with integer comparisons rather than ``int(x / abs(x))``:
    true division produces a float whose rounding can truncate to the
    wrong value for very large integers, and the comparison form avoids
    the division (and any float) entirely.
    """
    return (x > 0) - (x < 0)
class FormatError(IOError):
    """ Failure to parse a CNF/SAT file """
    # The explicit pass-through ``__init__(message)`` was redundant:
    # IOError already accepts and stores a single message argument.
class SAT(object):
    """ A (CNF) formula and a satisfying assignment for a SAT problem """

    def __read_formula(self, cnf_file):
        """Parse a DIMACS CNF file into self.formula (a list of clauses).

        Also populates self.comments, self.num_variables, self.num_clauses
        and self.width (the longest clause length).  Raises FormatError on
        any parse failure, with the underlying cause chained.
        """
        self.comments = []
        self.formula = []
        self.width = 0
        try:
            # ``with`` guarantees the handle is closed; the original
            # iterated an anonymous ``open()`` and leaked it.
            with open(cnf_file) as cnf:
                for line in cnf:
                    if line.startswith('c '):
                        self.comments.append(line[2:])
                    elif line.startswith('p '):
                        p, fmt, nv, nc = line.split()
                        self.num_variables = int(nv)
                        self.num_clauses = int(nc)
                        assert p == "p" and fmt == "cnf"
                        assert self.num_variables > 0 and self.num_clauses > 0
                    else:  # formula clause, terminated by a 0 literal
                        clause = list(map(int, line.split()))
                        assert clause.pop() == 0
                        for literal in clause:
                            assert abs(literal) <= self.num_variables
                        self.width = max(self.width, len(clause))
                        self.formula.append(clause)
        except Exception as err:
            # Narrower than the original bare ``except:`` (which also
            # swallowed KeyboardInterrupt/SystemExit) and chains the
            # cause for easier debugging.
            raise FormatError("Invalid CNF file") from err

    def __read_solution(self, sat_file):
        """Parse a minisat-style result file into self.solution.

        self.solution maps variable -> -1/1, or is None for UNSAT/INDET.
        Raises FormatError on malformed content.
        """
        self.solution = {}
        # The original never closed this handle; ``with`` fixes the leak.
        with open(sat_file) as file:
            try:
                result = file.readline()
                if result == 'UNSAT\n' or result == 'INDET\n':
                    self.solution = None
                    return
                assert result == 'SAT\n'
                assignment = [int(lit) for lit in file.readline().split()]
                assert assignment.pop() == 0
                for literal in assignment:
                    self.solution[abs(literal)] = sign(literal)
            except Exception as err:
                raise FormatError("Invalid SAT file") from err

    def __init__(self, instance,
                 command='source ~/.bashrc\nminisat {} {}',
                 solution=True):
        """ Process a formula from a file in DIMACS CNF format """
        cnf_file = instance + '.cnf'
        sat_file = instance + '.sat'
        self.__read_formula(cnf_file)
        if solution:
            # Run the external solver only when no cached .sat file exists.
            if not os.path.exists(sat_file):
                subprocess.call([command.format(cnf_file, sat_file)], shell=True)
            self.__read_solution(sat_file)

    @staticmethod
    def is_horn(clause):
        """A Horn clause has at most one positive literal."""
        return len([lit for lit in clause if lit > 0]) <= 1

    @staticmethod
    def is_antihorn(clause):
        """An anti-Horn clause has at most one negative literal."""
        return len([lit for lit in clause if lit < 0]) <= 1
|
from google.appengine.api.modules import (
get_current_module_name, get_current_version_name
)
from module_a import Entity
def deferred_create_entity(entity_id):
    """Create and persist an Entity stamped with the module and version
    of the App Engine instance executing this task."""
    entity = Entity(
        id=entity_id,
        created_at_module=get_current_module_name(),
        created_at_version=get_current_version_name(),
    )
    entity.put()
|
# Read two grades from the user and print their arithmetic mean.
nota1 = float(input('1ª nota: '))
nota2 = float(input('2ª nota: '))
media = (nota1 + nota2) / 2
print(f'a media é: {media}')
|
from api.repositories.repos import Repos
class Github():
    """Thin entry point for the GitHub API.

    Authentication kwargs (token or username/password) are forwarded
    untouched to each resource group.
    """
    def __init__(self, **kwargs):
        self.api_root_url = "http://api.github.com"
        self.repos = Repos(self.api_root_url, **kwargs)
if __name__ == '__main__':
    import os

    # SECURITY: the original script embedded a real API token and a
    # real password here, committing live credentials to source
    # control.  Read them from the environment instead.
    r = Github(token=os.environ.get("GITHUB_TOKEN", ""))
    x = r.repos.list_your_repos()
    print(x.text)
    r = Github(username=os.environ.get("GITHUB_USERNAME", ""),
               password=os.environ.get("GITHUB_PASSWORD", ""))
    x = r.repos.list_your_repos()
    print(x.text)
|
""" Integrates the Chameleon template language.
This is basically a copy of more.chameleon, with the additional inclusion of a
gettext translation function defined by :mod:`onegov.core.i18n`.
To use a chameleon template, applications have to specify the templates
directory, in addition to inheriting from
:class:`onegov.core.framework.Framework`.
For example::
from onegov.core.framework import Framework
class App(Framework):
pass
@App.template_directory()
def get_template_directory():
return 'templates'
@App.path()
class Root:
pass
@App.html(model=Root, template='index.pt')
def view_root(self, request):
return {
'title': 'The Title'
}
The folder can either be a directory relative to the app class or an absolute
path.
"""
import os.path
from chameleon import PageTemplate as PageTemplateBase
from chameleon import PageTemplateFile as PageTemplateFileBase
from chameleon import PageTemplateLoader
from chameleon import PageTextTemplateFile
from chameleon.astutil import Builtin
from chameleon.tal import RepeatDict
from chameleon.utils import Scope
from functools import cached_property
from onegov.core.framework import Framework
from typing import Any, TypeVar, TYPE_CHECKING
if TYPE_CHECKING:
from _typeshed import StrPath
from chameleon.zpt.template import Macro
from collections.abc import Callable, Iterable, Mapping
from .request import CoreRequest
_T = TypeVar('_T')
# Templates auto-reload only when the ONEGOV_DEVELOPMENT env var is '1'.
AUTO_RELOAD = os.environ.get('ONEGOV_DEVELOPMENT') == '1'
BOOLEAN_HTML_ATTRS = frozenset(
    [
        # List of Boolean attributes in HTML that should be rendered in
        # minimized form (e.g. <img ismap> rather than <img ismap="">)
        # From http://www.w3.org/TR/xhtml1/#guidelines (C.10)
        "compact",
        "nowrap",
        "ismap",
        "declare",
        "noshade",
        "checked",
        "disabled",
        "readonly",
        "multiple",
        "selected",
        "noresize",
        "defer",
    ]
)
class PageTemplate(PageTemplateBase):
    """In-memory page template that renders boolean HTML attributes in
    minimized form, unless the caller overrides ``boolean_attributes``."""
    def __init__(self, *args: Any, **kwargs: Any):
        if 'boolean_attributes' not in kwargs:
            kwargs['boolean_attributes'] = BOOLEAN_HTML_ATTRS
        super().__init__(*args, **kwargs)
class PageTemplateFile(PageTemplateFileBase):
    """File-based page template that renders boolean HTML attributes in
    minimized form, unless the caller overrides ``boolean_attributes``."""
    def __init__(self, *args: Any, **kwargs: Any):
        if 'boolean_attributes' not in kwargs:
            kwargs['boolean_attributes'] = BOOLEAN_HTML_ATTRS
        super().__init__(*args, **kwargs)
def get_default_vars(
    request: 'CoreRequest',
    content: 'Mapping[str, Any]',
    suppress_global_variables: bool = False
) -> dict[str, Any]:
    """Assemble the variables handed to a template render.

    Starts with the request and its chameleon translate callable, merges
    in the view's own ``content`` (which may override both), and --
    unless suppressed -- adds the application's registered global
    template variables.
    """
    variables = {
        'request': request,
        'translate': request.get_translate(for_chameleon=True),
    }
    variables.update(content)
    if suppress_global_variables:
        return variables
    registry = request.app.config.templatevariables_registry
    return registry.get_variables(request, variables)
class TemplateLoader(PageTemplateLoader):
    """ Extends the default page template loader with the ability to
    lookup macros in various folders.
    """
    # Map template formats to the minimizing template classes above.
    formats = {
        'xml': PageTemplateFile,
        'text': PageTextTemplateFile,
    }
    @cached_property
    def macros(self) -> 'MacrosLookup':
        # Lazily built lookup of page macros across all search paths.
        return MacrosLookup(self.search_path, name='macros.pt')
    @cached_property
    def mail_macros(self) -> 'MacrosLookup':
        # Separate macro namespace used by e-mail templates.
        return MacrosLookup(self.search_path, name='mail_macros.pt')
class MacrosLookup:
    """ Provides a macro lookup spanning a list of search paths.

    A macro accessed through this lookup is resolved by walking the
    loader's search path and returning the first match, so per-path
    ``macros.pt`` files behave as if they were a single file, with paths
    earlier in the list taking precedence over later ones.

    For example, given the search paths 'foo' and 'bar', foo/macros.pt
    could define 'users' and 'page', while bar/macros.pt could define
    'users' and 'site'.  The lookup then serves 'users' and 'page' from
    foo and 'site' from bar.
    """
    def __init__(
        self,
        search_paths: 'Iterable[StrPath]',
        name: str = 'macros.pt'
    ):
        """Index every macro found in ``name`` files along the paths."""
        existing = [
            path
            for path in (os.path.join(base, name) for base in search_paths)
            if os.path.isfile(path)
        ]
        # Iterate from lowest to highest precedence so that macros from
        # paths earlier in the search path overwrite later ones.
        lookup = {}
        for path in reversed(existing):
            template = PageTemplateFile(
                path,
                search_paths,
                auto_reload=AUTO_RELOAD,
            )
            for macro_name in template.macros.names:
                lookup[macro_name] = template
        self.lookup = lookup
    def __getitem__(self, name: str) -> 'Macro':
        """Return the named macro from the first template defining it."""
        # Chameleon normalizes macro names internally ('-' -> '_'), so
        # apply the same normalization before the lookup.
        normalized = name.replace('-', '_')
        return self.lookup[normalized].macros[normalized]
@Framework.template_loader(extension='.pt')
def get_template_loader(
    template_directories: list[str],
    settings: dict[str, Any]
) -> TemplateLoader:
    """ Returns the Chameleon template loader for templates with the
    extension ``.pt``.
    """
    loader = TemplateLoader(
        template_directories,
        default_extension='.pt',
        prepend_relative_search_path=False,
        auto_reload=AUTO_RELOAD,
    )
    return loader
@Framework.template_render(extension='.pt')
def get_chameleon_render(
    loader: TemplateLoader,
    name: str,
    original_render: 'Callable[[str, CoreRequest], _T]'
) -> 'Callable[[dict[str, Any], CoreRequest], _T]':
    """ Returns the Chameleon template renderer for the required template.
    """
    template = loader.load(name, 'xml')
    def render(content: dict[str, Any], request: 'CoreRequest') -> Any:
        # Render the template first, then hand the resulting markup to
        # the original renderer (which wraps it into the response).
        markup = template.render(**get_default_vars(request, content))
        return original_render(markup, request)
    return render
def render_template(
    template: str,
    request: 'CoreRequest',
    content: dict[str, Any],
    suppress_global_variables: bool = True
) -> str:
    """ Renders the given template. Use this if you need to get the rendered
    value directly. If you render a view, this is not needed!
    By default, mail templates (templates starting with 'mail_') skip the
    inclusion of global variables defined through the template_variables
    directive.
    """
    # NOTE(review): the annotation declares bool, yet this branch only
    # fires for the string 'infer' -- confirm callers pass 'infer' and
    # widen the annotation if so.
    if suppress_global_variables == 'infer':
        suppress_global_variables = template.startswith('mail_')
    registry = request.app.config.template_engine_registry
    # Look up the already-loaded page template by name.
    page_template = registry._template_loaders['.pt'][template]
    variables = get_default_vars(
        request, content, suppress_global_variables=suppress_global_variables)
    return page_template.render(**variables)
def render_macro(
    macro: 'Macro',
    request: 'CoreRequest',
    content: dict[str, Any],
    suppress_global_variables: bool = True
) -> str:
    """ Renders a :class:`chameleon.zpt.template.Macro` like this::
    layout.render_macro(layout.macros['my_macro'], **vars)
    This code is basically a stripped down version of this:
    `<https://github.com/malthe/chameleon/blob\
    /257c9192fea4b158215ecc4f84e1249d4b088753/src/chameleon\
    /zpt/template.py#L206>`_.
    As such it doesn't treat chameleon like a black box and it will probably
    fail one day in the future, if Chameleon is refactored. Our tests will
    detect that though.
    """
    # Build the (relatively expensive) default variable set once per
    # request and cache it on the request object for later macro calls.
    if not hasattr(request, '_macro_variables'):
        variables = get_default_vars(
            request=request,
            content={},
            suppress_global_variables=suppress_global_variables
        )
        # Internal names Chameleon's compiled macros expect in scope.
        variables.setdefault('__translate', variables['translate'])
        variables.setdefault('__convert', variables['translate'])
        variables.setdefault('__decode', bytes.decode)
        variables.setdefault('__on_error_handler', Builtin('str'))
        variables.setdefault('target_language', None)
        request._macro_variables = variables  # type:ignore[attr-defined]
    else:
        variables = request._macro_variables.copy()
    variables.update(content)
    # TAL's repeat machinery requires a RepeatDict in scope.
    variables['repeat'] = RepeatDict({})
    # Chameleon renders by appending string chunks to a stream list.
    stream: list[str] = []
    macro.include(stream, Scope(variables), {})
    return ''.join(stream)
|
# -*- coding: utf-8 -*-
from menucontext import MenuContext
# Controller-wide setup: attach the controller stylesheet and extend the
# site menu with document and page entries built from the database.
response.files.append(URL('static', 'css/last.css'))
menuadds = MenuContext(db)
response.menu += menuadds.menudocs()
response.menu += menuadds.menupags()
def read():
    """Display a single blog post: args(0) = post id, args(1) = slug.

    Returns the dict consumed by the view (title, meta, body, date and
    active flag) or raises HTTP 404 when the post does not exist.
    """
    slug = request.args(1)
    pid = int(request.args(0))
    post_data = db(db.post.id == pid).select(
        db.post.title,
        db.post.body,
        db.post.created_on,
        db.post.is_active,
        db.post.markup,
        db.post.xurl,
    )
    # Per-language renderers for MARKMIN ``code`` blocks (syntax highlight).
    markmin_extra = {
        'code_cpp': lambda text: CODE(text, language='cpp').xml(),
        'code_java': lambda text: CODE(text, language='java').xml(),
        'code_python': lambda text: CODE(text, language='python').xml(),
        'code_html': lambda text: CODE(text, language='html').xml(),
    }
    if post_data:
        for p in post_data:
            date = p.created_on
            response.title = p.title
            response.subtitle = date
            title = H1(A(p.title, _href=URL(c='post', f='read.html', args=[pid, slug])), _class='title ui-corner-all')
            if not p.is_active:
                response.flash = 'Este post está actualmente desactivado.'
            # Metadata bar: permalink and (signed) edit link.
            post_meta = DIV(
                UL(
                    LI(A('permalink ', _href=p.xurl, extension='html', _class='meta_link')),
                    LI(A('editar', _href=URL(c='gestor', f='index.html', args=['post', 'edit', 'post', pid], user_signature=True), _class='meta_link')),
                    _class='sf-menu der'),
                _class='post_meta')
            # Render the body according to the post's markup type.
            # NOTE(review): an unknown markup name would leave post_content
            # undefined — presumably the three names below are exhaustive.
            if p.markup.name == 'markmin':
                post_content = DIV(
                    MARKMIN(p.body, extra=markmin_extra),
                    _class='post_body'
                )
            elif p.markup.name == 'html':
                post_content = DIV(
                    XML(p.body),
                    _class='post_body'
                )
            elif p.markup.name == 'template':
                import gluon.template
                try:
                    post_content = XML((gluon.template.render(p.body, context=globals())))
                except Exception:
                    # Narrowed from a bare except; keep the placeholder body.
                    post_content = 'asdf'
            return dict(meta=post_meta, body=post_content, title=title, created_on=date, active=p.is_active)
    else:
        # BUG FIX: the controller name was misspelled 'd2efault', which
        # produced a broken "go to front page" link in the 404 body.
        raise HTTP(404, 'No existe la publicación.<a href="%s">Ir a la Portada</a>' % URL(c='default', f='index.html'))
@auth.requires_login()
def add():
    """Create a new post or edit an existing one.

    args(0) is 'edit' or 'post' and args(1) the post id when editing;
    otherwise a blank form for a new post is shown.
    """
    # BUG FIX: the original condition was
    #     request.args(0) == 'edit' or 'post' and request.args(1)
    # which, by operator precedence, parsed as
    #     (args(0) == 'edit') or ('post' and args(1))
    # and was truthy whenever a second argument existed, regardless of the
    # first argument's value.
    if request.args(0) in ('edit', 'post') and request.args(1):
        pid = int(request.args(1))
        response.title = 'Editando post'
    else:
        pid = ''
        response.title = 'Nuevo post'
    form = SQLFORM(db.post, pid, deletable=True, formstyle='divs')
    if form.process().accepted:
        # After saving, send the author to pick the block where the new
        # post will be presented.
        session.flash = 'Señale ahora el bloque donde quiere presentar el post recién creado.'
        redirect(URL(r=request, c='gestor', f='index', args=['post', 'context.post', form.vars.id, 'new', 'context'], user_signature=True))
    elif form.errors:
        response.flash = 'err'
    return dict(form=form)
|
# +JMJ+
# Paul A Maurais
# 2018
class Card:
    """A single playing card.

    Attributes:
        name: Card name ('Ace', 'Two', ... 'King').
        suit: Suit identifier (single letter or word).
        value: Point value (1-10; all face cards are worth 10).
        rank: Ordering rank (1-13), derived from value and name.
        dataDict: Mapping of the four attributes above, keyed by name.
    """
    def __init__(self, name='', suit='N', value=0):
        """Store name, suit and value; derive rank and build dataDict."""
        self.name = name
        self.suit = suit
        self.value = value
        self.rank = self.assignRank()
        self.dataDict = {"name": name, "suit": suit, "value": value, "rank": self.rank}
    def assignRank(self):
        """Return the ordering rank (1-13).

        Rank differs from value for face cards: Ten, Jack, Queen and King
        all have value 10 but ranks 10, 11, 12 and 13. Used to score runs.
        """
        if self.value < 10:
            return self.value
        elif self.name == 'Ten':
            return 10
        elif self.name == 'Jack':
            return 11
        elif self.name == 'Queen':
            return 12
        else:
            return 13
    def __getitem__(self, key):
        """Look up an attribute by its dataDict key.

        Valid keys are the strings 'name', 'suit', 'value' and 'rank'.
        (The original docstring wrongly described integer indices 0-3;
        dataDict has always been keyed by these strings.)
        """
        return self.dataDict[key]
    def __str__(self):
        # Both operands are already strings, so no str() wrapper is needed.
        return self.dataDict['name'] + ' of ' + self.dataDict['suit']
# +JMJ+
|
"""
Python solution for Supervised Neural Network Problem.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def weight_init(shape):
    """Initialize a weight matrix with fan-in scaled Gaussian values.

    Arguments:
        shape: Tuple (fan_out, fan_in) — shape of the weight matrix W.

    Returns:
        ndarray of the given shape drawn from N(0, 2 / fan_in).
    """
    mean = 0
    std = 2 / shape[1]  # scale by fan-in to keep early activations bounded
    # np.random.normal already returns an array of the requested shape,
    # so the original follow-up np.reshape call was redundant.
    return np.random.normal(mean, std, shape)
def bias_init(shape):
    """Return a zero-initialized bias array of the given shape."""
    return np.zeros(shape)
def sigmoid(val):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-val))."""
    return 1.0 / (1.0 + np.exp(-val))
def softmax(val):
    """Column-wise softmax over the class axis (axis 0).

    BUG FIX: the original exponentiated the raw logits, which overflows
    np.exp for large values and yields NaN probabilities. Subtracting the
    per-column maximum first is mathematically equivalent (the shift
    cancels in the ratio) and numerically stable.
    """
    shifted = np.exp(val - np.max(val, axis=0, keepdims=True))
    return shifted / np.sum(shifted, axis=0)
def forward_pass(x, weights, bias):
    """Propagate the input through every layer and collect activations.

    Arguments:
        x: Input matrix (features x examples).
        weights: List of per-layer weight matrices.
        bias: List of per-layer bias matrices.

    Returns:
        (activations, prediction) — the per-layer activation list, and the
        rounded output probabilities serving as a one-hot-style prediction.
    """
    activations = []
    layer_input = x
    last = len(weights) - 1
    for idx, (w, b) in enumerate(zip(weights, bias)):
        z = np.matmul(w, layer_input) + b
        if idx == last:
            # Output layer: softmax yields per-class probabilities.
            prob = softmax(z)
            activations.append(prob)
        else:
            # Hidden layers use the sigmoid non-linearity.
            layer_input = sigmoid(z)
            activations.append(layer_input)
    return activations, np.round(prob)
def cost_calc(labels, prob):
    """Cross-entropy cost.

    Arguments:
        labels: One-hot label matrix (examples x classes).
        prob: Probability matrix (classes x examples) from the forward pass.
    """
    # trace(labels @ log(prob)) sums the log-probabilities assigned to the
    # true class of every example; negate to get the cost.
    return -np.trace(labels @ np.log(prob))
def error_calc(labels, activations, weights):
    """Back-propagate layer errors (computed last layer first).

    Arguments:
        labels: One-hot label matrix (examples x classes).
        activations: Per-layer activation matrices from the forward pass.
        weights: Per-layer weight matrices.

    Returns:
        List of error matrices, ordered from the output layer backwards.
    """
    # Output-layer error for softmax + cross entropy: prediction - target.
    output_error = activations[-1] - np.transpose(labels)
    errors = [output_error]
    delta = output_error
    for layer in range(len(activations) - 2, -1, -1):
        delta = np.matmul(np.transpose(weights[layer + 1]), delta)
        a = activations[layer]
        delta = delta * (a * (1 - a))  # sigmoid derivative a * (1 - a)
        errors.append(delta)
    return errors
def grad_calc(errors, activations, x):
    """Compute weight and bias gradients from back-propagated errors.

    Arguments:
        errors: Error matrices in reverse order (output layer first).
        activations: Per-layer activation matrices.
        x: The input matrix.

    Returns:
        (w_grad, b_grad) lists, ordered from the first layer onwards.
    """
    w_grad = []
    b_grad = []
    for layer in range(len(errors)):
        # ``errors`` is stored output-layer-first, so index from the back
        # to walk the layers front-to-back.
        err = errors[-(1 + layer)]
        inputs = x if layer == 0 else activations[layer - 1]
        w_grad.append(np.matmul(err, np.transpose(inputs)))
        b_grad.append(np.sum(err, axis=1, keepdims=True))
    return w_grad, b_grad
def reg_cost_calc(weights, decay):
    """L2 regularization cost: (decay / 2) * sum of all squared weights.

    Arguments:
        weights: List of weight matrices.
        decay: The regularization constant.
    """
    return (decay / 2) * sum(np.sum(w * w) for w in weights)
def back_prop(x, y, weights, bias, decay, lr_rate):
    """One gradient-descent step: forward pass, gradients, parameter update.

    Arguments:
        x: Input matrix (examples x features).
        y: One-hot label matrix.
        weights: Per-layer weight matrices.
        bias: Per-layer bias matrices.
        decay: L2 regularization constant.
        lr_rate: Learning rate.

    Returns:
        (new_weights, new_bias, total_cost, predictions)
    """
    activations, predictions = forward_pass(np.transpose(x), weights, bias)
    data_cost = cost_calc(y, activations[-1])
    layer_errors = error_calc(y, activations, weights)
    w_grad, b_grad = grad_calc(layer_errors, activations, np.transpose(x))
    reg_cost = reg_cost_calc(weights, decay)
    batch_size = x.shape[0]
    new_weights = []
    new_biases = []
    for w_old, b_old, w_gr, b_gr in zip(weights, bias, w_grad, b_grad):
        # Weight decay applies to weights only, never to biases.
        new_weights.append(w_old - lr_rate * ((w_gr / batch_size) + decay * w_old))
        new_biases.append(b_old - lr_rate * (b_gr / batch_size))
    return new_weights, new_biases, (data_cost + reg_cost), predictions
def grad_check(x, y, weights, bias, grad):
    """Numerically verify analytic gradients via central differences.

    Perturbs individual weights by +/- eps, recomputes the cost, and prints
    the numeric estimate next to the analytic gradient. Only half the
    layers, rows and columns are checked to keep the run fast.

    Arguments:
        x: The input matrix.
        y: The one-hot encoded label matrix.
        weights: Per-layer weight matrices.
        bias: Per-layer bias matrices.
        grad: Analytic gradient matrices to compare against.
    """
    eps = 1e-4  # perturbation size for the central difference
    for layer in range(len(weights) // 2):
        rows, cols = weights[layer].shape
        for row in range(rows // 2):
            for col in range(cols // 2):
                plus = weights[layer].copy()
                minus = weights[layer].copy()
                plus[row, col] += eps
                minus[row, col] -= eps
                w_plus = weights[:]
                w_plus[layer] = plus
                w_minus = weights[:]
                w_minus[layer] = minus
                act_plus, _ = forward_pass(np.transpose(x), w_plus, bias)
                act_minus, _ = forward_pass(np.transpose(x), w_minus, bias)
                numeric = (cost_calc(y, act_plus[-1]) - cost_calc(y, act_minus[-1])) / (2 * eps)
                print(layer, row, col, numeric, grad[layer][row, col])
def dense_to_one_hot(labels, num_class):
    """Convert a dense label vector into a one-hot encoded matrix."""
    num_labels = labels.shape[0]
    one_hot = np.zeros((num_labels, num_class))
    # Flat indexing: row i gets a 1 in column labels[i].
    one_hot.flat[np.arange(num_labels) * num_class + labels.ravel()] = 1
    return one_hot
def acc(pred, lab):
    """Percentage of rows where argmax(pred) matches argmax(lab).

    Arguments:
        pred: Predicted matrix, one row per example (one-hot style).
        lab: Label matrix, one row per example (one-hot encoded).
    """
    matches = np.argmax(pred, 1) == np.argmax(lab, 1)
    return 100.0 * (np.sum(matches) / pred.shape[0])
# --- Data loading and preprocessing -------------------------------------
# Column 0 of the CSV holds the class label; the remaining columns are
# pixel intensities.
data = pd.read_csv("mnist.csv")
data = np.array(data)
np.random.shuffle(data)
train_data = data[0:30000,1:]
train_label = data[0:30000,0]
test_data = data[30000:,1:]
test_label = data[30000:,0]
# Binarize pixels: any non-zero intensity becomes 1.
train_data[train_data>0] = 1
test_data[test_data>0] = 1
no_of_classes = 10
test_label = dense_to_one_hot(test_label,no_of_classes)
train_label = dense_to_one_hot(train_label,no_of_classes)
train_data = np.array(train_data)
train_label = np.array(train_label)
test_data = np.array(test_data)
test_label = np.array(test_label)
"""
network initialization
Input Layer - 784 neurons
Hidden Layer - 256 neurons
Output Layer - 10 neurons -- for MNIST classes
"""
ww= []
bb = []
# Layer 1: 784 inputs -> 256 hidden units.
s1 = 256,784
s2 = 256,1
w = weight_init(s1)
b = bias_init(s2)
ww.append(w)
bb.append(b)
# Layer 2: 256 hidden units -> 10 output classes.
s1 = 10,256
s2 = 10,1
w = weight_init(s1)
b = bias_init(s2)
ww.append(w)
bb.append(b)
# --- Training hyperparameters -------------------------------------------
decay = 0.0001
lr_rate = 0.05
cost_batches = []
batch_size = 64
# Number of training samples that fit into whole mini-batches.
max_bs_len = int(30000/batch_size)*batch_size
# --- Mini-batch gradient descent ----------------------------------------
for epoch in range(5):
    bs = 0
    while bs<(max_bs_len)-batch_size:
        x = train_data[bs:(bs+batch_size)]
        y = train_label[bs:(bs+batch_size)]
        ww_new,bb_new,batch_cost,pred = back_prop(x,y,ww,bb,decay,lr_rate)
        ww = ww_new[:]
        bb = bb_new[:]
        cost_batches.append(batch_cost)
        bs = bs+batch_size
# --- Evaluation on the held-out set -------------------------------------
x = test_data
y = test_label
_,test_pred = forward_pass(np.transpose(x),ww,bb)
acc_cv = acc(test_pred,np.transpose(y))
print ("accuracy = %r" %(acc_cv))
# plot the training cost
plt.plot(cost_batches, marker='o')
plt.xlabel('Iterations')
plt.ylabel('J(theta)')
|
#!/usr/bin/env python
## coding: UTF-8
# ros系のライブラリ
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import CameraInfo
# ros以外
import cv2
import numpy as np
import math
import dlib
from cv_bridge import CvBridge, CvBridgeError
import imutils
import os
import atexit
import time
from time import sleep
import datetime
from statistics import mean, median,variance,stdev
import sys
# 自作モジュール
import dlib_module as dm
class DlibEx:
    """ROS node that runs dlib face detection on Kinect color frames and
    records mouth-aspect-ratio (MAR) samples for open/closed-mouth labeling.
    """
    def __init__(self):
        here_path = os.path.dirname(__file__)
        if here_path == "":
            here_path = "."
        self.predictor_path = here_path + "/shape_predictor_68_face_landmarks.dat"
        self.face = dm.FaceDLib(self.predictor_path)
        self._bridge = CvBridge()
        self.detector = dlib.get_frontal_face_detector()
        camera_info = rospy.wait_for_message("/kinect2/hd/camera_info", CameraInfo)
        r = 1
        self.width = int(camera_info.width * r)
        self.height = int(camera_info.height * r)
        # Mouth aspect ratio threshold: the mouth is considered open when
        # MAR reaches or exceeds this value.
        self.MAR_THRESH = 0.70
        # Log file for (label, MAR, detection score) records.
        self.f = open(here_path + "/userdata/records/dlib_ex/dl" + str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) + ".txt", "a")
        self.f.write("isOpen|MAR|score" + "\n")
        self._image_sub = rospy.Subscriber('/kinect2/hd/image_color', Image, self.callback)
        self.is_open_flag = 0
        self.fps_array = [0, 5, 10, 15, 20, 25, 56, 52, 1, 100]  # arbitrary seed values
        self.start = time.time()
        self.frame_no = 1
        self.rect_array = []
        # BUG FIX: these counters are read and written by
        # mouth_motion_with_mar but were never initialized, which raised
        # AttributeError on its first invocation.
        self.mouth_close_count = 0
        self.start_flag = 0
        self.speaking_flag = 0

    def mouth_motion_with_mar(self, mar, flag):
        """Judge from the MAR value whether the mouth is moving (speaking).

        Counts consecutive closed-mouth frames; after 10 such frames the
        person is treated as silent and the detector is armed, after which
        shorter closed streaks are interpreted as mouth movement.
        NOTE(review): the ``flag`` parameter is currently unused.
        """
        if mar < self.MAR_THRESH:
            # Mouth closed: lengthen the closed-frame streak.
            self.mouth_close_count += 1
        else:
            # Mouth open: reset the streak.
            self.mouth_close_count = 0
        print("mouth_close_count:", self.mouth_close_count)
        # Ten or more consecutive closed frames: not speaking.
        if self.mouth_close_count >= 10:
            # Once the count has passed 10, the flag stays set; from then
            # on a count below 10 counts as mouth movement.
            self.start_flag = 1
            return False
        else:
            if self.start_flag == 1:
                if self.speaking_flag == 0:
                    self.speaking_flag = 1
                return True
            else:
                return False

    def all_done(self):
        """Shutdown hook: close the record file."""
        self.f.close()
        print("")
        print("ex done!")

    def debug(self, d):
        """Print a debug value."""
        print(d)

    def callback(self, data):
        """Per-frame ROS image callback.

        Detects the face, computes the mouth aspect ratio, shows the
        annotated frame, cycles the open-mouth vowel label on Enter, and
        appends a "label|MAR|score" record to the log file.
        """
        cv_img = self._bridge.imgmsg_to_cv2(data, 'bgr8')
        # Resize width to the value given on the command line.
        cv_img = imutils.resize(cv_img, int(sys.argv[1]))
        self.image = cv_img
        img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
        # Detect faces in the frame.
        rects, scores, idx = self.detector.run(img_gray, 0, 0)
        if len(scores) == 0:
            s = ""
        else:
            s = str(scores[0])
        # Mouth aspect ratio: how wide open the mouth is.
        mar = self.face.mouth_aspect_ratio(img_gray, rects)
        # Annotated debug view with landmarks and MAR overlay.
        display_image = self.face.face_shape_detector_display(cv_img, img_gray, rects, mar, self.MAR_THRESH)
        if len(rects) != 0:
            rect = rects[0]
            cv2.rectangle(display_image, (rect.left(), rect.top()), (rect.right(), rect.bottom()), (255, 0, 0), 2)
            self.rect_array.append(rect.left() - rect.right())
        cv2.imshow('img', display_image)
        k = cv2.waitKey(1)
        # Even flag values mean "closed"; odd values cycle the vowel label
        # (a/i/u/e/o) used to tag open-mouth samples.
        if self.is_open_flag % 2 == 0:
            d = "Close"
            self.is_open = "FALSE"
        else:
            d = "OPEN"
            if self.is_open_flag == 1:
                self.is_open = "a"
            if self.is_open_flag == 3:
                self.is_open = "i"
            if self.is_open_flag == 5:
                self.is_open = "u"
            if self.is_open_flag == 7:
                self.is_open = "e"
            if self.is_open_flag == 9:
                self.is_open = "o"
        if k == 13:  # Enter pressed: advance the label state
            self.is_open_flag = self.is_open_flag + 1
        self.f.write(str(self.is_open) + "|" + str(mar) + "|" + s + "\n")
        # Rolling FPS estimate over the last 10 frames.
        self.fps = self.frame_no / (time.time() - self.start)
        self.fps_array.append(self.fps)
        self.fps_array.pop(0)
        self.frame_no = self.frame_no + 1
        # NOTE(review): the variance is computed but only used by the
        # commented-out stability debug output; kept for parity.
        v = variance(self.fps_array)
if __name__ == "__main__":
    # Start the ROS node, give subscriptions a moment to establish, and
    # register the shutdown hook so the log file is closed on exit.
    rospy.init_node('dlib_ex',anonymous=True)
    de = DlibEx()
    sleep(1)
    atexit.register(de.all_done)
    rospy.spin()
|
# Demonstrates local vs shared state (Python 2 syntax): `x` is local to
# this script, while `gv.x` lives on a shared module-level object that is
# presumably visible to all scripts — confirm against the host framework.
x = 1 # local variable
gv.x = 2 # global variable shared by all scripts
print x,' ',gv.x
import math
import itertools
import numpy as np

# Sum gcd(a, b, c) over all ordered triples with 1 <= a, b, c <= n.
# Only combinations_with_replacement are enumerated; each combination is
# weighted by its number of distinct orderings (1, 3 or 6).
n = int(input())
result = 0
values = list(range(1, n + 1))
# Renamed from `all`, which shadowed the builtin of the same name.
combos = list(itertools.combinations_with_replacement(values, 3))
for combo in combos:
    if combo[0] == combo[1] and combo[0] == combo[2]:
        result += np.gcd.reduce(combo)       # a == b == c: 1 ordering
    elif combo[0] == combo[1] or combo[0] == combo[2] or combo[1] == combo[2]:
        result += np.gcd.reduce(combo) * 3   # exactly two equal: 3 orderings
    else:
        result += np.gcd.reduce(combo) * 6   # all distinct: 6 orderings
print(combos)
print(result)
|
# This program recursively parses all C++ header and code file in a
# given directory and extract their depencencies on each other.
# The dependency info is stored in a text file in a format that is
# recognized by the "dot" program (GraphViz)
#
# Input : Path to src
# Output: .dot file
#
# Note: The program makes few assumptions which are not documented.
# However, the code is small and can be easily understood and modified
import os
import sys
if (len(sys.argv) < 3):
print 'Usage: genDot.py <src_dir> <dot_file_path>'
sys.exit(-1)
rootdir = sys.argv[1]
depGraphOutFile = sys.argv[2]
dGraph = {}
fileList = []
extList = ['.h', '.hxx', '.cpp', '.c', '.cxx', '.hpp', '.cc']
for root, subFolders, files in os.walk(rootdir):
if '.svn' in subFolders:
subFolders.remove('.svn')
for f in files:
ext = f[f.rfind('.'):]
if ext in extList:
fp = os.path.join(root, f)
fileList.append(fp)
#print fileList
#parse each file and build graph
line = []
for f in fileList:
fd = open( f, 'r')
lines = fd.readlines()
fd.close()
rn = f[f.rfind('/')+1:]
rn = rn.replace('.' , '_')
dGraph[rn] = []
for l in lines:
l = l.strip('\n')
l = l.strip('\r')
l = l.strip(' ')
if l.find('#include') == 0 and l.find('"') > 0:
if l.rfind('/') > 0:
cn = l[l.rfind('/')+1:].strip('"')
else:
cn = l[len('#include')+1:].strip(' ').strip('"')
cn = cn.replace('.','_')
dGraph[rn].append(cn)
#print dGraph
#dump graph to .DOT format
df = open(depGraphOutFile, "w")
df.write('digraph G {\n')
for k in dGraph:
nodes = dGraph[k]
for n in nodes:
df.write(k + ' -> ' + n + ';\n')
df.write('}\n')
df.close()
|
import textwrap
from .script import Script
class BuildScript(Script):
    """Builds a shell script of buildah commands that assembles a container
    image from the given container definition."""

    # Container variables exported into the generated script.
    export = ('base', 'repo')

    def __init__(self, container):
        super().__init__()
        self.container = container
        for var in self.export:
            self.append(f'{var}="{container.variable(var)}"')
        # Shell prologue: track bind mounts so they are all unmounted (and
        # the working container released) when the script exits.
        self.append('''
mounts=()
umounts() {
    for i in "${mounts[@]}"; do
        umount $i
        mounts=("${mounts[@]/$i}")
    done
    buildah unmount $ctr
    trap - 0
}
trap umounts 0
ctr=$(buildah from $base)
mnt=$(buildah mount $ctr)
''')

    def config(self, line):
        """Append a `buildah config` invocation."""
        self.append(f'buildah config {line} $ctr')

    def _run(self, cmd, inject=False):
        """Render `cmd` as a `buildah run` line.

        `sudo `-prefixed commands run as root; otherwise the configured
        user is used once it has been created. Commands containing shell
        metacharacters or newlines are wrapped in a bash heredoc.
        NOTE(review): the `inject` parameter is unused; kept so existing
        callers passing it keep working.
        """
        user = self.container.variable('username')
        _cmd = textwrap.dedent(cmd)
        if cmd.startswith('sudo '):
            _cmd = _cmd[5:]
        # Wrap in a heredoc when the command needs a real shell (pipes,
        # redirects, background jobs, multiple lines, ...).
        if any(token in _cmd for token in ('\n', '>', '<', '|', '&')):
            _cmd = ' '.join(['bash -eux <<__EOF\n', _cmd.strip(), '\n__EOF'])
        if cmd.startswith('sudo '):
            return f'buildah run --user root $ctr -- {_cmd}'
        elif user and self.container.variable('user_created'):
            return f'buildah run --user {user} $ctr -- {_cmd}'
        else:
            return f'buildah run $ctr -- {_cmd}'

    def run(self, cmd):
        """Append a `buildah run` line for `cmd`."""
        self.append(self._run(cmd))

    def copy(self, src, dst):
        """Append a `buildah copy` of host `src` into the container `dst`."""
        self.append(f'buildah copy $ctr {src} {dst}')

    def mount(self, src, dst):
        """Bind-mount host `src` at `dst` inside the container and record
        the mount so the umounts trap cleans it up."""
        self.run('sudo mkdir -p ' + dst)
        self.append('mkdir -p ' + src)
        self.append(f'mount -o bind {src} $mnt{dst}')
        self.append('mounts=("$mnt' + dst + '" "${mounts[@]}")')
|
import googlemaps
import requests
import json
import os
import numpy as np
from openpyxl import load_workbook
# Google Maps client plus raw REST endpoint settings used by geocoding().
# SECURITY NOTE(review): the API key is hardcoded and committed in plain
# text — it should be moved to an environment variable and rotated.
gmaps = googlemaps.Client(key='AIzaSyBxAxKmbEhLrO08SmCi9M_4r6w9Y7MOER4')
basic_url = 'https://maps.googleapis.com/maps/api'
mykey = 'key=AIzaSyBxAxKmbEhLrO08SmCi9M_4r6w9Y7MOER4'
def geocoding(address):
    """Resolve a street address to [lat, lng] via the Google Geocoding API.

    Arguments:
        address: URL-ready address string (spaces already replaced by '+').

    Returns:
        [latitude, longitude] of the first geocoding result.
    """
    loc = 'address=' + address + '&'
    geocode_url = basic_url + '/geocode/json?' + loc + mykey
    r = requests.get(geocode_url)
    # BUG FIX: the original called r.json(), discarded the result, and then
    # parsed r.text again with json.loads — one call is enough.
    geocode_json = r.json()
    location = geocode_json['results'][0]['geometry']['location']
    return [location['lat'], location['lng']]
#%% get camera name and translate to lat&lng
# Camera names come from the crawl output's directory names; spaces are
# replaced with '+' so the strings are URL-ready for the geocoding API.
camname = list() # store translatable camera location like '2+Ave+@+14+St,new+york,ny'
coords = np.zeros((56,2)) # store translated coordinates
os.chdir('C:\\Users\\Zhimiao\\Documents\\courses\\2-IVP\\project\\crawl\\1-10am')
for directories in os.listdir(os.getcwd()):
    name = directories.replace(' ','+')+',new+york,ny'
    camname.append(name)
    # print(directories)
camname[44] = 'Broadway+@+46+St+South,new+york,ny' # original name includes invalid character
# Geocode every camera location into (lat, lng).
for i in range(len(camname)):
    coords[i] = geocoding(camname[i])
#%%save camera address and coordinates
# Write the address -> coordinate mapping to the second worksheet
# (rows start at 2; row 1 is the header).
os.chdir('C:\\Users\\Zhimiao\\Documents\\courses\\2-IVP\\project\\visualization')
name2coord = load_workbook('database.xlsx')
name2coord.active = 1
sheet_translation = name2coord.active
for i in range(len(camname)):
    sheet_translation.cell(row = i + 2, column = 1).value = camname[i]
    sheet_translation.cell(row = i + 2, column = 2).value = coords[i][0]
    sheet_translation.cell(row = i + 2, column = 3).value = coords[i][1]
name2coord.save('database.xlsx')
name2coord.save('translation.xlsx')
name2coord.close()
#%% mark in map to check if coordinates are correct
#marker = 'markers=size:tiny|'
#for i in range(56):
# latm = coords[i][0]
# lngm = coords[i][1]
# marker += str(latm) +','+ str(lngm) + '|'
#marker = marker[:-1]
#
#loc_para = 'center=40.753504,-73.980880&zoom=13&'
#map_para = 'size=800x400&scale=2&'
#
#map_parameters = loc_para + map_para + marker + '&'
#
#staticmap_url = basic_url + '/staticmap?' + map_parameters
#
#r = requests.get(staticmap_url[:-1])
##os.chdir('C:\\Users\\Zhimiao\\Documents\\courses\\2-IVP\\project\\visualization')
#f = open('direct translated coords' + '.png','wb')
#f.write(r.content)
#f.close()
# fail to check in the markers map!
#%% write camera location and coordinate to the database
# Mirror the camera names and coordinates into the third worksheet
# (columns 4-6), which serves as the main database sheet.
os.chdir('C:\\Users\\Zhimiao\\Documents\\courses\\2-IVP\\project\\visualization')
wb_database = load_workbook('database.xlsx')
wb_database.active = 2
sheet_database = wb_database.active
#
#wb2 = load_workbook('new_database.xlsx')
#wb2.active = 1
#sheet2 = wb2.active
for i in range(len(camname)):
    sheet_database.cell(row = i + 2, column = 4).value = camname[i]
    sheet_database.cell(row = i + 2, column = 5).value = coords[i][0]
    sheet_database.cell(row = i + 2, column = 6).value = coords[i][1]
wb_database.save('database.xlsx')
wb_database.save('database - database.xlsx')
wb_database.close()
#%%
#def search_location(camname,camadd,sheet_raw,sheet_database):
# tcamname = str(camname).replace(' ','+')
# index = tcamname.find('St')
# if tcamname[:index] == str(camadd)[:index]:
# lat = sheet2.cell(row = i + 2, column = 5).value
# lng = sheet2.cell(row = i + 2, column = 6).value
## lat1 = sheet2.cell(row = i + 2, column = 9).value
## lng1 = sheet2.cell(row = i + 2, column = 10).value
## lat2 = sheet2.cell(row = i + 2, column = 11).value
## lng2 = sheet2.cell(row = i + 2, column = 12).value
# return lat,lng,lat1,lng1,lat2,lng2
#%% match raw data to camera address, and write number of cars to the database
# Raw counts live on sheet 0: three time samples per camera interleaved
# every 3 rows. Each raw row is matched to a camera address by comparing
# the text up to 'St'/'ST'; the count is written to column 7+j of sheet 2.
wb_rawdata = load_workbook('database.xlsx')
wb_rawdata.active = 0
sheet_rawdata = wb_rawdata.active
wb_database = load_workbook('database.xlsx')
wb_database.active = 2
sheet_database = wb_database.active
for j in range(3):
    for i in range(43):
        name = sheet_rawdata.cell(row = 2 + j + i*3, column = 1).value
        value = sheet_rawdata.cell(row = 2 + j + i*3, column = 2).value
        sheet_rawdata.cell(row = 2 + j + i*3, column = 3).value = j
        # print(name,value)
        tname = str(name).replace(' ','+')
        index = tname.find('St')
        if index == -1:
            index = tname.find('ST')
        # Linear scan over the 56 known cameras for a prefix match.
        for k in range(56):
            camadd = sheet_database.cell(row = 2 + k, column = 4).value
            if tname[:index] == str(camadd)[:index]:
                # print(tname,camadd)
                sheet_database.cell(row = 2 + k, column = 3).value = name
                sheet_database.cell(row = 2 + k, column = 7 + j).value = value
#wb_rawdata.save('database.xlsx')
#wb_rawdata.save('rawdata - copy.xlsx')
#wb_rawdata.save('database.xlsx')
#wb_database.save('database - database.xlsx')
wb_database.save('database.xlsx')
wb_rawdata.close()
wb_database.close()
#%% transform address
#sheet.cell(row = 2, column = 2).value = 'hello'
#for i in range(len(road_loc)):
# sheet.cell(row = i + 2, column = 4).value = road_loc[i]
# index = road_loc[i].find('Ave')
# if index == -1:
# index = road_loc[i].find('AVE')
# sheet.cell(row = i + 2, column = 1).value = road_loc[i][:index-1]
#wb2.save('newnew_sample_data.xlsx')
#wb2.close()
#
#
##%%
#for i in range(len(road_loc)):
# address = sheet.cell(row = i + 2, column = 4).value
# add = geocoding(address)
# sheet.cell(row = i + 2, column = 5).value = add[0]
# sheet.cell(row = i + 2, column = 6).value = add[1]
#
#wb2.save('new_sample_data.xlsx')
#wb2.close()
#%%manually adjust the order and misleading locations in sheets
# Final manual step: spot-check the saved coordinates with the companion
# drawpath.py script before using the workbook downstream.
print('please verify coordinates by running drawpath.py')
#%% GeoJSON output for javascript
#wb3 = load_workbook('newnew_sample_data.xlsx')
#wb3.active = 1
#sheet3 = wb3.active
#
#paths = sheet3.iter_cols(min_col = 9, max_col = 12, min_row = 2, max_row = 57)
#for rows in paths:
# for cell in rows:
# print cell
##sheet3.active_cell
|
"this is a test"
from glob import glob
import os
import shutil
def SetupSymb():
    """Create local copies of the shared symbology layer files.

    Copies everything from the GRM PercentChange symbology folder into
    C:\Mapping_Project\MXDs\Symbology, creating the folder when needed.
    BUG FIX: this function calls glob() but the module never imported it,
    so the copy loop raised NameError at runtime (fixed in the imports).
    """
    destsymbdir = "C:\Mapping_Project\MXDs\Symbology"
    if not os.path.exists(destsymbdir):
        os.mkdir(destsymbdir)
    # One .lyr file acts as a sentinel: if present, assume the whole set
    # was already copied.
    if not os.path.exists("C:\Mapping_Project\MXDs\Symbology\PercentChange.lyr"):
        for fl in glob(r"\\ca1ntap01\grm\PAT\ArcMappingTool\Symbology\PercentChange\\*"):
            shutil.copy2(fl, destsymbdir)
def SetupPerilsEUWS(maptype):
""" Moves the RMS MXD template and shapefiles from the GRM drive to the local drive. MXD location is hardcoded. Maptype options = 'CRESTA' or 'POST'"""
destmxddir = "C:\Mapping_Project\MXDs"
destSHPdir = "C:\Mapping_Project\Shapefiles"
SetupSymb()
if maptype == 'CRESTA':
try:
shutil.copy2(r"\\ca1ntap01\grm\PAT\ArcMappingTool\MXDs\EUWS\EUWS_CRESTA.mxd",destmxddir)
except:
if os.path.exists("C:\Mapping_Project\MXDs\EUWS_CRESTA.mxd") :
print "The EUWS map already exists on your local machine."
else:
print "Error with SetupPerilsEUWS"
for fl in glob(r"\\ca1ntap01\grm\PAT\ArcMappingTool\Shapefiles\EUWS_CRESTA\*"):
try:
shutil.copy2(fl,destSHPdir)
except:
print "Was not able to copy over ", os.path.basename(fl)
if maptype == 'POST':
try:
shutil.copy2(r"\\ca1ntap01\grm\PAT\ArcMappingTool\MXDs\EUWS\EUWS_Postcode.mxd",destmxddir)
except:
if os.path.exists("C:\Mapping_Project\MXDs\EUWS_Postcode.mxd") :
print "The EUWS map already exists on your local machine."
else:
print "Error with SetupPerilsEUWS"
for fl in glob(r"\\ca1ntap01\grm\PAT\ArcMappingTool\Shapefiles\EUWS_Postcode\*"):
try:
shutil.copy2(fl,destSHPdir)
except:
print "Was not able to copy over ", os.path.basename(fl)
def SetupPerilsEUFL(maptype):
""" Moves the RMS MXD template and shapefiles from the GRM drive to the local drive. MXD location is hardcoded. Maptype options = 'CRESTA' or 'POST'"""
destmxddir = "C:\Mapping_Project\MXDs"
destSHPdir = "C:\Mapping_Project\Shapefiles"
SetupSymb()
if maptype == 'CRESTA':
try:
shutil.copy2(r"\\ca1ntap01\grm\PAT\ArcMappingTool\MXDs\EUFL\EUFL_CRESTA.mxd",destmxddir)
except:
if os.path.exists("C:\Mapping_Project\MXDs\EUFL_CRESTA.mxd") :
print "The EUFL map already exists on your local machine."
else:
print "Error with SetupPerilsEUFL"
for fl in glob(r"\\ca1ntap01\grm\PAT\ArcMappingTool\Shapefiles\EUFL_CRESTA\*"):
try:
shutil.copy2(fl,destSHPdir)
except:
print "Was not able to copy over ", os.path.basename(fl)
if maptype == 'POST':
try:
shutil.copy2(r"\\ca1ntap01\grm\PAT\ArcMappingTool\MXDs\EUFL\EUFL_Postcode.mxd",destmxddir)
except:
if os.path.exists("C:\Mapping_Project\MXDs\EUFL_Postcode.mxd") :
print "The EUFL map already exists on your local machine."
else:
print "Error with SetupPerilsEUFL"
for fl in glob(r"\\ca1ntap01\grm\PAT\ArcMappingTool\Shapefiles\EUFL_Postcode\*"):
try:
shutil.copy2(fl,destSHPdir)
except:
print "Was not able to copy over ", os.path.basename(fl)
|
# List operations in Python.
# When working with lists we can also perform operations, for example:
# https://www.geeksforgeeks.org/python-list/

# Concatenating lists
my_lista = [1]
my_lista2 = [2,3,4]
my_lista3 = my_lista + my_lista2
print("hola" + str(my_lista3[1:]))
my_lista3[1:]
my_lista3 # [1,2,3,4]

# Repeating elements
#my_lista = ['a']
my_lista2 = my_lista * 5
my_lista2 # with my_lista = ['a'] this would be ['a','a','a','a','a']

# Reversing lists
my_lista = [1,2,3,4,5]
my_lista_reversed = my_lista[::-1]
my_lista_reversed # [5,4,3,2,1]

# Removing the last element of the list
my_lista = [1,2,3,4,5]
# BUG FIX: pop() mutates in place and returns the *removed element*, so the
# original rebinding `my_lista = my_lista.pop()` left the integer 5 in
# my_lista instead of the documented [1,2,3,4].
my_lista.pop()
my_lista # [1,2,3,4]

# Sorting the list
my_lista = [2,1,5,4,3]
# BUG FIX: sort() sorts in place and returns None; the original rebinding
# `my_lista = my_lista.sort()` left None instead of the documented list.
my_lista.sort()
my_lista # [1,2,3,4,5]

# Removing an element
my_lista = [1,2,3,4,5]
del my_lista[0]
my_lista # [2,3,4,5]
import turtle
import os

# Screen setup
screen = turtle.Screen()
screen.bgcolor("lightgreen")
screen.title("Turtle Run Ver.1")

# Draw the border: a 600x600 square centered on the origin
mypen = turtle.Turtle()
mypen.penup()
mypen.setposition(-300, -300)
mypen.pendown()
mypen.pensize(3)
for side in range(4):
    mypen.forward(600)
    mypen.left(90)
mypen.hideturtle()

# Background music setup (disabled)
# os.system("start c:/python/day5/intro.mp3")

# Player turtle
player = turtle.Turtle()
player.color("blue")
player.shape("triangle")
player.penup()

# Movement speed in pixels per game-loop iteration
speed = 1
# 컨트롤 함수 정의
def turnLeft():
    """Rotate the player 20 degrees counter-clockwise."""
    player.left(20)
def turnRight():
    """Rotate the player 20 degrees clockwise."""
    player.right(20)
def speedUp():
    """Increase the forward speed by 1."""
    global speed
    speed += 1
def speedDown():
    """Decrease the forward speed by 1 (may go negative, reversing)."""
    global speed
    speed -= 1
# Keyboard bindings
turtle.listen()
turtle.onkey(turnLeft, "Left")
turtle.onkey(turnRight, "Right")
turtle.onkey(speedUp, "Up")
turtle.onkey(speedDown, "Down")

# Main game loop (runs until the window is closed)
while True:
    # Move the player forward
    player.forward(speed)
    # Bounce off the border: turn around when leaving the square
    if player.xcor() > 300 or player.xcor() < -300:
        player.right(180)
    if player.ycor() > 300 or player.ycor() < -300:
        player.right(180)
from PagSeguroLib.singleton import Singleton
from PagSeguroLib.config.PagSeguroConfig import PagSeguroConfig
from PagSeguroLib.log.LogPagSeguro import LogPagSeguro
from PagSeguroLib.resources.PagSeguroResources import PagSeguroResources
class PagSeguro(Singleton):
    """Singleton facade holding PagSeguro resources, config and log.

    Use :meth:`init` to obtain (and lazily create) the shared instance.
    """
    library = None    # cached singleton instance
    resources = None  # PagSeguroResources with the service endpoints
    config = None     # PagSeguroConfig with credentials and environment
    log = None        # LogPagSeguro instance

    @classmethod
    def init(cls, email, token):
        """Return the shared PagSeguro instance, creating it on first call.

        Args:
            email: Account e-mail used as API credential.
            token: API token used as credential.
        """
        # `is None` is the idiomatic identity check (the original used
        # `== None`, which invokes __eq__ and can misbehave).
        if cls.library is None:
            cls.library = PagSeguro(email, token)
        return cls.library

    def __init__(self, email, token):
        # Static production endpoints, service paths and timeouts.
        resources = {
            'environment': {
                'production': {
                    'webserviceUrl': "https://ws.pagseguro.uol.com.br"
                }
            },
            'paymentService' : {
                'servicePath': "/v2/checkout",
                'checkoutUrl': "https://pagseguro.uol.com.br/v2/checkout/payment.html",
                'serviceTimeout': 20
            },
            'notificationService': {
                'servicePath': "/v2/transactions/notifications",
                'serviceTimeout': 20
            },
            'transactionSearchService': {
                'servicePath': "/v2/transactions",
                'serviceTimeout': 20
            }
        }
        # Runtime configuration: environment, credentials, charset, logging.
        configs = {
            'environment': {
                'environment': "production"
            },
            'credentials': {
                'email': email,
                'token': token
            },
            'application': {
                'charset': "ISO-8859-1"
            },
            'log': {
                'active': False,
                'fileLocation': ""
            }
        }
        self.resources = PagSeguroResources.init(resources)
        self.config = PagSeguroConfig.init(configs)
        self.log = LogPagSeguro.init()
|
# Instructions:
# =============
# 1. Collect from https://archive.ics.uci.edu/ml/datasets/wine
# 2. Run this script
import pandas as pd
import numpy as np
SEED = 998823
#---
np.random.seed(SEED)
# Raw wine.data: column 0 is the class label, columns 1..13 are features.
data = pd.read_csv("../data/raw/wine.data", header=None, sep=',')
# Move the label column to the end (features first, label last).
cols = data.columns.tolist()
cols = cols[1:] + cols[:1]
data = data[cols]
data.iloc[:, 0:-1] = data.iloc[:, 0:-1].astype('float32')
# Classes are 1-based in the raw file; shift to 0-based int labels.
data.iloc[:,-1: ] = data.iloc[:,-1: ].astype('int32') - 1
print(data.head())
print("Size:", data.shape)
TRAIN_SIZE = 70
VAL_SIZE = 30
TEST_SIZE = 78
# Shuffle, then split into train / validation / test partitions.
data = data.sample(frac=1)
data_train = data.iloc[0:TRAIN_SIZE]
data_val = data.iloc[TRAIN_SIZE:TRAIN_SIZE+VAL_SIZE]
data_test = data.iloc[TRAIN_SIZE+VAL_SIZE:]
print("Number of features:", data_train.shape[1] - 1)
print("Classes:", data_train.iloc[:, -1].unique())
print()
print("Total len:", data.shape[0])
print("----------")
print("Train len:", data_train.shape[0])
print("Val len: ", data_val.shape[0])
print("Test len: ", data_test.shape[0])
data_train.to_pickle("../data/wine-train")
data_val.to_pickle("../data/wine-val")
data_test.to_pickle("../data/wine-test")
#--- prepare meta
# Per-feature metadata: training-set mean/std (for normalization) and a
# random per-feature acquisition cost drawn from {0.2, 0.4, 0.6, 0.8}.
idx = data.columns[:-1]
meta = pd.DataFrame(index=idx, dtype='float32')
meta['avg'] = data_train.mean()
meta['std'] = data_train.std()
meta['cost'] = np.random.randint(1, 5, size=(len(idx), 1)) / 5.
# meta['cost'] = 1.
meta = meta.astype('float32')
print("\nMeta:")
print(meta)
meta.to_pickle("../data/wine-meta")
import random
import string
from typing import Tuple
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce_low_res
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_methods import using_exchange
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
# Bitmax connector metadata consumed by Hummingbot's exchange registry.
CENTRALIZED = True
EXAMPLE_PAIR = "BTC-USDT"
DEFAULT_FEES = [0.1, 0.1]  # presumably [maker %, taker %] — confirm against connector docs
HBOT_BROKER_ID = "hbot-"  # prefix used to tag client order ids as Hummingbot-originated
def convert_from_exchange_trading_pair(exchange_trading_pair: str) -> str:
    """Translate an exchange-native pair such as ``BTC/USDT`` into Hummingbot's ``BTC-USDT`` form."""
    return "-".join(exchange_trading_pair.split("/"))
def convert_to_exchange_trading_pair(hb_trading_pair: str) -> str:
    """Translate a Hummingbot pair such as ``BTC-USDT`` into the exchange's ``BTC/USDT`` form."""
    return "/".join(hb_trading_pair.split("-"))
# get timestamp in milliseconds
def get_ms_timestamp() -> int:
    # Thin wrapper over Hummingbot's low-resolution tracking nonce; presumably a
    # millisecond-precision timestamp — confirm in hummingbot.core.utils.tracking_nonce.
    return get_tracking_nonce_low_res()
def uuid32():
    """Return a 32-character random alphanumeric token (upper, lower, digits)."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=32))
def derive_order_id(user_uid: str, cl_order_id: str, ts: int, order_src='a') -> str:
    """
    Build a server-side order id from user info and a client order id.
    :param user_uid: user uid
    :param cl_order_id: user random digital and number id
    :param ts: order timestamp in milliseconds
    :param order_src: 'a' for rest api order, 's' for websocket order.
    :return: order id of length 32
    """
    # Take the trailing characters of each component: hex timestamp (11),
    # user uid (11), client order id (9), prefixed with the source tag.
    ts_part = format(ts, 'x')[-11:]
    uid_part = user_uid[-11:]
    cl_part = cl_order_id[-9:]
    combined = order_src + ts_part + uid_part + cl_part
    return combined[:32]
def gen_exchange_order_id(userUid: str) -> Tuple[str, int]:
    """
    Generate an exchange order id together with the timestamp it was derived from.
    :param userUid: user uid
    :return: (order id of length 32, millisecond timestamp)
    """
    ts = get_ms_timestamp()
    # BUG FIX: annotated as Tuple[str, int] but previously returned a list;
    # return a real tuple (indexing/unpacking callers are unaffected).
    return (
        derive_order_id(
            userUid,
            uuid32(),
            ts
        ),
        ts
    )
def gen_client_order_id(is_buy: bool, trading_pair: str) -> str:
    """Build a Hummingbot client order id tagged with broker prefix, side and trading pair."""
    side_tag = "B" if is_buy else "S"
    nonce = get_tracking_nonce()
    return "{}{}-{}-{}".format(HBOT_BROKER_ID, side_tag, trading_pair, nonce)
# Configuration prompts Hummingbot's `connect` flow uses to collect Bitmax API credentials.
KEYS = {
    "bitmax_api_key":
        ConfigVar(key="bitmax_api_key",
                  prompt="Enter your Bitmax API key >>> ",
                  required_if=using_exchange("bitmax"),
                  is_secure=True,
                  is_connect_key=True),
    "bitmax_secret_key":
        ConfigVar(key="bitmax_secret_key",
                  prompt="Enter your Bitmax secret key >>> ",
                  required_if=using_exchange("bitmax"),
                  is_secure=True,
                  is_connect_key=True),
}
|
def label(basename, product, hash, state):
    """Compose the canonical 'solvent' label string from its four components."""
    # NOTE: `hash` shadows the builtin of the same name; kept for caller compatibility.
    return "solvent__{0}__{1}__{2}__{3}".format(basename, product, hash, state)
|
import time
from block import TetrisBlock
from features.feature import Feature
from field import Field
from painter import Led_Matrix_Painter, RGB_Field_Painter
class Startscreen(Feature):
    """Tetris start screen: shows the title text while random blocks fall down the LED field."""

    def __init__(self, field_leds: Field, field_matrix: Field, rgb_field_painter: RGB_Field_Painter,
                 led_matrix_painter: Led_Matrix_Painter):
        super().__init__(field_leds, field_matrix, rgb_field_painter, led_matrix_painter)
        # Falling-block animation state.
        self.current_block = None
        self.block_position_x = 0
        self.block_position_y = 0
        self.new_block()

    def new_block(self):
        """Spawn a fresh random block (doubled in size) just above the visible field."""
        self.current_block = TetrisBlock.get_random_block().double_size()
        self.block_position_x = 1
        self.block_position_y = -8

    def falling_blocks(self):
        """Advance the current block one row; respawn once it falls out of the field."""
        self.field_leds.set_all_pixels_to_black()
        self.field_leds.set_block(self.current_block, self.block_position_x, self.block_position_y)
        self.block_position_y += 1
        if self.block_position_y >= self.field_leds.height:
            self.new_block()

    def tick(self):
        """One animation frame: title text, one falling step, repaint, short pause."""
        self.led_matrix_painter.show_Text("Tetris")
        self.falling_blocks()
        self.rgb_field_painter.draw(self.field_leds)
        time.sleep(0.1)

    def stop(self):
        # Nothing to clean up for the start screen.
        pass
|
#!/usr/bin/env python
"""
File: plotting
Date: 12/4/18
Author: Jon Deaton (jdeaton@stanford.edu)
"""
import os
import sklearn
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import average_precision_score, precision_recall_curve
from HumanProteinAtlas import organelle_name
def plot_evaluation_metrics(labels, y_score, y_pred, output_dir):
    """Render all evaluation plots into `output_dir`; return the image file names created."""
    assert isinstance(labels, np.ndarray)
    assert isinstance(y_score, np.ndarray)
    assert labels.shape == y_score.shape

    # Collect the names of every image written.
    created = list()

    # Per-class metric bar charts plus ROC / precision-recall curves.
    created.extend(plot_per_class_metrics(labels, y_score, y_pred, output_dir))

    # Comparison of per-protein label counts between truth and predictions.
    counts_image = "proteins_label_counts.png"
    plot_num_each_protein(labels, y_pred, save_file=os.path.join(output_dir, counts_image))
    created.append(counts_image)

    return created
def plot_per_class_metrics(y_true, y_probs, y_pred, output_dir):
    """
    Write per-class bar charts (accuracy, precision, recall, F1, ROC AUC) plus
    ROC and precision-recall curve figures into `output_dir`.

    :param y_true: (m, c) binary ground-truth matrix
    :param y_probs: (m, c) per-class scores/probabilities
    :param y_pred: (m, c) binary predictions
    :param output_dir: directory the .png files are written into
    :return: list of the image file names created
    """
    image_files = list()  # save the list of image files created during this function

    accuracy_image = "per-class_accuracy.png"
    plot_per_class_metric(y_true, y_pred, sklearn.metrics.accuracy_score, "accuracy",
                          save_file=os.path.join(output_dir, accuracy_image))
    image_files.append(accuracy_image)

    precision_image = "per-class_precision.png"
    plot_per_class_metric(y_true, y_pred, sklearn.metrics.precision_score, "precision",
                          save_file=os.path.join(output_dir, precision_image))
    image_files.append(precision_image)

    # BUG FIX: filename was misspelled "pre-class_recall.png", breaking the
    # naming convention shared by every other per-class image.
    recall_image = "per-class_recall.png"
    plot_per_class_metric(y_true, y_pred, sklearn.metrics.recall_score, "recall",
                          save_file=os.path.join(output_dir, recall_image))
    image_files.append(recall_image)

    f1_image = "per-class_f1.png"
    plot_per_class_metric(y_true, y_pred, sklearn.metrics.f1_score, "F1",
                          save_file=os.path.join(output_dir, f1_image))
    image_files.append(f1_image)

    roc_auc_image = "per-class_roc_auc.png"
    plot_per_class_metric(y_true, y_probs, sklearn.metrics.roc_auc_score, "ROC AUC",
                          save_file=os.path.join(output_dir, roc_auc_image))
    image_files.append(roc_auc_image)

    # Per-class ROC (best effort: curve plotting can fail e.g. on single-class columns).
    roc_curves_file = "roc_curves.png"
    try:
        plot_roc(y_true, y_probs)
        plt.savefig(os.path.join(output_dir, roc_curves_file))
        image_files.append(roc_curves_file)
    except Exception as e:  # was a bare except, which also swallowed KeyboardInterrupt
        print("Failed to generate ROC plot:", e)

    # Per-class Precision Recall (same best-effort handling).
    pr_curves_file = "pr_curves.png"
    try:
        plot_precision_recall(y_true, y_probs)
        plt.savefig(os.path.join(output_dir, pr_curves_file))
        image_files.append(pr_curves_file)
    except Exception as e:
        print("Failed to generate Precision Recall plot:", e)

    return image_files
def plot_per_class_metric(labels, y_probs, get_metric, metric_name, save_file=None):
    """
    Bar-plot one metric evaluated independently per class column.

    :param labels: (m, c) binary ground-truth matrix
    :param y_probs: (m, c) per-class scores or predictions, whichever `get_metric` expects
    :param get_metric: callable(y_true_column, y_score_column) -> float
    :param metric_name: display name used in the title/axis and default filename
    :param save_file: output path; defaults to "per-class_<metric_name>.png"
    """
    assert isinstance(labels, np.ndarray)
    assert isinstance(y_probs, np.ndarray)

    class_values = list()
    m, c = labels.shape
    for i in range(c):
        try:
            value = get_metric(labels[:, i], y_probs[:, i])
        except Exception:
            # BUG FIX: the original format string had one %s placeholder for two
            # arguments, so this error path itself raised a TypeError.
            print("Failed to get metric: %s for class %d" % (metric_name, i))
            value = 0
        class_values.append(value)

    plt.figure()
    plt.bar(range(c), class_values)
    plt.title("Per-class %s" % metric_name)
    plt.xlabel("Class")
    plt.ylabel(metric_name)
    plt.ylim((0, 1))  # all supported metrics live in [0, 1]
    if save_file is None:
        save_file = "per-class_%s.png" % metric_name
    plt.savefig(save_file)
def plot_roc(y_true, y_probas, title='ROC Curves', plot_micro=True, plot_macro=True, classes_to_plot=None,
             ax=None, figsize=None, cmap='nipy_spectral', title_fontsize="large", text_fontsize=5):
    """
    Plot one ROC curve (with its AUC) per class on a single axis.

    :param y_true: (m, c) binary label matrix; column i is ground truth for class i
    :param y_probas: (m, c) per-class scores
    :param classes_to_plot: class indices to draw; defaults to all classes
    :param ax: existing axis to draw on; a new figure is created when None
    :return: the matplotlib axis drawn on

    NOTE(review): `plot_micro` and `plot_macro` are accepted but never used here
    (unlike in plot_precision_recall) — confirm whether micro/macro averages
    were intended.
    """
    y_true = np.array(y_true)
    y_probas = np.array(y_probas)
    m, num_classes = y_probas.shape
    classes = np.arange(num_classes)
    probas = y_probas
    if classes_to_plot is None:
        classes_to_plot = classes
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.set_title(title, fontsize=title_fontsize)
    fpr_dict = dict()
    tpr_dict = dict()
    # Boolean mask aligned with `classes`: which curves actually get drawn.
    indices_to_plot = np.in1d(classes, classes_to_plot)
    for i, to_plot in enumerate(indices_to_plot):
        fpr_dict[i], tpr_dict[i], _ = roc_curve(y_true[:, i], probas[:, i])
        if to_plot:
            roc_auc = auc(fpr_dict[i], tpr_dict[i])
            # Spread class colors evenly across the colormap.
            color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
            ax.plot(fpr_dict[i], tpr_dict[i], lw=2, color=color,
                    label='ROC curve of class {0} (area = {1:0.2f})'
                          ''.format(classes[i], roc_auc))
    # Chance-level diagonal for reference.
    ax.plot([0, 1], [0, 1], 'k--', lw=2)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)
    ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc='lower right', fontsize=text_fontsize)
    return ax
def plot_precision_recall(y_true, y_probas, title='Precision-Recall Curve', plot_micro=True, classes_to_plot=None,
                          ax=None, figsize=None, cmap='nipy_spectral', title_fontsize=5, text_fontsize="medium"):
    """
    Plot one precision-recall curve (with average precision) per class, plus an
    optional micro-averaged curve over all classes.

    :param y_true: (m, c) binary label matrix; column i is ground truth for class i
    :param y_probas: (m, c) per-class scores
    :param plot_micro: also draw the micro-average PR curve when True
    :param classes_to_plot: class indices to draw; defaults to all classes
    :param ax: existing axis to draw on; a new figure is created when None
    :return: the matplotlib axis drawn on

    NOTE(review): title_fontsize=5 / text_fontsize="medium" are the reverse of
    plot_roc's defaults — possibly swapped; confirm intended sizes.
    """
    y_true = np.array(y_true)
    y_probas = np.array(y_probas)
    m, num_classes = y_probas.shape
    classes = np.arange(num_classes)
    probas = y_probas
    if classes_to_plot is None:
        classes_to_plot = classes
    # Labels are already one-hot/binary per column, so no binarization needed.
    binarized_y_true = y_true
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.set_title(title, fontsize=title_fontsize)
    indices_to_plot = np.in1d(classes, classes_to_plot)
    for i, to_plot in enumerate(indices_to_plot):
        if to_plot:
            average_precision = average_precision_score(binarized_y_true[:, i], probas[:, i])
            precision, recall, _ = precision_recall_curve(y_true[:, i], probas[:, i], pos_label=classes[i])
            # Spread class colors evenly across the colormap.
            color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
            ax.plot(recall, precision, lw=2,
                    label='Precision-recall curve of class {0} '
                          '(area = {1:0.3f})'.format(classes[i],
                                                     average_precision),
                    color=color)
    if plot_micro:
        # Micro-average: pool every (sample, class) decision into one curve.
        precision, recall, _ = precision_recall_curve(
            binarized_y_true.ravel(), probas.ravel())
        average_precision = average_precision_score(binarized_y_true,
                                                    probas,
                                                    average='micro')
        ax.plot(recall, precision,
                label='micro-average Precision-recall curve '
                      '(area = {0:0.3f})'.format(average_precision),
                color='navy', linestyle=':', linewidth=4)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.tick_params(labelsize=text_fontsize)
    # ax.legend(loc='best', fontsize=text_fontsize)
    return ax
def plot_label_histogram(labels, pred_labels, save_file="proteins_cell_counts.png"):
    """
    Grouped-bar histogram comparing, for each possible label count, how many
    entries have that count in the true vs. predicted label matrices.

    :param labels: (m, c) binary ground-truth matrix
    :param pred_labels: (m, c) binary prediction matrix
    :param save_file: path the figure is written to
    """
    assert isinstance(labels, np.ndarray)
    assert isinstance(pred_labels, np.ndarray)
    m, num_classes = labels.shape
    # NOTE(review): axis=0 sums over samples, giving per-CLASS totals, yet the
    # names/title say "per cell"; given the title "proteins counts in cell",
    # axis=1 (per-sample counts) may have been intended — confirm.
    true_counts_per_cell = labels.sum(axis=0).astype(int)
    pred_counts_per_cell = pred_labels.sum(axis=0).astype(int)
    max_count = max(true_counts_per_cell.max(), pred_counts_per_cell.max())
    count_range = np.arange(max_count + 1)
    # Histogram over counts: entry k = how many classes have exactly k labels.
    true_cells_per_count = np.zeros(max_count + 1)
    pred_cells_per_count = np.zeros(max_count + 1)
    for i in range(num_classes):
        true_cells_per_count[true_counts_per_cell[i]] += 1
        pred_cells_per_count[pred_counts_per_cell[i]] += 1
    width = 0.3  # bar width; pairs of bars are offset by this amount
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    # Text below each barplot
    plt.xticks([x + width / 2 for x in count_range], count_range)
    ax.bar(count_range, true_cells_per_count, width=width, color="g", label="True")
    ax.bar(count_range + width, pred_cells_per_count, width=width, color="r", label="Pred")
    plt.legend()
    plt.title("Number of cells vs. proteins counts in cell")
    plt.xlabel("Number of proteins in cell")
    plt.ylabel("Number of cells")
    plt.savefig(save_file)
def plot_num_each_protein(labels, pred_labels, save_file="proteins_label_counts.png"):
    """Grouped-bar chart of per-protein label counts, true vs. predicted, saved to `save_file`."""
    m, n = labels.shape
    protein_names = organelle_name.values()
    assert len(protein_names) == n

    positions = np.arange(n)
    true_counts = [labels[:, i].sum() for i in range(n)]
    pred_counts = [pred_labels[:, i].sum() for i in range(n)]

    bar_width = 0.3
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    # Protein names as x tick labels, centered between the paired bars.
    plt.xticks([p + bar_width / 2 for p in positions], protein_names, rotation=90)
    ax.bar(positions, true_counts, width=bar_width, color="g", label="True")
    ax.bar(positions + bar_width, pred_counts, width=bar_width, color="r", label="Pred")
    plt.legend()
    plt.title("Count of labels per protein")
    plt.xlabel("Protein")
    plt.ylabel("Label count")
    plt.savefig(save_file)
|
#garcia, gil
#astr 231 hw 5 prob 1
#4/9/2020
'''
in this script we integrate the eqns of stellar structure to determine the maximum mass
and radius of a white dwarf experiencing ultra-relativistic effects
'''
import numpy as np
import matplotlib.pyplot as plt
DR = 10  # radial step size (meters) used by the integration
####################
#################### #2
####################
# Physical constants (SI units).
g = 6.67e-11 #m^3 kg^-1 s^-2 (gravitational constant)
solar_mass = 1.98e30 #kg
solar_radius = 6.963e8 #meters
h = 6.626e-34 #J s (planck constant)
# BUG FIX: the electron mass was written as 9.109e31 (positive exponent);
# the correct value is 9.109e-31 kg. (Unused by k_ultra_rel below, so the
# integration results are unaffected.)
m_e = 9.109e-31 #kg (mass of electron)
m_H = 1.67e-27 #kg (mass of hydrogen)
c = 2.99e8 # m/s (speed of light)
####################
#################### #2a
####################
#we use Newton's method to solve the coupled diff eqns
#first, we define the routines that will be needed in our integrations:
# Polytropic constant K for an ultra-relativistic degenerate electron gas: P = K * rho^(4/3).
k_ultra_rel = ((h*c)/4) * (3/ (8*np.pi))**(1/3) * (1/ (2*m_H))**(4/3) #K constant in ultra-relativistic case
#print(format(k_ultra_rel,'E'))
def density(pressure):
    """Invert the ultra-relativistic EOS P = K * rho^(4/3) to get density from pressure."""
    return (pressure / k_ultra_rel) ** 0.75
def mass_step(density, r, dr):
    """Mass-continuity step: dM = 4*pi*r^2 * rho * dr for a thin shell at radius r."""
    shell_area = 4 * np.pi * r ** 2
    return shell_area * density * dr
def pressure_step(mass, radius, density, dr):
    """Hydrostatic-equilibrium step magnitude: dP = (G*M / r^2) * rho * dr."""
    local_gravity = (g * mass) / radius ** 2
    return local_gravity * density * dr
#we define a fxn that will be doing the intergrating
def newton_intergration(initial_density):
    """
    Euler-integrate the stellar structure equations outward from the core in
    steps of DR until the pressure reaches zero; return (total mass, radius).

    :param initial_density: central density in kg / m^3
    """
    radius = 0                                # start at the core
    mass = 0                                  # no mass enclosed at r = 0
    rho = initial_density
    pressure = k_ultra_rel * rho ** (4/3)     # central pressure from the EOS

    steps = 0  # number of integration steps taken
    # Integrate outward; the surface is where the pressure drops to zero.
    while pressure > 0:
        steps += 1
        radius = radius + DR
        rho = density(pressure)               # density consistent with current pressure
        mass = mass + mass_step(rho, radius, DR)
        pressure = pressure - pressure_step(mass, radius, rho, DR)
    return mass, radius
# Central densities (kg / m^3) to sweep through in the integration.
d_range = np.linspace(10**12,10**18,10000)
# Accumulators for the (mass, radius) result of each central density.
mass_lst=[]
radius_lst=[]
# Integrate the structure equations once per central density value.
for d_value in d_range:
    mass,radius =newton_intergration(d_value)
    mass_lst+=[mass]
    radius_lst +=[radius]
    print(d_value,mass,radius)
# Plot the results, converted to solar units, against log10 central density.
r_arr = np.array(radius_lst) / solar_radius
m_arr = np.array(mass_lst) / solar_mass
d_arr = np.log10(np.array(d_range))
plt.subplot(211)
plt.title('mass and radius dependance on central density')
plt.plot(d_arr,m_arr,color='k')
plt.ylabel(r'mass [M$_{\odot}$]')
plt.axhline(1.4416,ls='--',color='k',label=r'Chandrasekhar Limit: 1.44 M$_{\odot}$')
#plt.axhline(1.44,ls='--',color='k',label=r'Chandrasekhar Limit: 1.44 M$_{\odot}$')
plt.ylim(0,1.6)
#left,right = plt.xlim()
#plt.xlim(0,right)
plt.grid()
plt.legend()
plt.subplot(212)
plt.plot(d_arr,r_arr,color='k')
plt.ylabel(r'radius [R$_{\odot}$]')
#plt.ylabel('log(pressure)')
#plt.yscale('log')
plt.grid()
#left,right = plt.xlim()
#plt.xlim(0,right)
plt.xlabel(r'log( central density [kg / m$^{3}$] )')
plt.tight_layout()
plt.show()
|
"""Change Table Name
Revision ID: a3135c18513d
Revises: 12b6ae6ce692
Create Date: 2018-11-28 23:03:31.895532
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'a3135c18513d'
down_revision = '12b6ae6ce692'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `scheduled_answer` table and drop the superseded `client_answer` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('scheduled_answer',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('client_id', sa.Integer(), nullable=True),
    sa.Column('answer_id', sa.Integer(), nullable=True),
    sa.Column('is_sent', sa.Boolean(), nullable=False),
    sa.Column('reply_when', sa.DateTime(), nullable=True),
    sa.Column('sent_when', sa.DateTime(), nullable=True),
    sa.Column('created_at', mysql.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    sa.Column('updated_at', mysql.TIMESTAMP(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    sa.ForeignKeyConstraint(['answer_id'], ['answer.id'], ),
    sa.ForeignKeyConstraint(['client_id'], ['client.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.drop_table('client_answer')
    # ### end Alembic commands ###
def downgrade():
    """Recreate the original `client_answer` table (MySQL-specific types) and drop `scheduled_answer`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('client_answer',
    sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
    sa.Column('client_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
    sa.Column('answer_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
    sa.Column('is_sent', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),
    sa.Column('reply_when', mysql.DATETIME(), nullable=True),
    sa.Column('sent_when', mysql.DATETIME(), nullable=True),
    sa.Column('created_at', mysql.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    sa.Column('updated_at', mysql.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
    sa.ForeignKeyConstraint(['answer_id'], ['answer.id'], name='client_answer_ibfk_1'),
    sa.ForeignKeyConstraint(['client_id'], ['client.id'], name='client_answer_ibfk_2'),
    sa.PrimaryKeyConstraint('id'),
    mysql_collate='utf8mb4_0900_ai_ci',
    mysql_default_charset='utf8mb4',
    mysql_engine='InnoDB'
    )
    op.drop_table('scheduled_answer')
    # ### end Alembic commands ###
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# _Author_: xiaofeng
# Date: 2018-04-01 12:12:42
# Last Modified by: xiaofeng
# Last Modified time: 2018-04-01 12:12:42
from PIL import Image
import tensorflow as tf
import tflib
import tflib.ops
import tflib.network
from tqdm import tqdm
import numpy as np
import data_loaders
import time
import os
import shutil
import datetime
import config_char_formula as cfg
slim = tf.contrib.slim
# Model / training hyperparameters.
BATCH_SIZE = 32
EMB_DIM = 80            # token embedding size
ENC_DIM = 256           # encoder hidden size
DEC_DIM = ENC_DIM * 2   # decoder hidden size
NUM_FEATS_START = 64    # CNN feature maps in the first block
D = NUM_FEATS_START * 8
V = cfg.V_OUT  # vocab size
NB_EPOCHS = 100000
H = 20
W = 50
PRECEPTION = 0.6   # intended GPU memory fraction (usage commented out below)
THREAD = 13        # intra-op parallelism threads for the TF session
LEARNING_DECAY = 20000
IMG_PATH = cfg.IMG_DATA_PATH
ckpt_path = cfg.CHECKPOINT_PATH
summary_path = cfg.SUMMARY_PATH
if not os.path.exists(ckpt_path):
    os.makedirs(ckpt_path)
def exist_or_not(path):
    # Create `path` (and parents) if it does not exist yet.
    if not os.path.exists(path):
        os.makedirs(path)
exist_or_not(summary_path)
# Dump the uppercase config entries to a text file for reference.
with open('config.txt', 'w+') as f:
    cfg_dict = cfg.__dict__
    for key in sorted(cfg_dict.keys()):
        if key[0].isupper():
            cfg_str = '{}: {}\n'.format(key, cfg_dict[key])
            f.write(cfg_str)
    f.close()  # NOTE(review): redundant — the with-block already closes f
X = tf.placeholder(shape=(None, None, None, None), dtype=tf.float32)  # image batch placeholder (as in the paper)
mask = tf.placeholder(shape=(None, None), dtype=tf.int32)  # 1 where a sequence position is real (not padding)
seqs = tf.placeholder(shape=(None, None), dtype=tf.int32)  # token-id sequences incl. start/end tokens
learn_rate = tf.placeholder(tf.float32)
# Teacher forcing: feed tokens [0..n-1], predict tokens [1..n].
input_seqs = seqs[:, :-1]
target_seqs = seqs[:, 1:]
ctx = tflib.network.im2latex_cnn(X, NUM_FEATS_START, True)  # CNN encoder from the original repo
# ctx = tflib.network.vgg16(X)  # alternative: vgg16-style convolutions
# Encode the target-side tokens and run the attention decoder over the CNN context.
emb_seqs = tflib.ops.Embedding('Embedding', V, EMB_DIM, input_seqs)
out, state = tflib.ops.im2latexAttention('AttLSTM', emb_seqs, ctx, EMB_DIM,
                                         ENC_DIM, DEC_DIM, D, H, W)
logits = tflib.ops.Linear('MLP.1', out, DEC_DIM, V)
# Greedy next-token prediction from the last time step.
predictions = tf.argmax(tf.nn.softmax(logits[:, -1]), axis=1)
# Per-position cross-entropy, reshaped back to (batch, time).
loss = tf.reshape(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.reshape(logits, [-1, V]),
        labels=tf.reshape(seqs[:, 1:], [-1])), [tf.shape(X)[0], -1])
# Token-level accuracy over all positions (padding included).
output = tf.reshape(logits, [-1, V])
output_index = tf.to_int32(tf.argmax(output, 1))
true_labels = tf.reshape(seqs[:, 1:], [-1])
correct_prediction = tf.equal(output_index, true_labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Mask out padding positions and average the loss over real tokens only.
mask_mult = tf.to_float(mask[:, 1:])
loss_total = tf.reduce_sum(loss * mask_mult) / tf.reduce_sum(mask_mult)
optimizer = tf.train.GradientDescentOptimizer(learn_rate)
# optimizer = tf.train.MomentumOptimizer(learn_rate, 0.9)
# Clip each gradient to norm 5 before applying.
gvs = optimizer.compute_gradients(loss_total)
capped_gvs = [(tf.clip_by_norm(grad, 5.), var) for grad, var in gvs]
train_step = optimizer.apply_gradients(capped_gvs)
# summary
tf.summary.scalar('model_loss', loss_total)
# tf.summary.scalar('model_prediction', predictions)
tf.summary.scalar('model_accuracy', accuracy)
tf.summary.histogram('model_loss_his', loss_total)
tf.summary.histogram('model_acc_his', accuracy)
gradient_norms = [tf.norm(grad) for grad, var in gvs]
tf.summary.histogram('gradient_norm', gradient_norms)
tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms))
merged = tf.summary.merge_all()
# function to predict the latex
def score(set='valididate', batch_size=32):
    """Run the masked loss over one full pass of `set`; return (mean loss, mean perplexity)."""
    losses = []
    for batch_imgs, batch_seqs, batch_mask in data_loaders.data_iterator(set, batch_size):
        batch_loss = sess.run(
            loss_total,
            feed_dict={
                X: batch_imgs,
                seqs: batch_seqs,
                mask: batch_mask
            })
        losses.append(batch_loss)
    mean_loss = np.mean(losses)
    # Perplexity = e^loss, averaged over batches.
    mean_perp = np.mean([np.power(np.e, batch_loss) for batch_loss in losses])
    return mean_loss, mean_perp
def predict(set='test', batch_size=1, visualize=True):
    """
    Greedily decode latex for randomly chosen images from `<set>_buckets.npy`,
    optionally visualizing the attention map at each decoding step.

    :param set: dataset split whose bucket index is loaded
    :param batch_size: number of images to decode (must be 1 when visualizing)
    :param visualize: overlay and display attention heatmaps per step
    :return: (batch_size, 160) array of predicted token ids
    """
    if visualize:
        assert (batch_size == 1), "Batch size should be 1 for visualize mode"
    import random
    # f = np.load('train_list_buckets.npy').tolist()
    f = np.load(set+'_buckets.npy').tolist()
    # BUG FIX (py3): dict views are not sequences; random.choice needs a list.
    random_key = random.choice(list(f.keys()))
    #random_key = (160,40)
    f = f[random_key]
    imgs = []
    print("Image shape: ", random_key)
    # Sample existing processed images until the batch is full (luma channel only).
    while len(imgs) != batch_size:
        start = np.random.randint(0, len(f), 1)[0]
        if os.path.exists('./images_processed/'+f[start][0]):
            imgs.append(
                np.asarray(Image.open('./images_processed/' + f[start][0]).convert('YCbCr'))[:, :, 0]
                [:, :, None])
    # BUG FIX: this conversion had been commented out, but the code below requires
    # `imgs` to be an (N, C, H, W) ndarray (imgs.shape, imgs[0][0] as a 2-D image).
    imgs = np.asarray(imgs, dtype=np.float32).transpose(0, 3, 1, 2)
    inp_seqs = np.zeros((batch_size, 160)).astype('int32')
    print(imgs.shape)
    inp_seqs[:, 0] = np.load(cfg.PROPERTIES).tolist()['char_to_idx']['#START']
    tflib.ops.ctx_vector = []
    l_size = random_key[0]*2
    r_size = random_key[1]*2
    inp_image = Image.fromarray(imgs[0][0]).resize((l_size, r_size))
    # Attention grid size: the CNN downsamples by a factor of 8.
    l = int(np.ceil(random_key[1]/8.))
    r = int(np.ceil(random_key[0]/8.))
    properties = np.load(cfg.PROPERTIES).tolist()

    def idx_to_chars(Y): return ' '.join(map(lambda x: properties['idx_to_char'][x], Y))

    for i in range(1, 160):
        # Feed the prefix decoded so far; take the greedy next token.
        inp_seqs[:, i] = sess.run(predictions, feed_dict={X: imgs, input_seqs: inp_seqs[:, :i]})
        if visualize == True:
            # Rank attention weights and keep the smallest set covering 90% of the mass.
            att = sorted(
                list(enumerate(tflib.ops.ctx_vector[-1].flatten())),
                key=lambda tup: tup[1],
                reverse=True)
            idxs, att = zip(*att)
            j = 1
            while sum(att[:j]) < 0.9:
                j += 1
            positions = idxs[:j]
            print("Attention weights: ", att[:j])
            # BUG FIX (py3): use integer division so the positions are valid array indices.
            positions = [(pos // r, pos % r) for pos in positions]
            outarray = np.ones((l, r))*255.
            for loc in positions:
                outarray[loc] = 0.
            out_image = Image.fromarray(outarray).resize((l_size, r_size), Image.NEAREST)
            print("Latex sequence: ", idx_to_chars(inp_seqs[0, :i]))
            outp = Image.blend(inp_image.convert('RGBA'), out_image.convert('RGBA'), 0.5)
            outp.show(title=properties['idx_to_char'][inp_seqs[0, i]])
            # raw_input()
            time.sleep(3)
            os.system('pkill display')
    np.save('pred_imgs', imgs)
    np.save('pred_latex', inp_seqs)
    print("Saved npy files! Use Predict.ipynb to view results")
    return inp_seqs
# init = tf.global_variables_initializer()
# Initialize both global and local variables before restoring weights.
init = tf.group(tf.global_variables_initializer(),
                tf.local_variables_initializer())
config = tf.ConfigProto(intra_op_parallelism_threads=THREAD)
# config.gpu_options.per_process_gpu_memory_fraction = PRECEPTION
sess = tf.Session(config=config)
sess.run(init)
# restore the weights
saver2 = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=0.5)
saver2_path = os.path.join(ckpt_path, 'weights_best.ckpt')
# Restore from the latest checkpoint when one exists in ckpt_path.
file_list = os.listdir(ckpt_path)
if file_list:
    for i in file_list:
        if i == 'checkpoint':
            print('Restore the weight files form:', ckpt_path)
            saver2.restore(sess, tf.train.latest_checkpoint(ckpt_path))
# NOTE(review): "suammry_writer" is a typo for summary_writer, but the name is
# used throughout the training loop below — rename both together if cleaning up.
suammry_writer = tf.summary.FileWriter(
    summary_path, flush_secs=60, graph=sess.graph)
coord = tf.train.Coordinator()
thread = tf.train.start_queue_runners(sess=sess, coord=coord)
losses = []
times = []
print("Compiled Train function!")
# Test is train func runs
i = 0
iter = 0  # global step counter across epochs (NOTE: shadows the builtin `iter`)
best_perp = np.finfo(np.float32).max  # best validation perplexity seen so far
property = np.load(cfg.PROPERTIES).tolist()  # vocab maps (NOTE: shadows the builtin `property`)
def idx_to_chars(Y):
    # Map a list of token ids back to a space-joined character string.
    return ' '.join(map(lambda x: property['idx_to_char'][x], Y))
# Main training loop: one pass over the training iterator per epoch, with
# periodic checkpointing, sample decoding, and validation scoring.
for i in range(NB_EPOCHS):
    print('best_perp', best_perp)
    costs = []
    times = []
    pred = []
    itr = data_loaders.data_iterator('train', BATCH_SIZE)
    for train_img, train_seq, train_mask in itr:
        iter += 1
        start = time.time()
        _, _loss, _loss_ori, _acc, _mask_mult, summary, _correct_prediction = sess.run(
            [train_step, loss_total, loss, accuracy, mask_mult, merged, correct_prediction],
            feed_dict={X: train_img, seqs: train_seq, mask: train_mask, learn_rate: 0.1})
        times.append(time.time() - start)
        costs.append(_loss)
        pred.append(_acc)
        suammry_writer.add_summary(summary)
        # print('_mask_mult:', tf.reduce_sum(_loss_ori * _mask_mult).eval(session=sess),
        #       tf.reduce_sum(_mask_mult).eval(session=sess))
        # print('acc:', tf.reduce_mean(tf.cast(_correct_prediction, tf.float32)).eval(session=sess))
        if iter % 100 == 0:
            # Periodic progress report + checkpoint.
            print("Iter: %d (Epoch %d--%d)" % (iter, i + 1, NB_EPOCHS))
            print("\tMean cost: ", np.mean(costs), '===', _loss)
            print("\tMean prediction: ", np.mean(pred), '===', _acc)
            print("\tMean time: ", np.mean(times))
            print('\tSaveing summary to the path:', summary_path)
            print('\tSaveing model to the path:', saver2_path)
            suammry_writer.add_summary(summary, global_step=iter * i + iter)
            saver2.save(sess, saver2_path)
        if iter % 200 == 0:
            # Periodically greedy-decode the first training image to eyeball quality.
            charlength = 200
            inp_seqs = np.zeros((1, charlength)).astype('int32')
            inp_seqs[:, 0] = property['char_to_idx']['<SPACE>']
            tflib.ops.ctx_vector = []
            true_char = idx_to_chars(train_seq[0].flatten().tolist())
            # BUG FIX: the decode loop previously reused `i` as its index,
            # clobbering the enclosing epoch counter (wrong epoch in the prints
            # and in the summary global_step); use a dedicated variable.
            for t in range(1, charlength):
                feed = {X: [train_img[0]], input_seqs: inp_seqs[:, :t]}
                inp_seqs[:, t] = sess.run(predictions, feed_dict=feed)
            # formula_pred = idx_to_chars(inp_seqs.flatten().tolist()).split('#END')[0].split('#START')[-1]
            formula_pred = idx_to_chars(inp_seqs.flatten().tolist())
            print('\tTrue char is :', true_char)
            print('\tPredict char is:', formula_pred)
    print("\n\nEpoch %d Completed!" % (i + 1))
    print("\tMean train cost: ", np.mean(costs))
    print("\tMean train perplexity: ",
          np.mean(list(map(lambda x: np.power(np.e, x), costs))))
    print("\tMean time: ", np.mean(times))
    print('\n\n')
    print('processing the validate data...')
    val_loss, val_perp = score('validate', BATCH_SIZE)
    print("\tMean val cost: ", val_loss)
    print("\tMean val perplexity: ", val_perp)
    Info_out = datetime.datetime.now().strftime(
        '%Y-%m-%d %H:%M:%S') + ' ' + 'iter/epoch/epoch_nums-%d/%d/%d' % (
            iter, i,
            NB_EPOCHS) + ' ' + 'val cost/val perplexity:{}/{}'.format(
                val_loss, val_perp)
    # Append the validation record; the with-block closes the file (removed the
    # redundant explicit close()).
    with open(summary_path + 'val_loss.txt', 'a') as file:
        file.writelines(Info_out)
    if val_perp < best_perp:
        best_perp = val_perp
        saver2.save(sess, saver2_path)
        print("\tBest Perplexity Till Now! Saving state!")
coord.request_stop()
coord.join(thread)
|
#Programa: act7.py
#Propósito: Una persona adquirió un producto para pagar en 20 meses. El primer mes pagó 10 €, el segundo 20 €, el tercero 40 € y así sucesivamente. Realizar un programa para determinar cuánto debe pagar mensualmente y el total de lo que pagará después de los 20 meses.
#Autor: Jose Manuel Serrano Palomo.
#Fecha: 29/10/2019
#
#Variables a usar:
# P es el pago inicial, pac es el total del pago tras acumularlo, mes es la cantidad de meses
#
#Algoritmo:
# Debemos ir acumulando los pagos en una serie aritmética de 20 veces multiplicando poe 2
# Desde el mes 1 al 20
# Acumula el pago
# Se dobla
# Mostramos el pago acumulado
print("Calcular el pago acumulado de un comprador")
print("------------------------------------------\n")
# Initial data: first month's payment and the running total.
P = 10
pac = 0
# Accumulate the monthly payments (geometric series: 10, 20, 40, ...).
# BUG FIX: range(1, 20) only covered 19 months; the problem states 20 months,
# so the upper bound must be 21.
for mes in range (1, 21):
    pac = pac + P
    P = P * 2
print(f"El resultado de los 20 meses al pagar es {pac}")
|
import smh
import pickle
from matplotlib import pyplot
import sklearn.metrics
from config import Config
import sys
sys.path.append('../common/')
import evaluation
import dataLoader
# Folder paths
objectsRankingFile = 'allObjetsRankingFile.pickle'
model = smh.listdb_load(Config.MODEL_FILE)
ifs = smh.listdb_load(Config.INVERT_INDEX_FILE)
# Ranked file lists for every discovered object.
allObjetsToFileRanked = []
with open(objectsRankingFile, 'rb') as handle:
    allObjetsToFileRanked = pickle.load(handle)
print('Size of all objects discovered: {}'.format(len(allObjetsToFileRanked)))
okAndGoodGroundTruthImages, junkGroundTruthImages = dataLoader.groundTruthLoader(Config.GROUND_TRUTH_PATH)
print('Size of allGroundTruthImages: {}'.format(len(okAndGoodGroundTruthImages)))
print(okAndGoodGroundTruthImages.keys())
allObjetsAP = []
averageAP = 0.0
# For every ground-truth query, find the discovered object with the best AP.
for key in sorted(okAndGoodGroundTruthImages):
    value = okAndGoodGroundTruthImages[key]
    maxAP = -1
    positiveImages = float(len(value))
    bestIndex = 0
    indexDiscoveredObject = 0
    for objectDiscovered in allObjetsToFileRanked:
        ap = evaluation.computeMyAP(value, junkGroundTruthImages[key], objectDiscovered)
        if ap > maxAP:
            maxAP = ap
            bestIndex = indexDiscoveredObject
        indexDiscoveredObject += 1
    '''
    with open(key+'.txt', 'w') as handle:
        for fileOb in allObjetsToFileRanked[bestIndex]:
            handle.write(fileOb+'\n')
    '''
    print('Best AP for {} : {}'.format(key, maxAP))
    averageAP += maxAP
# BUG FIX: the mean was divided by a hard-coded 11.0; use the actual number of
# ground-truth queries so the script works for any dataset size.
print('Average AP: {:.2f}'.format(averageAP / len(okAndGoodGroundTruthImages)))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Class 1. First part.
Introduction to Python.
"""
# The style we'll work with is imperative programming: statements run line by line.
3*4+2
# Variables: values can be stored in a variable with the = sign for later use.
"""
Se puede guardar información en variables, usando el signo =, para ser utilizada después
"""
a=2
b=3
(a+b)*2
c=(a+b)*2
# Print a variable to the console.
print(c)
# Variables can hold different data types:
# Numbers (int or float)
numero_1=1
# Words (strings), written between quotes.
texto='Hola, buen día'
texto2='¿Cómo estás?'
texto_total=texto+texto2
print(texto_total)
# Lists
lista=[1,2,10,7,9]
# Lists are indexed (0-based; slices and negative indices work too).
lista[2]
lista[0:2]
lista[-1]
# Lists can hold other kinds of objects:
lista_palabras=['Mate','Cafe','Harina']
# Each data type has methods/functions; they may or may not take arguments:
texto_en_lista=texto.split(' ')
print(texto_en_lista)
lista_palabras.append('Palmitos')
len(lista_palabras)
lista_palabras.extend(lista)
# With `for` we can walk a list, doing the indented work for each element.
lista_nueva=[]
for x in lista:
    print(x)
    lista_nueva.append(x+1) # Indentation is very important!
# Sometimes this is written compactly (a list comprehension).
lista_nueva=[elemento+1 for elemento in lista]
# The other key control-flow construct is `if`, which tests a condition.
if len(lista_palabras)==3:
    lista_palabras.append('Palmitos')
else:
    print(lista_palabras)
# We can also define our own functions!
def nuestra_suma(x,y,z):
    resultado=x+y+z
    return(resultado)
nuestra_suma(1,3.2,a)
def nuestro_sumar_uno(lista):
    lista_nueva=[]
    for elemento in lista:
        lista_nueva.append(elemento+1)
    return(lista_nueva)
nuestro_sumar_uno(lista) # Here we call it
# Some useful functions come pre-built in libraries.
import numpy as np # Very handy library for working with vectors
np.mean(lista)
np.max(lista)
# Other useful libraries
import pandas as pd # Very handy library for working with tables
import tweepy # Library for talking to the Twitter API
# Missing libraries can be installed from the console with: !pip install missing_library
# We'll use the library built specifically for this course; importing it requires being in the same folder.
import codigo_General
#q7: one add choice: non-zero initialization of the account
from example_creditcard import CreditCard
class CreditCard_q7(CreditCard):
    """Credit card variant allowing a non-zero initial account balance."""

    def __init__(self, customer, bank, acnt, limit, start=0):
        """Create the card; a non-zero `start` seeds the balance with -start."""
        super().__init__(customer, bank, acnt, limit)
        if start != 0:
            # Stored negated, so the account begins in credit by `start`.
            self._balance = -start
if __name__ == '__main__':
    # Smoke test: default start (zero balance).
    card = CreditCard_q7('John', 'Commonwealth', '5907 8901', 2500)
    print(card.charge(100))
    print(card.charge(300))
    print(card.get_balance())
    print(card.charge(10000))
    # Card initialized with a 2500 pre-payment (stored as a negative balance).
    card2 = CreditCard_q7('John', 'Commonwealth', '5907 8901', 2500, 2500)
    print(card2.get_balance())
    print(card2.charge(100))
    print(card2.charge(300))
    print(card2.get_balance())
    print(card2.charge(10000))
# Read n strings, build a two-way mapping (1-based index <-> string),
# then answer q lookup queries from stdin.
n, q = map(int, input().split())
lookup = {}
for idx in range(1, n + 1):
    name = input()
    lookup[str(idx)] = name
    lookup[name] = str(idx)
for _ in range(q):
    print(lookup[input()])
|
import day13
import unittest
class Day2Tests(unittest.TestCase):
    """Regression tests for the day13 puzzle solver."""

    def test_True(self):
        # Sanity check that the harness itself runs.
        self.assertTrue(True)

    def test_Day13_Example(self):
        # Worked example: (1,1) -> (7,4) with favorite number 10 takes 11 steps.
        self.assertEqual(day13.solve((1,1),(7,4),data=10)[0], 11)

    def test_Day13_Data(self):
        # Puzzle part 1 answer (pinned so refactors of day13 are caught).
        self.assertEqual(day13.solve((1,1),(31,39))[0], 90)

    def test_Day13_Data_2(self):
        # Puzzle part 2 answer.
        self.assertEqual(day13.solve((1,1),(31,39))[1], 135)
if __name__ == '__main__':
    # Run the suite when executed directly.
    unittest.main()
|
# User-facing strings for the RSS Telegram bot.
errorMessage = "Oh dang! An error has occurred."  # fixed typo: "occured"
errorUnknownCommand = "I'm sorry, I don't know this command. Type /help for a list of commands."
errorNoFile = "Either file is missing or is not readable. Creating."
errorCommand = "Unknown command."
errorAdmin = "You must be an admin to issue this command"
errorMsgLong = "The message is too long to send"
stringHelp = "Hello, I am TermoZour's RSS Bot. I can give you updates to RSS links! Type /help for a list of commands."
stringURLadded = "Added URL to subscription"
stringURLremoved = "Removed URL from subscription"
stringInvalidURL = "URL invalid or missing"
stringInvalidURLbozo = "This link is not an RSS Feed link"
stringURLalreadyAdded = "This URL has already been added"
stringURLalreadyRemoved = "You haven't subscribed to this URL yet"
help_message = "\n/help - shows this message"
help_add = "\n/add <link> - add the link to the subscriptions"
help_remove = "\n/remove <link> - removes the link from the subscriptions"
help_url = "\n/url <link> - shows the link's data and the last entry, for testing purposes I suppose"  # fixed lowercase "i"
help_list = "\n/list - shows the list of links you currently subscribed to in that particular chat"
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from lists import views as listsViews
#from learn import views as learn_views # new
# from addview import views as add_test
# from vartest import views as test_var
# URL routing table: admin site, home page, and the lists app's own urls.
# The commented-out entries are earlier experiments kept for reference.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # url(r'^111$', learn_views.index), # new
    # url(r'^add/$', add_test.index,name='index'), # add
    # url(r'^add/(\d+)/(\d+)/$', add_test.add,name='add'), # add
    # url(r'^home$', test_var.home2,name='home2'), # home
    # url(r'^forms$', 'tools.views.index', name='forms'),
    # url(r'^$', 'learn.views.home', name='home'), # home
    url(r'^$', listsViews.home_page, name='home'),
    url(r'^lists/', include('lists.urls')),
    # url(r'^polls/',include('polls.urls')), #polls app
]
|
import os
from flask import Flask, request, render_template
from configs import settings
from views.web import web_view
def create_app(debug=settings.DEBUG):
    """Build and configure the Flask application.

    Args:
        debug: debug flag; defaults to the configured settings.DEBUG.

    Returns:
        the Flask app with the web blueprint registered.
    """
    app = Flask(__name__, template_folder=settings.TEMPLATE_FOLDER)
    app.register_blueprint(web_view)
    app.debug = debug
    return app
app = create_app()

if __name__ == '__main__':
    # Start the development server on the configured address.
    host = settings.WEB_SERVER_IP
    port = settings.WEB_SERVER_PORT
    # Fix: the bare `print "..."` statement is Python-2-only syntax; the
    # single-argument call form below behaves identically on Python 2 and 3.
    print("SiteNav starts running on http://%s:%s/" % (host, port))
    app.run(host=host, port=port)
"""Login form."""
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class SignUpForm(UserCreationForm):
    """Specific form with intermediate email value for signup."""

    # Extra required email field on top of the stock UserCreationForm fields
    # (help text is user-facing French).
    email = forms.EmailField(
        max_length=254, help_text='Requis. Renseignez une adresse mail valide.')

    class Meta:
        """User fields."""
        model = User
        fields = ('username', 'email', 'password1', 'password2', )

    def clean_email(self):
        """Return the email if entered email is unique.

        Otherwise gives duplicate_email error.
        """
        email = self.cleaned_data['email']
        # Enforce uniqueness explicitly: Django's default User model does not
        # make the email column unique.
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError("Cette adresse mail est déjà utilisé.")
        return email
|
"""MKS localzation module
The MKS localization module fits data using the Materials Knowledge
System in Fourier Space.
Example:
>>> from pymks.fmks.bases.primitive import discretize
>>> from pymks.fmks.func import allclose
>>> disc = discretize(n_state=2)
>>> x_data = lambda: da.from_array(np.linspace(0, 1, 8).reshape((2, 2, 2)),
... chunks=(1, 2, 2))
>>> y_data = lambda: x_data().swapaxes(1, 2)
>>> assert pipe(
... fit(x_data(), y_data(), disc),
... predict(x_data(), discretize=disc),
... allclose(y_data())
... )
"""
import dask.array as da
import numpy as np
from scipy.linalg import lstsq
from toolz.curried import pipe
from toolz.curried import map as fmap
from sklearn.base import RegressorMixin, TransformerMixin, BaseEstimator
from .func import (
curry,
array_from_tuple,
rechunk,
dafftshift,
dafftn,
daifftn,
daifftshift,
zero_pad,
)
@curry
def lstsq_mks(fx_data, fy_data, redundancy_func, ijk):
    """Do a least square for a single point in the MKS k-space.
    Select a point in k-space using `ijk` tuple and do a least squares
    across samples and local state space to calculate parts of the
    coefficient matrix.
    Args:
      fx_data: microstructure in k-space
      fy_data: response in k-space
      redundancy_func: helps remove redundancies in the coefficient matrix
      ijk: a point in k-space
    Returns:
      a tuple of the coefficient matrix index and vector over local
      state space, which can be used to populate the coefficient
      matrix
    >>> make_data = lambda s: da.from_array(np.arange(np.prod(s),
    ...                                               dtype=float).reshape(s),
    ...                                     chunks=s)
    >>> index, value = lstsq_mks(make_data((2, 1, 1, 3)),
    ...                          make_data((2, 1, 1)),
    ...                          lambda _: (slice(None),),
    ...                          (0, 0))
    >>> print(index)
    (0, 0, slice(None, None, None))
    >>> assert np.allclose(value, [5. / 18., 1. / 9., -1. / 18.])
    """
    # The leading slice(None) keeps the full sample axis; ijk pins the k-space
    # point and redundancy_func appends the local-state slices.
    fx_data_ = lambda: fx_data[(slice(None),) + ijk + redundancy_func(ijk)]
    fy_data_ = lambda: fy_data[(slice(None),) + ijk]
    return (
        ijk + redundancy_func(ijk),
        # rcond well above machine eps keeps the solve numerically stable.
        lstsq(fx_data_(), fy_data_(), np.finfo(float).eps * 1e4)[0],
    )
def fit_fourier(fx_data, fy_data, redundancy_func):
    """Fit the data in Fourier space.
    Fit the data after it has been discretized and transformed into
    Fourier space.
    Args:
      fx_data: microstructure in k-space
      fy_data: response in k-space
      redundancy_func: helps remove redundancies in the coefficient matrix
    Returns:
      the coefficient matrix (unchunked)
    >>> make_data = lambda s: da.from_array(np.arange(np.prod(s),
    ...                                               dtype=float).reshape(s),
    ...                                     chunks=s)
    >>> matrix = fit_fourier(make_data((5, 4, 4, 3)),
    ...                      make_data((5, 4, 4)),
    ...                      lambda _: (slice(None),))
    >>> print(matrix.shape)
    (4, 4, 3)
    >>> test_matrix = np.resize([5. / 18., 1. / 9., -1. / 18.], (4, 4, 3))
    >>> assert np.allclose(matrix, test_matrix)
    """
    # Solve one least-squares problem per k-space point and assemble the
    # results into the full coefficient matrix.
    lstsq_mks_ = lstsq_mks(fx_data.compute(), fy_data.compute(), redundancy_func)
    return pipe(
        fmap(lstsq_mks_, np.ndindex(fx_data.shape[1:-1])),
        list,
        # Fix: `np.complex` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `complex` is the documented replacement.
        array_from_tuple(shape=fx_data.shape[1:], dtype=complex),
    )
def faxes(arr):
    """Return the spatial axes over which to apply the Fourier transform.

    The first axis (samples) and the last axis (local state) are excluded.

    Args:
      arr: the discretized array

    Returns:
      array of axis indices running from 1 to n - 2, where n is the
      number of dimensions of ``arr``

    >>> faxes(np.array([1]).reshape((1, 1, 1, 1, 1)))
    array([1, 2, 3])
    """
    return np.arange(1, arr.ndim - 1)
@curry
def fit_disc(x_data, y_data, redundancy_func):
    """Fit the discretized data.
    Fit the data after the data has already been discretized.
    Args:
      x_data: the discretized mircrostructure field
      y_data: the discretized response field
      redundancy_func: helps remove redundancies in the coefficient matrix
    Returns:
      the chunked coefficient matrix based on the chunking of local
      state space from the discretized mircrostructure field
    >>> make_data = lambda s, c: da.from_array(
    ...     np.arange(np.prod(s),
    ...               dtype=float).reshape(s),
    ...     chunks=c
    ... )
    >>> matrix = fit_disc(make_data((6, 4, 4, 3), (2, 4, 4, 1)),
    ...                   make_data((6, 4, 4), (2, 4, 4)),
    ...                   lambda _: (slice(None),))
    >>> print(matrix.shape)
    (4, 4, 3)
    >>> print(matrix.chunks)
    ((4,), (4,), (1, 1, 1))
    >>> assert np.allclose(matrix.compute()[0, 0, 0], 5. / 18.)
    """
    # Keep the local-state chunking of x_data for the coefficient matrix.
    chunks = lambda x: (None,) * (len(x.shape) - 1) + (x_data.chunks[-1],)
    # FFT both fields over the spatial axes, fit in Fourier space, then
    # re-wrap the (unchunked) result as a dask array.
    return pipe(
        [x_data, y_data],
        fmap(dafftn(axes=faxes(x_data))),
        list,
        lambda x: fit_fourier(*x, redundancy_func),
        lambda x: da.from_array(x, chunks=chunks(x)),
    )
@curry
def fit(x_data, y_data, discretize, redundancy_func=lambda _: (slice(None),)):
    """Calculate the MKS influence coefficients.
    Args:
      x_data: the microstructure field
      y_data: the response field
      discretize: a function that returns the discretized data and
        redundancy function
      redundancy_func: helps remove redundancies in the coefficient matrix
    Returns:
      the influence coefficients
    >>> from pymks.fmks.bases.primitive import discretize
    >>> matrix = fit(da.from_array(np.array([[0], [1]]), chunks=(2, 1)),
    ...              da.from_array(np.array([[2], [1]]), chunks=(2, 1)),
    ...              discretize(n_state=3))
    >>> assert np.allclose(matrix, [[2, 0, 1]])
    """
    # Discretize the microstructure first, then fit in the discretized space.
    return pipe(
        x_data, discretize, fit_disc(y_data=y_data, redundancy_func=redundancy_func)
    )
@curry
def _predict_disc(x_data, coeff):
    """Apply the influence coefficients to already-discretized data.

    Convolution in real space becomes multiplication in k-space: FFT the
    discretized microstructure, multiply by the coefficients, sum over the
    local state axis and transform back, keeping the real part.
    """
    return pipe(
        dafftn(x_data, axes=faxes(x_data)),
        lambda x: np.sum(x * coeff[None], axis=-1),
        daifftn(axes=faxes(x_data), s=x_data.shape[1:-1]),
    ).real
@curry
def predict(x_data, coeff, discretize):
    """Predict a response field for a given microstructure.

    Args:
      x_data: the microstructure data
      coeff: the influence coefficients
      discretize: the basis function

    Returns:
      the predicted response field
    """
    discretized = discretize(x_data)
    return _predict_disc(discretized, coeff)
def _ini_axes(arr):
return tuple(np.arange(arr.ndim - 1))
@curry
def coeff_to_real(coeff, new_shape=None):
    r"""Convert the coefficients to real space
    Convert the :class:`pymks.LocalizationRegressor` coefficiencts to
    real space. The coefficiencts are calculated in Fourier space, but
    best viewed in real space. If the Fourier coefficients are defined
    as :math:`\beta\left[l, k\right]` then the real space coefficients
    are calculated using,
    .. math::
       \alpha \left[l, r\right] = \frac{1}{N} \sum_{k=0}^{N-1} \beta\left[l, k\right] e^{i \frac{2 \pi}{N} k r} e^{i \pi}
    where :math:`l` is the local state and :math:`r` is the spatial
    index from :math:`0` to :math:`N-1`. The :math:`e^{i \pi}` term
    is a shift applied to place the 0 coefficient at the center of the
    domain for viewing purposes.
    Args:
      coeff (array): the localization coefficients in Fourier space as a Dask
        array `(n_x, n_y, n_state)`
      new_shape (tuple): shape of the output to either shorten or pad with
        zeros
    Returns:
      the coefficients in real space
    A spike at :math:`k=1` should result in a cosine function on the
    real axis.
    >>> N = 100
    >>> fcoeff = np.zeros((N, 1))
    >>> fcoeff[1] = N
    >>> x = np.linspace(0, 1, N + 1)[:-1]
    >>> assert np.allclose(
    ...     coeff_to_real(da.from_array(fcoeff)).real.compute(),
    ...     np.cos(2 * np.pi * x + np.pi)[:, None]
    ... )
    """  # pylint: disable=line-too-long; # noqa: #501
    # Inverse FFT over all but the local-state axis, then center the zero
    # coefficient with an fftshift for viewing.
    return pipe(
        coeff,
        daifftn(axes=_ini_axes(coeff), s=new_shape),
        dafftshift(axes=_ini_axes(coeff)),
    )
@curry
def coeff_to_frequency(coeff):
    """Convert the coefficients to frequency space.
    Args:
      coeff: the influence coefficients in real space
    Returns:
      the influence coefficiencts in frequency space
    >>> from .func import rcompose
    >>> f = rcompose(
    ...     lambda x: np.concatenate((x, np.ones_like(x)), axis=-1),
    ...     lambda x: da.from_array(x, chunks=x.shape),
    ...     coeff_to_frequency,
    ...     coeff_to_real,
    ...     lambda x: x.real[..., :1].compute()
    ... )
    >>> assert (lambda x: np.allclose(f(x), x))(np.arange(20).reshape((5, 4, 1)))
    """
    # Inverse of coeff_to_real: undo the fftshift, then transform forward.
    # NOTE(review): the .copy() presumably guards the caller's array against
    # in-place mutation — confirm whether the downstream ops actually mutate.
    return pipe(
        coeff.copy(), daifftshift(axes=_ini_axes(coeff)), dafftn(axes=_ini_axes(coeff))
    )
@curry
def coeff_resize(coeff, shape):
    """Resize the influence coefficients.
    Resize the influence coefficients by padding with zeros to the
    size determined by shape. Apply to coefficients in frequency space.
    Args:
      coeff: the influence coefficients with size (nx, ny, nz, nstate)
      shape: the new padded shape (NX, NY, NZ)
    Returns:
      the resized influence coefficients
    >>> from .func import ifftshift, fftn
    >>> assert pipe(
    ...     np.arange(20).reshape((5, 4, 1)),
    ...     lambda x: np.concatenate((x, np.ones_like(x)), axis=-1),
    ...     ifftshift(axes=(0, 1)),
    ...     fftn(axes=(0, 1)),
    ...     lambda x: da.from_array(x, chunks=x.shape),
    ...     coeff_resize(shape=(10, 7)),
    ...     coeff_to_real,
    ...     lambda x: np.allclose(x.real[..., 0],
    ...                           [[0, 0, 0, 0, 0, 0, 0],
    ...                            [0, 0, 0, 0, 0, 0, 0],
    ...                            [0, 0, 0, 0, 0, 0, 0],
    ...                            [0, 0, 0, 1, 2, 3, 0],
    ...                            [0, 0, 4, 5, 6, 7, 0],
    ...                            [0, 0, 8, 9,10,11, 0],
    ...                            [0, 0,12,13,14,15, 0],
    ...                            [0, 0,16,17,18,19, 0],
    ...                            [0, 0, 0, 0, 0, 0, 0],
    ...                            [0, 0, 0, 0, 0, 0, 0]])
    ... )
    """
    # Zero-padding must happen in real space (padding in k-space would
    # change the coefficients), so round-trip through coeff_to_real.
    return pipe(
        coeff,
        coeff_to_real,
        zero_pad(
            shape=shape + coeff.shape[-1:],
            chunks=((-1,) * len(shape)) + (coeff.chunks[-1],),
        ),
        coeff_to_frequency,
    )
def reshape(data, shape):
    """Reshape data along all but the first axis
    Args:
      data: the data to reshape
      shape: the shape of the new data (not including the first axis)
    Returns:
      the reshaped data
    >>> data = np.arange(18).reshape((2, 9))
    >>> reshape(data, (None, 3, 3)).shape
    (2, 3, 3)
    """
    # shape[0] is a placeholder (e.g. None) and is ignored; the first axis
    # always keeps its original length.
    return data.reshape(data.shape[0], *shape[1:])
class ReshapeTransformer(BaseEstimator, TransformerMixin):
    """Reshape data ready for the LocalizationRegressor
    Sklearn likes flat image data, but MKS expects shaped data. This
    class transforms the shape of flat data into shaped image data for
    MKS.
    >>> data = np.arange(18).reshape((2, 9))
    >>> ReshapeTransformer((None, 3, 3)).fit(None, None).transform(data).shape
    (2, 3, 3)
    """

    def __init__(self, shape):
        """Instantiate a ReshapeTransformer
        Args:
          shape: the shape of the reshaped data (ignoring the first axis)
        """
        self.shape = shape

    def transform(self, x_data):
        """Transform the X data
        Args:
          x_data: the data to be transformed
        Returns:
          the data reshaped to ``shape`` along all but the first axis
        """
        return reshape(x_data, self.shape)

    def fit(self, *_):
        """Only necessary to make pipelines work"""
        return self
class LocalizationRegressor(BaseEstimator, RegressorMixin):
    """Perform the localization in Sklearn pipelines
    Allows the localization to be part of a Sklearn pipeline
    >>> make_data = lambda s, c: da.from_array(
    ...     np.arange(np.prod(s),
    ...               dtype=float).reshape(s),
    ...     chunks=c
    ... )
    >>> X = make_data((6, 4, 4, 3), (2, 4, 4, 1))
    >>> y = make_data((6, 4, 4), (2, 4, 4))
    >>> y_out = LocalizationRegressor().fit(X, y).predict(X)
    >>> assert np.allclose(y, y_out)
    >>> print(
    ...     pipe(
    ...         LocalizationRegressor(),
    ...         lambda x: x.fit(X, y.reshape(6, 16)).predict(X).shape
    ...     )
    ... )
    (6, 16)
    """

    def __init__(self, redundancy_func=lambda _: (slice(None),)):
        """Instantiate a LocalizationRegressor
        Args:
          redundancy_func: function to remove redundant elements from
            the coefficient matrix
        """
        self.redundancy_func = redundancy_func
        # Learned influence coefficients; populated by fit().
        self.coeff = None
        # Shape of y at fit time; lets predict() decide whether to return
        # image-shaped or flat output.
        self.y_data_shape = None

    def fit(self, x_data, y_data):
        """Fit the data
        Args:
          x_data: the X data to fit
          y_data: the y data to fit
        Returns:
          the fitted LocalizationRegressor
        """
        self.y_data_shape = y_data.shape
        # Reshape (possibly flat) y to the spatial shape of x_data, then match
        # x_data's chunking before fitting.
        y_data_reshape = reshape(y_data, x_data.shape[:-1])
        y_data_da = rechunk(x_data.chunks[:-1], y_data_reshape)
        self.coeff = fit_disc(x_data, y_data_da, self.redundancy_func)
        return self

    def predict(self, x_data):
        """Predict the data
        Args:
          x_data: the X data to predict
        Returns:
          The predicted y data
        """
        # If fit saw image-shaped y, return image-shaped output; otherwise
        # flatten the spatial axes back into a single axis.
        if len(self.y_data_shape) == len(x_data.shape) - 1:
            new_shape = (1,) + self.coeff.shape[:-1]
        else:
            new_shape = (1, np.prod(self.coeff.shape[:-1]))
        return reshape(_predict_disc(x_data, self.coeff), new_shape)

    def coeff_resize(self, shape):
        """Generate new model with larger coefficients
        Args:
          shape: the shape of the new coefficients
        Returns:
          a new model with larger influence coefficients
        """
        self.coeff = coeff_resize(self.coeff, shape)
        return self
|
# Generated by Django 3.0.7 on 2020-12-02 08:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine posdaud.record_detail_type choices."""

    dependencies = [
        ('cl_table', '0087_remove_stock_favorites'),
    ]

    operations = [
        # Alters only the field definition (choices/metadata); no data change.
        migrations.AlterField(
            model_name='posdaud',
            name='record_detail_type',
            field=models.CharField(blank=True, choices=[('SERVICE', 'SERVICE'), ('TD', 'TD'), ('PRODUCT', 'PRODUCT'), ('PREPAID', 'PREPAID'), ('VOUCHER', 'VOUCHER'), ('PACKAGE', 'PACKAGE'), ('TP SERVICE', 'TP SERVICE'), ('TP PRODUCT', 'TP PRODUCT'), ('TP PREPAID', 'TP PREPAID')], db_column='Record_Detail_Type', max_length=50, null=True),
        ),
    ]
|
# extract ingredients from recipes and count frequency
import pandas as pd
import numpy as np
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk import pos_tag
import re
import requests
from bs4 import BeautifulSoup
import ast
import inflect
from help_functions import read_csv_files
# ----------------------------- parse recipes to get clean ingredient -----------------------------
def clean_recipe(ingred_list):
    """Parse one recipe's raw ingredient lines into short, clean names.

    Args:
        ingred_list: a string holding a Python-literal list of raw
            ingredient lines (as stored in the csv).

    Returns:
        list of cleaned ingredient strings; empty results are dropped.
    """
    cleanedtext = []
    p = inflect.engine()
    # change from string to list
    ingred_list = ast.literal_eval(ingred_list)
    for matchtext in ingred_list:
        # Obtain all before first comma
        if re.compile('^(.+?)(?=,)').search(matchtext) is not None:
            matchtext = re.compile('^(.+?)(?=,)').search(matchtext).group(1)
        # Tokenize ingredient list
        tokenized = word_tokenize(matchtext)
        # Remove words likely to be stop words or measurements
        removed_stop = [w for w in tokenized if not w in measure_corpus]
        removed_stop = [w for w in removed_stop if not w in stop_words]
        # Filter adjectives and nouns
        ingred_words = lambda pos: pos[:2] in ['JJ','NN','NNS']
        ingreds = [word.lower() for (word, pos) in pos_tag(removed_stop) if ingred_words(pos)]
        # Convert to singular
        ingreds = [p.singular_noun(word) if p.singular_noun(word) else word for word in ingreds]
        # Remove special characters including numbers — TODO: how to strip ½, ¼, % etc.
        ingreds = [re.sub('[^ a-zA-Z]', '', i) for i in ingreds]
        #print(ingreds)
        # Remove common ingredients
        # NOTE(review): `common` is always empty, so the filter below never
        # removes anything — looks like an unfinished feature; confirm intent.
        common = []
        cleanedtext.append(ingreds)
    cleanedtext = [[ing for ing in ingreds if not any(word in common for word in ingreds)] for ingreds in cleanedtext]
    # Remove additional descriptors for long ingredient names (keep last two words)
    cleanedtext = [ingreds[-2:] if len(ingreds) > 2 else ingreds for ingreds in cleanedtext]
    return [(' ').join(item) for item in cleanedtext if len(item)>0]
def get_stopwords():
    """Build the stop-word and measurement-word corpora.

    Scrapes a measurement word list from enchantedlearning.com (requires
    network access) and combines it with NLTK's English stop words.

    Returns:
        (stop_words, measure_corpus) tuple: a set of stop words and a
        list of measurement-related words (with plural forms appended).
    """
    stop_words = set(stopwords.words('english'))
    page = requests.get('https://www.enchantedlearning.com/wordlist/measurement.shtml')
    soup = BeautifulSoup(page.content, "html.parser")
    measure_corpus = [tag.text for tag in soup.find_all('div',attrs={'class':'wordlist-item'})]
    # add plural form and additional words
    measure_corpus = measure_corpus + [text+'s' for text in measure_corpus] + \
        ['taste','strip', 'strips', 'package', 'packages', 'satchet', \
         'satchets', 'sprigs', 'head', 'bunch', 'small', 'large', 'big', 'medium', 'tbsp', 'g']
    return stop_words, measure_corpus
def store_parsed_ingredients(data):
    """Parse every recipe's ingredients and persist the result to ./clean_data.csv.

    NOTE(review): assigning into ``df`` (a selection from ``data``) may raise
    pandas' SettingWithCopyWarning — confirm this is intended.
    """
    df = data[['ingredients']]
    df['ingredients'] = df['ingredients'].apply(clean_recipe)
    df = df.rename(columns={"ingredients": "parsed_ingredients"})
    # Join the parsed column back onto the original frame before saving.
    data = data.join(df)
    data.to_csv('./clean_data.csv')
# ----------------------------- count ingredient frequency and find top ingredients -----------------------------
def count_ingredient_frequency(data):
    """Count how often each parsed ingredient occurs across all recipes.

    Args:
        data: DataFrame with a ``parsed_ingredients`` column whose cells are
            either lists of ingredient strings or their ``str`` form (as read
            back from a previously saved csv).

    Returns:
        list of ``(ingredient, count)`` tuples sorted by count, descending.
    """
    counts = {}
    for ingredient in data['parsed_ingredients']:
        # Cells read back from csv arrive as strings; convert those to lists.
        # (Fixes the original, which called ast.literal_eval unconditionally
        # and crashed on cells that were already lists.)
        if not isinstance(ingredient, list):
            ingredient = ast.literal_eval(ingredient)
        for item in ingredient:
            # dict.get replaces the original bare try/except counting idiom.
            key = str(item)
            counts[key] = counts.get(key, 0) + 1
    return sorted(counts.items(), key=lambda item: item[1], reverse=True)
def get_top_ingredients(path, number):
    """Return the ``number`` most frequent ingredients in the csv at ``path``."""
    frequencies = count_ingredient_frequency(pd.read_csv(path))
    return [name for name, _ in frequencies[:number]]
def ingredient_vector(data, top_ingredients):
    """Encode each recipe's ingredients as a binary vector over ``top_ingredients``.

    Args:
        data: DataFrame with a ``parsed_ingredients`` column whose cells are
            either lists of ingredient strings or their ``str`` form (as read
            back from a saved csv).
        top_ingredients: ordered list of ingredient names that defines the
            vector components.

    Returns:
        numpy array of shape ``(len(data), len(top_ingredients))`` holding 1
        where a recipe contains the ingredient and 0 otherwise.
    """
    ingre_vectors = []
    for parsed_ingre in data['parsed_ingredients']:
        # Fix: originally ``ingres`` was only assigned when the cell was a
        # string, so a list-typed cell raised NameError (or silently reused
        # the previous row's value).
        if isinstance(parsed_ingre, list):
            ingres = parsed_ingre
        else:
            ingres = ast.literal_eval(parsed_ingre)
        ingre_vector = len(top_ingredients) * [0]
        for ingre in ingres:
            if ingre in top_ingredients:
                ingre_vector[top_ingredients.index(ingre)] = 1
        ingre_vectors.append(ingre_vector)
    return np.asarray(ingre_vectors)
def clean_ingredient_vector(data, drop_index, top_ingredient):
    """Vectorize ingredients, dropping rows whose position is in ``drop_index``.

    Rows are removed because their recipes lack complete nutrient data.
    """
    vectors = ingredient_vector(data, top_ingredient)
    kept = [row for position, row in enumerate(vectors) if position not in drop_index]
    return np.asarray(kept)
stop_words, measure_corpus = get_stopwords()
|
# -*- coding: utf-8 -*-
# OpenERP/Odoo module manifest for the FTP wav connector.
{
    'name': 'conector FTP wav',
    'version': '0.1',
    'category': 'ftp',
    'summary': 'Conector Ftp',
    'description': """
Genera un conector ftp, para agregar los archivos ftp, al sistema y relacionarlos con los mandatos
==================================================
""",
    'author': 'Econube | Pablo Cabezas',
    'website': 'http://www.openerp.com',
    # Requires the base module plus the econube operations module.
    'depends': ['base','econube_operaciones'],
    'data': [
        'test_view.xml'
    ],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
import unittest
from sample.strings_example import StringsExamples
class TestStringsExamples(unittest.TestCase):
    """Unit tests for StringsExamples.concat_strings."""

    def test_concat_strings(self):
        # Plain happy path.
        result = StringsExamples.concat_strings("hola", "adios")
        assert result == "holaadios"

    def test_concat_int_string(self):
        # A non-string first argument must raise TypeError.
        self.assertRaises(TypeError, StringsExamples.concat_strings, 1, "adios")

    def test_concat_string_int(self):
        # A non-string second argument must raise TypeError.
        self.assertRaises(TypeError, StringsExamples.concat_strings, "hola", 2)

    def test_concat_int(self):
        # Two non-string arguments must raise TypeError.
        self.assertRaises(TypeError, StringsExamples.concat_strings, 1, 2)

    def test_concat_two_strings(self):
        result = StringsExamples.concat_strings("Espana", "Belgica")
        assert result == "EspanaBelgica"

    def test_concat_varied_lengths(self):
        # Replaces the former test_concat_stringN_M family, which repeated the
        # same "a"*N + "b"*M check ~20 times and mislabeled test_concat_string7_7
        # (it actually used 4- and 5-character inputs). Every length pair from
        # 1..10 now runs as its own subTest, a strict superset of the originals.
        for len_a in range(1, 11):
            for len_b in range(1, 11):
                with self.subTest(len_a=len_a, len_b=len_b):
                    left, right = "a" * len_a, "b" * len_b
                    self.assertEqual(
                        StringsExamples.concat_strings(left, right), left + right)


if __name__ == '__main__':
    unittest.main()
|
from functions import read_ncs, get_vectors, create_batch, train, get_non_comp_args, \
get_poly_features, rank_with_score, write_score, read_eval, build_model, regression_score
from baseline import weighted_add_score
from gensim.models.keyedvectors import KeyedVectors
import logging
from config import logging_config
from config import model_config
import scipy
# TODO extract dimensions directly from input vectors and not as a parameter
if __name__ == '__main__':
    args = get_non_comp_args()
    logging.info('Reading gensim model')
    gensim_w2v_model = KeyedVectors.load_word2vec_format(args.p2vw, binary=False)
    logging.info('Reading evaluation set')
    eval_ncs, eval_scores, eval_scores_inv = read_eval(args)
    # Baseline: weighted additive composition score over the evaluation set.
    logging.info('Calculating additive score')
    additive_score = weighted_add_score(eval_ncs, gensim_w2v_model)
    write_score(eval_ncs, additive_score, args.p2out+'additive_scores.csv')
    logging.info('Reading train set')
    ncs = read_ncs(args.p2tc)
    logging.info('Creating vector for training instances')
    X, Y = get_vectors(ncs, gensim_w2v_model)
    # Optional polynomial feature expansion (configured in model_config).
    if model_config.poly_degree > 1:
        X = get_poly_features(X, model_config.poly_degree)
    logging.info('Creating batches')
    in_batches, tar_batches = create_batch(X, Y, model_config.batch_size)
    logging.info('Creating the regression model')
    model, optimizer, criterion = build_model(X, Y)
    logging.info('Training')
    train(in_batches, tar_batches, model, model_config.nb_epochs, optimizer, criterion)
    logging.info('Calculating regression-based scores')
    reg_score = regression_score(eval_ncs, gensim_w2v_model, model)
    write_score(eval_ncs, reg_score, args.p2out+'reg_scores.csv')
    # Compare both scoring methods against the inverted human judgements.
    print('Spearman rho bet. inv human score and regression', scipy.stats.spearmanr(reg_score, eval_scores_inv))
    print('Spearman rho bet. inv human score and additive score', scipy.stats.spearmanr(additive_score, eval_scores_inv))
    if args.rank == 'true':
        print('Ranking based on regression model: ', rank_with_score(eval_ncs, reg_score))
        print('Ranking based on additive model: ', rank_with_score(eval_ncs, additive_score))
# -*- coding: utf-8 -*-
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class ProgressExamsWizard(osv.TransientModel):
    """Transient wizard that collects a heading and projects, then launches
    the progress-exams report."""
    _name = 'progress.exams.wizard'
    _columns = {
        'head': fields.text(
            string = 'Head',
            required=True,
        ),
        # Projects covered by the report (label is Thai for "project").
        'project_ids':fields.many2many(
            'data.project',
            'data_project_rel',
            string = 'โปรเจค',
            required=True,
        )
    }
    # Default heading (Thai for "progress examination").
    _defaults = {
        'head': 'สอบก้าวหน้า',
    }

    def print_report(self, cr, uid, ids, context=None):
        """Read the wizard values and return the progress_exams_report action.

        Returns an ir.actions.report.xml descriptor consumed by the client.
        """
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'progress.exams.wizard',
            'form': data
        }
        _logger.info('data {}'.format(datas))
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'progress_exams_report',
            'datas': datas,
        }

# Instantiation registers the model with the old-style OpenERP ORM.
ProgressExamsWizard()
|
"""
a deque with sequence (1,2,3,4,5,6,7,8).
given a queue,
use only the deque and queue,
to shift the sequence to the order (1,2,3,5,4,6,7,8)
"""
from example_queue import ArrayQueue
from example_double_ended_queue import ArrayDoubleEndedQueue
D = ArrayDoubleEndedQueue()
for i in range(1, 8+1): D.add_last(i)
Q = ArrayQueue()
print("initially")
D.show()
Q.show()
print()
# shift the order
# Rotate 1,2,3 to the back so 4 sits at the front.
for _ in range(3): D.add_last(D.delete_first())
# Park 4 in the queue, move 5 behind the deque, then append 4 after it.
Q.enqueue(D.delete_first())
D.add_last((D.delete_first()))
D.add_last(Q.dequeue())
# Rotate 6,7,8 to the back, restoring 1,2,3 to the front.
for _ in range(3): D.add_last(D.delete_first())
print("finally")
D.show()
Q.show()
# Python 2 script (uses xrange and print statements).
from os import system
system("cls")  # Windows-only console clear
matriz=[]
matriz2=[]
matriz3=[]
resta=0
res=[]
res2=[]
res3=[]
suma=0
val=0
val1=0
# Stepper list: leading 0 keeps the cursor at index 0 on the first pass,
# then each 1 advances it by one.
n=[0,1,1,1,1,1,1,1,1]
val2=-1
print(n)
x1=2
y2=3
# 3x2 matrix of ones (y2 rows, x1 columns).
matriz4 = [[1]*x1 for i in xrange(y2)]
def print_r(matriz4):
    # Print the matrix one row per line (Python 2 print statement).
    for fila in matriz4:
        print fila
def transpuesta(matriz4):
    """Return the transpose of a rectangular list-of-lists matrix."""
    rows = len(matriz4)
    cols = len(matriz4[0])
    return [[matriz4[j][i] for j in xrange(rows)] for i in xrange(cols)]
print "Original"
print(matriz4)
while True:
print("Bienvenido:")
menu=input("Si deseas ingresar: 1[MatrizxEscalar], 2[suma o resta de matriz], 3[Salir]: ")
if menu==1:
for i in range(10):
a=input("Introduce la matriz: ")
matriz3.append(a)
for i in n:
val2+=i
suma=((matriz3[val2]))
res3.append(suma)
print(res3)
if menu==2:
for i in range(5):
x=input("Introduce el primer numero:" )
y=input("Introduce el primer numero de la segunda matriz:")
matriz.append(x)
matriz2.append(y)
print(matriz)
print(matriz2)
while True:
opcion= input("Introduce: [1]Suma, [2] resta, [3]Salir: ")
if opcion==1:
for i in n:
val+=i
suma= matriz2[val]+matriz[val]
res.append(suma)
print("la matriz resultante fue:"+str(res) )
elif opcion==2:
for i in n:
val1+=i
resta= matriz2[val]-matriz[val]
res2.append(resta)
print("la matriz resultante fue:"+str(res2) )
elif opcion==3:
break
elif menu==3: break
"""
x1=3
y2=3
n=x1*y2
a=[[1,2,3],[7,4,5]]
matriz = [[1]*x1 for i in xrange(y2)]
def print_r(matriz):
for fila in matriz:
print fila
def transpuesta(matriz):
rows = len(matriz)
cols = len(matriz[0])
return [[matriz[j][i] for j in xrange(rows)] for i in xrange(cols)]
print "Original"
print_r(matriz)
print "TRANSPUESTA"
print_r(transpuesta(matriz))
print_r(a)
"""
|
# _*_ coding: utf-8 _*_
# @Time : 2021/04/14 17:02:35
# @FileName: mlp.py
# @Author : handy
# @Software: VSCode
import tensorflow as tf
import pandas as pd
data = pd.read_csv('../data/Advertising.csv')
# Features are the middle columns; the target is the last column.
X, Y = data.iloc[:, 1:-1], data.iloc[:, -1]
model = tf.keras.Sequential(
    [
        # Input size equals the dimensionality of X (i.e. the number of feature columns).
        tf.keras.layers.Dense(10, input_shape=(3,), activation='relu',),
        tf.keras.layers.Dense(1)
    ]
)
model.summary()
model.compile(optimizer='Adam', loss='mse')
model.fit(X, Y, epochs=500)
# Predict on the first ten rows as a sanity check.
pre_data = data.iloc[:10, 1:-1]
print(model.predict(pre_data))
# Copying Holly Grimm's solution https://github.com/hollygrimm/cs294-homework/blob/master/hw1/bc.py
# Copy and pasting and merging it into a copy of my behavior_cloner.py code.
import argparse
import pickle
import os
import sys
import tensorflow.compat.v1 as tf
import numpy as np
from sklearn.model_selection import train_test_split
import mlflow.tensorflow
import gym
from gym import wrappers
from tqdm import tqdm
#Imports copied from hollygrimm's solution
import logging
from hollygrimm_model import Model
# The following doesn't seem to work with the way Holly Grimm builds her tensorflow model.
# NOTE(review): left enabled anyway — verify whether autologging records anything useful here.
mlflow.tensorflow.autolog()
def config_logging(log_file):
    """Configure the root logger to write DEBUG-level messages to ``log_file``.

    Args:
        log_file: path of the log file; any previous contents are discarded.

    Returns:
        the configured root logger.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    # mode='w' truncates any existing file, replacing the original
    # os.path.exists()/os.remove() dance (which was also race-prone).
    fh = logging.FileHandler(log_file, mode='w')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    return logger
def create_model(session, obs_samples, num_observations, num_actions, logger, optimizer,
                 learning_rate, restore, checkpoint_dir):
    """Build the behavior-cloning Model, restoring weights when asked.

    With restore=True, loads a checkpoint from checkpoint_dir; otherwise
    initializes all TF variables from scratch in the given session.
    """
    model = Model(obs_samples, num_observations, num_actions, checkpoint_dir, logger,
                  optimizer, learning_rate)
    if not restore:
        logger.info("Created model with fresh parameters")
        session.run(tf.global_variables_initializer())
    else:
        model.load(session)
    return model
def bc(expert_data_filename, env_name, restore, results_dir, max_timesteps=None,
       optimizer='adam', num_epochs=100, learning_rate=.001, batch_size=32, keep_prob=1):
    """Train a behavior-cloning policy on pickled expert rollouts.

    Loads (observation, action) pairs from expert_data_filename, fits the
    Model by supervised learning for num_epochs, validates (checkpointing
    on improvement) each epoch, then records 10 evaluation episodes with a
    gym Monitor and returns (mean, std) of their rewards.

    NOTE(review): uses the module-level `logger` created in __main__
    instead of taking it as a parameter — confirm before calling this
    from any other entry point.
    """
    # Reset TF env
    tf.reset_default_graph()
    # Create a gym env.
    env = gym.make(env_name)
    # Cap episode length at the env's spec unless the caller overrides it.
    max_steps = max_timesteps or env.spec.max_episode_steps
    # Expert data: a dict holding 'observations' and 'actions'.
    with open(expert_data_filename, 'rb') as f:
        data = pickle.loads(f.read())
    obs = np.stack(data['observations'], axis=0)
    # squeeze drops the singleton dimension the expert saved per action.
    actions = np.squeeze(np.stack(data['actions'], axis=0))
    # 80/20 train/validation split.
    x_train, x_test, y_train, y_test = train_test_split(obs, actions, test_size=0.2)
    num_samples = len(x_train)
    min_val_loss = sys.maxsize
    with tf.Session() as session:
        model = create_model(session, x_train, x_train.shape[1], y_train.shape[1], logger,
                             optimizer, learning_rate, restore, results_dir)
        file_writer = tf.summary.FileWriter(results_dir, session.graph)
        #file_writer = tf.summary.FileWriter(results_dir, session.graph)
        for epoch in tqdm(range(num_epochs)):
            # Reshuffle the training data every epoch.
            perm = np.random.permutation(x_train.shape[0])
            obs_samples = x_train[perm]
            action_samples = y_train[perm]
            loss = 0.
            # Mini-batch updates over the shuffled samples.
            for k in range(0, obs_samples.shape[0], batch_size):
                batch_loss, training_scalar = model.update(session, obs_samples[k:k + batch_size],
                                                           action_samples[k:k + batch_size],
                                                           keep_prob)
                loss += batch_loss
            # Only the last batch's training summary is logged per epoch.
            file_writer.add_summary(training_scalar, epoch)
            # Validation checkpoints the model when val loss improves.
            min_val_loss, validation_scalar = validate(model, logger, session, x_test, y_test,
                                                       epoch, batch_size, min_val_loss, results_dir)
            file_writer.add_summary(validation_scalar, epoch)
            # Test the updated model after each epoch of training the DNN.
            new_exp = model.test_run(session, env, max_steps)
            tqdm.write(
                "Epoch %3d; Loss %f; Reward %f; Steps %d" % (epoch, loss / num_samples,
                                                             new_exp['reward'], new_exp['steps']))
        # Write a video of the final gym test results.
        env = wrappers.Monitor(env, results_dir, force=True)
        results = []
        for _ in tqdm(range(10)):
            results.append(model.test_run(session, env, max_steps)['reward'])
        logger.info("Reward mean and std dev with behavior cloning: %f(%f)" % (np.mean(results),
                                                                               np.std(results)))
        mlflow.log_params({"reward_mean": np.mean(results), "reward_std": np.std(results)})
        return np.mean(results), np.std(results)
def validate(model, logger, session, x_test, y_test, num_epoch, batch_size, min_loss, checkpoint_dir):
    """Run one validation pass; checkpoint the model if the loss improved.

    Returns (best_loss_so_far, validation_summary_scalar).
    """
    losses = []
    loss, validation_scalar = model.validate(session, x_test, y_test)
    losses.append(loss)
    mean_loss = sum(losses) / len(losses)
    logger.info("Finished epoch %d, average validation loss = %f" % (num_epoch, mean_loss))
    # Persist a checkpoint only when validation loss drops.
    if mean_loss < min_loss:
        model.save(session)
        min_loss = mean_loss
    return min_loss, validation_scalar
if __name__ == "__main__":
    # CLI: positional mlflow run id of the expert rollout run, plus
    # training hyper-parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument('expert_run_id', type=str)
    parser.add_argument('--num_epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=32)
    # NOTE(review): argparse type=bool makes any non-empty string truthy
    # ("--restore False" is True) — confirm this is the intended usage.
    parser.add_argument("--restore", type=bool, default=False)
    args = parser.parse_args()
    # Record every CLI argument as an mlflow run parameter.
    for k, v in vars(args).items():
        mlflow.log_param(k, v)
    if not os.path.exists('results'):
        os.makedirs('results')
    log_file = os.path.join(os.getcwd(), 'results', 'train_out.log')
    # Module-level logger; bc() and validate() read it globally.
    logger = config_logging(log_file)
    #env_models = [('Ant-v1', 'data/Ant-v1_data_250_rollouts.pkl', 'experts/Ant-v1.pkl', 250),
    # ('HalfCheetah-v1', 'data/HalfCheetah-v1_data_10_rollouts.pkl', 'experts/HalfCheetah-v1.pkl', 10),
    # ('Hopper-v1', 'data/Hopper-v1_data_10_rollouts.pkl', 'experts/Hopper-v1.pkl', 10),
    # ('Humanoid-v1', 'data/Humanoid-v1_data_250_rollouts.pkl', 'experts/Humanoid-v1.pkl', 250),
    # ('Reacher-v1', 'data/Reacher-v1_data_250_rollouts.pkl', 'experts/Reacher-v1.pkl', 250),
    # ('Walker2d-v1', 'data/Walker2d-v1_data_10_rollouts.pkl','experts/Walker2d-v1.pkl', 10)
    # ]
    #for env_name, rollout_data, expert_policy_file, num_rollouts in env_models :
    # ===================================================
    # read in dataset from expert policy rollouts.
    # Download the expert run's artifacts and locate the rollout pickle
    # registered under the "expert_data_file" artifact path.
    mlflow_c = mlflow.tracking.MlflowClient()
    expert_data_file_base = mlflow_c.download_artifacts(args.expert_run_id, "")
    expert_data_file_rel_path = mlflow_c.list_artifacts(args.expert_run_id, "expert_data_file")[
        0].path
    expert_data_filename = expert_data_file_base + "/" + expert_data_file_rel_path
    print("opening {0}".format(expert_data_filename))
    # The gym env name is taken from the expert run's logged parameters.
    env_name = mlflow_c.get_run(args.expert_run_id).data.params["envname"]
    bc_results_dir = os.path.join(os.getcwd(), 'results', env_name, 'bc')
    bc_reward_mean, bc_reward_std = bc(expert_data_filename, env_name, args.restore, bc_results_dir,
                                       batch_size=args.batch_size, num_epochs=args.num_epochs)
    logger.info('Behavior Cloning mean & std rewards: %f(%f))' %
                (bc_reward_mean, bc_reward_std))
    print("logging 'results' directory to mlflow.")
    mlflow.log_artifacts('results')
    # Commenting out dagger for now.
    #da_results_dir = os.path.join(os.getcwd(), 'results', env_name, 'da')
    #if not os.path.exists(da_results_dir):
    # os.makedirs(da_results_dir)
    #_,_, da_mean,da_std = dagger(rollout_data, expert_policy_file, env_name, args.restore, da_results_dir, num_rollouts)
    #results.append((env_name, ex_mean, ex_std, bc_mean, bc_std, da_mean, da_std))
    #for env_name, ex_mean, ex_std, bc_mean, bc_std, da_mean, da_std in results :
    # logger.info('Env: %s, Expert: %f(%f), Behavior Cloning: %f(%f), Dagger: %f(%f)'%
    # (env_name, ex_mean, ex_std, bc_mean, bc_std, da_mean, da_std))
|
'''
Created on 11-Dec-2018

@author: prasannakumar

Practice notes while following the freeCodeCamp Python course
(https://www.youtube.com/watch?v=rfscVS0vtbw, about 4:30 hours long).
'''
#from StdSuites.AppleScript_Suite import string
#from idlelib.ReplaceDialog import replace
#from __builtin__ import str
from math import *
print('hello world')
# --- Variables ---
integer_value = -30
string_value = 'prasanna kumar'
# Python cannot concatenate an integer directly with a string;
# the following line would raise a TypeError:
#print("test" + integer_value + string_value)
integer_value2 = str(30)
print(integer_value2 + " "+ string_value)
# Convert numbers with str() before concatenating.
print(str(integer_value)+" "+string_value)
# Reassignment: statements after the update see the new value.
string_value ='my name is prasanna kumar'
print(str(integer_value) + string_value)
# "\" is the escape character (applies to the character right after it).
print("prasanna\"kumar")
# \n inserts a new line.
print("prasanna\nkumar")
# A few common string and math built-ins:
print(len(string_value))
print(string_value[5])
print(string_value.index("a"))
print(string_value.replace("prasanna", str(23)))
print(abs(integer_value))
print(pow(2, 3))
print("the round off of the number---> "+str(round(234.231)))
print(floor(3.9))
print(ceil(9.1))
# Basic console input (returns strings).
my_name = input("enter your name:")
my_age = input("my age is:")
print("hello "+ my_name + my_age)
|
import tempfile
from pathlib import Path
import numpy as np
import tifffile
from PIL import Image
from xicam.common_ingestors import generic
class TestGeneric:
    """Tests for the xicam generic file ingestor (TIFF and JPEG paths).

    Each test writes a small image to a temp directory, runs
    generic.ingest on it, and checks the emitted bluesky-style document
    stream plus the round-tripped pixel data.
    """

    def test_tiff(self):
        # Write a small TIFF file
        test_data, _, _ = np.mgrid[0:30, 0:40, 0:50]
        # little-endian uint16 — a dtype the ingestor must preserve
        test_data = test_data.astype("<u2")
        tmp = tempfile.TemporaryDirectory()
        file_path = Path(tmp.name) / Path("temp_tif.tif")
        tifffile.imsave(
            file_path, test_data, imagej=True, resolution=(0.2, 0.2), metadata={"unit": "um"}
        )
        # TODO test metadata
        # Make sure we generated a valid doc
        docs = list(generic.ingest([str(file_path)]))
        # One document per lifecycle stage, in order.
        assert len(docs) == 4
        expected_keys = ["start", "descriptor", "event", "stop"]
        for i, doc in enumerate(docs):
            assert doc[0] == expected_keys[i]
        # Try to read the event data
        dask_array = docs[2][1]["data"]["raw"]  # type: dask.array
        event_data = dask_array.compute()
        assert np.array_equal(event_data, test_data)

    def test_jpeg(self):
        # Create a test image (8-bit grayscale, constant value 100)
        image = Image.new("L", (10, 20), color=100)
        tmp = tempfile.TemporaryDirectory()
        file_path = Path(tmp.name) / Path("temp_jpeg.jpeg")
        image.save(file_path)
        # Ingest
        docs = list(generic.ingest([str(file_path)]))
        # Check that the document is valid
        assert len(docs) == 4
        expected_keys = ["start", "descriptor", "event", "stop"]
        for i, _ in enumerate(docs):
            assert docs[i][0] == expected_keys[i]
        # Try to read the event data
        dask_array = docs[2][1]["data"]["raw"]  # type: dask.array
        event_data = dask_array.compute()
        # The ingestor does a dask_array.stack, so we need to squeeze off the extra dimension
        assert np.array_equal(event_data.squeeze(), np.asarray(image))
|
import sys
input = sys.stdin.readline
num = 8
def check_right(start, direction):
    """Rotate gear *start* and propagate rightwards (gears 2..4).

    Recurses to the next gear first (with the direction flipped) whenever
    the touching teeth have different poles (meet == -1), so the farthest
    affected gear rotates before nearer ones. Mutates the module-level
    `tob` tooth lists; stops when the poles match (meet == 1) or past
    gear 4. direction == 1 means clockwise, -1 counter-clockwise.
    """
    if start > 4 or meet[start - 1] == 1:
        return
    if meet[start - 1] == -1:
        check_right(start + 1, -direction)
    if direction == -1:
        # counter-clockwise: move the last tooth to the front
        tob[start].insert(0, tob[start].pop())
    else:
        # clockwise: move the first tooth to the back
        tob[start].append(tob[start].pop(0))
def check_left(start, direction):
    """Rotate gear *start* and propagate leftwards (gears 3..1).

    Mirror image of check_right: recurses to the previous gear with the
    direction flipped while the touching teeth differ (meet == -1), then
    rotates this gear in place by mutating the module-level `tob` lists.
    """
    if start < 1 or meet[start] == 1:
        return
    if meet[start] == -1:
        check_left(start - 1, -direction)
    if direction == -1:
        # counter-clockwise: move the last tooth to the front
        tob[start].insert(0, tob[start].pop())
    else:
        # clockwise: move the first tooth to the back
        tob[start].append(tob[start].pop(0))
# tob[i] holds the 8 teeth of gear i (1-based); index 2 is the right
# contact tooth, index 6 the left contact tooth.
tob = [0 for i in range(5)]
# meet[j] == 1 when gears j and j+1 touch with equal teeth (no spin
# transfer), -1 when they differ (the neighbor spins oppositely).
meet = [0 for i in range(4)]
result = 0
# Read the four gears, one 8-digit line each.
for i in range(1, 5):
    tempt = input().strip()
    tob[i] = list(map(int, str(tempt)))
# K rotations: each line is "gear_number direction".
K = int(input())
turn = [0 for i in range(K)]
for i in range(K):
    turn[i] = list(map(int, input().split()))
for i in range(K):
    t_num = turn[i][0]
    direction = turn[i][1]
    # Snapshot, for each adjacent pair, whether the touching teeth match
    # BEFORE any gear moves this turn.
    for j in range(1, 4):
        if tob[j][2] == tob[j+1][6]:
            meet[j] = 1
        else:
            meet[j] = -1
    # Propagate outwards first, then rotate the chosen gear itself.
    check_right(t_num+1, direction)
    check_left(t_num-1, direction)
    if direction == 1:
        temp = tob[t_num].pop()
        tob[t_num].insert(0, temp)
    else:
        temp = tob[t_num].pop(0)
        tob[t_num].append(temp)
# Score: tooth 0 of gear i contributes with weight 2**(i-1).
result = tob[1][0]*1 + tob[2][0]*2 + tob[3][0]*4 + tob[4][0] * 8
print(result)
# contacts: gear1 tooth2 <-> gear2 tooth6; gear2 tooth2 <-> gear3 tooth6; gear3 tooth2 <-> gear4 tooth6
import json
import uuid
from asgiref.sync import sync_to_async
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from chat.models import Message, ChatGroup, MessagesGroups
class TicTacToeConsumer(AsyncJsonWebsocketConsumer):
    """Websocket consumer relaying tic-tac-toe game and chat events.

    Each room code maps to one channel-layer group; MOVE/START/END events
    are broadcast unchanged, CHAT events are persisted to the database
    first and then broadcast with extra metadata.
    """

    async def connect(self):
        """Join the group named after the room code in the URL route."""
        self.room_name = self.scope['url_route']['kwargs']['room_code']
        self.room_group_name = 'room_%s' % self.room_name
        # Join room group
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )
        await self.accept()

    async def disconnect(self, close_code):
        """Leave the room group when the socket closes."""
        print("Disconnected")
        # Leave room group
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    @staticmethod
    @sync_to_async
    def _store_chat_message(message, send_by, room_name):
        """Persist a chat message and attach it to its chat group.

        Wrapped in sync_to_async (imported at module level but previously
        unused) because the Django ORM is synchronous and must not be
        called directly on the event loop.
        """
        model = Message(message=message, send_by=send_by)
        model.save()
        group = ChatGroup.objects.filter(id=int(room_name))[0]
        MessagesGroups(message=model, group=group).save()

    async def receive(self, text_data):
        """
        Receive message from WebSocket.
        Get the event and send the appropriate event
        """
        response = json.loads(text_data)
        event = response.get("event", None)
        message = response.get("message", None)
        # MOVE / START / END are pure relays: broadcast the payload as-is.
        if event in ('MOVE', 'START', 'END'):
            await self.channel_layer.group_send(self.room_group_name, {
                'type': 'send_message',
                'message': message,
                'event': event
            })
        if event == 'CHAT':
            # Persist before broadcasting; the ORM work runs off-loop.
            await self._store_chat_message(message, response.get("send_by", None), self.room_name)
            mymessage = {}
            mymessage['id'] = response.get("id", None)
            mymessage['message'] = message
            # NOTE(review): sender id is hard-coded to 1 — confirm intent.
            mymessage['send_by'] = 1
            mymessage['event'] = "CHAT"
            mymessage['group_id'] = int(self.room_name)
            await self.channel_layer.group_send(self.room_group_name, {
                'type': 'send_message',
                'message': mymessage,
                'event': "CHAT"
            })

    async def send_message(self, res):
        """ Receive message from room group """
        # Send message to WebSocket
        await self.send(text_data=json.dumps({
            "payload": res,
        }))
# /Users/nestap/Downloads/Django-channels-Tic-Tac-Toe-main/chat/admin.py |
import dataclasses
import pickle
import h5py
import numba
import numpy as np
import sklearn
import sklearn.preprocessing
import mabe.config
@dataclasses.dataclass
class TrainingBatch:
    """One training batch of variable-length sequence samples.

    Sequence-valued fields use numba.typed.List because downstream
    numba-compiled code consumes them; each element is a per-sample
    np.array. `indices` and `clf_tasks` are plain per-sample arrays.
    """
    X: numba.typed.List  # [np.array] per-sample feature sequences
    X_extra: numba.typed.List  # [np.array] optional extra features (may be None)
    Y: numba.typed.List  # [np.array] per-sample label sequences
    indices: np.array
    annotators: numba.typed.List  # [np.array] annotator id repeated per frame
    clf_tasks: np.array
    Y_dark_behaviors: numba.typed.List  # [np.array] dark-knowledge behavior targets
    Y_dark_annotators: numba.typed.List  # [np.array] dark-knowledge annotator targets
class DataWrapper:
    """Load the features HDF5 file and expose labeled + unlabeled data.

    Reads train (labeled) and test (unlabeled) groups, synthesizes -1
    placeholder targets where the labeled side has none, concatenates
    both sides into combined views (X, Y, annotators, clf_tasks) and
    standardizes all features with a single StandardScaler fit on the
    full corpus.
    """

    def __init__(self, feature_path):
        # Fixed behavior label set for the primary classification task.
        self.vocabulary = ["attack", "investigation", "mount", "other"]
        with h5py.File(feature_path, "r") as hdf:

            def load_all(groupname):
                # Materialize every dataset in the group as a float32 array.
                return list(map(lambda v: v[:].astype(np.float32), hdf[groupname].values()))

            self.X_labeled = load_all("train/x")
            self.Y_labeled = load_all("train/y")
            # v[()] reads a scalar dataset value.
            self.annotators_labeled = list(map(lambda v: v[()], hdf["train/annotators"].values()))
            self.clf_tasks_labeled = np.array(
                list(map(lambda v: int(v[()]), hdf["train/clf_tasks"].values()))
            )
            self.X_unlabeled = load_all("test/x")
            self.Y_unlabeled = load_all("test/y")
            self.Y_unlabeled_dark_behaviors = load_all("test/y_dark_behaviors")
            num_dark_behaviors = self.Y_unlabeled_dark_behaviors[0].shape[1]
            num_dark_behaviors_classes = self.Y_unlabeled_dark_behaviors[0].shape[2]
            # Labeled samples have no dark-knowledge targets: fill with -1
            # placeholders of matching shape so concatenation lines up.
            self.Y_labeled_dark_behaviors = [
                np.full(
                    (len(y), num_dark_behaviors, num_dark_behaviors_classes), -1, dtype=np.float32
                )
                for y in self.Y_labeled
            ]
            self.Y_unlabeled_dark_annotators = load_all("test/y_dark_annotators")
            num_dark_annotators = self.Y_unlabeled_dark_annotators[0].shape[1]
            num_dark_annotators_classes = self.Y_unlabeled_dark_annotators[0].shape[2]
            self.Y_labeled_dark_annotators = [
                np.full(
                    (len(y), num_dark_annotators, num_dark_annotators_classes), -1, dtype=np.float32
                )
                for y in self.Y_labeled
            ]
            # Unlabeled samples get sentinel annotator/task ids of -1.
            self.annotators_unlabeled = [-1] * len(self.X_unlabeled)
            self.clf_tasks_unlabeled = np.array([-1] * len(self.X_unlabeled))
            # Extra features are optional in the HDF5 layout.
            try:
                self.X_labeled_extra = load_all("train/x_extra")
                self.X_unlabeled_extra = load_all("test/x_extra")
            except KeyError:
                self.X_labeled_extra = None
                self.X_unlabeled_extra = None
            self.groups_unlabeled = list(map(lambda v: v[()], hdf["test/groups"].values()))
        # Combined labeled + unlabeled views (labeled first).
        self.X = self.X_labeled + self.X_unlabeled
        self.Y = self.Y_labeled + self.Y_unlabeled
        self.Y_dark_behaviors = self.Y_labeled_dark_behaviors + self.Y_unlabeled_dark_behaviors
        self.Y_dark_annotators = self.Y_labeled_dark_annotators + self.Y_unlabeled_dark_annotators
        self.annotators = self.annotators_labeled + self.annotators_unlabeled
        self.num_annotators = len(np.unique(self.annotators_labeled))
        self.clf_tasks = np.concatenate((self.clf_tasks_labeled, self.clf_tasks_unlabeled))
        self.num_clf_tasks = len(np.unique(self.clf_tasks))
        if self.X_labeled_extra is not None:
            self.X_extra = self.X_labeled_extra + self.X_unlabeled_extra
            self.num_extra_features = self.X_extra[0].shape[-1]
        else:
            self.X_extra = None
            self.num_extra_features = 0
        # Standardize features using statistics from the full corpus.
        scaler = sklearn.preprocessing.StandardScaler().fit(np.concatenate(self.X))
        self.X = list(map(lambda x: scaler.transform(x), self.X))
        self.X_labeled = list(map(lambda x: scaler.transform(x), self.X_labeled))
        self.X_unlabeled = list(map(lambda x: scaler.transform(x), self.X_unlabeled))
        # Per-sample sequence lengths, used for draw probabilities.
        self.sample_lengths = np.array(list(map(len, self.X)))
class CVSplit:
    """One cross-validation split over the DataWrapper samples.

    Loads precomputed index sets from split_{split_idx}.pkl and derives
    length-proportional draw probabilities for every subset, so longer
    sequences are sampled more often.
    """

    def __init__(self, split_idx, data):
        # Use a context manager so the pickle file handle is closed
        # (the original left `open(...)` dangling).
        with open(mabe.config.ROOT_PATH / f"split_{split_idx}.pkl", "rb") as fh:
            split = pickle.load(fh)
        self.indices_labeled = split["indices_labeled"]
        self.indices_unlabeled = split["indices_unlabeled"]
        self.indices = split["indices"]
        self.train_indices_labeled = split["train_indices_labeled"]
        self.train_indices_unlabeled = split["train_indices_unlabeled"]
        self.train_indices = split["train_indices"]
        self.val_indices_labeled = split["val_indices_labeled"]
        self.val_indices_unlabeled = split["val_indices_unlabeled"]
        self.val_indices = split["val_indices"]
        self.data = data
        self.calculate_draw_probs()

    def calculate_draw_probs(self):
        """Precompute draw probabilities proportional to sample length."""
        sample_lengths = self.data.sample_lengths

        def normed(idx):
            # Probability of drawing each sample in `idx`, proportional
            # to its sequence length.
            lengths = sample_lengths[idx]
            return lengths / np.sum(lengths)

        self.p_draw = sample_lengths / np.sum(sample_lengths)
        self.p_draw_labeled = normed(self.indices_labeled)
        self.p_draw_unlabeled = normed(self.indices_unlabeled)
        self.p_draw_train_labeled = normed(self.train_indices_labeled)
        self.p_draw_train_unlabeled = normed(self.train_indices_unlabeled)
        self.p_draw_train = normed(self.train_indices)
        self.p_draw_val_labeled = normed(self.val_indices_labeled)
        self.p_draw_val_unlabeled = normed(self.val_indices_unlabeled)
        self.p_draw_val = normed(self.val_indices)

    def get_train_batch(
        self, batch_size, random_noise=0.0, extra_features=False, dark_knowledge=False
    ):
        """Assemble one TrainingBatch for semi-supervised training.

        Composition: ~25% labeled train samples, at least one labeled
        sample per classifier task, remainder unlabeled samples.
        Optional Gaussian noise with scale `random_noise` is added to X.
        """

        def random_task_train_index(task):
            # One labeled training sample for `task`, drawn with
            # length-proportional probability.
            task_train_indices = self.train_indices_labeled[
                self.data.clf_tasks[self.train_indices_labeled] == task
            ]
            # np.float was removed in NumPy 1.24; np.float64 is what the
            # old alias resolved to.
            task_p_draw = self.data.sample_lengths[task_train_indices].astype(np.float64)
            task_p_draw /= np.sum(task_p_draw)
            return np.array([np.random.choice(task_train_indices, p=task_p_draw)])

        indices_batch_unlabeled = np.random.choice(
            self.indices_unlabeled,
            size=int(0.75 * batch_size - self.data.clf_tasks.max() - 1),
            p=self.p_draw_unlabeled,
        )
        # at least one sample per task
        indices_batch = np.concatenate(
            (
                np.random.choice(
                    self.train_indices_labeled,
                    size=int(0.25 * batch_size),
                    p=self.p_draw_train_labeled,
                ),
                *[
                    random_task_train_index(task)
                    for task in range(0, self.data.clf_tasks.max() + 1)
                ],
                indices_batch_unlabeled,
            )
        )
        # No validation samples may leak into the training batch.
        assert np.all([i not in self.val_indices_labeled for i in indices_batch])

        X_batch = numba.typed.List()
        augment = lambda x: x + np.random.randn(*x.shape) * random_noise
        if random_noise > 0:
            [X_batch.append(augment(self.data.X[i])) for i in indices_batch]
        else:
            [X_batch.append(self.data.X[i]) for i in indices_batch]

        Y_batch = numba.typed.List()
        [Y_batch.append(self.data.Y[i].astype(int)) for i in indices_batch]

        if dark_knowledge:
            Y_batch_dark_behaviors = numba.typed.List()
            [
                Y_batch_dark_behaviors.append(self.data.Y_dark_behaviors[i].astype(np.float32))
                for i in indices_batch
            ]
            Y_batch_dark_annotators = numba.typed.List()
            [
                Y_batch_dark_annotators.append(self.data.Y_dark_annotators[i].astype(np.float32))
                for i in indices_batch
            ]
        else:
            Y_batch_dark_behaviors = None
            Y_batch_dark_annotators = None

        # Per-frame annotator id, repeated to each sample's length.
        annotators_batch = numba.typed.List()
        [
            annotators_batch.append(np.array([self.data.annotators[i]]).repeat(len(y)))
            for i, y in zip(indices_batch, Y_batch)
        ]

        clf_tasks_batch = self.data.clf_tasks[indices_batch]
        for i, task in enumerate(clf_tasks_batch):
            if task > 0:
                # Only annotator 0 labels the non-default tasks.
                assert np.all(annotators_batch[i] == 0)

        X_extra_batch = None
        if extra_features:
            X_extra_batch = numba.typed.List()
            [X_extra_batch.append(self.data.X_extra[i].astype(int)) for i in indices_batch]

        assert np.all([i not in self.val_indices_labeled for i in indices_batch])
        # Unlabeled samples must carry negative (missing) labels.
        assert np.all(
            [
                Y_batch[i].max() < 0
                for i in range(
                    len(indices_batch) - len(indices_batch_unlabeled), len(indices_batch)
                )
            ]
        )

        return TrainingBatch(
            X_batch,
            X_extra_batch,
            Y_batch,
            indices_batch,
            annotators_batch,
            clf_tasks_batch,
            Y_batch_dark_behaviors,
            Y_batch_dark_annotators,
        )
|
# This is kept in 0.97.0 and then will be removed
from .template_tools import (
get_template_amplitudes,
get_template_extremum_channel,
get_template_extremum_channel_peak_shift,
get_template_extremum_amplitude,
get_template_channel_sparsity
)
from .template_metrics import (TemplateMetricsCalculator, compute_template_metrics,
calculate_template_metrics, get_template_metric_names)
from .template_similarity import (TemplateSimilarityCalculator,
compute_template_similarity,
check_equal_template_with_distribution_overlap)
from .principal_component import WaveformPrincipalComponent, compute_principal_components
from .spike_amplitudes import compute_spike_amplitudes, SpikeAmplitudesCalculator
from .correlograms import (CorrelogramsCalculator,
compute_autocorrelogram_from_spiketrain,
compute_crosscorrelogram_from_spiketrain,
compute_correlograms, correlogram_for_one_segment,
compute_correlograms_numba, compute_correlograms_numpy)
from .isi import (ISIHistogramsCalculator,
compute_isi_histograms_from_spiketrain, compute_isi_histograms,
compute_isi_histograms_numpy, compute_isi_histograms_numba)
from .spike_locations import compute_spike_locations, SpikeLocationsCalculator
from .unit_localization import (compute_unit_locations, UnitLocationsCalculator,
compute_center_of_mass)
from .alignsorting import align_sorting, AlignSortingExtractor
from .noise_level import compute_noise_levels, NoiseLevelsCalculator |
class Point:
    """A point on the 2-D coordinate plane."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
def distance(point1, point2) -> float:
    """Return the Euclidean distance between two points, rounded to 2 decimals."""
    dx = point1.x - point2.x
    dy = point1.y - point2.y
    return round((dx * dx + dy * dy) ** 0.5, 2)
# Given: two points on the coordinate plane.
point1 = Point(2, 4)
point2 = Point(5, -2)
# Task: find the distance between these points by implementing and
# using the distance() function.
print("Расстояние между точками = ", distance(point1, point2))
|
import tweepy
import os

# Credentials are read from the environment instead of being hard-coded.
# SECURITY: the keys/tokens previously embedded in this file were
# committed to source control and must be revoked and rotated.
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_token = os.environ['TWITTER_ACCESS_TOKEN']
access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']

# OAuth API setup
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# Text of the post for the Twitter account
tweet = 'Пробная запись!'
api.update_status(status=tweet)
print ('Запись добавлена успешно!')
|
# Import the required library packages
from keras.applications.resnet import ResNet50
from keras.applications.inception_v3 import InceptionV3
from keras.applications.xception import Xception
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications import imagenet_utils
from keras.applications.inception_v3 import preprocess_input
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
import numpy as np
import argparse
import cv2
from imutils import paths
from datasets import simpledatasetloader
from preprocessing import imagetoarraypreprocessor
from preprocessing import simplepreprocessor
# Usage: python imagenet_pretrained.py -i <path/filename> [-m vgg16]
# Build the command-line argument parser.
# Note: vgg16 is the default, i.e. without "-m <model>" vgg16 is used.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,help="Đường dẫn ảnh đầu vào để dự đoán")
ap.add_argument("-m", "--model", type=str, default="vgg16",help="Tên của model pre-trained")
args = vars(ap.parse_args())
# Dictionary mapping model names to their Keras model classes.
MODELS = {
    "vgg16": VGG16,
    "vgg19": VGG19,
    "inception": InceptionV3,
    "xception": Xception,
    "resnet": ResNet50
}
# Make sure the model name given on the command line is supported.
if args["model"] not in MODELS.keys():
    raise AssertionError("Tên model không có trong từ điển")
# Default input size (224x224), used by the VGG/ResNet family.
inputShape = (224, 224)
preprocess = imagenet_utils.preprocess_input
# InceptionV3 and Xception expect (299x299) inputs and their own preprocessing.
if args["model"] in ("inception", "xception"):
    inputShape = (299, 299)
    preprocess = preprocess_input
# Load the network weights.
# (Note: the first run of a given network downloads its weights —
# 90-575MB depending on the model. They are cached, so subsequent
# runs are much faster.)
print("[INFO] Đang nạp mô hình {} ...".format(args["model"]))
Network = MODELS[args["model"]]
model = Network(weights="imagenet")
print("[INFO] đang nạp và tiền xử lý ảnh ...")
imagePaths = np.array(list(paths.list_images(args["image"])))
sp = simplepreprocessor.SimplePreprocessor(32, 32)  # Resize images to 32 x 32
iap = imagetoarraypreprocessor.ImageToArrayPreprocessor()  # Convert images to arrays
sdl = simpledatasetloader.SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths)
data = data.astype("float") / 255.0
for (i, imagePath) in enumerate(imagePaths):
    # Load and preprocess one image for the selected network.
    image = load_img(imagePath, target_size=inputShape)
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    image = preprocess(image)
    # Predict and classify the image
    print("[INFO] Phân lớp ảnh bằng mô hình '{}'...".format(args["model"]))
    preds = model.predict(image)
    P = imagenet_utils.decode_predictions(preds)  # List of prediction tuples
    # Show some information about the prediction results:
    # - imagenetID: the image's ImageNet ID
    # - label: the class label
    # - prob: the predicted probability
    for (i, (imagenetID, label, prob)) in enumerate(P[0]):
        print("{}. {}: {:.2f}%".format(i + 1, label, prob * 100))
    # After predicting --> display the image with the top label drawn on it
    img = cv2.imread(imagePath)
    (imagenetID, label, prob) = P[0][0]
    cv2.putText(img, "Label: {}".format(label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)
    cv2.imshow("Ket qua phan lop", img)
    cv2.waitKey(0)
|
def bfs(heads, tails):
    """Breadth-first search over (heads, tails) states.

    Moves from state (h, t): (h, t+1) if t >= 1; (h+1, t-2) if t >= 2;
    (h-2, t) if h >= 2. Returns the BFS depth plus one final step on
    reaching the state of exactly 2 heads and 0 tails, or -1 if that
    state is unreachable.
    """
    # Visited set. The original seeded this with set((heads, tails)),
    # which inserts the two *integers*, not the start tuple — harmless
    # only because all membership tests below use tuples. Start empty
    # so states are marked exactly when they are expanded.
    seen = set()
    queue = [(heads, tails, 0)]
    qi = 0
    while qi < len(queue):
        h, t, d = queue[qi]
        qi += 1
        if (h, t) in seen:
            continue
        seen.add((h, t))
        if h == 2 and t == 0:
            return d + 1
        if t >= 1:
            queue.append((h, t + 1, d + 1))
        if t >= 2:
            queue.append((h + 1, t - 2, d + 1))
        if h >= 2:
            queue.append((h - 2, t, d + 1))
    return -1
def solve(heads, tails):
    """Print the move count computed by bfs for one puzzle instance."""
    answer = bfs(heads, tails)
    print(answer)
if __name__ == "__main__":
    # Read "heads tails" pairs until the terminating "0 0" line.
    while True:
        heads, tails = map(int, input().split())
        if heads == 0 and tails == 0:
            break
        solve(heads, tails)
|
'''
############# Need refactor for databas ######################
@app.route('/virtual/api/v1.0/styles',methods=['GET'])
def checkDB():
user_id=request.args.get('user_id')
clothes=models.db.session.query(models.Clothes.style,models.Clothes.user_id).distinct().filter(models.Clothes.user_id==user_id).all()
styles=[]
for clothing in clothes:
styles.append(clothing.style)
return jsonify({'styles':styles})
'''
#Clarfai stuff
'''
###################################Clarifai methods#############################
#returns an an array of possible apparel
#attr
#name-apparelName
#value-confidence
def possibleApparel(appCont,name):
model=appCont.models.get('e0be3b9d6a454f0493ac3a30784001ff')
image = ClImage(file_obj=open(name, 'rb'))
response=model.predict([image])
response=response["outputs"][0]["data"]["concepts"]
item =response
items=[]
items.append(item[0])
items.append(item[2])
items.append(item[3])
return items
@app.route('/virtual/api/v1.0/upload', methods=['POST'])
def sendToClarfai():
#stuff from form can be grabbed by id of the tag
#stuff = request.form['something']
file = request.files['uri']
data = {}
#get working directory
directory_name=os.getcwd()+"/tmp"
print directory_name
#make a filename note need to add extnesion probably only jpg or jpeg at this point less data
filename = secure_filename(file.filename)
#save fiel
file.save(os.path.join(directory_name, filename))
#send to Clarfai API
data["apparel"]=possibleApparel(appClar,directory_name+"/"+file.filename)
# data["styles"]=possibleStyles(appClar,directory_name+"/"+file.filename)
# data["color"]=getColor(appClar,directory_name+"/"+file.filename)
#remove file
os.remove(directory_name+"/"+file.filename)
#does take a little time
#print file.mimetype_params
return jsonify(data)
@app.route('/sendText',methods=['GET'])
def sendText():
# Use sms gateway provided by mobile carrier:
# at&t: number@mms.att.net
# t-mobile: number@tmomail.net
# verizon: number@vtext.com
# sprint: number@page.nextel.com
# Establish a secure session with gmail's outgoing SMTP server using your gmail account4
number=request.args.get('number')
server = smtplib.SMTP( "smtp.gmail.com", 587 )
server.starttls()
server.login( os.getenv('email'), os.getenv('password') )
# Send text message through SMS gateway of destination number
server.sendmail( 'virtualcloset', str(number)+'@mms.att.net', 'hello' )
return "Success"
''''''
#####################################################################################
# MIGRATIONS ABOVE
##################################################################################### |
import sys
sys.path.insert(0,'..')
from Classes.polygon import Polygon
from Classes.rectangle import Rectangle
from Classes.tile import Tile
import math
import pickle
def getROI(filename):
    """Parse the first line of *filename* into a rectangular ROI.

    The line is whitespace-separated; tokens 2..9 hold the x/y pairs of
    the four corner vertices. The first vertex is repeated at the end to
    close the polygon before constructing the Rectangle.
    """
    with open(filename, "r") as fh:
        tokens = fh.readlines()[0].split()
    corners = [
        (float(tokens[2]), float(tokens[3])),
        (float(tokens[4]), float(tokens[5])),
        (float(tokens[6]), float(tokens[7])),
        (float(tokens[8]), float(tokens[9])),
    ]
    corners.append(corners[0])
    return Rectangle(corners)
def getTiles(polygon, tilesize):
    """Cover the polygon's bounding box with a grid of Tile objects.

    The box is expanded outwards to whole multiples of *tilesize*;
    the result is a list of columns, each a list of Tiles.
    """
    xmin, ymin, xmax, ymax = polygon.getBounds()
    col_lo, col_hi = math.floor(xmin / tilesize), math.ceil(xmax / tilesize)
    row_lo, row_hi = math.floor(ymin / tilesize), math.ceil(ymax / tilesize)
    return [
        [Tile(tilesize, i * tilesize, j * tilesize) for j in range(row_lo, row_hi)]
        for i in range(col_lo, col_hi)
    ]
if __name__ == "__main__":
    input_roi_file = input("Enter the name of the parsed xml file that contains the rectangular ROI: ")
    output_pkl_file = input("Enter the name of the file into which the tiles will be saved: ")
    retTiles = getTiles(getROI(input_roi_file), 256)
    # Use a context manager so the pickle output is flushed and closed
    # even if dumping fails (the original never closed the handle).
    with open(output_pkl_file, "wb") as file:
        pickle.dump(retTiles, file)
|
import sqlite3 as lite
import csv
import re
import pandas
import string
con = lite.connect('cs1656.sqlite')
with con:
cur = con.cursor()
########################################################################
### CREATE TABLES ######################################################
########################################################################
# DO NOT MODIFY - START
cur.execute('DROP TABLE IF EXISTS Actors')
cur.execute("CREATE TABLE Actors(aid INT, fname TEXT, lname TEXT, gender CHAR(6), PRIMARY KEY(aid))")
cur.execute('DROP TABLE IF EXISTS Movies')
cur.execute("CREATE TABLE Movies(mid INT, title TEXT, year INT, rank REAL, PRIMARY KEY(mid))")
cur.execute('DROP TABLE IF EXISTS Directors')
cur.execute("CREATE TABLE Directors(did INT, fname TEXT, lname TEXT, PRIMARY KEY(did))")
cur.execute('DROP TABLE IF EXISTS Cast')
cur.execute("CREATE TABLE Cast(aid INT, mid INT, role TEXT)")
cur.execute('DROP TABLE IF EXISTS Movie_Director')
cur.execute("CREATE TABLE Movie_Director(did INT, mid INT)")
# DO NOT MODIFY - END
########################################################################
### READ DATA FROM FILES ###############################################
########################################################################
# actors.csv, cast.csv, directors.csv, movie_dir.csv, movies.csv
# UPDATE THIS
actors = []
with open('actors.csv') as actorFile:
fileReader = csv.reader(actorFile)
for row in fileReader:
actors.append(row)
cast = []
with open('cast.csv') as castFile:
fileReader = csv.reader(castFile)
for row in fileReader:
cast.append(row)
directors = []
with open('directors.csv') as dirFile:
fileReader = csv.reader(dirFile)
for row in fileReader:
directors.append(row)
movieDir = []
with open('movie_dir.csv') as movieDirFile:
fileReader = csv.reader(movieDirFile)
for row in fileReader:
movieDir.append(row)
movies = []
with open('movies.csv') as moviesFile:
fileReader = csv.reader(moviesFile)
for row in fileReader:
movies.append(row)
########################################################################
### INSERT DATA INTO DATABASE ##########################################
########################################################################
# Bulk-insert each CSV-derived row list with parameterized statements.
# Fix: the original built SQL by string concatenation and hand-doubled
# single quotes, which is SQL-injection-prone and breaks on values that
# contain other special characters. executemany() lets the sqlite3
# driver handle all quoting and is faster than row-at-a-time execute().
cur.executemany("INSERT INTO Actors VALUES (?, ?, ?, ?)", actors)
cur.executemany("INSERT INTO Cast VALUES (?, ?, ?)", cast)
cur.executemany("INSERT INTO Directors VALUES (?, ?, ?)", directors)
cur.executemany("INSERT INTO Movie_Director VALUES (?, ?)", movieDir)
cur.executemany("INSERT INTO Movies VALUES (?, ?, ?, ?)", movies)
# Sample rows kept for reference (show the expected column layout):
#cur.execute("INSERT INTO Actors VALUES(1001, 'Harrison', 'Ford', 'Male')")
#cur.execute("INSERT INTO Actors VALUES(1002, 'Daisy', 'Ridley', 'Female')")
#cur.execute("INSERT INTO Movies VALUES(101, 'Star Wars VII: The Force Awakens', 2015, 8.2)")
#cur.execute("INSERT INTO Movies VALUES(102, 'Rogue One: A Star Wars Story', 2016, 8.0)")
#cur.execute("INSERT INTO Cast VALUES(1001, 101, 'Han Solo')")
#cur.execute("INSERT INTO Cast VALUES(1002, 101, 'Rey')")
#cur.execute("INSERT INTO Directors VALUES(5000, 'J.J.', 'Abrams')")
con.commit()
########################################################################
### QUERY SECTION ######################################################
########################################################################
# Maps query key -> SQL text; executed in sorted key order further below.
queries = {}
# DO NOT MODIFY - START
# Debug queries: dump every table verbatim so the loaded CSV data can be
# inspected before the graded q01..q12 queries run.
# DEBUG: all_movies ########################
queries['all_movies'] = '''
SELECT * FROM Movies
'''
# DEBUG: all_actors ########################
queries['all_actors'] = '''
SELECT * FROM Actors
'''
# DEBUG: all_cast ########################
queries['all_cast'] = '''
SELECT * FROM Cast
'''
# DEBUG: all_directors ########################
queries['all_directors'] = '''
SELECT * FROM Directors
'''
# DEBUG: all_movie_dir ########################
queries['all_movie_dir'] = '''
SELECT * FROM Movie_Director
'''
# DO NOT MODIFY - END
########################################################################
### INSERT YOUR QUERIES HERE ###########################################
########################################################################
# NOTE: You are allowed to also include other queries here (e.g.,
# for creating views), that will be executed in alphabetical order.
# We will grade your program based on the output files q01.csv,
# q02.csv, ..., q12.csv
# Q01 ########################
# Actors who appeared both in some 1980-1990 movie and in some movie
# released after 1999.
queries['q01'] = '''
SELECT DISTINCT a.fname, a.lname
FROM Cast c, Actors a
WHERE c.aid = a.aid
AND c.aid IN (SELECT c.aid
FROM Cast c, Movies m
WHERE m.mid = c.mid AND m.year < 1991 AND m.year > 1979)
AND c.aid IN (SELECT c.aid
FROM Cast c, Movies m
WHERE m.mid = c.mid AND m.year > 1999)
ORDER BY a.lname, a.fname
'''
# Q02 ########################
# Movies released the same year as 'Rogue One' but ranked higher.
queries['q02'] = '''
SELECT title, year
FROM Movies
WHERE year IN (SELECT year
FROM Movies
WHERE title = 'Rogue One: A Star Wars Story')
AND rank > (SELECT rank
FROM Movies
WHERE title = 'Rogue One: A Star Wars Story')
ORDER BY Movies.title
'''
# Q03 ########################
# Per-actor count of 'Star Wars' franchise movies they appeared in.
queries['q03'] = '''
SELECT a.fname, a.lname, COUNT(m.title) as numMov
FROM Actors a, Cast c, Movies m
WHERE a.aid = c.aid AND c.mid = m.mid AND title like '%Star Wars%'
GROUP BY a.lname, a.fname
ORDER BY numMov DESC
'''
# Q04 ########################
# Actors with no movie after 1984 (includes actors with no Cast rows).
queries['q04'] = '''
SELECT a.fname, a.lname
FROM Actors a
WHERE NOT a.aid IN (SELECT a2.aid
FROM Actors a2, Cast c2, Movies m2
WHERE a2.aid = c2.aid AND m2.mid = c2.mid AND m2.year > 1984)
ORDER BY a.lname, fname
'''
# Q05 ########################
# Top 20 directors by number of distinct films directed.
queries['q05'] = '''
SELECT d.fname, d.lname, COUNT(DISTINCT md.mid) AS num_films
FROM Directors d, Movie_Director md
WHERE d.did = md.did
GROUP BY d.did
ORDER BY num_films DESC
LIMIT 20
'''
# Q06 ########################
# Movies whose cast size ties or beats the 10th-largest cast overall.
queries['q06'] = '''
SELECT m.title, COUNT(DISTINCT c.aid) AS num_cast
FROM Movies m, Cast c
WHERE c.mid = m.mid
GROUP BY m.mid
HAVING num_cast >= (SELECT MIN(num_cast2)
FROM (SELECT COUNT(c2.aid) AS num_cast2
FROM Movies m2, Cast c2
WHERE c2.mid = m2.mid
GROUP BY m2.mid
ORDER BY num_cast2 DESC
LIMIT 10))
ORDER BY num_cast DESC
'''
# Q07 ########################
# Helper views: Movgender pairs each movie title with the gender of each
# distinct cast member; MovCount then tallies female/male counts per
# title. q07 lists movies with more women than men in the cast.
cur.execute('DROP VIEW IF EXISTS Movgender')
# NOTE(review): GROUP BY references fname/lname, which are not in the
# SELECT list - SQLite permits this, but it relies on SQLite-specific
# behavior; confirm portability is not required.
createView = '''
CREATE VIEW Movgender AS
SELECT m1.title AS title, a.gender AS gender
FROM Actors a, Cast c1, Movies m1
WHERE c1.aid = a.aid AND c1.mid = m1.mid
GROUP BY fname, lname, m1.title
'''
cur.execute(createView)
cur.execute('DROP VIEW IF EXISTS MovCount')
createView2 = '''
CREATE VIEW MovCount AS
SELECT Movgender.title AS title, SUM(CASE WHEN Movgender.gender = "Female" THEN 1 ELSE 0 END) AS F, SUM(CASE WHEN Movgender.gender = "Male" THEN 1 ELSE 0 END) AS M
FROM Movgender
GROUP BY Movgender.title
'''
cur.execute(createView2)
queries['q07'] = '''
SELECT MovCount.title, MovCount.F, MovCount.M
FROM MovCount
WHERE MovCount.F > MovCount.M
ORDER BY MovCount.title
'''
# Q08 ########################
# Actors who have worked with at least 7 distinct directors.
# NOTE(review): grouping by fname alone merges different actors who share
# a first name; GROUP BY aid (or fname, lname) looks intended - confirm
# against the assignment spec before changing.
queries['q08'] = '''
SELECT fname, lname, COUNT(DISTINCT did) AS numDir
FROM Actors NATURAL JOIN Cast NATURAL JOIN Movies NATURAL JOIN Movie_Director
GROUP BY fname
HAVING numDir >= 7
ORDER BY numDir DESC
'''
# Q09 ########################
# Actors named T* counted against movies from the earliest year on file.
# NOTE(review): Actors and Movies are not joined through Cast here, so
# every T* actor is cross-joined with every earliest-year movie; verify
# that an a.aid = c.aid AND c.mid = m.mid join was not intended.
queries['q09'] = '''
SELECT a.fname, a.lname, COUNT(DISTINCT m.title) as numMovies
FROM Actors a , Movies m
WHERE a.fname like 'T%' AND m.year = (SELECT MIN(m2.year)
FROM Actors a2, Cast c2, Movies m2
WHERE a2.aid = c2.aid AND c2.mid = m2.mid)
GROUP BY a.fname, a.lname
ORDER BY numMovies DESC
'''
# Q10 ########################
# (actor, movie) pairs where an actor shares a last name with one of
# that movie's directors.
queries['q10'] = '''
SELECT a.lname, m.title
FROM Actors a
INNER JOIN Cast c ON a.aid = c.aid
INNER JOIN Movies m ON c.mid = m.mid
INNER JOIN Movie_Director md ON c.mid = md.mid
INNER JOIN Directors d ON d.did = md.did
WHERE a.lname = d.lname
ORDER BY a.lname
'''
# creating view to get all actors, cast, movies, movie_director, director that had Kevin Bacon in it
# Q11 ########################
cur.execute('DROP VIEW IF EXISTS KevBacon')
# KevBacon view: every movie (title, mid) Kevin Bacon appeared in.
createView = '''
CREATE VIEW KevBacon AS
SELECT m1.title AS title, m1.mid AS mid
FROM Actors a
INNER JOIN Cast c1 ON c1.aid = a.aid
INNER JOIN Movies m1 ON m1.mid = c1.mid
INNER JOIN Movie_Director md1 ON m1.mid = md1.mid
INNER JOIN Directors d1 ON d1.did = md1.did
WHERE a.fname = "Kevin" AND a.lname = "Bacon"
'''
cur.execute(createView)
# getting all actors whose Bacon number is 2 (acted in same film as actors w/ Bacon number 1)
queries['q11'] = '''
SELECT a2.fname, a2.lname
FROM KevBacon kb
INNER JOIN Cast c1 ON c1.mid = kb.mid
INNER JOIN Actors a1 ON a1.aid = c1.aid
INNER JOIN Cast c2 ON c2.aid = a1.aid
INNER JOIN Movies m1 ON m1.mid = c2.mid
INNER JOIN Cast c3 ON c3.mid = m1.mid
INNER JOIN Actors a2 ON a2.aid = c3.aid
WHERE c3.mid != kb.mid AND a1.aid != c3.aid AND a1.fname != "Kevin" AND a1.lname != "Bacon"
GROUP BY a2.fname, a2.lname
'''
# Q12 ########################
# 20 actors with the highest average movie rank (plus their movie count).
queries['q12'] = '''
SELECT a.fname, a.lname, COUNT(m.mid), AVG(m.rank) AS score
FROM Actors a
INNER JOIN Cast c ON a.aid = c.aid
INNER JOIN Movies m ON c.mid = m.mid
GROUP BY a.aid
ORDER BY score DESC
LIMIT 20
'''
########################################################################
### SAVE RESULTS TO FILES ##############################################
########################################################################
# DO NOT MODIFY - START
# Runs every query in alphabetical key order, echoes each result set to
# stdout, and writes the graded queries (keys matching q0X or q10-q12)
# to <key>.csv. SQL errors are reported but do not abort the run.
for (qkey, qstring) in sorted(queries.items()):
    try:
        cur.execute(qstring)
        all_rows = cur.fetchall()
        print ("=========== ",qkey," QUERY ======================")
        print (qstring)
        print ("----------- ",qkey," RESULTS --------------------")
        for row in all_rows:
            print (row)
        print (" ")
        # Only the graded q01..q12 result sets are persisted to CSV.
        save_to_file = (re.search(r'q0\d', qkey) or re.search(r'q1[012]', qkey))
        if (save_to_file):
            with open(qkey+'.csv', 'w') as f:
                writer = csv.writer(f)
                writer.writerows(all_rows)
                f.close()
            print ("----------- ",qkey+".csv"," *SAVED* ----------------\n")
    except lite.Error as e:
        print ("An error occurred:", e.args[0])
# DO NOT MODIFY - END
|
import psycopg2
import os
from databaseconnection import *
def create_FS():
    """Return the SQL statement that creates the FireStation table."""
    # NOTE(review): "PREFECTUR" and "COTY" look like typos for
    # "PREFECTURE"/"CITY", but the column names are preserved verbatim
    # because existing rows/queries may depend on them.
    columns = [
        "FS_ID integer",
        "NAME text",
        "PASSWORD text",
        "PREFECTUR text",
        "COTY text",
        "ADDRESS text",
        "LATLNG text",
    ]
    return "create table FireStation(" + ", ".join(columns) + ")"
def create_Interview():
    """Return the SQL statement that creates the interview table."""
    columns = [
        "PATIENT_ID integer",
        "DATE timestamp with time zone",
        "LATLNG text",
        "STATE integer",
        "INTERVIEW_SCENARIO_ID integer",
        "INTERVIEW_RECORD text",
        "CARE_IDs integer[]",          # PostgreSQL array columns
        "CARE_IDs_RECOMMEND integer[]",
    ]
    return "create table interview(" + ", ".join(columns) + ")"
def create_Scenario():
    """Return the SQL statement that creates the scenario table."""
    columns = ["SCENARIO_ID integer", "FILENAME text"]
    return "create table scenario(" + ", ".join(columns) + ")"
def create_RecommendCare():
    """Return the SQL statement that creates the RecommendCare table."""
    columns = [
        "PATIENT_ID integer",
        "CARE_IDs_RECOMMEND integer[]",  # PostgreSQL array column
        "COMMENT text",
    ]
    return "create table RecommendCare(" + ", ".join(columns) + ")"
def execute():
    """Create the currently-enabled tables and commit the transaction.

    Relies on the module-level ``cur`` and ``connection`` created in the
    ``__main__`` block.
    """
    # Only RecommendCare is enabled; restore the full list below when
    # rebuilding the whole schema.
    #commands = [create_FS(), create_Scenario(), create_Interview(), create_RecommendCare()]
    pending = [create_RecommendCare()]
    for statement in pending:
        print(statement)
        cur.execute(statement)
    connection.commit()
if __name__ == '__main__':
    # Open the PostgreSQL connection and print its backend PID as a quick
    # connectivity check; uncomment execute() to (re)create the tables.
    connection = getDatabaseConnection()
    print(connection.get_backend_pid())
    cur = connection.cursor()
    #execute()
import discord
from discord.ext import commands
import time
import datetime
import pytz
class GameTime(commands.Cog):
    """Cog that reports the current in-game (Forgotten Realms) time."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    async def time(self, ctx):
        """Displays current game time."""
        # ServerInfo record is expected to contain a 'CityName' key.
        locationName = self.bot.db.get_val("ServerInfo", "")
        embed = discord.Embed(
            title="Current time in {}".format(locationName['CityName']),
            description=get_gametime())
        await ctx.send(embed=embed)
        # Fix: discord.py's rewrite API removes a message with
        # Message.delete(); delete_message() does not exist and raised
        # AttributeError, so the invoking command was never cleaned up.
        # Debug print() calls were also removed.
        await ctx.message.delete()
def suffix(d):
    """Return the English ordinal suffix ('st'/'nd'/'rd'/'th') for *d*."""
    # 11th-13th are irregular: they take 'th' despite ending in 1/2/3.
    if 11 <= d <= 13:
        return 'th'
    last_digit = d % 10
    if last_digit == 1:
        return 'st'
    if last_digit == 2:
        return 'nd'
    if last_digit == 3:
        return 'rd'
    return 'th'
def get_rawtime():
    """Return the current real-world time as a timezone-aware UTC datetime."""
    utc = pytz.timezone('UTC')
    return datetime.datetime.now(utc)
def get_gametime():
    """Return the current in-game date/time as a display string.

    Game time advances 3 in-game days per real day (plus a small shift
    of one day per 8 real hours), anchored at 2020-10-18 UTC. The
    result looks like ``"3:05 PM UTC | 18th of Marpenoth"``.
    """
    months = [
        "Hammer",
        "Alturiak",
        "Ches",
        "Tarsakh",
        "Mirtul",
        "Kythorn",
        "Flamerule",
        "Eleasis",
        "Eleint",
        "Marpenoth",
        "Uktar",
        "Nightal"]
    utc = pytz.timezone('UTC')
    # Fix: the module is imported as ``datetime``, so the class/helpers
    # must be reached as datetime.datetime / datetime.timedelta; the
    # original bare datetime(...) and timedelta(...) raised at runtime
    # (TypeError: 'module' object is not callable / NameError).
    aDate = datetime.datetime(2020, 10, 18, tzinfo=utc)
    bDate = datetime.datetime.now(utc)
    delta = bDate - aDate
    gametime = (datetime.datetime(2020, 10, 18, bDate.hour, bDate.minute, bDate.second)
                + datetime.timedelta(days=delta.days * 3)
                + datetime.timedelta(days=(bDate.hour // 8 - 2)))
    # Convert the 24h clock to a 12h clock with AM/PM.
    if gametime.hour == 0:
        gametime_hour = 12
        time_decor = "AM"
    else:
        gametime_hour = gametime.hour - 12 if gametime.hour > 12 else gametime.hour
        time_decor = "PM" if gametime.hour > 12 else "AM"
    gametime_minute = "0{}".format(gametime.minute) if gametime.minute < 10 else gametime.minute
    return "{}:{} {} UTC | {}{} of {}".format(
        gametime_hour, gametime_minute, time_decor,
        gametime.day, suffix(gametime.day), months[gametime.month - 1])
def setup(bot):
    """discord.py extension hook: register the GameTime cog on *bot*."""
    cog = GameTime(bot)
    bot.add_cog(cog)
|
from requests import get
from json import loads
from bs4 import BeautifulSoup
def get_synonym(word: str) -> str:
    """Return the top thesaurus.com synonym for *word*.

    Scrapes the JSON blob embedded in the page's
    ``window.INITIAL_STATE`` script tag. Falls back to returning the
    input word unchanged when the network call fails, the page layout
    changes, or no synonym is listed.
    """
    try:
        html_doc = get(f'https://www.thesaurus.com/browse/{word}')
        soup = BeautifulSoup(html_doc.content.decode(html_doc.encoding), 'lxml')
        # This is the start of the script tag that contains the synonyms.
        syn_list_identifier = 'window.INITIAL_STATE = '
        synonyms_script_tag = next(
            (tag for tag in soup('script') if syn_list_identifier in str(tag)),
            None)
        # Fix: guard the not-found case explicitly instead of letting an
        # AttributeError fall through to the generic handler below.
        if synonyms_script_tag is None:
            return word
        # Strip the assignment prefix and JS-isms so the rest parses as JSON.
        data = (synonyms_script_tag.contents[0]
                .replace(syn_list_identifier, '')
                .replace('undefined', 'null')
                .replace(';', ''))
        json_data = loads(data)
        synonyms = json_data['searchData']['tunaApiData']['posTabs'][0]['synonyms']
        return synonyms[0]['term']
    except Exception:
        # Fix: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the deliberate best-effort
        # fallback without trapping interpreter-control exceptions.
        return word
import math
import numpy as np
import cv2
from pythonRLSA import rlsa
from ijazahpy.preprocessing import remove_noise_bin as remove_noise
class DotsSegmentation:
    """
    Dotted-line segmentation for certificate (ijazah) images.

    The fill-in fields on an ijazah are underlined with rows of dots;
    this class isolates those dots, connects them horizontally with
    run-length smoothing (RLSA), and returns one bounding box per field.

    methods:
        segment:: high-level entry point returning field rectangles
        remove_bin_noise:: clears narrow contours from a binary image
        get_dots_loc:: keeps only dot-sized contours of the image
        segment_dots:: connects dots and extracts dotted-line rectangles
    """

    def __init__(self, rlsa_val=47):
        # Horizontal run-length threshold used to merge neighbouring dots.
        self.RLSA_VALUE = rlsa_val

    def segment(self, img, dot_size=3, min_width=72, min_height=9, imshow=False):
        """
        Segment the dotted fields of a grayscale image.

        params:
            img::ndarray::~ Grayscale image
            dot_size::int::~ maximum contour area still treated as a dot
            min_width::int::~ minimum width of an accepted field
            min_height::int::~ maximum height of an accepted dot row
            imshow::bool::~ show intermediate images for debugging

        Returns an array of tuples (x,y,w,h)
        """
        og = img.copy()
        dots_loc, dots_loc_bin = self.get_dots_loc(og, dot_size=dot_size)
        if imshow:
            cv2.imshow('dots', dots_loc_bin)
        rects = self.segment_dots(dots_loc_bin,
                                  min_width=min_width,
                                  min_height=min_height,
                                  imshow=imshow)
        return rects

    def remove_bin_noise(self, img_bin, min_line_width=50):
        """
        Remove narrow contours (noise) from a binary image, in place.

        params:
            img_bin::ndarray::~ binary image, applied threshold
            min_line_width::int::~ contours at most this wide are erased
        """
        contours, _ = cv2.findContours(img_bin,
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_NONE)
        for c in contours:
            # Fix: boundingRect was computed twice for every matching
            # contour; compute it once per contour.
            (x, y, w, h) = cv2.boundingRect(c)
            if w <= min_line_width:
                # Blank the bounding box, then fill the exact contour.
                img_bin[y:y+h, x:x+w] = np.zeros((h, w))
                cv2.fillPoly(img_bin, pts=[c], color=(0, 0, 0))
        return img_bin

    @staticmethod
    def get_dots_loc(og, dot_size=3):
        """Return (grayscale, binary) copies of *og* containing only dots."""
        img = og.copy()
        _, img_bin = cv2.threshold(img,
                                   100,
                                   255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        img_bin_og = img_bin.copy()
        contours, _ = cv2.findContours(img_bin,
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_NONE)
        # First pass: erase every contour (except degenerate zero-area,
        # short ones), leaving a blank canvas.
        for c in contours:
            area = cv2.contourArea(c)
            (x, y, w, h) = cv2.boundingRect(c)
            if area >= 1 or h > 3:
                img[y:y+h, x:x+w] = np.full((h, w), 255)
                cv2.fillPoly(img, pts=[c], color=(255, 255, 255))
                img_bin[y:y+h, x:x+w] = np.zeros((h, w))
        # Second pass: restore the contours small enough to be dots.
        for c in contours:
            area = cv2.contourArea(c)
            (x, y, w, h) = cv2.boundingRect(c)
            if area <= dot_size and h < 5:  # dot-sized contour
                img[y:y+h, x:x+w] = og[y:y+h, x:x+w]
                img_bin[y:y+h, x:x+w] = img_bin_og[y:y+h, x:x+w]
        return img, img_bin

    def segment_dots(self, img_bin, field_height=22, min_width=72, min_height=9, imshow=False):
        """Connect dots horizontally, then extract one rectangle per row."""
        img_rlsa = self.connect_horizontal(img_bin, self.RLSA_VALUE)
        if imshow:
            cv2.imshow('connect', img_rlsa)
        return self.segment_line(img_rlsa, field_height, min_width, min_height)

    @staticmethod
    def connect_horizontal(img_bin, rlsa_val=47):
        """Connect dots horizontally with RLSA; returns the smoothed image."""
        # RLSA expects white-on-black runs, so invert first.
        inverted = cv2.subtract(255, img_bin)
        img_rlsa = rlsa.rlsa(inverted,
                             True,   # horizontal smoothing on
                             False,  # vertical smoothing off
                             rlsa_val)
        # Fix: invert the RLSA *output*. The original inverted the input
        # image instead, silently discarding the smoothing result unless
        # rlsa.rlsa happened to mutate its argument in place. (When it
        # does mutate in place the two are identical, so this is safe.)
        return cv2.subtract(255, img_rlsa)

    @staticmethod
    def segment_line(img_bin, field_height=22, min_width=72, min_height=9):
        """Turn connected-dot runs into field rectangles, sorted top-down."""
        img_rlsa = img_bin.copy()
        contours, _ = cv2.findContours(img_rlsa,
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_NONE)
        rects = []
        for c in contours:
            (x, y, w, h) = cv2.boundingRect(c)
            # A dotted line is short and wide; extend the box upward by
            # field_height to also cover the text written above the dots.
            if h < min_height and w > min_width:
                rects.append((x, y - field_height, w, h + field_height))
        rects.sort(key=lambda tup: tup[1])
        return rects
class WordSegmentation:
    """
    Word Segmentation Object

    Scale-space word segmentation proposed by R. Manmatha:
    http://ciir.cs.umass.edu/pubfiles/mm-27.pdf
    """

    def segment(self, img, kernelSize=25, sigma=11,
                theta=7, minArea=32, imshow_steps=False):
        """Segment a text line into words.

        params:
            img::ndarray::~ grayscale uint8 image of the text-line to be segmented.
            kernelSize::uint::~ size of filter kernel, must be an odd integer.
            sigma::uint::~ standard deviation of Gaussian function used for filter kernel.
            theta::uint::~ approximated width/height ratio of words, filter function is distorted by this factor.
            minArea::uint::~ ignore word candidates smaller than specified area.
            imshow_steps::bool::~ display intermediate images for debugging.

        Returns a list of tuples. Each tuple contains the bounding box and
        the image of the segmented word.
        """
        # Blur the line with an anisotropic kernel so the letters of one
        # word merge into a single blob, then threshold to isolate blobs.
        anisotropic = self.create_kernel(kernelSize, sigma, theta)
        smoothed = cv2.filter2D(img,
                                -1,
                                anisotropic,
                                borderType=cv2.BORDER_REPLICATE).astype(np.uint8)
        _, blobs = cv2.threshold(smoothed,
                                 0,
                                 255,
                                 cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        if imshow_steps:
            cv2.imshow('filtered', smoothed)
            cv2.imshow('blob', blobs)
        contours, _ = cv2.findContours(blobs,
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        words = []
        for contour in contours:
            # Skip small word candidates.
            if cv2.contourArea(contour) < minArea:
                continue
            bbox = cv2.boundingRect(contour)
            x, y, w, h = bbox
            words.append((bbox, img[y:y+h, x:x+w]))
        # Sort left-to-right by the x coordinate of each bounding box.
        return sorted(words, key=lambda entry: entry[0][0])

    @staticmethod
    def prepare_img(img, height):
        """Convert *img* to grayscale (if needed) and resize to *height*."""
        assert img.ndim in (2, 3)
        if img.ndim == 3:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        scale = height / img.shape[0]
        return cv2.resize(img, dsize=None, fx=scale, fy=scale)

    @staticmethod
    def create_kernel(kernel_size, sigma, theta):
        """
        Create the anisotropic filter kernel for the given parameters.
        """
        assert kernel_size % 2  # must be odd size
        half = kernel_size // 2
        sigma_x = sigma
        sigma_y = sigma * theta
        kernel = np.zeros([kernel_size, kernel_size])
        for row in range(kernel_size):
            for col in range(kernel_size):
                dx = row - half
                dy = col - half
                gauss = np.exp(-dx**2 / (2 * sigma_x) - dy**2 / (2 * sigma_y))
                term_x = (dx**2 - sigma_x**2) / (2 * math.pi * sigma_x**2 * sigma_y)
                term_y = (dy**2 - sigma_y**2) / (2 * math.pi * sigma_y**2 * sigma_x)
                kernel[row, col] = (term_x + term_y) * gauss
        # Normalize so the kernel sums to 1.
        return kernel / np.sum(kernel)
# Fails
# NOTE(review): the author marked this approach as failing; kept for
# reference and not used by the working word/dot pipelines above.
def segment_characters(img_gray, walking_kernel=False, noise_remove=False):
    """Segment individual characters from a grayscale image.

    params:
        img_gray::ndarray::~ grayscale input image
        walking_kernel::bool::~ additionally try to split touching
            characters with the "walking kernel" heuristic below
        noise_remove::bool::~ denoise the thresholded image first

    Returns a list of ((x,y,w,h), cropped_gray_image) tuples sorted by x.
    """
    gray = img_gray.copy()
    _, img_bin = cv2.threshold(gray,
                               0,
                               255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    if noise_remove:
        img_bin = remove_noise(img_bin,3)
    contours, _ = cv2.findContours(img_bin,
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    res = []
    if walking_kernel:
        # Average contour height/width, used to pick "wide" candidates
        # that are likely two or more touching characters.
        heights = [cv2.boundingRect(contour)[3] for contour in contours]
        avgheight = sum(heights)/len(heights) # average height
        widths = [cv2.boundingRect(contour)[2] for contour in contours]
        avgwidth = sum(widths) / len(widths)
        rects = []
        for c in contours:
            (x,y,w,h) = cv2.boundingRect(c)
            if w > avgwidth * 0.8:
                rects.append((x,y,w,h))
        # New Algorithm, Walking Kernel: slide a 2px-wide window across
        # each wide box; wherever the column is nearly empty, blank its
        # lower half, hoping to cut the join between touching characters.
        for (x,y,w,h) in rects:
            # mid_percent = 0.8
            mid_index = int(h * 0.5)
            # kernel_width = 2
            kernel = (2,h)
            img_temp = img_bin[y:y+h, x:x+w]
            img_temp = remove_noise(img_temp, 6)
            # strides = 1
            for step in range(0, w, 1):
                img_target = img_temp[0:kernel[1], step:step+kernel[0]]
                pixels_value = np.sum(img_target)
                # minimum_pixels_value = 1020
                if pixels_value <= 1020:
                    img_target[mid_index:] = 0
                img_bin[y:y+h, x+step:x+step+kernel[0]] = img_target
                # x offset: stop before the right edge of the box
                if step >= w * 0.8:
                    break
        # Re-extract contours after the cuts were applied.
        contours, _ = cv2.findContours(img_bin,
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_NONE)
        for c in contours:
            (x,y,w,h) = cv2.boundingRect(c)
            if w > 3 and h > 3:
                res.append(((x,y,w,h), gray[y:y+h, x:x+w]))
    else:
        # Simple path: keep every plausibly character-sized contour.
        for c in contours:
            (x,y,w,h) = cv2.boundingRect(c)
            if h > 3 and h < 60:
                res.append(((x,y,w,h), gray[y:y+h, x:x+w]))
    return sorted(res, key=lambda entry:entry[0][0])
# Fails
# NOTE(review): experimental morphological approach, marked failing by
# the author. Opens debug windows and returns None; not used elsewhere.
def segment_character2(img_gray):
    """Experimental character segmentation via opening + RLSA.

    Draws the detected boxes into debug windows; returns None.
    """
    gray = img_gray.copy()
    _, img_bin = cv2.threshold(gray,
                               0,
                               255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    img_bin = remove_noise(img_bin, 3)
    # Open with a 2x1 kernel to break thin vertical joins between glyphs.
    kernel = np.ones((2,1))
    erosion = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel)# cv2.erode(img_bin, kernel, iterations=1)
    cv2.imshow('erosion', erosion)
    # RLSA runs on white-on-black, so invert before and after.
    ero_inv = cv2.subtract(255, erosion)
    img_rlsa = rlsa.rlsa(ero_inv,
                         True,
                         True,
                         10)
    res = cv2.subtract(255, img_rlsa)
    cv2.imshow('res', res)
    contours, _ = cv2.findContours(res,
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    for c in contours:
        (x,y,w,h) = cv2.boundingRect(c)
        if h > 3:
            cv2.rectangle(gray, (x,y), (x+w, y+h), (0, 0, 0), 1)
    cv2.imshow('final', gray)
    return
def segment_words(img_gray, rlsa_val=7, bin_result=False):
    """Segment words with RLSA.

    params:
        img_gray::ndarray:~ grayscale image
        rlsa_val::integer:~ value for run length smoothing algorithm
        bin_result::bool:~ crop from the binary image instead of grayscale

    Returns a list of tuple -> ((x,y,w,h), image_array)
    """
    gray = img_gray.copy()
    _, binary = cv2.threshold(gray,
                              0,
                              255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    binary = remove_noise(binary, 30)
    # Keep an untouched copy for bin_result crops before inverting.
    binary_backup = binary.copy()
    inverted = cv2.subtract(255, binary)
    # Smooth in both directions so letters of a word fuse into one blob.
    smoothed = rlsa.rlsa(inverted,
                         True,
                         True,
                         rlsa_val)
    restored = cv2.subtract(255, smoothed)
    contours, _ = cv2.findContours(restored,
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    segments = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if h <= 3:
            continue  # too short to be a word
        source = binary_backup if bin_result else gray
        segments.append(((x, y, w, h), source[y:y+h, x:x+w]))
    return segments
##if __name__ == '__main__':
## filepath = 'G:\Kuliah\skripsi\github\SimpleApp-master\SimpleApp\media/'.replace('\\', '/')
## filename = '2ijazah3.jpg'
## print(filepath+filename)
## img = cv2.imread(filepath+filename, cv2.IMREAD_GRAYSCALE)
##
## res = segment_words(img, bin_result=True)
##
#### img = cv2.imread('samples/random1.jpg', cv2.IMREAD_GRAYSCALE)
#### wordSegmentation = WordSegmentation()
#### res = wordSegmentation.segment(img, imshow_steps=True)
## for i, entry, in enumerate(res):
## (box, img) = entry
## cv2.imshow(str(i), img)
|
#import sys
#input = sys.stdin.readline
def main():
    """Read N, print a (2^N - 1)-row A/B grid when N >= 3, then print N."""
    n = int(input())
    # print((2**N-1)//2)
    total = 2 ** n - 1      # rows, and trailing columns, of the grid
    half = total // 2       # how many cells per column get an 'A'
    if n >= 3:
        # Every row starts with 'A' followed by `total` 'B' cells.
        grid = [["A"] + ["B"] * total for _ in range(total)]
        for col in range(total):
            for off in range(half):
                grid[(off + col) % total][col + 1] = "A"
        print(total)
        print("\n".join("".join(row) for row in grid))
    print(n)


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.